Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/ADT/APFloat.h b/linux-x64/clang/include/llvm/ADT/APFloat.h
new file mode 100644
index 0000000..6c0b6ae
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APFloat.h
@@ -0,0 +1,1249 @@
+//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief
+/// This file declares a class to represent arbitrary precision floating point
+/// values and provide a variety of arithmetic operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFLOAT_H
+#define LLVM_ADT_APFLOAT_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+
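+// Forwards a method call to the active member of APFloat's internal storage
+// union (IEEEFloat or DoubleAPFloat), as selected by the value's semantics.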
+#define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL)                             \
+  do {                                                                         \
+    if (usesLayout<IEEEFloat>(getSemantics()))                                 \
+      return U.IEEE.METHOD_CALL;                                               \
+    if (usesLayout<DoubleAPFloat>(getSemantics()))                             \
+      return U.Double.METHOD_CALL;                                             \
+    llvm_unreachable("Unexpected semantics");                                  \
+  } while (false)
+
+namespace llvm {
+
+struct fltSemantics;
+class APSInt;
+class StringRef;
+class APFloat;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+
+/// Enum that represents what fraction of the LSB the truncated bits of an
+/// fp number represent.
+///
+/// This essentially combines the roles of guard and sticky bits.
+enum lostFraction { // Example of truncated bits:
+  lfExactlyZero,    // 000000
+  lfLessThanHalf,   // 0xxxxx  x's not all zero
+  lfExactlyHalf,    // 100000
+  lfMoreThanHalf    // 1xxxxx  x's not all zero
+};
+
+/// A self-contained host- and target-independent arbitrary-precision
+/// floating-point software implementation.
+///
+/// APFloat uses bignum integer arithmetic as provided by static functions in
+/// the APInt class.  The library will work with bignum integers whose parts are
+/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
+///
+/// Written for clarity rather than speed, in particular with a view to use in
+/// the front-end of a cross compiler so that target arithmetic can be correctly
+/// performed on the host.  Performance should nonetheless be reasonable,
+/// particularly for its intended use.  It may be useful as a base
+/// implementation for a run-time library during development of a faster
+/// target-specific one.
+///
+/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
+/// implemented operations.  Currently implemented operations are add, subtract,
+/// multiply, divide, fused-multiply-add, conversion-to-float,
+/// conversion-to-integer and conversion-from-integer.  New rounding modes
+/// (e.g. away from zero) can be added with three or four lines of code.
+///
+/// Four formats are built-in: IEEE single precision, double precision,
+/// quadruple precision, and x87 80-bit extended double (when operating with
+/// full extended precision).  Adding a new format that obeys IEEE semantics
+/// only requires adding two lines of code: a declaration and definition of the
+/// format.
+///
+/// All operations return the status of that operation as an exception bit-mask,
+/// so multiple operations can be done consecutively with their results or-ed
+/// together.  The returned status can be useful for compiler diagnostics; e.g.,
+/// inexact, underflow and overflow can be easily diagnosed on constant folding,
+/// and compiler optimizers can determine what exceptions would be raised by
+/// folding operations and optimize, or perhaps not optimize, accordingly.
+///
+/// At present, underflow tininess is detected after rounding; it should be
+/// straightforward to add support for the before-rounding case too.
+///
+/// The library reads hexadecimal floating point numbers as per C99, and
+/// correctly rounds if necessary according to the specified rounding mode.
+/// Syntax is required to have been validated by the caller.  It also converts
+/// floating point numbers to hexadecimal text as per the C99 %a and %A
+/// conversions.  The output precision (or alternatively the natural minimal
+/// precision) can be specified; if the requested precision is less than the
+/// natural precision the output is correctly rounded for the specified rounding
+/// mode.
+///
+/// It also reads decimal floating point numbers and correctly rounds according
+/// to the specified rounding mode.
+///
+/// Conversion to decimal text is not currently implemented.
+///
+/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
+/// signed exponent, and the significand as an array of integer parts.  After
+/// normalization of a number of precision P the exponent is within the range of
+/// the format, and if the number is not denormal the P-th bit of the
+/// significand is set as an explicit integer bit.  For denormals the most
+/// significant bit is shifted right so that the exponent is maintained at the
+/// format's minimum, so that the smallest denormal has just the least
+/// significant bit of the significand set.  The sign of zeroes and infinities
+/// is significant; the exponent and significand of such numbers are not
+/// stored, but have known implicit (deterministic) values: 0 for the
+/// significand, 0 for the zero exponent, all 1 bits for the infinity
+/// exponent.  For NaNs the sign and
+/// significand are deterministic, although not really meaningful, and preserved
+/// in non-conversion operations.  The exponent is implicitly all 1 bits.
+///
+/// APFloat does not provide any exception handling beyond default exception
+/// handling. Per the IEEE-754R 2008 6.2.1 "should" clause, we represent
+/// Signaling NaNs by encoding them with the first bit of their trailing
+/// significand as 0.
+///
+/// TODO
+/// ====
+///
+/// Some features that may or may not be worth adding:
+///
+/// Binary to decimal conversion (hard).
+///
+/// Optional ability to detect underflow tininess before rounding.
+///
+/// New formats: x87 in single and double precision mode (IEEE apart from
+/// extended exponent range) (hard).
+///
+/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
+///
+
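+// An illustrative sketch (not part of this header) of chaining operations and
+// or-ing their returned statuses, as described above; the values and the
+// IEEEsingle semantics are arbitrary examples:
+//
+// \code
+//   APFloat V(APFloat::IEEEsingle(), "1.25");
+//   unsigned Status = APFloat::opOK;
+//   Status |= V.multiply(APFloat(10.0f), APFloat::rmNearestTiesToEven);
+//   Status |= V.add(APFloat(0.5f), APFloat::rmNearestTiesToEven);
+//   if (Status & APFloat::opInexact) {
+//     // At least one of the operations had to round.
+//   }
+// \endcode
+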
+// These are the common type definitions shared by APFloat and its internal
+// implementation classes. This struct should not define any non-static data
+// members.
+struct APFloatBase {
+  typedef APInt::WordType integerPart;
+  static const unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
+
+  /// A signed type to represent a floating point number's unbiased exponent.
+  typedef signed short ExponentType;
+
+  /// \name Floating Point Semantics.
+  /// @{
+
+  static const fltSemantics &IEEEhalf() LLVM_READNONE;
+  static const fltSemantics &IEEEsingle() LLVM_READNONE;
+  static const fltSemantics &IEEEdouble() LLVM_READNONE;
+  static const fltSemantics &IEEEquad() LLVM_READNONE;
+  static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
+  static const fltSemantics &x87DoubleExtended() LLVM_READNONE;
+
+  /// A pseudo fltSemantics used to construct APFloats that cannot conflict with
+  /// anything real.
+  static const fltSemantics &Bogus() LLVM_READNONE;
+
+  /// @}
+
+  /// IEEE-754R 5.11: Floating Point Comparison Relations.
+  enum cmpResult {
+    cmpLessThan,
+    cmpEqual,
+    cmpGreaterThan,
+    cmpUnordered
+  };
+
+  /// IEEE-754R 4.3: Rounding-direction attributes.
+  enum roundingMode {
+    rmNearestTiesToEven,
+    rmTowardPositive,
+    rmTowardNegative,
+    rmTowardZero,
+    rmNearestTiesToAway
+  };
+
+  /// IEEE-754R 7: Default exception handling.
+  ///
+  /// opUnderflow or opOverflow are always returned or-ed with opInexact.
+  enum opStatus {
+    opOK = 0x00,
+    opInvalidOp = 0x01,
+    opDivByZero = 0x02,
+    opOverflow = 0x04,
+    opUnderflow = 0x08,
+    opInexact = 0x10
+  };
+
+  /// Category of internally-represented number.
+  enum fltCategory {
+    fcInfinity,
+    fcNaN,
+    fcNormal,
+    fcZero
+  };
+
+  /// Convenience enum used to construct an uninitialized APFloat.
+  enum uninitializedTag {
+    uninitialized
+  };
+
+  /// Enumeration of \c ilogb error results.
+  enum IlogbErrorKinds {
+    IEK_Zero = INT_MIN + 1,
+    IEK_NaN = INT_MIN,
+    IEK_Inf = INT_MAX
+  };
+
+  static unsigned int semanticsPrecision(const fltSemantics &);
+  static ExponentType semanticsMinExponent(const fltSemantics &);
+  static ExponentType semanticsMaxExponent(const fltSemantics &);
+  static unsigned int semanticsSizeInBits(const fltSemantics &);
+
+  /// Returns the size of the floating point number (in bits) in the given
+  /// semantics.
+  static unsigned getSizeInBits(const fltSemantics &Sem);
+};
+
+namespace detail {
+
+class IEEEFloat final : public APFloatBase {
+public:
+  /// \name Constructors
+  /// @{
+
+  IEEEFloat(const fltSemantics &); // Default construct to 0.0
+  IEEEFloat(const fltSemantics &, integerPart);
+  IEEEFloat(const fltSemantics &, uninitializedTag);
+  IEEEFloat(const fltSemantics &, const APInt &);
+  explicit IEEEFloat(double d);
+  explicit IEEEFloat(float f);
+  IEEEFloat(const IEEEFloat &);
+  IEEEFloat(IEEEFloat &&);
+  ~IEEEFloat();
+
+  /// @}
+
+  /// Returns whether this instance allocated memory.
+  bool needsCleanup() const { return partCount() > 1; }
+
+  /// \name Convenience "constructors"
+  /// @{
+
+  /// @}
+
+  /// \name Arithmetic
+  /// @{
+
+  opStatus add(const IEEEFloat &, roundingMode);
+  opStatus subtract(const IEEEFloat &, roundingMode);
+  opStatus multiply(const IEEEFloat &, roundingMode);
+  opStatus divide(const IEEEFloat &, roundingMode);
+  /// IEEE remainder.
+  opStatus remainder(const IEEEFloat &);
+  /// C fmod, or llvm frem.
+  opStatus mod(const IEEEFloat &);
+  opStatus fusedMultiplyAdd(const IEEEFloat &, const IEEEFloat &, roundingMode);
+  opStatus roundToIntegral(roundingMode);
+  /// IEEE-754R 5.3.1: nextUp/nextDown.
+  opStatus next(bool nextDown);
+
+  /// @}
+
+  /// \name Sign operations.
+  /// @{
+
+  void changeSign();
+
+  /// @}
+
+  /// \name Conversions
+  /// @{
+
+  opStatus convert(const fltSemantics &, roundingMode, bool *);
+  opStatus convertToInteger(MutableArrayRef<integerPart>, unsigned int, bool,
+                            roundingMode, bool *) const;
+  opStatus convertFromAPInt(const APInt &, bool, roundingMode);
+  opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
+                                          bool, roundingMode);
+  opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
+                                          bool, roundingMode);
+  opStatus convertFromString(StringRef, roundingMode);
+  APInt bitcastToAPInt() const;
+  double convertToDouble() const;
+  float convertToFloat() const;
+
+  /// @}
+
+  /// The definition of equality is not straightforward for floating point, so
+  /// we won't use operator==.  Use one of the following, or write whatever it
+  /// is you really mean.
+  bool operator==(const IEEEFloat &) const = delete;
+
+  /// IEEE comparison with another floating point number (NaNs compare
+  /// unordered, 0==-0).
+  cmpResult compare(const IEEEFloat &) const;
+
+  /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
+  bool bitwiseIsEqual(const IEEEFloat &) const;
+
+  /// Write out a hexadecimal representation of the floating point value to DST,
+  /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
+  /// Return the number of characters written, excluding the terminating NUL.
+  unsigned int convertToHexString(char *dst, unsigned int hexDigits,
+                                  bool upperCase, roundingMode) const;
+
+  /// \name IEEE-754R 5.7.2 General operations.
+  /// @{
+
+  /// IEEE-754R isSignMinus: Returns true if and only if the current value is
+  /// negative.
+  ///
+  /// This applies to zeros and NaNs as well.
+  bool isNegative() const { return sign; }
+
+  /// IEEE-754R isNormal: Returns true if and only if the current value is normal.
+  ///
+  /// This implies that the current value of the float is not zero, subnormal,
+  /// infinite, or NaN following the definition of normality from IEEE-754R.
+  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+
+  /// Returns true if and only if the current value is zero, subnormal, or
+  /// normal.
+  ///
+  /// This means that the value is not infinite or NaN.
+  bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+  /// Returns true if and only if the float is plus or minus zero.
+  bool isZero() const { return category == fcZero; }
+
+  /// IEEE-754R isSubnormal(): Returns true if and only if the float is a
+  /// denormal.
+  bool isDenormal() const;
+
+  /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
+  bool isInfinity() const { return category == fcInfinity; }
+
+  /// Returns true if and only if the float is a quiet or signaling NaN.
+  bool isNaN() const { return category == fcNaN; }
+
+  /// Returns true if and only if the float is a signaling NaN.
+  bool isSignaling() const;
+
+  /// @}
+
+  /// \name Simple Queries
+  /// @{
+
+  fltCategory getCategory() const { return category; }
+  const fltSemantics &getSemantics() const { return *semantics; }
+  bool isNonZero() const { return category != fcZero; }
+  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+  bool isPosZero() const { return isZero() && !isNegative(); }
+  bool isNegZero() const { return isZero() && isNegative(); }
+
+  /// Returns true if and only if the number has the smallest possible non-zero
+  /// magnitude in the current semantics.
+  bool isSmallest() const;
+
+  /// Returns true if and only if the number has the largest possible finite
+  /// magnitude in the current semantics.
+  bool isLargest() const;
+
+  /// Returns true if and only if the number is an exact integer.
+  bool isInteger() const;
+
+  /// @}
+
+  IEEEFloat &operator=(const IEEEFloat &);
+  IEEEFloat &operator=(IEEEFloat &&);
+
+  /// Overload to compute a hash code for an APFloat value.
+  ///
+  /// Note that the use of hash codes for floating point values is in general
+  /// fraught with peril. Equality is hard to define for these values. For
+  /// example, should negative and positive zero hash to different codes? Are
+  /// they equal or not? This hash value implementation specifically
+  /// emphasizes producing different codes for different inputs in order to
+  /// be used in canonicalization and memoization. As such, equality is
+  /// bitwiseIsEqual, and 0 != -0.
+  friend hash_code hash_value(const IEEEFloat &Arg);
+
+  /// Converts this value into a decimal string.
+  ///
+  /// \param FormatPrecision The maximum number of digits of
+  ///   precision to output.  If there are fewer digits available,
+  ///   zero padding will not be used unless the value is
+  ///   integral and small enough to be expressed in
+  ///   FormatPrecision digits.  0 means to use the natural
+  ///   precision of the number.
+  /// \param FormatMaxPadding The maximum number of zeros to
+  ///   consider inserting before falling back to scientific
+  ///   notation.  0 means to always use scientific notation.
+  ///
+  /// \param TruncateZero Indicates whether to remove trailing zeros in the
+  ///   fraction part. Setting this parameter to false also forces output
+  ///   closer to default printf behavior: a lowercase 'e' is used as the
+  ///   exponent delimiter and the exponent always contains at least two
+  ///   digits.
+  ///
+  /// Number       Precision    MaxPadding      Result
+  /// ------       ---------    ----------      ------
+  /// 1.01E+4              5             2       10100
+  /// 1.01E+4              4             2       1.01E+4
+  /// 1.01E+4              5             1       1.01E+4
+  /// 1.01E-2              5             2       0.0101
+  /// 1.01E-2              4             2       0.0101
+  /// 1.01E-2              4             1       1.01E-2
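+  ///
+  /// A hypothetical call matching the first table row (\c V is a placeholder
+  /// value equal to 1.01E+4):
+  /// \code
+  ///   SmallString<16> Buf;
+  ///   V.toString(Buf, /*FormatPrecision=*/5, /*FormatMaxPadding=*/2);
+  ///   // Buf now holds "10100".
+  /// \endcode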
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const;
+
+  /// If this value has an exact multiplicative inverse, store it in inv and
+  /// return true.
+  bool getExactInverse(APFloat *inv) const;
+
+  /// Returns the exponent of the internal representation of the APFloat.
+  ///
+  /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
+  /// For special APFloat values, this returns special error codes:
+  ///
+  ///   NaN -> \c IEK_NaN
+  ///   0   -> \c IEK_Zero
+  ///   Inf -> \c IEK_Inf
+  ///
+  friend int ilogb(const IEEEFloat &Arg);
+
+  /// Returns: X * 2^Exp for integral exponents.
+  friend IEEEFloat scalbn(IEEEFloat X, int Exp, roundingMode);
+
+  friend IEEEFloat frexp(const IEEEFloat &X, int &Exp, roundingMode);
+
+  /// \name Special value setters.
+  /// @{
+
+  void makeLargest(bool Neg = false);
+  void makeSmallest(bool Neg = false);
+  void makeNaN(bool SNaN = false, bool Neg = false,
+               const APInt *fill = nullptr);
+  void makeInf(bool Neg = false);
+  void makeZero(bool Neg = false);
+  void makeQuiet();
+
+  /// Returns the smallest (by magnitude) normalized finite number in the given
+  /// semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  void makeSmallestNormalized(bool Negative = false);
+
+  /// @}
+
+  cmpResult compareAbsoluteValue(const IEEEFloat &) const;
+
+private:
+  /// \name Simple Queries
+  /// @{
+
+  integerPart *significandParts();
+  const integerPart *significandParts() const;
+  unsigned int partCount() const;
+
+  /// @}
+
+  /// \name Significand operations.
+  /// @{
+
+  integerPart addSignificand(const IEEEFloat &);
+  integerPart subtractSignificand(const IEEEFloat &, integerPart);
+  lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
+  lostFraction multiplySignificand(const IEEEFloat &, const IEEEFloat *);
+  lostFraction divideSignificand(const IEEEFloat &);
+  void incrementSignificand();
+  void initialize(const fltSemantics *);
+  void shiftSignificandLeft(unsigned int);
+  lostFraction shiftSignificandRight(unsigned int);
+  unsigned int significandLSB() const;
+  unsigned int significandMSB() const;
+  void zeroSignificand();
+  /// Return true if the significand excluding the integral bit is all ones.
+  bool isSignificandAllOnes() const;
+  /// Return true if the significand excluding the integral bit is all zeros.
+  bool isSignificandAllZeros() const;
+
+  /// @}
+
+  /// \name Arithmetic on special values.
+  /// @{
+
+  opStatus addOrSubtractSpecials(const IEEEFloat &, bool subtract);
+  opStatus divideSpecials(const IEEEFloat &);
+  opStatus multiplySpecials(const IEEEFloat &);
+  opStatus modSpecials(const IEEEFloat &);
+
+  /// @}
+
+  /// \name Miscellany
+  /// @{
+
+  bool convertFromStringSpecials(StringRef str);
+  opStatus normalize(roundingMode, lostFraction);
+  opStatus addOrSubtract(const IEEEFloat &, roundingMode, bool subtract);
+  opStatus handleOverflow(roundingMode);
+  bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
+  opStatus convertToSignExtendedInteger(MutableArrayRef<integerPart>,
+                                        unsigned int, bool, roundingMode,
+                                        bool *) const;
+  opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
+                                    roundingMode);
+  opStatus convertFromHexadecimalString(StringRef, roundingMode);
+  opStatus convertFromDecimalString(StringRef, roundingMode);
+  char *convertNormalToHexString(char *, unsigned int, bool,
+                                 roundingMode) const;
+  opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
+                                        roundingMode);
+
+  /// @}
+
+  APInt convertHalfAPFloatToAPInt() const;
+  APInt convertFloatAPFloatToAPInt() const;
+  APInt convertDoubleAPFloatToAPInt() const;
+  APInt convertQuadrupleAPFloatToAPInt() const;
+  APInt convertF80LongDoubleAPFloatToAPInt() const;
+  APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
+  void initFromAPInt(const fltSemantics *Sem, const APInt &api);
+  void initFromHalfAPInt(const APInt &api);
+  void initFromFloatAPInt(const APInt &api);
+  void initFromDoubleAPInt(const APInt &api);
+  void initFromQuadrupleAPInt(const APInt &api);
+  void initFromF80LongDoubleAPInt(const APInt &api);
+  void initFromPPCDoubleDoubleAPInt(const APInt &api);
+
+  void assign(const IEEEFloat &);
+  void copySignificand(const IEEEFloat &);
+  void freeSignificand();
+
+  /// Note: this must be the first data member.
+  /// The semantics that this value obeys.
+  const fltSemantics *semantics;
+
+  /// A binary fraction with an explicit integer bit.
+  ///
+  /// The significand must be at least one bit wider than the target precision.
+  union Significand {
+    integerPart part;
+    integerPart *parts;
+  } significand;
+
+  /// The signed unbiased exponent of the value.
+  ExponentType exponent;
+
+  /// What kind of floating point number this is.
+  ///
+  /// Only 2 bits are required, but Visual Studio incorrectly sign extends it.
+  /// Using the extra bit keeps it from failing under Visual Studio.
+  fltCategory category : 3;
+
+  /// Sign bit of the number.
+  unsigned int sign : 1;
+};
+
+hash_code hash_value(const IEEEFloat &Arg);
+int ilogb(const IEEEFloat &Arg);
+IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode);
+IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);
+
+// This class implements a more precise float in terms of two APFloats.
+// The interface and layout are designed for arbitrary underlying semantics,
+// though currently only PPCDoubleDouble semantics are supported, whose
+// corresponding underlying semantics are IEEEdouble.
+class DoubleAPFloat final : public APFloatBase {
+  // Note: this must be the first data member.
+  const fltSemantics *Semantics;
+  std::unique_ptr<APFloat[]> Floats;
+
+  opStatus addImpl(const APFloat &a, const APFloat &aa, const APFloat &c,
+                   const APFloat &cc, roundingMode RM);
+
+  opStatus addWithSpecial(const DoubleAPFloat &LHS, const DoubleAPFloat &RHS,
+                          DoubleAPFloat &Out, roundingMode RM);
+
+public:
+  DoubleAPFloat(const fltSemantics &S);
+  DoubleAPFloat(const fltSemantics &S, uninitializedTag);
+  DoubleAPFloat(const fltSemantics &S, integerPart);
+  DoubleAPFloat(const fltSemantics &S, const APInt &I);
+  DoubleAPFloat(const fltSemantics &S, APFloat &&First, APFloat &&Second);
+  DoubleAPFloat(const DoubleAPFloat &RHS);
+  DoubleAPFloat(DoubleAPFloat &&RHS);
+
+  DoubleAPFloat &operator=(const DoubleAPFloat &RHS);
+
+  DoubleAPFloat &operator=(DoubleAPFloat &&RHS) {
+    if (this != &RHS) {
+      this->~DoubleAPFloat();
+      new (this) DoubleAPFloat(std::move(RHS));
+    }
+    return *this;
+  }
+
+  bool needsCleanup() const { return Floats != nullptr; }
+
+  APFloat &getFirst() { return Floats[0]; }
+  const APFloat &getFirst() const { return Floats[0]; }
+  APFloat &getSecond() { return Floats[1]; }
+  const APFloat &getSecond() const { return Floats[1]; }
+
+  opStatus add(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus subtract(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus multiply(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus divide(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus remainder(const DoubleAPFloat &RHS);
+  opStatus mod(const DoubleAPFloat &RHS);
+  opStatus fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
+                            const DoubleAPFloat &Addend, roundingMode RM);
+  opStatus roundToIntegral(roundingMode RM);
+  void changeSign();
+  cmpResult compareAbsoluteValue(const DoubleAPFloat &RHS) const;
+
+  fltCategory getCategory() const;
+  bool isNegative() const;
+
+  void makeInf(bool Neg);
+  void makeZero(bool Neg);
+  void makeLargest(bool Neg);
+  void makeSmallest(bool Neg);
+  void makeSmallestNormalized(bool Neg);
+  void makeNaN(bool SNaN, bool Neg, const APInt *fill);
+
+  cmpResult compare(const DoubleAPFloat &RHS) const;
+  bool bitwiseIsEqual(const DoubleAPFloat &RHS) const;
+  APInt bitcastToAPInt() const;
+  opStatus convertFromString(StringRef, roundingMode);
+  opStatus next(bool nextDown);
+
+  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+                            unsigned int Width, bool IsSigned, roundingMode RM,
+                            bool *IsExact) const;
+  opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM);
+  opStatus convertFromSignExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM);
+  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM);
+  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+                                  bool UpperCase, roundingMode RM) const;
+
+  bool isDenormal() const;
+  bool isSmallest() const;
+  bool isLargest() const;
+  bool isInteger() const;
+
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
+                unsigned FormatMaxPadding, bool TruncateZero = true) const;
+
+  bool getExactInverse(APFloat *inv) const;
+
+  friend int ilogb(const DoubleAPFloat &Arg);
+  friend DoubleAPFloat scalbn(DoubleAPFloat X, int Exp, roundingMode);
+  friend DoubleAPFloat frexp(const DoubleAPFloat &X, int &Exp, roundingMode);
+  friend hash_code hash_value(const DoubleAPFloat &Arg);
+};
+
+hash_code hash_value(const DoubleAPFloat &Arg);
+
+} // End detail namespace
+
+// This is an interface class that currently forwards functionality from
+// detail::IEEEFloat.
+class APFloat : public APFloatBase {
+  typedef detail::IEEEFloat IEEEFloat;
+  typedef detail::DoubleAPFloat DoubleAPFloat;
+
+  static_assert(std::is_standard_layout<IEEEFloat>::value, "");
+
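+  /// Semantics-tagged storage. Both IEEEFloat and DoubleAPFloat keep their
+  /// fltSemantics pointer as their first data member, so reading \c semantics
+  /// is always valid and identifies the active union member.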
+  union Storage {
+    const fltSemantics *semantics;
+    IEEEFloat IEEE;
+    DoubleAPFloat Double;
+
+    explicit Storage(IEEEFloat F, const fltSemantics &S);
+    explicit Storage(DoubleAPFloat F, const fltSemantics &S)
+        : Double(std::move(F)) {
+      assert(&S == &PPCDoubleDouble());
+    }
+
+    template <typename... ArgTypes>
+    Storage(const fltSemantics &Semantics, ArgTypes &&... Args) {
+      if (usesLayout<IEEEFloat>(Semantics)) {
+        new (&IEEE) IEEEFloat(Semantics, std::forward<ArgTypes>(Args)...);
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(Semantics)) {
+        new (&Double) DoubleAPFloat(Semantics, std::forward<ArgTypes>(Args)...);
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    ~Storage() {
+      if (usesLayout<IEEEFloat>(*semantics)) {
+        IEEE.~IEEEFloat();
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*semantics)) {
+        Double.~DoubleAPFloat();
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage(const Storage &RHS) {
+      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+        new (this) IEEEFloat(RHS.IEEE);
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        new (this) DoubleAPFloat(RHS.Double);
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage(Storage &&RHS) {
+      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+        new (this) IEEEFloat(std::move(RHS.IEEE));
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        new (this) DoubleAPFloat(std::move(RHS.Double));
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage &operator=(const Storage &RHS) {
+      if (usesLayout<IEEEFloat>(*semantics) &&
+          usesLayout<IEEEFloat>(*RHS.semantics)) {
+        IEEE = RHS.IEEE;
+      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        Double = RHS.Double;
+      } else if (this != &RHS) {
+        this->~Storage();
+        new (this) Storage(RHS);
+      }
+      return *this;
+    }
+
+    Storage &operator=(Storage &&RHS) {
+      if (usesLayout<IEEEFloat>(*semantics) &&
+          usesLayout<IEEEFloat>(*RHS.semantics)) {
+        IEEE = std::move(RHS.IEEE);
+      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        Double = std::move(RHS.Double);
+      } else if (this != &RHS) {
+        this->~Storage();
+        new (this) Storage(std::move(RHS));
+      }
+      return *this;
+    }
+  } U;
+
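+  /// Returns true when values with the given semantics are stored using
+  /// layout \c T: DoubleAPFloat for PPCDoubleDouble, IEEEFloat otherwise.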
+  template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
+    static_assert(std::is_same<T, IEEEFloat>::value ||
+                  std::is_same<T, DoubleAPFloat>::value, "");
+    if (std::is_same<T, DoubleAPFloat>::value) {
+      return &Semantics == &PPCDoubleDouble();
+    }
+    return &Semantics != &PPCDoubleDouble();
+  }
+
+  IEEEFloat &getIEEE() {
+    if (usesLayout<IEEEFloat>(*U.semantics))
+      return U.IEEE;
+    if (usesLayout<DoubleAPFloat>(*U.semantics))
+      return U.Double.getFirst().U.IEEE;
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  const IEEEFloat &getIEEE() const {
+    if (usesLayout<IEEEFloat>(*U.semantics))
+      return U.IEEE;
+    if (usesLayout<DoubleAPFloat>(*U.semantics))
+      return U.Double.getFirst().U.IEEE;
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  void makeZero(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeZero(Neg)); }
+
+  void makeInf(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeInf(Neg)); }
+
+  void makeNaN(bool SNaN, bool Neg, const APInt *fill) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeNaN(SNaN, Neg, fill));
+  }
+
+  void makeLargest(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeLargest(Neg));
+  }
+
+  void makeSmallest(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallest(Neg));
+  }
+
+  void makeSmallestNormalized(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallestNormalized(Neg));
+  }
+
+  // FIXME: This is needed because clang 3.3 (and older) always checks for the
+  // default constructor in an array aggregate initialization, even if no
+  // element of the array is default initialized.
+  APFloat() : U(IEEEdouble()) {
+    llvm_unreachable("This is a workaround for old clang.");
+  }
+
+  explicit APFloat(IEEEFloat F, const fltSemantics &S) : U(std::move(F), S) {}
+  explicit APFloat(DoubleAPFloat F, const fltSemantics &S)
+      : U(std::move(F), S) {}
+
+  cmpResult compareAbsoluteValue(const APFloat &RHS) const {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only compare APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.compareAbsoluteValue(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.compareAbsoluteValue(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+public:
+  APFloat(const fltSemantics &Semantics) : U(Semantics) {}
+  APFloat(const fltSemantics &Semantics, StringRef S);
+  APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
+  // TODO: Remove this constructor. This isn't faster than the first one.
+  APFloat(const fltSemantics &Semantics, uninitializedTag)
+      : U(Semantics, uninitialized) {}
+  APFloat(const fltSemantics &Semantics, const APInt &I) : U(Semantics, I) {}
+  explicit APFloat(double d) : U(IEEEFloat(d), IEEEdouble()) {}
+  explicit APFloat(float f) : U(IEEEFloat(f), IEEEsingle()) {}
+  APFloat(const APFloat &RHS) = default;
+  APFloat(APFloat &&RHS) = default;
+
+  ~APFloat() = default;
+
+  bool needsCleanup() const { APFLOAT_DISPATCH_ON_SEMANTICS(needsCleanup()); }
+
+  /// Factory for Positive and Negative Zero.
+  ///
+  /// \param Negative True iff the number should be negative.
+  static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeZero(Negative);
+    return Val;
+  }
+
+  /// Factory for Positive and Negative Infinity.
+  ///
+  /// \param Negative True iff the number should be negative.
+  static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeInf(Negative);
+    return Val;
+  }
+
+  /// Factory for NaN values.
+  ///
+  /// \param Negative - True iff the NaN generated should be negative.
+  /// \param type - The unspecified fill bits for creating the NaN, 0 by
+  /// default.  The value is truncated as necessary.
+  static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
+                        unsigned type = 0) {
+    if (type) {
+      APInt fill(64, type);
+      return getQNaN(Sem, Negative, &fill);
+    } else {
+      return getQNaN(Sem, Negative, nullptr);
+    }
+  }
+
+  /// Factory for QNaN values.
+  static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
+                         const APInt *payload = nullptr) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeNaN(false, Negative, payload);
+    return Val;
+  }
+
+  /// Factory for SNaN values.
+  static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
+                         const APInt *payload = nullptr) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeNaN(true, Negative, payload);
+    return Val;
+  }
+
+  /// Returns the largest finite number in the given semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getLargest(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeLargest(Negative);
+    return Val;
+  }
+
+  /// Returns the smallest (by magnitude) finite number in the given semantics.
+  /// Might be denormalized, which implies a relative loss of precision.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeSmallest(Negative);
+    return Val;
+  }
+
+  /// Returns the smallest (by magnitude) normalized finite number in the given
+  /// semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getSmallestNormalized(const fltSemantics &Sem,
+                                       bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeSmallestNormalized(Negative);
+    return Val;
+  }
+
+  /// Returns a float that is bitcast from an all-ones integer.
+  ///
+  /// \param BitWidth - Select float type
+  /// \param isIEEE   - If 128 bit number, select between PPC and IEEE
+  static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
+  /// Used to insert APFloat objects, or objects that contain APFloat objects,
+  /// into FoldingSets.
+  void Profile(FoldingSetNodeID &NID) const;
+
+  opStatus add(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.add(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.add(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus subtract(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.subtract(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.subtract(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus multiply(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.multiply(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.multiply(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus divide(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.divide(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.divide(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus remainder(const APFloat &RHS) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.remainder(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.remainder(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus mod(const APFloat &RHS) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.mod(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.mod(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend,
+                            roundingMode RM) {
+    assert(&getSemantics() == &Multiplicand.getSemantics() &&
+           "Should only call on APFloats with the same semantics");
+    assert(&getSemantics() == &Addend.getSemantics() &&
+           "Should only call on APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.fusedMultiplyAdd(Multiplicand.U.IEEE, Addend.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.fusedMultiplyAdd(Multiplicand.U.Double, Addend.U.Double,
+                                       RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus roundToIntegral(roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(roundToIntegral(RM));
+  }
+
+  // TODO: bool parameters are not readable and a source of bugs.
+  // Do something.
+  opStatus next(bool nextDown) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown));
+  }
+
+  /// Add two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator+(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.add(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Subtract two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator-(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.subtract(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Multiply two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator*(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.multiply(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Divide the first APFloat by the second, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator/(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.divide(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  void changeSign() { APFLOAT_DISPATCH_ON_SEMANTICS(changeSign()); }
+  void clearSign() {
+    if (isNegative())
+      changeSign();
+  }
+  void copySign(const APFloat &RHS) {
+    if (isNegative() != RHS.isNegative())
+      changeSign();
+  }
+
+  /// A static helper to produce a copy of an APFloat value with its sign
+  /// copied from some other APFloat.
+  static APFloat copySign(APFloat Value, const APFloat &Sign) {
+    Value.copySign(Sign);
+    return Value;
+  }
+
+  opStatus convert(const fltSemantics &ToSemantics, roundingMode RM,
+                   bool *losesInfo);
+  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+                            unsigned int Width, bool IsSigned, roundingMode RM,
+                            bool *IsExact) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertToInteger(Input, Width, IsSigned, RM, IsExact));
+  }
+  opStatus convertToInteger(APSInt &Result, roundingMode RM,
+                            bool *IsExact) const;
+  opStatus convertFromAPInt(const APInt &Input, bool IsSigned,
+                            roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(convertFromAPInt(Input, IsSigned, RM));
+  }
+  opStatus convertFromSignExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertFromSignExtendedInteger(Input, InputSize, IsSigned, RM));
+  }
+  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertFromZeroExtendedInteger(Input, InputSize, IsSigned, RM));
+  }
+  opStatus convertFromString(StringRef, roundingMode);
+  APInt bitcastToAPInt() const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(bitcastToAPInt());
+  }
+  double convertToDouble() const { return getIEEE().convertToDouble(); }
+  float convertToFloat() const { return getIEEE().convertToFloat(); }
+
+  bool operator==(const APFloat &) const = delete;
+
+  cmpResult compare(const APFloat &RHS) const {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only compare APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.compare(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.compare(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  bool bitwiseIsEqual(const APFloat &RHS) const {
+    if (&getSemantics() != &RHS.getSemantics())
+      return false;
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.bitwiseIsEqual(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.bitwiseIsEqual(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  /// We don't rely on operator== working on double values, as
+  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+  /// As such, this method can be used to do an exact bit-for-bit comparison of
+  /// two floating point values.
+  ///
+  /// We leave the version with the double argument here because it's just so
+  /// convenient to write "2.0" and the like.  Without this function we'd
+  /// have to duplicate its logic everywhere it's called.
+  bool isExactlyValue(double V) const {
+    bool ignored;
+    APFloat Tmp(V);
+    Tmp.convert(getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
+    return bitwiseIsEqual(Tmp);
+  }
+
+  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+                                  bool UpperCase, roundingMode RM) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertToHexString(DST, HexDigits, UpperCase, RM));
+  }
+
+  bool isZero() const { return getCategory() == fcZero; }
+  bool isInfinity() const { return getCategory() == fcInfinity; }
+  bool isNaN() const { return getCategory() == fcNaN; }
+
+  bool isNegative() const { return getIEEE().isNegative(); }
+  bool isDenormal() const { APFLOAT_DISPATCH_ON_SEMANTICS(isDenormal()); }
+  bool isSignaling() const { return getIEEE().isSignaling(); }
+
+  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+  bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+  fltCategory getCategory() const { return getIEEE().getCategory(); }
+  const fltSemantics &getSemantics() const { return *U.semantics; }
+  bool isNonZero() const { return !isZero(); }
+  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+  bool isPosZero() const { return isZero() && !isNegative(); }
+  bool isNegZero() const { return isZero() && isNegative(); }
+  bool isSmallest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isSmallest()); }
+  bool isLargest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isLargest()); }
+  bool isInteger() const { APFLOAT_DISPATCH_ON_SEMANTICS(isInteger()); }
+
+  APFloat &operator=(const APFloat &RHS) = default;
+  APFloat &operator=(APFloat &&RHS) = default;
+
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero));
+  }
+
+  void print(raw_ostream &) const;
+  void dump() const;
+
+  bool getExactInverse(APFloat *inv) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(getExactInverse(inv));
+  }
+
+  friend hash_code hash_value(const APFloat &Arg);
+  friend int ilogb(const APFloat &Arg) { return ilogb(Arg.getIEEE()); }
+  friend APFloat scalbn(APFloat X, int Exp, roundingMode RM);
+  friend APFloat frexp(const APFloat &X, int &Exp, roundingMode RM);
+  friend IEEEFloat;
+  friend DoubleAPFloat;
+};
+
+/// See friend declarations above.
+///
+/// These additional declarations are required in order to compile LLVM with IBM
+/// xlC compiler.
+hash_code hash_value(const APFloat &Arg);
+inline APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM) {
+  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+    return APFloat(scalbn(X.U.IEEE, Exp, RM), X.getSemantics());
+  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+    return APFloat(scalbn(X.U.Double, Exp, RM), X.getSemantics());
+  llvm_unreachable("Unexpected semantics");
+}
+
+/// Equivalent of C standard library function.
+///
+/// While the C standard says Exp is an unspecified value for infinity and NaN,
+/// this returns INT_MAX for infinities, and INT_MIN for NaNs.
+inline APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM) {
+  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+    return APFloat(frexp(X.U.IEEE, Exp, RM), X.getSemantics());
+  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+    return APFloat(frexp(X.U.Double, Exp, RM), X.getSemantics());
+  llvm_unreachable("Unexpected semantics");
+}
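+
+// Illustrative example (values chosen for demonstration): frexp decomposes
+// 8.0 as 0.5 * 2^4.
+// \code
+//   int Exp;
+//   APFloat Frac = frexp(APFloat(8.0), Exp, APFloat::rmNearestTiesToEven);
+//   // Frac is 0.5 and Exp is 4.
+// \endcode
+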
+/// Returns the absolute value of the argument.
+inline APFloat abs(APFloat X) {
+  X.clearSign();
+  return X;
+}
+
+/// \brief Returns the negated value of the argument.
+inline APFloat neg(APFloat X) {
+  X.changeSign();
+  return X;
+}
+
+/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat minnum(const APFloat &A, const APFloat &B) {
+  if (A.isNaN())
+    return B;
+  if (B.isNaN())
+    return A;
+  return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+}
+
+/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat maxnum(const APFloat &A, const APFloat &B) {
+  if (A.isNaN())
+    return B;
+  if (B.isNaN())
+    return A;
+  return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+}
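+
+// A minimal illustration of the NaN-suppressing behavior (example values
+// only):
+// \code
+//   APFloat QNaN = APFloat::getNaN(APFloat::IEEEdouble());
+//   APFloat One(1.0);
+//   APFloat M = minnum(QNaN, One); // M is 1.0: the NaN operand is ignored.
+// \endcode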
+
+} // namespace llvm
+
+#undef APFLOAT_DISPATCH_ON_SEMANTICS
+#endif // LLVM_ADT_APFLOAT_H
diff --git a/linux-x64/clang/include/llvm/ADT/APInt.h b/linux-x64/clang/include/llvm/ADT/APInt.h
new file mode 100644
index 0000000..118c62e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APInt.h
@@ -0,0 +1,2156 @@
+//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a class to represent arbitrary precision
+/// integral constant values and operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APINT_H
+#define LLVM_ADT_APINT_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+class FoldingSetNodeID;
+class StringRef;
+class hash_code;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+template <typename T> class ArrayRef;
+
+class APInt;
+
+inline APInt operator-(APInt);
+
+//===----------------------------------------------------------------------===//
+//                              APInt Class
+//===----------------------------------------------------------------------===//
+
+/// \brief Class for arbitrary precision integers.
+///
+/// APInt is a functional replacement for common case unsigned integer types
+/// like "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width
+/// integer sizes and large integer value types such as 3-bit, 15-bit, or more
+/// than 64-bit precision. APInt provides a variety of arithmetic operators
+/// and methods to manipulate integer values of any bit-width. It supports both
+/// the typical integer arithmetic and comparison operations as well as bitwise
+/// manipulation.
+///
+/// The class has several invariants worth noting:
+///   * All bit, byte, and word positions are zero-based.
+///   * Once the bit width is set, it doesn't change except by the Truncate,
+///     SignExtend, or ZeroExtend operations.
+///   * All binary operators must be on APInt instances of the same bit width.
+///     Attempting to use these operators on instances with different bit
+///     widths will yield an assertion.
+///   * The value is stored canonically as an unsigned value. For operations
+///     where it makes a difference, there are both signed and unsigned variants
+///     of the operation. For example, sdiv and udiv. However, because the bit
+///     widths must be the same, operations such as Mul and Add produce the same
+///     results regardless of whether the values are interpreted as signed or
+///     not.
+///   * In general, the class tries to follow the style of computation that LLVM
+///     uses in its IR. This simplifies its use for LLVM.
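+///
+/// For instance (an illustrative sketch): because the value is stored as an
+/// unsigned bit pattern, an 8-bit APInt holding 0xFF reads as 255 through the
+/// unsigned accessor and as -1 through the signed one:
+/// \code
+///   APInt A(8, 255);
+///   uint64_t U = A.getZExtValue(); // 255
+///   int64_t S = A.getSExtValue();  // -1
+/// \endcode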
+///
+class LLVM_NODISCARD APInt {
+public:
+  typedef uint64_t WordType;
+
+  /// This enum is used to hold the constants we need for APInt.
+  enum : unsigned {
+    /// Byte size of a word.
+    APINT_WORD_SIZE = sizeof(WordType),
+    /// Bits in a word.
+    APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
+  };
+
+  static const WordType WORD_MAX = ~WordType(0);
+
+private:
+  /// This union is used to store the integer value. When the
+  /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
+  union {
+    uint64_t VAL;   ///< Used to store the <= 64 bits integer value.
+    uint64_t *pVal; ///< Used to store the >64 bits integer value.
+  } U;
+
+  unsigned BitWidth; ///< The number of bits in this APInt.
+
+  friend struct DenseMapAPIntKeyInfo;
+
+  friend class APSInt;
+
+  /// \brief Fast internal constructor
+  ///
+  /// This constructor is used only internally for speed of construction of
+  /// temporaries. It is unsafe for general use so it is not public.
+  APInt(uint64_t *val, unsigned bits) : BitWidth(bits) {
+    U.pVal = val;
+  }
+
+  /// \brief Determine if this APInt just has one word to store its value.
+  ///
+  /// \returns true if the number of bits <= 64, false otherwise.
+  bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
+
+  /// \brief Determine which word a bit is in.
+  ///
+  /// \returns the word position for the specified bit position.
+  static unsigned whichWord(unsigned bitPosition) {
+    return bitPosition / APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Determine which bit in a word a bit is in.
+  ///
+  /// \returns the bit position in a word for the specified bit position
+  /// in the APInt.
+  static unsigned whichBit(unsigned bitPosition) {
+    return bitPosition % APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Get a single bit mask.
+  ///
+  /// \returns a uint64_t with only the bit at "whichBit(bitPosition)" set.
+  /// This method generates and returns a uint64_t (word) mask for a single
+  /// bit at a specific bit position. This is used to mask the bit in the
+  /// corresponding word.
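+  ///
+  /// For illustration: bit 70 falls in word 70 / 64 == 1 at in-word position
+  /// 70 % 64 == 6, so maskBit(70) returns (1ULL << 6).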
+  static uint64_t maskBit(unsigned bitPosition) {
+    return 1ULL << whichBit(bitPosition);
+  }
+
+  /// \brief Clear unused high order bits
+  ///
+  /// This method is used internally to clear the top "N" bits in the high order
+  /// word that are not used by the APInt. This is needed after the most
+  /// significant word is assigned a value to ensure that those bits are
+  /// zeroed out.
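+  ///
+  /// For example, with BitWidth == 7 the computed mask is 0x7F, clearing the
+  /// top 57 bits of the single 64-bit word.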
+  APInt &clearUnusedBits() {
+    // Compute how many bits are used in the final word
+    unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;
+
+    // Mask out the high bits.
+    uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - WordBits);
+    if (isSingleWord())
+      U.VAL &= mask;
+    else
+      U.pVal[getNumWords() - 1] &= mask;
+    return *this;
+  }
+
+  /// \brief Get the word corresponding to a bit position
+  /// \returns the corresponding word for the specified bit position.
+  uint64_t getWord(unsigned bitPosition) const {
+    return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
+  }
+
+  /// Utility method to change the bit width of this APInt to a new bit width,
+  /// allocating and/or deallocating as necessary. There is no guarantee on the
+  /// value of any bits upon return. The caller should populate the bits
+  /// afterward.
+  void reallocate(unsigned NewBitWidth);
+
+  /// \brief Convert a char array into an APInt
+  ///
+  /// Converts a string into a number.  The string must be non-empty
+  /// and well-formed as a number of the given base. The bit-width
+  /// must be sufficient to hold the result.
+  ///
+  /// \param radix The radix of \p str: 2, 8, 10, 16, or 36.
+  ///
+  /// This is used by the constructors that take string arguments.
+  ///
+  /// StringRef::getAsInteger is superficially similar but (1) does
+  /// not assume that the string is well-formed and (2) grows the
+  /// result to hold the input.
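+  ///
+  /// For example (illustrative): fromString(12, "-1F", 16) yields the 12-bit
+  /// two's complement encoding of -31, i.e. 0xFE1.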
+  void fromString(unsigned numBits, StringRef str, uint8_t radix);
+
+  /// \brief An internal division function for dividing APInts.
+  ///
+  /// This is used by the toString method to divide by the radix. It simply
+  /// provides a more convenient form of divide for internal use since KnuthDiv
+  /// has specific constraints on its inputs. If those constraints are not met
+  /// then it provides a simpler form of divide.
+  static void divide(const WordType *LHS, unsigned lhsWords,
+                     const WordType *RHS, unsigned rhsWords, WordType *Quotient,
+                     WordType *Remainder);
+
+  /// out-of-line slow case for inline constructor
+  void initSlowCase(uint64_t val, bool isSigned);
+
+  /// shared code between two array constructors
+  void initFromArray(ArrayRef<uint64_t> array);
+
+  /// out-of-line slow case for inline copy constructor
+  void initSlowCase(const APInt &that);
+
+  /// out-of-line slow case for shl
+  void shlSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for lshr.
+  void lshrSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for ashr.
+  void ashrSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for operator=
+  void AssignSlowCase(const APInt &RHS);
+
+  /// out-of-line slow case for operator==
+  bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for countLeadingZeros
+  unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countLeadingOnes.
+  unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countTrailingZeros.
+  unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countTrailingOnes
+  unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countPopulation
+  unsigned countPopulationSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for intersects.
+  bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for isSubsetOf.
+  bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for setBits.
+  void setBitsSlowCase(unsigned loBit, unsigned hiBit);
+
+  /// out-of-line slow case for flipAllBits.
+  void flipAllBitsSlowCase();
+
+  /// out-of-line slow case for operator&=.
+  void AndAssignSlowCase(const APInt& RHS);
+
+  /// out-of-line slow case for operator|=.
+  void OrAssignSlowCase(const APInt& RHS);
+
+  /// out-of-line slow case for operator^=.
+  void XorAssignSlowCase(const APInt& RHS);
+
+  /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+  /// to, or greater than RHS.
+  int compare(const APInt &RHS) const LLVM_READONLY;
+
+  /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+  /// to, or greater than RHS.
+  int compareSigned(const APInt &RHS) const LLVM_READONLY;
+
+public:
+  /// \name Constructors
+  /// @{
+
+  /// \brief Create a new APInt of numBits width, initialized as val.
+  ///
+  /// If isSigned is true then val is treated as if it were a signed value
+  /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
+  /// will be done. Otherwise, no sign extension occurs (high order bits beyond
+  /// the range of val are zero filled).
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param val the initial value of the APInt
+  /// \param isSigned how to treat signedness of val
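+  ///
+  /// For example (illustrative):
+  /// \code
+  ///   APInt A(16, 7);         // the 16-bit value 7
+  ///   APInt B(16, -1, true);  // the 16-bit value -1, i.e. 0xFFFF
+  /// \endcode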
+  APInt(unsigned numBits, uint64_t val, bool isSigned = false)
+      : BitWidth(numBits) {
+    assert(BitWidth && "bitwidth too small");
+    if (isSingleWord()) {
+      U.VAL = val;
+      clearUnusedBits();
+    } else {
+      initSlowCase(val, isSigned);
+    }
+  }
+
+  /// \brief Construct an APInt of numBits width, initialized as bigVal[].
+  ///
+  /// Note that bigVal.size() can be smaller or larger than the corresponding
+  /// bit width but any extraneous bits will be dropped.
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param bigVal a sequence of words to form the initial value of the APInt
+  APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
+
+  /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
+  /// deprecated because this constructor is prone to ambiguity with the
+  /// APInt(unsigned, uint64_t, bool) constructor.
+  ///
+  /// If this overload is ever deleted, care should be taken to prevent calls
+  /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
+  /// constructor.
+  APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
+
+  /// \brief Construct an APInt from a string representation.
+  ///
+  /// This constructor interprets the string \p str in the given radix. The
+  /// interpretation stops when the first character that is not suitable for the
+  /// radix is encountered, or the end of the string. Acceptable radix values
+  /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
+  /// string to require more bits than numBits.
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param str the string to be interpreted
+  /// \param radix the radix to use for the conversion
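+  ///
+  /// For example (illustrative):
+  /// \code
+  ///   APInt V(32, "DEADBEEF", 16); // the 32-bit value 0xDEADBEEF
+  /// \endcode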
+  APInt(unsigned numBits, StringRef str, uint8_t radix);
+
+  /// Simply makes *this a copy of that.
+  /// @brief Copy Constructor.
+  APInt(const APInt &that) : BitWidth(that.BitWidth) {
+    if (isSingleWord())
+      U.VAL = that.U.VAL;
+    else
+      initSlowCase(that);
+  }
+
+  /// \brief Move Constructor.
+  APInt(APInt &&that) : BitWidth(that.BitWidth) {
+    memcpy(&U, &that.U, sizeof(U));
+    that.BitWidth = 0;
+  }
+
+  /// \brief Destructor.
+  ~APInt() {
+    if (needsCleanup())
+      delete[] U.pVal;
+  }
+
+  /// \brief Default constructor that creates an uninteresting APInt
+  /// representing a 1-bit zero value.
+  ///
+  /// This is useful for object deserialization (pair this with the static
+  ///  method Read).
+  explicit APInt() : BitWidth(1) { U.VAL = 0; }
+
+  /// \brief Returns whether this instance allocated memory.
+  bool needsCleanup() const { return !isSingleWord(); }
+
+  /// Used to insert APInt objects, or objects that contain APInt objects, into
+  ///  FoldingSets.
+  void Profile(FoldingSetNodeID &id) const;
+
+  /// @}
+  /// \name Value Tests
+  /// @{
+
+  /// \brief Determine sign of this APInt.
+  ///
+  /// This tests the high bit of this APInt to determine if it is set.
+  ///
+  /// \returns true if this APInt is negative, false otherwise
+  bool isNegative() const { return (*this)[BitWidth - 1]; }
+
+  /// \brief Determine if this APInt Value is non-negative (>= 0)
+  ///
+  /// This tests the high bit of the APInt to determine if it is unset.
+  bool isNonNegative() const { return !isNegative(); }
+
+  /// \brief Determine if sign bit of this APInt is set.
+  ///
+  /// This tests the high bit of this APInt to determine if it is set.
+  ///
+  /// \returns true if this APInt has its sign bit set, false otherwise.
+  bool isSignBitSet() const { return (*this)[BitWidth-1]; }
+
+  /// \brief Determine if sign bit of this APInt is clear.
+  ///
+  /// This tests the high bit of this APInt to determine if it is clear.
+  ///
+  /// \returns true if this APInt has its sign bit clear, false otherwise.
+  bool isSignBitClear() const { return !isSignBitSet(); }
+
+  /// \brief Determine if this APInt Value is positive.
+  ///
+  /// This tests if the value of this APInt is positive (> 0). Note
+  /// that 0 is not a positive value.
+  ///
+  /// \returns true if this APInt is positive.
+  bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
+
+  /// \brief Determine if all bits are set
+  ///
+  /// This checks whether all bits of this APInt are set.
+  bool isAllOnesValue() const {
+    if (isSingleWord())
+      return U.VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
+    return countTrailingOnesSlowCase() == BitWidth;
+  }
+
+  /// \brief Determine if all bits are clear
+  ///
+  /// This checks whether all bits of this APInt are clear.
+  bool isNullValue() const { return !*this; }
+
+  /// \brief Determine if this is a value of 1.
+  ///
+  /// This checks to see if the value of this APInt is one.
+  bool isOneValue() const {
+    if (isSingleWord())
+      return U.VAL == 1;
+    return countLeadingZerosSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Determine if this is the largest unsigned value.
+  ///
+  /// This checks to see if the value of this APInt is the maximum unsigned
+  /// value for the APInt's bit width.
+  bool isMaxValue() const { return isAllOnesValue(); }
+
+  /// \brief Determine if this is the largest signed value.
+  ///
+  /// This checks to see if the value of this APInt is the maximum signed
+  /// value for the APInt's bit width.
+  bool isMaxSignedValue() const {
+    if (isSingleWord())
+      return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
+    return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Determine if this is the smallest unsigned value.
+  ///
+  /// This checks to see if the value of this APInt is the minimum unsigned
+  /// value for the APInt's bit width.
+  bool isMinValue() const { return isNullValue(); }
+
+  /// \brief Determine if this is the smallest signed value.
+  ///
+  /// This checks to see if the value of this APInt is the minimum signed
+  /// value for the APInt's bit width.
+  bool isMinSignedValue() const {
+    if (isSingleWord())
+      return U.VAL == (WordType(1) << (BitWidth - 1));
+    return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Check if this APInt has an N-bit unsigned integer value.
+  bool isIntN(unsigned N) const {
+    assert(N && "N == 0 ???");
+    return getActiveBits() <= N;
+  }
+
+  /// \brief Check if this APInt has an N-bit signed integer value.
+  bool isSignedIntN(unsigned N) const {
+    assert(N && "N == 0 ???");
+    return getMinSignedBits() <= N;
+  }
+
+  /// \brief Check if this APInt's value is a power of two greater than zero.
+  ///
+  /// \returns true if the argument APInt value is a power of two > 0.
+  bool isPowerOf2() const {
+    if (isSingleWord())
+      return isPowerOf2_64(U.VAL);
+    return countPopulationSlowCase() == 1;
+  }
+
+  /// \brief Check if the APInt's value is returned by getSignMask.
+  ///
+  /// \returns true if this is the value returned by getSignMask.
+  bool isSignMask() const { return isMinSignedValue(); }
+
+  /// \brief Convert APInt to a boolean value.
+  ///
+  /// This converts the APInt to a boolean value as a test against zero.
+  bool getBoolValue() const { return !!*this; }
+
+  /// If this value is smaller than the specified limit, return it, otherwise
+  /// return the limit value.  This causes the value to saturate to the limit.
+  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
+    return ugt(Limit) ? Limit : getZExtValue();
+  }
+
+  /// \brief Check if the APInt consists of a repeated bit pattern.
+  ///
+  /// e.g. 0x01010101 satisfies isSplat(8).
+  /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
+  /// width without remainder.
+  bool isSplat(unsigned SplatSizeInBits) const;
+
+  /// \returns true if this APInt value is a sequence of \p numBits ones
+  /// starting at the least significant bit with the remainder zero.
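+  ///
+  /// For example (illustrative), APInt(32, 0xFF).isMask(8) returns true.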
+  bool isMask(unsigned numBits) const {
+    assert(numBits != 0 && "numBits must be non-zero");
+    assert(numBits <= BitWidth && "numBits out of range");
+    if (isSingleWord())
+      return U.VAL == (WORD_MAX >> (APINT_BITS_PER_WORD - numBits));
+    unsigned Ones = countTrailingOnesSlowCase();
+    return (numBits == Ones) &&
+           ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+  }
+
+  /// \returns true if this APInt is a non-empty sequence of ones starting at
+  /// the least significant bit with the remainder zero.
+  /// For example, an APInt with value 0x0000FFFF satisfies isMask().
+  bool isMask() const {
+    if (isSingleWord())
+      return isMask_64(U.VAL);
+    unsigned Ones = countTrailingOnesSlowCase();
+    return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+  }
+
+  /// \brief Return true if this APInt value contains a sequence of ones with
+  /// the remainder zero.
+  bool isShiftedMask() const {
+    if (isSingleWord())
+      return isShiftedMask_64(U.VAL);
+    unsigned Ones = countPopulationSlowCase();
+    unsigned LeadZ = countLeadingZerosSlowCase();
+    return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
+  }
+
+  /// @}
+  /// \name Value Generators
+  /// @{
+
+  /// \brief Gets maximum unsigned value of APInt for specific bit width.
+  static APInt getMaxValue(unsigned numBits) {
+    return getAllOnesValue(numBits);
+  }
+
+  /// \brief Gets maximum signed value of APInt for a specific bit width.
+  static APInt getSignedMaxValue(unsigned numBits) {
+    APInt API = getAllOnesValue(numBits);
+    API.clearBit(numBits - 1);
+    return API;
+  }
+
+  /// \brief Gets minimum unsigned value of APInt for a specific bit width.
+  static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
+
+  /// \brief Gets minimum signed value of APInt for a specific bit width.
+  static APInt getSignedMinValue(unsigned numBits) {
+    APInt API(numBits, 0);
+    API.setBit(numBits - 1);
+    return API;
+  }
+
+  /// \brief Get the SignMask for a specific bit width.
+  ///
+  /// This is just a wrapper function of getSignedMinValue(), and it helps code
+  /// readability when we want to get a SignMask.
+  static APInt getSignMask(unsigned BitWidth) {
+    return getSignedMinValue(BitWidth);
+  }
+
+  /// \brief Get the all-ones value.
+  ///
+  /// \returns the all-ones value for an APInt of the specified bit-width.
+  static APInt getAllOnesValue(unsigned numBits) {
+    return APInt(numBits, WORD_MAX, true);
+  }
+
+  /// \brief Get the '0' value.
+  ///
+  /// \returns the '0' value for an APInt of the specified bit-width.
+  static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
+
+  /// \brief Compute an APInt containing numBits highbits from this APInt.
+  ///
+  /// Get an APInt with the same BitWidth as this APInt, just zero mask
+  /// the low bits and right shift to the least significant bit.
+  ///
+  /// \returns the high "numBits" bits of this APInt.
+  APInt getHiBits(unsigned numBits) const;
+
+  /// \brief Compute an APInt containing numBits lowbits from this APInt.
+  ///
+  /// Get an APInt with the same BitWidth as this APInt, just zero mask
+  /// the high bits.
+  ///
+  /// \returns the low "numBits" bits of this APInt.
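+  ///
+  /// For example (illustrative): APInt(32, 0xDEADBEEF).getLoBits(16) yields
+  /// 0x0000BEEF, while getHiBits(16) on the same value yields 0x0000DEAD.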
+  APInt getLoBits(unsigned numBits) const;
+
+  /// \brief Return an APInt with exactly one bit set in the result.
+  static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+    APInt Res(numBits, 0);
+    Res.setBit(BitNo);
+    return Res;
+  }
+
+  /// \brief Get a value with a block of bits set.
+  ///
+  /// Constructs an APInt value that has a contiguous range of bits set. The
+  /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
+  /// bits will be zero. For example, with parameters (32, 0, 16) you would get
+  /// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For
+  /// example, with parameters (32, 28, 4), you would get 0xF000000F.
+  ///
+  /// \param numBits the intended bit width of the result
+  /// \param loBit the index of the lowest bit set.
+  /// \param hiBit the index of the highest bit set.
+  ///
+  /// \returns An APInt value with the requested bits set.
+  static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
+    APInt Res(numBits, 0);
+    Res.setBits(loBit, hiBit);
+    return Res;
+  }
+
+  /// \brief Get a value with upper bits starting at loBit set.
+  ///
+  /// Constructs an APInt value that has a contiguous range of bits set. The
+  /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
+  /// bits will be zero. For example, with parameters (32, 12) you would get
+  /// 0xFFFFF000.
+  ///
+  /// \param numBits the intended bit width of the result
+  /// \param loBit the index of the lowest bit to set.
+  ///
+  /// \returns An APInt value with the requested bits set.
+  static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
+    APInt Res(numBits, 0);
+    Res.setBitsFrom(loBit);
+    return Res;
+  }
+
+  /// \brief Get a value with high bits set
+  ///
+  /// Constructs an APInt value that has the top hiBitsSet bits set.
+  ///
+  /// \param numBits the bitwidth of the result
+  /// \param hiBitsSet the number of high-order bits set in the result.
+  static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
+    APInt Res(numBits, 0);
+    Res.setHighBits(hiBitsSet);
+    return Res;
+  }
+
+  /// \brief Get a value with low bits set
+  ///
+  /// Constructs an APInt value that has the bottom loBitsSet bits set.
+  ///
+  /// \param numBits the bitwidth of the result
+  /// \param loBitsSet the number of low-order bits set in the result.
+  static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
+    APInt Res(numBits, 0);
+    Res.setLowBits(loBitsSet);
+    return Res;
+  }
+
+  /// \brief Return a value containing V broadcasted over NewLen bits.
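+  ///
+  /// For example (illustrative): getSplat(32, APInt(8, 0x01)) yields
+  /// 0x01010101.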
+  static APInt getSplat(unsigned NewLen, const APInt &V);
+
+  /// \brief Determine if two APInts have the same value, after zero-extending
+  /// one of them (if needed!) to ensure that the bit-widths match.
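+  ///
+  /// For example (illustrative), isSameValue(APInt(16, 5), APInt(32, 5))
+  /// returns true.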
+  static bool isSameValue(const APInt &I1, const APInt &I2) {
+    if (I1.getBitWidth() == I2.getBitWidth())
+      return I1 == I2;
+
+    if (I1.getBitWidth() > I2.getBitWidth())
+      return I1 == I2.zext(I1.getBitWidth());
+
+    return I1.zext(I2.getBitWidth()) == I2;
+  }
+
+  /// \brief Overload to compute a hash_code for an APInt value.
+  friend hash_code hash_value(const APInt &Arg);
+
+  /// This function returns a pointer to the internal storage of the APInt.
+  /// This is useful for writing out the APInt in binary form without any
+  /// conversions.
+  const uint64_t *getRawData() const {
+    if (isSingleWord())
+      return &U.VAL;
+    return &U.pVal[0];
+  }
+
+  /// @}
+  /// \name Unary Operators
+  /// @{
+
+  /// \brief Postfix increment operator.
+  ///
+  /// Increments *this by 1.
+  ///
+  /// \returns a new APInt value representing the original value of *this.
+  const APInt operator++(int) {
+    APInt API(*this);
+    ++(*this);
+    return API;
+  }
+
+  /// \brief Prefix increment operator.
+  ///
+  /// \returns *this incremented by one
+  APInt &operator++();
+
+  /// \brief Postfix decrement operator.
+  ///
+  /// Decrements *this by 1.
+  ///
+  /// \returns a new APInt value representing the original value of *this.
+  const APInt operator--(int) {
+    APInt API(*this);
+    --(*this);
+    return API;
+  }
+
+  /// \brief Prefix decrement operator.
+  ///
+  /// \returns *this decremented by one.
+  APInt &operator--();
+
+  /// \brief Logical negation operator.
+  ///
+  /// Performs logical negation operation on this APInt.
+  ///
+  /// \returns true if *this is zero, false otherwise.
+  bool operator!() const {
+    if (isSingleWord())
+      return U.VAL == 0;
+    return countLeadingZerosSlowCase() == BitWidth;
+  }
+
+  /// @}
+  /// \name Assignment Operators
+  /// @{
+
+  /// \brief Copy assignment operator.
+  ///
+  /// \returns *this after assignment of RHS.
+  APInt &operator=(const APInt &RHS) {
+    // If the bitwidths are the same, we can avoid mucking with memory
+    if (isSingleWord() && RHS.isSingleWord()) {
+      U.VAL = RHS.U.VAL;
+      BitWidth = RHS.BitWidth;
+      return clearUnusedBits();
+    }
+
+    AssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// @brief Move assignment operator.
+  APInt &operator=(APInt &&that) {
+    assert(this != &that && "Self-move not supported");
+    if (!isSingleWord())
+      delete[] U.pVal;
+
+    // Use memcpy so that type based alias analysis sees both VAL and pVal
+    // as modified.
+    memcpy(&U, &that.U, sizeof(U));
+
+    BitWidth = that.BitWidth;
+    that.BitWidth = 0;
+
+    return *this;
+  }
+
+  /// \brief Assignment operator.
+  ///
+  /// The RHS value is assigned to *this. If the significant bits in RHS exceed
+  /// the bit width, the excess bits are truncated. If the bit width is larger
+  /// than 64, the value is zero filled in the unspecified high order bits.
+  ///
+  /// \returns *this after assignment of RHS value.
+  APInt &operator=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL = RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] = RHS;
+      memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+    }
+    return *this;
+  }
+
+  /// \brief Bitwise AND assignment operator.
+  ///
+  /// Performs a bitwise AND operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after ANDing with RHS.
+  APInt &operator&=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL &= RHS.U.VAL;
+    else
+      AndAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise AND assignment operator.
+  ///
+  /// Performs a bitwise AND operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator&=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL &= RHS;
+      return *this;
+    }
+    U.pVal[0] &= RHS;
+    memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+    return *this;
+  }
+
+  /// \brief Bitwise OR assignment operator.
+  ///
+  /// Performs a bitwise OR operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after ORing with RHS.
+  APInt &operator|=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL |= RHS.U.VAL;
+    else
+      OrAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise OR assignment operator.
+  ///
+  /// Performs a bitwise OR operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator|=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL |= RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] |= RHS;
+    }
+    return *this;
+  }
+
+  /// \brief Bitwise XOR assignment operator.
+  ///
+  /// Performs a bitwise XOR operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after XORing with RHS.
+  APInt &operator^=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL ^= RHS.U.VAL;
+    else
+      XorAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise XOR assignment operator.
+  ///
+  /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator^=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL ^= RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] ^= RHS;
+    }
+    return *this;
+  }
+
+  /// \brief Multiplication assignment operator.
+  ///
+  /// Multiplies this APInt by RHS and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator*=(const APInt &RHS);
+  APInt &operator*=(uint64_t RHS);
+
+  /// \brief Addition assignment operator.
+  ///
+  /// Adds RHS to *this and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator+=(const APInt &RHS);
+  APInt &operator+=(uint64_t RHS);
+
+  /// \brief Subtraction assignment operator.
+  ///
+  /// Subtracts RHS from *this and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator-=(const APInt &RHS);
+  APInt &operator-=(uint64_t RHS);
+
+  /// \brief Left-shift assignment function.
+  ///
+  /// Shifts *this left by shiftAmt and assigns the result to *this.
+  ///
+  /// \returns *this after shifting left by ShiftAmt
+  APInt &operator<<=(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      if (ShiftAmt == BitWidth)
+        U.VAL = 0;
+      else
+        U.VAL <<= ShiftAmt;
+      return clearUnusedBits();
+    }
+    shlSlowCase(ShiftAmt);
+    return *this;
+  }
+
+  /// \brief Left-shift assignment function.
+  ///
+  /// Shifts *this left by shiftAmt and assigns the result to *this.
+  ///
+  /// \returns *this after shifting left by ShiftAmt
+  APInt &operator<<=(const APInt &ShiftAmt);
+
+  /// @}
+  /// \name Binary Operators
+  /// @{
+
+  /// \brief Multiplication operator.
+  ///
+  /// Multiplies this APInt by RHS and returns the result.
+  APInt operator*(const APInt &RHS) const;
+
+  /// \brief Left logical shift operator.
+  ///
+  /// Shifts this APInt left by \p Bits and returns the result.
+  APInt operator<<(unsigned Bits) const { return shl(Bits); }
+
+  /// \brief Left logical shift operator.
+  ///
+  /// Shifts this APInt left by \p Bits and returns the result.
+  APInt operator<<(const APInt &Bits) const { return shl(Bits); }
+
+  /// \brief Arithmetic right-shift function.
+  ///
+  /// Arithmetic right-shift this APInt by shiftAmt.
+  APInt ashr(unsigned ShiftAmt) const {
+    APInt R(*this);
+    R.ashrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Arithmetic right-shift this APInt by ShiftAmt in place.
+  void ashrInPlace(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
+      if (ShiftAmt == BitWidth)
+        U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
+      else
+        U.VAL = SExtVAL >> ShiftAmt;
+      clearUnusedBits();
+      return;
+    }
+    ashrSlowCase(ShiftAmt);
+  }
+
+  /// \brief Logical right-shift function.
+  ///
+  /// Logical right-shift this APInt by shiftAmt.
+  APInt lshr(unsigned shiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(shiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      if (ShiftAmt == BitWidth)
+        U.VAL = 0;
+      else
+        U.VAL >>= ShiftAmt;
+      return;
+    }
+    lshrSlowCase(ShiftAmt);
+  }
+
+  /// \brief Left-shift function.
+  ///
+  /// Left-shift this APInt by shiftAmt.
+  APInt shl(unsigned shiftAmt) const {
+    APInt R(*this);
+    R <<= shiftAmt;
+    return R;
+  }
+
+  /// \brief Rotate left by rotateAmt.
+  APInt rotl(unsigned rotateAmt) const;
+
+  /// \brief Rotate right by rotateAmt.
+  APInt rotr(unsigned rotateAmt) const;
+
+  /// \brief Arithmetic right-shift function.
+  ///
+  /// Arithmetic right-shift this APInt by shiftAmt.
+  APInt ashr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.ashrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Arithmetic right-shift this APInt by shiftAmt in place.
+  void ashrInPlace(const APInt &shiftAmt);
+
+  /// \brief Logical right-shift function.
+  ///
+  /// Logical right-shift this APInt by shiftAmt.
+  APInt lshr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(const APInt &ShiftAmt);
+
+  /// \brief Left-shift function.
+  ///
+  /// Left-shift this APInt by shiftAmt.
+  APInt shl(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R <<= ShiftAmt;
+    return R;
+  }
+
+  /// \brief Rotate left by rotateAmt.
+  APInt rotl(const APInt &rotateAmt) const;
+
+  /// \brief Rotate right by rotateAmt.
+  APInt rotr(const APInt &rotateAmt) const;
+
+  /// \brief Unsigned division operation.
+  ///
+  /// Perform an unsigned divide operation on this APInt by RHS. Both this and
+  /// RHS are treated as unsigned quantities for purposes of this division.
+  ///
+  /// \returns a new APInt value containing the division result
+  APInt udiv(const APInt &RHS) const;
+  APInt udiv(uint64_t RHS) const;
+
+  /// \brief Signed division function for APInt.
+  ///
+  /// Signed divide this APInt by APInt RHS.
+  APInt sdiv(const APInt &RHS) const;
+  APInt sdiv(int64_t RHS) const;
+
+  /// \brief Unsigned remainder operation.
+  ///
+  /// Perform an unsigned remainder operation on this APInt with RHS being the
+  /// divisor. Both this and RHS are treated as unsigned quantities for purposes
+  /// of this operation. Note that this is a true remainder operation and not a
+  /// modulo operation because the sign follows the sign of the dividend which
+  /// is *this.
+  ///
+  /// \returns a new APInt value containing the remainder result
+  APInt urem(const APInt &RHS) const;
+  uint64_t urem(uint64_t RHS) const;
+
+  /// \brief Function for signed remainder operation.
+  ///
+  /// Signed remainder operation on APInt.
+  APInt srem(const APInt &RHS) const;
+  int64_t srem(int64_t RHS) const;
+
+  /// \brief Dual division/remainder interface.
+  ///
+  /// Sometimes it is convenient to divide two APInt values and obtain both the
+  /// quotient and remainder. This function does both operations in the same
+  /// computation making it a little more efficient. The pair of input arguments
+  /// may overlap with the pair of output arguments. It is safe to call
+  /// udivrem(X, Y, X, Y), for example.
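+  ///
+  /// A minimal sketch (illustrative):
+  /// \code
+  ///   APInt Q(32, 0), R(32, 0);
+  ///   APInt::udivrem(APInt(32, 17), APInt(32, 5), Q, R); // Q == 3, R == 2
+  /// \endcode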
+  static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+                      APInt &Remainder);
+  static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
+                      uint64_t &Remainder);
+
+  static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+                      APInt &Remainder);
+  static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
+                      int64_t &Remainder);
+
+  // Operations that return overflow indicators.
+  APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+  APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+  APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+  APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+  APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+  APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+  APInt umul_ov(const APInt &RHS, bool &Overflow) const;
+  APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
+  APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
+
+  /// \brief Array-indexing support.
+  ///
+  /// \returns the bit value at bitPosition
+  bool operator[](unsigned bitPosition) const {
+    assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
+    return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
+  }
+
+  /// @}
+  /// \name Comparison Operators
+  /// @{
+
+  /// \brief Equality operator.
+  ///
+  /// Compares this APInt with RHS for the validity of the equality
+  /// relationship.
+  bool operator==(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
+    if (isSingleWord())
+      return U.VAL == RHS.U.VAL;
+    return EqualSlowCase(RHS);
+  }
+
+  /// \brief Equality operator.
+  ///
+  /// Compares this APInt with a uint64_t for the validity of the equality
+  /// relationship.
+  ///
+  /// \returns true if *this == Val
+  bool operator==(uint64_t Val) const {
+    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
+  }
+
+  /// \brief Equality comparison.
+  ///
+  /// Compares this APInt with RHS for the validity of the equality
+  /// relationship.
+  ///
+  /// \returns true if *this == RHS
+  bool eq(const APInt &RHS) const { return (*this) == RHS; }
+
+  /// \brief Inequality operator.
+  ///
+  /// Compares this APInt with RHS for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != RHS
+  bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
+
+  /// \brief Inequality operator.
+  ///
+  /// Compares this APInt with a uint64_t for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != Val
+  bool operator!=(uint64_t Val) const { return !((*this) == Val); }
+
+  /// \brief Inequality comparison
+  ///
+  /// Compares this APInt with RHS for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != RHS
+  bool ne(const APInt &RHS) const { return !((*this) == RHS); }
+
+  /// \brief Unsigned less than comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when both are considered unsigned.
+  bool ult(const APInt &RHS) const { return compare(RHS) < 0; }
+
+  /// \brief Unsigned less than comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when considered unsigned.
+  bool ult(uint64_t RHS) const {
+    // Only need to check active bits if not a single word.
+    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
+  }
+
+  /// \brief Signed less than comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when both are considered signed.
+  bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }
+
+  /// \brief Signed less than comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when considered signed.
+  bool slt(int64_t RHS) const {
+    return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
+                                                        : getSExtValue() < RHS;
+  }
+
+  /// \brief Unsigned less or equal comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when both are considered unsigned.
+  bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }
+
+  /// \brief Unsigned less or equal comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when considered unsigned.
+  bool ule(uint64_t RHS) const { return !ugt(RHS); }
+
+  /// \brief Signed less or equal comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when both are considered signed.
+  bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }
+
+  /// \brief Signed less or equal comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for the
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when considered signed.
+  bool sle(uint64_t RHS) const { return !sgt(RHS); }
+
+  /// \brief Unsigned greater than comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when both are considered unsigned.
+  bool ugt(const APInt &RHS) const { return !ule(RHS); }
+
+  /// \brief Unsigned greater than comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when considered unsigned.
+  bool ugt(uint64_t RHS) const {
+    // Only need to check active bits if not a single word.
+    return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
+  }
+
+  /// \brief Signed greater than comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for the
+  /// validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when both are considered signed.
+  bool sgt(const APInt &RHS) const { return !sle(RHS); }
+
+  /// \brief Signed greater than comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when considered signed.
+  bool sgt(int64_t RHS) const {
+    return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
+                                                        : getSExtValue() > RHS;
+  }
+
+  /// \brief Unsigned greater or equal comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when both are considered unsigned.
+  bool uge(const APInt &RHS) const { return !ult(RHS); }
+
+  /// \brief Unsigned greater or equal comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when considered unsigned.
+  bool uge(uint64_t RHS) const { return !ult(RHS); }
+
+  /// \brief Signed greater or equal comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when both are considered signed.
+  bool sge(const APInt &RHS) const { return !slt(RHS); }
+
+  /// \brief Signed greater or equal comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when considered signed.
+  bool sge(int64_t RHS) const { return !slt(RHS); }
+
+  /// This operation tests if there are any pairs of corresponding bits
+  /// between this APInt and RHS that are both set.
+  bool intersects(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      return (U.VAL & RHS.U.VAL) != 0;
+    return intersectsSlowCase(RHS);
+  }
+
+  /// This operation checks that all bits set in this APInt are also set in RHS.
+  bool isSubsetOf(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      return (U.VAL & ~RHS.U.VAL) == 0;
+    return isSubsetOfSlowCase(RHS);
+  }
+
+  /// @}
+  /// \name Resizing Operators
+  /// @{
+
+  /// \brief Truncate to new width.
+  ///
+  /// Truncate the APInt to a specified width. It is an error to specify a width
+  /// that is greater than or equal to the current width.
+  APInt trunc(unsigned width) const;
+
+  /// \brief Sign extend to a new width.
+  ///
+  /// This operation sign extends the APInt to a new width. If the high order
+  /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
+  /// It is an error to specify a width that is less than or equal to the
+  /// current width.
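+  ///
+  /// For example (illustrative): APInt(8, 0x80).sext(16) yields 0xFF80, while
+  /// zext(16) on the same value yields 0x0080.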
+  APInt sext(unsigned width) const;
+
+  /// \brief Zero extend to a new width.
+  ///
+  /// This operation zero extends the APInt to a new width. The high order bits
+  /// are filled with 0 bits.  It is an error to specify a width that is less
+  /// than or equal to the current width.
+  APInt zext(unsigned width) const;
+
+  /// \brief Sign extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is sign
+  /// extended, truncated, or left alone to make it that width.
+  APInt sextOrTrunc(unsigned width) const;
+
+  /// \brief Zero extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is zero
+  /// extended, truncated, or left alone to make it that width.
+  APInt zextOrTrunc(unsigned width) const;
+
+  /// \brief Sign extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is sign
+  /// extended, or left alone to make it that width.
+  APInt sextOrSelf(unsigned width) const;
+
+  /// \brief Zero extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is zero
+  /// extended, or left alone to make it that width.
+  APInt zextOrSelf(unsigned width) const;
+
+  /// @}
+  /// \name Bit Manipulation Operators
+  /// @{
+
+  /// \brief Set every bit to 1.
+  void setAllBits() {
+    if (isSingleWord())
+      U.VAL = WORD_MAX;
+    else
+      // Set all the bits in all the words.
+      memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
+    // Clear the unused ones
+    clearUnusedBits();
+  }
+
+  /// \brief Set a given bit to 1.
+  ///
+  /// Set the bit at the position given by BitPosition to 1.
+  void setBit(unsigned BitPosition) {
+    assert(BitPosition < BitWidth && "BitPosition out of range");
+    WordType Mask = maskBit(BitPosition);
+    if (isSingleWord())
+      U.VAL |= Mask;
+    else
+      U.pVal[whichWord(BitPosition)] |= Mask;
+  }
+
+  /// Set the sign bit to 1.
+  void setSignBit() {
+    setBit(BitWidth - 1);
+  }
+
+  /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
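+  /// For example (illustrative), setBits(8, 16) on a 32-bit zero value yields
+  /// 0x0000FF00.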
+  void setBits(unsigned loBit, unsigned hiBit) {
+    assert(hiBit <= BitWidth && "hiBit out of range");
+    assert(loBit <= BitWidth && "loBit out of range");
+    assert(loBit <= hiBit && "loBit greater than hiBit");
+    if (loBit == hiBit)
+      return;
+    if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
+      uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
+      mask <<= loBit;
+      if (isSingleWord())
+        U.VAL |= mask;
+      else
+        U.pVal[0] |= mask;
+    } else {
+      setBitsSlowCase(loBit, hiBit);
+    }
+  }
+
+  /// Set the top bits starting from loBit.
+  void setBitsFrom(unsigned loBit) {
+    return setBits(loBit, BitWidth);
+  }
+
+  /// Set the bottom loBits bits.
+  void setLowBits(unsigned loBits) {
+    return setBits(0, loBits);
+  }
+
+  /// Set the top hiBits bits.
+  void setHighBits(unsigned hiBits) {
+    return setBits(BitWidth - hiBits, BitWidth);
+  }
+
+  /// \brief Set every bit to 0.
+  void clearAllBits() {
+    if (isSingleWord())
+      U.VAL = 0;
+    else
+      memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
+  }
+
+  /// \brief Set a given bit to 0.
+  ///
+  /// Set the bit at the position given by BitPosition to 0.
+  void clearBit(unsigned BitPosition) {
+    assert(BitPosition < BitWidth && "BitPosition out of range");
+    WordType Mask = ~maskBit(BitPosition);
+    if (isSingleWord())
+      U.VAL &= Mask;
+    else
+      U.pVal[whichWord(BitPosition)] &= Mask;
+  }
+
+  /// Set the sign bit to 0.
+  void clearSignBit() {
+    clearBit(BitWidth - 1);
+  }
+
+  /// \brief Toggle every bit to its opposite value.
+  void flipAllBits() {
+    if (isSingleWord()) {
+      U.VAL ^= WORD_MAX;
+      clearUnusedBits();
+    } else {
+      flipAllBitsSlowCase();
+    }
+  }
+
+  /// \brief Toggles a given bit to its opposite value.
+  ///
+  /// Toggle the bit at the position given by bitPosition to its opposite
+  /// value.
+  void flipBit(unsigned bitPosition);
+
+  /// Negate this APInt in place.
+  void negate() {
+    flipAllBits();
+    ++(*this);
+  }
+
+  /// Insert the bits from a smaller APInt starting at bitPosition.
+  void insertBits(const APInt &SubBits, unsigned bitPosition);
+
+  /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
+  APInt extractBits(unsigned numBits, unsigned bitPosition) const;
+
+  /// @}
+  /// \name Value Characterization Functions
+  /// @{
+
+  /// \brief Return the number of bits in the APInt.
+  unsigned getBitWidth() const { return BitWidth; }
+
+  /// \brief Get the number of words.
+  ///
+  /// Here one word's bitwidth equals that of uint64_t.
+  ///
+  /// \returns the number of words to hold the integer value of this APInt.
+  unsigned getNumWords() const { return getNumWords(BitWidth); }
+
+  /// \brief Get the number of words.
+  ///
+  /// *NOTE* Here one word's bitwidth equals that of uint64_t.
+  ///
+  /// \returns the number of words to hold the integer value with a given bit
+  /// width.
+  static unsigned getNumWords(unsigned BitWidth) {
+    return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Compute the number of active bits in the value
+  ///
+  /// This function returns the number of active bits which is defined as the
+  /// bit width minus the number of leading zeros. This is used in several
+  /// computations to see how "wide" the value is.
+  unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
+
+  /// \brief Compute the number of active words in the value of this APInt.
+  ///
+  /// This is used in conjunction with getActiveData to extract the raw value of
+  /// the APInt.
+  unsigned getActiveWords() const {
+    unsigned numActiveBits = getActiveBits();
+    return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
+  }
+
+  /// \brief Get the minimum bit size for this signed APInt
+  ///
+  /// Computes the minimum bit width for this APInt while considering it to be a
+  /// signed (and probably negative) value. If the value is not negative, this
+  /// function returns the same value as getActiveBits()+1. Otherwise, it
+  /// returns the smallest bit width that will retain the negative value. For
+  /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
+  /// for -1, this function will always return 1.
+  unsigned getMinSignedBits() const {
+    if (isNegative())
+      return BitWidth - countLeadingOnes() + 1;
+    return getActiveBits() + 1;
+  }
+
+  /// \brief Get zero extended value
+  ///
+  /// This method attempts to return the value of this APInt as a zero extended
+  /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
+  /// uint64_t. Otherwise an assertion will result.
+  uint64_t getZExtValue() const {
+    if (isSingleWord())
+      return U.VAL;
+    assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
+    return U.pVal[0];
+  }
+
+  /// \brief Get sign extended value
+  ///
+  /// This method attempts to return the value of this APInt as a sign extended
+  /// int64_t. The bit width must be <= 64 or the value must fit within an
+  /// int64_t. Otherwise an assertion will result.
+  int64_t getSExtValue() const {
+    if (isSingleWord())
+      return SignExtend64(U.VAL, BitWidth);
+    assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+    return int64_t(U.pVal[0]);
+  }
+
+  /// \brief Get bits required for string value.
+  ///
+  /// This method determines how many bits are required to hold the APInt
+  /// equivalent of the string given by \p str.
+  static unsigned getBitsNeeded(StringRef str, uint8_t radix);
+
+  /// \brief The APInt version of the countLeadingZeros functions in
+  ///   MathExtras.h.
+  ///
+  /// It counts the number of zeros from the most significant bit to the first
+  /// one bit.
+  ///
+  /// \returns BitWidth if the value is zero, otherwise returns the number of
+  ///   zeros from the most significant bit to the first one bit.
+  unsigned countLeadingZeros() const {
+    if (isSingleWord()) {
+      unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
+      return llvm::countLeadingZeros(U.VAL) - unusedBits;
+    }
+    return countLeadingZerosSlowCase();
+  }
+
+  /// \brief Count the number of leading one bits.
+  ///
+  /// This function is an APInt version of the countLeadingOnes
+  /// functions in MathExtras.h. It counts the number of ones from the most
+  /// significant bit to the first zero bit.
+  ///
+  /// \returns 0 if the high order bit is not set, otherwise returns the number
+  /// of 1 bits from the most significant bit to the first zero bit.
+  unsigned countLeadingOnes() const {
+    if (isSingleWord())
+      return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
+    return countLeadingOnesSlowCase();
+  }
+
+  /// Computes the number of leading bits of this APInt that are equal to its
+  /// sign bit.
+  unsigned getNumSignBits() const {
+    return isNegative() ? countLeadingOnes() : countLeadingZeros();
+  }
+
+  /// \brief Count the number of trailing zero bits.
+  ///
+  /// This function is an APInt version of the countTrailingZeros
+  /// functions in MathExtras.h. It counts the number of zeros from the least
+  /// significant bit to the first set bit.
+  ///
+  /// \returns BitWidth if the value is zero, otherwise returns the number of
+  /// zeros from the least significant bit to the first one bit.
+  unsigned countTrailingZeros() const {
+    if (isSingleWord())
+      return std::min(unsigned(llvm::countTrailingZeros(U.VAL)), BitWidth);
+    return countTrailingZerosSlowCase();
+  }
+
+  /// \brief Count the number of trailing one bits.
+  ///
+  /// This function is an APInt version of the countTrailingOnes
+  /// functions in MathExtras.h. It counts the number of ones from the least
+  /// significant bit to the first zero bit.
+  ///
+  /// \returns BitWidth if the value is all ones, otherwise returns the number
+  /// of ones from the least significant bit to the first zero bit.
+  unsigned countTrailingOnes() const {
+    if (isSingleWord())
+      return llvm::countTrailingOnes(U.VAL);
+    return countTrailingOnesSlowCase();
+  }
+
+  /// \brief Count the number of bits set.
+  ///
+  /// This function is an APInt version of the countPopulation functions
+  /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
+  ///
+  /// \returns 0 if the value is zero, otherwise returns the number of set bits.
+  unsigned countPopulation() const {
+    if (isSingleWord())
+      return llvm::countPopulation(U.VAL);
+    return countPopulationSlowCase();
+  }
+
+  /// @}
+  /// \name Conversion Functions
+  /// @{
+  void print(raw_ostream &OS, bool isSigned) const;
+
+  /// Converts an APInt to a string and appends it to Str.  Str is commonly a
+  /// SmallString.
+  void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
+                bool formatAsCLiteral = false) const;
+
+  /// Considers the APInt to be unsigned and converts it into a string in the
+  /// radix given. The radix can be 2, 8, 10, 16, or 36.
+  void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    toString(Str, Radix, false, false);
+  }
+
+  /// Considers the APInt to be signed and converts it into a string in the
+  /// radix given. The radix can be 2, 8, 10, 16, or 36.
+  void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    toString(Str, Radix, true, false);
+  }
+
+  /// \brief Return the APInt as a std::string.
+  ///
+  /// Note that this is an inefficient method.  It is better to pass in a
+  /// SmallVector/SmallString to the methods above to avoid thrashing the heap
+  /// for the string.
+  std::string toString(unsigned Radix, bool Signed) const;
+
+  /// \returns a byte-swapped representation of this APInt Value.
+  APInt byteSwap() const;
+
+  /// \returns the value of this APInt with the bit representation reversed.
+  APInt reverseBits() const;
+
+  /// \brief Converts this APInt to a double value.
+  double roundToDouble(bool isSigned) const;
+
+  /// \brief Converts this unsigned APInt to a double value.
+  double roundToDouble() const { return roundToDouble(false); }
+
+  /// \brief Converts this signed APInt to a double value.
+  double signedRoundToDouble() const { return roundToDouble(true); }
+
+  /// \brief Converts APInt bits to a double
+  ///
+  /// The conversion does not do a translation from integer to double, it just
+  /// re-interprets the bits as a double. Note that it is valid to do this on
+  /// any bit width. Exactly 64 bits will be translated.
+  double bitsToDouble() const {
+    return BitsToDouble(getWord(0));
+  }
+
+  /// \brief Converts APInt bits to a float
+  ///
+  /// The conversion does not do a translation from integer to float, it just
+  /// re-interprets the bits as a float. Note that it is valid to do this on
+  /// any bit width. Exactly 32 bits will be translated.
+  float bitsToFloat() const {
+    return BitsToFloat(getWord(0));
+  }
+
+  /// \brief Converts a double to APInt bits.
+  ///
+  /// The conversion does not do a translation from double to integer, it just
+  /// re-interprets the bits of the double.
+  static APInt doubleToBits(double V) {
+    return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V));
+  }
+
+  /// \brief Converts a float to APInt bits.
+  ///
+  /// The conversion does not do a translation from float to integer, it just
+  /// re-interprets the bits of the float.
+  static APInt floatToBits(float V) {
+    return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V));
+  }
+
+  /// @}
+  /// \name Mathematics Operations
+  /// @{
+
+  /// \returns the floor log base 2 of this APInt.
+  unsigned logBase2() const { return getActiveBits() - 1; }
+
+  /// \returns the ceil log base 2 of this APInt.
+  unsigned ceilLogBase2() const {
+    APInt temp(*this);
+    --temp;
+    return temp.getActiveBits();
+  }
+
+  /// \returns the nearest log base 2 of this APInt. Ties round up.
+  ///
+  /// NOTE: When we have a BitWidth of 1, we define:
+  ///
+  ///   log2(0) = UINT32_MAX
+  ///   log2(1) = 0
+  ///
+  /// to get around any mathematical concerns resulting from
+  /// referencing 2 in a space where 2 does not exist.
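+  ///
+  /// For example (illustrative): for the value 6 (0b110), logBase2() == 2 and
+  /// bit 1 is set, so nearestLogBase2() == 3 (the tie between 4 and 8 rounds
+  /// up).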
+  unsigned nearestLogBase2() const {
+    // Special case when we have a bitwidth of 1. If VAL is 1, then we
+    // get 0. If VAL is 0, we get WORD_MAX which gets truncated to
+    // UINT32_MAX.
+    if (BitWidth == 1)
+      return U.VAL - 1;
+
+    // Handle the zero case.
+    if (isNullValue())
+      return UINT32_MAX;
+
+    // The non-zero case is handled by computing:
+    //
+    //   nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
+    //
+    // where x[i] is referring to the value of the ith bit of x.
+    unsigned lg = logBase2();
+    return lg + unsigned((*this)[lg - 1]);
+  }
+
+  /// \returns the log base 2 of this APInt if it is an exact power of two, -1
+  /// otherwise.
+  int32_t exactLogBase2() const {
+    if (!isPowerOf2())
+      return -1;
+    return logBase2();
+  }
+
+  /// \brief Compute the square root
+  APInt sqrt() const;
+
+  /// \brief Get the absolute value.
+  ///
+  /// If *this is < 0 then return -(*this), otherwise *this.
+  APInt abs() const {
+    if (isNegative())
+      return -(*this);
+    return *this;
+  }
+
+  /// \returns the multiplicative inverse for a given modulo.
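+  ///
+  /// For example (illustrative): the inverse of 3 modulo 16 is 11, since
+  /// 3 * 11 == 33 == 1 (mod 16).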
+  APInt multiplicativeInverse(const APInt &modulo) const;
+
+  /// @}
+  /// \name Support for division by constant
+  /// @{
+
+  /// Calculate the magic number for signed division by a constant.
+  struct ms;
+  ms magic() const;
+
+  /// Calculate the magic number for unsigned division by a constant.
+  struct mu;
+  mu magicu(unsigned LeadingZeros = 0) const;
+
+  /// @}
+  /// \name Building-block Operations for APInt and APFloat
+  /// @{
+
+  // These building block operations operate on a representation of arbitrary
+  // precision, two's-complement, bignum integer values. They should be
+  // sufficient to implement APInt and APFloat bignum requirements. Inputs are
+  // generally a pointer to the base of an array of integer parts, representing
+  // an unsigned bignum, and a count of how many parts there are.
+
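+  // A minimal sketch of how these primitives chain together (illustrative):
+  //
+  //   APInt::WordType A[2], B[2];
+  //   APInt::tcSet(A, 5, 2);  // A = {5, 0}
+  //   APInt::tcSet(B, 7, 2);  // B = {7, 0}
+  //   APInt::WordType Carry = APInt::tcAdd(A, B, 0, 2); // A = {12, 0}, Carry == 0
+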
+  /// Sets the least significant part of a bignum to the input value, and zeroes
+  /// out higher parts.
+  static void tcSet(WordType *, WordType, unsigned);
+
+  /// Assign one bignum to another.
+  static void tcAssign(WordType *, const WordType *, unsigned);
+
+  /// Returns true if a bignum is zero, false otherwise.
+  static bool tcIsZero(const WordType *, unsigned);
+
+  /// Extract the given bit of a bignum; returns 0 or 1.  Zero-based.
+  static int tcExtractBit(const WordType *, unsigned bit);
+
+  /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
+  /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
+  /// significant bit of DST.  All high bits above srcBITS in DST are
+  /// zero-filled.
+  static void tcExtract(WordType *, unsigned dstCount,
+                        const WordType *, unsigned srcBits,
+                        unsigned srcLSB);
+
+  /// Set the given bit of a bignum.  Zero-based.
+  static void tcSetBit(WordType *, unsigned bit);
+
+  /// Clear the given bit of a bignum.  Zero-based.
+  static void tcClearBit(WordType *, unsigned bit);
+
+  /// Returns the bit number of the least or most significant set bit of a
+  /// number.  If the input number has no bits set, -1U is returned.
+  static unsigned tcLSB(const WordType *, unsigned n);
+  static unsigned tcMSB(const WordType *parts, unsigned n);
+
+  /// Negate a bignum in-place.
+  static void tcNegate(WordType *, unsigned);
+
+  /// DST += RHS + CARRY where CARRY is zero or one.  Returns the carry flag.
+  static WordType tcAdd(WordType *, const WordType *,
+                        WordType carry, unsigned);
+  /// DST += RHS.  Returns the carry flag.
+  static WordType tcAddPart(WordType *, WordType, unsigned);
+
+  /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+  static WordType tcSubtract(WordType *, const WordType *,
+                             WordType carry, unsigned);
+  /// DST -= RHS.  Returns the carry flag.
+  static WordType tcSubtractPart(WordType *, WordType, unsigned);
+
+  /// DST += SRC * MULTIPLIER + PART   if add is true
+  /// DST  = SRC * MULTIPLIER + PART   if add is false
+  ///
+  /// Requires 0 <= DSTPARTS <= SRCPARTS + 1.  If DST overlaps SRC they must
+  /// start at the same point, i.e. DST == SRC.
+  ///
+  /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
+  /// Otherwise DST is filled with the least significant DSTPARTS parts of the
+  /// result, and if all of the omitted higher parts were zero return zero,
+  /// otherwise overflow occurred and return one.
+  static int tcMultiplyPart(WordType *dst, const WordType *src,
+                            WordType multiplier, WordType carry,
+                            unsigned srcParts, unsigned dstParts,
+                            bool add);
+
+  /// DST = LHS * RHS, where DST has the same width as the operands and is
+  /// filled with the least significant parts of the result.  Returns one if
+  /// overflow occurred, otherwise zero.  DST must be disjoint from both
+  /// operands.
+  static int tcMultiply(WordType *, const WordType *, const WordType *,
+                        unsigned);
+
+  /// DST = LHS * RHS, where DST has width the sum of the widths of the
+  /// operands. No overflow occurs. DST must be disjoint from both operands.
+  static void tcFullMultiply(WordType *, const WordType *,
+                             const WordType *, unsigned, unsigned);
+
+  /// If RHS is zero, LHS and REMAINDER are left unchanged; return one.
+  /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
+  /// REMAINDER to the remainder, return zero.  i.e.
+  ///
+  ///  OLD_LHS = RHS * LHS + REMAINDER
+  ///
+  /// SCRATCH is a bignum of the same size as the operands and result for use by
+  /// the routine; its contents need not be initialized and are destroyed.  LHS,
+  /// REMAINDER and SCRATCH must be distinct.
+  static int tcDivide(WordType *lhs, const WordType *rhs,
+                      WordType *remainder, WordType *scratch,
+                      unsigned parts);
+
+  /// Shift a bignum left Count bits. Shifted-in bits are zero. There are no
+  /// restrictions on Count.
+  static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
+
+  /// Shift a bignum right Count bits.  Shifted-in bits are zero.  There are
+  /// no restrictions on Count.
+  static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
+
+  /// The obvious AND, OR, XOR and complement operations.
+  static void tcAnd(WordType *, const WordType *, unsigned);
+  static void tcOr(WordType *, const WordType *, unsigned);
+  static void tcXor(WordType *, const WordType *, unsigned);
+  static void tcComplement(WordType *, unsigned);
+
+  /// Comparison (unsigned) of two bignums.
+  static int tcCompare(const WordType *, const WordType *, unsigned);
+
+  /// Increment a bignum in-place.  Return the carry flag.
+  static WordType tcIncrement(WordType *dst, unsigned parts) {
+    return tcAddPart(dst, 1, parts);
+  }
+
+  /// Decrement a bignum in-place.  Return the borrow flag.
+  static WordType tcDecrement(WordType *dst, unsigned parts) {
+    return tcSubtractPart(dst, 1, parts);
+  }
+
+  /// Set the least significant BITS and clear the rest.
+  static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits);
+
+  /// \brief Debug method.
+  void dump() const;
+
+  /// @}
+};
+
+/// Magic data for optimising signed division by a constant.
+struct APInt::ms {
+  APInt m;    ///< magic number
+  unsigned s; ///< shift amount
+};
+
+/// Magic data for optimising unsigned division by a constant.
+struct APInt::mu {
+  APInt m;    ///< magic number
+  bool a;     ///< add indicator
+  unsigned s; ///< shift amount
+};
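+
+// Illustrative sketch of how the magic numbers can be used (the usual
+// Hacker's Delight formulation; the names q, n, T below are illustrative,
+// not part of this header). To compute q = n / D for a constant unsigned
+// divisor D:
+//
+//   APInt::mu Magic = D.magicu();
+//   // T = the high BitWidth bits of (n * Magic.m), then:
+//   //   q = Magic.a ? (((n - T) >> 1) + T) >> (Magic.s - 1)
+//   //               : T >> Magic.s;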
+
+inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
+
+inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
+
+/// \brief Unary bitwise complement operator.
+///
+/// \returns an APInt that is the bitwise complement of \p v.
+inline APInt operator~(APInt v) {
+  v.flipAllBits();
+  return v;
+}
+
+inline APInt operator&(APInt a, const APInt &b) {
+  a &= b;
+  return a;
+}
+
+inline APInt operator&(const APInt &a, APInt &&b) {
+  b &= a;
+  return std::move(b);
+}
+
+inline APInt operator&(APInt a, uint64_t RHS) {
+  a &= RHS;
+  return a;
+}
+
+inline APInt operator&(uint64_t LHS, APInt b) {
+  b &= LHS;
+  return b;
+}
+
+inline APInt operator|(APInt a, const APInt &b) {
+  a |= b;
+  return a;
+}
+
+inline APInt operator|(const APInt &a, APInt &&b) {
+  b |= a;
+  return std::move(b);
+}
+
+inline APInt operator|(APInt a, uint64_t RHS) {
+  a |= RHS;
+  return a;
+}
+
+inline APInt operator|(uint64_t LHS, APInt b) {
+  b |= LHS;
+  return b;
+}
+
+inline APInt operator^(APInt a, const APInt &b) {
+  a ^= b;
+  return a;
+}
+
+inline APInt operator^(const APInt &a, APInt &&b) {
+  b ^= a;
+  return std::move(b);
+}
+
+inline APInt operator^(APInt a, uint64_t RHS) {
+  a ^= RHS;
+  return a;
+}
+
+inline APInt operator^(uint64_t LHS, APInt b) {
+  b ^= LHS;
+  return b;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
+  I.print(OS, true);
+  return OS;
+}
+
+inline APInt operator-(APInt v) {
+  v.negate();
+  return v;
+}
+
+inline APInt operator+(APInt a, const APInt &b) {
+  a += b;
+  return a;
+}
+
+inline APInt operator+(const APInt &a, APInt &&b) {
+  b += a;
+  return std::move(b);
+}
+
+inline APInt operator+(APInt a, uint64_t RHS) {
+  a += RHS;
+  return a;
+}
+
+inline APInt operator+(uint64_t LHS, APInt b) {
+  b += LHS;
+  return b;
+}
+
+inline APInt operator-(APInt a, const APInt &b) {
+  a -= b;
+  return a;
+}
+
+inline APInt operator-(const APInt &a, APInt &&b) {
+  b.negate();
+  b += a;
+  return std::move(b);
+}
+
+inline APInt operator-(APInt a, uint64_t RHS) {
+  a -= RHS;
+  return a;
+}
+
+inline APInt operator-(uint64_t LHS, APInt b) {
+  b.negate();
+  b += LHS;
+  return b;
+}
+
+inline APInt operator*(APInt a, uint64_t RHS) {
+  a *= RHS;
+  return a;
+}
+
+inline APInt operator*(uint64_t LHS, APInt b) {
+  b *= LHS;
+  return b;
+}
+
+namespace APIntOps {
+
+/// \brief Determine the smaller of two APInts considered to be signed.
+inline const APInt &smin(const APInt &A, const APInt &B) {
+  return A.slt(B) ? A : B;
+}
+
+/// \brief Determine the larger of two APInts considered to be signed.
+inline const APInt &smax(const APInt &A, const APInt &B) {
+  return A.sgt(B) ? A : B;
+}
+
+/// \brief Determine the smaller of two APInts considered to be unsigned.
+inline const APInt &umin(const APInt &A, const APInt &B) {
+  return A.ult(B) ? A : B;
+}
+
+/// \brief Determine the larger of two APInts considered to be unsigned.
+inline const APInt &umax(const APInt &A, const APInt &B) {
+  return A.ugt(B) ? A : B;
+}
+
+/// \brief Compute GCD of two unsigned APInt values.
+///
+/// This function returns the greatest common divisor of the two APInt values
+/// using Stein's algorithm.
+///
+/// \returns the greatest common divisor of A and B.
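+///
+/// For example (illustrative), GreatestCommonDivisor(APInt(32, 36),
+/// APInt(32, 24)) returns APInt(32, 12).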
+APInt GreatestCommonDivisor(APInt A, APInt B);
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline double RoundAPIntToDouble(const APInt &APIVal) {
+  return APIVal.roundToDouble();
+}
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
+  return APIVal.signedRoundToDouble();
+}
+
+/// \brief Converts the given APInt to a float value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline float RoundAPIntToFloat(const APInt &APIVal) {
+  return float(RoundAPIntToDouble(APIVal));
+}
+
+/// \brief Converts the given APInt to a float value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
+  return float(APIVal.signedRoundToDouble());
+}
+
+/// \brief Converts the given double value into an APInt.
+///
+/// This function converts a double value to an APInt value.
+APInt RoundDoubleToAPInt(double Double, unsigned width);
+
+/// \brief Converts a float value into an APInt.
+///
+/// Converts a float value into an APInt by promoting it to double first.
+inline APInt RoundFloatToAPInt(float Float, unsigned width) {
+  return RoundDoubleToAPInt(double(Float), width);
+}
+
+} // End of APIntOps namespace
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with the IBM xlC compiler.
+hash_code hash_value(const APInt &Arg);
+} // End of llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/APSInt.h b/linux-x64/clang/include/llvm/ADT/APSInt.h
new file mode 100644
index 0000000..dabbf33
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APSInt.h
@@ -0,0 +1,336 @@
+//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APSInt class, which is a simple class that
+// represents an arbitrary-sized integer that knows its signedness.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APSINT_H
+#define LLVM_ADT_APSINT_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+class LLVM_NODISCARD APSInt : public APInt {
+  bool IsUnsigned;
+
+public:
+  /// Default constructor that creates an uninitialized APSInt, defaulting to
+  /// signed.
+  explicit APSInt() : IsUnsigned(false) {}
+
+  /// APSInt ctor - Create an APSInt with the specified width, defaulting to
+  /// unsigned.
+  explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
+   : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
+
+  explicit APSInt(APInt I, bool isUnsigned = true)
+   : APInt(std::move(I)), IsUnsigned(isUnsigned) {}
+
+  /// Construct an APSInt from a string representation.
+  ///
+  /// This constructor interprets the string \p Str using radix 10.
+  /// The interpretation stops at the end of the string. The bit width of the
+  /// constructed APSInt is determined automatically.
+  ///
+  /// \param Str the string to be interpreted.
+  explicit APSInt(StringRef Str);
+
+  APSInt &operator=(APInt RHS) {
+    // Retain our current sign.
+    APInt::operator=(std::move(RHS));
+    return *this;
+  }
+
+  APSInt &operator=(uint64_t RHS) {
+    // Retain our current sign.
+    APInt::operator=(RHS);
+    return *this;
+  }
+
+  // Query sign information.
+  bool isSigned() const { return !IsUnsigned; }
+  bool isUnsigned() const { return IsUnsigned; }
+  void setIsUnsigned(bool Val) { IsUnsigned = Val; }
+  void setIsSigned(bool Val) { IsUnsigned = !Val; }
+
+  /// toString - Append this APSInt to the specified SmallString.
+  void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    APInt::toString(Str, Radix, isSigned());
+  }
+  /// toString - Converts an APSInt to a std::string.  This is an inefficient
+  /// method; you should prefer passing in a SmallString instead.
+  std::string toString(unsigned Radix) const {
+    return APInt::toString(Radix, isSigned());
+  }
+  using APInt::toString;
+
+  /// \brief Get the correctly-extended \c int64_t value.
+  int64_t getExtValue() const {
+    assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+    return isSigned() ? getSExtValue() : getZExtValue();
+  }
+
+  APSInt trunc(uint32_t width) const {
+    return APSInt(APInt::trunc(width), IsUnsigned);
+  }
+
+  APSInt extend(uint32_t width) const {
+    if (IsUnsigned)
+      return APSInt(zext(width), IsUnsigned);
+    else
+      return APSInt(sext(width), IsUnsigned);
+  }
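+
+  // Illustrative: extending a signed 8-bit -1 (0xFF) to 16 bits sign-extends
+  // to 0xFFFF, while an unsigned 8-bit 0xFF zero-extends to 0x00FF.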
+
+  APSInt extOrTrunc(uint32_t width) const {
+    if (IsUnsigned)
+      return APSInt(zextOrTrunc(width), IsUnsigned);
+    else
+      return APSInt(sextOrTrunc(width), IsUnsigned);
+  }
+
+  const APSInt &operator%=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = urem(RHS);
+    else
+      *this = srem(RHS);
+    return *this;
+  }
+  const APSInt &operator/=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = udiv(RHS);
+    else
+      *this = sdiv(RHS);
+    return *this;
+  }
+  APSInt operator%(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
+  }
+  APSInt operator/(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
+  }
+
+  APSInt operator>>(unsigned Amt) const {
+    return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
+  }
+  APSInt& operator>>=(unsigned Amt) {
+    if (IsUnsigned)
+      lshrInPlace(Amt);
+    else
+      ashrInPlace(Amt);
+    return *this;
+  }
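+
+  // Illustrative: a signed 8-bit 0x80 (-128) shifted right by 1 yields 0xC0
+  // (-64, arithmetic shift), while the unsigned case yields 0x40.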
+
+  inline bool operator<(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ult(RHS) : slt(RHS);
+  }
+  inline bool operator>(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ugt(RHS) : sgt(RHS);
+  }
+  inline bool operator<=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ule(RHS) : sle(RHS);
+  }
+  inline bool operator>=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? uge(RHS) : sge(RHS);
+  }
+  inline bool operator==(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return eq(RHS);
+  }
+  inline bool operator!=(const APSInt& RHS) const {
+    return !((*this) == RHS);
+  }
+
+  bool operator==(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) == 0;
+  }
+  bool operator!=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) != 0;
+  }
+  bool operator<=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) <= 0;
+  }
+  bool operator>=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) >= 0;
+  }
+  bool operator<(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) < 0;
+  }
+  bool operator>(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) > 0;
+  }
+
+  // The remaining operators just wrap the logic of APInt, but retain the
+  // signedness information.
+
+  APSInt operator<<(unsigned Bits) const {
+    return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
+  }
+  APSInt& operator<<=(unsigned Amt) {
+    static_cast<APInt&>(*this) <<= Amt;
+    return *this;
+  }
+
+  APSInt& operator++() {
+    ++(static_cast<APInt&>(*this));
+    return *this;
+  }
+  APSInt& operator--() {
+    --(static_cast<APInt&>(*this));
+    return *this;
+  }
+  APSInt operator++(int) {
+    return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
+  }
+  APSInt operator--(int) {
+    return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
+  }
+  APSInt operator-() const {
+    return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
+  }
+  APSInt& operator+=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) += RHS;
+    return *this;
+  }
+  APSInt& operator-=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) -= RHS;
+    return *this;
+  }
+  APSInt& operator*=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) *= RHS;
+    return *this;
+  }
+  APSInt& operator&=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) &= RHS;
+    return *this;
+  }
+  APSInt& operator|=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) |= RHS;
+    return *this;
+  }
+  APSInt& operator^=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) ^= RHS;
+    return *this;
+  }
+
+  APSInt operator&(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
+  }
+
+  APSInt operator|(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
+  }
+
+  APSInt operator^(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
+  }
+
+  APSInt operator*(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
+  }
+  APSInt operator+(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
+  }
+  APSInt operator-(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
+  }
+  APSInt operator~() const {
+    return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
+  }
+
+  /// getMaxValue - Return the APSInt representing the maximum integer value
+  ///  with the given bit width and signedness.
+  static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
+    return APSInt(Unsigned ? APInt::getMaxValue(numBits)
+                           : APInt::getSignedMaxValue(numBits), Unsigned);
+  }
+
+  /// getMinValue - Return the APSInt representing the minimum integer value
+  ///  with the given bit width and signedness.
+  static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
+    return APSInt(Unsigned ? APInt::getMinValue(numBits)
+                           : APInt::getSignedMinValue(numBits), Unsigned);
+  }
+
+  /// \brief Determine if two APSInts have the same value, zero- or
+  /// sign-extending as needed.
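+  ///
+  /// For example (illustrative), an unsigned 8-bit 255 and a signed 8-bit -1
+  /// share a bit pattern but are not the same value, whereas an unsigned
+  /// 8-bit 255 and a signed 16-bit 255 are.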
+  static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+    return !compareValues(I1, I2);
+  }
+
+  /// \brief Compare underlying values of two numbers.
+  static int compareValues(const APSInt &I1, const APSInt &I2) {
+    if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+      return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);
+
+    // Check for a bit-width mismatch.
+    if (I1.getBitWidth() > I2.getBitWidth())
+      return compareValues(I1, I2.extend(I1.getBitWidth()));
+    if (I2.getBitWidth() > I1.getBitWidth())
+      return compareValues(I1.extend(I2.getBitWidth()), I2);
+
+    // We have a signedness mismatch. Check for negative values and do an
+    // unsigned compare if both are positive.
+    if (I1.isSigned()) {
+      assert(!I2.isSigned() && "Expected signed mismatch");
+      if (I1.isNegative())
+        return -1;
+    } else {
+      assert(I2.isSigned() && "Expected signed mismatch");
+      if (I2.isNegative())
+        return 1;
+    }
+
+    return I1.compare(I2);
+  }
+
+  static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
+  static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }
+
+  /// Profile - Used to insert APSInt objects, or objects that contain APSInt
+  ///  objects, into FoldingSets.
+  void Profile(FoldingSetNodeID& ID) const;
+};
+
+inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
+inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
+inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
+inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
+inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
+inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
+  I.print(OS, I.isSigned());
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/AllocatorList.h b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
new file mode 100644
index 0000000..178c674
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
@@ -0,0 +1,241 @@
+//===- llvm/ADT/AllocatorList.h - Custom allocator list ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ALLOCATORLIST_H
+#define LLVM_ADT_ALLOCATORLIST_H
+
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// A linked-list with a custom, local allocator.
+///
+/// Expose a std::list-like interface that owns and uses a custom LLVM-style
+/// allocator (e.g., BumpPtrAllocator), leveraging \a simple_ilist for the
+/// implementation details.
+///
+/// Because this list owns the allocator, calling \a splice() with a different
+/// list isn't generally safe.  As such, \a splice has been left out of the
+/// interface entirely.
+template <class T, class AllocatorT> class AllocatorList : AllocatorT {
+  struct Node : ilist_node<Node> {
+    Node(Node &&) = delete;
+    Node(const Node &) = delete;
+    Node &operator=(Node &&) = delete;
+    Node &operator=(const Node &) = delete;
+
+    Node(T &&V) : V(std::move(V)) {}
+    Node(const T &V) : V(V) {}
+    template <class... Ts> Node(Ts &&... Vs) : V(std::forward<Ts>(Vs)...) {}
+    T V;
+  };
+
+  using list_type = simple_ilist<Node>;
+
+  list_type List;
+
+  AllocatorT &getAlloc() { return *this; }
+  const AllocatorT &getAlloc() const { return *this; }
+
+  template <class... ArgTs> Node *create(ArgTs &&... Args) {
+    return new (getAlloc()) Node(std::forward<ArgTs>(Args)...);
+  }
+
+  struct Cloner {
+    AllocatorList &AL;
+
+    Cloner(AllocatorList &AL) : AL(AL) {}
+
+    Node *operator()(const Node &N) const { return AL.create(N.V); }
+  };
+
+  struct Disposer {
+    AllocatorList &AL;
+
+    Disposer(AllocatorList &AL) : AL(AL) {}
+
+    void operator()(Node *N) const {
+      N->~Node();
+      AL.getAlloc().Deallocate(N);
+    }
+  };
+
+public:
+  using value_type = T;
+  using pointer = T *;
+  using reference = T &;
+  using const_pointer = const T *;
+  using const_reference = const T &;
+  using size_type = typename list_type::size_type;
+  using difference_type = typename list_type::difference_type;
+
+private:
+  template <class ValueT, class IteratorBase>
+  class IteratorImpl
+      : public iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
+                                     IteratorBase,
+                                     std::bidirectional_iterator_tag, ValueT> {
+    template <class OtherValueT, class OtherIteratorBase>
+    friend class IteratorImpl;
+    friend AllocatorList;
+
+    using base_type =
+        iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
+                              std::bidirectional_iterator_tag, ValueT>;
+
+  public:
+    using value_type = ValueT;
+    using pointer = ValueT *;
+    using reference = ValueT &;
+
+    IteratorImpl() = default;
+    IteratorImpl(const IteratorImpl &) = default;
+    IteratorImpl &operator=(const IteratorImpl &) = default;
+
+    explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
+
+    template <class OtherValueT, class OtherIteratorBase>
+    IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
+                 typename std::enable_if<std::is_convertible<
+                     OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
+        : base_type(X.wrapped()) {}
+
+    ~IteratorImpl() = default;
+
+    reference operator*() const { return base_type::wrapped()->V; }
+    pointer operator->() const { return &operator*(); }
+
+    friend bool operator==(const IteratorImpl &L, const IteratorImpl &R) {
+      return L.wrapped() == R.wrapped();
+    }
+    friend bool operator!=(const IteratorImpl &L, const IteratorImpl &R) {
+      return !(L == R);
+    }
+  };
+
+public:
+  using iterator = IteratorImpl<T, typename list_type::iterator>;
+  using reverse_iterator =
+      IteratorImpl<T, typename list_type::reverse_iterator>;
+  using const_iterator =
+      IteratorImpl<const T, typename list_type::const_iterator>;
+  using const_reverse_iterator =
+      IteratorImpl<const T, typename list_type::const_reverse_iterator>;
+
+  AllocatorList() = default;
+  AllocatorList(AllocatorList &&X)
+      : AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
+
+  AllocatorList(const AllocatorList &X) {
+    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+  }
+
+  AllocatorList &operator=(AllocatorList &&X) {
+    clear(); // Dispose of current nodes explicitly.
+    List = std::move(X.List);
+    getAlloc() = std::move(X.getAlloc());
+    return *this;
+  }
+
+  AllocatorList &operator=(const AllocatorList &X) {
+    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+    return *this;
+  }
+
+  ~AllocatorList() { clear(); }
+
+  void swap(AllocatorList &RHS) {
+    List.swap(RHS.List);
+    std::swap(getAlloc(), RHS.getAlloc());
+  }
+
+  bool empty() { return List.empty(); }
+  size_t size() { return List.size(); }
+
+  iterator begin() { return iterator(List.begin()); }
+  iterator end() { return iterator(List.end()); }
+  const_iterator begin() const { return const_iterator(List.begin()); }
+  const_iterator end() const { return const_iterator(List.end()); }
+  reverse_iterator rbegin() { return reverse_iterator(List.rbegin()); }
+  reverse_iterator rend() { return reverse_iterator(List.rend()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(List.rbegin());
+  }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(List.rend());
+  }
+
+  T &back() { return List.back().V; }
+  T &front() { return List.front().V; }
+  const T &back() const { return List.back().V; }
+  const T &front() const { return List.front().V; }
+
+  template <class... Ts> iterator emplace(iterator I, Ts &&... Vs) {
+    return iterator(List.insert(I.wrapped(), *create(std::forward<Ts>(Vs)...)));
+  }
+
+  iterator insert(iterator I, T &&V) {
+    return iterator(List.insert(I.wrapped(), *create(std::move(V))));
+  }
+  iterator insert(iterator I, const T &V) {
+    return iterator(List.insert(I.wrapped(), *create(V)));
+  }
+
+  template <class Iterator>
+  void insert(iterator I, Iterator First, Iterator Last) {
+    for (; First != Last; ++First)
+      List.insert(I.wrapped(), *create(*First));
+  }
+
+  iterator erase(iterator I) {
+    return iterator(List.eraseAndDispose(I.wrapped(), Disposer(*this)));
+  }
+
+  iterator erase(iterator First, iterator Last) {
+    return iterator(
+        List.eraseAndDispose(First.wrapped(), Last.wrapped(), Disposer(*this)));
+  }
+
+  void clear() { List.clearAndDispose(Disposer(*this)); }
+  void pop_back() { List.eraseAndDispose(--List.end(), Disposer(*this)); }
+  void pop_front() { List.eraseAndDispose(List.begin(), Disposer(*this)); }
+  void push_back(T &&V) { insert(end(), std::move(V)); }
+  void push_front(T &&V) { insert(begin(), std::move(V)); }
+  void push_back(const T &V) { insert(end(), V); }
+  void push_front(const T &V) { insert(begin(), V); }
+  template <class... Ts> void emplace_back(Ts &&... Vs) {
+    emplace(end(), std::forward<Ts>(Vs)...);
+  }
+  template <class... Ts> void emplace_front(Ts &&... Vs) {
+    emplace(begin(), std::forward<Ts>(Vs)...);
+  }
+
+  /// Reset the underlying allocator.
+  ///
+  /// \pre \c empty()
+  void resetAlloc() {
+    assert(empty() && "Cannot reset allocator if not empty");
+    getAlloc().Reset();
+  }
+};
+
+template <class T> using BumpPtrList = AllocatorList<T, BumpPtrAllocator>;
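+
+// Illustrative usage sketch:
+//
+//   BumpPtrList<int> L;   // nodes come from an owned BumpPtrAllocator
+//   L.push_back(1);
+//   L.emplace_back(2);
+//   L.clear();            // disposes of all nodes
+//   L.resetAlloc();       // reclaims allocator memory; requires empty()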
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ALLOCATORLIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ArrayRef.h b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000..5f7a769
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,541 @@
+//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+  /// ArrayRef - Represent a constant reference to an array (0 or more elements
+  /// consecutively in memory), i.e. a start pointer and a length.  It allows
+  /// various APIs to take consecutive elements easily and conveniently.
+  ///
+  /// This class does not own the underlying data; it is expected to be used in
+  /// situations where the data resides in some other buffer, whose lifetime
+  /// extends past that of the ArrayRef. For this reason, it is not in general
+  /// safe to store an ArrayRef.
+  ///
+  /// This is intended to be trivially copyable, so it should be passed by
+  /// value.
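+  ///
+  /// Example (illustrative):
+  /// \code
+  ///   int Buf[] = {1, 2, 3, 4};
+  ///   ArrayRef<int> A(Buf);            // non-owning view of all of Buf
+  ///   ArrayRef<int> Tail = A.slice(1); // views {2, 3, 4}
+  /// \endcode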
+  template<typename T>
+  class LLVM_NODISCARD ArrayRef {
+  public:
+    using iterator = const T *;
+    using const_iterator = const T *;
+    using size_type = size_t;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+  private:
+    /// The start of the array, in an external buffer.
+    const T *Data = nullptr;
+
+    /// The number of elements.
+    size_type Length = 0;
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct an empty ArrayRef.
+    /*implicit*/ ArrayRef() = default;
+
+    /// Construct an empty ArrayRef from None.
+    /*implicit*/ ArrayRef(NoneType) {}
+
+    /// Construct an ArrayRef from a single element.
+    /*implicit*/ ArrayRef(const T &OneElt)
+      : Data(&OneElt), Length(1) {}
+
+    /// Construct an ArrayRef from a pointer and length.
+    /*implicit*/ ArrayRef(const T *data, size_t length)
+      : Data(data), Length(length) {}
+
+    /// Construct an ArrayRef from a range.
+    ArrayRef(const T *begin, const T *end)
+      : Data(begin), Length(end - begin) {}
+
+    /// Construct an ArrayRef from a SmallVector. This is templated in order to
+    /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
+    /// copy-construct an ArrayRef.
+    template<typename U>
+    /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
+      : Data(Vec.data()), Length(Vec.size()) {
+    }
+
+    /// Construct an ArrayRef from a std::vector.
+    template<typename A>
+    /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
+      : Data(Vec.data()), Length(Vec.size()) {}
+
+    /// Construct an ArrayRef from a std::array.
+    template <size_t N>
+    /*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
+        : Data(Arr.data()), Length(N) {}
+
+    /// Construct an ArrayRef from a C array.
+    template <size_t N>
+    /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
+
+    /// Construct an ArrayRef from a std::initializer_list.
+    /*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
+    : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
+      Length(Vec.size()) {}
+
+    /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
+    /// ensure that only ArrayRefs of pointers can be converted.
+    template <typename U>
+    ArrayRef(
+        const ArrayRef<U *> &A,
+        typename std::enable_if<
+           std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
+      : Data(A.data()), Length(A.size()) {}
+
+    /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
+    /// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
+    /// whenever we copy-construct an ArrayRef.
+    template<typename U, typename DummyT>
+    /*implicit*/ ArrayRef(
+      const SmallVectorTemplateCommon<U *, DummyT> &Vec,
+      typename std::enable_if<
+          std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
+      : Data(Vec.data()), Length(Vec.size()) {
+    }
+
+    /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
+    /// to ensure that only vectors of pointers can be converted.
+    template<typename U, typename A>
+    ArrayRef(const std::vector<U *, A> &Vec,
+             typename std::enable_if<
+                 std::is_convertible<U *const *, T const *>::value>::type* = 0)
+      : Data(Vec.data()), Length(Vec.size()) {}
+
+    /// @}
+    /// @name Simple Operations
+    /// @{
+
+    iterator begin() const { return Data; }
+    iterator end() const { return Data + Length; }
+
+    reverse_iterator rbegin() const { return reverse_iterator(end()); }
+    reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+    /// empty - Check if the array is empty.
+    bool empty() const { return Length == 0; }
+
+    const T *data() const { return Data; }
+
+    /// size - Get the array size.
+    size_t size() const { return Length; }
+
+    /// front - Get the first element.
+    const T &front() const {
+      assert(!empty());
+      return Data[0];
+    }
+
+    /// back - Get the last element.
+    const T &back() const {
+      assert(!empty());
+      return Data[Length-1];
+    }
+
+    /// copy - Allocate a copy in Allocator and return an ArrayRef<T> to it.
+    template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
+      T *Buff = A.template Allocate<T>(Length);
+      std::uninitialized_copy(begin(), end(), Buff);
+      return ArrayRef<T>(Buff, Length);
+    }
+
+    /// equals - Check for element-wise equality.
+    bool equals(ArrayRef RHS) const {
+      if (Length != RHS.Length)
+        return false;
+      return std::equal(begin(), end(), RHS.begin());
+    }
+
+    /// slice(n, m) - Chop off the first N elements of the array, and keep M
+    /// elements in the array.
+    ArrayRef<T> slice(size_t N, size_t M) const {
+      assert(N+M <= size() && "Invalid specifier");
+      return ArrayRef<T>(data()+N, M);
+    }
+
+    /// slice(n) - Chop off the first N elements of the array.
+    ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
+
+    /// \brief Drop the first \p N elements of the array.
+    ArrayRef<T> drop_front(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return slice(N, size() - N);
+    }
+
+    /// \brief Drop the last \p N elements of the array.
+    ArrayRef<T> drop_back(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return slice(0, size() - N);
+    }
+
+    /// \brief Return a copy of *this with the first N elements satisfying the
+    /// given predicate removed.
+    template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
+      return ArrayRef<T>(find_if_not(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with the first N elements not satisfying
+    /// the given predicate removed.
+    template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
+      return ArrayRef<T>(find_if(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with only the first \p N elements.
+    ArrayRef<T> take_front(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_back(size() - N);
+    }
+
+    /// \brief Return a copy of *this with only the last \p N elements.
+    ArrayRef<T> take_back(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_front(size() - N);
+    }
+
+    /// \brief Return the first N elements of this Array that satisfy the given
+    /// predicate.
+    template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
+      return ArrayRef<T>(begin(), find_if_not(*this, Pred));
+    }
+
+    /// \brief Return the first N elements of this Array that don't satisfy the
+    /// given predicate.
+    template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
+      return ArrayRef<T>(begin(), find_if(*this, Pred));
+    }
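+
+    // Illustrative, with a hypothetical predicate isOdd: on {1, 2, 3, 1},
+    // take_while(isOdd) yields {1} and drop_while(isOdd) yields {2, 3, 1}.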
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+    const T &operator[](size_t Index) const {
+      assert(Index < Length && "Invalid index!");
+      return Data[Index];
+    }
+
+    /// Disallow accidental assignment from a temporary.
+    ///
+    /// The declaration here is extra complicated so that "arrayRef = {}"
+    /// continues to select the move assignment operator.
+    template <typename U>
+    typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+    operator=(U &&Temporary) = delete;
+
+    /// Disallow accidental assignment from a temporary.
+    ///
+    /// The declaration here is extra complicated so that "arrayRef = {}"
+    /// continues to select the move assignment operator.
+    template <typename U>
+    typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+    operator=(std::initializer_list<U>) = delete;
+
+    /// @}
+    /// @name Expensive Operations
+    /// @{
+    std::vector<T> vec() const {
+      return std::vector<T>(Data, Data+Length);
+    }
+
+    /// @}
+    /// @name Conversion operators
+    /// @{
+    operator std::vector<T>() const {
+      return std::vector<T>(Data, Data+Length);
+    }
+
+    /// @}
+  };
+
+  /// MutableArrayRef - Represent a mutable reference to an array (0 or more
+  /// elements consecutively in memory), i.e. a start pointer and a length.  It
+  /// allows various APIs to take and modify consecutive elements easily and
+  /// conveniently.
+  ///
+  /// This class does not own the underlying data; it is expected to be used in
+  /// situations where the data resides in some other buffer, whose lifetime
+  /// extends past that of the MutableArrayRef. For this reason, it is not in
+  /// general safe to store a MutableArrayRef.
+  ///
+  /// This is intended to be trivially copyable, so it should be passed by
+  /// value.
+  template<typename T>
+  class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
+  public:
+    using iterator = T *;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    /// Construct an empty MutableArrayRef.
+    /*implicit*/ MutableArrayRef() = default;
+
+    /// Construct an empty MutableArrayRef from None.
+    /*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
+
+    /// Construct a MutableArrayRef from a single element.
+    /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
+
+    /// Construct a MutableArrayRef from a pointer and length.
+    /*implicit*/ MutableArrayRef(T *data, size_t length)
+      : ArrayRef<T>(data, length) {}
+
+    /// Construct a MutableArrayRef from a range.
+    MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
+
+    /// Construct a MutableArrayRef from a SmallVector.
+    /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
+    : ArrayRef<T>(Vec) {}
+
+    /// Construct a MutableArrayRef from a std::vector.
+    /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
+    : ArrayRef<T>(Vec) {}
+
+    /// Construct a MutableArrayRef from a std::array.
+    template <size_t N>
+    /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
+        : ArrayRef<T>(Arr) {}
+
+    /// Construct a MutableArrayRef from a C array.
+    template <size_t N>
+    /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}
+
+    T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
+
+    iterator begin() const { return data(); }
+    iterator end() const { return data() + this->size(); }
+
+    reverse_iterator rbegin() const { return reverse_iterator(end()); }
+    reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+    /// front - Get the first element.
+    T &front() const {
+      assert(!this->empty());
+      return data()[0];
+    }
+
+    /// back - Get the last element.
+    T &back() const {
+      assert(!this->empty());
+      return data()[this->size()-1];
+    }
+
+    /// slice(n, m) - Chop off the first N elements of the array, and keep M
+    /// elements in the array.
+    MutableArrayRef<T> slice(size_t N, size_t M) const {
+      assert(N + M <= this->size() && "Invalid specifier");
+      return MutableArrayRef<T>(this->data() + N, M);
+    }
+
+    /// slice(n) - Chop off the first N elements of the array.
+    MutableArrayRef<T> slice(size_t N) const {
+      return slice(N, this->size() - N);
+    }
+
+    /// \brief Drop the first \p N elements of the array.
+    MutableArrayRef<T> drop_front(size_t N = 1) const {
+      assert(this->size() >= N && "Dropping more elements than exist");
+      return slice(N, this->size() - N);
+    }
+
+    MutableArrayRef<T> drop_back(size_t N = 1) const {
+      assert(this->size() >= N && "Dropping more elements than exist");
+      return slice(0, this->size() - N);
+    }
+
+    /// \brief Return a copy of *this with the first N elements satisfying the
+    /// given predicate removed.
+    template <class PredicateT>
+    MutableArrayRef<T> drop_while(PredicateT Pred) const {
+      return MutableArrayRef<T>(find_if_not(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with the first N elements not satisfying
+    /// the given predicate removed.
+    template <class PredicateT>
+    MutableArrayRef<T> drop_until(PredicateT Pred) const {
+      return MutableArrayRef<T>(find_if(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with only the first \p N elements.
+    MutableArrayRef<T> take_front(size_t N = 1) const {
+      if (N >= this->size())
+        return *this;
+      return drop_back(this->size() - N);
+    }
+
+    /// \brief Return a copy of *this with only the last \p N elements.
+    MutableArrayRef<T> take_back(size_t N = 1) const {
+      if (N >= this->size())
+        return *this;
+      return drop_front(this->size() - N);
+    }
+
+    /// \brief Return the first N elements of this Array that satisfy the given
+    /// predicate.
+    template <class PredicateT>
+    MutableArrayRef<T> take_while(PredicateT Pred) const {
+      return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
+    }
+
+    /// \brief Return the first N elements of this Array that don't satisfy the
+    /// given predicate.
+    template <class PredicateT>
+    MutableArrayRef<T> take_until(PredicateT Pred) const {
+      return MutableArrayRef<T>(begin(), find_if(*this, Pred));
+    }
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+    T &operator[](size_t Index) const {
+      assert(Index < this->size() && "Invalid index!");
+      return data()[Index];
+    }
+  };
+
+  /// This is a MutableArrayRef that owns its array.
+  template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
+  public:
+    OwningArrayRef() = default;
+    OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
+
+    OwningArrayRef(ArrayRef<T> Data)
+        : MutableArrayRef<T>(new T[Data.size()], Data.size()) {
+      std::copy(Data.begin(), Data.end(), this->begin());
+    }
+
+    OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); }
+
+    OwningArrayRef &operator=(OwningArrayRef &&Other) {
+      delete[] this->data();
+      this->MutableArrayRef<T>::operator=(Other);
+      Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
+      return *this;
+    }
+
+    ~OwningArrayRef() { delete[] this->data(); }
+  };
+
+  /// @name ArrayRef Convenience constructors
+  /// @{
+
+  /// Construct an ArrayRef from a single element.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T &OneElt) {
+    return OneElt;
+  }
+
+  /// Construct an ArrayRef from a pointer and length.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T *data, size_t length) {
+    return ArrayRef<T>(data, length);
+  }
+
+  /// Construct an ArrayRef from a range.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
+    return ArrayRef<T>(begin, end);
+  }
+
+  /// Construct an ArrayRef from a SmallVector.
+  template <typename T>
+  ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a SmallVector.
+  template <typename T, unsigned N>
+  ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a std::vector.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from an ArrayRef (no-op) (const)
+  template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from an ArrayRef (no-op)
+  template <typename T> ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a C array.
+  template<typename T, size_t N>
+  ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
+    return ArrayRef<T>(Arr);
+  }
+
+  /// Construct a MutableArrayRef from a single element.
+  template<typename T>
+  MutableArrayRef<T> makeMutableArrayRef(T &OneElt) {
+    return OneElt;
+  }
+
+  /// Construct a MutableArrayRef from a pointer and length.
+  template<typename T>
+  MutableArrayRef<T> makeMutableArrayRef(T *data, size_t length) {
+    return MutableArrayRef<T>(data, length);
+  }
+
+  /// @}
+  /// @name ArrayRef Comparison Operators
+  /// @{
+
+  template<typename T>
+  inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    return LHS.equals(RHS);
+  }
+
+  template<typename T>
+  inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    return !(LHS == RHS);
+  }
+
+  /// @}
+
+  // ArrayRefs can be treated like a POD type.
+  template <typename T> struct isPodLike;
+  template <typename T> struct isPodLike<ArrayRef<T>> {
+    static const bool value = true;
+  };
+
+  template <typename T> hash_code hash_value(ArrayRef<T> S) {
+    return hash_combine_range(S.begin(), S.end());
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ARRAYREF_H
diff --git a/linux-x64/clang/include/llvm/ADT/BitVector.h b/linux-x64/clang/include/llvm/ADT/BitVector.h
new file mode 100644
index 0000000..124c2a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BitVector.h
@@ -0,0 +1,929 @@
+//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITVECTOR_H
+#define LLVM_ADT_BITVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+namespace llvm {
+
+/// ForwardIterator for the bits that are set.
+/// Iterators get invalidated when resize / reserve is called.
+template <typename BitVectorT> class const_set_bits_iterator_impl {
+  const BitVectorT &Parent;
+  int Current = 0;
+
+  void advance() {
+    assert(Current != -1 && "Trying to advance past end.");
+    Current = Parent.find_next(Current);
+  }
+
+public:
+  const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
+      : Parent(Parent), Current(Current) {}
+  explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
+      : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
+  const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;
+
+  const_set_bits_iterator_impl operator++(int) {
+    auto Prev = *this;
+    advance();
+    return Prev;
+  }
+
+  const_set_bits_iterator_impl &operator++() {
+    advance();
+    return *this;
+  }
+
+  unsigned operator*() const { return Current; }
+
+  bool operator==(const const_set_bits_iterator_impl &Other) const {
+    assert(&Parent == &Other.Parent &&
+           "Comparing iterators from different BitVectors");
+    return Current == Other.Current;
+  }
+
+  bool operator!=(const const_set_bits_iterator_impl &Other) const {
+    assert(&Parent == &Other.Parent &&
+           "Comparing iterators from different BitVectors");
+    return Current != Other.Current;
+  }
+};
+
+class BitVector {
+  typedef unsigned long BitWord;
+
+  enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
+
+  static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
+                "Unsupported word size");
+
+  MutableArrayRef<BitWord> Bits; // Actual bits.
+  unsigned Size;                 // Size of bitvector in bits.
+
+public:
+  typedef unsigned size_type;
+  // Encapsulation of a single bit.
+  class reference {
+    friend class BitVector;
+
+    BitWord *WordRef;
+    unsigned BitPos;
+
+  public:
+    reference(BitVector &b, unsigned Idx) {
+      WordRef = &b.Bits[Idx / BITWORD_SIZE];
+      BitPos = Idx % BITWORD_SIZE;
+    }
+
+    reference() = delete;
+    reference(const reference&) = default;
+
+    reference &operator=(reference t) {
+      *this = bool(t);
+      return *this;
+    }
+
+    reference& operator=(bool t) {
+      if (t)
+        *WordRef |= BitWord(1) << BitPos;
+      else
+        *WordRef &= ~(BitWord(1) << BitPos);
+      return *this;
+    }
+
+    operator bool() const {
+      return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
+    }
+  };
+
+  typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
+  typedef const_set_bits_iterator set_iterator;
+
+  const_set_bits_iterator set_bits_begin() const {
+    return const_set_bits_iterator(*this);
+  }
+  const_set_bits_iterator set_bits_end() const {
+    return const_set_bits_iterator(*this, -1);
+  }
+  iterator_range<const_set_bits_iterator> set_bits() const {
+    return make_range(set_bits_begin(), set_bits_end());
+  }
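+
+  // Illustrative: for a BitVector BV with bits 1 and 5 set,
+  //   for (unsigned I : BV.set_bits())
+  // visits I == 1 and then I == 5.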
+
+  /// BitVector default ctor - Creates an empty bitvector.
+  BitVector() : Size(0) {}
+
+  /// BitVector ctor - Creates a bitvector of the specified number of bits. All
+  /// bits are initialized to the specified value.
+  explicit BitVector(unsigned s, bool t = false) : Size(s) {
+    size_t Capacity = NumBitWords(s);
+    Bits = allocate(Capacity);
+    init_words(Bits, t);
+    if (t)
+      clear_unused_bits();
+  }
+
+  /// BitVector copy ctor.
+  BitVector(const BitVector &RHS) : Size(RHS.size()) {
+    if (Size == 0) {
+      Bits = MutableArrayRef<BitWord>();
+      return;
+    }
+
+    size_t Capacity = NumBitWords(RHS.size());
+    Bits = allocate(Capacity);
+    std::memcpy(Bits.data(), RHS.Bits.data(), Capacity * sizeof(BitWord));
+  }
+
+  BitVector(BitVector &&RHS) : Bits(RHS.Bits), Size(RHS.Size) {
+    RHS.Bits = MutableArrayRef<BitWord>();
+    RHS.Size = 0;
+  }
+
+  ~BitVector() { std::free(Bits.data()); }
+
+  /// empty - Tests whether there are no bits in this bitvector.
+  bool empty() const { return Size == 0; }
+
+  /// size - Returns the number of bits in this bitvector.
+  size_type size() const { return Size; }
+
+  /// count - Returns the number of bits which are set.
+  size_type count() const {
+    unsigned NumBits = 0;
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      NumBits += countPopulation(Bits[i]);
+    return NumBits;
+  }
+
+  /// any - Returns true if any bit is set.
+  bool any() const {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      if (Bits[i] != 0)
+        return true;
+    return false;
+  }
+
+  /// all - Returns true if all bits are set.
+  bool all() const {
+    for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
+      if (Bits[i] != ~0UL)
+        return false;
+
+    // If bits remain check that they are ones. The unused bits are always zero.
+    if (unsigned Remainder = Size % BITWORD_SIZE)
+      return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
+
+    return true;
+  }
+
+  /// none - Returns true if none of the bits are set.
+  bool none() const {
+    return !any();
+  }
+
+  /// find_first_in - Returns the index of the first set bit in the range
+  /// [Begin, End).  Returns -1 if all bits in the range are unset.
+  int find_first_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+
+    // Check subsequent words.
+    for (unsigned i = FirstWord; i <= LastWord; ++i) {
+      BitWord Copy = Bits[i];
+
+      if (i == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy &= maskTrailingZeros<BitWord>(FirstBit);
+      }
+
+      if (i == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+      }
+      if (Copy != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Copy);
+    }
+    return -1;
+  }
+
+  /// find_last_in - Returns the index of the last set bit in the range
+  /// [Begin, End).  Returns -1 if all bits in the range are unset.
+  int find_last_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+
+    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+      unsigned CurrentWord = i - 1;
+
+      BitWord Copy = Bits[CurrentWord];
+      if (CurrentWord == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+      }
+
+      if (CurrentWord == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy &= maskTrailingZeros<BitWord>(FirstBit);
+      }
+
+      if (Copy != 0)
+        return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
+    }
+
+    return -1;
+  }
+
+  /// find_first_unset_in - Returns the index of the first unset bit in the
+  /// range [Begin, End).  Returns -1 if all bits in the range are set.
+  int find_first_unset_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+
+    // Check subsequent words.
+    for (unsigned i = FirstWord; i <= LastWord; ++i) {
+      BitWord Copy = Bits[i];
+
+      if (i == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy |= maskTrailingOnes<BitWord>(FirstBit);
+      }
+
+      if (i == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
+      }
+      if (Copy != ~0UL) {
+        unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Copy);
+        return Result < size() ? Result : -1;
+      }
+    }
+    return -1;
+  }
+
+  /// find_last_unset_in - Returns the index of the last unset bit in the
+  /// range [Begin, End).  Returns -1 if all bits in the range are set.
+  int find_last_unset_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+
+    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+      unsigned CurrentWord = i - 1;
+
+      BitWord Copy = Bits[CurrentWord];
+      if (CurrentWord == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
+      }
+
+      if (CurrentWord == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy |= maskTrailingOnes<BitWord>(FirstBit);
+      }
+
+      if (Copy != ~0UL) {
+        unsigned Result =
+            (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
+        return Result < Size ? Result : -1;
+      }
+    }
+    return -1;
+  }
+
+  /// find_first - Returns the index of the first set bit, -1 if none
+  /// of the bits are set.
+  int find_first() const { return find_first_in(0, Size); }
+
+  /// find_last - Returns the index of the last set bit, -1 if none of the bits
+  /// are set.
+  int find_last() const { return find_last_in(0, Size); }
+
+  /// find_next - Returns the index of the next set bit following the
+  /// "Prev" bit. Returns -1 if the next set bit is not found.
+  int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
+
+  /// find_prev - Returns the index of the first set bit that precedes the
+  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
+  int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
+
+  /// find_first_unset - Returns the index of the first unset bit, -1 if all
+  /// of the bits are set.
+  int find_first_unset() const { return find_first_unset_in(0, Size); }
+
+  /// find_next_unset - Returns the index of the next unset bit following the
+  /// "Prev" bit.  Returns -1 if all remaining bits are set.
+  int find_next_unset(unsigned Prev) const {
+    return find_first_unset_in(Prev + 1, Size);
+  }
+
+  /// find_last_unset - Returns the index of the last unset bit, -1 if all of
+  /// the bits are set.
+  int find_last_unset() const { return find_last_unset_in(0, Size); }
+
+  /// find_prev_unset - Returns the index of the first unset bit that precedes
+  /// the bit at \p PriorTo.  Returns -1 if all previous bits are set.
+  int find_prev_unset(unsigned PriorTo) {
+    return find_last_unset_in(0, PriorTo);
+  }
+
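+  // A minimal usage sketch of the find family (illustrative only; `visit` is
+  // a hypothetical callback):
+  //
+  //   BitVector BV(32);
+  //   BV.set(3);
+  //   BV.set(17);
+  //   for (int Idx = BV.find_first(); Idx != -1; Idx = BV.find_next(Idx))
+  //     visit(Idx); // visits 3, then 17
+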
+  /// clear - Removes all bits from the bitvector. Does not change capacity.
+  void clear() {
+    Size = 0;
+  }
+
+  /// resize - Grow or shrink the bitvector.
+  void resize(unsigned N, bool t = false) {
+    if (N > getBitCapacity()) {
+      unsigned OldCapacity = Bits.size();
+      grow(N);
+      init_words(Bits.drop_front(OldCapacity), t);
+    }
+
+    // Set any old unused bits that are now included in the BitVector. This
+    // may set bits that are not included in the new vector, but we will clear
+    // them back out below.
+    if (N > Size)
+      set_unused_bits(t);
+
+    // Update the size, and clear out any bits that are now unused
+    unsigned OldSize = Size;
+    Size = N;
+    if (t || N < OldSize)
+      clear_unused_bits();
+  }
+
+  void reserve(unsigned N) {
+    if (N > getBitCapacity())
+      grow(N);
+  }
+
+  // Set, reset, flip
+  BitVector &set() {
+    init_words(Bits, true);
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &set(unsigned Idx) {
+    assert(Bits.data() && "Bits never allocated");
+    Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  /// set - Efficiently set a range of bits in [I, E)
+  BitVector &set(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to set backwards range!");
+    assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] |= Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] |= PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = ~0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] |= PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &reset() {
+    init_words(Bits, false);
+    return *this;
+  }
+
+  BitVector &reset(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
+    return *this;
+  }
+
+  /// reset - Efficiently reset a range of bits in [I, E)
+  BitVector &reset(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to reset backwards range!");
+    assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] &= ~Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = 0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &flip() {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      Bits[i] = ~Bits[i];
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &flip(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  // Indexing.
+  reference operator[](unsigned Idx) {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    return reference(*this, Idx);
+  }
+
+  bool operator[](unsigned Idx) const {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
+    return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
+  }
+
+  bool test(unsigned Idx) const {
+    return (*this)[Idx];
+  }
+
+  /// Test if any common bits are set.
+  bool anyCommon(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+      if (Bits[i] & RHS.Bits[i])
+        return true;
+    return false;
+  }
+
+  // Comparison operators.
+  bool operator==(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if (Bits[i] != RHS.Bits[i])
+        return false;
+
+    // Verify that any extra words are all zeros.
+    if (i != ThisWords) {
+      for (; i != ThisWords; ++i)
+        if (Bits[i])
+          return false;
+    } else if (i != RHSWords) {
+      for (; i != RHSWords; ++i)
+        if (RHS.Bits[i])
+          return false;
+    }
+    return true;
+  }
+
+  bool operator!=(const BitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  /// Intersection, union, disjoint union.
+  BitVector &operator&=(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= RHS.Bits[i];
+
+    // Any bits that are just in this bitvector become zero, because they aren't
+    // in the RHS bit vector.  Any words only in RHS are ignored because they
+    // are already zero in the LHS.
+    for (; i != ThisWords; ++i)
+      Bits[i] = 0;
+
+    return *this;
+  }
+
+  /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+  BitVector &reset(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= ~RHS.Bits[i];
+    return *this;
+  }
+
+  /// test - Check if (This - RHS) is non-zero, i.e. whether any bit set in
+  /// this vector is not also set in RHS.  Equivalent to reset(RHS) followed
+  /// by any().
+  bool test(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if ((Bits[i] & ~RHS.Bits[i]) != 0)
+        return true;
+
+    for (; i != ThisWords ; ++i)
+      if (Bits[i] != 0)
+        return true;
+
+    return false;
+  }
+
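+  // Note: unlike operator&= (where extra RHS words cannot affect *this),
+  // operator|= and operator^= below first grow *this to RHS's size, so set
+  // bits in RHS beyond our current size are preserved.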
+  BitVector &operator|=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] |= RHS.Bits[i];
+    return *this;
+  }
+
+  BitVector &operator^=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] ^= RHS.Bits[i];
+    return *this;
+  }
+
+  BitVector &operator>>=(unsigned N) {
+    assert(N <= Size);
+    if (LLVM_UNLIKELY(empty() || N == 0))
+      return *this;
+
+    unsigned NumWords = NumBitWords(Size);
+    assert(NumWords >= 1);
+
+    wordShr(N / BITWORD_SIZE);
+
+    unsigned BitDistance = N % BITWORD_SIZE;
+    if (BitDistance == 0)
+      return *this;
+
+    // When the shift size is not a multiple of the word size, then we have
+    // a tricky situation where each word in succession needs to extract some
+    // of the bits from the next word and or them into this word while
+    // shifting this word to make room for the new bits.  This has to be done
+    // for every word in the array.
+
+    // Since we're shifting each word right, some bits will fall off the end
+    // of each word to the right, and empty space will be created on the left.
+    // The final word in the array will lose bits permanently, so starting at
+    // the beginning, work forwards shifting each word to the right, and
+    // OR'ing the low bits of the next word into the high bits of the
+    // current word.
+
+    // Example:
+    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
+    //   by 4 bits.
+    // Step 1: Word[0] >>= 4           ; 0x0ABBCCDD
+    // Step 2: Word[0] |= 0x10000000   ; 0x1ABBCCDD
+    // Step 3: Word[1] >>= 4           ; 0x0EEFF001
+    // Step 4: Word[1] |= 0x50000000   ; 0x5EEFF001
+    // Step 5: Word[2] >>= 4           ; 0x02334455
+    // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
+    const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
+    const unsigned LSH = BITWORD_SIZE - BitDistance;
+
+    for (unsigned I = 0; I < NumWords - 1; ++I) {
+      Bits[I] >>= BitDistance;
+      Bits[I] |= (Bits[I + 1] & Mask) << LSH;
+    }
+
+    Bits[NumWords - 1] >>= BitDistance;
+
+    return *this;
+  }
+
+  BitVector &operator<<=(unsigned N) {
+    assert(N <= Size);
+    if (LLVM_UNLIKELY(empty() || N == 0))
+      return *this;
+
+    unsigned NumWords = NumBitWords(Size);
+    assert(NumWords >= 1);
+
+    wordShl(N / BITWORD_SIZE);
+
+    unsigned BitDistance = N % BITWORD_SIZE;
+    if (BitDistance == 0)
+      return *this;
+
+    // When the shift size is not a multiple of the word size, then we have
+    // a tricky situation where each word in succession needs to extract some
+    // of the bits from the previous word and or them into this word while
+    // shifting this word to make room for the new bits.  This has to be done
+    // for every word in the array.  This is similar to the algorithm outlined
+    // in operator>>=, but backwards.
+
+    // Since we're shifting each word left, some bits will fall off the end
+    // of each word to the left, and empty space will be created on the right.
+    // The first word in the array will lose bits permanently, so starting at
+    // the end, work backwards shifting each word to the left, and OR'ing
+    // the high bits of the preceding (less significant) word into the low
+    // bits of the current word.
+
+    // Example:
+    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
+    //   by 4 bits.
+    // Step 1: Word[2] <<= 4           ; 0x23344550
+    // Step 2: Word[2] |= 0x0000000E   ; 0x2334455E
+    // Step 3: Word[1] <<= 4           ; 0xEFF00110
+    // Step 4: Word[1] |= 0x0000000A   ; 0xEFF0011A
+    // Step 5: Word[0] <<= 4           ; 0xABBCCDD0
+    // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
+    const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
+    const unsigned RSH = BITWORD_SIZE - BitDistance;
+
+    for (int I = NumWords - 1; I > 0; --I) {
+      Bits[I] <<= BitDistance;
+      Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
+    }
+    Bits[0] <<= BitDistance;
+    clear_unused_bits();
+
+    return *this;
+  }
+
+  // Assignment operator.
+  const BitVector &operator=(const BitVector &RHS) {
+    if (this == &RHS) return *this;
+
+    Size = RHS.size();
+    unsigned RHSWords = NumBitWords(Size);
+    if (Size <= getBitCapacity()) {
+      if (Size)
+        std::memcpy(Bits.data(), RHS.Bits.data(), RHSWords * sizeof(BitWord));
+      clear_unused_bits();
+      return *this;
+    }
+
+    // Grow the bitvector to have enough elements.
+    unsigned NewCapacity = RHSWords;
+    assert(NewCapacity > 0 && "negative capacity?");
+    auto NewBits = allocate(NewCapacity);
+    std::memcpy(NewBits.data(), RHS.Bits.data(), NewCapacity * sizeof(BitWord));
+
+    // Destroy the old bits.
+    std::free(Bits.data());
+    Bits = NewBits;
+
+    return *this;
+  }
+
+  const BitVector &operator=(BitVector &&RHS) {
+    if (this == &RHS) return *this;
+
+    std::free(Bits.data());
+    Bits = RHS.Bits;
+    Size = RHS.Size;
+
+    RHS.Bits = MutableArrayRef<BitWord>();
+    RHS.Size = 0;
+
+    return *this;
+  }
+
+  void swap(BitVector &RHS) {
+    std::swap(Bits, RHS.Bits);
+    std::swap(Size, RHS.Size);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Portable bit mask operations.
+  //===--------------------------------------------------------------------===//
+  //
+  // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+  // fixed word size makes it easier to work with literal bit vector constants
+  // in portable code.
+  //
+  // The LSB in each word is the lowest numbered bit.  The size of a portable
+  // bit mask is always a whole multiple of 32 bits.  If no bit mask size is
+  // given, the bit mask is assumed to cover the entire BitVector.
+
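+  // A minimal usage sketch (illustrative only): Mask[0] bit 0 corresponds to
+  // vector bit 0, Mask[1] bit 0 to vector bit 32, and so on.
+  //
+  //   BitVector BV(64);
+  //   const uint32_t Mask[2] = {0x0000000F, 0x80000000};
+  //   BV.setBitsInMask(Mask, 2); // sets bits 0..3 and bit 63
+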
+  /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+  /// This computes "*this |= Mask".
+  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, false>(Mask, MaskWords);
+  }
+
+  /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+  /// Don't resize. This computes "*this &= ~Mask".
+  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, false>(Mask, MaskWords);
+  }
+
+  /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+  /// Don't resize.  This computes "*this |= ~Mask".
+  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, true>(Mask, MaskWords);
+  }
+
+  /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+  /// Don't resize.  This computes "*this &= Mask".
+  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, true>(Mask, MaskWords);
+  }
+
+private:
+  /// \brief Perform a logical left shift of \p Count words by moving everything
+  /// \p Count words to the right in memory.
+  ///
+  /// While confusing, words are stored from least significant at Bits[0] to
+  /// most significant at Bits[NumWords-1].  A logical shift left, however,
+  /// moves the current least significant bit to a higher logical index, and
+  /// fills the previous least significant bits with 0.  Thus, we actually
+  /// need to move the bytes of the memory to the right, not to the left.
+  /// Example:
+  ///   Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
+  /// represents a BitVector where 0xBBBBAAAA contains the least significant
+  /// bits.  So if we want to shift the BitVector left by 2 words, we need to
+  /// turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
+  /// memmove which moves right, not left.
+  void wordShl(uint32_t Count) {
+    if (Count == 0)
+      return;
+
+    uint32_t NumWords = NumBitWords(Size);
+
+    auto Src = Bits.take_front(NumWords).drop_back(Count);
+    auto Dest = Bits.take_front(NumWords).drop_front(Count);
+
+    // Since we always move Word-sized chunks of data with src and dest both
+    // aligned to a word-boundary, we don't need to worry about endianness
+    // here.
+    std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+    std::memset(Bits.data(), 0, Count * sizeof(BitWord));
+    clear_unused_bits();
+  }
+
+  /// \brief Perform a logical right shift of \p Count words by moving those
+  /// words to the left in memory.  See wordShl for more information.
+  ///
+  void wordShr(uint32_t Count) {
+    if (Count == 0)
+      return;
+
+    uint32_t NumWords = NumBitWords(Size);
+
+    auto Src = Bits.take_front(NumWords).drop_front(Count);
+    auto Dest = Bits.take_front(NumWords).drop_back(Count);
+    assert(Dest.size() == Src.size());
+
+    std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+    std::memset(Dest.end(), 0, Count * sizeof(BitWord));
+  }
+
+  MutableArrayRef<BitWord> allocate(size_t NumWords) {
+    BitWord *RawBits = static_cast<BitWord *>(
+        safe_malloc(NumWords * sizeof(BitWord)));
+    return MutableArrayRef<BitWord>(RawBits, NumWords);
+  }
+
+  int next_unset_in_word(int WordIndex, BitWord Word) const {
+    unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
+    return Result < size() ? Result : -1;
+  }
+
+  unsigned NumBitWords(unsigned S) const {
+    return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
+  }
+
+  // Set the unused bits in the high words.
+  void set_unused_bits(bool t = true) {
+    //  Set high words first.
+    unsigned UsedWords = NumBitWords(Size);
+    if (Bits.size() > UsedWords)
+      init_words(Bits.drop_front(UsedWords), t);
+
+    //  Then set any stray high bits of the last used word.
+    unsigned ExtraBits = Size % BITWORD_SIZE;
+    if (ExtraBits) {
+      BitWord ExtraBitMask = ~0UL << ExtraBits;
+      if (t)
+        Bits[UsedWords-1] |= ExtraBitMask;
+      else
+        Bits[UsedWords-1] &= ~ExtraBitMask;
+    }
+  }
+
+  // Clear the unused bits in the high words.
+  void clear_unused_bits() {
+    set_unused_bits(false);
+  }
+
+  void grow(unsigned NewSize) {
+    size_t NewCapacity = std::max<size_t>(NumBitWords(NewSize), Bits.size() * 2);
+    assert(NewCapacity > 0 && "realloc-ing zero space");
+    BitWord *NewBits = static_cast<BitWord *>(
+        safe_realloc(Bits.data(), NewCapacity * sizeof(BitWord)));
+    Bits = MutableArrayRef<BitWord>(NewBits, NewCapacity);
+    clear_unused_bits();
+  }
+
+  void init_words(MutableArrayRef<BitWord> B, bool t) {
+    if (B.size() > 0)
+      memset(B.data(), 0 - (int)t, B.size() * sizeof(BitWord));
+  }
+
+  template<bool AddBits, bool InvertMask>
+  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+    static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
+    MaskWords = std::min(MaskWords, (size() + 31) / 32);
+    const unsigned Scale = BITWORD_SIZE / 32;
+    unsigned i;
+    for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+      BitWord BW = Bits[i];
+      // This inner loop should unroll completely when BITWORD_SIZE > 32.
+      for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+        uint32_t M = *Mask++;
+        if (InvertMask) M = ~M;
+        if (AddBits) BW |=   BitWord(M) << b;
+        else         BW &= ~(BitWord(M) << b);
+      }
+      Bits[i] = BW;
+    }
+    for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+      uint32_t M = *Mask++;
+      if (InvertMask) M = ~M;
+      if (AddBits) Bits[i] |=   BitWord(M) << b;
+      else         Bits[i] &= ~(BitWord(M) << b);
+    }
+    if (AddBits)
+      clear_unused_bits();
+  }
+
+public:
+  /// Return the size (in bytes) of the bit vector.
+  size_t getMemorySize() const { return Bits.size() * sizeof(BitWord); }
+  size_t getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
+};
+
+inline size_t capacity_in_bytes(const BitVector &X) {
+  return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+namespace std {
+  /// Implement std::swap in terms of BitVector swap.
+  inline void
+  swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
+    LHS.swap(RHS);
+  }
+} // end namespace std
+
+#endif // LLVM_ADT_BITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
new file mode 100644
index 0000000..18c6ba5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
@@ -0,0 +1,153 @@
+//===-- llvm/ADT/BitmaskEnum.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITMASKENUM_H
+#define LLVM_ADT_BITMASKENUM_H
+
+#include <cassert>
+#include <type_traits>
+#include <utility>
+
+#include "llvm/Support/MathExtras.h"
+
+/// LLVM_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you can
+/// perform bitwise operations on it without putting static_cast everywhere.
+///
+/// \code
+///   enum MyEnum {
+///     E1 = 1, E2 = 2, E3 = 4, E4 = 8,
+///     LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
+///   };
+///
+///   void Foo() {
+///     MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
+///   }
+/// \endcode
+///
+/// Normally when you do a bitwise operation on an enum value, you get back an
+/// instance of the underlying type (e.g. int).  But using this macro, bitwise
+/// ops on your enum will return you back instances of the enum.  This is
+/// particularly useful for enums which represent a combination of flags.
+///
+/// The parameter to LLVM_MARK_AS_BITMASK_ENUM should be the largest individual
+/// value in your enum.
+///
+/// All of the enum's values must be non-negative.
+#define LLVM_MARK_AS_BITMASK_ENUM(LargestValue)                                \
+  LLVM_BITMASK_LARGEST_ENUMERATOR = LargestValue
+
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() pulls the operator overloads used
+/// by LLVM_MARK_AS_BITMASK_ENUM into the current namespace.
+///
+/// Suppose you have an enum foo::bar::MyEnum.  Before using
+/// LLVM_MARK_AS_BITMASK_ENUM on MyEnum, you must put
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() somewhere inside namespace foo or
+/// namespace foo::bar.  This allows the relevant operator overloads to be found
+/// by ADL.
+///
+/// You don't need to use this macro in namespace llvm; it's done at the bottom
+/// of this file.
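+///
+/// A minimal sketch of the intended pattern (illustrative; `foo` and `Flags`
+/// are hypothetical names):
+///
+/// \code
+///   namespace foo {
+///   enum Flags {
+///     F_Read = 1,
+///     F_Write = 2,
+///     LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ F_Write)
+///   };
+///   LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+///   } // namespace foo
+/// \endcode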
+#define LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()                               \
+  using ::llvm::BitmaskEnumDetail::operator~;                                  \
+  using ::llvm::BitmaskEnumDetail::operator|;                                  \
+  using ::llvm::BitmaskEnumDetail::operator&;                                  \
+  using ::llvm::BitmaskEnumDetail::operator^;                                  \
+  using ::llvm::BitmaskEnumDetail::operator|=;                                 \
+  using ::llvm::BitmaskEnumDetail::operator&=;                                 \
+  /* Force a semicolon at the end of this macro. */                            \
+  using ::llvm::BitmaskEnumDetail::operator^=
+
+namespace llvm {
+
+/// Traits class to determine whether an enum has a
+/// LLVM_BITMASK_LARGEST_ENUMERATOR enumerator.
+template <typename E, typename Enable = void>
+struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+struct is_bitmask_enum<
+    E, typename std::enable_if<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >=
+                               0>::type> : std::true_type {};
+namespace BitmaskEnumDetail {
+
+/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
+/// value.
+template <typename E> typename std::underlying_type<E>::type Mask() {
+  // On overflow, NextPowerOf2 returns zero with the type uint64_t, so
+  // subtracting 1 gives us the mask with all bits set, like we want.
+  return NextPowerOf2(static_cast<typename std::underlying_type<E>::type>(
+             E::LLVM_BITMASK_LARGEST_ENUMERATOR)) -
+         1;
+}
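+
+// For example, if LLVM_BITMASK_LARGEST_ENUMERATOR is 8 (0b1000),
+// NextPowerOf2(8) returns 16, so the mask is 15 (0b1111).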
+
+/// Check that Val is in range for E, and return Val cast to E's underlying
+/// type.
+template <typename E> typename std::underlying_type<E>::type Underlying(E Val) {
+  auto U = static_cast<typename std::underlying_type<E>::type>(Val);
+  assert(U >= 0 && "Negative enum values are not allowed.");
+  assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
+  return U;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator~(E Val) {
+  return static_cast<E>(~Underlying(Val) & Mask<E>());
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator|(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) | Underlying(RHS));
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator&(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) & Underlying(RHS));
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator^(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
+}
+
+// |=, &=, and ^= return a reference to LHS, to match the behavior of the
+// operators on builtin types.
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator|=(E &LHS, E RHS) {
+  LHS = LHS | RHS;
+  return LHS;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator&=(E &LHS, E RHS) {
+  LHS = LHS & RHS;
+  return LHS;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator^=(E &LHS, E RHS) {
+  LHS = LHS ^ RHS;
+  return LHS;
+}
+
+} // namespace BitmaskEnumDetail
+
+// Enable bitmask enums in namespace ::llvm and all nested namespaces.
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h b/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h
new file mode 100644
index 0000000..6bc63c2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h
@@ -0,0 +1,163 @@
+//===- llvm/ADT/BreadthFirstIterator.h - Breadth First iterator -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic breadth
+// first graph iterator.  This file exposes the following functions/types:
+//
+// bf_begin/bf_end/bf_iterator
+//   * Normal breadth-first iteration - visit a graph level-by-level.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BREADTHFIRSTITERATOR_H
+#define LLVM_ADT_BREADTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <queue>
+#include <utility>
+
+namespace llvm {
+
+// bf_iterator_storage - A private class which is used to figure out where to
+// store the visited set. We only provide a non-external variant for now.
+template <class SetType> class bf_iterator_storage {
+public:
+  SetType Visited;
+};
+
+// The visited state for the iteration is a simple set.
+template <typename NodeRef, unsigned SmallSize = 8>
+using bf_iterator_default_set = SmallPtrSet<NodeRef, SmallSize>;
+
+// Generic Breadth first search iterator.
+template <class GraphT,
+          class SetType =
+              bf_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+          class GT = GraphTraits<GraphT>>
+class bf_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public bf_iterator_storage<SetType> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // First element is the node reference, second is the next child to visit.
+  using QueueElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+  // Visit queue - used to maintain BFS ordering.
+  // Optional<> because we need markers for levels.
+  std::queue<Optional<QueueElement>> VisitQueue;
+
+  // Current level.
+  unsigned Level;
+
+private:
+  inline bf_iterator(NodeRef Node) {
+    this->Visited.insert(Node);
+    Level = 0;
+
+    // Also, insert a dummy node as marker.
+    VisitQueue.push(QueueElement(Node, None));
+    VisitQueue.push(None);
+  }
+
+  inline bf_iterator() = default;
+
+  inline void toNext() {
+    Optional<QueueElement> Head = VisitQueue.front();
+    QueueElement H = Head.getValue();
+    NodeRef Node = H.first;
+    Optional<ChildItTy> &ChildIt = H.second;
+
+    if (!ChildIt)
+      ChildIt.emplace(GT::child_begin(Node));
+    while (*ChildIt != GT::child_end(Node)) {
+      NodeRef Next = *(*ChildIt)++;
+
+      // Already visited?
+      if (this->Visited.insert(Next).second)
+        VisitQueue.push(QueueElement(Next, None));
+    }
+    VisitQueue.pop();
+
+    // Go to the next element skipping markers if needed.
+    if (!VisitQueue.empty()) {
+      Head = VisitQueue.front();
+      if (Head != None)
+        return;
+      Level += 1;
+      VisitQueue.pop();
+
+      // Don't push another marker if this is the last
+      // element.
+      if (!VisitQueue.empty())
+        VisitQueue.push(None);
+    }
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static begin and end methods as our public "constructors"
+  static bf_iterator begin(const GraphT &G) {
+    return bf_iterator(GT::getEntryNode(G));
+  }
+
+  static bf_iterator end(const GraphT &G) { return bf_iterator(); }
+
+  bool operator==(const bf_iterator &RHS) const {
+    return VisitQueue == RHS.VisitQueue;
+  }
+
+  bool operator!=(const bf_iterator &RHS) const { return !(*this == RHS); }
+
+  const NodeRef &operator*() const { return VisitQueue.front()->first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time so that you can actually call methods on the node, because the
+  // contained type is a pointer.
+  NodeRef operator->() const { return **this; }
+
+  bf_iterator &operator++() { // Pre-increment
+    toNext();
+    return *this;
+  }
+
+  bf_iterator operator++(int) { // Post-increment
+    bf_iterator ItCopy = *this;
+    ++*this;
+    return ItCopy;
+  }
+
+  unsigned getLevel() const { return Level; }
+};
+
+// Provide global constructors that automatically figure out correct types.
+template <class T> bf_iterator<T> bf_begin(const T &G) {
+  return bf_iterator<T>::begin(G);
+}
+
+template <class T> bf_iterator<T> bf_end(const T &G) {
+  return bf_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T> iterator_range<bf_iterator<T>> breadth_first(const T &G) {
+  return make_range(bf_begin(G), bf_end(G));
+}
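+
+// A minimal usage sketch (illustrative; assumes a GraphTraits specialization
+// exists for the node type, e.g. BasicBlock* via llvm/IR/CFG.h; `F` and
+// `visit` are hypothetical):
+//
+//   for (BasicBlock *BB : breadth_first(&F.getEntryBlock()))
+//     visit(BB);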
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_BREADTHFIRSTITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/CachedHashString.h b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
new file mode 100644
index 0000000..a56a621
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
@@ -0,0 +1,184 @@
+//===- llvm/ADT/CachedHashString.h - Prehashed string/StringRef -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CachedHashString and CachedHashStringRef.  These are owning
+// and not-owning string types that store their hash in addition to their string
+// data.
+//
+// Unlike std::string, CachedHashString can be used in DenseSet/DenseMap
+// (because, unlike std::string, CachedHashString lets us have empty and
+// tombstone values).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_CACHED_HASH_STRING_H
+#define LLVM_ADT_CACHED_HASH_STRING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// A container which contains a StringRef plus a precomputed hash.
+class CachedHashStringRef {
+  const char *P;
+  uint32_t Size;
+  uint32_t Hash;
+
+public:
+  // Explicit because hashing a string isn't free.
+  explicit CachedHashStringRef(StringRef S)
+      : CachedHashStringRef(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+  CachedHashStringRef(StringRef S, uint32_t Hash)
+      : P(S.data()), Size(S.size()), Hash(Hash) {
+    assert(S.size() <= std::numeric_limits<uint32_t>::max());
+  }
+
+  StringRef val() const { return StringRef(P, Size); }
+  uint32_t size() const { return Size; }
+  uint32_t hash() const { return Hash; }
+};
+
+template <> struct DenseMapInfo<CachedHashStringRef> {
+  static CachedHashStringRef getEmptyKey() {
+    return CachedHashStringRef(DenseMapInfo<StringRef>::getEmptyKey(), 0);
+  }
+  static CachedHashStringRef getTombstoneKey() {
+    return CachedHashStringRef(DenseMapInfo<StringRef>::getTombstoneKey(), 1);
+  }
+  static unsigned getHashValue(const CachedHashStringRef &S) {
+    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+    return S.hash();
+  }
+  static bool isEqual(const CachedHashStringRef &LHS,
+                      const CachedHashStringRef &RHS) {
+    return LHS.hash() == RHS.hash() &&
+           DenseMapInfo<StringRef>::isEqual(LHS.val(), RHS.val());
+  }
+};
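+
+// A minimal usage sketch (illustrative only): with the DenseMapInfo
+// specialization above, a CachedHashStringRef key hashes each string once.
+//
+//   DenseSet<CachedHashStringRef> Seen; // requires llvm/ADT/DenseSet.h
+//   Seen.insert(CachedHashStringRef("example"));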
+
+/// A container which contains a string, which it owns, plus a precomputed hash.
+///
+/// We do not null-terminate the string.
+class CachedHashString {
+  friend struct DenseMapInfo<CachedHashString>;
+
+  char *P;
+  uint32_t Size;
+  uint32_t Hash;
+
+  static char *getEmptyKeyPtr() { return DenseMapInfo<char *>::getEmptyKey(); }
+  static char *getTombstoneKeyPtr() {
+    return DenseMapInfo<char *>::getTombstoneKey();
+  }
+
+  bool isEmptyOrTombstone() const {
+    return P == getEmptyKeyPtr() || P == getTombstoneKeyPtr();
+  }
+
+  struct ConstructEmptyOrTombstoneTy {};
+
+  CachedHashString(ConstructEmptyOrTombstoneTy, char *EmptyOrTombstonePtr)
+      : P(EmptyOrTombstonePtr), Size(0), Hash(0) {
+    assert(isEmptyOrTombstone());
+  }
+
+  // TODO: Use small-string optimization to avoid allocating.
+
+public:
+  explicit CachedHashString(const char *S) : CachedHashString(StringRef(S)) {}
+
+  // Explicit because copying and hashing a string isn't free.
+  explicit CachedHashString(StringRef S)
+      : CachedHashString(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+  CachedHashString(StringRef S, uint32_t Hash)
+      : P(new char[S.size()]), Size(S.size()), Hash(Hash) {
+    memcpy(P, S.data(), S.size());
+  }
+
+  // Ideally this class would not be copyable.  But SetVector requires copyable
+  // keys, and we want this to be usable there.
+  CachedHashString(const CachedHashString &Other)
+      : Size(Other.Size), Hash(Other.Hash) {
+    if (Other.isEmptyOrTombstone()) {
+      P = Other.P;
+    } else {
+      P = new char[Size];
+      memcpy(P, Other.P, Size);
+    }
+  }
+
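+  // Copy-and-swap assignment: `Other` is taken by value, so the copy (or
+  // move) has already happened; swapping hands its state to *this and the
+  // old state is destroyed along with `Other`.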
+  CachedHashString &operator=(CachedHashString Other) {
+    swap(*this, Other);
+    return *this;
+  }
+
+  CachedHashString(CachedHashString &&Other) noexcept
+      : P(Other.P), Size(Other.Size), Hash(Other.Hash) {
+    Other.P = getEmptyKeyPtr();
+  }
+
+  ~CachedHashString() {
+    if (!isEmptyOrTombstone())
+      delete[] P;
+  }
+
+  StringRef val() const { return StringRef(P, Size); }
+  uint32_t size() const { return Size; }
+  uint32_t hash() const { return Hash; }
+
+  operator StringRef() const { return val(); }
+  operator CachedHashStringRef() const {
+    return CachedHashStringRef(val(), Hash);
+  }
+
+  friend void swap(CachedHashString &LHS, CachedHashString &RHS) {
+    using std::swap;
+    swap(LHS.P, RHS.P);
+    swap(LHS.Size, RHS.Size);
+    swap(LHS.Hash, RHS.Hash);
+  }
+};
+
+template <> struct DenseMapInfo<CachedHashString> {
+  static CachedHashString getEmptyKey() {
+    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+                            CachedHashString::getEmptyKeyPtr());
+  }
+  static CachedHashString getTombstoneKey() {
+    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+                            CachedHashString::getTombstoneKeyPtr());
+  }
+  static unsigned getHashValue(const CachedHashString &S) {
+    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+    return S.hash();
+  }
+  static bool isEqual(const CachedHashString &LHS,
+                      const CachedHashString &RHS) {
+    if (LHS.hash() != RHS.hash())
+      return false;
+    if (LHS.P == CachedHashString::getEmptyKeyPtr())
+      return RHS.P == CachedHashString::getEmptyKeyPtr();
+    if (LHS.P == CachedHashString::getTombstoneKeyPtr())
+      return RHS.P == CachedHashString::getTombstoneKeyPtr();
+
+    // This is safe because if RHS.P is the empty or tombstone key, it will have
+    // length 0, so we'll never dereference its pointer.
+    return LHS.val() == RHS.val();
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000..41fdd43
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,79 @@
+//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+///   P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+  virtual void anchor();
+
+public:
+  using change_ty = unsigned;
+  using edge_ty = std::pair<change_ty, change_ty>;
+
+  // FIXME: Use a decent data structure.
+  using changeset_ty = std::set<change_ty>;
+  using changesetlist_ty = std::vector<changeset_ty>;
+
+public:
+  virtual ~DAGDeltaAlgorithm() = default;
+
+  /// Run - Minimize the DAG formed by the \p Changes vertices and the
+  /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
+  /// changes and returning the smallest set which still satisfies the test
+  /// predicate and the input \p Dependencies.
+  ///
+  /// \param Changes The list of changes.
+  ///
+  /// \param Dependencies The list of dependencies amongst changes. For each
+  /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
+  /// minimization algorithm guarantees that for each tested change set S,
+  /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
+  /// dependencies.
+  changeset_ty Run(const changeset_ty &Changes,
+                   const std::vector<edge_ty> &Dependencies);
+
+  /// UpdatedSearchState - Callback used when the search state changes.
+  virtual void UpdatedSearchState(const changeset_ty &Changes,
+                                  const changesetlist_ty &Sets,
+                                  const changeset_ty &Required) {}
+
+  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
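+
+// A minimal sketch of the intended use (illustrative; `MyMinimizer` and
+// `stillFails` are hypothetical):
+//
+//   struct MyMinimizer : DAGDeltaAlgorithm {
+//     bool ExecuteOneTest(const changeset_ty &S) override {
+//       return stillFails(S); // the predicate being minimized against
+//     }
+//   };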
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DAGDELTAALGORITHM_H
diff --git a/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
new file mode 100644
index 0000000..6becb2a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
@@ -0,0 +1,93 @@
+//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DELTAALGORITHM_H
+#define LLVM_ADT_DELTAALGORITHM_H
+
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
+/// for minimizing arbitrary sets using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element would falsify the predicate.
+///
+/// For best results the predicate function *should* (but need not) satisfy
+/// certain properties, in particular:
+///  (1) The predicate should return false on an empty set and true on the full
+///  set.
+///  (2) If the predicate returns true for a set of changes, it should return
+///  true for all supersets of that set.
+///
+/// It is not an error to provide a predicate that does not satisfy these
+/// requirements, and the algorithm will generally produce reasonable
+/// results. However, it may run substantially more tests than with a good
+/// predicate.
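+///
+/// For example, minimizing the set {1, ..., 8} against the (well formed)
+/// predicate "S contains both 3 and 6" converges on the result {3, 6}.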
+class DeltaAlgorithm {
+public:
+  using change_ty = unsigned;
+  // FIXME: Use a decent data structure.
+  using changeset_ty = std::set<change_ty>;
+  using changesetlist_ty = std::vector<changeset_ty>;
+
+private:
+  /// Cache of failed test results. Successful test results are never cached
+  /// since we always reduce following a success.
+  std::set<changeset_ty> FailedTestsCache;
+
+  /// GetTestResult - Get the test result for the \p Changes from the
+  /// cache, executing the test if necessary.
+  ///
+  /// \param Changes - The change set to test.
+  /// \return - The test result.
+  bool GetTestResult(const changeset_ty &Changes);
+
+  /// Split - Partition a set of changes \p S into one or two subsets.
+  void Split(const changeset_ty &S, changesetlist_ty &Res);
+
+  /// Delta - Minimize a set of \p Changes which has been partitioned into
+  /// smaller sets, by attempting to remove individual subsets.
+  changeset_ty Delta(const changeset_ty &Changes,
+                     const changesetlist_ty &Sets);
+
+  /// Search - Search for a subset (or subsets) in \p Sets which can be
+  /// removed from \p Changes while still satisfying the predicate.
+  ///
+  /// \param Res - On success, a subset of Changes which satisfies the
+  /// predicate.
+  /// \return - True on success.
+  bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
+              changeset_ty &Res);
+
+protected:
+  /// UpdatedSearchState - Callback used when the search state changes.
+  virtual void UpdatedSearchState(const changeset_ty &Changes,
+                                  const changesetlist_ty &Sets) {}
+
+  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+
+  DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;
+
+public:
+  virtual ~DeltaAlgorithm();
+
+  /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
+  /// subsets of changes and returning the smallest set which still satisfies
+  /// the test predicate.
+  changeset_ty Run(const changeset_ty &Changes);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DELTAALGORITHM_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMap.h b/linux-x64/clang/include/llvm/ADT/DenseMap.h
new file mode 100644
index 0000000..ba60b79
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseMap.h
@@ -0,0 +1,1224 @@
+//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAP_H
+#define LLVM_ADT_DENSEMAP_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair : public std::pair<KeyT, ValueT> {
+  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
+  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
+  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
+  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
+};
+
+} // end namespace detail
+
+template <
+    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
+    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
+class DenseMapIterator;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+          typename BucketT>
+class DenseMapBase : public DebugEpochBase {
+  template <typename T>
+  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+  using size_type = unsigned;
+  using key_type = KeyT;
+  using mapped_type = ValueT;
+  using value_type = BucketT;
+
+  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
+  using const_iterator =
+      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
+
+  inline iterator begin() {
+    // When the map is empty, avoid the overhead of advancing/retreating past
+    // empty buckets.
+    if (empty())
+      return end();
+    if (shouldReverseIterate<KeyT>())
+      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
+    return makeIterator(getBuckets(), getBucketsEnd(), *this);
+  }
+  inline iterator end() {
+    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+  }
+  inline const_iterator begin() const {
+    if (empty())
+      return end();
+    if (shouldReverseIterate<KeyT>())
+      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
+    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
+  }
+  inline const_iterator end() const {
+    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+  }
+
+  LLVM_NODISCARD bool empty() const {
+    return getNumEntries() == 0;
+  }
+  unsigned size() const { return getNumEntries(); }
+
+  /// Grow the densemap so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_type NumEntries) {
+    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+    incrementEpoch();
+    if (NumBuckets > getNumBuckets())
+      grow(NumBuckets);
+  }
+
+  void clear() {
+    incrementEpoch();
+    if (getNumEntries() == 0 && getNumTombstones() == 0) return;
+
+    // If the capacity of the array is huge, and the # elements used is small,
+    // shrink the array.
+    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
+      shrink_and_clear();
+      return;
+    }
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) {
+      // Use a simpler loop when these are trivial types.
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+        P->getFirst() = EmptyKey;
+    } else {
+      unsigned NumEntries = getNumEntries();
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+            P->getSecond().~ValueT();
+            --NumEntries;
+          }
+          P->getFirst() = EmptyKey;
+        }
+      }
+      assert(NumEntries == 0 && "Node count imbalance!");
+    }
+    setNumEntries(0);
+    setNumTombstones(0);
+  }
+
+  /// Return 1 if the specified key is in the map, 0 otherwise.
+  size_type count(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
+  }
+
+  iterator find(const_arg_type_t<KeyT> Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+  const_iterator find(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+
+  /// Alternate version of find() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template<class LookupKeyT>
+  iterator find_as(const LookupKeyT &Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+  template<class LookupKeyT>
+  const_iterator find_as(const LookupKeyT &Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueT lookup(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return TheBucket->getSecond();
+    return ValueT();
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    return try_emplace(KV.first, KV.second);
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+    return try_emplace(std::move(KV.first), std::move(KV.second));
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket =
+        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
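+
+  // A minimal usage sketch (illustrative only):
+  //
+  //   DenseMap<int, StringRef> M;
+  //   auto R = M.try_emplace(42, "first");  // R.second == true, inserted
+  //   R = M.try_emplace(42, "second");      // R.second == false, kept "first"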
+
+  /// Alternate version of insert() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
+                                      const LookupKeyT &Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
+                                           std::move(KV.second), Val);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
+
+  /// insert - Range insertion of pairs.
+  template<typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  bool erase(const KeyT &Val) {
+    BucketT *TheBucket;
+    if (!LookupBucketFor(Val, TheBucket))
+      return false; // not in map.
+
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+    return true;
+  }
+  void erase(iterator I) {
+    BucketT *TheBucket = &*I;
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+  }
+
+  value_type& FindAndConstruct(const KeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, Key);
+  }
+
+  ValueT &operator[](const KeyT &Key) {
+    return FindAndConstruct(Key).second;
+  }
+
+  value_type& FindAndConstruct(KeyT &&Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, std::move(Key));
+  }
+
+  ValueT &operator[](KeyT &&Key) {
+    return FindAndConstruct(std::move(Key)).second;
+  }
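+
+  // A sketch of operator[] (illustrative): a missing key is inserted with a
+  // value-initialized value via FindAndConstruct, mirroring std::map, which
+  // makes it convenient for counting.
+  //
+  //   DenseMap<int, unsigned> Counts;
+  //   ++Counts[42];            // inserts {42, 0}, then increments to 1
+  //   assert(Counts[42] == 1);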
+
+  /// isPointerIntoBucketsArray - Return true if the specified pointer points
+  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
+  /// value in the DenseMap).
+  bool isPointerIntoBucketsArray(const void *Ptr) const {
+    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
+  }
+
+  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+  /// array.  In conjunction with the previous method, this can be used to
+  /// determine whether an insertion caused the DenseMap to reallocate.
+  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
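+
+  // Illustrative sketch ('M', 'K', and 'V' are hypothetical): detecting
+  // whether an insertion reallocated the bucket array.
+  //
+  //   const void *Before = M.getPointerIntoBucketsArray();
+  //   M.try_emplace(K, V);
+  //   bool Reallocated = M.getPointerIntoBucketsArray() != Before;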
+
+protected:
+  DenseMapBase() = default;
+
+  void destroyAll() {
+    if (getNumBuckets() == 0) // Nothing to do.
+      return;
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+        P->getSecond().~ValueT();
+      P->getFirst().~KeyT();
+    }
+  }
+
+  void initEmpty() {
+    setNumEntries(0);
+    setNumTombstones(0);
+
+    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+           "# initial buckets must be a power of two!");
+    const KeyT EmptyKey = getEmptyKey();
+    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+      ::new (&B->getFirst()) KeyT(EmptyKey);
+  }
+
+  /// Returns the number of buckets to allocate to ensure that the DenseMap can
+  /// accommodate \p NumEntries without needing to grow().
+  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+    // Ensure that "NumEntries * 4 < NumBuckets * 3"
+    if (NumEntries == 0)
+      return 0;
+    // +1 is required because of the strict inequality.
+    // For example, if NumEntries is 48, we need 48 * 4 / 3 + 1 == 65 buckets,
+    // which NextPowerOf2 rounds up to 128.
+    return NextPowerOf2(NumEntries * 4 / 3 + 1);
+  }
+
+  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+    initEmpty();
+
+    // Insert all the old elements.
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+        // Insert the key/value into the new table.
+        BucketT *DestBucket;
+        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+        (void)FoundVal; // silence warning.
+        assert(!FoundVal && "Key already in new map?");
+        DestBucket->getFirst() = std::move(B->getFirst());
+        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
+        incrementNumEntries();
+
+        // Free the value.
+        B->getSecond().~ValueT();
+      }
+      B->getFirst().~KeyT();
+    }
+  }
+
+  template <typename OtherBaseT>
+  void copyFrom(
+      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+    assert(&other != this);
+    assert(getNumBuckets() == other.getNumBuckets());
+
+    setNumEntries(other.getNumEntries());
+    setNumTombstones(other.getNumTombstones());
+
+    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
+      memcpy(getBuckets(), other.getBuckets(),
+             getNumBuckets() * sizeof(BucketT));
+    else
+      for (size_t i = 0; i < getNumBuckets(); ++i) {
+        ::new (&getBuckets()[i].getFirst())
+            KeyT(other.getBuckets()[i].getFirst());
+        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+          ::new (&getBuckets()[i].getSecond())
+              ValueT(other.getBuckets()[i].getSecond());
+      }
+  }
+
+  static unsigned getHashValue(const KeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  template<typename LookupKeyT>
+  static unsigned getHashValue(const LookupKeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  static const KeyT getEmptyKey() {
+    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    return KeyInfoT::getEmptyKey();
+  }
+
+  static const KeyT getTombstoneKey() {
+    return KeyInfoT::getTombstoneKey();
+  }
+
+private:
+  iterator makeIterator(BucketT *P, BucketT *E,
+                        DebugEpochBase &Epoch,
+                        bool NoAdvance=false) {
+    if (shouldReverseIterate<KeyT>()) {
+      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+      return iterator(B, E, Epoch, NoAdvance);
+    }
+    return iterator(P, E, Epoch, NoAdvance);
+  }
+
+  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
+                                   const DebugEpochBase &Epoch,
+                                   const bool NoAdvance=false) const {
+    if (shouldReverseIterate<KeyT>()) {
+      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+      return const_iterator(B, E, Epoch, NoAdvance);
+    }
+    return const_iterator(P, E, Epoch, NoAdvance);
+  }
+
+  unsigned getNumEntries() const {
+    return static_cast<const DerivedT *>(this)->getNumEntries();
+  }
+
+  void setNumEntries(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumEntries(Num);
+  }
+
+  void incrementNumEntries() {
+    setNumEntries(getNumEntries() + 1);
+  }
+
+  void decrementNumEntries() {
+    setNumEntries(getNumEntries() - 1);
+  }
+
+  unsigned getNumTombstones() const {
+    return static_cast<const DerivedT *>(this)->getNumTombstones();
+  }
+
+  void setNumTombstones(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumTombstones(Num);
+  }
+
+  void incrementNumTombstones() {
+    setNumTombstones(getNumTombstones() + 1);
+  }
+
+  void decrementNumTombstones() {
+    setNumTombstones(getNumTombstones() - 1);
+  }
+
+  const BucketT *getBuckets() const {
+    return static_cast<const DerivedT *>(this)->getBuckets();
+  }
+
+  BucketT *getBuckets() {
+    return static_cast<DerivedT *>(this)->getBuckets();
+  }
+
+  unsigned getNumBuckets() const {
+    return static_cast<const DerivedT *>(this)->getNumBuckets();
+  }
+
+  BucketT *getBucketsEnd() {
+    return getBuckets() + getNumBuckets();
+  }
+
+  const BucketT *getBucketsEnd() const {
+    return getBuckets() + getNumBuckets();
+  }
+
+  void grow(unsigned AtLeast) {
+    static_cast<DerivedT *>(this)->grow(AtLeast);
+  }
+
+  void shrink_and_clear() {
+    static_cast<DerivedT *>(this)->shrink_and_clear();
+  }
+
+  template <typename KeyArg, typename... ValueArgs>
+  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+                            ValueArgs &&... Values) {
+    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+    TheBucket->getFirst() = std::forward<KeyArg>(Key);
+    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+                                      ValueT &&Value, LookupKeyT &Lookup) {
+    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+    TheBucket->getFirst() = std::move(Key);
+    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+                                BucketT *TheBucket) {
+    incrementEpoch();
+
+    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+    // the buckets are empty (meaning that many are filled with tombstones),
+    // grow the table.
+    //
+    // The latter case is tricky.  For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until they found the empty bucket.  If the
+    // table were completely filled with tombstones, no lookup would ever
+    // succeed, causing infinite loops in lookup.
+    unsigned NewNumEntries = getNumEntries() + 1;
+    unsigned NumBuckets = getNumBuckets();
+    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+      this->grow(NumBuckets * 2);
+      LookupBucketFor(Lookup, TheBucket);
+      NumBuckets = getNumBuckets();
+    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
+                             NumBuckets/8)) {
+      this->grow(NumBuckets);
+      LookupBucketFor(Lookup, TheBucket);
+    }
+    assert(TheBucket);
+
+    // Only update the state after we've grown our bucket space appropriately
+    // so that when growing buckets we have self-consistent entry count.
+    incrementNumEntries();
+
+    // If we are writing over a tombstone, remember this.
+    const KeyT EmptyKey = getEmptyKey();
+    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+      decrementNumTombstones();
+
+    return TheBucket;
+  }
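+
+  // Worked numbers (illustrative): with 64 buckets, the insertion that brings
+  // the entry count to 48 satisfies 48 * 4 >= 64 * 3 and doubles the table to
+  // 128 buckets; independently, once tombstones leave at most 64 / 8 == 8
+  // empty buckets, the table rehashes at its current size to reclaim them.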
+
+  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+  /// FoundBucket.  If the bucket contains the key and a value, this returns
+  /// true, otherwise it returns a bucket with an empty marker or tombstone and
+  /// returns false.
+  template<typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val,
+                       const BucketT *&FoundBucket) const {
+    const BucketT *BucketsPtr = getBuckets();
+    const unsigned NumBuckets = getNumBuckets();
+
+    if (NumBuckets == 0) {
+      FoundBucket = nullptr;
+      return false;
+    }
+
+    // FoundTombstone - Keep track of whether we find a tombstone while probing.
+    const BucketT *FoundTombstone = nullptr;
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
+           !KeyInfoT::isEqual(Val, TombstoneKey) &&
+           "Empty/Tombstone value shouldn't be inserted into map!");
+
+    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+    unsigned ProbeAmt = 1;
+    while (true) {
+      const BucketT *ThisBucket = BucketsPtr + BucketNo;
+      // Found Val's bucket?  If so, return it.
+      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+        FoundBucket = ThisBucket;
+        return true;
+      }
+
+      // If we found an empty bucket, the key doesn't exist in the map; hand
+      // the caller back the bucket it should insert into.
+      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+        // If we've already seen a tombstone while probing, fill it in instead
+        // of the empty bucket we eventually probed to.
+        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+        return false;
+      }
+
+      // If this is a tombstone, remember it.  If Val ends up not in the map, we
+      // prefer to return it than something that would require more probing.
+      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+          !FoundTombstone)
+        FoundTombstone = ThisBucket;  // Remember the first tombstone found.
+
+      // Otherwise, it's a hash collision or a tombstone, continue quadratic
+      // probing.
+      BucketNo += ProbeAmt++;
+      BucketNo &= (NumBuckets-1);
+    }
+  }
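+
+  // Probe-sequence sketch (illustrative): with NumBuckets == 8 and an initial
+  // hash H, the buckets examined are
+  //
+  //   H & 7, (H+1) & 7, (H+3) & 7, (H+6) & 7, (H+10) & 7, ...
+  //
+  // i.e. the offsets are triangular numbers. For power-of-two table sizes this
+  // sequence visits every bucket, so the loop terminates whenever at least one
+  // bucket is empty.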
+
+  template <typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+    const BucketT *ConstFoundBucket;
+    bool Result = const_cast<const DenseMapBase *>(this)
+      ->LookupBucketFor(Val, ConstFoundBucket);
+    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+    return Result;
+  }
+
+public:
+  /// Return the approximate size (in bytes) of the actual map.
+  /// This is just the raw memory used by DenseMap.
+  /// If entries are pointers to objects, the size of the referenced objects
+  /// are not included.
+  size_t getMemorySize() const {
+    return getNumBuckets() * sizeof(BucketT);
+  }
+};
+
+template <typename KeyT, typename ValueT,
+          typename KeyInfoT = DenseMapInfo<KeyT>,
+          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+                                     KeyT, ValueT, KeyInfoT, BucketT> {
+  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  // Lift some types from the dependent base class into this class for
+  // simplicity of referring to them.
+  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  BucketT *Buckets;
+  unsigned NumEntries;
+  unsigned NumTombstones;
+  unsigned NumBuckets;
+
+public:
+  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
+  /// this number of elements can be inserted in the map without grow().
+  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
+
+  DenseMap(const DenseMap &other) : BaseT() {
+    init(0);
+    copyFrom(other);
+  }
+
+  DenseMap(DenseMap &&other) : BaseT() {
+    init(0);
+    swap(other);
+  }
+
+  template<typename InputIt>
+  DenseMap(const InputIt &I, const InputIt &E) {
+    init(std::distance(I, E));
+    this->insert(I, E);
+  }
+
+  ~DenseMap() {
+    this->destroyAll();
+    operator delete(Buckets);
+  }
+
+  void swap(DenseMap& RHS) {
+    this->incrementEpoch();
+    RHS.incrementEpoch();
+    std::swap(Buckets, RHS.Buckets);
+    std::swap(NumEntries, RHS.NumEntries);
+    std::swap(NumTombstones, RHS.NumTombstones);
+    std::swap(NumBuckets, RHS.NumBuckets);
+  }
+
+  DenseMap& operator=(const DenseMap& other) {
+    if (&other != this)
+      copyFrom(other);
+    return *this;
+  }
+
+  DenseMap& operator=(DenseMap &&other) {
+    this->destroyAll();
+    operator delete(Buckets);
+    init(0);
+    swap(other);
+    return *this;
+  }
+
+  void copyFrom(const DenseMap& other) {
+    this->destroyAll();
+    operator delete(Buckets);
+    if (allocateBuckets(other.NumBuckets)) {
+      this->BaseT::copyFrom(other);
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void init(unsigned InitNumEntries) {
+    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+    if (allocateBuckets(InitBuckets)) {
+      this->BaseT::initEmpty();
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void grow(unsigned AtLeast) {
+    unsigned OldNumBuckets = NumBuckets;
+    BucketT *OldBuckets = Buckets;
+
+    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
+    assert(Buckets);
+    if (!OldBuckets) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
+
+    // Free the old table.
+    operator delete(OldBuckets);
+  }
+
+  void shrink_and_clear() {
+    unsigned OldNumEntries = NumEntries;
+    this->destroyAll();
+
+    // Reduce the number of buckets.
+    unsigned NewNumBuckets = 0;
+    if (OldNumEntries)
+      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+    if (NewNumBuckets == NumBuckets) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    operator delete(Buckets);
+    init(NewNumBuckets);
+  }
+
+private:
+  unsigned getNumEntries() const {
+    return NumEntries;
+  }
+
+  void setNumEntries(unsigned Num) {
+    NumEntries = Num;
+  }
+
+  unsigned getNumTombstones() const {
+    return NumTombstones;
+  }
+
+  void setNumTombstones(unsigned Num) {
+    NumTombstones = Num;
+  }
+
+  BucketT *getBuckets() const {
+    return Buckets;
+  }
+
+  unsigned getNumBuckets() const {
+    return NumBuckets;
+  }
+
+  bool allocateBuckets(unsigned Num) {
+    NumBuckets = Num;
+    if (NumBuckets == 0) {
+      Buckets = nullptr;
+      return false;
+    }
+
+    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+    return true;
+  }
+};
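+
+// A short usage sketch (illustrative only, not part of the original header):
+//
+//   DenseMap<int, int> M(/*InitialReserve=*/16);
+//   M.try_emplace(1, 10);
+//   M[2] = 20;
+//   for (auto &KV : M)
+//     Total += KV.second; // 'Total' is hypothetical; iteration order is
+//                         // unspecified, unlike std::map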
+
+template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
+          typename KeyInfoT = DenseMapInfo<KeyT>,
+          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class SmallDenseMap
+    : public DenseMapBase<
+          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
+          ValueT, KeyInfoT, BucketT> {
+  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  // Lift some types from the dependent base class into this class for
+  // simplicity of referring to them.
+  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  static_assert(isPowerOf2_64(InlineBuckets),
+                "InlineBuckets must be a power of 2.");
+
+  unsigned Small : 1;
+  unsigned NumEntries : 31;
+  unsigned NumTombstones;
+
+  struct LargeRep {
+    BucketT *Buckets;
+    unsigned NumBuckets;
+  };
+
+  /// A "union" of an inline bucket array and the struct representing
+  /// a large bucket. This union will be discriminated by the 'Small' bit.
+  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+    init(NumInitBuckets);
+  }
+
+  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
+    init(0);
+    copyFrom(other);
+  }
+
+  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
+    init(0);
+    swap(other);
+  }
+
+  template<typename InputIt>
+  SmallDenseMap(const InputIt &I, const InputIt &E) {
+    init(NextPowerOf2(std::distance(I, E)));
+    this->insert(I, E);
+  }
+
+  ~SmallDenseMap() {
+    this->destroyAll();
+    deallocateBuckets();
+  }
+
+  void swap(SmallDenseMap& RHS) {
+    unsigned TmpNumEntries = RHS.NumEntries;
+    RHS.NumEntries = NumEntries;
+    NumEntries = TmpNumEntries;
+    std::swap(NumTombstones, RHS.NumTombstones);
+
+    const KeyT EmptyKey = this->getEmptyKey();
+    const KeyT TombstoneKey = this->getTombstoneKey();
+    if (Small && RHS.Small) {
+      // If we're swapping inline bucket arrays, we have to cope with some of
+      // the tricky bits of DenseMap's storage system: the buckets are not
+      // fully initialized. Thus we swap every key, but we may have
+      // a one-directional move of the value.
+      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+        BucketT *LHSB = &getInlineBuckets()[i],
+                *RHSB = &RHS.getInlineBuckets()[i];
+        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
+                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
+        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
+                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
+        if (hasLHSValue && hasRHSValue) {
+          // Swap together if we can...
+          std::swap(*LHSB, *RHSB);
+          continue;
+        }
+        // Swap separately and handle any asymmetry.
+        std::swap(LHSB->getFirst(), RHSB->getFirst());
+        if (hasLHSValue) {
+          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
+          LHSB->getSecond().~ValueT();
+        } else if (hasRHSValue) {
+          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
+          RHSB->getSecond().~ValueT();
+        }
+      }
+      return;
+    }
+    if (!Small && !RHS.Small) {
+      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+      return;
+    }
+
+    SmallDenseMap &SmallSide = Small ? *this : RHS;
+    SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+    // First stash the large side's rep and move the small side across.
+    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
+    LargeSide.getLargeRep()->~LargeRep();
+    LargeSide.Small = true;
+    // This is similar to the standard move-from-old-buckets, but the bucket
+    // count hasn't actually rotated in this case. So we have to carefully
+    // move construct the keys and values into their new locations, but there
+    // is no need to re-hash things.
+    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+              *OldB = &SmallSide.getInlineBuckets()[i];
+      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
+      OldB->getFirst().~KeyT();
+      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
+        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
+        OldB->getSecond().~ValueT();
+      }
+    }
+
+    // The hard part of moving the small buckets across is done, just move
+    // the TmpRep into its new home.
+    SmallSide.Small = false;
+    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
+  }
+
+  SmallDenseMap& operator=(const SmallDenseMap& other) {
+    if (&other != this)
+      copyFrom(other);
+    return *this;
+  }
+
+  SmallDenseMap& operator=(SmallDenseMap &&other) {
+    this->destroyAll();
+    deallocateBuckets();
+    init(0);
+    swap(other);
+    return *this;
+  }
+
+  void copyFrom(const SmallDenseMap& other) {
+    this->destroyAll();
+    deallocateBuckets();
+    Small = true;
+    if (other.getNumBuckets() > InlineBuckets) {
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
+    }
+    this->BaseT::copyFrom(other);
+  }
+
+  void init(unsigned InitBuckets) {
+    Small = true;
+    if (InitBuckets > InlineBuckets) {
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+    }
+    this->BaseT::initEmpty();
+  }
+
+  void grow(unsigned AtLeast) {
+    if (AtLeast >= InlineBuckets)
+      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
+
+    if (Small) {
+      if (AtLeast < InlineBuckets)
+        return; // Nothing to do.
+
+      // First move the inline buckets into a temporary storage.
+      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+      BucketT *TmpEnd = TmpBegin;
+
+      // Loop over the buckets, moving non-empty, non-tombstones into the
+      // temporary storage. Have the loop move the TmpEnd forward as it goes.
+      const KeyT EmptyKey = this->getEmptyKey();
+      const KeyT TombstoneKey = this->getTombstoneKey();
+      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+                 "Too many inline buckets!");
+          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
+          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
+          ++TmpEnd;
+          P->getSecond().~ValueT();
+        }
+        P->getFirst().~KeyT();
+      }
+
+      // Now make this map use the large rep, and move all the entries back
+      // into it.
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+      this->moveFromOldBuckets(TmpBegin, TmpEnd);
+      return;
+    }
+
+    LargeRep OldRep = std::move(*getLargeRep());
+    getLargeRep()->~LargeRep();
+    if (AtLeast <= InlineBuckets) {
+      Small = true;
+    } else {
+      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+    }
+
+    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
+    // Free the old table.
+    operator delete(OldRep.Buckets);
+  }
+
+  void shrink_and_clear() {
+    unsigned OldSize = this->size();
+    this->destroyAll();
+
+    // Reduce the number of buckets.
+    unsigned NewNumBuckets = 0;
+    if (OldSize) {
+      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+        NewNumBuckets = 64;
+    }
+    if ((Small && NewNumBuckets <= InlineBuckets) ||
+        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    deallocateBuckets();
+    init(NewNumBuckets);
+  }
+
+private:
+  unsigned getNumEntries() const {
+    return NumEntries;
+  }
+
+  void setNumEntries(unsigned Num) {
+    // NumEntries is hardcoded to be 31 bits wide.
+    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
+    NumEntries = Num;
+  }
+
+  unsigned getNumTombstones() const {
+    return NumTombstones;
+  }
+
+  void setNumTombstones(unsigned Num) {
+    NumTombstones = Num;
+  }
+
+  const BucketT *getInlineBuckets() const {
+    assert(Small);
+    // Note that this cast does not violate aliasing rules as we assert that
+    // the memory's dynamic type is the small, inline bucket buffer, and the
+    // 'storage.buffer' static type is 'char *'.
+    return reinterpret_cast<const BucketT *>(storage.buffer);
+  }
+
+  BucketT *getInlineBuckets() {
+    return const_cast<BucketT *>(
+      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+  }
+
+  const LargeRep *getLargeRep() const {
+    assert(!Small);
+    // Note, same rule about aliasing as with getInlineBuckets.
+    return reinterpret_cast<const LargeRep *>(storage.buffer);
+  }
+
+  LargeRep *getLargeRep() {
+    return const_cast<LargeRep *>(
+      const_cast<const SmallDenseMap *>(this)->getLargeRep());
+  }
+
+  const BucketT *getBuckets() const {
+    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+  }
+
+  BucketT *getBuckets() {
+    return const_cast<BucketT *>(
+      const_cast<const SmallDenseMap *>(this)->getBuckets());
+  }
+
+  unsigned getNumBuckets() const {
+    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+  }
+
+  void deallocateBuckets() {
+    if (Small)
+      return;
+
+    operator delete(getLargeRep()->Buckets);
+    getLargeRep()->~LargeRep();
+  }
+
+  LargeRep allocateBuckets(unsigned Num) {
+    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+    LargeRep Rep = {
+      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+    };
+    return Rep;
+  }
+};
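+
+// Illustrative sketch: with the default of 4 inline buckets, the first couple
+// of entries live in the in-object storage with no heap allocation; once the
+// 3/4 load threshold is crossed, grow() moves everything into a heap-allocated
+// LargeRep.
+//
+//   SmallDenseMap<int, int> M;  // 4 inline buckets, no allocation yet
+//   M.try_emplace(1, 10);       // stored inline
+//   for (int I = 2; I <= 8; ++I)
+//     M.try_emplace(I, I * 10); // grows onto the heap along the way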
+
+template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
+          bool IsConst>
+class DenseMapIterator : DebugEpochBase::HandleBase {
+  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
+
+  using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+
+public:
+  using difference_type = ptrdiff_t;
+  using value_type =
+      typename std::conditional<IsConst, const Bucket, Bucket>::type;
+  using pointer = value_type *;
+  using reference = value_type &;
+  using iterator_category = std::forward_iterator_tag;
+
+private:
+  pointer Ptr = nullptr;
+  pointer End = nullptr;
+
+public:
+  DenseMapIterator() = default;
+
+  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
+                   bool NoAdvance = false)
+      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
+    assert(isHandleInSync() && "invalid construction!");
+
+    if (NoAdvance) return;
+    if (shouldReverseIterate<KeyT>()) {
+      RetreatPastEmptyBuckets();
+      return;
+    }
+    AdvancePastEmptyBuckets();
+  }
+
+  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
+  // for const iterator destinations so it doesn't end up as a user defined copy
+  // constructor.
+  template <bool IsConstSrc,
+            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
+  DenseMapIterator(
+      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
+      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
+
+  reference operator*() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>())
+      return Ptr[-1];
+    return *Ptr;
+  }
+  pointer operator->() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>())
+      return &(Ptr[-1]);
+    return Ptr;
+  }
+
+  bool operator==(const ConstIterator &RHS) const {
+    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+    assert(getEpochAddress() == RHS.getEpochAddress() &&
+           "comparing incomparable iterators!");
+    return Ptr == RHS.Ptr;
+  }
+  bool operator!=(const ConstIterator &RHS) const {
+    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+    assert(getEpochAddress() == RHS.getEpochAddress() &&
+           "comparing incomparable iterators!");
+    return Ptr != RHS.Ptr;
+  }
+
+  inline DenseMapIterator& operator++() {  // Preincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>()) {
+      --Ptr;
+      RetreatPastEmptyBuckets();
+      return *this;
+    }
+    ++Ptr;
+    AdvancePastEmptyBuckets();
+    return *this;
+  }
+  DenseMapIterator operator++(int) {  // Postincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    DenseMapIterator tmp = *this; ++*this; return tmp;
+  }
+
+private:
+  void AdvancePastEmptyBuckets() {
+    assert(Ptr <= End);
+    const KeyT Empty = KeyInfoT::getEmptyKey();
+    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
+                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
+      ++Ptr;
+  }
+
+  void RetreatPastEmptyBuckets() {
+    assert(Ptr >= End);
+    const KeyT Empty = KeyInfoT::getEmptyKey();
+    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
+                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
+      --Ptr;
+  }
+};
+
+template <typename KeyT, typename ValueT, typename KeyInfoT>
+inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
+  return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
new file mode 100644
index 0000000..a96904c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
@@ -0,0 +1,267 @@
+//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DenseMapInfo traits for DenseMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+template<typename T>
+struct DenseMapInfo {
+  //static inline T getEmptyKey();
+  //static inline T getTombstoneKey();
+  //static unsigned getHashValue(const T &Val);
+  //static bool isEqual(const T &LHS, const T &RHS);
+};
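+
+// A sketch of a client-supplied specialization ('MyKey' is a hypothetical
+// type). The empty and tombstone keys must be two distinct values that never
+// occur as real keys, and isEqual must tolerate both of them.
+//
+//   struct MyKey { int Id; };
+//   namespace llvm {
+//   template <> struct DenseMapInfo<MyKey> {
+//     static inline MyKey getEmptyKey() { return MyKey{-1}; }
+//     static inline MyKey getTombstoneKey() { return MyKey{-2}; }
+//     static unsigned getHashValue(const MyKey &K) { return K.Id * 37U; }
+//     static bool isEqual(const MyKey &L, const MyKey &R) {
+//       return L.Id == R.Id;
+//     }
+//   };
+//   } // end namespace llvm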
+
+// Provide DenseMapInfo for all pointers.
+template<typename T>
+struct DenseMapInfo<T*> {
+  static inline T* getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+    return reinterpret_cast<T*>(Val);
+  }
+
+  static inline T* getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+    return reinterpret_cast<T*>(Val);
+  }
+
+  static unsigned getHashValue(const T *PtrVal) {
+    return (unsigned((uintptr_t)PtrVal) >> 4) ^
+           (unsigned((uintptr_t)PtrVal) >> 9);
+  }
+
+  static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+  static inline char getEmptyKey() { return ~0; }
+  static inline char getTombstoneKey() { return ~0 - 1; }
+  static unsigned getHashValue(const char& Val) { return Val * 37U; }
+
+  static bool isEqual(const char &LHS, const char &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <> struct DenseMapInfo<unsigned short> {
+  static inline unsigned short getEmptyKey() { return 0xFFFF; }
+  static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+  static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+  static inline unsigned getEmptyKey() { return ~0U; }
+  static inline unsigned getTombstoneKey() { return ~0U - 1; }
+  static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+  static inline unsigned long getEmptyKey() { return ~0UL; }
+  static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+  static unsigned getHashValue(const unsigned long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+  static inline unsigned long long getEmptyKey() { return ~0ULL; }
+  static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+  static unsigned getHashValue(const unsigned long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const unsigned long long& LHS,
+                      const unsigned long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for shorts.
+template <> struct DenseMapInfo<short> {
+  static inline short getEmptyKey() { return 0x7FFF; }
+  static inline short getTombstoneKey() { return -0x7FFF - 1; }
+  static unsigned getHashValue(const short &Val) { return Val * 37U; }
+  static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+  static inline int getEmptyKey() { return 0x7fffffff; }
+  static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+  static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+
+  static bool isEqual(const int& LHS, const int& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+  static inline long getEmptyKey() {
+    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+  }
+
+  static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+  static unsigned getHashValue(const long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const long& LHS, const long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+  static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+  static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+
+  static unsigned getHashValue(const long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const long long& LHS,
+                      const long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template<typename T, typename U>
+struct DenseMapInfo<std::pair<T, U>> {
+  using Pair = std::pair<T, U>;
+  using FirstInfo = DenseMapInfo<T>;
+  using SecondInfo = DenseMapInfo<U>;
+
+  static inline Pair getEmptyKey() {
+    return std::make_pair(FirstInfo::getEmptyKey(),
+                          SecondInfo::getEmptyKey());
+  }
+
+  static inline Pair getTombstoneKey() {
+    return std::make_pair(FirstInfo::getTombstoneKey(),
+                          SecondInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const Pair& PairVal) {
+    uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
+          | (uint64_t)SecondInfo::getHashValue(PairVal.second);
+    key += ~(key << 32);
+    key ^= (key >> 22);
+    key += ~(key << 13);
+    key ^= (key >> 8);
+    key += (key << 3);
+    key ^= (key >> 15);
+    key += ~(key << 27);
+    key ^= (key >> 31);
+    return (unsigned)key;
+  }
+
+  static bool isEqual(const Pair &LHS, const Pair &RHS) {
+    return FirstInfo::isEqual(LHS.first, RHS.first) &&
+           SecondInfo::isEqual(LHS.second, RHS.second);
+  }
+};
+
+// Provide DenseMapInfo for StringRefs.
+template <> struct DenseMapInfo<StringRef> {
+  static inline StringRef getEmptyKey() {
+    return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
+                     0);
+  }
+
+  static inline StringRef getTombstoneKey() {
+    return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
+                     0);
+  }
+
+  static unsigned getHashValue(StringRef Val) {
+    assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+    assert(Val.data() != getTombstoneKey().data() &&
+           "Cannot hash the tombstone key!");
+    return (unsigned)(hash_value(Val));
+  }
+
+  static bool isEqual(StringRef LHS, StringRef RHS) {
+    if (RHS.data() == getEmptyKey().data())
+      return LHS.data() == getEmptyKey().data();
+    if (RHS.data() == getTombstoneKey().data())
+      return LHS.data() == getTombstoneKey().data();
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for ArrayRefs.
+template <typename T> struct DenseMapInfo<ArrayRef<T>> {
+  static inline ArrayRef<T> getEmptyKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
+                       size_t(0));
+  }
+
+  static inline ArrayRef<T> getTombstoneKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
+                       size_t(0));
+  }
+
+  static unsigned getHashValue(ArrayRef<T> Val) {
+    assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+    assert(Val.data() != getTombstoneKey().data() &&
+           "Cannot hash the tombstone key!");
+    return (unsigned)(hash_value(Val));
+  }
+
+  static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    if (RHS.data() == getEmptyKey().data())
+      return LHS.data() == getEmptyKey().data();
+    if (RHS.data() == getTombstoneKey().data())
+      return LHS.data() == getTombstoneKey().data();
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAPINFO_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseSet.h b/linux-x64/clang/include/llvm/ADT/DenseSet.h
new file mode 100644
index 0000000..7e5171c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseSet.h
@@ -0,0 +1,255 @@
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseSet and SmallDenseSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+  KeyT key;
+
+public:
+  KeyT &getFirst() { return key; }
+  const KeyT &getFirst() const { return key; }
+  DenseSetEmpty &getSecond() { return *this; }
+  const DenseSetEmpty &getSecond() const { return *this; }
+};
+
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+///   DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+///            detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type.  ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
+  static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+                "DenseMap buckets unexpectedly large!");
+  MapTy TheMap;
+
+  template <typename T>
+  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+  using key_type = ValueT;
+  using value_type = ValueT;
+  using size_type = unsigned;
+
+  explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+  DenseSetImpl(std::initializer_list<ValueT> Elems)
+      : DenseSetImpl(Elems.size()) {
+    insert(Elems.begin(), Elems.end());
+  }
+
+  bool empty() const { return TheMap.empty(); }
+  size_type size() const { return TheMap.size(); }
+  size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+  /// Grow the DenseSet so that it has at least \p Size buckets. Will never
+  /// shrink the set.
+  void resize(size_t Size) { TheMap.resize(Size); }
+
+  /// Grow the DenseSet so that it can contain at least \p Size items before
+  /// resizing again.
+  void reserve(size_t Size) { TheMap.reserve(Size); }
+
+  void clear() {
+    TheMap.clear();
+  }
+
+  /// Return 1 if the specified key is in the set, 0 otherwise.
+  size_type count(const_arg_type_t<ValueT> V) const {
+    return TheMap.count(V);
+  }
+
+  bool erase(const ValueT &V) {
+    return TheMap.erase(V);
+  }
+
+  void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
+
+  // Iterators.
+
+  class ConstIterator;
+
+  class Iterator {
+    typename MapTy::iterator I;
+    friend class DenseSetImpl;
+    friend class ConstIterator;
+
+  public:
+    using difference_type = typename MapTy::iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = value_type *;
+    using reference = value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    Iterator() = default;
+    Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+    ValueT &operator*() { return I->getFirst(); }
+    const ValueT &operator*() const { return I->getFirst(); }
+    ValueT *operator->() { return &I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    Iterator& operator++() { ++I; return *this; }
+    Iterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  class ConstIterator {
+    typename MapTy::const_iterator I;
+    friend class DenseSet;
+    friend class Iterator;
+
+  public:
+    using difference_type = typename MapTy::const_iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = value_type *;
+    using reference = value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    ConstIterator() = default;
+    ConstIterator(const Iterator &B) : I(B.I) {}
+    ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+    const ValueT &operator*() const { return I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    ConstIterator& operator++() { ++I; return *this; }
+    ConstIterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  using iterator = Iterator;
+  using const_iterator = ConstIterator;
+
+  iterator begin() { return Iterator(TheMap.begin()); }
+  iterator end() { return Iterator(TheMap.end()); }
+
+  const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+  const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+  iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
+  const_iterator find(const_arg_type_t<ValueT> V) const {
+    return ConstIterator(TheMap.find(V));
+  }
+
+  /// Alternative version of find() which allows a different, and possibly less
+  /// expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+  /// used.
+  template <class LookupKeyT>
+  iterator find_as(const LookupKeyT &Val) {
+    return Iterator(TheMap.find_as(Val));
+  }
+  template <class LookupKeyT>
+  const_iterator find_as(const LookupKeyT &Val) const {
+    return ConstIterator(TheMap.find_as(Val));
+  }
+
+  void erase(Iterator I) { return TheMap.erase(I.I); }
+  void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+  std::pair<iterator, bool> insert(const ValueT &V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(V, Empty);
+  }
+
+  std::pair<iterator, bool> insert(ValueT &&V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(std::move(V), Empty);
+  }
+
+  /// Alternative version of insert that uses a different (and possibly less
+  /// expensive) key type.
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(const ValueT &V,
+                                      const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
+  }
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
+  }
+
+  // Range insertion of values.
+  template<typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+};
+
+} // end namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+                     ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                      detail::DenseSetPair<ValueT>>,
+                     ValueInfoT> {
+  using BaseT =
+      detail::DenseSetImpl<ValueT,
+                           DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                    detail::DenseSetPair<ValueT>>,
+                           ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
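+
+// A usage sketch (illustrative only):
+//
+//   DenseSet<int> S = {1, 2, 3};
+//   if (S.insert(4).second)
+//     ; // 4 was newly inserted
+//   assert(S.count(2) == 1);
+//   S.erase(3);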
+
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+          typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+    : public detail::DenseSetImpl<
+          ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                                ValueInfoT, detail::DenseSetPair<ValueT>>,
+          ValueInfoT> {
+  using BaseT = detail::DenseSetImpl<
+      ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                            ValueInfoT, detail::DenseSetPair<ValueT>>,
+      ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
new file mode 100644
index 0000000..e964d7f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
@@ -0,0 +1,308 @@
+//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build generic depth
+// first graph iterator.  This file exposes the following functions/types:
+//
+// df_begin/df_end/df_iterator
+//   * Normal depth-first iteration - visit a node and then all of its children.
+//
+// idf_begin/idf_end/idf_iterator
+//   * Depth-first iteration on the 'inverse' graph.
+//
+// df_ext_begin/df_ext_end/df_ext_iterator
+//   * Normal depth-first iteration - visit a node and then all of its children.
+//     This iterator stores the 'visited' set in an external set, which allows
+//     it to be more efficient, and allows external clients to use the set for
+//     other purposes.
+//
+// idf_ext_begin/idf_ext_end/idf_ext_iterator
+//   * Depth-first iteration on the 'inverse' graph.
+//     This iterator stores the 'visited' set in an external set, which allows
+//     it to be more efficient, and allows external clients to use the set for
+//     other purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
+#define LLVM_ADT_DEPTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// df_iterator_storage - A private class which is used to figure out where to
+// store the visited set.
+template<class SetType, bool External>   // Non-external set
+class df_iterator_storage {
+public:
+  SetType Visited;
+};
+
+template<class SetType>
+class df_iterator_storage<SetType, true> {
+public:
+  df_iterator_storage(SetType &VSet) : Visited(VSet) {}
+  df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
+
+  SetType &Visited;
+};
+
+// The visited state for the iteration is a simple set augmented with
+// one more method, completed, which is invoked when all children of a
+// node have been processed. It is intended to distinguish back edges from
+// cross edges in the spanning tree but is not used in the common case.
+template <typename NodeRef, unsigned SmallSize=8>
+struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
+  using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
+  using iterator = typename BaseSet::iterator;
+
+  std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
+  template <typename IterT>
+  void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }
+
+  void completed(NodeRef) {}
+};
+
+// Generic Depth First Iterator
+template <class GraphT,
+          class SetType =
+              df_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class df_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public df_iterator_storage<SetType, ExtStorage> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // First element is the node reference, second is the 'next child' to visit.
+  // The next-child iterator is initialized lazily to pick up graph changes
+  // made during the DFS.
+  using StackElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+  // VisitStack - Used to maintain the ordering.  Top = current block
+  std::vector<StackElement> VisitStack;
+
+private:
+  inline df_iterator(NodeRef Node) {
+    this->Visited.insert(Node);
+    VisitStack.push_back(StackElement(Node, None));
+  }
+
+  inline df_iterator() = default; // End is when stack is empty
+
+  inline df_iterator(NodeRef Node, SetType &S)
+      : df_iterator_storage<SetType, ExtStorage>(S) {
+    if (this->Visited.insert(Node).second)
+      VisitStack.push_back(StackElement(Node, None));
+  }
+
+  inline df_iterator(SetType &S)
+    : df_iterator_storage<SetType, ExtStorage>(S) {
+    // End is when stack is empty
+  }
+
+  inline void toNext() {
+    do {
+      NodeRef Node = VisitStack.back().first;
+      Optional<ChildItTy> &Opt = VisitStack.back().second;
+
+      if (!Opt)
+        Opt.emplace(GT::child_begin(Node));
+
+      // Notice that we directly mutate *Opt here, so that
+      // VisitStack.back().second actually gets updated as the iterator
+      // increases.
+      while (*Opt != GT::child_end(Node)) {
+        NodeRef Next = *(*Opt)++;
+        // Has our next sibling been visited?
+        if (this->Visited.insert(Next).second) {
+          // No, do it now.
+          VisitStack.push_back(StackElement(Next, None));
+          return;
+        }
+      }
+      this->Visited.completed(Node);
+
+      // Oops, ran out of successors... go up a level on the stack.
+      VisitStack.pop_back();
+    } while (!VisitStack.empty());
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static begin and end methods as our public "constructors"
+  static df_iterator begin(const GraphT &G) {
+    return df_iterator(GT::getEntryNode(G));
+  }
+  static df_iterator end(const GraphT &G) { return df_iterator(); }
+
+  // Static begin and end methods as our public ctors for external iterators
+  static df_iterator begin(const GraphT &G, SetType &S) {
+    return df_iterator(GT::getEntryNode(G), S);
+  }
+  static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }
+
+  bool operator==(const df_iterator &x) const {
+    return VisitStack == x.VisitStack;
+  }
+  bool operator!=(const df_iterator &x) const { return !(*this == x); }
+
+  const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time so that you can actually call methods on the node, because the
+  // contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
+  //
+  NodeRef operator->() const { return **this; }
+
+  df_iterator &operator++() { // Preincrement
+    toNext();
+    return *this;
+  }
+
+  /// \brief Skips all children of the current node and traverses to the next
+  /// node.
+  ///
+  /// Note: This function already increments the iterator, so incrementing
+  /// again after calling it risks walking off the end.
+  df_iterator &skipChildren() {
+    VisitStack.pop_back();
+    if (!VisitStack.empty())
+      toNext();
+    return *this;
+  }
+
+  df_iterator operator++(int) { // Postincrement
+    df_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+  // nodeVisited - return true if this iterator has already visited the
+  // specified node.  This is public, and will probably be used to iterate over
+  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
+  //
+  bool nodeVisited(NodeRef Node) const {
+    return this->Visited.count(Node) != 0;
+  }
+
+  /// getPathLength - Return the length of the path from the entry node to the
+  /// current node, counting both nodes.
+  unsigned getPathLength() const { return VisitStack.size(); }
+
+  /// getPath - Return the n'th node in the path from the entry node to the
+  /// current node.
+  NodeRef getPath(unsigned n) const { return VisitStack[n].first; }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+df_iterator<T> df_begin(const T& G) {
+  return df_iterator<T>::begin(G);
+}
+
+template <class T>
+df_iterator<T> df_end(const T& G) {
+  return df_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<df_iterator<T>> depth_first(const T& G) {
+  return make_range(df_begin(G), df_end(G));
+}
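+
+// Illustrative sketch, assuming a type with a GraphTraits specialization
+// (e.g. Function * in LLVM IR, whose nodes are basic blocks): nodes are
+// produced in DFS preorder, each exactly once.
+//
+//   for (BasicBlock *BB : depth_first(&F)) // 'F' is a hypothetical Function
+//     Visit(BB);                           // 'Visit' is a hypothetical hook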
+
+// Provide global definitions of external depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+struct df_ext_iterator : public df_iterator<T, SetTy, true> {
+  df_ext_iterator(const df_iterator<T, SetTy, true> &V)
+    : df_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
+  return df_ext_iterator<T, SetTy>::begin(G, S);
+}
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
+  return df_ext_iterator<T, SetTy>::end(G, S);
+}
+
+template <class T, class SetTy>
+iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
+                                                          SetTy &S) {
+  return make_range(df_ext_begin(G, S), df_ext_end(G, S));
+}
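+
+// For example (illustrative sketch), one visited set can be shared across
+// several traversals so that later walks skip nodes already seen (GraphT, G1,
+// and G2 are hypothetical; G1 and G2 are graphs of the same type):
+//
+//   std::set<GraphTraits<GraphT>::NodeRef> Visited;
+//   for (auto *N : depth_first_ext(G1, Visited)) { /* ... */ }
+//   for (auto *N : depth_first_ext(G2, Visited)) { /* skips seen nodes */ }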
+
+// Provide global definitions of inverse depth first iterators...
+template <class T,
+          class SetTy =
+              df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
+          bool External = false>
+struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
+  idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
+    : df_iterator<Inverse<T>, SetTy, External>(V) {}
+};
+
+template <class T>
+idf_iterator<T> idf_begin(const T& G) {
+  return idf_iterator<T>::begin(Inverse<T>(G));
+}
+
+template <class T>
+idf_iterator<T> idf_end(const T& G){
+  return idf_iterator<T>::end(Inverse<T>(G));
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
+  return make_range(idf_begin(G), idf_end(G));
+}
+
+// Provide global definitions of external inverse depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
+  idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
+    : idf_iterator<T, SetTy, true>(V) {}
+  idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
+    : idf_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
+  return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
+  return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
+                                                                   SetTy &S) {
+  return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DEPTHFIRSTITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/EpochTracker.h b/linux-x64/clang/include/llvm/ADT/EpochTracker.h
new file mode 100644
index 0000000..db39ba4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/EpochTracker.h
@@ -0,0 +1,100 @@
+//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
+// These can be used to write iterators that are fail-fast when LLVM is built
+// with asserts enabled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EPOCH_TRACKER_H
+#define LLVM_ADT_EPOCH_TRACKER_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <cstdint>
+
+namespace llvm {
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+/// \brief A base class for data structure classes wishing to make iterators
+/// ("handles") pointing into themselves fail-fast.  When building without
+/// asserts, this class is empty and does nothing.
+///
+/// DebugEpochBase does not by itself track handles pointing into itself.  The
+/// expectation is that routines touching the handles will poll on
+/// isHandleInSync at appropriate points to assert that the handle they're using
+/// is still valid.
+///
+class DebugEpochBase {
+  uint64_t Epoch;
+
+public:
+  DebugEpochBase() : Epoch(0) {}
+
+  /// \brief Calling incrementEpoch invalidates all handles pointing into the
+  /// calling instance.
+  void incrementEpoch() { ++Epoch; }
+
+  /// \brief The destructor calls incrementEpoch to make use-after-free bugs
+  /// more likely to crash deterministically.
+  ~DebugEpochBase() { incrementEpoch(); }
+
+  /// \brief A base class for iterator classes ("handles") that wish to poll for
+  /// iterator-invalidating modifications in the underlying data structure.
+  /// When LLVM is built without asserts, this class is empty and does nothing.
+  ///
+  /// HandleBase does not track the parent data structure by itself.  It expects
+  /// the routines modifying the data structure to call incrementEpoch when they
+  /// make an iterator-invalidating modification.
+  ///
+  class HandleBase {
+    const uint64_t *EpochAddress;
+    uint64_t EpochAtCreation;
+
+  public:
+    HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}
+
+    explicit HandleBase(const DebugEpochBase *Parent)
+        : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
+
+    /// \brief Returns true if the DebugEpochBase this Handle is linked to has
+    /// not called incrementEpoch on itself since the creation of this
+    /// HandleBase instance.
+    bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
+
+    /// \brief Returns a pointer to the epoch word stored in the data structure
+    /// this handle points into.  Can be used to check if two iterators point
+    /// into the same data structure.
+    const void *getEpochAddress() const { return EpochAddress; }
+  };
+};
+
+#else
+
+class DebugEpochBase {
+public:
+  void incrementEpoch() {}
+
+  class HandleBase {
+  public:
+    HandleBase() = default;
+    explicit HandleBase(const DebugEpochBase *) {}
+    bool isHandleInSync() const { return true; }
+    const void *getEpochAddress() const { return nullptr; }
+  };
+};
+
+#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
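+
+// For example (illustrative sketch), a container makes its iterators
+// fail-fast by inheriting from DebugEpochBase, bumping the epoch on every
+// iterator-invalidating mutation, and having its iterators (which inherit
+// from HandleBase) poll isHandleInSync().  All names below are hypothetical:
+//
+//   class MyContainer : public DebugEpochBase {
+//     std::vector<int> Storage;
+//   public:
+//     void insert(int V) { incrementEpoch(); Storage.push_back(V); }
+//     class iterator : public DebugEpochBase::HandleBase {
+//       // assert(isHandleInSync() && "invalidated!") before each access.
+//     };
+//   };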
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h b/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h
new file mode 100644
index 0000000..e3f4843
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h
@@ -0,0 +1,298 @@
+//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementation of equivalence classes through the use of Tarjan's
+// efficient union-find algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
+#define LLVM_ADT_EQUIVALENCECLASSES_H
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <set>
+
+namespace llvm {
+
+/// EquivalenceClasses - This represents a collection of equivalence classes and
+/// supports three efficient operations: insert an element into a class of its
+/// own, union two classes, and find the class for a given element.  In
+/// addition to these modification methods, it is possible to iterate over all
+/// of the equivalence classes and all of the elements in a class.
+///
+/// This implementation is efficient: it stores only one copy of each element
+/// being indexed per entry in the set, and allows any type to be indexed (as
+/// long as it can be ordered with operator<).
+///
+/// Here is a simple example using integers:
+///
+/// \code
+///  EquivalenceClasses<int> EC;
+///  EC.unionSets(1, 2);                // insert 1, 2 into the same set
+///  EC.insert(4); EC.insert(5);        // insert 4, 5 into own sets
+///  EC.unionSets(5, 1);                // merge the set for 1 with 5's set.
+///
+///  for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
+///       I != E; ++I) {           // Iterate over all of the equivalence sets.
+///    if (!I->isLeader()) continue;   // Ignore non-leader sets.
+///    for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
+///         MI != EC.member_end(); ++MI)   // Loop over members in this set.
+///      cerr << *MI << " ";  // Print member.
+///    cerr << "\n";   // Finish set.
+///  }
+/// \endcode
+///
+/// This example prints:
+///   4
+///   5 1 2
+///
+template <class ElemTy>
+class EquivalenceClasses {
+  /// ECValue - The EquivalenceClasses data structure is just a set of these.
+  /// Each of these represents a relation for a value.  First it stores the
+  /// value itself, which provides the ordering that the set queries.  Next, it
+  /// provides a "next pointer", which is used to enumerate all of the elements
+  /// in the unioned set.  Finally, it defines either an "end of list pointer"
+  /// or a "leader pointer" depending on whether the value itself is a leader.
+  /// A "leader pointer" points to the node that is the leader for this
+  /// element, if the node is not a leader.  An "end of list pointer" points
+  /// to the last node in the list of members of this set.  Whether or not a
+  /// node is a leader is determined by a bit stolen from one of the pointers.
+  class ECValue {
+    friend class EquivalenceClasses;
+
+    mutable const ECValue *Leader, *Next;
+    ElemTy Data;
+
+    // ECValue ctor - Start out with EndOfList pointing to this node, Next is
+    // Null, isLeader = true.
+    ECValue(const ElemTy &Elt)
+      : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}
+
+    const ECValue *getLeader() const {
+      if (isLeader()) return this;
+      if (Leader->isLeader()) return Leader;
+      // Path compression.
+      return Leader = Leader->getLeader();
+    }
+
+    const ECValue *getEndOfList() const {
+      assert(isLeader() && "Cannot get the end of a list for a non-leader!");
+      return Leader;
+    }
+
+    void setNext(const ECValue *NewNext) const {
+      assert(getNext() == nullptr && "Already has a next pointer!");
+      Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
+    }
+
+  public:
+    ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
+                                  Data(RHS.Data) {
+      // Only support copying of singleton nodes.
+      assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
+    }
+
+    bool operator<(const ECValue &UFN) const { return Data < UFN.Data; }
+
+    bool isLeader() const { return (intptr_t)Next & 1; }
+    const ElemTy &getData() const { return Data; }
+
+    const ECValue *getNext() const {
+      return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
+    }
+
+    template<typename T>
+    bool operator<(const T &Val) const { return Data < Val; }
+  };
+
+  /// TheMapping - This implicitly provides a mapping from ElemTy values to the
+  /// ECValues, it just keeps the key as part of the value.
+  std::set<ECValue> TheMapping;
+
+public:
+  EquivalenceClasses() = default;
+  EquivalenceClasses(const EquivalenceClasses &RHS) {
+    operator=(RHS);
+  }
+
+  const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
+    TheMapping.clear();
+    for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
+      if (I->isLeader()) {
+        member_iterator MI = RHS.member_begin(I);
+        member_iterator LeaderIt = member_begin(insert(*MI));
+        for (++MI; MI != member_end(); ++MI)
+          unionSets(LeaderIt, member_begin(insert(*MI)));
+      }
+    return *this;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Inspection methods
+  //
+
+  /// iterator* - Provides a way to iterate over all values in the set.
+  using iterator = typename std::set<ECValue>::const_iterator;
+
+  iterator begin() const { return TheMapping.begin(); }
+  iterator end() const { return TheMapping.end(); }
+
+  bool empty() const { return TheMapping.empty(); }
+
+  /// member_* Iterate over the members of an equivalence class.
+  class member_iterator;
+  member_iterator member_begin(iterator I) const {
+    // Only leaders provide anything to iterate over.
+    return member_iterator(I->isLeader() ? &*I : nullptr);
+  }
+  member_iterator member_end() const {
+    return member_iterator(nullptr);
+  }
+
+  /// findValue - Return an iterator to the specified value.  If it does not
+  /// exist, end() is returned.
+  iterator findValue(const ElemTy &V) const {
+    return TheMapping.find(V);
+  }
+
+  /// getLeaderValue - Return the leader for the specified value that is in the
+  /// set.  It is an error to call this method for a value that is not yet in
+  /// the set.  For that, call getOrInsertLeaderValue(V).
+  const ElemTy &getLeaderValue(const ElemTy &V) const {
+    member_iterator MI = findLeader(V);
+    assert(MI != member_end() && "Value is not in the set!");
+    return *MI;
+  }
+
+  /// getOrInsertLeaderValue - Return the leader for the specified value that is
+  /// in the set.  If the member is not in the set, it is inserted, then
+  /// returned.
+  const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
+    member_iterator MI = findLeader(insert(V));
+    assert(MI != member_end() && "Value is not in the set!");
+    return *MI;
+  }
+
+  /// getNumClasses - Return the number of equivalence classes in this set.
+  /// Note that this is a linear time operation.
+  unsigned getNumClasses() const {
+    unsigned NC = 0;
+    for (iterator I = begin(), E = end(); I != E; ++I)
+      if (I->isLeader()) ++NC;
+    return NC;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Mutation methods
+
+  /// insert - Insert a new value into the union/find set, ignoring the request
+  /// if the value already exists.
+  iterator insert(const ElemTy &Data) {
+    return TheMapping.insert(ECValue(Data)).first;
+  }
+
+  /// findLeader - Given a value in the set, return a member iterator for the
+  /// equivalence class it is in.  This does the path-compression part that
+  /// makes union-find "union findy".  This returns an end iterator if the value
+  /// is not in the equivalence class.
+  member_iterator findLeader(iterator I) const {
+    if (I == TheMapping.end()) return member_end();
+    return member_iterator(I->getLeader());
+  }
+  member_iterator findLeader(const ElemTy &V) const {
+    return findLeader(TheMapping.find(V));
+  }
+
+  /// unionSets - Merge the two equivalence sets for the specified values,
+  /// inserting them if they do not already exist in the equivalence set.
+  member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
+    iterator V1I = insert(V1), V2I = insert(V2);
+    return unionSets(findLeader(V1I), findLeader(V2I));
+  }
+  member_iterator unionSets(member_iterator L1, member_iterator L2) {
+    assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
+    if (L1 == L2) return L1;   // Unifying the same two sets, noop.
+
+    // Otherwise, this is a real union operation.  Set the end of the L1 list to
+    // point to the L2 leader node.
+    const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
+    L1LV.getEndOfList()->setNext(&L2LV);
+
+    // Update L1LV's end of list pointer.
+    L1LV.Leader = L2LV.getEndOfList();
+
+    // Clear L2's leader flag:
+    L2LV.Next = L2LV.getNext();
+
+    // L2's leader is now L1.
+    L2LV.Leader = &L1LV;
+    return L1;
+  }
+
+  // isEquivalent - Return true if V1 is equivalent to V2. This can happen if
+  // V1 is equal to V2 or if they belong to one equivalence class.
+  bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const {
+    // Fast path: any element is equivalent to itself.
+    if (V1 == V2)
+      return true;
+    auto It = findLeader(V1);
+    return It != member_end() && It == findLeader(V2);
+  }
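+  // For example (illustrative): after unionSets(1, 2), both isEquivalent(1, 2)
+  // and isEquivalent(2, 1) hold, while isEquivalent(1, 4) holds only if the
+  // classes of 1 and 4 have been merged.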
+
+  class member_iterator : public std::iterator<std::forward_iterator_tag,
+                                               const ElemTy, ptrdiff_t> {
+    friend class EquivalenceClasses;
+
+    using super = std::iterator<std::forward_iterator_tag,
+                                const ElemTy, ptrdiff_t>;
+
+    // Null-initialize so a default-constructed iterator compares equal to
+    // member_end() instead of holding an indeterminate pointer.
+    const ECValue *Node = nullptr;
+
+  public:
+    using size_type = size_t;
+    using pointer = typename super::pointer;
+    using reference = typename super::reference;
+
+    explicit member_iterator() = default;
+    explicit member_iterator(const ECValue *N) : Node(N) {}
+
+    reference operator*() const {
+      assert(Node != nullptr && "Dereferencing end()!");
+      return Node->getData();
+    }
+    pointer operator->() const { return &operator*(); }
+
+    member_iterator &operator++() {
+      assert(Node != nullptr && "++'d off the end of the list!");
+      Node = Node->getNext();
+      return *this;
+    }
+
+    member_iterator operator++(int) {    // postincrement operators.
+      member_iterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    bool operator==(const member_iterator &RHS) const {
+      return Node == RHS.Node;
+    }
+    bool operator!=(const member_iterator &RHS) const {
+      return Node != RHS.Node;
+    }
+  };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_EQUIVALENCECLASSES_H
diff --git a/linux-x64/clang/include/llvm/ADT/FoldingSet.h b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
new file mode 100644
index 0000000..e363e69
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
@@ -0,0 +1,762 @@
+//===- llvm/ADT/FoldingSet.h - Uniquing Hash Set ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a hash set that can be used to remove duplication of nodes
+// in a graph.  This code was originally created by Chris Lattner for use with
+// SelectionDAGCSEMap, but was isolated to allow use across the LLVM code base.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FOLDINGSET_H
+#define LLVM_ADT_FOLDINGSET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+/// This folding set is used for two purposes:
+///   1. Given information about a node we want to create, look up the unique
+///      instance of the node in the set.  If the node already exists, return
+///      it, otherwise return the bucket it should be inserted into.
+///   2. Given a node that has already been created, remove it from the set.
+///
+/// This class is implemented as a single-link chained hash table, where the
+/// "buckets" are actually the nodes themselves (the next pointer is in the
+/// node).  The last node points back to the bucket to simplify node removal.
+///
+/// Any node that is to be included in the folding set must be a subclass of
+/// FoldingSetNode.  The node class must also define a Profile method used to
+/// establish the unique bits of data for the node.  The Profile method is
+/// passed a FoldingSetNodeID object which is used to gather the bits.  Just
+/// call one of the Add* functions defined in the FoldingSetBase::NodeID class.
+/// NOTE: The folding set does not own the nodes and it is the
+/// responsibility of the user to dispose of the nodes.
+///
+/// Eg.
+///    class MyNode : public FoldingSetNode {
+///    private:
+///      std::string Name;
+///      unsigned Value;
+///    public:
+///      MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
+///       ...
+///      void Profile(FoldingSetNodeID &ID) const {
+///        ID.AddString(Name);
+///        ID.AddInteger(Value);
+///      }
+///      ...
+///    };
+///
+/// To define the folding set itself use the FoldingSet template;
+///
+/// Eg.
+///    FoldingSet<MyNode> MyFoldingSet;
+///
+/// Four public methods are available to manipulate the folding set;
+///
+/// 1) If you have an existing node that you want to add to the set, but are
+/// unsure whether the node already exists, then call;
+///
+///    MyNode *M = MyFoldingSet.GetOrInsertNode(N);
+///
+/// If the result is equal to the input then the node has been inserted.
+/// Otherwise, the result is the node already existing in the folding set, and
+/// the input can be discarded (use the result instead).
+///
+/// 2) If you are ready to construct a node but want to check if it already
+/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
+/// check;
+///
+///   FoldingSetNodeID ID;
+///   ID.AddString(Name);
+///   ID.AddInteger(Value);
+///   void *InsertPoint;
+///
+///    MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
+///
+/// If found, then M will be non-NULL; otherwise, InsertPoint will point to
+/// where the node should be inserted using InsertNode.
+///
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert
+/// a new node at the InsertPoint with InsertNode;
+///
+///    InsertNode(N, InsertPoint);
+///
+/// 4) Finally, if you want to remove a node from the folding set, call;
+///
+///    bool WasRemoved = RemoveNode(N);
+///
+/// The result indicates whether the node existed in the folding set.
+
+class FoldingSetNodeID;
+class StringRef;
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBase - Implements the folding set functionality.  The main
+/// structure is an array of buckets.  Each bucket is indexed by the hash of
+/// the nodes it contains.  The bucket itself points to the nodes contained
+/// in the bucket via a singly linked list.  The last node in the list points
+/// back to the bucket to facilitate node removal.
+///
+class FoldingSetBase {
+  virtual void anchor(); // Out of line virtual method.
+
+protected:
+  /// Buckets - Array of bucket chains.
+  void **Buckets;
+
+  /// NumBuckets - Length of the Buckets array.  Always a power of 2.
+  unsigned NumBuckets;
+
+  /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
+  /// is greater than twice the number of buckets.
+  unsigned NumNodes;
+
+  explicit FoldingSetBase(unsigned Log2InitSize = 6);
+  FoldingSetBase(FoldingSetBase &&Arg);
+  FoldingSetBase &operator=(FoldingSetBase &&RHS);
+  ~FoldingSetBase();
+
+public:
+  //===--------------------------------------------------------------------===//
+  /// Node - This class is used to maintain the singly linked bucket list in
+  /// a folding set.
+  class Node {
+  private:
+    // NextInFoldingSetBucket - next link in the bucket list.
+    void *NextInFoldingSetBucket = nullptr;
+
+  public:
+    Node() = default;
+
+    // Accessors
+    void *getNextInBucket() const { return NextInFoldingSetBucket; }
+    void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
+  };
+
+  /// clear - Remove all nodes from the folding set.
+  void clear();
+
+  /// size - Returns the number of nodes in the folding set.
+  unsigned size() const { return NumNodes; }
+
+  /// empty - Returns true if there are no nodes in the folding set.
+  bool empty() const { return NumNodes == 0; }
+
+  /// reserve - Increase the number of buckets such that adding the
+  /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+  /// to allocate more space than requested by EltCount.
+  void reserve(unsigned EltCount);
+
+  /// capacity - Returns the number of nodes permitted in the folding set
+  /// before a rebucket operation is performed.
+  unsigned capacity() {
+    // We allow a load factor of up to 2.0,
+    // so that means our capacity is NumBuckets * 2
+    return NumBuckets * 2;
+  }
+
+private:
+  /// GrowHashTable - Double the size of the hash table and rehash everything.
+  void GrowHashTable();
+
+  /// GrowBucketCount - resize the hash table and rehash everything.
+  /// NewBucketCount must be a power of two, and must be greater than the old
+  /// bucket count.
+  void GrowBucketCount(unsigned NewBucketCount);
+
+protected:
+  /// GetNodeProfile - Instantiations of the FoldingSet template implement
+  /// this function to gather data bits for the given node.
+  virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
+
+  /// NodeEquals - Instantiations of the FoldingSet template implement
+  /// this function to compare the given node with the given ID.
+  virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                          FoldingSetNodeID &TempID) const=0;
+
+  /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+  /// this function to compute a hash value for the given node.
+  virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
+
+  // The below methods are protected to encourage subclasses to provide a more
+  // type-safe API.
+
+  /// RemoveNode - Remove a node from the folding set, returning true if one
+  /// was removed or false if the node was not in the folding set.
+  bool RemoveNode(Node *N);
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and return
+  /// it instead.
+  Node *GetOrInsertNode(Node *N);
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(Node *N, void *InsertPos);
+};
+
+//===----------------------------------------------------------------------===//
+
+/// DefaultFoldingSetTrait - This class provides default implementations
+/// for FoldingSetTrait implementations.
+template<typename T> struct DefaultFoldingSetTrait {
+  static void Profile(const T &X, FoldingSetNodeID &ID) {
+    X.Profile(ID);
+  }
+  static void Profile(T &X, FoldingSetNodeID &ID) {
+    X.Profile(ID);
+  }
+
+  // Equals - Test if the profile for X would match ID, using TempID
+  // to compute a temporary ID if necessary. The default implementation
+  // just calls Profile and does a regular comparison. Implementations
+  // can override this to provide more efficient implementations.
+  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+                            FoldingSetNodeID &TempID);
+
+  // ComputeHash - Compute a hash value for X, using TempID to
+  // compute a temporary ID if necessary. The default implementation
+  // just calls Profile and does a regular hash computation.
+  // Implementations can override this to provide more efficient
+  // implementations.
+  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
+};
+
+/// FoldingSetTrait - This trait class is used to define behavior of how
+/// to "profile" (in the FoldingSet parlance) an object of a given type.
+/// The default behavior is to invoke a 'Profile' method on an object, but
+/// through template specialization the behavior can be tailored for specific
+/// types.  Combined with the FoldingSetNodeWrapper class, one can add objects
+/// to FoldingSets that were not originally designed to have that behavior.
+template<typename T> struct FoldingSetTrait
+  : public DefaultFoldingSetTrait<T> {};
+
+/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
+/// for ContextualFoldingSets.
+template<typename T, typename Ctx>
+struct DefaultContextualFoldingSetTrait {
+  static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+    X.Profile(ID, Context);
+  }
+
+  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+                            FoldingSetNodeID &TempID, Ctx Context);
+  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
+                                     Ctx Context);
+};
+
+/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
+/// ContextualFoldingSets.
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait
+  : public DefaultContextualFoldingSetTrait<T, Ctx> {};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be useful for storing node id data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+  const unsigned *Data = nullptr;
+  size_t Size = 0;
+
+public:
+  FoldingSetNodeIDRef() = default;
+  FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
+
+  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+  /// used to lookup the node in the FoldingSetBase.
+  unsigned ComputeHash() const;
+
+  bool operator==(FoldingSetNodeIDRef) const;
+
+  bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }
+
+  /// Used to compare the "ordering" of two nodes as defined by the
+  /// profiled bits and their ordering defined by memcmp().
+  bool operator<(FoldingSetNodeIDRef) const;
+
+  const unsigned *getData() const { return Data; }
+  size_t getSize() const { return Size; }
+};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeID - This class is used to gather all the unique data bits of
+/// a node.  When all the bits are gathered this class is used to produce a
+/// hash value for the node.
+class FoldingSetNodeID {
+  /// Bits - Vector of all the data bits that make the node unique.
+  /// Use a SmallVector to avoid a heap allocation in the common case.
+  SmallVector<unsigned, 32> Bits;
+
+public:
+  FoldingSetNodeID() = default;
+
+  FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+    : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
+
+  /// Add* - Add various data types to the Bits data.
+  void AddPointer(const void *Ptr);
+  void AddInteger(signed I);
+  void AddInteger(unsigned I);
+  void AddInteger(long I);
+  void AddInteger(unsigned long I);
+  void AddInteger(long long I);
+  void AddInteger(unsigned long long I);
+  void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
+  void AddString(StringRef String);
+  void AddNodeID(const FoldingSetNodeID &ID);
+
+  template <typename T>
+  inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }
+
+  /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
+  /// object to be used to compute a new profile.
+  inline void clear() { Bits.clear(); }
+
+  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
+  /// to lookup the node in the FoldingSetBase.
+  unsigned ComputeHash() const;
+
+  /// operator== - Used to compare two nodes to each other.
+  bool operator==(const FoldingSetNodeID &RHS) const;
+  bool operator==(const FoldingSetNodeIDRef RHS) const;
+
+  bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
+  bool operator!=(const FoldingSetNodeIDRef RHS) const { return !(*this ==RHS);}
+
+  /// Used to compare the "ordering" of two nodes as defined by the
+  /// profiled bits and their ordering defined by memcmp().
+  bool operator<(const FoldingSetNodeID &RHS) const;
+  bool operator<(const FoldingSetNodeIDRef RHS) const;
+
+  /// Intern - Copy this node's data to a memory region allocated from the
+  /// given allocator and return a FoldingSetNodeIDRef describing the
+  /// interned data.
+  FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
+};
+
+// Convenience type to hide the implementation of the folding set.
+using FoldingSetNode = FoldingSetBase::Node;
+template<class T> class FoldingSetIterator;
+template<class T> class FoldingSetBucketIterator;
+
+// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
+// require the definition of FoldingSetNodeID.
+template<typename T>
+inline bool
+DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
+                                  unsigned /*IDHash*/,
+                                  FoldingSetNodeID &TempID) {
+  FoldingSetTrait<T>::Profile(X, TempID);
+  return TempID == ID;
+}
+template<typename T>
+inline unsigned
+DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
+  FoldingSetTrait<T>::Profile(X, TempID);
+  return TempID.ComputeHash();
+}
+template<typename T, typename Ctx>
+inline bool
+DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
+                                                 const FoldingSetNodeID &ID,
+                                                 unsigned /*IDHash*/,
+                                                 FoldingSetNodeID &TempID,
+                                                 Ctx Context) {
+  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+  return TempID == ID;
+}
+template<typename T, typename Ctx>
+inline unsigned
+DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
+                                                      FoldingSetNodeID &TempID,
+                                                      Ctx Context) {
+  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+  return TempID.ComputeHash();
+}
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - An implementation detail that lets us share code between
+/// FoldingSet and ContextualFoldingSet.
+template <class T> class FoldingSetImpl : public FoldingSetBase {
+protected:
+  explicit FoldingSetImpl(unsigned Log2InitSize)
+      : FoldingSetBase(Log2InitSize) {}
+
+  FoldingSetImpl(FoldingSetImpl &&Arg) = default;
+  FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
+  ~FoldingSetImpl() = default;
+
+public:
+  using iterator = FoldingSetIterator<T>;
+
+  iterator begin() { return iterator(Buckets); }
+  iterator end() { return iterator(Buckets+NumBuckets); }
+
+  using const_iterator = FoldingSetIterator<const T>;
+
+  const_iterator begin() const { return const_iterator(Buckets); }
+  const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+  using bucket_iterator = FoldingSetBucketIterator<T>;
+
+  bucket_iterator bucket_begin(unsigned hash) {
+    return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+  }
+
+  bucket_iterator bucket_end(unsigned hash) {
+    return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+  }
+
+  /// RemoveNode - Remove a node from the folding set, returning true if one
+  /// was removed or false if the node was not in the folding set.
+  bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); }
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and
+  /// return it instead.
+  T *GetOrInsertNode(T *N) {
+    return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N));
+  }
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+    return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos));
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(T *N, void *InsertPos) {
+    FoldingSetBase::InsertNode(N, InsertPos);
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.
+  void InsertNode(T *N) {
+    T *Inserted = GetOrInsertNode(N);
+    (void)Inserted;
+    assert(Inserted == N && "Node already inserted!");
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSet - This template class is used to instantiate a specialized
+/// implementation of the folding set to the node class T.  T must be a
+/// subclass of FoldingSetNode and implement a Profile function.
+///
+/// Note that this set type is movable and move-assignable. However, its
+/// moved-from state is not a valid state for anything other than
+/// move-assigning and destroying. This is primarily to enable movable APIs
+/// that incorporate these objects.
+template <class T> class FoldingSet final : public FoldingSetImpl<T> {
+  using Super = FoldingSetImpl<T>;
+  using Node = typename Super::Node;
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+  /// way to convert nodes into a unique specifier.
+  void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+    T *TN = static_cast<T *>(N);
+    FoldingSetTrait<T>::Profile(*TN, ID);
+  }
+
+  /// NodeEquals - Instantiations may optionally provide a way to compare a
+  /// node with a specified ID.
+  bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                  FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
+  }
+
+  /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
+  /// hash value directly from a node.
+  unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
+  }
+
+public:
+  explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {}
+  FoldingSet(FoldingSet &&Arg) = default;
+  FoldingSet &operator=(FoldingSet &&RHS) = default;
+};
+
+//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes.  Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+///   void Profile(FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet final : public FoldingSetImpl<T> {
+  // Unfortunately, this can't derive from FoldingSet<T> because the
+  // construction of the vtable for FoldingSet<T> requires
+  // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+  // requires a single-argument T::Profile().
+
+  using Super = FoldingSetImpl<T>;
+  using Node = typename Super::Node;
+
+  Ctx Context;
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+  /// way to convert nodes into a unique specifier.
+  void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+    T *TN = static_cast<T *>(N);
+    ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
+  }
+
+  bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                  FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
+                                                     Context);
+  }
+
+  unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
+  }
+
+public:
+  explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+      : Super(Log2InitSize), Context(Context) {}
+
+  Ctx getContext() const { return Context; }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
+template <class T, class VectorT = SmallVector<T*, 8>>
+class FoldingSetVector {
+  FoldingSet<T> Set;
+  VectorT Vector;
+
+public:
+  explicit FoldingSetVector(unsigned Log2InitSize = 6) : Set(Log2InitSize) {}
+
+  using iterator = pointee_iterator<typename VectorT::iterator>;
+
+  iterator begin() { return Vector.begin(); }
+  iterator end()   { return Vector.end(); }
+
+  using const_iterator = pointee_iterator<typename VectorT::const_iterator>;
+
+  const_iterator begin() const { return Vector.begin(); }
+  const_iterator end()   const { return Vector.end(); }
+
+  /// clear - Remove all nodes from the folding set.
+  void clear() { Set.clear(); Vector.clear(); }
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+    return Set.FindNodeOrInsertPos(ID, InsertPos);
+  }
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and
+  /// return it instead.
+  T *GetOrInsertNode(T *N) {
+    T *Result = Set.GetOrInsertNode(N);
+    if (Result == N) Vector.push_back(N);
+    return Result;
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(T *N, void *InsertPos) {
+    Set.InsertNode(N, InsertPos);
+    Vector.push_back(N);
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.
+  void InsertNode(T *N) {
+    Set.InsertNode(N);
+    Vector.push_back(N);
+  }
+
+  /// size - Returns the number of nodes in the folding set.
+  unsigned size() const { return Set.size(); }
+
+  /// empty - Returns true if there are no nodes in the folding set.
+  bool empty() const { return Set.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetIteratorImpl - This is the common iterator support shared by all
+/// folding sets, which knows how to walk the folding set hash table.
+class FoldingSetIteratorImpl {
+protected:
+  FoldingSetNode *NodePtr;
+
+  FoldingSetIteratorImpl(void **Bucket);
+
+  void advance();
+
+public:
+  bool operator==(const FoldingSetIteratorImpl &RHS) const {
+    return NodePtr == RHS.NodePtr;
+  }
+  bool operator!=(const FoldingSetIteratorImpl &RHS) const {
+    return NodePtr != RHS.NodePtr;
+  }
+};
+
+template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
+public:
+  explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}
+
+  T &operator*() const {
+    return *static_cast<T*>(NodePtr);
+  }
+
+  T *operator->() const {
+    return static_cast<T*>(NodePtr);
+  }
+
+  inline FoldingSetIterator &operator++() {          // Preincrement
+    advance();
+    return *this;
+  }
+  FoldingSetIterator operator++(int) {        // Postincrement
+    FoldingSetIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
+/// shared by all folding sets, which knows how to walk a particular bucket
+/// of a folding set hash table.
+class FoldingSetBucketIteratorImpl {
+protected:
+  void *Ptr;
+
+  explicit FoldingSetBucketIteratorImpl(void **Bucket);
+
+  FoldingSetBucketIteratorImpl(void **Bucket, bool) : Ptr(Bucket) {}
+
+  void advance() {
+    void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
+    uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
+    Ptr = reinterpret_cast<void*>(x);
+  }
+
+public:
+  bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
+    return Ptr == RHS.Ptr;
+  }
+  bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
+    return Ptr != RHS.Ptr;
+  }
+};
+
+template <class T>
+class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
+public:
+  explicit FoldingSetBucketIterator(void **Bucket) :
+    FoldingSetBucketIteratorImpl(Bucket) {}
+
+  FoldingSetBucketIterator(void **Bucket, bool) :
+    FoldingSetBucketIteratorImpl(Bucket, true) {}
+
+  T &operator*() const { return *static_cast<T*>(Ptr); }
+  T *operator->() const { return static_cast<T*>(Ptr); }
+
+  inline FoldingSetBucketIterator &operator++() { // Preincrement
+    advance();
+    return *this;
+  }
+  FoldingSetBucketIterator operator++(int) {      // Postincrement
+    FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
+/// types in an enclosing object so that they can be inserted into FoldingSets.
+template <typename T>
+class FoldingSetNodeWrapper : public FoldingSetNode {
+  T data;
+
+public:
+  template <typename... Ts>
+  explicit FoldingSetNodeWrapper(Ts &&... Args)
+      : data(std::forward<Ts>(Args)...) {}
+
+  void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }
+
+  T &getValue() { return data; }
+  const T &getValue() const { return data; }
+
+  operator T&() { return data; }
+  operator const T&() const { return data; }
+};
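+
+// For example (illustrative sketch), a type that already has a usable
+// FoldingSetTrait -- such as a pointer, profiled by address via the partial
+// specialization below -- can be uniqued without writing a dedicated node
+// class (MyData is hypothetical):
+//
+//   FoldingSet<FoldingSetNodeWrapper<MyData *>> Cache;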
+
+//===----------------------------------------------------------------------===//
+/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
+/// a FoldingSetNodeID value rather than requiring the node to recompute it
+/// each time it is needed. This trades space for speed (which can be
+/// significant if the ID is long), and it also permits nodes to drop
+/// information that would otherwise only be required for recomputing an ID.
+class FastFoldingSetNode : public FoldingSetNode {
+  FoldingSetNodeID FastID;
+
+protected:
+  explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
+
+public:
+  void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
+};
+
+//===----------------------------------------------------------------------===//
+// Partial specializations of FoldingSetTrait.
+
+template<typename T> struct FoldingSetTrait<T*> {
+  static inline void Profile(T *X, FoldingSetNodeID &ID) {
+    ID.AddPointer(X);
+  }
+};
+template <typename T1, typename T2>
+struct FoldingSetTrait<std::pair<T1, T2>> {
+  static inline void Profile(const std::pair<T1, T2> &P,
+                             FoldingSetNodeID &ID) {
+    ID.Add(P.first);
+    ID.Add(P.second);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FOLDINGSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/GraphTraits.h b/linux-x64/clang/include/llvm/ADT/GraphTraits.h
new file mode 100644
index 0000000..27c647f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/GraphTraits.h
@@ -0,0 +1,136 @@
+//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the little GraphTraits<X> template class that should be
+// specialized by classes that want to be iterable by generic graph iterators.
+//
+// This file also defines the marker class Inverse that is used to iterate over
+// graphs in a graph-defined, inverse ordering...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GRAPHTRAITS_H
+#define LLVM_ADT_GRAPHTRAITS_H
+
+#include "llvm/ADT/iterator_range.h"
+
+namespace llvm {
+
+// GraphTraits - This class should be specialized by different graph types...
+// which is why the default version is empty.
+//
+template<class GraphType>
+struct GraphTraits {
+  // Elements to provide:
+
+  // typedef NodeRef           - Type of Node token in the graph, which should
+  //                             be cheap to copy.
+  // typedef ChildIteratorType - Type used to iterate over children in graph,
+  //                             dereference to a NodeRef.
+
+  // static NodeRef getEntryNode(const GraphType &)
+  //    Return the entry node of the graph
+
+  // static ChildIteratorType child_begin(NodeRef)
+  // static ChildIteratorType child_end  (NodeRef)
+  //    Return iterators that point to the beginning and ending of the child
+  //    node list for the specified node.
+
+  // typedef  ...iterator nodes_iterator; - dereference to a NodeRef
+  // static nodes_iterator nodes_begin(GraphType *G)
+  // static nodes_iterator nodes_end  (GraphType *G)
+  //    nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+  // typedef EdgeRef           - Type of Edge token in the graph, which should
+  //                             be cheap to copy.
+  // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
+  //                             graph, dereference to a EdgeRef.
+
+  // static ChildEdgeIteratorType child_edge_begin(NodeRef)
+  // static ChildEdgeIteratorType child_edge_end(NodeRef)
+  //     Return iterators that point to the beginning and ending of the
+  //     edge list for the given graph node.
+  //
+  // static NodeRef edge_dest(EdgeRef)
+  //     Return the destination node of an edge.
+
+  // static unsigned       size       (GraphType *G)
+  //    Return total number of nodes in the graph
+
+  // If anyone tries to use this class without having an appropriate
+  // specialization, cause a compile error.  If you get this error, it's
+  // because you need to include the appropriate specialization of
+  // GraphTraits<> for your graph, or you need to define it for a new graph
+  // type. Either that or your argument to XXX_begin(...) is unknown or needs
+  // to have the proper .h file #include'd.
+  using NodeRef = typename GraphType::UnknownGraphTypeError;
+};
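+
+// For example (illustrative sketch), a minimal specialization for a toy
+// adjacency-list graph; all names here are hypothetical:
+//
+//   struct MyGraph {
+//     struct Node { std::vector<Node *> Succs; };
+//     Node *Entry;
+//   };
+//
+//   template <> struct GraphTraits<MyGraph *> {
+//     using NodeRef = MyGraph::Node *;
+//     using ChildIteratorType = std::vector<MyGraph::Node *>::iterator;
+//     static NodeRef getEntryNode(MyGraph *G) { return G->Entry; }
+//     static ChildIteratorType child_begin(NodeRef N) {
+//       return N->Succs.begin();
+//     }
+//     static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
+//   };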
+
+// Inverse - This class is used as a little marker class to tell the graph
+// iterator to iterate over the graph in a graph defined "Inverse" ordering.
+// Not all graphs define an inverse ordering, and if they do, it depends on
+// the graph exactly what that is.  Here's an example of usage with the
+// df_iterator:
+//
+// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+// Which is equivalent to:
+// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+template <class GraphType>
+struct Inverse {
+  const GraphType &Graph;
+
+  inline Inverse(const GraphType &G) : Graph(G) {}
+};
+
+// Provide a partial specialization of GraphTraits so that the inverse of an
+// inverse falls back to the original graph.
+template <class T> struct GraphTraits<Inverse<Inverse<T>>> : GraphTraits<T> {};
+
+// Provide iterator ranges for the graph traits nodes and children
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::nodes_iterator>
+nodes(const GraphType &G) {
+  return make_range(GraphTraits<GraphType>::nodes_begin(G),
+                    GraphTraits<GraphType>::nodes_end(G));
+}
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::nodes_iterator>
+inverse_nodes(const GraphType &G) {
+  return make_range(GraphTraits<Inverse<GraphType>>::nodes_begin(G),
+                    GraphTraits<Inverse<GraphType>>::nodes_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildIteratorType>
+children(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<GraphType>::child_begin(G),
+                    GraphTraits<GraphType>::child_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::ChildIteratorType>
+inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
+                    GraphTraits<Inverse<GraphType>>::child_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildEdgeIteratorType>
+children_edges(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<GraphType>::child_edge_begin(G),
+                    GraphTraits<GraphType>::child_edge_end(G));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_GRAPHTRAITS_H
diff --git a/linux-x64/clang/include/llvm/ADT/Hashing.h b/linux-x64/clang/include/llvm/ADT/Hashing.h
new file mode 100644
index 0000000..c3b5741
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Hashing.h
@@ -0,0 +1,661 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+//  -- 'hash_code' class is an opaque type representing the hash code for some
+//     data. It is the intended product of hashing, and can be used to implement
+//     hash tables, checksumming, and other common uses of hashes. It is not an
+//     integer type (although it can be converted to one) because it is risky
+//     to assume much about the internals of a hash_code. In particular, each
+//     execution of the program has a high probability of producing a different
+//     hash_code for a given input. Thus their values are not stable; do not
+//     save or persist them, and use them only within a single execution for
+//     the construction of hashing data structures.
+//
+//  -- 'hash_value' is a function designed to be overloaded for each
+//     user-defined type which wishes to be used within a hashing context. It
+//     should be overloaded within the user-defined type's namespace and found
+//     via ADL. Overloads for primitive types are provided by this library.
+//
+//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+//      programmers in easily and intuitively combining a set of data into
+//      a single hash_code for their object. They should only logically be used
+//      within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
+// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
+// under 32 bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+/// \brief An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+///   using llvm::hash_value;
+///   llvm::hash_code code = hash_value(x);
+/// \endcode
+class hash_code {
+  size_t value;
+
+public:
+  /// \brief Default construct a hash_code.
+  /// Note that this leaves the value uninitialized.
+  hash_code() = default;
+
+  /// \brief Form a hash code directly from a numerical value.
+  hash_code(size_t value) : value(value) {}
+
+  /// \brief Convert the hash code to its numerical value for use.
+  /*explicit*/ operator size_t() const { return value; }
+
+  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value == rhs.value;
+  }
+  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value != rhs.value;
+  }
+
+  /// \brief Allow a hash_code to be directly run through hash_value.
+  friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// \brief Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicitly promote to a common
+/// type without changing the value.
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value);
+
+/// \brief Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// \brief Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// \brief Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
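+
+// An illustrative sketch (not part of the header) of obtaining hash codes
+// from the overloads declared above; the local names here are hypothetical.
+//
+// \code
+//   int i = 42;
+//   std::string s = "hello";
+//   std::pair<int, char> p(1, 'a');
+//   llvm::hash_code h1 = llvm::hash_value(i);   // integral overload
+//   llvm::hash_code h2 = llvm::hash_value(&i);  // hashes the *address* of i
+//   llvm::hash_code h3 = llvm::hash_value(p);   // pair overload
+//   llvm::hash_code h4 = llvm::hash_value(s);   // basic_string overload
+// \endcode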
+
+
+/// \brief Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile, and practical to use only when
+/// called immediately at the start of a simple program designed for
+/// reproducible
+/// behavior.
+void set_fixed_execution_hash_seed(size_t fixed_value);
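+
+// An illustrative sketch of the intended usage pattern, assuming a simple
+// program that needs run-to-run reproducible hash codes; the seed value here
+// is arbitrary.
+//
+// \code
+//   int main(int argc, char **argv) {
+//     llvm::set_fixed_execution_hash_seed(0x12345678);
+//     // ... all hashing from here on is reproducible across runs ...
+//   }
+// \endcode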
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+  uint64_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+  uint32_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// \brief Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+  // Avoid shifting by 64: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+  return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (low ^ high) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (high ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+  uint8_t a = s[0];
+  uint8_t b = s[len >> 1];
+  uint8_t c = s[len - 1];
+  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+  uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch32(s);
+  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s);
+  uint64_t b = fetch64(s + len - 8);
+  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s) * k1;
+  uint64_t b = fetch64(s + 8);
+  uint64_t c = fetch64(s + len - 8) * k2;
+  uint64_t d = fetch64(s + len - 16) * k0;
+  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+                       a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t z = fetch64(s + 24);
+  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+  uint64_t b = rotate(a + z, 52);
+  uint64_t c = rotate(a, 37);
+  a += fetch64(s + 8);
+  c += rotate(a, 7);
+  a += fetch64(s + 16);
+  uint64_t vf = a + z;
+  uint64_t vs = b + rotate(a, 31) + c;
+  a = fetch64(s + 16) + fetch64(s + len - 32);
+  z = fetch64(s + len - 8);
+  b = rotate(a + z, 52);
+  c = rotate(a, 37);
+  a += fetch64(s + len - 24);
+  c += rotate(a, 7);
+  a += fetch64(s + len - 16);
+  uint64_t wf = a + z;
+  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+  return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+  if (length >= 4 && length <= 8)
+    return hash_4to8_bytes(s, length, seed);
+  if (length > 8 && length <= 16)
+    return hash_9to16_bytes(s, length, seed);
+  if (length > 16 && length <= 32)
+    return hash_17to32_bytes(s, length, seed);
+  if (length > 32)
+    return hash_33to64_bytes(s, length, seed);
+  if (length != 0)
+    return hash_1to3_bytes(s, length, seed);
+
+  return k2 ^ seed;
+}
+
+/// \brief The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+  uint64_t h0, h1, h2, h3, h4, h5, h6;
+
+  /// \brief Create a new hash_state structure and initialize it based on the
+  /// seed and the first 64-byte chunk.
+  /// This effectively performs the initial mix.
+  static hash_state create(const char *s, uint64_t seed) {
+    hash_state state = {
+      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+      seed * k1, shift_mix(seed), 0 };
+    state.h6 = hash_16_bytes(state.h4, state.h5);
+    state.mix(s);
+    return state;
+  }
+
+  /// \brief Mix 32 bytes from the input sequence into the 16 bytes of 'a'
+  /// and 'b', including whatever is already in 'a' and 'b'.
+  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+    a += fetch64(s);
+    uint64_t c = fetch64(s + 24);
+    b = rotate(b + a + c, 21);
+    uint64_t d = a;
+    a += fetch64(s + 8) + fetch64(s + 16);
+    b += rotate(a, 44) + d;
+    a += c;
+  }
+
+  /// \brief Mix in a 64-byte buffer of data.
+  /// We mix all 64 bytes even when the chunk length is smaller, but we
+  /// record the actual length.
+  void mix(const char *s) {
+    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 ^= h6;
+    h1 += h3 + fetch64(s + 40);
+    h2 = rotate(h2 + h5, 33) * k1;
+    h3 = h4 * k1;
+    h4 = h0 + h5;
+    mix_32_bytes(s, h3, h4);
+    h5 = h2 + h6;
+    h6 = h1 + fetch64(s + 16);
+    mix_32_bytes(s + 32, h5, h6);
+    std::swap(h2, h0);
+  }
+
+  /// \brief Compute the final 64-bit hash code value based on the current
+  /// state and the length of bytes hashed.
+  uint64_t finalize(size_t length) {
+    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+  }
+};
+
+
+/// \brief A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern size_t fixed_seed_override;
+
+inline size_t get_execution_seed() {
+  // FIXME: This needs to be a per-execution seed. This is just a placeholder
+  // implementation. Switching to a per-execution seed is likely to flush out
+  // instability bugs and so will happen as its own commit.
+  //
+  // However, if there is a fixed seed override set the first time this is
+  // called, return that instead of the per-execution seed.
+  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+  static size_t seed = fixed_seed_override ? fixed_seed_override
+                                           : (size_t)seed_prime;
+  return seed;
+}
+
+
+/// \brief Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+                                   std::is_pointer<T>::value) &&
+                                  64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+  : std::integral_constant<bool, (is_hashable_data<T>::value &&
+                                  is_hashable_data<U>::value &&
+                                  (sizeof(T) + sizeof(U)) ==
+                                   sizeof(std::pair<T, U>))> {};
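+
+// Illustrative expectations for this trait (a sketch, not exhaustive):
+//
+// \code
+//   static_assert(is_hashable_data<int>::value,
+//                 "integers are combined from their raw bytes");
+//   static_assert(is_hashable_data<int *>::value,
+//                 "pointers are combined from their raw bytes");
+//   static_assert(!is_hashable_data<std::string>::value,
+//                 "strings are routed through hash_value instead");
+// \endcode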
+
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+typename std::enable_if<is_hashable_data<T>::value, T>::type
+get_hashable_data(const T &value) {
+  return value;
+}
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
+get_hashable_data(const T &value) {
+  using ::llvm::hash_value;
+  return hash_value(value);
+}
+
+/// \brief Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+                       size_t offset = 0) {
+  size_t store_size = sizeof(value) - offset;
+  if (buffer_ptr + store_size > buffer_end)
+    return false;
+  const char *value_data = reinterpret_cast<const char *>(&value);
+  memcpy(buffer_ptr, value_data + offset, store_size);
+  buffer_ptr += store_size;
+  return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+  const size_t seed = get_execution_seed();
+  char buffer[64], *buffer_ptr = buffer;
+  char *const buffer_end = std::end(buffer);
+  while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                            get_hashable_data(*first)))
+    ++first;
+  if (first == last)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+  assert(buffer_ptr == buffer_end);
+
+  hash_state state = hash_state::create(buffer, seed);
+  size_t length = 64;
+  while (first != last) {
+    // Fill up the buffer. We don't clear it, which re-mixes the last round
+    // when only a partial 64-byte chunk is left.
+    buffer_ptr = buffer;
+    while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                              get_hashable_data(*first)))
+      ++first;
+
+    // Rotate the buffer if we did a partial fill in order to simulate doing
+    // a mix of the last 64-bytes. That is how the algorithm works when we
+    // have a contiguous byte sequence, and we want to emulate that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+  }
+
+  return state.finalize(length);
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+  const size_t seed = get_execution_seed();
+  const char *s_begin = reinterpret_cast<const char *>(first);
+  const char *s_end = reinterpret_cast<const char *>(last);
+  const size_t length = std::distance(s_begin, s_end);
+  if (length <= 64)
+    return hash_short(s_begin, length, seed);
+
+  const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = hash_state::create(s_begin, seed);
+  s_begin += 64;
+  while (s_begin != s_aligned_end) {
+    state.mix(s_begin);
+    s_begin += 64;
+  }
+  if (length & 63)
+    state.mix(s_end - 64);
+
+  return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// \brief Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
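+
+// An illustrative sketch: hashing a contiguous sequence of integers. Using
+// raw pointers guarantees the fast pointer-based overload above is selected;
+// 'V' is a hypothetical local value.
+//
+// \code
+//   std::vector<int> V = {1, 2, 3, 4};
+//   llvm::hash_code H =
+//       llvm::hash_combine_range(V.data(), V.data() + V.size());
+// \endcode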
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful for minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+  char buffer[64];
+  hash_state state;
+  const size_t seed;
+
+public:
+  /// \brief Construct a recursive hash combining helper.
+  ///
+  /// This sets up the state for a recursive hash combine, including getting
+  /// the seed and buffer setup.
+  hash_combine_recursive_helper()
+    : seed(get_execution_seed()) {}
+
+  /// \brief Combine one chunk of data into the current in-flight hash.
+  ///
+  /// This merges one chunk of data into the hash. First it tries to buffer
+  /// the data. If the buffer is full, it hashes the buffer into its
+  /// hash_state, empties it, and then merges the new chunk in. This also
+  /// handles cases where the data straddles the end of the buffer.
+  template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
+                     T data) {
+    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+      // Check for skew which prevents the buffer from being packed, and do
+      // a partial store into the buffer to fill it. This is only a concern
+      // with the variadic combine because that formation can have varying
+      // argument types.
+      size_t partial_store_size = buffer_end - buffer_ptr;
+      memcpy(buffer_ptr, &data, partial_store_size);
+
+      // If the store fails, our buffer is full and ready to hash. We have to
+      // either initialize the hash state (on the first full buffer) or mix
+      // this buffer into the existing hash state. Length tracks the *hashed*
+      // length, not the buffered length.
+      if (length == 0) {
+        state = hash_state::create(buffer, seed);
+        length = 64;
+      } else {
+        // Mix this chunk into the current state and bump length up by 64.
+        state.mix(buffer);
+        length += 64;
+      }
+      // Reset the buffer_ptr to the head of the buffer for the next chunk of
+      // data.
+      buffer_ptr = buffer;
+
+      // Try again to store into the buffer -- this cannot fail as we only
+      // store types smaller than the buffer.
+      if (!store_and_advance(buffer_ptr, buffer_end, data,
+                             partial_store_size))
+        abort();
+    }
+    return buffer_ptr;
+  }
+
+  /// \brief Recursive, variadic combining method.
+  ///
+  /// This function recurses through each argument, combining that argument
+  /// into a single hash.
+  template <typename T, typename ...Ts>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T &arg, const Ts &...args) {
+    buffer_ptr =
+        combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+    // Recurse to the next argument.
+    return combine(length, buffer_ptr, buffer_end, args...);
+  }
+
+  /// \brief Base case for recursive, variadic combining.
+  ///
+  /// The base case when combining arguments recursively is reached when all
+  /// arguments have been handled. It flushes the remaining buffer and
+  /// constructs a hash_code.
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+    // Check whether the entire set of values fit in the buffer. If so, we'll
+    // use the optimized short hashing routine and skip state entirely.
+    if (length == 0)
+      return hash_short(buffer, buffer_ptr - buffer, seed);
+
+    // Mix the final buffer, rotating it if we did a partial fill in order to
+    // simulate doing a mix of the last 64-bytes. That is how the algorithm
+    // works when we have a contiguous byte sequence, and we want to emulate
+    // that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+
+    return state.finalize(length);
+  }
+};
+
+} // namespace detail
+} // namespace hashing
+
+/// \brief Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine, they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+  // Recursively hash each argument using a helper class.
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
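+
+// An illustrative sketch of the intended pattern: a user-defined type
+// provides a hash_value overload in its own namespace so that it is found
+// via ADL. The 'Point' type and 'geom' namespace are hypothetical.
+//
+// \code
+//   namespace geom {
+//     struct Point { int x, y; };
+//     llvm::hash_code hash_value(const Point &P) {
+//       return llvm::hash_combine(P.x, P.y);
+//     }
+//   } // namespace geom
+// \endcode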
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+  // Similar to hash_4to8_bytes but using a seed instead of length.
+  const uint64_t seed = get_execution_seed();
+  const char *s = reinterpret_cast<const char *>(&value);
+  const uint64_t a = fetch32(s);
+  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value) {
+  return ::llvm::hashing::detail::hash_integer_value(
+      static_cast<uint64_t>(value));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+  return ::llvm::hashing::detail::hash_integer_value(
+    reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+  return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+  return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableList.h b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
new file mode 100644
index 0000000..60d63e0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
@@ -0,0 +1,235 @@
+//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableList class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLELIST_H
+#define LLVM_ADT_IMMUTABLELIST_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <new>
+
+namespace llvm {
+
+template <typename T> class ImmutableListFactory;
+
+template <typename T>
+class ImmutableListImpl : public FoldingSetNode {
+  friend class ImmutableListFactory<T>;
+
+  T Head;
+  const ImmutableListImpl* Tail;
+
+  ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
+    : Head(head), Tail(tail) {}
+
+public:
+  ImmutableListImpl(const ImmutableListImpl &) = delete;
+  ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;
+
+  const T& getHead() const { return Head; }
+  const ImmutableListImpl* getTail() const { return Tail; }
+
+  static inline void Profile(FoldingSetNodeID& ID, const T& H,
+                             const ImmutableListImpl* L){
+    ID.AddPointer(L);
+    ID.Add(H);
+  }
+
+  void Profile(FoldingSetNodeID& ID) {
+    Profile(ID, Head, Tail);
+  }
+};
+
+/// ImmutableList - This class represents an immutable (functional) list.
+///  It is implemented as a smart pointer (wraps ImmutableListImpl), so it
+///  is intended to always be copied by value as if it were a pointer.
+///  This interface matches ImmutableSet and ImmutableMap.  ImmutableList
+///  objects should almost never be created directly, and instead should
+///  be created by ImmutableListFactory objects that manage the lifetime
+///  of a group of lists.  When the factory object is reclaimed, all lists
+///  created by that factory are released as well.
+template <typename T>
+class ImmutableList {
+public:
+  using value_type = T;
+  using Factory = ImmutableListFactory<T>;
+
+private:
+  const ImmutableListImpl<T>* X;
+
+public:
+  // This constructor should normally only be called by ImmutableListFactory<T>.
+  // There may be cases, however, when one needs to extract the internal pointer
+  // and reconstruct a list object from that pointer.
+  ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
+
+  const ImmutableListImpl<T>* getInternalPointer() const {
+    return X;
+  }
+
+  class iterator {
+    const ImmutableListImpl<T>* L = nullptr;
+
+  public:
+    iterator() = default;
+    iterator(ImmutableList l) : L(l.getInternalPointer()) {}
+
+    iterator& operator++() { L = L->getTail(); return *this; }
+    bool operator==(const iterator& I) const { return L == I.L; }
+    bool operator!=(const iterator& I) const { return L != I.L; }
+    const value_type& operator*() const { return L->getHead(); }
+
+    ImmutableList getList() const { return L; }
+  };
+
+  /// begin - Returns an iterator referring to the head of the list, or
+  ///  an iterator denoting the end of the list if the list is empty.
+  iterator begin() const { return iterator(X); }
+
+  /// end - Returns an iterator denoting the end of the list.  This iterator
+  ///  does not refer to a valid list element.
+  iterator end() const { return iterator(); }
+
+  /// isEmpty - Returns true if the list is empty.
+  bool isEmpty() const { return !X; }
+
+  bool contains(const T& V) const {
+    for (iterator I = begin(), E = end(); I != E; ++I) {
+      if (*I == V)
+        return true;
+    }
+    return false;
+  }
+
+  /// isEqual - Returns true if two lists are equal.  Because all lists created
+  ///  from the same ImmutableListFactory are uniqued, this has O(1) complexity
+  ///  because the contents of the lists do not need to be compared.  Note
+  ///  that you should only compare two lists created from the same
+  ///  ImmutableListFactory.
+  bool isEqual(const ImmutableList& L) const { return X == L.X; }
+
+  bool operator==(const ImmutableList& L) const { return isEqual(L); }
+
+  /// getHead - Returns the head of the list.
+  const T& getHead() {
+    assert(!isEmpty() && "Cannot get the head of an empty list.");
+    return X->getHead();
+  }
+
+  /// getTail - Returns the tail of the list, which is another (possibly empty)
+  ///  ImmutableList.
+  ImmutableList getTail() {
+    return X ? X->getTail() : nullptr;
+  }
+
+  void Profile(FoldingSetNodeID& ID) const {
+    ID.AddPointer(X);
+  }
+};
+
+template <typename T>
+class ImmutableListFactory {
+  using ListTy = ImmutableListImpl<T>;
+  using CacheTy = FoldingSet<ListTy>;
+
+  CacheTy Cache;
+  uintptr_t Allocator;
+
+  bool ownsAllocator() const {
+    return (Allocator & 0x1) == 0;
+  }
+
+  BumpPtrAllocator& getAllocator() const {
+    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+  }
+
+public:
+  ImmutableListFactory()
+    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+  ImmutableListFactory(BumpPtrAllocator& Alloc)
+  : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+  ~ImmutableListFactory() {
+    if (ownsAllocator()) delete &getAllocator();
+  }
+
+  ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
+    // Profile the new list to see if it already exists in our cache.
+    FoldingSetNodeID ID;
+    void* InsertPos;
+
+    const ListTy* TailImpl = Tail.getInternalPointer();
+    ListTy::Profile(ID, Head, TailImpl);
+    ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+    if (!L) {
+      // The list does not exist in our cache.  Create it.
+      BumpPtrAllocator& A = getAllocator();
+      L = (ListTy*) A.Allocate<ListTy>();
+      new (L) ListTy(Head, TailImpl);
+
+      // Insert the new list into the cache.
+      Cache.InsertNode(L, InsertPos);
+    }
+
+    return L;
+  }
+
+  ImmutableList<T> add(const T& D, ImmutableList<T> L) {
+    return concat(D, L);
+  }
+
+  ImmutableList<T> getEmptyList() const {
+    return ImmutableList<T>(nullptr);
+  }
+
+  ImmutableList<T> create(const T& X) {
+    return concat(X, getEmptyList());
+  }
+};
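+
+// An illustrative sketch: lists are built head-first through a factory, so
+// the list below reads [3, 2, 1]. The names 'F', 'L', and 'use' are
+// hypothetical.
+//
+// \code
+//   llvm::ImmutableListFactory<int> F;
+//   llvm::ImmutableList<int> L = F.getEmptyList();
+//   L = F.add(1, L);
+//   L = F.add(2, L);
+//   L = F.add(3, L);
+//   for (llvm::ImmutableList<int>::iterator I = L.begin(), E = L.end();
+//        I != E; ++I)
+//     use(*I);  // visits 3, then 2, then 1
+// \endcode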
+
+//===----------------------------------------------------------------------===//
+// Partially-specialized Traits.
+//===----------------------------------------------------------------------===//
+
+template<typename T> struct DenseMapInfo;
+template<typename T> struct DenseMapInfo<ImmutableList<T>> {
+  static inline ImmutableList<T> getEmptyKey() {
+    return reinterpret_cast<ImmutableListImpl<T>*>(-1);
+  }
+
+  static inline ImmutableList<T> getTombstoneKey() {
+    return reinterpret_cast<ImmutableListImpl<T>*>(-2);
+  }
+
+  static unsigned getHashValue(ImmutableList<T> X) {
+    uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
+    return (unsigned(PtrVal) >> 4) ^ (unsigned(PtrVal) >> 9);
+  }
+
+  static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
+    return X1 == X2;
+  }
+};
+
+template <typename T> struct isPodLike;
+template <typename T>
+struct isPodLike<ImmutableList<T>> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLELIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableMap.h b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
new file mode 100644
index 0000000..10d1e1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
@@ -0,0 +1,414 @@
+//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLEMAP_H
+#define LLVM_ADT_IMMUTABLEMAP_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Allocator.h"
+#include <utility>
+
+namespace llvm {
+
+/// ImutKeyValueInfo - Traits class used by ImmutableMap.  While both the first
+/// and second elements in a pair are used to generate profile information,
+/// only the first element (the key) is used by isEqual and isLess.
+template <typename T, typename S>
+struct ImutKeyValueInfo {
+  using value_type = const std::pair<T,S>;
+  using value_type_ref = const value_type&;
+  using key_type = const T;
+  using key_type_ref = const T&;
+  using data_type = const S;
+  using data_type_ref = const S&;
+
+  static inline key_type_ref KeyOfValue(value_type_ref V) {
+    return V.first;
+  }
+
+  static inline data_type_ref DataOfValue(value_type_ref V) {
+    return V.second;
+  }
+
+  static inline bool isEqual(key_type_ref L, key_type_ref R) {
+    return ImutContainerInfo<T>::isEqual(L,R);
+  }
+  static inline bool isLess(key_type_ref L, key_type_ref R) {
+    return ImutContainerInfo<T>::isLess(L,R);
+  }
+
+  static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
+    return ImutContainerInfo<S>::isEqual(L,R);
+  }
+
+  static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
+    ImutContainerInfo<T>::Profile(ID, V.first);
+    ImutContainerInfo<S>::Profile(ID, V.second);
+  }
+};
+
+template <typename KeyT, typename ValT,
+          typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMap {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using key_type = typename ValInfo::key_type;
+  using key_type_ref = typename ValInfo::key_type_ref;
+  using data_type = typename ValInfo::data_type;
+  using data_type_ref = typename ValInfo::data_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+
+protected:
+  TreeTy* Root;
+
+public:
+  /// Constructs a map from a pointer to a tree root.  In general one
+  /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableMap() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableMap &operator=(const ImmutableMap &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+    }
+    return *this;
+  }
+
+  class Factory {
+    typename TreeTy::Factory F;
+    const bool Canonicalize;
+
+  public:
+    Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
+
+    Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
+        : F(Alloc), Canonicalize(canonicalize) {}
+
+    Factory(const Factory &) = delete;
+    Factory &operator=(const Factory &) = delete;
+
+    ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
+
+    ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
+      TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
+      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+    }
+
+    ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+      TreeTy *T = F.remove(Old.Root,K);
+      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+    }
+
+    typename TreeTy::Factory *getTreeFactory() const {
+      return const_cast<typename TreeTy::Factory *>(&F);
+    }
+  };
+
+  bool contains(key_type_ref K) const {
+    return Root ? Root->contains(K) : false;
+  }
+
+  bool operator==(const ImmutableMap &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableMap &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  TreeTy *getRoot() const {
+    if (Root) { Root->retain(); }
+    return Root;
+  }
+
+  TreeTy *getRootWithoutRetain() const { return Root; }
+
+  void manualRetain() {
+    if (Root) Root->retain();
+  }
+
+  void manualRelease() {
+    if (Root) Root->release();
+  }
+
+  bool isEmpty() const { return !Root; }
+
+  //===--------------------------------------------------===//
+  // Foreach - A limited form of map iteration.
+  //===--------------------------------------------------===//
+
+private:
+  template <typename Callback>
+  struct CBWrapper {
+    Callback C;
+
+    void operator()(value_type_ref V) { C(V.first,V.second); }
+  };
+
+  template <typename Callback>
+  struct CBWrapperRef {
+    Callback &C;
+
+    CBWrapperRef(Callback& c) : C(c) {}
+
+    void operator()(value_type_ref V) { C(V.first,V.second); }
+  };
+
+public:
+  template <typename Callback>
+  void foreach(Callback& C) {
+    if (Root) {
+      CBWrapperRef<Callback> CB(C);
+      Root->foreach(CB);
+    }
+  }
+
+  template <typename Callback>
+  void foreach() {
+    if (Root) {
+      CBWrapper<Callback> CB;
+      Root->foreach(CB);
+    }
+  }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void verify() const { if (Root) Root->verify(); }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  class iterator : public ImutAVLValueIterator<ImmutableMap> {
+    friend class ImmutableMap;
+
+    iterator() = default;
+    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+  public:
+    key_type_ref getKey() const { return (*this)->first; }
+    data_type_ref getData() const { return (*this)->second; }
+  };
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  data_type* lookup(key_type_ref K) const {
+    if (Root) {
+      TreeTy* T = Root->find(K);
+      if (T) return &T->getValue().second;
+    }
+
+    return nullptr;
+  }
+
+  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+  ///  which key is the highest in the ordering of keys in the map.  This
+  ///  method returns NULL if the map is empty.
+  value_type* getMaxElement() const {
+    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+  }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
+    ID.AddPointer(M.Root);
+  }
+
+  inline void Profile(FoldingSetNodeID& ID) const {
+    return Profile(ID,*this);
+  }
+};
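+
+// An illustrative sketch: add() and remove() go through a Factory and return
+// new map values; existing maps are never modified in place. Names here are
+// hypothetical.
+//
+// \code
+//   llvm::ImmutableMap<int, int>::Factory F;
+//   llvm::ImmutableMap<int, int> M0 = F.getEmptyMap();
+//   llvm::ImmutableMap<int, int> M1 = F.add(M0, 1, 10);
+//   const int *D = M1.lookup(1);  // points at 10
+//   // M0 is still empty; M1 holds the single entry.
+// \endcode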
+
+// NOTE: This will possibly become the new implementation of ImmutableMap some day.
+template <typename KeyT, typename ValT,
+typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMapRef {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using key_type = typename ValInfo::key_type;
+  using key_type_ref = typename ValInfo::key_type_ref;
+  using data_type = typename ValInfo::data_type;
+  using data_type_ref = typename ValInfo::data_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+  using FactoryTy = typename TreeTy::Factory;
+
+protected:
+  TreeTy *Root;
+  FactoryTy *Factory;
+
+public:
+  /// Constructs a map from a pointer to a tree root.  In general one
+  /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+      : Root(const_cast<TreeTy *>(R)), Factory(F) {
+    if (Root) {
+      Root->retain();
+    }
+  }
+
+  explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+                           typename ImmutableMap<KeyT, ValT>::Factory &F)
+    : Root(X.getRootWithoutRetain()),
+      Factory(F.getTreeFactory()) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) {
+    if (Root) {
+      Root->retain();
+    }
+  }
+
+  ~ImmutableMapRef() {
+    if (Root)
+      Root->release();
+  }
+
+  ImmutableMapRef &operator=(const ImmutableMapRef &X) {
+    if (Root != X.Root) {
+      if (X.Root)
+        X.Root->retain();
+
+      if (Root)
+        Root->release();
+
+      Root = X.Root;
+      Factory = X.Factory;
+    }
+    return *this;
+  }
+
+  static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
+    return ImmutableMapRef(nullptr, F);
+  }
+
+  void manualRetain() {
+    if (Root) Root->retain();
+  }
+
+  void manualRelease() {
+    if (Root) Root->release();
+  }
+
+  ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
+    TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D));
+    return ImmutableMapRef(NewT, Factory);
+  }
+
+  ImmutableMapRef remove(key_type_ref K) const {
+    TreeTy *NewT = Factory->remove(Root, K);
+    return ImmutableMapRef(NewT, Factory);
+  }
+
+  bool contains(key_type_ref K) const {
+    return Root ? Root->contains(K) : false;
+  }
+
+  ImmutableMap<KeyT, ValT> asImmutableMap() const {
+    return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root));
+  }
+
+  bool operator==(const ImmutableMapRef &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableMapRef &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  bool isEmpty() const { return !Root; }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void verify() const {
+    if (Root)
+      Root->verify();
+  }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+    friend class ImmutableMapRef;
+
+    iterator() = default;
+    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+  public:
+    key_type_ref getKey() const { return (*this)->first; }
+    data_type_ref getData() const { return (*this)->second; }
+  };
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  data_type *lookup(key_type_ref K) const {
+    if (Root) {
+      TreeTy* T = Root->find(K);
+      if (T) return &T->getValue().second;
+    }
+
+    return nullptr;
+  }
+
+  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+  ///  which key is the highest in the ordering of keys in the map.  This
+  ///  method returns NULL if the map is empty.
+  value_type* getMaxElement() const {
+    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+  }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
+    ID.AddPointer(M.Root);
+  }
+
+  inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+};
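+
+// An illustrative sketch: an ImmutableMapRef carries its factory, so add()
+// and remove() need no factory argument, and asImmutableMap() canonicalizes
+// the result. Names here are hypothetical.
+//
+// \code
+//   llvm::ImmutableMap<int, int>::Factory F;
+//   llvm::ImmutableMapRef<int, int> R =
+//       llvm::ImmutableMapRef<int, int>::getEmptyMap(F.getTreeFactory());
+//   R = R.add(1, 10).add(2, 20);
+//   llvm::ImmutableMap<int, int> M = R.asImmutableMap();
+// \endcode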
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLEMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableSet.h b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
new file mode 100644
index 0000000..9d580c5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
@@ -0,0 +1,1224 @@
+//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImutAVLTree and ImmutableSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLESET_H
+#define LLVM_ADT_IMMUTABLESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <new>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Definition.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLFactory;
+template <typename ImutInfo> class ImutIntervalAVLFactory;
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
+template <typename ImutInfo> class ImutAVLTreeGenericIterator;
+
+template <typename ImutInfo >
+class ImutAVLTree {
+public:
+  using key_type_ref = typename ImutInfo::key_type_ref;
+  using value_type = typename ImutInfo::value_type;
+  using value_type_ref = typename ImutInfo::value_type_ref;
+  using Factory = ImutAVLFactory<ImutInfo>;
+  using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
+
+  friend class ImutAVLFactory<ImutInfo>;
+  friend class ImutIntervalAVLFactory<ImutInfo>;
+  friend class ImutAVLTreeGenericIterator<ImutInfo>;
+
+  //===----------------------------------------------------===//
+  // Public Interface.
+  //===----------------------------------------------------===//
+
+  /// Return a pointer to the left subtree.  This value
+  ///  is NULL if there is no left subtree.
+  ImutAVLTree *getLeft() const { return left; }
+
+  /// Return a pointer to the right subtree.  This value is
+  ///  NULL if there is no right subtree.
+  ImutAVLTree *getRight() const { return right; }
+
+  /// getHeight - Returns the height of the tree.  A tree with no subtrees
+  ///  has a height of 1.
+  unsigned getHeight() const { return height; }
+
+  /// getValue - Returns the data value associated with the tree node.
+  const value_type& getValue() const { return value; }
+
+  /// find - Finds the subtree associated with the specified key value.
+  ///  This method returns NULL if no matching subtree is found.
+  ImutAVLTree* find(key_type_ref K) {
+    ImutAVLTree *T = this;
+    while (T) {
+      key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+      if (ImutInfo::isEqual(K,CurrentKey))
+        return T;
+      else if (ImutInfo::isLess(K,CurrentKey))
+        T = T->getLeft();
+      else
+        T = T->getRight();
+    }
+    return nullptr;
+  }
+
+  /// getMaxElement - Find the subtree associated with the highest ranked
+  ///  key value.
+  ImutAVLTree* getMaxElement() {
+    ImutAVLTree *T = this;
+    ImutAVLTree *Right = T->getRight();
+    while (Right) { T = Right; Right = T->getRight(); }
+    return T;
+  }
+
+  /// size - Returns the number of nodes in the tree, which includes
+  ///  both leaves and non-leaf nodes.
+  unsigned size() const {
+    unsigned n = 1;
+    if (const ImutAVLTree* L = getLeft())
+      n += L->size();
+    if (const ImutAVLTree* R = getRight())
+      n += R->size();
+    return n;
+  }
+
+  /// begin - Returns an iterator that iterates over the nodes of the tree
+  ///  in an inorder traversal.  The returned iterator thus refers to the
+  ///  tree node with the minimum data element.
+  iterator begin() const { return iterator(this); }
+
+  /// end - Returns an iterator for the tree that denotes the end of an
+  ///  inorder traversal.
+  iterator end() const { return iterator(); }
+
+  bool isElementEqual(value_type_ref V) const {
+    // Compare the keys.
+    if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+                           ImutInfo::KeyOfValue(V)))
+      return false;
+
+    // Also compare the data values.
+    if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+                               ImutInfo::DataOfValue(V)))
+      return false;
+
+    return true;
+  }
+
+  bool isElementEqual(const ImutAVLTree* RHS) const {
+    return isElementEqual(RHS->getValue());
+  }
+
+  /// isEqual - Compares two trees for structural equality and returns true
+  ///   if they are equal.  The worst case performance of this operation is
+  ///   linear in the sizes of the trees.
+  bool isEqual(const ImutAVLTree& RHS) const {
+    if (&RHS == this)
+      return true;
+
+    iterator LItr = begin(), LEnd = end();
+    iterator RItr = RHS.begin(), REnd = RHS.end();
+
+    while (LItr != LEnd && RItr != REnd) {
+      if (&*LItr == &*RItr) {
+        LItr.skipSubTree();
+        RItr.skipSubTree();
+        continue;
+      }
+
+      if (!LItr->isElementEqual(&*RItr))
+        return false;
+
+      ++LItr;
+      ++RItr;
+    }
+
+    return LItr == LEnd && RItr == REnd;
+  }
+
+  /// isNotEqual - Compares two trees for structural inequality.  Performance
+  ///  is the same as isEqual.
+  bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+  /// contains - Returns true if this tree contains a subtree (node) that
+  ///  has a data element that matches the specified key.  Complexity
+  ///  is logarithmic in the size of the tree.
+  bool contains(key_type_ref K) { return (bool) find(K); }
+
+  /// foreach - A member template that invokes operator() on a functor
+  ///  object (specified by Callback) for every node/subtree in the tree.
+  ///  Nodes are visited using an inorder traversal.
+  template <typename Callback>
+  void foreach(Callback& C) {
+    if (ImutAVLTree* L = getLeft())
+      L->foreach(C);
+
+    C(value);
+
+    if (ImutAVLTree* R = getRight())
+      R->foreach(C);
+  }
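+
+  // An illustrative callback sketch (hypothetical): a functor whose
+  // operator() receives each stored value during the inorder walk.
+  //
+  // \code
+  //   struct Collect {
+  //     std::vector<value_type> Seen;
+  //     void operator()(value_type_ref V) { Seen.push_back(V); }
+  //   };
+  // \endcode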
+
+  /// validateTree - A utility method that checks that the balancing and
+  ///  ordering invariants of the tree are satisfied.  It is a recursive
+  ///  method that returns the height of the tree, which is then consumed
+  ///  by the enclosing validateTree call.  External callers should ignore the
+  ///  return value.  An invalid tree will cause an assertion to fire in
+  ///  a debug build.
+  unsigned validateTree() const {
+    unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+    unsigned HR = getRight() ? getRight()->validateTree() : 0;
+    (void) HL;
+    (void) HR;
+
+    assert(getHeight() == ( HL > HR ? HL : HR ) + 1
+            && "Height calculation wrong");
+
+    assert((HL > HR ? HL-HR : HR-HL) <= 2
+           && "Balancing invariant violated");
+
+    assert((!getLeft() ||
+            ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+                             ImutInfo::KeyOfValue(getValue()))) &&
+           "Value in left child is not less that current value");
+
+    assert((!getRight() ||
+            ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+                             ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+           "Current value is not less than the value of the right child");
+
+    return getHeight();
+  }
+
+  //===----------------------------------------------------===//
+  // Internal values.
+  //===----------------------------------------------------===//
+
+private:
+  Factory *factory;
+  ImutAVLTree *left;
+  ImutAVLTree *right;
+  ImutAVLTree *prev = nullptr;
+  ImutAVLTree *next = nullptr;
+
+  unsigned height : 28;
+  bool IsMutable : 1;
+  bool IsDigestCached : 1;
+  bool IsCanonicalized : 1;
+
+  value_type value;
+  uint32_t digest = 0;
+  uint32_t refCount = 0;
+
+  //===----------------------------------------------------===//
+  // Internal methods (node manipulation; used by Factory).
+  //===----------------------------------------------------===//
+
+private:
+  /// ImutAVLTree - Internal constructor that is only called by
+  ///   ImutAVLFactory.
+  ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+              unsigned height)
+    : factory(f), left(l), right(r), height(height), IsMutable(true),
+      IsDigestCached(false), IsCanonicalized(false), value(v)
+  {
+    if (left) left->retain();
+    if (right) right->retain();
+  }
+
+  /// isMutable - Returns true if the left and right subtree references
+  ///  (as well as height) can be changed.  If this method returns false,
+  ///  the tree is truly immutable.  Trees returned from an ImutAVLFactory
+  ///  object should always have this method return true.  Further, if this
+  ///  method returns false for an instance of ImutAVLTree, all subtrees
+  ///  will also have this method return false.  The converse is not true.
+  bool isMutable() const { return IsMutable; }
+
+  /// hasCachedDigest - Returns true if the digest for this tree is cached.
+  ///  This can only be true if the tree is immutable.
+  bool hasCachedDigest() const { return IsDigestCached; }
+
+  //===----------------------------------------------------===//
+  // Mutating operations.  A tree root can be manipulated as
+  // long as its reference has not "escaped" from internal
+  // methods of a factory object (see below).  When a tree
+  // pointer is externally viewable by client code, the
+  // internal "mutable bit" is cleared to mark the tree
+  // immutable.  Note that a tree that still has its mutable
+  // bit set may have children (subtrees) that are themselves
+  // immutable.
+  //===----------------------------------------------------===//
+
+  /// markImmutable - Clears the mutable flag for a tree.  After this happens,
+  ///   it is an error to call setLeft(), setRight(), and setHeight().
+  void markImmutable() {
+    assert(isMutable() && "Mutable flag already removed.");
+    IsMutable = false;
+  }
+
+  /// markedCachedDigest - Marks the digest for this tree as cached.
+  void markedCachedDigest() {
+    assert(!hasCachedDigest() && "Digest is already cached.");
+    IsDigestCached = true;
+  }
+
+  /// setHeight - Changes the height of the tree.  Used internally by
+  ///  ImutAVLFactory.
+  void setHeight(unsigned h) {
+    assert(isMutable() && "Only a mutable tree can have its height changed.");
+    height = h;
+  }
+
+  static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
+                                value_type_ref V) {
+    uint32_t digest = 0;
+
+    if (L)
+      digest += L->computeDigest();
+
+    // Compute digest of stored data.
+    FoldingSetNodeID ID;
+    ImutInfo::Profile(ID,V);
+    digest += ID.ComputeHash();
+
+    if (R)
+      digest += R->computeDigest();
+
+    return digest;
+  }
+
+  uint32_t computeDigest() {
+    // Return the cached digest if it has already been computed.
+    if (hasCachedDigest())
+      return digest;
+
+    uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+    digest = X;
+    markedCachedDigest();
+    return X;
+  }
+
+  //===----------------------------------------------------===//
+  // Reference count operations.
+  //===----------------------------------------------------===//
+
+public:
+  void retain() { ++refCount; }
+
+  void release() {
+    assert(refCount > 0);
+    if (--refCount == 0)
+      destroy();
+  }
+
+  void destroy() {
+    if (left)
+      left->release();
+    if (right)
+      right->release();
+    if (IsCanonicalized) {
+      if (next)
+        next->prev = prev;
+
+      if (prev)
+        prev->next = next;
+      else
+        factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
+    }
+
+    // We need to clear the mutability bit in case we are
+    // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+    IsMutable = false;
+    factory->freeNodes.push_back(this);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Factory class.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo >
+class ImutAVLFactory {
+  friend class ImutAVLTree<ImutInfo>;
+
+  using TreeTy = ImutAVLTree<ImutInfo>;
+  using value_type_ref = typename TreeTy::value_type_ref;
+  using key_type_ref = typename TreeTy::key_type_ref;
+  using CacheTy = DenseMap<unsigned, TreeTy*>;
+
+  CacheTy Cache;
+  uintptr_t Allocator;
+  std::vector<TreeTy*> createdNodes;
+  std::vector<TreeTy*> freeNodes;
+
+  bool ownsAllocator() const {
+    return (Allocator & 0x1) == 0;
+  }
+
+  BumpPtrAllocator& getAllocator() const {
+    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+  }
+
+  //===--------------------------------------------------===//
+  // Public interface.
+  //===--------------------------------------------------===//
+
+public:
+  ImutAVLFactory()
+    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+  ImutAVLFactory(BumpPtrAllocator& Alloc)
+    : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+  ~ImutAVLFactory() {
+    if (ownsAllocator()) delete &getAllocator();
+  }
+
+  TreeTy* add(TreeTy* T, value_type_ref V) {
+    T = add_internal(V,T);
+    markImmutable(T);
+    recoverNodes();
+    return T;
+  }
+
+  TreeTy* remove(TreeTy* T, key_type_ref V) {
+    T = remove_internal(V,T);
+    markImmutable(T);
+    recoverNodes();
+    return T;
+  }
+
+  TreeTy* getEmptyTree() const { return nullptr; }
+
+protected:
+  //===--------------------------------------------------===//
+  // A bunch of quick helper functions used for reasoning
+  // about the properties of trees and their children.
+  // These have succinct names so that the balancing code
+  // is as terse (and readable) as possible.
+  //===--------------------------------------------------===//
+
+  bool            isEmpty(TreeTy* T) const { return !T; }
+  unsigned        getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+  TreeTy*         getLeft(TreeTy* T) const { return T->getLeft(); }
+  TreeTy*         getRight(TreeTy* T) const { return T->getRight(); }
+  value_type_ref  getValue(TreeTy* T) const { return T->value; }
+
+  // Make sure the index is not the Tombstone or Empty key of the DenseMap.
+  static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }
+
+  unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+    unsigned hl = getHeight(L);
+    unsigned hr = getHeight(R);
+    return (hl > hr ? hl : hr) + 1;
+  }
+
+  static bool compareTreeWithSection(TreeTy* T,
+                                     typename TreeTy::iterator& TI,
+                                     typename TreeTy::iterator& TE) {
+    typename TreeTy::iterator I = T->begin(), E = T->end();
+    for ( ; I!=E ; ++I, ++TI) {
+      if (TI == TE || !I->isElementEqual(&*TI))
+        return false;
+    }
+    return true;
+  }
+
+  //===--------------------------------------------------===//
+  // "createNode" is used to generate new tree roots that link
+  // to other trees.  The function may also simply move links
+  // in an existing root if that root is still marked mutable.
+  // This is necessary because otherwise our balancing code
+  // would leak memory as it would create nodes that are
+  // then discarded later before the finished tree is
+  // returned to the caller.
+  //===--------------------------------------------------===//
+
+  TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+    BumpPtrAllocator& A = getAllocator();
+    TreeTy* T;
+    if (!freeNodes.empty()) {
+      T = freeNodes.back();
+      freeNodes.pop_back();
+      assert(T != L);
+      assert(T != R);
+    } else {
+      T = (TreeTy*) A.Allocate<TreeTy>();
+    }
+    new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+    createdNodes.push_back(T);
+    return T;
+  }
+
+  TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+    return createNode(newLeft, getValue(oldTree), newRight);
+  }
+
+  void recoverNodes() {
+    for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+      TreeTy *N = createdNodes[i];
+      if (N->isMutable() && N->refCount == 0)
+        N->destroy();
+    }
+    createdNodes.clear();
+  }
+
+  /// balanceTree - Used by add_internal and remove_internal to
+  ///  balance a newly created tree.
+  TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+    unsigned hl = getHeight(L);
+    unsigned hr = getHeight(R);
+
+    if (hl > hr + 2) {
+      assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
+
+      TreeTy *LL = getLeft(L);
+      TreeTy *LR = getRight(L);
+
+      if (getHeight(LL) >= getHeight(LR))
+        return createNode(LL, L, createNode(LR,V,R));
+
+      assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
+
+      TreeTy *LRL = getLeft(LR);
+      TreeTy *LRR = getRight(LR);
+
+      return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
+    }
+
+    if (hr > hl + 2) {
+      assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
+
+      TreeTy *RL = getLeft(R);
+      TreeTy *RR = getRight(R);
+
+      if (getHeight(RR) >= getHeight(RL))
+        return createNode(createNode(L,V,RL), R, RR);
+
+      assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
+
+      TreeTy *RLL = getLeft(RL);
+      TreeTy *RLR = getRight(RL);
+
+      return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
+    }
+
+    return createNode(L,V,R);
+  }
+
+  /// add_internal - Creates a new tree that includes the specified
+  ///  data and the data from the original tree.  If the original tree
+  ///  already contained the data item, the original tree is returned.
+  TreeTy* add_internal(value_type_ref V, TreeTy* T) {
+    if (isEmpty(T))
+      return createNode(T, V, T);
+    assert(!T->isMutable());
+
+    key_type_ref K = ImutInfo::KeyOfValue(V);
+    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+    if (ImutInfo::isEqual(K,KCurrent))
+      return createNode(getLeft(T), V, getRight(T));
+    else if (ImutInfo::isLess(K,KCurrent))
+      return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
+    else
+      return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
+  }
+
+  /// remove_internal - Creates a new tree that includes all the data
+  ///  from the original tree except the specified data.  If the
+  ///  specified data did not exist in the original tree, the original
+  ///  tree is returned.
+  TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
+    if (isEmpty(T))
+      return T;
+
+    assert(!T->isMutable());
+
+    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+    if (ImutInfo::isEqual(K,KCurrent)) {
+      return combineTrees(getLeft(T), getRight(T));
+    } else if (ImutInfo::isLess(K,KCurrent)) {
+      return balanceTree(remove_internal(K, getLeft(T)),
+                                            getValue(T), getRight(T));
+    } else {
+      return balanceTree(getLeft(T), getValue(T),
+                         remove_internal(K, getRight(T)));
+    }
+  }
+
+  TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+    if (isEmpty(L))
+      return R;
+    if (isEmpty(R))
+      return L;
+    TreeTy* OldNode;
+    TreeTy* newRight = removeMinBinding(R,OldNode);
+    return balanceTree(L, getValue(OldNode), newRight);
+  }
+
+  TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
+    assert(!isEmpty(T));
+    if (isEmpty(getLeft(T))) {
+      Noderemoved = T;
+      return getRight(T);
+    }
+    return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+                       getValue(T), getRight(T));
+  }
+
+  /// markImmutable - Clears the mutable bits of a root and all of its
+  ///  descendants.
+  void markImmutable(TreeTy* T) {
+    if (!T || !T->isMutable())
+      return;
+    T->markImmutable();
+    markImmutable(getLeft(T));
+    markImmutable(getRight(T));
+  }
+
+public:
+  TreeTy *getCanonicalTree(TreeTy *TNew) {
+    if (!TNew)
+      return nullptr;
+
+    if (TNew->IsCanonicalized)
+      return TNew;
+
+    // Search the hashtable for another tree with the same digest, and
+    // if we find a collision, compare those trees by their contents.
+    unsigned digest = TNew->computeDigest();
+    TreeTy *&entry = Cache[maskCacheIndex(digest)];
+    do {
+      if (!entry)
+        break;
+      for (TreeTy *T = entry ; T != nullptr; T = T->next) {
+        // Compare the Contents('T') with Contents('TNew')
+        typename TreeTy::iterator TI = T->begin(), TE = T->end();
+        if (!compareTreeWithSection(TNew, TI, TE))
+          continue;
+        if (TI != TE)
+          continue; // T has more contents than TNew.
+        // Trees did match!  Return 'T'.
+        if (TNew->refCount == 0)
+          TNew->destroy();
+        return T;
+      }
+      entry->prev = TNew;
+      TNew->next = entry;
+    }
+    while (false);
+
+    entry = TNew;
+    TNew->IsCanonicalized = true;
+    return TNew;
+  }
+};
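+
+// Example (a minimal sketch, not part of this header): canonicalization lets
+// clients compare whole trees by pointer identity.  Assuming a factory F for
+// some ImutInfo and a value V, two structurally equal trees collapse to a
+// single canonical node:
+//
+//   TreeTy *A = F.getCanonicalTree(F.add(F.getEmptyTree(), V));
+//   TreeTy *B = F.getCanonicalTree(F.add(F.getEmptyTree(), V));
+//   assert(A == B && "equal contents yield the same canonical tree");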
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Iterators.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo>
+class ImutAVLTreeGenericIterator
+    : public std::iterator<std::bidirectional_iterator_tag,
+                           ImutAVLTree<ImutInfo>> {
+  SmallVector<uintptr_t,20> stack;
+
+public:
+  enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
+                   Flags=0x3 };
+
+  using TreeTy = ImutAVLTree<ImutInfo>;
+
+  ImutAVLTreeGenericIterator() = default;
+  ImutAVLTreeGenericIterator(const TreeTy *Root) {
+    if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
+  }
+
+  TreeTy &operator*() const {
+    assert(!stack.empty());
+    return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
+  }
+  TreeTy *operator->() const { return &*this; }
+
+  uintptr_t getVisitState() const {
+    assert(!stack.empty());
+    return stack.back() & Flags;
+  }
+
+  bool atEnd() const { return stack.empty(); }
+
+  bool atBeginning() const {
+    return stack.size() == 1 && getVisitState() == VisitedNone;
+  }
+
+  void skipToParent() {
+    assert(!stack.empty());
+    stack.pop_back();
+    if (stack.empty())
+      return;
+    switch (getVisitState()) {
+      case VisitedNone:
+        stack.back() |= VisitedLeft;
+        break;
+      case VisitedLeft:
+        stack.back() |= VisitedRight;
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+  }
+
+  bool operator==(const ImutAVLTreeGenericIterator &x) const {
+    return stack == x.stack;
+  }
+
+  bool operator!=(const ImutAVLTreeGenericIterator &x) const {
+    return !(*this == x);
+  }
+
+  ImutAVLTreeGenericIterator &operator++() {
+    assert(!stack.empty());
+    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+    assert(Current);
+    switch (getVisitState()) {
+      case VisitedNone:
+        if (TreeTy* L = Current->getLeft())
+          stack.push_back(reinterpret_cast<uintptr_t>(L));
+        else
+          stack.back() |= VisitedLeft;
+        break;
+      case VisitedLeft:
+        if (TreeTy* R = Current->getRight())
+          stack.push_back(reinterpret_cast<uintptr_t>(R));
+        else
+          stack.back() |= VisitedRight;
+        break;
+      case VisitedRight:
+        skipToParent();
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+    return *this;
+  }
+
+  ImutAVLTreeGenericIterator &operator--() {
+    assert(!stack.empty());
+    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+    assert(Current);
+    switch (getVisitState()) {
+      case VisitedNone:
+        stack.pop_back();
+        break;
+      case VisitedLeft:
+        stack.back() &= ~Flags; // Set state to "VisitedNone."
+        if (TreeTy* L = Current->getLeft())
+          stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
+        break;
+      case VisitedRight:
+        stack.back() &= ~Flags;
+        stack.back() |= VisitedLeft;
+        if (TreeTy* R = Current->getRight())
+          stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+    return *this;
+  }
+};
+
+template <typename ImutInfo>
+class ImutAVLTreeInOrderIterator
+    : public std::iterator<std::bidirectional_iterator_tag,
+                           ImutAVLTree<ImutInfo>> {
+  using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
+
+  InternalIteratorTy InternalItr;
+
+public:
+  using TreeTy = ImutAVLTree<ImutInfo>;
+
+  ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
+    if (Root)
+      ++*this; // Advance to first element.
+  }
+
+  ImutAVLTreeInOrderIterator() : InternalItr() {}
+
+  bool operator==(const ImutAVLTreeInOrderIterator &x) const {
+    return InternalItr == x.InternalItr;
+  }
+
+  bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
+    return !(*this == x);
+  }
+
+  TreeTy &operator*() const { return *InternalItr; }
+  TreeTy *operator->() const { return &*InternalItr; }
+
+  ImutAVLTreeInOrderIterator &operator++() {
+    do ++InternalItr;
+    while (!InternalItr.atEnd() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+    return *this;
+  }
+
+  ImutAVLTreeInOrderIterator &operator--() {
+    do --InternalItr;
+    while (!InternalItr.atBeginning() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+    return *this;
+  }
+
+  void skipSubTree() {
+    InternalItr.skipToParent();
+
+    while (!InternalItr.atEnd() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
+      ++InternalItr;
+  }
+};
+
+/// Generic iterator that wraps a T::TreeTy::iterator and exposes
+/// iterator::getValue() on dereference.
+template <typename T>
+struct ImutAVLValueIterator
+    : iterator_adaptor_base<
+          ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
+          typename std::iterator_traits<
+              typename T::TreeTy::iterator>::iterator_category,
+          const typename T::value_type> {
+  ImutAVLValueIterator() = default;
+  explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
+      : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}
+
+  typename ImutAVLValueIterator::reference operator*() const {
+    return this->I->getValue();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes for Profile information.
+//===----------------------------------------------------------------------===//
+
+/// Generic profile template.  The default behavior is to invoke the
+/// profile method of an object.  Specializations for primitive integers
+/// and generic handling of pointers is done below.
+template <typename T>
+struct ImutProfileInfo {
+  using value_type = const T;
+  using value_type_ref = const T&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    FoldingSetTrait<T>::Profile(X,ID);
+  }
+};
+
+/// Profile traits for integers.
+template <typename T>
+struct ImutProfileInteger {
+  using value_type = const T;
+  using value_type_ref = const T&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddInteger(X);
+  }
+};
+
+#define PROFILE_INTEGER_INFO(X)\
+template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};
+
+PROFILE_INTEGER_INFO(char)
+PROFILE_INTEGER_INFO(unsigned char)
+PROFILE_INTEGER_INFO(short)
+PROFILE_INTEGER_INFO(unsigned short)
+PROFILE_INTEGER_INFO(unsigned)
+PROFILE_INTEGER_INFO(signed)
+PROFILE_INTEGER_INFO(long)
+PROFILE_INTEGER_INFO(unsigned long)
+PROFILE_INTEGER_INFO(long long)
+PROFILE_INTEGER_INFO(unsigned long long)
+
+#undef PROFILE_INTEGER_INFO
+
+/// Profile traits for booleans.
+template <>
+struct ImutProfileInfo<bool> {
+  using value_type = const bool;
+  using value_type_ref = const bool&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddBoolean(X);
+  }
+};
+
+/// Generic profile trait for pointer types.  We treat pointers as
+/// references to unique objects.
+template <typename T>
+struct ImutProfileInfo<T*> {
+  using value_type = const T*;
+  using value_type_ref = value_type;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddPointer(X);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes that contain element comparison operators and type
+//  definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap.  These
+//  inherit from the profile traits (ImutProfileInfo) to include operations
+//  for element profiling.
+//===----------------------------------------------------------------------===//
+
+/// ImutContainerInfo - Generic definition of comparison operations for
+///   elements of immutable containers that defaults to using
+///   std::equal_to<> and std::less<> to perform comparison of elements.
+template <typename T>
+struct ImutContainerInfo : public ImutProfileInfo<T> {
+  using value_type = typename ImutProfileInfo<T>::value_type;
+  using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
+  using key_type = value_type;
+  using key_type_ref = value_type_ref;
+  using data_type = bool;
+  using data_type_ref = bool;
+
+  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+  static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+  static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
+    return std::equal_to<key_type>()(LHS,RHS);
+  }
+
+  static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+    return std::less<key_type>()(LHS,RHS);
+  }
+
+  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+/// ImutContainerInfo - Specialization for pointer values to treat pointers
+///  as references to unique objects.  Pointers are thus compared by
+///  their addresses.
+template <typename T>
+struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+  using value_type = typename ImutProfileInfo<T*>::value_type;
+  using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
+  using key_type = value_type;
+  using key_type_ref = value_type_ref;
+  using data_type = bool;
+  using data_type_ref = bool;
+
+  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+  static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+  static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
+
+  static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }
+
+  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
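+
+// Example (an illustrative sketch, not part of this header): clients can
+// specialize ImutContainerInfo to override element comparison.  The
+// hypothetical case-insensitive key type below is assumed only for
+// demonstration; the type aliases and KeyOfValue/DataOfValue helpers would
+// mirror the primary template:
+//
+//   struct CIStr { const char *S; };
+//   template <> struct ImutContainerInfo<CIStr> : ImutProfileInfo<CIStr> {
+//     static bool isEqual(const CIStr &L, const CIStr &R) {
+//       return strcasecmp(L.S, R.S) == 0;
+//     }
+//     static bool isLess(const CIStr &L, const CIStr &R) {
+//       return strcasecmp(L.S, R.S) < 0;
+//     }
+//   };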
+
+//===----------------------------------------------------------------------===//
+// Immutable Set
+//===----------------------------------------------------------------------===//
+
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSet {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+
+private:
+  TreeTy *Root;
+
+public:
+  /// Constructs a set from a pointer to a tree root.  In general one
+  /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableSet(TreeTy* R) : Root(R) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableSet() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableSet &operator=(const ImmutableSet &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+    }
+    return *this;
+  }
+
+  class Factory {
+    typename TreeTy::Factory F;
+    const bool Canonicalize;
+
+  public:
+    Factory(bool canonicalize = true)
+      : Canonicalize(canonicalize) {}
+
+    Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
+      : F(Alloc), Canonicalize(canonicalize) {}
+
+    Factory(const Factory& RHS) = delete;
+    void operator=(const Factory& RHS) = delete;
+
+    /// getEmptySet - Returns an immutable set that contains no elements.
+    ImmutableSet getEmptySet() {
+      return ImmutableSet(F.getEmptyTree());
+    }
+
+    /// add - Creates a new immutable set that contains all of the values
+    ///  of the original set with the addition of the specified value.  If
+    ///  the original set already included the value, then the original set is
+    ///  returned and no memory is allocated.  The time and space complexity
+    ///  of this operation is logarithmic in the size of the original set.
+    ///  The memory allocated to represent the set is released when the
+    ///  factory object that created the set is destroyed.
+    ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+      TreeTy *NewT = F.add(Old.Root, V);
+      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+    }
+
+    /// remove - Creates a new immutable set that contains all of the values
+    ///  of the original set with the exception of the specified value.  If
+    ///  the original set did not contain the value, the original set is
+    ///  returned and no memory is allocated.  The time and space complexity
+    ///  of this operation is logarithmic in the size of the original set.
+    ///  The memory allocated to represent the set is released when the
+    ///  factory object that created the set is destroyed.
+    ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+      TreeTy *NewT = F.remove(Old.Root, V);
+      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+    }
+
+    BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
+
+    typename TreeTy::Factory *getTreeFactory() const {
+      return const_cast<typename TreeTy::Factory *>(&F);
+    }
+  };
+
+  friend class Factory;
+
+  /// Returns true if the set contains the specified value.
+  bool contains(value_type_ref V) const {
+    return Root ? Root->contains(V) : false;
+  }
+
+  bool operator==(const ImmutableSet &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableSet &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  TreeTy *getRoot() {
+    if (Root) { Root->retain(); }
+    return Root;
+  }
+
+  TreeTy *getRootWithoutRetain() const {
+    return Root;
+  }
+
+  /// isEmpty - Return true if the set contains no elements.
+  bool isEmpty() const { return !Root; }
+
+  /// isSingleton - Return true if the set contains exactly one element.
+  ///   This method runs in constant time.
+  bool isSingleton() const { return getHeight() == 1; }
+
+  template <typename Callback>
+  void foreach(Callback& C) { if (Root) Root->foreach(C); }
+
+  template <typename Callback>
+  void foreach() { if (Root) { Callback C; Root->foreach(C); } }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  using iterator = ImutAVLValueIterator<ImmutableSet>;
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
+    ID.AddPointer(S.Root);
+  }
+
+  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void validateTree() const { if (Root) Root->validateTree(); }
+};
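+
+// Example usage (an illustrative sketch; the int element type is an
+// assumption for demonstration):
+//
+//   ImmutableSet<int>::Factory F;
+//   ImmutableSet<int> S0 = F.getEmptySet();
+//   ImmutableSet<int> S1 = F.add(S0, 42);     // S0 is left unchanged.
+//   ImmutableSet<int> S2 = F.remove(S1, 42);  // Structurally equal to S0.
+//   assert(S1.contains(42) && !S0.contains(42) && S2 == S0);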
+
+// NOTE: This may some day replace the current ImmutableSet.
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSetRef {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+  using FactoryTy = typename TreeTy::Factory;
+
+private:
+  TreeTy *Root;
+  FactoryTy *Factory;
+
+public:
+  /// Constructs a set from a pointer to a tree root.  In general one
+  /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableSetRef(TreeTy* R, FactoryTy *F)
+    : Root(R),
+      Factory(F) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableSetRef(const ImmutableSetRef &X)
+    : Root(X.Root),
+      Factory(X.Factory) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableSetRef() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableSetRef &operator=(const ImmutableSetRef &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+      Factory = X.Factory;
+    }
+    return *this;
+  }
+
+  static ImmutableSetRef getEmptySet(FactoryTy *F) {
+    return ImmutableSetRef(nullptr, F);
+  }
+
+  ImmutableSetRef add(value_type_ref V) {
+    return ImmutableSetRef(Factory->add(Root, V), Factory);
+  }
+
+  ImmutableSetRef remove(value_type_ref V) {
+    return ImmutableSetRef(Factory->remove(Root, V), Factory);
+  }
+
+  /// Returns true if the set contains the specified value.
+  bool contains(value_type_ref V) const {
+    return Root ? Root->contains(V) : false;
+  }
+
+  ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
+    return ImmutableSet<ValT>(canonicalize ?
+                              Factory->getCanonicalTree(Root) : Root);
+  }
+
+  TreeTy *getRootWithoutRetain() const {
+    return Root;
+  }
+
+  bool operator==(const ImmutableSetRef &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableSetRef &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  /// isEmpty - Return true if the set contains no elements.
+  bool isEmpty() const { return !Root; }
+
+  /// isSingleton - Return true if the set contains exactly one element.
+  ///   This method runs in constant time.
+  bool isSingleton() const { return getHeight() == 1; }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  using iterator = ImutAVLValueIterator<ImmutableSetRef>;
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
+    ID.AddPointer(S.Root);
+  }
+
+  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void validateTree() const { if (Root) Root->validateTree(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/IndexedMap.h b/linux-x64/clang/include/llvm/ADT/IndexedMap.h
new file mode 100644
index 0000000..2ee80d2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IndexedMap.h
@@ -0,0 +1,85 @@
+//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an indexed map. The index map template takes two
+// types. The first is the mapped type and the second is a functor
+// that maps its argument to a size_t. On instantiation a "null" value
+// can be provided to be used as a "does not exist" indicator in the
+// map. A member function grow() is provided that given the value of
+// the maximally indexed key (the argument of the functor) makes sure
+// the map has enough space for it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INDEXEDMAP_H
+#define LLVM_ADT_INDEXEDMAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+template <typename T, typename ToIndexT = identity<unsigned>>
+  class IndexedMap {
+    using IndexT = typename ToIndexT::argument_type;
+    // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
+    // can grow very large and SmallVector grows more efficiently as long as T
+    // is trivially copyable.
+    using StorageT = SmallVector<T, 0>;
+
+    StorageT storage_;
+    T nullVal_;
+    ToIndexT toIndex_;
+
+  public:
+    IndexedMap() : nullVal_(T()) {}
+
+    explicit IndexedMap(const T& val) : nullVal_(val) {}
+
+    typename StorageT::reference operator[](IndexT n) {
+      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+      return storage_[toIndex_(n)];
+    }
+
+    typename StorageT::const_reference operator[](IndexT n) const {
+      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+      return storage_[toIndex_(n)];
+    }
+
+    void reserve(typename StorageT::size_type s) {
+      storage_.reserve(s);
+    }
+
+    void resize(typename StorageT::size_type s) {
+      storage_.resize(s, nullVal_);
+    }
+
+    void clear() {
+      storage_.clear();
+    }
+
+    void grow(IndexT n) {
+      unsigned NewSize = toIndex_(n) + 1;
+      if (NewSize > storage_.size())
+        resize(NewSize);
+    }
+
+    bool inBounds(IndexT n) const {
+      return toIndex_(n) < storage_.size();
+    }
+
+    typename StorageT::size_type size() const {
+      return storage_.size();
+    }
+  };
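+
+// Example usage (an illustrative sketch; unsigned values are an assumption
+// for demonstration):
+//
+//   IndexedMap<unsigned> Counts(0); // 0 serves as the "does not exist" value.
+//   Counts.grow(7);                 // Make room for indices 0..7.
+//   Counts[3] += 1;
+//   assert(Counts.inBounds(7) && Counts[5] == 0);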
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INDEXEDMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/IntEqClasses.h b/linux-x64/clang/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000..0baee2f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,88 @@
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+  /// EC - When uncompressed, map each integer to a smaller member of its
+  /// equivalence class. The class leader is the smallest member and maps to
+  /// itself.
+  ///
+  /// When compressed, EC[i] is the equivalence class of i.
+  SmallVector<unsigned, 8> EC;
+
+  /// NumClasses - The number of equivalence classes when compressed, or 0 when
+  /// uncompressed.
+  unsigned NumClasses;
+
+public:
+  /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+  IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+  /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+  /// equivalence classes.
+  /// This requires an uncompressed map.
+  void grow(unsigned N);
+
+  /// clear - Clear all classes so that grow() will assign a unique class to
+  /// every integer.
+  void clear() {
+    EC.clear();
+    NumClasses = 0;
+  }
+
+  /// Join the equivalence classes of a and b. After joining classes,
+  /// findLeader(a) == findLeader(b). This requires an uncompressed map.
+  /// Returns the new leader.
+  unsigned join(unsigned a, unsigned b);
+
+  /// findLeader - Compute the leader of a's equivalence class. This is the
+  /// smallest member of the class.
+  /// This requires an uncompressed map.
+  unsigned findLeader(unsigned a) const;
+
+  /// compress - Compress equivalence classes by numbering them 0 .. M-1.
+  /// This makes the equivalence class map immutable.
+  void compress();
+
+  /// getNumClasses - Return the number of equivalence classes after compress()
+  /// was called.
+  unsigned getNumClasses() const { return NumClasses; }
+
+  /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+  /// This requires a compressed map.
+  unsigned operator[](unsigned a) const {
+    assert(NumClasses && "operator[] called before compress()");
+    return EC[a];
+  }
+
+  /// uncompress - Change back to the uncompressed representation that allows
+  /// editing.
+  void uncompress();
+};
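+
+// Example usage (an illustrative sketch):
+//
+//   IntEqClasses EQ(4);   // Four classes: {0} {1} {2} {3}.
+//   EQ.join(0, 3);        // Now {0,3} {1} {2}.
+//   EQ.join(1, 2);        // Now {0,3} {1,2}.
+//   assert(EQ.findLeader(3) == 0 && EQ.findLeader(2) == 1);
+//   EQ.compress();        // Renumber classes 0 .. 1; map becomes immutable.
+//   assert(EQ.getNumClasses() == 2 && EQ[3] == EQ[0]);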
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/IntervalMap.h b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000..f713668
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2158 @@
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a coalescing interval map for small objects.
+//
+// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+// same value are represented in a compressed form.
+//
+// Iterators provide ordered access to the compressed intervals rather than the
+// individual keys, and insert and erase operations use key intervals as well.
+//
+// Like SmallVector, IntervalMap will store the first N intervals in the map
+// object itself without any allocations. When space is exhausted it switches to
+// a B+-tree representation with very small overhead for small key and value
+// objects.
+//
+// A Traits class specifies how keys are compared. It also allows IntervalMap to
+// work with both closed and half-open intervals.
+//
+// Keys and values are not stored next to each other in a std::pair, so we don't
+// provide such a value_type. Dereferencing iterators only returns the mapped
+// value. The interval bounds are accessible through the start() and stop()
+// iterator methods.
+//
+// IntervalMap is optimized for small key and value objects, 4 or 8 bytes each
+// is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+//   typedef KeyT key_type;
+//   typedef ValT mapped_type;
+//   typedef RecyclingAllocator<...> Allocator;
+//   class iterator;
+//   class const_iterator;
+//
+//   explicit IntervalMap(Allocator&);
+//   ~IntervalMap();
+//
+//   bool empty() const;
+//   KeyT start() const;
+//   KeyT stop() const;
+//   ValT lookup(KeyT x, ValT NotFound = ValT()) const;
+//
+//   const_iterator begin() const;
+//   const_iterator end() const;
+//   iterator begin();
+//   iterator end();
+//   const_iterator find(KeyT x) const;
+//   iterator find(KeyT x);
+//
+//   void insert(KeyT a, KeyT b, ValT y);
+//   void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator :
+//   public std::iterator<std::bidirectional_iterator_tag, ValT> {
+// public:
+//   bool operator==(const const_iterator &) const;
+//   bool operator!=(const const_iterator &) const;
+//   bool valid() const;
+//
+//   const KeyT &start() const;
+//   const KeyT &stop() const;
+//   const ValT &value() const;
+//   const ValT &operator*() const;
+//   const ValT *operator->() const;
+//
+//   const_iterator &operator++();
+//   const_iterator &operator++(int);
+//   const_iterator &operator--();
+//   const_iterator &operator--(int);
+//   void goToBegin();
+//   void goToEnd();
+//   void find(KeyT x);
+//   void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+//   void insert(KeyT a, KeyT b, ValT y);
+//   void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//---                              Key traits                              ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+  /// startLess - Return true if x is not in [a;b].
+  /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+  static inline bool startLess(const T &x, const T &a) {
+    return x < a;
+  }
+
+  /// stopLess - Return true if x is not in [a;b].
+  /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+  static inline bool stopLess(const T &b, const T &x) {
+    return b < x;
+  }
+
+  /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+  /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+  static inline bool adjacent(const T &a, const T &b) {
+    return a+1 == b;
+  }
+
+  /// nonEmpty - Return true if [a;b] is non-empty.
+  /// This is a <= b for a closed interval, a < b for [a;b) half-open intervals.
+  static inline bool nonEmpty(const T &a, const T &b) {
+    return a <= b;
+  }
+};
+
+template <typename T>
+struct IntervalMapHalfOpenInfo {
+  /// startLess - Return true if x is not in [a;b).
+  static inline bool startLess(const T &x, const T &a) {
+    return x < a;
+  }
+
+  /// stopLess - Return true if x is not in [a;b).
+  static inline bool stopLess(const T &b, const T &x) {
+    return b <= x;
+  }
+
+  /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
+  static inline bool adjacent(const T &a, const T &b) {
+    return a == b;
+  }
+
+  /// nonEmpty - Return true if [a;b) is non-empty.
+  static inline bool nonEmpty(const T &a, const T &b) {
+    return a < b;
+  }
+};
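+
+// Example usage (an illustrative sketch; unsigned keys and char values are
+// assumptions for demonstration).  With the default closed-interval traits,
+// adjacent intervals mapping to the same value coalesce:
+//
+//   IntervalMap<unsigned, char>::Allocator Alloc;
+//   IntervalMap<unsigned, char> Map(Alloc);
+//   Map.insert(10, 20, 'a');  // [10;20] -> 'a'
+//   Map.insert(21, 30, 'a');  // Coalesces into a single [10;30] -> 'a'.
+//   assert(Map.lookup(25) == 'a' && Map.lookup(40, '?') == '?');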
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+using IdxPair = std::pair<unsigned,unsigned>;
+
+//===----------------------------------------------------------------------===//
+//---                    IntervalMapImpl::NodeBase                         ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+//   T1  T2   N Waste  Used by
+//    4   4  24   0    Branch<4> (32-bit pointers)
+//    8   4  16   0    Leaf<4,4>, Branch<4>
+//    8   8  12   0    Leaf<4,8>, Branch<8>
+//   16   4   9  12    Leaf<8,4>
+//   16   8   8   0    Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+  enum { Capacity = N };
+
+  T1 first[N];
+  T2 second[N];
+
+  /// copy - Copy elements from another node.
+  /// @param Other Node elements are copied from.
+  /// @param i     Beginning of the source range in other.
+  /// @param j     Beginning of the destination range in this.
+  /// @param Count Number of elements to copy.
+  template <unsigned M>
+  void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+            unsigned j, unsigned Count) {
+    assert(i + Count <= M && "Invalid source range");
+    assert(j + Count <= N && "Invalid dest range");
+    for (unsigned e = i + Count; i != e; ++i, ++j) {
+      first[j]  = Other.first[i];
+      second[j] = Other.second[i];
+    }
+  }
+
+  /// moveLeft - Move elements to the left.
+  /// @param i     Beginning of the source range.
+  /// @param j     Beginning of the destination range.
+  /// @param Count Number of elements to copy.
+  void moveLeft(unsigned i, unsigned j, unsigned Count) {
+    assert(j <= i && "Use moveRight shift elements right");
+    copy(*this, i, j, Count);
+  }
+
+  /// moveRight - Move elements to the right.
+  /// @param i     Beginning of the source range.
+  /// @param j     Beginning of the destination range.
+  /// @param Count Number of elements to copy.
+  void moveRight(unsigned i, unsigned j, unsigned Count) {
+    assert(i <= j && "Use moveLeft shift elements left");
+    assert(j + Count <= N && "Invalid range");
+    while (Count--) {
+      first[j + Count]  = first[i + Count];
+      second[j + Count] = second[i + Count];
+    }
+  }
+
+  /// erase - Erase elements [i;j).
+  /// @param i    Beginning of the range to erase.
+  /// @param j    End of the range. (Exclusive).
+  /// @param Size Number of elements in node.
+  void erase(unsigned i, unsigned j, unsigned Size) {
+    moveLeft(j, i, Size - j);
+  }
+
+  /// erase - Erase element at i.
+  /// @param i    Index of element to erase.
+  /// @param Size Number of elements in node.
+  void erase(unsigned i, unsigned Size) {
+    erase(i, i+1, Size);
+  }
+
+  /// shift - Shift elements [i;size) 1 position to the right.
+  /// @param i    Beginning of the range to move.
+  /// @param Size Number of elements in node.
+  void shift(unsigned i, unsigned Size) {
+    moveRight(i, i + 1, Size - i);
+  }
+
+  /// transferToLeftSib - Transfer elements to a left sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Left sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Count Number of elements to transfer.
+  void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+                         unsigned Count) {
+    Sib.copy(*this, 0, SSize, Count);
+    erase(0, Count, Size);
+  }
+
+  /// transferToRightSib - Transfer elements to a right sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Right sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Count Number of elements to transfer.
+  void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+                          unsigned Count) {
+    Sib.moveRight(0, Count, SSize);
+    Sib.copy(*this, Size-Count, 0, Count);
+  }
+
+  /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+  /// elements to or from a left sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Left sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Add   The number of elements to add to this node, possibly < 0.
+  /// @return      Number of elements added to this node, possibly negative.
+  int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+    if (Add > 0) {
+      // We want to grow, copy from sib.
+      unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+      Sib.transferToRightSib(SSize, *this, Size, Count);
+      return Count;
+    } else {
+      // We want to shrink, copy to sib.
+      unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+      transferToLeftSib(Size, Sib, SSize, Count);
+      return -Count;
+    }
+  }
+};
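+
+// Example (an illustrative walk-through of adjustFromLeftSib, called on the
+// right-hand node): with N = 4, this node holding {a,b} (Size = 2) and its
+// left sibling holding {x,y,z} (SSize = 3), adjustFromLeftSib(2, Sib, 3, +1)
+// transfers z, leaving {z,a,b} here and {x,y} in the sibling, and returns 1.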
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node  Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+                        unsigned CurSize[], const unsigned NewSize[]) {
+  // Move elements right.
+  for (int n = Nodes - 1; n; --n) {
+    if (CurSize[n] == NewSize[n])
+      continue;
+    for (int m = n - 1; m != -1; --m) {
+      int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+                                         NewSize[n] - CurSize[n]);
+      CurSize[m] -= d;
+      CurSize[n] += d;
+      // Keep going if the current node was exhausted.
+      if (CurSize[n] >= NewSize[n])
+          break;
+    }
+  }
+
+  if (Nodes == 0)
+    return;
+
+  // Move elements left.
+  for (unsigned n = 0; n != Nodes - 1; ++n) {
+    if (CurSize[n] == NewSize[n])
+      continue;
+    for (unsigned m = n + 1; m != Nodes; ++m) {
+      int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+                                        CurSize[n] -  NewSize[n]);
+      CurSize[m] += d;
+      CurSize[n] -= d;
+      // Keep going if the current node was exhausted.
+      if (CurSize[n] >= NewSize[n])
+          break;
+    }
+  }
+
+#ifndef NDEBUG
+  for (unsigned n = 0; n != Nodes; n++)
+    assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+///   Elements == sum(CurSize), and
+///   Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+///   sum(NewSize) == Elements, and
+///   NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+///   sum(NewSize[0..idx-1]) <= Position
+///   sum(NewSize[0..idx])   >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes    The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize  Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize  Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow     Reserve space for a new element at Position.
+/// @return         (node, offset) for Position.
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+                   const unsigned *CurSize, unsigned NewSize[],
+                   unsigned Position, bool Grow);
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMapImpl::NodeSizer                         ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+  // Cache line size. Most architectures have 32 or 64 byte cache lines.
+  // We use 64 bytes here because it provides good branching factors.
+  Log2CacheLine = 6,
+  CacheLineBytes = 1 << Log2CacheLine,
+  DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+  enum {
+    // Compute the leaf node branching factor that makes a node fit in three
+    // cache lines. The branching factor must be at least 3, or some B+-tree
+    // balancing algorithms won't work.
+    // LeafSize can't be larger than CacheLineBytes. This is required by the
+    // PointerIntPair used by NodeRef.
+    DesiredLeafSize = DesiredNodeBytes /
+      static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+    MinLeafSize = 3,
+    LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+  };
+
+  using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;
+
+  enum {
+    // Now that we have the leaf branching factor, compute the actual allocation
+    // unit size by rounding up to a whole number of cache lines.
+    AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+    // Determine the branching factor for branch nodes.
+    BranchSize = AllocBytes /
+      static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+  };
+
+  /// Allocator - The recycling allocator used for both branch and leaf nodes.
+  /// This typedef is very likely to be identical for all IntervalMaps with
+  /// reasonably sized entries, so the same allocator can be shared among
+  /// different kinds of maps.
+  using Allocator =
+      RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
+};
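+
+// Worked example (illustrative, assuming 4-byte keys and values on a 64-bit
+// host): DesiredLeafSize = 192 / (2*4+4) = 16, so LeafSize = 16.  The leaf
+// then occupies 16*(8+4) = 192 bytes, AllocBytes rounds up to 192, and
+// BranchSize = 192 / (4+8) = 16.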
+
+//===----------------------------------------------------------------------===//
+//---                     IntervalMapImpl::NodeRef                         ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allows tree balancing algorithms to run without faulting cache lines for
+// nodes that may not need to be modified.
+//
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+  struct CacheAlignedPointerTraits {
+    static inline void *getAsVoidPointer(void *P) { return P; }
+    static inline void *getFromVoidPointer(void *P) { return P; }
+    enum { NumLowBitsAvailable = Log2CacheLine };
+  };
+  PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+  /// NodeRef - Create a null ref.
+  NodeRef() = default;
+
+  /// operator bool - Detect a null ref.
+  explicit operator bool() const { return pip.getOpaqueValue(); }
+
+  /// NodeRef - Create a reference to the node p with n elements.
+  template <typename NodeT>
+  NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+    assert(n <= NodeT::Capacity && "Size too big for node");
+  }
+
+  /// size - Return the number of elements in the referenced node.
+  unsigned size() const { return pip.getInt() + 1; }
+
+  /// setSize - Update the node size.
+  void setSize(unsigned n) { pip.setInt(n - 1); }
+
+  /// subtree - Access the i'th subtree reference in a branch node.
+  /// This depends on branch nodes storing the NodeRef array as their first
+  /// member.
+  NodeRef &subtree(unsigned i) const {
+    return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+  }
+
+  /// get - Dereference as a NodeT reference.
+  template <typename NodeT>
+  NodeT &get() const {
+    return *reinterpret_cast<NodeT*>(pip.getPointer());
+  }
+
+  bool operator==(const NodeRef &RHS) const {
+    if (pip == RHS.pip)
+      return true;
+    assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+    return false;
+  }
+
+  bool operator!=(const NodeRef &RHS) const {
+    return !operator==(RHS);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//---                      IntervalMapImpl::LeafNode                       ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i))    - Non-empty, sane intervals.
+//
+// - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+//                                          - Fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+  const KeyT &start(unsigned i) const { return this->first[i].first; }
+  const KeyT &stop(unsigned i) const { return this->first[i].second; }
+  const ValT &value(unsigned i) const { return this->second[i]; }
+
+  KeyT &start(unsigned i) { return this->first[i].first; }
+  KeyT &stop(unsigned i) { return this->first[i].second; }
+  ValT &value(unsigned i) { return this->second[i]; }
+
+  /// findFrom - Find the first interval after i that may contain x.
+  /// @param i    Starting index for the search.
+  /// @param Size Number of elements in node.
+  /// @param x    Key to search for.
+  /// @return     First index with !stopLess(key[i].stop, x), or size.
+  ///             This is the first interval that can possibly contain x.
+  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+    assert(i <= Size && Size <= N && "Bad indices");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+    return i;
+  }
+
+  /// safeFind - Find an interval that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is at least within range of the last
+  /// interval.
+  /// @param i Starting index for the search.
+  /// @param x Key to search for.
+  /// @return  First index with !stopLess(key[i].stop, x), never size.
+  ///          This is the first interval that can possibly contain x.
+  unsigned safeFind(unsigned i, KeyT x) const {
+    assert(i < N && "Bad index");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (Traits::stopLess(stop(i), x)) ++i;
+    assert(i < N && "Unsafe intervals");
+    return i;
+  }
+
+  /// safeLookup - Lookup mapped value for a safe key.
+  /// It is assumed that x is within range of the last entry.
+  /// @param x        Key to search for.
+  /// @param NotFound Value to return if x is not in any interval.
+  /// @return         The mapped value at x or NotFound.
+  ValT safeLookup(KeyT x, ValT NotFound) const {
+    unsigned i = safeFind(0, x);
+    return Traits::startLess(x, start(i)) ? NotFound : value(i);
+  }
+
+  unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos  Starting index = insertFrom(0, size, a)
+/// @param Size Number of elements in node.
+/// @param a    Interval start.
+/// @param b    Interval stop.
+/// @param y    Value to be mapped.
+/// @return     The new node size (Pos is updated in place to the insert
+///             position), or Capacity+1 on overflow.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+  unsigned i = Pos;
+  assert(i <= Size && Size <= N && "Invalid index");
+  assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+  // Verify the findFrom invariant.
+  assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+  assert((i == Size || !Traits::stopLess(stop(i), a)));
+  assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+  // Coalesce with previous interval.
+  if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+    Pos = i - 1;
+    // Also coalesce with next interval?
+    if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+      stop(i - 1) = stop(i);
+      this->erase(i, Size);
+      return Size - 1;
+    }
+    stop(i - 1) = b;
+    return Size;
+  }
+
+  // Detect overflow.
+  if (i == N)
+    return N + 1;
+
+  // Add new interval at end.
+  if (i == Size) {
+    start(i) = a;
+    stop(i) = b;
+    value(i) = y;
+    return Size + 1;
+  }
+
+  // Try to coalesce with following interval.
+  if (value(i) == y && Traits::adjacent(b, start(i))) {
+    start(i) = a;
+    return Size;
+  }
+
+  // We must insert before i. Detect overflow.
+  if (Size == N)
+    return N + 1;
+
+  // Insert before i.
+  this->shift(i, Size);
+  start(i) = a;
+  stop(i) = b;
+  value(i) = y;
+  return Size + 1;
+}
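+
+// For illustration (a sketch with arbitrary keys and the default integer
+// traits): in a leaf holding [10;15] -> 'a', [20;25] -> 'b', with
+// Pos = findFrom(0, 2, 16),
+//
+//   insertFrom(Pos, 2, 16, 19, 'a');  // coalesces left:  [10;19] -> 'a'
+//   insertFrom(Pos, 2, 16, 19, 'b');  // coalesces right: [16;25] -> 'b'
+//
+// Either call returns size 2, since coalescing absorbs the new interval
+// instead of growing the node.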
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMapImpl::BranchNode                        ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
+//
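+// For example (a sketch with unsigned keys): a branch over three leaf
+// subtrees whose last intervals stop at 19, 47, and 90 stores the stop keys
+// {19, 47, 90}; a lookup for x = 50 descends into the third subtree, the
+// first whose stop key is not stopLess than 50.
+//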
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+  const KeyT &stop(unsigned i) const { return this->second[i]; }
+  const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+  KeyT &stop(unsigned i) { return this->second[i]; }
+  NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+  /// findFrom - Find the first subtree after i that may contain x.
+  /// @param i    Starting index for the search.
+  /// @param Size Number of elements in node.
+  /// @param x    Key to search for.
+  /// @return     First index with !stopLess(key[i], x), or size.
+  ///             This is the first subtree that can possibly contain x.
+  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+    assert(i <= Size && Size <= N && "Bad indices");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index to findFrom is past the needed point");
+    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+    return i;
+  }
+
+  /// safeFind - Find a subtree that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is in range.
+  /// @param i Starting index for the search.
+  /// @param x Key to search for.
+  /// @return  First index with !stopLess(key[i], x), never size.
+  ///          This is the first subtree that can possibly contain x.
+  unsigned safeFind(unsigned i, KeyT x) const {
+    assert(i < N && "Bad index");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (Traits::stopLess(stop(i), x)) ++i;
+    assert(i < N && "Unsafe intervals");
+    return i;
+  }
+
+  /// safeLookup - Get the subtree containing x, assuming that x is in range.
+  /// @param x Key to search for.
+  /// @return  Subtree containing x.
+  NodeRef safeLookup(KeyT x) const {
+    return subtree(safeFind(0, x));
+  }
+
+  /// insert - Insert a new (subtree, stop) pair.
+  /// @param i    Insert position, following entries will be shifted.
+  /// @param Size Number of elements in node.
+  /// @param Node Subtree to insert.
+  /// @param Stop Last key in subtree.
+  void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+    assert(Size < N && "branch node overflow");
+    assert(i <= Size && "Bad insert position");
+    this->shift(i, Size);
+    subtree(i) = Node;
+    stop(i) = Stop;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//---                         IntervalMapImpl::Path                        ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
+//
+//===----------------------------------------------------------------------===//
+
+class Path {
+  /// Entry - Each step in the path is a node pointer and an offset into that
+  /// node.
+  struct Entry {
+    void *node;
+    unsigned size;
+    unsigned offset;
+
+    Entry(void *Node, unsigned Size, unsigned Offset)
+      : node(Node), size(Size), offset(Offset) {}
+
+    Entry(NodeRef Node, unsigned Offset)
+      : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+    NodeRef &subtree(unsigned i) const {
+      return reinterpret_cast<NodeRef*>(node)[i];
+    }
+  };
+
+  /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+  SmallVector<Entry, 4> path;
+
+public:
+  // Node accessors.
+  template <typename NodeT> NodeT &node(unsigned Level) const {
+    return *reinterpret_cast<NodeT*>(path[Level].node);
+  }
+  unsigned size(unsigned Level) const { return path[Level].size; }
+  unsigned offset(unsigned Level) const { return path[Level].offset; }
+  unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+  // Leaf accessors.
+  template <typename NodeT> NodeT &leaf() const {
+    return *reinterpret_cast<NodeT*>(path.back().node);
+  }
+  unsigned leafSize() const { return path.back().size; }
+  unsigned leafOffset() const { return path.back().offset; }
+  unsigned &leafOffset() { return path.back().offset; }
+
+  /// valid - Return true if path is at a valid node, not at end().
+  bool valid() const {
+    return !path.empty() && path.front().offset < path.front().size;
+  }
+
+  /// height - Return the height of the tree corresponding to this path.
+  /// This matches map->height in a full path.
+  unsigned height() const { return path.size() - 1; }
+
+  /// subtree - Get the subtree referenced from Level. When the path is
+  /// consistent, node(Level + 1) == subtree(Level).
+  /// @param Level 0..height-1. The leaves have no subtrees.
+  NodeRef &subtree(unsigned Level) const {
+    return path[Level].subtree(path[Level].offset);
+  }
+
+  /// reset - Reset cached information about node(Level) from subtree(Level - 1).
+  /// @param Level 1..height. The node to update after the parent node changed.
+  void reset(unsigned Level) {
+    path[Level] = Entry(subtree(Level - 1), offset(Level));
+  }
+
+  /// push - Add entry to path.
+  /// @param Node Node to add, should be subtree(path.size()-1).
+  /// @param Offset Offset into Node.
+  void push(NodeRef Node, unsigned Offset) {
+    path.push_back(Entry(Node, Offset));
+  }
+
+  /// pop - Remove the last path entry.
+  void pop() {
+    path.pop_back();
+  }
+
+  /// setSize - Set the size of a node both in the path and in the tree.
+  /// @param Level 0..height. Note that setting the root size won't change
+  ///              map->rootSize.
+  /// @param Size New node size.
+  void setSize(unsigned Level, unsigned Size) {
+    path[Level].size = Size;
+    if (Level)
+      subtree(Level - 1).setSize(Size);
+  }
+
+  /// setRoot - Clear the path and set a new root node.
+  /// @param Node New root node.
+  /// @param Size New root size.
+  /// @param Offset Offset into root node.
+  void setRoot(void *Node, unsigned Size, unsigned Offset) {
+    path.clear();
+    path.push_back(Entry(Node, Size, Offset));
+  }
+
+  /// replaceRoot - Replace the current root node with two new entries after the
+  /// tree height has increased.
+  /// @param Root The new root node.
+  /// @param Size Number of entries in the new root.
+  /// @param Offsets Offsets into the root and first branch nodes.
+  void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+  /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Left sibling, or NodeRef().
+  NodeRef getLeftSibling(unsigned Level) const;
+
+  /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+  /// unaltered.
+  /// @param Level Move node(Level).
+  void moveLeft(unsigned Level);
+
+  /// fillLeft - Grow path to Height by taking leftmost branches.
+  /// @param Height The target height.
+  void fillLeft(unsigned Height) {
+    while (height() < Height)
+      push(subtree(height()), 0);
+  }
+
+  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Right sibling, or NodeRef().
+  NodeRef getRightSibling(unsigned Level) const;
+
+  /// moveRight - Move path to the right sibling at Level. Leave nodes below
+  /// Level unaltered.
+  /// @param Level Move node(Level).
+  void moveRight(unsigned Level);
+
+  /// atBegin - Return true if path is at begin().
+  bool atBegin() const {
+    for (unsigned i = 0, e = path.size(); i != e; ++i)
+      if (path[i].offset != 0)
+        return false;
+    return true;
+  }
+
+  /// atLastEntry - Return true if the path is at the last entry of the node at
+  /// Level.
+  /// @param Level Node to examine.
+  bool atLastEntry(unsigned Level) const {
+    return path[Level].offset == path[Level].size - 1;
+  }
+
+  /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+  /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+  /// ensures that node(Level) is real by moving back to the last node at Level,
+  /// and setting offset(Level) to size(Level) if required.
+  /// @param Level The level where an insertion is about to take place.
+  void legalizeForInsert(unsigned Level) {
+    if (valid())
+      return;
+    moveLeft(Level);
+    ++path[Level].offset;
+  }
+};
+
+} // end namespace IntervalMapImpl
+
+//===----------------------------------------------------------------------===//
+//---                          IntervalMap                                ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT,
+          unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+          typename Traits = IntervalMapInfo<KeyT>>
+class IntervalMap {
+  using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
+  using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
+  using Branch =
+      IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
+  using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
+  using IdxPair = IntervalMapImpl::IdxPair;
+
+  // The RootLeaf capacity is given as a template parameter. We must compute the
+  // corresponding RootBranch capacity.
+  enum {
+    DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+      (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+    RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+  };
+
+  using RootBranch =
+      IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;
+
+  // When branched, we store a global start key as well as the branch node.
+  struct RootBranchData {
+    KeyT start;
+    RootBranch node;
+  };
+
+public:
+  using Allocator = typename Sizer::Allocator;
+  using KeyType = KeyT;
+  using ValueType = ValT;
+  using KeyTraits = Traits;
+
+private:
+  // The root data is either a RootLeaf or a RootBranchData instance.
+  AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
+
+  // Tree height.
+  // 0: Leaves in root.
+  // 1: Root points to leaf.
+  // 2: root->branch->leaf ...
+  unsigned height;
+
+  // Number of entries in the root node.
+  unsigned rootSize;
+
+  // Allocator used for creating external nodes.
+  Allocator &allocator;
+
+  /// dataAs - Represent data as a node type without breaking aliasing rules.
+  template <typename T>
+  T &dataAs() const {
+    union {
+      const char *d;
+      T *t;
+    } u;
+    u.d = data.buffer;
+    return *u.t;
+  }
+
+  const RootLeaf &rootLeaf() const {
+    assert(!branched() && "Cannot access leaf data in branched root");
+    return dataAs<RootLeaf>();
+  }
+  RootLeaf &rootLeaf() {
+    assert(!branched() && "Cannot access leaf data in branched root");
+    return dataAs<RootLeaf>();
+  }
+
+  RootBranchData &rootBranchData() const {
+    assert(branched() && "Cannot access branch data in non-branched root");
+    return dataAs<RootBranchData>();
+  }
+  RootBranchData &rootBranchData() {
+    assert(branched() && "Cannot access branch data in non-branched root");
+    return dataAs<RootBranchData>();
+  }
+
+  const RootBranch &rootBranch() const { return rootBranchData().node; }
+  RootBranch &rootBranch()             { return rootBranchData().node; }
+  KeyT rootBranchStart() const { return rootBranchData().start; }
+  KeyT &rootBranchStart()      { return rootBranchData().start; }
+
+  template <typename NodeT> NodeT *newNode() {
+    return new(allocator.template Allocate<NodeT>()) NodeT();
+  }
+
+  template <typename NodeT> void deleteNode(NodeT *P) {
+    P->~NodeT();
+    allocator.Deallocate(P);
+  }
+
+  IdxPair branchRoot(unsigned Position);
+  IdxPair splitRoot(unsigned Position);
+
+  void switchRootToBranch() {
+    rootLeaf().~RootLeaf();
+    height = 1;
+    new (&rootBranchData()) RootBranchData();
+  }
+
+  void switchRootToLeaf() {
+    rootBranchData().~RootBranchData();
+    height = 0;
+    new(&rootLeaf()) RootLeaf();
+  }
+
+  bool branched() const { return height > 0; }
+
+  ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+  void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+                  unsigned Level));
+  void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+  explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+    assert((uintptr_t(data.buffer) & (alignof(RootLeaf) - 1)) == 0 &&
+           "Insufficient alignment");
+    new(&rootLeaf()) RootLeaf();
+  }
+
+  ~IntervalMap() {
+    clear();
+    rootLeaf().~RootLeaf();
+  }
+
+  /// empty - Return true when no intervals are mapped.
+  bool empty() const {
+    return rootSize == 0;
+  }
+
+  /// start - Return the smallest mapped key in a non-empty map.
+  KeyT start() const {
+    assert(!empty() && "Empty IntervalMap has no start");
+    return !branched() ? rootLeaf().start(0) : rootBranchStart();
+  }
+
+  /// stop - Return the largest mapped key in a non-empty map.
+  KeyT stop() const {
+    assert(!empty() && "Empty IntervalMap has no stop");
+    return !branched() ? rootLeaf().stop(rootSize - 1) :
+                         rootBranch().stop(rootSize - 1);
+  }
+
+  /// lookup - Return the mapped value at x or NotFound.
+  ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+    if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+      return NotFound;
+    return branched() ? treeSafeLookup(x, NotFound) :
+                        rootLeaf().safeLookup(x, NotFound);
+  }
+
+  /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+  /// It is assumed that no key in the interval is mapped to another value, but
+  /// overlapping intervals already mapped to y will be coalesced.
+  void insert(KeyT a, KeyT b, ValT y) {
+    if (branched() || rootSize == RootLeaf::Capacity)
+      return find(a).insert(a, b, y);
+
+    // Easy insert into root leaf.
+    unsigned p = rootLeaf().findFrom(0, rootSize, a);
+    rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+  }
+
+  /// clear - Remove all entries.
+  void clear();
+
+  class const_iterator;
+  class iterator;
+  friend class const_iterator;
+  friend class iterator;
+
+  const_iterator begin() const {
+    const_iterator I(*this);
+    I.goToBegin();
+    return I;
+  }
+
+  iterator begin() {
+    iterator I(*this);
+    I.goToBegin();
+    return I;
+  }
+
+  const_iterator end() const {
+    const_iterator I(*this);
+    I.goToEnd();
+    return I;
+  }
+
+  iterator end() {
+    iterator I(*this);
+    I.goToEnd();
+    return I;
+  }
+
+  /// find - Return an iterator pointing to the first interval ending at or
+  /// after x, or end().
+  const_iterator find(KeyT x) const {
+    const_iterator I(*this);
+    I.find(x);
+    return I;
+  }
+
+  iterator find(KeyT x) {
+    iterator I(*this);
+    I.find(x);
+    return I;
+  }
+};
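+
+// A minimal usage sketch; the key and value types are arbitrary, and any
+// KeyT/ValT combination with suitable traits behaves the same way:
+//
+//   IntervalMap<unsigned, char>::Allocator Alloc;
+//   IntervalMap<unsigned, char> Map(Alloc);
+//   Map.insert(10, 20, 'a');  // map [10;20] -> 'a'
+//   Map.insert(21, 30, 'a');  // adjacent with equal values: coalesces
+//                             // into [10;30] -> 'a'
+//   char V = Map.lookup(25);  // 'a'; lookup(35) would return char()
+//   for (IntervalMap<unsigned, char>::const_iterator I = Map.begin(),
+//                                                    E = Map.end();
+//        I != E; ++I) {
+//     // Each position covers [I.start();I.stop()] -> I.value().
+//   }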
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+  assert(branched() && "treeLookup assumes a branched root");
+
+  IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+  for (unsigned h = height-1; h; --h)
+    NR = NR.get<Branch>().safeLookup(x);
+  return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+  using namespace IntervalMapImpl;
+  // How many external leaf nodes to hold RootLeaf+1?
+  const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+  // Compute element distribution among new nodes.
+  unsigned size[Nodes];
+  IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+  if (Nodes == 1)
+    size[0] = rootSize;
+  else
+    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, size,
+                           Position, true);
+
+  // Allocate new nodes.
+  unsigned pos = 0;
+  NodeRef node[Nodes];
+  for (unsigned n = 0; n != Nodes; ++n) {
+    Leaf *L = newNode<Leaf>();
+    L->copy(rootLeaf(), pos, 0, size[n]);
+    node[n] = NodeRef(L, size[n]);
+    pos += size[n];
+  }
+
+  // Destroy the old leaf node, construct branch node instead.
+  switchRootToBranch();
+  for (unsigned n = 0; n != Nodes; ++n) {
+    rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+    rootBranch().subtree(n) = node[n];
+  }
+  rootBranchStart() = node[0].template get<Leaf>().start(0);
+  rootSize = Nodes;
+  return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+  using namespace IntervalMapImpl;
+  // How many external branch nodes to hold RootBranch+1?
+  const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+  // Compute element distribution among new nodes.
+  unsigned Size[Nodes];
+  IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+  if (Nodes == 1)
+    Size[0] = rootSize;
+  else
+    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, Size,
+                           Position, true);
+
+  // Allocate new nodes.
+  unsigned Pos = 0;
+  NodeRef Node[Nodes];
+  for (unsigned n = 0; n != Nodes; ++n) {
+    Branch *B = newNode<Branch>();
+    B->copy(rootBranch(), Pos, 0, Size[n]);
+    Node[n] = NodeRef(B, Size[n]);
+    Pos += Size[n];
+  }
+
+  for (unsigned n = 0; n != Nodes; ++n) {
+    rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+    rootBranch().subtree(n) = Node[n];
+  }
+  rootSize = Nodes;
+  ++height;
+  return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+  if (!branched())
+    return;
+  SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+  // Collect level 0 nodes from the root.
+  for (unsigned i = 0; i != rootSize; ++i)
+    Refs.push_back(rootBranch().subtree(i));
+
+  // Visit all branch nodes.
+  for (unsigned h = height - 1; h; --h) {
+    for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+      for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+        NextRefs.push_back(Refs[i].subtree(j));
+      (this->*f)(Refs[i], h);
+    }
+    Refs.clear();
+    Refs.swap(NextRefs);
+  }
+
+  // Visit all leaf nodes.
+  for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+    (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+  if (Level)
+    deleteNode(&Node.get<Branch>());
+  else
+    deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+  if (branched()) {
+    visitNodes(&IntervalMap::deleteNode);
+    switchRootToLeaf();
+  }
+  rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMap::const_iterator                       ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
+  public std::iterator<std::bidirectional_iterator_tag, ValT> {
+
+protected:
+  friend class IntervalMap;
+
+  // The map referred to.
+  IntervalMap *map = nullptr;
+
+  // We store a full path from the root to the current position.
+  // The path may be partially filled, but never between iterator calls.
+  IntervalMapImpl::Path path;
+
+  explicit const_iterator(const IntervalMap &map) :
+    map(const_cast<IntervalMap*>(&map)) {}
+
+  bool branched() const {
+    assert(map && "Invalid iterator");
+    return map->branched();
+  }
+
+  void setRoot(unsigned Offset) {
+    if (branched())
+      path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+    else
+      path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+  }
+
+  void pathFillFind(KeyT x);
+  void treeFind(KeyT x);
+  void treeAdvanceTo(KeyT x);
+
+  /// unsafeStart - Writable access to start() for iterator.
+  KeyT &unsafeStart() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+                        path.leaf<RootLeaf>().start(path.leafOffset());
+  }
+
+  /// unsafeStop - Writable access to stop() for iterator.
+  KeyT &unsafeStop() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+                        path.leaf<RootLeaf>().stop(path.leafOffset());
+  }
+
+  /// unsafeValue - Writable access to value() for iterator.
+  ValT &unsafeValue() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+                        path.leaf<RootLeaf>().value(path.leafOffset());
+  }
+
+public:
+  /// const_iterator - Create an iterator that isn't pointing anywhere.
+  const_iterator() = default;
+
+  /// setMap - Change the map iterated over. This call must be followed by a
+  /// call to goToBegin(), goToEnd(), or find()
+  void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }
+
+  /// valid - Return true if the current position is valid, false for end().
+  bool valid() const { return path.valid(); }
+
+  /// atBegin - Return true if the current position is the first map entry.
+  bool atBegin() const { return path.atBegin(); }
+
+  /// start - Return the beginning of the current interval.
+  const KeyT &start() const { return unsafeStart(); }
+
+  /// stop - Return the end of the current interval.
+  const KeyT &stop() const { return unsafeStop(); }
+
+  /// value - Return the mapped value at the current interval.
+  const ValT &value() const { return unsafeValue(); }
+
+  const ValT &operator*() const { return value(); }
+
+  bool operator==(const const_iterator &RHS) const {
+    assert(map == RHS.map && "Cannot compare iterators from different maps");
+    if (!valid())
+      return !RHS.valid();
+    if (path.leafOffset() != RHS.path.leafOffset())
+      return false;
+    return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+  }
+
+  bool operator!=(const const_iterator &RHS) const {
+    return !operator==(RHS);
+  }
+
+  /// goToBegin - Move to the first interval in map.
+  void goToBegin() {
+    setRoot(0);
+    if (branched())
+      path.fillLeft(map->height);
+  }
+
+  /// goToEnd - Move beyond the last interval in map.
+  void goToEnd() {
+    setRoot(map->rootSize);
+  }
+
+  /// preincrement - move to the next interval.
+  const_iterator &operator++() {
+    assert(valid() && "Cannot increment end()");
+    if (++path.leafOffset() == path.leafSize() && branched())
+      path.moveRight(map->height);
+    return *this;
+  }
+
+  /// postincrement - Don't do that!
+  const_iterator operator++(int) {
+    const_iterator tmp = *this;
+    operator++();
+    return tmp;
+  }
+
+  /// predecrement - move to the previous interval.
+  const_iterator &operator--() {
+    if (path.leafOffset() && (valid() || !branched()))
+      --path.leafOffset();
+    else
+      path.moveLeft(map->height);
+    return *this;
+  }
+
+  /// postdecrement - Don't do that!
+  const_iterator operator--(int) {
+    const_iterator tmp = *this;
+    operator--();
+    return tmp;
+  }
+
+  /// find - Move to the first interval with stop >= x, or end().
+  /// This is a full search from the root, the current position is ignored.
+  void find(KeyT x) {
+    if (branched())
+      treeFind(x);
+    else
+      setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+  }
+
+  /// advanceTo - Move to the first interval with stop >= x, or end().
+  /// The search is started from the current position, and no earlier positions
+  /// can be found. This is much faster than find() for small moves.
+  void advanceTo(KeyT x) {
+    if (!valid())
+      return;
+    if (branched())
+      treeAdvanceTo(x);
+    else
+      path.leafOffset() =
+        map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+  }
+};
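+
+// A sketch contrasting find() and advanceTo(); Map, FirstKey, and NextKey
+// are placeholders. When scanning monotonically increasing keys, prefer
+// advanceTo(), which continues from the current position instead of
+// searching from the root:
+//
+//   IntervalMap<unsigned, char>::const_iterator I = Map.find(FirstKey);
+//   while (I.valid()) {
+//     // Use [I.start();I.stop()] -> I.value(), then move forward.
+//     I.advanceTo(NextKey);  // NextKey must not precede the current position
+//   }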
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+  IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+  for (unsigned i = map->height - path.height() - 1; i; --i) {
+    unsigned p = NR.get<Branch>().safeFind(0, x);
+    path.push(NR, p);
+    NR = NR.subtree(p);
+  }
+  path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+  setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+  if (valid())
+    pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+  // Can we stay on the same leaf node?
+  if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+    path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+    return;
+  }
+
+  // Drop the current leaf.
+  path.pop();
+
+  // Search towards the root for a usable subtree.
+  if (path.height()) {
+    for (unsigned l = path.height() - 1; l; --l) {
+      if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+        // The branch node at l+1 is usable
+        path.offset(l + 1) =
+          path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+        return pathFillFind(x);
+      }
+      path.pop();
+    }
+    // Is the level-1 Branch usable?
+    if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+      path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+      return pathFillFind(x);
+    }
+  }
+
+  // We reached the root.
+  setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+  if (valid())
+    pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//---                       IntervalMap::iterator                         ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+  friend class IntervalMap;
+
+  using IdxPair = IntervalMapImpl::IdxPair;
+
+  explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+  void setNodeStop(unsigned Level, KeyT Stop);
+  bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+  template <typename NodeT> bool overflow(unsigned Level);
+  void treeInsert(KeyT a, KeyT b, ValT y);
+  void eraseNode(unsigned Level);
+  void treeErase(bool UpdateRoot = true);
+  bool canCoalesceLeft(KeyT Start, ValT x);
+  bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+  /// iterator - Create null iterator.
+  iterator() = default;
+
+  /// setStart - Move the start of the current interval.
+  /// This may cause coalescing with the previous interval.
+  /// @param a New start key, must not overlap the previous interval.
+  void setStart(KeyT a);
+
+  /// setStop - Move the end of the current interval.
+  /// This may cause coalescing with the following interval.
+  /// @param b New stop key, must not overlap the following interval.
+  void setStop(KeyT b);
+
+  /// setValue - Change the mapped value of the current interval.
+  /// This may cause coalescing with the previous and following intervals.
+  /// @param x New value.
+  void setValue(ValT x);
+
+  /// setStartUnchecked - Move the start of the current interval without
+  /// checking for coalescing or overlaps.
+  /// This should only be used when it is known that coalescing is not required.
+  /// @param a New start key.
+  void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+  /// setStopUnchecked - Move the end of the current interval without checking
+  /// for coalescing or overlaps.
+  /// This should only be used when it is known that coalescing is not required.
+  /// @param b New stop key.
+  void setStopUnchecked(KeyT b) {
+    this->unsafeStop() = b;
+    // Update keys in branch nodes as well.
+    if (this->path.atLastEntry(this->path.height()))
+      setNodeStop(this->path.height(), b);
+  }
+
+  /// setValueUnchecked - Change the mapped value of the current interval
+  /// without checking for coalescing.
+  /// @param x New value.
+  void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+  /// insert - Insert mapping [a;b] -> y before the current position.
+  void insert(KeyT a, KeyT b, ValT y);
+
+  /// erase - Erase the current interval.
+  void erase();
+
+  iterator &operator++() {
+    const_iterator::operator++();
+    return *this;
+  }
+
+  iterator operator++(int) {
+    iterator tmp = *this;
+    operator++();
+    return tmp;
+  }
+
+  iterator &operator--() {
+    const_iterator::operator--();
+    return *this;
+  }
+
+  iterator operator--(int) {
+    iterator tmp = *this;
+    operator--();
+    return tmp;
+  }
+};
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  if (!this->branched()) {
+    unsigned i = P.leafOffset();
+    RootLeaf &Node = P.leaf<RootLeaf>();
+    return i && Node.value(i-1) == Value &&
+                Traits::adjacent(Node.stop(i-1), Start);
+  }
+  // Branched.
+  if (unsigned i = P.leafOffset()) {
+    Leaf &Node = P.leaf<Leaf>();
+    return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+  } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+    unsigned i = NR.size() - 1;
+    Leaf &Node = NR.get<Leaf>();
+    return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+  }
+  return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  unsigned i = P.leafOffset() + 1;
+  if (!this->branched()) {
+    if (i >= P.leafSize())
+      return false;
+    RootLeaf &Node = P.leaf<RootLeaf>();
+    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+  }
+  // Branched.
+  if (i < P.leafSize()) {
+    Leaf &Node = P.leaf<Leaf>();
+    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+  } else if (NodeRef NR = P.getRightSibling(P.height())) {
+    Leaf &Node = NR.get<Leaf>();
+    return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+  }
+  return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+  // There are no references to the root node, so nothing to update.
+  if (!Level)
+    return;
+  IntervalMapImpl::Path &P = this->path;
+  // Update nodes pointing to the current node.
+  while (--Level) {
+    P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+    if (!P.atLastEntry(Level))
+      return;
+  }
+  // Update root separately since it has a different layout.
+  P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+  assert(Traits::nonEmpty(a, this->stop()) && "Cannot move start beyond stop");
+  KeyT &CurStart = this->unsafeStart();
+  if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+    CurStart = a;
+    return;
+  }
+  // Coalesce with the interval to the left.
+  --*this;
+  a = this->start();
+  erase();
+  setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+  assert(Traits::nonEmpty(this->start(), b) && "Cannot move stop beyond start");
+  if (Traits::startLess(b, this->stop()) ||
+      !canCoalesceRight(b, this->value())) {
+    setStopUnchecked(b);
+    return;
+  }
+  // Coalesce with interval to the right.
+  KeyT a = this->start();
+  erase();
+  setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+  setValueUnchecked(x);
+  if (canCoalesceRight(this->stop(), x)) {
+    KeyT a = this->start();
+    erase();
+    setStartUnchecked(a);
+  }
+  if (canCoalesceLeft(this->start(), x)) {
+    --*this;
+    KeyT a = this->start();
+    erase();
+    setStartUnchecked(a);
+  }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+  assert(Level && "Cannot insert next to the root");
+  bool SplitRoot = false;
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  if (Level == 1) {
+    // Insert into the root branch node.
+    if (IM.rootSize < RootBranch::Capacity) {
+      IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+      P.setSize(0, ++IM.rootSize);
+      P.reset(Level);
+      return SplitRoot;
+    }
+
+    // We need to split the root while keeping our position.
+    SplitRoot = true;
+    IdxPair Offset = IM.splitRoot(P.offset(0));
+    P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+    // Fall through to insert at the new higher level.
+    ++Level;
+  }
+
+  // When inserting before end(), make sure we have a valid path.
+  P.legalizeForInsert(--Level);
+
+  // Insert into the branch node at Level-1.
+  if (P.size(Level) == Branch::Capacity) {
+    // Branch node is full, handle the overflow.
+    assert(!SplitRoot && "Cannot overflow after splitting the root");
+    SplitRoot = overflow<Branch>(Level);
+    Level += SplitRoot;
+  }
+  P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+  P.setSize(Level, P.size(Level) + 1);
+  if (P.atLastEntry(Level))
+    setNodeStop(Level, Stop);
+  P.reset(Level + 1);
+  return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+  if (this->branched())
+    return treeInsert(a, b, y);
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  // Try simple root leaf insert.
+  unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+  // Was the root node insert successful?
+  if (Size <= RootLeaf::Capacity) {
+    P.setSize(0, IM.rootSize = Size);
+    return;
+  }
+
+  // Root leaf node is full, we must branch.
+  IdxPair Offset = IM.branchRoot(P.leafOffset());
+  P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+  // Now it fits in the new leaf.
+  treeInsert(a, b, y);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+
+  if (!P.valid())
+    P.legalizeForInsert(this->map->height);
+
+  // Check if this insertion will extend the node to the left.
+  if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+    // Node is growing to the left, will it affect a left sibling node?
+    if (NodeRef Sib = P.getLeftSibling(P.height())) {
+      Leaf &SibLeaf = Sib.get<Leaf>();
+      unsigned SibOfs = Sib.size() - 1;
+      if (SibLeaf.value(SibOfs) == y &&
+          Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+        // This insertion will coalesce with the last entry in SibLeaf. We can
+        // handle it in two ways:
+        //  1. Extend SibLeaf.stop to b and be done, or
+        //  2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+        // We prefer 1., but need 2. when coalescing to the right as well.
+        Leaf &CurLeaf = P.leaf<Leaf>();
+        P.moveLeft(P.height());
+        if (Traits::stopLess(b, CurLeaf.start(0)) &&
+            (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+          // Easy, just extend SibLeaf and we're done.
+          setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+          return;
+        } else {
+          // We have both left and right coalescing. Erase the old SibLeaf entry
+          // and continue inserting the larger interval.
+          a = SibLeaf.start(SibOfs);
+          treeErase(/* UpdateRoot= */false);
+        }
+      }
+    } else {
+      // No left sibling means we are at begin(). Update cached bound.
+      this->map->rootBranchStart() = a;
+    }
+  }
+
+  // When we are inserting at the end of a leaf node, we must update stops.
+  unsigned Size = P.leafSize();
+  bool Grow = P.leafOffset() == Size;
+  Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+  // Leaf insertion unsuccessful? Overflow and try again.
+  if (Size > Leaf::Capacity) {
+    overflow<Leaf>(P.height());
+    Grow = P.leafOffset() == P.leafSize();
+    Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+    assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+  }
+
+  // Inserted, update offset and leaf size.
+  P.setSize(P.height(), Size);
+
+  // Insert was the last node entry, update stops.
+  if (Grow)
+    setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+  assert(P.valid() && "Cannot erase end()");
+  if (this->branched())
+    return treeErase();
+  IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+  P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+  Leaf &Node = P.leaf<Leaf>();
+
+  // Nodes are not allowed to become empty.
+  if (P.leafSize() == 1) {
+    IM.deleteNode(&Node);
+    eraseNode(IM.height);
+    // Update rootBranchStart if we erased begin().
+    if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+      IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+    return;
+  }
+
+  // Erase current entry.
+  Node.erase(P.leafOffset(), P.leafSize());
+  unsigned NewSize = P.leafSize() - 1;
+  P.setSize(IM.height, NewSize);
+  // When we erase the last entry, update stop and move to a legal position.
+  if (P.leafOffset() == NewSize) {
+    setNodeStop(IM.height, Node.stop(NewSize - 1));
+    P.moveRight(IM.height);
+  } else if (UpdateRoot && P.atBegin())
+    IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+  assert(Level && "Cannot erase root node");
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  if (--Level == 0) {
+    IM.rootBranch().erase(P.offset(0), IM.rootSize);
+    P.setSize(0, --IM.rootSize);
+    // If this cleared the root, switch to height=0.
+    if (IM.empty()) {
+      IM.switchRootToLeaf();
+      this->setRoot(0);
+      return;
+    }
+  } else {
+    // Remove node ref from branch node at Level.
+    Branch &Parent = P.node<Branch>(Level);
+    if (P.size(Level) == 1) {
+      // Branch node became empty, remove it recursively.
+      IM.deleteNode(&Parent);
+      eraseNode(Level);
+    } else {
+      // Branch node won't become empty.
+      Parent.erase(P.offset(Level), P.size(Level));
+      unsigned NewSize = P.size(Level) - 1;
+      P.setSize(Level, NewSize);
+      // If we removed the last branch, update stop and move to a legal pos.
+      if (P.offset(Level) == NewSize) {
+        setNodeStop(Level, Parent.stop(NewSize - 1));
+        P.moveRight(Level);
+      }
+    }
+  }
+  // Update path cache for the new right sibling position.
+  if (P.valid()) {
+    P.reset(Level + 1);
+    P.offset(Level + 1) = 0;
+  }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @tparam NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  unsigned CurSize[4];
+  NodeT *Node[4];
+  unsigned Nodes = 0;
+  unsigned Elements = 0;
+  unsigned Offset = P.offset(Level);
+
+  // Do we have a left sibling?
+  NodeRef LeftSib = P.getLeftSibling(Level);
+  if (LeftSib) {
+    Offset += Elements = CurSize[Nodes] = LeftSib.size();
+    Node[Nodes++] = &LeftSib.get<NodeT>();
+  }
+
+  // Current node.
+  Elements += CurSize[Nodes] = P.size(Level);
+  Node[Nodes++] = &P.node<NodeT>(Level);
+
+  // Do we have a right sibling?
+  NodeRef RightSib = P.getRightSibling(Level);
+  if (RightSib) {
+    Elements += CurSize[Nodes] = RightSib.size();
+    Node[Nodes++] = &RightSib.get<NodeT>();
+  }
+
+  // Do we need to allocate a new node?
+  unsigned NewNode = 0;
+  if (Elements + 1 > Nodes * NodeT::Capacity) {
+    // Insert NewNode at the penultimate position, or after a single node.
+    NewNode = Nodes == 1 ? 1 : Nodes - 1;
+    CurSize[Nodes] = CurSize[NewNode];
+    Node[Nodes] = Node[NewNode];
+    CurSize[NewNode] = 0;
+    Node[NewNode] = this->map->template newNode<NodeT>();
+    ++Nodes;
+  }
+
+  // Compute the new element distribution.
+  unsigned NewSize[4];
+  IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+                                 CurSize, NewSize, Offset, true);
+  adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+  // Move current location to the leftmost node.
+  if (LeftSib)
+    P.moveLeft(Level);
+
+  // Elements have been rearranged, now update node sizes and stops.
+  bool SplitRoot = false;
+  unsigned Pos = 0;
+  while (true) {
+    KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+    if (NewNode && Pos == NewNode) {
+      SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+      Level += SplitRoot;
+    } else {
+      P.setSize(Level, NewSize[Pos]);
+      setNodeStop(Level, Stop);
+    }
+    if (Pos + 1 == Nodes)
+      break;
+    P.moveRight(Level);
+    ++Pos;
+  }
+
+  // Where was I? Find NewOffset.
+  while (Pos != NewOffset.first) {
+    P.moveLeft(Level);
+    --Pos;
+  }
+  P.offset(Level) = NewOffset.second;
+  return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//---                       IntervalMapOverlaps                           ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+///    bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+///    for (IntervalMapOverlaps I(a, b); I.valid(); ++I) { ... }
+///
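+/// 3. Read the bounds and values of each overlap (a sketch; MapT and
+///    visitOverlap are placeholders):
+///    for (IntervalMapOverlaps<MapT, MapT> I(a, b); I.valid(); ++I)
+///      visitOverlap(I.start(), I.stop(), I.a().value(), I.b().value());
+///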
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+  using KeyType = typename MapA::KeyType;
+  using Traits = typename MapA::KeyTraits;
+
+  typename MapA::const_iterator posA;
+  typename MapB::const_iterator posB;
+
+  /// advance - Move posA and posB forward until reaching an overlap, or until
+  /// either meets end.
+  /// Don't move the iterators if they are already overlapping.
+  void advance() {
+    if (!valid())
+      return;
+
+    if (Traits::stopLess(posA.stop(), posB.start())) {
+      // A ends before B begins. Catch up.
+      posA.advanceTo(posB.start());
+      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+        return;
+    } else if (Traits::stopLess(posB.stop(), posA.start())) {
+      // B ends before A begins. Catch up.
+      posB.advanceTo(posA.start());
+      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+        return;
+    } else
+      // Already overlapping.
+      return;
+
+    while (true) {
+      // Make a.end > b.start.
+      posA.advanceTo(posB.start());
+      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+        return;
+      // Make b.end > a.start.
+      posB.advanceTo(posA.start());
+      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+        return;
+    }
+  }
+
+public:
+  /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+  IntervalMapOverlaps(const MapA &a, const MapB &b)
+    : posA(b.empty() ? a.end() : a.find(b.start())),
+      posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+  /// valid - Return true if iterator is at an overlap.
+  bool valid() const {
+    return posA.valid() && posB.valid();
+  }
+
+  /// a - access the left hand side in the overlap.
+  const typename MapA::const_iterator &a() const { return posA; }
+
+  /// b - access the right hand side in the overlap.
+  const typename MapB::const_iterator &b() const { return posB; }
+
+  /// start - Beginning of the overlapping interval.
+  KeyType start() const {
+    KeyType ak = a().start();
+    KeyType bk = b().start();
+    return Traits::startLess(ak, bk) ? bk : ak;
+  }
+
+  /// stop - End of the overlapping interval.
+  KeyType stop() const {
+    KeyType ak = a().stop();
+    KeyType bk = b().stop();
+    return Traits::startLess(ak, bk) ? ak : bk;
+  }
+
+  /// skipA - Move to the next overlap that doesn't involve a().
+  void skipA() {
+    ++posA;
+    advance();
+  }
+
+  /// skipB - Move to the next overlap that doesn't involve b().
+  void skipB() {
+    ++posB;
+    advance();
+  }
+
+  /// Preincrement - Move to the next overlap.
+  IntervalMapOverlaps &operator++() {
+    // Bump the iterator that ends first. The other one may have more overlaps.
+    if (Traits::startLess(posB.stop(), posA.stop()))
+      skipB();
+    else
+      skipA();
+    return *this;
+  }
+
+  /// advanceTo - Move to the first overlapping interval with
+  /// stopLess(x, stop()).
+  void advanceTo(KeyType x) {
+    if (!valid())
+      return;
+    // Make sure advanceTo sees monotonic keys.
+    if (Traits::stopLess(posA.stop(), x))
+      posA.advanceTo(x);
+    if (Traits::stopLess(posB.stop(), x))
+      posB.advanceTo(x);
+    advance();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTERVALMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
new file mode 100644
index 0000000..430ef86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -0,0 +1,270 @@
+//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RefCountedBase, ThreadSafeRefCountedBase, and
+// IntrusiveRefCntPtr classes.
+//
+// IntrusiveRefCntPtr is a smart pointer to an object which maintains a
+// reference count.  (ThreadSafe)RefCountedBase is a mixin class that adds a
+// refcount member variable and methods for updating the refcount.  An object
+// that inherits from (ThreadSafe)RefCountedBase deletes itself when its
+// refcount hits zero.
+//
+// For example:
+//
+//   class MyClass : public RefCountedBase<MyClass> {};
+//
+//   void foo() {
+//     // Constructing an IntrusiveRefCntPtr increases the pointee's refcount by
+//     // 1 (from 0 in this case).
+//     IntrusiveRefCntPtr<MyClass> Ptr1(new MyClass());
+//
+//     // Copying an IntrusiveRefCntPtr increases the pointee's refcount by 1.
+//     IntrusiveRefCntPtr<MyClass> Ptr2(Ptr1);
+//
+//     // Moving an IntrusiveRefCntPtr has no effect on the object's
+//     // refcount.  After a move, the moved-from pointer is null.
+//     IntrusiveRefCntPtr<MyClass> Ptr3(std::move(Ptr1));
+//     assert(Ptr1 == nullptr);
+//
+//     // Clearing an IntrusiveRefCntPtr decreases the pointee's refcount by 1.
+//     Ptr2.reset();
+//
+//     // The object deletes itself when we return from the function, because
+//     // Ptr3's destructor decrements its refcount to 0.
+//   }
+//
+// You can use IntrusiveRefCntPtr with isa<T>(), dyn_cast<T>(), etc.:
+//
+//   IntrusiveRefCntPtr<MyClass> Ptr(new MyClass());
+//   OtherClass *Other = dyn_cast<OtherClass>(Ptr);  // Ptr.get() not required
+//
+// IntrusiveRefCntPtr works with any class that
+//
+//  - inherits from (ThreadSafe)RefCountedBase,
+//  - has Retain() and Release() methods, or
+//  - specializes IntrusiveRefCntPtrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
+#define LLVM_ADT_INTRUSIVEREFCNTPTR_H
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+/// A CRTP mixin class that adds reference counting to a type.
+///
+/// The lifetime of an object which inherits from RefCountedBase is managed by
+/// calls to Release() and Retain(), which increment and decrement the object's
+/// refcount, respectively.  When a Release() call decrements the refcount to 0,
+/// the object deletes itself.
+template <class Derived> class RefCountedBase {
+  mutable unsigned RefCount = 0;
+
+public:
+  RefCountedBase() = default;
+  RefCountedBase(const RefCountedBase &) {}
+
+  void Retain() const { ++RefCount; }
+
+  void Release() const {
+    assert(RefCount > 0 && "Reference count is already zero.");
+    if (--RefCount == 0)
+      delete static_cast<const Derived *>(this);
+  }
+};
+
+/// A thread-safe version of \c RefCountedBase.
+template <class Derived> class ThreadSafeRefCountedBase {
+  mutable std::atomic<int> RefCount;
+
+protected:
+  ThreadSafeRefCountedBase() : RefCount(0) {}
+
+public:
+  void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }
+
+  void Release() const {
+    int NewRefCount = RefCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
+    assert(NewRefCount >= 0 && "Reference count was already zero.");
+    if (NewRefCount == 0)
+      delete static_cast<const Derived *>(this);
+  }
+};
+
+/// Class you can specialize to provide custom retain/release functionality for
+/// a type.
+///
+/// Usually specializing this class is not necessary, as IntrusiveRefCntPtr
+/// works with any type which defines Retain() and Release() functions -- you
+/// can define those functions yourself if RefCountedBase doesn't work for you.
+///
+/// One case when you might want to specialize this type is if you have
+///  - Foo.h defines type Foo and includes Bar.h, and
+///  - Bar.h uses IntrusiveRefCntPtr<Foo> in inline functions.
+///
+/// Because Foo.h includes Bar.h, Bar.h can't include Foo.h in order to pull in
+/// the declaration of Foo.  Without the declaration of Foo, normally Bar.h
+/// wouldn't be able to use IntrusiveRefCntPtr<Foo>, which wants to call
+/// T::Retain and T::Release.
+///
+/// To resolve this, Bar.h could include a third header, FooFwd.h, which
+/// forward-declares Foo and specializes IntrusiveRefCntPtrInfo<Foo>.  Then
+/// Bar.h could use IntrusiveRefCntPtr<Foo>, although it still couldn't call any
+/// functions on Foo itself, because Foo would be an incomplete type.
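+///
+/// A sketch of such a FooFwd.h (Foo is the same placeholder as above; the
+/// member definitions would live in a file where Foo is complete):
+///
+///   class Foo;
+///   template <typename T> struct IntrusiveRefCntPtrInfo;
+///   template <> struct IntrusiveRefCntPtrInfo<Foo> {
+///     static void retain(Foo *obj);   // defined where Foo is complete
+///     static void release(Foo *obj);  // defined where Foo is complete
+///   };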
+template <typename T> struct IntrusiveRefCntPtrInfo {
+  static void retain(T *obj) { obj->Retain(); }
+  static void release(T *obj) { obj->Release(); }
+};
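+
+// For illustration, a hypothetical FooFwd.h applying the pattern above (Foo,
+// retainFoo, and releaseFoo are assumed names, not part of this header):
+//
+//   class Foo;
+//   void retainFoo(Foo *F);    // Defined where Foo is a complete type.
+//   void releaseFoo(Foo *F);
+//   namespace llvm {
+//   template <> struct IntrusiveRefCntPtrInfo<Foo> {
+//     static void retain(Foo *obj) { retainFoo(obj); }
+//     static void release(Foo *obj) { releaseFoo(obj); }
+//   };
+//   } // end namespace llvm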
+
+/// A smart pointer to a reference-counted object that inherits from
+/// RefCountedBase or ThreadSafeRefCountedBase.
+///
+/// This class increments its pointee's reference count when it is created, and
+/// decrements its refcount when it's destroyed (or is changed to point to a
+/// different object).
+template <typename T> class IntrusiveRefCntPtr {
+  T *Obj = nullptr;
+
+public:
+  using element_type = T;
+
+  explicit IntrusiveRefCntPtr() = default;
+  IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
+  IntrusiveRefCntPtr(const IntrusiveRefCntPtr &S) : Obj(S.Obj) { retain(); }
+  IntrusiveRefCntPtr(IntrusiveRefCntPtr &&S) : Obj(S.Obj) { S.Obj = nullptr; }
+
+  template <class X>
+  IntrusiveRefCntPtr(IntrusiveRefCntPtr<X> &&S) : Obj(S.get()) {
+    S.Obj = nullptr;
+  }
+
+  template <class X>
+  IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X> &S) : Obj(S.get()) {
+    retain();
+  }
+
+  ~IntrusiveRefCntPtr() { release(); }
+
+  IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
+    swap(S);
+    return *this;
+  }
+
+  T &operator*() const { return *Obj; }
+  T *operator->() const { return Obj; }
+  T *get() const { return Obj; }
+  explicit operator bool() const { return Obj; }
+
+  void swap(IntrusiveRefCntPtr &other) {
+    T *tmp = other.Obj;
+    other.Obj = Obj;
+    Obj = tmp;
+  }
+
+  void reset() {
+    release();
+    Obj = nullptr;
+  }
+
+  void resetWithoutRelease() { Obj = nullptr; }
+
+private:
+  void retain() {
+    if (Obj)
+      IntrusiveRefCntPtrInfo<T>::retain(Obj);
+  }
+
+  void release() {
+    if (Obj)
+      IntrusiveRefCntPtrInfo<T>::release(Obj);
+  }
+
+  template <typename X> friend class IntrusiveRefCntPtr;
+};
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A,
+                       const IntrusiveRefCntPtr<U> &B) {
+  return A.get() == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A,
+                       const IntrusiveRefCntPtr<U> &B) {
+  return A.get() != B.get();
+}
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A, U *B) {
+  return A.get() == B;
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A, U *B) {
+  return A.get() != B;
+}
+
+template <class T, class U>
+inline bool operator==(T *A, const IntrusiveRefCntPtr<U> &B) {
+  return A == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(T *A, const IntrusiveRefCntPtr<U> &B) {
+  return A != B.get();
+}
+
+template <class T>
+bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+  return !B;
+}
+
+template <class T>
+bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+  return B == A;
+}
+
+template <class T>
+bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+  return !(A == B);
+}
+
+template <class T>
+bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+  return !(A == B);
+}
+
+// Make IntrusiveRefCntPtr work with dyn_cast, isa, and the other idioms from
+// Casting.h.
+template <typename From> struct simplify_type;
+
+template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
+  using SimpleType = T *;
+
+  static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
+    return Val.get();
+  }
+};
+
+template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
+  using SimpleType = /*const*/ T *;
+
+  static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
+    return Val.get();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
diff --git a/linux-x64/clang/include/llvm/ADT/MapVector.h b/linux-x64/clang/include/llvm/ADT/MapVector.h
new file mode 100644
index 0000000..f69f8fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/MapVector.h
@@ -0,0 +1,236 @@
+//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a map that provides insertion order iteration. The
+// interface is purposefully minimal. The key is assumed to be cheap to copy
+// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
+// a std::vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_MAPVECTOR_H
+#define LLVM_ADT_MAPVECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// This class implements a map that also provides access to all stored values
+/// in a deterministic order. The values are kept in a std::vector and the
+/// mapping is done with DenseMap from Keys to indexes in that vector.
+template<typename KeyT, typename ValueT,
+         typename MapType = DenseMap<KeyT, unsigned>,
+         typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
+class MapVector {
+  MapType Map;
+  VectorType Vector;
+
+public:
+  using value_type = typename VectorType::value_type;
+  using size_type = typename VectorType::size_type;
+
+  using iterator = typename VectorType::iterator;
+  using const_iterator = typename VectorType::const_iterator;
+  using reverse_iterator = typename VectorType::reverse_iterator;
+  using const_reverse_iterator = typename VectorType::const_reverse_iterator;
+
+  /// Clear the MapVector and return the underlying vector.
+  VectorType takeVector() {
+    Map.clear();
+    return std::move(Vector);
+  }
+
+  size_type size() const { return Vector.size(); }
+
+  /// Grow the MapVector so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_type NumEntries) {
+    Map.reserve(NumEntries);
+    Vector.reserve(NumEntries);
+  }
+
+  iterator begin() { return Vector.begin(); }
+  const_iterator begin() const { return Vector.begin(); }
+  iterator end() { return Vector.end(); }
+  const_iterator end() const { return Vector.end(); }
+
+  reverse_iterator rbegin() { return Vector.rbegin(); }
+  const_reverse_iterator rbegin() const { return Vector.rbegin(); }
+  reverse_iterator rend() { return Vector.rend(); }
+  const_reverse_iterator rend() const { return Vector.rend(); }
+
+  bool empty() const {
+    return Vector.empty();
+  }
+
+  std::pair<KeyT, ValueT>       &front()       { return Vector.front(); }
+  const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
+  std::pair<KeyT, ValueT>       &back()        { return Vector.back(); }
+  const std::pair<KeyT, ValueT> &back()  const { return Vector.back(); }
+
+  void clear() {
+    Map.clear();
+    Vector.clear();
+  }
+
+  void swap(MapVector &RHS) {
+    std::swap(Map, RHS.Map);
+    std::swap(Vector, RHS.Vector);
+  }
+
+  ValueT &operator[](const KeyT &Key) {
+    std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::make_pair(Key, ValueT()));
+      I = Vector.size() - 1;
+    }
+    return Vector[I].second;
+  }
+
+  // Returns a copy of the value.  Only allowed if ValueT is copyable.
+  ValueT lookup(const KeyT &Key) const {
+    static_assert(std::is_copy_constructible<ValueT>::value,
+                  "Cannot call lookup() if ValueT is not copyable.");
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? ValueT() : Vector[Pos->second].second;
+  }
+
+  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::make_pair(KV.first, KV.second));
+      I = Vector.size() - 1;
+      return std::make_pair(std::prev(end()), true);
+    }
+    return std::make_pair(begin() + I, false);
+  }
+
+  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+    // Copy KV.first into the map, then move it into the vector.
+    std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::move(KV));
+      I = Vector.size() - 1;
+      return std::make_pair(std::prev(end()), true);
+    }
+    return std::make_pair(begin() + I, false);
+  }
+
+  size_type count(const KeyT &Key) const {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? 0 : 1;
+  }
+
+  iterator find(const KeyT &Key) {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? Vector.end()
+                            : (Vector.begin() + Pos->second);
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? Vector.end()
+                            : (Vector.begin() + Pos->second);
+  }
+
+  /// \brief Remove the last element from the vector.
+  void pop_back() {
+    typename MapType::iterator Pos = Map.find(Vector.back().first);
+    Map.erase(Pos);
+    Vector.pop_back();
+  }
+
+  /// \brief Remove the element given by Iterator.
+  ///
+  /// Returns an iterator to the element following the one which was removed,
+  /// which may be end().
+  ///
+  /// \note This is a deceptively expensive operation (linear time).  It's
+  /// usually better to use \a remove_if() if possible.
+  typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
+    Map.erase(Iterator->first);
+    auto Next = Vector.erase(Iterator);
+    if (Next == Vector.end())
+      return Next;
+
+    // Update indices in the map.
+    size_t Index = Next - Vector.begin();
+    for (auto &I : Map) {
+      assert(I.second != Index && "Index was already erased!");
+      if (I.second > Index)
+        --I.second;
+    }
+    return Next;
+  }
+
+  /// \brief Remove all elements with the key value Key.
+  ///
+  /// Returns the number of elements removed.
+  size_type erase(const KeyT &Key) {
+    auto Iterator = find(Key);
+    if (Iterator == end())
+      return 0;
+    erase(Iterator);
+    return 1;
+  }
+
+  /// \brief Remove the elements that match the predicate.
+  ///
+  /// Erase all elements that match \c Pred in a single pass.  Takes linear
+  /// time.
+  template <class Predicate> void remove_if(Predicate Pred);
+};
+
+template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
+template <class Predicate>
+void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Predicate Pred) {
+  auto O = Vector.begin();
+  for (auto I = O, E = Vector.end(); I != E; ++I) {
+    if (Pred(*I)) {
+      // Erase from the map.
+      Map.erase(I->first);
+      continue;
+    }
+
+    if (I != O) {
+      // Move the value and update the index in the map.
+      *O = std::move(*I);
+      Map[O->first] = O - Vector.begin();
+    }
+    ++O;
+  }
+  // Erase trailing entries in the vector.
+  Vector.erase(O, Vector.end());
+}
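+
+// A brief usage sketch (illustrative values only, not part of the upstream
+// header). Iteration visits entries in insertion order:
+//
+//   MapVector<int, int> MV;
+//   MV.insert(std::make_pair(3, 9));
+//   MV[1] = 1;                          // Default-constructs, then assigns.
+//   for (const auto &KV : MV)           // Keys in order: 3, then 1.
+//     (void)KV.second;
+//   MV.remove_if([](const std::pair<int, int> &KV) { return KV.second > 4; });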
+
+/// \brief A MapVector that performs no allocations if smaller than a certain
+/// size.
+template <typename KeyT, typename ValueT, unsigned N>
+struct SmallMapVector
+    : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
+                SmallVector<std::pair<KeyT, ValueT>, N>> {
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_MAPVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/None.h b/linux-x64/clang/include/llvm/ADT/None.h
new file mode 100644
index 0000000..c7a99c6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/None.h
@@ -0,0 +1,27 @@
+//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file provides None, an enumerator for use in implicit constructors
+//  of various (usually templated) types to make such construction more
+//  terse.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NONE_H
+#define LLVM_ADT_NONE_H
+
+namespace llvm {
+/// \brief A simple null object to allow implicit construction of Optional<T>
+/// and similar types without having to spell out the specialization's name.
+// (constant value 1 in an attempt to work around an MSVC build issue... )
+enum class NoneType { None = 1 };
+const NoneType None = NoneType::None;
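+
+// For example (an illustrative sketch; Optional itself lives in Optional.h,
+// and parseDigit is a hypothetical function):
+//
+//   Optional<int> parseDigit(char C) {
+//     if (C < '0' || C > '9')
+//       return None;    // No value, without spelling out Optional<int>().
+//     return C - '0';
+//   }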
+} // end namespace llvm
+
+#endif // LLVM_ADT_NONE_H
diff --git a/linux-x64/clang/include/llvm/ADT/Optional.h b/linux-x64/clang/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000..353e5d0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Optional.h
@@ -0,0 +1,346 @@
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file provides Optional, a template class modeled in the spirit of
+//  OCaml's 'opt' variant.  The idea is to strongly type whether or not
+//  a value can be optional.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+namespace optional_detail {
+/// Storage for any type.
+template <typename T, bool IsPodLike> struct OptionalStorage {
+  AlignedCharArrayUnion<T> storage;
+  bool hasVal = false;
+
+  OptionalStorage() = default;
+
+  OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
+  OptionalStorage(const OptionalStorage &O) : hasVal(O.hasVal) {
+    if (hasVal)
+      new (storage.buffer) T(*O.getPointer());
+  }
+  OptionalStorage(T &&y) : hasVal(true) {
+    new (storage.buffer) T(std::forward<T>(y));
+  }
+  OptionalStorage(OptionalStorage &&O) : hasVal(O.hasVal) {
+    if (O.hasVal) {
+      new (storage.buffer) T(std::move(*O.getPointer()));
+    }
+  }
+
+  OptionalStorage &operator=(T &&y) {
+    if (hasVal)
+      *getPointer() = std::move(y);
+    else {
+      new (storage.buffer) T(std::move(y));
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(OptionalStorage &&O) {
+    if (!O.hasVal)
+      reset();
+    else {
+      *this = std::move(*O.getPointer());
+    }
+    return *this;
+  }
+
+  // FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
+  // could be made more efficient by passing by value, possibly unifying them
+  // with the rvalue versions above - but this could place a different set of
+  // requirements (notably: the existence of a default ctor) when implemented
+  // in that way. Careful SFINAE to avoid such pitfalls would be required.
+  OptionalStorage &operator=(const T &y) {
+    if (hasVal)
+      *getPointer() = y;
+    else {
+      new (storage.buffer) T(y);
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(const OptionalStorage &O) {
+    if (!O.hasVal)
+      reset();
+    else
+      *this = *O.getPointer();
+    return *this;
+  }
+
+  ~OptionalStorage() { reset(); }
+
+  void reset() {
+    if (hasVal) {
+      (*getPointer()).~T();
+      hasVal = false;
+    }
+  }
+
+  T *getPointer() {
+    assert(hasVal);
+    return reinterpret_cast<T *>(storage.buffer);
+  }
+  const T *getPointer() const {
+    assert(hasVal);
+    return reinterpret_cast<const T *>(storage.buffer);
+  }
+};
+
+#if !defined(__GNUC__) || defined(__clang__) // GCC up to GCC7 miscompiles this.
+/// Storage for trivially copyable types only.
+template <typename T> struct OptionalStorage<T, true> {
+  AlignedCharArrayUnion<T> storage;
+  bool hasVal = false;
+
+  OptionalStorage() = default;
+
+  OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
+  OptionalStorage &operator=(const T &y) {
+    *reinterpret_cast<T *>(storage.buffer) = y;
+    hasVal = true;
+    return *this;
+  }
+
+  void reset() { hasVal = false; }
+};
+#endif
+} // end namespace optional_detail
+
+template <typename T> class Optional {
+  optional_detail::OptionalStorage<T, isPodLike<T>::value> Storage;
+
+public:
+  using value_type = T;
+
+  constexpr Optional() {}
+  constexpr Optional(NoneType) {}
+
+  Optional(const T &y) : Storage(y) {}
+  Optional(const Optional &O) = default;
+
+  Optional(T &&y) : Storage(std::forward<T>(y)) {}
+  Optional(Optional &&O) = default;
+
+  Optional &operator=(T &&y) {
+    Storage = std::move(y);
+    return *this;
+  }
+  Optional &operator=(Optional &&O) = default;
+
+  /// Create a new object by constructing it in place with the given arguments.
+  template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
+    reset();
+    Storage.hasVal = true;
+    new (getPointer()) T(std::forward<ArgTypes>(Args)...);
+  }
+
+  static inline Optional create(const T *y) {
+    return y ? Optional(*y) : Optional();
+  }
+
+  Optional &operator=(const T &y) {
+    Storage = y;
+    return *this;
+  }
+  Optional &operator=(const Optional &O) = default;
+
+  void reset() { Storage.reset(); }
+
+  const T *getPointer() const {
+    assert(Storage.hasVal);
+    return reinterpret_cast<const T *>(Storage.storage.buffer);
+  }
+  T *getPointer() {
+    assert(Storage.hasVal);
+    return reinterpret_cast<T *>(Storage.storage.buffer);
+  }
+  const T &getValue() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
+  T &getValue() LLVM_LVALUE_FUNCTION { return *getPointer(); }
+
+  explicit operator bool() const { return Storage.hasVal; }
+  bool hasValue() const { return Storage.hasVal; }
+  const T *operator->() const { return getPointer(); }
+  T *operator->() { return getPointer(); }
+  const T &operator*() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
+  T &operator*() LLVM_LVALUE_FUNCTION { return *getPointer(); }
+
+  template <typename U>
+  constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+    return hasValue() ? getValue() : std::forward<U>(value);
+  }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+  T &&getValue() && { return std::move(*getPointer()); }
+  T &&operator*() && { return std::move(*getPointer()); }
+
+  template <typename U>
+  T getValueOr(U &&value) && {
+    return hasValue() ? std::move(getValue()) : std::forward<U>(value);
+  }
+#endif
+};
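+
+// A minimal usage sketch (computeSize, allocate, and Obj are hypothetical
+// names, not part of this header):
+//
+//   Optional<unsigned> Size = computeSize(Obj);
+//   if (Size)                              // operator bool tests hasValue().
+//     allocate(*Size);                     // operator* asserts a value exists.
+//   unsigned Bytes = Size.getValueOr(0);   // Falls back to 0 if empty.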
+
+template <typename T> struct isPodLike<Optional<T>> {
+  // An Optional<T> is pod-like if T is.
+  static const bool value = isPodLike<T>::value;
+};
+
+template <typename T, typename U>
+bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X == *Y;
+  return X.hasValue() == Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X == Y);
+}
+
+template <typename T, typename U>
+bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X < *Y;
+  return X.hasValue() < Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(Y < X);
+}
+
+template <typename T, typename U>
+bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+  return Y < X;
+}
+
+template <typename T, typename U>
+bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X < Y);
+}
+
+template<typename T>
+bool operator==(const Optional<T> &X, NoneType) {
+  return !X;
+}
+
+template<typename T>
+bool operator==(NoneType, const Optional<T> &X) {
+  return X == None;
+}
+
+template<typename T>
+bool operator!=(const Optional<T> &X, NoneType) {
+  return !(X == None);
+}
+
+template<typename T>
+bool operator!=(NoneType, const Optional<T> &X) {
+  return X != None;
+}
+
+template <typename T> bool operator<(const Optional<T> &X, NoneType) {
+  return false;
+}
+
+template <typename T> bool operator<(NoneType, const Optional<T> &X) {
+  return X.hasValue();
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, NoneType) {
+  return !(None < X);
+}
+
+template <typename T> bool operator<=(NoneType, const Optional<T> &X) {
+  return !(X < None);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, NoneType) {
+  return None < X;
+}
+
+template <typename T> bool operator>(NoneType, const Optional<T> &X) {
+  return X < None;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, NoneType) {
+  return None <= X;
+}
+
+template <typename T> bool operator>=(NoneType, const Optional<T> &X) {
+  return X <= None;
+}
+
+template <typename T> bool operator==(const Optional<T> &X, const T &Y) {
+  return X && *X == Y;
+}
+
+template <typename T> bool operator==(const T &X, const Optional<T> &Y) {
+  return Y && X == *Y;
+}
+
+template <typename T> bool operator!=(const Optional<T> &X, const T &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator!=(const T &X, const Optional<T> &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator<(const Optional<T> &X, const T &Y) {
+  return !X || *X < Y;
+}
+
+template <typename T> bool operator<(const T &X, const Optional<T> &Y) {
+  return Y && X < *Y;
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, const T &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator<=(const T &X, const Optional<T> &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, const T &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>(const T &X, const Optional<T> &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, const T &Y) {
+  return !(X < Y);
+}
+
+template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
+  return !(X < Y);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_OPTIONAL_H
diff --git a/linux-x64/clang/include/llvm/ADT/PackedVector.h b/linux-x64/clang/include/llvm/ADT/PackedVector.h
new file mode 100644
index 0000000..95adc29
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PackedVector.h
@@ -0,0 +1,151 @@
+//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PackedVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PACKEDVECTOR_H
+#define LLVM_ADT_PACKEDVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
+class PackedVectorBase;
+
+// This won't be necessary if we can specialize members without specializing
+// the parent template.
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, false> {
+protected:
+  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+    T val = T();
+    for (unsigned i = 0; i != BitNum; ++i)
+      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+    return val;
+  }
+
+  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+    assert((val >> BitNum) == 0 && "value is too big");
+    for (unsigned i = 0; i != BitNum; ++i)
+      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+  }
+};
+
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, true> {
+protected:
+  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+    T val = T();
+    for (unsigned i = 0; i != BitNum-1; ++i)
+      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+    if (Bits[(Idx << (BitNum-1)) + BitNum-1])
+      val = ~val;
+    return val;
+  }
+
+  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+    if (val < 0) {
+      val = ~val;
+      Bits.set((Idx << (BitNum-1)) + BitNum-1);
+    }
+    assert((val >> (BitNum-1)) == 0 && "value is too big");
+    for (unsigned i = 0; i != BitNum-1; ++i)
+      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+  }
+};
+
+/// \brief Store a vector of values using a specific number of bits for each
+/// value. Both signed and unsigned types can be used, e.g.
+/// @code
+///   PackedVector<signed, 2> vec;
+/// @endcode
+/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
+/// an assertion.
+template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
+class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
+                                            std::numeric_limits<T>::is_signed> {
+  BitVectorTy Bits;
+  using base = PackedVectorBase<T, BitNum, BitVectorTy,
+                                std::numeric_limits<T>::is_signed>;
+
+public:
+  class reference {
+    PackedVector &Vec;
+    const unsigned Idx;
+
+  public:
+    reference() = delete;
+    reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
+
+    reference &operator=(T val) {
+      Vec.setValue(Vec.Bits, Idx, val);
+      return *this;
+    }
+
+    operator T() const {
+      return Vec.getValue(Vec.Bits, Idx);
+    }
+  };
+
+  PackedVector() = default;
+  explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}
+
+  bool empty() const { return Bits.empty(); }
+
+  unsigned size() const { return Bits.size() >> (BitNum - 1); }
+
+  void clear() { Bits.clear(); }
+
+  void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }
+
+  void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
+
+  PackedVector &reset() {
+    Bits.reset();
+    return *this;
+  }
+
+  void push_back(T val) {
+    resize(size()+1);
+    (*this)[size()-1] = val;
+  }
+
+  reference operator[](unsigned Idx) {
+    return reference(*this, Idx);
+  }
+
+  T operator[](unsigned Idx) const {
+    return base::getValue(Bits, Idx);
+  }
+
+  bool operator==(const PackedVector &RHS) const {
+    return Bits == RHS.Bits;
+  }
+
+  bool operator!=(const PackedVector &RHS) const {
+    return Bits != RHS.Bits;
+  }
+
+  PackedVector &operator|=(const PackedVector &RHS) {
+    Bits |= RHS.Bits;
+    return *this;
+  }
+};
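+
+// A short usage sketch (illustrative values only):
+//
+//   PackedVector<unsigned, 2> Vec;   // Each element stores 2 bits: 0..3.
+//   Vec.push_back(3);
+//   Vec.push_back(1);
+//   unsigned First = Vec[0];         // Reads 3 back out of the bit storage.
+//   Vec[1] = 2;                      // The proxy reference packs the write.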
+
+// Leave BitNum=0 undefined.
+template <typename T> class PackedVector<T, 0>;
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PACKEDVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
new file mode 100644
index 0000000..ab4e104
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
@@ -0,0 +1,120 @@
+//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
+#define LLVM_ADT_POINTEREMBEDDEDINT_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// Utility to embed an integer into a pointer-like type. This is specifically
+/// intended to allow embedding integers where fewer bits are required than
+/// exist in a pointer, and the integer can participate in abstractions
+/// alongside other pointer-like types. For example, it can be placed into a \c
+/// PointerSumType or \c PointerUnion.
+///
+/// Note that much like pointers, an integer value of zero has special utility
+/// due to boolean conversions. For example, a non-null value can be tested for
+/// in the above abstractions without testing the particular active member.
+/// Also, a default-constructed value zero-initializes the integer.
+template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
+class PointerEmbeddedInt {
+  uintptr_t Value = 0;
+
+  // Note: This '<' is correct; using '<=' would result in some shifts
+  // overflowing their storage types.
+  static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
+                "Cannot embed more bits than we have in a pointer!");
+
+  enum : uintptr_t {
+    // We shift as many zeros into the value as we can while preserving the
+    // number of bits desired for the integer.
+    Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,
+
+    // We also want to be able to mask out the preserved bits for asserts.
+    Mask = static_cast<uintptr_t>(-1) << Bits
+  };
+
+  struct RawValueTag {
+    explicit RawValueTag() = default;
+  };
+
+  friend struct PointerLikeTypeTraits<PointerEmbeddedInt>;
+
+  explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
+
+public:
+  PointerEmbeddedInt() = default;
+
+  PointerEmbeddedInt(IntT I) { *this = I; }
+
+  PointerEmbeddedInt &operator=(IntT I) {
+    assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
+           "Integer has bits outside those preserved!");
+    Value = static_cast<uintptr_t>(I) << Shift;
+    return *this;
+  }
+
+  // Note that this implicit conversion additionally allows all of the basic
+  // comparison operators to work transparently, etc.
+  operator IntT() const {
+    if (std::is_signed<IntT>::value)
+      return static_cast<IntT>(static_cast<intptr_t>(Value) >> Shift);
+    return static_cast<IntT>(Value >> Shift);
+  }
+};
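+
+// A short sketch (illustrative only): a 3-bit signed integer carried in a
+// pointer-sized value. Typical use is as a member of PointerSumType or
+// PointerUnion rather than standalone.
+//
+//   PointerEmbeddedInt<int, 3> Small = 3;   // Accepts -4..3; asserts on more.
+//   int Plain = Small;                      // Implicit conversion back.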
+
+// Provide pointer like traits to support use with pointer unions and sum
+// types.
+template <typename IntT, int Bits>
+struct PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
+  using T = PointerEmbeddedInt<IntT, Bits>;
+
+  static inline void *getAsVoidPointer(const T &P) {
+    return reinterpret_cast<void *>(P.Value);
+  }
+
+  static inline T getFromVoidPointer(void *P) {
+    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+  }
+
+  static inline T getFromVoidPointer(const void *P) {
+    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+  }
+
+  enum { NumLowBitsAvailable = T::Shift };
+};
+
+// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
+// itself can be a key.
+template <typename IntT, int Bits>
+struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
+  using T = PointerEmbeddedInt<IntT, Bits>;
+  using IntInfo = DenseMapInfo<IntT>;
+
+  static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
+  static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
+
+  static unsigned getHashValue(const T &Arg) {
+    return IntInfo::getHashValue(Arg);
+  }
+
+  static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTEREMBEDDEDINT_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerIntPair.h b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
new file mode 100644
index 0000000..884d051
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
@@ -0,0 +1,233 @@
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerIntPair class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and small
+/// integer.  It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer.  This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible.  For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else.  For example, this allows:
+///   PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+          typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+          typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+  intptr_t Value = 0;
+
+public:
+  constexpr PointerIntPair() = default;
+
+  PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+    setPointerAndInt(PtrVal, IntVal);
+  }
+
+  explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+  PointerTy getPointer() const { return Info::getPointer(Value); }
+
+  IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+  void setPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(Value, PtrVal);
+  }
+
+  void setInt(IntType IntVal) {
+    Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
+  }
+
+  void initWithPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(0, PtrVal);
+  }
+
+  void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
+    Value = Info::updateInt(Info::updatePointer(0, PtrVal),
+                            static_cast<intptr_t>(IntVal));
+  }
+
+  PointerTy const *getAddrOfPointer() const {
+    return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+  }
+
+  PointerTy *getAddrOfPointer() {
+    assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+           "Can only return the address if IntBits is cleared and "
+           "PtrTraits doesn't change the pointer");
+    return reinterpret_cast<PointerTy *>(&Value);
+  }
+
+  void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+
+  void setFromOpaqueValue(void *Val) {
+    Value = reinterpret_cast<intptr_t>(Val);
+  }
+
+  static PointerIntPair getFromOpaqueValue(void *V) {
+    PointerIntPair P;
+    P.setFromOpaqueValue(V);
+    return P;
+  }
+
+  // Allow PointerIntPairs to be created from const void * if and only if the
+  // pointer type could be created from a const void *.
+  static PointerIntPair getFromOpaqueValue(const void *V) {
+    (void)PtrTraits::getFromVoidPointer(V);
+    return getFromOpaqueValue(const_cast<void *>(V));
+  }
+
+  bool operator==(const PointerIntPair &RHS) const {
+    return Value == RHS.Value;
+  }
+
+  bool operator!=(const PointerIntPair &RHS) const {
+    return Value != RHS.Value;
+  }
+
+  bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+  bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+
+  bool operator<=(const PointerIntPair &RHS) const {
+    return Value <= RHS.Value;
+  }
+
+  bool operator>=(const PointerIntPair &RHS) const {
+    return Value >= RHS.Value;
+  }
+};
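+
+// A minimal usage sketch (illustrative only; int * typically leaves two low
+// bits free, so one bit of payload fits):
+//
+//   int Data = 0;
+//   PointerIntPair<int *, 1, bool> P(&Data, true);
+//   int *Ptr = P.getPointer();   // &Data
+//   bool Flag = P.getInt();      // true
+//   P.setInt(false);             // The pointer value is preserved.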
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+  static_assert(PtrTraits::NumLowBitsAvailable <
+                    std::numeric_limits<uintptr_t>::digits,
+                "cannot use a pointer type that has all bits free");
+  static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+                "PointerIntPair with integer size too large for pointer");
+  enum : uintptr_t {
+    /// PointerBitMask - The bits that come from the pointer.
+    PointerBitMask =
+        ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+    /// IntShift - The number of low bits that we reserve for other uses, and
+    /// keep zero.
+    IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+    /// IntMask - This is the unshifted mask for valid bits of the int type.
+    IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+    // ShiftedIntMask - This is the bits for the integer shifted in place.
+    ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+  };
+
+  static PointerT getPointer(intptr_t Value) {
+    return PtrTraits::getFromVoidPointer(
+        reinterpret_cast<void *>(Value & PointerBitMask));
+  }
+
+  static intptr_t getInt(intptr_t Value) {
+    return (Value >> IntShift) & IntMask;
+  }
+
+  static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+    intptr_t PtrWord =
+        reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+    assert((PtrWord & ~PointerBitMask) == 0 &&
+           "Pointer is not sufficiently aligned");
+    // Preserve all low bits, just update the pointer.
+    return PtrWord | (OrigValue & ~PointerBitMask);
+  }
+
+  static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+    intptr_t IntWord = static_cast<intptr_t>(Int);
+    assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+    // Preserve all bits other than the ones we are updating.
+    return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+  }
+};
+
+template <typename T> struct isPodLike;
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType>> {
+  static const bool value = true;
+};
+
+// Provide specialization of DenseMapInfo for PointerIntPair.
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>> {
+  using Ty = PointerIntPair<PointerTy, IntBits, IntType>;
+
+  static Ty getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static Ty getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static unsigned getHashValue(Ty V) {
+    uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
+    return unsigned(IV) ^ unsigned(IV >> 9);
+  }
+
+  static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
+};
+
+// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
+template <typename PointerTy, unsigned IntBits, typename IntType,
+          typename PtrTraits>
+struct PointerLikeTypeTraits<
+    PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
+  static inline void *
+  getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(const void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERINTPAIR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerSumType.h b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
new file mode 100644
index 0000000..e379571
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
@@ -0,0 +1,207 @@
+//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERSUMTYPE_H
+#define LLVM_ADT_POINTERSUMTYPE_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// A compile time pair of an integer tag and the pointer-like type which it
+/// indexes within a sum type. Also allows the user to specify a particular
+/// traits class for pointer types with custom behavior such as over-aligned
+/// allocation.
+template <uintptr_t N, typename PointerArgT,
+          typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
+struct PointerSumTypeMember {
+  enum { Tag = N };
+  using PointerT = PointerArgT;
+  using TraitsT = TraitsArgT;
+};
+
+namespace detail {
+
+template <typename TagT, typename... MemberTs> struct PointerSumTypeHelper;
+
+} // end namespace detail
+
+/// A sum type over pointer-like types.
+///
+/// This is a normal tagged union across pointer-like types that uses the low
+/// bits of the pointers to store the tag.
+///
+/// Each member of the sum type is specified by passing a \c
+/// PointerSumTypeMember specialization in the variadic member argument list.
+/// This allows the user to control the particular tag value associated with
+/// a particular type, use the same type for multiple different tags, and
+/// customize the pointer-like traits used for a particular member. Note that
+/// these *must* be specializations of \c PointerSumTypeMember, no other type
+/// will suffice, even if it provides a compatible interface.
+///
+/// This type implements all of the comparison operators and even hash table
+/// support by comparing the underlying storage of the pointer values. It
+/// doesn't support delegating to particular members for comparisons.
+///
+/// It also default constructs to a zero tag with a null pointer, whatever that
+/// would be. This means that the zero value of the tag type is significant, so
+/// it is worth making it the member state that you want default construction
+/// to produce.
+///
+/// There is no support for constructing or accessing with a dynamic tag as
+/// that would fundamentally violate the type safety provided by the sum type.
+template <typename TagT, typename... MemberTs> class PointerSumType {
+  uintptr_t Value = 0;
+
+  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+
+public:
+  constexpr PointerSumType() = default;
+
+  /// A typed constructor for a specific tagged member of the sum type.
+  template <TagT N>
+  static PointerSumType
+  create(typename HelperT::template Lookup<N>::PointerT Pointer) {
+    PointerSumType Result;
+    void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+    assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+           "Pointer is insufficiently aligned to store the discriminant!");
+    Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+    return Result;
+  }
+
+  TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+
+  template <TagT N> bool is() const { return N == getTag(); }
+
+  template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
+    void *P = is<N>() ? getImpl() : nullptr;
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
+  }
+
+  template <TagT N>
+  typename HelperT::template Lookup<N>::PointerT cast() const {
+    assert(is<N>() && "This instance has a different active member.");
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+  }
+
+  explicit operator bool() const { return Value & HelperT::PointerMask; }
+  bool operator==(const PointerSumType &R) const { return Value == R.Value; }
+  bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
+  bool operator<(const PointerSumType &R) const { return Value < R.Value; }
+  bool operator>(const PointerSumType &R) const { return Value > R.Value; }
+  bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
+  bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+
+  uintptr_t getOpaqueValue() const { return Value; }
+
+protected:
+  void *getImpl() const {
+    return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+  }
+};
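+
+// A usage sketch under assumed names (Kinds, K_Int, and K_Float are
+// illustrative, not part of this header):
+//
+//   enum Kinds { K_Int = 0, K_Float = 1 };
+//   using Sum = PointerSumType<Kinds,
+//                              PointerSumTypeMember<K_Int, int *>,
+//                              PointerSumTypeMember<K_Float, float *>>;
+//   int I = 42;
+//   Sum S = Sum::create<K_Int>(&I);
+//   if (S.is<K_Int>())
+//     (void)S.cast<K_Int>();   // Typed access; asserts on a tag mismatch.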
+
+namespace detail {
+
+/// A helper template for implementing \c PointerSumType. It provides fast
+/// compile-time lookup of the member from a particular tag value, along with
+/// useful constants and compile-time checking infrastructure.
+template <typename TagT, typename... MemberTs>
+struct PointerSumTypeHelper : MemberTs... {
+  // First we use a trick to allow quickly looking up information about
+  // a particular member of the sum type. This works because we arranged to
+  // have this type derive from all of the member type templates. We can select
+  // the matching member for a tag using type deduction during overload
+  // resolution.
+  template <TagT N, typename PointerT, typename TraitsT>
+  static PointerSumTypeMember<N, PointerT, TraitsT>
+  LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
+  template <TagT N> static void LookupOverload(...);
+  template <TagT N> struct Lookup {
+    // Compute a particular member type by resolving the lookup helper
+    // overload.
+    using MemberT = decltype(
+        LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr)));
+
+    /// The Nth member's pointer type.
+    using PointerT = typename MemberT::PointerT;
+
+    /// The Nth member's traits type.
+    using TraitsT = typename MemberT::TraitsT;
+  };
+
+  // Next we need to compute the number of bits available for the discriminant
+  // by taking the min of the bits available for each member. Much of this
+  // would be considerably easier with good constexpr support.
+  template <uintptr_t V, uintptr_t... Vs>
+  struct Min : std::integral_constant<
+                   uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
+  };
+  template <uintptr_t V>
+  struct Min<V> : std::integral_constant<uintptr_t, V> {};
+  enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
+
+  // Also compute the smallest discriminant and various masks for convenience.
+  enum : uint64_t {
+    MinTag = Min<MemberTs::Tag...>::value,
+    PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
+    TagMask = ~PointerMask
+  };
+
+  // Finally we need a recursive template to do static checks of each
+  // member.
+  template <typename MemberT, typename... InnerMemberTs>
+  struct Checker : Checker<InnerMemberTs...> {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  template <typename MemberT> struct Checker<MemberT> : std::true_type {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  static_assert(Checker<MemberTs...>::value,
+                "Each member must pass the checker.");
+};
+
+} // end namespace detail
+
+// Teach DenseMap how to use PointerSumTypes as keys.
+template <typename TagT, typename... MemberTs>
+struct DenseMapInfo<PointerSumType<TagT, MemberTs...>> {
+  using SumType = PointerSumType<TagT, MemberTs...>;
+  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+  enum { SomeTag = HelperT::MinTag };
+  using SomePointerT =
+      typename HelperT::template Lookup<HelperT::MinTag>::PointerT;
+  using SomePointerInfo = DenseMapInfo<SomePointerT>;
+
+  static inline SumType getEmptyKey() {
+    return SumType::create<SomeTag>(SomePointerInfo::getEmptyKey());
+  }
+
+  static inline SumType getTombstoneKey() {
+    return SumType::create<SomeTag>(SomePointerInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const SumType &Arg) {
+    uintptr_t OpaqueValue = Arg.getOpaqueValue();
+    return DenseMapInfo<uintptr_t>::getHashValue(OpaqueValue);
+  }
+
+  static bool isEqual(const SumType &LHS, const SumType &RHS) {
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERSUMTYPE_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerUnion.h b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
new file mode 100644
index 0000000..315e583
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
@@ -0,0 +1,491 @@
+//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerUnion class, which is a discriminated union of
+// pointer types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERUNION_H
+#define LLVM_ADT_POINTERUNION_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+template <typename T> struct PointerUnionTypeSelectorReturn {
+  using Return = T;
+};
+
+/// Get a type based on whether two types are the same or not.
+///
+/// For:
+///
+/// \code
+///   using Ret = typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return;
+/// \endcode
+///
+/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_NE>::Return;
+};
+
+template <typename T, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_EQ>::Return;
+};
+
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelectorReturn<
+    PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
+  using Return =
+      typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
+};
+
+/// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
+/// for the two template arguments.
+template <typename PT1, typename PT2> class PointerUnionUIntTraits {
+public:
+  static inline void *getAsVoidPointer(void *P) { return P; }
+  static inline void *getFromVoidPointer(void *P) { return P; }
+
+  enum {
+    PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+    PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
+    NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
+  };
+};
+
+/// A discriminated union of two pointer types, with the discriminator in the
+/// low bit of the pointer.
+///
+/// This implementation is extremely efficient in space due to leveraging the
+/// low bits of the pointer, while exposing a natural and type-safe API.
+///
+/// Common use patterns would be something like this:
+///    PointerUnion<int*, float*> P;
+///    P = (int*)0;
+///    printf("%d %d", P.is<int*>(), P.is<float*>());  // prints "1 0"
+///    X = P.get<int*>();     // ok.
+///    Y = P.get<float*>();   // runtime assertion failure.
+///    Z = P.get<double*>();  // compile time failure.
+///    P = (float*)0;
+///    Y = P.get<float*>();   // ok.
+///    X = P.get<int*>();     // runtime assertion failure.
+template <typename PT1, typename PT2> class PointerUnion {
+public:
+  using ValTy =
+      PointerIntPair<void *, 1, bool, PointerUnionUIntTraits<PT1, PT2>>;
+
+private:
+  ValTy Val;
+
+  struct IsPT1 {
+    static const int Num = 0;
+  };
+  struct IsPT2 {
+    static const int Num = 1;
+  };
+  template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {};
+
+public:
+  PointerUnion() = default;
+  PointerUnion(PT1 V)
+      : Val(const_cast<void *>(
+            PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))) {}
+  PointerUnion(PT2 V)
+      : Val(const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V)),
+            1) {}
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const {
+    // Convert from the void* to one of the pointer types, to make sure that
+    // we recursively strip off low bits if we have a nested PointerUnion.
+    return !PointerLikeTypeTraits<PT1>::getFromVoidPointer(Val.getPointer());
+  }
+
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsPT1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsPT2,
+                                         UNION_DOESNT_CONTAIN_TYPE<T>>>::Return;
+    int TyNo = Ty::Num;
+    return static_cast<int>(Val.getInt()) == TyNo;
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer());
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  PT1 const *getAddrOfPtr1() const {
+    return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  PT1 *getAddrOfPtr1() {
+    assert(is<PT1>() && "Val is not the first pointer");
+    assert(
+        get<PT1>() == Val.getPointer() &&
+        "Can't get the address because PointerLikeTypeTraits changes the ptr");
+    return const_cast<PT1 *>(
+        reinterpret_cast<const PT1 *>(Val.getAddrOfPointer()));
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion &operator=(std::nullptr_t) {
+    Val.initWithPointer(nullptr);
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from either
+  /// pointer type, setting the discriminator to remember what it came from.
+  const PointerUnion &operator=(const PT1 &RHS) {
+    Val.initWithPointer(
+        const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS)));
+    return *this;
+  }
+  const PointerUnion &operator=(const PT2 &RHS) {
+    Val.setPointerAndInt(
+        const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS)),
+        1);
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion getFromOpaqueValue(void *VP) {
+    PointerUnion V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+template <typename PT1, typename PT2>
+bool operator==(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() == rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+bool operator!=(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() != rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+bool operator<(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits)-1.
+template <typename PT1, typename PT2>
+struct PointerLikeTypeTraits<PointerUnion<PT1, PT2>> {
+  static inline void *getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion<PT1, PT2> getFromVoidPointer(void *P) {
+    return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the two pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion<PT1, PT2>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+/// A pointer union of three pointer types. See documentation for PointerUnion
+/// for usage.
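+///
+/// A minimal usage sketch (the pointee types here are arbitrary placeholders):
+///
+/// \code
+///   PointerUnion3<int *, float *, double *> P;
+///   float F;
+///   P = &F;                // P now holds a float *.
+///   if (P.is<float *>())
+///     (void)P.get<float *>();            // Returns &F.
+///   double *D = P.dyn_cast<double *>();  // Null: P holds a float *.
+/// \endcode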
+template <typename PT1, typename PT2, typename PT3> class PointerUnion3 {
+public:
+  using InnerUnion = PointerUnion<PT1, PT2>;
+  using ValTy = PointerUnion<InnerUnion, PT3>;
+
+private:
+  ValTy Val;
+
+  struct IsInnerUnion {
+    ValTy Val;
+
+    IsInnerUnion(ValTy val) : Val(val) {}
+
+    template <typename T> int is() const {
+      return Val.template is<InnerUnion>() &&
+             Val.template get<InnerUnion>().template is<T>();
+    }
+
+    template <typename T> T get() const {
+      return Val.template get<InnerUnion>().template get<T>();
+    }
+  };
+
+  struct IsPT3 {
+    ValTy Val;
+
+    IsPT3(ValTy val) : Val(val) {}
+
+    template <typename T> int is() const { return Val.template is<T>(); }
+    template <typename T> T get() const { return Val.template get<T>(); }
+  };
+
+public:
+  PointerUnion3() = default;
+  PointerUnion3(PT1 V) { Val = InnerUnion(V); }
+  PointerUnion3(PT2 V) { Val = InnerUnion(V); }
+  PointerUnion3(PT3 V) { Val = V; }
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const { return Val.isNull(); }
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    // If T is PT1/PT2, choose IsInnerUnion; otherwise choose IsPT3.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsInnerUnion,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return;
+    return Ty(Val).template is<T>();
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    // If T is PT1/PT2, choose IsInnerUnion; otherwise choose IsPT3.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsInnerUnion,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return;
+    return Ty(Val).template get<T>();
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion3 &operator=(std::nullptr_t) {
+    Val = nullptr;
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from any of the
+  /// pointer types, setting the discriminator to remember what it came from.
+  const PointerUnion3 &operator=(const PT1 &RHS) {
+    Val = InnerUnion(RHS);
+    return *this;
+  }
+  const PointerUnion3 &operator=(const PT2 &RHS) {
+    Val = InnerUnion(RHS);
+    return *this;
+  }
+  const PointerUnion3 &operator=(const PT3 &RHS) {
+    Val = RHS;
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion3 getFromOpaqueValue(void *VP) {
+    PointerUnion3 V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+// Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits)-2.
+template <typename PT1, typename PT2, typename PT3>
+struct PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3>> {
+  static inline void *getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion3<PT1, PT2, PT3> getFromVoidPointer(void *P) {
+    return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the three pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion3<PT1, PT2, PT3>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+template <typename PT1, typename PT2, typename PT3>
+bool operator<(PointerUnion3<PT1, PT2, PT3> lhs,
+               PointerUnion3<PT1, PT2, PT3> rhs) {
+  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+/// A pointer union of four pointer types. See documentation for PointerUnion
+/// for usage.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+class PointerUnion4 {
+public:
+  using InnerUnion1 = PointerUnion<PT1, PT2>;
+  using InnerUnion2 = PointerUnion<PT3, PT4>;
+  using ValTy = PointerUnion<InnerUnion1, InnerUnion2>;
+
+private:
+  ValTy Val;
+
+public:
+  PointerUnion4() = default;
+  PointerUnion4(PT1 V) { Val = InnerUnion1(V); }
+  PointerUnion4(PT2 V) { Val = InnerUnion1(V); }
+  PointerUnion4(PT3 V) { Val = InnerUnion2(V); }
+  PointerUnion4(PT4 V) { Val = InnerUnion2(V); }
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const { return Val.isNull(); }
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    // If T is PT1/PT2, choose InnerUnion1; otherwise choose InnerUnion2.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, InnerUnion1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1,
+                                         InnerUnion2>>::Return;
+    return Val.template is<Ty>() && Val.template get<Ty>().template is<T>();
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    // If T is PT1/PT2, choose InnerUnion1; otherwise choose InnerUnion2.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, InnerUnion1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1,
+                                         InnerUnion2>>::Return;
+    return Val.template get<Ty>().template get<T>();
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion4 &operator=(std::nullptr_t) {
+    Val = nullptr;
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from any of the
+  /// pointer types, setting the discriminator to remember what it came from.
+  const PointerUnion4 &operator=(const PT1 &RHS) {
+    Val = InnerUnion1(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT2 &RHS) {
+    Val = InnerUnion1(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT3 &RHS) {
+    Val = InnerUnion2(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT4 &RHS) {
+    Val = InnerUnion2(RHS);
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion4 getFromOpaqueValue(void *VP) {
+    PointerUnion4 V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+// Teach SmallPtrSet that PointerUnion4 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits,PT4bits)-2.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+struct PointerLikeTypeTraits<PointerUnion4<PT1, PT2, PT3, PT4>> {
+  static inline void *
+  getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion4<PT1, PT2, PT3, PT4> getFromVoidPointer(void *P) {
+    return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the four pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion4<PT1, PT2, PT3, PT4>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+// Teach DenseMap how to use PointerUnions as keys.
+template <typename T, typename U> struct DenseMapInfo<PointerUnion<T, U>> {
+  using Pair = PointerUnion<T, U>;
+  using FirstInfo = DenseMapInfo<T>;
+  using SecondInfo = DenseMapInfo<U>;
+
+  static inline Pair getEmptyKey() { return Pair(FirstInfo::getEmptyKey()); }
+
+  static inline Pair getTombstoneKey() {
+    return Pair(FirstInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const Pair &PairVal) {
+    intptr_t key = (intptr_t)PairVal.getOpaqueValue();
+    return DenseMapInfo<intptr_t>::getHashValue(key);
+  }
+
+  static bool isEqual(const Pair &LHS, const Pair &RHS) {
+    return LHS.template is<T>() == RHS.template is<T>() &&
+           (LHS.template is<T>() ? FirstInfo::isEqual(LHS.template get<T>(),
+                                                      RHS.template get<T>())
+                                 : SecondInfo::isEqual(LHS.template get<U>(),
+                                                       RHS.template get<U>()));
+  }
+};
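+
+// For example, the specialization above makes the following possible (a
+// sketch; the pointee types are arbitrary placeholders):
+//
+//   DenseMap<PointerUnion<int *, float *>, unsigned> Map;
+//   int I;
+//   Map[PointerUnion<int *, float *>(&I)] = 42;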
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERUNION_H
diff --git a/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
new file mode 100644
index 0000000..dc8a9b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
@@ -0,0 +1,309 @@
+//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic graph
+// post order iterator.  This should work over any graph type that has a
+// GraphTraits specialization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POSTORDERITERATOR_H
+#define LLVM_ADT_POSTORDERITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the External=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+//   those nodes won't be visited again. This is useful for restarting a
+//   post-order traversal on a graph with nodes that aren't dominated by a
+//   single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+//   by having the insert() function return false. This could for example
+//   confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+//   edges can be pruned by returning false in the insertEdge() function. This
+//   could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
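+//
+// For example, restarting a traversal from a second root with a shared
+// external set (a sketch; the node and set types are illustrative):
+//
+//   SmallPtrSet<BasicBlock *, 16> Visited;
+//   for (BasicBlock *BB : post_order_ext(Root1, Visited))
+//     ...;
+//   for (BasicBlock *BB : post_order_ext(Root2, Visited))
+//     ...; // Nodes already reached from Root1 are not visited again.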
+
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
+class po_iterator_storage {
+  SetType Visited;
+
+public:
+  // Return true if edge destination should be visited.
+  template <typename NodeRef>
+  bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+    return Visited.insert(To).second;
+  }
+
+  // Called after all children of BB have been visited.
+  template <typename NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+/// Specialization of po_iterator_storage that references an external set.
+template<class SetType>
+class po_iterator_storage<SetType, true> {
+  SetType &Visited;
+
+public:
+  po_iterator_storage(SetType &VSet) : Visited(VSet) {}
+  po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
+
+  // Return true if the edge destination should be visited; called with an
+  // empty From for the root node.
+  // Graph edges can be pruned by specializing this function.
+  template <class NodeRef> bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+    return Visited.insert(To).second;
+  }
+
+  // Called after all children of BB have been visited.
+  template <class NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+template <class GraphT,
+          class SetType =
+              SmallPtrSet<typename GraphTraits<GraphT>::NodeRef, 8>,
+          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class po_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public po_iterator_storage<SetType, ExtStorage> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // VisitStack - Used to maintain the ordering.  Top = current block.
+  // First element is the basic block pointer, second is the 'next child' to
+  // visit.
+  std::vector<std::pair<NodeRef, ChildItTy>> VisitStack;
+
+  po_iterator(NodeRef BB) {
+    this->insertEdge(Optional<NodeRef>(), BB);
+    VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+    traverseChild();
+  }
+
+  po_iterator() = default; // End is when stack is empty.
+
+  po_iterator(NodeRef BB, SetType &S)
+      : po_iterator_storage<SetType, ExtStorage>(S) {
+    if (this->insertEdge(Optional<NodeRef>(), BB)) {
+      VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+      traverseChild();
+    }
+  }
+
+  po_iterator(SetType &S)
+      : po_iterator_storage<SetType, ExtStorage>(S) {
+  } // End is when stack is empty.
+
+  void traverseChild() {
+    while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+      NodeRef BB = *VisitStack.back().second++;
+      if (this->insertEdge(Optional<NodeRef>(VisitStack.back().first), BB)) {
+        // If the block is not visited...
+        VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+      }
+    }
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static "constructors"...
+  static po_iterator begin(GraphT G) {
+    return po_iterator(GT::getEntryNode(G));
+  }
+  static po_iterator end(GraphT G) { return po_iterator(); }
+
+  static po_iterator begin(GraphT G, SetType &S) {
+    return po_iterator(GT::getEntryNode(G), S);
+  }
+  static po_iterator end(GraphT G, SetType &S) { return po_iterator(S); }
+
+  bool operator==(const po_iterator &x) const {
+    return VisitStack == x.VisitStack;
+  }
+  bool operator!=(const po_iterator &x) const { return !(*this == x); }
+
+  const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time... so that you can actually call methods ON the BasicBlock, because
+  // the contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
+  //
+  NodeRef operator->() const { return **this; }
+
+  po_iterator &operator++() { // Preincrement
+    this->finishPostorder(VisitStack.back().first);
+    VisitStack.pop_back();
+    if (!VisitStack.empty())
+      traverseChild();
+    return *this;
+  }
+
+  po_iterator operator++(int) { // Postincrement
+    po_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
+template <class T>
+po_iterator<T> po_end  (const T &G) { return po_iterator<T>::end(G); }
+
+template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
+  return make_range(po_begin(G), po_end(G));
+}
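+
+// For example, to visit a function's basic blocks in post order (a sketch,
+// assuming the usual GraphTraits specialization for Function *):
+//
+//   for (BasicBlock *BB : post_order(&F))
+//     ...;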
+
+// Provide global definitions of external postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct po_ext_iterator : public po_iterator<T, SetType, true> {
+  po_ext_iterator(const po_iterator<T, SetType, true> &V) :
+  po_iterator<T, SetType, true>(V) {}
+};
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
+  return po_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
+  return po_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
+  return make_range(po_ext_begin(G, S), po_ext_end(G, S));
+}
+
+// Provide global definitions of inverse post order iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
+          bool External = false>
+struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
+  ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
+     po_iterator<Inverse<T>, SetType, External> (V) {}
+};
+
+template <class T>
+ipo_iterator<T> ipo_begin(const T &G) {
+  return ipo_iterator<T>::begin(G);
+}
+
+template <class T>
+ipo_iterator<T> ipo_end(const T &G) {
+  return ipo_iterator<T>::end(G);
+}
+
+template <class T>
+iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
+  return make_range(ipo_begin(G), ipo_end(G));
+}
+
+// Provide global definitions of external inverse postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
+  ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
+    ipo_iterator<T, SetType, true>(V) {}
+  ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
+    ipo_iterator<T, SetType, true>(V) {}
+};
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
+  return ipo_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
+  return ipo_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<ipo_ext_iterator<T, SetType>>
+inverse_post_order_ext(const T &G, SetType &S) {
+  return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
+}
+
+//===--------------------------------------------------------------------===//
+// Reverse Post Order CFG iterator code
+//===--------------------------------------------------------------------===//
+//
+// This is used to visit basic blocks in a method in reverse post order.  This
+// class is awkward to use because I don't know a good incremental algorithm to
+// compute RPO from a graph.  Because of this, the construction of the
+// ReversePostOrderTraversal object is expensive (it must walk the entire graph
+// with a postorder iterator to build the data structures).  The moral of this
+// story is: Don't create more ReversePostOrderTraversal classes than necessary.
+//
+// Because it does the traversal in its constructor, it won't invalidate when
+// BasicBlocks are removed, *but* it may contain erased blocks. Some places
+// rely on this behavior (e.g. GVN).
+//
+// This class should be used like this:
+// {
+//   ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
+//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+//      ...
+//   }
+//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+//      ...
+//   }
+// }
+//
+
+template<class GraphT, class GT = GraphTraits<GraphT>>
+class ReversePostOrderTraversal {
+  using NodeRef = typename GT::NodeRef;
+
+  std::vector<NodeRef> Blocks; // Block list in normal PO order
+
+  void Initialize(NodeRef BB) {
+    std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks));
+  }
+
+public:
+  using rpo_iterator = typename std::vector<NodeRef>::reverse_iterator;
+
+  ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
+
+  // Because we want a reverse post order, use reverse iterators from the vector
+  rpo_iterator begin() { return Blocks.rbegin(); }
+  rpo_iterator end() { return Blocks.rend(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POSTORDERITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PriorityQueue.h b/linux-x64/clang/include/llvm/ADT/PriorityQueue.h
new file mode 100644
index 0000000..8ba871e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PriorityQueue.h
@@ -0,0 +1,83 @@
+//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PriorityQueue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYQUEUE_H
+#define LLVM_ADT_PRIORITYQUEUE_H
+
+#include <algorithm>
+#include <queue>
+
+namespace llvm {
+
+/// PriorityQueue - This class behaves like std::priority_queue and
+/// provides a few additional convenience functions.
+///
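+/// For example (a small sketch):
+///
+/// \code
+///   PriorityQueue<int> Q;
+///   Q.push(3); Q.push(1); Q.push(4);
+///   Q.erase_one(3); // Removes 3 regardless of its position in the heap.
+///   // Q.top() == 4
+/// \endcode
+///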
+template<class T,
+         class Sequence = std::vector<T>,
+         class Compare = std::less<typename Sequence::value_type> >
+class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
+public:
+  explicit PriorityQueue(const Compare &compare = Compare(),
+                         const Sequence &sequence = Sequence())
+    : std::priority_queue<T, Sequence, Compare>(compare, sequence)
+  {}
+
+  template<class Iterator>
+  PriorityQueue(Iterator begin, Iterator end,
+                const Compare &compare = Compare(),
+                const Sequence &sequence = Sequence())
+    : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
+  {}
+
+  /// erase_one - Erase one element from the queue, regardless of its
+  /// position. This operation performs a linear search to find an element
+  /// equal to t, but then uses only logarithmic-time algorithms to do
+  /// the erase operation.
+  ///
+  void erase_one(const T &t) {
+    // Linear-search to find the element.
+    typename Sequence::size_type i =
+        std::find(this->c.begin(), this->c.end(), t) - this->c.begin();
+
+    // Logarithmic-time heap bubble-up.
+    while (i != 0) {
+      typename Sequence::size_type parent = (i - 1) / 2;
+      this->c[i] = this->c[parent];
+      i = parent;
+    }
+
+    // The element we want to remove is now at the root, so we can use
+    // priority_queue's plain pop to remove it.
+    this->pop();
+  }
+
+  /// reheapify - If an element in the queue has changed in a way that
+  /// affects its standing in the comparison function, the queue's
+  /// internal state becomes invalid. Calling reheapify() resets the
+  /// queue's state, making it valid again. This operation has time
+  /// complexity proportional to the number of elements in the queue,
+  /// so don't plan to use it a lot.
+  ///
+  void reheapify() {
+    std::make_heap(this->c.begin(), this->c.end(), this->comp);
+  }
+
+  /// clear - Erase all elements from the queue.
+  ///
+  void clear() {
+    this->c.clear();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PRIORITYQUEUE_H
diff --git a/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
new file mode 100644
index 0000000..aa531f3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
@@ -0,0 +1,266 @@
+//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+///
+/// This file provides a priority worklist. See the class comments for details.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYWORKLIST_H
+#define LLVM_ADT_PRIORITYWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+/// A FILO worklist that prioritizes on re-insertion without duplication.
+///
+/// This is very similar to a \c SetVector with the primary difference that
+/// while re-insertion does not create a duplicate, it does adjust the
+/// visitation order to respect the last insertion point. This can be useful
+/// when the visit order needs to be prioritized based on insertion point
+/// without actually having duplicate visits.
+///
+/// Note that this doesn't prevent re-insertion of elements which have been
+/// visited -- if you need to break cycles, a set will still be necessary.
+///
+/// The type \c T must be default constructible to a null value that will be
+/// ignored. It is an error to insert such a value, and popping elements will
+/// never produce such a value. It is expected to be used with common nullable
+/// types like pointers or optionals.
+///
+/// Internally this uses a vector to store the worklist and a map to identify
+/// existing elements in the worklist. Both of these may be customized, but the
+/// map must support the basic DenseMap API for mapping from a T to an integer
+/// index into the vector.
+///
+/// A partial specialization is provided to automatically select a SmallVector
+/// and a SmallDenseMap if custom data structures are not provided.
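+///
+/// For example (a sketch; for \c int the ignored null value is 0):
+///
+/// \code
+///   SmallPriorityWorklist<int, 4> W;
+///   W.insert(1); W.insert(2); W.insert(3);
+///   W.insert(2);       // Re-insertion moves 2 to the back.
+///   W.pop_back_val();  // 2
+///   W.pop_back_val();  // 3
+///   W.pop_back_val();  // 1
+/// \endcode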
+template <typename T, typename VectorT = std::vector<T>,
+          typename MapT = DenseMap<T, ptrdiff_t>>
+class PriorityWorklist {
+public:
+  using value_type = T;
+  using key_type = T;
+  using reference = T&;
+  using const_reference = const T&;
+  using size_type = typename MapT::size_type;
+
+  /// Construct an empty PriorityWorklist
+  PriorityWorklist() = default;
+
+  /// Determine if the PriorityWorklist is empty or not.
+  bool empty() const {
+    return V.empty();
+  }
+
+  /// Returns the number of elements in the worklist.
+  size_type size() const {
+    return M.size();
+  }
+
+  /// Count the number of elements of a given key in the PriorityWorklist.
+  /// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
+  size_type count(const key_type &key) const {
+    return M.count(key);
+  }
+
+  /// Return the last element of the PriorityWorklist.
+  const T &back() const {
+    assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
+    return V.back();
+  }
+
+  /// Insert a new element into the PriorityWorklist.
+  /// \returns true if the element was inserted into the PriorityWorklist.
+  bool insert(const T &X) {
+    assert(X != T() && "Cannot insert a null (default constructed) value!");
+    auto InsertResult = M.insert({X, V.size()});
+    if (InsertResult.second) {
+      // Fresh value, just append it to the vector.
+      V.push_back(X);
+      return true;
+    }
+
+    auto &Index = InsertResult.first->second;
+    assert(V[Index] == X && "Value not actually at index in map!");
+    if (Index != (ptrdiff_t)(V.size() - 1)) {
+      // If the element isn't at the back, null it out and append a fresh one.
+      V[Index] = T();
+      Index = (ptrdiff_t)V.size();
+      V.push_back(X);
+    }
+    return false;
+  }
+
+  /// Insert a sequence of new elements into the PriorityWorklist.
+  template <typename SequenceT>
+  typename std::enable_if<!std::is_convertible<SequenceT, T>::value>::type
+  insert(SequenceT &&Input) {
+    if (std::begin(Input) == std::end(Input))
+      // Nothing to do for an empty input sequence.
+      return;
+
+    // First pull the input sequence into the vector as a bulk append
+    // operation.
+    ptrdiff_t StartIndex = V.size();
+    V.insert(V.end(), std::begin(Input), std::end(Input));
+    // Now walk backwards fixing up the index map and deleting any duplicates.
+    for (ptrdiff_t i = V.size() - 1; i >= StartIndex; --i) {
+      auto InsertResult = M.insert({V[i], i});
+      if (InsertResult.second)
+        continue;
+
+      // If the existing index is before this insert's start, nuke that one and
+      // move it up.
+      ptrdiff_t &Index = InsertResult.first->second;
+      if (Index < StartIndex) {
+        V[Index] = T();
+        Index = i;
+        continue;
+      }
+
+      // Otherwise the existing one comes first so just clear out the value in
+      // this slot.
+      V[i] = T();
+    }
+  }
+
+  /// Remove the last element of the PriorityWorklist.
+  void pop_back() {
+    assert(!empty() && "Cannot remove an element when empty!");
+    assert(back() != T() && "Cannot have a null element at the back!");
+    M.erase(back());
+    do {
+      V.pop_back();
+    } while (!V.empty() && V.back() == T());
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Ret = back();
+    pop_back();
+    return Ret;
+  }
+
+  /// Erase an item from the worklist.
+  ///
+  /// Note that this is amortized constant time due to the nature of the
+  /// worklist implementation.
+  bool erase(const T& X) {
+    auto I = M.find(X);
+    if (I == M.end())
+      return false;
+
+    assert(V[I->second] == X && "Value not actually at index in map!");
+    if (I->second == (ptrdiff_t)(V.size() - 1)) {
+      do {
+        V.pop_back();
+      } while (!V.empty() && V.back() == T());
+    } else {
+      V[I->second] = T();
+    }
+    M.erase(I);
+    return true;
+  }
+
+  /// Erase items from the set vector based on a predicate function.
+  ///
+  /// This is intended to be equivalent to the following code, if we could
+  /// write it:
+  ///
+  /// \code
+  ///   V.erase(remove_if(V, P), V.end());
+  /// \endcode
+  ///
+  /// However, PriorityWorklist doesn't expose non-const iterators, making any
+  /// algorithm like remove_if impossible to use.
+  ///
+  /// \returns true if any element is removed.
+  template <typename UnaryPredicate>
+  bool erase_if(UnaryPredicate P) {
+    typename VectorT::iterator E =
+        remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
+    if (E == V.end())
+      return false;
+    for (auto I = V.begin(); I != E; ++I)
+      if (*I != T())
+        M[*I] = I - V.begin();
+    V.erase(E, V.end());
+    return true;
+  }
+
+  /// Reverse the items in the PriorityWorklist.
+  ///
+  /// This does an in-place reversal. Other kinds of reverse aren't easy to
+  /// support in the face of the worklist semantics.
+  void reverse() {
+    std::reverse(V.begin(), V.end());
+    for (ptrdiff_t i = 0, e = (ptrdiff_t)V.size(); i != e; ++i)
+      if (V[i] != T())
+        M[V[i]] = i; // Rebuild the index map to match the new positions.
+  }
+
+  /// Completely clear the PriorityWorklist
+  void clear() {
+    M.clear();
+    V.clear();
+  }
+
+private:
+  /// A wrapper predicate designed for use with std::remove_if.
+  ///
+  /// This predicate wraps a predicate suitable for use with std::remove_if to
+  /// call M.erase(x) on each element which is slated for removal. This just
+  /// allows the predicate to be move only which we can't do with lambdas
+  /// today.
+  template <typename UnaryPredicateT>
+  class TestAndEraseFromMap {
+    UnaryPredicateT P;
+    MapT &M;
+
+  public:
+    TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
+        : P(std::move(P)), M(M) {}
+
+    bool operator()(const T &Arg) {
+      if (Arg == T())
+        // Skip null values in the PriorityWorklist.
+        return false;
+
+      if (P(Arg)) {
+        M.erase(Arg);
+        return true;
+      }
+      return false;
+    }
+  };
+
+  /// The map from value to index in the vector.
+  MapT M;
+
+  /// The vector of elements in insertion order.
+  VectorT V;
+};
+
+/// A version of \c PriorityWorklist that selects small size optimized data
+/// structures for the vector and map.
+template <typename T, unsigned N>
+class SmallPriorityWorklist
+    : public PriorityWorklist<T, SmallVector<T, N>,
+                              SmallDenseMap<T, ptrdiff_t>> {
+public:
+  SmallPriorityWorklist() = default;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PRIORITYWORKLIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/SCCIterator.h b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
new file mode 100644
index 0000000..784a58d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
@@ -0,0 +1,237 @@
+//===- ADT/SCCIterator.h - Strongly Connected Comp. Iter. -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
+/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
+/// algorithm.
+///
+/// The SCC iterator has the important property that if a node in SCC S1 has an
+/// edge to a node in SCC S2, then it visits S1 *after* S2.
+///
+/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
+/// This requires some simple wrappers and is not supported yet.)
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCCITERATOR_H
+#define LLVM_ADT_SCCITERATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+/// \brief Enumerate the SCCs of a directed graph in reverse topological order
+/// of the SCC DAG.
+///
+/// This is implemented using Tarjan's DFS algorithm using an internal stack to
+/// build up a vector of nodes in a particular SCC. Note that it is a forward
+/// iterator and thus you cannot backtrack or re-visit nodes.
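+///
+/// A typical usage sketch (assuming a graph type with a GraphTraits
+/// specialization, e.g. Function *):
+///
+/// \code
+///   for (scc_iterator<Function *> I = scc_begin(&F); !I.isAtEnd(); ++I) {
+///     const std::vector<BasicBlock *> &SCC = *I;
+///     if (I.hasLoop())
+///       ...; // This SCC contains a cycle.
+///   }
+/// \endcode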
+template <class GraphT, class GT = GraphTraits<GraphT>>
+class scc_iterator : public iterator_facade_base<
+                         scc_iterator<GraphT, GT>, std::forward_iterator_tag,
+                         const std::vector<typename GT::NodeRef>, ptrdiff_t> {
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+  using SccTy = std::vector<NodeRef>;
+  using reference = typename scc_iterator::reference;
+
+  /// Element of VisitStack during DFS.
+  struct StackElement {
+    NodeRef Node;         ///< The current node pointer.
+    ChildItTy NextChild;  ///< The next child, modified inplace during DFS.
+    unsigned MinVisited;  ///< Minimum uplink value of all children of Node.
+
+    StackElement(NodeRef Node, const ChildItTy &Child, unsigned Min)
+        : Node(Node), NextChild(Child), MinVisited(Min) {}
+
+    bool operator==(const StackElement &Other) const {
+      return Node == Other.Node &&
+             NextChild == Other.NextChild &&
+             MinVisited == Other.MinVisited;
+    }
+  };
+
+  /// The visit counters used to detect when a complete SCC is on the stack.
+  /// visitNum is the global counter.
+  ///
+  /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
+  unsigned visitNum;
+  DenseMap<NodeRef, unsigned> nodeVisitNumbers;
+
+  /// Stack holding nodes of the SCC.
+  std::vector<NodeRef> SCCNodeStack;
+
+  /// The current SCC, retrieved using operator*().
+  SccTy CurrentSCC;
+
+  /// DFS stack, used to maintain the ordering.  The top contains the current
+  /// node, the next child to visit, and the minimum uplink value of all
+  /// children.
+  std::vector<StackElement> VisitStack;
+
+  /// A single "visit" within the non-recursive DFS traversal.
+  void DFSVisitOne(NodeRef N);
+
+  /// The stack-based DFS traversal; defined below.
+  void DFSVisitChildren();
+
+  /// Compute the next SCC using the DFS traversal.
+  void GetNextSCC();
+
+  scc_iterator(NodeRef entryN) : visitNum(0) {
+    DFSVisitOne(entryN);
+    GetNextSCC();
+  }
+
+  /// End is when the DFS stack is empty.
+  scc_iterator() = default;
+
+public:
+  static scc_iterator begin(const GraphT &G) {
+    return scc_iterator(GT::getEntryNode(G));
+  }
+  static scc_iterator end(const GraphT &) { return scc_iterator(); }
+
+  /// \brief Direct loop termination test which is more efficient than
+  /// comparison with \c end().
+  bool isAtEnd() const {
+    assert(!CurrentSCC.empty() || VisitStack.empty());
+    return CurrentSCC.empty();
+  }
+
+  bool operator==(const scc_iterator &x) const {
+    return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
+  }
+
+  scc_iterator &operator++() {
+    GetNextSCC();
+    return *this;
+  }
+
+  reference operator*() const {
+    assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+    return CurrentSCC;
+  }
+
+  /// \brief Test if the current SCC has a loop.
+  ///
+  /// If the SCC has more than one node, this is trivially true.  If not, it may
+  /// still contain a loop if the node has an edge back to itself.
+  bool hasLoop() const;
+
+  /// This informs the \c scc_iterator that the specified \c Old node
+  /// has been deleted, and \c New is to be used in its place.
+  void ReplaceNode(NodeRef Old, NodeRef New) {
+    assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+    nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+    nodeVisitNumbers.erase(Old);
+  }
+};
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitOne(NodeRef N) {
+  ++visitNum;
+  nodeVisitNumbers[N] = visitNum;
+  SCCNodeStack.push_back(N);
+  VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
+#if 0 // Enable if needed when debugging.
+  dbgs() << "TarjanSCC: Node " << N <<
+        " : visitNum = " << visitNum << "\n";
+#endif
+}
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitChildren() {
+  assert(!VisitStack.empty());
+  while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
+    // TOS has at least one more child so continue DFS
+    NodeRef childN = *VisitStack.back().NextChild++;
+    typename DenseMap<NodeRef, unsigned>::iterator Visited =
+        nodeVisitNumbers.find(childN);
+    if (Visited == nodeVisitNumbers.end()) {
+      // this node has never been seen.
+      DFSVisitOne(childN);
+      continue;
+    }
+
+    unsigned childNum = Visited->second;
+    if (VisitStack.back().MinVisited > childNum)
+      VisitStack.back().MinVisited = childNum;
+  }
+}
+
+template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
+  CurrentSCC.clear(); // Prepare to compute the next SCC
+  while (!VisitStack.empty()) {
+    DFSVisitChildren();
+
+    // Pop the leaf on top of the VisitStack.
+    NodeRef visitingN = VisitStack.back().Node;
+    unsigned minVisitNum = VisitStack.back().MinVisited;
+    assert(VisitStack.back().NextChild == GT::child_end(visitingN));
+    VisitStack.pop_back();
+
+    // Propagate MinVisitNum to parent so we can detect the SCC starting node.
+    if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
+      VisitStack.back().MinVisited = minVisitNum;
+
+#if 0 // Enable if needed when debugging.
+    dbgs() << "TarjanSCC: Popped node " << visitingN <<
+          " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
+          nodeVisitNumbers[visitingN] << "\n";
+#endif
+
+    if (minVisitNum != nodeVisitNumbers[visitingN])
+      continue;
+
+    // A full SCC is on the SCCNodeStack!  It includes all nodes below
+    // visitingN on the stack.  Copy those nodes to CurrentSCC,
+    // reset their minVisit values, and return (this suspends
+    // the DFS traversal till the next ++).
+    do {
+      CurrentSCC.push_back(SCCNodeStack.back());
+      SCCNodeStack.pop_back();
+      nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+    } while (CurrentSCC.back() != visitingN);
+    return;
+  }
+}
+
+template <class GraphT, class GT>
+bool scc_iterator<GraphT, GT>::hasLoop() const {
+  assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+  if (CurrentSCC.size() > 1)
+    return true;
+  NodeRef N = CurrentSCC.front();
+  for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
+       ++CI)
+    if (*CI == N)
+      return true;
+  return false;
+}
+
+/// \brief Construct the begin iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_begin(const T &G) {
+  return scc_iterator<T>::begin(G);
+}
+
+/// \brief Construct the end iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_end(const T &G) {
+  return scc_iterator<T>::end(G);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCCITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/STLExtras.h b/linux-x64/clang/include/llvm/ADT/STLExtras.h
new file mode 100644
index 0000000..051b900
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/STLExtras.h
@@ -0,0 +1,1181 @@
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLEXTRAS_H
+#define LLVM_ADT_STLEXTRAS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#ifdef EXPENSIVE_CHECKS
+#include <random> // for std::mt19937
+#endif
+
+namespace llvm {
+
+// Only used by compiler if both template types are the same.  Useful when
+// using SFINAE to test for the existence of member functions.
+template <typename T, T> struct SameType;
+
+namespace detail {
+
+template <typename RangeT>
+using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
+
+template <typename RangeT>
+using ValueOfRange = typename std::remove_reference<decltype(
+    *std::begin(std::declval<RangeT &>()))>::type;
+
+} // end namespace detail
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <functional>
+//===----------------------------------------------------------------------===//
+
+template <class Ty> struct identity {
+  using argument_type = Ty;
+
+  Ty &operator()(Ty &self) const {
+    return self;
+  }
+  const Ty &operator()(const Ty &self) const {
+    return self;
+  }
+};
+
+template <class Ty> struct less_ptr {
+  bool operator()(const Ty* left, const Ty* right) const {
+    return *left < *right;
+  }
+};
+
+template <class Ty> struct greater_ptr {
+  bool operator()(const Ty* left, const Ty* right) const {
+    return *right < *left;
+  }
+};
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
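+///
+/// For example (a sketch; \c forEachLine is a hypothetical API):
+///
+/// \code
+///   void forEachLine(StringRef Text, function_ref<void(StringRef)> Fn);
+///
+///   unsigned Count = 0;
+///   forEachLine(Text, [&](StringRef Line) { ++Count; });
+/// \endcode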
+template<typename Fn> class function_ref;
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+  Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
+  intptr_t callable;
+
+  template<typename Callable>
+  static Ret callback_fn(intptr_t callable, Params ...params) {
+    return (*reinterpret_cast<Callable*>(callable))(
+        std::forward<Params>(params)...);
+  }
+
+public:
+  function_ref() = default;
+  function_ref(std::nullptr_t) {}
+
+  template <typename Callable>
+  function_ref(Callable &&callable,
+               typename std::enable_if<
+                   !std::is_same<typename std::remove_reference<Callable>::type,
+                                 function_ref>::value>::type * = nullptr)
+      : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+        callable(reinterpret_cast<intptr_t>(&callable)) {}
+
+  Ret operator()(Params ...params) const {
+    return callback(callable, std::forward<Params>(params)...);
+  }
+
+  operator bool() const { return callback; }
+};
+
+// deleter - Very very very simple function that is used to invoke operator
+// delete on something.  It is used like this:
+//
+//   for_each(V.begin(), V.end(), deleter<Interval>);
+template <class T>
+inline void deleter(T *Ptr) {
+  delete Ptr;
+}
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <iterator>
+//===----------------------------------------------------------------------===//
+
+namespace adl_detail {
+
+using std::begin;
+
+template <typename ContainerTy>
+auto adl_begin(ContainerTy &&container)
+    -> decltype(begin(std::forward<ContainerTy>(container))) {
+  return begin(std::forward<ContainerTy>(container));
+}
+
+using std::end;
+
+template <typename ContainerTy>
+auto adl_end(ContainerTy &&container)
+    -> decltype(end(std::forward<ContainerTy>(container))) {
+  return end(std::forward<ContainerTy>(container));
+}
+
+using std::swap;
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
+                                                       std::declval<T>()))) {
+  swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
+
+} // end namespace adl_detail
+
+template <typename ContainerTy>
+auto adl_begin(ContainerTy &&container)
+    -> decltype(adl_detail::adl_begin(std::forward<ContainerTy>(container))) {
+  return adl_detail::adl_begin(std::forward<ContainerTy>(container));
+}
+
+template <typename ContainerTy>
+auto adl_end(ContainerTy &&container)
+    -> decltype(adl_detail::adl_end(std::forward<ContainerTy>(container))) {
+  return adl_detail::adl_end(std::forward<ContainerTy>(container));
+}
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(
+    noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
+  adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
+
+// mapped_iterator - This is a simple iterator adapter that causes a function to
+// be applied whenever operator* is invoked on the iterator.
+
+template <typename ItTy, typename FuncTy,
+          typename FuncReturnTy =
+            decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
+class mapped_iterator
+    : public iterator_adaptor_base<
+             mapped_iterator<ItTy, FuncTy>, ItTy,
+             typename std::iterator_traits<ItTy>::iterator_category,
+             typename std::remove_reference<FuncReturnTy>::type> {
+public:
+  mapped_iterator(ItTy U, FuncTy F)
+    : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
+
+  ItTy getCurrent() { return this->I; }
+
+  FuncReturnTy operator*() { return F(*this->I); }
+
+private:
+  FuncTy F;
+};
+
+// map_iterator - Provide a convenient way to create mapped_iterators, just like
+// make_pair is useful for creating pairs...
+template <class ItTy, class FuncTy>
+inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
+  return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
+}
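+
+// For example (a sketch):
+//
+//   std::vector<int> V = {1, 2, 3};
+//   auto I = map_iterator(V.begin(), [](int N) { return N * 2; });
+//   // *I == 2; after ++I, *I == 4.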
+
+/// Helper to determine if type T has a member called rbegin().
+template <typename Ty> class has_rbegin_impl {
+  using yes = char[1];
+  using no = char[2];
+
+  template <typename Inner>
+  static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
+
+  template <typename>
+  static no& test(...);
+
+public:
+  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+
+/// Metafunction to determine if T& or T has a member called rbegin().
+template <typename Ty>
+struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
+};
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have rbegin()/rend() methods for this to work.
+template <typename ContainerTy>
+auto reverse(ContainerTy &&C,
+             typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
+                 nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
+  return make_range(C.rbegin(), C.rend());
+}
+
+// Returns a std::reverse_iterator wrapped around the given iterator.
+template <typename IteratorTy>
+std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
+  return std::reverse_iterator<IteratorTy>(It);
+}
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have begin()/end() methods which return
+// bidirectional iterators for this to work.
+template <typename ContainerTy>
+auto reverse(
+    ContainerTy &&C,
+    typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
+    -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
+                           llvm::make_reverse_iterator(std::begin(C)))) {
+  return make_range(llvm::make_reverse_iterator(std::end(C)),
+                    llvm::make_reverse_iterator(std::begin(C)));
+}
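+
+// For example (a sketch):
+//
+//   std::vector<int> V = {1, 2, 3};
+//   for (int I : reverse(V)) // Visits 3, 2, 1.
+//     ...;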
+
+/// An iterator adaptor that filters the elements of given inner iterators.
+///
+/// The predicate parameter should be a callable object that accepts the wrapped
+/// iterator's reference type and returns a bool. When incrementing or
+/// decrementing the iterator, it will call the predicate on each element and
+/// skip any where it returns false.
+///
+/// \code
+///   int A[] = { 1, 2, 3, 4 };
+///   auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
+///   // R contains { 1, 3 }.
+/// \endcode
+template <typename WrappedIteratorT, typename PredicateT>
+class filter_iterator
+    : public iterator_adaptor_base<
+          filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
+          typename std::common_type<
+              std::forward_iterator_tag,
+              typename std::iterator_traits<
+                  WrappedIteratorT>::iterator_category>::type> {
+  using BaseT = iterator_adaptor_base<
+      filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
+      typename std::common_type<
+          std::forward_iterator_tag,
+          typename std::iterator_traits<WrappedIteratorT>::iterator_category>::
+          type>;
+
+  struct PayloadType {
+    WrappedIteratorT End;
+    PredicateT Pred;
+  };
+
+  Optional<PayloadType> Payload;
+
+  void findNextValid() {
+    assert(Payload && "Payload should be engaged when findNextValid is called");
+    while (this->I != Payload->End && !Payload->Pred(*this->I))
+      BaseT::operator++();
+  }
+
+  // Construct the begin iterator. The begin iterator needs to know where the
+  // end is, so that it can properly stop when it hits it.
+  filter_iterator(WrappedIteratorT Begin, WrappedIteratorT End, PredicateT Pred)
+      : BaseT(std::move(Begin)),
+        Payload(PayloadType{std::move(End), std::move(Pred)}) {
+    findNextValid();
+  }
+
+  // Construct the end iterator. It's not incrementable, so Payload doesn't
+  // have to be engaged.
+  filter_iterator(WrappedIteratorT End) : BaseT(End) {}
+
+public:
+  using BaseT::operator++;
+
+  filter_iterator &operator++() {
+    BaseT::operator++();
+    findNextValid();
+    return *this;
+  }
+
+  template <typename RT, typename PT>
+  friend iterator_range<filter_iterator<detail::IterOfRange<RT>, PT>>
+  make_filter_range(RT &&, PT);
+};
+
+/// Convenience function that takes a range of elements and a predicate,
+/// and returns a new filter_iterator range.
+///
+/// FIXME: Currently, if RangeT && is an rvalue reference to a temporary, the
+/// lifetime of that temporary is not kept by the returned range object, and the
+/// temporary is going to be dropped on the floor after the make_iterator_range
+/// full expression that contains this function call.
+template <typename RangeT, typename PredicateT>
+iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
+make_filter_range(RangeT &&Range, PredicateT Pred) {
+  using FilterIteratorT =
+      filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
+  return make_range(FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
+                                    std::end(std::forward<RangeT>(Range)),
+                                    std::move(Pred)),
+                    FilterIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+// forward declarations required by zip_shortest/zip_first
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&range, UnaryPredicate P);
+
+template <size_t... I> struct index_sequence;
+
+template <class... Ts> struct index_sequence_for;
+
+namespace detail {
+
+using std::declval;
+
+// We have to alias this since inlining the actual type at the usage site
+// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
+template<typename... Iters> struct ZipTupleType {
+  using type = std::tuple<decltype(*declval<Iters>())...>;
+};
+
+template <typename ZipType, typename... Iters>
+using zip_traits = iterator_facade_base<
+    ZipType, typename std::common_type<std::bidirectional_iterator_tag,
+                                       typename std::iterator_traits<
+                                           Iters>::iterator_category...>::type,
+    // ^ TODO: Implement random access methods.
+    typename ZipTupleType<Iters...>::type,
+    typename std::iterator_traits<typename std::tuple_element<
+        0, std::tuple<Iters...>>::type>::difference_type,
+    // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
+    // inner iterators have the same difference_type. It would fail if, for
+    // instance, the second field's difference_type were non-numeric while the
+    // first is.
+    typename ZipTupleType<Iters...>::type *,
+    typename ZipTupleType<Iters...>::type>;
+
+template <typename ZipType, typename... Iters>
+struct zip_common : public zip_traits<ZipType, Iters...> {
+  using Base = zip_traits<ZipType, Iters...>;
+  using value_type = typename Base::value_type;
+
+  std::tuple<Iters...> iterators;
+
+protected:
+  template <size_t... Ns> value_type deref(index_sequence<Ns...>) const {
+    return value_type(*std::get<Ns>(iterators)...);
+  }
+
+  template <size_t... Ns>
+  decltype(iterators) tup_inc(index_sequence<Ns...>) const {
+    return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
+  }
+
+  template <size_t... Ns>
+  decltype(iterators) tup_dec(index_sequence<Ns...>) const {
+    return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
+  }
+
+public:
+  zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
+
+  value_type operator*() { return deref(index_sequence_for<Iters...>{}); }
+
+  const value_type operator*() const {
+    return deref(index_sequence_for<Iters...>{});
+  }
+
+  ZipType &operator++() {
+    iterators = tup_inc(index_sequence_for<Iters...>{});
+    return *reinterpret_cast<ZipType *>(this);
+  }
+
+  ZipType &operator--() {
+    static_assert(Base::IsBidirectional,
+                  "All inner iterators must be at least bidirectional.");
+    iterators = tup_dec(index_sequence_for<Iters...>{});
+    return *reinterpret_cast<ZipType *>(this);
+  }
+};
+
+template <typename... Iters>
+struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
+  using Base = zip_common<zip_first<Iters...>, Iters...>;
+
+  bool operator==(const zip_first<Iters...> &other) const {
+    return std::get<0>(this->iterators) == std::get<0>(other.iterators);
+  }
+
+  zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+};
+
+template <typename... Iters>
+class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
+  template <size_t... Ns>
+  bool test(const zip_shortest<Iters...> &other, index_sequence<Ns...>) const {
+    return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+                                              std::get<Ns>(other.iterators)...},
+                  identity<bool>{});
+  }
+
+public:
+  using Base = zip_common<zip_shortest<Iters...>, Iters...>;
+
+  zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+
+  bool operator==(const zip_shortest<Iters...> &other) const {
+    return !test(other, index_sequence_for<Iters...>{});
+  }
+};
+
+template <template <typename...> class ItType, typename... Args> class zippy {
+public:
+  using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
+  using iterator_category = typename iterator::iterator_category;
+  using value_type = typename iterator::value_type;
+  using difference_type = typename iterator::difference_type;
+  using pointer = typename iterator::pointer;
+  using reference = typename iterator::reference;
+
+private:
+  std::tuple<Args...> ts;
+
+  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
+    return iterator(std::begin(std::get<Ns>(ts))...);
+  }
+  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
+    return iterator(std::end(std::get<Ns>(ts))...);
+  }
+
+public:
+  zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+
+  iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
+  iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
+};
+
+} // end namespace detail
+
+/// zip iterator for two or more iterable types.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
+                                                       Args &&... args) {
+  return detail::zippy<detail::zip_shortest, T, U, Args...>(
+      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
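+
+// A minimal usage sketch (the containers are illustrative): iterate two
+// ranges in lockstep, stopping at the end of the shorter one.
+//
+//   std::vector<int> A = {1, 2, 3};
+//   std::vector<char> B = {'a', 'b'};
+//   for (auto P : zip(A, B))
+//     std::get<0>(P) += 10; // P is a tuple of references; visits
+//                           // (1, 'a') and (2, 'b').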
+
+/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
+/// be the shortest.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
+                                                          Args &&... args) {
+  return detail::zippy<detail::zip_first, T, U, Args...>(
+      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
+
+/// Iterator wrapper that concatenates sequences together.
+///
+/// This can concatenate different iterators, even with different types, into
+/// a single iterator provided the value types of all the concatenated
+/// iterators expose `reference` and `pointer` types that can be converted to
+/// `ValueT &` and `ValueT *` respectively. It doesn't support more
+/// interesting/customized pointer or reference types.
+///
+/// Currently this only supports forward or higher iterator categories as
+/// inputs and always exposes a forward iterator interface.
+template <typename ValueT, typename... IterTs>
+class concat_iterator
+    : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
+                                  std::forward_iterator_tag, ValueT> {
+  using BaseT = typename concat_iterator::iterator_facade_base;
+
+  /// We store both the current and end iterators for each concatenated
+  /// sequence in a tuple of pairs.
+  ///
+  /// Note that something like iterator_range seems nice at first here, but the
+  /// range properties are of little benefit and end up getting in the way
+  /// because we need to do mutation on the current iterators.
+  std::tuple<std::pair<IterTs, IterTs>...> IterPairs;
+
+  /// Attempts to increment a specific iterator.
+  ///
+  /// Returns true if it was able to increment the iterator. Returns false if
+  /// the iterator is already at the end iterator.
+  template <size_t Index> bool incrementHelper() {
+    auto &IterPair = std::get<Index>(IterPairs);
+    if (IterPair.first == IterPair.second)
+      return false;
+
+    ++IterPair.first;
+    return true;
+  }
+
+  /// Increments the first non-end iterator.
+  ///
+  /// It is an error to call this with all iterators at the end.
+  template <size_t... Ns> void increment(index_sequence<Ns...>) {
+    // Build a sequence of functions to increment each iterator if possible.
+    bool (concat_iterator::*IncrementHelperFns[])() = {
+        &concat_iterator::incrementHelper<Ns>...};
+
+    // Loop over them, and stop as soon as we succeed at incrementing one.
+    for (auto &IncrementHelperFn : IncrementHelperFns)
+      if ((this->*IncrementHelperFn)())
+        return;
+
+    llvm_unreachable("Attempted to increment an end concat iterator!");
+  }
+
+  /// Returns null if the specified iterator is at the end. Otherwise,
+  /// dereferences the iterator and returns the address of the resulting
+  /// reference.
+  template <size_t Index> ValueT *getHelper() const {
+    auto &IterPair = std::get<Index>(IterPairs);
+    if (IterPair.first == IterPair.second)
+      return nullptr;
+
+    return &*IterPair.first;
+  }
+
+  /// Finds the first non-end iterator, dereferences, and returns the resulting
+  /// reference.
+  ///
+  /// It is an error to call this with all iterators at the end.
+  template <size_t... Ns> ValueT &get(index_sequence<Ns...>) const {
+    // Build a sequence of functions to get from iterator if possible.
+    ValueT *(concat_iterator::*GetHelperFns[])() const = {
+        &concat_iterator::getHelper<Ns>...};
+
+    // Loop over them, and return the first result we find.
+    for (auto &GetHelperFn : GetHelperFns)
+      if (ValueT *P = (this->*GetHelperFn)())
+        return *P;
+
+    llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
+  }
+
+public:
+  /// Constructs an iterator from a sequence of ranges.
+  ///
+  /// We need the full range to know how to switch between each of the
+  /// iterators.
+  template <typename... RangeTs>
+  explicit concat_iterator(RangeTs &&... Ranges)
+      : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}
+
+  using BaseT::operator++;
+
+  concat_iterator &operator++() {
+    increment(index_sequence_for<IterTs...>());
+    return *this;
+  }
+
+  ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
+
+  bool operator==(const concat_iterator &RHS) const {
+    return IterPairs == RHS.IterPairs;
+  }
+};
+
+namespace detail {
+
+/// Helper to store a sequence of ranges being concatenated and access them.
+///
+/// This is designed to facilitate providing actual storage when temporaries
+/// are passed into the constructor such that we can use it as part of range
+/// based for loops.
+template <typename ValueT, typename... RangeTs> class concat_range {
+public:
+  using iterator =
+      concat_iterator<ValueT,
+                      decltype(std::begin(std::declval<RangeTs &>()))...>;
+
+private:
+  std::tuple<RangeTs...> Ranges;
+
+  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
+    return iterator(std::get<Ns>(Ranges)...);
+  }
+  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
+    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
+                               std::end(std::get<Ns>(Ranges)))...);
+  }
+
+public:
+  concat_range(RangeTs &&... Ranges)
+      : Ranges(std::forward<RangeTs>(Ranges)...) {}
+
+  iterator begin() { return begin_impl(index_sequence_for<RangeTs...>{}); }
+  iterator end() { return end_impl(index_sequence_for<RangeTs...>{}); }
+};
+
+} // end namespace detail
+
+/// Concatenated range across two or more ranges.
+///
+/// The desired value type must be explicitly specified.
+template <typename ValueT, typename... RangeTs>
+detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
+  static_assert(sizeof...(RangeTs) > 1,
+                "Need more than one range to concatenate!");
+  return detail::concat_range<ValueT, RangeTs...>(
+      std::forward<RangeTs>(Ranges)...);
+}
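+
+// A minimal usage sketch (the containers are illustrative); note that the
+// value type must be spelled out explicitly:
+//
+//   std::vector<int> V1 = {1, 2};
+//   std::list<int> V2 = {3, 4};
+//   for (int &I : concat<int>(V1, V2))
+//     ++I; // Visits 1, 2, 3, 4 in order.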
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <utility>
+//===----------------------------------------------------------------------===//
+
+/// \brief Function object to check whether the first component of a std::pair
+/// compares less than the first component of another std::pair.
+struct less_first {
+  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+    return lhs.first < rhs.first;
+  }
+};
+
+/// \brief Function object to check whether the second component of a std::pair
+/// compares less than the second component of another std::pair.
+struct less_second {
+  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+    return lhs.second < rhs.second;
+  }
+};
+
+// A subset of N3658. More can be added as needed.
+
+/// \brief Represents a compile-time sequence of integers.
+template <class T, T... I> struct integer_sequence {
+  using value_type = T;
+
+  static constexpr size_t size() { return sizeof...(I); }
+};
+
+/// \brief Alias for the common case of a sequence of size_ts.
+template <size_t... I>
+struct index_sequence : integer_sequence<std::size_t, I...> {};
+
+template <std::size_t N, std::size_t... I>
+struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
+template <std::size_t... I>
+struct build_index_impl<0, I...> : index_sequence<I...> {};
+
+/// \brief Creates a compile-time integer sequence for a parameter pack.
+template <class... Ts>
+struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
+
+/// Utility type to build an inheritance chain that makes it easy to rank
+/// overload candidates.
+template <int N> struct rank : rank<N - 1> {};
+template <> struct rank<0> {};
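+
+// A sketch of the overload-ranking idiom this enables (fooImpl is a
+// hypothetical helper): rank<1> converts to rank<0>, so the rank<1>
+// overload wins whenever SFINAE keeps it viable.
+//
+//   template <typename T>
+//   auto fooImpl(T &Obj, rank<1>) -> decltype(Obj.foo()) { return Obj.foo(); }
+//   template <typename T> int fooImpl(T &, rank<0>) { return 0; }
+//   // Callers dispatch with the highest rank: fooImpl(Obj, rank<1>());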
+
+/// \brief traits class for checking whether type T is one of any of the given
+/// types in the variadic list.
+template <typename T, typename... Ts> struct is_one_of {
+  static const bool value = false;
+};
+
+template <typename T, typename U, typename... Ts>
+struct is_one_of<T, U, Ts...> {
+  static const bool value =
+      std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
+};
+
+/// \brief traits class for checking whether type T is a base class for all
+///  the given types in the variadic list.
+template <typename T, typename... Ts> struct are_base_of {
+  static const bool value = true;
+};
+
+template <typename T, typename U, typename... Ts>
+struct are_base_of<T, U, Ts...> {
+  static const bool value =
+      std::is_base_of<T, U>::value && are_base_of<T, Ts...>::value;
+};
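+
+// Compile-time checks, for illustration (Base and Derived are hypothetical
+// class types):
+//
+//   static_assert(is_one_of<int, float, int, char>::value, "int is listed");
+//   static_assert(!is_one_of<long, float, char>::value, "long is not");
+//   static_assert(are_base_of<Base, Derived, Base>::value,
+//                 "Base is a base of both");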
+
+//===----------------------------------------------------------------------===//
+//     Extra additions for arrays
+//===----------------------------------------------------------------------===//
+
+/// Find the length of an array.
+template <class T, std::size_t N>
+constexpr inline size_t array_lengthof(T (&)[N]) {
+  return N;
+}
+
+/// Adapt std::less<T> for array_pod_sort.
+template<typename T>
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+  if (std::less<T>()(*reinterpret_cast<const T*>(P1),
+                     *reinterpret_cast<const T*>(P2)))
+    return -1;
+  if (std::less<T>()(*reinterpret_cast<const T*>(P2),
+                     *reinterpret_cast<const T*>(P1)))
+    return 1;
+  return 0;
+}
+
+/// get_array_pod_sort_comparator - This is an internal helper function used to
+/// get type deduction of T right.
+template<typename T>
+inline int (*get_array_pod_sort_comparator(const T &))
+             (const void*, const void*) {
+  return array_pod_sort_comparator<T>;
+}
+
+/// array_pod_sort - This sorts an array with the specified start and end
+/// extent.  This is just like std::sort, except that it calls qsort instead of
+/// using an inlined template.  qsort is slightly slower than std::sort, but
+/// most sorts are not performance critical in LLVM and std::sort has to be
+/// template instantiated for each type, leading to significant measured code
+/// bloat.  This function should generally be used instead of std::sort where
+/// possible.
+///
+/// This function assumes that you have simple POD-like types that can be
+/// compared with std::less and can be moved with memcpy.  If this isn't true,
+/// you should use std::sort.
+///
+/// NOTE: If qsort_r were portable, we could allow a custom comparator and
+/// default to std::less.
+template<class IteratorTy>
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+  // Don't inefficiently call qsort with one element or trigger undefined
+  // behavior with an empty sequence.
+  auto NElts = End - Start;
+  if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
+}
+
+template <class IteratorTy>
+inline void array_pod_sort(
+    IteratorTy Start, IteratorTy End,
+    int (*Compare)(
+        const typename std::iterator_traits<IteratorTy>::value_type *,
+        const typename std::iterator_traits<IteratorTy>::value_type *)) {
+  // Don't inefficiently call qsort with one element or trigger undefined
+  // behavior with an empty sequence.
+  auto NElts = End - Start;
+  if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  qsort(&*Start, NElts, sizeof(*Start),
+        reinterpret_cast<int (*)(const void *, const void *)>(Compare));
+}
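+
+// A minimal usage sketch:
+//
+//   int Values[] = {3, 1, 2};
+//   array_pod_sort(std::begin(Values), std::end(Values)); // Values: 1, 2, 3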
+
+// Provide wrappers to std::sort which shuffle the elements before sorting
+// to help uncover non-deterministic behavior (PR35135).
+template <typename IteratorTy>
+inline void sort(IteratorTy Start, IteratorTy End) {
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  std::sort(Start, End);
+}
+
+template <typename IteratorTy, typename Compare>
+inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  std::sort(Start, End, Comp);
+}
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <algorithm>
+//===----------------------------------------------------------------------===//
+
+/// For a container of pointers, deletes the pointers and then clears the
+/// container.
+template<typename Container>
+void DeleteContainerPointers(Container &C) {
+  for (auto V : C)
+    delete V;
+  C.clear();
+}
+
+/// In a container of pairs (usually a map) whose second element is a pointer,
+/// deletes the second elements and then clears the container.
+template<typename Container>
+void DeleteContainerSeconds(Container &C) {
+  for (auto &V : C)
+    delete V.second;
+  C.clear();
+}
+
+/// Provide wrappers to std::for_each which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+UnaryPredicate for_each(R &&Range, UnaryPredicate P) {
+  return std::for_each(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::all_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&Range, UnaryPredicate P) {
+  return std::all_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::any_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&Range, UnaryPredicate P) {
+  return std::any_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::none_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool none_of(R &&Range, UnaryPredicate P) {
+  return std::none_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::find which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename T>
+auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range)) {
+  return std::find(adl_begin(Range), adl_end(Range), Val);
+}
+
+/// Provide wrappers to std::find_if which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::find_if(adl_begin(Range), adl_end(Range), P);
+}
+
+template <typename R, typename UnaryPredicate>
+auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::find_if_not(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::remove_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::remove_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::copy_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
+  return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
+}
+
+template <typename R, typename OutputIt>
+OutputIt copy(R &&Range, OutputIt Out) {
+  return std::copy(adl_begin(Range), adl_end(Range), Out);
+}
+
+/// Wrapper function around std::find to detect if an element exists
+/// in a container.
+template <typename R, typename E>
+bool is_contained(R &&Range, const E &Element) {
+  return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
+}
+
+/// Wrapper function around std::count to count the number of times an element
+/// \p Element occurs in the given range \p Range.
+template <typename R, typename E>
+auto count(R &&Range, const E &Element) ->
+    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+  return std::count(adl_begin(Range), adl_end(Range), Element);
+}
+
+/// Wrapper function around std::count_if to count the number of times an
+/// element satisfying a given predicate occurs in a range.
+template <typename R, typename UnaryPredicate>
+auto count_if(R &&Range, UnaryPredicate P) ->
+    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+  return std::count_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Wrapper function around std::transform to apply a function to a range and
+/// store the result elsewhere.
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) {
+  return std::transform(adl_begin(Range), adl_end(Range), d_first, P);
+}
+
+/// Provide wrappers to std::partition which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto partition(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::partition(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::lower_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename ForwardIt>
+auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
+  return std::lower_bound(adl_begin(Range), adl_end(Range), I);
+}
+
+/// \brief Given a range of type R, iterate the entire range and return a
+/// SmallVector containing the range's elements.  This is useful, for example,
+/// when you want to iterate a range and then sort the results.
+template <unsigned Size, typename R>
+SmallVector<typename std::remove_const<detail::ValueOfRange<R>>::type, Size>
+to_vector(R &&Range) {
+  return {adl_begin(Range), adl_end(Range)};
+}
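+
+// For illustration (Range is any hypothetical iterable):
+//
+//   auto Vec = to_vector<4>(Range); // A SmallVector with inline capacity 4
+//                                   // holding copies of Range's elements.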
+
+/// Provide a container algorithm similar to C++ Library Fundamentals v2's
+/// `erase_if` which is equivalent to:
+///
+///   C.erase(remove_if(C, pred), C.end());
+///
+/// This version works for any container with an erase method call accepting
+/// two iterators.
+template <typename Container, typename UnaryPredicate>
+void erase_if(Container &C, UnaryPredicate P) {
+  C.erase(remove_if(C, P), C.end());
+}
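+
+// A minimal usage sketch: drop all even numbers from a vector.
+//
+//   std::vector<int> V = {1, 2, 3, 4};
+//   erase_if(V, [](int I) { return I % 2 == 0; }); // V is now {1, 3}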
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <memory>
+//===----------------------------------------------------------------------===//
+
+// Implement make_unique according to N3656.
+
+/// \brief Constructs a `new T()` with the given args and returns a
+///        `unique_ptr<T>` which owns the object.
+///
+/// Example:
+///
+///     auto p = make_unique<int>();
+///     auto p = make_unique<std::tuple<int, int>>(0, 1);
+template <class T, class... Args>
+typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
+make_unique(Args &&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+/// \brief Constructs a `new T[n]` with the given args and returns a
+///        `unique_ptr<T[]>` which owns the object.
+///
+/// \param n size of the new array.
+///
+/// Example:
+///
+///     auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
+template <class T>
+typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
+                        std::unique_ptr<T>>::type
+make_unique(size_t n) {
+  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
+}
+
+/// This function isn't used and is only here to provide better compile errors.
+template <class T, class... Args>
+typename std::enable_if<std::extent<T>::value != 0>::type
+make_unique(Args &&...) = delete;
+
+struct FreeDeleter {
+  void operator()(void* v) {
+    ::free(v);
+  }
+};
+
+template<typename First, typename Second>
+struct pair_hash {
+  size_t operator()(const std::pair<First, Second> &P) const {
+    return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
+  }
+};
+
+/// A functor like C++14's std::less<void> in its absence.
+struct less {
+  template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+    return std::forward<A>(a) < std::forward<B>(b);
+  }
+};
+
+/// A functor like C++14's std::equal_to<void> in its absence.
+struct equal {
+  template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+    return std::forward<A>(a) == std::forward<B>(b);
+  }
+};
+
+/// Binary functor that adapts to any other binary functor after dereferencing
+/// operands.
+template <typename T> struct deref {
+  T func;
+
+  // Could be further improved to cope with non-derivable functors and
+  // non-binary functors (should be a variadic template member function
+  // operator()).
+  template <typename A, typename B>
+  auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
+    assert(lhs);
+    assert(rhs);
+    return func(*lhs, *rhs);
+  }
+};
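+
+// A minimal usage sketch: order smart pointers by their pointees (the
+// container is illustrative):
+//
+//   std::vector<std::unique_ptr<int>> Ptrs = /* ... */;
+//   llvm::sort(Ptrs.begin(), Ptrs.end(), deref<std::less<int>>());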
+
+namespace detail {
+
+template <typename R> class enumerator_iter;
+
+template <typename R> struct result_pair {
+  friend class enumerator_iter<R>;
+
+  result_pair() = default;
+  result_pair(std::size_t Index, IterOfRange<R> Iter)
+      : Index(Index), Iter(Iter) {}
+
+  result_pair<R> &operator=(const result_pair<R> &Other) {
+    Index = Other.Index;
+    Iter = Other.Iter;
+    return *this;
+  }
+
+  std::size_t index() const { return Index; }
+  const ValueOfRange<R> &value() const { return *Iter; }
+  ValueOfRange<R> &value() { return *Iter; }
+
+private:
+  std::size_t Index = std::numeric_limits<std::size_t>::max();
+  IterOfRange<R> Iter;
+};
+
+template <typename R>
+class enumerator_iter
+    : public iterator_facade_base<
+          enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
+          typename std::iterator_traits<IterOfRange<R>>::difference_type,
+          typename std::iterator_traits<IterOfRange<R>>::pointer,
+          typename std::iterator_traits<IterOfRange<R>>::reference> {
+  using result_type = result_pair<R>;
+
+public:
+  explicit enumerator_iter(IterOfRange<R> EndIter)
+      : Result(std::numeric_limits<size_t>::max(), EndIter) {}
+
+  enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
+      : Result(Index, Iter) {}
+
+  result_type &operator*() { return Result; }
+  const result_type &operator*() const { return Result; }
+
+  enumerator_iter<R> &operator++() {
+    assert(Result.Index != std::numeric_limits<size_t>::max());
+    ++Result.Iter;
+    ++Result.Index;
+    return *this;
+  }
+
+  bool operator==(const enumerator_iter<R> &RHS) const {
+    // Don't compare indices here, only iterators.  It's possible for an end
+    // iterator to have different indices depending on whether it was created
+    // by calling std::end() versus incrementing a valid iterator.
+    return Result.Iter == RHS.Result.Iter;
+  }
+
+  enumerator_iter<R> &operator=(const enumerator_iter<R> &Other) {
+    Result = Other.Result;
+    return *this;
+  }
+
+private:
+  result_type Result;
+};
+
+template <typename R> class enumerator {
+public:
+  explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
+
+  enumerator_iter<R> begin() {
+    return enumerator_iter<R>(0, std::begin(TheRange));
+  }
+
+  enumerator_iter<R> end() {
+    return enumerator_iter<R>(std::end(TheRange));
+  }
+
+private:
+  R TheRange;
+};
+
+} // end namespace detail
+
+/// Given an input range, returns a new range whose values are pairs (A, B)
+/// such that A is the 0-based index of the item in the sequence, and B is
+/// the value from the original sequence.  Example:
+///
+/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
+/// for (auto X : enumerate(Items)) {
+///   printf("Item %zu - %c\n", X.index(), X.value());
+/// }
+///
+/// Output:
+///   Item 0 - A
+///   Item 1 - B
+///   Item 2 - C
+///   Item 3 - D
+///
+template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
+  return detail::enumerator<R>(std::forward<R>(TheRange));
+}
+
+namespace detail {
+
+template <typename F, typename Tuple, std::size_t... I>
+auto apply_tuple_impl(F &&f, Tuple &&t, index_sequence<I...>)
+    -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) {
+  return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
+}
+
+} // end namespace detail
+
+/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
+/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
+/// return the result.
+template <typename F, typename Tuple>
+auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl(
+    std::forward<F>(f), std::forward<Tuple>(t),
+    build_index_impl<
+        std::tuple_size<typename std::decay<Tuple>::type>::value>{})) {
+  using Indices = build_index_impl<
+      std::tuple_size<typename std::decay<Tuple>::type>::value>;
+
+  return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
+                                  Indices{});
+}
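+
+// A minimal usage sketch:
+//
+//   auto Args = std::make_tuple(1, 2);
+//   int Sum = apply_tuple([](int A, int B) { return A + B; }, Args); // 3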
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLEXTRAS_H
diff --git a/linux-x64/clang/include/llvm/ADT/ScopeExit.h b/linux-x64/clang/include/llvm/ADT/ScopeExit.h
new file mode 100644
index 0000000..bd13755
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ScopeExit.h
@@ -0,0 +1,66 @@
+//===- llvm/ADT/ScopeExit.h - Execute code at scope exit --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the make_scope_exit function, which executes user-defined
+// cleanup logic at scope exit.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPE_EXIT_H
+#define LLVM_ADT_SCOPE_EXIT_H
+
+#include "llvm/Support/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+namespace detail {
+
+template <typename Callable> class scope_exit {
+  Callable ExitFunction;
+  bool Engaged = true; // False once moved-from or release()d.
+
+public:
+  template <typename Fp>
+  explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}
+
+  scope_exit(scope_exit &&Rhs)
+      : ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
+    Rhs.release();
+  }
+  scope_exit(const scope_exit &) = delete;
+  scope_exit &operator=(scope_exit &&) = delete;
+  scope_exit &operator=(const scope_exit &) = delete;
+
+  void release() { Engaged = false; }
+
+  ~scope_exit() {
+    if (Engaged)
+      ExitFunction();
+  }
+};
+
+} // end namespace detail
+
+// Keeps the callable object that is passed in, and executes it at the
+// destruction of the returned object (usually at the scope exit where the
+// returned object is kept).
+//
+// Interface is specified by p0052r2.
+template <typename Callable>
+LLVM_NODISCARD detail::scope_exit<typename std::decay<Callable>::type>
+make_scope_exit(Callable &&F) {
+  return detail::scope_exit<typename std::decay<Callable>::type>(
+      std::forward<Callable>(F));
+}
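+
+// A minimal usage sketch ("log.txt" is illustrative): the callable runs no
+// matter how the scope is left (return, exception, fall-through), unless
+// release() is called on the returned object first.
+//
+//   FILE *F = fopen("log.txt", "r");
+//   auto Close = make_scope_exit([&] { if (F) fclose(F); });
+//   // ... use F; fclose runs when Close is destroyed.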
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
new file mode 100644
index 0000000..22b0c1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
@@ -0,0 +1,264 @@
+//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an efficient scoped hash table, which is useful for
+// things like dominator-based optimizations.  This allows clients to do things
+// like this:
+//
+//  ScopedHashTable<int, int> HT;
+//  {
+//    ScopedHashTableScope<int, int> Scope1(HT);
+//    HT.insert(0, 0);
+//    HT.insert(1, 1);
+//    {
+//      ScopedHashTableScope<int, int> Scope2(HT);
+//      HT.insert(0, 42);
+//    }
+//  }
+//
+// Looking up the value for "0" in the Scope2 block will return 42.  Looking
+// up the value for 0 before 42 is inserted or after Scope2 is popped will
+// return 0.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
+#define LLVM_ADT_SCOPEDHASHTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <new>
+
+namespace llvm {
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+          typename AllocatorTy = MallocAllocator>
+class ScopedHashTable;
+
+template <typename K, typename V>
+class ScopedHashTableVal {
+  ScopedHashTableVal *NextInScope;
+  ScopedHashTableVal *NextForKey;
+  K Key;
+  V Val;
+
+  ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
+
+public:
+  const K &getKey() const { return Key; }
+  const V &getValue() const { return Val; }
+  V &getValue() { return Val; }
+
+  ScopedHashTableVal *getNextForKey() { return NextForKey; }
+  const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
+  ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+  template <typename AllocatorTy>
+  static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+                                    ScopedHashTableVal *nextForKey,
+                                    const K &key, const V &val,
+                                    AllocatorTy &Allocator) {
+    ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+    // Set up the value.
+    new (New) ScopedHashTableVal(key, val);
+    New->NextInScope = nextInScope;
+    New->NextForKey = nextForKey;
+    return New;
+  }
+
+  template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
+    // Free memory referenced by the item.
+    this->~ScopedHashTableVal();
+    Allocator.Deallocate(this);
+  }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+          typename AllocatorTy = MallocAllocator>
+class ScopedHashTableScope {
+  /// HT - The hashtable that we are active for.
+  ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
+
+  /// PrevScope - This is the scope that we are shadowing in HT.
+  ScopedHashTableScope *PrevScope;
+
+  /// LastValInScope - This is the last value that was inserted for this scope
+  /// or null if none have been inserted yet.
+  ScopedHashTableVal<K, V> *LastValInScope;
+
+public:
+  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
+  ScopedHashTableScope(ScopedHashTableScope &) = delete;
+  ScopedHashTableScope &operator=(ScopedHashTableScope &) = delete;
+  ~ScopedHashTableScope();
+
+  ScopedHashTableScope *getParentScope() { return PrevScope; }
+  const ScopedHashTableScope *getParentScope() const { return PrevScope; }
+
+private:
+  friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+
+  ScopedHashTableVal<K, V> *getLastValInScope() {
+    return LastValInScope;
+  }
+
+  void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
+    LastValInScope = Val;
+  }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
+class ScopedHashTableIterator {
+  ScopedHashTableVal<K, V> *Node;
+
+public:
+  ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
+
+  V &operator*() const {
+    assert(Node && "Dereference end()");
+    return Node->getValue();
+  }
+  V *operator->() const {
+    return &Node->getValue();
+  }
+
+  bool operator==(const ScopedHashTableIterator &RHS) const {
+    return Node == RHS.Node;
+  }
+  bool operator!=(const ScopedHashTableIterator &RHS) const {
+    return Node != RHS.Node;
+  }
+
+  inline ScopedHashTableIterator& operator++() {          // Preincrement
+    assert(Node && "incrementing past end()");
+    Node = Node->getNextForKey();
+    return *this;
+  }
+  ScopedHashTableIterator operator++(int) {        // Postincrement
+    ScopedHashTableIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
+class ScopedHashTable {
+public:
+  /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+  /// to the name of the scope for this hash table.
+  using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+  using size_type = unsigned;
+
+private:
+  friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+
+  using ValTy = ScopedHashTableVal<K, V>;
+
+  DenseMap<K, ValTy*, KInfo> TopLevelMap;
+  ScopeTy *CurScope = nullptr;
+
+  AllocatorTy Allocator;
+
+public:
+  ScopedHashTable() = default;
+  ScopedHashTable(AllocatorTy A) : Allocator(A) {}
+  ScopedHashTable(const ScopedHashTable &) = delete;
+  ScopedHashTable &operator=(const ScopedHashTable &) = delete;
+
+  ~ScopedHashTable() {
+    assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
+  }
+
+  /// Access to the allocator.
+  AllocatorTy &getAllocator() { return Allocator; }
+  const AllocatorTy &getAllocator() const { return Allocator; }
+
+  /// Return 1 if the specified key is in the table, 0 otherwise.
+  size_type count(const K &Key) const {
+    return TopLevelMap.count(Key);
+  }
+
+  V lookup(const K &Key) const {
+    auto I = TopLevelMap.find(Key);
+    if (I != TopLevelMap.end())
+      return I->second->getValue();
+
+    return V();
+  }
+
+  void insert(const K &Key, const V &Val) {
+    insertIntoScope(CurScope, Key, Val);
+  }
+
+  using iterator = ScopedHashTableIterator<K, V, KInfo>;
+
+  iterator end() { return iterator(0); }
+
+  iterator begin(const K &Key) {
+    typename DenseMap<K, ValTy*, KInfo>::iterator I =
+      TopLevelMap.find(Key);
+    if (I == TopLevelMap.end()) return end();
+    return iterator(I->second);
+  }
+
+  ScopeTy *getCurScope() { return CurScope; }
+  const ScopeTy *getCurScope() const { return CurScope; }
+
+  /// insertIntoScope - This inserts the specified key/value at the specified
+  /// (possibly not the current) scope.  While it is ok to insert into a scope
+  /// that isn't the current one, it isn't ok to insert *underneath* an existing
+  /// value of the specified key.
+  void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
+    assert(S && "No scope active!");
+    ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
+    KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
+                             Allocator);
+    S->setLastValInScope(KeyEntry);
+  }
+};
+
+/// ScopedHashTableScope ctor - Install this as the current scope for the hash
+/// table.
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
+  PrevScope = HT.CurScope;
+  HT.CurScope = this;
+  LastValInScope = nullptr;
+}
+
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
+  assert(HT.CurScope == this && "Scope imbalance!");
+  HT.CurScope = PrevScope;
+
+  // Pop and delete all values corresponding to this scope.
+  while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
+    // Pop this value out of the TopLevelMap.
+    if (!ThisEntry->getNextForKey()) {
+      assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
+             "Scope imbalance!");
+      HT.TopLevelMap.erase(ThisEntry->getKey());
+    } else {
+      ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
+      assert(KeyEntry == ThisEntry && "Scope imbalance!");
+      KeyEntry = ThisEntry->getNextForKey();
+    }
+
+    // Pop this value out of the scope.
+    LastValInScope = ThisEntry->getNextInScope();
+
+    // Delete this entry.
+    ThisEntry->Destroy(HT.getAllocator());
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCOPEDHASHTABLE_H
diff --git a/linux-x64/clang/include/llvm/ADT/Sequence.h b/linux-x64/clang/include/llvm/ADT/Sequence.h
new file mode 100644
index 0000000..3d4a897
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Sequence.h
@@ -0,0 +1,84 @@
+//===- Sequence.h - Utility for producing sequences of values ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This routine provides some synthesis utilities to produce sequences of
+/// values. The names are intentionally kept very short as they tend to occur
+/// in common and widely used contexts.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SEQUENCE_H
+#define LLVM_ADT_SEQUENCE_H
+
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+template <typename ValueT>
+class value_sequence_iterator
+    : public iterator_facade_base<value_sequence_iterator<ValueT>,
+                                  std::random_access_iterator_tag,
+                                  const ValueT> {
+  using BaseT = typename value_sequence_iterator::iterator_facade_base;
+
+  ValueT Value;
+
+public:
+  using difference_type = typename BaseT::difference_type;
+  using reference = typename BaseT::reference;
+
+  value_sequence_iterator() = default;
+  value_sequence_iterator(const value_sequence_iterator &) = default;
+  value_sequence_iterator(value_sequence_iterator &&Arg)
+      : Value(std::move(Arg.Value)) {}
+
+  template <typename U, typename Enabler = decltype(ValueT(std::declval<U>()))>
+  value_sequence_iterator(U &&Value) : Value(std::forward<U>(Value)) {}
+
+  value_sequence_iterator &operator+=(difference_type N) {
+    Value += N;
+    return *this;
+  }
+  value_sequence_iterator &operator-=(difference_type N) {
+    Value -= N;
+    return *this;
+  }
+  using BaseT::operator-;
+  difference_type operator-(const value_sequence_iterator &RHS) const {
+    return Value - RHS.Value;
+  }
+
+  bool operator==(const value_sequence_iterator &RHS) const {
+    return Value == RHS.Value;
+  }
+  bool operator<(const value_sequence_iterator &RHS) const {
+    return Value < RHS.Value;
+  }
+
+  reference operator*() const { return Value; }
+};
+
+} // end namespace detail
+
+template <typename ValueT>
+iterator_range<detail::value_sequence_iterator<ValueT>> seq(ValueT Begin,
+                                                            ValueT End) {
+  return make_range(detail::value_sequence_iterator<ValueT>(Begin),
+                    detail::value_sequence_iterator<ValueT>(End));
+}
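+
+// A minimal usage sketch: seq produces the half-open interval [Begin, End).
+//
+//   for (int I : seq(0, 4))
+//     consume(I); // Visits 0, 1, 2, 3 (consume is a hypothetical sink).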
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SEQUENCE_H
diff --git a/linux-x64/clang/include/llvm/ADT/SetOperations.h b/linux-x64/clang/include/llvm/ADT/SetOperations.h
new file mode 100644
index 0000000..7c9f2fb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SetOperations.h
@@ -0,0 +1,71 @@
+//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines generic set operations that may be used on sets of
+// different types and with different element types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETOPERATIONS_H
+#define LLVM_ADT_SETOPERATIONS_H
+
+namespace llvm {
+
+/// set_union(A, B) - Compute A := A u B, return whether A changed.
+///
+template <class S1Ty, class S2Ty>
+bool set_union(S1Ty &S1, const S2Ty &S2) {
+  bool Changed = false;
+
+  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+       SI != SE; ++SI)
+    if (S1.insert(*SI).second)
+      Changed = true;
+
+  return Changed;
+}
+
+/// set_intersect(A, B) - Compute A := A ^ B
+/// Identical to set_intersection, except that it works on set<>s and
+/// is nicer to use.  Functionally, this iterates through S1, removing
+/// elements that are not contained in S2.
+///
+template <class S1Ty, class S2Ty>
+void set_intersect(S1Ty &S1, const S2Ty &S2) {
+   for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
+     const auto &E = *I;
+     ++I;
+     if (!S2.count(E)) S1.erase(E);   // Erase element if not in S2
+   }
+}
+
+/// set_difference(A, B) - Return A - B
+///
+template <class S1Ty, class S2Ty>
+S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
+  S1Ty Result;
+  for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
+       SI != SE; ++SI)
+    if (!S2.count(*SI))       // if the element is not in set2
+      Result.insert(*SI);
+  return Result;
+}
+
+/// set_subtract(A, B) - Compute A := A - B
+///
+template <class S1Ty, class S2Ty>
+void set_subtract(S1Ty &S1, const S2Ty &S2) {
+  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+       SI != SE; ++SI)
+    S1.erase(*SI);
+}
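+
+// A minimal usage sketch (std::set chosen for illustration):
+//
+//   std::set<int> A = {1, 2, 3}, B = {2, 3, 4};
+//   set_intersect(A, B);            // A == {2, 3}
+//   bool Changed = set_union(A, B); // A == {2, 3, 4}, Changed == true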
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/SetVector.h b/linux-x64/clang/include/llvm/ADT/SetVector.h
new file mode 100644
index 0000000..04ed52f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SetVector.h
@@ -0,0 +1,312 @@
+//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a set that has insertion order iteration
+// characteristics. This is useful for keeping a set of things that need to be
+// visited later but in a deterministic order (insertion order). The interface
+// is purposefully minimal.
+//
+// This file defines SetVector and SmallSetVector, the latter of which
+// performs no allocations when it holds fewer than a certain number of
+// elements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETVECTOR_H
+#define LLVM_ADT_SETVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+/// \brief A vector that has set insertion semantics.
+///
+/// This adapter class provides a way to keep a set of things that also has the
+/// property of a deterministic iteration order. The order of iteration is the
+/// order of insertion.
+template <typename T, typename Vector = std::vector<T>,
+          typename Set = DenseSet<T>>
+class SetVector {
+public:
+  using value_type = T;
+  using key_type = T;
+  using reference = T&;
+  using const_reference = const T&;
+  using set_type = Set;
+  using vector_type = Vector;
+  using iterator = typename vector_type::const_iterator;
+  using const_iterator = typename vector_type::const_iterator;
+  using reverse_iterator = typename vector_type::const_reverse_iterator;
+  using const_reverse_iterator = typename vector_type::const_reverse_iterator;
+  using size_type = typename vector_type::size_type;
+
+  /// \brief Construct an empty SetVector
+  SetVector() = default;
+
+  /// \brief Initialize a SetVector with a range of elements
+  template<typename It>
+  SetVector(It Start, It End) {
+    insert(Start, End);
+  }
+
+  ArrayRef<T> getArrayRef() const { return vector_; }
+
+  /// Clear the SetVector and return the underlying vector.
+  Vector takeVector() {
+    set_.clear();
+    return std::move(vector_);
+  }
+
+  /// \brief Determine if the SetVector is empty or not.
+  bool empty() const {
+    return vector_.empty();
+  }
+
+  /// \brief Determine the number of elements in the SetVector.
+  size_type size() const {
+    return vector_.size();
+  }
+
+  /// \brief Get an iterator to the beginning of the SetVector.
+  iterator begin() {
+    return vector_.begin();
+  }
+
+  /// \brief Get a const_iterator to the beginning of the SetVector.
+  const_iterator begin() const {
+    return vector_.begin();
+  }
+
+  /// \brief Get an iterator to the end of the SetVector.
+  iterator end() {
+    return vector_.end();
+  }
+
+  /// \brief Get a const_iterator to the end of the SetVector.
+  const_iterator end() const {
+    return vector_.end();
+  }
+
+  /// \brief Get a reverse_iterator to the end of the SetVector.
+  reverse_iterator rbegin() {
+    return vector_.rbegin();
+  }
+
+  /// \brief Get a const_reverse_iterator to the end of the SetVector.
+  const_reverse_iterator rbegin() const {
+    return vector_.rbegin();
+  }
+
+  /// \brief Get a reverse_iterator to the beginning of the SetVector.
+  reverse_iterator rend() {
+    return vector_.rend();
+  }
+
+  /// \brief Get a const_reverse_iterator to the beginning of the SetVector.
+  const_reverse_iterator rend() const {
+    return vector_.rend();
+  }
+
+  /// \brief Return the first element of the SetVector.
+  const T &front() const {
+    assert(!empty() && "Cannot call front() on empty SetVector!");
+    return vector_.front();
+  }
+
+  /// \brief Return the last element of the SetVector.
+  const T &back() const {
+    assert(!empty() && "Cannot call back() on empty SetVector!");
+    return vector_.back();
+  }
+
+  /// \brief Index into the SetVector.
+  const_reference operator[](size_type n) const {
+    assert(n < vector_.size() && "SetVector access out of range!");
+    return vector_[n];
+  }
+
+  /// \brief Insert a new element into the SetVector.
+  /// \returns true if the element was inserted into the SetVector.
+  bool insert(const value_type &X) {
+    bool result = set_.insert(X).second;
+    if (result)
+      vector_.push_back(X);
+    return result;
+  }
+
+  /// \brief Insert a range of elements into the SetVector.
+  template<typename It>
+  void insert(It Start, It End) {
+    for (; Start != End; ++Start)
+      if (set_.insert(*Start).second)
+        vector_.push_back(*Start);
+  }
+
+  /// \brief Remove an item from the set vector.
+  bool remove(const value_type& X) {
+    if (set_.erase(X)) {
+      typename vector_type::iterator I = find(vector_, X);
+      assert(I != vector_.end() && "Corrupted SetVector instances!");
+      vector_.erase(I);
+      return true;
+    }
+    return false;
+  }
+
+  /// Erase a single element from the set vector.
+  /// \returns an iterator pointing to the next element that followed the
+  /// element erased. This is the end of the SetVector if the last element is
+  /// erased.
+  iterator erase(iterator I) {
+    const key_type &V = *I;
+    assert(set_.count(V) && "Corrupted SetVector instances!");
+    set_.erase(V);
+
+    // FIXME: No need to use the non-const iterator when built with
+    // std::vector::erase(const_iterator) as defined in C++11. This is for
+    // compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9).
+    auto NI = vector_.begin();
+    std::advance(NI, std::distance<iterator>(NI, I));
+
+    return vector_.erase(NI);
+  }
+
+  /// \brief Remove items from the set vector based on a predicate function.
+  ///
+  /// This is intended to be equivalent to the following code, if we could
+  /// write it:
+  ///
+  /// \code
+  ///   V.erase(remove_if(V, P), V.end());
+  /// \endcode
+  ///
+  /// However, SetVector doesn't expose non-const iterators, making any
+  /// algorithm like remove_if impossible to use.
+  ///
+  /// \returns true if any element is removed.
+  template <typename UnaryPredicate>
+  bool remove_if(UnaryPredicate P) {
+    typename vector_type::iterator I =
+        llvm::remove_if(vector_, TestAndEraseFromSet<UnaryPredicate>(P, set_));
+    if (I == vector_.end())
+      return false;
+    vector_.erase(I, vector_.end());
+    return true;
+  }
+
+  /// \brief Count the number of elements of a given key in the SetVector.
+  /// \returns 0 if the element is not in the SetVector, 1 if it is.
+  size_type count(const key_type &key) const {
+    return set_.count(key);
+  }
+
+  /// \brief Completely clear the SetVector
+  void clear() {
+    set_.clear();
+    vector_.clear();
+  }
+
+  /// \brief Remove the last element of the SetVector.
+  void pop_back() {
+    assert(!empty() && "Cannot remove an element from an empty SetVector!");
+    set_.erase(back());
+    vector_.pop_back();
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Ret = back();
+    pop_back();
+    return Ret;
+  }
+
+  bool operator==(const SetVector &that) const {
+    return vector_ == that.vector_;
+  }
+
+  bool operator!=(const SetVector &that) const {
+    return vector_ != that.vector_;
+  }
+
+  /// \brief Compute This := This u S, return whether 'This' changed.
+  /// TODO: We should be able to use set_union from SetOperations.h, but
+  ///       SetVector interface is inconsistent with DenseSet.
+  template <class STy>
+  bool set_union(const STy &S) {
+    bool Changed = false;
+
+    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+         ++SI)
+      if (insert(*SI))
+        Changed = true;
+
+    return Changed;
+  }
+
+  /// \brief Compute This := This - B
+  /// TODO: We should be able to use set_subtract from SetOperations.h, but
+  ///       SetVector interface is inconsistent with DenseSet.
+  template <class STy>
+  void set_subtract(const STy &S) {
+    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+         ++SI)
+      remove(*SI);
+  }
+
+private:
+  /// \brief A wrapper predicate designed for use with std::remove_if.
+  ///
+  /// This predicate wraps a predicate suitable for use with std::remove_if to
+  /// call set_.erase(x) on each element which is slated for removal.
+  template <typename UnaryPredicate>
+  class TestAndEraseFromSet {
+    UnaryPredicate P;
+    set_type &set_;
+
+  public:
+    TestAndEraseFromSet(UnaryPredicate P, set_type &set_)
+        : P(std::move(P)), set_(set_) {}
+
+    template <typename ArgumentT>
+    bool operator()(const ArgumentT &Arg) {
+      if (P(Arg)) {
+        set_.erase(Arg);
+        return true;
+      }
+      return false;
+    }
+  };
+
+  set_type set_;         ///< The set.
+  vector_type vector_;   ///< The vector.
+};
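+
+// A minimal usage sketch: duplicate insertions are ignored, and iteration
+// follows insertion order.
+//
+//   SetVector<int> S;
+//   S.insert(3);
+//   S.insert(1);
+//   S.insert(3); // Ignored; insert() returns false.
+//   // Iterating S visits 3, then 1.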
+
+/// \brief A SetVector that performs no allocations if smaller than
+/// a certain size.
+template <typename T, unsigned N>
+class SmallSetVector
+    : public SetVector<T, SmallVector<T, N>, SmallDenseSet<T, N>> {
+public:
+  SmallSetVector() = default;
+
+  /// \brief Initialize a SmallSetVector with a range of elements
+  template<typename It>
+  SmallSetVector(It Start, It End) {
+    this->insert(Start, End);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SETVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
new file mode 100644
index 0000000..b639174
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
@@ -0,0 +1,702 @@
+//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SmallBitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLBITVECTOR_H
+#define LLVM_ADT_SMALLBITVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
+/// the case when the array is small. It contains one pointer-sized field, which
+/// is directly used as a plain collection of bits when possible, or as a
+/// pointer to a larger heap-allocated array when necessary. This allows normal
+/// "small" cases to be fast without losing generality for large inputs.
+class SmallBitVector {
+  // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
+  // unnecessary level of indirection. It would be more efficient to use a
+  // pointer to memory containing size, allocation size, and the array of bits.
+  uintptr_t X = 1;
+
+  enum {
+    // The number of bits in this class.
+    NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
+
+    // One bit is used to discriminate between small and large mode. The
+    // remaining bits are used for the small-mode representation.
+    SmallNumRawBits = NumBaseBits - 1,
+
+    // A few more bits are used to store the size of the bit set in small mode.
+    // Theoretically this is a ceil-log2. These bits are encoded in the most
+    // significant bits of the raw bits.
+    SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
+                        NumBaseBits == 64 ? 6 :
+                        SmallNumRawBits),
+
+    // The remaining bits are used to store the actual set in small mode.
+    SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
+  };
+
+  static_assert(NumBaseBits == 64 || NumBaseBits == 32,
+                "Unsupported word size");
+
+public:
+  using size_type = unsigned;
+
+  // Encapsulation of a single bit.
+  class reference {
+    SmallBitVector &TheVector;
+    unsigned BitPos;
+
+  public:
+    reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
+
+    reference(const reference&) = default;
+
+    reference& operator=(reference t) {
+      *this = bool(t);
+      return *this;
+    }
+
+    reference& operator=(bool t) {
+      if (t)
+        TheVector.set(BitPos);
+      else
+        TheVector.reset(BitPos);
+      return *this;
+    }
+
+    operator bool() const {
+      return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
+    }
+  };
+
+private:
+  bool isSmall() const {
+    return X & uintptr_t(1);
+  }
+
+  BitVector *getPointer() const {
+    assert(!isSmall());
+    return reinterpret_cast<BitVector *>(X);
+  }
+
+  void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
+    X = 1;
+    setSmallSize(NewSize);
+    setSmallBits(NewSmallBits);
+  }
+
+  void switchToLarge(BitVector *BV) {
+    X = reinterpret_cast<uintptr_t>(BV);
+    assert(!isSmall() && "Tried to use an unaligned pointer");
+  }
+
+  // Return all the bits used for the "small" representation; this includes
+  // bits for the size as well as the element bits.
+  uintptr_t getSmallRawBits() const {
+    assert(isSmall());
+    return X >> 1;
+  }
+
+  void setSmallRawBits(uintptr_t NewRawBits) {
+    assert(isSmall());
+    X = (NewRawBits << 1) | uintptr_t(1);
+  }
+
+  // Return the size.
+  size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; }
+
+  void setSmallSize(size_t Size) {
+    setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
+  }
+
+  // Return the element bits.
+  uintptr_t getSmallBits() const {
+    return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
+  }
+
+  void setSmallBits(uintptr_t NewBits) {
+    setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
+                    (getSmallSize() << SmallNumDataBits));
+  }
+
+public:
+  /// Creates an empty bitvector.
+  SmallBitVector() = default;
+
+  /// Creates a bitvector of specified number of bits. All bits are initialized
+  /// to the specified value.
+  explicit SmallBitVector(unsigned s, bool t = false) {
+    if (s <= SmallNumDataBits)
+      switchToSmall(t ? ~uintptr_t(0) : 0, s);
+    else
+      switchToLarge(new BitVector(s, t));
+  }
+
+  /// SmallBitVector copy ctor.
+  SmallBitVector(const SmallBitVector &RHS) {
+    if (RHS.isSmall())
+      X = RHS.X;
+    else
+      switchToLarge(new BitVector(*RHS.getPointer()));
+  }
+
+  SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+    RHS.X = 1;
+  }
+
+  ~SmallBitVector() {
+    if (!isSmall())
+      delete getPointer();
+  }
+
+  using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
+  using set_iterator = const_set_bits_iterator;
+
+  const_set_bits_iterator set_bits_begin() const {
+    return const_set_bits_iterator(*this);
+  }
+
+  const_set_bits_iterator set_bits_end() const {
+    return const_set_bits_iterator(*this, -1);
+  }
+
+  iterator_range<const_set_bits_iterator> set_bits() const {
+    return make_range(set_bits_begin(), set_bits_end());
+  }
+
+  /// Tests whether there are no bits in this bitvector.
+  bool empty() const {
+    return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
+  }
+
+  /// Returns the number of bits in this bitvector.
+  size_t size() const {
+    return isSmall() ? getSmallSize() : getPointer()->size();
+  }
+
+  /// Returns the number of bits which are set.
+  size_type count() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      return countPopulation(Bits);
+    }
+    return getPointer()->count();
+  }
+
+  /// Returns true if any bit is set.
+  bool any() const {
+    if (isSmall())
+      return getSmallBits() != 0;
+    return getPointer()->any();
+  }
+
+  /// Returns true if all bits are set.
+  bool all() const {
+    if (isSmall())
+      return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+    return getPointer()->all();
+  }
+
+  /// Returns true if none of the bits are set.
+  bool none() const {
+    if (isSmall())
+      return getSmallBits() == 0;
+    return getPointer()->none();
+  }
+
+  /// Returns the index of the first set bit, -1 if none of the bits are set.
+  int find_first() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      if (Bits == 0)
+        return -1;
+      return countTrailingZeros(Bits);
+    }
+    return getPointer()->find_first();
+  }
+
+  /// Returns the index of the last set bit, -1 if none of the bits are set.
+  int find_last() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      if (Bits == 0)
+        return -1;
+      // Index of the most significant set bit.
+      return NumBaseBits - countLeadingZeros(Bits) - 1;
+    }
+    return getPointer()->find_last();
+  }
+
+  /// Returns the index of the first unset bit, -1 if all of the bits are set.
+  int find_first_unset() const {
+    if (isSmall()) {
+      if (count() == getSmallSize())
+        return -1;
+
+      uintptr_t Bits = getSmallBits();
+      return countTrailingOnes(Bits);
+    }
+    return getPointer()->find_first_unset();
+  }
+
+  /// Returns the index of the last unset bit, -1 if all of the bits are set.
+  int find_last_unset() const {
+    if (isSmall()) {
+      if (count() == getSmallSize())
+        return -1;
+
+      uintptr_t Bits = getSmallBits();
+      // Set the unused high bits so they are not counted as unset.
+      Bits |= ~uintptr_t(0) << getSmallSize();
+      return NumBaseBits - countLeadingOnes(Bits) - 1;
+    }
+    return getPointer()->find_last_unset();
+  }
+
+  /// Returns the index of the next set bit following the "Prev" bit.
+  /// Returns -1 if the next set bit is not found.
+  int find_next(unsigned Prev) const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      // Mask off previous bits.
+      Bits &= ~uintptr_t(0) << (Prev + 1);
+      if (Bits == 0 || Prev + 1 >= getSmallSize())
+        return -1;
+      return countTrailingZeros(Bits);
+    }
+    return getPointer()->find_next(Prev);
+  }
+
+  /// Returns the index of the next unset bit following the "Prev" bit.
+  /// Returns -1 if the next unset bit is not found.
+  int find_next_unset(unsigned Prev) const {
+    if (isSmall()) {
+      ++Prev;
+      if (Prev >= getSmallSize())
+        return -1;
+      uintptr_t Bits = getSmallBits();
+      // Mask in previous bits with a uintptr_t-wide shift; an int-typed
+      // "1 << Prev" would overflow for positions of 31 and above.
+      Bits |= (uintptr_t(1) << Prev) - 1;
+      unsigned Next = countTrailingOnes(Bits);
+      // All remaining valid bits may be set; report that as not found.
+      return Next < getSmallSize() ? (int)Next : -1;
+    }
+    return getPointer()->find_next_unset(Prev);
+  }
+
+  /// find_prev - Returns the index of the first set bit that precedes the
+  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
+  int find_prev(unsigned PriorTo) const {
+    if (isSmall()) {
+      if (PriorTo == 0)
+        return -1;
+
+      --PriorTo;
+      uintptr_t Bits = getSmallBits();
+      Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
+      if (Bits == 0)
+        return -1;
+
+      return NumBaseBits - countLeadingZeros(Bits) - 1;
+    }
+    return getPointer()->find_prev(PriorTo);
+  }
+
+  /// Clear all bits.
+  void clear() {
+    if (!isSmall())
+      delete getPointer();
+    switchToSmall(0, 0);
+  }
+
+  /// Grow or shrink the bitvector.
+  void resize(unsigned N, bool t = false) {
+    if (!isSmall()) {
+      getPointer()->resize(N, t);
+    } else if (SmallNumDataBits >= N) {
+      uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
+      setSmallSize(N);
+      setSmallBits(NewBits | getSmallBits());
+    } else {
+      BitVector *BV = new BitVector(N, t);
+      uintptr_t OldBits = getSmallBits();
+      for (size_t i = 0, e = getSmallSize(); i != e; ++i)
+        (*BV)[i] = (OldBits >> i) & 1;
+      switchToLarge(BV);
+    }
+  }
+
+  void reserve(unsigned N) {
+    if (isSmall()) {
+      if (N > SmallNumDataBits) {
+        uintptr_t OldBits = getSmallRawBits();
+        size_t SmallSize = getSmallSize();
+        BitVector *BV = new BitVector(SmallSize);
+        for (size_t i = 0; i < SmallSize; ++i)
+          if ((OldBits >> i) & 1)
+            BV->set(i);
+        BV->reserve(N);
+        switchToLarge(BV);
+      }
+    } else {
+      getPointer()->reserve(N);
+    }
+  }
+
+  // Set, reset, flip
+  SmallBitVector &set() {
+    if (isSmall())
+      setSmallBits(~uintptr_t(0));
+    else
+      getPointer()->set();
+    return *this;
+  }
+
+  SmallBitVector &set(unsigned Idx) {
+    if (isSmall()) {
+      assert(Idx < static_cast<unsigned>(
+                       std::numeric_limits<uintptr_t>::digits) &&
+             "Shift by the full word width is undefined behavior");
+      setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
+    } else
+      getPointer()->set(Idx);
+    return *this;
+  }
+
+  /// Efficiently set a range of bits in [I, E)
+  SmallBitVector &set(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to set backwards range!");
+    assert(E <= size() && "Attempted to set out-of-bounds range!");
+    if (I == E) return *this;
+    if (isSmall()) {
+      uintptr_t EMask = ((uintptr_t)1) << E;
+      uintptr_t IMask = ((uintptr_t)1) << I;
+      uintptr_t Mask = EMask - IMask;
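+      // E.g. I = 2, E = 5: EMask = 0b100000, IMask = 0b00100, so
+      // Mask = 0b11100 covers exactly the bits [2, 5).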
+      setSmallBits(getSmallBits() | Mask);
+    } else
+      getPointer()->set(I, E);
+    return *this;
+  }
+
+  SmallBitVector &reset() {
+    if (isSmall())
+      setSmallBits(0);
+    else
+      getPointer()->reset();
+    return *this;
+  }
+
+  SmallBitVector &reset(unsigned Idx) {
+    if (isSmall())
+      setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
+    else
+      getPointer()->reset(Idx);
+    return *this;
+  }
+
+  /// Efficiently reset a range of bits in [I, E)
+  SmallBitVector &reset(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to reset backwards range!");
+    assert(E <= size() && "Attempted to reset out-of-bounds range!");
+    if (I == E) return *this;
+    if (isSmall()) {
+      uintptr_t EMask = ((uintptr_t)1) << E;
+      uintptr_t IMask = ((uintptr_t)1) << I;
+      uintptr_t Mask = EMask - IMask;
+      setSmallBits(getSmallBits() & ~Mask);
+    } else
+      getPointer()->reset(I, E);
+    return *this;
+  }
+
+  SmallBitVector &flip() {
+    if (isSmall())
+      setSmallBits(~getSmallBits());
+    else
+      getPointer()->flip();
+    return *this;
+  }
+
+  SmallBitVector &flip(unsigned Idx) {
+    if (isSmall())
+      setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
+    else
+      getPointer()->flip(Idx);
+    return *this;
+  }
+
+  // No argument flip.
+  SmallBitVector operator~() const {
+    return SmallBitVector(*this).flip();
+  }
+
+  // Indexing.
+  reference operator[](unsigned Idx) {
+    assert(Idx < size() && "Out-of-bounds Bit access.");
+    return reference(*this, Idx);
+  }
+
+  bool operator[](unsigned Idx) const {
+    assert(Idx < size() && "Out-of-bounds Bit access.");
+    if (isSmall())
+      return ((getSmallBits() >> Idx) & 1) != 0;
+    return getPointer()->operator[](Idx);
+  }
+
+  bool test(unsigned Idx) const {
+    return (*this)[Idx];
+  }
+
+  /// Test if any common bits are set.
+  bool anyCommon(const SmallBitVector &RHS) const {
+    if (isSmall() && RHS.isSmall())
+      return (getSmallBits() & RHS.getSmallBits()) != 0;
+    if (!isSmall() && !RHS.isSmall())
+      return getPointer()->anyCommon(*RHS.getPointer());
+
+    for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+      if (test(i) && RHS.test(i))
+        return true;
+    return false;
+  }
+
+  // Comparison operators.
+  bool operator==(const SmallBitVector &RHS) const {
+    if (size() != RHS.size())
+      return false;
+    if (isSmall())
+      return getSmallBits() == RHS.getSmallBits();
+    else
+      return *getPointer() == *RHS.getPointer();
+  }
+
+  bool operator!=(const SmallBitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  // Intersection, union, disjoint union.
+  SmallBitVector &operator&=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() & RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator&=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator&=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  /// Reset bits that are set in RHS. Same as *this &= ~RHS.
+  SmallBitVector &reset(const SmallBitVector &RHS) {
+    if (isSmall() && RHS.isSmall())
+      setSmallBits(getSmallBits() & ~RHS.getSmallBits());
+    else if (!isSmall() && !RHS.isSmall())
+      getPointer()->reset(*RHS.getPointer());
+    else
+      for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+        if (RHS.test(i))
+          reset(i);
+
+    return *this;
+  }
+
+  /// Check if (This - RHS) is nonzero, i.e. whether this set contains a bit
+  /// that RHS does not. This is the same as reset(RHS) followed by any().
+  bool test(const SmallBitVector &RHS) const {
+    if (isSmall() && RHS.isSmall())
+      return (getSmallBits() & ~RHS.getSmallBits()) != 0;
+    if (!isSmall() && !RHS.isSmall())
+      return getPointer()->test(*RHS.getPointer());
+
+    unsigned i, e;
+    for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+      if (test(i) && !RHS.test(i))
+        return true;
+
+    for (e = size(); i != e; ++i)
+      if (test(i))
+        return true;
+
+    return false;
+  }
+
+  SmallBitVector &operator|=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() | RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator|=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator|=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  SmallBitVector &operator^=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() ^ RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator^=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator^=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  SmallBitVector &operator<<=(unsigned N) {
+    if (isSmall())
+      setSmallBits(getSmallBits() << N);
+    else
+      getPointer()->operator<<=(N);
+    return *this;
+  }
+
+  SmallBitVector &operator>>=(unsigned N) {
+    if (isSmall())
+      setSmallBits(getSmallBits() >> N);
+    else
+      getPointer()->operator>>=(N);
+    return *this;
+  }
+
+  // Assignment operator.
+  const SmallBitVector &operator=(const SmallBitVector &RHS) {
+    if (isSmall()) {
+      if (RHS.isSmall())
+        X = RHS.X;
+      else
+        switchToLarge(new BitVector(*RHS.getPointer()));
+    } else {
+      if (!RHS.isSmall())
+        *getPointer() = *RHS.getPointer();
+      else {
+        delete getPointer();
+        X = RHS.X;
+      }
+    }
+    return *this;
+  }
+
+  const SmallBitVector &operator=(SmallBitVector &&RHS) {
+    if (this != &RHS) {
+      clear();
+      swap(RHS);
+    }
+    return *this;
+  }
+
+  void swap(SmallBitVector &RHS) {
+    std::swap(X, RHS.X);
+  }
+
+  /// Add '1' bits from Mask to this vector. Don't resize.
+  /// This computes "*this |= Mask".
+  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<true, false>(Mask, MaskWords);
+    else
+      getPointer()->setBitsInMask(Mask, MaskWords);
+  }
+
+  /// Clear any bits in this vector that are set in Mask. Don't resize.
+  /// This computes "*this &= ~Mask".
+  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<false, false>(Mask, MaskWords);
+    else
+      getPointer()->clearBitsInMask(Mask, MaskWords);
+  }
+
+  /// Add a bit to this vector for every '0' bit in Mask. Don't resize.
+  /// This computes "*this |= ~Mask".
+  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<true, true>(Mask, MaskWords);
+    else
+      getPointer()->setBitsNotInMask(Mask, MaskWords);
+  }
+
+  /// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
+  /// This computes "*this &= Mask".
+  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<false, true>(Mask, MaskWords);
+    else
+      getPointer()->clearBitsNotInMask(Mask, MaskWords);
+  }
+
+private:
+  template <bool AddBits, bool InvertMask>
+  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+    // Clamp MaskWords to the number of 32-bit words that cover this vector,
+    // so the default "all words" argument never reads past the caller's mask.
+    MaskWords = std::min(MaskWords, unsigned((getSmallSize() + 31) / 32));
+    if (MaskWords == 0)
+      return;
+    uintptr_t M = Mask[0];
+    // Only read the second mask word when it exists.
+    if (NumBaseBits == 64 && MaskWords >= 2)
+      M |= uint64_t(Mask[1]) << 32;
+    if (InvertMask)
+      M = ~M;
+    if (AddBits)
+      setSmallBits(getSmallBits() | M);
+    else
+      setSmallBits(getSmallBits() & ~M);
+  }
+};
+
+inline SmallBitVector
+operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result &= RHS;
+  return Result;
+}
+
+inline SmallBitVector
+operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result |= RHS;
+  return Result;
+}
+
+inline SmallBitVector
+operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result ^= RHS;
+  return Result;
+}
+
+} // end namespace llvm
+
+namespace std {
+
+/// Implement std::swap in terms of SmallBitVector swap.
+inline void
+swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
+  LHS.swap(RHS);
+}
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLBITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
new file mode 100644
index 0000000..78ea613
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
@@ -0,0 +1,486 @@
+//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallPtrSet class.  See the doxygen comment for
+// SmallPtrSetImplBase for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLPTRSET_H
+#define LLVM_ADT_SMALLPTRSET_H
+
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// SmallPtrSetImplBase - This is the common code shared among all the
+/// SmallPtrSet<>'s, which is almost everything.  SmallPtrSet has two modes, one
+/// for small and one for large sets.
+///
+/// Small sets use an array of pointers allocated in the SmallPtrSet object,
+/// which is treated as a simple array of pointers.  When a pointer is added to
+/// the set, the array is scanned to see if the element already exists, if not
+/// the element is 'pushed back' onto the array.  If we run out of space in the
+/// array, we grow into the 'large set' case.  SmallSet should be used when the
+/// sets are often small.  In this case, no memory allocation is used, and only
+/// light-weight and cache-efficient scanning is used.
+///
+/// Large sets use a classic exponentially-probed hash table.  Empty buckets are
+/// represented with an illegal pointer value (-1) to allow null pointers to be
+/// inserted.  Tombstones are represented with another illegal pointer value
+/// (-2), to allow deletion.  The hash table is resized when the table is 3/4
+/// full or more.  When this happens, the table is doubled in size.
+///
+class SmallPtrSetImplBase : public DebugEpochBase {
+  friend class SmallPtrSetIteratorImpl;
+
+protected:
+  /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+  const void **SmallArray;
+  /// CurArray - This is the current set of buckets.  If equal to SmallArray,
+  /// then the set is in 'small mode'.
+  const void **CurArray;
+  /// CurArraySize - The allocated size of CurArray, always a power of two.
+  unsigned CurArraySize;
+
+  /// Number of elements in CurArray that contain a value or are a tombstone.
+  /// If small, all these elements are at the beginning of CurArray and the rest
+  /// is uninitialized.
+  unsigned NumNonEmpty;
+  /// Number of tombstones in CurArray.
+  unsigned NumTombstones;
+
+  // Helpers to copy and move construct a SmallPtrSet.
+  SmallPtrSetImplBase(const void **SmallStorage,
+                      const SmallPtrSetImplBase &that);
+  SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
+                      SmallPtrSetImplBase &&that);
+
+  explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
+      : SmallArray(SmallStorage), CurArray(SmallStorage),
+        CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
+    assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
+           "Initial size must be a power of two!");
+  }
+
+  ~SmallPtrSetImplBase() {
+    if (!isSmall())
+      free(CurArray);
+  }
+
+public:
+  using size_type = unsigned;
+
+  SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
+
+  LLVM_NODISCARD bool empty() const { return size() == 0; }
+  size_type size() const { return NumNonEmpty - NumTombstones; }
+
+  void clear() {
+    incrementEpoch();
+    // If the capacity of the array is huge, and the # elements used is small,
+    // shrink the array.
+    if (!isSmall()) {
+      if (size() * 4 < CurArraySize && CurArraySize > 32)
+        return shrink_and_clear();
+      // Fill the array with empty markers.
+      memset(CurArray, -1, CurArraySize * sizeof(void *));
+    }
+
+    NumNonEmpty = 0;
+    NumTombstones = 0;
+  }
+
+protected:
+  static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
+
+  static void *getEmptyMarker() {
+    // Note that -1 is chosen to make clear() efficiently implementable with
+    // memset and because it's not a valid pointer value.
+    return reinterpret_cast<void*>(-1);
+  }
+
+  const void **EndPointer() const {
+    return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
+  }
+
+  /// insert_imp - Returns a pair of the slot for the pointer and a bool that
+  /// is true if the pointer was new to the set, false if it was already in
+  /// the set.  This is hidden from the client so that the derived class can
+  /// check that the right type of pointer is passed in.
+  std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
+    if (isSmall()) {
+      // Check to see if it is already in the set.
+      const void **LastTombstone = nullptr;
+      for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
+           APtr != E; ++APtr) {
+        const void *Value = *APtr;
+        if (Value == Ptr)
+          return std::make_pair(APtr, false);
+        if (Value == getTombstoneMarker())
+          LastTombstone = APtr;
+      }
+
+      // Did we find any tombstone marker?
+      if (LastTombstone != nullptr) {
+        *LastTombstone = Ptr;
+        --NumTombstones;
+        incrementEpoch();
+        return std::make_pair(LastTombstone, true);
+      }
+
+      // Nope, there isn't.  If we stay small, just 'pushback' now.
+      if (NumNonEmpty < CurArraySize) {
+        SmallArray[NumNonEmpty++] = Ptr;
+        incrementEpoch();
+        return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
+      }
+      // Otherwise, hit the big set case, which will call grow.
+    }
+    return insert_imp_big(Ptr);
+  }
+
+  /// erase_imp - If the set contains the specified pointer, remove it and
+  /// return true, otherwise return false.  This is hidden from the client so
+  /// that the derived class can check that the right type of pointer is passed
+  /// in.
+  bool erase_imp(const void * Ptr) {
+    const void *const *P = find_imp(Ptr);
+    if (P == EndPointer())
+      return false;
+
+    const void **Loc = const_cast<const void **>(P);
+    assert(*Loc == Ptr && "broken find!");
+    *Loc = getTombstoneMarker();
+    NumTombstones++;
+    return true;
+  }
+
+  /// Returns the raw pointer needed to construct an iterator.  If the element
+  /// is not found, this will be EndPointer.  Otherwise, it will be a pointer
+  /// to the slot which stores Ptr.
+  const void *const * find_imp(const void * Ptr) const {
+    if (isSmall()) {
+      // Linear search for the item.
+      for (const void *const *APtr = SmallArray,
+                      *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
+        if (*APtr == Ptr)
+          return APtr;
+      return EndPointer();
+    }
+
+    // Big set case.
+    auto *Bucket = FindBucketFor(Ptr);
+    if (*Bucket == Ptr)
+      return Bucket;
+    return EndPointer();
+  }
+
+private:
+  bool isSmall() const { return CurArray == SmallArray; }
+
+  std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
+
+  const void * const *FindBucketFor(const void *Ptr) const;
+  void shrink_and_clear();
+
+  /// Grow - Allocate a larger backing store for the buckets and move it over.
+  void Grow(unsigned NewSize);
+
+protected:
+  /// swap - Swaps the elements of two sets.
+  /// Note: This method assumes that both sets have the same small size.
+  void swap(SmallPtrSetImplBase &RHS);
+
+  void CopyFrom(const SmallPtrSetImplBase &RHS);
+  void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+
+private:
+  /// Code shared by MoveFrom() and move constructor.
+  void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+  /// Code shared by CopyFrom() and copy constructor.
+  void CopyHelper(const SmallPtrSetImplBase &RHS);
+};
+
+/// SmallPtrSetIteratorImpl - This is the common base class shared between all
+/// instances of SmallPtrSetIterator.
+class SmallPtrSetIteratorImpl {
+protected:
+  const void *const *Bucket;
+  const void *const *End;
+
+public:
+  explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
+    : Bucket(BP), End(E) {
+    if (shouldReverseIterate()) {
+      RetreatIfNotValid();
+      return;
+    }
+    AdvanceIfNotValid();
+  }
+
+  bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
+    return Bucket == RHS.Bucket;
+  }
+  bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
+    return Bucket != RHS.Bucket;
+  }
+
+protected:
+  /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
+  /// that is.   This is guaranteed to stop because the end() bucket is marked
+  /// valid.
+  void AdvanceIfNotValid() {
+    assert(Bucket <= End);
+    while (Bucket != End &&
+           (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
+            *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
+      ++Bucket;
+  }
+  void RetreatIfNotValid() {
+    assert(Bucket >= End);
+    while (Bucket != End &&
+           (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
+            Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
+      --Bucket;
+    }
+  }
+};
+
+/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
+template <typename PtrTy>
+class SmallPtrSetIterator : public SmallPtrSetIteratorImpl,
+                            DebugEpochBase::HandleBase {
+  using PtrTraits = PointerLikeTypeTraits<PtrTy>;
+
+public:
+  using value_type = PtrTy;
+  using reference = PtrTy;
+  using pointer = PtrTy;
+  using difference_type = std::ptrdiff_t;
+  using iterator_category = std::forward_iterator_tag;
+
+  explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
+                               const DebugEpochBase &Epoch)
+      : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
+
+  // Most methods are provided by the base class.
+
+  const PtrTy operator*() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate()) {
+      assert(Bucket > End);
+      return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
+    }
+    assert(Bucket < End);
+    return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
+  }
+
+  inline SmallPtrSetIterator& operator++() {          // Preincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate()) {
+      --Bucket;
+      RetreatIfNotValid();
+      return *this;
+    }
+    ++Bucket;
+    AdvanceIfNotValid();
+    return *this;
+  }
+
+  SmallPtrSetIterator operator++(int) {        // Postincrement
+    SmallPtrSetIterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
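+/// For example, RoundUpToPowerOfTwo<5>::Val is 8, and
+/// RoundUpToPowerOfTwo<8>::Val is 8.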
+template<unsigned N>
+struct RoundUpToPowerOfTwo;
+
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it.  This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
+template<unsigned N, bool isPowerTwo>
+struct RoundUpToPowerOfTwoH {
+  enum { Val = N };
+};
+template<unsigned N>
+struct RoundUpToPowerOfTwoH<N, false> {
+  enum {
+    // We could just use NextVal = N+1, but this converges faster.  N|(N-1) sets
+    // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
+    Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
+  };
+};
+
+template<unsigned N>
+struct RoundUpToPowerOfTwo {
+  enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+};
+
+/// \brief A templated base class for \c SmallPtrSet which provides the
+/// typesafe interface that is common across all small sizes.
+///
+/// This is particularly useful for passing around between interface boundaries
+/// to avoid encoding a particular small size in the interface boundary.
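+///
+/// For example, a callee taking SmallPtrSetImpl by reference works with any
+/// small size (an illustrative sketch):
+/// \code
+///   void takePointer(SmallPtrSetImpl<int *> &S, int *P) {
+///     S.insert(P);   // Accepts a SmallPtrSet<int *, N> for any N.
+///   }
+/// \endcode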
+template <typename PtrType>
+class SmallPtrSetImpl : public SmallPtrSetImplBase {
+  using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
+  using PtrTraits = PointerLikeTypeTraits<PtrType>;
+  using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
+
+protected:
+  // Constructors that forward to the base.
+  SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
+      : SmallPtrSetImplBase(SmallStorage, that) {}
+  SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
+                  SmallPtrSetImpl &&that)
+      : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
+  explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
+      : SmallPtrSetImplBase(SmallStorage, SmallSize) {}
+
+public:
+  using iterator = SmallPtrSetIterator<PtrType>;
+  using const_iterator = SmallPtrSetIterator<PtrType>;
+  using key_type = ConstPtrType;
+  using value_type = PtrType;
+
+  SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
+
+  /// Inserts Ptr if and only if there is no element in the container equal to
+  /// Ptr. The bool component of the returned pair is true if and only if the
+  /// insertion takes place, and the iterator component of the pair points to
+  /// the element equal to Ptr.
+  std::pair<iterator, bool> insert(PtrType Ptr) {
+    auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+    return std::make_pair(makeIterator(p.first), p.second);
+  }
+
+  /// erase - If the set contains the specified pointer, remove it and return
+  /// true, otherwise return false.
+  bool erase(PtrType Ptr) {
+    return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
+  }
+  /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
+  size_type count(ConstPtrType Ptr) const { return find(Ptr) != end() ? 1 : 0; }
+  iterator find(ConstPtrType Ptr) const {
+    return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
+  }
+
+  template <typename IterT>
+  void insert(IterT I, IterT E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  void insert(std::initializer_list<PtrType> IL) {
+    insert(IL.begin(), IL.end());
+  }
+
+  iterator begin() const {
+    if (shouldReverseIterate())
+      return makeIterator(EndPointer() - 1);
+    return makeIterator(CurArray);
+  }
+  iterator end() const { return makeIterator(EndPointer()); }
+
+private:
+  /// Create an iterator that dereferences to same place as the given pointer.
+  iterator makeIterator(const void *const *P) const {
+    if (shouldReverseIterate())
+      return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
+    return iterator(P, EndPointer(), *this);
+  }
+};
+
+/// SmallPtrSet - This class implements a set which is optimized for holding
+/// SmallSize or less elements.  This internally rounds up SmallSize to the next
+/// power of two if it is not already a power of two.  See the comments above
+/// SmallPtrSetImplBase for details of the algorithm.
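+///
+/// For example (illustrative only):
+/// \code
+///   SmallPtrSet<int *, 3> S;   // Small storage is rounded up to 4 slots.
+///   int A = 0, B = 0;
+///   S.insert(&A);
+///   S.insert(&B);
+///   bool Inserted = S.insert(&A).second;   // false: &A is already present.
+///   assert(!Inserted && S.size() == 2 && S.count(&A) == 1);
+/// \endcode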
+template<class PtrType, unsigned SmallSize>
+class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
+  // In small mode SmallPtrSet uses linear search for the elements, so it is
+  // not a good idea to choose this value too high. You may consider using a
+  // DenseSet<> instead if you expect many elements in the set.
+  static_assert(SmallSize <= 32, "SmallSize should be small");
+
+  using BaseT = SmallPtrSetImpl<PtrType>;
+
+  // Make sure that SmallSize is a power of two, round up if not.
+  enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+  /// SmallStorage - Fixed size storage used in 'small mode'.
+  const void *SmallStorage[SmallSizePowTwo];
+
+public:
+  SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
+  SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
+  SmallPtrSet(SmallPtrSet &&that)
+      : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
+
+  template<typename It>
+  SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
+    this->insert(I, E);
+  }
+
+  SmallPtrSet(std::initializer_list<PtrType> IL)
+      : BaseT(SmallStorage, SmallSizePowTwo) {
+    this->insert(IL.begin(), IL.end());
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
+    if (&RHS != this)
+      this->CopyFrom(RHS);
+    return *this;
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
+    if (&RHS != this)
+      this->MoveFrom(SmallSizePowTwo, std::move(RHS));
+    return *this;
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(std::initializer_list<PtrType> IL) {
+    this->clear();
+    this->insert(IL.begin(), IL.end());
+    return *this;
+  }
+
+  /// swap - Swaps the elements of two sets.
+  void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
+    SmallPtrSetImplBase::swap(RHS);
+  }
+};
+
+} // end namespace llvm
+
+namespace std {
+
+  /// Implement std::swap in terms of SmallPtrSet swap.
+  template<class T, unsigned N>
+  inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
+    LHS.swap(RHS);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLPTRSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallSet.h b/linux-x64/clang/include/llvm/ADT/SmallSet.h
new file mode 100644
index 0000000..d52d0f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallSet.h
@@ -0,0 +1,142 @@
+//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallSet class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSET_H
+#define LLVM_ADT_SMALLSET_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+#include <functional>
+#include <set>
+#include <utility>
+
+namespace llvm {
+
+/// SmallSet - This maintains a set of unique values, optimizing for the case
+/// when the set is small (N or fewer elements).  In this case, the set can be
+/// maintained with no mallocs.  If the set gets large, we expand to using a
+/// std::set to maintain reasonable lookup times.
+///
+/// Note that this set does not provide a way to iterate over members in the
+/// set.
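+///
+/// For example (illustrative only):
+/// \code
+///   SmallSet<int, 4> S;
+///   S.insert(1);
+///   S.insert(2);
+///   bool Inserted = S.insert(1).second;   // false: 1 is already present.
+///   assert(!Inserted && S.size() == 2 && S.count(2) == 1);
+/// \endcode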
+template <typename T, unsigned N, typename C = std::less<T>>
+class SmallSet {
+  /// Use a SmallVector to hold the elements here (even though it will never
+  /// reach its 'large' stage) to avoid calling the default ctors of elements
+  /// we will never use.
+  SmallVector<T, N> Vector;
+  std::set<T, C> Set;
+
+  using VIterator = typename SmallVector<T, N>::const_iterator;
+  using mutable_iterator = typename SmallVector<T, N>::iterator;
+
+  // In small mode SmallSet uses linear search for the elements, so it is
+  // not a good idea to choose this value too high. You may consider using a
+  // DenseSet<> instead if you expect many elements in the set.
+  static_assert(N <= 32, "N should be small");
+
+public:
+  using size_type = size_t;
+
+  SmallSet() = default;
+
+  LLVM_NODISCARD bool empty() const {
+    return Vector.empty() && Set.empty();
+  }
+
+  size_type size() const {
+    return isSmall() ? Vector.size() : Set.size();
+  }
+
+  /// count - Return 1 if the element is in the set, 0 otherwise.
+  size_type count(const T &V) const {
+    if (isSmall()) {
+      // Since the collection is small, just do a linear search.
+      return vfind(V) == Vector.end() ? 0 : 1;
+    } else {
+      return Set.count(V);
+    }
+  }
+
+  /// insert - Insert an element into the set if it isn't already there.
+  /// Returns true if the element is inserted (it was not in the set before).
+  /// The first value of the returned pair is unused and provided for
+  /// partial compatibility with the standard library associative container
+  /// concept.
+  // FIXME: Add iterators that abstract over the small and large form, and then
+  // return those here.
+  std::pair<NoneType, bool> insert(const T &V) {
+    if (!isSmall())
+      return std::make_pair(None, Set.insert(V).second);
+
+    VIterator I = vfind(V);
+    if (I != Vector.end())    // Don't reinsert if it already exists.
+      return std::make_pair(None, false);
+    if (Vector.size() < N) {
+      Vector.push_back(V);
+      return std::make_pair(None, true);
+    }
+
+    // Otherwise, grow from vector to set.
+    while (!Vector.empty()) {
+      Set.insert(Vector.back());
+      Vector.pop_back();
+    }
+    Set.insert(V);
+    return std::make_pair(None, true);
+  }
+
+  template <typename IterT>
+  void insert(IterT I, IterT E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  bool erase(const T &V) {
+    if (!isSmall())
+      return Set.erase(V);
+    for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+      if (*I == V) {
+        Vector.erase(I);
+        return true;
+      }
+    return false;
+  }
+
+  void clear() {
+    Vector.clear();
+    Set.clear();
+  }
+
+private:
+  bool isSmall() const { return Set.empty(); }
+
+  VIterator vfind(const T &V) const {
+    for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+      if (*I == V)
+        return I;
+    return Vector.end();
+  }
+};
+
+/// If this set is of pointer values, transparently switch over to using
+/// SmallPtrSet for performance.
+template <typename PointeeType, unsigned N>
+class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallString.h b/linux-x64/clang/include/llvm/ADT/SmallString.h
new file mode 100644
index 0000000..ff46e85
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallString.h
@@ -0,0 +1,297 @@
+//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallString class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSTRING_H
+#define LLVM_ADT_SMALLSTRING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstddef>
+
+namespace llvm {
+
+/// SmallString - A SmallString is just a SmallVector with methods and accessors
+/// that make it work better as a string (e.g. operator+ etc).
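+///
+/// For example (illustrative only):
+/// \code
+///   SmallString<64> Path("foo");
+///   Path += "/bar.cpp";
+///   StringRef Ext = Path.substr(Path.rfind('.'));   // ".cpp"
+///   const char *CStr = Path.c_str();   // NUL-terminated view for C APIs.
+/// \endcode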
+template<unsigned InternalLen>
+class SmallString : public SmallVector<char, InternalLen> {
+public:
+  /// Default ctor - Initialize to empty.
+  SmallString() = default;
+
+  /// Initialize from a StringRef.
+  SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
+  /// Initialize with a range.
+  template<typename ItTy>
+  SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
+
+  // Note that in order to add new overloads for append & assign, we have to
+  // duplicate the inherited versions so as not to inadvertently hide them.
+
+  /// @}
+  /// @name String Assignment
+  /// @{
+
+  /// Assign from a repeated element.
+  void assign(size_t NumElts, char Elt) {
+    this->SmallVectorImpl<char>::assign(NumElts, Elt);
+  }
+
+  /// Assign from an iterator pair.
+  template<typename in_iter>
+  void assign(in_iter S, in_iter E) {
+    this->clear();
+    SmallVectorImpl<char>::append(S, E);
+  }
+
+  /// Assign from a StringRef.
+  void assign(StringRef RHS) {
+    this->clear();
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// Assign from a SmallVector.
+  void assign(const SmallVectorImpl<char> &RHS) {
+    this->clear();
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// @}
+  /// @name String Concatenation
+  /// @{
+
+  /// Append from an iterator pair.
+  template<typename in_iter>
+  void append(in_iter S, in_iter E) {
+    SmallVectorImpl<char>::append(S, E);
+  }
+
+  void append(size_t NumInputs, char Elt) {
+    SmallVectorImpl<char>::append(NumInputs, Elt);
+  }
+
+  /// Append from a StringRef.
+  void append(StringRef RHS) {
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// Append from a SmallVector.
+  void append(const SmallVectorImpl<char> &RHS) {
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// @}
+  /// @name String Comparison
+  /// @{
+
+  /// Check for string equality.  This is more efficient than compare() when
+  /// the relative ordering of unequal strings isn't needed.
+  bool equals(StringRef RHS) const {
+    return str().equals(RHS);
+  }
+
+  /// Check for string equality, ignoring case.
+  bool equals_lower(StringRef RHS) const {
+    return str().equals_lower(RHS);
+  }
+
+  /// Compare two strings; the result is -1, 0, or 1 if this string is
+  /// lexicographically less than, equal to, or greater than the \p RHS.
+  int compare(StringRef RHS) const {
+    return str().compare(RHS);
+  }
+
+  /// compare_lower - Compare two strings, ignoring case.
+  int compare_lower(StringRef RHS) const {
+    return str().compare_lower(RHS);
+  }
+
+  /// compare_numeric - Compare two strings, treating sequences of digits as
+  /// numbers.
+  int compare_numeric(StringRef RHS) const {
+    return str().compare_numeric(RHS);
+  }
+
+  /// @}
+  /// @name String Predicates
+  /// @{
+
+  /// startswith - Check if this string starts with the given \p Prefix.
+  bool startswith(StringRef Prefix) const {
+    return str().startswith(Prefix);
+  }
+
+  /// endswith - Check if this string ends with the given \p Suffix.
+  bool endswith(StringRef Suffix) const {
+    return str().endswith(Suffix);
+  }
+
+  /// @}
+  /// @name String Searching
+  /// @{
+
+  /// find - Search for the first character \p C in the string.
+  ///
+  /// \return - The index of the first occurrence of \p C, or npos if not
+  /// found.
+  size_t find(char C, size_t From = 0) const {
+    return str().find(C, From);
+  }
+
+  /// Search for the first string \p Str in the string.
+  ///
+  /// \returns The index of the first occurrence of \p Str, or npos if not
+  /// found.
+  size_t find(StringRef Str, size_t From = 0) const {
+    return str().find(Str, From);
+  }
+
+  /// Search for the last character \p C in the string.
+  ///
+  /// \returns The index of the last occurrence of \p C, or npos if not
+  /// found.
+  size_t rfind(char C, size_t From = StringRef::npos) const {
+    return str().rfind(C, From);
+  }
+
+  /// Search for the last string \p Str in the string.
+  ///
+  /// \returns The index of the last occurrence of \p Str, or npos if not
+  /// found.
+  size_t rfind(StringRef Str) const {
+    return str().rfind(Str);
+  }
+
+  /// Find the first character in the string that is \p C, or npos if not
+  /// found. Same as find.
+  size_t find_first_of(char C, size_t From = 0) const {
+    return str().find_first_of(C, From);
+  }
+
+  /// Find the first character in the string that is in \p Chars, or npos if
+  /// not found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_first_of(StringRef Chars, size_t From = 0) const {
+    return str().find_first_of(Chars, From);
+  }
+
+  /// Find the first character in the string that is not \p C, or npos if not
+  /// found.
+  size_t find_first_not_of(char C, size_t From = 0) const {
+    return str().find_first_not_of(C, From);
+  }
+
+  /// Find the first character in the string that is not in the string
+  /// \p Chars, or npos if not found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
+    return str().find_first_not_of(Chars, From);
+  }
+
+  /// Find the last character in the string that is \p C, or npos if not
+  /// found.
+  size_t find_last_of(char C, size_t From = StringRef::npos) const {
+    return str().find_last_of(C, From);
+  }
+
+  /// Find the last character in the string that is in \p Chars, or npos if not
+  /// found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_last_of(
+      StringRef Chars, size_t From = StringRef::npos) const {
+    return str().find_last_of(Chars, From);
+  }
+
+  /// @}
+  /// @name Helpful Algorithms
+  /// @{
+
+  /// Return the number of occurrences of \p C in the string.
+  size_t count(char C) const {
+    return str().count(C);
+  }
+
+  /// Return the number of non-overlapped occurrences of \p Str in the
+  /// string.
+  size_t count(StringRef Str) const {
+    return str().count(Str);
+  }
+
+  /// @}
+  /// @name Substring Operations
+  /// @{
+
+  /// Return a reference to the substring from [Start, Start + N).
+  ///
+  /// \param Start The index of the starting character in the substring; if
+  /// the index is npos or greater than the length of the string then the
+  /// empty substring will be returned.
+  ///
+  /// \param N The number of characters to include in the substring. If \p N
+  /// exceeds the number of characters remaining in the string, the string
+  /// suffix (starting with \p Start) will be returned.
+  StringRef substr(size_t Start, size_t N = StringRef::npos) const {
+    return str().substr(Start, N);
+  }
+
+  /// Return a reference to the substring from [Start, End).
+  ///
+  /// \param Start The index of the starting character in the substring; if
+  /// the index is npos or greater than the length of the string then the
+  /// empty substring will be returned.
+  ///
+  /// \param End The index following the last character to include in the
+  /// substring. If this is npos, or less than \p Start, or exceeds the
+  /// number of characters remaining in the string, the string suffix
+  /// (starting with \p Start) will be returned.
+  StringRef slice(size_t Start, size_t End) const {
+    return str().slice(Start, End);
+  }
+
+  // Extra methods.
+
+  /// Explicit conversion to StringRef.
+  StringRef str() const { return StringRef(this->begin(), this->size()); }
+
+  // TODO: Make this const, if it's safe...
+  const char* c_str() {
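+    // Append a NUL, then pop it off again: size() is unchanged, but the byte
+    // one past the end of the string is now guaranteed to be '\0'.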
+    this->push_back(0);
+    this->pop_back();
+    return this->data();
+  }
+
+  /// Implicit conversion to StringRef.
+  operator StringRef() const { return str(); }
+
+  // Extra operators.
+  const SmallString &operator=(StringRef RHS) {
+    this->clear();
+    return *this += RHS;
+  }
+
+  SmallString &operator+=(StringRef RHS) {
+    this->append(RHS.begin(), RHS.end());
+    return *this;
+  }
+  SmallString &operator+=(char C) {
+    this->push_back(C);
+    return *this;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSTRING_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallVector.h b/linux-x64/clang/include/llvm/ADT/SmallVector.h
new file mode 100644
index 0000000..3d17e70
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallVector.h
@@ -0,0 +1,958 @@
+//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLVECTOR_H
+#define LLVM_ADT_SMALLVECTOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// This is all the non-templated stuff common to all SmallVectors.
+class SmallVectorBase {
+protected:
+  void *BeginX, *EndX, *CapacityX;
+
+protected:
+  SmallVectorBase(void *FirstEl, size_t Size)
+    : BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
+
+  /// This is an implementation of the grow() method which only works
+  /// on POD-like data types and is out of line to reduce code duplication.
+  void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
+
+public:
+  /// This returns size()*sizeof(T).
+  size_t size_in_bytes() const {
+    return size_t((char*)EndX - (char*)BeginX);
+  }
+
+  /// capacity_in_bytes - This returns capacity()*sizeof(T).
+  size_t capacity_in_bytes() const {
+    return size_t((char*)CapacityX - (char*)BeginX);
+  }
+
+  LLVM_NODISCARD bool empty() const { return BeginX == EndX; }
+};
+
+/// This is the part of SmallVectorTemplateBase which does not depend on whether
+/// the type T is a POD. The extra dummy template argument is used by ArrayRef
+/// to avoid unnecessarily requiring T to be complete.
+template <typename T, typename = void>
+class SmallVectorTemplateCommon : public SmallVectorBase {
+private:
+  template <typename, unsigned> friend struct SmallVectorStorage;
+
+  // Allocate raw space for N elements of type T.  If T has a ctor or dtor, we
+  // don't want it to be automatically run, so we need to represent the space as
+  // something else.  Use an array of char of sufficient alignment.
+  using U = AlignedCharArrayUnion<T>;
+  U FirstEl;
+  // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
+
+protected:
+  SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
+
+  void grow_pod(size_t MinSizeInBytes, size_t TSize) {
+    SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
+  }
+
+  /// Return true if this is a smallvector which has not had dynamic
+  /// memory allocated for it.
+  bool isSmall() const {
+    return BeginX == static_cast<const void*>(&FirstEl);
+  }
+
+  /// Put this vector in a state of being small.
+  void resetToSmall() {
+    BeginX = EndX = CapacityX = &FirstEl;
+  }
+
+  void setEnd(T *P) { this->EndX = P; }
+
+public:
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type = T;
+  using iterator = T *;
+  using const_iterator = const T *;
+
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+
+  using reference = T &;
+  using const_reference = const T &;
+  using pointer = T *;
+  using const_pointer = const T *;
+
+  // forward iterator creation methods.
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator begin() { return (iterator)this->BeginX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator begin() const { return (const_iterator)this->BeginX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator end() { return (iterator)this->EndX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator end() const { return (const_iterator)this->EndX; }
+
+protected:
+  iterator capacity_ptr() { return (iterator)this->CapacityX; }
+  const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
+
+public:
+  // reverse iterator creation methods.
+  reverse_iterator rbegin()            { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+  reverse_iterator rend()              { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  size_type size() const { return end()-begin(); }
+  size_type max_size() const { return size_type(-1) / sizeof(T); }
+
+  /// Return the total number of elements in the currently allocated buffer.
+  size_t capacity() const { return capacity_ptr() - begin(); }
+
+  /// Return a pointer to the vector's buffer, even if empty().
+  pointer data() { return pointer(begin()); }
+  /// Return a pointer to the vector's buffer, even if empty().
+  const_pointer data() const { return const_pointer(begin()); }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  reference operator[](size_type idx) {
+    assert(idx < size());
+    return begin()[idx];
+  }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_reference operator[](size_type idx) const {
+    assert(idx < size());
+    return begin()[idx];
+  }
+
+  reference front() {
+    assert(!empty());
+    return begin()[0];
+  }
+  const_reference front() const {
+    assert(!empty());
+    return begin()[0];
+  }
+
+  reference back() {
+    assert(!empty());
+    return end()[-1];
+  }
+  const_reference back() const {
+    assert(!empty());
+    return end()[-1];
+  }
+};
+
+/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
+/// implementations that are designed to work with non-POD-like T's.
+template <typename T, bool isPodLike>
+class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
+protected:
+  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+  static void destroy_range(T *S, T *E) {
+    while (S != E) {
+      --E;
+      E->~T();
+    }
+  }
+
+  /// Move the range [I, E) into the uninitialized memory starting with "Dest",
+  /// constructing elements as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+    std::uninitialized_copy(std::make_move_iterator(I),
+                            std::make_move_iterator(E), Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
+  /// constructing elements as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+    std::uninitialized_copy(I, E, Dest);
+  }
+
+  /// Grow the allocated memory (without initializing new elements), doubling
+  /// the size of the allocated memory. Guarantees space for at least one more
+  /// element, or MinSize more elements if specified.
+  void grow(size_t MinSize = 0);
+
+public:
+  void push_back(const T &Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void*) this->end()) T(Elt);
+    this->setEnd(this->end()+1);
+  }
+
+  void push_back(T &&Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void*) this->end()) T(::std::move(Elt));
+    this->setEnd(this->end()+1);
+  }
+
+  void pop_back() {
+    this->setEnd(this->end()-1);
+    this->end()->~T();
+  }
+};
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool isPodLike>
+void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
+  size_t CurCapacity = this->capacity();
+  size_t CurSize = this->size();
+  // Always grow, even from zero.
+  size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
+  if (NewCapacity < MinSize)
+    NewCapacity = MinSize;
+  T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+  if (NewElts == nullptr)
+    report_bad_alloc_error("Allocation of SmallVector element failed.");
+
+  // Move the elements over.
+  this->uninitialized_move(this->begin(), this->end(), NewElts);
+
+  // Destroy the original elements.
+  destroy_range(this->begin(), this->end());
+
+  // If this wasn't grown from the inline copy, deallocate the old space.
+  if (!this->isSmall())
+    free(this->begin());
+
+  this->setEnd(NewElts+CurSize);
+  this->BeginX = NewElts;
+  this->CapacityX = this->begin()+NewCapacity;
+}
+
+
+/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
+/// implementations that are designed to work with POD-like T's.
+template <typename T>
+class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
+protected:
+  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+  // No need to do a destroy loop for POD's.
+  static void destroy_range(T *, T *) {}
+
+  /// Move the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+    // Just do a copy.
+    uninitialized_copy(I, E, Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+    // Arbitrary iterator types; just use the basic implementation.
+    std::uninitialized_copy(I, E, Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template <typename T1, typename T2>
+  static void uninitialized_copy(
+      T1 *I, T1 *E, T2 *Dest,
+      typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
+                                           T2>::value>::type * = nullptr) {
+    // Use memcpy for PODs iterated by pointers (which includes SmallVector
+    // iterators): std::uninitialized_copy optimizes to memmove, but we can
+    // use memcpy here. Note that if the range is empty, I and E may not be
+    // valid pointers to pass to memcpy, so only call it when I != E.
+    if (I != E)
+      memcpy(Dest, I, (E - I) * sizeof(T));
+  }
+
+  /// Double the size of the allocated memory, guaranteeing space for at
+  /// least one more element or MinSize if specified.
+  void grow(size_t MinSize = 0) {
+    this->grow_pod(MinSize*sizeof(T), sizeof(T));
+  }
+
+public:
+  void push_back(const T &Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    memcpy(this->end(), &Elt, sizeof(T));
+    this->setEnd(this->end()+1);
+  }
+
+  void pop_back() {
+    this->setEnd(this->end()-1);
+  }
+};
+
+/// This class consists of common code factored out of the SmallVector class to
+/// reduce code duplication based on the SmallVector 'N' template parameter.
+template <typename T>
+class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
+  using SuperClass = SmallVectorTemplateBase<T, isPodLike<T>::value>;
+
+public:
+  using iterator = typename SuperClass::iterator;
+  using const_iterator = typename SuperClass::const_iterator;
+  using size_type = typename SuperClass::size_type;
+
+protected:
+  // Default ctor - Initialize to empty.
+  explicit SmallVectorImpl(unsigned N)
+    : SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
+  }
+
+public:
+  SmallVectorImpl(const SmallVectorImpl &) = delete;
+
+  ~SmallVectorImpl() {
+    // Subclass has already destructed this vector's elements.
+    // If this wasn't grown from the inline copy, deallocate the old space.
+    if (!this->isSmall())
+      free(this->begin());
+  }
+
+  void clear() {
+    this->destroy_range(this->begin(), this->end());
+    this->EndX = this->BeginX;
+  }
+
+  void resize(size_type N) {
+    if (N < this->size()) {
+      this->destroy_range(this->begin()+N, this->end());
+      this->setEnd(this->begin()+N);
+    } else if (N > this->size()) {
+      if (this->capacity() < N)
+        this->grow(N);
+      for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
+        new (&*I) T();
+      this->setEnd(this->begin()+N);
+    }
+  }
+
+  void resize(size_type N, const T &NV) {
+    if (N < this->size()) {
+      this->destroy_range(this->begin()+N, this->end());
+      this->setEnd(this->begin()+N);
+    } else if (N > this->size()) {
+      if (this->capacity() < N)
+        this->grow(N);
+      std::uninitialized_fill(this->end(), this->begin()+N, NV);
+      this->setEnd(this->begin()+N);
+    }
+  }
+
+  void reserve(size_type N) {
+    if (this->capacity() < N)
+      this->grow(N);
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Result = ::std::move(this->back());
+    this->pop_back();
+    return Result;
+  }
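+
+  // A minimal usage sketch (illustrative; not part of the upstream header):
+  //   SmallVector<int, 4> Stack;
+  //   Stack.push_back(1);
+  //   Stack.push_back(2);
+  //   while (!Stack.empty()) {
+  //     int Top = Stack.pop_back_val(); // yields 2, then 1
+  //     (void)Top;
+  //   }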
+
+  void swap(SmallVectorImpl &RHS);
+
+  /// Add the specified range to the end of the SmallVector.
+  template <typename in_iter,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<in_iter>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  void append(in_iter in_start, in_iter in_end) {
+    size_type NumInputs = std::distance(in_start, in_end);
+    // Grow allocated space if needed.
+    if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+      this->grow(this->size()+NumInputs);
+
+    // Copy the new elements over.
+    this->uninitialized_copy(in_start, in_end, this->end());
+    this->setEnd(this->end() + NumInputs);
+  }
+
+  /// Add the specified range to the end of the SmallVector.
+  void append(size_type NumInputs, const T &Elt) {
+    // Grow allocated space if needed.
+    if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+      this->grow(this->size()+NumInputs);
+
+    // Copy the new elements over.
+    std::uninitialized_fill_n(this->end(), NumInputs, Elt);
+    this->setEnd(this->end() + NumInputs);
+  }
+
+  void append(std::initializer_list<T> IL) {
+    append(IL.begin(), IL.end());
+  }
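+
+  // Illustrative sketch of the append overloads (not part of the header):
+  //   SmallVector<int, 8> V;
+  //   int Arr[] = {1, 2, 3};
+  //   V.append(std::begin(Arr), std::end(Arr)); // V = {1, 2, 3}
+  //   V.append(2, 9);                           // V = {1, 2, 3, 9, 9}
+  //   V.append({4, 5});                         // V = {1, 2, 3, 9, 9, 4, 5}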
+
+  // FIXME: Consider assigning over existing elements, rather than clearing &
+  // re-initializing them - for all assign(...) variants.
+
+  void assign(size_type NumElts, const T &Elt) {
+    clear();
+    if (this->capacity() < NumElts)
+      this->grow(NumElts);
+    this->setEnd(this->begin()+NumElts);
+    std::uninitialized_fill(this->begin(), this->end(), Elt);
+  }
+
+  template <typename in_iter,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<in_iter>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  void assign(in_iter in_start, in_iter in_end) {
+    clear();
+    append(in_start, in_end);
+  }
+
+  void assign(std::initializer_list<T> IL) {
+    clear();
+    append(IL);
+  }
+
+  iterator erase(const_iterator CI) {
+    // Just cast away constness because this is a non-const member function.
+    iterator I = const_cast<iterator>(CI);
+
+    assert(I >= this->begin() && "Iterator to erase is out of bounds.");
+    assert(I < this->end() && "Erasing at past-the-end iterator.");
+
+    iterator N = I;
+    // Shift all elts down one.
+    std::move(I+1, this->end(), I);
+    // Drop the last elt.
+    this->pop_back();
+    return N;
+  }
+
+  iterator erase(const_iterator CS, const_iterator CE) {
+    // Just cast away constness because this is a non-const member function.
+    iterator S = const_cast<iterator>(CS);
+    iterator E = const_cast<iterator>(CE);
+
+    assert(S >= this->begin() && "Range to erase is out of bounds.");
+    assert(S <= E && "Trying to erase invalid range.");
+    assert(E <= this->end() && "Trying to erase past the end.");
+
+    iterator N = S;
+    // Shift all elts down.
+    iterator I = std::move(E, this->end(), S);
+    // Drop the last elts.
+    this->destroy_range(I, this->end());
+    this->setEnd(I);
+    return N;
+  }
+
+  iterator insert(iterator I, T &&Elt) {
+    if (I == this->end()) {  // Important special case for empty vector.
+      this->push_back(::std::move(Elt));
+      return this->end()-1;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    if (this->EndX >= this->CapacityX) {
+      size_t EltNo = I-this->begin();
+      this->grow();
+      I = this->begin()+EltNo;
+    }
+
+    ::new ((void*) this->end()) T(::std::move(this->back()));
+    // Push everything else over.
+    std::move_backward(I, this->end()-1, this->end());
+    this->setEnd(this->end()+1);
+
+    // If we just moved the element we're inserting, be sure to update
+    // the reference.
+    T *EltPtr = &Elt;
+    if (I <= EltPtr && EltPtr < this->EndX)
+      ++EltPtr;
+
+    *I = ::std::move(*EltPtr);
+    return I;
+  }
+
+  iterator insert(iterator I, const T &Elt) {
+    if (I == this->end()) {  // Important special case for empty vector.
+      this->push_back(Elt);
+      return this->end()-1;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    if (this->EndX >= this->CapacityX) {
+      size_t EltNo = I-this->begin();
+      this->grow();
+      I = this->begin()+EltNo;
+    }
+    ::new ((void*) this->end()) T(std::move(this->back()));
+    // Push everything else over.
+    std::move_backward(I, this->end()-1, this->end());
+    this->setEnd(this->end()+1);
+
+    // If we just moved the element we're inserting, be sure to update
+    // the reference.
+    const T *EltPtr = &Elt;
+    if (I <= EltPtr && EltPtr < this->EndX)
+      ++EltPtr;
+
+    *I = *EltPtr;
+    return I;
+  }
+
+  iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+    size_t InsertElt = I - this->begin();
+
+    if (I == this->end()) {  // Important special case for empty vector.
+      append(NumToInsert, Elt);
+      return this->begin()+InsertElt;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    // Ensure there is enough space.
+    reserve(this->size() + NumToInsert);
+
+    // Re-establish the iterator, which reserve() may have invalidated.
+    I = this->begin()+InsertElt;
+
+    // If there are more elements between the insertion point and the end of the
+    // range than there are being inserted, we can use a simple approach to
+    // insertion.  Since we already reserved space, we know that this won't
+    // reallocate the vector.
+    if (size_t(this->end()-I) >= NumToInsert) {
+      T *OldEnd = this->end();
+      append(std::move_iterator<iterator>(this->end() - NumToInsert),
+             std::move_iterator<iterator>(this->end()));
+
+      // Move over the existing elements that get replaced.
+      std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+      std::fill_n(I, NumToInsert, Elt);
+      return I;
+    }
+
+    // Otherwise, we're inserting more elements than exist already, and we're
+    // not inserting at the end.
+
+    // Move over the elements that we're about to overwrite.
+    T *OldEnd = this->end();
+    this->setEnd(this->end() + NumToInsert);
+    size_t NumOverwritten = OldEnd-I;
+    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+    // Replace the overwritten part.
+    std::fill_n(I, NumOverwritten, Elt);
+
+    // Insert the non-overwritten middle part.
+    std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
+    return I;
+  }
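+
+  // Worked example of the second case above (illustrative): inserting three
+  // copies of X at index 1 of [A, B] move-constructs B into the uninitialized
+  // tail, overwrites the old B slot via fill_n, and fills the remaining gap
+  // with uninitialized_fill_n, yielding [A, X, X, X, B].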
+
+  template <typename ItTy,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<ItTy>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  iterator insert(iterator I, ItTy From, ItTy To) {
+    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+    size_t InsertElt = I - this->begin();
+
+    if (I == this->end()) {  // Important special case for empty vector.
+      append(From, To);
+      return this->begin()+InsertElt;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    size_t NumToInsert = std::distance(From, To);
+
+    // Ensure there is enough space.
+    reserve(this->size() + NumToInsert);
+
+    // Re-establish the iterator, which reserve() may have invalidated.
+    I = this->begin()+InsertElt;
+
+    // If there are more elements between the insertion point and the end of the
+    // range than there are being inserted, we can use a simple approach to
+    // insertion.  Since we already reserved space, we know that this won't
+    // reallocate the vector.
+    if (size_t(this->end()-I) >= NumToInsert) {
+      T *OldEnd = this->end();
+      append(std::move_iterator<iterator>(this->end() - NumToInsert),
+             std::move_iterator<iterator>(this->end()));
+
+      // Move over the existing elements that get replaced.
+      std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+      std::copy(From, To, I);
+      return I;
+    }
+
+    // Otherwise, we're inserting more elements than exist already, and we're
+    // not inserting at the end.
+
+    // Move over the elements that we're about to overwrite.
+    T *OldEnd = this->end();
+    this->setEnd(this->end() + NumToInsert);
+    size_t NumOverwritten = OldEnd-I;
+    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+    // Replace the overwritten part.
+    for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+      *J = *From;
+      ++J; ++From;
+    }
+
+    // Insert the non-overwritten middle part.
+    this->uninitialized_copy(From, To, OldEnd);
+    return I;
+  }
+
+  void insert(iterator I, std::initializer_list<T> IL) {
+    insert(I, IL.begin(), IL.end());
+  }
+
+  template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
+    this->setEnd(this->end() + 1);
+  }
+
+  SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+  SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+
+  bool operator==(const SmallVectorImpl &RHS) const {
+    if (this->size() != RHS.size()) return false;
+    return std::equal(this->begin(), this->end(), RHS.begin());
+  }
+  bool operator!=(const SmallVectorImpl &RHS) const {
+    return !(*this == RHS);
+  }
+
+  bool operator<(const SmallVectorImpl &RHS) const {
+    return std::lexicographical_compare(this->begin(), this->end(),
+                                        RHS.begin(), RHS.end());
+  }
+
+  /// Set the array size to \p N, which the current array must have enough
+  /// capacity for.
+  ///
+  /// This does not construct or destroy any elements in the vector.
+  ///
+  /// Clients can use this in conjunction with capacity() to write past the end
+  /// of the buffer when they know that more elements are available, and only
+  /// update the size later. This avoids the cost of value initializing elements
+  /// which will only be overwritten.
+  void set_size(size_type N) {
+    assert(N <= this->capacity());
+    this->setEnd(this->begin() + N);
+  }
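+
+  // A minimal sketch of the intended pattern; readBytes is a hypothetical
+  // reader, not an LLVM API:
+  //   SmallVector<char, 128> Buf;
+  //   Buf.reserve(64);
+  //   size_t N = readBytes(Buf.data(), 64); // write past the current end
+  //   Buf.set_size(N);                      // then publish the new size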
+};
+
+template <typename T>
+void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
+  if (this == &RHS) return;
+
+  // We can only avoid copying elements if neither vector is small.
+  if (!this->isSmall() && !RHS.isSmall()) {
+    std::swap(this->BeginX, RHS.BeginX);
+    std::swap(this->EndX, RHS.EndX);
+    std::swap(this->CapacityX, RHS.CapacityX);
+    return;
+  }
+  if (RHS.size() > this->capacity())
+    this->grow(RHS.size());
+  if (this->size() > RHS.capacity())
+    RHS.grow(this->size());
+
+  // Swap the shared elements.
+  size_t NumShared = this->size();
+  if (NumShared > RHS.size()) NumShared = RHS.size();
+  for (size_type i = 0; i != NumShared; ++i)
+    std::swap((*this)[i], RHS[i]);
+
+  // Copy over the extra elts.
+  if (this->size() > RHS.size()) {
+    size_t EltDiff = this->size() - RHS.size();
+    this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
+    RHS.setEnd(RHS.end()+EltDiff);
+    this->destroy_range(this->begin()+NumShared, this->end());
+    this->setEnd(this->begin()+NumShared);
+  } else if (RHS.size() > this->size()) {
+    size_t EltDiff = RHS.size() - this->size();
+    this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
+    this->setEnd(this->end() + EltDiff);
+    this->destroy_range(RHS.begin()+NumShared, RHS.end());
+    RHS.setEnd(RHS.begin()+NumShared);
+  }
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::
+  operator=(const SmallVectorImpl<T> &RHS) {
+  // Avoid self-assignment.
+  if (this == &RHS) return *this;
+
+  // If we already have sufficient space, assign the common elements, then
+  // destroy any excess.
+  size_t RHSSize = RHS.size();
+  size_t CurSize = this->size();
+  if (CurSize >= RHSSize) {
+    // Assign common elements.
+    iterator NewEnd;
+    if (RHSSize)
+      NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
+    else
+      NewEnd = this->begin();
+
+    // Destroy excess elements.
+    this->destroy_range(NewEnd, this->end());
+
+    // Trim.
+    this->setEnd(NewEnd);
+    return *this;
+  }
+
+  // If we have to grow to have enough elements, destroy the current elements.
+  // This allows us to avoid copying them during the grow.
+  // FIXME: don't do this if they're efficiently moveable.
+  if (this->capacity() < RHSSize) {
+    // Destroy current elements.
+    this->destroy_range(this->begin(), this->end());
+    this->setEnd(this->begin());
+    CurSize = 0;
+    this->grow(RHSSize);
+  } else if (CurSize) {
+    // Otherwise, use assignment for the already-constructed elements.
+    std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
+  }
+
+  // Copy construct the new elements in place.
+  this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
+                           this->begin()+CurSize);
+
+  // Set end.
+  this->setEnd(this->begin()+RHSSize);
+  return *this;
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+  // Avoid self-assignment.
+  if (this == &RHS) return *this;
+
+  // If the RHS isn't small, clear this vector and then steal its buffer.
+  if (!RHS.isSmall()) {
+    this->destroy_range(this->begin(), this->end());
+    if (!this->isSmall()) free(this->begin());
+    this->BeginX = RHS.BeginX;
+    this->EndX = RHS.EndX;
+    this->CapacityX = RHS.CapacityX;
+    RHS.resetToSmall();
+    return *this;
+  }
+
+  // If we already have sufficient space, assign the common elements, then
+  // destroy any excess.
+  size_t RHSSize = RHS.size();
+  size_t CurSize = this->size();
+  if (CurSize >= RHSSize) {
+    // Assign common elements.
+    iterator NewEnd = this->begin();
+    if (RHSSize)
+      NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
+
+    // Destroy excess elements and trim the bounds.
+    this->destroy_range(NewEnd, this->end());
+    this->setEnd(NewEnd);
+
+    // Clear the RHS.
+    RHS.clear();
+
+    return *this;
+  }
+
+  // If we have to grow to have enough elements, destroy the current elements.
+  // This allows us to avoid copying them during the grow.
+  // FIXME: this may not actually make any sense if we can efficiently move
+  // elements.
+  if (this->capacity() < RHSSize) {
+    // Destroy current elements.
+    this->destroy_range(this->begin(), this->end());
+    this->setEnd(this->begin());
+    CurSize = 0;
+    this->grow(RHSSize);
+  } else if (CurSize) {
+    // Otherwise, use assignment for the already-constructed elements.
+    std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
+  }
+
+  // Move-construct the new elements in place.
+  this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+                           this->begin()+CurSize);
+
+  // Set end.
+  this->setEnd(this->begin()+RHSSize);
+
+  RHS.clear();
+  return *this;
+}
+
+/// Storage for the SmallVector elements which aren't contained in
+/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
+/// element is in the base class. This is specialized for the N=1 and N=0 cases
+/// to avoid allocating unnecessary storage.
+template <typename T, unsigned N>
+struct SmallVectorStorage {
+  typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
+};
+template <typename T> struct SmallVectorStorage<T, 1> {};
+template <typename T> struct SmallVectorStorage<T, 0> {};
+
+/// This is a 'vector' (really, a variable-sized array), optimized
+/// for the case when the array is small.  It contains some number of elements
+/// in-place, which allows it to avoid heap allocation when the actual number of
+/// elements is below that threshold.  This allows normal "small" cases to be
+/// fast without losing generality for large inputs.
+///
+/// Note that this does not attempt to be exception safe.
+///
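+/// A minimal usage sketch (illustrative, not part of the upstream comment):
+/// \code
+///   llvm::SmallVector<unsigned, 8> V = {1, 2, 3}; // lives in inline storage
+///   for (unsigned I = 0; I != 100; ++I)
+///     V.push_back(I);                             // spills to the heap
+/// \endcode
+///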
+template <typename T, unsigned N>
+class SmallVector : public SmallVectorImpl<T> {
+  /// Inline space for elements which aren't stored in the base class.
+  SmallVectorStorage<T, N> Storage;
+
+public:
+  SmallVector() : SmallVectorImpl<T>(N) {}
+
+  ~SmallVector() {
+    // Destroy the constructed elements in the vector.
+    this->destroy_range(this->begin(), this->end());
+  }
+
+  explicit SmallVector(size_t Size, const T &Value = T())
+    : SmallVectorImpl<T>(N) {
+    this->assign(Size, Value);
+  }
+
+  template <typename ItTy,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<ItTy>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
+    this->append(S, E);
+  }
+
+  template <typename RangeTy>
+  explicit SmallVector(const iterator_range<RangeTy> &R)
+      : SmallVectorImpl<T>(N) {
+    this->append(R.begin(), R.end());
+  }
+
+  SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
+    this->assign(IL);
+  }
+
+  SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(RHS);
+  }
+
+  const SmallVector &operator=(const SmallVector &RHS) {
+    SmallVectorImpl<T>::operator=(RHS);
+    return *this;
+  }
+
+  SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(::std::move(RHS));
+  }
+
+  SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(::std::move(RHS));
+  }
+
+  const SmallVector &operator=(SmallVector &&RHS) {
+    SmallVectorImpl<T>::operator=(::std::move(RHS));
+    return *this;
+  }
+
+  const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
+    SmallVectorImpl<T>::operator=(::std::move(RHS));
+    return *this;
+  }
+
+  const SmallVector &operator=(std::initializer_list<T> IL) {
+    this->assign(IL);
+    return *this;
+  }
+};
+
+template <typename T, unsigned N>
+inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
+  return X.capacity_in_bytes();
+}
+
+} // end namespace llvm
+
+namespace std {
+
+  /// Implement std::swap in terms of SmallVector swap.
+  template<typename T>
+  inline void
+  swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
+    LHS.swap(RHS);
+  }
+
+  /// Implement std::swap in terms of SmallVector swap.
+  template<typename T, unsigned N>
+  inline void
+  swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
+    LHS.swap(RHS);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseBitVector.h b/linux-x64/clang/include/llvm/ADT/SparseBitVector.h
new file mode 100644
index 0000000..4cbf40c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseBitVector.h
@@ -0,0 +1,888 @@
+//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseBitVector class.  See the doxygen comment for
+// SparseBitVector for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEBITVECTOR_H
+#define LLVM_ADT_SPARSEBITVECTOR_H
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <iterator>
+#include <list>
+
+namespace llvm {
+
+/// SparseBitVector is an implementation of a bitvector that is sparse by only
+/// storing the elements that have non-zero bits set.  In order to make this
+/// fast for the most common cases, SparseBitVector is implemented as a linked
+/// list of SparseBitVectorElements.  We maintain a pointer to the last
+/// SparseBitVectorElement accessed (in the form of a list iterator), in order
+/// to make multiple in-order test/set constant time after the first one is
+/// executed.  Note that using vectors to store SparseBitVectorElement's does
+/// not work out very well because it causes insertion in the middle to take
+/// enormous amounts of time when a large number of bits are in use.
+/// have better worst cases for insertion in the middle (various balanced trees,
+/// etc) do not perform as well in practice as a linked list with this iterator
+/// kept up to date.  They are also significantly more memory intensive.
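+///
+/// A minimal usage sketch (illustrative, not part of the upstream comment):
+/// \code
+///   llvm::SparseBitVector<> Live;
+///   Live.set(5);
+///   Live.set(100000);               // only two elements are allocated
+///   bool WasSet = Live.test(5);     // true
+///   Live.reset(5);
+///   unsigned NumSet = Live.count(); // 1
+/// \endcode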
+
+template <unsigned ElementSize = 128> struct SparseBitVectorElement {
+public:
+  using BitWord = unsigned long;
+  using size_type = unsigned;
+  enum {
+    BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
+    BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
+    BITS_PER_ELEMENT = ElementSize
+  };
+
+private:
+  // Index of Element in terms of where first bit starts.
+  unsigned ElementIndex;
+  BitWord Bits[BITWORDS_PER_ELEMENT];
+
+  SparseBitVectorElement() {
+    ElementIndex = ~0U;
+    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+  }
+
+public:
+  explicit SparseBitVectorElement(unsigned Idx) {
+    ElementIndex = Idx;
+    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+  }
+
+  // Comparison.
+  bool operator==(const SparseBitVectorElement &RHS) const {
+    if (ElementIndex != RHS.ElementIndex)
+      return false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != RHS.Bits[i])
+        return false;
+    return true;
+  }
+
+  bool operator!=(const SparseBitVectorElement &RHS) const {
+    return !(*this == RHS);
+  }
+
+  // Return the bits that make up word Idx in our element.
+  BitWord word(unsigned Idx) const {
+    assert(Idx < BITWORDS_PER_ELEMENT);
+    return Bits[Idx];
+  }
+
+  unsigned index() const {
+    return ElementIndex;
+  }
+
+  bool empty() const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i])
+        return false;
+    return true;
+  }
+
+  void set(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
+  }
+
+  bool test_and_set(unsigned Idx) {
+    bool old = test(Idx);
+    if (!old) {
+      set(Idx);
+      return true;
+    }
+    return false;
+  }
+
+  void reset(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
+  }
+
+  bool test(unsigned Idx) const {
+    return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
+  }
+
+  size_type count() const {
+    unsigned NumBits = 0;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      NumBits += countPopulation(Bits[i]);
+    return NumBits;
+  }
+
+  /// find_first - Returns the index of the first set bit.
+  int find_first() const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    llvm_unreachable("Illegal empty element");
+  }
+
+  /// find_last - Returns the index of the last set bit.
+  int find_last() const {
+    for (unsigned I = 0; I < BITWORDS_PER_ELEMENT; ++I) {
+      unsigned Idx = BITWORDS_PER_ELEMENT - I - 1;
+      if (Bits[Idx] != 0)
+        return Idx * BITWORD_SIZE + BITWORD_SIZE -
+               countLeadingZeros(Bits[Idx]) - 1;
+    }
+    llvm_unreachable("Illegal empty element");
+  }
+
+  /// find_next - Returns the index of the next set bit starting from the
+  /// "Curr" bit. Returns -1 if the next set bit is not found.
+  int find_next(unsigned Curr) const {
+    if (Curr >= BITS_PER_ELEMENT)
+      return -1;
+
+    unsigned WordPos = Curr / BITWORD_SIZE;
+    unsigned BitPos = Curr % BITWORD_SIZE;
+    assert(WordPos < BITWORDS_PER_ELEMENT &&
+           "Word Position outside of element");
+    BitWord Copy = Bits[WordPos];
+
+    // Mask off previous bits.
+    Copy &= ~0UL << BitPos;
+
+    if (Copy != 0)
+      return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+    // Check subsequent words.
+    for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    return -1;
+  }
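+
+  // Illustrative example: for an element whose set bits are {3, 9},
+  // find_next(3) == 3 (Curr itself is considered), find_next(4) == 9, and
+  // find_next(10) == -1.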
+
+  // Union this element with RHS and return true if this one changed.
+  bool unionWith(const SparseBitVectorElement &RHS) {
+    bool changed = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] |= RHS.Bits[i];
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    return changed;
+  }
+
+  // Return true if we have any bits in common with RHS
+  bool intersects(const SparseBitVectorElement &RHS) const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      if (RHS.Bits[i] & Bits[i])
+        return true;
+    }
+    return false;
+  }
+
+  // Intersect this Element with RHS and return true if this one changed.
+  // BecameZero is set to true if this element became all-zero bits.
+  bool intersectWith(const SparseBitVectorElement &RHS,
+                     bool &BecameZero) {
+    bool changed = false;
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] &= RHS.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    BecameZero = allzero;
+    return changed;
+  }
+
+  // Intersect this Element with the complement of RHS and return true if this
+  // one changed.  BecameZero is set to true if this element became all-zero
+  // bits.
+  bool intersectWithComplement(const SparseBitVectorElement &RHS,
+                               bool &BecameZero) {
+    bool changed = false;
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] &= ~RHS.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    BecameZero = allzero;
+    return changed;
+  }
+
+  // Three argument version of intersectWithComplement that intersects
+  // RHS1 & ~RHS2 into this element
+  void intersectWithComplement(const SparseBitVectorElement &RHS1,
+                               const SparseBitVectorElement &RHS2,
+                               bool &BecameZero) {
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+    }
+    BecameZero = allzero;
+  }
+};
+
+template <unsigned ElementSize = 128>
+class SparseBitVector {
+  using ElementList = std::list<SparseBitVectorElement<ElementSize>>;
+  using ElementListIter = typename ElementList::iterator;
+  using ElementListConstIter = typename ElementList::const_iterator;
+  enum {
+    BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
+  };
+
+  // Pointer to our current Element.
+  ElementListIter CurrElementIter;
+  ElementList Elements;
+
+  // This is like std::lower_bound, except we do linear searching from the
+  // current position.
+  ElementListIter FindLowerBound(unsigned ElementIndex) {
+    if (Elements.empty()) {
+      CurrElementIter = Elements.begin();
+      return Elements.begin();
+    }
+
+    // Make sure our current iterator is valid.
+    if (CurrElementIter == Elements.end())
+      --CurrElementIter;
+
+    // Search from our current iterator, either backwards or forwards,
+    // depending on what element we are looking for.
+    ElementListIter ElementIter = CurrElementIter;
+    if (CurrElementIter->index() == ElementIndex) {
+      return ElementIter;
+    } else if (CurrElementIter->index() > ElementIndex) {
+      while (ElementIter != Elements.begin()
+             && ElementIter->index() > ElementIndex)
+        --ElementIter;
+    } else {
+      while (ElementIter != Elements.end() &&
+             ElementIter->index() < ElementIndex)
+        ++ElementIter;
+    }
+    CurrElementIter = ElementIter;
+    return ElementIter;
+  }
+
+  // Iterator to walk the set bits in the bitmap.  This iterator is a lot
+  // uglier than it otherwise would be, in order to be efficient.
+  class SparseBitVectorIterator {
+  private:
+    bool AtEnd;
+
+    const SparseBitVector<ElementSize> *BitVector = nullptr;
+
+    // Current element inside of bitmap.
+    ElementListConstIter Iter;
+
+    // Current bit number inside of our bitmap.
+    unsigned BitNumber;
+
+    // Current word number inside of our element.
+    unsigned WordNumber;
+
+    // Current bits from the element.
+    typename SparseBitVectorElement<ElementSize>::BitWord Bits;
+
+    // Move our iterator to the first non-zero bit in the bitmap.
+    void AdvanceToFirstNonZero() {
+      if (AtEnd)
+        return;
+      if (BitVector->Elements.empty()) {
+        AtEnd = true;
+        return;
+      }
+      Iter = BitVector->Elements.begin();
+      BitNumber = Iter->index() * ElementSize;
+      unsigned BitPos = Iter->find_first();
+      BitNumber += BitPos;
+      WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+      Bits = Iter->word(WordNumber);
+      Bits >>= BitPos % BITWORD_SIZE;
+    }
+
+    // Move our iterator to the next non-zero bit.
+    void AdvanceToNextNonZero() {
+      if (AtEnd)
+        return;
+
+      while (Bits && !(Bits & 1)) {
+        Bits >>= 1;
+        BitNumber += 1;
+      }
+
+      // See if we ran out of Bits in this word.
+      if (!Bits) {
+        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
+        // If we ran out of set bits in this element, move to next element.
+        if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
+          ++Iter;
+          WordNumber = 0;
+
+          // We may run out of elements in the bitmap.
+          if (Iter == BitVector->Elements.end()) {
+            AtEnd = true;
+            return;
+          }
+          // Set up for next non-zero word in bitmap.
+          BitNumber = Iter->index() * ElementSize;
+          NextSetBitNumber = Iter->find_first();
+          BitNumber += NextSetBitNumber;
+          WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+          Bits = Iter->word(WordNumber);
+          Bits >>= NextSetBitNumber % BITWORD_SIZE;
+        } else {
+          WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
+          Bits = Iter->word(WordNumber);
+          Bits >>= NextSetBitNumber % BITWORD_SIZE;
+          BitNumber = Iter->index() * ElementSize;
+          BitNumber += NextSetBitNumber;
+        }
+      }
+    }
+
+  public:
+    SparseBitVectorIterator() = default;
+
+    SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
+                            bool end = false) : BitVector(RHS) {
+      Iter = BitVector->Elements.begin();
+      BitNumber = 0;
+      Bits = 0;
+      WordNumber = ~0;
+      AtEnd = end;
+      AdvanceToFirstNonZero();
+    }
+
+    // Preincrement.
+    inline SparseBitVectorIterator& operator++() {
+      ++BitNumber;
+      Bits >>= 1;
+      AdvanceToNextNonZero();
+      return *this;
+    }
+
+    // Postincrement.
+    inline SparseBitVectorIterator operator++(int) {
+      SparseBitVectorIterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    // Return the current set bit number.
+    unsigned operator*() const {
+      return BitNumber;
+    }
+
+    bool operator==(const SparseBitVectorIterator &RHS) const {
+      // If they are both at the end, ignore the rest of the fields.
+      if (AtEnd && RHS.AtEnd)
+        return true;
+      // Otherwise they are the same if they have the same bit number and
+      // bitmap.
+      return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
+    }
+
+    bool operator!=(const SparseBitVectorIterator &RHS) const {
+      return !(*this == RHS);
+    }
+  };
+
+public:
+  using iterator = SparseBitVectorIterator;
+
+  SparseBitVector() {
+    CurrElementIter = Elements.begin();
+  }
+
+  // SparseBitVector copy ctor.
+  SparseBitVector(const SparseBitVector &RHS) {
+    ElementListConstIter ElementIter = RHS.Elements.begin();
+    while (ElementIter != RHS.Elements.end()) {
+      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+      ++ElementIter;
+    }
+
+    CurrElementIter = Elements.begin();
+  }
+
+  ~SparseBitVector() = default;
+
+  // Clear.
+  void clear() {
+    Elements.clear();
+  }
+
+  // Assignment
+  SparseBitVector& operator=(const SparseBitVector& RHS) {
+    if (this == &RHS)
+      return *this;
+
+    Elements.clear();
+
+    ElementListConstIter ElementIter = RHS.Elements.begin();
+    while (ElementIter != RHS.Elements.end()) {
+      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+      ++ElementIter;
+    }
+
+    CurrElementIter = Elements.begin();
+
+    return *this;
+  }
+
+  // Test, Reset, and Set a bit in the bitmap.
+  bool test(unsigned Idx) {
+    if (Elements.empty())
+      return false;
+
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+    // If we can't find an element that is supposed to contain this bit, there
+    // is nothing more to do.
+    if (ElementIter == Elements.end() ||
+        ElementIter->index() != ElementIndex)
+      return false;
+    return ElementIter->test(Idx % ElementSize);
+  }
+
+  void reset(unsigned Idx) {
+    if (Elements.empty())
+      return;
+
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+    // If we can't find an element that is supposed to contain this bit, there
+    // is nothing more to do.
+    if (ElementIter == Elements.end() ||
+        ElementIter->index() != ElementIndex)
+      return;
+    ElementIter->reset(Idx % ElementSize);
+
+    // When the element is zeroed out, delete it.
+    if (ElementIter->empty()) {
+      ++CurrElementIter;
+      Elements.erase(ElementIter);
+    }
+  }
+
+  void set(unsigned Idx) {
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter;
+    if (Elements.empty()) {
+      ElementIter = Elements.emplace(Elements.end(), ElementIndex);
+    } else {
+      ElementIter = FindLowerBound(ElementIndex);
+
+      if (ElementIter == Elements.end() ||
+          ElementIter->index() != ElementIndex) {
+        // We may have hit the beginning of our SparseBitVector, in which case
+        // we need to insert right after this element. That requires moving the
+        // iterator forward one, because insert() inserts before its argument.
+        if (ElementIter != Elements.end() &&
+            ElementIter->index() < ElementIndex)
+          ++ElementIter;
+        ElementIter = Elements.emplace(ElementIter, ElementIndex);
+      }
+    }
+    CurrElementIter = ElementIter;
+
+    ElementIter->set(Idx % ElementSize);
+  }
+
+  bool test_and_set(unsigned Idx) {
+    bool old = test(Idx);
+    if (!old) {
+      set(Idx);
+      return true;
+    }
+    return false;
+  }
+
+  bool operator!=(const SparseBitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  bool operator==(const SparseBitVector &RHS) const {
+    ElementListConstIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
+         ++Iter1, ++Iter2) {
+      if (*Iter1 != *Iter2)
+        return false;
+    }
+    return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
+  }
+
+  // Union our bitmap with the RHS and return true if we changed.
+  bool operator|=(const SparseBitVector &RHS) {
+    if (this == &RHS)
+      return false;
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // If RHS is empty, we are done
+    if (RHS.Elements.empty())
+      return false;
+
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
+        Elements.insert(Iter1, *Iter2);
+        ++Iter2;
+        changed = true;
+      } else if (Iter1->index() == Iter2->index()) {
+        changed |= Iter1->unionWith(*Iter2);
+        ++Iter1;
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  // Intersect our bitmap with the RHS and return true if ours changed.
+  bool operator&=(const SparseBitVector &RHS) {
+    if (this == &RHS)
+      return false;
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // Check if both bitmaps are empty.
+    if (Elements.empty() && RHS.Elements.empty())
+      return false;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end()) {
+        CurrElementIter = Elements.begin();
+        return changed;
+      }
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero;
+        changed |= Iter1->intersectWith(*Iter2, BecameZero);
+        if (BecameZero) {
+          ElementListIter IterTmp = Iter1;
+          ++Iter1;
+          Elements.erase(IterTmp);
+        } else {
+          ++Iter1;
+        }
+        ++Iter2;
+      } else {
+        ElementListIter IterTmp = Iter1;
+        ++Iter1;
+        Elements.erase(IterTmp);
+        changed = true;
+      }
+    }
+    if (Iter1 != Elements.end()) {
+      Elements.erase(Iter1, Elements.end());
+      changed = true;
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  // Intersect our bitmap with the complement of the RHS and return true
+  // if ours changed.
+  bool intersectWithComplement(const SparseBitVector &RHS) {
+    if (this == &RHS) {
+      if (!empty()) {
+        clear();
+        return true;
+      }
+      return false;
+    }
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // If either our bitmap or RHS is empty, we are done
+    if (Elements.empty() || RHS.Elements.empty())
+      return false;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end()) {
+        CurrElementIter = Elements.begin();
+        return changed;
+      }
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero;
+        changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
+        if (BecameZero) {
+          ElementListIter IterTmp = Iter1;
+          ++Iter1;
+          Elements.erase(IterTmp);
+        } else {
+          ++Iter1;
+        }
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) const {
+    return intersectWithComplement(*RHS);
+  }
+
+  //  Three argument version of intersectWithComplement.
+  //  Result of RHS1 & ~RHS2 is stored into this bitmap.
+  void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
+                               const SparseBitVector<ElementSize> &RHS2)
+  {
+    if (this == &RHS1) {
+      intersectWithComplement(RHS2);
+      return;
+    } else if (this == &RHS2) {
+      SparseBitVector RHS2Copy(RHS2);
+      intersectWithComplement(RHS1, RHS2Copy);
+      return;
+    }
+
+    Elements.clear();
+    CurrElementIter = Elements.begin();
+    ElementListConstIter Iter1 = RHS1.Elements.begin();
+    ElementListConstIter Iter2 = RHS2.Elements.begin();
+
+    // If RHS1 is empty, we are done
+    // If RHS2 is empty, we still have to copy RHS1
+    if (RHS1.Elements.empty())
+      return;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS2.Elements.end()) {
+      if (Iter1 == RHS1.Elements.end())
+        return;
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero = false;
+        Elements.emplace_back(Iter1->index());
+        Elements.back().intersectWithComplement(*Iter1, *Iter2, BecameZero);
+        if (BecameZero)
+          Elements.pop_back();
+        ++Iter1;
+        ++Iter2;
+      } else {
+        Elements.push_back(*Iter1++);
+      }
+    }
+
+    // copy the remaining elements
+    std::copy(Iter1, RHS1.Elements.end(), std::back_inserter(Elements));
+  }
+
+  void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
+                               const SparseBitVector<ElementSize> *RHS2) {
+    intersectWithComplement(*RHS1, *RHS2);
+  }
+
+  bool intersects(const SparseBitVector<ElementSize> *RHS) const {
+    return intersects(*RHS);
+  }
+
+  // Return true if we share any bits in common with RHS
+  bool intersects(const SparseBitVector<ElementSize> &RHS) const {
+    ElementListConstIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // Check if both bitmaps are empty.
+    if (Elements.empty() && RHS.Elements.empty())
+      return false;
+
+    // Loop through, stopping as soon as we hit any bits in common.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end())
+        return false;
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        if (Iter1->intersects(*Iter2))
+          return true;
+        ++Iter1;
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    return false;
+  }
+
+  // Return true iff all bits set in RHS are also set in this
+  // SparseBitVector, i.e. this is a superset of RHS.
+  bool contains(const SparseBitVector<ElementSize> &RHS) const {
+    SparseBitVector<ElementSize> Result(*this);
+    Result &= RHS;
+    return (Result == RHS);
+  }
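+
+  // Illustrative example: if this set is {1, 2, 3} and RHS is {2, 3}, then
+  // contains(RHS) returns true; with the roles swapped it returns false.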
+
+  // Return the first set bit in the bitmap.  Return -1 if no bits are set.
+  int find_first() const {
+    if (Elements.empty())
+      return -1;
+    const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
+    return (First.index() * ElementSize) + First.find_first();
+  }
+
+  // Return the last set bit in the bitmap.  Return -1 if no bits are set.
+  int find_last() const {
+    if (Elements.empty())
+      return -1;
+    const SparseBitVectorElement<ElementSize> &Last = *(Elements.rbegin());
+    return (Last.index() * ElementSize) + Last.find_last();
+  }
+
+  // Return true if the SparseBitVector is empty
+  bool empty() const {
+    return Elements.empty();
+  }
+
+  unsigned count() const {
+    unsigned BitCount = 0;
+    for (ElementListConstIter Iter = Elements.begin();
+         Iter != Elements.end();
+         ++Iter)
+      BitCount += Iter->count();
+
+    return BitCount;
+  }
+
+  iterator begin() const {
+    return iterator(this);
+  }
+
+  iterator end() const {
+    return iterator(this, true);
+  }
+};
+
+// Convenience functions to allow Or and And without dereferencing in the user
+// code.
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> &LHS,
+                        const SparseBitVector<ElementSize> *RHS) {
+  return LHS |= *RHS;
+}
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> *LHS,
+                        const SparseBitVector<ElementSize> &RHS) {
+  return LHS->operator|=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> *LHS,
+                        const SparseBitVector<ElementSize> &RHS) {
+  return LHS->operator&=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> &LHS,
+                        const SparseBitVector<ElementSize> *RHS) {
+  return LHS &= *RHS;
+}
+
+// Convenience functions for infix union, intersection, difference operators.
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator|(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result(LHS);
+  Result |= RHS;
+  return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator&(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result(LHS);
+  Result &= RHS;
+  return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator-(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result;
+  Result.intersectWithComplement(LHS, RHS);
+  return Result;
+}
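+
+// Illustrative sketch of the infix operators (not part of the header):
+//   SparseBitVector<> A, B;
+//   A.set(1); A.set(2);
+//   B.set(2); B.set(3);
+//   SparseBitVector<> U = A | B; // {1, 2, 3}
+//   SparseBitVector<> I = A & B; // {2}
+//   SparseBitVector<> D = A - B; // {1}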
+
+// Dump a SparseBitVector to a stream
+template <unsigned ElementSize>
+void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
+  out << "[";
+
+  typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
+    be = LHS.end();
+  if (bi != be) {
+    out << *bi;
+    for (++bi; bi != be; ++bi) {
+      out << " " << *bi;
+    }
+  }
+  out << "]\n";
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEBITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
new file mode 100644
index 0000000..3c86376
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
@@ -0,0 +1,523 @@
+//===- llvm/ADT/SparseMultiSet.h - Sparse multiset --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseMultiSet class, which adds multiset behavior to
+// the SparseSet.
+//
+// A sparse multiset holds a small number of objects identified by integer keys
+// from a moderately sized universe. The sparse multiset uses more memory than
+// other containers in order to provide faster operations. Any key can map to
+// multiple values. A SparseMultiSetNode class is provided, which serves as a
+// convenient base class for the contents of a SparseMultiSet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEMULTISET_H
+#define LLVM_ADT_SPARSEMULTISET_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// Fast multiset implementation for objects that can be identified by small
+/// unsigned keys.
+///
+/// SparseMultiSet allocates memory proportional to the size of the key
+/// universe, so it is not recommended for building composite data structures.
+/// It is useful for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseMultiSet provides a constant-time
+/// clear() that is as fast as a vector's.  The find(), insert(), and erase()
+/// operations are all constant time, and typically faster than a hash table.
+/// The iteration order doesn't depend on numerical key values; it depends only
+/// on the order of insert() and erase() operations, i.e. iteration follows
+/// insertion order. Iteration is only provided over elements of equivalent
+/// keys, but iterators are bidirectional.
+///
+/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast iteration
+/// independent of the size of the universe.
+///
+/// SparseMultiSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector.  Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT template
+/// parameter provides a space/speed tradeoff for sets holding many elements.
+///
+/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
+/// sparse array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
+/// lines, but the sparse array is 4x smaller.  N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// Multiset behavior is implemented with doubly linked lists of values that
+/// are inlined in the dense vector. SparseMultiSet is a good choice when
+/// one desires a growable number of entries per key, as it will retain the
+/// SparseSet algorithmic properties despite being growable. Thus, it is often a
+/// better choice than a SparseSet of growable containers or a vector of
+/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
+/// the iterators don't point to the element erased), allowing for more
+/// intuitive and fast removal.
+///
+/// @tparam ValueT      The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT     An unsigned integer type. See above.
+///
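+/// A minimal usage sketch (illustrative; insert() and find() are declared
+/// later in this header):
+/// \code
+///   llvm::SparseMultiSet<unsigned> Set;
+///   Set.setUniverse(128);  // keys must be < 128
+///   Set.insert(17);
+///   Set.insert(17);        // duplicate keys are allowed
+///   for (auto I = Set.find(17); I != Set.end(); ++I)
+///     (void)*I;            // visits both entries for key 17
+/// \endcode
+///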
+template<typename ValueT,
+         typename KeyFunctorT = identity<unsigned>,
+         typename SparseT = uint8_t>
+class SparseMultiSet {
+  static_assert(std::numeric_limits<SparseT>::is_integer &&
+                !std::numeric_limits<SparseT>::is_signed,
+                "SparseT must be an unsigned integer type");
+
+  /// The actual data that's stored, as a doubly-linked list implemented via
+  /// indices into the dense vector.  The doubly linked list is circular in its
+  /// Prev indices and INVALID-terminated in its Next indices, which provides
+  /// efficient access to list tails. These nodes can also be tombstones, in
+  /// which case they are actually nodes in a singly-linked freelist of
+  /// recyclable slots.
+  struct SMSNode {
+    static const unsigned INVALID = ~0U;
+
+    ValueT Data;
+    unsigned Prev;
+    unsigned Next;
+
+    SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) {}
+
+    /// List tails have invalid Nexts.
+    bool isTail() const {
+      return Next == INVALID;
+    }
+
+    /// Whether this node is a tombstone node, and thus is in our freelist.
+    bool isTombstone() const {
+      return Prev == INVALID;
+    }
+
+    /// Since the list is circular in Prev, all non-tombstone nodes have a valid
+    /// Prev.
+    bool isValid() const { return Prev != INVALID; }
+  };
+
+  using KeyT = typename KeyFunctorT::argument_type;
+  using DenseT = SmallVector<SMSNode, 8>;
+  DenseT Dense;
+  SparseT *Sparse = nullptr;
+  unsigned Universe = 0;
+  KeyFunctorT KeyIndexOf;
+  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+  /// We have a built-in recycler for reusing tombstone slots. This recycler
+  /// puts a singly-linked free list into tombstone slots, which allows quick
+  /// erasure, preserves iterators, and keeps the dense vector's size bounded.
+  unsigned FreelistIdx = SMSNode::INVALID;
+  unsigned NumFree = 0;
+
+  unsigned sparseIndex(const ValueT &Val) const {
+    assert(ValIndexOf(Val) < Universe &&
+           "Invalid key in set. Did object mutate?");
+    return ValIndexOf(Val);
+  }
+  unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
+
+  /// Whether the given entry is the head of the list. A list head's Prev
+  /// pointer points to the tail of the list, allowing for efficient access to
+  /// the list tail. D must be a valid entry node.
+  bool isHead(const SMSNode &D) const {
+    assert(D.isValid() && "Invalid node for head");
+    return Dense[D.Prev].isTail();
+  }
+
+  /// Whether the given entry is a singleton entry, i.e. the only entry with
+  /// that key.
+  bool isSingleton(const SMSNode &N) const {
+    assert(N.isValid() && "Invalid node for singleton");
+    // Is N its own predecessor?
+    return &Dense[N.Prev] == &N;
+  }
+
+  /// Add in the given SMSNode. Uses a free entry in our freelist if
+  /// available. Returns the index of the added node.
+  unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
+    if (NumFree == 0) {
+      Dense.push_back(SMSNode(V, Prev, Next));
+      return Dense.size() - 1;
+    }
+
+    // Peel off a free slot
+    unsigned Idx = FreelistIdx;
+    unsigned NextFree = Dense[Idx].Next;
+    assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
+
+    Dense[Idx] = SMSNode(V, Prev, Next);
+    FreelistIdx = NextFree;
+    --NumFree;
+    return Idx;
+  }
+
+  /// Make the current index a new tombstone. Pushes it onto the freelist.
+  void makeTombstone(unsigned Idx) {
+    Dense[Idx].Prev = SMSNode::INVALID;
+    Dense[Idx].Next = FreelistIdx;
+    FreelistIdx = Idx;
+    ++NumFree;
+  }
+
+public:
+  using value_type = ValueT;
+  using reference = ValueT &;
+  using const_reference = const ValueT &;
+  using pointer = ValueT *;
+  using const_pointer = const ValueT *;
+  using size_type = unsigned;
+
+  SparseMultiSet() = default;
+  SparseMultiSet(const SparseMultiSet &) = delete;
+  SparseMultiSet &operator=(const SparseMultiSet &) = delete;
+  ~SparseMultiSet() { free(Sparse); }
+
+  /// Set the universe size which determines the largest key the set can hold.
+  /// The universe must be sized before any elements can be added.
+  ///
+  /// @param U Universe size. All object keys must be less than U.
+  ///
+  void setUniverse(unsigned U) {
+    // It's not hard to resize the universe on a non-empty set, but it doesn't
+    // seem like a likely use case, so we can add that code when we need it.
+    assert(empty() && "Can only resize universe on an empty map");
+    // Hysteresis prevents needless reallocations.
+    if (U >= Universe/4 && U <= Universe)
+      return;
+    free(Sparse);
+    // The Sparse array doesn't actually need to be initialized, so malloc
+    // would be enough here, but that will cause tools like valgrind to
+    // complain about branching on uninitialized data.
+    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+    Universe = U;
+  }
+
+  /// Our iterators are iterators over the collection of objects that share a
+  /// key.
+  template<typename SMSPtrTy>
+  class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
+                                             ValueT> {
+    friend class SparseMultiSet;
+
+    SMSPtrTy SMS;
+    unsigned Idx;
+    unsigned SparseIdx;
+
+    iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
+      : SMS(P), Idx(I), SparseIdx(SI) {}
+
+    /// Whether our iterator has fallen outside our dense vector.
+    bool isEnd() const {
+      if (Idx == SMSNode::INVALID)
+        return true;
+
+      assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
+      return false;
+    }
+
+    /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid.
+    bool isKeyed() const { return SparseIdx < SMS->Universe; }
+
+    unsigned Prev() const { return SMS->Dense[Idx].Prev; }
+    unsigned Next() const { return SMS->Dense[Idx].Next; }
+
+    void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
+    void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
+
+  public:
+    using super = std::iterator<std::bidirectional_iterator_tag, ValueT>;
+    using value_type = typename super::value_type;
+    using difference_type = typename super::difference_type;
+    using pointer = typename super::pointer;
+    using reference = typename super::reference;
+
+    reference operator*() const {
+      assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
+             "Dereferencing iterator of invalid key or index");
+
+      return SMS->Dense[Idx].Data;
+    }
+    pointer operator->() const { return &operator*(); }
+
+    /// Comparison operators
+    bool operator==(const iterator_base &RHS) const {
+      // end compares equal
+      if (SMS == RHS.SMS && Idx == RHS.Idx) {
+        assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
+               "Same dense entry, but different keys?");
+        return true;
+      }
+
+      return false;
+    }
+
+    bool operator!=(const iterator_base &RHS) const {
+      return !operator==(RHS);
+    }
+
+    /// Increment and decrement operators
+    iterator_base &operator--() { // predecrement - Back up
+      assert(isKeyed() && "Decrementing an invalid iterator");
+      assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
+             "Decrementing head of list");
+
+      // If we're at the end, then issue a new find()
+      if (isEnd())
+        Idx = SMS->findIndex(SparseIdx).Prev();
+      else
+        Idx = Prev();
+
+      return *this;
+    }
+    iterator_base &operator++() { // preincrement - Advance
+      assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
+      Idx = Next();
+      return *this;
+    }
+    iterator_base operator--(int) { // postdecrement
+      iterator_base I(*this);
+      --*this;
+      return I;
+    }
+    iterator_base operator++(int) { // postincrement
+      iterator_base I(*this);
+      ++*this;
+      return I;
+    }
+  };
+
+  using iterator = iterator_base<SparseMultiSet *>;
+  using const_iterator = iterator_base<const SparseMultiSet *>;
+
+  // Convenience types
+  using RangePair = std::pair<iterator, iterator>;
+
+  /// Returns an iterator past this container. Note that such an iterator cannot
+  /// be decremented, but will compare equal to other end iterators.
+  iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
+  const_iterator end() const {
+    return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
+  }
+
+  /// Returns true if the set is empty.
+  ///
+  /// This is not the same as BitVector::empty().
+  ///
+  bool empty() const { return size() == 0; }
+
+  /// Returns the number of elements in the set.
+  ///
+  /// This is not the same as BitVector::size() which returns the size of the
+  /// universe.
+  ///
+  size_type size() const {
+    assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
+    return Dense.size() - NumFree;
+  }
+
+  /// Clears the set.  This is a very fast constant time operation.
+  ///
+  void clear() {
+    // Sparse does not need to be cleared, see find().
+    Dense.clear();
+    NumFree = 0;
+    FreelistIdx = SMSNode::INVALID;
+  }
+
+  /// Find an element by its index.
+  ///
+  /// @param   Idx A valid index to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator findIndex(unsigned Idx) {
+    assert(Idx < Universe && "Key out of range");
+    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+    for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
+      const unsigned FoundIdx = sparseIndex(Dense[i]);
+      // Check that we're pointing at the correct entry and that it is the head
+      // of a valid list.
+      if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
+        return iterator(this, i, Idx);
+      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
+      if (!Stride)
+        break;
+    }
+    return end();
+  }
+
+  /// Find an element by its key.
+  ///
+  /// @param   Key A valid key to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator find(const KeyT &Key) {
+    return findIndex(KeyIndexOf(Key));
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
+    return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
+  }
+
+  /// Returns the number of elements identified by Key. This runs in time
+  /// linear in the number of elements with that key.
+  size_type count(const KeyT &Key) const {
+    unsigned Ret = 0;
+    for (const_iterator It = find(Key); It != end(); ++It)
+      ++Ret;
+
+    return Ret;
+  }
+
+  /// Returns true if this set contains an element identified by Key.
+  bool contains(const KeyT &Key) const {
+    return find(Key) != end();
+  }
+
+  /// Return an iterator to the head or tail of the list of elements sharing
+  /// the given key, or end() if there are none.
+  iterator getHead(const KeyT &Key) { return find(Key); }
+  iterator getTail(const KeyT &Key) {
+    iterator I = find(Key);
+    if (I != end())
+      I = iterator(this, I.Prev(), KeyIndexOf(Key));
+    return I;
+  }
+
+  /// The bounds of the range of items sharing Key K. First member is the head
+  /// of the list, and the second member is a decrementable end iterator for
+  /// that key.
+  RangePair equal_range(const KeyT &K) {
+    iterator B = find(K);
+    iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
+    return make_pair(B, E);
+  }
+
+  /// Insert a new element at the tail of the subset list. Returns an iterator
+  /// to the newly added entry.
+  iterator insert(const ValueT &Val) {
+    unsigned Idx = sparseIndex(Val);
+    iterator I = findIndex(Idx);
+
+    unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
+
+    if (I == end()) {
+      // Make a singleton list
+      Sparse[Idx] = NodeIdx;
+      Dense[NodeIdx].Prev = NodeIdx;
+      return iterator(this, NodeIdx, Idx);
+    }
+
+    // Stick it at the end.
+    unsigned HeadIdx = I.Idx;
+    unsigned TailIdx = I.Prev();
+    Dense[TailIdx].Next = NodeIdx;
+    Dense[HeadIdx].Prev = NodeIdx;
+    Dense[NodeIdx].Prev = TailIdx;
+
+    return iterator(this, NodeIdx, Idx);
+  }
+
+  /// Erases an existing element identified by a valid iterator.
+  ///
+  /// This invalidates iterators pointing at the same entry, but erase() returns
+  /// an iterator pointing to the next element in the subset's list. This makes
+  /// it possible to erase selected elements while iterating over the subset:
+  ///
+  ///   tie(I, E) = Set.equal_range(Key);
+  ///   while (I != E)
+  ///     if (test(*I))
+  ///       I = Set.erase(I);
+  ///     else
+  ///       ++I;
+  ///
+  /// Note that if the last element in the subset list is erased, this will
+  /// return an end iterator which can be decremented to get the new tail (if it
+  /// exists):
+  ///
+  ///  tie(B, I) = Set.equal_range(Key);
+  ///  for (bool isBegin = B == I; !isBegin; /* empty */) {
+  ///    isBegin = (--I) == B;
+///    if (test(*I))
+  ///      break;
+  ///    I = erase(I);
+  ///  }
+  iterator erase(iterator I) {
+    assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
+           "erasing invalid/end/tombstone iterator");
+
+    // First, unlink the node from its list. Then turn its slot into a
+    // tombstone so the freelist can recycle it.
+    iterator NextI = unlink(Dense[I.Idx]);
+
+    // Put in a tombstone.
+    makeTombstone(I.Idx);
+
+    return NextI;
+  }
+
+  /// Erase all elements with the given key. This invalidates all
+  /// iterators of that key.
+  void eraseAll(const KeyT &K) {
+    for (iterator I = find(K); I != end(); /* empty */)
+      I = erase(I);
+  }
+
+private:
+  /// Unlink the node from its list. Returns the next node in the list.
+  iterator unlink(const SMSNode &N) {
+    if (isSingleton(N)) {
+      // Singleton is already unlinked
+      assert(N.Next == SMSNode::INVALID && "Singleton has next?");
+      return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
+    }
+
+    if (isHead(N)) {
+      // If we're the head, then update the sparse array and our next.
+      Sparse[sparseIndex(N)] = N.Next;
+      Dense[N.Next].Prev = N.Prev;
+      return iterator(this, N.Next, ValIndexOf(N.Data));
+    }
+
+    if (N.isTail()) {
+      // If we're the tail, then update our head and our previous.
+      findIndex(sparseIndex(N)).setPrev(N.Prev);
+      Dense[N.Prev].Next = N.Next;
+
+      // Give back an end iterator that can be decremented
+      iterator I(this, N.Prev, ValIndexOf(N.Data));
+      return ++I;
+    }
+
+    // Otherwise, just drop us
+    Dense[N.Next].Prev = N.Prev;
+    Dense[N.Prev].Next = N.Next;
+    return iterator(this, N.Next, ValIndexOf(N.Data));
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEMULTISET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseSet.h b/linux-x64/clang/include/llvm/ADT/SparseSet.h
new file mode 100644
index 0000000..74cc6da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseSet.h
@@ -0,0 +1,316 @@
+//===- llvm/ADT/SparseSet.h - Sparse set ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseSet class derived from the version described in
+// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec.  1993.
+//
+// A sparse set holds a small number of objects identified by integer keys from
+// a moderately sized universe. The sparse set uses more memory than other
+// containers in order to provide faster operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
+///
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
+///
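+/// For example, a hypothetical fat value type that exposes its own index:
+///
+///   struct Node {
+///     unsigned ID;            // the small integer key
+///     unsigned getSparseSetIndex() const { return ID; }
+///   };
+///   SparseSet<Node> Nodes;    // indexed via Node::getSparseSetIndex()
+///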
+template<typename ValueT>
+struct SparseSetValTraits {
+  static unsigned getValIndex(const ValueT &Val) {
+    return Val.getSparseSetIndex();
+  }
+};
+
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
+///
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+  unsigned operator()(const ValueT &Val) const {
+    return SparseSetValTraits<ValueT>::getValIndex(Val);
+  }
+};
+
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+  unsigned operator()(const KeyT &Key) const {
+    return KeyFunctorT()(Key);
+  }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures.  It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector.  The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table.  The iteration order doesn't depend on numerical key values, it only
+/// depends on the order of insert() and erase() operations.  When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector.  Most of the memory is used by
+/// the sparse array which is the size of the key universe.  The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller.  N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
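+/// A minimal usage sketch (illustrative universe size):
+///
+///   SparseSet<unsigned> Set;
+///   Set.setUniverse(64);      // all keys must be < 64
+///   Set.insert(17);
+///   if (Set.count(17))
+///     Set.erase(17);
+///   Set.clear();              // constant time, regardless of size
+///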
+/// @tparam ValueT      The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT     An unsigned integer type. See above.
+///
+template<typename ValueT,
+         typename KeyFunctorT = identity<unsigned>,
+         typename SparseT = uint8_t>
+class SparseSet {
+  static_assert(std::numeric_limits<SparseT>::is_integer &&
+                !std::numeric_limits<SparseT>::is_signed,
+                "SparseT must be an unsigned integer type");
+
+  using KeyT = typename KeyFunctorT::argument_type;
+  using DenseT = SmallVector<ValueT, 8>;
+  using size_type = unsigned;
+  DenseT Dense;
+  SparseT *Sparse = nullptr;
+  unsigned Universe = 0;
+  KeyFunctorT KeyIndexOf;
+  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+public:
+  using value_type = ValueT;
+  using reference = ValueT &;
+  using const_reference = const ValueT &;
+  using pointer = ValueT *;
+  using const_pointer = const ValueT *;
+
+  SparseSet() = default;
+  SparseSet(const SparseSet &) = delete;
+  SparseSet &operator=(const SparseSet &) = delete;
+  ~SparseSet() { free(Sparse); }
+
+  /// setUniverse - Set the universe size which determines the largest key the
+  /// set can hold.  The universe must be sized before any elements can be
+  /// added.
+  ///
+  /// @param U Universe size. All object keys must be less than U.
+  ///
+  void setUniverse(unsigned U) {
+    // It's not hard to resize the universe on a non-empty set, but it doesn't
+    // seem like a likely use case, so we can add that code when we need it.
+    assert(empty() && "Can only resize universe on an empty map");
+    // Hysteresis prevents needless reallocations.
+    if (U >= Universe/4 && U <= Universe)
+      return;
+    free(Sparse);
+    // The Sparse array doesn't actually need to be initialized, so malloc
+    // would be enough here, but that will cause tools like valgrind to
+    // complain about branching on uninitialized data.
+    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+    Universe = U;
+  }
+
+  // Import trivial vector stuff from DenseT.
+  using iterator = typename DenseT::iterator;
+  using const_iterator = typename DenseT::const_iterator;
+
+  const_iterator begin() const { return Dense.begin(); }
+  const_iterator end() const { return Dense.end(); }
+  iterator begin() { return Dense.begin(); }
+  iterator end() { return Dense.end(); }
+
+  /// empty - Returns true if the set is empty.
+  ///
+  /// This is not the same as BitVector::empty().
+  ///
+  bool empty() const { return Dense.empty(); }
+
+  /// size - Returns the number of elements in the set.
+  ///
+  /// This is not the same as BitVector::size() which returns the size of the
+  /// universe.
+  ///
+  size_type size() const { return Dense.size(); }
+
+  /// clear - Clears the set.  This is a very fast constant time operation.
+  ///
+  void clear() {
+    // Sparse does not need to be cleared, see find().
+    Dense.clear();
+  }
+
+  /// findIndex - Find an element by its index.
+  ///
+  /// @param   Idx A valid index to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator findIndex(unsigned Idx) {
+    assert(Idx < Universe && "Key out of range");
+    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
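+    // Sparse is never cleared (see clear()), so Sparse[Idx] may hold a stale
+    // index. Treat it as a hint and validate it against the dense vector
+    // before trusting it.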
+    for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+      const unsigned FoundIdx = ValIndexOf(Dense[i]);
+      assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+      if (Idx == FoundIdx)
+        return begin() + i;
+      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
+      if (!Stride)
+        break;
+    }
+    return end();
+  }
+
+  /// find - Find an element by its key.
+  ///
+  /// @param   Key A valid key to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator find(const KeyT &Key) {
+    return findIndex(KeyIndexOf(Key));
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
+  }
+
+  /// count - Returns 1 if this set contains an element identified by Key,
+  /// 0 otherwise.
+  ///
+  size_type count(const KeyT &Key) const {
+    return find(Key) == end() ? 0 : 1;
+  }
+
+  /// insert - Attempts to insert a new element.
+  ///
+  /// If Val is successfully inserted, return (I, true), where I is an iterator
+  /// pointing to the newly inserted element.
+  ///
+  /// If the set already contains an element with the same key as Val, return
+  /// (I, false), where I is an iterator pointing to the existing element.
+  ///
+  /// Insertion invalidates all iterators.
+  ///
+  std::pair<iterator, bool> insert(const ValueT &Val) {
+    unsigned Idx = ValIndexOf(Val);
+    iterator I = findIndex(Idx);
+    if (I != end())
+      return std::make_pair(I, false);
+    Sparse[Idx] = size();
+    Dense.push_back(Val);
+    return std::make_pair(end() - 1, true);
+  }
+
+  /// array subscript - If an element already exists with this key, return it.
+  /// Otherwise, automatically construct a new value from Key, insert it,
+  /// and return the newly inserted element.
+  ValueT &operator[](const KeyT &Key) {
+    return *insert(ValueT(Key)).first;
+  }
+
+  ValueT pop_back_val() {
+    // Sparse does not need to be cleared, see find().
+    return Dense.pop_back_val();
+  }
+
+  /// erase - Erases an existing element identified by a valid iterator.
+  ///
+  /// This invalidates all iterators, but erase() returns an iterator pointing
+  /// to the next element.  This makes it possible to erase selected elements
+  /// while iterating over the set:
+  ///
+  ///   for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+  ///     if (test(*I))
+  ///       I = Set.erase(I);
+  ///     else
+  ///       ++I;
+  ///
+  /// Note that end() changes when elements are erased, unlike std::list.
+  ///
+  iterator erase(iterator I) {
+    assert(unsigned(I - begin()) < size() && "Invalid iterator");
+    if (I != end() - 1) {
+      *I = Dense.back();
+      unsigned BackIdx = ValIndexOf(Dense.back());
+      assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+      Sparse[BackIdx] = I - begin();
+    }
+    // This depends on SmallVector::pop_back() not invalidating iterators.
+    // std::vector::pop_back() doesn't give that guarantee.
+    Dense.pop_back();
+    return I;
+  }
+
+  /// erase - Erases an element identified by Key, if it exists.
+  ///
+  /// @param   Key The key identifying the element to erase.
+  /// @returns True when an element was erased, false if no element was found.
+  ///
+  bool erase(const KeyT &Key) {
+    iterator I = find(Key);
+    if (I == end())
+      return false;
+    erase(I);
+    return true;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/Statistic.h b/linux-x64/clang/include/llvm/ADT/Statistic.h
new file mode 100644
index 0000000..3a08997
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Statistic.h
@@ -0,0 +1,219 @@
+//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the 'Statistic' class, which is designed to be an easy way
+// to expose various metrics from passes.  These statistics are printed at the
+// end of a run (from llvm_shutdown), when the -stats command line option is
+// passed on the command line.
+//
+// This is useful for reporting information like the number of instructions
+// simplified, optimized or removed by various transformations, like this:
+//
+// static Statistic NumInstsKilled("gcse", "Number of instructions killed");
+//
+// Later, in the code: ++NumInstsKilled;
+//
+// NOTE: Statistics *must* be declared as global variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STATISTIC_H
+#define LLVM_ADT_STATISTIC_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
+#include <atomic>
+#include <memory>
+#include <vector>
+
+// Determine whether statistics should be enabled. We must do it here rather
+// than in CMake because multi-config generators cannot determine this at
+// configure time.
+#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
+#define LLVM_ENABLE_STATS 1
+#endif
+
+namespace llvm {
+
+class raw_ostream;
+class raw_fd_ostream;
+class StringRef;
+
+class Statistic {
+public:
+  const char *DebugType;
+  const char *Name;
+  const char *Desc;
+  std::atomic<unsigned> Value;
+  std::atomic<bool> Initialized;
+
+  unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
+  const char *getDebugType() const { return DebugType; }
+  const char *getName() const { return Name; }
+  const char *getDesc() const { return Desc; }
+
+  /// construct - This should only be called for non-global statistics.
+  void construct(const char *debugtype, const char *name, const char *desc) {
+    DebugType = debugtype;
+    Name = name;
+    Desc = desc;
+    Value = 0;
+    Initialized = false;
+  }
+
+  // Allow use of this class as the value itself.
+  operator unsigned() const { return getValue(); }
+
+#if LLVM_ENABLE_STATS
+  const Statistic &operator=(unsigned Val) {
+    Value.store(Val, std::memory_order_relaxed);
+    return init();
+  }
+
+  const Statistic &operator++() {
+    Value.fetch_add(1, std::memory_order_relaxed);
+    return init();
+  }
+
+  unsigned operator++(int) {
+    init();
+    return Value.fetch_add(1, std::memory_order_relaxed);
+  }
+
+  const Statistic &operator--() {
+    Value.fetch_sub(1, std::memory_order_relaxed);
+    return init();
+  }
+
+  unsigned operator--(int) {
+    init();
+    return Value.fetch_sub(1, std::memory_order_relaxed);
+  }
+
+  const Statistic &operator+=(unsigned V) {
+    if (V == 0)
+      return *this;
+    Value.fetch_add(V, std::memory_order_relaxed);
+    return init();
+  }
+
+  const Statistic &operator-=(unsigned V) {
+    if (V == 0)
+      return *this;
+    Value.fetch_sub(V, std::memory_order_relaxed);
+    return init();
+  }
+
+  void updateMax(unsigned V) {
+    unsigned PrevMax = Value.load(std::memory_order_relaxed);
+    // Keep trying to update max until we succeed or another thread produces
+    // a bigger max than us.
+    while (V > PrevMax && !Value.compare_exchange_weak(
+                              PrevMax, V, std::memory_order_relaxed)) {
+    }
+    init();
+  }
+
+#else  // Statistics are disabled in release builds.
+
+  const Statistic &operator=(unsigned Val) {
+    return *this;
+  }
+
+  const Statistic &operator++() {
+    return *this;
+  }
+
+  unsigned operator++(int) {
+    return 0;
+  }
+
+  const Statistic &operator--() {
+    return *this;
+  }
+
+  unsigned operator--(int) {
+    return 0;
+  }
+
+  const Statistic &operator+=(unsigned V) {
+    return *this;
+  }
+
+  const Statistic &operator-=(unsigned V) {
+    return *this;
+  }
+
+  void updateMax(unsigned V) {}
+
+#endif  // LLVM_ENABLE_STATS
+
+protected:
+  Statistic &init() {
+    if (!Initialized.load(std::memory_order_acquire))
+      RegisterStatistic();
+    return *this;
+  }
+
+  void RegisterStatistic();
+};
+
+// STATISTIC - A macro to make definition of statistics really simple.  This
+// automatically passes the DEBUG_TYPE of the file into the statistic.
+#define STATISTIC(VARNAME, DESC)                                               \
+  static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, {false}}
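+
+// For example, in a pass implementation file (hypothetical pass and counter):
+//
+//   #define DEBUG_TYPE "mypass"
+//   STATISTIC(NumErased, "Number of redundant instructions erased");
+//   ...
+//   ++NumErased;  // only counted when LLVM_ENABLE_STATS is in effect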
+
+/// \brief Enable the collection and printing of statistics.
+void EnableStatistics(bool PrintOnExit = true);
+
+/// \brief Check if statistics are enabled.
+bool AreStatisticsEnabled();
+
+/// \brief Return a file stream to print our output on.
+std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
+
+/// \brief Print statistics to the file returned by CreateInfoOutputFile().
+void PrintStatistics();
+
+/// \brief Print statistics to the given output stream.
+void PrintStatistics(raw_ostream &OS);
+
+/// Print statistics in JSON format. This does include all global timers (\see
+/// Timer, TimerGroup). Note that the timers are cleared after printing and will
+/// not be printed in human readable form or in a second call of
+/// PrintStatisticsJSON().
+void PrintStatisticsJSON(raw_ostream &OS);
+
+/// \brief Get the statistics. This can be used to look up the value of
+/// statistics without needing to parse JSON.
+///
+/// This function does not prevent statistics from being updated by other
+/// threads during its execution. It will return the value at the point that
+/// it is read. However, it will prevent new statistics from registering until
+/// it completes.
+const std::vector<std::pair<StringRef, unsigned>> GetStatistics();
+
+/// \brief Reset the statistics. This can be used to zero and de-register the
+/// statistics in order to measure a compilation.
+///
+/// When this function begins to call destructors prior to returning, all
+/// statistics will be zero and unregistered. However, that might not remain the
+/// case by the time this function finishes returning. Whether updates from
+/// other threads are lost or merely deferred until the function returns is
+/// timing sensitive.
+///
+/// Callers who intend to use this to measure statistics for a single
+/// compilation should ensure that no compilations are in progress at the point
+/// this function is called and that only one compilation executes until calling
+/// GetStatistics().
+void ResetStatistics();
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STATISTIC_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringExtras.h b/linux-x64/clang/include/llvm/ADT/StringExtras.h
new file mode 100644
index 0000000..45f6677
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringExtras.h
@@ -0,0 +1,367 @@
+//===- llvm/ADT/StringExtras.h - Useful string functions --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful when dealing with strings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGEXTRAS_H
+#define LLVM_ADT_STRINGEXTRAS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+template<typename T> class SmallVectorImpl;
+class raw_ostream;
+
+/// hexdigit - Return the hexadecimal character for the
+/// given number \p X (which should be less than 16).
+inline char hexdigit(unsigned X, bool LowerCase = false) {
+  const char HexChar = LowerCase ? 'a' : 'A';
+  return X < 10 ? '0' + X : HexChar + X - 10;
+}
+
+/// Construct a string ref from a boolean.
+inline StringRef toStringRef(bool B) { return StringRef(B ? "true" : "false"); }
+
+/// Construct a string ref from an array ref of unsigned chars.
+inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
+  return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
+}
+
+/// Construct an array ref of unsigned chars from a string ref.
+inline ArrayRef<uint8_t> arrayRefFromStringRef(StringRef Input) {
+  return {Input.bytes_begin(), Input.bytes_end()};
+}
+
+/// Interpret the given character \p C as a hexadecimal digit and return its
+/// value.
+///
+/// If \p C is not a valid hex digit, -1U is returned.
+inline unsigned hexDigitValue(char C) {
+  if (C >= '0' && C <= '9') return C-'0';
+  if (C >= 'a' && C <= 'f') return C-'a'+10U;
+  if (C >= 'A' && C <= 'F') return C-'A'+10U;
+  return -1U;
+}
+
+/// Checks if character \p C is one of the 10 decimal digits.
+inline bool isDigit(char C) { return C >= '0' && C <= '9'; }
+
+/// Checks if character \p C is a hexadecimal numeric character.
+inline bool isHexDigit(char C) { return hexDigitValue(C) != -1U; }
+
+/// Checks if character \p C is a valid letter as classified by "C" locale.
+inline bool isAlpha(char C) {
+  return ('a' <= C && C <= 'z') || ('A' <= C && C <= 'Z');
+}
+
+/// Checks whether character \p C is either a decimal digit or an uppercase or
+/// lowercase letter as classified by "C" locale.
+inline bool isAlnum(char C) { return isAlpha(C) || isDigit(C); }
+
+/// Returns the corresponding lowercase character if \p x is uppercase.
+inline char toLower(char x) {
+  if (x >= 'A' && x <= 'Z')
+    return x - 'A' + 'a';
+  return x;
+}
+
+/// Returns the corresponding uppercase character if \p x is lowercase.
+inline char toUpper(char x) {
+  if (x >= 'a' && x <= 'z')
+    return x - 'a' + 'A';
+  return x;
+}
+
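+/// Convert \p X to an uppercase (or lowercase) hexadecimal string; for
+/// example, utohexstr(62) returns "3E".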
+inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
+  char Buffer[17];
+  char *BufPtr = std::end(Buffer);
+
+  if (X == 0) *--BufPtr = '0';
+
+  while (X) {
+    unsigned char Mod = static_cast<unsigned char>(X) & 15;
+    *--BufPtr = hexdigit(Mod, LowerCase);
+    X >>= 4;
+  }
+
+  return std::string(BufPtr, std::end(Buffer));
+}
+
+/// Convert buffer \p Input to its hexadecimal representation.
+/// The returned string is double the size of \p Input.
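+/// For example, toHex("ab") returns "6162".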
+inline std::string toHex(StringRef Input) {
+  static const char *const LUT = "0123456789ABCDEF";
+  size_t Length = Input.size();
+
+  std::string Output;
+  Output.reserve(2 * Length);
+  for (size_t i = 0; i < Length; ++i) {
+    const unsigned char c = Input[i];
+    Output.push_back(LUT[c >> 4]);
+    Output.push_back(LUT[c & 15]);
+  }
+  return Output;
+}
+
+inline std::string toHex(ArrayRef<uint8_t> Input) {
+  return toHex(toStringRef(Input));
+}
+
+inline uint8_t hexFromNibbles(char MSB, char LSB) {
+  unsigned U1 = hexDigitValue(MSB);
+  unsigned U2 = hexDigitValue(LSB);
+  assert(U1 != -1U && U2 != -1U);
+
+  return static_cast<uint8_t>((U1 << 4) | U2);
+}
+
+/// Convert hexadecimal string \p Input to its binary representation.
+/// The returned string is half the size of \p Input.
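+/// For example, fromHex("6162") returns "ab". An odd-length input is padded
+/// with a leading zero nibble, so fromHex("F") returns "\x0F".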
+inline std::string fromHex(StringRef Input) {
+  if (Input.empty())
+    return std::string();
+
+  std::string Output;
+  Output.reserve((Input.size() + 1) / 2);
+  if (Input.size() % 2 == 1) {
+    Output.push_back(hexFromNibbles('0', Input.front()));
+    Input = Input.drop_front();
+  }
+
+  assert(Input.size() % 2 == 0);
+  while (!Input.empty()) {
+    uint8_t Hex = hexFromNibbles(Input[0], Input[1]);
+    Output.push_back(Hex);
+    Input = Input.drop_front(2);
+  }
+  return Output;
+}
+
+/// \brief Convert the string \p S to an integer of the specified type using
+/// the radix \p Base.  If \p Base is 0, auto-detects the radix.
+/// Returns true if the number was successfully converted, false otherwise.
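+/// For example, to_integer("0x1F", Num) sets Num to 31 (radix auto-detected).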
+template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
+  return !S.getAsInteger(Base, Num);
+}
+
+namespace detail {
+template <typename N>
+inline bool to_float(const Twine &T, N &Num, N (*StrTo)(const char *, char **)) {
+  SmallString<32> Storage;
+  StringRef S = T.toNullTerminatedStringRef(Storage);
+  char *End;
+  N Temp = StrTo(S.data(), &End);
+  if (*End != '\0')
+    return false;
+  Num = Temp;
+  return true;
+}
+}
+
+inline bool to_float(const Twine &T, float &Num) {
+  return detail::to_float(T, Num, strtof);
+}
+
+inline bool to_float(const Twine &T, double &Num) {
+  return detail::to_float(T, Num, strtod);
+}
+
+inline bool to_float(const Twine &T, long double &Num) {
+  return detail::to_float(T, Num, strtold);
+}
+
+inline std::string utostr(uint64_t X, bool isNeg = false) {
+  char Buffer[21];
+  char *BufPtr = std::end(Buffer);
+
+  if (X == 0) *--BufPtr = '0';  // Handle special case...
+
+  while (X) {
+    *--BufPtr = '0' + char(X % 10);
+    X /= 10;
+  }
+
+  if (isNeg) *--BufPtr = '-';   // Add negative sign...
+  return std::string(BufPtr, std::end(Buffer));
+}
+
+inline std::string itostr(int64_t X) {
+  if (X < 0)
+    // Negate after the cast to uint64_t so that INT64_MIN does not overflow
+    // in a signed negation.
+    return utostr(-static_cast<uint64_t>(X), true);
+  else
+    return utostr(static_cast<uint64_t>(X));
+}
+
+/// StrInStrNoCase - Portable version of strcasestr.  Locates the first
+/// occurrence of string 's1' in string 's2', ignoring case.  Returns
+/// the offset of s2 in s1 or npos if s2 cannot be found.
+StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
+
+/// getToken - This function extracts one token from source, ignoring any
+/// leading characters that appear in the Delimiters string, and ending the
+/// token at any of the characters that appear in the Delimiters string.  If
+/// there are no tokens in the source string, an empty string is returned.
+/// The function returns a pair containing the extracted token and the
+/// remaining tail string.
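+/// For example, getToken("  one two") returns the pair ("one", " two").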
+std::pair<StringRef, StringRef> getToken(StringRef Source,
+                                         StringRef Delimiters = " \t\n\v\f\r");
+
+/// SplitString - Split up the specified string according to the specified
+/// delimiters, appending the result fragments to the output list.
+void SplitString(StringRef Source,
+                 SmallVectorImpl<StringRef> &OutFragments,
+                 StringRef Delimiters = " \t\n\v\f\r");
+
+/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
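+/// For example, 1 -> "st", 22 -> "nd", 111 -> "th".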
+inline StringRef getOrdinalSuffix(unsigned Val) {
+  // It is critically important that we do this perfectly for
+  // user-written sequences with over 100 elements.
+  switch (Val % 100) {
+  case 11:
+  case 12:
+  case 13:
+    return "th";
+  default:
+    switch (Val % 10) {
+      case 1: return "st";
+      case 2: return "nd";
+      case 3: return "rd";
+      default: return "th";
+    }
+  }
+}
+
+/// PrintEscapedString - Print each character of the specified string, escaping
+/// it if it is not printable or if it is an escape char.
+void PrintEscapedString(StringRef Name, raw_ostream &Out);
+
+/// printLowerCase - Print each character as lowercase if it is uppercase.
+void printLowerCase(StringRef String, raw_ostream &Out);
+
+namespace detail {
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+                             StringRef Separator, std::input_iterator_tag) {
+  std::string S;
+  if (Begin == End)
+    return S;
+
+  S += (*Begin);
+  while (++Begin != End) {
+    S += Separator;
+    S += (*Begin);
+  }
+  return S;
+}
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+                             StringRef Separator, std::forward_iterator_tag) {
+  std::string S;
+  if (Begin == End)
+    return S;
+
+  size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
+  for (IteratorT I = Begin; I != End; ++I)
+    Len += (*I).size(); // Sum every element's size, not just the first.
+  S.reserve(Len);
+  S += (*Begin);
+  while (++Begin != End) {
+    S += Separator;
+    S += (*Begin);
+  }
+  return S;
+}
+
+template <typename Sep>
+inline void join_items_impl(std::string &Result, Sep Separator) {}
+
+template <typename Sep, typename Arg>
+inline void join_items_impl(std::string &Result, Sep Separator,
+                            const Arg &Item) {
+  Result += Item;
+}
+
+template <typename Sep, typename Arg1, typename... Args>
+inline void join_items_impl(std::string &Result, Sep Separator, const Arg1 &A1,
+                            Args &&... Items) {
+  Result += A1;
+  Result += Separator;
+  join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+}
+
+inline size_t join_one_item_size(char C) { return 1; }
+inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }
+
+template <typename T> inline size_t join_one_item_size(const T &Str) {
+  return Str.size();
+}
+
+inline size_t join_items_size() { return 0; }
+
+template <typename A1> inline size_t join_items_size(const A1 &A) {
+  return join_one_item_size(A);
+}
+template <typename A1, typename... Args>
+inline size_t join_items_size(const A1 &A, Args &&... Items) {
+  return join_one_item_size(A) + join_items_size(std::forward<Args>(Items)...);
+}
+
+} // end namespace detail
+
+/// Joins the strings in the range [Begin, End), adding Separator between
+/// the elements.
+template <typename IteratorT>
+inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
+  using tag = typename std::iterator_traits<IteratorT>::iterator_category;
+  return detail::join_impl(Begin, End, Separator, tag());
+}
+
+/// Joins the strings in the range [R.begin(), R.end()), adding Separator
+/// between the elements.
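+/// For example, join(std::vector<std::string>({"a", "b", "c"}), "-") returns
+/// "a-b-c".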
+template <typename Range>
+inline std::string join(Range &&R, StringRef Separator) {
+  return join(R.begin(), R.end(), Separator);
+}
+
+/// Joins the strings in the parameter pack \p Items, adding \p Separator
+/// between the elements.  All arguments must be implicitly convertible to
+/// std::string, or there should be an overload of std::string::operator+=()
+/// that accepts the argument explicitly.
+template <typename Sep, typename... Args>
+inline std::string join_items(Sep Separator, Args &&... Items) {
+  std::string Result;
+  if (sizeof...(Items) == 0)
+    return Result;
+
+  size_t NS = detail::join_one_item_size(Separator);
+  size_t NI = detail::join_items_size(std::forward<Args>(Items)...);
+  Result.reserve(NI + (sizeof...(Items) - 1) * NS + 1);
+  detail::join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+  return Result;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGEXTRAS_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringMap.h b/linux-x64/clang/include/llvm/ADT/StringMap.h
new file mode 100644
index 0000000..d34d5ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringMap.h
@@ -0,0 +1,558 @@
+//===- StringMap.h - String Hash table map interface ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StringMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAP_H
+#define LLVM_ADT_STRINGMAP_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+template<typename ValueTy> class StringMapConstIterator;
+template<typename ValueTy> class StringMapIterator;
+template<typename ValueTy> class StringMapKeyIterator;
+
+/// StringMapEntryBase - Shared base class of StringMapEntry instances.
+class StringMapEntryBase {
+  size_t StrLen;
+
+public:
+  explicit StringMapEntryBase(size_t Len) : StrLen(Len) {}
+
+  size_t getKeyLength() const { return StrLen; }
+};
+
+/// StringMapImpl - This is the base class of StringMap that is shared among
+/// all of its instantiations.
+class StringMapImpl {
+protected:
+  // Array of NumBuckets pointers to entries; null pointers are holes.
+  // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
+  // followed by an array of the actual hash values, stored as unsigned
+  // integers.
+  StringMapEntryBase **TheTable = nullptr;
+  unsigned NumBuckets = 0;
+  unsigned NumItems = 0;
+  unsigned NumTombstones = 0;
+  unsigned ItemSize;
+
+protected:
+  explicit StringMapImpl(unsigned itemSize)
+      : ItemSize(itemSize) {}
+  StringMapImpl(StringMapImpl &&RHS)
+      : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
+        NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
+        ItemSize(RHS.ItemSize) {
+    RHS.TheTable = nullptr;
+    RHS.NumBuckets = 0;
+    RHS.NumItems = 0;
+    RHS.NumTombstones = 0;
+  }
+
+  StringMapImpl(unsigned InitSize, unsigned ItemSize);
+  unsigned RehashTable(unsigned BucketNo = 0);
+
+  /// LookupBucketFor - Look up the bucket that the specified string should end
+  /// up in.  If it already exists as a key in the map, the Item pointer for the
+  /// specified bucket will be non-null.  Otherwise, it will be null.  In either
+  /// case, the FullHashValue field of the bucket will be set to the hash value
+  /// of the string.
+  unsigned LookupBucketFor(StringRef Key);
+
+  /// FindKey - Look up the bucket that contains the specified key. If it exists
+  /// in the map, return the bucket number of the key.  Otherwise return -1.
+  /// This does not modify the map.
+  int FindKey(StringRef Key) const;
+
+  /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
+  /// delete it.  This aborts if the value isn't in the table.
+  void RemoveKey(StringMapEntryBase *V);
+
+  /// RemoveKey - Remove the StringMapEntry for the specified key from the
+  /// table, returning it.  If the key is not in the table, this returns null.
+  StringMapEntryBase *RemoveKey(StringRef Key);
+
+  /// Allocate the table with the specified number of buckets and otherwise
+  /// setup the map as empty.
+  void init(unsigned Size);
+
+public:
+  static StringMapEntryBase *getTombstoneVal() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
+    return reinterpret_cast<StringMapEntryBase *>(Val);
+  }
+
+  unsigned getNumBuckets() const { return NumBuckets; }
+  unsigned getNumItems() const { return NumItems; }
+
+  bool empty() const { return NumItems == 0; }
+  unsigned size() const { return NumItems; }
+
+  void swap(StringMapImpl &Other) {
+    std::swap(TheTable, Other.TheTable);
+    std::swap(NumBuckets, Other.NumBuckets);
+    std::swap(NumItems, Other.NumItems);
+    std::swap(NumTombstones, Other.NumTombstones);
+  }
+};
+
+/// StringMapEntry - This is used to represent one value that is inserted into
+/// a StringMap.  It contains the Value itself and the key: the string length
+/// and data.
+template<typename ValueTy>
+class StringMapEntry : public StringMapEntryBase {
+public:
+  ValueTy second;
+
+  explicit StringMapEntry(size_t strLen)
+    : StringMapEntryBase(strLen), second() {}
+  template <typename... InitTy>
+  StringMapEntry(size_t strLen, InitTy &&... InitVals)
+      : StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
+  StringMapEntry(StringMapEntry &E) = delete;
+
+  StringRef getKey() const {
+    return StringRef(getKeyData(), getKeyLength());
+  }
+
+  const ValueTy &getValue() const { return second; }
+  ValueTy &getValue() { return second; }
+
+  void setValue(const ValueTy &V) { second = V; }
+
+  /// getKeyData - Return the start of the string data that is the key for this
+  /// value.  The string data is always stored immediately after the
+  /// StringMapEntry object.
+  const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
+
+  StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
+
+  /// Create a StringMapEntry for the specified key and construct the value
+  /// using \p InitVals.
+  template <typename AllocatorTy, typename... InitTy>
+  static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
+                                InitTy &&... InitVals) {
+    size_t KeyLength = Key.size();
+
+    // Allocate a new item with space for the string at the end and a null
+    // terminator.
+    size_t AllocSize = sizeof(StringMapEntry) + KeyLength + 1;
+    size_t Alignment = alignof(StringMapEntry);
+
+    StringMapEntry *NewItem =
+      static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
+
+    if (NewItem == nullptr)
+      report_bad_alloc_error("Allocation of StringMap entry failed.");
+
+    // Construct the value.
+    new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...);
+
+    // Copy the string information.
+    char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
+    if (KeyLength > 0)
+      memcpy(StrBuffer, Key.data(), KeyLength);
+    StrBuffer[KeyLength] = 0;  // Null terminate for convenience of clients.
+    return NewItem;
+  }
+
+  /// Create - Create a StringMapEntry with normal malloc/free.
+  template <typename... InitType>
+  static StringMapEntry *Create(StringRef Key, InitType &&... InitVal) {
+    MallocAllocator A;
+    return Create(Key, A, std::forward<InitType>(InitVal)...);
+  }
+
+  static StringMapEntry *Create(StringRef Key) {
+    return Create(Key, ValueTy());
+  }
+
+  /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+  /// into a StringMapEntry, return the StringMapEntry itself.
+  static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
+    char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
+    return *reinterpret_cast<StringMapEntry*>(Ptr);
+  }
+
+  /// Destroy - Destroy this StringMapEntry, releasing memory back to the
+  /// specified allocator.
+  template<typename AllocatorTy>
+  void Destroy(AllocatorTy &Allocator) {
+    // Free memory referenced by the item.
+    size_t AllocSize = sizeof(StringMapEntry) + getKeyLength() + 1;
+    this->~StringMapEntry();
+    Allocator.Deallocate(static_cast<void *>(this), AllocSize);
+  }
+
+  /// Destroy this object, releasing memory back to the malloc allocator.
+  void Destroy() {
+    MallocAllocator A;
+    Destroy(A);
+  }
+};
+
+/// StringMap - This is an unconventional map that is specialized for handling
+/// keys that are "strings", which are basically ranges of bytes. This does some
+/// funky memory allocation and hashing things to make it extremely efficient,
+/// storing the string data *after* the value in the map.
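+///
+/// A minimal usage sketch:
+///
+///   StringMap<int> Counts;
+///   ++Counts["foo"];                    // default-constructs the value, then bumps
+///   size_t Seen = Counts.count("bar");  // 0 or 1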
+template<typename ValueTy, typename AllocatorTy = MallocAllocator>
+class StringMap : public StringMapImpl {
+  AllocatorTy Allocator;
+
+public:
+  using MapEntryTy = StringMapEntry<ValueTy>;
+
+  StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+  explicit StringMap(unsigned InitialSize)
+    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+  explicit StringMap(AllocatorTy A)
+    : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
+
+  StringMap(unsigned InitialSize, AllocatorTy A)
+    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
+      Allocator(A) {}
+
+  StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
+      : StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
+    for (const auto &P : List) {
+      insert(P);
+    }
+  }
+
+  StringMap(StringMap &&RHS)
+      : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+
+  StringMap(const StringMap &RHS) :
+    StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
+    Allocator(RHS.Allocator) {
+    if (RHS.empty())
+      return;
+
+    // Allocate TheTable of the same size as RHS's TheTable, and set the
+    // sentinel appropriately (and NumBuckets).
+    init(RHS.NumBuckets);
+    unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1),
+             *RHSHashTable = (unsigned *)(RHS.TheTable + NumBuckets + 1);
+
+    NumItems = RHS.NumItems;
+    NumTombstones = RHS.NumTombstones;
+    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+      StringMapEntryBase *Bucket = RHS.TheTable[I];
+      if (!Bucket || Bucket == getTombstoneVal()) {
+        TheTable[I] = Bucket;
+        continue;
+      }
+
+      TheTable[I] = MapEntryTy::Create(
+          static_cast<MapEntryTy *>(Bucket)->getKey(), Allocator,
+          static_cast<MapEntryTy *>(Bucket)->getValue());
+      HashTable[I] = RHSHashTable[I];
+    }
+
+    // Note that here we've copied everything from the RHS into this object,
+    // tombstones included. We could, instead, have re-probed for each key to
+    // instantiate this new object without any tombstone buckets. The
+    // assumption here is that items are rarely deleted from most StringMaps,
+    // and so tombstones are rare, so the cost of re-probing for all inputs is
+    // not worthwhile.
+  }
+
+  StringMap &operator=(StringMap RHS) {
+    StringMapImpl::swap(RHS);
+    std::swap(Allocator, RHS.Allocator);
+    return *this;
+  }
+
+  ~StringMap() {
+    // Delete all the elements in the map, but don't reset the elements
+    // to default values.  This is a copy of clear(), but avoids unnecessary
+    // work not required in the destructor.
+    if (!empty()) {
+      for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+        StringMapEntryBase *Bucket = TheTable[I];
+        if (Bucket && Bucket != getTombstoneVal()) {
+          static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+        }
+      }
+    }
+    free(TheTable);
+  }
+
+  AllocatorTy &getAllocator() { return Allocator; }
+  const AllocatorTy &getAllocator() const { return Allocator; }
+
+  using key_type = const char*;
+  using mapped_type = ValueTy;
+  using value_type = StringMapEntry<ValueTy>;
+  using size_type = size_t;
+
+  using const_iterator = StringMapConstIterator<ValueTy>;
+  using iterator = StringMapIterator<ValueTy>;
+
+  iterator begin() {
+    return iterator(TheTable, NumBuckets == 0);
+  }
+  iterator end() {
+    return iterator(TheTable+NumBuckets, true);
+  }
+  const_iterator begin() const {
+    return const_iterator(TheTable, NumBuckets == 0);
+  }
+  const_iterator end() const {
+    return const_iterator(TheTable+NumBuckets, true);
+  }
+
+  iterator_range<StringMapKeyIterator<ValueTy>> keys() const {
+    return make_range(StringMapKeyIterator<ValueTy>(begin()),
+                      StringMapKeyIterator<ValueTy>(end()));
+  }
+
+  iterator find(StringRef Key) {
+    int Bucket = FindKey(Key);
+    if (Bucket == -1) return end();
+    return iterator(TheTable+Bucket, true);
+  }
+
+  const_iterator find(StringRef Key) const {
+    int Bucket = FindKey(Key);
+    if (Bucket == -1) return end();
+    return const_iterator(TheTable+Bucket, true);
+  }
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueTy lookup(StringRef Key) const {
+    const_iterator it = find(Key);
+    if (it != end())
+      return it->second;
+    return ValueTy();
+  }
+
+  /// Lookup the ValueTy for the \p Key, or create a default constructed value
+  /// if the key is not in the map.
+  ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }
+
+  /// count - Return 1 if the element is in the map, 0 otherwise.
+  size_type count(StringRef Key) const {
+    return find(Key) == end() ? 0 : 1;
+  }
+
+  /// insert - Insert the specified key/value pair into the map.  If the key
+  /// already exists in the map, return false and ignore the request, otherwise
+  /// insert it and return true.
+  bool insert(MapEntryTy *KeyValue) {
+    unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
+    StringMapEntryBase *&Bucket = TheTable[BucketNo];
+    if (Bucket && Bucket != getTombstoneVal())
+      return false;  // Already exists in map.
+
+    if (Bucket == getTombstoneVal())
+      --NumTombstones;
+    Bucket = KeyValue;
+    ++NumItems;
+    assert(NumItems + NumTombstones <= NumBuckets);
+
+    RehashTable();
+    return true;
+  }
+
+  /// insert - Inserts the specified key/value pair into the map if the key
+  /// isn't already in the map. The bool component of the returned pair is true
+  /// if and only if the insertion takes place, and the iterator component of
+  /// the pair points to the element with key equivalent to the key of the pair.
+  std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
+    return try_emplace(KV.first, std::move(KV.second));
+  }
+
+  /// Emplace a new element for the specified key into the map if the key isn't
+  /// already in the map. The bool component of the returned pair is true
+  /// if and only if the insertion takes place, and the iterator component of
+  /// the pair points to the element with key equivalent to the key of the pair.
+  template <typename... ArgsTy>
+  std::pair<iterator, bool> try_emplace(StringRef Key, ArgsTy &&... Args) {
+    unsigned BucketNo = LookupBucketFor(Key);
+    StringMapEntryBase *&Bucket = TheTable[BucketNo];
+    if (Bucket && Bucket != getTombstoneVal())
+      return std::make_pair(iterator(TheTable + BucketNo, false),
+                            false); // Already exists in map.
+
+    if (Bucket == getTombstoneVal())
+      --NumTombstones;
+    Bucket = MapEntryTy::Create(Key, Allocator, std::forward<ArgsTy>(Args)...);
+    ++NumItems;
+    assert(NumItems + NumTombstones <= NumBuckets);
+
+    BucketNo = RehashTable(BucketNo);
+    return std::make_pair(iterator(TheTable + BucketNo, false), true);
+  }
+
+  /// clear - Empties out the StringMap.
+  void clear() {
+    if (empty()) return;
+
+    // Zap all values, resetting the keys back to non-present (not tombstone),
+    // which is safe because we're removing all elements.
+    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+      StringMapEntryBase *&Bucket = TheTable[I];
+      if (Bucket && Bucket != getTombstoneVal()) {
+        static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+      }
+      Bucket = nullptr;
+    }
+
+    NumItems = 0;
+    NumTombstones = 0;
+  }
+
+  /// remove - Remove the specified key/value pair from the map, but do not
+  /// erase it.  This aborts if the key is not in the map.
+  void remove(MapEntryTy *KeyValue) {
+    RemoveKey(KeyValue);
+  }
+
+  void erase(iterator I) {
+    MapEntryTy &V = *I;
+    remove(&V);
+    V.Destroy(Allocator);
+  }
+
+  bool erase(StringRef Key) {
+    iterator I = find(Key);
+    if (I == end()) return false;
+    erase(I);
+    return true;
+  }
+};
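+
+// Usage sketch (illustrative only; names and values below are hypothetical):
+// counting occurrences with try_emplace, operator[] and lookup:
+//
+//   StringMap<unsigned> Counts;
+//   auto R = Counts.try_emplace("apple", 1u); // R.second == true: new entry
+//   ++Counts["apple"];                        // Counts["apple"] == 2
+//   unsigned N = Counts.lookup("pear");       // 0: default ValueTy if absent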
+
+template <typename DerivedTy, typename ValueTy>
+class StringMapIterBase
+    : public iterator_facade_base<DerivedTy, std::forward_iterator_tag,
+                                  ValueTy> {
+protected:
+  StringMapEntryBase **Ptr = nullptr;
+
+public:
+  StringMapIterBase() = default;
+
+  explicit StringMapIterBase(StringMapEntryBase **Bucket,
+                             bool NoAdvance = false)
+      : Ptr(Bucket) {
+    if (!NoAdvance) AdvancePastEmptyBuckets();
+  }
+
+  DerivedTy &operator=(const DerivedTy &Other) {
+    Ptr = Other.Ptr;
+    return static_cast<DerivedTy &>(*this);
+  }
+
+  bool operator==(const DerivedTy &RHS) const { return Ptr == RHS.Ptr; }
+
+  DerivedTy &operator++() { // Preincrement
+    ++Ptr;
+    AdvancePastEmptyBuckets();
+    return static_cast<DerivedTy &>(*this);
+  }
+
+  DerivedTy operator++(int) { // Post-increment
+    DerivedTy Tmp(Ptr);
+    ++*this;
+    return Tmp;
+  }
+
+private:
+  void AdvancePastEmptyBuckets() {
+    while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
+      ++Ptr;
+  }
+};
+
+template <typename ValueTy>
+class StringMapConstIterator
+    : public StringMapIterBase<StringMapConstIterator<ValueTy>,
+                               const StringMapEntry<ValueTy>> {
+  using base = StringMapIterBase<StringMapConstIterator<ValueTy>,
+                                 const StringMapEntry<ValueTy>>;
+
+public:
+  StringMapConstIterator() = default;
+  explicit StringMapConstIterator(StringMapEntryBase **Bucket,
+                                  bool NoAdvance = false)
+      : base(Bucket, NoAdvance) {}
+
+  const StringMapEntry<ValueTy> &operator*() const {
+    return *static_cast<const StringMapEntry<ValueTy> *>(*this->Ptr);
+  }
+};
+
+template <typename ValueTy>
+class StringMapIterator : public StringMapIterBase<StringMapIterator<ValueTy>,
+                                                   StringMapEntry<ValueTy>> {
+  using base =
+      StringMapIterBase<StringMapIterator<ValueTy>, StringMapEntry<ValueTy>>;
+
+public:
+  StringMapIterator() = default;
+  explicit StringMapIterator(StringMapEntryBase **Bucket,
+                             bool NoAdvance = false)
+      : base(Bucket, NoAdvance) {}
+
+  StringMapEntry<ValueTy> &operator*() const {
+    return *static_cast<StringMapEntry<ValueTy> *>(*this->Ptr);
+  }
+
+  operator StringMapConstIterator<ValueTy>() const {
+    return StringMapConstIterator<ValueTy>(this->Ptr, true);
+  }
+};
+
+template <typename ValueTy>
+class StringMapKeyIterator
+    : public iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+                                   StringMapConstIterator<ValueTy>,
+                                   std::forward_iterator_tag, StringRef> {
+  using base = iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+                                     StringMapConstIterator<ValueTy>,
+                                     std::forward_iterator_tag, StringRef>;
+
+public:
+  StringMapKeyIterator() = default;
+  explicit StringMapKeyIterator(StringMapConstIterator<ValueTy> Iter)
+      : base(std::move(Iter)) {}
+
+  StringRef &operator*() {
+    Key = this->wrapped()->getKey();
+    return Key;
+  }
+
+private:
+  StringRef Key;
+};
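+
+// Usage sketch (illustrative): iterating only the keys of a map via keys():
+//
+//   StringMap<int> M;
+//   for (StringRef K : M.keys())
+//     consume(K); // consume() is a hypothetical callback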
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringRef.h b/linux-x64/clang/include/llvm/ADT/StringRef.h
new file mode 100644
index 0000000..3d2417a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringRef.h
@@ -0,0 +1,925 @@
+//===- StringRef.h - Constant String Reference Wrapper ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGREF_H
+#define LLVM_ADT_STRINGREF_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+  class APInt;
+  class hash_code;
+  template <typename T> class SmallVectorImpl;
+  class StringRef;
+
+  /// Helper functions for StringRef::getAsInteger.
+  bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
+                            unsigned long long &Result);
+
+  bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
+
+  bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
+                              unsigned long long &Result);
+  bool consumeSignedInteger(StringRef &Str, unsigned Radix, long long &Result);
+
+  /// StringRef - Represent a constant reference to a string, i.e. a character
+  /// array and a length, which need not be null terminated.
+  ///
+  /// This class does not own the string data, it is expected to be used in
+  /// situations where the character data resides in some other buffer, whose
+  /// lifetime extends past that of the StringRef. For this reason, it is not in
+  /// general safe to store a StringRef.
+  class StringRef {
+  public:
+    static const size_t npos = ~size_t(0);
+
+    using iterator = const char *;
+    using const_iterator = const char *;
+    using size_type = size_t;
+
+  private:
+    /// The start of the string, in an external buffer.
+    const char *Data = nullptr;
+
+    /// The length of the string.
+    size_t Length = 0;
+
+    // Workaround memcmp issue with null pointers (undefined behavior)
+    // by providing a specialized version
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
+      if (Length == 0) { return 0; }
+      return ::memcmp(Lhs,Rhs,Length);
+    }
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct an empty string ref.
+    /*implicit*/ StringRef() = default;
+
+    /// Disable conversion from nullptr.  This prevents things like
+    /// if (S == nullptr)
+    StringRef(std::nullptr_t) = delete;
+
+    /// Construct a string ref from a cstring.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ StringRef(const char *Str)
+        : Data(Str), Length(Str ? ::strlen(Str) : 0) {}
+
+    /// Construct a string ref from a pointer and length.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ constexpr StringRef(const char *data, size_t length)
+        : Data(data), Length(length) {}
+
+    /// Construct a string ref from an std::string.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ StringRef(const std::string &Str)
+      : Data(Str.data()), Length(Str.length()) {}
+
+    static StringRef withNullAsEmpty(const char *data) {
+      return StringRef(data ? data : "");
+    }
+
+    /// @}
+    /// @name Iterators
+    /// @{
+
+    iterator begin() const { return Data; }
+
+    iterator end() const { return Data + Length; }
+
+    const unsigned char *bytes_begin() const {
+      return reinterpret_cast<const unsigned char *>(begin());
+    }
+    const unsigned char *bytes_end() const {
+      return reinterpret_cast<const unsigned char *>(end());
+    }
+    iterator_range<const unsigned char *> bytes() const {
+      return make_range(bytes_begin(), bytes_end());
+    }
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    /// data - Get a pointer to the start of the string (which may not be null
+    /// terminated).
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    const char *data() const { return Data; }
+
+    /// empty - Check if the string is empty.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool empty() const { return Length == 0; }
+
+    /// size - Get the string size.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t size() const { return Length; }
+
+    /// front - Get the first character in the string.
+    LLVM_NODISCARD
+    char front() const {
+      assert(!empty());
+      return Data[0];
+    }
+
+    /// back - Get the last character in the string.
+    LLVM_NODISCARD
+    char back() const {
+      assert(!empty());
+      return Data[Length-1];
+    }
+
+    /// copy - Allocate a copy of this string in \p A and return a StringRef
+    /// to it.
+    template <typename Allocator>
+    LLVM_NODISCARD StringRef copy(Allocator &A) const {
+      // Don't request a length 0 copy from the allocator.
+      if (empty())
+        return StringRef();
+      char *S = A.template Allocate<char>(Length);
+      std::copy(begin(), end(), S);
+      return StringRef(S, Length);
+    }
+
+    /// equals - Check for string equality, this is more efficient than
+    /// compare() when the relative ordering of inequal strings isn't needed.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool equals(StringRef RHS) const {
+      return (Length == RHS.Length &&
+              compareMemory(Data, RHS.Data, RHS.Length) == 0);
+    }
+
+    /// equals_lower - Check for string equality, ignoring case.
+    LLVM_NODISCARD
+    bool equals_lower(StringRef RHS) const {
+      return Length == RHS.Length && compare_lower(RHS) == 0;
+    }
+
+    /// compare - Compare two strings; the result is -1, 0, or 1 if this string
+    /// is lexicographically less than, equal to, or greater than the \p RHS.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    int compare(StringRef RHS) const {
+      // Check the prefix for a mismatch.
+      if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
+        return Res < 0 ? -1 : 1;
+
+      // Otherwise the prefixes match, so we only need to check the lengths.
+      if (Length == RHS.Length)
+        return 0;
+      return Length < RHS.Length ? -1 : 1;
+    }
+
+    /// compare_lower - Compare two strings, ignoring case.
+    LLVM_NODISCARD
+    int compare_lower(StringRef RHS) const;
+
+    /// compare_numeric - Compare two strings, treating sequences of digits as
+    /// numbers.
+    LLVM_NODISCARD
+    int compare_numeric(StringRef RHS) const;
+
+    /// \brief Determine the edit distance between this string and another
+    /// string.
+    ///
+    /// \param Other the string to compare this string against.
+    ///
+    /// \param AllowReplacements whether to allow character
+    /// replacements (change one character into another) as a single
+    /// operation, rather than as two operations (an insertion and a
+    /// removal).
+    ///
+    /// \param MaxEditDistance If non-zero, the maximum edit distance that
+    /// this routine is allowed to compute. If the edit distance will exceed
+    /// that maximum, returns \c MaxEditDistance+1.
+    ///
+    /// \returns the minimum number of character insertions, removals,
+    /// or (if \p AllowReplacements is \c true) replacements needed to
+    /// transform one of the given strings into the other. If zero,
+    /// the strings are identical.
+    LLVM_NODISCARD
+    unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+                           unsigned MaxEditDistance = 0) const;
+
+    /// str - Get the contents as an std::string.
+    LLVM_NODISCARD
+    std::string str() const {
+      if (!Data) return std::string();
+      return std::string(Data, Length);
+    }
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+
+    LLVM_NODISCARD
+    char operator[](size_t Index) const {
+      assert(Index < Length && "Invalid index!");
+      return Data[Index];
+    }
+
+    /// Disallow accidental assignment from a temporary std::string.
+    ///
+    /// The declaration here is extra complicated so that `stringRef = {}`
+    /// and `stringRef = "abc"` continue to select the move assignment operator.
+    template <typename T>
+    typename std::enable_if<std::is_same<T, std::string>::value,
+                            StringRef>::type &
+    operator=(T &&Str) = delete;
+
+    /// @}
+    /// @name Type Conversions
+    /// @{
+
+    operator std::string() const {
+      return str();
+    }
+
+    /// @}
+    /// @name String Predicates
+    /// @{
+
+    /// Check if this string starts with the given \p Prefix.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool startswith(StringRef Prefix) const {
+      return Length >= Prefix.Length &&
+             compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
+    }
+
+    /// Check if this string starts with the given \p Prefix, ignoring case.
+    LLVM_NODISCARD
+    bool startswith_lower(StringRef Prefix) const;
+
+    /// Check if this string ends with the given \p Suffix.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool endswith(StringRef Suffix) const {
+      return Length >= Suffix.Length &&
+        compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
+    }
+
+    /// Check if this string ends with the given \p Suffix, ignoring case.
+    LLVM_NODISCARD
+    bool endswith_lower(StringRef Suffix) const;
+
+    /// @}
+    /// @name String Searching
+    /// @{
+
+    /// Search for the first character \p C in the string.
+    ///
+    /// \returns The index of the first occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find(char C, size_t From = 0) const {
+      size_t FindBegin = std::min(From, Length);
+      if (FindBegin < Length) { // Avoid calling memchr with nullptr.
+        // Just forward to memchr, which is faster than a hand-rolled loop.
+        if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin))
+          return static_cast<const char *>(P) - Data;
+      }
+      return npos;
+    }
+
+    /// Search for the first character \p C in the string, ignoring case.
+    ///
+    /// \returns The index of the first occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_lower(char C, size_t From = 0) const;
+
+    /// Search for the first character satisfying the predicate \p F
+    ///
+    /// \returns The index of the first character satisfying \p F starting from
+    /// \p From, or npos if not found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find_if(function_ref<bool(char)> F, size_t From = 0) const {
+      StringRef S = drop_front(From);
+      while (!S.empty()) {
+        if (F(S.front()))
+          return size() - S.size();
+        S = S.drop_front();
+      }
+      return npos;
+    }
+
+    /// Search for the first character not satisfying the predicate \p F
+    ///
+    /// \returns The index of the first character not satisfying \p F starting
+    /// from \p From, or npos if not found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find_if_not(function_ref<bool(char)> F, size_t From = 0) const {
+      return find_if([F](char c) { return !F(c); }, From);
+    }
+
+    /// Search for the first string \p Str in the string.
+    ///
+    /// \returns The index of the first occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find(StringRef Str, size_t From = 0) const;
+
+    /// Search for the first string \p Str in the string, ignoring case.
+    ///
+    /// \returns The index of the first occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_lower(StringRef Str, size_t From = 0) const;
+
+    /// Search for the last character \p C in the string.
+    ///
+    /// \returns The index of the last occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind(char C, size_t From = npos) const {
+      From = std::min(From, Length);
+      size_t i = From;
+      while (i != 0) {
+        --i;
+        if (Data[i] == C)
+          return i;
+      }
+      return npos;
+    }
+
+    /// Search for the last character \p C in the string, ignoring case.
+    ///
+    /// \returns The index of the last occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind_lower(char C, size_t From = npos) const;
+
+    /// Search for the last string \p Str in the string.
+    ///
+    /// \returns The index of the last occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind(StringRef Str) const;
+
+    /// Search for the last string \p Str in the string, ignoring case.
+    ///
+    /// \returns The index of the last occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind_lower(StringRef Str) const;
+
+    /// Find the first character in the string that is \p C, or npos if not
+    /// found. Same as find.
+    LLVM_NODISCARD
+    size_t find_first_of(char C, size_t From = 0) const {
+      return find(C, From);
+    }
+
+    /// Find the first character in the string that is in \p Chars, or npos if
+    /// not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_first_of(StringRef Chars, size_t From = 0) const;
+
+    /// Find the first character in the string that is not \p C, or npos if
+    /// not found.
+    LLVM_NODISCARD
+    size_t find_first_not_of(char C, size_t From = 0) const;
+
+    /// Find the first character in the string that is not in the string
+    /// \p Chars, or npos if not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
+
+    /// Find the last character in the string that is \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_last_of(char C, size_t From = npos) const {
+      return rfind(C, From);
+    }
+
+    /// Find the last character in the string that is in \p Chars, or npos if
+    /// not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_last_of(StringRef Chars, size_t From = npos) const;
+
+    /// Find the last character in the string that is not \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_last_not_of(char C, size_t From = npos) const;
+
+    /// Find the last character in the string that is not in \p Chars, or
+    /// npos if not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
+
+    /// Return true if the given string is a substring of *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains(StringRef Other) const { return find(Other) != npos; }
+
+    /// Return true if the given character is contained in *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains(char C) const { return find_first_of(C) != npos; }
+
+    /// Return true if the given string is a substring of *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains_lower(StringRef Other) const {
+      return find_lower(Other) != npos;
+    }
+
+    /// Return true if the given character is contained in *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains_lower(char C) const { return find_lower(C) != npos; }
+
+    /// @}
+    /// @name Helpful Algorithms
+    /// @{
+
+    /// Return the number of occurrences of \p C in the string.
+    LLVM_NODISCARD
+    size_t count(char C) const {
+      size_t Count = 0;
+      for (size_t i = 0, e = Length; i != e; ++i)
+        if (Data[i] == C)
+          ++Count;
+      return Count;
+    }
+
+    /// Return the number of non-overlapped occurrences of \p Str in
+    /// the string.
+    size_t count(StringRef Str) const;
+
+    /// Parse the current string as an integer of the specified radix.  If
+    /// \p Radix is specified as zero, this does radix autosensing using
+    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+    ///
+    /// If the string is invalid or if only a subset of the string is valid,
+    /// this returns true to signify the error.  The string is considered
+    /// erroneous if empty or if it overflows T.
+    template <typename T>
+    typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+    getAsInteger(unsigned Radix, T &Result) const {
+      long long LLVal;
+      if (getAsSignedInteger(*this, Radix, LLVal) ||
+            static_cast<T>(LLVal) != LLVal)
+        return true;
+      Result = LLVal;
+      return false;
+    }
+
+    template <typename T>
+    typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+    getAsInteger(unsigned Radix, T &Result) const {
+      unsigned long long ULLVal;
+      // The additional cast to unsigned long long is required to avoid the
+      // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
+      // 'unsigned __int64' when instantiating getAsInteger with T = bool.
+      if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
+          static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+        return true;
+      Result = ULLVal;
+      return false;
+    }
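+
+    // Usage sketch (illustrative, hypothetical values): radix 0 autosenses
+    // the base using extended C rules; note that these functions return true
+    // on failure:
+    //
+    //   unsigned U;
+    //   bool Err = StringRef("0x1f").getAsInteger(0, U); // !Err, U == 31
+    //   int I;
+    //   Err = StringRef("-42").getAsInteger(10, I);      // !Err, I == -42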
+
+    /// Parse the current string as an integer of the specified radix.  If
+    /// \p Radix is specified as zero, this does radix autosensing using
+    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+    ///
+    /// If the string does not begin with a number of the specified radix,
+    /// this returns true to signify the error. The string is considered
+    /// erroneous if empty or if it overflows T.
+    /// The portion of the string representing the discovered numeric value
+    /// is removed from the beginning of the string.
+    template <typename T>
+    typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+    consumeInteger(unsigned Radix, T &Result) {
+      long long LLVal;
+      if (consumeSignedInteger(*this, Radix, LLVal) ||
+          static_cast<long long>(static_cast<T>(LLVal)) != LLVal)
+        return true;
+      Result = LLVal;
+      return false;
+    }
+
+    template <typename T>
+    typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+    consumeInteger(unsigned Radix, T &Result) {
+      unsigned long long ULLVal;
+      if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
+          static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+        return true;
+      Result = ULLVal;
+      return false;
+    }
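+
+    // Usage sketch (illustrative, hypothetical values): consumeInteger parses
+    // a leading number and advances the string past it:
+    //
+    //   StringRef S("123abc");
+    //   unsigned V;
+    //   if (!S.consumeInteger(10, V)) {
+    //     // V == 123, S == "abc"
+    //   }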
+
+    /// Parse the current string as an integer of the specified \p Radix, or of
+    /// an autosensed radix if the \p Radix given is 0.  The current value in
+    /// \p Result is discarded, and the storage is changed to be wide enough to
+    /// store the parsed integer.
+    ///
+    /// \returns true if the string does not solely consist of a valid
+    /// non-empty number in the appropriate base.
+    ///
+    /// APInt::fromString is superficially similar but assumes the
+    /// string is well-formed in the given radix.
+    bool getAsInteger(unsigned Radix, APInt &Result) const;
+
+    /// Parse the current string as an IEEE double-precision floating
+    /// point value.  The string must be a well-formed double.
+    ///
+    /// If \p AllowInexact is false, the function will fail if the string
+    /// cannot be represented exactly.  Otherwise, the function only fails
+    /// in case of an overflow or underflow.
+    bool getAsDouble(double &Result, bool AllowInexact = true) const;
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    /// Convert the given ASCII string to lowercase.
+    LLVM_NODISCARD
+    std::string lower() const;
+
+    /// Convert the given ASCII string to uppercase.
+    LLVM_NODISCARD
+    std::string upper() const;
+
+    /// @}
+    /// @name Substring Operations
+    /// @{
+
+    /// Return a reference to the substring from [Start, Start + N).
+    ///
+    /// \param Start The index of the starting character in the substring; if
+    /// the index is npos or greater than the length of the string then the
+    /// empty substring will be returned.
+    ///
+    /// \param N The number of characters to include in the substring. If \p N
+    /// exceeds the number of characters remaining in the string, the string
+    /// suffix (starting with \p Start) will be returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef substr(size_t Start, size_t N = npos) const {
+      Start = std::min(Start, Length);
+      return StringRef(Data + Start, std::min(N, Length - Start));
+    }
+
+    /// Return a StringRef equal to 'this' but with only the first \p N
+    /// elements remaining.  If \p N is greater than the length of the
+    /// string, the entire string is returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_front(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_back(size() - N);
+    }
+
+    /// Return a StringRef equal to 'this' but with only the last \p N
+    /// elements remaining.  If \p N is greater than the length of the
+    /// string, the entire string is returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_back(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_front(size() - N);
+    }
+
+    /// Return the longest prefix of 'this' such that every character
+    /// in the prefix satisfies the given predicate.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_while(function_ref<bool(char)> F) const {
+      return substr(0, find_if_not(F));
+    }
+
+    /// Return the longest prefix of 'this' such that no character in
+    /// the prefix satisfies the given predicate.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_until(function_ref<bool(char)> F) const {
+      return substr(0, find_if(F));
+    }
+
+    /// Return a StringRef equal to 'this' but with the first \p N elements
+    /// dropped.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_front(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return substr(N);
+    }
+
+    /// Return a StringRef equal to 'this' but with the last \p N elements
+    /// dropped.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_back(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return substr(0, size()-N);
+    }
+
+    /// Return a StringRef equal to 'this', but with all characters satisfying
+    /// the given predicate dropped from the beginning of the string.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_while(function_ref<bool(char)> F) const {
+      return substr(find_if_not(F));
+    }
+
+    /// Return a StringRef equal to 'this', but with all characters not
+    /// satisfying the given predicate dropped from the beginning of the string.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_until(function_ref<bool(char)> F) const {
+      return substr(find_if(F));
+    }
+
+    /// Returns true if this StringRef has the given prefix and removes that
+    /// prefix.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool consume_front(StringRef Prefix) {
+      if (!startswith(Prefix))
+        return false;
+
+      *this = drop_front(Prefix.size());
+      return true;
+    }
+
+    /// Returns true if this StringRef has the given suffix and removes that
+    /// suffix.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool consume_back(StringRef Suffix) {
+      if (!endswith(Suffix))
+        return false;
+
+      *this = drop_back(Suffix.size());
+      return true;
+    }
+
+    /// Return a reference to the substring from [Start, End).
+    ///
+    /// \param Start The index of the starting character in the substring; if
+    /// the index is npos or greater than the length of the string then the
+    /// empty substring will be returned.
+    ///
+    /// \param End The index following the last character to include in the
+    /// substring. If this is npos or exceeds the number of characters
+    /// remaining in the string, the string suffix (starting with \p Start)
+    /// will be returned. If this is less than \p Start, an empty string will
+    /// be returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef slice(size_t Start, size_t End) const {
+      Start = std::min(Start, Length);
+      End = std::min(std::max(Start, End), Length);
+      return StringRef(Data + Start, End - Start);
+    }
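+
+    // Usage sketch (illustrative): the substring family on a hypothetical
+    // input "hello world":
+    //
+    //   StringRef S("hello world");
+    //   S.substr(6);      // "world"
+    //   S.slice(0, 5);    // "hello"
+    //   S.take_front(5);  // "hello"
+    //   S.drop_front(6);  // "world"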
+
+    /// Split into two substrings around the first occurrence of a separator
+    /// character.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// maximal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator The character to split on.
+    /// \returns The split substrings.
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> split(char Separator) const {
+      size_t Idx = find(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+    }
+
+    /// Split into two substrings around the first occurrence of a separator
+    /// string.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// maximal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator - The string to split on.
+    /// \return - The split substrings.
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> split(StringRef Separator) const {
+      size_t Idx = find(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
+    }
+
+    /// Split into substrings around the occurrences of a separator string.
+    ///
+    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+    /// elements are added to A.
+    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+    ///
+    /// \param A - Where to put the substrings.
+    /// \param Separator - The string to split on.
+    /// \param MaxSplit - The maximum number of times the string is split.
+    /// \param KeepEmpty - True if empty substring should be added.
+    void split(SmallVectorImpl<StringRef> &A,
+               StringRef Separator, int MaxSplit = -1,
+               bool KeepEmpty = true) const;
+
+    /// Split into substrings around the occurrences of a separator character.
+    ///
+    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+    /// elements are added to A.
+    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+    ///
+    /// \param A - Where to put the substrings.
+    /// \param Separator - The string to split on.
+    /// \param MaxSplit - The maximum number of times the string is split.
+    /// \param KeepEmpty - True if empty substring should be added.
+    void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
+               bool KeepEmpty = true) const;
+
+    /// Split into two substrings around the last occurrence of a separator
+    /// character.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// minimal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator - The character to split on.
+    /// \return - The split substrings.
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> rsplit(char Separator) const {
+      size_t Idx = rfind(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+    }
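+
+    // Usage sketch (illustrative): splitting a hypothetical path "a/b/c":
+    //
+    //   StringRef Path("a/b/c");
+    //   Path.split('/');         // ("a", "b/c")
+    //   Path.rsplit('/');        // ("a/b", "c")
+    //   SmallVector<StringRef, 4> Parts;
+    //   Path.split(Parts, '/');  // Parts == {"a", "b", "c"}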
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// left removed.
+    LLVM_NODISCARD
+    StringRef ltrim(char Char) const {
+      return drop_front(std::min(Length, find_first_not_of(Char)));
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the left removed.
+    LLVM_NODISCARD
+    StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+      return drop_front(std::min(Length, find_first_not_of(Chars)));
+    }
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// right removed.
+    LLVM_NODISCARD
+    StringRef rtrim(char Char) const {
+      return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the right removed.
+    LLVM_NODISCARD
+    StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+      return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+    }
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// left and right removed.
+    LLVM_NODISCARD
+    StringRef trim(char Char) const {
+      return ltrim(Char).rtrim(Char);
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the left and right removed.
+    LLVM_NODISCARD
+    StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+      return ltrim(Chars).rtrim(Chars);
+    }
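+
+    // Usage sketch (illustrative): trimming whitespace or a specific
+    // character from both ends:
+    //
+    //   StringRef("  text \n").trim();  // "text"
+    //   StringRef("xxabcxx").trim('x'); // "abc"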
+
+    /// @}
+  };
+
+  /// A wrapper around a string literal that serves as a proxy for constructing
+  /// global tables of StringRefs with the length computed at compile time.
+  /// In order to avoid the invocation of a global constructor, StringLiteral
+  /// should *only* be used in a constexpr context, as such:
+  ///
+  /// constexpr StringLiteral S("test");
+  ///
+  class StringLiteral : public StringRef {
+  private:
+    constexpr StringLiteral(const char *Str, size_t N) : StringRef(Str, N) {
+    }
+
+  public:
+    template <size_t N>
+    constexpr StringLiteral(const char (&Str)[N])
+#if defined(__clang__) && __has_attribute(enable_if)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wgcc-compat"
+        __attribute((enable_if(__builtin_strlen(Str) == N - 1,
+                               "invalid string literal")))
+#pragma clang diagnostic pop
+#endif
+        : StringRef(Str, N - 1) {
+    }
+
+    // Explicit construction for strings like "foo\0bar".
+    template <size_t N>
+    static constexpr StringLiteral withInnerNUL(const char (&Str)[N]) {
+      return StringLiteral(Str, N - 1);
+    }
+  };
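+
+  // Usage sketch (illustrative): constexpr tables of strings with no global
+  // constructors; withInnerNUL preserves an embedded NUL that the checked
+  // implicit constructor rejects under Clang:
+  //
+  //   constexpr StringLiteral Names[] = {"red", "green", "blue"};
+  //   constexpr StringLiteral Pair = StringLiteral::withInnerNUL("foo\0bar");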
+
+  /// @name StringRef Comparison Operators
+  /// @{
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  inline bool operator==(StringRef LHS, StringRef RHS) {
+    return LHS.equals(RHS);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); }
+
+  inline bool operator<(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) == -1;
+  }
+
+  inline bool operator<=(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) != 1;
+  }
+
+  inline bool operator>(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) == 1;
+  }
+
+  inline bool operator>=(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) != -1;
+  }
+
+  inline std::string &operator+=(std::string &buffer, StringRef string) {
+    return buffer.append(string.data(), string.size());
+  }
+
+  /// @}
+
+  /// \brief Compute a hash_code for a StringRef.
+  LLVM_NODISCARD
+  hash_code hash_value(StringRef S);
+
+  // StringRefs can be treated like a POD type.
+  template <typename T> struct isPodLike;
+  template <> struct isPodLike<StringRef> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGREF_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringSet.h b/linux-x64/clang/include/llvm/ADT/StringSet.h
new file mode 100644
index 0000000..9af44c0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringSet.h
@@ -0,0 +1,52 @@
+//===- StringSet.h - The LLVM Compiler Driver -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  StringSet - A set-like wrapper for the StringMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGSET_H
+#define LLVM_ADT_STRINGSET_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <initializer_list>
+#include <utility>
+
+namespace llvm {
+
+  /// StringSet - A wrapper for StringMap that provides set-like functionality.
+  template <class AllocatorTy = MallocAllocator>
+  class StringSet : public StringMap<char, AllocatorTy> {
+    using base = StringMap<char, AllocatorTy>;
+
+  public:
+    StringSet() = default;
+    StringSet(std::initializer_list<StringRef> S) {
+      for (StringRef X : S)
+        insert(X);
+    }
+
+    std::pair<typename base::iterator, bool> insert(StringRef Key) {
+      assert(!Key.empty());
+      return base::insert(std::make_pair(Key, '\0'));
+    }
+
+    template <typename InputIt>
+    void insert(const InputIt &Begin, const InputIt &End) {
+      for (auto It = Begin; It != End; ++It)
+        base::insert(std::make_pair(*It, '\0'));
+    }
+  };
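+
+  // Usage sketch (illustrative): deduplicating strings; insert() returns the
+  // usual (iterator, inserted) pair from the underlying StringMap:
+  //
+  //   StringSet<> Seen;
+  //   bool First = Seen.insert("foo").second; // true
+  //   First = Seen.insert("foo").second;      // false: already present
+  //   if (Seen.count("bar")) { /* ... */ }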
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringSwitch.h b/linux-x64/clang/include/llvm/ADT/StringSwitch.h
new file mode 100644
index 0000000..9e07303
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringSwitch.h
@@ -0,0 +1,219 @@
+//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===/
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+//  This file implements the StringSwitch template, which mimics a switch()
+//  statement whose cases are string literals.
+//
+//===----------------------------------------------------------------------===/
+#ifndef LLVM_ADT_STRINGSWITCH_H
+#define LLVM_ADT_STRINGSWITCH_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+/// \brief A switch()-like statement whose cases are string literals.
+///
+/// The StringSwitch class is a simple form of a switch() statement that
+/// determines whether the given string matches one of the given string
+/// literals. The template type parameter \p T is the type of the value that
+/// will be returned from the string-switch expression. For example,
+/// the following code switches on the name of a color in \c argv[i]:
+///
+/// \code
+/// Color color = StringSwitch<Color>(argv[i])
+///   .Case("red", Red)
+///   .Case("orange", Orange)
+///   .Case("yellow", Yellow)
+///   .Case("green", Green)
+///   .Case("blue", Blue)
+///   .Case("indigo", Indigo)
+///   .Cases("violet", "purple", Violet)
+///   .Default(UnknownColor);
+/// \endcode
+template<typename T, typename R = T>
+class StringSwitch {
+  /// \brief The string we are matching.
+  const StringRef Str;
+
+  /// \brief The result of this switch statement, once known; unset before
+  /// that.
+  Optional<T> Result;
+
+public:
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  explicit StringSwitch(StringRef S)
+  : Str(S), Result() { }
+
+  // StringSwitch is not copyable.
+  StringSwitch(const StringSwitch &) = delete;
+
+  // StringSwitch is not assignable due to 'Str' being 'const'.
+  void operator=(const StringSwitch &) = delete;
+  void operator=(StringSwitch &&other) = delete;
+
+  StringSwitch(StringSwitch &&other)
+    : Str(other.Str), Result(std::move(other.Result)) { }
+
+  ~StringSwitch() = default;
+
+  // Case-sensitive case matchers
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Case(StringLiteral S, T Value) {
+    if (!Result && Str == S) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch& EndsWith(StringLiteral S, T Value) {
+    if (!Result && Str.endswith(S)) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch& StartsWith(StringLiteral S, T Value) {
+    if (!Result && Str.startswith(S)) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
+    return Case(S0, Value).Case(S1, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
+                      StringLiteral S9, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
+  }
+
+  // Case-insensitive case matchers.
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CaseLower(StringLiteral S, T Value) {
+    if (!Result && Str.equals_lower(S))
+      Result = std::move(Value);
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &EndsWithLower(StringLiteral S, T Value) {
+    if (!Result && Str.endswith_lower(S))
+      Result = std::move(Value);
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &StartsWithLower(StringLiteral S, T Value) {
+    if (!Result && Str.startswith_lower(S))
+      Result = std::move(Value);
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
+    return CaseLower(S0, Value).CaseLower(S1, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           StringLiteral S3, T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           StringLiteral S3, StringLiteral S4, T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
+  }
+
+  LLVM_NODISCARD
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  R Default(T Value) {
+    if (Result)
+      return std::move(*Result);
+    return Value;
+  }
+
+  LLVM_NODISCARD
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  operator R() {
+    assert(Result && "Fell off the end of a string-switch");
+    return std::move(*Result);
+  }
+};
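+
+// Usage sketch (illustrative): the case-insensitive matchers combined with a
+// Default value, for a hypothetical boolean flag parser reading Arg:
+//
+//   bool B = StringSwitch<bool>(Arg)
+//                .CaseLower("true", true)
+//                .CasesLower("yes", "on", true)
+//                .Default(false);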
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSWITCH_H
diff --git a/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
new file mode 100644
index 0000000..73573d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
@@ -0,0 +1,347 @@
+//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TINYPTRVECTOR_H
+#define LLVM_ADT_TINYPTRVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+/// TinyPtrVector - This class is specialized for cases where there are
+/// normally 0 or 1 element in a vector, but is general enough to go beyond that
+/// when required.
+///
+/// NOTE: This container doesn't allow you to store a null pointer into it.
+///
+template <typename EltTy>
+class TinyPtrVector {
+public:
+  using VecTy = SmallVector<EltTy, 4>;
+  using value_type = typename VecTy::value_type;
+  using PtrUnion = PointerUnion<EltTy, VecTy *>;
+
+private:
+  PtrUnion Val;
+
+public:
+  TinyPtrVector() = default;
+
+  ~TinyPtrVector() {
+    if (VecTy *V = Val.template dyn_cast<VecTy*>())
+      delete V;
+  }
+
+  TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
+    if (VecTy *V = Val.template dyn_cast<VecTy*>())
+      Val = new VecTy(*V);
+  }
+
+  TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+    if (this == &RHS)
+      return *this;
+    if (RHS.empty()) {
+      this->clear();
+      return *this;
+    }
+
+    // Try to squeeze into the single slot. If it won't fit, allocate a copied
+    // vector.
+    if (Val.template is<EltTy>()) {
+      if (RHS.size() == 1)
+        Val = RHS.front();
+      else
+        Val = new VecTy(*RHS.Val.template get<VecTy*>());
+      return *this;
+    }
+
+    // If we have a full vector allocated, try to re-use it.
+    if (RHS.Val.template is<EltTy>()) {
+      Val.template get<VecTy*>()->clear();
+      Val.template get<VecTy*>()->push_back(RHS.front());
+    } else {
+      *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+    }
+    return *this;
+  }
+
+  TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+    RHS.Val = (EltTy)nullptr;
+  }
+
+  TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+    if (this == &RHS)
+      return *this;
+    if (RHS.empty()) {
+      this->clear();
+      return *this;
+    }
+
+    // If this vector has been allocated on the heap, re-use it if cheap. If it
+    // would require more copying, just delete it and we'll steal the other
+    // side.
+    if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+      if (RHS.Val.template is<EltTy>()) {
+        V->clear();
+        V->push_back(RHS.front());
+        RHS.Val = (EltTy)nullptr;
+        return *this;
+      }
+      delete V;
+    }
+
+    Val = RHS.Val;
+    RHS.Val = (EltTy)nullptr;
+    return *this;
+  }
+
+  /// Constructor from an ArrayRef.
+  ///
+  /// This also is a constructor for individual array elements due to the single
+  /// element constructor for ArrayRef.
+  explicit TinyPtrVector(ArrayRef<EltTy> Elts)
+      : Val(Elts.empty()
+                ? PtrUnion()
+                : Elts.size() == 1
+                      ? PtrUnion(Elts[0])
+                      : PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}
+
+  TinyPtrVector(size_t Count, EltTy Value)
+      : Val(Count == 0 ? PtrUnion()
+                       : Count == 1 ? PtrUnion(Value)
+                                    : PtrUnion(new VecTy(Count, Value))) {}
+
+  // implicit conversion operator to ArrayRef.
+  operator ArrayRef<EltTy>() const {
+    if (Val.isNull())
+      return None;
+    if (Val.template is<EltTy>())
+      return *Val.getAddrOfPtr1();
+    return *Val.template get<VecTy*>();
+  }
+
+  // implicit conversion operator to MutableArrayRef.
+  operator MutableArrayRef<EltTy>() {
+    if (Val.isNull())
+      return None;
+    if (Val.template is<EltTy>())
+      return *Val.getAddrOfPtr1();
+    return *Val.template get<VecTy*>();
+  }
+
+  // Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
+  template<typename U,
+           typename std::enable_if<
+               std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
+               bool>::type = false>
+  operator ArrayRef<U>() const {
+    return operator ArrayRef<EltTy>();
+  }
+
+  bool empty() const {
+    // This vector can be empty if it contains no element, or if it
+    // contains a pointer to an empty vector.
+    if (Val.isNull()) return true;
+    if (VecTy *Vec = Val.template dyn_cast<VecTy*>())
+      return Vec->empty();
+    return false;
+  }
+
+  unsigned size() const {
+    if (empty())
+      return 0;
+    if (Val.template is<EltTy>())
+      return 1;
+    return Val.template get<VecTy*>()->size();
+  }
+
+  using iterator = EltTy *;
+  using const_iterator = const EltTy *;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  iterator begin() {
+    if (Val.template is<EltTy>())
+      return Val.getAddrOfPtr1();
+
+    return Val.template get<VecTy *>()->begin();
+  }
+
+  iterator end() {
+    if (Val.template is<EltTy>())
+      return begin() + (Val.isNull() ? 0 : 1);
+
+    return Val.template get<VecTy *>()->end();
+  }
+
+  const_iterator begin() const {
+    return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
+  }
+
+  const_iterator end() const {
+    return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
+  }
+
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  EltTy operator[](unsigned i) const {
+    assert(!Val.isNull() && "can't index into an empty vector");
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      assert(i == 0 && "tinyvector index out of range");
+      return V;
+    }
+
+    assert(i < Val.template get<VecTy*>()->size() &&
+           "tinyvector index out of range");
+    return (*Val.template get<VecTy*>())[i];
+  }
+
+  EltTy front() const {
+    assert(!empty() && "vector empty");
+    if (EltTy V = Val.template dyn_cast<EltTy>())
+      return V;
+    return Val.template get<VecTy*>()->front();
+  }
+
+  EltTy back() const {
+    assert(!empty() && "vector empty");
+    if (EltTy V = Val.template dyn_cast<EltTy>())
+      return V;
+    return Val.template get<VecTy*>()->back();
+  }
+
+  void push_back(EltTy NewVal) {
+    assert(NewVal && "Can't add a null value");
+
+    // If we have nothing, add something.
+    if (Val.isNull()) {
+      Val = NewVal;
+      return;
+    }
+
+    // If we have a single value, convert to a vector.
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      Val = new VecTy();
+      Val.template get<VecTy*>()->push_back(V);
+    }
+
+    // Add the new value; we know we have a vector now.
+    Val.template get<VecTy*>()->push_back(NewVal);
+  }
+
+  void pop_back() {
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>())
+      Val = (EltTy)nullptr;
+    else if (VecTy *Vec = Val.template get<VecTy*>())
+      Vec->pop_back();
+  }
+
+  void clear() {
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>()) {
+      Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      // If we have a vector form, just clear it.
+      Vec->clear();
+    }
+    // Otherwise, we're already empty.
+  }
+
+  iterator erase(iterator I) {
+    assert(I >= begin() && "Iterator to erase is out of bounds.");
+    assert(I < end() && "Erasing at past-the-end iterator.");
+
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>()) {
+      if (I == begin())
+        Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      // Multiple items are in a vector; just do the erase. There is no
+      // benefit to collapsing back to a single pointer.
+      return Vec->erase(I);
+    }
+    return end();
+  }
+
+  iterator erase(iterator S, iterator E) {
+    assert(S >= begin() && "Range to erase is out of bounds.");
+    assert(S <= E && "Trying to erase invalid range.");
+    assert(E <= end() && "Trying to erase past the end.");
+
+    if (Val.template is<EltTy>()) {
+      if (S == begin() && S != E)
+        Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      return Vec->erase(S, E);
+    }
+    return end();
+  }
+
+  iterator insert(iterator I, const EltTy &Elt) {
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+    if (I == end()) {
+      push_back(Elt);
+      return std::prev(end());
+    }
+    assert(!Val.isNull() && "Null value with non-end insert iterator.");
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      assert(I == begin());
+      Val = Elt;
+      push_back(V);
+      return begin();
+    }
+
+    return Val.template get<VecTy*>()->insert(I, Elt);
+  }
+
+  template<typename ItTy>
+  iterator insert(iterator I, ItTy From, ItTy To) {
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+    if (From == To)
+      return I;
+
+    // If we have a single value, convert to a vector.
+    ptrdiff_t Offset = I - begin();
+    if (Val.isNull()) {
+      if (std::next(From) == To) {
+        Val = *From;
+        return begin();
+      }
+
+      Val = new VecTy();
+    } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      Val = new VecTy();
+      Val.template get<VecTy*>()->push_back(V);
+    }
+    return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
+  }
+};
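+
+// A minimal usage sketch (hedged; 'A', 'B' and the loop body are made up).
+// A TinyPtrVector stores its only element inline and switches to an
+// out-of-line, heap-allocated vector once a second element is added:
+//
+//   TinyPtrVector<int *> Ints;
+//   int A = 1, B = 2;
+//   Ints.push_back(&A);  // single inline element; no allocation
+//   Ints.push_back(&B);  // converts to the out-of-line vector form
+//   for (int *P : Ints)
+//     *P += 1;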
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TINYPTRVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/Triple.h b/linux-x64/clang/include/llvm/ADT/Triple.h
new file mode 100644
index 0000000..8ba50d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Triple.h
@@ -0,0 +1,809 @@
+//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TRIPLE_H
+#define LLVM_ADT_TRIPLE_H
+
+#include "llvm/ADT/Twine.h"
+
+// Some system headers or GCC predefined macros conflict with identifiers in
+// this file.  Undefine them here.
+#undef NetBSD
+#undef mips
+#undef sparc
+
+namespace llvm {
+
+/// Triple - Helper class for working with autoconf configuration names. For
+/// historical reasons, we also call these 'triples' (they used to contain
+/// exactly three fields).
+///
+/// Configuration names are strings in the canonical form:
+///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM
+/// or
+///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
+///
+/// This class is used for clients which want to support arbitrary
+/// configuration names, but also want to implement certain special
+/// behavior for particular configurations. This class isolates the mapping
+/// from the components of the configuration name to well known IDs.
+///
+/// At its core the Triple class is designed to be a wrapper for a triple
+/// string; the constructor does not change or normalize the triple string.
+/// Clients that need to handle the non-canonical triples that users often
+/// specify should use the normalize method.
+///
+/// See autoconf/config.guess for a glimpse into what configuration names
+/// look like in practice.
+class Triple {
+public:
+  enum ArchType {
+    UnknownArch,
+
+    arm,            // ARM (little endian): arm, armv.*, xscale
+    armeb,          // ARM (big endian): armeb
+    aarch64,        // AArch64 (little endian): aarch64
+    aarch64_be,     // AArch64 (big endian): aarch64_be
+    arc,            // ARC: Synopsys ARC
+    avr,            // AVR: Atmel AVR microcontroller
+    bpfel,          // eBPF or extended BPF or 64-bit BPF (little endian)
+    bpfeb,          // eBPF or extended BPF or 64-bit BPF (big endian)
+    hexagon,        // Hexagon: hexagon
+    mips,           // MIPS: mips, mipsallegrex
+    mipsel,         // MIPSEL: mipsel, mipsallegrexel
+    mips64,         // MIPS64: mips64
+    mips64el,       // MIPS64EL: mips64el
+    msp430,         // MSP430: msp430
+    nios2,          // NIOSII: nios2
+    ppc,            // PPC: powerpc
+    ppc64,          // PPC64: powerpc64, ppu
+    ppc64le,        // PPC64LE: powerpc64le
+    r600,           // R600: AMD GPUs HD2XXX - HD6XXX
+    amdgcn,         // AMDGCN: AMD GCN GPUs
+    riscv32,        // RISC-V (32-bit): riscv32
+    riscv64,        // RISC-V (64-bit): riscv64
+    sparc,          // Sparc: sparc
+    sparcv9,        // Sparcv9: Sparcv9
+    sparcel,        // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
+    systemz,        // SystemZ: s390x
+    tce,            // TCE (http://tce.cs.tut.fi/): tce
+    tcele,          // TCE little endian (http://tce.cs.tut.fi/): tcele
+    thumb,          // Thumb (little endian): thumb, thumbv.*
+    thumbeb,        // Thumb (big endian): thumbeb
+    x86,            // X86: i[3-9]86
+    x86_64,         // X86-64: amd64, x86_64
+    xcore,          // XCore: xcore
+    nvptx,          // NVPTX: 32-bit
+    nvptx64,        // NVPTX: 64-bit
+    le32,           // le32: generic little-endian 32-bit CPU (PNaCl)
+    le64,           // le64: generic little-endian 64-bit CPU (PNaCl)
+    amdil,          // AMDIL
+    amdil64,        // AMDIL with 64-bit pointers
+    hsail,          // AMD HSAIL
+    hsail64,        // AMD HSAIL with 64-bit pointers
+    spir,           // SPIR: standard portable IR for OpenCL 32-bit version
+    spir64,         // SPIR: standard portable IR for OpenCL 64-bit version
+    kalimba,        // Kalimba: generic kalimba
+    shave,          // SHAVE: Movidius vector VLIW processors
+    lanai,          // Lanai: Lanai 32-bit
+    wasm32,         // WebAssembly with 32-bit pointers
+    wasm64,         // WebAssembly with 64-bit pointers
+    renderscript32, // 32-bit RenderScript
+    renderscript64, // 64-bit RenderScript
+    LastArchType = renderscript64
+  };
+  enum SubArchType {
+    NoSubArch,
+
+    ARMSubArch_v8_3a,
+    ARMSubArch_v8_2a,
+    ARMSubArch_v8_1a,
+    ARMSubArch_v8,
+    ARMSubArch_v8r,
+    ARMSubArch_v8m_baseline,
+    ARMSubArch_v8m_mainline,
+    ARMSubArch_v7,
+    ARMSubArch_v7em,
+    ARMSubArch_v7m,
+    ARMSubArch_v7s,
+    ARMSubArch_v7k,
+    ARMSubArch_v7ve,
+    ARMSubArch_v6,
+    ARMSubArch_v6m,
+    ARMSubArch_v6k,
+    ARMSubArch_v6t2,
+    ARMSubArch_v5,
+    ARMSubArch_v5te,
+    ARMSubArch_v4t,
+
+    KalimbaSubArch_v3,
+    KalimbaSubArch_v4,
+    KalimbaSubArch_v5
+  };
+  enum VendorType {
+    UnknownVendor,
+
+    Apple,
+    PC,
+    SCEI,
+    BGP,
+    BGQ,
+    Freescale,
+    IBM,
+    ImaginationTechnologies,
+    MipsTechnologies,
+    NVIDIA,
+    CSR,
+    Myriad,
+    AMD,
+    Mesa,
+    SUSE,
+    LastVendorType = SUSE
+  };
+  enum OSType {
+    UnknownOS,
+
+    Ananas,
+    CloudABI,
+    Darwin,
+    DragonFly,
+    FreeBSD,
+    Fuchsia,
+    IOS,
+    KFreeBSD,
+    Linux,
+    Lv2,        // PS3
+    MacOSX,
+    NetBSD,
+    OpenBSD,
+    Solaris,
+    Win32,
+    Haiku,
+    Minix,
+    RTEMS,
+    NaCl,       // Native Client
+    CNK,        // BG/P Compute-Node Kernel
+    AIX,
+    CUDA,       // NVIDIA CUDA
+    NVCL,       // NVIDIA OpenCL
+    AMDHSA,     // AMD HSA Runtime
+    PS4,
+    ELFIAMCU,
+    TvOS,       // Apple tvOS
+    WatchOS,    // Apple watchOS
+    Mesa3D,
+    Contiki,
+    AMDPAL,     // AMD PAL Runtime
+    LastOSType = AMDPAL
+  };
+  enum EnvironmentType {
+    UnknownEnvironment,
+
+    GNU,
+    GNUABIN32,
+    GNUABI64,
+    GNUEABI,
+    GNUEABIHF,
+    GNUX32,
+    CODE16,
+    EABI,
+    EABIHF,
+    Android,
+    Musl,
+    MuslEABI,
+    MuslEABIHF,
+
+    MSVC,
+    Itanium,
+    Cygnus,
+    CoreCLR,
+    Simulator,  // Simulator variants of other systems, e.g., Apple's iOS
+    LastEnvironmentType = Simulator
+  };
+  enum ObjectFormatType {
+    UnknownObjectFormat,
+
+    COFF,
+    ELF,
+    MachO,
+    Wasm,
+  };
+
+private:
+  std::string Data;
+
+  /// The parsed arch type.
+  ArchType Arch;
+
+  /// The parsed subarchitecture type.
+  SubArchType SubArch;
+
+  /// The parsed vendor type.
+  VendorType Vendor;
+
+  /// The parsed OS type.
+  OSType OS;
+
+  /// The parsed Environment type.
+  EnvironmentType Environment;
+
+  /// The object format type.
+  ObjectFormatType ObjectFormat;
+
+public:
+  /// @name Constructors
+  /// @{
+
+  /// Default constructor is the same as an empty string and leaves all
+  /// triple fields unknown.
+  Triple()
+      : Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
+        ObjectFormat() {}
+
+  explicit Triple(const Twine &Str);
+  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
+  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
+         const Twine &EnvironmentStr);
+
+  bool operator==(const Triple &Other) const {
+    return Arch == Other.Arch && SubArch == Other.SubArch &&
+           Vendor == Other.Vendor && OS == Other.OS &&
+           Environment == Other.Environment &&
+           ObjectFormat == Other.ObjectFormat;
+  }
+
+  bool operator!=(const Triple &Other) const {
+    return !(*this == Other);
+  }
+
+  /// @}
+  /// @name Normalization
+  /// @{
+
+  /// normalize - Turn an arbitrary machine specification into the canonical
+  /// triple form (or something sensible that the Triple class understands if
+  /// nothing better can reasonably be done).  In particular, it handles the
+  /// common case in which otherwise valid components are in the wrong order.
+  static std::string normalize(StringRef Str);
+
+  /// Return the normalized form of this triple's string.
+  std::string normalize() const { return normalize(Data); }
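+
+  // A hedged usage sketch (the triple string below is an arbitrary example):
+  //
+  //   // "i386--linux" has an empty vendor component; normalize() is
+  //   // expected to fill missing/unrecognized components with "unknown",
+  //   // yielding something like "i386-unknown-linux":
+  //   Triple Parsed(Triple::normalize("i386--linux"));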
+
+  /// @}
+  /// @name Typed Component Access
+  /// @{
+
+  /// getArch - Get the parsed architecture type of this triple.
+  ArchType getArch() const { return Arch; }
+
+  /// getSubArch - get the parsed subarchitecture type for this triple.
+  SubArchType getSubArch() const { return SubArch; }
+
+  /// getVendor - Get the parsed vendor type of this triple.
+  VendorType getVendor() const { return Vendor; }
+
+  /// getOS - Get the parsed operating system type of this triple.
+  OSType getOS() const { return OS; }
+
+  /// hasEnvironment - Does this triple have the optional environment
+  /// (fourth) component?
+  bool hasEnvironment() const {
+    return getEnvironmentName() != "";
+  }
+
+  /// getEnvironment - Get the parsed environment type of this triple.
+  EnvironmentType getEnvironment() const { return Environment; }
+
+  /// Parse the version number from the environment component of the
+  /// triple, if present.
+  ///
+  /// For example, an "android21" environment would return (21, 0, 0).
+  ///
+  /// If an entry is not defined, it will be returned as 0.
+  void getEnvironmentVersion(unsigned &Major, unsigned &Minor,
+                             unsigned &Micro) const;
+
+  /// getFormat - Get the object format for this triple.
+  ObjectFormatType getObjectFormat() const { return ObjectFormat; }
+
+  /// getOSVersion - Parse the version number from the OS name component of the
+  /// triple, if present.
+  ///
+  /// For example, "fooos1.2.3" would return (1, 2, 3).
+  ///
+  /// If an entry is not defined, it will be returned as 0.
+  void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+
+  /// getOSMajorVersion - Return just the major version number, this is
+  /// specialized because it is a common query.
+  unsigned getOSMajorVersion() const {
+    unsigned Maj, Min, Micro;
+    getOSVersion(Maj, Min, Micro);
+    return Maj;
+  }
+
+  /// getMacOSXVersion - Parse the version number as with getOSVersion and then
+  /// translate generic "darwin" versions to the corresponding OS X versions.
+  /// This may also be called with IOS triples but the OS X version number is
+  /// just set to a constant 10.4.0 in that case.  Returns true if successful.
+  bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
+                        unsigned &Micro) const;
+
+  /// getiOSVersion - Parse the version number as with getOSVersion.  This should
+  /// only be called with IOS or generic triples.
+  void getiOSVersion(unsigned &Major, unsigned &Minor,
+                     unsigned &Micro) const;
+
+  /// getWatchOSVersion - Parse the version number as with getOSVersion.  This
+  /// should only be called with WatchOS or generic triples.
+  void getWatchOSVersion(unsigned &Major, unsigned &Minor,
+                         unsigned &Micro) const;
+
+  /// @}
+  /// @name Direct Component Access
+  /// @{
+
+  const std::string &str() const { return Data; }
+
+  const std::string &getTriple() const { return Data; }
+
+  /// getArchName - Get the architecture (first) component of the
+  /// triple.
+  StringRef getArchName() const;
+
+  /// getVendorName - Get the vendor (second) component of the triple.
+  StringRef getVendorName() const;
+
+  /// getOSName - Get the operating system (third) component of the
+  /// triple.
+  StringRef getOSName() const;
+
+  /// getEnvironmentName - Get the optional environment (fourth)
+  /// component of the triple, or "" if empty.
+  StringRef getEnvironmentName() const;
+
+  /// getOSAndEnvironmentName - Get the operating system and optional
+  /// environment components as a single string (separated by a '-'
+  /// if the environment component is present).
+  StringRef getOSAndEnvironmentName() const;
+
+  /// @}
+  /// @name Convenience Predicates
+  /// @{
+
+  /// Test whether the architecture is 64-bit
+  ///
+  /// Note that this tests for 64-bit pointer width, and nothing else. We
+  /// intentionally expose only three predicates: 64-bit, 32-bit, and 16-bit.
+  /// The inner details of pointer width for particular architectures are not
+  /// summed up in the triple, so only a coarse-grained predicate system is
+  /// provided.
+  bool isArch64Bit() const;
+
+  /// Test whether the architecture is 32-bit
+  ///
+  /// Note that this tests for 32-bit pointer width, and nothing else.
+  bool isArch32Bit() const;
+
+  /// Test whether the architecture is 16-bit
+  ///
+  /// Note that this tests for 16-bit pointer width, and nothing else.
+  bool isArch16Bit() const;
+
+  /// isOSVersionLT - Helper function for doing comparisons against version
+  /// numbers included in the target triple.
+  bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
+                     unsigned Micro = 0) const {
+    unsigned LHS[3];
+    getOSVersion(LHS[0], LHS[1], LHS[2]);
+
+    if (LHS[0] != Major)
+      return LHS[0] < Major;
+    if (LHS[1] != Minor)
+      return LHS[1] < Minor;
+    if (LHS[2] != Micro)
+      return LHS[2] < Micro;
+
+    return false;
+  }
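+
+  // Worked example (values assumed): for "x86_64-apple-macosx10.11.6",
+  // getOSVersion() yields (10, 11, 6), so isOSVersionLT(10, 12) is true and
+  // isOSVersionLT(10, 11) is false; fields compare lexicographically.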
+
+  bool isOSVersionLT(const Triple &Other) const {
+    unsigned RHS[3];
+    Other.getOSVersion(RHS[0], RHS[1], RHS[2]);
+    return isOSVersionLT(RHS[0], RHS[1], RHS[2]);
+  }
+
+  /// isMacOSXVersionLT - Comparison function for checking OS X version
+  /// compatibility, which handles supporting skewed version numbering schemes
+  /// used by the "darwin" triples.
+  bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
+                         unsigned Micro = 0) const {
+    assert(isMacOSX() && "Not an OS X triple!");
+
+    // If this is OS X, expect a sane version number.
+    if (getOS() == Triple::MacOSX)
+      return isOSVersionLT(Major, Minor, Micro);
+
+    // Otherwise, compare to the "Darwin" number.
+    assert(Major == 10 && "Unexpected major version");
+    return isOSVersionLT(Minor + 4, Micro, 0);
+  }
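+
+  // Worked example (triple assumed): a "darwin14" triple reports OS version
+  // (14, 0, 0); with the Minor + 4 skew above, isMacOSXVersionLT(10, 10)
+  // compares (14, 0, 0) against (14, 0), i.e. Darwin 14 maps to OS X 10.10.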
+
+  /// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
+  /// "darwin" and "osx" as OS X triples.
+  bool isMacOSX() const {
+    return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
+  }
+
+  /// Is this an iOS triple.
+  /// Note: This identifies tvOS as a variant of iOS. If that ever
+  /// changes, i.e., if the two operating systems diverge or their version
+  /// numbers get out of sync, that will need to be changed.
+  /// watchOS has completely different version numbers so it is not included.
+  bool isiOS() const {
+    return getOS() == Triple::IOS || isTvOS();
+  }
+
+  /// Is this an Apple tvOS triple.
+  bool isTvOS() const {
+    return getOS() == Triple::TvOS;
+  }
+
+  /// Is this an Apple watchOS triple.
+  bool isWatchOS() const {
+    return getOS() == Triple::WatchOS;
+  }
+
+  bool isWatchABI() const {
+    return getSubArch() == Triple::ARMSubArch_v7k;
+  }
+
+  /// isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
+  bool isOSDarwin() const {
+    return isMacOSX() || isiOS() || isWatchOS();
+  }
+
+  bool isSimulatorEnvironment() const {
+    return getEnvironment() == Triple::Simulator;
+  }
+
+  bool isOSNetBSD() const {
+    return getOS() == Triple::NetBSD;
+  }
+
+  bool isOSOpenBSD() const {
+    return getOS() == Triple::OpenBSD;
+  }
+
+  bool isOSFreeBSD() const {
+    return getOS() == Triple::FreeBSD;
+  }
+
+  bool isOSFuchsia() const {
+    return getOS() == Triple::Fuchsia;
+  }
+
+  bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
+
+  bool isOSSolaris() const {
+    return getOS() == Triple::Solaris;
+  }
+
+  bool isOSIAMCU() const {
+    return getOS() == Triple::ELFIAMCU;
+  }
+
+  bool isOSUnknown() const { return getOS() == Triple::UnknownOS; }
+
+  bool isGNUEnvironment() const {
+    EnvironmentType Env = getEnvironment();
+    return Env == Triple::GNU || Env == Triple::GNUABIN32 ||
+           Env == Triple::GNUABI64 || Env == Triple::GNUEABI ||
+           Env == Triple::GNUEABIHF || Env == Triple::GNUX32;
+  }
+
+  bool isOSContiki() const {
+    return getOS() == Triple::Contiki;
+  }
+
+  /// Tests whether the OS is Haiku.
+  bool isOSHaiku() const {
+    return getOS() == Triple::Haiku;
+  }
+
+  /// Checks if the environment could be MSVC.
+  bool isWindowsMSVCEnvironment() const {
+    return getOS() == Triple::Win32 &&
+           (getEnvironment() == Triple::UnknownEnvironment ||
+            getEnvironment() == Triple::MSVC);
+  }
+
+  /// Checks if the environment is MSVC.
+  bool isKnownWindowsMSVCEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::MSVC;
+  }
+
+  bool isWindowsCoreCLREnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::CoreCLR;
+  }
+
+  bool isWindowsItaniumEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::Itanium;
+  }
+
+  bool isWindowsCygwinEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::Cygnus;
+  }
+
+  bool isWindowsGNUEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::GNU;
+  }
+
+  /// Tests for either Cygwin or MinGW OS
+  bool isOSCygMing() const {
+    return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
+  }
+
+  /// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
+  bool isOSMSVCRT() const {
+    return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
+           isWindowsItaniumEnvironment();
+  }
+
+  /// Tests whether the OS is Windows.
+  bool isOSWindows() const {
+    return getOS() == Triple::Win32;
+  }
+
+  /// Tests whether the OS is NaCl (Native Client)
+  bool isOSNaCl() const {
+    return getOS() == Triple::NaCl;
+  }
+
+  /// Tests whether the OS is Linux.
+  bool isOSLinux() const {
+    return getOS() == Triple::Linux;
+  }
+
+  /// Tests whether the OS is kFreeBSD.
+  bool isOSKFreeBSD() const {
+    return getOS() == Triple::KFreeBSD;
+  }
+
+  /// Tests whether the OS uses glibc.
+  bool isOSGlibc() const {
+    return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD) &&
+           !isAndroid();
+  }
+
+  /// Tests whether the OS uses the ELF binary format.
+  bool isOSBinFormatELF() const {
+    return getObjectFormat() == Triple::ELF;
+  }
+
+  /// Tests whether the OS uses the COFF binary format.
+  bool isOSBinFormatCOFF() const {
+    return getObjectFormat() == Triple::COFF;
+  }
+
+  /// Tests whether the environment is MachO.
+  bool isOSBinFormatMachO() const {
+    return getObjectFormat() == Triple::MachO;
+  }
+
+  /// Tests whether the OS uses the Wasm binary format.
+  bool isOSBinFormatWasm() const {
+    return getObjectFormat() == Triple::Wasm;
+  }
+
+  /// Tests whether the target is the PS4 CPU
+  bool isPS4CPU() const {
+    return getArch() == Triple::x86_64 &&
+           getVendor() == Triple::SCEI &&
+           getOS() == Triple::PS4;
+  }
+
+  /// Tests whether the target is the PS4 platform
+  bool isPS4() const {
+    return getVendor() == Triple::SCEI &&
+           getOS() == Triple::PS4;
+  }
+
+  /// Tests whether the target is Android
+  bool isAndroid() const { return getEnvironment() == Triple::Android; }
+
+  bool isAndroidVersionLT(unsigned Major) const {
+    assert(isAndroid() && "Not an Android triple!");
+
+    unsigned Env[3];
+    getEnvironmentVersion(Env[0], Env[1], Env[2]);
+
+    // 64-bit targets did not exist before API level 21 (Lollipop).
+    if (isArch64Bit() && Env[0] < 21)
+      Env[0] = 21;
+
+    return Env[0] < Major;
+  }
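+
+  // Worked example (triple assumed): "aarch64-linux-android" carries no
+  // explicit API level, so the environment version parses as 0; the 64-bit
+  // clamp above raises it to 21, making isAndroidVersionLT(24) true and
+  // isAndroidVersionLT(21) false.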
+
+  /// Tests whether the environment is musl-libc
+  bool isMusl() const {
+    return getEnvironment() == Triple::Musl ||
+           getEnvironment() == Triple::MuslEABI ||
+           getEnvironment() == Triple::MuslEABIHF;
+  }
+
+  /// Tests whether the target is NVPTX (32- or 64-bit).
+  bool isNVPTX() const {
+    return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
+  }
+
+  /// Tests whether the target is Thumb (little and big endian).
+  bool isThumb() const {
+    return getArch() == Triple::thumb || getArch() == Triple::thumbeb;
+  }
+
+  /// Tests whether the target is ARM (little and big endian).
+  bool isARM() const {
+    return getArch() == Triple::arm || getArch() == Triple::armeb;
+  }
+
+  /// Tests whether the target is AArch64 (little and big endian).
+  bool isAArch64() const {
+    return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be;
+  }
+
+  /// Tests whether the target supports comdat
+  bool supportsCOMDAT() const {
+    return !isOSBinFormatMachO();
+  }
+
+  /// Tests whether the target uses emulated TLS as default.
+  bool hasDefaultEmulatedTLS() const {
+    return isAndroid() || isOSOpenBSD() || isWindowsCygwinEnvironment();
+  }
+
+  /// @}
+  /// @name Mutators
+  /// @{
+
+  /// setArch - Set the architecture (first) component of the triple
+  /// to a known type.
+  void setArch(ArchType Kind);
+
+  /// setVendor - Set the vendor (second) component of the triple to a
+  /// known type.
+  void setVendor(VendorType Kind);
+
+  /// setOS - Set the operating system (third) component of the triple
+  /// to a known type.
+  void setOS(OSType Kind);
+
+  /// setEnvironment - Set the environment (fourth) component of the triple
+  /// to a known type.
+  void setEnvironment(EnvironmentType Kind);
+
+  /// setObjectFormat - Set the object file format
+  void setObjectFormat(ObjectFormatType Kind);
+
+  /// setTriple - Set all components to the new triple \p Str.
+  void setTriple(const Twine &Str);
+
+  /// setArchName - Set the architecture (first) component of the
+  /// triple by name.
+  void setArchName(StringRef Str);
+
+  /// setVendorName - Set the vendor (second) component of the triple
+  /// by name.
+  void setVendorName(StringRef Str);
+
+  /// setOSName - Set the operating system (third) component of the
+  /// triple by name.
+  void setOSName(StringRef Str);
+
+  /// setEnvironmentName - Set the optional environment (fourth)
+  /// component of the triple by name.
+  void setEnvironmentName(StringRef Str);
+
+  /// setOSAndEnvironmentName - Set the operating system and optional
+  /// environment components with a single string.
+  void setOSAndEnvironmentName(StringRef Str);
+
+  /// @}
+  /// @name Helpers to build variants of a particular triple.
+  /// @{
+
+  /// Form a triple with a 32-bit variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a 32-bit architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple get32BitArchVariant() const;
+
+  /// Form a triple with a 64-bit variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a 64-bit architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple get64BitArchVariant() const;
+
+  /// Form a triple with a big endian variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a big endian architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple getBigEndianArchVariant() const;
+
+  /// Form a triple with a little endian variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a little endian architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple getLittleEndianArchVariant() const;
+
+  /// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+  ///
+  /// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
+  /// string then the triple's arch name is used.
+  StringRef getARMCPUForArch(StringRef Arch = StringRef()) const;
+
+  /// Tests whether the target triple is little endian.
+  ///
+  /// \returns true if the triple is little endian, false otherwise.
+  bool isLittleEndian() const;
+
+  /// Test whether target triples are compatible.
+  bool isCompatibleWith(const Triple &Other) const;
+
+  /// Merge target triples.
+  std::string merge(const Triple &Other) const;
+
+  /// @}
+  /// @name Static helpers for IDs.
+  /// @{
+
+  /// getArchTypeName - Get the canonical name for the \p Kind architecture.
+  static StringRef getArchTypeName(ArchType Kind);
+
+  /// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
+  /// architecture. This is the prefix used by the architecture specific
+  /// builtins, and is suitable for passing to \see
+  /// Intrinsic::getIntrinsicForGCCBuiltin().
+  ///
+  /// \return - The architecture prefix, or 0 if none is defined.
+  static StringRef getArchTypePrefix(ArchType Kind);
+
+  /// getVendorTypeName - Get the canonical name for the \p Kind vendor.
+  static StringRef getVendorTypeName(VendorType Kind);
+
+  /// getOSTypeName - Get the canonical name for the \p Kind operating system.
+  static StringRef getOSTypeName(OSType Kind);
+
+  /// getEnvironmentTypeName - Get the canonical name for the \p Kind
+  /// environment.
+  static StringRef getEnvironmentTypeName(EnvironmentType Kind);
+
+  /// @}
+  /// @name Static helpers for converting alternate architecture names.
+  /// @{
+
+  /// getArchTypeForLLVMName - The canonical type for the given LLVM
+  /// architecture name (e.g., "x86").
+  static ArchType getArchTypeForLLVMName(StringRef Str);
+
+  /// @}
+};
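+
+// A minimal usage sketch (hedged; the triple string is an arbitrary example):
+//
+//   Triple T("aarch64-unknown-linux-gnu");
+//   if (T.getArch() == Triple::aarch64 && T.isOSLinux() &&
+//       T.isGNUEnvironment()) {
+//     // Target-specific handling for 64-bit ARM Linux/glibc goes here.
+//   }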
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TRIPLE_H
diff --git a/linux-x64/clang/include/llvm/ADT/Twine.h b/linux-x64/clang/include/llvm/ADT/Twine.h
new file mode 100644
index 0000000..b60fd09
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Twine.h
@@ -0,0 +1,542 @@
+//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TWINE_H
+#define LLVM_ADT_TWINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+  class formatv_object_base;
+  class raw_ostream;
+
+  /// Twine - A lightweight data structure for efficiently representing the
+  /// concatenation of temporary values as strings.
+  ///
+  /// A Twine is a kind of rope; it represents a concatenated string using a
+  /// binary tree, where the string is the preorder of the nodes. Since the
+  /// Twine can be efficiently rendered into a buffer when its result is used,
+  /// it avoids the cost of generating temporary values for intermediate string
+  /// results -- particularly in cases when the Twine result is never
+  /// required. By explicitly tracking the type of leaf nodes, we can also avoid
+  /// the creation of temporary strings for conversion operations (such as
+  /// appending an integer to a string).
+  ///
+  /// A Twine is not intended for use directly and should not be stored; its
+  /// implementation relies on the ability to store pointers to temporary stack
+  /// objects which may be deallocated at the end of a statement. Twines should
+  /// only be used as const reference arguments, when an API wishes to accept
+  /// possibly-concatenated strings.
+  ///
+  /// Twines support a special 'null' value, which always concatenates to form
+  /// itself, and renders as an empty string. This can be returned from APIs to
+  /// effectively nullify any concatenations performed on the result.
+  ///
+  /// \b Implementation
+  ///
+  /// Given the nature of a Twine, it is not possible for the Twine's
+  /// concatenation method to construct interior nodes; the result must be
+  /// represented inside the returned value. For this reason a Twine object
+  /// actually holds two values, the left- and right-hand sides of a
+  /// concatenation. We also have nullary Twine objects, which are effectively
+  /// sentinel values that represent empty strings.
+  ///
+  /// Thus, a Twine can effectively have zero, one, or two children. The \see
+  /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
+  /// testing the number of children.
+  ///
+  /// We maintain a number of invariants on Twine objects (FIXME: Why):
+  ///  - Nullary twines are always represented with their Kind on the left-hand
+  ///    side, and the Empty kind on the right-hand side.
+  ///  - Unary twines are always represented with the value on the left-hand
+  ///    side, and the Empty kind on the right-hand side.
+  ///  - If a Twine has another Twine as a child, that child should always be
+  ///    binary (otherwise it could have been folded into the parent).
+  ///
+  /// These invariants are checked by \see isValid().
+  ///
+  /// \b Efficiency Considerations
+  ///
+  /// The Twine is designed to yield efficient and small code for common
+  /// situations. For this reason, the concat() method is inlined so that
+  /// concatenations of leaf nodes can be optimized into stores directly into a
+  /// single stack allocated object.
+  ///
+  /// In practice, not all compilers can be trusted to optimize concat() fully,
+  /// so we provide two additional methods (and accompanying operator+
+  /// overloads) to guarantee that particularly important cases (cstring plus
+  /// StringRef) codegen as desired.
+  class Twine {
+    /// NodeKind - Represent the type of an argument.
+    enum NodeKind : unsigned char {
+      /// An empty string; the result of concatenating anything with it is also
+      /// empty.
+      NullKind,
+
+      /// The empty string.
+      EmptyKind,
+
+      /// A pointer to a Twine instance.
+      TwineKind,
+
+      /// A pointer to a C string instance.
+      CStringKind,
+
+      /// A pointer to an std::string instance.
+      StdStringKind,
+
+      /// A pointer to a StringRef instance.
+      StringRefKind,
+
+      /// A pointer to a SmallString instance.
+      SmallStringKind,
+
+      /// A pointer to a formatv_object_base instance.
+      FormatvObjectKind,
+
+      /// A char value, to render as a character.
+      CharKind,
+
+      /// An unsigned int value, to render as an unsigned decimal integer.
+      DecUIKind,
+
+      /// An int value, to render as a signed decimal integer.
+      DecIKind,
+
+      /// A pointer to an unsigned long value, to render as an unsigned decimal
+      /// integer.
+      DecULKind,
+
+      /// A pointer to a long value, to render as a signed decimal integer.
+      DecLKind,
+
+      /// A pointer to an unsigned long long value, to render as an unsigned
+      /// decimal integer.
+      DecULLKind,
+
+      /// A pointer to a long long value, to render as a signed decimal integer.
+      DecLLKind,
+
+      /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
+      /// integer.
+      UHexKind
+    };
+
+    union Child
+    {
+      const Twine *twine;
+      const char *cString;
+      const std::string *stdString;
+      const StringRef *stringRef;
+      const SmallVectorImpl<char> *smallString;
+      const formatv_object_base *formatvObject;
+      char character;
+      unsigned int decUI;
+      int decI;
+      const unsigned long *decUL;
+      const long *decL;
+      const unsigned long long *decULL;
+      const long long *decLL;
+      const uint64_t *uHex;
+    };
+
+    /// LHS - The prefix in the concatenation, which may be uninitialized for
+    /// Null or Empty kinds.
+    Child LHS;
+
+    /// RHS - The suffix in the concatenation, which may be uninitialized for
+    /// Null or Empty kinds.
+    Child RHS;
+
+    /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
+    NodeKind LHSKind = EmptyKind;
+
+    /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
+    NodeKind RHSKind = EmptyKind;
+
+    /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
+    explicit Twine(NodeKind Kind) : LHSKind(Kind) {
+      assert(isNullary() && "Invalid kind!");
+    }
+
+    /// Construct a binary twine.
+    explicit Twine(const Twine &LHS, const Twine &RHS)
+        : LHSKind(TwineKind), RHSKind(TwineKind) {
+      this->LHS.twine = &LHS;
+      this->RHS.twine = &RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct a twine from explicit values.
+    explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
+        : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Check for the null twine.
+    bool isNull() const {
+      return getLHSKind() == NullKind;
+    }
+
+    /// Check for the empty twine.
+    bool isEmpty() const {
+      return getLHSKind() == EmptyKind;
+    }
+
+    /// Check if this is a nullary twine (null or empty).
+    bool isNullary() const {
+      return isNull() || isEmpty();
+    }
+
+    /// Check if this is a unary twine.
+    bool isUnary() const {
+      return getRHSKind() == EmptyKind && !isNullary();
+    }
+
+    /// Check if this is a binary twine.
+    bool isBinary() const {
+      return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
+    }
+
+    /// Check if this is a valid twine (satisfying the invariants on
+    /// order and number of arguments).
+    bool isValid() const {
+      // Nullary twines always have Empty on the RHS.
+      if (isNullary() && getRHSKind() != EmptyKind)
+        return false;
+
+      // Null should never appear on the RHS.
+      if (getRHSKind() == NullKind)
+        return false;
+
+      // The RHS cannot be non-empty if the LHS is empty.
+      if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
+        return false;
+
+      // A twine child should always be binary.
+      if (getLHSKind() == TwineKind &&
+          !LHS.twine->isBinary())
+        return false;
+      if (getRHSKind() == TwineKind &&
+          !RHS.twine->isBinary())
+        return false;
+
+      return true;
+    }
+
+    /// Get the NodeKind of the left-hand side.
+    NodeKind getLHSKind() const { return LHSKind; }
+
+    /// Get the NodeKind of the right-hand side.
+    NodeKind getRHSKind() const { return RHSKind; }
+
+    /// Print one child from a twine.
+    void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
+
+    /// Print the representation of one child from a twine.
+    void printOneChildRepr(raw_ostream &OS, Child Ptr,
+                           NodeKind Kind) const;
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct from an empty string.
+    /*implicit*/ Twine() {
+      assert(isValid() && "Invalid twine!");
+    }
+
+    Twine(const Twine &) = default;
+
+    /// Construct from a C string.
+    ///
+    /// We take care here to optimize "" into the empty twine -- this will be
+    /// optimized out for string constants. This allows Twine arguments to have
+    /// default "" values, without introducing unnecessary string constants.
+    /*implicit*/ Twine(const char *Str) {
+      if (Str[0] != '\0') {
+        LHS.cString = Str;
+        LHSKind = CStringKind;
+      } else
+        LHSKind = EmptyKind;
+
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from an std::string.
+    /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
+      LHS.stdString = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a StringRef.
+    /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
+      LHS.stringRef = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a SmallString.
+    /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
+        : LHSKind(SmallStringKind) {
+      LHS.smallString = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a formatv_object_base.
+    /*implicit*/ Twine(const formatv_object_base &Fmt)
+        : LHSKind(FormatvObjectKind) {
+      LHS.formatvObject = &Fmt;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a char.
+    explicit Twine(char Val) : LHSKind(CharKind) {
+      LHS.character = Val;
+    }
+
+    /// Construct from a signed char.
+    explicit Twine(signed char Val) : LHSKind(CharKind) {
+      LHS.character = static_cast<char>(Val);
+    }
+
+    /// Construct from an unsigned char.
+    explicit Twine(unsigned char Val) : LHSKind(CharKind) {
+      LHS.character = static_cast<char>(Val);
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
+      LHS.decUI = Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(int Val) : LHSKind(DecIKind) {
+      LHS.decI = Val;
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
+      LHS.decUL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(const long &Val) : LHSKind(DecLKind) {
+      LHS.decL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
+      LHS.decULL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
+      LHS.decLL = &Val;
+    }
+
+    // FIXME: Unfortunately, to make sure this is as efficient as possible we
+    // need extra binary constructors from particular types. We can't rely on
+    // the compiler to be smart enough to fold operator+()/concat() down to the
+    // right thing. Yet.
+
+    /// Construct as the concatenation of a C string and a StringRef.
+    /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
+        : LHSKind(CStringKind), RHSKind(StringRefKind) {
+      this->LHS.cString = LHS;
+      this->RHS.stringRef = &RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct as the concatenation of a StringRef and a C string.
+    /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
+        : LHSKind(StringRefKind), RHSKind(CStringKind) {
+      this->LHS.stringRef = &LHS;
+      this->RHS.cString = RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Since the intended use of twines is as temporary objects, assignment
+    /// during concatenation might cause undefined behavior or stack corruption.
+    Twine &operator=(const Twine &) = delete;
+
+    /// Create a 'null' string, which is an empty string that always
+    /// concatenates to form another empty string.
+    static Twine createNull() {
+      return Twine(NullKind);
+    }
+
+    /// @}
+    /// @name Numeric Conversions
+    /// @{
+
+    // Construct a twine to print \p Val as an unsigned hexadecimal integer.
+    static Twine utohexstr(const uint64_t &Val) {
+      Child LHS, RHS;
+      LHS.uHex = &Val;
+      RHS.twine = nullptr;
+      return Twine(LHS, UHexKind, RHS, EmptyKind);
+    }
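+
+    // Usage note (an assumed, common idiom): utohexstr renders the value
+    // without a "0x" prefix; callers wanting one concatenate within a single
+    // statement, e.g. (Twine("0x") + Twine::utohexstr(Val)).str().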
+
+    /// @}
+    /// @name Predicate Operations
+    /// @{
+
+    /// Check if this twine is trivially empty; a false return value does not
+    /// necessarily mean the twine is empty.
+    bool isTriviallyEmpty() const {
+      return isNullary();
+    }
+
+    /// Return true if this twine can be dynamically accessed as a single
+    /// StringRef value with getSingleStringRef().
+    bool isSingleStringRef() const {
+      if (getRHSKind() != EmptyKind) return false;
+
+      switch (getLHSKind()) {
+      case EmptyKind:
+      case CStringKind:
+      case StdStringKind:
+      case StringRefKind:
+      case SmallStringKind:
+        return true;
+      default:
+        return false;
+      }
+    }
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    Twine concat(const Twine &Suffix) const;
+
+    /// @}
+    /// @name Output & Conversion.
+    /// @{
+
+    /// Return the twine contents as a std::string.
+    std::string str() const;
+
+    /// Append the concatenated string into the given SmallString or SmallVector.
+    void toVector(SmallVectorImpl<char> &Out) const;
+
+    /// This returns the twine as a single StringRef.  This method is only valid
+    /// if isSingleStringRef() is true.
+    StringRef getSingleStringRef() const {
+      assert(isSingleStringRef() &&"This cannot be had as a single stringref!");
+      switch (getLHSKind()) {
+      default: llvm_unreachable("Out of sync with isSingleStringRef");
+      case EmptyKind:      return StringRef();
+      case CStringKind:    return StringRef(LHS.cString);
+      case StdStringKind:  return StringRef(*LHS.stdString);
+      case StringRefKind:  return *LHS.stringRef;
+      case SmallStringKind:
+        return StringRef(LHS.smallString->data(), LHS.smallString->size());
+      }
+    }
+
+    /// This returns the twine as a single StringRef if it can be
+    /// represented as such. Otherwise the twine is written into the given
+    /// SmallVector and a StringRef to the SmallVector's data is returned.
+    StringRef toStringRef(SmallVectorImpl<char> &Out) const {
+      if (isSingleStringRef())
+        return getSingleStringRef();
+      toVector(Out);
+      return StringRef(Out.data(), Out.size());
+    }
+
+    /// This returns the twine as a single null terminated StringRef if it
+    /// can be represented as such. Otherwise the twine is written into the
+    /// given SmallVector and a StringRef to the SmallVector's data is returned.
+    ///
+    /// The returned StringRef's size does not include the null terminator.
+    StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
+    /// Write the concatenated string represented by this twine to the
+    /// stream \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Dump the concatenated string represented by this twine to stderr.
+    void dump() const;
+
+    /// Write the representation of this twine to the stream \p OS.
+    void printRepr(raw_ostream &OS) const;
+
+    /// Dump the representation of this twine to stderr.
+    void dumpRepr() const;
+
+    /// @}
+  };
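+
+  // A minimal usage sketch (hedged; names and values are made up). Twines
+  // are built and rendered within a single statement:
+  //
+  //   SmallString<64> Buf;
+  //   StringRef Name = "block";
+  //   StringRef S = (Name + "." + Twine(7u)).toStringRef(Buf);
+  //   // S == "block.7"; Buf provides storage when flattening is needed.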
+
+  /// @name Twine Inline Implementations
+  /// @{
+
+  inline Twine Twine::concat(const Twine &Suffix) const {
+    // Concatenation with null is null.
+    if (isNull() || Suffix.isNull())
+      return Twine(NullKind);
+
+    // Concatenation with empty yields the other side.
+    if (isEmpty())
+      return Suffix;
+    if (Suffix.isEmpty())
+      return *this;
+
+    // Otherwise we need to create a new node, taking care to fold in unary
+    // twines.
+    Child NewLHS, NewRHS;
+    NewLHS.twine = this;
+    NewRHS.twine = &Suffix;
+    NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
+    if (isUnary()) {
+      NewLHS = LHS;
+      NewLHSKind = getLHSKind();
+    }
+    if (Suffix.isUnary()) {
+      NewRHS = Suffix.LHS;
+      NewRHSKind = Suffix.getLHSKind();
+    }
+
+    return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
+  }
+
+  inline Twine operator+(const Twine &LHS, const Twine &RHS) {
+    return LHS.concat(RHS);
+  }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const char *LHS, const StringRef &RHS) {
+    return Twine(LHS, RHS);
+  }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const StringRef &LHS, const char *RHS) {
+    return Twine(LHS, RHS);
+  }
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
+    RHS.print(OS);
+    return OS;
+  }
+
+  /// @}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TWINE_H
diff --git a/linux-x64/clang/include/llvm/ADT/UniqueVector.h b/linux-x64/clang/include/llvm/ADT/UniqueVector.h
new file mode 100644
index 0000000..b17fb23
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/UniqueVector.h
@@ -0,0 +1,102 @@
+//===- llvm/ADT/UniqueVector.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_UNIQUEVECTOR_H
+#define LLVM_ADT_UNIQUEVECTOR_H
+
+#include <cassert>
+#include <cstddef>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// UniqueVector - This class produces a sequential ID number (base 1) for each
+/// unique entry that is added.  T is the type of entries in the vector; it
+/// must implement operator== and operator<.  Entries can be fetched using
+/// operator[] with the entry ID.
+template<class T> class UniqueVector {
+public:
+  using VectorType = typename std::vector<T>;
+  using iterator = typename VectorType::iterator;
+  using const_iterator = typename VectorType::const_iterator;
+
+private:
+  // Map - Used to handle the correspondence of entry to ID.
+  std::map<T, unsigned> Map;
+
+  // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
+  VectorType Vector;
+
+public:
+  /// insert - Append entry to the vector if it doesn't already exist.  Returns
+  /// the entry's index + 1 to be used as a unique ID.
+  unsigned insert(const T &Entry) {
+    // Check if the entry is already in the map.
+    unsigned &Val = Map[Entry];
+
+    // See if entry exists, if so return prior ID.
+    if (Val) return Val;
+
+    // Compute ID for entry.
+    Val = static_cast<unsigned>(Vector.size()) + 1;
+
+    // Insert in vector.
+    Vector.push_back(Entry);
+    return Val;
+  }
+
+  /// idFor - return the ID for an existing entry.  Returns 0 if the entry is
+  /// not found.
+  unsigned idFor(const T &Entry) const {
+    // Search for entry in the map.
+    typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);
+
+    // See if entry exists, if so return ID.
+    if (MI != Map.end()) return MI->second;
+
+    // No luck.
+    return 0;
+  }
+
+  /// operator[] - Returns a reference to the entry with the specified ID.
+  const T &operator[](unsigned ID) const {
+    assert(ID-1 < size() && "ID is 0 or out of range!");
+    return Vector[ID - 1];
+  }
+
+  /// \brief Return an iterator to the start of the vector.
+  iterator begin() { return Vector.begin(); }
+
+  /// \brief Return an iterator to the start of the vector.
+  const_iterator begin() const { return Vector.begin(); }
+
+  /// \brief Return an iterator to the end of the vector.
+  iterator end() { return Vector.end(); }
+
+  /// \brief Return an iterator to the end of the vector.
+  const_iterator end() const { return Vector.end(); }
+
+  /// size - Returns the number of entries in the vector.
+  size_t size() const { return Vector.size(); }
+
+  /// empty - Returns true if the vector is empty.
+  bool empty() const { return Vector.empty(); }
+
+  /// reset - Clears all the entries.
+  void reset() {
+    Map.clear();
+    Vector.clear();
+  }
+};
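+
+// A minimal usage sketch (hedged; the strings are arbitrary examples):
+//
+//   UniqueVector<std::string> Names;
+//   unsigned A = Names.insert("foo");  // A == 1 (IDs are 1-based)
+//   unsigned B = Names.insert("bar");  // B == 2
+//   unsigned C = Names.insert("foo");  // C == 1; duplicates reuse the ID
+//   assert(Names[A] == "foo" && Names.idFor("baz") == 0);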
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_UNIQUEVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/VariadicFunction.h b/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
new file mode 100644
index 0000000..403130c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
@@ -0,0 +1,331 @@
+//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file implements compile-time type-safe variadic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_VARIADICFUNCTION_H
+#define LLVM_ADT_VARIADICFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+// Define macros to aid in expanding a comma separated series with the index of
+// the series pasted onto the last token.
+#define LLVM_COMMA_JOIN1(x) x ## 0
+#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
+#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
+#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
+#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
+#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
+#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
+#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
+#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
+#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
+#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
+#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
+#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
+#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
+#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
+#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
+#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
+#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
+#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
+#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
+#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
+#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
+#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
+#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
+#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
+#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
+#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
+#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
+#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
+#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
+#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
+#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
+
+/// \brief Class which can simulate a type-safe variadic function.
+///
+/// The VariadicFunction class template makes it easy to define
+/// type-safe variadic functions where all arguments have the same
+/// type.
+///
+/// Suppose we need a variadic function like this:
+///
+///   ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
+///
+/// Instead of many overloads of Foo(), we only need to define a helper
+/// function that takes an array of arguments:
+///
+///   ResultT FooImpl(ArrayRef<const ArgT *> Args) {
+///     // 'Args[i]' is a pointer to the i-th argument passed to Foo().
+///     ...
+///   }
+///
+/// and then define Foo() like this:
+///
+///   const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
+///
+/// VariadicFunction takes care of defining the overloads of Foo().
+///
+/// Actually, Foo is a function object (i.e. functor) instead of a plain
+/// function.  This object is stateless and its constructor/destructor
+/// does nothing, so it's safe to create global objects and call Foo(...) at
+/// any time.
+///
+/// Sometimes we need a variadic function to have some fixed leading
+/// arguments whose types may be different from that of the optional
+/// arguments.  For example:
+///
+///   bool FullMatch(const StringRef &S, const RE &Regex,
+///                  const ArgT &A_0, ..., const ArgT &A_N);
+///
+/// VariadicFunctionN is for such cases, where N is the number of fixed
+/// arguments.  It is like VariadicFunction, except that it takes N more
+/// template arguments for the types of the fixed arguments:
+///
+///   bool FullMatchImpl(const StringRef &S, const RE &Regex,
+///                      ArrayRef<const ArgT *> Args) { ... }
+///   const VariadicFunction2<bool, const StringRef&,
+///                           const RE&, ArgT, FullMatchImpl>
+///       FullMatch;
+///
+/// Currently VariadicFunction and friends support up to 3 fixed leading
+/// arguments and up to 32 optional arguments.
+template <typename ResultT, typename ArgT,
+          ResultT (*Func)(ArrayRef<const ArgT *>)>
+struct VariadicFunction {
+  ResultT operator()() const {
+    return Func(None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
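+
+// A minimal usage sketch (hedged; ConcatImpl and Concat are made-up names).
+// The impl function receives a pointer to each call argument, in order:
+//
+//   std::string ConcatImpl(ArrayRef<const std::string *> Args) {
+//     std::string R;
+//     for (const std::string *S : Args)
+//       R += *S;
+//     return R;
+//   }
+//   const VariadicFunction<std::string, std::string, ConcatImpl> Concat = {};
+//   std::string AB = Concat(std::string("a"), std::string("b"));  // "ab"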
+
+template <typename ResultT, typename Param0T, typename ArgT,
+          ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
+struct VariadicFunction1 {
+  ResultT operator()(Param0T P0) const {
+    return Func(P0, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
+struct VariadicFunction2 {
+  ResultT operator()(Param0T P0, Param1T P1) const {
+    return Func(P0, P1, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T,
+          typename Param2T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
+struct VariadicFunction3 {
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
+    return Func(P0, P1, P2, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, P2, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+// Clean up the macro namespace.
+#undef LLVM_COMMA_JOIN1
+#undef LLVM_COMMA_JOIN2
+#undef LLVM_COMMA_JOIN3
+#undef LLVM_COMMA_JOIN4
+#undef LLVM_COMMA_JOIN5
+#undef LLVM_COMMA_JOIN6
+#undef LLVM_COMMA_JOIN7
+#undef LLVM_COMMA_JOIN8
+#undef LLVM_COMMA_JOIN9
+#undef LLVM_COMMA_JOIN10
+#undef LLVM_COMMA_JOIN11
+#undef LLVM_COMMA_JOIN12
+#undef LLVM_COMMA_JOIN13
+#undef LLVM_COMMA_JOIN14
+#undef LLVM_COMMA_JOIN15
+#undef LLVM_COMMA_JOIN16
+#undef LLVM_COMMA_JOIN17
+#undef LLVM_COMMA_JOIN18
+#undef LLVM_COMMA_JOIN19
+#undef LLVM_COMMA_JOIN20
+#undef LLVM_COMMA_JOIN21
+#undef LLVM_COMMA_JOIN22
+#undef LLVM_COMMA_JOIN23
+#undef LLVM_COMMA_JOIN24
+#undef LLVM_COMMA_JOIN25
+#undef LLVM_COMMA_JOIN26
+#undef LLVM_COMMA_JOIN27
+#undef LLVM_COMMA_JOIN28
+#undef LLVM_COMMA_JOIN29
+#undef LLVM_COMMA_JOIN30
+#undef LLVM_COMMA_JOIN31
+#undef LLVM_COMMA_JOIN32
+
+} // end namespace llvm
+
+#endif  // LLVM_ADT_VARIADICFUNCTION_H
diff --git a/linux-x64/clang/include/llvm/ADT/edit_distance.h b/linux-x64/clang/include/llvm/ADT/edit_distance.h
new file mode 100644
index 0000000..06a01b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/edit_distance.h
@@ -0,0 +1,103 @@
+//===-- llvm/ADT/edit_distance.h - Array edit distance function ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a Levenshtein distance function that works for any two
+// sequences, with each element of each sequence being analogous to a character
+// in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+/// \brief Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
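+///
+/// A minimal usage sketch (the values here are only illustrative):
+///
+/// \code
+///   const char From[] = {'k', 'i', 't', 't', 'e', 'n'};
+///   const char To[] = {'s', 'i', 't', 't', 'i', 'n', 'g'};
+///   unsigned D = ComputeEditDistance(makeArrayRef(From), makeArrayRef(To));
+///   // D == 3: 'k'->'s', 'e'->'i', and an inserted 'g'.
+/// \endcode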
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+                             bool AllowReplacements = true,
+                             unsigned MaxEditDistance = 0) {
+  // The algorithm implemented below is the "classic"
+  // dynamic-programming algorithm for computing the Levenshtein
+  // distance, which is described here:
+  //
+  //   http://en.wikipedia.org/wiki/Levenshtein_distance
+  //
+  // Although the algorithm is typically described using an m x n
+  // array, only one row plus one element are used at a time, so this
+  // implementation just keeps one vector for the row.  To update one entry,
+  // only the entries to the left, top, and top-left are needed.  The left
+  // entry is in Row[x-1], the top entry is what's in Row[x] from the last
+  // iteration, and the top-left entry is stored in Previous.
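+  //
+  // For example (illustrative), comparing "ab" (rows) against "ac" (columns):
+  // Row starts as [0 1 2]; after processing 'a' it is [1 0 1]; after 'b' it
+  // is [2 1 1], and Row[n] == 1 is the resulting distance.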
+  typename ArrayRef<T>::size_type m = FromArray.size();
+  typename ArrayRef<T>::size_type n = ToArray.size();
+
+  const unsigned SmallBufferSize = 64;
+  unsigned SmallBuffer[SmallBufferSize];
+  std::unique_ptr<unsigned[]> Allocated;
+  unsigned *Row = SmallBuffer;
+  if (n + 1 > SmallBufferSize) {
+    Row = new unsigned[n + 1];
+    Allocated.reset(Row);
+  }
+
+  for (unsigned i = 1; i <= n; ++i)
+    Row[i] = i;
+
+  for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+    Row[0] = y;
+    unsigned BestThisRow = Row[0];
+
+    unsigned Previous = y - 1;
+    for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+      int OldRow = Row[x];
+      if (AllowReplacements) {
+        Row[x] = std::min(
+            Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+            std::min(Row[x-1], Row[x])+1);
+      }
+      else {
+        if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous;
+        else Row[x] = std::min(Row[x-1], Row[x]) + 1;
+      }
+      Previous = OldRow;
+      BestThisRow = std::min(BestThisRow, Row[x]);
+    }
+
+    if (MaxEditDistance && BestThisRow > MaxEditDistance)
+      return MaxEditDistance + 1;
+  }
+
+  unsigned Result = Row[n];
+  return Result;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ilist.h b/linux-x64/clang/include/llvm/ADT/ilist.h
new file mode 100644
index 0000000..a788f81
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist.h
@@ -0,0 +1,434 @@
+//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes to implement an intrusive doubly linked list class
+// (i.e. each node of the list must contain next and previous fields for the
+// list).
+//
+// The ilist class itself is intended as a drop-in replacement for std::list.
+// It does not provide a constant time size() method, so be careful to use
+// empty() when you really want to know whether the list is empty.
+//
+// The ilist class is implemented as a circular list.  The list itself contains
+// a sentinel node, whose Next points at begin() and whose Prev points at
+// rbegin().  The sentinel node itself serves as end() and rend().
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_H
+#define LLVM_ADT_ILIST_H
+
+#include "llvm/ADT/simple_ilist.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+/// Use delete by default for iplist and ilist.
+///
+/// Specialize this to get different behaviour for ownership-related API.  (If
+/// you really want ownership semantics, consider using std::list or building
+/// something like \a BumpPtrList.)
+///
+/// \see ilist_noalloc_traits
+template <typename NodeTy> struct ilist_alloc_traits {
+  static void deleteNode(NodeTy *V) { delete V; }
+};
+
+/// Custom traits to do nothing on deletion.
+///
+/// Specialize ilist_alloc_traits to inherit from this to disable the
+/// non-intrusive deletion in iplist (which implies ownership).
+///
+/// If you want purely intrusive semantics with no callbacks, consider using \a
+/// simple_ilist instead.
+///
+/// \code
+/// template <>
+/// struct ilist_alloc_traits<MyType> : ilist_noalloc_traits<MyType> {};
+/// \endcode
+template <typename NodeTy> struct ilist_noalloc_traits {
+  static void deleteNode(NodeTy *V) {}
+};
+
+/// Callbacks do nothing by default in iplist and ilist.
+///
+/// Specialize this to get callbacks when nodes change their list
+/// membership.
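+///
+/// A sketch of such a specialization (MyNode is illustrative, not part of
+/// this header):
+///
+/// \code
+/// template <> struct ilist_callback_traits<MyNode> {
+///   void addNodeToList(MyNode *N) { /* e.g. record N's new parent */ }
+///   void removeNodeFromList(MyNode *N) { /* e.g. clear N's parent */ }
+///   template <class Iterator>
+///   void transferNodesFromList(ilist_callback_traits &OldList,
+///                              Iterator First, Iterator Last) {}
+/// };
+/// \endcode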
+template <typename NodeTy> struct ilist_callback_traits {
+  void addNodeToList(NodeTy *) {}
+  void removeNodeFromList(NodeTy *) {}
+
+  /// Callback before transferring nodes to this list.
+  ///
+  /// \pre \c this!=&OldList
+  template <class Iterator>
+  void transferNodesFromList(ilist_callback_traits &OldList, Iterator /*first*/,
+                             Iterator /*last*/) {
+    (void)OldList;
+  }
+};
+
+/// A fragment of the template traits for an intrusive list that provides
+/// default node-related operations.
+///
+/// TODO: Remove this layer of indirection.  It's not necessary.
+template <typename NodeTy>
+struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
+                           ilist_callback_traits<NodeTy> {};
+
+/// Default template traits for intrusive list.
+///
+/// By inheriting from this, you can easily use default implementations for all
+/// common operations.
+///
+/// TODO: Remove this customization point.  Specializing ilist_traits is
+/// already fully general.
+template <typename NodeTy>
+struct ilist_default_traits : public ilist_node_traits<NodeTy> {};
+
+/// Template traits for intrusive list.
+///
+/// Customize callbacks and allocation semantics.
+template <typename NodeTy>
+struct ilist_traits : public ilist_default_traits<NodeTy> {};
+
+/// Const traits should never be instantiated.
+template <typename Ty> struct ilist_traits<const Ty> {};
+
+namespace ilist_detail {
+
+template <class T> T &make();
+
+/// Type trait to check for a traits class that has a getNext member (as a
+/// canary for any of the ilist_nextprev_traits API).
+template <class TraitsT, class NodeT> struct HasGetNext {
+  typedef char Yes[1];
+  typedef char No[2];
+  template <size_t N> struct SFINAE {};
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createSentinel member (as
+/// a canary for any of the ilist_sentinel_traits API).
+template <class TraitsT> struct HasCreateSentinel {
+  typedef char Yes[1];
+  typedef char No[2];
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->createSentinel()) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createNode member.
+/// Allocation should be managed in a wrapper class, instead of in
+/// ilist_traits.
+template <class TraitsT, class NodeT> struct HasCreateNode {
+  typedef char Yes[1];
+  typedef char No[2];
+  template <size_t N> struct SFINAE {};
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->createNode(make<NodeT>())) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+template <class TraitsT, class NodeT> struct HasObsoleteCustomization {
+  static const bool value = HasGetNext<TraitsT, NodeT>::value ||
+                            HasCreateSentinel<TraitsT>::value ||
+                            HasCreateNode<TraitsT, NodeT>::value;
+};
+
+} // end namespace ilist_detail
+
+//===----------------------------------------------------------------------===//
+//
+/// A wrapper around an intrusive list with callbacks and non-intrusive
+/// ownership.
+///
+/// This wraps a purely intrusive list (like simple_ilist) with a configurable
+/// traits class.  The traits can implement callbacks and customize the
+/// ownership semantics.
+///
+/// This is a subset of ilist functionality that can safely be used on nodes of
+/// polymorphic types, i.e. a heterogeneous list with a common base class that
+/// holds the next/prev pointers.  The only state of the list itself is an
+/// ilist_sentinel, which holds pointers to the first and last nodes in the
+/// list.
+template <class IntrusiveListT, class TraitsT>
+class iplist_impl : public TraitsT, IntrusiveListT {
+  typedef IntrusiveListT base_list_type;
+
+protected:
+  typedef iplist_impl iplist_impl_type;
+
+public:
+  typedef typename base_list_type::pointer pointer;
+  typedef typename base_list_type::const_pointer const_pointer;
+  typedef typename base_list_type::reference reference;
+  typedef typename base_list_type::const_reference const_reference;
+  typedef typename base_list_type::value_type value_type;
+  typedef typename base_list_type::size_type size_type;
+  typedef typename base_list_type::difference_type difference_type;
+  typedef typename base_list_type::iterator iterator;
+  typedef typename base_list_type::const_iterator const_iterator;
+  typedef typename base_list_type::reverse_iterator reverse_iterator;
+  typedef
+      typename base_list_type::const_reverse_iterator const_reverse_iterator;
+
+private:
+  // TODO: Drop this assertion and the transitive type traits anytime after
+  // v4.0 is branched (i.e., keep them for one release to help out-of-tree code
+  // update).
+  static_assert(
+      !ilist_detail::HasObsoleteCustomization<TraitsT, value_type>::value,
+      "ilist customization points have changed!");
+
+  static bool op_less(const_reference L, const_reference R) { return L < R; }
+  static bool op_equal(const_reference L, const_reference R) { return L == R; }
+
+public:
+  iplist_impl() = default;
+
+  iplist_impl(const iplist_impl &) = delete;
+  iplist_impl &operator=(const iplist_impl &) = delete;
+
+  iplist_impl(iplist_impl &&X)
+      : TraitsT(std::move(X)), IntrusiveListT(std::move(X)) {}
+  iplist_impl &operator=(iplist_impl &&X) {
+    *static_cast<TraitsT *>(this) = std::move(X);
+    *static_cast<IntrusiveListT *>(this) = std::move(X);
+    return *this;
+  }
+
+  ~iplist_impl() { clear(); }
+
+  // Miscellaneous inspection routines.
+  size_type max_size() const { return size_type(-1); }
+
+  using base_list_type::begin;
+  using base_list_type::end;
+  using base_list_type::rbegin;
+  using base_list_type::rend;
+  using base_list_type::empty;
+  using base_list_type::front;
+  using base_list_type::back;
+
+  void swap(iplist_impl &RHS) {
+    assert(0 && "Swap does not use list traits callback correctly yet!");
+    base_list_type::swap(RHS);
+  }
+
+  iterator insert(iterator where, pointer New) {
+    this->addNodeToList(New); // Notify traits that we added a node...
+    return base_list_type::insert(where, *New);
+  }
+
+  iterator insert(iterator where, const_reference New) {
+    return this->insert(where, new value_type(New));
+  }
+
+  iterator insertAfter(iterator where, pointer New) {
+    if (empty())
+      return insert(begin(), New);
+    else
+      return insert(++where, New);
+  }
+
+  /// Clone another list.
+  template <class Cloner> void cloneFrom(const iplist_impl &L2, Cloner clone) {
+    clear();
+    for (const_reference V : L2)
+      push_back(clone(V));
+  }
+
+  pointer remove(iterator &IT) {
+    pointer Node = &*IT++;
+    this->removeNodeFromList(Node); // Notify traits that we removed a node...
+    base_list_type::remove(*Node);
+    return Node;
+  }
+
+  pointer remove(const iterator &IT) {
+    iterator MutIt = IT;
+    return remove(MutIt);
+  }
+
+  pointer remove(pointer IT) { return remove(iterator(IT)); }
+  pointer remove(reference IT) { return remove(iterator(IT)); }
+
+  // erase - remove a node from the controlled sequence... and delete it.
+  iterator erase(iterator where) {
+    this->deleteNode(remove(where));
+    return where;
+  }
+
+  iterator erase(pointer IT) { return erase(iterator(IT)); }
+  iterator erase(reference IT) { return erase(iterator(IT)); }
+
+  /// Remove all nodes from the list like clear(), but do not call
+  /// removeNodeFromList() or deleteNode().
+  ///
+  /// This should only be used immediately before freeing nodes in bulk to
+  /// avoid traversing the list and bringing all the nodes into cache.
+  void clearAndLeakNodesUnsafely() { base_list_type::clear(); }
+
+private:
+  // transfer - The heart of the splice function.  Move linked list nodes from
+  // [first, last) into position.
+  //
+  void transfer(iterator position, iplist_impl &L2, iterator first, iterator last) {
+    if (position == last)
+      return;
+
+    if (this != &L2) // Notify traits we moved the nodes...
+      this->transferNodesFromList(L2, first, last);
+
+    base_list_type::splice(position, L2, first, last);
+  }
+
+public:
+  //===----------------------------------------------------------------------===
+  // Functionality derived from other functions defined above...
+  //
+
+  using base_list_type::size;
+
+  iterator erase(iterator first, iterator last) {
+    while (first != last)
+      first = erase(first);
+    return last;
+  }
+
+  void clear() { erase(begin(), end()); }
+
+  // Front and back inserters...
+  void push_front(pointer val) { insert(begin(), val); }
+  void push_back(pointer val) { insert(end(), val); }
+  void pop_front() {
+    assert(!empty() && "pop_front() on empty list!");
+    erase(begin());
+  }
+  void pop_back() {
+    assert(!empty() && "pop_back() on empty list!");
+    iterator t = end(); erase(--t);
+  }
+
+  // Special forms of insert...
+  template<class InIt> void insert(iterator where, InIt first, InIt last) {
+    for (; first != last; ++first) insert(where, *first);
+  }
+
+  // Splice members - defined in terms of transfer...
+  void splice(iterator where, iplist_impl &L2) {
+    if (!L2.empty())
+      transfer(where, L2, L2.begin(), L2.end());
+  }
+  void splice(iterator where, iplist_impl &L2, iterator first) {
+    iterator last = first; ++last;
+    if (where == first || where == last) return; // No change
+    transfer(where, L2, first, last);
+  }
+  void splice(iterator where, iplist_impl &L2, iterator first, iterator last) {
+    if (first != last) transfer(where, L2, first, last);
+  }
+  void splice(iterator where, iplist_impl &L2, reference N) {
+    splice(where, L2, iterator(N));
+  }
+  void splice(iterator where, iplist_impl &L2, pointer N) {
+    splice(where, L2, iterator(N));
+  }
+
+  template <class Compare>
+  void merge(iplist_impl &Right, Compare comp) {
+    if (this == &Right)
+      return;
+    this->transferNodesFromList(Right, Right.begin(), Right.end());
+    base_list_type::merge(Right, comp);
+  }
+  void merge(iplist_impl &Right) { return merge(Right, op_less); }
+
+  using base_list_type::sort;
+
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  pointer getPrevNode(reference N) const {
+    auto I = N.getIterator();
+    if (I == begin())
+      return nullptr;
+    return &*std::prev(I);
+  }
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  const_pointer getPrevNode(const_reference N) const {
+    return getPrevNode(const_cast<reference>(N));
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  pointer getNextNode(reference N) const {
+    auto Next = std::next(N.getIterator());
+    if (Next == end())
+      return nullptr;
+    return &*Next;
+  }
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  const_pointer getNextNode(const_reference N) const {
+    return getNextNode(const_cast<reference>(N));
+  }
+};
+
+/// An intrusive list with ownership and callbacks specified/controlled by
+/// ilist_traits, only with API safe for polymorphic types.
+///
+/// The \p Options parameters are the same as those for \a simple_ilist.  See
+/// there for a description of what's available.
+template <class T, class... Options>
+class iplist
+    : public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
+  typedef typename iplist::iplist_impl_type iplist_impl_type;
+
+public:
+  iplist() = default;
+
+  iplist(const iplist &X) = delete;
+  iplist &operator=(const iplist &X) = delete;
+
+  iplist(iplist &&X) : iplist_impl_type(std::move(X)) {}
+  iplist &operator=(iplist &&X) {
+    *static_cast<iplist_impl_type *>(this) = std::move(X);
+    return *this;
+  }
+};
+
+template <class T, class... Options> using ilist = iplist<T, Options...>;
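+
+// A minimal usage sketch (MyItem is illustrative, not part of this header):
+//
+//   struct MyItem : ilist_node<MyItem> {};
+//
+//   iplist<MyItem> List;
+//   List.push_back(new MyItem()); // The list takes ownership...
+//   List.clear();                 // ...and deleteNode() deletes the node.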
+
+} // end namespace llvm
+
+namespace std {
+
+  // Ensure that swap uses the fast list swap...
+  template<class Ty>
+  void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
+    Left.swap(Right);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_ILIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_base.h b/linux-x64/clang/include/llvm/ADT/ilist_base.h
new file mode 100644
index 0000000..3d818a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_base.h
@@ -0,0 +1,93 @@
+//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_BASE_H
+#define LLVM_ADT_ILIST_BASE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include <cassert>
+
+namespace llvm {
+
+/// Implementations of list algorithms using ilist_node_base.
+template <bool EnableSentinelTracking> class ilist_base {
+public:
+  using node_base_type = ilist_node_base<EnableSentinelTracking>;
+
+  static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
+    node_base_type &Prev = *Next.getPrev();
+    N.setNext(&Next);
+    N.setPrev(&Prev);
+    Prev.setNext(&N);
+    Next.setPrev(&N);
+  }
+
+  static void removeImpl(node_base_type &N) {
+    node_base_type *Prev = N.getPrev();
+    node_base_type *Next = N.getNext();
+    Next->setPrev(Prev);
+    Prev->setNext(Next);
+
+    // Not strictly necessary, but helps catch a class of bugs.
+    N.setPrev(nullptr);
+    N.setNext(nullptr);
+  }
+
+  static void removeRangeImpl(node_base_type &First, node_base_type &Last) {
+    node_base_type *Prev = First.getPrev();
+    node_base_type *Final = Last.getPrev();
+    Last.setPrev(Prev);
+    Prev->setNext(&Last);
+
+    // Not strictly necessary, but helps catch a class of bugs.
+    First.setPrev(nullptr);
+    Final->setNext(nullptr);
+  }
+
+  static void transferBeforeImpl(node_base_type &Next, node_base_type &First,
+                                 node_base_type &Last) {
+    if (&Next == &Last || &First == &Last)
+      return;
+
+    // Position cannot be contained in the range to be transferred.
+    assert(&Next != &First &&
+           // Check for the most common mistake.
+           "Insertion point can't be one of the transferred nodes");
+
+    node_base_type &Final = *Last.getPrev();
+
+    // Detach from old list/position.
+    First.getPrev()->setNext(&Last);
+    Last.setPrev(First.getPrev());
+
+    // Splice [First, Final] into its new list/position.
+    node_base_type &Prev = *Next.getPrev();
+    Final.setNext(&Next);
+    First.setPrev(&Prev);
+    Prev.setNext(&First);
+    Next.setPrev(&Final);
+  }
+
+  template <class T> static void insertBefore(T &Next, T &N) {
+    insertBeforeImpl(Next, N);
+  }
+
+  template <class T> static void remove(T &N) { removeImpl(N); }
+  template <class T> static void removeRange(T &First, T &Last) {
+    removeRangeImpl(First, Last);
+  }
+
+  template <class T> static void transferBefore(T &Next, T &First, T &Last) {
+    transferBeforeImpl(Next, First, Last);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_BASE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_iterator.h b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
new file mode 100644
index 0000000..671e644
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
@@ -0,0 +1,199 @@
+//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_ITERATOR_H
+#define LLVM_ADT_ILIST_ITERATOR_H
+
+#include "llvm/ADT/ilist_node.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+namespace ilist_detail {
+
+/// Find const-correct node types.
+template <class OptionsT, bool IsConst> struct IteratorTraits;
+template <class OptionsT> struct IteratorTraits<OptionsT, false> {
+  using value_type = typename OptionsT::value_type;
+  using pointer = typename OptionsT::pointer;
+  using reference = typename OptionsT::reference;
+  using node_pointer = ilist_node_impl<OptionsT> *;
+  using node_reference = ilist_node_impl<OptionsT> &;
+};
+template <class OptionsT> struct IteratorTraits<OptionsT, true> {
+  using value_type = const typename OptionsT::value_type;
+  using pointer = typename OptionsT::const_pointer;
+  using reference = typename OptionsT::const_reference;
+  using node_pointer = const ilist_node_impl<OptionsT> *;
+  using node_reference = const ilist_node_impl<OptionsT> &;
+};
+
+template <bool IsReverse> struct IteratorHelper;
+template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
+  using Access = ilist_detail::NodeAccess;
+
+  template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
+  template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
+};
+template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
+  using Access = ilist_detail::NodeAccess;
+
+  template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
+  template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
+};
+
+} // end namespace ilist_detail
+
+/// Iterator for intrusive lists based on ilist_node.
+template <class OptionsT, bool IsReverse, bool IsConst>
+class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
+  friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
+  friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
+  friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
+
+  using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
+  using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
+
+public:
+  using value_type = typename Traits::value_type;
+  using pointer = typename Traits::pointer;
+  using reference = typename Traits::reference;
+  using difference_type = ptrdiff_t;
+  using iterator_category = std::bidirectional_iterator_tag;
+  using const_pointer = typename OptionsT::const_pointer;
+  using const_reference = typename OptionsT::const_reference;
+
+private:
+  using node_pointer = typename Traits::node_pointer;
+  using node_reference = typename Traits::node_reference;
+
+  node_pointer NodePtr = nullptr;
+
+public:
+  /// Create from an ilist_node.
+  explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
+
+  explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
+  explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
+  ilist_iterator() = default;
+
+  // This is templated so that we can allow constructing a const iterator from
+  // a nonconst iterator...
+  template <bool RHSIsConst>
+  ilist_iterator(
+      const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
+      typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr)
+      : NodePtr(RHS.NodePtr) {}
+
+  // This is templated so that we can allow assigning to a const iterator from
+  // a nonconst iterator...
+  template <bool RHSIsConst>
+  typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type
+  operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
+    NodePtr = RHS.NodePtr;
+    return *this;
+  }
+
+  /// Explicit conversion between forward/reverse iterators.
+  ///
+  /// Translate between forward and reverse iterators without changing range
+  /// boundaries.  The resulting iterator will dereference (and have a handle)
+  /// to the previous node, which is somewhat unexpected; but converting the
+  /// two endpoints in a range will give the same range in reverse.
+  ///
+  /// This matches std::reverse_iterator conversions.
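+  ///
+  /// A sketch (L is any intrusive list; the names are illustrative):
+  ///
+  /// \code
+  ///   using RevIt = decltype(L.rbegin());
+  ///   RevIt RB(L.end());   // Equivalent to L.rbegin().
+  ///   RevIt RE(L.begin()); // Equivalent to L.rend().
+  /// \endcode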
+  explicit ilist_iterator(
+      const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
+      : ilist_iterator(++RHS.getReverse()) {}
+
+  /// Get a reverse iterator to the same node.
+  ///
+  /// Gives a reverse iterator that will dereference (and have a handle) to the
+  /// same node.  Converting the endpoint iterators in a range will give a
+  /// different range; for range operations, use the explicit conversions.
+  ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
+    if (NodePtr)
+      return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
+    return ilist_iterator<OptionsT, !IsReverse, IsConst>();
+  }
+
+  /// Const-cast.
+  ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
+    if (NodePtr)
+      return ilist_iterator<OptionsT, IsReverse, false>(
+          const_cast<typename ilist_iterator<OptionsT, IsReverse,
+                                             false>::node_reference>(*NodePtr));
+    return ilist_iterator<OptionsT, IsReverse, false>();
+  }
+
+  // Accessors...
+  reference operator*() const {
+    assert(!NodePtr->isKnownSentinel());
+    return *Access::getValuePtr(NodePtr);
+  }
+  pointer operator->() const { return &operator*(); }
+
+  // Comparison operators
+  friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+    return LHS.NodePtr == RHS.NodePtr;
+  }
+  friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+    return LHS.NodePtr != RHS.NodePtr;
+  }
+
+  // Increment and decrement operators...
+  ilist_iterator &operator--() {
+    NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
+    return *this;
+  }
+  ilist_iterator &operator++() {
+    NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
+    return *this;
+  }
+  ilist_iterator operator--(int) {
+    ilist_iterator tmp = *this;
+    --*this;
+    return tmp;
+  }
+  ilist_iterator operator++(int) {
+    ilist_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+  /// Get the underlying ilist_node.
+  node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
+
+  /// Check for end.  Only valid if ilist_sentinel_tracking<true>.
+  bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
+};
+
+template <typename From> struct simplify_type;
+
+/// Allow ilist_iterators to convert into pointers to a node automatically when
+/// used by the dyn_cast, cast, isa mechanisms...
+///
+/// FIXME: remove this, since there is no implicit conversion to NodeTy.
+template <class OptionsT, bool IsConst>
+struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
+  using iterator = ilist_iterator<OptionsT, false, IsConst>;
+  using SimpleType = typename iterator::pointer;
+
+  static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
+};
+template <class OptionsT, bool IsConst>
+struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
+    : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_ITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node.h b/linux-x64/clang/include/llvm/ADT/ilist_node.h
new file mode 100644
index 0000000..3362611
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node.h
@@ -0,0 +1,306 @@
+//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ilist_node class template, which is a convenient
+// base class for creating classes that can be used with ilists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_H
+#define LLVM_ADT_ILIST_NODE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include "llvm/ADT/ilist_node_options.h"
+
+namespace llvm {
+
+namespace ilist_detail {
+
+struct NodeAccess;
+
+} // end namespace ilist_detail
+
+template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
+template <class OptionsT> class ilist_sentinel;
+
+/// Implementation for an ilist node.
+///
+/// Templated on an appropriate \a ilist_detail::node_options, usually computed
+/// by \a ilist_detail::compute_node_options.
+///
+/// This is a wrapper around \a ilist_node_base whose main purpose is to
+/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
+/// wrong \a simple_ilist or \a iplist.
+template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
+  using value_type = typename OptionsT::value_type;
+  using node_base_type = typename OptionsT::node_base_type;
+  using list_base_type = typename OptionsT::list_base_type;
+
+  friend typename OptionsT::list_base_type;
+  friend struct ilist_detail::NodeAccess;
+  friend class ilist_sentinel<OptionsT>;
+  friend class ilist_iterator<OptionsT, false, false>;
+  friend class ilist_iterator<OptionsT, false, true>;
+  friend class ilist_iterator<OptionsT, true, false>;
+  friend class ilist_iterator<OptionsT, true, true>;
+
+protected:
+  using self_iterator = ilist_iterator<OptionsT, false, false>;
+  using const_self_iterator = ilist_iterator<OptionsT, false, true>;
+  using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
+  using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;
+
+  ilist_node_impl() = default;
+
+private:
+  ilist_node_impl *getPrev() {
+    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+  }
+
+  ilist_node_impl *getNext() {
+    return static_cast<ilist_node_impl *>(node_base_type::getNext());
+  }
+
+  const ilist_node_impl *getPrev() const {
+    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+  }
+
+  const ilist_node_impl *getNext() const {
+    return static_cast<ilist_node_impl *>(node_base_type::getNext());
+  }
+
+  void setPrev(ilist_node_impl *N) { node_base_type::setPrev(N); }
+  void setNext(ilist_node_impl *N) { node_base_type::setNext(N); }
+
+public:
+  self_iterator getIterator() { return self_iterator(*this); }
+  const_self_iterator getIterator() const { return const_self_iterator(*this); }
+
+  reverse_self_iterator getReverseIterator() {
+    return reverse_self_iterator(*this);
+  }
+
+  const_reverse_self_iterator getReverseIterator() const {
+    return const_reverse_self_iterator(*this);
+  }
+
+  // Under-approximation, but always available for assertions.
+  using node_base_type::isKnownSentinel;
+
+  /// Check whether this is the sentinel node.
+  ///
+  /// This requires sentinel tracking to be explicitly enabled.  Use the
+  /// ilist_sentinel_tracking<true> option to get this API.
+  bool isSentinel() const {
+    static_assert(OptionsT::is_sentinel_tracking_explicit,
+                  "Use ilist_sentinel_tracking<true> to enable isSentinel()");
+    return node_base_type::isSentinel();
+  }
+};
+
+/// An intrusive list node.
+///
+/// A base class to enable membership in intrusive lists, including \a
+/// simple_ilist, \a iplist, and \a ilist.  The first template parameter is the
+/// \a value_type for the list.
+///
+/// An ilist node can be configured with compile-time options to change
+/// behaviour and/or add API.
+///
+/// By default, an \a ilist_node knows whether it is the list sentinel (an
+/// instance of \a ilist_sentinel) if and only if
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS.  The function \a isKnownSentinel() always
+/// returns \c false tracking is off.  Sentinel tracking steals a bit from the
+/// "prev" link, which adds a mask operation when decrementing an iterator, but
+/// enables bug-finding assertions in \a ilist_iterator.
+///
+/// To turn sentinel tracking on all the time, pass in the
+/// ilist_sentinel_tracking<true> template parameter.  This also enables the \a
+/// isSentinel() function.  The same option must be passed to the intrusive
+/// list.  (ilist_sentinel_tracking<false> turns sentinel tracking off all the
+/// time.)
+///
+/// A type can inherit from ilist_node multiple times by passing in different
+/// \a ilist_tag options.  This allows a single instance to be inserted into
+/// multiple lists simultaneously, where each list is given the tag of the
+/// corresponding ilist_node base.
+///
+/// \example
+/// struct A {};
+/// struct B {};
+/// struct N : ilist_node<N, ilist_tag<A>>, ilist_node<N, ilist_tag<B>> {};
+///
+/// void foo() {
+///   simple_ilist<N, ilist_tag<A>> ListA;
+///   simple_ilist<N, ilist_tag<B>> ListB;
+///   N N1;
+///   ListA.push_back(N1);
+///   ListB.push_back(N1);
+/// }
+/// \endexample
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <class T, class... Options>
+class ilist_node
+    : public ilist_node_impl<
+          typename ilist_detail::compute_node_options<T, Options...>::type> {
+  static_assert(ilist_detail::check_options<Options...>::value,
+                "Unrecognized node option!");
+};
+
+namespace ilist_detail {
+
+/// An access class for ilist_node private API.
+///
+/// This gives access to the private parts of ilist nodes.  Nodes for an ilist
+/// should friend this class if they inherit privately from ilist_node.
+///
+/// Using this class outside of the ilist implementation is unsupported.
+struct NodeAccess {
+protected:
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
+    return N;
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getNodePtr(typename OptionsT::const_pointer N) {
+    return N;
+  }
+
+  template <class OptionsT>
+  static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
+    return static_cast<typename OptionsT::pointer>(N);
+  }
+
+  template <class OptionsT>
+  static typename OptionsT::const_pointer
+  getValuePtr(const ilist_node_impl<OptionsT> *N) {
+    return static_cast<typename OptionsT::const_pointer>(N);
+  }
+
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
+    return N.getPrev();
+  }
+
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
+    return N.getNext();
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getPrev(const ilist_node_impl<OptionsT> &N) {
+    return N.getPrev();
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getNext(const ilist_node_impl<OptionsT> &N) {
+    return N.getNext();
+  }
+};
+
+template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
+protected:
+  using pointer = typename OptionsT::pointer;
+  using const_pointer = typename OptionsT::const_pointer;
+  using node_type = ilist_node_impl<OptionsT>;
+
+  static node_type *getNodePtr(pointer N) {
+    return NodeAccess::getNodePtr<OptionsT>(N);
+  }
+
+  static const node_type *getNodePtr(const_pointer N) {
+    return NodeAccess::getNodePtr<OptionsT>(N);
+  }
+
+  static pointer getValuePtr(node_type *N) {
+    return NodeAccess::getValuePtr<OptionsT>(N);
+  }
+
+  static const_pointer getValuePtr(const node_type *N) {
+    return NodeAccess::getValuePtr<OptionsT>(N);
+  }
+};
+
+} // end namespace ilist_detail
+
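+/// The sentinel node of an intrusive list.
+///
+/// When the list is empty, the sentinel points at itself in both directions,
+/// so that begin() == end() works without any null checks.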
+template <class OptionsT>
+class ilist_sentinel : public ilist_node_impl<OptionsT> {
+public:
+  ilist_sentinel() {
+    this->initializeSentinel();
+    reset();
+  }
+
+  void reset() {
+    this->setPrev(this);
+    this->setNext(this);
+  }
+
+  bool empty() const { return this == this->getPrev(); }
+};
+
+/// An ilist node that can access its parent list.
+///
+/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
+/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
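+///
+/// A sketch of the expected shape (Fn and Block are illustrative, not part of
+/// this header):
+///
+/// \code
+/// class Fn;
+/// class Block : public ilist_node_with_parent<Block, Fn> {
+///   Fn *Parent;
+///
+/// public:
+///   Fn *getParent() const { return Parent; }
+/// };
+///
+/// class Fn {
+///   iplist<Block> Blocks;
+///
+/// public:
+///   static iplist<Block> Fn::*getSublistAccess(Block *) {
+///     return &Fn::Blocks;
+///   }
+/// };
+/// \endcode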
+template <typename NodeTy, typename ParentTy, class... Options>
+class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
+protected:
+  ilist_node_with_parent() = default;
+
+private:
+  /// Forward to NodeTy::getParent().
+  ///
+  /// Note: do not use the name "getParent()".  We want a compile error
+  /// (instead of recursion) when the subclass fails to implement \a
+  /// getParent().
+  const ParentTy *getNodeParent() const {
+    return static_cast<const NodeTy *>(this)->getParent();
+  }
+
+public:
+  /// @name Adjacent Node Accessors
+  /// @{
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  NodeTy *getPrevNode() {
+    // This could be factored into a shared helper, but then we couldn't use
+    // auto (and would need to name the type of the list).
+    const auto &List =
+        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+    return List.getPrevNode(*static_cast<NodeTy *>(this));
+  }
+
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  const NodeTy *getPrevNode() const {
+    return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  NodeTy *getNextNode() {
+    // This could be factored into a shared helper, but then we couldn't use
+    // auto (and would need to name the type of the list).
+    const auto &List =
+        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+    return List.getNextNode(*static_cast<NodeTy *>(this));
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  const NodeTy *getNextNode() const {
+    return const_cast<ilist_node_with_parent *>(this)->getNextNode();
+  }
+  /// @}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node_base.h b/linux-x64/clang/include/llvm/ADT/ilist_node_base.h
new file mode 100644
index 0000000..e5062ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node_base.h
@@ -0,0 +1,53 @@
+//===- llvm/ADT/ilist_node_base.h - Intrusive List Node Base -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_BASE_H
+#define LLVM_ADT_ILIST_NODE_BASE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+
+/// Base class for ilist nodes.
+///
+/// Optionally tracks whether this node is the sentinel.
+template <bool EnableSentinelTracking> class ilist_node_base;
+
+template <> class ilist_node_base<false> {
+  ilist_node_base *Prev = nullptr;
+  ilist_node_base *Next = nullptr;
+
+public:
+  void setPrev(ilist_node_base *Prev) { this->Prev = Prev; }
+  void setNext(ilist_node_base *Next) { this->Next = Next; }
+  ilist_node_base *getPrev() const { return Prev; }
+  ilist_node_base *getNext() const { return Next; }
+
+  bool isKnownSentinel() const { return false; }
+  void initializeSentinel() {}
+};
+
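+// This specialization steals the low bit of the Prev pointer to remember
+// whether the node is the sentinel.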
+template <> class ilist_node_base<true> {
+  PointerIntPair<ilist_node_base *, 1> PrevAndSentinel;
+  ilist_node_base *Next = nullptr;
+
+public:
+  void setPrev(ilist_node_base *Prev) { PrevAndSentinel.setPointer(Prev); }
+  void setNext(ilist_node_base *Next) { this->Next = Next; }
+  ilist_node_base *getPrev() const { return PrevAndSentinel.getPointer(); }
+  ilist_node_base *getNext() const { return Next; }
+
+  bool isSentinel() const { return PrevAndSentinel.getInt(); }
+  bool isKnownSentinel() const { return isSentinel(); }
+  void initializeSentinel() { PrevAndSentinel.setInt(true); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_BASE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node_options.h b/linux-x64/clang/include/llvm/ADT/ilist_node_options.h
new file mode 100644
index 0000000..c33df1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node_options.h
@@ -0,0 +1,133 @@
+//===- llvm/ADT/ilist_node_options.h - ilist_node Options -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_OPTIONS_H
+#define LLVM_ADT_ILIST_NODE_OPTIONS_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <type_traits>
+
+namespace llvm {
+
+template <bool EnableSentinelTracking> class ilist_node_base;
+template <bool EnableSentinelTracking> class ilist_base;
+
+/// Option to choose whether to track sentinels.
+///
+/// This option affects the ABI for the nodes.  When not specified explicitly,
+/// the ABI depends on LLVM_ENABLE_ABI_BREAKING_CHECKS.  Specify explicitly to
+/// enable \a ilist_node::isSentinel().
+template <bool EnableSentinelTracking> struct ilist_sentinel_tracking {};
+
+/// Option to specify a tag for the node type.
+///
+/// This option allows a single value type to be inserted in multiple lists
+/// simultaneously.  See \a ilist_node for usage examples.
+template <class Tag> struct ilist_tag {};
+
+namespace ilist_detail {
+
+/// Helper trait for recording whether an option is specified explicitly.
+template <bool IsExplicit> struct explicitness {
+  static const bool is_explicit = IsExplicit;
+};
+typedef explicitness<true> is_explicit;
+typedef explicitness<false> is_implicit;
+
+/// Check whether an option is valid.
+///
+/// The steps for adding and enabling a new ilist option include:
+/// \li define the option, ilist_foo<Bar>, above;
+/// \li add new parameters for Bar to \a ilist_detail::node_options;
+/// \li add an extraction meta-function, ilist_detail::extract_foo;
+/// \li call extract_foo from \a ilist_detail::compute_node_options and pass it
+/// into \a ilist_detail::node_options; and
+/// \li specialize \c is_valid_option<ilist_foo<Bar>> to inherit from \c
+/// std::true_type to get static assertions passing in \a simple_ilist and \a
+/// ilist_node.
+template <class Option> struct is_valid_option : std::false_type {};
+
+/// Extract sentinel tracking option.
+///
+/// Look through \p Options for the \a ilist_sentinel_tracking option, with the
+/// default depending on LLVM_ENABLE_ABI_BREAKING_CHECKS.
+template <class... Options> struct extract_sentinel_tracking;
+template <bool EnableSentinelTracking, class... Options>
+struct extract_sentinel_tracking<
+    ilist_sentinel_tracking<EnableSentinelTracking>, Options...>
+    : std::integral_constant<bool, EnableSentinelTracking>, is_explicit {};
+template <class Option1, class... Options>
+struct extract_sentinel_tracking<Option1, Options...>
+    : extract_sentinel_tracking<Options...> {};
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <> struct extract_sentinel_tracking<> : std::true_type, is_implicit {};
+#else
+template <>
+struct extract_sentinel_tracking<> : std::false_type, is_implicit {};
+#endif
+template <bool EnableSentinelTracking>
+struct is_valid_option<ilist_sentinel_tracking<EnableSentinelTracking>>
+    : std::true_type {};
+
+/// Extract custom tag option.
+///
+/// Look through \p Options for the \a ilist_tag option, pulling out the
+/// custom tag type, using void as a default.
+template <class... Options> struct extract_tag;
+template <class Tag, class... Options>
+struct extract_tag<ilist_tag<Tag>, Options...> {
+  typedef Tag type;
+};
+template <class Option1, class... Options>
+struct extract_tag<Option1, Options...> : extract_tag<Options...> {};
+template <> struct extract_tag<> { typedef void type; };
+template <class Tag> struct is_valid_option<ilist_tag<Tag>> : std::true_type {};
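+
+// For example (illustrative), given "struct A {};":
+//   extract_tag<ilist_sentinel_tracking<true>, ilist_tag<A>>::type is A,
+// and extract_tag<>::type is void.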
+
+/// Check whether options are valid.
+///
+/// The conjunction of \a is_valid_option on each individual option.
+template <class... Options> struct check_options;
+template <> struct check_options<> : std::true_type {};
+template <class Option1, class... Options>
+struct check_options<Option1, Options...>
+    : std::integral_constant<bool, is_valid_option<Option1>::value &&
+                                       check_options<Options...>::value> {};
+
+/// Traits for options for \a ilist_node.
+///
+/// This is usually computed via \a compute_node_options.
+template <class T, bool EnableSentinelTracking, bool IsSentinelTrackingExplicit,
+          class TagT>
+struct node_options {
+  typedef T value_type;
+  typedef T *pointer;
+  typedef T &reference;
+  typedef const T *const_pointer;
+  typedef const T &const_reference;
+
+  static const bool enable_sentinel_tracking = EnableSentinelTracking;
+  static const bool is_sentinel_tracking_explicit = IsSentinelTrackingExplicit;
+  typedef TagT tag;
+  typedef ilist_node_base<enable_sentinel_tracking> node_base_type;
+  typedef ilist_base<enable_sentinel_tracking> list_base_type;
+};
+
+template <class T, class... Options> struct compute_node_options {
+  typedef node_options<T, extract_sentinel_tracking<Options...>::value,
+                       extract_sentinel_tracking<Options...>::is_explicit,
+                       typename extract_tag<Options...>::type>
+      type;
+};
+
+} // end namespace ilist_detail
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_OPTIONS_H
diff --git a/linux-x64/clang/include/llvm/ADT/iterator.h b/linux-x64/clang/include/llvm/ADT/iterator.h
new file mode 100644
index 0000000..711f8f2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/iterator.h
@@ -0,0 +1,339 @@
+//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_H
+#define LLVM_ADT_ITERATOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// \brief CRTP base class which implements the entire standard iterator facade
+/// in terms of a minimal subset of the interface.
+///
+/// Use this when it is reasonable to implement most of the iterator
+/// functionality in terms of a core subset. If you need special behavior or
+/// there are performance implications for this, you may want to override the
+/// relevant members instead.
+///
+/// Note: one abstraction that this does *not* provide is implementing
+/// subtraction in terms of addition by negating the difference. Negation isn't
+/// always information preserving, and I can see very reasonable iterator
+/// designs where this doesn't work well. It doesn't really force much added
+/// boilerplate anyway.
+///
+/// Another abstraction that this doesn't provide is implementing increment in
+/// terms of addition of one. These aren't equivalent for all iterator
+/// categories, and respecting that adds a lot of complexity for little gain.
+///
+/// Classes wishing to use `iterator_facade_base` should implement the following
+/// methods:
+///
+/// Forward Iterators:
+///   (All of the following methods)
+///   - DerivedT &operator=(const DerivedT &R);
+///   - bool operator==(const DerivedT &R) const;
+///   - const T &operator*() const;
+///   - T &operator*();
+///   - DerivedT &operator++();
+///
+/// Bidirectional Iterators:
+///   (All methods of forward iterators, plus the following)
+///   - DerivedT &operator--();
+///
+/// Random-access Iterators:
+///   (All methods of bidirectional iterators excluding the following)
+///   - DerivedT &operator++();
+///   - DerivedT &operator--();
+///   (plus the following)
+///   - bool operator<(const DerivedT &RHS) const;
+///   - DifferenceTypeT operator-(const DerivedT &R) const;
+///   - DerivedT &operator+=(DifferenceTypeT N);
+///   - DerivedT &operator-=(DifferenceTypeT N);
+///
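+/// As a minimal sketch (IntIter is illustrative, not part of this header), a
+/// random-access iterator over an int array needs only the core subset; the
+/// facade derives ++, --, +, [], !=, >, <=, and >= from it:
+///
+/// \code
+/// class IntIter
+///     : public iterator_facade_base<IntIter, std::random_access_iterator_tag,
+///                                   int> {
+///   using BaseT =
+///       iterator_facade_base<IntIter, std::random_access_iterator_tag, int>;
+///
+///   int *P = nullptr;
+///
+/// public:
+///   IntIter() = default;
+///   explicit IntIter(int *Ptr) : P(Ptr) {}
+///
+///   bool operator==(const IntIter &R) const { return P == R.P; }
+///   bool operator<(const IntIter &R) const { return P < R.P; }
+///   std::ptrdiff_t operator-(const IntIter &R) const { return P - R.P; }
+///   // Keep the facade's iterator-minus-N overload visible as well.
+///   using BaseT::operator-;
+///   IntIter &operator+=(std::ptrdiff_t N) { P += N; return *this; }
+///   IntIter &operator-=(std::ptrdiff_t N) { P -= N; return *this; }
+///   int &operator*() const { return *P; }
+/// };
+/// \endcode
+///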
+template <typename DerivedT, typename IteratorCategoryT, typename T,
+          typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
+          typename ReferenceT = T &>
+class iterator_facade_base
+    : public std::iterator<IteratorCategoryT, T, DifferenceTypeT, PointerT,
+                           ReferenceT> {
+protected:
+  enum {
+    IsRandomAccess = std::is_base_of<std::random_access_iterator_tag,
+                                     IteratorCategoryT>::value,
+    IsBidirectional = std::is_base_of<std::bidirectional_iterator_tag,
+                                      IteratorCategoryT>::value,
+  };
+
+  /// A proxy object for computing a reference by dereferencing a copy of an
+  /// iterator. This is used in APIs which need to produce a reference via
+  /// indirection but for which the iterator object might be a temporary. The
+  /// proxy preserves the iterator internally and exposes the dereferenced
+  /// reference via a conversion operator.
+  class ReferenceProxy {
+    friend iterator_facade_base;
+
+    DerivedT I;
+
+    ReferenceProxy(DerivedT I) : I(std::move(I)) {}
+
+  public:
+    operator ReferenceT() const { return *I; }
+  };
+
+public:
+  DerivedT operator+(DifferenceTypeT n) const {
+    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    static_assert(
+        IsRandomAccess,
+        "The '+' operator is only defined for random access iterators.");
+    DerivedT tmp = *static_cast<const DerivedT *>(this);
+    tmp += n;
+    return tmp;
+  }
+  friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
+    static_assert(
+        IsRandomAccess,
+        "The '+' operator is only defined for random access iterators.");
+    return i + n;
+  }
+  DerivedT operator-(DifferenceTypeT n) const {
+    static_assert(
+        IsRandomAccess,
+        "The '-' operator is only defined for random access iterators.");
+    DerivedT tmp = *static_cast<const DerivedT *>(this);
+    tmp -= n;
+    return tmp;
+  }
+
+  DerivedT &operator++() {
+    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    return static_cast<DerivedT *>(this)->operator+=(1);
+  }
+  DerivedT operator++(int) {
+    DerivedT tmp = *static_cast<DerivedT *>(this);
+    ++*static_cast<DerivedT *>(this);
+    return tmp;
+  }
+  DerivedT &operator--() {
+    static_assert(
+        IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    return static_cast<DerivedT *>(this)->operator-=(1);
+  }
+  DerivedT operator--(int) {
+    static_assert(
+        IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    DerivedT tmp = *static_cast<DerivedT *>(this);
+    --*static_cast<DerivedT *>(this);
+    return tmp;
+  }
+
+  bool operator!=(const DerivedT &RHS) const {
+    return !static_cast<const DerivedT *>(this)->operator==(RHS);
+  }
+
+  bool operator>(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
+           !static_cast<const DerivedT *>(this)->operator==(RHS);
+  }
+  bool operator<=(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator>(RHS);
+  }
+  bool operator>=(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator<(RHS);
+  }
+
+  PointerT operator->() { return &static_cast<DerivedT *>(this)->operator*(); }
+  PointerT operator->() const {
+    return &static_cast<const DerivedT *>(this)->operator*();
+  }
+  ReferenceProxy operator[](DifferenceTypeT n) {
+    static_assert(IsRandomAccess,
+                  "Subscripting is only defined for random access iterators.");
+    return ReferenceProxy(static_cast<DerivedT *>(this)->operator+(n));
+  }
+  ReferenceProxy operator[](DifferenceTypeT n) const {
+    static_assert(IsRandomAccess,
+                  "Subscripting is only defined for random access iterators.");
+    return ReferenceProxy(static_cast<const DerivedT *>(this)->operator+(n));
+  }
+};
+
+/// \brief CRTP base class for adapting an iterator to a different type.
+///
+/// This class can be used through CRTP to adapt one iterator into another.
+/// Typically this is done through providing in the derived class a custom \c
+/// operator* implementation. Other methods can be overridden as well.
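+///
+/// For illustration, a hedged sketch (hypothetical names) adapting an
+/// iterator over std::pair<int, int> to iterate over the pairs' first
+/// members:
+/// \code
+///   using PairIt = std::vector<std::pair<int, int>>::iterator;
+///
+///   struct first_iterator
+///       : iterator_adaptor_base<first_iterator, PairIt,
+///                               std::random_access_iterator_tag, int> {
+///     first_iterator() = default;
+///     explicit first_iterator(PairIt I)
+///         : first_iterator::iterator_adaptor_base(std::move(I)) {}
+///     int &operator*() const { return this->I->first; }
+///   };
+/// \endcode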
+template <
+    typename DerivedT, typename WrappedIteratorT,
+    typename IteratorCategoryT =
+        typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+    typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
+    typename DifferenceTypeT =
+        typename std::iterator_traits<WrappedIteratorT>::difference_type,
+    typename PointerT = typename std::conditional<
+        std::is_same<T, typename std::iterator_traits<
+                            WrappedIteratorT>::value_type>::value,
+        typename std::iterator_traits<WrappedIteratorT>::pointer, T *>::type,
+    typename ReferenceT = typename std::conditional<
+        std::is_same<T, typename std::iterator_traits<
+                            WrappedIteratorT>::value_type>::value,
+        typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type,
+    // Don't provide these; they mostly act as aliases below.
+    typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
+class iterator_adaptor_base
+    : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
+                                  DifferenceTypeT, PointerT, ReferenceT> {
+  using BaseT = typename iterator_adaptor_base::iterator_facade_base;
+
+protected:
+  WrappedIteratorT I;
+
+  iterator_adaptor_base() = default;
+
+  explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {
+    static_assert(std::is_base_of<iterator_adaptor_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+  }
+
+  const WrappedIteratorT &wrapped() const { return I; }
+
+public:
+  using difference_type = DifferenceTypeT;
+
+  DerivedT &operator+=(difference_type n) {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '+=' operator is only defined for random access iterators.");
+    I += n;
+    return *static_cast<DerivedT *>(this);
+  }
+  DerivedT &operator-=(difference_type n) {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '-=' operator is only defined for random access iterators.");
+    I -= n;
+    return *static_cast<DerivedT *>(this);
+  }
+  using BaseT::operator-;
+  difference_type operator-(const DerivedT &RHS) const {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '-' operator is only defined for random access iterators.");
+    return I - RHS.I;
+  }
+
+  // We have to explicitly provide ++ and -- rather than letting the facade
+  // forward to += because WrappedIteratorT might not support +=.
+  using BaseT::operator++;
+  DerivedT &operator++() {
+    ++I;
+    return *static_cast<DerivedT *>(this);
+  }
+  using BaseT::operator--;
+  DerivedT &operator--() {
+    static_assert(
+        BaseT::IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    --I;
+    return *static_cast<DerivedT *>(this);
+  }
+
+  bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
+  bool operator<(const DerivedT &RHS) const {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return I < RHS.I;
+  }
+
+  ReferenceT operator*() const { return *I; }
+};
+
+/// \brief An iterator type that allows iterating over the pointees via some
+/// other iterator.
+///
+/// The typical usage of this is to expose a type that iterates over Ts, but
+/// which is implemented with some iterator over T*s:
+///
+/// \code
+///   using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
+/// \endcode
+template <typename WrappedIteratorT,
+          typename T = typename std::remove_reference<
+              decltype(**std::declval<WrappedIteratorT>())>::type>
+struct pointee_iterator
+    : iterator_adaptor_base<
+          pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
+          typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+          T> {
+  pointee_iterator() = default;
+  template <typename U>
+  pointee_iterator(U &&u)
+      : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
+
+  T &operator*() const { return **this->I; }
+};
+
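+/// Convenience function to build a pointee_iterator range over a range of
+/// pointers.
+///
+/// A usage sketch (hypothetical names, for illustration):
+/// \code
+///   SmallVector<Foo *, 4> Ptrs = ...;
+///   for (Foo &F : make_pointee_range(Ptrs))
+///     ...;
+/// \endcode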
+template <typename RangeT, typename WrappedIteratorT =
+                               decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointee_iterator<WrappedIteratorT>>
+make_pointee_range(RangeT &&Range) {
+  using PointeeIteratorT = pointee_iterator<WrappedIteratorT>;
+  return make_range(PointeeIteratorT(std::begin(std::forward<RangeT>(Range))),
+                    PointeeIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
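+/// An iterator type that adapts an iterator over Ts into an iterator over
+/// T*s, the inverse of pointee_iterator.
+///
+/// A usage sketch (hypothetical names, for illustration):
+/// \code
+///   SmallVector<Foo, 4> Values = ...;
+///   for (Foo *F : make_pointer_range(Values))
+///     ...;
+/// \endcode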
+template <typename WrappedIteratorT,
+          typename T = decltype(&*std::declval<WrappedIteratorT>())>
+class pointer_iterator
+    : public iterator_adaptor_base<pointer_iterator<WrappedIteratorT>,
+                                   WrappedIteratorT, T> {
+  mutable T Ptr;
+
+public:
+  pointer_iterator() = default;
+
+  explicit pointer_iterator(WrappedIteratorT u)
+      : pointer_iterator::iterator_adaptor_base(std::move(u)) {}
+
+  T &operator*() { return Ptr = &*this->I; }
+  const T &operator*() const { return Ptr = &*this->I; }
+};
+
+template <typename RangeT, typename WrappedIteratorT =
+                               decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointer_iterator<WrappedIteratorT>>
+make_pointer_range(RangeT &&Range) {
+  using PointerIteratorT = pointer_iterator<WrappedIteratorT>;
+  return make_range(PointerIteratorT(std::begin(std::forward<RangeT>(Range))),
+                    PointerIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/iterator_range.h b/linux-x64/clang/include/llvm/ADT/iterator_range.h
new file mode 100644
index 0000000..3cbf619
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/iterator_range.h
@@ -0,0 +1,90 @@
+//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This provides a very simple, boring adaptor for a begin and end iterator
+/// into a range type. This should be used to build range views that work well
+/// with range based for loops and range based constructors.
+///
+/// Note that code here follows standard-library-style coding conventions, as
+/// it mirrors interfaces proposed for standardization.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_RANGE_H
+#define LLVM_ADT_ITERATOR_RANGE_H
+
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// \brief A range adaptor for a pair of iterators.
+///
+/// This just wraps two iterators into a range-compatible interface. Nothing
+/// fancy at all.
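+///
+/// A usage sketch (illustrative only):
+/// \code
+///   std::vector<int> V = {1, 2, 3};
+///   iterator_range<std::vector<int>::iterator> R(V.begin(), V.end());
+///   for (int I : R)
+///     ...;
+/// \endcode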
+template <typename IteratorT>
+class iterator_range {
+  IteratorT begin_iterator, end_iterator;
+
+public:
+  //TODO: Add SFINAE to test that the Container's iterators match the range's
+  //      iterators.
+  template <typename Container>
+  iterator_range(Container &&c)
+  //TODO: Consider ADL/non-member begin/end calls.
+      : begin_iterator(c.begin()), end_iterator(c.end()) {}
+  iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
+      : begin_iterator(std::move(begin_iterator)),
+        end_iterator(std::move(end_iterator)) {}
+
+  IteratorT begin() const { return begin_iterator; }
+  IteratorT end() const { return end_iterator; }
+};
+
+/// \brief Convenience function for iterating over sub-ranges.
+///
+/// This provides a bit of syntactic sugar to make using sub-ranges
+/// in for loops easier. Analogous to std::make_pair().
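+///
+/// A sketch (illustrative only):
+/// \code
+///   for (int I : make_range(V.begin(), V.begin() + 2))
+///     ...;
+/// \endcode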
+template <class T> iterator_range<T> make_range(T x, T y) {
+  return iterator_range<T>(std::move(x), std::move(y));
+}
+
+template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
+  return iterator_range<T>(std::move(p.first), std::move(p.second));
+}
+
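+/// Convenience function mapping a range onto the same range minus its first
+/// \p n elements.
+///
+/// A sketch (illustrative only):
+/// \code
+///   for (int I : drop_begin(V, 1)) // Skips V[0].
+///     ...;
+/// \endcode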
+template<typename T>
+iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {
+  return make_range(std::next(begin(t), n), end(t));
+}
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_RANGE_H
diff --git a/linux-x64/clang/include/llvm/ADT/simple_ilist.h b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
new file mode 100644
index 0000000..4c7598a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
@@ -0,0 +1,330 @@
+//===- llvm/ADT/simple_ilist.h - Simple Intrusive List ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SIMPLE_ILIST_H
+#define LLVM_ADT_SIMPLE_ILIST_H
+
+#include "llvm/ADT/ilist_base.h"
+#include "llvm/ADT/ilist_iterator.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/ilist_node_options.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// A simple intrusive list implementation.
+///
+/// This is a simple intrusive list for a \c T that inherits from \c
+/// ilist_node<T>.  The list never takes ownership of anything inserted in it.
+///
+/// Unlike \a iplist<T> and \a ilist<T>, \a simple_ilist<T> never allocates or
+/// deletes values, and has no callback traits.
+///
+/// The APIs for adding nodes include \a push_front(), \a push_back(), and \a
+/// insert().  These all take values by reference (not by pointer), except for
+/// the range version of \a insert().
+///
+/// There are three sets of APIs for discarding nodes from the list: \a
+/// remove(), which takes a reference to the node to remove, \a erase(), which
+/// takes an iterator or iterator range and returns the next one, and \a
+/// clear(), which empties out the container.  All three are constant time
+/// operations.  None of these deletes any nodes; in particular, if there is a
+/// single node in the list, then these have identical semantics:
+/// \li \c L.remove(L.front());
+/// \li \c L.erase(L.begin());
+/// \li \c L.clear();
+///
+/// As a convenience for callers, there are parallel APIs that take a \c
+/// Disposer (such as \c std::default_delete<T>): \a removeAndDispose(), \a
+/// eraseAndDispose(), and \a clearAndDispose().  These have different names
+/// because the extra semantic is otherwise non-obvious.  They are equivalent
+/// to calling \a std::for_each() on the range to be discarded.
+///
+/// The currently available \p Options customize the nodes in the list.  The
+/// same options must be specified in the \a ilist_node instantiation for
+/// compatibility (although the order is irrelevant).
+/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
+/// list should use.  This is useful if a type \p T is part of multiple,
+/// independent lists simultaneously.
+/// \li Use \a ilist_sentinel_tracking to always (or never) track whether a
+/// node is a sentinel.  Specifying \c true enables the \a
+/// ilist_node::isSentinel() API.  Unlike \a ilist_node::isKnownSentinel(),
+/// which is only appropriate for assertions, \a ilist_node::isSentinel() is
+/// appropriate for real logic.
+///
+/// Here are examples of \p Options usage:
+/// \li \c simple_ilist<T> gives the defaults.
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<true>> enables the \a
+/// ilist_node::isSentinel() API.
+/// \li \c simple_ilist<T,ilist_tag<A>,ilist_sentinel_tracking<false>>
+/// specifies a tag of A and that tracking should be off (even when
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS is enabled).
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<false>,ilist_tag<A>> is
+/// equivalent to the last.
+///
+/// See \a is_valid_option for steps on adding a new option.
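+///
+/// A minimal usage sketch (hypothetical node type, for illustration):
+/// \code
+///   struct Elt : ilist_node<Elt> { int Value = 0; };
+///
+///   Elt A, B;
+///   simple_ilist<Elt> List;
+///   List.push_back(A);  // Never copies; List does not own A.
+///   List.push_front(B);
+///   for (Elt &E : List)
+///     ...;
+///   List.clear();       // Empties the list; never deletes the nodes.
+/// \endcode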
+template <typename T, class... Options>
+class simple_ilist
+    : ilist_detail::compute_node_options<T, Options...>::type::list_base_type,
+      ilist_detail::SpecificNodeAccess<
+          typename ilist_detail::compute_node_options<T, Options...>::type> {
+  static_assert(ilist_detail::check_options<Options...>::value,
+                "Unrecognized node option!");
+  using OptionsT =
+      typename ilist_detail::compute_node_options<T, Options...>::type;
+  using list_base_type = typename OptionsT::list_base_type;
+  ilist_sentinel<OptionsT> Sentinel;
+
+public:
+  using value_type = typename OptionsT::value_type;
+  using pointer = typename OptionsT::pointer;
+  using reference = typename OptionsT::reference;
+  using const_pointer = typename OptionsT::const_pointer;
+  using const_reference = typename OptionsT::const_reference;
+  using iterator = ilist_iterator<OptionsT, false, false>;
+  using const_iterator = ilist_iterator<OptionsT, false, true>;
+  using reverse_iterator = ilist_iterator<OptionsT, true, false>;
+  using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  simple_ilist() = default;
+  ~simple_ilist() = default;
+
+  // No copy constructors.
+  simple_ilist(const simple_ilist &) = delete;
+  simple_ilist &operator=(const simple_ilist &) = delete;
+
+  // Move constructors.
+  simple_ilist(simple_ilist &&X) { splice(end(), X); }
+  simple_ilist &operator=(simple_ilist &&X) {
+    clear();
+    splice(end(), X);
+    return *this;
+  }
+
+  iterator begin() { return ++iterator(Sentinel); }
+  const_iterator begin() const { return ++const_iterator(Sentinel); }
+  iterator end() { return iterator(Sentinel); }
+  const_iterator end() const { return const_iterator(Sentinel); }
+  reverse_iterator rbegin() { return ++reverse_iterator(Sentinel); }
+  const_reverse_iterator rbegin() const {
+    return ++const_reverse_iterator(Sentinel);
+  }
+  reverse_iterator rend() { return reverse_iterator(Sentinel); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(Sentinel);
+  }
+
+  /// Check if the list is empty in constant time.
+  LLVM_NODISCARD bool empty() const { return Sentinel.empty(); }
+
+  /// Calculate the size of the list in linear time.
+  LLVM_NODISCARD size_type size() const {
+    return std::distance(begin(), end());
+  }
+
+  reference front() { return *begin(); }
+  const_reference front() const { return *begin(); }
+  reference back() { return *rbegin(); }
+  const_reference back() const { return *rbegin(); }
+
+  /// Insert a node at the front; never copies.
+  void push_front(reference Node) { insert(begin(), Node); }
+
+  /// Insert a node at the back; never copies.
+  void push_back(reference Node) { insert(end(), Node); }
+
+  /// Remove the node at the front; never deletes.
+  void pop_front() { erase(begin()); }
+
+  /// Remove the node at the back; never deletes.
+  void pop_back() { erase(--end()); }
+
+  /// Swap with another list in place using std::swap.
+  void swap(simple_ilist &X) { std::swap(*this, X); }
+
+  /// Insert a node by reference; never copies.
+  iterator insert(iterator I, reference Node) {
+    list_base_type::insertBefore(*I.getNodePtr(), *this->getNodePtr(&Node));
+    return iterator(&Node);
+  }
+
+  /// Insert a range of nodes; never copies.
+  template <class Iterator>
+  void insert(iterator I, Iterator First, Iterator Last) {
+    for (; First != Last; ++First)
+      insert(I, *First);
+  }
+
+  /// Clone another list.
+  template <class Cloner, class Disposer>
+  void cloneFrom(const simple_ilist &L2, Cloner clone, Disposer dispose) {
+    clearAndDispose(dispose);
+    for (const_reference V : L2)
+      push_back(*clone(V));
+  }
+
+  /// Remove a node by reference; never deletes.
+  ///
+  /// \see \a erase() for removing by iterator.
+  /// \see \a removeAndDispose() if the node should be deleted.
+  void remove(reference N) { list_base_type::remove(*this->getNodePtr(&N)); }
+
+  /// Remove a node by reference and dispose of it.
+  template <class Disposer>
+  void removeAndDispose(reference N, Disposer dispose) {
+    remove(N);
+    dispose(&N);
+  }
+
+  /// Remove a node by iterator; never deletes.
+  ///
+  /// \see \a remove() for removing by reference.
+  /// \see \a eraseAndDispose() if the node should be deleted.
+  iterator erase(iterator I) {
+    assert(I != end() && "Cannot remove end of list!");
+    remove(*I++);
+    return I;
+  }
+
+  /// Remove a range of nodes; never deletes.
+  ///
+  /// \see \a eraseAndDispose() if the nodes should be deleted.
+  iterator erase(iterator First, iterator Last) {
+    list_base_type::removeRange(*First.getNodePtr(), *Last.getNodePtr());
+    return Last;
+  }
+
+  /// Remove a node by iterator and dispose of it.
+  template <class Disposer>
+  iterator eraseAndDispose(iterator I, Disposer dispose) {
+    auto Next = std::next(I);
+    erase(I);
+    dispose(&*I);
+    return Next;
+  }
+
+  /// Remove a range of nodes and dispose of them.
+  template <class Disposer>
+  iterator eraseAndDispose(iterator First, iterator Last, Disposer dispose) {
+    while (First != Last)
+      First = eraseAndDispose(First, dispose);
+    return Last;
+  }
+
+  /// Clear the list; never deletes.
+  ///
+  /// \see \a clearAndDispose() if the nodes should be deleted.
+  void clear() { Sentinel.reset(); }
+
+  /// Clear the list and dispose of the nodes.
+  template <class Disposer> void clearAndDispose(Disposer dispose) {
+    eraseAndDispose(begin(), end(), dispose);
+  }
+
+  /// Splice in another list.
+  void splice(iterator I, simple_ilist &L2) {
+    splice(I, L2, L2.begin(), L2.end());
+  }
+
+  /// Splice in a node from another list.
+  void splice(iterator I, simple_ilist &L2, iterator Node) {
+    splice(I, L2, Node, std::next(Node));
+  }
+
+  /// Splice in a range of nodes from another list.
+  void splice(iterator I, simple_ilist &, iterator First, iterator Last) {
+    list_base_type::transferBefore(*I.getNodePtr(), *First.getNodePtr(),
+                                   *Last.getNodePtr());
+  }
+
+  /// Merge in another list.
+  ///
+  /// \pre \c this and \p RHS are sorted.
+  ///@{
+  void merge(simple_ilist &RHS) { merge(RHS, std::less<T>()); }
+  template <class Compare> void merge(simple_ilist &RHS, Compare comp);
+  ///@}
+
+  /// Sort the list.
+  ///@{
+  void sort() { sort(std::less<T>()); }
+  template <class Compare> void sort(Compare comp);
+  ///@}
+};
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::merge(simple_ilist &RHS, Compare comp) {
+  if (this == &RHS || RHS.empty())
+    return;
+  iterator LI = begin(), LE = end();
+  iterator RI = RHS.begin(), RE = RHS.end();
+  while (LI != LE) {
+    if (comp(*RI, *LI)) {
+      // Transfer a run of at least size 1 from RHS to LHS.
+      iterator RunStart = RI++;
+      RI = std::find_if(RI, RE, [&](reference RV) { return !comp(RV, *LI); });
+      splice(LI, RHS, RunStart, RI);
+      if (RI == RE)
+        return;
+    }
+    ++LI;
+  }
+  // Transfer the remaining RHS nodes once LHS is finished.
+  splice(LE, RHS, RI, RE);
+}
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::sort(Compare comp) {
+  // Vacuously sorted.
+  if (empty() || std::next(begin()) == end())
+    return;
+
+  // Split the list in the middle.
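+  // (End advances two nodes for each node Center advances, so Center stops at
+  // the midpoint.)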
+  iterator Center = begin(), End = begin();
+  while (End != end() && ++End != end()) {
+    ++Center;
+    ++End;
+  }
+  simple_ilist RHS;
+  RHS.splice(RHS.end(), *this, Center, end());
+
+  // Sort the sublists and merge back together.
+  sort(comp);
+  RHS.sort(comp);
+  merge(RHS, comp);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SIMPLE_ILIST_H
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h
new file mode 100644
index 0000000..ec4a90c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h
@@ -0,0 +1,1114 @@
+//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the generic AliasAnalysis interface, which is the common
+// interface used by all clients of alias analysis information, and
+// implemented by all alias analysis implementations.  Mod/Ref information is
+// also captured by this interface.
+//
+// Implementations of this interface must implement the various virtual
+// methods; doing so automatically provides functionality for the entire suite
+// of client APIs.
+//
+// This API identifies memory regions with the MemoryLocation class. The pointer
+// component specifies the base memory address of the region. The Size specifies
+// the maximum size (in address units) of the memory region, or
+// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
+// identifies the "type" of the memory reference; see the
+// TypeBasedAliasAnalysis class for details.
+//
+// Some non-obvious details include:
+//  - Pointers that point to two completely different objects in memory never
+//    alias, regardless of the value of the Size component.
+//  - NoAlias doesn't imply unequal pointers. The most obvious example of this
+//    is two pointers to constant memory. Even if they are equal, constant
+//    memory is never stored to, so there will never be any dependencies.
+//    In this and other situations, the pointers may be both NoAlias and
+//    MustAlias at the same time. The current API can only return one result,
+//    though this is rarely a problem in practice.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
+#define LLVM_ANALYSIS_ALIASANALYSIS_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class AnalysisUsage;
+class BasicAAResult;
+class BasicBlock;
+class DominatorTree;
+class OrderedBasicBlock;
+class Value;
+
+/// The possible results of an alias query.
+///
+/// These results are always computed between two MemoryLocation objects as
+/// a query to some alias analysis.
+///
+/// Note that these are unscoped enumerations because we would like to support
+/// implicitly testing a result for the existence of any possible aliasing with
+/// a conversion to bool, but an "enum class" doesn't support this. The
+/// canonical names from the literature are suffixed and unique anyway, and so
+/// they serve as global constants in LLVM for these results.
+///
+/// See docs/AliasAnalysis.html for more information on the specific meanings
+/// of these values.
+enum AliasResult : uint8_t {
+  /// The two locations do not alias at all.
+  ///
+  /// This value is arranged to convert to false, while all other values
+  /// convert to true. This allows a boolean context to convert the result to
+  /// a binary flag indicating whether there is the possibility of aliasing.
+  NoAlias = 0,
+  /// The two locations may or may not alias. This is the least precise result.
+  MayAlias,
+  /// The two locations alias, but only due to a partial overlap.
+  PartialAlias,
+  /// The two locations precisely alias each other.
+  MustAlias,
+};
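+
+// For illustration: because NoAlias is zero, an AliasResult converts to a
+// boolean "may these alias at all?" flag (a sketch, with AA an AAResults):
+//
+//   if (AA.alias(LocA, LocB)) { /* some aliasing is possible */ }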
+
+/// Flags indicating whether a memory access modifies or references memory.
+///
+/// The possibilities are no access at all, a modification, a reference, or
+/// both a modification and a reference. These are structured such that
+/// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must'
+/// work with any of the possible values.
+enum class ModRefInfo : uint8_t {
+  /// Must is provided for completeness, but no routines will return only
+  /// Must today. See definition of Must below.
+  Must = 0,
+  /// The access may reference the value stored in memory,
+  /// a mustAlias relation was found, and no mayAlias or partialAlias found.
+  MustRef = 1,
+  /// The access may modify the value stored in memory,
+  /// a mustAlias relation was found, and no mayAlias or partialAlias found.
+  MustMod = 2,
+  /// The access may reference, modify or both the value stored in memory,
+  /// a mustAlias relation was found, and no mayAlias or partialAlias found.
+  MustModRef = MustRef | MustMod,
+  /// The access neither references nor modifies the value stored in memory.
+  NoModRef = 4,
+  /// The access may reference the value stored in memory.
+  Ref = NoModRef | MustRef,
+  /// The access may modify the value stored in memory.
+  Mod = NoModRef | MustMod,
+  /// The access may reference and may modify the value stored in memory.
+  ModRef = Ref | Mod,
+
+  /// About Must:
+  /// Must is set in a best effort manner.
+  /// We usually do not try our best to infer Must, instead it is merely
+  /// another piece of "free" information that is presented when available.
+  /// Must set means there was certainly a MustAlias found. For calls,
+  /// where multiple arguments are checked (argmemonly), this translates to
+  /// only MustAlias or NoAlias results having been found.
+  /// Must is not set for RAR accesses, even if the two locations must
+  /// alias. The reason is that two read accesses translate to an early return
+  /// of NoModRef. An additional alias check to set Must may be
+  /// expensive. Other cases may also not set Must (e.g. callCapturesBefore).
+  /// We refer to Must being *set* when the most significant bit is *cleared*.
+  /// Conversely we *clear* Must information by *setting* the Must bit to 1.
+};
+
+LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
+  return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
+         static_cast<int>(ModRefInfo::Must);
+}
+LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef);
+}
+LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
+  return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
+         static_cast<int>(ModRefInfo::MustModRef);
+}
+LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod);
+}
+LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef);
+}
+LLVM_NODISCARD inline bool isMustSet(const ModRefInfo MRI) {
+  return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef));
+}
+
+LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MustMod));
+}
+LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MustRef));
+}
+LLVM_NODISCARD inline ModRefInfo setMust(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) &
+                    static_cast<int>(ModRefInfo::MustModRef));
+}
+LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MustModRef));
+}
+LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
+}
+LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
+}
+LLVM_NODISCARD inline ModRefInfo clearMust(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::NoModRef));
+}
+LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
+                                             const ModRefInfo MRI2) {
+  return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
+}
+LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
+                                                 const ModRefInfo MRI2) {
+  return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
+}
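+
+// Worked examples of the encoding (a sketch; enum values above are MustRef=1,
+// MustMod=2, NoModRef=4, Ref=5, Mod=6, ModRef=7):
+//
+//   isMustSet(ModRefInfo::MustRef);                // true: bit 4 is clear.
+//   isMustSet(ModRefInfo::Ref);                    // false: Ref sets bit 4.
+//   clearMust(ModRefInfo::MustMod);                // Yields ModRefInfo::Mod.
+//   unionModRef(ModRefInfo::Ref, ModRefInfo::Mod); // Yields ModRefInfo::ModRef.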
+
+/// The locations at which a function might access memory.
+///
+/// These are primarily used in conjunction with the \c ModRefInfo bits to
+/// describe both the nature of access and the locations of access for a
+/// function call.
+enum FunctionModRefLocation {
+  /// Base case is no access to memory.
+  FMRL_Nowhere = 0,
+  /// Access to memory via argument pointers.
+  FMRL_ArgumentPointees = 8,
+  /// Memory that is inaccessible via LLVM IR.
+  FMRL_InaccessibleMem = 16,
+  /// Access to any memory.
+  FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees
+};
+
+/// Summary of how a function affects memory in the program.
+///
+/// Loads from constant globals are not considered memory accesses for this
+/// interface. Also, functions may freely modify stack space local to their
+/// invocation without having to report it through these interfaces.
+enum FunctionModRefBehavior {
+  /// This function does not perform any non-local loads or stores to memory.
+  ///
+  /// This property corresponds to the GCC 'const' attribute.
+  /// This property corresponds to the LLVM IR 'readnone' attribute.
+  /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
+  FMRB_DoesNotAccessMemory =
+      FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef),
+
+  /// The only memory references in this function (if it has any) are
+  /// non-volatile loads from objects pointed to by its pointer-typed
+  /// arguments, with arbitrary offsets.
+  ///
+  /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
+  FMRB_OnlyReadsArgumentPointees =
+      FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref),
+
+  /// The only memory references in this function (if it has any) are
+  /// non-volatile loads and stores from objects pointed to by its
+  /// pointer-typed arguments, with arbitrary offsets.
+  ///
+  /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
+  FMRB_OnlyAccessesArgumentPointees =
+      FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef),
+
+  /// The only memory references in this function (if it has any) are
+  /// references of memory that is otherwise inaccessible via LLVM IR.
+  ///
+  /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
+  FMRB_OnlyAccessesInaccessibleMem =
+      FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef),
+
+  /// The function may perform non-volatile loads and stores of objects
+  /// pointed to by its pointer-typed arguments, with arbitrary offsets, and
+  /// it may also perform loads and stores of memory that is otherwise
+  /// inaccessible via LLVM IR.
+  ///
+  /// This property corresponds to the LLVM IR
+  /// inaccessiblemem_or_argmemonly attribute.
+  FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
+                                          FMRL_ArgumentPointees |
+                                          static_cast<int>(ModRefInfo::ModRef),
+
+  /// This function does not perform any non-local stores or volatile loads,
+  /// but may read from any memory location.
+  ///
+  /// This property corresponds to the GCC 'pure' attribute.
+  /// This property corresponds to the LLVM IR 'readonly' attribute.
+  /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
+  FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
+
+  /// This function does not read from memory anywhere, but may write to any
+  /// memory location.
+  ///
+  /// This property corresponds to the LLVM IR 'writeonly' attribute.
+  /// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
+  FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod),
+
+  /// This indicates that the function could not be classified into one of the
+  /// behaviors above.
+  FMRB_UnknownModRefBehavior =
+      FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef)
+};
+
+// This wrapper method strips bits significant only in FunctionModRefBehavior
+// to obtain a valid ModRefInfo. The benefit of using the wrapper is that if
+// the ModRefInfo enum changes, the wrapper can be updated to mask with the
+// new enum entry that has all bits set to 1.
+LLVM_NODISCARD inline ModRefInfo
+createModRefInfo(const FunctionModRefBehavior FMRB) {
+  return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
+}
+
+class AAResults {
+public:
+  // Make these results default constructible and movable. We have to spell
+  // these out because MSVC won't synthesize them.
+  AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
+  AAResults(AAResults &&Arg);
+  ~AAResults();
+
+  /// Register a specific AA result.
+  template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
+    // FIXME: We should use a much lighter weight system than the usual
+    // polymorphic pattern because we don't own AAResult. It should
+    // ideally involve two pointers and no separate allocation.
+    AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
+  }
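+
+  // For illustration, a hedged usage sketch (assumes a BasicAAResult BAR
+  // constructed elsewhere; the aggregation must not outlive BAR):
+  //
+  //   AAResults AAR(TLI);
+  //   AAR.addAAResult(BAR);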
+
+  /// Register a function analysis ID that the results aggregation depends on.
+  ///
+  /// This is used in the new pass manager to implement the invalidation logic
+  /// where we must invalidate the results aggregation if any of our component
+  /// analyses become invalid.
+  void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); }
+
+  /// Handle invalidation events in the new pass manager.
+  ///
+  /// The aggregation is invalidated if any of the underlying analyses is
+  /// invalidated.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+  //===--------------------------------------------------------------------===//
+  /// \name Alias Queries
+  /// @{
+
+  /// The main low level interface to the alias analysis implementation.
+  /// Returns an AliasResult indicating whether the two pointers are aliased to
+  /// each other. This is the interface that must be implemented by specific
+  /// alias analysis implementations.
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+  /// A convenience wrapper around the primary \c alias interface.
+  AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
+                    uint64_t V2Size) {
+    return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
+  }
+
+  /// A convenience wrapper around the primary \c alias interface.
+  AliasResult alias(const Value *V1, const Value *V2) {
+    return alias(V1, MemoryLocation::UnknownSize, V2,
+                 MemoryLocation::UnknownSize);
+  }
+
+  /// A trivial helper function to check to see if the specified pointers are
+  /// no-alias.
+  bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    return alias(LocA, LocB) == NoAlias;
+  }
+
+  /// A convenience wrapper around the \c isNoAlias helper interface.
+  bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
+                 uint64_t V2Size) {
+    return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
+  }
+
+  /// A convenience wrapper around the \c isNoAlias helper interface.
+  bool isNoAlias(const Value *V1, const Value *V2) {
+    return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
+  }
+
+  /// A trivial helper function to check to see if the specified pointers are
+  /// must-alias.
+  bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    return alias(LocA, LocB) == MustAlias;
+  }
+
+  /// A convenience wrapper around the \c isMustAlias helper interface.
+  bool isMustAlias(const Value *V1, const Value *V2) {
+    return alias(V1, 1, V2, 1) == MustAlias;
+  }
+
+  /// Checks whether the given location points to constant memory, or if
+  /// \p OrLocal is true whether it points to a local alloca.
+  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false);
+
+  /// A convenience wrapper around the primary \c pointsToConstantMemory
+  /// interface.
+  bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
+    return pointsToConstantMemory(MemoryLocation(P), OrLocal);
+  }
+
+  /// @}
+  //===--------------------------------------------------------------------===//
+  /// \name Simple mod/ref information
+  /// @{
+
+  /// Get the ModRef info associated with a pointer argument of a callsite. The
+  /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
+  /// that these bits do not necessarily account for the overall behavior of
+  /// the function, but rather only provide additional per-argument
+  /// information. This never sets ModRefInfo::Must.
+  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+
+  /// Return the behavior of the given call site.
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+  /// Return the behavior when calling the given function.
+  FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+  /// Checks if the specified call is known to never read or write memory.
+  ///
+  /// Note that if the call only reads from known-constant memory, it is also
+  /// legal to return true. Also, calls that unwind the stack are legal for
+  /// this predicate.
+  ///
+  /// Many optimizations (such as CSE and LICM) can be performed on such calls
+  /// without worrying about aliasing properties, and many calls have this
+  /// property (e.g. calls to 'sin' and 'cos').
+  ///
+  /// This property corresponds to the GCC 'const' attribute.
+  bool doesNotAccessMemory(ImmutableCallSite CS) {
+    return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
+  }
+
+  /// Checks if the specified function is known to never read or write memory.
+  ///
+  /// Note that if the function only reads from known-constant memory, it is
+  /// also legal to return true. Also, functions that unwind the stack are legal
+  /// for this predicate.
+  ///
+  /// Many optimizations (such as CSE and LICM) can be performed on calls
+  /// to such functions without worrying about aliasing properties, and many
+  /// functions have this property (e.g. 'sin' and 'cos').
+  ///
+  /// This property corresponds to the GCC 'const' attribute.
+  bool doesNotAccessMemory(const Function *F) {
+    return getModRefBehavior(F) == FMRB_DoesNotAccessMemory;
+  }
+
+  /// Checks if the specified call is known to only read from non-volatile
+  /// memory (or not access memory at all).
+  ///
+  /// Calls that unwind the stack are legal for this predicate.
+  ///
+  /// This property allows many common optimizations to be performed in the
+  /// absence of interfering store instructions, such as CSE of strlen calls.
+  ///
+  /// This property corresponds to the GCC 'pure' attribute.
+  bool onlyReadsMemory(ImmutableCallSite CS) {
+    return onlyReadsMemory(getModRefBehavior(CS));
+  }
+
+  /// Checks if the specified function is known to only read from non-volatile
+  /// memory (or not access memory at all).
+  ///
+  /// Functions that unwind the stack are legal for this predicate.
+  ///
+  /// This property allows many common optimizations to be performed in the
+  /// absence of interfering store instructions, such as CSE of strlen calls.
+  ///
+  /// This property corresponds to the GCC 'pure' attribute.
+  bool onlyReadsMemory(const Function *F) {
+    return onlyReadsMemory(getModRefBehavior(F));
+  }
+
+  /// Checks if functions with the specified behavior are known to only read
+  /// from non-volatile memory (or not access memory at all).
+  static bool onlyReadsMemory(FunctionModRefBehavior MRB) {
+    return !isModSet(createModRefInfo(MRB));
+  }
+
+  /// Checks if functions with the specified behavior are known to only write
+  /// memory (or not access memory at all).
+  static bool doesNotReadMemory(FunctionModRefBehavior MRB) {
+    return !isRefSet(createModRefInfo(MRB));
+  }
+
+  /// Checks if functions with the specified behavior are known to read and
+  /// write at most from objects pointed to by their pointer-typed arguments
+  /// (with arbitrary offsets).
+  static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
+    return !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
+  }
+
+  /// Checks if functions with the specified behavior are known to potentially
+  /// read or write from objects pointed to by their pointer-typed arguments
+  /// (with arbitrary offsets).
+  static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
+    return isModOrRefSet(createModRefInfo(MRB)) &&
+           (MRB & FMRL_ArgumentPointees);
+  }
+
+  /// Checks if functions with the specified behavior are known to read and
+  /// write at most from memory that is inaccessible from LLVM IR.
+  static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) {
+    return !(MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
+  }
+
+  /// Checks if functions with the specified behavior are known to potentially
+  /// read or write from memory that is inaccessible from LLVM IR.
+  static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) {
+    return isModOrRefSet(createModRefInfo(MRB)) && (MRB & FMRL_InaccessibleMem);
+  }
+
+  /// Checks if functions with the specified behavior are known to read and
+  /// write at most from memory that is inaccessible from LLVM IR or objects
+  /// pointed to by their pointer-typed arguments (with arbitrary offsets).
+  static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) {
+    return !(MRB & FMRL_Anywhere &
+             ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
+  }
+
+  /// getModRefInfo (for call sites) - Return information about whether
+  /// a particular call site modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for call sites) - A convenience wrapper.
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
+                           uint64_t Size) {
+    return getModRefInfo(CS, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for calls) - Return information about whether
+  /// a particular call modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
+    return getModRefInfo(ImmutableCallSite(C), Loc);
+  }
+
+  /// getModRefInfo (for calls) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
+    return getModRefInfo(C, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for invokes) - Return information about whether
+  /// a particular invoke modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
+    return getModRefInfo(ImmutableCallSite(I), Loc);
+  }
+
+  /// getModRefInfo (for invokes) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
+    return getModRefInfo(I, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for loads) - Return information about whether
+  /// a particular load modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for loads) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
+    return getModRefInfo(L, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for stores) - Return information about whether
+  /// a particular store modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for stores) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
+    return getModRefInfo(S, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for fences) - Return information about whether
+  /// a particular fence modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for fences) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
+    return getModRefInfo(S, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for cmpxchges) - Return information about whether
+  /// a particular cmpxchg modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
+                           const MemoryLocation &Loc);
+
+  /// getModRefInfo (for cmpxchges) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
+                           unsigned Size) {
+    return getModRefInfo(CX, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for atomicrmws) - Return information about whether
+  /// a particular atomicrmw modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for atomicrmws) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
+                           unsigned Size) {
+    return getModRefInfo(RMW, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for va_args) - Return information about whether
+  /// a particular va_arg modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for va_args) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
+    return getModRefInfo(I, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for catchpads) - Return information about whether
+  /// a particular catchpad modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for catchpads) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
+                           uint64_t Size) {
+    return getModRefInfo(I, MemoryLocation(P, Size));
+  }
+
+  /// getModRefInfo (for catchrets) - Return information about whether
+  /// a particular catchret modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc);
+
+  /// getModRefInfo (for catchrets) - A convenience wrapper.
+  ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
+                           uint64_t Size) {
+    return getModRefInfo(I, MemoryLocation(P, Size));
+  }
+
+  /// Check whether or not an instruction may read or write the optionally
+  /// specified memory location.
+  ///
+  /// An instruction that doesn't read or write memory may be trivially LICM'd
+  /// for example.
+  ///
+  /// For function calls, this delegates to the alias-analysis specific
+  /// call-site mod-ref behavior queries. Otherwise it delegates to the specific
+  /// helpers above.
+  ModRefInfo getModRefInfo(const Instruction *I,
+                           const Optional<MemoryLocation> &OptLoc) {
+    if (OptLoc == None) {
+      if (auto CS = ImmutableCallSite(I)) {
+        return createModRefInfo(getModRefBehavior(CS));
+      }
+    }
+
+    const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation());
+
+    switch (I->getOpcode()) {
+    case Instruction::VAArg:  return getModRefInfo((const VAArgInst*)I, Loc);
+    case Instruction::Load:   return getModRefInfo((const LoadInst*)I,  Loc);
+    case Instruction::Store:  return getModRefInfo((const StoreInst*)I, Loc);
+    case Instruction::Fence:  return getModRefInfo((const FenceInst*)I, Loc);
+    case Instruction::AtomicCmpXchg:
+      return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);
+    case Instruction::AtomicRMW:
+      return getModRefInfo((const AtomicRMWInst*)I, Loc);
+    case Instruction::Call:   return getModRefInfo((const CallInst*)I,  Loc);
+    case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
+    case Instruction::CatchPad:
+      return getModRefInfo((const CatchPadInst *)I, Loc);
+    case Instruction::CatchRet:
+      return getModRefInfo((const CatchReturnInst *)I, Loc);
+    default:
+      return ModRefInfo::NoModRef;
+    }
+  }
+
+  /// A convenience wrapper for constructing the memory location.
+  ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
+                           uint64_t Size) {
+    return getModRefInfo(I, MemoryLocation(P, Size));
+  }
+
+  /// Return information about whether a call and an instruction may refer to
+  /// the same memory locations.
+  ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
+
+  /// Return information about whether two call sites may refer to the same set
+  /// of memory locations. See the AA documentation for details:
+  ///   http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+  /// \brief Return information about whether a particular call site modifies
+  /// or reads the specified memory location \p MemLoc before instruction \p I
+  /// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
+  /// instruction ordering queries inside the BasicBlock containing \p I.
+  /// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being
+  /// set.
+  ModRefInfo callCapturesBefore(const Instruction *I,
+                                const MemoryLocation &MemLoc, DominatorTree *DT,
+                                OrderedBasicBlock *OBB = nullptr);
+
+  /// \brief A convenience wrapper to synthesize a memory location.
+  ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
+                                uint64_t Size, DominatorTree *DT,
+                                OrderedBasicBlock *OBB = nullptr) {
+    return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
+  }
+
+  /// @}
+  //===--------------------------------------------------------------------===//
+  /// \name Higher level methods for querying mod/ref information.
+  /// @{
+
+  /// Check if it is possible for execution of the specified basic block to
+  /// modify the location Loc.
+  bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
+
+  /// A convenience wrapper synthesizing a memory location.
+  bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
+                           uint64_t Size) {
+    return canBasicBlockModify(BB, MemoryLocation(P, Size));
+  }
+
+  /// Check if it is possible for the execution of the specified instructions
+  /// to mod/ref (according to the mode) the location Loc.
+  ///
+  /// The instructions to consider are all of the instructions in the range of
+  /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
+  bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
+                                 const MemoryLocation &Loc,
+                                 const ModRefInfo Mode);
+
+  /// A convenience wrapper synthesizing a memory location.
+  bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
+                                 const Value *Ptr, uint64_t Size,
+                                 const ModRefInfo Mode) {
+    return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
+  }
+
+private:
+  class Concept;
+
+  template <typename T> class Model;
+
+  template <typename T> friend class AAResultBase;
+
+  const TargetLibraryInfo &TLI;
+
+  std::vector<std::unique_ptr<Concept>> AAs;
+
+  std::vector<AnalysisKey *> AADeps;
+};
+
+/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
+/// pointer or reference.
+using AliasAnalysis = AAResults;
+
+/// A private abstract base class describing the concept of an individual alias
+/// analysis implementation.
+///
+/// This interface is implemented by any \c Model instantiation. It is also the
+/// interface which a type used to instantiate the model must provide.
+///
+/// All of these methods model methods of the same name in the \c
+/// AAResults class. Only differences and specifics to how the
+/// implementations are called are documented here.
+class AAResults::Concept {
+public:
+  virtual ~Concept() = 0;
+
+  /// An update API used internally by the AAResults to provide
+  /// a handle back to the top level aggregation.
+  virtual void setAAResults(AAResults *NewAAR) = 0;
+
+  //===--------------------------------------------------------------------===//
+  /// \name Alias Queries
+  /// @{
+
+  /// The main low-level interface to the alias analysis implementation.
+  /// Returns an AliasResult indicating whether the two pointers are aliased to
+  /// each other. This is the interface that must be implemented by specific
+  /// alias analysis implementations.
+  virtual AliasResult alias(const MemoryLocation &LocA,
+                            const MemoryLocation &LocB) = 0;
+
+  /// Checks whether the given location points to constant memory, or if
+  /// \p OrLocal is true whether it points to a local alloca.
+  virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
+                                      bool OrLocal) = 0;
+
+  /// @}
+  //===--------------------------------------------------------------------===//
+  /// \name Simple mod/ref information
+  /// @{
+
+  /// Get the ModRef info associated with a pointer argument of a callsite. The
+  /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
+  /// that these bits do not necessarily account for the overall behavior of
+  /// the function, but rather only provide additional per-argument
+  /// information.
+  virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
+                                      unsigned ArgIdx) = 0;
+
+  /// Return the behavior of the given call site.
+  virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
+
+  /// Return the behavior when calling the given function.
+  virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
+
+  /// getModRefInfo (for call sites) - Return information about whether
+  /// a particular call site modifies or reads the specified memory location.
+  virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
+                                   const MemoryLocation &Loc) = 0;
+
+  /// Return information about whether two call sites may refer to the same set
+  /// of memory locations. See the AA documentation for details:
+  ///   http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
+  virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
+                                   ImmutableCallSite CS2) = 0;
+
+  /// @}
+};
+
+/// A private class template which derives from \c Concept and wraps some other
+/// type.
+///
+/// This models the concept by directly forwarding each interface point to the
+/// wrapped type which must implement a compatible interface. This provides
+/// a type erased binding.
+template <typename AAResultT> class AAResults::Model final : public Concept {
+  AAResultT &Result;
+
+public:
+  explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {
+    Result.setAAResults(&AAR);
+  }
+  ~Model() override = default;
+
+  void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
+
+  AliasResult alias(const MemoryLocation &LocA,
+                    const MemoryLocation &LocB) override {
+    return Result.alias(LocA, LocB);
+  }
+
+  bool pointsToConstantMemory(const MemoryLocation &Loc,
+                              bool OrLocal) override {
+    return Result.pointsToConstantMemory(Loc, OrLocal);
+  }
+
+  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
+    return Result.getArgModRefInfo(CS, ArgIdx);
+  }
+
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
+    return Result.getModRefBehavior(CS);
+  }
+
+  FunctionModRefBehavior getModRefBehavior(const Function *F) override {
+    return Result.getModRefBehavior(F);
+  }
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS,
+                           const MemoryLocation &Loc) override {
+    return Result.getModRefInfo(CS, Loc);
+  }
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1,
+                           ImmutableCallSite CS2) override {
+    return Result.getModRefInfo(CS1, CS2);
+  }
+};
+
+/// A CRTP-driven "mixin" base class to help implement the function alias
+/// analysis results concept.
+///
+/// Because of the nature of many alias analysis implementations, they often
+/// only implement a subset of the interface. This base class will attempt to
+/// implement the remaining portions of the interface in terms of simpler forms
+/// of the interface where possible, and otherwise provide conservatively
+/// correct fallback implementations.
+///
+/// Implementors of an alias analysis should derive from this CRTP base
+/// class and then override the specific methods that they wish to customize.
+/// There is no need to use virtual anywhere; the CRTP base class does static
+/// dispatch to the derived type passed into it.
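+///
+/// A minimal sketch (illustrative only; \c SimpleConstAA is a hypothetical
+/// name) of a result type built on this mixin:
+///
+/// \code
+///   class SimpleConstAA : public AAResultBase<SimpleConstAA> {
+///     friend AAResultBase<SimpleConstAA>;
+///
+///   public:
+///     bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+///       // Customize just this query; every other query keeps the
+///       // conservative default provided by AAResultBase.
+///       if (auto *GV = dyn_cast<GlobalVariable>(Loc.Ptr))
+///         return GV->isConstant();
+///       return false;
+///     }
+///   };
+/// \endcode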
+template <typename DerivedT> class AAResultBase {
+  // Expose some parts of the interface only to the AAResults::Model
+  // for wrapping. Specifically, this allows the model to call our
+  // setAAResults method without exposing it as a fully public API.
+  friend class AAResults::Model<DerivedT>;
+
+  /// A pointer to the AAResults object that this AAResult is
+  /// aggregated within. May be null if not aggregated.
+  AAResults *AAR;
+
+  /// Helper to dispatch calls back through the derived type.
+  DerivedT &derived() { return static_cast<DerivedT &>(*this); }
+
+  /// A setter for the AAResults pointer, which is used to satisfy the
+  /// AAResults::Model contract.
+  void setAAResults(AAResults *NewAAR) { AAR = NewAAR; }
+
+protected:
+  /// This proxy class models a common pattern where we delegate to either the
+  /// top-level \c AAResults aggregation if one is registered, or to the
+  /// current result if none are registered.
+  class AAResultsProxy {
+    AAResults *AAR;
+    DerivedT &CurrentResult;
+
+  public:
+    AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
+        : AAR(AAR), CurrentResult(CurrentResult) {}
+
+    AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+      return AAR ? AAR->alias(LocA, LocB) : CurrentResult.alias(LocA, LocB);
+    }
+
+    bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+      return AAR ? AAR->pointsToConstantMemory(Loc, OrLocal)
+                 : CurrentResult.pointsToConstantMemory(Loc, OrLocal);
+    }
+
+    ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+      return AAR ? AAR->getArgModRefInfo(CS, ArgIdx)
+                 : CurrentResult.getArgModRefInfo(CS, ArgIdx);
+    }
+
+    FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+      return AAR ? AAR->getModRefBehavior(CS)
+                 : CurrentResult.getModRefBehavior(CS);
+    }
+
+    FunctionModRefBehavior getModRefBehavior(const Function *F) {
+      return AAR ? AAR->getModRefBehavior(F)
+                 : CurrentResult.getModRefBehavior(F);
+    }
+
+    ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
+      return AAR ? AAR->getModRefInfo(CS, Loc)
+                 : CurrentResult.getModRefInfo(CS, Loc);
+    }
+
+    ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+      return AAR ? AAR->getModRefInfo(CS1, CS2)
+                 : CurrentResult.getModRefInfo(CS1, CS2);
+    }
+  };
+
+  explicit AAResultBase() = default;
+
+  // Provide all the copy and move constructors so that derived types aren't
+  // constrained.
+  AAResultBase(const AAResultBase &Arg) {}
+  AAResultBase(AAResultBase &&Arg) {}
+
+  /// Get a proxy for the best AA result set to query at this time.
+  ///
+  /// When this result is part of a larger aggregation, this will proxy to that
+  /// aggregation. When this result is used in isolation, it will just delegate
+  /// back to the derived class's implementation.
+  ///
+  /// Note that callers of this need to take considerable care to not cause
+  /// performance problems when they use this routine. In the case of a large
+  /// number of alias analyses being aggregated, it can be expensive to walk
+  /// back across the chain.
+  AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
+
+public:
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    return MayAlias;
+  }
+
+  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+    return false;
+  }
+
+  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+    return ModRefInfo::ModRef;
+  }
+
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+    return FMRB_UnknownModRefBehavior;
+  }
+
+  FunctionModRefBehavior getModRefBehavior(const Function *F) {
+    return FMRB_UnknownModRefBehavior;
+  }
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
+    return ModRefInfo::ModRef;
+  }
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+    return ModRefInfo::ModRef;
+  }
+};
+
+/// Return true if this pointer is returned by a noalias function.
+bool isNoAliasCall(const Value *V);
+
+/// Return true if this is an argument with the noalias attribute.
+bool isNoAliasArgument(const Value *V);
+
+/// Return true if this pointer refers to a distinct and identifiable object.
+/// This returns true for:
+///    Global Variables and Functions (but not Global Aliases)
+///    Allocas
+///    ByVal and NoAlias Arguments
+///    NoAlias returns (e.g. calls to malloc)
+///
+bool isIdentifiedObject(const Value *V);
+
+/// Return true if V is unambiguously identified at the function-level.
+/// Different IdentifiedFunctionLocals can't alias.
+/// Further, an IdentifiedFunctionLocal cannot alias with any function
+/// arguments other than itself, which is not necessarily true for
+/// IdentifiedObjects.
+bool isIdentifiedFunctionLocal(const Value *V);
+
+/// A manager for alias analyses.
+///
+/// This class can have analyses registered with it and, when run, it will run
+/// all of them and aggregate their results into a single AA results interface
+/// that dispatches across all of the alias analysis results available.
+///
+/// Note that the order in which analyses are registered is very significant.
+/// That is the order in which the results will be aggregated and queried.
+///
+/// This manager effectively wraps the AnalysisManager for registering alias
+/// analyses. When you register your alias analysis with this manager, it will
+/// ensure the analysis itself is registered with its AnalysisManager.
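+///
+/// A short usage sketch (illustrative; assumes a configured
+/// \c FunctionAnalysisManager named \c FAM):
+///
+/// \code
+///   AAManager AA;
+///   AA.registerFunctionAnalysis<BasicAA>();
+///   FAM.registerPass([&] { return std::move(AA); });
+/// \endcode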
+class AAManager : public AnalysisInfoMixin<AAManager> {
+public:
+  using Result = AAResults;
+
+  /// Register a specific AA result.
+  template <typename AnalysisT> void registerFunctionAnalysis() {
+    ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
+  }
+
+  /// Register a specific AA result.
+  template <typename AnalysisT> void registerModuleAnalysis() {
+    ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
+  }
+
+  Result run(Function &F, FunctionAnalysisManager &AM) {
+    Result R(AM.getResult<TargetLibraryAnalysis>(F));
+    for (auto &Getter : ResultGetters)
+      (*Getter)(F, AM, R);
+    return R;
+  }
+
+private:
+  friend AnalysisInfoMixin<AAManager>;
+
+  static AnalysisKey Key;
+
+  SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
+                       AAResults &AAResults),
+              4> ResultGetters;
+
+  template <typename AnalysisT>
+  static void getFunctionAAResultImpl(Function &F,
+                                      FunctionAnalysisManager &AM,
+                                      AAResults &AAResults) {
+    AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
+    AAResults.addAADependencyID(AnalysisT::ID());
+  }
+
+  template <typename AnalysisT>
+  static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM,
+                                    AAResults &AAResults) {
+    auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
+    auto &MAM = MAMProxy.getManager();
+    if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent())) {
+      AAResults.addAAResult(*R);
+      MAMProxy
+          .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>();
+    }
+  }
+};
+
+/// A wrapper pass to provide the legacy pass manager access to a suitably
+/// prepared AAResults object.
+class AAResultsWrapperPass : public FunctionPass {
+  std::unique_ptr<AAResults> AAR;
+
+public:
+  static char ID;
+
+  AAResultsWrapperPass();
+
+  AAResults &getAAResults() { return *AAR; }
+  const AAResults &getAAResults() const { return *AAR; }
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+FunctionPass *createAAResultsWrapperPass();
+
+/// A wrapper pass around a callback which can be used to populate the
+/// AAResults in the AAResultsWrapperPass from an external AA.
+///
+/// The callback provided here will be used each time we prepare an AAResults
+/// object, and will receive a reference to the function wrapper pass, the
+/// function, and the AAResults object to populate. This should be used when
+/// setting up a custom pass pipeline to inject a hook into the AA results.
+ImmutablePass *createExternalAAWrapperPass(
+    std::function<void(Pass &, Function &, AAResults &)> Callback);
+
+/// A helper for the legacy pass manager to create a \c AAResults
+/// object populated to the best of our ability for a particular function when
+/// inside of a \c ModulePass or a \c CallGraphSCCPass.
+///
+/// If a \c ModulePass or a \c CallGraphSCCPass calls \p
+/// createLegacyPMAAResults, it also needs to call \p
+/// getAAResultsAnalysisUsage in its \p getAnalysisUsage.
+AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR);
+
+/// A helper for the legacy pass manager to populate \p AU to add uses to make
+/// sure the analyses required by \p createLegacyPMAAResults are available.
+void getAAResultsAnalysisUsage(AnalysisUsage &AU);
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_ALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h b/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h
new file mode 100644
index 0000000..cd2f631
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -0,0 +1,74 @@
+//===- AliasAnalysisEvaluator.h - Alias Analysis Accuracy Evaluator -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements a simple N^2 alias analysis accuracy evaluator. The
+/// analysis result is a set of statistics of how many times the AA
+/// infrastructure provides each kind of alias result and mod/ref result when
+/// queried with all pairs of pointers in the function.
+///
+/// It can be used to evaluate a change in an alias analysis implementation,
+/// algorithm, or the AA pipeline infrastructure itself. It acts like a stable
+/// and easily tested consumer of all AA information exposed.
+///
+/// This is inspired and adapted from code by: Naveen Neelakantam, Francesco
+/// Spadini, and Wojciech Stryjewski.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
+#define LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class AAResults;
+
+class AAEvaluator : public PassInfoMixin<AAEvaluator> {
+  int64_t FunctionCount;
+  int64_t NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount;
+  int64_t NoModRefCount, ModCount, RefCount, ModRefCount;
+  int64_t MustCount, MustRefCount, MustModCount, MustModRefCount;
+
+public:
+  AAEvaluator()
+      : FunctionCount(), NoAliasCount(), MayAliasCount(), PartialAliasCount(),
+        MustAliasCount(), NoModRefCount(), ModCount(), RefCount(),
+        ModRefCount(), MustCount(), MustRefCount(), MustModCount(),
+        MustModRefCount() {}
+  AAEvaluator(AAEvaluator &&Arg)
+      : FunctionCount(Arg.FunctionCount), NoAliasCount(Arg.NoAliasCount),
+        MayAliasCount(Arg.MayAliasCount),
+        PartialAliasCount(Arg.PartialAliasCount),
+        MustAliasCount(Arg.MustAliasCount), NoModRefCount(Arg.NoModRefCount),
+        ModCount(Arg.ModCount), RefCount(Arg.RefCount),
+        ModRefCount(Arg.ModRefCount), MustCount(Arg.MustCount),
+        MustRefCount(Arg.MustRefCount), MustModCount(Arg.MustModCount),
+        MustModRefCount(Arg.MustModRefCount) {
+    Arg.FunctionCount = 0;
+  }
+  ~AAEvaluator();
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+  // Allow the legacy pass to run this using an internal API.
+  friend class AAEvalLegacyPass;
+
+  void runInternal(Function &F, AAResults &AA);
+};
+
+/// Create a wrapper of the above for the legacy pass manager.
+FunctionPass *createAAEvalPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h b/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h
new file mode 100644
index 0000000..7da3eba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h
@@ -0,0 +1,468 @@
+//===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines two classes: AliasSetTracker and AliasSet. These interfaces
+// are used to classify a collection of pointer references into a maximal number
+// of disjoint sets. Each AliasSet object constructed by the AliasSetTracker
+// object refers to memory disjoint from the other sets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
+#define LLVM_ANALYSIS_ALIASSETTRACKER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+class AliasSetTracker;
+class BasicBlock;
+class LoadInst;
+class MemSetInst;
+class MemTransferInst;
+class raw_ostream;
+class StoreInst;
+class VAArgInst;
+class Value;
+
+class AliasSet : public ilist_node<AliasSet> {
+  friend class AliasSetTracker;
+
+  class PointerRec {
+    Value *Val;  // The pointer this record corresponds to.
+    PointerRec **PrevInList = nullptr;
+    PointerRec *NextInList = nullptr;
+    AliasSet *AS = nullptr;
+    uint64_t Size = 0;
+    AAMDNodes AAInfo;
+
+  public:
+    PointerRec(Value *V)
+      : Val(V), AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
+
+    Value *getValue() const { return Val; }
+
+    PointerRec *getNext() const { return NextInList; }
+    bool hasAliasSet() const { return AS != nullptr; }
+
+    PointerRec** setPrevInList(PointerRec **PIL) {
+      PrevInList = PIL;
+      return &NextInList;
+    }
+
+    bool updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
+      bool SizeChanged = false;
+      if (NewSize > Size) {
+        Size = NewSize;
+        SizeChanged = true;
+      }
+
+      if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
+        // We don't have an AAInfo yet. Set it to NewAAInfo.
+        AAInfo = NewAAInfo;
+      else {
+        AAMDNodes Intersection(AAInfo.intersect(NewAAInfo));
+        if (!Intersection) {
+          // NewAAInfo conflicts with AAInfo.
+          AAInfo = DenseMapInfo<AAMDNodes>::getTombstoneKey();
+          return SizeChanged;
+        }
+        AAInfo = Intersection;
+      }
+      return SizeChanged;
+    }
+
+    uint64_t getSize() const { return Size; }
+
+    /// Return the AAInfo, or null if there is no information or conflicting
+    /// information.
+    AAMDNodes getAAInfo() const {
+      // If we have missing or conflicting AAInfo, return null.
+      if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey() ||
+          AAInfo == DenseMapInfo<AAMDNodes>::getTombstoneKey())
+        return AAMDNodes();
+      return AAInfo;
+    }
+
+    AliasSet *getAliasSet(AliasSetTracker &AST) {
+      assert(AS && "No AliasSet yet!");
+      if (AS->Forward) {
+        AliasSet *OldAS = AS;
+        AS = OldAS->getForwardedTarget(AST);
+        AS->addRef();
+        OldAS->dropRef(AST);
+      }
+      return AS;
+    }
+
+    void setAliasSet(AliasSet *as) {
+      assert(!AS && "Already have an alias set!");
+      AS = as;
+    }
+
+    void eraseFromList() {
+      if (NextInList) NextInList->PrevInList = PrevInList;
+      *PrevInList = NextInList;
+      if (AS->PtrListEnd == &NextInList) {
+        AS->PtrListEnd = PrevInList;
+        assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
+      }
+      delete this;
+    }
+  };
+
+  // Doubly linked list of nodes.
+  PointerRec *PtrList = nullptr;
+  PointerRec **PtrListEnd;
+  // Forwarding pointer.
+  AliasSet *Forward = nullptr;
+
+  /// All instructions without a specific address in this alias set.
+  /// In rare cases this vector can contain nulled-out WeakVH
+  /// instances (this can happen when some other loop pass deletes an
+  /// instruction in this list).
+  std::vector<WeakVH> UnknownInsts;
+
+  /// Number of nodes pointing to this AliasSet plus the number of AliasSets
+  /// forwarding to it.
+  unsigned RefCount : 27;
+
+  // Signifies that this set should be considered to alias any pointer.
+  // Use when the tracker holding this set is saturated.
+  unsigned AliasAny : 1;
+
+  /// The kinds of access this alias set models.
+  ///
+  /// We keep track of whether this alias set merely refers to the locations of
+  /// memory (and not any particular access), whether it modifies or references
+  /// the memory, or whether it does both. The lattice goes from "NoAccess" to
+  /// either RefAccess or ModAccess, then to ModRefAccess as necessary.
+  enum AccessLattice {
+    NoAccess = 0,
+    RefAccess = 1,
+    ModAccess = 2,
+    ModRefAccess = RefAccess | ModAccess
+  };
+  unsigned Access : 2;
+
+  /// The kind of alias relationship between pointers of the set.
+  ///
+  /// These represent conservatively correct alias results between any members
+  /// of the set. We represent these independently of the values of alias
+  /// results in order to pack it into a single bit. Lattice goes from
+  /// MustAlias to MayAlias.
+  enum AliasLattice {
+    SetMustAlias = 0, SetMayAlias = 1
+  };
+  unsigned Alias : 1;
+
+  /// True if this alias set contains volatile loads or stores.
+  unsigned Volatile : 1;
+
+  unsigned SetSize = 0;
+
+  void addRef() { ++RefCount; }
+
+  void dropRef(AliasSetTracker &AST) {
+    assert(RefCount >= 1 && "Invalid reference count detected!");
+    if (--RefCount == 0)
+      removeFromTracker(AST);
+  }
+
+  Instruction *getUnknownInst(unsigned i) const {
+    assert(i < UnknownInsts.size());
+    return cast_or_null<Instruction>(UnknownInsts[i]);
+  }
+
+public:
+  AliasSet(const AliasSet &) = delete;
+  AliasSet &operator=(const AliasSet &) = delete;
+
+  /// Accessors...
+  bool isRef() const { return Access & RefAccess; }
+  bool isMod() const { return Access & ModAccess; }
+  bool isMustAlias() const { return Alias == SetMustAlias; }
+  bool isMayAlias()  const { return Alias == SetMayAlias; }
+
+  /// Return true if this alias set contains volatile loads or stores.
+  bool isVolatile() const { return Volatile; }
+
+  /// Return true if this alias set should be ignored as part of the
+  /// AliasSetTracker object.
+  bool isForwardingAliasSet() const { return Forward; }
+
+  /// Merge the specified alias set into this alias set.
+  void mergeSetIn(AliasSet &AS, AliasSetTracker &AST);
+
+  // Alias Set iteration - Allow access to all of the pointers which are part of
+  // this alias set.
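+  //
+  // For example (an illustrative sketch, given an AliasSet AS), summing the
+  // sizes of all pointers in the set:
+  //   uint64_t Total = 0;
+  //   for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
+  //     Total += I.getSize();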
+  class iterator;
+  iterator begin() const { return iterator(PtrList); }
+  iterator end()   const { return iterator(); }
+  bool empty() const { return PtrList == nullptr; }
+
+  // Unfortunately, ilist::size() is linear, so we have to add code to keep
+  // track of the list's exact size.
+  unsigned size() { return SetSize; }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  /// Define an iterator for alias sets... this is just a forward iterator.
+  class iterator : public std::iterator<std::forward_iterator_tag,
+                                        PointerRec, ptrdiff_t> {
+    PointerRec *CurNode;
+
+  public:
+    explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
+
+    bool operator==(const iterator& x) const {
+      return CurNode == x.CurNode;
+    }
+    bool operator!=(const iterator& x) const { return !operator==(x); }
+
+    value_type &operator*() const {
+      assert(CurNode && "Dereferencing AliasSet.end()!");
+      return *CurNode;
+    }
+    value_type *operator->() const { return &operator*(); }
+
+    Value *getPointer() const { return CurNode->getValue(); }
+    uint64_t getSize() const { return CurNode->getSize(); }
+    AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
+
+    iterator& operator++() {                // Preincrement
+      assert(CurNode && "Advancing past AliasSet.end()!");
+      CurNode = CurNode->getNext();
+      return *this;
+    }
+    iterator operator++(int) { // Postincrement
+      iterator tmp = *this; ++*this; return tmp;
+    }
+  };
+
+private:
+  // Can only be created by AliasSetTracker.
+  AliasSet()
+      : PtrListEnd(&PtrList), RefCount(0), AliasAny(false), Access(NoAccess),
+        Alias(SetMustAlias), Volatile(false) {}
+
+  PointerRec *getSomePointer() const {
+    return PtrList;
+  }
+
+  /// Return the real alias set this represents. If this has been merged with
+  /// another set and is forwarding, return the ultimate destination set. This
+  /// also implements the union-find collapsing as well.
+  AliasSet *getForwardedTarget(AliasSetTracker &AST) {
+    if (!Forward) return this;
+
+    AliasSet *Dest = Forward->getForwardedTarget(AST);
+    if (Dest != Forward) {
+      Dest->addRef();
+      Forward->dropRef(AST);
+      Forward = Dest;
+    }
+    return Dest;
+  }
+
+  void removeFromTracker(AliasSetTracker &AST);
+
+  void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
+                  const AAMDNodes &AAInfo,
+                  bool KnownMustAlias = false);
+  void addUnknownInst(Instruction *I, AliasAnalysis &AA);
+
+  void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
+    bool WasEmpty = UnknownInsts.empty();
+    for (size_t i = 0, e = UnknownInsts.size(); i != e; ++i)
+      if (UnknownInsts[i] == I) {
+        UnknownInsts[i] = UnknownInsts.back();
+        UnknownInsts.pop_back();
+        --i; --e;  // Revisit the moved entry.
+      }
+    if (!WasEmpty && UnknownInsts.empty())
+      dropRef(AST);
+  }
+
+  void setVolatile() { Volatile = true; }
+
+public:
+  /// Return true if the specified pointer "may" (or must) alias one of the
+  /// members in the set.
+  bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
+                      AliasAnalysis &AA) const;
+  bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
+  AS.print(OS);
+  return OS;
+}
+
+class AliasSetTracker {
+  /// A CallbackVH to arrange for AliasSetTracker to be notified whenever a
+  /// Value is deleted.
+  class ASTCallbackVH final : public CallbackVH {
+    AliasSetTracker *AST;
+
+    void deleted() override;
+    void allUsesReplacedWith(Value *) override;
+
+  public:
+    ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
+
+    ASTCallbackVH &operator=(Value *V);
+  };
+  /// Traits to tell DenseMap how to compare and hash the value
+  /// handle.
+  struct ASTCallbackVHDenseMapInfo : public DenseMapInfo<Value *> {};
+
+  AliasAnalysis &AA;
+  ilist<AliasSet> AliasSets;
+
+  using PointerMapType = DenseMap<ASTCallbackVH, AliasSet::PointerRec *,
+                                  ASTCallbackVHDenseMapInfo>;
+
+  // Map from pointers to their node
+  PointerMapType PointerMap;
+
+public:
+  /// Create an empty collection of AliasSets, and use the specified alias
+  /// analysis object to disambiguate load and store addresses.
+  explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {}
+  ~AliasSetTracker() { clear(); }
+
+  /// These methods are used to add different types of instructions to the alias
+  /// sets. Adding a new instruction can result in one of three actions
+  /// happening:
+  ///
+  ///   1. If the instruction doesn't alias any other sets, create a new set.
+  ///   2. If the instruction aliases exactly one set, add it to the set.
+  ///   3. If the instruction aliases multiple sets, merge the sets, and add
+  ///      the instruction to the result.
+  ///
+  /// These methods return true if inserting the instruction resulted in the
+  /// addition of a new alias set (i.e., the pointer did not alias anything).
+  ///
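+  /// A typical use (an illustrative sketch, assuming an AliasAnalysis
+  /// reference \c AA and a Function \c F) builds the sets for a whole
+  /// function and then inspects them:
+  /// \code
+  ///   AliasSetTracker AST(AA);
+  ///   for (BasicBlock &BB : F)
+  ///     AST.add(BB);
+  ///   for (const AliasSet &AS : AST)
+  ///     AS.print(errs());
+  /// \endcode
+  ///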
+  void add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
+  void add(LoadInst *LI);
+  void add(StoreInst *SI);
+  void add(VAArgInst *VAAI);
+  void add(MemSetInst *MSI);
+  void add(MemTransferInst *MTI);
+  void add(Instruction *I);       // Dispatch to one of the other add methods...
+  void add(BasicBlock &BB);       // Add all instructions in basic block
+  void add(const AliasSetTracker &AST); // Add alias relations from another AST
+  void addUnknown(Instruction *I);
+
+  void clear();
+
+  /// Return the alias sets that are active.
+  const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
+
+  /// Return the alias set that the specified pointer lives in, creating a
+  /// new alias set to contain the pointer if it doesn't alias anything
+  /// already tracked.
+  AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
+                                  const AAMDNodes &AAInfo);
+
+  /// Return the alias set containing the location specified if one exists,
+  /// otherwise return null.
+  AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
+                                          const AAMDNodes &AAInfo) {
+    return mergeAliasSetsForPointer(P, Size, AAInfo);
+  }
+
+  /// Return true if the specified instruction is tracked as an "unknown"
+  /// instruction in any of the alias sets.
+  bool containsUnknown(const Instruction *I) const;
+
+  /// Return the underlying alias analysis object used by this tracker.
+  AliasAnalysis &getAliasAnalysis() const { return AA; }
+
+  /// This method is used to remove a pointer value from the AliasSetTracker
+  /// entirely. It should be used when an instruction is deleted from the
+  /// program to update the AST. If you don't use this, you would have dangling
+  /// pointers to deleted instructions.
+  void deleteValue(Value *PtrVal);
+
+  /// This method should be used whenever a preexisting value in the program is
+  /// copied or cloned, introducing a new value.  Note that it is ok for clients
+  /// that use this method to introduce the same value multiple times: if the
+  /// tracker already knows about a value, it will ignore the request.
+  void copyValue(Value *From, Value *To);
+
+  using iterator = ilist<AliasSet>::iterator;
+  using const_iterator = ilist<AliasSet>::const_iterator;
+
+  const_iterator begin() const { return AliasSets.begin(); }
+  const_iterator end()   const { return AliasSets.end(); }
+
+  iterator begin() { return AliasSets.begin(); }
+  iterator end()   { return AliasSets.end(); }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+private:
+  friend class AliasSet;
+
+  // The total number of pointers contained in all "may" alias sets.
+  unsigned TotalMayAliasSetSize = 0;
+
+  // A non-null value signifies this AST is saturated. A saturated AST lumps
+  // all pointers into a single "May" set.
+  AliasSet *AliasAnyAS = nullptr;
+
+  void removeAliasSet(AliasSet *AS);
+
+  /// Just like operator[] on the map, except that it creates an entry for the
+  /// pointer if it doesn't already exist.
+  AliasSet::PointerRec &getEntryFor(Value *V) {
+    AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
+    if (!Entry)
+      Entry = new AliasSet::PointerRec(V);
+    return *Entry;
+  }
+
+  AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
+                       AliasSet::AccessLattice E);
+  AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
+                                     const AAMDNodes &AAInfo);
+
+  /// Merge all alias sets into a single set that is considered to alias any
+  /// pointer.
+  AliasSet &mergeAllAliasSets();
+
+  AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
+  AST.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_ALIASSETTRACKER_H
diff --git a/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h b/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h
new file mode 100644
index 0000000..c965e62
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h
@@ -0,0 +1,232 @@
+//===- llvm/Analysis/AssumptionCache.h - Track @llvm.assume -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that keeps track of @llvm.assume intrinsics in
+// the functions of a module (allowing assumptions within any function to be
+// found cheaply by other parts of the optimizer).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
+#define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+
+class CallInst;
+class Function;
+class raw_ostream;
+class Value;
+
+/// \brief A cache of @llvm.assume calls within a function.
+///
+/// This cache provides fast lookup of assumptions within a function by caching
+/// them and amortizing the cost of scanning for them across all queries. Passes
+/// that create new assumptions are required to call registerAssumption() to
+/// register any new @llvm.assume calls that they create. Deletions of
+/// @llvm.assume calls do not require special handling.
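+///
+/// An illustrative usage sketch (assuming an AssumptionCache \c AC;
+/// \c visitAssume is a hypothetical callback):
+///
+/// \code
+///   for (auto &VH : AC.assumptions())
+///     if (auto *Assume = dyn_cast_or_null<CallInst>(VH))
+///       visitAssume(Assume); // Handles may be null; skip dead ones.
+/// \endcode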
+class AssumptionCache {
+  /// \brief The function for which this cache is handling assumptions.
+  ///
+  /// We track this to lazily populate our assumptions.
+  Function &F;
+
+  /// \brief Vector of weak value handles to calls of the @llvm.assume
+  /// intrinsic.
+  SmallVector<WeakTrackingVH, 4> AssumeHandles;
+
+  class AffectedValueCallbackVH final : public CallbackVH {
+    AssumptionCache *AC;
+
+    void deleted() override;
+    void allUsesReplacedWith(Value *) override;
+
+  public:
+    using DMI = DenseMapInfo<Value *>;
+
+    AffectedValueCallbackVH(Value *V, AssumptionCache *AC = nullptr)
+        : CallbackVH(V), AC(AC) {}
+  };
+
+  friend AffectedValueCallbackVH;
+
+  /// \brief A map of values about which an assumption might be providing
+  /// information to the relevant set of assumptions.
+  using AffectedValuesMap =
+      DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
+               AffectedValueCallbackVH::DMI>;
+  AffectedValuesMap AffectedValues;
+
+  /// Get the vector of assumptions which affect a value from the cache.
+  SmallVector<WeakTrackingVH, 1> &getOrInsertAffectedValues(Value *V);
+
+  /// Copy affected values in the cache for OV to be affected values for NV.
+  void copyAffectedValuesInCache(Value *OV, Value *NV);
+
+  /// \brief Flag tracking whether we have scanned the function yet.
+  ///
+  /// We want to be as lazy about this as possible, and so we scan the function
+  /// at the last moment.
+  bool Scanned = false;
+
+  /// \brief Scan the function for assumptions and add them to the cache.
+  void scanFunction();
+
+public:
+  /// \brief Construct an AssumptionCache from a function by scanning all of
+  /// its instructions.
+  AssumptionCache(Function &F) : F(F) {}
+
+  /// This cache is designed to be self-updating and so it should never be
+  /// invalidated.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  /// \brief Add an @llvm.assume intrinsic to this function's cache.
+  ///
+  /// The call passed in must be an instruction within this function and must
+  /// not already be in the cache.
+  void registerAssumption(CallInst *CI);
+
+  /// \brief Update the cache of values being affected by this assumption (i.e.
+  /// the values about which this assumption provides information).
+  void updateAffectedValues(CallInst *CI);
+
+  /// \brief Clear the cache of @llvm.assume intrinsics for a function.
+  ///
+  /// It will be re-scanned the next time it is requested.
+  void clear() {
+    AssumeHandles.clear();
+    AffectedValues.clear();
+    Scanned = false;
+  }
+
+  /// \brief Access the list of assumption handles currently tracked for this
+  /// function.
+  ///
+  /// Note that these produce weak handles that may be null. The caller must
+  /// handle that case.
+  /// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
+  /// when we can write that to filter out the null values. Then caller code
+  /// will become simpler.
+  MutableArrayRef<WeakTrackingVH> assumptions() {
+    if (!Scanned)
+      scanFunction();
+    return AssumeHandles;
+  }
+
+  /// \brief Access the list of assumptions which affect this value.
+  MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
+    if (!Scanned)
+      scanFunction();
+
+    auto AVI = AffectedValues.find_as(const_cast<Value *>(V));
+    if (AVI == AffectedValues.end())
+      return MutableArrayRef<WeakTrackingVH>();
+
+    return AVI->second;
+  }
+};
+
+/// \brief A function analysis which provides an \c AssumptionCache.
+///
+/// This analysis is intended for use with the new pass manager and will vend
+/// assumption caches for a given function.
+class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
+  friend AnalysisInfoMixin<AssumptionAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = AssumptionCache;
+
+  AssumptionCache run(Function &F, FunctionAnalysisManager &) {
+    return AssumptionCache(F);
+  }
+};
+
+/// \brief Printer pass for the \c AssumptionAnalysis results.
+class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief An immutable pass that tracks lazily created \c AssumptionCache
+/// objects.
+///
+/// This is essentially a workaround for the legacy pass manager's
+/// weaknesses: it associates each assumption cache with a Function and
+/// clears the cache if the function is deleted. The nature of the
+/// AssumptionCache is that it is not invalidated by any changes to the
+/// function body, and so this is sufficient to be conservatively correct.
+class AssumptionCacheTracker : public ImmutablePass {
+  /// A callback value handle applied to function objects, which we use to
+  /// delete our cache of intrinsics for a function when it is deleted.
+  class FunctionCallbackVH final : public CallbackVH {
+    AssumptionCacheTracker *ACT;
+
+    void deleted() override;
+
+  public:
+    using DMI = DenseMapInfo<Value *>;
+
+    FunctionCallbackVH(Value *V, AssumptionCacheTracker *ACT = nullptr)
+        : CallbackVH(V), ACT(ACT) {}
+  };
+
+  friend FunctionCallbackVH;
+
+  using FunctionCallsMap =
+      DenseMap<FunctionCallbackVH, std::unique_ptr<AssumptionCache>,
+               FunctionCallbackVH::DMI>;
+
+  FunctionCallsMap AssumptionCaches;
+
+public:
+  /// \brief Get the cached assumptions for a function.
+  ///
+  /// If no assumptions are cached, this will scan the function. Otherwise, the
+  /// existing cache will be returned.
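+  ///
+  /// For example (an illustrative sketch), from within a legacy
+  /// \c FunctionPass that requires this pass in its \c getAnalysisUsage:
+  /// \code
+  ///   AssumptionCache &AC =
+  ///       getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+  /// \endcode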
+  AssumptionCache &getAssumptionCache(Function &F);
+
+  AssumptionCacheTracker();
+  ~AssumptionCacheTracker() override;
+
+  void releaseMemory() override {
+    verifyAnalysis();
+    AssumptionCaches.shrink_and_clear();
+  }
+
+  void verifyAnalysis() const override;
+
+  bool doFinalization(Module &) override {
+    verifyAnalysis();
+    return false;
+  }
+
+  static char ID; // Pass identification, replacement for typeid
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_ASSUMPTIONCACHE_H
diff --git a/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h
new file mode 100644
index 0000000..42e5e97
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -0,0 +1,270 @@
+//===- BasicAliasAnalysis.h - Stateless, local Alias Analysis ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for LLVM's primary stateless and local alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
+#define LLVM_ANALYSIS_BASICALIASANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+struct AAMDNodes;
+class APInt;
+class AssumptionCache;
+class BasicBlock;
+class DataLayout;
+class DominatorTree;
+class Function;
+class GEPOperator;
+class LoopInfo;
+class PHINode;
+class SelectInst;
+class TargetLibraryInfo;
+class Value;
+
+/// This is the AA result object for the basic, local, and stateless alias
+/// analysis. It implements the AA query interface in an entirely stateless
+/// manner. As one consequence, it is never invalidated due to IR changes.
+/// While it does retain some storage, that is used as an optimization and not
+/// to preserve information from query to query. However it does retain handles
+/// to various other analyses and must be recomputed when those analyses are.
+class BasicAAResult : public AAResultBase<BasicAAResult> {
+  friend AAResultBase<BasicAAResult>;
+
+  const DataLayout &DL;
+  const TargetLibraryInfo &TLI;
+  AssumptionCache &AC;
+  DominatorTree *DT;
+  LoopInfo *LI;
+
+public:
+  BasicAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI,
+                AssumptionCache &AC, DominatorTree *DT = nullptr,
+                LoopInfo *LI = nullptr)
+      : AAResultBase(), DL(DL), TLI(TLI), AC(AC), DT(DT), LI(LI) {}
+
+  BasicAAResult(const BasicAAResult &Arg)
+      : AAResultBase(Arg), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
+        LI(Arg.LI) {}
+  BasicAAResult(BasicAAResult &&Arg)
+      : AAResultBase(std::move(Arg)), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC),
+        DT(Arg.DT), LI(Arg.LI) {}
+
+  /// Handle invalidation events in the new pass manager.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+  /// Chases pointers until we find a constant global or determine that the
+  /// location is not constant memory.
+  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+
+  /// Get the location associated with a pointer argument of a callsite.
+  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+
+  /// Returns the behavior when calling the given call site.
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+  /// Returns the behavior when calling the given function. For use when the
+  /// call site is not known.
+  FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+private:
+  // A linear transformation of a Value; this class represents ZExt(SExt(V,
+  // SExtBits), ZExtBits) * Scale + Offset.
+  struct VariableGEPIndex {
+    // An opaque Value - we can't decompose this further.
+    const Value *V;
+
+    // We need to track what extensions we've done as we consider the same
+    // Value with different extensions as different variables in a GEP's
+    // linear expression; e.g., if V == -1, then sext(V) != zext(V).
+    unsigned ZExtBits;
+    unsigned SExtBits;
+
+    int64_t Scale;
+
+    bool operator==(const VariableGEPIndex &Other) const {
+      return V == Other.V && ZExtBits == Other.ZExtBits &&
+             SExtBits == Other.SExtBits && Scale == Other.Scale;
+    }
+
+    bool operator!=(const VariableGEPIndex &Other) const {
+      return !operator==(Other);
+    }
+  };
+
+  // Represents the internal structure of a GEP, decomposed into a base pointer,
+  // constant offsets, and variable scaled indices.
+  struct DecomposedGEP {
+    // Base pointer of the GEP.
+    const Value *Base;
+    // Total constant offset w.r.t. the base from indexing into structs.
+    int64_t StructOffset;
+    // Total constant offset w.r.t. the base from indexing through
+    // pointers/arrays/vectors.
+    int64_t OtherOffset;
+    // Scaled variable (non-constant) indices.
+    SmallVector<VariableGEPIndex, 4> VarIndices;
+  };
+
+  /// Track alias queries to guard against recursion.
+  using LocPair = std::pair<MemoryLocation, MemoryLocation>;
+  using AliasCacheTy = SmallDenseMap<LocPair, AliasResult, 8>;
+  AliasCacheTy AliasCache;
+
+  /// Tracks phi nodes we have visited.
+  ///
+  /// When interpreting "Value" pointer equality as value equality, we need to
+  /// make sure that the "Value" is not part of a cycle. Otherwise, two uses
+  /// could come from different "iterations" of a cycle and see different
+  /// values for the same "Value" pointer.
+  ///
+  /// The following example shows the problem:
+  ///   %p = phi(%alloca1, %addr2)
+  ///   %l = load %ptr
+  ///   %addr1 = gep, %alloca2, 0, %l
+  ///   %addr2 = gep  %alloca2, 0, (%l + 1)
+  ///      alias(%p, %addr1) -> MayAlias !
+  ///   store %l, ...
+  SmallPtrSet<const BasicBlock *, 8> VisitedPhiBBs;
+
+  /// Tracks instructions visited by pointsToConstantMemory.
+  SmallPtrSet<const Value *, 16> Visited;
+
+  static const Value *
+  GetLinearExpression(const Value *V, APInt &Scale, APInt &Offset,
+                      unsigned &ZExtBits, unsigned &SExtBits,
+                      const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
+                      DominatorTree *DT, bool &NSW, bool &NUW);
+
+  static bool DecomposeGEPExpression(const Value *V, DecomposedGEP &Decomposed,
+      const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT);
+
+  static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
+      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
+      uint64_t ObjectAccessSize);
+
+  /// \brief A Heuristic for aliasGEP that searches for a constant offset
+  /// between the variables.
+  ///
+  /// GetLinearExpression has some limitations, as generally zext(%x + 1)
+  /// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
+  /// will therefore conservatively refuse to decompose these expressions.
+  /// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
+  /// the addition overflows.
+  bool
+  constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
+                          uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
+                          AssumptionCache *AC, DominatorTree *DT);
+
+  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
+
+  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
+                          const SmallVectorImpl<VariableGEPIndex> &Src);
+
+  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+                       const AAMDNodes &V1AAInfo, const Value *V2,
+                       uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                       const Value *UnderlyingV1, const Value *UnderlyingV2);
+
+  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+                       const AAMDNodes &PNAAInfo, const Value *V2,
+                       uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                       const Value *UnderV2);
+
+  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+                          const AAMDNodes &SIAAInfo, const Value *V2,
+                          uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                          const Value *UnderV2);
+
+  AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
+                         const Value *V2, uint64_t V2Size, AAMDNodes V2AATag,
+                         const Value *O1 = nullptr, const Value *O2 = nullptr);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class BasicAA : public AnalysisInfoMixin<BasicAA> {
+  friend AnalysisInfoMixin<BasicAA>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = BasicAAResult;
+
+  BasicAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the BasicAAResult object.
+class BasicAAWrapperPass : public FunctionPass {
+  std::unique_ptr<BasicAAResult> Result;
+
+  virtual void anchor();
+
+public:
+  static char ID;
+
+  BasicAAWrapperPass();
+
+  BasicAAResult &getResult() { return *Result; }
+  const BasicAAResult &getResult() const { return *Result; }
+
+  bool runOnFunction(Function &F) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+FunctionPass *createBasicAAWrapperPass();
+
+/// A helper for the legacy pass manager to create a \c BasicAAResult object
+/// populated to the best of our ability for a particular function when inside
+/// of a \c ModulePass or a \c CallGraphSCCPass.
+BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F);
+
+/// This class is a functor to be used in legacy module or SCC passes for
+/// computing AA results for a function. We store the results in fields so that
+/// they live long enough to be queried, but we re-use them each time.
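+///
+/// An illustrative sketch, inside a module or SCC pass whose
+/// \c getAnalysisUsage declares the required AA dependencies:
+///
+/// \code
+///   LegacyAARGetter AARGetter(*this);
+///   AAResults &AAR = AARGetter(F);
+/// \endcode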
+class LegacyAARGetter {
+  Pass &P;
+  Optional<BasicAAResult> BAR;
+  Optional<AAResults> AAR;
+
+public:
+  LegacyAARGetter(Pass &P) : P(P) {}
+  AAResults &operator()(Function &F) {
+    BAR.emplace(createLegacyPMBasicAAResult(P, F));
+    AAR.emplace(createLegacyPMAAResults(P, F, *BAR));
+    return *AAR;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_BASICALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h
new file mode 100644
index 0000000..89370cb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -0,0 +1,156 @@
+//===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
+#define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BasicBlock;
+class BranchProbabilityInfo;
+class Function;
+class LoopInfo;
+class Module;
+class raw_ostream;
+template <class BlockT> class BlockFrequencyInfoImpl;
+
+enum PGOViewCountsType { PGOVCT_None, PGOVCT_Graph, PGOVCT_Text };
+
+/// BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to
+/// estimate IR basic block frequencies.
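+///
+/// A usage sketch (illustrative; assumes \c BPI and \c LI have already been
+/// computed for \c F):
+///
+/// \code
+///   BlockFrequencyInfo BFI(F, BPI, LI);
+///   for (const BasicBlock &BB : F)
+///     BFI.printBlockFreq(errs(), &BB) << "\n";
+/// \endcode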
+class BlockFrequencyInfo {
+  using ImplType = BlockFrequencyInfoImpl<BasicBlock>;
+
+  std::unique_ptr<ImplType> BFI;
+
+public:
+  BlockFrequencyInfo();
+  BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
+                     const LoopInfo &LI);
+  BlockFrequencyInfo(const BlockFrequencyInfo &) = delete;
+  BlockFrequencyInfo &operator=(const BlockFrequencyInfo &) = delete;
+  BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
+  BlockFrequencyInfo &operator=(BlockFrequencyInfo &&RHS);
+  ~BlockFrequencyInfo();
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+
+  const Function *getFunction() const;
+  const BranchProbabilityInfo *getBPI() const;
+  void view() const;
+
+  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
+  /// information. Note that the initial frequency is equal to ENTRY_FREQ;
+  /// this means that we should not rely on the value itself, but only on
+  /// comparisons to other block frequencies. We do this to avoid using
+  /// floating point.
+  BlockFrequency getBlockFreq(const BasicBlock *BB) const;
+
+  /// \brief Returns the estimated profile count of \p BB.
+  /// This computes the relative block frequency of \p BB and multiplies it by
+  /// the enclosing function's count (if available) and returns the value.
+  Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
+
+  /// \brief Returns the estimated profile count of \p Freq.
+  /// This uses the frequency \p Freq and multiplies it by
+  /// the enclosing function's count (if available) and returns the value.
+  Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
+
+  /// \brief Returns true if \p BB is an irreducible loop header
+  /// block. Otherwise false.
+  bool isIrrLoopHeader(const BasicBlock *BB);
+
+  // Set the frequency of the given basic block.
+  void setBlockFreq(const BasicBlock *BB, uint64_t Freq);
+
+  /// Set the frequency of \p ReferenceBB to \p Freq and scale the frequencies
+  /// of the blocks in \p BlocksToScale such that their frequencies relative
+  /// to \p ReferenceBB remain unchanged.
+  void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq,
+                            SmallPtrSetImpl<BasicBlock *> &BlocksToScale);
+
+  /// calculate - compute block frequency info for the given function.
+  void calculate(const Function &F, const BranchProbabilityInfo &BPI,
+                 const LoopInfo &LI);
+
+  // Print the block frequency Freq to OS using the current function's entry
+  // frequency to convert Freq into a relative decimal form.
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
+
+  // Convenience method that attempts to look up the frequency associated with
+  // BB and print it to OS.
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BasicBlock *BB) const;
+
+  uint64_t getEntryFreq() const;
+  void releaseMemory();
+  void print(raw_ostream &OS) const;
+};
+
+/// \brief Analysis pass which computes \c BlockFrequencyInfo.
+class BlockFrequencyAnalysis
+    : public AnalysisInfoMixin<BlockFrequencyAnalysis> {
+  friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result type for this analysis pass.
+  using Result = BlockFrequencyInfo;
+
+  /// \brief Run the analysis pass over a function and produce BFI.
+  Result run(Function &F, FunctionAnalysisManager &AM);
+};
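+
+// Typical use from a new-pass-manager pass (illustrative sketch; MyPass is a
+// hypothetical pass):
+//
+//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
+//     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
+//     BFI.print(dbgs());
+//     return PreservedAnalyses::all();
+//   }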
+
+/// \brief Printer pass for the \c BlockFrequencyInfo results.
+class BlockFrequencyPrinterPass
+    : public PassInfoMixin<BlockFrequencyPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit BlockFrequencyPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
+class BlockFrequencyInfoWrapperPass : public FunctionPass {
+  BlockFrequencyInfo BFI;
+
+public:
+  static char ID;
+
+  BlockFrequencyInfoWrapperPass();
+  ~BlockFrequencyInfoWrapperPass() override;
+
+  BlockFrequencyInfo &getBFI() { return BFI; }
+  const BlockFrequencyInfo &getBFI() const { return BFI; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h
new file mode 100644
index 0000000..40c40b8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -0,0 +1,1473 @@
+//==- BlockFrequencyInfoImpl.h - Block Frequency Implementation --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared implementation of BlockFrequency for IR and Machine Instructions.
+// See the documentation below for BlockFrequencyInfoImpl for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/DOTGraphTraits.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ScaledNumber.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <string>
+#include <utility>
+#include <vector>
+
+#define DEBUG_TYPE "block-freq"
+
+namespace llvm {
+
+class BranchProbabilityInfo;
+class Function;
+class Loop;
+class LoopInfo;
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineLoop;
+class MachineLoopInfo;
+
+namespace bfi_detail {
+
+struct IrreducibleGraph;
+
+// This is part of a workaround for a GCC 4.7 crash on lambdas.
+template <class BT> struct BlockEdgesAdder;
+
+/// \brief Mass of a block.
+///
+/// This class implements a sort of fixed-point fraction always between 0.0 and
+/// 1.0.  getMass() == std::numeric_limits<uint64_t>::max() indicates a value of
+/// 1.0.
+///
+/// Masses can be added and subtracted.  Simple saturation arithmetic is used,
+/// so arithmetic operations never overflow or underflow.
+///
+/// Masses can be multiplied.  Multiplication treats full mass as 1.0 and uses
+/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
+/// quite, maximum precision).
+///
+/// Masses can be scaled by \a BranchProbability at maximum precision.
+class BlockMass {
+  uint64_t Mass = 0;
+
+public:
+  BlockMass() = default;
+  explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
+
+  static BlockMass getEmpty() { return BlockMass(); }
+
+  static BlockMass getFull() {
+    return BlockMass(std::numeric_limits<uint64_t>::max());
+  }
+
+  uint64_t getMass() const { return Mass; }
+
+  bool isFull() const { return Mass == std::numeric_limits<uint64_t>::max(); }
+  bool isEmpty() const { return !Mass; }
+
+  bool operator!() const { return isEmpty(); }
+
+  /// \brief Add another mass.
+  ///
+  /// Adds another mass, saturating at \a isFull() rather than overflowing.
+  BlockMass &operator+=(BlockMass X) {
+    uint64_t Sum = Mass + X.Mass;
+    Mass = Sum < Mass ? std::numeric_limits<uint64_t>::max() : Sum;
+    return *this;
+  }
+
+  /// \brief Subtract another mass.
+  ///
+  /// Subtracts another mass, saturating at \a isEmpty() rather than
+  /// underflowing.
+  BlockMass &operator-=(BlockMass X) {
+    uint64_t Diff = Mass - X.Mass;
+    Mass = Diff > Mass ? 0 : Diff;
+    return *this;
+  }
+
+  BlockMass &operator*=(BranchProbability P) {
+    Mass = P.scale(Mass);
+    return *this;
+  }
+
+  bool operator==(BlockMass X) const { return Mass == X.Mass; }
+  bool operator!=(BlockMass X) const { return Mass != X.Mass; }
+  bool operator<=(BlockMass X) const { return Mass <= X.Mass; }
+  bool operator>=(BlockMass X) const { return Mass >= X.Mass; }
+  bool operator<(BlockMass X) const { return Mass < X.Mass; }
+  bool operator>(BlockMass X) const { return Mass > X.Mass; }
+
+  /// \brief Convert to scaled number.
+  ///
+  /// Convert to \a ScaledNumber.  \a isFull() gives 1.0, while \a isEmpty()
+  /// gives slightly above 0.0.
+  ScaledNumber<uint64_t> toScaled() const;
+
+  void dump() const;
+  raw_ostream &print(raw_ostream &OS) const;
+};
+
+inline BlockMass operator+(BlockMass L, BlockMass R) {
+  return BlockMass(L) += R;
+}
+inline BlockMass operator-(BlockMass L, BlockMass R) {
+  return BlockMass(L) -= R;
+}
+inline BlockMass operator*(BlockMass L, BranchProbability R) {
+  return BlockMass(L) *= R;
+}
+inline BlockMass operator*(BranchProbability L, BlockMass R) {
+  return BlockMass(R) *= L;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, BlockMass X) {
+  return X.print(OS);
+}
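+
+// Saturation in practice (illustrative): arithmetic clamps at getFull() and
+// getEmpty() instead of wrapping:
+//
+//   BlockMass M = BlockMass::getFull();
+//   M += BlockMass(1);           // Still isFull(); the add saturates.
+//   M -= BlockMass::getFull();   // Now isEmpty().
+//   M -= BlockMass(1);           // Still isEmpty(); the subtract saturates.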
+
+} // end namespace bfi_detail
+
+template <> struct isPodLike<bfi_detail::BlockMass> {
+  static const bool value = true;
+};
+
+/// \brief Base class for BlockFrequencyInfoImpl
+///
+/// BlockFrequencyInfoImplBase has supporting data structures and some
+/// algorithms for BlockFrequencyInfoImpl.  Only algorithms that depend on
+/// the block type (or that call such algorithms) are skipped here.
+///
+/// Nevertheless, the majority of the overall algorithm documentation lives with
+/// BlockFrequencyInfoImpl.  See there for details.
+class BlockFrequencyInfoImplBase {
+public:
+  using Scaled64 = ScaledNumber<uint64_t>;
+  using BlockMass = bfi_detail::BlockMass;
+
+  /// \brief Representative of a block.
+  ///
+  /// This is a simple wrapper around an index into the reverse-post-order
+  /// traversal of the blocks.
+  ///
+  /// Unlike a block pointer, its order has meaning (location in the
+/// topological sort) and its class is the same regardless of block type.
+  struct BlockNode {
+    using IndexType = uint32_t;
+
+    IndexType Index = std::numeric_limits<uint32_t>::max();
+
+    BlockNode() = default;
+    BlockNode(IndexType Index) : Index(Index) {}
+
+    bool operator==(const BlockNode &X) const { return Index == X.Index; }
+    bool operator!=(const BlockNode &X) const { return Index != X.Index; }
+    bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
+    bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
+    bool operator<(const BlockNode &X) const { return Index < X.Index; }
+    bool operator>(const BlockNode &X) const { return Index > X.Index; }
+
+    bool isValid() const { return Index <= getMaxIndex(); }
+
+    static size_t getMaxIndex() {
+       return std::numeric_limits<uint32_t>::max() - 1;
+    }
+  };
+
+  /// \brief Stats about a block itself.
+  struct FrequencyData {
+    Scaled64 Scaled;
+    uint64_t Integer;
+  };
+
+  /// \brief Data about a loop.
+  ///
+  /// Contains the data necessary to represent a loop as a pseudo-node once it's
+  /// packaged.
+  struct LoopData {
+    using ExitMap = SmallVector<std::pair<BlockNode, BlockMass>, 4>;
+    using NodeList = SmallVector<BlockNode, 4>;
+    using HeaderMassList = SmallVector<BlockMass, 1>;
+
+    LoopData *Parent;            ///< The parent loop.
+    bool IsPackaged = false;     ///< Whether this has been packaged.
+    uint32_t NumHeaders = 1;     ///< Number of headers.
+    ExitMap Exits;               ///< Successor edges (and weights).
+    NodeList Nodes;              ///< Header and the members of the loop.
+    HeaderMassList BackedgeMass; ///< Mass returned to each loop header.
+    BlockMass Mass;
+    Scaled64 Scale;
+
+    LoopData(LoopData *Parent, const BlockNode &Header)
+      : Parent(Parent), Nodes(1, Header), BackedgeMass(1) {}
+
+    template <class It1, class It2>
+    LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
+             It2 LastOther)
+        : Parent(Parent), Nodes(FirstHeader, LastHeader) {
+      NumHeaders = Nodes.size();
+      Nodes.insert(Nodes.end(), FirstOther, LastOther);
+      BackedgeMass.resize(NumHeaders);
+    }
+
+    bool isHeader(const BlockNode &Node) const {
+      if (isIrreducible())
+        return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
+                                  Node);
+      return Node == Nodes[0];
+    }
+
+    BlockNode getHeader() const { return Nodes[0]; }
+    bool isIrreducible() const { return NumHeaders > 1; }
+
+    HeaderMassList::difference_type getHeaderIndex(const BlockNode &B) {
+      assert(isHeader(B) && "this is only valid on loop header blocks");
+      if (isIrreducible())
+        return std::lower_bound(Nodes.begin(), Nodes.begin() + NumHeaders, B) -
+               Nodes.begin();
+      return 0;
+    }
+
+    NodeList::const_iterator members_begin() const {
+      return Nodes.begin() + NumHeaders;
+    }
+
+    NodeList::const_iterator members_end() const { return Nodes.end(); }
+    iterator_range<NodeList::const_iterator> members() const {
+      return make_range(members_begin(), members_end());
+    }
+  };
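+
+  // Layout note (illustrative): for an irreducible loop with sorted headers
+  // H0, H1 and members M0, M1, Nodes is [H0, H1, M0, M1] with NumHeaders == 2;
+  // isHeader() binary-searches the header prefix and members() spans the rest.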
+
+  /// \brief Index of loop information.
+  struct WorkingData {
+    BlockNode Node;           ///< This node.
+    LoopData *Loop = nullptr; ///< The loop this block is inside.
+    BlockMass Mass;           ///< Mass distribution from the entry block.
+
+    WorkingData(const BlockNode &Node) : Node(Node) {}
+
+    bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
+
+    bool isDoubleLoopHeader() const {
+      return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
+             Loop->Parent->isHeader(Node);
+    }
+
+    LoopData *getContainingLoop() const {
+      if (!isLoopHeader())
+        return Loop;
+      if (!isDoubleLoopHeader())
+        return Loop->Parent;
+      return Loop->Parent->Parent;
+    }
+
+    /// \brief Resolve a node to its representative.
+    ///
+    /// Get the node currently representing Node, which could be a containing
+    /// loop.
+    ///
+    /// This function should only be called when distributing mass.  As long as
+    /// there are no irreducible edges to Node, it will have complexity O(1) in
+    /// this context.
+    ///
+    /// In general, the complexity is O(L), where L is the number of loop
+    /// headers Node has been packaged into.  Since this method is called in
+    /// the context of distributing mass, L will be the number of loop headers
+    /// an early exit edge jumps out of.
+    BlockNode getResolvedNode() const {
+      auto L = getPackagedLoop();
+      return L ? L->getHeader() : Node;
+    }
+
+    LoopData *getPackagedLoop() const {
+      if (!Loop || !Loop->IsPackaged)
+        return nullptr;
+      auto L = Loop;
+      while (L->Parent && L->Parent->IsPackaged)
+        L = L->Parent;
+      return L;
+    }
+
+    /// \brief Get the appropriate mass for a node.
+    ///
+    /// Get appropriate mass for Node.  If Node is a loop-header (whose loop
+    /// has been packaged), returns the mass of its pseudo-node.  If it's a
+    /// node inside a packaged loop, it returns the loop's mass.
+    BlockMass &getMass() {
+      if (!isAPackage())
+        return Mass;
+      if (!isADoublePackage())
+        return Loop->Mass;
+      return Loop->Parent->Mass;
+    }
+
+    /// \brief Has ContainingLoop been packaged up?
+    bool isPackaged() const { return getResolvedNode() != Node; }
+
+    /// \brief Has Loop been packaged up?
+    bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
+
+    /// \brief Has Loop been packaged up twice?
+    bool isADoublePackage() const {
+      return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
+    }
+  };
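+
+  // Resolution example (illustrative): if Node's loop L1 is packaged and L1's
+  // parent L0 is packaged too, getPackagedLoop() walks up to L0 and
+  // getResolvedNode() returns L0's header, so mass destined for Node is
+  // credited to L0's pseudo-node.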
+
+  /// \brief Unscaled probability weight.
+  ///
+  /// Probability weight for an edge in the graph (including the
+  /// successor/target node).
+  ///
+  /// All edges in the original function are 32-bit.  However, exit edges from
+  /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
+  /// space in general.
+  ///
+  /// In addition to the raw weight amount, Weight stores the type of the edge
+  /// in the current context (i.e., the context of the loop being processed).
+  /// Is this a local edge within the loop, an exit from the loop, or a
+  /// backedge to the loop header?
+  struct Weight {
+    enum DistType { Local, Exit, Backedge };
+    DistType Type = Local;
+    BlockNode TargetNode;
+    uint64_t Amount = 0;
+
+    Weight() = default;
+    Weight(DistType Type, BlockNode TargetNode, uint64_t Amount)
+        : Type(Type), TargetNode(TargetNode), Amount(Amount) {}
+  };
+
+  /// \brief Distribution of unscaled probability weight.
+  ///
+  /// Distribution of unscaled probability weight to a set of successors.
+  ///
+  /// This class collates the successor edge weights for later processing.
+  ///
+  /// \a DidOverflow indicates whether \a Total did overflow while adding to
+  /// the distribution.  It should never overflow twice.
+  struct Distribution {
+    using WeightList = SmallVector<Weight, 4>;
+
+    WeightList Weights;       ///< Individual successor weights.
+    uint64_t Total = 0;       ///< Sum of all weights.
+    bool DidOverflow = false; ///< Whether \a Total did overflow.
+
+    Distribution() = default;
+
+    void addLocal(const BlockNode &Node, uint64_t Amount) {
+      add(Node, Amount, Weight::Local);
+    }
+
+    void addExit(const BlockNode &Node, uint64_t Amount) {
+      add(Node, Amount, Weight::Exit);
+    }
+
+    void addBackedge(const BlockNode &Node, uint64_t Amount) {
+      add(Node, Amount, Weight::Backedge);
+    }
+
+    /// \brief Normalize the distribution.
+    ///
+    /// Combines multiple edges to the same \a Weight::TargetNode and scales
+    /// down so that \a Total fits into 32-bits.
+    ///
+    /// This is linear in the size of \a Weights.  For the vast majority of
+    /// cases, adjacent edge weights are combined by sorting WeightList and
+    /// combining adjacent weights.  However, for very large edge lists an
+    /// auxiliary hash table is used.
+    void normalize();
+
+  private:
+    void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
+  };
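+
+  // Normalization example (illustrative): weights {A: 3, A: 1, B: 4} coalesce
+  // to {A: 4, B: 4}; if Total still needs more than 32 bits, all weights are
+  // scaled down by a common shift so Total fits while ratios are preserved as
+  // closely as possible.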
+
+  /// \brief Data about each block.  This is used downstream.
+  std::vector<FrequencyData> Freqs;
+
+  /// \brief Whether each block is an irreducible loop header.
+  /// This is used downstream.
+  SparseBitVector<> IsIrrLoopHeader;
+
+  /// \brief Loop data: see initializeLoops().
+  std::vector<WorkingData> Working;
+
+  /// \brief Indexed information about loops.
+  std::list<LoopData> Loops;
+
+  /// \brief Virtual destructor.
+  ///
+  /// Need a virtual destructor to mask the compiler warning about
+  /// getBlockName().
+  virtual ~BlockFrequencyInfoImplBase() = default;
+
+  /// \brief Add all edges out of a packaged loop to the distribution.
+  ///
+  /// Adds all edges from LocalLoopHead to Dist.  Calls addToDist() to add each
+  /// successor edge.
+  ///
+  /// \return \c true unless there's an irreducible backedge.
+  bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
+                               Distribution &Dist);
+
+  /// \brief Add an edge to the distribution.
+  ///
+  /// Adds an edge to Succ to Dist.  If \c LoopHead.isValid(), then whether the
+  /// edge is local/exit/backedge is in the context of LoopHead.  Otherwise,
+  /// every edge should be a local edge (since all the loops are packaged up).
+  ///
+  /// \return \c true unless aborted due to an irreducible backedge.
+  bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
+                 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
+
+  LoopData &getLoopPackage(const BlockNode &Head) {
+    assert(Head.Index < Working.size());
+    assert(Working[Head.Index].isLoopHeader());
+    return *Working[Head.Index].Loop;
+  }
+
+  /// \brief Analyze irreducible SCCs.
+  ///
+  /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
+  /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
+  /// Insert them into \a Loops before \c Insert.
+  ///
+  /// \return the \c LoopData nodes representing the irreducible SCCs.
+  iterator_range<std::list<LoopData>::iterator>
+  analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
+                     std::list<LoopData>::iterator Insert);
+
+  /// \brief Update a loop after packaging irreducible SCCs inside of it.
+  ///
+  /// Update \c OuterLoop.  Before finding irreducible control flow, it was
+  /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
+  /// LoopData::BackedgeMass need to be reset.  Also, nodes that were packaged
+  /// up need to be removed from \a OuterLoop::Nodes.
+  void updateLoopWithIrreducible(LoopData &OuterLoop);
+
+  /// \brief Distribute mass according to a distribution.
+  ///
+  /// Distributes the mass in Source according to Dist.  If LoopHead.isValid(),
+  /// backedges and exits are stored in its entry in Loops.
+  ///
+  /// Mass is distributed in parallel from two copies of the source mass.
+  void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
+                      Distribution &Dist);
+
+  /// \brief Compute the loop scale for a loop.
+  void computeLoopScale(LoopData &Loop);
+
+  /// Adjust the mass of all headers in an irreducible loop.
+  ///
+  /// Initially, irreducible loops are assumed to distribute their mass
+  /// equally among their headers. This can lead to wrong frequency estimates
+  /// since some headers may be executed more frequently than others.
+  ///
+  /// This adjusts header mass distribution so it matches the weights of
+  /// the backedges going into each of the loop headers.
+  void adjustLoopHeaderMass(LoopData &Loop);
+
+  void distributeIrrLoopHeaderMass(Distribution &Dist);
+
+  /// \brief Package up a loop.
+  void packageLoop(LoopData &Loop);
+
+  /// \brief Unwrap loops.
+  void unwrapLoops();
+
+  /// \brief Finalize frequency metrics.
+  ///
+  /// Calculates final frequencies and cleans up no-longer-needed data
+  /// structures.
+  void finalizeMetrics();
+
+  /// \brief Clear all memory.
+  void clear();
+
+  virtual std::string getBlockName(const BlockNode &Node) const;
+  std::string getLoopName(const LoopData &Loop) const;
+
+  virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
+  void dump() const { print(dbgs()); }
+
+  Scaled64 getFloatingBlockFreq(const BlockNode &Node) const;
+
+  BlockFrequency getBlockFreq(const BlockNode &Node) const;
+  Optional<uint64_t> getBlockProfileCount(const Function &F,
+                                          const BlockNode &Node) const;
+  Optional<uint64_t> getProfileCountFromFreq(const Function &F,
+                                             uint64_t Freq) const;
+  bool isIrrLoopHeader(const BlockNode &Node);
+
+  void setBlockFreq(const BlockNode &Node, uint64_t Freq);
+
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
+  raw_ostream &printBlockFreq(raw_ostream &OS,
+                              const BlockFrequency &Freq) const;
+
+  uint64_t getEntryFreq() const {
+    assert(!Freqs.empty());
+    return Freqs[0].Integer;
+  }
+};
+
+namespace bfi_detail {
+
+template <class BlockT> struct TypeMap {};
+template <> struct TypeMap<BasicBlock> {
+  using BlockT = BasicBlock;
+  using FunctionT = Function;
+  using BranchProbabilityInfoT = BranchProbabilityInfo;
+  using LoopT = Loop;
+  using LoopInfoT = LoopInfo;
+};
+template <> struct TypeMap<MachineBasicBlock> {
+  using BlockT = MachineBasicBlock;
+  using FunctionT = MachineFunction;
+  using BranchProbabilityInfoT = MachineBranchProbabilityInfo;
+  using LoopT = MachineLoop;
+  using LoopInfoT = MachineLoopInfo;
+};
+
+/// \brief Get the name of a MachineBasicBlock.
+///
+/// Get the name of a MachineBasicBlock.  It's templated so that including from
+/// CodeGen is unnecessary (that would be a layering issue).
+///
+/// This is used mainly for debug output.  The name is similar to
+/// MachineBasicBlock::getFullName(), but skips the name of the function.
+template <class BlockT> std::string getBlockName(const BlockT *BB) {
+  assert(BB && "Unexpected nullptr");
+  auto MachineName = "BB" + Twine(BB->getNumber());
+  if (BB->getBasicBlock())
+    return (MachineName + "[" + BB->getName() + "]").str();
+  return MachineName.str();
+}
+/// \brief Get the name of a BasicBlock.
+template <> inline std::string getBlockName(const BasicBlock *BB) {
+  assert(BB && "Unexpected nullptr");
+  return BB->getName().str();
+}
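+
+// For example (illustrative): a machine block numbered 3 whose IR block is
+// named "entry" prints as "BB3[entry]", a bare machine block as "BB3", and an
+// IR block simply as its name, e.g. "entry".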
+
+/// \brief Graph of irreducible control flow.
+///
+/// This graph is used for determining the SCCs in a loop (or top-level
+/// function) that has irreducible control flow.
+///
+/// During the block frequency algorithm, the local graphs are defined in a
+/// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
+/// graphs for most edges, but getting others from \a LoopData::ExitMap.  The
+/// latter only has successor information.
+///
+/// \a IrreducibleGraph makes this graph explicit.  It's in a form that can use
+/// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
+/// and it explicitly lists predecessors and successors.  The initialization
+/// that relies on \c MachineBasicBlock is defined in the header.
+struct IrreducibleGraph {
+  using BFIBase = BlockFrequencyInfoImplBase;
+
+  BFIBase &BFI;
+
+  using BlockNode = BFIBase::BlockNode;
+  struct IrrNode {
+    BlockNode Node;
+    unsigned NumIn = 0;
+    std::deque<const IrrNode *> Edges;
+
+    IrrNode(const BlockNode &Node) : Node(Node) {}
+
+    using iterator = std::deque<const IrrNode *>::const_iterator;
+
+    iterator pred_begin() const { return Edges.begin(); }
+    iterator succ_begin() const { return Edges.begin() + NumIn; }
+    iterator pred_end() const { return succ_begin(); }
+    iterator succ_end() const { return Edges.end(); }
+  };
+  BlockNode Start;
+  const IrrNode *StartIrr = nullptr;
+  std::vector<IrrNode> Nodes;
+  SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
+
+  /// \brief Construct an explicit graph containing irreducible control flow.
+  ///
+  /// Construct an explicit graph of the control flow in \c OuterLoop (or the
+  /// top-level function, if \c OuterLoop is \c nullptr).  Uses \c
+  /// addBlockEdges to add block successors that have not been packaged into
+  /// loops.
+  ///
+  /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
+  /// user of this.
+  template <class BlockEdgesAdder>
+  IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
+                   BlockEdgesAdder addBlockEdges) : BFI(BFI) {
+    initialize(OuterLoop, addBlockEdges);
+  }
+
+  template <class BlockEdgesAdder>
+  void initialize(const BFIBase::LoopData *OuterLoop,
+                  BlockEdgesAdder addBlockEdges);
+  void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
+  void addNodesInFunction();
+
+  void addNode(const BlockNode &Node) {
+    Nodes.emplace_back(Node);
+    BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
+  }
+
+  void indexNodes();
+  template <class BlockEdgesAdder>
+  void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
+                BlockEdgesAdder addBlockEdges);
+  void addEdge(IrrNode &Irr, const BlockNode &Succ,
+               const BFIBase::LoopData *OuterLoop);
+};
+
+template <class BlockEdgesAdder>
+void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
+                                  BlockEdgesAdder addBlockEdges) {
+  if (OuterLoop) {
+    addNodesInLoop(*OuterLoop);
+    for (auto N : OuterLoop->Nodes)
+      addEdges(N, OuterLoop, addBlockEdges);
+  } else {
+    addNodesInFunction();
+    for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
+      addEdges(Index, OuterLoop, addBlockEdges);
+  }
+  StartIrr = Lookup[Start.Index];
+}
+
+template <class BlockEdgesAdder>
+void IrreducibleGraph::addEdges(const BlockNode &Node,
+                                const BFIBase::LoopData *OuterLoop,
+                                BlockEdgesAdder addBlockEdges) {
+  auto L = Lookup.find(Node.Index);
+  if (L == Lookup.end())
+    return;
+  IrrNode &Irr = *L->second;
+  const auto &Working = BFI.Working[Node.Index];
+
+  if (Working.isAPackage())
+    for (const auto &I : Working.Loop->Exits)
+      addEdge(Irr, I.first, OuterLoop);
+  else
+    addBlockEdges(*this, Irr, OuterLoop);
+}
+
+} // end namespace bfi_detail
+
+/// \brief Shared implementation for block frequency analysis.
+///
+/// This is a shared implementation of BlockFrequencyInfo and
+/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
+/// blocks.
+///
+/// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
+/// which is called the header.  A given loop, L, can have sub-loops, which are
+/// loops within the subgraph of L that exclude its header.  (A "trivial" SCC
+/// consists of a single block that does not have a self-edge.)
+///
+/// In addition to loops, this algorithm has limited support for irreducible
+/// SCCs, which are SCCs with multiple entry blocks.  Irreducible SCCs are
+/// discovered on the fly, and modelled as loops with multiple headers.
+///
+/// The headers of an irreducible sub-SCC consist of its entry blocks and all
+/// nodes that are targets of a backedge within it (excluding backedges within
+/// true sub-loops).  Block frequency calculations act as if a block is
+/// inserted that intercepts all the edges to the headers.  All backedges and
+/// entries point to this block.  Its successors are the headers, which split
+/// the frequency evenly.
+///
+/// This algorithm leverages BlockMass and ScaledNumber to maintain precision,
+/// separates mass distribution from loop scaling, and dithers to eliminate
+/// probability mass loss.
+///
+/// The implementation is split between BlockFrequencyInfoImpl, which knows the
+/// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
+/// BlockFrequencyInfoImplBase, which doesn't.  The base class uses \a
+/// BlockNode, a wrapper around a uint32_t.  BlockNode is numbered from 0 in
+/// reverse-post order.  This gives two advantages:  it's easy to compare the
+/// relative ordering of two nodes, and maps keyed on BlockT can be represented
+/// by vectors.
+///
+/// This algorithm is O(V+E), unless there is irreducible control flow, in
+/// which case it's O(V*E) in the worst case.
+///
+/// These are the main stages:
+///
+///  0. Reverse post-order traversal (\a initializeRPOT()).
+///
+///     Run a single post-order traversal and save it (in reverse) in RPOT.
+///     All other stages make use of this ordering.  Save a lookup from BlockT
+///     to BlockNode (the index into RPOT) in Nodes.
+///
+///  1. Loop initialization (\a initializeLoops()).
+///
+///     Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
+///     the algorithm.  In particular, store the immediate members of each loop
+///     in reverse post-order.
+///
+///  2. Calculate mass and scale in loops (\a computeMassInLoops()).
+///
+///     For each loop (bottom-up), distribute mass through the DAG resulting
+///     from ignoring backedges and treating sub-loops as a single pseudo-node.
+///     Track the backedge mass distributed to the loop header, and use it to
+///     calculate the loop scale (number of loop iterations).  Immediate
+///     members that represent sub-loops will already have been visited and
+///     packaged into a pseudo-node.
+///
+///     Distributing mass in a loop is a reverse-post-order traversal through
+///     the loop.  Start by assigning full mass to the Loop header.  For each
+///     node in the loop:
+///
+///         - Fetch and categorize the weight distribution for its successors.
+///           If this is a packaged-subloop, the weight distribution is stored
+///           in \a LoopData::Exits.  Otherwise, fetch it from
+///           BranchProbabilityInfo.
+///
+///         - Each successor is categorized as \a Weight::Local, a local edge
+///           within the current loop, \a Weight::Backedge, a backedge to the
+///           loop header, or \a Weight::Exit, any successor outside the loop.
+///           The weight, the successor, and its category are stored in \a
+///           Distribution.  There can be multiple edges to each successor.
+///
+///         - If there's a backedge to a non-header, there's an irreducible SCC.
+///           The usual flow is temporarily aborted.  \a
+///           computeIrreducibleMass() finds the irreducible SCCs within the
+///           loop, packages them up, and restarts the flow.
+///
+///         - Normalize the distribution:  scale weights down so that their sum
+///           is 32-bits, and coalesce multiple edges to the same node.
+///
+///         - Distribute the mass accordingly, dithering to minimize mass loss,
+///           as described in \a distributeMass().
+///
+///     In the case of irreducible loops, instead of a single loop header,
+///     there will be several. The computation of backedge masses is similar
+///     but instead of having a single backedge mass, there will be one
+///     backedge per loop header. In these cases, each backedge will carry
+///     a mass proportional to the edge weights along the corresponding
+///     path.
+///
+///     At the end of propagation, the full mass assigned to the loop will be
+///     distributed among the loop headers proportionally according to the
+///     mass flowing through their backedges.
+///
+///     Finally, calculate the loop scale from the accumulated backedge mass.
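+///
+///     As a worked example (illustrative): if one unit of mass enters a
+///     single-header loop and 0.75 of it returns along the backedge, the exit
+///     mass is 0.25 and the loop scale is 1 / 0.25 = 4; the geometric series
+///     1 + 0.75 + 0.75^2 + ... converges to the same factor.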
+///
+///  3. Distribute mass in the function (\a computeMassInFunction()).
+///
+///     Finally, distribute mass through the DAG resulting from packaging all
+///     loops in the function.  This uses the same algorithm as distributing
+///     mass in a loop, except that there are no exit or backedge edges.
+///
+///  4. Unpackage loops (\a unwrapLoops()).
+///
+///     Initialize each block's frequency to a floating point representation of
+///     its mass.
+///
+///     Visit loops top-down, scaling the frequencies of its immediate members
+///     by the loop's pseudo-node's frequency.
+///
+///  5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
+///
+///     Using the min and max frequencies as a guide, translate floating point
+///     frequencies to an appropriate range in uint64_t.
+///
+/// It has some known flaws.
+///
+///   - The model of irreducible control flow is a rough approximation.
+///
+///     Modelling irreducible control flow exactly involves setting up and
+///     solving a group of infinite geometric series.  Such precision is
+///     unlikely to be worthwhile, since most of our algorithms give up on
+///     irreducible control flow anyway.
+///
+///     Nevertheless, we might find that we need to get closer.  Here's a sort
+///     of TODO list for the model with diminishing returns, to be completed as
+///     necessary.
+///
+///       - The headers for the \a LoopData representing an irreducible SCC
+///         include non-entry blocks.  When these extra blocks exist, they
+///         indicate a self-contained irreducible sub-SCC.  We could treat them
+///         as sub-loops, rather than arbitrarily shoving the problematic
+///         blocks into the headers of the main irreducible SCC.
+///
+///       - Entry frequencies are assumed to be evenly split between the
+///         headers of a given irreducible SCC, which is the only option if we
+///         need to compute mass in the SCC before its parent loop.  Instead,
+///         we could partially compute mass in the parent loop, and stop when
+///         we get to the SCC.  Here, we have the correct ratio of entry
+///         masses, which we can use to adjust their relative frequencies.
+///         Compute mass in the SCC, and then continue propagation in the
+///         parent.
+///
+///       - We can propagate mass iteratively through the SCC, for some fixed
+///         number of iterations.  Each iteration starts by assigning the entry
+///         blocks their backedge mass from the prior iteration.  The final
+///         mass for each block (and each exit, and the total backedge mass
+///         used for computing loop scale) is the sum of all iterations.
+///         (Running this until fixed point would "solve" the geometric
+///         series by simulation.)
+template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
+  // This is part of a workaround for a GCC 4.7 crash on lambdas.
+  friend struct bfi_detail::BlockEdgesAdder<BT>;
+
+  using BlockT = typename bfi_detail::TypeMap<BT>::BlockT;
+  using FunctionT = typename bfi_detail::TypeMap<BT>::FunctionT;
+  using BranchProbabilityInfoT =
+      typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT;
+  using LoopT = typename bfi_detail::TypeMap<BT>::LoopT;
+  using LoopInfoT = typename bfi_detail::TypeMap<BT>::LoopInfoT;
+  using Successor = GraphTraits<const BlockT *>;
+  using Predecessor = GraphTraits<Inverse<const BlockT *>>;
+
+  const BranchProbabilityInfoT *BPI = nullptr;
+  const LoopInfoT *LI = nullptr;
+  const FunctionT *F = nullptr;
+
+  // All blocks in reverse postorder.
+  std::vector<const BlockT *> RPOT;
+  DenseMap<const BlockT *, BlockNode> Nodes;
+
+  using rpot_iterator = typename std::vector<const BlockT *>::const_iterator;
+
+  rpot_iterator rpot_begin() const { return RPOT.begin(); }
+  rpot_iterator rpot_end() const { return RPOT.end(); }
+
+  size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
+
+  BlockNode getNode(const rpot_iterator &I) const {
+    return BlockNode(getIndex(I));
+  }
+  BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
+
+  const BlockT *getBlock(const BlockNode &Node) const {
+    assert(Node.Index < RPOT.size());
+    return RPOT[Node.Index];
+  }
+
+  /// \brief Run (and save) a post-order traversal.
+  ///
+  /// Saves a reverse post-order traversal of all the nodes in \a F.
+  void initializeRPOT();
+
+  /// \brief Initialize loop data.
+  ///
+  /// Build up \a Loops using \a LoopInfo.  \a LoopInfo gives us a mapping from
+  /// each block to the deepest loop it's in, but we need the inverse.  For each
+  /// loop, we store in reverse post-order its "immediate" members, defined as
+  /// the header, the headers of immediate sub-loops, and all other blocks in
+  /// the loop that are not in sub-loops.
+  void initializeLoops();
+
+  /// \brief Propagate to a block's successors.
+  ///
+  /// In the context of distributing mass through \c OuterLoop, divide the mass
+  /// currently assigned to \c Node between its successors.
+  ///
+  /// \return \c true unless there's an irreducible backedge.
+  bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
+
+  /// \brief Compute mass in a particular loop.
+  ///
+  /// Assign mass to \c Loop's header, and then for each block in \c Loop in
+  /// reverse post-order, distribute mass to its successors.  Only visits nodes
+  /// that have not been packaged into sub-loops.
+  ///
+  /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
+  /// \return \c true unless there's an irreducible backedge.
+  bool computeMassInLoop(LoopData &Loop);
+
+  /// \brief Try to compute mass in the top-level function.
+  ///
+  /// Assign mass to the entry block, and then for each block in reverse
+  /// post-order, distribute mass to its successors.  Skips nodes that have
+  /// been packaged into loops.
+  ///
+  /// \pre \a computeMassInLoops() has been called.
+  /// \return \c true unless there's an irreducible backedge.
+  bool tryToComputeMassInFunction();
+
+  /// \brief Compute mass in (and package up) irreducible SCCs.
+  ///
+  /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
+  /// of \c Insert), and call \a computeMassInLoop() on each of them.
+  ///
+  /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
+  ///
+  /// \pre \a computeMassInLoop() has been called for each subloop of \c
+  /// OuterLoop.
+  /// \pre \c Insert points at the last loop successfully processed by \a
+  /// computeMassInLoop().
+  /// \pre \c OuterLoop has irreducible SCCs.
+  void computeIrreducibleMass(LoopData *OuterLoop,
+                              std::list<LoopData>::iterator Insert);
+
+  /// \brief Compute mass in all loops.
+  ///
+  /// For each loop bottom-up, call \a computeMassInLoop().
+  ///
+  /// \a computeMassInLoop() aborts (and returns \c false) on loops that
+  /// contain irreducible sub-SCCs.  Use \a computeIrreducibleMass() and then
+  /// re-enter \a computeMassInLoop().
+  ///
+  /// \post \a computeMassInLoop() has returned \c true for every loop.
+  void computeMassInLoops();
+
+  /// \brief Compute mass in the top-level function.
+  ///
+  /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
+  /// compute mass in the top-level function.
+  ///
+  /// \post \a tryToComputeMassInFunction() has returned \c true.
+  void computeMassInFunction();
+
+  std::string getBlockName(const BlockNode &Node) const override {
+    return bfi_detail::getBlockName(getBlock(Node));
+  }
+
+public:
+  BlockFrequencyInfoImpl() = default;
+
+  const FunctionT *getFunction() const { return F; }
+
+  void calculate(const FunctionT &F, const BranchProbabilityInfoT &BPI,
+                 const LoopInfoT &LI);
+
+  using BlockFrequencyInfoImplBase::getEntryFreq;
+
+  BlockFrequency getBlockFreq(const BlockT *BB) const {
+    return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
+  }
+
+  Optional<uint64_t> getBlockProfileCount(const Function &F,
+                                          const BlockT *BB) const {
+    return BlockFrequencyInfoImplBase::getBlockProfileCount(F, getNode(BB));
+  }
+
+  Optional<uint64_t> getProfileCountFromFreq(const Function &F,
+                                             uint64_t Freq) const {
+    return BlockFrequencyInfoImplBase::getProfileCountFromFreq(F, Freq);
+  }
+
+  bool isIrrLoopHeader(const BlockT *BB) {
+    return BlockFrequencyInfoImplBase::isIrrLoopHeader(getNode(BB));
+  }
+
+  void setBlockFreq(const BlockT *BB, uint64_t Freq);
+
+  Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
+    return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
+  }
+
+  const BranchProbabilityInfoT &getBPI() const { return *BPI; }
+
+  /// \brief Print the frequencies for the current function.
+  ///
+  /// Prints the frequencies for the blocks in the current function.
+  ///
+  /// Blocks are printed in the natural iteration order of the function, rather
+  /// than reverse post-order.  This provides two advantages:  writing -analyze
+  /// tests is easier (since blocks come out in source order), and even
+  /// unreachable blocks are printed.
+  ///
+  /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
+  /// we need to override it here.
+  raw_ostream &print(raw_ostream &OS) const override;
+
+  using BlockFrequencyInfoImplBase::dump;
+  using BlockFrequencyInfoImplBase::printBlockFreq;
+
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
+    return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
+  }
+};
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::calculate(const FunctionT &F,
+                                           const BranchProbabilityInfoT &BPI,
+                                           const LoopInfoT &LI) {
+  // Save the parameters.
+  this->BPI = &BPI;
+  this->LI = &LI;
+  this->F = &F;
+
+  // Clean up left-over data structures.
+  BlockFrequencyInfoImplBase::clear();
+  RPOT.clear();
+  Nodes.clear();
+
+  // Initialize.
+  DEBUG(dbgs() << "\nblock-frequency: " << F.getName() << "\n================="
+               << std::string(F.getName().size(), '=') << "\n");
+  initializeRPOT();
+  initializeLoops();
+
+  // Visit loops in post-order to find the local mass distribution, and then do
+  // the full function.
+  computeMassInLoops();
+  computeMassInFunction();
+  unwrapLoops();
+  finalizeMetrics();
+}
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::setBlockFreq(const BlockT *BB, uint64_t Freq) {
+  if (Nodes.count(BB))
+    BlockFrequencyInfoImplBase::setBlockFreq(getNode(BB), Freq);
+  else {
+    // If BB is a newly added block after BFI is done, we need to create a new
+    // BlockNode for it assigned with a new index. The index can be determined
+    // by the size of Freqs.
+    BlockNode NewNode(Freqs.size());
+    Nodes[BB] = NewNode;
+    Freqs.emplace_back();
+    BlockFrequencyInfoImplBase::setBlockFreq(NewNode, Freq);
+  }
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
+  const BlockT *Entry = &F->front();
+  RPOT.reserve(F->size());
+  std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
+  std::reverse(RPOT.begin(), RPOT.end());
+
+  assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
+         "More nodes in function than Block Frequency Info supports");
+
+  DEBUG(dbgs() << "reverse-post-order-traversal\n");
+  for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
+    BlockNode Node = getNode(I);
+    DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
+    Nodes[*I] = Node;
+  }
+
+  Working.reserve(RPOT.size());
+  for (size_t Index = 0; Index < RPOT.size(); ++Index)
+    Working.emplace_back(Index);
+  Freqs.resize(RPOT.size());
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
+  DEBUG(dbgs() << "loop-detection\n");
+  if (LI->empty())
+    return;
+
+  // Visit loops top down and assign them an index.
+  std::deque<std::pair<const LoopT *, LoopData *>> Q;
+  for (const LoopT *L : *LI)
+    Q.emplace_back(L, nullptr);
+  while (!Q.empty()) {
+    const LoopT *Loop = Q.front().first;
+    LoopData *Parent = Q.front().second;
+    Q.pop_front();
+
+    BlockNode Header = getNode(Loop->getHeader());
+    assert(Header.isValid());
+
+    Loops.emplace_back(Parent, Header);
+    Working[Header.Index].Loop = &Loops.back();
+    DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
+
+    for (const LoopT *L : *Loop)
+      Q.emplace_back(L, &Loops.back());
+  }
+
+  // Visit nodes in reverse post-order and add them to their deepest containing
+  // loop.
+  for (size_t Index = 0; Index < RPOT.size(); ++Index) {
+    // Loop headers have already been mostly mapped.
+    if (Working[Index].isLoopHeader()) {
+      LoopData *ContainingLoop = Working[Index].getContainingLoop();
+      if (ContainingLoop)
+        ContainingLoop->Nodes.push_back(Index);
+      continue;
+    }
+
+    const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
+    if (!Loop)
+      continue;
+
+    // Add this node to its containing loop's member list.
+    BlockNode Header = getNode(Loop->getHeader());
+    assert(Header.isValid());
+    const auto &HeaderData = Working[Header.Index];
+    assert(HeaderData.isLoopHeader());
+
+    Working[Index].Loop = HeaderData.Loop;
+    HeaderData.Loop->Nodes.push_back(Index);
+    DEBUG(dbgs() << " - loop = " << getBlockName(Header)
+                 << ": member = " << getBlockName(Index) << "\n");
+  }
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
+  // Visit loops with the deepest first, and the top-level loops last.
+  for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
+    if (computeMassInLoop(*L))
+      continue;
+    auto Next = std::next(L);
+    computeIrreducibleMass(&*L, L.base());
+    L = std::prev(Next);
+    if (computeMassInLoop(*L))
+      continue;
+    llvm_unreachable("unhandled irreducible control flow");
+  }
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
+  // Compute mass in loop.
+  DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
+
+  if (Loop.isIrreducible()) {
+    DEBUG(dbgs() << "isIrreducible = true\n");
+    Distribution Dist;
+    unsigned NumHeadersWithWeight = 0;
+    Optional<uint64_t> MinHeaderWeight;
+    DenseSet<uint32_t> HeadersWithoutWeight;
+    HeadersWithoutWeight.reserve(Loop.NumHeaders);
+    for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
+      auto &HeaderNode = Loop.Nodes[H];
+      const BlockT *Block = getBlock(HeaderNode);
+      IsIrrLoopHeader.set(Loop.Nodes[H].Index);
+      Optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
+      if (!HeaderWeight) {
+        DEBUG(dbgs() << "Missing irr loop header metadata on "
+              << getBlockName(HeaderNode) << "\n");
+        HeadersWithoutWeight.insert(H);
+        continue;
+      }
+      DEBUG(dbgs() << getBlockName(HeaderNode)
+            << " has irr loop header weight " << HeaderWeight.getValue()
+            << "\n");
+      NumHeadersWithWeight++;
+      uint64_t HeaderWeightValue = HeaderWeight.getValue();
+      if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
+        MinHeaderWeight = HeaderWeightValue;
+      if (HeaderWeightValue) {
+        Dist.addLocal(HeaderNode, HeaderWeightValue);
+      }
+    }
+    // As a heuristic, if some headers don't have a weight, give them the
+    // minimum weight seen. Staying in the general range of the other headers'
+    // weights avoids disrupting the existing trends too much, and the minimum
+    // seems to perform better than the average.
+    // FIXME: better to update the weight in the passes that drop it.
+    // If no headers have a weight, give them even weight (use weight 1).
+    if (!MinHeaderWeight)
+      MinHeaderWeight = 1;
+    for (uint32_t H : HeadersWithoutWeight) {
+      auto &HeaderNode = Loop.Nodes[H];
+      assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
+             "Shouldn't have a weight metadata");
+      uint64_t MinWeight = MinHeaderWeight.getValue();
+      DEBUG(dbgs() << "Giving weight " << MinWeight
+            << " to " << getBlockName(HeaderNode) << "\n");
+      if (MinWeight)
+        Dist.addLocal(HeaderNode, MinWeight);
+    }
+    distributeIrrLoopHeaderMass(Dist);
+    for (const BlockNode &M : Loop.Nodes)
+      if (!propagateMassToSuccessors(&Loop, M))
+        llvm_unreachable("unhandled irreducible control flow");
+    if (NumHeadersWithWeight == 0)
+      // No headers have weight metadata. Adjust header mass.
+      adjustLoopHeaderMass(Loop);
+  } else {
+    Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
+    if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
+      llvm_unreachable("irreducible control flow to loop header!?");
+    for (const BlockNode &M : Loop.members())
+      if (!propagateMassToSuccessors(&Loop, M))
+        // Irreducible backedge.
+        return false;
+  }
+
+  computeLoopScale(Loop);
+  packageLoop(Loop);
+  return true;
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
+  // Compute mass in function.
+  DEBUG(dbgs() << "compute-mass-in-function\n");
+  assert(!Working.empty() && "no blocks in function");
+  assert(!Working[0].isLoopHeader() && "entry block is a loop header");
+
+  Working[0].getMass() = BlockMass::getFull();
+  for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
+    // Check for nodes that have been packaged.
+    BlockNode Node = getNode(I);
+    if (Working[Node.Index].isPackaged())
+      continue;
+
+    if (!propagateMassToSuccessors(nullptr, Node))
+      return false;
+  }
+  return true;
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
+  if (tryToComputeMassInFunction())
+    return;
+  computeIrreducibleMass(nullptr, Loops.begin());
+  if (tryToComputeMassInFunction())
+    return;
+  llvm_unreachable("unhandled irreducible control flow");
+}
+
+/// \note This should be a lambda, but that crashes GCC 4.7.
+namespace bfi_detail {
+
+template <class BT> struct BlockEdgesAdder {
+  using BlockT = BT;
+  using LoopData = BlockFrequencyInfoImplBase::LoopData;
+  using Successor = GraphTraits<const BlockT *>;
+
+  const BlockFrequencyInfoImpl<BT> &BFI;
+
+  explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
+      : BFI(BFI) {}
+
+  void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
+                  const LoopData *OuterLoop) {
+    const BlockT *BB = BFI.RPOT[Irr.Node.Index];
+    for (const auto Succ : children<const BlockT *>(BB))
+      G.addEdge(Irr, BFI.getNode(Succ), OuterLoop);
+  }
+};
+
+} // end namespace bfi_detail
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
+    LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
+  DEBUG(dbgs() << "analyze-irreducible-in-";
+        if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
+        else dbgs() << "function\n");
+
+  using namespace bfi_detail;
+
+  // Ideally, addBlockEdges() would be declared here as a lambda, but that
+  // crashes GCC 4.7.
+  BlockEdgesAdder<BT> addBlockEdges(*this);
+  IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
+
+  for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
+    computeMassInLoop(L);
+
+  if (!OuterLoop)
+    return;
+  updateLoopWithIrreducible(*OuterLoop);
+}
+
+// A helper function that converts a branch probability into a weight.
+inline uint32_t getWeightFromBranchProb(const BranchProbability Prob) {
+  return Prob.getNumerator();
+}
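+
+// For example (illustrative): BranchProbability uses a fixed denominator of
+// 1 << 31, so a probability of 1/2 maps to a weight of 1 << 30, making
+// weights directly comparable across edges without renormalization.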
+
+template <class BT>
+bool
+BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
+                                                      const BlockNode &Node) {
+  DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
+  // Calculate probability for successors.
+  Distribution Dist;
+  if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
+    assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
+    if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
+      // Irreducible backedge.
+      return false;
+  } else {
+    const BlockT *BB = getBlock(Node);
+    for (auto SI = GraphTraits<const BlockT *>::child_begin(BB),
+              SE = GraphTraits<const BlockT *>::child_end(BB);
+         SI != SE; ++SI)
+      if (!addToDist(
+              Dist, OuterLoop, Node, getNode(*SI),
+              getWeightFromBranchProb(BPI->getEdgeProbability(BB, SI))))
+        // Irreducible backedge.
+        return false;
+  }
+
+  // Distribute mass to successors, saving exit and backedge data in the
+  // loop header.
+  distributeMass(Node, OuterLoop, Dist);
+  return true;
+}
+
+template <class BT>
+raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
+  if (!F)
+    return OS;
+  OS << "block-frequency-info: " << F->getName() << "\n";
+  for (const BlockT &BB : *F) {
+    OS << " - " << bfi_detail::getBlockName(&BB) << ": float = ";
+    getFloatingBlockFreq(&BB).print(OS, 5)
+        << ", int = " << getBlockFreq(&BB).getFrequency();
+    if (Optional<uint64_t> ProfileCount =
+        BlockFrequencyInfoImplBase::getBlockProfileCount(
+            F->getFunction(), getNode(&BB)))
+      OS << ", count = " << ProfileCount.getValue();
+    if (Optional<uint64_t> IrrLoopHeaderWeight =
+        BB.getIrrLoopHeaderWeight())
+      OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.getValue();
+    OS << "\n";
+  }
+
+  // Add an extra newline for readability.
+  OS << "\n";
+  return OS;
+}
+
+// Graph trait base class for block frequency information graph
+// viewer.
+
+enum GVDAGType { GVDT_None, GVDT_Fraction, GVDT_Integer, GVDT_Count };
+
+template <class BlockFrequencyInfoT, class BranchProbabilityInfoT>
+struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
+  using GTraits = GraphTraits<BlockFrequencyInfoT *>;
+  using NodeRef = typename GTraits::NodeRef;
+  using EdgeIter = typename GTraits::ChildIteratorType;
+  using NodeIter = typename GTraits::nodes_iterator;
+
+  uint64_t MaxFrequency = 0;
+
+  explicit BFIDOTGraphTraitsBase(bool isSimple = false)
+      : DefaultDOTGraphTraits(isSimple) {}
+
+  static std::string getGraphName(const BlockFrequencyInfoT *G) {
+    return G->getFunction()->getName();
+  }
+
+  std::string getNodeAttributes(NodeRef Node, const BlockFrequencyInfoT *Graph,
+                                unsigned HotPercentThreshold = 0) {
+    std::string Result;
+    if (!HotPercentThreshold)
+      return Result;
+
+    // Compute MaxFrequency on the fly:
+    if (!MaxFrequency) {
+      for (NodeIter I = GTraits::nodes_begin(Graph),
+                    E = GTraits::nodes_end(Graph);
+           I != E; ++I) {
+        NodeRef N = *I;
+        MaxFrequency =
+            std::max(MaxFrequency, Graph->getBlockFreq(N).getFrequency());
+      }
+    }
+    BlockFrequency Freq = Graph->getBlockFreq(Node);
+    BlockFrequency HotFreq =
+        (BlockFrequency(MaxFrequency) *
+         BranchProbability::getBranchProbability(HotPercentThreshold, 100));
+
+    if (Freq < HotFreq)
+      return Result;
+
+    raw_string_ostream OS(Result);
+    OS << "color=\"red\"";
+    OS.flush();
+    return Result;
+  }
+
+  std::string getNodeLabel(NodeRef Node, const BlockFrequencyInfoT *Graph,
+                           GVDAGType GType, int layout_order = -1) {
+    std::string Result;
+    raw_string_ostream OS(Result);
+
+    if (layout_order != -1)
+      OS << Node->getName() << "[" << layout_order << "] : ";
+    else
+      OS << Node->getName() << " : ";
+    switch (GType) {
+    case GVDT_Fraction:
+      Graph->printBlockFreq(OS, Node);
+      break;
+    case GVDT_Integer:
+      OS << Graph->getBlockFreq(Node).getFrequency();
+      break;
+    case GVDT_Count: {
+      auto Count = Graph->getBlockProfileCount(Node);
+      if (Count)
+        OS << Count.getValue();
+      else
+        OS << "Unknown";
+      break;
+    }
+    case GVDT_None:
+      llvm_unreachable("If we are not supposed to render a graph we should "
+                       "never reach this point.");
+    }
+    return Result;
+  }
+
+  std::string getEdgeAttributes(NodeRef Node, EdgeIter EI,
+                                const BlockFrequencyInfoT *BFI,
+                                const BranchProbabilityInfoT *BPI,
+                                unsigned HotPercentThreshold = 0) {
+    std::string Str;
+    if (!BPI)
+      return Str;
+
+    BranchProbability BP = BPI->getEdgeProbability(Node, EI);
+    uint32_t N = BP.getNumerator();
+    uint32_t D = BP.getDenominator();
+    double Percent = 100.0 * N / D;
+    raw_string_ostream OS(Str);
+    OS << format("label=\"%.1f%%\"", Percent);
+
+    if (HotPercentThreshold) {
+      BlockFrequency EFreq = BFI->getBlockFreq(Node) * BP;
+      BlockFrequency HotFreq = BlockFrequency(MaxFrequency) *
+                               BranchProbability(HotPercentThreshold, 100);
+
+      if (EFreq >= HotFreq) {
+        OS << ",color=\"red\"";
+      }
+    }
+
+    OS.flush();
+    return Str;
+  }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h b/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h
new file mode 100644
index 0000000..417b649
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -0,0 +1,253 @@
+//===- BranchProbabilityInfo.h - Branch Probability Analysis ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to evaluate branch probabilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
+#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class Function;
+class LoopInfo;
+class raw_ostream;
+class TargetLibraryInfo;
+class Value;
+
+/// \brief Analysis providing branch probability information.
+///
+/// This is a function analysis which provides information on the relative
+/// probabilities of each "edge" in the function's CFG where such an edge is
+/// defined by a pair (PredBlock and an index in the successors). The
+/// probability of an edge from one block is always relative to the
+/// probabilities of other edges from the block. The probabilities of all edges
+/// from a block sum to exactly one (100%).
+/// We use a pair (PredBlock and an index in the successors) to uniquely
+/// identify an edge, since we can have multiple edges from Src to Dst.
+/// As an example, we can have a switch which jumps to Dst with value 0 and
+/// value 10.
+class BranchProbabilityInfo {
+public:
+  BranchProbabilityInfo() = default;
+
+  BranchProbabilityInfo(const Function &F, const LoopInfo &LI,
+                        const TargetLibraryInfo *TLI = nullptr) {
+    calculate(F, LI, TLI);
+  }
+
+  BranchProbabilityInfo(BranchProbabilityInfo &&Arg)
+      : Probs(std::move(Arg.Probs)), LastF(Arg.LastF),
+        PostDominatedByUnreachable(std::move(Arg.PostDominatedByUnreachable)),
+        PostDominatedByColdCall(std::move(Arg.PostDominatedByColdCall)) {}
+
+  BranchProbabilityInfo(const BranchProbabilityInfo &) = delete;
+  BranchProbabilityInfo &operator=(const BranchProbabilityInfo &) = delete;
+
+  BranchProbabilityInfo &operator=(BranchProbabilityInfo &&RHS) {
+    releaseMemory();
+    Probs = std::move(RHS.Probs);
+    PostDominatedByColdCall = std::move(RHS.PostDominatedByColdCall);
+    PostDominatedByUnreachable = std::move(RHS.PostDominatedByUnreachable);
+    return *this;
+  }
+
+  void releaseMemory();
+
+  void print(raw_ostream &OS) const;
+
+  /// \brief Get an edge's probability, relative to other out-edges of the Src.
+  ///
+  /// This routine provides access to the fractional probability between zero
+  /// (0%) and one (100%) of this edge executing, relative to other edges
+  /// leaving the 'Src' block. The returned probability is never zero, and can
+  /// only be one if the source block has only one successor.
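+  ///
+  /// For example (a sketch; \c BB is a hypothetical block with at least one
+  /// successor):
+  /// \code
+  ///   BranchProbability P = BPI.getEdgeProbability(BB, 0u); // first successor
+  /// \endcode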
+  BranchProbability getEdgeProbability(const BasicBlock *Src,
+                                       unsigned IndexInSuccessors) const;
+
+  /// \brief Get the probability of going from Src to Dst.
+  ///
+  /// It returns the sum of all probabilities for edges from Src to Dst.
+  BranchProbability getEdgeProbability(const BasicBlock *Src,
+                                       const BasicBlock *Dst) const;
+
+  BranchProbability getEdgeProbability(const BasicBlock *Src,
+                                       succ_const_iterator Dst) const;
+
+  /// \brief Test if an edge is hot relative to other out-edges of the Src.
+  ///
+  /// Check whether this edge out of the source block is 'hot'. We define hot
+  /// as having a relative probability >= 80%.
+  bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
+
+  /// \brief Retrieve the hot successor of a block if one exists.
+  ///
+  /// Given a basic block, look through its successors and if one exists for
+  /// which \see isEdgeHot would return true, return that successor block.
+  const BasicBlock *getHotSucc(const BasicBlock *BB) const;
+
+  /// \brief Print an edge's probability.
+  ///
+  /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
+  /// then prints that probability to the provided stream. That stream is then
+  /// returned.
+  raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
+                                    const BasicBlock *Dst) const;
+
+  /// \brief Set the raw edge probability for the given edge.
+  ///
+  /// This allows a pass to explicitly set the edge probability for an edge. It
+  /// can be used when updating the CFG to update and preserve the branch
+  /// probability information. Read the implementation of how these edge
+  /// probabilities are calculated carefully before using!
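+  ///
+  /// For example (a sketch; \c BB is a hypothetical block with exactly two
+  /// successors):
+  /// \code
+  ///   BPI.setEdgeProbability(BB, 0, BranchProbability(3, 4)); // 75% to succ 0
+  ///   BPI.setEdgeProbability(BB, 1, BranchProbability(1, 4)); // 25% to succ 1
+  /// \endcode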
+  void setEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors,
+                          BranchProbability Prob);
+
+  static BranchProbability getBranchProbStackProtector(bool IsLikely) {
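+    // A "likely" branch is modeled as (2^20 - 1) / 2^20, i.e. ~99.9999%; the
+    // unlikely case is its complement.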
+    static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
+    return IsLikely ? LikelyProb : LikelyProb.getCompl();
+  }
+
+  void calculate(const Function &F, const LoopInfo &LI,
+                 const TargetLibraryInfo *TLI = nullptr);
+
+  /// Forget analysis results for the given basic block.
+  void eraseBlock(const BasicBlock *BB);
+
+  // Used to track SCCs for handling irreducible loops.
+  using SccMap = DenseMap<const BasicBlock *, int>;
+  using SccHeaderMap = DenseMap<const BasicBlock *, bool>;
+  using SccHeaderMaps = std::vector<SccHeaderMap>;
+  struct SccInfo {
+    SccMap SccNums;
+    SccHeaderMaps SccHeaders;
+  };
+
+private:
+  // We need to store CallbackVH's in order to correctly handle basic block
+  // removal.
+  class BasicBlockCallbackVH final : public CallbackVH {
+    BranchProbabilityInfo *BPI;
+
+    void deleted() override {
+      assert(BPI != nullptr);
+      BPI->eraseBlock(cast<BasicBlock>(getValPtr()));
+      BPI->Handles.erase(*this);
+    }
+
+  public:
+    BasicBlockCallbackVH(const Value *V, BranchProbabilityInfo *BPI = nullptr)
+        : CallbackVH(const_cast<Value *>(V)), BPI(BPI) {}
+  };
+
+  DenseSet<BasicBlockCallbackVH, DenseMapInfo<Value*>> Handles;
+
+  // Since we allow duplicate edges from one basic block to another, we use
+  // a pair (PredBlock and an index in the successors) to specify an edge.
+  using Edge = std::pair<const BasicBlock *, unsigned>;
+
+  // Default weight value. Used when we don't have information about the edge.
+  // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+  // the successors have a weight yet. But it doesn't make sense when providing
+  // weight to an edge that may have siblings with non-zero weights. This can
+  // be handled in various ways, but it's probably fine for an edge with unknown
+  // weight to just "inherit" the non-zero weight of an adjacent successor.
+  static const uint32_t DEFAULT_WEIGHT = 16;
+
+  DenseMap<Edge, BranchProbability> Probs;
+
+  /// \brief Track the last function we run over for printing.
+  const Function *LastF;
+
+  /// \brief Track the set of blocks that are post-dominated by a block
+  /// terminating in unreachable.
+  SmallPtrSet<const BasicBlock *, 16> PostDominatedByUnreachable;
+
+  /// \brief Track the set of blocks that always lead to a cold call.
+  SmallPtrSet<const BasicBlock *, 16> PostDominatedByColdCall;
+
+  void updatePostDominatedByUnreachable(const BasicBlock *BB);
+  void updatePostDominatedByColdCall(const BasicBlock *BB);
+  bool calcUnreachableHeuristics(const BasicBlock *BB);
+  bool calcMetadataWeights(const BasicBlock *BB);
+  bool calcColdCallHeuristics(const BasicBlock *BB);
+  bool calcPointerHeuristics(const BasicBlock *BB);
+  bool calcLoopBranchHeuristics(const BasicBlock *BB, const LoopInfo &LI,
+                                SccInfo &SccI);
+  bool calcZeroHeuristics(const BasicBlock *BB, const TargetLibraryInfo *TLI);
+  bool calcFloatingPointHeuristics(const BasicBlock *BB);
+  bool calcInvokeHeuristics(const BasicBlock *BB);
+};
+
+/// \brief Analysis pass which computes \c BranchProbabilityInfo.
+class BranchProbabilityAnalysis
+    : public AnalysisInfoMixin<BranchProbabilityAnalysis> {
+  friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result type for this analysis pass.
+  using Result = BranchProbabilityInfo;
+
+  /// \brief Run the analysis pass over a function and produce BPI.
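+  ///
+  /// Typical usage from another new-PM pass (a sketch; \c F and \c AM are the
+  /// Function and FunctionAnalysisManager handed to that pass):
+  /// \code
+  ///   auto &BPI = AM.getResult<BranchProbabilityAnalysis>(F);
+  /// \endcode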
+  BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
+class BranchProbabilityPrinterPass
+    : public PassInfoMixin<BranchProbabilityPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit BranchProbabilityPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
+class BranchProbabilityInfoWrapperPass : public FunctionPass {
+  BranchProbabilityInfo BPI;
+
+public:
+  static char ID;
+
+  BranchProbabilityInfoWrapperPass() : FunctionPass(ID) {
+    initializeBranchProbabilityInfoWrapperPassPass(
+        *PassRegistry::getPassRegistry());
+  }
+
+  BranchProbabilityInfo &getBPI() { return BPI; }
+  const BranchProbabilityInfo &getBPI() const { return BPI; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CFG.h b/linux-x64/clang/include/llvm/Analysis/CFG.h
new file mode 100644
index 0000000..d569464
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CFG.h
@@ -0,0 +1,161 @@
+//===-- Analysis/CFG.h - BasicBlock Analyses --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions performs analyses on basic blocks, and instructions
+// contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFG_H
+#define LLVM_ANALYSIS_CFG_H
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+
+namespace llvm {
+
+class BasicBlock;
+class DominatorTree;
+class Function;
+class Instruction;
+class LoopInfo;
+class TerminatorInst;
+
+/// Analyze the specified function to find all of the loop backedges in the
+/// function and return them.  This is a relatively cheap (compared to
+/// computing dominators and loop info) analysis.
+///
+/// The output is added to Result, as pairs of <from,to> edge info.
+void FindFunctionBackedges(
+    const Function &F,
+    SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
+        Result);
+
+/// Search for the specified successor of basic block BB and return its position
+/// in the terminator instruction's list of successors.  It is an error to call
+/// this with a block that is not a successor.
+unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
+
+/// Return true if the specified edge is a critical edge. Critical edges are
+/// edges from a block with multiple successors to a block with multiple
+/// predecessors.
+///
+bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
+                    bool AllowIdenticalEdges = false);
+
+/// \brief Determine whether instruction 'To' is reachable from 'From',
+/// returning true if uncertain.
+///
+/// Determine whether there is a path from From to To within a single function.
+/// Returns false only if we can prove that once 'From' has been executed then
+/// 'To' can not be executed. Conservatively returns true.
+///
+/// This function is linear with respect to the number of blocks in the CFG,
+/// walking down successors from From to reach To, with a fixed threshold.
+/// Using DT or LI allows us to answer more quickly. LI reduces the cost of
+/// an entire loop of any number of blocks to be the same as the cost of a
+/// single block. DT reduces the cost by allowing the search to terminate when
+/// we find a block that dominates the block containing 'To'. DT is most useful
+/// on branchy code but not loops, and LI is most useful on code with loops but
+/// does not help on branchy code outside loops.
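+///
+/// For example (a sketch; \c StoreI and \c LoadI are hypothetical instructions
+/// in the same function):
+/// \code
+///   if (!isPotentiallyReachable(StoreI, LoadI, &DT, &LI)) {
+///     // LoadI provably never executes after StoreI.
+///   }
+/// \endcode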
+bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
+                            const DominatorTree *DT = nullptr,
+                            const LoopInfo *LI = nullptr);
+
+/// \brief Determine whether block 'To' is reachable from 'From', returning
+/// true if uncertain.
+///
+/// Determine whether there is a path from From to To within a single function.
+/// Returns false only if we can prove that once 'From' has been reached then
+/// 'To' can not be executed. Conservatively returns true.
+bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
+                            const DominatorTree *DT = nullptr,
+                            const LoopInfo *LI = nullptr);
+
+/// \brief Determine whether there is at least one path from a block in
+/// 'Worklist' to 'StopBB', returning true if uncertain.
+///
+/// Determine whether there is a path from at least one block in Worklist to
+/// StopBB within a single function. Returns false only if we can prove that
+/// once any block in 'Worklist' has been reached then 'StopBB' can not be
+/// executed. Conservatively returns true.
+bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
+                                    BasicBlock *StopBB,
+                                    const DominatorTree *DT = nullptr,
+                                    const LoopInfo *LI = nullptr);
+
+/// \brief Return true if the control flow in \p RPOTraversal is irreducible.
+///
+/// This is a generic implementation to detect CFG irreducibility based on loop
+/// info analysis. It can be used for any kind of CFG (Loop, MachineLoop,
+/// Function, MachineFunction, etc.) by providing an RPO traversal (\p
+/// RPOTraversal) and the loop info analysis (\p LI) of the CFG. This utility
+/// function is only recommended when loop info analysis is available. If loop
+/// info analysis isn't available, please, don't compute it explicitly for this
+/// purpose. There are more efficient ways to detect CFG irreducibility that
+/// don't require recomputing loop info analysis (e.g., T1/T2 or Tarjan's
+/// algorithm).
+///
+/// Requirements:
+///   1) GraphTraits must be implemented for NodeT type. It is used to access
+///      NodeT successors.
+///   2) \p RPOTraversal must be a valid reverse post-order traversal of the
+///      target CFG with begin()/end() iterator interfaces.
+///   3) \p LI must be a valid LoopInfoBase that contains up-to-date loop
+///      analysis information of the CFG.
+///
+/// This algorithm uses the information about reducible loop back-edges already
+/// computed in \p LI. When a back-edge is found during the RPO traversal, the
+/// algorithm checks whether the back-edge is one of the reducible back-edges in
+/// loop info. If it isn't, the CFG is irreducible. For example, for the CFG
+/// below (canonical irreducible graph) loop info won't contain any loop, so the
+/// algorithm will report the CFG as irreducible when it checks the B <-> C
+/// back-edge.
+///
+/// (A->B, A->C, B->C, C->B, C->D)
+///    A
+///  /   \
+/// B<- ->C
+///       |
+///       D
+///
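+/// Example usage (a sketch; \c F is a Function and \c LI its up-to-date
+/// LoopInfo; ReversePostOrderTraversal requires including
+/// llvm/ADT/PostOrderIterator.h):
+/// \code
+///   ReversePostOrderTraversal<const Function *> RPOT(&F);
+///   bool Irreducible = containsIrreducibleCFG<const BasicBlock *>(RPOT, LI);
+/// \endcode
+///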
+template <class NodeT, class RPOTraversalT, class LoopInfoT,
+          class GT = GraphTraits<NodeT>>
+bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI) {
+  /// Check whether the edge (\p Src, \p Dst) is a reducible loop backedge
+  /// according to LI. I.e., check if there exists a loop that contains Src and
+  /// where Dst is the loop header.
+  auto isProperBackedge = [&](NodeT Src, NodeT Dst) {
+    for (const auto *Lp = LI.getLoopFor(Src); Lp; Lp = Lp->getParentLoop()) {
+      if (Lp->getHeader() == Dst)
+        return true;
+    }
+    return false;
+  };
+
+  SmallPtrSet<NodeT, 32> Visited;
+  for (NodeT Node : RPOTraversal) {
+    Visited.insert(Node);
+    for (NodeT Succ : make_range(GT::child_begin(Node), GT::child_end(Node))) {
+      // Succ hasn't been visited yet
+      if (!Visited.count(Succ))
+        continue;
+      // We already visited Succ, thus Node->Succ must be a backedge. Check that
+      // the head matches what we have in the loop information. Otherwise, we
+      // have an irreducible graph.
+      if (!isProperBackedge(Node, Succ))
+        return true;
+    }
+  }
+
+  return false;
+}
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/CFGPrinter.h b/linux-x64/clang/include/llvm/Analysis/CFGPrinter.h
new file mode 100644
index 0000000..5786769
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CFGPrinter.h
@@ -0,0 +1,187 @@
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a 'dot-cfg' analysis pass, which emits the
+// cfg.<fnname>.dot file for each function in the program, with a graph of the
+// CFG for that function.
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the CFG printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFGPRINTER_H
+#define LLVM_ANALYSIS_CFGPRINTER_H
+
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/GraphWriter.h"
+
+namespace llvm {
+class CFGViewerPass
+    : public PassInfoMixin<CFGViewerPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGOnlyViewerPass
+    : public PassInfoMixin<CFGOnlyViewerPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGPrinterPass
+    : public PassInfoMixin<CFGPrinterPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGOnlyPrinterPass
+    : public PassInfoMixin<CFGOnlyPrinterPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+template<>
+struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
+
+  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+  static std::string getGraphName(const Function *F) {
+    return "CFG for '" + F->getName().str() + "' function";
+  }
+
+  static std::string getSimpleNodeLabel(const BasicBlock *Node,
+                                        const Function *) {
+    if (!Node->getName().empty())
+      return Node->getName().str();
+
+    std::string Str;
+    raw_string_ostream OS(Str);
+
+    Node->printAsOperand(OS, false);
+    return OS.str();
+  }
+
+  static std::string getCompleteNodeLabel(const BasicBlock *Node,
+                                          const Function *) {
+    enum { MaxColumns = 80 };
+    std::string Str;
+    raw_string_ostream OS(Str);
+
+    if (Node->getName().empty()) {
+      Node->printAsOperand(OS, false);
+      OS << ":";
+    }
+
+    OS << *Node;
+    std::string OutStr = OS.str();
+    if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+    // Process string output to make it nicer...
+    unsigned ColNum = 0;
+    unsigned LastSpace = 0;
+    for (unsigned i = 0; i != OutStr.length(); ++i) {
+      if (OutStr[i] == '\n') {                            // Left justify
+        OutStr[i] = '\\';
+        OutStr.insert(OutStr.begin()+i+1, 'l');
+        ColNum = 0;
+        LastSpace = 0;
+      } else if (OutStr[i] == ';') {                      // Delete comments!
+        unsigned Idx = OutStr.find('\n', i+1);            // Find end of line
+        OutStr.erase(OutStr.begin()+i, OutStr.begin()+Idx);
+        --i;
+      } else if (ColNum == MaxColumns) {                  // Wrap lines.
+        // Wrap very long names even though we can't find a space.
+        if (!LastSpace)
+          LastSpace = i;
+        OutStr.insert(LastSpace, "\\l...");
+        ColNum = i - LastSpace;
+        LastSpace = 0;
+        i += 3; // The loop will advance 'i' again.
+      }
+      else
+        ++ColNum;
+      if (OutStr[i] == ' ')
+        LastSpace = i;
+    }
+    return OutStr;
+  }
+
+  std::string getNodeLabel(const BasicBlock *Node,
+                           const Function *Graph) {
+    if (isSimple())
+      return getSimpleNodeLabel(Node, Graph);
+    else
+      return getCompleteNodeLabel(Node, Graph);
+  }
+
+  static std::string getEdgeSourceLabel(const BasicBlock *Node,
+                                        succ_const_iterator I) {
+    // Label source of conditional branches with "T" or "F"
+    if (const BranchInst *BI = dyn_cast<BranchInst>(Node->getTerminator()))
+      if (BI->isConditional())
+        return (I == succ_begin(Node)) ? "T" : "F";
+
+    // Label source of switch edges with the associated value.
+    if (const SwitchInst *SI = dyn_cast<SwitchInst>(Node->getTerminator())) {
+      unsigned SuccNo = I.getSuccessorIndex();
+
+      if (SuccNo == 0) return "def";
+
+      std::string Str;
+      raw_string_ostream OS(Str);
+      auto Case = *SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
+      OS << Case.getCaseValue()->getValue();
+      return OS.str();
+    }
+    return "";
+  }
+
+  /// Display the raw branch weights from PGO.
+  std::string getEdgeAttributes(const BasicBlock *Node, succ_const_iterator I,
+                                const Function *F) {
+    const TerminatorInst *TI = Node->getTerminator();
+    if (TI->getNumSuccessors() == 1)
+      return "";
+
+    MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
+    if (!WeightsNode)
+      return "";
+
+    MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
+    if (MDName->getString() != "branch_weights")
+      return "";
+
+    unsigned OpNo = I.getSuccessorIndex() + 1;
+    if (OpNo >= WeightsNode->getNumOperands())
+      return "";
+    ConstantInt *Weight =
+        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(OpNo));
+    if (!Weight)
+      return "";
+
+    // Prepend a 'W' to indicate that this is a weight rather than the actual
+    // profile count (due to scaling).
+    Twine Attrs = "label=\"W:" + Twine(Weight->getZExtValue()) + "\"";
+    return Attrs.str();
+  }
+};
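+
+// Example (a sketch): with these traits in scope, a function's CFG can be
+// rendered through the generic GraphWriter utilities, e.g.:
+//   ViewGraph(&F, "cfg." + F.getName());      // open in a graph viewer
+//   WriteGraph(OS, &F, /*ShortNames=*/true);  // emit .dot, simple labels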
+} // End llvm namespace
+
+namespace llvm {
+  class FunctionPass;
+  FunctionPass *createCFGPrinterLegacyPassPass();
+  FunctionPass *createCFGOnlyPrinterLegacyPassPass();
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/CFLAliasAnalysisUtils.h b/linux-x64/clang/include/llvm/Analysis/CFLAliasAnalysisUtils.h
new file mode 100644
index 0000000..981a8dd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CFLAliasAnalysisUtils.h
@@ -0,0 +1,58 @@
+//=- CFLAliasAnalysisUtils.h - Utilities for CFL Alias Analysis ----*- C++-*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// \file
+// These are the utilities/helpers used by the CFL Alias Analyses available
+// in-tree, i.e. Steensgaard's and Andersen's.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
+#define LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+namespace cflaa {
+
+template <typename AAResult> struct FunctionHandle final : public CallbackVH {
+  FunctionHandle(Function *Fn, AAResult *Result)
+      : CallbackVH(Fn), Result(Result) {
+    assert(Fn != nullptr);
+    assert(Result != nullptr);
+  }
+
+  void deleted() override { removeSelfFromCache(); }
+  void allUsesReplacedWith(Value *) override { removeSelfFromCache(); }
+
+private:
+  AAResult *Result;
+
+  void removeSelfFromCache() {
+    assert(Result != nullptr);
+    auto *Val = getValPtr();
+    Result->evict(cast<Function>(Val));
+    setValPtr(nullptr);
+  }
+};
+
+static inline const Function *parentFunctionOfValue(const Value *Val) {
+  if (auto *Inst = dyn_cast<Instruction>(Val)) {
+    auto *Bb = Inst->getParent();
+    return Bb->getParent();
+  }
+
+  if (auto *Arg = dyn_cast<Argument>(Val))
+    return Arg->getParent();
+  return nullptr;
+}
+} // namespace cflaa
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h
new file mode 100644
index 0000000..6239d53
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h
@@ -0,0 +1,126 @@
+//==- CFLAndersAliasAnalysis.h - Inclusion-based Alias Analysis --*- C++-*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for LLVM's inclusion-based alias analysis
+/// implemented with CFL graph reachability.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
+#define LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CFLAliasAnalysisUtils.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <forward_list>
+#include <memory>
+
+namespace llvm {
+
+class Function;
+class MemoryLocation;
+class TargetLibraryInfo;
+
+namespace cflaa {
+
+struct AliasSummary;
+
+} // end namespace cflaa
+
+class CFLAndersAAResult : public AAResultBase<CFLAndersAAResult> {
+  friend AAResultBase<CFLAndersAAResult>;
+
+  class FunctionInfo;
+
+public:
+  explicit CFLAndersAAResult(const TargetLibraryInfo &TLI);
+  CFLAndersAAResult(CFLAndersAAResult &&RHS);
+  ~CFLAndersAAResult();
+
+  /// Handle invalidation events from the new pass manager.
+  /// By definition, this result is stateless and so remains valid.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  /// Evict the given function from cache
+  void evict(const Function *Fn);
+
+  /// \brief Get the alias summary for the given function
+  /// Return nullptr if the summary is not found or not available
+  const cflaa::AliasSummary *getAliasSummary(const Function &);
+
+  AliasResult query(const MemoryLocation &, const MemoryLocation &);
+  AliasResult alias(const MemoryLocation &, const MemoryLocation &);
+
+private:
+  /// \brief Ensures that the given function is available in the cache.
+  /// Returns the appropriate entry from the cache.
+  const Optional<FunctionInfo> &ensureCached(const Function &);
+
+  /// \brief Inserts the given Function into the cache.
+  void scan(const Function &);
+
+  /// \brief Build summary for a given function
+  FunctionInfo buildInfoFrom(const Function &);
+
+  const TargetLibraryInfo &TLI;
+
+  /// \brief Cached mapping of Functions to their StratifiedSets.
+  /// If a function's sets are currently being built, it is marked
+  /// in the cache as an Optional without a value. This way, if we
+  /// have any kind of recursion, it is discernable from a function
+  /// that simply has empty sets.
+  DenseMap<const Function *, Optional<FunctionInfo>> Cache;
+
+  std::forward_list<cflaa::FunctionHandle<CFLAndersAAResult>> Handles;
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+///
+/// FIXME: We really should refactor CFL to use the analysis more heavily, and
+/// in particular to leverage invalidation to trigger re-computation.
+class CFLAndersAA : public AnalysisInfoMixin<CFLAndersAA> {
+  friend AnalysisInfoMixin<CFLAndersAA>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = CFLAndersAAResult;
+
+  CFLAndersAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the CFLAndersAAResult object.
+class CFLAndersAAWrapperPass : public ImmutablePass {
+  std::unique_ptr<CFLAndersAAResult> Result;
+
+public:
+  static char ID;
+
+  CFLAndersAAWrapperPass();
+
+  CFLAndersAAResult &getResult() { return *Result; }
+  const CFLAndersAAResult &getResult() const { return *Result; }
+
+  void initializePass() override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+// createCFLAndersAAWrapperPass - This pass implements a set-based approach to
+// alias analysis.
+ImmutablePass *createCFLAndersAAWrapperPass();
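+//
+// Example (a sketch; 'PM' is a hypothetical legacy PassManager):
+//   PM.add(createCFLAndersAAWrapperPass());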
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h
new file mode 100644
index 0000000..ee9e290
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h
@@ -0,0 +1,143 @@
+//==- CFLSteensAliasAnalysis.h - Unification-based Alias Analysis -*- C++-*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for LLVM's unification-based alias analysis
+/// implemented with CFL graph reachability.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
+#define LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CFLAliasAnalysisUtils.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include <forward_list>
+#include <memory>
+
+namespace llvm {
+
+class Function;
+class TargetLibraryInfo;
+
+namespace cflaa {
+
+struct AliasSummary;
+
+} // end namespace cflaa
+
+class CFLSteensAAResult : public AAResultBase<CFLSteensAAResult> {
+  friend AAResultBase<CFLSteensAAResult>;
+
+  class FunctionInfo;
+
+public:
+  explicit CFLSteensAAResult(const TargetLibraryInfo &TLI);
+  CFLSteensAAResult(CFLSteensAAResult &&Arg);
+  ~CFLSteensAAResult();
+
+  /// Handle invalidation events from the new pass manager.
+  ///
+  /// By definition, this result is stateless and so remains valid.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  /// \brief Inserts the given Function into the cache.
+  void scan(Function *Fn);
+
+  void evict(Function *Fn);
+
+  /// \brief Ensures that the given function is available in the cache.
+  /// Returns the appropriate entry from the cache.
+  const Optional<FunctionInfo> &ensureCached(Function *Fn);
+
+  /// \brief Get the alias summary for the given function
+  /// Return nullptr if the summary is not found or not available
+  const cflaa::AliasSummary *getAliasSummary(Function &Fn);
+
+  AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    if (LocA.Ptr == LocB.Ptr)
+      return MustAlias;
+
+    // Comparisons between global variables and other constants should be
+    // handled by BasicAA.
+    // CFLSteensAA may report NoAlias when comparing a GlobalValue and
+    // ConstantExpr, but every query needs to have at least one Value tied to a
+    // Function, and neither GlobalValues nor ConstantExprs are.
+    if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
+      return AAResultBase::alias(LocA, LocB);
+
+    AliasResult QueryResult = query(LocA, LocB);
+    if (QueryResult == MayAlias)
+      return AAResultBase::alias(LocA, LocB);
+
+    return QueryResult;
+  }
+
+private:
+  const TargetLibraryInfo &TLI;
+
+  /// \brief Cached mapping of Functions to their StratifiedSets.
+  /// If a function's sets are currently being built, it is marked
+  /// in the cache as an Optional without a value. This way, if we
+  /// have any kind of recursion, it is discernable from a function
+  /// that simply has empty sets.
+  DenseMap<Function *, Optional<FunctionInfo>> Cache;
+  std::forward_list<cflaa::FunctionHandle<CFLSteensAAResult>> Handles;
+
+  FunctionInfo buildSetsFrom(Function *F);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+///
+/// FIXME: We really should refactor CFL to use the analysis more heavily, and
+/// in particular to leverage invalidation to trigger re-computation of sets.
+class CFLSteensAA : public AnalysisInfoMixin<CFLSteensAA> {
+  friend AnalysisInfoMixin<CFLSteensAA>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = CFLSteensAAResult;
+
+  CFLSteensAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the CFLSteensAAResult object.
+class CFLSteensAAWrapperPass : public ImmutablePass {
+  std::unique_ptr<CFLSteensAAResult> Result;
+
+public:
+  static char ID;
+
+  CFLSteensAAWrapperPass();
+
+  CFLSteensAAResult &getResult() { return *Result; }
+  const CFLSteensAAResult &getResult() const { return *Result; }
+
+  void initializePass() override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+// createCFLSteensAAWrapperPass - This pass implements a set-based approach to
+// alias analysis.
+ImmutablePass *createCFLSteensAAWrapperPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h b/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h
new file mode 100644
index 0000000..457d5a0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h
@@ -0,0 +1,840 @@
+//===- CGSCCPassManager.h - Call graph pass management ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing passes over SCCs of the call
+/// graph. These passes form an important component of LLVM's interprocedural
+/// optimizations. Because they operate on the SCCs of the call graph, and they
+/// traverse the graph in post-order, they can effectively do pair-wise
+/// interprocedural optimizations for all call edges in the program while
+/// incrementally refining it and improving the context of these pair-wise
+/// optimizations. At each call site edge, the callee has already been
+/// optimized as much as is possible. This in turn allows very accurate
+/// analysis of it for IPO.
+///
+/// A secondary more general goal is to be able to isolate optimization on
+/// unrelated parts of the IR module. This is useful to ensure our
+/// optimizations are principled and don't miss opportunities where refinement
+/// of one part of the module influence transformations in another part of the
+/// module. But this is also useful if we want to parallelize the optimizations
+/// across common large module graph shapes which tend to be very wide and have
+/// large regions of unrelated cliques.
+///
+/// To satisfy these goals, we use the LazyCallGraph which provides two graphs
+/// nested inside each other (and built lazily from the bottom-up): the call
+/// graph proper, and a reference graph. The reference graph is a superset of
+/// the call graph and is a conservative approximation of what could, through
+/// scalar or CGSCC transforms, *become* the call graph. Using this allows us to
+/// ensure we optimize functions prior to them being introduced into the call
+/// graph by devirtualization or other techniques, and thus ensures that
+/// subsequent pair-wise interprocedural optimizations observe the optimized
+/// form of these functions. The (potentially transitive) reference
+/// reachability used by the reference graph is a conservative approximation
+/// that still allows us to have independent regions of the graph.
+///
+/// FIXME: There is one major drawback of the reference graph: in its naive
+/// form it is quadratic because it contains a distinct edge for each
+/// (potentially indirect) reference, even if they all go through some common
+/// global table of function pointers. This can be fixed in a number of ways
+/// that essentially preserve enough of the normalization. While it isn't
+/// expected to completely preclude the usability of this, it will need to be
+/// addressed.
+///
+/// All of these issues are made substantially more complex in the face of
+/// mutations to the call graph while optimization passes are being run. When
+/// mutations to the call graph occur we want to achieve two different things:
+///
+/// - We need to update the call graph in-flight and invalidate analyses
+///   cached on entities in the graph. Because of the cache-based analysis
+///   design of the pass manager, it is essential to have stable identities for
+///   the elements of the IR that passes traverse, and to invalidate any
+///   analyses cached on these elements as the mutations take place.
+///
+/// - We want to preserve the incremental and post-order traversal of the
+///   graph even as it is refined and mutated. This means we want optimization
+///   to observe the most refined form of the call graph and to do so in
+///   post-order.
+///
+/// To address this, the CGSCC manager uses both worklists that can be expanded
+/// by passes which transform the IR, and provides invalidation tests to skip
+/// entries that become dead. This extra data is provided to every SCC pass so
+/// that it can carefully update the manager's traversal as the call graph
+/// mutates.
+///
+/// We also provide support for running function passes within the CGSCC walk,
+/// and there we provide automatic update of the call graph including of the
+/// pass manager to reflect call graph changes that fall out naturally as part
+/// of scalar transformations.
+///
+/// The patterns used to ensure the goals of post-order visitation of the fully
+/// refined graph:
+///
+/// 1) Sink toward the "bottom" as the graph is refined. This means that any
+///    iteration continues in some valid post-order sequence after the mutation
+///    has altered the structure.
+///
+/// 2) Enqueue in post-order, including the current entity. If the current
+///    entity's shape changes, it and everything after it in post-order needs
+///    to be visited to observe that shape.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
+#define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/PriorityWorklist.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <utility>
+
+namespace llvm {
+
+struct CGSCCUpdateResult;
+class Module;
+
+// Allow debug logging in this inline function.
+#define DEBUG_TYPE "cgscc"
+
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn<LazyCallGraph::SCC>;
+
+extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+
+/// \brief The CGSCC analysis manager.
+///
+/// See the documentation for the AnalysisManager template for detail
+/// documentation. This type serves as a convenient way to refer to this
+/// construct in the adaptors and proxies used to integrate this into the larger
+/// pass manager infrastructure.
+using CGSCCAnalysisManager =
+    AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
+            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
+                                      CGSCCAnalysisManager &AM,
+                                      LazyCallGraph &G, CGSCCUpdateResult &UR);
+extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
+                                  LazyCallGraph &, CGSCCUpdateResult &>;
+
+/// \brief The CGSCC pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of SCC passes over each SCC that the manager is run over. This
+/// type serves as a convenient way to refer to this construct.
+using CGSCCPassManager =
+    PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
+                CGSCCUpdateResult &>;
+
+/// An explicit specialization of the require analysis template pass.
+template <typename AnalysisT>
+struct RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC, CGSCCAnalysisManager,
+                           LazyCallGraph &, CGSCCUpdateResult &>
+    : PassInfoMixin<RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC,
+                                        CGSCCAnalysisManager, LazyCallGraph &,
+                                        CGSCCUpdateResult &>> {
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &) {
+    (void)AM.template getResult<AnalysisT>(C, CG);
+    return PreservedAnalyses::all();
+  }
+};
+
+/// A proxy from a \c CGSCCAnalysisManager to a \c Module.
+using CGSCCAnalysisManagerModuleProxy =
+    InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
+
+/// We need a specialized result for the \c CGSCCAnalysisManagerModuleProxy so
+/// it can have access to the call graph in order to walk all the SCCs when
+/// invalidating things.
+template <> class CGSCCAnalysisManagerModuleProxy::Result {
+public:
+  explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
+      : InnerAM(&InnerAM), G(&G) {}
+
+  /// \brief Accessor for the analysis manager.
+  CGSCCAnalysisManager &getManager() { return *InnerAM; }
+
+  /// \brief Handler for invalidation of the Module.
+  ///
+  /// If the proxy analysis itself is preserved, then we assume that the set of
+  /// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
+  /// CGSCCAnalysisManager are still valid, and we don't need to call \c clear
+  /// on the CGSCCAnalysisManager.
+  ///
+  /// Regardless of whether this analysis is marked as preserved, all of the
+  /// analyses in the \c CGSCCAnalysisManager are potentially invalidated based
+  /// on the set of preserved analyses.
+  bool invalidate(Module &M, const PreservedAnalyses &PA,
+                  ModuleAnalysisManager::Invalidator &Inv);
+
+private:
+  CGSCCAnalysisManager *InnerAM;
+  LazyCallGraph *G;
+};
+
+/// Provide a specialized run method for the \c CGSCCAnalysisManagerModuleProxy
+/// so it can pass the lazy call graph to the result.
+template <>
+CGSCCAnalysisManagerModuleProxy::Result
+CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM);
+
+// Ensure the \c CGSCCAnalysisManagerModuleProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
+
+extern template class OuterAnalysisManagerProxy<
+    ModuleAnalysisManager, LazyCallGraph::SCC, LazyCallGraph &>;
+
+/// A proxy from a \c ModuleAnalysisManager to an \c SCC.
+using ModuleAnalysisManagerCGSCCProxy =
+    OuterAnalysisManagerProxy<ModuleAnalysisManager, LazyCallGraph::SCC,
+                              LazyCallGraph &>;
+
+/// Support structure for SCC passes to communicate updates to the call graph
+/// back to the CGSCC pass manager infrastructure.
+///
+/// The CGSCC pass manager runs SCC passes which are allowed to update the call
+/// graph and SCC structures. This means the structure the pass manager works
+/// on is mutating underneath it. In order to support that, there needs to be
+/// careful communication about the precise nature and ramifications of these
+/// updates to the pass management infrastructure.
+///
+/// All SCC passes will have to accept a reference to the management layer's
+/// update result struct and use it to reflect the results of any CG updates
+/// performed.
+///
+/// Passes which do not change the call graph structure in any way can just
+/// ignore this argument to their run method.
+struct CGSCCUpdateResult {
+  /// Worklist of the RefSCCs queued for processing.
+  ///
+  /// When a pass refines the graph and creates new RefSCCs or causes them to
+  /// have a different shape or set of component SCCs it should add the RefSCCs
+  /// to this worklist so that we visit them in the refined form.
+  ///
+  /// This worklist is in reverse post-order, as we pop off the back in order
+  /// to observe RefSCCs in post-order. When adding RefSCCs, clients should add
+  /// them in reverse post-order.
+  SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> &RCWorklist;
+
+  /// Worklist of the SCCs queued for processing.
+  ///
+  /// When a pass refines the graph and creates new SCCs or causes them to have
+  /// a different shape or set of component functions it should add the SCCs to
+  /// this worklist so that we visit them in the refined form.
+  ///
+  /// Note that if the SCCs are part of a RefSCC that is added to the \c
+  /// RCWorklist, they don't need to be added here as visiting the RefSCC will
+  /// be sufficient to re-visit the SCCs within it.
+  ///
+  /// This worklist is in reverse post-order, as we pop off the back in order
+  /// to observe SCCs in post-order. When adding SCCs, clients should add them
+  /// in reverse post-order.
+  SmallPriorityWorklist<LazyCallGraph::SCC *, 1> &CWorklist;
+
+  /// The set of invalidated RefSCCs which should be skipped if they are found
+  /// in \c RCWorklist.
+  ///
+  /// This is used to quickly prune out RefSCCs when they get deleted and
+  /// happen to already be on the worklist. We use this primarily to avoid
+  /// scanning the list and removing entries from it.
+  SmallPtrSetImpl<LazyCallGraph::RefSCC *> &InvalidatedRefSCCs;
+
+  /// The set of invalidated SCCs which should be skipped if they are found
+  /// in \c CWorklist.
+  ///
+  /// This is used to quickly prune out SCCs when they get deleted and happen
+  /// to already be on the worklist. We use this primarily to avoid scanning
+  /// the list and removing entries from it.
+  SmallPtrSetImpl<LazyCallGraph::SCC *> &InvalidatedSCCs;
+
+  /// If non-null, the updated current \c RefSCC being processed.
+  ///
+  /// This is set when a graph refinement takes place and the "current" point in
+  /// the graph moves "down" or earlier in the post-order walk. This will often
+  /// cause the "current" RefSCC to be a newly created RefSCC object and the
+  /// old one to be added to the above worklist. When that happens, this
+  /// pointer is non-null and can be used to continue processing the "top" of
+  /// the post-order walk.
+  LazyCallGraph::RefSCC *UpdatedRC;
+
+  /// If non-null, the updated current \c SCC being processed.
+  ///
+  /// This is set when a graph refinement takes place and the "current" point in
+  /// the graph moves "down" or earlier in the post-order walk. This will often
+  /// cause the "current" SCC to be a newly created SCC object and the old one
+  /// to be added to the above worklist. When that happens, this pointer is
+  /// non-null and can be used to continue processing the "top" of the
+  /// post-order walk.
+  LazyCallGraph::SCC *UpdatedC;
+
+  /// A hacky area where the inliner can retain history about inlining
+  /// decisions that mutated the call graph's SCC structure in order to avoid
+  /// infinite inlining. See the comments in the inliner's CG update logic.
+  ///
+  /// FIXME: Keeping this here seems like a big layering issue, we should look
+  /// for a better technique.
+  SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
+      &InlinedInternalEdges;
+};
+
+/// \brief The core module pass which does a post-order walk of the SCCs and
+/// runs a CGSCC pass over each one.
+///
+/// Designed to allow composition of a CGSCCPass(Manager) and
+/// a ModulePassManager. Note that this pass must be run with a module analysis
+/// manager as it uses the LazyCallGraph analysis. It will also run the
+/// \c CGSCCAnalysisManagerModuleProxy analysis prior to running the CGSCC
+/// pass over the module to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename CGSCCPassT>
+class ModuleToPostOrderCGSCCPassAdaptor
+    : public PassInfoMixin<ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>> {
+public:
+  explicit ModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass)
+      : Pass(std::move(Pass)) {}
+
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  ModuleToPostOrderCGSCCPassAdaptor(
+      const ModuleToPostOrderCGSCCPassAdaptor &Arg)
+      : Pass(Arg.Pass) {}
+
+  ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
+      : Pass(std::move(Arg.Pass)) {}
+
+  friend void swap(ModuleToPostOrderCGSCCPassAdaptor &LHS,
+                   ModuleToPostOrderCGSCCPassAdaptor &RHS) {
+    std::swap(LHS.Pass, RHS.Pass);
+  }
+
+  ModuleToPostOrderCGSCCPassAdaptor &
+  operator=(ModuleToPostOrderCGSCCPassAdaptor RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  /// \brief Runs the CGSCC pass across every SCC in the module.
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
+    // Setup the CGSCC analysis manager from its proxy.
+    CGSCCAnalysisManager &CGAM =
+        AM.getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();
+
+    // Get the call graph for this module.
+    LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
+
+    // We keep worklists to allow us to push more work onto the pass manager as
+    // the passes are run.
+    SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> RCWorklist;
+    SmallPriorityWorklist<LazyCallGraph::SCC *, 1> CWorklist;
+
+    // Keep sets for invalidated SCCs and RefSCCs that should be skipped when
+    // iterating off the worklists.
+    SmallPtrSet<LazyCallGraph::RefSCC *, 4> InvalidRefSCCSet;
+    SmallPtrSet<LazyCallGraph::SCC *, 4> InvalidSCCSet;
+
+    SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
+        InlinedInternalEdges;
+
+    CGSCCUpdateResult UR = {RCWorklist,          CWorklist, InvalidRefSCCSet,
+                            InvalidSCCSet,       nullptr,   nullptr,
+                            InlinedInternalEdges};
+
+    PreservedAnalyses PA = PreservedAnalyses::all();
+    CG.buildRefSCCs();
+    for (auto RCI = CG.postorder_ref_scc_begin(),
+              RCE = CG.postorder_ref_scc_end();
+         RCI != RCE;) {
+      assert(RCWorklist.empty() &&
+             "Should always start with an empty RefSCC worklist");
+      // The postorder_ref_sccs range we are walking is lazily constructed, so
+      // we only push the first one onto the worklist. The worklist allows us
+      // to capture *new* RefSCCs created during transformations.
+      //
+      // We really want to form RefSCCs lazily because that makes them cheaper
+      // to update as the program is simplified and allows us to have greater
+      // cache locality as forming a RefSCC touches all the parts of all the
+      // functions within that RefSCC.
+      //
+      // We also eagerly increment the iterator to the next position because
+      // the CGSCC passes below may delete the current RefSCC.
+      RCWorklist.insert(&*RCI++);
+
+      do {
+        LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
+        if (InvalidRefSCCSet.count(RC)) {
+          DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
+          continue;
+        }
+
+        assert(CWorklist.empty() &&
+               "Should always start with an empty SCC worklist");
+
+        DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
+                     << "\n");
+
+        // Push the initial SCCs in reverse post-order as we'll pop off the
+        // back and so see this in post-order.
+        for (LazyCallGraph::SCC &C : llvm::reverse(*RC))
+          CWorklist.insert(&C);
+
+        do {
+          LazyCallGraph::SCC *C = CWorklist.pop_back_val();
+          // Due to call graph mutations, we may have invalid SCCs or SCCs from
+          // other RefSCCs in the worklist. The invalid ones are dead and the
+          // other RefSCCs should be queued above, so we just need to skip both
+          // scenarios here.
+          if (InvalidSCCSet.count(C)) {
+            DEBUG(dbgs() << "Skipping an invalid SCC...\n");
+            continue;
+          }
+          if (&C->getOuterRefSCC() != RC) {
+            DEBUG(dbgs() << "Skipping an SCC that is now part of some other "
+                            "RefSCC...\n");
+            continue;
+          }
+
+          do {
+            // Check that we didn't miss any update scenario.
+            assert(!InvalidSCCSet.count(C) && "Processing an invalid SCC!");
+            assert(C->begin() != C->end() && "Cannot have an empty SCC!");
+            assert(&C->getOuterRefSCC() == RC &&
+                   "Processing an SCC in a different RefSCC!");
+
+            UR.UpdatedRC = nullptr;
+            UR.UpdatedC = nullptr;
+            PreservedAnalyses PassPA = Pass.run(*C, CGAM, CG, UR);
+
+            // Update the SCC and RefSCC if necessary.
+            C = UR.UpdatedC ? UR.UpdatedC : C;
+            RC = UR.UpdatedRC ? UR.UpdatedRC : RC;
+
+            // If the CGSCC pass wasn't able to provide a valid updated SCC,
+            // the current SCC may simply need to be skipped if invalid.
+            if (UR.InvalidatedSCCs.count(C)) {
+              DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
+              break;
+            }
+            // Check that we didn't miss any update scenario.
+            assert(C->begin() != C->end() && "Cannot have an empty SCC!");
+
+            // We handle invalidating the CGSCC analysis manager's information
+            // for the (potentially updated) SCC here. Note that any other SCCs
+            // whose structure has changed should have been invalidated by
+            // whatever was updating the call graph. This SCC gets invalidated
+            // late as it contains the nodes that were actively being
+            // processed.
+            CGAM.invalidate(*C, PassPA);
+
+            // Then intersect the preserved set so that invalidation of module
+            // analyses will eventually occur when the module pass completes.
+            PA.intersect(std::move(PassPA));
+
+            // The pass may have restructured the call graph and refined the
+            // current SCC and/or RefSCC. We need to update our current SCC and
+            // RefSCC pointers to follow these. Also, when the current SCC is
+            // refined, re-run the SCC pass over the newly refined SCC in order
+            // to observe the most precise SCC model available. This inherently
+            // cannot cycle excessively as it only happens when we split SCCs
+            // apart, at most converging on a DAG of single nodes.
+            // FIXME: If we ever start having RefSCC passes, we'll want to
+            // iterate there too.
+            if (UR.UpdatedC)
+              DEBUG(dbgs() << "Re-running SCC passes after a refinement of the "
+                              "current SCC: "
+                           << *UR.UpdatedC << "\n");
+
+            // Note that both `C` and `RC` may at this point refer to deleted,
+            // invalid SCC and RefSCCs respectively. But we will short circuit
+            // the processing when we check them in the loop above.
+          } while (UR.UpdatedC);
+        } while (!CWorklist.empty());
+
+        // We only need to keep internal inlined edge information within
+        // a RefSCC, so clear it to save space and to give the next visit to
+        // any of these functions a fresh start.
+        InlinedInternalEdges.clear();
+      } while (!RCWorklist.empty());
+    }
+
+    // By definition we preserve the call graph, all SCC analyses, and the
+    // analysis proxies by handling them above and in any nested pass managers.
+    PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }
+
+private:
+  CGSCCPassT Pass;
+};
+
+/// \brief A function to deduce a CGSCC pass type and wrap it in the
+/// templated adaptor.
+template <typename CGSCCPassT>
+ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
+createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass) {
+  return ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>(std::move(Pass));
+}
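+
+// A minimal usage sketch for the adaptor above. `MyCGSCCPass` is a
+// hypothetical pass conforming to the CGSCC pass concept; the surrounding
+// pipeline setup is assumed:
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(MyCGSCCPass()));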
+
+/// A proxy from a \c FunctionAnalysisManager to an \c SCC.
+///
+/// When a module pass runs and triggers invalidation, both the CGSCC and
+/// Function analysis manager proxies on the module get an invalidation event.
+/// We don't want to fully duplicate responsibility for most of the
+/// invalidation logic. Instead, this layer is only responsible for SCC-local
+/// invalidation events. We work with the module's FunctionAnalysisManager to
+/// invalidate function analyses.
+class FunctionAnalysisManagerCGSCCProxy
+    : public AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy> {
+public:
+  class Result {
+  public:
+    explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+
+    /// \brief Accessor for the analysis manager.
+    FunctionAnalysisManager &getManager() { return *FAM; }
+
+    bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
+                    CGSCCAnalysisManager::Invalidator &Inv);
+
+  private:
+    FunctionAnalysisManager *FAM;
+  };
+
+  /// Computes the \c FunctionAnalysisManager and stores it in the result proxy.
+  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);
+
+private:
+  friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;
+
+  static AnalysisKey Key;
+};
+
+extern template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+
+/// A proxy from a \c CGSCCAnalysisManager to a \c Function.
+using CGSCCAnalysisManagerFunctionProxy =
+    OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+
+/// Helper to update the call graph after running a function pass.
+///
+/// Function passes can only mutate the call graph in specific ways. This
+/// routine provides a helper that updates the call graph in those ways
+/// including returning whether any changes were made and populating a CG
+/// update result struct for the overall CGSCC walk.
+LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
+    LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
+    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR);
+
+/// \brief Adaptor that maps from a SCC to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
+/// to a \c CGSCCAnalysisManager it will run the
+/// \c FunctionAnalysisManagerCGSCCProxy analysis prior to running the function
+/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename FunctionPassT>
+class CGSCCToFunctionPassAdaptor
+    : public PassInfoMixin<CGSCCToFunctionPassAdaptor<FunctionPassT>> {
+public:
+  explicit CGSCCToFunctionPassAdaptor(FunctionPassT Pass)
+      : Pass(std::move(Pass)) {}
+
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  CGSCCToFunctionPassAdaptor(const CGSCCToFunctionPassAdaptor &Arg)
+      : Pass(Arg.Pass) {}
+
+  CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
+      : Pass(std::move(Arg.Pass)) {}
+
+  friend void swap(CGSCCToFunctionPassAdaptor &LHS,
+                   CGSCCToFunctionPassAdaptor &RHS) {
+    std::swap(LHS.Pass, RHS.Pass);
+  }
+
+  CGSCCToFunctionPassAdaptor &operator=(CGSCCToFunctionPassAdaptor RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  /// \brief Runs the function pass across every function in the SCC.
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+    // Setup the function analysis manager from its proxy.
+    FunctionAnalysisManager &FAM =
+        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
+
+    SmallVector<LazyCallGraph::Node *, 4> Nodes;
+    for (LazyCallGraph::Node &N : C)
+      Nodes.push_back(&N);
+
+    // The SCC may get split while we are optimizing functions due to deleting
+    // edges. If this happens, the current SCC can shift, so keep track of
+    // a pointer we can overwrite.
+    LazyCallGraph::SCC *CurrentC = &C;
+
+    DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");
+
+    PreservedAnalyses PA = PreservedAnalyses::all();
+    for (LazyCallGraph::Node *N : Nodes) {
+      // Skip nodes from other SCCs. These may have been split out during
+      // processing. We'll eventually visit those SCCs and pick up the nodes
+      // there.
+      if (CG.lookupSCC(*N) != CurrentC)
+        continue;
+
+      PreservedAnalyses PassPA = Pass.run(N->getFunction(), FAM);
+
+      // We know that the function pass couldn't have invalidated any other
+      // function's analyses (that's the contract of a function pass), so
+      // directly handle the function analysis manager's invalidation here.
+      FAM.invalidate(N->getFunction(), PassPA);
+
+      // Then intersect the preserved set so that invalidation of module
+      // analyses will eventually occur when the module pass completes.
+      PA.intersect(std::move(PassPA));
+
+      // If the call graph hasn't been preserved, update it based on this
+      // function pass. This may also update the current SCC to point to
+      // a smaller, more refined SCC.
+      auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
+      if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
+        CurrentC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentC, *N,
+                                                              AM, UR);
+        assert(
+            CG.lookupSCC(*N) == CurrentC &&
+            "Current SCC not updated to the SCC containing the current node!");
+      }
+    }
+
+    // By definition we preserve the proxy. And we preserve all analyses on
+    // Functions. This precludes *any* invalidation of function analyses by the
+    // proxy, but that's OK because we've taken care to invalidate analyses in
+    // the function analysis manager incrementally above.
+    PA.preserveSet<AllAnalysesOn<Function>>();
+    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+
+    // We've also ensured that we updated the call graph along the way.
+    PA.preserve<LazyCallGraphAnalysis>();
+
+    return PA;
+  }
+
+private:
+  FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+CGSCCToFunctionPassAdaptor<FunctionPassT>
+createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
+  return CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
+}
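+
+// A minimal usage sketch, assuming `MyFunctionPass` is a hypothetical pass
+// conforming to the function pass concept:
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(createCGSCCToFunctionPassAdaptor(MyFunctionPass()));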
+
+/// A helper that repeats an SCC pass each time an indirect call is refined to
+/// a direct call by that pass.
+///
+/// While the CGSCC pass manager works to re-visit SCCs and RefSCCs as they
+/// change shape, we may also want to repeat an SCC pass if it simply refines
+/// an indirect call to a direct call, even if doing so does not alter the
+/// shape of the graph. Note that this only pertains to direct calls to
+/// functions where IPO across the SCC may be able to compute more precise
+/// results. For intrinsics, we assume scalar optimizations already can fully
+/// reason about them.
+///
+/// The number of repetitions has the potential to be very large, however, as
+/// each iteration might refine only a single call site. As a consequence, in
+/// practice we use an upper bound on the number of repetitions to limit
+/// things.
+template <typename PassT>
+class DevirtSCCRepeatedPass
+    : public PassInfoMixin<DevirtSCCRepeatedPass<PassT>> {
+public:
+  explicit DevirtSCCRepeatedPass(PassT Pass, int MaxIterations)
+      : Pass(std::move(Pass)), MaxIterations(MaxIterations) {}
+
+  /// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
+  /// whenever an indirect call is refined.
+  PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+    PreservedAnalyses PA = PreservedAnalyses::all();
+
+    // The SCC may be refined while we are running passes over it, so set up
+    // a pointer that we can update.
+    LazyCallGraph::SCC *C = &InitialC;
+
+    // Collect value handles for all of the indirect call sites.
+    SmallVector<WeakTrackingVH, 8> CallHandles;
+
+    // Struct to track the counts of direct and indirect calls in each function
+    // of the SCC.
+    struct CallCount {
+      int Direct;
+      int Indirect;
+    };
+
+    // Put value handles on all of the indirect calls and return the direct
+    // and indirect call counts for each function in the SCC.
+    auto ScanSCC = [](LazyCallGraph::SCC &C,
+                      SmallVectorImpl<WeakTrackingVH> &CallHandles) {
+      assert(CallHandles.empty() && "Must start with a clear set of handles.");
+
+      SmallVector<CallCount, 4> CallCounts;
+      for (LazyCallGraph::Node &N : C) {
+        CallCounts.push_back({0, 0});
+        CallCount &Count = CallCounts.back();
+        for (Instruction &I : instructions(N.getFunction()))
+          if (auto CS = CallSite(&I)) {
+            if (CS.getCalledFunction()) {
+              ++Count.Direct;
+            } else {
+              ++Count.Indirect;
+              CallHandles.push_back(WeakTrackingVH(&I));
+            }
+          }
+      }
+
+      return CallCounts;
+    };
+
+    // Populate the initial call handles and get the initial call counts.
+    auto CallCounts = ScanSCC(*C, CallHandles);
+
+    for (int Iteration = 0;; ++Iteration) {
+      PreservedAnalyses PassPA = Pass.run(*C, AM, CG, UR);
+
+      // If the SCC structure has changed, bail immediately and let the outer
+      // CGSCC layer handle any iteration to reflect the refined structure.
+      if (UR.UpdatedC && UR.UpdatedC != C) {
+        PA.intersect(std::move(PassPA));
+        break;
+      }
+
+      // Check that we didn't miss any update scenario.
+      assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
+      assert(C->begin() != C->end() && "Cannot have an empty SCC!");
+      assert((int)CallCounts.size() == C->size() &&
+             "Cannot have changed the size of the SCC!");
+
+      // Check whether any of the handles were devirtualized.
+      auto IsDevirtualizedHandle = [&](WeakTrackingVH &CallH) {
+        if (!CallH)
+          return false;
+        auto CS = CallSite(CallH);
+        if (!CS)
+          return false;
+
+        // If the call is still indirect, leave it alone.
+        Function *F = CS.getCalledFunction();
+        if (!F)
+          return false;
+
+        DEBUG(dbgs() << "Found devirutalized call from "
+                     << CS.getParent()->getParent()->getName() << " to "
+                     << F->getName() << "\n");
+
+        // We now have a direct call where previously we had an indirect call,
+        // so iterate to process this devirtualization site.
+        return true;
+      };
+      bool Devirt = llvm::any_of(CallHandles, IsDevirtualizedHandle);
+
+      // Rescan to build up a new set of handles and count how many direct
+      // calls remain. If we decide to iterate, this also sets up the input to
+      // the next iteration.
+      CallHandles.clear();
+      auto NewCallCounts = ScanSCC(*C, CallHandles);
+
+      // If we haven't found an explicit devirtualization already, see if we
+      // have decreased the number of indirect calls and increased the number
+      // of direct calls for any function in the SCC. This can be fooled by all
+      // manner of transformations such as DCE and other things, but seems to
+      // work well in practice.
+      if (!Devirt)
+        for (int i = 0, Size = C->size(); i < Size; ++i)
+          if (CallCounts[i].Indirect > NewCallCounts[i].Indirect &&
+              CallCounts[i].Direct < NewCallCounts[i].Direct) {
+            Devirt = true;
+            break;
+          }
+
+      if (!Devirt) {
+        PA.intersect(std::move(PassPA));
+        break;
+      }
+
+      // Otherwise, if we've already hit our max, we're done.
+      if (Iteration >= MaxIterations) {
+        DEBUG(dbgs() << "Found another devirtualization after hitting the max "
+                        "number of repetitions ("
+                     << MaxIterations << ") on SCC: " << *C << "\n");
+        PA.intersect(std::move(PassPA));
+        break;
+      }
+
+      DEBUG(dbgs()
+            << "Repeating an SCC pass after finding a devirtualization in: "
+            << *C << "\n");
+
+      // Move over the new call counts in preparation for iterating.
+      CallCounts = std::move(NewCallCounts);
+
+      // Update the analysis manager with each run and intersect the total set
+      // of preserved analyses so we're ready to iterate.
+      AM.invalidate(*C, PassPA);
+      PA.intersect(std::move(PassPA));
+    }
+
+    // Note that we don't add any preserved entries here unlike a more normal
+    // "pass manager" because we only handle invalidation *between* iterations,
+    // not after the last iteration.
+    return PA;
+  }
+
+private:
+  PassT Pass;
+  int MaxIterations;
+};
+
+/// \brief A function to deduce an SCC pass type and wrap it in the templated
+/// adaptor.
+template <typename PassT>
+DevirtSCCRepeatedPass<PassT> createDevirtSCCRepeatedPass(PassT Pass,
+                                                         int MaxIterations) {
+  return DevirtSCCRepeatedPass<PassT>(std::move(Pass), MaxIterations);
+}
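+
+// A minimal usage sketch, assuming `MyCGSCCPass` is a hypothetical SCC pass;
+// here it is repeated at most 4 times per SCC when devirtualization occurs:
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(createDevirtSCCRepeatedPass(MyCGSCCPass(),
+//                                            /*MaxIterations=*/4));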
+
+// Clear out the debug logging macro.
+#undef DEBUG_TYPE
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CGSCCPASSMANAGER_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CallGraph.h b/linux-x64/clang/include/llvm/Analysis/CallGraph.h
new file mode 100644
index 0000000..8efc85f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CallGraph.h
@@ -0,0 +1,509 @@
+//===- CallGraph.h - Build a Module's call graph ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides interfaces used to build and manipulate a call graph,
+/// which is a very useful tool for interprocedural optimization.
+///
+/// Every function in a module is represented as a node in the call graph.  The
+/// callgraph node keeps track of which functions are called by the function
+/// corresponding to the node.
+///
+/// A call graph may contain nodes where the function that they correspond to
+/// is null.  These 'external' nodes are used to represent control flow that is
+/// not represented (or analyzable) in the module.  In particular, this
+/// analysis builds one external node such that:
+///   1. All functions in the module without internal linkage will have edges
+///      from this external node, indicating that they could be called by
+///      functions outside of the module.
+///   2. All functions whose address is used for something more than a direct
+///      call, for example being stored into a memory location, will also have
+///      an edge from this external node.  Since they may be called by an
+///      unknown caller later, they must be tracked as such.
+///
+/// There is a second external node added for calls that leave this module.
+/// Functions have a call edge to the external node iff:
+///   1. The function is external, reflecting the fact that it could call
+///      anything without internal linkage or that has its address taken.
+///   2. The function contains an indirect function call.
+///
+/// As an extension in the future, there may be multiple nodes with a null
+/// function.  These will be used when we can prove (through pointer analysis)
+/// that an indirect call site can call only a specific set of functions.
+///
+/// Because of these properties, the CallGraph captures a conservative superset
+/// of all of the caller-callee relationships, which is useful for
+/// transformations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLGRAPH_H
+#define LLVM_ANALYSIS_CALLGRAPH_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <cassert>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class CallGraphNode;
+class Module;
+class raw_ostream;
+
+/// \brief The basic data container for the call graph of a \c Module of IR.
+///
+/// This class exposes the interface to the call graph for a module of IR.
+///
+/// The core call graph itself can also be updated to reflect changes to the IR.
+class CallGraph {
+  Module &M;
+
+  using FunctionMapTy =
+      std::map<const Function *, std::unique_ptr<CallGraphNode>>;
+
+  /// \brief A map from \c Function* to \c CallGraphNode*.
+  FunctionMapTy FunctionMap;
+
+  /// \brief This node has edges to all external functions and those internal
+  /// functions that have their address taken.
+  CallGraphNode *ExternalCallingNode;
+
+  /// \brief This node has edges to it from all functions making indirect calls
+  /// or calling an external function.
+  std::unique_ptr<CallGraphNode> CallsExternalNode;
+
+  /// \brief Replace the function represented by this node by another.
+  ///
+  /// This does not rescan the body of the function, so it is suitable when
+  /// splicing the body of one function to another while also updating all
+  /// callers from the old function to the new.
+  void spliceFunction(const Function *From, const Function *To);
+
+  /// \brief Add a function to the call graph, and link the node to all of the
+  /// functions that it calls.
+  void addToCallGraph(Function *F);
+
+public:
+  explicit CallGraph(Module &M);
+  CallGraph(CallGraph &&Arg);
+  ~CallGraph();
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  using iterator = FunctionMapTy::iterator;
+  using const_iterator = FunctionMapTy::const_iterator;
+
+  /// \brief Returns the module the call graph corresponds to.
+  Module &getModule() const { return M; }
+
+  inline iterator begin() { return FunctionMap.begin(); }
+  inline iterator end() { return FunctionMap.end(); }
+  inline const_iterator begin() const { return FunctionMap.begin(); }
+  inline const_iterator end() const { return FunctionMap.end(); }
+
+  /// \brief Returns the call graph node for the provided function.
+  inline const CallGraphNode *operator[](const Function *F) const {
+    const_iterator I = FunctionMap.find(F);
+    assert(I != FunctionMap.end() && "Function not in callgraph!");
+    return I->second.get();
+  }
+
+  /// \brief Returns the call graph node for the provided function.
+  inline CallGraphNode *operator[](const Function *F) {
+    const_iterator I = FunctionMap.find(F);
+    assert(I != FunctionMap.end() && "Function not in callgraph!");
+    return I->second.get();
+  }
+
+  /// \brief Returns the \c CallGraphNode which is used to represent
+  /// undetermined calls into the callgraph.
+  CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
+
+  CallGraphNode *getCallsExternalNode() const {
+    return CallsExternalNode.get();
+  }
+
+  //===---------------------------------------------------------------------
+  // Functions to keep a call graph up to date with a function that has been
+  // modified.
+  //
+
+  /// \brief Unlink the function from this module, returning it.
+  ///
+  /// Because this removes the function from the module, the call graph node is
+  /// destroyed.  This is only valid if the function does not call any other
+  /// functions (i.e., there are no edges in its CGN).  The easiest way to do
+  /// this is to dropAllReferences before calling this.
+  Function *removeFunctionFromModule(CallGraphNode *CGN);
+
+  /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+  /// \c F if one does not already exist.
+  CallGraphNode *getOrInsertFunction(const Function *F);
+};
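+
+// A minimal sketch of building and walking a call graph, assuming a Module
+// `M` is in scope:
+//
+//   CallGraph CG(M);
+//   for (auto &Entry : CG) {
+//     const Function *F = Entry.first; // May be null for the external node.
+//     CallGraphNode *CGN = Entry.second.get();
+//     // Inspect F and the calls recorded in CGN...
+//   }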
+
+/// \brief A node in the call graph for a module.
+///
+/// Typically represents a function in the call graph. There are also special
+/// "null" nodes used to represent theoretical entries in the call graph.
+class CallGraphNode {
+public:
+  /// \brief A pair of the calling instruction (a call or invoke)
+  /// and the call graph node being called.
+  using CallRecord = std::pair<WeakTrackingVH, CallGraphNode *>;
+
+public:
+  using CalledFunctionsVector = std::vector<CallRecord>;
+
+  /// \brief Creates a node for the specified function.
+  inline CallGraphNode(Function *F) : F(F) {}
+
+  CallGraphNode(const CallGraphNode &) = delete;
+  CallGraphNode &operator=(const CallGraphNode &) = delete;
+
+  ~CallGraphNode() {
+    assert(NumReferences == 0 && "Node deleted while references remain");
+  }
+
+  using iterator = std::vector<CallRecord>::iterator;
+  using const_iterator = std::vector<CallRecord>::const_iterator;
+
+  /// \brief Returns the function that this call graph node represents.
+  Function *getFunction() const { return F; }
+
+  inline iterator begin() { return CalledFunctions.begin(); }
+  inline iterator end() { return CalledFunctions.end(); }
+  inline const_iterator begin() const { return CalledFunctions.begin(); }
+  inline const_iterator end() const { return CalledFunctions.end(); }
+  inline bool empty() const { return CalledFunctions.empty(); }
+  inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
+
+  /// \brief Returns the number of other CallGraphNodes in this CallGraph that
+  /// reference this node in their callee list.
+  unsigned getNumReferences() const { return NumReferences; }
+
+  /// \brief Returns the i'th called function.
+  CallGraphNode *operator[](unsigned i) const {
+    assert(i < CalledFunctions.size() && "Invalid index");
+    return CalledFunctions[i].second;
+  }
+
+  /// \brief Print out this call graph node.
+  void dump() const;
+  void print(raw_ostream &OS) const;
+
+  //===---------------------------------------------------------------------
+  // Methods to keep a call graph up to date with a function that has been
+  // modified
+  //
+
+  /// \brief Removes all edges from this CallGraphNode to any functions it
+  /// calls.
+  void removeAllCalledFunctions() {
+    while (!CalledFunctions.empty()) {
+      CalledFunctions.back().second->DropRef();
+      CalledFunctions.pop_back();
+    }
+  }
+
+  /// \brief Moves all the callee information from N to this node.
+  void stealCalledFunctionsFrom(CallGraphNode *N) {
+    assert(CalledFunctions.empty() &&
+           "Cannot steal callsite information if I already have some");
+    std::swap(CalledFunctions, N->CalledFunctions);
+  }
+
+  /// \brief Adds a function to the list of functions called by this one.
+  void addCalledFunction(CallSite CS, CallGraphNode *M) {
+    assert(!CS.getInstruction() || !CS.getCalledFunction() ||
+           !CS.getCalledFunction()->isIntrinsic() ||
+           !Intrinsic::isLeaf(CS.getCalledFunction()->getIntrinsicID()));
+    CalledFunctions.emplace_back(CS.getInstruction(), M);
+    M->AddRef();
+  }
+
+  void removeCallEdge(iterator I) {
+    I->second->DropRef();
+    *I = CalledFunctions.back();
+    CalledFunctions.pop_back();
+  }
+
+  /// \brief Removes the edge in the node for the specified call site.
+  ///
+  /// Note that this method takes linear time, so it should be used sparingly.
+  void removeCallEdgeFor(CallSite CS);
+
+  /// \brief Removes all call edges from this node to the specified callee
+  /// function.
+  ///
+  /// This takes more time to execute than removeCallEdgeFor, so it should not
+  /// be used unless necessary.
+  void removeAnyCallEdgeTo(CallGraphNode *Callee);
+
+  /// \brief Removes one edge associated with a null callsite from this node to
+  /// the specified callee function.
+  void removeOneAbstractEdgeTo(CallGraphNode *Callee);
+
+  /// \brief Replaces the edge in the node for the specified call site with a
+  /// new one.
+  ///
+  /// Note that this method takes linear time, so it should be used sparingly.
+  void replaceCallEdge(CallSite CS, CallSite NewCS, CallGraphNode *NewNode);
+
+private:
+  friend class CallGraph;
+
+  Function *F;
+
+  std::vector<CallRecord> CalledFunctions;
+
+  /// \brief The number of times that this CallGraphNode occurs in the
+  /// CalledFunctions array of this or other CallGraphNodes.
+  unsigned NumReferences = 0;
+
+  void DropRef() { --NumReferences; }
+  void AddRef() { ++NumReferences; }
+
+  /// \brief A special function that should only be used by the CallGraph class.
+  void allReferencesDropped() { NumReferences = 0; }
+};
+
+/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
+///
+/// This class implements the concept of an analysis pass used by the \c
+/// ModuleAnalysisManager to run an analysis over a module and cache the
+/// resulting data.
+class CallGraphAnalysis : public AnalysisInfoMixin<CallGraphAnalysis> {
+  friend AnalysisInfoMixin<CallGraphAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief A formulaic type to inform clients of the result type.
+  using Result = CallGraph;
+
+  /// \brief Compute the \c CallGraph for the module \c M.
+  ///
+  /// The real work here is done in the \c CallGraph constructor.
+  CallGraph run(Module &M, ModuleAnalysisManager &) { return CallGraph(M); }
+};
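+
+// A minimal sketch of querying this analysis, assuming a configured
+// ModuleAnalysisManager `MAM` and a Module `M` are in scope:
+//
+//   CallGraph &CG = MAM.getResult<CallGraphAnalysis>(M);
+//   CG.print(errs());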
+
+/// \brief Printer pass for the \c CallGraphAnalysis results.
+class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit CallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
+/// build it.
+///
+/// This class exposes both the interface to the call graph container and the
+/// module pass which runs over a module of IR and produces the call graph. The
+/// call graph interface is entirely a wrapper around a \c CallGraph object
+/// which is stored internally for each module.
+class CallGraphWrapperPass : public ModulePass {
+  std::unique_ptr<CallGraph> G;
+
+public:
+  static char ID; // Class identification, replacement for typeinfo
+
+  CallGraphWrapperPass();
+  ~CallGraphWrapperPass() override;
+
+  /// \brief The internal \c CallGraph around which the rest of this interface
+  /// is wrapped.
+  const CallGraph &getCallGraph() const { return *G; }
+  CallGraph &getCallGraph() { return *G; }
+
+  using iterator = CallGraph::iterator;
+  using const_iterator = CallGraph::const_iterator;
+
+  /// \brief Returns the module the call graph corresponds to.
+  Module &getModule() const { return G->getModule(); }
+
+  inline iterator begin() { return G->begin(); }
+  inline iterator end() { return G->end(); }
+  inline const_iterator begin() const { return G->begin(); }
+  inline const_iterator end() const { return G->end(); }
+
+  /// \brief Returns the call graph node for the provided function.
+  inline const CallGraphNode *operator[](const Function *F) const {
+    return (*G)[F];
+  }
+
+  /// \brief Returns the call graph node for the provided function.
+  inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
+
+  /// \brief Returns the \c CallGraphNode which is used to represent
+  /// undetermined calls into the callgraph.
+  CallGraphNode *getExternalCallingNode() const {
+    return G->getExternalCallingNode();
+  }
+
+  CallGraphNode *getCallsExternalNode() const {
+    return G->getCallsExternalNode();
+  }
+
+  //===---------------------------------------------------------------------
+  // Functions to keep a call graph up to date with a function that has been
+  // modified.
+  //
+
+  /// \brief Unlink the function from this module, returning it.
+  ///
+  /// Because this removes the function from the module, the call graph node is
+  /// destroyed.  This is only valid if the function does not call any other
+  /// functions (i.e., there are no edges in its CGN).  The easiest way to do
+  /// this is to dropAllReferences before calling this.
+  Function *removeFunctionFromModule(CallGraphNode *CGN) {
+    return G->removeFunctionFromModule(CGN);
+  }
+
+  /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+  /// \c F if one does not already exist.
+  CallGraphNode *getOrInsertFunction(const Function *F) {
+    return G->getOrInsertFunction(F);
+  }
+
+  //===---------------------------------------------------------------------
+  // Implementation of the ModulePass interface needed here.
+  //
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnModule(Module &M) override;
+  void releaseMemory() override;
+
+  void print(raw_ostream &o, const Module *) const override;
+  void dump() const;
+};
+
+//===----------------------------------------------------------------------===//
+// GraphTraits specializations for call graphs so that they can be treated as
+// graphs by the generic graph algorithms.
+//
+
+// Provide graph traits for traversing call graphs using standard graph
+// traversals.
+template <> struct GraphTraits<CallGraphNode *> {
+  using NodeRef = CallGraphNode *;
+  using CGNPairTy = CallGraphNode::CallRecord;
+
+  static NodeRef getEntryNode(CallGraphNode *CGN) { return CGN; }
+  static CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
+
+  using ChildIteratorType =
+      mapped_iterator<CallGraphNode::iterator, decltype(&CGNGetValue)>;
+
+  static ChildIteratorType child_begin(NodeRef N) {
+    return ChildIteratorType(N->begin(), &CGNGetValue);
+  }
+
+  static ChildIteratorType child_end(NodeRef N) {
+    return ChildIteratorType(N->end(), &CGNGetValue);
+  }
+};
+
+template <> struct GraphTraits<const CallGraphNode *> {
+  using NodeRef = const CallGraphNode *;
+  using CGNPairTy = CallGraphNode::CallRecord;
+  using EdgeRef = const CallGraphNode::CallRecord &;
+
+  static NodeRef getEntryNode(const CallGraphNode *CGN) { return CGN; }
+  static const CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
+
+  using ChildIteratorType =
+      mapped_iterator<CallGraphNode::const_iterator, decltype(&CGNGetValue)>;
+  using ChildEdgeIteratorType = CallGraphNode::const_iterator;
+
+  static ChildIteratorType child_begin(NodeRef N) {
+    return ChildIteratorType(N->begin(), &CGNGetValue);
+  }
+
+  static ChildIteratorType child_end(NodeRef N) {
+    return ChildIteratorType(N->end(), &CGNGetValue);
+  }
+
+  static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
+    return N->begin();
+  }
+  static ChildEdgeIteratorType child_edge_end(NodeRef N) { return N->end(); }
+
+  static NodeRef edge_dest(EdgeRef E) { return E.second; }
+};
+
+template <>
+struct GraphTraits<CallGraph *> : public GraphTraits<CallGraphNode *> {
+  using PairTy =
+      std::pair<const Function *const, std::unique_ptr<CallGraphNode>>;
+
+  static NodeRef getEntryNode(CallGraph *CGN) {
+    return CGN->getExternalCallingNode(); // Start at the external node!
+  }
+
+  static CallGraphNode *CGGetValuePtr(const PairTy &P) {
+    return P.second.get();
+  }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator =
+      mapped_iterator<CallGraph::iterator, decltype(&CGGetValuePtr)>;
+
+  static nodes_iterator nodes_begin(CallGraph *CG) {
+    return nodes_iterator(CG->begin(), &CGGetValuePtr);
+  }
+
+  static nodes_iterator nodes_end(CallGraph *CG) {
+    return nodes_iterator(CG->end(), &CGGetValuePtr);
+  }
+};
+
+template <>
+struct GraphTraits<const CallGraph *> : public GraphTraits<
+                                            const CallGraphNode *> {
+  using PairTy =
+      std::pair<const Function *const, std::unique_ptr<CallGraphNode>>;
+
+  static NodeRef getEntryNode(const CallGraph *CGN) {
+    return CGN->getExternalCallingNode(); // Start at the external node!
+  }
+
+  static const CallGraphNode *CGGetValuePtr(const PairTy &P) {
+    return P.second.get();
+  }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator =
+      mapped_iterator<CallGraph::const_iterator, decltype(&CGGetValuePtr)>;
+
+  static nodes_iterator nodes_begin(const CallGraph *CG) {
+    return nodes_iterator(CG->begin(), &CGGetValuePtr);
+  }
+
+  static nodes_iterator nodes_end(const CallGraph *CG) {
+    return nodes_iterator(CG->end(), &CGGetValuePtr);
+  }
+};
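+
+// With these specializations in place, the generic graph algorithms apply
+// directly. A minimal sketch visiting the SCCs of a call graph bottom-up,
+// assuming a CallGraph `CG` is in scope (scc_begin comes from
+// "llvm/ADT/SCCIterator.h"):
+//
+//   for (auto I = scc_begin(&CG); !I.isAtEnd(); ++I) {
+//     const std::vector<CallGraphNode *> &SCC = *I;
+//     // Process the nodes of this SCC; callees were visited earlier.
+//   }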
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CALLGRAPH_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CallGraphSCCPass.h b/linux-x64/clang/include/llvm/Analysis/CallGraphSCCPass.h
new file mode 100644
index 0000000..ace5460
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CallGraphSCCPass.h
@@ -0,0 +1,137 @@
+//===- CallGraphSCCPass.h - Pass that operates BU on call graph -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallGraphSCCPass class, which is used for passes which
+// are implemented as bottom-up traversals on the call graph.  Because there may
+// be cycles in the call graph, passes of this type operate on the call-graph in
+// SCC order: that is, they process functions bottom-up, except for recursive
+// functions, which they process all at once.
+//
+// These passes are inherently interprocedural, and are required to keep the
+// call graph up-to-date if they do anything which could modify it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
+#define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Pass.h"
+#include <vector>
+
+namespace llvm {
+
+class CallGraph;
+class CallGraphNode;
+class CallGraphSCC;
+class PMStack;
+
+class CallGraphSCCPass : public Pass {
+public:
+  explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}
+
+  /// createPrinterPass - Get a pass that prints the Module
+  /// corresponding to a CallGraph.
+  Pass *createPrinterPass(raw_ostream &OS,
+                          const std::string &Banner) const override;
+
+  using llvm::Pass::doInitialization;
+  using llvm::Pass::doFinalization;
+
+  /// doInitialization - This method is called before the SCCs of the program
+  /// have been processed, allowing the pass to do initialization as necessary.
+  virtual bool doInitialization(CallGraph &CG) {
+    return false;
+  }
+
+  /// runOnSCC - This method should be implemented by the subclass to perform
+  /// whatever action is necessary for the specified SCC.  Note that
+  /// non-recursive (or only self-recursive) functions will have an SCC size of
+  /// 1, whereas recursive portions of the call graph will have SCC size > 1.
+  ///
+  /// SCC passes that add or delete functions to the SCC are required to update
+  /// the SCC list, otherwise stale pointers may be dereferenced.
+  virtual bool runOnSCC(CallGraphSCC &SCC) = 0;
+
+  /// doFinalization - This method is called after the SCCs of the program have
+  /// been processed, allowing the pass to do final cleanup as necessary.
+  virtual bool doFinalization(CallGraph &CG) {
+    return false;
+  }
+
+  /// Assign a pass manager to manage this pass.
+  void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  PassManagerType getPotentialPassManagerType() const override {
+    return PMT_CallGraphPassManager;
+  }
+
+  /// getAnalysisUsage - For this class, we declare that we require and preserve
+  /// the call graph.  If the derived class implements this method, it should
+  /// always explicitly call the implementation here.
+  void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when optimization bisect is over the limit.
+  bool skipSCC(CallGraphSCC &SCC) const;
+};
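+
+// A minimal skeleton of a concrete subclass, for illustration only (the usual
+// pass registration boilerplate is omitted):
+//
+//   struct MySCCPass : public CallGraphSCCPass {
+//     static char ID;
+//     MySCCPass() : CallGraphSCCPass(ID) {}
+//     bool runOnSCC(CallGraphSCC &SCC) override {
+//       return false; // Return true iff the SCC was modified.
+//     }
+//   };
+//   char MySCCPass::ID = 0;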
+
+/// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
+class CallGraphSCC {
+  const CallGraph &CG; // The call graph for this SCC.
+  void *Context; // The CGPassManager object that is vending this.
+  std::vector<CallGraphNode *> Nodes;
+
+public:
+  CallGraphSCC(CallGraph &cg, void *context) : CG(cg), Context(context) {}
+
+  void initialize(ArrayRef<CallGraphNode *> NewNodes) {
+    Nodes.assign(NewNodes.begin(), NewNodes.end());
+  }
+
+  bool isSingular() const { return Nodes.size() == 1; }
+  unsigned size() const { return Nodes.size(); }
+
+  /// ReplaceNode - This informs the SCC and the pass manager that the specified
+  /// Old node has been deleted, and New is to be used in its place.
+  void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
+
+  using iterator = std::vector<CallGraphNode *>::const_iterator;
+
+  iterator begin() const { return Nodes.begin(); }
+  iterator end() const { return Nodes.end(); }
+
+  const CallGraph &getCallGraph() { return CG; }
+};
+
+void initializeDummyCGSCCPassPass(PassRegistry &);
+
+/// This pass is required by interprocedural register allocation. It forces
+/// codegen to follow a bottom-up order on the call graph.
+class DummyCGSCCPass : public CallGraphSCCPass {
+public:
+  static char ID;
+
+  DummyCGSCCPass() : CallGraphSCCPass(ID) {
+    PassRegistry &Registry = *PassRegistry::getPassRegistry();
+    initializeDummyCGSCCPassPass(Registry);
+  }
+
+  bool runOnSCC(CallGraphSCC &SCC) override { return false; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/CallPrinter.h b/linux-x64/clang/include/llvm/Analysis/CallPrinter.h
new file mode 100644
index 0000000..8b697d5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CallPrinter.h
@@ -0,0 +1,27 @@
+//===-- CallPrinter.h - Call graph printer external interface ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the call graph printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLPRINTER_H
+#define LLVM_ANALYSIS_CALLPRINTER_H
+
+namespace llvm {
+
+class ModulePass;
+
+ModulePass *createCallGraphViewerPass();
+ModulePass *createCallGraphDOTPrinterPass();
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h b/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h
new file mode 100644
index 0000000..8d2c095
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h
@@ -0,0 +1,81 @@
+//===----- llvm/Analysis/CaptureTracking.h - Pointer capture ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains routines that help determine which pointers are captured.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
+#define LLVM_ANALYSIS_CAPTURETRACKING_H
+
+namespace llvm {
+
+  class Value;
+  class Use;
+  class Instruction;
+  class DominatorTree;
+  class OrderedBasicBlock;
+
+  /// PointerMayBeCaptured - Return true if this pointer value may be captured
+  /// by the enclosing function (which is required to exist).  This routine can
+  /// be expensive, so consider caching the results.  The boolean ReturnCaptures
+  /// specifies whether returning the value (or part of it) from the function
+  /// counts as capturing it or not.  The boolean StoreCaptures specifies
+  /// whether storing the value (or part of it) into memory anywhere
+  /// automatically counts as capturing it or not.
+  bool PointerMayBeCaptured(const Value *V,
+                            bool ReturnCaptures,
+                            bool StoreCaptures);
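+
+  // A minimal usage sketch: check whether a pointer value `V` escapes the
+  // enclosing function, treating both returns and stores as captures:
+  //
+  //   if (!PointerMayBeCaptured(V, /*ReturnCaptures=*/true,
+  //                             /*StoreCaptures=*/true)) {
+  //     // The pointer never escapes; purely local reasoning is safe.
+  //   }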
+
+  /// PointerMayBeCapturedBefore - Return true if this pointer value may be
+  /// captured by the enclosing function (which is required to exist). If a
+  /// DominatorTree is provided, only captures which happen before the given
+  /// instruction are considered. This routine can be expensive, so consider
+  /// caching the results.  The boolean ReturnCaptures specifies whether
+  /// returning the value (or part of it) from the function counts as capturing
+  /// it or not.  The boolean StoreCaptures specifies whether storing the value
+  /// (or part of it) into memory anywhere automatically counts as capturing it
+  /// or not. Captures by the provided instruction are considered if the
+  /// final parameter is true. An ordered basic block in \p OBB could be used
+  /// to speed up capture-tracker queries.
+  bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
+                                  bool StoreCaptures, const Instruction *I,
+                                  DominatorTree *DT, bool IncludeI = false,
+                                  OrderedBasicBlock *OBB = nullptr);
+
+  /// This callback is used in conjunction with PointerMayBeCaptured. In
+  /// addition to the interface here, you'll need to provide your own getters
+  /// to see whether anything was captured.
+  struct CaptureTracker {
+    virtual ~CaptureTracker();
+
+    /// tooManyUses - The depth of traversal has breached a limit. There may be
+    /// capturing instructions that will not be passed into captured().
+    virtual void tooManyUses() = 0;
+
+    /// shouldExplore - This is the use of a value derived from the pointer.
+    /// To prune the search (i.e., assume that none of its users could possibly
+    /// capture) return false. To search it, return true.
+    ///
+    /// U->getUser() is always an Instruction.
+    virtual bool shouldExplore(const Use *U);
+
+    /// captured - Information about the pointer was captured by the user of
+    /// use U. Return true to stop the traversal or false to continue looking
+    /// for more capturing instructions.
+    virtual bool captured(const Use *U) = 0;
+  };
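+
+  // A minimal sketch of a custom tracker that merely records whether any
+  // capture was seen (illustrative only):
+  //
+  //   struct BoolTracker : CaptureTracker {
+  //     bool Captured = false;
+  //     void tooManyUses() override { Captured = true; }
+  //     bool captured(const Use *U) override {
+  //       Captured = true;
+  //       return true; // Stop the traversal; one capture is enough.
+  //     }
+  //   };
+  //   BoolTracker T;
+  //   PointerMayBeCaptured(V, &T);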
+
+  /// PointerMayBeCaptured - Visit the value and the values derived from it and
+  /// find values which appear to be capturing the pointer value. This feeds
+  /// results into and is controlled by the CaptureTracker object.
+  void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker);
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/CmpInstAnalysis.h b/linux-x64/clang/include/llvm/Analysis/CmpInstAnalysis.h
new file mode 100644
index 0000000..3cc69d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CmpInstAnalysis.h
@@ -0,0 +1,72 @@
+//===-- CmpInstAnalysis.h - Utils to help fold compare insts ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse compare instructions
+// and fold them into constants or other compare instructions
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CMPINSTANALYSIS_H
+#define LLVM_ANALYSIS_CMPINSTANALYSIS_H
+
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+  class ICmpInst;
+  class Value;
+
+  /// Encode an icmp predicate into a three-bit mask. These bits are carefully
+  /// arranged to allow folding of expressions such as:
+  ///
+  ///      (A < B) | (A > B) --> (A != B)
+  ///
+  /// Note that this is only valid if the first and second predicates have the
+  /// same sign. It is illegal to do: (A u< B) | (A s> B)
+  ///
+  /// Three bits are used to represent the condition, as follows:
+  ///   0  A > B
+  ///   1  A == B
+  ///   2  A < B
+  ///
+  /// <=>  Value  Definition
+  /// 000     0   Always false
+  /// 001     1   A >  B
+  /// 010     2   A == B
+  /// 011     3   A >= B
+  /// 100     4   A <  B
+  /// 101     5   A != B
+  /// 110     6   A <= B
+  /// 111     7   Always true
+  ///
+  unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
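+
+  // Worked example of the encoding above, with same-signed predicates:
+  //   getICmpCode(A < B) == 4 (100)
+  //   getICmpCode(A > B) == 1 (001)
+  //   4 | 1 == 5 (101), which the table decodes as A != B.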
+
+  /// This is the complement of getICmpCode, which turns an opcode and two
+  /// operands into either a constant true or false, or the predicate for a new
+  /// ICmp instruction. The sign is passed in to determine which kind of
+  /// predicate to use in the new icmp instruction.
+  /// A non-NULL return value will be a true or false constant.
+  /// A NULL return means a new ICmp is needed; its predicate is output in
+  /// NewICmpPred.
+  Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+                      CmpInst::Predicate &NewICmpPred);
+
+  /// Return true if both predicates match sign or if at least one of them is an
+  /// equality comparison (which is signless).
+  bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+
+  /// Decompose an icmp into the form ((X & Mask) pred 0) if possible. The
+  /// returned predicate is either == or !=. Returns false if decomposition
+  /// fails.
+  bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
+                            Value *&X, APInt &Mask,
+                            bool LookThroughTrunc = true);
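+
+  // For example, a sign-bit test such as `icmp slt i32 %x, 0` can be
+  // decomposed into `(%x & 0x80000000) != 0`, i.e. Pred becomes ICMP_NE and
+  // Mask becomes the sign bit (a representative case, not an exhaustive list
+  // of the supported patterns).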
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h b/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 0000000..9e861ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,105 @@
+//===- CodeMetrics.h - Code cost measurements -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for code, helping
+// the Inliner and other passes decide whether to duplicate a function's
+// contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/CallSite.h"
+
+namespace llvm {
+class AssumptionCache;
+class BasicBlock;
+class Loop;
+class Function;
+class Instruction;
+class DataLayout;
+class TargetTransformInfo;
+class Value;
+
+/// \brief Check whether a call will lower to something small.
+///
+/// This test checks whether this callsite will lower to something
+/// significantly cheaper than a traditional call, often a single
+/// instruction. Note that if isInstructionFree(CS.getInstruction()) returns
+/// true, so will this function.
+bool callIsSmall(ImmutableCallSite CS);
+
+/// \brief Utility to calculate the size and a few similar metrics for a set
+/// of basic blocks.
+struct CodeMetrics {
+  /// \brief True if this function contains a call to setjmp or other functions
+  /// with attribute "returns twice" without having the attribute itself.
+  bool exposesReturnsTwice = false;
+
+  /// \brief True if this function calls itself.
+  bool isRecursive = false;
+
+  /// \brief True if this function cannot be duplicated.
+  ///
+  /// True if this function contains one or more indirect branches, or it contains
+  /// one or more 'noduplicate' instructions.
+  bool notDuplicatable = false;
+
+  /// \brief True if this function contains a call to a convergent function.
+  bool convergent = false;
+
+  /// \brief True if this function calls alloca (in the C sense).
+  bool usesDynamicAlloca = false;
+
+  /// \brief Number of instructions in the analyzed blocks.
+  unsigned NumInsts = 0;
+
+  /// \brief Number of analyzed blocks.
+  unsigned NumBlocks = 0;
+
+  /// \brief Keeps track of basic block code size estimates.
+  DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+  /// \brief Keep track of the number of calls to 'big' functions.
+  unsigned NumCalls = 0;
+
+  /// \brief The number of calls to internal functions with a single caller.
+  ///
+  /// These are likely targets for future inlining, likely exposed by
+  /// interleaved devirtualization.
+  unsigned NumInlineCandidates = 0;
+
+  /// \brief How many instructions produce vector values.
+  ///
+  /// The inliner is more aggressive with inlining vector kernels.
+  unsigned NumVectorInsts = 0;
+
+  /// \brief How many 'ret' instructions the blocks contain.
+  unsigned NumRets = 0;
+
+  /// \brief Add information about a block to the current state.
+  void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
+                         const SmallPtrSetImpl<const Value*> &EphValues);
+
+  /// \brief Collect a loop's ephemeral values (those used only by an assume
+  /// or similar intrinsics in the loop).
+  static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
+                                     SmallPtrSetImpl<const Value *> &EphValues);
+
+  /// \brief Collect a function's ephemeral values (those used only by an
+  /// assume or similar intrinsics in the function).
+  static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
+                                     SmallPtrSetImpl<const Value *> &EphValues);
+};
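+
+// A minimal sketch of accumulating metrics over a function `F`, assuming a
+// TargetTransformInfo `TTI` and an AssumptionCache `AC` are in scope:
+//
+//   SmallPtrSet<const Value *, 32> EphValues;
+//   CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);
+//
+//   CodeMetrics Metrics;
+//   for (const BasicBlock &BB : F)
+//     Metrics.analyzeBasicBlock(&BB, TTI, EphValues);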
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h b/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h
new file mode 100644
index 0000000..354b557
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h
@@ -0,0 +1,161 @@
+//===-- ConstantFolding.h - Fold instructions into constants ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares routines for folding instructions into constants when all
+// operands are constants, for example "sub i32 1, 0" -> "1".
+//
+// Also, to supplement the basic VMCore ConstantExpr simplifications,
+// this file declares some additional folding routines that can make use of
+// DataLayout information. These functions cannot go in VMCore due to library
+// dependency issues.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
+#define LLVM_ANALYSIS_CONSTANTFOLDING_H
+
+namespace llvm {
+class APInt;
+template <typename T> class ArrayRef;
+class CallSite;
+class Constant;
+class ConstantExpr;
+class ConstantVector;
+class DataLayout;
+class Function;
+class GlobalValue;
+class Instruction;
+class ImmutableCallSite;
+class TargetLibraryInfo;
+class Type;
+
+/// If this constant is a constant offset from a global, return the global and
+/// the constant. Because of constantexprs, this function is recursive.
+bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
+                                const DataLayout &DL);
+
+/// ConstantFoldInstruction - Try to constant fold the specified instruction.
+/// If successful, the constant result is returned, if not, null is returned.
+/// Note that this fails if not all of the operands are constant.  Otherwise,
+/// this function can only fail when attempting to fold instructions like loads
+/// and stores, which have no constant expression form.
+Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
+                                  const TargetLibraryInfo *TLI = nullptr);
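+
+// A typical usage sketch: fold an instruction and, if successful, replace its
+// uses (assumes `I`, `DL`, and `TLI` are in scope):
+//
+//   if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
+//     I->replaceAllUsesWith(C);
+//     I->eraseFromParent();
+//   }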
+
+/// ConstantFoldConstant - Attempt to fold the constant using the
+/// specified DataLayout.
+/// If successful, the constant result is returned, if not, null is returned.
+Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
+                               const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
+/// specified operands.  If successful, the constant result is returned, if not,
+/// null is returned.  Note that this function can fail when attempting to
+/// fold instructions like loads and stores, which have no constant expression
+/// form.
+///
+Constant *ConstantFoldInstOperands(Instruction *I, ArrayRef<Constant *> Ops,
+                                   const DataLayout &DL,
+                                   const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
+/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
+/// returns a constant expression of the specified operands.
+///
+Constant *
+ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
+                                Constant *RHS, const DataLayout &DL,
+                                const TargetLibraryInfo *TLI = nullptr);
+
+/// \brief Attempt to constant fold a binary operation with the specified
+/// operands.  If it fails, it returns a constant expression of the specified
+/// operands.
+Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
+                                       Constant *RHS, const DataLayout &DL);
+
+/// \brief Attempt to constant fold a select instruction with the specified
+/// operands. The constant result is returned if successful; if not, null is
+/// returned.
+Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
+                                        Constant *V2);
+
+/// \brief Attempt to constant fold a cast with the specified operand.  If it
+/// fails, it returns a constant expression of the specified operand.
+Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
+                                  const DataLayout &DL);
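+
+// For example (sketch; I32, Ctx, and DL as above), truncation wraps modulo 2^8:
+//
+//   Constant *Trunc = ConstantFoldCastOperand(
+//       Instruction::Trunc, ConstantInt::get(I32, 300), Type::getInt8Ty(Ctx),
+//       DL);  // yields i8 44, i.e. 300 mod 256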
+
+/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
+/// instruction with the specified operands and indices.  The constant result is
+/// returned if successful; if not, null is returned.
+Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
+                                             ArrayRef<unsigned> Idxs);
+
+/// \brief Attempt to constant fold an extractvalue instruction with the
+/// specified operands and indices.  The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
+                                              ArrayRef<unsigned> Idxs);
+
+/// \brief Attempt to constant fold an insertelement instruction with the
+/// specified operands and indices.  The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldInsertElementInstruction(Constant *Val,
+                                               Constant *Elt,
+                                               Constant *Idx);
+
+/// \brief Attempt to constant fold an extractelement instruction with the
+/// specified operands and indices.  The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
+
+/// \brief Attempt to constant fold a shufflevector instruction with the
+/// specified operands and indices.  The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
+                                               Constant *Mask);
+
+/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
+/// produce if it is constant and determinable.  If this is not determinable,
+/// return null.
+Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
+                                       const DataLayout &DL);
+
+/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
+/// getelementptr constantexpr, return the constant value being addressed by the
+/// constant expression, or null if something is funny and we can't decide.
+Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE);
+
+/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
+/// indices (with an *implied* zero pointer index that is not in the list),
+/// return the constant value being addressed by a virtual load, or null if
+/// something is funny and we can't decide.
+Constant *ConstantFoldLoadThroughGEPIndices(Constant *C,
+                                            ArrayRef<Constant *> Indices);
+
+/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
+/// the specified function.
+bool canConstantFoldCallTo(ImmutableCallSite CS, const Function *F);
+
+/// ConstantFoldCall - Attempt to constant fold a call to the specified function
+/// with the specified arguments, returning null if unsuccessful.
+Constant *ConstantFoldCall(ImmutableCallSite CS, Function *F,
+                           ArrayRef<Constant *> Operands,
+                           const TargetLibraryInfo *TLI = nullptr);
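+
+// Call folding is normally gated on canConstantFoldCallTo (sketch; assumes CS
+// wraps a direct call to F, and Ops holds its all-constant arguments):
+//
+//   if (canConstantFoldCallTo(CS, F))
+//     if (Constant *C = ConstantFoldCall(CS, F, Ops, TLI))
+//       ... // replace the call's uses with C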
+
+/// ConstantFoldLoadThroughBitcast - Try to cast the given constant to the
+/// destination type, returning null if unsuccessful. Can cast pointer to
+/// pointer, or pointer to integer and vice versa, if their sizes are equal.
+Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
+                                         const DataLayout &DL);
+
+/// \brief Check whether the given call has no side-effects.
+/// Specifically checks for math routines which sometimes set errno.
+bool isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI);
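+
+// Sketch: an unused math-library call that provably cannot set errno can be
+// deleted outright (assumes I is a call instruction):
+//
+//   if (I.use_empty() && isMathLibCallNoop(CallSite(&I), TLI))
+//     ... // the call is dead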
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h b/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h
new file mode 100644
index 0000000..39f9c39
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -0,0 +1,189 @@
+//===-- DOTGraphTraitsPass.h - Print/View dotty graphs ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Templates to create dotty viewer and printer passes for GraphTraits graphs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
+#define LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
+
+#include "llvm/Analysis/CFGPrinter.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+
+/// \brief Default traits class for extracting a graph from an analysis pass.
+///
+/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
+template <typename AnalysisT, typename GraphT = AnalysisT *>
+struct DefaultAnalysisGraphTraits {
+  static GraphT getGraph(AnalysisT *A) { return A; }
+};
+
+template <
+    typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+    typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT, GraphT> >
+class DOTGraphTraitsViewer : public FunctionPass {
+public:
+  DOTGraphTraitsViewer(StringRef GraphName, char &ID)
+      : FunctionPass(ID), Name(GraphName) {}
+
+  /// @brief Return true if this function should be processed.
+  ///
+  /// An implementation of this class may override this function to indicate that
+  /// only certain functions should be viewed.
+  ///
+  /// @param Analysis The current analysis result for this function.
+  virtual bool processFunction(Function &F, AnalysisT &Analysis) {
+    return true;
+  }
+
+  bool runOnFunction(Function &F) override {
+    auto &Analysis = getAnalysis<AnalysisT>();
+
+    if (!processFunction(F, Analysis))
+      return false;
+
+    GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
+    std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
+    std::string Title = GraphName + " for '" + F.getName().str() + "' function";
+
+    ViewGraph(Graph, Name, IsSimple, Title);
+
+    return false;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<AnalysisT>();
+  }
+
+private:
+  std::string Name;
+};
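+
+// Declaring a concrete viewer pass is a one-liner (sketch; MyAnalysis is a
+// placeholder analysis with DOTGraphTraits specialized for MyAnalysis *):
+//
+//   struct MyGraphViewer
+//       : DOTGraphTraitsViewer<MyAnalysis, /*IsSimple=*/false> {
+//     static char ID;
+//     MyGraphViewer() : DOTGraphTraitsViewer("my-graph", ID) {}
+//   };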
+
+template <
+    typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+    typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT, GraphT> >
+class DOTGraphTraitsPrinter : public FunctionPass {
+public:
+  DOTGraphTraitsPrinter(StringRef GraphName, char &ID)
+      : FunctionPass(ID), Name(GraphName) {}
+
+  /// @brief Return true if this function should be processed.
+  ///
+  /// An implementation of this class may override this function to indicate that
+  /// only certain functions should be printed.
+  ///
+  /// @param Analysis The current analysis result for this function.
+  virtual bool processFunction(Function &F, AnalysisT &Analysis) {
+    return true;
+  }
+
+  bool runOnFunction(Function &F) override {
+    auto &Analysis = getAnalysis<AnalysisT>();
+
+    if (!processFunction(F, Analysis))
+      return false;
+
+    GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
+    std::string Filename = Name + "." + F.getName().str() + ".dot";
+    std::error_code EC;
+
+    errs() << "Writing '" << Filename << "'...";
+
+    raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+    std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
+    std::string Title = GraphName + " for '" + F.getName().str() + "' function";
+
+    if (!EC)
+      WriteGraph(File, Graph, IsSimple, Title);
+    else
+      errs() << "  error opening file for writing!";
+    errs() << "\n";
+
+    return false;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<AnalysisT>();
+  }
+
+private:
+  std::string Name;
+};
+
+template <
+    typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+    typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT, GraphT> >
+class DOTGraphTraitsModuleViewer : public ModulePass {
+public:
+  DOTGraphTraitsModuleViewer(StringRef GraphName, char &ID)
+      : ModulePass(ID), Name(GraphName) {}
+
+  bool runOnModule(Module &M) override {
+    GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
+    std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
+
+    ViewGraph(Graph, Name, IsSimple, Title);
+
+    return false;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<AnalysisT>();
+  }
+
+private:
+  std::string Name;
+};
+
+template <
+    typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+    typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT, GraphT> >
+class DOTGraphTraitsModulePrinter : public ModulePass {
+public:
+  DOTGraphTraitsModulePrinter(StringRef GraphName, char &ID)
+      : ModulePass(ID), Name(GraphName) {}
+
+  bool runOnModule(Module &M) override {
+    GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
+    std::string Filename = Name + ".dot";
+    std::error_code EC;
+
+    errs() << "Writing '" << Filename << "'...";
+
+    raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+    std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
+
+    if (!EC)
+      WriteGraph(File, Graph, IsSimple, Title);
+    else
+      errs() << "  error opening file for writing!";
+    errs() << "\n";
+
+    return false;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<AnalysisT>();
+  }
+
+private:
+  std::string Name;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/DemandedBits.h b/linux-x64/clang/include/llvm/Analysis/DemandedBits.h
new file mode 100644
index 0000000..ab86682
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DemandedBits.h
@@ -0,0 +1,122 @@
+//===- llvm/Analysis/DemandedBits.h - Determine demanded bits ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements a demanded bits analysis. A demanded bit is one that
+// contributes to a result; bits that are not demanded can be either zero or
+// one without affecting control or data flow. For example in this sequence:
+//
+//   %1 = add i32 %x, %y
+//   %2 = trunc i32 %1 to i16
+//
+// Only the lowest 16 bits of %1 are demanded; the rest are removed by the
+// trunc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DEMANDED_BITS_H
+#define LLVM_ANALYSIS_DEMANDED_BITS_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Function;
+class Instruction;
+struct KnownBits;
+class raw_ostream;
+
+class DemandedBits {
+public:
+  DemandedBits(Function &F, AssumptionCache &AC, DominatorTree &DT) :
+    F(F), AC(AC), DT(DT) {}
+
+  /// Return the bits demanded from instruction I.
+  APInt getDemandedBits(Instruction *I);
+
+  /// Return true if, during analysis, I could not be reached.
+  bool isInstructionDead(Instruction *I);
+
+  void print(raw_ostream &OS);
+
+private:
+  void performAnalysis();
+  void determineLiveOperandBits(const Instruction *UserI,
+    const Instruction *I, unsigned OperandNo,
+    const APInt &AOut, APInt &AB,
+    KnownBits &Known, KnownBits &Known2);
+
+  Function &F;
+  AssumptionCache &AC;
+  DominatorTree &DT;
+
+  bool Analyzed = false;
+
+  // The set of visited instructions (non-integer-typed only).
+  SmallPtrSet<Instruction*, 32> Visited;
+  DenseMap<Instruction *, APInt> AliveBits;
+};
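+
+// Querying the analysis (sketch; assumes AC and DT were already computed for
+// F, and that I is an integer-typed instruction in F):
+//
+//   DemandedBits DB(F, AC, DT);
+//   APInt Demanded = DB.getDemandedBits(&I);
+//   if (!Demanded.isAllOnesValue())
+//     ... // some bits of I's result are ignorable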
+
+class DemandedBitsWrapperPass : public FunctionPass {
+private:
+  mutable Optional<DemandedBits> DB;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  DemandedBitsWrapperPass();
+
+  bool runOnFunction(Function &F) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// Clean up memory between runs.
+  void releaseMemory() override;
+
+  DemandedBits &getDemandedBits() { return *DB; }
+
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+
+/// An analysis that produces \c DemandedBits for a function.
+class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
+  friend AnalysisInfoMixin<DemandedBitsAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result type for this analysis pass.
+  using Result = DemandedBits;
+
+  /// \brief Run the analysis pass over a function and produce demanded bits
+  /// information.
+  DemandedBits run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Printer pass for DemandedBits
+class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit DemandedBitsPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
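+
+// Under the new pass manager the printer is registered like any other
+// function pass (sketch; assumes a configured FunctionPassManager FPM):
+//
+//   FPM.addPass(DemandedBitsPrinterPass(errs()));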
+
+/// Create a demanded bits analysis pass.
+FunctionPass *createDemandedBitsWrapperPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_DEMANDED_BITS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h
new file mode 100644
index 0000000..90f33b8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h
@@ -0,0 +1,953 @@
+//===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DependenceAnalysis is an LLVM pass that analyses dependences between memory
+// accesses. Currently, it is an implementation of the approach described in
+//
+//            Practical Dependence Testing
+//            Goff, Kennedy, Tseng
+//            PLDI 1991
+//
+// There's a single entry point that analyzes the dependence between a pair
+// of memory references in a function, returning either NULL, for no dependence,
+// or a more-or-less detailed description of the dependence between them.
+//
+// This pass exists to support the DependenceGraph pass. There are two separate
+// passes because there's a useful separation of concerns. A dependence exists
+// if two conditions are met:
+//
+//    1) Two instructions reference the same memory location, and
+//    2) There is a flow of control leading from one instruction to the other.
+//
+// DependenceAnalysis attacks the first condition; DependenceGraph will attack
+// the second (it's not yet ready).
+//
+// Please note that this is work in progress and the interface is subject to
+// change.
+//
+// Plausible changes:
+//    Return a set of more precise dependences instead of just one dependence
+//    summarizing all.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+template <typename T> class ArrayRef;
+  class Loop;
+  class LoopInfo;
+  class ScalarEvolution;
+  class SCEV;
+  class SCEVConstant;
+  class raw_ostream;
+
+  /// Dependence - This class represents a dependence between two memory
+  /// references in a function. It contains minimal information and
+  /// is used in the very common situation where the compiler is unable to
+  /// determine anything beyond the existence of a dependence; that is, it
+  /// represents a confused dependence (see also FullDependence). In most
+  /// cases (for output, flow, and anti dependences), the dependence implies
+  /// an ordering, where the source must precede the destination; in contrast,
+  /// input dependences are unordered.
+  ///
+  /// When a dependence graph is built, each Dependence will be a member of
+  /// the set of predecessor edges for its destination instruction and a set
+  /// of successor edges for its source instruction. These sets are represented
+  /// as singly-linked lists, with the "next" fields stored in the dependence
+  /// itself.
+  class Dependence {
+  protected:
+    Dependence(Dependence &&) = default;
+    Dependence &operator=(Dependence &&) = default;
+
+  public:
+    Dependence(Instruction *Source,
+               Instruction *Destination) :
+      Src(Source),
+      Dst(Destination),
+      NextPredecessor(nullptr),
+      NextSuccessor(nullptr) {}
+    virtual ~Dependence() {}
+
+    /// Dependence::DVEntry - Each level in the distance/direction vector
+    /// has a direction (or perhaps a union of several directions), and
+    /// perhaps a distance.
+    struct DVEntry {
+      enum { NONE = 0,
+             LT = 1,
+             EQ = 2,
+             LE = 3,
+             GT = 4,
+             NE = 5,
+             GE = 6,
+             ALL = 7 };
+      unsigned char Direction : 3; // Init to ALL, then refine.
+      bool Scalar    : 1; // Init to true.
+      bool PeelFirst : 1; // Peeling the first iteration will break dependence.
+      bool PeelLast  : 1; // Peeling the last iteration will break the dependence.
+      bool Splitable : 1; // Splitting the loop will break dependence.
+      const SCEV *Distance; // NULL implies no distance available.
+      DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
+                  PeelLast(false), Splitable(false), Distance(nullptr) { }
+    };
+
+    /// getSrc - Returns the source instruction for this dependence.
+    ///
+    Instruction *getSrc() const { return Src; }
+
+    /// getDst - Returns the destination instruction for this dependence.
+    ///
+    Instruction *getDst() const { return Dst; }
+
+    /// isInput - Returns true if this is an input dependence.
+    ///
+    bool isInput() const;
+
+    /// isOutput - Returns true if this is an output dependence.
+    ///
+    bool isOutput() const;
+
+    /// isFlow - Returns true if this is a flow (aka true) dependence.
+    ///
+    bool isFlow() const;
+
+    /// isAnti - Returns true if this is an anti dependence.
+    ///
+    bool isAnti() const;
+
+    /// isOrdered - Returns true if dependence is Output, Flow, or Anti
+    ///
+    bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
+
+    /// isUnordered - Returns true if dependence is Input
+    ///
+    bool isUnordered() const { return isInput(); }
+
+    /// isLoopIndependent - Returns true if this is a loop-independent
+    /// dependence.
+    virtual bool isLoopIndependent() const { return true; }
+
+    /// isConfused - Returns true if this dependence is confused
+    /// (the compiler understands nothing and makes worst-case
+    /// assumptions).
+    virtual bool isConfused() const { return true; }
+
+    /// isConsistent - Returns true if this dependence is consistent
+    /// (occurs every time the source and destination are executed).
+    virtual bool isConsistent() const { return false; }
+
+    /// getLevels - Returns the number of common loops surrounding the
+    /// source and destination of the dependence.
+    virtual unsigned getLevels() const { return 0; }
+
+    /// getDirection - Returns the direction associated with a particular
+    /// level.
+    virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
+
+    /// getDistance - Returns the distance (or NULL) associated with a
+    /// particular level.
+    virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
+
+    /// isPeelFirst - Returns true if peeling the first iteration from
+    /// this loop will break this dependence.
+    virtual bool isPeelFirst(unsigned Level) const { return false; }
+
+    /// isPeelLast - Returns true if peeling the last iteration from
+    /// this loop will break this dependence.
+    virtual bool isPeelLast(unsigned Level) const { return false; }
+
+    /// isSplitable - Returns true if splitting this loop will break
+    /// the dependence.
+    virtual bool isSplitable(unsigned Level) const { return false; }
+
+    /// isScalar - Returns true if a particular level is scalar; that is,
+    /// if no subscript in the source or destination mentions the induction
+    /// variable associated with the loop at this level.
+    virtual bool isScalar(unsigned Level) const;
+
+    /// getNextPredecessor - Returns the value of the NextPredecessor
+    /// field.
+    const Dependence *getNextPredecessor() const { return NextPredecessor; }
+
+    /// getNextSuccessor - Returns the value of the NextSuccessor
+    /// field.
+    const Dependence *getNextSuccessor() const { return NextSuccessor; }
+
+    /// setNextPredecessor - Sets the value of the NextPredecessor
+    /// field.
+    void setNextPredecessor(const Dependence *pred) { NextPredecessor = pred; }
+
+    /// setNextSuccessor - Sets the value of the NextSuccessor
+    /// field.
+    void setNextSuccessor(const Dependence *succ) { NextSuccessor = succ; }
+
+    /// dump - For debugging purposes, dumps a dependence to OS.
+    ///
+    void dump(raw_ostream &OS) const;
+
+  private:
+    Instruction *Src, *Dst;
+    const Dependence *NextPredecessor, *NextSuccessor;
+    friend class DependenceInfo;
+  };
+
+  /// FullDependence - This class represents a dependence between two memory
+  /// references in a function. It contains detailed information about the
+  /// dependence (direction vectors, etc.) and is used when the compiler is
+  /// able to accurately analyze the interaction of the references; that is,
+  /// it is not a confused dependence (see Dependence). In most cases
+  /// (for output, flow, and anti dependences), the dependence implies an
+  /// ordering, where the source must precede the destination; in contrast,
+  /// input dependences are unordered.
+  class FullDependence final : public Dependence {
+  public:
+    FullDependence(Instruction *Src, Instruction *Dst, bool LoopIndependent,
+                   unsigned Levels);
+
+    /// isLoopIndependent - Returns true if this is a loop-independent
+    /// dependence.
+    bool isLoopIndependent() const override { return LoopIndependent; }
+
+    /// isConfused - Returns true if this dependence is confused
+    /// (the compiler understands nothing and makes worst-case
+    /// assumptions).
+    bool isConfused() const override { return false; }
+
+    /// isConsistent - Returns true if this dependence is consistent
+    /// (occurs every time the source and destination are executed).
+    bool isConsistent() const override { return Consistent; }
+
+    /// getLevels - Returns the number of common loops surrounding the
+    /// source and destination of the dependence.
+    unsigned getLevels() const override { return Levels; }
+
+    /// getDirection - Returns the direction associated with a particular
+    /// level.
+    unsigned getDirection(unsigned Level) const override;
+
+    /// getDistance - Returns the distance (or NULL) associated with a
+    /// particular level.
+    const SCEV *getDistance(unsigned Level) const override;
+
+    /// isPeelFirst - Returns true if peeling the first iteration from
+    /// this loop will break this dependence.
+    bool isPeelFirst(unsigned Level) const override;
+
+    /// isPeelLast - Returns true if peeling the last iteration from
+    /// this loop will break this dependence.
+    bool isPeelLast(unsigned Level) const override;
+
+    /// isSplitable - Returns true if splitting the loop will break
+    /// the dependence.
+    bool isSplitable(unsigned Level) const override;
+
+    /// isScalar - Returns true if a particular level is scalar; that is,
+    /// if no subscript in the source or destination mentions the induction
+    /// variable associated with the loop at this level.
+    bool isScalar(unsigned Level) const override;
+
+  private:
+    unsigned short Levels;
+    bool LoopIndependent;
+    bool Consistent; // Init to true, then refine.
+    std::unique_ptr<DVEntry[]> DV;
+    friend class DependenceInfo;
+  };
+
+  /// DependenceInfo - This class is the main dependence-analysis driver.
+  ///
+  class DependenceInfo {
+  public:
+    DependenceInfo(Function *F, AliasAnalysis *AA, ScalarEvolution *SE,
+                   LoopInfo *LI)
+        : AA(AA), SE(SE), LI(LI), F(F) {}
+
+    /// depends - Tests for a dependence between the Src and Dst instructions.
+    /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
+    /// FullDependence) with as much information as can be gleaned.
+    /// The flag PossiblyLoopIndependent should be set by the caller
+    /// if it appears that control flow can reach from Src to Dst
+    /// without traversing a loop back edge.
+    std::unique_ptr<Dependence> depends(Instruction *Src,
+                                        Instruction *Dst,
+                                        bool PossiblyLoopIndependent);
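+
+    // A typical query (sketch; Src and Dst are the two memory instructions of
+    // interest, and AA/SE/LI are the usual analyses for the function F):
+    //
+    //   DependenceInfo DI(&F, &AA, &SE, &LI);
+    //   if (std::unique_ptr<Dependence> D =
+    //           DI.depends(Src, Dst, /*PossiblyLoopIndependent=*/true))
+    //     D->dump(errs());  // a null result would mean no dependence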
+
+    /// getSplitIteration - Given a dependence that's splittable at some
+    /// particular level, return the iteration that should be used to split
+    /// the loop.
+    ///
+    /// Generally, the dependence analyzer will be used to build
+    /// a dependence graph for a function (basically a map from instructions
+    /// to dependences). Looking for cycles in the graph shows us loops
+    /// that cannot be trivially vectorized/parallelized.
+    ///
+    /// We can try to improve the situation by examining all the dependences
+    /// that make up the cycle, looking for ones we can break.
+    /// Sometimes, peeling the first or last iteration of a loop will break
+    /// dependences, and there are flags for those possibilities.
+    /// Sometimes, splitting a loop at some other iteration will do the trick,
+    /// and we've got a flag for that case. Rather than waste the space to
+    /// record the exact iteration (since we rarely know), we provide
+    /// a method that calculates the iteration. It's a drag that it must work
+    /// from scratch, but wonderful in that it's possible.
+    ///
+    /// Here's an example:
+    ///
+    ///    for (i = 0; i < 10; i++)
+    ///        A[i] = ...
+    ///        ... = A[11 - i]
+    ///
+    /// There's a loop-carried flow dependence from the store to the load,
+    /// found by the weak-crossing SIV test. The dependence will have a flag,
+    /// indicating that the dependence can be broken by splitting the loop.
+    /// Calling getSplitIteration will return 5.
+    /// Splitting the loop breaks the dependence, like so:
+    ///
+    ///    for (i = 0; i <= 5; i++)
+    ///        A[i] = ...
+    ///        ... = A[11 - i]
+    ///    for (i = 6; i < 10; i++)
+    ///        A[i] = ...
+    ///        ... = A[11 - i]
+    ///
+    /// breaks the dependence and allows us to vectorize/parallelize
+    /// both loops.
+    const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
+
+    Function *getFunction() const { return F; }
+
+  private:
+    AliasAnalysis *AA;
+    ScalarEvolution *SE;
+    LoopInfo *LI;
+    Function *F;
+
+    /// Subscript - This private struct represents a pair of subscripts from
+    /// a pair of potentially multi-dimensional array references. We use a
+    /// vector of them to guide subscript partitioning.
+    struct Subscript {
+      const SCEV *Src;
+      const SCEV *Dst;
+      enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
+      SmallBitVector Loops;
+      SmallBitVector GroupLoops;
+      SmallBitVector Group;
+    };
+
+    struct CoefficientInfo {
+      const SCEV *Coeff;
+      const SCEV *PosPart;
+      const SCEV *NegPart;
+      const SCEV *Iterations;
+    };
+
+    struct BoundInfo {
+      const SCEV *Iterations;
+      const SCEV *Upper[8];
+      const SCEV *Lower[8];
+      unsigned char Direction;
+      unsigned char DirSet;
+    };
+
+    /// Constraint - This private class represents a constraint, as defined
+    /// in the paper
+    ///
+    ///           Practical Dependence Testing
+    ///           Goff, Kennedy, Tseng
+    ///           PLDI 1991
+    ///
+    /// There are 5 kinds of constraint, in a hierarchy.
+    ///   1) Any - indicates no constraint, any dependence is possible.
+    ///   2) Line - A line ax + by = c, where a, b, and c are parameters,
+    ///             representing the dependence equation.
+    ///   3) Distance - The value d of the dependence distance.
+    ///   4) Point - A point <x, y> representing the dependence from
+    ///              iteration x to iteration y.
+    ///   5) Empty - No dependence is possible.
+    class Constraint {
+    private:
+      enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
+      ScalarEvolution *SE;
+      const SCEV *A;
+      const SCEV *B;
+      const SCEV *C;
+      const Loop *AssociatedLoop;
+
+    public:
+      /// isEmpty - Return true if the constraint is of kind Empty.
+      bool isEmpty() const { return Kind == Empty; }
+
+      /// isPoint - Return true if the constraint is of kind Point.
+      bool isPoint() const { return Kind == Point; }
+
+      /// isDistance - Return true if the constraint is of kind Distance.
+      bool isDistance() const { return Kind == Distance; }
+
+      /// isLine - Return true if the constraint is of kind Line.
+      /// Since Distance's can also be represented as Lines, we also return
+      /// true if the constraint is of kind Distance.
+      bool isLine() const { return Kind == Line || Kind == Distance; }
+
+      /// isAny - Return true if the constraint is of kind Any.
+      bool isAny() const { return Kind == Any; }
+
+      /// getX - If constraint is a point <X, Y>, returns X.
+      /// Otherwise assert.
+      const SCEV *getX() const;
+
+      /// getY - If constraint is a point <X, Y>, returns Y.
+      /// Otherwise assert.
+      const SCEV *getY() const;
+
+      /// getA - If constraint is a line AX + BY = C, returns A.
+      /// Otherwise assert.
+      const SCEV *getA() const;
+
+      /// getB - If constraint is a line AX + BY = C, returns B.
+      /// Otherwise assert.
+      const SCEV *getB() const;
+
+      /// getC - If constraint is a line AX + BY = C, returns C.
+      /// Otherwise assert.
+      const SCEV *getC() const;
+
+      /// getD - If constraint is a distance, returns D.
+      /// Otherwise assert.
+      const SCEV *getD() const;
+
+      /// getAssociatedLoop - Returns the loop associated with this constraint.
+      const Loop *getAssociatedLoop() const;
+
+      /// setPoint - Change a constraint to Point.
+      void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
+
+      /// setLine - Change a constraint to Line.
+      void setLine(const SCEV *A, const SCEV *B,
+                   const SCEV *C, const Loop *CurrentLoop);
+
+      /// setDistance - Change a constraint to Distance.
+      void setDistance(const SCEV *D, const Loop *CurrentLoop);
+
+      /// setEmpty - Change a constraint to Empty.
+      void setEmpty();
+
+      /// setAny - Change a constraint to Any.
+      void setAny(ScalarEvolution *SE);
+
+      /// dump - For debugging purposes. Dumps the constraint
+      /// out to OS.
+      void dump(raw_ostream &OS) const;
+    };
+
+    /// establishNestingLevels - Examines the loop nesting of the Src and Dst
+    /// instructions and establishes their shared loops. Sets the variables
+    /// CommonLevels, SrcLevels, and MaxLevels.
+    /// The source and destination instructions needn't be contained in the same
+    /// loop. The routine establishNestingLevels finds the level of the most deeply
+    /// nested loop that contains them both, CommonLevels. An instruction that's
+    /// not contained in a loop is at level = 0. MaxLevels is equal to the level
+    /// of the source plus the level of the destination, minus CommonLevels.
+    /// This lets us allocate vectors MaxLevels in length, with room for every
+    /// distinct loop referenced in both the source and destination subscripts.
+    /// The variable SrcLevels is the nesting depth of the source instruction.
+    /// It's used to help calculate distinct loops referenced by the destination.
+    /// Here's the map from loops to levels:
+    ///            0 - unused
+    ///            1 - outermost common loop
+    ///          ... - other common loops
+    /// CommonLevels - innermost common loop
+    ///          ... - loops containing Src but not Dst
+    ///    SrcLevels - innermost loop containing Src but not Dst
+    ///          ... - loops containing Dst but not Src
+    ///    MaxLevels - innermost loop containing Dst but not Src
+    /// Consider the following code fragment:
+    ///    for (a = ...) {
+    ///      for (b = ...) {
+    ///        for (c = ...) {
+    ///          for (d = ...) {
+    ///            A[] = ...;
+    ///          }
+    ///        }
+    ///        for (e = ...) {
+    ///          for (f = ...) {
+    ///            for (g = ...) {
+    ///              ... = A[];
+    ///            }
+    ///          }
+    ///        }
+    ///      }
+    ///    }
+    /// If we're looking at the possibility of a dependence between the store
+    /// to A (the Src) and the load from A (the Dst), we'll note that they
+    /// have 2 loops in common, so CommonLevels will equal 2 and the direction
+    /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+    /// A map from loop names to level indices would look like
+    ///     a - 1
+    ///     b - 2 = CommonLevels
+    ///     c - 3
+    ///     d - 4 = SrcLevels
+    ///     e - 5
+    ///     f - 6
+    ///     g - 7 = MaxLevels
+    void establishNestingLevels(const Instruction *Src,
+                                const Instruction *Dst);
+
+    unsigned CommonLevels, SrcLevels, MaxLevels;
+
+    /// mapSrcLoop - Given one of the loops containing the source, return
+    /// its level index in our numbering scheme.
+    unsigned mapSrcLoop(const Loop *SrcLoop) const;
+
+    /// mapDstLoop - Given one of the loops containing the destination,
+    /// return its level index in our numbering scheme.
+    unsigned mapDstLoop(const Loop *DstLoop) const;
+
+    /// isLoopInvariant - Returns true if Expression is loop invariant
+    /// in LoopNest.
+    bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
+
+    /// Makes sure all subscript pairs share the same integer type by
+    /// sign-extending as necessary.
+    /// Sign-extending a subscript is safe because getelementptr assumes the
+    /// array subscripts are signed.
+    void unifySubscriptType(ArrayRef<Subscript *> Pairs);
+
+    /// removeMatchingExtensions - Examines a subscript pair.
+    /// If the source and destination are identically sign (or zero)
+    /// extended, it strips off the extension in an effort to
+    /// simplify the actual analysis.
+    void removeMatchingExtensions(Subscript *Pair);
+
+    /// collectCommonLoops - Finds the set of loops from the LoopNest that
+    /// have a level <= CommonLevels and are referred to by the SCEV Expression.
+    void collectCommonLoops(const SCEV *Expression,
+                            const Loop *LoopNest,
+                            SmallBitVector &Loops) const;
+
+    /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
+    /// linear. Collect the set of loops mentioned by Src.
+    bool checkSrcSubscript(const SCEV *Src,
+                           const Loop *LoopNest,
+                           SmallBitVector &Loops);
+
+    /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
+    /// linear. Collect the set of loops mentioned by Dst.
+    bool checkDstSubscript(const SCEV *Dst,
+                           const Loop *LoopNest,
+                           SmallBitVector &Loops);
+
+    /// isKnownPredicate - Compare X and Y using the predicate Pred.
+    /// Basically a wrapper for SCEV::isKnownPredicate,
+    /// but tries harder, especially in the presence of sign and zero
+    /// extensions and symbolics.
+    bool isKnownPredicate(ICmpInst::Predicate Pred,
+                          const SCEV *X,
+                          const SCEV *Y) const;
+
+    /// collectUpperBound - All subscripts are the same type (on my machine,
+    /// an i64). The loop bound may be a smaller type. collectUpperBound
+    /// finds the bound, if available, and zero-extends it to the type T.
+    /// (I zero extend since the bound should always be >= 0.)
+    /// If no upper bound is available, return NULL.
+    const SCEV *collectUpperBound(const Loop *l, Type *T) const;
+
+    /// collectConstantUpperBound - Calls collectUpperBound(), then
+    /// attempts to cast it to SCEVConstant. If the cast fails,
+    /// returns NULL.
+    const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
+
+    /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
+    /// and classifies it as either ZIV, SIV, RDIV, MIV, or NonLinear.
+    /// Collects the associated loops in a set.
+    Subscript::ClassificationKind classifyPair(const SCEV *Src,
+                                           const Loop *SrcLoopNest,
+                                           const SCEV *Dst,
+                                           const Loop *DstLoopNest,
+                                           SmallBitVector &Loops);
+
+    /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// If the dependence isn't proven to exist,
+    /// marks the Result as inconsistent.
+    bool testZIV(const SCEV *Src,
+                 const SCEV *Dst,
+                 FullDependence &Result) const;
+
+    /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
+    /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
+    /// i and j are induction variables, c1 and c2 are loop invariant,
+    /// and a1 and a2 are constant.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction vector entry and, when possible,
+    /// the distance vector entry.
+    /// If the dependence isn't proven to exist,
+    /// marks the Result as inconsistent.
+    bool testSIV(const SCEV *Src,
+                 const SCEV *Dst,
+                 unsigned &Level,
+                 FullDependence &Result,
+                 Constraint &NewConstraint,
+                 const SCEV *&SplitIter) const;
+
+    /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
+    /// Things of the form [c1 + a1*i] and [c2 + a2*j]
+    /// where i and j are induction variables, c1 and c2 are loop invariant,
+    /// and a1 and a2 are constant.
+    /// With minor algebra, this test can also be used for things like
+    /// [c1 + a1*i + a2*j][c2].
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Marks the Result as inconsistent.
+    bool testRDIV(const SCEV *Src,
+                  const SCEV *Dst,
+                  FullDependence &Result) const;
+
+    /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
+    /// Returns true if dependence disproved.
+    /// Can sometimes refine direction vectors.
+    bool testMIV(const SCEV *Src,
+                 const SCEV *Dst,
+                 const SmallBitVector &Loops,
+                 FullDependence &Result) const;
+
+    /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
+    /// for dependence.
+    /// Things of the form [c1 + a*i] and [c2 + a*i],
+    /// where i is an induction variable, c1 and c2 are loop invariant,
+    /// and a is a constant
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction and distance.
+    bool strongSIVtest(const SCEV *Coeff,
+                       const SCEV *SrcConst,
+                       const SCEV *DstConst,
+                       const Loop *CurrentLoop,
+                       unsigned Level,
+                       FullDependence &Result,
+                       Constraint &NewConstraint) const;
+
+    /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
+    /// (Src and Dst) for dependence.
+    /// Things of the form [c1 + a*i] and [c2 - a*i],
+    /// where i is an induction variable, c1 and c2 are loop invariant,
+    /// and a is a constant.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction entry.
+    /// Set consistent to false.
+    /// Marks the dependence as splitable.
+    bool weakCrossingSIVtest(const SCEV *SrcCoeff,
+                             const SCEV *SrcConst,
+                             const SCEV *DstConst,
+                             const Loop *CurrentLoop,
+                             unsigned Level,
+                             FullDependence &Result,
+                             Constraint &NewConstraint,
+                             const SCEV *&SplitIter) const;
+
+    /// exactSIVtest - Tests the SIV subscript pair
+    /// (Src and Dst) for dependence.
+    /// Things of the form [c1 + a1*i] and [c2 + a2*i],
+    /// where i is an induction variable, c1 and c2 are loop invariant,
+    /// and a1 and a2 are constant.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction entry.
+    /// Set consistent to false.
+    bool exactSIVtest(const SCEV *SrcCoeff,
+                      const SCEV *DstCoeff,
+                      const SCEV *SrcConst,
+                      const SCEV *DstConst,
+                      const Loop *CurrentLoop,
+                      unsigned Level,
+                      FullDependence &Result,
+                      Constraint &NewConstraint) const;
+
+    /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
+    /// (Src and Dst) for dependence.
+    /// Things of the form [c1] and [c2 + a*i],
+    /// where i is an induction variable, c1 and c2 are loop invariant,
+    /// and a is a constant. See also weakZeroDstSIVtest.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction entry.
+    /// Set consistent to false.
+    /// If loop peeling will break the dependence, mark appropriately.
+    bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
+                            const SCEV *SrcConst,
+                            const SCEV *DstConst,
+                            const Loop *CurrentLoop,
+                            unsigned Level,
+                            FullDependence &Result,
+                            Constraint &NewConstraint) const;
+
+    /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
+    /// (Src and Dst) for dependence.
+    /// Things of the form [c1 + a*i] and [c2],
+    /// where i is an induction variable, c1 and c2 are loop invariant,
+    /// and a is a constant. See also weakZeroSrcSIVtest.
+    /// Returns true if any possible dependence is disproved.
+    /// If there might be a dependence, returns false.
+    /// Sets appropriate direction entry.
+    /// Set consistent to false.
+    /// If loop peeling will break the dependence, mark appropriately.
+    bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
+                            const SCEV *SrcConst,
+                            const SCEV *DstConst,
+                            const Loop *CurrentLoop,
+                            unsigned Level,
+                            FullDependence &Result,
+                            Constraint &NewConstraint) const;
+
+    /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+    /// Things of the form [c1 + a*i] and [c2 + b*j],
+    /// where i and j are induction variables, c1 and c2 are loop invariant,
+    /// and a and b are constants.
+    /// Returns true if any possible dependence is disproved.
+    /// Marks the result as inconsistent.
+    /// Works in some cases that symbolicRDIVtest doesn't,
+    /// and vice versa.
+    bool exactRDIVtest(const SCEV *SrcCoeff,
+                       const SCEV *DstCoeff,
+                       const SCEV *SrcConst,
+                       const SCEV *DstConst,
+                       const Loop *SrcLoop,
+                       const Loop *DstLoop,
+                       FullDependence &Result) const;
+
+    /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
+    /// Things of the form [c1 + a*i] and [c2 + b*j],
+    /// where i and j are induction variables, c1 and c2 are loop invariant,
+    /// and a and b are constants.
+    /// Returns true if any possible dependence is disproved.
+    /// Marks the result as inconsistent.
+    /// Works in some cases that exactRDIVtest doesn't,
+    /// and vice versa. Can also be used as a backup for
+    /// ordinary SIV tests.
+    bool symbolicRDIVtest(const SCEV *SrcCoeff,
+                          const SCEV *DstCoeff,
+                          const SCEV *SrcConst,
+                          const SCEV *DstConst,
+                          const Loop *SrcLoop,
+                          const Loop *DstLoop) const;
+
+    /// gcdMIVtest - Tests an MIV subscript pair for dependence.
+    /// Returns true if any possible dependence is disproved.
+    /// Marks the result as inconsistent.
+    /// Can sometimes disprove the equal direction for 1 or more loops.
+    /// Can handle some symbolics that even the SIV tests don't get,
+    /// so we use it as a backup for everything.
+    bool gcdMIVtest(const SCEV *Src,
+                    const SCEV *Dst,
+                    FullDependence &Result) const;
+
+    /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
+    /// Returns true if any possible dependence is disproved.
+    /// Marks the result as inconsistent.
+    /// Computes directions.
+    bool banerjeeMIVtest(const SCEV *Src,
+                         const SCEV *Dst,
+                         const SmallBitVector &Loops,
+                         FullDependence &Result) const;
+
+    /// collectCoeffInfo - Walks through the subscript,
+    /// collecting each coefficient, the associated loop bounds,
+    /// and recording its positive and negative parts for later use.
+    CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
+                                      bool SrcFlag,
+                                      const SCEV *&Constant) const;
+
+    /// getPositivePart - X^+ = max(X, 0).
+    ///
+    const SCEV *getPositivePart(const SCEV *X) const;
+
+    /// getNegativePart - X^- = min(X, 0).
+    ///
+    const SCEV *getNegativePart(const SCEV *X) const;
+
+    /// getLowerBound - Looks through all the bounds info and
+    /// computes the lower bound given the current direction settings
+    /// at each level.
+    const SCEV *getLowerBound(BoundInfo *Bound) const;
+
+    /// getUpperBound - Looks through all the bounds info and
+    /// computes the upper bound given the current direction settings
+    /// at each level.
+    const SCEV *getUpperBound(BoundInfo *Bound) const;
+
+    /// exploreDirections - Hierarchically expands the direction vector
+    /// search space, combining the directions of discovered dependences
+    /// in the DirSet field of Bound. Returns the number of distinct
+    /// dependences discovered. If the dependence is disproved,
+    /// it will return 0.
+    unsigned exploreDirections(unsigned Level,
+                               CoefficientInfo *A,
+                               CoefficientInfo *B,
+                               BoundInfo *Bound,
+                               const SmallBitVector &Loops,
+                               unsigned &DepthExpanded,
+                               const SCEV *Delta) const;
+
+    /// testBounds - Returns true iff the current bounds are plausible.
+    bool testBounds(unsigned char DirKind,
+                    unsigned Level,
+                    BoundInfo *Bound,
+                    const SCEV *Delta) const;
+
+    /// findBoundsALL - Computes the upper and lower bounds for level K
+    /// using the * direction. Records them in Bound.
+    void findBoundsALL(CoefficientInfo *A,
+                       CoefficientInfo *B,
+                       BoundInfo *Bound,
+                       unsigned K) const;
+
+    /// findBoundsLT - Computes the upper and lower bounds for level K
+    /// using the < direction. Records them in Bound.
+    void findBoundsLT(CoefficientInfo *A,
+                      CoefficientInfo *B,
+                      BoundInfo *Bound,
+                      unsigned K) const;
+
+    /// findBoundsGT - Computes the upper and lower bounds for level K
+    /// using the > direction. Records them in Bound.
+    void findBoundsGT(CoefficientInfo *A,
+                      CoefficientInfo *B,
+                      BoundInfo *Bound,
+                      unsigned K) const;
+
+    /// findBoundsEQ - Computes the upper and lower bounds for level K
+    /// using the = direction. Records them in Bound.
+    void findBoundsEQ(CoefficientInfo *A,
+                      CoefficientInfo *B,
+                      BoundInfo *Bound,
+                      unsigned K) const;
+
+    /// intersectConstraints - Updates X with the intersection
+    /// of the Constraints X and Y. Returns true if X has changed.
+    bool intersectConstraints(Constraint *X,
+                              const Constraint *Y);
+
+    /// propagate - Review the constraints, looking for opportunities
+    /// to simplify a subscript pair (Src and Dst).
+    /// Return true if some simplification occurs.
+    /// If the simplification isn't exact (that is, if it is conservative
+    /// in terms of dependence), set consistent to false.
+    bool propagate(const SCEV *&Src,
+                   const SCEV *&Dst,
+                   SmallBitVector &Loops,
+                   SmallVectorImpl<Constraint> &Constraints,
+                   bool &Consistent);
+
+    /// propagateDistance - Attempt to propagate a distance
+    /// constraint into a subscript pair (Src and Dst).
+    /// Return true if some simplification occurs.
+    /// If the simplification isn't exact (that is, if it is conservative
+    /// in terms of dependence), set consistent to false.
+    bool propagateDistance(const SCEV *&Src,
+                           const SCEV *&Dst,
+                           Constraint &CurConstraint,
+                           bool &Consistent);
+
+    /// propagatePoint - Attempt to propagate a point
+    /// constraint into a subscript pair (Src and Dst).
+    /// Return true if some simplification occurs.
+    bool propagatePoint(const SCEV *&Src,
+                        const SCEV *&Dst,
+                        Constraint &CurConstraint);
+
+    /// propagateLine - Attempt to propagate a line
+    /// constraint into a subscript pair (Src and Dst).
+    /// Return true if some simplification occurs.
+    /// If the simplification isn't exact (that is, if it is conservative
+    /// in terms of dependence), set consistent to false.
+    bool propagateLine(const SCEV *&Src,
+                       const SCEV *&Dst,
+                       Constraint &CurConstraint,
+                       bool &Consistent);
+
+    /// findCoefficient - Given a linear SCEV,
+    /// return the coefficient corresponding to specified loop.
+    /// If there isn't one, return the SCEV constant 0.
+    /// For example, given a*i + b*j + c*k, returning the coefficient
+    /// corresponding to the j loop would yield b.
+    const SCEV *findCoefficient(const SCEV *Expr,
+                                const Loop *TargetLoop) const;
+
+    /// zeroCoefficient - Given a linear SCEV,
+    /// return the SCEV given by zeroing out the coefficient
+    /// corresponding to the specified loop.
+    /// For example, given a*i + b*j + c*k, zeroing the coefficient
+    /// corresponding to the j loop would yield a*i + c*k.
+    const SCEV *zeroCoefficient(const SCEV *Expr,
+                                const Loop *TargetLoop) const;
+
+    /// addToCoefficient - Given a linear SCEV Expr,
+    /// return the SCEV given by adding some Value to the
+    /// coefficient corresponding to the specified TargetLoop.
+    /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+    /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+    const SCEV *addToCoefficient(const SCEV *Expr,
+                                 const Loop *TargetLoop,
+                                 const SCEV *Value)  const;
+
+    /// updateDirection - Update direction vector entry
+    /// based on the current constraint.
+    void updateDirection(Dependence::DVEntry &Level,
+                         const Constraint &CurConstraint) const;
+
+    bool tryDelinearize(Instruction *Src, Instruction *Dst,
+                        SmallVectorImpl<Subscript> &Pair);
+  }; // class DependenceInfo
+
+  /// \brief AnalysisPass to compute dependence information in a function
+  class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
+  public:
+    typedef DependenceInfo Result;
+    Result run(Function &F, FunctionAnalysisManager &FAM);
+
+  private:
+    static AnalysisKey Key;
+    friend struct AnalysisInfoMixin<DependenceAnalysis>;
+  }; // class DependenceAnalysis
+
+  /// \brief Legacy pass manager pass to access dependence information
+  class DependenceAnalysisWrapperPass : public FunctionPass {
+  public:
+    static char ID; // Class identification, replacement for typeinfo
+    DependenceAnalysisWrapperPass() : FunctionPass(ID) {
+      initializeDependenceAnalysisWrapperPassPass(
+          *PassRegistry::getPassRegistry());
+    }
+
+    bool runOnFunction(Function &F) override;
+    void releaseMemory() override;
+    void getAnalysisUsage(AnalysisUsage &) const override;
+    void print(raw_ostream &, const Module * = nullptr) const override;
+    DependenceInfo &getDI() const;
+
+  private:
+    std::unique_ptr<DependenceInfo> info;
+  }; // class DependenceAnalysisWrapperPass
+
+  /// createDependenceAnalysisWrapperPass - This creates an instance of the
+  /// DependenceAnalysis wrapper pass.
+  FunctionPass *createDependenceAnalysisWrapperPass();
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h
new file mode 100644
index 0000000..dd3c68e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h
@@ -0,0 +1,52 @@
+//===- llvm/Analysis/DivergenceAnalysis.h - Divergence Analysis -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The divergence analysis is an LLVM pass which can be used to determine
+// whether a branch instruction in a GPU program is divergent. It can help
+// branch optimizations such as jump threading and loop unswitching to make
+// better decisions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
+#define LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class Value;
+class DivergenceAnalysis : public FunctionPass {
+public:
+  static char ID;
+
+  DivergenceAnalysis() : FunctionPass(ID) {
+    initializeDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnFunction(Function &F) override;
+
+  // Print all divergent branches in the function.
+  void print(raw_ostream &OS, const Module *) const override;
+
+  // Returns true if V is divergent.
+  bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
+
+  // Returns true if V is uniform/non-divergent.
+  bool isUniform(const Value *V) const { return !isDivergent(V); }
+
+private:
+  // Stores all divergent values.
+  DenseSet<const Value *> DivergentValues;
+};
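+
+// A minimal usage sketch from another legacy pass (illustrative only;
+// instructions(F) comes from llvm/IR/InstIterator.h):
+//
+//   auto &DA = getAnalysis<DivergenceAnalysis>();
+//   for (Instruction &I : instructions(F))
+//     if (DA.isDivergent(&I))
+//       ; // e.g. avoid hoisting I above a divergent branch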
+} // End llvm namespace
+
+#endif //LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
\ No newline at end of file
diff --git a/linux-x64/clang/include/llvm/Analysis/DomPrinter.h b/linux-x64/clang/include/llvm/Analysis/DomPrinter.h
new file mode 100644
index 0000000..0ed2899
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DomPrinter.h
@@ -0,0 +1,30 @@
+//===-- DomPrinter.h - Dom printer external interface ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the dominance tree printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMPRINTER_H
+#define LLVM_ANALYSIS_DOMPRINTER_H
+
+namespace llvm {
+  class FunctionPass;
+  FunctionPass *createDomPrinterPass();
+  FunctionPass *createDomOnlyPrinterPass();
+  FunctionPass *createDomViewerPass();
+  FunctionPass *createDomOnlyViewerPass();
+  FunctionPass *createPostDomPrinterPass();
+  FunctionPass *createPostDomOnlyPrinterPass();
+  FunctionPass *createPostDomViewerPass();
+  FunctionPass *createPostDomOnlyViewerPass();
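+
+  // These factory functions back opt's dominator-printing options (e.g.
+  // -dot-dom, -dot-dom-only, -view-dom, -dot-postdom); the exact mapping is
+  // established where the passes are registered, in DomPrinter.cpp.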
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h b/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h
new file mode 100644
index 0000000..a304dff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h
@@ -0,0 +1,210 @@
+//===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominanceFrontier class, which calculates and holds the
+// dominance frontier for a function.
+//
+// This should be considered deprecated, don't add any more uses of this data
+// structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/GenericDomTree.h"
+#include <cassert>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+/// DominanceFrontierBase - Common base class for computing forward and inverse
+/// dominance frontiers for a function.
+///
+template <class BlockT, bool IsPostDom>
+class DominanceFrontierBase {
+public:
+  using DomSetType = std::set<BlockT *>;                // Dom set for a bb
+  using DomSetMapType = std::map<BlockT *, DomSetType>; // Dom set map
+
+protected:
+  using BlockTraits = GraphTraits<BlockT *>;
+
+  DomSetMapType Frontiers;
+  // Postdominators can have multiple roots.
+  SmallVector<BlockT *, IsPostDom ? 4 : 1> Roots;
+  static constexpr bool IsPostDominators = IsPostDom;
+
+public:
+  DominanceFrontierBase() = default;
+
+  /// getRoots - Return the root blocks of the current CFG.  This may include
+  /// multiple blocks if we are computing post dominators.  For forward
+  /// dominators, this will always be a single block (the entry node).
+  const SmallVectorImpl<BlockT *> &getRoots() const { return Roots; }
+
+  BlockT *getRoot() const {
+    assert(Roots.size() == 1 && "Should always have entry node!");
+    return Roots[0];
+  }
+
+  /// isPostDominator - Returns true if this analysis is based on postdominators.
+  bool isPostDominator() const {
+    return IsPostDominators;
+  }
+
+  void releaseMemory() {
+    Frontiers.clear();
+  }
+
+  // Accessor interface:
+  using iterator = typename DomSetMapType::iterator;
+  using const_iterator = typename DomSetMapType::const_iterator;
+
+  iterator begin() { return Frontiers.begin(); }
+  const_iterator begin() const { return Frontiers.begin(); }
+  iterator end() { return Frontiers.end(); }
+  const_iterator end() const { return Frontiers.end(); }
+  iterator find(BlockT *B) { return Frontiers.find(B); }
+  const_iterator find(BlockT *B) const { return Frontiers.find(B); }
+
+  iterator addBasicBlock(BlockT *BB, const DomSetType &frontier) {
+    assert(find(BB) == end() && "Block already in DominanceFrontier!");
+    return Frontiers.insert(std::make_pair(BB, frontier)).first;
+  }
+
+  /// removeBlock - Remove basic block BB's frontier.
+  void removeBlock(BlockT *BB);
+
+  void addToFrontier(iterator I, BlockT *Node);
+
+  void removeFromFrontier(iterator I, BlockT *Node);
+
+  /// compareDomSet - Return false if the two domsets match; otherwise
+  /// return true.
+  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const;
+
+  /// compare - Return true if the other dominance frontier base matches
+  /// this dominance frontier base. Otherwise return false.
+  bool compare(DominanceFrontierBase &Other) const;
+
+  /// print - Convert to human readable form
+  ///
+  void print(raw_ostream &OS) const;
+
+  /// dump - Dump the dominance frontier to dbgs().
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  void dump() const;
+#endif
+};
+
+//===-------------------------------------
+/// DominanceFrontier Class - Concrete subclass of DominanceFrontierBase that is
+/// used to compute forward dominance frontiers.
+///
+template <class BlockT>
+class ForwardDominanceFrontierBase
+    : public DominanceFrontierBase<BlockT, false> {
+private:
+  using BlockTraits = GraphTraits<BlockT *>;
+
+public:
+  using DomTreeT = DomTreeBase<BlockT>;
+  using DomTreeNodeT = DomTreeNodeBase<BlockT>;
+  using DomSetType = typename DominanceFrontierBase<BlockT, false>::DomSetType;
+
+  void analyze(DomTreeT &DT) {
+    assert(DT.getRoots().size() == 1 &&
+           "Only one entry block for forward domfronts!");
+    this->Roots = {DT.getRoot()};
+    calculate(DT, DT[this->Roots[0]]);
+  }
+
+  const DomSetType &calculate(const DomTreeT &DT, const DomTreeNodeT *Node);
+};
+
+class DominanceFrontier : public ForwardDominanceFrontierBase<BasicBlock> {
+public:
+  using DomTreeT = DomTreeBase<BasicBlock>;
+  using DomTreeNodeT = DomTreeNodeBase<BasicBlock>;
+  using DomSetType = DominanceFrontierBase<BasicBlock, false>::DomSetType;
+  using iterator = DominanceFrontierBase<BasicBlock, false>::iterator;
+  using const_iterator =
+      DominanceFrontierBase<BasicBlock, false>::const_iterator;
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+};
+
+class DominanceFrontierWrapperPass : public FunctionPass {
+  DominanceFrontier DF;
+
+public:
+  static char ID; // Pass ID, replacement for typeid
+
+  DominanceFrontierWrapperPass();
+
+  DominanceFrontier &getDominanceFrontier() { return DF; }
+  const DominanceFrontier &getDominanceFrontier() const { return DF;  }
+
+  void releaseMemory() override;
+
+  bool runOnFunction(Function &) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  void print(raw_ostream &OS, const Module * = nullptr) const override;
+
+  void dump() const;
+};
+
+extern template class DominanceFrontierBase<BasicBlock, false>;
+extern template class DominanceFrontierBase<BasicBlock, true>;
+extern template class ForwardDominanceFrontierBase<BasicBlock>;
+
+/// \brief Analysis pass which computes a \c DominanceFrontier.
+class DominanceFrontierAnalysis
+    : public AnalysisInfoMixin<DominanceFrontierAnalysis> {
+  friend AnalysisInfoMixin<DominanceFrontierAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result type for this analysis pass.
+  using Result = DominanceFrontier;
+
+  /// \brief Run the analysis pass over a function and produce a dominance frontier.
+  DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
+};
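+
+// A minimal new-pass-manager sketch (illustrative only; MyPass is
+// hypothetical):
+//
+//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
+//     auto &DF = AM.getResult<DominanceFrontierAnalysis>(F);
+//     for (auto &Entry : DF)             // (block -> frontier set) pairs
+//       for (BasicBlock *FB : Entry.second)
+//         ; // FB is in the dominance frontier of Entry.first
+//     return PreservedAnalyses::all();
+//   }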
+
+/// \brief Printer pass for the \c DominanceFrontier.
+class DominanceFrontierPrinterPass
+    : public PassInfoMixin<DominanceFrontierPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit DominanceFrontierPrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_DOMINANCEFRONTIER_H
diff --git a/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h b/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h
new file mode 100644
index 0000000..dffb2e0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h
@@ -0,0 +1,231 @@
+//===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the generic implementation of the DominanceFrontier class, which
+// calculates and holds the dominance frontier for a function.
+//
+// This should be considered deprecated, don't add any more uses of this data
+// structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
+#define LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+template <class BlockT>
+class DFCalculateWorkObject {
+public:
+  using DomTreeNodeT = DomTreeNodeBase<BlockT>;
+
+  DFCalculateWorkObject(BlockT *B, BlockT *P, const DomTreeNodeT *N,
+                        const DomTreeNodeT *PN)
+      : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
+
+  BlockT *currentBB;
+  BlockT *parentBB;
+  const DomTreeNodeT *Node;
+  const DomTreeNodeT *parentNode;
+};
+
+template <class BlockT, bool IsPostDom>
+void DominanceFrontierBase<BlockT, IsPostDom>::removeBlock(BlockT *BB) {
+  assert(find(BB) != end() && "Block is not in DominanceFrontier!");
+  for (iterator I = begin(), E = end(); I != E; ++I)
+    I->second.erase(BB);
+  Frontiers.erase(BB);
+}
+
+template <class BlockT, bool IsPostDom>
+void DominanceFrontierBase<BlockT, IsPostDom>::addToFrontier(iterator I,
+                                                             BlockT *Node) {
+  assert(I != end() && "BB is not in DominanceFrontier!");
+  I->second.insert(Node);
+}
+
+template <class BlockT, bool IsPostDom>
+void DominanceFrontierBase<BlockT, IsPostDom>::removeFromFrontier(
+    iterator I, BlockT *Node) {
+  assert(I != end() && "BB is not in DominanceFrontier!");
+  assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
+  I->second.erase(Node);
+}
+
+template <class BlockT, bool IsPostDom>
+bool DominanceFrontierBase<BlockT, IsPostDom>::compareDomSet(
+    DomSetType &DS1, const DomSetType &DS2) const {
+  std::set<BlockT *> tmpSet;
+  for (BlockT *BB : DS2)
+    tmpSet.insert(BB);
+
+  for (typename DomSetType::const_iterator I = DS1.begin(), E = DS1.end();
+       I != E;) {
+    BlockT *Node = *I++;
+
+    if (tmpSet.erase(Node) == 0)
+      // Node is in DS1 but not in DS2.
+      return true;
+  }
+
+  if (!tmpSet.empty()) {
+    // There are nodes that are in DS2 but not in DS1.
+    return true;
+  }
+
+  // DS1 and DS2 match.
+  return false;
+}
+
+template <class BlockT, bool IsPostDom>
+bool DominanceFrontierBase<BlockT, IsPostDom>::compare(
+    DominanceFrontierBase<BlockT, IsPostDom> &Other) const {
+  DomSetMapType tmpFrontiers;
+  for (typename DomSetMapType::const_iterator I = Other.begin(),
+                                              E = Other.end();
+       I != E; ++I)
+    tmpFrontiers.insert(std::make_pair(I->first, I->second));
+
+  for (typename DomSetMapType::iterator I = tmpFrontiers.begin(),
+                                        E = tmpFrontiers.end();
+       I != E;) {
+    BlockT *Node = I->first;
+    const_iterator DFI = find(Node);
+    if (DFI == end())
+      return true;
+
+    if (compareDomSet(I->second, DFI->second))
+      return true;
+
+    ++I;
+    tmpFrontiers.erase(Node);
+  }
+
+  if (!tmpFrontiers.empty())
+    return true;
+
+  return false;
+}
+
+template <class BlockT, bool IsPostDom>
+void DominanceFrontierBase<BlockT, IsPostDom>::print(raw_ostream &OS) const {
+  for (const_iterator I = begin(), E = end(); I != E; ++I) {
+    OS << "  DomFrontier for BB ";
+    if (I->first)
+      I->first->printAsOperand(OS, false);
+    else
+      OS << " <<exit node>>";
+    OS << " is:\t";
+
+    const std::set<BlockT *> &BBs = I->second;
+
+    for (const BlockT *BB : BBs) {
+      OS << ' ';
+      if (BB)
+        BB->printAsOperand(OS, false);
+      else
+        OS << "<<exit node>>";
+    }
+    OS << '\n';
+  }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class BlockT, bool IsPostDom>
+void DominanceFrontierBase<BlockT, IsPostDom>::dump() const {
+  print(dbgs());
+}
+#endif
+
+template <class BlockT>
+const typename ForwardDominanceFrontierBase<BlockT>::DomSetType &
+ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
+                                                const DomTreeNodeT *Node) {
+  BlockT *BB = Node->getBlock();
+  DomSetType *Result = nullptr;
+
+  std::vector<DFCalculateWorkObject<BlockT>> workList;
+  SmallPtrSet<BlockT *, 32> visited;
+
+  workList.push_back(DFCalculateWorkObject<BlockT>(BB, nullptr, Node, nullptr));
+  do {
+    DFCalculateWorkObject<BlockT> *currentW = &workList.back();
+    assert(currentW && "Missing work object.");
+
+    BlockT *currentBB = currentW->currentBB;
+    BlockT *parentBB = currentW->parentBB;
+    const DomTreeNodeT *currentNode = currentW->Node;
+    const DomTreeNodeT *parentNode = currentW->parentNode;
+    assert(currentBB && "Invalid work object. Missing current Basic Block");
+    assert(currentNode && "Invalid work object. Missing current Node");
+    DomSetType &S = this->Frontiers[currentBB];
+
+    // Visit each block only once.
+    if (visited.insert(currentBB).second) {
+      // Loop over CFG successors to calculate DFlocal[currentNode]
+      for (const auto Succ : children<BlockT *>(currentBB)) {
+        // Does Node immediately dominate this successor?
+        if (DT[Succ]->getIDom() != currentNode)
+          S.insert(Succ);
+      }
+    }
+
+    // At this point, S is DFlocal.  Now we union in DFup's of our children...
+    // Loop through and visit the nodes that Node immediately dominates (Node's
+    // children in the IDomTree)
+    bool visitChild = false;
+    for (typename DomTreeNodeT::const_iterator NI = currentNode->begin(),
+                                               NE = currentNode->end();
+         NI != NE; ++NI) {
+      DomTreeNodeT *IDominee = *NI;
+      BlockT *childBB = IDominee->getBlock();
+      if (visited.count(childBB) == 0) {
+        workList.push_back(DFCalculateWorkObject<BlockT>(
+            childBB, currentBB, IDominee, currentNode));
+        visitChild = true;
+      }
+    }
+
+    // If no unvisited children were pushed, this block's frontier is
+    // complete: merge its contribution (DFup) into the parent's frontier
+    // and pop this block from the workList.
+    if (!visitChild) {
+      if (!parentBB) {
+        Result = &S;
+        break;
+      }
+
+      typename DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
+      DomSetType &parentSet = this->Frontiers[parentBB];
+      for (; CDFI != CDFE; ++CDFI) {
+        if (!DT.properlyDominates(parentNode, DT[*CDFI]))
+          parentSet.insert(*CDFI);
+      }
+      workList.pop_back();
+    }
+
+  } while (!workList.empty());
+
+  return *Result;
+}
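+
+// For reference, the recurrence computed above is the classic one from
+// Cytron et al.:
+//
+//   DFlocal(X) = { Y in succ(X) : idom(Y) != X }
+//   DFup(Z)    = { Y in DF(Z) : X does not properly dominate Y }
+//   DF(X)      = DFlocal(X) union DFup(Z) for every dom-tree child Z of X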
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h b/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h
new file mode 100644
index 0000000..2c45ab4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h
@@ -0,0 +1,102 @@
+//===- EHPersonalities.h - Compute EH-related information -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_EHPERSONALITIES_H
+#define LLVM_ANALYSIS_EHPERSONALITIES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class BasicBlock;
+class Function;
+class Value;
+
+enum class EHPersonality {
+  Unknown,
+  GNU_Ada,
+  GNU_C,
+  GNU_C_SjLj,
+  GNU_CXX,
+  GNU_CXX_SjLj,
+  GNU_ObjC,
+  MSVC_X86SEH,
+  MSVC_Win64SEH,
+  MSVC_CXX,
+  CoreCLR,
+  Rust
+};
+
+/// \brief See if the given exception handling personality function is one
+/// that we understand.  If so, return a description of it; otherwise return
+/// Unknown.
+EHPersonality classifyEHPersonality(const Value *Pers);
+
+StringRef getEHPersonalityName(EHPersonality Pers);
+
+EHPersonality getDefaultEHPersonality(const Triple &T);
+
+/// \brief Returns true if this personality function catches asynchronous
+/// exceptions.
+inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
+  // The two SEH personality functions can catch asynchronous exceptions. We
+  // assume unknown personalities don't catch asynchronous exceptions.
+  switch (Pers) {
+  case EHPersonality::MSVC_X86SEH:
+  case EHPersonality::MSVC_Win64SEH:
+    return true;
+  default:
+    return false;
+  }
+  llvm_unreachable("invalid enum");
+}
+
+/// \brief Returns true if this is a personality function that invokes
+/// handler funclets (which must return to it).
+inline bool isFuncletEHPersonality(EHPersonality Pers) {
+  switch (Pers) {
+  case EHPersonality::MSVC_CXX:
+  case EHPersonality::MSVC_X86SEH:
+  case EHPersonality::MSVC_Win64SEH:
+  case EHPersonality::CoreCLR:
+    return true;
+  default:
+    return false;
+  }
+  llvm_unreachable("invalid enum");
+}
+
+/// \brief Return true if this personality may be safely removed if there
+/// are no invoke instructions remaining in the current function.
+inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
+  switch (Pers) {
+  case EHPersonality::Unknown:
+    return false;
+  // All known personalities currently have this behavior
+  default:
+    return true;
+  }
+  llvm_unreachable("invalid enum");
+}
+
+bool canSimplifyInvokeNoUnwind(const Function *F);
+
+typedef TinyPtrVector<BasicBlock *> ColorVector;
+
+/// \brief If an EH funclet personality is in use (see isFuncletEHPersonality),
+/// this will recompute which blocks are in which funclet. It is possible that
+/// some blocks are in multiple funclets. Consider this analysis to be
+/// expensive.
+DenseMap<BasicBlock *, ColorVector> colorEHFunclets(Function &F);
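+
+// A minimal query sketch (illustrative only):
+//
+//   if (F.hasPersonalityFn()) {
+//     EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
+//     if (isFuncletEHPersonality(Pers)) {
+//       DenseMap<BasicBlock *, ColorVector> Colors = colorEHFunclets(F);
+//       // Colors[&BB] lists the funclet entry blocks that BB belongs to.
+//     }
+//   }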
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/GlobalsModRef.h b/linux-x64/clang/include/llvm/Analysis/GlobalsModRef.h
new file mode 100644
index 0000000..09cef68
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/GlobalsModRef.h
@@ -0,0 +1,156 @@
+//===- GlobalsModRef.h - Simple Mod/Ref AA for Globals ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a simple mod/ref and alias analysis over globals.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_GLOBALSMODREF_H
+#define LLVM_ANALYSIS_GLOBALSMODREF_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <list>
+
+namespace llvm {
+
+/// An alias analysis result set for globals.
+///
+/// This focuses on handling aliasing properties of globals and interprocedural
+/// function call mod/ref information.
+class GlobalsAAResult : public AAResultBase<GlobalsAAResult> {
+  friend AAResultBase<GlobalsAAResult>;
+
+  class FunctionInfo;
+
+  const DataLayout &DL;
+  const TargetLibraryInfo &TLI;
+
+  /// The globals that do not have their addresses taken.
+  SmallPtrSet<const GlobalValue *, 8> NonAddressTakenGlobals;
+
+  /// IndirectGlobals - The memory pointed to by this global is known to be
+  /// 'owned' by the global.
+  SmallPtrSet<const GlobalValue *, 8> IndirectGlobals;
+
+  /// AllocsForIndirectGlobals - If an instruction allocates memory for an
+  /// indirect global, this map indicates which one.
+  DenseMap<const Value *, const GlobalValue *> AllocsForIndirectGlobals;
+
+  /// For each function, keep track of what globals are modified or read.
+  DenseMap<const Function *, FunctionInfo> FunctionInfos;
+
+  /// A map of functions to SCC. The SCCs are described by a simple integer
+  /// ID that is only useful for comparing for equality (are two functions
+  /// in the same SCC or not?)
+  DenseMap<const Function *, unsigned> FunctionToSCCMap;
+
+  /// Handle to clear this analysis on deletion of values.
+  struct DeletionCallbackHandle final : CallbackVH {
+    GlobalsAAResult *GAR;
+    std::list<DeletionCallbackHandle>::iterator I;
+
+    DeletionCallbackHandle(GlobalsAAResult &GAR, Value *V)
+        : CallbackVH(V), GAR(&GAR) {}
+
+    void deleted() override;
+  };
+
+  /// List of callbacks for globals being tracked by this analysis. Note that
+  /// these objects are quite large, but we only anticipate having one per
+  /// global tracked by this analysis. There are numerous optimizations we
+  /// could perform to the memory utilization here if this becomes a problem.
+  std::list<DeletionCallbackHandle> Handles;
+
+  explicit GlobalsAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI);
+
+public:
+  GlobalsAAResult(GlobalsAAResult &&Arg);
+  ~GlobalsAAResult();
+
+  static GlobalsAAResult analyzeModule(Module &M, const TargetLibraryInfo &TLI,
+                                       CallGraph &CG);
+
+  //------------------------------------------------
+  // Implement the AliasAnalysis API
+  //
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+  using AAResultBase::getModRefInfo;
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+  /// getModRefBehavior - Return the most generic mod/ref behavior of the
+  /// specified function, independent of any particular call site.
+  FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+  /// getModRefBehavior - Return the behavior of the specified function if
+  /// called from the specified call site.  The call site may be null in which
+  /// case the most generic behavior of this function should be returned.
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+private:
+  FunctionInfo *getFunctionInfo(const Function *F);
+
+  void AnalyzeGlobals(Module &M);
+  void AnalyzeCallGraph(CallGraph &CG, Module &M);
+  bool AnalyzeUsesOfPointer(Value *V,
+                            SmallPtrSetImpl<Function *> *Readers = nullptr,
+                            SmallPtrSetImpl<Function *> *Writers = nullptr,
+                            GlobalValue *OkayStoreDest = nullptr);
+  bool AnalyzeIndirectGlobalMemory(GlobalVariable *GV);
+  void CollectSCCMembership(CallGraph &CG);
+
+  bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
+  ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS,
+                                      const GlobalValue *GV);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class GlobalsAA : public AnalysisInfoMixin<GlobalsAA> {
+  friend AnalysisInfoMixin<GlobalsAA>;
+  static AnalysisKey Key;
+
+public:
+  typedef GlobalsAAResult Result;
+
+  GlobalsAAResult run(Module &M, ModuleAnalysisManager &AM);
+};
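+
+// A minimal registration sketch for the new pass manager (illustrative only;
+// this mirrors how an AAManager aggregates module-level AA results):
+//
+//   AAManager AA;
+//   AA.registerModuleAnalysis<GlobalsAA>();
+//   // ... register AA with the FunctionAnalysisManager as usual.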
+
+/// Legacy wrapper pass to provide the GlobalsAAResult object.
+class GlobalsAAWrapperPass : public ModulePass {
+  std::unique_ptr<GlobalsAAResult> Result;
+
+public:
+  static char ID;
+
+  GlobalsAAWrapperPass();
+
+  GlobalsAAResult &getResult() { return *Result; }
+  const GlobalsAAResult &getResult() const { return *Result; }
+
+  bool runOnModule(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createGlobalsAAWrapperPass - This pass provides alias and mod/ref info for
+// global values that do not have their addresses taken.
+//
+ModulePass *createGlobalsAAWrapperPass();
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/IVUsers.h b/linux-x64/clang/include/llvm/Analysis/IVUsers.h
new file mode 100644
index 0000000..035b974
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IVUsers.h
@@ -0,0 +1,202 @@
+//===- llvm/Analysis/IVUsers.h - Induction Variable Users -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements bookkeeping for "interesting" users of expressions
+// computed from induction variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IVUSERS_H
+#define LLVM_ANALYSIS_IVUSERS_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Instruction;
+class Value;
+class ScalarEvolution;
+class SCEV;
+class IVUsers;
+class DataLayout;
+
+/// IVStrideUse - Keep track of one use of a strided induction variable.
+/// The Expr member keeps track of the expression, User is the actual user
+/// instruction of the operand, and 'OperandValToReplace' is the operand of
+/// the User that is the use.
+class IVStrideUse final : public CallbackVH, public ilist_node<IVStrideUse> {
+  friend class IVUsers;
+public:
+  IVStrideUse(IVUsers *P, Instruction* U, Value *O)
+    : CallbackVH(U), Parent(P), OperandValToReplace(O) {
+  }
+
+  /// getUser - Return the user instruction for this use.
+  Instruction *getUser() const {
+    return cast<Instruction>(getValPtr());
+  }
+
+  /// setUser - Assign a new user instruction for this use.
+  void setUser(Instruction *NewUser) {
+    setValPtr(NewUser);
+  }
+
+  /// getOperandValToReplace - Return the Value of the operand in the user
+  /// instruction that this IVStrideUse is representing.
+  Value *getOperandValToReplace() const {
+    return OperandValToReplace;
+  }
+
+  /// setOperandValToReplace - Assign a new Value as the operand value
+  /// to replace.
+  void setOperandValToReplace(Value *Op) {
+    OperandValToReplace = Op;
+  }
+
+  /// getPostIncLoops - Return the set of loops for which the expression has
+  /// been adjusted to use post-inc mode.
+  const PostIncLoopSet &getPostIncLoops() const {
+    return PostIncLoops;
+  }
+
+  /// transformToPostInc - Transform the expression to post-inc form for the
+  /// given loop.
+  void transformToPostInc(const Loop *L);
+
+private:
+  /// Parent - a pointer to the IVUsers that owns this IVStrideUse.
+  IVUsers *Parent;
+
+  /// OperandValToReplace - The Value of the operand in the user instruction
+  /// that this IVStrideUse is representing.
+  WeakTrackingVH OperandValToReplace;
+
+  /// PostIncLoops - The set of loops for which Expr has been adjusted to
+  /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
+  PostIncLoopSet PostIncLoops;
+
+  /// Deleted - Implementation of CallbackVH virtual function to
+  /// receive notification when the User is deleted.
+  void deleted() override;
+};
+
+class IVUsers {
+  friend class IVStrideUse;
+  Loop *L;
+  AssumptionCache *AC;
+  LoopInfo *LI;
+  DominatorTree *DT;
+  ScalarEvolution *SE;
+  SmallPtrSet<Instruction*, 16> Processed;
+
+  /// IVUses - A list of all tracked IV uses of induction variable expressions
+  /// we are interested in.
+  ilist<IVStrideUse> IVUses;
+
+  // Ephemeral values used by @llvm.assume in this function.
+  SmallPtrSet<const Value *, 32> EphValues;
+
+public:
+  IVUsers(Loop *L, AssumptionCache *AC, LoopInfo *LI, DominatorTree *DT,
+          ScalarEvolution *SE);
+
+  IVUsers(IVUsers &&X)
+      : L(std::move(X.L)), AC(std::move(X.AC)), LI(std::move(X.LI)),
+        DT(std::move(X.DT)), SE(std::move(X.SE)),
+        Processed(std::move(X.Processed)), IVUses(std::move(X.IVUses)),
+        EphValues(std::move(X.EphValues)) {
+    for (IVStrideUse &U : IVUses)
+      U.Parent = this;
+  }
+  IVUsers(const IVUsers &) = delete;
+  IVUsers &operator=(IVUsers &&) = delete;
+  IVUsers &operator=(const IVUsers &) = delete;
+
+  Loop *getLoop() const { return L; }
+
+  /// AddUsersIfInteresting - Inspect the specified Instruction.  If it is a
+  /// reducible SCEV, recursively add its users to the IVUsesByStride set and
+  /// return true.  Otherwise, return false.
+  bool AddUsersIfInteresting(Instruction *I);
+
+  IVStrideUse &AddUser(Instruction *User, Value *Operand);
+
+  /// getReplacementExpr - Return a SCEV expression which computes the
+  /// value of the OperandValToReplace of the given IVStrideUse.
+  const SCEV *getReplacementExpr(const IVStrideUse &IU) const;
+
+  /// getExpr - Return the expression for the use.
+  const SCEV *getExpr(const IVStrideUse &IU) const;
+
+  const SCEV *getStride(const IVStrideUse &IU, const Loop *L) const;
+
+  typedef ilist<IVStrideUse>::iterator iterator;
+  typedef ilist<IVStrideUse>::const_iterator const_iterator;
+  iterator begin() { return IVUses.begin(); }
+  iterator end()   { return IVUses.end(); }
+  const_iterator begin() const { return IVUses.begin(); }
+  const_iterator end() const   { return IVUses.end(); }
+  bool empty() const { return IVUses.empty(); }
+
+  bool isIVUserOrOperand(Instruction *Inst) const {
+    return Processed.count(Inst);
+  }
+
+  void releaseMemory();
+
+  void print(raw_ostream &OS, const Module * = nullptr) const;
+
+  /// dump - This method is used for debugging.
+  void dump() const;
+
+protected:
+  bool AddUsersImpl(Instruction *I, SmallPtrSetImpl<Loop*> &SimpleLoopNests);
+};
+
+Pass *createIVUsersPass();
+
+class IVUsersWrapperPass : public LoopPass {
+  std::unique_ptr<IVUsers> IU;
+
+public:
+  static char ID;
+
+  IVUsersWrapperPass();
+
+  IVUsers &getIU() { return *IU; }
+  const IVUsers &getIU() const { return *IU; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
+
+  void releaseMemory() override;
+
+  void print(raw_ostream &OS, const Module * = nullptr) const override;
+};
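+
+// A minimal iteration sketch from a legacy loop pass (illustrative only):
+//
+//   IVUsers &IU = getAnalysis<IVUsersWrapperPass>().getIU();
+//   for (IVStrideUse &U : IU) {
+//     Instruction *UserInst = U.getUser();
+//     const SCEV *Expr = IU.getExpr(U); // expression for the replaced operand
+//   }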
+
+/// Analysis pass that exposes the \c IVUsers for a loop.
+class IVUsersAnalysis : public AnalysisInfoMixin<IVUsersAnalysis> {
+  friend AnalysisInfoMixin<IVUsersAnalysis>;
+  static AnalysisKey Key;
+
+public:
+  typedef IVUsers Result;
+
+  IVUsers run(Loop &L, LoopAnalysisManager &AM,
+              LoopStandardAnalysisResults &AR);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h b/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h
new file mode 100644
index 0000000..8b1c101
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h
@@ -0,0 +1,69 @@
+//===- IndirectCallPromotionAnalysis.h - Indirect call analysis -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Interface to identify indirect call promotion candidates.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INDIRECTCALLPROMOTIONANALYSIS_H
+#define LLVM_ANALYSIS_INDIRECTCALLPROMOTIONANALYSIS_H
+
+#include "llvm/ProfileData/InstrProf.h"
+
+namespace llvm {
+
+class Instruction;
+
+// Class for identifying profitable indirect call promotion candidates when
+// the indirect-call value profile metadata is available.
+class ICallPromotionAnalysis {
+private:
+  // Allocate space to read the profile annotation.
+  std::unique_ptr<InstrProfValueData[]> ValueDataArray;
+
+  // Count is the call count for the direct-call target.
+  // TotalCount is the total call count for the indirect-call callsite.
+  // RemainingCount is the TotalCount minus promoted-direct-call count.
+  // Return true if we should promote this indirect-call target.
+  bool isPromotionProfitable(uint64_t Count, uint64_t TotalCount,
+                             uint64_t RemainingCount);
+
+  // Returns the number of profitable candidates to promote for the
+  // current ValueDataArray and the given \p Inst.
+  uint32_t getProfitablePromotionCandidates(const Instruction *Inst,
+                                            uint32_t NumVals,
+                                            uint64_t TotalCount);
+
+  // Noncopyable
+  ICallPromotionAnalysis(const ICallPromotionAnalysis &other) = delete;
+  ICallPromotionAnalysis &
+  operator=(const ICallPromotionAnalysis &other) = delete;
+
+public:
+  ICallPromotionAnalysis();
+
+  /// \brief Returns reference to array of InstrProfValueData for the given
+  /// instruction \p I.
+  ///
+  /// The \p NumVals, \p TotalCount and \p NumCandidates
+  /// are set to the number of values in the array, the total profile count
+  /// of the indirect call \p I, and the number of profitable candidates
+  /// in the given array (which is sorted in reverse order of profitability).
+  ///
+  /// The returned array space is owned by this class, and overwritten on
+  /// subsequent calls.
+  ArrayRef<InstrProfValueData>
+  getPromotionCandidatesForInstruction(const Instruction *I, uint32_t &NumVals,
+                                       uint64_t &TotalCount,
+                                       uint32_t &NumCandidates);
+};
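+
+// A minimal usage sketch (illustrative only; promote() is a hypothetical
+// callback):
+//
+//   ICallPromotionAnalysis ICA;
+//   uint32_t NumVals, NumCandidates;
+//   uint64_t TotalCount;
+//   ArrayRef<InstrProfValueData> VDs =
+//       ICA.getPromotionCandidatesForInstruction(I, NumVals, TotalCount,
+//                                                NumCandidates);
+//   for (const auto &VD : VDs.take_front(NumCandidates))
+//     promote(I, VD.Value, VD.Count);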
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/IndirectCallSiteVisitor.h b/linux-x64/clang/include/llvm/Analysis/IndirectCallSiteVisitor.h
new file mode 100644
index 0000000..dde56a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IndirectCallSiteVisitor.h
@@ -0,0 +1,35 @@
+//===-- IndirectCallSiteVisitor.h - indirect call-sites visitor -----------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a visitor class and a helper function that find all
+// indirect call-sites in a function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/InstVisitor.h"
+#include <vector>
+
+namespace llvm {
+// Visitor class that finds all indirect call sites.
+struct PGOIndirectCallSiteVisitor
+    : public InstVisitor<PGOIndirectCallSiteVisitor> {
+  std::vector<Instruction *> IndirectCallInsts;
+  PGOIndirectCallSiteVisitor() {}
+
+  void visitCallSite(CallSite CS) {
+    if (CS.isIndirectCall())
+      IndirectCallInsts.push_back(CS.getInstruction());
+  }
+};
+
+// Helper function that finds all indirect call sites.
+inline std::vector<Instruction *> findIndirectCallSites(Function &F) {
+  PGOIndirectCallSiteVisitor ICV;
+  ICV.visit(F);
+  return ICV.IndirectCallInsts;
+}
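+
+// A minimal usage sketch (illustrative only):
+//
+//   for (Instruction *I : findIndirectCallSites(F))
+//     ; // e.g. feed each indirect call into ICallPromotionAnalysis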
+} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/Analysis/InlineCost.h b/linux-x64/clang/include/llvm/Analysis/InlineCost.h
new file mode 100644
index 0000000..138d3ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/InlineCost.h
@@ -0,0 +1,214 @@
+//===- InlineCost.h - Cost analysis for inliner -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements heuristics for inlining decisions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INLINECOST_H
+#define LLVM_ANALYSIS_INLINECOST_H
+
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include <cassert>
+#include <climits>
+
+namespace llvm {
+class AssumptionCacheTracker;
+class BlockFrequencyInfo;
+class CallSite;
+class DataLayout;
+class Function;
+class ProfileSummaryInfo;
+class TargetTransformInfo;
+
+namespace InlineConstants {
+// Various thresholds used by inline cost analysis.
+/// Use when optsize (-Os) is specified.
+const int OptSizeThreshold = 50;
+
+/// Use when minsize (-Oz) is specified.
+const int OptMinSizeThreshold = 5;
+
+/// Use when -O3 is specified.
+const int OptAggressiveThreshold = 250;
+
+// Various magic constants used to adjust heuristics.
+const int InstrCost = 5;
+const int IndirectCallThreshold = 100;
+const int CallPenalty = 25;
+const int LastCallToStaticBonus = 15000;
+const int ColdccPenalty = 2000;
+const int NoreturnPenalty = 10000;
+/// Do not inline functions which allocate this many bytes on the stack
+/// when the caller is recursive.
+const unsigned TotalAllocaSizeRecursiveCaller = 1024;
+}
+
+/// \brief Represents the cost of inlining a function.
+///
+/// This supports special values for functions which should "always" or
+/// "never" be inlined. Otherwise, the cost represents a unitless amount;
+/// smaller values increase the likelihood of the function being inlined.
+///
+/// Objects of this type also provide the adjusted threshold for inlining
+/// based on the information available for a particular callsite. They can be
+/// directly tested to determine if inlining should occur given the cost and
+/// threshold for this cost metric.
+class InlineCost {
+  enum SentinelValues {
+    AlwaysInlineCost = INT_MIN,
+    NeverInlineCost = INT_MAX
+  };
+
+  /// \brief The estimated cost of inlining this callsite.
+  const int Cost;
+
+  /// \brief The adjusted threshold against which this cost was computed.
+  const int Threshold;
+
+  // Trivial constructor, interesting logic in the factory functions below.
+  InlineCost(int Cost, int Threshold) : Cost(Cost), Threshold(Threshold) {}
+
+public:
+  static InlineCost get(int Cost, int Threshold) {
+    assert(Cost > AlwaysInlineCost && "Cost crosses sentinel value");
+    assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
+    return InlineCost(Cost, Threshold);
+  }
+  static InlineCost getAlways() {
+    return InlineCost(AlwaysInlineCost, 0);
+  }
+  static InlineCost getNever() {
+    return InlineCost(NeverInlineCost, 0);
+  }
+
+  /// \brief Test whether the inline cost is low enough for inlining.
+  explicit operator bool() const {
+    return Cost < Threshold;
+  }
+
+  bool isAlways() const { return Cost == AlwaysInlineCost; }
+  bool isNever() const { return Cost == NeverInlineCost; }
+  bool isVariable() const { return !isAlways() && !isNever(); }
+
+  /// \brief Get the inline cost estimate.
+  /// It is an error to call this on an "always" or "never" InlineCost.
+  int getCost() const {
+    assert(isVariable() && "Invalid access of InlineCost");
+    return Cost;
+  }
+
+  /// \brief Get the threshold against which the cost was computed
+  int getThreshold() const {
+    assert(isVariable() && "Invalid access of InlineCost");
+    return Threshold;
+  }
+
+  /// \brief Get the cost delta from the threshold for inlining.
+  /// Only valid if the cost is of the variable kind. Returns a negative
+  /// value if the cost is too high to inline.
+  int getCostDelta() const { return Threshold - getCost(); }
+};
+
+/// Thresholds to tune inline cost analysis. The inline cost analysis decides
+/// which threshold applies to a given call site and applies it; when no
+/// specialized threshold applies, DefaultThreshold is used. An Optional
+/// threshold is applied only when it holds a valid value. Typically, users of
+/// inline cost analysis obtain an InlineParams object through one of the
+/// \c getInlineParams methods and pass it to \c getInlineCost. Specialized
+/// inliners (such as the pre-inliner) might use custom logic to compute an
+/// \c InlineParams object.
+
+struct InlineParams {
+  /// The default threshold to start with for a callee.
+  int DefaultThreshold;
+
+  /// Threshold to use for callees with inline hint.
+  Optional<int> HintThreshold;
+
+  /// Threshold to use for cold callees.
+  Optional<int> ColdThreshold;
+
+  /// Threshold to use when the caller is optimized for size.
+  Optional<int> OptSizeThreshold;
+
+  /// Threshold to use when the caller is optimized for minsize.
+  Optional<int> OptMinSizeThreshold;
+
+  /// Threshold to use when the callsite is considered hot.
+  Optional<int> HotCallSiteThreshold;
+
+  /// Threshold to use when the callsite is considered hot relative to function
+  /// entry.
+  Optional<int> LocallyHotCallSiteThreshold;
+
+  /// Threshold to use when the callsite is considered cold.
+  Optional<int> ColdCallSiteThreshold;
+
+  /// Compute inline cost even when the cost has exceeded the threshold.
+  Optional<bool> ComputeFullInlineCost;
+};
+
+/// Generate the parameters to tune the inline cost analysis based only on the
+/// commandline options.
+InlineParams getInlineParams();
+
+/// Generate the parameters to tune the inline cost analysis based on command
+/// line options. If -inline-threshold option is not explicitly passed,
+/// \p Threshold is used as the default threshold.
+InlineParams getInlineParams(int Threshold);
+
+/// Generate the parameters to tune the inline cost analysis based on command
+/// line options. If -inline-threshold option is not explicitly passed,
+/// the default threshold is computed from \p OptLevel and \p SizeOptLevel.
+/// An \p OptLevel value above 3 is considered an aggressive optimization mode.
+/// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
+/// the -Oz flag.
+InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
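+
+// For example (following the mapping above), getInlineParams(2, 1) tunes the
+// analysis for -Os and getInlineParams(2, 2) for -Oz.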
+
+/// Return the cost associated with a callsite, including parameter passing
+/// and the call/return instruction.
+int getCallsiteCost(CallSite CS, const DataLayout &DL);
+
+/// \brief Get an InlineCost object representing the cost of inlining this
+/// callsite.
+///
+/// Note that a default threshold is passed into this function. This threshold
+/// could be modified based on the callsite's properties, and only costs below this
+/// new threshold are computed with any accuracy. The new threshold can be
+/// used to bound the computation necessary to determine whether the cost is
+/// sufficiently low to warrant inlining.
+///
+/// Also note that calling this function *dynamically* computes the cost of
+/// inlining the callsite. It is an expensive, heavyweight call.
+InlineCost getInlineCost(
+    CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
+    std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+    Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
+    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE = nullptr);
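+
+// A minimal decision sketch (illustrative only; doInline is hypothetical):
+//
+//   InlineCost IC = getInlineCost(CS, Params, CalleeTTI, GetAssumptionCache,
+//                                 GetBFI, PSI);
+//   if (IC)                       // operator bool(): Cost < Threshold
+//     doInline(CS);
+//   else if (IC.isVariable())
+//     ; // rejected: IC.getCostDelta() is negative here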
+
+/// \brief Get an InlineCost with the callee explicitly specified.
+/// This allows you to calculate the cost of inlining a function via a
+/// pointer. This behaves exactly as the version with no explicit callee
+/// parameter in all other respects.
+InlineCost
+getInlineCost(CallSite CS, Function *Callee, const InlineParams &Params,
+              TargetTransformInfo &CalleeTTI,
+              std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+              Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
+              ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE);
+
+/// \brief Minimal filter to detect invalid constructs for inlining.
+bool isInlineViable(Function &Callee);
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/InstructionSimplify.h b/linux-x64/clang/include/llvm/Analysis/InstructionSimplify.h
new file mode 100644
index 0000000..4f896bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/InstructionSimplify.h
@@ -0,0 +1,252 @@
+//===-- InstructionSimplify.h - Fold instrs into simpler forms --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares routines for folding instructions into simpler forms
+// that do not require creating new instructions.  This does constant folding
+// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
+// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
+// ("and i32 %x, %x" -> "%x").  If the simplification is also an instruction
+// then it dominates the original instruction.
+//
+// These routines implicitly resolve undef uses. The easiest way to be safe when
+// using these routines to obtain simplified values for existing instructions is
+// to always replace all uses of the instructions with the resulting simplified
+// values. This will prevent other code from seeing the same undef uses and
+// resolving them to different values.
+//
+// These routines are designed to tolerate moderately incomplete IR, such as
+// instructions that are not connected to basic blocks yet. However, they do
+// require that all the IR that they encounter be valid. In particular, they
+// require that all non-constant values be defined in the same function, and the
+// same call context of that function (and not split between caller and callee
+// contexts of a directly recursive call, for example).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+class Function;
+template <typename T, typename... TArgs> class AnalysisManager;
+template <class T> class ArrayRef;
+class AssumptionCache;
+class DominatorTree;
+class Instruction;
+class ImmutableCallSite;
+class DataLayout;
+class FastMathFlags;
+struct LoopStandardAnalysisResults;
+class OptimizationRemarkEmitter;
+class Pass;
+class TargetLibraryInfo;
+class Type;
+class Value;
+
+struct SimplifyQuery {
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI = nullptr;
+  const DominatorTree *DT = nullptr;
+  AssumptionCache *AC = nullptr;
+  const Instruction *CxtI = nullptr;
+
+  SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
+      : DL(DL), CxtI(CXTI) {}
+
+  SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
+                const DominatorTree *DT = nullptr,
+                AssumptionCache *AC = nullptr,
+                const Instruction *CXTI = nullptr)
+      : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
+  SimplifyQuery getWithInstruction(Instruction *I) const {
+    SimplifyQuery Copy(*this);
+    Copy.CxtI = I;
+    return Copy;
+  }
+};
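+
+// A minimal usage sketch (illustrative only; instructions(F) comes from
+// llvm/IR/InstIterator.h):
+//
+//   const DataLayout &DL = F.getParent()->getDataLayout();
+//   SimplifyQuery Q(DL, &TLI, &DT, &AC);
+//   for (Instruction &I : instructions(F))
+//     if (Value *V = SimplifyInstruction(&I, Q.getWithInstruction(&I)))
+//       I.replaceAllUsesWith(V); // replace *all* uses, per the note above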
+
+// NOTE: the explicit multiple argument versions of these functions are
+// deprecated.
+// Please use the SimplifyQuery versions in new code.
+
+/// Given operands for an Add, fold the result or return null.
+Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+                       const SimplifyQuery &Q);
+
+/// Given operands for a Sub, fold the result or return null.
+Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+                       const SimplifyQuery &Q);
+
+/// Given operands for an FAdd, fold the result or return null.
+Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an FSub, fold the result or return null.
+Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an FMul, fold the result or return null.
+Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+                        const SimplifyQuery &Q);
+
+/// Given operands for a Mul, fold the result or return null.
+Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an SDiv, fold the result or return null.
+Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for a UDiv, fold the result or return null.
+Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an FDiv, fold the result or return null.
+Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an SRem, fold the result or return null.
+Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for a URem, fold the result or return null.
+Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an FRem, fold the result or return null.
+Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+                        const SimplifyQuery &Q);
+
+/// Given operands for a Shl, fold the result or return null.
+Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+                       const SimplifyQuery &Q);
+
+/// Given operands for a LShr, fold the result or return null.
+Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an AShr, fold the result or return null.
+Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an And, fold the result or return null.
+Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an Or, fold the result or return null.
+Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an Xor, fold the result or return null.
+Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+
+/// Given operands for an ICmpInst, fold the result or return null.
+Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+                        const SimplifyQuery &Q);
+
+/// Given operands for an FCmpInst, fold the result or return null.
+Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+                        FastMathFlags FMF, const SimplifyQuery &Q);
+
+/// Given operands for a SelectInst, fold the result or return null.
+Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+                          const SimplifyQuery &Q);
+
+/// Given operands for a GetElementPtrInst, fold the result or return null.
+Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
+                       const SimplifyQuery &Q);
+
+/// Given operands for an InsertValueInst, fold the result or return null.
+Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+                               const SimplifyQuery &Q);
+
+/// Given operands for an InsertElement, fold the result or return null.
+Value *SimplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
+                                 const SimplifyQuery &Q);
+
+/// Given operands for an ExtractValueInst, fold the result or return null.
+Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+                                const SimplifyQuery &Q);
+
+/// Given operands for an ExtractElementInst, fold the result or return null.
+Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
+                                  const SimplifyQuery &Q);
+
+/// Given operands for a CastInst, fold the result or return null.
+Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
+                        const SimplifyQuery &Q);
+
+/// Given operands for a ShuffleVectorInst, fold the result or return null.
+Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
+                                 Type *RetTy, const SimplifyQuery &Q);
+
+//=== Helper functions for higher up the class hierarchy.
+
+/// Given operands for a CmpInst, fold the result or return null.
+Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+                       const SimplifyQuery &Q);
+
+/// Given operands for a BinaryOperator, fold the result or return null.
+Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+                     const SimplifyQuery &Q);
+
+/// Given operands for an FP BinaryOperator, fold the result or return null.
+/// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
+/// result. If FastMathFlags are not needed, this simply falls back to
+/// SimplifyBinOp.
+Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+                       FastMathFlags FMF, const SimplifyQuery &Q);
+
+/// Given a callsite, fold the result or return null.
+Value *SimplifyCall(ImmutableCallSite CS, const SimplifyQuery &Q);
+
+/// Given a function and iterators over arguments, fold the result or return
+/// null.
+Value *SimplifyCall(ImmutableCallSite CS, Value *V, User::op_iterator ArgBegin,
+                    User::op_iterator ArgEnd, const SimplifyQuery &Q);
+
+/// Given a function and set of arguments, fold the result or return null.
+Value *SimplifyCall(ImmutableCallSite CS, Value *V, ArrayRef<Value *> Args,
+                    const SimplifyQuery &Q);
+
+/// See if we can compute a simplified version of this instruction. If not,
+/// return null.
+Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
+                           OptimizationRemarkEmitter *ORE = nullptr);
+
+/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
+///
+/// This first performs a normal RAUW of I with SimpleV. It then recursively
+/// attempts to simplify those users updated by the operation. The 'I'
+/// instruction must not be equal to the simplified value 'SimpleV'.
+///
+/// The function returns true if any simplifications were performed.
+bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
+                                   const TargetLibraryInfo *TLI = nullptr,
+                                   const DominatorTree *DT = nullptr,
+                                   AssumptionCache *AC = nullptr);
+
+/// Recursively attempt to simplify an instruction.
+///
+/// This routine uses SimplifyInstruction to simplify 'I', and if successful
+/// replaces uses of 'I' with the simplified value. It then recurses on each
+/// of the users impacted. It returns true if any simplifications were
+/// performed.
+bool recursivelySimplifyInstruction(Instruction *I,
+                                    const TargetLibraryInfo *TLI = nullptr,
+                                    const DominatorTree *DT = nullptr,
+                                    AssumptionCache *AC = nullptr);
+
+// These helper functions return a SimplifyQuery structure that contains as
+// many of the optional analyses we use as are currently valid.  This is the
+// strongly preferred way of constructing SimplifyQuery in passes.
+const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
+template <class T, class... TArgs>
+const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
+                                         Function &);
+const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
+                                         const DataLayout &);
+} // end namespace llvm
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Analysis/Interval.h b/linux-x64/clang/include/llvm/Analysis/Interval.h
new file mode 100644
index 0000000..f3714dd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Interval.h
@@ -0,0 +1,142 @@
+//===- llvm/Analysis/Interval.h - Interval Class Declaration ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Interval class, which
+// represents a set of CFG nodes and is a portion of an interval partition.
+//
+// Intervals have some interesting and useful properties, including the
+// following:
+//    1. The header node of an interval dominates all of the elements of the
+//       interval
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVAL_H
+#define LLVM_ANALYSIS_INTERVAL_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+//
+/// Interval Class - An Interval is a set of nodes defined such that every node
+/// in the interval has all of its predecessors in the interval (except for the
+/// header).
+///
+class Interval {
+  /// HeaderNode - The header BasicBlock, which dominates all BasicBlocks in this
+  /// interval.  Also, any loops in this interval must go through the HeaderNode.
+  ///
+  BasicBlock *HeaderNode;
+
+public:
+  using succ_iterator = std::vector<BasicBlock*>::iterator;
+  using pred_iterator = std::vector<BasicBlock*>::iterator;
+  using node_iterator = std::vector<BasicBlock*>::iterator;
+
+  inline Interval(BasicBlock *Header) : HeaderNode(Header) {
+    Nodes.push_back(Header);
+  }
+
+  inline BasicBlock *getHeaderNode() const { return HeaderNode; }
+
+  /// Nodes - The basic blocks in this interval.
+  std::vector<BasicBlock*> Nodes;
+
+  /// Successors - List of BasicBlocks that are reachable directly from nodes in
+  /// this interval, but are not in the interval themselves.
+  /// These nodes necessarily must be header nodes for other intervals.
+  std::vector<BasicBlock*> Successors;
+
+  /// Predecessors - List of BasicBlocks that have this Interval's header block
+  /// as one of their successors.
+  std::vector<BasicBlock*> Predecessors;
+
+  /// contains - Find out if a basic block is in this interval
+  inline bool contains(BasicBlock *BB) const {
+    for (BasicBlock *Node : Nodes)
+      if (Node == BB)
+        return true;
+    return false;
+    // I don't want the dependency on <algorithm>
+    //return find(Nodes.begin(), Nodes.end(), BB) != Nodes.end();
+  }
+
+  /// isSuccessor - find out if a basic block is a successor of this Interval
+  inline bool isSuccessor(BasicBlock *BB) const {
+    for (BasicBlock *Successor : Successors)
+      if (Successor == BB)
+        return true;
+    return false;
+    // I don't want the dependency on <algorithm>
+    //return find(Successors.begin(), Successors.end(), BB) != Successors.end();
+  }
+
+  /// Equality operator.  It is only valid to compare two intervals from the
+  /// same partition, because of this, all we have to check is the header node
+  /// for equality.
+  inline bool operator==(const Interval &I) const {
+    return HeaderNode == I.HeaderNode;
+  }
+
+  /// isLoop - Find out if there is a back edge in this interval...
+  bool isLoop() const;
+
+  /// print - Show contents in human readable format...
+  void print(raw_ostream &O) const;
+};
+
+/// succ_begin/succ_end - define methods so that Intervals may be used
+/// just like BasicBlocks can with the succ_* functions, and *::succ_iterator.
+///
+inline Interval::succ_iterator succ_begin(Interval *I) {
+  return I->Successors.begin();
+}
+inline Interval::succ_iterator succ_end(Interval *I)   {
+  return I->Successors.end();
+}
+
+/// pred_begin/pred_end - define methods so that Intervals may be used
+/// just like BasicBlocks can with the pred_* functions, and *::pred_iterator.
+///
+inline Interval::pred_iterator pred_begin(Interval *I) {
+  return I->Predecessors.begin();
+}
+inline Interval::pred_iterator pred_end(Interval *I)   {
+  return I->Predecessors.end();
+}
+
+template <> struct GraphTraits<Interval*> {
+  using NodeRef = Interval *;
+  using ChildIteratorType = Interval::succ_iterator;
+
+  static NodeRef getEntryNode(Interval *I) { return I; }
+
+  /// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
+};
+
+template <> struct GraphTraits<Inverse<Interval*>> {
+  using NodeRef = Interval *;
+  using ChildIteratorType = Interval::pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<Interval *> G) { return G.Graph; }
+  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
+};
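+
+// Example (an illustrative sketch; assumes an Interval *Root, e.g. the root
+// of an interval partition): with the GraphTraits specializations above in
+// place, the generic graph algorithms apply to intervals, such as the
+// depth-first iterators from "llvm/ADT/DepthFirstIterator.h":
+//
+//   for (Interval *I : depth_first(Root))
+//     I->print(errs());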
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INTERVAL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/IntervalIterator.h b/linux-x64/clang/include/llvm/Analysis/IntervalIterator.h
new file mode 100644
index 0000000..6ffcae5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IntervalIterator.h
@@ -0,0 +1,268 @@
+//===- IntervalIterator.h - Interval Iterator Declaration -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an iterator that enumerates the intervals in a control flow
+// graph of some sort.  This iterator is parametric, allowing iteration over the
+// following types of graphs:
+//
+//  1. A Function* object, composed of BasicBlock nodes.
+//  2. An IntervalPartition& object, composed of Interval nodes.
+//
+// This iterator is defined to walk the control flow graph, returning intervals
+// in depth first order.  These intervals are completely filled in except for
+// the predecessor fields (the successor information is filled in however).
+//
+// By default, the intervals created by this iterator are deleted after they
+// are no longer of any use to the iterator.  This behavior can be changed by
+// passing a false value into the intervals_begin() function. This clears the
+// IOwnMem member, and the intervals are not deleted.
+//
+// It is only safe to use this if all of the intervals are deleted by the caller
+// and all of the intervals are processed.  However, the user of the iterator is
+// not allowed to modify or delete the intervals until after the iterator has
+// been used completely.  The IntervalPartition class uses this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVALITERATOR_H
+#define LLVM_ANALYSIS_INTERVALITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/Analysis/Interval.h"
+#include "llvm/Analysis/IntervalPartition.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+
+// getNodeHeader - Given a source graph node and the source graph, return the
+// BasicBlock that is the header node.  This is the opposite of
+// getSourceGraphNode.
+inline BasicBlock *getNodeHeader(BasicBlock *BB) { return BB; }
+inline BasicBlock *getNodeHeader(Interval *I) { return I->getHeaderNode(); }
+
+// getSourceGraphNode - Given a BasicBlock and the source graph, return the
+// source graph node that corresponds to the BasicBlock.  This is the opposite
+// of getNodeHeader.
+inline BasicBlock *getSourceGraphNode(Function *, BasicBlock *BB) {
+  return BB;
+}
+inline Interval *getSourceGraphNode(IntervalPartition *IP, BasicBlock *BB) {
+  return IP->getBlockInterval(BB);
+}
+
+// addNodeToInterval - This method exists to assist the generic ProcessNode
+// with the task of adding a node to the new interval, depending on the
+// type of the source node.  In the case of a CFG source graph (BasicBlock
+// case), the BasicBlock itself is added to the interval.
+inline void addNodeToInterval(Interval *Int, BasicBlock *BB) {
+  Int->Nodes.push_back(BB);
+}
+
+// addNodeToInterval - This method exists to assist the generic ProcessNode
+// with the task of adding a node to the new interval, depending on the
+// type of the source node.  In the case of a CFG source graph (BasicBlock
+// case), the BasicBlock itself is added to the interval.  In the case of
+// an IntervalPartition source graph (Interval case), all of the member
+// BasicBlocks are added to the interval.
+inline void addNodeToInterval(Interval *Int, Interval *I) {
+  // Add all of the nodes in I as new nodes in Int.
+  Int->Nodes.insert(Int->Nodes.end(), I->Nodes.begin(), I->Nodes.end());
+}
+
+template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy *>,
+         class IGT = GraphTraits<Inverse<NodeTy *>>>
+class IntervalIterator {
+  std::vector<std::pair<Interval *, typename Interval::succ_iterator>> IntStack;
+  std::set<BasicBlock *> Visited;
+  OrigContainer_t *OrigContainer;
+  bool IOwnMem;     // If True, delete intervals when done with them
+                    // See file header for conditions of use
+
+public:
+  using iterator_category = std::forward_iterator_tag;
+
+  IntervalIterator() = default; // End iterator, empty stack
+
+  IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) {
+    OrigContainer = M;
+    if (!ProcessInterval(&M->front())) {
+      llvm_unreachable("ProcessInterval should never fail for first interval!");
+    }
+  }
+
+  IntervalIterator(IntervalIterator &&x)
+      : IntStack(std::move(x.IntStack)), Visited(std::move(x.Visited)),
+        OrigContainer(x.OrigContainer), IOwnMem(x.IOwnMem) {
+    x.IOwnMem = false;
+  }
+
+  IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) {
+    OrigContainer = &IP;
+    if (!ProcessInterval(IP.getRootInterval())) {
+      llvm_unreachable("ProcessInterval should never fail for first interval!");
+    }
+  }
+
+  ~IntervalIterator() {
+    if (IOwnMem)
+      while (!IntStack.empty()) {
+        delete operator*();
+        IntStack.pop_back();
+      }
+  }
+
+  bool operator==(const IntervalIterator &x) const {
+    return IntStack == x.IntStack;
+  }
+  bool operator!=(const IntervalIterator &x) const { return !(*this == x); }
+
+  const Interval *operator*() const { return IntStack.back().first; }
+  Interval *operator*() { return IntStack.back().first; }
+  const Interval *operator->() const { return operator*(); }
+  Interval *operator->() { return operator*(); }
+
+  IntervalIterator &operator++() { // Preincrement
+    assert(!IntStack.empty() && "Attempting to use interval iterator at end!");
+    do {
+      // All of the intervals on the stack have been visited.  Try visiting
+      // their successors now.
+      Interval::succ_iterator &SuccIt = IntStack.back().second,
+                                EndIt = succ_end(IntStack.back().first);
+      while (SuccIt != EndIt) {                 // Loop over all interval succs
+        bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
+        ++SuccIt;                               // Increment iterator
+        if (Done) return *this;                 // Found a new interval! Use it!
+      }
+
+      // Free interval memory... if necessary
+      if (IOwnMem) delete IntStack.back().first;
+
+      // We ran out of successors for this interval... pop off the stack
+      IntStack.pop_back();
+    } while (!IntStack.empty());
+
+    return *this;
+  }
+
+  IntervalIterator operator++(int) { // Postincrement
+    IntervalIterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+private:
+  // ProcessInterval - This method is used during the construction of the
+  // interval graph.  It walks through the source graph, recursively creating
+  // an interval per invocation until the entire graph is covered.  This uses
+  // the ProcessNode method to add all of the nodes to the interval.
+  //
+  // This method is templated because it may operate on two different source
+  // graphs: a basic block graph, or a preexisting interval graph.
+  bool ProcessInterval(NodeTy *Node) {
+    BasicBlock *Header = getNodeHeader(Node);
+    if (!Visited.insert(Header).second)
+      return false;
+
+    Interval *Int = new Interval(Header);
+
+    // Check all of our successors to see if they are in the interval...
+    for (typename GT::ChildIteratorType I = GT::child_begin(Node),
+           E = GT::child_end(Node); I != E; ++I)
+      ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
+
+    IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
+    return true;
+  }
+
+  // ProcessNode - This method is called by ProcessInterval to add nodes to the
+  // interval being constructed, and it is also called recursively as it walks
+  // the source graph.  A node is added to the current interval only if all of
+  // its predecessors are already in the graph.  This also takes care of keeping
+  // the successor set of an interval up to date.
+  //
+  // This method is templated because it may operate on two different source
+  // graphs: a basic block graph, or a preexisting interval graph.
+  void ProcessNode(Interval *Int, NodeTy *Node) {
+    assert(Int && "Null interval == bad!");
+    assert(Node && "Null Node == bad!");
+
+    BasicBlock *NodeHeader = getNodeHeader(Node);
+
+    if (Visited.count(NodeHeader)) {     // Node already been visited?
+      if (Int->contains(NodeHeader)) {   // Already in this interval...
+        return;
+      } else {                           // In other interval, add as successor
+        if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
+          Int->Successors.push_back(NodeHeader);
+      }
+    } else {                             // Otherwise, not in interval yet
+      for (typename IGT::ChildIteratorType I = IGT::child_begin(Node),
+             E = IGT::child_end(Node); I != E; ++I) {
+        if (!Int->contains(*I)) {        // If pred not in interval, we can't be
+          if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
+            Int->Successors.push_back(NodeHeader);
+          return;                        // See you later
+        }
+      }
+
+      // If we get here, then all of the predecessors of Node are in the
+      // interval already.  In this case, we must add Node to the interval!
+      addNodeToInterval(Int, Node);
+      Visited.insert(NodeHeader);     // The node has now been visited!
+
+      if (Int->isSuccessor(NodeHeader)) {
+        // If we were in the successor list from before... remove from succ list
+        Int->Successors.erase(std::remove(Int->Successors.begin(),
+                                          Int->Successors.end(), NodeHeader),
+                              Int->Successors.end());
+      }
+
+      // Now that we have discovered that Node is in the interval, perhaps some
+      // of its successors are as well?
+      for (typename GT::ChildIteratorType It = GT::child_begin(Node),
+             End = GT::child_end(Node); It != End; ++It)
+        ProcessNode(Int, getSourceGraphNode(OrigContainer, *It));
+    }
+  }
+};
+
+using function_interval_iterator = IntervalIterator<BasicBlock, Function>;
+using interval_part_interval_iterator =
+    IntervalIterator<Interval, IntervalPartition>;
+
+inline function_interval_iterator intervals_begin(Function *F,
+                                                  bool DeleteInts = true) {
+  return function_interval_iterator(F, DeleteInts);
+}
+inline function_interval_iterator intervals_end(Function *) {
+  return function_interval_iterator();
+}
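+
+// Example (an illustrative sketch; assumes a valid Function *F): walk the
+// intervals of a function in depth-first order, letting the iterator own and
+// delete each Interval once it is no longer needed:
+//
+//   for (function_interval_iterator I = intervals_begin(F),
+//                                   E = intervals_end(F);
+//        I != E; ++I)
+//     I->print(errs());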
+
+inline interval_part_interval_iterator
+   intervals_begin(IntervalPartition &IP, bool DeleteIntervals = true) {
+  return interval_part_interval_iterator(IP, DeleteIntervals);
+}
+
+inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) {
+  return interval_part_interval_iterator();
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INTERVALITERATOR_H
diff --git a/linux-x64/clang/include/llvm/Analysis/IntervalPartition.h b/linux-x64/clang/include/llvm/Analysis/IntervalPartition.h
new file mode 100644
index 0000000..5033516
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IntervalPartition.h
@@ -0,0 +1,111 @@
+//===- IntervalPartition.h - Interval partition Calculation -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the IntervalPartition class, which
+// calculates and represents the interval partition of a function, or a
+// preexisting interval partition.
+//
+// In this way, the interval partition may be used to reduce a flow graph down
+// to its degenerate single node interval partition (unless it is irreducible).
+//
+// TODO: The IntervalPartition class should take a bool parameter that tells
+// whether it should add the "tails" of an interval to an interval itself or if
+// they should be represented as distinct intervals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVALPARTITION_H
+#define LLVM_ANALYSIS_INTERVALPARTITION_H
+
+#include "llvm/Pass.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class Interval;
+
+//===----------------------------------------------------------------------===//
+//
+// IntervalPartition - This class builds and holds an "interval partition" for
+// a function.  This partition divides the control flow graph into a set of
+// maximal intervals, as defined with the properties above.  Intuitively, an
+// interval is a (possibly nonexistent) loop with a "tail" of non-looping
+// nodes following it.
+//
+class IntervalPartition : public FunctionPass {
+  using IntervalMapTy = std::map<BasicBlock *, Interval *>;
+  IntervalMapTy IntervalMap;
+
+  using IntervalListTy = std::vector<Interval *>;
+  Interval *RootInterval = nullptr;
+  std::vector<Interval *> Intervals;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  IntervalPartition() : FunctionPass(ID) {
+    initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
+  }
+
+  // run - Calculate the interval partition for this function
+  bool runOnFunction(Function &F) override;
+
+  // IntervalPartition ctor - Build a reduced interval partition from an
+  // existing interval graph.  This takes an additional boolean parameter to
+  // distinguish it from a copy constructor.  Always pass in false for now.
+  IntervalPartition(IntervalPartition &I, bool);
+
+  // print - Show contents in human readable format...
+  void print(raw_ostream &O, const Module* = nullptr) const override;
+
+  // getRootInterval() - Return the root interval that contains the starting
+  // block of the function.
+  inline Interval *getRootInterval() { return RootInterval; }
+
+  // isDegeneratePartition() - Returns true if the interval partition contains
+  // a single interval, and thus cannot be simplified anymore.
+  bool isDegeneratePartition() { return Intervals.size() == 1; }
+
+  // TODO: isIrreducible - look for triangle graph.
+
+  // getBlockInterval - Return the interval that a basic block exists in.
+  inline Interval *getBlockInterval(BasicBlock *BB) {
+    IntervalMapTy::iterator I = IntervalMap.find(BB);
+    return I != IntervalMap.end() ? I->second : nullptr;
+  }
+
+  // getAnalysisUsage - Implement the Pass API
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  // Interface to Intervals vector...
+  const std::vector<Interval*> &getIntervals() const { return Intervals; }
+
+  // releaseMemory - Reset state back to before function was analyzed
+  void releaseMemory() override;
+
+private:
+  // addIntervalToPartition - Add an interval to the internal list of intervals,
+  // and then add mappings from all of the basic blocks in the interval to the
+  // interval itself (in the IntervalMap).
+  void addIntervalToPartition(Interval *I);
+
+  // updatePredecessors - Interval generation only sets the successor fields of
+  // the interval data structures.  After interval generation is complete,
+  // run through all of the intervals and propagate successor info as
+  // predecessor info.
+  void updatePredecessors(Interval *Int);
+};
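+
+// Example (an illustrative sketch for a client FunctionPass): require the
+// partition, then inspect the resulting intervals:
+//
+//   AU.addRequired<IntervalPartition>();            // in getAnalysisUsage()
+//   IntervalPartition &IP = getAnalysis<IntervalPartition>();
+//   if (!IP.isDegeneratePartition())
+//     for (const Interval *I : IP.getIntervals())
+//       I->print(errs());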
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INTERVALPARTITION_H
diff --git a/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h b/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h
new file mode 100644
index 0000000..edaf4e9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -0,0 +1,95 @@
+//===- IteratedDominanceFrontier.h - Calculate IDF --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \brief Compute iterated dominance frontiers using a linear time algorithm.
+///
+/// The algorithm used here is based on:
+///
+///   Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
+///   In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
+///   Programming Languages
+///   POPL '95. ACM, New York, NY, 62-73.
+///
+/// It has been modified to not explicitly use the DJ graph data structure and
+/// to directly compute pruned SSA using per-variable liveness information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IDF_H
+#define LLVM_ANALYSIS_IDF_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+
+namespace llvm {
+
+/// \brief Determine the iterated dominance frontier, given a set of defining
+/// blocks, and optionally, a set of live-in blocks.
+///
+/// In turn, the results can be used to place phi nodes.
+///
+/// This algorithm is a linear time computation of Iterated Dominance Frontiers,
+/// pruned using the live-in set.
+/// By default, liveness is not used to prune the IDF computation.
+/// The template parameters should be either BasicBlock* or Inverse<BasicBlock
+/// *>, depending on whether you want the forward or reverse IDF.
+template <class NodeTy, bool IsPostDom>
+class IDFCalculator {
+ public:
+  IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
+      : DT(DT), useLiveIn(false) {}
+
+  /// \brief Give the IDF calculator the set of blocks in which the value is
+  /// defined.  This is equivalent to the set of starting blocks it should be
+  /// calculating the IDF for (though later gets pruned based on liveness).
+  ///
+  /// Note: This set *must* live for the entire lifetime of the IDF calculator.
+  void setDefiningBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
+    DefBlocks = &Blocks;
+  }
+
+  /// \brief Give the IDF calculator the set of blocks in which the value is
+  /// live on entry to the block.  This is used to prune the IDF calculation to
+  /// not include blocks where any phi insertion would be dead.
+  ///
+  /// Note: This set *must* live for the entire lifetime of the IDF calculator.
+  void setLiveInBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
+    LiveInBlocks = &Blocks;
+    useLiveIn = true;
+  }
+
+  /// \brief Reset the live-in block set to be empty, and tell the IDF
+  /// calculator to not use liveness anymore.
+  void resetLiveInBlocks() {
+    LiveInBlocks = nullptr;
+    useLiveIn = false;
+  }
+
+  /// \brief Calculate iterated dominance frontiers
+  ///
+  /// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
+  /// the file-level comment.  It performs DF->IDF pruning using the live-in
+  /// set, to avoid computing the IDF for blocks where an inserted PHI node
+  /// would be dead.
+  void calculate(SmallVectorImpl<BasicBlock *> &IDFBlocks);
+
+private:
+  DominatorTreeBase<BasicBlock, IsPostDom> &DT;
+  bool useLiveIn;
+  const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
+  const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
+};
+
+typedef IDFCalculator<BasicBlock *, false> ForwardIDFCalculator;
+typedef IDFCalculator<Inverse<BasicBlock *>, true> ReverseIDFCalculator;
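+
+// Example (an illustrative sketch; assumes a computed DominatorTree DT and
+// a SmallPtrSet DefBlocks holding the blocks that define the value):
+//
+//   ForwardIDFCalculator IDF(DT);
+//   IDF.setDefiningBlocks(DefBlocks);
+//   SmallVector<BasicBlock *, 32> PHIBlocks;
+//   IDF.calculate(PHIBlocks); // PHIBlocks now lists the phi insertion points.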
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_IDF_H
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h
new file mode 100644
index 0000000..71ce084
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h
@@ -0,0 +1,132 @@
+//===- LazyBlockFrequencyInfo.h - Lazy Block Frequency Analysis -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.  The
+// difference is that with this pass the block frequencies are not computed when
+// the analysis pass is executed but rather when the BFI result is explicitly
+// requested by the analysis client.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
+#define LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
+
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/LazyBranchProbabilityInfo.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class AnalysisUsage;
+class BranchProbabilityInfo;
+class Function;
+class LoopInfo;
+
+/// Wraps a BFI to allow lazy computation of the block frequencies.
+///
+/// A pass that only conditionally uses BFI can unconditionally require the
+/// analysis without paying for the overhead if BFI doesn't end up being used.
+template <typename FunctionT, typename BranchProbabilityInfoPassT,
+          typename LoopInfoT, typename BlockFrequencyInfoT>
+class LazyBlockFrequencyInfo {
+public:
+  LazyBlockFrequencyInfo()
+      : Calculated(false), F(nullptr), BPIPass(nullptr), LI(nullptr) {}
+
+  /// Set up the per-function input.
+  void setAnalysis(const FunctionT *F, BranchProbabilityInfoPassT *BPIPass,
+                   const LoopInfoT *LI) {
+    this->F = F;
+    this->BPIPass = BPIPass;
+    this->LI = LI;
+  }
+
+  /// Retrieve the BFI with the block frequencies computed.
+  BlockFrequencyInfoT &getCalculated() {
+    if (!Calculated) {
+      assert(F && BPIPass && LI && "call setAnalysis");
+      BFI.calculate(
+          *F, BPIPassTrait<BranchProbabilityInfoPassT>::getBPI(BPIPass), *LI);
+      Calculated = true;
+    }
+    return BFI;
+  }
+
+  const BlockFrequencyInfoT &getCalculated() const {
+    return const_cast<LazyBlockFrequencyInfo *>(this)->getCalculated();
+  }
+
+  void releaseMemory() {
+    BFI.releaseMemory();
+    Calculated = false;
+    setAnalysis(nullptr, nullptr, nullptr);
+  }
+
+private:
+  BlockFrequencyInfoT BFI;
+  bool Calculated;
+  const FunctionT *F;
+  BranchProbabilityInfoPassT *BPIPass;
+  const LoopInfoT *LI;
+};
+
+/// \brief This is an alternative analysis pass to
+/// BlockFrequencyInfoWrapperPass.  The difference is that with this pass the
+/// block frequencies are not computed when the analysis pass is executed but
+/// rather when the BFI result is explicitly requested by the analysis client.
+///
+/// There are some additional requirements for any client pass that wants to use
+/// the analysis:
+///
+/// 1. The pass needs to initialize dependent passes with:
+///
+///   INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
+///
+/// 2. Similarly, getAnalysisUsage should call:
+///
+///   LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU)
+///
+/// 3. The computed BFI should be requested with
+///    getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() before either LoopInfo
+///    or BPI could be invalidated for example by changing the CFG.
+///
+/// Note that this functionality is not expected to be needed under the new
+/// pass manager, since analyses there are executed on demand.
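+///
+/// A sketch of a hypothetical client pass (MyPass and its body are
+/// illustrative only):
+///
+/// \code
+///   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
+///     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
+///   }
+///   bool MyPass::runOnFunction(Function &F) {
+///     BlockFrequencyInfo &BFI =
+///         getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
+///     ...
+///   }
+/// \endcode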
+class LazyBlockFrequencyInfoPass : public FunctionPass {
+private:
+  LazyBlockFrequencyInfo<Function, LazyBranchProbabilityInfoPass, LoopInfo,
+                         BlockFrequencyInfo>
+      LBFI;
+
+public:
+  static char ID;
+
+  LazyBlockFrequencyInfoPass();
+
+  /// \brief Compute and return the block frequencies.
+  BlockFrequencyInfo &getBFI() { return LBFI.getCalculated(); }
+
+  /// \brief Compute and return the block frequencies.
+  const BlockFrequencyInfo &getBFI() const { return LBFI.getCalculated(); }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// Helper for client passes to set up the analysis usage on behalf of this
+  /// pass.
+  static void getLazyBFIAnalysisUsage(AnalysisUsage &AU);
+
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+
+/// \brief Helper for client passes to initialize dependent passes for LBFI.
+void initializeLazyBFIPassPass(PassRegistry &Registry);
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h
new file mode 100644
index 0000000..e1d404b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h
@@ -0,0 +1,124 @@
+//===- LazyBranchProbabilityInfo.h - Lazy Branch Probability ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
+// The difference is that with this pass the branch probabilities are not
+// computed when the analysis pass is executed but rather when the BPI result
+// is explicitly requested by the analysis client.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
+#define LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
+
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class AnalysisUsage;
+class Function;
+class LoopInfo;
+class TargetLibraryInfo;
+
+/// \brief This is an alternative analysis pass to
+/// BranchProbabilityInfoWrapperPass.  The difference is that with this pass the
+/// branch probabilities are not computed when the analysis pass is executed but
+/// rather when the BPI result is explicitly requested by the analysis client.
+///
+/// There are some additional requirements for any client pass that wants to use
+/// the analysis:
+///
+/// 1. The pass needs to initialize dependent passes with:
+///
+///   INITIALIZE_PASS_DEPENDENCY(LazyBPIPass)
+///
+/// 2. Similarly, getAnalysisUsage should call:
+///
+///   LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU)
+///
+/// 3. The computed BPI should be requested with
+///    getAnalysis<LazyBranchProbabilityInfoPass>().getBPI() before LoopInfo
+///    could be invalidated for example by changing the CFG.
+///
+/// Note that this functionality is not expected to be needed under the new
+/// pass manager, since analyses there are executed on demand.
+class LazyBranchProbabilityInfoPass : public FunctionPass {
+
+  /// Wraps a BPI to allow lazy computation of the branch probabilities.
+  ///
+  /// A pass that only conditionally uses BPI can unconditionally require the
+  /// analysis without paying for the overhead if BPI doesn't end up being used.
+  class LazyBranchProbabilityInfo {
+  public:
+    LazyBranchProbabilityInfo(const Function *F, const LoopInfo *LI,
+                              const TargetLibraryInfo *TLI)
+        : Calculated(false), F(F), LI(LI), TLI(TLI) {}
+
+    /// Retrieve the BPI with the branch probabilities computed.
+    BranchProbabilityInfo &getCalculated() {
+      if (!Calculated) {
+        assert(F && LI && "call setAnalysis");
+        BPI.calculate(*F, *LI, TLI);
+        Calculated = true;
+      }
+      return BPI;
+    }
+
+    const BranchProbabilityInfo &getCalculated() const {
+      return const_cast<LazyBranchProbabilityInfo *>(this)->getCalculated();
+    }
+
+  private:
+    BranchProbabilityInfo BPI;
+    bool Calculated;
+    const Function *F;
+    const LoopInfo *LI;
+    const TargetLibraryInfo *TLI;
+  };
+
+  std::unique_ptr<LazyBranchProbabilityInfo> LBPI;
+
+public:
+  static char ID;
+
+  LazyBranchProbabilityInfoPass();
+
+  /// \brief Compute and return the branch probabilities.
+  BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }
+
+  /// \brief Compute and return the branch probabilities.
+  const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// Helper for client passes to set up the analysis usage on behalf of this
+  /// pass.
+  static void getLazyBPIAnalysisUsage(AnalysisUsage &AU);
+
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+
+/// \brief Helper for client passes to initialize dependent passes for LBPI.
+void initializeLazyBPIPassPass(PassRegistry &Registry);
+
+/// \brief Simple trait class that provides a mapping between BPI passes and the
+/// corresponding BPInfo.
+template <typename PassT> struct BPIPassTrait {
+  static PassT &getBPI(PassT *P) { return *P; }
+};
+
+template <> struct BPIPassTrait<LazyBranchProbabilityInfoPass> {
+  static BranchProbabilityInfo &getBPI(LazyBranchProbabilityInfoPass *P) {
+    return P->getBPI();
+  }
+};
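+
+// Example (an illustrative sketch): this trait is how LazyBlockFrequencyInfo
+// (in LazyBlockFrequencyInfo.h) obtains branch probabilities uniformly,
+// whether it is parameterized over the lazy pass or over BPI itself:
+//
+//   BFI.calculate(
+//       *F, BPIPassTrait<BranchProbabilityInfoPassT>::getBPI(BPIPass), *LI);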
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyCallGraph.h b/linux-x64/clang/include/llvm/Analysis/LazyCallGraph.h
new file mode 100644
index 0000000..d1ec6a9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LazyCallGraph.h
@@ -0,0 +1,1288 @@
+//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Implements a lazy call graph analysis and related passes for the new pass
+/// manager.
+///
+/// NB: This is *not* a traditional call graph! It is a graph which models both
+/// the current calls and potential calls. As a consequence there are many
+/// edges in this call graph that do not correspond to a 'call' or 'invoke'
+/// instruction.
+///
+/// The primary use case of this graph analysis is to facilitate iterating
+/// across the functions of a module in ways that ensure all callees are
+/// visited prior to a caller (given any SCC constraints), or vice versa. As
+/// such it is particularly well suited to organizing CGSCC optimizations such
+/// as inlining, outlining, argument promotion, etc. That is its primary use
+/// case and motivates the design. It may not be appropriate for other
+/// purposes. The use graph of functions or some other conservative analysis of
+/// call instructions may be interesting for optimizations and subsequent
+/// analyses which don't work in the context of an overly specified
+/// potential-call-edge graph.
+///
+/// To understand the specific rules and nature of this call graph analysis,
+/// see the documentation of the \c LazyCallGraph below.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
+#define LLVM_ANALYSIS_LAZYCALLGRAPH_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <iterator>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class Module;
+class Value;
+
+/// A lazily constructed view of the call graph of a module.
+///
+/// With the edges of this graph, the motivating constraint that we are
+/// attempting to maintain is that function-local optimization, CGSCC-local
+/// optimizations, and optimizations transforming a pair of functions connected
+/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
+/// DAG. That is, no optimizations will delete, remove, or add an edge such
+/// that functions already visited in a bottom-up order of the SCC DAG are no
+/// longer valid to have visited, or such that functions not yet visited in
+/// a bottom-up order of the SCC DAG are not required to have already been
+/// visited.
+///
+/// Within this constraint, the desire is to minimize the merge points of the
+/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
+/// in the SCC DAG, the more independence there is in optimizing within it.
+/// There is a strong desire to enable parallelization of optimizations over
+/// the call graph, and both limited fanout and merge points will (artificially
+/// in some cases) limit the scaling of such an effort.
+///
+/// To this end, the graph represents both direct calls and any potential
+/// resolution of an indirect call edge. Another way to think about it is that
+/// it represents both the direct call edges and any direct call edges that
+/// might be formed
+/// through static optimizations. Specifically, it considers taking the address
+/// of a function to be an edge in the call graph because this might be
+/// forwarded to become a direct call by some subsequent function-local
+/// optimization. The result is that the graph closely follows the use-def
+/// edges for functions. Walking "up" the graph can be done by looking at all
+/// of the uses of a function.
+///
+/// The roots of the call graph are the external functions and functions
+/// escaped into global variables. Those functions can be called from outside
+/// of the module or via unknowable means in the IR -- we may not be able to
+/// form even a potential call edge from a function body which may dynamically
+/// load the function and call it.
+///
+/// This analysis still requires updates to remain valid after optimizations
+/// which could potentially change the set of potential callees. The
+/// constraints it operates under only make the traversal order remain valid.
+///
+/// The entire analysis must be re-computed if full interprocedural
+/// optimizations run at any point. For example, globalopt completely
+/// invalidates the information in this analysis.
+///
+/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
+/// it from the existing CallGraph. At some point, it is expected that this
+/// will be the only call graph and it will be renamed accordingly.
+class LazyCallGraph {
+public:
+  class Node;
+  class EdgeSequence;
+  class SCC;
+  class RefSCC;
+  class edge_iterator;
+  class call_edge_iterator;
+
+  /// A class used to represent edges in the call graph.
+  ///
+  /// The lazy call graph models both *call* edges and *reference* edges. Call
+  /// edges are much what you would expect, and exist when there is a 'call' or
+  /// 'invoke' instruction of some function. Reference edges are also tracked
+  /// along side these, and exist whenever any instruction (transitively
+  /// through its operands) references a function. All call edges are
+  /// inherently reference edges, and so the reference graph forms a superset
+  /// of the formal call graph.
+  ///
+  /// All of these forms of edges are fundamentally represented as outgoing
+  /// edges. The edges are stored in the source node and point at the target
+  /// node. This allows the edge structure itself to be a very compact data
+  /// structure: essentially a tagged pointer.
+  class Edge {
+  public:
+    /// The kind of edge in the graph.
+    enum Kind : bool { Ref = false, Call = true };
+
+    Edge();
+    explicit Edge(Node &N, Kind K);
+
+    /// Test whether the edge is null.
+    ///
+    /// This happens when an edge has been deleted. We leave the edge objects
+    /// around but clear them.
+    explicit operator bool() const;
+
+    /// Returns the \c Kind of the edge.
+    Kind getKind() const;
+
+    /// Test whether the edge represents a direct call to a function.
+    ///
+    /// This requires that the edge is not null.
+    bool isCall() const;
+
+    /// Get the call graph node referenced by this edge.
+    ///
+    /// This requires that the edge is not null.
+    Node &getNode() const;
+
+    /// Get the function referenced by this edge.
+    ///
+    /// This requires that the edge is not null.
+    Function &getFunction() const;
+
+  private:
+    friend class LazyCallGraph::EdgeSequence;
+    friend class LazyCallGraph::RefSCC;
+
+    PointerIntPair<Node *, 1, Kind> Value;
+
+    void setKind(Kind K) { Value.setInt(K); }
+  };
+
+  /// The edge sequence object.
+  ///
+  /// This typically exists entirely within the node but is exposed as
+  /// a separate type because a node doesn't initially have edges. An explicit
+  /// population step is required to produce this sequence at first and it is
+  /// then cached in the node. It is also used to represent edges entering the
+  /// graph from outside the module to model the graph's roots.
+  ///
+  /// The sequence itself is both iterable and indexable. The indexes remain
+  /// stable even as the sequence mutates (including removal).
+  class EdgeSequence {
+    friend class LazyCallGraph;
+    friend class LazyCallGraph::Node;
+    friend class LazyCallGraph::RefSCC;
+
+    using VectorT = SmallVector<Edge, 4>;
+    using VectorImplT = SmallVectorImpl<Edge>;
+
+  public:
+    /// An iterator used for the edges to both entry nodes and child nodes.
+    class iterator
+        : public iterator_adaptor_base<iterator, VectorImplT::iterator,
+                                       std::forward_iterator_tag> {
+      friend class LazyCallGraph;
+      friend class LazyCallGraph::Node;
+
+      VectorImplT::iterator E;
+
+      // Build the iterator for a specific position in the edge list.
+      iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
+          : iterator_adaptor_base(BaseI), E(E) {
+        while (I != E && !*I)
+          ++I;
+      }
+
+    public:
+      iterator() = default;
+
+      using iterator_adaptor_base::operator++;
+      iterator &operator++() {
+        do {
+          ++I;
+        } while (I != E && !*I);
+        return *this;
+      }
+    };
+
+    /// An iterator specifically over call edges.
+    ///
+    /// This has the same iteration properties as the \c iterator, but
+    /// restricts itself to edges which represent actual calls.
+    class call_iterator
+        : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
+                                       std::forward_iterator_tag> {
+      friend class LazyCallGraph;
+      friend class LazyCallGraph::Node;
+
+      VectorImplT::iterator E;
+
+      /// Advance the iterator to the next valid call edge.
+      void advanceToNextEdge() {
+        while (I != E && (!*I || !I->isCall()))
+          ++I;
+      }
+
+      // Build the iterator for a specific position in the edge list.
+      call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
+          : iterator_adaptor_base(BaseI), E(E) {
+        advanceToNextEdge();
+      }
+
+    public:
+      call_iterator() = default;
+
+      using iterator_adaptor_base::operator++;
+      call_iterator &operator++() {
+        ++I;
+        advanceToNextEdge();
+        return *this;
+      }
+    };
+
+    iterator begin() { return iterator(Edges.begin(), Edges.end()); }
+    iterator end() { return iterator(Edges.end(), Edges.end()); }
+
+    Edge &operator[](int i) { return Edges[i]; }
+    Edge &operator[](Node &N) {
+      assert(EdgeIndexMap.find(&N) != EdgeIndexMap.end() && "No such edge!");
+      auto &E = Edges[EdgeIndexMap.find(&N)->second];
+      assert(E && "Dead or null edge!");
+      return E;
+    }
+
+    Edge *lookup(Node &N) {
+      auto EI = EdgeIndexMap.find(&N);
+      if (EI == EdgeIndexMap.end())
+        return nullptr;
+      auto &E = Edges[EI->second];
+      return E ? &E : nullptr;
+    }
+
+    call_iterator call_begin() {
+      return call_iterator(Edges.begin(), Edges.end());
+    }
+    call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }
+
+    iterator_range<call_iterator> calls() {
+      return make_range(call_begin(), call_end());
+    }
+
+    bool empty() {
+      for (auto &E : Edges)
+        if (E)
+          return false;
+
+      return true;
+    }
+
+  private:
+    VectorT Edges;
+    DenseMap<Node *, int> EdgeIndexMap;
+
+    EdgeSequence() = default;
+
+    /// Internal helper to insert an edge to a node.
+    void insertEdgeInternal(Node &ChildN, Edge::Kind EK);
+
+    /// Internal helper to change an edge kind.
+    void setEdgeKind(Node &ChildN, Edge::Kind EK);
+
+    /// Internal helper to remove the edge to the given function.
+    bool removeEdgeInternal(Node &ChildN);
+
+    /// Internal helper to replace an edge key with a new one.
+    ///
+    /// This should be used when the function for a particular node in the
+    /// graph gets replaced and we are updating all of the edges to that node
+    /// to use the new function as the key.
+    void replaceEdgeKey(Function &OldTarget, Function &NewTarget);
+  };
+
+  /// A node in the call graph.
+  ///
+  /// This represents a single node. Its primary roles are to cache the list of
+  /// callees, de-duplicate and provide fast testing of whether a function is
+  /// a callee, and facilitate iteration of child nodes in the graph.
+  ///
+  /// The node works much like an optional in order to lazily populate the
+  /// edges of each node. Until populated, there are no edges. Once populated,
+  /// you can access the edges by dereferencing the node or using the `->`
+  /// operator as if the node was an `Optional<EdgeSequence>`.
+  class Node {
+    friend class LazyCallGraph;
+    friend class LazyCallGraph::RefSCC;
+
+  public:
+    LazyCallGraph &getGraph() const { return *G; }
+
+    Function &getFunction() const { return *F; }
+
+    StringRef getName() const { return F->getName(); }
+
+    /// Equality is defined as address equality.
+    bool operator==(const Node &N) const { return this == &N; }
+    bool operator!=(const Node &N) const { return !operator==(N); }
+
+    /// Tests whether the node has been populated with edges.
+    bool isPopulated() const { return Edges.hasValue(); }
+
+    /// Tests whether this is actually a dead node and no longer valid.
+    ///
+    /// Users rarely interact with nodes in this state and other methods are
+    /// invalid. This is used to model a node in an edge list where the
+    /// function has been completely removed.
+    bool isDead() const {
+      assert(!G == !F &&
+             "Both graph and function pointers should be null or non-null.");
+      return !G;
+    }
+
+    // We allow accessing the edges by dereferencing or using the arrow
+    // operator, essentially wrapping the internal optional.
+    EdgeSequence &operator*() const {
+      // Rip const off because the node itself isn't changing here.
+      return const_cast<EdgeSequence &>(*Edges);
+    }
+    EdgeSequence *operator->() const { return &**this; }
+
+    /// Populate the edges of this node if necessary.
+    ///
+    /// The first time this is called it will populate the edges for this node
+    /// in the graph. It does this by scanning the underlying function, so once
+    /// this is done, any changes to that function must be explicitly reflected
+    /// in updates to the graph.
+    ///
+    /// \returns the populated \c EdgeSequence to simplify walking it.
+    ///
+    /// This will not update or re-scan anything if called repeatedly. Instead,
+    /// the edge sequence is cached and returned immediately on subsequent
+    /// calls.
+    EdgeSequence &populate() {
+      if (Edges)
+        return *Edges;
+
+      return populateSlow();
+    }
+
+  private:
+    LazyCallGraph *G;
+    Function *F;
+
+    // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
+    // stored directly within the node. These are both '-1' when nodes are part
+    // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
+    int DFSNumber = 0;
+    int LowLink = 0;
+
+    Optional<EdgeSequence> Edges;
+
+    /// Basic constructor implements the scanning of F into Edges and
+    /// EdgeIndexMap.
+    Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}
+
+    /// Implementation of the scan when populating.
+    EdgeSequence &populateSlow();
+
+    /// Internal helper to directly replace the function with a new one.
+    ///
+    /// This is used to facilitate transformations which need to replace the
+    /// formal Function object but directly move the body and users from one to
+    /// the other.
+    void replaceFunction(Function &NewF);
+
+    void clear() { Edges.reset(); }
+
+    /// Print the name of this node's function.
+    friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
+      return OS << N.F->getName();
+    }
+
+    /// Dump the name of this node's function to stderr.
+    void dump() const;
+  };
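+
+  // Example (an illustrative sketch; CG is a LazyCallGraph, F a Function in
+  // it, and visit() a hypothetical callback): populate a node's edges on
+  // first use and walk only the genuine call edges:
+  //
+  //   LazyCallGraph::Node &N = CG.get(F);
+  //   for (LazyCallGraph::Edge &E : N.populate())
+  //     if (E.isCall())
+  //       visit(E.getFunction());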
+
+  /// An SCC of the call graph.
+  ///
+  /// This represents a Strongly Connected Component of the direct call graph
+  /// -- ignoring indirect calls and function references. It stores this as
+  /// a collection of call graph nodes. While the order of nodes in the SCC is
+  /// stable, it is not in any particular order.
+  ///
+  /// The SCCs are nested within a \c RefSCC, see below for details about that
+  /// outer structure. SCCs do not support mutation of the call graph, that
+  /// must be done through the containing \c RefSCC in order to fully reason
+  /// about the ordering and connections of the graph.
+  class SCC {
+    friend class LazyCallGraph;
+    friend class LazyCallGraph::Node;
+
+    RefSCC *OuterRefSCC;
+    SmallVector<Node *, 1> Nodes;
+
+    template <typename NodeRangeT>
+    SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
+        : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}
+
+    void clear() {
+      OuterRefSCC = nullptr;
+      Nodes.clear();
+    }
+
+    /// Print a short description useful for debugging or logging.
+    ///
+    /// We print the function names in the SCC wrapped in '()'s and skipping
+    /// the middle functions if there are a large number.
+    //
+    // Note: this is defined inline to dodge issues with GCC's interpretation
+    // of enclosing namespaces for friend function declarations.
+    friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
+      OS << '(';
+      int i = 0;
+      for (LazyCallGraph::Node &N : C) {
+        if (i > 0)
+          OS << ", ";
+        // Elide the inner elements if there are too many.
+        if (i > 8) {
+          OS << "..., " << *C.Nodes.back();
+          break;
+        }
+        OS << N;
+        ++i;
+      }
+      OS << ')';
+      return OS;
+    }
+
+    /// Dump a short description of this SCC to stderr.
+    void dump() const;
+
+#ifndef NDEBUG
+    /// Verify invariants about the SCC.
+    ///
+    /// This will attempt to validate all of the basic invariants within an
+    /// SCC, but not that it is a strongly connected component per se. Primarily
+    /// useful while building and updating the graph to check that basic
+    /// properties are in place rather than having inexplicable crashes later.
+    void verify();
+#endif
+
+  public:
+    using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;
+
+    iterator begin() const { return Nodes.begin(); }
+    iterator end() const { return Nodes.end(); }
+
+    int size() const { return Nodes.size(); }
+
+    RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }
+
+    /// Test if this SCC is a parent of \a C.
+    ///
+    /// Note that this is linear in the number of edges departing the current
+    /// SCC.
+    bool isParentOf(const SCC &C) const;
+
+    /// Test if this SCC is an ancestor of \a C.
+    ///
+    /// Note that in the worst case this is linear in the number of edges
+    /// departing the current SCC and every SCC in the entire graph reachable
+    /// from this SCC. Thus this very well may walk every edge in the entire
+    /// call graph! Do not call this in a tight loop!
+    bool isAncestorOf(const SCC &C) const;
+
+    /// Test if this SCC is a child of \a C.
+    ///
+    /// See the comments for \c isParentOf for detailed notes about the
+    /// complexity of this routine.
+    bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }
+
+    /// Test if this SCC is a descendant of \a C.
+    ///
+    /// See the comments for \c isParentOf for detailed notes about the
+    /// complexity of this routine.
+    bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }
+
+    /// Provide a short name by printing this SCC to a std::string.
+    ///
+    /// This copes with the fact that we don't have a name per se for an SCC
+    /// while still making the use of this in debugging and logging useful.
+    std::string getName() const {
+      std::string Name;
+      raw_string_ostream OS(Name);
+      OS << *this;
+      OS.flush();
+      return Name;
+    }
+  };
+
+  /// A RefSCC of the call graph.
+  ///
+  /// This models a Strongly Connected Component of function reference edges in
+  /// the call graph. As opposed to actual SCCs, these can be used to scope
+  /// subgraphs of the module which are independent from other subgraphs of the
+  /// module because they do not reference it in any way. This is also the unit
+  /// where we do mutation of the graph in order to restrict mutations to those
+  /// which don't violate this independence.
+  ///
+  /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
+  /// are necessarily within some actual SCC that nests within it. Since
+  /// a direct call *is* a reference, there will always be at least one RefSCC
+  /// around any SCC.
+  class RefSCC {
+    friend class LazyCallGraph;
+    friend class LazyCallGraph::Node;
+
+    LazyCallGraph *G;
+
+    /// A postorder list of the inner SCCs.
+    SmallVector<SCC *, 4> SCCs;
+
+    /// A map from SCC to index in the postorder list.
+    SmallDenseMap<SCC *, int, 4> SCCIndices;
+
+    /// Fast-path constructor. RefSCCs should instead be constructed by calling
+    /// formRefSCCFast on the graph itself.
+    RefSCC(LazyCallGraph &G);
+
+    void clear() {
+      SCCs.clear();
+      SCCIndices.clear();
+    }
+
+    /// Print a short description useful for debugging or logging.
+    ///
+    /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
+    /// there are a large number.
+    //
+    // Note: this is defined inline to dodge issues with GCC's interpretation
+    // of enclosing namespaces for friend function declarations.
+    friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
+      OS << '[';
+      int i = 0;
+      for (LazyCallGraph::SCC &C : RC) {
+        if (i > 0)
+          OS << ", ";
+        // Elide the inner elements if there are too many.
+        if (i > 4) {
+          OS << "..., " << *RC.SCCs.back();
+          break;
+        }
+        OS << C;
+        ++i;
+      }
+      OS << ']';
+      return OS;
+    }
+
+    /// Dump a short description of this RefSCC to stderr.
+    void dump() const;
+
+#ifndef NDEBUG
+    /// Verify invariants about the RefSCC and all its SCCs.
+    ///
+    /// This will attempt to validate all of the invariants *within* the
+    /// RefSCC, but not that it is a strongly connected component of the larger
+    /// graph. This makes it useful even when partially through an update.
+    ///
+    /// Invariants checked:
+    /// - SCCs and their indices match.
+    /// - The SCCs list is in fact in post-order.
+    void verify();
+#endif
+
+    /// Handle any necessary parent set updates after inserting a trivial ref
+    /// or call edge.
+    void handleTrivialEdgeInsertion(Node &SourceN, Node &TargetN);
+
+  public:
+    using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
+    using range = iterator_range<iterator>;
+    using parent_iterator =
+        pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;
+
+    iterator begin() const { return SCCs.begin(); }
+    iterator end() const { return SCCs.end(); }
+
+    ssize_t size() const { return SCCs.size(); }
+
+    SCC &operator[](int Idx) { return *SCCs[Idx]; }
+
+    iterator find(SCC &C) const {
+      return SCCs.begin() + SCCIndices.find(&C)->second;
+    }
+
+    /// Test if this RefSCC is a parent of \a RC.
+    ///
+    /// CAUTION: This method walks every edge in the \c RefSCC; it can be very
+    /// expensive.
+    bool isParentOf(const RefSCC &RC) const;
+
+    /// Test if this RefSCC is an ancestor of \a RC.
+    ///
+    /// CAUTION: This method walks the directed graph of edges as far as
+    /// necessary to find a possible path to the argument. In the worst case
+    /// this may walk the entire graph and can be extremely expensive.
+    bool isAncestorOf(const RefSCC &RC) const;
+
+    /// Test if this RefSCC is a child of \a RC.
+    ///
+    /// CAUTION: This method walks every edge in the argument \c RefSCC; it
+    /// can be very expensive.
+    bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }
+
+    /// Test if this RefSCC is a descendant of \a RC.
+    ///
+    /// CAUTION: This method walks the directed graph of edges as far as
+    /// necessary to find a possible path from the argument. In the worst case
+    /// this may walk the entire graph and can be extremely expensive.
+    bool isDescendantOf(const RefSCC &RC) const {
+      return RC.isAncestorOf(*this);
+    }
+
+    /// Provide a short name by printing this RefSCC to a std::string.
+    ///
+    /// This copes with the fact that we don't have a name per se for a RefSCC
+    /// while still making the use of this in debugging and logging useful.
+    std::string getName() const {
+      std::string Name;
+      raw_string_ostream OS(Name);
+      OS << *this;
+      OS.flush();
+      return Name;
+    }
+
+    ///@{
+    /// \name Mutation API
+    ///
+    /// These methods provide the core API for updating the call graph in the
+    /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
+    ///
+    /// Note that these methods sometimes have complex runtimes, so be careful
+    /// how you call them.
+
+    /// Make an existing internal ref edge into a call edge.
+    ///
+    /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
+    /// If that happens, the optional callback \p MergedCB will be invoked (if
+    /// provided) on the SCCs being merged away prior to actually performing
+    /// the merge. Note that this will never include the target SCC, as that
+    /// is the SCC into which functions are merged to resolve the cycle. Once
+    /// this function returns, these merged SCCs are not in a valid state but
+    /// the pointers will remain valid until destruction of the parent graph
+    /// instance for the purpose of clearing cached information. As a
+    /// convenience, this function also returns 'true' if a cycle was formed
+    /// and some SCCs were merged away.
+    ///
+    /// After this operation, both SourceN's SCC and TargetN's SCC may move
+    /// position within this RefSCC's postorder list. Any SCCs merged are
+    /// merged into the TargetN's SCC in order to preserve reachability analyses
+    /// which took place on that SCC.
+    bool switchInternalEdgeToCall(
+        Node &SourceN, Node &TargetN,
+        function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
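+    // A minimal usage sketch from caller context (the invalidateCachedInfo
+    // helper is hypothetical; the switchInternalEdgeToCall signature is from
+    // the declaration above):
+    //
+    //   bool FormedCycle = RC.switchInternalEdgeToCall(
+    //       SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> MergedSCCs) {
+    //         for (LazyCallGraph::SCC *C : MergedSCCs)
+    //           invalidateCachedInfo(*C); // hypothetical invalidation hook
+    //       });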
+
+    /// Make an existing internal call edge between separate SCCs into a ref
+    /// edge.
+    ///
+    /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
+    /// the call edge between them to a ref edge is a trivial operation that
+    /// does not require any structural changes to the call graph.
+    void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
+
+    /// Make an existing internal call edge within a single SCC into a ref
+    /// edge.
+    ///
+    /// Since SourceN and TargetN are part of a single SCC, this SCC may be
+    /// split up due to breaking a cycle in the call edges that formed it. If
+    /// that happens, then this routine will insert new SCCs into the postorder
+    /// list *before* the SCC of TargetN (previously the SCC of both). This
+    /// preserves postorder as the TargetN can reach all of the other nodes by
+    /// definition of previously being in a single SCC formed by the cycle from
+    /// SourceN to TargetN.
+    ///
+    /// The newly formed SCCs are added *immediately* and contiguously
+    /// prior to the TargetN SCC, and the returned range covers the new SCCs
+    /// in the RefSCC's postorder sequence. You can directly iterate that
+    /// range to observe all of the new SCCs in postorder.
+    ///
+    /// Note that if SourceN and TargetN are in separate SCCs, the simpler
+    /// routine `switchTrivialInternalEdgeToRef` should be used instead.
+    iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
+                                                     Node &TargetN);
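+    // A sketch of consuming the returned postorder range (updateSCC is a
+    // hypothetical per-SCC update hook):
+    //
+    //   for (LazyCallGraph::SCC &NewC :
+    //        RC.switchInternalEdgeToRef(SourceN, TargetN))
+    //     updateSCC(NewC); // new SCCs are visited in valid postorder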
+
+    /// Make an existing outgoing ref edge into a call edge.
+    ///
+    /// Note that this is trivial as there are no cyclic impacts and there
+    /// remains a reference edge.
+    void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
+
+    /// Make an existing outgoing call edge into a ref edge.
+    ///
+    /// This is trivial as there are no cyclic impacts and there remains
+    /// a reference edge.
+    void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
+
+    /// Insert a ref edge from one node in this RefSCC to another in this
+    /// RefSCC.
+    ///
+    /// This is always a trivial operation as it doesn't change any part of the
+    /// graph structure besides connecting the two nodes.
+    ///
+    /// Note that we don't support directly inserting internal *call* edges
+    /// because that could change the graph structure and requires returning
+    /// information about what became invalid. As a consequence, the pattern
+    /// should be to first insert the necessary ref edge, and then to switch it
+    /// to a call edge if needed and handle any invalidation that results. See
+    /// the \c switchInternalEdgeToCall routine for details.
+    void insertInternalRefEdge(Node &SourceN, Node &TargetN);
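+    // A sketch of the documented two-step pattern for adding an internal
+    // call edge (names assumed from the declarations above):
+    //
+    //   RC.insertInternalRefEdge(SourceN, TargetN);
+    //   bool Merged = RC.switchInternalEdgeToCall(SourceN, TargetN);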
+
+    /// Insert an edge whose parent is in this RefSCC and child is in some
+    /// child RefSCC.
+    ///
+    /// There must be an existing path from the \p SourceN to the \p TargetN.
+    /// This operation is inexpensive and does not change the set of SCCs and
+    /// RefSCCs in the graph.
+    void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
+
+    /// Insert an edge whose source is in a descendant RefSCC and target is in
+    /// this RefSCC.
+    ///
+    /// There must be an existing path from the target to the source in this
+    /// case.
+    ///
+    /// NB! This has the potential to be a very expensive function. It
+    /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
+    /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
+    /// participate in the cycle can in the worst case require traversing every
+    /// RefSCC in the graph. Every attempt is made to avoid that, but passes
+    /// must still exercise caution calling this routine repeatedly.
+    ///
+    /// Also note that this can only insert ref edges. In order to insert
+    /// a call edge, first insert a ref edge and then switch it to a call edge.
+    /// These are intentionally kept as separate interfaces because each step
+    /// of the operation invalidates a different set of data structures.
+    ///
+    /// This returns all the RefSCCs which were merged into this RefSCC
+    /// (the target's). This allows callers to invalidate any cached
+    /// information.
+    ///
+    /// FIXME: We could possibly optimize this quite a bit for cases where the
+    /// caller and callee are very nearby in the graph. See comments in the
+    /// implementation for details, but that use case might impact users.
+    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
+                                                   Node &TargetN);
+
+    /// Remove an edge whose source is in this RefSCC and target is *not*.
+    ///
+    /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
+    /// from this RefSCC have been fully explored by any in-flight DFS graph
+    /// formation, so this is always safe to call once you have the source
+    /// RefSCC.
+    ///
+    /// This operation does not change the cyclic structure of the graph and so
+    /// is very inexpensive. It may change the connectivity graph of the SCCs
+    /// though, so be careful calling this while iterating over them.
+    void removeOutgoingEdge(Node &SourceN, Node &TargetN);
+
+    /// Remove a list of ref edges which are entirely within this RefSCC.
+    ///
+    /// Both the \a SourceN and all of the \a TargetNs must be within this
+    /// RefSCC. Removing these edges may break cycles that form this RefSCC and
+    /// thus this operation may change the RefSCC graph significantly. In
+    /// particular, this operation will re-form new RefSCCs based on the
+    /// remaining connectivity of the graph. The following invariants are
+    /// guaranteed to hold after calling this method:
+    ///
+    /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
+    ///    and in the graph. No new RefSCCs are built.
+    /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
+    ///    the graph or the postorder traversal of the call graph. Any iterator
+    ///    pointing at this RefSCC will become invalid.
+    /// 3) All newly formed RefSCCs will be returned and the order of the
+    ///    RefSCCs returned will be a valid postorder traversal of the new
+    ///    RefSCCs.
+    /// 4) No RefSCC other than this RefSCC has its member set changed (this is
+    ///    inherent in the definition of removing such an edge).
+    ///
+    /// These invariants are very important to ensure that we can build
+    /// optimization pipelines on top of the CGSCC pass manager which
+    /// intelligently update the RefSCC graph without invalidating other parts
+    /// of the RefSCC graph.
+    ///
+    /// Note that we provide no routine to remove a *call* edge. Instead, you
+    /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
+    /// This split API is intentional as each of these two steps can invalidate
+    /// a different aspect of the graph structure and needs to have the
+    /// invalidation handled independently.
+    ///
+    /// The runtime complexity of this method is, in the worst case, O(V+E)
+    /// where V is the number of nodes in this RefSCC and E is the number of
+    /// edges leaving the nodes in this RefSCC. Note that E includes both edges
+    /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
+    /// effort has been made to minimize the overhead of common cases such as
+    /// self-edges and edge removals which result in a spanning tree with no
+    /// more cycles.
+    SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
+                                                   ArrayRef<Node *> TargetNs);
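+    // A sketch of handling the result; ArrayRef can be formed from a braced
+    // list of Node pointers, and the loop body here is a placeholder:
+    //
+    //   for (LazyCallGraph::RefSCC *NewRC :
+    //        RC.removeInternalRefEdge(SourceN, {&TargetN}))
+    //     ; // the new RefSCCs arrive in a valid postorder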
+
+    /// A convenience wrapper around the above to handle trivial cases of
+    /// inserting a new call edge.
+    ///
+    /// This is trivial whenever the target is in the same SCC as the source or
+    /// the edge is an outgoing edge to some descendant SCC. In these cases
+    /// there is no change to the cyclic structure of SCCs or RefSCCs.
+    ///
+    /// To further make calling this convenient, it also handles inserting
+    /// already existing edges.
+    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
+
+    /// A convenience wrapper around the above to handle trivial cases of
+    /// inserting a new ref edge.
+    ///
+    /// This is trivial whenever the target is in the same RefSCC as the source
+    /// or the edge is an outgoing edge to some descendant RefSCC. In these
+    /// cases there is no change to the cyclic structure of the RefSCCs.
+    ///
+    /// To further make calling this convenient, it also handles inserting
+    /// already existing edges.
+    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
+
+    /// Directly replace a node's function with a new function.
+    ///
+    /// This should be used when moving the body and users of a function to
+    /// a new formal function object but not otherwise changing the call graph
+    /// structure in any way.
+    ///
+    /// It requires that the old function in the provided node have zero uses
+    /// and the new function must have calls and references to it establishing
+    /// an equivalent graph.
+    void replaceNodeFunction(Node &N, Function &NewF);
+
+    ///@}
+  };
+
+  /// A post-order depth-first RefSCC iterator over the call graph.
+  ///
+  /// This iterator walks the cached post-order sequence of RefSCCs. However,
+  /// it trades flexibility for stability. It is restricted to a forward
+  /// iterator but will survive mutations which insert new RefSCCs and continue
+  /// to point to the same RefSCC even if it moves in the post-order sequence.
+  class postorder_ref_scc_iterator
+      : public iterator_facade_base<postorder_ref_scc_iterator,
+                                    std::forward_iterator_tag, RefSCC> {
+    friend class LazyCallGraph;
+    friend class LazyCallGraph::Node;
+
+    /// Nonce type to select the constructor for the end iterator.
+    struct IsAtEndT {};
+
+    LazyCallGraph *G;
+    RefSCC *RC = nullptr;
+
+    /// Build the begin iterator for the graph's postorder RefSCC walk.
+    postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
+
+    /// Build the end iterator for the walk. This constructor is selected
+    /// purely by overload resolution on the nonce type.
+    postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}
+
+    /// Get the post-order RefSCC at the given index of the postorder walk,
+    /// populating it if necessary.
+    static RefSCC *getRC(LazyCallGraph &G, int Index) {
+      if (Index == (int)G.PostOrderRefSCCs.size())
+        // We're at the end.
+        return nullptr;
+
+      return G.PostOrderRefSCCs[Index];
+    }
+
+  public:
+    bool operator==(const postorder_ref_scc_iterator &Arg) const {
+      return G == Arg.G && RC == Arg.RC;
+    }
+
+    reference operator*() const { return *RC; }
+
+    using iterator_facade_base::operator++;
+    postorder_ref_scc_iterator &operator++() {
+      assert(RC && "Cannot increment the end iterator!");
+      RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
+      return *this;
+    }
+  };
+
+  /// Construct a graph for the given module.
+  ///
+  /// This sets up the graph and computes all of the entry points of the graph.
+  /// No function definitions are scanned until their nodes in the graph are
+  /// requested during traversal.
+  LazyCallGraph(Module &M, TargetLibraryInfo &TLI);
+
+  LazyCallGraph(LazyCallGraph &&G);
+  LazyCallGraph &operator=(LazyCallGraph &&RHS);
+
+  EdgeSequence::iterator begin() { return EntryEdges.begin(); }
+  EdgeSequence::iterator end() { return EntryEdges.end(); }
+
+  void buildRefSCCs();
+
+  postorder_ref_scc_iterator postorder_ref_scc_begin() {
+    if (!EntryEdges.empty())
+      assert(!PostOrderRefSCCs.empty() &&
+             "Must form RefSCCs before iterating them!");
+    return postorder_ref_scc_iterator(*this);
+  }
+  postorder_ref_scc_iterator postorder_ref_scc_end() {
+    if (!EntryEdges.empty())
+      assert(!PostOrderRefSCCs.empty() &&
+             "Must form RefSCCs before iterating them!");
+    return postorder_ref_scc_iterator(*this,
+                                      postorder_ref_scc_iterator::IsAtEndT());
+  }
+
+  iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
+    return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
+  }
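+  // A sketch of a full post-order walk over a built graph (CG is an assumed
+  // LazyCallGraph instance for the module):
+  //
+  //   CG.buildRefSCCs();
+  //   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+  //     for (LazyCallGraph::SCC &C : RC)
+  //       dbgs() << C.getName() << "\n";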
+
+  /// Lookup a function in the graph which has already been scanned and added.
+  Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
+
+  /// Lookup a function's SCC in the graph.
+  ///
+  /// \returns null if the function hasn't been assigned an SCC via the RefSCC
+  /// iterator walk.
+  SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
+
+  /// Lookup a function's RefSCC in the graph.
+  ///
+  /// \returns null if the function hasn't been assigned a RefSCC via the
+  /// RefSCC iterator walk.
+  RefSCC *lookupRefSCC(Node &N) const {
+    if (SCC *C = lookupSCC(N))
+      return &C->getOuterRefSCC();
+
+    return nullptr;
+  }
+
+  /// Get a graph node for a given function, scanning it to populate the graph
+  /// data as necessary.
+  Node &get(Function &F) {
+    Node *&N = NodeMap[&F];
+    if (N)
+      return *N;
+
+    return insertInto(F, N);
+  }
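+  // A sketch contrasting the pure lookup with the lazy accessor (F is an
+  // assumed Function reference):
+  //
+  //   LazyCallGraph::Node *Existing = CG.lookup(F); // null if never scanned
+  //   LazyCallGraph::Node &N = CG.get(F);           // scans F on demand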
+
+  /// Get the sequence of known and defined library functions.
+  ///
+  /// These functions, because they are known to LLVM, can have calls
+  /// introduced out of thin air from arbitrary IR.
+  ArrayRef<Function *> getLibFunctions() const {
+    return LibFunctions.getArrayRef();
+  }
+
+  /// Test whether a function is a known and defined library function tracked by
+  /// the call graph.
+  ///
+  /// Because these functions are known to LLVM they are specially modeled in
+  /// the call graph and even when all IR-level references have been removed
+  /// remain active and reachable.
+  bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }
+
+  ///@{
+  /// \name Pre-SCC Mutation API
+  ///
+  /// These methods are only valid to call prior to forming any SCCs for this
+  /// call graph. They can be used to update the core node-graph during
+  /// a node-based inorder traversal that precedes any SCC-based traversal.
+  ///
+  /// Once you begin manipulating a call graph's SCCs, most mutation of the
+  /// graph must be performed via a RefSCC method. There are some exceptions
+  /// below.
+
+  /// Update the call graph after inserting a new edge.
+  void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
+
+  /// Update the call graph after inserting a new edge.
+  void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
+    return insertEdge(get(Source), get(Target), EK);
+  }
+
+  /// Update the call graph after deleting an edge.
+  void removeEdge(Node &SourceN, Node &TargetN);
+
+  /// Update the call graph after deleting an edge.
+  void removeEdge(Function &Source, Function &Target) {
+    return removeEdge(get(Source), get(Target));
+  }
+
+  ///@}
+
+  ///@{
+  /// \name General Mutation API
+  ///
+  /// There are a very limited set of mutations allowed on the graph as a whole
+  /// once SCCs have started to be formed. These routines have strict contracts
+  /// but may be called at any point.
+
+  /// Remove a dead function from the call graph (typically to delete it).
+  ///
+  /// Note that the function must have an empty use list, and the call graph
+  /// must be up-to-date prior to calling this. That means it is by itself in
+  /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
+  /// changes result from calling this routine other than potentially removing
+  /// entry points into the call graph.
+  ///
+  /// If SCC formation has begun, this function must not be part of the current
+  /// DFS in order to call this safely. Typically, the function will have been
+  /// fully visited by the DFS prior to calling this routine.
+  void removeDeadFunction(Function &F);
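+  // A sketch of the expected deletion sequence; the ordering (graph update
+  // before IR-level erasure) is an assumption of typical use:
+  //
+  //   assert(F.use_empty() && "Cannot remove a function with uses");
+  //   CG.removeDeadFunction(F);
+  //   F.eraseFromParent();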
+
+  ///@}
+
+  ///@{
+  /// \name Static helpers for code doing updates to the call graph.
+  ///
+  /// These helpers are used to implement parts of the call graph but are also
+  /// useful to code doing updates or otherwise wanting to walk the IR in the
+  /// same patterns as when we build the call graph.
+
+  /// Recursively visits the defined functions whose address is reachable from
+  /// any constant in the \p Worklist.
+  ///
+  /// Doesn't recurse through any constants already in the \p Visited set, and
+  /// updates that set with every constant visited.
+  ///
+  /// For each defined function, calls \p Callback with that function.
+  template <typename CallbackT>
+  static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
+                              SmallPtrSetImpl<Constant *> &Visited,
+                              CallbackT Callback) {
+    while (!Worklist.empty()) {
+      Constant *C = Worklist.pop_back_val();
+
+      if (Function *F = dyn_cast<Function>(C)) {
+        if (!F->isDeclaration())
+          Callback(*F);
+        continue;
+      }
+
+      if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+        // The blockaddress constant expression is a weird special case, we
+        // can't generically walk its operands the way we do for all other
+        // constants.
+        if (Visited.insert(BA->getFunction()).second)
+          Worklist.push_back(BA->getFunction());
+        continue;
+      }
+
+      for (Value *Op : C->operand_values())
+        if (Visited.insert(cast<Constant>(Op)).second)
+          Worklist.push_back(cast<Constant>(Op));
+    }
+  }
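+  // A sketch of seeding and running the walk from a global variable's
+  // initializer (GV is an assumed GlobalVariable with an initializer):
+  //
+  //   SmallVector<Constant *, 16> Worklist;
+  //   SmallPtrSet<Constant *, 16> Visited;
+  //   if (Visited.insert(GV.getInitializer()).second)
+  //     Worklist.push_back(GV.getInitializer());
+  //   LazyCallGraph::visitReferences(Worklist, Visited, [](Function &F) {
+  //     dbgs() << "references: " << F.getName() << "\n";
+  //   });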
+
+  ///@}
+
+private:
+  using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
+  using node_stack_range = iterator_range<node_stack_iterator>;
+
+  /// Allocator that holds all the call graph nodes.
+  SpecificBumpPtrAllocator<Node> BPA;
+
+  /// Maps function->node for fast lookup.
+  DenseMap<const Function *, Node *> NodeMap;
+
+  /// The entry edges into the graph.
+  ///
+  /// These edges are from "external" sources. Put another way, they
+  /// escape at the module scope.
+  EdgeSequence EntryEdges;
+
+  /// Allocator that holds all the call graph SCCs.
+  SpecificBumpPtrAllocator<SCC> SCCBPA;
+
+  /// Maps Function -> SCC for fast lookup.
+  DenseMap<Node *, SCC *> SCCMap;
+
+  /// Allocator that holds all the call graph RefSCCs.
+  SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;
+
+  /// The post-order sequence of RefSCCs.
+  ///
+  /// This list is lazily formed the first time we walk the graph.
+  SmallVector<RefSCC *, 16> PostOrderRefSCCs;
+
+  /// A map from RefSCC to the index for it in the postorder sequence of
+  /// RefSCCs.
+  DenseMap<RefSCC *, int> RefSCCIndices;
+
+  /// Defined functions that are also known library functions which the
+  /// optimizer can reason about and therefore might introduce calls to out of
+  /// thin air.
+  SmallSetVector<Function *, 4> LibFunctions;
+
+  /// Helper to insert a new function, with an already looked-up entry in
+  /// the NodeMap.
+  Node &insertInto(Function &F, Node *&MappedN);
+
+  /// Helper to update pointers back to the graph object during moves.
+  void updateGraphPtrs();
+
+  /// Allocates an SCC and constructs it using the graph allocator.
+  ///
+  /// The arguments are forwarded to the constructor.
+  template <typename... Ts> SCC *createSCC(Ts &&... Args) {
+    return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
+  }
+
+  /// Allocates a RefSCC and constructs it using the graph allocator.
+  ///
+  /// The arguments are forwarded to the constructor.
+  template <typename... Ts> RefSCC *createRefSCC(Ts &&... Args) {
+    return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
+  }
+
+  /// Common logic for building SCCs from a sequence of roots.
+  ///
+  /// This is a very generic implementation of the depth-first walk and SCC
+  /// formation algorithm. It uses a generic sequence of roots and generic
+  /// callbacks for each step. This is designed to be used to implement both
+  /// the RefSCC formation and SCC formation with shared logic.
+  ///
+  /// Currently this is a relatively naive implementation of Tarjan's DFS
+  /// algorithm to form the SCCs.
+  ///
+  /// FIXME: We should consider newer variants such as Nuutila.
+  template <typename RootsT, typename GetBeginT, typename GetEndT,
+            typename GetNodeT, typename FormSCCCallbackT>
+  static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
+                               GetEndT &&GetEnd, GetNodeT &&GetNode,
+                               FormSCCCallbackT &&FormSCC);
+
+  /// Build the SCCs for a RefSCC out of a list of nodes.
+  void buildSCCs(RefSCC &RC, node_stack_range Nodes);
+
+  /// Get the index of a RefSCC within the postorder traversal.
+  ///
+  /// Requires that this RefSCC is a valid one in the (perhaps partial)
+  /// postorder traversed part of the graph.
+  int getRefSCCIndex(RefSCC &RC) {
+    auto IndexIt = RefSCCIndices.find(&RC);
+    assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
+    assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
+           "Index does not point back at RC!");
+    return IndexIt->second;
+  }
+};
+
+inline LazyCallGraph::Edge::Edge() : Value() {}
+inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}
+
+inline LazyCallGraph::Edge::operator bool() const {
+  return Value.getPointer() && !Value.getPointer()->isDead();
+}
+
+inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
+  assert(*this && "Queried a null edge!");
+  return Value.getInt();
+}
+
+inline bool LazyCallGraph::Edge::isCall() const {
+  assert(*this && "Queried a null edge!");
+  return getKind() == Call;
+}
+
+inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
+  assert(*this && "Queried a null edge!");
+  return *Value.getPointer();
+}
+
+inline Function &LazyCallGraph::Edge::getFunction() const {
+  assert(*this && "Queried a null edge!");
+  return getNode().getFunction();
+}
+
+// Provide GraphTraits specializations for call graphs.
+template <> struct GraphTraits<LazyCallGraph::Node *> {
+  using NodeRef = LazyCallGraph::Node *;
+  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
+};
+template <> struct GraphTraits<LazyCallGraph *> {
+  using NodeRef = LazyCallGraph::Node *;
+  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
+};
+
+/// An analysis pass which computes the call graph for a module.
+class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
+  friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// Inform generic clients of the result type.
+  using Result = LazyCallGraph;
+
+  /// Compute the \c LazyCallGraph for the module \c M.
+  ///
+  /// This just builds the set of entry points to the call graph. The rest is
+  /// built lazily as it is walked.
+  LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
+    return LazyCallGraph(M, AM.getResult<TargetLibraryAnalysis>(M));
+  }
+};
+
+/// A pass which prints the call graph to a \c raw_ostream.
+///
+/// This is primarily useful for testing the analysis.
+class LazyCallGraphPrinterPass
+    : public PassInfoMixin<LazyCallGraphPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit LazyCallGraphPrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// A pass which prints the call graph as a DOT file to a \c raw_ostream.
+///
+/// This is primarily useful for visualization purposes.
+class LazyCallGraphDOTPrinterPass
+    : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h
new file mode 100644
index 0000000..cea5bf0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h
@@ -0,0 +1,167 @@
+//===- LazyValueInfo.h - Value constraint analysis --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for lazy computation of value constraint
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
+#define LLVM_ANALYSIS_LAZYVALUEINFO_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+  class AssumptionCache;
+  class Constant;
+  class ConstantRange;
+  class DataLayout;
+  class DominatorTree;
+  class Instruction;
+  class TargetLibraryInfo;
+  class Value;
+
+/// This pass computes, caches, and vends lazy value constraint information.
+class LazyValueInfo {
+  friend class LazyValueInfoWrapperPass;
+  AssumptionCache *AC = nullptr;
+  const DataLayout *DL = nullptr;
+  class TargetLibraryInfo *TLI = nullptr;
+  DominatorTree *DT = nullptr;
+  void *PImpl = nullptr;
+  LazyValueInfo(const LazyValueInfo&) = delete;
+  void operator=(const LazyValueInfo&) = delete;
+public:
+  ~LazyValueInfo();
+  LazyValueInfo() {}
+  LazyValueInfo(AssumptionCache *AC_, const DataLayout *DL_, TargetLibraryInfo *TLI_,
+                DominatorTree *DT_)
+      : AC(AC_), DL(DL_), TLI(TLI_), DT(DT_) {}
+  LazyValueInfo(LazyValueInfo &&Arg)
+      : AC(Arg.AC), DL(Arg.DL), TLI(Arg.TLI), DT(Arg.DT), PImpl(Arg.PImpl) {
+    Arg.PImpl = nullptr;
+  }
+  LazyValueInfo &operator=(LazyValueInfo &&Arg) {
+    releaseMemory();
+    AC = Arg.AC;
+    DL = Arg.DL;
+    TLI = Arg.TLI;
+    DT = Arg.DT;
+    PImpl = Arg.PImpl;
+    Arg.PImpl = nullptr;
+    return *this;
+  }
+
+  /// This is used to return true/false/dunno results.
+  enum Tristate {
+    Unknown = -1, False = 0, True = 1
+  };
+
+  // Public query interface.
+
+  /// Determine whether the specified value comparison with a constant is known
+  /// to be true or false on the specified CFG edge.
+  /// Pred is a CmpInst predicate.
+  Tristate getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
+                              BasicBlock *FromBB, BasicBlock *ToBB,
+                              Instruction *CxtI = nullptr);
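+  // A sketch of an edge query: is V known to be zero along BB -> Succ?
+  // (V, BB, and Succ are assumed values from the calling transform; V must
+  // be integer-typed for the ConstantInt comparison to make sense.)
+  //
+  //   LazyValueInfo::Tristate R = LVI.getPredicateOnEdge(
+  //       CmpInst::ICMP_EQ, V, ConstantInt::get(V->getType(), 0), BB, Succ);
+  //   if (R == LazyValueInfo::True) { /* V == 0 holds on this edge */ }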
+
+  /// Determine whether the specified value comparison with a constant is known
+  /// to be true or false at the specified instruction
+  /// (from an assume intrinsic). Pred is a CmpInst predicate.
+  Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
+                          Instruction *CxtI);
+
+  /// Determine whether the specified value is known to be a
+  /// constant at the end of the specified block.  Return null if not.
+  Constant *getConstant(Value *V, BasicBlock *BB, Instruction *CxtI = nullptr);
+
+  /// Return the ConstantRange constraint that is known to hold for the
+  /// specified value at the end of the specified block. This may only be called
+  /// on integer-typed Values.
+  ConstantRange getConstantRange(Value *V, BasicBlock *BB, Instruction *CxtI = nullptr);
+
+  /// Determine whether the specified value is known to be a
+  /// constant on the specified edge.  Return null if not.
+  Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
+                              Instruction *CxtI = nullptr);
+
+  /// Return the ConstantRange constraint that is known to hold for the
+  /// specified value on the specified edge. This may only be called
+  /// on integer-typed Values.
+  ConstantRange getConstantRangeOnEdge(Value *V, BasicBlock *FromBB,
+                                       BasicBlock *ToBB,
+                                       Instruction *CxtI = nullptr);
+
+  /// Inform the analysis cache that we have threaded an edge from
+  /// PredBB to OldSucc to be from PredBB to NewSucc instead.
+  void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
+
+  /// Inform the analysis cache that we have erased a block.
+  void eraseBlock(BasicBlock *BB);
+
+  /// Print the \c LazyValueInfo analysis.
+  /// We pass in the DominatorTree that is required for identifying which
+  /// basic blocks we can solve/print for, in the LVI printer. The DT is
+  /// optional in LVI, so we need to pass it here as an argument.
+  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS);
+
+  /// Disables use of the DominatorTree within LVI.
+  void disableDT();
+
+  /// Enables use of the DominatorTree within LVI. Does nothing if the class
+  /// instance was initialized without a DT pointer.
+  void enableDT();
+
+  // For old PM pass. Delete once LazyValueInfoWrapperPass is gone.
+  void releaseMemory();
+
+  /// Handle invalidation events in the new pass manager.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+};
+
+/// \brief Analysis to compute lazy value information.
+class LazyValueAnalysis : public AnalysisInfoMixin<LazyValueAnalysis> {
+public:
+  typedef LazyValueInfo Result;
+  Result run(Function &F, FunctionAnalysisManager &FAM);
+
+private:
+  static AnalysisKey Key;
+  friend struct AnalysisInfoMixin<LazyValueAnalysis>;
+};
+
+/// Wrapper around LazyValueInfo.
+class LazyValueInfoWrapperPass : public FunctionPass {
+  LazyValueInfoWrapperPass(const LazyValueInfoWrapperPass&) = delete;
+  void operator=(const LazyValueInfoWrapperPass&) = delete;
+public:
+  static char ID;
+  LazyValueInfoWrapperPass() : FunctionPass(ID) {
+    initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+  ~LazyValueInfoWrapperPass() override {
+    assert(!Info.PImpl && "releaseMemory not called");
+  }
+
+  LazyValueInfo &getLVI();
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void releaseMemory() override;
+  bool runOnFunction(Function &F) override;
+private:
+  LazyValueInfo Info;
+};
+
+}  // end namespace llvm
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Analysis/Lint.h b/linux-x64/clang/include/llvm/Analysis/Lint.h
new file mode 100644
index 0000000..7c88b13
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Lint.h
@@ -0,0 +1,49 @@
+//===-- llvm/Analysis/Lint.h - LLVM IR Lint ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines lint interfaces that can be used for some sanity checking
+// of input to the system, and for checking that transformations
+// haven't done something bad. In contrast to the Verifier, the Lint checker
+// checks for undefined behavior or constructions with likely unintended
+// behavior.
+//
+// To see what specifically is checked, look at Lint.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LINT_H
+#define LLVM_ANALYSIS_LINT_H
+
+namespace llvm {
+
+class FunctionPass;
+class Module;
+class Function;
+
+/// @brief Create a lint pass.
+///
+/// Check a module or function.
+FunctionPass *createLintPass();
+
+/// @brief Check a module.
+///
+/// This should only be used for debugging, because it plays games with
+/// PassManagers and stuff.
+void lintModule(
+  const Module &M    ///< The module to be checked
+);
+
+/// @brief Check a function.
+void lintFunction(
+  const Function &F  ///< The function to be checked
+);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/Loads.h b/linux-x64/clang/include/llvm/Analysis/Loads.h
new file mode 100644
index 0000000..f110c28
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Loads.h
@@ -0,0 +1,130 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+class DataLayout;
+class MDNode;
+
+/// Return true if this is always a dereferenceable pointer. If the context
+/// instruction is specified perform context-sensitive analysis and return true
+/// if the pointer is dereferenceable at the specified instruction.
+bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
+                              const Instruction *CtxI = nullptr,
+                              const DominatorTree *DT = nullptr);
+
+/// Returns true if V is always a dereferenceable pointer with alignment
+/// greater than or equal to that requested. If the context instruction is
+/// specified, performs context-sensitive analysis and returns true if the
+/// pointer is dereferenceable at the specified instruction.
+bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+                                        const DataLayout &DL,
+                                        const Instruction *CtxI = nullptr,
+                                        const DominatorTree *DT = nullptr);
+
+/// Returns true if V is always dereferenceable for \p Size bytes with
+/// alignment greater than or equal to that requested. If the context
+/// instruction is specified, performs context-sensitive analysis and returns
+/// true if the pointer is dereferenceable at the specified instruction.
+bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+                                        const APInt &Size, const DataLayout &DL,
+                                        const Instruction *CtxI = nullptr,
+                                        const DominatorTree *DT = nullptr);
+
+/// Return true if we know that executing a load from this value cannot trap.
+///
+/// If DT and ScanFrom are specified this method performs context-sensitive
+/// analysis and returns true if it is safe to load immediately before ScanFrom.
+///
+/// If it is not obviously safe to load from the specified pointer, we do a
+/// quick local scan of the basic block containing ScanFrom, to determine if
+/// the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, unsigned Align,
+                                 const DataLayout &DL,
+                                 Instruction *ScanFrom = nullptr,
+                                 const DominatorTree *DT = nullptr);
+
+/// The default number of maximum instructions to scan in the block, used by
+/// FindAvailableLoadedValue().
+extern cl::opt<unsigned> DefMaxInstsToScan;
+
+/// Scan backwards to see if we have the value of the given load available
+/// locally within a small number of instructions.
+///
+/// You can use this function to scan across multiple blocks: after you call
+/// this function, if ScanFrom points at the beginning of the block, it's safe
+/// to continue scanning the predecessors.
+///
+/// Note that performing load CSE requires special care to make sure the
+/// metadata is set appropriately.  In particular, aliasing metadata needs
+/// to be merged.  (This doesn't matter for store-to-load forwarding because
+/// the only relevant load gets deleted.)
+///
+/// \param Load The load we want to replace.
+/// \param ScanBB The basic block to scan.
+/// \param [in,out] ScanFrom The location to start scanning from. When this
+/// function returns, it points at the last instruction scanned.
+/// \param MaxInstsToScan The maximum number of instructions to scan. If this
+/// is zero, the whole block will be scanned.
+/// \param AA Optional pointer to alias analysis, to make the scan more
+/// precise.
+/// \param [out] IsLoadCSE Whether the returned value is a load from the same
+/// location in memory, as opposed to the value operand of a store.
+///
+/// \returns The found value, or nullptr if no value is found.
+Value *FindAvailableLoadedValue(LoadInst *Load,
+                                BasicBlock *ScanBB,
+                                BasicBlock::iterator &ScanFrom,
+                                unsigned MaxInstsToScan = DefMaxInstsToScan,
+                                AliasAnalysis *AA = nullptr,
+                                bool *IsLoadCSE = nullptr,
+                                unsigned *NumScanedInst = nullptr);
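+// A sketch of typical use for load CSE; metadata merging, as noted above, is
+// the caller's responsibility and is elided here:
+//
+//   BasicBlock::iterator ScanFrom = Load->getIterator();
+//   bool IsLoadCSE = false;
+//   if (Value *V = FindAvailableLoadedValue(Load, Load->getParent(), ScanFrom,
+//                                           DefMaxInstsToScan, /*AA=*/nullptr,
+//                                           &IsLoadCSE))
+//     Load->replaceAllUsesWith(V);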
+
+/// Scan backwards to see if we have the value of the given pointer available
+/// locally within a small number of instructions.
+///
+/// You can use this function to scan across multiple blocks: after you call
+/// this function, if ScanFrom points at the beginning of the block, it's safe
+/// to continue scanning the predecessors.
+///
+/// \param Ptr The pointer we want the load and store to originate from.
+/// \param AccessTy The access type of the pointer.
+/// \param AtLeastAtomic Are we looking for at least an atomic load/store? If
+/// it is false, we can return an atomic or non-atomic load or store. If it is
+/// true, we need to return an atomic load or store.
+/// \param ScanBB The basic block to scan.
+/// \param [in,out] ScanFrom The location to start scanning from. When this
+/// function returns, it points at the last instruction scanned.
+/// \param MaxInstsToScan The maximum number of instructions to scan. If this
+/// is zero, the whole block will be scanned.
+/// \param AA Optional pointer to alias analysis, to make the scan more
+/// precise.
+/// \param [out] IsLoad Whether the returned value is a load from the same
+/// location in memory, as opposed to the value operand of a store.
+///
+/// \returns The found value, or nullptr if no value is found.
+Value *FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy, bool AtLeastAtomic,
+                                 BasicBlock *ScanBB,
+                                 BasicBlock::iterator &ScanFrom,
+                                 unsigned MaxInstsToScan, AliasAnalysis *AA,
+                                 bool *IsLoad, unsigned *NumScanedInst);
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h b/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h
new file mode 100644
index 0000000..28154c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -0,0 +1,749 @@
+//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the loop memory dependence framework that
+// was originally developed for the Loop Vectorizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
+#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
+
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class Value;
+class DataLayout;
+class ScalarEvolution;
+class Loop;
+class SCEV;
+class SCEVUnionPredicate;
+class LoopAccessInfo;
+class OptimizationRemarkEmitter;
+
+/// \brief Collection of parameters shared between the Loop Vectorizer and the
+/// Loop Access Analysis.
+struct VectorizerParams {
+  /// \brief Maximum SIMD width.
+  static const unsigned MaxVectorWidth;
+
+  /// \brief VF as overridden by the user.
+  static unsigned VectorizationFactor;
+  /// \brief Interleave factor as overridden by the user.
+  static unsigned VectorizationInterleave;
+  /// \brief True if force-vector-interleave was specified by the user.
+  static bool isInterleaveForced();
+
+  /// \brief When performing memory disambiguation checks at runtime do not
+  /// make more than this number of comparisons.
+  static unsigned RuntimeMemoryCheckThreshold;
+};
+
+/// \brief Checks memory dependences among accesses to the same underlying
+/// object to determine whether vectorization is legal or not (and at which
+/// vectorization factor).
+///
+/// Note: This class will compute a conservative dependence for accesses to
+/// different underlying pointers. Clients, such as the loop vectorizer, will
+/// sometimes deal with these potential dependencies by emitting runtime
+/// checks.
+///
+/// We use the ScalarEvolution framework to symbolically evaluate access
+/// function pairs. Since we currently don't restructure the loop we can rely
+/// on the program order of memory accesses to determine their safety.
+/// At the moment we will only deem accesses as safe for:
+///  * A negative constant distance assuming program order.
+///
+///      Safe: tmp = a[i + 1];     OR     a[i + 1] = x;
+///            a[i] = tmp;                y = a[i];
+///
+///   The latter case is safe because later checks guarantee that there can't
+///   be a cycle through a phi node (that is, we check that "x" and "y" are not
+///   the same variable: a header phi can only be an induction or a reduction, a
+///   reduction can't have a memory sink, an induction can't have a memory
+///   source). This is important and must not be violated (or we have to
+///   resort to checking for cycles through memory).
+///
+///  * A positive constant distance assuming program order that is bigger
+///    than the biggest memory access.
+///
+///     tmp = a[i]        OR              b[i] = x
+///     a[i+2] = tmp                      y = b[i+2];
+///
+///     Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
+///
+///  * Zero distances and all accesses have the same size.
+///
+class MemoryDepChecker {
+public:
+  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
+  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
+  /// \brief Set of potential dependent memory accesses.
+  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
+
+  /// \brief Dependence between memory access instructions.
+  struct Dependence {
+    /// \brief The type of the dependence.
+    enum DepType {
+      // No dependence.
+      NoDep,
+      // We couldn't determine the direction or the distance.
+      Unknown,
+      // Lexically forward.
+      //
+      // FIXME: If we only have loop-independent forward dependences (e.g. a
+      // read and write of A[i]), LAA will locally deem the dependence "safe"
+      // without querying the MemoryDepChecker.  Therefore we can miss
+      // enumerating loop-independent forward dependences in
+      // getDependences.  Note that as soon as there are different
+      // indices used to access the same array, the MemoryDepChecker *is*
+      // queried and the dependence list is complete.
+      Forward,
+      // Forward, but if vectorized, is likely to prevent store-to-load
+      // forwarding.
+      ForwardButPreventsForwarding,
+      // Lexically backward.
+      Backward,
+      // Backward, but the distance allows a vectorization factor of
+      // MaxSafeDepDistBytes.
+      BackwardVectorizable,
+      // Same, but may prevent store-to-load forwarding.
+      BackwardVectorizableButPreventsForwarding
+    };
+
+    /// \brief String version of the types.
+    static const char *DepName[];
+
+    /// \brief Index of the source of the dependence in the InstMap vector.
+    unsigned Source;
+    /// \brief Index of the destination of the dependence in the InstMap vector.
+    unsigned Destination;
+    /// \brief The type of the dependence.
+    DepType Type;
+
+    Dependence(unsigned Source, unsigned Destination, DepType Type)
+        : Source(Source), Destination(Destination), Type(Type) {}
+
+    /// \brief Return the source instruction of the dependence.
+    Instruction *getSource(const LoopAccessInfo &LAI) const;
+    /// \brief Return the destination instruction of the dependence.
+    Instruction *getDestination(const LoopAccessInfo &LAI) const;
+
+    /// \brief Dependence types that don't prevent vectorization.
+    static bool isSafeForVectorization(DepType Type);
+
+    /// \brief Lexically forward dependence.
+    bool isForward() const;
+    /// \brief Lexically backward dependence.
+    bool isBackward() const;
+
+    /// \brief May be a lexically backward dependence type (includes Unknown).
+    bool isPossiblyBackward() const;
+
+    /// \brief Print the dependence.  \p Instr is used to map the instruction
+    /// indices to instructions.
+    void print(raw_ostream &OS, unsigned Depth,
+               const SmallVectorImpl<Instruction *> &Instrs) const;
+  };
+
+  MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
+      : PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeRegisterWidth(-1U),
+        ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
+        RecordDependences(true) {}
+
+  /// \brief Register the location (instructions are given increasing numbers)
+  /// of a write access.
+  void addAccess(StoreInst *SI) {
+    Value *Ptr = SI->getPointerOperand();
+    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
+    InstMap.push_back(SI);
+    ++AccessIdx;
+  }
+
+  /// \brief Register the location (instructions are given increasing numbers)
+  /// of a read access.
+  void addAccess(LoadInst *LI) {
+    Value *Ptr = LI->getPointerOperand();
+    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
+    InstMap.push_back(LI);
+    ++AccessIdx;
+  }
+
+  /// \brief Check whether the dependencies between the accesses are safe.
+  ///
+  /// Only checks sets with elements in \p CheckDeps.
+  bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
+                   const ValueToValueMap &Strides);
+
+  /// \brief No memory dependence was encountered that would inhibit
+  /// vectorization.
+  bool isSafeForVectorization() const { return SafeForVectorization; }
+
+  /// \brief The maximum number of bytes of a vector register we can vectorize
+  /// the accesses safely with.
+  uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
+
+  /// \brief Return the number of elements that are safe to operate on
+  /// simultaneously, multiplied by the size of the element in bits.
+  uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }
+
+  /// \brief In some cases, when the dependency check fails, we can still
+  /// vectorize the loop with a dynamic array access check.
+  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
+
+  /// \brief Returns the memory dependences.  If null is returned we exceeded
+  /// the MaxDependences threshold and this information is not
+  /// available.
+  const SmallVectorImpl<Dependence> *getDependences() const {
+    return RecordDependences ? &Dependences : nullptr;
+  }
+
+  void clearDependences() { Dependences.clear(); }
+
+  /// \brief The vector of memory access instructions.  The indices are used as
+  /// instruction identifiers in the Dependence class.
+  const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
+    return InstMap;
+  }
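+  // A sketch of dumping the recorded dependences, guarding against the null
+  // result when the MaxDependences threshold was exceeded:
+  //
+  //   if (const auto *Deps = DepChecker.getDependences())
+  //     for (const MemoryDepChecker::Dependence &D : *Deps)
+  //       D.print(dbgs(), /*Depth=*/2, DepChecker.getMemoryInstructions());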
+
+  /// \brief Generate a mapping between the memory instructions and their
+  /// indices according to program order.
+  DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
+    DenseMap<Instruction *, unsigned> OrderMap;
+
+    for (unsigned I = 0; I < InstMap.size(); ++I)
+      OrderMap[InstMap[I]] = I;
+
+    return OrderMap;
+  }
+
+  /// \brief Find the set of instructions that read or write via \p Ptr.
+  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
+                                                         bool isWrite) const;
+
+private:
+  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
+  /// applies dynamic knowledge to simplify SCEV expressions and convert them
+  /// to a more usable form. We need this in case assumptions about SCEV
+  /// expressions need to be made in order to avoid unknown dependences. For
+  /// example we might assume a unit stride for a pointer in order to prove
+  /// that a memory access is strided and doesn't wrap.
+  PredicatedScalarEvolution &PSE;
+  const Loop *InnermostLoop;
+
+  /// \brief Maps access locations (ptr, read/write) to program order.
+  DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
+
+  /// \brief Memory access instructions in program order.
+  SmallVector<Instruction *, 16> InstMap;
+
+  /// \brief The program order index to be used for the next instruction.
+  unsigned AccessIdx;
+
+  // We can access this many bytes in parallel safely.
+  uint64_t MaxSafeDepDistBytes;
+
+  /// \brief Number of elements (from consecutive iterations) that are safe to
+  /// operate on simultaneously, multiplied by the size of the element in bits.
+  /// The size of the element is taken from the memory access that is most
+  /// restrictive.
+  uint64_t MaxSafeRegisterWidth;
+
+  /// \brief If we see a non-constant dependence distance we can still try to
+  /// vectorize this loop with runtime checks.
+  bool ShouldRetryWithRuntimeCheck;
+
+  /// \brief No memory dependence was encountered that would inhibit
+  /// vectorization.
+  bool SafeForVectorization;
+
+  /// \brief True if Dependences reflects the dependences in the
+  /// loop.  If false we exceeded MaxDependences and
+  /// Dependences is invalid.
+  bool RecordDependences;
+
+  /// \brief Memory dependences collected during the analysis.  Only valid if
+  /// RecordDependences is true.
+  SmallVector<Dependence, 8> Dependences;
+
+  /// \brief Check whether there is a plausible dependence between the two
+  /// accesses.
+  ///
+  /// Access \p A must happen before \p B in program order. The two indices
+  /// identify the index into the program order map.
+  ///
+  /// This function checks whether there is a plausible dependence (or the
+  /// absence of such can't be proved) between the two accesses. If there is a
+  /// plausible dependence but the dependence distance is bigger than one
+  /// element access it records this distance in \p MaxSafeDepDistBytes (if this
+  /// distance is smaller than any other distance encountered so far).
+  /// Otherwise, this function returns a dependence type signaling a possible
+  /// dependence.
+  Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
+                                  const MemAccessInfo &B, unsigned BIdx,
+                                  const ValueToValueMap &Strides);
+
+  /// \brief Check whether the data dependence could prevent store-load
+  /// forwarding.
+  ///
+  /// \return true if the dependence could prevent store-load forwarding, in
+  /// which case we shouldn't vectorize; otherwise this may limit
+  /// MaxSafeDepDistBytes to avoid larger vectorization factors.
+  bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
+};
+
+/// \brief Holds information about the memory runtime legality checks to verify
+/// that a group of pointers do not overlap.
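+///
+/// A short sketch of how a client might inspect the generated checks
+/// (assuming \c LAI is a computed LoopAccessInfo; illustrative only):
+/// \code
+///   const RuntimePointerChecking *RtPtrChecking =
+///       LAI.getRuntimePointerChecking();
+///   if (RtPtrChecking->Need)
+///     for (const auto &Check : RtPtrChecking->getChecks()) {
+///       // Check.first and Check.second are the two pointer groups that
+///       // must be proven disjoint at run time.
+///     }
+/// \endcode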
+class RuntimePointerChecking {
+public:
+  struct PointerInfo {
+    /// Holds the pointer value that we need to check.
+    TrackingVH<Value> PointerValue;
+    /// Holds the smallest byte address accessed by the pointer throughout all
+    /// iterations of the loop.
+    const SCEV *Start;
+    /// Holds the largest byte address accessed by the pointer throughout all
+    /// iterations of the loop, plus 1.
+    const SCEV *End;
+    /// Holds whether this pointer is used for writing to memory.
+    bool IsWritePtr;
+    /// Holds the id of the set of pointers that could be dependent because of a
+    /// shared underlying object.
+    unsigned DependencySetId;
+    /// Holds the id of the disjoint alias set to which this pointer belongs.
+    unsigned AliasSetId;
+    /// SCEV for the access.
+    const SCEV *Expr;
+
+    PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
+                bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
+                const SCEV *Expr)
+        : PointerValue(PointerValue), Start(Start), End(End),
+          IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
+          AliasSetId(AliasSetId), Expr(Expr) {}
+  };
+
+  RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}
+
+  /// Reset the state of the pointer runtime information.
+  void reset() {
+    Need = false;
+    Pointers.clear();
+    Checks.clear();
+  }
+
+  /// Insert a pointer and calculate the start and end SCEVs.
+  /// We need \p PSE in order to compute the SCEV expression of the pointer
+  /// according to the assumptions that we've made during the analysis.
+  /// The method might also version the pointer stride according to \p Strides,
+  /// and add new predicates to \p PSE.
+  void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+              unsigned ASId, const ValueToValueMap &Strides,
+              PredicatedScalarEvolution &PSE);
+
+  /// \brief Return true if no run-time memory checking is necessary.
+  bool empty() const { return Pointers.empty(); }
+
+  /// A grouping of pointers. A single memcheck is required between
+  /// two groups.
+  struct CheckingPtrGroup {
+    /// \brief Create a new pointer checking group containing a single
+    /// pointer, with index \p Index in RtCheck.
+    CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
+        : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
+          Low(RtCheck.Pointers[Index].Start) {
+      Members.push_back(Index);
+    }
+
+    /// \brief Tries to add the pointer recorded in RtCheck at index
+    /// \p Index to this pointer checking group. We can only add a pointer
+    /// to a checking group if we will still be able to get
+    /// the upper and lower bounds of the check. Returns true in case
+    /// of success, false otherwise.
+    bool addPointer(unsigned Index);
+
+    /// Constitutes the context of this pointer checking group. For each
+    /// pointer that is a member of this group we will retain the index
+    /// at which it appears in RtCheck.
+    RuntimePointerChecking &RtCheck;
+    /// The SCEV expression which represents the upper bound of all the
+    /// pointers in this group.
+    const SCEV *High;
+    /// The SCEV expression which represents the lower bound of all the
+    /// pointers in this group.
+    const SCEV *Low;
+    /// Indices of all the pointers that constitute this grouping.
+    SmallVector<unsigned, 2> Members;
+  };
+
+  /// \brief A memcheck which is made up of a pair of grouped pointers.
+  ///
+  /// These *have* to be const for now, since checks are generated from
+  /// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
+  /// function.  FIXME: once check-generation is moved inside this class (after
+  /// the PtrPartition hack is removed), we could drop const.
+  typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
+      PointerCheck;
+
+  /// \brief Generate the checks and store them.  This also performs the grouping
+  /// of pointers to reduce the number of memchecks necessary.
+  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
+                      bool UseDependencies);
+
+  /// \brief Returns the checks that generateChecks created.
+  const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
+
+  /// \brief Decide if we need to add a check between two groups of pointers,
+  /// according to needsChecking.
+  bool needsChecking(const CheckingPtrGroup &M,
+                     const CheckingPtrGroup &N) const;
+
+  /// \brief Returns the number of run-time checks required according to
+  /// needsChecking.
+  unsigned getNumberOfChecks() const { return Checks.size(); }
+
+  /// \brief Print the list of run-time memory checks necessary.
+  void print(raw_ostream &OS, unsigned Depth = 0) const;
+
+  /// Print \p Checks.
+  void printChecks(raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
+                   unsigned Depth = 0) const;
+
+  /// This flag indicates if we need to add the runtime check.
+  bool Need;
+
+  /// Information about the pointers that may require checking.
+  SmallVector<PointerInfo, 2> Pointers;
+
+  /// Holds a partitioning of pointers into "check groups".
+  SmallVector<CheckingPtrGroup, 2> CheckingGroups;
+
+  /// \brief Check if pointers are in the same partition
+  ///
+  /// \p PtrToPartition contains the partition number for pointers (-1 if the
+  /// pointer belongs to multiple partitions).
+  static bool
+  arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
+                             unsigned PtrIdx1, unsigned PtrIdx2);
+
+  /// \brief Decide whether we need to issue a run-time check for the pointers
+  /// at indices \p I and \p J to prove their independence.
+  bool needsChecking(unsigned I, unsigned J) const;
+
+  /// \brief Return PointerInfo for pointer at index \p PtrIdx.
+  const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
+    return Pointers[PtrIdx];
+  }
+
+private:
+  /// \brief Groups pointers such that a single memcheck is required
+  /// between two different groups. This will clear the CheckingGroups vector
+  /// and re-compute it. We will only group dependencies if \p UseDependencies
+  /// is true, otherwise we will create a separate group for each pointer.
+  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
+                   bool UseDependencies);
+
+  /// Generate the checks and return them.
+  SmallVector<PointerCheck, 4>
+  generateChecks() const;
+
+  /// Holds a pointer to the ScalarEvolution analysis.
+  ScalarEvolution *SE;
+
+  /// \brief Set of run-time checks required to establish independence of
+  /// otherwise may-aliasing pointers in the loop.
+  SmallVector<PointerCheck, 4> Checks;
+};
+
+/// \brief Drive the analysis of memory accesses in the loop
+///
+/// This class is responsible for analyzing the memory accesses of a loop.  It
+/// collects the accesses and then its main helper, the AccessAnalysis class,
+/// finds and categorizes the dependences in buildDependenceSets.
+///
+/// For memory dependences that can be analyzed at compile time, it determines
+/// whether the dependence is part of a cycle inhibiting vectorization.  This work
+/// is delegated to the MemoryDepChecker class.
+///
+/// For memory dependences that cannot be determined at compile time, it
+/// generates run-time checks to prove independence.  This is done by
+/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
+/// RuntimePointerChecking class.
+///
+/// If pointers can wrap or can't be expressed as affine AddRec expressions by
+/// ScalarEvolution, we will generate run-time checks by emitting a
+/// SCEVUnionPredicate.
+///
+/// Checks for both memory dependences and the SCEV predicates contained in the
+/// PSE must be emitted in order for the results of this analysis to be valid.
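+///
+/// A minimal sketch of a typical query sequence (assuming the analysis
+/// inputs \c SE, \c TLI, \c AA, \c DT and \c LI are available as pointers;
+/// illustrative only):
+/// \code
+///   LoopAccessInfo LAI(L, SE, TLI, AA, DT, LI);
+///   if (LAI.canVectorizeMemory()) {
+///     // Remaining may-alias pointers must be proven independent at run
+///     // time by emitting this many checks.
+///     unsigned NumChecks = LAI.getNumRuntimePointerChecks();
+///   }
+/// \endcode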
+class LoopAccessInfo {
+public:
+  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
+                 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI);
+
+  /// Return true if we can analyze the memory accesses in the loop and there are
+  /// no memory dependence cycles.
+  bool canVectorizeMemory() const { return CanVecMem; }
+
+  const RuntimePointerChecking *getRuntimePointerChecking() const {
+    return PtrRtChecking.get();
+  }
+
+  /// \brief Number of memchecks required to prove independence of otherwise
+  /// may-alias pointers.
+  unsigned getNumRuntimePointerChecks() const {
+    return PtrRtChecking->getNumberOfChecks();
+  }
+
+  /// Return true if the block BB needs to be predicated in order for the loop
+  /// to be vectorized.
+  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
+                                    DominatorTree *DT);
+
+  /// Returns true if the value V is uniform within the loop.
+  bool isUniform(Value *V) const;
+
+  uint64_t getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
+  unsigned getNumStores() const { return NumStores; }
+  unsigned getNumLoads() const { return NumLoads; }
+
+  /// \brief Add code that checks at runtime if the accessed arrays overlap.
+  ///
+  /// Returns a pair of instructions where the first element is the first
+  /// instruction generated (possibly part of a sequence of instructions) and
+  /// the second value is the final comparator value, or NULL if no check is
+  /// needed.
+  std::pair<Instruction *, Instruction *>
+  addRuntimeChecks(Instruction *Loc) const;
+
+  /// \brief Generate the instructions for the checks in \p PointerChecks.
+  ///
+  /// Returns a pair of instructions where the first element is the first
+  /// instruction generated (possibly part of a sequence of instructions) and
+  /// the second value is the final comparator value, or NULL if no check is
+  /// needed.
+  std::pair<Instruction *, Instruction *>
+  addRuntimeChecks(Instruction *Loc,
+                   const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
+                       &PointerChecks) const;
+
+  /// \brief The diagnostics report generated for the analysis.  E.g. why we
+  /// couldn't analyze the loop.
+  const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
+
+  /// \brief The Memory Dependence Checker which can determine the
+  /// loop-independent and loop-carried dependences between memory accesses.
+  const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
+
+  /// \brief Return the list of instructions that use \p Ptr to read or write
+  /// memory.
+  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
+                                                         bool isWrite) const {
+    return DepChecker->getInstructionsForAccess(Ptr, isWrite);
+  }
+
+  /// \brief If an access has a symbolic stride, this maps the pointer value to
+  /// the stride symbol.
+  const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
+
+  /// \brief Pointer has a symbolic stride.
+  bool hasStride(Value *V) const { return StrideSet.count(V); }
+
+  /// \brief Print the information about the memory accesses in the loop.
+  void print(raw_ostream &OS, unsigned Depth = 0) const;
+
+  /// \brief Checks for the existence of stores to a loop-invariant address
+  /// inside the loop.  Returns true if the loop has any store to a
+  /// loop-invariant address, false otherwise.
+  bool hasStoreToLoopInvariantAddress() const {
+    return StoreToLoopInvariantAddress;
+  }
+
+  /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
+  /// them to a more usable form.  All SCEV expressions during the analysis
+  /// should be re-written (and therefore simplified) according to PSE.
+  /// A user of LoopAccessAnalysis will need to emit the runtime checks
+  /// associated with this predicate.
+  const PredicatedScalarEvolution &getPSE() const { return *PSE; }
+
+private:
+  /// \brief Analyze the loop.
+  void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
+                   const TargetLibraryInfo *TLI, DominatorTree *DT);
+
+  /// \brief Check if the structure of the loop allows it to be analyzed by this
+  /// pass.
+  bool canAnalyzeLoop();
+
+  /// \brief Save the analysis remark.
+  ///
+  /// LAA does not directly emit the remarks.  Instead it stores them so that
+  /// the client can retrieve and present them as its own analysis
+  /// (e.g. -Rpass-analysis=loop-vectorize).
+  OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
+                                             Instruction *Instr = nullptr);
+
+  /// \brief Collect memory accesses with loop invariant strides.
+  ///
+  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
+  /// invariant.
+  void collectStridedAccess(Value *LoadOrStoreInst);
+
+  std::unique_ptr<PredicatedScalarEvolution> PSE;
+
+  /// We need to check that all of the pointers in this list are disjoint
+  /// at runtime. Using std::unique_ptr to make the move ctor simpler.
+  std::unique_ptr<RuntimePointerChecking> PtrRtChecking;
+
+  /// \brief The Memory Dependence Checker which can determine the
+  /// loop-independent and loop-carried dependences between memory accesses.
+  std::unique_ptr<MemoryDepChecker> DepChecker;
+
+  Loop *TheLoop;
+
+  unsigned NumLoads;
+  unsigned NumStores;
+
+  uint64_t MaxSafeDepDistBytes;
+
+  /// \brief Cache the result of analyzeLoop.
+  bool CanVecMem;
+
+  /// \brief Indicator for stores to uniform addresses.  True if the loop has
+  /// a write to a loop-invariant address.
+  bool StoreToLoopInvariantAddress;
+
+  /// \brief The diagnostics report generated for the analysis.  E.g. why we
+  /// couldn't analyze the loop.
+  std::unique_ptr<OptimizationRemarkAnalysis> Report;
+
+  /// \brief If an access has a symbolic stride, this maps the pointer value to
+  /// the stride symbol.
+  ValueToValueMap SymbolicStrides;
+
+  /// \brief Set of symbolic stride values.
+  SmallPtrSet<Value *, 8> StrideSet;
+};
+
+Value *stripIntegerCast(Value *V);
+
+/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
+/// replaced with constant one, assuming the SCEV predicate associated with
+/// \p PSE is true.
+///
+/// If necessary this method will version the stride of the pointer according
+/// to \p PtrToStride and therefore add further predicates to \p PSE.
+///
+/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
+/// Ptr.  \p PtrToStride provides the mapping between the pointer value and its
+/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
+const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
+                                      const ValueToValueMap &PtrToStride,
+                                      Value *Ptr, Value *OrigPtr = nullptr);
+
+/// \brief If the pointer has a constant stride return it in units of its
+/// element size.  Otherwise return zero.
+///
+/// Ensure that it does not wrap in the address space, assuming the predicate
+/// associated with \p PSE is true.
+///
+/// If necessary this method will version the stride of the pointer according
+/// to \p PtrToStride and therefore add further predicates to \p PSE.
+/// The \p Assume parameter indicates if we are allowed to make additional
+/// run-time assumptions.
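+///
+/// A minimal sketch (assuming \p PSE, \p Ptr and the innermost loop \p Lp
+/// are in scope; illustrative only):
+/// \code
+///   // The stride is in units of the element size: 0 means no provable
+///   // constant stride, 1 means consecutive forward accesses.
+///   int64_t Stride = getPtrStride(PSE, Ptr, Lp);
+///   bool IsConsecutive = (Stride == 1);
+/// \endcode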
+int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
+                     const ValueToValueMap &StridesMap = ValueToValueMap(),
+                     bool Assume = false, bool ShouldCheckWrap = true);
+
+/// \brief Returns true if the memory operations \p A and \p B are consecutive.
+/// This is a simple API that does not depend on the analysis pass.
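+///
+/// For example (assuming \p A and \p B are the two memory instructions and
+/// \c DL is the module's DataLayout; illustrative only):
+/// \code
+///   // True if B accesses the element right after A (e.g. a[i] and a[i+1]);
+///   // with CheckType left at its default, the types must also match.
+///   bool Consecutive = isConsecutiveAccess(A, B, DL, SE);
+/// \endcode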
+bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
+                         ScalarEvolution &SE, bool CheckType = true);
+
+/// \brief This analysis provides dependence information for the memory accesses
+/// of a loop.
+///
+/// It runs the analysis for a loop on demand.  This can be initiated by
+/// querying the loop access info via LAA::getInfo.  getInfo returns a
+/// LoopAccessInfo object.  See this class for the specifics of what information
+/// is provided.
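+///
+/// A minimal sketch of querying it from another legacy pass (assuming that
+/// pass lists LoopAccessLegacyAnalysis in its getAnalysisUsage; illustrative
+/// only):
+/// \code
+///   auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
+///   const LoopAccessInfo &LAI = LAA.getInfo(L);
+/// \endcode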
+class LoopAccessLegacyAnalysis : public FunctionPass {
+public:
+  static char ID;
+
+  LoopAccessLegacyAnalysis() : FunctionPass(ID) {
+    initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// \brief Query the result of the loop access information for the loop \p L.
+  ///
+  /// If there is no cached result available run the analysis.
+  const LoopAccessInfo &getInfo(Loop *L);
+
+  void releaseMemory() override {
+    // Invalidate the cache when the pass is freed.
+    LoopAccessInfoMap.clear();
+  }
+
+  /// \brief Print the result of the analysis when invoked with -analyze.
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+private:
+  /// \brief The cache.
+  DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
+
+  // The used analysis passes.
+  ScalarEvolution *SE;
+  const TargetLibraryInfo *TLI;
+  AliasAnalysis *AA;
+  DominatorTree *DT;
+  LoopInfo *LI;
+};
+
+/// \brief This analysis provides dependence information for the memory
+/// accesses of a loop.
+///
+/// It runs the analysis for a loop on demand.  This can be initiated by
+/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
+/// getResult returns a LoopAccessInfo object.  See this class for the
+/// specifics of what information is provided.
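+///
+/// For example, from within a loop pass under the new pass manager
+/// (illustrative only):
+/// \code
+///   const LoopAccessInfo &LAI = AM.getResult<LoopAccessAnalysis>(L, AR);
+/// \endcode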
+class LoopAccessAnalysis
+    : public AnalysisInfoMixin<LoopAccessAnalysis> {
+  friend AnalysisInfoMixin<LoopAccessAnalysis>;
+  static AnalysisKey Key;
+
+public:
+  typedef LoopAccessInfo Result;
+
+  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+};
+
+inline Instruction *MemoryDepChecker::Dependence::getSource(
+    const LoopAccessInfo &LAI) const {
+  return LAI.getDepChecker().getMemoryInstructions()[Source];
+}
+
+inline Instruction *MemoryDepChecker::Dependence::getDestination(
+    const LoopAccessInfo &LAI) const {
+  return LAI.getDepChecker().getMemoryInstructions()[Destination];
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h b/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h
new file mode 100644
index 0000000..417ee97
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h
@@ -0,0 +1,160 @@
+//===- LoopAnalysisManager.h - Loop analysis management ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing per-loop analyses. These are
+/// typically used as part of a loop pass pipeline over the loop nests of
+/// a function.
+///
+/// Loop analyses are allowed to make some simplifying assumptions:
+/// 1) Loops are, where possible, in simplified form.
+/// 2) Loops are *always* in LCSSA form.
+/// 3) A collection of analysis results are available:
+///    - LoopInfo
+///    - DominatorTree
+///    - ScalarEvolution
+///    - AAManager
+///
+/// The primary mechanism to provide these invariants is the loop pass manager,
+/// but they can also be manually provided in order to reason about a loop from
+/// outside of a dedicated pass manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
+#define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// The adaptor from a function pass to a loop pass computes these analyses and
+/// makes them available to the loop passes "for free". Each loop pass is
+/// expected to update these analyses if necessary to ensure they're
+/// valid after it runs.
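+///
+/// A sketch of how an adaptor might assemble these results from a
+/// FunctionAnalysisManager \c FAM for a function \c F (illustrative only;
+/// the real adaptor performs this wiring on behalf of loop passes):
+/// \code
+///   LoopStandardAnalysisResults AR = {FAM.getResult<AAManager>(F),
+///                                     FAM.getResult<AssumptionAnalysis>(F),
+///                                     FAM.getResult<DominatorTreeAnalysis>(F),
+///                                     FAM.getResult<LoopAnalysis>(F),
+///                                     FAM.getResult<ScalarEvolutionAnalysis>(F),
+///                                     FAM.getResult<TargetLibraryAnalysis>(F),
+///                                     FAM.getResult<TargetIRAnalysis>(F),
+///                                     nullptr /* MemorySSA */};
+/// \endcode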
+struct LoopStandardAnalysisResults {
+  AAResults &AA;
+  AssumptionCache &AC;
+  DominatorTree &DT;
+  LoopInfo &LI;
+  ScalarEvolution &SE;
+  TargetLibraryInfo &TLI;
+  TargetTransformInfo &TTI;
+  MemorySSA *MSSA;
+};
+
+/// Enables MemorySSA as a dependency for loop passes.
+extern cl::opt<bool> EnableMSSALoopDependency;
+
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn<Loop>;
+
+extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+/// \brief The loop analysis manager.
+///
+/// See the documentation for the AnalysisManager template for detailed
+/// documentation. This typedef serves as a convenient way to refer to this
+/// construct in the adaptors and proxies used to integrate this into the larger
+/// pass manager infrastructure.
+typedef AnalysisManager<Loop, LoopStandardAnalysisResults &>
+    LoopAnalysisManager;
+
+/// A proxy from a \c LoopAnalysisManager to a \c Function.
+typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
+    LoopAnalysisManagerFunctionProxy;
+
+/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which
+/// retains a \c LoopInfo reference.
+///
+/// This allows it to collect loop objects for which analysis results may be
+/// cached in the \c LoopAnalysisManager.
+template <> class LoopAnalysisManagerFunctionProxy::Result {
+public:
+  explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
+      : InnerAM(&InnerAM), LI(&LI) {}
+  Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) {
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    Arg.InnerAM = nullptr;
+  }
+  Result &operator=(Result &&RHS) {
+    InnerAM = RHS.InnerAM;
+    LI = RHS.LI;
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    RHS.InnerAM = nullptr;
+    return *this;
+  }
+  ~Result() {
+    // InnerAM is cleared in a moved-from state where there is nothing to do.
+    if (!InnerAM)
+      return;
+
+    // Clear out the analysis manager if we're being destroyed -- it means we
+    // didn't even see an invalidate call when we got invalidated.
+    InnerAM->clear();
+  }
+
+  /// Accessor for the analysis manager.
+  LoopAnalysisManager &getManager() { return *InnerAM; }
+
+  /// Handler for invalidation of the proxy for a particular function.
+  ///
+  /// If the proxy, \c LoopInfo, and associated analyses are preserved, this
+  /// will merely forward the invalidation event to any cached loop analysis
+  /// results for loops within this function.
+  ///
+  /// If the necessary loop infrastructure is not preserved, this will forcibly
+  /// clear all of the cached analysis results that are keyed on the \c
+  /// LoopInfo for this function.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+private:
+  LoopAnalysisManager *InnerAM;
+  LoopInfo *LI;
+};
+
+/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
+/// so it can pass the \c LoopInfo to the result.
+template <>
+LoopAnalysisManagerFunctionProxy::Result
+LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
+
+// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
+
+extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+                                                LoopStandardAnalysisResults &>;
+/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
+typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+                                  LoopStandardAnalysisResults &>
+    FunctionAnalysisManagerLoopProxy;
+
+/// Returns the minimum set of Analyses that all loop passes must preserve.
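+///
+/// A loop pass that preserves exactly this standard set would typically end
+/// its run method with (illustrative only):
+/// \code
+///   return getLoopPassPreservedAnalyses();
+/// \endcode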
+PreservedAnalyses getLoopPassPreservedAnalyses();
+} // End llvm namespace
+
+#endif // LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopInfo.h b/linux-x64/clang/include/llvm/Analysis/LoopInfo.h
new file mode 100644
index 0000000..28afc39
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopInfo.h
@@ -0,0 +1,988 @@
+//===- llvm/Analysis/LoopInfo.h - Natural Loop Calculator -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoopInfo class that is used to identify natural loops
+// and determine the loop depth of various nodes of the CFG.  A natural loop
+// has exactly one entry-point, which is called the header. Note that natural
+// loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function.  For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+//  * whether there is a preheader for the loop
+//  * the number of back edges to the header
+//  * whether or not a particular block branches out of the loop
+//  * the successor blocks of the loop
+//  * the loop depth
+//  * etc...
+//
+// Note that this analysis specifically identifies *Loops* not cycles or SCCs
+// in the CFG.  There can be strongly connected components in the CFG which
+// this analysis will not recognize and that will not be represented by a Loop
+// instance.  In particular, a Loop might be inside such a non-loop SCC, or a
+// non-loop SCC might contain a sub-SCC which is a Loop.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPINFO_H
+#define LLVM_ANALYSIS_LOOPINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <utility>
+
+namespace llvm {
+
+class DominatorTree;
+class LoopInfo;
+class Loop;
+class MDNode;
+class PHINode;
+class raw_ostream;
+template <class N, bool IsPostDom> class DominatorTreeBase;
+template <class N, class M> class LoopInfoBase;
+template <class N, class M> class LoopBase;
+
+//===----------------------------------------------------------------------===//
+/// Instances of this class are used to represent loops that are detected in the
+/// flow graph.
+///
+template <class BlockT, class LoopT> class LoopBase {
+  LoopT *ParentLoop;
+  // Loops contained entirely within this one.
+  std::vector<LoopT *> SubLoops;
+
+  // The list of blocks in this loop. First entry is the header node.
+  std::vector<BlockT *> Blocks;
+
+  SmallPtrSet<const BlockT *, 8> DenseBlockSet;
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+  /// Indicator that this loop is no longer a valid loop.
+  bool IsInvalid = false;
+#endif
+
+  LoopBase(const LoopBase<BlockT, LoopT> &) = delete;
+  const LoopBase<BlockT, LoopT> &
+  operator=(const LoopBase<BlockT, LoopT> &) = delete;
+
+public:
+  /// Return the nesting level of this loop.  An outer-most loop has depth 1,
+  /// for consistency with loop depth values used for basic blocks, where depth
+  /// 0 is used for blocks not inside any loops.
+  unsigned getLoopDepth() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    unsigned D = 1;
+    for (const LoopT *CurLoop = ParentLoop; CurLoop;
+         CurLoop = CurLoop->ParentLoop)
+      ++D;
+    return D;
+  }
+  BlockT *getHeader() const { return getBlocks().front(); }
+  LoopT *getParentLoop() const { return ParentLoop; }
+
+  /// This is a raw interface for bypassing addChildLoop.
+  void setParentLoop(LoopT *L) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    ParentLoop = L;
+  }
+
+  /// Return true if the specified loop is contained within this loop.
+  bool contains(const LoopT *L) const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    if (L == this)
+      return true;
+    if (!L)
+      return false;
+    return contains(L->getParentLoop());
+  }
+
+  /// Return true if the specified basic block is in this loop.
+  bool contains(const BlockT *BB) const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return DenseBlockSet.count(BB);
+  }
+
+  /// Return true if the specified instruction is in this loop.
+  template <class InstT> bool contains(const InstT *Inst) const {
+    return contains(Inst->getParent());
+  }
+
+  /// Return the loops contained entirely within this loop.
+  const std::vector<LoopT *> &getSubLoops() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return SubLoops;
+  }
+  std::vector<LoopT *> &getSubLoopsVector() {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return SubLoops;
+  }
+  typedef typename std::vector<LoopT *>::const_iterator iterator;
+  typedef
+      typename std::vector<LoopT *>::const_reverse_iterator reverse_iterator;
+  iterator begin() const { return getSubLoops().begin(); }
+  iterator end() const { return getSubLoops().end(); }
+  reverse_iterator rbegin() const { return getSubLoops().rbegin(); }
+  reverse_iterator rend() const { return getSubLoops().rend(); }
+  bool empty() const { return getSubLoops().empty(); }
+
+  /// Get a list of the basic blocks which make up this loop.
+  ArrayRef<BlockT *> getBlocks() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return Blocks;
+  }
+  typedef typename ArrayRef<BlockT *>::const_iterator block_iterator;
+  block_iterator block_begin() const { return getBlocks().begin(); }
+  block_iterator block_end() const { return getBlocks().end(); }
+  inline iterator_range<block_iterator> blocks() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return make_range(block_begin(), block_end());
+  }
+
+  /// Get the number of blocks in this loop in constant time.
+  unsigned getNumBlocks() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return Blocks.size();
+  }
+
+  /// Return a direct, mutable handle to the blocks vector so that we can
+  /// mutate it efficiently with techniques like `std::remove`.
+  std::vector<BlockT *> &getBlocksVector() {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return Blocks;
+  }
+  /// Return a direct, mutable handle to the blocks set so that we can
+  /// mutate it efficiently.
+  SmallPtrSetImpl<const BlockT *> &getBlocksSet() {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    return DenseBlockSet;
+  }
+
+  /// Return true if this loop is no longer valid.  The only valid use of this
+  /// helper is "assert(!L.isInvalid())" or equivalent, since IsInvalid is set to
+  /// true by the destructor.  In other words, if this accessor returns true,
+  /// the caller has already triggered UB by calling this accessor; and so it
+  /// can only be called in a context where a return value of true indicates a
+  /// programmer error.
+  bool isInvalid() const {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    return IsInvalid;
+#else
+    return false;
+#endif
+  }
+
+  /// True if the terminator in the block can branch to another block that is
+  /// outside of the current loop.
+  bool isLoopExiting(const BlockT *BB) const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    for (const auto &Succ : children<const BlockT *>(BB)) {
+      if (!contains(Succ))
+        return true;
+    }
+    return false;
+  }
+
+  /// Returns true if \p BB is a loop-latch.
+  /// A latch block is a block that contains a branch back to the header.
+  /// This function is useful when there are multiple latches in a loop
+  /// because \c getLoopLatch will return nullptr in that case.
+  bool isLoopLatch(const BlockT *BB) const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    assert(contains(BB) && "block does not belong to the loop");
+
+    BlockT *Header = getHeader();
+    auto PredBegin = GraphTraits<Inverse<BlockT *>>::child_begin(Header);
+    auto PredEnd = GraphTraits<Inverse<BlockT *>>::child_end(Header);
+    return std::find(PredBegin, PredEnd, BB) != PredEnd;
+  }
+
+  /// Calculate the number of back edges to the loop header.
+  unsigned getNumBackEdges() const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    unsigned NumBackEdges = 0;
+    BlockT *H = getHeader();
+
+    for (const auto Pred : children<Inverse<BlockT *>>(H))
+      if (contains(Pred))
+        ++NumBackEdges;
+
+    return NumBackEdges;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // APIs for simple analysis of the loop.
+  //
+  // Note that all of these methods can fail on general loops (i.e., there may not
+  // be a preheader, etc).  For best success, the loop simplification and
+  // induction variable canonicalization pass should be used to normalize loops
+  // for easy analysis.  These methods assume canonical loops.
+
+  /// Return all blocks inside the loop that have successors outside of the
+  /// loop. These are the blocks _inside of the current loop_ which branch out.
+  /// The returned list is always unique.
+  void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;
+
+  /// If getExitingBlocks would return exactly one block, return that block.
+  /// Otherwise return null.
+  BlockT *getExitingBlock() const;
+
+  /// Return all of the successor blocks of this loop. These are the blocks
+  /// _outside of the current loop_ which are branched to.
+  void getExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;
+
+  /// If getExitBlocks would return exactly one block, return that block.
+  /// Otherwise return null.
+  BlockT *getExitBlock() const;
+
+  /// Edge type.
+  typedef std::pair<const BlockT *, const BlockT *> Edge;
+
+  /// Return all pairs of (_inside_block_,_outside_block_).
+  void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;
+
+  /// If there is a preheader for this loop, return it. A loop has a preheader
+  /// if there is only one edge to the header of the loop from outside of the
+  /// loop. If this is the case, the block branching to the header of the loop
+  /// is the preheader node.
+  ///
+  /// This method returns null if there is no preheader for the loop.
+  BlockT *getLoopPreheader() const;
+
+  /// If the given loop's header has exactly one unique predecessor outside the
+  /// loop, return it. Otherwise return null.
+  /// This is less strict than the loop "preheader" concept, which requires
+  /// the predecessor to have exactly one successor.
+  BlockT *getLoopPredecessor() const;
+
+  /// If there is a single latch block for this loop, return it.
+  /// A latch block is a block that contains a branch back to the header.
+  BlockT *getLoopLatch() const;
+
+  /// Return all loop latch blocks of this loop. A latch block is a block that
+  /// contains a branch back to the header.
+  void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    BlockT *H = getHeader();
+    for (const auto Pred : children<Inverse<BlockT *>>(H))
+      if (contains(Pred))
+        LoopLatches.push_back(Pred);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // APIs for updating loop information after changing the CFG
+  //
+
+  /// This method is used by other analyses to update loop information.
+  /// NewBB is set to be a new member of the current loop.
+  /// Because of this, it is added as a member of all parent loops, and is added
+  /// to the specified LoopInfo object as being in the current basic block.  It
+  /// is not valid to replace the loop header with this method.
+  void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LI);
+
+  /// This is used when splitting loops up. It replaces the OldChild entry in
+  /// our children list with NewChild, and updates the parent pointer of
+  /// OldChild to be null and the NewChild to be this loop.
+  /// This updates the loop depth of the new child.
+  void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);
+
+  /// Add the specified loop to be a child of this loop.
+  /// This updates the loop depth of the new child.
+  void addChildLoop(LoopT *NewChild) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    assert(!NewChild->ParentLoop && "NewChild already has a parent!");
+    NewChild->ParentLoop = static_cast<LoopT *>(this);
+    SubLoops.push_back(NewChild);
+  }
+
+  /// This removes the specified child from being a subloop of this loop. The
+  /// loop is not deleted, as it will presumably be inserted into another loop.
+  LoopT *removeChildLoop(iterator I) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    assert(I != SubLoops.end() && "Cannot remove end iterator!");
+    LoopT *Child = *I;
+    assert(Child->ParentLoop == this && "Child is not a child of this loop!");
+    SubLoops.erase(SubLoops.begin() + (I - begin()));
+    Child->ParentLoop = nullptr;
+    return Child;
+  }
+
+  /// This removes the specified child from being a subloop of this loop. The
+  /// loop is not deleted, as it will presumably be inserted into another loop.
+  LoopT *removeChildLoop(LoopT *Child) {
+    return removeChildLoop(llvm::find(*this, Child));
+  }
+
+  /// This adds a basic block directly to the basic block list.
+  /// This should only be used by transformations that create new loops.  Other
+  /// transformations should use addBasicBlockToLoop.
+  void addBlockEntry(BlockT *BB) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    Blocks.push_back(BB);
+    DenseBlockSet.insert(BB);
+  }
+
+  /// Interface to reverse Blocks[from, end of loop] in this loop.
+  void reverseBlock(unsigned from) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    std::reverse(Blocks.begin() + from, Blocks.end());
+  }
+
+  /// Interface to do reserve() for Blocks.
+  void reserveBlocks(unsigned size) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    Blocks.reserve(size);
+  }
+
+  /// This method is used to move BB (which must be part of this loop) to be the
+  /// loop header of the loop (the block that dominates all others).
+  void moveToHeader(BlockT *BB) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    if (Blocks[0] == BB)
+      return;
+    for (unsigned i = 0;; ++i) {
+      assert(i != Blocks.size() && "Loop does not contain BB!");
+      if (Blocks[i] == BB) {
+        Blocks[i] = Blocks[0];
+        Blocks[0] = BB;
+        return;
+      }
+    }
+  }
+
+  /// This removes the specified basic block from the current loop, updating the
+  /// Blocks as appropriate. This does not update the mapping in the LoopInfo
+  /// class.
+  void removeBlockFromLoop(BlockT *BB) {
+    assert(!isInvalid() && "Loop not in a valid state!");
+    auto I = find(Blocks, BB);
+    assert(I != Blocks.end() && "BB is not in this list!");
+    Blocks.erase(I);
+
+    DenseBlockSet.erase(BB);
+  }
+
+  /// Verify loop structure.
+  void verifyLoop() const;
+
+  /// Verify loop structure of this loop and all nested loops.
+  void verifyLoopNest(DenseSet<const LoopT *> *Loops) const;
+
+  /// Print loop with all the BBs inside it.
+  void print(raw_ostream &OS, unsigned Depth = 0, bool Verbose = false) const;
+
+protected:
+  friend class LoopInfoBase<BlockT, LoopT>;
+
+  /// This creates an empty loop.
+  LoopBase() : ParentLoop(nullptr) {}
+
+  explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
+    Blocks.push_back(BB);
+    DenseBlockSet.insert(BB);
+  }
+
+  // Since loop passes like SCEV are allowed to key analysis results off of
+  // `Loop` pointers, we cannot re-use pointers within a loop pass manager.
+  // This means loop passes should not be `delete`ing `Loop` objects directly
+  // (and risk a later `Loop` allocation re-using the address of a previous one)
+  // but should be using LoopInfo::markAsRemoved, which keeps around the `Loop`
+  // pointer till the end of the lifetime of the `LoopInfo` object.
+  //
+  // To make it easier to follow this rule, we mark the destructor as
+  // non-public.
+  ~LoopBase() {
+    for (auto *SubLoop : SubLoops)
+      SubLoop->~LoopT();
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    IsInvalid = true;
+#endif
+    SubLoops.clear();
+    Blocks.clear();
+    DenseBlockSet.clear();
+    ParentLoop = nullptr;
+  }
+};
+
+template <class BlockT, class LoopT>
+raw_ostream &operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
+  Loop.print(OS);
+  return OS;
+}
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopBase<BasicBlock, Loop>;
+
+/// Represents a single loop in the control flow graph.  Note that not all SCCs
+/// in the CFG are necessarily loops.
+class Loop : public LoopBase<BasicBlock, Loop> {
+public:
+  /// \brief A range representing the start and end location of a loop.
+  class LocRange {
+    DebugLoc Start;
+    DebugLoc End;
+
+  public:
+    LocRange() {}
+    // Note: Start is copy-initialized first, then the parameter is moved
+    // into End (members are initialized in declaration order).
+    LocRange(DebugLoc Start) : Start(Start), End(std::move(Start)) {}
+    LocRange(DebugLoc Start, DebugLoc End)
+        : Start(std::move(Start)), End(std::move(End)) {}
+
+    const DebugLoc &getStart() const { return Start; }
+    const DebugLoc &getEnd() const { return End; }
+
+    /// \brief Check for null.
+    ///
+    explicit operator bool() const { return Start && End; }
+  };
+
+  /// Return true if the specified value is loop invariant.
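+  ///
+  /// For example (assuming \c L is this loop and \c V a value of interest;
+  /// illustrative only):
+  /// \code
+  ///   if (!L->isLoopInvariant(V)) {
+  ///     bool Changed = false;
+  ///     // Try to hoist V into the preheader to make it invariant.
+  ///     L->makeLoopInvariant(V, Changed);
+  ///   }
+  /// \endcode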
+  bool isLoopInvariant(const Value *V) const;
+
+  /// Return true if all the operands of the specified instruction are loop
+  /// invariant.
+  bool hasLoopInvariantOperands(const Instruction *I) const;
+
+  /// If the given value is an instruction inside of the loop and it can be
+  /// hoisted, do so to make it trivially loop-invariant.
+  /// Return true if the value after any hoisting is loop invariant. This
+  /// function can be used as a slightly more aggressive replacement for
+  /// isLoopInvariant.
+  ///
+  /// If InsertPt is specified, it is the point to hoist instructions to.
+  /// If null, the terminator of the loop preheader is used.
+  bool makeLoopInvariant(Value *V, bool &Changed,
+                         Instruction *InsertPt = nullptr) const;
+
+  /// If the given instruction is inside of the loop and it can be hoisted, do
+  /// so to make it trivially loop-invariant.
+  /// Return true if the instruction after any hoisting is loop invariant. This
+  /// function can be used as a slightly more aggressive replacement for
+  /// isLoopInvariant.
+  ///
+  /// If InsertPt is specified, it is the point to hoist instructions to.
+  /// If null, the terminator of the loop preheader is used.
+  ///
+  bool makeLoopInvariant(Instruction *I, bool &Changed,
+                         Instruction *InsertPt = nullptr) const;
+
+  /// Check to see if the loop has a canonical induction variable: an integer
+  /// recurrence that starts at 0 and increments by one each time through the
+  /// loop. If so, return the phi node that corresponds to it.
+  ///
+  /// The IndVarSimplify pass transforms loops to have a canonical induction
+  /// variable.
+  ///
+  PHINode *getCanonicalInductionVariable() const;
+
+  /// Return true if the Loop is in LCSSA form.
+  bool isLCSSAForm(DominatorTree &DT) const;
+
+  /// Return true if this Loop and all inner subloops are in LCSSA form.
+  bool isRecursivelyLCSSAForm(DominatorTree &DT, const LoopInfo &LI) const;
+
+  /// Return true if the Loop is in the form that the LoopSimplify pass
+  /// transforms loops to, which is sometimes called normal form.
+  bool isLoopSimplifyForm() const;
+
+  /// Return true if the loop body is safe to clone in practice.
+  bool isSafeToClone() const;
+
+  /// Returns true if the loop is annotated parallel.
+  ///
+  /// A parallel loop can be assumed to not contain any dependencies between
+  /// iterations by the compiler. That is, any loop-carried dependency checking
+  /// can be skipped completely when parallelizing the loop on the target
+  /// machine. Thus, if the parallel loop information originates from the
+  /// programmer, e.g. via the OpenMP parallel for pragma, it is the
+  /// programmer's responsibility to ensure there are no loop-carried
+  /// dependencies. The final execution order of the instructions across
+  /// iterations is not guaranteed, thus, the end result might or might not
+  /// implement actual concurrent execution of instructions across multiple
+  /// iterations.
+  bool isAnnotatedParallel() const;
+
+  /// Return the llvm.loop loop id metadata node for this loop if it is present.
+  ///
+  /// If this loop contains the same llvm.loop metadata on each branch to the
+  /// header then the node is returned. If any latch instruction does not
+  /// contain llvm.loop or if multiple latches contain different nodes then
+  /// 0 is returned.
+  MDNode *getLoopID() const;
+  /// Set the llvm.loop loop id metadata for this loop.
+  ///
+  /// The LoopID metadata node will be added to each terminator instruction in
+  /// the loop that branches to the loop header.
+  ///
+  /// The LoopID metadata node should have one or more operands and the first
+  /// operand should be the node itself.
+  void setLoopID(MDNode *LoopID) const;
+
+  /// Add llvm.loop.unroll.disable to this loop's loop id metadata.
+  ///
+  /// Remove existing unroll metadata and add unroll disable metadata to
+  /// indicate the loop has already been unrolled.  This prevents a loop
+  /// from being unrolled more than is directed by a pragma if the loop
+  /// unrolling pass is run more than once (which it generally is).
+  void setLoopAlreadyUnrolled();
+
+  /// Return true if no exit block for the loop has a predecessor that is
+  /// outside the loop.
+  bool hasDedicatedExits() const;
+
+  /// Return all unique successor blocks of this loop.
+  /// These are the blocks _outside of the current loop_ which are branched to.
+  /// This assumes that loop exits are in canonical form, i.e. all exits are
+  /// dedicated exits.
+  void getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const;
+
+  /// If getUniqueExitBlocks would return exactly one block, return that block.
+  /// Otherwise return null.
+  BasicBlock *getUniqueExitBlock() const;
+
+  void dump() const;
+  void dumpVerbose() const;
+
+  /// Return the debug location of the start of this loop.
+  /// This looks for a BB terminating instruction with a known debug
+  /// location by looking at the preheader and header blocks. If it
+  /// cannot find a terminating instruction with location information,
+  /// it returns an unknown location.
+  DebugLoc getStartLoc() const;
+
+  /// Return the source code span of the loop.
+  LocRange getLocRange() const;
+
+  StringRef getName() const {
+    if (BasicBlock *Header = getHeader())
+      if (Header->hasName())
+        return Header->getName();
+    return "<unnamed loop>";
+  }
+
+private:
+  Loop() = default;
+
+  friend class LoopInfoBase<BasicBlock, Loop>;
+  friend class LoopBase<BasicBlock, Loop>;
+  explicit Loop(BasicBlock *BB) : LoopBase<BasicBlock, Loop>(BB) {}
+  ~Loop() = default;
+};
+
+//===----------------------------------------------------------------------===//
+/// This class builds and contains all of the top-level loop
+/// structures in the specified function.
+///
+
+template <class BlockT, class LoopT> class LoopInfoBase {
+  // BBMap - Mapping of basic blocks to the innermost loop they occur in
+  DenseMap<const BlockT *, LoopT *> BBMap;
+  std::vector<LoopT *> TopLevelLoops;
+  BumpPtrAllocator LoopAllocator;
+
+  friend class LoopBase<BlockT, LoopT>;
+  friend class LoopInfo;
+
+  void operator=(const LoopInfoBase &) = delete;
+  LoopInfoBase(const LoopInfoBase &) = delete;
+
+public:
+  LoopInfoBase() {}
+  ~LoopInfoBase() { releaseMemory(); }
+
+  LoopInfoBase(LoopInfoBase &&Arg)
+      : BBMap(std::move(Arg.BBMap)),
+        TopLevelLoops(std::move(Arg.TopLevelLoops)),
+        LoopAllocator(std::move(Arg.LoopAllocator)) {
+    // We have to clear the argument's top-level loops as we've taken ownership.
+    Arg.TopLevelLoops.clear();
+  }
+  LoopInfoBase &operator=(LoopInfoBase &&RHS) {
+    BBMap = std::move(RHS.BBMap);
+
+    for (auto *L : TopLevelLoops)
+      L->~LoopT();
+
+    TopLevelLoops = std::move(RHS.TopLevelLoops);
+    LoopAllocator = std::move(RHS.LoopAllocator);
+    RHS.TopLevelLoops.clear();
+    return *this;
+  }
+
+  void releaseMemory() {
+    BBMap.clear();
+
+    for (auto *L : TopLevelLoops)
+      L->~LoopT();
+    TopLevelLoops.clear();
+    LoopAllocator.Reset();
+  }
+
+  template <typename... ArgsTy> LoopT *AllocateLoop(ArgsTy &&... Args) {
+    LoopT *Storage = LoopAllocator.Allocate<LoopT>();
+    return new (Storage) LoopT(std::forward<ArgsTy>(Args)...);
+  }
+
+  /// iterator/begin/end - The interface to the top-level loops in the current
+  /// function.
+  ///
+  typedef typename std::vector<LoopT *>::const_iterator iterator;
+  typedef
+      typename std::vector<LoopT *>::const_reverse_iterator reverse_iterator;
+  iterator begin() const { return TopLevelLoops.begin(); }
+  iterator end() const { return TopLevelLoops.end(); }
+  reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
+  reverse_iterator rend() const { return TopLevelLoops.rend(); }
+  bool empty() const { return TopLevelLoops.empty(); }
+
+  /// Return all of the loops in the function in preorder across the loop
+  /// nests, with siblings in forward program order.
+  ///
+  /// Note that because loops form a forest of trees, preorder is equivalent to
+  /// reverse postorder.
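+  ///
+  /// A short usage sketch (assuming \c LI is an instance of this class and
+  /// \c visit is arbitrary client code; illustrative only):
+  /// \code
+  ///   for (LoopT *L : LI.getLoopsInPreorder())
+  ///     visit(*L);  // Each outer loop is visited before its subloops.
+  /// \endcode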
+  SmallVector<LoopT *, 4> getLoopsInPreorder();
+
+  /// Return all of the loops in the function in preorder across the loop
+  /// nests, with siblings in *reverse* program order.
+  ///
+  /// Note that because loops form a forest of trees, preorder is equivalent to
+  /// reverse postorder.
+  ///
+  /// Also note that this is *not* a reverse preorder. Only the siblings are in
+  /// reverse program order.
+  SmallVector<LoopT *, 4> getLoopsInReverseSiblingPreorder();
+
+  /// Return the innermost loop that BB lives in. If a basic block is in no
+  /// loop (for example the entry node), null is returned.
+  LoopT *getLoopFor(const BlockT *BB) const { return BBMap.lookup(BB); }
+
+  /// Same as getLoopFor.
+  const LoopT *operator[](const BlockT *BB) const { return getLoopFor(BB); }
+
+  /// Return the loop nesting level of the specified block. A depth of 0 means
+  /// the block is not inside any loop.
+  unsigned getLoopDepth(const BlockT *BB) const {
+    const LoopT *L = getLoopFor(BB);
+    return L ? L->getLoopDepth() : 0;
+  }
+
+  // True if the block is a loop header node
+  bool isLoopHeader(const BlockT *BB) const {
+    const LoopT *L = getLoopFor(BB);
+    return L && L->getHeader() == BB;
+  }
+
+  /// This removes the specified top-level loop from this loop info object.
+  /// The loop is not deleted, as it will presumably be inserted into
+  /// another loop.
+  LoopT *removeLoop(iterator I) {
+    assert(I != end() && "Cannot remove end iterator!");
+    LoopT *L = *I;
+    assert(!L->getParentLoop() && "Not a top-level loop!");
+    TopLevelLoops.erase(TopLevelLoops.begin() + (I - begin()));
+    return L;
+  }
+
+  /// Change the top-level loop that contains BB to the specified loop.
+  /// This should be used by transformations that restructure the loop hierarchy
+  /// tree.
+  void changeLoopFor(BlockT *BB, LoopT *L) {
+    if (!L) {
+      BBMap.erase(BB);
+      return;
+    }
+    BBMap[BB] = L;
+  }
+
+  /// Replace the specified loop in the top-level loops list with the indicated
+  /// loop.
+  void changeTopLevelLoop(LoopT *OldLoop, LoopT *NewLoop) {
+    auto I = find(TopLevelLoops, OldLoop);
+    assert(I != TopLevelLoops.end() && "Old loop not at top level!");
+    *I = NewLoop;
+    assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
+           "Loops already embedded into a subloop!");
+  }
+
+  /// This adds the specified loop to the collection of top-level loops.
+  void addTopLevelLoop(LoopT *New) {
+    assert(!New->getParentLoop() && "Loop already in subloop!");
+    TopLevelLoops.push_back(New);
+  }
+
+  /// This method completely removes BB from all data structures,
+  /// including all of the Loop objects it is nested in and our mapping from
+  /// BasicBlocks to loops.
+  void removeBlock(BlockT *BB) {
+    auto I = BBMap.find(BB);
+    if (I != BBMap.end()) {
+      for (LoopT *L = I->second; L; L = L->getParentLoop())
+        L->removeBlockFromLoop(BB);
+
+      BBMap.erase(I);
+    }
+  }
+
+  // Internals
+
+  static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
+                                      const LoopT *ParentLoop) {
+    if (!SubLoop)
+      return true;
+    if (SubLoop == ParentLoop)
+      return false;
+    return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
+  }
+
+  /// Create the loop forest using a stable algorithm.
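+  ///
+  /// A minimal sketch of (re)building the loop forest (assuming \c DT is an
+  /// up-to-date dominator tree for the function; illustrative only):
+  /// \code
+  ///   LoopInfo LI;
+  ///   LI.analyze(DT);
+  /// \endcode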
+  void analyze(const DominatorTreeBase<BlockT, false> &DomTree);
+
+  // Debugging
+  void print(raw_ostream &OS) const;
+
+  void verify(const DominatorTreeBase<BlockT, false> &DomTree) const;
+
+  /// Destroy a loop that has been removed from the `LoopInfo` nest.
+  ///
+  /// This runs the destructor of the loop object making it invalid to
+  /// reference afterward. The memory is retained so that the *pointer* to the
+  /// loop remains valid.
+  ///
+  /// The caller is responsible for removing this loop from the loop nest and
+  /// otherwise disconnecting it from the broader `LoopInfo` data structures.
+  /// Callers that don't naturally handle this themselves should probably call
+  /// `erase` instead.
+  void destroy(LoopT *L) {
+    L->~LoopT();
+
+    // Since LoopAllocator is a BumpPtrAllocator, this Deallocate only poisons
+    // \c L, but the pointer remains valid for non-dereferencing uses.
+    LoopAllocator.Deallocate(L);
+  }
+};
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopInfoBase<BasicBlock, Loop>;
+
+class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
+  typedef LoopInfoBase<BasicBlock, Loop> BaseT;
+
+  friend class LoopBase<BasicBlock, Loop>;
+
+  void operator=(const LoopInfo &) = delete;
+  LoopInfo(const LoopInfo &) = delete;
+
+public:
+  LoopInfo() {}
+  explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);
+
+  LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
+  LoopInfo &operator=(LoopInfo &&RHS) {
+    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+    return *this;
+  }
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+
+  // Most of the public interface is provided via LoopInfoBase.
+
+  /// Update LoopInfo after removing the last backedge from a loop. This updates
+  /// the loop forest and parent loops for each block so that \c L is no longer
+  /// referenced, but does not actually delete \c L immediately. The pointer
+  /// will remain valid until this LoopInfo's memory is released.
+  void erase(Loop *L);
+
+  /// Returns true if replacing From with To everywhere is guaranteed to
+  /// preserve LCSSA form.
+  bool replacementPreservesLCSSAForm(Instruction *From, Value *To) {
+    // Preserving LCSSA form is only problematic if the replacing value is an
+    // instruction.
+    Instruction *I = dyn_cast<Instruction>(To);
+    if (!I)
+      return true;
+    // If both instructions are defined in the same basic block then replacement
+    // cannot break LCSSA form.
+    if (I->getParent() == From->getParent())
+      return true;
+    // If the instruction is not defined in a loop then it can safely replace
+    // anything.
+    Loop *ToLoop = getLoopFor(I->getParent());
+    if (!ToLoop)
+      return true;
+    // If the replacing instruction is defined in the same loop as the original
+    // instruction, or in a loop that contains it as an inner loop, then using
+    // it as a replacement will not break LCSSA form.
+    return ToLoop->contains(getLoopFor(From->getParent()));
+  }
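+
+  // Sketch (illustrative): guard a replace-all-uses-with, assuming client
+  // names `LI`, `From`, and `To` as in the comment above:
+  //   if (LI.replacementPreservesLCSSAForm(From, To))
+  //     From->replaceAllUsesWith(To);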
+
+  /// Checks if moving a specific instruction can break LCSSA in any loop.
+  ///
+  /// Return true if moving \p Inst to before \p NewLoc will not break LCSSA,
+  /// assuming that the function containing \p Inst and \p NewLoc is currently
+  /// in LCSSA form.
+  bool movementPreservesLCSSAForm(Instruction *Inst, Instruction *NewLoc) {
+    assert(Inst->getFunction() == NewLoc->getFunction() &&
+           "Can't reason about IPO!");
+
+    auto *OldBB = Inst->getParent();
+    auto *NewBB = NewLoc->getParent();
+
+    // Movement within the same loop does not break LCSSA (the equality check is
+    // to avoid doing a hashtable lookup in case of intra-block movement).
+    if (OldBB == NewBB)
+      return true;
+
+    auto *OldLoop = getLoopFor(OldBB);
+    auto *NewLoop = getLoopFor(NewBB);
+
+    if (OldLoop == NewLoop)
+      return true;
+
+    // Check if Outer contains Inner; with the null loop counting as the
+    // "outermost" loop.
+    auto Contains = [](const Loop *Outer, const Loop *Inner) {
+      return !Outer || Outer->contains(Inner);
+    };
+
+    // To check that the movement of Inst to before NewLoc does not break
+    // LCSSA, we need to check two sets for possible LCSSA violations at
+    // NewLoc: the users of Inst, and the operands of Inst.
+
+    // If we know we're hoisting Inst out of an inner loop to an outer loop,
+    // then the uses *of* Inst don't need to be checked.
+
+    if (!Contains(NewLoop, OldLoop)) {
+      for (Use &U : Inst->uses()) {
+        auto *UI = cast<Instruction>(U.getUser());
+        auto *UBB = isa<PHINode>(UI) ? cast<PHINode>(UI)->getIncomingBlock(U)
+                                     : UI->getParent();
+        if (UBB != NewBB && getLoopFor(UBB) != NewLoop)
+          return false;
+      }
+    }
+
+    // If we know we're sinking Inst from an outer loop into an inner loop, then
+    // the *operands* of Inst don't need to be checked.
+
+    if (!Contains(OldLoop, NewLoop)) {
+      // See below on why we can't handle phi nodes here.
+      if (isa<PHINode>(Inst))
+        return false;
+
+      for (Use &U : Inst->operands()) {
+        auto *DefI = dyn_cast<Instruction>(U.get());
+        if (!DefI)
+          return false;
+
+        // This would need adjustment if we allow Inst to be a phi node -- the
+        // new use block won't simply be NewBB.
+
+        auto *DefBlock = DefI->getParent();
+        if (DefBlock != NewBB && getLoopFor(DefBlock) != NewLoop)
+          return false;
+      }
+    }
+
+    return true;
+  }
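+
+  // Sketch (illustrative): guard a code motion, with `LI`, `Inst`, and
+  // `InsertPt` as assumed client names:
+  //   if (LI.movementPreservesLCSSAForm(Inst, InsertPt))
+  //     Inst->moveBefore(InsertPt);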
+};
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const Loop *> {
+  typedef const Loop *NodeRef;
+  typedef LoopInfo::iterator ChildIteratorType;
+
+  static NodeRef getEntryNode(const Loop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+template <> struct GraphTraits<Loop *> {
+  typedef Loop *NodeRef;
+  typedef LoopInfo::iterator ChildIteratorType;
+
+  static NodeRef getEntryNode(Loop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
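+
+// With these specializations, the generic graph algorithms work over loop
+// nests. An illustrative sketch, assuming a populated LoopInfo `LI`:
+//   for (Loop *TopL : LI)
+//     for (const Loop *L : depth_first(TopL))
+//       dbgs() << "header: " << L->getHeader()->getName() << "\n";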
+
+/// \brief Analysis pass that exposes the \c LoopInfo for a function.
+class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
+  friend AnalysisInfoMixin<LoopAnalysis>;
+  static AnalysisKey Key;
+
+public:
+  typedef LoopInfo Result;
+
+  LoopInfo run(Function &F, FunctionAnalysisManager &AM);
+};
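+
+// Typical new-pass-manager usage (illustrative; `FAM` is an assumed
+// FunctionAnalysisManager):
+//   LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);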
+
+/// \brief Printer pass for the \c LoopAnalysis results.
+class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for the \c LoopAnalysis results.
+struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief The legacy pass manager's analysis pass to compute loop information.
+class LoopInfoWrapperPass : public FunctionPass {
+  LoopInfo LI;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  LoopInfoWrapperPass() : FunctionPass(ID) {
+    initializeLoopInfoWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  LoopInfo &getLoopInfo() { return LI; }
+  const LoopInfo &getLoopInfo() const { return LI; }
+
+  /// \brief Calculate the natural loop information for a given function.
+  bool runOnFunction(Function &F) override;
+
+  void verifyAnalysis() const override;
+
+  void releaseMemory() override { LI.releaseMemory(); }
+
+  void print(raw_ostream &O, const Module *M = nullptr) const override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
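+
+// Typical legacy-pass-manager usage from another FunctionPass (illustrative):
+//   // In getAnalysisUsage():
+//   AU.addRequired<LoopInfoWrapperPass>();
+//   // In runOnFunction():
+//   LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();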
+
+/// Function to print a loop's contents as LLVM's text IR assembly.
+void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h
new file mode 100644
index 0000000..b3a16b5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h
@@ -0,0 +1,677 @@
+//===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the generic implementation of LoopInfo used for both Loops and
+// MachineLoops.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPINFOIMPL_H
+#define LLVM_ANALYSIS_LOOPINFOIMPL_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Dominators.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// APIs for simple analysis of the loop. See header notes.
+
+/// getExitingBlocks - Return all blocks inside the loop that have successors
+/// outside of the loop.  These are the blocks _inside of the current loop_
+/// which branch out.  The returned list is always unique.
+///
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::getExitingBlocks(
+    SmallVectorImpl<BlockT *> &ExitingBlocks) const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  for (const auto BB : blocks())
+    for (const auto &Succ : children<BlockT *>(BB))
+      if (!contains(Succ)) {
+        // Succ is outside the loop, so BB is an exiting block.
+        ExitingBlocks.push_back(BB);
+        break;
+      }
+}
+
+/// getExitingBlock - If getExitingBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  SmallVector<BlockT *, 8> ExitingBlocks;
+  getExitingBlocks(ExitingBlocks);
+  if (ExitingBlocks.size() == 1)
+    return ExitingBlocks[0];
+  return nullptr;
+}
+
+/// getExitBlocks - Return all of the successor blocks of this loop.  These
+/// are the blocks _outside of the current loop_ which are branched to.
+///
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::getExitBlocks(
+    SmallVectorImpl<BlockT *> &ExitBlocks) const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  for (const auto BB : blocks())
+    for (const auto &Succ : children<BlockT *>(BB))
+      if (!contains(Succ))
+        // Not in current loop? It must be an exit block.
+        ExitBlocks.push_back(Succ);
+}
+
+/// getExitBlock - If getExitBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  SmallVector<BlockT *, 8> ExitBlocks;
+  getExitBlocks(ExitBlocks);
+  if (ExitBlocks.size() == 1)
+    return ExitBlocks[0];
+  return nullptr;
+}
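+
+// Usage sketch (illustrative; `L` is an assumed Loop*):
+//   if (BasicBlock *UniqueExit = L->getExitBlock()) {
+//     // Exactly one exit block; e.g. a safe place to sink loop-invariant
+//     // code that is only used after the loop.
+//   }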
+
+/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::getExitEdges(
+    SmallVectorImpl<Edge> &ExitEdges) const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  for (const auto BB : blocks())
+    for (const auto &Succ : children<BlockT *>(BB))
+      if (!contains(Succ))
+        // Not in current loop? It must be an exit block.
+        ExitEdges.emplace_back(BB, Succ);
+}
+
+/// getLoopPreheader - If there is a preheader for this loop, return it.  A
+/// loop has a preheader if there is only one edge to the header of the loop
+/// from outside of the loop and it is legal to hoist instructions into the
+/// predecessor. If this is the case, the block branching to the header of the
+/// loop is the preheader node.
+///
+/// This method returns null if there is no preheader for the loop.
+///
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  // Keep track of nodes outside the loop branching to the header...
+  BlockT *Out = getLoopPredecessor();
+  if (!Out)
+    return nullptr;
+
+  // Make sure we are allowed to hoist instructions into the predecessor.
+  if (!Out->isLegalToHoistInto())
+    return nullptr;
+
+  // Make sure there is only one exit out of the preheader.
+  typedef GraphTraits<BlockT *> BlockTraits;
+  typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+  ++SI;
+  if (SI != BlockTraits::child_end(Out))
+    return nullptr; // Multiple exits from the block, must not be a preheader.
+
+  // The predecessor has exactly one successor, so it is a preheader.
+  return Out;
+}
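+
+// Usage sketch (illustrative; `L` and `Inst` are assumed names): hoisting
+// out of a loop needs a preheader to land in.
+//   if (BasicBlock *Preheader = L->getLoopPreheader())
+//     Inst->moveBefore(Preheader->getTerminator());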
+
+/// getLoopPredecessor - If the given loop's header has exactly one unique
+/// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+/// the predecessor to have exactly one successor.
+///
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  // Keep track of nodes outside the loop branching to the header...
+  BlockT *Out = nullptr;
+
+  // Loop over the predecessors of the header node...
+  BlockT *Header = getHeader();
+  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
+    if (!contains(Pred)) { // If the block is not in the loop...
+      if (Out && Out != Pred)
+        return nullptr; // Multiple predecessors outside the loop
+      Out = Pred;
+    }
+  }
+
+  // A loop header must have at least one predecessor from outside the loop.
+  assert(Out && "Header of loop has no predecessors from outside loop?");
+  return Out;
+}
+
+/// getLoopLatch - If there is a single latch block for this loop, return it.
+/// A latch block is a block that contains a branch back to the header.
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  BlockT *Header = getHeader();
+  BlockT *Latch = nullptr;
+  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
+    if (contains(Pred)) {
+      if (Latch)
+        return nullptr;
+      Latch = Pred;
+    }
+  }
+
+  return Latch;
+}
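+
+// Usage sketch (illustrative): a rotated ("bottom-tested") loop has a single
+// latch that is also an exiting block.
+//   BasicBlock *Latch = L->getLoopLatch();
+//   bool BottomTested = Latch && L->isLoopExiting(Latch);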
+
+//===----------------------------------------------------------------------===//
+// APIs for updating loop information after changing the CFG
+//
+
+/// addBasicBlockToLoop - This method is used by other analyses to update loop
+/// information.  NewBB is set to be a new member of the current loop.
+/// Because of this, it is added as a member of all parent loops, and is added
+/// to the specified LoopInfo object as being in the current basic block.  It
+/// is not valid to replace the loop header with this method.
+///
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::addBasicBlockToLoop(
+    BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
+  assert(!isInvalid() && "Loop not in a valid state!");
+#ifndef NDEBUG
+  if (!Blocks.empty()) {
+    auto SameHeader = LIB[getHeader()];
+    assert(contains(SameHeader) && getHeader() == SameHeader->getHeader() &&
+           "Incorrect LI specified for this loop!");
+  }
+#endif
+  assert(NewBB && "Cannot add a null basic block to the loop!");
+  assert(!LIB[NewBB] && "BasicBlock already in the loop!");
+
+  LoopT *L = static_cast<LoopT *>(this);
+
+  // Add the loop mapping to the LoopInfo object...
+  LIB.BBMap[NewBB] = L;
+
+  // Add the basic block to this loop and all parent loops...
+  while (L) {
+    L->addBlockEntry(NewBB);
+    L = L->getParentLoop();
+  }
+}
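+
+// Usage sketch (illustrative; `NewBB` and `LI` are assumed names): after
+// inserting a new block into the CFG inside loop L (e.g. by splitting an
+// edge), register it with the analysis:
+//   L->addBasicBlockToLoop(NewBB, LI);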
+
+/// replaceChildLoopWith - This is used when splitting loops up.  It replaces
+/// the OldChild entry in our children list with NewChild, and updates the
+/// parent pointer of OldChild to be null and the NewChild to be this loop.
+/// This updates the loop depth of the new child.
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::replaceChildLoopWith(LoopT *OldChild,
+                                                   LoopT *NewChild) {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  assert(OldChild->ParentLoop == this && "This loop is already broken!");
+  assert(!NewChild->ParentLoop && "NewChild already has a parent!");
+  typename std::vector<LoopT *>::iterator I = find(SubLoops, OldChild);
+  assert(I != SubLoops.end() && "OldChild not in loop!");
+  *I = NewChild;
+  OldChild->ParentLoop = nullptr;
+  NewChild->ParentLoop = static_cast<LoopT *>(this);
+}
+
+/// verifyLoop - Verify loop structure
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoop() const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+#ifndef NDEBUG
+  assert(!Blocks.empty() && "Loop header is missing");
+
+  // Setup for using a depth-first iterator to visit every block in the loop.
+  SmallVector<BlockT *, 8> ExitBBs;
+  getExitBlocks(ExitBBs);
+  df_iterator_default_set<BlockT *> VisitSet;
+  VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
+  df_ext_iterator<BlockT *, df_iterator_default_set<BlockT *>>
+      BI = df_ext_begin(getHeader(), VisitSet),
+      BE = df_ext_end(getHeader(), VisitSet);
+
+  // Keep track of the BBs visited.
+  SmallPtrSet<BlockT *, 8> VisitedBBs;
+
+  // Check the individual blocks.
+  for (; BI != BE; ++BI) {
+    BlockT *BB = *BI;
+
+    assert(std::any_of(GraphTraits<BlockT *>::child_begin(BB),
+                       GraphTraits<BlockT *>::child_end(BB),
+                       [&](BlockT *B) { return contains(B); }) &&
+           "Loop block has no in-loop successors!");
+
+    assert(std::any_of(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
+                       GraphTraits<Inverse<BlockT *>>::child_end(BB),
+                       [&](BlockT *B) { return contains(B); }) &&
+           "Loop block has no in-loop predecessors!");
+
+    SmallVector<BlockT *, 2> OutsideLoopPreds;
+    std::for_each(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
+                  GraphTraits<Inverse<BlockT *>>::child_end(BB),
+                  [&](BlockT *B) {
+                    if (!contains(B))
+                      OutsideLoopPreds.push_back(B);
+                  });
+
+    if (BB == getHeader()) {
+      assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
+    } else if (!OutsideLoopPreds.empty()) {
+      // A non-header loop block shouldn't be reachable from outside the loop,
+      // though it is permitted if the predecessor is not itself actually
+      // reachable.
+      BlockT *EntryBB = &BB->getParent()->front();
+      for (BlockT *CB : depth_first(EntryBB))
+        for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
+          assert(CB != OutsideLoopPreds[i] &&
+                 "Loop has multiple entry points!");
+    }
+    assert(BB != &getHeader()->getParent()->front() &&
+           "Loop contains function entry block!");
+
+    VisitedBBs.insert(BB);
+  }
+
+  if (VisitedBBs.size() != getNumBlocks()) {
+    dbgs() << "The following blocks are unreachable in the loop: ";
+    for (auto BB : Blocks) {
+      if (!VisitedBBs.count(BB)) {
+        dbgs() << *BB << "\n";
+      }
+    }
+    assert(false && "Unreachable block in loop");
+  }
+
+  // Check the subloops.
+  for (iterator I = begin(), E = end(); I != E; ++I)
+    // Each block in each subloop should be contained within this loop.
+    for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
+         BI != BE; ++BI) {
+      assert(contains(*BI) &&
+             "Loop does not contain all the blocks of a subloop!");
+    }
+
+  // Check the parent loop pointer.
+  if (ParentLoop) {
+    assert(is_contained(*ParentLoop, this) &&
+           "Loop is not a subloop of its parent!");
+  }
+#endif
+}
+
+/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoopNest(
+    DenseSet<const LoopT *> *Loops) const {
+  assert(!isInvalid() && "Loop not in a valid state!");
+  Loops->insert(static_cast<const LoopT *>(this));
+  // Verify this loop.
+  verifyLoop();
+  // Verify the subloops.
+  for (iterator I = begin(), E = end(); I != E; ++I)
+    (*I)->verifyLoopNest(Loops);
+}
+
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth,
+                                    bool Verbose) const {
+  OS.indent(Depth * 2) << "Loop at depth " << getLoopDepth() << " containing: ";
+
+  BlockT *H = getHeader();
+  for (unsigned i = 0; i < getBlocks().size(); ++i) {
+    BlockT *BB = getBlocks()[i];
+    if (!Verbose) {
+      if (i)
+        OS << ",";
+      BB->printAsOperand(OS, false);
+    } else
+      OS << "\n";
+
+    if (BB == H)
+      OS << "<header>";
+    if (isLoopLatch(BB))
+      OS << "<latch>";
+    if (isLoopExiting(BB))
+      OS << "<exiting>";
+    if (Verbose)
+      BB->print(OS);
+  }
+  OS << "\n";
+
+  for (iterator I = begin(), E = end(); I != E; ++I)
+    (*I)->print(OS, Depth + 2);
+}
+
+//===----------------------------------------------------------------------===//
+/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
+/// result does not depend on use list (block predecessor) order.
+///
+
+/// Discover a subloop with the specified backedges such that: All blocks within
+/// this loop are mapped to this loop or a subloop. And all subloops within this
+/// loop have their parent loop set to this loop or a subloop.
+template <class BlockT, class LoopT>
+static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT *> Backedges,
+                                  LoopInfoBase<BlockT, LoopT> *LI,
+                                  const DomTreeBase<BlockT> &DomTree) {
+  typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
+
+  unsigned NumBlocks = 0;
+  unsigned NumSubloops = 0;
+
+  // Perform a backward CFG traversal using a worklist.
+  std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
+  while (!ReverseCFGWorklist.empty()) {
+    BlockT *PredBB = ReverseCFGWorklist.back();
+    ReverseCFGWorklist.pop_back();
+
+    LoopT *Subloop = LI->getLoopFor(PredBB);
+    if (!Subloop) {
+      if (!DomTree.isReachableFromEntry(PredBB))
+        continue;
+
+      // This is an undiscovered block. Map it to the current loop.
+      LI->changeLoopFor(PredBB, L);
+      ++NumBlocks;
+      if (PredBB == L->getHeader())
+        continue;
+      // Push all block predecessors on the worklist.
+      ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
+                                InvBlockTraits::child_begin(PredBB),
+                                InvBlockTraits::child_end(PredBB));
+    } else {
+      // This is a discovered block. Find its outermost discovered loop.
+      while (LoopT *Parent = Subloop->getParentLoop())
+        Subloop = Parent;
+
+      // If it is already discovered to be a subloop of this loop, continue.
+      if (Subloop == L)
+        continue;
+
+      // Discover a subloop of this loop.
+      Subloop->setParentLoop(L);
+      ++NumSubloops;
+      NumBlocks += Subloop->getBlocksVector().capacity();
+      PredBB = Subloop->getHeader();
+      // Continue traversal along predecessors that are not loop-back edges from
+      // within this subloop tree itself. Note that a predecessor may directly
+      // reach another subloop that is not yet discovered to be a subloop of
+      // this loop, which we must traverse.
+      for (const auto Pred : children<Inverse<BlockT *>>(PredBB)) {
+        if (LI->getLoopFor(Pred) != Subloop)
+          ReverseCFGWorklist.push_back(Pred);
+      }
+    }
+  }
+  L->getSubLoopsVector().reserve(NumSubloops);
+  L->reserveBlocks(NumBlocks);
+}
+
+/// Populate all loop data in a stable order during a single forward DFS.
+template <class BlockT, class LoopT> class PopulateLoopsDFS {
+  typedef GraphTraits<BlockT *> BlockTraits;
+  typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+
+  LoopInfoBase<BlockT, LoopT> *LI;
+
+public:
+  PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li) : LI(li) {}
+
+  void traverse(BlockT *EntryBlock);
+
+protected:
+  void insertIntoLoop(BlockT *Block);
+};
+
+/// Top-level driver for the forward DFS within the loop.
+template <class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
+  for (BlockT *BB : post_order(EntryBlock))
+    insertIntoLoop(BB);
+}
+
+/// Add a single Block to its ancestor loops in PostOrder. If the block is a
+/// subloop header, add the subloop to its parent in PostOrder, then reverse the
+/// Block and Subloop vectors of the now complete subloop to achieve RPO.
+template <class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
+  LoopT *Subloop = LI->getLoopFor(Block);
+  if (Subloop && Block == Subloop->getHeader()) {
+    // We reach this point once per subloop after processing all the blocks in
+    // the subloop.
+    if (Subloop->getParentLoop())
+      Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
+    else
+      LI->addTopLevelLoop(Subloop);
+
+    // For convenience, Blocks and Subloops are inserted in postorder. Reverse
+    // the lists, except for the loop header, which is always at the beginning.
+    Subloop->reverseBlock(1);
+    std::reverse(Subloop->getSubLoopsVector().begin(),
+                 Subloop->getSubLoopsVector().end());
+
+    Subloop = Subloop->getParentLoop();
+  }
+  for (; Subloop; Subloop = Subloop->getParentLoop())
+    Subloop->addBlockEntry(Block);
+}
+
+/// Analyze LoopInfo discovers loops during a postorder DominatorTree traversal
+/// interleaved with backward CFG traversals within each subloop
+/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
+/// this part of the algorithm is linear in the number of CFG edges. Subloop and
+/// Block vectors are then populated during a single forward CFG traversal
+/// (PopulateLoopsDFS).
+///
+/// During the two CFG traversals each block is seen three times:
+/// 1) Discovered and mapped by a reverse CFG traversal.
+/// 2) Visited during a forward DFS CFG traversal.
+/// 3) Reverse-inserted in the loop in postorder following forward DFS.
+///
+/// The Block vectors are inclusive, so step 3 requires loop-depth number of
+/// insertions per block.
+template <class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::analyze(const DomTreeBase<BlockT> &DomTree) {
+  // Postorder traversal of the dominator tree.
+  const DomTreeNodeBase<BlockT> *DomRoot = DomTree.getRootNode();
+  for (auto DomNode : post_order(DomRoot)) {
+
+    BlockT *Header = DomNode->getBlock();
+    SmallVector<BlockT *, 4> Backedges;
+
+    // Check each predecessor of the potential loop header.
+    for (const auto Backedge : children<Inverse<BlockT *>>(Header)) {
+      // If Header dominates predBB, this is a new loop. Collect the backedges.
+      if (DomTree.dominates(Header, Backedge) &&
+          DomTree.isReachableFromEntry(Backedge)) {
+        Backedges.push_back(Backedge);
+      }
+    }
+    // Perform a backward CFG traversal to discover and map blocks in this loop.
+    if (!Backedges.empty()) {
+      LoopT *L = AllocateLoop(Header);
+      discoverAndMapSubloop(L, ArrayRef<BlockT *>(Backedges), this, DomTree);
+    }
+  }
+  // Perform a single forward CFG traversal to populate block and subloop
+  // vectors for all loops.
+  PopulateLoopsDFS<BlockT, LoopT> DFS(this);
+  DFS.traverse(DomRoot->getBlock());
+}
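+
+// Usage sketch (illustrative; `F` is an assumed Function): build loop info
+// from a freshly computed dominator tree.
+//   DominatorTree DT(F);
+//   LoopInfo LI(DT);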
+
+template <class BlockT, class LoopT>
+SmallVector<LoopT *, 4> LoopInfoBase<BlockT, LoopT>::getLoopsInPreorder() {
+  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
+  // The outer-most loop actually goes into the result in the same relative
+  // order as we walk it. But LoopInfo stores the top-level loops in reverse
+  // program order, so here we reverse it to get forward program order.
+  // FIXME: If we change the order of LoopInfo we will want to remove the
+  // reverse here.
+  for (LoopT *RootL : reverse(*this)) {
+    assert(PreOrderWorklist.empty() &&
+           "Must start with an empty preorder walk worklist.");
+    PreOrderWorklist.push_back(RootL);
+    do {
+      LoopT *L = PreOrderWorklist.pop_back_val();
+      // Sub-loops are stored in forward program order, but we process the
+      // worklist backwards, so append them in reverse order.
+      PreOrderWorklist.append(L->rbegin(), L->rend());
+      PreOrderLoops.push_back(L);
+    } while (!PreOrderWorklist.empty());
+  }
+
+  return PreOrderLoops;
+}
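+
+// Usage sketch (illustrative; `LI` is an assumed populated LoopInfo):
+//   for (Loop *L : LI.getLoopsInPreorder())
+//     process(L); // `process` is an assumed callback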
+
+template <class BlockT, class LoopT>
+SmallVector<LoopT *, 4>
+LoopInfoBase<BlockT, LoopT>::getLoopsInReverseSiblingPreorder() {
+  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
+  // The outer-most loop actually goes into the result in the same relative
+  // order as we walk it. LoopInfo stores the top level loops in reverse
+  // program order so we walk in order here.
+  // FIXME: If we change the order of LoopInfo we will want to add a reverse
+  // here.
+  for (LoopT *RootL : *this) {
+    assert(PreOrderWorklist.empty() &&
+           "Must start with an empty preorder walk worklist.");
+    PreOrderWorklist.push_back(RootL);
+    do {
+      LoopT *L = PreOrderWorklist.pop_back_val();
+      // Sub-loops are stored in forward program order, but we process the
+      // worklist backwards, so we can just append them in order.
+      PreOrderWorklist.append(L->begin(), L->end());
+      PreOrderLoops.push_back(L);
+    } while (!PreOrderWorklist.empty());
+  }
+
+  return PreOrderLoops;
+}
+
+// Debugging
+template <class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
+  for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
+    TopLevelLoops[i]->print(OS);
+#if 0
+  for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
+         E = BBMap.end(); I != E; ++I)
+    OS << "BB '" << I->first->getName() << "' level = "
+       << I->second->getLoopDepth() << "\n";
+#endif
+}
+
+template <typename T>
+bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
+  std::sort(BB1.begin(), BB1.end());
+  std::sort(BB2.begin(), BB2.end());
+  return BB1 == BB2;
+}
+
+template <class BlockT, class LoopT>
+void addInnerLoopsToHeadersMap(DenseMap<BlockT *, const LoopT *> &LoopHeaders,
+                               const LoopInfoBase<BlockT, LoopT> &LI,
+                               const LoopT &L) {
+  LoopHeaders[L.getHeader()] = &L;
+  for (LoopT *SL : L)
+    addInnerLoopsToHeadersMap(LoopHeaders, LI, *SL);
+}
+
+#ifndef NDEBUG
+template <class BlockT, class LoopT>
+static void compareLoops(const LoopT *L, const LoopT *OtherL,
+                         DenseMap<BlockT *, const LoopT *> &OtherLoopHeaders) {
+  BlockT *H = L->getHeader();
+  BlockT *OtherH = OtherL->getHeader();
+  assert(H == OtherH &&
+         "Mismatched headers even though found in the same map entry!");
+
+  assert(L->getLoopDepth() == OtherL->getLoopDepth() &&
+         "Mismatched loop depth!");
+  const LoopT *ParentL = L, *OtherParentL = OtherL;
+  do {
+    assert(ParentL->getHeader() == OtherParentL->getHeader() &&
+           "Mismatched parent loop headers!");
+    ParentL = ParentL->getParentLoop();
+    OtherParentL = OtherParentL->getParentLoop();
+  } while (ParentL);
+
+  for (const LoopT *SubL : *L) {
+    BlockT *SubH = SubL->getHeader();
+    const LoopT *OtherSubL = OtherLoopHeaders.lookup(SubH);
+    assert(OtherSubL && "Inner loop is missing in computed loop info!");
+    OtherLoopHeaders.erase(SubH);
+    compareLoops(SubL, OtherSubL, OtherLoopHeaders);
+  }
+
+  std::vector<BlockT *> BBs = L->getBlocks();
+  std::vector<BlockT *> OtherBBs = OtherL->getBlocks();
+  assert(compareVectors(BBs, OtherBBs) &&
+         "Mismatched basic blocks in the loops!");
+}
+#endif
+
+template <class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::verify(
+    const DomTreeBase<BlockT> &DomTree) const {
+  DenseSet<const LoopT *> Loops;
+  for (iterator I = begin(), E = end(); I != E; ++I) {
+    assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
+    (*I)->verifyLoopNest(&Loops);
+  }
+
+// Verify that blocks are mapped to valid loops.
+#ifndef NDEBUG
+  for (auto &Entry : BBMap) {
+    const BlockT *BB = Entry.first;
+    LoopT *L = Entry.second;
+    assert(Loops.count(L) && "orphaned loop");
+    assert(L->contains(BB) && "orphaned block");
+  }
+
+  // Recompute LoopInfo to verify loops structure.
+  LoopInfoBase<BlockT, LoopT> OtherLI;
+  OtherLI.analyze(DomTree);
+
+  // Build a map we can use to move from our LI to the computed one. This
+  // allows us to ignore the particular order in any layer of the loop forest
+  // while still comparing the structure.
+  DenseMap<BlockT *, const LoopT *> OtherLoopHeaders;
+  for (LoopT *L : OtherLI)
+    addInnerLoopsToHeadersMap(OtherLoopHeaders, OtherLI, *L);
+
+  // Walk the top level loops and ensure there is a corresponding top-level
+  // loop in the computed version and then recursively compare those loop
+  // nests.
+  for (LoopT *L : *this) {
+    BlockT *Header = L->getHeader();
+    const LoopT *OtherL = OtherLoopHeaders.lookup(Header);
+    assert(OtherL && "Top level loop is missing in computed loop info!");
+    // Now that we've matched this loop, erase its header from the map.
+    OtherLoopHeaders.erase(Header);
+    // And recursively compare these loops.
+    compareLoops(L, OtherL, OtherLoopHeaders);
+  }
+
+  // Any remaining entries in the map are loops which were found when computing
+  // a fresh LoopInfo but not present in the current one.
+  if (!OtherLoopHeaders.empty()) {
+    for (const auto &HeaderAndLoop : OtherLoopHeaders)
+      dbgs() << "Found new loop: " << *HeaderAndLoop.second << "\n";
+    llvm_unreachable("Found new loops when recomputing LoopInfo!");
+  }
+#endif
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopIterator.h b/linux-x64/clang/include/llvm/Analysis/LoopIterator.h
new file mode 100644
index 0000000..91c54b2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopIterator.h
@@ -0,0 +1,260 @@
+//===--------- LoopIterator.h - Iterate over loop blocks --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines iterators to visit the basic blocks within a loop.
+//
+// These iterators currently visit blocks within subloops as well.
+// Unfortunately we have no efficient way of summarizing loop exits which would
+// allow skipping subloops during traversal.
+//
+// If you want to visit all blocks in a loop and don't need an ordered traversal,
+// use Loop::block_begin() instead.
+//
+// This is intentionally designed to work with ill-formed loops in which the
+// backedge has been deleted. The only prerequisite is that all blocks
+// contained within the loop according to the most recent LoopInfo analysis are
+// reachable from the loop header.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPITERATOR_H
+#define LLVM_ANALYSIS_LOOPITERATOR_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Analysis/LoopInfo.h"
+
+namespace llvm {
+
+class LoopBlocksTraversal;
+
+// A traits type that is intended to be used in graph algorithms. The graph
+// traits starts at the loop header, and traverses the BasicBlocks that are in
+// the loop body, but not the loop header. Since the loop header is skipped,
+// the back edges are excluded.
+//
+// TODO: Explore the possibility to implement LoopBlocksTraversal in terms of
+//       LoopBodyTraits, so that insertEdge doesn't have to be specialized.
+struct LoopBodyTraits {
+  using NodeRef = std::pair<const Loop *, BasicBlock *>;
+
+  // This wraps a const Loop * into the iterator, so we know which edges to
+  // filter out.
+  class WrappedSuccIterator
+      : public iterator_adaptor_base<
+            WrappedSuccIterator, succ_iterator,
+            typename std::iterator_traits<succ_iterator>::iterator_category,
+            NodeRef, std::ptrdiff_t, NodeRef *, NodeRef> {
+    using BaseT = iterator_adaptor_base<
+        WrappedSuccIterator, succ_iterator,
+        typename std::iterator_traits<succ_iterator>::iterator_category,
+        NodeRef, std::ptrdiff_t, NodeRef *, NodeRef>;
+
+    const Loop *L;
+
+  public:
+    WrappedSuccIterator(succ_iterator Begin, const Loop *L)
+        : BaseT(Begin), L(L) {}
+
+    NodeRef operator*() const { return {L, *I}; }
+  };
+
+  struct LoopBodyFilter {
+    bool operator()(NodeRef N) const {
+      const Loop *L = N.first;
+      return N.second != L->getHeader() && L->contains(N.second);
+    }
+  };
+
+  using ChildIteratorType =
+      filter_iterator<WrappedSuccIterator, LoopBodyFilter>;
+
+  static NodeRef getEntryNode(const Loop &G) { return {&G, G.getHeader()}; }
+
+  static ChildIteratorType child_begin(NodeRef Node) {
+    return make_filter_range(make_range<WrappedSuccIterator>(
+                                 {succ_begin(Node.second), Node.first},
+                                 {succ_end(Node.second), Node.first}),
+                             LoopBodyFilter{})
+        .begin();
+  }
+
+  static ChildIteratorType child_end(NodeRef Node) {
+    return make_filter_range(make_range<WrappedSuccIterator>(
+                                 {succ_begin(Node.second), Node.first},
+                                 {succ_end(Node.second), Node.first}),
+                             LoopBodyFilter{})
+        .end();
+  }
+};
+
+/// Store the result of a depth first search within basic blocks contained by a
+/// single loop.
+///
+/// TODO: This could be generalized for any CFG region, or the entire CFG.
+class LoopBlocksDFS {
+public:
+  /// Postorder list iterators.
+  typedef std::vector<BasicBlock*>::const_iterator POIterator;
+  typedef std::vector<BasicBlock*>::const_reverse_iterator RPOIterator;
+
+  friend class LoopBlocksTraversal;
+
+private:
+  Loop *L;
+
+  /// Map each block to its postorder number. A block is only mapped after it is
+  /// preorder visited by DFS. Its postorder number is initially zero and set
+  /// to nonzero after it is finished by postorder traversal.
+  DenseMap<BasicBlock*, unsigned> PostNumbers;
+  std::vector<BasicBlock*> PostBlocks;
+
+public:
+  LoopBlocksDFS(Loop *Container) :
+    L(Container), PostNumbers(NextPowerOf2(Container->getNumBlocks())) {
+    PostBlocks.reserve(Container->getNumBlocks());
+  }
+
+  Loop *getLoop() const { return L; }
+
+  /// Traverse the loop blocks and store the DFS result.
+  void perform(LoopInfo *LI);
+
+  /// Return true if postorder numbers are assigned to all loop blocks.
+  bool isComplete() const { return PostBlocks.size() == L->getNumBlocks(); }
+
+  /// Iterate over the cached postorder blocks.
+  POIterator beginPostorder() const {
+    assert(isComplete() && "bad loop DFS");
+    return PostBlocks.begin();
+  }
+  POIterator endPostorder() const { return PostBlocks.end(); }
+
+  /// Reverse iterate over the cached postorder blocks.
+  RPOIterator beginRPO() const {
+    assert(isComplete() && "bad loop DFS");
+    return PostBlocks.rbegin();
+  }
+  RPOIterator endRPO() const { return PostBlocks.rend(); }
+
+  /// Return true if this block has been preorder visited.
+  bool hasPreorder(BasicBlock *BB) const { return PostNumbers.count(BB); }
+
+  /// Return true if this block has a postorder number.
+  bool hasPostorder(BasicBlock *BB) const {
+    DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
+    return I != PostNumbers.end() && I->second;
+  }
+
+  /// Get a block's postorder number.
+  unsigned getPostorder(BasicBlock *BB) const {
+    DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
+    assert(I != PostNumbers.end() && "block not visited by DFS");
+    assert(I->second && "block not finished by DFS");
+    return I->second;
+  }
+
+  /// Get a block's reverse postorder number.
+  unsigned getRPO(BasicBlock *BB) const {
+    return 1 + PostBlocks.size() - getPostorder(BB);
+  }
+
+  void clear() {
+    PostNumbers.clear();
+    PostBlocks.clear();
+  }
+};
+
+/// Wrapper class to LoopBlocksDFS that provides a standard begin()/end()
+/// interface for the DFS reverse post-order traversal of blocks in a loop body.
+class LoopBlocksRPO {
+private:
+  LoopBlocksDFS DFS;
+
+public:
+  LoopBlocksRPO(Loop *Container) : DFS(Container) {}
+
+  /// Traverse the loop blocks and store the DFS result.
+  void perform(LoopInfo *LI) {
+    DFS.perform(LI);
+  }
+
+  /// Reverse iterate over the cached postorder blocks.
+  LoopBlocksDFS::RPOIterator begin() const { return DFS.beginRPO(); }
+  LoopBlocksDFS::RPOIterator end() const { return DFS.endRPO(); }
+};
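+
+// Usage sketch (illustrative; `L` and `LI` are assumed names): iterate a
+// loop body in reverse post-order.
+//   LoopBlocksRPO RPOT(L);
+//   RPOT.perform(&LI);
+//   for (BasicBlock *BB : RPOT)
+//     simplify(BB); // `simplify` is an assumed callback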
+
+/// Specialize po_iterator_storage to record postorder numbers.
+template<> class po_iterator_storage<LoopBlocksTraversal, true> {
+  LoopBlocksTraversal &LBT;
+public:
+  po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
+  // These functions are defined below.
+  bool insertEdge(Optional<BasicBlock *> From, BasicBlock *To);
+  void finishPostorder(BasicBlock *BB);
+};
+
+/// Traverse the blocks in a loop using a depth-first search.
+class LoopBlocksTraversal {
+public:
+  /// Graph traversal iterator.
+  typedef po_iterator<BasicBlock*, LoopBlocksTraversal, true> POTIterator;
+
+private:
+  LoopBlocksDFS &DFS;
+  LoopInfo *LI;
+
+public:
+  LoopBlocksTraversal(LoopBlocksDFS &Storage, LoopInfo *LInfo) :
+    DFS(Storage), LI(LInfo) {}
+
+  /// Postorder traversal over the graph. This only needs to be done once.
+  /// po_iterator "automatically" calls back to visitPreorder and
+  /// finishPostorder to record the DFS result.
+  POTIterator begin() {
+    assert(DFS.PostBlocks.empty() && "Need clear DFS result before traversing");
+    assert(DFS.L->getNumBlocks() && "po_iterator cannot handle an empty graph");
+    return po_ext_begin(DFS.L->getHeader(), *this);
+  }
+  POTIterator end() {
+    // po_ext_end interface requires a basic block, but ignores its value.
+    return po_ext_end(DFS.L->getHeader(), *this);
+  }
+
+  /// Called by po_iterator upon reaching a block via a CFG edge. If this block
+  /// is contained in the loop and has not been visited, then mark it preorder
+  /// visited and return true.
+  ///
+  /// TODO: If anyone is interested, we could record preorder numbers here.
+  bool visitPreorder(BasicBlock *BB) {
+    if (!DFS.L->contains(LI->getLoopFor(BB)))
+      return false;
+
+    return DFS.PostNumbers.insert(std::make_pair(BB, 0)).second;
+  }
+
+  /// Called by po_iterator each time it advances, indicating a block's
+  /// postorder.
+  void finishPostorder(BasicBlock *BB) {
+    assert(DFS.PostNumbers.count(BB) && "Loop DFS skipped preorder");
+    DFS.PostBlocks.push_back(BB);
+    DFS.PostNumbers[BB] = DFS.PostBlocks.size();
+  }
+};
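+
+// Usage sketch (illustrative): a traversal records its result into the
+// LoopBlocksDFS storage as a side effect of iteration.
+//   LoopBlocksDFS DFS(L);
+//   LoopBlocksTraversal Traversal(DFS, &LI);
+//   for (BasicBlock *BB : Traversal)
+//     (void)BB; // pre/postorder numbers are recorded during the walk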
+
+inline bool po_iterator_storage<LoopBlocksTraversal, true>::insertEdge(
+    Optional<BasicBlock *> From, BasicBlock *To) {
+  return LBT.visitPreorder(To);
+}
+
+inline void po_iterator_storage<LoopBlocksTraversal, true>::
+finishPostorder(BasicBlock *BB) {
+  LBT.finishPostorder(BB);
+}
+
+} // End namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopPass.h b/linux-x64/clang/include/llvm/Analysis/LoopPass.h
new file mode 100644
index 0000000..86cfecd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopPass.h
@@ -0,0 +1,179 @@
+//===- LoopPass.h - LoopPass class ----------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines LoopPass class. All loop optimization
+// and transformation passes are derived from LoopPass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPPASS_H
+#define LLVM_ANALYSIS_LOOPPASS_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/Pass.h"
+#include <deque>
+
+namespace llvm {
+
+class LPPassManager;
+class Function;
+class PMStack;
+
+class LoopPass : public Pass {
+public:
+  explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}
+
+  /// getPrinterPass - Get a pass to print the function corresponding
+  /// to a Loop.
+  Pass *createPrinterPass(raw_ostream &O,
+                          const std::string &Banner) const override;
+
+  // runOnLoop - This method should be implemented by the subclass to perform
+  // whatever action is necessary for the specified Loop.
+  virtual bool runOnLoop(Loop *L, LPPassManager &LPM) = 0;
+
+  using llvm::Pass::doInitialization;
+  using llvm::Pass::doFinalization;
+
+  // Initialization and finalization hooks.
+  virtual bool doInitialization(Loop *L, LPPassManager &LPM) {
+    return false;
+  }
+
+  // Finalization hook does not supply Loop because at this time
+  // loop nest is completely different.
+  virtual bool doFinalization() { return false; }
+
+  // Check if this pass is suitable for the current LPPassManager, if
+  // available. This pass P is not suitable for an LPPassManager if P
+  // does not preserve higher-level analysis info used by other
+  // LPPassManager passes. In such a case, pop the LPPassManager from the
+  // stack. This will force assignPassManager() to create a new
+  // LPPassManager as expected.
+  void preparePassManager(PMStack &PMS) override;
+
+  /// Assign pass manager to manage this pass
+  void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  PassManagerType getPotentialPassManagerType() const override {
+    return PMT_LoopPassManager;
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// SimpleAnalysis - Provides simple interface to update analysis info
+  /// maintained by various passes. Note, if required this interface can
+  /// be extracted into a separate abstract class but it would require
+  /// additional use of multiple inheritance in Pass class hierarchy, something
+  /// we are trying to avoid.
+
+  /// Each loop pass can override these simple analysis hooks to update
+  /// desired analysis information.
+  /// cloneBasicBlockAnalysis - Clone analysis info associated with basic block.
+  virtual void cloneBasicBlockAnalysis(BasicBlock *F, BasicBlock *T, Loop *L) {}
+
+  /// deleteAnalysisValue - Delete analysis info associated with value V.
+  virtual void deleteAnalysisValue(Value *V, Loop *L) {}
+
+  /// Delete analysis info associated with Loop L.
+  /// Called to notify a Pass that a loop has been deleted and any
+  /// associated analysis values can be deleted.
+  virtual void deleteAnalysisLoop(Loop *L) {}
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when Attribute::OptimizeNone is set or when
+  /// optimization bisect is over the limit.
+  bool skipLoop(const Loop *L) const;
+};
+
+class LPPassManager : public FunctionPass, public PMDataManager {
+public:
+  static char ID;
+  explicit LPPassManager();
+
+  /// run - Execute all of the passes scheduled for execution.  Keep track of
+  /// whether any of the passes modifies the module, and if so, return true.
+  bool runOnFunction(Function &F) override;
+
+  /// Pass Manager itself does not invalidate any analysis info.
+  /// LPPassManager needs LoopInfo.
+  void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+  StringRef getPassName() const override { return "Loop Pass Manager"; }
+
+  PMDataManager *getAsPMDataManager() override { return this; }
+  Pass *getAsPass() override { return this; }
+
+  /// Print passes managed by this manager
+  void dumpPassStructure(unsigned Offset) override;
+
+  LoopPass *getContainedPass(unsigned N) {
+    assert(N < PassVector.size() && "Pass number out of range!");
+    LoopPass *LP = static_cast<LoopPass *>(PassVector[N]);
+    return LP;
+  }
+
+  PassManagerType getPassManagerType() const override {
+    return PMT_LoopPassManager;
+  }
+
+public:
+  // Add a new loop into the loop queue.
+  void addLoop(Loop &L);
+
+  // Mark \p L as deleted.
+  void markLoopAsDeleted(Loop &L);
+
+  //===--------------------------------------------------------------------===//
+  /// SimpleAnalysis - Provides simple interface to update analysis info
+  /// maintained by various passes. Note, if required this interface can
+  /// be extracted into a separate abstract class but it would require
+  /// additional use of multiple inheritance in Pass class hierarchy, something
+  /// we are trying to avoid.
+
+  /// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for
+  /// all passes that implement simple analysis interface.
+  void cloneBasicBlockSimpleAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);
+
+  /// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes
+  /// that implement simple analysis interface.
+  void deleteSimpleAnalysisValue(Value *V, Loop *L);
+
+  /// Invoke deleteAnalysisLoop hook for all passes that implement simple
+  /// analysis interface.
+  void deleteSimpleAnalysisLoop(Loop *L);
+
+private:
+  std::deque<Loop *> LQ;
+  LoopInfo *LI;
+  Loop *CurrentLoop;
+  bool CurrentLoopDeleted;
+};
+
+// This pass is required by the LCSSA transformation. It is used inside
+// LPPassManager to check if current pass preserves LCSSA form, and if it does
+// pass manager calls lcssa verification for the current loop.
+struct LCSSAVerificationPass : public FunctionPass {
+  static char ID;
+  LCSSAVerificationPass() : FunctionPass(ID) {
+    initializeLCSSAVerificationPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override { return false; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h b/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h
new file mode 100644
index 0000000..80f3e5f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h
@@ -0,0 +1,95 @@
+//===- llvm/Analysis/LoopUnrollAnalyzer.h - Loop Unroll Analyzer-*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements UnrolledInstAnalyzer class. It's used for predicting
+// potential effects that loop unrolling might have, such as enabling constant
+// propagation and other optimizations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPUNROLLANALYZER_H
+#define LLVM_ANALYSIS_LOOPUNROLLANALYZER_H
+
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/InstVisitor.h"
+
+// This class is used to get an estimate of the optimization effects that we
+// could get from complete loop unrolling. It comes from the fact that some
+// loads might be replaced with concrete constant values and that could trigger
+// a chain of instruction simplifications.
+//
+// E.g. we might have:
+//   int a[] = {0, 1, 0};
+//   v = 0;
+//   for (i = 0; i < 3; i ++)
+//     v += b[i]*a[i];
+// If we completely unroll the loop, we would get:
+//   v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
+// Which then will be simplified to:
+//   v = b[0]* 0 + b[1]* 1 + b[2]* 0
+// And finally:
+//   v = b[1]
+namespace llvm {
+class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> {
+  typedef InstVisitor<UnrolledInstAnalyzer, bool> Base;
+  friend class InstVisitor<UnrolledInstAnalyzer, bool>;
+  struct SimplifiedAddress {
+    Value *Base = nullptr;
+    ConstantInt *Offset = nullptr;
+  };
+
+public:
+  UnrolledInstAnalyzer(unsigned Iteration,
+                       DenseMap<Value *, Constant *> &SimplifiedValues,
+                       ScalarEvolution &SE, const Loop *L)
+      : SimplifiedValues(SimplifiedValues), SE(SE), L(L) {
+    IterationNumber = SE.getConstant(APInt(64, Iteration));
+  }
+
+  // Allow access to the initial visit method.
+  using Base::visit;
+
+private:
+  /// \brief A cache of pointer bases and constant-folded offsets corresponding
+  /// to GEP (or derived from GEP) instructions.
+  ///
+  /// In order to find the base pointer one needs to perform non-trivial
+  /// traversal of the corresponding SCEV expression, so it's good to have the
+  /// results saved.
+  DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;
+
+  /// \brief SCEV expression corresponding to number of currently simulated
+  /// iteration.
+  const SCEV *IterationNumber;
+
+  /// \brief A Value->Constant map for keeping values that we managed to
+  /// constant-fold on the given iteration.
+  ///
+  /// While we walk the loop instructions, we build up and maintain a mapping
+  /// of simplified values specific to this iteration.  The idea is to propagate
+  /// any special information we have about loads that can be replaced with
+  /// constants after complete unrolling, and account for likely simplifications
+  /// post-unrolling.
+  DenseMap<Value *, Constant *> &SimplifiedValues;
+
+  ScalarEvolution &SE;
+  const Loop *L;
+
+  bool simplifyInstWithSCEV(Instruction *I);
+
+  bool visitInstruction(Instruction &I) { return simplifyInstWithSCEV(&I); }
+  bool visitBinaryOperator(BinaryOperator &I);
+  bool visitLoad(LoadInst &I);
+  bool visitCastInst(CastInst &I);
+  bool visitCmpInst(CmpInst &I);
+  bool visitPHINode(PHINode &PN);
+};
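+
+// Usage sketch (illustrative; `It`, `SimplifiedValues`, `SE`, `L`, and `I`
+// are assumed names): estimate whether instruction I simplifies on the
+// simulated iteration It of loop L.
+//   UnrolledInstAnalyzer Analyzer(It, SimplifiedValues, SE, L);
+//   bool Simplifies = Analyzer.visit(I);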
+} // End llvm namespace
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h b/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h
new file mode 100644
index 0000000..7d53e34
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h
@@ -0,0 +1,313 @@
+//==- llvm/Analysis/MemoryBuiltins.h - Calls to memory builtins --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions identifies calls to builtin functions that allocate
+// or free memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
+#define LLVM_ANALYSIS_MEMORYBUILTINS_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/ValueHandle.h"
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class Argument;
+class CallInst;
+class ConstantInt;
+class ConstantPointerNull;
+class DataLayout;
+class ExtractElementInst;
+class ExtractValueInst;
+class GEPOperator;
+class GlobalAlias;
+class GlobalVariable;
+class Instruction;
+class IntegerType;
+class IntrinsicInst;
+class IntToPtrInst;
+class LLVMContext;
+class LoadInst;
+class PHINode;
+class PointerType;
+class SelectInst;
+class TargetLibraryInfo;
+class Type;
+class UndefValue;
+class Value;
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates or reallocates memory (either malloc-, calloc-, realloc-, or
+/// strdup-like).
+bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+                    bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a function that returns a
+/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
+bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+                 bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates uninitialized memory (such as malloc).
+bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                    bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates zero-filled memory (such as calloc).
+bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                    bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory similar to malloc or calloc.
+bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                            bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory (either malloc, calloc, or strdup like).
+bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                   bool LookThroughBitCast = false);
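+
+// A minimal usage sketch for the predicates above, assuming a Value *V and a
+// valid TargetLibraryInfo *TLI are in scope (illustrative only):
+//
+//   if (isMallocLikeFn(V, TLI))
+//     ; // uninitialized allocation such as malloc
+//   else if (isCallocLikeFn(V, TLI))
+//     ; // zero-initialized allocation such as calloc
+//   else if (isAllocationFn(V, TLI, /*LookThroughBitCast=*/true))
+//     ; // any allocation/reallocation, looking through bitcasts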
+
+//===----------------------------------------------------------------------===//
+//  malloc Call Utility Functions.
+//
+
+/// extractMallocCall - Returns the corresponding CallInst if the instruction
+/// is a malloc call.  Since CallInst::CreateMalloc() only creates calls, we
+/// ignore InvokeInst here.
+const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI);
+inline CallInst *extractMallocCall(Value *I, const TargetLibraryInfo *TLI) {
+  return const_cast<CallInst*>(extractMallocCall((const Value*)I, TLI));
+}
+
+/// getMallocType - Returns the PointerType resulting from the malloc call.
+/// The PointerType depends on the number of bitcast uses of the malloc call:
+///   0: PointerType is the malloc call's return type.
+///   1: PointerType is the bitcast's result type.
+///  >1: Unique PointerType cannot be determined, return NULL.
+PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
+
+/// getMallocAllocatedType - Returns the Type allocated by the malloc call,
+/// i.e. the element type of the PointerType determined as in getMallocType,
+/// which depends on the number of bitcast uses of the malloc call:
+///   0: the element type of the malloc call's return type.
+///   1: the element type of the bitcast's result type.
+///  >1: Unique type cannot be determined, return NULL.
+Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
+
+/// getMallocArraySize - Returns the array size of a malloc call.  If the
+/// argument passed to malloc is a multiple of the size of the malloced type,
+/// then return that multiple (the element count).  For non-array mallocs, the
+/// multiple is the constant 1.  Return NULL for mallocs whose array size
+/// cannot be determined.
+Value *getMallocArraySize(CallInst *CI, const DataLayout &DL,
+                          const TargetLibraryInfo *TLI,
+                          bool LookThroughSExt = false);
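+
+// A minimal sketch of how these utilities compose, assuming CI is a CallInst
+// known to be malloc-like and DL/TLI are available (illustrative only):
+//
+//   if (PointerType *PT = getMallocType(CI, TLI)) {
+//     Type *ElemTy = getMallocAllocatedType(CI, TLI);   // element type of PT
+//     Value *NumElts = getMallocArraySize(CI, DL, TLI); // null if unknown
+//   }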
+
+//===----------------------------------------------------------------------===//
+//  calloc Call Utility Functions.
+//
+
+/// extractCallocCall - Returns the corresponding CallInst if the instruction
+/// is a calloc call.
+const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
+inline CallInst *extractCallocCall(Value *I, const TargetLibraryInfo *TLI) {
+  return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
+}
+
+//===----------------------------------------------------------------------===//
+//  free Call Utility Functions.
+//
+
+/// isFreeCall - Returns non-null if the value is a call to the builtin free().
+const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI);
+
+inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
+  return const_cast<CallInst*>(isFreeCall((const Value*)I, TLI));
+}
+
+//===----------------------------------------------------------------------===//
+//  Utility functions to compute size of objects.
+//
+
+/// Various options to control the behavior of getObjectSize.
+struct ObjectSizeOpts {
+  /// Controls how we handle conditional statements with unknown conditions.
+  enum class Mode : uint8_t {
+    /// Fail to evaluate an unknown condition.
+    Exact,
+    /// Evaluate all branches of an unknown condition. If all evaluations
+    /// succeed, pick the minimum size.
+    Min,
+    /// Same as Min, except we pick the maximum size of all of the branches.
+    Max
+  };
+
+  /// How we want to evaluate this object's size.
+  Mode EvalMode = Mode::Exact;
+  /// Whether to round the result up to the alignment of allocas, byval
+  /// arguments, and global variables.
+  bool RoundToAlign = false;
+  /// If this is true, null pointers in address space 0 will be treated as
+  /// though they can't be evaluated. Otherwise, null is always considered to
+  /// point to a 0 byte region of memory.
+  bool NullIsUnknownSize = false;
+};
+
+/// \brief Compute the size of the object pointed to by Ptr. Returns true and
+/// the object size in Size if successful, and false otherwise. In this
+/// context, by object we mean the region of memory starting at Ptr to the end
+/// of the underlying object pointed to by Ptr.
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
+                   const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
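+
+// A minimal sketch, assuming Ptr (const Value *), DL and TLI are in scope
+// (illustrative only):
+//
+//   ObjectSizeOpts Opts;
+//   Opts.EvalMode = ObjectSizeOpts::Mode::Min; // smallest size over branches
+//   Opts.RoundToAlign = true;
+//   uint64_t Size;
+//   if (getObjectSize(Ptr, Size, DL, TLI, Opts))
+//     ; // Size now holds the object size in bytes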
+
+/// Try to turn a call to @llvm.objectsize into an integer value of the given
+/// Type. Returns null on failure.
+/// If MustSucceed is true, this function will not return null, and may return
+/// conservative values governed by the second argument of the call to
+/// objectsize.
+ConstantInt *lowerObjectSizeCall(IntrinsicInst *ObjectSize,
+                                 const DataLayout &DL,
+                                 const TargetLibraryInfo *TLI,
+                                 bool MustSucceed);
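+
+// A minimal sketch of lowering an @llvm.objectsize call, assuming II is the
+// IntrinsicInst and DL/TLI are available (illustrative only):
+//
+//   if (ConstantInt *CI =
+//           lowerObjectSizeCall(II, DL, TLI, /*MustSucceed=*/true))
+//     II->replaceAllUsesWith(CI); // then erase II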
+
+using SizeOffsetType = std::pair<APInt, APInt>;
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*
+/// statically. Fails if size or offset are not known at compile time.
+class ObjectSizeOffsetVisitor
+  : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+  ObjectSizeOpts Options;
+  unsigned IntTyBits;
+  APInt Zero;
+  SmallPtrSet<Instruction *, 8> SeenInsts;
+
+  APInt align(APInt Size, uint64_t Align);
+
+  SizeOffsetType unknown() {
+    return std::make_pair(APInt(), APInt());
+  }
+
+public:
+  ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
+                          LLVMContext &Context, ObjectSizeOpts Options = {});
+
+  SizeOffsetType compute(Value *V);
+
+  static bool knownSize(const SizeOffsetType &SizeOffset) {
+    return SizeOffset.first.getBitWidth() > 1;
+  }
+
+  static bool knownOffset(const SizeOffsetType &SizeOffset) {
+    return SizeOffset.second.getBitWidth() > 1;
+  }
+
+  static bool bothKnown(const SizeOffsetType &SizeOffset) {
+    return knownSize(SizeOffset) && knownOffset(SizeOffset);
+  }
+
+  // These are "private", except they can't actually be made private. Only
+  // compute() should be used by external users.
+  SizeOffsetType visitAllocaInst(AllocaInst &I);
+  SizeOffsetType visitArgument(Argument &A);
+  SizeOffsetType visitCallSite(CallSite CS);
+  SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
+  SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
+  SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
+  SizeOffsetType visitGEPOperator(GEPOperator &GEP);
+  SizeOffsetType visitGlobalAlias(GlobalAlias &GA);
+  SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
+  SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
+  SizeOffsetType visitLoadInst(LoadInst &I);
+  SizeOffsetType visitPHINode(PHINode&);
+  SizeOffsetType visitSelectInst(SelectInst &I);
+  SizeOffsetType visitUndefValue(UndefValue&);
+  SizeOffsetType visitInstruction(Instruction &I);
+
+private:
+  bool CheckedZextOrTrunc(APInt &I);
+};
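+
+// A minimal sketch of driving the visitor, assuming DL, TLI and Ctx
+// (LLVMContext) are available; only compute() is meant for external use
+// (illustrative only). bothKnown relies on unknown() producing 1-bit-wide
+// APInts, since any real result is wider than one bit:
+//
+//   ObjectSizeOffsetVisitor Visitor(DL, TLI, Ctx);
+//   SizeOffsetType SO = Visitor.compute(V);
+//   if (ObjectSizeOffsetVisitor::bothKnown(SO)) {
+//     APInt Size = SO.first, Offset = SO.second;
+//     // bytes remaining past V: Size - Offset
+//   }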
+
+using SizeOffsetEvalType = std::pair<Value *, Value *>;
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*.
+/// May create code to compute the result at run-time.
+class ObjectSizeOffsetEvaluator
+  : public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
+  using BuilderTy = IRBuilder<TargetFolder>;
+  using WeakEvalType = std::pair<WeakTrackingVH, WeakTrackingVH>;
+  using CacheMapTy = DenseMap<const Value *, WeakEvalType>;
+  using PtrSetTy = SmallPtrSet<const Value *, 8>;
+
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+  LLVMContext &Context;
+  BuilderTy Builder;
+  IntegerType *IntTy;
+  Value *Zero;
+  CacheMapTy CacheMap;
+  PtrSetTy SeenVals;
+  bool RoundToAlign;
+
+  SizeOffsetEvalType unknown() {
+    return std::make_pair(nullptr, nullptr);
+  }
+
+  SizeOffsetEvalType compute_(Value *V);
+
+public:
+  ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
+                            LLVMContext &Context, bool RoundToAlign = false);
+
+  SizeOffsetEvalType compute(Value *V);
+
+  bool knownSize(SizeOffsetEvalType SizeOffset) {
+    return SizeOffset.first;
+  }
+
+  bool knownOffset(SizeOffsetEvalType SizeOffset) {
+    return SizeOffset.second;
+  }
+
+  bool anyKnown(SizeOffsetEvalType SizeOffset) {
+    return knownSize(SizeOffset) || knownOffset(SizeOffset);
+  }
+
+  bool bothKnown(SizeOffsetEvalType SizeOffset) {
+    return knownSize(SizeOffset) && knownOffset(SizeOffset);
+  }
+
+  // The individual instruction visitors should be treated as private.
+  SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
+  SizeOffsetEvalType visitCallSite(CallSite CS);
+  SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
+  SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
+  SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
+  SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
+  SizeOffsetEvalType visitLoadInst(LoadInst &I);
+  SizeOffsetEvalType visitPHINode(PHINode &PHI);
+  SizeOffsetEvalType visitSelectInst(SelectInst &I);
+  SizeOffsetEvalType visitInstruction(Instruction &I);
+};
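+
+// A minimal sketch of the run-time variant, which may emit IR through its
+// internal IRBuilder; assumes DL, TLI and Ctx are available (illustrative
+// only):
+//
+//   ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx);
+//   SizeOffsetEvalType SO = Eval.compute(V);
+//   if (Eval.bothKnown(SO)) {
+//     Value *Size = SO.first;    // runtime size value
+//     Value *Offset = SO.second; // runtime offset value
+//   }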
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMORYBUILTINS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h
new file mode 100644
index 0000000..c297452
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -0,0 +1,540 @@
+//===- llvm/Analysis/MemoryDependenceAnalysis.h - Memory Deps ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MemoryDependenceAnalysis analysis pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
+#define LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerEmbeddedInt.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerSumType.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PredIteratorCache.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AssumptionCache;
+class CallSite;
+class DominatorTree;
+class Function;
+class Instruction;
+class LoadInst;
+class PHITransAddr;
+class TargetLibraryInfo;
+class Value;
+
+/// A memory dependence query can return one of three different answers.
+class MemDepResult {
+  enum DepType {
+    /// Clients of MemDep never see this.
+    ///
+    /// Entries with this marker occur in a LocalDeps map or NonLocalDeps map
+    /// when the instruction they previously referenced was removed from
+    /// MemDep.  In either case, the entry may include an instruction pointer.
+    /// If so, the pointer is an instruction in the block from which scanning
+    /// can restart, saving some work.
+    ///
+    /// In a default-constructed MemDepResult object, the type will be Invalid
+    /// and the instruction pointer will be null.
+    Invalid = 0,
+
+    /// This is a dependence on the specified instruction which clobbers the
+    /// desired value.  The pointer member of the MemDepResult pair holds the
+    /// instruction that clobbers the memory.  For example, this occurs when we
+    /// see a may-aliased store to the memory location we care about.
+    ///
+    /// There are several cases that may be interesting here:
+    ///   1. Loads are clobbered by may-alias stores.
+    ///   2. Loads are considered clobbered by partially-aliased loads.  The
+    ///      client may choose to analyze deeper into these cases.
+    Clobber,
+
+    /// This is a dependence on the specified instruction which defines or
+    /// produces the desired memory location.  The pointer member of the
+    /// MemDepResult pair holds the instruction that defines the memory.
+    ///
+    /// Cases of interest:
+    ///   1. This could be a load or store for dependence queries on
+    ///      load/store.  The value loaded or stored is the produced value.
+    ///      Note that the pointer operand may differ from that of the queried
+    ///      pointer due to must-aliases and phi translation, and that the def
+    ///      may not have the same type as the query; the pointers may simply
+    ///      be must-aliases.
+    ///   2. For loads and stores, this could be an allocation instruction. In
+    ///      this case, the load is loading an undef value or a store is the
+    ///      first store to (that part of) the allocation.
+    ///   3. Dependence queries on calls return Def only when they are readonly
+    ///      calls or memory use intrinsics with identical callees and no
+    ///      intervening clobbers.  No validation is done that the operands to
+    ///      the calls are the same.
+    Def,
+
+    /// This marker indicates that the query has no known dependency in the
+    /// specified block.
+    ///
+    /// More detailed state info is encoded in the upper part of the pair (i.e.
+    /// the Instruction*)
+    Other
+  };
+
+  /// If DepType is "Other", the upper part of the sum type is an encoding of
+  /// the following more detailed type information.
+  enum OtherType {
+    /// This marker indicates that the query has no dependency in the specified
+    /// block.
+    ///
+    /// To find out more, the client should query other predecessor blocks.
+    NonLocal = 1,
+    /// This marker indicates that the query has no dependency in the specified
+    /// function.
+    NonFuncLocal,
+    /// This marker indicates that the query dependency is unknown.
+    Unknown
+  };
+
+  using ValueTy = PointerSumType<
+      DepType, PointerSumTypeMember<Invalid, Instruction *>,
+      PointerSumTypeMember<Clobber, Instruction *>,
+      PointerSumTypeMember<Def, Instruction *>,
+      PointerSumTypeMember<Other, PointerEmbeddedInt<OtherType, 3>>>;
+  ValueTy Value;
+
+  explicit MemDepResult(ValueTy V) : Value(V) {}
+
+public:
+  MemDepResult() = default;
+
+  /// get methods: These are static ctor methods for creating various
+  /// MemDepResult kinds.
+  static MemDepResult getDef(Instruction *Inst) {
+    assert(Inst && "Def requires inst");
+    return MemDepResult(ValueTy::create<Def>(Inst));
+  }
+  static MemDepResult getClobber(Instruction *Inst) {
+    assert(Inst && "Clobber requires inst");
+    return MemDepResult(ValueTy::create<Clobber>(Inst));
+  }
+  static MemDepResult getNonLocal() {
+    return MemDepResult(ValueTy::create<Other>(NonLocal));
+  }
+  static MemDepResult getNonFuncLocal() {
+    return MemDepResult(ValueTy::create<Other>(NonFuncLocal));
+  }
+  static MemDepResult getUnknown() {
+    return MemDepResult(ValueTy::create<Other>(Unknown));
+  }
+
+  /// Tests if this MemDepResult represents a query that is an instruction
+  /// clobber dependency.
+  bool isClobber() const { return Value.is<Clobber>(); }
+
+  /// Tests if this MemDepResult represents a query that is an instruction
+  /// definition dependency.
+  bool isDef() const { return Value.is<Def>(); }
+
+  /// Tests if this MemDepResult represents a query that is transparent to the
+  /// start of the block, but where a non-local query hasn't been done.
+  bool isNonLocal() const {
+    return Value.is<Other>() && Value.cast<Other>() == NonLocal;
+  }
+
+  /// Tests if this MemDepResult represents a query that is transparent to the
+  /// start of the function.
+  bool isNonFuncLocal() const {
+    return Value.is<Other>() && Value.cast<Other>() == NonFuncLocal;
+  }
+
+  /// Tests if this MemDepResult represents a query which cannot and/or will
+  /// not be computed.
+  bool isUnknown() const {
+    return Value.is<Other>() && Value.cast<Other>() == Unknown;
+  }
+
+  /// If this is a normal dependency, returns the instruction that is depended
+  /// on.  Otherwise, returns null.
+  Instruction *getInst() const {
+    switch (Value.getTag()) {
+    case Invalid:
+      return Value.cast<Invalid>();
+    case Clobber:
+      return Value.cast<Clobber>();
+    case Def:
+      return Value.cast<Def>();
+    case Other:
+      return nullptr;
+    }
+    llvm_unreachable("Unknown discriminant!");
+  }
+
+  bool operator==(const MemDepResult &M) const { return Value == M.Value; }
+  bool operator!=(const MemDepResult &M) const { return Value != M.Value; }
+  bool operator<(const MemDepResult &M) const { return Value < M.Value; }
+  bool operator>(const MemDepResult &M) const { return Value > M.Value; }
+
+private:
+  friend class MemoryDependenceResults;
+
+  /// Tests if this is a MemDepResult in its dirty/invalid state.
+  bool isDirty() const { return Value.is<Invalid>(); }
+
+  static MemDepResult getDirty(Instruction *Inst) {
+    return MemDepResult(ValueTy::create<Invalid>(Inst));
+  }
+};
+
+/// This is an entry in the NonLocalDepInfo cache.
+///
+/// For each BasicBlock (the BB entry) it keeps a MemDepResult.
+class NonLocalDepEntry {
+  BasicBlock *BB;
+  MemDepResult Result;
+
+public:
+  NonLocalDepEntry(BasicBlock *bb, MemDepResult result)
+      : BB(bb), Result(result) {}
+
+  // This is used for searches.
+  NonLocalDepEntry(BasicBlock *bb) : BB(bb) {}
+
+  // BB is the sort key, it can't be changed.
+  BasicBlock *getBB() const { return BB; }
+
+  void setResult(const MemDepResult &R) { Result = R; }
+
+  const MemDepResult &getResult() const { return Result; }
+
+  bool operator<(const NonLocalDepEntry &RHS) const { return BB < RHS.BB; }
+};
+
+/// This is a result from a NonLocal dependence query.
+///
+/// For each BasicBlock (the BB entry) it keeps a MemDepResult and the
+/// (potentially phi translated) address that was live in the block.
+class NonLocalDepResult {
+  NonLocalDepEntry Entry;
+  Value *Address;
+
+public:
+  NonLocalDepResult(BasicBlock *bb, MemDepResult result, Value *address)
+      : Entry(bb, result), Address(address) {}
+
+  // BB is the sort key, it can't be changed.
+  BasicBlock *getBB() const { return Entry.getBB(); }
+
+  void setResult(const MemDepResult &R, Value *Addr) {
+    Entry.setResult(R);
+    Address = Addr;
+  }
+
+  const MemDepResult &getResult() const { return Entry.getResult(); }
+
+  /// Returns the address of this pointer in this block.
+  ///
+  /// This can be different than the address queried for the non-local result
+  /// because of phi translation.  This returns null if the address was not
+  /// available in a block (i.e. because phi translation failed) or if this is
+  /// a cached result and that address was deleted.
+  ///
+  /// The address is always null for a non-local 'call' dependence.
+  Value *getAddress() const { return Address; }
+};
+
+/// Provides a lazy, caching interface for making common memory aliasing
+/// information queries, backed by LLVM's alias analysis passes.
+///
+/// The dependency information returned is somewhat unusual, but is pragmatic.
+/// If queried about a store or call that might modify memory, the analysis
+/// will return the instruction[s] that may either load from that memory or
+/// store to it.  If queried with a load or call that can never modify memory,
+/// the analysis will return calls and stores that might modify the pointer,
+/// but generally does not return loads unless a) they are volatile, or
+/// b) they load from *must-aliased* pointers.  Returning a dependence on
+/// must-alias'd pointers instead of all pointers interacts well with the
+/// internal caching mechanism.
+class MemoryDependenceResults {
+  // A map from instructions to their dependency.
+  using LocalDepMapType = DenseMap<Instruction *, MemDepResult>;
+  LocalDepMapType LocalDeps;
+
+public:
+  using NonLocalDepInfo = std::vector<NonLocalDepEntry>;
+
+private:
+  /// A pair<Value*, bool> where the bool is true if the dependence is a
+  /// read-only dependence, false if read/write.
+  using ValueIsLoadPair = PointerIntPair<const Value *, 1, bool>;
+
+  /// This pair is used when caching information for a block.
+  ///
+  /// If the pointer is null, the cache value is not a full query that starts
+  /// at the specified block.  If non-null, the bool indicates whether or not
+  /// the contents of the block were skipped.
+  using BBSkipFirstBlockPair = PointerIntPair<BasicBlock *, 1, bool>;
+
+  /// This record is the information kept for each (value, is load) pair.
+  struct NonLocalPointerInfo {
+    /// The pair of the block and the skip-first-block flag.
+    BBSkipFirstBlockPair Pair;
+    /// The results of the query for each relevant block.
+    NonLocalDepInfo NonLocalDeps;
+    /// The maximum size of the dereferences of the pointer.
+    ///
+    /// May be UnknownSize if the sizes are unknown.
+    uint64_t Size = MemoryLocation::UnknownSize;
+    /// The AA tags associated with dereferences of the pointer.
+    ///
+    /// The members may be null if there are no tags or conflicting tags.
+    AAMDNodes AATags;
+
+    NonLocalPointerInfo() = default;
+  };
+
+  /// Cache storing a single non-local def found for an instruction.
+  /// An entry is set when a non-local def is discovered even though the
+  /// query function would otherwise return only local dependencies.
+  DenseMap<Instruction *, NonLocalDepResult> NonLocalDefsCache;
+
+  /// This map stores the cached results of doing a pointer lookup at the
+  /// bottom of a block.
+  ///
+  /// The key of this map is the pointer+isload bit, the value is a list of
+  /// <bb->result> mappings.
+  using CachedNonLocalPointerInfo =
+      DenseMap<ValueIsLoadPair, NonLocalPointerInfo>;
+  CachedNonLocalPointerInfo NonLocalPointerDeps;
+
+  // A map from instructions to their non-local pointer dependencies.
+  using ReverseNonLocalPtrDepTy =
+      DenseMap<Instruction *, SmallPtrSet<ValueIsLoadPair, 4>>;
+  ReverseNonLocalPtrDepTy ReverseNonLocalPtrDeps;
+
+  /// This is the information we keep for each cached access that we have for
+  /// an instruction.
+  ///
+  /// The vector holds the per-block results, and the bool indicates whether
+  /// any of those entries may be dirty.
+  using PerInstNLInfo = std::pair<NonLocalDepInfo, bool>;
+
+  // A map from instructions to their non-local dependencies.
+  using NonLocalDepMapType = DenseMap<Instruction *, PerInstNLInfo>;
+
+  NonLocalDepMapType NonLocalDeps;
+
+  // A reverse mapping from dependencies to the dependees.  This is
+  // used when removing instructions to keep the cache coherent.
+  using ReverseDepMapType =
+      DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>>;
+  ReverseDepMapType ReverseLocalDeps;
+
+  // A reverse mapping from dependencies to the non-local dependees.
+  ReverseDepMapType ReverseNonLocalDeps;
+
+  /// Current AA implementation, just a cache.
+  AliasAnalysis &AA;
+  AssumptionCache &AC;
+  const TargetLibraryInfo &TLI;
+  DominatorTree &DT;
+  PredIteratorCache PredCache;
+
+public:
+  MemoryDependenceResults(AliasAnalysis &AA, AssumptionCache &AC,
+                          const TargetLibraryInfo &TLI,
+                          DominatorTree &DT)
+      : AA(AA), AC(AC), TLI(TLI), DT(DT) {}
+
+  /// Handle invalidation in the new PM.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+  /// Some methods limit the number of instructions they will examine.
+  /// The return value of this method is the default limit that will be
+  /// used if no limit is explicitly passed in.
+  unsigned getDefaultBlockScanLimit() const;
+
+  /// Returns the instruction on which a memory operation depends.
+  ///
+  /// See the class comment for more details. It is illegal to call this on
+  /// non-memory instructions.
+  MemDepResult getDependency(Instruction *QueryInst);
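+
+  // A minimal sketch of a local query, assuming MD is a
+  // MemoryDependenceResults instance and I is a memory instruction
+  // (illustrative only):
+  //
+  //   MemDepResult Dep = MD.getDependency(I);
+  //   if (Dep.isDef() || Dep.isClobber()) {
+  //     Instruction *DepInst = Dep.getInst(); // local dependency found
+  //   } else if (Dep.isNonLocal()) {
+  //     // fall back to the non-local query interface below
+  //   }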
+
+  /// Perform a full dependency query for the specified call, returning the set
+  /// of blocks that the value is potentially live across.
+  ///
+  /// The returned set of results will include a "NonLocal" result for every
+  /// block that the value is live across.
+  ///
+  /// This method assumes the instruction returns a "NonLocal" dependency
+  /// within its own block.
+  ///
+  /// This returns a reference to an internal data structure that may be
+  /// invalidated on the next non-local query or when an instruction is
+  /// removed.  Clients must copy this data if they want it around longer than
+  /// that.
+  const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
+
+  /// Perform a full dependency query for an access to the QueryInst's
+  /// specified memory location, returning the set of instructions that either
+  /// define or clobber the value.
+  ///
+  /// Warning: For a volatile query instruction, the dependencies will be
+  /// accurate, and thus usable for reordering, but it is never legal to
+  /// remove the query instruction.
+  ///
+  /// This method assumes the pointer has a "NonLocal" dependency within
+  /// QueryInst's parent basic block.
+  void getNonLocalPointerDependency(Instruction *QueryInst,
+                                    SmallVectorImpl<NonLocalDepResult> &Result);
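+
+  // A minimal sketch of a non-local pointer query, assuming MD and a load
+  // instruction LI whose dependency is NonLocal in its own block
+  // (illustrative only):
+  //
+  //   SmallVector<NonLocalDepResult, 16> Deps;
+  //   MD.getNonLocalPointerDependency(LI, Deps);
+  //   for (const NonLocalDepResult &R : Deps) {
+  //     // inspect R.getBB(), R.getResult(), R.getAddress()
+  //   }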
+
+  /// Removes an instruction from the dependence analysis, updating the
+  /// dependence of instructions that previously depended on it.
+  void removeInstruction(Instruction *InstToRemove);
+
+  /// Invalidates cached information about the specified pointer, because it
+  /// may be too conservative in memdep.
+  ///
+  /// This is an optional call that can be used when the client detects an
+  /// equivalence between the pointer and some other value and replaces the
+  /// other value with ptr. This can make Ptr available in more places than
+  /// the cached info would otherwise indicate.
+  void invalidateCachedPointerInfo(Value *Ptr);
+
+  /// Clears the PredIteratorCache info.
+  ///
+  /// This needs to be done when the CFG changes, e.g., due to splitting
+  /// critical edges.
+  void invalidateCachedPredecessors();
+
+  /// Returns the instruction on which a memory location depends.
+  ///
+  /// If isLoad is true, this routine ignores may-aliases with read-only
+  /// operations.  If isLoad is false, this routine ignores may-aliases
+  /// with reads from read-only locations. If possible, pass the query
+  /// instruction as well; this function may take advantage of the metadata
+  /// annotated to the query instruction to refine the result. \p Limit
+  /// can be used to set the maximum number of instructions that will be
+  /// examined to find the pointer dependency. On return, it will be set to
+  /// the number of instructions left to examine. If a null pointer is passed
+  /// in, the limit will default to the value of -memdep-block-scan-limit.
+  ///
+  /// Note that this is an uncached query, and thus may be inefficient.
+  MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad,
+                                        BasicBlock::iterator ScanIt,
+                                        BasicBlock *BB,
+                                        Instruction *QueryInst = nullptr,
+                                        unsigned *Limit = nullptr);
+
+  MemDepResult getSimplePointerDependencyFrom(const MemoryLocation &MemLoc,
+                                              bool isLoad,
+                                              BasicBlock::iterator ScanIt,
+                                              BasicBlock *BB,
+                                              Instruction *QueryInst,
+                                              unsigned *Limit = nullptr);
+
+  /// This analysis looks for other loads and stores with invariant.group
+  /// metadata and the same pointer operand. Returns Unknown if it does not
+  /// find anything, Def if it can be assumed that the two instructions load
+  /// or store the same value, and NonLocal if a non-local Def was found; the
+  /// non-local Def can be retrieved by calling getNonLocalPointerDependency
+  /// with the same queried instruction.
+  MemDepResult getInvariantGroupPointerDependency(LoadInst *LI, BasicBlock *BB);
+
+  /// Looks at a memory location for a load (specified by MemLocBase,
+  /// MemLocOffs, and MemLocSize) and compares it against a load.
+  ///
+  /// If the specified load could be safely widened to a larger integer load
+  /// that is 1) still efficient, 2) safe for the target, and 3) would provide
+  /// the specified memory location value, then this function returns the size
+  /// in bytes of the load width to use.  If not, this returns zero.
+  static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
+                                                  int64_t MemLocOffs,
+                                                  unsigned MemLocSize,
+                                                  const LoadInst *LI);
+
+  /// Release memory in caches.
+  void releaseMemory();
+
+private:
+  MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
+                                         BasicBlock::iterator ScanIt,
+                                         BasicBlock *BB);
+  bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
+                                   const PHITransAddr &Pointer,
+                                   const MemoryLocation &Loc, bool isLoad,
+                                   BasicBlock *BB,
+                                   SmallVectorImpl<NonLocalDepResult> &Result,
+                                   DenseMap<BasicBlock *, Value *> &Visited,
+                                   bool SkipFirstBlock = false);
+  MemDepResult GetNonLocalInfoForBlock(Instruction *QueryInst,
+                                       const MemoryLocation &Loc, bool isLoad,
+                                       BasicBlock *BB, NonLocalDepInfo *Cache,
+                                       unsigned NumSortedEntries);
+
+  void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
+
+  void verifyRemoved(Instruction *Inst) const;
+};
+
+/// An analysis that produces \c MemoryDependenceResults for a function.
+///
+/// This is essentially a no-op because the results are computed entirely
+/// lazily.
+class MemoryDependenceAnalysis
+    : public AnalysisInfoMixin<MemoryDependenceAnalysis> {
+  friend AnalysisInfoMixin<MemoryDependenceAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = MemoryDependenceResults;
+
+  MemoryDependenceResults run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// A wrapper analysis pass for the legacy pass manager that exposes a \c
+/// MemoryDependenceResults instance.
+class MemoryDependenceWrapperPass : public FunctionPass {
+  Optional<MemoryDependenceResults> MemDep;
+
+public:
+  static char ID;
+
+  MemoryDependenceWrapperPass();
+  ~MemoryDependenceWrapperPass() override;
+
+  /// Pass Implementation stuff.  This doesn't do any analysis eagerly.
+  bool runOnFunction(Function &) override;
+
+  /// Clean up memory between runs.
+  void releaseMemory() override;
+
+  /// Does not modify anything.  It uses Value Numbering and Alias Analysis.
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MemoryDependenceResults &getMemDep() { return *MemDep; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h b/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h
new file mode 100644
index 0000000..c108074
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h
@@ -0,0 +1,149 @@
+//===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides utility analysis objects describing memory locations.
+/// These are used both by the Alias Analysis infrastructure and more
+/// specialized memory analysis layers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
+#define LLVM_ANALYSIS_MEMORYLOCATION_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Metadata.h"
+
+namespace llvm {
+
+class LoadInst;
+class StoreInst;
+class MemTransferInst;
+class MemIntrinsic;
+class TargetLibraryInfo;
+
+/// Representation for a specific memory location.
+///
+/// This abstraction can be used to represent a specific location in memory.
+/// The goal of the location is to represent enough information to describe
+/// abstract aliasing, modification, and reference behaviors of whatever
+/// value(s) are stored in memory at the particular location.
+///
+/// The primary user of this interface is LLVM's Alias Analysis, but other
+/// memory analyses such as MemoryDependence can use it as well.
+class MemoryLocation {
+public:
+  /// UnknownSize - This is a special value which can be used with the
+  /// size arguments in alias queries to indicate that the caller does not
+  /// know the sizes of the potential memory references.
+  enum : uint64_t { UnknownSize = ~UINT64_C(0) };
+
+  /// The address of the start of the location.
+  const Value *Ptr;
+
+  /// The maximum size of the location, in address-units, or
+  /// UnknownSize if the size is not known.
+  ///
+  /// Note that an unknown size does not mean the pointer aliases the entire
+  /// virtual address space, because there are restrictions on stepping out of
+  /// one object and into another. See
+  /// http://llvm.org/docs/LangRef.html#pointeraliasing
+  uint64_t Size;
+
+  /// The metadata nodes which describe the aliasing of the location (each
+  /// member is null if that kind of information is unavailable).
+  AAMDNodes AATags;
+
+  /// Return a location with information about the memory referenced by the
+  /// given instruction.
+  static MemoryLocation get(const LoadInst *LI);
+  static MemoryLocation get(const StoreInst *SI);
+  static MemoryLocation get(const VAArgInst *VI);
+  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
+  static MemoryLocation get(const AtomicRMWInst *RMWI);
+  static MemoryLocation get(const Instruction *Inst) {
+    return *MemoryLocation::getOrNone(Inst);
+  }
+  static Optional<MemoryLocation> getOrNone(const Instruction *Inst) {
+    switch (Inst->getOpcode()) {
+    case Instruction::Load:
+      return get(cast<LoadInst>(Inst));
+    case Instruction::Store:
+      return get(cast<StoreInst>(Inst));
+    case Instruction::VAArg:
+      return get(cast<VAArgInst>(Inst));
+    case Instruction::AtomicCmpXchg:
+      return get(cast<AtomicCmpXchgInst>(Inst));
+    case Instruction::AtomicRMW:
+      return get(cast<AtomicRMWInst>(Inst));
+    default:
+      return None;
+    }
+  }
+
+  /// Return a location representing the source of a memory transfer.
+  static MemoryLocation getForSource(const MemTransferInst *MTI);
+
+  /// Return a location representing the destination of a memory set or
+  /// transfer.
+  static MemoryLocation getForDest(const MemIntrinsic *MI);
+
+  /// Return a location representing a particular argument of a call.
+  static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
+                                       const TargetLibraryInfo &TLI);
+
+  explicit MemoryLocation(const Value *Ptr = nullptr,
+                          uint64_t Size = UnknownSize,
+                          const AAMDNodes &AATags = AAMDNodes())
+      : Ptr(Ptr), Size(Size), AATags(AATags) {}
+
+  MemoryLocation getWithNewPtr(const Value *NewPtr) const {
+    MemoryLocation Copy(*this);
+    Copy.Ptr = NewPtr;
+    return Copy;
+  }
+
+  MemoryLocation getWithNewSize(uint64_t NewSize) const {
+    MemoryLocation Copy(*this);
+    Copy.Size = NewSize;
+    return Copy;
+  }
+
+  MemoryLocation getWithoutAATags() const {
+    MemoryLocation Copy(*this);
+    Copy.AATags = AAMDNodes();
+    return Copy;
+  }
+
+  bool operator==(const MemoryLocation &Other) const {
+    return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags;
+  }
+};
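+
+// A minimal sketch of constructing and refining locations, assuming LI is a
+// LoadInst * (illustrative only):
+//
+//   MemoryLocation Loc = MemoryLocation::get(LI);
+//   MemoryLocation Widened = Loc.getWithNewSize(MemoryLocation::UnknownSize);
+//   MemoryLocation Stripped = Loc.getWithoutAATags();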
+
+// Specialize DenseMapInfo for MemoryLocation.
+template <> struct DenseMapInfo<MemoryLocation> {
+  static inline MemoryLocation getEmptyKey() {
+    return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0);
+  }
+  static inline MemoryLocation getTombstoneKey() {
+    return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0);
+  }
+  static unsigned getHashValue(const MemoryLocation &Val) {
+    return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
+           DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
+           DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
+  }
+  static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
+    return LHS == RHS;
+  }
+};
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMORYLOCATION_H
diff --git a/linux-x64/clang/include/llvm/Analysis/MemorySSA.h b/linux-x64/clang/include/llvm/Analysis/MemorySSA.h
new file mode 100644
index 0000000..2899890
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MemorySSA.h
@@ -0,0 +1,1230 @@
+//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief This file exposes an interface to building/using memory SSA to
+/// walk memory instructions using a use/def graph.
+///
+/// Memory SSA class builds an SSA form that links together memory access
+/// instructions such as loads, stores, atomics, and calls. Additionally, it
+/// does a trivial form of "heap versioning" Every time the memory state changes
+/// in the program, we generate a new heap version. It generates
+/// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
+///
+/// As a trivial example,
+/// define i32 @main() #0 {
+/// entry:
+///   %call = call noalias i8* @_Znwm(i64 4) #2
+///   %0 = bitcast i8* %call to i32*
+///   %call1 = call noalias i8* @_Znwm(i64 4) #2
+///   %1 = bitcast i8* %call1 to i32*
+///   store i32 5, i32* %0, align 4
+///   store i32 7, i32* %1, align 4
+///   %2 = load i32* %0, align 4
+///   %3 = load i32* %1, align 4
+///   %add = add nsw i32 %2, %3
+///   ret i32 %add
+/// }
+///
+/// Will become
+/// define i32 @main() #0 {
+/// entry:
+///   ; 1 = MemoryDef(0)
+///   %call = call noalias i8* @_Znwm(i64 4) #3
+///   %2 = bitcast i8* %call to i32*
+///   ; 2 = MemoryDef(1)
+///   %call1 = call noalias i8* @_Znwm(i64 4) #3
+///   %4 = bitcast i8* %call1 to i32*
+///   ; 3 = MemoryDef(2)
+///   store i32 5, i32* %2, align 4
+///   ; 4 = MemoryDef(3)
+///   store i32 7, i32* %4, align 4
+///   ; MemoryUse(3)
+///   %7 = load i32* %2, align 4
+///   ; MemoryUse(4)
+///   %8 = load i32* %4, align 4
+///   %add = add nsw i32 %7, %8
+///   ret i32 %add
+/// }
+///
+/// Given this form, all the stores that could ever affect the load at %8 can be
+/// gotten by using the MemoryUse associated with it, and walking from use to
+/// def until you hit the top of the function.
+///
+/// Each def also has a list of users associated with it, so you can walk from
+/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
+/// but not the RHS of MemoryDefs. You can see this above at %7, which would
+/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
+/// store, all the MemoryUses on its use list are may-aliases of that store
+/// (but the MemoryDefs on its use list may not be).
+///
+/// MemoryDefs are not disambiguated because it would require multiple reaching
+/// definitions, which would require multiple phis, and multiple memoryaccesses
+/// per instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYSSA_H
+#define LLVM_ANALYSIS_MEMORYSSA_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DerivedUser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class Function;
+class Instruction;
+class MemoryAccess;
+class MemorySSAWalker;
+class LLVMContext;
+class raw_ostream;
+
+namespace MSSAHelpers {
+
+struct AllAccessTag {};
+struct DefsOnlyTag {};
+
+} // end namespace MSSAHelpers
+
+enum : unsigned {
+  // Used to signify what the default invalid ID is for MemoryAccess's
+  // getID()
+  INVALID_MEMORYACCESS_ID = -1U
+};
+
+template <class T> class memoryaccess_def_iterator_base;
+using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
+using const_memoryaccess_def_iterator =
+    memoryaccess_def_iterator_base<const MemoryAccess>;
+
+/// \brief The base for all memory accesses. All memory accesses in a block are
+/// linked together using an intrusive list.
+class MemoryAccess
+    : public DerivedUser,
+      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
+      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
+public:
+  using AllAccessType =
+      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
+  using DefsOnlyType =
+      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
+
+  MemoryAccess(const MemoryAccess &) = delete;
+  MemoryAccess &operator=(const MemoryAccess &) = delete;
+
+  void *operator new(size_t) = delete;
+
+  // Methods for support type inquiry through isa, cast, and
+  // dyn_cast
+  static bool classof(const Value *V) {
+    unsigned ID = V->getValueID();
+    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
+  }
+
+  BasicBlock *getBlock() const { return Block; }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  /// \brief The user iterators for a memory access
+  using iterator = user_iterator;
+  using const_iterator = const_user_iterator;
+
+  /// \brief This iterator walks over all of the defs in a given
+  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
+  /// MemoryUse/MemoryDef, this walks the defining access.
+  memoryaccess_def_iterator defs_begin();
+  const_memoryaccess_def_iterator defs_begin() const;
+  memoryaccess_def_iterator defs_end();
+  const_memoryaccess_def_iterator defs_end() const;
+
+  /// \brief Get the iterators for the all-access list and the defs-only list.
+  /// We default to the all-access list.
+  AllAccessType::self_iterator getIterator() {
+    return this->AllAccessType::getIterator();
+  }
+  AllAccessType::const_self_iterator getIterator() const {
+    return this->AllAccessType::getIterator();
+  }
+  AllAccessType::reverse_self_iterator getReverseIterator() {
+    return this->AllAccessType::getReverseIterator();
+  }
+  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
+    return this->AllAccessType::getReverseIterator();
+  }
+  DefsOnlyType::self_iterator getDefsIterator() {
+    return this->DefsOnlyType::getIterator();
+  }
+  DefsOnlyType::const_self_iterator getDefsIterator() const {
+    return this->DefsOnlyType::getIterator();
+  }
+  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
+    return this->DefsOnlyType::getReverseIterator();
+  }
+  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
+    return this->DefsOnlyType::getReverseIterator();
+  }
+
+protected:
+  friend class MemoryDef;
+  friend class MemoryPhi;
+  friend class MemorySSA;
+  friend class MemoryUse;
+  friend class MemoryUseOrDef;
+
+  /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
+  /// moved.
+  void setBlock(BasicBlock *BB) { Block = BB; }
+
+  /// \brief Used for debugging and tracking things about MemoryAccesses.
+  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
+  inline unsigned getID() const;
+
+  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
+               BasicBlock *BB, unsigned NumOperands)
+      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
+        Block(BB) {}
+
+  // Use deleteValue() to delete a generic MemoryAccess.
+  ~MemoryAccess() = default;
+
+private:
+  BasicBlock *Block;
+};
+
+template <>
+struct ilist_alloc_traits<MemoryAccess> {
+  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
+  MA.print(OS);
+  return OS;
+}
+
+/// \brief Class that has the common methods + fields of memory uses/defs. It's
+/// a little awkward to have, but there are many cases where we want either a
+/// use or def, and there are many cases where uses are needed (defs aren't
+/// acceptable), and vice-versa.
+///
+/// This class should never be instantiated directly; make a MemoryUse or
+/// MemoryDef instead.
+class MemoryUseOrDef : public MemoryAccess {
+public:
+  void *operator new(size_t) = delete;
+
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
+
+  /// \brief Get the instruction that this MemoryUse represents.
+  Instruction *getMemoryInst() const { return MemoryInstruction; }
+
+  /// \brief Get the access that produces the memory state used by this Use.
+  MemoryAccess *getDefiningAccess() const { return getOperand(0); }
+
+  static bool classof(const Value *MA) {
+    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
+  }
+
+  // Sadly, these have to be public because they are needed in some of the
+  // iterators.
+  inline bool isOptimized() const;
+  inline MemoryAccess *getOptimized() const;
+  inline void setOptimized(MemoryAccess *);
+
+  // Retrieve AliasResult type of the optimized access. Ideally this would be
+  // returned by the caching walker and may go away in the future.
+  Optional<AliasResult> getOptimizedAccessType() const {
+    return OptimizedAccessAlias;
+  }
+
+  /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
+  /// be rewalked by the walker if necessary.
+  /// This really should only be called by tests.
+  inline void resetOptimized();
+
+protected:
+  friend class MemorySSA;
+  friend class MemorySSAUpdater;
+
+  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
+                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
+      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
+        OptimizedAccessAlias(MayAlias) {
+    setDefiningAccess(DMA);
+  }
+
+  // Use deleteValue() to delete a generic MemoryUseOrDef.
+  ~MemoryUseOrDef() = default;
+
+  void setOptimizedAccessType(Optional<AliasResult> AR) {
+    OptimizedAccessAlias = AR;
+  }
+
+  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
+                         Optional<AliasResult> AR = MayAlias) {
+    if (!Optimized) {
+      setOperand(0, DMA);
+      return;
+    }
+    setOptimized(DMA);
+    setOptimizedAccessType(AR);
+  }
+
+private:
+  Instruction *MemoryInstruction;
+  Optional<AliasResult> OptimizedAccessAlias;
+};
+
+template <>
+struct OperandTraits<MemoryUseOrDef>
+    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
+
+/// \brief Represents read-only accesses to memory
+///
+/// In particular, the set of Instructions that will be represented by
+/// MemoryUse's is exactly the set of Instructions for which
+/// AliasAnalysis::getModRefInfo returns "Ref".
+class MemoryUse final : public MemoryUseOrDef {
+public:
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
+
+  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
+      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
+
+  // allocate space for exactly one operand
+  void *operator new(size_t s) { return User::operator new(s, 1); }
+
+  static bool classof(const Value *MA) {
+    return MA->getValueID() == MemoryUseVal;
+  }
+
+  void print(raw_ostream &OS) const;
+
+  void setOptimized(MemoryAccess *DMA) {
+    OptimizedID = DMA->getID();
+    setOperand(0, DMA);
+  }
+
+  bool isOptimized() const {
+    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
+  }
+
+  MemoryAccess *getOptimized() const {
+    return getDefiningAccess();
+  }
+
+  void resetOptimized() {
+    OptimizedID = INVALID_MEMORYACCESS_ID;
+  }
+
+protected:
+  friend class MemorySSA;
+
+private:
+  static void deleteMe(DerivedUser *Self);
+
+  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
+};
+
+template <>
+struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
+
+/// \brief Represents a read-write access to memory, whether it is a
+/// must-alias or a may-alias.
+///
+/// In particular, the set of Instructions that will be represented by
+/// MemoryDef's is exactly the set of Instructions for which
+/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
+/// Note that, in order to provide def-def chains, all defs also have a use
+/// associated with them. This use points to the nearest reaching
+/// MemoryDef/MemoryPhi.
+class MemoryDef final : public MemoryUseOrDef {
+public:
+  friend class MemorySSA;
+
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
+
+  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
+            unsigned Ver)
+      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
+
+  // allocate space for exactly one operand
+  void *operator new(size_t s) { return User::operator new(s, 1); }
+
+  static bool classof(const Value *MA) {
+    return MA->getValueID() == MemoryDefVal;
+  }
+
+  void setOptimized(MemoryAccess *MA) {
+    Optimized = MA;
+    OptimizedID = getDefiningAccess()->getID();
+  }
+
+  MemoryAccess *getOptimized() const {
+    return cast_or_null<MemoryAccess>(Optimized);
+  }
+
+  bool isOptimized() const {
+    return getOptimized() && getDefiningAccess() &&
+           OptimizedID == getDefiningAccess()->getID();
+  }
+
+  void resetOptimized() {
+    OptimizedID = INVALID_MEMORYACCESS_ID;
+  }
+
+  void print(raw_ostream &OS) const;
+
+  unsigned getID() const { return ID; }
+
+private:
+  static void deleteMe(DerivedUser *Self);
+
+  const unsigned ID;
+  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
+  WeakVH Optimized;
+};
+
+template <>
+struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
+
+/// \brief Represents phi nodes for memory accesses.
+///
+/// These have the same semantics as regular phi nodes, with the exception that
+/// only one phi will ever exist in a given basic block.
+/// Guaranteeing one phi per block means guaranteeing there is only ever one
+/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
+/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
+/// a MemoryPhi's operands.
+/// That is, given
+/// if (a) {
+///   store %a
+///   store %b
+/// }
+/// it *must* be transformed into
+/// if (a) {
+///    1 = MemoryDef(liveOnEntry)
+///    store %a
+///    2 = MemoryDef(1)
+///    store %b
+/// }
+/// and *not*
+/// if (a) {
+///    1 = MemoryDef(liveOnEntry)
+///    store %a
+///    2 = MemoryDef(liveOnEntry)
+///    store %b
+/// }
+/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
+/// end of the branch, and if there are not two phi nodes, one will be
+/// disconnected completely from the SSA graph below that point.
+/// Because MemoryUse's do not generate new definitions, they do not have this
+/// issue.
+class MemoryPhi final : public MemoryAccess {
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) { return User::operator new(s); }
+
+public:
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
+
+  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
+      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
+        ReservedSpace(NumPreds) {
+    allocHungoffUses(ReservedSpace);
+  }
+
+  // Block iterator interface. This provides access to the list of incoming
+  // basic blocks, which parallels the list of incoming values.
+  using block_iterator = BasicBlock **;
+  using const_block_iterator = BasicBlock *const *;
+
+  block_iterator block_begin() {
+    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
+    return reinterpret_cast<block_iterator>(Ref + 1);
+  }
+
+  const_block_iterator block_begin() const {
+    const auto *Ref =
+        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
+    return reinterpret_cast<const_block_iterator>(Ref + 1);
+  }
+
+  block_iterator block_end() { return block_begin() + getNumOperands(); }
+
+  const_block_iterator block_end() const {
+    return block_begin() + getNumOperands();
+  }
+
+  iterator_range<block_iterator> blocks() {
+    return make_range(block_begin(), block_end());
+  }
+
+  iterator_range<const_block_iterator> blocks() const {
+    return make_range(block_begin(), block_end());
+  }
+
+  op_range incoming_values() { return operands(); }
+
+  const_op_range incoming_values() const { return operands(); }
+
+  /// \brief Return the number of incoming edges
+  unsigned getNumIncomingValues() const { return getNumOperands(); }
+
+  /// \brief Return incoming value number @p I.
+  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
+  void setIncomingValue(unsigned I, MemoryAccess *V) {
+    assert(V && "PHI node got a null value!");
+    setOperand(I, V);
+  }
+
+  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
+  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
+
+  /// \brief Return incoming basic block number @p I.
+  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
+
+  /// \brief Return incoming basic block corresponding
+  /// to an operand of the PHI.
+  BasicBlock *getIncomingBlock(const Use &U) const {
+    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
+    return getIncomingBlock(unsigned(&U - op_begin()));
+  }
+
+  /// \brief Return incoming basic block corresponding
+  /// to value use iterator.
+  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
+    return getIncomingBlock(I.getUse());
+  }
+
+  void setIncomingBlock(unsigned I, BasicBlock *BB) {
+    assert(BB && "PHI node got a null basic block!");
+    block_begin()[I] = BB;
+  }
+
+  /// \brief Add an incoming value to the end of the PHI list
+  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
+    if (getNumOperands() == ReservedSpace)
+      growOperands(); // Get more space!
+    // Initialize some new operands.
+    setNumHungOffUseOperands(getNumOperands() + 1);
+    setIncomingValue(getNumOperands() - 1, V);
+    setIncomingBlock(getNumOperands() - 1, BB);
+  }
+
+  /// \brief Return the first index of the specified basic
+  /// block in the value list for this PHI.  Returns -1 if no instance.
+  int getBasicBlockIndex(const BasicBlock *BB) const {
+    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
+      if (block_begin()[I] == BB)
+        return I;
+    return -1;
+  }
+
+  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
+    int Idx = getBasicBlockIndex(BB);
+    assert(Idx >= 0 && "Invalid basic block argument!");
+    return getIncomingValue(Idx);
+  }
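+
+  // Illustrative sketch (not part of the original header): walking a
+  // MemoryPhi's incoming (value, block) pairs, assuming a MemoryPhi *MP:
+  //
+  //   for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
+  //     MemoryAccess *IncomingMA = MP->getIncomingValue(I);
+  //     BasicBlock *IncomingBB = MP->getIncomingBlock(I);
+  //     // ... inspect the (IncomingMA, IncomingBB) pair ...
+  //   }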
+
+  static bool classof(const Value *V) {
+    return V->getValueID() == MemoryPhiVal;
+  }
+
+  void print(raw_ostream &OS) const;
+
+  unsigned getID() const { return ID; }
+
+protected:
+  friend class MemorySSA;
+
+  /// \brief This is more complicated than the generic
+  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
+  /// values and pointers to the incoming blocks, all in one allocation.
+  void allocHungoffUses(unsigned N) {
+    User::allocHungoffUses(N, /* IsPhi */ true);
+  }
+
+private:
+  // For debugging only
+  const unsigned ID;
+  unsigned ReservedSpace;
+
+  /// \brief This grows the operand list in response to a push_back style of
+  /// operation, increasing the number of operands by 1.5 times.
+  void growOperands() {
+    unsigned E = getNumOperands();
+    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
+    ReservedSpace = std::max(E + E / 2, 2u);
+    growHungoffUses(ReservedSpace, /* IsPhi */ true);
+  }
+
+  static void deleteMe(DerivedUser *Self);
+};
+
+inline unsigned MemoryAccess::getID() const {
+  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
+         "only memory defs and phis have ids");
+  if (const auto *MD = dyn_cast<MemoryDef>(this))
+    return MD->getID();
+  return cast<MemoryPhi>(this)->getID();
+}
+
+inline bool MemoryUseOrDef::isOptimized() const {
+  if (const auto *MD = dyn_cast<MemoryDef>(this))
+    return MD->isOptimized();
+  return cast<MemoryUse>(this)->isOptimized();
+}
+
+inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
+  if (const auto *MD = dyn_cast<MemoryDef>(this))
+    return MD->getOptimized();
+  return cast<MemoryUse>(this)->getOptimized();
+}
+
+inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
+  if (auto *MD = dyn_cast<MemoryDef>(this))
+    MD->setOptimized(MA);
+  else
+    cast<MemoryUse>(this)->setOptimized(MA);
+}
+
+inline void MemoryUseOrDef::resetOptimized() {
+  if (auto *MD = dyn_cast<MemoryDef>(this))
+    MD->resetOptimized();
+  else
+    cast<MemoryUse>(this)->resetOptimized();
+}
+
+template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
+
+/// \brief Encapsulates MemorySSA, including all data associated with memory
+/// accesses.
+class MemorySSA {
+public:
+  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
+  ~MemorySSA();
+
+  MemorySSAWalker *getWalker();
+
+  /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
+  /// access associated with it. If passed a basic block, gets the memory phi
+  /// node that exists for that block, if there is one. Otherwise, this will get
+  /// a MemoryUseOrDef.
+  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
+  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
+
+  void dump() const;
+  void print(raw_ostream &) const;
+
+  /// \brief Return true if \p MA represents the live on entry value
+  ///
+  /// Loads and stores from pointer arguments and other global values may be
+  /// defined by memory operations that do not occur in the current function, so
+  /// they may be live on entry to the function. MemorySSA represents such
+  /// memory state by the live on entry definition, which is guaranteed to occur
+  /// before any other memory access in the function.
+  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
+    return MA == LiveOnEntryDef.get();
+  }
+
+  inline MemoryAccess *getLiveOnEntryDef() const {
+    return LiveOnEntryDef.get();
+  }
+
+  // Sadly, iplists, by default, own and delete pointers added to the
+  // list. It's not currently possible to have two iplists for the same type,
+  // where one owns the pointers, and one does not. This is because the traits
+  // are per-type, not per-tag.  If this ever changes, we should make the
+  // DefList an iplist.
+  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
+  using DefsList =
+      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
+
+  /// \brief Return the list of MemoryAccess's for a given basic block.
+  ///
+  /// This list is not modifiable by the user.
+  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
+    return getWritableBlockAccesses(BB);
+  }
+
+  /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
+  /// block.
+  ///
+  /// This list is not modifiable by the user.
+  const DefsList *getBlockDefs(const BasicBlock *BB) const {
+    return getWritableBlockDefs(BB);
+  }
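+
+  // Illustrative sketch (not part of the original header): printing every
+  // MemoryAccess in a block, assuming a MemorySSA &MSSA and a BasicBlock *BB:
+  //
+  //   if (const AccessList *Accesses = MSSA.getBlockAccesses(BB))
+  //     for (const MemoryAccess &MA : *Accesses)
+  //       MA.print(errs());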
+
+  /// \brief Given two memory accesses in the same basic block, determine
+  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
+  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
+
+  /// \brief Given two memory accesses in potentially different blocks,
+  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
+  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
+
+  /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
+  /// dominates Use \p B.
+  bool dominates(const MemoryAccess *A, const Use &B) const;
+
+  /// \brief Verify that MemorySSA is self-consistent (i.e., definitions
+  /// dominate all uses, uses appear in the right places).  This is used by
+  /// unit tests.
+  void verifyMemorySSA() const;
+
+  /// Used in various insertion functions to specify whether we are talking
+  /// about the beginning or end of a block.
+  enum InsertionPlace { Beginning, End };
+
+protected:
+  // Used by Memory SSA annotator, dumpers, and wrapper pass
+  friend class MemorySSAAnnotatedWriter;
+  friend class MemorySSAPrinterLegacyPass;
+  friend class MemorySSAUpdater;
+
+  void verifyDefUses(Function &F) const;
+  void verifyDomination(Function &F) const;
+  void verifyOrdering(Function &F) const;
+
+  // This is used by the use optimizer and updater.
+  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
+    auto It = PerBlockAccesses.find(BB);
+    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
+  }
+
+  // This is used by the use optimizer and updater.
+  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
+    auto It = PerBlockDefs.find(BB);
+    return It == PerBlockDefs.end() ? nullptr : It->second.get();
+  }
+
+  // These are used by the updater to perform various internal MemorySSA
+  // machinations. They do not always leave the IR in a correct state, and
+  // rely on the updater to fix up what they break, so they are not public.
+
+  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
+  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
+
+  // Rename the dominator tree branch rooted at BB.
+  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
+                  SmallPtrSetImpl<BasicBlock *> &Visited) {
+    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
+  }
+
+  void removeFromLookups(MemoryAccess *);
+  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
+  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
+                               InsertionPlace);
+  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
+                             AccessList::iterator);
+  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
+
+private:
+  class CachingWalker;
+  class OptimizeUses;
+
+  CachingWalker *getWalkerImpl();
+  void buildMemorySSA();
+  void optimizeUses();
+
+  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
+
+  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
+  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
+
+  void
+  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
+  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
+  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
+  MemoryPhi *createMemoryPhi(BasicBlock *BB);
+  MemoryUseOrDef *createNewAccess(Instruction *);
+  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
+  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
+                     const DenseMap<const BasicBlock *, unsigned int> &);
+  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
+  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
+  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
+                  SmallPtrSetImpl<BasicBlock *> &Visited,
+                  bool SkipVisited = false, bool RenameAllUses = false);
+  AccessList *getOrCreateAccessList(const BasicBlock *);
+  DefsList *getOrCreateDefsList(const BasicBlock *);
+  void renumberBlock(const BasicBlock *) const;
+  AliasAnalysis *AA;
+  DominatorTree *DT;
+  Function &F;
+
+  // Memory SSA mappings
+  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
+
+  // These two mappings contain the main block to access/def mappings for
+  // MemorySSA. The list contained in PerBlockAccesses really owns all the
+  // MemoryAccesses.
+  // Both maps maintain the invariant that if a block is found in them, the
+  // corresponding list is not empty, and if a block is not found in them, the
+  // corresponding list is empty.
+  AccessMap PerBlockAccesses;
+  DefsMap PerBlockDefs;
+  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
+
+  // Domination mappings
+  // Note that the numbering is local to a block, even though the map is
+  // global.
+  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
+  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
+
+  // Memory SSA building info
+  std::unique_ptr<CachingWalker> Walker;
+  unsigned NextID;
+};
+
+// Internal MemorySSA utils, for use by MemorySSA classes and walkers
+class MemorySSAUtil {
+protected:
+  friend class GVNHoist;
+  friend class MemorySSAWalker;
+
+  // This function should not be used by new passes.
+  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
+                                  AliasAnalysis &AA);
+};
+
+// This pass does eager building and then printing of MemorySSA. It is used by
+// the tests to be able to build, dump, and verify Memory SSA.
+class MemorySSAPrinterLegacyPass : public FunctionPass {
+public:
+  MemorySSAPrinterLegacyPass();
+
+  bool runOnFunction(Function &) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  static char ID;
+};
+
+/// An analysis that produces \c MemorySSA for a function.
+///
+class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
+  friend AnalysisInfoMixin<MemorySSAAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
+  // pointers after construction.  Use a wrapper class instead of plain
+  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
+  struct Result {
+    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
+
+    MemorySSA &getMSSA() { return *MSSA.get(); }
+
+    std::unique_ptr<MemorySSA> MSSA;
+  };
+
+  Result run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Printer pass for \c MemorySSA.
+class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for \c MemorySSA.
+struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Legacy analysis pass which computes \c MemorySSA.
+class MemorySSAWrapperPass : public FunctionPass {
+public:
+  MemorySSAWrapperPass();
+
+  static char ID;
+
+  bool runOnFunction(Function &) override;
+  void releaseMemory() override;
+  MemorySSA &getMSSA() { return *MSSA; }
+  const MemorySSA &getMSSA() const { return *MSSA; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  void verifyAnalysis() const override;
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+private:
+  std::unique_ptr<MemorySSA> MSSA;
+};
+
+/// \brief This is the generic walker interface for walkers of MemorySSA.
+/// Walkers are used to further disambiguate the def-use chains MemorySSA
+/// gives you, or to otherwise produce better info than MemorySSA gives you.
+/// In particular, while the def-use chains provide basic information, and are
+/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
+/// MemoryUse as AliasAnalysis considers it, a user may want better or other
+/// information. In particular, they may want to use SCEV info to further
+/// disambiguate memory accesses, or they may want the nearest dominating
+/// may-aliasing MemoryDef for a call or a store. This API enables a
+/// standardized interface to getting and using that info.
+class MemorySSAWalker {
+public:
+  MemorySSAWalker(MemorySSA *);
+  virtual ~MemorySSAWalker() = default;
+
+  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
+
+  /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
+  /// will give you the nearest dominating MemoryAccess that Mod's the location
+  /// the instruction accesses (by skipping any def which AA can prove does not
+  /// alias the location(s) accessed by the instruction given).
+  ///
+  /// Note that this will return a single access, and it must dominate the
+  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
+  /// this will return the MemoryPhi, not the operand. This means that
+  /// given:
+  /// if (a) {
+  ///   1 = MemoryDef(liveOnEntry)
+  ///   store %a
+  /// } else {
+  ///   2 = MemoryDef(liveOnEntry)
+  ///   store %b
+  /// }
+  /// 3 = MemoryPhi(2, 1)
+  /// MemoryUse(3)
+  /// load %a
+  ///
+  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
+  /// in the if (a) branch.
+  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
+    MemoryAccess *MA = MSSA->getMemoryAccess(I);
+    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
+    return getClobberingMemoryAccess(MA);
+  }
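+
+  // Illustrative sketch (not part of the original header): finding the
+  // clobber of a load, assuming a MemorySSA &MSSA and a LoadInst *LI that
+  // already has a MemoryAccess:
+  //
+  //   MemorySSAWalker *Walker = MSSA.getWalker();
+  //   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
+  //   if (MSSA.isLiveOnEntryDef(Clobber))
+  //     ; // nothing in this function clobbers LI's location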
+
+  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
+  /// but takes a MemoryAccess instead of an Instruction.
+  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
+
+  /// \brief Given a potentially clobbering memory access and a new location,
+  /// calling this will give you the nearest dominating clobbering MemoryAccess
+  /// (by skipping non-aliasing def links).
+  ///
+  /// This version of the function is mainly used to disambiguate phi translated
+  /// pointers, where the value of a pointer may have changed from the initial
+  /// memory access. Note that this expects to be handed either a MemoryUse,
+  /// or an already potentially clobbering access. Unlike the above API, if
+  /// given a MemoryDef that clobbers the pointer as the starting access, it
+  /// will return that MemoryDef, whereas the above would return the clobber
+/// starting from the use side of the memory def.
+  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
+                                                  const MemoryLocation &) = 0;
+
+  /// \brief Given a memory access, invalidate anything this walker knows about
+  /// that access.
+  /// This API is used by walkers that store information to perform basic cache
+  /// invalidation.  This will be called by MemorySSA at appropriate times for
+  /// the walker it uses or returns.
+  virtual void invalidateInfo(MemoryAccess *) {}
+
+  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
+
+protected:
+  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
+                          // constructor.
+  MemorySSA *MSSA;
+};
+
+/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
+/// simply returns the links as they were constructed by the builder.
+class DoNothingMemorySSAWalker final : public MemorySSAWalker {
+public:
+  // Keep the overrides below from hiding the Instruction overload of
+  // getClobberingMemoryAccess.
+  using MemorySSAWalker::getClobberingMemoryAccess;
+
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
+                                          const MemoryLocation &) override;
+};
+
+using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
+using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
+
+/// \brief Iterator base class used to implement const and non-const iterators
+/// over the defining accesses of a MemoryAccess.
+template <class T>
+class memoryaccess_def_iterator_base
+    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
+                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
+                                  T *> {
+  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
+
+public:
+  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
+  memoryaccess_def_iterator_base() = default;
+
+  bool operator==(const memoryaccess_def_iterator_base &Other) const {
+    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
+  }
+
+  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
+  // block from the operand in constant time (In a PHINode, the uselist has
+  // both, so it's just subtraction). We provide it as part of the
+  // iterator to avoid callers having to linear walk to get the block.
+  // If the operation becomes constant time on MemoryPHI's, this bit of
+  // abstraction breaking should be removed.
+  BasicBlock *getPhiArgBlock() const {
+    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
+    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
+    return MP->getIncomingBlock(ArgNo);
+  }
+
+  typename BaseT::iterator::pointer operator*() const {
+    assert(Access && "Tried to access past the end of our iterator");
+    // Go to the first argument for phis, and the defining access for everything
+    // else.
+    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
+      return MP->getIncomingValue(ArgNo);
+    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
+  }
+
+  using BaseT::operator++;
+  memoryaccess_def_iterator &operator++() {
+    assert(Access && "Hit end of iterator");
+    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
+      if (++ArgNo >= MP->getNumIncomingValues()) {
+        ArgNo = 0;
+        Access = nullptr;
+      }
+    } else {
+      Access = nullptr;
+    }
+    return *this;
+  }
+
+private:
+  T *Access = nullptr;
+  unsigned ArgNo = 0;
+};
+
+inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
+  return memoryaccess_def_iterator(this);
+}
+
+inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
+  return const_memoryaccess_def_iterator(this);
+}
+
+inline memoryaccess_def_iterator MemoryAccess::defs_end() {
+  return memoryaccess_def_iterator();
+}
+
+inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
+  return const_memoryaccess_def_iterator();
+}
+
+/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
+/// and uses in the inverse case.
+template <> struct GraphTraits<MemoryAccess *> {
+  using NodeRef = MemoryAccess *;
+  using ChildIteratorType = memoryaccess_def_iterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
+};
+
+template <> struct GraphTraits<Inverse<MemoryAccess *>> {
+  using NodeRef = MemoryAccess *;
+  using ChildIteratorType = MemoryAccess::iterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
+};
+
+/// \brief Provide an iterator that walks defs, giving both the memory access,
+/// and the current pointer location, updating the pointer location as it
+/// changes due to phi node translation.
+///
+/// This iterator, while somewhat specialized, is what most clients actually
+/// want when walking upwards through MemorySSA def chains. It takes a pair of
+/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
+/// memory location through phi nodes for the user.
+class upward_defs_iterator
+    : public iterator_facade_base<upward_defs_iterator,
+                                  std::forward_iterator_tag,
+                                  const MemoryAccessPair> {
+  using BaseT = upward_defs_iterator::iterator_facade_base;
+
+public:
+  upward_defs_iterator(const MemoryAccessPair &Info)
+      : DefIterator(Info.first), Location(Info.second),
+        OriginalAccess(Info.first) {
+    CurrentPair.first = nullptr;
+
+    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
+    fillInCurrentPair();
+  }
+
+  upward_defs_iterator() { CurrentPair.first = nullptr; }
+
+  bool operator==(const upward_defs_iterator &Other) const {
+    return DefIterator == Other.DefIterator;
+  }
+
+  BaseT::iterator::reference operator*() const {
+    assert(DefIterator != OriginalAccess->defs_end() &&
+           "Tried to access past the end of our iterator");
+    return CurrentPair;
+  }
+
+  using BaseT::operator++;
+  upward_defs_iterator &operator++() {
+    assert(DefIterator != OriginalAccess->defs_end() &&
+           "Tried to access past the end of the iterator");
+    ++DefIterator;
+    if (DefIterator != OriginalAccess->defs_end())
+      fillInCurrentPair();
+    return *this;
+  }
+
+  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
+
+private:
+  void fillInCurrentPair() {
+    CurrentPair.first = *DefIterator;
+    if (WalkingPhi && Location.Ptr) {
+      PHITransAddr Translator(
+          const_cast<Value *>(Location.Ptr),
+          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
+      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
+                                        DefIterator.getPhiArgBlock(), nullptr,
+                                        false))
+        if (Translator.getAddr() != Location.Ptr) {
+          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
+          return;
+        }
+    }
+    CurrentPair.second = Location;
+  }
+
+  MemoryAccessPair CurrentPair;
+  memoryaccess_def_iterator DefIterator;
+  MemoryLocation Location;
+  MemoryAccess *OriginalAccess = nullptr;
+  bool WalkingPhi = false;
+};
+
+inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
+  return upward_defs_iterator(Pair);
+}
+
+inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
+
+inline iterator_range<upward_defs_iterator>
+upward_defs(const MemoryAccessPair &Pair) {
+  return make_range(upward_defs_begin(Pair), upward_defs_end());
+}
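+
+// Illustrative sketch (not part of the original header): walking upward defs
+// from a use while phi-translating the location; MSSA, a MemoryUse *MU, and
+// its MemoryLocation Loc are assumptions:
+//
+//   MemoryAccessPair Start(MU->getDefiningAccess(), Loc);
+//   for (const MemoryAccessPair &P : upward_defs(Start)) {
+//     // P.first is the defining access; P.second is Loc, possibly
+//     // phi-translated for the block of P.first.
+//   }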
+
+/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
+/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
+/// comparing against a null def_chain_iterator, this will compare equal only
+/// after walking said Phi/liveOnEntry.
+///
+/// The UseOptimizedChain flag specifies whether to walk the clobbering
+/// access chain, or all the accesses.
+///
+/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
+/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
+/// a phi node.  The optimized chain walks the clobbering access of a store.
+/// So if you are just trying to find, given a store, what the next
+/// thing that would clobber the same memory is, you want the optimized chain.
+template <class T, bool UseOptimizedChain = false>
+struct def_chain_iterator
+    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
+                                  std::forward_iterator_tag, MemoryAccess *> {
+  def_chain_iterator() : MA(nullptr) {}
+  def_chain_iterator(T MA) : MA(MA) {}
+
+  T operator*() const { return MA; }
+
+  def_chain_iterator &operator++() {
+    // N.B. liveOnEntry has a null defining access.
+    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
+      if (UseOptimizedChain && MUD->isOptimized())
+        MA = MUD->getOptimized();
+      else
+        MA = MUD->getDefiningAccess();
+    } else {
+      MA = nullptr;
+    }
+
+    return *this;
+  }
+
+  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
+
+private:
+  T MA;
+};
+
+template <class T>
+inline iterator_range<def_chain_iterator<T>>
+def_chain(T MA, MemoryAccess *UpTo = nullptr) {
+#ifdef EXPENSIVE_CHECKS
+  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
+         "UpTo isn't in the def chain!");
+#endif
+  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
+}
+
+template <class T>
+inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
+  return make_range(def_chain_iterator<T, true>(MA),
+                    def_chain_iterator<T, true>(nullptr));
+}
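+
+// Illustrative sketch (not part of the original header): visiting every def
+// above a MemoryDef until the nearest phi or liveOnEntry, assuming a
+// MemoryDef *MD:
+//
+//   MemoryAccess *Start = MD; // widen the type so ++ can store any access
+//   for (MemoryAccess *MA : def_chain(Start)) {
+//     // Visits MD, each MemoryDef above it, and finally the terminating
+//     // MemoryPhi or liveOnEntry def.
+//   }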
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMORYSSA_H
diff --git a/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h b/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h
new file mode 100644
index 0000000..3f4ef06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h
@@ -0,0 +1,158 @@
+//===- MemorySSAUpdater.h - Memory SSA Updater-------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// \brief An automatic updater for MemorySSA that handles arbitrary insertion,
+// deletion, and moves.  It performs phi insertion where necessary, and
+// automatically updates the MemorySSA IR to be correct.
+// While updating loads or removing instructions is often easy enough to not
+// need this, updating stores should generally not be attempted outside this
+// API.
+//
+// Basic API usage:
+// Create the memory access you want for the instruction (this is mainly so
+// we know where it is, without having to duplicate the entire set of create
+// functions MemorySSA supports).
+// Call insertDef or insertUse depending on whether it's a MemoryUse or a
+// MemoryDef.
+// That's it.
+//
+// For moving, first, move the instruction itself using the normal SSA
+// instruction moving API, then just call moveBefore, moveAfter, or moveTo with
+// the right arguments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
+#define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class Function;
+class Instruction;
+class MemoryAccess;
+class LLVMContext;
+class raw_ostream;
+
+class MemorySSAUpdater {
+private:
+  MemorySSA *MSSA;
+  SmallVector<MemoryPhi *, 8> InsertedPHIs;
+  SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
+
+public:
+  MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
+  /// Insert a definition into the MemorySSA IR.  RenameUses will rename any use
+  /// below the new def block (and any inserted phis).  RenameUses should be set
+  /// to true if the definition may cause new aliases for loads below it.  This
+  /// is not the case for hoisting or sinking or other forms of code *movement*.
+  /// It *is* the case for straight code insertion.
+  /// For example:
+  /// store a
+  /// if (foo) { }
+  /// load a
+  ///
+  /// Moving the store into the if block, and calling insertDef, does not
+  /// require RenameUses.
+  /// However, changing it to:
+  /// store a
+  /// if (foo) { store b }
+  /// load a
+  /// Where a mayalias b, *does* require RenameUses be set to true.
+  void insertDef(MemoryDef *Def, bool RenameUses = false);
+  void insertUse(MemoryUse *Use);
+  void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
+  void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
+  void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
+                   MemorySSA::InsertionPlace Where);
+
+  // The below are utility functions. Other than creation of accesses to pass
+  // to insertDef, and removeMemoryAccess to remove accesses, you should
+  // generally not attempt to update MemorySSA yourself. It is very
+  // non-trivial to get the edge cases right, and the above calls already
+  // operate in near-optimal time bounds.
+
+  /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
+  /// with a specified clobbering definition.
+  ///
+  /// Returns the new MemoryAccess.
+  /// This should be called when a memory instruction is created that is being
+  /// used to replace an existing memory instruction. It will *not* create PHI
+  /// nodes, or verify the clobbering definition. The insertion place is used
+  /// solely to determine where in the MemorySSA access lists the instruction
+  /// will be placed. The caller is expected to keep the access ordering
+  /// consistent with the instruction ordering.
+  /// Note: If a MemoryAccess already exists for I, this function will make it
+  /// inaccessible and it *must* have removeMemoryAccess called on it.
+  MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
+                                       const BasicBlock *BB,
+                                       MemorySSA::InsertionPlace Point);
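+
+  // Illustrative sketch (not part of the original header): hooking a newly
+  // created store into MemorySSA; MSSA, a StoreInst *SI already inserted at
+  // the front of its block, and a known clobbering definition Defn are
+  // assumptions:
+  //
+  //   MemorySSAUpdater Updater(&MSSA);
+  //   auto *Def = cast<MemoryDef>(Updater.createMemoryAccessInBB(
+  //       SI, Defn, SI->getParent(), MemorySSA::Beginning));
+  //   Updater.insertDef(Def, /*RenameUses=*/true);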
+
+  /// \brief Create a MemoryAccess in MemorySSA before or after an existing
+  /// MemoryAccess.
+  ///
+  /// Returns the new MemoryAccess.
+  /// This should be called when a memory instruction is created that is being
+  /// used to replace an existing memory instruction. It will *not* create PHI
+  /// nodes, or verify the clobbering definition.
+  ///
+  /// Note: If a MemoryAccess already exists for I, this function will make it
+  /// inaccessible and it *must* have removeMemoryAccess called on it.
+  MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
+                                           MemoryAccess *Definition,
+                                           MemoryUseOrDef *InsertPt);
+  MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
+                                          MemoryAccess *Definition,
+                                          MemoryAccess *InsertPt);
+
+  /// \brief Remove a MemoryAccess from MemorySSA, including updating all
+  /// definitions and uses.
+  /// This should be called when a memory instruction that has a MemoryAccess
+  /// associated with it is erased from the program.  For example, if a store or
+  /// load is simply erased (not replaced), removeMemoryAccess should be called
+  /// on the MemoryAccess for that store/load.
+  void removeMemoryAccess(MemoryAccess *);
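+
+  // Illustrative sketch (not part of the original header): erasing a store
+  // and its access together, assuming MSSA, Updater, and a StoreInst *SI:
+  //
+  //   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(SI))
+  //     Updater.removeMemoryAccess(MA);
+  //   SI->eraseFromParent();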
+
+private:
+  // Move What before Where in the MemorySSA IR.
+  template <class WhereType>
+  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
+  MemoryAccess *getPreviousDef(MemoryAccess *);
+  MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
+  MemoryAccess *
+  getPreviousDefFromEnd(BasicBlock *,
+                        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &);
+  MemoryAccess *
+  getPreviousDefRecursive(BasicBlock *,
+                          DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &);
+  MemoryAccess *recursePhi(MemoryAccess *Phi);
+  template <class RangeType>
+  MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
+  void fixupDefs(const SmallVectorImpl<MemoryAccess *> &);
+};
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMORYSSAUPDATER_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ModuleSummaryAnalysis.h b/linux-x64/clang/include/llvm/Analysis/ModuleSummaryAnalysis.h
new file mode 100644
index 0000000..9af7859
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ModuleSummaryAnalysis.h
@@ -0,0 +1,81 @@
+//===- ModuleSummaryAnalysis.h - Module summary index builder ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface to build a ModuleSummaryIndex for a module.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
+#define LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <functional>
+
+namespace llvm {
+
+class BlockFrequencyInfo;
+class Function;
+class Module;
+class ProfileSummaryInfo;
+
+/// Direct function to compute a \c ModuleSummaryIndex from a given module.
+///
+/// If operating within a pass manager which has defined ways to compute the \c
+/// BlockFrequencyInfo for a given function, that can be provided via
+/// a std::function callback. Otherwise, this routine will manually construct
+/// that information.
+ModuleSummaryIndex buildModuleSummaryIndex(
+    const Module &M,
+    std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
+    ProfileSummaryInfo *PSI);
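+
+// Illustrative sketch (not part of the original header): building an index
+// without precomputed BFI or profile data, assuming a Module &M; whether a
+// null callback/PSI is acceptable here is an assumption:
+//
+//   ModuleSummaryIndex Index =
+//       buildModuleSummaryIndex(M, /*GetBFICallback=*/nullptr,
+//                               /*PSI=*/nullptr);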
+
+/// Analysis pass to provide the ModuleSummaryIndex object.
+class ModuleSummaryIndexAnalysis
+    : public AnalysisInfoMixin<ModuleSummaryIndexAnalysis> {
+  friend AnalysisInfoMixin<ModuleSummaryIndexAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = ModuleSummaryIndex;
+
+  Result run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the ModuleSummaryIndex object.
+class ModuleSummaryIndexWrapperPass : public ModulePass {
+  Optional<ModuleSummaryIndex> Index;
+
+public:
+  static char ID;
+
+  ModuleSummaryIndexWrapperPass();
+
+  /// Get the index built by the pass.
+  ModuleSummaryIndex &getIndex() { return *Index; }
+  const ModuleSummaryIndex &getIndex() const { return *Index; }
+
+  bool runOnModule(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createModuleSummaryIndexWrapperPass - This pass builds a ModuleSummaryIndex
+// object for the module, to be written to bitcode or LLVM assembly.
+//
+ModulePass *createModuleSummaryIndexWrapperPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/MustExecute.h b/linux-x64/clang/include/llvm/Analysis/MustExecute.h
new file mode 100644
index 0000000..fb48bb6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/MustExecute.h
@@ -0,0 +1,64 @@
+//===- MustExecute.h - Is an instruction known to execute--------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Contains a collection of routines for determining if a given instruction is
+/// guaranteed to execute if a given point in control flow is reached.  The most
+/// common example is an instruction within a loop being provably executed if we
+/// branch to the header of its containing loop.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MUSTEXECUTE_H
+#define LLVM_ANALYSIS_MUSTEXECUTE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+class Instruction;
+class DominatorTree;
+class Loop;
+
+/// \brief Captures loop safety information.
+/// It keeps track of whether the loop or its header may throw an exception or
+/// otherwise exit abnormally on any iteration that might actually execute at
+/// runtime.  The primary way to consume this information is via
+/// isGuaranteedToExecute below, but some callers bail out or fall back to
+/// alternate reasoning if a loop contains any implicit control flow.
+struct LoopSafetyInfo {
+  bool MayThrow = false;       // The current loop contains an instruction which
+                               // may throw.
+  bool HeaderMayThrow = false; // Same as previous, but specific to loop header
+  // Used to update funclet bundle operands.
+  DenseMap<BasicBlock *, ColorVector> BlockColors;
+
+  LoopSafetyInfo() = default;
+};
+
+/// \brief Computes safety information for a loop, checking the loop body and
+/// header for the possibility of a thrown exception. It takes a LoopSafetyInfo
+/// and a loop as arguments, and updates the safety information in the
+/// LoopSafetyInfo argument.
+/// Note: This is defined to clear and reinitialize an already initialized
+/// LoopSafetyInfo.  Some callers rely on this fact.
+void computeLoopSafetyInfo(LoopSafetyInfo *, Loop *);
+
+/// Returns true if the instruction in a loop is guaranteed to execute at least
+/// once (under the assumption that the loop is entered).
+bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT,
+                           const Loop *CurLoop,
+                           const LoopSafetyInfo *SafetyInfo);
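+
+// Illustrative sketch (not part of the original header): a typical call
+// sequence, assuming a Loop *L, a DominatorTree *DT, and an Instruction &I
+// inside L:
+//
+//   LoopSafetyInfo SafetyInfo;
+//   computeLoopSafetyInfo(&SafetyInfo, L);
+//   bool MustExec = isGuaranteedToExecute(I, DT, L, &SafetyInfo);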
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MUSTEXECUTE_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h
new file mode 100644
index 0000000..db524ff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -0,0 +1,97 @@
+//===- ObjCARCAliasAnalysis.h - ObjC ARC Alias Analysis ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares a simple ARC-aware AliasAnalysis using special knowledge
+/// of Objective C to enhance other optimization passes which rely on the Alias
+/// Analysis infrastructure.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H
+#define LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief This is a simple alias analysis implementation that uses knowledge
+/// of ARC constructs to answer queries.
+///
+/// TODO: This class could be generalized to know about other ObjC-specific
+/// tricks. Such as knowing that ivars in the non-fragile ABI are non-aliasing
+/// even though their offsets are dynamic.
+class ObjCARCAAResult : public AAResultBase<ObjCARCAAResult> {
+  friend AAResultBase<ObjCARCAAResult>;
+
+  const DataLayout &DL;
+
+public:
+  explicit ObjCARCAAResult(const DataLayout &DL) : AAResultBase(), DL(DL) {}
+  ObjCARCAAResult(ObjCARCAAResult &&Arg)
+      : AAResultBase(std::move(Arg)), DL(Arg.DL) {}
+
+  /// Handle invalidation events from the new pass manager.
+  ///
+  /// By definition, this result is stateless and so remains valid.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+
+  using AAResultBase::getModRefBehavior;
+  FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+  using AAResultBase::getModRefInfo;
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class ObjCARCAA : public AnalysisInfoMixin<ObjCARCAA> {
+  friend AnalysisInfoMixin<ObjCARCAA>;
+  static AnalysisKey Key;
+
+public:
+  typedef ObjCARCAAResult Result;
+
+  ObjCARCAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the ObjCARCAAResult object.
+class ObjCARCAAWrapperPass : public ImmutablePass {
+  std::unique_ptr<ObjCARCAAResult> Result;
+
+public:
+  static char ID;
+
+  ObjCARCAAWrapperPass();
+
+  ObjCARCAAResult &getResult() { return *Result; }
+  const ObjCARCAAResult &getResult() const { return *Result; }
+
+  bool doInitialization(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // namespace objcarc
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h
new file mode 100644
index 0000000..096573f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -0,0 +1,298 @@
+//===- ObjCARCAnalysisUtils.h - ObjC ARC Analysis Utilities -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines common analysis utilities used by the ObjC ARC Optimizer.
+/// ARC stands for Automatic Reference Counting and is a system for managing
+/// reference counts for objects in Objective C.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ANALYSIS_OBJCARCANALYSISUTILS_H
+#define LLVM_LIB_ANALYSIS_OBJCARCANALYSISUTILS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class raw_ostream;
+}
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief A handy option to enable/disable all ARC Optimizations.
+extern bool EnableARCOpts;
+
+/// \brief Test if the given module looks interesting to run ARC optimization
+/// on.
+inline bool ModuleHasARC(const Module &M) {
+  return
+    M.getNamedValue("objc_retain") ||
+    M.getNamedValue("objc_release") ||
+    M.getNamedValue("objc_autorelease") ||
+    M.getNamedValue("objc_retainAutoreleasedReturnValue") ||
+    M.getNamedValue("objc_unsafeClaimAutoreleasedReturnValue") ||
+    M.getNamedValue("objc_retainBlock") ||
+    M.getNamedValue("objc_autoreleaseReturnValue") ||
+    M.getNamedValue("objc_autoreleasePoolPush") ||
+    M.getNamedValue("objc_loadWeakRetained") ||
+    M.getNamedValue("objc_loadWeak") ||
+    M.getNamedValue("objc_destroyWeak") ||
+    M.getNamedValue("objc_storeWeak") ||
+    M.getNamedValue("objc_initWeak") ||
+    M.getNamedValue("objc_moveWeak") ||
+    M.getNamedValue("objc_copyWeak") ||
+    M.getNamedValue("objc_retainedObject") ||
+    M.getNamedValue("objc_unretainedObject") ||
+    M.getNamedValue("objc_unretainedPointer") ||
+    M.getNamedValue("clang.arc.use");
+}
+
+/// \brief This is a wrapper around getUnderlyingObject which also knows how to
+/// look through objc_retain and objc_autorelease calls, which we know to return
+/// their argument verbatim.
+inline const Value *GetUnderlyingObjCPtr(const Value *V,
+                                         const DataLayout &DL) {
+  for (;;) {
+    V = GetUnderlyingObject(V, DL);
+    if (!IsForwarding(GetBasicARCInstKind(V)))
+      break;
+    V = cast<CallInst>(V)->getArgOperand(0);
+  }
+
+  return V;
+}
+
+/// A wrapper for GetUnderlyingObjCPtr used for results memoization.
+inline const Value *
+GetUnderlyingObjCPtrCached(const Value *V, const DataLayout &DL,
+                           DenseMap<const Value *, const Value *> &Cache) {
+  if (auto InCache = Cache.lookup(V))
+    return InCache;
+
+  return Cache[V] = GetUnderlyingObjCPtr(V, DL);
+}
+
+/// The RCIdentity root of a value \p V is a dominating value U for which
+/// retaining or releasing U is equivalent to retaining or releasing V. In other
+/// words, ARC operations on \p V are equivalent to ARC operations on \p U.
+///
+/// We use this in the ARC optimizer to make it easier to match up ARC
+/// operations by always mapping ARC operations to RCIdentityRoots instead of
+/// pointers themselves.
+///
+/// The two ways that we see RCIdentical values in ObjC are via:
+///
+///   1. PointerCasts
+///   2. Forwarding Calls that return their argument verbatim.
+///
+/// Thus this function strips off pointer casts and forwarding calls. *NOTE*
+/// This implies that two RCIdentical values must alias.
+inline const Value *GetRCIdentityRoot(const Value *V) {
+  for (;;) {
+    V = V->stripPointerCasts();
+    if (!IsForwarding(GetBasicARCInstKind(V)))
+      break;
+    V = cast<CallInst>(V)->getArgOperand(0);
+  }
+  return V;
+}
+
+/// Helper which calls const Value *GetRCIdentityRoot(const Value *V) and just
+/// casts away the const of the result. For documentation about what an
+/// RCIdentityRoot is (and, by extension, what GetRCIdentityRoot does), look
+/// at that function.
+inline Value *GetRCIdentityRoot(Value *V) {
+  return const_cast<Value *>(GetRCIdentityRoot((const Value *)V));
+}
+
+/// \brief Assuming the given instruction is one of the special calls such as
+/// objc_retain or objc_release, return the RCIdentity root of the argument of
+/// the call.
+inline Value *GetArgRCIdentityRoot(Value *Inst) {
+  return GetRCIdentityRoot(cast<CallInst>(Inst)->getArgOperand(0));
+}
+
+inline bool IsNullOrUndef(const Value *V) {
+  return isa<ConstantPointerNull>(V) || isa<UndefValue>(V);
+}
+
+inline bool IsNoopInstruction(const Instruction *I) {
+  return isa<BitCastInst>(I) ||
+    (isa<GetElementPtrInst>(I) &&
+     cast<GetElementPtrInst>(I)->hasAllZeroIndices());
+}
+
+/// \brief Test whether the given value is possibly a retainable object pointer.
+inline bool IsPotentialRetainableObjPtr(const Value *Op) {
+  // Pointers to static or stack storage are not valid retainable object
+  // pointers.
+  if (isa<Constant>(Op) || isa<AllocaInst>(Op))
+    return false;
+  // Special arguments can not be a valid retainable object pointer.
+  if (const Argument *Arg = dyn_cast<Argument>(Op))
+    if (Arg->hasByValAttr() ||
+        Arg->hasInAllocaAttr() ||
+        Arg->hasNestAttr() ||
+        Arg->hasStructRetAttr())
+      return false;
+  // Only consider values with pointer types.
+  //
+  // It seems intuitive to exclude function pointer types as well, since
+  // functions are never retainable object pointers; however, clang
+  // occasionally bitcasts retainable object pointers to function-pointer
+  // types temporarily.
+  PointerType *Ty = dyn_cast<PointerType>(Op->getType());
+  if (!Ty)
+    return false;
+  // Conservatively assume anything else is a potential retainable object
+  // pointer.
+  return true;
+}
+
+inline bool IsPotentialRetainableObjPtr(const Value *Op,
+                                        AliasAnalysis &AA) {
+  // First make the rudimentary check.
+  if (!IsPotentialRetainableObjPtr(Op))
+    return false;
+
+  // Objects in constant memory are not reference-counted.
+  if (AA.pointsToConstantMemory(Op))
+    return false;
+
+  // Pointers in constant memory are not pointing to reference-counted objects.
+  if (const LoadInst *LI = dyn_cast<LoadInst>(Op))
+    if (AA.pointsToConstantMemory(LI->getPointerOperand()))
+      return false;
+
+  // Otherwise assume the worst.
+  return true;
+}
+
+/// \brief Helper for GetARCInstKind. Determines what kind of construct CS
+/// is.
+inline ARCInstKind GetCallSiteClass(ImmutableCallSite CS) {
+  for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+       I != E; ++I)
+    if (IsPotentialRetainableObjPtr(*I))
+      return CS.onlyReadsMemory() ? ARCInstKind::User : ARCInstKind::CallOrUser;
+
+  return CS.onlyReadsMemory() ? ARCInstKind::None : ARCInstKind::Call;
+}
+
+/// \brief Return true if this value refers to a distinct and identifiable
+/// object.
+///
+/// This is similar to AliasAnalysis's isIdentifiedObject, except that it uses
+/// special knowledge of ObjC conventions.
+inline bool IsObjCIdentifiedObject(const Value *V) {
+  // Assume that call results and arguments have their own "provenance".
+  // Constants (including GlobalVariables) and Allocas are never
+  // reference-counted.
+  if (isa<CallInst>(V) || isa<InvokeInst>(V) ||
+      isa<Argument>(V) || isa<Constant>(V) ||
+      isa<AllocaInst>(V))
+    return true;
+
+  if (const LoadInst *LI = dyn_cast<LoadInst>(V)) {
+    const Value *Pointer =
+      GetRCIdentityRoot(LI->getPointerOperand());
+    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Pointer)) {
+      // A constant pointer can't be pointing to an object on the heap. It may
+      // be reference-counted, but it won't be deleted.
+      if (GV->isConstant())
+        return true;
+      StringRef Name = GV->getName();
+      // These special variables are known to hold values which are not
+      // reference-counted pointers.
+      if (Name.startswith("\01l_objc_msgSend_fixup_"))
+        return true;
+
+      StringRef Section = GV->getSection();
+      if (Section.find("__message_refs") != StringRef::npos ||
+          Section.find("__objc_classrefs") != StringRef::npos ||
+          Section.find("__objc_superrefs") != StringRef::npos ||
+          Section.find("__objc_methname") != StringRef::npos ||
+          Section.find("__cstring") != StringRef::npos)
+        return true;
+    }
+  }
+
+  return false;
+}
+
+enum class ARCMDKindID {
+  ImpreciseRelease,
+  CopyOnEscape,
+  NoObjCARCExceptions,
+};
+
+/// A cache of MDKinds used by various ARC optimizations.
+class ARCMDKindCache {
+  Module *M;
+
+  /// The Metadata Kind for clang.imprecise_release metadata.
+  llvm::Optional<unsigned> ImpreciseReleaseMDKind;
+
+  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
+  llvm::Optional<unsigned> CopyOnEscapeMDKind;
+
+  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
+  llvm::Optional<unsigned> NoObjCARCExceptionsMDKind;
+
+public:
+  void init(Module *Mod) {
+    M = Mod;
+    ImpreciseReleaseMDKind = NoneType::None;
+    CopyOnEscapeMDKind = NoneType::None;
+    NoObjCARCExceptionsMDKind = NoneType::None;
+  }
+
+  unsigned get(ARCMDKindID ID) {
+    switch (ID) {
+    case ARCMDKindID::ImpreciseRelease:
+      if (!ImpreciseReleaseMDKind)
+        ImpreciseReleaseMDKind =
+            M->getContext().getMDKindID("clang.imprecise_release");
+      return *ImpreciseReleaseMDKind;
+    case ARCMDKindID::CopyOnEscape:
+      if (!CopyOnEscapeMDKind)
+        CopyOnEscapeMDKind =
+            M->getContext().getMDKindID("clang.arc.copy_on_escape");
+      return *CopyOnEscapeMDKind;
+    case ARCMDKindID::NoObjCARCExceptions:
+      if (!NoObjCARCExceptionsMDKind)
+        NoObjCARCExceptionsMDKind =
+            M->getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
+      return *NoObjCARCExceptionsMDKind;
+    }
+    llvm_unreachable("Covered switch isn't covered?!");
+  }
+};
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h
new file mode 100644
index 0000000..02ff035
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h
@@ -0,0 +1,124 @@
+//===- ObjCARCInstKind.h - ARC instruction equivalence classes --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_OBJCARCINSTKIND_H
+#define LLVM_ANALYSIS_OBJCARCINSTKIND_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+namespace objcarc {
+
+/// \enum ARCInstKind
+///
+/// \brief Equivalence classes of instructions in the ARC Model.
+///
+/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
+/// we instead operate on equivalence classes of instructions.
+///
+/// TODO: This should be split into two enums: a runtime entry point enum
+/// (possibly united with the ARCRuntimeEntrypoint class) and an enum that deals
+/// with effects of instructions in the ARC model (which would handle the notion
+/// of a User or CallOrUser).
+enum class ARCInstKind {
+  Retain,                   ///< objc_retain
+  RetainRV,                 ///< objc_retainAutoreleasedReturnValue
+  ClaimRV,                  ///< objc_unsafeClaimAutoreleasedReturnValue
+  RetainBlock,              ///< objc_retainBlock
+  Release,                  ///< objc_release
+  Autorelease,              ///< objc_autorelease
+  AutoreleaseRV,            ///< objc_autoreleaseReturnValue
+  AutoreleasepoolPush,      ///< objc_autoreleasePoolPush
+  AutoreleasepoolPop,       ///< objc_autoreleasePoolPop
+  NoopCast,                 ///< objc_retainedObject, etc.
+  FusedRetainAutorelease,   ///< objc_retainAutorelease
+  FusedRetainAutoreleaseRV, ///< objc_retainAutoreleaseReturnValue
+  LoadWeakRetained,         ///< objc_loadWeakRetained (primitive)
+  StoreWeak,                ///< objc_storeWeak (primitive)
+  InitWeak,                 ///< objc_initWeak (derived)
+  LoadWeak,                 ///< objc_loadWeak (derived)
+  MoveWeak,                 ///< objc_moveWeak (derived)
+  CopyWeak,                 ///< objc_copyWeak (derived)
+  DestroyWeak,              ///< objc_destroyWeak (derived)
+  StoreStrong,              ///< objc_storeStrong (derived)
+  IntrinsicUser,            ///< clang.arc.use
+  CallOrUser,               ///< could call objc_release and/or "use" pointers
+  Call,                     ///< could call objc_release
+  User,                     ///< could "use" a pointer
+  None                      ///< anything that is inert from an ARC perspective.
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);
+
+/// \brief Test if the given class is a kind of user.
+bool IsUser(ARCInstKind Class);
+
+/// \brief Test if the given class is objc_retain or equivalent.
+bool IsRetain(ARCInstKind Class);
+
+/// \brief Test if the given class is objc_autorelease or equivalent.
+bool IsAutorelease(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which return their
+/// argument verbatim.
+bool IsForwarding(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which do nothing if
+/// passed a null pointer.
+bool IsNoopOnNull(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are always safe
+/// to mark with the "tail" keyword.
+bool IsAlwaysTail(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are never safe
+/// to mark with the "tail" keyword.
+bool IsNeverTail(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are always safe
+/// to mark with the nounwind attribute.
+bool IsNoThrow(ARCInstKind Class);
+
+/// Test whether the given class of instructions can autorelease any pointer or
+/// cause an autoreleasepool pop.
+bool CanInterruptRV(ARCInstKind Class);
+
+/// \brief Determine if F is one of the special known Functions.  If it isn't,
+/// return ARCInstKind::CallOrUser.
+ARCInstKind GetFunctionClass(const Function *F);
+
+/// \brief Determine which objc runtime call instruction class V belongs to.
+///
+/// This is similar to GetARCInstKind except that it only detects objc
+/// runtime calls. This allows it to be faster.
+///
+inline ARCInstKind GetBasicARCInstKind(const Value *V) {
+  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
+    if (const Function *F = CI->getCalledFunction())
+      return GetFunctionClass(F);
+    // Otherwise, be conservative.
+    return ARCInstKind::CallOrUser;
+  }
+
+  // Otherwise, be conservative.
+  return isa<InvokeInst>(V) ? ARCInstKind::CallOrUser : ARCInstKind::User;
+}
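+
+// Illustrative usage sketch (not part of the original header): classifying an
+// instruction against the ARC model; `I` is assumed to be an Instruction*.
+//
+//   switch (GetBasicARCInstKind(I)) {
+//   case ARCInstKind::Retain:
+//     // Saw objc_retain or an equivalent runtime call.
+//     break;
+//   default:
+//     break;
+//   }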
+
+/// Map V to its ARCInstKind equivalence class.
+ARCInstKind GetARCInstKind(const Value *V);
+
+/// Returns false if we can conservatively prove that no instruction mapped to
+/// this kind can decrement a reference count. Returns true otherwise.
+bool CanDecrementRefCount(ARCInstKind Kind);
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h b/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h
new file mode 100644
index 0000000..26f32ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h
@@ -0,0 +1,168 @@
+//===- OptimizationRemarkEmitter.h - Optimization Diagnostic ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Optimization diagnostic interfaces.  They are packaged as an analysis pass
+// so that passes using this service also become dependent on BFI, which is
+// used to compute the "hotness" of the diagnostic message.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
+#define LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class DebugLoc;
+class Loop;
+class Pass;
+class Twine;
+class Value;
+
+/// The optimization diagnostic interface.
+///
+/// It allows reporting when optimizations are performed and when they are not
+/// along with the reasons for it.  Hotness information of the corresponding
+/// code region can be included in the remark if DiagnosticsHotnessRequested is
+/// enabled in the LLVM context.
+class OptimizationRemarkEmitter {
+public:
+  OptimizationRemarkEmitter(const Function *F, BlockFrequencyInfo *BFI)
+      : F(F), BFI(BFI) {}
+
+  /// \brief This variant can be used to generate ORE on demand (without the
+  /// analysis pass).
+  ///
+  /// Note that this ctor has a very different cost depending on whether
+  /// F->getContext().getDiagnosticsHotnessRequested() is on or not.  If it's
+  /// off, the operation is free.
+  ///
+  /// If DiagnosticsHotnessRequested is on, however, this is a fairly expensive
+  /// operation since BFI and all its required analyses are computed.  This is,
+  /// for example, useful for CGSCC passes that can't use function analysis
+  /// passes in the old PM.
+  OptimizationRemarkEmitter(const Function *F);
+
+  OptimizationRemarkEmitter(OptimizationRemarkEmitter &&Arg)
+      : F(Arg.F), BFI(Arg.BFI) {}
+
+  OptimizationRemarkEmitter &operator=(OptimizationRemarkEmitter &&RHS) {
+    F = RHS.F;
+    BFI = RHS.BFI;
+    return *this;
+  }
+
+  /// Handle invalidation events in the new pass manager.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+  /// \brief Output the remark via the diagnostic handler and to the
+  /// optimization record file.
+  void emit(DiagnosticInfoOptimizationBase &OptDiag);
+
+  /// \brief Take a lambda that returns a remark which will be emitted.  The
+  /// second argument is only used to restrict this overload to callable types.
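+  ///
+  /// Illustrative usage sketch (not part of the original header); DEBUG_TYPE
+  /// and the call instruction `I` are assumed to exist in the calling pass:
+  /// \code
+  ///   ORE.emit([&]() {
+  ///     return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", &I)
+  ///            << "unable to inline call";
+  ///   });
+  /// \endcode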
+  template <typename T>
+  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
+    // Avoid building the remark unless we know there are at least *some*
+    // remarks enabled. We can't currently check whether remarks are requested
+    // for the calling pass since that requires actually building the remark.
+
+    if (F->getContext().getDiagnosticsOutputFile() ||
+        F->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+      auto R = RemarkBuilder();
+      emit((DiagnosticInfoOptimizationBase &)R);
+    }
+  }
+
+  /// \brief Whether we allow for extra compile-time budget to perform more
+  /// analysis to produce fewer false positives.
+  ///
+  /// This is useful when reporting missed optimizations.  In this case we can
+  /// use the extra analysis (1) to filter trivial false positives or (2) to
+  /// provide more context so that non-trivial false positives can be quickly
+  /// detected by the user.
+  bool allowExtraAnalysis(StringRef PassName) const {
+    return (F->getContext().getDiagnosticsOutputFile() ||
+            F->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
+  }
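+
+  // Illustrative usage sketch (not part of the original header): gate a
+  // costlier analysis on remarks being enabled; DEBUG_TYPE is assumed to name
+  // the calling pass.
+  //
+  //   if (ORE.allowExtraAnalysis(DEBUG_TYPE)) {
+  //     // ... run the more expensive analysis and emit a precise remark
+  //   }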
+
+private:
+  const Function *F;
+
+  BlockFrequencyInfo *BFI;
+
+  /// If we generate BFI on demand, we need to free it when ORE is freed.
+  std::unique_ptr<BlockFrequencyInfo> OwnedBFI;
+
+  /// Compute hotness from IR value (currently assumed to be a block) if PGO is
+  /// available.
+  Optional<uint64_t> computeHotness(const Value *V);
+
+  /// Similar but use value from \p OptDiag and update hotness there.
+  void computeHotness(DiagnosticInfoIROptimization &OptDiag);
+
+  /// \brief Only allow verbose messages if we know we're filtering by hotness
+  /// (BFI is only set in this case).
+  bool shouldEmitVerbose() { return BFI != nullptr; }
+
+  OptimizationRemarkEmitter(const OptimizationRemarkEmitter &) = delete;
+  void operator=(const OptimizationRemarkEmitter &) = delete;
+};
+
+/// \brief Add a small namespace to avoid name clashes with the classes used in
+/// the streaming interface.  We want these names to be short for better
+/// readability and ease of writing.
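+///
+/// Illustrative usage sketch (not part of the original header); DEBUG_TYPE,
+/// the loop `L`, and the unroll factor `Count` are assumptions:
+/// \code
+///   ORE.emit([&]() {
+///     return OptimizationRemark(DEBUG_TYPE, "Unrolled", L->getStartLoc(),
+///                               L->getHeader())
+///            << "unrolled loop by " << ore::NV("UnrollCount", Count);
+///   });
+/// \endcode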
+namespace ore {
+using NV = DiagnosticInfoOptimizationBase::Argument;
+using setIsVerbose = DiagnosticInfoOptimizationBase::setIsVerbose;
+using setExtraArgs = DiagnosticInfoOptimizationBase::setExtraArgs;
+} // end namespace ore
+
+/// OptimizationRemarkEmitter legacy analysis pass
+///
+/// Note that this pass shouldn't generally be marked as preserved by other
+/// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
+/// could be freed.
+class OptimizationRemarkEmitterWrapperPass : public FunctionPass {
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+public:
+  OptimizationRemarkEmitterWrapperPass();
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  OptimizationRemarkEmitter &getORE() {
+    assert(ORE && "pass not run yet");
+    return *ORE;
+  }
+
+  static char ID;
+};
+
+class OptimizationRemarkEmitterAnalysis
+    : public AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis> {
+  friend AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis>;
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result typedef for this analysis pass.
+  typedef OptimizationRemarkEmitter Result;
+
+  /// \brief Run the analysis pass over a function and produce BFI.
+  Result run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+#endif // LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h b/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h
new file mode 100644
index 0000000..2e716af
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h
@@ -0,0 +1,67 @@
+//===- llvm/Analysis/OrderedBasicBlock.h --------------------- -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrderedBasicBlock class. OrderedBasicBlock maintains
+// an interface where clients can query if one instruction comes before another
+// in a BasicBlock. Since BasicBlock currently lacks a reliable way to query
+// the relative position of instructions, one can use OrderedBasicBlock for
+// such queries. OrderedBasicBlock is lazily built on a source BasicBlock and
+// maintains an internal Instruction -> Position map. An OrderedBasicBlock
+// instance should be discarded whenever the source BasicBlock changes.
+//
+// It's currently used by the CaptureTracker in order to find relative
+// positions of a pair of instructions inside a BasicBlock.
+//
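+// Illustrative usage sketch (not part of the original header); A and B are
+// assumed to be instructions in BB:
+//
+//   OrderedBasicBlock OBB(BB);
+//   bool AComesFirst = OBB.dominates(A, B); // false if A == B
+//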
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ORDEREDBASICBLOCK_H
+#define LLVM_ANALYSIS_ORDEREDBASICBLOCK_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/BasicBlock.h"
+
+namespace llvm {
+
+class Instruction;
+class BasicBlock;
+
+class OrderedBasicBlock {
+private:
+  /// \brief Map an instruction to its position in a BasicBlock.
+  SmallDenseMap<const Instruction *, unsigned, 32> NumberedInsts;
+
+  /// \brief Keep track of the last instruction inserted into \p NumberedInsts.
+  /// It speeds up queries for uncached instructions by providing a start point
+  /// for new queries in OrderedBasicBlock::comesBefore.
+  BasicBlock::const_iterator LastInstFound;
+
+  /// \brief The position/number to tag the next instruction to be found.
+  unsigned NextInstPos;
+
+  /// \brief The source BasicBlock to map.
+  const BasicBlock *BB;
+
+  /// \brief Given no cached results, find if \p A comes before \p B in \p BB.
+  /// Cache and number instructions while walking \p BB.
+  bool comesBefore(const Instruction *A, const Instruction *B);
+
+public:
+  OrderedBasicBlock(const BasicBlock *BasicB);
+
+  /// \brief Find out whether \p A dominates \p B, meaning whether \p A
+  /// comes before \p B in \p BB. This is a simplification that considers
+  /// cached instruction positions and ignores other basic blocks, being
+  /// only relevant for comparing relative instruction positions inside \p BB.
+  /// Returns false for A == B.
+  bool dominates(const Instruction *A, const Instruction *B);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h b/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h
new file mode 100644
index 0000000..f0f34f3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h
@@ -0,0 +1,127 @@
+//===- PHITransAddr.h - PHI Translation for Addresses -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PHITransAddr class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PHITRANSADDR_H
+#define LLVM_ANALYSIS_PHITRANSADDR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+  class AssumptionCache;
+  class DominatorTree;
+  class DataLayout;
+  class TargetLibraryInfo;
+
+/// PHITransAddr - An address value which tracks and handles phi translation.
+/// As we walk "up" the CFG through predecessors, we need to ensure that the
+/// address we're tracking is kept up to date.  For example, if we're analyzing
+/// an address of "&A[i]" and walk through the definition of 'i' which is a PHI
+/// node, we *must* phi translate i to get "&A[j]" or else we will analyze an
+/// incorrect pointer in the predecessor block.
+///
+/// This is designed to be a relatively small object that lives on the stack and
+/// is copyable.
+///
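+/// Illustrative usage sketch (not part of the original header); Ptr, DL, AC,
+/// CurBB, PredBB and DT are assumptions:
+/// \code
+///   PHITransAddr Address(Ptr, DL, AC);
+///   if (Address.NeedsPHITranslationFromBlock(CurBB)) {
+///     // Note: PHITranslateValue returns true on *failure*.
+///     if (!Address.PHITranslateValue(CurBB, PredBB, &DT,
+///                                    /*MustDominate=*/false)) {
+///       Value *Translated = Address.getAddr(); // the phi-translated address
+///     }
+///   }
+/// \endcode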
+class PHITransAddr {
+  /// Addr - The actual address we're analyzing.
+  Value *Addr;
+
+  /// The DataLayout we are playing with.
+  const DataLayout &DL;
+
+  /// TLI - The target library info if known, otherwise null.
+  const TargetLibraryInfo *TLI;
+
+  /// A cache of @llvm.assume calls used by SimplifyInstruction.
+  AssumptionCache *AC;
+
+  /// InstInputs - The inputs for our symbolic address.
+  SmallVector<Instruction*, 4> InstInputs;
+
+public:
+  PHITransAddr(Value *addr, const DataLayout &DL, AssumptionCache *AC)
+      : Addr(addr), DL(DL), TLI(nullptr), AC(AC) {
+    // If the address is an instruction, the whole thing is considered an input.
+    if (Instruction *I = dyn_cast<Instruction>(Addr))
+      InstInputs.push_back(I);
+  }
+
+  Value *getAddr() const { return Addr; }
+
+  /// NeedsPHITranslationFromBlock - Return true if moving from the specified
+  /// BasicBlock to its predecessors requires PHI translation.
+  bool NeedsPHITranslationFromBlock(BasicBlock *BB) const {
+    // We do need translation if one of our input instructions is defined in
+    // this block.
+    for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
+      if (InstInputs[i]->getParent() == BB)
+        return true;
+    return false;
+  }
+
+  /// IsPotentiallyPHITranslatable - If this needs PHI translation, return true
+  /// if we have some hope of doing it.  This should be used as a filter to
+  /// avoid calling PHITranslateValue in hopeless situations.
+  bool IsPotentiallyPHITranslatable() const;
+
+  /// PHITranslateValue - PHI translate the current address up the CFG from
+  /// CurBB to Pred, updating our state to reflect any needed changes.  If
+  /// 'MustDominate' is true, the translated value must dominate
+  /// PredBB.  This returns true on failure and sets Addr to null.
+  bool PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB,
+                         const DominatorTree *DT, bool MustDominate);
+
+  /// PHITranslateWithInsertion - PHI translate this value into the specified
+  /// predecessor block, inserting a computation of the value if it is
+  /// unavailable.
+  ///
+  /// All newly created instructions are added to the NewInsts list.  This
+  /// returns null on failure.
+  ///
+  Value *PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
+                                   const DominatorTree &DT,
+                                   SmallVectorImpl<Instruction *> &NewInsts);
+
+  void dump() const;
+
+  /// Verify - Check internal consistency of this data structure.  If the
+  /// structure is valid, it returns true.  If invalid, it prints errors and
+  /// returns false.
+  bool Verify() const;
+
+private:
+  Value *PHITranslateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB,
+                             const DominatorTree *DT);
+
+  /// InsertPHITranslatedSubExpr - Insert a computation of the PHI translated
+  /// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
+  /// block.  All newly created instructions are added to the NewInsts list.
+  /// This returns null on failure.
+  ///
+  Value *InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
+                                    BasicBlock *PredBB, const DominatorTree &DT,
+                                    SmallVectorImpl<Instruction *> &NewInsts);
+
+  /// AddAsInput - If the specified value is an instruction, add it as an input.
+  Value *AddAsInput(Value *V) {
+    // If V is an instruction, it is now an input.
+    if (Instruction *VI = dyn_cast<Instruction>(V))
+      InstInputs.push_back(VI);
+    return V;
+  }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/Passes.h b/linux-x64/clang/include/llvm/Analysis/Passes.h
new file mode 100644
index 0000000..09b28a0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Passes.h
@@ -0,0 +1,109 @@
+//===-- llvm/Analysis/Passes.h - Constructors for analyses ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the analysis libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PASSES_H
+#define LLVM_ANALYSIS_PASSES_H
+
+namespace llvm {
+  class FunctionPass;
+  class ImmutablePass;
+  class LoopPass;
+  class ModulePass;
+  class Pass;
+  class PassInfo;
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createObjCARCAAWrapperPass - This pass implements ObjC-ARC-based
+  // alias analysis.
+  //
+  ImmutablePass *createObjCARCAAWrapperPass();
+
+  FunctionPass *createPAEvalPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  /// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
+  /// pass.
+  FunctionPass *createLazyValueInfoPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createDependenceAnalysisWrapperPass - This creates an instance of the
+  // DependenceAnalysisWrapper pass.
+  //
+  FunctionPass *createDependenceAnalysisWrapperPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createCostModelAnalysisPass - This creates an instance of the
+  // CostModelAnalysis pass.
+  //
+  FunctionPass *createCostModelAnalysisPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createDelinearizationPass - This pass attempts to restore
+  // multidimensional array indices from linearized expressions.
+  //
+  FunctionPass *createDelinearizationPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createDivergenceAnalysisPass - This pass determines which branches in a GPU
+  // program are divergent.
+  //
+  FunctionPass *createDivergenceAnalysisPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // Minor pass prototypes, allowing us to expose them through bugpoint and
+  // analyze.
+  FunctionPass *createInstCountPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createRegionInfoPass - This pass finds all single entry single exit regions
+  // in a function and builds the region hierarchy.
+  //
+  FunctionPass *createRegionInfoPass();
+
+  // Print module-level debug info metadata in human-readable form.
+  ModulePass *createModuleDebugInfoPrinterPass();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createMemDepPrinter - This pass exhaustively collects all memdep
+  // information and prints it with -analyze.
+  //
+  FunctionPass *createMemDepPrinter();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createMemDerefPrinter - This pass collects memory dereferenceability
+  // information and prints it with -analyze.
+  //
+  FunctionPass *createMemDerefPrinter();
+
+  //===--------------------------------------------------------------------===//
+  //
+  // createMustExecutePrinter - This pass collects information about which
+  // instructions within a loop are guaranteed to execute if the loop header is
+  // entered and prints it with -analyze.
+  //
+  FunctionPass *createMustExecutePrinter();
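+
+  // Illustrative usage sketch (not part of the original header): scheduling
+  // one of these analyses with the legacy pass manager; assumes
+  // llvm/IR/LegacyPassManager.h is available and M is a Module.
+  //
+  //   legacy::PassManager PM;
+  //   PM.add(createRegionInfoPass());
+  //   PM.run(M);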
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/PostDominators.h b/linux-x64/clang/include/llvm/Analysis/PostDominators.h
new file mode 100644
index 0000000..9a8c4d7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/PostDominators.h
@@ -0,0 +1,114 @@
+//=- llvm/Analysis/PostDominators.h - Post Dominator Calculation --*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post dominance information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
+#define LLVM_ANALYSIS_POSTDOMINATORS_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class Function;
+class raw_ostream;
+
+/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used to
+/// compute the post-dominator tree.
+class PostDominatorTree : public PostDomTreeBase<BasicBlock> {
+public:
+  using Base = PostDomTreeBase<BasicBlock>;
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+};
+
+/// \brief Analysis pass which computes a \c PostDominatorTree.
+class PostDominatorTreeAnalysis
+    : public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
+  friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result type for this analysis pass.
+  using Result = PostDominatorTree;
+
+  /// \brief Run the analysis pass over a function and produce a post dominator
+  ///        tree.
+  PostDominatorTree run(Function &F, FunctionAnalysisManager &);
+};
+
+/// \brief Printer pass for the \c PostDominatorTree.
+class PostDominatorTreePrinterPass
+    : public PassInfoMixin<PostDominatorTreePrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit PostDominatorTreePrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+struct PostDominatorTreeWrapperPass : public FunctionPass {
+  static char ID; // Pass identification, replacement for typeid
+
+  PostDominatorTree DT;
+
+  PostDominatorTreeWrapperPass() : FunctionPass(ID) {
+    initializePostDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  PostDominatorTree &getPostDomTree() { return DT; }
+  const PostDominatorTree &getPostDomTree() const { return DT; }
+
+  bool runOnFunction(Function &F) override;
+
+  void verifyAnalysis() const override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  void releaseMemory() override {
+    DT.releaseMemory();
+  }
+
+  void print(raw_ostream &OS, const Module*) const override;
+};
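+
+// Illustrative usage sketch (not part of the original header): querying
+// post-dominance from a legacy FunctionPass that has declared a dependency on
+// PostDominatorTreeWrapperPass in getAnalysisUsage; BBa and BBb are
+// assumptions.
+//
+//   auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
+//   if (PDT.dominates(BBa, BBb)) {
+//     // BBa post-dominates BBb.
+//   }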
+
+FunctionPass* createPostDomTree();
+
+template <> struct GraphTraits<PostDominatorTree*>
+  : public GraphTraits<DomTreeNode*> {
+  static NodeRef getEntryNode(PostDominatorTree *DT) {
+    return DT->getRootNode();
+  }
+
+  static nodes_iterator nodes_begin(PostDominatorTree *N) {
+    if (getEntryNode(N))
+      return df_begin(getEntryNode(N));
+    else
+      return df_end(getEntryNode(N));
+  }
+
+  static nodes_iterator nodes_end(PostDominatorTree *N) {
+    return df_end(getEntryNode(N));
+  }
+};
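+
+// Illustrative iteration sketch (not part of the original header): the
+// GraphTraits specialization above lets the generic graph iterators walk the
+// post-dominator tree; PDT is assumed to be a PostDominatorTree.
+//
+//   for (DomTreeNode *N : depth_first(&PDT)) {
+//     if (BasicBlock *BB = N->getBlock()) {
+//       // ... visit BB (the virtual root yields a null block)
+//     }
+//   }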
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_POSTDOMINATORS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h b/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h
new file mode 100644
index 0000000..2930334
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -0,0 +1,167 @@
+//===- llvm/Analysis/ProfileSummaryInfo.h - profile summary ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that provides access to profile summary
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PROFILE_SUMMARY_INFO_H
+#define LLVM_ANALYSIS_PROFILE_SUMMARY_INFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+class BasicBlock;
+class BlockFrequencyInfo;
+class CallSite;
+class ProfileSummary;
+/// \brief Analysis providing profile information.
+///
+/// This is an immutable analysis pass that provides the ability to query
+/// global (program-level) profile information. The main APIs are isHotCount
+/// and isColdCount, which tell whether a given profile count is considered
+/// hot or cold based on the profile summary. This also provides convenience
+/// methods to check whether a function is hot or cold.
+
+// FIXME: Provide convenience methods to determine hotness/coldness of other IR
+// units. This would require making this depend on BFI.
+class ProfileSummaryInfo {
+private:
+  Module &M;
+  std::unique_ptr<ProfileSummary> Summary;
+  bool computeSummary();
+  void computeThresholds();
+  // Count thresholds to answer isHotCount and isColdCount queries.
+  Optional<uint64_t> HotCountThreshold, ColdCountThreshold;
+  // True if the working set size of the code is considered huge,
+  // because the number of profile counts required to reach the hot
+  // percentile is above a huge threshold.
+  Optional<bool> HasHugeWorkingSetSize;
+
+public:
+  ProfileSummaryInfo(Module &M) : M(M) {}
+  ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
+      : M(Arg.M), Summary(std::move(Arg.Summary)) {}
+
+  /// \brief Returns true if profile summary is available.
+  bool hasProfileSummary() { return computeSummary(); }
+
+  /// \brief Returns true if module \c M has a sample profile.
+  bool hasSampleProfile() {
+    return hasProfileSummary() &&
+           Summary->getKind() == ProfileSummary::PSK_Sample;
+  }
+
+  /// \brief Returns true if module \c M has an instrumentation profile.
+  bool hasInstrumentationProfile() {
+    return hasProfileSummary() &&
+           Summary->getKind() == ProfileSummary::PSK_Instr;
+  }
+
+  /// Handle the invalidation of this information.
+  ///
+  /// When used as a result of \c ProfileSummaryAnalysis this method will be
+  /// called when the module this was computed for changes. Since profile
+  /// summary is immutable after it is annotated on the module, we return false
+  /// here.
+  bool invalidate(Module &, const PreservedAnalyses &,
+                  ModuleAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  /// Returns the profile count for \p CallInst.
+  Optional<uint64_t> getProfileCount(const Instruction *CallInst,
+                                     BlockFrequencyInfo *BFI);
+  /// Returns true if the working set size of the code is considered huge.
+  bool hasHugeWorkingSetSize();
+  /// \brief Returns true if \p F has hot function entry.
+  bool isFunctionEntryHot(const Function *F);
+  /// Returns true if \p F contains hot code.
+  bool isFunctionHotInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
+  /// \brief Returns true if \p F has cold function entry.
+  bool isFunctionEntryCold(const Function *F);
+  /// Returns true if \p F contains only cold code.
+  bool isFunctionColdInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
+  /// \brief Returns true if count \p C is considered hot.
+  bool isHotCount(uint64_t C);
+  /// \brief Returns true if count \p C is considered cold.
+  bool isColdCount(uint64_t C);
+  /// \brief Returns true if BasicBlock \p B is considered hot.
+  bool isHotBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
+  /// \brief Returns true if BasicBlock \p B is considered cold.
+  bool isColdBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
+  /// \brief Returns true if CallSite \p CS is considered hot.
+  bool isHotCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
+  /// \brief Returns true if Callsite \p CS is considered cold.
+  bool isColdCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
+  /// \brief Returns HotCountThreshold if set.
+  uint64_t getHotCountThreshold() {
+    return HotCountThreshold ? HotCountThreshold.getValue() : 0;
+  }
+  /// \brief Returns ColdCountThreshold if set.
+  uint64_t getColdCountThreshold() {
+    return ColdCountThreshold ? ColdCountThreshold.getValue() : 0;
+  }
+};
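+
+// Illustrative usage sketch (not part of the original header); PSI is assumed
+// to come from ProfileSummaryAnalysis or the wrapper pass below, and F from
+// the current module.
+//
+//   if (PSI->hasProfileSummary() && PSI->isFunctionEntryHot(&F)) {
+//     // ... treat F as hot, e.g. loosen size-based optimization limits
+//   }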
+
+/// An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
+class ProfileSummaryInfoWrapperPass : public ImmutablePass {
+  std::unique_ptr<ProfileSummaryInfo> PSI;
+
+public:
+  static char ID;
+  ProfileSummaryInfoWrapperPass();
+
+  ProfileSummaryInfo *getPSI() {
+    return &*PSI;
+  }
+
+  bool doInitialization(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+};
+
+/// An analysis pass based on the new PM to deliver ProfileSummaryInfo.
+class ProfileSummaryAnalysis
+    : public AnalysisInfoMixin<ProfileSummaryAnalysis> {
+public:
+  typedef ProfileSummaryInfo Result;
+
+  Result run(Module &M, ModuleAnalysisManager &);
+
+private:
+  friend AnalysisInfoMixin<ProfileSummaryAnalysis>;
+  static AnalysisKey Key;
+};
+
+/// \brief Printer pass that uses \c ProfileSummaryAnalysis.
+class ProfileSummaryPrinterPass
+    : public PassInfoMixin<ProfileSummaryPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit ProfileSummaryPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h b/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h
new file mode 100644
index 0000000..f934aa6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h
@@ -0,0 +1,303 @@
+//===- PtrUseVisitor.h - InstVisitors over a pointers uses ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides a collection of visitors which walk the (instruction)
+/// uses of a pointer. These visitors all provide the same essential behavior
+/// as an InstVisitor with similar template-based flexibility and
+/// implementation strategies.
+///
+/// These can be used, for example, to quickly analyze the uses of an alloca,
+/// global variable, or function argument.
+///
+/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
+#define LLVM_ANALYSIS_PTRUSEVISITOR_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <type_traits>
+
+namespace llvm {
+
+namespace detail {
+
+/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
+///
+/// See \c PtrUseVisitor for the public interface and detailed comments about
+/// usage. This class is just a helper base class which is not templated and
+/// contains all common code to be shared between different instantiations of
+/// PtrUseVisitor.
+class PtrUseVisitorBase {
+public:
+  /// \brief This class provides information about the result of a visit.
+  ///
+  /// After walking all the users (recursively) of a pointer, the basic
+  /// infrastructure records some commonly useful information such as escape
+  /// analysis and whether the visit completed or aborted early.
+  class PtrInfo {
+  public:
+    PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
+
+    /// \brief Reset the pointer info, clearing all state.
+    void reset() {
+      AbortedInfo.setPointer(nullptr);
+      AbortedInfo.setInt(false);
+      EscapedInfo.setPointer(nullptr);
+      EscapedInfo.setInt(false);
+    }
+
+    /// \brief Did we abort the visit early?
+    bool isAborted() const { return AbortedInfo.getInt(); }
+
+    /// \brief Is the pointer escaped at some point?
+    bool isEscaped() const { return EscapedInfo.getInt(); }
+
+    /// \brief Get the instruction causing the visit to abort.
+    /// \returns a pointer to the instruction causing the abort if one is
+    /// available; otherwise returns null.
+    Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
+
+    /// \brief Get the instruction causing the pointer to escape.
+    /// \returns a pointer to the instruction which escapes the pointer if one
+    /// is available; otherwise returns null.
+    Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
+
+    /// \brief Mark the visit as aborted. Intended for use in a void return.
+    /// \param I The instruction which caused the visit to abort, if available.
+    void setAborted(Instruction *I = nullptr) {
+      AbortedInfo.setInt(true);
+      AbortedInfo.setPointer(I);
+    }
+
+    /// \brief Mark the pointer as escaped. Intended for use in a void return.
+    /// \param I The instruction which escapes the pointer, if available.
+    void setEscaped(Instruction *I = nullptr) {
+      EscapedInfo.setInt(true);
+      EscapedInfo.setPointer(I);
+    }
+
+    /// \brief Mark the pointer as escaped, and the visit as aborted. Intended
+    /// for use in a void return.
+    /// \param I The instruction which both escapes the pointer and aborts the
+    /// visit, if available.
+    void setEscapedAndAborted(Instruction *I = nullptr) {
+      setEscaped(I);
+      setAborted(I);
+    }
+
+  private:
+    PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
+  };
+
+protected:
+  const DataLayout &DL;
+
+  /// \name Visitation infrastructure
+  /// @{
+
+  /// \brief The info collected about the pointer being visited thus far.
+  PtrInfo PI;
+
+  /// \brief A struct of the data needed to visit a particular use.
+  ///
+  /// This is used to maintain a worklist of to-visit uses, which makes the
+  /// visit iterative rather than recursive.
+  struct UseToVisit {
+    using UseAndIsOffsetKnownPair = PointerIntPair<Use *, 1, bool>;
+
+    UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
+    APInt Offset;
+  };
+
+  /// \brief The worklist of to-visit uses.
+  SmallVector<UseToVisit, 8> Worklist;
+
+  /// \brief A set of visited uses to break cycles in unreachable code.
+  SmallPtrSet<Use *, 8> VisitedUses;
+
+  /// @}
+
+  /// \name Per-visit state
+  /// This state is reset for each instruction visited.
+  /// @{
+
+  /// \brief The use currently being visited.
+  Use *U;
+
+  /// \brief True if we have a known constant offset for the use currently
+  /// being visited.
+  bool IsOffsetKnown;
+
+  /// \brief The constant offset of the use if that is known.
+  APInt Offset;
+
+  /// @}
+
+  /// Note that the constructor is protected because this class must be used as
+  /// a base class; instances of it cannot be created directly.
+  PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
+
+  /// \brief Enqueue the users of this instruction in the visit worklist.
+  ///
+  /// This will visit the users with the same offset of the current visit
+  /// (including an unknown offset if that is the current state).
+  void enqueueUsers(Instruction &I);
+
+  /// \brief Walk the operands of a GEP and adjust the offset as appropriate.
+  ///
+  /// This routine does the heavy lifting of the pointer walk by computing
+  /// offsets and looking through GEPs.
+  bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
+};
+
+} // end namespace detail
+
+/// \brief A base class for visitors over the uses of a pointer value.
+///
+/// Once constructed, a user can call \c visit on a pointer value, and this
+/// will walk its uses and visit each instruction using an InstVisitor. It also
+/// provides visit methods which will recurse through any pointer-to-pointer
+/// transformations such as GEPs and bitcasts.
+///
+/// During the visit, the current Use* being visited is available to the
+/// subclass, as well as the current offset from the original base pointer if
+/// known.
+///
+/// The recursive visit of uses is accomplished with a worklist, so the only
+/// ordering guarantee is that an instruction is visited before any uses of it
+/// are visited. Note that this does *not* mean before any of its users are
+/// visited! This is because users can be visited multiple times due to
+/// multiple, different uses of pointers derived from the same base.
+///
+/// A particular Use will only be visited once, but a User may be visited
+/// multiple times, once per Use. These visits may notably have different
+/// offsets.
+///
+/// All visit methods on the underlying InstVisitor return a boolean. This
+/// return value short-circuits the visit, stopping it immediately.
+///
+/// FIXME: Generalize this for all values rather than just instructions.
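+///
+/// Illustrative usage sketch (not part of the original header); EscapeFinder
+/// is a hypothetical client and AI an alloca instruction:
+/// \code
+///   struct EscapeFinder : public PtrUseVisitor<EscapeFinder> {
+///     EscapeFinder(const DataLayout &DL) : PtrUseVisitor<EscapeFinder>(DL) {}
+///   };
+///   EscapeFinder EF(DL);
+///   if (EF.visitPtr(*AI).isEscaped()) {
+///     // ... the pointer escapes, e.g. it was stored or passed to a call
+///   }
+/// \endcode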
+template <typename DerivedT>
+class PtrUseVisitor : protected InstVisitor<DerivedT>,
+                      public detail::PtrUseVisitorBase {
+  friend class InstVisitor<DerivedT>;
+
+  using Base = InstVisitor<DerivedT>;
+
+public:
+  PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {
+    static_assert(std::is_base_of<PtrUseVisitor, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+  }
+
+  /// \brief Recursively visit the uses of the given pointer.
+  /// \returns An info struct about the pointer. See \c PtrInfo for details.
+  PtrInfo visitPtr(Instruction &I) {
+    // This must be a pointer type. Get an integer type suitable to hold
+    // offsets on this pointer.
+    // FIXME: Support a vector of pointers.
+    assert(I.getType()->isPointerTy());
+    IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
+    IsOffsetKnown = true;
+    Offset = APInt(IntPtrTy->getBitWidth(), 0);
+    PI.reset();
+
+    // Enqueue the uses of this pointer.
+    enqueueUsers(I);
+
+    // Visit all the uses off the worklist until it is empty.
+    while (!Worklist.empty()) {
+      UseToVisit ToVisit = Worklist.pop_back_val();
+      U = ToVisit.UseAndIsOffsetKnown.getPointer();
+      IsOffsetKnown = ToVisit.UseAndIsOffsetKnown.getInt();
+      if (IsOffsetKnown)
+        Offset = std::move(ToVisit.Offset);
+
+      Instruction *I = cast<Instruction>(U->getUser());
+      static_cast<DerivedT*>(this)->visit(I);
+      if (PI.isAborted())
+        break;
+    }
+    return PI;
+  }
+
+protected:
+  void visitStoreInst(StoreInst &SI) {
+    if (SI.getValueOperand() == U->get())
+      PI.setEscaped(&SI);
+  }
+
+  void visitBitCastInst(BitCastInst &BC) {
+    enqueueUsers(BC);
+  }
+
+  void visitPtrToIntInst(PtrToIntInst &I) {
+    PI.setEscaped(&I);
+  }
+
+  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+    if (GEPI.use_empty())
+      return;
+
+    // If we can't walk the GEP, clear the offset.
+    if (!adjustOffsetForGEP(GEPI)) {
+      IsOffsetKnown = false;
+      Offset = APInt();
+    }
+
+    // Enqueue the users now that the offset has been adjusted.
+    enqueueUsers(GEPI);
+  }
+
+  // No-op intrinsics which we know don't escape the pointer to logic in
+  // some other function.
+  void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
+  void visitMemIntrinsic(MemIntrinsic &I) {}
+  void visitIntrinsicInst(IntrinsicInst &II) {
+    switch (II.getIntrinsicID()) {
+    default:
+      return Base::visitIntrinsicInst(II);
+
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+      return; // No-op intrinsics.
+    }
+  }
+
+  // Generically, arguments to calls and invokes escape the pointer to some
+  // other function. Mark that.
+  void visitCallSite(CallSite CS) {
+    PI.setEscaped(CS.getInstruction());
+    Base::visitCallSite(CS);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_PTRUSEVISITOR_H
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionInfo.h b/linux-x64/clang/include/llvm/Analysis/RegionInfo.h
new file mode 100644
index 0000000..4bf64d1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/RegionInfo.h
@@ -0,0 +1,1032 @@
+//===- RegionInfo.h - SESE region analysis ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Calculate a program structure tree built out of single entry single exit
+// regions.
+// The basic ideas are taken from "The Program Structure Tree - Richard Johnson,
+// David Pearson, Keshav Pingali - 1994", enriched with ideas from "The
+// Refined Process Structure Tree - Jussi Vanhatalo, Hagen Voelzer, Jana
+// Koehler - 2009".
+// The algorithm used to calculate these data structures, however, is
+// completely different, as it takes advantage of information already available
+// in the (post)dominator tree and dominance frontier passes. This leads to a
+// simpler and in practice hopefully better performing algorithm. The runtimes
+// of the algorithms described in the papers above are both linear in graph
+// size, O(V+E), whereas this algorithm is not, as the dominance frontier
+// information itself is not; in practice, though, its runtime seems to be on
+// the order of magnitude of dominance tree calculation.
+//
+// WARNING: LLVM is generally very concerned about compile time such that
+//          the use of additional analysis passes in the default
+//          optimization sequence is avoided as much as possible.
+//          Specifically, if you do not need RegionInfo and dominance
+//          information would be sufficient, please base your work only on
+//          the dominator tree. Most passes maintain it, so using it often
+//          has near-zero cost. In contrast, RegionInfo is not available by
+//          default, is not maintained by existing transformations, and
+//          there is no intention to do so.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONINFO_H
+#define LLVM_ANALYSIS_REGIONINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+class DominanceFrontier;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class PostDominatorTree;
+class Region;
+template <class RegionTr> class RegionBase;
+class RegionInfo;
+template <class RegionTr> class RegionInfoBase;
+class RegionNode;
+
+// Class to be specialized for different users of RegionInfo
+// (i.e. BasicBlocks or MachineBasicBlocks). This is only to avoid needing to
+// pass around an unreasonable number of template parameters.
+template <class FuncT_>
+struct RegionTraits {
+  // FuncT
+  // BlockT
+  // RegionT
+  // RegionNodeT
+  // RegionInfoT
+  using BrokenT = typename FuncT_::UnknownRegionTypeError;
+};
+
+template <>
+struct RegionTraits<Function> {
+  using FuncT = Function;
+  using BlockT = BasicBlock;
+  using RegionT = Region;
+  using RegionNodeT = RegionNode;
+  using RegionInfoT = RegionInfo;
+  using DomTreeT = DominatorTree;
+  using DomTreeNodeT = DomTreeNode;
+  using DomFrontierT = DominanceFrontier;
+  using PostDomTreeT = PostDominatorTree;
+  using InstT = Instruction;
+  using LoopT = Loop;
+  using LoopInfoT = LoopInfo;
+
+  static unsigned getNumSuccessors(BasicBlock *BB) {
+    return BB->getTerminator()->getNumSuccessors();
+  }
+};
+
+/// @brief Marker class to iterate over the elements of a Region in flat mode.
+///
+/// Using this class selects iteration in Flat mode; omitting it selects
+/// non-flat iteration.  During a Flat mode iteration all Regions are entered
+/// and the iteration returns every BasicBlock.  If Flat mode is not selected,
+/// just one RegionNode containing the subregion is returned for SubRegions.
+template <class GraphType>
+class FlatIt {};
+
+/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
+/// Region.
+template <class Tr>
+class RegionNodeBase {
+  friend class RegionBase<Tr>;
+
+public:
+  using BlockT = typename Tr::BlockT;
+  using RegionT = typename Tr::RegionT;
+
+private:
+  /// This is the entry basic block that starts this region node.  If this is a
+  /// BasicBlock RegionNode, then entry is just the basic block that this
+  /// RegionNode represents.  Otherwise it is the entry of this (Sub)RegionNode.
+  ///
+  /// In the BBtoRegionNode map of the parent of this node, BB will always map
+  /// to this node no matter which kind of node this one is.
+  ///
+  /// The node can hold either a Region or a BasicBlock.
+  /// One bit records whether this RegionNode is a subregion or a BasicBlock
+  /// RegionNode.
+  PointerIntPair<BlockT *, 1, bool> entry;
+
+  /// @brief The parent Region of this RegionNode.
+  /// @see getParent()
+  RegionT *parent;
+
+protected:
+  /// @brief Create a RegionNode.
+  ///
+  /// @param Parent      The parent of this RegionNode.
+  /// @param Entry       The entry BasicBlock of the RegionNode.  If this
+  ///                    RegionNode represents a BasicBlock, this is the
+  ///                    BasicBlock itself.  If it represents a subregion, this
+  ///                    is the entry BasicBlock of the subregion.
+  /// @param isSubRegion If this RegionNode represents a SubRegion.
+  inline RegionNodeBase(RegionT *Parent, BlockT *Entry,
+                        bool isSubRegion = false)
+      : entry(Entry, isSubRegion), parent(Parent) {}
+
+public:
+  RegionNodeBase(const RegionNodeBase &) = delete;
+  RegionNodeBase &operator=(const RegionNodeBase &) = delete;
+
+  /// @brief Get the parent Region of this RegionNode.
+  ///
+  /// The parent Region is the Region this RegionNode belongs to. If for
+  /// example a BasicBlock is element of two Regions, there exist two
+  /// RegionNodes for this BasicBlock. Each with the getParent() function
+  /// pointing to the Region this RegionNode belongs to.
+  ///
+  /// @return Get the parent Region of this RegionNode.
+  inline RegionT *getParent() const { return parent; }
+
+  /// @brief Get the entry BasicBlock of this RegionNode.
+  ///
+  /// If this RegionNode represents a BasicBlock this is just the BasicBlock
+  /// itself, otherwise we return the entry BasicBlock of the Subregion
+  ///
+  /// @return The entry BasicBlock of this RegionNode.
+  inline BlockT *getEntry() const { return entry.getPointer(); }
+
+  /// @brief Get the content of this RegionNode.
+  ///
+  /// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
+  /// check the type of the content with the isSubRegion() function call.
+  ///
+  /// @return The content of this RegionNode.
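+  ///
+  /// Illustrative usage sketch (not part of the original header); Node is
+  /// assumed to point at a RegionNode:
+  /// \code
+  ///   if (Node->isSubRegion()) {
+  ///     RegionT *SubR = Node->getNodeAs<RegionT>();
+  ///     // ... descend into the subregion
+  ///   } else {
+  ///     BlockT *BB = Node->getNodeAs<BlockT>();
+  ///     // ... handle the basic block
+  ///   }
+  /// \endcode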
+  template <class T> inline T *getNodeAs() const;
+
+  /// @brief Is this RegionNode a subregion?
+  ///
+  /// @return True if it contains a subregion. False if it contains a
+  ///         BasicBlock.
+  inline bool isSubRegion() const { return entry.getInt(); }
+};
+
+//===----------------------------------------------------------------------===//
+/// @brief A single entry single exit Region.
+///
+/// A Region is a connected subgraph of a control flow graph that has exactly
+/// two connections to the remaining graph. It can be used to analyze or
+/// optimize parts of the control flow graph.
+///
+/// A <em> simple Region </em> is connected to the remaining graph by just two
+/// edges. One edge entering the Region and another one leaving the Region.
+///
+/// An <em> extended Region </em> (or just Region) is a subgraph that can be
+/// transformed into a simple Region. The transformation is done by adding
+/// BasicBlocks that merge several entry or exit edges so that after the merge
+/// just one entry and one exit edge exists.
+///
+/// The \e Entry of a Region is the first BasicBlock that is passed after
+/// entering the Region. It is an element of the Region. The entry BasicBlock
+/// dominates all BasicBlocks in the Region.
+///
+/// The \e Exit of a Region is the first BasicBlock that is passed after
+/// leaving the Region. It is not an element of the Region. The exit BasicBlock
+/// postdominates all BasicBlocks in the Region.
+///
+/// A <em> canonical Region </em> cannot be constructed by combining smaller
+/// Regions.
+///
+/// Region A is the \e parent of Region B, if B is completely contained in A.
+///
+/// Two canonical Regions either do not intersect at all or one is
+/// the parent of the other.
+///
+/// The <em> Program Structure Tree</em> is a graph (V, E) where V is the set of
+/// Regions in the control flow graph and E is the \e parent relation of these
+/// Regions.
+///
+/// Example:
+///
+/// \verbatim
+/// A simple control flow graph, that contains two regions.
+///
+///        1
+///       / |
+///      2   |
+///     / \   3
+///    4   5  |
+///    |   |  |
+///    6   7  8
+///     \  | /
+///      \ |/       Region A: 1 -> 9 {1,2,3,4,5,6,7,8}
+///        9        Region B: 2 -> 9 {2,4,5,6,7}
+/// \endverbatim
+///
+/// You can obtain more examples by either calling
+///
+/// <tt> "opt -regions -analyze anyprogram.ll" </tt>
+/// or
+/// <tt> "opt -view-regions-only anyprogram.ll" </tt>
+///
+/// on any LLVM file you are interested in.
+///
+/// The first call returns a textual representation of the program structure
+/// tree, the second one creates a graphical representation using graphviz.
+template <class Tr>
+class RegionBase : public RegionNodeBase<Tr> {
+  friend class RegionInfoBase<Tr>;
+
+  using FuncT = typename Tr::FuncT;
+  using BlockT = typename Tr::BlockT;
+  using RegionInfoT = typename Tr::RegionInfoT;
+  using RegionT = typename Tr::RegionT;
+  using RegionNodeT = typename Tr::RegionNodeT;
+  using DomTreeT = typename Tr::DomTreeT;
+  using LoopT = typename Tr::LoopT;
+  using LoopInfoT = typename Tr::LoopInfoT;
+  using InstT = typename Tr::InstT;
+
+  using BlockTraits = GraphTraits<BlockT *>;
+  using InvBlockTraits = GraphTraits<Inverse<BlockT *>>;
+  using SuccIterTy = typename BlockTraits::ChildIteratorType;
+  using PredIterTy = typename InvBlockTraits::ChildIteratorType;
+
+  // Information necessary to manage this Region.
+  RegionInfoT *RI;
+  DomTreeT *DT;
+
+  // The exit BasicBlock of this region.
+  // (The entry BasicBlock is part of RegionNode)
+  BlockT *exit;
+
+  using RegionSet = std::vector<std::unique_ptr<RegionT>>;
+
+  // The subregions of this region.
+  RegionSet children;
+
+  using BBNodeMapT = std::map<BlockT *, std::unique_ptr<RegionNodeT>>;
+
+  // Save the BasicBlock RegionNodes that are element of this Region.
+  mutable BBNodeMapT BBNodeMap;
+
+  /// Check if a BB is in this Region. This check also works
+  /// if the region is incorrectly built. (EXPENSIVE!)
+  void verifyBBInRegion(BlockT *BB) const;
+
+  /// Walk over all the BBs of the region starting from BB and
+  /// verify that all reachable basic blocks are elements of the region.
+  /// (EXPENSIVE!)
+  void verifyWalk(BlockT *BB, std::set<BlockT *> *visitedBB) const;
+
+  /// Verify if the region and its children are valid regions (EXPENSIVE!)
+  void verifyRegionNest() const;
+
+public:
+  /// @brief Create a new region.
+  ///
+  /// @param Entry  The entry basic block of the region.
+  /// @param Exit   The exit basic block of the region.
+  /// @param RI     The region info object that is managing this region.
+  /// @param DT     The dominator tree of the current function.
+  /// @param Parent The surrounding region or NULL if this is a top level
+  ///               region.
+  RegionBase(BlockT *Entry, BlockT *Exit, RegionInfoT *RI, DomTreeT *DT,
+             RegionT *Parent = nullptr);
+
+  RegionBase(const RegionBase &) = delete;
+  RegionBase &operator=(const RegionBase &) = delete;
+
+  /// Delete the Region and all its subregions.
+  ~RegionBase();
+
+  /// @brief Get the entry BasicBlock of the Region.
+  /// @return The entry BasicBlock of the region.
+  BlockT *getEntry() const {
+    return RegionNodeBase<Tr>::getEntry();
+  }
+
+  /// @brief Replace the entry basic block of the region with the new basic
+  ///        block.
+  ///
+  /// @param BB  The new entry basic block of the region.
+  void replaceEntry(BlockT *BB);
+
+  /// @brief Replace the exit basic block of the region with the new basic
+  ///        block.
+  ///
+  /// @param BB  The new exit basic block of the region.
+  void replaceExit(BlockT *BB);
+
+  /// @brief Recursively replace the entry basic block of the region.
+  ///
+  /// This function replaces the entry basic block with a new basic block. It
+  /// also updates all child regions that have the same entry basic block as
+  /// this region.
+  ///
+  /// @param NewEntry The new entry basic block.
+  void replaceEntryRecursive(BlockT *NewEntry);
+
+  /// @brief Recursively replace the exit basic block of the region.
+  ///
+  /// This function replaces the exit basic block with a new basic block. It
+  /// also updates all child regions that have the same exit basic block as
+  /// this region.
+  ///
+  /// @param NewExit The new exit basic block.
+  void replaceExitRecursive(BlockT *NewExit);
+
+  /// @brief Get the exit BasicBlock of the Region.
+  /// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
+  ///         Region.
+  BlockT *getExit() const { return exit; }
+
+  /// @brief Get the parent of the Region.
+  /// @return The parent of the Region or NULL if this is a top level
+  ///         Region.
+  RegionT *getParent() const {
+    return RegionNodeBase<Tr>::getParent();
+  }
+
+  /// @brief Get the RegionNode representing the current Region.
+  /// @return The RegionNode representing the current Region.
+  RegionNodeT *getNode() const {
+    return const_cast<RegionNodeT *>(
+        reinterpret_cast<const RegionNodeT *>(this));
+  }
+
+  /// @brief Get the nesting level of this Region.
+  ///
+  /// A toplevel Region has depth 0.
+  ///
+  /// @return The depth of the region.
+  unsigned getDepth() const;
+
+  /// @brief Check if a Region is the TopLevel region.
+  ///
+  /// The toplevel region represents the whole function.
+  bool isTopLevelRegion() const { return exit == nullptr; }
+
+  /// @brief Return a new (non-canonical) region, that is obtained by joining
+  ///        this region with its predecessors.
+  ///
+  /// @return A region also starting at getEntry(), but reaching to the next
+  ///         basic block that forms with getEntry() a (non-canonical) region.
+  ///         NULL if such a basic block does not exist.
+  RegionT *getExpandedRegion() const;
+
+  /// @brief Return the first block of this region's single entry edge,
+  ///        if one exists.
+  ///
+  /// @return The BasicBlock starting this region's single entry edge,
+  ///         else NULL.
+  BlockT *getEnteringBlock() const;
+
+  /// @brief Return the first block of this region's single exit edge,
+  ///        if one exists.
+  ///
+  /// @return The BasicBlock starting this region's single exit edge,
+  ///         else NULL.
+  BlockT *getExitingBlock() const;
+
+  /// @brief Collect all exiting blocks of this region, i.e. the blocks inside
+  ///        the region that are predecessors of the exit.
+  ///
+  /// @return True if this region contains all the predecessors of the exit.
+  bool getExitingBlocks(SmallVectorImpl<BlockT *> &Exitings) const;
+
+  /// @brief Is this a simple region?
+  ///
+  /// A region is simple if it has exactly one exit and one entry edge.
+  ///
+  /// @return True if the Region is simple.
+  bool isSimple() const;
+
+  /// @brief Returns the name of the Region.
+  /// @return The Name of the Region.
+  std::string getNameStr() const;
+
+  /// @brief Return the RegionInfo object, that belongs to this Region.
+  RegionInfoT *getRegionInfo() const { return RI; }
+
+  /// PrintStyle - Print the region in different ways.
+  enum PrintStyle { PrintNone, PrintBB, PrintRN };
+
+  /// @brief Print the region.
+  ///
+  /// @param OS The output stream the Region is printed to.
+  /// @param printTree Also print the tree of subregions.
+  /// @param level The indentation level used for printing.
+  void print(raw_ostream &OS, bool printTree = true, unsigned level = 0,
+             PrintStyle Style = PrintNone) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// @brief Print the region to stderr.
+  void dump() const;
+#endif
+
+  /// @brief Check if the region contains a BasicBlock.
+  ///
+  /// @param BB The BasicBlock that might be contained in this Region.
+  /// @return True if the block is contained in the region otherwise false.
+  bool contains(const BlockT *BB) const;
+
+  /// @brief Check if the region contains another region.
+  ///
+  /// @param SubRegion The region that might be contained in this Region.
+  /// @return True if SubRegion is contained in the region otherwise false.
+  bool contains(const RegionT *SubRegion) const {
+    // Toplevel Region.
+    if (!getExit())
+      return true;
+
+    return contains(SubRegion->getEntry()) &&
+           (contains(SubRegion->getExit()) ||
+            SubRegion->getExit() == getExit());
+  }
+
+  /// @brief Check if the region contains an Instruction.
+  ///
+  /// @param Inst The Instruction that might be contained in this region.
+  /// @return True if the Instruction is contained in the region otherwise
+  /// false.
+  bool contains(const InstT *Inst) const { return contains(Inst->getParent()); }
+
+  /// @brief Check if the region contains a loop.
+  ///
+  /// @param L The loop that might be contained in this region.
+  /// @return True if the loop is contained in the region otherwise false.
+  ///         If a NULL pointer is passed to this function, the result is
+  ///         false, except for the region that describes the whole function,
+  ///         in which case true is returned.
+  bool contains(const LoopT *L) const;
+
+  /// @brief Get the outermost loop in the region that contains a loop.
+  ///
+  /// For a loop L, find the outermost loop OuterL that is a parent loop of L
+  /// and is itself contained in the region.
+  ///
+  /// @param L The loop at which the lookup starts.
+  /// @return The outermost loop in the region, NULL if such a loop does not
+  ///         exist or if the region describes the whole function.
+  LoopT *outermostLoopInRegion(LoopT *L) const;
+
+  /// @brief Get the outermost loop in the region that contains a basic block.
+  ///
+  /// For a basic block BB, find the outermost loop L that contains BB and is
+  /// itself contained in the region.
+  ///
+  /// @param LI A pointer to a LoopInfo analysis.
+  /// @param BB The basic block surrounded by the loop.
+  /// @return The outermost loop in the region, NULL if such a loop does not
+  ///         exist or if the region describes the whole function.
+  LoopT *outermostLoopInRegion(LoopInfoT *LI, BlockT *BB) const;
+
+  /// @brief Get the subregion that starts at a BasicBlock.
+  ///
+  /// @param BB The BasicBlock at which the subregion should start.
+  /// @return The Subregion if available, otherwise NULL.
+  RegionT *getSubRegionNode(BlockT *BB) const;
+
+  /// @brief Get the RegionNode for a BasicBlock
+  ///
+  /// @param BB The BasicBlock at which the RegionNode should start.
+  /// @return If available, the RegionNode that represents the subregion
+  ///         starting at BB. If no subregion starts at BB, the RegionNode
+  ///         representing BB.
+  RegionNodeT *getNode(BlockT *BB) const;
+
+  /// @brief Get the BasicBlock RegionNode for a BasicBlock
+  ///
+  /// @param BB The BasicBlock for which the RegionNode is requested.
+  /// @return The RegionNode representing the BB.
+  RegionNodeT *getBBNode(BlockT *BB) const;
+
+  /// @brief Add a new subregion to this Region.
+  ///
+  /// @param SubRegion The new subregion that will be added.
+  /// @param moveChildren Move the children of this region that are also
+  ///                     contained in SubRegion into SubRegion.
+  void addSubRegion(RegionT *SubRegion, bool moveChildren = false);
+
+  /// @brief Remove a subregion from this Region.
+  ///
+  /// The subregion is not deleted, as it will probably be inserted into another
+  /// region.
+  /// @param SubRegion The SubRegion that will be removed.
+  RegionT *removeSubRegion(RegionT *SubRegion);
+
+  /// @brief Move all direct child nodes of this Region to another Region.
+  ///
+  /// @param To The Region the child nodes will be transferred to.
+  void transferChildrenTo(RegionT *To);
+
+  /// @brief Verify if the region is a correct region.
+  ///
+  /// Check if this is a correctly built Region. This is an expensive check,
+  /// as the complete CFG of the Region will be walked.
+  void verifyRegion() const;
+
+  /// @brief Clear the cache for BB RegionNodes.
+  ///
+  /// After calling this function the BasicBlock RegionNodes will be stored at
+  /// different memory locations. RegionNodes obtained before this function is
+  /// called are therefore not comparable to RegionNodes obtained afterwards.
+  void clearNodeCache();
+
+  /// @name Subregion Iterators
+  ///
+  /// These iterators iterate over all subregions of this Region.
+  //@{
+  using iterator = typename RegionSet::iterator;
+  using const_iterator = typename RegionSet::const_iterator;
+
+  iterator begin() { return children.begin(); }
+  iterator end() { return children.end(); }
+
+  const_iterator begin() const { return children.begin(); }
+  const_iterator end() const { return children.end(); }
+  //@}
+
+  /// @name BasicBlock Iterators
+  ///
+  /// These iterators iterate over all BasicBlocks that are contained in this
+  /// Region. The iterator also iterates over BasicBlocks that are elements of
+  /// a subregion of this Region. It is therefore called a flat iterator.
+  //@{
+  template <bool IsConst>
+  class block_iterator_wrapper
+      : public df_iterator<
+            typename std::conditional<IsConst, const BlockT, BlockT>::type *> {
+    using super =
+        df_iterator<
+            typename std::conditional<IsConst, const BlockT, BlockT>::type *>;
+
+  public:
+    using Self = block_iterator_wrapper<IsConst>;
+    using value_type = typename super::value_type;
+
+    // Construct the begin iterator.
+    block_iterator_wrapper(value_type Entry, value_type Exit)
+        : super(df_begin(Entry)) {
+      // Mark the exit of the region as visited, so that the children of the
+      // exit and the exit itself, i.e. the blocks outside the region, will
+      // never be visited.
+      super::Visited.insert(Exit);
+    }
+
+    // Construct the end iterator.
+    block_iterator_wrapper() : super(df_end<value_type>((BlockT *)nullptr)) {}
+
+    /*implicit*/ block_iterator_wrapper(super I) : super(I) {}
+
+    // FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
+    //        This was introduced for backwards compatibility, but should
+    //        be removed as soon as all users are fixed.
+    BlockT *operator*() const {
+      return const_cast<BlockT *>(super::operator*());
+    }
+  };
+
+  using block_iterator = block_iterator_wrapper<false>;
+  using const_block_iterator = block_iterator_wrapper<true>;
+
+  block_iterator block_begin() { return block_iterator(getEntry(), getExit()); }
+
+  block_iterator block_end() { return block_iterator(); }
+
+  const_block_iterator block_begin() const {
+    return const_block_iterator(getEntry(), getExit());
+  }
+  const_block_iterator block_end() const { return const_block_iterator(); }
+
+  using block_range = iterator_range<block_iterator>;
+  using const_block_range = iterator_range<const_block_iterator>;
+
+  /// @brief Returns a range view of the basic blocks in the region.
+  inline block_range blocks() {
+    return block_range(block_begin(), block_end());
+  }
+
+  /// @brief Returns a range view of the basic blocks in the region.
+  ///
+  /// This is the 'const' version of the range view.
+  inline const_block_range blocks() const {
+    return const_block_range(block_begin(), block_end());
+  }
+  //@}
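+
+  // Illustrative usage sketch (assuming the Function instantiation, where
+  // BlockT is BasicBlock, and a valid `Region *R`): print every basic block
+  // contained in the region, including those inside subregions.
+  //
+  //   for (BasicBlock *BB : R->blocks())
+  //     dbgs() << BB->getName() << '\n';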
+
+  /// @name Element Iterators
+  ///
+  /// These iterators iterate over all BasicBlock and subregion RegionNodes
+  /// that are direct children of this Region. They do not iterate over any
+  /// RegionNodes that are also elements of a subregion of this Region.
+  //@{
+  using element_iterator =
+      df_iterator<RegionNodeT *, df_iterator_default_set<RegionNodeT *>, false,
+                  GraphTraits<RegionNodeT *>>;
+
+  using const_element_iterator =
+      df_iterator<const RegionNodeT *,
+                  df_iterator_default_set<const RegionNodeT *>, false,
+                  GraphTraits<const RegionNodeT *>>;
+
+  element_iterator element_begin();
+  element_iterator element_end();
+  iterator_range<element_iterator> elements() {
+    return make_range(element_begin(), element_end());
+  }
+
+  const_element_iterator element_begin() const;
+  const_element_iterator element_end() const;
+  iterator_range<const_element_iterator> elements() const {
+    return make_range(element_begin(), element_end());
+  }
+  //@}
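+
+  // Illustrative usage sketch (assuming a valid `Region *R` of the Function
+  // instantiation): visit only the direct children, distinguishing subregion
+  // nodes from BasicBlock nodes.
+  //
+  //   for (RegionNode *Element : R->elements()) {
+  //     if (Element->isSubRegion())
+  //       dbgs() << "subregion: " << *Element << '\n';
+  //     else
+  //       dbgs() << "block: " << *Element << '\n';
+  //   }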
+};
+
+/// Print a RegionNode.
+template <class Tr>
+inline raw_ostream &operator<<(raw_ostream &OS, const RegionNodeBase<Tr> &Node);
+
+//===----------------------------------------------------------------------===//
+/// @brief Analysis that detects all canonical Regions.
+///
+/// The RegionInfo pass detects all canonical regions in a function. The Regions
+/// are connected using the parent relation. This builds a Program Structure
+/// Tree.
+template <class Tr>
+class RegionInfoBase {
+  friend class RegionInfo;
+  friend class MachineRegionInfo;
+
+  using BlockT = typename Tr::BlockT;
+  using FuncT = typename Tr::FuncT;
+  using RegionT = typename Tr::RegionT;
+  using RegionInfoT = typename Tr::RegionInfoT;
+  using DomTreeT = typename Tr::DomTreeT;
+  using DomTreeNodeT = typename Tr::DomTreeNodeT;
+  using PostDomTreeT = typename Tr::PostDomTreeT;
+  using DomFrontierT = typename Tr::DomFrontierT;
+  using BlockTraits = GraphTraits<BlockT *>;
+  using InvBlockTraits = GraphTraits<Inverse<BlockT *>>;
+  using SuccIterTy = typename BlockTraits::ChildIteratorType;
+  using PredIterTy = typename InvBlockTraits::ChildIteratorType;
+
+  using BBtoBBMap = DenseMap<BlockT *, BlockT *>;
+  using BBtoRegionMap = DenseMap<BlockT *, RegionT *>;
+
+  RegionInfoBase();
+
+  RegionInfoBase(RegionInfoBase &&Arg)
+    : DT(std::move(Arg.DT)), PDT(std::move(Arg.PDT)), DF(std::move(Arg.DF)),
+      TopLevelRegion(std::move(Arg.TopLevelRegion)),
+      BBtoRegion(std::move(Arg.BBtoRegion)) {
+    Arg.wipe();
+  }
+
+  RegionInfoBase &operator=(RegionInfoBase &&RHS) {
+    DT = std::move(RHS.DT);
+    PDT = std::move(RHS.PDT);
+    DF = std::move(RHS.DF);
+    TopLevelRegion = std::move(RHS.TopLevelRegion);
+    BBtoRegion = std::move(RHS.BBtoRegion);
+    RHS.wipe();
+    return *this;
+  }
+
+  virtual ~RegionInfoBase();
+
+  DomTreeT *DT;
+  PostDomTreeT *PDT;
+  DomFrontierT *DF;
+
+  /// The top level region.
+  RegionT *TopLevelRegion = nullptr;
+
+  /// Map every BB to the smallest region that contains BB.
+  BBtoRegionMap BBtoRegion;
+
+protected:
+  /// \brief Update references to a RegionInfoT held by the RegionT managed here.
+  ///
+  /// This is a post-move helper. Regions hold references to the owning
+  /// RegionInfo object. After a move these need to be fixed.
+  template<typename TheRegionT>
+  void updateRegionTree(RegionInfoT &RI, TheRegionT *R) {
+    if (!R)
+      return;
+    R->RI = &RI;
+    for (auto &SubR : *R)
+      updateRegionTree(RI, SubR.get());
+  }
+
+private:
+  /// \brief Wipe this region tree's state without releasing any resources.
+  ///
+  /// This is essentially a post-move helper only. It leaves the object in an
+  /// assignable and destroyable state, but otherwise invalid.
+  void wipe() {
+    DT = nullptr;
+    PDT = nullptr;
+    DF = nullptr;
+    TopLevelRegion = nullptr;
+    BBtoRegion.clear();
+  }
+
+  // Check whether the entries of BBtoRegion for the BBs of region
+  // SR are correct. Triggers an assertion if not. Calls itself recursively for
+  // subregions.
+  void verifyBBMap(const RegionT *SR) const;
+
+  // Returns true if BB is in the dominance frontier of
+  // entry, because it was inherited from exit. Otherwise there is an
+  // edge going from entry to BB without passing through exit.
+  bool isCommonDomFrontier(BlockT *BB, BlockT *entry, BlockT *exit) const;
+
+  // Check if entry and exit surround a valid region, based on
+  // dominance tree and dominance frontier.
+  bool isRegion(BlockT *entry, BlockT *exit) const;
+
+  // Saves a shortcut pointing from entry to exit.
+  // This function may extend this shortcut if possible.
+  void insertShortCut(BlockT *entry, BlockT *exit, BBtoBBMap *ShortCut) const;
+
+  // Returns the next BB that postdominates N, while skipping
+  // all post dominators that cannot finish a canonical region.
+  DomTreeNodeT *getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const;
+
+  // A region is trivial if it contains only one BB.
+  bool isTrivialRegion(BlockT *entry, BlockT *exit) const;
+
+  // Creates a single entry single exit region.
+  RegionT *createRegion(BlockT *entry, BlockT *exit);
+
+  // Detect all regions starting with BB 'entry'.
+  void findRegionsWithEntry(BlockT *entry, BBtoBBMap *ShortCut);
+
+  // Detects regions in F.
+  void scanForRegions(FuncT &F, BBtoBBMap *ShortCut);
+
+  // Get the top most parent with the same entry block.
+  RegionT *getTopMostParent(RegionT *region);
+
+  // Build the region hierarchy after all regions have been detected.
+  void buildRegionsTree(DomTreeNodeT *N, RegionT *region);
+
+  // Update statistics about created regions.
+  virtual void updateStatistics(RegionT *R) = 0;
+
+  // Detect all regions in function and build the region tree.
+  void calculate(FuncT &F);
+
+public:
+  RegionInfoBase(const RegionInfoBase &) = delete;
+  RegionInfoBase &operator=(const RegionInfoBase &) = delete;
+
+  static bool VerifyRegionInfo;
+  static typename RegionT::PrintStyle printStyle;
+
+  void print(raw_ostream &OS) const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  void dump() const;
+#endif
+
+  void releaseMemory();
+
+  /// @brief Get the smallest region that contains a BasicBlock.
+  ///
+  /// @param BB The basic block.
+  /// @return The smallest region that contains BB, or NULL if there is no
+  /// region containing BB.
+  RegionT *getRegionFor(BlockT *BB) const;
+
+  /// @brief  Set the smallest region that surrounds a basic block.
+  ///
+  /// @param BB The basic block surrounded by a region.
+  /// @param R The smallest region that surrounds BB.
+  void setRegionFor(BlockT *BB, RegionT *R);
+
+  /// @brief A shortcut for getRegionFor().
+  ///
+  /// @param BB The basic block.
+  /// @return The smallest region that contains BB, or NULL if there is no
+  /// region containing BB.
+  RegionT *operator[](BlockT *BB) const;
+
+  /// @brief Return the exit of the maximal refined region that starts at a
+  /// BasicBlock.
+  ///
+  /// @param BB The BasicBlock at which the refined region starts.
+  BlockT *getMaxRegionExit(BlockT *BB) const;
+
+  /// @brief Find the smallest region that contains two regions.
+  ///
+  /// @param A The first region.
+  /// @param B The second region.
+  /// @return The smallest region containing A and B.
+  RegionT *getCommonRegion(RegionT *A, RegionT *B) const;
+
+  /// @brief Find the smallest region that contains two basic blocks.
+  ///
+  /// @param A The first basic block.
+  /// @param B The second basic block.
+  /// @return The smallest region that contains A and B.
+  RegionT *getCommonRegion(BlockT *A, BlockT *B) const {
+    return getCommonRegion(getRegionFor(A), getRegionFor(B));
+  }
+
+  /// @brief Find the smallest region that contains a set of regions.
+  ///
+  /// @param Regions A vector of regions.
+  /// @return The smallest region that contains all regions in Regions.
+  RegionT *getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const;
+
+  /// @brief Find the smallest region that contains a set of basic blocks.
+  ///
+  /// @param BBs A vector of basic blocks.
+  /// @return The smallest region that contains all basic blocks in BBs.
+  RegionT *getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const;
+
+  RegionT *getTopLevelRegion() const { return TopLevelRegion; }
+
+  /// @brief Clear the Node Cache for all Regions.
+  ///
+  /// @see Region::clearNodeCache()
+  void clearNodeCache() {
+    if (TopLevelRegion)
+      TopLevelRegion->clearNodeCache();
+  }
+
+  void verifyAnalysis() const;
+};
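+
+// Illustrative query sketch, assuming `RI` is a computed RegionInfo and `A`
+// and `B` are basic blocks of the analyzed function:
+//
+//   Region *RA = RI.getRegionFor(A);           // smallest region around A
+//   Region *Common = RI.getCommonRegion(A, B); // smallest region around both
+//   Region *Top = RI.getTopLevelRegion();      // represents the whole function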
+
+class Region;
+
+class RegionNode : public RegionNodeBase<RegionTraits<Function>> {
+public:
+  inline RegionNode(Region *Parent, BasicBlock *Entry, bool isSubRegion = false)
+      : RegionNodeBase<RegionTraits<Function>>(Parent, Entry, isSubRegion) {}
+
+  bool operator==(const Region &RN) const {
+    return this == reinterpret_cast<const RegionNode *>(&RN);
+  }
+};
+
+class Region : public RegionBase<RegionTraits<Function>> {
+public:
+  Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo *RI, DominatorTree *DT,
+         Region *Parent = nullptr);
+  ~Region();
+
+  bool operator==(const RegionNode &RN) const {
+    return &RN == reinterpret_cast<const RegionNode *>(this);
+  }
+};
+
+class RegionInfo : public RegionInfoBase<RegionTraits<Function>> {
+public:
+  using Base = RegionInfoBase<RegionTraits<Function>>;
+
+  explicit RegionInfo();
+
+  RegionInfo(RegionInfo &&Arg) : Base(std::move(static_cast<Base &>(Arg))) {
+    updateRegionTree(*this, TopLevelRegion);
+  }
+
+  RegionInfo &operator=(RegionInfo &&RHS) {
+    Base::operator=(std::move(static_cast<Base &>(RHS)));
+    updateRegionTree(*this, TopLevelRegion);
+    return *this;
+  }
+
+  ~RegionInfo() override;
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+
+  // updateStatistics - Update statistics about created regions.
+  void updateStatistics(Region *R) final;
+
+  void recalculate(Function &F, DominatorTree *DT, PostDominatorTree *PDT,
+                   DominanceFrontier *DF);
+
+#ifndef NDEBUG
+  /// @brief Opens a viewer to show the GraphViz visualization of the regions.
+  ///
+  /// Useful during debugging as an alternative to dump().
+  void view();
+
+  /// @brief Opens a viewer to show the GraphViz visualization of this region
+  /// without instructions in the BasicBlocks.
+  ///
+  /// Useful during debugging as an alternative to dump().
+  void viewOnly();
+#endif
+};
+
+class RegionInfoPass : public FunctionPass {
+  RegionInfo RI;
+
+public:
+  static char ID;
+
+  explicit RegionInfoPass();
+  ~RegionInfoPass() override;
+
+  RegionInfo &getRegionInfo() { return RI; }
+
+  const RegionInfo &getRegionInfo() const { return RI; }
+
+  /// @name FunctionPass interface
+  //@{
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void verifyAnalysis() const override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(raw_ostream &OS, const Module *) const override;
+  void dump() const;
+  //@}
+};
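+
+// Illustrative legacy pass manager usage, assuming a FunctionPass that has
+// declared a dependency on RegionInfoPass in getAnalysisUsage():
+//
+//   RegionInfo &RI = getAnalysis<RegionInfoPass>().getRegionInfo();
+//   RI.getTopLevelRegion()->print(errs());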
+
+/// \brief Analysis pass that exposes the \c RegionInfo for a function.
+class RegionInfoAnalysis : public AnalysisInfoMixin<RegionInfoAnalysis> {
+  friend AnalysisInfoMixin<RegionInfoAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = RegionInfo;
+
+  RegionInfo run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Printer pass for the \c RegionInfo.
+class RegionInfoPrinterPass : public PassInfoMixin<RegionInfoPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit RegionInfoPrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for the \c RegionInfo.
+struct RegionInfoVerifierPass : PassInfoMixin<RegionInfoVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
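+
+// Illustrative new pass manager usage, assuming a configured
+// FunctionPassManager `FPM`; inside a pass body, the analysis result can be
+// obtained from the FunctionAnalysisManager `AM`:
+//
+//   FPM.addPass(RegionInfoPrinterPass(errs()));
+//   FPM.addPass(RegionInfoVerifierPass());
+//   RegionInfo &RI = AM.getResult<RegionInfoAnalysis>(F);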
+
+template <>
+template <>
+inline BasicBlock *
+RegionNodeBase<RegionTraits<Function>>::getNodeAs<BasicBlock>() const {
+  assert(!isSubRegion() && "This is not a BasicBlock RegionNode!");
+  return getEntry();
+}
+
+template <>
+template <>
+inline Region *
+RegionNodeBase<RegionTraits<Function>>::getNodeAs<Region>() const {
+  assert(isSubRegion() && "This is not a subregion RegionNode!");
+  auto Unconst = const_cast<RegionNodeBase<RegionTraits<Function>> *>(this);
+  return reinterpret_cast<Region *>(Unconst);
+}
+
+template <class Tr>
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const RegionNodeBase<Tr> &Node) {
+  using BlockT = typename Tr::BlockT;
+  using RegionT = typename Tr::RegionT;
+
+  if (Node.isSubRegion())
+    return OS << Node.template getNodeAs<RegionT>()->getNameStr();
+  else
+    return OS << Node.template getNodeAs<BlockT>()->getName();
+}
+
+extern template class RegionBase<RegionTraits<Function>>;
+extern template class RegionNodeBase<RegionTraits<Function>>;
+extern template class RegionInfoBase<RegionTraits<Function>>;
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_REGIONINFO_H
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h
new file mode 100644
index 0000000..eb6baac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h
@@ -0,0 +1,931 @@
+//===- RegionInfoImpl.h - SESE region detection analysis --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Detects single entry single exit regions in the control flow graph.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONINFOIMPL_H
+#define LLVM_ANALYSIS_REGIONINFOIMPL_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#define DEBUG_TYPE "region"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// RegionBase Implementation
+template <class Tr>
+RegionBase<Tr>::RegionBase(BlockT *Entry, BlockT *Exit,
+                           typename Tr::RegionInfoT *RInfo, DomTreeT *dt,
+                           RegionT *Parent)
+    : RegionNodeBase<Tr>(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}
+
+template <class Tr>
+RegionBase<Tr>::~RegionBase() {
+  // Only clean the cache for this Region. Caches of child Regions will be
+  // cleaned when the child Regions are deleted.
+  BBNodeMap.clear();
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceEntry(BlockT *BB) {
+  this->entry.setPointer(BB);
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceExit(BlockT *BB) {
+  assert(exit && "No exit to replace!");
+  exit = BB;
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceEntryRecursive(BlockT *NewEntry) {
+  std::vector<RegionT *> RegionQueue;
+  BlockT *OldEntry = getEntry();
+
+  RegionQueue.push_back(static_cast<RegionT *>(this));
+  while (!RegionQueue.empty()) {
+    RegionT *R = RegionQueue.back();
+    RegionQueue.pop_back();
+
+    R->replaceEntry(NewEntry);
+    for (std::unique_ptr<RegionT> &Child : *R) {
+      if (Child->getEntry() == OldEntry)
+        RegionQueue.push_back(Child.get());
+    }
+  }
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceExitRecursive(BlockT *NewExit) {
+  std::vector<RegionT *> RegionQueue;
+  BlockT *OldExit = getExit();
+
+  RegionQueue.push_back(static_cast<RegionT *>(this));
+  while (!RegionQueue.empty()) {
+    RegionT *R = RegionQueue.back();
+    RegionQueue.pop_back();
+
+    R->replaceExit(NewExit);
+    for (std::unique_ptr<RegionT> &Child : *R) {
+      if (Child->getExit() == OldExit)
+        RegionQueue.push_back(Child.get());
+    }
+  }
+}
+
+template <class Tr>
+bool RegionBase<Tr>::contains(const BlockT *B) const {
+  BlockT *BB = const_cast<BlockT *>(B);
+
+  if (!DT->getNode(BB))
+    return false;
+
+  BlockT *entry = getEntry(), *exit = getExit();
+
+  // Toplevel region.
+  if (!exit)
+    return true;
+
+  return (DT->dominates(entry, BB) &&
+          !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
+}
+
+template <class Tr>
+bool RegionBase<Tr>::contains(const LoopT *L) const {
+  // BBs that are not part of any loop are elements of the loop
+  // described by the NULL pointer. This loop is not part of any region,
+  // except if the region describes the whole function.
+  if (!L)
+    return getExit() == nullptr;
+
+  if (!contains(L->getHeader()))
+    return false;
+
+  SmallVector<BlockT *, 8> ExitingBlocks;
+  L->getExitingBlocks(ExitingBlocks);
+
+  for (BlockT *BB : ExitingBlocks) {
+    if (!contains(BB))
+      return false;
+  }
+
+  return true;
+}
+
+template <class Tr>
+typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopT *L) const {
+  if (!contains(L))
+    return nullptr;
+
+  while (L && contains(L->getParentLoop())) {
+    L = L->getParentLoop();
+  }
+
+  return L;
+}
+
+template <class Tr>
+typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopInfoT *LI,
+                                                          BlockT *BB) const {
+  assert(LI && BB && "LI and BB cannot be null!");
+  LoopT *L = LI->getLoopFor(BB);
+  return outermostLoopInRegion(L);
+}
+
+template <class Tr>
+typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
+  BlockT *entry = getEntry();
+  BlockT *enteringBlock = nullptr;
+
+  for (BlockT *Pred : make_range(InvBlockTraits::child_begin(entry),
+                                 InvBlockTraits::child_end(entry))) {
+    if (DT->getNode(Pred) && !contains(Pred)) {
+      if (enteringBlock)
+        return nullptr;
+
+      enteringBlock = Pred;
+    }
+  }
+
+  return enteringBlock;
+}
+
+template <class Tr>
+bool RegionBase<Tr>::getExitingBlocks(
+    SmallVectorImpl<BlockT *> &Exitings) const {
+  bool CoverAll = true;
+
+  if (!exit)
+    return CoverAll;
+
+  for (PredIterTy PI = InvBlockTraits::child_begin(exit),
+                  PE = InvBlockTraits::child_end(exit);
+       PI != PE; ++PI) {
+    BlockT *Pred = *PI;
+    if (contains(Pred)) {
+      Exitings.push_back(Pred);
+      continue;
+    }
+
+    CoverAll = false;
+  }
+
+  return CoverAll;
+}
+
+template <class Tr>
+typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getExitingBlock() const {
+  BlockT *exit = getExit();
+  BlockT *exitingBlock = nullptr;
+
+  if (!exit)
+    return nullptr;
+
+  for (BlockT *Pred : make_range(InvBlockTraits::child_begin(exit),
+                                 InvBlockTraits::child_end(exit))) {
+    if (contains(Pred)) {
+      if (exitingBlock)
+        return nullptr;
+
+      exitingBlock = Pred;
+    }
+  }
+
+  return exitingBlock;
+}
+
+template <class Tr>
+bool RegionBase<Tr>::isSimple() const {
+  return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
+}
+
+template <class Tr>
+std::string RegionBase<Tr>::getNameStr() const {
+  std::string exitName;
+  std::string entryName;
+
+  if (getEntry()->getName().empty()) {
+    raw_string_ostream OS(entryName);
+
+    getEntry()->printAsOperand(OS, false);
+  } else
+    entryName = getEntry()->getName();
+
+  if (getExit()) {
+    if (getExit()->getName().empty()) {
+      raw_string_ostream OS(exitName);
+
+      getExit()->printAsOperand(OS, false);
+    } else
+      exitName = getExit()->getName();
+  } else
+    exitName = "<Function Return>";
+
+  return entryName + " => " + exitName;
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
+  if (!contains(BB))
+    report_fatal_error("Broken region found: enumerated BB not in region!");
+
+  BlockT *entry = getEntry(), *exit = getExit();
+
+  for (BlockT *Succ :
+       make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
+    if (!contains(Succ) && exit != Succ)
+      report_fatal_error("Broken region found: edges leaving the region must go "
+                         "to the exit node!");
+  }
+
+  if (entry != BB) {
+    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(BB),
+                                   InvBlockTraits::child_end(BB))) {
+      if (!contains(Pred))
+        report_fatal_error("Broken region found: edges entering the region must "
+                           "go to the entry node!");
+    }
+  }
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyWalk(BlockT *BB, std::set<BlockT *> *visited) const {
+  BlockT *exit = getExit();
+
+  visited->insert(BB);
+
+  verifyBBInRegion(BB);
+
+  for (BlockT *Succ :
+       make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
+    if (Succ != exit && visited->find(Succ) == visited->end())
+      verifyWalk(Succ, visited);
+  }
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyRegion() const {
+  // Only do verification when the user wants to; otherwise this expensive
+  // check will be invoked by PMDataManager::verifyPreservedAnalysis when
+  // a region pass (marked PreservedAll) finishes.
+  if (!RegionInfoBase<Tr>::VerifyRegionInfo)
+    return;
+
+  std::set<BlockT *> visited;
+  verifyWalk(getEntry(), &visited);
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyRegionNest() const {
+  for (const std::unique_ptr<RegionT> &R : *this)
+    R->verifyRegionNest();
+
+  verifyRegion();
+}
+
+template <class Tr>
+typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_begin() {
+  return GraphTraits<RegionT *>::nodes_begin(static_cast<RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_end() {
+  return GraphTraits<RegionT *>::nodes_end(static_cast<RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::const_element_iterator
+RegionBase<Tr>::element_begin() const {
+  return GraphTraits<const RegionT *>::nodes_begin(
+      static_cast<const RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::const_element_iterator
+RegionBase<Tr>::element_end() const {
+  return GraphTraits<const RegionT *>::nodes_end(
+      static_cast<const RegionT *>(this));
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::getSubRegionNode(BlockT *BB) const {
+  using RegionT = typename Tr::RegionT;
+
+  RegionT *R = RI->getRegionFor(BB);
+
+  if (!R || R == this)
+    return nullptr;
+
+  // If we pass the BB out of this region, that means our code is broken.
+  assert(contains(R) && "BB not in current region!");
+
+  while (contains(R->getParent()) && R->getParent() != this)
+    R = R->getParent();
+
+  if (R->getEntry() != BB)
+    return nullptr;
+
+  return R;
+}
+
+template <class Tr>
+typename Tr::RegionNodeT *RegionBase<Tr>::getBBNode(BlockT *BB) const {
+  assert(contains(BB) && "Cannot get a BB node out of this region!");
+
+  typename BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
+
+  if (at == BBNodeMap.end()) {
+    auto Deconst = const_cast<RegionBase<Tr> *>(this);
+    typename BBNodeMapT::value_type V = {
+        BB,
+        llvm::make_unique<RegionNodeT>(static_cast<RegionT *>(Deconst), BB)};
+    at = BBNodeMap.insert(std::move(V)).first;
+  }
+  return at->second.get();
+}
+
+template <class Tr>
+typename Tr::RegionNodeT *RegionBase<Tr>::getNode(BlockT *BB) const {
+  assert(contains(BB) && "Cannot get a BB node out of this region!");
+  if (RegionT *Child = getSubRegionNode(BB))
+    return Child->getNode();
+
+  return getBBNode(BB);
+}
+
+template <class Tr>
+void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
+  for (std::unique_ptr<RegionT> &R : *this) {
+    R->parent = To;
+    To->children.push_back(std::move(R));
+  }
+  children.clear();
+}
+
+template <class Tr>
+void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
+  assert(!SubRegion->parent && "SubRegion already has a parent!");
+  assert(llvm::find_if(*this,
+                       [&](const std::unique_ptr<RegionT> &R) {
+                         return R.get() == SubRegion;
+                       }) == children.end() &&
+         "Subregion already exists!");
+
+  SubRegion->parent = static_cast<RegionT *>(this);
+  children.push_back(std::unique_ptr<RegionT>(SubRegion));
+
+  if (!moveChildren)
+    return;
+
+  assert(SubRegion->children.empty() &&
+         "SubRegions that contain children are not supported");
+
+  for (RegionNodeT *Element : elements()) {
+    if (!Element->isSubRegion()) {
+      BlockT *BB = Element->template getNodeAs<BlockT>();
+
+      if (SubRegion->contains(BB))
+        RI->setRegionFor(BB, SubRegion);
+    }
+  }
+
+  std::vector<std::unique_ptr<RegionT>> Keep;
+  for (std::unique_ptr<RegionT> &R : *this) {
+    if (SubRegion->contains(R.get()) && R.get() != SubRegion) {
+      R->parent = SubRegion;
+      SubRegion->children.push_back(std::move(R));
+    } else
+      Keep.push_back(std::move(R));
+  }
+
+  children.clear();
+  children.insert(
+      children.begin(),
+      std::move_iterator<typename RegionSet::iterator>(Keep.begin()),
+      std::move_iterator<typename RegionSet::iterator>(Keep.end()));
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::removeSubRegion(RegionT *Child) {
+  assert(Child->parent == this && "Child is not a child of this region!");
+  Child->parent = nullptr;
+  typename RegionSet::iterator I =
+      llvm::find_if(children, [&](const std::unique_ptr<RegionT> &R) {
+        return R.get() == Child;
+      });
+  assert(I != children.end() && "Region does not exist. Unable to remove.");
+  children.erase(children.begin() + (I - begin()));
+  return Child;
+}
+
+template <class Tr>
+unsigned RegionBase<Tr>::getDepth() const {
+  unsigned Depth = 0;
+
+  for (RegionT *R = getParent(); R != nullptr; R = R->getParent())
+    ++Depth;
+
+  return Depth;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
+  unsigned NumSuccessors = Tr::getNumSuccessors(exit);
+
+  if (NumSuccessors == 0)
+    return nullptr;
+
+  RegionT *R = RI->getRegionFor(exit);
+
+  if (R->getEntry() != exit) {
+    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
+                                   InvBlockTraits::child_end(getExit())))
+      if (!contains(Pred))
+        return nullptr;
+    if (Tr::getNumSuccessors(exit) == 1)
+      return new RegionT(getEntry(), *BlockTraits::child_begin(exit), RI, DT);
+    return nullptr;
+  }
+
+  while (R->getParent() && R->getParent()->getEntry() == exit)
+    R = R->getParent();
+
+  for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
+                                 InvBlockTraits::child_end(getExit()))) {
+    if (!(contains(Pred) || R->contains(Pred)))
+      return nullptr;
+  }
+
+  return new RegionT(getEntry(), R->getExit(), RI, DT);
+}
+
+template <class Tr>
+void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
+                           PrintStyle Style) const {
+  if (print_tree)
+    OS.indent(level * 2) << '[' << level << "] " << getNameStr();
+  else
+    OS.indent(level * 2) << getNameStr();
+
+  OS << '\n';
+
+  if (Style != PrintNone) {
+    OS.indent(level * 2) << "{\n";
+    OS.indent(level * 2 + 2);
+
+    if (Style == PrintBB) {
+      for (const auto *BB : blocks())
+        OS << BB->getName() << ", "; // TODO: remove the last ","
+    } else if (Style == PrintRN) {
+      for (const RegionNodeT *Element : elements()) {
+        OS << *Element << ", "; // TODO: remove the last ","
+      }
+    }
+
+    OS << '\n';
+  }
+
+  if (print_tree) {
+    for (const std::unique_ptr<RegionT> &R : *this)
+      R->print(OS, print_tree, level + 1, Style);
+  }
+
+  if (Style != PrintNone)
+    OS.indent(level * 2) << "} \n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class Tr>
+void RegionBase<Tr>::dump() const {
+  print(dbgs(), true, getDepth(), RegionInfoBase<Tr>::printStyle);
+}
+#endif
+
+template <class Tr>
+void RegionBase<Tr>::clearNodeCache() {
+  BBNodeMap.clear();
+  for (std::unique_ptr<RegionT> &R : *this)
+    R->clearNodeCache();
+}
+
+//===----------------------------------------------------------------------===//
+// RegionInfoBase implementation
+//
+
+template <class Tr>
+RegionInfoBase<Tr>::RegionInfoBase() = default;
+
+template <class Tr>
+RegionInfoBase<Tr>::~RegionInfoBase() {
+  releaseMemory();
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
+  assert(R && "R must be non-null");
+  for (const typename Tr::RegionNodeT *Element : R->elements()) {
+    if (Element->isSubRegion()) {
+      const RegionT *SR = Element->template getNodeAs<RegionT>();
+      verifyBBMap(SR);
+    } else {
+      BlockT *BB = Element->template getNodeAs<BlockT>();
+      if (getRegionFor(BB) != R)
+        report_fatal_error("BB map does not match region nesting");
+    }
+  }
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isCommonDomFrontier(BlockT *BB, BlockT *entry,
+                                             BlockT *exit) const {
+  for (BlockT *P : make_range(InvBlockTraits::child_begin(BB),
+                              InvBlockTraits::child_end(BB))) {
+    if (DT->dominates(entry, P) && !DT->dominates(exit, P))
+      return false;
+  }
+
+  return true;
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
+  assert(entry && exit && "entry and exit must not be null!");
+
+  using DST = typename DomFrontierT::DomSetType;
+
+  DST *entrySuccs = &DF->find(entry)->second;
+
+  // Exit is the header of a loop that contains the entry. In this case,
+  // the dominance frontier must only contain the exit.
+  if (!DT->dominates(entry, exit)) {
+    for (typename DST::iterator SI = entrySuccs->begin(),
+                                SE = entrySuccs->end();
+         SI != SE; ++SI) {
+      if (*SI != exit && *SI != entry)
+        return false;
+    }
+
+    return true;
+  }
+
+  DST *exitSuccs = &DF->find(exit)->second;
+
+  // Do not allow edges leaving the region.
+  for (BlockT *Succ : *entrySuccs) {
+    if (Succ == exit || Succ == entry)
+      continue;
+    if (exitSuccs->find(Succ) == exitSuccs->end())
+      return false;
+    if (!isCommonDomFrontier(Succ, entry, exit))
+      return false;
+  }
+
+  // Do not allow edges pointing into the region.
+  for (BlockT *Succ : *exitSuccs) {
+    if (DT->properlyDominates(entry, Succ) && Succ != exit)
+      return false;
+  }
+
+  return true;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::insertShortCut(BlockT *entry, BlockT *exit,
+                                        BBtoBBMap *ShortCut) const {
+  assert(entry && exit && "entry and exit must not be null!");
+
+  typename BBtoBBMap::iterator e = ShortCut->find(exit);
+
+  if (e == ShortCut->end())
+    // No further region at exit available.
+    (*ShortCut)[entry] = exit;
+  else {
+    // We found a region e that starts at exit. Therefore (entry, e->second)
+    // is also a region that is larger than (entry, exit). Insert the
+    // larger one.
+    BlockT *BB = e->second;
+    (*ShortCut)[entry] = BB;
+  }
+}
+
+template <class Tr>
+typename Tr::DomTreeNodeT *
+RegionInfoBase<Tr>::getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const {
+  typename BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
+
+  if (e == ShortCut->end())
+    return N->getIDom();
+
+  return PDT->getNode(e->second)->getIDom();
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isTrivialRegion(BlockT *entry, BlockT *exit) const {
+  assert(entry && exit && "entry and exit must not be null!");
+
+  unsigned num_successors =
+      BlockTraits::child_end(entry) - BlockTraits::child_begin(entry);
+
+  if (num_successors <= 1 && exit == *(BlockTraits::child_begin(entry)))
+    return true;
+
+  return false;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
+                                                       BlockT *exit) {
+  assert(entry && exit && "entry and exit must not be null!");
+
+  if (isTrivialRegion(entry, exit))
+    return nullptr;
+
+  RegionT *region =
+      new RegionT(entry, exit, static_cast<RegionInfoT *>(this), DT);
+  BBtoRegion.insert({entry, region});
+
+#ifdef EXPENSIVE_CHECKS
+  region->verifyRegion();
+#else
+  DEBUG(region->verifyRegion());
+#endif
+
+  updateStatistics(region);
+  return region;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::findRegionsWithEntry(BlockT *entry,
+                                              BBtoBBMap *ShortCut) {
+  assert(entry);
+
+  DomTreeNodeT *N = PDT->getNode(entry);
+  if (!N)
+    return;
+
+  RegionT *lastRegion = nullptr;
+  BlockT *lastExit = entry;
+
+  // As only a BasicBlock that postdominates entry can finish a region, walk the
+  // post dominance tree upwards.
+  while ((N = getNextPostDom(N, ShortCut))) {
+    BlockT *exit = N->getBlock();
+
+    if (!exit)
+      break;
+
+    if (isRegion(entry, exit)) {
+      RegionT *newRegion = createRegion(entry, exit);
+
+      if (lastRegion)
+        newRegion->addSubRegion(lastRegion);
+
+      lastRegion = newRegion;
+      lastExit = exit;
+    }
+
+    // This can never be a region, so stop the search.
+    if (!DT->dominates(entry, exit))
+      break;
+  }
+
+  // Tried to create regions from entry to lastExit.  Next time take a
+  // shortcut from entry to lastExit.
+  if (lastExit != entry)
+    insertShortCut(entry, lastExit, ShortCut);
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::scanForRegions(FuncT &F, BBtoBBMap *ShortCut) {
+  using FuncPtrT = typename std::add_pointer<FuncT>::type;
+
+  BlockT *entry = GraphTraits<FuncPtrT>::getEntryNode(&F);
+  DomTreeNodeT *N = DT->getNode(entry);
+
+  // Iterate over the dominance tree in post order to start with the small
+  // regions from the bottom of the dominance tree.  If the small regions are
+  // detected first, detection of bigger regions is faster, as we can jump
+  // over the small regions.
+  for (auto DomNode : post_order(N))
+    findRegionsWithEntry(DomNode->getBlock(), ShortCut);
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getTopMostParent(RegionT *region) {
+  while (region->getParent())
+    region = region->getParent();
+
+  return region;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::buildRegionsTree(DomTreeNodeT *N, RegionT *region) {
+  BlockT *BB = N->getBlock();
+
+  // We have passed the region's exit, so move up to the parent region.
+  while (BB == region->getExit())
+    region = region->getParent();
+
+  typename BBtoRegionMap::iterator it = BBtoRegion.find(BB);
+
+  // This basic block is a start block of a region. It is already in the
+  // BBtoRegion relation. Only the child basic blocks have to be updated.
+  if (it != BBtoRegion.end()) {
+    RegionT *newRegion = it->second;
+    region->addSubRegion(getTopMostParent(newRegion));
+    region = newRegion;
+  } else {
+    BBtoRegion[BB] = region;
+  }
+
+  for (DomTreeNodeBase<BlockT> *C : *N) {
+    buildRegionsTree(C, region);
+  }
+}
+
+#ifdef EXPENSIVE_CHECKS
+template <class Tr>
+bool RegionInfoBase<Tr>::VerifyRegionInfo = true;
+#else
+template <class Tr>
+bool RegionInfoBase<Tr>::VerifyRegionInfo = false;
+#endif
+
+template <class Tr>
+typename Tr::RegionT::PrintStyle RegionInfoBase<Tr>::printStyle =
+    RegionBase<Tr>::PrintNone;
+
+template <class Tr>
+void RegionInfoBase<Tr>::print(raw_ostream &OS) const {
+  OS << "Region tree:\n";
+  TopLevelRegion->print(OS, true, 0, printStyle);
+  OS << "End region tree\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class Tr>
+void RegionInfoBase<Tr>::dump() const { print(dbgs()); }
+#endif
+
+template <class Tr>
+void RegionInfoBase<Tr>::releaseMemory() {
+  BBtoRegion.clear();
+  if (TopLevelRegion)
+    delete TopLevelRegion;
+  TopLevelRegion = nullptr;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::verifyAnalysis() const {
+  // Only verify regions if explicitly activated using EXPENSIVE_CHECKS or
+  // -verify-region-info.
+  if (!RegionInfoBase<Tr>::VerifyRegionInfo)
+    return;
+
+  TopLevelRegion->verifyRegionNest();
+
+  verifyBBMap(TopLevelRegion);
+}
+
+// Region pass manager support.
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getRegionFor(BlockT *BB) const {
+  typename BBtoRegionMap::const_iterator I = BBtoRegion.find(BB);
+  return I != BBtoRegion.end() ? I->second : nullptr;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::setRegionFor(BlockT *BB, RegionT *R) {
+  BBtoRegion[BB] = R;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::operator[](BlockT *BB) const {
+  return getRegionFor(BB);
+}
+
+template <class Tr>
+typename RegionInfoBase<Tr>::BlockT *
+RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
+  BlockT *Exit = nullptr;
+
+  while (true) {
+    // Get largest region that starts at BB.
+    RegionT *R = getRegionFor(BB);
+    while (R && R->getParent() && R->getParent()->getEntry() == BB)
+      R = R->getParent();
+
+    // Get the single exit of BB.
+    if (R && R->getEntry() == BB)
+      Exit = R->getExit();
+    else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB))
+      Exit = *BlockTraits::child_begin(BB);
+    else // No single exit exists.
+      return Exit;
+
+    // Get largest region that starts at Exit.
+    RegionT *ExitR = getRegionFor(Exit);
+    while (ExitR && ExitR->getParent() &&
+           ExitR->getParent()->getEntry() == Exit)
+      ExitR = ExitR->getParent();
+
+    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(Exit),
+                                   InvBlockTraits::child_end(Exit))) {
+      if (!R->contains(Pred) && !ExitR->contains(Pred))
+        break;
+    }
+
+    // This stops infinite cycles.
+    if (DT->dominates(Exit, BB))
+      break;
+
+    BB = Exit;
+  }
+
+  return Exit;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getCommonRegion(RegionT *A,
+                                                          RegionT *B) const {
+  assert(A && B && "One of the Regions is NULL");
+
+  if (A->contains(B))
+    return A;
+
+  while (!B->contains(A))
+    B = B->getParent();
+
+  return B;
+}
+
+template <class Tr>
+typename Tr::RegionT *
+RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const {
+  RegionT *ret = Regions.back();
+  Regions.pop_back();
+
+  for (RegionT *R : Regions)
+    ret = getCommonRegion(ret, R);
+
+  return ret;
+}
+
+template <class Tr>
+typename Tr::RegionT *
+RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const {
+  RegionT *ret = getRegionFor(BBs.back());
+  BBs.pop_back();
+
+  for (BlockT *BB : BBs)
+    ret = getCommonRegion(ret, getRegionFor(BB));
+
+  return ret;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::calculate(FuncT &F) {
+  using FuncPtrT = typename std::add_pointer<FuncT>::type;
+
+  // ShortCut is a map in which, for every BB, the exit of the largest region
+  // starting at BB is stored. These regions can be treated as single BBs.
+  // This improves performance on linear CFGs.
+  BBtoBBMap ShortCut;
+
+  scanForRegions(F, &ShortCut);
+  BlockT *BB = GraphTraits<FuncPtrT>::getEntryNode(&F);
+  buildRegionsTree(DT->getNode(BB), TopLevelRegion);
+}
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_ANALYSIS_REGIONINFOIMPL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionIterator.h b/linux-x64/clang/include/llvm/Analysis/RegionIterator.h
new file mode 100644
index 0000000..4f823cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/RegionIterator.h
@@ -0,0 +1,360 @@
+//===- RegionIterator.h - Iterators to iterate over Regions -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines the iterators to iterate over the elements of a Region.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONITERATOR_H
+#define LLVM_ANALYSIS_REGIONITERATOR_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/IR/CFG.h"
+#include <cassert>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+class BasicBlock;
+
+//===----------------------------------------------------------------------===//
+/// @brief Hierarchical RegionNode successor iterator.
+///
+/// This iterator iterates over all successors of a RegionNode.
+///
+/// For a BasicBlock RegionNode it skips all BasicBlocks that are not part of
+/// the parent Region.  Furthermore for BasicBlocks that start a subregion, a
+/// RegionNode representing the subregion is returned.
+///
+/// For a subregion RegionNode there is just one successor: the RegionNode
+/// representing the exit of the subregion.
+template <class NodeRef, class BlockT, class RegionT>
+class RNSuccIterator
+    : public std::iterator<std::forward_iterator_tag, NodeRef> {
+  using super = std::iterator<std::forward_iterator_tag, NodeRef>;
+  using BlockTraits = GraphTraits<BlockT *>;
+  using SuccIterTy = typename BlockTraits::ChildIteratorType;
+
+  // The iterator works in two modes: BB mode and region mode.
+  enum ItMode {
+    // In BB mode it returns all successors of this BasicBlock as its
+    // successors.
+    ItBB,
+    // In region mode there is only one successor: the RegionNode mapping
+    // to the exit block of the RegionNode.
+    ItRgBegin, // At the beginning of the regionnode successor.
+    ItRgEnd    // At the end of the regionnode successor.
+  };
+
+  static_assert(std::is_pointer<NodeRef>::value,
+                "FIXME: Currently RNSuccIterator only supports NodeRef as "
+                "pointers due to the use of pointer-specific data structures "
+                "(e.g. PointerIntPair and SmallPtrSet) internally. Generalize "
+                "it to support non-pointer types");
+
+  // Use two bits to represent the iterator mode.
+  PointerIntPair<NodeRef, 2, ItMode> Node;
+
+  // The block successor iterator.
+  SuccIterTy BItor;
+
+  // advanceRegionSucc - A region node has only one successor. It reaches end
+  // once we advance it.
+  void advanceRegionSucc() {
+    assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
+    Node.setInt(ItRgEnd);
+  }
+
+  NodeRef getNode() const { return Node.getPointer(); }
+
+  // isRegionMode - Is the current iterator in region mode?
+  bool isRegionMode() const { return Node.getInt() != ItBB; }
+
+  // Get the immediate successor. This function may return a BasicBlock
+  // RegionNode or a subregion RegionNode.
+  NodeRef getISucc(BlockT *BB) const {
+    NodeRef succ;
+    succ = getNode()->getParent()->getNode(BB);
+    assert(succ && "BB not in Region or entered subregion!");
+    return succ;
+  }
+
+  // getRegionSucc - Return the successor basic block of a SubRegion RegionNode.
+  inline BlockT* getRegionSucc() const {
+    assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
+    return getNode()->template getNodeAs<RegionT>()->getExit();
+  }
+
+  // isExit - Is this the exit BB of the Region?
+  inline bool isExit(BlockT* BB) const {
+    return getNode()->getParent()->getExit() == BB;
+  }
+
+public:
+  using Self = RNSuccIterator<NodeRef, BlockT, RegionT>;
+  using value_type = typename super::value_type;
+
+  /// @brief Create begin iterator of a RegionNode.
+  inline RNSuccIterator(NodeRef node)
+      : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
+        BItor(BlockTraits::child_begin(node->getEntry())) {
+    // Skip the exit block
+    if (!isRegionMode())
+      while (BlockTraits::child_end(node->getEntry()) != BItor && isExit(*BItor))
+        ++BItor;
+
+    if (isRegionMode() && isExit(getRegionSucc()))
+      advanceRegionSucc();
+  }
+
+  /// @brief Create an end iterator.
+  inline RNSuccIterator(NodeRef node, bool)
+      : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
+        BItor(BlockTraits::child_end(node->getEntry())) {}
+
+  inline bool operator==(const Self& x) const {
+    assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
+    if (isRegionMode())
+      return Node.getInt() == x.Node.getInt();
+    else
+      return BItor == x.BItor;
+  }
+
+  inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+  inline value_type operator*() const {
+    BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
+    assert(!isExit(BB) && "Iterator out of range!");
+    return getISucc(BB);
+  }
+
+  inline Self& operator++() {
+    if(isRegionMode()) {
+      // The Region only has 1 successor.
+      advanceRegionSucc();
+    } else {
+      // Skip the exit.
+      do
+        ++BItor;
+      while (BItor != BlockTraits::child_end(getNode()->getEntry())
+          && isExit(*BItor));
+    }
+    return *this;
+  }
+
+  inline Self operator++(int) {
+    Self tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// @brief Flat RegionNode iterator.
+///
+/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
+/// are contained in the Region and its subregions. This is close to a virtual
+/// control flow graph of the Region.
+template <class NodeRef, class BlockT, class RegionT>
+class RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>
+    : public std::iterator<std::forward_iterator_tag, NodeRef> {
+  using super = std::iterator<std::forward_iterator_tag, NodeRef>;
+  using BlockTraits = GraphTraits<BlockT *>;
+  using SuccIterTy = typename BlockTraits::ChildIteratorType;
+
+  NodeRef Node;
+  SuccIterTy Itor;
+
+public:
+  using Self = RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;
+  using value_type = typename super::value_type;
+
+  /// @brief Create the iterator from a RegionNode.
+  ///
+  /// Note that the incoming node must be a BB node, otherwise it will trigger
+  /// an assertion when we try to get a BasicBlock.
+  inline RNSuccIterator(NodeRef node)
+      : Node(node), Itor(BlockTraits::child_begin(node->getEntry())) {
+    assert(!Node->isSubRegion() &&
+           "Subregion node not allowed in flat iterating mode!");
+    assert(Node->getParent() && "A BB node must have a parent!");
+
+    // Skip the exit block of the iterating region.
+    while (BlockTraits::child_end(Node->getEntry()) != Itor &&
+           Node->getParent()->getExit() == *Itor)
+      ++Itor;
+  }
+
+  /// @brief Create an end iterator
+  inline RNSuccIterator(NodeRef node, bool)
+      : Node(node), Itor(BlockTraits::child_end(node->getEntry())) {
+    assert(!Node->isSubRegion() &&
+           "Subregion node not allowed in flat iterating mode!");
+  }
+
+  inline bool operator==(const Self& x) const {
+    assert(Node->getParent() == x.Node->getParent()
+           && "Cannot compare iterators of different regions!");
+
+    return Itor == x.Itor && Node == x.Node;
+  }
+
+  inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+  inline value_type operator*() const {
+    BlockT *BB = *Itor;
+
+    // Get the iterating region.
+    RegionT *Parent = Node->getParent();
+
+    // The only case in which a successor reaches outside the region is when
+    // it reaches the region's exit block.
+    assert(Parent->getExit() != BB && "iterator out of range!");
+
+    return Parent->getBBNode(BB);
+  }
+
+  inline Self& operator++() {
+    // Skip the exit block of the iterating region.
+    do
+      ++Itor;
+    while (Itor != BlockTraits::child_end(Node->getEntry())
+        && Node->getParent()->getExit() == *Itor);
+
+    return *this;
+  }
+
+  inline Self operator++(int) {
+    Self tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+template <class NodeRef, class BlockT, class RegionT>
+inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_begin(NodeRef Node) {
+  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node);
+}
+
+template <class NodeRef, class BlockT, class RegionT>
+inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_end(NodeRef Node) {
+  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node, true);
+}
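+
+// A minimal usage sketch (illustrative only, not part of the original
+// header; `N` is assumed to be a RegionNode* from an existing RegionInfo):
+// \code
+//   // Visit each successor RegionNode of N. The iterator automatically
+//   // skips the exit block of N's parent region.
+//   for (auto I = succ_begin<RegionNode *, BasicBlock, Region>(N),
+//             E = succ_end<RegionNode *, BasicBlock, Region>(N);
+//        I != E; ++I)
+//     if ((*I)->isSubRegion())
+//       ; // this successor stands for a whole nested region
+// \endcode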
+
+//===--------------------------------------------------------------------===//
+// RegionNode GraphTraits specialization so the basic blocks in the region can
+// be iterated by generic graph iterators.
+//
+// NodeT can be either a region node or a const region node; otherwise
+// child_begin and child_end fail.
+
+#define RegionNodeGraphTraits(NodeT, BlockT, RegionT)                          \
+  template <> struct GraphTraits<NodeT *> {                                    \
+    using NodeRef = NodeT *;                                                   \
+    using ChildIteratorType = RNSuccIterator<NodeRef, BlockT, RegionT>;        \
+    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
+    static inline ChildIteratorType child_begin(NodeRef N) {                   \
+      return RNSuccIterator<NodeRef, BlockT, RegionT>(N);                      \
+    }                                                                          \
+    static inline ChildIteratorType child_end(NodeRef N) {                     \
+      return RNSuccIterator<NodeRef, BlockT, RegionT>(N, true);                \
+    }                                                                          \
+  };                                                                           \
+  template <> struct GraphTraits<FlatIt<NodeT *>> {                            \
+    using NodeRef = NodeT *;                                                   \
+    using ChildIteratorType =                                                  \
+        RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;                      \
+    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
+    static inline ChildIteratorType child_begin(NodeRef N) {                   \
+      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N);              \
+    }                                                                          \
+    static inline ChildIteratorType child_end(NodeRef N) {                     \
+      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N, true);        \
+    }                                                                          \
+  }
+
+#define RegionGraphTraits(RegionT, NodeT)                                      \
+  template <> struct GraphTraits<RegionT *> : public GraphTraits<NodeT *> {    \
+    using nodes_iterator = df_iterator<NodeRef>;                               \
+    static NodeRef getEntryNode(RegionT *R) {                                  \
+      return R->getNode(R->getEntry());                                        \
+    }                                                                          \
+    static nodes_iterator nodes_begin(RegionT *R) {                            \
+      return nodes_iterator::begin(getEntryNode(R));                           \
+    }                                                                          \
+    static nodes_iterator nodes_end(RegionT *R) {                              \
+      return nodes_iterator::end(getEntryNode(R));                             \
+    }                                                                          \
+  };                                                                           \
+  template <>                                                                  \
+  struct GraphTraits<FlatIt<RegionT *>>                                        \
+      : public GraphTraits<FlatIt<NodeT *>> {                                  \
+    using nodes_iterator =                                                     \
+        df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,          \
+                    GraphTraits<FlatIt<NodeRef>>>;                             \
+    static NodeRef getEntryNode(RegionT *R) {                                  \
+      return R->getBBNode(R->getEntry());                                      \
+    }                                                                          \
+    static nodes_iterator nodes_begin(RegionT *R) {                            \
+      return nodes_iterator::begin(getEntryNode(R));                           \
+    }                                                                          \
+    static nodes_iterator nodes_end(RegionT *R) {                              \
+      return nodes_iterator::end(getEntryNode(R));                             \
+    }                                                                          \
+  }
+
+RegionNodeGraphTraits(RegionNode, BasicBlock, Region);
+RegionNodeGraphTraits(const RegionNode, BasicBlock, Region);
+
+RegionGraphTraits(Region, RegionNode);
+RegionGraphTraits(const Region, const RegionNode);
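+
+// With these specializations in place, generic graph algorithms apply to
+// regions. A hedged sketch (illustrative only; `R` is assumed to be a valid
+// Region*):
+// \code
+//   // Walk the RegionNodes of R depth-first. Sub-regions appear as single
+//   // nodes; use GraphTraits<FlatIt<Region *>> to see through them.
+//   for (RegionNode *RN : depth_first(R))
+//     if (!RN->isSubRegion())
+//       ; // RN wraps a single BasicBlock of R
+// \endcode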
+
+template <> struct GraphTraits<RegionInfo*>
+  : public GraphTraits<FlatIt<RegionNode*>> {
+  using nodes_iterator =
+      df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+                  GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(RegionInfo *RI) {
+    return GraphTraits<FlatIt<Region*>>::getEntryNode(RI->getTopLevelRegion());
+  }
+
+  static nodes_iterator nodes_begin(RegionInfo* RI) {
+    return nodes_iterator::begin(getEntryNode(RI));
+  }
+
+  static nodes_iterator nodes_end(RegionInfo *RI) {
+    return nodes_iterator::end(getEntryNode(RI));
+  }
+};
+
+template <> struct GraphTraits<RegionInfoPass*>
+  : public GraphTraits<RegionInfo *> {
+  using nodes_iterator =
+      df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+                  GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(RegionInfoPass *RI) {
+    return GraphTraits<RegionInfo*>::getEntryNode(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_begin(RegionInfoPass* RI) {
+    return GraphTraits<RegionInfo*>::nodes_begin(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_end(RegionInfoPass *RI) {
+    return GraphTraits<RegionInfo*>::nodes_end(&RI->getRegionInfo());
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_REGIONITERATOR_H
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionPass.h b/linux-x64/clang/include/llvm/Analysis/RegionPass.h
new file mode 100644
index 0000000..515b362
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/RegionPass.h
@@ -0,0 +1,131 @@
+//===- RegionPass.h - RegionPass class --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegionPass class. All region-based analysis,
+// optimization and transformation passes are derived from RegionPass.
+// This class is implemented following some of the ideas of the LoopPass class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONPASS_H
+#define LLVM_ANALYSIS_REGIONPASS_H
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/Pass.h"
+#include <deque>
+
+namespace llvm {
+
+class RGPassManager;
+class Function;
+
+//===----------------------------------------------------------------------===//
+/// @brief A pass that runs on each Region in a function.
+///
+/// RegionPass is managed by RGPassManager.
+class RegionPass : public Pass {
+public:
+  explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}
+
+  //===--------------------------------------------------------------------===//
+  /// @name To be implemented by every RegionPass
+  ///
+  //@{
+  /// @brief Run the pass on a specific Region
+  ///
+  /// Accessing regions not contained in the current region is not allowed.
+  ///
+  /// @param R The region this pass is run on.
+  /// @param RGM The RegionPassManager that manages this Pass.
+  ///
+  /// @return True if the pass modifies this Region.
+  virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;
+
+  /// @brief Get a pass to print the LLVM IR in the region.
+  ///
+  /// @param O      The output stream to print the Region.
+  /// @param Banner The banner to separate different printed passes.
+  ///
+  /// @return The pass to print the LLVM IR in the region.
+  Pass *createPrinterPass(raw_ostream &O,
+                          const std::string &Banner) const override;
+
+  using llvm::Pass::doInitialization;
+  using llvm::Pass::doFinalization;
+
+  virtual bool doInitialization(Region *R, RGPassManager &RGM) { return false; }
+  virtual bool doFinalization() { return false; }
+  //@}
+
+  //===--------------------------------------------------------------------===//
+  /// @name PassManager API
+  ///
+  //@{
+  void preparePassManager(PMStack &PMS) override;
+
+  void assignPassManager(PMStack &PMS,
+                         PassManagerType PMT = PMT_RegionPassManager) override;
+
+  PassManagerType getPotentialPassManagerType() const override {
+    return PMT_RegionPassManager;
+  }
+  //@}
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when optimization bisect is over the limit.
+  bool skipRegion(Region &R) const;
+};
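+
+// A minimal sketch of a concrete RegionPass (illustrative only; the pass
+// name and registration boilerplate below are assumptions, not part of this
+// header):
+// \code
+//   struct CountBlocksRegionPass : public RegionPass {
+//     static char ID;
+//     CountBlocksRegionPass() : RegionPass(ID) {}
+//
+//     bool runOnRegion(Region *R, RGPassManager &RGM) override {
+//       unsigned NumBBs = std::distance(R->block_begin(), R->block_end());
+//       (void)NumBBs;  // analysis only
+//       return false;  // the region was not modified
+//     }
+//   };
+//   char CountBlocksRegionPass::ID = 0;
+// \endcode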
+
+/// @brief The pass manager to schedule RegionPasses.
+class RGPassManager : public FunctionPass, public PMDataManager {
+  std::deque<Region*> RQ;
+  bool skipThisRegion;
+  bool redoThisRegion;
+  RegionInfo *RI;
+  Region *CurrentRegion;
+
+public:
+  static char ID;
+  explicit RGPassManager();
+
+  /// @brief Execute all of the passes scheduled for execution.
+  ///
+  /// @return True if any of the passes modifies the function.
+  bool runOnFunction(Function &F) override;
+
+  /// Pass Manager itself does not invalidate any analysis info.
+  /// RGPassManager needs RegionInfo.
+  void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+  StringRef getPassName() const override { return "Region Pass Manager"; }
+
+  PMDataManager *getAsPMDataManager() override { return this; }
+  Pass *getAsPass() override { return this; }
+
+  /// @brief Print passes managed by this manager.
+  void dumpPassStructure(unsigned Offset) override;
+
+  /// @brief Get passes contained by this manager.
+  Pass *getContainedPass(unsigned N) {
+    assert(N < PassVector.size() && "Pass number out of range!");
+    Pass *FP = static_cast<Pass *>(PassVector[N]);
+    return FP;
+  }
+
+  PassManagerType getPassManagerType() const override {
+    return PMT_RegionPassManager;
+  }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h b/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h
new file mode 100644
index 0000000..8f0035c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h
@@ -0,0 +1,71 @@
+//===-- RegionPrinter.h - Region printer external interface -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the region printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONPRINTER_H
+#define LLVM_ANALYSIS_REGIONPRINTER_H
+
+namespace llvm {
+  class FunctionPass;
+  class Function;
+  class RegionInfo;
+
+  FunctionPass *createRegionViewerPass();
+  FunctionPass *createRegionOnlyViewerPass();
+  FunctionPass *createRegionPrinterPass();
+  FunctionPass *createRegionOnlyPrinterPass();
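+
+  // A hedged usage sketch (illustrative only; `M` is an assumed llvm::Module
+  // and the legacy pass manager is used):
+  // \code
+  //   legacy::PassManager PM;
+  //   PM.add(createRegionPrinterPass());     // GraphViz dump with instructions
+  //   PM.add(createRegionOnlyPrinterPass()); // GraphViz dump, blocks only
+  //   PM.run(M);
+  // \endcode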
+
+#ifndef NDEBUG
+  /// @brief Open a viewer to display the GraphViz visualization of the analysis
+  /// result.
+  ///
+  /// Practical to call in the debugger.
+  /// Includes the instructions in each BasicBlock.
+  ///
+  /// @param RI The analysis to display.
+  void viewRegion(llvm::RegionInfo *RI);
+
+  /// @brief Analyze the regions of a function and open its GraphViz
+  /// visualization in a viewer.
+  ///
+  /// Useful to call in the debugger.
+  /// Includes the instructions in each BasicBlock.
+  /// The result of a new analysis may differ from the RegionInfo the pass
+  /// manager currently holds.
+  ///
+  /// @param F Function to analyze.
+  void viewRegion(const llvm::Function *F);
+
+  /// @brief Open a viewer to display the GraphViz visualization of the analysis
+  /// result.
+  ///
+  /// Useful to call in the debugger.
+  /// Shows only the BasicBlock names without their instructions.
+  ///
+  /// @param RI The analysis to display.
+  void viewRegionOnly(llvm::RegionInfo *RI);
+
+  /// @brief Analyze the regions of a function and open its GraphViz
+  /// visualization in a viewer.
+  ///
+  /// Useful to call in the debugger.
+  /// Shows only the BasicBlock names without their instructions.
+  /// The result of a new analysis may differ from the RegionInfo the pass
+  /// manager currently holds.
+  ///
+  /// @param F Function to analyze.
+  void viewRegionOnly(const llvm::Function *F);
+#endif
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h
new file mode 100644
index 0000000..7a43b81
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h
@@ -0,0 +1,1980 @@
+//===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The ScalarEvolution class is an LLVM pass which can be used to analyze and
+// categorize scalar expressions in loops.  It specializes in recognizing
+// general induction variables, representing them with the abstract and opaque
+// SCEV class.  Given this analysis, trip counts of loops and other important
+// properties can be obtained.
+//
+// This analysis is primarily useful for induction variable substitution and
+// strength reduction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
+#define LLVM_ANALYSIS_SCALAREVOLUTION_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class Constant;
+class ConstantInt;
+class DataLayout;
+class DominatorTree;
+class GEPOperator;
+class Instruction;
+class LLVMContext;
+class raw_ostream;
+class ScalarEvolution;
+class SCEVAddRecExpr;
+class SCEVUnknown;
+class StructType;
+class TargetLibraryInfo;
+class Type;
+class Value;
+
+/// This class represents an analyzed expression in the program.  These are
+/// opaque objects that the client is not allowed to do much with directly.
+///
+class SCEV : public FoldingSetNode {
+  friend struct FoldingSetTrait<SCEV>;
+
+  /// A reference to an Interned FoldingSetNodeID for this node.  The
+  /// ScalarEvolution's BumpPtrAllocator holds the data.
+  FoldingSetNodeIDRef FastID;
+
+  // The SCEV baseclass this node corresponds to
+  const unsigned short SCEVType;
+
+protected:
+  /// This field is initialized to zero and may be used in subclasses to store
+  /// miscellaneous information.
+  unsigned short SubclassData = 0;
+
+public:
+  /// NoWrapFlags are bitfield indices into SubclassData.
+  ///
+  /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
+  /// no-signed-wrap <NSW> properties, which are derived from the IR
+  /// operator. NSW is a misnomer that we use to mean no signed overflow or
+  /// underflow.
+  ///
+  /// AddRec expressions may have a no-self-wraparound <NW> property if, in
+  /// the integer domain, abs(step) * max-iteration(loop) <=
+  /// unsigned-max(bitwidth).  This means that the recurrence will never reach
+  /// its start value if the step is non-zero.  Computing the same value on
+  /// each iteration is not considered wrapping, and recurrences with step = 0
+  /// are trivially <NW>.  <NW> is independent of the sign of step and the
+  /// value the add recurrence starts with.
+  ///
+  /// Note that NUW and NSW are also valid properties of a recurrence, and
+  /// either implies NW. For convenience, NW will be set for a recurrence
+  /// whenever either NUW or NSW are set.
+  enum NoWrapFlags {
+    FlagAnyWrap = 0,    // No guarantee.
+    FlagNW = (1 << 0),  // No self-wrap.
+    FlagNUW = (1 << 1), // No unsigned wrap.
+    FlagNSW = (1 << 2), // No signed wrap.
+    NoWrapMask = (1 << 3) - 1
+  };
+
+  explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy)
+      : FastID(ID), SCEVType(SCEVTy) {}
+  SCEV(const SCEV &) = delete;
+  SCEV &operator=(const SCEV &) = delete;
+
+  unsigned getSCEVType() const { return SCEVType; }
+
+  /// Return the LLVM type of this SCEV expression.
+  Type *getType() const;
+
+  /// Return true if the expression is a constant zero.
+  bool isZero() const;
+
+  /// Return true if the expression is a constant one.
+  bool isOne() const;
+
+  /// Return true if the expression is a constant all-ones value.
+  bool isAllOnesValue() const;
+
+  /// Return true if the specified scev is negated, but not a constant.
+  bool isNonConstantNegative() const;
+
+  /// Print out the internal representation of this scalar to the specified
+  /// stream.  This should really only be used for debugging purposes.
+  void print(raw_ostream &OS) const;
+
+  /// This method is used for debugging.
+  void dump() const;
+};
+
+// Specialize FoldingSetTrait for SCEV to avoid needing to compute
+// temporary FoldingSetNodeID values.
+template <> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
+  static void Profile(const SCEV &X, FoldingSetNodeID &ID) { ID = X.FastID; }
+
+  static bool Equals(const SCEV &X, const FoldingSetNodeID &ID, unsigned IDHash,
+                     FoldingSetNodeID &TempID) {
+    return ID == X.FastID;
+  }
+
+  static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
+    return X.FastID.ComputeHash();
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
+  S.print(OS);
+  return OS;
+}
+
+/// An object of this class is returned by queries that could not be answered.
+/// For example, if you ask for the number of iterations of a linked-list
+/// traversal loop, you will get one of these.  None of the standard SCEV
+/// operations are valid on this class; it is just a marker.
+struct SCEVCouldNotCompute : public SCEV {
+  SCEVCouldNotCompute();
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const SCEV *S);
+};
+
+/// This class represents an assumption made using SCEV expressions which can
+/// be checked at run-time.
+class SCEVPredicate : public FoldingSetNode {
+  friend struct FoldingSetTrait<SCEVPredicate>;
+
+  /// A reference to an Interned FoldingSetNodeID for this node.  The
+  /// ScalarEvolution's BumpPtrAllocator holds the data.
+  FoldingSetNodeIDRef FastID;
+
+public:
+  enum SCEVPredicateKind { P_Union, P_Equal, P_Wrap };
+
+protected:
+  SCEVPredicateKind Kind;
+  ~SCEVPredicate() = default;
+  SCEVPredicate(const SCEVPredicate &) = default;
+  SCEVPredicate &operator=(const SCEVPredicate &) = default;
+
+public:
+  SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
+
+  SCEVPredicateKind getKind() const { return Kind; }
+
+  /// Returns the estimated complexity of this predicate.  This is roughly
+  /// measured in the number of run-time checks required.
+  virtual unsigned getComplexity() const { return 1; }
+
+  /// Returns true if the predicate is always true. This means that no
+  /// assumptions were made and nothing needs to be checked at run-time.
+  virtual bool isAlwaysTrue() const = 0;
+
+  /// Returns true if this predicate implies \p N.
+  virtual bool implies(const SCEVPredicate *N) const = 0;
+
+  /// Prints a textual representation of this predicate with an indentation of
+  /// \p Depth.
+  virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
+
+  /// Returns the SCEV to which this predicate applies, or nullptr if this is
+  /// a SCEVUnionPredicate.
+  virtual const SCEV *getExpr() const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
+  P.print(OS);
+  return OS;
+}
+
+// Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
+// temporary FoldingSetNodeID values.
+template <>
+struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
+  static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
+    ID = X.FastID;
+  }
+
+  static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
+                     unsigned IDHash, FoldingSetNodeID &TempID) {
+    return ID == X.FastID;
+  }
+
+  static unsigned ComputeHash(const SCEVPredicate &X,
+                              FoldingSetNodeID &TempID) {
+    return X.FastID.ComputeHash();
+  }
+};
+
+/// This class represents an assumption that two SCEV expressions are equal,
+/// and this can be checked at run-time.
+class SCEVEqualPredicate final : public SCEVPredicate {
+  /// We assume that LHS == RHS.
+  const SCEV *LHS;
+  const SCEV *RHS;
+
+public:
+  SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEV *LHS,
+                     const SCEV *RHS);
+
+  /// Implementation of the SCEVPredicate interface
+  bool implies(const SCEVPredicate *N) const override;
+  void print(raw_ostream &OS, unsigned Depth = 0) const override;
+  bool isAlwaysTrue() const override;
+  const SCEV *getExpr() const override;
+
+  /// Returns the left hand side of the equality.
+  const SCEV *getLHS() const { return LHS; }
+
+  /// Returns the right hand side of the equality.
+  const SCEV *getRHS() const { return RHS; }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const SCEVPredicate *P) {
+    return P->getKind() == P_Equal;
+  }
+};
+
+/// This class represents an assumption made on an AddRec expression. Given an
+/// affine AddRec expression {a,+,b}, we assume that it has the nssw or nusw
+/// flags (defined below) in the first X iterations of the loop, where X is a
+/// SCEV expression returned by getPredicatedBackedgeTakenCount.
+///
+/// Note that this does not imply that X is equal to the backedge taken
+/// count. This means that if we have a nusw predicate for i32 {0,+,1} with a
+/// predicated backedge taken count of X, we only guarantee that {0,+,1} has
+/// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
+/// have more than X iterations.
+class SCEVWrapPredicate final : public SCEVPredicate {
+public:
+  /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
+  /// for FlagNUSW. The increment is considered to be signed, and a + b
+  /// (where b is the increment) is considered to wrap if:
+  ///    zext(a + b) != zext(a) + sext(b)
+  ///
+  /// If Signed is a function that takes an n-bit tuple and maps to the
+  /// integer domain as the tuple's value interpreted as two's complement,
+  /// and Unsigned a function that takes an n-bit tuple and maps to the
+  /// integer domain as the base-two value of the input tuple, then a + b
+  /// has IncrementNUSW iff:
+  ///
+  /// 0 <= Unsigned(a) + Signed(b) < 2^n
+  ///
+  /// The IncrementNSSW flag has identical semantics to SCEV::FlagNSW.
+  ///
+  /// Note that the IncrementNUSW flag is not commutative: if base + inc
+  /// has IncrementNUSW, then inc + base doesn't necessarily have this
+  /// property. The reason for this is that this is used for sign/zero
+  /// extending affine AddRec SCEV expressions when a SCEVWrapPredicate is
+  /// assumed. A {base,+,inc} expression is already non-commutative with
+  /// regards to base and inc, since it is interpreted as:
+  ///     (((base + inc) + inc) + inc) ...
+  enum IncrementWrapFlags {
+    IncrementAnyWrap = 0,     // No guarantee.
+    IncrementNUSW = (1 << 0), // No unsigned with signed increment wrap.
+    IncrementNSSW = (1 << 1), // No signed with signed increment wrap
+                              // (equivalent to SCEV::FlagNSW)
+    IncrementNoWrapMask = (1 << 2) - 1
+  };
+
+  /// Convenient IncrementWrapFlags manipulation methods.
+  LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+  clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
+             SCEVWrapPredicate::IncrementWrapFlags OffFlags) {
+    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+    assert((OffFlags & IncrementNoWrapMask) == OffFlags &&
+           "Invalid flags value!");
+    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & ~OffFlags);
+  }
+
+  LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+  maskFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, int Mask) {
+    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+    assert((Mask & IncrementNoWrapMask) == Mask && "Invalid mask value!");
+
+    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & Mask);
+  }
+
+  LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+  setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
+           SCEVWrapPredicate::IncrementWrapFlags OnFlags) {
+    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+    assert((OnFlags & IncrementNoWrapMask) == OnFlags &&
+           "Invalid flags value!");
+
+    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags | OnFlags);
+  }
+
+  /// Returns the set of SCEVWrapPredicate no wrap flags implied by a
+  /// SCEVAddRecExpr.
+  LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+  getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE);
+
+private:
+  const SCEVAddRecExpr *AR;
+  IncrementWrapFlags Flags;
+
+public:
+  explicit SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
+                             const SCEVAddRecExpr *AR,
+                             IncrementWrapFlags Flags);
+
+  /// Returns the set of assumed no-overflow flags.
+  IncrementWrapFlags getFlags() const { return Flags; }
+
+  /// Implementation of the SCEVPredicate interface
+  const SCEV *getExpr() const override;
+  bool implies(const SCEVPredicate *N) const override;
+  void print(raw_ostream &OS, unsigned Depth = 0) const override;
+  bool isAlwaysTrue() const override;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const SCEVPredicate *P) {
+    return P->getKind() == P_Wrap;
+  }
+};
+
+/// This class represents a composition of other SCEV predicates, and is the
+/// class that most clients will interact with.  This is equivalent to a
+/// logical "AND" of all the predicates in the union.
+///
+/// NB! Unlike other SCEVPredicate sub-classes this class does not live in the
+/// ScalarEvolution::Preds folding set.  This is why the \c add function is sound.
+class SCEVUnionPredicate final : public SCEVPredicate {
+private:
+  using PredicateMap =
+      DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;
+
+  /// Vector with references to all predicates in this union.
+  SmallVector<const SCEVPredicate *, 16> Preds;
+
+  /// Maps SCEVs to predicates for quick look-ups.
+  PredicateMap SCEVToPreds;
+
+public:
+  SCEVUnionPredicate();
+
+  const SmallVectorImpl<const SCEVPredicate *> &getPredicates() const {
+    return Preds;
+  }
+
+  /// Adds a predicate to this union.
+  void add(const SCEVPredicate *N);
+
+  /// Returns a reference to a vector containing all predicates which apply to
+  /// \p Expr.
+  ArrayRef<const SCEVPredicate *> getPredicatesForExpr(const SCEV *Expr);
+
+  /// Implementation of the SCEVPredicate interface
+  bool isAlwaysTrue() const override;
+  bool implies(const SCEVPredicate *N) const override;
+  void print(raw_ostream &OS, unsigned Depth) const override;
+  const SCEV *getExpr() const override;
+
+  /// We estimate the complexity of a union predicate as the number of
+  /// predicates in the union.
+  unsigned getComplexity() const override { return Preds.size(); }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const SCEVPredicate *P) {
+    return P->getKind() == P_Union;
+  }
+};
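+
+// A hedged composition sketch (illustrative only; `P1` and `P2` are assumed
+// to be SCEVPredicate pointers produced elsewhere, e.g. by ScalarEvolution):
+// \code
+//   SCEVUnionPredicate Union;
+//   Union.add(P1);
+//   Union.add(P2); // a no-op if P2 is already implied by the union
+//   if (!Union.isAlwaysTrue())
+//     ; // run-time checks needed; cost grows with Union.getComplexity()
+// \endcode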
+
+struct ExitLimitQuery {
+  ExitLimitQuery(const Loop *L, BasicBlock *ExitingBlock, bool AllowPredicates)
+      : L(L), ExitingBlock(ExitingBlock), AllowPredicates(AllowPredicates) {}
+
+  const Loop *L;
+  BasicBlock *ExitingBlock;
+  bool AllowPredicates;
+};
+
+template <> struct DenseMapInfo<ExitLimitQuery> {
+  static inline ExitLimitQuery getEmptyKey() {
+    return ExitLimitQuery(nullptr, nullptr, true);
+  }
+
+  static inline ExitLimitQuery getTombstoneKey() {
+    return ExitLimitQuery(nullptr, nullptr, false);
+  }
+
+  static unsigned getHashValue(ExitLimitQuery Val) {
+    return hash_combine(hash_combine(Val.L, Val.ExitingBlock),
+                        Val.AllowPredicates);
+  }
+
+  static bool isEqual(ExitLimitQuery LHS, ExitLimitQuery RHS) {
+    return LHS.L == RHS.L && LHS.ExitingBlock == RHS.ExitingBlock &&
+           LHS.AllowPredicates == RHS.AllowPredicates;
+  }
+};
+
+/// The main scalar evolution driver. Because client code (intentionally)
+/// can't do much with the SCEV objects directly, it must ask this class
+/// for services.
+class ScalarEvolution {
+public:
+  /// An enum describing the relationship between a SCEV and a loop.
+  enum LoopDisposition {
+    LoopVariant,   ///< The SCEV is loop-variant (unknown).
+    LoopInvariant, ///< The SCEV is loop-invariant.
+    LoopComputable ///< The SCEV varies predictably with the loop.
+  };
+
+  /// An enum describing the relationship between a SCEV and a basic block.
+  enum BlockDisposition {
+    DoesNotDominateBlock,  ///< The SCEV does not dominate the block.
+    DominatesBlock,        ///< The SCEV dominates the block.
+    ProperlyDominatesBlock ///< The SCEV properly dominates the block.
+  };
+
+  /// Convenient NoWrapFlags manipulation that hides enum casts and is
+  /// visible in the ScalarEvolution name space.
+  LLVM_NODISCARD static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags,
+                                                    int Mask) {
+    return (SCEV::NoWrapFlags)(Flags & Mask);
+  }
+  LLVM_NODISCARD static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags,
+                                                   SCEV::NoWrapFlags OnFlags) {
+    return (SCEV::NoWrapFlags)(Flags | OnFlags);
+  }
+  LLVM_NODISCARD static SCEV::NoWrapFlags
+  clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
+    return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
+  }
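+
+  // For example (a hedged sketch): set and clear flags without enum casts:
+  // \code
+  //   SCEV::NoWrapFlags F =
+  //       ScalarEvolution::setFlags(SCEV::FlagAnyWrap, SCEV::FlagNUW);
+  //   F = ScalarEvolution::clearFlags(F, SCEV::FlagNUW); // FlagAnyWrap again
+  // \endcode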
+
+  ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
+                  DominatorTree &DT, LoopInfo &LI);
+  ScalarEvolution(ScalarEvolution &&Arg);
+  ~ScalarEvolution();
+
+  LLVMContext &getContext() const { return F.getContext(); }
+
+  /// Test if values of the given type are analyzable within the SCEV
+  /// framework. This primarily includes integer types, and it can optionally
+  /// include pointer types if the ScalarEvolution class has access to
+  /// target-specific information.
+  bool isSCEVable(Type *Ty) const;
+
+  /// Return the size in bits of the specified type, for which isSCEVable must
+  /// return true.
+  uint64_t getTypeSizeInBits(Type *Ty) const;
+
+  /// Return a type with the same bitwidth as the given type and which
+  /// represents how SCEV will treat the given type, for which isSCEVable must
+  /// return true. For pointer types, this is the pointer-sized integer type.
+  Type *getEffectiveSCEVType(Type *Ty) const;
+
+  /// Returns the wider type among {Ty1, Ty2}.
+  Type *getWiderType(Type *Ty1, Type *Ty2) const;
+
+  /// Return true if the SCEV is a scAddRecExpr or it contains
+  /// scAddRecExpr. The result will be cached in HasRecMap.
+  bool containsAddRecurrence(const SCEV *S);
+
+  /// Erase Value from ValueExprMap and ExprValueMap.
+  void eraseValueFromMap(Value *V);
+
+  /// Return a SCEV expression for the full generality of the specified
+  /// expression.
+  const SCEV *getSCEV(Value *V);
+
+  const SCEV *getConstant(ConstantInt *V);
+  const SCEV *getConstant(const APInt &Val);
+  const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+  const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+  const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
+  const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0);
+  const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
+    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+    return getAddExpr(Ops, Flags, Depth);
+  }
+  const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
+    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+    return getAddExpr(Ops, Flags, Depth);
+  }
+  const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0);
+  const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
+    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+    return getMulExpr(Ops, Flags, Depth);
+  }
+  const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
+    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+    return getMulExpr(Ops, Flags, Depth);
+  }
+  const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
+                            SCEV::NoWrapFlags Flags);
+  const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+                            const Loop *L, SCEV::NoWrapFlags Flags);
+  const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
+                            const Loop *L, SCEV::NoWrapFlags Flags) {
+    SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+    return getAddRecExpr(NewOp, L, Flags);
+  }
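+
+  // A hedged construction sketch (illustrative only; `SE` is this analysis
+  // and `V` an assumed SCEV-analyzable integer Value): build 2*V + 3.
+  // \code
+  //   const SCEV *S = SE.getSCEV(V);
+  //   const SCEV *Expr = SE.getAddExpr(
+  //       SE.getMulExpr(SE.getConstant(S->getType(), 2), S),
+  //       SE.getConstant(S->getType(), 3));
+  // \endcode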
+
+  /// Checks if \p SymbolicPHI can be rewritten as an AddRecExpr under some
+  /// Predicates. If successful return these <AddRecExpr, Predicates>;
+  /// The function is intended to be called from PSCEV (the caller will decide
+  /// whether to actually add the predicates and carry out the rewrites).
+  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+  createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
+
+  /// Returns an expression for a GEP
+  ///
+  /// \p GEP The GEP. The indices contained in the GEP itself are ignored,
+  /// instead we use IndexExprs.
+  /// \p IndexExprs The expressions for the indices.
+  const SCEV *getGEPExpr(GEPOperator *GEP,
+                         const SmallVectorImpl<const SCEV *> &IndexExprs);
+  const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+  const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+  const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
+  const SCEV *getUnknown(Value *V);
+  const SCEV *getCouldNotCompute();
+
+  /// Return a SCEV for the constant 0 of a specific type.
+  const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
+
+  /// Return a SCEV for the constant 1 of a specific type.
+  const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
+
+  /// Return an expression for sizeof AllocTy that is type IntTy
+  const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
+
+  /// Return an expression for offsetof on the given field with type IntTy
+  const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+
+  /// Return the SCEV object corresponding to -V.
+  const SCEV *getNegativeSCEV(const SCEV *V,
+                              SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+
+  /// Return the SCEV object corresponding to ~V.
+  const SCEV *getNotSCEV(const SCEV *V);
+
+  /// Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
+  const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                           unsigned Depth = 0);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type.  If the type must be extended, it is zero extended.
+  const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type.  If the type must be extended, it is sign extended.
+  const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type.  If the type must be extended, it is zero extended.  The
+  /// conversion must not be narrowing.
+  const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type.  If the type must be extended, it is sign extended.  The
+  /// conversion must not be narrowing.
+  const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type. If the type must be extended, it is extended with
+  /// unspecified bits. The conversion must not be narrowing.
+  const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
+
+  /// Return a SCEV corresponding to a conversion of the input value to the
+  /// specified type.  The conversion must not be widening.
+  const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
+
+  /// Promote the operands to the wider of the types using zero-extension, and
+  /// then perform a umax operation with them.
+  const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+
+  /// Promote the operands to the wider of the types using zero-extension, and
+  /// then perform a umin operation with them.
+  const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+
+  /// Transitively follow the chain of pointer-type operands until reaching a
+  /// SCEV that does not have a single pointer operand. This returns a
+  /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
+  /// cases do exist.
+  const SCEV *getPointerBase(const SCEV *V);
+
+  /// Return a SCEV expression for the specified value at the specified scope
+  /// in the program.  The L value specifies the loop nest in which to
+  /// evaluate the expression: null means the top level, and a non-null loop
+  /// means the scope immediately inside that loop.
+  ///
+  /// This method can be used to compute the exit value for a variable defined
+  /// in a loop by querying what the value will hold in the parent loop.
+  ///
+  /// In the case that a relevant loop exit value cannot be computed, the
+  /// original value V is returned.
+  const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
+
+  /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
+  const SCEV *getSCEVAtScope(Value *V, const Loop *L);
+
+  /// Test whether entry to the loop is protected by a conditional between LHS
+  /// and RHS.  This is used to help avoid max expressions in loop trip
+  /// counts, and to eliminate casts.
+  bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+                                const SCEV *LHS, const SCEV *RHS);
+
+  /// Test whether the backedge of the loop is protected by a conditional
+  /// between LHS and RHS.  This is used to eliminate casts.
+  bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+                                   const SCEV *LHS, const SCEV *RHS);
+
+  /// Returns the maximum trip count of the loop if it is a single-exit
+  /// loop and we can compute a small maximum for that loop.
+  ///
+  /// Implemented in terms of the \c getSmallConstantTripCount overload with
+  /// the single exiting block passed to it. See that routine for details.
+  unsigned getSmallConstantTripCount(const Loop *L);
+
+  /// Returns the maximum trip count of this loop as a normal unsigned
+  /// value. Returns 0 if the trip count is unknown or not constant. This
+  /// "trip count" assumes that control exits via ExitingBlock. More
+  /// precisely, it is the number of times that control may reach ExitingBlock
+  /// before taking the branch. For loops with multiple exits, it may not be
+  /// the number of times that the loop header executes if the loop exits
+  /// prematurely via another branch.
+  unsigned getSmallConstantTripCount(const Loop *L, BasicBlock *ExitingBlock);
+
+  /// Returns the upper bound of the loop trip count as a normal unsigned
+  /// value.
+  /// Returns 0 if the trip count is unknown or not constant.
+  unsigned getSmallConstantMaxTripCount(const Loop *L);
+
+  /// Returns the largest constant divisor of the trip count of the
+  /// loop if it is a single-exit loop and we can compute a small maximum for
+  /// that loop.
+  ///
+  /// Implemented in terms of the \c getSmallConstantTripMultiple overload with
+  /// the single exiting block passed to it. See that routine for details.
+  unsigned getSmallConstantTripMultiple(const Loop *L);
+
+  /// Returns the largest constant divisor of the trip count of this loop as a
+  /// normal unsigned value, if possible. This means that the actual trip
+  /// count is always a multiple of the returned value (don't forget the trip
+  /// count could very well be zero as well!). As explained in the comments
+  /// for getSmallConstantTripCount, this assumes that control exits the loop
+  /// via ExitingBlock.
+  unsigned getSmallConstantTripMultiple(const Loop *L,
+                                        BasicBlock *ExitingBlock);
+
+  /// Get the expression for the number of loop iterations for which this loop
+  /// is guaranteed not to exit via ExitingBlock. Otherwise return
+  /// SCEVCouldNotCompute.
+  const SCEV *getExitCount(const Loop *L, BasicBlock *ExitingBlock);
+
+  /// If the specified loop has a predictable backedge-taken count, return it,
+  /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count is
+  /// the number of times the loop header will be branched to from within the
+  /// loop, assuming there are no abnormal exits like exception throws. This is
+  /// one less than the trip count of the loop, since it doesn't count the first
+  /// iteration, when the header is branched to from outside the loop.
+  ///
+  /// Note that it is not valid to call this method on a loop without a
+  /// loop-invariant backedge-taken count (see
+  /// hasLoopInvariantBackedgeTakenCount).
+  const SCEV *getBackedgeTakenCount(const Loop *L);
+
+  /// Similar to getBackedgeTakenCount, except it will add a set of
+  /// SCEV predicates to Predicates that are required to be true in order for
+  /// the answer to be correct. Predicates can be checked with run-time
+  /// checks and can be used to perform loop versioning.
+  const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
+                                              SCEVUnionPredicate &Predicates);
+
+  /// When successful, this returns a SCEVConstant that is greater than or equal
+  /// to (i.e. a "conservative over-approximation" of) the value returned by
+  /// getBackedgeTakenCount.  If such a value cannot be computed, it returns the
+  /// SCEVCouldNotCompute object.
+  const SCEV *getMaxBackedgeTakenCount(const Loop *L);
+
+  /// Return true if the backedge taken count is either the value returned by
+  /// getMaxBackedgeTakenCount or zero.
+  bool isBackedgeTakenCountMaxOrZero(const Loop *L);
+
+  /// Return true if the specified loop has an analyzable loop-invariant
+  /// backedge-taken count.
+  bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
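+
+  // A hedged trip-count query sketch (illustrative only; `SE` and the loop
+  // `L` are assumptions):
+  // \code
+  //   if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
+  //     const SCEV *BTC = SE.getBackedgeTakenCount(L); // trip count - 1
+  //     (void)BTC;
+  //   }
+  //   unsigned TC = SE.getSmallConstantTripCount(L); // 0 if not a constant
+  // \endcode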
+
+  /// This method should be called by the client when it has changed a loop in
+  /// a way that may affect ScalarEvolution's ability to compute a trip count,
+  /// or if the loop is deleted.  This call is potentially expensive for large
+  /// loop bodies.
+  void forgetLoop(const Loop *L);
+
+  /// This method should be called by the client when it has changed a value
+  /// in a way that may affect its value, or which may disconnect it from a
+  /// def-use chain linking it to a loop.
+  void forgetValue(Value *V);
+
+  /// Called when the client has changed the disposition of values in
+  /// this loop.
+  ///
+  /// We don't have a way to invalidate per-loop dispositions. Clear and
+  /// recompute is simpler.
+  void forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); }
+
+  /// Determine the minimum number of zero bits that S is guaranteed to end in
+  /// (at every loop iteration).  It is, at the same time, the minimum number
+  /// of times S is divisible by 2.  For example, given {4,+,8} it returns 2.
+  /// If S is guaranteed to be 0, it returns the bitwidth of S.
+  uint32_t GetMinTrailingZeros(const SCEV *S);
+
+  /// Determine the unsigned range for a particular SCEV.
+  /// NOTE: This returns a copy of the reference returned by getRangeRef.
+  ConstantRange getUnsignedRange(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_UNSIGNED);
+  }
+
+  /// Determine the min of the unsigned range for a particular SCEV.
+  APInt getUnsignedRangeMin(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMin();
+  }
+
+  /// Determine the max of the unsigned range for a particular SCEV.
+  APInt getUnsignedRangeMax(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMax();
+  }
+
+  /// Determine the signed range for a particular SCEV.
+  /// NOTE: This returns a copy of the reference returned by getRangeRef.
+  ConstantRange getSignedRange(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_SIGNED);
+  }
+
+  /// Determine the min of the signed range for a particular SCEV.
+  APInt getSignedRangeMin(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMin();
+  }
+
+  /// Determine the max of the signed range for a particular SCEV.
+  APInt getSignedRangeMax(const SCEV *S) {
+    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMax();
+  }
+
+  /// Test if the given expression is known to be negative.
+  bool isKnownNegative(const SCEV *S);
+
+  /// Test if the given expression is known to be positive.
+  bool isKnownPositive(const SCEV *S);
+
+  /// Test if the given expression is known to be non-negative.
+  bool isKnownNonNegative(const SCEV *S);
+
+  /// Test if the given expression is known to be non-positive.
+  bool isKnownNonPositive(const SCEV *S);
+
+  /// Test if the given expression is known to be non-zero.
+  bool isKnownNonZero(const SCEV *S);
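+
+  // A hedged query sketch (illustrative only; `S`, `LHS` and `RHS` are
+  // assumed SCEVs obtained from this ScalarEvolution instance):
+  // \code
+  //   if (SE.getUnsignedRangeMax(S).ult(100))
+  //     ; // S is provably < 100 in the unsigned domain
+  //   if (SE.isKnownPredicate(ICmpInst::ICMP_SLT, LHS, RHS))
+  //     ; // LHS < RHS (signed) always holds
+  // \endcode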
+
+  /// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
+  /// \p S by substituting all AddRec sub-expressions related to loop \p L
+  /// with the initial value of that AddRec. The second is obtained from \p S
+  /// by substituting all AddRec sub-expressions related to loop \p L with the
+  /// post-increment value of that AddRec in the loop \p L. In both cases all
+  /// other AddRec sub-expressions (not related to \p L) remain the same.
+  /// If \p S contains a non-invariant unknown SCEV, the function returns the
+  /// CouldNotCompute SCEV in both elements of the pair.
+  /// For example, for SCEV S={0, +, 1}<L1> + {0, +, 1}<L2> and loop L=L1
+  /// the function returns pair:
+  /// first = {0, +, 1}<L2>
+  /// second = {1, +, 1}<L1> + {0, +, 1}<L2>
+  /// We can see that for the first AddRec sub-expression it was replaced with
+  /// 0 (initial value) for the first element and to {1, +, 1}<L1> (post
+  /// increment value) for the second one. In both cases AddRec expression
+  /// related to L2 remains the same.
+  std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
+                                                                const SCEV *S);
+
+  /// We'd like to check the predicate on every iteration of the most dominated
+  /// loop between loops used in LHS and RHS.
+  /// To do this we use the following list of steps:
+  /// 1. Collect the set S of all loops on which either LHS or RHS depends.
+  /// 2. If S is non-empty:
+  /// a. Let PD be the element of S which is dominated by all other elements.
+  /// b. Let E(LHS) be the value of LHS on entry to PD.
+  ///    To get E(LHS), we should just take LHS and replace all AddRecs that
+  ///    are attached to PD with their entry values.
+  ///    Define E(RHS) in the same way.
+  /// c. Let B(LHS) be the value of LHS on the backedge of PD.
+  ///    To get B(LHS), we should just take LHS and replace all AddRecs that
+  ///    are attached to PD with their backedge values.
+  ///    Define B(RHS) in the same way.
+  /// d. Note that E(LHS) and E(RHS) are automatically available on entry of PD,
+  ///    so we can assert on that.
+  /// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
+  ///                   isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
+  bool isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS,
+                           const SCEV *RHS);
+
+  /// Test if the given expression is known to satisfy the condition described
+  /// by Pred, LHS, and RHS.
+  bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+                        const SCEV *RHS);
+
+  /// Test if the condition described by Pred, LHS, RHS is known to be true on
+  /// every iteration of the loop of the recurrency LHS.
+  bool isKnownOnEveryIteration(ICmpInst::Predicate Pred,
+                               const SCEVAddRecExpr *LHS, const SCEV *RHS);
+
+  /// Return true if, for all loop invariant X, the predicate "LHS `Pred` X"
+  /// is monotonically increasing or decreasing.  In the former case set
+  /// `Increasing` to true and in the latter case set `Increasing` to false.
+  ///
+  /// A predicate is said to be monotonically increasing if it may go from
+  /// being false to being true as the loop iterates, but never the other way
+  /// around.  A predicate is said to be monotonically decreasing if it may go
+  /// from being true to being false as the loop iterates, but never the other
+  /// way around.
+  bool isMonotonicPredicate(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred,
+                            bool &Increasing);
+
+  /// Return true if the result of the predicate LHS `Pred` RHS is loop
+  /// invariant with respect to L.  Set InvariantPred, InvariantLHS and
+  /// InvariantRHS so that InvariantLHS `InvariantPred` InvariantRHS is the
+  /// loop invariant form of LHS `Pred` RHS.
+  bool isLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                const SCEV *RHS, const Loop *L,
+                                ICmpInst::Predicate &InvariantPred,
+                                const SCEV *&InvariantLHS,
+                                const SCEV *&InvariantRHS);
+
+  /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
+  /// iff any changes were made. If the operands are provably equal or
+  /// unequal, LHS and RHS are set to the same value and Pred is set to either
+  /// ICMP_EQ or ICMP_NE.
+  bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS,
+                            const SCEV *&RHS, unsigned Depth = 0);
+
+  /// Return the "disposition" of the given SCEV with respect to the given
+  /// loop.
+  LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+
+  /// Return true if the value of the given SCEV is unchanging in the
+  /// specified loop.
+  bool isLoopInvariant(const SCEV *S, const Loop *L);
+
+  /// Determine if the SCEV can be evaluated at loop's entry. It is true if it
+  /// doesn't depend on a SCEVUnknown of an instruction which is dominated by
+  /// the header of loop L.
+  bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);
+
+  /// Return true if the given SCEV changes value in a known way in the
+  /// specified loop.  This property being true implies that the value is
+  /// variant in the loop AND that we can emit an expression to compute the
+  /// value of the expression at any particular loop iteration.
+  bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+
+  /// Return the "disposition" of the given SCEV with respect to the given
+  /// block.
+  BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+  /// Return true if the elements that make up the given SCEV dominate the
+  /// specified basic block.
+  bool dominates(const SCEV *S, const BasicBlock *BB);
+
+  /// Return true if the elements that make up the given SCEV properly
+  /// dominate the specified basic block.
+  bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+
+  /// Test whether the given SCEV has Op as a direct or indirect operand.
+  bool hasOperand(const SCEV *S, const SCEV *Op) const;
+
+  /// Return the size of an element read or written by Inst.
+  const SCEV *getElementSize(Instruction *Inst);
+
+  /// Compute the array dimensions Sizes from the set of Terms extracted from
+  /// the memory access function of this SCEVAddRecExpr (second step of
+  /// delinearization).
+  void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
+                           SmallVectorImpl<const SCEV *> &Sizes,
+                           const SCEV *ElementSize);
+
+  void print(raw_ostream &OS) const;
+  void verify() const;
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+  /// Collect parametric terms occurring in step expressions (first step of
+  /// delinearization).
+  void collectParametricTerms(const SCEV *Expr,
+                              SmallVectorImpl<const SCEV *> &Terms);
+
+  /// Return in Subscripts the access functions for each dimension in Sizes
+  /// (third step of delinearization).
+  void computeAccessFunctions(const SCEV *Expr,
+                              SmallVectorImpl<const SCEV *> &Subscripts,
+                              SmallVectorImpl<const SCEV *> &Sizes);
+
+  /// Split the given SCEVAddRecExpr into two vectors of SCEVs representing
+  /// the subscripts and sizes of an array access.
+  ///
+  /// The delinearization is a 3 step process: the first two steps compute the
+  /// sizes of each subscript and the third step computes the access functions
+  /// for the delinearized array:
+  ///
+  /// 1. Find the terms in the step functions
+  /// 2. Compute the array size
+  /// 3. Compute the access function: divide the SCEV by the array size
+  ///    starting with the innermost dimensions found in step 2. The Quotient
+  ///    is the SCEV to be divided in the next step of the recursion. The
+  ///    Remainder is the subscript of the innermost dimension. Loop over all
+  ///    array dimensions computed in step 2.
+  ///
+  /// To compute a uniform array size for several memory accesses to the same
+  /// object, one can collect in step 1 all the step terms for all the memory
+  /// accesses, and compute in step 2 a unique array shape. This guarantees
+  /// that the array shape will be the same across all memory accesses.
+  ///
+  /// FIXME: We could derive the result of steps 1 and 2 from a description of
+  /// the array shape given in metadata.
+  ///
+  /// Example:
+  ///
+  /// A[][n][m]
+  ///
+  /// for i
+  ///   for j
+  ///     for k
+  ///       A[j+k][2i][5i] =
+  ///
+  /// The initial SCEV:
+  ///
+  /// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
+  ///
+  /// 1. Find the different terms in the step functions:
+  /// -> [2*m, 5, n*m, n*m]
+  ///
+  /// 2. Compute the array size: sort and unique them
+  /// -> [n*m, 2*m, 5]
+  /// find the GCD of all the terms = 1
+  /// divide by the GCD and erase constant terms
+  /// -> [n*m, 2*m]
+  /// GCD = m
+  /// divide by GCD -> [n, 2]
+  /// remove constant terms
+  /// -> [n]
+  /// size of the array is A[unknown][n][m]
+  ///
+  /// 3. Compute the access function
+  /// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
+  /// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
+  /// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
+  /// The remainder is the subscript of the innermost array dimension: [5i].
+  ///
+  /// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
+  /// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
+  /// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
+  /// The Remainder is the subscript of the next array dimension: [2i].
+  ///
+  /// The subscript of the outermost dimension is the Quotient: [j+k].
+  ///
+  /// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
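+  ///
+  /// A minimal usage sketch (illustrative; `SE`, `GEP` and `Inst` are assumed
+  /// to be a ScalarEvolution instance, the analyzed address computation and
+  /// its memory instruction):
+  ///
+  /// \code
+  ///   SmallVector<const SCEV *, 4> Subscripts, Sizes;
+  ///   SE.delinearize(SE.getSCEV(GEP), Subscripts, Sizes,
+  ///                  SE.getElementSize(Inst));
+  ///   // On success, Subscripts holds one access function per dimension and
+  ///   // Sizes the corresponding dimension sizes.
+  /// \endcode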
+  void delinearize(const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
+                   SmallVectorImpl<const SCEV *> &Sizes,
+                   const SCEV *ElementSize);
+
+  /// Return the DataLayout associated with the module this SCEV instance is
+  /// operating on.
+  const DataLayout &getDataLayout() const {
+    return F.getParent()->getDataLayout();
+  }
+
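+  /// Return a predicate expressing that \p LHS and \p RHS are equal.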
+  const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
+
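+  /// Return a predicate expressing that \p AR does not wrap in the ways
+  /// described by \p AddedFlags.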
+  const SCEVPredicate *
+  getWrapPredicate(const SCEVAddRecExpr *AR,
+                   SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
+
+  /// Re-writes the SCEV according to the Predicates in \p A.
+  const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
+                                    SCEVUnionPredicate &A);
+  /// Tries to convert the \p S expression to an AddRec expression,
+  /// adding additional predicates to \p Preds as required.
+  const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
+      const SCEV *S, const Loop *L,
+      SmallPtrSetImpl<const SCEVPredicate *> &Preds);
+
+private:
+  /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
+  /// Value is deleted.
+  class SCEVCallbackVH final : public CallbackVH {
+    ScalarEvolution *SE;
+
+    void deleted() override;
+    void allUsesReplacedWith(Value *New) override;
+
+  public:
+    SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
+  };
+
+  friend class SCEVCallbackVH;
+  friend class SCEVExpander;
+  friend class SCEVUnknown;
+
+  /// The function we are analyzing.
+  Function &F;
+
+  /// Does the module have any calls to the llvm.experimental.guard intrinsic
+  /// at all?  If this is false, we avoid doing work that will only help if
+  /// there are guards present in the IR.
+  bool HasGuards;
+
+  /// The target library information for the target we are targeting.
+  TargetLibraryInfo &TLI;
+
+  /// The tracker for @llvm.assume intrinsics in this function.
+  AssumptionCache &AC;
+
+  /// The dominator tree.
+  DominatorTree &DT;
+
+  /// The loop information for the function we are currently analyzing.
+  LoopInfo &LI;
+
+  /// This SCEV is used to represent unknown trip counts and other
+  /// unanalyzable values.
+  std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
+
+  /// The type for HasRecMap.
+  using HasRecMapType = DenseMap<const SCEV *, bool>;
+
+  /// This is a cache to record whether a SCEV contains any scAddRecExpr.
+  HasRecMapType HasRecMap;
+
+  /// The type for ExprValueMap.
+  using ValueOffsetPair = std::pair<Value *, ConstantInt *>;
+  using ExprValueMapType = DenseMap<const SCEV *, SetVector<ValueOffsetPair>>;
+
+  /// ExprValueMap -- This map records the original values from which
+  /// the SCEV expr is generated.
+  ///
+  /// We want to represent the mapping as SCEV -> ValueOffsetPair instead
+  /// of SCEV -> Value:
+  /// Suppose we know S1 expands to V1, and
+  ///  S1 = S2 + C_a
+  ///  S3 = S2 + C_b
+  /// where C_a and C_b are different SCEVConstants. Then we'd like to
+  /// expand S3 as V1 - C_a + C_b instead of expanding S2 literally.
+  /// It is helpful when S2 is a complex SCEV expr.
+  ///
+  /// In order to do that, we represent ExprValueMap as a mapping from
+  /// SCEV to ValueOffsetPair. We will save both S1->{V1, 0} and
+  /// S2->{V1, C_a} into the map when we create SCEV for V1. When S3
+  /// is expanded, it will first expand S2 to V1 - C_a because of
+  /// S2->{V1, C_a} in the map, then expand S3 to V1 - C_a + C_b.
+  ///
+  /// Note: S->{V, Offset} in the ExprValueMap means S can be expanded
+  /// to V - Offset.
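+  ///
+  /// As a concrete sketch: if V1 is `%a + %b + 4`, then S1 = (4 + %a + %b),
+  /// S2 = (%a + %b) and C_a = 4; a later S3 = S2 + 7 can then be expanded as
+  /// V1 - 4 + 7 without re-expanding %a + %b.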
+  ExprValueMapType ExprValueMap;
+
+  /// The type for ValueExprMap.
+  using ValueExprMapType =
+      DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>;
+
+  /// This is a cache of the values we have analyzed so far.
+  ValueExprMapType ValueExprMap;
+
+  /// Mark predicate values currently being processed by isImpliedCond.
+  SmallPtrSet<Value *, 6> PendingLoopPredicates;
+
+  /// Mark SCEVUnknown Phis currently being processed by getRangeRef.
+  SmallPtrSet<const PHINode *, 6> PendingPhiRanges;
+
+  /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
+  /// conditions dominating the backedge of a loop.
+  bool WalkingBEDominatingConds = false;
+
+  /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
+  /// predicate by splitting it into a set of independent predicates.
+  bool ProvingSplitPredicate = false;
+
+  /// Memoized values for the GetMinTrailingZeros method.
+  DenseMap<const SCEV *, uint32_t> MinTrailingZerosCache;
+
+  /// Return the Value set from which the SCEV expr is generated.
+  SetVector<ValueOffsetPair> *getSCEVValues(const SCEV *S);
+
+  /// Private helper method for the GetMinTrailingZeros method
+  uint32_t GetMinTrailingZerosImpl(const SCEV *S);
+
+  /// Information about the number of loop iterations for which a loop exit's
+  /// branch condition evaluates to the not-taken path.  This is a temporary
+  /// pair of exact and max expressions that are eventually summarized in
+  /// ExitNotTakenInfo and BackedgeTakenInfo.
+  struct ExitLimit {
+    const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
+    const SCEV *MaxNotTaken; // The exit is not taken at most this many times
+
+    // Not taken either exactly MaxNotTaken or zero times
+    bool MaxOrZero = false;
+
+    /// A set of predicate guards for this ExitLimit. The result is only valid
+    /// if all of the predicates in \c Predicates evaluate to 'true' at
+    /// run-time.
+    SmallPtrSet<const SCEVPredicate *, 4> Predicates;
+
+    void addPredicate(const SCEVPredicate *P) {
+      assert(!isa<SCEVUnionPredicate>(P) && "Only add leaf predicates here!");
+      Predicates.insert(P);
+    }
+
+    /*implicit*/ ExitLimit(const SCEV *E);
+
+    ExitLimit(
+        const SCEV *E, const SCEV *M, bool MaxOrZero,
+        ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList);
+
+    ExitLimit(const SCEV *E, const SCEV *M, bool MaxOrZero,
+              const SmallPtrSetImpl<const SCEVPredicate *> &PredSet);
+
+    ExitLimit(const SCEV *E, const SCEV *M, bool MaxOrZero);
+
+    /// Test whether this ExitLimit contains any computed information, or
+    /// whether it's all SCEVCouldNotCompute values.
+    bool hasAnyInfo() const {
+      return !isa<SCEVCouldNotCompute>(ExactNotTaken) ||
+             !isa<SCEVCouldNotCompute>(MaxNotTaken);
+    }
+
+    bool hasOperand(const SCEV *S) const;
+
+    /// Test whether this ExitLimit contains all information.
+    bool hasFullInfo() const {
+      return !isa<SCEVCouldNotCompute>(ExactNotTaken);
+    }
+  };
+
+  /// Information about the number of times a particular loop exit may be
+  /// reached before exiting the loop.
+  struct ExitNotTakenInfo {
+    PoisoningVH<BasicBlock> ExitingBlock;
+    const SCEV *ExactNotTaken;
+    std::unique_ptr<SCEVUnionPredicate> Predicate;
+
+    explicit ExitNotTakenInfo(PoisoningVH<BasicBlock> ExitingBlock,
+                              const SCEV *ExactNotTaken,
+                              std::unique_ptr<SCEVUnionPredicate> Predicate)
+        : ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
+          Predicate(std::move(Predicate)) {}
+
+    bool hasAlwaysTruePredicate() const {
+      return !Predicate || Predicate->isAlwaysTrue();
+    }
+  };
+
+  /// Information about the backedge-taken count of a loop. This currently
+  /// includes an exact count and a maximum count.
+  ///
+  class BackedgeTakenInfo {
+    /// A list of computable exits and their not-taken counts.  Loops almost
+    /// never have more than one computable exit.
+    SmallVector<ExitNotTakenInfo, 1> ExitNotTaken;
+
+    /// The pointer part of \c MaxAndComplete is an expression indicating the
+    /// least maximum backedge-taken count of the loop that is known, or a
+    /// SCEVCouldNotCompute. This expression is only valid if the predicates
+    /// associated with all loop exits are true.
+    ///
+    /// The integer part of \c MaxAndComplete is a boolean indicating if \c
+    /// ExitNotTaken has an element for every exiting block in the loop.
+    PointerIntPair<const SCEV *, 1> MaxAndComplete;
+
+    /// True iff the backedge is taken either exactly Max or zero times.
+    bool MaxOrZero = false;
+
+    /// \name Helper projection functions on \c MaxAndComplete.
+    /// @{
+    bool isComplete() const { return MaxAndComplete.getInt(); }
+    const SCEV *getMax() const { return MaxAndComplete.getPointer(); }
+    /// @}
+
+  public:
+    BackedgeTakenInfo() : MaxAndComplete(nullptr, 0) {}
+    BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
+    BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
+
+    using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
+
+    /// Initialize BackedgeTakenInfo from a list of exact exit counts.
+    BackedgeTakenInfo(SmallVectorImpl<EdgeExitInfo> &&ExitCounts, bool Complete,
+                      const SCEV *MaxCount, bool MaxOrZero);
+
+    /// Test whether this BackedgeTakenInfo contains any computed information,
+    /// or whether it's all SCEVCouldNotCompute values.
+    bool hasAnyInfo() const {
+      return !ExitNotTaken.empty() || !isa<SCEVCouldNotCompute>(getMax());
+    }
+
+    /// Test whether this BackedgeTakenInfo contains complete information.
+    bool hasFullInfo() const { return isComplete(); }
+
+    /// Return an expression indicating the exact *backedge-taken*
+    /// count of the loop if it is known or SCEVCouldNotCompute
+    /// otherwise.  If execution makes it to the backedge on every
+    /// iteration (i.e. there are no abnormal exits like exception
+    /// throws and thread exits) then this is the number of times the
+    /// loop header will execute minus one.
+    ///
+    /// If the SCEV predicate associated with the answer can be different
+    /// from AlwaysTrue, we must add a (non-null) Predicates argument.
+    /// The SCEV predicate associated with the answer will be added to
+    /// Predicates. A run-time check needs to be emitted for the SCEV
+    /// predicate in order for the answer to be valid.
+    ///
+    /// Note that we should always know if we need to pass a predicate
+    /// argument or not from the way the ExitCounts vector was computed.
+    /// If we allowed SCEV predicates to be generated when populating this
+    /// vector, this information can contain them and therefore a
+    /// SCEVPredicate argument should be added to getExact.
+    const SCEV *getExact(const Loop *L, ScalarEvolution *SE,
+                         SCEVUnionPredicate *Predicates = nullptr) const;
+
+    /// Return the number of times this loop exit may fall through to the back
+    /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
+    /// this block before this number of iterations, but may exit via another
+    /// block.
+    const SCEV *getExact(BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
+
+    /// Get the max backedge taken count for the loop.
+    const SCEV *getMax(ScalarEvolution *SE) const;
+
+    /// Return true if the number of times this backedge is taken is either the
+    /// value returned by getMax or zero.
+    bool isMaxOrZero(ScalarEvolution *SE) const;
+
+    /// Return true if any backedge taken count expressions refer to the given
+    /// subexpression.
+    bool hasOperand(const SCEV *S, ScalarEvolution *SE) const;
+
+    /// Invalidate this result and free associated memory.
+    void clear();
+  };
+
+  /// Cache the backedge-taken count of the loops for this function as they
+  /// are computed.
+  DenseMap<const Loop *, BackedgeTakenInfo> BackedgeTakenCounts;
+
+  /// Cache the predicated backedge-taken count of the loops for this
+  /// function as they are computed.
+  DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;
+
+  /// This map contains entries for all of the PHI instructions that we
+  /// attempt to compute constant evolutions for.  This allows us to avoid
+  /// potentially expensive recomputation of these properties.  An instruction
+  /// maps to null if we are unable to compute its exit value.
+  DenseMap<PHINode *, Constant *> ConstantEvolutionLoopExitValue;
+
+  /// This map contains entries for all the expressions that we attempt to
+  /// compute getSCEVAtScope information for, which can be expensive in
+  /// extreme cases.
+  DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
+      ValuesAtScopes;
+
+  /// Memoized computeLoopDisposition results.
+  DenseMap<const SCEV *,
+           SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
+      LoopDispositions;
+
+  struct LoopProperties {
+    /// Set to true if the loop contains no instruction that can abnormally
+    /// exit the loop (i.e. via throwing an exception, by terminating the
+    /// thread cleanly or by infinite looping in a called function).  Strictly
+    /// speaking, the last one is not leaving the loop, but is identical to
+    /// leaving the loop for reasoning about undefined behavior.
+    bool HasNoAbnormalExits;
+
+    /// Set to true if the loop contains no instruction that can have side
+    /// effects (i.e. via throwing an exception, volatile or atomic access).
+    bool HasNoSideEffects;
+  };
+
+  /// Cache for \c getLoopProperties.
+  DenseMap<const Loop *, LoopProperties> LoopPropertiesCache;
+
+  /// Return a \c LoopProperties instance for \p L, creating one if necessary.
+  LoopProperties getLoopProperties(const Loop *L);
+
+  bool loopHasNoSideEffects(const Loop *L) {
+    return getLoopProperties(L).HasNoSideEffects;
+  }
+
+  bool loopHasNoAbnormalExits(const Loop *L) {
+    return getLoopProperties(L).HasNoAbnormalExits;
+  }
+
+  /// Compute a LoopDisposition value.
+  LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
+
+  /// Memoized computeBlockDisposition results.
+  DenseMap<
+      const SCEV *,
+      SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
+      BlockDispositions;
+
+  /// Compute a BlockDisposition value.
+  BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+  /// Memoized results from getRange
+  DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
+
+  /// Memoized results from getRange
+  DenseMap<const SCEV *, ConstantRange> SignedRanges;
+
+  /// Used to parameterize getRange
+  enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
+
+  /// Set the memoized range for the given SCEV.
+  const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
+                                ConstantRange CR) {
+    DenseMap<const SCEV *, ConstantRange> &Cache =
+        Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
+
+    auto Pair = Cache.try_emplace(S, std::move(CR));
+    if (!Pair.second)
+      Pair.first->second = std::move(CR);
+    return Pair.first->second;
+  }
+
+  /// Determine the range for a particular SCEV.
+  /// NOTE: This returns a reference to an entry in a cache. It must be
+  /// copied if it is needed for longer.
+  const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint);
+
+  /// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Stop}.
+  /// Helper for \c getRange.
+  ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Stop,
+                                    const SCEV *MaxBECount, unsigned BitWidth);
+
+  /// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
+  /// Stop} by "factoring out" a ternary expression from the add recurrence.
+  /// Helper called by \c getRange.
+  ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Stop,
+                                     const SCEV *MaxBECount, unsigned BitWidth);
+
+  /// We know that there is no SCEV for the specified value.  Analyze the
+  /// expression.
+  const SCEV *createSCEV(Value *V);
+
+  /// Provide the special handling we need to analyze PHI SCEVs.
+  const SCEV *createNodeForPHI(PHINode *PN);
+
+  /// Helper function called from createNodeForPHI.
+  const SCEV *createAddRecFromPHI(PHINode *PN);
+
+  /// A helper function for createAddRecFromPHI to handle simple cases.
+  const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
+                                       Value *StartValueV);
+
+  /// Helper function called from createNodeForPHI.
+  const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
+
+  /// Provide special handling for a select-like instruction (currently this
+  /// is either a select instruction or a phi node).  \p I is the instruction
+  /// being processed, and it is assumed equivalent to "Cond ? TrueVal :
+  /// FalseVal".
+  const SCEV *createNodeForSelectOrPHI(Instruction *I, Value *Cond,
+                                       Value *TrueVal, Value *FalseVal);
+
+  /// Provide the special handling we need to analyze GEP SCEVs.
+  const SCEV *createNodeForGEP(GEPOperator *GEP);
+
+  /// Implementation code for getSCEVAtScope; called at most once for each
+  /// SCEV+Loop pair.
+  const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
+
+  /// This looks up computed SCEV values for all instructions that depend on
+  /// the given instruction and removes them from the ValueExprMap map if they
+  /// reference SymName. This is used during PHI resolution.
+  void forgetSymbolicName(Instruction *I, const SCEV *SymName);
+
+  /// Return the BackedgeTakenInfo for the given loop, lazily computing new
+  /// values if the loop hasn't been analyzed yet. The returned result is
+  /// guaranteed not to be predicated.
+  const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
+
+  /// Similar to getBackedgeTakenInfo, but will add predicates as required
+  /// with the purpose of returning complete information.
+  const BackedgeTakenInfo &getPredicatedBackedgeTakenInfo(const Loop *L);
+
+  /// Compute the number of times the specified loop will iterate.
+  /// If AllowPredicates is set, we will create new SCEV predicates as
+  /// necessary in order to return an exact answer.
+  BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L,
+                                              bool AllowPredicates = false);
+
+  /// Compute the number of times the backedge of the specified loop will
+  /// execute if it exits via the specified block. If AllowPredicates is set,
+  /// this call will try to use a minimal set of SCEV predicates in order to
+  /// return an exact answer.
+  ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
+                             bool AllowPredicates = false);
+
+  /// Compute the number of times the backedge of the specified loop will
+  /// execute if its exit condition were a conditional branch of ExitCond.
+  ///
+  /// \p ControlsExit is true if ExitCond directly controls the exit
+  /// branch. In this case, we can assume that the loop exits only if the
+  /// condition is true and can infer that failing to meet the condition prior
+  /// to integer wraparound results in undefined behavior.
+  ///
+  /// If \p AllowPredicates is set, this call will try to use a minimal set of
+  /// SCEV predicates in order to return an exact answer.
+  ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
+                                     bool ExitIfTrue, bool ControlsExit,
+                                     bool AllowPredicates = false);
+
+  // Helper functions for computeExitLimitFromCond to avoid exponential time
+  // complexity.
+
+  class ExitLimitCache {
+    // It may look like we need to key on the whole (L, ExitIfTrue,
+    // ControlsExit, AllowPredicates) tuple, but recursive calls to
+    // computeExitLimitFromCondCached from computeExitLimitFromCondImpl only
+    // vary the \c ExitCond and \c ControlsExit parameters.  We remember the
+    // initial values of the others to assert our assumption.
+    SmallDenseMap<PointerIntPair<Value *, 1>, ExitLimit> TripCountMap;
+
+    const Loop *L;
+    bool ExitIfTrue;
+    bool AllowPredicates;
+
+  public:
+    ExitLimitCache(const Loop *L, bool ExitIfTrue, bool AllowPredicates)
+        : L(L), ExitIfTrue(ExitIfTrue), AllowPredicates(AllowPredicates) {}
+
+    Optional<ExitLimit> find(const Loop *L, Value *ExitCond, bool ExitIfTrue,
+                             bool ControlsExit, bool AllowPredicates);
+
+    void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
+                bool ControlsExit, bool AllowPredicates, const ExitLimit &EL);
+  };
+
+  using ExitLimitCacheTy = ExitLimitCache;
+
+  ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
+                                           const Loop *L, Value *ExitCond,
+                                           bool ExitIfTrue,
+                                           bool ControlsExit,
+                                           bool AllowPredicates);
+  ExitLimit computeExitLimitFromCondImpl(ExitLimitCacheTy &Cache, const Loop *L,
+                                         Value *ExitCond, bool ExitIfTrue,
+                                         bool ControlsExit,
+                                         bool AllowPredicates);
+
+  /// Compute the number of times the backedge of the specified loop will
+  /// execute if its exit condition were a conditional branch of the ICmpInst
+  /// ExitCond and ExitIfTrue. If AllowPredicates is set, this call will try
+  /// to use a minimal set of SCEV predicates in order to return an exact
+  /// answer.
+  ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond,
+                                     bool ExitIfTrue,
+                                     bool IsSubExpr,
+                                     bool AllowPredicates = false);
+
+  /// Compute the number of times the backedge of the specified loop will
+  /// execute if its exit condition were a switch with a single exiting case
+  /// to ExitingBB.
+  ExitLimit computeExitLimitFromSingleExitSwitch(const Loop *L,
+                                                 SwitchInst *Switch,
+                                                 BasicBlock *ExitingBB,
+                                                 bool IsSubExpr);
+
+  /// Given an exit condition of 'icmp op load X, cst', try to see if we can
+  /// compute the backedge-taken count.
+  ExitLimit computeLoadConstantCompareExitLimit(LoadInst *LI, Constant *RHS,
+                                                const Loop *L,
+                                                ICmpInst::Predicate p);
+
+  /// Compute the exit limit of a loop that is controlled by a
+  /// "(IV >> 1) != 0" type comparison.  We cannot compute the exact trip
+  /// count in these cases (since SCEV has no way of expressing them), but we
+  /// can still sometimes compute an upper bound.
+  ///
+  /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
+  /// RHS`.
+  ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS, const Loop *L,
+                                         ICmpInst::Predicate Pred);
+
+  /// If the loop is known to execute a constant number of times (the
+  /// condition evolves only from constants), try to evaluate a few iterations
+  /// of the loop until the exit condition gets a value of ExitWhen
+  /// (true or false).  If we cannot evaluate the exit count of the loop,
+  /// return CouldNotCompute.
+  const SCEV *computeExitCountExhaustively(const Loop *L, Value *Cond,
+                                           bool ExitWhen);
+
+  /// Return the number of times an exit condition comparing the specified
+  /// value to zero will execute.  If not computable, return CouldNotCompute.
+  /// If AllowPredicates is set, this call will try to use a minimal set of
+  /// SCEV predicates in order to return an exact answer.
+  ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
+                         bool AllowPredicates = false);
+
+  /// Return the number of times an exit condition checking the specified
+  /// value for nonzero will execute.  If not computable, return
+  /// CouldNotCompute.
+  ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);
+
+  /// Return the number of times an exit condition containing the specified
+  /// less-than comparison will execute.  If not computable, return
+  /// CouldNotCompute.
+  ///
+  /// \p isSigned specifies whether the less-than is signed.
+  ///
+  /// \p ControlsExit is true when the LHS < RHS condition directly controls
+  /// the branch (loops exits only if condition is true). In this case, we can
+  /// use NoWrapFlags to skip overflow checks.
+  ///
+  /// If \p AllowPredicates is set, this call will try to use a minimal set of
+  /// SCEV predicates in order to return an exact answer.
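+  ///
+  /// As a rough sketch of the arithmetic involved: for LHS = {Start,+,Stride}
+  /// with positive Stride and loop-invariant RHS, the exact count is
+  /// essentially ceil((RHS - Start) / Stride), i.e.
+  /// (RHS - Start + Stride - 1) /u Stride, with overflow guarded separately.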
+  ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+                             bool isSigned, bool ControlsExit,
+                             bool AllowPredicates = false);
+
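+  /// Like howManyLessThans, but for a greater-than comparison, i.e. for loops
+  /// counting down with a negative stride.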
+  ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+                                bool isSigned, bool IsSubExpr,
+                                bool AllowPredicates = false);
+
+  /// Return a predecessor of BB (which may not be an immediate predecessor)
+  /// which has exactly one successor from which BB is reachable, together
+  /// with that successor, or a pair of null pointers if no such block is
+  /// found.
+  std::pair<BasicBlock *, BasicBlock *>
+  getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the given FoundCondValue value evaluates to true.
+  bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+                     Value *FoundCondValue, bool Inverse);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
+  /// true.
+  bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+                     ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
+                     const SCEV *FoundRHS);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+  /// true.
+  bool isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS,
+                             const SCEV *RHS, const SCEV *FoundLHS,
+                             const SCEV *FoundRHS);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+  /// true. Here LHS is an operation that includes FoundLHS as one of its
+  /// arguments.
+  bool isImpliedViaOperations(ICmpInst::Predicate Pred,
+                              const SCEV *LHS, const SCEV *RHS,
+                              const SCEV *FoundLHS, const SCEV *FoundRHS,
+                              unsigned Depth = 0);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true.
+  /// Use only simple non-recursive types of checks, such as range analysis etc.
+  bool isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
+                                       const SCEV *LHS, const SCEV *RHS);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+  /// true.
+  bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                   const SCEV *RHS, const SCEV *FoundLHS,
+                                   const SCEV *FoundRHS);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+  /// true.  Utility function used by isImpliedCondOperands.  Tries to get
+  /// cases like "X `sgt` 0 => X - 1 `sgt` -1".
+  bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                      const SCEV *RHS, const SCEV *FoundLHS,
+                                      const SCEV *FoundRHS);
+
+  /// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
+  /// by a call to \c @llvm.experimental.guard in \p BB.
+  bool isImpliedViaGuard(BasicBlock *BB, ICmpInst::Predicate Pred,
+                         const SCEV *LHS, const SCEV *RHS);
+
+  /// Test whether the condition described by Pred, LHS, and RHS is true
+  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+  /// true.
+  ///
+  /// This routine tries to rule out certain kinds of integer overflow, and
+  /// then tries to reason about arithmetic properties of the predicates.
+  bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
+                                          const SCEV *LHS, const SCEV *RHS,
+                                          const SCEV *FoundLHS,
+                                          const SCEV *FoundRHS);
+
+  /// If we know that the specified Phi is in the header of its containing
+  /// loop, that the loop executes a constant number of times, and that the
+  /// PHI node is just a recurrence involving constants, fold it.
+  Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs,
+                                              const Loop *L);
+
+  /// Test if the given expression is known to satisfy the condition described
+  /// by Pred and the known constant ranges of LHS and RHS.
+  bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
+                                         const SCEV *LHS, const SCEV *RHS);
+
+  /// Try to prove the condition described by "LHS Pred RHS" by ruling out
+  /// integer overflow.
+  ///
+  /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
+  /// positive.
+  bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                     const SCEV *RHS);
+
+  /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
+  /// prove them individually.
+  bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                    const SCEV *RHS);
+
+  /// Try to match the Expr as "(L + R)<Flags>".
+  bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
+                      SCEV::NoWrapFlags &Flags);
+
+  /// Compute \p LHS - \p RHS and returns the result as an APInt if it is a
+  /// constant, and None if it isn't.
+  ///
+  /// This is intended to be a cheaper version of getMinusSCEV.  We can be
+  /// frugal here since we just bail out of actually constructing and
+  /// canonicalizing an expression in the cases where the result isn't going
+  /// to be a constant.
+  Optional<APInt> computeConstantDifference(const SCEV *LHS, const SCEV *RHS);
+
+  /// Drop memoized information computed for S.
+  void forgetMemoizedResults(const SCEV *S);
+
+  /// Return an existing SCEV for V if there is one, otherwise return nullptr.
+  const SCEV *getExistingSCEV(Value *V);
+
+  /// Return false iff the given SCEV contains a SCEVUnknown with a null
+  /// value pointer.
+  bool checkValidity(const SCEV *S) const;
+
+  /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
+  /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}.  This is
+  /// equivalent to proving no signed (resp. unsigned) wrap in
+  /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
+  /// (resp. `SCEVZeroExtendExpr`).
+  template <typename ExtendOpTy>
+  bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
+                                 const Loop *L);
+
+  /// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
+  SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
+
+  bool isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
+                                ICmpInst::Predicate Pred, bool &Increasing);
+
+  /// Return SCEV no-wrap flags that can be proven based on reasoning about
+  /// how poison produced from no-wrap flags on this value (e.g. a nuw add)
+  /// would trigger undefined behavior on overflow.
+  SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);
+
+  /// Return true if the SCEV corresponding to \p I is never poison.  Proving
+  /// this is more complex than proving that just \p I is never poison, since
+  /// SCEV commons expressions across control flow, and you can have cases
+  /// like:
+  ///
+  ///   idx0 = a + b;
+  ///   ptr[idx0] = 100;
+  ///   if (<condition>) {
+  ///     idx1 = a +nsw b;
+  ///     ptr[idx1] = 200;
+  ///   }
+  ///
+  /// where the SCEV expression (+ a b) is guaranteed to not be poison (and
+  /// hence not sign-overflow) only if "<condition>" is true.  Since both
+  /// `idx0` and `idx1` will be mapped to the same SCEV expression, (+ a b),
+  /// it is not okay to annotate (+ a b) with <nsw> in the above example.
+  bool isSCEVExprNeverPoison(const Instruction *I);
+
+  /// This is like \c isSCEVExprNeverPoison but it specifically works for
+  /// instructions that will get mapped to SCEV add recurrences.  Return true
+  /// if \p I will never generate poison under the assumption that \p I is an
+  /// add recurrence on the loop \p L.
+  bool isAddRecNeverPoison(const Instruction *I, const Loop *L);
+
+  /// Similar to createAddRecFromPHI, but with the additional flexibility of
+  /// suggesting runtime overflow checks in case casts are encountered.
+  /// If successful, the analysis records that for this loop, \p SymbolicPHI,
+  /// which is the UnknownSCEV currently representing the PHI, can be rewritten
+  /// into an AddRec, assuming some predicates; the function then returns the
+  /// AddRec and the predicates as a pair, and caches this pair in
+  /// PredicatedSCEVRewrites.
+  /// If the analysis is not successful, a mapping from the \p SymbolicPHI to
+  /// itself (with no predicates) is recorded, and a nullptr with an empty
+  /// predicates vector is returned as a pair.
+  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+  createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI);
+
+  /// Compute the backedge taken count knowing the interval difference, the
+  /// stride and presence of the equality in the comparison.
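+  /// Conceptually (a sketch, not a contract): for a strict comparison the
+  /// result is the ceiling division (Delta + Stride - 1) /u Stride, and with
+  /// Equality set it becomes (Delta + Stride) /u Stride.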
+  const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
+                             bool Equality);
+
+  /// Compute the maximum backedge count based on the range of values
+  /// permitted by Start, End, and Stride. This is for loops of the form
+  /// {Start, +, Stride} LT End.
+  ///
+  /// Precondition: the induction variable is known to be positive.  We *don't*
+  /// assert this precondition so please be careful.
+  const SCEV *computeMaxBECountForLT(const SCEV *Start, const SCEV *Stride,
+                                     const SCEV *End, unsigned BitWidth,
+                                     bool IsSigned);
+
+  /// Verify if a linear IV with positive stride can overflow when in a
+  /// less-than comparison, knowing the invariant term of the comparison,
+  /// the stride and the knowledge of NSW/NUW flags on the recurrence.
+  bool doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
+                          bool NoWrap);
+
+  /// Verify if a linear IV with negative stride can overflow when in a
+  /// greater-than comparison, knowing the invariant term of the comparison,
+  /// the stride and the knowledge of NSW/NUW flags on the recurrence.
+  bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
+                          bool NoWrap);
+
+  /// Get add expr already created or create a new one.
+  const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                 SCEV::NoWrapFlags Flags);
+
+  /// Get mul expr already created or create a new one.
+  const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                 SCEV::NoWrapFlags Flags);
+
+  /// Find all of the loops transitively used in \p S, and fill \p LoopsUsed.
+  /// A loop is considered "used" by an expression if it contains
+  /// an add rec on said loop.
+  void getUsedLoops(const SCEV *S, SmallPtrSetImpl<const Loop *> &LoopsUsed);
+
+  /// Find all of the loops transitively used in \p S, and update \c LoopUsers
+  /// accordingly.
+  void addToLoopUseLists(const SCEV *S);
+
+  FoldingSet<SCEV> UniqueSCEVs;
+  FoldingSet<SCEVPredicate> UniquePreds;
+  BumpPtrAllocator SCEVAllocator;
+
+  /// This maps loops to a list of SCEV expressions that (transitively) use said
+  /// loop.
+  DenseMap<const Loop *, SmallVector<const SCEV *, 4>> LoopUsers;
+
+  /// Cache tentative mappings from UnknownSCEVs in a Loop, to a SCEV expression
+  /// they can be rewritten into under certain predicates.
+  DenseMap<std::pair<const SCEVUnknown *, const Loop *>,
+           std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+      PredicatedSCEVRewrites;
+
+  /// The head of a linked list of all SCEVUnknown values that have been
+  /// allocated. This is used by releaseMemory to locate them all and call
+  /// their destructors.
+  SCEVUnknown *FirstUnknown = nullptr;
+};
+
+/// Analysis pass that exposes the \c ScalarEvolution for a function.
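+///
+/// A usage sketch under the new pass manager (illustrative; `FAM` and `F`
+/// are assumed to be a populated FunctionAnalysisManager and a Function):
+///
+/// \code
+///   FAM.registerPass([] { return ScalarEvolutionAnalysis(); });
+///   // ScalarEvolution also depends on other analyses (dominator tree,
+///   // loop info, etc.), which must be registered as well.
+///   ScalarEvolution &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
+/// \endcode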
+class ScalarEvolutionAnalysis
+    : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
+  friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = ScalarEvolution;
+
+  ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Printer pass for the \c ScalarEvolutionAnalysis results.
+class ScalarEvolutionPrinterPass
+    : public PassInfoMixin<ScalarEvolutionPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class ScalarEvolutionWrapperPass : public FunctionPass {
+  std::unique_ptr<ScalarEvolution> SE;
+
+public:
+  static char ID;
+
+  ScalarEvolutionWrapperPass();
+
+  ScalarEvolution &getSE() { return *SE; }
+  const ScalarEvolution &getSE() const { return *SE; }
+
+  bool runOnFunction(Function &F) override;
+  void releaseMemory() override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(raw_ostream &OS, const Module * = nullptr) const override;
+  void verifyAnalysis() const override;
+};
+
+/// An interface layer with SCEV used to manage how we see SCEV expressions
+/// for values in the context of existing predicates. We can add new
+/// predicates, but we cannot remove them.
+///
+/// This layer has multiple purposes:
+///   - provides a simple interface for SCEV versioning.
+///   - guarantees that the order of transformations applied on a SCEV
+///     expression for a single Value is consistent across two different
+///     getSCEV calls. This means that, for example, once we've obtained
+///     an AddRec expression for a certain value through expression
+///     rewriting, we will continue to get an AddRec expression for that
+///     Value.
+///   - lowers the number of expression rewrites.
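+///
+/// A usage sketch (illustrative; `SE` and `L` are assumed to be existing
+/// ScalarEvolution and Loop references, and `V` a Value in the loop):
+///
+/// \code
+///   PredicatedScalarEvolution PSE(SE, L);
+///   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(V)) {
+///     // AR is only valid under PSE.getUnionPredicate(); clients typically
+///     // emit a run-time check for it.
+///     (void)AR;
+///   }
+/// \endcode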
+class PredicatedScalarEvolution {
+public:
+  PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
+
+  const SCEVUnionPredicate &getUnionPredicate() const;
+
+  /// Returns the SCEV expression of V, in the context of the current SCEV
+  /// predicate.  The order of transformations applied on the expression of V
+  /// returned by ScalarEvolution is guaranteed to be preserved, even when
+  /// adding new predicates.
+  const SCEV *getSCEV(Value *V);
+
+  /// Get the (predicated) backedge count for the analyzed loop.
+  const SCEV *getBackedgeTakenCount();
+
+  /// Adds a new predicate.
+  void addPredicate(const SCEVPredicate &Pred);
+
+  /// Attempts to produce an AddRecExpr for V by adding additional SCEV
+  /// predicates. If we can't transform the expression into an AddRecExpr we
+  /// return nullptr and do not add additional SCEV predicates to the current
+  /// context.
+  const SCEVAddRecExpr *getAsAddRec(Value *V);
+
+  /// Proves that V doesn't overflow by adding a SCEV predicate.
+  void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+
+  /// Returns true if we've proved that V doesn't wrap by means of a SCEV
+  /// predicate.
+  bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+
+  /// Returns the ScalarEvolution analysis used.
+  ScalarEvolution *getSE() const { return &SE; }
+
+  /// We need to explicitly define the copy constructor because of FlagsMap.
+  PredicatedScalarEvolution(const PredicatedScalarEvolution &);
+
+  /// Print the SCEV mappings done by the Predicated Scalar Evolution.
+  /// The printed text is indented by \p Depth.
+  void print(raw_ostream &OS, unsigned Depth) const;
+
+  /// Check if \p AR1 and \p AR2 are equal, while taking into account
+  /// Equal predicates in Preds.
+  bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
+                                const SCEVAddRecExpr *AR2) const;
+
+private:
+  /// Increments the version number of the predicate.  This needs to be called
+  /// every time the SCEV predicate changes.
+  void updateGeneration();
+
+  /// Holds a SCEV and the version number of the SCEV predicate used to
+  /// perform the rewrite of the expression.
+  using RewriteEntry = std::pair<unsigned, const SCEV *>;
+
+  /// Maps a SCEV to the rewrite result of that SCEV at a certain version
+  /// number. If this number doesn't match the current Generation, we will
+  /// need to do a rewrite. To preserve the transformation order of previous
+  /// rewrites, we will rewrite the previous result instead of the original
+  /// SCEV.
+  DenseMap<const SCEV *, RewriteEntry> RewriteMap;
+
+  /// Records what NoWrap flags we've added to a Value *.
+  ValueMap<Value *, SCEVWrapPredicate::IncrementWrapFlags> FlagsMap;
+
+  /// The ScalarEvolution analysis.
+  ScalarEvolution &SE;
+
+  /// The analyzed Loop.
+  const Loop &L;
+
+  /// The SCEVPredicate that forms our context. We will rewrite all
+  /// expressions assuming that this predicate is true.
+  SCEVUnionPredicate Preds;
+
+  /// Marks the version of the SCEV predicate used. When rewriting a SCEV
+  /// expression we mark it with the version of the predicate. We use this to
+  /// figure out if the predicate has changed from the last rewrite of the
+  /// SCEV. If so, we need to perform a new rewrite.
+  unsigned Generation = 0;
+
+  /// The backedge taken count.
+  const SCEV *BackedgeCount = nullptr;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_SCALAREVOLUTION_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
new file mode 100644
index 0000000..329be51
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -0,0 +1,72 @@
+//===- ScalarEvolutionAliasAnalysis.h - SCEV-based AA -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a SCEV-based alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// A simple alias analysis implementation that uses ScalarEvolution to answer
+/// queries.
+class SCEVAAResult : public AAResultBase<SCEVAAResult> {
+  ScalarEvolution &SE;
+
+public:
+  explicit SCEVAAResult(ScalarEvolution &SE) : AAResultBase(), SE(SE) {}
+  SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+private:
+  Value *GetBaseValue(const SCEV *S);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class SCEVAA : public AnalysisInfoMixin<SCEVAA> {
+  friend AnalysisInfoMixin<SCEVAA>;
+  static AnalysisKey Key;
+
+public:
+  typedef SCEVAAResult Result;
+
+  SCEVAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the SCEVAAResult object.
+class SCEVAAWrapperPass : public FunctionPass {
+  std::unique_ptr<SCEVAAResult> Result;
+
+public:
+  static char ID;
+
+  SCEVAAWrapperPass();
+
+  SCEVAAResult &getResult() { return *Result; }
+  const SCEVAAResult &getResult() const { return *Result; }
+
+  bool runOnFunction(Function &F) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+/// Creates an instance of \c SCEVAAWrapperPass.
+FunctionPass *createSCEVAAWrapperPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h
new file mode 100644
index 0000000..3df04e9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -0,0 +1,397 @@
+//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to generate code from scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+  class TargetTransformInfo;
+
+  /// Return true if the given expression is safe to expand in the sense that
+  /// all materialized values are safe to speculate anywhere their operands are
+  /// defined.
+  bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
+
+  /// Return true if the given expression is safe to expand in the sense that
+  /// all materialized values are defined and safe to speculate at the specified
+  /// location and their operands are defined at this location.
+  bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
+                        ScalarEvolution &SE);
+
+  /// This class uses information about analyzed scalars to rewrite expressions
+  /// in canonical form.
+  ///
+  /// Clients should create an instance of this class when rewriting is needed,
+  /// and destroy it when finished to allow the release of the associated
+  /// memory.
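+  ///
+  /// A usage sketch (illustrative; `SE`, `DL`, `S` and `InsertPt` are assumed
+  /// to be an existing ScalarEvolution, DataLayout, SCEV expression and
+  /// insertion-point instruction):
+  ///
+  /// \code
+  ///   SCEVExpander Expander(SE, DL, "scev");
+  ///   if (isSafeToExpandAt(S, InsertPt, SE)) {
+  ///     Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
+  ///     // ... use V ...
+  ///   }
+  /// \endcode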
+  class SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
+    ScalarEvolution &SE;
+    const DataLayout &DL;
+
+    // New instructions receive a name to identify them with the current pass.
+    const char* IVName;
+
+    // InsertedExpressions caches Values for reuse, so must track RAUW.
+    DenseMap<std::pair<const SCEV *, Instruction *>, TrackingVH<Value>>
+        InsertedExpressions;
+
+    // InsertedValues only flags inserted instructions so needs no RAUW.
+    DenseSet<AssertingVH<Value>> InsertedValues;
+    DenseSet<AssertingVH<Value>> InsertedPostIncValues;
+
+    /// A memoization of the "relevant" loop for a given SCEV.
+    DenseMap<const SCEV *, const Loop *> RelevantLoops;
+
+    /// Addrecs referring to any of the given loops are expanded in post-inc
+    /// mode. For example, expanding {1,+,1}<L> in post-inc mode returns the add
+    /// instruction that adds one to the phi for {0,+,1}<L>, as opposed to a new
+    /// phi starting at 1. This is only supported in non-canonical mode.
+    PostIncLoopSet PostIncLoops;
+
+    /// When this is non-null, addrecs expanded in the loop it indicates should
+    /// be inserted with increments at IVIncInsertPos.
+    const Loop *IVIncInsertLoop;
+
+    /// When expanding addrecs in the IVIncInsertLoop loop, insert the IV
+    /// increment at this position.
+    Instruction *IVIncInsertPos;
+
+    /// Phis that complete an IV chain, tracked so they can be reused rather
+    /// than re-expanded.
+    DenseSet<AssertingVH<PHINode>> ChainedPhis;
+
+    /// When true, expressions are expanded in "canonical" form. In particular,
+    /// addrecs are expanded as arithmetic based on a canonical induction
+    /// variable. When false, expression are expanded in a more literal form.
+    bool CanonicalMode;
+
+    /// When invoked from LSR, the expander is in "strength reduction" mode. The
+    /// only difference is that phis are only reused if they are already in
+    /// "expanded" form.
+    bool LSRMode;
+
+    typedef IRBuilder<TargetFolder> BuilderType;
+    BuilderType Builder;
+
+    // RAII object that stores the current insertion point and restores it when
+    // the object is destroyed. This includes the debug location.  Duplicated
+    // from InsertPointGuard to add SetInsertPoint(), which is used to update
+    // the InsertPointGuards stack when insert points are moved during SCEV
+    // expansion.
+    class SCEVInsertPointGuard {
+      IRBuilderBase &Builder;
+      AssertingVH<BasicBlock> Block;
+      BasicBlock::iterator Point;
+      DebugLoc DbgLoc;
+      SCEVExpander *SE;
+
+      SCEVInsertPointGuard(const SCEVInsertPointGuard &) = delete;
+      SCEVInsertPointGuard &operator=(const SCEVInsertPointGuard &) = delete;
+
+    public:
+      SCEVInsertPointGuard(IRBuilderBase &B, SCEVExpander *SE)
+          : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
+            DbgLoc(B.getCurrentDebugLocation()), SE(SE) {
+        SE->InsertPointGuards.push_back(this);
+      }
+
+      ~SCEVInsertPointGuard() {
+        // These guards should always be created/destroyed in LIFO order,
+        // since they are used to guard lexically scoped blocks of code in
+        // ScalarEvolutionExpander.
+        assert(SE->InsertPointGuards.back() == this);
+        SE->InsertPointGuards.pop_back();
+        Builder.restoreIP(IRBuilderBase::InsertPoint(Block, Point));
+        Builder.SetCurrentDebugLocation(DbgLoc);
+      }
+
+      BasicBlock::iterator GetInsertPoint() const { return Point; }
+      void SetInsertPoint(BasicBlock::iterator I) { Point = I; }
+    };
+
+    /// Stack of pointers to saved insert points, used to keep insert points
+    /// consistent when instructions are moved.
+    SmallVector<SCEVInsertPointGuard *, 8> InsertPointGuards;
+
+#ifndef NDEBUG
+    const char *DebugType;
+#endif
+
+    friend struct SCEVVisitor<SCEVExpander, Value*>;
+
+  public:
+    /// Construct a SCEVExpander in "canonical" mode.
+    explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
+                          const char *name)
+        : SE(se), DL(DL), IVName(name), IVIncInsertLoop(nullptr),
+          IVIncInsertPos(nullptr), CanonicalMode(true), LSRMode(false),
+          Builder(se.getContext(), TargetFolder(DL)) {
+#ifndef NDEBUG
+      DebugType = "";
+#endif
+    }
+
+    ~SCEVExpander() {
+      // Make sure the insert point guard stack is consistent.
+      assert(InsertPointGuards.empty());
+    }
+
+#ifndef NDEBUG
+    void setDebugType(const char* s) { DebugType = s; }
+#endif
+
+    /// Erase the contents of the InsertedExpressions map so that users trying
+    /// to expand the same expression into multiple BasicBlocks or different
+    /// places within the same BasicBlock can do so.
+    void clear() {
+      InsertedExpressions.clear();
+      InsertedValues.clear();
+      InsertedPostIncValues.clear();
+      ChainedPhis.clear();
+    }
+
+    /// Return true for expressions that may incur non-trivial cost to evaluate
+    /// at runtime.
+    ///
+    /// At is an optional parameter which specifies the point in code where
+    /// the user is going to expand this expression. Sometimes this knowledge
+    /// can lead to a more accurate cost estimation.
+    bool isHighCostExpansion(const SCEV *Expr, Loop *L,
+                             const Instruction *At = nullptr) {
+      SmallPtrSet<const SCEV *, 8> Processed;
+      return isHighCostExpansionHelper(Expr, L, At, Processed);
+    }
+
+    /// This method returns the canonical induction variable of the specified
+    /// type for the specified loop (inserting one if there is none).  A
+    /// canonical induction variable starts at zero and steps by one on each
+    /// iteration.
+    PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty);
+
+    /// Return the induction variable increment's IV operand.
+    Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
+                                 bool allowScale);
+
+    /// Utility for hoisting an IV increment.
+    bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
+
+    /// Replace congruent phis with their most canonical representative. Return
+    /// the number of phis eliminated.
+    unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+                                 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+                                 const TargetTransformInfo *TTI = nullptr);
+
+    /// Insert code to directly compute the specified SCEV expression into the
+    /// program.  The inserted code is inserted into the specified block.
+    Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);
+
+    /// Insert code to directly compute the specified SCEV expression into the
+    /// program.  The inserted code is inserted into the SCEVExpander's current
+    /// insertion point. If a type is specified, the result will be expanded to
+    /// have that type, with a cast if necessary.
+    Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
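+
+    // A minimal usage sketch (illustrative only; SE, DL, L, and Preheader
+    // are assumed to be provided by the caller):
+    //
+    //   SCEVExpander Expander(SE, DL, "expanded");
+    //   const SCEV *BEC = SE.getBackedgeTakenCount(L);
+    //   if (!isa<SCEVCouldNotCompute>(BEC)) {
+    //     Value *Count = Expander.expandCodeFor(
+    //         BEC, BEC->getType(), Preheader->getTerminator());
+    //     // e.g. feed Count into a runtime trip-count check.
+    //   }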
+
+    /// Generates a code sequence that evaluates this predicate.  The inserted
+    /// instructions will be at position \p Loc.  The result will be of type i1
+    /// and will have a value of 0 when the predicate is false and 1 otherwise.
+    Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);
+
+    /// A specialized variant of expandCodeForPredicate, handling the case when
+    /// we are expanding code for a SCEVEqualPredicate.
+    Value *expandEqualPredicate(const SCEVEqualPredicate *Pred,
+                                Instruction *Loc);
+
+    /// Generates code that evaluates if the \p AR expression will overflow.
+    Value *generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc,
+                                 bool Signed);
+
+    /// A specialized variant of expandCodeForPredicate, handling the case when
+    /// we are expanding code for a SCEVWrapPredicate.
+    Value *expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc);
+
+    /// A specialized variant of expandCodeForPredicate, handling the case when
+    /// we are expanding code for a SCEVUnionPredicate.
+    Value *expandUnionPredicate(const SCEVUnionPredicate *Pred,
+                                Instruction *Loc);
+
+    /// Set the current IV increment loop and position.
+    void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
+      assert(!CanonicalMode &&
+             "IV increment positions are not supported in CanonicalMode");
+      IVIncInsertLoop = L;
+      IVIncInsertPos = Pos;
+    }
+
+    /// Enable post-inc expansion for addrecs referring to the given
+    /// loops. Post-inc expansion is only supported in non-canonical mode.
+    void setPostInc(const PostIncLoopSet &L) {
+      assert(!CanonicalMode &&
+             "Post-inc expansion is not supported in CanonicalMode");
+      PostIncLoops = L;
+    }
+
+    /// Disable all post-inc expansion.
+    void clearPostInc() {
+      PostIncLoops.clear();
+
+      // When we change the post-inc loop set, cached expansions may no
+      // longer be valid.
+      InsertedPostIncValues.clear();
+    }
+
+    /// Disable the behavior of expanding expressions in canonical form rather
+    /// than in a more literal form. Non-canonical mode is useful for late
+    /// optimization passes.
+    void disableCanonicalMode() { CanonicalMode = false; }
+
+    void enableLSRMode() { LSRMode = true; }
+
+    /// Set the current insertion point. This is useful if multiple calls to
+    /// expandCodeFor() are going to be made with the same insert point and the
+    /// insert point may be moved during one of the expansions (e.g. if the
+    /// insert point is not a block terminator).
+    void setInsertPoint(Instruction *IP) {
+      assert(IP);
+      Builder.SetInsertPoint(IP);
+    }
+
+    /// Clear the current insertion point. This is useful if the instruction
+    /// that had been serving as the insertion point may have been deleted.
+    void clearInsertPoint() {
+      Builder.ClearInsertionPoint();
+    }
+
+    /// Return true if the specified instruction was inserted by the code
+    /// rewriter.  If so, the client should not modify the instruction.
+    bool isInsertedInstruction(Instruction *I) const {
+      return InsertedValues.count(I) || InsertedPostIncValues.count(I);
+    }
+
+    void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
+
+    /// Try to find an existing LLVM IR value for S available at the point At.
+    Value *getExactExistingExpansion(const SCEV *S, const Instruction *At,
+                                     Loop *L);
+
+    /// Try to find the ValueOffsetPair for S. This function is mainly used
+    /// to check whether S can be expanded cheaply. If this returns a
+    /// non-None value, we know we can codegen the `ValueOffsetPair` into a
+    /// suitable expansion identical to S, so that S can be expanded cheaply.
+    ///
+    /// L is a hint indicating in which loop to look for a suitable value.
+    /// On success, the returned value is equivalent to the expanded S at
+    /// point At. Returns None if no such value was found.
+    ///
+    /// Note that this function does not perform an exhaustive search; if it
+    /// finds no value, that does not mean no such value exists.
+    Optional<ScalarEvolution::ValueOffsetPair>
+    getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
+
+  private:
+    LLVMContext &getContext() const { return SE.getContext(); }
+
+    /// Recursive helper function for isHighCostExpansion.
+    bool isHighCostExpansionHelper(const SCEV *S, Loop *L,
+                                   const Instruction *At,
+                                   SmallPtrSetImpl<const SCEV *> &Processed);
+
+    /// Insert the specified binary operator, doing a small amount of work to
+    /// avoid inserting an obviously redundant operation.
+    Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
+
+    /// Arrange for there to be a cast of V to Ty at IP, reusing an existing
+    /// cast if a suitable one exists, moving an existing cast if a suitable
+    /// one exists but isn't in the right place, or creating a new one.
+    Value *ReuseOrCreateCast(Value *V, Type *Ty,
+                             Instruction::CastOps Op,
+                             BasicBlock::iterator IP);
+
+    /// Insert a cast of V to the specified type, which must be possible with a
+    /// noop cast, doing what we can to share the casts.
+    Value *InsertNoopCastOfTo(Value *V, Type *Ty);
+
+    /// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
+    /// ptrtoint+arithmetic+inttoptr.
+    Value *expandAddToGEP(const SCEV *const *op_begin,
+                          const SCEV *const *op_end,
+                          PointerType *PTy, Type *Ty, Value *V);
+
+    /// Find a previous Value in ExprValueMap for expand.
+    ScalarEvolution::ValueOffsetPair
+    FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);
+
+    Value *expand(const SCEV *S);
+
+    /// Determine the most "relevant" loop for the given SCEV.
+    const Loop *getRelevantLoop(const SCEV *);
+
+    Value *visitConstant(const SCEVConstant *S) {
+      return S->getValue();
+    }
+
+    Value *visitTruncateExpr(const SCEVTruncateExpr *S);
+
+    Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);
+
+    Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);
+
+    Value *visitAddExpr(const SCEVAddExpr *S);
+
+    Value *visitMulExpr(const SCEVMulExpr *S);
+
+    Value *visitUDivExpr(const SCEVUDivExpr *S);
+
+    Value *visitAddRecExpr(const SCEVAddRecExpr *S);
+
+    Value *visitSMaxExpr(const SCEVSMaxExpr *S);
+
+    Value *visitUMaxExpr(const SCEVUMaxExpr *S);
+
+    Value *visitUnknown(const SCEVUnknown *S) {
+      return S->getValue();
+    }
+
+    void rememberInstruction(Value *I);
+
+    bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+    bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+    Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
+    PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
+                                       const Loop *L,
+                                       Type *ExpandTy,
+                                       Type *IntTy,
+                                       Type *&TruncTy,
+                                       bool &InvertStep);
+    Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+                       Type *ExpandTy, Type *IntTy, bool useSubtract);
+
+    void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
+                        Instruction *Pos, PHINode *LoopPhi);
+
+    void fixupInsertPoints(Instruction *I);
+  };
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpressions.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpressions.h
new file mode 100644
index 0000000..42e7609
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -0,0 +1,762 @@
+//===- llvm/Analysis/ScalarEvolutionExpressions.h - SCEV Exprs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to represent and build scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+class APInt;
+class Constant;
+class ConstantRange;
+class Loop;
+class Type;
+
+  enum SCEVTypes {
+    // These should be ordered in terms of increasing complexity to make the
+    // folders simpler.
+    scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
+    scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr,
+    scUnknown, scCouldNotCompute
+  };
+
+  /// This class represents a constant integer value.
+  class SCEVConstant : public SCEV {
+    friend class ScalarEvolution;
+
+    ConstantInt *V;
+
+    SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
+      SCEV(ID, scConstant), V(v) {}
+
+  public:
+    ConstantInt *getValue() const { return V; }
+    const APInt &getAPInt() const { return getValue()->getValue(); }
+
+    Type *getType() const { return V->getType(); }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scConstant;
+    }
+  };
+
+  /// This is the base class for unary cast operator classes.
+  class SCEVCastExpr : public SCEV {
+  protected:
+    const SCEV *Op;
+    Type *Ty;
+
+    SCEVCastExpr(const FoldingSetNodeIDRef ID,
+                 unsigned SCEVTy, const SCEV *op, Type *ty);
+
+  public:
+    const SCEV *getOperand() const { return Op; }
+    Type *getType() const { return Ty; }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scTruncate ||
+             S->getSCEVType() == scZeroExtend ||
+             S->getSCEVType() == scSignExtend;
+    }
+  };
+
+  /// This class represents a truncation of an integer value to a
+  /// smaller integer value.
+  class SCEVTruncateExpr : public SCEVCastExpr {
+    friend class ScalarEvolution;
+
+    SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
+                     const SCEV *op, Type *ty);
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scTruncate;
+    }
+  };
+
+  /// This class represents a zero extension of a small integer value
+  /// to a larger integer value.
+  class SCEVZeroExtendExpr : public SCEVCastExpr {
+    friend class ScalarEvolution;
+
+    SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
+                       const SCEV *op, Type *ty);
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scZeroExtend;
+    }
+  };
+
+  /// This class represents a sign extension of a small integer value
+  /// to a larger integer value.
+  class SCEVSignExtendExpr : public SCEVCastExpr {
+    friend class ScalarEvolution;
+
+    SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
+                       const SCEV *op, Type *ty);
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scSignExtend;
+    }
+  };
+
+  /// This node is a base class providing common functionality for
+  /// n'ary operators.
+  class SCEVNAryExpr : public SCEV {
+  protected:
+    // Since SCEVs are immutable, ScalarEvolution allocates operand
+    // arrays with its SCEVAllocator, so this class just needs a simple
+    // pointer rather than a more elaborate vector-like data structure.
+    // This also avoids the need for a non-trivial destructor.
+    const SCEV *const *Operands;
+    size_t NumOperands;
+
+    SCEVNAryExpr(const FoldingSetNodeIDRef ID,
+                 enum SCEVTypes T, const SCEV *const *O, size_t N)
+      : SCEV(ID, T), Operands(O), NumOperands(N) {}
+
+  public:
+    size_t getNumOperands() const { return NumOperands; }
+
+    const SCEV *getOperand(unsigned i) const {
+      assert(i < NumOperands && "Operand index out of range!");
+      return Operands[i];
+    }
+
+    using op_iterator = const SCEV *const *;
+    using op_range = iterator_range<op_iterator>;
+
+    op_iterator op_begin() const { return Operands; }
+    op_iterator op_end() const { return Operands + NumOperands; }
+    op_range operands() const {
+      return make_range(op_begin(), op_end());
+    }
+
+    Type *getType() const { return getOperand(0)->getType(); }
+
+    NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
+      return (NoWrapFlags)(SubclassData & Mask);
+    }
+
+    bool hasNoUnsignedWrap() const {
+      return getNoWrapFlags(FlagNUW) != FlagAnyWrap;
+    }
+
+    bool hasNoSignedWrap() const {
+      return getNoWrapFlags(FlagNSW) != FlagAnyWrap;
+    }
+
+    bool hasNoSelfWrap() const {
+      return getNoWrapFlags(FlagNW) != FlagAnyWrap;
+    }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scAddExpr ||
+             S->getSCEVType() == scMulExpr ||
+             S->getSCEVType() == scSMaxExpr ||
+             S->getSCEVType() == scUMaxExpr ||
+             S->getSCEVType() == scAddRecExpr;
+    }
+  };
+
+  /// This node is the base class for n'ary commutative operators.
+  class SCEVCommutativeExpr : public SCEVNAryExpr {
+  protected:
+    SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
+                        enum SCEVTypes T, const SCEV *const *O, size_t N)
+      : SCEVNAryExpr(ID, T, O, N) {}
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scAddExpr ||
+             S->getSCEVType() == scMulExpr ||
+             S->getSCEVType() == scSMaxExpr ||
+             S->getSCEVType() == scUMaxExpr;
+    }
+
+    /// Set flags for a non-recurrence without clearing previously set flags.
+    void setNoWrapFlags(NoWrapFlags Flags) {
+      SubclassData |= Flags;
+    }
+  };
+
+  /// This node represents an addition of some number of SCEVs.
+  class SCEVAddExpr : public SCEVCommutativeExpr {
+    friend class ScalarEvolution;
+
+    SCEVAddExpr(const FoldingSetNodeIDRef ID,
+                const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scAddExpr, O, N) {}
+
+  public:
+    Type *getType() const {
+      // Use the type of the last operand, which is likely to be a pointer
+      // type, if there is one. This doesn't usually matter, but it can help
+      // reduce casts when the expressions are expanded.
+      return getOperand(getNumOperands() - 1)->getType();
+    }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scAddExpr;
+    }
+  };
+
+  /// This node represents multiplication of some number of SCEVs.
+  class SCEVMulExpr : public SCEVCommutativeExpr {
+    friend class ScalarEvolution;
+
+    SCEVMulExpr(const FoldingSetNodeIDRef ID,
+                const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scMulExpr;
+    }
+  };
+
+  /// This class represents a binary unsigned division operation.
+  class SCEVUDivExpr : public SCEV {
+    friend class ScalarEvolution;
+
+    const SCEV *LHS;
+    const SCEV *RHS;
+
+    SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+      : SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
+
+  public:
+    const SCEV *getLHS() const { return LHS; }
+    const SCEV *getRHS() const { return RHS; }
+
+    Type *getType() const {
+      // In most cases the types of LHS and RHS will be the same, but in some
+      // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
+      // depend on the type for correctness, but handling types carefully can
+      // avoid extra casts in the SCEVExpander. The LHS is more likely to be
+      // a pointer type than the RHS, so use the RHS' type here.
+      return getRHS()->getType();
+    }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scUDivExpr;
+    }
+  };
+
+  /// This node represents a polynomial recurrence on the trip count
+  /// of the specified loop.  This is the primary focus of the
+  /// ScalarEvolution framework; all the other SCEV subclasses are
+  /// mostly just supporting infrastructure to allow SCEVAddRecExpr
+  /// expressions to be created and analyzed.
+  ///
+  /// All operands of an AddRec are required to be loop invariant.
+  ///
+  class SCEVAddRecExpr : public SCEVNAryExpr {
+    friend class ScalarEvolution;
+
+    const Loop *L;
+
+    SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
+                   const SCEV *const *O, size_t N, const Loop *l)
+      : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
+
+  public:
+    const SCEV *getStart() const { return Operands[0]; }
+    const Loop *getLoop() const { return L; }
+
+    /// Constructs and returns the recurrence indicating how much this
+    /// expression steps by.  If this is a polynomial of degree N, it
+    /// returns a chrec of degree N-1.  We cannot determine whether
+    /// the step recurrence has self-wraparound.
+    const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+      if (isAffine()) return getOperand(1);
+      return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
+                                                           op_end()),
+                              getLoop(), FlagAnyWrap);
+    }
+
+    /// Return true if this represents an expression A + B*x where A
+    /// and B are loop invariant values.
+    bool isAffine() const {
+      // We know that the start value is invariant.  This expression is thus
+      // affine iff the step is also invariant.
+      return getNumOperands() == 2;
+    }
+
+    /// Return true if this represents an expression A + B*x + C*x^2
+    /// where A, B and C are loop invariant values.  This corresponds
+    /// to an addrec of the form {L,+,M,+,N}.
+    bool isQuadratic() const {
+      return getNumOperands() == 3;
+    }
+
+    /// Set flags for a recurrence without clearing any previously set flags.
+    /// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
+    /// to make it easier to propagate flags.
+    void setNoWrapFlags(NoWrapFlags Flags) {
+      if (Flags & (FlagNUW | FlagNSW))
+        Flags = ScalarEvolution::setFlags(Flags, FlagNW);
+      SubclassData |= Flags;
+    }
+
+    /// Return the value of this chain of recurrences at the specified
+    /// iteration number.
+    const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
+
+    /// Return the number of iterations of this loop that produce
+    /// values in the specified constant range.  Another way of
+    /// looking at this is that it returns the first iteration number
+    /// where the value falls outside the range, thus computing the
+    /// exit count.  If the iteration count can't be computed, an
+    /// instance of SCEVCouldNotCompute is returned.
+    const SCEV *getNumIterationsInRange(const ConstantRange &Range,
+                                        ScalarEvolution &SE) const;
+
+    /// Return an expression representing the value of this expression
+    /// one iteration of the loop ahead.
+    const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scAddRecExpr;
+    }
+  };
+
+  /// This class represents a signed maximum selection.
+  class SCEVSMaxExpr : public SCEVCommutativeExpr {
+    friend class ScalarEvolution;
+
+    SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
+                 const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
+      // Max never overflows.
+      setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+    }
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scSMaxExpr;
+    }
+  };
+
+  /// This class represents an unsigned maximum selection.
+  class SCEVUMaxExpr : public SCEVCommutativeExpr {
+    friend class ScalarEvolution;
+
+    SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
+                 const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
+      // Max never overflows.
+      setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+    }
+
+  public:
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scUMaxExpr;
+    }
+  };
+
+  /// This node represents an entirely unknown SCEV value, represented
+  /// only by its underlying LLVM Value.  This is the "bottom" value
+  /// for the analysis.
+  class SCEVUnknown final : public SCEV, private CallbackVH {
+    friend class ScalarEvolution;
+
+    /// The parent ScalarEvolution value. This is used to update the
+    /// parent's maps when the value associated with a SCEVUnknown is
+    /// deleted or RAUW'd.
+    ScalarEvolution *SE;
+
+    /// The next pointer in the linked list of all SCEVUnknown
+    /// instances owned by a ScalarEvolution.
+    SCEVUnknown *Next;
+
+    SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V,
+                ScalarEvolution *se, SCEVUnknown *next) :
+      SCEV(ID, scUnknown), CallbackVH(V), SE(se), Next(next) {}
+
+    // Implement CallbackVH.
+    void deleted() override;
+    void allUsesReplacedWith(Value *New) override;
+
+  public:
+    Value *getValue() const { return getValPtr(); }
+
+    /// @{
+    /// Test whether this is a special constant representing a type
+    /// size, alignment, or field offset in a target-independent
+    /// manner that has not been folded with other operations into
+    /// something unrecognizable.  This is mainly useful for
+    /// pretty-printing and other situations where it isn't
+    /// absolutely required for these tests to succeed.
+    bool isSizeOf(Type *&AllocTy) const;
+    bool isAlignOf(Type *&AllocTy) const;
+    bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
+    /// @}
+
+    Type *getType() const { return getValPtr()->getType(); }
+
+    /// Methods for support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const SCEV *S) {
+      return S->getSCEVType() == scUnknown;
+    }
+  };
+
+  /// This class defines a simple visitor class that may be used for
+  /// various SCEV analysis purposes.
+  template<typename SC, typename RetVal=void>
+  struct SCEVVisitor {
+    RetVal visit(const SCEV *S) {
+      switch (S->getSCEVType()) {
+      case scConstant:
+        return ((SC*)this)->visitConstant((const SCEVConstant*)S);
+      case scTruncate:
+        return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S);
+      case scZeroExtend:
+        return ((SC*)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr*)S);
+      case scSignExtend:
+        return ((SC*)this)->visitSignExtendExpr((const SCEVSignExtendExpr*)S);
+      case scAddExpr:
+        return ((SC*)this)->visitAddExpr((const SCEVAddExpr*)S);
+      case scMulExpr:
+        return ((SC*)this)->visitMulExpr((const SCEVMulExpr*)S);
+      case scUDivExpr:
+        return ((SC*)this)->visitUDivExpr((const SCEVUDivExpr*)S);
+      case scAddRecExpr:
+        return ((SC*)this)->visitAddRecExpr((const SCEVAddRecExpr*)S);
+      case scSMaxExpr:
+        return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S);
+      case scUMaxExpr:
+        return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S);
+      case scUnknown:
+        return ((SC*)this)->visitUnknown((const SCEVUnknown*)S);
+      case scCouldNotCompute:
+        return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S);
+      default:
+        llvm_unreachable("Unknown SCEV type!");
+      }
+    }
+
+    RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
+      llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
+    }
+  };
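+
+  // A minimal illustrative visitor (a sketch, with the remaining visit*
+  // overloads elided) that counts SCEVUnknown leaves via CRTP dispatch:
+  //
+  //   struct UnknownCounter : SCEVVisitor<UnknownCounter, unsigned> {
+  //     unsigned visitConstant(const SCEVConstant *) { return 0; }
+  //     unsigned visitUnknown(const SCEVUnknown *) { return 1; }
+  //     unsigned visitAddExpr(const SCEVAddExpr *E) {
+  //       unsigned N = 0;
+  //       for (const SCEV *Op : E->operands())
+  //         N += visit(Op);
+  //       return N;
+  //     }
+  //     // ... and similarly for the other expression kinds.
+  //   };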
+
+  /// Visit all nodes in the expression tree using worklist traversal.
+  ///
+  /// Visitor implements:
+  ///   // return true to follow this node.
+  ///   bool follow(const SCEV *S);
+  ///   // return true to terminate the search.
+  ///   bool isDone();
+  template<typename SV>
+  class SCEVTraversal {
+    SV &Visitor;
+    SmallVector<const SCEV *, 8> Worklist;
+    SmallPtrSet<const SCEV *, 8> Visited;
+
+    void push(const SCEV *S) {
+      if (Visited.insert(S).second && Visitor.follow(S))
+        Worklist.push_back(S);
+    }
+
+  public:
+    SCEVTraversal(SV& V): Visitor(V) {}
+
+    void visitAll(const SCEV *Root) {
+      push(Root);
+      while (!Worklist.empty() && !Visitor.isDone()) {
+        const SCEV *S = Worklist.pop_back_val();
+
+        switch (S->getSCEVType()) {
+        case scConstant:
+        case scUnknown:
+          break;
+        case scTruncate:
+        case scZeroExtend:
+        case scSignExtend:
+          push(cast<SCEVCastExpr>(S)->getOperand());
+          break;
+        case scAddExpr:
+        case scMulExpr:
+        case scSMaxExpr:
+        case scUMaxExpr:
+        case scAddRecExpr:
+          for (const auto *Op : cast<SCEVNAryExpr>(S)->operands())
+            push(Op);
+          break;
+        case scUDivExpr: {
+          const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+          push(UDiv->getLHS());
+          push(UDiv->getRHS());
+          break;
+        }
+        case scCouldNotCompute:
+          llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+        default:
+          llvm_unreachable("Unknown SCEV kind!");
+        }
+      }
+    }
+  };
+
+  /// Use SCEVTraversal to visit all nodes in the given expression tree.
+  template<typename SV>
+  void visitAll(const SCEV *Root, SV& Visitor) {
+    SCEVTraversal<SV> T(Visitor);
+    T.visitAll(Root);
+  }
+
+  /// Return true if any node in \p Root satisfies the predicate \p Pred.
+  template <typename PredTy>
+  bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
+    struct FindClosure {
+      bool Found = false;
+      PredTy Pred;
+
+      FindClosure(PredTy Pred) : Pred(Pred) {}
+
+      bool follow(const SCEV *S) {
+        if (!Pred(S))
+          return true;
+
+        Found = true;
+        return false;
+      }
+
+      bool isDone() const { return Found; }
+    };
+
+    FindClosure FC(Pred);
+    visitAll(Root, FC);
+    return FC.Found;
+  }
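+
+  // For example (illustrative), testing whether an expression depends on
+  // any SCEVUnknown leaf:
+  //
+  //   bool HasUnknown = SCEVExprContains(
+  //       S, [](const SCEV *N) { return isa<SCEVUnknown>(N); });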
+
+  /// This visitor recursively visits a SCEV expression and re-writes it.
+  /// The result from each visit is cached, so it will return the same
+  /// SCEV for the same input.
+  template<typename SC>
+  class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
+  protected:
+    ScalarEvolution &SE;
+    // Memoize the result of each visit so that we only compute once for
+    // the same input SCEV. This is to avoid redundant computations when
+    // a SCEV is referenced by multiple SCEVs. Without memoization, this
+    // visit algorithm would have exponential time complexity in the worst
+    // case, causing the compiler to hang on certain tests.
+    DenseMap<const SCEV *, const SCEV *> RewriteResults;
+
+  public:
+    SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
+
+    const SCEV *visit(const SCEV *S) {
+      auto It = RewriteResults.find(S);
+      if (It != RewriteResults.end())
+        return It->second;
+      auto* Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
+      auto Result = RewriteResults.try_emplace(S, Visited);
+      assert(Result.second && "Should insert a new entry");
+      return Result.first->second;
+    }
+
+    const SCEV *visitConstant(const SCEVConstant *Constant) {
+      return Constant;
+    }
+
+    const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
+      const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+      return Operand == Expr->getOperand()
+                 ? Expr
+                 : SE.getTruncateExpr(Operand, Expr->getType());
+    }
+
+    const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+      const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+      return Operand == Expr->getOperand()
+                 ? Expr
+                 : SE.getZeroExtendExpr(Operand, Expr->getType());
+    }
+
+    const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+      const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+      return Operand == Expr->getOperand()
+                 ? Expr
+                 : SE.getSignExtendExpr(Operand, Expr->getType());
+    }
+
+    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      bool Changed = false;
+      for (auto *Op : Expr->operands()) {
+        Operands.push_back(((SC*)this)->visit(Op));
+        Changed |= Op != Operands.back();
+      }
+      return !Changed ? Expr : SE.getAddExpr(Operands);
+    }
+
+    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      bool Changed = false;
+      for (auto *Op : Expr->operands()) {
+        Operands.push_back(((SC*)this)->visit(Op));
+        Changed |= Op != Operands.back();
+      }
+      return !Changed ? Expr : SE.getMulExpr(Operands);
+    }
+
+    const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
+      auto *LHS = ((SC *)this)->visit(Expr->getLHS());
+      auto *RHS = ((SC *)this)->visit(Expr->getRHS());
+      bool Changed = LHS != Expr->getLHS() || RHS != Expr->getRHS();
+      return !Changed ? Expr : SE.getUDivExpr(LHS, RHS);
+    }
+
+    const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      bool Changed = false;
+      for (auto *Op : Expr->operands()) {
+        Operands.push_back(((SC*)this)->visit(Op));
+        Changed |= Op != Operands.back();
+      }
+      return !Changed ? Expr
+                      : SE.getAddRecExpr(Operands, Expr->getLoop(),
+                                         Expr->getNoWrapFlags());
+    }
+
+    const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      bool Changed = false;
+      for (auto *Op : Expr->operands()) {
+        Operands.push_back(((SC *)this)->visit(Op));
+        Changed |= Op != Operands.back();
+      }
+      return !Changed ? Expr : SE.getSMaxExpr(Operands);
+    }
+
+    const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      bool Changed = false;
+      for (auto *Op : Expr->operands()) {
+        Operands.push_back(((SC*)this)->visit(Op));
+        Changed |= Op != Operands.back();
+      }
+      return !Changed ? Expr : SE.getUMaxExpr(Operands);
+    }
+
+    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+      return Expr;
+    }
+
+    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
+      return Expr;
+    }
+  };
+
+  using ValueToValueMap = DenseMap<const Value *, Value *>;
+
+  /// The SCEVParameterRewriter takes a scalar evolution expression and updates
+  /// the SCEVUnknown components following the Map (Value -> Value).
+  class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
+  public:
+    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
+                               ValueToValueMap &Map,
+                               bool InterpretConsts = false) {
+      SCEVParameterRewriter Rewriter(SE, Map, InterpretConsts);
+      return Rewriter.visit(Scev);
+    }
+
+    SCEVParameterRewriter(ScalarEvolution &SE, ValueToValueMap &M, bool C)
+      : SCEVRewriteVisitor(SE), Map(M), InterpretConsts(C) {}
+
+    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+      Value *V = Expr->getValue();
+      if (Map.count(V)) {
+        Value *NV = Map[V];
+        if (InterpretConsts && isa<ConstantInt>(NV))
+          return SE.getConstant(cast<ConstantInt>(NV));
+        return SE.getUnknown(NV);
+      }
+      return Expr;
+    }
+
+  private:
+    ValueToValueMap &Map;
+    bool InterpretConsts;
+  };
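+
+  // Illustrative use (a sketch; OldV and NewV are caller-provided Values):
+  //
+  //   ValueToValueMap Map;
+  //   Map[OldV] = NewV;
+  //   const SCEV *Rewritten = SCEVParameterRewriter::rewrite(S, SE, Map);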
+
+  using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
+
+  /// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
+  /// the Map (Loop -> SCEV) to all AddRecExprs.
+  class SCEVLoopAddRecRewriter
+      : public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
+  public:
+    SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
+        : SCEVRewriteVisitor(SE), Map(M) {}
+
+    static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
+                               ScalarEvolution &SE) {
+      SCEVLoopAddRecRewriter Rewriter(SE, Map);
+      return Rewriter.visit(Scev);
+    }
+
+    const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+      SmallVector<const SCEV *, 2> Operands;
+      for (const SCEV *Op : Expr->operands())
+        Operands.push_back(visit(Op));
+
+      const Loop *L = Expr->getLoop();
+      const SCEV *Res = SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
+
+      if (Map.count(L) == 0)
+        return Res;
+
+      const SCEVAddRecExpr *Rec = cast<SCEVAddRecExpr>(Res);
+      return Rec->evaluateAtIteration(Map[L], SE);
+    }
+
+  private:
+    LoopToScevMapT &Map;
+  };
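+
+  // Illustrative use (a sketch): evaluating S at a fixed iteration of L,
+  // so that e.g. {0,+,2}<%L> rewrites to the constant 10.
+  //
+  //   LoopToScevMapT Map;
+  //   Map[L] = SE.getConstant(S->getType(), 5);
+  //   const SCEV *AtIter5 = SCEVLoopAddRecRewriter::rewrite(S, Map, SE);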
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionNormalization.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionNormalization.h
new file mode 100644
index 0000000..51c9212
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -0,0 +1,69 @@
+//===- llvm/Analysis/ScalarEvolutionNormalization.h - See below -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for working with "normalized" ScalarEvolution
+// expressions.
+//
+// The following example illustrates post-increment uses and how normalized
+// expressions help.
+//
+//   for (i=0; i!=n; ++i) {
+//     ...
+//   }
+//   use(i);
+//
+// While the expression for most uses of i inside the loop is {0,+,1}<%L>, the
+// expression for the use of i outside the loop is {1,+,1}<%L>, since i is
+// incremented at the end of the loop body. This is inconvenient, since it
+// suggests that we need two different induction variables, one that starts
+// at 0 and one that starts at 1. We'd prefer to be able to think of these as
+// the same induction variable, with uses inside the loop using the
+// "pre-incremented" value, and uses after the loop using the
+// "post-incremented" value.
+//
+// Expressions for post-incremented uses are represented as an expression
+// paired with a set of loops for which the expression is in "post-increment"
+// mode (there may be multiple loops).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+
+namespace llvm {
+
+class Loop;
+class ScalarEvolution;
+class SCEV;
+
+typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;
+
+typedef function_ref<bool(const SCEVAddRecExpr *)> NormalizePredTy;
+
+/// Normalize \p S to be post-increment for all loops present in \p
+/// Loops.
+const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
+                                   ScalarEvolution &SE);
+
+/// Normalize \p S for all add recurrence sub-expressions for which \p
+/// Pred returns true.
+const SCEV *normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,
+                                     ScalarEvolution &SE);
+
+/// Denormalize \p S to be post-increment for all loops present in \p
+/// Loops.
+const SCEV *denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
+                                     ScalarEvolution &SE);
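+
+// Illustrative round trip (a sketch): normalize the uses of an IV in loop L
+// to post-increment form, then denormalize to recover the original.
+//
+//   PostIncLoopSet Loops;
+//   Loops.insert(L);
+//   const SCEV *Post = normalizeForPostIncUse(S, Loops, SE);
+//   const SCEV *Pre = denormalizeForPostIncUse(Post, Loops, SE);
+//   // In the common case Pre is the original expression S again.
+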
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ScopedNoAliasAA.h b/linux-x64/clang/include/llvm/Analysis/ScopedNoAliasAA.h
new file mode 100644
index 0000000..508968e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -0,0 +1,89 @@
+//===- ScopedNoAliasAA.h - Scoped No-Alias Alias Analysis -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This is the interface for a metadata-based scoped no-alias analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
+#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+
+class Function;
+class MDNode;
+class MemoryLocation;
+
+/// A simple AA result which uses scoped-noalias metadata to answer queries.
+class ScopedNoAliasAAResult : public AAResultBase<ScopedNoAliasAAResult> {
+  friend AAResultBase<ScopedNoAliasAAResult>;
+
+public:
+  /// Handle invalidation events from the new pass manager.
+  ///
+  /// By definition, this result is stateless and so remains valid.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+private:
+  bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class ScopedNoAliasAA : public AnalysisInfoMixin<ScopedNoAliasAA> {
+  friend AnalysisInfoMixin<ScopedNoAliasAA>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = ScopedNoAliasAAResult;
+
+  ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the ScopedNoAliasAAResult object.
+class ScopedNoAliasAAWrapperPass : public ImmutablePass {
+  std::unique_ptr<ScopedNoAliasAAResult> Result;
+
+public:
+  static char ID;
+
+  ScopedNoAliasAAWrapperPass();
+
+  ScopedNoAliasAAResult &getResult() { return *Result; }
+  const ScopedNoAliasAAResult &getResult() const { return *Result; }
+
+  bool doInitialization(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createScopedNoAliasAAWrapperPass - This pass implements metadata-based
+// scoped noalias analysis.
+//
+ImmutablePass *createScopedNoAliasAAWrapperPass();
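+//
+// Illustrative registration in a legacy pass pipeline (a sketch):
+//
+//   legacy::PassManager PM;
+//   PM.add(createScopedNoAliasAAWrapperPass());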
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_SCOPEDNOALIASAA_H
diff --git a/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h b/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h
new file mode 100644
index 0000000..1b8df03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h
@@ -0,0 +1,530 @@
+//===- SparsePropagation.h - Sparse Conditional Property Propagation ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an abstract sparse conditional propagation algorithm,
+// modeled after SCCP, but with a customizable lattice function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SPARSEPROPAGATION_H
+#define LLVM_ANALYSIS_SPARSEPROPAGATION_H
+
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/Debug.h"
+#include <set>
+
+#define DEBUG_TYPE "sparseprop"
+
+namespace llvm {
+
+/// A template for translating between LLVM Values and LatticeKeys. Clients must
+/// provide a specialization of LatticeKeyInfo for their LatticeKey type.
+template <class LatticeKey> struct LatticeKeyInfo {
+  // static inline Value *getValueFromLatticeKey(LatticeKey Key);
+  // static inline LatticeKey getLatticeKeyFromValue(Value *V);
+};
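+
+// An illustrative specialization (a sketch) for the common case where the
+// lattice key is the LLVM Value itself:
+//
+//   template <> struct LatticeKeyInfo<Value *> {
+//     static inline Value *getValueFromLatticeKey(Value *V) { return V; }
+//     static inline Value *getLatticeKeyFromValue(Value *V) { return V; }
+//   };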
+
+template <class LatticeKey, class LatticeVal,
+          class KeyInfo = LatticeKeyInfo<LatticeKey>>
+class SparseSolver;
+
+/// AbstractLatticeFunction - This class is implemented by the dataflow instance
+/// to specify what the lattice values are and how they handle merges etc.  This
+/// gives the client the power to compute lattice values from instructions,
+/// constants, etc.  The current requirement is that lattice values must be
+/// copyable.  At the moment, nothing tries to avoid copying.  Additionally,
+/// lattice keys must be able to be used as keys of a mapping data structure.
+/// Internally, the generic solver currently uses a DenseMap to map lattice keys
+/// to lattice values.  If the lattice key is a non-standard type, a
+/// specialization of DenseMapInfo must be provided.
+template <class LatticeKey, class LatticeVal> class AbstractLatticeFunction {
+private:
+  LatticeVal UndefVal, OverdefinedVal, UntrackedVal;
+
+public:
+  AbstractLatticeFunction(LatticeVal undefVal, LatticeVal overdefinedVal,
+                          LatticeVal untrackedVal) {
+    UndefVal = undefVal;
+    OverdefinedVal = overdefinedVal;
+    UntrackedVal = untrackedVal;
+  }
+
+  virtual ~AbstractLatticeFunction() = default;
+
+  LatticeVal getUndefVal()       const { return UndefVal; }
+  LatticeVal getOverdefinedVal() const { return OverdefinedVal; }
+  LatticeVal getUntrackedVal()   const { return UntrackedVal; }
+
+  /// IsUntrackedValue - If the specified LatticeKey is obviously uninteresting
+  /// to the analysis (i.e., it would always return UntrackedVal), this
+  /// function can return true to avoid pointless work.
+  virtual bool IsUntrackedValue(LatticeKey Key) { return false; }
+
+  /// ComputeLatticeVal - Compute and return a LatticeVal corresponding to the
+  /// given LatticeKey.
+  virtual LatticeVal ComputeLatticeVal(LatticeKey Key) {
+    return getOverdefinedVal();
+  }
+
+  /// IsSpecialCasedPHI - Given a PHI node, determine whether this PHI node
+  /// is one that we want to handle through ComputeInstructionState.
+  virtual bool IsSpecialCasedPHI(PHINode *PN) { return false; }
+
+  /// MergeValues - Compute and return the merge of the two specified lattice
+  /// values.  Merging should only move one direction down the lattice to
+  /// guarantee convergence (toward overdefined).
+  virtual LatticeVal MergeValues(LatticeVal X, LatticeVal Y) {
+    return getOverdefinedVal(); // always safe, never useful.
+  }
+
+  /// ComputeInstructionState - Compute the LatticeKeys that change as a result
+  /// of executing instruction \p I. Their associated LatticeVals are stored
+  /// in \p ChangedValues.
+  virtual void
+  ComputeInstructionState(Instruction &I,
+                          DenseMap<LatticeKey, LatticeVal> &ChangedValues,
+                          SparseSolver<LatticeKey, LatticeVal> &SS) = 0;
+
+  /// PrintLatticeVal - Render the given LatticeVal to the specified stream.
+  virtual void PrintLatticeVal(LatticeVal LV, raw_ostream &OS);
+
+  /// PrintLatticeKey - Render the given LatticeKey to the specified stream.
+  virtual void PrintLatticeKey(LatticeKey Key, raw_ostream &OS);
+
+  /// GetValueFromLatticeVal - If the given LatticeVal is representable as an
+  /// LLVM value, return it; otherwise, return nullptr. If a type is given, the
+  /// returned value must have the same type. This function is used by the
+  /// generic solver in attempting to resolve branch and switch conditions.
+  virtual Value *GetValueFromLatticeVal(LatticeVal LV, Type *Ty = nullptr) {
+    return nullptr;
+  }
+};
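+
+// A minimal illustrative subclass (a sketch over a plain int lattice,
+// where 0, 1, and -1 stand for undefined, overdefined, and untracked):
+//
+//   struct TrivialLattice : AbstractLatticeFunction<Value *, int> {
+//     TrivialLattice() : AbstractLatticeFunction(0, 1, -1) {}
+//     void ComputeInstructionState(
+//         Instruction &I, DenseMap<Value *, int> &ChangedValues,
+//         SparseSolver<Value *, int> &SS) override {
+//       ChangedValues[&I] = getOverdefinedVal(); // Conservative default.
+//     }
+//   };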
+
+/// SparseSolver - This class is a general purpose solver for Sparse Conditional
+/// Propagation with a programmable lattice function.
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+class SparseSolver {
+
+  /// LatticeFunc - This is the object that knows the lattice and how to
+  /// compute transfer functions.
+  AbstractLatticeFunction<LatticeKey, LatticeVal> *LatticeFunc;
+
+  /// ValueState - Holds the LatticeVals associated with LatticeKeys.
+  DenseMap<LatticeKey, LatticeVal> ValueState;
+
+  /// BBExecutable - Holds the basic blocks that are executable.
+  SmallPtrSet<BasicBlock *, 16> BBExecutable;
+
+  /// ValueWorkList - Holds values that should be processed.
+  SmallVector<Value *, 64> ValueWorkList;
+
+  /// BBWorkList - Holds basic blocks that should be processed.
+  SmallVector<BasicBlock *, 64> BBWorkList;
+
+  using Edge = std::pair<BasicBlock *, BasicBlock *>;
+
+  /// KnownFeasibleEdges - Entries in this set are edges which have already had
+  /// PHI nodes retriggered.
+  std::set<Edge> KnownFeasibleEdges;
+
+public:
+  explicit SparseSolver(
+      AbstractLatticeFunction<LatticeKey, LatticeVal> *Lattice)
+      : LatticeFunc(Lattice) {}
+  SparseSolver(const SparseSolver &) = delete;
+  SparseSolver &operator=(const SparseSolver &) = delete;
+
+  /// Solve - Solve for constants and executable blocks.
+  void Solve();
+
+  void Print(raw_ostream &OS) const;
+
+  /// getExistingValueState - Return the LatticeVal object corresponding to the
+  /// given value from the ValueState map. If the value is not in the map,
+  /// UntrackedVal is returned, unlike the getValueState method.
+  LatticeVal getExistingValueState(LatticeKey Key) const {
+    auto I = ValueState.find(Key);
+    return I != ValueState.end() ? I->second : LatticeFunc->getUntrackedVal();
+  }
+
+  /// getValueState - Return the LatticeVal object corresponding to the given
+  /// value from the ValueState map. If the value is not in the map, its state
+  /// is initialized.
+  LatticeVal getValueState(LatticeKey Key);
+
+  /// isEdgeFeasible - Return true if the control flow edge from the 'From'
+  /// basic block to the 'To' basic block is currently feasible.  If
+  /// AggressiveUndef is true, then this treats values with unknown lattice
+  /// values as undefined.  This is generally only useful when solving the
+  /// lattice, not when querying it.
+  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To,
+                      bool AggressiveUndef = false);
+
+  /// isBlockExecutable - Return true if there are any known feasible
+  /// edges into the basic block.  This is generally only useful when
+  /// querying the lattice.
+  bool isBlockExecutable(BasicBlock *BB) const {
+    return BBExecutable.count(BB);
+  }
+
+  /// MarkBlockExecutable - This method can be used by clients to mark all of
+  /// the blocks that are known to be intrinsically live in the processed unit.
+  void MarkBlockExecutable(BasicBlock *BB);
+
+private:
+  /// UpdateState - When the state of some LatticeKey is potentially updated to
+  /// the given LatticeVal, this function notices and adds the LLVM value
+  /// corresponding to the key to the work list, if needed.
+  void UpdateState(LatticeKey Key, LatticeVal LV);
+
+  /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
+  /// work list if it is not already executable.
+  void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
+
+  /// getFeasibleSuccessors - Return a vector of booleans to indicate which
+  /// successors are reachable from a given terminator instruction.
+  void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs,
+                             bool AggressiveUndef);
+
+  void visitInst(Instruction &I);
+  void visitPHINode(PHINode &I);
+  void visitTerminatorInst(TerminatorInst &TI);
+};
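+
+// Driving the solver (an illustrative sketch; Lattice is an implementation
+// of AbstractLatticeFunction such as the one sketched above, and SomeBB is
+// a hypothetical block of interest):
+//
+//   SparseSolver<Value *, int> Solver(&Lattice);
+//   Solver.MarkBlockExecutable(&F.getEntryBlock());
+//   Solver.Solve();
+//   bool Live = Solver.isBlockExecutable(SomeBB);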
+
+//===----------------------------------------------------------------------===//
+//                  AbstractLatticeFunction Implementation
+//===----------------------------------------------------------------------===//
+
+template <class LatticeKey, class LatticeVal>
+void AbstractLatticeFunction<LatticeKey, LatticeVal>::PrintLatticeVal(
+    LatticeVal V, raw_ostream &OS) {
+  if (V == UndefVal)
+    OS << "undefined";
+  else if (V == OverdefinedVal)
+    OS << "overdefined";
+  else if (V == UntrackedVal)
+    OS << "untracked";
+  else
+    OS << "unknown lattice value";
+}
+
+template <class LatticeKey, class LatticeVal>
+void AbstractLatticeFunction<LatticeKey, LatticeVal>::PrintLatticeKey(
+    LatticeKey Key, raw_ostream &OS) {
+  OS << "unknown lattice key";
+}
+
+//===----------------------------------------------------------------------===//
+//                          SparseSolver Implementation
+//===----------------------------------------------------------------------===//
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+LatticeVal
+SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getValueState(LatticeKey Key) {
+  auto I = ValueState.find(Key);
+  if (I != ValueState.end())
+    return I->second; // Common case, in the map
+
+  if (LatticeFunc->IsUntrackedValue(Key))
+    return LatticeFunc->getUntrackedVal();
+  LatticeVal LV = LatticeFunc->ComputeLatticeVal(Key);
+
+  // If this value is untracked, don't add it to the map.
+  if (LV == LatticeFunc->getUntrackedVal())
+    return LV;
+  return ValueState[Key] = LV;
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::UpdateState(LatticeKey Key,
+                                                                LatticeVal LV) {
+  auto I = ValueState.find(Key);
+  if (I != ValueState.end() && I->second == LV)
+    return; // No change.
+
+  // Update the state of the given LatticeKey and add its corresponding LLVM
+  // value to the work list.
+  ValueState[Key] = LV;
+  if (Value *V = KeyInfo::getValueFromLatticeKey(Key))
+    ValueWorkList.push_back(V);
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::MarkBlockExecutable(
+    BasicBlock *BB) {
+  if (!BBExecutable.insert(BB).second)
+    return;
+  DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
+  BBWorkList.push_back(BB); // Add the block to the work list!
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::markEdgeExecutable(
+    BasicBlock *Source, BasicBlock *Dest) {
+  if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
+    return; // This edge is already known to be executable!
+
+  DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> "
+               << Dest->getName() << "\n");
+
+  if (BBExecutable.count(Dest)) {
+    // The destination is already executable, but we just made an edge
+    // feasible that wasn't before.  Revisit the PHI nodes in the block
+    // because they have potentially new operands.
+    for (BasicBlock::iterator I = Dest->begin(); isa<PHINode>(I); ++I)
+      visitPHINode(*cast<PHINode>(I));
+  } else {
+    MarkBlockExecutable(Dest);
+  }
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getFeasibleSuccessors(
+    TerminatorInst &TI, SmallVectorImpl<bool> &Succs, bool AggressiveUndef) {
+  Succs.resize(TI.getNumSuccessors());
+  if (TI.getNumSuccessors() == 0)
+    return;
+
+  if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
+    if (BI->isUnconditional()) {
+      Succs[0] = true;
+      return;
+    }
+
+    LatticeVal BCValue;
+    if (AggressiveUndef)
+      BCValue =
+          getValueState(KeyInfo::getLatticeKeyFromValue(BI->getCondition()));
+    else
+      BCValue = getExistingValueState(
+          KeyInfo::getLatticeKeyFromValue(BI->getCondition()));
+
+    if (BCValue == LatticeFunc->getOverdefinedVal() ||
+        BCValue == LatticeFunc->getUntrackedVal()) {
+      // Overdefined condition variables can branch either way.
+      Succs[0] = Succs[1] = true;
+      return;
+    }
+
+    // If undefined, neither is feasible yet.
+    if (BCValue == LatticeFunc->getUndefVal())
+      return;
+
+    Constant *C =
+        dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
+            BCValue, BI->getCondition()->getType()));
+    if (!C || !isa<ConstantInt>(C)) {
+      // Non-constant values can go either way.
+      Succs[0] = Succs[1] = true;
+      return;
+    }
+
+    // Constant condition variables mean the branch can only go a single way.
+    Succs[C->isNullValue()] = true;
+    return;
+  }
+
+  if (TI.isExceptional()) {
+    Succs.assign(Succs.size(), true);
+    return;
+  }
+
+  if (isa<IndirectBrInst>(TI)) {
+    Succs.assign(Succs.size(), true);
+    return;
+  }
+
+  SwitchInst &SI = cast<SwitchInst>(TI);
+  LatticeVal SCValue;
+  if (AggressiveUndef)
+    SCValue = getValueState(KeyInfo::getLatticeKeyFromValue(SI.getCondition()));
+  else
+    SCValue = getExistingValueState(
+        KeyInfo::getLatticeKeyFromValue(SI.getCondition()));
+
+  if (SCValue == LatticeFunc->getOverdefinedVal() ||
+      SCValue == LatticeFunc->getUntrackedVal()) {
+    // All destinations are executable!
+    Succs.assign(TI.getNumSuccessors(), true);
+    return;
+  }
+
+  // If undefined, neither is feasible yet.
+  if (SCValue == LatticeFunc->getUndefVal())
+    return;
+
+  Constant *C = dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
+      SCValue, SI.getCondition()->getType()));
+  if (!C || !isa<ConstantInt>(C)) {
+    // All destinations are executable!
+    Succs.assign(TI.getNumSuccessors(), true);
+    return;
+  }
+  SwitchInst::CaseHandle Case = *SI.findCaseValue(cast<ConstantInt>(C));
+  Succs[Case.getSuccessorIndex()] = true;
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+bool SparseSolver<LatticeKey, LatticeVal, KeyInfo>::isEdgeFeasible(
+    BasicBlock *From, BasicBlock *To, bool AggressiveUndef) {
+  SmallVector<bool, 16> SuccFeasible;
+  TerminatorInst *TI = From->getTerminator();
+  getFeasibleSuccessors(*TI, SuccFeasible, AggressiveUndef);
+
+  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+    if (TI->getSuccessor(i) == To && SuccFeasible[i])
+      return true;
+
+  return false;
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitTerminatorInst(
+    TerminatorInst &TI) {
+  SmallVector<bool, 16> SuccFeasible;
+  getFeasibleSuccessors(TI, SuccFeasible, true);
+
+  BasicBlock *BB = TI.getParent();
+
+  // Mark all feasible successors executable...
+  for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
+    if (SuccFeasible[i])
+      markEdgeExecutable(BB, TI.getSuccessor(i));
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitPHINode(PHINode &PN) {
+  // The lattice function may store more information on a PHINode than could be
+  // computed from its incoming values.  For example, SSI form stores its sigma
+  // functions as PHINodes with a single incoming value.
+  if (LatticeFunc->IsSpecialCasedPHI(&PN)) {
+    DenseMap<LatticeKey, LatticeVal> ChangedValues;
+    LatticeFunc->ComputeInstructionState(PN, ChangedValues, *this);
+    for (auto &ChangedValue : ChangedValues)
+      if (ChangedValue.second != LatticeFunc->getUntrackedVal())
+        UpdateState(ChangedValue.first, ChangedValue.second);
+    return;
+  }
+
+  LatticeKey Key = KeyInfo::getLatticeKeyFromValue(&PN);
+  LatticeVal PNIV = getValueState(Key);
+  LatticeVal Overdefined = LatticeFunc->getOverdefinedVal();
+
+  // If this value is already overdefined (common) just return.
+  if (PNIV == Overdefined || PNIV == LatticeFunc->getUntrackedVal())
+    return; // Quick exit
+
+  // Super-extra-high-degree PHI nodes are unlikely to ever be interesting,
+  // and slow us down a lot.  Just mark them overdefined.
+  if (PN.getNumIncomingValues() > 64) {
+    UpdateState(Key, Overdefined);
+    return;
+  }
+
+  // Look at all of the executable operands of the PHI node.  If any of them
+  // are overdefined, the PHI becomes overdefined as well.  Otherwise, ask the
+  // transfer function to give us the merge of the incoming values.
+  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+    // If the edge is not yet known to be feasible, it doesn't impact the PHI.
+    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent(), true))
+      continue;
+
+    // Merge in this value.
+    LatticeVal OpVal =
+        getValueState(KeyInfo::getLatticeKeyFromValue(PN.getIncomingValue(i)));
+    if (OpVal != PNIV)
+      PNIV = LatticeFunc->MergeValues(PNIV, OpVal);
+
+    if (PNIV == Overdefined)
+      break; // Rest of input values don't matter.
+  }
+
+  // Update the PHI with the computed value, which is the merge of the inputs.
+  UpdateState(Key, PNIV);
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitInst(Instruction &I) {
+  // PHIs are handled by the propagation logic, they are never passed into the
+  // transfer functions.
+  if (PHINode *PN = dyn_cast<PHINode>(&I))
+    return visitPHINode(*PN);
+
+  // Otherwise, ask the transfer function what the result is.  If this is
+  // something that we care about, remember it.
+  DenseMap<LatticeKey, LatticeVal> ChangedValues;
+  LatticeFunc->ComputeInstructionState(I, ChangedValues, *this);
+  for (auto &ChangedValue : ChangedValues)
+    if (ChangedValue.second != LatticeFunc->getUntrackedVal())
+      UpdateState(ChangedValue.first, ChangedValue.second);
+
+  if (TerminatorInst *TI = dyn_cast<TerminatorInst>(&I))
+    visitTerminatorInst(*TI);
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Solve() {
+  // Process the work lists until they are empty!
+  while (!BBWorkList.empty() || !ValueWorkList.empty()) {
+    // Process the value work list.
+    while (!ValueWorkList.empty()) {
+      Value *V = ValueWorkList.back();
+      ValueWorkList.pop_back();
+
+      DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");
+
+      // "V" got into the work list because it made a transition. See if any
+      // users are both live and in need of updating.
+      for (User *U : V->users())
+        if (Instruction *Inst = dyn_cast<Instruction>(U))
+          if (BBExecutable.count(Inst->getParent())) // Inst is executable?
+            visitInst(*Inst);
+    }
+
+    // Process the basic block work list.
+    while (!BBWorkList.empty()) {
+      BasicBlock *BB = BBWorkList.back();
+      BBWorkList.pop_back();
+
+      DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
+
+      // Notify all instructions in this basic block that they are newly
+      // executable.
+      for (Instruction &I : *BB)
+        visitInst(I);
+    }
+  }
+}
+
+template <class LatticeKey, class LatticeVal, class KeyInfo>
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Print(
+    raw_ostream &OS) const {
+  if (ValueState.empty())
+    return;
+
+  LatticeKey Key;
+  LatticeVal LV;
+
+  OS << "ValueState:\n";
+  for (auto &Entry : ValueState) {
+    std::tie(Key, LV) = Entry;
+    if (LV == LatticeFunc->getUntrackedVal())
+      continue;
+    OS << "\t";
+    LatticeFunc->PrintLatticeVal(LV, OS);
+    OS << ": ";
+    LatticeFunc->PrintLatticeKey(Key, OS);
+    OS << "\n";
+  }
+}
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_ANALYSIS_SPARSEPROPAGATION_H
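Editor's note: the solver above is driven by a client-provided lattice
function. Below is a minimal sketch of one, assuming the
AbstractLatticeFunction interface and the LatticeKeyInfo customization point
declared earlier in this header; everything named Demo* is invented for
illustration and is not part of the toolchain.

#include "llvm/Analysis/SparsePropagation.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
using namespace llvm;

enum class DemoVal { Undefined, Known, Overdefined, Untracked };

// The solver translates Values to LatticeKeys through LatticeKeyInfo; with
// Value* keys the mapping is the identity.
namespace llvm {
template <> struct LatticeKeyInfo<Value *> {
  static Value *getValueFromLatticeKey(Value *V) { return V; }
  static Value *getLatticeKeyFromValue(Value *V) { return V; }
};
} // end namespace llvm

struct DemoLattice : AbstractLatticeFunction<Value *, DemoVal> {
  DemoLattice()
      : AbstractLatticeFunction(DemoVal::Undefined, DemoVal::Overdefined,
                                DemoVal::Untracked) {}

  // Initial state: constants start out Known, everything else Undefined.
  DemoVal ComputeLatticeVal(Value *V) override {
    return isa<Constant>(V) ? DemoVal::Known : DemoVal::Undefined;
  }

  // Meet operator used by visitPHINode: agreeing inputs keep their value,
  // disagreeing inputs collapse to Overdefined.
  DemoVal MergeValues(DemoVal X, DemoVal Y) override {
    return X == Y ? X : DemoVal::Overdefined;
  }

  // Transfer function used by visitInst; a real client would model the
  // instructions it cares about instead of giving up immediately.
  void ComputeInstructionState(Instruction &I,
                               DenseMap<Value *, DemoVal> &ChangedValues,
                               SparseSolver<Value *, DemoVal> &SS) override {
    ChangedValues[&I] = DemoVal::Overdefined;
  }
};

void solveDemo(Function &F) {
  DemoLattice Lattice;
  SparseSolver<Value *, DemoVal> Solver(&Lattice);
  Solver.MarkBlockExecutable(&F.getEntryBlock());
  Solver.Solve(); // drains the block and value work lists shown above
}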
diff --git a/linux-x64/clang/include/llvm/Analysis/SyntheticCountsUtils.h b/linux-x64/clang/include/llvm/Analysis/SyntheticCountsUtils.h
new file mode 100644
index 0000000..87f4a01
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/SyntheticCountsUtils.h
@@ -0,0 +1,52 @@
+//===- SyntheticCountsUtils.h - Utilities for count propagation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for synthetic counts propagation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SYNTHETIC_COUNTS_UTILS_H
+#define LLVM_ANALYSIS_SYNTHETIC_COUNTS_UTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Support/ScaledNumber.h"
+
+namespace llvm {
+
+class CallGraph;
+class Function;
+
+/// Class with methods to propagate synthetic entry counts.
+///
+/// This class is templated on the type of the call graph and designed to work
+/// with the traditional per-module callgraph and the summary callgraphs used in
+/// ThinLTO. This contains only static methods and alias templates.
+template <typename CallGraphType> class SyntheticCountsUtils {
+public:
+  using Scaled64 = ScaledNumber<uint64_t>;
+  using CGT = GraphTraits<CallGraphType>;
+  using NodeRef = typename CGT::NodeRef;
+  using EdgeRef = typename CGT::EdgeRef;
+  using SccTy = std::vector<NodeRef>;
+
+  using GetRelBBFreqTy = function_ref<Optional<Scaled64>(EdgeRef)>;
+  using GetCountTy = function_ref<uint64_t(NodeRef)>;
+  using AddCountTy = function_ref<void(NodeRef, uint64_t)>;
+
+  static void propagate(const CallGraphType &CG, GetRelBBFreqTy GetRelBBFreq,
+                        GetCountTy GetCount, AddCountTy AddCount);
+
+private:
+  static void propagateFromSCC(const SccTy &SCC, GetRelBBFreqTy GetRelBBFreq,
+                               GetCountTy GetCount, AddCountTy AddCount);
+};
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_SYNTHETIC_COUNTS_UTILS_H
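Editor's note: a hedged sketch of how propagate() might be driven over the
regular per-module call graph. Only the SyntheticCountsUtils interface above
is from the header; the seed counts, helper names, and edge weights are
invented for illustration.

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/SyntheticCountsUtils.h"
using namespace llvm;

// Sketch: seed every defined function with a hypothetical count, then let
// propagate() distribute contributions along call edges.
void propagateDemoCounts(CallGraph &CG) {
  DenseMap<Function *, uint64_t> Counts;
  for (auto &Entry : CG)
    if (Function *F = Entry.second->getFunction())
      if (!F->isDeclaration())
        Counts[F] = 100; // invented seed value

  using Utils = SyntheticCountsUtils<const CallGraph *>;
  Utils::propagate(
      &CG,
      // GetRelBBFreq: relative frequency of the call site within its caller.
      // Returning None skips the edge; here every edge gets weight 1 (2^0).
      [](Utils::EdgeRef) -> Optional<Utils::Scaled64> {
        return Utils::Scaled64(1, 0);
      },
      // GetCount: the current count of the caller node.
      [&](Utils::NodeRef N) -> uint64_t {
        return Counts.lookup(N->getFunction());
      },
      // AddCount: accumulate the propagated contribution into the callee.
      [&](Utils::NodeRef N, uint64_t New) {
        if (Function *F = N->getFunction())
          Counts[F] += New;
      });
}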
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetFolder.h b/linux-x64/clang/include/llvm/Analysis/TargetFolder.h
new file mode 100644
index 0000000..ae75d37
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TargetFolder.h
@@ -0,0 +1,269 @@
+//====- TargetFolder.h - Constant folding helper ---------------*- C++ -*-====//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TargetFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants with
+// target dependent folding, in addition to the same target-independent
+// folding that the ConstantFolder class provides.  For general constant
+// creation and folding, use ConstantExpr and the routines in
+// llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETFOLDER_H
+#define LLVM_ANALYSIS_TARGETFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+class DataLayout;
+
+/// TargetFolder - Create constants with target dependent folding.
+class TargetFolder {
+  const DataLayout &DL;
+
+  /// Fold - Fold the constant using target specific information.
+  Constant *Fold(Constant *C) const {
+    if (Constant *CF = ConstantFoldConstant(C, DL))
+      return CF;
+    return C;
+  }
+
+public:
+  explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
+
+  //===--------------------------------------------------------------------===//
+  // Binary Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateAdd(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return Fold(ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW));
+  }
+  Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getFAdd(LHS, RHS));
+  }
+  Constant *CreateSub(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return Fold(ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW));
+  }
+  Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getFSub(LHS, RHS));
+  }
+  Constant *CreateMul(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return Fold(ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW));
+  }
+  Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getFMul(LHS, RHS));
+  }
+  Constant *CreateUDiv(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact));
+  }
+  Constant *CreateSDiv(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact));
+  }
+  Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getFDiv(LHS, RHS));
+  }
+  Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getURem(LHS, RHS));
+  }
+  Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getSRem(LHS, RHS));
+  }
+  Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getFRem(LHS, RHS));
+  }
+  Constant *CreateShl(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return Fold(ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW));
+  }
+  Constant *CreateLShr(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return Fold(ConstantExpr::getLShr(LHS, RHS, isExact));
+  }
+  Constant *CreateAShr(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return Fold(ConstantExpr::getAShr(LHS, RHS, isExact));
+  }
+  Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getAnd(LHS, RHS));
+  }
+  Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getOr(LHS, RHS));
+  }
+  Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::getXor(LHS, RHS));
+  }
+
+  Constant *CreateBinOp(Instruction::BinaryOps Opc,
+                        Constant *LHS, Constant *RHS) const {
+    return Fold(ConstantExpr::get(Opc, LHS, RHS));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Unary Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateNeg(Constant *C,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return Fold(ConstantExpr::getNeg(C, HasNUW, HasNSW));
+  }
+  Constant *CreateFNeg(Constant *C) const {
+    return Fold(ConstantExpr::getFNeg(C));
+  }
+  Constant *CreateNot(Constant *C) const {
+    return Fold(ConstantExpr::getNot(C));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Memory Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+                                ArrayRef<Constant *> IdxList) const {
+    return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
+  }
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return Fold(ConstantExpr::getGetElementPtr(Ty, C, Idx));
+  }
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+                                ArrayRef<Value *> IdxList) const {
+    return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        ArrayRef<Constant *> IdxList) const {
+    return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
+  }
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx));
+  }
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        ArrayRef<Value *> IdxList) const {
+    return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Cast/Conversion Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateCast(Instruction::CastOps Op, Constant *C,
+                       Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getCast(Op, C, DestTy));
+  }
+  Constant *CreateIntCast(Constant *C, Type *DestTy,
+                          bool isSigned) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
+  }
+  Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getPointerCast(C, DestTy));
+  }
+  Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getFPCast(C, DestTy));
+  }
+  Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::BitCast, C, DestTy);
+  }
+  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::IntToPtr, C, DestTy);
+  }
+  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::PtrToInt, C, DestTy);
+  }
+  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
+  }
+  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
+  }
+  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));
+  }
+
+  Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+                                                Type *DestTy) const {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Compare Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+                       Constant *RHS) const {
+    return Fold(ConstantExpr::getCompare(P, LHS, RHS));
+  }
+  Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+                       Constant *RHS) const {
+    return Fold(ConstantExpr::getCompare(P, LHS, RHS));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+    return Fold(ConstantExpr::getSelect(C, True, False));
+  }
+
+  Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+    return Fold(ConstantExpr::getExtractElement(Vec, Idx));
+  }
+
+  Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
+                                Constant *Idx) const {
+    return Fold(ConstantExpr::getInsertElement(Vec, NewElt, Idx));
+  }
+
+  Constant *CreateShuffleVector(Constant *V1, Constant *V2,
+                                Constant *Mask) const {
+    return Fold(ConstantExpr::getShuffleVector(V1, V2, Mask));
+  }
+
+  Constant *CreateExtractValue(Constant *Agg,
+                               ArrayRef<unsigned> IdxList) const {
+    return Fold(ConstantExpr::getExtractValue(Agg, IdxList));
+  }
+
+  Constant *CreateInsertValue(Constant *Agg, Constant *Val,
+                              ArrayRef<unsigned> IdxList) const {
+    return Fold(ConstantExpr::getInsertValue(Agg, Val, IdxList));
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_TARGETFOLDER_H
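Editor's note: the usual way to consume this class is as the folder template
parameter of IRBuilder, so constant operands are folded with DataLayout-aware
rules as instructions are created. A small sketch follows; the function and
value names are invented.

#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Sketch: an IRBuilder that folds as it goes instead of emitting dead
// arithmetic.
void emitFoldedAdd(BasicBlock *BB) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  IRBuilder<TargetFolder> Builder(BB, TargetFolder(DL));

  // Both operands are constants, so CreateAdd returns the folded constant
  // i32 5 rather than inserting an add instruction.
  Value *Sum = Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3));
  (void)Sum;
}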
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def
new file mode 100644
index 0000000..a461ed8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def
@@ -0,0 +1,1264 @@
+//===-- TargetLibraryInfo.def - Library information -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// This .def file fills in either the enum definition or the string
+// representation array definition for TargetLibraryInfo. Which one is
+// emitted depends on whether TLI_DEFINE_ENUM or TLI_DEFINE_STRING is
+// defined; exactly one of the two must be defined at a time.
+
+#if !(defined(TLI_DEFINE_ENUM) || defined(TLI_DEFINE_STRING))
+#error "Must define TLI_DEFINE_ENUM or TLI_DEFINE_STRING for TLI .def."
+#elif defined(TLI_DEFINE_ENUM) && defined(TLI_DEFINE_STRING)
+#error "Can only define one of TLI_DEFINE_ENUM or TLI_DEFINE_STRING at a time."
+#else
+// Exactly one of TLI_DEFINE_ENUM/STRING is defined.
+
+#if defined(TLI_DEFINE_ENUM)
+#define TLI_DEFINE_ENUM_INTERNAL(enum_variant) LibFunc_##enum_variant,
+#define TLI_DEFINE_STRING_INTERNAL(string_repr)
+#else
+#define TLI_DEFINE_ENUM_INTERNAL(enum_variant)
+#define TLI_DEFINE_STRING_INTERNAL(string_repr) string_repr,
+#endif
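Editor's note: a sketch of the X-macro pattern this preamble sets up. Define
exactly one of the two control macros before including the file, and each
TLI_DEFINE_*_INTERNAL pair below expands accordingly. This mirrors how
TargetLibraryInfo builds its LibFunc enum and name table; treat the exact
consumer code here as illustrative.

// With TLI_DEFINE_ENUM, each entry expands to "LibFunc_<name>,":
enum LibFunc : unsigned {
#define TLI_DEFINE_ENUM
#include "llvm/Analysis/TargetLibraryInfo.def"
#undef TLI_DEFINE_ENUM // harmless if the .def already undefines it
  NumLibFuncs
};

// With TLI_DEFINE_STRING, each entry expands to its string literal instead,
// giving a name table parallel to the enum:
static const char *const StandardNames[NumLibFuncs] = {
#define TLI_DEFINE_STRING
#include "llvm/Analysis/TargetLibraryInfo.def"
#undef TLI_DEFINE_STRING
};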
+
+/// void *new(unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_int)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPAXI@Z")
+
+/// void *new(unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_int_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPAXIABUnothrow_t@std@@@Z")
+
+/// void *new(unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_K@Z")
+
+/// void *new(unsigned long long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_KAEBUnothrow_t@std@@@Z")
+
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAX@Z")
+
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXABUnothrow_t@std@@@Z")
+
+/// void operator delete(void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_int)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXI@Z")
+
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX@Z")
+
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAXAEBUnothrow_t@std@@@Z")
+
+/// void operator delete(void*, unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_longlong)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX_K@Z")
+
+/// void *new[](unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXI@Z")
+
+/// void *new[](unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXIABUnothrow_t@std@@@Z")
+
+/// void *new[](unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_K@Z")
+
+/// void *new[](unsigned long long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_KAEBUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAX@Z")
+
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXABUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_int)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXI@Z")
+
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX@Z")
+
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAXAEBUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*, unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_longlong)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX_K@Z")
+
+/// int _IO_getc(_IO_FILE * __fp);
+TLI_DEFINE_ENUM_INTERNAL(under_IO_getc)
+TLI_DEFINE_STRING_INTERNAL("_IO_getc")
+/// int _IO_putc(int __c, _IO_FILE * __fp);
+TLI_DEFINE_ENUM_INTERNAL(under_IO_putc)
+TLI_DEFINE_STRING_INTERNAL("_IO_putc")
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPv)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPv")
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvRKSt9nothrow_t")
+/// void operator delete[](void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvj)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvj")
+/// void operator delete[](void*, unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvm)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvm")
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPv)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPv")
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvRKSt9nothrow_t")
+/// void operator delete(void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvj)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvj")
+/// void operator delete(void*, unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvm)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvm")
+/// void *new[](unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(Znaj)
+TLI_DEFINE_STRING_INTERNAL("_Znaj")
+/// void *new[](unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnajRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnajRKSt9nothrow_t")
+/// void *new[](unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(Znam)
+TLI_DEFINE_STRING_INTERNAL("_Znam")
+/// void *new[](unsigned long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t")
+/// void *new(unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(Znwj)
+TLI_DEFINE_STRING_INTERNAL("_Znwj")
+/// void *new(unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnwjRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwjRKSt9nothrow_t")
+/// void *new(unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(Znwm)
+TLI_DEFINE_STRING_INTERNAL("_Znwm")
+/// void *new(unsigned long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t")
+/// double __acos_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(acos_finite)
+TLI_DEFINE_STRING_INTERNAL("__acos_finite")
+/// float __acosf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(acosf_finite)
+TLI_DEFINE_STRING_INTERNAL("__acosf_finite")
+/// double __acosh_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(acosh_finite)
+TLI_DEFINE_STRING_INTERNAL("__acosh_finite")
+/// float __acoshf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(acoshf_finite)
+TLI_DEFINE_STRING_INTERNAL("__acoshf_finite")
+/// long double __acoshl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acoshl_finite)
+TLI_DEFINE_STRING_INTERNAL("__acoshl_finite")
+/// long double __acosl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acosl_finite)
+TLI_DEFINE_STRING_INTERNAL("__acosl_finite")
+/// double __asin_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(asin_finite)
+TLI_DEFINE_STRING_INTERNAL("__asin_finite")
+/// float __asinf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(asinf_finite)
+TLI_DEFINE_STRING_INTERNAL("__asinf_finite")
+/// long double __asinl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(asinl_finite)
+TLI_DEFINE_STRING_INTERNAL("__asinl_finite")
+/// double __atan2_finite(double y, double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2_finite)
+TLI_DEFINE_STRING_INTERNAL("__atan2_finite")
+/// float __atan2f_finite(float y, float x);
+TLI_DEFINE_ENUM_INTERNAL(atan2f_finite)
+TLI_DEFINE_STRING_INTERNAL("__atan2f_finite")
+/// long double __atan2l_finite(long double y, long double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2l_finite)
+TLI_DEFINE_STRING_INTERNAL("__atan2l_finite")
+/// double __atanh_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(atanh_finite)
+TLI_DEFINE_STRING_INTERNAL("__atanh_finite")
+/// float __atanhf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(atanhf_finite)
+TLI_DEFINE_STRING_INTERNAL("__atanhf_finite")
+/// long double __atanhl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(atanhl_finite)
+TLI_DEFINE_STRING_INTERNAL("__atanhl_finite")
+/// double __cosh_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(cosh_finite)
+TLI_DEFINE_STRING_INTERNAL("__cosh_finite")
+/// float __coshf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(coshf_finite)
+TLI_DEFINE_STRING_INTERNAL("__coshf_finite")
+/// long double __coshl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(coshl_finite)
+TLI_DEFINE_STRING_INTERNAL("__coshl_finite")
+/// double __cospi(double x);
+TLI_DEFINE_ENUM_INTERNAL(cospi)
+TLI_DEFINE_STRING_INTERNAL("__cospi")
+/// float __cospif(float x);
+TLI_DEFINE_ENUM_INTERNAL(cospif)
+TLI_DEFINE_STRING_INTERNAL("__cospif")
+/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
+TLI_DEFINE_ENUM_INTERNAL(cxa_atexit)
+TLI_DEFINE_STRING_INTERNAL("__cxa_atexit")
+/// void __cxa_guard_abort(guard_t *guard);
+/// guard_t is int64_t in the Itanium ABI or int32_t on ARM EABI.
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_abort)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_abort")
+/// int __cxa_guard_acquire(guard_t *guard);
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_acquire)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_acquire")
+/// void __cxa_guard_release(guard_t *guard);
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_release)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_release")
+/// double __exp10_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp10_finite")
+/// float __exp10f_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp10f_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp10f_finite")
+/// long double __exp10l_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10l_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp10l_finite")
+/// double __exp2_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp2_finite")
+/// float __exp2f_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp2f_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp2f_finite")
+/// long double __exp2l_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2l_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp2l_finite")
+/// double __exp_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp_finite)
+TLI_DEFINE_STRING_INTERNAL("__exp_finite")
+/// float __expf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(expf_finite)
+TLI_DEFINE_STRING_INTERNAL("__expf_finite")
+/// long double __expl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(expl_finite)
+TLI_DEFINE_STRING_INTERNAL("__expl_finite")
+/// int __isoc99_scanf(const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_scanf)
+TLI_DEFINE_STRING_INTERNAL("__isoc99_scanf")
+/// int __isoc99_sscanf(const char *s, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_sscanf)
+TLI_DEFINE_STRING_INTERNAL("__isoc99_sscanf")
+/// double __log10_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(log10_finite)
+TLI_DEFINE_STRING_INTERNAL("__log10_finite")
+/// float __log10f_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(log10f_finite)
+TLI_DEFINE_STRING_INTERNAL("__log10f_finite")
+/// long double __log10l_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log10l_finite)
+TLI_DEFINE_STRING_INTERNAL("__log10l_finite")
+/// double __log2_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(log2_finite)
+TLI_DEFINE_STRING_INTERNAL("__log2_finite")
+/// float __log2f_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(log2f_finite)
+TLI_DEFINE_STRING_INTERNAL("__log2f_finite")
+/// long double __log2l_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log2l_finite)
+TLI_DEFINE_STRING_INTERNAL("__log2l_finite")
+/// double __log_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(log_finite)
+TLI_DEFINE_STRING_INTERNAL("__log_finite")
+/// float __logf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(logf_finite)
+TLI_DEFINE_STRING_INTERNAL("__logf_finite")
+/// long double __logl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(logl_finite)
+TLI_DEFINE_STRING_INTERNAL("__logl_finite")
+/// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__memcpy_chk")
+/// void *__memmove_chk(void *s1, const void *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memmove_chk)
+TLI_DEFINE_STRING_INTERNAL("__memmove_chk")
+/// void *__memset_chk(void *s, char v, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memset_chk)
+TLI_DEFINE_STRING_INTERNAL("__memset_chk")
+
+/// int __nvvm_reflect(const char *);
+TLI_DEFINE_ENUM_INTERNAL(nvvm_reflect)
+TLI_DEFINE_STRING_INTERNAL("__nvvm_reflect")
+/// double __pow_finite(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(pow_finite)
+TLI_DEFINE_STRING_INTERNAL("__pow_finite")
+/// float __powf_finite(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(powf_finite)
+TLI_DEFINE_STRING_INTERNAL("__powf_finite")
+/// long double __powl_finite(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(powl_finite)
+TLI_DEFINE_STRING_INTERNAL("__powl_finite")
+/// double __sincospi_stret(double x);
+TLI_DEFINE_ENUM_INTERNAL(sincospi_stret)
+TLI_DEFINE_STRING_INTERNAL("__sincospi_stret")
+/// float __sincospif_stret(float x);
+TLI_DEFINE_ENUM_INTERNAL(sincospif_stret)
+TLI_DEFINE_STRING_INTERNAL("__sincospif_stret")
+/// double __sinh_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(sinh_finite)
+TLI_DEFINE_STRING_INTERNAL("__sinh_finite")
+/// float __sinhf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinhf_finite)
+TLI_DEFINE_STRING_INTERNAL("__sinhf_finite")
+/// long double __sinhl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sinhl_finite)
+TLI_DEFINE_STRING_INTERNAL("__sinhl_finite")
+/// double __sinpi(double x);
+TLI_DEFINE_ENUM_INTERNAL(sinpi)
+TLI_DEFINE_STRING_INTERNAL("__sinpi")
+/// float __sinpif(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinpif)
+TLI_DEFINE_STRING_INTERNAL("__sinpif")
+/// double __sqrt_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrt_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrt_finite")
+/// float __sqrtf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtf_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrtf_finite")
+/// long double __sqrtl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtl_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrtl_finite")
+/// char *__stpcpy_chk(char *s1, const char *s2, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(stpcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__stpcpy_chk")
+/// char *__stpncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(stpncpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__stpncpy_chk")
+/// char *__strcpy_chk(char *s1, const char *s2, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(strcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__strcpy_chk")
+/// char * __strdup(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strdup)
+TLI_DEFINE_STRING_INTERNAL("__strdup")
+/// char *__strncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(strncpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__strncpy_chk")
+/// char *__strndup(const char *s, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strndup)
+TLI_DEFINE_STRING_INTERNAL("__strndup")
+/// char * __strtok_r(char *s, const char *delim, char **save_ptr);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strtok_r)
+TLI_DEFINE_STRING_INTERNAL("__strtok_r")
+/// int abs(int j);
+TLI_DEFINE_ENUM_INTERNAL(abs)
+TLI_DEFINE_STRING_INTERNAL("abs")
+/// int access(const char *path, int amode);
+TLI_DEFINE_ENUM_INTERNAL(access)
+TLI_DEFINE_STRING_INTERNAL("access")
+/// double acos(double x);
+TLI_DEFINE_ENUM_INTERNAL(acos)
+TLI_DEFINE_STRING_INTERNAL("acos")
+/// float acosf(float x);
+TLI_DEFINE_ENUM_INTERNAL(acosf)
+TLI_DEFINE_STRING_INTERNAL("acosf")
+/// double acosh(double x);
+TLI_DEFINE_ENUM_INTERNAL(acosh)
+TLI_DEFINE_STRING_INTERNAL("acosh")
+/// float acoshf(float x);
+TLI_DEFINE_ENUM_INTERNAL(acoshf)
+TLI_DEFINE_STRING_INTERNAL("acoshf")
+/// long double acoshl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acoshl)
+TLI_DEFINE_STRING_INTERNAL("acoshl")
+/// long double acosl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acosl)
+TLI_DEFINE_STRING_INTERNAL("acosl")
+/// double asin(double x);
+TLI_DEFINE_ENUM_INTERNAL(asin)
+TLI_DEFINE_STRING_INTERNAL("asin")
+/// float asinf(float x);
+TLI_DEFINE_ENUM_INTERNAL(asinf)
+TLI_DEFINE_STRING_INTERNAL("asinf")
+/// double asinh(double x);
+TLI_DEFINE_ENUM_INTERNAL(asinh)
+TLI_DEFINE_STRING_INTERNAL("asinh")
+/// float asinhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(asinhf)
+TLI_DEFINE_STRING_INTERNAL("asinhf")
+/// long double asinhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(asinhl)
+TLI_DEFINE_STRING_INTERNAL("asinhl")
+/// long double asinl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(asinl)
+TLI_DEFINE_STRING_INTERNAL("asinl")
+/// double atan(double x);
+TLI_DEFINE_ENUM_INTERNAL(atan)
+TLI_DEFINE_STRING_INTERNAL("atan")
+/// double atan2(double y, double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2)
+TLI_DEFINE_STRING_INTERNAL("atan2")
+/// float atan2f(float y, float x);
+TLI_DEFINE_ENUM_INTERNAL(atan2f)
+TLI_DEFINE_STRING_INTERNAL("atan2f")
+/// long double atan2l(long double y, long double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2l)
+TLI_DEFINE_STRING_INTERNAL("atan2l")
+/// float atanf(float x);
+TLI_DEFINE_ENUM_INTERNAL(atanf)
+TLI_DEFINE_STRING_INTERNAL("atanf")
+/// double atanh(double x);
+TLI_DEFINE_ENUM_INTERNAL(atanh)
+TLI_DEFINE_STRING_INTERNAL("atanh")
+/// float atanhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(atanhf)
+TLI_DEFINE_STRING_INTERNAL("atanhf")
+/// long double atanhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(atanhl)
+TLI_DEFINE_STRING_INTERNAL("atanhl")
+/// long double atanl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(atanl)
+TLI_DEFINE_STRING_INTERNAL("atanl")
+/// double atof(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atof)
+TLI_DEFINE_STRING_INTERNAL("atof")
+/// int atoi(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atoi)
+TLI_DEFINE_STRING_INTERNAL("atoi")
+/// long atol(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atol)
+TLI_DEFINE_STRING_INTERNAL("atol")
+/// long long atoll(const char *nptr);
+TLI_DEFINE_ENUM_INTERNAL(atoll)
+TLI_DEFINE_STRING_INTERNAL("atoll")
+/// int bcmp(const void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bcmp)
+TLI_DEFINE_STRING_INTERNAL("bcmp")
+/// void bcopy(const void *s1, void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bcopy)
+TLI_DEFINE_STRING_INTERNAL("bcopy")
+/// void bzero(void *s, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bzero)
+TLI_DEFINE_STRING_INTERNAL("bzero")
+/// double cabs(double complex z);
+TLI_DEFINE_ENUM_INTERNAL(cabs)
+TLI_DEFINE_STRING_INTERNAL("cabs")
+/// float cabsf(float complex z);
+TLI_DEFINE_ENUM_INTERNAL(cabsf)
+TLI_DEFINE_STRING_INTERNAL("cabsf")
+/// long double cabsl(long double complex z);
+TLI_DEFINE_ENUM_INTERNAL(cabsl)
+TLI_DEFINE_STRING_INTERNAL("cabsl")
+/// void *calloc(size_t count, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(calloc)
+TLI_DEFINE_STRING_INTERNAL("calloc")
+/// double cbrt(double x);
+TLI_DEFINE_ENUM_INTERNAL(cbrt)
+TLI_DEFINE_STRING_INTERNAL("cbrt")
+/// float cbrtf(float x);
+TLI_DEFINE_ENUM_INTERNAL(cbrtf)
+TLI_DEFINE_STRING_INTERNAL("cbrtf")
+/// long double cbrtl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(cbrtl)
+TLI_DEFINE_STRING_INTERNAL("cbrtl")
+/// double ceil(double x);
+TLI_DEFINE_ENUM_INTERNAL(ceil)
+TLI_DEFINE_STRING_INTERNAL("ceil")
+/// float ceilf(float x);
+TLI_DEFINE_ENUM_INTERNAL(ceilf)
+TLI_DEFINE_STRING_INTERNAL("ceilf")
+/// long double ceill(long double x);
+TLI_DEFINE_ENUM_INTERNAL(ceill)
+TLI_DEFINE_STRING_INTERNAL("ceill")
+/// int chmod(const char *path, mode_t mode);
+TLI_DEFINE_ENUM_INTERNAL(chmod)
+TLI_DEFINE_STRING_INTERNAL("chmod")
+/// int chown(const char *path, uid_t owner, gid_t group);
+TLI_DEFINE_ENUM_INTERNAL(chown)
+TLI_DEFINE_STRING_INTERNAL("chown")
+/// void clearerr(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(clearerr)
+TLI_DEFINE_STRING_INTERNAL("clearerr")
+/// int closedir(DIR *dirp);
+TLI_DEFINE_ENUM_INTERNAL(closedir)
+TLI_DEFINE_STRING_INTERNAL("closedir")
+/// double copysign(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(copysign)
+TLI_DEFINE_STRING_INTERNAL("copysign")
+/// float copysignf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(copysignf)
+TLI_DEFINE_STRING_INTERNAL("copysignf")
+/// long double copysignl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(copysignl)
+TLI_DEFINE_STRING_INTERNAL("copysignl")
+/// double cos(double x);
+TLI_DEFINE_ENUM_INTERNAL(cos)
+TLI_DEFINE_STRING_INTERNAL("cos")
+/// float cosf(float x);
+TLI_DEFINE_ENUM_INTERNAL(cosf)
+TLI_DEFINE_STRING_INTERNAL("cosf")
+/// double cosh(double x);
+TLI_DEFINE_ENUM_INTERNAL(cosh)
+TLI_DEFINE_STRING_INTERNAL("cosh")
+/// float coshf(float x);
+TLI_DEFINE_ENUM_INTERNAL(coshf)
+TLI_DEFINE_STRING_INTERNAL("coshf")
+/// long double coshl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(coshl)
+TLI_DEFINE_STRING_INTERNAL("coshl")
+/// long double cosl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(cosl)
+TLI_DEFINE_STRING_INTERNAL("cosl")
+/// char *ctermid(char *s);
+TLI_DEFINE_ENUM_INTERNAL(ctermid)
+TLI_DEFINE_STRING_INTERNAL("ctermid")
+/// double exp(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp)
+TLI_DEFINE_STRING_INTERNAL("exp")
+/// double exp10(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10)
+TLI_DEFINE_STRING_INTERNAL("exp10")
+/// float exp10f(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp10f)
+TLI_DEFINE_STRING_INTERNAL("exp10f")
+/// long double exp10l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10l)
+TLI_DEFINE_STRING_INTERNAL("exp10l")
+/// double exp2(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2)
+TLI_DEFINE_STRING_INTERNAL("exp2")
+/// float exp2f(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp2f)
+TLI_DEFINE_STRING_INTERNAL("exp2f")
+/// long double exp2l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2l)
+TLI_DEFINE_STRING_INTERNAL("exp2l")
+/// float expf(float x);
+TLI_DEFINE_ENUM_INTERNAL(expf)
+TLI_DEFINE_STRING_INTERNAL("expf")
+/// long double expl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(expl)
+TLI_DEFINE_STRING_INTERNAL("expl")
+/// double expm1(double x);
+TLI_DEFINE_ENUM_INTERNAL(expm1)
+TLI_DEFINE_STRING_INTERNAL("expm1")
+/// float expm1f(float x);
+TLI_DEFINE_ENUM_INTERNAL(expm1f)
+TLI_DEFINE_STRING_INTERNAL("expm1f")
+/// long double expm1l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(expm1l)
+TLI_DEFINE_STRING_INTERNAL("expm1l")
+/// double fabs(double x);
+TLI_DEFINE_ENUM_INTERNAL(fabs)
+TLI_DEFINE_STRING_INTERNAL("fabs")
+/// float fabsf(float x);
+TLI_DEFINE_ENUM_INTERNAL(fabsf)
+TLI_DEFINE_STRING_INTERNAL("fabsf")
+/// long double fabsl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(fabsl)
+TLI_DEFINE_STRING_INTERNAL("fabsl")
+/// int fclose(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fclose)
+TLI_DEFINE_STRING_INTERNAL("fclose")
+/// FILE *fdopen(int fildes, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(fdopen)
+TLI_DEFINE_STRING_INTERNAL("fdopen")
+/// int feof(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(feof)
+TLI_DEFINE_STRING_INTERNAL("feof")
+/// int ferror(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ferror)
+TLI_DEFINE_STRING_INTERNAL("ferror")
+/// int fflush(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fflush)
+TLI_DEFINE_STRING_INTERNAL("fflush")
+/// int ffs(int i);
+TLI_DEFINE_ENUM_INTERNAL(ffs)
+TLI_DEFINE_STRING_INTERNAL("ffs")
+/// int ffsl(long int i);
+TLI_DEFINE_ENUM_INTERNAL(ffsl)
+TLI_DEFINE_STRING_INTERNAL("ffsl")
+/// int ffsll(long long int i);
+TLI_DEFINE_ENUM_INTERNAL(ffsll)
+TLI_DEFINE_STRING_INTERNAL("ffsll")
+/// int fgetc(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgetc)
+TLI_DEFINE_STRING_INTERNAL("fgetc")
+/// int fgetpos(FILE *stream, fpos_t *pos);
+TLI_DEFINE_ENUM_INTERNAL(fgetpos)
+TLI_DEFINE_STRING_INTERNAL("fgetpos")
+/// char *fgets(char *s, int n, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgets)
+TLI_DEFINE_STRING_INTERNAL("fgets")
+/// int fileno(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fileno)
+TLI_DEFINE_STRING_INTERNAL("fileno")
+/// int fiprintf(FILE *stream, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(fiprintf)
+TLI_DEFINE_STRING_INTERNAL("fiprintf")
+/// void flockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(flockfile)
+TLI_DEFINE_STRING_INTERNAL("flockfile")
+/// double floor(double x);
+TLI_DEFINE_ENUM_INTERNAL(floor)
+TLI_DEFINE_STRING_INTERNAL("floor")
+/// float floorf(float x);
+TLI_DEFINE_ENUM_INTERNAL(floorf)
+TLI_DEFINE_STRING_INTERNAL("floorf")
+/// long double floorl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(floorl)
+TLI_DEFINE_STRING_INTERNAL("floorl")
+/// int fls(int i);
+TLI_DEFINE_ENUM_INTERNAL(fls)
+TLI_DEFINE_STRING_INTERNAL("fls")
+/// int flsl(long int i);
+TLI_DEFINE_ENUM_INTERNAL(flsl)
+TLI_DEFINE_STRING_INTERNAL("flsl")
+/// int flsll(long long int i);
+TLI_DEFINE_ENUM_INTERNAL(flsll)
+TLI_DEFINE_STRING_INTERNAL("flsll")
+/// double fmax(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmax)
+TLI_DEFINE_STRING_INTERNAL("fmax")
+/// float fmaxf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fmaxf)
+TLI_DEFINE_STRING_INTERNAL("fmaxf")
+/// long double fmaxl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fmaxl)
+TLI_DEFINE_STRING_INTERNAL("fmaxl")
+/// double fmin(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmin)
+TLI_DEFINE_STRING_INTERNAL("fmin")
+/// float fminf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fminf)
+TLI_DEFINE_STRING_INTERNAL("fminf")
+/// long double fminl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fminl)
+TLI_DEFINE_STRING_INTERNAL("fminl")
+/// double fmod(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmod)
+TLI_DEFINE_STRING_INTERNAL("fmod")
+/// float fmodf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fmodf)
+TLI_DEFINE_STRING_INTERNAL("fmodf")
+/// long double fmodl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fmodl)
+TLI_DEFINE_STRING_INTERNAL("fmodl")
+/// FILE *fopen(const char *filename, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(fopen)
+TLI_DEFINE_STRING_INTERNAL("fopen")
+/// FILE *fopen64(const char *filename, const char *opentype)
+TLI_DEFINE_ENUM_INTERNAL(fopen64)
+TLI_DEFINE_STRING_INTERNAL("fopen64")
+/// int fprintf(FILE *stream, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(fprintf)
+TLI_DEFINE_STRING_INTERNAL("fprintf")
+/// int fputc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputc)
+TLI_DEFINE_STRING_INTERNAL("fputc")
+/// int fputs(const char *s, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputs)
+TLI_DEFINE_STRING_INTERNAL("fputs")
+/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fread)
+TLI_DEFINE_STRING_INTERNAL("fread")
+/// void free(void *ptr);
+TLI_DEFINE_ENUM_INTERNAL(free)
+TLI_DEFINE_STRING_INTERNAL("free")
+/// double frexp(double num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexp)
+TLI_DEFINE_STRING_INTERNAL("frexp")
+/// float frexpf(float num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexpf)
+TLI_DEFINE_STRING_INTERNAL("frexpf")
+/// long double frexpl(long double num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexpl)
+TLI_DEFINE_STRING_INTERNAL("frexpl")
+/// int fscanf(FILE *stream, const char *format, ... );
+TLI_DEFINE_ENUM_INTERNAL(fscanf)
+TLI_DEFINE_STRING_INTERNAL("fscanf")
+/// int fseek(FILE *stream, long offset, int whence);
+TLI_DEFINE_ENUM_INTERNAL(fseek)
+TLI_DEFINE_STRING_INTERNAL("fseek")
+/// int fseeko(FILE *stream, off_t offset, int whence);
+TLI_DEFINE_ENUM_INTERNAL(fseeko)
+TLI_DEFINE_STRING_INTERNAL("fseeko")
+/// int fseeko64(FILE *stream, off64_t offset, int whence)
+TLI_DEFINE_ENUM_INTERNAL(fseeko64)
+TLI_DEFINE_STRING_INTERNAL("fseeko64")
+/// int fsetpos(FILE *stream, const fpos_t *pos);
+TLI_DEFINE_ENUM_INTERNAL(fsetpos)
+TLI_DEFINE_STRING_INTERNAL("fsetpos")
+/// int fstat(int fildes, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstat)
+TLI_DEFINE_STRING_INTERNAL("fstat")
+/// int fstat64(int filedes, struct stat64 *buf)
+TLI_DEFINE_ENUM_INTERNAL(fstat64)
+TLI_DEFINE_STRING_INTERNAL("fstat64")
+/// int fstatvfs(int fildes, struct statvfs *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstatvfs)
+TLI_DEFINE_STRING_INTERNAL("fstatvfs")
+/// int fstatvfs64(int fildes, struct statvfs64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstatvfs64)
+TLI_DEFINE_STRING_INTERNAL("fstatvfs64")
+/// long ftell(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ftell)
+TLI_DEFINE_STRING_INTERNAL("ftell")
+/// off_t ftello(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ftello)
+TLI_DEFINE_STRING_INTERNAL("ftello")
+/// off64_t ftello64(FILE *stream)
+TLI_DEFINE_ENUM_INTERNAL(ftello64)
+TLI_DEFINE_STRING_INTERNAL("ftello64")
+/// int ftrylockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(ftrylockfile)
+TLI_DEFINE_STRING_INTERNAL("ftrylockfile")
+/// void funlockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(funlockfile)
+TLI_DEFINE_STRING_INTERNAL("funlockfile")
+/// size_t fwrite(const void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fwrite)
+TLI_DEFINE_STRING_INTERNAL("fwrite")
+/// int getc(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(getc)
+TLI_DEFINE_STRING_INTERNAL("getc")
+/// int getc_unlocked(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(getc_unlocked)
+TLI_DEFINE_STRING_INTERNAL("getc_unlocked")
+/// int getchar(void);
+TLI_DEFINE_ENUM_INTERNAL(getchar)
+TLI_DEFINE_STRING_INTERNAL("getchar")
+/// char *getenv(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(getenv)
+TLI_DEFINE_STRING_INTERNAL("getenv")
+/// int getitimer(int which, struct itimerval *value);
+TLI_DEFINE_ENUM_INTERNAL(getitimer)
+TLI_DEFINE_STRING_INTERNAL("getitimer")
+/// int getlogin_r(char *name, size_t namesize);
+TLI_DEFINE_ENUM_INTERNAL(getlogin_r)
+TLI_DEFINE_STRING_INTERNAL("getlogin_r")
+/// struct passwd *getpwnam(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(getpwnam)
+TLI_DEFINE_STRING_INTERNAL("getpwnam")
+/// char *gets(char *s);
+TLI_DEFINE_ENUM_INTERNAL(gets)
+TLI_DEFINE_STRING_INTERNAL("gets")
+/// int gettimeofday(struct timeval *tp, void *tzp);
+TLI_DEFINE_ENUM_INTERNAL(gettimeofday)
+TLI_DEFINE_STRING_INTERNAL("gettimeofday")
+/// uint32_t htonl(uint32_t hostlong);
+TLI_DEFINE_ENUM_INTERNAL(htonl)
+TLI_DEFINE_STRING_INTERNAL("htonl")
+/// uint16_t htons(uint16_t hostshort);
+TLI_DEFINE_ENUM_INTERNAL(htons)
+TLI_DEFINE_STRING_INTERNAL("htons")
+/// int iprintf(const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(iprintf)
+TLI_DEFINE_STRING_INTERNAL("iprintf")
+/// int isascii(int c);
+TLI_DEFINE_ENUM_INTERNAL(isascii)
+TLI_DEFINE_STRING_INTERNAL("isascii")
+/// int isdigit(int c);
+TLI_DEFINE_ENUM_INTERNAL(isdigit)
+TLI_DEFINE_STRING_INTERNAL("isdigit")
+/// long int labs(long int j);
+TLI_DEFINE_ENUM_INTERNAL(labs)
+TLI_DEFINE_STRING_INTERNAL("labs")
+/// int lchown(const char *path, uid_t owner, gid_t group);
+TLI_DEFINE_ENUM_INTERNAL(lchown)
+TLI_DEFINE_STRING_INTERNAL("lchown")
+/// double ldexp(double x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexp)
+TLI_DEFINE_STRING_INTERNAL("ldexp")
+/// float ldexpf(float x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexpf)
+TLI_DEFINE_STRING_INTERNAL("ldexpf")
+/// long double ldexpl(long double x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexpl)
+TLI_DEFINE_STRING_INTERNAL("ldexpl")
+/// long long int llabs(long long int j);
+TLI_DEFINE_ENUM_INTERNAL(llabs)
+TLI_DEFINE_STRING_INTERNAL("llabs")
+/// double log(double x);
+TLI_DEFINE_ENUM_INTERNAL(log)
+TLI_DEFINE_STRING_INTERNAL("log")
+/// double log10(double x);
+TLI_DEFINE_ENUM_INTERNAL(log10)
+TLI_DEFINE_STRING_INTERNAL("log10")
+/// float log10f(float x);
+TLI_DEFINE_ENUM_INTERNAL(log10f)
+TLI_DEFINE_STRING_INTERNAL("log10f")
+/// long double log10l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log10l)
+TLI_DEFINE_STRING_INTERNAL("log10l")
+/// double log1p(double x);
+TLI_DEFINE_ENUM_INTERNAL(log1p)
+TLI_DEFINE_STRING_INTERNAL("log1p")
+/// float log1pf(float x);
+TLI_DEFINE_ENUM_INTERNAL(log1pf)
+TLI_DEFINE_STRING_INTERNAL("log1pf")
+/// long double log1pl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log1pl)
+TLI_DEFINE_STRING_INTERNAL("log1pl")
+/// double log2(double x);
+TLI_DEFINE_ENUM_INTERNAL(log2)
+TLI_DEFINE_STRING_INTERNAL("log2")
+/// float log2f(float x);
+TLI_DEFINE_ENUM_INTERNAL(log2f)
+TLI_DEFINE_STRING_INTERNAL("log2f")
+/// long double log2l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log2l)
+TLI_DEFINE_STRING_INTERNAL("log2l")
+/// double logb(double x);
+TLI_DEFINE_ENUM_INTERNAL(logb)
+TLI_DEFINE_STRING_INTERNAL("logb")
+/// float logbf(float x);
+TLI_DEFINE_ENUM_INTERNAL(logbf)
+TLI_DEFINE_STRING_INTERNAL("logbf")
+/// long double logbl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(logbl)
+TLI_DEFINE_STRING_INTERNAL("logbl")
+/// float logf(float x);
+TLI_DEFINE_ENUM_INTERNAL(logf)
+TLI_DEFINE_STRING_INTERNAL("logf")
+/// long double logl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(logl)
+TLI_DEFINE_STRING_INTERNAL("logl")
+/// int lstat(const char *path, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(lstat)
+TLI_DEFINE_STRING_INTERNAL("lstat")
+/// int lstat64(const char *path, struct stat64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(lstat64)
+TLI_DEFINE_STRING_INTERNAL("lstat64")
+/// void *malloc(size_t size);
+TLI_DEFINE_ENUM_INTERNAL(malloc)
+TLI_DEFINE_STRING_INTERNAL("malloc")
+/// void *memalign(size_t boundary, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(memalign)
+TLI_DEFINE_STRING_INTERNAL("memalign")
+/// void *memccpy(void *s1, const void *s2, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memccpy)
+TLI_DEFINE_STRING_INTERNAL("memccpy")
+/// void *memchr(const void *s, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memchr)
+TLI_DEFINE_STRING_INTERNAL("memchr")
+/// int memcmp(const void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memcmp)
+TLI_DEFINE_STRING_INTERNAL("memcmp")
+/// void *memcpy(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memcpy)
+TLI_DEFINE_STRING_INTERNAL("memcpy")
+/// void *memmove(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memmove)
+TLI_DEFINE_STRING_INTERNAL("memmove")
+/// void *mempcpy(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(mempcpy)
+TLI_DEFINE_STRING_INTERNAL("mempcpy")
+/// void *memrchr(const void *s, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memrchr)
+TLI_DEFINE_STRING_INTERNAL("memrchr")
+/// void *memset(void *b, int c, size_t len);
+TLI_DEFINE_ENUM_INTERNAL(memset)
+TLI_DEFINE_STRING_INTERNAL("memset")
+/// void memset_pattern16(void *b, const void *pattern16, size_t len);
+TLI_DEFINE_ENUM_INTERNAL(memset_pattern16)
+TLI_DEFINE_STRING_INTERNAL("memset_pattern16")
+/// int mkdir(const char *path, mode_t mode);
+TLI_DEFINE_ENUM_INTERNAL(mkdir)
+TLI_DEFINE_STRING_INTERNAL("mkdir")
+/// time_t mktime(struct tm *timeptr);
+TLI_DEFINE_ENUM_INTERNAL(mktime)
+TLI_DEFINE_STRING_INTERNAL("mktime")
+/// double modf(double x, double *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modf)
+TLI_DEFINE_STRING_INTERNAL("modf")
+/// float modff(float, float *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modff)
+TLI_DEFINE_STRING_INTERNAL("modff")
+/// long double modfl(long double value, long double *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modfl)
+TLI_DEFINE_STRING_INTERNAL("modfl")
+
+/// double nearbyint(double x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyint)
+TLI_DEFINE_STRING_INTERNAL("nearbyint")
+/// float nearbyintf(float x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyintf)
+TLI_DEFINE_STRING_INTERNAL("nearbyintf")
+/// long double nearbyintl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyintl)
+TLI_DEFINE_STRING_INTERNAL("nearbyintl")
+/// uint32_t ntohl(uint32_t netlong);
+TLI_DEFINE_ENUM_INTERNAL(ntohl)
+TLI_DEFINE_STRING_INTERNAL("ntohl")
+/// uint16_t ntohs(uint16_t netshort);
+TLI_DEFINE_ENUM_INTERNAL(ntohs)
+TLI_DEFINE_STRING_INTERNAL("ntohs")
+/// int open(const char *path, int oflag, ... );
+TLI_DEFINE_ENUM_INTERNAL(open)
+TLI_DEFINE_STRING_INTERNAL("open")
+/// int open64(const char *filename, int flags[, mode_t mode])
+TLI_DEFINE_ENUM_INTERNAL(open64)
+TLI_DEFINE_STRING_INTERNAL("open64")
+/// DIR *opendir(const char *dirname);
+TLI_DEFINE_ENUM_INTERNAL(opendir)
+TLI_DEFINE_STRING_INTERNAL("opendir")
+/// int pclose(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(pclose)
+TLI_DEFINE_STRING_INTERNAL("pclose")
+/// void perror(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(perror)
+TLI_DEFINE_STRING_INTERNAL("perror")
+/// FILE *popen(const char *command, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(popen)
+TLI_DEFINE_STRING_INTERNAL("popen")
+/// int posix_memalign(void **memptr, size_t alignment, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(posix_memalign)
+TLI_DEFINE_STRING_INTERNAL("posix_memalign")
+/// double pow(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(pow)
+TLI_DEFINE_STRING_INTERNAL("pow")
+/// float powf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(powf)
+TLI_DEFINE_STRING_INTERNAL("powf")
+/// long double powl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(powl)
+TLI_DEFINE_STRING_INTERNAL("powl")
+/// ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
+TLI_DEFINE_ENUM_INTERNAL(pread)
+TLI_DEFINE_STRING_INTERNAL("pread")
+/// int printf(const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(printf)
+TLI_DEFINE_STRING_INTERNAL("printf")
+/// int putc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(putc)
+TLI_DEFINE_STRING_INTERNAL("putc")
+/// int putchar(int c);
+TLI_DEFINE_ENUM_INTERNAL(putchar)
+TLI_DEFINE_STRING_INTERNAL("putchar")
+/// int puts(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(puts)
+TLI_DEFINE_STRING_INTERNAL("puts")
+/// ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+TLI_DEFINE_ENUM_INTERNAL(pwrite)
+TLI_DEFINE_STRING_INTERNAL("pwrite")
+/// void qsort(void *base, size_t nel, size_t width,
+///            int (*compar)(const void *, const void *));
+TLI_DEFINE_ENUM_INTERNAL(qsort)
+TLI_DEFINE_STRING_INTERNAL("qsort")
+/// ssize_t read(int fildes, void *buf, size_t nbyte);
+TLI_DEFINE_ENUM_INTERNAL(read)
+TLI_DEFINE_STRING_INTERNAL("read")
+/// ssize_t readlink(const char *path, char *buf, size_t bufsize);
+TLI_DEFINE_ENUM_INTERNAL(readlink)
+TLI_DEFINE_STRING_INTERNAL("readlink")
+/// void *realloc(void *ptr, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(realloc)
+TLI_DEFINE_STRING_INTERNAL("realloc")
+/// void *reallocf(void *ptr, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(reallocf)
+TLI_DEFINE_STRING_INTERNAL("reallocf")
+/// char *realpath(const char *file_name, char *resolved_name);
+TLI_DEFINE_ENUM_INTERNAL(realpath)
+TLI_DEFINE_STRING_INTERNAL("realpath")
+/// int remove(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(remove)
+TLI_DEFINE_STRING_INTERNAL("remove")
+/// int rename(const char *old, const char *new);
+TLI_DEFINE_ENUM_INTERNAL(rename)
+TLI_DEFINE_STRING_INTERNAL("rename")
+/// void rewind(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(rewind)
+TLI_DEFINE_STRING_INTERNAL("rewind")
+/// double rint(double x);
+TLI_DEFINE_ENUM_INTERNAL(rint)
+TLI_DEFINE_STRING_INTERNAL("rint")
+/// float rintf(float x);
+TLI_DEFINE_ENUM_INTERNAL(rintf)
+TLI_DEFINE_STRING_INTERNAL("rintf")
+/// long double rintl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(rintl)
+TLI_DEFINE_STRING_INTERNAL("rintl")
+/// int rmdir(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(rmdir)
+TLI_DEFINE_STRING_INTERNAL("rmdir")
+/// double round(double x);
+TLI_DEFINE_ENUM_INTERNAL(round)
+TLI_DEFINE_STRING_INTERNAL("round")
+/// float roundf(float x);
+TLI_DEFINE_ENUM_INTERNAL(roundf)
+TLI_DEFINE_STRING_INTERNAL("roundf")
+/// long double roundl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(roundl)
+TLI_DEFINE_STRING_INTERNAL("roundl")
+/// int scanf(const char *restrict format, ... );
+TLI_DEFINE_ENUM_INTERNAL(scanf)
+TLI_DEFINE_STRING_INTERNAL("scanf")
+/// void setbuf(FILE *stream, char *buf);
+TLI_DEFINE_ENUM_INTERNAL(setbuf)
+TLI_DEFINE_STRING_INTERNAL("setbuf")
+/// int setitimer(int which, const struct itimerval *value,
+///               struct itimerval *ovalue);
+TLI_DEFINE_ENUM_INTERNAL(setitimer)
+TLI_DEFINE_STRING_INTERNAL("setitimer")
+/// int setvbuf(FILE *stream, char *buf, int type, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(setvbuf)
+TLI_DEFINE_STRING_INTERNAL("setvbuf")
+/// double sin(double x);
+TLI_DEFINE_ENUM_INTERNAL(sin)
+TLI_DEFINE_STRING_INTERNAL("sin")
+/// float sinf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinf)
+TLI_DEFINE_STRING_INTERNAL("sinf")
+/// double sinh(double x);
+TLI_DEFINE_ENUM_INTERNAL(sinh)
+TLI_DEFINE_STRING_INTERNAL("sinh")
+/// float sinhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinhf)
+TLI_DEFINE_STRING_INTERNAL("sinhf")
+/// long double sinhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sinhl)
+TLI_DEFINE_STRING_INTERNAL("sinhl")
+/// long double sinl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sinl)
+TLI_DEFINE_STRING_INTERNAL("sinl")
+/// int siprintf(char *str, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(siprintf)
+TLI_DEFINE_STRING_INTERNAL("siprintf")
+/// int snprintf(char *s, size_t n, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(snprintf)
+TLI_DEFINE_STRING_INTERNAL("snprintf")
+/// int sprintf(char *str, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(sprintf)
+TLI_DEFINE_STRING_INTERNAL("sprintf")
+/// double sqrt(double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrt)
+TLI_DEFINE_STRING_INTERNAL("sqrt")
+/// float sqrtf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtf)
+TLI_DEFINE_STRING_INTERNAL("sqrtf")
+/// long double sqrtl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtl)
+TLI_DEFINE_STRING_INTERNAL("sqrtl")
+/// int sscanf(const char *s, const char *format, ... );
+TLI_DEFINE_ENUM_INTERNAL(sscanf)
+TLI_DEFINE_STRING_INTERNAL("sscanf")
+/// int stat(const char *path, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(stat)
+TLI_DEFINE_STRING_INTERNAL("stat")
+/// int stat64(const char *path, struct stat64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(stat64)
+TLI_DEFINE_STRING_INTERNAL("stat64")
+/// int statvfs(const char *path, struct statvfs *buf);
+TLI_DEFINE_ENUM_INTERNAL(statvfs)
+TLI_DEFINE_STRING_INTERNAL("statvfs")
+/// int statvfs64(const char *path, struct statvfs64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(statvfs64)
+TLI_DEFINE_STRING_INTERNAL("statvfs64")
+/// char *stpcpy(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(stpcpy)
+TLI_DEFINE_STRING_INTERNAL("stpcpy")
+/// char *stpncpy(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(stpncpy)
+TLI_DEFINE_STRING_INTERNAL("stpncpy")
+/// int strcasecmp(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcasecmp)
+TLI_DEFINE_STRING_INTERNAL("strcasecmp")
+/// char *strcat(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcat)
+TLI_DEFINE_STRING_INTERNAL("strcat")
+/// char *strchr(const char *s, int c);
+TLI_DEFINE_ENUM_INTERNAL(strchr)
+TLI_DEFINE_STRING_INTERNAL("strchr")
+/// int strcmp(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcmp)
+TLI_DEFINE_STRING_INTERNAL("strcmp")
+/// int strcoll(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcoll)
+TLI_DEFINE_STRING_INTERNAL("strcoll")
+/// char *strcpy(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcpy)
+TLI_DEFINE_STRING_INTERNAL("strcpy")
+/// size_t strcspn(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcspn)
+TLI_DEFINE_STRING_INTERNAL("strcspn")
+/// char *strdup(const char *s1);
+TLI_DEFINE_ENUM_INTERNAL(strdup)
+TLI_DEFINE_STRING_INTERNAL("strdup")
+/// size_t strlen(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(strlen)
+TLI_DEFINE_STRING_INTERNAL("strlen")
+/// int strncasecmp(const char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncasecmp)
+TLI_DEFINE_STRING_INTERNAL("strncasecmp")
+/// char *strncat(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncat)
+TLI_DEFINE_STRING_INTERNAL("strncat")
+/// int strncmp(const char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncmp)
+TLI_DEFINE_STRING_INTERNAL("strncmp")
+/// char *strncpy(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncpy)
+TLI_DEFINE_STRING_INTERNAL("strncpy")
+/// char *strndup(const char *s1, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strndup)
+TLI_DEFINE_STRING_INTERNAL("strndup")
+/// size_t strnlen(const char *s, size_t maxlen);
+TLI_DEFINE_ENUM_INTERNAL(strnlen)
+TLI_DEFINE_STRING_INTERNAL("strnlen")
+/// char *strpbrk(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strpbrk)
+TLI_DEFINE_STRING_INTERNAL("strpbrk")
+/// char *strrchr(const char *s, int c);
+TLI_DEFINE_ENUM_INTERNAL(strrchr)
+TLI_DEFINE_STRING_INTERNAL("strrchr")
+/// size_t strspn(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strspn)
+TLI_DEFINE_STRING_INTERNAL("strspn")
+/// char *strstr(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strstr)
+TLI_DEFINE_STRING_INTERNAL("strstr")
+/// double strtod(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtod)
+TLI_DEFINE_STRING_INTERNAL("strtod")
+/// float strtof(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtof)
+TLI_DEFINE_STRING_INTERNAL("strtof")
+/// char *strtok(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strtok)
+TLI_DEFINE_STRING_INTERNAL("strtok")
+/// char *strtok_r(char *s, const char *sep, char **lasts);
+TLI_DEFINE_ENUM_INTERNAL(strtok_r)
+TLI_DEFINE_STRING_INTERNAL("strtok_r")
+/// long int strtol(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtol)
+TLI_DEFINE_STRING_INTERNAL("strtol")
+/// long double strtold(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtold)
+TLI_DEFINE_STRING_INTERNAL("strtold")
+/// long long int strtoll(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoll)
+TLI_DEFINE_STRING_INTERNAL("strtoll")
+/// unsigned long int strtoul(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoul)
+TLI_DEFINE_STRING_INTERNAL("strtoul")
+/// unsigned long long int strtoull(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoull)
+TLI_DEFINE_STRING_INTERNAL("strtoull")
+/// size_t strxfrm(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strxfrm)
+TLI_DEFINE_STRING_INTERNAL("strxfrm")
+/// int system(const char *command);
+TLI_DEFINE_ENUM_INTERNAL(system)
+TLI_DEFINE_STRING_INTERNAL("system")
+/// double tan(double x);
+TLI_DEFINE_ENUM_INTERNAL(tan)
+TLI_DEFINE_STRING_INTERNAL("tan")
+/// float tanf(float x);
+TLI_DEFINE_ENUM_INTERNAL(tanf)
+TLI_DEFINE_STRING_INTERNAL("tanf")
+/// double tanh(double x);
+TLI_DEFINE_ENUM_INTERNAL(tanh)
+TLI_DEFINE_STRING_INTERNAL("tanh")
+/// float tanhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(tanhf)
+TLI_DEFINE_STRING_INTERNAL("tanhf")
+/// long double tanhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(tanhl)
+TLI_DEFINE_STRING_INTERNAL("tanhl")
+/// long double tanl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(tanl)
+TLI_DEFINE_STRING_INTERNAL("tanl")
+/// clock_t times(struct tms *buffer);
+TLI_DEFINE_ENUM_INTERNAL(times)
+TLI_DEFINE_STRING_INTERNAL("times")
+/// FILE *tmpfile(void);
+TLI_DEFINE_ENUM_INTERNAL(tmpfile)
+TLI_DEFINE_STRING_INTERNAL("tmpfile")
+/// FILE *tmpfile64(void);
+TLI_DEFINE_ENUM_INTERNAL(tmpfile64)
+TLI_DEFINE_STRING_INTERNAL("tmpfile64")
+/// int toascii(int c);
+TLI_DEFINE_ENUM_INTERNAL(toascii)
+TLI_DEFINE_STRING_INTERNAL("toascii")
+/// double trunc(double x);
+TLI_DEFINE_ENUM_INTERNAL(trunc)
+TLI_DEFINE_STRING_INTERNAL("trunc")
+/// float truncf(float x);
+TLI_DEFINE_ENUM_INTERNAL(truncf)
+TLI_DEFINE_STRING_INTERNAL("truncf")
+/// long double truncl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(truncl)
+TLI_DEFINE_STRING_INTERNAL("truncl")
+/// int uname(struct utsname *name);
+TLI_DEFINE_ENUM_INTERNAL(uname)
+TLI_DEFINE_STRING_INTERNAL("uname")
+/// int ungetc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ungetc)
+TLI_DEFINE_STRING_INTERNAL("ungetc")
+/// int unlink(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(unlink)
+TLI_DEFINE_STRING_INTERNAL("unlink")
+/// int unsetenv(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(unsetenv)
+TLI_DEFINE_STRING_INTERNAL("unsetenv")
+/// int utime(const char *path, const struct utimbuf *times);
+TLI_DEFINE_ENUM_INTERNAL(utime)
+TLI_DEFINE_STRING_INTERNAL("utime")
+/// int utimes(const char *path, const struct timeval times[2]);
+TLI_DEFINE_ENUM_INTERNAL(utimes)
+TLI_DEFINE_STRING_INTERNAL("utimes")
+/// void *valloc(size_t size);
+TLI_DEFINE_ENUM_INTERNAL(valloc)
+TLI_DEFINE_STRING_INTERNAL("valloc")
+/// int vfprintf(FILE *stream, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vfprintf)
+TLI_DEFINE_STRING_INTERNAL("vfprintf")
+/// int vfscanf(FILE *stream, const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vfscanf)
+TLI_DEFINE_STRING_INTERNAL("vfscanf")
+/// int vprintf(const char *restrict format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vprintf)
+TLI_DEFINE_STRING_INTERNAL("vprintf")
+/// int vscanf(const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vscanf)
+TLI_DEFINE_STRING_INTERNAL("vscanf")
+/// int vsnprintf(char *s, size_t n, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vsnprintf)
+TLI_DEFINE_STRING_INTERNAL("vsnprintf")
+/// int vsprintf(char *s, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vsprintf)
+TLI_DEFINE_STRING_INTERNAL("vsprintf")
+/// int vsscanf(const char *s, const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vsscanf)
+TLI_DEFINE_STRING_INTERNAL("vsscanf")
+/// size_t wcslen (const wchar_t* wcs);
+TLI_DEFINE_ENUM_INTERNAL(wcslen)
+TLI_DEFINE_STRING_INTERNAL("wcslen")
+/// ssize_t write(int fildes, const void *buf, size_t nbyte);
+TLI_DEFINE_ENUM_INTERNAL(write)
+TLI_DEFINE_STRING_INTERNAL("write")
+
+#undef TLI_DEFINE_ENUM_INTERNAL
+#undef TLI_DEFINE_STRING_INTERNAL
+#endif  // One of TLI_DEFINE_ENUM/STRING are defined.
+
+#undef TLI_DEFINE_ENUM
+#undef TLI_DEFINE_STRING
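+
+// Illustrative use of the X-macro idiom above (a sketch; mirrors how an
+// implementation can build its name table): defining TLI_DEFINE_STRING
+// before inclusion expands every entry into its string literal:
+//
+//   static const char *const StandardNames[] = {
+//   #define TLI_DEFINE_STRING
+//   #include "llvm/Analysis/TargetLibraryInfo.def"
+//   };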
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.h b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.h
new file mode 100644
index 0000000..a3fe834
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.h
@@ -0,0 +1,394 @@
+//===-- TargetLibraryInfo.h - Library information ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
+#define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+template <typename T> class ArrayRef;
+
+/// Describes a possible vectorization of a function.
+/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
+/// by a factor 'VectorizationFactor'.
+struct VecDesc {
+  StringRef ScalarFnName;
+  StringRef VectorFnName;
+  unsigned VectorizationFactor;
+};
+
+enum LibFunc {
+#define TLI_DEFINE_ENUM
+#include "llvm/Analysis/TargetLibraryInfo.def"
+
+  NumLibFuncs
+};
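+
+// Illustrative expansion (a sketch of the X-macro mechanism): with
+// TLI_DEFINE_ENUM defined, each entry in the .def file becomes one
+// enumerator, e.g.
+//   TLI_DEFINE_ENUM_INTERNAL(sqrt)  =>  LibFunc_sqrt,
+// so the enum lists one LibFunc_* value per known library function,
+// followed by NumLibFuncs.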
+
+/// Implementation of the target library information.
+///
+/// This class constructs tables that hold the target library information and
+/// make it available. However, it is somewhat expensive to compute and only
+/// depends on the triple. So users typically interact with the \c
+/// TargetLibraryInfo wrapper below.
+class TargetLibraryInfoImpl {
+  friend class TargetLibraryInfo;
+
+  unsigned char AvailableArray[(NumLibFuncs+3)/4];
+  llvm::DenseMap<unsigned, std::string> CustomNames;
+  static StringRef const StandardNames[NumLibFuncs];
+  bool ShouldExtI32Param, ShouldExtI32Return, ShouldSignExtI32Param;
+
+  enum AvailabilityState {
+    StandardName = 3, // (memset to all ones)
+    CustomName = 1,
+    Unavailable = 0  // (memset to all zeros)
+  };
+  void setState(LibFunc F, AvailabilityState State) {
+    AvailableArray[F/4] &= ~(3 << 2*(F&3));
+    AvailableArray[F/4] |= State << 2*(F&3);
+  }
+  AvailabilityState getState(LibFunc F) const {
+    return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
+  }
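+
+  // Illustrative packing (a sketch, not part of the upstream interface):
+  // each LibFunc occupies 2 bits, four functions per byte. For F == 5:
+  //   byte index  F/4      == 1
+  //   bit offset  2*(F&3)  == 2
+  // so setState(F, CustomName) clears (3 << 2) in AvailableArray[1] and then
+  // ORs in (CustomName << 2).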
+
+  /// Vectorization descriptors - sorted by ScalarFnName.
+  std::vector<VecDesc> VectorDescs;
+  /// Scalarization descriptors - same content as VectorDescs but sorted based
+  /// on VectorFnName rather than ScalarFnName.
+  std::vector<VecDesc> ScalarDescs;
+
+  /// Return true if the function type FTy is valid for the library function
+  /// F, regardless of whether the function is available.
+  bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
+                              const DataLayout *DL) const;
+
+public:
+  /// List of known vector-functions libraries.
+  ///
+  /// The vector-functions library defines which functions are vectorizable
+  /// and with which factor. The library can be specified by either the
+  /// frontend or a command-line option, and is then used by
+  /// addVectorizableFunctionsFromVecLib to fill the tables of vectorizable
+  /// functions.
+  enum VectorLibrary {
+    NoLibrary,  // Don't use any vector library.
+    Accelerate, // Use Accelerate framework.
+    SVML        // Intel short vector math library.
+  };
+
+  TargetLibraryInfoImpl();
+  explicit TargetLibraryInfoImpl(const Triple &T);
+
+  // Provide value semantics.
+  TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
+  TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
+  TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
+  TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
+
+  /// Searches for a particular function name.
+  ///
+  /// If it is one of the known library functions, return true and set F to the
+  /// corresponding value.
+  bool getLibFunc(StringRef funcName, LibFunc &F) const;
+
+  /// Searches for a particular function name, also checking that its type is
+  /// valid for the library function matching that name.
+  ///
+  /// If it is one of the known library functions, return true and set F to the
+  /// corresponding value.
+  bool getLibFunc(const Function &FDecl, LibFunc &F) const;
+
+  /// Forces a function to be marked as unavailable.
+  void setUnavailable(LibFunc F) {
+    setState(F, Unavailable);
+  }
+
+  /// Forces a function to be marked as available.
+  void setAvailable(LibFunc F) {
+    setState(F, StandardName);
+  }
+
+  /// Forces a function to be marked as available and provide an alternate name
+  /// that must be used.
+  void setAvailableWithName(LibFunc F, StringRef Name) {
+    if (StandardNames[F] != Name) {
+      setState(F, CustomName);
+      CustomNames[F] = Name;
+      assert(CustomNames.find(F) != CustomNames.end());
+    } else {
+      setState(F, StandardName);
+    }
+  }
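+
+  // Illustrative usage (a sketch; the triple variable and the remapped name
+  // are hypothetical):
+  //   TargetLibraryInfoImpl TLII(TargetTriple);
+  //   TLII.setAvailableWithName(LibFunc_sqrt, "__my_sqrt");
+  //   // TargetLibraryInfo(TLII).getName(LibFunc_sqrt) now yields "__my_sqrt".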
+
+  /// Disables all builtins.
+  ///
+  /// This can be used for options like -fno-builtin.
+  void disableAllFunctions();
+
+  /// Add a set of scalar -> vector mappings, queryable via
+  /// getVectorizedFunction and getScalarizedFunction.
+  void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
+
+  /// Calls addVectorizableFunctions with a known preset of functions for the
+  /// given vector library.
+  void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib);
+
+  /// Return true if the function F has a vector equivalent with vectorization
+  /// factor VF.
+  bool isFunctionVectorizable(StringRef F, unsigned VF) const {
+    return !getVectorizedFunction(F, VF).empty();
+  }
+
+  /// Return true if the function F has a vector equivalent with any
+  /// vectorization factor.
+  bool isFunctionVectorizable(StringRef F) const;
+
+  /// Return the name of the equivalent of F, vectorized with factor VF. If no
+  /// such mapping exists, return the empty string.
+  StringRef getVectorizedFunction(StringRef F, unsigned VF) const;
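+
+  // Illustrative mapping on a TargetLibraryInfoImpl object (a sketch; the
+  // vector name and factor are hypothetical):
+  //   Impl.addVectorizableFunctions({{"sinf", "vec_sinf4", 4}});
+  //   Impl.getVectorizedFunction("sinf", 4);  // returns "vec_sinf4"
+  //   Impl.isFunctionVectorizable("sinf", 8); // returns false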
+
+  /// Return true if the function F has a scalar equivalent, and set VF to be
+  /// the vectorization factor.
+  bool isFunctionScalarizable(StringRef F, unsigned &VF) const {
+    return !getScalarizedFunction(F, VF).empty();
+  }
+
+  /// Return the name of the equivalent of F, scalarized. If no such mapping
+  /// exists, return the empty string.
+  ///
+  /// Set VF to the vectorization factor.
+  StringRef getScalarizedFunction(StringRef F, unsigned &VF) const;
+
+  /// Set to true iff i32 parameters to library functions should have signext
+  /// or zeroext attributes if they correspond to C-level int or unsigned int,
+  /// respectively.
+  void setShouldExtI32Param(bool Val) {
+    ShouldExtI32Param = Val;
+  }
+
+  /// Set to true iff i32 results from library functions should have signext
+  /// or zeroext attributes if they correspond to C-level int or unsigned int,
+  /// respectively.
+  void setShouldExtI32Return(bool Val) {
+    ShouldExtI32Return = Val;
+  }
+
+  /// Set to true iff i32 parameters to library functions should have signext
+  /// attribute if they correspond to C-level int or unsigned int.
+  void setShouldSignExtI32Param(bool Val) {
+    ShouldSignExtI32Param = Val;
+  }
+
+  /// Returns the size of the wchar_t type in bytes or 0 if the size is unknown.
+  /// This queries the 'wchar_size' metadata.
+  unsigned getWCharSize(const Module &M) const;
+};
+
+/// Provides information about what library functions are available for
+/// the current target.
+///
+/// This both allows optimizations to handle these functions specially and
+/// frontends to disable such optimizations through -fno-builtin etc.
+class TargetLibraryInfo {
+  friend class TargetLibraryAnalysis;
+  friend class TargetLibraryInfoWrapperPass;
+
+  const TargetLibraryInfoImpl *Impl;
+
+public:
+  explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl) : Impl(&Impl) {}
+
+  // Provide value semantics.
+  TargetLibraryInfo(const TargetLibraryInfo &TLI) : Impl(TLI.Impl) {}
+  TargetLibraryInfo(TargetLibraryInfo &&TLI) : Impl(TLI.Impl) {}
+  TargetLibraryInfo &operator=(const TargetLibraryInfo &TLI) {
+    Impl = TLI.Impl;
+    return *this;
+  }
+  TargetLibraryInfo &operator=(TargetLibraryInfo &&TLI) {
+    Impl = TLI.Impl;
+    return *this;
+  }
+
+  /// Searches for a particular function name.
+  ///
+  /// If it is one of the known library functions, return true and set F to the
+  /// corresponding value.
+  bool getLibFunc(StringRef funcName, LibFunc &F) const {
+    return Impl->getLibFunc(funcName, F);
+  }
+
+  bool getLibFunc(const Function &FDecl, LibFunc &F) const {
+    return Impl->getLibFunc(FDecl, F);
+  }
+
+  /// If a callsite does not have the 'nobuiltin' attribute, return whether
+  /// the called function is a known library function and set F to that
+  /// function.
+  bool getLibFunc(ImmutableCallSite CS, LibFunc &F) const {
+    return !CS.isNoBuiltin() && CS.getCalledFunction() &&
+           getLibFunc(*(CS.getCalledFunction()), F);
+  }
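+
+  // Illustrative pattern from an optimization pass (a sketch; Callee is a
+  // hypothetical const Function*):
+  //   LibFunc Func;
+  //   if (TLI.getLibFunc(*Callee, Func) && TLI.has(Func) &&
+  //       Func == LibFunc_sqrt) {
+  //     // It is safe to treat the call as the standard sqrt().
+  //   }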
+
+  /// Tests whether a library function is available.
+  bool has(LibFunc F) const {
+    return Impl->getState(F) != TargetLibraryInfoImpl::Unavailable;
+  }
+  bool isFunctionVectorizable(StringRef F, unsigned VF) const {
+    return Impl->isFunctionVectorizable(F, VF);
+  }
+  bool isFunctionVectorizable(StringRef F) const {
+    return Impl->isFunctionVectorizable(F);
+  }
+  StringRef getVectorizedFunction(StringRef F, unsigned VF) const {
+    return Impl->getVectorizedFunction(F, VF);
+  }
+
+  /// Tests if the function is both available and a candidate for optimized code
+  /// generation.
+  bool hasOptimizedCodeGen(LibFunc F) const {
+    if (Impl->getState(F) == TargetLibraryInfoImpl::Unavailable)
+      return false;
+    switch (F) {
+    default: break;
+    case LibFunc_copysign:     case LibFunc_copysignf:  case LibFunc_copysignl:
+    case LibFunc_fabs:         case LibFunc_fabsf:      case LibFunc_fabsl:
+    case LibFunc_sin:          case LibFunc_sinf:       case LibFunc_sinl:
+    case LibFunc_cos:          case LibFunc_cosf:       case LibFunc_cosl:
+    case LibFunc_sqrt:         case LibFunc_sqrtf:      case LibFunc_sqrtl:
+    case LibFunc_sqrt_finite:  case LibFunc_sqrtf_finite:
+    case LibFunc_sqrtl_finite:
+    case LibFunc_fmax:         case LibFunc_fmaxf:      case LibFunc_fmaxl:
+    case LibFunc_fmin:         case LibFunc_fminf:      case LibFunc_fminl:
+    case LibFunc_floor:        case LibFunc_floorf:     case LibFunc_floorl:
+    case LibFunc_nearbyint:    case LibFunc_nearbyintf: case LibFunc_nearbyintl:
+    case LibFunc_ceil:         case LibFunc_ceilf:      case LibFunc_ceill:
+    case LibFunc_rint:         case LibFunc_rintf:      case LibFunc_rintl:
+    case LibFunc_round:        case LibFunc_roundf:     case LibFunc_roundl:
+    case LibFunc_trunc:        case LibFunc_truncf:     case LibFunc_truncl:
+    case LibFunc_log2:         case LibFunc_log2f:      case LibFunc_log2l:
+    case LibFunc_exp2:         case LibFunc_exp2f:      case LibFunc_exp2l:
+    case LibFunc_memcmp:       case LibFunc_strcmp:     case LibFunc_strcpy:
+    case LibFunc_stpcpy:       case LibFunc_strlen:     case LibFunc_strnlen:
+    case LibFunc_memchr:       case LibFunc_mempcpy:
+      return true;
+    }
+    return false;
+  }
+
+  StringRef getName(LibFunc F) const {
+    auto State = Impl->getState(F);
+    if (State == TargetLibraryInfoImpl::Unavailable)
+      return StringRef();
+    if (State == TargetLibraryInfoImpl::StandardName)
+      return Impl->StandardNames[F];
+    assert(State == TargetLibraryInfoImpl::CustomName);
+    return Impl->CustomNames.find(F)->second;
+  }
+
+  /// Returns extension attribute kind to be used for i32 parameters
+  /// corresponding to C-level int or unsigned int.  May be zeroext, signext,
+  /// or none.
+  Attribute::AttrKind getExtAttrForI32Param(bool Signed = true) const {
+    if (Impl->ShouldExtI32Param)
+      return Signed ? Attribute::SExt : Attribute::ZExt;
+    if (Impl->ShouldSignExtI32Param)
+      return Attribute::SExt;
+    return Attribute::None;
+  }
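+
+  // Illustrative usage when emitting a libcall (a sketch; CI is a
+  // hypothetical CallInst*): attach the required extension attribute, if
+  // any, to an i32 argument:
+  //   if (Attribute::AttrKind Ext = TLI.getExtAttrForI32Param(/*Signed=*/true))
+  //     CI->addParamAttr(0, Ext);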
+
+  /// Returns extension attribute kind to be used for i32 return values
+  /// corresponding to C-level int or unsigned int.  May be zeroext, signext,
+  /// or none.
+  Attribute::AttrKind getExtAttrForI32Return(bool Signed = true) const {
+    if (Impl->ShouldExtI32Return)
+      return Signed ? Attribute::SExt : Attribute::ZExt;
+    return Attribute::None;
+  }
+
+  /// \copydoc TargetLibraryInfoImpl::getWCharSize()
+  unsigned getWCharSize(const Module &M) const {
+    return Impl->getWCharSize(M);
+  }
+
+  /// Handle invalidation from the pass manager.
+  ///
+  /// If we try to invalidate this info, just return false. It cannot become
+  /// invalid even if the module or function changes.
+  bool invalidate(Module &, const PreservedAnalyses &,
+                  ModuleAnalysisManager::Invalidator &) {
+    return false;
+  }
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+};
+
+/// Analysis pass providing the \c TargetLibraryInfo.
+///
+/// Note that this pass's result cannot be invalidated; it is immutable for
+/// the life of the module.
+class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
+public:
+  typedef TargetLibraryInfo Result;
+
+  /// Default construct the library analysis.
+  ///
+  /// This will use the module's triple to construct the library info for that
+  /// module.
+  TargetLibraryAnalysis() {}
+
+  /// Construct a library analysis with preset info.
+  ///
+  /// This will directly copy the preset info into the result without
+  /// consulting the module's triple.
+  TargetLibraryAnalysis(TargetLibraryInfoImpl PresetInfoImpl)
+      : PresetInfoImpl(std::move(PresetInfoImpl)) {}
+
+  TargetLibraryInfo run(Module &M, ModuleAnalysisManager &);
+  TargetLibraryInfo run(Function &F, FunctionAnalysisManager &);
+
+private:
+  friend AnalysisInfoMixin<TargetLibraryAnalysis>;
+  static AnalysisKey Key;
+
+  Optional<TargetLibraryInfoImpl> PresetInfoImpl;
+
+  StringMap<std::unique_ptr<TargetLibraryInfoImpl>> Impls;
+
+  TargetLibraryInfoImpl &lookupInfoImpl(const Triple &T);
+};
+
+class TargetLibraryInfoWrapperPass : public ImmutablePass {
+  TargetLibraryInfoImpl TLIImpl;
+  TargetLibraryInfo TLI;
+
+  virtual void anchor();
+
+public:
+  static char ID;
+  TargetLibraryInfoWrapperPass();
+  explicit TargetLibraryInfoWrapperPass(const Triple &T);
+  explicit TargetLibraryInfoWrapperPass(const TargetLibraryInfoImpl &TLI);
+
+  TargetLibraryInfo &getTLI() { return TLI; }
+  const TargetLibraryInfo &getTLI() const { return TLI; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h
new file mode 100644
index 0000000..9e9c661
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h
@@ -0,0 +1,1662 @@
+//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass exposes codegen information to IR-level passes. Every
+/// transformation that uses codegen information is broken into three parts:
+/// 1. The IR-level analysis pass.
+/// 2. The IR-level transformation interface which provides the needed
+///    information.
+/// 3. Codegen-level implementation which uses target-specific hooks.
+///
+/// This file defines #2, which is the interface that IR-level transformations
+/// use for querying the codegen.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
+#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/DataTypes.h"
+#include <functional>
+
+namespace llvm {
+
+namespace Intrinsic {
+enum ID : unsigned;
+}
+
+class Function;
+class GlobalValue;
+class IntrinsicInst;
+class LoadInst;
+class Loop;
+class SCEV;
+class ScalarEvolution;
+class StoreInst;
+class SwitchInst;
+class Type;
+class User;
+class Value;
+
+/// \brief Information about a load/store intrinsic defined by the target.
+struct MemIntrinsicInfo {
+  /// This is the pointer that the intrinsic is loading from or storing to.
+  /// If this is non-null, then analysis/optimization passes can assume that
+  /// this intrinsic is functionally equivalent to a load/store from this
+  /// pointer.
+  Value *PtrVal = nullptr;
+
+  // Ordering for atomic operations.
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+
+  // Same Id is set by the target for corresponding load/store intrinsics.
+  unsigned short MatchingId = 0;
+
+  bool ReadMem = false;
+  bool WriteMem = false;
+  bool IsVolatile = false;
+
+  bool isUnordered() const {
+    return (Ordering == AtomicOrdering::NotAtomic ||
+            Ordering == AtomicOrdering::Unordered) && !IsVolatile;
+  }
+};
+
+/// \brief This pass provides access to the codegen interfaces that are needed
+/// for IR-level transformations.
+class TargetTransformInfo {
+public:
+  /// \brief Construct a TTI object using a type implementing the \c Concept
+  /// API below.
+  ///
+  /// This is used by targets to construct a TTI wrapping their target-specific
+  /// implementation that encodes appropriate costs for their target.
+  template <typename T> TargetTransformInfo(T Impl);
+
+  /// \brief Construct a baseline TTI object using a minimal implementation of
+  /// the \c Concept API below.
+  ///
+  /// The TTI implementation will reflect the information in the DataLayout
+  /// provided if non-null.
+  explicit TargetTransformInfo(const DataLayout &DL);
+
+  // Provide move semantics.
+  TargetTransformInfo(TargetTransformInfo &&Arg);
+  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
+
+  // We need to define the destructor out-of-line to define our sub-classes
+  // out-of-line.
+  ~TargetTransformInfo();
+
+  /// \brief Handle the invalidation of this information.
+  ///
+  /// When used as a result of \c TargetIRAnalysis this method will be called
+  /// when the function this was computed for changes. When it returns false,
+  /// the information is preserved across those changes.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    // FIXME: We should probably in some way ensure that the subtarget
+    // information for a function hasn't changed.
+    return false;
+  }
+
+  /// \name Generic Target Information
+  /// @{
+
+  /// \brief The kind of cost model.
+  ///
+  /// There are several different cost models that can be customized by the
+  /// target. The normalization of each cost model may be target specific.
+  enum TargetCostKind {
+    TCK_RecipThroughput, ///< Reciprocal throughput.
+    TCK_Latency,         ///< The latency of the instruction.
+    TCK_CodeSize         ///< Instruction code size.
+  };
+
+  /// \brief Query the cost of a specified instruction.
+  ///
+  /// Clients should use this interface to query the cost of an existing
+  /// instruction. The instruction must have a valid parent (basic block).
+  ///
+  /// Note, this method does not cache the cost calculation and it
+  /// can be expensive in some cases.
+  int getInstructionCost(const Instruction *I, enum TargetCostKind kind) const {
+    switch (kind) {
+    case TCK_RecipThroughput:
+      return getInstructionThroughput(I);
+
+    case TCK_Latency:
+      return getInstructionLatency(I);
+
+    case TCK_CodeSize:
+      return getUserCost(I);
+    }
+    llvm_unreachable("Unknown instruction cost kind");
+  }
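+
+  // Illustrative query (a sketch; TTI and I are hypothetical references
+  // available in a pass):
+  //   int Cost =
+  //       TTI.getInstructionCost(&I, TargetTransformInfo::TCK_RecipThroughput);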
+
+  /// \brief Underlying constants for 'cost' values in this interface.
+  ///
+  /// Many APIs in this interface return a cost. This enum defines the
+  /// fundamental values that should be used to interpret (and produce) those
+  /// costs. The costs are returned as an int rather than a member of this
+  /// enumeration because it is expected that the cost of one IR instruction
+  /// may have a multiplicative factor to it or otherwise won't fit directly
+  /// into the enum. Moreover, it is common to sum or average costs which works
+  /// better as simple integral values. Thus this enum only provides constants.
+  /// Also note that the returned costs are signed integers to make it natural
+  /// to add, subtract, and test with zero (a common boundary condition). It is
+  /// not expected that 2^32 is a realistic cost to be modeling at any point.
+  ///
+  /// Note that these costs should usually reflect the intersection of code-size
+  /// cost and execution cost. A free instruction is typically one that folds
+  /// into another instruction. For example, reg-to-reg moves can often be
+  /// skipped by renaming the registers in the CPU, but they still are encoded
+  /// and thus wouldn't be considered 'free' here.
+  enum TargetCostConstants {
+    TCC_Free = 0,     ///< Expected to fold away in lowering.
+    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
+    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
+  };
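+
+  // Illustrative arithmetic (a sketch): because costs are plain signed ints,
+  // a straight-line sequence of three adds and one division is modeled as
+  //   3 * TCC_Basic + TCC_Expensive == 3 + 4 == 7
+  // on a target using these default constants.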
+
+  /// \brief Estimate the cost of a specific operation when lowered.
+  ///
+  /// Note that this is designed to work on an arbitrary synthetic opcode, and
+  /// thus work for hypothetical queries before an instruction has even been
+  /// formed. However, this does *not* work for GEPs, and must not be called
+  /// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
+  /// analyzing a GEP's cost requires more information.
+  ///
+  /// Typically only the result type is required, and the operand type can be
+  /// omitted. However, if the opcode is one of the cast instructions, the
+  /// operand type is required.
+  ///
+  /// The returned cost is defined in terms of \c TargetCostConstants, see its
+  /// comments for a detailed explanation of the cost values.
+  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;
+
+  /// \brief Estimate the cost of a GEP operation when lowered.
+  ///
+  /// The contract for this function is the same as \c getOperationCost except
+  /// that it supports an interface that provides extra information specific to
+  /// the GEP operation.
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) const;
+
+  /// \brief Estimate the cost of an EXT operation when lowered.
+  ///
+  /// The contract for this function is the same as \c getOperationCost except
+  /// that it supports an interface that provides extra information specific to
+  /// the EXT operation.
+  int getExtCost(const Instruction *I, const Value *Src) const;
+
+  /// \brief Estimate the cost of a function call when lowered.
+  ///
+  /// The contract for this is the same as \c getOperationCost except that it
+  /// supports an interface that provides extra information specific to call
+  /// instructions.
+  ///
+  /// This is the most basic query for estimating call cost: it only knows the
+  /// function type and (potentially) the number of arguments at the call site.
+  /// The latter is only interesting for varargs function types.
+  int getCallCost(FunctionType *FTy, int NumArgs = -1) const;
+
+  /// \brief Estimate the cost of calling a specific function when lowered.
+  ///
+  /// This overload adds the ability to reason about the particular function
+  /// being called in the event it is a library call with special lowering.
+  int getCallCost(const Function *F, int NumArgs = -1) const;
+
+  /// \brief Estimate the cost of calling a specific function when lowered.
+  ///
+  /// This overload allows specifying a set of candidate argument values.
+  int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;
+
+  /// \returns A value by which our inlining threshold should be multiplied.
+  /// This is primarily used to bump up the inlining threshold wholesale on
+  /// targets where calls are unusually expensive.
+  ///
+  /// TODO: This is a rather blunt instrument.  Perhaps altering the costs of
+  /// individual classes of instructions would be better.
+  unsigned getInliningThresholdMultiplier() const;
+
+  /// \brief Estimate the cost of an intrinsic when lowered.
+  ///
+  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
+  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                       ArrayRef<Type *> ParamTys) const;
+
+  /// \brief Estimate the cost of an intrinsic when lowered.
+  ///
+  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
+  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                       ArrayRef<const Value *> Arguments) const;
+
+  /// \return The estimated number of case clusters when lowering \p 'SI'.
+  /// \p JTSize is set to the jump table size only when \p SI is suitable for
+  /// a jump table.
+  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                            unsigned &JTSize) const;
+
+  /// \brief Estimate the cost of a given IR user when lowered.
+  ///
+  /// This can estimate the cost of either a ConstantExpr or Instruction when
+  /// lowered. It has two primary advantages over the \c getOperationCost and
+  /// \c getGEPCost above, and one significant disadvantage: it can only be
+  /// used when the IR construct has already been formed.
+  ///
+  /// The advantages are that it can inspect the SSA use graph to reason more
+  /// accurately about the cost. For example, all-constant-GEPs can often be
+  /// folded into a load or other instruction, but if they are used in some
+  /// other context they may not be folded. This routine can distinguish such
+  /// cases.
+  ///
+  /// \p Operands is a list of operands which can be a result of transformations
+  /// of the current operands. The number of operands on the list must equal
+  /// the number of current operands the IR user has, and their order on the
+  /// list must match the order of the current operands.
+  ///
+  /// The returned cost is defined in terms of \c TargetCostConstants, see its
+  /// comments for a detailed explanation of the cost values.
+  int getUserCost(const User *U, ArrayRef<const Value *> Operands) const;
+
+  /// \brief This is a helper function which calls the two-argument getUserCost
+  /// with \p Operands which are the current operands U has.
+  int getUserCost(const User *U) const {
+    SmallVector<const Value *, 4> Operands(U->value_op_begin(),
+                                           U->value_op_end());
+    return getUserCost(U, Operands);
+  }
+
+  /// \brief Return true if branch divergence exists.
+  ///
+  /// Branch divergence has a significantly negative impact on GPU performance
+  /// when threads in the same wavefront take different paths due to conditional
+  /// branches.
+  bool hasBranchDivergence() const;
+
+  /// \brief Returns whether V is a source of divergence.
+  ///
+  /// This function provides the target-dependent information for
+  /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
+  /// builds the dependency graph, and then runs the reachability algorithm
+  /// starting with the sources of divergence.
+  bool isSourceOfDivergence(const Value *V) const;
+
+  /// \brief Returns true for the target-specific set of operations which
+  /// produce a uniform result even when taking non-uniform arguments.
+  bool isAlwaysUniform(const Value *V) const;
+
+  /// Returns the address space ID for a target's 'flat' address space. Note
+  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
+  /// refers to as the generic address space. The flat address space is a
+  /// generic address space that can be used to access multiple segments of
+  /// memory with different address spaces. Accessing a memory location through
+  /// a pointer with this address space is expected to be legal but slower
+  /// compared to the same memory location accessed through a pointer with a
+  /// different address space.
+  ///
+  /// This is for targets with different pointer representations which can
+  /// be converted with the addrspacecast instruction. If a pointer is converted
+  /// to this address space, optimizations should attempt to replace the access
+  /// with the source address space.
+  ///
+  /// \returns ~0u if the target does not have such a flat address space to
+  /// optimize away.
+  unsigned getFlatAddressSpace() const;
+
+  /// \brief Test whether calls to a function lower to actual program function
+  /// calls.
+  ///
+  /// The idea is to test whether the program is likely to require a 'call'
+  /// instruction or equivalent in order to call the given function.
+  ///
+  /// FIXME: It's not clear that this is a good or useful query API. Clients
+  /// should probably move to simpler cost metrics using the above.
+  /// Alternatively, we could split the cost interface into distinct code-size
+  /// and execution-speed costs. This would allow modelling the core of this
+  /// query more accurately as a call is a single small instruction, but
+  /// incurs significant execution cost.
+  bool isLoweredToCall(const Function *F) const;
+
+  struct LSRCost {
+    /// TODO: Some of these could be merged. Also, a lexical ordering
+    /// isn't always optimal.
+    unsigned Insns;
+    unsigned NumRegs;
+    unsigned AddRecCost;
+    unsigned NumIVMuls;
+    unsigned NumBaseAdds;
+    unsigned ImmCost;
+    unsigned SetupCost;
+    unsigned ScaleCost;
+  };
+
+  /// Parameters that control the generic loop unrolling transformation.
+  struct UnrollingPreferences {
+    /// The cost threshold for the unrolled loop. Should be relative to the
+    /// getUserCost values returned by this API, and the expectation is that
+    /// the unrolled loop's instructions when run through that interface should
+    /// not exceed this cost. However, this is only an estimate. Also, specific
+    /// loops may be unrolled even with a cost above this threshold if deemed
+    /// profitable. Set this to UINT_MAX to disable the loop body cost
+    /// restriction.
+    unsigned Threshold;
+    /// If complete unrolling will reduce the cost of the loop, we will boost
+    /// the Threshold by a certain percent to allow more aggressive complete
+    /// unrolling. This value provides the maximum boost percentage that we
+    /// can apply to Threshold (The value should be no less than 100).
+    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
+    ///                                    MaxPercentThresholdBoost / 100)
+    /// E.g. if complete unrolling reduces the loop execution time by 50%
+    /// then we boost the threshold by the factor of 2x. If unrolling is not
+    /// expected to reduce the running time, then we do not increase the
+    /// threshold.
+    unsigned MaxPercentThresholdBoost;
+    /// The cost threshold for the unrolled loop when optimizing for size (set
+    /// to UINT_MAX to disable).
+    unsigned OptSizeThreshold;
+    /// The cost threshold for the unrolled loop, like Threshold, but used
+    /// for partial/runtime unrolling (set to UINT_MAX to disable).
+    unsigned PartialThreshold;
+    /// The cost threshold for the unrolled loop when optimizing for size, like
+    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
+    /// UINT_MAX to disable).
+    unsigned PartialOptSizeThreshold;
+    /// A forced unrolling factor (the number of concatenated bodies of the
+    /// original loop in the unrolled loop body). When set to 0, the unrolling
+    /// transformation will select an unrolling factor based on the current cost
+    /// threshold and other factors.
+    unsigned Count;
+    /// A forced peeling factor (the number of bodies of the original loop
+    /// that should be peeled off before the loop body). When set to 0, the
+    /// unrolling transformation will select a peeling factor based on profile
+    /// information and other factors.
+    unsigned PeelCount;
+    /// Default unroll count for loops with run-time trip count.
+    unsigned DefaultUnrollRuntimeCount;
+    /// Set the maximum unrolling factor. The unrolling factor may be selected
+    /// using the appropriate cost threshold, but may not exceed this number
+    /// (set to UINT_MAX to disable). This does not apply in cases where the
+    /// loop is being fully unrolled.
+    unsigned MaxCount;
+    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
+    /// applies even if full unrolling is selected. This allows a target to fall
+    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
+    unsigned FullUnrollMaxCount;
+    /// Represents the number of instructions optimized away when a "back
+    /// edge" becomes a "fall through" in the unrolled loop.
+    /// For now we count a conditional branch on a backedge and a comparison
+    /// feeding it.
+    unsigned BEInsns;
+    /// Allow partial unrolling (unrolling of loops to expand the size of the
+    /// loop body, not only to eliminate small constant-trip-count loops).
+    bool Partial;
+    /// Allow runtime unrolling (unrolling of loops to expand the size of the
+    /// loop body even when the number of loop iterations is not known at
+    /// compile time).
+    bool Runtime;
+    /// Allow generation of a loop remainder (extra iterations after unroll).
+    bool AllowRemainder;
+    /// Allow emitting expensive instructions (such as divisions) when computing
+    /// the trip count of a loop for runtime unrolling.
+    bool AllowExpensiveTripCount;
+    /// Apply loop unrolling to any kind of loop
+    /// (mainly to loops that fail runtime unrolling).
+    bool Force;
+    /// Allow using trip count upper bound to unroll loops.
+    bool UpperBound;
+    /// Allow peeling off loop iterations for loops with low dynamic tripcount.
+    bool AllowPeeling;
+    /// Allow unrolling of all the iterations of the runtime loop remainder.
+    bool UnrollRemainder;
+  };
+
+  /// \brief Get target-customized preferences for the generic loop unrolling
+  /// transformation. The caller will initialize UP with the current
+  /// target-independent defaults.
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
+                               UnrollingPreferences &UP) const;
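+
+  // Illustrative target override (a sketch; MyTTIImpl and the values are
+  // hypothetical):
+  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+  //                                           UnrollingPreferences &UP) {
+  //     UP.Partial = true; // allow partial unrolling on this target
+  //     UP.MaxCount = 4;   // but never unroll by more than a factor of 4
+  //   }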
+
+  /// @}
+
+  /// \name Scalar Target Information
+  /// @{
+
+  /// \brief Flags indicating the kind of support for population count.
+  ///
+  /// Compared to the SW implementation, HW support is supposed to
+  /// significantly boost the performance when the population is dense, and it
+  /// may or may not degrade performance if the population is sparse. HW
+  /// support is considered "Fast" if it can outperform, or is on a par with,
+  /// the SW implementation when the population is sparse; otherwise, it is
+  /// considered "Slow".
+  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
+
+  /// \brief Return true if the specified immediate is a legal add immediate;
+  /// that is, the target has add instructions which can add a register with
+  /// the immediate without having to materialize the immediate into a
+  /// register.
+  bool isLegalAddImmediate(int64_t Imm) const;
+
+  /// \brief Return true if the specified immediate is a legal icmp immediate;
+  /// that is, the target has icmp instructions which can compare a register
+  /// against the immediate without having to materialize the immediate into a
+  /// register.
+  bool isLegalICmpImmediate(int64_t Imm) const;
+
+  /// \brief Return true if the addressing mode represented by AM is legal for
+  /// this target, for a load/store of the specified type.
+  /// The type may be VoidTy, in which case only return true if the addressing
+  /// mode is legal for a load/store of any legal type.
+  /// If the target returns true from LSRWithInstrQueries(), I may be valid.
+  /// TODO: Handle pre/postinc as well.
+  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                             bool HasBaseReg, int64_t Scale,
+                             unsigned AddrSpace = 0,
+                             Instruction *I = nullptr) const;
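+
+  // Illustrative query (a sketch; Int32Ty is a hypothetical Type*): can the
+  // target fold the addressing mode [BaseReg + 4*IndexReg + 16] into an i32
+  // load?
+  //   bool OK = TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
+  //                                       /*BaseOffset=*/16,
+  //                                       /*HasBaseReg=*/true, /*Scale=*/4);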
+
+  /// \brief Return true if the LSR cost of C1 is lower than that of C2.
+  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
+                     TargetTransformInfo::LSRCost &C2) const;
+
+  /// Return true if the target can fuse a compare and branch.
+  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
+  /// calculation for the instructions in a loop.
+  bool canMacroFuseCmp() const;
+
+  /// \return True if LSR should make efforts to create/preserve post-inc
+  /// addressing mode expressions.
+  bool shouldFavorPostInc() const;
+
+  /// \brief Return true if the target supports masked load/store.
+  /// AVX2 and AVX-512 targets allow masks for consecutive loads and stores.
+  bool isLegalMaskedStore(Type *DataType) const;
+  bool isLegalMaskedLoad(Type *DataType) const;
+
+  /// \brief Return true if the target supports masked gather/scatter.
+  /// AVX-512 fully supports gather and scatter for vectors with 32- and
+  /// 64-bit scalar types.
+  bool isLegalMaskedScatter(Type *DataType) const;
+  bool isLegalMaskedGather(Type *DataType) const;
+
+  /// Return true if the target has a unified operation to calculate division
+  /// and remainder. If so, the additional implicit multiplication and
+  /// subtraction required to calculate a remainder from division are free. This
+  /// can enable more aggressive transformations for division and remainder than
+  /// would typically be allowed using throughput or size cost models.
+  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
+
+  /// Return true if the given instruction (assumed to be a memory access
+  /// instruction) has a volatile variant. If that's the case then we can avoid
+  /// addrspacecast to generic AS for volatile loads/stores. Default
+  /// implementation returns false, which prevents address space inference for
+  /// volatile loads/stores.
+  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;
+
+  /// Return true if the target doesn't mind addresses in vectors.
+  bool prefersVectorizedAddressing() const;
+
+  /// \brief Return the cost of the scaling factor used in the addressing
+  /// mode represented by AM for this target, for a load/store
+  /// of the specified type.
+  /// If the AM is supported, the return value must be >= 0.
+  /// If the AM is not supported, it returns a negative value.
+  /// TODO: Handle pre/postinc as well.
+  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                           bool HasBaseReg, int64_t Scale,
+                           unsigned AddrSpace = 0) const;
+
+  /// \brief Return true if the loop strength reduce pass should make
+  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
+  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
+  /// immediate offset and no index register.
+  bool LSRWithInstrQueries() const;
+
+  /// \brief Return true if it's free to truncate a value of type Ty1 to type
+  /// Ty2. E.g., on x86 it's free to truncate an i32 value in register EAX to i16
+  /// by referencing its sub-register AX.
+  bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+
+  /// \brief Return true if it is profitable to hoist an instruction from the
+  /// then/else blocks to before the if.
+  bool isProfitableToHoist(Instruction *I) const;
+
+  bool useAA() const;
+
+  /// \brief Return true if this type is legal.
+  bool isTypeLegal(Type *Ty) const;
+
+  /// \brief Returns the target's jmp_buf alignment in bytes.
+  unsigned getJumpBufAlignment() const;
+
+  /// \brief Returns the target's jmp_buf size in bytes.
+  unsigned getJumpBufSize() const;
+
+  /// \brief Return true if switches should be turned into lookup tables for the
+  /// target.
+  bool shouldBuildLookupTables() const;
+
+  /// \brief Return true if switches should be turned into lookup tables
+  /// containing this constant value for the target.
+  bool shouldBuildLookupTablesForConstant(Constant *C) const;
+
+  /// \brief Return true if the input function, which is cold at all call
+  /// sites, should use the coldcc calling convention.
+  bool useColdCCForColdCall(Function &F) const;
+
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                            unsigned VF) const;
+
+  /// If target has efficient vector element load/store instructions, it can
+  /// return true here so that insertion/extraction costs are not added to
+  /// the scalarization cost of a load/store.
+  bool supportsEfficientVectorElementLoadStore() const;
+
+  /// \brief Don't restrict interleaved unrolling to small loops.
+  bool enableAggressiveInterleaving(bool LoopHasReductions) const;
+
+  /// \brief If not nullptr, enable inline expansion of memcmp. IsZeroCmp is
+  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
+  struct MemCmpExpansionOptions {
+    // The list of available load sizes (in bytes), sorted in decreasing order.
+    SmallVector<unsigned, 8> LoadSizes;
+  };
+  const MemCmpExpansionOptions *enableMemCmpExpansion(bool IsZeroCmp) const;
+
+  /// \brief Enable matching of interleaved access groups.
+  bool enableInterleavedAccessVectorization() const;
+
+  /// \brief Indicate that it is potentially unsafe to automatically vectorize
+  /// floating-point operations because the semantics of vector and scalar
+  /// floating-point semantics may differ. For example, ARM NEON v7 SIMD math
+  /// does not support IEEE-754 denormal numbers, while depending on the
+  /// platform, scalar floating-point math does.
+  /// This applies to floating-point math operations and calls, not memory
+  /// operations, shuffles, or casts.
+  bool isFPVectorizationPotentiallyUnsafe() const;
+
+  /// \brief Determine if the target supports unaligned memory accesses.
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                      unsigned BitWidth, unsigned AddressSpace = 0,
+                                      unsigned Alignment = 1,
+                                      bool *Fast = nullptr) const;
+
+  /// \brief Return hardware support for population count.
+  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
+
+  /// \brief Return true if the hardware has a fast square-root instruction.
+  bool haveFastSqrt(Type *Ty) const;
+
+  /// Return true if it is faster to check if a floating-point value is NaN
+  /// (or not-NaN) versus a comparison against a constant FP zero value.
+  /// Targets should override this if materializing a 0.0 for comparison is
+  /// generally as cheap as checking for ordered/unordered.
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
+
+  /// \brief Return the expected cost of supporting the floating point operation
+  /// of the specified type.
+  int getFPOpCost(Type *Ty) const;
+
+  /// \brief Return the expected cost of materializing for the given integer
+  /// immediate of the specified type.
+  int getIntImmCost(const APInt &Imm, Type *Ty) const;
+
+  /// \brief Return the expected cost of materialization for the given integer
+  /// immediate of the specified type for a given instruction. The cost can be
+  /// zero if the immediate can be folded into the specified instruction.
+  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                    Type *Ty) const;
+  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+                    Type *Ty) const;
+
+  /// \brief Return the expected cost for the given integer when optimising
+  /// for size. This differs from the other integer immediate cost functions
+  /// in that it is subtarget agnostic. This is useful when you e.g. target
+  /// one ISA such as AArch32 but smaller encodings could be possible
+  /// with another such as Thumb. This return value is used as a penalty when
+  /// the total costs for a constant is calculated (the bigger the cost, the
+  /// more beneficial constant hoisting is).
+  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                            Type *Ty) const;
+  /// @}
+
+  /// \name Vector Target Information
+  /// @{
+
+  /// \brief The various kinds of shuffle patterns for vector queries.
+  enum ShuffleKind {
+    SK_Broadcast,       ///< Broadcast element 0 to all other elements.
+    SK_Reverse,         ///< Reverse the order of the vector.
+    SK_Alternate,       ///< Choose alternate elements from vector.
+    SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
+    SK_ExtractSubvector,///< ExtractSubvector. Index indicates start offset.
+    SK_PermuteTwoSrc,   ///< Merge elements from two source vectors into one
+                        ///< with any shuffle mask.
+    SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any
+                        ///< shuffle mask.
+  };
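+
+  // For intuition, on a 4-element vector the kinds above correspond to
+  // shufflevector masks such as (illustrative sketch, not part of the API):
+  //   SK_Broadcast:        <0, 0, 0, 0>
+  //   SK_Reverse:          <3, 2, 1, 0>
+  //   SK_Alternate:        <0, 5, 2, 7>  (alternating picks from two sources)
+  //   SK_PermuteSingleSrc: any mask over one source, e.g. <2, 0, 3, 1>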
+
+  /// \brief Additional information about an operand's possible values.
+  enum OperandValueKind {
+    OK_AnyValue,               // Operand can have any value.
+    OK_UniformValue,           // Operand is uniform (splat of a value).
+    OK_UniformConstantValue,   // Operand is uniform constant.
+    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
+  };
+
+  /// \brief Additional properties of an operand's values.
+  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
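+
+  // E.g. a divisor operand that is a splat of the constant 16 would be
+  // described as OK_UniformConstantValue with OP_PowerOf2, which lets a
+  // target price the divide as a shift (sketch of the intended use).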
+
+  /// \return The number of scalar or vector registers that the target has.
+  /// If 'Vector' is true, it returns the number of vector registers. If it is
+  /// set to false, it returns the number of scalar registers.
+  unsigned getNumberOfRegisters(bool Vector) const;
+
+  /// \return The width of the largest scalar or vector register type.
+  unsigned getRegisterBitWidth(bool Vector) const;
+
+  /// \return The width of the smallest vector register type.
+  unsigned getMinVectorRegisterBitWidth() const;
+
+  /// \return True if the vectorization factor should be chosen to
+  /// make the vector of the smallest element type match the size of a
+  /// vector register. For wider element types, this could result in
+  /// creating vectors that span multiple vector registers.
+  /// If false, the vectorization factor will be chosen based on the
+  /// size of the widest element type.
+  bool shouldMaximizeVectorBandwidth(bool OptSize) const;
+
+  /// \return True if it should be considered for address type promotion.
+  /// \p AllowPromotionWithoutCommonHeader is set to true if promoting \p I is
+  /// profitable without finding other extensions fed by the same input.
+  bool shouldConsiderAddressTypePromotion(
+      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
+
+  /// \return The size of a cache line in bytes.
+  unsigned getCacheLineSize() const;
+
+  /// The possible cache levels
+  enum class CacheLevel {
+    L1D,   // The L1 data cache
+    L2D,   // The L2 data cache
+
+    // We currently do not model L3 caches, as their sizes differ widely
+    // between microarchitectures. Also, we do not yet have a use for L3
+    // cache size modeling.
+  };
+
+  /// \return The size of the cache level in bytes, if available.
+  llvm::Optional<unsigned> getCacheSize(CacheLevel Level) const;
+
+  /// \return The associativity of the cache level, if available.
+  llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
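+
+  // Both cache queries may return an empty Optional; callers must handle
+  // that case. A minimal sketch (chooseTileSize is a hypothetical helper):
+  //   if (llvm::Optional<unsigned> Sz =
+  //           TTI.getCacheSize(TargetTransformInfo::CacheLevel::L1D))
+  //     TileBytes = chooseTileSize(*Sz);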
+
+  /// \return How far ahead of a load we should place the prefetch
+  /// instruction, currently measured in number of instructions.
+  unsigned getPrefetchDistance() const;
+
+  /// \return The minimum stride, in bytes, at which it makes sense to start
+  /// adding SW prefetches; some HW prefetchers can already handle accesses up
+  /// to a certain constant stride. The default is 1, i.e. prefetch with any
+  /// stride.
+  unsigned getMinPrefetchStride() const;
+
+  /// \return The maximum number of iterations to prefetch ahead.  If the
+  /// required number of iterations is more than this number, no prefetching is
+  /// performed.
+  unsigned getMaxPrefetchIterationsAhead() const;
+
+  /// \return The maximum interleave factor that any transform should try to
+  /// perform for this target. This number depends on the level of parallelism
+  /// and the number of execution units in the CPU.
+  unsigned getMaxInterleaveFactor(unsigned VF) const;
+
+  /// This is an approximation of reciprocal throughput of a math/logic op.
+  /// A higher cost indicates less expected throughput.
+  /// From Agner Fog's guides, reciprocal throughput is "the average number of
+  /// clock cycles per instruction when the instructions are not part of a
+  /// limiting dependency chain."
+  /// Therefore, costs should be scaled to account for multiple execution units
+  /// on the target that can process this type of instruction. For example, if
+  /// there are 5 scalar integer units and 2 vector integer units that can
+  /// calculate an 'add' in a single cycle, this model should indicate that the
+  /// cost of the vector add instruction is 2.5 times the cost of the scalar
+  /// add instruction.
+  /// \p Args is an optional argument which holds the operand values of the
+  /// instruction so the TTI can analyze those values, searching for special
+  /// cases or optimizations based on them.
+  int getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
+      OperandValueKind Opd2Info = OK_AnyValue,
+      OperandValueProperties Opd1PropInfo = OP_None,
+      OperandValueProperties Opd2PropInfo = OP_None,
+      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) const;
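+
+  // A minimal usage sketch (hypothetical client code; assumes a
+  // TargetTransformInfo reference TTI and an LLVMContext Ctx are in scope):
+  //   Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
+  //   int VecCost  = TTI.getArithmeticInstrCost(Instruction::Add, VecTy);
+  //   int ScalCost = TTI.getArithmeticInstrCost(Instruction::Add,
+  //                                             Type::getInt32Ty(Ctx));
+  //   // Vectorizing the add pays off when VecCost < 4 * ScalCost.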
+
+  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
+  /// The index and subtype parameters are used by the subvector insertion and
+  /// extraction shuffle kinds.
+  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
+                     Type *SubTp = nullptr) const;
+
+  /// \return The expected cost of cast instructions, such as bitcast, trunc,
+  /// zext, etc. If there is an existing instruction that holds Opcode, it
+  /// may be passed in the 'I' parameter.
+  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                       const Instruction *I = nullptr) const;
+
+  /// \return The expected cost of a sign- or zero-extended vector extract. Use
+  /// -1 to indicate that there is no information about the index value.
+  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
+                               unsigned Index = -1) const;
+
+  /// \return The expected cost of control-flow related instructions such as
+  /// Phi, Ret, Br.
+  int getCFInstrCost(unsigned Opcode) const;
+
+  /// \returns The expected cost of compare and select instructions. If there
+  /// is an existing instruction that holds Opcode, it may be passed in the
+  /// 'I' parameter.
+  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
+                         const Instruction *I = nullptr) const;
+
+  /// \return The expected cost of vector Insert and Extract.
+  /// Use -1 to indicate that there is no information on the index value.
+  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;
+
+  /// \return The cost of Load and Store instructions.
+  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                      unsigned AddressSpace, const Instruction *I = nullptr) const;
+
+  /// \return The cost of masked Load and Store instructions.
+  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                            unsigned AddressSpace) const;
+
+  /// \return The cost of a Gather or Scatter operation.
+  /// \p Opcode - the kind of memory access, Load or Store
+  /// \p DataTy - a vector type of the data to be loaded or stored
+  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
+  /// \p VariableMask - true when the memory access is predicated with a mask
+  ///                   that is not a compile-time constant
+  /// \p Alignment - alignment of a single element
+  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+                             bool VariableMask, unsigned Alignment) const;
+
+  /// \return The cost of the interleaved memory operation.
+  /// \p Opcode is the memory operation code.
+  /// \p VecTy is the vector type of the interleaved access.
+  /// \p Factor is the interleave factor.
+  /// \p Indices are the indices of the interleaved load members (an
+  ///    interleaved load allows gaps).
+  /// \p Alignment is the alignment of the memory operation.
+  /// \p AddressSpace is the address space of the pointer.
+  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
+                                 ArrayRef<unsigned> Indices, unsigned Alignment,
+                                 unsigned AddressSpace) const;
+
+  /// \brief Calculate the cost of performing a vector reduction.
+  ///
+  /// This is the cost of reducing the vector value of type \p Ty to a scalar
+  /// value using the operation denoted by \p Opcode. The form of the reduction
+  /// can either be a pairwise reduction or a reduction that splits the vector
+  /// at every reduction level.
+  ///
+  /// Pairwise:
+  ///  (v0, v1, v2, v3)
+  ///  ((v0+v1), (v2+v3), undef, undef)
+  /// Split:
+  ///  (v0, v1, v2, v3)
+  ///  ((v0+v2), (v1+v3), undef, undef)
+  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
+                                 bool IsPairwiseForm) const;
+  int getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwiseForm,
+                             bool IsUnsigned) const;
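+
+  // For intuition, the "split" form of an add reduction over a <4 x i32>
+  // value %v is typically emitted as a shuffle-and-add ladder (sketch in
+  // LLVM IR):
+  //   %h1 = shufflevector <4 x i32> %v, <4 x i32> undef,
+  //                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+  //   %s1 = add <4 x i32> %v, %h1
+  //   %h2 = shufflevector <4 x i32> %s1, <4 x i32> undef,
+  //                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+  //   %s2 = add <4 x i32> %s1, %h2
+  //   %r  = extractelement <4 x i32> %s2, i32 0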
+
+  /// \returns The cost of Intrinsic instructions. Analyzes the actual
+  /// arguments. Three cases are handled: (1) scalar instruction, (2) vector
+  /// instruction, and (3) scalar instruction to be vectorized with factor VF.
+  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                            ArrayRef<Value *> Args, FastMathFlags FMF,
+                            unsigned VF = 1) const;
+
+  /// \returns The cost of Intrinsic instructions. Types analysis only.
+  /// If ScalarizationCostPassed is UINT_MAX, the cost of scalarizing the
+  /// arguments and the return value will be computed based on types.
+  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                            ArrayRef<Type *> Tys, FastMathFlags FMF,
+                            unsigned ScalarizationCostPassed = UINT_MAX) const;
+
+  /// \returns The cost of Call instructions.
+  int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;
+
+  /// \returns The number of pieces into which the provided type must be
+  /// split during legalization. Zero is returned when the answer is unknown.
+  unsigned getNumberOfParts(Type *Tp) const;
+
+  /// \returns The cost of the address computation. For most targets this can
+  /// be merged into the instruction indexing mode. Some targets might want to
+  /// distinguish between address computation for memory operations on vector
+  /// types and scalar types. Such targets should override this function.
+  /// The 'SE' parameter holds a pointer to the scalar evolution object, which
+  /// is used to get the step value of 'Ptr' in the case of a constant stride.
+  /// The 'Ptr' parameter holds the SCEV of the access pointer.
+  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE = nullptr,
+                                const SCEV *Ptr = nullptr) const;
+
+  /// \returns The cost, if any, of keeping values of the given types alive
+  /// over a callsite.
+  ///
+  /// Some types may require the use of register classes that do not have
+  /// any callee-saved registers, and so would require a spill and fill.
+  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
+
+  /// \returns True if the intrinsic is a supported memory intrinsic. Info
+  /// will contain additional information: whether the intrinsic may read or
+  /// write memory, its volatility, and the pointer. Info is undefined if
+  /// false is returned.
+  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
+
+  /// \returns The maximum element size, in bytes, for an element
+  /// unordered-atomic memory intrinsic.
+  unsigned getAtomicMemIntrinsicMaxElementSize() const;
+
+  /// \returns A value which is the result of the given memory intrinsic.  New
+  /// instructions may be created to extract the result from the given intrinsic
+  /// memory operation.  Returns nullptr if the target cannot create a result
+  /// from the given intrinsic.
+  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+                                           Type *ExpectedType) const;
+
+  /// \returns The type to use in a loop expansion of a memcpy call.
+  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+                                  unsigned SrcAlign, unsigned DestAlign) const;
+
+  /// Calculates the operand types to use when copying \p RemainingBytes of
+  /// memory, where the source and destination alignments are \p SrcAlign and
+  /// \p DestAlign respectively.
+  ///
+  /// \param[out] OpsOut The operand types used to copy the remaining bytes.
+  /// \param RemainingBytes The number of bytes to copy.
+  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+                                         LLVMContext &Context,
+                                         unsigned RemainingBytes,
+                                         unsigned SrcAlign,
+                                         unsigned DestAlign) const;
+
+  /// \returns True if the two functions have compatible attributes for inlining
+  /// purposes.
+  bool areInlineCompatible(const Function *Caller,
+                           const Function *Callee) const;
+
+  /// \brief The type of load/store indexing.
+  enum MemIndexedMode {
+    MIM_Unindexed,  ///< No indexing.
+    MIM_PreInc,     ///< Pre-incrementing.
+    MIM_PreDec,     ///< Pre-decrementing.
+    MIM_PostInc,    ///< Post-incrementing.
+    MIM_PostDec     ///< Post-decrementing.
+  };
+
+  /// \returns True if the specified indexed load for the given type is legal.
+  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
+
+  /// \returns True if the specified indexed store for the given type is legal.
+  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
+
+  /// \returns The bitwidth of the largest vector type that should be used to
+  /// load/store in the given address space.
+  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
+
+  /// \returns True if the load instruction is legal to vectorize.
+  bool isLegalToVectorizeLoad(LoadInst *LI) const;
+
+  /// \returns True if the store instruction is legal to vectorize.
+  bool isLegalToVectorizeStore(StoreInst *SI) const;
+
+  /// \returns True if it is legal to vectorize the given load chain.
+  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+                                   unsigned Alignment,
+                                   unsigned AddrSpace) const;
+
+  /// \returns True if it is legal to vectorize the given store chain.
+  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+                                    unsigned Alignment,
+                                    unsigned AddrSpace) const;
+
+  /// \returns The new vector factor value if the target doesn't support loads
+  /// of \p ChainSizeInBytes bytes or has a better vector factor.
+  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                               unsigned ChainSizeInBytes,
+                               VectorType *VecTy) const;
+
+  /// \returns The new vector factor value if the target doesn't support
+  /// stores of \p ChainSizeInBytes bytes or has a better vector factor.
+  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                unsigned ChainSizeInBytes,
+                                VectorType *VecTy) const;
+
+  /// Flags describing the kind of vector reduction.
+  struct ReductionFlags {
+    ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
+    bool IsMaxOp;  ///< If the op is a min/max kind, true if it's a max operation.
+    bool IsSigned; ///< Whether the operation is a signed int reduction.
+    bool NoNaN;    ///< If op is an fp min/max, whether NaNs may be present.
+  };
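+
+  // E.g. a signed integer max reduction would be described as (sketch):
+  //   ReductionFlags Flags;
+  //   Flags.IsMaxOp = true;
+  //   Flags.IsSigned = true;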
+
+  /// \returns True if the target wants to handle the given reduction idiom in
+  /// the intrinsic form instead of the shuffle form.
+  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
+                             ReductionFlags Flags) const;
+
+  /// \returns True if the target wants to expand the given reduction intrinsic
+  /// into a shuffle sequence.
+  bool shouldExpandReduction(const IntrinsicInst *II) const;
+  /// @}
+
+private:
+  /// \brief Estimate the latency of the specified instruction.
+  /// Returns 1 as the default value.
+  int getInstructionLatency(const Instruction *I) const;
+
+  /// \brief Returns the expected throughput cost of the instruction.
+  /// Returns -1 if the cost is unknown.
+  int getInstructionThroughput(const Instruction *I) const;
+
+  /// \brief The abstract base class used to type erase specific TTI
+  /// implementations.
+  class Concept;
+
+  /// \brief The template model for the base class which wraps a concrete
+  /// implementation in a type erased interface.
+  template <typename T> class Model;
+
+  std::unique_ptr<Concept> TTIImpl;
+};
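+
+// The Concept/Model pair referenced above implements classic type erasure:
+// Concept is an abstract interface, and Model<T> forwards every virtual call
+// to a concrete implementation T held by value. A minimal sketch of the same
+// idiom, using hypothetical names purely for illustration:
+//   struct AnyCost {
+//     struct Concept {
+//       virtual ~Concept() = default;
+//       virtual int cost() = 0;
+//     };
+//     template <typename T> struct Model final : Concept {
+//       T Impl;
+//       Model(T I) : Impl(std::move(I)) {}
+//       int cost() override { return Impl.cost(); }
+//     };
+//     template <typename T>
+//     AnyCost(T I) : Ptr(new Model<T>(std::move(I))) {}
+//     std::unique_ptr<Concept> Ptr;
+//   };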
+
+class TargetTransformInfo::Concept {
+public:
+  virtual ~Concept() = 0;
+  virtual const DataLayout &getDataLayout() const = 0;
+  virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
+  virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
+                         ArrayRef<const Value *> Operands) = 0;
+  virtual int getExtCost(const Instruction *I, const Value *Src) = 0;
+  virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
+  virtual int getCallCost(const Function *F, int NumArgs) = 0;
+  virtual int getCallCost(const Function *F,
+                          ArrayRef<const Value *> Arguments) = 0;
+  virtual unsigned getInliningThresholdMultiplier() = 0;
+  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                               ArrayRef<Type *> ParamTys) = 0;
+  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                               ArrayRef<const Value *> Arguments) = 0;
+  virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                                    unsigned &JTSize) = 0;
+  virtual int
+  getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0;
+  virtual bool hasBranchDivergence() = 0;
+  virtual bool isSourceOfDivergence(const Value *V) = 0;
+  virtual bool isAlwaysUniform(const Value *V) = 0;
+  virtual unsigned getFlatAddressSpace() = 0;
+  virtual bool isLoweredToCall(const Function *F) = 0;
+  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
+                                       UnrollingPreferences &UP) = 0;
+  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
+  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
+  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
+                                     int64_t BaseOffset, bool HasBaseReg,
+                                     int64_t Scale,
+                                     unsigned AddrSpace,
+                                     Instruction *I) = 0;
+  virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
+                             TargetTransformInfo::LSRCost &C2) = 0;
+  virtual bool canMacroFuseCmp() = 0;
+  virtual bool shouldFavorPostInc() const = 0;
+  virtual bool isLegalMaskedStore(Type *DataType) = 0;
+  virtual bool isLegalMaskedLoad(Type *DataType) = 0;
+  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
+  virtual bool isLegalMaskedGather(Type *DataType) = 0;
+  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
+  virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
+  virtual bool prefersVectorizedAddressing() = 0;
+  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
+                                   int64_t BaseOffset, bool HasBaseReg,
+                                   int64_t Scale, unsigned AddrSpace) = 0;
+  virtual bool LSRWithInstrQueries() = 0;
+  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
+  virtual bool isProfitableToHoist(Instruction *I) = 0;
+  virtual bool useAA() = 0;
+  virtual bool isTypeLegal(Type *Ty) = 0;
+  virtual unsigned getJumpBufAlignment() = 0;
+  virtual unsigned getJumpBufSize() = 0;
+  virtual bool shouldBuildLookupTables() = 0;
+  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
+  virtual bool useColdCCForColdCall(Function &F) = 0;
+  virtual unsigned
+  getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) = 0;
+  virtual unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                                    unsigned VF) = 0;
+  virtual bool supportsEfficientVectorElementLoadStore() = 0;
+  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
+  virtual const MemCmpExpansionOptions *enableMemCmpExpansion(
+      bool IsZeroCmp) const = 0;
+  virtual bool enableInterleavedAccessVectorization() = 0;
+  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
+  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                              unsigned BitWidth,
+                                              unsigned AddressSpace,
+                                              unsigned Alignment,
+                                              bool *Fast) = 0;
+  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
+  virtual bool haveFastSqrt(Type *Ty) = 0;
+  virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
+  virtual int getFPOpCost(Type *Ty) = 0;
+  virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                                    Type *Ty) = 0;
+  virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
+  virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                            Type *Ty) = 0;
+  virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+                            Type *Ty) = 0;
+  virtual unsigned getNumberOfRegisters(bool Vector) = 0;
+  virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
+  virtual unsigned getMinVectorRegisterBitWidth() = 0;
+  virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0;
+  virtual bool shouldConsiderAddressTypePromotion(
+      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
+  virtual unsigned getCacheLineSize() = 0;
+  virtual llvm::Optional<unsigned> getCacheSize(CacheLevel Level) = 0;
+  virtual llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) = 0;
+  virtual unsigned getPrefetchDistance() = 0;
+  virtual unsigned getMinPrefetchStride() = 0;
+  virtual unsigned getMaxPrefetchIterationsAhead() = 0;
+  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
+  virtual unsigned
+  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
+                         OperandValueKind Opd2Info,
+                         OperandValueProperties Opd1PropInfo,
+                         OperandValueProperties Opd2PropInfo,
+                         ArrayRef<const Value *> Args) = 0;
+  virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
+                             Type *SubTp) = 0;
+  virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                               const Instruction *I) = 0;
+  virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+                                       VectorType *VecTy, unsigned Index) = 0;
+  virtual int getCFInstrCost(unsigned Opcode) = 0;
+  virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                                 const Instruction *I) = 0;
+  virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
+                                 unsigned Index) = 0;
+  virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                              unsigned AddressSpace, const Instruction *I) = 0;
+  virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
+                                    unsigned Alignment,
+                                    unsigned AddressSpace) = 0;
+  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                     Value *Ptr, bool VariableMask,
+                                     unsigned Alignment) = 0;
+  virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                         unsigned Factor,
+                                         ArrayRef<unsigned> Indices,
+                                         unsigned Alignment,
+                                         unsigned AddressSpace) = 0;
+  virtual int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
+                                         bool IsPairwiseForm) = 0;
+  virtual int getMinMaxReductionCost(Type *Ty, Type *CondTy,
+                                     bool IsPairwiseForm, bool IsUnsigned) = 0;
+  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                    ArrayRef<Type *> Tys, FastMathFlags FMF,
+                                    unsigned ScalarizationCostPassed) = 0;
+  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                    ArrayRef<Value *> Args, FastMathFlags FMF,
+                                    unsigned VF) = 0;
+  virtual int getCallInstrCost(Function *F, Type *RetTy,
+                               ArrayRef<Type *> Tys) = 0;
+  virtual unsigned getNumberOfParts(Type *Tp) = 0;
+  virtual int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
+                                        const SCEV *Ptr) = 0;
+  virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
+  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+                                  MemIntrinsicInfo &Info) = 0;
+  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
+  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+                                                   Type *ExpectedType) = 0;
+  virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+                                          unsigned SrcAlign,
+                                          unsigned DestAlign) const = 0;
+  virtual void getMemcpyLoopResidualLoweringType(
+      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
+      unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
+  virtual bool areInlineCompatible(const Function *Caller,
+                                   const Function *Callee) const = 0;
+  virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
+  virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
+  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
+  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
+  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
+  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+                                           unsigned Alignment,
+                                           unsigned AddrSpace) const = 0;
+  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+                                            unsigned Alignment,
+                                            unsigned AddrSpace) const = 0;
+  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                                       unsigned ChainSizeInBytes,
+                                       VectorType *VecTy) const = 0;
+  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                        unsigned ChainSizeInBytes,
+                                        VectorType *VecTy) const = 0;
+  virtual bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
+                                     ReductionFlags) const = 0;
+  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
+  virtual int getInstructionLatency(const Instruction *I) = 0;
+};
+
+template <typename T>
+class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
+  T Impl;
+
+public:
+  Model(T Impl) : Impl(std::move(Impl)) {}
+  ~Model() override {}
+
+  const DataLayout &getDataLayout() const override {
+    return Impl.getDataLayout();
+  }
+
+  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
+    return Impl.getOperationCost(Opcode, Ty, OpTy);
+  }
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) override {
+    return Impl.getGEPCost(PointeeType, Ptr, Operands);
+  }
+  int getExtCost(const Instruction *I, const Value *Src) override {
+    return Impl.getExtCost(I, Src);
+  }
+  int getCallCost(FunctionType *FTy, int NumArgs) override {
+    return Impl.getCallCost(FTy, NumArgs);
+  }
+  int getCallCost(const Function *F, int NumArgs) override {
+    return Impl.getCallCost(F, NumArgs);
+  }
+  int getCallCost(const Function *F,
+                  ArrayRef<const Value *> Arguments) override {
+    return Impl.getCallCost(F, Arguments);
+  }
+  unsigned getInliningThresholdMultiplier() override {
+    return Impl.getInliningThresholdMultiplier();
+  }
+  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                       ArrayRef<Type *> ParamTys) override {
+    return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
+  }
+  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                       ArrayRef<const Value *> Arguments) override {
+    return Impl.getIntrinsicCost(IID, RetTy, Arguments);
+  }
+  int getUserCost(const User *U, ArrayRef<const Value *> Operands) override {
+    return Impl.getUserCost(U, Operands);
+  }
+  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
+  bool isSourceOfDivergence(const Value *V) override {
+    return Impl.isSourceOfDivergence(V);
+  }
+
+  bool isAlwaysUniform(const Value *V) override {
+    return Impl.isAlwaysUniform(V);
+  }
+
+  unsigned getFlatAddressSpace() override {
+    return Impl.getFlatAddressSpace();
+  }
+
+  bool isLoweredToCall(const Function *F) override {
+    return Impl.isLoweredToCall(F);
+  }
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               UnrollingPreferences &UP) override {
+    return Impl.getUnrollingPreferences(L, SE, UP);
+  }
+  bool isLegalAddImmediate(int64_t Imm) override {
+    return Impl.isLegalAddImmediate(Imm);
+  }
+  bool isLegalICmpImmediate(int64_t Imm) override {
+    return Impl.isLegalICmpImmediate(Imm);
+  }
+  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                             bool HasBaseReg, int64_t Scale,
+                             unsigned AddrSpace,
+                             Instruction *I) override {
+    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
+                                      Scale, AddrSpace, I);
+  }
+  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
+                     TargetTransformInfo::LSRCost &C2) override {
+    return Impl.isLSRCostLess(C1, C2);
+  }
+  bool canMacroFuseCmp() override {
+    return Impl.canMacroFuseCmp();
+  }
+  bool shouldFavorPostInc() const override {
+    return Impl.shouldFavorPostInc();
+  }
+  bool isLegalMaskedStore(Type *DataType) override {
+    return Impl.isLegalMaskedStore(DataType);
+  }
+  bool isLegalMaskedLoad(Type *DataType) override {
+    return Impl.isLegalMaskedLoad(DataType);
+  }
+  bool isLegalMaskedScatter(Type *DataType) override {
+    return Impl.isLegalMaskedScatter(DataType);
+  }
+  bool isLegalMaskedGather(Type *DataType) override {
+    return Impl.isLegalMaskedGather(DataType);
+  }
+  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
+    return Impl.hasDivRemOp(DataType, IsSigned);
+  }
+  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
+    return Impl.hasVolatileVariant(I, AddrSpace);
+  }
+  bool prefersVectorizedAddressing() override {
+    return Impl.prefersVectorizedAddressing();
+  }
+  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                           bool HasBaseReg, int64_t Scale,
+                           unsigned AddrSpace) override {
+    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
+                                     Scale, AddrSpace);
+  }
+  bool LSRWithInstrQueries() override {
+    return Impl.LSRWithInstrQueries();
+  }
+  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
+    return Impl.isTruncateFree(Ty1, Ty2);
+  }
+  bool isProfitableToHoist(Instruction *I) override {
+    return Impl.isProfitableToHoist(I);
+  }
+  bool useAA() override { return Impl.useAA(); }
+  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
+  unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
+  unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
+  bool shouldBuildLookupTables() override {
+    return Impl.shouldBuildLookupTables();
+  }
+  bool shouldBuildLookupTablesForConstant(Constant *C) override {
+    return Impl.shouldBuildLookupTablesForConstant(C);
+  }
+  bool useColdCCForColdCall(Function &F) override {
+    return Impl.useColdCCForColdCall(F);
+  }
+
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert,
+                                    bool Extract) override {
+    return Impl.getScalarizationOverhead(Ty, Insert, Extract);
+  }
+  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                            unsigned VF) override {
+    return Impl.getOperandsScalarizationOverhead(Args, VF);
+  }
+
+  bool supportsEfficientVectorElementLoadStore() override {
+    return Impl.supportsEfficientVectorElementLoadStore();
+  }
+
+  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
+    return Impl.enableAggressiveInterleaving(LoopHasReductions);
+  }
+  const MemCmpExpansionOptions *enableMemCmpExpansion(
+      bool IsZeroCmp) const override {
+    return Impl.enableMemCmpExpansion(IsZeroCmp);
+  }
+  bool enableInterleavedAccessVectorization() override {
+    return Impl.enableInterleavedAccessVectorization();
+  }
+  bool isFPVectorizationPotentiallyUnsafe() override {
+    return Impl.isFPVectorizationPotentiallyUnsafe();
+  }
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                      unsigned BitWidth, unsigned AddressSpace,
+                                      unsigned Alignment, bool *Fast) override {
+    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
+                                               Alignment, Fast);
+  }
+  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
+    return Impl.getPopcntSupport(IntTyWidthInBit);
+  }
+  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
+
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
+    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
+  }
+
+  int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }
+
+  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                            Type *Ty) override {
+    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
+  }
+  int getIntImmCost(const APInt &Imm, Type *Ty) override {
+    return Impl.getIntImmCost(Imm, Ty);
+  }
+  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+                    Type *Ty) override {
+    return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
+  }
+  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+                    Type *Ty) override {
+    return Impl.getIntImmCost(IID, Idx, Imm, Ty);
+  }
+  unsigned getNumberOfRegisters(bool Vector) override {
+    return Impl.getNumberOfRegisters(Vector);
+  }
+  unsigned getRegisterBitWidth(bool Vector) const override {
+    return Impl.getRegisterBitWidth(Vector);
+  }
+  unsigned getMinVectorRegisterBitWidth() override {
+    return Impl.getMinVectorRegisterBitWidth();
+  }
+  bool shouldMaximizeVectorBandwidth(bool OptSize) const override {
+    return Impl.shouldMaximizeVectorBandwidth(OptSize);
+  }
+  bool shouldConsiderAddressTypePromotion(
+      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
+    return Impl.shouldConsiderAddressTypePromotion(
+        I, AllowPromotionWithoutCommonHeader);
+  }
+  unsigned getCacheLineSize() override {
+    return Impl.getCacheLineSize();
+  }
+  llvm::Optional<unsigned> getCacheSize(CacheLevel Level) override {
+    return Impl.getCacheSize(Level);
+  }
+  llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) override {
+    return Impl.getCacheAssociativity(Level);
+  }
+  unsigned getPrefetchDistance() override { return Impl.getPrefetchDistance(); }
+  unsigned getMinPrefetchStride() override {
+    return Impl.getMinPrefetchStride();
+  }
+  unsigned getMaxPrefetchIterationsAhead() override {
+    return Impl.getMaxPrefetchIterationsAhead();
+  }
+  unsigned getMaxInterleaveFactor(unsigned VF) override {
+    return Impl.getMaxInterleaveFactor(VF);
+  }
+  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                            unsigned &JTSize) override {
+    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize);
+  }
+  unsigned
+  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
+                         OperandValueKind Opd2Info,
+                         OperandValueProperties Opd1PropInfo,
+                         OperandValueProperties Opd2PropInfo,
+                         ArrayRef<const Value *> Args) override {
+    return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+                                       Opd1PropInfo, Opd2PropInfo, Args);
+  }
+  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
+                     Type *SubTp) override {
+    return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
+  }
+  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                       const Instruction *I) override {
+    return Impl.getCastInstrCost(Opcode, Dst, Src, I);
+  }
+  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
+                               unsigned Index) override {
+    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
+  }
+  int getCFInstrCost(unsigned Opcode) override {
+    return Impl.getCFInstrCost(Opcode);
+  }
+  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                         const Instruction *I) override {
+    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
+  }
+  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
+    return Impl.getVectorInstrCost(Opcode, Val, Index);
+  }
+  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                      unsigned AddressSpace, const Instruction *I) override {
+    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
+  }
+  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                            unsigned AddressSpace) override {
+    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
+  }
+  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                             Value *Ptr, bool VariableMask,
+                             unsigned Alignment) override {
+    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
+                                       Alignment);
+  }
+  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
+                                 ArrayRef<unsigned> Indices, unsigned Alignment,
+                                 unsigned AddressSpace) override {
+    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
+                                           Alignment, AddressSpace);
+  }
+  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
+                                 bool IsPairwiseForm) override {
+    return Impl.getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm);
+  }
+  int getMinMaxReductionCost(Type *Ty, Type *CondTy,
+                             bool IsPairwiseForm, bool IsUnsigned) override {
+    return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned);
+  }
+  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                            ArrayRef<Type *> Tys, FastMathFlags FMF,
+                            unsigned ScalarizationCostPassed) override {
+    return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
+                                      ScalarizationCostPassed);
+  }
+  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                            ArrayRef<Value *> Args, FastMathFlags FMF,
+                            unsigned VF) override {
+    return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
+  }
+  int getCallInstrCost(Function *F, Type *RetTy,
+                       ArrayRef<Type *> Tys) override {
+    return Impl.getCallInstrCost(F, RetTy, Tys);
+  }
+  unsigned getNumberOfParts(Type *Tp) override {
+    return Impl.getNumberOfParts(Tp);
+  }
+  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
+                                const SCEV *Ptr) override {
+    return Impl.getAddressComputationCost(Ty, SE, Ptr);
+  }
+  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
+    return Impl.getCostOfKeepingLiveOverCall(Tys);
+  }
+  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+                          MemIntrinsicInfo &Info) override {
+    return Impl.getTgtMemIntrinsic(Inst, Info);
+  }
+  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
+    return Impl.getAtomicMemIntrinsicMaxElementSize();
+  }
+  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+                                           Type *ExpectedType) override {
+    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
+  }
+  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+                                  unsigned SrcAlign,
+                                  unsigned DestAlign) const override {
+    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
+  }
+  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+                                         LLVMContext &Context,
+                                         unsigned RemainingBytes,
+                                         unsigned SrcAlign,
+                                         unsigned DestAlign) const override {
+    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
+                                           SrcAlign, DestAlign);
+  }
+  bool areInlineCompatible(const Function *Caller,
+                           const Function *Callee) const override {
+    return Impl.areInlineCompatible(Caller, Callee);
+  }
+  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
+    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
+  }
+  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
+    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
+  }
+  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
+    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
+  }
+  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
+    return Impl.isLegalToVectorizeLoad(LI);
+  }
+  bool isLegalToVectorizeStore(StoreInst *SI) const override {
+    return Impl.isLegalToVectorizeStore(SI);
+  }
+  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+                                   unsigned Alignment,
+                                   unsigned AddrSpace) const override {
+    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
+                                            AddrSpace);
+  }
+  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+                                    unsigned Alignment,
+                                    unsigned AddrSpace) const override {
+    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
+                                             AddrSpace);
+  }
+  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                               unsigned ChainSizeInBytes,
+                               VectorType *VecTy) const override {
+    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
+  }
+  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                unsigned ChainSizeInBytes,
+                                VectorType *VecTy) const override {
+    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
+  }
+  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
+                             ReductionFlags Flags) const override {
+    return Impl.useReductionIntrinsic(Opcode, Ty, Flags);
+  }
+  bool shouldExpandReduction(const IntrinsicInst *II) const override {
+    return Impl.shouldExpandReduction(II);
+  }
+  int getInstructionLatency(const Instruction *I) override {
+    return Impl.getInstructionLatency(I);
+  }
+};
+
+template <typename T>
+TargetTransformInfo::TargetTransformInfo(T Impl)
+    : TTIImpl(new Model<T>(Impl)) {}
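+
+// Any type providing the TTI method surface can therefore be wrapped by
+// value, e.g. (sketch; SomeTargetTTIImpl is a hypothetical implementation):
+//   TargetTransformInfo TTI(SomeTargetTTIImpl(TM, F));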
+
+/// \brief Analysis pass providing the \c TargetTransformInfo.
+///
+/// The core idea of the TargetIRAnalysis is to expose an interface through
+/// which LLVM targets can analyze and provide information about the middle
+/// end's target-independent IR. This supports use cases such as target-aware
+/// cost modeling of IR constructs.
+///
+/// This is a function analysis because much of the cost modeling for targets
+/// is done in a subtarget specific way and LLVM supports compiling different
+/// functions targeting different subtargets in order to support runtime
+/// dispatch according to the observed subtarget.
+class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
+public:
+  typedef TargetTransformInfo Result;
+
+  /// \brief Default construct a target IR analysis.
+  ///
+  /// This will use the module's datalayout to construct a baseline
+  /// conservative TTI result.
+  TargetIRAnalysis();
+
+  /// \brief Construct an IR analysis pass around a target-provided callback.
+  ///
+  /// The callback will be called with a particular function for which the TTI
+  /// is needed and must return a TTI object for that function.
+  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
+
+  // Value semantics. We spell out the constructors for MSVC.
+  TargetIRAnalysis(const TargetIRAnalysis &Arg)
+      : TTICallback(Arg.TTICallback) {}
+  TargetIRAnalysis(TargetIRAnalysis &&Arg)
+      : TTICallback(std::move(Arg.TTICallback)) {}
+  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
+    TTICallback = RHS.TTICallback;
+    return *this;
+  }
+  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
+    TTICallback = std::move(RHS.TTICallback);
+    return *this;
+  }
+
+  Result run(const Function &F, FunctionAnalysisManager &);
+
+private:
+  friend AnalysisInfoMixin<TargetIRAnalysis>;
+  static AnalysisKey Key;
+
+  /// \brief The callback used to produce a result.
+  ///
+  /// We use a completely opaque callback so that targets can provide whatever
+  /// mechanism they desire for constructing the TTI for a given function.
+  ///
+  /// FIXME: Should we really use std::function? It's relatively inefficient.
+  /// It might be possible to arrange for even stateful callbacks to outlive
+  /// the analysis and thus use a function_ref which would be lighter weight.
+  /// This may also be less error prone as the callback is likely to reference
+  /// the external TargetMachine, and that reference needs to never dangle.
+  std::function<Result(const Function &)> TTICallback;
+
+  /// \brief Helper function used as the callback in the default constructor.
+  static Result getDefaultTTI(const Function &F);
+};
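+
+// A driver-side registration sketch for the new pass manager (assumes a
+// TargetMachine *TM is in scope and that TargetMachine::getTargetIRAnalysis
+// returns a TargetIRAnalysis configured for that target):
+//   FunctionAnalysisManager FAM;
+//   FAM.registerPass([&] {
+//     return TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis();
+//   });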
+
+/// \brief Wrapper pass for TargetTransformInfo.
+///
+/// This pass can be constructed from a TTI object which it stores internally
+/// and is queried by passes.
+class TargetTransformInfoWrapperPass : public ImmutablePass {
+  TargetIRAnalysis TIRA;
+  Optional<TargetTransformInfo> TTI;
+
+  virtual void anchor();
+
+public:
+  static char ID;
+
+  /// \brief We must provide a default constructor for the pass but it should
+  /// never be used.
+  ///
+  /// Use the constructor below or call one of the creation routines.
+  TargetTransformInfoWrapperPass();
+
+  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
+
+  TargetTransformInfo &getTTI(const Function &F);
+};
+
+/// \brief Create an analysis pass wrapper around a TTI object.
+///
+/// This analysis pass just holds the TTI instance and makes it available to
+/// clients.
+ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
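+
+// Legacy pass manager usage sketch (assumes a TargetMachine *TM in scope):
+//   legacy::PassManager PM;
+//   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));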
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h
new file mode 100644
index 0000000..df4f853
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -0,0 +1,850 @@
+//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides helpers for the implementation of
+/// a TargetTransformInfo-conforming class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
+#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
+
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+
+namespace llvm {
+
+/// \brief Base class for use as a mix-in that aids implementing
+/// a TargetTransformInfo-compatible class.
+class TargetTransformInfoImplBase {
+protected:
+  typedef TargetTransformInfo TTI;
+
+  const DataLayout &DL;
+
+  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}
+
+public:
+  // Provide value semantics. MSVC requires that we spell all of these out.
+  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
+      : DL(Arg.DL) {}
+  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}
+
+  const DataLayout &getDataLayout() const { return DL; }
+
+  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
+    switch (Opcode) {
+    default:
+      // By default, just classify everything as 'basic'.
+      return TTI::TCC_Basic;
+
+    case Instruction::GetElementPtr:
+      llvm_unreachable("Use getGEPCost for GEP operations!");
+
+    case Instruction::BitCast:
+      assert(OpTy && "Cast instructions must provide the operand type");
+      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
+        // Identity and pointer-to-pointer casts are free.
+        return TTI::TCC_Free;
+
+      // Otherwise, the default basic cost is used.
+      return TTI::TCC_Basic;
+
+    case Instruction::FDiv:
+    case Instruction::FRem:
+    case Instruction::SDiv:
+    case Instruction::SRem:
+    case Instruction::UDiv:
+    case Instruction::URem:
+      return TTI::TCC_Expensive;
+
+    case Instruction::IntToPtr: {
+      // An inttoptr cast is free so long as the input is a legal integer type
+      // which doesn't contain values outside the range of a pointer.
+      unsigned OpSize = OpTy->getScalarSizeInBits();
+      if (DL.isLegalInteger(OpSize) &&
+          OpSize <= DL.getPointerTypeSizeInBits(Ty))
+        return TTI::TCC_Free;
+
+      // Otherwise it's not a no-op.
+      return TTI::TCC_Basic;
+    }
+    case Instruction::PtrToInt: {
+      // A ptrtoint cast is free so long as the result is large enough to store
+      // the pointer, and a legal integer type.
+      unsigned DestSize = Ty->getScalarSizeInBits();
+      if (DL.isLegalInteger(DestSize) &&
+          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
+        return TTI::TCC_Free;
+
+      // Otherwise it's not a no-op.
+      return TTI::TCC_Basic;
+    }
+    case Instruction::Trunc:
+      // trunc to a native type is free (assuming the target has compare and
+      // shift-right of the same width).
+      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
+        return TTI::TCC_Free;
+
+      return TTI::TCC_Basic;
+    }
+  }
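+
+  // Worked examples of the cases above, assuming 64-bit pointers and legal
+  // i32/i64 in the DataLayout (sketch):
+  //   ptrtoint i8* %p to i64  -> TCC_Free  (i64 is legal and wide enough)
+  //   ptrtoint i8* %p to i32  -> TCC_Basic (too narrow to hold a pointer)
+  //   trunc i64 %x to i32     -> TCC_Free  (i32 is a legal integer type)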
+
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) {
+    // In the basic model, we just assume that all-constant GEPs will be folded
+    // into their uses via addressing modes.
+    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
+      if (!isa<Constant>(Operands[Idx]))
+        return TTI::TCC_Basic;
+
+    return TTI::TCC_Free;
+  }
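+
+  // E.g. (sketch):
+  //   getelementptr i32, i32* %p, i64 4   -> TCC_Free  (all-constant indices)
+  //   getelementptr i32, i32* %p, i64 %i  -> TCC_Basic (variable index)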
+
+  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                            unsigned &JTSize) {
+    JTSize = 0;
+    return SI.getNumCases();
+  }
+
+  int getExtCost(const Instruction *I, const Value *Src) {
+    return TTI::TCC_Basic;
+  }
+
+  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
+    assert(FTy && "FunctionType must be provided to this routine.");
+
+    // The target-independent implementation just measures the size of the
+    // function by approximating that each argument will take on average one
+    // instruction to prepare.
+
+    if (NumArgs < 0)
+      // Set the argument number to the number of explicit arguments in the
+      // function.
+      NumArgs = FTy->getNumParams();
+
+    return TTI::TCC_Basic * (NumArgs + 1);
+  }
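+
+  // E.g. a call through a FunctionType with three parameters is modeled as
+  // TCC_Basic * 4: one unit per argument plus one for the call itself.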
+
+  unsigned getInliningThresholdMultiplier() { return 1; }
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Type *> ParamTys) {
+    switch (IID) {
+    default:
+      // Intrinsics rarely (if ever) have normal argument setup constraints.
+      // Model them as having a basic instruction cost.
+      // FIXME: This is wrong for libc intrinsics.
+      return TTI::TCC_Basic;
+
+    case Intrinsic::annotation:
+    case Intrinsic::assume:
+    case Intrinsic::sideeffect:
+    case Intrinsic::dbg_declare:
+    case Intrinsic::dbg_value:
+    case Intrinsic::invariant_start:
+    case Intrinsic::invariant_end:
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+    case Intrinsic::objectsize:
+    case Intrinsic::ptr_annotation:
+    case Intrinsic::var_annotation:
+    case Intrinsic::experimental_gc_result:
+    case Intrinsic::experimental_gc_relocate:
+    case Intrinsic::coro_alloc:
+    case Intrinsic::coro_begin:
+    case Intrinsic::coro_free:
+    case Intrinsic::coro_end:
+    case Intrinsic::coro_frame:
+    case Intrinsic::coro_size:
+    case Intrinsic::coro_suspend:
+    case Intrinsic::coro_param:
+    case Intrinsic::coro_subfn_addr:
+      // These intrinsics don't actually represent code after lowering.
+      return TTI::TCC_Free;
+    }
+  }
+
+  bool hasBranchDivergence() { return false; }
+
+  bool isSourceOfDivergence(const Value *V) { return false; }
+
+  bool isAlwaysUniform(const Value *V) { return false; }
+
+  unsigned getFlatAddressSpace() {
+    return -1;
+  }
+
+  bool isLoweredToCall(const Function *F) {
+    assert(F && "A concrete function must be provided to this routine.");
+
+    // FIXME: These should almost certainly not be handled here, and instead
+    // handled with the help of TLI or the target itself. This was largely
+    // ported from existing analysis heuristics here so that such refactorings
+    // can take place in the future.
+
+    if (F->isIntrinsic())
+      return false;
+
+    if (F->hasLocalLinkage() || !F->hasName())
+      return true;
+
+    StringRef Name = F->getName();
+
+    // These will all likely lower to a single selection DAG node.
+    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
+        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
+        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
+        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
+        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
+        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
+      return false;
+
+    // These are all likely to be optimized into something smaller.
+    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
+        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
+        Name == "floorf" || Name == "ceil" || Name == "round" ||
+        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
+        Name == "llabs")
+      return false;
+
+    return true;
+  }
+
+  void getUnrollingPreferences(Loop *, ScalarEvolution &,
+                               TTI::UnrollingPreferences &) {}
+
+  bool isLegalAddImmediate(int64_t Imm) { return false; }
+
+  bool isLegalICmpImmediate(int64_t Imm) { return false; }
+
+  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                             bool HasBaseReg, int64_t Scale,
+                             unsigned AddrSpace, Instruction *I = nullptr) {
+    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
+    // taken from the implementation of LSR.
+    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
+  }
+
+  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
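+    // Compare the cost components lexicographically: a lower register count
+    // wins outright, ties fall through to AddRec cost, then IV multiplies,
+    // and so on down the tuple.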
+    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
+                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
+           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
+                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
+  }
+
+  bool canMacroFuseCmp() { return false; }
+
+  bool shouldFavorPostInc() const { return false; }
+
+  bool isLegalMaskedStore(Type *DataType) { return false; }
+
+  bool isLegalMaskedLoad(Type *DataType) { return false; }
+
+  bool isLegalMaskedScatter(Type *DataType) { return false; }
+
+  bool isLegalMaskedGather(Type *DataType) { return false; }
+
+  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }
+
+  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }
+
+  bool prefersVectorizedAddressing() { return true; }
+
+  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+    // Guess that all legal addressing modes are free.
+    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
+                              Scale, AddrSpace))
+      return 0;
+    return -1;
+  }
+
+  bool LSRWithInstrQueries() { return false; }
+
+  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
+
+  bool isProfitableToHoist(Instruction *I) { return true; }
+
+  bool useAA() { return false; }
+
+  bool isTypeLegal(Type *Ty) { return false; }
+
+  unsigned getJumpBufAlignment() { return 0; }
+
+  unsigned getJumpBufSize() { return 0; }
+
+  bool shouldBuildLookupTables() { return true; }
+  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }
+
+  bool useColdCCForColdCall(Function &F) { return false; }
+
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
+    return 0;
+  }
+
+  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                            unsigned VF) { return 0; }
+
+  bool supportsEfficientVectorElementLoadStore() { return false; }
+
+  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
+
+  const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
+      bool IsZeroCmp) const {
+    return nullptr;
+  }
+
+  bool enableInterleavedAccessVectorization() { return false; }
+
+  bool isFPVectorizationPotentiallyUnsafe() { return false; }
+
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                      unsigned BitWidth,
+                                      unsigned AddressSpace,
+                                      unsigned Alignment,
+                                      bool *Fast) { return false; }
+
+  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
+    return TTI::PSK_Software;
+  }
+
+  bool haveFastSqrt(Type *Ty) { return false; }
+
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
+
+  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
+
+  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+                            Type *Ty) {
+    return 0;
+  }
+
+  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }
+
+  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+                         Type *Ty) {
+    return TTI::TCC_Free;
+  }
+
+  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+                         Type *Ty) {
+    return TTI::TCC_Free;
+  }
+
+  unsigned getNumberOfRegisters(bool Vector) { return 8; }
+
+  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
+
+  unsigned getMinVectorRegisterBitWidth() { return 128; }
+
+  bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }
+
+  bool
+  shouldConsiderAddressTypePromotion(const Instruction &I,
+                                     bool &AllowPromotionWithoutCommonHeader) {
+    AllowPromotionWithoutCommonHeader = false;
+    return false;
+  }
+
+  unsigned getCacheLineSize() { return 0; }
+
+  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
+    switch (Level) {
+    case TargetTransformInfo::CacheLevel::L1D:
+      LLVM_FALLTHROUGH;
+    case TargetTransformInfo::CacheLevel::L2D:
+      return llvm::Optional<unsigned>();
+    }
+
+    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
+  }
+
+  llvm::Optional<unsigned> getCacheAssociativity(
+    TargetTransformInfo::CacheLevel Level) {
+    switch (Level) {
+    case TargetTransformInfo::CacheLevel::L1D:
+      LLVM_FALLTHROUGH;
+    case TargetTransformInfo::CacheLevel::L2D:
+      return llvm::Optional<unsigned>();
+    }
+
+    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
+  }
+
+  unsigned getPrefetchDistance() { return 0; }
+
+  unsigned getMinPrefetchStride() { return 1; }
+
+  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }
+
+  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+
+  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+                                  TTI::OperandValueKind Opd1Info,
+                                  TTI::OperandValueKind Opd2Info,
+                                  TTI::OperandValueProperties Opd1PropInfo,
+                                  TTI::OperandValueProperties Opd2PropInfo,
+                                  ArrayRef<const Value *> Args) {
+    return 1;
+  }
+
+  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
+                          Type *SubTp) {
+    return 1;
+  }
+
+  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                            const Instruction *I) { return 1; }
+
+  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+                                    VectorType *VecTy, unsigned Index) {
+    return 1;
+  }
+
+  unsigned getCFInstrCost(unsigned Opcode) { return 1; }
+
+  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                              const Instruction *I) {
+    return 1;
+  }
+
+  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+    return 1;
+  }
+
+  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                           unsigned AddressSpace, const Instruction *I) {
+    return 1;
+  }
+
+  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                                 unsigned AddressSpace) {
+    return 1;
+  }
+
+  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+                                  bool VariableMask,
+                                  unsigned Alignment) {
+    return 1;
+  }
+
+  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                      unsigned Factor,
+                                      ArrayRef<unsigned> Indices,
+                                      unsigned Alignment,
+                                      unsigned AddressSpace) {
+    return 1;
+  }
+
+  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
+                                 unsigned ScalarizationCostPassed) {
+    return 1;
+  }
+  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+            ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) {
+    return 1;
+  }
+
+  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
+    return 1;
+  }
+
+  unsigned getNumberOfParts(Type *Tp) { return 0; }
+
+  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
+                                     const SCEV *) {
+    return 0;
+  }
+
+  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }
+
+  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }
+
+  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
+
+  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
+    return false;
+  }
+
+  unsigned getAtomicMemIntrinsicMaxElementSize() const {
+    // Note for overrides: You must ensure for all element unordered-atomic
+    // memory intrinsics that all power-of-2 element sizes up to, and
+    // including, the return value of this method have a corresponding
+    // runtime lib call. These runtime lib call definitions can be found
+    // in RuntimeLibcalls.h
+    return 0;
+  }
+
+  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+                                           Type *ExpectedType) {
+    return nullptr;
+  }
+
+  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+                                  unsigned SrcAlign, unsigned DestAlign) const {
+    return Type::getInt8Ty(Context);
+  }
+
+  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+                                         LLVMContext &Context,
+                                         unsigned RemainingBytes,
+                                         unsigned SrcAlign,
+                                         unsigned DestAlign) const {
+    for (unsigned i = 0; i != RemainingBytes; ++i)
+      OpsOut.push_back(Type::getInt8Ty(Context));
+  }
+
+  bool areInlineCompatible(const Function *Caller,
+                           const Function *Callee) const {
+    return (Caller->getFnAttribute("target-cpu") ==
+            Callee->getFnAttribute("target-cpu")) &&
+           (Caller->getFnAttribute("target-features") ==
+            Callee->getFnAttribute("target-features"));
+  }
+
+  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
+                          const DataLayout &DL) const {
+    return false;
+  }
+
+  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
+                           const DataLayout &DL) const {
+    return false;
+  }
+
+  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }
+
+  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }
+
+  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }
+
+  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+                                   unsigned Alignment,
+                                   unsigned AddrSpace) const {
+    return true;
+  }
+
+  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+                                    unsigned Alignment,
+                                    unsigned AddrSpace) const {
+    return true;
+  }
+
+  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                               unsigned ChainSizeInBytes,
+                               VectorType *VecTy) const {
+    return VF;
+  }
+
+  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                unsigned ChainSizeInBytes,
+                                VectorType *VecTy) const {
+    return VF;
+  }
+
+  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
+                             TTI::ReductionFlags Flags) const {
+    return false;
+  }
+
+  bool shouldExpandReduction(const IntrinsicInst *II) const {
+    return true;
+  }
+
+protected:
+  // Obtain the minimum required size to hold the value (without the sign).
+  // In the case of a vector it returns the min required size for one element.
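+  // For example (illustrative): a ConstantInt with value 255 yields 8 with
+  // isSigned == false, while value -128 yields 7 with isSigned == true; that
+  // is, the magnitude bits excluding the sign bit.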
+  unsigned minRequiredElementSize(const Value* Val, bool &isSigned) {
+    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
+      const auto* VectorValue = cast<Constant>(Val);
+
+      // In the case of a vector we need to pick the max of the min required
+      // size for each element.
+      auto *VT = cast<VectorType>(Val->getType());
+
+      // Assume unsigned elements
+      isSigned = false;
+
+      // The max required size is the total vector width divided by the
+      // number of elements in the vector.
+      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();
+
+      unsigned MinRequiredSize = 0;
+      for(unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
+        if (auto* IntElement =
+              dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
+          bool signedElement = IntElement->getValue().isNegative();
+          // Get the element min required size.
+          unsigned ElementMinRequiredSize =
+            IntElement->getValue().getMinSignedBits() - 1;
+          // If one element is signed then the whole vector is treated as
+          // signed.
+          isSigned |= signedElement;
+          // Save the max required bit size between all the elements.
+          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
+        }
+        else {
+          // Not an integer constant element; fall back to the element width.
+          return MaxRequiredSize;
+        }
+      }
+      return MinRequiredSize;
+    }
+
+    if (const auto* CI = dyn_cast<ConstantInt>(Val)) {
+      isSigned = CI->getValue().isNegative();
+      return CI->getValue().getMinSignedBits() - 1;
+    }
+
+    if (const auto* Cast = dyn_cast<SExtInst>(Val)) {
+      isSigned = true;
+      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
+    }
+
+    if (const auto* Cast = dyn_cast<ZExtInst>(Val)) {
+      isSigned = false;
+      return Cast->getSrcTy()->getScalarSizeInBits();
+    }
+
+    isSigned = false;
+    return Val->getType()->getScalarSizeInBits();
+  }
+
+  bool isStridedAccess(const SCEV *Ptr) {
+    return Ptr && isa<SCEVAddRecExpr>(Ptr);
+  }
+
+  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
+                                            const SCEV *Ptr) {
+    if (!isStridedAccess(Ptr))
+      return nullptr;
+    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
+    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
+  }
+
+  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
+                                       int64_t MergeDistance) {
+    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
+    if (!Step)
+      return false;
+    APInt StrideVal = Step->getAPInt();
+    if (StrideVal.getBitWidth() > 64)
+      return false;
+    // FIXME: Need to take absolute value for negative stride case.
+    return StrideVal.getSExtValue() < MergeDistance;
+  }
+};
+
+/// \brief CRTP base class for use as a mix-in that aids implementing
+/// a TargetTransformInfo-compatible class.
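+///
+/// A minimal sketch of a derived implementation (illustrative only; real
+/// targets typically derive through BasicTTIImplBase instead):
+/// \code
+///   class MyTTIImpl : public TargetTransformInfoImplCRTPBase<MyTTIImpl> {
+///     using BaseT = TargetTransformInfoImplCRTPBase<MyTTIImpl>;
+///   public:
+///     explicit MyTTIImpl(const DataLayout &DL) : BaseT(DL) {}
+///     // Overrides are found statically through the CRTP cast in BaseT.
+///     bool useAA() { return true; }
+///   };
+/// \endcode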
+template <typename T>
+class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
+private:
+  typedef TargetTransformInfoImplBase BaseT;
+
+protected:
+  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
+
+public:
+  using BaseT::getCallCost;
+
+  unsigned getCallCost(const Function *F, int NumArgs) {
+    assert(F && "A concrete function must be provided to this routine.");
+
+    if (NumArgs < 0)
+      // Set the argument number to the number of explicit arguments in the
+      // function.
+      NumArgs = F->arg_size();
+
+    if (Intrinsic::ID IID = F->getIntrinsicID()) {
+      FunctionType *FTy = F->getFunctionType();
+      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
+      return static_cast<T *>(this)
+          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
+    }
+
+    if (!static_cast<T *>(this)->isLoweredToCall(F))
+      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
+                             // directly.
+
+    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
+  }
+
+  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
+    // Simply delegate to generic handling of the call.
+    // FIXME: We should use instsimplify or something else to catch calls which
+    // will constant fold with these arguments.
+    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
+  }
+
+  using BaseT::getGEPCost;
+
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) {
+    const GlobalValue *BaseGV = nullptr;
+    if (Ptr != nullptr) {
+      // TODO: will remove this when pointers have an opaque type.
+      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
+                 PointeeType &&
+             "explicit pointee type doesn't match operand's pointee type");
+      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
+    }
+    bool HasBaseReg = (BaseGV == nullptr);
+
+    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
+    APInt BaseOffset(PtrSizeBits, 0);
+    int64_t Scale = 0;
+
+    auto GTI = gep_type_begin(PointeeType, Operands);
+    Type *TargetType = nullptr;
+
+    // Handle the case where the GEP has no index operands, only the base
+    // pointer, in which case TargetType remains nullptr.
+    if (Operands.empty())
+      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;
+
+    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
+      TargetType = GTI.getIndexedType();
+      // We assume that the cost of Scalar GEP with constant index and the
+      // cost of Vector GEP with splat constant index are the same.
+      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
+      if (!ConstIdx)
+        if (auto Splat = getSplatValue(*I))
+          ConstIdx = dyn_cast<ConstantInt>(Splat);
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        // For structures the index is always a splat or scalar constant.
+        assert(ConstIdx && "Unexpected GEP index");
+        uint64_t Field = ConstIdx->getZExtValue();
+        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+      } else {
+        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
+        if (ConstIdx) {
+          BaseOffset +=
+              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
+        } else {
+          // Needs scale register.
+          if (Scale != 0)
+            // No addressing mode takes two scale registers.
+            return TTI::TCC_Basic;
+          Scale = ElementSize;
+        }
+      }
+    }
+
+    // Assumes the address space is 0 when Ptr is nullptr.
+    unsigned AS =
+        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
+
+    if (static_cast<T *>(this)->isLegalAddressingMode(
+            TargetType, const_cast<GlobalValue *>(BaseGV),
+            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale, AS))
+      return TTI::TCC_Free;
+    return TTI::TCC_Basic;
+  }
+
+  using BaseT::getIntrinsicCost;
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<const Value *> Arguments) {
+    // Delegate to the generic intrinsic handling code. This mostly provides an
+    // opportunity for targets to (for example) special case the cost of
+    // certain intrinsics based on constants used as arguments.
+    SmallVector<Type *, 8> ParamTys;
+    ParamTys.reserve(Arguments.size());
+    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
+      ParamTys.push_back(Arguments[Idx]->getType());
+    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
+  }
+
+  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
+    if (isa<PHINode>(U))
+      return TTI::TCC_Free; // Model all PHI nodes as free.
+
+    // Static alloca doesn't generate target instructions.
+    if (auto *A = dyn_cast<AllocaInst>(U))
+      if (A->isStaticAlloca())
+        return TTI::TCC_Free;
+
+    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
+                                                GEP->getPointerOperand(),
+                                                Operands.drop_front());
+    }
+
+    if (auto CS = ImmutableCallSite(U)) {
+      const Function *F = CS.getCalledFunction();
+      if (!F) {
+        // Just use the called value type.
+        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
+        return static_cast<T *>(this)
+            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
+      }
+
+      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
+      return static_cast<T *>(this)->getCallCost(F, Arguments);
+    }
+
+    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
+      // The result of a cmp instruction is often extended (to be used by
+      // other cmp instructions, logical operations, or return instructions).
+      // These extensions are usually nops on most sane targets.
+      if (isa<CmpInst>(CI->getOperand(0)))
+        return TTI::TCC_Free;
+      if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
+        return static_cast<T *>(this)->getExtCost(CI, Operands.back());
+    }
+
+    return static_cast<T *>(this)->getOperationCost(
+        Operator::getOpcode(U), U->getType(),
+        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
+  }
+
+  int getInstructionLatency(const Instruction *I) {
+    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
+                                           I->value_op_end());
+    if (getUserCost(I, Operands) == TTI::TCC_Free)
+      return 0;
+
+    if (isa<LoadInst>(I))
+      return 4;
+
+    Type *DstTy = I->getType();
+
+    // Usually an intrinsic is a simple instruction.
+    // A real function call is much slower.
+    if (auto *CI = dyn_cast<CallInst>(I)) {
+      const Function *F = CI->getCalledFunction();
+      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
+        return 40;
+      // Some intrinsics return a value and a flag; we use the value type
+      // to decide the latency.
+      if (StructType* StructTy = dyn_cast<StructType>(DstTy))
+        DstTy = StructTy->getElementType(0);
+      // Fall through to simple instructions.
+    }
+
+    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
+      DstTy = VectorTy->getElementType();
+    if (DstTy->isFloatingPointTy())
+      return 3;
+
+    return 1;
+  }
+};
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/Trace.h b/linux-x64/clang/include/llvm/Analysis/Trace.h
new file mode 100644
index 0000000..b05d384
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Trace.h
@@ -0,0 +1,112 @@
+//===- llvm/Analysis/Trace.h - Represent one trace of LLVM code -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents a single trace of LLVM basic blocks.  A trace is a
+// single entry, multiple exit, region of code that is often hot.  Trace-based
+// optimizations treat traces almost like they are a large, strange, basic
+// block: because the trace path is assumed to be hot, optimizations for the
+// fall-through path are made at the expense of the non-fall-through paths.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TRACE_H
+#define LLVM_ANALYSIS_TRACE_H
+
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class Function;
+class Module;
+class raw_ostream;
+
+class Trace {
+  using BasicBlockListType = std::vector<BasicBlock *>;
+
+  BasicBlockListType BasicBlocks;
+
+public:
+  /// Trace ctor - Make a new trace from a vector of basic blocks,
+  /// residing in the function which is the parent of the first
+  /// basic block in the vector.
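+  ///
+  /// A minimal construction sketch (illustrative; the blocks are assumed to
+  /// belong to the same parent function):
+  /// \code
+  ///   std::vector<BasicBlock *> Blocks = {Entry, Hot, Exit};
+  ///   Trace T(Blocks);
+  ///   BasicBlock *First = T.getEntryBasicBlock();
+  /// \endcode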
+  Trace(const std::vector<BasicBlock *> &vBB) : BasicBlocks (vBB) {}
+
+  /// getEntryBasicBlock - Return the entry basic block (first block)
+  /// of the trace.
+  BasicBlock *getEntryBasicBlock () const { return BasicBlocks[0]; }
+
+  /// operator[]/getBlock - Return basic block N in the trace.
+  BasicBlock *operator[](unsigned i) const { return BasicBlocks[i]; }
+  BasicBlock *getBlock(unsigned i)   const { return BasicBlocks[i]; }
+
+  /// getFunction - Return this trace's parent function.
+  Function *getFunction () const;
+
+  /// getModule - Return the Module that contains this trace's parent
+  /// function.
+  Module *getModule () const;
+
+  /// getBlockIndex - Return the index of the specified basic block in the
+  /// trace, or -1 if it is not in the trace.
+  int getBlockIndex(const BasicBlock *X) const {
+    for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
+      if (BasicBlocks[i] == X)
+        return i;
+    return -1;
+  }
+
+  /// contains - Returns true if this trace contains the given basic
+  /// block.
+  bool contains(const BasicBlock *X) const {
+    return getBlockIndex(X) != -1;
+  }
+
+  /// Returns true if B1 occurs before B2 in the trace, or if it is the same
+  /// block as B2.  Both blocks must be in the trace.
+  bool dominates(const BasicBlock *B1, const BasicBlock *B2) const {
+    int B1Idx = getBlockIndex(B1), B2Idx = getBlockIndex(B2);
+    assert(B1Idx != -1 && B2Idx != -1 && "Block is not in the trace!");
+    return B1Idx <= B2Idx;
+  }
+
+  // BasicBlock iterators...
+  using iterator = BasicBlockListType::iterator;
+  using const_iterator = BasicBlockListType::const_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  iterator                begin()       { return BasicBlocks.begin(); }
+  const_iterator          begin() const { return BasicBlocks.begin(); }
+  iterator                end  ()       { return BasicBlocks.end();   }
+  const_iterator          end  () const { return BasicBlocks.end();   }
+
+  reverse_iterator       rbegin()       { return BasicBlocks.rbegin(); }
+  const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
+  reverse_iterator       rend  ()       { return BasicBlocks.rend();   }
+  const_reverse_iterator rend  () const { return BasicBlocks.rend();   }
+
+  unsigned                 size() const { return BasicBlocks.size(); }
+  bool                    empty() const { return BasicBlocks.empty(); }
+
+  iterator erase(iterator q)               { return BasicBlocks.erase (q); }
+  iterator erase(iterator q1, iterator q2) { return BasicBlocks.erase (q1, q2); }
+
+  /// print - Write trace to output stream.
+  void print(raw_ostream &O) const;
+
+  /// dump - Debugger convenience method; writes trace to standard error
+  /// output stream.
+  void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_TRACE_H
diff --git a/linux-x64/clang/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/TypeBasedAliasAnalysis.h
new file mode 100644
index 0000000..7fcfdb3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -0,0 +1,94 @@
+//===- TypeBasedAliasAnalysis.h - Type-Based Alias Analysis -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This is the interface for a metadata-based TBAA. See the source file for
+/// details on the algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
+#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+
+class Function;
+class MDNode;
+class MemoryLocation;
+
+/// A simple AA result that uses TBAA metadata to answer queries.
+class TypeBasedAAResult : public AAResultBase<TypeBasedAAResult> {
+  friend AAResultBase<TypeBasedAAResult>;
+
+public:
+  /// Handle invalidation events from the new pass manager.
+  ///
+  /// By definition, this result is stateless and so remains valid.
+  bool invalidate(Function &, const PreservedAnalyses &,
+                  FunctionAnalysisManager::Invalidator &) {
+    return false;
+  }
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+  FunctionModRefBehavior getModRefBehavior(const Function *F);
+  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+private:
+  bool Aliases(const MDNode *A, const MDNode *B) const;
+  bool PathAliases(const MDNode *A, const MDNode *B) const;
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
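+///
+/// A minimal sketch of querying the result under the new pass manager
+/// (illustrative; FAM is a FunctionAnalysisManager with this analysis
+/// registered):
+/// \code
+///   TypeBasedAAResult &TBAA = FAM.getResult<TypeBasedAA>(F);
+/// \endcode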
+class TypeBasedAA : public AnalysisInfoMixin<TypeBasedAA> {
+  friend AnalysisInfoMixin<TypeBasedAA>;
+
+  static AnalysisKey Key;
+
+public:
+  using Result = TypeBasedAAResult;
+
+  TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy wrapper pass to provide the TypeBasedAAResult object.
+class TypeBasedAAWrapperPass : public ImmutablePass {
+  std::unique_ptr<TypeBasedAAResult> Result;
+
+public:
+  static char ID;
+
+  TypeBasedAAWrapperPass();
+
+  TypeBasedAAResult &getResult() { return *Result; }
+  const TypeBasedAAResult &getResult() const { return *Result; }
+
+  bool doInitialization(Module &M) override;
+  bool doFinalization(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createTypeBasedAAWrapperPass - This pass implements metadata-based
+// type-based alias analysis.
+//
+ImmutablePass *createTypeBasedAAWrapperPass();
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h b/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h
new file mode 100644
index 0000000..422e153
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h
@@ -0,0 +1,53 @@
+//===- TypeMetadataUtils.h - Utilities related to type metadata --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions that make it easier to manipulate type metadata
+// for devirtualization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TYPEMETADATAUTILS_H
+#define LLVM_ANALYSIS_TYPEMETADATAUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CallSite.h"
+
+namespace llvm {
+
+/// The type of CFI jumptable needed for a function.
+enum CfiFunctionLinkage {
+  CFL_Definition = 0,
+  CFL_Declaration = 1,
+  CFL_WeakDeclaration = 2
+};
+
+/// A call site that could be devirtualized.
+struct DevirtCallSite {
+  /// The offset from the address point to the virtual function.
+  uint64_t Offset;
+  /// The call site itself.
+  CallSite CS;
+};
+
+/// Given a call to the intrinsic @llvm.type.test, find all devirtualizable
+/// call sites based on the call and return them in DevirtCalls.
+void findDevirtualizableCallsForTypeTest(
+    SmallVectorImpl<DevirtCallSite> &DevirtCalls,
+    SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI);
+
+/// Given a call to the intrinsic @llvm.type.checked.load, find all
+/// devirtualizable call sites based on the call and return them in DevirtCalls.
+void findDevirtualizableCallsForTypeCheckedLoad(
+    SmallVectorImpl<DevirtCallSite> &DevirtCalls,
+    SmallVectorImpl<Instruction *> &LoadedPtrs,
+    SmallVectorImpl<Instruction *> &Preds, bool &HasNonCallUses,
+    const CallInst *CI);
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_TYPEMETADATAUTILS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/Utils/Local.h b/linux-x64/clang/include/llvm/Analysis/Utils/Local.h
new file mode 100644
index 0000000..42a654d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/Utils/Local.h
@@ -0,0 +1,507 @@
+//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform various local transformations to the
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
+#define LLVM_ANALYSIS_UTILS_LOCAL_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <limits>
+
+namespace llvm {
+
+class AllocaInst;
+class AssumptionCache;
+class BasicBlock;
+class BranchInst;
+class CallInst;
+class DbgInfoIntrinsic;
+class DbgValueInst;
+class DIBuilder;
+class Function;
+class Instruction;
+class LazyValueInfo;
+class LoadInst;
+class MDNode;
+class PHINode;
+class StoreInst;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// A set of parameters used to control the transforms in the SimplifyCFG pass.
+/// Options may change depending on the position in the optimization pipeline.
+/// For example, canonical form that includes switches and branches may later be
+/// replaced by lookup tables and selects.
+struct SimplifyCFGOptions {
+  int BonusInstThreshold;
+  bool ForwardSwitchCondToPhi;
+  bool ConvertSwitchToLookupTable;
+  bool NeedCanonicalLoop;
+  bool SinkCommonInsts;
+  AssumptionCache *AC;
+
+  SimplifyCFGOptions(unsigned BonusThreshold = 1,
+                     bool ForwardSwitchCond = false,
+                     bool SwitchToLookup = false, bool CanonicalLoops = true,
+                     bool SinkCommon = false,
+                     AssumptionCache *AssumpCache = nullptr)
+      : BonusInstThreshold(BonusThreshold),
+        ForwardSwitchCondToPhi(ForwardSwitchCond),
+        ConvertSwitchToLookupTable(SwitchToLookup),
+        NeedCanonicalLoop(CanonicalLoops),
+        SinkCommonInsts(SinkCommon),
+        AC(AssumpCache) {}
+
+  // Support 'builder' pattern to set members by name at construction time.
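+  //
+  // A minimal sketch of the intended use (illustrative):
+  // \code
+  //   SimplifyCFGOptions Opts = SimplifyCFGOptions()
+  //                                 .bonusInstThreshold(2)
+  //                                 .convertSwitchToLookupTable(true);
+  // \endcode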
+  SimplifyCFGOptions &bonusInstThreshold(int I) {
+    BonusInstThreshold = I;
+    return *this;
+  }
+  SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
+    ForwardSwitchCondToPhi = B;
+    return *this;
+  }
+  SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
+    ConvertSwitchToLookupTable = B;
+    return *this;
+  }
+  SimplifyCFGOptions &needCanonicalLoops(bool B) {
+    NeedCanonicalLoop = B;
+    return *this;
+  }
+  SimplifyCFGOptions &sinkCommonInsts(bool B) {
+    SinkCommonInsts = B;
+    return *this;
+  }
+  SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
+    AC = Cache;
+    return *this;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//  Local constant propagation.
+//
+
+/// If a terminator instruction is predicated on a constant value, convert it
+/// into an unconditional branch to the constant destination.
+/// This is a nontrivial operation because the successors of this basic block
+/// must have their PHI nodes updated.
+/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
+/// conditions and indirectbr addresses this might make dead if
+/// DeleteDeadConditions is true.
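+///
+/// For example (illustrative IR):
+/// \code
+///   br i1 true, label %T, label %F   ; becomes:  br label %T
+/// \endcode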
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+                            const TargetLibraryInfo *TLI = nullptr,
+                            DeferredDominance *DDT = nullptr);
+
+//===----------------------------------------------------------------------===//
+//  Local dead code elimination.
+//
+
+/// Return true if the result produced by the instruction is not used, and the
+/// instruction has no side effects.
+bool isInstructionTriviallyDead(Instruction *I,
+                                const TargetLibraryInfo *TLI = nullptr);
+
+/// Return true if the result produced by the instruction would have no side
+/// effects if it was not used. This is equivalent to checking whether
+/// isInstructionTriviallyDead would be true if the use count was 0.
+bool wouldInstructionBeTriviallyDead(Instruction *I,
+                                     const TargetLibraryInfo *TLI = nullptr);
+
+/// If the specified value is a trivially dead instruction, delete it.
+/// If that makes any of its operands trivially dead, delete them too,
+/// recursively. Return true if any instructions were deleted.
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+                                        const TargetLibraryInfo *TLI = nullptr);
+
+/// If the specified value is an effectively dead PHI node, due to being a
+/// def-use chain of single-use nodes that either forms a cycle or is terminated
+/// by a trivially dead instruction, delete it. If that makes any of its
+/// operands trivially dead, delete them too, recursively. Return true if a
+/// change was made.
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+                                  const TargetLibraryInfo *TLI = nullptr);
+
+/// Scan the specified basic block and try to simplify any instructions in it
+/// and recursively delete dead instructions.
+///
+/// This returns true if it changed the code. Note that it can delete
+/// instructions in other blocks as well as in this block.
+bool SimplifyInstructionsInBlock(BasicBlock *BB,
+                                 const TargetLibraryInfo *TLI = nullptr);
+
+//===----------------------------------------------------------------------===//
+//  Control Flow Graph Restructuring.
+//
+
+/// Like BasicBlock::removePredecessor, this method is called when we're about
+/// to delete Pred as a predecessor of BB. If BB contains any PHI nodes, this
+/// drops the entries in the PHI nodes for Pred.
+///
+/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
+/// nodes that collapse into identity values.  For example, if we have:
+///   x = phi(1, 0, 0, 0)
+///   y = and x, z
+///
+/// .. and delete the predecessor corresponding to the '1', this will attempt to
+/// recursively fold the 'and' to 0.
+void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
+                                  DeferredDominance *DDT = nullptr);
+
+/// BB is a block with one predecessor and its predecessor is known to have one
+/// successor (BB!). Eliminate the edge between them, moving the instructions in
+/// the predecessor into BB. This deletes the predecessor block.
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr,
+                                 DeferredDominance *DDT = nullptr);
+
+/// BB is known to contain an unconditional branch, and contains no instructions
+/// other than PHI nodes, potential debug intrinsics and the branch. If
+/// possible, eliminate BB by rewriting all the predecessors to branch to the
+/// successor block and return true. If we can't transform, return false.
+bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
+                                             DeferredDominance *DDT = nullptr);
+
+/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
+/// to be clever about PHI nodes which differ only in the order of the incoming
+/// values, but instcombine orders them so it usually won't matter.
+bool EliminateDuplicatePHINodes(BasicBlock *BB);
+
+/// This function is used to do simplification of a CFG.  For example, it
+/// adjusts branches to branches to eliminate the extra hop, it eliminates
+/// unreachable basic blocks, and does other peephole optimization of the CFG.
+/// It returns true if a modification was made, possibly deleting the basic
+/// block that was pointed to. LoopHeaders is an optional input parameter
+/// providing the set of loop headers that SimplifyCFG should not eliminate.
+bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+                 const SimplifyCFGOptions &Options = {},
+                 SmallPtrSetImpl<BasicBlock *> *LoopHeaders = nullptr);
+
+/// This function is used to flatten a CFG. For example, it uses parallel-and
+/// and parallel-or mode to collapse if-conditions and merge if-regions with
+/// identical statements.
+bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
+
+/// If this basic block is ONLY a setcc and a branch, and if a predecessor
+/// branches to us and one of our successors, fold the setcc into the
+/// predecessor and use logical operations to pick the right destination.
+bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
+
+/// This function takes a virtual register computed by an Instruction and
+/// replaces it with a slot in the stack frame, allocated via alloca.
+/// This allows the CFG to be changed around without fear of invalidating the
+/// SSA information for the value. It returns the pointer to the alloca inserted
+/// to create a stack slot for X.
+AllocaInst *DemoteRegToStack(Instruction &X,
+                             bool VolatileLoads = false,
+                             Instruction *AllocaPoint = nullptr);
+
+/// This function takes a virtual register computed by a phi node and replaces
+/// it with a slot in the stack frame, allocated via alloca. The phi node is
+/// deleted and it returns the pointer to the alloca inserted.
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
+
+/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
+/// the owning object can be modified and has an alignment less than \p
+/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
+/// cannot be increased, the known alignment of the value is returned.
+///
+/// It is not always possible to modify the alignment of the underlying object,
+/// so if alignment is important, a more reliable approach is to simply align
+/// all global variables and allocation instructions to their preferred
+/// alignment from the beginning.
+unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+                                    const DataLayout &DL,
+                                    const Instruction *CxtI = nullptr,
+                                    AssumptionCache *AC = nullptr,
+                                    const DominatorTree *DT = nullptr);
+
+/// Try to infer an alignment for the specified pointer.
+inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
+                                  const Instruction *CxtI = nullptr,
+                                  AssumptionCache *AC = nullptr,
+                                  const DominatorTree *DT = nullptr) {
+  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
+}
+
+/// Given a getelementptr instruction/constantexpr, emit the code necessary to
+/// compute the offset from the base pointer (without adding in the base
+/// pointer). Return the result as a signed integer of intptr size.
+/// When NoAssumptions is true, no assumptions about index computation not
+/// overflowing are made.
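+///
+/// A minimal usage sketch (illustrative; GEP, DL, and the insertion point are
+/// assumed to be in scope):
+/// \code
+///   IRBuilder<> B(InsertPt);
+///   Value *Offset = EmitGEPOffset(&B, DL, GEP);
+/// \endcode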
+template <typename IRBuilderTy>
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
+                     bool NoAssumptions = false) {
+  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
+  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
+  Value *Result = Constant::getNullValue(IntPtrTy);
+
+  // If the GEP is inbounds, we know that none of the addressing operations will
+  // overflow in an unsigned sense.
+  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
+
+  // Build a mask for high order bits.
+  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
+  uint64_t PtrSizeMask =
+      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
+
+  gep_type_iterator GTI = gep_type_begin(GEP);
+  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+       ++i, ++GTI) {
+    Value *Op = *i;
+    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+    if (Constant *OpC = dyn_cast<Constant>(Op)) {
+      if (OpC->isZeroValue())
+        continue;
+
+      // Handle a struct index, which adds its field offset to the pointer.
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        if (OpC->getType()->isVectorTy())
+          OpC = OpC->getSplatValue();
+
+        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
+        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
+
+        if (Size)
+          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+                                      GEP->getName()+".offs");
+        continue;
+      }
+
+      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+      // Emit an add instruction.
+      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+      continue;
+    }
+    // Convert to correct type.
+    if (Op->getType() != IntPtrTy)
+      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+    if (Size != 1) {
+      // We'll let instcombine(mul) convert this to a shl if possible.
+      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+                              GEP->getName()+".idx", isInBounds /*NUW*/);
+    }
+
+    // Emit an add instruction.
+    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+  }
+  return Result;
+}
+
+//===----------------------------------------------------------------------===//
+//  Dbg Intrinsic utilities
+//
+
+/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
+/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+                                     StoreInst *SI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
+/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+                                     LoadInst *LI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
+/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+                                     PHINode *LI, DIBuilder &Builder);
+
+/// Lowers llvm.dbg.declare intrinsics into appropriate set of
+/// llvm.dbg.value intrinsics.
+bool LowerDbgDeclare(Function &F);
+
+/// Propagate dbg.value intrinsics through the newly inserted PHIs.
+void insertDebugValuesForPHIs(BasicBlock *BB,
+                              SmallVectorImpl<PHINode *> &InsertedPHIs);
+
+/// Finds all intrinsics declaring local variables as living in the memory that
+/// 'V' points to. This may include a mix of dbg.declare and
+/// dbg.addr intrinsics.
+TinyPtrVector<DbgInfoIntrinsic *> FindDbgAddrUses(Value *V);
+
+/// Finds the llvm.dbg.value intrinsics describing a value.
+void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
+
+/// Finds the debug info intrinsics describing a value.
+void findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgInsts, Value *V);
+
+/// Replaces llvm.dbg.declare instruction when the address it
+/// describes is replaced with a new value. If Deref is true, an
+/// additional DW_OP_deref is prepended to the expression. If Offset
+/// is non-zero, a constant displacement is added to the expression
+/// (between the optional Deref operations). Offset can be negative.
+bool replaceDbgDeclare(Value *Address, Value *NewAddress,
+                       Instruction *InsertBefore, DIBuilder &Builder,
+                       bool DerefBefore, int Offset, bool DerefAfter);
+
+/// Replaces llvm.dbg.declare instruction when the alloca it describes
+/// is replaced with a new value. If Deref is true, an additional
+/// DW_OP_deref is prepended to the expression. If Offset is non-zero,
+/// a constant displacement is added to the expression (between the
+/// optional Deref operations). Offset can be negative. The new
+/// llvm.dbg.declare is inserted immediately before AI.
+bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+                                DIBuilder &Builder, bool DerefBefore,
+                                int Offset, bool DerefAfter);
+
+/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
+/// is replaced with a new value. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the mandatory Deref). Offset can be
+/// negative. New llvm.dbg.value instructions are inserted at the locations of
+/// the instructions they replace.
+void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+                              DIBuilder &Builder, int Offset = 0);
+
+/// Assuming the instruction \p I is going to be deleted, attempt to salvage any
+/// dbg.value intrinsics referring to \p I by rewriting its effect into a
+/// DIExpression.
+void salvageDebugInfo(Instruction &I);
+
+/// Remove all instructions from a basic block other than its terminator
+/// and any present EH pad instructions.
+unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
+
+/// Insert an unreachable instruction before the specified
+/// instruction, making it and the rest of the code in the block dead.
+unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
+                             bool PreserveLCSSA = false,
+                             DeferredDominance *DDT = nullptr);
+
+/// Convert the CallInst to InvokeInst with the specified unwind edge basic
+/// block.  This also splits the basic block where CI is located, because
+/// InvokeInst is a terminator instruction.  Returns the newly split basic
+/// block.
+BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
+                                             BasicBlock *UnwindEdge);
+
+/// Replace 'BB's terminator with one that does not have an unwind successor
+/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
+/// successor.
+///
+/// \param BB  Block whose terminator will be replaced.  Its terminator must
+///            have an unwind successor.
+void removeUnwindEdge(BasicBlock *BB, DeferredDominance *DDT = nullptr);
+
+/// Remove all blocks that can not be reached from the function's entry.
+///
+/// Returns true if any basic block was removed.
+bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
+                             DeferredDominance *DDT = nullptr);
+
+/// Combine the metadata of two instructions so that K can replace J.
+///
+/// Metadata not listed as known via KnownIDs is removed.
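+///
+/// A minimal usage sketch (illustrative; K and J are instructions where K is
+/// to replace J):
+/// \code
+///   unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_range};
+///   combineMetadata(K, J, KnownIDs);
+/// \endcode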
+void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
+
+/// Combine the metadata of two instructions so that K can replace J. This
+/// specifically handles the case of CSE-like transformations.
+///
+/// Unknown metadata is removed.
+void combineMetadataForCSE(Instruction *K, const Instruction *J);
+
+/// Replace each use of 'From' with 'To', if that use does not belong to the
+/// basic block where 'From' is defined. Returns the number of replacements
+/// made.
+unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
+
+/// Replace each use of 'From' with 'To' if that use is dominated by
+/// the given edge.  Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+                                  const BasicBlockEdge &Edge);
+/// Replace each use of 'From' with 'To' if that use is dominated by
+/// the end of the given BasicBlock. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+                                  const BasicBlock *BB);
+
+/// Return true if the CallSite CS calls a gc leaf function.
+///
+/// A leaf function is a function that does not safepoint the thread during its
+/// execution.  During a call or invoke to such a function, the caller's stack
+/// does not have to be made parseable.
+///
+/// Most passes can and should ignore this information, and it is only used
+/// during lowering by the GC infrastructure.
+bool callsGCLeafFunction(ImmutableCallSite CS, const TargetLibraryInfo &TLI);
+
+/// Copy a nonnull metadata node to a new load instruction.
+///
+/// This handles mapping it to range metadata if the new load is an integer
+/// load instead of a pointer load.
+void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);
+
+/// Copy a range metadata node to a new load instruction.
+///
+/// This handles mapping it to nonnull metadata if the new load is a pointer
+/// load instead of an integer load and the range doesn't cover null.
+void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
+                       LoadInst &NewLI);
+
+//===----------------------------------------------------------------------===//
+//  Intrinsic pattern matching
+//
+
+/// Try to match a bswap or bitreverse idiom.
+///
+/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
+/// instructions are returned in \c InsertedInsts. They will all have been added
+/// to a basic block.
+///
+/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
+/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
+/// to BW / 4 nodes to be searched, so is significantly faster.
+///
+/// This function returns true on a successful match or false otherwise.
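+///
+/// For example (illustrative), a 32-bit bswap idiom such as
+/// \code
+///   (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)
+/// \endcode
+/// can be replaced with a single call to the llvm.bswap.i32 intrinsic.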
+bool recognizeBSwapOrBitReverseIdiom(
+    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
+    SmallVectorImpl<Instruction *> &InsertedInsts);
+
+//===----------------------------------------------------------------------===//
+//  Sanitizer utilities
+//
+
+/// Given a CallInst, check if it calls a string function known to CodeGen,
+/// and mark it with NoBuiltin if so.  To be used by sanitizers that intend
+/// to intercept string functions and want to avoid converting them to target
+/// specific instructions.
+void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
+                                            const TargetLibraryInfo *TLI);
+
+//===----------------------------------------------------------------------===//
+//  Transform predicates
+//
+
+/// Given an instruction, is it legal to set operand OpIdx to a non-constant
+/// value?
+bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOCAL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ValueLattice.h b/linux-x64/clang/include/llvm/Analysis/ValueLattice.h
new file mode 100644
index 0000000..c943fd1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ValueLattice.h
@@ -0,0 +1,321 @@
+//===- ValueLattice.h - Value constraint analysis ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_VALUELATTICE_H
+#define LLVM_ANALYSIS_VALUELATTICE_H
+
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+
+//===----------------------------------------------------------------------===//
+//                               ValueLatticeElement
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+
+/// This class represents lattice values for constants.
+///
+/// FIXME: This is basically just for bringup, this can be made a lot more rich
+/// in the future.
+class ValueLatticeElement {
+  enum ValueLatticeElementTy {
+    /// This Value has no known value yet.  As a result, this implies the
+    /// producing instruction is dead.  Caution: We use this as the starting
+    /// state in our local meet rules.  In this usage, it's taken to mean
+    /// "nothing known yet".
+    undefined,
+
+    /// This Value has a specific constant value.  (For constant integers,
+    /// constantrange is used instead.  Integer typed constantexprs can appear
+    /// as constant.)
+    constant,
+
+    /// This Value is known to not have the specified value.  (For constant
+    /// integers, constantrange is used instead.  As above, integer typed
+    /// constantexprs can appear here.)
+    notconstant,
+
+    /// The Value falls within this range. (Used only for integer typed values.)
+    constantrange,
+
+    /// We cannot precisely model the dynamic values this value might take.
+    overdefined
+  };
+
+  ValueLatticeElementTy Tag;
+
+  /// The union either stores a pointer to a constant or a constant range,
+  /// associated to the lattice element. We have to ensure that Range is
+  /// initialized or destroyed when changing state to or from constantrange.
+  union {
+    Constant *ConstVal;
+    ConstantRange Range;
+  };
+
+public:
+  // ConstVal and Range are initialized on demand.
+  ValueLatticeElement() : Tag(undefined) {}
+
+  /// Custom destructor to ensure Range is properly destroyed when the object
+  /// is deallocated.
+  ~ValueLatticeElement() {
+    switch (Tag) {
+    case overdefined:
+    case undefined:
+    case constant:
+    case notconstant:
+      break;
+    case constantrange:
+      Range.~ConstantRange();
+      break;
+    }
+  }
+
+  /// Custom copy constructor, to ensure Range gets initialized when
+  /// copying a constant range lattice element.
+  ValueLatticeElement(const ValueLatticeElement &Other) : Tag(undefined) {
+    *this = Other;
+  }
+
+  /// Custom assignment operator, to ensure Range gets initialized when
+  /// assigning a constant range lattice element.
+  ValueLatticeElement &operator=(const ValueLatticeElement &Other) {
+    // If we change the state of this from constant range to non-constant-range,
+    // destroy Range.
+    if (isConstantRange() && !Other.isConstantRange())
+      Range.~ConstantRange();
+
+    // If we change the state of this from a valid ConstVal to a state without
+    // a valid ConstVal, zero the pointer.
+    if ((isConstant() || isNotConstant()) && !Other.isConstant() &&
+        !Other.isNotConstant())
+      ConstVal = nullptr;
+
+    switch (Other.Tag) {
+    case constantrange:
+      if (!isConstantRange())
+        new (&Range) ConstantRange(Other.Range);
+      else
+        Range = Other.Range;
+      break;
+    case constant:
+    case notconstant:
+      ConstVal = Other.ConstVal;
+      break;
+    case overdefined:
+    case undefined:
+      break;
+    }
+    Tag = Other.Tag;
+    return *this;
+  }
+
+  static ValueLatticeElement get(Constant *C) {
+    ValueLatticeElement Res;
+    if (!isa<UndefValue>(C))
+      Res.markConstant(C);
+    return Res;
+  }
+  static ValueLatticeElement getNot(Constant *C) {
+    ValueLatticeElement Res;
+    if (!isa<UndefValue>(C))
+      Res.markNotConstant(C);
+    return Res;
+  }
+  static ValueLatticeElement getRange(ConstantRange CR) {
+    ValueLatticeElement Res;
+    Res.markConstantRange(std::move(CR));
+    return Res;
+  }
+  static ValueLatticeElement getOverdefined() {
+    ValueLatticeElement Res;
+    Res.markOverdefined();
+    return Res;
+  }
+
+  bool isUndefined() const { return Tag == undefined; }
+  bool isConstant() const { return Tag == constant; }
+  bool isNotConstant() const { return Tag == notconstant; }
+  bool isConstantRange() const { return Tag == constantrange; }
+  bool isOverdefined() const { return Tag == overdefined; }
+
+  Constant *getConstant() const {
+    assert(isConstant() && "Cannot get the constant of a non-constant!");
+    return ConstVal;
+  }
+
+  Constant *getNotConstant() const {
+    assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
+    return ConstVal;
+  }
+
+  const ConstantRange &getConstantRange() const {
+    assert(isConstantRange() &&
+           "Cannot get the constant-range of a non-constant-range!");
+    return Range;
+  }
+
+  Optional<APInt> asConstantInteger() const {
+    if (isConstant() && isa<ConstantInt>(getConstant())) {
+      return cast<ConstantInt>(getConstant())->getValue();
+    } else if (isConstantRange() && getConstantRange().isSingleElement()) {
+      return *getConstantRange().getSingleElement();
+    }
+    return None;
+  }
+
+private:
+  void markOverdefined() {
+    if (isOverdefined())
+      return;
+    if (isConstant() || isNotConstant())
+      ConstVal = nullptr;
+    if (isConstantRange())
+      Range.~ConstantRange();
+    Tag = overdefined;
+  }
+
+  void markConstant(Constant *V) {
+    assert(V && "Marking constant with NULL");
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+      markConstantRange(ConstantRange(CI->getValue()));
+      return;
+    }
+    if (isa<UndefValue>(V))
+      return;
+
+    assert((!isConstant() || getConstant() == V) &&
+           "Marking constant with different value");
+    assert(isUndefined());
+    Tag = constant;
+    ConstVal = V;
+  }
+
+  void markNotConstant(Constant *V) {
+    assert(V && "Marking constant with NULL");
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+      markConstantRange(ConstantRange(CI->getValue() + 1, CI->getValue()));
+      return;
+    }
+    if (isa<UndefValue>(V))
+      return;
+
+    assert((!isConstant() || getConstant() != V) &&
+           "Marking constant !constant with same value");
+    assert((!isNotConstant() || getNotConstant() == V) &&
+           "Marking !constant with different value");
+    assert(isUndefined() || isConstant());
+    Tag = notconstant;
+    ConstVal = V;
+  }
+
+  void markConstantRange(ConstantRange NewR) {
+    if (isConstantRange()) {
+      if (NewR.isEmptySet())
+        markOverdefined();
+      else {
+        Range = std::move(NewR);
+      }
+      return;
+    }
+
+    assert(isUndefined());
+    if (NewR.isEmptySet())
+      markOverdefined();
+    else {
+      Tag = constantrange;
+      new (&Range) ConstantRange(std::move(NewR));
+    }
+  }
+
+public:
+  /// Updates this object to approximate both this object and RHS. Returns
+  /// true if this object has been changed.
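+  ///
+  /// A hedged sketch (the values and the in-scope DataLayout DL are
+  /// illustrative assumptions): merging the ranges [0, 10) and [5, 20)
+  /// leaves this element as the range [0, 20):
+  /// \code
+  ///   auto A = ValueLatticeElement::getRange(
+  ///       ConstantRange(APInt(8, 0), APInt(8, 10)));
+  ///   auto B = ValueLatticeElement::getRange(
+  ///       ConstantRange(APInt(8, 5), APInt(8, 20)));
+  ///   A.mergeIn(B, DL); // A is now the constant range [0, 20)
+  /// \endcode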
+  bool mergeIn(const ValueLatticeElement &RHS, const DataLayout &DL) {
+    if (RHS.isUndefined() || isOverdefined())
+      return false;
+    if (RHS.isOverdefined()) {
+      markOverdefined();
+      return true;
+    }
+
+    if (isUndefined()) {
+      *this = RHS;
+      return !RHS.isUndefined();
+    }
+
+    if (isConstant()) {
+      if (RHS.isConstant() && getConstant() == RHS.getConstant())
+        return false;
+      markOverdefined();
+      return true;
+    }
+
+    if (isNotConstant()) {
+      if (RHS.isNotConstant() && getNotConstant() == RHS.getNotConstant())
+        return false;
+      markOverdefined();
+      return true;
+    }
+
+    assert(isConstantRange() && "New ValueLattice type?");
+    if (!RHS.isConstantRange()) {
+      // We can get here if we've encountered a constantexpr of integer type
+      // and merge it with a constantrange.
+      markOverdefined();
+      return true;
+    }
+    ConstantRange NewR = getConstantRange().unionWith(RHS.getConstantRange());
+    if (NewR.isFullSet())
+      markOverdefined();
+    else
+      markConstantRange(std::move(NewR));
+    return true;
+  }
+
+  ConstantInt *getConstantInt() const {
+    assert(isConstant() && isa<ConstantInt>(getConstant()) &&
+           "No integer constant");
+    return cast<ConstantInt>(getConstant());
+  }
+
+  /// Compares this symbolic value with Other using Pred and returns either
+  /// true, false or undef constants, or nullptr if the comparison cannot be
+  /// evaluated.
+  Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
+                       const ValueLatticeElement &Other) const {
+    if (isUndefined() || Other.isUndefined())
+      return UndefValue::get(Ty);
+
+    if (isConstant() && Other.isConstant())
+      return ConstantExpr::getCompare(Pred, getConstant(), Other.getConstant());
+
+    // Integer constants are represented as ConstantRanges with single
+    // elements.
+    if (!isConstantRange() || !Other.isConstantRange())
+      return nullptr;
+
+    const auto &CR = getConstantRange();
+    const auto &OtherCR = Other.getConstantRange();
+    if (ConstantRange::makeSatisfyingICmpRegion(Pred, OtherCR).contains(CR))
+      return ConstantInt::getTrue(Ty);
+    if (ConstantRange::makeSatisfyingICmpRegion(
+            CmpInst::getInversePredicate(Pred), OtherCR)
+            .contains(CR))
+      return ConstantInt::getFalse(Ty);
+
+    return nullptr;
+  }
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val);
+
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ValueLatticeUtils.h b/linux-x64/clang/include/llvm/Analysis/ValueLatticeUtils.h
new file mode 100644
index 0000000..0207267
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ValueLatticeUtils.h
@@ -0,0 +1,41 @@
+//===-- ValueLatticeUtils.h - Utils for solving lattices --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares common functions useful for performing data-flow analyses
+// that propagate values across function boundaries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_VALUELATTICEUTILS_H
+#define LLVM_ANALYSIS_VALUELATTICEUTILS_H
+
+namespace llvm {
+
+class Function;
+class GlobalVariable;
+
+/// Determine if the values of the given function's arguments can be tracked
+/// interprocedurally. The value of an argument can be tracked if the function
+/// has local linkage and its address is not taken.
+bool canTrackArgumentsInterprocedurally(Function *F);
+
+/// Determine if the return values of the given function can be tracked
+/// interprocedurally. Return values can be tracked if the function has an
+/// exact definition and it doesn't have the "naked" attribute. Naked functions
+/// may contain assembly code that returns untrackable values.
+bool canTrackReturnsInterprocedurally(Function *F);
+
+/// Determine if the value maintained in the given global variable can be
+/// tracked interprocedurally. A value can be tracked if the global variable
+/// has local linkage and is only used by non-volatile loads and stores.
+bool canTrackGlobalVariableInterprocedurally(GlobalVariable *GV);
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_VALUELATTICEUTILS_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ValueTracking.h b/linux-x64/clang/include/llvm/Analysis/ValueTracking.h
new file mode 100644
index 0000000..ced95df
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/ValueTracking.h
@@ -0,0 +1,559 @@
+//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains routines that help analyze properties that chains of
+// computations have.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_VALUETRACKING_H
+#define LLVM_ANALYSIS_VALUETRACKING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+class AddOperator;
+class APInt;
+class AssumptionCache;
+class DataLayout;
+class DominatorTree;
+class GEPOperator;
+class IntrinsicInst;
+struct KnownBits;
+class Loop;
+class LoopInfo;
+class MDNode;
+class OptimizationRemarkEmitter;
+class StringRef;
+class TargetLibraryInfo;
+class Value;
+
+  /// Determine which bits of V are known to be either zero or one and return
+  /// them in the Zero and One sets of \p Known.
+  ///
+  /// This function is defined on values with integer type, values with pointer
+  /// type, and vectors of integers.  In the case
+  /// where V is a vector, the known zero and known one values are the
+  /// same width as the vector element, and the bit is set only if it is true
+  /// for all of the elements in the vector.
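+  ///
+  /// A minimal usage sketch (V and DL are assumed to come from surrounding
+  /// code):
+  /// \code
+  ///   KnownBits Known(V->getType()->getIntegerBitWidth());
+  ///   computeKnownBits(V, Known, DL);
+  ///   if (Known.Zero[0])
+  ///     ; // the low bit of V is known zero, i.e. V is even
+  /// \endcode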
+  void computeKnownBits(const Value *V, KnownBits &Known,
+                        const DataLayout &DL, unsigned Depth = 0,
+                        AssumptionCache *AC = nullptr,
+                        const Instruction *CxtI = nullptr,
+                        const DominatorTree *DT = nullptr,
+                        OptimizationRemarkEmitter *ORE = nullptr);
+
+  /// Returns the known bits rather than passing by reference.
+  KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
+                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
+                             const Instruction *CxtI = nullptr,
+                             const DominatorTree *DT = nullptr,
+                             OptimizationRemarkEmitter *ORE = nullptr);
+
+  /// Compute known bits from the range metadata.
+  /// \p Ranges the !range metadata to use
+  /// \p Known the resulting set of bits that are known to be zero or one
+  void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
+                                         KnownBits &Known);
+
+  /// Return true if LHS and RHS have no common bits set.
+  bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
+                           const DataLayout &DL,
+                           AssumptionCache *AC = nullptr,
+                           const Instruction *CxtI = nullptr,
+                           const DominatorTree *DT = nullptr);
+
+  /// Return true if the given value is known to have exactly one bit set when
+  /// defined. For vectors return true if every element is known to be a power
+  /// of two when defined. Supports values with integer or pointer type and
+  /// vectors of integers. If 'OrZero' is set, then return true if the given
+  /// value is either a power of two or zero.
+  bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
+                              bool OrZero = false, unsigned Depth = 0,
+                              AssumptionCache *AC = nullptr,
+                              const Instruction *CxtI = nullptr,
+                              const DominatorTree *DT = nullptr);
+
+  /// Return true if the given value is only used in equality comparisons
+  /// against zero.
+  bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
+
+  /// Return true if the given value is known to be non-zero when defined. For
+  /// vectors, return true if every element is known to be non-zero when
+  /// defined. For pointers, if the context instruction and dominator tree are
+  /// specified, perform context-sensitive analysis and return true if the
+  /// pointer couldn't possibly be null at the specified instruction.
+  /// Supports values with integer or pointer type and vectors of integers.
+  bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
+                      AssumptionCache *AC = nullptr,
+                      const Instruction *CxtI = nullptr,
+                      const DominatorTree *DT = nullptr);
+
+  /// Returns true if the given value is known to be non-negative.
+  bool isKnownNonNegative(const Value *V, const DataLayout &DL,
+                          unsigned Depth = 0,
+                          AssumptionCache *AC = nullptr,
+                          const Instruction *CxtI = nullptr,
+                          const DominatorTree *DT = nullptr);
+
+  /// Returns true if the given value is known to be positive (i.e. non-negative
+  /// and non-zero).
+  bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
+                       AssumptionCache *AC = nullptr,
+                       const Instruction *CxtI = nullptr,
+                       const DominatorTree *DT = nullptr);
+
+  /// Returns true if the given value is known to be negative (i.e. non-positive
+  /// and non-zero).
+  bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
+                       AssumptionCache *AC = nullptr,
+                       const Instruction *CxtI = nullptr,
+                       const DominatorTree *DT = nullptr);
+
+  /// Return true if the given values are known to be non-equal when defined.
+  /// Supports scalar integer types only.
+  bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
+                       AssumptionCache *AC = nullptr,
+                       const Instruction *CxtI = nullptr,
+                       const DominatorTree *DT = nullptr);
+
+  /// Return true if 'V & Mask' is known to be zero. We use this predicate to
+  /// simplify operations downstream. Mask is known to be zero for bits that V
+  /// cannot have.
+  ///
+  /// This function is defined on values with integer type, values with pointer
+  /// type, and vectors of integers.  In the case
+  /// where V is a vector, the mask, known zero, and known one values are the
+  /// same width as the vector element, and the bit is set only if it is true
+  /// for all of the elements in the vector.
+  bool MaskedValueIsZero(const Value *V, const APInt &Mask,
+                         const DataLayout &DL,
+                         unsigned Depth = 0, AssumptionCache *AC = nullptr,
+                         const Instruction *CxtI = nullptr,
+                         const DominatorTree *DT = nullptr);
+
+  /// Return the number of times the sign bit of the register is replicated into
+  /// the other bits. We know that at least 1 bit is always equal to the sign
+  /// bit (itself), but other cases can give us information. For example,
+  /// immediately after an "ashr X, 2", we know that the top 3 bits are all
+  /// equal to each other, so we return 3. For vectors, return the number of
+  /// sign bits for the vector element with the minimum number of known sign
+  /// bits.
+  unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
+                              unsigned Depth = 0, AssumptionCache *AC = nullptr,
+                              const Instruction *CxtI = nullptr,
+                              const DominatorTree *DT = nullptr);
+
+  /// This function computes the integer multiple of Base that equals V. If
+  /// successful, it returns true and returns the multiple in Multiple. If
+  /// unsuccessful, it returns false. Also, if V can be simplified to an
+  /// integer, then the simplified V is returned in Val. Look through sext only
+  /// if LookThroughSExt=true.
+  bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
+                       bool LookThroughSExt = false,
+                       unsigned Depth = 0);
+
+  /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
+  /// intrinsics are treated as-if they were intrinsics.
+  Intrinsic::ID getIntrinsicForCallSite(ImmutableCallSite ICS,
+                                        const TargetLibraryInfo *TLI);
+
+  /// Return true if we can prove that the specified FP value is never equal to
+  /// -0.0.
+  bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
+                            unsigned Depth = 0);
+
+  /// Return true if we can prove that the specified FP value is either NaN or
+  /// never less than -0.0.
+  ///
+  ///      NaN --> true
+  ///       +0 --> true
+  ///       -0 --> true
+  ///   x > +0 --> true
+  ///   x < -0 --> false
+  bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
+
+  /// Return true if the floating-point scalar value is not a NaN or if the
+  /// floating-point vector value has no NaN elements. Return false if a value
+  /// could ever be NaN.
+  bool isKnownNeverNaN(const Value *V);
+
+  /// Return true if we can prove that the specified FP value's sign bit is 0.
+  ///
+  ///      NaN --> true/false (depending on the NaN's sign bit)
+  ///       +0 --> true
+  ///       -0 --> false
+  ///   x > +0 --> true
+  ///   x < -0 --> false
+  bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
+
+  /// If the specified value can be set by repeating the same byte in memory,
+  /// return the i8 value that it is represented with. This is true for all i8
+  /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
+  /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
+  /// i16 0x1234), return null.
+  Value *isBytewiseValue(Value *V);
+
+  /// Given an aggregate and a sequence of indices, see if the scalar value
+  /// indexed is already around as a register, for example if it were inserted
+  /// directly into the aggregate.
+  ///
+  /// If InsertBefore is not null, this function will duplicate (modified)
+  /// insertvalues when a part of a nested struct is extracted.
+  Value *FindInsertedValue(Value *V,
+                           ArrayRef<unsigned> idx_range,
+                           Instruction *InsertBefore = nullptr);
+
+  /// Analyze the specified pointer to see if it can be expressed as a base
+  /// pointer plus a constant offset. Return the base and offset to the caller.
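+  ///
+  /// For example (a sketch; Q and DL are assumed names): for a pointer
+  /// defined by "%q = getelementptr i8, i8* %p, i64 4", this returns %p and
+  /// sets Offset to 4:
+  /// \code
+  ///   int64_t Offset = 0;
+  ///   Value *Base = GetPointerBaseWithConstantOffset(Q, Offset, DL);
+  /// \endcode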
+  Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
+                                          const DataLayout &DL);
+  inline const Value *GetPointerBaseWithConstantOffset(const Value *Ptr,
+                                                       int64_t &Offset,
+                                                       const DataLayout &DL) {
+    return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
+                                            DL);
+  }
+
+  /// Returns true if the GEP is based on a pointer to a string (array of
+  /// \p CharSize integers) and is indexing into this string.
+  bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
+                                   unsigned CharSize = 8);
+
+  /// Represents offset+length into a ConstantDataArray.
+  struct ConstantDataArraySlice {
+    /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
+    /// initializer, it just doesn't fit the ConstantDataArray interface).
+    const ConstantDataArray *Array;
+
+    /// Slice starts at this Offset.
+    uint64_t Offset;
+
+    /// Length of the slice.
+    uint64_t Length;
+
+    /// Moves the Offset and adjusts Length accordingly.
+    void move(uint64_t Delta) {
+      assert(Delta < Length);
+      Offset += Delta;
+      Length -= Delta;
+    }
+
+    /// Convenience accessor for elements in the slice.
+    uint64_t operator[](unsigned I) const {
+      return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
+    }
+  };
+
+  /// Returns true if the value \p V is a pointer into a ConstantDataArray.
+  /// If successful \p Slice will point to a ConstantDataArray info object
+  /// with an appropriate offset.
+  bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
+                                unsigned ElementSize, uint64_t Offset = 0);
+
+  /// This function extracts the bytes of the null-terminated C string pointed
+  /// to by V. If successful, it returns true and returns the string in Str. If
+  /// unsuccessful, it returns false. This does not include the trailing null
+  /// character by default. If TrimAtNul is set to false, then this returns any
+  /// trailing null characters as well as any other characters that come after
+  /// it.
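+  ///
+  /// A hedged usage sketch (V is assumed to come from surrounding code):
+  /// \code
+  ///   StringRef Str;
+  ///   if (getConstantStringInfo(V, Str))
+  ///     ; // V points into a constant C string; Str now holds its bytes
+  /// \endcode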
+  bool getConstantStringInfo(const Value *V, StringRef &Str,
+                             uint64_t Offset = 0, bool TrimAtNul = true);
+
+  /// If we can compute the length of the string pointed to by the specified
+  /// pointer, return 'len+1'.  If we can't, return 0.
+  uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
+
+  /// This method strips off any GEP address adjustments and pointer casts from
+  /// the specified value, returning the original object being addressed. Note
+  /// that the returned value has pointer type if the specified value does. If
+  /// the MaxLookup value is non-zero, it limits the number of instructions to
+  /// be stripped off.
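+  ///
+  /// For example (illustrative): given "%q = getelementptr i8, i8* %p, i64 4"
+  /// followed by "%r = bitcast i8* %q to i32*", GetUnderlyingObject(%r, DL)
+  /// returns %p, assuming %p is not itself derived from another pointer.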
+  Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
+                             unsigned MaxLookup = 6);
+  inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
+                                          unsigned MaxLookup = 6) {
+    return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
+  }
+
+  /// \brief This method is similar to GetUnderlyingObject except that it can
+  /// look through phi and select instructions and return multiple objects.
+  ///
+  /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
+  /// accesses different objects in each iteration, we don't look through the
+  /// phi node. E.g. consider this loop nest:
+  ///
+  ///   int **A;
+  ///   for (i)
+  ///     for (j) {
+  ///        A[i][j] = A[i-1][j] * B[j]
+  ///     }
+  ///
+  /// This is transformed by Load-PRE to stash away A[i] for the next iteration
+  /// of the outer loop:
+  ///
+  ///   Curr = A[0];          // Prev_0
+  ///   for (i: 1..N) {
+  ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
+  ///     Curr = A[i];
+  ///     for (j: 0..N) {
+  ///        Curr[j] = Prev[j] * B[j]
+  ///     }
+  ///   }
+  ///
+  /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
+  /// should not assume that Curr and Prev share the same underlying object thus
+  /// it shouldn't look through the phi above.
+  void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
+                            const DataLayout &DL, LoopInfo *LI = nullptr,
+                            unsigned MaxLookup = 6);
+
+  /// This is a wrapper around GetUnderlyingObjects and adds support for basic
+  /// ptrtoint+arithmetic+inttoptr sequences.
+  bool getUnderlyingObjectsForCodeGen(const Value *V,
+                            SmallVectorImpl<Value *> &Objects,
+                            const DataLayout &DL);
+
+  /// Return true if the only users of this pointer are lifetime markers.
+  bool onlyUsedByLifetimeMarkers(const Value *V);
+
+  /// Return true if the instruction does not have any effects besides
+  /// calculating the result and does not have undefined behavior.
+  ///
+  /// This method never returns true for an instruction that returns true for
+  /// mayHaveSideEffects; however, this method also does some other checks in
+  /// addition. It checks for undefined behavior, like dividing by zero or
+  /// loading from an invalid pointer (but not for undefined results, like a
+  /// shift with a shift amount larger than the width of the result). It checks
+  /// for malloc and alloca because speculatively executing them might cause a
+  /// memory leak. It also returns false for instructions related to control
+  /// flow, specifically terminators and PHI nodes.
+  ///
+  /// If the CtxI is specified this method performs context-sensitive analysis
+  /// and returns true if it is safe to execute the instruction immediately
+  /// before the CtxI.
+  ///
+  /// If the CtxI is NOT specified this method only looks at the instruction
+  /// itself and its operands, so if this method returns true, it is safe to
+  /// move the instruction as long as the correct dominance relationships for
+  /// the operands and users hold.
+  ///
+  /// This method can return true for instructions that read memory;
+  /// for such instructions, moving them may change the resulting value.
+  bool isSafeToSpeculativelyExecute(const Value *V,
+                                    const Instruction *CtxI = nullptr,
+                                    const DominatorTree *DT = nullptr);
+
+  /// Returns true if the result or effects of the given instruction \p I
+  /// depend on or influence global memory.
+  /// Memory dependence arises for example if the instruction reads from
+  /// memory or may produce effects or undefined behavior. Memory dependent
+  /// instructions generally cannot be reordered with respect to other memory
+  /// dependent instructions or moved into non-dominated basic blocks.
+  /// Instructions which just compute a value based on the values of their
+  /// operands are not memory dependent.
+  bool mayBeMemoryDependent(const Instruction &I);
+
+  /// Return true if it is an intrinsic that cannot be speculated but also
+  /// cannot trap.
+  bool isAssumeLikeIntrinsic(const Instruction *I);
+
+  /// Return true if it is valid to use the assumptions provided by an
+  /// assume intrinsic, I, at the point in the control-flow identified by the
+  /// context instruction, CxtI.
+  bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
+                               const DominatorTree *DT = nullptr);
+
+  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
+
+  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
+                                               const Value *RHS,
+                                               const DataLayout &DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
+  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
+                                               const Value *RHS,
+                                               const DataLayout &DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
+  OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
+                                             const DataLayout &DL,
+                                             AssumptionCache *AC = nullptr,
+                                             const Instruction *CxtI = nullptr,
+                                             const DominatorTree *DT = nullptr);
+  /// This version also leverages the sign bit of Add if known.
+  OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
+                                             const DataLayout &DL,
+                                             AssumptionCache *AC = nullptr,
+                                             const Instruction *CxtI = nullptr,
+                                             const DominatorTree *DT = nullptr);
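+
+  // A hedged usage sketch (LHS, RHS, AC, CxtI and DT are assumed to come from
+  // surrounding code): a transform might only set the 'nuw' flag on an add
+  // when
+  //   computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT) ==
+  //   OverflowResult::NeverOverflows.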
+
+  /// Returns true if the arithmetic part of the \p II 's result is
+  /// used only along the paths control dependent on the computation
+  /// not overflowing, \p II being an <op>.with.overflow intrinsic.
+  bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+                                 const DominatorTree &DT);
+
+  /// Return true if this function can prove that the instruction I will
+  /// always transfer execution to one of its successors (including the next
+  /// instruction that follows within a basic block). E.g. this is not
+  /// guaranteed for function calls that could loop infinitely.
+  ///
+  /// In other words, this function returns false for instructions that may
+  /// transfer execution or fail to transfer execution in a way that is not
+  /// captured in the CFG nor in the sequence of instructions within a basic
+  /// block.
+  ///
+  /// Undefined behavior is assumed not to happen, so e.g. division is
+  /// guaranteed to transfer execution to the following instruction even
+  /// though division by zero might cause undefined behavior.
+  bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
+
+  /// Returns true if this block does not contain a potential implicit exit.
+  /// This is equivalent to saying that all instructions within the basic block
+  /// are guaranteed to transfer execution to their successor within the basic
+  /// block. This has the same assumptions w.r.t. undefined behavior as the
+  /// instruction variant of this function.
+  bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
+
+  /// Return true if this function can prove that the instruction I
+  /// is executed for every iteration of the loop L.
+  ///
+  /// Note that this currently only considers the loop header.
+  bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+                                              const Loop *L);
+
+  /// Return true if this function can prove that I is guaranteed to yield
+  /// full-poison (all bits poison) if at least one of its operands is
+  /// full-poison (all bits poison).
+  ///
+  /// The exact rules for how poison propagates through instructions have
+  /// not been settled as of 2015-07-10, so this function is conservative
+  /// and only considers poison to be propagated in uncontroversial
+  /// cases. There is no attempt to track values that may be only partially
+  /// poison.
+  bool propagatesFullPoison(const Instruction *I);
+
+  /// Return either nullptr or an operand of I such that I will trigger
+  /// undefined behavior if I is executed and that operand has a full-poison
+  /// value (all bits poison).
+  const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
+
+  /// Return true if this function can prove that if PoisonI is executed
+  /// and yields a full-poison value (all bits poison), then that will
+  /// trigger undefined behavior.
+  ///
+  /// Note that this currently only considers the basic block that is
+  /// the parent of I.
+  bool programUndefinedIfFullPoison(const Instruction *PoisonI);
+
+  /// \brief Specific patterns of select instructions we can match.
+  enum SelectPatternFlavor {
+    SPF_UNKNOWN = 0,
+    SPF_SMIN,                   ///< Signed minimum
+    SPF_UMIN,                   ///< Unsigned minimum
+    SPF_SMAX,                   ///< Signed maximum
+    SPF_UMAX,                   ///< Unsigned maximum
+    SPF_FMINNUM,                ///< Floating point minnum
+    SPF_FMAXNUM,                ///< Floating point maxnum
+    SPF_ABS,                    ///< Absolute value
+    SPF_NABS                    ///< Negated absolute value
+  };
+
+  /// \brief Behavior when a floating point min/max is given one NaN and one
+  /// non-NaN as input.
+  enum SelectPatternNaNBehavior {
+    SPNB_NA = 0,                ///< NaN behavior not applicable.
+    SPNB_RETURNS_NAN,           ///< Given one NaN input, returns the NaN.
+    SPNB_RETURNS_OTHER,         ///< Given one NaN input, returns the non-NaN.
+    SPNB_RETURNS_ANY            ///< Given one NaN input, can return either (or
+                                ///< it has been determined that no operands can
+                                ///< be NaN).
+  };
+
+  struct SelectPatternResult {
+    SelectPatternFlavor Flavor;
+    SelectPatternNaNBehavior NaNBehavior; ///< Only applicable if Flavor is
+                                          ///< SPF_FMINNUM or SPF_FMAXNUM.
+    bool Ordered;               ///< When implementing this min/max pattern as
+                                ///< fcmp; select, does the fcmp have to be
+                                ///< ordered?
+
+    /// Return true if \p SPF is a min or a max pattern.
+    static bool isMinOrMax(SelectPatternFlavor SPF) {
+      return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
+    }
+  };
+
+  /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
+  /// and providing the out parameter results if we successfully match.
+  ///
+  /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
+  /// not match that of the original select. If this is the case, the cast
+  /// operation (one of Trunc,SExt,Zext) that must be done to transform the
+  /// type of LHS and RHS into the type of V is returned in CastOp.
+  ///
+  /// For example:
+  ///   %1 = icmp slt i32 %a, 4
+  ///   %2 = sext i32 %a to i64
+  ///   %3 = select i1 %1, i64 %2, i64 4
+  ///
+  /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
+  ///
+  SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
+                                         Instruction::CastOps *CastOp = nullptr,
+                                         unsigned Depth = 0);
+  inline SelectPatternResult
+  matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
+                     Instruction::CastOps *CastOp = nullptr) {
+    Value *L = const_cast<Value*>(LHS);
+    Value *R = const_cast<Value*>(RHS);
+    auto Result = matchSelectPattern(const_cast<Value*>(V), L, R, CastOp);
+    LHS = L;
+    RHS = R;
+    return Result;
+  }
+
+  /// Return the canonical comparison predicate for the specified
+  /// minimum/maximum flavor.
+  CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
+                                   bool Ordered = false);
+
+  /// Return the inverse minimum/maximum flavor of the specified flavor.
+  /// For example, signed minimum is the inverse of signed maximum.
+  SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
+
+  /// Return the canonical inverse comparison predicate for the specified
+  /// minimum/maximum flavor.
+  CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
+
+  /// Return true if RHS is known to be implied true by LHS.  Return false if
+  /// RHS is known to be implied false by LHS.  Otherwise, return None if no
+  /// implication can be made.
+  /// A & B must be i1 (boolean) values or a vector of such values. Note that
+  /// the truth table for implication is the same as <=u on i1 values (but not
+  /// <=s!).  The truth table for both is:
+  ///    | T | F (B)
+  ///  T | T | F
+  ///  F | T | T
+  /// (A)
+  Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
+                                    const DataLayout &DL, bool LHSIsTrue = true,
+                                    unsigned Depth = 0);
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_VALUETRACKING_H
diff --git a/linux-x64/clang/include/llvm/Analysis/VectorUtils.h b/linux-x64/clang/include/llvm/Analysis/VectorUtils.h
new file mode 100644
index 0000000..6315e84
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/VectorUtils.h
@@ -0,0 +1,181 @@
+//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some vectorizer utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_VECTORUTILS_H
+#define LLVM_ANALYSIS_VECTORUTILS_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class DemandedBits;
+class GetElementPtrInst;
+class Loop;
+class ScalarEvolution;
+class TargetTransformInfo;
+class Type;
+class Value;
+
+namespace Intrinsic {
+enum ID : unsigned;
+}
+
+/// \brief Identify if the intrinsic is trivially vectorizable.
+/// This method returns true if the intrinsic's argument types are all
+/// scalars for the scalar form of the intrinsic and all vectors for
+/// the vector form of the intrinsic.
+bool isTriviallyVectorizable(Intrinsic::ID ID);
+
+/// \brief Identifies if the intrinsic has a scalar operand. It checks for
+/// ctlz, cttz and powi special intrinsics whose argument is scalar.
+bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
+
+/// \brief Returns intrinsic ID for call.
+/// For the given call instruction it finds the corresponding intrinsic and
+/// returns its intrinsic ID; if no intrinsic is found, it returns
+/// not_intrinsic.
+Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
+                                          const TargetLibraryInfo *TLI);
+
+/// \brief Find the operand of the GEP that should be checked for consecutive
+/// stores. This ignores trailing indices that have no effect on the final
+/// pointer.
+unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
+
+/// \brief If the argument is a GEP, then returns the operand identified by
+/// getGEPInductionOperand. However, if there is some other non-loop-invariant
+/// operand, it returns that instead.
+Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
+
+/// \brief If a value has only one user that is a CastInst, return it.
+Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
+
+/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
+/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
+Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
+
+/// \brief Given a vector and an element number, see if the scalar value is
+/// already around as a register, for example if it were inserted then extracted
+/// from the vector.
+Value *findScalarElement(Value *V, unsigned EltNo);
+
+/// \brief Get splat value if the input is a splat vector or return nullptr.
+/// The value may be extracted from a splat constant vector or from
+/// a sequence of instructions that broadcast a single value into a vector.
+const Value *getSplatValue(const Value *V);
+
+/// \brief Compute a map of integer instructions to their minimum legal type
+/// size.
+///
+/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
+/// type (e.g. i32) whenever arithmetic is performed on them.
+///
+/// For targets with native i8 or i16 operations, usually InstCombine can shrink
+/// the arithmetic type down again. However InstCombine refuses to create
+/// illegal types, so for targets without i8 or i16 registers, the lengthening
+/// and shrinking remains.
+///
+/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
+/// their scalar equivalents do not, so during vectorization it is important to
+/// remove these lengthens and truncates when deciding the profitability of
+/// vectorization.
+///
+/// This function analyzes the given range of instructions and determines the
+/// minimum type size each can be converted to. It attempts to remove or
+/// minimize type size changes across each def-use chain, so for example in the
+/// following code:
+///
+///   %1 = load i8, i8*
+///   %2 = add i8 %1, 2
+///   %3 = load i16, i16*
+///   %4 = zext i8 %2 to i32
+///   %5 = zext i16 %3 to i32
+///   %6 = add i32 %4, %5
+///   %7 = trunc i32 %6 to i16
+///
+/// Instruction %6 must be done at least in i16, so computeMinimumValueSizes
+/// will return: {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
+///
+/// If the optional TargetTransformInfo is provided, this function tries harder
+/// to do less work by only looking at illegal types.
+MapVector<Instruction*, uint64_t>
+computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
+                         DemandedBits &DB,
+                         const TargetTransformInfo *TTI = nullptr);
+
+/// \brief Propagate metadata from a group of instructions to a new one.
+///
+/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
+/// MD_nontemporal].  For K in Kinds, we get the MDNode for K from each of the
+/// elements of VL, compute their "intersection" (i.e., the most generic
+/// metadata value that covers all of the individual values), and set I's
+/// metadata for K equal to the intersection value.
+///
+/// This function always sets a (possibly null) value for each K in Kinds.
+Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
+
+/// \brief Create an interleave shuffle mask.
+///
+/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
+/// vectorization factor \p VF into a single wide vector. The mask is of the
+/// form:
+///
+///   <0, VF, VF * 2, ..., VF * (NumVecs - 1), 1, VF + 1, VF * 2 + 1, ...>
+///
+/// For example, the mask for VF = 4 and NumVecs = 2 is:
+///
+///   <0, 4, 1, 5, 2, 6, 3, 7>.
+Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
+                               unsigned NumVecs);
+
+/// \brief Create a stride shuffle mask.
+///
+/// This function creates a shuffle mask whose elements begin at \p Start and
+/// are incremented by \p Stride. The mask can be used to deinterleave an
+/// interleaved vector into separate vectors of vectorization factor \p VF. The
+/// mask is of the form:
+///
+///   <Start, Start + Stride, ..., Start + Stride * (VF - 1)>
+///
+/// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
+///
+///   <0, 2, 4, 6>
+Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
+                           unsigned Stride, unsigned VF);
+
+/// \brief Create a sequential shuffle mask.
+///
+/// This function creates a shuffle mask whose elements are sequential and begin
+/// at \p Start.  The mask contains \p NumInts integers and is padded with \p
+/// NumUndefs undef values. The mask is of the form:
+///
+///   <Start, Start + 1, ... Start + NumInts - 1, undef_1, ... undef_NumUndefs>
+///
+/// For example, the mask for Start = 0, NumInts = 4, and NumUndefs = 4 is:
+///
+///   <0, 1, 2, 3, undef, undef, undef, undef>
+Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
+                               unsigned NumInts, unsigned NumUndefs);
+
+/// \brief Concatenate a list of vectors.
+///
+/// This function generates code that concatenates the vectors in \p Vecs into a
+/// single large vector. The number of vectors should be greater than one, and
+/// their element types should be the same. The number of elements in the
+/// vectors should also be the same; however, if the last vector has fewer
+/// elements, it will be padded with undefs.
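+///
+/// For example (a sketch): concatenating a <4 x i32> vector with a <2 x i32>
+/// vector yields an <8 x i32> vector whose last two elements are undef.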
+Value *concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs);
+
+} // llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/AsmParser/Parser.h b/linux-x64/clang/include/llvm/AsmParser/Parser.h
new file mode 100644
index 0000000..4e22101
--- /dev/null
+++ b/linux-x64/clang/include/llvm/AsmParser/Parser.h
@@ -0,0 +1,137 @@
+//===-- Parser.h - Parser for LLVM IR text assembly files -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  These classes are implemented by the lib/AsmParser library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ASMPARSER_PARSER_H
+#define LLVM_ASMPARSER_PARSER_H
+
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+class Constant;
+class LLVMContext;
+class Module;
+struct SlotMapping;
+class SMDiagnostic;
+class Type;
+
+/// This function is the main interface to the LLVM Assembly Parser. It parses
+/// an ASCII file that (presumably) contains LLVM Assembly code. It returns a
+/// Module (intermediate representation) with the corresponding features. Note
+/// that this does not verify that the generated Module is valid, so you should
+/// run the verifier after parsing the file to check that it is okay.
+/// \brief Parse LLVM Assembly from a file
+/// \param Filename The name of the file to parse
+/// \param Error Error result info.
+/// \param Context Context in which to allocate globals info.
+/// \param Slots The optional slot mapping that will be initialized during
+///              parsing.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM testsuite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
+std::unique_ptr<Module>
+parseAssemblyFile(StringRef Filename, SMDiagnostic &Error, LLVMContext &Context,
+                  SlotMapping *Slots = nullptr, bool UpgradeDebugInfo = true,
+                  StringRef DataLayoutString = "");
+
+/// This function is a secondary interface to the LLVM Assembly Parser. It parses
+/// an ASCII string that (presumably) contains LLVM Assembly code. It returns a
+/// Module (intermediate representation) with the corresponding features. Note
+/// that this does not verify that the generated Module is valid, so you should
+/// run the verifier after parsing the file to check that it is okay.
+/// \brief Parse LLVM Assembly from a string
+/// \param AsmString The string containing assembly
+/// \param Error Error result info.
+/// \param Context Context in which to allocate globals info.
+/// \param Slots The optional slot mapping that will be initialized during
+///              parsing.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM testsuite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
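+///
+/// A hedged usage sketch (the IR string is illustrative):
+/// \code
+///   LLVMContext Ctx;
+///   SMDiagnostic Err;
+///   std::unique_ptr<Module> M =
+///       parseAssemblyString("define i32 @f() { ret i32 0 }", Err, Ctx);
+///   if (!M)
+///     Err.print("example", errs());
+/// \endcode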
+std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
+                                            SMDiagnostic &Error,
+                                            LLVMContext &Context,
+                                            SlotMapping *Slots = nullptr,
+                                            bool UpgradeDebugInfo = true,
+                                            StringRef DataLayoutString = "");
+
+/// parseAssemblyFile and parseAssemblyString are wrappers around this function.
+/// \brief Parse LLVM Assembly from a MemoryBuffer.
+/// \param F The MemoryBuffer containing assembly
+/// \param Err Error result info.
+/// \param Slots The optional slot mapping that will be initialized during
+///              parsing.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM testsuite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
+std::unique_ptr<Module> parseAssembly(MemoryBufferRef F, SMDiagnostic &Err,
+                                      LLVMContext &Context,
+                                      SlotMapping *Slots = nullptr,
+                                      bool UpgradeDebugInfo = true,
+                                      StringRef DataLayoutString = "");
+
+/// This function is the low-level interface to the LLVM Assembly Parser.
+/// This is kept as an independent function instead of being inlined into
+/// parseAssembly for the convenience of interactive users that want to add
+/// recently parsed bits to an existing module.
+///
+/// \param F The MemoryBuffer containing assembly
+/// \param M The module to add data to.
+/// \param Err Error result info.
+/// \param Slots The optional slot mapping that will be initialized during
+///              parsing.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM testsuite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
+/// \return true on error.
+bool parseAssemblyInto(MemoryBufferRef F, Module &M, SMDiagnostic &Err,
+                       SlotMapping *Slots = nullptr,
+                       bool UpgradeDebugInfo = true,
+                       StringRef DataLayoutString = "");
+
+/// Parse a type and a constant value in the given string.
+///
+/// The constant value can be any LLVM constant, including a constant
+/// expression.
+///
+/// \param Slots The optional slot mapping that will restore the parsing state
+/// of the module.
+/// \return null on error.
+Constant *parseConstantValue(StringRef Asm, SMDiagnostic &Err, const Module &M,
+                             const SlotMapping *Slots = nullptr);
+
+/// Parse a type in the given string.
+///
+/// \param Slots The optional slot mapping that will restore the parsing state
+/// of the module.
+/// \return null on error.
+Type *parseType(StringRef Asm, SMDiagnostic &Err, const Module &M,
+                const SlotMapping *Slots = nullptr);
+
+/// Parse a string \p Asm that starts with a type.
+/// \p Read [out] gives the number of characters that have been read to parse
+/// the type in \p Asm.
+///
+/// \param Slots The optional slot mapping that will restore the parsing state
+/// of the module.
+/// \return null on error.
+Type *parseTypeAtBeginning(StringRef Asm, unsigned &Read, SMDiagnostic &Err,
+                           const Module &M, const SlotMapping *Slots = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/AsmParser/SlotMapping.h b/linux-x64/clang/include/llvm/AsmParser/SlotMapping.h
new file mode 100644
index 0000000..bd7e8fc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/AsmParser/SlotMapping.h
@@ -0,0 +1,42 @@
+//===-- SlotMapping.h - Slot number mapping for unnamed values --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the SlotMapping struct.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ASMPARSER_SLOTMAPPING_H
+#define LLVM_ASMPARSER_SLOTMAPPING_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class GlobalValue;
+class Type;
+
+/// This struct contains the mappings from the slot numbers to unnamed metadata
+/// nodes, global values and types. It also contains the mapping for the named
+/// types.
+/// It can be used to save the parsing state of an LLVM IR module so that the
+/// textual references to the values in the module can be parsed outside of the
+/// module's source.
+struct SlotMapping {
+  std::vector<GlobalValue *> GlobalValues;
+  std::map<unsigned, TrackingMDNodeRef> MetadataNodes;
+  StringMap<Type *> NamedTypes;
+  std::map<unsigned, Type *> Types;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/COFF.h b/linux-x64/clang/include/llvm/BinaryFormat/COFF.h
new file mode 100644
index 0000000..4d726aa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/COFF.h
@@ -0,0 +1,727 @@
+//===-- llvm/BinaryFormat/COFF.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions used in Windows COFF files.
+//
+// Structures and enums defined within this file were created using
+// information from Microsoft's publicly available PE/COFF format document:
+//
+// Microsoft Portable Executable and Common Object File Format Specification
+// Revision 8.1 - February 15, 2008
+//
+// As of 5/2/2010, hosted by Microsoft at:
+// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_COFF_H
+#define LLVM_BINARYFORMAT_COFF_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+namespace COFF {
+
+// The maximum number of sections that a COFF object can have (inclusive).
+const int32_t MaxNumberOfSections16 = 65279;
+
+// The PE signature bytes that follow the DOS stub header.
+static const char PEMagic[] = {'P', 'E', '\0', '\0'};
+
+static const char BigObjMagic[] = {
+    '\xc7', '\xa1', '\xba', '\xd1', '\xee', '\xba', '\xa9', '\x4b',
+    '\xaf', '\x20', '\xfa', '\xf6', '\x6a', '\xa4', '\xdc', '\xb8',
+};
+
+static const char ClGlObjMagic[] = {
+    '\x38', '\xfe', '\xb3', '\x0c', '\xa5', '\xd9', '\xab', '\x4d',
+    '\xac', '\x9b', '\xd6', '\xb6', '\x22', '\x26', '\x53', '\xc2',
+};
+
+// The signature bytes that start a .res file.
+static const char WinResMagic[] = {
+    '\x00', '\x00', '\x00', '\x00', '\x20', '\x00', '\x00', '\x00',
+    '\xff', '\xff', '\x00', '\x00', '\xff', '\xff', '\x00', '\x00',
+};
+
+// Sizes in bytes of various things in the COFF format.
+enum {
+  Header16Size = 20,
+  Header32Size = 56,
+  NameSize = 8,
+  Symbol16Size = 18,
+  Symbol32Size = 20,
+  SectionSize = 40,
+  RelocationSize = 10
+};
+
+struct header {
+  uint16_t Machine;
+  int32_t NumberOfSections;
+  uint32_t TimeDateStamp;
+  uint32_t PointerToSymbolTable;
+  uint32_t NumberOfSymbols;
+  uint16_t SizeOfOptionalHeader;
+  uint16_t Characteristics;
+};
+
+struct BigObjHeader {
+  enum : uint16_t { MinBigObjectVersion = 2 };
+
+  uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
+  uint16_t Sig2; ///< Must be 0xFFFF.
+  uint16_t Version;
+  uint16_t Machine;
+  uint32_t TimeDateStamp;
+  uint8_t UUID[16];
+  uint32_t unused1;
+  uint32_t unused2;
+  uint32_t unused3;
+  uint32_t unused4;
+  uint32_t NumberOfSections;
+  uint32_t PointerToSymbolTable;
+  uint32_t NumberOfSymbols;
+};
+
+enum MachineTypes : unsigned {
+  MT_Invalid = 0xffff,
+
+  IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
+  IMAGE_FILE_MACHINE_AM33 = 0x1D3,
+  IMAGE_FILE_MACHINE_AMD64 = 0x8664,
+  IMAGE_FILE_MACHINE_ARM = 0x1C0,
+  IMAGE_FILE_MACHINE_ARMNT = 0x1C4,
+  IMAGE_FILE_MACHINE_ARM64 = 0xAA64,
+  IMAGE_FILE_MACHINE_EBC = 0xEBC,
+  IMAGE_FILE_MACHINE_I386 = 0x14C,
+  IMAGE_FILE_MACHINE_IA64 = 0x200,
+  IMAGE_FILE_MACHINE_M32R = 0x9041,
+  IMAGE_FILE_MACHINE_MIPS16 = 0x266,
+  IMAGE_FILE_MACHINE_MIPSFPU = 0x366,
+  IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466,
+  IMAGE_FILE_MACHINE_POWERPC = 0x1F0,
+  IMAGE_FILE_MACHINE_POWERPCFP = 0x1F1,
+  IMAGE_FILE_MACHINE_R4000 = 0x166,
+  IMAGE_FILE_MACHINE_RISCV32 = 0x5032,
+  IMAGE_FILE_MACHINE_RISCV64 = 0x5064,
+  IMAGE_FILE_MACHINE_RISCV128 = 0x5128,
+  IMAGE_FILE_MACHINE_SH3 = 0x1A2,
+  IMAGE_FILE_MACHINE_SH3DSP = 0x1A3,
+  IMAGE_FILE_MACHINE_SH4 = 0x1A6,
+  IMAGE_FILE_MACHINE_SH5 = 0x1A8,
+  IMAGE_FILE_MACHINE_THUMB = 0x1C2,
+  IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169
+};
+
+enum Characteristics : unsigned {
+  C_Invalid = 0,
+
+  /// The file does not contain base relocations and must be loaded at its
+  /// preferred base. If this cannot be done, the loader will error.
+  IMAGE_FILE_RELOCS_STRIPPED = 0x0001,
+  /// The file is valid and can be run.
+  IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002,
+  /// COFF line numbers have been stripped. This is deprecated and should be
+  /// 0.
+  IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004,
+  /// COFF symbol table entries for local symbols have been removed. This is
+  /// deprecated and should be 0.
+  IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008,
+  /// Aggressively trim working set. This is deprecated and must be 0.
+  IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010,
+  /// Image can handle > 2GiB addresses.
+  IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020,
+  /// Little endian: the LSB precedes the MSB in memory. This is deprecated
+  /// and should be 0.
+  IMAGE_FILE_BYTES_REVERSED_LO = 0x0080,
+  /// Machine is based on a 32-bit word architecture.
+  IMAGE_FILE_32BIT_MACHINE = 0x0100,
+  /// Debugging info has been removed.
+  IMAGE_FILE_DEBUG_STRIPPED = 0x0200,
+  /// If the image is on removable media, fully load it and copy it to swap.
+  IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400,
+  /// If the image is on network media, fully load it and copy it to swap.
+  IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800,
+  /// The image file is a system file, not a user program.
+  IMAGE_FILE_SYSTEM = 0x1000,
+  /// The image file is a DLL.
+  IMAGE_FILE_DLL = 0x2000,
+  /// This file should only be run on a uniprocessor machine.
+  IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000,
+  /// Big endian: the MSB precedes the LSB in memory. This is deprecated
+  /// and should be 0.
+  IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
+};
+
+enum ResourceTypeID : unsigned {
+  RID_Cursor = 1,
+  RID_Bitmap = 2,
+  RID_Icon = 3,
+  RID_Menu = 4,
+  RID_Dialog = 5,
+  RID_String = 6,
+  RID_FontDir = 7,
+  RID_Font = 8,
+  RID_Accelerator = 9,
+  RID_RCData = 10,
+  RID_MessageTable = 11,
+  RID_Group_Cursor = 12,
+  RID_Group_Icon = 14,
+  RID_Version = 16,
+  RID_DLGInclude = 17,
+  RID_PlugPlay = 19,
+  RID_VXD = 20,
+  RID_AniCursor = 21,
+  RID_AniIcon = 22,
+  RID_HTML = 23,
+  RID_Manifest = 24,
+};
+
+struct symbol {
+  char Name[NameSize];
+  uint32_t Value;
+  int32_t SectionNumber;
+  uint16_t Type;
+  uint8_t StorageClass;
+  uint8_t NumberOfAuxSymbols;
+};
+
+enum SymbolSectionNumber : int32_t {
+  IMAGE_SYM_DEBUG = -2,
+  IMAGE_SYM_ABSOLUTE = -1,
+  IMAGE_SYM_UNDEFINED = 0
+};
+
+/// The storage class tells where and what the symbol represents.
+enum SymbolStorageClass {
+  SSC_Invalid = 0xff,
+
+  IMAGE_SYM_CLASS_END_OF_FUNCTION = -1,  ///< Physical end of function
+  IMAGE_SYM_CLASS_NULL = 0,              ///< No symbol
+  IMAGE_SYM_CLASS_AUTOMATIC = 1,         ///< Stack variable
+  IMAGE_SYM_CLASS_EXTERNAL = 2,          ///< External symbol
+  IMAGE_SYM_CLASS_STATIC = 3,            ///< Static
+  IMAGE_SYM_CLASS_REGISTER = 4,          ///< Register variable
+  IMAGE_SYM_CLASS_EXTERNAL_DEF = 5,      ///< External definition
+  IMAGE_SYM_CLASS_LABEL = 6,             ///< Label
+  IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7,   ///< Undefined label
+  IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8,  ///< Member of structure
+  IMAGE_SYM_CLASS_ARGUMENT = 9,          ///< Function argument
+  IMAGE_SYM_CLASS_STRUCT_TAG = 10,       ///< Structure tag
+  IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11,  ///< Member of union
+  IMAGE_SYM_CLASS_UNION_TAG = 12,        ///< Union tag
+  IMAGE_SYM_CLASS_TYPE_DEFINITION = 13,  ///< Type definition
+  IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static
+  IMAGE_SYM_CLASS_ENUM_TAG = 15,         ///< Enumeration tag
+  IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16,   ///< Member of enumeration
+  IMAGE_SYM_CLASS_REGISTER_PARAM = 17,   ///< Register parameter
+  IMAGE_SYM_CLASS_BIT_FIELD = 18,        ///< Bit field
+  /// ".bb" or ".eb" - beginning or end of block
+  IMAGE_SYM_CLASS_BLOCK = 100,
+  /// ".bf" or ".ef" - beginning or end of function
+  IMAGE_SYM_CLASS_FUNCTION = 101,
+  IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure
+  IMAGE_SYM_CLASS_FILE = 103,          ///< File name
+  /// Line number, reformatted as symbol
+  IMAGE_SYM_CLASS_SECTION = 104,
+  IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag
+  /// External symbol in dmert public lib
+  IMAGE_SYM_CLASS_CLR_TOKEN = 107
+};
+
+enum SymbolBaseType : unsigned {
+  IMAGE_SYM_TYPE_NULL = 0,   ///< No type information or unknown base type.
+  IMAGE_SYM_TYPE_VOID = 1,   ///< Used with void pointers and functions.
+  IMAGE_SYM_TYPE_CHAR = 2,   ///< A character (signed byte).
+  IMAGE_SYM_TYPE_SHORT = 3,  ///< A 2-byte signed integer.
+  IMAGE_SYM_TYPE_INT = 4,    ///< A natural integer type on the target.
+  IMAGE_SYM_TYPE_LONG = 5,   ///< A 4-byte signed integer.
+  IMAGE_SYM_TYPE_FLOAT = 6,  ///< A 4-byte floating-point number.
+  IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number.
+  IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure.
+  IMAGE_SYM_TYPE_UNION = 9,  ///< A union.
+  IMAGE_SYM_TYPE_ENUM = 10,  ///< An enumerated type.
+  IMAGE_SYM_TYPE_MOE = 11,   ///< A member of enumeration (a specific value).
+  IMAGE_SYM_TYPE_BYTE = 12,  ///< A byte; unsigned 1-byte integer.
+  IMAGE_SYM_TYPE_WORD = 13,  ///< A word; unsigned 2-byte integer.
+  IMAGE_SYM_TYPE_UINT = 14,  ///< An unsigned integer of natural size.
+  IMAGE_SYM_TYPE_DWORD = 15  ///< An unsigned 4-byte integer.
+};
+
+enum SymbolComplexType : unsigned {
+  IMAGE_SYM_DTYPE_NULL = 0,     ///< No complex type; simple scalar variable.
+  IMAGE_SYM_DTYPE_POINTER = 1,  ///< A pointer to base type.
+  IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
+  IMAGE_SYM_DTYPE_ARRAY = 3,    ///< An array of base type.
+
+  /// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
+  SCT_COMPLEX_TYPE_SHIFT = 4
+};
+
+enum AuxSymbolType { IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF = 1 };
+
+struct section {
+  char Name[NameSize];
+  uint32_t VirtualSize;
+  uint32_t VirtualAddress;
+  uint32_t SizeOfRawData;
+  uint32_t PointerToRawData;
+  uint32_t PointerToRelocations;
+  uint32_t PointerToLineNumbers;
+  uint16_t NumberOfRelocations;
+  uint16_t NumberOfLineNumbers;
+  uint32_t Characteristics;
+};
+
+enum SectionCharacteristics : uint32_t {
+  SC_Invalid = 0xffffffff,
+
+  IMAGE_SCN_TYPE_NOLOAD = 0x00000002,
+  IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
+  IMAGE_SCN_CNT_CODE = 0x00000020,
+  IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
+  IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
+  IMAGE_SCN_LNK_OTHER = 0x00000100,
+  IMAGE_SCN_LNK_INFO = 0x00000200,
+  IMAGE_SCN_LNK_REMOVE = 0x00000800,
+  IMAGE_SCN_LNK_COMDAT = 0x00001000,
+  IMAGE_SCN_GPREL = 0x00008000,
+  IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
+  IMAGE_SCN_MEM_16BIT = 0x00020000,
+  IMAGE_SCN_MEM_LOCKED = 0x00040000,
+  IMAGE_SCN_MEM_PRELOAD = 0x00080000,
+  IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
+  IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
+  IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
+  IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
+  IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
+  IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
+  IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
+  IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
+  IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
+  IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
+  IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
+  IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
+  IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
+  IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
+  IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
+  IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
+  IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
+  IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
+  IMAGE_SCN_MEM_SHARED = 0x10000000,
+  IMAGE_SCN_MEM_EXECUTE = 0x20000000,
+  IMAGE_SCN_MEM_READ = 0x40000000,
+  IMAGE_SCN_MEM_WRITE = 0x80000000
+};
+
+struct relocation {
+  uint32_t VirtualAddress;
+  uint32_t SymbolTableIndex;
+  uint16_t Type;
+};
+
+enum RelocationTypeI386 : unsigned {
+  IMAGE_REL_I386_ABSOLUTE = 0x0000,
+  IMAGE_REL_I386_DIR16 = 0x0001,
+  IMAGE_REL_I386_REL16 = 0x0002,
+  IMAGE_REL_I386_DIR32 = 0x0006,
+  IMAGE_REL_I386_DIR32NB = 0x0007,
+  IMAGE_REL_I386_SEG12 = 0x0009,
+  IMAGE_REL_I386_SECTION = 0x000A,
+  IMAGE_REL_I386_SECREL = 0x000B,
+  IMAGE_REL_I386_TOKEN = 0x000C,
+  IMAGE_REL_I386_SECREL7 = 0x000D,
+  IMAGE_REL_I386_REL32 = 0x0014
+};
+
+enum RelocationTypeAMD64 : unsigned {
+  IMAGE_REL_AMD64_ABSOLUTE = 0x0000,
+  IMAGE_REL_AMD64_ADDR64 = 0x0001,
+  IMAGE_REL_AMD64_ADDR32 = 0x0002,
+  IMAGE_REL_AMD64_ADDR32NB = 0x0003,
+  IMAGE_REL_AMD64_REL32 = 0x0004,
+  IMAGE_REL_AMD64_REL32_1 = 0x0005,
+  IMAGE_REL_AMD64_REL32_2 = 0x0006,
+  IMAGE_REL_AMD64_REL32_3 = 0x0007,
+  IMAGE_REL_AMD64_REL32_4 = 0x0008,
+  IMAGE_REL_AMD64_REL32_5 = 0x0009,
+  IMAGE_REL_AMD64_SECTION = 0x000A,
+  IMAGE_REL_AMD64_SECREL = 0x000B,
+  IMAGE_REL_AMD64_SECREL7 = 0x000C,
+  IMAGE_REL_AMD64_TOKEN = 0x000D,
+  IMAGE_REL_AMD64_SREL32 = 0x000E,
+  IMAGE_REL_AMD64_PAIR = 0x000F,
+  IMAGE_REL_AMD64_SSPAN32 = 0x0010
+};
+
+enum RelocationTypesARM : unsigned {
+  IMAGE_REL_ARM_ABSOLUTE = 0x0000,
+  IMAGE_REL_ARM_ADDR32 = 0x0001,
+  IMAGE_REL_ARM_ADDR32NB = 0x0002,
+  IMAGE_REL_ARM_BRANCH24 = 0x0003,
+  IMAGE_REL_ARM_BRANCH11 = 0x0004,
+  IMAGE_REL_ARM_TOKEN = 0x0005,
+  IMAGE_REL_ARM_BLX24 = 0x0008,
+  IMAGE_REL_ARM_BLX11 = 0x0009,
+  IMAGE_REL_ARM_SECTION = 0x000E,
+  IMAGE_REL_ARM_SECREL = 0x000F,
+  IMAGE_REL_ARM_MOV32A = 0x0010,
+  IMAGE_REL_ARM_MOV32T = 0x0011,
+  IMAGE_REL_ARM_BRANCH20T = 0x0012,
+  IMAGE_REL_ARM_BRANCH24T = 0x0014,
+  IMAGE_REL_ARM_BLX23T = 0x0015
+};
+
+enum RelocationTypesARM64 : unsigned {
+  IMAGE_REL_ARM64_ABSOLUTE = 0x0000,
+  IMAGE_REL_ARM64_ADDR32 = 0x0001,
+  IMAGE_REL_ARM64_ADDR32NB = 0x0002,
+  IMAGE_REL_ARM64_BRANCH26 = 0x0003,
+  IMAGE_REL_ARM64_PAGEBASE_REL21 = 0x0004,
+  IMAGE_REL_ARM64_REL21 = 0x0005,
+  IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006,
+  IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007,
+  IMAGE_REL_ARM64_SECREL = 0x0008,
+  IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009,
+  IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A,
+  IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B,
+  IMAGE_REL_ARM64_TOKEN = 0x000C,
+  IMAGE_REL_ARM64_SECTION = 0x000D,
+  IMAGE_REL_ARM64_ADDR64 = 0x000E,
+  IMAGE_REL_ARM64_BRANCH19 = 0x000F,
+  IMAGE_REL_ARM64_BRANCH14 = 0x0010,
+};
+
+enum COMDATType : unsigned {
+  IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
+  IMAGE_COMDAT_SELECT_ANY,
+  IMAGE_COMDAT_SELECT_SAME_SIZE,
+  IMAGE_COMDAT_SELECT_EXACT_MATCH,
+  IMAGE_COMDAT_SELECT_ASSOCIATIVE,
+  IMAGE_COMDAT_SELECT_LARGEST,
+  IMAGE_COMDAT_SELECT_NEWEST
+};
+
+// Auxiliary Symbol Formats
+struct AuxiliaryFunctionDefinition {
+  uint32_t TagIndex;
+  uint32_t TotalSize;
+  uint32_t PointerToLinenumber;
+  uint32_t PointerToNextFunction;
+  char unused[2];
+};
+
+struct AuxiliarybfAndefSymbol {
+  uint8_t unused1[4];
+  uint16_t Linenumber;
+  uint8_t unused2[6];
+  uint32_t PointerToNextFunction;
+  uint8_t unused3[2];
+};
+
+struct AuxiliaryWeakExternal {
+  uint32_t TagIndex;
+  uint32_t Characteristics;
+  uint8_t unused[10];
+};
+
+enum WeakExternalCharacteristics : unsigned {
+  IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1,
+  IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2,
+  IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
+};
+
+struct AuxiliarySectionDefinition {
+  uint32_t Length;
+  uint16_t NumberOfRelocations;
+  uint16_t NumberOfLinenumbers;
+  uint32_t CheckSum;
+  uint32_t Number;
+  uint8_t Selection;
+  char unused;
+};
+
+struct AuxiliaryCLRToken {
+  uint8_t AuxType;
+  uint8_t unused1;
+  uint32_t SymbolTableIndex;
+  char unused2[12];
+};
+
+union Auxiliary {
+  AuxiliaryFunctionDefinition FunctionDefinition;
+  AuxiliarybfAndefSymbol bfAndefSymbol;
+  AuxiliaryWeakExternal WeakExternal;
+  AuxiliarySectionDefinition SectionDefinition;
+};
+
+/// @brief The Import Directory Table.
+///
+/// There is a single array of these and one entry per imported DLL.
+struct ImportDirectoryTableEntry {
+  uint32_t ImportLookupTableRVA;
+  uint32_t TimeDateStamp;
+  uint32_t ForwarderChain;
+  uint32_t NameRVA;
+  uint32_t ImportAddressTableRVA;
+};
+
+/// @brief The PE32 Import Lookup Table.
+///
+/// There is an array of these for each imported DLL. It represents either
+/// the ordinal to import from the target DLL, or a name to lookup and import
+/// from the target DLL.
+///
+/// This also happens to be the same format used by the Import Address Table
+/// when it is initially written out to the image.
+struct ImportLookupTableEntry32 {
+  uint32_t data;
+
+  /// @brief Is this entry specified by ordinal, or name?
+  bool isOrdinal() const { return data & 0x80000000; }
+
+  /// @brief Get the ordinal value of this entry. isOrdinal must be true.
+  uint16_t getOrdinal() const {
+    assert(isOrdinal() && "ILT entry is not an ordinal!");
+    return data & 0xFFFF;
+  }
+
+  /// @brief Set the ordinal value and set isOrdinal to true.
+  void setOrdinal(uint16_t o) {
+    data = o;
+    data |= 0x80000000;
+  }
+
+  /// @brief Get the Hint/Name entry RVA. isOrdinal must be false.
+  uint32_t getHintNameRVA() const {
+    assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
+    return data;
+  }
+
+  /// @brief Set the Hint/Name entry RVA and set isOrdinal to false.
+  void setHintNameRVA(uint32_t rva) { data = rva; }
+};
+
+/// @brief The DOS compatible header at the front of all PEs.
+struct DOSHeader {
+  uint16_t Magic;
+  uint16_t UsedBytesInTheLastPage;
+  uint16_t FileSizeInPages;
+  uint16_t NumberOfRelocationItems;
+  uint16_t HeaderSizeInParagraphs;
+  uint16_t MinimumExtraParagraphs;
+  uint16_t MaximumExtraParagraphs;
+  uint16_t InitialRelativeSS;
+  uint16_t InitialSP;
+  uint16_t Checksum;
+  uint16_t InitialIP;
+  uint16_t InitialRelativeCS;
+  uint16_t AddressOfRelocationTable;
+  uint16_t OverlayNumber;
+  uint16_t Reserved[4];
+  uint16_t OEMid;
+  uint16_t OEMinfo;
+  uint16_t Reserved2[10];
+  uint32_t AddressOfNewExeHeader;
+};
+
+struct PE32Header {
+  enum { PE32 = 0x10b, PE32_PLUS = 0x20b };
+
+  uint16_t Magic;
+  uint8_t MajorLinkerVersion;
+  uint8_t MinorLinkerVersion;
+  uint32_t SizeOfCode;
+  uint32_t SizeOfInitializedData;
+  uint32_t SizeOfUninitializedData;
+  uint32_t AddressOfEntryPoint; // RVA
+  uint32_t BaseOfCode;          // RVA
+  uint32_t BaseOfData;          // RVA
+  uint32_t ImageBase;
+  uint32_t SectionAlignment;
+  uint32_t FileAlignment;
+  uint16_t MajorOperatingSystemVersion;
+  uint16_t MinorOperatingSystemVersion;
+  uint16_t MajorImageVersion;
+  uint16_t MinorImageVersion;
+  uint16_t MajorSubsystemVersion;
+  uint16_t MinorSubsystemVersion;
+  uint32_t Win32VersionValue;
+  uint32_t SizeOfImage;
+  uint32_t SizeOfHeaders;
+  uint32_t CheckSum;
+  uint16_t Subsystem;
+  // FIXME: This should be DllCharacteristics to match the COFF spec.
+  uint16_t DLLCharacteristics;
+  uint32_t SizeOfStackReserve;
+  uint32_t SizeOfStackCommit;
+  uint32_t SizeOfHeapReserve;
+  uint32_t SizeOfHeapCommit;
+  uint32_t LoaderFlags;
+  // FIXME: This should be NumberOfRvaAndSizes to match the COFF spec.
+  uint32_t NumberOfRvaAndSize;
+};
+
+struct DataDirectory {
+  uint32_t RelativeVirtualAddress;
+  uint32_t Size;
+};
+
+enum DataDirectoryIndex : unsigned {
+  EXPORT_TABLE = 0,
+  IMPORT_TABLE,
+  RESOURCE_TABLE,
+  EXCEPTION_TABLE,
+  CERTIFICATE_TABLE,
+  BASE_RELOCATION_TABLE,
+  DEBUG_DIRECTORY,
+  ARCHITECTURE,
+  GLOBAL_PTR,
+  TLS_TABLE,
+  LOAD_CONFIG_TABLE,
+  BOUND_IMPORT,
+  IAT,
+  DELAY_IMPORT_DESCRIPTOR,
+  CLR_RUNTIME_HEADER,
+
+  NUM_DATA_DIRECTORIES
+};
+
+enum WindowsSubsystem : unsigned {
+  IMAGE_SUBSYSTEM_UNKNOWN = 0, ///< An unknown subsystem.
+  IMAGE_SUBSYSTEM_NATIVE = 1,  ///< Device drivers and native Windows processes.
+  IMAGE_SUBSYSTEM_WINDOWS_GUI = 2,      ///< The Windows GUI subsystem.
+  IMAGE_SUBSYSTEM_WINDOWS_CUI = 3,      ///< The Windows character subsystem.
+  IMAGE_SUBSYSTEM_OS2_CUI = 5,          ///< The OS/2 character subsystem.
+  IMAGE_SUBSYSTEM_POSIX_CUI = 7,        ///< The POSIX character subsystem.
+  IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8,   ///< Native Windows 9x driver.
+  IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9,   ///< Windows CE.
+  IMAGE_SUBSYSTEM_EFI_APPLICATION = 10, ///< An EFI application.
+  IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11, ///< An EFI driver with boot
+                                                ///  services.
+  IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12,      ///< An EFI driver with run-time
+                                                ///  services.
+  IMAGE_SUBSYSTEM_EFI_ROM = 13,                 ///< An EFI ROM image.
+  IMAGE_SUBSYSTEM_XBOX = 14,                    ///< XBOX.
+  IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 ///< A BCD application.
+};
+
+enum DLLCharacteristics : unsigned {
+  /// ASLR with 64-bit address space.
+  IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020,
+  /// DLL can be relocated at load time.
+  IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040,
+  /// Code integrity checks are enforced.
+  IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY = 0x0080,
+  /// Image is NX compatible.
+  IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100,
+  /// Isolation aware, but do not isolate the image.
+  IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION = 0x0200,
+  /// Does not use structured exception handling (SEH). No SEH handler may be
+  /// called in this image.
+  IMAGE_DLL_CHARACTERISTICS_NO_SEH = 0x0400,
+  /// Do not bind the image.
+  IMAGE_DLL_CHARACTERISTICS_NO_BIND = 0x0800,
+  /// Image should execute in an AppContainer.
+  IMAGE_DLL_CHARACTERISTICS_APPCONTAINER = 0x1000,
+  /// A WDM driver.
+  IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER = 0x2000,
+  /// Image supports Control Flow Guard.
+  IMAGE_DLL_CHARACTERISTICS_GUARD_CF = 0x4000,
+  /// Terminal Server aware.
+  IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
+};
+
+enum DebugType : unsigned {
+  IMAGE_DEBUG_TYPE_UNKNOWN = 0,
+  IMAGE_DEBUG_TYPE_COFF = 1,
+  IMAGE_DEBUG_TYPE_CODEVIEW = 2,
+  IMAGE_DEBUG_TYPE_FPO = 3,
+  IMAGE_DEBUG_TYPE_MISC = 4,
+  IMAGE_DEBUG_TYPE_EXCEPTION = 5,
+  IMAGE_DEBUG_TYPE_FIXUP = 6,
+  IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7,
+  IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8,
+  IMAGE_DEBUG_TYPE_BORLAND = 9,
+  IMAGE_DEBUG_TYPE_RESERVED10 = 10,
+  IMAGE_DEBUG_TYPE_CLSID = 11,
+  IMAGE_DEBUG_TYPE_VC_FEATURE = 12,
+  IMAGE_DEBUG_TYPE_POGO = 13,
+  IMAGE_DEBUG_TYPE_ILTCG = 14,
+  IMAGE_DEBUG_TYPE_MPX = 15,
+  IMAGE_DEBUG_TYPE_REPRO = 16,
+};
+
+enum BaseRelocationType : unsigned {
+  IMAGE_REL_BASED_ABSOLUTE = 0,
+  IMAGE_REL_BASED_HIGH = 1,
+  IMAGE_REL_BASED_LOW = 2,
+  IMAGE_REL_BASED_HIGHLOW = 3,
+  IMAGE_REL_BASED_HIGHADJ = 4,
+  IMAGE_REL_BASED_MIPS_JMPADDR = 5,
+  IMAGE_REL_BASED_ARM_MOV32A = 5,
+  IMAGE_REL_BASED_ARM_MOV32T = 7,
+  IMAGE_REL_BASED_MIPS_JMPADDR16 = 9,
+  IMAGE_REL_BASED_DIR64 = 10
+};
+
+enum ImportType : unsigned {
+  IMPORT_CODE = 0,
+  IMPORT_DATA = 1,
+  IMPORT_CONST = 2
+};
+
+enum ImportNameType : unsigned {
+  /// Import is by ordinal. This indicates that the value in the Ordinal/Hint
+  /// field of the import header is the import's ordinal. If this constant is
+  /// not specified, then the Ordinal/Hint field should always be interpreted
+  /// as the import's hint.
+  IMPORT_ORDINAL = 0,
+  /// The import name is identical to the public symbol name
+  IMPORT_NAME = 1,
+  /// The import name is the public symbol name, but skipping the leading ?,
+  /// @, or optionally _.
+  IMPORT_NAME_NOPREFIX = 2,
+  /// The import name is the public symbol name, but skipping the leading ?,
+  /// @, or optionally _, and truncating at the first @.
+  IMPORT_NAME_UNDECORATE = 3
+};
+
+struct ImportHeader {
+  uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
+  uint16_t Sig2; ///< Must be 0xFFFF.
+  uint16_t Version;
+  uint16_t Machine;
+  uint32_t TimeDateStamp;
+  uint32_t SizeOfData;
+  uint16_t OrdinalHint;
+  uint16_t TypeInfo;
+
+  ImportType getType() const { return static_cast<ImportType>(TypeInfo & 0x3); }
+
+  ImportNameType getNameType() const {
+    return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 2);
+  }
+};
+
+enum CodeViewIdentifiers {
+  DEBUG_SECTION_MAGIC = 0x4,
+  DEBUG_HASHES_SECTION_MAGIC = 0x133C9C5
+};
+
+inline bool isReservedSectionNumber(int32_t SectionNumber) {
+  return SectionNumber <= 0;
+}
+
+} // End namespace COFF.
+} // End namespace llvm.
+
+#endif
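A short sketch of the bit-level encodings defined above; all concrete values
are arbitrary assumptions. The high bit of an import lookup entry selects
ordinal versus Hint/Name import, ImportHeader::TypeInfo packs the import type
and name type, and symbol types compose per the SCT_COMPLEX_TYPE_SHIFT
formula:

#include "llvm/BinaryFormat/COFF.h"
#include <cassert>

void coffSketch() {
  using namespace llvm::COFF;

  // Ordinal bit set: the low 16 bits carry the ordinal.
  ImportLookupTableEntry32 E;
  E.setOrdinal(42);
  assert(E.isOrdinal() && E.getOrdinal() == 42);
  // Assigning an RVA below 2^31 leaves the ordinal bit clear.
  E.setHintNameRVA(0x2000);
  assert(!E.isOrdinal() && E.getHintNameRVA() == 0x2000);

  // TypeInfo packs ImportType in bits 0-1, ImportNameType in bits 2-4.
  ImportHeader H = {};
  H.TypeInfo = IMPORT_DATA | (IMPORT_NAME_UNDECORATE << 2);
  assert(H.getType() == IMPORT_DATA);
  assert(H.getNameType() == IMPORT_NAME_UNDECORATE);

  // A function returning int: base + (derived << SCT_COMPLEX_TYPE_SHIFT).
  unsigned SymTy =
      IMAGE_SYM_TYPE_INT + (IMAGE_SYM_DTYPE_FUNCTION << SCT_COMPLEX_TYPE_SHIFT);
  assert(SymTy == 0x24);
  (void)SymTy;
}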
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.def b/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.def
new file mode 100644
index 0000000..57e2596
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.def
@@ -0,0 +1,910 @@
+//===- llvm/BinaryFormat/Dwarf.def - Dwarf definitions ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through Dwarf enumerators.
+//
+//===----------------------------------------------------------------------===//
+
+// TODO: Add other DW-based macros.
+#if !(                                                                         \
+    defined HANDLE_DW_TAG || defined HANDLE_DW_AT || defined HANDLE_DW_FORM || \
+    defined HANDLE_DW_OP || defined HANDLE_DW_LANG || defined HANDLE_DW_ATE || \
+    defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED ||             \
+    defined HANDLE_DW_CC || defined HANDLE_DW_LNS || defined HANDLE_DW_LNE ||  \
+    defined HANDLE_DW_LNCT || defined HANDLE_DW_MACRO ||                       \
+    defined HANDLE_DW_RLE || defined HANDLE_DW_CFA ||                          \
+    defined HANDLE_DW_APPLE_PROPERTY || defined HANDLE_DW_UT ||                \
+    defined HANDLE_DWARF_SECTION || defined HANDLE_DW_IDX)
+#error "Missing macro definition of HANDLE_DW*"
+#endif
+
+#ifndef HANDLE_DW_TAG
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_AT
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_FORM
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_OP
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_LANG
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_ATE
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR)
+#endif
+
+#ifndef HANDLE_DW_VIRTUALITY
+#define HANDLE_DW_VIRTUALITY(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_DEFAULTED
+#define HANDLE_DW_DEFAULTED(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_CC
+#define HANDLE_DW_CC(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_LNS
+#define HANDLE_DW_LNS(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_LNE
+#define HANDLE_DW_LNE(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_LNCT
+#define HANDLE_DW_LNCT(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_MACRO
+#define HANDLE_DW_MACRO(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_RLE
+#define HANDLE_DW_RLE(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_CFA
+#define HANDLE_DW_CFA(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_APPLE_PROPERTY
+#define HANDLE_DW_APPLE_PROPERTY(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_UT
+#define HANDLE_DW_UT(ID, NAME)
+#endif
+
+#ifndef HANDLE_DWARF_SECTION
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME)
+#endif
+
+#ifndef HANDLE_DW_IDX
+#define HANDLE_DW_IDX(ID, NAME)
+#endif
+
+HANDLE_DW_TAG(0x0000, null, 2, DWARF)
+HANDLE_DW_TAG(0x0001, array_type, 2, DWARF)
+HANDLE_DW_TAG(0x0002, class_type, 2, DWARF)
+HANDLE_DW_TAG(0x0003, entry_point, 2, DWARF)
+HANDLE_DW_TAG(0x0004, enumeration_type, 2, DWARF)
+HANDLE_DW_TAG(0x0005, formal_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0008, imported_declaration, 2, DWARF)
+HANDLE_DW_TAG(0x000a, label, 2, DWARF)
+HANDLE_DW_TAG(0x000b, lexical_block, 2, DWARF)
+HANDLE_DW_TAG(0x000d, member, 2, DWARF)
+HANDLE_DW_TAG(0x000f, pointer_type, 2, DWARF)
+HANDLE_DW_TAG(0x0010, reference_type, 2, DWARF)
+HANDLE_DW_TAG(0x0011, compile_unit, 2, DWARF)
+HANDLE_DW_TAG(0x0012, string_type, 2, DWARF)
+HANDLE_DW_TAG(0x0013, structure_type, 2, DWARF)
+HANDLE_DW_TAG(0x0015, subroutine_type, 2, DWARF)
+HANDLE_DW_TAG(0x0016, typedef, 2, DWARF)
+HANDLE_DW_TAG(0x0017, union_type, 2, DWARF)
+HANDLE_DW_TAG(0x0018, unspecified_parameters, 2, DWARF)
+HANDLE_DW_TAG(0x0019, variant, 2, DWARF)
+HANDLE_DW_TAG(0x001a, common_block, 2, DWARF)
+HANDLE_DW_TAG(0x001b, common_inclusion, 2, DWARF)
+HANDLE_DW_TAG(0x001c, inheritance, 2, DWARF)
+HANDLE_DW_TAG(0x001d, inlined_subroutine, 2, DWARF)
+HANDLE_DW_TAG(0x001e, module, 2, DWARF)
+HANDLE_DW_TAG(0x001f, ptr_to_member_type, 2, DWARF)
+HANDLE_DW_TAG(0x0020, set_type, 2, DWARF)
+HANDLE_DW_TAG(0x0021, subrange_type, 2, DWARF)
+HANDLE_DW_TAG(0x0022, with_stmt, 2, DWARF)
+HANDLE_DW_TAG(0x0023, access_declaration, 2, DWARF)
+HANDLE_DW_TAG(0x0024, base_type, 2, DWARF)
+HANDLE_DW_TAG(0x0025, catch_block, 2, DWARF)
+HANDLE_DW_TAG(0x0026, const_type, 2, DWARF)
+HANDLE_DW_TAG(0x0027, constant, 2, DWARF)
+HANDLE_DW_TAG(0x0028, enumerator, 2, DWARF)
+HANDLE_DW_TAG(0x0029, file_type, 2, DWARF)
+HANDLE_DW_TAG(0x002a, friend, 2, DWARF)
+HANDLE_DW_TAG(0x002b, namelist, 2, DWARF)
+HANDLE_DW_TAG(0x002c, namelist_item, 2, DWARF)
+HANDLE_DW_TAG(0x002d, packed_type, 2, DWARF)
+HANDLE_DW_TAG(0x002e, subprogram, 2, DWARF)
+HANDLE_DW_TAG(0x002f, template_type_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0030, template_value_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0031, thrown_type, 2, DWARF)
+HANDLE_DW_TAG(0x0032, try_block, 2, DWARF)
+HANDLE_DW_TAG(0x0033, variant_part, 2, DWARF)
+HANDLE_DW_TAG(0x0034, variable, 2, DWARF)
+HANDLE_DW_TAG(0x0035, volatile_type, 2, DWARF)
+// New in DWARF v3:
+HANDLE_DW_TAG(0x0036, dwarf_procedure, 3, DWARF)
+HANDLE_DW_TAG(0x0037, restrict_type, 3, DWARF)
+HANDLE_DW_TAG(0x0038, interface_type, 3, DWARF)
+HANDLE_DW_TAG(0x0039, namespace, 3, DWARF)
+HANDLE_DW_TAG(0x003a, imported_module, 3, DWARF)
+HANDLE_DW_TAG(0x003b, unspecified_type, 3, DWARF)
+HANDLE_DW_TAG(0x003c, partial_unit, 3, DWARF)
+HANDLE_DW_TAG(0x003d, imported_unit, 3, DWARF)
+HANDLE_DW_TAG(0x003f, condition, 3, DWARF)
+HANDLE_DW_TAG(0x0040, shared_type, 3, DWARF)
+// New in DWARF v4:
+HANDLE_DW_TAG(0x0041, type_unit, 4, DWARF)
+HANDLE_DW_TAG(0x0042, rvalue_reference_type, 4, DWARF)
+HANDLE_DW_TAG(0x0043, template_alias, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_TAG(0x0044, coarray_type, 5, DWARF)
+HANDLE_DW_TAG(0x0045, generic_subrange, 5, DWARF)
+HANDLE_DW_TAG(0x0046, dynamic_type, 5, DWARF)
+HANDLE_DW_TAG(0x0047, atomic_type, 5, DWARF)
+HANDLE_DW_TAG(0x0048, call_site, 5, DWARF)
+HANDLE_DW_TAG(0x0049, call_site_parameter, 5, DWARF)
+HANDLE_DW_TAG(0x004a, skeleton_unit, 5, DWARF)
+HANDLE_DW_TAG(0x004b, immutable_type, 5, DWARF)
+// Vendor extensions:
+HANDLE_DW_TAG(0x4081, MIPS_loop, 0, MIPS)
+HANDLE_DW_TAG(0x4101, format_label, 0, GNU)
+HANDLE_DW_TAG(0x4102, function_template, 0, GNU)
+HANDLE_DW_TAG(0x4103, class_template, 0, GNU)
+HANDLE_DW_TAG(0x4106, GNU_template_template_param, 0, GNU)
+HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack, 0, GNU)
+HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack, 0, GNU)
+HANDLE_DW_TAG(0x4109, GNU_call_site, 0, GNU)
+HANDLE_DW_TAG(0x410a, GNU_call_site_parameter, 0, GNU)
+HANDLE_DW_TAG(0x4200, APPLE_property, 0, APPLE)
+HANDLE_DW_TAG(0xb000, BORLAND_property, 0, BORLAND)
+HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string, 0, BORLAND)
+HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array, 0, BORLAND)
+HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set, 0, BORLAND)
+HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant, 0, BORLAND)
+
+// Attributes.
+HANDLE_DW_AT(0x01, sibling, 2, DWARF)
+HANDLE_DW_AT(0x02, location, 2, DWARF)
+HANDLE_DW_AT(0x03, name, 2, DWARF)
+HANDLE_DW_AT(0x09, ordering, 2, DWARF)
+HANDLE_DW_AT(0x0b, byte_size, 2, DWARF)
+HANDLE_DW_AT(0x0c, bit_offset, 2, DWARF)
+HANDLE_DW_AT(0x0d, bit_size, 2, DWARF)
+HANDLE_DW_AT(0x10, stmt_list, 2, DWARF)
+HANDLE_DW_AT(0x11, low_pc, 2, DWARF)
+HANDLE_DW_AT(0x12, high_pc, 2, DWARF)
+HANDLE_DW_AT(0x13, language, 2, DWARF)
+HANDLE_DW_AT(0x15, discr, 2, DWARF)
+HANDLE_DW_AT(0x16, discr_value, 2, DWARF)
+HANDLE_DW_AT(0x17, visibility, 2, DWARF)
+HANDLE_DW_AT(0x18, import, 2, DWARF)
+HANDLE_DW_AT(0x19, string_length, 2, DWARF)
+HANDLE_DW_AT(0x1a, common_reference, 2, DWARF)
+HANDLE_DW_AT(0x1b, comp_dir, 2, DWARF)
+HANDLE_DW_AT(0x1c, const_value, 2, DWARF)
+HANDLE_DW_AT(0x1d, containing_type, 2, DWARF)
+HANDLE_DW_AT(0x1e, default_value, 2, DWARF)
+HANDLE_DW_AT(0x20, inline, 2, DWARF)
+HANDLE_DW_AT(0x21, is_optional, 2, DWARF)
+HANDLE_DW_AT(0x22, lower_bound, 2, DWARF)
+HANDLE_DW_AT(0x25, producer, 2, DWARF)
+HANDLE_DW_AT(0x27, prototyped, 2, DWARF)
+HANDLE_DW_AT(0x2a, return_addr, 2, DWARF)
+HANDLE_DW_AT(0x2c, start_scope, 2, DWARF)
+HANDLE_DW_AT(0x2e, bit_stride, 2, DWARF)
+HANDLE_DW_AT(0x2f, upper_bound, 2, DWARF)
+HANDLE_DW_AT(0x31, abstract_origin, 2, DWARF)
+HANDLE_DW_AT(0x32, accessibility, 2, DWARF)
+HANDLE_DW_AT(0x33, address_class, 2, DWARF)
+HANDLE_DW_AT(0x34, artificial, 2, DWARF)
+HANDLE_DW_AT(0x35, base_types, 2, DWARF)
+HANDLE_DW_AT(0x36, calling_convention, 2, DWARF)
+HANDLE_DW_AT(0x37, count, 2, DWARF)
+HANDLE_DW_AT(0x38, data_member_location, 2, DWARF)
+HANDLE_DW_AT(0x39, decl_column, 2, DWARF)
+HANDLE_DW_AT(0x3a, decl_file, 2, DWARF)
+HANDLE_DW_AT(0x3b, decl_line, 2, DWARF)
+HANDLE_DW_AT(0x3c, declaration, 2, DWARF)
+HANDLE_DW_AT(0x3d, discr_list, 2, DWARF)
+HANDLE_DW_AT(0x3e, encoding, 2, DWARF)
+HANDLE_DW_AT(0x3f, external, 2, DWARF)
+HANDLE_DW_AT(0x40, frame_base, 2, DWARF)
+HANDLE_DW_AT(0x41, friend, 2, DWARF)
+HANDLE_DW_AT(0x42, identifier_case, 2, DWARF)
+HANDLE_DW_AT(0x43, macro_info, 2, DWARF)
+HANDLE_DW_AT(0x44, namelist_item, 2, DWARF)
+HANDLE_DW_AT(0x45, priority, 2, DWARF)
+HANDLE_DW_AT(0x46, segment, 2, DWARF)
+HANDLE_DW_AT(0x47, specification, 2, DWARF)
+HANDLE_DW_AT(0x48, static_link, 2, DWARF)
+HANDLE_DW_AT(0x49, type, 2, DWARF)
+HANDLE_DW_AT(0x4a, use_location, 2, DWARF)
+HANDLE_DW_AT(0x4b, variable_parameter, 2, DWARF)
+HANDLE_DW_AT(0x4c, virtuality, 2, DWARF)
+HANDLE_DW_AT(0x4d, vtable_elem_location, 2, DWARF)
+// New in DWARF v3:
+HANDLE_DW_AT(0x4e, allocated, 3, DWARF)
+HANDLE_DW_AT(0x4f, associated, 3, DWARF)
+HANDLE_DW_AT(0x50, data_location, 3, DWARF)
+HANDLE_DW_AT(0x51, byte_stride, 3, DWARF)
+HANDLE_DW_AT(0x52, entry_pc, 3, DWARF)
+HANDLE_DW_AT(0x53, use_UTF8, 3, DWARF)
+HANDLE_DW_AT(0x54, extension, 3, DWARF)
+HANDLE_DW_AT(0x55, ranges, 3, DWARF)
+HANDLE_DW_AT(0x56, trampoline, 3, DWARF)
+HANDLE_DW_AT(0x57, call_column, 3, DWARF)
+HANDLE_DW_AT(0x58, call_file, 3, DWARF)
+HANDLE_DW_AT(0x59, call_line, 3, DWARF)
+HANDLE_DW_AT(0x5a, description, 3, DWARF)
+HANDLE_DW_AT(0x5b, binary_scale, 3, DWARF)
+HANDLE_DW_AT(0x5c, decimal_scale, 3, DWARF)
+HANDLE_DW_AT(0x5d, small, 3, DWARF)
+HANDLE_DW_AT(0x5e, decimal_sign, 3, DWARF)
+HANDLE_DW_AT(0x5f, digit_count, 3, DWARF)
+HANDLE_DW_AT(0x60, picture_string, 3, DWARF)
+HANDLE_DW_AT(0x61, mutable, 3, DWARF)
+HANDLE_DW_AT(0x62, threads_scaled, 3, DWARF)
+HANDLE_DW_AT(0x63, explicit, 3, DWARF)
+HANDLE_DW_AT(0x64, object_pointer, 3, DWARF)
+HANDLE_DW_AT(0x65, endianity, 3, DWARF)
+HANDLE_DW_AT(0x66, elemental, 3, DWARF)
+HANDLE_DW_AT(0x67, pure, 3, DWARF)
+HANDLE_DW_AT(0x68, recursive, 3, DWARF)
+// New in DWARF v4:
+HANDLE_DW_AT(0x69, signature, 4, DWARF)
+HANDLE_DW_AT(0x6a, main_subprogram, 4, DWARF)
+HANDLE_DW_AT(0x6b, data_bit_offset, 4, DWARF)
+HANDLE_DW_AT(0x6c, const_expr, 4, DWARF)
+HANDLE_DW_AT(0x6d, enum_class, 4, DWARF)
+HANDLE_DW_AT(0x6e, linkage_name, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_AT(0x6f, string_length_bit_size, 5, DWARF)
+HANDLE_DW_AT(0x70, string_length_byte_size, 5, DWARF)
+HANDLE_DW_AT(0x71, rank, 5, DWARF)
+HANDLE_DW_AT(0x72, str_offsets_base, 5, DWARF)
+HANDLE_DW_AT(0x73, addr_base, 5, DWARF)
+HANDLE_DW_AT(0x74, rnglists_base, 5, DWARF)
+HANDLE_DW_AT(0x75, dwo_id, 0, DWARF) ///< Retracted from DWARF v5.
+HANDLE_DW_AT(0x76, dwo_name, 5, DWARF)
+HANDLE_DW_AT(0x77, reference, 5, DWARF)
+HANDLE_DW_AT(0x78, rvalue_reference, 5, DWARF)
+HANDLE_DW_AT(0x79, macros, 5, DWARF)
+HANDLE_DW_AT(0x7a, call_all_calls, 5, DWARF)
+HANDLE_DW_AT(0x7b, call_all_source_calls, 5, DWARF)
+HANDLE_DW_AT(0x7c, call_all_tail_calls, 5, DWARF)
+HANDLE_DW_AT(0x7d, call_return_pc, 5, DWARF)
+HANDLE_DW_AT(0x7e, call_value, 5, DWARF)
+HANDLE_DW_AT(0x7f, call_origin, 5, DWARF)
+HANDLE_DW_AT(0x80, call_parameter, 5, DWARF)
+HANDLE_DW_AT(0x81, call_pc, 5, DWARF)
+HANDLE_DW_AT(0x82, call_tail_call, 5, DWARF)
+HANDLE_DW_AT(0x83, call_target, 5, DWARF)
+HANDLE_DW_AT(0x84, call_target_clobbered, 5, DWARF)
+HANDLE_DW_AT(0x85, call_data_location, 5, DWARF)
+HANDLE_DW_AT(0x86, call_data_value, 5, DWARF)
+HANDLE_DW_AT(0x87, noreturn, 5, DWARF)
+HANDLE_DW_AT(0x88, alignment, 5, DWARF)
+HANDLE_DW_AT(0x89, export_symbols, 5, DWARF)
+HANDLE_DW_AT(0x8a, deleted, 5, DWARF)
+HANDLE_DW_AT(0x8b, defaulted, 5, DWARF)
+HANDLE_DW_AT(0x8c, loclists_base, 5, DWARF)
+// Vendor extensions:
+HANDLE_DW_AT(0x2002, MIPS_loop_begin, 0, MIPS)
+HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin, 0, MIPS)
+HANDLE_DW_AT(0x2004, MIPS_epilog_begin, 0, MIPS)
+HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor, 0, MIPS)
+HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth, 0, MIPS)
+HANDLE_DW_AT(0x2007, MIPS_linkage_name, 0, MIPS)
+HANDLE_DW_AT(0x2008, MIPS_stride, 0, MIPS)
+HANDLE_DW_AT(0x2009, MIPS_abstract_name, 0, MIPS)
+HANDLE_DW_AT(0x200a, MIPS_clone_origin, 0, MIPS)
+HANDLE_DW_AT(0x200b, MIPS_has_inlines, 0, MIPS)
+HANDLE_DW_AT(0x200c, MIPS_stride_byte, 0, MIPS)
+HANDLE_DW_AT(0x200d, MIPS_stride_elem, 0, MIPS)
+HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype, 0, MIPS)
+HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype, 0, MIPS)
+HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype, 0, MIPS)
+// This one appears to have only been implemented by Open64 for
+// fortran and may conflict with other extensions.
+HANDLE_DW_AT(0x2011, MIPS_assumed_size, 0, MIPS)
+// GNU extensions
+HANDLE_DW_AT(0x2101, sf_names, 0, GNU)
+HANDLE_DW_AT(0x2102, src_info, 0, GNU)
+HANDLE_DW_AT(0x2103, mac_info, 0, GNU)
+HANDLE_DW_AT(0x2104, src_coords, 0, GNU)
+HANDLE_DW_AT(0x2105, body_begin, 0, GNU)
+HANDLE_DW_AT(0x2106, body_end, 0, GNU)
+HANDLE_DW_AT(0x2107, GNU_vector, 0, GNU)
+HANDLE_DW_AT(0x2110, GNU_template_name, 0, GNU)
+HANDLE_DW_AT(0x210f, GNU_odr_signature, 0, GNU)
+HANDLE_DW_AT(0x2111, GNU_call_site_value, 0, GNU)
+HANDLE_DW_AT(0x2117, GNU_all_call_sites, 0, GNU)
+HANDLE_DW_AT(0x2119, GNU_macros, 0, GNU)
+// Extensions for Fission proposal.
+HANDLE_DW_AT(0x2130, GNU_dwo_name, 0, GNU)
+HANDLE_DW_AT(0x2131, GNU_dwo_id, 0, GNU)
+HANDLE_DW_AT(0x2132, GNU_ranges_base, 0, GNU)
+HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU)
+HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU)
+HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU)
+HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU)
+// Borland extensions.
+HANDLE_DW_AT(0x3b11, BORLAND_property_read, 0, BORLAND)
+HANDLE_DW_AT(0x3b12, BORLAND_property_write, 0, BORLAND)
+HANDLE_DW_AT(0x3b13, BORLAND_property_implements, 0, BORLAND)
+HANDLE_DW_AT(0x3b14, BORLAND_property_index, 0, BORLAND)
+HANDLE_DW_AT(0x3b15, BORLAND_property_default, 0, BORLAND)
+HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit, 0, BORLAND)
+HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class, 0, BORLAND)
+HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record, 0, BORLAND)
+HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass, 0, BORLAND)
+HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor, 0, BORLAND)
+HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor, 0, BORLAND)
+HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method, 0, BORLAND)
+HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface, 0, BORLAND)
+HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI, 0, BORLAND)
+HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return, 0, BORLAND)
+HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr, 0, BORLAND)
+HANDLE_DW_AT(0x3b31, BORLAND_closure, 0, BORLAND)
+// LLVM project extensions.
+HANDLE_DW_AT(0x3e00, LLVM_include_path, 0, LLVM)
+HANDLE_DW_AT(0x3e01, LLVM_config_macros, 0, LLVM)
+HANDLE_DW_AT(0x3e02, LLVM_isysroot, 0, LLVM)
+// Apple extensions.
+HANDLE_DW_AT(0x3fe1, APPLE_optimized, 0, APPLE)
+HANDLE_DW_AT(0x3fe2, APPLE_flags, 0, APPLE)
+HANDLE_DW_AT(0x3fe3, APPLE_isa, 0, APPLE)
+HANDLE_DW_AT(0x3fe4, APPLE_block, 0, APPLE)
+HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers, 0, APPLE)
+HANDLE_DW_AT(0x3fe6, APPLE_runtime_class, 0, APPLE)
+HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr, 0, APPLE)
+HANDLE_DW_AT(0x3fe8, APPLE_property_name, 0, APPLE)
+HANDLE_DW_AT(0x3fe9, APPLE_property_getter, 0, APPLE)
+HANDLE_DW_AT(0x3fea, APPLE_property_setter, 0, APPLE)
+HANDLE_DW_AT(0x3feb, APPLE_property_attribute, 0, APPLE)
+HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type, 0, APPLE)
+HANDLE_DW_AT(0x3fed, APPLE_property, 0, APPLE)
+
+// Attribute form encodings.
+HANDLE_DW_FORM(0x01, addr, 2, DWARF)
+HANDLE_DW_FORM(0x03, block2, 2, DWARF)
+HANDLE_DW_FORM(0x04, block4, 2, DWARF)
+HANDLE_DW_FORM(0x05, data2, 2, DWARF)
+HANDLE_DW_FORM(0x06, data4, 2, DWARF)
+HANDLE_DW_FORM(0x07, data8, 2, DWARF)
+HANDLE_DW_FORM(0x08, string, 2, DWARF)
+HANDLE_DW_FORM(0x09, block, 2, DWARF)
+HANDLE_DW_FORM(0x0a, block1, 2, DWARF)
+HANDLE_DW_FORM(0x0b, data1, 2, DWARF)
+HANDLE_DW_FORM(0x0c, flag, 2, DWARF)
+HANDLE_DW_FORM(0x0d, sdata, 2, DWARF)
+HANDLE_DW_FORM(0x0e, strp, 2, DWARF)
+HANDLE_DW_FORM(0x0f, udata, 2, DWARF)
+HANDLE_DW_FORM(0x10, ref_addr, 2, DWARF)
+HANDLE_DW_FORM(0x11, ref1, 2, DWARF)
+HANDLE_DW_FORM(0x12, ref2, 2, DWARF)
+HANDLE_DW_FORM(0x13, ref4, 2, DWARF)
+HANDLE_DW_FORM(0x14, ref8, 2, DWARF)
+HANDLE_DW_FORM(0x15, ref_udata, 2, DWARF)
+HANDLE_DW_FORM(0x16, indirect, 2, DWARF)
+// New in DWARF v4:
+HANDLE_DW_FORM(0x17, sec_offset, 4, DWARF)
+HANDLE_DW_FORM(0x18, exprloc, 4, DWARF)
+HANDLE_DW_FORM(0x19, flag_present, 4, DWARF)
+// This was defined out of sequence.
+HANDLE_DW_FORM(0x20, ref_sig8, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_FORM(0x1a, strx, 5, DWARF)
+HANDLE_DW_FORM(0x1b, addrx, 5, DWARF)
+HANDLE_DW_FORM(0x1c, ref_sup4, 5, DWARF)
+HANDLE_DW_FORM(0x1d, strp_sup, 5, DWARF)
+HANDLE_DW_FORM(0x1e, data16, 5, DWARF)
+HANDLE_DW_FORM(0x1f, line_strp, 5, DWARF)
+HANDLE_DW_FORM(0x21, implicit_const, 5, DWARF)
+HANDLE_DW_FORM(0x22, loclistx, 5, DWARF)
+HANDLE_DW_FORM(0x23, rnglistx, 5, DWARF)
+HANDLE_DW_FORM(0x24, ref_sup8, 5, DWARF)
+HANDLE_DW_FORM(0x25, strx1, 5, DWARF)
+HANDLE_DW_FORM(0x26, strx2, 5, DWARF)
+HANDLE_DW_FORM(0x27, strx3, 5, DWARF)
+HANDLE_DW_FORM(0x28, strx4, 5, DWARF)
+HANDLE_DW_FORM(0x29, addrx1, 5, DWARF)
+HANDLE_DW_FORM(0x2a, addrx2, 5, DWARF)
+HANDLE_DW_FORM(0x2b, addrx3, 5, DWARF)
+HANDLE_DW_FORM(0x2c, addrx4, 5, DWARF)
+// Extensions for Fission proposal
+HANDLE_DW_FORM(0x1f01, GNU_addr_index, 0, GNU)
+HANDLE_DW_FORM(0x1f02, GNU_str_index, 0, GNU)
+// Alternate debug sections proposal (output of "dwz" tool).
+HANDLE_DW_FORM(0x1f20, GNU_ref_alt, 0, GNU)
+HANDLE_DW_FORM(0x1f21, GNU_strp_alt, 0, GNU)
+
+// DWARF Expression operators.
+HANDLE_DW_OP(0x03, addr, 2, DWARF)
+HANDLE_DW_OP(0x06, deref, 2, DWARF)
+HANDLE_DW_OP(0x08, const1u, 2, DWARF)
+HANDLE_DW_OP(0x09, const1s, 2, DWARF)
+HANDLE_DW_OP(0x0a, const2u, 2, DWARF)
+HANDLE_DW_OP(0x0b, const2s, 2, DWARF)
+HANDLE_DW_OP(0x0c, const4u, 2, DWARF)
+HANDLE_DW_OP(0x0d, const4s, 2, DWARF)
+HANDLE_DW_OP(0x0e, const8u, 2, DWARF)
+HANDLE_DW_OP(0x0f, const8s, 2, DWARF)
+HANDLE_DW_OP(0x10, constu, 2, DWARF)
+HANDLE_DW_OP(0x11, consts, 2, DWARF)
+HANDLE_DW_OP(0x12, dup, 2, DWARF)
+HANDLE_DW_OP(0x13, drop, 2, DWARF)
+HANDLE_DW_OP(0x14, over, 2, DWARF)
+HANDLE_DW_OP(0x15, pick, 2, DWARF)
+HANDLE_DW_OP(0x16, swap, 2, DWARF)
+HANDLE_DW_OP(0x17, rot, 2, DWARF)
+HANDLE_DW_OP(0x18, xderef, 2, DWARF)
+HANDLE_DW_OP(0x19, abs, 2, DWARF)
+HANDLE_DW_OP(0x1a, and, 2, DWARF)
+HANDLE_DW_OP(0x1b, div, 2, DWARF)
+HANDLE_DW_OP(0x1c, minus, 2, DWARF)
+HANDLE_DW_OP(0x1d, mod, 2, DWARF)
+HANDLE_DW_OP(0x1e, mul, 2, DWARF)
+HANDLE_DW_OP(0x1f, neg, 2, DWARF)
+HANDLE_DW_OP(0x20, not, 2, DWARF)
+HANDLE_DW_OP(0x21, or, 2, DWARF)
+HANDLE_DW_OP(0x22, plus, 2, DWARF)
+HANDLE_DW_OP(0x23, plus_uconst, 2, DWARF)
+HANDLE_DW_OP(0x24, shl, 2, DWARF)
+HANDLE_DW_OP(0x25, shr, 2, DWARF)
+HANDLE_DW_OP(0x26, shra, 2, DWARF)
+HANDLE_DW_OP(0x27, xor, 2, DWARF)
+HANDLE_DW_OP(0x28, bra, 2, DWARF)
+HANDLE_DW_OP(0x29, eq, 2, DWARF)
+HANDLE_DW_OP(0x2a, ge, 2, DWARF)
+HANDLE_DW_OP(0x2b, gt, 2, DWARF)
+HANDLE_DW_OP(0x2c, le, 2, DWARF)
+HANDLE_DW_OP(0x2d, lt, 2, DWARF)
+HANDLE_DW_OP(0x2e, ne, 2, DWARF)
+HANDLE_DW_OP(0x2f, skip, 2, DWARF)
+HANDLE_DW_OP(0x30, lit0, 2, DWARF)
+HANDLE_DW_OP(0x31, lit1, 2, DWARF)
+HANDLE_DW_OP(0x32, lit2, 2, DWARF)
+HANDLE_DW_OP(0x33, lit3, 2, DWARF)
+HANDLE_DW_OP(0x34, lit4, 2, DWARF)
+HANDLE_DW_OP(0x35, lit5, 2, DWARF)
+HANDLE_DW_OP(0x36, lit6, 2, DWARF)
+HANDLE_DW_OP(0x37, lit7, 2, DWARF)
+HANDLE_DW_OP(0x38, lit8, 2, DWARF)
+HANDLE_DW_OP(0x39, lit9, 2, DWARF)
+HANDLE_DW_OP(0x3a, lit10, 2, DWARF)
+HANDLE_DW_OP(0x3b, lit11, 2, DWARF)
+HANDLE_DW_OP(0x3c, lit12, 2, DWARF)
+HANDLE_DW_OP(0x3d, lit13, 2, DWARF)
+HANDLE_DW_OP(0x3e, lit14, 2, DWARF)
+HANDLE_DW_OP(0x3f, lit15, 2, DWARF)
+HANDLE_DW_OP(0x40, lit16, 2, DWARF)
+HANDLE_DW_OP(0x41, lit17, 2, DWARF)
+HANDLE_DW_OP(0x42, lit18, 2, DWARF)
+HANDLE_DW_OP(0x43, lit19, 2, DWARF)
+HANDLE_DW_OP(0x44, lit20, 2, DWARF)
+HANDLE_DW_OP(0x45, lit21, 2, DWARF)
+HANDLE_DW_OP(0x46, lit22, 2, DWARF)
+HANDLE_DW_OP(0x47, lit23, 2, DWARF)
+HANDLE_DW_OP(0x48, lit24, 2, DWARF)
+HANDLE_DW_OP(0x49, lit25, 2, DWARF)
+HANDLE_DW_OP(0x4a, lit26, 2, DWARF)
+HANDLE_DW_OP(0x4b, lit27, 2, DWARF)
+HANDLE_DW_OP(0x4c, lit28, 2, DWARF)
+HANDLE_DW_OP(0x4d, lit29, 2, DWARF)
+HANDLE_DW_OP(0x4e, lit30, 2, DWARF)
+HANDLE_DW_OP(0x4f, lit31, 2, DWARF)
+HANDLE_DW_OP(0x50, reg0, 2, DWARF)
+HANDLE_DW_OP(0x51, reg1, 2, DWARF)
+HANDLE_DW_OP(0x52, reg2, 2, DWARF)
+HANDLE_DW_OP(0x53, reg3, 2, DWARF)
+HANDLE_DW_OP(0x54, reg4, 2, DWARF)
+HANDLE_DW_OP(0x55, reg5, 2, DWARF)
+HANDLE_DW_OP(0x56, reg6, 2, DWARF)
+HANDLE_DW_OP(0x57, reg7, 2, DWARF)
+HANDLE_DW_OP(0x58, reg8, 2, DWARF)
+HANDLE_DW_OP(0x59, reg9, 2, DWARF)
+HANDLE_DW_OP(0x5a, reg10, 2, DWARF)
+HANDLE_DW_OP(0x5b, reg11, 2, DWARF)
+HANDLE_DW_OP(0x5c, reg12, 2, DWARF)
+HANDLE_DW_OP(0x5d, reg13, 2, DWARF)
+HANDLE_DW_OP(0x5e, reg14, 2, DWARF)
+HANDLE_DW_OP(0x5f, reg15, 2, DWARF)
+HANDLE_DW_OP(0x60, reg16, 2, DWARF)
+HANDLE_DW_OP(0x61, reg17, 2, DWARF)
+HANDLE_DW_OP(0x62, reg18, 2, DWARF)
+HANDLE_DW_OP(0x63, reg19, 2, DWARF)
+HANDLE_DW_OP(0x64, reg20, 2, DWARF)
+HANDLE_DW_OP(0x65, reg21, 2, DWARF)
+HANDLE_DW_OP(0x66, reg22, 2, DWARF)
+HANDLE_DW_OP(0x67, reg23, 2, DWARF)
+HANDLE_DW_OP(0x68, reg24, 2, DWARF)
+HANDLE_DW_OP(0x69, reg25, 2, DWARF)
+HANDLE_DW_OP(0x6a, reg26, 2, DWARF)
+HANDLE_DW_OP(0x6b, reg27, 2, DWARF)
+HANDLE_DW_OP(0x6c, reg28, 2, DWARF)
+HANDLE_DW_OP(0x6d, reg29, 2, DWARF)
+HANDLE_DW_OP(0x6e, reg30, 2, DWARF)
+HANDLE_DW_OP(0x6f, reg31, 2, DWARF)
+HANDLE_DW_OP(0x70, breg0, 2, DWARF)
+HANDLE_DW_OP(0x71, breg1, 2, DWARF)
+HANDLE_DW_OP(0x72, breg2, 2, DWARF)
+HANDLE_DW_OP(0x73, breg3, 2, DWARF)
+HANDLE_DW_OP(0x74, breg4, 2, DWARF)
+HANDLE_DW_OP(0x75, breg5, 2, DWARF)
+HANDLE_DW_OP(0x76, breg6, 2, DWARF)
+HANDLE_DW_OP(0x77, breg7, 2, DWARF)
+HANDLE_DW_OP(0x78, breg8, 2, DWARF)
+HANDLE_DW_OP(0x79, breg9, 2, DWARF)
+HANDLE_DW_OP(0x7a, breg10, 2, DWARF)
+HANDLE_DW_OP(0x7b, breg11, 2, DWARF)
+HANDLE_DW_OP(0x7c, breg12, 2, DWARF)
+HANDLE_DW_OP(0x7d, breg13, 2, DWARF)
+HANDLE_DW_OP(0x7e, breg14, 2, DWARF)
+HANDLE_DW_OP(0x7f, breg15, 2, DWARF)
+HANDLE_DW_OP(0x80, breg16, 2, DWARF)
+HANDLE_DW_OP(0x81, breg17, 2, DWARF)
+HANDLE_DW_OP(0x82, breg18, 2, DWARF)
+HANDLE_DW_OP(0x83, breg19, 2, DWARF)
+HANDLE_DW_OP(0x84, breg20, 2, DWARF)
+HANDLE_DW_OP(0x85, breg21, 2, DWARF)
+HANDLE_DW_OP(0x86, breg22, 2, DWARF)
+HANDLE_DW_OP(0x87, breg23, 2, DWARF)
+HANDLE_DW_OP(0x88, breg24, 2, DWARF)
+HANDLE_DW_OP(0x89, breg25, 2, DWARF)
+HANDLE_DW_OP(0x8a, breg26, 2, DWARF)
+HANDLE_DW_OP(0x8b, breg27, 2, DWARF)
+HANDLE_DW_OP(0x8c, breg28, 2, DWARF)
+HANDLE_DW_OP(0x8d, breg29, 2, DWARF)
+HANDLE_DW_OP(0x8e, breg30, 2, DWARF)
+HANDLE_DW_OP(0x8f, breg31, 2, DWARF)
+HANDLE_DW_OP(0x90, regx, 2, DWARF)
+HANDLE_DW_OP(0x91, fbreg, 2, DWARF)
+HANDLE_DW_OP(0x92, bregx, 2, DWARF)
+HANDLE_DW_OP(0x93, piece, 2, DWARF)
+HANDLE_DW_OP(0x94, deref_size, 2, DWARF)
+HANDLE_DW_OP(0x95, xderef_size, 2, DWARF)
+HANDLE_DW_OP(0x96, nop, 2, DWARF)
+// New in DWARF v3:
+HANDLE_DW_OP(0x97, push_object_address, 3, DWARF)
+HANDLE_DW_OP(0x98, call2, 3, DWARF)
+HANDLE_DW_OP(0x99, call4, 3, DWARF)
+HANDLE_DW_OP(0x9a, call_ref, 3, DWARF)
+HANDLE_DW_OP(0x9b, form_tls_address, 3, DWARF)
+HANDLE_DW_OP(0x9c, call_frame_cfa, 3, DWARF)
+HANDLE_DW_OP(0x9d, bit_piece, 3, DWARF)
+// New in DWARF v4:
+HANDLE_DW_OP(0x9e, implicit_value, 4, DWARF)
+HANDLE_DW_OP(0x9f, stack_value, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_OP(0xa0, implicit_pointer, 5, DWARF)
+HANDLE_DW_OP(0xa1, addrx, 5, DWARF)
+HANDLE_DW_OP(0xa2, constx, 5, DWARF)
+HANDLE_DW_OP(0xa3, entry_value, 5, DWARF)
+HANDLE_DW_OP(0xa4, const_type, 5, DWARF)
+HANDLE_DW_OP(0xa5, regval_type, 5, DWARF)
+HANDLE_DW_OP(0xa6, deref_type, 5, DWARF)
+HANDLE_DW_OP(0xa7, xderef_type, 5, DWARF)
+HANDLE_DW_OP(0xa8, convert, 5, DWARF)
+HANDLE_DW_OP(0xa9, reinterpret, 5, DWARF)
+// Vendor extensions:
+// Extensions for GNU-style thread-local storage.
+HANDLE_DW_OP(0xe0, GNU_push_tls_address, 0, GNU)
+// Extensions for Fission proposal.
+HANDLE_DW_OP(0xfb, GNU_addr_index, 0, GNU)
+HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU)
+
+// DWARF languages.
+HANDLE_DW_LANG(0x0001, C89, 2, DWARF)
+HANDLE_DW_LANG(0x0002, C, 2, DWARF)
+HANDLE_DW_LANG(0x0003, Ada83, 2, DWARF)
+HANDLE_DW_LANG(0x0004, C_plus_plus, 2, DWARF)
+HANDLE_DW_LANG(0x0005, Cobol74, 2, DWARF)
+HANDLE_DW_LANG(0x0006, Cobol85, 2, DWARF)
+HANDLE_DW_LANG(0x0007, Fortran77, 2, DWARF)
+HANDLE_DW_LANG(0x0008, Fortran90, 2, DWARF)
+HANDLE_DW_LANG(0x0009, Pascal83, 2, DWARF)
+HANDLE_DW_LANG(0x000a, Modula2, 2, DWARF)
+// New in DWARF v3:
+HANDLE_DW_LANG(0x000b, Java, 3, DWARF)
+HANDLE_DW_LANG(0x000c, C99, 3, DWARF)
+HANDLE_DW_LANG(0x000d, Ada95, 3, DWARF)
+HANDLE_DW_LANG(0x000e, Fortran95, 3, DWARF)
+HANDLE_DW_LANG(0x000f, PLI, 3, DWARF)
+HANDLE_DW_LANG(0x0010, ObjC, 3, DWARF)
+HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 3, DWARF)
+HANDLE_DW_LANG(0x0012, UPC, 3, DWARF)
+HANDLE_DW_LANG(0x0013, D, 3, DWARF)
+// New in DWARF v4:
+HANDLE_DW_LANG(0x0014, Python, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_LANG(0x0015, OpenCL, 5, DWARF)
+HANDLE_DW_LANG(0x0016, Go, 5, DWARF)
+HANDLE_DW_LANG(0x0017, Modula3, 5, DWARF)
+HANDLE_DW_LANG(0x0018, Haskell, 5, DWARF)
+HANDLE_DW_LANG(0x0019, C_plus_plus_03, 5, DWARF)
+HANDLE_DW_LANG(0x001a, C_plus_plus_11, 5, DWARF)
+HANDLE_DW_LANG(0x001b, OCaml, 5, DWARF)
+HANDLE_DW_LANG(0x001c, Rust, 5, DWARF)
+HANDLE_DW_LANG(0x001d, C11, 5, DWARF)
+HANDLE_DW_LANG(0x001e, Swift, 5, DWARF)
+HANDLE_DW_LANG(0x001f, Julia, 5, DWARF)
+HANDLE_DW_LANG(0x0020, Dylan, 5, DWARF)
+HANDLE_DW_LANG(0x0021, C_plus_plus_14, 5, DWARF)
+HANDLE_DW_LANG(0x0022, Fortran03, 5, DWARF)
+HANDLE_DW_LANG(0x0023, Fortran08, 5, DWARF)
+HANDLE_DW_LANG(0x0024, RenderScript, 5, DWARF)
+HANDLE_DW_LANG(0x0025, BLISS, 5, DWARF)
+// Vendor extensions:
+HANDLE_DW_LANG(0x8001, Mips_Assembler, 0, MIPS)
+HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, GOOGLE)
+HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, BORLAND)
+
+// DWARF attribute type encodings.
+HANDLE_DW_ATE(0x01, address, 2, DWARF)
+HANDLE_DW_ATE(0x02, boolean, 2, DWARF)
+HANDLE_DW_ATE(0x03, complex_float, 2, DWARF)
+HANDLE_DW_ATE(0x04, float, 2, DWARF)
+HANDLE_DW_ATE(0x05, signed, 2, DWARF)
+HANDLE_DW_ATE(0x06, signed_char, 2, DWARF)
+HANDLE_DW_ATE(0x07, unsigned, 2, DWARF)
+HANDLE_DW_ATE(0x08, unsigned_char, 2, DWARF)
+// New in DWARF v3:
+HANDLE_DW_ATE(0x09, imaginary_float, 3, DWARF)
+HANDLE_DW_ATE(0x0a, packed_decimal, 3, DWARF)
+HANDLE_DW_ATE(0x0b, numeric_string, 3, DWARF)
+HANDLE_DW_ATE(0x0c, edited, 3, DWARF)
+HANDLE_DW_ATE(0x0d, signed_fixed, 3, DWARF)
+HANDLE_DW_ATE(0x0e, unsigned_fixed, 3, DWARF)
+HANDLE_DW_ATE(0x0f, decimal_float, 3, DWARF)
+// New in DWARF v4:
+HANDLE_DW_ATE(0x10, UTF, 4, DWARF)
+// New in DWARF v5:
+HANDLE_DW_ATE(0x11, UCS, 5, DWARF)
+HANDLE_DW_ATE(0x12, ASCII, 5, DWARF)
+
+// DWARF virtuality codes.
+HANDLE_DW_VIRTUALITY(0x00, none)
+HANDLE_DW_VIRTUALITY(0x01, virtual)
+HANDLE_DW_VIRTUALITY(0x02, pure_virtual)
+
+// DWARF v5 Defaulted Member Encodings.
+HANDLE_DW_DEFAULTED(0x00, no)
+HANDLE_DW_DEFAULTED(0x01, in_class)
+HANDLE_DW_DEFAULTED(0x02, out_of_class)
+
+// DWARF calling convention codes.
+HANDLE_DW_CC(0x01, normal)
+HANDLE_DW_CC(0x02, program)
+HANDLE_DW_CC(0x03, nocall)
+// New in DWARF v5:
+HANDLE_DW_CC(0x04, pass_by_reference)
+HANDLE_DW_CC(0x05, pass_by_value)
+// Vendor extensions:
+HANDLE_DW_CC(0x40, GNU_renesas_sh)
+HANDLE_DW_CC(0x41, GNU_borland_fastcall_i386)
+HANDLE_DW_CC(0xb0, BORLAND_safecall)
+HANDLE_DW_CC(0xb1, BORLAND_stdcall)
+HANDLE_DW_CC(0xb2, BORLAND_pascal)
+HANDLE_DW_CC(0xb3, BORLAND_msfastcall)
+HANDLE_DW_CC(0xb4, BORLAND_msreturn)
+HANDLE_DW_CC(0xb5, BORLAND_thiscall)
+HANDLE_DW_CC(0xb6, BORLAND_fastcall)
+HANDLE_DW_CC(0xc0, LLVM_vectorcall)
+HANDLE_DW_CC(0xc1, LLVM_Win64)
+HANDLE_DW_CC(0xc2, LLVM_X86_64SysV)
+HANDLE_DW_CC(0xc3, LLVM_AAPCS)
+HANDLE_DW_CC(0xc4, LLVM_AAPCS_VFP)
+HANDLE_DW_CC(0xc5, LLVM_IntelOclBicc)
+HANDLE_DW_CC(0xc6, LLVM_SpirFunction)
+HANDLE_DW_CC(0xc7, LLVM_OpenCLKernel)
+HANDLE_DW_CC(0xc8, LLVM_Swift)
+HANDLE_DW_CC(0xc9, LLVM_PreserveMost)
+HANDLE_DW_CC(0xca, LLVM_PreserveAll)
+HANDLE_DW_CC(0xcb, LLVM_X86RegCall)
+// From GCC source code (include/dwarf2.h): This DW_CC_ value is not currently
+// generated by any toolchain.  It is used internally to GDB to indicate OpenCL C
+// functions that have been compiled with the IBM XL C for OpenCL compiler and use
+// a non-platform calling convention for passing OpenCL C vector types.
+HANDLE_DW_CC(0xff, GDB_IBM_OpenCL)
+
+// Line Number Extended Opcode Encodings
+HANDLE_DW_LNE(0x01, end_sequence)
+HANDLE_DW_LNE(0x02, set_address)
+HANDLE_DW_LNE(0x03, define_file)
+// New in DWARF v4:
+HANDLE_DW_LNE(0x04, set_discriminator)
+
+// Line Number Standard Opcode Encodings.
+HANDLE_DW_LNS(0x00, extended_op)
+HANDLE_DW_LNS(0x01, copy)
+HANDLE_DW_LNS(0x02, advance_pc)
+HANDLE_DW_LNS(0x03, advance_line)
+HANDLE_DW_LNS(0x04, set_file)
+HANDLE_DW_LNS(0x05, set_column)
+HANDLE_DW_LNS(0x06, negate_stmt)
+HANDLE_DW_LNS(0x07, set_basic_block)
+HANDLE_DW_LNS(0x08, const_add_pc)
+HANDLE_DW_LNS(0x09, fixed_advance_pc)
+// New in DWARF v3:
+HANDLE_DW_LNS(0x0a, set_prologue_end)
+HANDLE_DW_LNS(0x0b, set_epilogue_begin)
+HANDLE_DW_LNS(0x0c, set_isa)
+
+// DWARF v5 Line number header entry format.
+HANDLE_DW_LNCT(0x01, path)
+HANDLE_DW_LNCT(0x02, directory_index)
+HANDLE_DW_LNCT(0x03, timestamp)
+HANDLE_DW_LNCT(0x04, size)
+HANDLE_DW_LNCT(0x05, MD5)
+// A vendor extension until http://dwarfstd.org/ShowIssue.php?issue=180201.1 is
+// accepted and incorporated into the next DWARF standard.
+HANDLE_DW_LNCT(0x2001, LLVM_source)
+
+// DWARF v5 Macro information.
+HANDLE_DW_MACRO(0x01, define)
+HANDLE_DW_MACRO(0x02, undef)
+HANDLE_DW_MACRO(0x03, start_file)
+HANDLE_DW_MACRO(0x04, end_file)
+HANDLE_DW_MACRO(0x05, define_strp)
+HANDLE_DW_MACRO(0x06, undef_strp)
+HANDLE_DW_MACRO(0x07, import)
+HANDLE_DW_MACRO(0x08, define_sup)
+HANDLE_DW_MACRO(0x09, undef_sup)
+HANDLE_DW_MACRO(0x0a, import_sup)
+HANDLE_DW_MACRO(0x0b, define_strx)
+HANDLE_DW_MACRO(0x0c, undef_strx)
+
+// DWARF v5 Range List Entry encoding values.
+HANDLE_DW_RLE(0x00, end_of_list)
+HANDLE_DW_RLE(0x01, base_addressx)
+HANDLE_DW_RLE(0x02, startx_endx)
+HANDLE_DW_RLE(0x03, startx_length)
+HANDLE_DW_RLE(0x04, offset_pair)
+HANDLE_DW_RLE(0x05, base_address)
+HANDLE_DW_RLE(0x06, start_end)
+HANDLE_DW_RLE(0x07, start_length)
+
+// Call frame instruction encodings.
+HANDLE_DW_CFA(0x00, nop)
+HANDLE_DW_CFA(0x40, advance_loc)
+HANDLE_DW_CFA(0x80, offset)
+HANDLE_DW_CFA(0xc0, restore)
+HANDLE_DW_CFA(0x01, set_loc)
+HANDLE_DW_CFA(0x02, advance_loc1)
+HANDLE_DW_CFA(0x03, advance_loc2)
+HANDLE_DW_CFA(0x04, advance_loc4)
+HANDLE_DW_CFA(0x05, offset_extended)
+HANDLE_DW_CFA(0x06, restore_extended)
+HANDLE_DW_CFA(0x07, undefined)
+HANDLE_DW_CFA(0x08, same_value)
+HANDLE_DW_CFA(0x09, register)
+HANDLE_DW_CFA(0x0a, remember_state)
+HANDLE_DW_CFA(0x0b, restore_state)
+HANDLE_DW_CFA(0x0c, def_cfa)
+HANDLE_DW_CFA(0x0d, def_cfa_register)
+HANDLE_DW_CFA(0x0e, def_cfa_offset)
+// New in DWARF v3:
+HANDLE_DW_CFA(0x0f, def_cfa_expression)
+HANDLE_DW_CFA(0x10, expression)
+HANDLE_DW_CFA(0x11, offset_extended_sf)
+HANDLE_DW_CFA(0x12, def_cfa_sf)
+HANDLE_DW_CFA(0x13, def_cfa_offset_sf)
+HANDLE_DW_CFA(0x14, val_offset)
+HANDLE_DW_CFA(0x15, val_offset_sf)
+HANDLE_DW_CFA(0x16, val_expression)
+// Vendor extensions:
+HANDLE_DW_CFA(0x1d, MIPS_advance_loc8)
+HANDLE_DW_CFA(0x2d, GNU_window_save)
+HANDLE_DW_CFA(0x2e, GNU_args_size)
+
+// Apple Objective-C Property Attributes.
+// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
+HANDLE_DW_APPLE_PROPERTY(0x01, readonly)
+HANDLE_DW_APPLE_PROPERTY(0x02, getter)
+HANDLE_DW_APPLE_PROPERTY(0x04, assign)
+HANDLE_DW_APPLE_PROPERTY(0x08, readwrite)
+HANDLE_DW_APPLE_PROPERTY(0x10, retain)
+HANDLE_DW_APPLE_PROPERTY(0x20, copy)
+HANDLE_DW_APPLE_PROPERTY(0x40, nonatomic)
+HANDLE_DW_APPLE_PROPERTY(0x80, setter)
+HANDLE_DW_APPLE_PROPERTY(0x100, atomic)
+HANDLE_DW_APPLE_PROPERTY(0x200, weak)
+HANDLE_DW_APPLE_PROPERTY(0x400, strong)
+HANDLE_DW_APPLE_PROPERTY(0x800, unsafe_unretained)
+HANDLE_DW_APPLE_PROPERTY(0x1000, nullability)
+HANDLE_DW_APPLE_PROPERTY(0x2000, null_resettable)
+HANDLE_DW_APPLE_PROPERTY(0x4000, class)
+
+// DWARF v5 Unit Types.
+HANDLE_DW_UT(0x01, compile)
+HANDLE_DW_UT(0x02, type)
+HANDLE_DW_UT(0x03, partial)
+HANDLE_DW_UT(0x04, skeleton)
+HANDLE_DW_UT(0x05, split_compile)
+HANDLE_DW_UT(0x06, split_type)
+
+// DWARF section types. (enum name, ELF name, cmdline name)
+// Note that these IDs don't mean anything.
+// TODO: Add Mach-O and COFF names.
+// Official DWARF sections.
+HANDLE_DWARF_SECTION(DebugAbbrev, ".debug_abbrev", "debug-abbrev")
+HANDLE_DWARF_SECTION(DebugAranges, ".debug_aranges", "debug-aranges")
+HANDLE_DWARF_SECTION(DebugInfo, ".debug_info", "debug-info")
+HANDLE_DWARF_SECTION(DebugTypes, ".debug_types", "debug-types")
+HANDLE_DWARF_SECTION(DebugLine, ".debug_line", "debug-line")
+HANDLE_DWARF_SECTION(DebugLineStr, ".debug_line_str", "debug-line-str")
+HANDLE_DWARF_SECTION(DebugLoc, ".debug_loc", "debug-loc")
+HANDLE_DWARF_SECTION(DebugFrame, ".debug_frame", "debug-frame")
+HANDLE_DWARF_SECTION(DebugMacro, ".debug_macro", "debug-macro")
+HANDLE_DWARF_SECTION(DebugNames, ".debug_names", "debug-names")
+HANDLE_DWARF_SECTION(DebugPubnames, ".debug_pubnames", "debug-pubnames")
+HANDLE_DWARF_SECTION(DebugPubtypes, ".debug_pubtypes", "debug-pubtypes")
+HANDLE_DWARF_SECTION(DebugGnuPubnames, ".debug_gnu_pubnames", "debug-gnu-pubnames")
+HANDLE_DWARF_SECTION(DebugGnuPubtypes, ".debug_gnu_pubtypes", "debug-gnu-pubtypes")
+HANDLE_DWARF_SECTION(DebugRanges, ".debug_ranges", "debug-ranges")
+HANDLE_DWARF_SECTION(DebugRnglists, ".debug_rnglists", "debug-rnglists")
+HANDLE_DWARF_SECTION(DebugStr, ".debug_str", "debug-str")
+HANDLE_DWARF_SECTION(DebugStrOffsets, ".debug_str_offsets", "debug-str-offsets")
+HANDLE_DWARF_SECTION(DebugCUIndex, ".debug_cu_index", "debug-cu-index")
+HANDLE_DWARF_SECTION(DebugTUIndex, ".debug_tu_index", "debug-tu-index")
+// Vendor extensions.
+HANDLE_DWARF_SECTION(AppleNames, ".apple_names", "apple-names")
+HANDLE_DWARF_SECTION(AppleTypes, ".apple_types", "apple-types")
+HANDLE_DWARF_SECTION(AppleNamespaces, ".apple_namespaces", "apple-namespaces")
+HANDLE_DWARF_SECTION(AppleObjC, ".apple_objc", "apple-objc")
+HANDLE_DWARF_SECTION(GdbIndex, ".gdb_index", "gdb-index")
+
+HANDLE_DW_IDX(0x01, compile_unit)
+HANDLE_DW_IDX(0x02, type_unit)
+HANDLE_DW_IDX(0x03, die_offset)
+HANDLE_DW_IDX(0x04, parent)
+HANDLE_DW_IDX(0x05, type_hash)
+
+
+#undef HANDLE_DW_TAG
+#undef HANDLE_DW_AT
+#undef HANDLE_DW_FORM
+#undef HANDLE_DW_OP
+#undef HANDLE_DW_LANG
+#undef HANDLE_DW_ATE
+#undef HANDLE_DW_VIRTUALITY
+#undef HANDLE_DW_DEFAULTED
+#undef HANDLE_DW_CC
+#undef HANDLE_DW_LNS
+#undef HANDLE_DW_LNE
+#undef HANDLE_DW_LNCT
+#undef HANDLE_DW_MACRO
+#undef HANDLE_DW_RLE
+#undef HANDLE_DW_CFA
+#undef HANDLE_DW_APPLE_PROPERTY
+#undef HANDLE_DW_UT
+#undef HANDLE_DWARF_SECTION
+#undef HANDLE_DW_IDX
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.h b/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.h
new file mode 100644
index 0000000..15724a9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/Dwarf.h
@@ -0,0 +1,620 @@
+//===-- llvm/BinaryFormat/Dwarf.h ---Dwarf Constants-------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file contains constants used for implementing Dwarf
+/// debug support.
+///
+/// For details on the DWARF specification see the latest DWARF Debugging
+/// Information Format standard document on http://www.dwarfstd.org. This
+/// file often includes support for features that are not yet part of a
+/// released standard.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_DWARF_H
+#define LLVM_BINARYFORMAT_DWARF_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+
+namespace llvm {
+class StringRef;
+
+namespace dwarf {
+
+//===----------------------------------------------------------------------===//
+// DWARF constants as gleaned from the DWARF Debugging Information Format V.5
+// reference manual http://www.dwarfstd.org/.
+//
+
+// Do not mix the following two enumeration sets.  DW_TAG_invalid changes the
+// enumeration base type.
+
+enum LLVMConstants : uint32_t {
+  // LLVM mock tags (see also llvm/BinaryFormat/Dwarf.def).
+  DW_TAG_invalid = ~0U,        // Tag for invalid results.
+  DW_VIRTUALITY_invalid = ~0U, // Virtuality for invalid results.
+  DW_MACINFO_invalid = ~0U,    // Macinfo type for invalid results.
+
+  // Other constants.
+  DWARF_VERSION = 4,       // Default dwarf version we output.
+  DW_PUBTYPES_VERSION = 2, // Section version number for .debug_pubtypes.
+  DW_PUBNAMES_VERSION = 2, // Section version number for .debug_pubnames.
+  DW_ARANGES_VERSION = 2,  // Section version number for .debug_aranges.
+  // Identifiers we use to distinguish vendor extensions.
+  DWARF_VENDOR_DWARF = 0, // Defined in v2 or later of the DWARF standard.
+  DWARF_VENDOR_APPLE = 1,
+  DWARF_VENDOR_BORLAND = 2,
+  DWARF_VENDOR_GNU = 3,
+  DWARF_VENDOR_GOOGLE = 4,
+  DWARF_VENDOR_LLVM = 5,
+  DWARF_VENDOR_MIPS = 6
+};
+
+/// Constants that define the DWARF format as 32 or 64 bit.
+enum DwarfFormat : uint8_t { DWARF32, DWARF64 };
+
+/// Special ID values that distinguish a CIE from an FDE in DWARF CFI.
+/// Not inside an enum because a 64-bit value is needed.
+/// @{
+const uint32_t DW_CIE_ID = UINT32_MAX;
+const uint64_t DW64_CIE_ID = UINT64_MAX;
+/// @}
+
+/// Identifier of an invalid DIE offset in the .debug_info section.
+const uint32_t DW_INVALID_OFFSET = UINT32_MAX;
+
+enum Tag : uint16_t {
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) DW_TAG_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_TAG_lo_user = 0x4080,
+  DW_TAG_hi_user = 0xffff,
+  DW_TAG_user_base = 0x1000 ///< Recommended base for user tags.
+};
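+// Each HANDLE_DW_TAG entry in Dwarf.def expands to one enumerator above; for
+// example, an entry such as HANDLE_DW_TAG(0x0011, compile_unit, 2, DWARF)
+// becomes DW_TAG_compile_unit = 0x0011.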
+
+inline bool isType(Tag T) {
+  switch (T) {
+  case DW_TAG_array_type:
+  case DW_TAG_class_type:
+  case DW_TAG_interface_type:
+  case DW_TAG_enumeration_type:
+  case DW_TAG_pointer_type:
+  case DW_TAG_reference_type:
+  case DW_TAG_rvalue_reference_type:
+  case DW_TAG_string_type:
+  case DW_TAG_structure_type:
+  case DW_TAG_subroutine_type:
+  case DW_TAG_union_type:
+  case DW_TAG_ptr_to_member_type:
+  case DW_TAG_set_type:
+  case DW_TAG_subrange_type:
+  case DW_TAG_base_type:
+  case DW_TAG_const_type:
+  case DW_TAG_file_type:
+  case DW_TAG_packed_type:
+  case DW_TAG_volatile_type:
+  case DW_TAG_typedef:
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// Attributes.
+enum Attribute : uint16_t {
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) DW_AT_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_AT_lo_user = 0x2000,
+  DW_AT_hi_user = 0x3fff,
+};
+
+enum Form : uint16_t {
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) DW_FORM_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF.
+};
+
+enum LocationAtom {
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_OP_lo_user = 0xe0,
+  DW_OP_hi_user = 0xff,
+  DW_OP_LLVM_fragment = 0x1000 ///< Only used in LLVM metadata.
+};
+
+enum TypeKind : uint8_t {
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) DW_ATE_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_ATE_lo_user = 0x80,
+  DW_ATE_hi_user = 0xff
+};
+
+enum DecimalSignEncoding {
+  // Decimal sign attribute values
+  DW_DS_unsigned = 0x01,
+  DW_DS_leading_overpunch = 0x02,
+  DW_DS_trailing_overpunch = 0x03,
+  DW_DS_leading_separate = 0x04,
+  DW_DS_trailing_separate = 0x05
+};
+
+enum EndianityEncoding {
+  // Endianity attribute values
+  DW_END_default = 0x00,
+  DW_END_big = 0x01,
+  DW_END_little = 0x02,
+  DW_END_lo_user = 0x40,
+  DW_END_hi_user = 0xff
+};
+
+enum AccessAttribute {
+  // Accessibility codes
+  DW_ACCESS_public = 0x01,
+  DW_ACCESS_protected = 0x02,
+  DW_ACCESS_private = 0x03
+};
+
+enum VisibilityAttribute {
+  // Visibility codes
+  DW_VIS_local = 0x01,
+  DW_VIS_exported = 0x02,
+  DW_VIS_qualified = 0x03
+};
+
+enum VirtualityAttribute {
+#define HANDLE_DW_VIRTUALITY(ID, NAME) DW_VIRTUALITY_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_VIRTUALITY_max = 0x02
+};
+
+enum DefaultedMemberAttribute {
+#define HANDLE_DW_DEFAULTED(ID, NAME) DW_DEFAULTED_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_DEFAULTED_max = 0x02
+};
+
+enum SourceLanguage {
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) DW_LANG_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_LANG_lo_user = 0x8000,
+  DW_LANG_hi_user = 0xffff
+};
+
+enum CaseSensitivity {
+  // Identifier case codes
+  DW_ID_case_sensitive = 0x00,
+  DW_ID_up_case = 0x01,
+  DW_ID_down_case = 0x02,
+  DW_ID_case_insensitive = 0x03
+};
+
+enum CallingConvention {
+// Calling convention codes
+#define HANDLE_DW_CC(ID, NAME) DW_CC_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_CC_lo_user = 0x40,
+  DW_CC_hi_user = 0xff
+};
+
+enum InlineAttribute {
+  // Inline codes
+  DW_INL_not_inlined = 0x00,
+  DW_INL_inlined = 0x01,
+  DW_INL_declared_not_inlined = 0x02,
+  DW_INL_declared_inlined = 0x03
+};
+
+enum ArrayDimensionOrdering {
+  // Array ordering
+  DW_ORD_row_major = 0x00,
+  DW_ORD_col_major = 0x01
+};
+
+enum DiscriminantList {
+  // Discriminant descriptor values
+  DW_DSC_label = 0x00,
+  DW_DSC_range = 0x01
+};
+
+/// Line Number Standard Opcode Encodings.
+enum LineNumberOps : uint8_t {
+#define HANDLE_DW_LNS(ID, NAME) DW_LNS_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+};
+
+/// Line Number Extended Opcode Encodings.
+enum LineNumberExtendedOps {
+#define HANDLE_DW_LNE(ID, NAME) DW_LNE_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_LNE_lo_user = 0x80,
+  DW_LNE_hi_user = 0xff
+};
+
+enum LineNumberEntryFormat {
+#define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_LNCT_lo_user = 0x2000,
+  DW_LNCT_hi_user = 0x3fff,
+};
+
+enum MacinfoRecordType {
+  // Macinfo Type Encodings
+  DW_MACINFO_define = 0x01,
+  DW_MACINFO_undef = 0x02,
+  DW_MACINFO_start_file = 0x03,
+  DW_MACINFO_end_file = 0x04,
+  DW_MACINFO_vendor_ext = 0xff
+};
+
+/// DWARF v5 macro information entry type encodings.
+enum MacroEntryType {
+#define HANDLE_DW_MACRO(ID, NAME) DW_MACRO_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_MACRO_lo_user = 0xe0,
+  DW_MACRO_hi_user = 0xff
+};
+
+/// DWARF v5 range list entry encoding values.
+enum RangeListEntries {
+#define HANDLE_DW_RLE(ID, NAME) DW_RLE_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+};
+
+/// Call frame instruction encodings.
+enum CallFrameInfo {
+#define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_CFA_extended = 0x00,
+
+  DW_CFA_lo_user = 0x1c,
+  DW_CFA_hi_user = 0x3f
+};
+
+enum Constants {
+  // Children flag
+  DW_CHILDREN_no = 0x00,
+  DW_CHILDREN_yes = 0x01,
+
+  DW_EH_PE_absptr = 0x00,
+  DW_EH_PE_omit = 0xff,
+  DW_EH_PE_uleb128 = 0x01,
+  DW_EH_PE_udata2 = 0x02,
+  DW_EH_PE_udata4 = 0x03,
+  DW_EH_PE_udata8 = 0x04,
+  DW_EH_PE_sleb128 = 0x09,
+  DW_EH_PE_sdata2 = 0x0A,
+  DW_EH_PE_sdata4 = 0x0B,
+  DW_EH_PE_sdata8 = 0x0C,
+  DW_EH_PE_signed = 0x08,
+  DW_EH_PE_pcrel = 0x10,
+  DW_EH_PE_textrel = 0x20,
+  DW_EH_PE_datarel = 0x30,
+  DW_EH_PE_funcrel = 0x40,
+  DW_EH_PE_aligned = 0x50,
+  DW_EH_PE_indirect = 0x80
+};
+
+/// Constants for location lists in DWARF v5.
+enum LocationListEntry : unsigned char {
+  DW_LLE_end_of_list = 0x00,
+  DW_LLE_base_addressx = 0x01,
+  DW_LLE_startx_endx = 0x02,
+  DW_LLE_startx_length = 0x03,
+  DW_LLE_offset_pair = 0x04,
+  DW_LLE_default_location = 0x05,
+  DW_LLE_base_address = 0x06,
+  DW_LLE_start_end = 0x07,
+  DW_LLE_start_length = 0x08
+};
+
+/// Constants for the DW_APPLE_PROPERTY_attributes attribute.
+/// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
+enum ApplePropertyAttributes {
+#define HANDLE_DW_APPLE_PROPERTY(ID, NAME) DW_APPLE_PROPERTY_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+};
+
+/// Constants for unit types in DWARF v5.
+enum UnitType : unsigned char {
+#define HANDLE_DW_UT(ID, NAME) DW_UT_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_UT_lo_user = 0x80,
+  DW_UT_hi_user = 0xff
+};
+
+enum Index {
+#define HANDLE_DW_IDX(ID, NAME) DW_IDX_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
+  DW_IDX_lo_user = 0x2000,
+  DW_IDX_hi_user = 0x3fff
+};
+
+inline bool isUnitType(uint8_t UnitType) {
+  switch (UnitType) {
+  case DW_UT_compile:
+  case DW_UT_type:
+  case DW_UT_partial:
+  case DW_UT_skeleton:
+  case DW_UT_split_compile:
+  case DW_UT_split_type:
+    return true;
+  default:
+    return false;
+  }
+}
+
+inline bool isUnitType(dwarf::Tag T) {
+  switch (T) {
+  case DW_TAG_compile_unit:
+  case DW_TAG_type_unit:
+  case DW_TAG_partial_unit:
+  case DW_TAG_skeleton_unit:
+    return true;
+  default:
+    return false;
+  }
+}
+
+// Constants for the DWARF v5 Accelerator Table Proposal
+enum AcceleratorTable {
+  // Data layout descriptors.
+  DW_ATOM_null = 0u,       /// Marker for the end of a list of atoms.
+  DW_ATOM_die_offset = 1u, // DIE offset in the debug_info section.
+  DW_ATOM_cu_offset = 2u, // Offset of the compile unit header that contains the
+                          // item in question.
+  DW_ATOM_die_tag = 3u,   // A tag entry.
+  DW_ATOM_type_flags = 4u, // Set of flags for a type.
+
+  DW_ATOM_type_type_flags = 5u, // Dsymutil type extension.
+  DW_ATOM_qual_name_hash = 6u,  // Dsymutil qualified hash extension.
+
+  // DW_ATOM_type_flags values.
+
+  // Always set for C++, only set for ObjC if this is the @implementation for a
+  // class.
+  DW_FLAG_type_implementation = 2u,
+
+  // Hash functions.
+
+  // Daniel J. Bernstein hash.
+  DW_hash_function_djb = 0u
+};
+
+// Constants for the GNU pubnames/pubtypes extensions supporting gdb index.
+enum GDBIndexEntryKind {
+  GIEK_NONE,
+  GIEK_TYPE,
+  GIEK_VARIABLE,
+  GIEK_FUNCTION,
+  GIEK_OTHER,
+  GIEK_UNUSED5,
+  GIEK_UNUSED6,
+  GIEK_UNUSED7
+};
+
+enum GDBIndexEntryLinkage { GIEL_EXTERNAL, GIEL_STATIC };
+
+/// \defgroup DwarfConstantsDumping Dwarf constants dumping functions
+///
+/// All these functions map their argument's value back to the
+/// corresponding enumerator name or return an empty StringRef if the value
+/// isn't known.
+///
+/// @{
+StringRef TagString(unsigned Tag);
+StringRef ChildrenString(unsigned Children);
+StringRef AttributeString(unsigned Attribute);
+StringRef FormEncodingString(unsigned Encoding);
+StringRef OperationEncodingString(unsigned Encoding);
+StringRef AttributeEncodingString(unsigned Encoding);
+StringRef DecimalSignString(unsigned Sign);
+StringRef EndianityString(unsigned Endian);
+StringRef AccessibilityString(unsigned Access);
+StringRef VisibilityString(unsigned Visibility);
+StringRef VirtualityString(unsigned Virtuality);
+StringRef LanguageString(unsigned Language);
+StringRef CaseString(unsigned Case);
+StringRef ConventionString(unsigned Convention);
+StringRef InlineCodeString(unsigned Code);
+StringRef ArrayOrderString(unsigned Order);
+StringRef LNStandardString(unsigned Standard);
+StringRef LNExtendedString(unsigned Encoding);
+StringRef MacinfoString(unsigned Encoding);
+StringRef RangeListEncodingString(unsigned Encoding);
+StringRef CallFrameString(unsigned Encoding);
+StringRef ApplePropertyString(unsigned);
+StringRef UnitTypeString(unsigned);
+StringRef AtomTypeString(unsigned Atom);
+StringRef GDBIndexEntryKindString(GDBIndexEntryKind Kind);
+StringRef GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage);
+StringRef IndexString(unsigned Idx);
+/// @}
+
+/// \defgroup DwarfConstantsParsing Dwarf constants parsing functions
+///
+/// These functions map their strings back to the corresponding enumeration
+/// value, or return 0 if there is none, with the following exceptions:
+///
+/// \li \a getTag() returns \a DW_TAG_invalid on invalid input.
+/// \li \a getVirtuality() returns \a DW_VIRTUALITY_invalid on invalid input.
+/// \li \a getMacinfo() returns \a DW_MACINFO_invalid on invalid input.
+///
+/// @{
+unsigned getTag(StringRef TagString);
+unsigned getOperationEncoding(StringRef OperationEncodingString);
+unsigned getVirtuality(StringRef VirtualityString);
+unsigned getLanguage(StringRef LanguageString);
+unsigned getCallingConvention(StringRef LanguageString);
+unsigned getAttributeEncoding(StringRef EncodingString);
+unsigned getMacinfo(StringRef MacinfoString);
+/// @}
+
+/// \defgroup DwarfConstantsVersioning Dwarf version for constants
+///
+/// For constants defined by DWARF, returns the DWARF version when the constant
+/// was first defined. For vendor extensions, if there is a version-related
+/// policy for when to emit it, returns a version number for that policy.
+/// Otherwise returns 0.
+///
+/// @{
+unsigned TagVersion(Tag T);
+unsigned AttributeVersion(Attribute A);
+unsigned FormVersion(Form F);
+unsigned OperationVersion(LocationAtom O);
+unsigned AttributeEncodingVersion(TypeKind E);
+unsigned LanguageVersion(SourceLanguage L);
+/// @}
+
+/// \defgroup DwarfConstantsVendor Dwarf "vendor" for constants
+///
+/// These functions return an identifier describing "who" defined the constant,
+/// either the DWARF standard itself or the vendor who defined the extension.
+///
+/// @{
+unsigned TagVendor(Tag T);
+unsigned AttributeVendor(Attribute A);
+unsigned FormVendor(Form F);
+unsigned OperationVendor(LocationAtom O);
+unsigned AttributeEncodingVendor(TypeKind E);
+unsigned LanguageVendor(SourceLanguage L);
+/// @}
+
+/// A helper struct providing information about the byte size of DW_FORM
+/// values that vary in size depending on the DWARF version, address byte
+/// size, or DWARF32/DWARF64.
+struct FormParams {
+  uint16_t Version;
+  uint8_t AddrSize;
+  DwarfFormat Format;
+
+  /// The definition of the size of form DW_FORM_ref_addr depends on the
+  /// version. In DWARF v2 it's the size of an address; after that, it's the
+  /// size of a reference.
+  uint8_t getRefAddrByteSize() const {
+    if (Version == 2)
+      return AddrSize;
+    return getDwarfOffsetByteSize();
+  }
+
+  /// The size of a reference is determined by the DWARF 32/64-bit format.
+  uint8_t getDwarfOffsetByteSize() const {
+    switch (Format) {
+    case DwarfFormat::DWARF32:
+      return 4;
+    case DwarfFormat::DWARF64:
+      return 8;
+    }
+    llvm_unreachable("Invalid Format value");
+  }
+
+  explicit operator bool() const { return Version && AddrSize; }
+};
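+// For illustration: a DWARF v2 unit with 8-byte addresses in the 32-bit
+// format, i.e. FormParams{2, 8, DWARF32}, yields getRefAddrByteSize() == 8
+// (the address size) while getDwarfOffsetByteSize() == 4; for any version
+// after v2 the two sizes coincide.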
+
+/// Get the fixed byte size for a given form.
+///
+/// If the form has a fixed byte size, then an Optional with a value will be
+/// returned. If the form is always encoded using a variable length storage
+/// format (ULEB or SLEB numbers or blocks) then None will be returned.
+///
+/// \param Form DWARF form to get the fixed byte size for.
+/// \param Params DWARF parameters to help interpret forms.
+/// \returns Optional<uint8_t> value with the fixed byte size or None if
+/// \p Form doesn't have a fixed byte size.
+Optional<uint8_t> getFixedFormByteSize(dwarf::Form Form, FormParams Params);
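+// For example (illustrative), with FormParams P{4, 8, DWARF32}:
+// getFixedFormByteSize(DW_FORM_data4, P) yields 4, while
+// getFixedFormByteSize(DW_FORM_udata, P) yields None, because ULEB128
+// encoding has no fixed size.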
+
+/// Returns true if the specified form is defined in the specified DWARF
+/// version, or if it is a vendor extension and extensions are allowed.
+bool isValidFormForVersion(Form F, unsigned Version, bool ExtensionsOk = true);
+
+/// Returns the symbolic string representing Val when used as a value
+/// for attribute Attr.
+StringRef AttributeValueString(uint16_t Attr, unsigned Val);
+
+/// Describes an entry of the various gnu_pub* debug sections.
+///
+/// The gnu_pub* kind looks like:
+///
+/// 0-3  reserved
+/// 4-6  symbol kind
+/// 7    0 == global, 1 == static
+///
+/// A gdb_index descriptor stores the above kind shifted up by 24 bits, with
+/// the offset of the CU within the debug_info section held in the low 24
+/// bits.
+struct PubIndexEntryDescriptor {
+  GDBIndexEntryKind Kind;
+  GDBIndexEntryLinkage Linkage;
+  PubIndexEntryDescriptor(GDBIndexEntryKind Kind, GDBIndexEntryLinkage Linkage)
+      : Kind(Kind), Linkage(Linkage) {}
+  /* implicit */ PubIndexEntryDescriptor(GDBIndexEntryKind Kind)
+      : Kind(Kind), Linkage(GIEL_EXTERNAL) {}
+  explicit PubIndexEntryDescriptor(uint8_t Value)
+      : Kind(
+            static_cast<GDBIndexEntryKind>((Value & KIND_MASK) >> KIND_OFFSET)),
+        Linkage(static_cast<GDBIndexEntryLinkage>((Value & LINKAGE_MASK) >>
+                                                  LINKAGE_OFFSET)) {}
+  uint8_t toBits() const {
+    return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET;
+  }
+
+private:
+  enum {
+    KIND_OFFSET = 4,
+    KIND_MASK = 7 << KIND_OFFSET,
+    LINKAGE_OFFSET = 7,
+    LINKAGE_MASK = 1 << LINKAGE_OFFSET
+  };
+};
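+// Round-trip illustration of the packing above:
+// PubIndexEntryDescriptor(GIEK_VARIABLE, GIEL_STATIC).toBits() places
+// GIEK_VARIABLE (2) in bits 4-6 and GIEL_STATIC (1) in bit 7, giving 0xA0;
+// PubIndexEntryDescriptor(0xA0) decodes back to the same kind/linkage pair.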
+
+template <typename Enum> struct EnumTraits : public std::false_type {};
+
+template <> struct EnumTraits<Attribute> : public std::true_type {
+  static constexpr char Type[3] = "AT";
+  static constexpr StringRef (*StringFn)(unsigned) = &AttributeString;
+};
+
+template <> struct EnumTraits<Form> : public std::true_type {
+  static constexpr char Type[5] = "FORM";
+  static constexpr StringRef (*StringFn)(unsigned) = &FormEncodingString;
+};
+
+template <> struct EnumTraits<Index> : public std::true_type {
+  static constexpr char Type[4] = "IDX";
+  static constexpr StringRef (*StringFn)(unsigned) = &IndexString;
+};
+
+template <> struct EnumTraits<Tag> : public std::true_type {
+  static constexpr char Type[4] = "TAG";
+  static constexpr StringRef (*StringFn)(unsigned) = &TagString;
+};
+} // End of namespace dwarf
+
+/// Dwarf constants format_provider
+///
+/// Specialization of the format_provider template for dwarf enums. Unlike the
+/// dumping functions above, these format unknown enumerator values as
+/// DW_TYPE_unknown_1234 (e.g. DW_TAG_unknown_ffff).
+template <typename Enum>
+struct format_provider<
+    Enum, typename std::enable_if<dwarf::EnumTraits<Enum>::value>::type> {
+  static void format(const Enum &E, raw_ostream &OS, StringRef Style) {
+    StringRef Str = dwarf::EnumTraits<Enum>::StringFn(E);
+    if (Str.empty()) {
+      OS << "DW_" << dwarf::EnumTraits<Enum>::Type << "_unknown_"
+         << llvm::format("%x", E);
+    } else
+      OS << Str;
+  }
+};
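+// A small usage sketch: llvm::formatv("{0}", dwarf::DW_TAG_compile_unit)
+// produces "DW_TAG_compile_unit", while a value with no registered name,
+// e.g. static_cast<dwarf::Tag>(0x7123), is formatted as "DW_TAG_unknown_7123".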
+} // End of namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/DynamicTags.def b/linux-x64/clang/include/llvm/BinaryFormat/DynamicTags.def
new file mode 100644
index 0000000..c39f38a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/DynamicTags.def
@@ -0,0 +1,187 @@
+#ifndef DYNAMIC_TAG
+#error "DYNAMIC_TAG must be defined"
+#endif
+
+// Add separate macros for the architecture-specific tags and the markers
+// such as DT_HIOS, etc., to allow using this file in other contexts.
+// For example, we can use it to generate a stringification switch statement.
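+//
+// A minimal sketch of that use (the helper name is hypothetical; the marker
+// and processor-specific macros are stubbed out because their values overlap
+// the generic tags):
+//
+//   const char *dynamicTagName(uint64_t Tag) {
+//     switch (Tag) {
+//   #define DYNAMIC_TAG(name, value) case value: return "DT_" #name;
+//   #define DYNAMIC_TAG_MARKER(name, value)
+//   #define HEXAGON_DYNAMIC_TAG(name, value)
+//   #define MIPS_DYNAMIC_TAG(name, value)
+//   #include "llvm/BinaryFormat/DynamicTags.def"
+//   #undef DYNAMIC_TAG
+//   #undef DYNAMIC_TAG_MARKER
+//   #undef HEXAGON_DYNAMIC_TAG
+//   #undef MIPS_DYNAMIC_TAG
+//     default: return nullptr;
+//     }
+//   }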
+
+#ifndef HEXAGON_DYNAMIC_TAG
+#define HEXAGON_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#define HEXAGON_DYNAMIC_TAG_DEFINED
+#endif
+
+#ifndef MIPS_DYNAMIC_TAG
+#define MIPS_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#define MIPS_DYNAMIC_TAG_DEFINED
+#endif
+
+#ifndef DYNAMIC_TAG_MARKER
+#define DYNAMIC_TAG_MARKER(name, value) DYNAMIC_TAG(name, value)
+#define DYNAMIC_TAG_MARKER_DEFINED
+#endif
+
+DYNAMIC_TAG(NULL, 0)        // Marks end of dynamic array.
+DYNAMIC_TAG(NEEDED, 1)      // String table offset of needed library.
+DYNAMIC_TAG(PLTRELSZ, 2)    // Size of relocation entries in PLT.
+DYNAMIC_TAG(PLTGOT, 3)      // Address associated with linkage table.
+DYNAMIC_TAG(HASH, 4)        // Address of symbolic hash table.
+DYNAMIC_TAG(STRTAB, 5)      // Address of dynamic string table.
+DYNAMIC_TAG(SYMTAB, 6)      // Address of dynamic symbol table.
+DYNAMIC_TAG(RELA, 7)        // Address of relocation table (Rela entries).
+DYNAMIC_TAG(RELASZ, 8)      // Size of Rela relocation table.
+DYNAMIC_TAG(RELAENT, 9)     // Size of a Rela relocation entry.
+DYNAMIC_TAG(STRSZ, 10)      // Total size of the string table.
+DYNAMIC_TAG(SYMENT, 11)     // Size of a symbol table entry.
+DYNAMIC_TAG(INIT, 12)       // Address of initialization function.
+DYNAMIC_TAG(FINI, 13)       // Address of termination function.
+DYNAMIC_TAG(SONAME, 14)     // String table offset of a shared object's name.
+DYNAMIC_TAG(RPATH, 15)      // String table offset of library search path.
+DYNAMIC_TAG(SYMBOLIC, 16)   // Changes symbol resolution algorithm.
+DYNAMIC_TAG(REL, 17)        // Address of relocation table (Rel entries).
+DYNAMIC_TAG(RELSZ, 18)      // Size of Rel relocation table.
+DYNAMIC_TAG(RELENT, 19)     // Size of a Rel relocation entry.
+DYNAMIC_TAG(PLTREL, 20)     // Type of relocation entry used for linking.
+DYNAMIC_TAG(DEBUG, 21)      // Reserved for debugger.
+DYNAMIC_TAG(TEXTREL, 22)    // Relocations exist for non-writable segments.
+DYNAMIC_TAG(JMPREL, 23)     // Address of relocations associated with PLT.
+DYNAMIC_TAG(BIND_NOW, 24)   // Process all relocations before execution.
+DYNAMIC_TAG(INIT_ARRAY, 25) // Pointer to array of initialization functions.
+DYNAMIC_TAG(FINI_ARRAY, 26) // Pointer to array of termination functions.
+DYNAMIC_TAG(INIT_ARRAYSZ, 27) // Size of DT_INIT_ARRAY.
+DYNAMIC_TAG(FINI_ARRAYSZ, 28) // Size of DT_FINI_ARRAY.
+DYNAMIC_TAG(RUNPATH, 29)      // String table offset of lib search path.
+DYNAMIC_TAG(FLAGS, 30)        // Flags.
+DYNAMIC_TAG_MARKER(ENCODING, 32) // Values from here to DT_LOOS follow the rules
+                                 // for the interpretation of the d_un union.
+
+DYNAMIC_TAG(PREINIT_ARRAY, 32)   // Pointer to array of preinit functions.
+DYNAMIC_TAG(PREINIT_ARRAYSZ, 33) // Size of the DT_PREINIT_ARRAY array.
+
+DYNAMIC_TAG_MARKER(LOOS, 0x60000000)   // Start of environment specific tags.
+DYNAMIC_TAG_MARKER(HIOS, 0x6FFFFFFF)   // End of environment specific tags.
+DYNAMIC_TAG_MARKER(LOPROC, 0x70000000) // Start of processor specific tags.
+DYNAMIC_TAG_MARKER(HIPROC, 0x7FFFFFFF) // End of processor specific tags.
+
+// Android packed relocation section tags.
+// https://android.googlesource.com/platform/bionic/+/6f12bfece5dcc01325e0abba56a46b1bcf991c69/tools/relocation_packer/src/elf_file.cc#31
+DYNAMIC_TAG(ANDROID_REL, 0x6000000F)
+DYNAMIC_TAG(ANDROID_RELSZ, 0x60000010)
+DYNAMIC_TAG(ANDROID_RELA, 0x60000011)
+DYNAMIC_TAG(ANDROID_RELASZ, 0x60000012)
+
+DYNAMIC_TAG(GNU_HASH, 0x6FFFFEF5)    // Reference to the GNU hash table.
+DYNAMIC_TAG(TLSDESC_PLT, 0x6FFFFEF6) // Location of PLT entry for TLS
+                                     // descriptor resolver calls.
+DYNAMIC_TAG(TLSDESC_GOT, 0x6FFFFEF7) // Location of GOT entry used by TLS
+                                     // descriptor resolver PLT entry.
+DYNAMIC_TAG(RELACOUNT, 0x6FFFFFF9)   // ELF32_Rela count.
+DYNAMIC_TAG(RELCOUNT, 0x6FFFFFFA)    // ELF32_Rel count.
+
+DYNAMIC_TAG(FLAGS_1, 0x6FFFFFFB) // Flags_1.
+
+DYNAMIC_TAG(VERSYM, 0x6FFFFFF0)     // The address of .gnu.version section.
+DYNAMIC_TAG(VERDEF, 0x6FFFFFFC)     // The address of the version definition
+                                    // table.
+DYNAMIC_TAG(VERDEFNUM, 0x6FFFFFFD)  // The number of entries in DT_VERDEF.
+DYNAMIC_TAG(VERNEED, 0x6FFFFFFE)    // The address of the version dependency
+                                    // table.
+DYNAMIC_TAG(VERNEEDNUM, 0x6FFFFFFF) // The number of entries in DT_VERNEED.
+
+// Hexagon specific dynamic table entries
+HEXAGON_DYNAMIC_TAG(HEXAGON_SYMSZ, 0x70000000)
+HEXAGON_DYNAMIC_TAG(HEXAGON_VER, 0x70000001)
+HEXAGON_DYNAMIC_TAG(HEXAGON_PLT, 0x70000002)
+
+// Mips specific dynamic table entry tags.
+
+MIPS_DYNAMIC_TAG(MIPS_RLD_VERSION, 0x70000001)  // 32 bit version number for
+                                                // runtime linker interface.
+MIPS_DYNAMIC_TAG(MIPS_TIME_STAMP, 0x70000002)   // Time stamp.
+MIPS_DYNAMIC_TAG(MIPS_ICHECKSUM, 0x70000003)    // Checksum of external strings
+                                                // and common sizes.
+MIPS_DYNAMIC_TAG(MIPS_IVERSION, 0x70000004)     // Index of version string
+                                                // in string table.
+MIPS_DYNAMIC_TAG(MIPS_FLAGS, 0x70000005)        // 32 bits of flags.
+MIPS_DYNAMIC_TAG(MIPS_BASE_ADDRESS, 0x70000006) // Base address of the segment.
+MIPS_DYNAMIC_TAG(MIPS_MSYM, 0x70000007)         // Address of .msym section.
+MIPS_DYNAMIC_TAG(MIPS_CONFLICT, 0x70000008)     // Address of .conflict section.
+MIPS_DYNAMIC_TAG(MIPS_LIBLIST, 0x70000009)      // Address of .liblist section.
+MIPS_DYNAMIC_TAG(MIPS_LOCAL_GOTNO, 0x7000000a)  // Number of local global offset
+                                                // table entries.
+MIPS_DYNAMIC_TAG(MIPS_CONFLICTNO, 0x7000000b)   // Number of entries
+                                                // in the .conflict section.
+MIPS_DYNAMIC_TAG(MIPS_LIBLISTNO, 0x70000010)    // Number of entries
+                                                // in the .liblist section.
+MIPS_DYNAMIC_TAG(MIPS_SYMTABNO, 0x70000011)     // Number of entries
+                                                // in the .dynsym section.
+MIPS_DYNAMIC_TAG(MIPS_UNREFEXTNO, 0x70000012)   // Index of first external dynamic
+                                                // symbol not referenced locally.
+MIPS_DYNAMIC_TAG(MIPS_GOTSYM, 0x70000013)       // Index of first dynamic symbol
+                                                // in global offset table.
+MIPS_DYNAMIC_TAG(MIPS_HIPAGENO, 0x70000014)     // Number of page table entries
+                                                // in global offset table.
+MIPS_DYNAMIC_TAG(MIPS_RLD_MAP, 0x70000016)      // Address of run time loader map
+                                                // used for debugging.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASS, 0x70000017)    // Delta C++ class definition.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASS_NO, 0x70000018) // Number of entries
+                                                  // in DT_MIPS_DELTA_CLASS.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_INSTANCE, 0x70000019) // Delta C++ class instances.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_INSTANCE_NO, 0x7000001A) // Number of entries
+                                                     // in DT_MIPS_DELTA_INSTANCE.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_RELOC, 0x7000001B)       // Delta relocations.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_RELOC_NO, 0x7000001C)    // Number of entries
+                                                     // in DT_MIPS_DELTA_RELOC.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_SYM, 0x7000001D)         // Delta symbols that Delta
+                                                     // relocations refer to.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_SYM_NO, 0x7000001E)      // Number of entries
+                                                     // in DT_MIPS_DELTA_SYM.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASSSYM, 0x70000020)    // Delta symbols that hold
+                                                     // class declarations.
+MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASSSYM_NO, 0x70000021) // Number of entries
+                                                     // in DT_MIPS_DELTA_CLASSSYM.
+
+MIPS_DYNAMIC_TAG(MIPS_CXX_FLAGS, 0x70000022)         // Flags indicating information
+                                                     // about C++ flavor.
+MIPS_DYNAMIC_TAG(MIPS_PIXIE_INIT, 0x70000023)        // Pixie information.
+MIPS_DYNAMIC_TAG(MIPS_SYMBOL_LIB, 0x70000024)        // Address of .MIPS.symlib
+MIPS_DYNAMIC_TAG(MIPS_LOCALPAGE_GOTIDX, 0x70000025)  // The GOT index of the first PTE
+                                                     // for a segment
+MIPS_DYNAMIC_TAG(MIPS_LOCAL_GOTIDX, 0x70000026)      // The GOT index of the first PTE
+                                                     // for a local symbol
+MIPS_DYNAMIC_TAG(MIPS_HIDDEN_GOTIDX, 0x70000027)     // The GOT index of the first PTE
+                                                     // for a hidden symbol
+MIPS_DYNAMIC_TAG(MIPS_PROTECTED_GOTIDX, 0x70000028)  // The GOT index of the first PTE
+                                                     // for a protected symbol
+MIPS_DYNAMIC_TAG(MIPS_OPTIONS, 0x70000029)               // Address of `.MIPS.options'.
+MIPS_DYNAMIC_TAG(MIPS_INTERFACE, 0x7000002A)             // Address of `.interface'.
+MIPS_DYNAMIC_TAG(MIPS_DYNSTR_ALIGN, 0x7000002B)          // Unknown.
+MIPS_DYNAMIC_TAG(MIPS_INTERFACE_SIZE, 0x7000002C)        // Size of the .interface section.
+MIPS_DYNAMIC_TAG(MIPS_RLD_TEXT_RESOLVE_ADDR, 0x7000002D) // Size of rld_text_resolve
+                                                         // function stored in the GOT.
+MIPS_DYNAMIC_TAG(MIPS_PERF_SUFFIX, 0x7000002E)  // Default suffix of DSO to be added
+                                                // by rld on dlopen() calls.
+MIPS_DYNAMIC_TAG(MIPS_COMPACT_SIZE, 0x7000002F) // Size of compact relocation
+                                                // section (O32).
+MIPS_DYNAMIC_TAG(MIPS_GP_VALUE, 0x70000030)     // GP value for auxiliary GOTs.
+MIPS_DYNAMIC_TAG(MIPS_AUX_DYNAMIC, 0x70000031)  // Address of auxiliary .dynamic.
+MIPS_DYNAMIC_TAG(MIPS_PLTGOT, 0x70000032)       // Address of the base of the PLTGOT.
+MIPS_DYNAMIC_TAG(MIPS_RWPLT, 0x70000034)        // Points to the base
+                                                // of a writable PLT.
+MIPS_DYNAMIC_TAG(MIPS_RLD_MAP_REL, 0x70000035)  // Relative offset of run time loader
+                                                // map, used for debugging.
+
+// Sun machine-independent extensions.
+DYNAMIC_TAG(AUXILIARY, 0x7FFFFFFD) // Shared object to load before self
+DYNAMIC_TAG(FILTER, 0x7FFFFFFF)    // Shared object to get values from
+
+
+#ifdef DYNAMIC_TAG_MARKER_DEFINED
+#undef DYNAMIC_TAG_MARKER
+#endif
+#ifdef MIPS_DYNAMIC_TAG_DEFINED
+#undef MIPS_DYNAMIC_TAG
+#endif
+#ifdef HEXAGON_DYNAMIC_TAG_DEFINED
+#undef HEXAGON_DYNAMIC_TAG
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELF.h b/linux-x64/clang/include/llvm/BinaryFormat/ELF.h
new file mode 100644
index 0000000..4651e51
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELF.h
@@ -0,0 +1,1374 @@
+//===- llvm/BinaryFormat/ELF.h - ELF constants and structures ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains common, non-processor-specific data structures and
+// constants for the ELF file format.
+//
+// The details of the ELF32 bits in this file are largely based on the Tool
+// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
+// Version 1.2, May 1995. The ELF64 details are based on the ELF-64 Object
+// File Format, Version 1.5, Draft 2, May 1998, as well as OpenBSD header
+// files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_ELF_H
+#define LLVM_BINARYFORMAT_ELF_H
+
+#include <cstdint>
+#include <cstring>
+
+namespace llvm {
+namespace ELF {
+
+using Elf32_Addr = uint32_t; // Program address
+using Elf32_Off = uint32_t;  // File offset
+using Elf32_Half = uint16_t;
+using Elf32_Word = uint32_t;
+using Elf32_Sword = int32_t;
+
+using Elf64_Addr = uint64_t;
+using Elf64_Off = uint64_t;
+using Elf64_Half = uint16_t;
+using Elf64_Word = uint32_t;
+using Elf64_Sword = int32_t;
+using Elf64_Xword = uint64_t;
+using Elf64_Sxword = int64_t;
+
+// Object file magic string.
+static const char ElfMagic[] = {0x7f, 'E', 'L', 'F', '\0'};
+
+// e_ident size and indices.
+enum {
+  EI_MAG0 = 0,       // File identification index.
+  EI_MAG1 = 1,       // File identification index.
+  EI_MAG2 = 2,       // File identification index.
+  EI_MAG3 = 3,       // File identification index.
+  EI_CLASS = 4,      // File class.
+  EI_DATA = 5,       // Data encoding.
+  EI_VERSION = 6,    // File version.
+  EI_OSABI = 7,      // OS/ABI identification.
+  EI_ABIVERSION = 8, // ABI version.
+  EI_PAD = 9,        // Start of padding bytes.
+  EI_NIDENT = 16     // Number of bytes in e_ident.
+};
+
+struct Elf32_Ehdr {
+  unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
+  Elf32_Half e_type;                // Type of file (see ET_* below)
+  Elf32_Half e_machine;   // Required architecture for this file (see EM_*)
+  Elf32_Word e_version;   // Must be equal to 1
+  Elf32_Addr e_entry;     // Address to jump to in order to start program
+  Elf32_Off e_phoff;      // Program header table's file offset, in bytes
+  Elf32_Off e_shoff;      // Section header table's file offset, in bytes
+  Elf32_Word e_flags;     // Processor-specific flags
+  Elf32_Half e_ehsize;    // Size of ELF header, in bytes
+  Elf32_Half e_phentsize; // Size of an entry in the program header table
+  Elf32_Half e_phnum;     // Number of entries in the program header table
+  Elf32_Half e_shentsize; // Size of an entry in the section header table
+  Elf32_Half e_shnum;     // Number of entries in the section header table
+  Elf32_Half e_shstrndx;  // Sect hdr table index of sect name string table
+
+  bool checkMagic() const {
+    return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+  }
+
+  unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+  unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
+};
+
+// 64-bit ELF header. Fields are the same as for ELF32, but with different
+// types (see above).
+struct Elf64_Ehdr {
+  unsigned char e_ident[EI_NIDENT];
+  Elf64_Half e_type;
+  Elf64_Half e_machine;
+  Elf64_Word e_version;
+  Elf64_Addr e_entry;
+  Elf64_Off e_phoff;
+  Elf64_Off e_shoff;
+  Elf64_Word e_flags;
+  Elf64_Half e_ehsize;
+  Elf64_Half e_phentsize;
+  Elf64_Half e_phnum;
+  Elf64_Half e_shentsize;
+  Elf64_Half e_shnum;
+  Elf64_Half e_shstrndx;
+
+  bool checkMagic() const {
+    return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+  }
+
+  unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+  unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
+};
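+// A minimal sketch (assumed caller-side code, not part of this header) of
+// validating a buffer as a 64-bit little-endian ELF file:
+//
+//   Elf64_Ehdr Hdr;
+//   std::memcpy(&Hdr, Buffer, sizeof(Hdr));
+//   bool IsElf64LE = Hdr.checkMagic() &&
+//                    Hdr.getFileClass() == ELFCLASS64 &&
+//                    Hdr.getDataEncoding() == ELFDATA2LSB;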
+
+// File types
+enum {
+  ET_NONE = 0,        // No file type
+  ET_REL = 1,         // Relocatable file
+  ET_EXEC = 2,        // Executable file
+  ET_DYN = 3,         // Shared object file
+  ET_CORE = 4,        // Core file
+  ET_LOPROC = 0xff00, // Beginning of processor-specific codes
+  ET_HIPROC = 0xffff  // Processor-specific
+};
+
+// Versioning
+enum { EV_NONE = 0, EV_CURRENT = 1 };
+
+// Machine architectures
+// See current registered ELF machine architectures at:
+//    http://www.uxsglobal.com/developers/gabi/latest/ch4.eheader.html
+enum {
+  EM_NONE = 0,           // No machine
+  EM_M32 = 1,            // AT&T WE 32100
+  EM_SPARC = 2,          // SPARC
+  EM_386 = 3,            // Intel 386
+  EM_68K = 4,            // Motorola 68000
+  EM_88K = 5,            // Motorola 88000
+  EM_IAMCU = 6,          // Intel MCU
+  EM_860 = 7,            // Intel 80860
+  EM_MIPS = 8,           // MIPS R3000
+  EM_S370 = 9,           // IBM System/370
+  EM_MIPS_RS3_LE = 10,   // MIPS RS3000 Little-endian
+  EM_PARISC = 15,        // Hewlett-Packard PA-RISC
+  EM_VPP500 = 17,        // Fujitsu VPP500
+  EM_SPARC32PLUS = 18,   // Enhanced instruction set SPARC
+  EM_960 = 19,           // Intel 80960
+  EM_PPC = 20,           // PowerPC
+  EM_PPC64 = 21,         // PowerPC64
+  EM_S390 = 22,          // IBM System/390
+  EM_SPU = 23,           // IBM SPU/SPC
+  EM_V800 = 36,          // NEC V800
+  EM_FR20 = 37,          // Fujitsu FR20
+  EM_RH32 = 38,          // TRW RH-32
+  EM_RCE = 39,           // Motorola RCE
+  EM_ARM = 40,           // ARM
+  EM_ALPHA = 41,         // DEC Alpha
+  EM_SH = 42,            // Hitachi SH
+  EM_SPARCV9 = 43,       // SPARC V9
+  EM_TRICORE = 44,       // Siemens TriCore
+  EM_ARC = 45,           // Argonaut RISC Core
+  EM_H8_300 = 46,        // Hitachi H8/300
+  EM_H8_300H = 47,       // Hitachi H8/300H
+  EM_H8S = 48,           // Hitachi H8S
+  EM_H8_500 = 49,        // Hitachi H8/500
+  EM_IA_64 = 50,         // Intel IA-64 processor architecture
+  EM_MIPS_X = 51,        // Stanford MIPS-X
+  EM_COLDFIRE = 52,      // Motorola ColdFire
+  EM_68HC12 = 53,        // Motorola M68HC12
+  EM_MMA = 54,           // Fujitsu MMA Multimedia Accelerator
+  EM_PCP = 55,           // Siemens PCP
+  EM_NCPU = 56,          // Sony nCPU embedded RISC processor
+  EM_NDR1 = 57,          // Denso NDR1 microprocessor
+  EM_STARCORE = 58,      // Motorola Star*Core processor
+  EM_ME16 = 59,          // Toyota ME16 processor
+  EM_ST100 = 60,         // STMicroelectronics ST100 processor
+  EM_TINYJ = 61,         // Advanced Logic Corp. TinyJ embedded processor family
+  EM_X86_64 = 62,        // AMD x86-64 architecture
+  EM_PDSP = 63,          // Sony DSP Processor
+  EM_PDP10 = 64,         // Digital Equipment Corp. PDP-10
+  EM_PDP11 = 65,         // Digital Equipment Corp. PDP-11
+  EM_FX66 = 66,          // Siemens FX66 microcontroller
+  EM_ST9PLUS = 67,       // STMicroelectronics ST9+ 8/16 bit microcontroller
+  EM_ST7 = 68,           // STMicroelectronics ST7 8-bit microcontroller
+  EM_68HC16 = 69,        // Motorola MC68HC16 Microcontroller
+  EM_68HC11 = 70,        // Motorola MC68HC11 Microcontroller
+  EM_68HC08 = 71,        // Motorola MC68HC08 Microcontroller
+  EM_68HC05 = 72,        // Motorola MC68HC05 Microcontroller
+  EM_SVX = 73,           // Silicon Graphics SVx
+  EM_ST19 = 74,          // STMicroelectronics ST19 8-bit microcontroller
+  EM_VAX = 75,           // Digital VAX
+  EM_CRIS = 76,          // Axis Communications 32-bit embedded processor
+  EM_JAVELIN = 77,       // Infineon Technologies 32-bit embedded processor
+  EM_FIREPATH = 78,      // Element 14 64-bit DSP Processor
+  EM_ZSP = 79,           // LSI Logic 16-bit DSP Processor
+  EM_MMIX = 80,          // Donald Knuth's educational 64-bit processor
+  EM_HUANY = 81,         // Harvard University machine-independent object files
+  EM_PRISM = 82,         // SiTera Prism
+  EM_AVR = 83,           // Atmel AVR 8-bit microcontroller
+  EM_FR30 = 84,          // Fujitsu FR30
+  EM_D10V = 85,          // Mitsubishi D10V
+  EM_D30V = 86,          // Mitsubishi D30V
+  EM_V850 = 87,          // NEC v850
+  EM_M32R = 88,          // Mitsubishi M32R
+  EM_MN10300 = 89,       // Matsushita MN10300
+  EM_MN10200 = 90,       // Matsushita MN10200
+  EM_PJ = 91,            // picoJava
+  EM_OPENRISC = 92,      // OpenRISC 32-bit embedded processor
+  EM_ARC_COMPACT = 93,   // ARC International ARCompact processor (old
+                         // spelling/synonym: EM_ARC_A5)
+  EM_XTENSA = 94,        // Tensilica Xtensa Architecture
+  EM_VIDEOCORE = 95,     // Alphamosaic VideoCore processor
+  EM_TMM_GPP = 96,       // Thompson Multimedia General Purpose Processor
+  EM_NS32K = 97,         // National Semiconductor 32000 series
+  EM_TPC = 98,           // Tenor Network TPC processor
+  EM_SNP1K = 99,         // Trebia SNP 1000 processor
+  EM_ST200 = 100,        // STMicroelectronics (www.st.com) ST200
+  EM_IP2K = 101,         // Ubicom IP2xxx microcontroller family
+  EM_MAX = 102,          // MAX Processor
+  EM_CR = 103,           // National Semiconductor CompactRISC microprocessor
+  EM_F2MC16 = 104,       // Fujitsu F2MC16
+  EM_MSP430 = 105,       // Texas Instruments embedded microcontroller msp430
+  EM_BLACKFIN = 106,     // Analog Devices Blackfin (DSP) processor
+  EM_SE_C33 = 107,       // S1C33 Family of Seiko Epson processors
+  EM_SEP = 108,          // Sharp embedded microprocessor
+  EM_ARCA = 109,         // Arca RISC Microprocessor
+  EM_UNICORE = 110,      // Microprocessor series from PKU-Unity Ltd. and MPRC
+                         // of Peking University
+  EM_EXCESS = 111,       // eXcess: 16/32/64-bit configurable embedded CPU
+  EM_DXP = 112,          // Icera Semiconductor Inc. Deep Execution Processor
+  EM_ALTERA_NIOS2 = 113, // Altera Nios II soft-core processor
+  EM_CRX = 114,          // National Semiconductor CompactRISC CRX
+  EM_XGATE = 115,        // Motorola XGATE embedded processor
+  EM_C166 = 116,         // Infineon C16x/XC16x processor
+  EM_M16C = 117,         // Renesas M16C series microprocessors
+  EM_DSPIC30F = 118,     // Microchip Technology dsPIC30F Digital Signal
+                         // Controller
+  EM_CE = 119,           // Freescale Communication Engine RISC core
+  EM_M32C = 120,         // Renesas M32C series microprocessors
+  EM_TSK3000 = 131,      // Altium TSK3000 core
+  EM_RS08 = 132,         // Freescale RS08 embedded processor
+  EM_SHARC = 133,        // Analog Devices SHARC family of 32-bit DSP
+                         // processors
+  EM_ECOG2 = 134,        // Cyan Technology eCOG2 microprocessor
+  EM_SCORE7 = 135,       // Sunplus S+core7 RISC processor
+  EM_DSP24 = 136,        // New Japan Radio (NJR) 24-bit DSP Processor
+  EM_VIDEOCORE3 = 137,   // Broadcom VideoCore III processor
+  EM_LATTICEMICO32 = 138, // RISC processor for Lattice FPGA architecture
+  EM_SE_C17 = 139,        // Seiko Epson C17 family
+  EM_TI_C6000 = 140,      // The Texas Instruments TMS320C6000 DSP family
+  EM_TI_C2000 = 141,      // The Texas Instruments TMS320C2000 DSP family
+  EM_TI_C5500 = 142,      // The Texas Instruments TMS320C55x DSP family
+  EM_MMDSP_PLUS = 160,    // STMicroelectronics 64bit VLIW Data Signal Processor
+  EM_CYPRESS_M8C = 161,   // Cypress M8C microprocessor
+  EM_R32C = 162,          // Renesas R32C series microprocessors
+  EM_TRIMEDIA = 163,      // NXP Semiconductors TriMedia architecture family
+  EM_HEXAGON = 164,       // Qualcomm Hexagon processor
+  EM_8051 = 165,          // Intel 8051 and variants
+  EM_STXP7X = 166,        // STMicroelectronics STxP7x family of configurable
+                          // and extensible RISC processors
+  EM_NDS32 = 167,         // Andes Technology compact code size embedded RISC
+                          // processor family
+  EM_ECOG1 = 168,         // Cyan Technology eCOG1X family
+  EM_ECOG1X = 168,        // Cyan Technology eCOG1X family
+  EM_MAXQ30 = 169,        // Dallas Semiconductor MAXQ30 Core Micro-controllers
+  EM_XIMO16 = 170,        // New Japan Radio (NJR) 16-bit DSP Processor
+  EM_MANIK = 171,         // M2000 Reconfigurable RISC Microprocessor
+  EM_CRAYNV2 = 172,       // Cray Inc. NV2 vector architecture
+  EM_RX = 173,            // Renesas RX family
+  EM_METAG = 174,         // Imagination Technologies META processor
+                          // architecture
+  EM_MCST_ELBRUS = 175,   // MCST Elbrus general purpose hardware architecture
+  EM_ECOG16 = 176,        // Cyan Technology eCOG16 family
+  EM_CR16 = 177,          // National Semiconductor CompactRISC CR16 16-bit
+                          // microprocessor
+  EM_ETPU = 178,          // Freescale Extended Time Processing Unit
+  EM_SLE9X = 179,         // Infineon Technologies SLE9X core
+  EM_L10M = 180,          // Intel L10M
+  EM_K10M = 181,          // Intel K10M
+  EM_AARCH64 = 183,       // ARM AArch64
+  EM_AVR32 = 185,         // Atmel Corporation 32-bit microprocessor family
+  EM_STM8 = 186,          // STMicroelectronics STM8 8-bit microcontroller
+  EM_TILE64 = 187,        // Tilera TILE64 multicore architecture family
+  EM_TILEPRO = 188,       // Tilera TILEPro multicore architecture family
+  EM_CUDA = 190,          // NVIDIA CUDA architecture
+  EM_TILEGX = 191,        // Tilera TILE-Gx multicore architecture family
+  EM_CLOUDSHIELD = 192,   // CloudShield architecture family
+  EM_COREA_1ST = 193,     // KIPO-KAIST Core-A 1st generation processor family
+  EM_COREA_2ND = 194,     // KIPO-KAIST Core-A 2nd generation processor family
+  EM_ARC_COMPACT2 = 195,  // Synopsys ARCompact V2
+  EM_OPEN8 = 196,         // Open8 8-bit RISC soft processor core
+  EM_RL78 = 197,          // Renesas RL78 family
+  EM_VIDEOCORE5 = 198,    // Broadcom VideoCore V processor
+  EM_78KOR = 199,         // Renesas 78KOR family
+  EM_56800EX = 200,       // Freescale 56800EX Digital Signal Controller (DSC)
+  EM_BA1 = 201,           // Beyond BA1 CPU architecture
+  EM_BA2 = 202,           // Beyond BA2 CPU architecture
+  EM_XCORE = 203,         // XMOS xCORE processor family
+  EM_MCHP_PIC = 204,      // Microchip 8-bit PIC(r) family
+  EM_INTEL205 = 205,      // Reserved by Intel
+  EM_INTEL206 = 206,      // Reserved by Intel
+  EM_INTEL207 = 207,      // Reserved by Intel
+  EM_INTEL208 = 208,      // Reserved by Intel
+  EM_INTEL209 = 209,      // Reserved by Intel
+  EM_KM32 = 210,          // KM211 KM32 32-bit processor
+  EM_KMX32 = 211,         // KM211 KMX32 32-bit processor
+  EM_KMX16 = 212,         // KM211 KMX16 16-bit processor
+  EM_KMX8 = 213,          // KM211 KMX8 8-bit processor
+  EM_KVARC = 214,         // KM211 KVARC processor
+  EM_CDP = 215,           // Paneve CDP architecture family
+  EM_COGE = 216,          // Cognitive Smart Memory Processor
+  EM_COOL = 217,          // iCelero CoolEngine
+  EM_NORC = 218,          // Nanoradio Optimized RISC
+  EM_CSR_KALIMBA = 219,   // CSR Kalimba architecture family
+  EM_AMDGPU = 224,        // AMD GPU architecture
+  EM_RISCV = 243,         // RISC-V
+  EM_LANAI = 244,         // Lanai 32-bit processor
+  EM_BPF = 247,           // Linux kernel bpf virtual machine
+
+  // A request has been made to the maintainer of the official registry for
+  // an official value for WebAssembly. As soon as one is allocated, this
+  // enum will be updated to use it.
+  EM_WEBASSEMBLY = 0x4157, // WebAssembly architecture
+};
+
+// Object file classes.
+enum {
+  ELFCLASSNONE = 0,
+  ELFCLASS32 = 1, // 32-bit object file
+  ELFCLASS64 = 2  // 64-bit object file
+};
+
+// Object file byte orderings.
+enum {
+  ELFDATANONE = 0, // Invalid data encoding.
+  ELFDATA2LSB = 1, // Little-endian object file
+  ELFDATA2MSB = 2  // Big-endian object file
+};
+
+// OS ABI identification.
+enum {
+  ELFOSABI_NONE = 0,           // UNIX System V ABI
+  ELFOSABI_HPUX = 1,           // HP-UX operating system
+  ELFOSABI_NETBSD = 2,         // NetBSD
+  ELFOSABI_GNU = 3,            // GNU/Linux
+  ELFOSABI_LINUX = 3,          // Historical alias for ELFOSABI_GNU.
+  ELFOSABI_HURD = 4,           // GNU/Hurd
+  ELFOSABI_SOLARIS = 6,        // Solaris
+  ELFOSABI_AIX = 7,            // AIX
+  ELFOSABI_IRIX = 8,           // IRIX
+  ELFOSABI_FREEBSD = 9,        // FreeBSD
+  ELFOSABI_TRU64 = 10,         // TRU64 UNIX
+  ELFOSABI_MODESTO = 11,       // Novell Modesto
+  ELFOSABI_OPENBSD = 12,       // OpenBSD
+  ELFOSABI_OPENVMS = 13,       // OpenVMS
+  ELFOSABI_NSK = 14,           // Hewlett-Packard Non-Stop Kernel
+  ELFOSABI_AROS = 15,          // AROS
+  ELFOSABI_FENIXOS = 16,       // FenixOS
+  ELFOSABI_CLOUDABI = 17,      // Nuxi CloudABI
+  ELFOSABI_FIRST_ARCH = 64,    // First architecture-specific OS ABI
+  ELFOSABI_AMDGPU_HSA = 64,    // AMD HSA runtime
+  ELFOSABI_AMDGPU_PAL = 65,    // AMD PAL runtime
+  ELFOSABI_AMDGPU_MESA3D = 66, // AMD GCN GPUs (GFX6+) for MESA runtime
+  ELFOSABI_ARM = 97,           // ARM
+  ELFOSABI_C6000_ELFABI = 64,  // Bare-metal TMS320C6000
+  ELFOSABI_C6000_LINUX = 65,   // Linux TMS320C6000
+  ELFOSABI_STANDALONE = 255,   // Standalone (embedded) application
+  ELFOSABI_LAST_ARCH = 255     // Last Architecture-specific OS ABI
+};
+
+#define ELF_RELOC(name, value) name = value,
+
+// X86_64 relocations.
+enum {
+#include "ELFRelocs/x86_64.def"
+};
+
+// i386 relocations.
+enum {
+#include "ELFRelocs/i386.def"
+};
+
+// ELF Relocation types for PPC32
+enum {
+#include "ELFRelocs/PowerPC.def"
+};
+
+// Specific e_flags for PPC64
+enum {
+  // e_flags bits specifying ABI:
+  // 1 for original ABI using function descriptors,
+  // 2 for revised ABI without function descriptors,
+  // 0 for unspecified or not using any features affected by the differences.
+  EF_PPC64_ABI = 3
+};
+
+// Special values for the st_other field in the symbol table entry for PPC64.
+enum {
+  STO_PPC64_LOCAL_BIT = 5,
+  STO_PPC64_LOCAL_MASK = (7 << STO_PPC64_LOCAL_BIT)
+};
+static inline int64_t decodePPC64LocalEntryOffset(unsigned Other) {
+  unsigned Val = (Other & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT;
+  return ((1 << Val) >> 2) << 2;
+}
+static inline unsigned encodePPC64LocalEntryOffset(int64_t Offset) {
+  unsigned Val =
+      (Offset >= 4 * 4 ? (Offset >= 8 * 4 ? (Offset >= 16 * 4 ? 6 : 5) : 4)
+                       : (Offset >= 2 * 4 ? 3 : (Offset >= 1 * 4 ? 2 : 0)));
+  return Val << STO_PPC64_LOCAL_BIT;
+}
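+// Worked example for the helpers above: an 8-byte local-entry offset encodes
+// as Val = 3, i.e. (3 << STO_PPC64_LOCAL_BIT); decoding 3 computes
+// ((1 << 3) >> 2) << 2 == 8, recovering the original offset.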
+
+// ELF Relocation types for PPC64
+enum {
+#include "ELFRelocs/PowerPC64.def"
+};
+
+// ELF Relocation types for AArch64
+enum {
+#include "ELFRelocs/AArch64.def"
+};
+
+// ARM Specific e_flags
+enum : unsigned {
+  EF_ARM_SOFT_FLOAT = 0x00000200U,
+  EF_ARM_VFP_FLOAT = 0x00000400U,
+  EF_ARM_EABI_UNKNOWN = 0x00000000U,
+  EF_ARM_EABI_VER1 = 0x01000000U,
+  EF_ARM_EABI_VER2 = 0x02000000U,
+  EF_ARM_EABI_VER3 = 0x03000000U,
+  EF_ARM_EABI_VER4 = 0x04000000U,
+  EF_ARM_EABI_VER5 = 0x05000000U,
+  EF_ARM_EABIMASK = 0xFF000000U
+};
+
+// ELF Relocation types for ARM
+enum {
+#include "ELFRelocs/ARM.def"
+};
+
+// ARC Specific e_flags
+enum : unsigned {
+  EF_ARC_MACH_MSK = 0x000000ff,
+  EF_ARC_OSABI_MSK = 0x00000f00,
+  E_ARC_MACH_ARC600 = 0x00000002,
+  E_ARC_MACH_ARC601 = 0x00000004,
+  E_ARC_MACH_ARC700 = 0x00000003,
+  EF_ARC_CPU_ARCV2EM = 0x00000005,
+  EF_ARC_CPU_ARCV2HS = 0x00000006,
+  E_ARC_OSABI_ORIG = 0x00000000,
+  E_ARC_OSABI_V2 = 0x00000200,
+  E_ARC_OSABI_V3 = 0x00000300,
+  E_ARC_OSABI_V4 = 0x00000400,
+  EF_ARC_PIC = 0x00000100
+};
+
+// ELF Relocation types for ARC
+enum {
+#include "ELFRelocs/ARC.def"
+};
+
+// AVR specific e_flags
+enum : unsigned {
+  EF_AVR_ARCH_AVR1 = 1,
+  EF_AVR_ARCH_AVR2 = 2,
+  EF_AVR_ARCH_AVR25 = 25,
+  EF_AVR_ARCH_AVR3 = 3,
+  EF_AVR_ARCH_AVR31 = 31,
+  EF_AVR_ARCH_AVR35 = 35,
+  EF_AVR_ARCH_AVR4 = 4,
+  EF_AVR_ARCH_AVR5 = 5,
+  EF_AVR_ARCH_AVR51 = 51,
+  EF_AVR_ARCH_AVR6 = 6,
+  EF_AVR_ARCH_AVRTINY = 100,
+  EF_AVR_ARCH_XMEGA1 = 101,
+  EF_AVR_ARCH_XMEGA2 = 102,
+  EF_AVR_ARCH_XMEGA3 = 103,
+  EF_AVR_ARCH_XMEGA4 = 104,
+  EF_AVR_ARCH_XMEGA5 = 105,
+  EF_AVR_ARCH_XMEGA6 = 106,
+  EF_AVR_ARCH_XMEGA7 = 107
+};
+
+// ELF Relocation types for AVR
+enum {
+#include "ELFRelocs/AVR.def"
+};
+
+// Mips Specific e_flags
+enum : unsigned {
+  EF_MIPS_NOREORDER = 0x00000001, // Don't reorder instructions
+  EF_MIPS_PIC = 0x00000002,       // Position independent code
+  EF_MIPS_CPIC = 0x00000004,      // Call object with Position independent code
+  EF_MIPS_ABI2 = 0x00000020,      // File uses N32 ABI
+  EF_MIPS_32BITMODE = 0x00000100, // Code compiled for a 64-bit machine
+                                  // in 32-bit mode
+  EF_MIPS_FP64 = 0x00000200,      // Code compiled for a 32-bit machine
+                                  // but uses 64-bit FP registers
+  EF_MIPS_NAN2008 = 0x00000400,   // Uses IEEE 754-2008 NaN encoding
+
+  // ABI flags
+  EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
+  EF_MIPS_ABI_O64 = 0x00002000, // O32 ABI extended for 64-bit architecture.
+  EF_MIPS_ABI_EABI32 = 0x00003000, // EABI in 32 bit mode.
+  EF_MIPS_ABI_EABI64 = 0x00004000, // EABI in 64 bit mode.
+  EF_MIPS_ABI = 0x0000f000,        // Mask for selecting EF_MIPS_ABI_ variant.
+
+  // MIPS machine variant
+  EF_MIPS_MACH_NONE = 0x00000000,    // A standard MIPS implementation.
+  EF_MIPS_MACH_3900 = 0x00810000,    // Toshiba R3900
+  EF_MIPS_MACH_4010 = 0x00820000,    // LSI R4010
+  EF_MIPS_MACH_4100 = 0x00830000,    // NEC VR4100
+  EF_MIPS_MACH_4650 = 0x00850000,    // MIPS R4650
+  EF_MIPS_MACH_4120 = 0x00870000,    // NEC VR4120
+  EF_MIPS_MACH_4111 = 0x00880000,    // NEC VR4111/VR4181
+  EF_MIPS_MACH_SB1 = 0x008a0000,     // Broadcom SB-1
+  EF_MIPS_MACH_OCTEON = 0x008b0000,  // Cavium Networks Octeon
+  EF_MIPS_MACH_XLR = 0x008c0000,     // RMI Xlr
+  EF_MIPS_MACH_OCTEON2 = 0x008d0000, // Cavium Networks Octeon2
+  EF_MIPS_MACH_OCTEON3 = 0x008e0000, // Cavium Networks Octeon3
+  EF_MIPS_MACH_5400 = 0x00910000,    // NEC VR5400
+  EF_MIPS_MACH_5900 = 0x00920000,    // MIPS R5900
+  EF_MIPS_MACH_5500 = 0x00980000,    // NEC VR5500
+  EF_MIPS_MACH_9000 = 0x00990000,    // Unknown
+  EF_MIPS_MACH_LS2E = 0x00a00000,    // ST Microelectronics Loongson 2E
+  EF_MIPS_MACH_LS2F = 0x00a10000,    // ST Microelectronics Loongson 2F
+  EF_MIPS_MACH_LS3A = 0x00a20000,    // Loongson 3A
+  EF_MIPS_MACH = 0x00ff0000,         // EF_MIPS_MACH_xxx selection mask
+
+  // ARCH_ASE
+  EF_MIPS_MICROMIPS = 0x02000000,     // microMIPS
+  EF_MIPS_ARCH_ASE_M16 = 0x04000000,  // Has Mips-16 ISA extensions
+  EF_MIPS_ARCH_ASE_MDMX = 0x08000000, // Has MDMX multimedia extensions
+  EF_MIPS_ARCH_ASE = 0x0f000000,      // Mask for EF_MIPS_ARCH_ASE_xxx flags
+
+  // ARCH
+  EF_MIPS_ARCH_1 = 0x00000000,    // MIPS1 instruction set
+  EF_MIPS_ARCH_2 = 0x10000000,    // MIPS2 instruction set
+  EF_MIPS_ARCH_3 = 0x20000000,    // MIPS3 instruction set
+  EF_MIPS_ARCH_4 = 0x30000000,    // MIPS4 instruction set
+  EF_MIPS_ARCH_5 = 0x40000000,    // MIPS5 instruction set
+  EF_MIPS_ARCH_32 = 0x50000000,   // MIPS32 instruction set (per Linux, not elf.h)
+  EF_MIPS_ARCH_64 = 0x60000000,   // MIPS64 instruction set (per Linux, not elf.h)
+  EF_MIPS_ARCH_32R2 = 0x70000000, // mips32r2, mips32r3, mips32r5
+  EF_MIPS_ARCH_64R2 = 0x80000000, // mips64r2, mips64r3, mips64r5
+  EF_MIPS_ARCH_32R6 = 0x90000000, // mips32r6
+  EF_MIPS_ARCH_64R6 = 0xa0000000, // mips64r6
+  EF_MIPS_ARCH = 0xf0000000       // Mask for applying EF_MIPS_ARCH_ variant
+};
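+
+// Hypothetical usage sketch (illustrative only, not part of the upstream
+// header): EF_MIPS_ABI, EF_MIPS_MACH, EF_MIPS_ARCH_ASE and EF_MIPS_ARCH are
+// masks over fields of e_flags, so a variant is tested by masking first:
+inline bool isMips64r6(unsigned EFlags) {
+  return (EFlags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6;
+}
+inline bool hasMicroMips(unsigned EFlags) {
+  return (EFlags & EF_MIPS_MICROMIPS) != 0;
+}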
+
+// ELF Relocation types for Mips
+enum {
+#include "ELFRelocs/Mips.def"
+};
+
+// Special values for the st_other field in the symbol table entry for MIPS.
+enum {
+  STO_MIPS_OPTIONAL = 0x04,  // Symbol whose definition is optional
+  STO_MIPS_PLT = 0x08,       // PLT entry related dynamic table record
+  STO_MIPS_PIC = 0x20,       // PIC function in an object that mixes PIC/non-PIC
+  STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
+  STO_MIPS_MIPS16 = 0xf0     // MIPS Specific ISA for Mips16
+};
+
+// .MIPS.options section descriptor kinds
+enum {
+  ODK_NULL = 0,       // Undefined
+  ODK_REGINFO = 1,    // Register usage information
+  ODK_EXCEPTIONS = 2, // Exception processing options
+  ODK_PAD = 3,        // Section padding options
+  ODK_HWPATCH = 4,    // Hardware patches applied
+  ODK_FILL = 5,       // Linker fill value
+  ODK_TAGS = 6,       // Space for tool identification
+  ODK_HWAND = 7,      // Hardware AND patches applied
+  ODK_HWOR = 8,       // Hardware OR patches applied
+  ODK_GP_GROUP = 9,   // GP group to use for text/data sections
+  ODK_IDENT = 10,     // ID information
+  ODK_PAGESIZE = 11   // Page size information
+};
+
+// Hexagon-specific e_flags
+enum {
+  // Object processor version flags, bits[11:0]
+  EF_HEXAGON_MACH_V2 = 0x00000001,  // Hexagon V2
+  EF_HEXAGON_MACH_V3 = 0x00000002,  // Hexagon V3
+  EF_HEXAGON_MACH_V4 = 0x00000003,  // Hexagon V4
+  EF_HEXAGON_MACH_V5 = 0x00000004,  // Hexagon V5
+  EF_HEXAGON_MACH_V55 = 0x00000005, // Hexagon V55
+  EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60
+  EF_HEXAGON_MACH_V62 = 0x00000062, // Hexagon V62
+  EF_HEXAGON_MACH_V65 = 0x00000065, // Hexagon V65
+
+  // Highest ISA version flags
+  EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
+                                    // of e_flags
+  EF_HEXAGON_ISA_V2 = 0x00000010,   // Hexagon V2 ISA
+  EF_HEXAGON_ISA_V3 = 0x00000020,   // Hexagon V3 ISA
+  EF_HEXAGON_ISA_V4 = 0x00000030,   // Hexagon V4 ISA
+  EF_HEXAGON_ISA_V5 = 0x00000040,   // Hexagon V5 ISA
+  EF_HEXAGON_ISA_V55 = 0x00000050,  // Hexagon V55 ISA
+  EF_HEXAGON_ISA_V60 = 0x00000060,  // Hexagon V60 ISA
+  EF_HEXAGON_ISA_V62 = 0x00000062,  // Hexagon V62 ISA
+  EF_HEXAGON_ISA_V65 = 0x00000065,  // Hexagon V65 ISA
+};
+
+// Hexagon-specific section indexes for common small data
+enum {
+  SHN_HEXAGON_SCOMMON = 0xff00,   // Other access sizes
+  SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
+  SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
+  SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
+  SHN_HEXAGON_SCOMMON_8 = 0xff04  // Double-word-sized access
+};
+
+// ELF Relocation types for Hexagon
+enum {
+#include "ELFRelocs/Hexagon.def"
+};
+
+// ELF Relocation types for Lanai.
+enum {
+#include "ELFRelocs/Lanai.def"
+};
+
+// RISCV Specific e_flags
+enum : unsigned {
+  EF_RISCV_RVC = 0x0001,
+  EF_RISCV_FLOAT_ABI = 0x0006,
+  EF_RISCV_FLOAT_ABI_SOFT = 0x0000,
+  EF_RISCV_FLOAT_ABI_SINGLE = 0x0002,
+  EF_RISCV_FLOAT_ABI_DOUBLE = 0x0004,
+  EF_RISCV_FLOAT_ABI_QUAD = 0x0006,
+  EF_RISCV_RVE = 0x0008
+};
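+
+// Hypothetical usage sketch (illustrative only): EF_RISCV_FLOAT_ABI covers a
+// two-bit field, so the float ABI is read by masking e_flags and comparing
+// against one of the EF_RISCV_FLOAT_ABI_* values:
+inline bool usesDoubleFloatABI(unsigned EFlags) {
+  return (EFlags & EF_RISCV_FLOAT_ABI) == EF_RISCV_FLOAT_ABI_DOUBLE;
+}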
+
+// ELF Relocation types for RISC-V
+enum {
+#include "ELFRelocs/RISCV.def"
+};
+
+// ELF Relocation types for S390/zSeries
+enum {
+#include "ELFRelocs/SystemZ.def"
+};
+
+// ELF Relocation types for Sparc.
+enum {
+#include "ELFRelocs/Sparc.def"
+};
+
+// ELF Relocation types for WebAssembly
+enum {
+#include "ELFRelocs/WebAssembly.def"
+};
+
+// AMDGPU specific e_flags.
+enum : unsigned {
+  // Processor selection mask for EF_AMDGPU_MACH_* values.
+  EF_AMDGPU_MACH = 0x0ff,
+
+  // Not specified processor.
+  EF_AMDGPU_MACH_NONE = 0x000,
+
+  // R600-based processors.
+  EF_AMDGPU_MACH_R600_FIRST = 0x001,
+  EF_AMDGPU_MACH_R600_LAST = 0x010,
+  // Radeon HD 2000/3000 Series (R600).
+  EF_AMDGPU_MACH_R600_R600 = 0x001,
+  EF_AMDGPU_MACH_R600_R630 = 0x002,
+  EF_AMDGPU_MACH_R600_RS880 = 0x003,
+  EF_AMDGPU_MACH_R600_RV670 = 0x004,
+  // Radeon HD 4000 Series (R700).
+  EF_AMDGPU_MACH_R600_RV710 = 0x005,
+  EF_AMDGPU_MACH_R600_RV730 = 0x006,
+  EF_AMDGPU_MACH_R600_RV770 = 0x007,
+  // Radeon HD 5000 Series (Evergreen).
+  EF_AMDGPU_MACH_R600_CEDAR = 0x008,
+  EF_AMDGPU_MACH_R600_CYPRESS = 0x009,
+  EF_AMDGPU_MACH_R600_JUNIPER = 0x00a,
+  EF_AMDGPU_MACH_R600_REDWOOD = 0x00b,
+  EF_AMDGPU_MACH_R600_SUMO = 0x00c,
+  // Radeon HD 6000 Series (Northern Islands).
+  EF_AMDGPU_MACH_R600_BARTS = 0x00d,
+  EF_AMDGPU_MACH_R600_CAICOS = 0x00e,
+  EF_AMDGPU_MACH_R600_CAYMAN = 0x00f,
+  EF_AMDGPU_MACH_R600_TURKS = 0x010,
+
+  // Reserved for R600-based processors.
+  EF_AMDGPU_MACH_R600_RESERVED_FIRST = 0x011,
+  EF_AMDGPU_MACH_R600_RESERVED_LAST = 0x01f,
+
+  // AMDGCN-based processors.
+  EF_AMDGPU_MACH_AMDGCN_FIRST = 0x020,
+  EF_AMDGPU_MACH_AMDGCN_LAST = 0x02d,
+  // AMDGCN GFX6.
+  EF_AMDGPU_MACH_AMDGCN_GFX600 = 0x020,
+  EF_AMDGPU_MACH_AMDGCN_GFX601 = 0x021,
+  // AMDGCN GFX7.
+  EF_AMDGPU_MACH_AMDGCN_GFX700 = 0x022,
+  EF_AMDGPU_MACH_AMDGCN_GFX701 = 0x023,
+  EF_AMDGPU_MACH_AMDGCN_GFX702 = 0x024,
+  EF_AMDGPU_MACH_AMDGCN_GFX703 = 0x025,
+  EF_AMDGPU_MACH_AMDGCN_GFX704 = 0x026,
+  // AMDGCN GFX8.
+  EF_AMDGPU_MACH_AMDGCN_GFX801 = 0x028,
+  EF_AMDGPU_MACH_AMDGCN_GFX802 = 0x029,
+  EF_AMDGPU_MACH_AMDGCN_GFX803 = 0x02a,
+  EF_AMDGPU_MACH_AMDGCN_GFX810 = 0x02b,
+  // AMDGCN GFX9.
+  EF_AMDGPU_MACH_AMDGCN_GFX900 = 0x02c,
+  EF_AMDGPU_MACH_AMDGCN_GFX902 = 0x02d,
+
+  // Reserved for AMDGCN-based processors.
+  EF_AMDGPU_MACH_AMDGCN_RESERVED0 = 0x027,
+  EF_AMDGPU_MACH_AMDGCN_RESERVED1 = 0x02e,
+  EF_AMDGPU_MACH_AMDGCN_RESERVED2 = 0x02f,
+  EF_AMDGPU_MACH_AMDGCN_RESERVED3 = 0x030,
+
+  // Indicates whether the xnack target feature is enabled for all code
+  // contained in the object.
+  EF_AMDGPU_XNACK = 0x100,
+};
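+
+// Hypothetical usage sketch (illustrative only): the *_FIRST/*_LAST pairs
+// above bound inclusive ranges of the EF_AMDGPU_MACH field, so a processor
+// family is identified by extracting the field and range-checking it:
+inline bool isR600Machine(unsigned EFlags) {
+  unsigned Mach = EFlags & EF_AMDGPU_MACH;
+  return Mach >= EF_AMDGPU_MACH_R600_FIRST && Mach <= EF_AMDGPU_MACH_R600_LAST;
+}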
+
+// ELF Relocation types for AMDGPU
+enum {
+#include "ELFRelocs/AMDGPU.def"
+};
+
+// ELF Relocation types for BPF
+enum {
+#include "ELFRelocs/BPF.def"
+};
+
+#undef ELF_RELOC
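+
+// The ELFRelocs/*.def includes above follow the X-macro pattern: this header
+// defines ELF_RELOC(name, value) as an enumerator before including them, and
+// other consumers may redefine it to generate different code. For example, a
+// hypothetical name table (illustrative only) could be produced with:
+//
+//   #define ELF_RELOC(name, value) {#name, value},
+//   static const struct { const char *Name; unsigned Type; } MipsRelocs[] = {
+//   #include "llvm/BinaryFormat/ELFRelocs/Mips.def"
+//   };
+//   #undef ELF_RELOC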
+
+// Section header.
+struct Elf32_Shdr {
+  Elf32_Word sh_name;      // Section name (index into string table)
+  Elf32_Word sh_type;      // Section type (SHT_*)
+  Elf32_Word sh_flags;     // Section flags (SHF_*)
+  Elf32_Addr sh_addr;      // Address where section is to be loaded
+  Elf32_Off sh_offset;     // File offset of section data, in bytes
+  Elf32_Word sh_size;      // Size of section, in bytes
+  Elf32_Word sh_link;      // Section type-specific header table index link
+  Elf32_Word sh_info;      // Section type-specific extra information
+  Elf32_Word sh_addralign; // Section address alignment
+  Elf32_Word sh_entsize;   // Size of records contained within the section
+};
+
+// Section header for ELF64 - same fields as ELF32, different types.
+struct Elf64_Shdr {
+  Elf64_Word sh_name;
+  Elf64_Word sh_type;
+  Elf64_Xword sh_flags;
+  Elf64_Addr sh_addr;
+  Elf64_Off sh_offset;
+  Elf64_Xword sh_size;
+  Elf64_Word sh_link;
+  Elf64_Word sh_info;
+  Elf64_Xword sh_addralign;
+  Elf64_Xword sh_entsize;
+};
+
+// Special section indices.
+enum {
+  SHN_UNDEF = 0,          // Undefined, missing, irrelevant, or meaningless
+  SHN_LORESERVE = 0xff00, // Lowest reserved index
+  SHN_LOPROC = 0xff00,    // Lowest processor-specific index
+  SHN_HIPROC = 0xff1f,    // Highest processor-specific index
+  SHN_LOOS = 0xff20,      // Lowest operating system-specific index
+  SHN_HIOS = 0xff3f,      // Highest operating system-specific index
+  SHN_ABS = 0xfff1,       // Symbol has absolute value; does not need relocation
+  SHN_COMMON = 0xfff2,    // FORTRAN COMMON or C external global variables
+  SHN_XINDEX = 0xffff,    // Mark that the index is >= SHN_LORESERVE
+  SHN_HIRESERVE = 0xffff  // Highest reserved index
+};
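+
+// Hypothetical helper (illustrative only): st_shndx values in the reserved
+// range are escape codes rather than real section header indices; in
+// particular, SHN_XINDEX means the real index is stored separately in the
+// SHT_SYMTAB_SHNDX section.
+inline bool isOrdinarySectionIndex(unsigned Shndx) {
+  return Shndx != SHN_UNDEF && Shndx < SHN_LORESERVE;
+}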
+
+// Section types.
+enum : unsigned {
+  SHT_NULL = 0,                         // No associated section (inactive entry).
+  SHT_PROGBITS = 1,                     // Program-defined contents.
+  SHT_SYMTAB = 2,                       // Symbol table.
+  SHT_STRTAB = 3,                       // String table.
+  SHT_RELA = 4,                         // Relocation entries; explicit addends.
+  SHT_HASH = 5,                         // Symbol hash table.
+  SHT_DYNAMIC = 6,                      // Information for dynamic linking.
+  SHT_NOTE = 7,                         // Information about the file.
+  SHT_NOBITS = 8,                       // Data occupies no space in the file.
+  SHT_REL = 9,                          // Relocation entries; no explicit addends.
+  SHT_SHLIB = 10,                       // Reserved.
+  SHT_DYNSYM = 11,                      // Symbol table.
+  SHT_INIT_ARRAY = 14,                  // Pointers to initialization functions.
+  SHT_FINI_ARRAY = 15,                  // Pointers to termination functions.
+  SHT_PREINIT_ARRAY = 16,               // Pointers to pre-init functions.
+  SHT_GROUP = 17,                       // Section group.
+  SHT_SYMTAB_SHNDX = 18,                // Indices for SHN_XINDEX entries.
+  SHT_LOOS = 0x60000000,                // Lowest operating system-specific type.
+  // Android packed relocation section types.
+  // https://android.googlesource.com/platform/bionic/+/6f12bfece5dcc01325e0abba56a46b1bcf991c69/tools/relocation_packer/src/elf_file.cc#37
+  SHT_ANDROID_REL = 0x60000001,
+  SHT_ANDROID_RELA = 0x60000002,
+  SHT_LLVM_ODRTAB = 0x6fff4c00,         // LLVM ODR table.
+  SHT_LLVM_LINKER_OPTIONS = 0x6fff4c01, // LLVM Linker Options.
+  SHT_GNU_ATTRIBUTES = 0x6ffffff5,      // Object attributes.
+  SHT_GNU_HASH = 0x6ffffff6,            // GNU-style hash table.
+  SHT_GNU_verdef = 0x6ffffffd,          // GNU version definitions.
+  SHT_GNU_verneed = 0x6ffffffe,         // GNU version references.
+  SHT_GNU_versym = 0x6fffffff,          // GNU symbol versions table.
+  SHT_HIOS = 0x6fffffff,                // Highest operating system-specific type.
+  SHT_LOPROC = 0x70000000,              // Lowest processor arch-specific type.
+  // FIXME: All this is duplicated in MCSectionELF. Why??
+  // Exception Index table
+  SHT_ARM_EXIDX = 0x70000001U,
+  // BPABI DLL dynamic linking pre-emption map
+  SHT_ARM_PREEMPTMAP = 0x70000002U,
+  //  Object file compatibility attributes
+  SHT_ARM_ATTRIBUTES = 0x70000003U,
+  SHT_ARM_DEBUGOVERLAY = 0x70000004U,
+  SHT_ARM_OVERLAYSECTION = 0x70000005U,
+  SHT_HEX_ORDERED = 0x70000000,         // Link editor is to sort the entries in
+                                        // this section based on their sizes
+  SHT_X86_64_UNWIND = 0x70000001,       // Unwind information
+
+  SHT_MIPS_REGINFO = 0x70000006,        // Register usage information
+  SHT_MIPS_OPTIONS = 0x7000000d,        // General options
+  SHT_MIPS_DWARF = 0x7000001e,          // DWARF debugging section.
+  SHT_MIPS_ABIFLAGS = 0x7000002a,       // ABI information.
+
+  SHT_HIPROC = 0x7fffffff,              // Highest processor arch-specific type.
+  SHT_LOUSER = 0x80000000,              // Lowest type reserved for applications.
+  SHT_HIUSER = 0xffffffff               // Highest type reserved for applications.
+};
+
+// Section flags.
+enum : unsigned {
+  // Section data should be writable during execution.
+  SHF_WRITE = 0x1,
+
+  // Section occupies memory during program execution.
+  SHF_ALLOC = 0x2,
+
+  // Section contains executable machine instructions.
+  SHF_EXECINSTR = 0x4,
+
+  // The data in this section may be merged.
+  SHF_MERGE = 0x10,
+
+  // The data in this section consists of null-terminated strings.
+  SHF_STRINGS = 0x20,
+
+  // A field in this section holds a section header table index.
+  SHF_INFO_LINK = 0x40U,
+
+  // Adds special ordering requirements for link editors.
+  SHF_LINK_ORDER = 0x80U,
+
+  // This section requires special OS-specific processing to avoid incorrect
+  // behavior.
+  SHF_OS_NONCONFORMING = 0x100U,
+
+  // This section is a member of a section group.
+  SHF_GROUP = 0x200U,
+
+  // This section holds Thread-Local Storage.
+  SHF_TLS = 0x400U,
+
+  // Identifies a section containing compressed data.
+  SHF_COMPRESSED = 0x800U,
+
+  // This section is excluded from the final executable or shared library.
+  SHF_EXCLUDE = 0x80000000U,
+
+  // Start of target-specific flags.
+
+  SHF_MASKOS = 0x0ff00000,
+
+  // Bits indicating processor-specific flags.
+  SHF_MASKPROC = 0xf0000000,
+
+  /// All sections with the "d" flag are grouped together by the linker to form
+  /// the data section and the dp register is set to the start of the section by
+  /// the boot code.
+  XCORE_SHF_DP_SECTION = 0x10000000,
+
+  /// All sections with the "c" flag are grouped together by the linker to form
+  /// the constant pool and the cp register is set to the start of the constant
+  /// pool by the boot code.
+  XCORE_SHF_CP_SECTION = 0x20000000,
+
+  // If an object file section does not have this flag set, then it may not hold
+  // more than 2GB and can be freely referred to in objects using smaller code
+  // models. Otherwise, only objects using larger code models can refer to them.
+  // For example, a medium code model object can refer to data in a section that
+  // sets this flag as well as data in a section that does not set it; likewise,
+  // a small code model object can refer only to code in a section that does not
+  // set this flag.
+  SHF_X86_64_LARGE = 0x10000000,
+
+  // All sections with the GPREL flag are grouped into a global data area
+  // for faster accesses
+  SHF_HEX_GPREL = 0x10000000,
+
+  // Section contains text/data which may be replicated in other sections.
+  // Linker must retain only one copy.
+  SHF_MIPS_NODUPES = 0x01000000,
+
+  // Linker must generate implicit hidden weak names.
+  SHF_MIPS_NAMES = 0x02000000,
+
+  // Section data local to process.
+  SHF_MIPS_LOCAL = 0x04000000,
+
+  // Do not strip this section.
+  SHF_MIPS_NOSTRIP = 0x08000000,
+
+  // Section must be part of global data area.
+  SHF_MIPS_GPREL = 0x10000000,
+
+  // This section should be merged.
+  SHF_MIPS_MERGE = 0x20000000,
+
+  // Address size to be inferred from section entry size.
+  SHF_MIPS_ADDR = 0x40000000,
+
+  // Section data is string data by default.
+  SHF_MIPS_STRING = 0x80000000,
+
+  // Make code section unreadable when in execute-only mode
+  SHF_ARM_PURECODE = 0x20000000
+};
+
+// Section Group Flags
+enum : unsigned {
+  GRP_COMDAT = 0x1,
+  GRP_MASKOS = 0x0ff00000,
+  GRP_MASKPROC = 0xf0000000
+};
+
+// Symbol table entries for ELF32.
+struct Elf32_Sym {
+  Elf32_Word st_name;     // Symbol name (index into string table)
+  Elf32_Addr st_value;    // Value or address associated with the symbol
+  Elf32_Word st_size;     // Size of the symbol
+  unsigned char st_info;  // Symbol's type and binding attributes
+  unsigned char st_other; // Reserved; low bits encode symbol visibility (STV_*)
+  Elf32_Half st_shndx;    // Which section (header table index) it's defined in
+
+  // These accessors and mutators correspond to the ELF32_ST_BIND,
+  // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+  unsigned char getBinding() const { return st_info >> 4; }
+  unsigned char getType() const { return st_info & 0x0f; }
+  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+  void setBindingAndType(unsigned char b, unsigned char t) {
+    st_info = (b << 4) + (t & 0x0f);
+  }
+};
+
+// Symbol table entries for ELF64.
+struct Elf64_Sym {
+  Elf64_Word st_name;     // Symbol name (index into string table)
+  unsigned char st_info;  // Symbol's type and binding attributes
+  unsigned char st_other; // Reserved; low bits encode symbol visibility (STV_*)
+  Elf64_Half st_shndx;    // Which section (header table index) it's defined in
+  Elf64_Addr st_value;    // Value or address associated with the symbol
+  Elf64_Xword st_size;    // Size of the symbol
+
+  // These accessors and mutators are identical to those defined for ELF32
+  // symbol table entries.
+  unsigned char getBinding() const { return st_info >> 4; }
+  unsigned char getType() const { return st_info & 0x0f; }
+  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+  void setBindingAndType(unsigned char b, unsigned char t) {
+    st_info = (b << 4) + (t & 0x0f);
+  }
+};
+
+// The size (in bytes) of symbol table entries.
+enum {
+  SYMENTRY_SIZE32 = 16, // 32-bit symbol entry size
+  SYMENTRY_SIZE64 = 24  // 64-bit symbol entry size.
+};
+
+// Symbol bindings.
+enum {
+  STB_LOCAL = 0,  // Local symbol, not visible outside obj file containing def
+  STB_GLOBAL = 1, // Global symbol, visible to all object files being combined
+  STB_WEAK = 2,   // Weak symbol, like global but lower-precedence
+  STB_GNU_UNIQUE = 10,
+  STB_LOOS = 10,   // Lowest operating system-specific binding type
+  STB_HIOS = 12,   // Highest operating system-specific binding type
+  STB_LOPROC = 13, // Lowest processor-specific binding type
+  STB_HIPROC = 15  // Highest processor-specific binding type
+};
+
+// Symbol types.
+enum {
+  STT_NOTYPE = 0,     // Symbol's type is not specified
+  STT_OBJECT = 1,     // Symbol is a data object (variable, array, etc.)
+  STT_FUNC = 2,       // Symbol is executable code (function, etc.)
+  STT_SECTION = 3,    // Symbol refers to a section
+  STT_FILE = 4,       // Local, absolute symbol that refers to a file
+  STT_COMMON = 5,     // An uninitialized common block
+  STT_TLS = 6,        // Thread local data object
+  STT_GNU_IFUNC = 10, // GNU indirect function
+  STT_LOOS = 10,      // Lowest operating system-specific symbol type
+  STT_HIOS = 12,      // Highest operating system-specific symbol type
+  STT_LOPROC = 13,    // Lowest processor-specific symbol type
+  STT_HIPROC = 15,    // Highest processor-specific symbol type
+
+  // AMDGPU symbol types
+  STT_AMDGPU_HSA_KERNEL = 10
+};
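+
+// Illustrative usage (hypothetical, not part of the header): st_info packs
+// one STB_* binding into the high nibble and one STT_* type into the low
+// nibble, so with the accessors defined on Elf32_Sym/Elf64_Sym above:
+//
+//   Elf64_Sym Sym = {};
+//   Sym.setBindingAndType(STB_GLOBAL, STT_FUNC);
+//   // Sym.getBinding() == STB_GLOBAL and Sym.getType() == STT_FUNC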
+
+// Symbol visibilities.
+enum {
+  STV_DEFAULT = 0,  // Visibility is specified by binding type
+  STV_INTERNAL = 1, // Defined by processor supplements
+  STV_HIDDEN = 2,   // Not visible to other components
+  STV_PROTECTED = 3 // Visible in other components but not preemptable
+};
+
+// Symbol number.
+enum { STN_UNDEF = 0 };
+
+// Special relocation symbols used in the MIPS64 ELF relocation entries
+enum {
+  RSS_UNDEF = 0, // None
+  RSS_GP = 1,    // Value of gp
+  RSS_GP0 = 2,   // Value of gp used to create object being relocated
+  RSS_LOC = 3    // Address of location being relocated
+};
+
+// Relocation entry, without explicit addend.
+struct Elf32_Rel {
+  Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
+  Elf32_Word r_info;   // Symbol table index and type of relocation to apply
+
+  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+  // and ELF32_R_INFO macros defined in the ELF specification:
+  Elf32_Word getSymbol() const { return (r_info >> 8); }
+  unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
+  void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+  void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+  void setSymbolAndType(Elf32_Word s, unsigned char t) {
+    r_info = (s << 8) + t;
+  }
+};
+
+// Relocation entry with explicit addend.
+struct Elf32_Rela {
+  Elf32_Addr r_offset;  // Location (file byte offset, or program virtual addr)
+  Elf32_Word r_info;    // Symbol table index and type of relocation to apply
+  Elf32_Sword r_addend; // Compute value for relocatable field by adding this
+
+  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+  // and ELF32_R_INFO macros defined in the ELF specification:
+  Elf32_Word getSymbol() const { return (r_info >> 8); }
+  unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
+  void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+  void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+  void setSymbolAndType(Elf32_Word s, unsigned char t) {
+    r_info = (s << 8) + t;
+  }
+};
+
+// Relocation entry, without explicit addend.
+struct Elf64_Rel {
+  Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+  Elf64_Xword r_info;  // Symbol table index and type of relocation to apply.
+
+  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+  // and ELF64_R_INFO macros defined in the ELF specification:
+  Elf64_Word getSymbol() const { return (r_info >> 32); }
+  Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
+  void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
+  void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
+  void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
+    r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
+  }
+};
+
+// Relocation entry with explicit addend.
+struct Elf64_Rela {
+  Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+  Elf64_Xword r_info;  // Symbol table index and type of relocation to apply.
+  Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
+
+  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+  // and ELF64_R_INFO macros defined in the ELF specification:
+  Elf64_Word getSymbol() const { return (r_info >> 32); }
+  Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
+  void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
+  void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
+  void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
+    r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
+  }
+};
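+
+// Illustrative usage (hypothetical): for ELF64 the symbol index occupies the
+// high 32 bits of r_info and the relocation type the low 32 bits, e.g.
+//
+//   Elf64_Rela Rela = {};
+//   Rela.setSymbolAndType(SymIndex, Type);
+//   // Rela.r_info == ((Elf64_Xword)SymIndex << 32) | Type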
+
+// Program header for ELF32.
+struct Elf32_Phdr {
+  Elf32_Word p_type;   // Type of segment
+  Elf32_Off p_offset;  // File offset where segment is located, in bytes
+  Elf32_Addr p_vaddr;  // Virtual address of beginning of segment
+  Elf32_Addr p_paddr;  // Physical address of beginning of segment (OS-specific)
+  Elf32_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+  Elf32_Word p_memsz;  // Num. of bytes in mem image of segment (may be zero)
+  Elf32_Word p_flags;  // Segment flags
+  Elf32_Word p_align;  // Segment alignment constraint
+};
+
+// Program header for ELF64.
+struct Elf64_Phdr {
+  Elf64_Word p_type;    // Type of segment
+  Elf64_Word p_flags;   // Segment flags
+  Elf64_Off p_offset;   // File offset where segment is located, in bytes
+  Elf64_Addr p_vaddr;   // Virtual address of beginning of segment
+  Elf64_Addr p_paddr;   // Physical addr of beginning of segment (OS-specific)
+  Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+  Elf64_Xword p_memsz;  // Num. of bytes in mem image of segment (may be zero)
+  Elf64_Xword p_align;  // Segment alignment constraint
+};
+
+// Segment types.
+enum {
+  PT_NULL = 0,            // Unused segment.
+  PT_LOAD = 1,            // Loadable segment.
+  PT_DYNAMIC = 2,         // Dynamic linking information.
+  PT_INTERP = 3,          // Interpreter pathname.
+  PT_NOTE = 4,            // Auxiliary information.
+  PT_SHLIB = 5,           // Reserved.
+  PT_PHDR = 6,            // The program header table itself.
+  PT_TLS = 7,             // The thread-local storage template.
+  PT_LOOS = 0x60000000,   // Lowest operating system-specific pt entry type.
+  PT_HIOS = 0x6fffffff,   // Highest operating system-specific pt entry type.
+  PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
+  PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
+
+  // x86-64 program header types.
+  // These all contain stack unwind tables.
+  PT_GNU_EH_FRAME = 0x6474e550,
+  PT_SUNW_EH_FRAME = 0x6474e550,
+  PT_SUNW_UNWIND = 0x6464e550,
+
+  PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
+  PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
+
+  PT_OPENBSD_RANDOMIZE = 0x65a3dbe6, // Fill with random data.
+  PT_OPENBSD_WXNEEDED = 0x65a3dbe7,  // Program does W^X violations.
+  PT_OPENBSD_BOOTDATA = 0x65a41be6,  // Section for boot arguments.
+
+  // ARM program header types.
+  PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility info
+  // These all contain stack unwind tables.
+  PT_ARM_EXIDX = 0x70000001,
+  PT_ARM_UNWIND = 0x70000001,
+
+  // MIPS program header types.
+  PT_MIPS_REGINFO = 0x70000000,  // Register usage information.
+  PT_MIPS_RTPROC = 0x70000001,   // Runtime procedure table.
+  PT_MIPS_OPTIONS = 0x70000002,  // Options segment.
+  PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.
+
+  // WebAssembly program header types.
+  PT_WEBASSEMBLY_FUNCTIONS = PT_LOPROC + 0, // Function definitions.
+};
+
+// Segment flag bits.
+enum : unsigned {
+  PF_X = 1,                // Execute
+  PF_W = 2,                // Write
+  PF_R = 4,                // Read
+  PF_MASKOS = 0x0ff00000,  // Bits for operating system-specific semantics.
+  PF_MASKPROC = 0xf0000000 // Bits for processor-specific semantics.
+};
+
+// Dynamic table entry for ELF32.
+struct Elf32_Dyn {
+  Elf32_Sword d_tag; // Type of dynamic table entry.
+  union {
+    Elf32_Word d_val; // Integer value of entry.
+    Elf32_Addr d_ptr; // Pointer value of entry.
+  } d_un;
+};
+
+// Dynamic table entry for ELF64.
+struct Elf64_Dyn {
+  Elf64_Sxword d_tag; // Type of dynamic table entry.
+  union {
+    Elf64_Xword d_val; // Integer value of entry.
+    Elf64_Addr d_ptr;  // Pointer value of entry.
+  } d_un;
+};
+
+// Dynamic table entry tags.
+enum {
+#define DYNAMIC_TAG(name, value) DT_##name = value,
+#include "DynamicTags.def"
+#undef DYNAMIC_TAG
+};
+
+// DT_FLAGS values.
+enum {
+  DF_ORIGIN = 0x01,    // The object may reference $ORIGIN.
+  DF_SYMBOLIC = 0x02,  // Search the shared lib before searching the exe.
+  DF_TEXTREL = 0x04,   // Relocations may modify a non-writable segment.
+  DF_BIND_NOW = 0x08,  // Process all relocations on load.
+  DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
+};
+
+// State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 entry.
+enum {
+  DF_1_NOW = 0x00000001,       // Set RTLD_NOW for this object.
+  DF_1_GLOBAL = 0x00000002,    // Set RTLD_GLOBAL for this object.
+  DF_1_GROUP = 0x00000004,     // Set RTLD_GROUP for this object.
+  DF_1_NODELETE = 0x00000008,  // Set RTLD_NODELETE for this object.
+  DF_1_LOADFLTR = 0x00000010,  // Trigger filtee loading at runtime.
+  DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
+  DF_1_NOOPEN = 0x00000040,    // Set RTLD_NOOPEN for this object.
+  DF_1_ORIGIN = 0x00000080,    // $ORIGIN must be handled.
+  DF_1_DIRECT = 0x00000100,    // Direct binding enabled.
+  DF_1_TRANS = 0x00000200,
+  DF_1_INTERPOSE = 0x00000400,  // Object is used to interpose.
+  DF_1_NODEFLIB = 0x00000800,   // Ignore default lib search path.
+  DF_1_NODUMP = 0x00001000,     // Object can't be dldump'ed.
+  DF_1_CONFALT = 0x00002000,    // Configuration alternative created.
+  DF_1_ENDFILTEE = 0x00004000,  // Filtee terminates filters search.
+  DF_1_DISPRELDNE = 0x00008000, // Disp reloc applied at build time.
+  DF_1_DISPRELPND = 0x00010000, // Disp reloc applied at run-time.
+  DF_1_NODIRECT = 0x00020000,   // Object has no-direct binding.
+  DF_1_IGNMULDEF = 0x00040000,
+  DF_1_NOKSYMS = 0x00080000,
+  DF_1_NOHDR = 0x00100000,
+  DF_1_EDITED = 0x00200000, // Object is modified after built.
+  DF_1_NORELOC = 0x00400000,
+  DF_1_SYMINTPOSE = 0x00800000, // Object has individual interposers.
+  DF_1_GLOBAUDIT = 0x01000000,  // Global auditing required.
+  DF_1_SINGLETON = 0x02000000   // Singleton symbols are used.
+};
+
+// DT_MIPS_FLAGS values.
+enum {
+  RHF_NONE = 0x00000000,                   // No flags.
+  RHF_QUICKSTART = 0x00000001,             // Uses shortcut pointers.
+  RHF_NOTPOT = 0x00000002,                 // Hash size is not a power of two.
+  RHS_NO_LIBRARY_REPLACEMENT = 0x00000004, // Ignore LD_LIBRARY_PATH.
+  RHF_NO_MOVE = 0x00000008,                // DSO address may not be relocated.
+  RHF_SGI_ONLY = 0x00000010,               // SGI specific features.
+  RHF_GUARANTEE_INIT = 0x00000020,         // Guarantee that .init will finish
+                                           // executing before any non-init
+                                           // code in DSO is called.
+  RHF_DELTA_C_PLUS_PLUS = 0x00000040,      // Contains Delta C++ code.
+  RHF_GUARANTEE_START_INIT = 0x00000080,   // Guarantee that .init will start
+                                           // executing before any non-init
+                                           // code in DSO is called.
+  RHF_PIXIE = 0x00000100,                  // Generated by pixie.
+  RHF_DEFAULT_DELAY_LOAD = 0x00000200,     // Delay-load DSO by default.
+  RHF_REQUICKSTART = 0x00000400,           // Object may be requickstarted
+  RHF_REQUICKSTARTED = 0x00000800,         // Object has been requickstarted
+  RHF_CORD = 0x00001000,                   // Generated by cord.
+  RHF_NO_UNRES_UNDEF = 0x00002000,         // Object contains no unresolved
+                                           // undef symbols.
+  RHF_RLD_ORDER_SAFE = 0x00004000          // Symbol table is in a safe order.
+};
+
+// ElfXX_VerDef structure version (GNU versioning)
+enum { VER_DEF_NONE = 0, VER_DEF_CURRENT = 1 };
+
+// VerDef Flags (ElfXX_VerDef::vd_flags)
+enum { VER_FLG_BASE = 0x1, VER_FLG_WEAK = 0x2, VER_FLG_INFO = 0x4 };
+
+// Special constants for the version table. (SHT_GNU_versym/.gnu.version)
+enum {
+  VER_NDX_LOCAL = 0,       // Unversioned local symbol
+  VER_NDX_GLOBAL = 1,      // Unversioned global symbol
+  VERSYM_VERSION = 0x7fff, // Version Index mask
+  VERSYM_HIDDEN = 0x8000   // Hidden bit (non-default version)
+};
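+
+// Hypothetical helpers (illustrative only): a .gnu.version entry carries the
+// version index in its low 15 bits and the hidden flag in its top bit.
+inline unsigned getVersymIndex(unsigned Versym) {
+  return Versym & VERSYM_VERSION;
+}
+inline bool isVersymHidden(unsigned Versym) {
+  return (Versym & VERSYM_HIDDEN) != 0;
+}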
+
+// ElfXX_VerNeed structure version (GNU versioning)
+enum { VER_NEED_NONE = 0, VER_NEED_CURRENT = 1 };
+
+// SHT_NOTE section types
+enum {
+  NT_FREEBSD_THRMISC = 7,
+  NT_FREEBSD_PROCSTAT_PROC = 8,
+  NT_FREEBSD_PROCSTAT_FILES = 9,
+  NT_FREEBSD_PROCSTAT_VMMAP = 10,
+  NT_FREEBSD_PROCSTAT_GROUPS = 11,
+  NT_FREEBSD_PROCSTAT_UMASK = 12,
+  NT_FREEBSD_PROCSTAT_RLIMIT = 13,
+  NT_FREEBSD_PROCSTAT_OSREL = 14,
+  NT_FREEBSD_PROCSTAT_PSSTRINGS = 15,
+  NT_FREEBSD_PROCSTAT_AUXV = 16,
+};
+
+enum {
+  NT_GNU_ABI_TAG = 1,
+  NT_GNU_HWCAP = 2,
+  NT_GNU_BUILD_ID = 3,
+  NT_GNU_GOLD_VERSION = 4,
+  NT_GNU_PROPERTY_TYPE_0 = 5,
+};
+
+// Property types used in GNU_PROPERTY_TYPE_0 notes.
+enum {
+  GNU_PROPERTY_STACK_SIZE = 1,
+  GNU_PROPERTY_NO_COPY_ON_PROTECTED = 2,
+};
+
+// AMDGPU specific notes.
+enum {
+  // Note types with values between 0 and 9 (inclusive) are reserved.
+  NT_AMD_AMDGPU_HSA_METADATA = 10,
+  NT_AMD_AMDGPU_ISA = 11,
+  NT_AMD_AMDGPU_PAL_METADATA = 12
+};
+
+enum {
+  GNU_ABI_TAG_LINUX = 0,
+  GNU_ABI_TAG_HURD = 1,
+  GNU_ABI_TAG_SOLARIS = 2,
+  GNU_ABI_TAG_FREEBSD = 3,
+  GNU_ABI_TAG_NETBSD = 4,
+  GNU_ABI_TAG_SYLLABLE = 5,
+  GNU_ABI_TAG_NACL = 6,
+};
+
+// Android packed relocation group flags.
+enum {
+  RELOCATION_GROUPED_BY_INFO_FLAG = 1,
+  RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG = 2,
+  RELOCATION_GROUPED_BY_ADDEND_FLAG = 4,
+  RELOCATION_GROUP_HAS_ADDEND_FLAG = 8,
+};
+
+// Compressed section header for ELF32.
+struct Elf32_Chdr {
+  Elf32_Word ch_type;
+  Elf32_Word ch_size;
+  Elf32_Word ch_addralign;
+};
+
+// Compressed section header for ELF64.
+struct Elf64_Chdr {
+  Elf64_Word ch_type;
+  Elf64_Word ch_reserved;
+  Elf64_Xword ch_size;
+  Elf64_Xword ch_addralign;
+};
+
+// Note header for ELF32.
+struct Elf32_Nhdr {
+  Elf32_Word n_namesz;
+  Elf32_Word n_descsz;
+  Elf32_Word n_type;
+};
+
+// Note header for ELF64.
+struct Elf64_Nhdr {
+  Elf64_Word n_namesz;
+  Elf64_Word n_descsz;
+  Elf64_Word n_type;
+};
+
+// Legal values for ch_type field of compressed section header.
+enum {
+  ELFCOMPRESS_ZLIB = 1,            // ZLIB/DEFLATE algorithm.
+  ELFCOMPRESS_LOOS = 0x60000000,   // Start of OS-specific.
+  ELFCOMPRESS_HIOS = 0x6fffffff,   // End of OS-specific.
+  ELFCOMPRESS_LOPROC = 0x70000000, // Start of processor-specific.
+  ELFCOMPRESS_HIPROC = 0x7fffffff  // End of processor-specific.
+};
+
+} // end namespace ELF
+} // end namespace llvm
+
+#endif // LLVM_BINARYFORMAT_ELF_H
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AArch64.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AArch64.def
new file mode 100644
index 0000000..4afcd7d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AArch64.def
@@ -0,0 +1,218 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// Based on ABI release 1.1-beta, dated 6 November 2013. NB: The cover page of
+// this document, IHI0056C_beta_aaelf64.pdf, on infocenter.arm.com, still
+// labels this as release 1.0.
+ELF_RELOC(R_AARCH64_NONE,                                0)
+ELF_RELOC(R_AARCH64_ABS64,                           0x101)
+ELF_RELOC(R_AARCH64_ABS32,                           0x102)
+ELF_RELOC(R_AARCH64_ABS16,                           0x103)
+ELF_RELOC(R_AARCH64_PREL64,                          0x104)
+ELF_RELOC(R_AARCH64_PREL32,                          0x105)
+ELF_RELOC(R_AARCH64_PREL16,                          0x106)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0,                    0x107)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC,                 0x108)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1,                    0x109)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC,                 0x10a)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2,                    0x10b)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC,                 0x10c)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G3,                    0x10d)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G0,                    0x10e)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G1,                    0x10f)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G2,                    0x110)
+ELF_RELOC(R_AARCH64_LD_PREL_LO19,                    0x111)
+ELF_RELOC(R_AARCH64_ADR_PREL_LO21,                   0x112)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21,                0x113)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC,             0x114)
+ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC,                 0x115)
+ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC,               0x116)
+ELF_RELOC(R_AARCH64_TSTBR14,                         0x117)
+ELF_RELOC(R_AARCH64_CONDBR19,                        0x118)
+ELF_RELOC(R_AARCH64_JUMP26,                          0x11a)
+ELF_RELOC(R_AARCH64_CALL26,                          0x11b)
+ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC,              0x11c)
+ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC,              0x11d)
+ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC,              0x11e)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0,                    0x11f)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC,                 0x120)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1,                    0x121)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC,                 0x122)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2,                    0x123)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC,                 0x124)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G3,                    0x125)
+ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC,             0x12b)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0,                  0x12c)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC,               0x12d)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1,                  0x12e)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC,               0x12f)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2,                  0x130)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC,               0x131)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3,                  0x132)
+ELF_RELOC(R_AARCH64_GOTREL64,                        0x133)
+ELF_RELOC(R_AARCH64_GOTREL32,                        0x134)
+ELF_RELOC(R_AARCH64_GOT_LD_PREL19,                   0x135)
+ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15,                0x136)
+ELF_RELOC(R_AARCH64_ADR_GOT_PAGE,                    0x137)
+ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC,                0x138)
+ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15,               0x139)
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21,                0x200)
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21,                0x201)
+ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC,               0x202)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1,                   0x203)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC,                0x204)
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21,                0x205)
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21,                0x206)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC,               0x207)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1,                   0x208)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC,                0x209)
+ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19,                 0x20a)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2,            0x20b)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1,            0x20c)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,         0x20d)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0,            0x20e)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,         0x20f)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12,           0x210)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12,           0x211)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,        0x212)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12,         0x213)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,      0x214)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12,        0x215)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,     0x216)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12,        0x217)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,     0x218)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12,        0x219)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,     0x21a)
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1,          0x21b)
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,       0x21c)
+ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,       0x21d)
+ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,     0x21e)
+ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19,        0x21f)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2,             0x220)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1,             0x221)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC,          0x222)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0,             0x223)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC,          0x224)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12,            0x225)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12,            0x226)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC,         0x227)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12,          0x228)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,       0x229)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12,         0x22a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,      0x22b)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12,         0x22c)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,      0x22d)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12,         0x22e)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,      0x22f)
+ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19,               0x230)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21,              0x231)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21,              0x232)
+ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12,               0x233)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12,                0x234)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1,                  0x235)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC,               0x236)
+ELF_RELOC(R_AARCH64_TLSDESC_LDR,                     0x237)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD,                     0x238)
+ELF_RELOC(R_AARCH64_TLSDESC_CALL,                    0x239)
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12,        0x23a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC,     0x23b)
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12,       0x23c)
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC,    0x23d)
+ELF_RELOC(R_AARCH64_COPY,                            0x400)
+ELF_RELOC(R_AARCH64_GLOB_DAT,                        0x401)
+ELF_RELOC(R_AARCH64_JUMP_SLOT,                       0x402)
+ELF_RELOC(R_AARCH64_RELATIVE,                        0x403)
+ELF_RELOC(R_AARCH64_TLS_DTPREL64,                    0x404)
+ELF_RELOC(R_AARCH64_TLS_DTPMOD64,                    0x405)
+ELF_RELOC(R_AARCH64_TLS_TPREL64,                     0x406)
+ELF_RELOC(R_AARCH64_TLSDESC,                         0x407)
+ELF_RELOC(R_AARCH64_IRELATIVE,                       0x408)
+
+// ELF_RELOC(R_AARCH64_P32_NONE,                         0)
+ELF_RELOC(R_AARCH64_P32_ABS32,                       0x001)
+ELF_RELOC(R_AARCH64_P32_ABS16,                       0x002)
+ELF_RELOC(R_AARCH64_P32_PREL32,                      0x003)
+ELF_RELOC(R_AARCH64_P32_PREL16,                      0x004)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0,                0x005)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0_NC,             0x006)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G1,                0x007)
+ELF_RELOC(R_AARCH64_P32_MOVW_SABS_G0,                0x008)
+ELF_RELOC(R_AARCH64_P32_LD_PREL_LO19,                0x009)
+ELF_RELOC(R_AARCH64_P32_ADR_PREL_LO21,               0x00a)
+ELF_RELOC(R_AARCH64_P32_ADR_PREL_PG_HI21,            0x00b)
+ELF_RELOC(R_AARCH64_P32_ADD_ABS_LO12_NC,             0x00c)
+ELF_RELOC(R_AARCH64_P32_LDST8_ABS_LO12_NC,           0x00d)
+ELF_RELOC(R_AARCH64_P32_LDST16_ABS_LO12_NC,          0x00e)
+ELF_RELOC(R_AARCH64_P32_LDST32_ABS_LO12_NC,          0x00f)
+ELF_RELOC(R_AARCH64_P32_LDST64_ABS_LO12_NC,          0x010)
+ELF_RELOC(R_AARCH64_P32_LDST128_ABS_LO12_NC,         0x011)
+ELF_RELOC(R_AARCH64_P32_TSTBR14,                     0x012)
+ELF_RELOC(R_AARCH64_P32_CONDBR19,                    0x013)
+ELF_RELOC(R_AARCH64_P32_JUMP26,                      0x014)
+ELF_RELOC(R_AARCH64_P32_CALL26,                      0x015)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0,                0x016)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0_NC,             0x017)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G1,                0x018)
+ELF_RELOC(R_AARCH64_P32_GOT_LD_PREL19,               0x019)
+ELF_RELOC(R_AARCH64_P32_ADR_GOT_PAGE,                0x01a)
+ELF_RELOC(R_AARCH64_P32_LD32_GOT_LO12_NC,            0x01b)
+ELF_RELOC(R_AARCH64_P32_LD32_GOTPAGE_LO14,           0x01c)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PREL21,            0x050)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PAGE21,            0x051)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADD_LO12_NC,           0x052)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PREL21,            0x053)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PAGE21,            0x054)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_LO12_NC,           0x055)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LD_PREL19,             0x056)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G1,        0x057)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0,        0x058)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC,     0x059)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_HI12,       0x05a)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12,       0x05b)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12_NC,    0x05c)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12,     0x05d)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12_NC,  0x05e)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12,    0x05f)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12_NC, 0x060)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12,    0x061)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12_NC, 0x062)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12,    0x063)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12_NC, 0x064)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12,   0x065)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12_NC,0x066)
+ELF_RELOC(R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21,   0x067)
+ELF_RELOC(R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC, 0x068)
+ELF_RELOC(R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19,    0x069)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G1,         0x06a)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0,         0x06b)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC,      0x06c)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_HI12,        0x06d)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12,        0x06e)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC,     0x06f)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12,      0x070)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12_NC,   0x071)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12,     0x072)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12_NC,  0x073)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12,     0x074)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12_NC,  0x075)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12,     0x076)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12_NC,  0x077)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12,    0x078)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12_NC, 0x079)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_LD_PREL19,           0x07a)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PREL21,          0x07b)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PAGE21,          0x07c)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_LD32_LO12,           0x07d)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADD_LO12,            0x07e)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_CALL,                0x07f)
+ELF_RELOC(R_AARCH64_P32_COPY,                        0x0b4)
+ELF_RELOC(R_AARCH64_P32_GLOB_DAT,                    0x0b5)
+ELF_RELOC(R_AARCH64_P32_JUMP_SLOT,                   0x0b6)
+ELF_RELOC(R_AARCH64_P32_RELATIVE,                    0x0b7)
+ELF_RELOC(R_AARCH64_P32_TLS_DTPREL,                  0x0b8)
+ELF_RELOC(R_AARCH64_P32_TLS_DTPMOD,                  0x0b9)
+ELF_RELOC(R_AARCH64_P32_TLS_TPREL,                   0x0ba)
+ELF_RELOC(R_AARCH64_P32_TLSDESC,                     0x0bb)
+ELF_RELOC(R_AARCH64_P32_IRELATIVE,                   0x0bc)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AMDGPU.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AMDGPU.def
new file mode 100644
index 0000000..00b19c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AMDGPU.def
@@ -0,0 +1,17 @@
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_AMDGPU_NONE,           0)
+ELF_RELOC(R_AMDGPU_ABS32_LO,       1)
+ELF_RELOC(R_AMDGPU_ABS32_HI,       2)
+ELF_RELOC(R_AMDGPU_ABS64,          3)
+ELF_RELOC(R_AMDGPU_REL32,          4)
+ELF_RELOC(R_AMDGPU_REL64,          5)
+ELF_RELOC(R_AMDGPU_ABS32,          6)
+ELF_RELOC(R_AMDGPU_GOTPCREL,       7)
+ELF_RELOC(R_AMDGPU_GOTPCREL32_LO,  8)
+ELF_RELOC(R_AMDGPU_GOTPCREL32_HI,  9)
+ELF_RELOC(R_AMDGPU_REL32_LO,      10)
+ELF_RELOC(R_AMDGPU_REL32_HI,      11)
+ELF_RELOC(R_AMDGPU_RELATIVE64,    13)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARC.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARC.def
new file mode 100644
index 0000000..5691fb3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARC.def
@@ -0,0 +1,74 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_ARC_NONE,                  0)
+ELF_RELOC(R_ARC_8,                     1)
+ELF_RELOC(R_ARC_16,                    2)
+ELF_RELOC(R_ARC_24,                    3)
+ELF_RELOC(R_ARC_32,                    4)
+ELF_RELOC(R_ARC_N8,                    8)
+ELF_RELOC(R_ARC_N16,                   9)
+ELF_RELOC(R_ARC_N24,                  10)
+ELF_RELOC(R_ARC_N32,                  11)
+ELF_RELOC(R_ARC_SDA,                  12)
+ELF_RELOC(R_ARC_SECTOFF,              13)
+ELF_RELOC(R_ARC_S21H_PCREL,           14)
+ELF_RELOC(R_ARC_S21W_PCREL,           15)
+ELF_RELOC(R_ARC_S25H_PCREL,           16)
+ELF_RELOC(R_ARC_S25W_PCREL,           17)
+ELF_RELOC(R_ARC_SDA32,                18)
+ELF_RELOC(R_ARC_SDA_LDST,             19)
+ELF_RELOC(R_ARC_SDA_LDST1,            20)
+ELF_RELOC(R_ARC_SDA_LDST2,            21)
+ELF_RELOC(R_ARC_SDA16_LD,             22)
+ELF_RELOC(R_ARC_SDA16_LD1,            23)
+ELF_RELOC(R_ARC_SDA16_LD2,            24)
+ELF_RELOC(R_ARC_S13_PCREL,            25)
+ELF_RELOC(R_ARC_W,                    26)
+ELF_RELOC(R_ARC_32_ME,                27)
+ELF_RELOC(R_ARC_32_ME_S,             105)
+ELF_RELOC(R_ARC_N32_ME,               28)
+ELF_RELOC(R_ARC_SECTOFF_ME,           29)
+ELF_RELOC(R_ARC_SDA32_ME,             30)
+ELF_RELOC(R_ARC_W_ME,                 31)
+ELF_RELOC(R_AC_SECTOFF_U8,            35)
+ELF_RELOC(R_AC_SECTOFF_U8_1,          36)
+ELF_RELOC(R_AC_SECTOFF_U8_2,          37)
+ELF_RELOC(R_AC_SECTOFF_S9,            38)
+ELF_RELOC(R_AC_SECTOFF_S9_1,          39)
+ELF_RELOC(R_AC_SECTOFF_S9_2,          40)
+ELF_RELOC(R_ARC_SECTOFF_ME_1,         41)
+ELF_RELOC(R_ARC_SECTOFF_ME_2,         42)
+ELF_RELOC(R_ARC_SECTOFF_1,            43)
+ELF_RELOC(R_ARC_SECTOFF_2,            44)
+ELF_RELOC(R_ARC_SDA_12,               45)
+ELF_RELOC(R_ARC_SDA16_ST2,            48)
+ELF_RELOC(R_ARC_32_PCREL,             49)
+ELF_RELOC(R_ARC_PC32,                 50)
+ELF_RELOC(R_ARC_GOT32,                59)
+ELF_RELOC(R_ARC_GOTPC32,              51)
+ELF_RELOC(R_ARC_PLT32,                52)
+ELF_RELOC(R_ARC_COPY,                 53)
+ELF_RELOC(R_ARC_GLOB_DAT,             54)
+ELF_RELOC(R_ARC_JMP_SLOT,             55)
+ELF_RELOC(R_ARC_RELATIVE,             56)
+ELF_RELOC(R_ARC_GOTOFF,               57)
+ELF_RELOC(R_ARC_GOTPC,                58)
+ELF_RELOC(R_ARC_S21W_PCREL_PLT,       60)
+ELF_RELOC(R_ARC_S25H_PCREL_PLT,       61)
+ELF_RELOC(R_ARC_JLI_SECTOFF,          63)
+ELF_RELOC(R_ARC_TLS_DTPMOD,           66)
+ELF_RELOC(R_ARC_TLS_TPOFF,            68)
+ELF_RELOC(R_ARC_TLS_GD_GOT,           69)
+ELF_RELOC(R_ARC_TLS_GD_LD,            70)
+ELF_RELOC(R_ARC_TLS_GD_CALL,          71)
+ELF_RELOC(R_ARC_TLS_IE_GOT,           72)
+ELF_RELOC(R_ARC_TLS_DTPOFF,           67)
+ELF_RELOC(R_ARC_TLS_DTPOFF_S9,        73)
+ELF_RELOC(R_ARC_TLS_LE_S9,            74)
+ELF_RELOC(R_ARC_TLS_LE_32,            75)
+ELF_RELOC(R_ARC_S25W_PCREL_PLT,       76)
+ELF_RELOC(R_ARC_S21H_PCREL_PLT,       77)
+ELF_RELOC(R_ARC_NPS_CMEM16,           78)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARM.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARM.def
new file mode 100644
index 0000000..730fc5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/ARM.def
@@ -0,0 +1,138 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// Meets 2.09 ABI Specs.
+ELF_RELOC(R_ARM_NONE,                   0x00)
+ELF_RELOC(R_ARM_PC24,                   0x01)
+ELF_RELOC(R_ARM_ABS32,                  0x02)
+ELF_RELOC(R_ARM_REL32,                  0x03)
+ELF_RELOC(R_ARM_LDR_PC_G0,              0x04)
+ELF_RELOC(R_ARM_ABS16,                  0x05)
+ELF_RELOC(R_ARM_ABS12,                  0x06)
+ELF_RELOC(R_ARM_THM_ABS5,               0x07)
+ELF_RELOC(R_ARM_ABS8,                   0x08)
+ELF_RELOC(R_ARM_SBREL32,                0x09)
+ELF_RELOC(R_ARM_THM_CALL,               0x0a)
+ELF_RELOC(R_ARM_THM_PC8,                0x0b)
+ELF_RELOC(R_ARM_BREL_ADJ,               0x0c)
+ELF_RELOC(R_ARM_TLS_DESC,               0x0d)
+ELF_RELOC(R_ARM_THM_SWI8,               0x0e)
+ELF_RELOC(R_ARM_XPC25,                  0x0f)
+ELF_RELOC(R_ARM_THM_XPC22,              0x10)
+ELF_RELOC(R_ARM_TLS_DTPMOD32,           0x11)
+ELF_RELOC(R_ARM_TLS_DTPOFF32,           0x12)
+ELF_RELOC(R_ARM_TLS_TPOFF32,            0x13)
+ELF_RELOC(R_ARM_COPY,                   0x14)
+ELF_RELOC(R_ARM_GLOB_DAT,               0x15)
+ELF_RELOC(R_ARM_JUMP_SLOT,              0x16)
+ELF_RELOC(R_ARM_RELATIVE,               0x17)
+ELF_RELOC(R_ARM_GOTOFF32,               0x18)
+ELF_RELOC(R_ARM_BASE_PREL,              0x19)
+ELF_RELOC(R_ARM_GOT_BREL,               0x1a)
+ELF_RELOC(R_ARM_PLT32,                  0x1b)
+ELF_RELOC(R_ARM_CALL,                   0x1c)
+ELF_RELOC(R_ARM_JUMP24,                 0x1d)
+ELF_RELOC(R_ARM_THM_JUMP24,             0x1e)
+ELF_RELOC(R_ARM_BASE_ABS,               0x1f)
+ELF_RELOC(R_ARM_ALU_PCREL_7_0,          0x20)
+ELF_RELOC(R_ARM_ALU_PCREL_15_8,         0x21)
+ELF_RELOC(R_ARM_ALU_PCREL_23_15,        0x22)
+ELF_RELOC(R_ARM_LDR_SBREL_11_0_NC,      0x23)
+ELF_RELOC(R_ARM_ALU_SBREL_19_12_NC,     0x24)
+ELF_RELOC(R_ARM_ALU_SBREL_27_20_CK,     0x25)
+ELF_RELOC(R_ARM_TARGET1,                0x26)
+ELF_RELOC(R_ARM_SBREL31,                0x27)
+ELF_RELOC(R_ARM_V4BX,                   0x28)
+ELF_RELOC(R_ARM_TARGET2,                0x29)
+ELF_RELOC(R_ARM_PREL31,                 0x2a)
+ELF_RELOC(R_ARM_MOVW_ABS_NC,            0x2b)
+ELF_RELOC(R_ARM_MOVT_ABS,               0x2c)
+ELF_RELOC(R_ARM_MOVW_PREL_NC,           0x2d)
+ELF_RELOC(R_ARM_MOVT_PREL,              0x2e)
+ELF_RELOC(R_ARM_THM_MOVW_ABS_NC,        0x2f)
+ELF_RELOC(R_ARM_THM_MOVT_ABS,           0x30)
+ELF_RELOC(R_ARM_THM_MOVW_PREL_NC,       0x31)
+ELF_RELOC(R_ARM_THM_MOVT_PREL,          0x32)
+ELF_RELOC(R_ARM_THM_JUMP19,             0x33)
+ELF_RELOC(R_ARM_THM_JUMP6,              0x34)
+ELF_RELOC(R_ARM_THM_ALU_PREL_11_0,      0x35)
+ELF_RELOC(R_ARM_THM_PC12,               0x36)
+ELF_RELOC(R_ARM_ABS32_NOI,              0x37)
+ELF_RELOC(R_ARM_REL32_NOI,              0x38)
+ELF_RELOC(R_ARM_ALU_PC_G0_NC,           0x39)
+ELF_RELOC(R_ARM_ALU_PC_G0,              0x3a)
+ELF_RELOC(R_ARM_ALU_PC_G1_NC,           0x3b)
+ELF_RELOC(R_ARM_ALU_PC_G1,              0x3c)
+ELF_RELOC(R_ARM_ALU_PC_G2,              0x3d)
+ELF_RELOC(R_ARM_LDR_PC_G1,              0x3e)
+ELF_RELOC(R_ARM_LDR_PC_G2,              0x3f)
+ELF_RELOC(R_ARM_LDRS_PC_G0,             0x40)
+ELF_RELOC(R_ARM_LDRS_PC_G1,             0x41)
+ELF_RELOC(R_ARM_LDRS_PC_G2,             0x42)
+ELF_RELOC(R_ARM_LDC_PC_G0,              0x43)
+ELF_RELOC(R_ARM_LDC_PC_G1,              0x44)
+ELF_RELOC(R_ARM_LDC_PC_G2,              0x45)
+ELF_RELOC(R_ARM_ALU_SB_G0_NC,           0x46)
+ELF_RELOC(R_ARM_ALU_SB_G0,              0x47)
+ELF_RELOC(R_ARM_ALU_SB_G1_NC,           0x48)
+ELF_RELOC(R_ARM_ALU_SB_G1,              0x49)
+ELF_RELOC(R_ARM_ALU_SB_G2,              0x4a)
+ELF_RELOC(R_ARM_LDR_SB_G0,              0x4b)
+ELF_RELOC(R_ARM_LDR_SB_G1,              0x4c)
+ELF_RELOC(R_ARM_LDR_SB_G2,              0x4d)
+ELF_RELOC(R_ARM_LDRS_SB_G0,             0x4e)
+ELF_RELOC(R_ARM_LDRS_SB_G1,             0x4f)
+ELF_RELOC(R_ARM_LDRS_SB_G2,             0x50)
+ELF_RELOC(R_ARM_LDC_SB_G0,              0x51)
+ELF_RELOC(R_ARM_LDC_SB_G1,              0x52)
+ELF_RELOC(R_ARM_LDC_SB_G2,              0x53)
+ELF_RELOC(R_ARM_MOVW_BREL_NC,           0x54)
+ELF_RELOC(R_ARM_MOVT_BREL,              0x55)
+ELF_RELOC(R_ARM_MOVW_BREL,              0x56)
+ELF_RELOC(R_ARM_THM_MOVW_BREL_NC,       0x57)
+ELF_RELOC(R_ARM_THM_MOVT_BREL,          0x58)
+ELF_RELOC(R_ARM_THM_MOVW_BREL,          0x59)
+ELF_RELOC(R_ARM_TLS_GOTDESC,            0x5a)
+ELF_RELOC(R_ARM_TLS_CALL,               0x5b)
+ELF_RELOC(R_ARM_TLS_DESCSEQ,            0x5c)
+ELF_RELOC(R_ARM_THM_TLS_CALL,           0x5d)
+ELF_RELOC(R_ARM_PLT32_ABS,              0x5e)
+ELF_RELOC(R_ARM_GOT_ABS,                0x5f)
+ELF_RELOC(R_ARM_GOT_PREL,               0x60)
+ELF_RELOC(R_ARM_GOT_BREL12,             0x61)
+ELF_RELOC(R_ARM_GOTOFF12,               0x62)
+ELF_RELOC(R_ARM_GOTRELAX,               0x63)
+ELF_RELOC(R_ARM_GNU_VTENTRY,            0x64)
+ELF_RELOC(R_ARM_GNU_VTINHERIT,          0x65)
+ELF_RELOC(R_ARM_THM_JUMP11,             0x66)
+ELF_RELOC(R_ARM_THM_JUMP8,              0x67)
+ELF_RELOC(R_ARM_TLS_GD32,               0x68)
+ELF_RELOC(R_ARM_TLS_LDM32,              0x69)
+ELF_RELOC(R_ARM_TLS_LDO32,              0x6a)
+ELF_RELOC(R_ARM_TLS_IE32,               0x6b)
+ELF_RELOC(R_ARM_TLS_LE32,               0x6c)
+ELF_RELOC(R_ARM_TLS_LDO12,              0x6d)
+ELF_RELOC(R_ARM_TLS_LE12,               0x6e)
+ELF_RELOC(R_ARM_TLS_IE12GP,             0x6f)
+ELF_RELOC(R_ARM_PRIVATE_0,              0x70)
+ELF_RELOC(R_ARM_PRIVATE_1,              0x71)
+ELF_RELOC(R_ARM_PRIVATE_2,              0x72)
+ELF_RELOC(R_ARM_PRIVATE_3,              0x73)
+ELF_RELOC(R_ARM_PRIVATE_4,              0x74)
+ELF_RELOC(R_ARM_PRIVATE_5,              0x75)
+ELF_RELOC(R_ARM_PRIVATE_6,              0x76)
+ELF_RELOC(R_ARM_PRIVATE_7,              0x77)
+ELF_RELOC(R_ARM_PRIVATE_8,              0x78)
+ELF_RELOC(R_ARM_PRIVATE_9,              0x79)
+ELF_RELOC(R_ARM_PRIVATE_10,             0x7a)
+ELF_RELOC(R_ARM_PRIVATE_11,             0x7b)
+ELF_RELOC(R_ARM_PRIVATE_12,             0x7c)
+ELF_RELOC(R_ARM_PRIVATE_13,             0x7d)
+ELF_RELOC(R_ARM_PRIVATE_14,             0x7e)
+ELF_RELOC(R_ARM_PRIVATE_15,             0x7f)
+ELF_RELOC(R_ARM_ME_TOO,                 0x80)
+ELF_RELOC(R_ARM_THM_TLS_DESCSEQ16,      0x81)
+ELF_RELOC(R_ARM_THM_TLS_DESCSEQ32,      0x82)
+ELF_RELOC(R_ARM_IRELATIVE,              0xa0)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AVR.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AVR.def
new file mode 100644
index 0000000..696fc60
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/AVR.def
@@ -0,0 +1,41 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
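+
+// ELF_RELOC is an X-macro hook: this file carries only the data, and each
+// consumer defines ELF_RELOC to stamp the list into whatever form it needs.
+// A minimal consumer sketch (hypothetical, not part of this file):
+//
+//   enum AVRReloc : unsigned {
+//   #define ELF_RELOC(name, value) name = value,
+//   #include "llvm/BinaryFormat/ELFRelocs/AVR.def"
+//   #undef ELF_RELOC
+//   };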
+
+ELF_RELOC(R_AVR_NONE,                  0)
+ELF_RELOC(R_AVR_32,                    1)
+ELF_RELOC(R_AVR_7_PCREL,               2)
+ELF_RELOC(R_AVR_13_PCREL,              3)
+ELF_RELOC(R_AVR_16,                    4)
+ELF_RELOC(R_AVR_16_PM,                 5)
+ELF_RELOC(R_AVR_LO8_LDI,               6)
+ELF_RELOC(R_AVR_HI8_LDI,               7)
+ELF_RELOC(R_AVR_HH8_LDI,               8)
+ELF_RELOC(R_AVR_LO8_LDI_NEG,           9)
+ELF_RELOC(R_AVR_HI8_LDI_NEG,          10)
+ELF_RELOC(R_AVR_HH8_LDI_NEG,          11)
+ELF_RELOC(R_AVR_LO8_LDI_PM,           12)
+ELF_RELOC(R_AVR_HI8_LDI_PM,           13)
+ELF_RELOC(R_AVR_HH8_LDI_PM,           14)
+ELF_RELOC(R_AVR_LO8_LDI_PM_NEG,       15)
+ELF_RELOC(R_AVR_HI8_LDI_PM_NEG,       16)
+ELF_RELOC(R_AVR_HH8_LDI_PM_NEG,       17)
+ELF_RELOC(R_AVR_CALL,                 18)
+ELF_RELOC(R_AVR_LDI,                  19)
+ELF_RELOC(R_AVR_6,                    20)
+ELF_RELOC(R_AVR_6_ADIW,               21)
+ELF_RELOC(R_AVR_MS8_LDI,              22)
+ELF_RELOC(R_AVR_MS8_LDI_NEG,          23)
+ELF_RELOC(R_AVR_LO8_LDI_GS,           24)
+ELF_RELOC(R_AVR_HI8_LDI_GS,           25)
+ELF_RELOC(R_AVR_8,                    26)
+ELF_RELOC(R_AVR_8_LO8,                27)
+ELF_RELOC(R_AVR_8_HI8,                28)
+ELF_RELOC(R_AVR_8_HLO8,               29)
+ELF_RELOC(R_AVR_DIFF8,                30)
+ELF_RELOC(R_AVR_DIFF16,               31)
+ELF_RELOC(R_AVR_DIFF32,               32)
+ELF_RELOC(R_AVR_LDS_STS_16,           33)
+ELF_RELOC(R_AVR_PORT6,                34)
+ELF_RELOC(R_AVR_PORT5,                35)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/BPF.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/BPF.def
new file mode 100644
index 0000000..5dd7f70
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/BPF.def
@@ -0,0 +1,8 @@
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// No relocation
+ELF_RELOC(R_BPF_NONE,        0)
+ELF_RELOC(R_BPF_64_64,       1)
+ELF_RELOC(R_BPF_64_32,      10)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Hexagon.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Hexagon.def
new file mode 100644
index 0000000..5021e2b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Hexagon.def
@@ -0,0 +1,106 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// Release 5 ABI
+ELF_RELOC(R_HEX_NONE,                0)
+ELF_RELOC(R_HEX_B22_PCREL,           1)
+ELF_RELOC(R_HEX_B15_PCREL,           2)
+ELF_RELOC(R_HEX_B7_PCREL,            3)
+ELF_RELOC(R_HEX_LO16,                4)
+ELF_RELOC(R_HEX_HI16,                5)
+ELF_RELOC(R_HEX_32,                  6)
+ELF_RELOC(R_HEX_16,                  7)
+ELF_RELOC(R_HEX_8,                   8)
+ELF_RELOC(R_HEX_GPREL16_0,           9)
+ELF_RELOC(R_HEX_GPREL16_1,           10)
+ELF_RELOC(R_HEX_GPREL16_2,           11)
+ELF_RELOC(R_HEX_GPREL16_3,           12)
+ELF_RELOC(R_HEX_HL16,                13)
+ELF_RELOC(R_HEX_B13_PCREL,           14)
+ELF_RELOC(R_HEX_B9_PCREL,            15)
+ELF_RELOC(R_HEX_B32_PCREL_X,         16)
+ELF_RELOC(R_HEX_32_6_X,              17)
+ELF_RELOC(R_HEX_B22_PCREL_X,         18)
+ELF_RELOC(R_HEX_B15_PCREL_X,         19)
+ELF_RELOC(R_HEX_B13_PCREL_X,         20)
+ELF_RELOC(R_HEX_B9_PCREL_X,          21)
+ELF_RELOC(R_HEX_B7_PCREL_X,          22)
+ELF_RELOC(R_HEX_16_X,                23)
+ELF_RELOC(R_HEX_12_X,                24)
+ELF_RELOC(R_HEX_11_X,                25)
+ELF_RELOC(R_HEX_10_X,                26)
+ELF_RELOC(R_HEX_9_X,                 27)
+ELF_RELOC(R_HEX_8_X,                 28)
+ELF_RELOC(R_HEX_7_X,                 29)
+ELF_RELOC(R_HEX_6_X,                 30)
+ELF_RELOC(R_HEX_32_PCREL,            31)
+ELF_RELOC(R_HEX_COPY,                32)
+ELF_RELOC(R_HEX_GLOB_DAT,            33)
+ELF_RELOC(R_HEX_JMP_SLOT,            34)
+ELF_RELOC(R_HEX_RELATIVE,            35)
+ELF_RELOC(R_HEX_PLT_B22_PCREL,       36)
+ELF_RELOC(R_HEX_GOTREL_LO16,         37)
+ELF_RELOC(R_HEX_GOTREL_HI16,         38)
+ELF_RELOC(R_HEX_GOTREL_32,           39)
+ELF_RELOC(R_HEX_GOT_LO16,            40)
+ELF_RELOC(R_HEX_GOT_HI16,            41)
+ELF_RELOC(R_HEX_GOT_32,              42)
+ELF_RELOC(R_HEX_GOT_16,              43)
+ELF_RELOC(R_HEX_DTPMOD_32,           44)
+ELF_RELOC(R_HEX_DTPREL_LO16,         45)
+ELF_RELOC(R_HEX_DTPREL_HI16,         46)
+ELF_RELOC(R_HEX_DTPREL_32,           47)
+ELF_RELOC(R_HEX_DTPREL_16,           48)
+ELF_RELOC(R_HEX_GD_PLT_B22_PCREL,    49)
+ELF_RELOC(R_HEX_GD_GOT_LO16,         50)
+ELF_RELOC(R_HEX_GD_GOT_HI16,         51)
+ELF_RELOC(R_HEX_GD_GOT_32,           52)
+ELF_RELOC(R_HEX_GD_GOT_16,           53)
+ELF_RELOC(R_HEX_IE_LO16,             54)
+ELF_RELOC(R_HEX_IE_HI16,             55)
+ELF_RELOC(R_HEX_IE_32,               56)
+ELF_RELOC(R_HEX_IE_GOT_LO16,         57)
+ELF_RELOC(R_HEX_IE_GOT_HI16,         58)
+ELF_RELOC(R_HEX_IE_GOT_32,           59)
+ELF_RELOC(R_HEX_IE_GOT_16,           60)
+ELF_RELOC(R_HEX_TPREL_LO16,          61)
+ELF_RELOC(R_HEX_TPREL_HI16,          62)
+ELF_RELOC(R_HEX_TPREL_32,            63)
+ELF_RELOC(R_HEX_TPREL_16,            64)
+ELF_RELOC(R_HEX_6_PCREL_X,           65)
+ELF_RELOC(R_HEX_GOTREL_32_6_X,       66)
+ELF_RELOC(R_HEX_GOTREL_16_X,         67)
+ELF_RELOC(R_HEX_GOTREL_11_X,         68)
+ELF_RELOC(R_HEX_GOT_32_6_X,          69)
+ELF_RELOC(R_HEX_GOT_16_X,            70)
+ELF_RELOC(R_HEX_GOT_11_X,            71)
+ELF_RELOC(R_HEX_DTPREL_32_6_X,       72)
+ELF_RELOC(R_HEX_DTPREL_16_X,         73)
+ELF_RELOC(R_HEX_DTPREL_11_X,         74)
+ELF_RELOC(R_HEX_GD_GOT_32_6_X,       75)
+ELF_RELOC(R_HEX_GD_GOT_16_X,         76)
+ELF_RELOC(R_HEX_GD_GOT_11_X,         77)
+ELF_RELOC(R_HEX_IE_32_6_X,           78)
+ELF_RELOC(R_HEX_IE_16_X,             79)
+ELF_RELOC(R_HEX_IE_GOT_32_6_X,       80)
+ELF_RELOC(R_HEX_IE_GOT_16_X,         81)
+ELF_RELOC(R_HEX_IE_GOT_11_X,         82)
+ELF_RELOC(R_HEX_TPREL_32_6_X,        83)
+ELF_RELOC(R_HEX_TPREL_16_X,          84)
+ELF_RELOC(R_HEX_TPREL_11_X,          85)
+ELF_RELOC(R_HEX_LD_PLT_B22_PCREL,    86)
+ELF_RELOC(R_HEX_LD_GOT_LO16,         87)
+ELF_RELOC(R_HEX_LD_GOT_HI16,         88)
+ELF_RELOC(R_HEX_LD_GOT_32,           89)
+ELF_RELOC(R_HEX_LD_GOT_16,           90)
+ELF_RELOC(R_HEX_LD_GOT_32_6_X,       91)
+ELF_RELOC(R_HEX_LD_GOT_16_X,         92)
+ELF_RELOC(R_HEX_LD_GOT_11_X,         93)
+ELF_RELOC(R_HEX_23_REG,              94)
+ELF_RELOC(R_HEX_GD_PLT_B22_PCREL_X,  95)
+ELF_RELOC(R_HEX_GD_PLT_B32_PCREL_X,  96)
+ELF_RELOC(R_HEX_LD_PLT_B22_PCREL_X,  97)
+ELF_RELOC(R_HEX_LD_PLT_B32_PCREL_X,  98)
+ELF_RELOC(R_HEX_27_REG,              99)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Lanai.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Lanai.def
new file mode 100644
index 0000000..77ecb04
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Lanai.def
@@ -0,0 +1,19 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// No relocation
+ELF_RELOC(R_LANAI_NONE,        0)
+// 21-bit symbol relocation
+ELF_RELOC(R_LANAI_21,          1)
+// 21-bit symbol relocation with last two bits masked to 0
+ELF_RELOC(R_LANAI_21_F,        2)
+// 25-bit branch targets
+ELF_RELOC(R_LANAI_25,          3)
+// General 32-bit relocation
+ELF_RELOC(R_LANAI_32,          4)
+// Upper 16-bits of a symbolic relocation
+ELF_RELOC(R_LANAI_HI16,        5)
+// Lower 16-bits of a symbolic relocation
+ELF_RELOC(R_LANAI_LO16,        6)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Mips.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Mips.def
new file mode 100644
index 0000000..bc0088d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Mips.def
@@ -0,0 +1,117 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_MIPS_NONE,                0)
+ELF_RELOC(R_MIPS_16,                  1)
+ELF_RELOC(R_MIPS_32,                  2)
+ELF_RELOC(R_MIPS_REL32,               3)
+ELF_RELOC(R_MIPS_26,                  4)
+ELF_RELOC(R_MIPS_HI16,                5)
+ELF_RELOC(R_MIPS_LO16,                6)
+ELF_RELOC(R_MIPS_GPREL16,             7)
+ELF_RELOC(R_MIPS_LITERAL,             8)
+ELF_RELOC(R_MIPS_GOT16,               9)
+ELF_RELOC(R_MIPS_PC16,               10)
+ELF_RELOC(R_MIPS_CALL16,             11)
+ELF_RELOC(R_MIPS_GPREL32,            12)
+ELF_RELOC(R_MIPS_UNUSED1,            13)
+ELF_RELOC(R_MIPS_UNUSED2,            14)
+ELF_RELOC(R_MIPS_UNUSED3,            15)
+ELF_RELOC(R_MIPS_SHIFT5,             16)
+ELF_RELOC(R_MIPS_SHIFT6,             17)
+ELF_RELOC(R_MIPS_64,                 18)
+ELF_RELOC(R_MIPS_GOT_DISP,           19)
+ELF_RELOC(R_MIPS_GOT_PAGE,           20)
+ELF_RELOC(R_MIPS_GOT_OFST,           21)
+ELF_RELOC(R_MIPS_GOT_HI16,           22)
+ELF_RELOC(R_MIPS_GOT_LO16,           23)
+ELF_RELOC(R_MIPS_SUB,                24)
+ELF_RELOC(R_MIPS_INSERT_A,           25)
+ELF_RELOC(R_MIPS_INSERT_B,           26)
+ELF_RELOC(R_MIPS_DELETE,             27)
+ELF_RELOC(R_MIPS_HIGHER,             28)
+ELF_RELOC(R_MIPS_HIGHEST,            29)
+ELF_RELOC(R_MIPS_CALL_HI16,          30)
+ELF_RELOC(R_MIPS_CALL_LO16,          31)
+ELF_RELOC(R_MIPS_SCN_DISP,           32)
+ELF_RELOC(R_MIPS_REL16,              33)
+ELF_RELOC(R_MIPS_ADD_IMMEDIATE,      34)
+ELF_RELOC(R_MIPS_PJUMP,              35)
+ELF_RELOC(R_MIPS_RELGOT,             36)
+ELF_RELOC(R_MIPS_JALR,               37)
+ELF_RELOC(R_MIPS_TLS_DTPMOD32,       38)
+ELF_RELOC(R_MIPS_TLS_DTPREL32,       39)
+ELF_RELOC(R_MIPS_TLS_DTPMOD64,       40)
+ELF_RELOC(R_MIPS_TLS_DTPREL64,       41)
+ELF_RELOC(R_MIPS_TLS_GD,             42)
+ELF_RELOC(R_MIPS_TLS_LDM,            43)
+ELF_RELOC(R_MIPS_TLS_DTPREL_HI16,    44)
+ELF_RELOC(R_MIPS_TLS_DTPREL_LO16,    45)
+ELF_RELOC(R_MIPS_TLS_GOTTPREL,       46)
+ELF_RELOC(R_MIPS_TLS_TPREL32,        47)
+ELF_RELOC(R_MIPS_TLS_TPREL64,        48)
+ELF_RELOC(R_MIPS_TLS_TPREL_HI16,     49)
+ELF_RELOC(R_MIPS_TLS_TPREL_LO16,     50)
+ELF_RELOC(R_MIPS_GLOB_DAT,           51)
+ELF_RELOC(R_MIPS_PC21_S2,            60)
+ELF_RELOC(R_MIPS_PC26_S2,            61)
+ELF_RELOC(R_MIPS_PC18_S3,            62)
+ELF_RELOC(R_MIPS_PC19_S2,            63)
+ELF_RELOC(R_MIPS_PCHI16,             64)
+ELF_RELOC(R_MIPS_PCLO16,             65)
+ELF_RELOC(R_MIPS16_26,               100)
+ELF_RELOC(R_MIPS16_GPREL,            101)
+ELF_RELOC(R_MIPS16_GOT16,            102)
+ELF_RELOC(R_MIPS16_CALL16,           103)
+ELF_RELOC(R_MIPS16_HI16,             104)
+ELF_RELOC(R_MIPS16_LO16,             105)
+ELF_RELOC(R_MIPS16_TLS_GD,           106)
+ELF_RELOC(R_MIPS16_TLS_LDM,          107)
+ELF_RELOC(R_MIPS16_TLS_DTPREL_HI16,  108)
+ELF_RELOC(R_MIPS16_TLS_DTPREL_LO16,  109)
+ELF_RELOC(R_MIPS16_TLS_GOTTPREL,     110)
+ELF_RELOC(R_MIPS16_TLS_TPREL_HI16,   111)
+ELF_RELOC(R_MIPS16_TLS_TPREL_LO16,   112)
+ELF_RELOC(R_MIPS_COPY,               126)
+ELF_RELOC(R_MIPS_JUMP_SLOT,          127)
+ELF_RELOC(R_MICROMIPS_26_S1,         133)
+ELF_RELOC(R_MICROMIPS_HI16,          134)
+ELF_RELOC(R_MICROMIPS_LO16,          135)
+ELF_RELOC(R_MICROMIPS_GPREL16,       136)
+ELF_RELOC(R_MICROMIPS_LITERAL,       137)
+ELF_RELOC(R_MICROMIPS_GOT16,         138)
+ELF_RELOC(R_MICROMIPS_PC7_S1,        139)
+ELF_RELOC(R_MICROMIPS_PC10_S1,       140)
+ELF_RELOC(R_MICROMIPS_PC16_S1,       141)
+ELF_RELOC(R_MICROMIPS_CALL16,        142)
+ELF_RELOC(R_MICROMIPS_GOT_DISP,      145)
+ELF_RELOC(R_MICROMIPS_GOT_PAGE,      146)
+ELF_RELOC(R_MICROMIPS_GOT_OFST,      147)
+ELF_RELOC(R_MICROMIPS_GOT_HI16,      148)
+ELF_RELOC(R_MICROMIPS_GOT_LO16,      149)
+ELF_RELOC(R_MICROMIPS_SUB,           150)
+ELF_RELOC(R_MICROMIPS_HIGHER,        151)
+ELF_RELOC(R_MICROMIPS_HIGHEST,       152)
+ELF_RELOC(R_MICROMIPS_CALL_HI16,     153)
+ELF_RELOC(R_MICROMIPS_CALL_LO16,     154)
+ELF_RELOC(R_MICROMIPS_SCN_DISP,      155)
+ELF_RELOC(R_MICROMIPS_JALR,          156)
+ELF_RELOC(R_MICROMIPS_HI0_LO16,      157)
+ELF_RELOC(R_MICROMIPS_TLS_GD,           162)
+ELF_RELOC(R_MICROMIPS_TLS_LDM,          163)
+ELF_RELOC(R_MICROMIPS_TLS_DTPREL_HI16,  164)
+ELF_RELOC(R_MICROMIPS_TLS_DTPREL_LO16,  165)
+ELF_RELOC(R_MICROMIPS_TLS_GOTTPREL,     166)
+ELF_RELOC(R_MICROMIPS_TLS_TPREL_HI16,   169)
+ELF_RELOC(R_MICROMIPS_TLS_TPREL_LO16,   170)
+ELF_RELOC(R_MICROMIPS_GPREL7_S2,        172)
+ELF_RELOC(R_MICROMIPS_PC23_S2,          173)
+ELF_RELOC(R_MICROMIPS_PC21_S1,          174)
+ELF_RELOC(R_MICROMIPS_PC26_S1,          175)
+ELF_RELOC(R_MICROMIPS_PC18_S3,          176)
+ELF_RELOC(R_MICROMIPS_PC19_S2,          177)
+ELF_RELOC(R_MIPS_NUM,                218)
+ELF_RELOC(R_MIPS_PC32,               248)
+ELF_RELOC(R_MIPS_EH,                 249)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC.def
new file mode 100644
index 0000000..e4f8ee0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC.def
@@ -0,0 +1,123 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
+// unfortunate behavior of including asm/elf.h, which defines R_PPC_NONE, etc.
+// to their corresponding integer values. As a result, we need to undef them
+// here before continuing.
+
+#undef R_PPC_NONE
+#undef R_PPC_ADDR32
+#undef R_PPC_ADDR24
+#undef R_PPC_ADDR16
+#undef R_PPC_ADDR16_LO
+#undef R_PPC_ADDR16_HI
+#undef R_PPC_ADDR16_HA
+#undef R_PPC_ADDR14
+#undef R_PPC_ADDR14_BRTAKEN
+#undef R_PPC_ADDR14_BRNTAKEN
+#undef R_PPC_REL24
+#undef R_PPC_REL14
+#undef R_PPC_REL14_BRTAKEN
+#undef R_PPC_REL14_BRNTAKEN
+#undef R_PPC_GOT16
+#undef R_PPC_GOT16_LO
+#undef R_PPC_GOT16_HI
+#undef R_PPC_GOT16_HA
+#undef R_PPC_PLTREL24
+#undef R_PPC_JMP_SLOT
+#undef R_PPC_LOCAL24PC
+#undef R_PPC_REL32
+#undef R_PPC_TLS
+#undef R_PPC_DTPMOD32
+#undef R_PPC_TPREL16
+#undef R_PPC_TPREL16_LO
+#undef R_PPC_TPREL16_HI
+#undef R_PPC_TPREL16_HA
+#undef R_PPC_TPREL32
+#undef R_PPC_DTPREL16
+#undef R_PPC_DTPREL16_LO
+#undef R_PPC_DTPREL16_HI
+#undef R_PPC_DTPREL16_HA
+#undef R_PPC_DTPREL32
+#undef R_PPC_GOT_TLSGD16
+#undef R_PPC_GOT_TLSGD16_LO
+#undef R_PPC_GOT_TLSGD16_HI
+#undef R_PPC_GOT_TLSGD16_HA
+#undef R_PPC_GOT_TLSLD16
+#undef R_PPC_GOT_TLSLD16_LO
+#undef R_PPC_GOT_TLSLD16_HI
+#undef R_PPC_GOT_TLSLD16_HA
+#undef R_PPC_GOT_TPREL16
+#undef R_PPC_GOT_TPREL16_LO
+#undef R_PPC_GOT_TPREL16_HI
+#undef R_PPC_GOT_TPREL16_HA
+#undef R_PPC_GOT_DTPREL16
+#undef R_PPC_GOT_DTPREL16_LO
+#undef R_PPC_GOT_DTPREL16_HI
+#undef R_PPC_GOT_DTPREL16_HA
+#undef R_PPC_TLSGD
+#undef R_PPC_TLSLD
+#undef R_PPC_REL16
+#undef R_PPC_REL16_LO
+#undef R_PPC_REL16_HI
+#undef R_PPC_REL16_HA
+
+ELF_RELOC(R_PPC_NONE,                   0)      /* No relocation. */
+ELF_RELOC(R_PPC_ADDR32,                 1)
+ELF_RELOC(R_PPC_ADDR24,                 2)
+ELF_RELOC(R_PPC_ADDR16,                 3)
+ELF_RELOC(R_PPC_ADDR16_LO,              4)
+ELF_RELOC(R_PPC_ADDR16_HI,              5)
+ELF_RELOC(R_PPC_ADDR16_HA,              6)
+ELF_RELOC(R_PPC_ADDR14,                 7)
+ELF_RELOC(R_PPC_ADDR14_BRTAKEN,         8)
+ELF_RELOC(R_PPC_ADDR14_BRNTAKEN,        9)
+ELF_RELOC(R_PPC_REL24,                  10)
+ELF_RELOC(R_PPC_REL14,                  11)
+ELF_RELOC(R_PPC_REL14_BRTAKEN,          12)
+ELF_RELOC(R_PPC_REL14_BRNTAKEN,         13)
+ELF_RELOC(R_PPC_GOT16,                  14)
+ELF_RELOC(R_PPC_GOT16_LO,               15)
+ELF_RELOC(R_PPC_GOT16_HI,               16)
+ELF_RELOC(R_PPC_GOT16_HA,               17)
+ELF_RELOC(R_PPC_PLTREL24,               18)
+ELF_RELOC(R_PPC_JMP_SLOT,               21)
+ELF_RELOC(R_PPC_LOCAL24PC,              23)
+ELF_RELOC(R_PPC_REL32,                  26)
+ELF_RELOC(R_PPC_TLS,                    67)
+ELF_RELOC(R_PPC_DTPMOD32,               68)
+ELF_RELOC(R_PPC_TPREL16,                69)
+ELF_RELOC(R_PPC_TPREL16_LO,             70)
+ELF_RELOC(R_PPC_TPREL16_HI,             71)
+ELF_RELOC(R_PPC_TPREL16_HA,             72)
+ELF_RELOC(R_PPC_TPREL32,                73)
+ELF_RELOC(R_PPC_DTPREL16,               74)
+ELF_RELOC(R_PPC_DTPREL16_LO,            75)
+ELF_RELOC(R_PPC_DTPREL16_HI,            76)
+ELF_RELOC(R_PPC_DTPREL16_HA,            77)
+ELF_RELOC(R_PPC_DTPREL32,               78)
+ELF_RELOC(R_PPC_GOT_TLSGD16,            79)
+ELF_RELOC(R_PPC_GOT_TLSGD16_LO,         80)
+ELF_RELOC(R_PPC_GOT_TLSGD16_HI,         81)
+ELF_RELOC(R_PPC_GOT_TLSGD16_HA,         82)
+ELF_RELOC(R_PPC_GOT_TLSLD16,            83)
+ELF_RELOC(R_PPC_GOT_TLSLD16_LO,         84)
+ELF_RELOC(R_PPC_GOT_TLSLD16_HI,         85)
+ELF_RELOC(R_PPC_GOT_TLSLD16_HA,         86)
+ELF_RELOC(R_PPC_GOT_TPREL16,            87)
+ELF_RELOC(R_PPC_GOT_TPREL16_LO,         88)
+ELF_RELOC(R_PPC_GOT_TPREL16_HI,         89)
+ELF_RELOC(R_PPC_GOT_TPREL16_HA,         90)
+ELF_RELOC(R_PPC_GOT_DTPREL16,           91)
+ELF_RELOC(R_PPC_GOT_DTPREL16_LO,        92)
+ELF_RELOC(R_PPC_GOT_DTPREL16_HI,        93)
+ELF_RELOC(R_PPC_GOT_DTPREL16_HA,        94)
+ELF_RELOC(R_PPC_TLSGD,                  95)
+ELF_RELOC(R_PPC_TLSLD,                  96)
+ELF_RELOC(R_PPC_REL16,                  249)
+ELF_RELOC(R_PPC_REL16_LO,               250)
+ELF_RELOC(R_PPC_REL16_HI,               251)
+ELF_RELOC(R_PPC_REL16_HA,               252)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC64.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC64.def
new file mode 100644
index 0000000..3a47c5a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/PowerPC64.def
@@ -0,0 +1,181 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
+// unfortunate behavior of including asm/elf.h, which defines R_PPC_NONE, etc.
+// to their corresponding integer values. As a result, we need to undef them
+// here before continuing.
+
+#undef R_PPC64_NONE
+#undef R_PPC64_ADDR32
+#undef R_PPC64_ADDR24
+#undef R_PPC64_ADDR16
+#undef R_PPC64_ADDR16_LO
+#undef R_PPC64_ADDR16_HI
+#undef R_PPC64_ADDR16_HA
+#undef R_PPC64_ADDR14
+#undef R_PPC64_ADDR14_BRTAKEN
+#undef R_PPC64_ADDR14_BRNTAKEN
+#undef R_PPC64_REL24
+#undef R_PPC64_REL14
+#undef R_PPC64_REL14_BRTAKEN
+#undef R_PPC64_REL14_BRNTAKEN
+#undef R_PPC64_GOT16
+#undef R_PPC64_GOT16_LO
+#undef R_PPC64_GOT16_HI
+#undef R_PPC64_GOT16_HA
+#undef R_PPC64_GLOB_DAT
+#undef R_PPC64_JMP_SLOT
+#undef R_PPC64_RELATIVE
+#undef R_PPC64_REL32
+#undef R_PPC64_ADDR64
+#undef R_PPC64_ADDR16_HIGHER
+#undef R_PPC64_ADDR16_HIGHERA
+#undef R_PPC64_ADDR16_HIGHEST
+#undef R_PPC64_ADDR16_HIGHESTA
+#undef R_PPC64_REL64
+#undef R_PPC64_TOC16
+#undef R_PPC64_TOC16_LO
+#undef R_PPC64_TOC16_HI
+#undef R_PPC64_TOC16_HA
+#undef R_PPC64_TOC
+#undef R_PPC64_ADDR16_DS
+#undef R_PPC64_ADDR16_LO_DS
+#undef R_PPC64_GOT16_DS
+#undef R_PPC64_GOT16_LO_DS
+#undef R_PPC64_TOC16_DS
+#undef R_PPC64_TOC16_LO_DS
+#undef R_PPC64_TLS
+#undef R_PPC64_DTPMOD64
+#undef R_PPC64_TPREL16
+#undef R_PPC64_TPREL16_LO
+#undef R_PPC64_TPREL16_HI
+#undef R_PPC64_TPREL16_HA
+#undef R_PPC64_TPREL64
+#undef R_PPC64_DTPREL16
+#undef R_PPC64_DTPREL16_LO
+#undef R_PPC64_DTPREL16_HI
+#undef R_PPC64_DTPREL16_HA
+#undef R_PPC64_DTPREL64
+#undef R_PPC64_GOT_TLSGD16
+#undef R_PPC64_GOT_TLSGD16_LO
+#undef R_PPC64_GOT_TLSGD16_HI
+#undef R_PPC64_GOT_TLSGD16_HA
+#undef R_PPC64_GOT_TLSLD16
+#undef R_PPC64_GOT_TLSLD16_LO
+#undef R_PPC64_GOT_TLSLD16_HI
+#undef R_PPC64_GOT_TLSLD16_HA
+#undef R_PPC64_GOT_TPREL16_DS
+#undef R_PPC64_GOT_TPREL16_LO_DS
+#undef R_PPC64_GOT_TPREL16_HI
+#undef R_PPC64_GOT_TPREL16_HA
+#undef R_PPC64_GOT_DTPREL16_DS
+#undef R_PPC64_GOT_DTPREL16_LO_DS
+#undef R_PPC64_GOT_DTPREL16_HI
+#undef R_PPC64_GOT_DTPREL16_HA
+#undef R_PPC64_TPREL16_DS
+#undef R_PPC64_TPREL16_LO_DS
+#undef R_PPC64_TPREL16_HIGHER
+#undef R_PPC64_TPREL16_HIGHERA
+#undef R_PPC64_TPREL16_HIGHEST
+#undef R_PPC64_TPREL16_HIGHESTA
+#undef R_PPC64_DTPREL16_DS
+#undef R_PPC64_DTPREL16_LO_DS
+#undef R_PPC64_DTPREL16_HIGHER
+#undef R_PPC64_DTPREL16_HIGHERA
+#undef R_PPC64_DTPREL16_HIGHEST
+#undef R_PPC64_DTPREL16_HIGHESTA
+#undef R_PPC64_TLSGD
+#undef R_PPC64_TLSLD
+#undef R_PPC64_REL16
+#undef R_PPC64_REL16_LO
+#undef R_PPC64_REL16_HI
+#undef R_PPC64_REL16_HA
+
+ELF_RELOC(R_PPC64_NONE,                 0)
+ELF_RELOC(R_PPC64_ADDR32,               1)
+ELF_RELOC(R_PPC64_ADDR24,               2)
+ELF_RELOC(R_PPC64_ADDR16,               3)
+ELF_RELOC(R_PPC64_ADDR16_LO,            4)
+ELF_RELOC(R_PPC64_ADDR16_HI,            5)
+ELF_RELOC(R_PPC64_ADDR16_HA,            6)
+ELF_RELOC(R_PPC64_ADDR14,               7)
+ELF_RELOC(R_PPC64_ADDR14_BRTAKEN,       8)
+ELF_RELOC(R_PPC64_ADDR14_BRNTAKEN,      9)
+ELF_RELOC(R_PPC64_REL24,                10)
+ELF_RELOC(R_PPC64_REL14,                11)
+ELF_RELOC(R_PPC64_REL14_BRTAKEN,        12)
+ELF_RELOC(R_PPC64_REL14_BRNTAKEN,       13)
+ELF_RELOC(R_PPC64_GOT16,                14)
+ELF_RELOC(R_PPC64_GOT16_LO,             15)
+ELF_RELOC(R_PPC64_GOT16_HI,             16)
+ELF_RELOC(R_PPC64_GOT16_HA,             17)
+ELF_RELOC(R_PPC64_GLOB_DAT,             20)
+ELF_RELOC(R_PPC64_JMP_SLOT,             21)
+ELF_RELOC(R_PPC64_RELATIVE,             22)
+ELF_RELOC(R_PPC64_REL32,                26)
+ELF_RELOC(R_PPC64_ADDR64,               38)
+ELF_RELOC(R_PPC64_ADDR16_HIGHER,        39)
+ELF_RELOC(R_PPC64_ADDR16_HIGHERA,       40)
+ELF_RELOC(R_PPC64_ADDR16_HIGHEST,       41)
+ELF_RELOC(R_PPC64_ADDR16_HIGHESTA,      42)
+ELF_RELOC(R_PPC64_REL64,                44)
+ELF_RELOC(R_PPC64_TOC16,                47)
+ELF_RELOC(R_PPC64_TOC16_LO,             48)
+ELF_RELOC(R_PPC64_TOC16_HI,             49)
+ELF_RELOC(R_PPC64_TOC16_HA,             50)
+ELF_RELOC(R_PPC64_TOC,                  51)
+ELF_RELOC(R_PPC64_ADDR16_DS,            56)
+ELF_RELOC(R_PPC64_ADDR16_LO_DS,         57)
+ELF_RELOC(R_PPC64_GOT16_DS,             58)
+ELF_RELOC(R_PPC64_GOT16_LO_DS,          59)
+ELF_RELOC(R_PPC64_TOC16_DS,             63)
+ELF_RELOC(R_PPC64_TOC16_LO_DS,          64)
+ELF_RELOC(R_PPC64_TLS,                  67)
+ELF_RELOC(R_PPC64_DTPMOD64,             68)
+ELF_RELOC(R_PPC64_TPREL16,              69)
+ELF_RELOC(R_PPC64_TPREL16_LO,           70)
+ELF_RELOC(R_PPC64_TPREL16_HI,           71)
+ELF_RELOC(R_PPC64_TPREL16_HA,           72)
+ELF_RELOC(R_PPC64_TPREL64,              73)
+ELF_RELOC(R_PPC64_DTPREL16,             74)
+ELF_RELOC(R_PPC64_DTPREL16_LO,          75)
+ELF_RELOC(R_PPC64_DTPREL16_HI,          76)
+ELF_RELOC(R_PPC64_DTPREL16_HA,          77)
+ELF_RELOC(R_PPC64_DTPREL64,             78)
+ELF_RELOC(R_PPC64_GOT_TLSGD16,          79)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_LO,       80)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_HI,       81)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_HA,       82)
+ELF_RELOC(R_PPC64_GOT_TLSLD16,          83)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_LO,       84)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_HI,       85)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_HA,       86)
+ELF_RELOC(R_PPC64_GOT_TPREL16_DS,       87)
+ELF_RELOC(R_PPC64_GOT_TPREL16_LO_DS,    88)
+ELF_RELOC(R_PPC64_GOT_TPREL16_HI,       89)
+ELF_RELOC(R_PPC64_GOT_TPREL16_HA,       90)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_DS,      91)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_LO_DS,   92)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_HI,      93)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_HA,      94)
+ELF_RELOC(R_PPC64_TPREL16_DS,           95)
+ELF_RELOC(R_PPC64_TPREL16_LO_DS,        96)
+ELF_RELOC(R_PPC64_TPREL16_HIGHER,       97)
+ELF_RELOC(R_PPC64_TPREL16_HIGHERA,      98)
+ELF_RELOC(R_PPC64_TPREL16_HIGHEST,      99)
+ELF_RELOC(R_PPC64_TPREL16_HIGHESTA,     100)
+ELF_RELOC(R_PPC64_DTPREL16_DS,          101)
+ELF_RELOC(R_PPC64_DTPREL16_LO_DS,       102)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHER,      103)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHERA,     104)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHEST,     105)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHESTA,    106)
+ELF_RELOC(R_PPC64_TLSGD,                107)
+ELF_RELOC(R_PPC64_TLSLD,                108)
+ELF_RELOC(R_PPC64_REL16,                249)
+ELF_RELOC(R_PPC64_REL16_LO,             250)
+ELF_RELOC(R_PPC64_REL16_HI,             251)
+ELF_RELOC(R_PPC64_REL16_HA,             252)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/RISCV.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/RISCV.def
new file mode 100644
index 0000000..5cc4c0e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/RISCV.def
@@ -0,0 +1,59 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_RISCV_NONE,               0)
+ELF_RELOC(R_RISCV_32,                 1)
+ELF_RELOC(R_RISCV_64,                 2)
+ELF_RELOC(R_RISCV_RELATIVE,           3)
+ELF_RELOC(R_RISCV_COPY,               4)
+ELF_RELOC(R_RISCV_JUMP_SLOT,          5)
+ELF_RELOC(R_RISCV_TLS_DTPMOD32,       6)
+ELF_RELOC(R_RISCV_TLS_DTPMOD64,       7)
+ELF_RELOC(R_RISCV_TLS_DTPREL32,       8)
+ELF_RELOC(R_RISCV_TLS_DTPREL64,       9)
+ELF_RELOC(R_RISCV_TLS_TPREL32,       10)
+ELF_RELOC(R_RISCV_TLS_TPREL64,       11)
+ELF_RELOC(R_RISCV_BRANCH,            16)
+ELF_RELOC(R_RISCV_JAL,               17)
+ELF_RELOC(R_RISCV_CALL,              18)
+ELF_RELOC(R_RISCV_CALL_PLT,          19)
+ELF_RELOC(R_RISCV_GOT_HI20,          20)
+ELF_RELOC(R_RISCV_TLS_GOT_HI20,      21)
+ELF_RELOC(R_RISCV_TLS_GD_HI20,       22)
+ELF_RELOC(R_RISCV_PCREL_HI20,        23)
+ELF_RELOC(R_RISCV_PCREL_LO12_I,      24)
+ELF_RELOC(R_RISCV_PCREL_LO12_S,      25)
+ELF_RELOC(R_RISCV_HI20,              26)
+ELF_RELOC(R_RISCV_LO12_I,            27)
+ELF_RELOC(R_RISCV_LO12_S,            28)
+ELF_RELOC(R_RISCV_TPREL_HI20,        29)
+ELF_RELOC(R_RISCV_TPREL_LO12_I,      30)
+ELF_RELOC(R_RISCV_TPREL_LO12_S,      31)
+ELF_RELOC(R_RISCV_TPREL_ADD,         32)
+ELF_RELOC(R_RISCV_ADD8,              33)
+ELF_RELOC(R_RISCV_ADD16,             34)
+ELF_RELOC(R_RISCV_ADD32,             35)
+ELF_RELOC(R_RISCV_ADD64,             36)
+ELF_RELOC(R_RISCV_SUB8,              37)
+ELF_RELOC(R_RISCV_SUB16,             38)
+ELF_RELOC(R_RISCV_SUB32,             39)
+ELF_RELOC(R_RISCV_SUB64,             40)
+ELF_RELOC(R_RISCV_GNU_VTINHERIT,     41)
+ELF_RELOC(R_RISCV_GNU_VTENTRY,       42)
+ELF_RELOC(R_RISCV_ALIGN,             43)
+ELF_RELOC(R_RISCV_RVC_BRANCH,        44)
+ELF_RELOC(R_RISCV_RVC_JUMP,          45)
+ELF_RELOC(R_RISCV_RVC_LUI,           46)
+ELF_RELOC(R_RISCV_GPREL_I,           47)
+ELF_RELOC(R_RISCV_GPREL_S,           48)
+ELF_RELOC(R_RISCV_TPREL_I,           49)
+ELF_RELOC(R_RISCV_TPREL_S,           50)
+ELF_RELOC(R_RISCV_RELAX,             51)
+ELF_RELOC(R_RISCV_SUB6,              52)
+ELF_RELOC(R_RISCV_SET6,              53)
+ELF_RELOC(R_RISCV_SET8,              54)
+ELF_RELOC(R_RISCV_SET16,             55)
+ELF_RELOC(R_RISCV_SET32,             56)
+ELF_RELOC(R_RISCV_32_PCREL,          57)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Sparc.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Sparc.def
new file mode 100644
index 0000000..7e01a4a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/Sparc.def
@@ -0,0 +1,89 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_SPARC_NONE,         0)
+ELF_RELOC(R_SPARC_8,            1)
+ELF_RELOC(R_SPARC_16,           2)
+ELF_RELOC(R_SPARC_32,           3)
+ELF_RELOC(R_SPARC_DISP8,        4)
+ELF_RELOC(R_SPARC_DISP16,       5)
+ELF_RELOC(R_SPARC_DISP32,       6)
+ELF_RELOC(R_SPARC_WDISP30,      7)
+ELF_RELOC(R_SPARC_WDISP22,      8)
+ELF_RELOC(R_SPARC_HI22,         9)
+ELF_RELOC(R_SPARC_22,           10)
+ELF_RELOC(R_SPARC_13,           11)
+ELF_RELOC(R_SPARC_LO10,         12)
+ELF_RELOC(R_SPARC_GOT10,        13)
+ELF_RELOC(R_SPARC_GOT13,        14)
+ELF_RELOC(R_SPARC_GOT22,        15)
+ELF_RELOC(R_SPARC_PC10,         16)
+ELF_RELOC(R_SPARC_PC22,         17)
+ELF_RELOC(R_SPARC_WPLT30,       18)
+ELF_RELOC(R_SPARC_COPY,         19)
+ELF_RELOC(R_SPARC_GLOB_DAT,     20)
+ELF_RELOC(R_SPARC_JMP_SLOT,     21)
+ELF_RELOC(R_SPARC_RELATIVE,     22)
+ELF_RELOC(R_SPARC_UA32,         23)
+ELF_RELOC(R_SPARC_PLT32,        24)
+ELF_RELOC(R_SPARC_HIPLT22,      25)
+ELF_RELOC(R_SPARC_LOPLT10,      26)
+ELF_RELOC(R_SPARC_PCPLT32,      27)
+ELF_RELOC(R_SPARC_PCPLT22,      28)
+ELF_RELOC(R_SPARC_PCPLT10,      29)
+ELF_RELOC(R_SPARC_10,           30)
+ELF_RELOC(R_SPARC_11,           31)
+ELF_RELOC(R_SPARC_64,           32)
+ELF_RELOC(R_SPARC_OLO10,        33)
+ELF_RELOC(R_SPARC_HH22,         34)
+ELF_RELOC(R_SPARC_HM10,         35)
+ELF_RELOC(R_SPARC_LM22,         36)
+ELF_RELOC(R_SPARC_PC_HH22,      37)
+ELF_RELOC(R_SPARC_PC_HM10,      38)
+ELF_RELOC(R_SPARC_PC_LM22,      39)
+ELF_RELOC(R_SPARC_WDISP16,      40)
+ELF_RELOC(R_SPARC_WDISP19,      41)
+ELF_RELOC(R_SPARC_7,            43)
+ELF_RELOC(R_SPARC_5,            44)
+ELF_RELOC(R_SPARC_6,            45)
+ELF_RELOC(R_SPARC_DISP64,       46)
+ELF_RELOC(R_SPARC_PLT64,        47)
+ELF_RELOC(R_SPARC_HIX22,        48)
+ELF_RELOC(R_SPARC_LOX10,        49)
+ELF_RELOC(R_SPARC_H44,          50)
+ELF_RELOC(R_SPARC_M44,          51)
+ELF_RELOC(R_SPARC_L44,          52)
+ELF_RELOC(R_SPARC_REGISTER,     53)
+ELF_RELOC(R_SPARC_UA64,         54)
+ELF_RELOC(R_SPARC_UA16,         55)
+ELF_RELOC(R_SPARC_TLS_GD_HI22,    56)
+ELF_RELOC(R_SPARC_TLS_GD_LO10,    57)
+ELF_RELOC(R_SPARC_TLS_GD_ADD,     58)
+ELF_RELOC(R_SPARC_TLS_GD_CALL,    59)
+ELF_RELOC(R_SPARC_TLS_LDM_HI22,   60)
+ELF_RELOC(R_SPARC_TLS_LDM_LO10,   61)
+ELF_RELOC(R_SPARC_TLS_LDM_ADD,    62)
+ELF_RELOC(R_SPARC_TLS_LDM_CALL,   63)
+ELF_RELOC(R_SPARC_TLS_LDO_HIX22,  64)
+ELF_RELOC(R_SPARC_TLS_LDO_LOX10,  65)
+ELF_RELOC(R_SPARC_TLS_LDO_ADD,    66)
+ELF_RELOC(R_SPARC_TLS_IE_HI22,    67)
+ELF_RELOC(R_SPARC_TLS_IE_LO10,    68)
+ELF_RELOC(R_SPARC_TLS_IE_LD,      69)
+ELF_RELOC(R_SPARC_TLS_IE_LDX,     70)
+ELF_RELOC(R_SPARC_TLS_IE_ADD,     71)
+ELF_RELOC(R_SPARC_TLS_LE_HIX22,   72)
+ELF_RELOC(R_SPARC_TLS_LE_LOX10,   73)
+ELF_RELOC(R_SPARC_TLS_DTPMOD32,   74)
+ELF_RELOC(R_SPARC_TLS_DTPMOD64,   75)
+ELF_RELOC(R_SPARC_TLS_DTPOFF32,   76)
+ELF_RELOC(R_SPARC_TLS_DTPOFF64,   77)
+ELF_RELOC(R_SPARC_TLS_TPOFF32,    78)
+ELF_RELOC(R_SPARC_TLS_TPOFF64,    79)
+ELF_RELOC(R_SPARC_GOTDATA_HIX22,  80)
+ELF_RELOC(R_SPARC_GOTDATA_LOX10,  81)
+ELF_RELOC(R_SPARC_GOTDATA_OP_HIX22,  82)
+ELF_RELOC(R_SPARC_GOTDATA_OP_LOX10,  83)
+ELF_RELOC(R_SPARC_GOTDATA_OP,     84)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/SystemZ.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/SystemZ.def
new file mode 100644
index 0000000..d6c0b79
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/SystemZ.def
@@ -0,0 +1,71 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_390_NONE,          0)
+ELF_RELOC(R_390_8,             1)
+ELF_RELOC(R_390_12,            2)
+ELF_RELOC(R_390_16,            3)
+ELF_RELOC(R_390_32,            4)
+ELF_RELOC(R_390_PC32,          5)
+ELF_RELOC(R_390_GOT12,         6)
+ELF_RELOC(R_390_GOT32,         7)
+ELF_RELOC(R_390_PLT32,         8)
+ELF_RELOC(R_390_COPY,          9)
+ELF_RELOC(R_390_GLOB_DAT,     10)
+ELF_RELOC(R_390_JMP_SLOT,     11)
+ELF_RELOC(R_390_RELATIVE,     12)
+ELF_RELOC(R_390_GOTOFF,       13)
+ELF_RELOC(R_390_GOTPC,        14)
+ELF_RELOC(R_390_GOT16,        15)
+ELF_RELOC(R_390_PC16,         16)
+ELF_RELOC(R_390_PC16DBL,      17)
+ELF_RELOC(R_390_PLT16DBL,     18)
+ELF_RELOC(R_390_PC32DBL,      19)
+ELF_RELOC(R_390_PLT32DBL,     20)
+ELF_RELOC(R_390_GOTPCDBL,     21)
+ELF_RELOC(R_390_64,           22)
+ELF_RELOC(R_390_PC64,         23)
+ELF_RELOC(R_390_GOT64,        24)
+ELF_RELOC(R_390_PLT64,        25)
+ELF_RELOC(R_390_GOTENT,       26)
+ELF_RELOC(R_390_GOTOFF16,     27)
+ELF_RELOC(R_390_GOTOFF64,     28)
+ELF_RELOC(R_390_GOTPLT12,     29)
+ELF_RELOC(R_390_GOTPLT16,     30)
+ELF_RELOC(R_390_GOTPLT32,     31)
+ELF_RELOC(R_390_GOTPLT64,     32)
+ELF_RELOC(R_390_GOTPLTENT,    33)
+ELF_RELOC(R_390_PLTOFF16,     34)
+ELF_RELOC(R_390_PLTOFF32,     35)
+ELF_RELOC(R_390_PLTOFF64,     36)
+ELF_RELOC(R_390_TLS_LOAD,     37)
+ELF_RELOC(R_390_TLS_GDCALL,   38)
+ELF_RELOC(R_390_TLS_LDCALL,   39)
+ELF_RELOC(R_390_TLS_GD32,     40)
+ELF_RELOC(R_390_TLS_GD64,     41)
+ELF_RELOC(R_390_TLS_GOTIE12,  42)
+ELF_RELOC(R_390_TLS_GOTIE32,  43)
+ELF_RELOC(R_390_TLS_GOTIE64,  44)
+ELF_RELOC(R_390_TLS_LDM32,    45)
+ELF_RELOC(R_390_TLS_LDM64,    46)
+ELF_RELOC(R_390_TLS_IE32,     47)
+ELF_RELOC(R_390_TLS_IE64,     48)
+ELF_RELOC(R_390_TLS_IEENT,    49)
+ELF_RELOC(R_390_TLS_LE32,     50)
+ELF_RELOC(R_390_TLS_LE64,     51)
+ELF_RELOC(R_390_TLS_LDO32,    52)
+ELF_RELOC(R_390_TLS_LDO64,    53)
+ELF_RELOC(R_390_TLS_DTPMOD,   54)
+ELF_RELOC(R_390_TLS_DTPOFF,   55)
+ELF_RELOC(R_390_TLS_TPOFF,    56)
+ELF_RELOC(R_390_20,           57)
+ELF_RELOC(R_390_GOT20,        58)
+ELF_RELOC(R_390_GOTPLT20,     59)
+ELF_RELOC(R_390_TLS_GOTIE20,  60)
+ELF_RELOC(R_390_IRELATIVE,    61)
+ELF_RELOC(R_390_PC12DBL,      62)
+ELF_RELOC(R_390_PLT12DBL,     63)
+ELF_RELOC(R_390_PC24DBL,      64)
+ELF_RELOC(R_390_PLT24DBL,     65)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/WebAssembly.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/WebAssembly.def
new file mode 100644
index 0000000..9a34349
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/WebAssembly.def
@@ -0,0 +1,8 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_WEBASSEMBLY_NONE,          0)
+ELF_RELOC(R_WEBASSEMBLY_DATA,          1)
+ELF_RELOC(R_WEBASSEMBLY_FUNCTION,      2)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/i386.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/i386.def
new file mode 100644
index 0000000..1d28cf5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/i386.def
@@ -0,0 +1,47 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// TODO: this is just a subset
+ELF_RELOC(R_386_NONE,           0)
+ELF_RELOC(R_386_32,             1)
+ELF_RELOC(R_386_PC32,           2)
+ELF_RELOC(R_386_GOT32,          3)
+ELF_RELOC(R_386_PLT32,          4)
+ELF_RELOC(R_386_COPY,           5)
+ELF_RELOC(R_386_GLOB_DAT,       6)
+ELF_RELOC(R_386_JUMP_SLOT,      7)
+ELF_RELOC(R_386_RELATIVE,       8)
+ELF_RELOC(R_386_GOTOFF,         9)
+ELF_RELOC(R_386_GOTPC,          10)
+ELF_RELOC(R_386_32PLT,          11)
+ELF_RELOC(R_386_TLS_TPOFF,      14)
+ELF_RELOC(R_386_TLS_IE,         15)
+ELF_RELOC(R_386_TLS_GOTIE,      16)
+ELF_RELOC(R_386_TLS_LE,         17)
+ELF_RELOC(R_386_TLS_GD,         18)
+ELF_RELOC(R_386_TLS_LDM,        19)
+ELF_RELOC(R_386_16,             20)
+ELF_RELOC(R_386_PC16,           21)
+ELF_RELOC(R_386_8,              22)
+ELF_RELOC(R_386_PC8,            23)
+ELF_RELOC(R_386_TLS_GD_32,      24)
+ELF_RELOC(R_386_TLS_GD_PUSH,    25)
+ELF_RELOC(R_386_TLS_GD_CALL,    26)
+ELF_RELOC(R_386_TLS_GD_POP,     27)
+ELF_RELOC(R_386_TLS_LDM_32,     28)
+ELF_RELOC(R_386_TLS_LDM_PUSH,   29)
+ELF_RELOC(R_386_TLS_LDM_CALL,   30)
+ELF_RELOC(R_386_TLS_LDM_POP,    31)
+ELF_RELOC(R_386_TLS_LDO_32,     32)
+ELF_RELOC(R_386_TLS_IE_32,      33)
+ELF_RELOC(R_386_TLS_LE_32,      34)
+ELF_RELOC(R_386_TLS_DTPMOD32,   35)
+ELF_RELOC(R_386_TLS_DTPOFF32,   36)
+ELF_RELOC(R_386_TLS_TPOFF32,    37)
+ELF_RELOC(R_386_TLS_GOTDESC,    39)
+ELF_RELOC(R_386_TLS_DESC_CALL,  40)
+ELF_RELOC(R_386_TLS_DESC,       41)
+ELF_RELOC(R_386_IRELATIVE,      42)
+ELF_RELOC(R_386_GOT32X,         43)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/x86_64.def b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/x86_64.def
new file mode 100644
index 0000000..18fdcf9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/ELFRelocs/x86_64.def
@@ -0,0 +1,45 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_X86_64_NONE,        0)
+ELF_RELOC(R_X86_64_64,          1)
+ELF_RELOC(R_X86_64_PC32,        2)
+ELF_RELOC(R_X86_64_GOT32,       3)
+ELF_RELOC(R_X86_64_PLT32,       4)
+ELF_RELOC(R_X86_64_COPY,        5)
+ELF_RELOC(R_X86_64_GLOB_DAT,    6)
+ELF_RELOC(R_X86_64_JUMP_SLOT,   7)
+ELF_RELOC(R_X86_64_RELATIVE,    8)
+ELF_RELOC(R_X86_64_GOTPCREL,    9)
+ELF_RELOC(R_X86_64_32,          10)
+ELF_RELOC(R_X86_64_32S,         11)
+ELF_RELOC(R_X86_64_16,          12)
+ELF_RELOC(R_X86_64_PC16,        13)
+ELF_RELOC(R_X86_64_8,           14)
+ELF_RELOC(R_X86_64_PC8,         15)
+ELF_RELOC(R_X86_64_DTPMOD64,    16)
+ELF_RELOC(R_X86_64_DTPOFF64,    17)
+ELF_RELOC(R_X86_64_TPOFF64,     18)
+ELF_RELOC(R_X86_64_TLSGD,       19)
+ELF_RELOC(R_X86_64_TLSLD,       20)
+ELF_RELOC(R_X86_64_DTPOFF32,    21)
+ELF_RELOC(R_X86_64_GOTTPOFF,    22)
+ELF_RELOC(R_X86_64_TPOFF32,     23)
+ELF_RELOC(R_X86_64_PC64,        24)
+ELF_RELOC(R_X86_64_GOTOFF64,    25)
+ELF_RELOC(R_X86_64_GOTPC32,     26)
+ELF_RELOC(R_X86_64_GOT64,       27)
+ELF_RELOC(R_X86_64_GOTPCREL64,  28)
+ELF_RELOC(R_X86_64_GOTPC64,     29)
+ELF_RELOC(R_X86_64_GOTPLT64,    30)
+ELF_RELOC(R_X86_64_PLTOFF64,    31)
+ELF_RELOC(R_X86_64_SIZE32,      32)
+ELF_RELOC(R_X86_64_SIZE64,      33)
+ELF_RELOC(R_X86_64_GOTPC32_TLSDESC,  34)
+ELF_RELOC(R_X86_64_TLSDESC_CALL,     35)
+ELF_RELOC(R_X86_64_TLSDESC,     36)
+ELF_RELOC(R_X86_64_IRELATIVE,   37)
+ELF_RELOC(R_X86_64_GOTPCRELX,   41)
+ELF_RELOC(R_X86_64_REX_GOTPCRELX,    42)
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/MachO.def b/linux-x64/clang/include/llvm/BinaryFormat/MachO.def
new file mode 100644
index 0000000..95de48d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/MachO.def
@@ -0,0 +1,120 @@
+//===-- llvm/Support/MachO.def - The MachO file definitions -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definitions for MachO files
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef HANDLE_LOAD_COMMAND
+
+HANDLE_LOAD_COMMAND(LC_SEGMENT, 0x00000001u, segment_command)
+HANDLE_LOAD_COMMAND(LC_SYMTAB, 0x00000002u, symtab_command)
+// LC_SYMSEG is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_SYMSEG, 0x00000003u, symseg_command)
+HANDLE_LOAD_COMMAND(LC_THREAD, 0x00000004u, thread_command)
+HANDLE_LOAD_COMMAND(LC_UNIXTHREAD, 0x00000005u, thread_command)
+// LC_LOADFVMLIB is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_LOADFVMLIB, 0x00000006u, fvmlib_command)
+// LC_IDFVMLIB is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_IDFVMLIB, 0x00000007u, fvmlib_command)
+// LC_IDENT is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_IDENT, 0x00000008u, ident_command)
+// LC_FVMFILE is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_FVMFILE, 0x00000009u, fvmfile_command)
+// LC_PREPAGE is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_PREPAGE, 0x0000000Au, load_command)
+HANDLE_LOAD_COMMAND(LC_DYSYMTAB, 0x0000000Bu, dysymtab_command)
+HANDLE_LOAD_COMMAND(LC_LOAD_DYLIB, 0x0000000Cu, dylib_command)
+HANDLE_LOAD_COMMAND(LC_ID_DYLIB, 0x0000000Du, dylib_command)
+HANDLE_LOAD_COMMAND(LC_LOAD_DYLINKER, 0x0000000Eu, dylinker_command)
+HANDLE_LOAD_COMMAND(LC_ID_DYLINKER, 0x0000000Fu, dylinker_command)
+// LC_PREBOUND_DYLIB is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_PREBOUND_DYLIB, 0x00000010u, prebound_dylib_command)
+HANDLE_LOAD_COMMAND(LC_ROUTINES, 0x00000011u, routines_command)
+HANDLE_LOAD_COMMAND(LC_SUB_FRAMEWORK, 0x00000012u, sub_framework_command)
+HANDLE_LOAD_COMMAND(LC_SUB_UMBRELLA, 0x00000013u, sub_umbrella_command)
+HANDLE_LOAD_COMMAND(LC_SUB_CLIENT, 0x00000014u, sub_client_command)
+HANDLE_LOAD_COMMAND(LC_SUB_LIBRARY, 0x00000015u, sub_library_command)
+// LC_TWOLEVEL_HINTS is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_TWOLEVEL_HINTS, 0x00000016u, twolevel_hints_command)
+// LC_PREBIND_CKSUM is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_PREBIND_CKSUM, 0x00000017u, prebind_cksum_command)
+// LC_LOAD_WEAK_DYLIB is obsolete and no longer supported.
+HANDLE_LOAD_COMMAND(LC_LOAD_WEAK_DYLIB, 0x80000018u, dylib_command)
+HANDLE_LOAD_COMMAND(LC_SEGMENT_64, 0x00000019u, segment_command_64)
+HANDLE_LOAD_COMMAND(LC_ROUTINES_64, 0x0000001Au, routines_command_64)
+HANDLE_LOAD_COMMAND(LC_UUID, 0x0000001Bu, uuid_command)
+HANDLE_LOAD_COMMAND(LC_RPATH, 0x8000001Cu, rpath_command)
+HANDLE_LOAD_COMMAND(LC_CODE_SIGNATURE, 0x0000001Du, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_SEGMENT_SPLIT_INFO, 0x0000001Eu, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_REEXPORT_DYLIB, 0x8000001Fu, dylib_command)
+HANDLE_LOAD_COMMAND(LC_LAZY_LOAD_DYLIB, 0x00000020u, dylib_command)
+HANDLE_LOAD_COMMAND(LC_ENCRYPTION_INFO, 0x00000021u, encryption_info_command)
+HANDLE_LOAD_COMMAND(LC_DYLD_INFO, 0x00000022u, dyld_info_command)
+HANDLE_LOAD_COMMAND(LC_DYLD_INFO_ONLY, 0x80000022u, dyld_info_command)
+HANDLE_LOAD_COMMAND(LC_LOAD_UPWARD_DYLIB, 0x80000023u, dylib_command)
+HANDLE_LOAD_COMMAND(LC_VERSION_MIN_MACOSX, 0x00000024u, version_min_command)
+HANDLE_LOAD_COMMAND(LC_VERSION_MIN_IPHONEOS, 0x00000025u, version_min_command)
+HANDLE_LOAD_COMMAND(LC_FUNCTION_STARTS, 0x00000026u, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_DYLD_ENVIRONMENT, 0x00000027u, dylinker_command)
+HANDLE_LOAD_COMMAND(LC_MAIN, 0x80000028u, entry_point_command)
+HANDLE_LOAD_COMMAND(LC_DATA_IN_CODE, 0x00000029u, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_SOURCE_VERSION, 0x0000002Au, source_version_command)
+HANDLE_LOAD_COMMAND(LC_DYLIB_CODE_SIGN_DRS, 0x0000002Bu, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_ENCRYPTION_INFO_64, 0x0000002Cu,
+                    encryption_info_command_64)
+HANDLE_LOAD_COMMAND(LC_LINKER_OPTION, 0x0000002Du, linker_option_command)
+HANDLE_LOAD_COMMAND(LC_LINKER_OPTIMIZATION_HINT, 0x0000002Eu, linkedit_data_command)
+HANDLE_LOAD_COMMAND(LC_VERSION_MIN_TVOS, 0x0000002Fu, version_min_command)
+HANDLE_LOAD_COMMAND(LC_VERSION_MIN_WATCHOS, 0x00000030u, version_min_command)
+HANDLE_LOAD_COMMAND(LC_NOTE, 0x00000031u, note_command)
+HANDLE_LOAD_COMMAND(LC_BUILD_VERSION, 0x00000032u, build_version_command)
+
+#endif
+
+#ifdef LOAD_COMMAND_STRUCT
+
+LOAD_COMMAND_STRUCT(dyld_info_command)
+LOAD_COMMAND_STRUCT(dylib_command)
+LOAD_COMMAND_STRUCT(dylinker_command)
+LOAD_COMMAND_STRUCT(dysymtab_command)
+LOAD_COMMAND_STRUCT(encryption_info_command)
+LOAD_COMMAND_STRUCT(encryption_info_command_64)
+LOAD_COMMAND_STRUCT(entry_point_command)
+LOAD_COMMAND_STRUCT(fvmfile_command)
+LOAD_COMMAND_STRUCT(fvmlib_command)
+LOAD_COMMAND_STRUCT(ident_command)
+LOAD_COMMAND_STRUCT(linkedit_data_command)
+LOAD_COMMAND_STRUCT(linker_option_command)
+LOAD_COMMAND_STRUCT(load_command)
+LOAD_COMMAND_STRUCT(prebind_cksum_command)
+LOAD_COMMAND_STRUCT(prebound_dylib_command)
+LOAD_COMMAND_STRUCT(routines_command)
+LOAD_COMMAND_STRUCT(routines_command_64)
+LOAD_COMMAND_STRUCT(rpath_command)
+LOAD_COMMAND_STRUCT(segment_command)
+LOAD_COMMAND_STRUCT(segment_command_64)
+LOAD_COMMAND_STRUCT(source_version_command)
+LOAD_COMMAND_STRUCT(sub_client_command)
+LOAD_COMMAND_STRUCT(sub_framework_command)
+LOAD_COMMAND_STRUCT(sub_library_command)
+LOAD_COMMAND_STRUCT(sub_umbrella_command)
+LOAD_COMMAND_STRUCT(symseg_command)
+LOAD_COMMAND_STRUCT(symtab_command)
+LOAD_COMMAND_STRUCT(thread_command)
+LOAD_COMMAND_STRUCT(twolevel_hints_command)
+LOAD_COMMAND_STRUCT(uuid_command)
+LOAD_COMMAND_STRUCT(version_min_command)
+LOAD_COMMAND_STRUCT(note_command)
+LOAD_COMMAND_STRUCT(build_version_command)
+
+#endif
+
+#undef HANDLE_LOAD_COMMAND
+#undef LOAD_COMMAND_STRUCT
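+
+// Two X-macro hooks are exposed above: HANDLE_LOAD_COMMAND yields each load
+// command's name, value, and struct type; LOAD_COMMAND_STRUCT yields just the
+// struct names. A consumer defines whichever hook it needs before including
+// this file (the trailing #undefs mean it need not clean up afterwards). A
+// sketch for the struct hook, with a hypothetical dump() consumer:
+//
+//   #define LOAD_COMMAND_STRUCT(LCStruct) void dump(const MachO::LCStruct &);
+//   #include "llvm/BinaryFormat/MachO.def"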
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/MachO.h b/linux-x64/clang/include/llvm/BinaryFormat/MachO.h
new file mode 100644
index 0000000..060fbe1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/MachO.h
@@ -0,0 +1,1983 @@
+//===-- llvm/BinaryFormat/MachO.h - The MachO file format -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines manifest constants for the MachO object file format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_MACHO_H
+#define LLVM_BINARYFORMAT_MACHO_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+
+namespace llvm {
+namespace MachO {
+// Enums from <mach-o/loader.h>
+enum : uint32_t {
+  // Constants for the "magic" field in llvm::MachO::mach_header and
+  // llvm::MachO::mach_header_64
+  MH_MAGIC = 0xFEEDFACEu,
+  MH_CIGAM = 0xCEFAEDFEu,
+  MH_MAGIC_64 = 0xFEEDFACFu,
+  MH_CIGAM_64 = 0xCFFAEDFEu,
+  FAT_MAGIC = 0xCAFEBABEu,
+  FAT_CIGAM = 0xBEBAFECAu,
+  FAT_MAGIC_64 = 0xCAFEBABFu,
+  FAT_CIGAM_64 = 0xBFBAFECAu
+};
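+
+// The MH_CIGAM* and FAT_CIGAM* values are simply the byte-swapped spellings
+// of the corresponding *_MAGIC values, so a reader can detect a file written
+// with the opposite endianness from its first word. Sketch (hypothetical
+// helper, not part of this header):
+//
+//   inline bool isByteSwappedMachO(uint32_t Magic) {
+//     return Magic == MH_CIGAM || Magic == MH_CIGAM_64 ||
+//            Magic == FAT_CIGAM || Magic == FAT_CIGAM_64;
+//   }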
+
+enum HeaderFileType {
+  // Constants for the "filetype" field in llvm::MachO::mach_header and
+  // llvm::MachO::mach_header_64
+  MH_OBJECT = 0x1u,
+  MH_EXECUTE = 0x2u,
+  MH_FVMLIB = 0x3u,
+  MH_CORE = 0x4u,
+  MH_PRELOAD = 0x5u,
+  MH_DYLIB = 0x6u,
+  MH_DYLINKER = 0x7u,
+  MH_BUNDLE = 0x8u,
+  MH_DYLIB_STUB = 0x9u,
+  MH_DSYM = 0xAu,
+  MH_KEXT_BUNDLE = 0xBu
+};
+
+enum {
+  // Constant bits for the "flags" field in llvm::MachO::mach_header and
+  // llvm::MachO::mach_header_64
+  MH_NOUNDEFS = 0x00000001u,
+  MH_INCRLINK = 0x00000002u,
+  MH_DYLDLINK = 0x00000004u,
+  MH_BINDATLOAD = 0x00000008u,
+  MH_PREBOUND = 0x00000010u,
+  MH_SPLIT_SEGS = 0x00000020u,
+  MH_LAZY_INIT = 0x00000040u,
+  MH_TWOLEVEL = 0x00000080u,
+  MH_FORCE_FLAT = 0x00000100u,
+  MH_NOMULTIDEFS = 0x00000200u,
+  MH_NOFIXPREBINDING = 0x00000400u,
+  MH_PREBINDABLE = 0x00000800u,
+  MH_ALLMODSBOUND = 0x00001000u,
+  MH_SUBSECTIONS_VIA_SYMBOLS = 0x00002000u,
+  MH_CANONICAL = 0x00004000u,
+  MH_WEAK_DEFINES = 0x00008000u,
+  MH_BINDS_TO_WEAK = 0x00010000u,
+  MH_ALLOW_STACK_EXECUTION = 0x00020000u,
+  MH_ROOT_SAFE = 0x00040000u,
+  MH_SETUID_SAFE = 0x00080000u,
+  MH_NO_REEXPORTED_DYLIBS = 0x00100000u,
+  MH_PIE = 0x00200000u,
+  MH_DEAD_STRIPPABLE_DYLIB = 0x00400000u,
+  MH_HAS_TLV_DESCRIPTORS = 0x00800000u,
+  MH_NO_HEAP_EXECUTION = 0x01000000u,
+  MH_APP_EXTENSION_SAFE = 0x02000000u,
+  MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000u
+};
+
+enum : uint32_t {
+  // Flags for the "cmd" field in llvm::MachO::load_command
+  LC_REQ_DYLD = 0x80000000u
+};
+
+#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) LCName = LCValue,
+
+enum LoadCommandType : uint32_t {
+#include "llvm/BinaryFormat/MachO.def"
+};
+
+#undef HANDLE_LOAD_COMMAND
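+
+// The include above is the X-macro expansion point: each
+// HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) entry in MachO.def expands
+// to "LCName = LCValue,", so the enum reads, illustratively:
+//
+//   enum LoadCommandType : uint32_t {
+//     LC_SEGMENT = 0x00000001u,
+//     LC_SYMTAB = 0x00000002u,
+//     // ... one enumerator per MachO.def entry ...
+//   };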
+
+enum : uint32_t {
+  // Constant bits for the "flags" field in llvm::MachO::segment_command
+  SG_HIGHVM = 0x1u,
+  SG_FVMLIB = 0x2u,
+  SG_NORELOC = 0x4u,
+  SG_PROTECTED_VERSION_1 = 0x8u,
+
+  // Constant masks for the "flags" field in llvm::MachO::section and
+  // llvm::MachO::section_64
+  SECTION_TYPE = 0x000000ffu,           // SECTION_TYPE
+  SECTION_ATTRIBUTES = 0xffffff00u,     // SECTION_ATTRIBUTES
+  SECTION_ATTRIBUTES_USR = 0xff000000u, // SECTION_ATTRIBUTES_USR
+  SECTION_ATTRIBUTES_SYS = 0x00ffff00u  // SECTION_ATTRIBUTES_SYS
+};
+
+/// These are the section type and attributes fields.  A MachO section can
+/// have only one Type, but can have any of the attributes specified.
+enum SectionType : uint32_t {
+  // Constant masks for the "flags[7:0]" field in llvm::MachO::section and
+  // llvm::MachO::section_64 (mask "flags" with SECTION_TYPE)
+
+  /// S_REGULAR - Regular section.
+  S_REGULAR = 0x00u,
+  /// S_ZEROFILL - Zero fill on demand section.
+  S_ZEROFILL = 0x01u,
+  /// S_CSTRING_LITERALS - Section with literal C strings.
+  S_CSTRING_LITERALS = 0x02u,
+  /// S_4BYTE_LITERALS - Section with 4 byte literals.
+  S_4BYTE_LITERALS = 0x03u,
+  /// S_8BYTE_LITERALS - Section with 8 byte literals.
+  S_8BYTE_LITERALS = 0x04u,
+  /// S_LITERAL_POINTERS - Section with pointers to literals.
+  S_LITERAL_POINTERS = 0x05u,
+  /// S_NON_LAZY_SYMBOL_POINTERS - Section with non-lazy symbol pointers.
+  S_NON_LAZY_SYMBOL_POINTERS = 0x06u,
+  /// S_LAZY_SYMBOL_POINTERS - Section with lazy symbol pointers.
+  S_LAZY_SYMBOL_POINTERS = 0x07u,
+  /// S_SYMBOL_STUBS - Section with symbol stubs, byte size of stub in
+  /// the Reserved2 field.
+  S_SYMBOL_STUBS = 0x08u,
+  /// S_MOD_INIT_FUNC_POINTERS - Section with only function pointers for
+  /// initialization.
+  S_MOD_INIT_FUNC_POINTERS = 0x09u,
+  /// S_MOD_TERM_FUNC_POINTERS - Section with only function pointers for
+  /// termination.
+  S_MOD_TERM_FUNC_POINTERS = 0x0au,
+  /// S_COALESCED - Section contains symbols that are to be coalesced.
+  S_COALESCED = 0x0bu,
+  /// S_GB_ZEROFILL - Zero fill on demand section (that can be larger than 4
+  /// gigabytes).
+  S_GB_ZEROFILL = 0x0cu,
+  /// S_INTERPOSING - Section with only pairs of function pointers for
+  /// interposing.
+  S_INTERPOSING = 0x0du,
+  /// S_16BYTE_LITERALS - Section with only 16 byte literals.
+  S_16BYTE_LITERALS = 0x0eu,
+  /// S_DTRACE_DOF - Section contains DTrace Object Format.
+  S_DTRACE_DOF = 0x0fu,
+  /// S_LAZY_DYLIB_SYMBOL_POINTERS - Section with lazy symbol pointers to
+  /// lazy loaded dylibs.
+  S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10u,
+  /// S_THREAD_LOCAL_REGULAR - Thread local data section.
+  S_THREAD_LOCAL_REGULAR = 0x11u,
+  /// S_THREAD_LOCAL_ZEROFILL - Thread local zerofill section.
+  S_THREAD_LOCAL_ZEROFILL = 0x12u,
+  /// S_THREAD_LOCAL_VARIABLES - Section with thread local variable
+  /// structure data.
+  S_THREAD_LOCAL_VARIABLES = 0x13u,
+  /// S_THREAD_LOCAL_VARIABLE_POINTERS - Section with pointers to thread
+  /// local structures.
+  S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14u,
+  /// S_THREAD_LOCAL_INIT_FUNCTION_POINTERS - Section with thread local
+  /// variable initialization pointers to functions.
+  S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15u,
+
+  LAST_KNOWN_SECTION_TYPE = S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
+};
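+
+// The type occupies the low byte of the shared "flags" word and the
+// attributes occupy the rest, so extracting either is a single mask. Sketch
+// (hypothetical helpers, not part of this header):
+//
+//   inline SectionType getSectionType(uint32_t Flags) {
+//     return SectionType(Flags & SECTION_TYPE);
+//   }
+//   inline uint32_t getSectionAttributes(uint32_t Flags) {
+//     return Flags & SECTION_ATTRIBUTES;
+//   }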
+
+enum : uint32_t {
+  // Constant masks for the "flags[31:24]" field in llvm::MachO::section and
+  // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_USR)
+
+  /// S_ATTR_PURE_INSTRUCTIONS - Section contains only true machine
+  /// instructions.
+  S_ATTR_PURE_INSTRUCTIONS = 0x80000000u,
+  /// S_ATTR_NO_TOC - Section contains coalesced symbols that are not to be
+  /// in a ranlib table of contents.
+  S_ATTR_NO_TOC = 0x40000000u,
+  /// S_ATTR_STRIP_STATIC_SYMS - Ok to strip static symbols in this section
+/// in files with the MH_DYLDLINK flag.
+  S_ATTR_STRIP_STATIC_SYMS = 0x20000000u,
+  /// S_ATTR_NO_DEAD_STRIP - No dead stripping.
+  S_ATTR_NO_DEAD_STRIP = 0x10000000u,
+  /// S_ATTR_LIVE_SUPPORT - Blocks are live if they reference live blocks.
+  S_ATTR_LIVE_SUPPORT = 0x08000000u,
+  /// S_ATTR_SELF_MODIFYING_CODE - Used with i386 code stubs written on by
+  /// dyld.
+  S_ATTR_SELF_MODIFYING_CODE = 0x04000000u,
+  /// S_ATTR_DEBUG - A debug section.
+  S_ATTR_DEBUG = 0x02000000u,
+
+  // Constant masks for the "flags[23:8]" field in llvm::MachO::section and
+  // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_SYS)
+
+  /// S_ATTR_SOME_INSTRUCTIONS - Section contains some machine instructions.
+  S_ATTR_SOME_INSTRUCTIONS = 0x00000400u,
+  /// S_ATTR_EXT_RELOC - Section has external relocation entries.
+  S_ATTR_EXT_RELOC = 0x00000200u,
+  /// S_ATTR_LOC_RELOC - Section has local relocation entries.
+  S_ATTR_LOC_RELOC = 0x00000100u,
+
+  // Constant masks for the value of an indirect symbol in an indirect
+  // symbol table
+  INDIRECT_SYMBOL_LOCAL = 0x80000000u,
+  INDIRECT_SYMBOL_ABS = 0x40000000u
+};
+
+enum DataRegionType {
+  // Constants for the "kind" field in a data_in_code_entry structure
+  DICE_KIND_DATA = 1u,
+  DICE_KIND_JUMP_TABLE8 = 2u,
+  DICE_KIND_JUMP_TABLE16 = 3u,
+  DICE_KIND_JUMP_TABLE32 = 4u,
+  DICE_KIND_ABS_JUMP_TABLE32 = 5u
+};
+
+enum RebaseType {
+  REBASE_TYPE_POINTER = 1u,
+  REBASE_TYPE_TEXT_ABSOLUTE32 = 2u,
+  REBASE_TYPE_TEXT_PCREL32 = 3u
+};
+
+enum { REBASE_OPCODE_MASK = 0xF0u, REBASE_IMMEDIATE_MASK = 0x0Fu };
+
+enum RebaseOpcode {
+  REBASE_OPCODE_DONE = 0x00u,
+  REBASE_OPCODE_SET_TYPE_IMM = 0x10u,
+  REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20u,
+  REBASE_OPCODE_ADD_ADDR_ULEB = 0x30u,
+  REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40u,
+  REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50u,
+  REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60u,
+  REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70u,
+  REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80u
+};
+
+enum BindType {
+  BIND_TYPE_POINTER = 1u,
+  BIND_TYPE_TEXT_ABSOLUTE32 = 2u,
+  BIND_TYPE_TEXT_PCREL32 = 3u
+};
+
+enum BindSpecialDylib {
+  BIND_SPECIAL_DYLIB_SELF = 0,
+  BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1,
+  BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2
+};
+
+enum {
+  BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1u,
+  BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION = 0x8u,
+
+  BIND_OPCODE_MASK = 0xF0u,
+  BIND_IMMEDIATE_MASK = 0x0Fu
+};
+
+enum BindOpcode {
+  BIND_OPCODE_DONE = 0x00u,
+  BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10u,
+  BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20u,
+  BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30u,
+  BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40u,
+  BIND_OPCODE_SET_TYPE_IMM = 0x50u,
+  BIND_OPCODE_SET_ADDEND_SLEB = 0x60u,
+  BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70u,
+  BIND_OPCODE_ADD_ADDR_ULEB = 0x80u,
+  BIND_OPCODE_DO_BIND = 0x90u,
+  BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0u,
+  BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0u,
+  BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0u
+};
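+
+// Rebase and bind programs are byte streams: each byte packs an opcode in
+// its high nibble and an immediate operand in its low nibble, with any larger
+// operands following as (S/U)LEB128. Decoding sketch (hypothetical):
+//
+//   uint8_t Byte = *Ptr++;
+//   uint8_t Opcode = Byte & BIND_OPCODE_MASK;    // e.g. BIND_OPCODE_DO_BIND
+//   uint8_t Imm = Byte & BIND_IMMEDIATE_MASK;    // opcode-specific operand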
+
+enum {
+  EXPORT_SYMBOL_FLAGS_KIND_MASK = 0x03u,
+  EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION = 0x04u,
+  EXPORT_SYMBOL_FLAGS_REEXPORT = 0x08u,
+  EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER = 0x10u
+};
+
+enum ExportSymbolKind {
+  EXPORT_SYMBOL_FLAGS_KIND_REGULAR = 0x00u,
+  EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL = 0x01u,
+  EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE = 0x02u
+};
+
+enum {
+  // Constant masks for the "n_type" field in llvm::MachO::nlist and
+  // llvm::MachO::nlist_64
+  N_STAB = 0xe0,
+  N_PEXT = 0x10,
+  N_TYPE = 0x0e,
+  N_EXT = 0x01
+};
+
+enum NListType : uint8_t {
+  // Constants for the "n_type & N_TYPE" value in llvm::MachO::nlist and
+  // llvm::MachO::nlist_64
+  N_UNDF = 0x0u,
+  N_ABS = 0x2u,
+  N_SECT = 0xeu,
+  N_PBUD = 0xcu,
+  N_INDR = 0xau
+};
+
+enum SectionOrdinal {
+  // Constants for the "n_sect" field in llvm::MachO::nlist and
+  // llvm::MachO::nlist_64
+  NO_SECT = 0u,
+  MAX_SECT = 0xffu
+};
+
+enum {
+  // Constant masks for the "n_desc" field in llvm::MachO::nlist and
+  // llvm::MachO::nlist_64
+  // The low 3 bits are for the REFERENCE_TYPE.
+  REFERENCE_TYPE = 0x7,
+  REFERENCE_FLAG_UNDEFINED_NON_LAZY = 0,
+  REFERENCE_FLAG_UNDEFINED_LAZY = 1,
+  REFERENCE_FLAG_DEFINED = 2,
+  REFERENCE_FLAG_PRIVATE_DEFINED = 3,
+  REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY = 4,
+  REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY = 5,
+  // Flag bits (some overlap with the library ordinal bits).
+  N_ARM_THUMB_DEF = 0x0008u,
+  REFERENCED_DYNAMICALLY = 0x0010u,
+  N_NO_DEAD_STRIP = 0x0020u,
+  N_WEAK_REF = 0x0040u,
+  N_WEAK_DEF = 0x0080u,
+  N_SYMBOL_RESOLVER = 0x0100u,
+  N_ALT_ENTRY = 0x0200u,
+  // For undefined symbols coming from libraries, see GET_LIBRARY_ORDINAL()
+  // as these are in the top 8 bits.
+  SELF_LIBRARY_ORDINAL = 0x0,
+  MAX_LIBRARY_ORDINAL = 0xfd,
+  DYNAMIC_LOOKUP_ORDINAL = 0xfe,
+  EXECUTABLE_ORDINAL = 0xff
+};
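+
+// For undefined symbols, the two-level-namespace library ordinal sits in the
+// top 8 bits of n_desc; GET_LIBRARY_ORDINAL() in Apple's <mach-o/loader.h>
+// is conventionally ((n_desc >> 8) & 0xff). Sketch (hypothetical helper):
+//
+//   inline uint8_t getLibraryOrdinal(uint16_t n_desc) {
+//     return (n_desc >> 8) & 0xff;
+//   }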
+
+enum StabType {
+  // Constant values for the "n_type" field in llvm::MachO::nlist and
+  // llvm::MachO::nlist_64 when "(n_type & N_STAB) != 0"
+  N_GSYM = 0x20u,
+  N_FNAME = 0x22u,
+  N_FUN = 0x24u,
+  N_STSYM = 0x26u,
+  N_LCSYM = 0x28u,
+  N_BNSYM = 0x2Eu,
+  N_PC = 0x30u,
+  N_AST = 0x32u,
+  N_OPT = 0x3Cu,
+  N_RSYM = 0x40u,
+  N_SLINE = 0x44u,
+  N_ENSYM = 0x4Eu,
+  N_SSYM = 0x60u,
+  N_SO = 0x64u,
+  N_OSO = 0x66u,
+  N_LSYM = 0x80u,
+  N_BINCL = 0x82u,
+  N_SOL = 0x84u,
+  N_PARAMS = 0x86u,
+  N_VERSION = 0x88u,
+  N_OLEVEL = 0x8Au,
+  N_PSYM = 0xA0u,
+  N_EINCL = 0xA2u,
+  N_ENTRY = 0xA4u,
+  N_LBRAC = 0xC0u,
+  N_EXCL = 0xC2u,
+  N_RBRAC = 0xE0u,
+  N_BCOMM = 0xE2u,
+  N_ECOMM = 0xE4u,
+  N_ECOML = 0xE8u,
+  N_LENG = 0xFEu
+};
+
+enum : uint32_t {
+  // Constant values for the r_symbolnum field in an
+  // llvm::MachO::relocation_info structure when r_extern is 0.
+  R_ABS = 0,
+
+  // Constant bits for the r_address field in an
+  // llvm::MachO::relocation_info structure.
+  R_SCATTERED = 0x80000000
+};
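+
+// R_SCATTERED is the high bit of the first word of a relocation entry; when
+// set, the entry must be reinterpreted as a scattered_relocation_info rather
+// than a relocation_info. Sketch (hypothetical check):
+//
+//   bool IsScattered = (FirstWord & R_SCATTERED) != 0;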
+
+enum RelocationInfoType {
+  // Constant values for the r_type field in an
+  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+  // structure.
+  GENERIC_RELOC_VANILLA = 0,
+  GENERIC_RELOC_PAIR = 1,
+  GENERIC_RELOC_SECTDIFF = 2,
+  GENERIC_RELOC_PB_LA_PTR = 3,
+  GENERIC_RELOC_LOCAL_SECTDIFF = 4,
+  GENERIC_RELOC_TLV = 5,
+
+  // Constant values for the r_type field in a PowerPC architecture
+  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+  // structure.
+  PPC_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
+  PPC_RELOC_PAIR = GENERIC_RELOC_PAIR,
+  PPC_RELOC_BR14 = 2,
+  PPC_RELOC_BR24 = 3,
+  PPC_RELOC_HI16 = 4,
+  PPC_RELOC_LO16 = 5,
+  PPC_RELOC_HA16 = 6,
+  PPC_RELOC_LO14 = 7,
+  PPC_RELOC_SECTDIFF = 8,
+  PPC_RELOC_PB_LA_PTR = 9,
+  PPC_RELOC_HI16_SECTDIFF = 10,
+  PPC_RELOC_LO16_SECTDIFF = 11,
+  PPC_RELOC_HA16_SECTDIFF = 12,
+  PPC_RELOC_JBSR = 13,
+  PPC_RELOC_LO14_SECTDIFF = 14,
+  PPC_RELOC_LOCAL_SECTDIFF = 15,
+
+  // Constant values for the r_type field in an ARM architecture
+  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+  // structure.
+  ARM_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
+  ARM_RELOC_PAIR = GENERIC_RELOC_PAIR,
+  ARM_RELOC_SECTDIFF = GENERIC_RELOC_SECTDIFF,
+  ARM_RELOC_LOCAL_SECTDIFF = 3,
+  ARM_RELOC_PB_LA_PTR = 4,
+  ARM_RELOC_BR24 = 5,
+  ARM_THUMB_RELOC_BR22 = 6,
+  ARM_THUMB_32BIT_BRANCH = 7, // obsolete
+  ARM_RELOC_HALF = 8,
+  ARM_RELOC_HALF_SECTDIFF = 9,
+
+  // Constant values for the r_type field in an ARM64 architecture
+  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+  // structure.
+
+  // For pointers.
+  ARM64_RELOC_UNSIGNED = 0,
+  // Must be followed by an ARM64_RELOC_UNSIGNED
+  ARM64_RELOC_SUBTRACTOR = 1,
+  // A B/BL instruction with 26-bit displacement.
+  ARM64_RELOC_BRANCH26 = 2,
+  // PC-rel distance to page of target.
+  ARM64_RELOC_PAGE21 = 3,
+  // Offset within page, scaled by r_length.
+  ARM64_RELOC_PAGEOFF12 = 4,
+  // PC-rel distance to page of GOT slot.
+  ARM64_RELOC_GOT_LOAD_PAGE21 = 5,
+  // Offset within page of GOT slot, scaled by r_length.
+  ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6,
+  // For pointers to GOT slots.
+  ARM64_RELOC_POINTER_TO_GOT = 7,
+  // PC-rel distance to page of TLVP slot.
+  ARM64_RELOC_TLVP_LOAD_PAGE21 = 8,
+  // Offset within page of TLVP slot, scaled by r_length.
+  ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9,
+  // Must be followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12.
+  ARM64_RELOC_ADDEND = 10,
+
+  // Constant values for the r_type field in an x86_64 architecture
+  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+  // structure
+  X86_64_RELOC_UNSIGNED = 0,
+  X86_64_RELOC_SIGNED = 1,
+  X86_64_RELOC_BRANCH = 2,
+  X86_64_RELOC_GOT_LOAD = 3,
+  X86_64_RELOC_GOT = 4,
+  X86_64_RELOC_SUBTRACTOR = 5,
+  X86_64_RELOC_SIGNED_1 = 6,
+  X86_64_RELOC_SIGNED_2 = 7,
+  X86_64_RELOC_SIGNED_4 = 8,
+  X86_64_RELOC_TLV = 9
+};
+
+// Values for segment_command.initprot.
+// From <mach/vm_prot.h>
+enum { VM_PROT_READ = 0x1, VM_PROT_WRITE = 0x2, VM_PROT_EXECUTE = 0x4 };
+
+// Values for platform field in build_version_command.
+enum PlatformType {
+  PLATFORM_MACOS = 1,
+  PLATFORM_IOS = 2,
+  PLATFORM_TVOS = 3,
+  PLATFORM_WATCHOS = 4,
+  PLATFORM_BRIDGEOS = 5
+};
+
+// Values for tools enum in build_tool_version.
+enum { TOOL_CLANG = 1, TOOL_SWIFT = 2, TOOL_LD = 3 };
+
+// Structs from <mach-o/loader.h>
+
+struct mach_header {
+  uint32_t magic;
+  uint32_t cputype;
+  uint32_t cpusubtype;
+  uint32_t filetype;
+  uint32_t ncmds;
+  uint32_t sizeofcmds;
+  uint32_t flags;
+};
+
+struct mach_header_64 {
+  uint32_t magic;
+  uint32_t cputype;
+  uint32_t cpusubtype;
+  uint32_t filetype;
+  uint32_t ncmds;
+  uint32_t sizeofcmds;
+  uint32_t flags;
+  uint32_t reserved;
+};
+
+struct load_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+};
+
+struct segment_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  char segname[16];
+  uint32_t vmaddr;
+  uint32_t vmsize;
+  uint32_t fileoff;
+  uint32_t filesize;
+  uint32_t maxprot;
+  uint32_t initprot;
+  uint32_t nsects;
+  uint32_t flags;
+};
+
+struct segment_command_64 {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  char segname[16];
+  uint64_t vmaddr;
+  uint64_t vmsize;
+  uint64_t fileoff;
+  uint64_t filesize;
+  uint32_t maxprot;
+  uint32_t initprot;
+  uint32_t nsects;
+  uint32_t flags;
+};
+
+struct section {
+  char sectname[16];
+  char segname[16];
+  uint32_t addr;
+  uint32_t size;
+  uint32_t offset;
+  uint32_t align;
+  uint32_t reloff;
+  uint32_t nreloc;
+  uint32_t flags;
+  uint32_t reserved1;
+  uint32_t reserved2;
+};
+
+struct section_64 {
+  char sectname[16];
+  char segname[16];
+  uint64_t addr;
+  uint64_t size;
+  uint32_t offset;
+  uint32_t align;
+  uint32_t reloff;
+  uint32_t nreloc;
+  uint32_t flags;
+  uint32_t reserved1;
+  uint32_t reserved2;
+  uint32_t reserved3;
+};
+
+struct fvmlib {
+  uint32_t name;
+  uint32_t minor_version;
+  uint32_t header_addr;
+};
+
+// The fvmlib_command is obsolete and no longer supported.
+struct fvmlib_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  struct fvmlib fvmlib;
+};
+
+struct dylib {
+  uint32_t name;
+  uint32_t timestamp;
+  uint32_t current_version;
+  uint32_t compatibility_version;
+};
+
+struct dylib_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  struct dylib dylib;
+};
+
+struct sub_framework_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t umbrella;
+};
+
+struct sub_client_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t client;
+};
+
+struct sub_umbrella_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t sub_umbrella;
+};
+
+struct sub_library_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t sub_library;
+};
+
+// The prebound_dylib_command is obsolete and no longer supported.
+struct prebound_dylib_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t name;
+  uint32_t nmodules;
+  uint32_t linked_modules;
+};
+
+struct dylinker_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t name;
+};
+
+struct thread_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+};
+
+struct routines_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t init_address;
+  uint32_t init_module;
+  uint32_t reserved1;
+  uint32_t reserved2;
+  uint32_t reserved3;
+  uint32_t reserved4;
+  uint32_t reserved5;
+  uint32_t reserved6;
+};
+
+struct routines_command_64 {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint64_t init_address;
+  uint64_t init_module;
+  uint64_t reserved1;
+  uint64_t reserved2;
+  uint64_t reserved3;
+  uint64_t reserved4;
+  uint64_t reserved5;
+  uint64_t reserved6;
+};
+
+struct symtab_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t symoff;
+  uint32_t nsyms;
+  uint32_t stroff;
+  uint32_t strsize;
+};
+
+struct dysymtab_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t ilocalsym;
+  uint32_t nlocalsym;
+  uint32_t iextdefsym;
+  uint32_t nextdefsym;
+  uint32_t iundefsym;
+  uint32_t nundefsym;
+  uint32_t tocoff;
+  uint32_t ntoc;
+  uint32_t modtaboff;
+  uint32_t nmodtab;
+  uint32_t extrefsymoff;
+  uint32_t nextrefsyms;
+  uint32_t indirectsymoff;
+  uint32_t nindirectsyms;
+  uint32_t extreloff;
+  uint32_t nextrel;
+  uint32_t locreloff;
+  uint32_t nlocrel;
+};
+
+struct dylib_table_of_contents {
+  uint32_t symbol_index;
+  uint32_t module_index;
+};
+
+struct dylib_module {
+  uint32_t module_name;
+  uint32_t iextdefsym;
+  uint32_t nextdefsym;
+  uint32_t irefsym;
+  uint32_t nrefsym;
+  uint32_t ilocalsym;
+  uint32_t nlocalsym;
+  uint32_t iextrel;
+  uint32_t nextrel;
+  uint32_t iinit_iterm;
+  uint32_t ninit_nterm;
+  uint32_t objc_module_info_addr;
+  uint32_t objc_module_info_size;
+};
+
+struct dylib_module_64 {
+  uint32_t module_name;
+  uint32_t iextdefsym;
+  uint32_t nextdefsym;
+  uint32_t irefsym;
+  uint32_t nrefsym;
+  uint32_t ilocalsym;
+  uint32_t nlocalsym;
+  uint32_t iextrel;
+  uint32_t nextrel;
+  uint32_t iinit_iterm;
+  uint32_t ninit_nterm;
+  uint32_t objc_module_info_size;
+  uint64_t objc_module_info_addr;
+};
+
+struct dylib_reference {
+  uint32_t isym : 24, flags : 8;
+};
+
+// The twolevel_hints_command is obsolete and no longer supported.
+struct twolevel_hints_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t offset;
+  uint32_t nhints;
+};
+
+// The twolevel_hints_command is obsolete and no longer supported.
+struct twolevel_hint {
+  uint32_t isub_image : 8, itoc : 24;
+};
+
+// The prebind_cksum_command is obsolete and no longer supported.
+struct prebind_cksum_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t cksum;
+};
+
+struct uuid_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint8_t uuid[16];
+};
+
+struct rpath_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t path;
+};
+
+struct linkedit_data_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t dataoff;
+  uint32_t datasize;
+};
+
+struct data_in_code_entry {
+  uint32_t offset;
+  uint16_t length;
+  uint16_t kind;
+};
+
+struct source_version_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint64_t version;
+};
+
+struct encryption_info_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t cryptoff;
+  uint32_t cryptsize;
+  uint32_t cryptid;
+};
+
+struct encryption_info_command_64 {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t cryptoff;
+  uint32_t cryptsize;
+  uint32_t cryptid;
+  uint32_t pad;
+};
+
+struct version_min_command {
+  uint32_t cmd;     // LC_VERSION_MIN_MACOSX or
+                    // LC_VERSION_MIN_IPHONEOS
+  uint32_t cmdsize; // sizeof(struct version_min_command)
+  uint32_t version; // X.Y.Z is encoded in nibbles xxxx.yy.zz
+  uint32_t sdk;     // X.Y.Z is encoded in nibbles xxxx.yy.zz
+};
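+
+// Worked example (illustrative, not from the original header): the nibble
+// encoding puts X in the high 16 bits, Y in the next 8, and Z in the low 8,
+// so 0x000A0E00 decodes to 10.14.0:
+//   uint32_t X = (version >> 16) & 0xffff;
+//   uint32_t Y = (version >> 8) & 0xff;
+//   uint32_t Z = version & 0xff;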
+
+struct note_command {
+  uint32_t cmd;        // LC_NOTE
+  uint32_t cmdsize;    // sizeof(struct note_command)
+  char data_owner[16]; // owner name for this LC_NOTE
+  uint64_t offset;     // file offset of this data
+  uint64_t size;       // length of data region
+};
+
+struct build_tool_version {
+  uint32_t tool;    // enum for the tool
+  uint32_t version; // version of the tool
+};
+
+struct build_version_command {
+  uint32_t cmd;      // LC_BUILD_VERSION
+  uint32_t cmdsize;  // sizeof(struct build_version_command) +
+                     // ntools * sizeof(struct build_tool_version)
+  uint32_t platform; // platform
+  uint32_t minos;    // X.Y.Z is encoded in nibbles xxxx.yy.zz
+  uint32_t sdk;      // X.Y.Z is encoded in nibbles xxxx.yy.zz
+  uint32_t ntools;   // number of tool entries following this
+};
+
+struct dyld_info_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t rebase_off;
+  uint32_t rebase_size;
+  uint32_t bind_off;
+  uint32_t bind_size;
+  uint32_t weak_bind_off;
+  uint32_t weak_bind_size;
+  uint32_t lazy_bind_off;
+  uint32_t lazy_bind_size;
+  uint32_t export_off;
+  uint32_t export_size;
+};
+
+struct linker_option_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t count;
+};
+
+// The symseg_command is obsolete and no longer supported.
+struct symseg_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t offset;
+  uint32_t size;
+};
+
+// The ident_command is obsolete and no longer supported.
+struct ident_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+};
+
+// The fvmfile_command is obsolete and no longer supported.
+struct fvmfile_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint32_t name;
+  uint32_t header_addr;
+};
+
+struct tlv_descriptor_32 {
+  uint32_t thunk;
+  uint32_t key;
+  uint32_t offset;
+};
+
+struct tlv_descriptor_64 {
+  uint64_t thunk;
+  uint64_t key;
+  uint64_t offset;
+};
+
+struct tlv_descriptor {
+  uintptr_t thunk;
+  uintptr_t key;
+  uintptr_t offset;
+};
+
+struct entry_point_command {
+  uint32_t cmd;
+  uint32_t cmdsize;
+  uint64_t entryoff;
+  uint64_t stacksize;
+};
+
+// Structs from <mach-o/fat.h>
+struct fat_header {
+  uint32_t magic;
+  uint32_t nfat_arch;
+};
+
+struct fat_arch {
+  uint32_t cputype;
+  uint32_t cpusubtype;
+  uint32_t offset;
+  uint32_t size;
+  uint32_t align;
+};
+
+struct fat_arch_64 {
+  uint32_t cputype;
+  uint32_t cpusubtype;
+  uint64_t offset;
+  uint64_t size;
+  uint32_t align;
+  uint32_t reserved;
+};
+
+// Structs from <mach-o/reloc.h>
+struct relocation_info {
+  int32_t r_address;
+  uint32_t r_symbolnum : 24, r_pcrel : 1, r_length : 2, r_extern : 1,
+      r_type : 4;
+};
+
+struct scattered_relocation_info {
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && (BYTE_ORDER == BIG_ENDIAN)
+  uint32_t r_scattered : 1, r_pcrel : 1, r_length : 2, r_type : 4,
+      r_address : 24;
+#else
+  uint32_t r_address : 24, r_type : 4, r_length : 2, r_pcrel : 1,
+      r_scattered : 1;
+#endif
+  int32_t r_value;
+};
+
+// Structs NOT from <mach-o/reloc.h>, but that make LLVM's life easier
+struct any_relocation_info {
+  uint32_t r_word0, r_word1;
+};
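+
+// Illustrative sketch (an assumption, not from the original header): once the
+// words are in host byte order, a scattered entry can be recognized by
+// testing the R_SCATTERED bit in the first word:
+//   bool IsScattered = (Reloc.r_word0 & R_SCATTERED) != 0;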
+
+// Structs from <mach-o/nlist.h>
+struct nlist_base {
+  uint32_t n_strx;
+  uint8_t n_type;
+  uint8_t n_sect;
+  uint16_t n_desc;
+};
+
+struct nlist {
+  uint32_t n_strx;
+  uint8_t n_type;
+  uint8_t n_sect;
+  int16_t n_desc;
+  uint32_t n_value;
+};
+
+struct nlist_64 {
+  uint32_t n_strx;
+  uint8_t n_type;
+  uint8_t n_sect;
+  uint16_t n_desc;
+  uint64_t n_value;
+};
+
+// Byte order swapping functions for MachO structs
+
+inline void swapStruct(fat_header &mh) {
+  sys::swapByteOrder(mh.magic);
+  sys::swapByteOrder(mh.nfat_arch);
+}
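+
+// Typical use of these helpers (an illustrative sketch, assuming FAT_CIGAM is
+// the byte-swapped fat magic defined earlier in this header): after reading a
+// header in the file's byte order, swap it in place when it does not match
+// the host:
+//   fat_header FH;
+//   memcpy(&FH, Buf, sizeof(FH)); // raw bytes from the file
+//   if (FH.magic == FAT_CIGAM)
+//     swapStruct(FH);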
+
+inline void swapStruct(fat_arch &mh) {
+  sys::swapByteOrder(mh.cputype);
+  sys::swapByteOrder(mh.cpusubtype);
+  sys::swapByteOrder(mh.offset);
+  sys::swapByteOrder(mh.size);
+  sys::swapByteOrder(mh.align);
+}
+
+inline void swapStruct(fat_arch_64 &mh) {
+  sys::swapByteOrder(mh.cputype);
+  sys::swapByteOrder(mh.cpusubtype);
+  sys::swapByteOrder(mh.offset);
+  sys::swapByteOrder(mh.size);
+  sys::swapByteOrder(mh.align);
+  sys::swapByteOrder(mh.reserved);
+}
+
+inline void swapStruct(mach_header &mh) {
+  sys::swapByteOrder(mh.magic);
+  sys::swapByteOrder(mh.cputype);
+  sys::swapByteOrder(mh.cpusubtype);
+  sys::swapByteOrder(mh.filetype);
+  sys::swapByteOrder(mh.ncmds);
+  sys::swapByteOrder(mh.sizeofcmds);
+  sys::swapByteOrder(mh.flags);
+}
+
+inline void swapStruct(mach_header_64 &H) {
+  sys::swapByteOrder(H.magic);
+  sys::swapByteOrder(H.cputype);
+  sys::swapByteOrder(H.cpusubtype);
+  sys::swapByteOrder(H.filetype);
+  sys::swapByteOrder(H.ncmds);
+  sys::swapByteOrder(H.sizeofcmds);
+  sys::swapByteOrder(H.flags);
+  sys::swapByteOrder(H.reserved);
+}
+
+inline void swapStruct(load_command &lc) {
+  sys::swapByteOrder(lc.cmd);
+  sys::swapByteOrder(lc.cmdsize);
+}
+
+inline void swapStruct(symtab_command &lc) {
+  sys::swapByteOrder(lc.cmd);
+  sys::swapByteOrder(lc.cmdsize);
+  sys::swapByteOrder(lc.symoff);
+  sys::swapByteOrder(lc.nsyms);
+  sys::swapByteOrder(lc.stroff);
+  sys::swapByteOrder(lc.strsize);
+}
+
+inline void swapStruct(segment_command_64 &seg) {
+  sys::swapByteOrder(seg.cmd);
+  sys::swapByteOrder(seg.cmdsize);
+  sys::swapByteOrder(seg.vmaddr);
+  sys::swapByteOrder(seg.vmsize);
+  sys::swapByteOrder(seg.fileoff);
+  sys::swapByteOrder(seg.filesize);
+  sys::swapByteOrder(seg.maxprot);
+  sys::swapByteOrder(seg.initprot);
+  sys::swapByteOrder(seg.nsects);
+  sys::swapByteOrder(seg.flags);
+}
+
+inline void swapStruct(segment_command &seg) {
+  sys::swapByteOrder(seg.cmd);
+  sys::swapByteOrder(seg.cmdsize);
+  sys::swapByteOrder(seg.vmaddr);
+  sys::swapByteOrder(seg.vmsize);
+  sys::swapByteOrder(seg.fileoff);
+  sys::swapByteOrder(seg.filesize);
+  sys::swapByteOrder(seg.maxprot);
+  sys::swapByteOrder(seg.initprot);
+  sys::swapByteOrder(seg.nsects);
+  sys::swapByteOrder(seg.flags);
+}
+
+inline void swapStruct(section_64 &sect) {
+  sys::swapByteOrder(sect.addr);
+  sys::swapByteOrder(sect.size);
+  sys::swapByteOrder(sect.offset);
+  sys::swapByteOrder(sect.align);
+  sys::swapByteOrder(sect.reloff);
+  sys::swapByteOrder(sect.nreloc);
+  sys::swapByteOrder(sect.flags);
+  sys::swapByteOrder(sect.reserved1);
+  sys::swapByteOrder(sect.reserved2);
+}
+
+inline void swapStruct(section &sect) {
+  sys::swapByteOrder(sect.addr);
+  sys::swapByteOrder(sect.size);
+  sys::swapByteOrder(sect.offset);
+  sys::swapByteOrder(sect.align);
+  sys::swapByteOrder(sect.reloff);
+  sys::swapByteOrder(sect.nreloc);
+  sys::swapByteOrder(sect.flags);
+  sys::swapByteOrder(sect.reserved1);
+  sys::swapByteOrder(sect.reserved2);
+}
+
+inline void swapStruct(dyld_info_command &info) {
+  sys::swapByteOrder(info.cmd);
+  sys::swapByteOrder(info.cmdsize);
+  sys::swapByteOrder(info.rebase_off);
+  sys::swapByteOrder(info.rebase_size);
+  sys::swapByteOrder(info.bind_off);
+  sys::swapByteOrder(info.bind_size);
+  sys::swapByteOrder(info.weak_bind_off);
+  sys::swapByteOrder(info.weak_bind_size);
+  sys::swapByteOrder(info.lazy_bind_off);
+  sys::swapByteOrder(info.lazy_bind_size);
+  sys::swapByteOrder(info.export_off);
+  sys::swapByteOrder(info.export_size);
+}
+
+inline void swapStruct(dylib_command &d) {
+  sys::swapByteOrder(d.cmd);
+  sys::swapByteOrder(d.cmdsize);
+  sys::swapByteOrder(d.dylib.name);
+  sys::swapByteOrder(d.dylib.timestamp);
+  sys::swapByteOrder(d.dylib.current_version);
+  sys::swapByteOrder(d.dylib.compatibility_version);
+}
+
+inline void swapStruct(sub_framework_command &s) {
+  sys::swapByteOrder(s.cmd);
+  sys::swapByteOrder(s.cmdsize);
+  sys::swapByteOrder(s.umbrella);
+}
+
+inline void swapStruct(sub_umbrella_command &s) {
+  sys::swapByteOrder(s.cmd);
+  sys::swapByteOrder(s.cmdsize);
+  sys::swapByteOrder(s.sub_umbrella);
+}
+
+inline void swapStruct(sub_library_command &s) {
+  sys::swapByteOrder(s.cmd);
+  sys::swapByteOrder(s.cmdsize);
+  sys::swapByteOrder(s.sub_library);
+}
+
+inline void swapStruct(sub_client_command &s) {
+  sys::swapByteOrder(s.cmd);
+  sys::swapByteOrder(s.cmdsize);
+  sys::swapByteOrder(s.client);
+}
+
+inline void swapStruct(routines_command &r) {
+  sys::swapByteOrder(r.cmd);
+  sys::swapByteOrder(r.cmdsize);
+  sys::swapByteOrder(r.init_address);
+  sys::swapByteOrder(r.init_module);
+  sys::swapByteOrder(r.reserved1);
+  sys::swapByteOrder(r.reserved2);
+  sys::swapByteOrder(r.reserved3);
+  sys::swapByteOrder(r.reserved4);
+  sys::swapByteOrder(r.reserved5);
+  sys::swapByteOrder(r.reserved6);
+}
+
+inline void swapStruct(routines_command_64 &r) {
+  sys::swapByteOrder(r.cmd);
+  sys::swapByteOrder(r.cmdsize);
+  sys::swapByteOrder(r.init_address);
+  sys::swapByteOrder(r.init_module);
+  sys::swapByteOrder(r.reserved1);
+  sys::swapByteOrder(r.reserved2);
+  sys::swapByteOrder(r.reserved3);
+  sys::swapByteOrder(r.reserved4);
+  sys::swapByteOrder(r.reserved5);
+  sys::swapByteOrder(r.reserved6);
+}
+
+inline void swapStruct(thread_command &t) {
+  sys::swapByteOrder(t.cmd);
+  sys::swapByteOrder(t.cmdsize);
+}
+
+inline void swapStruct(dylinker_command &d) {
+  sys::swapByteOrder(d.cmd);
+  sys::swapByteOrder(d.cmdsize);
+  sys::swapByteOrder(d.name);
+}
+
+inline void swapStruct(uuid_command &u) {
+  sys::swapByteOrder(u.cmd);
+  sys::swapByteOrder(u.cmdsize);
+}
+
+inline void swapStruct(rpath_command &r) {
+  sys::swapByteOrder(r.cmd);
+  sys::swapByteOrder(r.cmdsize);
+  sys::swapByteOrder(r.path);
+}
+
+inline void swapStruct(source_version_command &s) {
+  sys::swapByteOrder(s.cmd);
+  sys::swapByteOrder(s.cmdsize);
+  sys::swapByteOrder(s.version);
+}
+
+inline void swapStruct(entry_point_command &e) {
+  sys::swapByteOrder(e.cmd);
+  sys::swapByteOrder(e.cmdsize);
+  sys::swapByteOrder(e.entryoff);
+  sys::swapByteOrder(e.stacksize);
+}
+
+inline void swapStruct(encryption_info_command &e) {
+  sys::swapByteOrder(e.cmd);
+  sys::swapByteOrder(e.cmdsize);
+  sys::swapByteOrder(e.cryptoff);
+  sys::swapByteOrder(e.cryptsize);
+  sys::swapByteOrder(e.cryptid);
+}
+
+inline void swapStruct(encryption_info_command_64 &e) {
+  sys::swapByteOrder(e.cmd);
+  sys::swapByteOrder(e.cmdsize);
+  sys::swapByteOrder(e.cryptoff);
+  sys::swapByteOrder(e.cryptsize);
+  sys::swapByteOrder(e.cryptid);
+  sys::swapByteOrder(e.pad);
+}
+
+inline void swapStruct(dysymtab_command &dst) {
+  sys::swapByteOrder(dst.cmd);
+  sys::swapByteOrder(dst.cmdsize);
+  sys::swapByteOrder(dst.ilocalsym);
+  sys::swapByteOrder(dst.nlocalsym);
+  sys::swapByteOrder(dst.iextdefsym);
+  sys::swapByteOrder(dst.nextdefsym);
+  sys::swapByteOrder(dst.iundefsym);
+  sys::swapByteOrder(dst.nundefsym);
+  sys::swapByteOrder(dst.tocoff);
+  sys::swapByteOrder(dst.ntoc);
+  sys::swapByteOrder(dst.modtaboff);
+  sys::swapByteOrder(dst.nmodtab);
+  sys::swapByteOrder(dst.extrefsymoff);
+  sys::swapByteOrder(dst.nextrefsyms);
+  sys::swapByteOrder(dst.indirectsymoff);
+  sys::swapByteOrder(dst.nindirectsyms);
+  sys::swapByteOrder(dst.extreloff);
+  sys::swapByteOrder(dst.nextrel);
+  sys::swapByteOrder(dst.locreloff);
+  sys::swapByteOrder(dst.nlocrel);
+}
+
+inline void swapStruct(any_relocation_info &reloc) {
+  sys::swapByteOrder(reloc.r_word0);
+  sys::swapByteOrder(reloc.r_word1);
+}
+
+inline void swapStruct(nlist_base &S) {
+  sys::swapByteOrder(S.n_strx);
+  sys::swapByteOrder(S.n_desc);
+}
+
+inline void swapStruct(nlist &sym) {
+  sys::swapByteOrder(sym.n_strx);
+  sys::swapByteOrder(sym.n_desc);
+  sys::swapByteOrder(sym.n_value);
+}
+
+inline void swapStruct(nlist_64 &sym) {
+  sys::swapByteOrder(sym.n_strx);
+  sys::swapByteOrder(sym.n_desc);
+  sys::swapByteOrder(sym.n_value);
+}
+
+inline void swapStruct(linkedit_data_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.dataoff);
+  sys::swapByteOrder(C.datasize);
+}
+
+inline void swapStruct(linker_option_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.count);
+}
+
+inline void swapStruct(version_min_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.version);
+  sys::swapByteOrder(C.sdk);
+}
+
+inline void swapStruct(note_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.offset);
+  sys::swapByteOrder(C.size);
+}
+
+inline void swapStruct(build_version_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.platform);
+  sys::swapByteOrder(C.minos);
+  sys::swapByteOrder(C.sdk);
+  sys::swapByteOrder(C.ntools);
+}
+
+inline void swapStruct(build_tool_version &C) {
+  sys::swapByteOrder(C.tool);
+  sys::swapByteOrder(C.version);
+}
+
+inline void swapStruct(data_in_code_entry &C) {
+  sys::swapByteOrder(C.offset);
+  sys::swapByteOrder(C.length);
+  sys::swapByteOrder(C.kind);
+}
+
+inline void swapStruct(uint32_t &C) { sys::swapByteOrder(C); }
+
+// The prebind_cksum_command is obsolete and no longer supported.
+inline void swapStruct(prebind_cksum_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.cksum);
+}
+
+// The twolevel_hints_command is obsolete and no longer supported.
+inline void swapStruct(twolevel_hints_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.offset);
+  sys::swapByteOrder(C.nhints);
+}
+
+// The prebound_dylib_command is obsolete and no longer supported.
+inline void swapStruct(prebound_dylib_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.name);
+  sys::swapByteOrder(C.nmodules);
+  sys::swapByteOrder(C.linked_modules);
+}
+
+// The fvmfile_command is obsolete and no longer supported.
+inline void swapStruct(fvmfile_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.name);
+  sys::swapByteOrder(C.header_addr);
+}
+
+// The symseg_command is obsolete and no longer supported.
+inline void swapStruct(symseg_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  sys::swapByteOrder(C.offset);
+  sys::swapByteOrder(C.size);
+}
+
+// The ident_command is obsolete and no longer supported.
+inline void swapStruct(ident_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+}
+
+inline void swapStruct(fvmlib &C) {
+  sys::swapByteOrder(C.name);
+  sys::swapByteOrder(C.minor_version);
+  sys::swapByteOrder(C.header_addr);
+}
+
+// The fvmlib_command is obsolete and no longer supported.
+inline void swapStruct(fvmlib_command &C) {
+  sys::swapByteOrder(C.cmd);
+  sys::swapByteOrder(C.cmdsize);
+  swapStruct(C.fvmlib);
+}
+
+// Get/Set functions from <mach-o/nlist.h>
+
+inline uint16_t GET_LIBRARY_ORDINAL(uint16_t n_desc) {
+  return (((n_desc) >> 8u) & 0xffu);
+}
+
+inline void SET_LIBRARY_ORDINAL(uint16_t &n_desc, uint8_t ordinal) {
+  n_desc = (((n_desc)&0x00ff) | (((ordinal)&0xff) << 8));
+}
+
+inline uint8_t GET_COMM_ALIGN(uint16_t n_desc) {
+  return (n_desc >> 8u) & 0x0fu;
+}
+
+inline void SET_COMM_ALIGN(uint16_t &n_desc, uint8_t align) {
+  n_desc = ((n_desc & 0xf0ffu) | ((align & 0x0fu) << 8u));
+}
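+
+// Round-trip sketch for the ordinal helpers above (illustrative only):
+//   uint16_t Desc = 0;
+//   SET_LIBRARY_ORDINAL(Desc, 2);           // bind to the second dylib
+//   assert(GET_LIBRARY_ORDINAL(Desc) == 2); // ordinal lives in the top 8 bits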
+
+// Enums from <mach/machine.h>
+enum : uint32_t {
+  // Capability bits used in the definition of cpu_type.
+  CPU_ARCH_MASK = 0xff000000, // Mask for architecture bits
+  CPU_ARCH_ABI64 = 0x01000000 // 64 bit ABI
+};
+
+// Constants for the cputype field.
+enum CPUType {
+  CPU_TYPE_ANY = -1,
+  CPU_TYPE_X86 = 7,
+  CPU_TYPE_I386 = CPU_TYPE_X86,
+  CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64,
+  /* CPU_TYPE_MIPS      = 8, */
+  CPU_TYPE_MC98000 = 10, // Old Motorola PowerPC
+  CPU_TYPE_ARM = 12,
+  CPU_TYPE_ARM64 = CPU_TYPE_ARM | CPU_ARCH_ABI64,
+  CPU_TYPE_SPARC = 14,
+  CPU_TYPE_POWERPC = 18,
+  CPU_TYPE_POWERPC64 = CPU_TYPE_POWERPC | CPU_ARCH_ABI64
+};
+
+enum : uint32_t {
+  // Capability bits used in the definition of cpusubtype.
+  CPU_SUBTYPE_MASK = 0xff000000,  // Mask for architecture bits
+  CPU_SUBTYPE_LIB64 = 0x80000000, // 64 bit libraries
+
+  // Special CPU subtype constants.
+  CPU_SUBTYPE_MULTIPLE = ~0u
+};
+
+// Constants for the cpusubtype field.
+enum CPUSubTypeX86 {
+  CPU_SUBTYPE_I386_ALL = 3,
+  CPU_SUBTYPE_386 = 3,
+  CPU_SUBTYPE_486 = 4,
+  CPU_SUBTYPE_486SX = 0x84,
+  CPU_SUBTYPE_586 = 5,
+  CPU_SUBTYPE_PENT = CPU_SUBTYPE_586,
+  CPU_SUBTYPE_PENTPRO = 0x16,
+  CPU_SUBTYPE_PENTII_M3 = 0x36,
+  CPU_SUBTYPE_PENTII_M5 = 0x56,
+  CPU_SUBTYPE_CELERON = 0x67,
+  CPU_SUBTYPE_CELERON_MOBILE = 0x77,
+  CPU_SUBTYPE_PENTIUM_3 = 0x08,
+  CPU_SUBTYPE_PENTIUM_3_M = 0x18,
+  CPU_SUBTYPE_PENTIUM_3_XEON = 0x28,
+  CPU_SUBTYPE_PENTIUM_M = 0x09,
+  CPU_SUBTYPE_PENTIUM_4 = 0x0a,
+  CPU_SUBTYPE_PENTIUM_4_M = 0x1a,
+  CPU_SUBTYPE_ITANIUM = 0x0b,
+  CPU_SUBTYPE_ITANIUM_2 = 0x1b,
+  CPU_SUBTYPE_XEON = 0x0c,
+  CPU_SUBTYPE_XEON_MP = 0x1c,
+
+  CPU_SUBTYPE_X86_ALL = 3,
+  CPU_SUBTYPE_X86_64_ALL = 3,
+  CPU_SUBTYPE_X86_ARCH1 = 4,
+  CPU_SUBTYPE_X86_64_H = 8
+};
+inline int CPU_SUBTYPE_INTEL(int Family, int Model) {
+  return Family | (Model << 4);
+}
+inline int CPU_SUBTYPE_INTEL_FAMILY(CPUSubTypeX86 ST) {
+  return ((int)ST) & 0x0f;
+}
+inline int CPU_SUBTYPE_INTEL_MODEL(CPUSubTypeX86 ST) { return ((int)ST) >> 4; }
+enum { CPU_SUBTYPE_INTEL_FAMILY_MAX = 15, CPU_SUBTYPE_INTEL_MODEL_ALL = 0 };
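+
+// Worked example (illustrative): CPU_SUBTYPE_PENTII_M3 == 0x36 is
+// CPU_SUBTYPE_INTEL(6, 3) == 6 | (3 << 4), and the accessors recover
+// CPU_SUBTYPE_INTEL_FAMILY == 6 and CPU_SUBTYPE_INTEL_MODEL == 3.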
+
+enum CPUSubTypeARM {
+  CPU_SUBTYPE_ARM_ALL = 0,
+  CPU_SUBTYPE_ARM_V4T = 5,
+  CPU_SUBTYPE_ARM_V6 = 6,
+  CPU_SUBTYPE_ARM_V5 = 7,
+  CPU_SUBTYPE_ARM_V5TEJ = 7,
+  CPU_SUBTYPE_ARM_XSCALE = 8,
+  CPU_SUBTYPE_ARM_V7 = 9,
+  //  unused  ARM_V7F     = 10,
+  CPU_SUBTYPE_ARM_V7S = 11,
+  CPU_SUBTYPE_ARM_V7K = 12,
+  CPU_SUBTYPE_ARM_V6M = 14,
+  CPU_SUBTYPE_ARM_V7M = 15,
+  CPU_SUBTYPE_ARM_V7EM = 16
+};
+
+enum CPUSubTypeARM64 { CPU_SUBTYPE_ARM64_ALL = 0 };
+
+enum CPUSubTypeSPARC { CPU_SUBTYPE_SPARC_ALL = 0 };
+
+enum CPUSubTypePowerPC {
+  CPU_SUBTYPE_POWERPC_ALL = 0,
+  CPU_SUBTYPE_POWERPC_601 = 1,
+  CPU_SUBTYPE_POWERPC_602 = 2,
+  CPU_SUBTYPE_POWERPC_603 = 3,
+  CPU_SUBTYPE_POWERPC_603e = 4,
+  CPU_SUBTYPE_POWERPC_603ev = 5,
+  CPU_SUBTYPE_POWERPC_604 = 6,
+  CPU_SUBTYPE_POWERPC_604e = 7,
+  CPU_SUBTYPE_POWERPC_620 = 8,
+  CPU_SUBTYPE_POWERPC_750 = 9,
+  CPU_SUBTYPE_POWERPC_7400 = 10,
+  CPU_SUBTYPE_POWERPC_7450 = 11,
+  CPU_SUBTYPE_POWERPC_970 = 100,
+
+  CPU_SUBTYPE_MC980000_ALL = CPU_SUBTYPE_POWERPC_ALL,
+  CPU_SUBTYPE_MC98601 = CPU_SUBTYPE_POWERPC_601
+};
+
+struct x86_thread_state32_t {
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
+  uint32_t edi;
+  uint32_t esi;
+  uint32_t ebp;
+  uint32_t esp;
+  uint32_t ss;
+  uint32_t eflags;
+  uint32_t eip;
+  uint32_t cs;
+  uint32_t ds;
+  uint32_t es;
+  uint32_t fs;
+  uint32_t gs;
+};
+
+struct x86_thread_state64_t {
+  uint64_t rax;
+  uint64_t rbx;
+  uint64_t rcx;
+  uint64_t rdx;
+  uint64_t rdi;
+  uint64_t rsi;
+  uint64_t rbp;
+  uint64_t rsp;
+  uint64_t r8;
+  uint64_t r9;
+  uint64_t r10;
+  uint64_t r11;
+  uint64_t r12;
+  uint64_t r13;
+  uint64_t r14;
+  uint64_t r15;
+  uint64_t rip;
+  uint64_t rflags;
+  uint64_t cs;
+  uint64_t fs;
+  uint64_t gs;
+};
+
+enum x86_fp_control_precis {
+  x86_FP_PREC_24B = 0,
+  x86_FP_PREC_53B = 2,
+  x86_FP_PREC_64B = 3
+};
+
+enum x86_fp_control_rc {
+  x86_FP_RND_NEAR = 0,
+  x86_FP_RND_DOWN = 1,
+  x86_FP_RND_UP = 2,
+  x86_FP_CHOP = 3
+};
+
+struct fp_control_t {
+  unsigned short invalid : 1, denorm : 1, zdiv : 1, ovrfl : 1, undfl : 1,
+      precis : 1, : 2, pc : 2, rc : 2, : 1, : 3;
+};
+
+struct fp_status_t {
+  unsigned short invalid : 1, denorm : 1, zdiv : 1, ovrfl : 1, undfl : 1,
+      precis : 1, stkflt : 1, errsumm : 1, c0 : 1, c1 : 1, c2 : 1, tos : 3,
+      c3 : 1, busy : 1;
+};
+
+struct mmst_reg_t {
+  char mmst_reg[10];
+  char mmst_rsrv[6];
+};
+
+struct xmm_reg_t {
+  char xmm_reg[16];
+};
+
+struct x86_float_state64_t {
+  int32_t fpu_reserved[2];
+  fp_control_t fpu_fcw;
+  fp_status_t fpu_fsw;
+  uint8_t fpu_ftw;
+  uint8_t fpu_rsrv1;
+  uint16_t fpu_fop;
+  uint32_t fpu_ip;
+  uint16_t fpu_cs;
+  uint16_t fpu_rsrv2;
+  uint32_t fpu_dp;
+  uint16_t fpu_ds;
+  uint16_t fpu_rsrv3;
+  uint32_t fpu_mxcsr;
+  uint32_t fpu_mxcsrmask;
+  mmst_reg_t fpu_stmm0;
+  mmst_reg_t fpu_stmm1;
+  mmst_reg_t fpu_stmm2;
+  mmst_reg_t fpu_stmm3;
+  mmst_reg_t fpu_stmm4;
+  mmst_reg_t fpu_stmm5;
+  mmst_reg_t fpu_stmm6;
+  mmst_reg_t fpu_stmm7;
+  xmm_reg_t fpu_xmm0;
+  xmm_reg_t fpu_xmm1;
+  xmm_reg_t fpu_xmm2;
+  xmm_reg_t fpu_xmm3;
+  xmm_reg_t fpu_xmm4;
+  xmm_reg_t fpu_xmm5;
+  xmm_reg_t fpu_xmm6;
+  xmm_reg_t fpu_xmm7;
+  xmm_reg_t fpu_xmm8;
+  xmm_reg_t fpu_xmm9;
+  xmm_reg_t fpu_xmm10;
+  xmm_reg_t fpu_xmm11;
+  xmm_reg_t fpu_xmm12;
+  xmm_reg_t fpu_xmm13;
+  xmm_reg_t fpu_xmm14;
+  xmm_reg_t fpu_xmm15;
+  char fpu_rsrv4[6 * 16];
+  uint32_t fpu_reserved1;
+};
+
+struct x86_exception_state64_t {
+  uint16_t trapno;
+  uint16_t cpu;
+  uint32_t err;
+  uint64_t faultvaddr;
+};
+
+inline void swapStruct(x86_thread_state32_t &x) {
+  sys::swapByteOrder(x.eax);
+  sys::swapByteOrder(x.ebx);
+  sys::swapByteOrder(x.ecx);
+  sys::swapByteOrder(x.edx);
+  sys::swapByteOrder(x.edi);
+  sys::swapByteOrder(x.esi);
+  sys::swapByteOrder(x.ebp);
+  sys::swapByteOrder(x.esp);
+  sys::swapByteOrder(x.ss);
+  sys::swapByteOrder(x.eflags);
+  sys::swapByteOrder(x.eip);
+  sys::swapByteOrder(x.cs);
+  sys::swapByteOrder(x.ds);
+  sys::swapByteOrder(x.es);
+  sys::swapByteOrder(x.fs);
+  sys::swapByteOrder(x.gs);
+}
+
+inline void swapStruct(x86_thread_state64_t &x) {
+  sys::swapByteOrder(x.rax);
+  sys::swapByteOrder(x.rbx);
+  sys::swapByteOrder(x.rcx);
+  sys::swapByteOrder(x.rdx);
+  sys::swapByteOrder(x.rdi);
+  sys::swapByteOrder(x.rsi);
+  sys::swapByteOrder(x.rbp);
+  sys::swapByteOrder(x.rsp);
+  sys::swapByteOrder(x.r8);
+  sys::swapByteOrder(x.r9);
+  sys::swapByteOrder(x.r10);
+  sys::swapByteOrder(x.r11);
+  sys::swapByteOrder(x.r12);
+  sys::swapByteOrder(x.r13);
+  sys::swapByteOrder(x.r14);
+  sys::swapByteOrder(x.r15);
+  sys::swapByteOrder(x.rip);
+  sys::swapByteOrder(x.rflags);
+  sys::swapByteOrder(x.cs);
+  sys::swapByteOrder(x.fs);
+  sys::swapByteOrder(x.gs);
+}
+
+inline void swapStruct(x86_float_state64_t &x) {
+  sys::swapByteOrder(x.fpu_reserved[0]);
+  sys::swapByteOrder(x.fpu_reserved[1]);
+  // TODO swap: fp_control_t fpu_fcw;
+  // TODO swap: fp_status_t fpu_fsw;
+  sys::swapByteOrder(x.fpu_fop);
+  sys::swapByteOrder(x.fpu_ip);
+  sys::swapByteOrder(x.fpu_cs);
+  sys::swapByteOrder(x.fpu_rsrv2);
+  sys::swapByteOrder(x.fpu_dp);
+  sys::swapByteOrder(x.fpu_ds);
+  sys::swapByteOrder(x.fpu_rsrv3);
+  sys::swapByteOrder(x.fpu_mxcsr);
+  sys::swapByteOrder(x.fpu_mxcsrmask);
+  sys::swapByteOrder(x.fpu_reserved1);
+}
+
+inline void swapStruct(x86_exception_state64_t &x) {
+  sys::swapByteOrder(x.trapno);
+  sys::swapByteOrder(x.cpu);
+  sys::swapByteOrder(x.err);
+  sys::swapByteOrder(x.faultvaddr);
+}
+
+struct x86_state_hdr_t {
+  uint32_t flavor;
+  uint32_t count;
+};
+
+struct x86_thread_state_t {
+  x86_state_hdr_t tsh;
+  union {
+    x86_thread_state64_t ts64;
+    x86_thread_state32_t ts32;
+  } uts;
+};
+
+struct x86_float_state_t {
+  x86_state_hdr_t fsh;
+  union {
+    x86_float_state64_t fs64;
+  } ufs;
+};
+
+struct x86_exception_state_t {
+  x86_state_hdr_t esh;
+  union {
+    x86_exception_state64_t es64;
+  } ues;
+};
+
+inline void swapStruct(x86_state_hdr_t &x) {
+  sys::swapByteOrder(x.flavor);
+  sys::swapByteOrder(x.count);
+}
+
+enum X86ThreadFlavors {
+  x86_THREAD_STATE32 = 1,
+  x86_FLOAT_STATE32 = 2,
+  x86_EXCEPTION_STATE32 = 3,
+  x86_THREAD_STATE64 = 4,
+  x86_FLOAT_STATE64 = 5,
+  x86_EXCEPTION_STATE64 = 6,
+  x86_THREAD_STATE = 7,
+  x86_FLOAT_STATE = 8,
+  x86_EXCEPTION_STATE = 9,
+  x86_DEBUG_STATE32 = 10,
+  x86_DEBUG_STATE64 = 11,
+  x86_DEBUG_STATE = 12
+};
+
+inline void swapStruct(x86_thread_state_t &x) {
+  swapStruct(x.tsh);
+  if (x.tsh.flavor == x86_THREAD_STATE64)
+    swapStruct(x.uts.ts64);
+}
+
+inline void swapStruct(x86_float_state_t &x) {
+  swapStruct(x.fsh);
+  if (x.fsh.flavor == x86_FLOAT_STATE64)
+    swapStruct(x.ufs.fs64);
+}
+
+inline void swapStruct(x86_exception_state_t &x) {
+  swapStruct(x.esh);
+  if (x.esh.flavor == x86_EXCEPTION_STATE64)
+    swapStruct(x.ues.es64);
+}
+
+const uint32_t x86_THREAD_STATE32_COUNT =
+    sizeof(x86_thread_state32_t) / sizeof(uint32_t);
+
+const uint32_t x86_THREAD_STATE64_COUNT =
+    sizeof(x86_thread_state64_t) / sizeof(uint32_t);
+const uint32_t x86_FLOAT_STATE64_COUNT =
+    sizeof(x86_float_state64_t) / sizeof(uint32_t);
+const uint32_t x86_EXCEPTION_STATE64_COUNT =
+    sizeof(x86_exception_state64_t) / sizeof(uint32_t);
+
+const uint32_t x86_THREAD_STATE_COUNT =
+    sizeof(x86_thread_state_t) / sizeof(uint32_t);
+const uint32_t x86_FLOAT_STATE_COUNT =
+    sizeof(x86_float_state_t) / sizeof(uint32_t);
+const uint32_t x86_EXCEPTION_STATE_COUNT =
+    sizeof(x86_exception_state_t) / sizeof(uint32_t);
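+
+// These counts are in 32-bit words, the unit the Mach thread-state APIs use.
+// For example (illustrative), x86_thread_state64_t holds 21 64-bit registers,
+// so x86_THREAD_STATE64_COUNT works out to 42.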
+
+struct arm_thread_state32_t {
+  uint32_t r[13];
+  uint32_t sp;
+  uint32_t lr;
+  uint32_t pc;
+  uint32_t cpsr;
+};
+
+inline void swapStruct(arm_thread_state32_t &x) {
+  for (int i = 0; i < 13; i++)
+    sys::swapByteOrder(x.r[i]);
+  sys::swapByteOrder(x.sp);
+  sys::swapByteOrder(x.lr);
+  sys::swapByteOrder(x.pc);
+  sys::swapByteOrder(x.cpsr);
+}
+
+struct arm_thread_state64_t {
+  uint64_t x[29];
+  uint64_t fp;
+  uint64_t lr;
+  uint64_t sp;
+  uint64_t pc;
+  uint32_t cpsr;
+  uint32_t pad;
+};
+
+inline void swapStruct(arm_thread_state64_t &x) {
+  for (int i = 0; i < 29; i++)
+    sys::swapByteOrder(x.x[i]);
+  sys::swapByteOrder(x.fp);
+  sys::swapByteOrder(x.lr);
+  sys::swapByteOrder(x.sp);
+  sys::swapByteOrder(x.pc);
+  sys::swapByteOrder(x.cpsr);
+}
+
+struct arm_state_hdr_t {
+  uint32_t flavor;
+  uint32_t count;
+};
+
+struct arm_thread_state_t {
+  arm_state_hdr_t tsh;
+  union {
+    arm_thread_state32_t ts32;
+  } uts;
+};
+
+inline void swapStruct(arm_state_hdr_t &x) {
+  sys::swapByteOrder(x.flavor);
+  sys::swapByteOrder(x.count);
+}
+
+enum ARMThreadFlavors {
+  ARM_THREAD_STATE = 1,
+  ARM_VFP_STATE = 2,
+  ARM_EXCEPTION_STATE = 3,
+  ARM_DEBUG_STATE = 4,
+  ARN_THREAD_STATE_NONE = 5,
+  ARM_THREAD_STATE64 = 6,
+  ARM_EXCEPTION_STATE64 = 7
+};
+
+inline void swapStruct(arm_thread_state_t &x) {
+  swapStruct(x.tsh);
+  if (x.tsh.flavor == ARM_THREAD_STATE)
+    swapStruct(x.uts.ts32);
+}
+
+const uint32_t ARM_THREAD_STATE_COUNT =
+    sizeof(arm_thread_state32_t) / sizeof(uint32_t);
+
+const uint32_t ARM_THREAD_STATE64_COUNT =
+    sizeof(arm_thread_state64_t) / sizeof(uint32_t);
+
+struct ppc_thread_state32_t {
+  uint32_t srr0;
+  uint32_t srr1;
+  uint32_t r0;
+  uint32_t r1;
+  uint32_t r2;
+  uint32_t r3;
+  uint32_t r4;
+  uint32_t r5;
+  uint32_t r6;
+  uint32_t r7;
+  uint32_t r8;
+  uint32_t r9;
+  uint32_t r10;
+  uint32_t r11;
+  uint32_t r12;
+  uint32_t r13;
+  uint32_t r14;
+  uint32_t r15;
+  uint32_t r16;
+  uint32_t r17;
+  uint32_t r18;
+  uint32_t r19;
+  uint32_t r20;
+  uint32_t r21;
+  uint32_t r22;
+  uint32_t r23;
+  uint32_t r24;
+  uint32_t r25;
+  uint32_t r26;
+  uint32_t r27;
+  uint32_t r28;
+  uint32_t r29;
+  uint32_t r30;
+  uint32_t r31;
+  uint32_t ct;
+  uint32_t xer;
+  uint32_t lr;
+  uint32_t ctr;
+  uint32_t mq;
+  uint32_t vrsave;
+};
+
+inline void swapStruct(ppc_thread_state32_t &x) {
+  sys::swapByteOrder(x.srr0);
+  sys::swapByteOrder(x.srr1);
+  sys::swapByteOrder(x.r0);
+  sys::swapByteOrder(x.r1);
+  sys::swapByteOrder(x.r2);
+  sys::swapByteOrder(x.r3);
+  sys::swapByteOrder(x.r4);
+  sys::swapByteOrder(x.r5);
+  sys::swapByteOrder(x.r6);
+  sys::swapByteOrder(x.r7);
+  sys::swapByteOrder(x.r8);
+  sys::swapByteOrder(x.r9);
+  sys::swapByteOrder(x.r10);
+  sys::swapByteOrder(x.r11);
+  sys::swapByteOrder(x.r12);
+  sys::swapByteOrder(x.r13);
+  sys::swapByteOrder(x.r14);
+  sys::swapByteOrder(x.r15);
+  sys::swapByteOrder(x.r16);
+  sys::swapByteOrder(x.r17);
+  sys::swapByteOrder(x.r18);
+  sys::swapByteOrder(x.r19);
+  sys::swapByteOrder(x.r20);
+  sys::swapByteOrder(x.r21);
+  sys::swapByteOrder(x.r22);
+  sys::swapByteOrder(x.r23);
+  sys::swapByteOrder(x.r24);
+  sys::swapByteOrder(x.r25);
+  sys::swapByteOrder(x.r26);
+  sys::swapByteOrder(x.r27);
+  sys::swapByteOrder(x.r28);
+  sys::swapByteOrder(x.r29);
+  sys::swapByteOrder(x.r30);
+  sys::swapByteOrder(x.r31);
+  sys::swapByteOrder(x.ct);
+  sys::swapByteOrder(x.xer);
+  sys::swapByteOrder(x.lr);
+  sys::swapByteOrder(x.ctr);
+  sys::swapByteOrder(x.mq);
+  sys::swapByteOrder(x.vrsave);
+}
+
+struct ppc_state_hdr_t {
+  uint32_t flavor;
+  uint32_t count;
+};
+
+struct ppc_thread_state_t {
+  ppc_state_hdr_t tsh;
+  union {
+    ppc_thread_state32_t ts32;
+  } uts;
+};
+
+inline void swapStruct(ppc_state_hdr_t &x) {
+  sys::swapByteOrder(x.flavor);
+  sys::swapByteOrder(x.count);
+}
+
+enum PPCThreadFlavors {
+  PPC_THREAD_STATE = 1,
+  PPC_FLOAT_STATE = 2,
+  PPC_EXCEPTION_STATE = 3,
+  PPC_VECTOR_STATE = 4,
+  PPC_THREAD_STATE64 = 5,
+  PPC_EXCEPTION_STATE64 = 6,
+  PPC_THREAD_STATE_NONE = 7
+};
+
+inline void swapStruct(ppc_thread_state_t &x) {
+  swapStruct(x.tsh);
+  if (x.tsh.flavor == PPC_THREAD_STATE)
+    swapStruct(x.uts.ts32);
+}
+
+const uint32_t PPC_THREAD_STATE_COUNT =
+    sizeof(ppc_thread_state32_t) / sizeof(uint32_t);
+
+// Define a union of all load command structs
+#define LOAD_COMMAND_STRUCT(LCStruct) LCStruct LCStruct##_data;
+
+union macho_load_command {
+#include "llvm/BinaryFormat/MachO.def"
+};
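+
+// For illustration: each LOAD_COMMAND_STRUCT(LCStruct) entry in MachO.def
+// expands to a member of the form
+//   LCStruct LCStruct##_data;   // e.g. segment_command segment_command_data;
+// so the union is large enough to hold any load command.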
+
+} // end namespace MachO
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/Magic.h b/linux-x64/clang/include/llvm/BinaryFormat/Magic.h
new file mode 100644
index 0000000..4ac826e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/Magic.h
@@ -0,0 +1,74 @@
+//===- llvm/BinaryFormat/Magic.h - File magic identification ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_MAGIC_H
+#define LLVM_BINARYFORMAT_MAGIC_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+
+#include <system_error>
+
+namespace llvm {
+/// file_magic - An "enum class" enumeration of file types based on magic (the
+/// first N bytes of the file).
+struct file_magic {
+  enum Impl {
+    unknown = 0,       ///< Unrecognized file
+    bitcode,           ///< Bitcode file
+    archive,           ///< ar style archive file
+    elf,               ///< ELF Unknown type
+    elf_relocatable,   ///< ELF Relocatable object file
+    elf_executable,    ///< ELF Executable image
+    elf_shared_object, ///< ELF dynamically linked shared lib
+    elf_core,          ///< ELF core image
+    macho_object,      ///< Mach-O Object file
+    macho_executable,  ///< Mach-O Executable
+    macho_fixed_virtual_memory_shared_lib,    ///< Mach-O Shared Lib, FVM
+    macho_core,                               ///< Mach-O Core File
+    macho_preload_executable,                 ///< Mach-O Preloaded Executable
+    macho_dynamically_linked_shared_lib,      ///< Mach-O dynlinked shared lib
+    macho_dynamic_linker,                     ///< The Mach-O dynamic linker
+    macho_bundle,                             ///< Mach-O Bundle file
+    macho_dynamically_linked_shared_lib_stub, ///< Mach-O Shared lib stub
+    macho_dsym_companion,                     ///< Mach-O dSYM companion file
+    macho_kext_bundle,                        ///< Mach-O kext bundle file
+    macho_universal_binary,                   ///< Mach-O universal binary
+    coff_cl_gl_object,   ///< Microsoft cl.exe's intermediate code file
+    coff_object,         ///< COFF object file
+    coff_import_library, ///< COFF import library
+    pecoff_executable,   ///< PECOFF executable file
+    windows_resource,    ///< Windows compiled resource file (.res)
+    wasm_object,         ///< WebAssembly Object file
+    pdb,                 ///< Windows PDB debug info file
+  };
+
+  bool is_object() const { return V != unknown; }
+
+  file_magic() = default;
+  file_magic(Impl V) : V(V) {}
+  operator Impl() const { return V; }
+
+private:
+  Impl V = unknown;
+};
+
+/// @brief Identify the type of a binary file based on its magic bytes.
+file_magic identify_magic(StringRef magic);
+
+/// @brief Get and identify \a path's type based on its content.
+///
+/// @param path Input path.
+/// @param result Set to the type of file, or file_magic::unknown.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code identify_magic(const Twine &path, file_magic &result);
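+
+// Example use (an illustrative sketch; the path is hypothetical):
+//   file_magic Magic;
+//   if (std::error_code EC = identify_magic("/tmp/a.o", Magic))
+//     return EC;
+//   if (Magic == file_magic::macho_object)
+//     ; // handle a Mach-O object file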
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/Wasm.h b/linux-x64/clang/include/llvm/BinaryFormat/Wasm.h
new file mode 100644
index 0000000..91b217f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/Wasm.h
@@ -0,0 +1,290 @@
+//===- Wasm.h - Wasm object file format -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines manifest constants for the wasm object file format.
+// See: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_WASM_H
+#define LLVM_BINARYFORMAT_WASM_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+namespace wasm {
+
+// Object file magic string.
+const char WasmMagic[] = {'\0', 'a', 's', 'm'};
+// Wasm binary format version
+const uint32_t WasmVersion = 0x1;
+// Wasm uses a 64k page size
+const uint32_t WasmPageSize = 65536;
+
+struct WasmObjectHeader {
+  StringRef Magic;
+  uint32_t Version;
+};
+
+struct WasmSignature {
+  std::vector<uint8_t> ParamTypes;
+  uint8_t ReturnType;
+};
+
+struct WasmExport {
+  StringRef Name;
+  uint8_t Kind;
+  uint32_t Index;
+};
+
+struct WasmLimits {
+  uint8_t Flags;
+  uint32_t Initial;
+  uint32_t Maximum;
+};
+
+struct WasmTable {
+  uint8_t ElemType;
+  WasmLimits Limits;
+};
+
+struct WasmInitExpr {
+  uint8_t Opcode;
+  union {
+    int32_t Int32;
+    int64_t Int64;
+    int32_t Float32;
+    int64_t Float64;
+    uint32_t Global;
+  } Value;
+};
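+
+// Clarifying note (an assumption about usage, not from the original header):
+// Float32/Float64 hold the raw IEEE-754 bit patterns as integers; readers
+// reinterpret the bits when a floating-point value is needed.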
+
+struct WasmGlobalType {
+  uint8_t Type;
+  bool Mutable;
+};
+
+struct WasmGlobal {
+  uint32_t Index;
+  WasmGlobalType Type;
+  WasmInitExpr InitExpr;
+  StringRef Name; // from the "linking" or "names" section
+};
+
+struct WasmImport {
+  StringRef Module;
+  StringRef Field;
+  uint8_t Kind;
+  union {
+    uint32_t SigIndex;
+    WasmGlobalType Global;
+    WasmTable Table;
+    WasmLimits Memory;
+  };
+};
+
+struct WasmLocalDecl {
+  uint8_t Type;
+  uint32_t Count;
+};
+
+struct WasmFunction {
+  uint32_t Index;
+  std::vector<WasmLocalDecl> Locals;
+  ArrayRef<uint8_t> Body;
+  uint32_t CodeSectionOffset;
+  uint32_t Size;
+  StringRef Name; // from the "linking" or "names" section
+  uint32_t Comdat; // from the "comdat info" section
+};
+
+struct WasmDataSegment {
+  uint32_t MemoryIndex;
+  WasmInitExpr Offset;
+  ArrayRef<uint8_t> Content;
+  StringRef Name;
+  uint32_t Alignment;
+  uint32_t Flags;
+  uint32_t Comdat; // from the "comdat info" section
+};
+
+struct WasmElemSegment {
+  uint32_t TableIndex;
+  WasmInitExpr Offset;
+  std::vector<uint32_t> Functions;
+};
+
+// Represents the location of a Wasm data symbol within a WasmDataSegment, as
+// the index of the segment, and the offset and size within the segment.
+struct WasmDataReference {
+  uint32_t Segment;
+  uint32_t Offset;
+  uint32_t Size;
+};
+
+struct WasmRelocation {
+  uint8_t Type;    // The type of the relocation.
+  uint32_t Index;  // Index into either symbol or type index space.
+  uint64_t Offset; // Offset from the start of the section.
+  int64_t Addend;  // A value to add to the symbol.
+};
+
+struct WasmInitFunc {
+  uint32_t Priority;
+  uint32_t Symbol;
+};
+
+struct WasmSymbolInfo {
+  StringRef Name;
+  uint8_t Kind;
+  uint32_t Flags;
+  union {
+    // For function or global symbols, the index into the function or global
+    // index space.
+    uint32_t ElementIndex;
+    // For data symbols, the address of the data relative to its segment.
+    WasmDataReference DataRef;
+  };
+};
+
+struct WasmFunctionName {
+  uint32_t Index;
+  StringRef Name;
+};
+
+struct WasmLinkingData {
+  std::vector<WasmInitFunc> InitFunctions;
+  std::vector<StringRef> Comdats;
+  std::vector<WasmSymbolInfo> SymbolTable;
+};
+
+enum : unsigned {
+  WASM_SEC_CUSTOM = 0,   // Custom / User-defined section
+  WASM_SEC_TYPE = 1,     // Function signature declarations
+  WASM_SEC_IMPORT = 2,   // Import declarations
+  WASM_SEC_FUNCTION = 3, // Function declarations
+  WASM_SEC_TABLE = 4,    // Indirect function table and other tables
+  WASM_SEC_MEMORY = 5,   // Memory attributes
+  WASM_SEC_GLOBAL = 6,   // Global declarations
+  WASM_SEC_EXPORT = 7,   // Exports
+  WASM_SEC_START = 8,    // Start function declaration
+  WASM_SEC_ELEM = 9,     // Elements section
+  WASM_SEC_CODE = 10,    // Function bodies (code)
+  WASM_SEC_DATA = 11     // Data segments
+};
+
+// Type immediate encodings used in various contexts.
+enum : unsigned {
+  WASM_TYPE_I32 = 0x7F,
+  WASM_TYPE_I64 = 0x7E,
+  WASM_TYPE_F32 = 0x7D,
+  WASM_TYPE_F64 = 0x7C,
+  WASM_TYPE_ANYFUNC = 0x70,
+  WASM_TYPE_EXCEPT_REF = 0x68,
+  WASM_TYPE_FUNC = 0x60,
+  WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
+};
+
+// Kinds of externals (for imports and exports).
+enum : unsigned {
+  WASM_EXTERNAL_FUNCTION = 0x0,
+  WASM_EXTERNAL_TABLE = 0x1,
+  WASM_EXTERNAL_MEMORY = 0x2,
+  WASM_EXTERNAL_GLOBAL = 0x3,
+};
+
+// Opcodes used in initializer expressions.
+enum : unsigned {
+  WASM_OPCODE_END = 0x0b,
+  WASM_OPCODE_GET_GLOBAL = 0x23,
+  WASM_OPCODE_I32_CONST = 0x41,
+  WASM_OPCODE_I64_CONST = 0x42,
+  WASM_OPCODE_F32_CONST = 0x43,
+  WASM_OPCODE_F64_CONST = 0x44,
+};
+
+enum : unsigned {
+  WASM_LIMITS_FLAG_HAS_MAX = 0x1,
+};
+
+// Subset of types that a value can have
+enum class ValType {
+  I32 = WASM_TYPE_I32,
+  I64 = WASM_TYPE_I64,
+  F32 = WASM_TYPE_F32,
+  F64 = WASM_TYPE_F64,
+  EXCEPT_REF = WASM_TYPE_EXCEPT_REF,
+};
+
+// Kind codes used in the custom "name" section
+enum : unsigned {
+  WASM_NAMES_FUNCTION = 0x1,
+  WASM_NAMES_LOCAL    = 0x2,
+};
+
+// Kind codes used in the custom "linking" section
+enum : unsigned {
+  WASM_SEGMENT_INFO   = 0x5,
+  WASM_INIT_FUNCS     = 0x6,
+  WASM_COMDAT_INFO    = 0x7,
+  WASM_SYMBOL_TABLE   = 0x8,
+};
+
+// Kind codes used in the custom "linking" section in the WASM_COMDAT_INFO
+enum : unsigned {
+  WASM_COMDAT_DATA        = 0x0,
+  WASM_COMDAT_FUNCTION    = 0x1,
+};
+
+// Kind codes used in the custom "linking" section in the WASM_SYMBOL_TABLE
+enum WasmSymbolType : unsigned {
+  WASM_SYMBOL_TYPE_FUNCTION = 0x0,
+  WASM_SYMBOL_TYPE_DATA     = 0x1,
+  WASM_SYMBOL_TYPE_GLOBAL   = 0x2,
+};
+
+const unsigned WASM_SYMBOL_BINDING_MASK       = 0x3;
+const unsigned WASM_SYMBOL_VISIBILITY_MASK    = 0xc;
+
+const unsigned WASM_SYMBOL_BINDING_GLOBAL     = 0x0;
+const unsigned WASM_SYMBOL_BINDING_WEAK       = 0x1;
+const unsigned WASM_SYMBOL_BINDING_LOCAL      = 0x2;
+const unsigned WASM_SYMBOL_VISIBILITY_DEFAULT = 0x0;
+const unsigned WASM_SYMBOL_VISIBILITY_HIDDEN  = 0x4;
+const unsigned WASM_SYMBOL_UNDEFINED          = 0x10;
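+
+// Decoding sketch (illustrative; Sym is a hypothetical WasmSymbolInfo):
+//   bool IsWeak =
+//       (Sym.Flags & WASM_SYMBOL_BINDING_MASK) == WASM_SYMBOL_BINDING_WEAK;
+//   bool IsHidden =
+//       (Sym.Flags & WASM_SYMBOL_VISIBILITY_MASK) ==
+//       WASM_SYMBOL_VISIBILITY_HIDDEN;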
+
+#define WASM_RELOC(name, value) name = value,
+
+enum : unsigned {
+#include "WasmRelocs.def"
+};
+
+#undef WASM_RELOC
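+
+// For illustration: each WASM_RELOC(name, value) line in WasmRelocs.def
+// expands under the definition above to an enumerator, e.g.
+//   R_WEBASSEMBLY_FUNCTION_INDEX_LEB = 0,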
+
+// Useful comparison operators
+inline bool operator==(const WasmSignature &LHS, const WasmSignature &RHS) {
+  return LHS.ReturnType == RHS.ReturnType && LHS.ParamTypes == RHS.ParamTypes;
+}
+
+inline bool operator!=(const WasmSignature &LHS, const WasmSignature &RHS) {
+  return !(LHS == RHS);
+}
+
+inline bool operator==(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
+  return LHS.Type == RHS.Type && LHS.Mutable == RHS.Mutable;
+}
+
+inline bool operator!=(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
+  return !(LHS == RHS);
+}
+
+} // end namespace wasm
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/BinaryFormat/WasmRelocs.def b/linux-x64/clang/include/llvm/BinaryFormat/WasmRelocs.def
new file mode 100644
index 0000000..d6f0e42
--- /dev/null
+++ b/linux-x64/clang/include/llvm/BinaryFormat/WasmRelocs.def
@@ -0,0 +1,13 @@
+
+#ifndef WASM_RELOC
+#error "WASM_RELOC must be defined"
+#endif
+
+WASM_RELOC(R_WEBASSEMBLY_FUNCTION_INDEX_LEB,   0)
+WASM_RELOC(R_WEBASSEMBLY_TABLE_INDEX_SLEB,     1)
+WASM_RELOC(R_WEBASSEMBLY_TABLE_INDEX_I32,      2)
+WASM_RELOC(R_WEBASSEMBLY_MEMORY_ADDR_LEB,      3)
+WASM_RELOC(R_WEBASSEMBLY_MEMORY_ADDR_SLEB,     4)
+WASM_RELOC(R_WEBASSEMBLY_MEMORY_ADDR_I32,      5)
+WASM_RELOC(R_WEBASSEMBLY_TYPE_INDEX_LEB,       6)
+WASM_RELOC(R_WEBASSEMBLY_GLOBAL_INDEX_LEB,     7)
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitCodes.h b/linux-x64/clang/include/llvm/Bitcode/BitCodes.h
new file mode 100644
index 0000000..bf21e14
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitCodes.h
@@ -0,0 +1,185 @@
+//===- BitCodes.h - Enum values for the bitcode format ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values.
+//
+// The enum values defined in this file should be considered permanent.  If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODES_H
+#define LLVM_BITCODE_BITCODES_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace llvm {
+/// Offsets of the 32-bit fields of the bitcode wrapper header.
+static const unsigned BWH_MagicField = 0 * 4;
+static const unsigned BWH_VersionField = 1 * 4;
+static const unsigned BWH_OffsetField = 2 * 4;
+static const unsigned BWH_SizeField = 3 * 4;
+static const unsigned BWH_CPUTypeField = 4 * 4;
+static const unsigned BWH_HeaderSize = 5 * 4;
+
+namespace bitc {
+  enum StandardWidths {
+    BlockIDWidth   = 8,  // We use VBR-8 for block IDs.
+    CodeLenWidth   = 4,  // Codelen are VBR-4.
+    BlockSizeWidth = 32  // BlockSize up to 2^32 32-bit words = 16GB per block.
+  };
+
+  // The standard abbrev namespace always has a way to exit a block, enter a
+  // nested block, define abbrevs, and define an unabbreviated record.
+  enum FixedAbbrevIDs {
+    END_BLOCK = 0,  // Must be zero to guarantee termination for broken bitcode.
+    ENTER_SUBBLOCK = 1,
+
+    /// DEFINE_ABBREV - Defines an abbrev for the current block.  It consists
+    /// of a vbr5 for # operand infos.  Each operand info is emitted with a
+    /// single bit to indicate if it is a literal encoding.  If so, the value is
+    /// emitted with a vbr8.  If not, the encoding is emitted as 3 bits followed
+    /// by the info value as a vbr5 if needed.
+    DEFINE_ABBREV = 2,
+
+    // UNABBREV_RECORDs are emitted with a vbr6 for the record code, followed by
+    // a vbr6 for the # operands, followed by vbr6's for each operand.
+    UNABBREV_RECORD = 3,
+
+    // This is not a code, this is a marker for the first abbrev assignment.
+    FIRST_APPLICATION_ABBREV = 4
+  };
+
+  /// StandardBlockIDs - All bitcode files can optionally include a BLOCKINFO
+  /// block, which contains metadata about other blocks in the file.
+  enum StandardBlockIDs {
+    /// BLOCKINFO_BLOCK is used to define metadata about blocks, for example,
+    /// standard abbrevs that should be available to all blocks of a specified
+    /// ID.
+    BLOCKINFO_BLOCK_ID = 0,
+
+    // Block IDs 1-7 are reserved for future expansion.
+    FIRST_APPLICATION_BLOCKID = 8
+  };
+
+  /// BlockInfoCodes - The blockinfo block contains metadata about user-defined
+  /// blocks.
+  enum BlockInfoCodes {
+    // DEFINE_ABBREV has magic semantics here, applying to the current SETBID'd
+    // block, instead of the BlockInfo block.
+
+    BLOCKINFO_CODE_SETBID        = 1, // SETBID: [blockid#]
+    BLOCKINFO_CODE_BLOCKNAME     = 2, // BLOCKNAME: [name]
+    BLOCKINFO_CODE_SETRECORDNAME = 3  // BLOCKINFO_CODE_SETRECORDNAME:
+                                      //                             [id, name]
+  };
+
+} // End bitc namespace
+
+/// BitCodeAbbrevOp - This describes one or more operands in an abbreviation.
+/// This is actually a union of two different things:
+///   1. It could be a literal integer value ("the operand is always 17").
+///   2. It could be an encoding specification ("this operand encoded like so").
+///
+class BitCodeAbbrevOp {
+  uint64_t Val;           // A literal value or data for an encoding.
+  bool IsLiteral : 1;     // Indicate whether this is a literal value or not.
+  unsigned Enc   : 3;     // The encoding to use.
+public:
+  enum Encoding {
+    Fixed = 1,  // A fixed width field, Val specifies number of bits.
+    VBR   = 2,  // A VBR field where Val specifies the width of each chunk.
+    Array = 3,  // A sequence of fields, next field specifies elt encoding.
+    Char6 = 4,  // A 6-bit fixed field which maps to [a-zA-Z0-9._].
+    Blob  = 5   // 32-bit aligned array of 8-bit characters.
+  };
+
+  explicit BitCodeAbbrevOp(uint64_t V) :  Val(V), IsLiteral(true) {}
+  explicit BitCodeAbbrevOp(Encoding E, uint64_t Data = 0)
+    : Val(Data), IsLiteral(false), Enc(E) {}
+
+  bool isLiteral() const  { return IsLiteral; }
+  bool isEncoding() const { return !IsLiteral; }
+
+  // Accessors for literals.
+  uint64_t getLiteralValue() const { assert(isLiteral()); return Val; }
+
+  // Accessors for encoding info.
+  Encoding getEncoding() const { assert(isEncoding()); return (Encoding)Enc; }
+  uint64_t getEncodingData() const {
+    assert(isEncoding() && hasEncodingData());
+    return Val;
+  }
+
+  bool hasEncodingData() const { return hasEncodingData(getEncoding()); }
+  static bool hasEncodingData(Encoding E) {
+    switch (E) {
+    case Fixed:
+    case VBR:
+      return true;
+    case Array:
+    case Char6:
+    case Blob:
+      return false;
+    }
+    report_fatal_error("Invalid encoding");
+  }
+
+  /// isChar6 - Return true if this character is legal in the Char6 encoding.
+  static bool isChar6(char C) {
+    if (C >= 'a' && C <= 'z') return true;
+    if (C >= 'A' && C <= 'Z') return true;
+    if (C >= '0' && C <= '9') return true;
+    if (C == '.' || C == '_') return true;
+    return false;
+  }
+  static unsigned EncodeChar6(char C) {
+    if (C >= 'a' && C <= 'z') return C-'a';
+    if (C >= 'A' && C <= 'Z') return C-'A'+26;
+    if (C >= '0' && C <= '9') return C-'0'+26+26;
+    if (C == '.')             return 62;
+    if (C == '_')             return 63;
+    llvm_unreachable("Not a value Char6 character!");
+  }
+
+  static char DecodeChar6(unsigned V) {
+    assert((V & ~63) == 0 && "Not a Char6 encoded character!");
+    return "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._"
+        [V];
+  }
+
+};
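+
+// Illustrative sketch (not part of the upstream header): isChar6, EncodeChar6
+// and DecodeChar6 round-trip over the 64-character alphabet [a-zA-Z0-9._]:
+//
+//   assert(BitCodeAbbrevOp::isChar6('x'));
+//   unsigned V = BitCodeAbbrevOp::EncodeChar6('x'); // 'x' - 'a' == 23
+//   assert(BitCodeAbbrevOp::DecodeChar6(V) == 'x'); // inverse mapping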
+
+template <> struct isPodLike<BitCodeAbbrevOp> { static const bool value=true; };
+
+/// BitCodeAbbrev - This class represents an abbreviation record.  An
+/// abbreviation allows a complex record that has redundancy to be stored in a
+/// specialized format instead of the fully-general, fully-vbr, format.
+class BitCodeAbbrev {
+  SmallVector<BitCodeAbbrevOp, 32> OperandList;
+
+public:
+  unsigned getNumOperandInfos() const {
+    return static_cast<unsigned>(OperandList.size());
+  }
+  const BitCodeAbbrevOp &getOperandInfo(unsigned N) const {
+    return OperandList[N];
+  }
+
+  void Add(const BitCodeAbbrevOp &OpInfo) {
+    OperandList.push_back(OpInfo);
+  }
+};
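+
+// Illustrative sketch (not upstream code): a typical abbreviation pairs a
+// literal record code with encoded operands; the record code 4 below is just
+// a placeholder.
+//
+//   auto Abbv = std::make_shared<BitCodeAbbrev>();
+//   Abbv->Add(BitCodeAbbrevOp(4));                       // literal code
+//   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // one VBR6 scalar
+//   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));  // then an array...
+//   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));  // ...of Char6 chars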
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitcodeReader.h b/linux-x64/clang/include/llvm/Bitcode/BitcodeReader.h
new file mode 100644
index 0000000..ce8bdd9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitcodeReader.h
@@ -0,0 +1,272 @@
+//===- llvm/Bitcode/BitcodeReader.h - Bitcode reader ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines interfaces to read LLVM bitcode files/streams.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODEREADER_H
+#define LLVM_BITCODE_BITCODEREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <vector>
+
+namespace llvm {
+
+class LLVMContext;
+class Module;
+
+  // These functions are for converting Expected/Error values to
+  // ErrorOr/std::error_code for compatibility with legacy clients. FIXME:
+  // Remove these functions once no longer needed by the C and libLTO APIs.
+
+  std::error_code errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, Error Err);
+
+  template <typename T>
+  ErrorOr<T> expectedToErrorOrAndEmitErrors(LLVMContext &Ctx, Expected<T> Val) {
+    if (!Val)
+      return errorToErrorCodeAndEmitErrors(Ctx, Val.takeError());
+    return std::move(*Val);
+  }
+
+  struct BitcodeFileContents;
+
+  /// Basic information extracted from a bitcode module to be used for LTO.
+  struct BitcodeLTOInfo {
+    bool IsThinLTO;
+    bool HasSummary;
+  };
+
+  /// Represents a module in a bitcode file.
+  class BitcodeModule {
+    // This covers the identification (if present) and module blocks.
+    ArrayRef<uint8_t> Buffer;
+    StringRef ModuleIdentifier;
+
+    // The string table used to interpret this module.
+    StringRef Strtab;
+
+    // The bitstream location of the IDENTIFICATION_BLOCK.
+    uint64_t IdentificationBit;
+
+    // The bitstream location of this module's MODULE_BLOCK.
+    uint64_t ModuleBit;
+
+    BitcodeModule(ArrayRef<uint8_t> Buffer, StringRef ModuleIdentifier,
+                  uint64_t IdentificationBit, uint64_t ModuleBit)
+        : Buffer(Buffer), ModuleIdentifier(ModuleIdentifier),
+          IdentificationBit(IdentificationBit), ModuleBit(ModuleBit) {}
+
+    // Calls the ctor.
+    friend Expected<BitcodeFileContents>
+    getBitcodeFileContents(MemoryBufferRef Buffer);
+
+    Expected<std::unique_ptr<Module>> getModuleImpl(LLVMContext &Context,
+                                                    bool MaterializeAll,
+                                                    bool ShouldLazyLoadMetadata,
+                                                    bool IsImporting);
+
+  public:
+    StringRef getBuffer() const {
+      return StringRef((const char *)Buffer.begin(), Buffer.size());
+    }
+
+    StringRef getStrtab() const { return Strtab; }
+
+    StringRef getModuleIdentifier() const { return ModuleIdentifier; }
+
+    /// Read the bitcode module and prepare for lazy deserialization of function
+    /// bodies. If ShouldLazyLoadMetadata is true, lazily load metadata as well.
+    /// If IsImporting is true, this module is being parsed for ThinLTO
+    /// importing into another module.
+    Expected<std::unique_ptr<Module>> getLazyModule(LLVMContext &Context,
+                                                    bool ShouldLazyLoadMetadata,
+                                                    bool IsImporting);
+
+    /// Read the entire bitcode module and return it.
+    Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context);
+
+    /// Returns information about the module to be used for LTO: whether to
+    /// compile with ThinLTO, and whether it has a summary.
+    Expected<BitcodeLTOInfo> getLTOInfo();
+
+    /// Parse the specified bitcode buffer, returning the module summary index.
+    Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();
+
+    /// Parse the specified bitcode buffer and merge its module summary index
+    /// into CombinedIndex.
+    Error readSummary(ModuleSummaryIndex &CombinedIndex, StringRef ModulePath,
+                      uint64_t ModuleId);
+  };
+
+  struct BitcodeFileContents {
+    std::vector<BitcodeModule> Mods;
+    StringRef Symtab, StrtabForSymtab;
+  };
+
+  /// Returns the contents of a bitcode file. This includes the raw contents of
+  /// the symbol table embedded in the bitcode file. Clients which require a
+  /// symbol table should prefer to use irsymtab::read instead of this function
+  /// because it creates a reader for the irsymtab and handles upgrading bitcode
+  /// files without a symbol table or with an old symbol table.
+  Expected<BitcodeFileContents> getBitcodeFileContents(MemoryBufferRef Buffer);
+
+  /// Returns a list of modules in the specified bitcode buffer.
+  Expected<std::vector<BitcodeModule>>
+  getBitcodeModuleList(MemoryBufferRef Buffer);
+
+  /// Read the header of the specified bitcode buffer and prepare for lazy
+  /// deserialization of function bodies. If ShouldLazyLoadMetadata is true,
+  /// lazily load metadata as well. If IsImporting is true, this module is
+  /// being parsed for ThinLTO importing into another module.
+  Expected<std::unique_ptr<Module>>
+  getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context,
+                       bool ShouldLazyLoadMetadata = false,
+                       bool IsImporting = false);
+
+  /// Like getLazyBitcodeModule, except that the module takes ownership of
+  /// the memory buffer if successful. If successful, this moves Buffer. On
+  /// error, this *does not* move Buffer. If IsImporting is true, this module is
+  /// being parsed for ThinLTO importing into another module.
+  Expected<std::unique_ptr<Module>> getOwningLazyBitcodeModule(
+      std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
+      bool ShouldLazyLoadMetadata = false, bool IsImporting = false);
+
+  /// Read the header of the specified bitcode buffer and extract just the
+  /// triple information. If successful, this returns a string. On error, this
+  /// returns "".
+  Expected<std::string> getBitcodeTargetTriple(MemoryBufferRef Buffer);
+
+  /// Return true if \p Buffer contains a bitcode file with ObjC code (category
+  /// or class) in it.
+  Expected<bool> isBitcodeContainingObjCCategory(MemoryBufferRef Buffer);
+
+  /// Read the header of the specified bitcode buffer and extract just the
+  /// producer string information. If successful, this returns a string. On
+  /// error, this returns "".
+  Expected<std::string> getBitcodeProducerString(MemoryBufferRef Buffer);
+
+  /// Read the specified bitcode file, returning the module.
+  Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
+                                                     LLVMContext &Context);
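+
+  // Illustrative sketch (not upstream code): fully parsing a module, where
+  // Buffer is assumed to be a MemoryBufferRef over bitcode. A real caller
+  // must consume the Error on failure (e.g. via handleErrors).
+  //
+  //   LLVMContext Ctx;
+  //   Expected<std::unique_ptr<Module>> ModOrErr = parseBitcodeFile(Buffer, Ctx);
+  //   if (!ModOrErr)
+  //     return ModOrErr.takeError();
+  //   std::unique_ptr<Module> M = std::move(*ModOrErr);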
+
+  /// Returns LTO information for the specified bitcode file.
+  Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer);
+
+  /// Parse the specified bitcode buffer, returning the module summary index.
+  Expected<std::unique_ptr<ModuleSummaryIndex>>
+  getModuleSummaryIndex(MemoryBufferRef Buffer);
+
+  /// Parse the specified bitcode buffer and merge the index into CombinedIndex.
+  Error readModuleSummaryIndex(MemoryBufferRef Buffer,
+                               ModuleSummaryIndex &CombinedIndex,
+                               uint64_t ModuleId);
+
+  /// Parse the module summary index out of an IR file and return the module
+  /// summary index object if found, or an empty summary if not. If Path refers
+  /// to an empty file and IgnoreEmptyThinLTOIndexFile is true, then
+  /// this function will return nullptr.
+  Expected<std::unique_ptr<ModuleSummaryIndex>>
+  getModuleSummaryIndexForFile(StringRef Path,
+                               bool IgnoreEmptyThinLTOIndexFile = false);
+
+  /// isBitcodeWrapper - Return true if the given bytes are the magic bytes
+  /// for an LLVM IR bitcode wrapper.
+  inline bool isBitcodeWrapper(const unsigned char *BufPtr,
+                               const unsigned char *BufEnd) {
+    // See if you can find the hidden message in the magic bytes :-).
+    // (Hint: it's a little-endian encoding.)
+    return BufPtr != BufEnd &&
+           BufPtr[0] == 0xDE &&
+           BufPtr[1] == 0xC0 &&
+           BufPtr[2] == 0x17 &&
+           BufPtr[3] == 0x0B;
+  }
+
+  /// isRawBitcode - Return true if the given bytes are the magic bytes for
+  /// raw LLVM IR bitcode (without a wrapper).
+  inline bool isRawBitcode(const unsigned char *BufPtr,
+                           const unsigned char *BufEnd) {
+    // These bytes sort of have a hidden message, but it's not in
+    // little-endian this time, and it's a little redundant.
+    return BufPtr != BufEnd &&
+           BufPtr[0] == 'B' &&
+           BufPtr[1] == 'C' &&
+           BufPtr[2] == 0xc0 &&
+           BufPtr[3] == 0xde;
+  }
+
+  /// isBitcode - Return true if the given bytes are the magic bytes for
+  /// LLVM IR bitcode, either with or without a wrapper.
+  inline bool isBitcode(const unsigned char *BufPtr,
+                        const unsigned char *BufEnd) {
+    return isBitcodeWrapper(BufPtr, BufEnd) ||
+           isRawBitcode(BufPtr, BufEnd);
+  }
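+
+  // Illustrative sketch (not upstream code): probing a MemoryBufferRef named
+  // Buf (an assumed local) before handing it to a reader.
+  //
+  //   const auto *Ptr =
+  //       reinterpret_cast<const unsigned char *>(Buf.getBufferStart());
+  //   const auto *End =
+  //       reinterpret_cast<const unsigned char *>(Buf.getBufferEnd());
+  //   if (!isBitcode(Ptr, End))
+  //     report_fatal_error("not a bitcode file");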
+
+  /// SkipBitcodeWrapperHeader - Some systems wrap bc files with a special
+  /// header for padding or other reasons.  The format of this header is:
+  ///
+  /// struct bc_header {
+  ///   uint32_t Magic;         // 0x0B17C0DE
+  ///   uint32_t Version;       // Version, currently always 0.
+  ///   uint32_t BitcodeOffset; // Offset to traditional bitcode file.
+  ///   uint32_t BitcodeSize;   // Size of traditional bitcode file.
+  ///   ... potentially other gunk ...
+  /// };
+  ///
+  /// This function is called when we find a file with a matching magic number.
+  /// In this case, skip down to the subsection of the file that is actually a
+  /// BC file.
+  /// If 'VerifyBufferSize' is true, check that the buffer is large enough to
+  /// contain the whole bitcode file.
+  inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
+                                       const unsigned char *&BufEnd,
+                                       bool VerifyBufferSize) {
+    // Must contain the offset and size field!
+    if (unsigned(BufEnd - BufPtr) < BWH_SizeField + 4)
+      return true;
+
+    unsigned Offset = support::endian::read32le(&BufPtr[BWH_OffsetField]);
+    unsigned Size = support::endian::read32le(&BufPtr[BWH_SizeField]);
+    uint64_t BitcodeOffsetEnd = (uint64_t)Offset + (uint64_t)Size;
+
+    // Verify that Offset+Size fits in the file.
+    if (VerifyBufferSize && BitcodeOffsetEnd > uint64_t(BufEnd-BufPtr))
+      return true;
+    BufPtr += Offset;
+    BufEnd = BufPtr+Size;
+    return false;
+  }
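+
+  // Illustrative sketch (not upstream code): stripping a wrapper header when
+  // present, so that BufPtr/BufEnd delimit raw bitcode afterwards. Note that
+  // SkipBitcodeWrapperHeader returns true on *failure*; error() below is a
+  // placeholder for the caller's error path.
+  //
+  //   if (isBitcodeWrapper(BufPtr, BufEnd) &&
+  //       SkipBitcodeWrapperHeader(BufPtr, BufEnd, /*VerifyBufferSize=*/true))
+  //     return error("invalid bitcode wrapper header");
+  //   assert(isRawBitcode(BufPtr, BufEnd));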
+
+  const std::error_category &BitcodeErrorCategory();
+  enum class BitcodeError { CorruptedBitcode = 1 };
+  inline std::error_code make_error_code(BitcodeError E) {
+    return std::error_code(static_cast<int>(E), BitcodeErrorCategory());
+  }
+
+} // end namespace llvm
+
+namespace std {
+
+template <> struct is_error_code_enum<llvm::BitcodeError> : std::true_type {};
+
+} // end namespace std
+
+#endif // LLVM_BITCODE_BITCODEREADER_H
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitcodeWriter.h b/linux-x64/clang/include/llvm/Bitcode/BitcodeWriter.h
new file mode 100644
index 0000000..fa32295
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitcodeWriter.h
@@ -0,0 +1,157 @@
+//===- llvm/Bitcode/BitcodeWriter.h - Bitcode writers -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines interfaces to write LLVM bitcode files/streams.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODEWRITER_H
+#define LLVM_BITCODE_BITCODEWRITER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/Allocator.h"
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class BitstreamWriter;
+class Module;
+class raw_ostream;
+
+  class BitcodeWriter {
+    SmallVectorImpl<char> &Buffer;
+    std::unique_ptr<BitstreamWriter> Stream;
+
+    StringTableBuilder StrtabBuilder{StringTableBuilder::RAW};
+
+    // Owns any strings created by the irsymtab writer until we create the
+    // string table.
+    BumpPtrAllocator Alloc;
+
+    bool WroteStrtab = false, WroteSymtab = false;
+
+    void writeBlob(unsigned Block, unsigned Record, StringRef Blob);
+
+    std::vector<Module *> Mods;
+
+  public:
+    /// Create a BitcodeWriter that writes to Buffer.
+    BitcodeWriter(SmallVectorImpl<char> &Buffer);
+
+    ~BitcodeWriter();
+
+    /// Attempt to write a symbol table to the bitcode file. This must be called
+    /// at most once after all modules have been written.
+    ///
+    /// A reader does not require a symbol table to interpret a bitcode file;
+    /// the symbol table is needed only to improve link-time performance. So
+    /// this function may decide not to write a symbol table. It may so decide
+    /// if, for example, the target is unregistered or the IR is malformed.
+    void writeSymtab();
+
+    /// Write the bitcode file's string table. This must be called exactly once
+    /// after all modules and the optional symbol table have been written.
+    void writeStrtab();
+
+    /// Copy the string table for another module into this bitcode file. This
+    /// should be called after copying the module itself into the bitcode file.
+    void copyStrtab(StringRef Strtab);
+
+    /// Write the specified module to the buffer specified at construction time.
+    ///
+    /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
+    /// Value in \c M.  These will be reconstructed exactly when \a M is
+    /// deserialized.
+    ///
+    /// If \c Index is supplied, the bitcode will contain the summary index
+    /// (currently for use in ThinLTO optimization).
+    ///
+    /// \p GenerateHash enables hashing the Module and including the hash in the
+    /// bitcode (currently for use in ThinLTO incremental build).
+    ///
+    /// If \p ModHash is non-null and \p GenerateHash is true, the resulting
+    /// hash is written into ModHash. When GenerateHash is false, the value in
+    /// ModHash is used as the hash instead of computing it from the generated
+    /// bitcode. This can be used to produce the same module hash for a
+    /// minimized bitcode used just for the thin link as for the regular full
+    /// bitcode that will be used in the backend.
+    void writeModule(const Module &M, bool ShouldPreserveUseListOrder = false,
+                     const ModuleSummaryIndex *Index = nullptr,
+                     bool GenerateHash = false, ModuleHash *ModHash = nullptr);
+
+    /// Write the specified thin link bitcode file (i.e., the minimized bitcode
+    /// file) to the buffer specified at construction time. The thin link
+    /// bitcode file is used for the thin link step and contains only the
+    /// information needed for it.
+    ///
+    /// ModHash is for use in ThinLTO incremental builds; it is generated while
+    /// writing the IR bitcode file.
+    void writeThinLinkBitcode(const Module &M, const ModuleSummaryIndex &Index,
+                              const ModuleHash &ModHash);
+
+    void writeIndex(
+        const ModuleSummaryIndex *Index,
+        const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
+  };
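+
+  // Illustrative sketch (not upstream code): the expected call sequence is one
+  // or more writeModule calls, optionally writeSymtab, then exactly one
+  // writeStrtab. M and Out are assumed locals (a Module and a raw_ostream).
+  //
+  //   SmallVector<char, 0> Buffer;
+  //   BitcodeWriter Writer(Buffer);
+  //   Writer.writeModule(M);
+  //   Writer.writeSymtab();
+  //   Writer.writeStrtab();
+  //   Out.write(Buffer.data(), Buffer.size());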
+
+  /// \brief Write the specified module to the specified raw output stream.
+  ///
+  /// For streams where it matters, the given stream should be in "binary"
+  /// mode.
+  ///
+  /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
+  /// Value in \c M.  These will be reconstructed exactly when \a M is
+  /// deserialized.
+  ///
+  /// If \c Index is supplied, the bitcode will contain the summary index
+  /// (currently for use in ThinLTO optimization).
+  ///
+  /// \p GenerateHash enables hashing the Module and including the hash in the
+  /// bitcode (currently for use in ThinLTO incremental build).
+  ///
+  /// If \p ModHash is non-null and \p GenerateHash is true, the resulting
+  /// hash is written into ModHash. When GenerateHash is false, the value in
+  /// ModHash is used as the hash instead of computing it from the generated
+  /// bitcode. This can be used to produce the same module hash for a minimized
+  /// bitcode used just for the thin link as for the regular full bitcode that
+  /// will be used in the backend.
+  void WriteBitcodeToFile(const Module &M, raw_ostream &Out,
+                          bool ShouldPreserveUseListOrder = false,
+                          const ModuleSummaryIndex *Index = nullptr,
+                          bool GenerateHash = false,
+                          ModuleHash *ModHash = nullptr);
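+
+  // Illustrative sketch (not upstream code): writing a module to disk with the
+  // one-shot helper above; "out.bc" is a placeholder path.
+  //
+  //   std::error_code EC;
+  //   raw_fd_ostream OS("out.bc", EC, sys::fs::F_None);
+  //   if (EC)
+  //     report_fatal_error("cannot open output file");
+  //   WriteBitcodeToFile(M, OS);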
+
+  /// Write the specified thin link bitcode file (i.e., the minimized bitcode
+  /// file) to the given raw output stream, where it will be written in a new
+  /// bitcode block. The thin link bitcode file is used for the thin link step
+  /// and contains only the information needed for it.
+  ///
+  /// ModHash is for use in ThinLTO incremental builds; it is generated while
+  /// writing the IR bitcode file.
+  void WriteThinLinkBitcodeToFile(const Module &M, raw_ostream &Out,
+                                  const ModuleSummaryIndex &Index,
+                                  const ModuleHash &ModHash);
+
+  /// Write the specified module summary index to the given raw output stream,
+  /// where it will be written in a new bitcode block. This is used when
+  /// writing the combined index file for ThinLTO. When writing a subset of the
+  /// index for a distributed backend, provide the \p ModuleToSummariesForIndex
+  /// map.
+  void WriteIndexToFile(const ModuleSummaryIndex &Index, raw_ostream &Out,
+                        const std::map<std::string, GVSummaryMapTy>
+                            *ModuleToSummariesForIndex = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_BITCODE_BITCODEWRITER_H
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitcodeWriterPass.h b/linux-x64/clang/include/llvm/Bitcode/BitcodeWriterPass.h
new file mode 100644
index 0000000..9ac6fba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitcodeWriterPass.h
@@ -0,0 +1,75 @@
+//===-- BitcodeWriterPass.h - Bitcode writing pass --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides a bitcode writing pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODEWRITERPASS_H
+#define LLVM_BITCODE_BITCODEWRITERPASS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Module;
+class ModulePass;
+class raw_ostream;
+
+/// \brief Create and return a pass that writes the module to the specified
+/// ostream. Note that this pass is designed for use with the legacy pass
+/// manager.
+///
+/// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
+/// reproduced when deserialized.
+///
+/// If \c EmitSummaryIndex, emit the summary index (currently for use in ThinLTO
+/// optimization).
+///
+/// If \c EmitModuleHash, compute and emit the module hash in the bitcode
+/// (currently for use in ThinLTO incremental build).
+ModulePass *createBitcodeWriterPass(raw_ostream &Str,
+                                    bool ShouldPreserveUseListOrder = false,
+                                    bool EmitSummaryIndex = false,
+                                    bool EmitModuleHash = false);
+
+/// \brief Pass for writing a module of IR out to a bitcode file.
+///
+/// Note that this is intended for use with the new pass manager. To construct
+/// a pass for the legacy pass manager, use the function above.
+class BitcodeWriterPass : public PassInfoMixin<BitcodeWriterPass> {
+  raw_ostream &OS;
+  bool ShouldPreserveUseListOrder;
+  bool EmitSummaryIndex;
+  bool EmitModuleHash;
+
+public:
+  /// \brief Construct a bitcode writer pass around a particular output stream.
+  ///
+  /// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
+  /// reproduced when deserialized.
+  ///
+  /// If \c EmitSummaryIndex, emit the summary index (currently
+  /// for use in ThinLTO optimization).
+  explicit BitcodeWriterPass(raw_ostream &OS,
+                             bool ShouldPreserveUseListOrder = false,
+                             bool EmitSummaryIndex = false,
+                             bool EmitModuleHash = false)
+      : OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder),
+  EmitSummaryIndex(EmitSummaryIndex), EmitModuleHash(EmitModuleHash) {}
+
+  /// \brief Run the bitcode writer pass, and output the module to the selected
+  /// output stream.
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
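+
+// Illustrative sketch (not upstream code): scheduling the pass in the new pass
+// manager. OS, M and MAM are assumed to be a configured raw_ostream, Module
+// and ModuleAnalysisManager.
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(BitcodeWriterPass(OS));
+//   MPM.run(M, MAM);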
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitstreamReader.h b/linux-x64/clang/include/llvm/Bitcode/BitstreamReader.h
new file mode 100644
index 0000000..b484fa2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitstreamReader.h
@@ -0,0 +1,506 @@
+//===- BitstreamReader.h - Low-level bitstream reader interface -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the BitstreamReader class.  This class can be used to
+// read an arbitrary bitstream, regardless of its contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITSTREAMREADER_H
+#define LLVM_BITCODE_BITSTREAMREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// This class maintains the abbreviations read from a block info block.
+class BitstreamBlockInfo {
+public:
+  /// This contains information emitted to BLOCKINFO_BLOCK blocks. These
+  /// describe abbreviations that all blocks of the specified ID inherit.
+  struct BlockInfo {
+    unsigned BlockID;
+    std::vector<std::shared_ptr<BitCodeAbbrev>> Abbrevs;
+    std::string Name;
+    std::vector<std::pair<unsigned, std::string>> RecordNames;
+  };
+
+private:
+  std::vector<BlockInfo> BlockInfoRecords;
+
+public:
+  /// If there is block info for the specified ID, return it; otherwise return
+  /// null.
+  const BlockInfo *getBlockInfo(unsigned BlockID) const {
+    // Common case, the most recent entry matches BlockID.
+    if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
+      return &BlockInfoRecords.back();
+
+    for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size());
+         i != e; ++i)
+      if (BlockInfoRecords[i].BlockID == BlockID)
+        return &BlockInfoRecords[i];
+    return nullptr;
+  }
+
+  BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
+    if (const BlockInfo *BI = getBlockInfo(BlockID))
+      return *const_cast<BlockInfo*>(BI);
+
+    // Otherwise, add a new record.
+    BlockInfoRecords.emplace_back();
+    BlockInfoRecords.back().BlockID = BlockID;
+    return BlockInfoRecords.back();
+  }
+};
+
+/// This represents a position within a bitstream. There may be multiple
+/// independent cursors reading within one bitstream, each maintaining their
+/// own local state.
+class SimpleBitstreamCursor {
+  ArrayRef<uint8_t> BitcodeBytes;
+  size_t NextChar = 0;
+
+public:
+  /// This is the current data we have pulled from the stream but have not
+  /// returned to the client. This is specifically and intentionally defined to
+  /// follow the word size of the host machine for efficiency. We use word_t in
+  /// places that are aware of this to make it perfectly explicit what is going
+  /// on.
+  using word_t = size_t;
+
+private:
+  word_t CurWord = 0;
+
+  /// This is the number of bits in CurWord that are valid. This is always from
+  /// [0...bits_of(size_t)-1] inclusive.
+  unsigned BitsInCurWord = 0;
+
+public:
+  static const size_t MaxChunkSize = sizeof(word_t) * 8;
+
+  SimpleBitstreamCursor() = default;
+  explicit SimpleBitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
+      : BitcodeBytes(BitcodeBytes) {}
+  explicit SimpleBitstreamCursor(StringRef BitcodeBytes)
+      : BitcodeBytes(reinterpret_cast<const uint8_t *>(BitcodeBytes.data()),
+                     BitcodeBytes.size()) {}
+  explicit SimpleBitstreamCursor(MemoryBufferRef BitcodeBytes)
+      : SimpleBitstreamCursor(BitcodeBytes.getBuffer()) {}
+
+  bool canSkipToPos(size_t pos) const {
+    // pos can be skipped to if it is a valid address or one byte past the end.
+    return pos <= BitcodeBytes.size();
+  }
+
+  bool AtEndOfStream() {
+    return BitsInCurWord == 0 && BitcodeBytes.size() <= NextChar;
+  }
+
+  /// Return the bit # of the bit we are reading.
+  uint64_t GetCurrentBitNo() const {
+    return NextChar*CHAR_BIT - BitsInCurWord;
+  }
+
+  /// Return the byte # of the current bit.
+  uint64_t getCurrentByteNo() const { return GetCurrentBitNo() / 8; }
+
+  ArrayRef<uint8_t> getBitcodeBytes() const { return BitcodeBytes; }
+
+  /// Reset the stream to the specified bit number.
+  void JumpToBit(uint64_t BitNo) {
+    size_t ByteNo = size_t(BitNo/8) & ~(sizeof(word_t)-1);
+    unsigned WordBitNo = unsigned(BitNo & (sizeof(word_t)*8-1));
+    assert(canSkipToPos(ByteNo) && "Invalid location");
+
+    // Move the cursor to the right word.
+    NextChar = ByteNo;
+    BitsInCurWord = 0;
+
+    // Skip over any bits that are already consumed.
+    if (WordBitNo)
+      Read(WordBitNo);
+  }
+
+  /// Get a pointer into the bitstream at the specified byte offset.
+  const uint8_t *getPointerToByte(uint64_t ByteNo, uint64_t NumBytes) {
+    return BitcodeBytes.data() + ByteNo;
+  }
+
+  /// Get a pointer into the bitstream at the specified bit offset.
+  ///
+  /// The bit offset must be on a byte boundary.
+  const uint8_t *getPointerToBit(uint64_t BitNo, uint64_t NumBytes) {
+    assert(!(BitNo % 8) && "Expected bit on byte boundary");
+    return getPointerToByte(BitNo / 8, NumBytes);
+  }
+
+  void fillCurWord() {
+    if (NextChar >= BitcodeBytes.size())
+      report_fatal_error("Unexpected end of file");
+
+    // Read the next word from the stream.
+    const uint8_t *NextCharPtr = BitcodeBytes.data() + NextChar;
+    unsigned BytesRead;
+    if (BitcodeBytes.size() >= NextChar + sizeof(word_t)) {
+      BytesRead = sizeof(word_t);
+      CurWord =
+          support::endian::read<word_t, support::little, support::unaligned>(
+              NextCharPtr);
+    } else {
+      // Short read.
+      BytesRead = BitcodeBytes.size() - NextChar;
+      CurWord = 0;
+      for (unsigned B = 0; B != BytesRead; ++B)
+        CurWord |= uint64_t(NextCharPtr[B]) << (B * 8);
+    }
+    NextChar += BytesRead;
+    BitsInCurWord = BytesRead * 8;
+  }
+
+  word_t Read(unsigned NumBits) {
+    static const unsigned BitsInWord = MaxChunkSize;
+
+    assert(NumBits && NumBits <= BitsInWord &&
+           "Cannot return zero or more than BitsInWord bits!");
+
+    static const unsigned Mask = sizeof(word_t) > 4 ? 0x3f : 0x1f;
+
+    // If the field is fully contained by CurWord, return it quickly.
+    if (BitsInCurWord >= NumBits) {
+      word_t R = CurWord & (~word_t(0) >> (BitsInWord - NumBits));
+
+      // Use a mask to avoid undefined behavior.
+      CurWord >>= (NumBits & Mask);
+
+      BitsInCurWord -= NumBits;
+      return R;
+    }
+
+    word_t R = BitsInCurWord ? CurWord : 0;
+    unsigned BitsLeft = NumBits - BitsInCurWord;
+
+    fillCurWord();
+
+    // If we run out of data, abort.
+    if (BitsLeft > BitsInCurWord)
+      report_fatal_error("Unexpected end of file");
+
+    word_t R2 = CurWord & (~word_t(0) >> (BitsInWord - BitsLeft));
+
+    // Use a mask to avoid undefined behavior.
+    CurWord >>= (BitsLeft & Mask);
+
+    BitsInCurWord -= BitsLeft;
+
+    R |= R2 << (NumBits - BitsLeft);
+
+    return R;
+  }
+
+  uint32_t ReadVBR(unsigned NumBits) {
+    uint32_t Piece = Read(NumBits);
+    if ((Piece & (1U << (NumBits-1))) == 0)
+      return Piece;
+
+    uint32_t Result = 0;
+    unsigned NextBit = 0;
+    while (true) {
+      Result |= (Piece & ((1U << (NumBits-1))-1)) << NextBit;
+
+      if ((Piece & (1U << (NumBits-1))) == 0)
+        return Result;
+
+      NextBit += NumBits-1;
+      Piece = Read(NumBits);
+    }
+  }
+
+  /// Read a VBR that may have a value up to 64 bits in size. The chunk size of
+  /// the VBR must still be <= 32 bits.
+  uint64_t ReadVBR64(unsigned NumBits) {
+    uint32_t Piece = Read(NumBits);
+    if ((Piece & (1U << (NumBits-1))) == 0)
+      return uint64_t(Piece);
+
+    uint64_t Result = 0;
+    unsigned NextBit = 0;
+    while (true) {
+      Result |= uint64_t(Piece & ((1U << (NumBits-1))-1)) << NextBit;
+
+      if ((Piece & (1U << (NumBits-1))) == 0)
+        return Result;
+
+      NextBit += NumBits-1;
+      Piece = Read(NumBits);
+    }
+  }
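+
+  // Worked example (illustrative): a VBR6 chunk carries 5 payload bits plus a
+  // high continuation bit, so a value is emitted as its base-32 digits, low
+  // digit first. 1000000 has base-32 digits 0, 18, 16, 30, giving the chunks
+  // 0b100000, 0b110010, 0b110000, 0b011110; ReadVBR(6) reassembles
+  // 0 + (18 << 5) + (16 << 10) + (30 << 15) == 1000000.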
+
+  void SkipToFourByteBoundary() {
+    // If word_t is 64 bits and we've read less than 32 bits, just dump
+    // the bits we have up to the next 32-bit boundary.
+    if (sizeof(word_t) > 4 &&
+        BitsInCurWord >= 32) {
+      CurWord >>= BitsInCurWord-32;
+      BitsInCurWord = 32;
+      return;
+    }
+
+    BitsInCurWord = 0;
+  }
+
+  /// Skip to the end of the file.
+  void skipToEnd() { NextChar = BitcodeBytes.size(); }
+};
+
+/// When advancing through a bitstream cursor, each advance can discover a few
+/// different kinds of entries:
+struct BitstreamEntry {
+  enum {
+    Error,    // Malformed bitcode was found.
+    EndBlock, // We've reached the end of the current block (or the end of the
+              // file, which is treated like a series of EndBlock records).
+    SubBlock, // This is the start of a new subblock of a specific ID.
+    Record    // This is a record with a specific AbbrevID.
+  } Kind;
+
+  unsigned ID;
+
+  static BitstreamEntry getError() {
+    BitstreamEntry E; E.Kind = Error; return E;
+  }
+
+  static BitstreamEntry getEndBlock() {
+    BitstreamEntry E; E.Kind = EndBlock; return E;
+  }
+
+  static BitstreamEntry getSubBlock(unsigned ID) {
+    BitstreamEntry E; E.Kind = SubBlock; E.ID = ID; return E;
+  }
+
+  static BitstreamEntry getRecord(unsigned AbbrevID) {
+    BitstreamEntry E; E.Kind = Record; E.ID = AbbrevID; return E;
+  }
+};
+
+/// This represents a position within a bitcode file, implemented on top of a
+/// SimpleBitstreamCursor.
+///
+/// Unlike iterators, BitstreamCursors are heavy-weight objects that should not
+/// be passed by value.
+class BitstreamCursor : SimpleBitstreamCursor {
+  // This is the declared size of code values used for the current block, in
+  // bits.
+  unsigned CurCodeSize = 2;
+
+  /// Abbrevs installed in this block.
+  std::vector<std::shared_ptr<BitCodeAbbrev>> CurAbbrevs;
+
+  struct Block {
+    unsigned PrevCodeSize;
+    std::vector<std::shared_ptr<BitCodeAbbrev>> PrevAbbrevs;
+
+    explicit Block(unsigned PCS) : PrevCodeSize(PCS) {}
+  };
+
+  /// This tracks the codesize of parent blocks.
+  SmallVector<Block, 8> BlockScope;
+
+  BitstreamBlockInfo *BlockInfo = nullptr;
+
+public:
+  static const size_t MaxChunkSize = sizeof(word_t) * 8;
+
+  BitstreamCursor() = default;
+  explicit BitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
+      : SimpleBitstreamCursor(BitcodeBytes) {}
+  explicit BitstreamCursor(StringRef BitcodeBytes)
+      : SimpleBitstreamCursor(BitcodeBytes) {}
+  explicit BitstreamCursor(MemoryBufferRef BitcodeBytes)
+      : SimpleBitstreamCursor(BitcodeBytes) {}
+
+  using SimpleBitstreamCursor::canSkipToPos;
+  using SimpleBitstreamCursor::AtEndOfStream;
+  using SimpleBitstreamCursor::getBitcodeBytes;
+  using SimpleBitstreamCursor::GetCurrentBitNo;
+  using SimpleBitstreamCursor::getCurrentByteNo;
+  using SimpleBitstreamCursor::getPointerToByte;
+  using SimpleBitstreamCursor::JumpToBit;
+  using SimpleBitstreamCursor::fillCurWord;
+  using SimpleBitstreamCursor::Read;
+  using SimpleBitstreamCursor::ReadVBR;
+  using SimpleBitstreamCursor::ReadVBR64;
+
+  /// Return the number of bits used to encode an abbrev #.
+  unsigned getAbbrevIDWidth() const { return CurCodeSize; }
+
+  /// Flags that modify the behavior of advance().
+  enum {
+    /// If this flag is used, the advance() method does not automatically pop
+    /// the block scope when the end of a block is reached.
+    AF_DontPopBlockAtEnd = 1,
+
+    /// If this flag is used, abbrev entries are returned just like normal
+    /// records.
+    AF_DontAutoprocessAbbrevs = 2
+  };
+
+  /// Advance the current bitstream, returning the next entry in the stream.
+  BitstreamEntry advance(unsigned Flags = 0) {
+    while (true) {
+      if (AtEndOfStream())
+        return BitstreamEntry::getError();
+
+      unsigned Code = ReadCode();
+      if (Code == bitc::END_BLOCK) {
+        // Pop the end of the block unless Flags tells us not to.
+        if (!(Flags & AF_DontPopBlockAtEnd) && ReadBlockEnd())
+          return BitstreamEntry::getError();
+        return BitstreamEntry::getEndBlock();
+      }
+
+      if (Code == bitc::ENTER_SUBBLOCK)
+        return BitstreamEntry::getSubBlock(ReadSubBlockID());
+
+      if (Code == bitc::DEFINE_ABBREV &&
+          !(Flags & AF_DontAutoprocessAbbrevs)) {
+        // We read and accumulate abbrevs; the client can't do anything with
+        // them anyway.
+        ReadAbbrevRecord();
+        continue;
+      }
+
+      return BitstreamEntry::getRecord(Code);
+    }
+  }
+
+  /// This is a convenience function for clients that don't expect any
+  /// subblocks. This just skips over them automatically.
+  BitstreamEntry advanceSkippingSubblocks(unsigned Flags = 0) {
+    while (true) {
+      // If we found a normal entry, return it.
+      BitstreamEntry Entry = advance(Flags);
+      if (Entry.Kind != BitstreamEntry::SubBlock)
+        return Entry;
+
+      // If we found a sub-block, just skip over it and check the next entry.
+      if (SkipBlock())
+        return BitstreamEntry::getError();
+    }
+  }
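+
+  // Illustrative sketch (not upstream code): the canonical read loop over one
+  // block's contents, where Cursor is an assumed BitstreamCursor positioned
+  // inside a block. Interpretation of Code/Vals is elided.
+  //
+  //   SmallVector<uint64_t, 64> Vals;
+  //   while (true) {
+  //     BitstreamEntry Entry = Cursor.advanceSkippingSubblocks();
+  //     if (Entry.Kind == BitstreamEntry::Error)
+  //       return true; // malformed bitcode
+  //     if (Entry.Kind == BitstreamEntry::EndBlock)
+  //       break;
+  //     Vals.clear();
+  //     unsigned Code = Cursor.readRecord(Entry.ID, Vals);
+  //   }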
+
+  unsigned ReadCode() {
+    return Read(CurCodeSize);
+  }
+
+  // Block header:
+  //    [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
+
+  /// Having read the ENTER_SUBBLOCK code, read the BlockID for the block.
+  unsigned ReadSubBlockID() {
+    return ReadVBR(bitc::BlockIDWidth);
+  }
+
+  /// Having read the ENTER_SUBBLOCK abbrevid and a BlockID, skip over the body
+  /// of this block. If the block record is malformed, return true.
+  bool SkipBlock() {
+    // Read and ignore the codelen value.  Since we are skipping this block, we
+    // don't care what code widths are used inside of it.
+    ReadVBR(bitc::CodeLenWidth);
+    SkipToFourByteBoundary();
+    unsigned NumFourBytes = Read(bitc::BlockSizeWidth);
+
+    // Check that the block wasn't partially defined, and that the offset isn't
+    // bogus.
+    size_t SkipTo = GetCurrentBitNo() + NumFourBytes*4*8;
+    if (AtEndOfStream() || !canSkipToPos(SkipTo/8))
+      return true;
+
+    JumpToBit(SkipTo);
+    return false;
+  }
+
+  /// Having read the ENTER_SUBBLOCK abbrevid, enter the block, and return true
+  /// if the block has an error.
+  bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = nullptr);
+
+  bool ReadBlockEnd() {
+    if (BlockScope.empty()) return true;
+
+    // Block tail:
+    //    [END_BLOCK, <align4bytes>]
+    SkipToFourByteBoundary();
+
+    popBlockScope();
+    return false;
+  }
+
+private:
+  void popBlockScope() {
+    CurCodeSize = BlockScope.back().PrevCodeSize;
+
+    CurAbbrevs = std::move(BlockScope.back().PrevAbbrevs);
+    BlockScope.pop_back();
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Record Processing
+  //===--------------------------------------------------------------------===//
+
+public:
+  /// Return the abbreviation for the specified AbbrevId.
+  const BitCodeAbbrev *getAbbrev(unsigned AbbrevID) {
+    unsigned AbbrevNo = AbbrevID - bitc::FIRST_APPLICATION_ABBREV;
+    if (AbbrevNo >= CurAbbrevs.size())
+      report_fatal_error("Invalid abbrev number");
+    return CurAbbrevs[AbbrevNo].get();
+  }
+
+  /// Read the current record and discard it, returning the code for the record.
+  unsigned skipRecord(unsigned AbbrevID);
+
+  unsigned readRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals,
+                      StringRef *Blob = nullptr);
+
+  //===--------------------------------------------------------------------===//
+  // Abbrev Processing
+  //===--------------------------------------------------------------------===//
+  void ReadAbbrevRecord();
+
+  /// Read and return a block info block from the bitstream. If an error was
+  /// encountered, return None.
+  ///
+  /// \param ReadBlockInfoNames Whether to read block/record name information in
+  /// the BlockInfo block. Only llvm-bcanalyzer uses this.
+  Optional<BitstreamBlockInfo>
+  ReadBlockInfoBlock(bool ReadBlockInfoNames = false);
+
+  /// Set the block info to be used by this BitstreamCursor to interpret
+  /// abbreviated records.
+  void setBlockInfo(BitstreamBlockInfo *BI) { BlockInfo = BI; }
+};
+
+} // end llvm namespace
+
+#endif // LLVM_BITCODE_BITSTREAMREADER_H
diff --git a/linux-x64/clang/include/llvm/Bitcode/BitstreamWriter.h b/linux-x64/clang/include/llvm/Bitcode/BitstreamWriter.h
new file mode 100644
index 0000000..e276db5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/BitstreamWriter.h
@@ -0,0 +1,550 @@
+//===- BitstreamWriter.h - Low-level bitstream writer interface -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the BitstreamWriter class.  This class can be used to
+// write an arbitrary bitstream, regardless of its contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITSTREAMWRITER_H
+#define LLVM_BITCODE_BITSTREAMWRITER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/Endian.h"
+#include <vector>
+
+namespace llvm {
+
+class BitstreamWriter {
+  SmallVectorImpl<char> &Out;
+
+  /// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use.
+  unsigned CurBit;
+
+  /// CurValue - The current value.  Only bits < CurBit are valid.
+  uint32_t CurValue;
+
+  /// CurCodeSize - This is the declared size of code values used for the
+  /// current block, in bits.
+  unsigned CurCodeSize;
+
+  /// BlockInfoCurBID - When emitting a BLOCKINFO_BLOCK, this is the currently
+  /// selected BLOCK ID.
+  unsigned BlockInfoCurBID;
+
+  /// CurAbbrevs - Abbrevs installed in this block.
+  std::vector<std::shared_ptr<BitCodeAbbrev>> CurAbbrevs;
+
+  struct Block {
+    unsigned PrevCodeSize;
+    size_t StartSizeWord;
+    std::vector<std::shared_ptr<BitCodeAbbrev>> PrevAbbrevs;
+    Block(unsigned PCS, size_t SSW) : PrevCodeSize(PCS), StartSizeWord(SSW) {}
+  };
+
+  /// BlockScope - This tracks the current blocks that we have entered.
+  std::vector<Block> BlockScope;
+
+  /// BlockInfo - This contains information emitted to BLOCKINFO_BLOCK blocks.
+  /// These describe abbreviations that all blocks of the specified ID inherit.
+  struct BlockInfo {
+    unsigned BlockID;
+    std::vector<std::shared_ptr<BitCodeAbbrev>> Abbrevs;
+  };
+  std::vector<BlockInfo> BlockInfoRecords;
+
+  void WriteByte(unsigned char Value) {
+    Out.push_back(Value);
+  }
+
+  void WriteWord(unsigned Value) {
+    Value = support::endian::byte_swap<uint32_t, support::little>(Value);
+    Out.append(reinterpret_cast<const char *>(&Value),
+               reinterpret_cast<const char *>(&Value + 1));
+  }
+
+  size_t GetBufferOffset() const { return Out.size(); }
+
+  size_t GetWordIndex() const {
+    size_t Offset = GetBufferOffset();
+    assert((Offset & 3) == 0 && "Not 32-bit aligned");
+    return Offset / 4;
+  }
+
+public:
+  explicit BitstreamWriter(SmallVectorImpl<char> &O)
+    : Out(O), CurBit(0), CurValue(0), CurCodeSize(2) {}
+
+  ~BitstreamWriter() {
+    assert(CurBit == 0 && "Unflushed data remaining");
+    assert(BlockScope.empty() && CurAbbrevs.empty() && "Block imbalance");
+  }
+
+  /// \brief Retrieve the current position in the stream, in bits.
+  uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; }
+
+  /// \brief Retrieve the number of bits currently used to encode an abbrev ID.
+  unsigned GetAbbrevIDWidth() const { return CurCodeSize; }
+
+  //===--------------------------------------------------------------------===//
+  // Basic Primitives for emitting bits to the stream.
+  //===--------------------------------------------------------------------===//
+
+  /// Backpatch a 32-bit word in the output at the given bit offset
+  /// with the specified value.
+  void BackpatchWord(uint64_t BitNo, unsigned NewWord) {
+    using namespace llvm::support;
+    unsigned ByteNo = BitNo / 8;
+    assert((!endian::readAtBitAlignment<uint32_t, little, unaligned>(
+               &Out[ByteNo], BitNo & 7)) &&
+           "Expected to be patching over 0-value placeholders");
+    endian::writeAtBitAlignment<uint32_t, little, unaligned>(
+        &Out[ByteNo], NewWord, BitNo & 7);
+  }
+
+  void BackpatchWord64(uint64_t BitNo, uint64_t Val) {
+    BackpatchWord(BitNo, (uint32_t)Val);
+    BackpatchWord(BitNo + 32, (uint32_t)(Val >> 32));
+  }
+
+  void Emit(uint32_t Val, unsigned NumBits) {
+    assert(NumBits && NumBits <= 32 && "Invalid value size!");
+    assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!");
+    CurValue |= Val << CurBit;
+    if (CurBit + NumBits < 32) {
+      CurBit += NumBits;
+      return;
+    }
+
+    // Add the current word.
+    WriteWord(CurValue);
+
+    if (CurBit)
+      CurValue = Val >> (32-CurBit);
+    else
+      CurValue = 0;
+    CurBit = (CurBit+NumBits) & 31;
+  }
+
+  void FlushToWord() {
+    if (CurBit) {
+      WriteWord(CurValue);
+      CurBit = 0;
+      CurValue = 0;
+    }
+  }
+
+  void EmitVBR(uint32_t Val, unsigned NumBits) {
+    assert(NumBits <= 32 && "Too many bits to emit!");
+    uint32_t Threshold = 1U << (NumBits-1);
+
+    // Emit the bits with VBR encoding, NumBits-1 bits at a time.
+    while (Val >= Threshold) {
+      Emit((Val & ((1 << (NumBits-1))-1)) | (1 << (NumBits-1)), NumBits);
+      Val >>= NumBits-1;
+    }
+
+    Emit(Val, NumBits);
+  }
+
+  void EmitVBR64(uint64_t Val, unsigned NumBits) {
+    assert(NumBits <= 32 && "Too many bits to emit!");
+    if ((uint32_t)Val == Val)
+      return EmitVBR((uint32_t)Val, NumBits);
+
+    uint32_t Threshold = 1U << (NumBits-1);
+
+    // Emit the bits with VBR encoding, NumBits-1 bits at a time.
+    while (Val >= Threshold) {
+      Emit(((uint32_t)Val & ((1 << (NumBits-1))-1)) |
+           (1 << (NumBits-1)), NumBits);
+      Val >>= NumBits-1;
+    }
+
+    Emit((uint32_t)Val, NumBits);
+  }
+
+  /// EmitCode - Emit the specified code.
+  void EmitCode(unsigned Val) {
+    Emit(Val, CurCodeSize);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Block Manipulation
+  //===--------------------------------------------------------------------===//
+
+  /// getBlockInfo - If there is block info for the specified ID, return it;
+  /// otherwise return null.
+  BlockInfo *getBlockInfo(unsigned BlockID) {
+    // Common case, the most recent entry matches BlockID.
+    if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
+      return &BlockInfoRecords.back();
+
+    for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size());
+         i != e; ++i)
+      if (BlockInfoRecords[i].BlockID == BlockID)
+        return &BlockInfoRecords[i];
+    return nullptr;
+  }
+
+  void EnterSubblock(unsigned BlockID, unsigned CodeLen) {
+    // Block header:
+    //    [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
+    EmitCode(bitc::ENTER_SUBBLOCK);
+    EmitVBR(BlockID, bitc::BlockIDWidth);
+    EmitVBR(CodeLen, bitc::CodeLenWidth);
+    FlushToWord();
+
+    size_t BlockSizeWordIndex = GetWordIndex();
+    unsigned OldCodeSize = CurCodeSize;
+
+    // Emit a placeholder, which will be replaced when the block is popped.
+    Emit(0, bitc::BlockSizeWidth);
+
+    CurCodeSize = CodeLen;
+
+    // Push the outer block's abbrev set onto the stack and start out with an
+    // empty abbrev set.
+    BlockScope.emplace_back(OldCodeSize, BlockSizeWordIndex);
+    BlockScope.back().PrevAbbrevs.swap(CurAbbrevs);
+
+    // If there is a blockinfo for this BlockID, add all the predefined abbrevs
+    // to the abbrev list.
+    if (BlockInfo *Info = getBlockInfo(BlockID)) {
+      CurAbbrevs.insert(CurAbbrevs.end(), Info->Abbrevs.begin(),
+                        Info->Abbrevs.end());
+    }
+  }
+
+  void ExitBlock() {
+    assert(!BlockScope.empty() && "Block scope imbalance!");
+    const Block &B = BlockScope.back();
+
+    // Block tail:
+    //    [END_BLOCK, <align4bytes>]
+    EmitCode(bitc::END_BLOCK);
+    FlushToWord();
+
+    // Compute the size of the block, in words, not counting the size field.
+    size_t SizeInWords = GetWordIndex() - B.StartSizeWord - 1;
+    uint64_t BitNo = uint64_t(B.StartSizeWord) * 32;
+
+    // Update the block size field in the header of this sub-block.
+    BackpatchWord(BitNo, SizeInWords);
+
+    // Restore the outer block's code size and abbrev table.
+    CurCodeSize = B.PrevCodeSize;
+    CurAbbrevs = std::move(B.PrevAbbrevs);
+    BlockScope.pop_back();
+  }
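+
+  // Illustrative sketch (not upstream code): the matching enter/emit/exit
+  // sequence for one block; the block ID, code width, record code and SomeVals
+  // are placeholders.
+  //
+  //   Stream.EnterSubblock(bitc::FIRST_APPLICATION_BLOCKID, /*CodeLen=*/3);
+  //   Stream.EmitRecord(/*Code=*/1, SomeVals); // unabbreviated record
+  //   Stream.ExitBlock();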
+
+  //===--------------------------------------------------------------------===//
+  // Record Emission
+  //===--------------------------------------------------------------------===//
+
+private:
+  /// EmitAbbreviatedLiteral - Emit a literal value according to its abbrev
+  /// record.  This is a no-op, since the abbrev specifies the literal to use.
+  template<typename uintty>
+  void EmitAbbreviatedLiteral(const BitCodeAbbrevOp &Op, uintty V) {
+    assert(Op.isLiteral() && "Not a literal");
+    // If the abbrev specifies the literal value to use, don't emit
+    // anything.
+    assert(V == Op.getLiteralValue() &&
+           "Invalid abbrev for record!");
+  }
+
+  /// EmitAbbreviatedField - Emit a single scalar field value with the specified
+  /// encoding.
+  template<typename uintty>
+  void EmitAbbreviatedField(const BitCodeAbbrevOp &Op, uintty V) {
+    assert(!Op.isLiteral() && "Literals should use EmitAbbreviatedLiteral!");
+
+    // Encode the value as we are commanded.
+    switch (Op.getEncoding()) {
+    default: llvm_unreachable("Unknown encoding!");
+    case BitCodeAbbrevOp::Fixed:
+      if (Op.getEncodingData())
+        Emit((unsigned)V, (unsigned)Op.getEncodingData());
+      break;
+    case BitCodeAbbrevOp::VBR:
+      if (Op.getEncodingData())
+        EmitVBR64(V, (unsigned)Op.getEncodingData());
+      break;
+    case BitCodeAbbrevOp::Char6:
+      Emit(BitCodeAbbrevOp::EncodeChar6((char)V), 6);
+      break;
+    }
+  }
+
+  /// EmitRecordWithAbbrevImpl - This is the core implementation of the record
+  /// emission code.  If BlobData is non-null, then it specifies an array of
+  /// data that should be emitted as part of the Blob or Array operand that is
+  /// known to exist at the end of the record. If Code is specified, then
+  /// it is the record code to emit before the Vals, which must not contain
+  /// the code.
+  template <typename uintty>
+  void EmitRecordWithAbbrevImpl(unsigned Abbrev, ArrayRef<uintty> Vals,
+                                StringRef Blob, Optional<unsigned> Code) {
+    const char *BlobData = Blob.data();
+    unsigned BlobLen = (unsigned) Blob.size();
+    unsigned AbbrevNo = Abbrev-bitc::FIRST_APPLICATION_ABBREV;
+    assert(AbbrevNo < CurAbbrevs.size() && "Invalid abbrev #!");
+    const BitCodeAbbrev *Abbv = CurAbbrevs[AbbrevNo].get();
+
+    EmitCode(Abbrev);
+
+    unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
+    if (Code) {
+      assert(e && "Expected non-empty abbreviation");
+      const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
+
+      if (Op.isLiteral())
+        EmitAbbreviatedLiteral(Op, Code.getValue());
+      else {
+        assert(Op.getEncoding() != BitCodeAbbrevOp::Array &&
+               Op.getEncoding() != BitCodeAbbrevOp::Blob &&
+               "Expected literal or scalar");
+        EmitAbbreviatedField(Op, Code.getValue());
+      }
+    }
+
+    unsigned RecordIdx = 0;
+    for (; i != e; ++i) {
+      const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
+      if (Op.isLiteral()) {
+        assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
+        EmitAbbreviatedLiteral(Op, Vals[RecordIdx]);
+        ++RecordIdx;
+      } else if (Op.getEncoding() == BitCodeAbbrevOp::Array) {
+        // Array case.
+        assert(i + 2 == e && "array op not second to last?");
+        const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i);
+
+        // If this record has blob data, emit it, otherwise we must have record
+        // entries to encode this way.
+        if (BlobData) {
+          assert(RecordIdx == Vals.size() &&
+                 "Blob data and record entries specified for array!");
+          // Emit a vbr6 to indicate the number of elements present.
+          EmitVBR(static_cast<uint32_t>(BlobLen), 6);
+
+          // Emit each field.
+          for (unsigned i = 0; i != BlobLen; ++i)
+            EmitAbbreviatedField(EltEnc, (unsigned char)BlobData[i]);
+
+          // Mark the blob data as consumed, for the assertion below.
+          BlobData = nullptr;
+        } else {
+          // Emit a vbr6 to indicate the number of elements present.
+          EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6);
+
+          // Emit each field.
+          for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx)
+            EmitAbbreviatedField(EltEnc, Vals[RecordIdx]);
+        }
+      } else if (Op.getEncoding() == BitCodeAbbrevOp::Blob) {
+        // If this record has blob data, emit it, otherwise we must have record
+        // entries to encode this way.
+
+        if (BlobData) {
+          assert(RecordIdx == Vals.size() &&
+                 "Blob data and record entries specified for blob operand!");
+
+          assert(Blob.data() == BlobData && "BlobData got moved");
+          assert(Blob.size() == BlobLen && "BlobLen got changed");
+          emitBlob(Blob);
+          BlobData = nullptr;
+        } else {
+          emitBlob(Vals.slice(RecordIdx));
+        }
+      } else {  // Single scalar field.
+        assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
+        EmitAbbreviatedField(Op, Vals[RecordIdx]);
+        ++RecordIdx;
+      }
+    }
+    assert(RecordIdx == Vals.size() && "Not all record operands emitted!");
+    assert(BlobData == nullptr &&
+           "Blob data specified for record that doesn't use it!");
+  }
+
+public:
+  /// Emit a blob, including flushing before and tail-padding.
+  template <class UIntTy>
+  void emitBlob(ArrayRef<UIntTy> Bytes, bool ShouldEmitSize = true) {
+    // Emit a vbr6 to indicate the number of elements present.
+    if (ShouldEmitSize)
+      EmitVBR(static_cast<uint32_t>(Bytes.size()), 6);
+
+    // Flush to a 32-bit alignment boundary.
+    FlushToWord();
+
+    // Emit literal bytes.
+    for (const auto &B : Bytes) {
+      assert(isUInt<8>(B) && "Value too large to emit as byte");
+      WriteByte((unsigned char)B);
+    }
+
+    // Align end to 32-bits.
+    while (GetBufferOffset() & 3)
+      WriteByte(0);
+  }
+  void emitBlob(StringRef Bytes, bool ShouldEmitSize = true) {
+    emitBlob(makeArrayRef((const uint8_t *)Bytes.data(), Bytes.size()),
+             ShouldEmitSize);
+  }
+
+  /// EmitRecord - Emit the specified record to the stream, using an abbrev if
+  /// we have one to compress the output.
+  template <typename Container>
+  void EmitRecord(unsigned Code, const Container &Vals, unsigned Abbrev = 0) {
+    if (!Abbrev) {
+      // If we don't have an abbrev to use, emit this in its fully unabbreviated
+      // form.
+      auto Count = static_cast<uint32_t>(makeArrayRef(Vals).size());
+      EmitCode(bitc::UNABBREV_RECORD);
+      EmitVBR(Code, 6);
+      EmitVBR(Count, 6);
+      for (unsigned i = 0, e = Count; i != e; ++i)
+        EmitVBR64(Vals[i], 6);
+      return;
+    }
+
+    EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), StringRef(), Code);
+  }
+
+  /// EmitRecordWithAbbrev - Emit a record with the specified abbreviation.
+  /// Unlike EmitRecord, the code for the record should be included in Vals as
+  /// the first entry.
+  template <typename Container>
+  void EmitRecordWithAbbrev(unsigned Abbrev, const Container &Vals) {
+    EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), StringRef(), None);
+  }
+
+  /// EmitRecordWithBlob - Emit the specified record to the stream, using an
+  /// abbrev that includes a blob at the end.  The blob data to emit is
+  /// specified by the pointer and length specified at the end.  In contrast to
+  /// EmitRecord, this routine expects that the first entry in Vals is the code
+  /// of the record.
+  template <typename Container>
+  void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
+                          StringRef Blob) {
+    EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), Blob, None);
+  }
+  template <typename Container>
+  void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
+                          const char *BlobData, unsigned BlobLen) {
+    return EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals),
+                                    StringRef(BlobData, BlobLen), None);
+  }
+
+  /// EmitRecordWithArray - Just like EmitRecordWithBlob, works with records
+  /// that end with an array.
+  template <typename Container>
+  void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
+                           StringRef Array) {
+    EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), Array, None);
+  }
+  template <typename Container>
+  void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
+                           const char *ArrayData, unsigned ArrayLen) {
+    return EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals),
+                                    StringRef(ArrayData, ArrayLen), None);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Abbrev Emission
+  //===--------------------------------------------------------------------===//
+
+private:
+  // Emit the abbreviation as a DEFINE_ABBREV record.
+  void EncodeAbbrev(const BitCodeAbbrev &Abbv) {
+    EmitCode(bitc::DEFINE_ABBREV);
+    EmitVBR(Abbv.getNumOperandInfos(), 5);
+    for (unsigned i = 0, e = static_cast<unsigned>(Abbv.getNumOperandInfos());
+         i != e; ++i) {
+      const BitCodeAbbrevOp &Op = Abbv.getOperandInfo(i);
+      Emit(Op.isLiteral(), 1);
+      if (Op.isLiteral()) {
+        EmitVBR64(Op.getLiteralValue(), 8);
+      } else {
+        Emit(Op.getEncoding(), 3);
+        if (Op.hasEncodingData())
+          EmitVBR64(Op.getEncodingData(), 5);
+      }
+    }
+  }
+public:
+
+  /// EmitAbbrev - This emits an abbreviation to the stream.  Note that this
+  /// method takes ownership of the specified abbrev.
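+  ///
+  /// A minimal sketch of defining and registering an abbreviation (the operand
+  /// layout is illustrative and \p W is a BitstreamWriter; BitCodeAbbrevOp is
+  /// declared in llvm/Bitcode/BitCodes.h):
+  /// \code
+  ///   auto Abbv = std::make_shared<BitCodeAbbrev>();
+  ///   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // one VBR6 field
+  ///   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));  // array of...
+  ///   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));  // ...char6 chars
+  ///   unsigned MyAbbrev = W.EmitAbbrev(std::move(Abbv));
+  /// \endcode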
+  unsigned EmitAbbrev(std::shared_ptr<BitCodeAbbrev> Abbv) {
+    // Emit the abbreviation as a record.
+    EncodeAbbrev(*Abbv);
+    CurAbbrevs.push_back(std::move(Abbv));
+    return static_cast<unsigned>(CurAbbrevs.size())-1 +
+      bitc::FIRST_APPLICATION_ABBREV;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // BlockInfo Block Emission
+  //===--------------------------------------------------------------------===//
+
+  /// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK.
+  void EnterBlockInfoBlock() {
+    EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, 2);
+    BlockInfoCurBID = ~0U;
+    BlockInfoRecords.clear();
+  }
+private:
+  /// SwitchToBlockID - If we aren't already talking about the specified block
+  /// ID, emit a BLOCKINFO_CODE_SETBID record.
+  void SwitchToBlockID(unsigned BlockID) {
+    if (BlockInfoCurBID == BlockID) return;
+    SmallVector<unsigned, 2> V;
+    V.push_back(BlockID);
+    EmitRecord(bitc::BLOCKINFO_CODE_SETBID, V);
+    BlockInfoCurBID = BlockID;
+  }
+
+  BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
+    if (BlockInfo *BI = getBlockInfo(BlockID))
+      return *BI;
+
+    // Otherwise, add a new record.
+    BlockInfoRecords.emplace_back();
+    BlockInfoRecords.back().BlockID = BlockID;
+    return BlockInfoRecords.back();
+  }
+
+public:
+
+  /// EmitBlockInfoAbbrev - Emit a DEFINE_ABBREV record for the specified
+  /// BlockID.
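+  ///
+  /// A hedged sketch of the usual sequence (the block id and \p Abbv are
+  /// illustrative):
+  /// \code
+  ///   W.EnterBlockInfoBlock();
+  ///   unsigned A = W.EmitBlockInfoAbbrev(bitc::TYPE_BLOCK_ID_NEW, Abbv);
+  ///   W.ExitBlock();
+  /// \endcode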
+  unsigned EmitBlockInfoAbbrev(unsigned BlockID, std::shared_ptr<BitCodeAbbrev> Abbv) {
+    SwitchToBlockID(BlockID);
+    EncodeAbbrev(*Abbv);
+
+    // Add the abbrev to the specified block record.
+    BlockInfo &Info = getOrCreateBlockInfo(BlockID);
+    Info.Abbrevs.push_back(std::move(Abbv));
+
+    return Info.Abbrevs.size()-1+bitc::FIRST_APPLICATION_ABBREV;
+  }
+};
+
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Bitcode/LLVMBitCodes.h b/linux-x64/clang/include/llvm/Bitcode/LLVMBitCodes.h
new file mode 100644
index 0000000..f3500e1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Bitcode/LLVMBitCodes.h
@@ -0,0 +1,613 @@
+//===- LLVMBitCodes.h - Enum values for the LLVM bitcode format -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values for LLVM IR bitcode files.
+//
+// The enum values defined in this file should be considered permanent.  If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_LLVMBITCODES_H
+#define LLVM_BITCODE_LLVMBITCODES_H
+
+#include "llvm/Bitcode/BitCodes.h"
+
+namespace llvm {
+namespace bitc {
+// The only top-level block types are MODULE, IDENTIFICATION, STRTAB and SYMTAB.
+enum BlockIDs {
+  // Blocks
+  MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID,
+
+  // Module sub-block id's.
+  PARAMATTR_BLOCK_ID,
+  PARAMATTR_GROUP_BLOCK_ID,
+
+  CONSTANTS_BLOCK_ID,
+  FUNCTION_BLOCK_ID,
+
+  // Block intended to contain information on the bitcode versioning.
+  // It can be used to provide better error messages when we fail to parse a
+  // bitcode file.
+  IDENTIFICATION_BLOCK_ID,
+
+  VALUE_SYMTAB_BLOCK_ID,
+  METADATA_BLOCK_ID,
+  METADATA_ATTACHMENT_ID,
+
+  TYPE_BLOCK_ID_NEW,
+
+  USELIST_BLOCK_ID,
+
+  MODULE_STRTAB_BLOCK_ID,
+  GLOBALVAL_SUMMARY_BLOCK_ID,
+
+  OPERAND_BUNDLE_TAGS_BLOCK_ID,
+
+  METADATA_KIND_BLOCK_ID,
+
+  STRTAB_BLOCK_ID,
+
+  FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,
+
+  SYMTAB_BLOCK_ID,
+
+  SYNC_SCOPE_NAMES_BLOCK_ID,
+};
+
+/// Identification block contains a string that describes the producer details,
+/// and an epoch that defines the auto-upgrade capability.
+enum IdentificationCodes {
+  IDENTIFICATION_CODE_STRING = 1, // IDENTIFICATION:      [strchr x N]
+  IDENTIFICATION_CODE_EPOCH = 2,  // EPOCH:               [epoch#]
+};
+
+/// The epoch that defines the auto-upgrade compatibility for the bitcode.
+///
+/// LLVM guarantees in a major release that a minor release can read bitcode
+/// generated by previous minor releases. We implement this by making the
+/// reader accept only bitcode with the same epoch, except for the X.0 release,
+/// which also accepts epoch N-1.
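+///
+/// A minimal illustrative check (not the actual reader logic; \c IsDotZero
+/// stands for "this is an X.0 release"):
+/// \code
+///   bool epochIsReadable(unsigned Epoch, bool IsDotZero) {
+///     return Epoch == BITCODE_CURRENT_EPOCH ||
+///            (IsDotZero && Epoch + 1 == BITCODE_CURRENT_EPOCH);
+///   }
+/// \endcode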
+enum { BITCODE_CURRENT_EPOCH = 0 };
+
+/// MODULE blocks have a number of optional fields and subblocks.
+enum ModuleCodes {
+  MODULE_CODE_VERSION = 1,     // VERSION:     [version#]
+  MODULE_CODE_TRIPLE = 2,      // TRIPLE:      [strchr x N]
+  MODULE_CODE_DATALAYOUT = 3,  // DATALAYOUT:  [strchr x N]
+  MODULE_CODE_ASM = 4,         // ASM:         [strchr x N]
+  MODULE_CODE_SECTIONNAME = 5, // SECTIONNAME: [strchr x N]
+
+  // FIXME: Remove DEPLIB in 4.0.
+  MODULE_CODE_DEPLIB = 6, // DEPLIB:      [strchr x N]
+
+  // GLOBALVAR: [pointer type, isconst, initid,
+  //             linkage, alignment, section, visibility, threadlocal]
+  MODULE_CODE_GLOBALVAR = 7,
+
+  // FUNCTION:  [type, callingconv, isproto, linkage, paramattrs, alignment,
+  //             section, visibility, gc, unnamed_addr]
+  MODULE_CODE_FUNCTION = 8,
+
+  // ALIAS: [alias type, aliasee val#, linkage, visibility]
+  MODULE_CODE_ALIAS_OLD = 9,
+
+  MODULE_CODE_GCNAME = 11, // GCNAME: [strchr x N]
+  MODULE_CODE_COMDAT = 12, // COMDAT: [selection_kind, name]
+
+  MODULE_CODE_VSTOFFSET = 13, // VSTOFFSET: [offset]
+
+  // ALIAS: [alias value type, addrspace, aliasee val#, linkage, visibility]
+  MODULE_CODE_ALIAS = 14,
+
+  MODULE_CODE_METADATA_VALUES_UNUSED = 15,
+
+  // SOURCE_FILENAME: [namechar x N]
+  MODULE_CODE_SOURCE_FILENAME = 16,
+
+  // HASH: [5*i32]
+  MODULE_CODE_HASH = 17,
+
+  // IFUNC: [ifunc value type, addrspace, resolver val#, linkage, visibility]
+  MODULE_CODE_IFUNC = 18,
+};
+
+/// PARAMATTR blocks have code for defining a parameter attribute set.
+enum AttributeCodes {
+  // FIXME: Remove `PARAMATTR_CODE_ENTRY_OLD' in 4.0
+  PARAMATTR_CODE_ENTRY_OLD = 1, // ENTRY: [paramidx0, attr0,
+                                //         paramidx1, attr1...]
+  PARAMATTR_CODE_ENTRY = 2,     // ENTRY: [attrgrp0, attrgrp1, ...]
+  PARAMATTR_GRP_CODE_ENTRY = 3  // ENTRY: [grpid, idx, attr0, attr1, ...]
+};
+
+/// TYPE blocks have codes for each type primitive they use.
+enum TypeCodes {
+  TYPE_CODE_NUMENTRY = 1, // NUMENTRY: [numentries]
+
+  // Type Codes
+  TYPE_CODE_VOID = 2,    // VOID
+  TYPE_CODE_FLOAT = 3,   // FLOAT
+  TYPE_CODE_DOUBLE = 4,  // DOUBLE
+  TYPE_CODE_LABEL = 5,   // LABEL
+  TYPE_CODE_OPAQUE = 6,  // OPAQUE
+  TYPE_CODE_INTEGER = 7, // INTEGER: [width]
+  TYPE_CODE_POINTER = 8, // POINTER: [pointee type]
+
+  TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty,
+                              //            paramty x N]
+
+  TYPE_CODE_HALF = 10, // HALF
+
+  TYPE_CODE_ARRAY = 11,  // ARRAY: [numelts, eltty]
+  TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty]
+
+  // These are not with the other floating point types because they're
+  // a late addition, and putting them in the right place breaks
+  // binary compatibility.
+  TYPE_CODE_X86_FP80 = 13,  // X86 LONG DOUBLE
+  TYPE_CODE_FP128 = 14,     // LONG DOUBLE (112 bit mantissa)
+  TYPE_CODE_PPC_FP128 = 15, // PPC LONG DOUBLE (2 doubles)
+
+  TYPE_CODE_METADATA = 16, // METADATA
+
+  TYPE_CODE_X86_MMX = 17, // X86 MMX
+
+  TYPE_CODE_STRUCT_ANON = 18,  // STRUCT_ANON: [ispacked, eltty x N]
+  TYPE_CODE_STRUCT_NAME = 19,  // STRUCT_NAME: [strchr x N]
+  TYPE_CODE_STRUCT_NAMED = 20, // STRUCT_NAMED: [ispacked, eltty x N]
+
+  TYPE_CODE_FUNCTION = 21, // FUNCTION: [vararg, retty, paramty x N]
+
+  TYPE_CODE_TOKEN = 22 // TOKEN
+};
+
+enum OperandBundleTagCode {
+  OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
+};
+
+enum SyncScopeNameCode {
+  SYNC_SCOPE_NAME = 1,
+};
+
+// Value symbol table codes.
+enum ValueSymtabCodes {
+  VST_CODE_ENTRY = 1,   // VST_ENTRY: [valueid, namechar x N]
+  VST_CODE_BBENTRY = 2, // VST_BBENTRY: [bbid, namechar x N]
+  VST_CODE_FNENTRY = 3, // VST_FNENTRY: [valueid, offset, namechar x N]
+  // VST_COMBINED_ENTRY: [valueid, refguid]
+  VST_CODE_COMBINED_ENTRY = 5
+};
+
+// The module path symbol table only has one code (MST_CODE_ENTRY).
+enum ModulePathSymtabCodes {
+  MST_CODE_ENTRY = 1, // MST_ENTRY: [modid, namechar x N]
+  MST_CODE_HASH = 2,  // MST_HASH:  [5*i32]
+};
+
+// The summary section uses different codes in the per-module
+// and combined index cases.
+enum GlobalValueSummarySymtabCodes {
+  // PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
+  //             n x (valueid)]
+  FS_PERMODULE = 1,
+  // PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
+  //                     numrefs x valueid,
+  //                     n x (valueid, hotness)]
+  FS_PERMODULE_PROFILE = 2,
+  // PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid]
+  FS_PERMODULE_GLOBALVAR_INIT_REFS = 3,
+  // COMBINED: [valueid, modid, flags, instcount, numrefs, numrefs x valueid,
+  //            n x (valueid)]
+  FS_COMBINED = 4,
+  // COMBINED_PROFILE: [valueid, modid, flags, instcount, numrefs,
+  //                    numrefs x valueid,
+  //                    n x (valueid, hotness)]
+  FS_COMBINED_PROFILE = 5,
+  // COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
+  FS_COMBINED_GLOBALVAR_INIT_REFS = 6,
+  // ALIAS: [valueid, flags, valueid]
+  FS_ALIAS = 7,
+  // COMBINED_ALIAS: [valueid, modid, flags, valueid]
+  FS_COMBINED_ALIAS = 8,
+  // COMBINED_ORIGINAL_NAME: [original_name_hash]
+  FS_COMBINED_ORIGINAL_NAME = 9,
+  // VERSION of the summary, bumped when adding flags, for instance.
+  FS_VERSION = 10,
+  // The list of llvm.type.test type identifiers used by the following function
+  // that are used other than by an llvm.assume.
+  // [n x typeid]
+  FS_TYPE_TESTS = 11,
+  // The list of virtual calls made by this function using
+  // llvm.assume(llvm.type.test) intrinsics that do not have all constant
+  // integer arguments.
+  // [n x (typeid, offset)]
+  FS_TYPE_TEST_ASSUME_VCALLS = 12,
+  // The list of virtual calls made by this function using
+  // llvm.type.checked.load intrinsics that do not have all constant integer
+  // arguments.
+  // [n x (typeid, offset)]
+  FS_TYPE_CHECKED_LOAD_VCALLS = 13,
+  // Identifies a virtual call made by this function using an
+  // llvm.assume(llvm.type.test) intrinsic with all constant integer arguments.
+  // [typeid, offset, n x arg]
+  FS_TYPE_TEST_ASSUME_CONST_VCALL = 14,
+  // Identifies a virtual call made by this function using an
+  // llvm.type.checked.load intrinsic with all constant integer arguments.
+  // [typeid, offset, n x arg]
+  FS_TYPE_CHECKED_LOAD_CONST_VCALL = 15,
+  // Assigns a GUID to a value ID. This normally appears only in combined
+  // summaries, but it can also appear in per-module summaries for PGO data.
+  // [valueid, guid]
+  FS_VALUE_GUID = 16,
+  // The list of local functions with CFI jump tables. Function names are
+  // strings in strtab.
+  // [n * name]
+  FS_CFI_FUNCTION_DEFS = 17,
+  // The list of external functions with CFI jump tables. Function names are
+  // strings in strtab.
+  // [n * name]
+  FS_CFI_FUNCTION_DECLS = 18,
+  // Per-module summary that also adds relative block frequency to callee info.
+  // PERMODULE_RELBF: [valueid, flags, instcount, numrefs,
+  //                   numrefs x valueid,
+  //                   n x (valueid, relblockfreq)]
+  FS_PERMODULE_RELBF = 19,
+  // Index-wide flags
+  FS_FLAGS = 20,
+  // Maps type identifier to summary information for that type identifier.
+  // TYPE_ID: [typeid, kind, bitwidth, align, size, bitmask, inlinebits,
+  //           n x (typeid, kind, name, numrba,
+  //                numrba x (numarg, numarg x arg, kind, info, byte, bit))]
+  FS_TYPE_ID = 21,
+};
+
+enum MetadataCodes {
+  METADATA_STRING_OLD = 1,     // MDSTRING:      [values]
+  METADATA_VALUE = 2,          // VALUE:         [type num, value num]
+  METADATA_NODE = 3,           // NODE:          [n x md num]
+  METADATA_NAME = 4,           // STRING:        [values]
+  METADATA_DISTINCT_NODE = 5,  // DISTINCT_NODE: [n x md num]
+  METADATA_KIND = 6,           // [n x [id, name]]
+  METADATA_LOCATION = 7,       // [distinct, line, col, scope, inlined-at?]
+  METADATA_OLD_NODE = 8,       // OLD_NODE:      [n x (type num, value num)]
+  METADATA_OLD_FN_NODE = 9,    // OLD_FN_NODE:   [n x (type num, value num)]
+  METADATA_NAMED_NODE = 10,    // NAMED_NODE:    [n x mdnodes]
+  METADATA_ATTACHMENT = 11,    // [m x [value, [n x [id, mdnode]]]
+  METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
+  METADATA_SUBRANGE = 13,      // [distinct, count, lo]
+  METADATA_ENUMERATOR = 14,    // [isUnsigned|distinct, value, name]
+  METADATA_BASIC_TYPE = 15,    // [distinct, tag, name, size, align, enc]
+  METADATA_FILE = 16, // [distinct, filename, directory, checksumkind, checksum]
+  METADATA_DERIVED_TYPE = 17,       // [distinct, ...]
+  METADATA_COMPOSITE_TYPE = 18,     // [distinct, ...]
+  METADATA_SUBROUTINE_TYPE = 19,    // [distinct, flags, types, cc]
+  METADATA_COMPILE_UNIT = 20,       // [distinct, ...]
+  METADATA_SUBPROGRAM = 21,         // [distinct, ...]
+  METADATA_LEXICAL_BLOCK = 22,      // [distinct, scope, file, line, column]
+  METADATA_LEXICAL_BLOCK_FILE = 23, //[distinct, scope, file, discriminator]
+  METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line, exportSymbols]
+  METADATA_TEMPLATE_TYPE = 25,   // [distinct, scope, name, type, ...]
+  METADATA_TEMPLATE_VALUE = 26,  // [distinct, scope, name, type, value, ...]
+  METADATA_GLOBAL_VAR = 27,      // [distinct, ...]
+  METADATA_LOCAL_VAR = 28,       // [distinct, ...]
+  METADATA_EXPRESSION = 29,      // [distinct, n x element]
+  METADATA_OBJC_PROPERTY = 30,   // [distinct, name, file, line, ...]
+  METADATA_IMPORTED_ENTITY = 31, // [distinct, tag, scope, entity, line, name]
+  METADATA_MODULE = 32,          // [distinct, scope, name, ...]
+  METADATA_MACRO = 33,           // [distinct, macinfo, line, name, value]
+  METADATA_MACRO_FILE = 34,      // [distinct, macinfo, line, file, ...]
+  METADATA_STRINGS = 35,         // [count, offset] blob([lengths][chars])
+  METADATA_GLOBAL_DECL_ATTACHMENT = 36, // [valueid, n x [id, mdnode]]
+  METADATA_GLOBAL_VAR_EXPR = 37,        // [distinct, var, expr]
+  METADATA_INDEX_OFFSET = 38,           // [offset]
+  METADATA_INDEX = 39,                  // [bitpos]
+};
+
+// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
+// constant and maintains an implicit current type value.
+enum ConstantsCodes {
+  CST_CODE_SETTYPE = 1,          // SETTYPE:       [typeid]
+  CST_CODE_NULL = 2,             // NULL
+  CST_CODE_UNDEF = 3,            // UNDEF
+  CST_CODE_INTEGER = 4,          // INTEGER:       [intval]
+  CST_CODE_WIDE_INTEGER = 5,     // WIDE_INTEGER:  [n x intval]
+  CST_CODE_FLOAT = 6,            // FLOAT:         [fpval]
+  CST_CODE_AGGREGATE = 7,        // AGGREGATE:     [n x value number]
+  CST_CODE_STRING = 8,           // STRING:        [values]
+  CST_CODE_CSTRING = 9,          // CSTRING:       [values]
+  CST_CODE_CE_BINOP = 10,        // CE_BINOP:      [opcode, opval, opval]
+  CST_CODE_CE_CAST = 11,         // CE_CAST:       [opcode, opty, opval]
+  CST_CODE_CE_GEP = 12,          // CE_GEP:        [n x operands]
+  CST_CODE_CE_SELECT = 13,       // CE_SELECT:     [opval, opval, opval]
+  CST_CODE_CE_EXTRACTELT = 14,   // CE_EXTRACTELT: [opty, opval, opval]
+  CST_CODE_CE_INSERTELT = 15,    // CE_INSERTELT:  [opval, opval, opval]
+  CST_CODE_CE_SHUFFLEVEC = 16,   // CE_SHUFFLEVEC: [opval, opval, opval]
+  CST_CODE_CE_CMP = 17,          // CE_CMP:        [opty, opval, opval, pred]
+  CST_CODE_INLINEASM_OLD = 18,   // INLINEASM:     [sideeffect|alignstack,
+                                 //                 asmstr,conststr]
+  CST_CODE_CE_SHUFVEC_EX = 19,   // SHUFVEC_EX:    [opty, opval, opval, opval]
+  CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP:  [n x operands]
+  CST_CODE_BLOCKADDRESS = 21,    // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
+  CST_CODE_DATA = 22,            // DATA:          [n x elements]
+  CST_CODE_INLINEASM = 23,       // INLINEASM:     [sideeffect|alignstack|
+                                 //                 asmdialect,asmstr,conststr]
+  CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, //      [opty, flags, n x operands]
+};
+
+/// CastOpcodes - These are values used in the bitcode files to encode which
+/// cast a CST_CODE_CE_CAST or a XXX refers to.  The values of these enums
+/// have no fixed relation to the LLVM IR enum values.  Changing these will
+/// break compatibility with old files.
+enum CastOpcodes {
+  CAST_TRUNC = 0,
+  CAST_ZEXT = 1,
+  CAST_SEXT = 2,
+  CAST_FPTOUI = 3,
+  CAST_FPTOSI = 4,
+  CAST_UITOFP = 5,
+  CAST_SITOFP = 6,
+  CAST_FPTRUNC = 7,
+  CAST_FPEXT = 8,
+  CAST_PTRTOINT = 9,
+  CAST_INTTOPTR = 10,
+  CAST_BITCAST = 11,
+  CAST_ADDRSPACECAST = 12
+};
+
+/// BinaryOpcodes - These are values used in the bitcode files to encode which
+/// binop a CST_CODE_CE_BINOP or a XXX refers to.  The values of these enums
+/// have no fixed relation to the LLVM IR enum values.  Changing these will
+/// break compatibility with old files.
+enum BinaryOpcodes {
+  BINOP_ADD = 0,
+  BINOP_SUB = 1,
+  BINOP_MUL = 2,
+  BINOP_UDIV = 3,
+  BINOP_SDIV = 4, // overloaded for FP
+  BINOP_UREM = 5,
+  BINOP_SREM = 6, // overloaded for FP
+  BINOP_SHL = 7,
+  BINOP_LSHR = 8,
+  BINOP_ASHR = 9,
+  BINOP_AND = 10,
+  BINOP_OR = 11,
+  BINOP_XOR = 12
+};
+
+/// These are values used in the bitcode files to encode AtomicRMW operations.
+/// The values of these enums have no fixed relation to the LLVM IR enum
+/// values.  Changing these will break compatibility with old files.
+enum RMWOperations {
+  RMW_XCHG = 0,
+  RMW_ADD = 1,
+  RMW_SUB = 2,
+  RMW_AND = 3,
+  RMW_NAND = 4,
+  RMW_OR = 5,
+  RMW_XOR = 6,
+  RMW_MAX = 7,
+  RMW_MIN = 8,
+  RMW_UMAX = 9,
+  RMW_UMIN = 10
+};
+
+/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
+/// OverflowingBinaryOperator's SubclassOptionalData contents.
+enum OverflowingBinaryOperatorOptionalFlags {
+  OBO_NO_UNSIGNED_WRAP = 0,
+  OBO_NO_SIGNED_WRAP = 1
+};
+
+/// FastMath Flags
+/// This is a fixed layout derived from the bitcode emitted by LLVM 5.0,
+/// intended to decouple the in-memory representation from the serialization.
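+///
+/// A hedged decoding sketch (the FastMathFlags setters illustrate the mapping;
+/// this is not the exact bitcode-reader code):
+/// \code
+///   FastMathFlags decodeFMF(uint64_t Bits) {
+///     FastMathFlags F;
+///     if (Bits & NoNaNs)        F.setNoNaNs();
+///     if (Bits & NoInfs)        F.setNoInfs();
+///     if (Bits & NoSignedZeros) F.setNoSignedZeros();
+///     if (Bits & AllowReassoc)  F.setAllowReassoc();
+///     return F;
+///   }
+/// \endcode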
+enum FastMathMap {
+  UnsafeAlgebra   = (1 << 0), // Legacy
+  NoNaNs          = (1 << 1),
+  NoInfs          = (1 << 2),
+  NoSignedZeros   = (1 << 3),
+  AllowReciprocal = (1 << 4),
+  AllowContract   = (1 << 5),
+  ApproxFunc      = (1 << 6),
+  AllowReassoc    = (1 << 7)
+};
+
+/// PossiblyExactOperatorOptionalFlags - Flags for serializing
+/// PossiblyExactOperator's SubclassOptionalData contents.
+enum PossiblyExactOperatorOptionalFlags { PEO_EXACT = 0 };
+
+/// Encoded AtomicOrdering values.
+enum AtomicOrderingCodes {
+  ORDERING_NOTATOMIC = 0,
+  ORDERING_UNORDERED = 1,
+  ORDERING_MONOTONIC = 2,
+  ORDERING_ACQUIRE = 3,
+  ORDERING_RELEASE = 4,
+  ORDERING_ACQREL = 5,
+  ORDERING_SEQCST = 6
+};
+
+/// Markers and flags for call instructions.
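+///
+/// Most of these are bit positions within the record's calling-convention
+/// operand; a hedged reader-side sketch (\c CCInfo is the raw operand value):
+/// \code
+///   bool IsTail     = CCInfo & (1 << CALL_TAIL);
+///   bool IsMustTail = CCInfo & (1 << CALL_MUSTTAIL);
+/// \endcode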
+enum CallMarkersFlags {
+  CALL_TAIL = 0,
+  CALL_CCONV = 1,
+  CALL_MUSTTAIL = 14,
+  CALL_EXPLICIT_TYPE = 15,
+  CALL_NOTAIL = 16,
+  CALL_FMF = 17 // Call has optional fast-math-flags.
+};
+
+// The function body block (FUNCTION_BLOCK_ID) describes function bodies.  It
+// can contain a constant block (CONSTANTS_BLOCK_ID).
+enum FunctionCodes {
+  FUNC_CODE_DECLAREBLOCKS = 1, // DECLAREBLOCKS: [n]
+
+  FUNC_CODE_INST_BINOP = 2,      // BINOP:      [opcode, ty, opval, opval]
+  FUNC_CODE_INST_CAST = 3,       // CAST:       [opcode, ty, opty, opval]
+  FUNC_CODE_INST_GEP_OLD = 4,    // GEP:        [n x operands]
+  FUNC_CODE_INST_SELECT = 5,     // SELECT:     [ty, opval, opval, opval]
+  FUNC_CODE_INST_EXTRACTELT = 6, // EXTRACTELT: [opty, opval, opval]
+  FUNC_CODE_INST_INSERTELT = 7,  // INSERTELT:  [ty, opval, opval, opval]
+  FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval]
+  FUNC_CODE_INST_CMP = 9,        // CMP:        [opty, opval, opval, pred]
+
+  FUNC_CODE_INST_RET = 10,    // RET:        [opty,opval<both optional>]
+  FUNC_CODE_INST_BR = 11,     // BR:         [bb#, bb#, cond] or [bb#]
+  FUNC_CODE_INST_SWITCH = 12, // SWITCH:     [opty, op0, op1, ...]
+  FUNC_CODE_INST_INVOKE = 13, // INVOKE:     [attr, fnty, op0,op1, ...]
+  // 14 is unused.
+  FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE
+
+  FUNC_CODE_INST_PHI = 16, // PHI:        [ty, val0,bb0, ...]
+  // 17 is unused.
+  // 18 is unused.
+  FUNC_CODE_INST_ALLOCA = 19, // ALLOCA:     [instty, opty, op, align]
+  FUNC_CODE_INST_LOAD = 20,   // LOAD:       [opty, op, align, vol]
+  // 21 is unused.
+  // 22 is unused.
+  FUNC_CODE_INST_VAARG = 23, // VAARG:      [valistty, valist, instty]
+  // This store code encodes the pointer type, rather than the value type. This
+  // is so that information only available in the pointer type (e.g. address
+  // spaces) is retained.
+  FUNC_CODE_INST_STORE_OLD = 24, // STORE:      [ptrty,ptr,val, align, vol]
+  // 25 is unused.
+  FUNC_CODE_INST_EXTRACTVAL = 26, // EXTRACTVAL: [n x operands]
+  FUNC_CODE_INST_INSERTVAL = 27,  // INSERTVAL:  [n x operands]
+  // fcmp/icmp returning Int1Ty or vector of Int1Ty. Same as CMP, exists to
+  // support legacy vicmp/vfcmp instructions.
+  FUNC_CODE_INST_CMP2 = 28, // CMP2:       [opty, opval, opval, pred]
+  // new select on i1 or [N x i1]
+  FUNC_CODE_INST_VSELECT = 29, // VSELECT:    [ty,opval,opval,predty,pred]
+  FUNC_CODE_INST_INBOUNDS_GEP_OLD = 30, // INBOUNDS_GEP: [n x operands]
+  FUNC_CODE_INST_INDIRECTBR = 31,       // INDIRECTBR: [opty, op0, op1, ...]
+  // 32 is unused.
+  FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN
+
+  FUNC_CODE_INST_CALL = 34, // CALL:    [attr, cc, fnty, fnid, args...]
+
+  FUNC_CODE_DEBUG_LOC = 35,        // DEBUG_LOC:  [Line,Col,ScopeVal, IAVal]
+  FUNC_CODE_INST_FENCE = 36,       // FENCE: [ordering, synchscope]
+  FUNC_CODE_INST_CMPXCHG_OLD = 37, // CMPXCHG: [ptrty,ptr,cmp,new, align, vol,
+                                   //           ordering, synchscope]
+  FUNC_CODE_INST_ATOMICRMW = 38,   // ATOMICRMW: [ptrty,ptr,val, operation,
+                                   //             align, vol,
+                                   //             ordering, synchscope]
+  FUNC_CODE_INST_RESUME = 39,      // RESUME:     [opval]
+  FUNC_CODE_INST_LANDINGPAD_OLD =
+      40,                         // LANDINGPAD: [ty,val,val,num,id0,val0...]
+  FUNC_CODE_INST_LOADATOMIC = 41, // LOAD: [opty, op, align, vol,
+                                  //        ordering, synchscope]
+  FUNC_CODE_INST_STOREATOMIC_OLD = 42, // STORE: [ptrty,ptr,val, align, vol
+                                       //         ordering, synchscope]
+  FUNC_CODE_INST_GEP = 43,             // GEP:  [inbounds, n x operands]
+  FUNC_CODE_INST_STORE = 44,       // STORE: [ptrty,ptr,valty,val, align, vol]
+  FUNC_CODE_INST_STOREATOMIC = 45, // STORE: [ptrty,ptr,valty,val, align, vol,
+                                   //         ordering, synchscope]
+  FUNC_CODE_INST_CMPXCHG = 46,     // CMPXCHG: [ptrty,ptr,valty,cmp,new, align,
+                                   //           vol,ordering,synchscope]
+  FUNC_CODE_INST_LANDINGPAD = 47,  // LANDINGPAD: [ty,val,num,id0,val0...]
+  FUNC_CODE_INST_CLEANUPRET = 48,  // CLEANUPRET: [val] or [val,bb#]
+  FUNC_CODE_INST_CATCHRET = 49,    // CATCHRET: [val,bb#]
+  FUNC_CODE_INST_CATCHPAD = 50,    // CATCHPAD: [bb#,bb#,num,args...]
+  FUNC_CODE_INST_CLEANUPPAD = 51,  // CLEANUPPAD: [num,args...]
+  FUNC_CODE_INST_CATCHSWITCH =
+      52, // CATCHSWITCH: [num,args...] or [num,args...,bb]
+  // 53 is unused.
+  // 54 is unused.
+  FUNC_CODE_OPERAND_BUNDLE = 55, // OPERAND_BUNDLE: [tag#, value...]
+};
+
+enum UseListCodes {
+  USELIST_CODE_DEFAULT = 1, // DEFAULT: [index..., value-id]
+  USELIST_CODE_BB = 2       // BB: [index..., bb-id]
+};
+
+enum AttributeKindCodes {
+  // = 0 is unused
+  ATTR_KIND_ALIGNMENT = 1,
+  ATTR_KIND_ALWAYS_INLINE = 2,
+  ATTR_KIND_BY_VAL = 3,
+  ATTR_KIND_INLINE_HINT = 4,
+  ATTR_KIND_IN_REG = 5,
+  ATTR_KIND_MIN_SIZE = 6,
+  ATTR_KIND_NAKED = 7,
+  ATTR_KIND_NEST = 8,
+  ATTR_KIND_NO_ALIAS = 9,
+  ATTR_KIND_NO_BUILTIN = 10,
+  ATTR_KIND_NO_CAPTURE = 11,
+  ATTR_KIND_NO_DUPLICATE = 12,
+  ATTR_KIND_NO_IMPLICIT_FLOAT = 13,
+  ATTR_KIND_NO_INLINE = 14,
+  ATTR_KIND_NON_LAZY_BIND = 15,
+  ATTR_KIND_NO_RED_ZONE = 16,
+  ATTR_KIND_NO_RETURN = 17,
+  ATTR_KIND_NO_UNWIND = 18,
+  ATTR_KIND_OPTIMIZE_FOR_SIZE = 19,
+  ATTR_KIND_READ_NONE = 20,
+  ATTR_KIND_READ_ONLY = 21,
+  ATTR_KIND_RETURNED = 22,
+  ATTR_KIND_RETURNS_TWICE = 23,
+  ATTR_KIND_S_EXT = 24,
+  ATTR_KIND_STACK_ALIGNMENT = 25,
+  ATTR_KIND_STACK_PROTECT = 26,
+  ATTR_KIND_STACK_PROTECT_REQ = 27,
+  ATTR_KIND_STACK_PROTECT_STRONG = 28,
+  ATTR_KIND_STRUCT_RET = 29,
+  ATTR_KIND_SANITIZE_ADDRESS = 30,
+  ATTR_KIND_SANITIZE_THREAD = 31,
+  ATTR_KIND_SANITIZE_MEMORY = 32,
+  ATTR_KIND_UW_TABLE = 33,
+  ATTR_KIND_Z_EXT = 34,
+  ATTR_KIND_BUILTIN = 35,
+  ATTR_KIND_COLD = 36,
+  ATTR_KIND_OPTIMIZE_NONE = 37,
+  ATTR_KIND_IN_ALLOCA = 38,
+  ATTR_KIND_NON_NULL = 39,
+  ATTR_KIND_JUMP_TABLE = 40,
+  ATTR_KIND_DEREFERENCEABLE = 41,
+  ATTR_KIND_DEREFERENCEABLE_OR_NULL = 42,
+  ATTR_KIND_CONVERGENT = 43,
+  ATTR_KIND_SAFESTACK = 44,
+  ATTR_KIND_ARGMEMONLY = 45,
+  ATTR_KIND_SWIFT_SELF = 46,
+  ATTR_KIND_SWIFT_ERROR = 47,
+  ATTR_KIND_NO_RECURSE = 48,
+  ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
+  ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
+  ATTR_KIND_ALLOC_SIZE = 51,
+  ATTR_KIND_WRITEONLY = 52,
+  ATTR_KIND_SPECULATABLE = 53,
+  ATTR_KIND_STRICT_FP = 54,
+  ATTR_KIND_SANITIZE_HWADDRESS = 55,
+  ATTR_KIND_NOCF_CHECK = 56,
+  ATTR_KIND_OPT_FOR_FUZZING = 57,
+};
+
+enum ComdatSelectionKindCodes {
+  COMDAT_SELECTION_KIND_ANY = 1,
+  COMDAT_SELECTION_KIND_EXACT_MATCH = 2,
+  COMDAT_SELECTION_KIND_LARGEST = 3,
+  COMDAT_SELECTION_KIND_NO_DUPLICATES = 4,
+  COMDAT_SELECTION_KIND_SAME_SIZE = 5,
+};
+
+enum StrtabCodes {
+  STRTAB_BLOB = 1,
+};
+
+enum SymtabCodes {
+  SYMTAB_BLOB = 1,
+};
+
+} // End bitc namespace
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/AccelTable.h b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
new file mode 100644
index 0000000..ec850a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
@@ -0,0 +1,373 @@
+//==- include/llvm/CodeGen/AccelTable.h - Accelerator Tables -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing accelerator tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFACCELTABLE_H
+#define LLVM_CODEGEN_DWARFACCELTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/DIE.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DJB.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+/// The DWARF and Apple accelerator tables are indirect hash tables optimized
+/// for null lookup rather than access to known data. The Apple accelerator
+/// tables are a precursor of the newer DWARF v5 accelerator tables. Both
+/// formats share common design ideas.
+///
+/// The Apple accelerator tables are output into an on-disk format that looks
+/// like this:
+///
+/// .------------------.
+/// |  HEADER          |
+/// |------------------|
+/// |  BUCKETS         |
+/// |------------------|
+/// |  HASHES          |
+/// |------------------|
+/// |  OFFSETS         |
+/// |------------------|
+/// |  DATA            |
+/// `------------------'
+///
+/// The header contains a magic number, version, type of hash function,
+/// the number of buckets, total number of hashes, and room for a special struct
+/// of data and the length of that struct.
+///
+/// The buckets contain an index (e.g. 6) into the hashes array. The hashes
+/// section contains all of the 32-bit hash values in contiguous memory, and the
+/// offsets contain the offset into the data area for the particular hash.
+///
+/// For a lookup example, we could hash a function name and take it modulo the
+/// number of buckets, giving us our bucket. From there we take the bucket value
+/// as an index into the hashes table and look at each successive hash as long
+/// as the hash value still has the same modulo result (bucket value) as before.
+/// If we have a match, we look at that same entry in the offsets table and grab
+/// the offset in the data for our final match.
+///
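+/// A hedged sketch of that lookup (the table arrays and \c readData are
+/// hypothetical; only the hashing scheme mirrors the description above):
+/// \code
+///   uint32_t Hash = djbHash(Name);
+///   uint32_t Bucket = Hash % NumBuckets;
+///   for (uint32_t I = Buckets[Bucket]; I < NumHashes; ++I) {
+///     if (Hashes[I] % NumBuckets != Bucket)
+///       break;                       // left this bucket's run of hashes
+///     if (Hashes[I] == Hash)
+///       return readData(Offsets[I]); // candidate match in the data area
+///   }
+/// \endcode
+///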
+/// The DWARF v5 accelerator table consists of zero or more name indices that
+/// are output into an on-disk format that looks like this:
+///
+/// .------------------.
+/// |  HEADER          |
+/// |------------------|
+/// |  CU LIST         |
+/// |------------------|
+/// |  LOCAL TU LIST   |
+/// |------------------|
+/// |  FOREIGN TU LIST |
+/// |------------------|
+/// |  HASH TABLE      |
+/// |------------------|
+/// |  NAME TABLE      |
+/// |------------------|
+/// |  ABBREV TABLE    |
+/// |------------------|
+/// |  ENTRY POOL      |
+/// `------------------'
+///
+/// For the full documentation please refer to the DWARF 5 standard.
+///
+///
+/// This file defines the class template AccelTable, which represents an
+/// abstract view of an Accelerator table, without any notion of an on-disk
+/// layout. This class is parameterized by an entry type, which should derive
+/// from AccelTableData. This is the type of individual entries in the table,
+/// and it should store the data necessary to emit them. AppleAccelTableData is
+/// the base class for Apple Accelerator Table entries, which have a uniform
+/// structure based on a sequence of Atoms. There are different sub-classes
+/// derived from AppleAccelTableData, which differ in the set of Atoms and how
+/// they obtain their values.
+///
+/// An Apple Accelerator Table can be serialized by calling the
+/// emitAppleAccelTable function.
+///
+/// TODO: Add DWARF v5 emission code.
+
+namespace llvm {
+
+class AsmPrinter;
+
+/// Interface to which the different types of accelerator table data have to
+/// conform. It serves as a base class for different values of the template
+/// argument of the AccelTable class template.
+class AccelTableData {
+public:
+  virtual ~AccelTableData() = default;
+
+  bool operator<(const AccelTableData &Other) const {
+    return order() < Other.order();
+  }
+
+  // Subclasses should implement:
+  // static uint32_t hash(StringRef Name);
+
+#ifndef NDEBUG
+  virtual void print(raw_ostream &OS) const = 0;
+#endif
+protected:
+  virtual uint64_t order() const = 0;
+};
+
+/// A base class holding non-template-dependent functionality of the AccelTable
+/// class. Clients should not use this class directly but rather instantiate
+/// AccelTable with a type derived from AccelTableData.
+class AccelTableBase {
+public:
+  using HashFn = uint32_t(StringRef);
+
+  /// Represents a group of entries with identical name (and hence, hash value).
+  struct HashData {
+    DwarfStringPoolEntryRef Name;
+    uint32_t HashValue;
+    std::vector<AccelTableData *> Values;
+    MCSymbol *Sym;
+
+    HashData(DwarfStringPoolEntryRef Name, HashFn *Hash)
+        : Name(Name), HashValue(Hash(Name.getString())) {}
+
+#ifndef NDEBUG
+    void print(raw_ostream &OS) const;
+    void dump() const { print(dbgs()); }
+#endif
+  };
+  using HashList = std::vector<HashData *>;
+  using BucketList = std::vector<HashList>;
+
+protected:
+  /// Allocator for HashData and Values.
+  BumpPtrAllocator Allocator;
+
+  using StringEntries = StringMap<HashData, BumpPtrAllocator &>;
+  StringEntries Entries;
+
+  HashFn *Hash;
+  uint32_t BucketCount;
+  uint32_t UniqueHashCount;
+
+  HashList Hashes;
+  BucketList Buckets;
+
+  void computeBucketCount();
+
+  AccelTableBase(HashFn *Hash) : Entries(Allocator), Hash(Hash) {}
+
+public:
+  void finalize(AsmPrinter *Asm, StringRef Prefix);
+  ArrayRef<HashList> getBuckets() const { return Buckets; }
+  uint32_t getBucketCount() const { return BucketCount; }
+  uint32_t getUniqueHashCount() const { return UniqueHashCount; }
+  uint32_t getUniqueNameCount() const { return Entries.size(); }
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const;
+  void dump() const { print(dbgs()); }
+#endif
+
+  AccelTableBase(const AccelTableBase &) = delete;
+  void operator=(const AccelTableBase &) = delete;
+};
+
+/// This class holds an abstract representation of an Accelerator Table,
+/// consisting of a sequence of buckets, each bucket containing a sequence of
+/// HashData entries. The class is parameterized by the type of entries it
+/// holds. The type template parameter also defines the hash function to use for
+/// hashing names.
+template <typename DataT> class AccelTable : public AccelTableBase {
+public:
+  AccelTable() : AccelTableBase(DataT::hash) {}
+
+  template <typename... Types>
+  void addName(DwarfStringPoolEntryRef Name, Types &&... Args);
+};
+
+template <typename AccelTableDataT>
+template <typename... Types>
+void AccelTable<AccelTableDataT>::addName(DwarfStringPoolEntryRef Name,
+                                          Types &&... Args) {
+  assert(Buckets.empty() && "Already finalized!");
+  // If the string is already in the map then add this DIE to its entry;
+  // otherwise create a new entry.
+  auto Iter = Entries.try_emplace(Name.getString(), Name, Hash).first;
+  assert(Iter->second.Name == Name);
+  Iter->second.Values.push_back(
+      new (Allocator) AccelTableDataT(std::forward<Types>(Args)...));
+}
+
+/// A base class for different implementations of Data classes for Apple
+/// Accelerator Tables. The columns in the table are defined by the static Atoms
+/// variable defined on the subclasses.
+class AppleAccelTableData : public AccelTableData {
+public:
+  /// An Atom defines the form of the data in an Apple accelerator table.
+  /// Conceptually it is a column in the accelerator consisting of a type and a
+  /// specification of the form of its data.
+  struct Atom {
+    /// Atom Type.
+    const uint16_t Type;
+    /// DWARF Form.
+    const uint16_t Form;
+
+    constexpr Atom(uint16_t Type, uint16_t Form) : Type(Type), Form(Form) {}
+
+#ifndef NDEBUG
+    void print(raw_ostream &OS) const;
+    void dump() const { print(dbgs()); }
+#endif
+  };
+  // Subclasses should define:
+  // static constexpr Atom Atoms[];
+
+  virtual void emit(AsmPrinter *Asm) const = 0;
+
+  static uint32_t hash(StringRef Buffer) { return djbHash(Buffer); }
+};
+
+void emitAppleAccelTableImpl(AsmPrinter *Asm, AccelTableBase &Contents,
+                             StringRef Prefix, const MCSymbol *SecBegin,
+                             ArrayRef<AppleAccelTableData::Atom> Atoms);
+
+/// Emit an Apple Accelerator Table consisting of entries in the specified
+/// AccelTable. The DataT template parameter should be derived from
+/// AppleAccelTableData.
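+///
+/// A minimal end-to-end sketch (assumes an AsmPrinter \p Asm, a section-begin
+/// symbol \p SecBegin, and a string pool entry \p NameRef; all illustrative):
+/// \code
+///   AccelTable<AppleAccelTableOffsetData> Table;
+///   Table.addName(NameRef, &SomeDIE); // one entry per (name, DIE) pair
+///   emitAppleAccelTable(Asm, Table, "Names", SecBegin);
+/// \endcode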
+template <typename DataT>
+void emitAppleAccelTable(AsmPrinter *Asm, AccelTable<DataT> &Contents,
+                         StringRef Prefix, const MCSymbol *SecBegin) {
+  static_assert(std::is_convertible<DataT *, AppleAccelTableData *>::value, "");
+  emitAppleAccelTableImpl(Asm, Contents, Prefix, SecBegin, DataT::Atoms);
+}
+
+/// Accelerator table data implementation for simple Apple accelerator tables
+/// with just a DIE reference.
+class AppleAccelTableOffsetData : public AppleAccelTableData {
+public:
+  AppleAccelTableOffsetData(const DIE *D) : Die(D) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Die->getOffset(); }
+
+  const DIE *Die;
+};
+
+/// Accelerator table data implementation for Apple type accelerator tables.
+class AppleAccelTableTypeData : public AppleAccelTableOffsetData {
+public:
+  AppleAccelTableTypeData(const DIE *D) : AppleAccelTableOffsetData(D) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
+      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
+      Atom(dwarf::DW_ATOM_type_flags, dwarf::DW_FORM_data1)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+};
+
+/// Accelerator table data implementation for simple Apple accelerator tables
+/// with a DIE offset but no actual DIE pointer.
+class AppleAccelTableStaticOffsetData : public AppleAccelTableData {
+public:
+  AppleAccelTableStaticOffsetData(uint32_t Offset) : Offset(Offset) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Offset; }
+
+  uint32_t Offset;
+};
+
+/// Accelerator table data implementation for type accelerator tables with
+/// a DIE offset but no actual DIE pointer.
+class AppleAccelTableStaticTypeData : public AppleAccelTableStaticOffsetData {
+public:
+  AppleAccelTableStaticTypeData(uint32_t Offset, uint16_t Tag,
+                                bool ObjCClassIsImplementation,
+                                uint32_t QualifiedNameHash)
+      : AppleAccelTableStaticOffsetData(Offset),
+        QualifiedNameHash(QualifiedNameHash), Tag(Tag),
+        ObjCClassIsImplementation(ObjCClassIsImplementation) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
+      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
+      Atom(5, dwarf::DW_FORM_data1), Atom(6, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Offset; }
+
+  uint32_t QualifiedNameHash;
+  uint16_t Tag;
+  bool ObjCClassIsImplementation;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DWARFACCELTABLE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/Analysis.h b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
new file mode 100644
index 0000000..ba88f1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
@@ -0,0 +1,131 @@
+//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares several CodeGen-specific LLVM IR analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ANALYSIS_H
+#define LLVM_CODEGEN_ANALYSIS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+class GlobalValue;
+class MachineBasicBlock;
+class MachineFunction;
+class TargetLoweringBase;
+class TargetLowering;
+class TargetMachine;
+class SDNode;
+class SDValue;
+class SelectionDAG;
+struct EVT;
+
+/// \brief Compute the linearized index of a member in a nested
+/// aggregate/struct/array.
+///
+/// Given an LLVM IR aggregate type and a sequence of insertvalue or
+/// extractvalue indices that identify a member, return the linearized index of
+/// the start of the member, i.e. the number of elements in memory before the
+/// sought one. This is unrelated to the number of bytes.
+///
+/// \param Ty is the type indexed by \p Indices.
+/// \param Indices is an optional pointer in the indices list to the current
+/// index.
+/// \param IndicesEnd is the end of the indices list.
+/// \param CurIndex is the current index in the recursion.
+///
+/// \returns \p CurIndex plus the linear index in \p Ty given by the indices
+/// list.
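+///
+/// For example (illustrative), with \p Ty = {i32, {float, float}} and
+/// \p Indices = {1, 1}, the result is \p CurIndex plus 2, because two leaf
+/// elements (the i32 and the first float) precede the sought member.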
+unsigned ComputeLinearIndex(Type *Ty,
+                            const unsigned *Indices,
+                            const unsigned *IndicesEnd,
+                            unsigned CurIndex = 0);
+
+inline unsigned ComputeLinearIndex(Type *Ty,
+                                   ArrayRef<unsigned> Indices,
+                                   unsigned CurIndex = 0) {
+  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
+}
+
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// EVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
+///
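+/// For example (illustrative; the exact EVTs are target-dependent), for
+/// Ty = {i32, [2 x float]} this would typically produce ValueVTs = {i32, f32,
+/// f32} with Offsets = {0, 4, 8}.
+///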
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
+                     uint64_t StartingOffset = 0);
+
+/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
+GlobalValue *ExtractTypeInfo(Value *V);
+
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
+                               const TargetLowering &TLI);
+
+/// getFCmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR floating-point condition code.  This includes
+/// consideration of global floating-point math flags.
+///
+ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
+
+/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
+/// return the equivalent code if we're allowed to assume that NaNs won't occur.
+ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
+
+/// getICmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR integer condition code.
+///
+ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);
+
+/// Test, given that the input instruction is in the tail call position, whether
+/// there is an attribute mismatch between the caller and the callee that will
+/// inhibit tail call optimizations.
+/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
+/// is permitted, determines whether it's permitted only if the size of the
+/// caller's and callee's return types match exactly.
+bool attributesPermitTailCall(const Function *F, const Instruction *I,
+                              const ReturnInst *Ret,
+                              const TargetLoweringBase &TLI,
+                              bool *AllowDifferingSizes = nullptr);
+
+/// Test, given that the input instruction is in the tail call position, whether
+/// the return type or any attributes of the function will inhibit tail call
+/// optimization.
+bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
+                                     const ReturnInst *Ret,
+                                     const TargetLoweringBase &TLI);
+
+DenseMap<const MachineBasicBlock *, int>
+getFuncletMembership(const MachineFunction &MF);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
new file mode 100644
index 0000000..3d3bd3a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
@@ -0,0 +1,645 @@
+//===- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the base class for target specific
+// asm writers.  This class primarily handles common functionality used by
+// all asm writers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTER_H
+#define LLVM_CODEGEN_ASMPRINTER_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinterHandler;
+class BasicBlock;
+class BlockAddress;
+class Constant;
+class ConstantArray;
+class DataLayout;
+class DIE;
+class DIEAbbrev;
+class DwarfDebug;
+class GCMetadataPrinter;
+class GCStrategy;
+class GlobalIndirectSymbol;
+class GlobalObject;
+class GlobalValue;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MachineFunction;
+class MachineInstr;
+class MachineJumpTableInfo;
+class MachineLoopInfo;
+class MachineModuleInfo;
+class MachineOptimizationRemarkEmitter;
+class MCAsmInfo;
+class MCCFIInstruction;
+struct MCCodePaddingContext;
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCSection;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbol;
+class MCTargetOptions;
+class MDNode;
+class Module;
+class raw_ostream;
+class TargetLoweringObjectFile;
+class TargetMachine;
+
+/// This class is intended to be used as a driving class for all asm writers.
+class AsmPrinter : public MachineFunctionPass {
+public:
+  /// Target machine description.
+  TargetMachine &TM;
+
+  /// Target Asm Printer information.
+  const MCAsmInfo *MAI;
+
+  /// This is the context for the output file that we are streaming. This owns
+  /// all of the global MC-related objects for the generated translation unit.
+  MCContext &OutContext;
+
+  /// This is the MCStreamer object for the file we are generating. This
+  /// contains the transient state for the current translation unit that we are
+  /// generating (such as the current section etc).
+  std::unique_ptr<MCStreamer> OutStreamer;
+
+  /// The current machine function.
+  const MachineFunction *MF = nullptr;
+
+  /// This is a pointer to the current MachineModuleInfo.
+  MachineModuleInfo *MMI = nullptr;
+
+  /// Optimization remark emitter.
+  MachineOptimizationRemarkEmitter *ORE;
+
+  /// The symbol for the current function. This is recalculated at the beginning
+  /// of each call to runOnMachineFunction().
+  MCSymbol *CurrentFnSym = nullptr;
+
+  /// The symbol used to represent the start of the current function for the
+  /// purpose of calculating its size (e.g. using the .size directive). By
+  /// default, this is equal to CurrentFnSym.
+  MCSymbol *CurrentFnSymForSize = nullptr;
+
+  /// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
+  /// its number of uses by other globals.
+  using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
+  MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
+
+  /// Enable print [latency:throughput] in output.
+  bool EnablePrintSchedInfo = false;
+
+private:
+  MCSymbol *CurrentFnBegin = nullptr;
+  MCSymbol *CurrentFnEnd = nullptr;
+  MCSymbol *CurExceptionSym = nullptr;
+
+  // The garbage collection metadata printer table.
+  void *GCMetadataPrinters = nullptr; // Really a DenseMap.
+
+  /// Emit comments in assembly output if this is true.
+  bool VerboseAsm;
+
+  static char ID;
+
+  /// If VerboseAsm is set, a pointer to the loop info for this function.
+  MachineLoopInfo *LI = nullptr;
+
+  struct HandlerInfo {
+    AsmPrinterHandler *Handler;
+    const char *TimerName;
+    const char *TimerDescription;
+    const char *TimerGroupName;
+    const char *TimerGroupDescription;
+
+    HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
+                const char *TimerDescription, const char *TimerGroupName,
+                const char *TimerGroupDescription)
+        : Handler(Handler), TimerName(TimerName),
+          TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
+          TimerGroupDescription(TimerGroupDescription) {}
+  };
+
+  /// A vector of all debug/EH info emitters we should use. This vector
+  /// maintains ownership of the emitters.
+  SmallVector<HandlerInfo, 1> Handlers;
+
+public:
+  struct SrcMgrDiagInfo {
+    SourceMgr SrcMgr;
+    std::vector<const MDNode *> LocInfos;
+    LLVMContext::InlineAsmDiagHandlerTy DiagHandler;
+    void *DiagContext;
+  };
+
+private:
+  /// Structure for generating diagnostics for inline assembly. Only initialised
+  /// when necessary.
+  mutable std::unique_ptr<SrcMgrDiagInfo> DiagInfo;
+
+  /// If the target supports dwarf debug info, this pointer is non-null.
+  DwarfDebug *DD = nullptr;
+
+  /// True if the current module uses dwarf CFI annotations strictly for
+  /// debugging.
+  bool isCFIMoveForDebugging = false;
+
+protected:
+  explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);
+
+public:
+  ~AsmPrinter() override;
+
+  DwarfDebug *getDwarfDebug() { return DD; }
+  DwarfDebug *getDwarfDebug() const { return DD; }
+
+  uint16_t getDwarfVersion() const;
+  void setDwarfVersion(uint16_t Version);
+
+  bool isPositionIndependent() const;
+
+  /// Return true if assembly output should contain comments.
+  bool isVerbose() const { return VerboseAsm; }
+
+  /// Return a unique ID for the current function.
+  unsigned getFunctionNumber() const;
+
+  MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
+  MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
+  MCSymbol *getCurExceptionSym();
+
+  /// Return information about object file lowering.
+  const TargetLoweringObjectFile &getObjFileLowering() const;
+
+  /// Return information about data layout.
+  const DataLayout &getDataLayout() const;
+
+  /// Return the pointer size from the TargetMachine.
+  unsigned getPointerSize() const;
+
+  /// Return information about subtarget.
+  const MCSubtargetInfo &getSubtargetInfo() const;
+
+  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
+
+  /// Return the current section we are emitting to.
+  const MCSection *getCurrentSection() const;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &Name,
+                         const GlobalValue *GV) const;
+
+  MCSymbol *getSymbol(const GlobalValue *GV) const;
+
+  //===------------------------------------------------------------------===//
+  // XRay instrumentation implementation.
+  //===------------------------------------------------------------------===//
+public:
+  // This describes the kind of sled we're storing in the XRay table.
+  enum class SledKind : uint8_t {
+    FUNCTION_ENTER = 0,
+    FUNCTION_EXIT = 1,
+    TAIL_CALL = 2,
+    LOG_ARGS_ENTER = 3,
+    CUSTOM_EVENT = 4,
+  };
+
+  // The table will contain these structs that point to the sled, the function
+  // containing the sled, and what kind of sled (and whether they should always
+  // be instrumented). We also use a version identifier that the runtime can
+  // use to decide what to do with the sled, depending on its version.
+  struct XRayFunctionEntry {
+    const MCSymbol *Sled;
+    const MCSymbol *Function;
+    SledKind Kind;
+    bool AlwaysInstrument;
+    const class Function *Fn;
+    uint8_t Version;
+
+    void emit(int, MCStreamer *, const MCSymbol *) const;
+  };
+
+  // All the sleds to be emitted.
+  SmallVector<XRayFunctionEntry, 4> Sleds;
+
+  // A unique ID used for ELF sections associated with a particular function.
+  unsigned XRayFnUniqueID = 0;
+
+  // Helper function to record a given XRay sled.
+  void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind,
+                  uint8_t Version = 0);
+
+  /// Emit a table with all XRay instrumentation points.
+  void emitXRayTable();
+
+  //===------------------------------------------------------------------===//
+  // MachineFunctionPass Implementation.
+  //===------------------------------------------------------------------===//
+
+  /// Record analysis usage.
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// Set up the AsmPrinter when we are working on a new module. If your pass
+  /// overrides this, it must make sure to explicitly call this implementation.
+  bool doInitialization(Module &M) override;
+
+  /// Shut down the asmprinter. If you override this in your pass, you must make
+  /// sure to call it explicitly.
+  bool doFinalization(Module &M) override;
+
+  /// Emit the specified function out to the OutStreamer.
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    SetupMachineFunction(MF);
+    EmitFunctionBody();
+    return false;
+  }
+
+  //===------------------------------------------------------------------===//
+  // Coarse grained IR lowering routines.
+  //===------------------------------------------------------------------===//
+
+  /// This should be called when a new MachineFunction is being processed from
+  /// runOnMachineFunction.
+  void SetupMachineFunction(MachineFunction &MF);
+
+  /// This method emits the body and trailer for a function.
+  void EmitFunctionBody();
+
+  void emitCFIInstruction(const MachineInstr &MI);
+
+  void emitFrameAlloc(const MachineInstr &MI);
+
+  void emitStackSizeSection(const MachineFunction &MF);
+
+  enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
+  CFIMoveType needsCFIMoves() const;
+
+  /// Returns false if needsCFIMoves() == CFI_M_EH for any function
+  /// in the module.
+  bool needsOnlyDebugCFIMoves() const { return isCFIMoveForDebugging; }
+
+  bool needsSEHMoves();
+
+  /// Print to the current output stream assembly representations of the
+  /// constants in the constant pool. This is used to print out constants which
+  /// have been "spilled to memory" by the code generator.
+  virtual void EmitConstantPool();
+
+  /// Print assembly representations of the jump tables used by the current
+  /// function to the current output stream.
+  virtual void EmitJumpTableInfo();
+
+  /// Emit the specified global variable to the .s file.
+  virtual void EmitGlobalVariable(const GlobalVariable *GV);
+
+  /// Check to see if the specified global is a special global used by LLVM. If
+  /// so, emit it and return true, otherwise do nothing and return false.
+  bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+
+  /// Emit an alignment directive to the specified power of two boundary. For
+  /// example, if you pass in 3 here, you will get an 8 byte alignment. If a
+  /// global value is specified, and if that global has an explicit alignment
+  /// requested, it will override the alignment request if required for
+  /// correctness.
+  void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+
+  /// Lower the specified LLVM Constant to an MCExpr.
+  virtual const MCExpr *lowerConstant(const Constant *CV);
+
+  /// \brief Print a general LLVM constant to the .s file.
+  void EmitGlobalConstant(const DataLayout &DL, const Constant *CV);
+
+  /// \brief Unnamed constant global variables solely containing a pointer to
+  /// another global variable act like a global variable "proxy", or GOT
+  /// equivalent, i.e., they are only used to hold the address of the latter.
+  /// One optimization is to replace accesses to these proxies by using the GOT
+  /// entry for the final global instead. Hence, we select GOT equivalent
+  /// candidates among all the module's global variables, avoid emitting them
+  /// unnecessarily and finally replace references to them by pc relative
+  /// accesses to GOT entries.
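+  ///
+  /// A sketch of the kind of proxy this refers to (illustrative IR; the names
+  /// are hypothetical):
+  ///
+  /// \code
+  ///   @foo = global i32 42
+  ///   @foo_ptr = private unnamed_addr constant i32* @foo
+  /// \endcode
+  ///
+  /// On targets supporting it, loads of @foo_ptr can instead reference the
+  /// GOT entry for @foo (e.g. foo@GOTPCREL on x86-64), and @foo_ptr itself
+  /// need not be emitted.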
+  void computeGlobalGOTEquivs(Module &M);
+
+  /// \brief Constant expressions using GOT equivalent globals may not be
+  /// eligible for PC relative GOT entry conversion; in such cases we need to
+  /// emit the proxies we previously omitted in EmitGlobalVariable.
+  void emitGlobalGOTEquivs();
+
+  //===------------------------------------------------------------------===//
+  // Overridable Hooks
+  //===------------------------------------------------------------------===//
+
+  // Targets can (or, in the case of EmitInstruction, must) implement these to
+  // customize output.
+
+  /// This virtual method can be overridden by targets that want to emit
+  /// something at the start of their file.
+  virtual void EmitStartOfAsmFile(Module &) {}
+
+  /// This virtual method can be overridden by targets that want to emit
+  /// something at the end of their file.
+  virtual void EmitEndOfAsmFile(Module &) {}
+
+  /// Targets can override this to emit stuff before the first basic block in
+  /// the function.
+  virtual void EmitFunctionBodyStart() {}
+
+  /// Targets can override this to emit stuff after the last basic block in the
+  /// function.
+  virtual void EmitFunctionBodyEnd() {}
+
+  /// Targets can override this to emit stuff at the start of a basic block.
+  /// By default, this method prints the label for the specified
+  /// MachineBasicBlock, an alignment (if present) and a comment describing it
+  /// if appropriate.
+  virtual void EmitBasicBlockStart(const MachineBasicBlock &MBB) const;
+
+  /// Targets can override this to emit stuff at the end of a basic block.
+  virtual void EmitBasicBlockEnd(const MachineBasicBlock &MBB);
+
+  /// Targets should implement this to emit instructions.
+  virtual void EmitInstruction(const MachineInstr *) {
+    llvm_unreachable("EmitInstruction not implemented");
+  }
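+  //
+  // A minimal sketch of the usual override (hypothetical target "Foo"; the
+  // MCInst lowering helper is an assumption, not part of this interface):
+  //
+  //   void FooAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+  //     MCInst TmpInst;
+  //     lowerFooMachineInstrToMCInst(*MI, TmpInst); // hypothetical helper
+  //     EmitToStreamer(*OutStreamer, TmpInst);
+  //   }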
+
+  /// Return the symbol for the specified constant pool entry.
+  virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
+
+  virtual void EmitFunctionEntryLabel();
+
+  virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+  /// Targets can override this to change how global constants that are part of
+  /// a C++ static/global constructor list are emitted.
+  virtual void EmitXXStructor(const DataLayout &DL, const Constant *CV) {
+    EmitGlobalConstant(DL, CV);
+  }
+
+  /// Return true if the basic block has exactly one predecessor and the control
+  /// transfer mechanism between the predecessor and this block is a
+  /// fall-through.
+  virtual bool
+  isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+  /// Targets can override this to customize the output of IMPLICIT_DEF
+  /// instructions in verbose mode.
+  virtual void emitImplicitDef(const MachineInstr *MI) const;
+
+  //===------------------------------------------------------------------===//
+  // Symbol Lowering Routines.
+  //===------------------------------------------------------------------===//
+
+  MCSymbol *createTempSymbol(const Twine &Name) const;
+
+  /// Return the MCSymbol for a private symbol with the global value's name as
+  /// its base, with the specified suffix.
+  MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+                                         StringRef Suffix) const;
+
+  /// Return the MCSymbol for the specified ExternalSymbol.
+  MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
+
+  /// Return the symbol for the specified jump table entry.
+  MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
+
+  /// Return the symbol for the specified jump table .set
+  /// FIXME: privatize to AsmPrinter.
+  MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
+
+  /// Return the MCSymbol used to satisfy BlockAddress uses of the specified
+  /// basic block.
+  MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
+  MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
+
+  //===------------------------------------------------------------------===//
+  // Emission Helper Routines.
+  //===------------------------------------------------------------------===//
+
+  /// This is just a convenient handler for printing offsets.
+  void printOffset(int64_t Offset, raw_ostream &OS) const;
+
+  /// Emit a byte directive and value.
+  void emitInt8(int Value) const;
+
+  /// Emit a short directive and value.
+  void emitInt16(int Value) const;
+
+  /// Emit a long directive and value.
+  void emitInt32(int Value) const;
+
+  /// Emit something like ".long Hi-Lo" where the size in bytes of the directive
+  /// is specified by Size and Hi/Lo specify the labels.  This implicitly uses
+  /// .set if it is available.
+  void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
+                           unsigned Size) const;
+
+  /// Emit something like ".uleb128 Hi-Lo".
+  void EmitLabelDifferenceAsULEB128(const MCSymbol *Hi,
+                                    const MCSymbol *Lo) const;
+
+  /// Emit something like ".long Label+Offset" where the size in bytes of the
+  /// directive is specified by Size and Label specifies the label.  This
+  /// implicitly uses .set if it is available.
+  void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+                           unsigned Size, bool IsSectionRelative = false) const;
+
+  /// Emit something like ".long Label" where the size in bytes of the directive
+  /// is specified by Size and Label specifies the label.
+  void EmitLabelReference(const MCSymbol *Label, unsigned Size,
+                          bool IsSectionRelative = false) const {
+    EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
+  }
+
+  /// Emit something like ".long Label + Offset".
+  void EmitDwarfOffset(const MCSymbol *Label, uint64_t Offset) const;
+
+  //===------------------------------------------------------------------===//
+  // Dwarf Emission Helper Routines
+  //===------------------------------------------------------------------===//
+
+  /// Emit the specified signed leb128 value.
+  void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
+
+  /// Emit the specified unsigned leb128 value.
+  void EmitULEB128(uint64_t Value, const char *Desc = nullptr) const;
+
+  /// Emit a .byte 42 directive that corresponds to an encoding.  If verbose
+  /// assembly output is enabled, we output comments describing the encoding.
+  /// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
+  void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
+
+  /// Return the size of the encoding in bytes.
+  unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
+
+  /// Emit reference to a ttype global with a specified encoding.
+  void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
+
+  /// Emit a reference to a symbol for use in dwarf. Different object formats
+  /// represent this in different ways: some use a relocation, others encode
+  /// the label offset in its section.
+  void emitDwarfSymbolReference(const MCSymbol *Label,
+                                bool ForceOffset = false) const;
+
+  /// Emit the 4-byte offset of a string from the start of its section.
+  ///
+  /// When possible, emit a DwarfStringPool section offset without any
+  /// relocations, and without using the symbol.  Otherwise, defers to \a
+  /// emitDwarfSymbolReference().
+  void emitDwarfStringOffset(DwarfStringPoolEntry S) const;
+
+  /// Emit the 4-byte offset of a string from the start of its section.
+  void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
+    emitDwarfStringOffset(S.getEntry());
+  }
+
+  /// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
+  virtual unsigned getISAEncoding() { return 0; }
+
+  /// Emit the directive and value for a debug thread local expression.
+  ///
+  /// \p Value - The value to emit.
+  /// \p Size - The size of the integer (in bytes) to emit.
+  virtual void EmitDebugThreadLocal(const MCExpr *Value, unsigned Size) const;
+
+  //===------------------------------------------------------------------===//
+  // Dwarf Lowering Routines
+  //===------------------------------------------------------------------===//
+
+  /// \brief Emit a frame instruction to describe the layout of the frame.
+  void emitCFIInstruction(const MCCFIInstruction &Inst) const;
+
+  /// \brief Emit Dwarf abbreviation table.
+  template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
+    // For each abbreviation.
+    for (const auto &Abbrev : Abbrevs)
+      emitDwarfAbbrev(*Abbrev);
+
+    // Mark end of abbreviations.
+    EmitULEB128(0, "EOM(3)");
+  }
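+  //
+  // Typical use (a sketch; the vector of std::unique_ptr<DIEAbbrev> is an
+  // assumed setup -- any range of pointer-like elements works):
+  //
+  //   std::vector<std::unique_ptr<DIEAbbrev>> Abbrevs = /* ... */;
+  //   AP.emitDwarfAbbrevs(Abbrevs); // each abbrev, then the 0 terminator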
+
+  void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;
+
+  /// \brief Recursively emit Dwarf DIE tree.
+  void emitDwarfDIE(const DIE &Die) const;
+
+  //===------------------------------------------------------------------===//
+  // Inline Asm Support
+  //===------------------------------------------------------------------===//
+
+  // These are hooks that targets can override to implement inline asm
+  // support.  These should probably be moved out of AsmPrinter someday.
+
+  /// Print information related to the specified machine instr that is
+  /// independent of the operand, and may be independent of the instr itself.
+  /// This can be useful for portably encoding the comment character or other
+  /// bits of target-specific knowledge into the asmstrings.  The syntax used is
+  /// ${:comment}.  Targets can override this to add support for their own
+  /// strange codes.
+  virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
+                            const char *Code) const;
+
+  /// Print the specified operand of MI, an INLINEASM instruction, using the
+  /// specified assembler variant.  Targets should override this to format as
+  /// appropriate.  This method can return true if the operand is erroneous.
+  virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                               unsigned AsmVariant, const char *ExtraCode,
+                               raw_ostream &OS);
+
+  /// Print the specified operand of MI, an INLINEASM instruction, using the
+  /// specified assembler variant as an address. Targets should override this to
+  /// format as appropriate.  This method can return true if the operand is
+  /// erroneous.
+  virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+                                     unsigned AsmVariant, const char *ExtraCode,
+                                     raw_ostream &OS);
+
+  /// Let the target do anything it needs to do before emitting inline asm.
+  virtual void emitInlineAsmStart() const;
+
+  /// Let the target do anything it needs to do after emitting inline asm.
+  /// This callback can be used to restore the original mode in case the
+  /// inline asm contains directives to switch modes.
+  /// \p StartInfo - the original subtarget info before inline asm
+  /// \p EndInfo   - the final subtarget info after parsing the inline asm,
+  ///                or NULL if the value is unknown.
+  virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+                                const MCSubtargetInfo *EndInfo) const;
+
+private:
+  /// Private state for PrintSpecial(): used to assign a unique ID to each
+  /// machine instruction.
+  mutable const MachineInstr *LastMI = nullptr;
+  mutable unsigned LastFn = 0;
+  mutable unsigned Counter = ~0U;
+
+  /// This method emits the header for the current function.
+  virtual void EmitFunctionHeader();
+
+  /// Emit a blob of inline asm to the output streamer.
+  void
+  EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
+                const MCTargetOptions &MCOptions,
+                const MDNode *LocMDNode = nullptr,
+                InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
+
+  /// This method formats and emits the specified machine instruction that is an
+  /// inline asm.
+  void EmitInlineAsm(const MachineInstr *MI) const;
+
+  //===------------------------------------------------------------------===//
+  // Internal Implementation Details
+  //===------------------------------------------------------------------===//
+
+  /// This emits visibility information about a symbol, if this is supported
+  /// by the target.
+  void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
+                      bool IsDefinition = true) const;
+
+  void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
+
+  void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
+                          const MachineBasicBlock *MBB, unsigned uid) const;
+  void EmitLLVMUsedList(const ConstantArray *InitList);
+  /// Emit llvm.ident metadata in an '.ident' directive.
+  void EmitModuleIdents(Module &M);
+  void EmitXXStructorList(const DataLayout &DL, const Constant *List,
+                          bool isCtor);
+
+  GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
+  /// Emit GlobalAlias or GlobalIFunc.
+  void emitGlobalIndirectSymbol(Module &M,
+                                const GlobalIndirectSymbol& GIS);
+  void setupCodePaddingContext(const MachineBasicBlock &MBB,
+                               MCCodePaddingContext &Context) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ASMPRINTER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
new file mode 100644
index 0000000..1f9c96b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -0,0 +1,65 @@
+//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/AtomicOrdering.h"
+
+namespace llvm {
+
+class AtomicRMWInst;
+class Value;
+
+/// Parameters (see the expansion example below):
+/// (the builder, %addr, %loaded, %new_val, ordering,
+///  /* OUT */ %success, /* OUT */ %new_loaded)
+using CreateCmpXchgInstFun =
+    function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
+                      Value *&, Value *&)>;
+
+/// \brief Expand an atomic RMW instruction into a loop utilizing
+/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
+/// instructions in the first place and that there isn't another, better,
+/// transformation available (for example AArch32/AArch64 have linked loads).
+///
+/// This is useful in passes which can't rewrite the more exotic RMW
+/// instructions directly into platform specific intrinsics (because, say,
+/// those intrinsics don't exist). If such a pass is able to expand cmpxchg
+/// instructions directly however, then, with this function, it could avoid two
+/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
+/// specific example would be PNaCl's `RewriteAtomics` pass.
+///
+/// Given: atomicrmw some_op iN* %addr, iN %incr ordering
+///
+/// The standard expansion we produce is:
+///     [...]
+///     %init_loaded = load atomic iN* %addr
+///     br label %loop
+/// loop:
+///     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
+///     %new = some_op iN %loaded, %incr
+/// ; This is what -atomic-expand will produce using this function on i686
+/// targets:
+///     %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
+///     %new_loaded = extractvalue { iN, i1 } %pair, 0
+///     %success = extractvalue { iN, i1 } %pair, 1
+/// ; End callback produced IR
+///     br i1 %success, label %atomicrmw.end, label %loop
+/// atomicrmw.end:
+///     [...]
+///
+/// Returns true if the containing function was modified.
+bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
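+
+// A minimal sketch (an assumption, not part of the original header) of a
+// CreateCmpXchgInstFun that emits a plain cmpxchg, producing exactly the
+// "callback produced IR" in the expansion above; passing it to
+// expandAtomicRMWToCmpXchg() yields that loop.
+inline void createCmpXchgInstFunSketch(IRBuilder<> &Builder, Value *Addr,
+                                       Value *Loaded, Value *NewVal,
+                                       AtomicOrdering MemOpOrder,
+                                       Value *&Success, Value *&NewLoaded) {
+  // %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
+  Value *Pair = Builder.CreateAtomicCmpXchg(
+      Addr, Loaded, NewVal, MemOpOrder,
+      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
+  // %new_loaded = extractvalue { iN, i1 } %pair, 0
+  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
+  // %success = extractvalue { iN, i1 } %pair, 1
+  Success = Builder.CreateExtractValue(Pair, 1, "success");
+}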
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
new file mode 100644
index 0000000..9096263
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
@@ -0,0 +1,1383 @@
+//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides a helper that implements much of the TTI interface in
+/// terms of the target-independent code generator and TargetLowering
+/// interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
+#define LLVM_CODEGEN_BASICTTIIMPL_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/TargetTransformInfoImpl.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+class Function;
+class GlobalValue;
+class LLVMContext;
+class ScalarEvolution;
+class SCEV;
+class TargetMachine;
+
+extern cl::opt<unsigned> PartialUnrollingThreshold;
+
+/// \brief Base class which can be used to help build a TTI implementation.
+///
+/// This class provides as much implementation of the TTI interface as is
+/// possible using the target independent parts of the code generator.
+///
+/// In order to subclass it, your class must implement a getST() method to
+/// return the subtarget, and a getTLI() method to return the target lowering.
+/// We need these methods implemented in the derived class so that this class
+/// doesn't have to duplicate storage for them.
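+///
+/// A minimal subclass sketch (illustrative; "FooTTIImpl", "FooSubtarget" and
+/// "FooTargetLowering" are hypothetical names, and the constructor is omitted
+/// for brevity):
+///
+/// \code
+///   class FooTTIImpl : public BasicTTIImplBase<FooTTIImpl> {
+///     const FooSubtarget *ST;
+///     const FooTargetLowering *TLI;
+///
+///   public:
+///     const FooSubtarget *getST() const { return ST; }
+///     const FooTargetLowering *getTLI() const { return TLI; }
+///   };
+/// \endcode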
+template <typename T>
+class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
+private:
+  using BaseT = TargetTransformInfoImplCRTPBase<T>;
+  using TTI = TargetTransformInfo;
+
+  /// Estimate the cost of a shuffle as a sequence of extract and insert
+  /// operations.
+  unsigned getPermuteShuffleOverhead(Type *Ty) {
+    assert(Ty->isVectorTy() && "Can only shuffle vectors");
+    unsigned Cost = 0;
+    // Shuffle cost is equal to the cost of extracting elements from the
+    // source vectors plus the cost of inserting them into the result vector.
+
+    // E.g., a <4 x float> shuffle with mask <0,5,2,7> extracts from index 0 of
+    // the first vector, index 1 of the second vector, index 2 of the first
+    // vector and, finally, index 3 of the second vector, and inserts them at
+    // indices <0,1,2,3> of the result vector.
+    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+      Cost += static_cast<T *>(this)
+                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      Cost += static_cast<T *>(this)
+                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+    }
+    return Cost;
+  }
+
+  /// \brief Local query method delegates up to T which *must* implement this!
+  const TargetSubtargetInfo *getST() const {
+    return static_cast<const T *>(this)->getST();
+  }
+
+  /// \brief Local query method delegates up to T which *must* implement this!
+  const TargetLoweringBase *getTLI() const {
+    return static_cast<const T *>(this)->getTLI();
+  }
+
+  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
+    switch (M) {
+      case TTI::MIM_Unindexed:
+        return ISD::UNINDEXED;
+      case TTI::MIM_PreInc:
+        return ISD::PRE_INC;
+      case TTI::MIM_PreDec:
+        return ISD::PRE_DEC;
+      case TTI::MIM_PostInc:
+        return ISD::POST_INC;
+      case TTI::MIM_PostDec:
+        return ISD::POST_DEC;
+    }
+    llvm_unreachable("Unexpected MemIndexedMode");
+  }
+
+protected:
+  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
+      : BaseT(DL) {}
+
+  using TargetTransformInfoImplBase::DL;
+
+public:
+  /// \name Scalar TTI Implementations
+  /// @{
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                      unsigned BitWidth, unsigned AddressSpace,
+                                      unsigned Alignment, bool *Fast) const {
+    EVT E = EVT::getIntegerVT(Context, BitWidth);
+    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment,
+                                                    Fast);
+  }
+
+  bool hasBranchDivergence() { return false; }
+
+  bool isSourceOfDivergence(const Value *V) { return false; }
+
+  bool isAlwaysUniform(const Value *V) { return false; }
+
+  unsigned getFlatAddressSpace() {
+    // Return an invalid address space.
+    return -1;
+  }
+
+  bool isLegalAddImmediate(int64_t imm) {
+    return getTLI()->isLegalAddImmediate(imm);
+  }
+
+  bool isLegalICmpImmediate(int64_t imm) {
+    return getTLI()->isLegalICmpImmediate(imm);
+  }
+
+  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                             bool HasBaseReg, int64_t Scale,
+                             unsigned AddrSpace, Instruction *I = nullptr) {
+    TargetLoweringBase::AddrMode AM;
+    AM.BaseGV = BaseGV;
+    AM.BaseOffs = BaseOffset;
+    AM.HasBaseReg = HasBaseReg;
+    AM.Scale = Scale;
+    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
+  }
+
+  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
+                          const DataLayout &DL) const {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
+  }
+
+  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
+                           const DataLayout &DL) const {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
+  }
+
+  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
+    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
+  }
+
+  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+    TargetLoweringBase::AddrMode AM;
+    AM.BaseGV = BaseGV;
+    AM.BaseOffs = BaseOffset;
+    AM.HasBaseReg = HasBaseReg;
+    AM.Scale = Scale;
+    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
+  }
+
+  bool isTruncateFree(Type *Ty1, Type *Ty2) {
+    return getTLI()->isTruncateFree(Ty1, Ty2);
+  }
+
+  bool isProfitableToHoist(Instruction *I) {
+    return getTLI()->isProfitableToHoist(I);
+  }
+
+  bool useAA() const { return getST()->useAA(); }
+
+  bool isTypeLegal(Type *Ty) {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isTypeLegal(VT);
+  }
+
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) {
+    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
+  }
+
+  int getExtCost(const Instruction *I, const Value *Src) {
+    if (getTLI()->isExtFree(I))
+      return TargetTransformInfo::TCC_Free;
+
+    if (isa<ZExtInst>(I) || isa<SExtInst>(I))
+      if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
+        if (getTLI()->isExtLoad(LI, I, DL))
+          return TargetTransformInfo::TCC_Free;
+
+    return TargetTransformInfo::TCC_Basic;
+  }
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<const Value *> Arguments) {
+    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
+  }
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Type *> ParamTys) {
+    if (IID == Intrinsic::cttz) {
+      if (getTLI()->isCheapToSpeculateCttz())
+        return TargetTransformInfo::TCC_Basic;
+      return TargetTransformInfo::TCC_Expensive;
+    }
+
+    if (IID == Intrinsic::ctlz) {
+      if (getTLI()->isCheapToSpeculateCtlz())
+        return TargetTransformInfo::TCC_Basic;
+      return TargetTransformInfo::TCC_Expensive;
+    }
+
+    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
+  }
+
+  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                            unsigned &JumpTableSize) {
+    /// Try to find the estimated number of clusters. Note that the number of
+    /// clusters identified in this function could be different from the actual
+    /// numbers found in lowering. This function ignores switches that are
+    /// lowered with a mix of jump table / bit test / BTree. This function was
+    /// initially intended to be used when estimating the cost of a switch in
+    /// the inline cost heuristic, but it's a generic cost model to be used in
+    /// other places (e.g., in loop unrolling).
+    unsigned N = SI.getNumCases();
+    const TargetLoweringBase *TLI = getTLI();
+    const DataLayout &DL = this->getDataLayout();
+
+    JumpTableSize = 0;
+    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
+
+    // Early exit if neither a jump table nor a bit test is allowed.
+    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
+      return N;
+
+    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
+    APInt MinCaseVal = MaxCaseVal;
+    for (auto CI : SI.cases()) {
+      const APInt &CaseVal = CI.getCaseValue()->getValue();
+      if (CaseVal.sgt(MaxCaseVal))
+        MaxCaseVal = CaseVal;
+      if (CaseVal.slt(MinCaseVal))
+        MinCaseVal = CaseVal;
+    }
+
+    // Check if suitable for a bit test
+    if (N <= DL.getIndexSizeInBits(0u)) {
+      SmallPtrSet<const BasicBlock *, 4> Dests;
+      for (auto I : SI.cases())
+        Dests.insert(I.getCaseSuccessor());
+
+      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
+                                     DL))
+        return 1;
+    }
+
+    // Check if suitable for a jump table.
+    if (IsJTAllowed) {
+      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
+        return N;
+      uint64_t Range =
+          (MaxCaseVal - MinCaseVal)
+              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
+      // Check whether a range of clusters is dense enough for a jump table
+      if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
+        JumpTableSize = Range;
+        return 1;
+      }
+    }
+    return N;
+  }
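+  //
+  // Worked example (illustrative, and subject to the target's hooks): for a
+  // switch with cases {0, 1, 2, 3, 100}, Range is 101. If the target deems
+  // that range too sparse for a jump table and too wide for a bit test, this
+  // returns N = 5, i.e. one cluster per case.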
+
+  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
+
+  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
+
+  bool shouldBuildLookupTables() {
+    const TargetLoweringBase *TLI = getTLI();
+    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+  }
+
+  bool haveFastSqrt(Type *Ty) {
+    const TargetLoweringBase *TLI = getTLI();
+    EVT VT = TLI->getValueType(DL, Ty);
+    return TLI->isTypeLegal(VT) &&
+           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
+  }
+
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
+    return true;
+  }
+
+  unsigned getFPOpCost(Type *Ty) {
+    // Check whether FADD is available, as a proxy for floating-point in
+    // general.
+    const TargetLoweringBase *TLI = getTLI();
+    EVT VT = TLI->getValueType(DL, Ty);
+    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
+      return TargetTransformInfo::TCC_Basic;
+    return TargetTransformInfo::TCC_Expensive;
+  }
+
+  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
+    const TargetLoweringBase *TLI = getTLI();
+    switch (Opcode) {
+    default: break;
+    case Instruction::Trunc:
+      if (TLI->isTruncateFree(OpTy, Ty))
+        return TargetTransformInfo::TCC_Free;
+      return TargetTransformInfo::TCC_Basic;
+    case Instruction::ZExt:
+      if (TLI->isZExtFree(OpTy, Ty))
+        return TargetTransformInfo::TCC_Free;
+      return TargetTransformInfo::TCC_Basic;
+    }
+
+    return BaseT::getOperationCost(Opcode, Ty, OpTy);
+  }
+
+  unsigned getInliningThresholdMultiplier() { return 1; }
+
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               TTI::UnrollingPreferences &UP) {
+    // This unrolling functionality is target independent, but to provide some
+    // motivation for its intended use, for x86:
+
+    // According to the Intel 64 and IA-32 Architectures Optimization Reference
+    // Manual, Intel Core models and later have a loop stream detector (and
+    // associated uop queue) that can benefit from partial unrolling.
+    // The relevant requirements are:
+    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
+    //    taken, and none of them may be calls.
+    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+    // According to the Software Optimization Guide for AMD Family 15h
+    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
+    // and loop buffer which can benefit from partial unrolling.
+    // The relevant requirements are:
+    //  - The loop must have fewer than 16 branches
+    //  - The loop must have less than 40 uops in all executed loop branches
+
+    // The number of taken branches in a loop is hard to estimate here, and
+    // benchmarking has revealed that it is better not to be conservative when
+    // estimating the branch count. As a result, we'll ignore the branch limits
+    // until someone finds a case where it matters in practice.
+
+    unsigned MaxOps;
+    const TargetSubtargetInfo *ST = getST();
+    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+      MaxOps = PartialUnrollingThreshold;
+    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
+      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
+    else
+      return;
+
+    // Scan the loop: don't unroll loops with calls.
+    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
+         ++I) {
+      BasicBlock *BB = *I;
+
+      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+          ImmutableCallSite CS(&*J);
+          if (const Function *F = CS.getCalledFunction()) {
+            if (!static_cast<T *>(this)->isLoweredToCall(F))
+              continue;
+          }
+
+          return;
+        }
+    }
+
+    // Enable runtime and partial unrolling up to the specified size.
+    // Enable using trip count upper bound to unroll loops.
+    UP.Partial = UP.Runtime = UP.UpperBound = true;
+    UP.PartialThreshold = MaxOps;
+
+    // Avoid unrolling when optimizing for size.
+    UP.OptSizeThreshold = 0;
+    UP.PartialOptSizeThreshold = 0;
+
+    // Set the number of instructions optimized away when a "back edge"
+    // becomes a "fall through" to the default value of 2.
+    UP.BEInsns = 2;
+  }
+
+  int getInstructionLatency(const Instruction *I) {
+    if (isa<LoadInst>(I))
+      return getST()->getSchedModel().DefaultLoadLatency;
+
+    return BaseT::getInstructionLatency(I);
+  }
+
+  /// @}
+
+  /// \name Vector TTI Implementations
+  /// @{
+
+  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
+
+  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
+
+  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+  /// are set if the result needs to be inserted and/or extracted from vectors.
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
+    assert(Ty->isVectorTy() && "Can only scalarize vectors");
+    unsigned Cost = 0;
+
+    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+      if (Insert)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      if (Extract)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+    }
+
+    return Cost;
+  }
+
+  /// Estimate the overhead of scalarizing an instruction's unique
+  /// non-constant operands. The types of the arguments are ordinarily
+  /// scalar, in which case the costs are multiplied by VF.
+  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                            unsigned VF) {
+    unsigned Cost = 0;
+    SmallPtrSet<const Value*, 4> UniqueOperands;
+    for (const Value *A : Args) {
+      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
+        Type *VecTy = nullptr;
+        if (A->getType()->isVectorTy()) {
+          VecTy = A->getType();
+          // If A is a vector operand, VF should be 1 or correspond to A.
+          assert((VF == 1 || VF == VecTy->getVectorNumElements()) &&
+                 "Vector argument does not match VF");
+        } else
+          VecTy = VectorType::get(A->getType(), VF);
+
+        Cost += getScalarizationOverhead(VecTy, false, true);
+      }
+    }
+
+    return Cost;
+  }
+
+  unsigned getScalarizationOverhead(Type *VecTy, ArrayRef<const Value *> Args) {
+    assert(VecTy->isVectorTy());
+
+    unsigned Cost = 0;
+
+    Cost += getScalarizationOverhead(VecTy, true, false);
+    if (!Args.empty())
+      Cost += getOperandsScalarizationOverhead(Args,
+                                               VecTy->getVectorNumElements());
+    else
+      // When no information on arguments is provided, we add the cost
+      // associated with one argument as a heuristic.
+      Cost += getScalarizationOverhead(VecTy, false, true);
+
+    return Cost;
+  }
+
+  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+
+  unsigned getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty,
+      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
+      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
+    // Check if any of the operands are vector operands.
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
+
+    bool IsFloat = Ty->isFPOrFPVectorTy();
+    // Assume that floating point arithmetic operations cost twice as much as
+    // integer operations.
+    unsigned OpCost = (IsFloat ? 2 : 1);
+
+    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+      // The operation is legal. Assume it costs 1.
+      // TODO: Once we have extract/insert subvector cost we need to use them.
+      return LT.first * OpCost;
+    }
+
+    if (!TLI->isOperationExpand(ISD, LT.second)) {
+      // If the operation is custom lowered, then assume that the code is twice
+      // as expensive.
+      return LT.first * 2 * OpCost;
+    }
+
+    // Else, assume that we need to scalarize this op.
+    // TODO: If one of the types get legalized by splitting, handle this
+    // similarly to what getCastInstrCost() does.
+    if (Ty->isVectorTy()) {
+      unsigned Num = Ty->getVectorNumElements();
+      unsigned Cost = static_cast<T *>(this)
+                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
+      // Return the cost of multiple scalar invocations plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(Ty, Args) + Num * Cost;
+    }
+
+    // We don't know anything about this scalar instruction.
+    return OpCost;
+  }
+
+  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+                          Type *SubTp) {
+    if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
+        Kind == TTI::SK_PermuteSingleSrc) {
+      return getPermuteShuffleOverhead(Tp);
+    }
+    return 1;
+  }
+
+  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                            const Instruction *I = nullptr) {
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
+    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
+
+    // Check for NOOP conversions.
+    if (SrcLT.first == DstLT.first &&
+        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+      // Bitcasts between types that are legalized to the same type are free.
+      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
+        return 0;
+    }
+
+    if (Opcode == Instruction::Trunc &&
+        TLI->isTruncateFree(SrcLT.second, DstLT.second))
+      return 0;
+
+    if (Opcode == Instruction::ZExt &&
+        TLI->isZExtFree(SrcLT.second, DstLT.second))
+      return 0;
+
+    if (Opcode == Instruction::AddrSpaceCast &&
+        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
+                                 Dst->getPointerAddressSpace()))
+      return 0;
+
+    // If this is a zext/sext of a load, return 0 if the corresponding
+    // extending load exists on target.
+    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
+        I && isa<LoadInst>(I->getOperand(0))) {
+        EVT ExtVT = EVT::getEVT(Dst);
+        EVT LoadVT = EVT::getEVT(Src);
+        unsigned LType =
+          ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
+        if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
+          return 0;
+    }
+
+    // If the cast is marked as legal (or promote) then assume low cost.
+    if (SrcLT.first == DstLT.first &&
+        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
+      return 1;
+
+    // Handle scalar conversions.
+    if (!Src->isVectorTy() && !Dst->isVectorTy()) {
+      // Scalar bitcasts are usually free.
+      if (Opcode == Instruction::BitCast)
+        return 0;
+
+      // Just check the op cost. If the operation is legal then assume it costs
+      // 1.
+      if (!TLI->isOperationExpand(ISD, DstLT.second))
+        return 1;
+
+      // Assume that illegal scalar instructions are expensive.
+      return 4;
+    }
+
+    // Check vector-to-vector casts.
+    if (Dst->isVectorTy() && Src->isVectorTy()) {
+      // If the cast is between same-sized registers, then the check is simple.
+      if (SrcLT.first == DstLT.first &&
+          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+        // Assume that Zext is done using AND.
+        if (Opcode == Instruction::ZExt)
+          return 1;
+
+        // Assume that sext is done using SHL and SRA.
+        if (Opcode == Instruction::SExt)
+          return 2;
+
+        // Just check the op cost. If the operation is legal then assume it
+        // costs 1 and multiply by the type-legalization overhead.
+        if (!TLI->isOperationExpand(ISD, DstLT.second))
+          return SrcLT.first * 1;
+      }
+
+      // If we are legalizing by splitting, query the concrete TTI for the cost
+      // of casting the original vector twice. We also need to factor in the
+      // cost of the split itself. Count that as 1, to be consistent with
+      // TLI->getTypeLegalizationCost().
+      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
+           TargetLowering::TypeSplitVector) ||
+          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
+           TargetLowering::TypeSplitVector)) {
+        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
+                                         Dst->getVectorNumElements() / 2);
+        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
+                                         Src->getVectorNumElements() / 2);
+        T *TTI = static_cast<T *>(this);
+        return TTI->getVectorSplitCost() +
+               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I));
+      }
+
+      // In other cases where the source or destination are illegal, assume
+      // the operation will get scalarized.
+      unsigned Num = Dst->getVectorNumElements();
+      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
+          Opcode, Dst->getScalarType(), Src->getScalarType(), I);
+
+      // Return the cost of multiple scalar invocations plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
+    }
+
+    // We already handled vector-to-vector and scalar-to-scalar conversions.
+    // This is where we handle bitcasts between vectors and scalars. We need to
+    // assume that the conversion is scalarized in one way or another.
+    if (Opcode == Instruction::BitCast)
+      // Illegal bitcasts are done by storing and loading from a stack slot.
+      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
+                                : 0) +
+             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
+                                : 0);
+
+    llvm_unreachable("Unhandled cast");
+  }
+
+  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+                                    VectorType *VecTy, unsigned Index) {
+    return static_cast<T *>(this)->getVectorInstrCost(
+               Instruction::ExtractElement, VecTy, Index) +
+           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
+                                                    VecTy->getElementType());
+  }
+
+  unsigned getCFInstrCost(unsigned Opcode) {
+    // Branches are assumed to be predicted.
+    return 0;
+  }
+
+  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                              const Instruction *I) {
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+
+    // Selects on vectors are actually vector selects.
+    if (ISD == ISD::SELECT) {
+      assert(CondTy && "CondTy must exist");
+      if (CondTy->isVectorTy())
+        ISD = ISD::VSELECT;
+    }
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
+
+    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
+        !TLI->isOperationExpand(ISD, LT.second)) {
+      // The operation is legal. Assume it costs 1. Multiply
+      // by the type-legalization overhead.
+      return LT.first * 1;
+    }
+
+    // Otherwise, assume that the operation is scalarized.
+    // TODO: If one of the types get legalized by splitting, handle this
+    // similarly to what getCastInstrCost() does.
+    if (ValTy->isVectorTy()) {
+      unsigned Num = ValTy->getVectorNumElements();
+      if (CondTy)
+        CondTy = CondTy->getScalarType();
+      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
+          Opcode, ValTy->getScalarType(), CondTy, I);
+
+      // Return the cost of multiple scalar invocations plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
+    }
+
+    // Unknown scalar opcode.
+    return 1;
+  }
+
+  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+    std::pair<unsigned, MVT> LT =
+        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
+
+    return LT.first;
+  }
+
+  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                       unsigned AddressSpace, const Instruction *I = nullptr) {
+    assert(!Src->isVoidTy() && "Invalid type");
+    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
+
+    // Assuming that all loads of legal types cost 1.
+    unsigned Cost = LT.first;
+
+    if (Src->isVectorTy() &&
+        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
+      // This is a vector load that legalizes to a larger type than the vector
+      // itself. Unless the corresponding extending load or truncating store is
+      // legal, this will scalarize.
+      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
+      EVT MemVT = getTLI()->getValueType(DL, Src);
+      if (Opcode == Instruction::Store)
+        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
+      else
+        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
+
+      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
+        // This is a vector load/store for some illegal type that is scalarized.
+        // We must account for the cost of building or decomposing the vector.
+        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
+                                         Opcode == Instruction::Store);
+      }
+    }
+
+    return Cost;
+  }
+
+  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                      unsigned Factor,
+                                      ArrayRef<unsigned> Indices,
+                                      unsigned Alignment,
+                                      unsigned AddressSpace) {
+    VectorType *VT = dyn_cast<VectorType>(VecTy);
+    assert(VT && "Expect a vector type for interleaved memory op");
+
+    unsigned NumElts = VT->getNumElements();
+    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
+
+    unsigned NumSubElts = NumElts / Factor;
+    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
+
+    // First, the cost of the load/store operation.
+    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
+        Opcode, VecTy, Alignment, AddressSpace);
+
+    // Legalize the vector type, and get the legalized and unlegalized type
+    // sizes.
+    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
+    unsigned VecTySize =
+        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
+    unsigned VecTyLTSize = VecTyLT.getStoreSize();
+
+    // Return the ceiling of dividing A by B.
+    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
+
+    // Scale the cost of the memory operation by the fraction of legalized
+    // instructions that will actually be used. We shouldn't account for the
+    // cost of dead instructions since they will be removed.
+    //
+    // E.g., An interleaved load of factor 8:
+    //       %vec = load <16 x i64>, <16 x i64>* %ptr
+    //       %v0 = shufflevector %vec, undef, <0, 8>
+    //
+    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
+    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
+    // type). The other loads are unused.
+    //
+    // We only scale the cost of loads since interleaved store groups aren't
+    // allowed to have gaps.
+    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
+      // The number of loads of a legal type it will take to represent a load
+      // of the unlegalized vector type.
+      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);
+
+      // The number of elements of the unlegalized type that correspond to a
+      // single legal instruction.
+      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);
+
+      // Determine which legal instructions will be used.
+      BitVector UsedInsts(NumLegalInsts, false);
+      for (unsigned Index : Indices)
+        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
+          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
+
+      // Scale the cost of the load by the fraction of legal instructions that
+      // will be used, rounding up. (Plain integer division of the two counts
+      // would truncate the scaled cost to zero whenever any of the legal
+      // instructions goes unused.)
+      Cost = ceil(UsedInsts.count() * Cost, NumLegalInsts);
+    }
+
+    // Then add the cost of the interleave operation.
+    if (Opcode == Instruction::Load) {
+      // The interleave cost is similar to extracting the sub vectors'
+      // elements from the wide vector and inserting them into the sub vectors.
+      //
+      // E.g. An interleaved load of factor 2 (with one member of index 0):
+      //      %vec = load <8 x i32>, <8 x i32>* %ptr
+      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>         ; Index 0
+      // The cost is estimated as extract elements at 0, 2, 4, 6 from the
+      // <8 x i32> vector and insert them into a <4 x i32> vector.
+
+      assert(Indices.size() <= Factor &&
+             "Interleaved memory op has too many members");
+
+      for (unsigned Index : Indices) {
+        assert(Index < Factor && "Invalid index for interleaved memory op");
+
+        // Extract elements from loaded vector for each sub vector.
+        for (unsigned i = 0; i < NumSubElts; i++)
+          Cost += static_cast<T *>(this)->getVectorInstrCost(
+              Instruction::ExtractElement, VT, Index + i * Factor);
+      }
+
+      unsigned InsSubCost = 0;
+      for (unsigned i = 0; i < NumSubElts; i++)
+        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
+            Instruction::InsertElement, SubVT, i);
+
+      Cost += Indices.size() * InsSubCost;
+    } else {
+      // The interleave cost is that of extracting all elements from the sub
+      // vectors and inserting them into the wide vector.
+      //
+      // E.g. An interleaved store of factor 2:
+      //      %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
+      //      store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
+      // The cost is estimated as extract all elements from both <4 x i32>
+      // vectors and insert into the <8 x i32> vector.
+
+      unsigned ExtSubCost = 0;
+      for (unsigned i = 0; i < NumSubElts; i++)
+        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
+            Instruction::ExtractElement, SubVT, i);
+      Cost += ExtSubCost * Factor;
+
+      for (unsigned i = 0; i < NumElts; i++)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
+    }
+
+    return Cost;
+  }
+
+  /// Get intrinsic cost based on arguments.
+  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                                 ArrayRef<Value *> Args, FastMathFlags FMF,
+                                 unsigned VF = 1) {
+    unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
+    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
+
+    switch (IID) {
+    default: {
+      // Assume that we need to scalarize this intrinsic.
+      SmallVector<Type *, 4> Types;
+      for (Value *Op : Args) {
+        Type *OpTy = Op->getType();
+        assert(VF == 1 || !OpTy->isVectorTy());
+        Types.push_back(VF == 1 ? OpTy : VectorType::get(OpTy, VF));
+      }
+
+      if (VF > 1 && !RetTy->isVoidTy())
+        RetTy = VectorType::get(RetTy, VF);
+
+      // Compute the scalarization overhead based on Args for a vector
+      // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
+      // CostModel will pass a vector RetTy and VF == 1.
+      unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
+      if (RetVF > 1 || VF > 1) {
+        ScalarizationCost = 0;
+        if (!RetTy->isVoidTy())
+          ScalarizationCost += getScalarizationOverhead(RetTy, true, false);
+        ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
+      }
+
+      return static_cast<T *>(this)->
+        getIntrinsicInstrCost(IID, RetTy, Types, FMF, ScalarizationCost);
+    }
+    case Intrinsic::masked_scatter: {
+      assert(VF == 1 && "Can't vectorize types here.");
+      Value *Mask = Args[3];
+      bool VarMask = !isa<Constant>(Mask);
+      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
+      return
+        static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,
+                                                       Args[0]->getType(),
+                                                       Args[1], VarMask,
+                                                       Alignment);
+    }
+    case Intrinsic::masked_gather: {
+      assert(VF == 1 && "Can't vectorize types here.");
+      Value *Mask = Args[2];
+      bool VarMask = !isa<Constant>(Mask);
+      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
+      return
+        static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
+                                                       RetTy, Args[0], VarMask,
+                                                       Alignment);
+    }
+    case Intrinsic::experimental_vector_reduce_add:
+    case Intrinsic::experimental_vector_reduce_mul:
+    case Intrinsic::experimental_vector_reduce_and:
+    case Intrinsic::experimental_vector_reduce_or:
+    case Intrinsic::experimental_vector_reduce_xor:
+    case Intrinsic::experimental_vector_reduce_fadd:
+    case Intrinsic::experimental_vector_reduce_fmul:
+    case Intrinsic::experimental_vector_reduce_smax:
+    case Intrinsic::experimental_vector_reduce_smin:
+    case Intrinsic::experimental_vector_reduce_fmax:
+    case Intrinsic::experimental_vector_reduce_fmin:
+    case Intrinsic::experimental_vector_reduce_umax:
+    case Intrinsic::experimental_vector_reduce_umin:
+      return getIntrinsicInstrCost(IID, RetTy, Args[0]->getType(), FMF);
+    }
+  }
+
+  /// Get intrinsic cost based on argument types.
+  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
+  /// cost of scalarizing the arguments and the return value will be computed
+  /// based on types.
+  unsigned getIntrinsicInstrCost(
+      Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
+      unsigned ScalarizationCostPassed = std::numeric_limits<unsigned>::max()) {
+    SmallVector<unsigned, 2> ISDs;
+    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
+    switch (IID) {
+    default: {
+      // Assume that we need to scalarize this intrinsic.
+      unsigned ScalarizationCost = ScalarizationCostPassed;
+      unsigned ScalarCalls = 1;
+      Type *ScalarRetTy = RetTy;
+      if (RetTy->isVectorTy()) {
+        if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+          ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+        ScalarRetTy = RetTy->getScalarType();
+      }
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy()) {
+          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+            ScalarizationCost += getScalarizationOverhead(Ty, false, true);
+          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+          Ty = Ty->getScalarType();
+        }
+        ScalarTys.push_back(Ty);
+      }
+      if (ScalarCalls == 1)
+        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
+
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, ScalarRetTy, ScalarTys, FMF);
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
+    }
+    // Look for intrinsics that can be lowered directly or turned into a scalar
+    // intrinsic call.
+    case Intrinsic::sqrt:
+      ISDs.push_back(ISD::FSQRT);
+      break;
+    case Intrinsic::sin:
+      ISDs.push_back(ISD::FSIN);
+      break;
+    case Intrinsic::cos:
+      ISDs.push_back(ISD::FCOS);
+      break;
+    case Intrinsic::exp:
+      ISDs.push_back(ISD::FEXP);
+      break;
+    case Intrinsic::exp2:
+      ISDs.push_back(ISD::FEXP2);
+      break;
+    case Intrinsic::log:
+      ISDs.push_back(ISD::FLOG);
+      break;
+    case Intrinsic::log10:
+      ISDs.push_back(ISD::FLOG10);
+      break;
+    case Intrinsic::log2:
+      ISDs.push_back(ISD::FLOG2);
+      break;
+    case Intrinsic::fabs:
+      ISDs.push_back(ISD::FABS);
+      break;
+    case Intrinsic::minnum:
+      ISDs.push_back(ISD::FMINNUM);
+      if (FMF.noNaNs())
+        ISDs.push_back(ISD::FMINNAN);
+      break;
+    case Intrinsic::maxnum:
+      ISDs.push_back(ISD::FMAXNUM);
+      if (FMF.noNaNs())
+        ISDs.push_back(ISD::FMAXNAN);
+      break;
+    case Intrinsic::copysign:
+      ISDs.push_back(ISD::FCOPYSIGN);
+      break;
+    case Intrinsic::floor:
+      ISDs.push_back(ISD::FFLOOR);
+      break;
+    case Intrinsic::ceil:
+      ISDs.push_back(ISD::FCEIL);
+      break;
+    case Intrinsic::trunc:
+      ISDs.push_back(ISD::FTRUNC);
+      break;
+    case Intrinsic::nearbyint:
+      ISDs.push_back(ISD::FNEARBYINT);
+      break;
+    case Intrinsic::rint:
+      ISDs.push_back(ISD::FRINT);
+      break;
+    case Intrinsic::round:
+      ISDs.push_back(ISD::FROUND);
+      break;
+    case Intrinsic::pow:
+      ISDs.push_back(ISD::FPOW);
+      break;
+    case Intrinsic::fma:
+      ISDs.push_back(ISD::FMA);
+      break;
+    case Intrinsic::fmuladd:
+      ISDs.push_back(ISD::FMA);
+      break;
+    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+    case Intrinsic::sideeffect:
+      return 0;
+    case Intrinsic::masked_store:
+      return static_cast<T *>(this)
+          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
+    case Intrinsic::masked_load:
+      return static_cast<T *>(this)
+          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
+    case Intrinsic::experimental_vector_reduce_add:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Add, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_mul:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Mul, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_and:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::And, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_or:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Or, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_xor:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Xor, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_fadd:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::FAdd, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_fmul:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::FMul, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_smax:
+    case Intrinsic::experimental_vector_reduce_smin:
+    case Intrinsic::experimental_vector_reduce_fmax:
+    case Intrinsic::experimental_vector_reduce_fmin:
+      return static_cast<T *>(this)->getMinMaxReductionCost(
+          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
+          /*IsSigned=*/true);
+    case Intrinsic::experimental_vector_reduce_umax:
+    case Intrinsic::experimental_vector_reduce_umin:
+      return static_cast<T *>(this)->getMinMaxReductionCost(
+          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
+          /*IsSigned=*/false);
+    case Intrinsic::ctpop:
+      ISDs.push_back(ISD::CTPOP);
+      // In case of legalization use TCC_Expensive. This is cheaper than a
+      // library call but still not a cheap instruction.
+      SingleCallCost = TargetTransformInfo::TCC_Expensive;
+      break;
+    // FIXME: ctlz, cttz, ...
+    }
+
+    const TargetLoweringBase *TLI = getTLI();
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+
+    SmallVector<unsigned, 2> LegalCost;
+    SmallVector<unsigned, 2> CustomCost;
+    for (unsigned ISD : ISDs) {
+      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+        if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
+          return 0;
+        }
+
+        // The operation is legal. Assume it costs 1.
+        // If the type is split to multiple registers, assume that there is some
+        // overhead to this.
+        // TODO: Once we have extract/insert subvector cost we need to use them.
+        if (LT.first > 1)
+          LegalCost.push_back(LT.first * 2);
+        else
+          LegalCost.push_back(LT.first * 1);
+      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
+        // If the operation is custom lowered then assume
+        // that the code is twice as expensive.
+        CustomCost.push_back(LT.first * 2);
+      }
+    }
+
+    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
+    if (MinLegalCostI != LegalCost.end())
+      return *MinLegalCostI;
+
+    auto MinCustomCostI = std::min_element(CustomCost.begin(), CustomCost.end());
+    if (MinCustomCostI != CustomCost.end())
+      return *MinCustomCostI;
+
+    // If we can't lower fmuladd into an FMA estimate the cost as a floating
+    // point mul followed by an add.
+    if (IID == Intrinsic::fmuladd)
+      return static_cast<T *>(this)
+                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
+             static_cast<T *>(this)
+                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
+
+    // Else, assume that we need to scalarize this intrinsic. For math builtins
+    // this will emit a costly libcall, adding call overhead and spills. Make it
+    // very expensive.
+    if (RetTy->isVectorTy()) {
+      unsigned ScalarizationCost =
+          ((ScalarizationCostPassed != std::numeric_limits<unsigned>::max())
+               ? ScalarizationCostPassed
+               : getScalarizationOverhead(RetTy, true, false));
+      unsigned ScalarCalls = RetTy->getVectorNumElements();
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy())
+          Ty = Ty->getScalarType();
+        ScalarTys.push_back(Ty);
+      }
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, RetTy->getScalarType(), ScalarTys, FMF);
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        if (Tys[i]->isVectorTy()) {
+          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+            ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
+          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        }
+      }
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
+    }
+
+    // This is going to be turned into a library call, make it expensive.
+    return SingleCallCost;
+  }
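+
+  // Worked example (illustrative): for @llvm.sqrt.v8f32 on a target whose
+  // widest legal FP vector is v4f32 and where ISD::FSQRT is legal on v4f32,
+  // type legalization yields LT == {2, v4f32}; since LT.first > 1, the
+  // legal-cost path above returns LT.first * 2 == 4. If FSQRT were marked
+  // Expand instead, the scalarization path would charge eight scalar calls
+  // plus the insert/extract overhead.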
+
+  /// \brief Compute a cost of the given call instruction.
+  ///
+  /// Compute the cost of calling function F with return type RetTy and
+  /// argument types Tys. F might be nullptr, in this case the cost of an
+  /// arbitrary call with the specified signature will be returned.
+  /// This is used, for instance, when we estimate the cost of a call to a
+  /// vector counterpart of the given function.
+  /// \param F Called function, might be nullptr.
+  /// \param RetTy Return value types.
+  /// \param Tys Argument types.
+  /// \returns The cost of Call instruction.
+  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
+    return 10;
+  }
+
+  unsigned getNumberOfParts(Type *Tp) {
+    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
+    return LT.first;
+  }
+
+  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
+                                     const SCEV *) {
+    return 0;
+  }
+
+  /// Try to calculate arithmetic and shuffle op costs for reduction operations.
+  /// We're assuming that reduction operations are performed in the following
+  /// way:
+  /// 1. Non-pairwise reduction
+  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
+  ///            \----------------v-------------/  \----------v------------/
+  ///                            n/2 elements               n/2 elements
+  /// %red1 = op <n x t> %val, <n x t> %val1
+  /// After this operation we have a vector %red1 where only the first n/2
+  /// elements are meaningful, the second n/2 elements are undefined and can be
+  /// dropped. All other operations are actually working with the vector of
+  /// length n/2, not n, though the real vector length is still n.
+  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
+  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
+  ///            \----------------v-------------/  \----------v------------/
+  ///                            n/4 elements               3*n/4 elements
+  /// %red2 = op <n x t> %red1, <n x t> %val2  - working with the vector of
+  /// length n/2, the resulting vector has length n/4 etc.
+  /// 2. Pairwise reduction:
+  /// Everything is the same except for an additional shuffle operation which
+  /// is used to produce operands for pairwise kind of reductions.
+  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
+  ///            \-------------v----------/  \----------v------------/
+  ///                   n/2 elements               n/2 elements
+  /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
+  ///            \-------------v----------/  \----------v------------/
+  ///                   n/2 elements               n/2 elements
+  /// %red1 = op <n x t> %val1, <n x t> %val2
+  /// Again, the operation is performed on <n x t> vector, but the resulting
+  /// vector %red1 is <n/2 x t> vector.
+  ///
+  /// The cost model should take into account that the actual length of the
+  /// vector is reduced on each iteration.
+  unsigned getArithmeticReductionCost(unsigned Opcode, Type *Ty,
+                                      bool IsPairwise) {
+    assert(Ty->isVectorTy() && "Expect a vector type");
+    Type *ScalarTy = Ty->getVectorElementType();
+    unsigned NumVecElts = Ty->getVectorNumElements();
+    unsigned NumReduxLevels = Log2_32(NumVecElts);
+    unsigned ArithCost = 0;
+    unsigned ShuffleCost = 0;
+    auto *ConcreteTTI = static_cast<T *>(this);
+    std::pair<unsigned, MVT> LT =
+        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
+    unsigned LongVectorCount = 0;
+    unsigned MVTLen =
+        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
+    while (NumVecElts > MVTLen) {
+      NumVecElts /= 2;
+      // Assume the pairwise shuffles add a cost.
+      ShuffleCost += (IsPairwise + 1) *
+                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                                 NumVecElts, Ty);
+      ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
+      Ty = VectorType::get(ScalarTy, NumVecElts);
+      ++LongVectorCount;
+    }
+    // The minimal length of the vector is limited by the real length of vector
+    // operations performed on the current platform. That's why several final
+    // reduction operations are performed on the vectors with the same
+    // architecture-dependent length.
+    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
+                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                               NumVecElts, Ty);
+    ArithCost += (NumReduxLevels - LongVectorCount) *
+                 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
+    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
+  }
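+
+  // Worked example (illustrative): a non-pairwise add reduction of <8 x i32>
+  // on a target whose widest legal vector is v4i32 has NumReduxLevels == 3.
+  // The loop runs once (8 -> 4 elements), charging one extract-subvector
+  // shuffle and one add on the 8-wide type; the remaining two levels are
+  // charged on the legal 4-wide type, plus the final element-extraction
+  // overhead from getScalarizationOverhead.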
+
+  /// Try to calculate op costs for min/max reduction operations.
+  /// \param CondTy Conditional type for the Select instruction.
+  unsigned getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise,
+                                  bool) {
+    assert(Ty->isVectorTy() && "Expect a vector type");
+    Type *ScalarTy = Ty->getVectorElementType();
+    Type *ScalarCondTy = CondTy->getVectorElementType();
+    unsigned NumVecElts = Ty->getVectorNumElements();
+    unsigned NumReduxLevels = Log2_32(NumVecElts);
+    unsigned CmpOpcode;
+    if (Ty->isFPOrFPVectorTy()) {
+      CmpOpcode = Instruction::FCmp;
+    } else {
+      assert(Ty->isIntOrIntVectorTy() &&
+             "expecting floating point or integer type for min/max reduction");
+      CmpOpcode = Instruction::ICmp;
+    }
+    unsigned MinMaxCost = 0;
+    unsigned ShuffleCost = 0;
+    auto *ConcreteTTI = static_cast<T *>(this);
+    std::pair<unsigned, MVT> LT =
+        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
+    unsigned LongVectorCount = 0;
+    unsigned MVTLen =
+        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
+    while (NumVecElts > MVTLen) {
+      NumVecElts /= 2;
+      // Assume the pairwise shuffles add a cost.
+      ShuffleCost += (IsPairwise + 1) *
+                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                                 NumVecElts, Ty);
+      MinMaxCost +=
+          ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
+          ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
+                                          nullptr);
+      Ty = VectorType::get(ScalarTy, NumVecElts);
+      CondTy = VectorType::get(ScalarCondTy, NumVecElts);
+      ++LongVectorCount;
+    }
+    // The minimal length of the vector is limited by the real length of vector
+    // operations performed on the current platform. That's why several final
+    // reduction operations are performed on the vectors with the same
+    // architecture-dependent length.
+    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
+                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                               NumVecElts, Ty);
+    MinMaxCost +=
+        (NumReduxLevels - LongVectorCount) *
+        (ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
+         ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
+                                         nullptr));
+    // Need 3 extractelement instructions for scalarization + an additional
+    // scalar select instruction.
+    return ShuffleCost + MinMaxCost +
+           3 * getScalarizationOverhead(Ty, /*Insert=*/false,
+                                        /*Extract=*/true) +
+           ConcreteTTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
+                                           ScalarCondTy, nullptr);
+  }
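+
+  // Illustrative note: each level of a min/max reduction is modeled as one
+  // extract-subvector shuffle plus a compare/select pair. For a non-pairwise
+  // smax of <4 x i32> with v4i32 legal, the loop never runs, so both levels
+  // are charged on the legal type, followed by the extraction overhead and
+  // one final scalar select.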
+
+  unsigned getVectorSplitCost() { return 1; }
+
+  /// @}
+};
+
+/// \brief Concrete BasicTTIImpl that can be used if no further customization
+/// is needed.
+class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
+  using BaseT = BasicTTIImplBase<BasicTTIImpl>;
+
+  friend class BasicTTIImplBase<BasicTTIImpl>;
+
+  const TargetSubtargetInfo *ST;
+  const TargetLoweringBase *TLI;
+
+  const TargetSubtargetInfo *getST() const { return ST; }
+  const TargetLoweringBase *getTLI() const { return TLI; }
+
+public:
+  explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_BASICTTIIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
new file mode 100644
index 0000000..d9e8206
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
@@ -0,0 +1,108 @@
+//===- lib/CodeGen/CalcSpillWeights.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+namespace llvm {
+
+class LiveInterval;
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineFunction;
+class MachineLoopInfo;
+class VirtRegMap;
+
+  /// \brief Normalize the spill weight of a live interval
+  ///
+  /// The spill weight of a live interval is computed as:
+  ///
+  ///   (sum(use freq) + sum(def freq)) / (K + size)
+  ///
+  /// @param UseDefFreq Expected number of executed use and def instructions
+  ///                   per function call. Derived from block frequencies.
+  /// @param Size       Size of live interval as returned by getSize()
+  /// @param NumInstr   Number of instructions using this live interval
+  static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size,
+                                           unsigned NumInstr) {
+    // The constant 25 instructions is added to avoid depending too much on
+    // accidental SlotIndex gaps for small intervals. The effect is that small
+    // intervals have a spill weight that is mostly proportional to the number
+    // of uses, while large intervals get a spill weight that is closer to a use
+    // density.
+    return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
+  }
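+
+  // Illustrative note (not part of the original header): the padding makes
+  // the weight of a tiny interval covering ~2 instructions roughly
+  // UseDefFreq / (27 * SlotIndex::InstrDist), i.e. dominated by the constant
+  // and so mostly proportional to the number of uses, while for an interval
+  // covering ~1000 instructions the padding is negligible and the weight
+  // approaches the pure use density UseDefFreq / Size.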
+
+  /// \brief Calculate auxiliary information for a virtual register such as its
+  /// spill weight and allocation hint.
+  class VirtRegAuxInfo {
+  public:
+    using NormalizingFn = float (*)(float, unsigned, unsigned);
+
+  private:
+    MachineFunction &MF;
+    LiveIntervals &LIS;
+    VirtRegMap *VRM;
+    const MachineLoopInfo &Loops;
+    const MachineBlockFrequencyInfo &MBFI;
+    DenseMap<unsigned, float> Hint;
+    NormalizingFn normalize;
+
+  public:
+    VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
+                   VirtRegMap *vrm, const MachineLoopInfo &loops,
+                   const MachineBlockFrequencyInfo &mbfi,
+                   NormalizingFn norm = normalizeSpillWeight)
+        : MF(mf), LIS(lis), VRM(vrm), Loops(loops), MBFI(mbfi), normalize(norm) {}
+
+    /// \brief (re)compute li's spill weight and allocation hint.
+    void calculateSpillWeightAndHint(LiveInterval &li);
+
+    /// \brief Compute future expected spill weight of a split artifact of li
+    /// that will span between start and end slot indexes.
+    /// \param li     The live interval to be split.
+    /// \param start  The expected beginning of the split artifact. Instructions
+    ///               before start will not affect the weight.
+    /// \param end    The expected end of the split artifact. Instructions
+    ///               after end will not affect the weight.
+    /// \return The expected spill weight of the split artifact. Returns
+    /// negative weight for unspillable li.
+    float futureWeight(LiveInterval &li, SlotIndex start, SlotIndex end);
+
+    /// \brief Helper function for weight calculations.
+    /// (Re)compute li's spill weight and allocation hint, or, for non null
+    /// start and end - compute future expected spill weight of a split
+    /// artifact of li that will span between start and end slot indexes.
+    /// \param li     The live interval for which to compute the weight.
+    /// \param start  The expected beginning of the split artifact. Instructions
+    ///               before start will not affect the weight. Relevant for
+    ///               weight calculation of future split artifact.
+    /// \param end    The expected end of the split artifact. Instructions
+    ///               after end will not affect the weight. Relevant for
+    ///               weight calculation of future split artifact.
+    /// \return The spill weight. Returns negative weight for unspillable li.
+    float weightCalcHelper(LiveInterval &li, SlotIndex *start = nullptr,
+                           SlotIndex *end = nullptr);
+  };
+
+  /// \brief Compute spill weights and allocation hints for all virtual register
+  /// live intervals.
+  void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
+                                     VirtRegMap *VRM,
+                                     const MachineLoopInfo &MLI,
+                                     const MachineBlockFrequencyInfo &MBFI,
+                                     VirtRegAuxInfo::NormalizingFn norm =
+                                         normalizeSpillWeight);
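+
+  // Usage sketch (illustrative): a register-allocation pass would typically
+  // call this once live intervals and block frequencies are available, e.g.
+  //
+  //   calculateSpillWeightsAndHints(LIS, MF, VRM,
+  //                                 getAnalysis<MachineLoopInfo>(),
+  //                                 getAnalysis<MachineBlockFrequencyInfo>());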
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
new file mode 100644
index 0000000..d30a273
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
@@ -0,0 +1,576 @@
+//===- llvm/CallingConvLower.h - Calling Conventions ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CCState and CCValAssign classes, used for lowering
+// and implementing calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCRegisterInfo.h"
+
+namespace llvm {
+
+class CCState;
+class MVT;
+class TargetMachine;
+class TargetRegisterInfo;
+
+/// CCValAssign - Represent assignment of one arg/retval to a location.
+class CCValAssign {
+public:
+  enum LocInfo {
+    Full,      // The value fills the full location.
+    SExt,      // The value is sign extended in the location.
+    ZExt,      // The value is zero extended in the location.
+    AExt,      // The value is extended with undefined upper bits.
+    SExtUpper, // The value is in the upper bits of the location and should be
+               // sign extended when retrieved.
+    ZExtUpper, // The value is in the upper bits of the location and should be
+               // zero extended when retrieved.
+    AExtUpper, // The value is in the upper bits of the location and should be
+               // extended with undefined upper bits when retrieved.
+    BCvt,      // The value is bit-converted in the location.
+    VExt,      // The value is vector-widened in the location.
+               // FIXME: Not implemented yet. Code that uses AExt to mean
+               // vector-widen should be fixed to use VExt instead.
+    FPExt,     // The floating-point value is fp-extended in the location.
+    Indirect   // The location contains pointer to the value.
+    // TODO: a subset of the value is in the location.
+  };
+
+private:
+  /// ValNo - This is the value number being assigned (e.g. an argument number).
+  unsigned ValNo;
+
+  /// Loc is either a stack offset or a register number.
+  unsigned Loc;
+
+  /// isMem - True if this is a memory loc, false if it is a register loc.
+  unsigned isMem : 1;
+
+  /// isCustom - True if this arg/retval requires special handling.
+  unsigned isCustom : 1;
+
+  /// Information about how the value is assigned.
+  LocInfo HTP : 6;
+
+  /// ValVT - The type of the value being assigned.
+  MVT ValVT;
+
+  /// LocVT - The type of the location being assigned to.
+  MVT LocVT;
+public:
+
+  static CCValAssign getReg(unsigned ValNo, MVT ValVT,
+                            unsigned RegNo, MVT LocVT,
+                            LocInfo HTP) {
+    CCValAssign Ret;
+    Ret.ValNo = ValNo;
+    Ret.Loc = RegNo;
+    Ret.isMem = false;
+    Ret.isCustom = false;
+    Ret.HTP = HTP;
+    Ret.ValVT = ValVT;
+    Ret.LocVT = LocVT;
+    return Ret;
+  }
+
+  static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
+                                  unsigned RegNo, MVT LocVT,
+                                  LocInfo HTP) {
+    CCValAssign Ret;
+    Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
+    Ret.isCustom = true;
+    return Ret;
+  }
+
+  static CCValAssign getMem(unsigned ValNo, MVT ValVT,
+                            unsigned Offset, MVT LocVT,
+                            LocInfo HTP) {
+    CCValAssign Ret;
+    Ret.ValNo = ValNo;
+    Ret.Loc = Offset;
+    Ret.isMem = true;
+    Ret.isCustom = false;
+    Ret.HTP = HTP;
+    Ret.ValVT = ValVT;
+    Ret.LocVT = LocVT;
+    return Ret;
+  }
+
+  static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
+                                  unsigned Offset, MVT LocVT,
+                                  LocInfo HTP) {
+    CCValAssign Ret;
+    Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
+    Ret.isCustom = true;
+    return Ret;
+  }
+
+  // There is no need to differentiate between a pending CCValAssign and other
+  // kinds, as they are stored in a different list.
+  static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
+                                LocInfo HTP, unsigned ExtraInfo = 0) {
+    return getReg(ValNo, ValVT, ExtraInfo, LocVT, HTP);
+  }
+
+  void convertToReg(unsigned RegNo) {
+    Loc = RegNo;
+    isMem = false;
+  }
+
+  void convertToMem(unsigned Offset) {
+    Loc = Offset;
+    isMem = true;
+  }
+
+  unsigned getValNo() const { return ValNo; }
+  MVT getValVT() const { return ValVT; }
+
+  bool isRegLoc() const { return !isMem; }
+  bool isMemLoc() const { return isMem; }
+
+  bool needsCustom() const { return isCustom; }
+
+  unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+  unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
+  unsigned getExtraInfo() const { return Loc; }
+  MVT getLocVT() const { return LocVT; }
+
+  LocInfo getLocInfo() const { return HTP; }
+  bool isExtInLoc() const {
+    return (HTP == AExt || HTP == SExt || HTP == ZExt);
+  }
+
+  bool isUpperBitsInLoc() const {
+    return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
+  }
+};
+
+/// Describes a register that needs to be forwarded from the prologue to a
+/// musttail call.
+struct ForwardedRegister {
+  ForwardedRegister(unsigned VReg, MCPhysReg PReg, MVT VT)
+      : VReg(VReg), PReg(PReg), VT(VT) {}
+  unsigned VReg;
+  MCPhysReg PReg;
+  MVT VT;
+};
+
+/// CCAssignFn - This function assigns a location for Val, updating State to
+/// reflect the change.  It returns 'true' if it failed to handle Val.
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
+                        MVT LocVT, CCValAssign::LocInfo LocInfo,
+                        ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+/// CCCustomFn - This function assigns a location for Val, possibly updating
+/// all args to reflect changes and indicates if it handled it. It must set
+/// isCustom if it handles the arg and returns true.
+typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
+                        MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+                        ISD::ArgFlagsTy &ArgFlags, CCState &State);
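+
+// A CCAssignFn is normally generated by TableGen from a target's calling
+// convention description. A hand-written sketch for a hypothetical target
+// (register names Hypo::R0..R3 are made up) that passes i32 values in four
+// registers and then on the stack might look like:
+//
+//   static bool CC_Hypothetical(unsigned ValNo, MVT ValVT, MVT LocVT,
+//                               CCValAssign::LocInfo LocInfo,
+//                               ISD::ArgFlagsTy ArgFlags, CCState &State) {
+//     static const MCPhysReg RegList[] = {Hypo::R0, Hypo::R1, Hypo::R2,
+//                                         Hypo::R3};
+//     if (unsigned Reg = State.AllocateReg(RegList)) {
+//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+//       return false; // Handled.
+//     }
+//     unsigned Offset = State.AllocateStack(4, 4);
+//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+//     return false; // Handled.
+//   }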
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values.  It captures which registers are already assigned and which
+/// stack slots are used.  It provides accessors to allocate these values.
+class CCState {
+private:
+  CallingConv::ID CallingConv;
+  bool IsVarArg;
+  bool AnalyzingMustTailForwardedRegs = false;
+  MachineFunction &MF;
+  const TargetRegisterInfo &TRI;
+  SmallVectorImpl<CCValAssign> &Locs;
+  LLVMContext &Context;
+
+  unsigned StackOffset;
+  unsigned MaxStackArgAlign;
+  SmallVector<uint32_t, 16> UsedRegs;
+  SmallVector<CCValAssign, 4> PendingLocs;
+  SmallVector<ISD::ArgFlagsTy, 4> PendingArgFlags;
+
+  // ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
+  //
+  // Vector of ByValInfo instances (ByValRegs) is introduced for byval registers
+  // tracking.
+  // Or, in other words, it tracks byval parameters that are stored in
+  // general purpose registers.
+  //
+  // For 4-byte stack alignment, the instance index corresponds to the byval
+  // parameter number in the formal argument list. Assume we have some
+  // "struct_type" with size = 4 bytes; then, for the function "foo":
+  //
+  // i32 foo(i32 %p, %struct_type* %r, i32 %s, %struct_type* %t)
+  //
+  // ByValRegs[0] describes how "%r" is stored (Begin == r1, End == r2)
+  // ByValRegs[1] describes how "%t" is stored (Begin == r3, End == r4).
+  //
+  // In the case of 8-byte stack alignment, ByValRegs may also contain
+  // information about wasted registers. In the function shown above, r3 would
+  // be wasted according to the AAPCS rules, and in that case
+  // ByValRegs[1].Waste would be "true". The ByValRegs vector size would still
+  // be 2, while "%t" itself goes to the stack: it wouldn't be described in
+  // ByValRegs.
+  //
+  // The intended use case for this collection:
+  // 1. Initially ByValRegs is empty, InRegsParamsProcessed is 0.
+  // 2. HandleByVal fills ByValRegs.
+  // 3. Argument analysis (LowerFormalArguments, for example). After a byval
+  // argument has been analyzed, InRegsParamsProcessed is incremented.
+  struct ByValInfo {
+    ByValInfo(unsigned B, unsigned E, bool IsWaste = false) :
+      Begin(B), End(E), Waste(IsWaste) {}
+    // First register allocated for current parameter.
+    unsigned Begin;
+
+    // First after last register allocated for current parameter.
+    unsigned End;
+
+    // Means that current range of registers doesn't belong to any
+    // parameters. It was wasted due to stack alignment rules.
+    // For more information see:
+    // AAPCS, 5.5 Parameter Passing, Stage C, C.3.
+    bool Waste;
+  };
+  SmallVector<ByValInfo, 4 > ByValRegs;
+
+  // InRegsParamsProcessed - shows how many instances in ByValRegs have been
+  // processed during argument analysis.
+  unsigned InRegsParamsProcessed;
+
+public:
+  CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+          SmallVectorImpl<CCValAssign> &locs, LLVMContext &C);
+
+  void addLoc(const CCValAssign &V) {
+    Locs.push_back(V);
+  }
+
+  LLVMContext &getContext() const { return Context; }
+  MachineFunction &getMachineFunction() const { return MF; }
+  CallingConv::ID getCallingConv() const { return CallingConv; }
+  bool isVarArg() const { return IsVarArg; }
+
+  /// getNextStackOffset - Return the next stack offset such that all stack
+  /// slots satisfy their alignment requirements.
+  unsigned getNextStackOffset() const {
+    return StackOffset;
+  }
+
+  /// getAlignedCallFrameSize - Return the size of the call frame needed to
+  /// be able to store all arguments and such that the alignment requirement
+  /// of each of the arguments is satisfied.
+  unsigned getAlignedCallFrameSize() const {
+    return alignTo(StackOffset, MaxStackArgAlign);
+  }
+
+  /// isAllocated - Return true if the specified register (or an alias) is
+  /// allocated.
+  bool isAllocated(unsigned Reg) const {
+    return UsedRegs[Reg/32] & (1 << (Reg&31));
+  }
+
+  /// AnalyzeFormalArguments - Analyze an array of argument values,
+  /// incorporating info about the formals into this state.
+  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+                              CCAssignFn Fn);
+
+  /// The function will invoke AnalyzeFormalArguments.
+  void AnalyzeArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+                        CCAssignFn Fn) {
+    AnalyzeFormalArguments(Ins, Fn);
+  }
+
+  /// AnalyzeReturn - Analyze the returned values of a return,
+  /// incorporating info about the result values into this state.
+  void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                     CCAssignFn Fn);
+
+  /// CheckReturn - Analyze the return values of a function, returning
+  /// true if the return can be performed without sret-demotion, and
+  /// false otherwise.
+  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+                   CCAssignFn Fn);
+
+  /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+  /// incorporating info about the passed values into this state.
+  void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                           CCAssignFn Fn);
+
+  /// AnalyzeCallOperands - Same as above except it takes vectors of types
+  /// and argument flags.
+  void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
+                           SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+                           CCAssignFn Fn);
+
+  /// The function will invoke AnalyzeCallOperands.
+  void AnalyzeArguments(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        CCAssignFn Fn) {
+    AnalyzeCallOperands(Outs, Fn);
+  }
+
+  /// AnalyzeCallResult - Analyze the return values of a call,
+  /// incorporating info about the passed values into this state.
+  void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+                         CCAssignFn Fn);
+
+  /// A shadow allocated register is a register that was allocated
+  /// but wasn't added to the location list (Locs).
+  /// \returns true if the register was allocated as shadow or false otherwise.
+  bool IsShadowAllocatedReg(unsigned Reg) const;
+
+  /// AnalyzeCallResult - Same as above except it's specialized for calls which
+  /// produce a single value.
+  void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+
+  /// getFirstUnallocated - Return the index of the first unallocated register
+  /// in the set, or Regs.size() if they are all allocated.
+  unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
+    for (unsigned i = 0; i < Regs.size(); ++i)
+      if (!isAllocated(Regs[i]))
+        return i;
+    return Regs.size();
+  }
+
+  /// AllocateReg - Attempt to allocate one register.  If it is not available,
+  /// return zero.  Otherwise, return the register, marking it and any aliases
+  /// as allocated.
+  unsigned AllocateReg(unsigned Reg) {
+    if (isAllocated(Reg)) return 0;
+    MarkAllocated(Reg);
+    return Reg;
+  }
+
+  /// Version of AllocateReg with extra register to be shadowed.
+  unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+    if (isAllocated(Reg)) return 0;
+    MarkAllocated(Reg);
+    MarkAllocated(ShadowReg);
+    return Reg;
+  }
+
+  /// AllocateReg - Attempt to allocate one of the specified registers.  If none
+  /// are available, return zero.  Otherwise, return the first one available,
+  /// marking it and any aliases as allocated.
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
+      return 0;    // Didn't find the reg.
+
+    // Mark the register and any aliases as allocated.
+    unsigned Reg = Regs[FirstUnalloc];
+    MarkAllocated(Reg);
+    return Reg;
+  }
+
+  /// AllocateRegBlock - Attempt to allocate a block of RegsRequired consecutive
+  /// registers. If this is not possible, return zero. Otherwise, return the
+  /// first register of the block that was allocated, marking the entire block
+  /// as allocated.
+  unsigned AllocateRegBlock(ArrayRef<MCPhysReg> Regs, unsigned RegsRequired) {
+    if (RegsRequired > Regs.size())
+      return 0;
+
+    for (unsigned StartIdx = 0; StartIdx <= Regs.size() - RegsRequired;
+         ++StartIdx) {
+      bool BlockAvailable = true;
+      // Check for already-allocated regs in this block
+      for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+        if (isAllocated(Regs[StartIdx + BlockIdx])) {
+          BlockAvailable = false;
+          break;
+        }
+      }
+      if (BlockAvailable) {
+        // Mark the entire block as allocated
+        for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+          MarkAllocated(Regs[StartIdx + BlockIdx]);
+        }
+        return Regs[StartIdx];
+      }
+    }
+    // No block was available
+    return 0;
+  }
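+
+  // Illustrative example: with Regs == {R0, R1, R2, R3} (hypothetical
+  // registers) and R1 already allocated, AllocateRegBlock(Regs, 2) rejects
+  // the blocks starting at R0 and R1 and returns R2, marking both R2 and R3
+  // as allocated.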
+
+  /// Version of AllocateReg with list of registers to be shadowed.
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
+      return 0;    // Didn't find the reg.
+
+    // Mark the register and any aliases as allocated.
+    unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+    MarkAllocated(Reg);
+    MarkAllocated(ShadowReg);
+    return Reg;
+  }
+
+  /// AllocateStack - Allocate a chunk of stack space with the specified size
+  /// and alignment.
+  unsigned AllocateStack(unsigned Size, unsigned Align) {
+    assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
+    StackOffset = alignTo(StackOffset, Align);
+    unsigned Result = StackOffset;
+    StackOffset += Size;
+    MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
+    ensureMaxAlignment(Align);
+    return Result;
+  }
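+
+  // Illustrative example: with StackOffset == 4, AllocateStack(8, 8) rounds
+  // the offset up to 8, returns 8 as the slot offset, and leaves
+  // StackOffset == 16 for the next allocation.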
+
+  void ensureMaxAlignment(unsigned Align) {
+    if (!AnalyzingMustTailForwardedRegs)
+      MF.getFrameInfo().ensureMaxAlignment(Align);
+  }
+
+  /// Version of AllocateStack with extra register to be shadowed.
+  unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
+    MarkAllocated(ShadowReg);
+    return AllocateStack(Size, Align);
+  }
+
+  /// Version of AllocateStack with list of extra registers to be shadowed.
+  /// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
+  unsigned AllocateStack(unsigned Size, unsigned Align,
+                         ArrayRef<MCPhysReg> ShadowRegs) {
+    for (unsigned i = 0; i < ShadowRegs.size(); ++i)
+      MarkAllocated(ShadowRegs[i]);
+    return AllocateStack(Size, Align);
+  }
+
+  // HandleByVal - Allocate a stack slot large enough to pass an argument by
+  // value. The size and alignment information of the argument is encoded in its
+  // parameter attribute.
+  void HandleByVal(unsigned ValNo, MVT ValVT,
+                   MVT LocVT, CCValAssign::LocInfo LocInfo,
+                   int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+  // Returns the count of byval arguments that are to be stored (even
+  // partially) in registers.
+  unsigned getInRegsParamsCount() const { return ByValRegs.size(); }
+
+  // Returns the count of byval in-regs arguments processed so far.
+  unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }
+
+  // Get information about N-th byval parameter that is stored in registers.
+  // Here "ByValParamIndex" is N.
+  void getInRegsParamInfo(unsigned InRegsParamRecordIndex,
+                          unsigned& BeginReg, unsigned& EndReg) const {
+    assert(InRegsParamRecordIndex < ByValRegs.size() &&
+           "Wrong ByVal parameter index");
+
+    const ByValInfo& info = ByValRegs[InRegsParamRecordIndex];
+    BeginReg = info.Begin;
+    EndReg = info.End;
+  }
+
+  // Add information about parameter that is kept in registers.
+  void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd) {
+    ByValRegs.push_back(ByValInfo(RegBegin, RegEnd));
+  }
+
+  // Advances either to the next byval parameter (excluding "waste" records),
+  // or to the end of the collection.
+  // Returns false if the end is reached.
+  bool nextInRegsParam() {
+    unsigned e = ByValRegs.size();
+    if (InRegsParamsProcessed < e)
+      ++InRegsParamsProcessed;
+    return InRegsParamsProcessed < e;
+  }
+
+  // Clear byval registers tracking info.
+  void clearByValRegsInfo() {
+    InRegsParamsProcessed = 0;
+    ByValRegs.clear();
+  }
+
+  // Rewind byval registers tracking info.
+  void rewindByValRegsInfo() {
+    InRegsParamsProcessed = 0;
+  }
+
+  // Get list of pending assignments
+  SmallVectorImpl<CCValAssign> &getPendingLocs() {
+    return PendingLocs;
+  }
+
+  // Get a list of argflags for pending assignments.
+  SmallVectorImpl<ISD::ArgFlagsTy> &getPendingArgFlags() {
+    return PendingArgFlags;
+  }
+
+  /// Compute the remaining unused register parameters that would be used for
+  /// the given value type. This is useful when varargs are passed in the
+  /// registers that normal prototyped parameters would be passed in, or for
+  /// implementing perfect forwarding.
+  void getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, MVT VT,
+                                   CCAssignFn Fn);
+
+  /// Compute the set of registers that need to be preserved and forwarded to
+  /// any musttail calls.
+  void analyzeMustTailForwardedRegisters(
+      SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
+      CCAssignFn Fn);
+
+  /// Returns true if the results of the two calling conventions are compatible.
+  /// This is usually part of the check for tailcall eligibility.
+  static bool resultsCompatible(CallingConv::ID CalleeCC,
+                                CallingConv::ID CallerCC, MachineFunction &MF,
+                                LLVMContext &C,
+                                const SmallVectorImpl<ISD::InputArg> &Ins,
+                                CCAssignFn CalleeFn, CCAssignFn CallerFn);
+
+  /// The function runs an additional analysis pass over function arguments.
+  /// It will mark each argument with the attribute flag SecArgPass.
+  /// After running, it will sort the locs list.
+  template <class T>
+  void AnalyzeArgumentsSecondPass(const SmallVectorImpl<T> &Args,
+                                  CCAssignFn Fn) {
+    unsigned NumFirstPassLocs = Locs.size();
+
+    /// Create an argument list similar to \p Args in which each argument is
+    /// marked with the SecArgPass flag.
+    SmallVector<T, 16> SecPassArg;
+    for (auto Arg : Args) {
+      Arg.Flags.setSecArgPass();
+      SecPassArg.push_back(Arg);
+    }
+
+    // Run the second argument pass
+    AnalyzeArguments(SecPassArg, Fn);
+
+    // Sort the locations of the arguments according to their original position.
+    SmallVector<CCValAssign, 16> TmpArgLocs;
+    std::swap(TmpArgLocs, Locs);
+    auto B = TmpArgLocs.begin(), E = TmpArgLocs.end();
+    std::merge(B, B + NumFirstPassLocs, B + NumFirstPassLocs, E,
+               std::back_inserter(Locs),
+               [](const CCValAssign &A, const CCValAssign &B) -> bool {
+                 return A.getValNo() < B.getValNo();
+               });
+  }
+
+private:
+  /// MarkAllocated - Mark a register and all of its aliases as allocated.
+  void MarkAllocated(unsigned Reg);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_CALLINGCONVLOWER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
new file mode 100644
index 0000000..3708c04
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
@@ -0,0 +1,389 @@
+//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains codegen-specific flags that are shared between different
+// command line tools. The tools "llc" and "opt" both use this file to prevent
+// flag duplication.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCTargetOptionsCommandFlags.def"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <string>
+using namespace llvm;
+
+static cl::opt<std::string>
+    MArch("march",
+          cl::desc("Architecture to generate code for (see --version)"));
+
+static cl::opt<std::string>
+    MCPU("mcpu",
+         cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+         cl::value_desc("cpu-name"), cl::init(""));
+
+static cl::list<std::string>
+    MAttrs("mattr", cl::CommaSeparated,
+           cl::desc("Target specific attributes (-mattr=help for details)"),
+           cl::value_desc("a1,+a2,-a3,..."));
+
+static cl::opt<Reloc::Model> RelocModel(
+    "relocation-model", cl::desc("Choose relocation model"),
+    cl::values(
+        clEnumValN(Reloc::Static, "static", "Non-relocatable code"),
+        clEnumValN(Reloc::PIC_, "pic",
+                   "Fully relocatable, position independent code"),
+        clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+                   "Relocatable external references, non-relocatable code"),
+        clEnumValN(Reloc::ROPI, "ropi",
+                   "Code and read-only data relocatable, accessed PC-relative"),
+        clEnumValN(
+            Reloc::RWPI, "rwpi",
+            "Read-write data relocatable, accessed relative to static base"),
+        clEnumValN(Reloc::ROPI_RWPI, "ropi-rwpi",
+                   "Combination of ropi and rwpi")));
+
+LLVM_ATTRIBUTE_UNUSED static Optional<Reloc::Model> getRelocModel() {
+  if (RelocModel.getNumOccurrences()) {
+    Reloc::Model R = RelocModel;
+    return R;
+  }
+  return None;
+}
+
+static cl::opt<ThreadModel::Model> TMModel(
+    "thread-model", cl::desc("Choose threading model"),
+    cl::init(ThreadModel::POSIX),
+    cl::values(clEnumValN(ThreadModel::POSIX, "posix", "POSIX thread model"),
+               clEnumValN(ThreadModel::Single, "single",
+                          "Single thread model")));
+
+static cl::opt<llvm::CodeModel::Model> CMModel(
+    "code-model", cl::desc("Choose code model"),
+    cl::values(clEnumValN(CodeModel::Small, "small", "Small code model"),
+               clEnumValN(CodeModel::Kernel, "kernel", "Kernel code model"),
+               clEnumValN(CodeModel::Medium, "medium", "Medium code model"),
+               clEnumValN(CodeModel::Large, "large", "Large code model")));
+
+LLVM_ATTRIBUTE_UNUSED static Optional<CodeModel::Model> getCodeModel() {
+  if (CMModel.getNumOccurrences()) {
+    CodeModel::Model M = CMModel;
+    return M;
+  }
+  return None;
+}
+
+static cl::opt<llvm::ExceptionHandling> ExceptionModel(
+    "exception-model", cl::desc("exception model"),
+    cl::init(ExceptionHandling::None),
+    cl::values(
+        clEnumValN(ExceptionHandling::None, "default",
+                   "default exception handling model"),
+        clEnumValN(ExceptionHandling::DwarfCFI, "dwarf",
+                   "DWARF-like CFI based exception handling"),
+        clEnumValN(ExceptionHandling::SjLj, "sjlj", "SjLj exception handling"),
+        clEnumValN(ExceptionHandling::ARM, "arm", "ARM EHABI exceptions"),
+        clEnumValN(ExceptionHandling::WinEH, "wineh",
+                   "Windows exception model"),
+        clEnumValN(ExceptionHandling::Wasm, "wasm",
+                   "WebAssembly exception handling")));
+
+static cl::opt<TargetMachine::CodeGenFileType> FileType(
+    "filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+    cl::desc(
+        "Choose a file type (not all types are supported by all targets):"),
+    cl::values(clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+                          "Emit an assembly ('.s') file"),
+               clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+                          "Emit a native object ('.o') file"),
+               clEnumValN(TargetMachine::CGFT_Null, "null",
+                          "Emit nothing, for performance testing")));
+
+static cl::opt<bool>
+    DisableFPElim("disable-fp-elim",
+                  cl::desc("Disable frame pointer elimination optimization"),
+                  cl::init(false));
+
+static cl::opt<bool> EnableUnsafeFPMath(
+    "enable-unsafe-fp-math",
+    cl::desc("Enable optimizations that may decrease FP precision"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoInfsFPMath(
+    "enable-no-infs-fp-math",
+    cl::desc("Enable FP math optimizations that assume no +-Infs"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoNaNsFPMath(
+    "enable-no-nans-fp-math",
+    cl::desc("Enable FP math optimizations that assume no NaNs"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoSignedZerosFPMath(
+    "enable-no-signed-zeros-fp-math",
+    cl::desc("Enable FP math optimizations that assume "
+             "the sign of 0 is insignificant"),
+    cl::init(false));
+
+static cl::opt<bool>
+    EnableNoTrappingFPMath("enable-no-trapping-fp-math",
+                           cl::desc("Enable setting the FP exceptions build "
+                                    "attribute not to use exceptions"),
+                           cl::init(false));
+
+static cl::opt<llvm::FPDenormal::DenormalMode> DenormalMode(
+    "denormal-fp-math",
+    cl::desc("Select which denormal numbers the code is permitted to require"),
+    cl::init(FPDenormal::IEEE),
+    cl::values(clEnumValN(FPDenormal::IEEE, "ieee",
+                          "IEEE 754 denormal numbers"),
+               clEnumValN(FPDenormal::PreserveSign, "preserve-sign",
+                          "the sign of a  flushed-to-zero number is preserved "
+                          "in the sign of 0"),
+               clEnumValN(FPDenormal::PositiveZero, "positive-zero",
+                          "denormals are flushed to positive zero")));
+
+static cl::opt<bool> EnableHonorSignDependentRoundingFPMath(
+    "enable-sign-dependent-rounding-fp-math", cl::Hidden,
+    cl::desc("Force codegen to assume rounding mode can change dynamically"),
+    cl::init(false));
+
+static cl::opt<llvm::FloatABI::ABIType> FloatABIForCalls(
+    "float-abi", cl::desc("Choose float ABI type"), cl::init(FloatABI::Default),
+    cl::values(clEnumValN(FloatABI::Default, "default",
+                          "Target default float ABI type"),
+               clEnumValN(FloatABI::Soft, "soft",
+                          "Soft float ABI (implied by -soft-float)"),
+               clEnumValN(FloatABI::Hard, "hard",
+                          "Hard float ABI (uses FP registers)")));
+
+static cl::opt<llvm::FPOpFusion::FPOpFusionMode> FuseFPOps(
+    "fp-contract", cl::desc("Enable aggressive formation of fused FP ops"),
+    cl::init(FPOpFusion::Standard),
+    cl::values(
+        clEnumValN(FPOpFusion::Fast, "fast", "Fuse FP ops whenever profitable"),
+        clEnumValN(FPOpFusion::Standard, "on", "Only fuse 'blessed' FP ops."),
+        clEnumValN(FPOpFusion::Strict, "off",
+                   "Only fuse FP ops when the result won't be affected.")));
+
+static cl::opt<bool> DontPlaceZerosInBSS(
+    "nozero-initialized-in-bss",
+    cl::desc("Don't place zero-initialized symbols into bss section"),
+    cl::init(false));
+
+static cl::opt<bool> EnableGuaranteedTailCallOpt(
+    "tailcallopt",
+    cl::desc(
+        "Turn fastcc calls into tail calls by (potentially) changing ABI."),
+    cl::init(false));
+
+static cl::opt<bool> DisableTailCalls("disable-tail-calls",
+                                      cl::desc("Never emit tail calls"),
+                                      cl::init(false));
+
+static cl::opt<bool> StackSymbolOrdering("stack-symbol-ordering",
+                                         cl::desc("Order local stack symbols."),
+                                         cl::init(true));
+
+static cl::opt<unsigned>
+    OverrideStackAlignment("stack-alignment",
+                           cl::desc("Override default stack alignment"),
+                           cl::init(0));
+
+static cl::opt<bool>
+    StackRealign("stackrealign",
+                 cl::desc("Force align the stack to the minimum alignment"),
+                 cl::init(false));
+
+static cl::opt<std::string> TrapFuncName(
+    "trap-func", cl::Hidden,
+    cl::desc("Emit a call to trap function rather than a trap instruction"),
+    cl::init(""));
+
+static cl::opt<bool> UseCtors("use-ctors",
+                              cl::desc("Use .ctors instead of .init_array."),
+                              cl::init(false));
+
+static cl::opt<bool> RelaxELFRelocations(
+    "relax-elf-relocations",
+    cl::desc("Emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL on x86-64 ELF"),
+    cl::init(false));
+
+static cl::opt<bool> DataSections("data-sections",
+                                  cl::desc("Emit data into separate sections"),
+                                  cl::init(false));
+
+static cl::opt<bool>
+    FunctionSections("function-sections",
+                     cl::desc("Emit functions into separate sections"),
+                     cl::init(false));
+
+static cl::opt<bool> EmulatedTLS("emulated-tls",
+                                 cl::desc("Use emulated TLS model"),
+                                 cl::init(false));
+
+static cl::opt<bool>
+    UniqueSectionNames("unique-section-names",
+                       cl::desc("Give unique names to every section"),
+                       cl::init(true));
+
+static cl::opt<llvm::EABI>
+    EABIVersion("meabi", cl::desc("Set EABI type (default depends on triple):"),
+                cl::init(EABI::Default),
+                cl::values(clEnumValN(EABI::Default, "default",
+                                      "Triple default EABI version"),
+                           clEnumValN(EABI::EABI4, "4", "EABI version 4"),
+                           clEnumValN(EABI::EABI5, "5", "EABI version 5"),
+                           clEnumValN(EABI::GNU, "gnu", "EABI GNU")));
+
+static cl::opt<DebuggerKind> DebuggerTuningOpt(
+    "debugger-tune", cl::desc("Tune debug info for a particular debugger"),
+    cl::init(DebuggerKind::Default),
+    cl::values(clEnumValN(DebuggerKind::GDB, "gdb", "gdb"),
+               clEnumValN(DebuggerKind::LLDB, "lldb", "lldb"),
+               clEnumValN(DebuggerKind::SCE, "sce", "SCE targets (e.g. PS4)")));
+
+static cl::opt<bool> EnableStackSizeSection(
+    "stack-size-section",
+    cl::desc("Emit a section containing stack size metadata"), cl::init(false));
+
+// Common utility function tightly tied to the options listed here. Initializes
+// a TargetOptions object with CodeGen flags and returns it.
+static TargetOptions InitTargetOptionsFromCodeGenFlags() {
+  TargetOptions Options;
+  Options.AllowFPOpFusion = FuseFPOps;
+  Options.UnsafeFPMath = EnableUnsafeFPMath;
+  Options.NoInfsFPMath = EnableNoInfsFPMath;
+  Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+  Options.NoSignedZerosFPMath = EnableNoSignedZerosFPMath;
+  Options.NoTrappingFPMath = EnableNoTrappingFPMath;
+  Options.FPDenormalMode = DenormalMode;
+  Options.HonorSignDependentRoundingFPMathOption =
+      EnableHonorSignDependentRoundingFPMath;
+  if (FloatABIForCalls != FloatABI::Default)
+    Options.FloatABIType = FloatABIForCalls;
+  Options.NoZerosInBSS = DontPlaceZerosInBSS;
+  Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+  Options.StackAlignmentOverride = OverrideStackAlignment;
+  Options.StackSymbolOrdering = StackSymbolOrdering;
+  Options.UseInitArray = !UseCtors;
+  Options.RelaxELFRelocations = RelaxELFRelocations;
+  Options.DataSections = DataSections;
+  Options.FunctionSections = FunctionSections;
+  Options.UniqueSectionNames = UniqueSectionNames;
+  Options.EmulatedTLS = EmulatedTLS;
+  Options.ExplicitEmulatedTLS = EmulatedTLS.getNumOccurrences() > 0;
+  Options.ExceptionModel = ExceptionModel;
+  Options.EmitStackSizeSection = EnableStackSizeSection;
+
+  Options.MCOptions = InitMCTargetOptionsFromFlags();
+
+  Options.ThreadModel = TMModel;
+  Options.EABIVersion = EABIVersion;
+  Options.DebuggerTuning = DebuggerTuningOpt;
+
+  return Options;
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::string getCPUStr() {
+  // If the user asked for the 'native' CPU, autodetect it here. If
+  // autodetection fails, this will return an empty string, which tells the
+  // target to pick a basic default.
+  if (MCPU == "native")
+    return sys::getHostCPUName();
+
+  return MCPU;
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::string getFeaturesStr() {
+  SubtargetFeatures Features;
+
+  // If the user asked for the 'native' CPU, we need to autodetect features.
+  // This is necessary for x86 where the CPU might not support all the
+  // features the autodetected CPU name lists in the target. For example,
+  // not all Sandybridge processors support AVX.
+  if (MCPU == "native") {
+    StringMap<bool> HostFeatures;
+    if (sys::getHostCPUFeatures(HostFeatures))
+      for (auto &F : HostFeatures)
+        Features.AddFeature(F.first(), F.second);
+  }
+
+  for (unsigned i = 0; i != MAttrs.size(); ++i)
+    Features.AddFeature(MAttrs[i]);
+
+  return Features.getString();
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::vector<std::string> getFeatureList() {
+  SubtargetFeatures Features;
+
+  // If the user asked for the 'native' CPU, we need to autodetect features.
+  // This is necessary for x86 where the CPU might not support all the
+  // features the autodetected CPU name lists in the target. For example,
+  // not all Sandybridge processors support AVX.
+  if (MCPU == "native") {
+    StringMap<bool> HostFeatures;
+    if (sys::getHostCPUFeatures(HostFeatures))
+      for (auto &F : HostFeatures)
+        Features.AddFeature(F.first(), F.second);
+  }
+
+  for (unsigned i = 0; i != MAttrs.size(); ++i)
+    Features.AddFeature(MAttrs[i]);
+
+  return Features.getFeatures();
+}
+
+/// \brief Set function attributes of functions in Module M based on CPU,
+/// Features, and command line flags.
+LLVM_ATTRIBUTE_UNUSED static void
+setFunctionAttributes(StringRef CPU, StringRef Features, Module &M) {
+  for (auto &F : M) {
+    auto &Ctx = F.getContext();
+    AttributeList Attrs = F.getAttributes();
+    AttrBuilder NewAttrs;
+
+    if (!CPU.empty())
+      NewAttrs.addAttribute("target-cpu", CPU);
+    if (!Features.empty())
+      NewAttrs.addAttribute("target-features", Features);
+    if (DisableFPElim.getNumOccurrences() > 0)
+      NewAttrs.addAttribute("no-frame-pointer-elim",
+                            DisableFPElim ? "true" : "false");
+    if (DisableTailCalls.getNumOccurrences() > 0)
+      NewAttrs.addAttribute("disable-tail-calls",
+                            toStringRef(DisableTailCalls));
+    if (StackRealign)
+      NewAttrs.addAttribute("stackrealign");
+
+    if (TrapFuncName.getNumOccurrences() > 0)
+      for (auto &B : F)
+        for (auto &I : B)
+          if (auto *Call = dyn_cast<CallInst>(&I))
+            if (const auto *F = Call->getCalledFunction())
+              if (F->getIntrinsicID() == Intrinsic::debugtrap ||
+                  F->getIntrinsicID() == Intrinsic::trap)
+                Call->addAttribute(
+                    llvm::AttributeList::FunctionIndex,
+                    Attribute::get(Ctx, "trap-func-name", TrapFuncName));
+
+    // Let NewAttrs override Attrs.
+    F.setAttributes(
+        Attrs.addAttributes(Ctx, AttributeList::FunctionIndex, NewAttrs));
+  }
+}
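+
+// Illustrative sketch (not part of the original header): a tool such as llc
+// typically combines the helpers above roughly as follows. TheTarget,
+// TheTriple, RM, CM, OLvl, and M are assumed to be set up by the caller.
+//
+// \code
+//   TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
+//   std::string CPUStr = getCPUStr(), FeaturesStr = getFeaturesStr();
+//   std::unique_ptr<TargetMachine> TM(TheTarget->createTargetMachine(
+//       TheTriple.getTriple(), CPUStr, FeaturesStr, Options, RM, CM, OLvl));
+//   setFunctionAttributes(CPUStr, FeaturesStr, M);
+// \endcode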
diff --git a/linux-x64/clang/include/llvm/CodeGen/CostTable.h b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
new file mode 100644
index 0000000..0fc16d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
@@ -0,0 +1,69 @@
+//===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Cost tables and simple lookup functions
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_COSTTABLE_H_
+#define LLVM_CODEGEN_COSTTABLE_H_
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/MachineValueType.h"
+
+namespace llvm {
+
+/// Cost Table Entry
+struct CostTblEntry {
+  int ISD;
+  MVT::SimpleValueType Type;
+  unsigned Cost;
+};
+
+/// Find an entry in the cost table whose ISD and Type match \p ISD and \p Ty.
+inline const CostTblEntry *CostTableLookup(ArrayRef<CostTblEntry> Tbl,
+                                           int ISD, MVT Ty) {
+  auto I = find_if(Tbl, [=](const CostTblEntry &Entry) {
+    return ISD == Entry.ISD && Ty == Entry.Type;
+  });
+  if (I != Tbl.end())
+    return I;
+
+  // Could not find an entry.
+  return nullptr;
+}
+
+/// Type Conversion Cost Table
+struct TypeConversionCostTblEntry {
+  int ISD;
+  MVT::SimpleValueType Dst;
+  MVT::SimpleValueType Src;
+  unsigned Cost;
+};
+
+/// Find an entry in the type conversion cost table whose ISD, Dst, and Src
+/// match the given arguments.
+inline const TypeConversionCostTblEntry *
+ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntry> Tbl,
+                       int ISD, MVT Dst, MVT Src) {
+  auto I = find_if(Tbl, [=](const TypeConversionCostTblEntry &Entry) {
+    return ISD == Entry.ISD && Src == Entry.Src && Dst == Entry.Dst;
+  });
+  if (I != Tbl.end())
+    return I;
+
+  // Could not find an entry.
+  return nullptr;
+}
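+
+// Illustrative sketch (not part of the original header): a target's TTI
+// implementation typically consults these tables with its legalized type.
+// The table contents, ISD, and LT below are made up for illustration:
+//
+// \code
+//   static const CostTblEntry MyShiftCostTable[] = {
+//     { ISD::SHL, MVT::v4i64, 1 },
+//     { ISD::SRL, MVT::v4i64, 1 },
+//   };
+//   if (const auto *Entry = CostTableLookup(MyShiftCostTable, ISD, LT.second))
+//     return LT.first * Entry->Cost;
+// \endcode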
+
+} // namespace llvm
+
+#endif /* LLVM_CODEGEN_COSTTABLE_H_ */
diff --git a/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h b/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h
new file mode 100644
index 0000000..8b59190
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h
@@ -0,0 +1,25 @@
+//===-- llvm/CodeGen/DAGCombine.h  ------- SelectionDAG Nodes ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_CODEGEN_DAGCOMBINE_H
+#define LLVM_CODEGEN_DAGCOMBINE_H
+
+namespace llvm {
+
+enum CombineLevel {
+  BeforeLegalizeTypes,
+  AfterLegalizeTypes,
+  AfterLegalizeVectorOps,
+  AfterLegalizeDAG
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h b/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h
new file mode 100644
index 0000000..d3aabe2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h
@@ -0,0 +1,222 @@
+//===- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
+#define LLVM_CODEGEN_DFAPACKETIZER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class DefaultVLIWScheduler;
+class InstrItineraryData;
+class MachineFunction;
+class MachineInstr;
+class MachineLoopInfo;
+class MCInstrDesc;
+class SUnit;
+class TargetInstrInfo;
+
+// --------------------------------------------------------------------
+// Definitions shared between DFAPacketizer.cpp and DFAPacketizerEmitter.cpp
+
+// DFA_MAX_RESTERMS * DFA_MAX_RESOURCES must fit within sizeof DFAInput.
+// This is verified in DFAPacketizer.cpp:DFAPacketizer::DFAPacketizer.
+//
+// e.g. terms x resource bit combinations that fit in uint32_t:
+//      4 terms x 8  bits = 32 bits
+//      3 terms x 10 bits = 30 bits
+//      2 terms x 16 bits = 32 bits
+//
+// e.g. terms x resource bit combinations that fit in uint64_t:
+//      8 terms x 8  bits = 64 bits
+//      7 terms x 9  bits = 63 bits
+//      6 terms x 10 bits = 60 bits
+//      5 terms x 12 bits = 60 bits
+//      4 terms x 16 bits = 64 bits <--- current
+//      3 terms x 21 bits = 63 bits
+//      2 terms x 32 bits = 64 bits
+//
+#define DFA_MAX_RESTERMS        4   // The max # of AND'ed resource terms.
+#define DFA_MAX_RESOURCES       16  // The max # of resource bits in one term.
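+
+// Illustrative sketch (not part of the original header): with the current
+// 4-terms-by-16-bits layout, an input is formed by packing each AND'ed
+// resource term into its own 16-bit field. Terms and NumTerms are assumed:
+//
+// \code
+//   uint64_t Input = 0;
+//   for (unsigned I = 0; I != NumTerms; ++I)  // NumTerms <= DFA_MAX_RESTERMS
+//     Input |= uint64_t(Terms[I]) << (I * DFA_MAX_RESOURCES);
+// \endcode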
+
+using DFAInput = uint64_t;
+using DFAStateInput = int64_t;
+
+#define DFA_TBLTYPE             "int64_t" // For generating DFAStateInputTable.
+// --------------------------------------------------------------------
+
+class DFAPacketizer {
+private:
+  using UnsignPair = std::pair<unsigned, DFAInput>;
+
+  const InstrItineraryData *InstrItins;
+  int CurrentState = 0;
+  const DFAStateInput (*DFAStateInputTable)[2];
+  const unsigned *DFAStateEntryTable;
+
+  // CachedTable is a map from <FromState, Input> to ToState.
+  DenseMap<UnsignPair, unsigned> CachedTable;
+
+  // Read the DFA transition table and update CachedTable.
+  void ReadTable(unsigned state);
+
+public:
+  DFAPacketizer(const InstrItineraryData *I, const DFAStateInput (*SIT)[2],
+                const unsigned *SET);
+
+  // Reset the current state to make all resources available.
+  void clearResources() {
+    CurrentState = 0;
+  }
+
+  // Return the DFAInput for an instruction class.
+  DFAInput getInsnInput(unsigned InsnClass);
+
+  // Return the DFAInput for an instruction class input vector.
+  static DFAInput getInsnInput(const std::vector<unsigned> &InsnClass);
+
+  // Check if the resources occupied by a MCInstrDesc are available in
+  // the current state.
+  bool canReserveResources(const MCInstrDesc *MID);
+
+  // Reserve the resources occupied by a MCInstrDesc and change the current
+  // state to reflect that change.
+  void reserveResources(const MCInstrDesc *MID);
+
+  // Check if the resources occupied by a machine instruction are available
+  // in the current state.
+  bool canReserveResources(MachineInstr &MI);
+
+  // Reserve the resources occupied by a machine instruction and change the
+  // current state to reflect that change.
+  void reserveResources(MachineInstr &MI);
+
+  const InstrItineraryData *getInstrItins() const { return InstrItins; }
+};
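+
+// Illustrative sketch (not part of the original header): during packetization
+// the tracker is queried before an instruction is committed to the current
+// packet. RT is assumed to be a DFAPacketizer pointer and MI the candidate
+// instruction:
+//
+// \code
+//   if (RT->canReserveResources(MI))
+//     RT->reserveResources(MI);  // MI fits; add it to the packet.
+//   else
+//     RT->clearResources();      // Packet is full; start a new one.
+// \endcode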
+
+// VLIWPacketizerList implements a simple VLIW packetizer using a DFA. The
+// packetizer works on machine basic blocks. For each instruction I in BB,
+// the packetizer consults the DFA to see if machine resources are available
+// to execute I. If so, the packetizer checks if I depends on any instruction
+// in the current packet. If no dependency is found, I is added to the current
+// packet and the machine resources are marked as taken. If any dependency is
+// found, a target API call is made to prune the dependence.
+class VLIWPacketizerList {
+protected:
+  MachineFunction &MF;
+  const TargetInstrInfo *TII;
+  AliasAnalysis *AA;
+
+  // The VLIW Scheduler.
+  DefaultVLIWScheduler *VLIWScheduler;
+  // Vector of instructions assigned to the current packet.
+  std::vector<MachineInstr*> CurrentPacketMIs;
+  // DFA resource tracker.
+  DFAPacketizer *ResourceTracker;
+  // Map: MI -> SU.
+  std::map<MachineInstr*, SUnit*> MIToSUnit;
+
+public:
+  // The AliasAnalysis parameter can be nullptr.
+  VLIWPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+                     AliasAnalysis *AA);
+
+  virtual ~VLIWPacketizerList();
+
+  // Implement this API in the backend to bundle instructions.
+  void PacketizeMIs(MachineBasicBlock *MBB,
+                    MachineBasicBlock::iterator BeginItr,
+                    MachineBasicBlock::iterator EndItr);
+
+  // Return the ResourceTracker.
+  DFAPacketizer *getResourceTracker() {return ResourceTracker;}
+
+  // addToPacket - Add MI to the current packet.
+  virtual MachineBasicBlock::iterator addToPacket(MachineInstr &MI) {
+    CurrentPacketMIs.push_back(&MI);
+    ResourceTracker->reserveResources(MI);
+    return MI;
+  }
+
+  // End the current packet and reset the state of the packetizer.
+  // Overriding this function allows the target-specific packetizer
+  // to perform custom finalization.
+  virtual void endPacket(MachineBasicBlock *MBB,
+                         MachineBasicBlock::iterator MI);
+
+  // Perform initialization before packetizing an instruction. This
+  // function is meant to be overridden by the target-dependent packetizer.
+  virtual void initPacketizerState() {}
+
+  // Check if the given instruction I should be ignored by the packetizer.
+  virtual bool ignorePseudoInstruction(const MachineInstr &I,
+                                       const MachineBasicBlock *MBB) {
+    return false;
+  }
+
+  // Return true if instruction MI cannot be packetized with any other
+  // instruction, which means that MI itself is a packet.
+  virtual bool isSoloInstruction(const MachineInstr &MI) { return true; }
+
+  // Check if the packetizer should try to add the given instruction to
+  // the current packet. One reason it may not be desirable to include an
+  // instruction in the current packet is that it would cause a stall.
+  // If this function returns "false", the current packet will be ended,
+  // and the instruction will be added to the next packet.
+  virtual bool shouldAddToPacket(const MachineInstr &MI) { return true; }
+
+  // Check if it is legal to packetize SUI and SUJ together.
+  virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+  // Check if it is legal to prune the dependence between SUI and SUJ.
+  virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+  // Add a DAG mutation to be done before the packetization begins.
+  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation);
+
+  bool alias(const MachineInstr &MI1, const MachineInstr &MI2,
+             bool UseTBAA = true) const;
+
+private:
+  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2,
+             bool UseTBAA = true) const;
+};
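+
+// Illustrative sketch (not part of the original header): a target packetizer
+// derives from VLIWPacketizerList and overrides the legality hooks. The class
+// below is hypothetical:
+//
+// \code
+//   class MyPacketizerList : public VLIWPacketizerList {
+//   public:
+//     MyPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+//                      AliasAnalysis *AA)
+//         : VLIWPacketizerList(MF, MLI, AA) {}
+//     bool isSoloInstruction(const MachineInstr &MI) override {
+//       return MI.isInlineAsm();  // e.g. never bundle inline asm
+//     }
+//     bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
+//       return !SUJ->isSucc(SUI);  // e.g. forbid intra-packet dependences
+//     }
+//   };
+// \endcode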
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DFAPACKETIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/DIE.h b/linux-x64/clang/include/llvm/CodeGen/DIE.h
new file mode 100644
index 0000000..f809fc9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DIE.h
@@ -0,0 +1,910 @@
+//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures for DWARF info entries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+#define LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class DIE;
+class DIEUnit;
+class MCExpr;
+class MCSection;
+class MCSymbol;
+class raw_ostream;
+
+//===--------------------------------------------------------------------===//
+/// Dwarf abbreviation data, describes one attribute of a Dwarf abbreviation.
+class DIEAbbrevData {
+  /// Dwarf attribute code.
+  dwarf::Attribute Attribute;
+
+  /// Dwarf form code.
+  dwarf::Form Form;
+
+  /// Dwarf attribute value for DW_FORM_implicit_const
+  int64_t Value = 0;
+
+public:
+  DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
+      : Attribute(A), Form(F) {}
+  DIEAbbrevData(dwarf::Attribute A, int64_t V)
+      : Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
+
+  /// Accessors.
+  /// @{
+  dwarf::Attribute getAttribute() const { return Attribute; }
+  dwarf::Form getForm() const { return Form; }
+  int64_t getValue() const { return Value; }
+  /// @}
+
+  /// Used to gather unique data for the abbreviation folding set.
+  void Profile(FoldingSetNodeID &ID) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Dwarf abbreviation, describes the organization of a debug information
+/// object.
+class DIEAbbrev : public FoldingSetNode {
+  /// Unique number for node.
+  unsigned Number;
+
+  /// Dwarf tag code.
+  dwarf::Tag Tag;
+
+  /// Whether or not this node has children.
+  ///
+  /// This cheats a bit in all of the uses since the values in the standard
+  /// are 0 and 1 for no children and children respectively.
+  bool Children;
+
+  /// Raw data bytes for abbreviation.
+  SmallVector<DIEAbbrevData, 12> Data;
+
+public:
+  DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C) {}
+
+  /// Accessors.
+  /// @{
+  dwarf::Tag getTag() const { return Tag; }
+  unsigned getNumber() const { return Number; }
+  bool hasChildren() const { return Children; }
+  const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
+  void setChildrenFlag(bool hasChild) { Children = hasChild; }
+  void setNumber(unsigned N) { Number = N; }
+  /// @}
+
+  /// Adds another set of attribute information to the abbreviation.
+  void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
+    Data.push_back(DIEAbbrevData(Attribute, Form));
+  }
+
+  /// Adds attribute with DW_FORM_implicit_const value
+  void AddImplicitConstAttribute(dwarf::Attribute Attribute, int64_t Value) {
+    Data.push_back(DIEAbbrevData(Attribute, Value));
+  }
+
+  /// Used to gather unique data for the abbreviation folding set.
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// Print the abbreviation using the specified asm printer.
+  void Emit(const AsmPrinter *AP) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Helps unique DIEAbbrev objects and assigns abbreviation numbers.
+///
+/// This class will unique the DIE abbreviations for an llvm::DIE object and
+/// assign a unique abbreviation number to each unique DIEAbbrev object it
+/// finds. The resulting collection of DIEAbbrev objects can then be emitted
+/// into the .debug_abbrev section.
+class DIEAbbrevSet {
+  /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
+  /// storage container.
+  BumpPtrAllocator &Alloc;
+  /// \brief FoldingSet that uniques the abbreviations.
+  FoldingSet<DIEAbbrev> AbbreviationsSet;
+  /// A list of all the unique abbreviations in use.
+  std::vector<DIEAbbrev *> Abbreviations;
+
+public:
+  DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
+  ~DIEAbbrevSet();
+
+  /// Generate the abbreviation declaration for a DIE and return a pointer to
+  /// the generated abbreviation.
+  ///
+  /// \param Die the debug info entry to generate the abbreviation for.
+  /// \returns A reference to the uniqued abbreviation declaration that is
+  /// owned by this class.
+  DIEAbbrev &uniqueAbbreviation(DIE &Die);
+
+  /// Print all abbreviations using the specified asm printer.
+  void Emit(const AsmPrinter *AP, MCSection *Section) const;
+};
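+
+// Illustrative sketch (not part of the original header): abbreviations are
+// uniqued per DIE and then emitted once into the .debug_abbrev section.
+// Alloc, Die, AP, and AbbrevSection are assumed; for a whole DIE tree,
+// DIE::computeOffsetsAndAbbrevs (declared below) drives the uniquing:
+//
+// \code
+//   DIEAbbrevSet Abbrevs(Alloc);
+//   Die.setAbbrevNumber(Abbrevs.uniqueAbbreviation(Die).getNumber());
+//   Abbrevs.Emit(AP, AbbrevSection);
+// \endcode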
+
+//===--------------------------------------------------------------------===//
+/// An integer value DIE.
+///
+class DIEInteger {
+  uint64_t Integer;
+
+public:
+  explicit DIEInteger(uint64_t I) : Integer(I) {}
+
+  /// Choose the best form for integer.
+  static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
+    if (IsSigned) {
+      const int64_t SignedInt = Int;
+      if ((char)Int == SignedInt)
+        return dwarf::DW_FORM_data1;
+      if ((short)Int == SignedInt)
+        return dwarf::DW_FORM_data2;
+      if ((int)Int == SignedInt)
+        return dwarf::DW_FORM_data4;
+    } else {
+      if ((unsigned char)Int == Int)
+        return dwarf::DW_FORM_data1;
+      if ((unsigned short)Int == Int)
+        return dwarf::DW_FORM_data2;
+      if ((unsigned int)Int == Int)
+        return dwarf::DW_FORM_data4;
+    }
+    return dwarf::DW_FORM_data8;
+  }
+
+  uint64_t getValue() const { return Integer; }
+  void setValue(uint64_t Val) { Integer = Val; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// An expression DIE.
+class DIEExpr {
+  const MCExpr *Expr;
+
+public:
+  explicit DIEExpr(const MCExpr *E) : Expr(E) {}
+
+  /// Get MCExpr.
+  const MCExpr *getValue() const { return Expr; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A label DIE.
+class DIELabel {
+  const MCSymbol *Label;
+
+public:
+  explicit DIELabel(const MCSymbol *L) : Label(L) {}
+
+  /// Get MCSymbol.
+  const MCSymbol *getValue() const { return Label; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A simple label difference DIE.
+///
+class DIEDelta {
+  const MCSymbol *LabelHi;
+  const MCSymbol *LabelLo;
+
+public:
+  DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A container for string pool string values.
+///
+/// This class is used with the DW_FORM_strp and DW_FORM_GNU_str_index forms.
+class DIEString {
+  DwarfStringPoolEntryRef S;
+
+public:
+  DIEString(DwarfStringPoolEntryRef S) : S(S) {}
+
+  /// Grab the string out of the object.
+  StringRef getString() const { return S.getString(); }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A container for inline string values.
+///
+/// This class is used with the DW_FORM_string form.
+class DIEInlineString {
+  StringRef S;
+
+public:
+  template <typename Allocator>
+  explicit DIEInlineString(StringRef Str, Allocator &A) : S(Str.copy(A)) {}
+
+  ~DIEInlineString() = default;
+
+  /// Grab the string out of the object.
+  StringRef getString() const { return S; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A pointer to another debug information entry.  An instance of this class can
+/// also be used as a proxy for a debug information entry not yet defined
+/// (i.e. types).
+class DIEEntry {
+  DIE *Entry;
+
+public:
+  DIEEntry() = delete;
+  explicit DIEEntry(DIE &E) : Entry(&E) {}
+
+  DIE &getEntry() const { return *Entry; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Represents a pointer to a location list in the debug_loc
+/// section.
+class DIELocList {
+  /// Index into the .debug_loc vector.
+  size_t Index;
+
+public:
+  DIELocList(size_t I) : Index(I) {}
+
+  /// Grab the current index out.
+  size_t getValue() const { return Index; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A debug information entry value. Some of these roughly correlate
+/// to DWARF attribute classes.
+class DIEBlock;
+class DIELoc;
+class DIEValue {
+public:
+  enum Type {
+    isNone,
+#define HANDLE_DIEVALUE(T) is##T,
+#include "llvm/CodeGen/DIEValue.def"
+  };
+
+private:
+  /// Type of data stored in the value.
+  Type Ty = isNone;
+  dwarf::Attribute Attribute = (dwarf::Attribute)0;
+  dwarf::Form Form = (dwarf::Form)0;
+
+  /// Storage for the value.
+  ///
+  /// All values that aren't standard layout (or are larger than 8 bytes)
+  /// should be stored by reference instead of by value.
+  using ValTy = AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
+                                      DIEDelta *, DIEEntry, DIEBlock *,
+                                      DIELoc *, DIELocList>;
+
+  static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
+                    sizeof(ValTy) <= sizeof(void *),
+                "Expected all large types to be stored via pointer");
+
+  /// Underlying stored value.
+  ValTy Val;
+
+  template <class T> void construct(T V) {
+    static_assert(std::is_standard_layout<T>::value ||
+                      std::is_pointer<T>::value,
+                  "Expected standard layout or pointer");
+    new (reinterpret_cast<void *>(Val.buffer)) T(V);
+  }
+
+  template <class T> T *get() { return reinterpret_cast<T *>(Val.buffer); }
+  template <class T> const T *get() const {
+    return reinterpret_cast<const T *>(Val.buffer);
+  }
+  template <class T> void destruct() { get<T>()->~T(); }
+
+  /// Destroy the underlying value.
+  ///
+  /// This should get optimized down to a no-op.  We could skip it if we could
+  /// add a static assert on \a std::is_trivially_copyable(), but we currently
+  /// support versions of GCC that don't understand that.
+  void destroyVal() {
+    switch (Ty) {
+    case isNone:
+      return;
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  case is##T:                                                                  \
+    destruct<DIE##T>();                                                        \
+    return;
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  case is##T:                                                                  \
+    destruct<const DIE##T *>();                                                \
+    return;
+#include "llvm/CodeGen/DIEValue.def"
+    }
+  }
+
+  /// Copy the underlying value.
+  ///
+  /// This should get optimized down to a simple copy.  We need to actually
+  /// construct the value, rather than calling memcpy, to satisfy strict
+  /// aliasing rules.
+  void copyVal(const DIEValue &X) {
+    switch (Ty) {
+    case isNone:
+      return;
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  case is##T:                                                                  \
+    construct<DIE##T>(*X.get<DIE##T>());                                       \
+    return;
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  case is##T:                                                                  \
+    construct<const DIE##T *>(*X.get<const DIE##T *>());                       \
+    return;
+#include "llvm/CodeGen/DIEValue.def"
+    }
+  }
+
+public:
+  DIEValue() = default;
+
+  DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
+    copyVal(X);
+  }
+
+  DIEValue &operator=(const DIEValue &X) {
+    destroyVal();
+    Ty = X.Ty;
+    Attribute = X.Attribute;
+    Form = X.Form;
+    copyVal(X);
+    return *this;
+  }
+
+  ~DIEValue() { destroyVal(); }
+
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V)      \
+      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
+    construct<DIE##T>(V);                                                      \
+  }
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V)      \
+      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
+    assert(V && "Expected valid value");                                       \
+    construct<const DIE##T *>(V);                                              \
+  }
+#include "llvm/CodeGen/DIEValue.def"
+
+  /// Accessors.
+  /// @{
+  Type getType() const { return Ty; }
+  dwarf::Attribute getAttribute() const { return Attribute; }
+  dwarf::Form getForm() const { return Form; }
+  explicit operator bool() const { return Ty; }
+  /// @}
+
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  const DIE##T &getDIE##T() const {                                            \
+    assert(getType() == is##T && "Expected " #T);                              \
+    return *get<DIE##T>();                                                     \
+  }
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  const DIE##T &getDIE##T() const {                                            \
+    assert(getType() == is##T && "Expected " #T);                              \
+    return **get<const DIE##T *>();                                            \
+  }
+#include "llvm/CodeGen/DIEValue.def"
+
+  /// Emit value via the Dwarf writer.
+  void EmitValue(const AsmPrinter *AP) const;
+
+  /// Return the size of a value in bytes.
+  unsigned SizeOf(const AsmPrinter *AP) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+};
+
+struct IntrusiveBackListNode {
+  PointerIntPair<IntrusiveBackListNode *, 1> Next;
+
+  IntrusiveBackListNode() : Next(this, true) {}
+
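+  // The low bit of Next marks the end of a list: an unlinked node points at
+  // itself with the bit set, and the last node of a list points back to the
+  // first element with the bit set, so getNext() returns nullptr for it while
+  // IntrusiveBackList::begin() can recover the head from Last->Next.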
+  IntrusiveBackListNode *getNext() const {
+    return Next.getInt() ? nullptr : Next.getPointer();
+  }
+};
+
+struct IntrusiveBackListBase {
+  using Node = IntrusiveBackListNode;
+
+  Node *Last = nullptr;
+
+  bool empty() const { return !Last; }
+
+  void push_back(Node &N) {
+    assert(N.Next.getPointer() == &N && "Expected unlinked node");
+    assert(N.Next.getInt() == true && "Expected unlinked node");
+
+    if (Last) {
+      N.Next = Last->Next;
+      Last->Next.setPointerAndInt(&N, false);
+    }
+    Last = &N;
+  }
+};
+
+template <class T> class IntrusiveBackList : IntrusiveBackListBase {
+public:
+  using IntrusiveBackListBase::empty;
+
+  void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
+  T &back() { return *static_cast<T *>(Last); }
+  const T &back() const { return *static_cast<T *>(Last); }
+
+  class const_iterator;
+  class iterator
+      : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
+    friend class const_iterator;
+
+    Node *N = nullptr;
+
+  public:
+    iterator() = default;
+    explicit iterator(T *N) : N(N) {}
+
+    iterator &operator++() {
+      N = N->getNext();
+      return *this;
+    }
+
+    explicit operator bool() const { return N; }
+    T &operator*() const { return *static_cast<T *>(N); }
+
+    bool operator==(const iterator &X) const { return N == X.N; }
+    bool operator!=(const iterator &X) const { return N != X.N; }
+  };
+
+  class const_iterator
+      : public iterator_facade_base<const_iterator, std::forward_iterator_tag,
+                                    const T> {
+    const Node *N = nullptr;
+
+  public:
+    const_iterator() = default;
+    // Placate MSVC by explicitly scoping 'iterator'.
+    const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
+    explicit const_iterator(const T *N) : N(N) {}
+
+    const_iterator &operator++() {
+      N = N->getNext();
+      return *this;
+    }
+
+    explicit operator bool() const { return N; }
+    const T &operator*() const { return *static_cast<const T *>(N); }
+
+    bool operator==(const const_iterator &X) const { return N == X.N; }
+    bool operator!=(const const_iterator &X) const { return N != X.N; }
+  };
+
+  iterator begin() {
+    return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
+  }
+  const_iterator begin() const {
+    return const_cast<IntrusiveBackList *>(this)->begin();
+  }
+  iterator end() { return iterator(); }
+  const_iterator end() const { return const_iterator(); }
+
+  static iterator toIterator(T &N) { return iterator(&N); }
+  static const_iterator toIterator(const T &N) { return const_iterator(&N); }
+};
+
+/// A list of DIE values.
+///
+/// This is a singly-linked list, but instead of reversing the order of
+/// insertion, we keep a pointer to the back of the list so we can push in
+/// order.
+///
+/// There are two main reasons to choose a linked list over a customized
+/// vector-like data structure.
+///
+///  1. For teardown efficiency, we want DIEs to be BumpPtrAllocated.  Using a
+///     linked list here makes this way easier to accomplish.
+///  2. Carrying an extra pointer per \a DIEValue isn't expensive.  45% of DIEs
+///     have 2 or fewer values, and 90% have 5 or fewer.  A vector would be
+///     over-allocated by 50% on average anyway, the same cost as the
+///     linked-list node.
+class DIEValueList {
+  struct Node : IntrusiveBackListNode {
+    DIEValue V;
+
+    explicit Node(DIEValue V) : V(V) {}
+  };
+
+  using ListTy = IntrusiveBackList<Node>;
+
+  ListTy List;
+
+public:
+  class const_value_iterator;
+  class value_iterator
+      : public iterator_adaptor_base<value_iterator, ListTy::iterator,
+                                     std::forward_iterator_tag, DIEValue> {
+    friend class const_value_iterator;
+
+    using iterator_adaptor =
+        iterator_adaptor_base<value_iterator, ListTy::iterator,
+                              std::forward_iterator_tag, DIEValue>;
+
+  public:
+    value_iterator() = default;
+    explicit value_iterator(ListTy::iterator X) : iterator_adaptor(X) {}
+
+    explicit operator bool() const { return bool(wrapped()); }
+    DIEValue &operator*() const { return wrapped()->V; }
+  };
+
+  class const_value_iterator : public iterator_adaptor_base<
+                                   const_value_iterator, ListTy::const_iterator,
+                                   std::forward_iterator_tag, const DIEValue> {
+    using iterator_adaptor =
+        iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
+                              std::forward_iterator_tag, const DIEValue>;
+
+  public:
+    const_value_iterator() = default;
+    const_value_iterator(DIEValueList::value_iterator X)
+        : iterator_adaptor(X.wrapped()) {}
+    explicit const_value_iterator(ListTy::const_iterator X)
+        : iterator_adaptor(X) {}
+
+    explicit operator bool() const { return bool(wrapped()); }
+    const DIEValue &operator*() const { return wrapped()->V; }
+  };
+
+  using value_range = iterator_range<value_iterator>;
+  using const_value_range = iterator_range<const_value_iterator>;
+
+  value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
+    List.push_back(*new (Alloc) Node(V));
+    return value_iterator(ListTy::toIterator(List.back()));
+  }
+  template <class T>
+  value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
+                    dwarf::Form Form, T &&Value) {
+    return addValue(Alloc, DIEValue(Attribute, Form, std::forward<T>(Value)));
+  }
+
+  value_range values() {
+    return make_range(value_iterator(List.begin()), value_iterator(List.end()));
+  }
+  const_value_range values() const {
+    return make_range(const_value_iterator(List.begin()),
+                      const_value_iterator(List.end()));
+  }
+};
+
+//===--------------------------------------------------------------------===//
+/// A structured debug information entry.  Has an abbreviation which
+/// describes its organization.
+class DIE : IntrusiveBackListNode, public DIEValueList {
+  friend class IntrusiveBackList<DIE>;
+  friend class DIEUnit;
+
+  /// Dwarf unit relative offset.
+  unsigned Offset = 0;
+  /// Size of instance + children.
+  unsigned Size = 0;
+  unsigned AbbrevNumber = ~0u;
+  /// Dwarf tag code.
+  dwarf::Tag Tag = (dwarf::Tag)0;
+  /// Set to true to force a DIE to emit an abbreviation that says it has
+  /// children even when it doesn't. This is used for unit testing purposes.
+  bool ForceChildren = false;
+  /// Children DIEs.
+  IntrusiveBackList<DIE> Children;
+
+  /// The owner is either the parent DIE for children of other DIEs, or a
+  /// DIEUnit which contains this DIE as its unit DIE.
+  PointerUnion<DIE *, DIEUnit *> Owner;
+
+  explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}
+
+public:
+  DIE() = delete;
+  DIE(const DIE &RHS) = delete;
+  DIE(DIE &&RHS) = delete;
+  DIE &operator=(const DIE &RHS) = delete;
+  DIE &operator=(const DIE &&RHS) = delete;
+
+  static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
+    return new (Alloc) DIE(Tag);
+  }
+
+  // Accessors.
+  unsigned getAbbrevNumber() const { return AbbrevNumber; }
+  dwarf::Tag getTag() const { return Tag; }
+  /// Get the compile/type unit relative offset of this DIE.
+  unsigned getOffset() const { return Offset; }
+  unsigned getSize() const { return Size; }
+  bool hasChildren() const { return ForceChildren || !Children.empty(); }
+  void setForceChildren(bool B) { ForceChildren = B; }
+
+  using child_iterator = IntrusiveBackList<DIE>::iterator;
+  using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
+  using child_range = iterator_range<child_iterator>;
+  using const_child_range = iterator_range<const_child_iterator>;
+
+  child_range children() {
+    return make_range(Children.begin(), Children.end());
+  }
+  const_child_range children() const {
+    return make_range(Children.begin(), Children.end());
+  }
+
+  DIE *getParent() const;
+
+  /// Generate the abbreviation for this DIE.
+  ///
+  /// Calculate the abbreviation for this, which should be uniqued and
+  /// eventually used to call \a setAbbrevNumber().
+  DIEAbbrev generateAbbrev() const;
+
+  /// Set the abbreviation number for this DIE.
+  void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }
+
+  /// Get the absolute offset within the .debug_info or .debug_types section
+  /// for this DIE.
+  unsigned getDebugSectionOffset() const;
+
+  /// Compute the offset of this DIE and all its children.
+  ///
+  /// This function gets called just before we are going to generate the debug
+  /// information and gives each DIE a chance to figure out its CU relative DIE
+  /// offset, unique its abbreviation and fill in the abbreviation code, and
+  /// return the unit offset that points to where the next DIE will be emitted
+  /// within the debug unit section. After this function has been called for all
+  /// DIE objects, the DWARF can be generated since all DIEs will be able to
+  /// properly refer to other DIE objects since all DIEs have calculated their
+  /// offsets.
+  ///
+  /// \param AP AsmPrinter to use when calculating sizes.
+  /// \param AbbrevSet the abbreviation used to unique DIE abbreviations.
+  /// \param CUOffset the compile/type unit relative offset in bytes.
+  /// \returns the offset for the DIE that follows this DIE within the
+  /// current compile/type unit.
+  unsigned computeOffsetsAndAbbrevs(const AsmPrinter *AP,
+                                    DIEAbbrevSet &AbbrevSet, unsigned CUOffset);
+
+  /// Climb up the parent chain to get the compile unit or type unit DIE that
+  /// this DIE belongs to.
+  ///
+  /// \returns the compile or type unit DIE that owns this DIE, or NULL if
+  /// this DIE hasn't been added to a unit DIE.
+  const DIE *getUnitDie() const;
+
+  /// Climb up the parent chain to get the compile unit or type unit that this
+  /// DIE belongs to.
+  ///
+  /// \returns the DIEUnit that represents the compile or type unit that owns
+  /// this DIE, or NULL if this DIE hasn't been added to a unit DIE.
+  const DIEUnit *getUnit() const;
+
+  void setOffset(unsigned O) { Offset = O; }
+  void setSize(unsigned S) { Size = S; }
+
+  /// Add a child to the DIE.
+  DIE &addChild(DIE *Child) {
+    assert(!Child->getParent() && "Child should be orphaned");
+    Child->Owner = this;
+    Children.push_back(*Child);
+    return Children.back();
+  }
+
+  /// Find a value in the DIE with the attribute given.
+  ///
+  /// Returns a default-constructed DIEValue (where \a DIEValue::getType()
+  /// gives \a DIEValue::isNone) if no such attribute exists.
+  DIEValue findAttribute(dwarf::Attribute Attribute) const;
+
+  void print(raw_ostream &O, unsigned IndentCount = 0) const;
+  void dump() const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Represents a compile or type unit.
+class DIEUnit {
+  /// The compile unit or type unit DIE. This variable must be an instance of
+  /// DIE so that we can calculate the DIEUnit from any DIE by traversing the
+  /// parent backchain up to the unit DIE and then casting it to a DIEUnit.
+  /// This allows us to find the DIEUnit for any DIE without having to store
+  /// a pointer to the DIEUnit in each DIE instance.
+  DIE Die;
+  /// The section this unit will be emitted in. This may or may not be set to
+  /// a valid section depending on the client that is emitting DWARF.
+  MCSection *Section;
+  uint64_t Offset; /// .debug_info or .debug_types absolute section offset.
+  uint32_t Length; /// The length in bytes of all of the DIEs in this unit.
+  const uint16_t Version; /// The Dwarf version number for this unit.
+  const uint8_t AddrSize; /// The size in bytes of an address for this unit.
+protected:
+  ~DIEUnit() = default;
+
+public:
+  DIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag);
+  DIEUnit(const DIEUnit &RHS) = delete;
+  DIEUnit(DIEUnit &&RHS) = delete;
+  void operator=(const DIEUnit &RHS) = delete;
+  void operator=(const DIEUnit &&RHS) = delete;
+  /// Set the section that this DIEUnit will be emitted into.
+  ///
+  /// This function is used by some clients to set the section. Not all clients
+  /// that emit DWARF use this section variable.
+  void setSection(MCSection *Section) {
+    assert(!this->Section);
+    this->Section = Section;
+  }
+
+  virtual const MCSymbol *getCrossSectionRelativeBaseAddress() const {
+    return nullptr;
+  }
+
+  /// Return the section that this DIEUnit will be emitted into.
+  ///
+  /// \returns Section pointer which can be NULL.
+  MCSection *getSection() const { return Section; }
+  void setDebugSectionOffset(unsigned O) { Offset = O; }
+  unsigned getDebugSectionOffset() const { return Offset; }
+  void setLength(uint64_t L) { Length = L; }
+  uint64_t getLength() const { return Length; }
+  uint16_t getDwarfVersion() const { return Version; }
+  uint16_t getAddressSize() const { return AddrSize; }
+  DIE &getUnitDie() { return Die; }
+  const DIE &getUnitDie() const { return Die; }
+};
+
+struct BasicDIEUnit final : DIEUnit {
+  BasicDIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag)
+      : DIEUnit(Version, AddrSize, UnitTag) {}
+};
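+
+// Illustrative sketch (not part of the original header): a unit and its DIE
+// tree are typically built out of a BumpPtrAllocator, e.g.:
+//
+// \code
+//   BumpPtrAllocator Alloc;
+//   BasicDIEUnit Unit(/*Version=*/4, /*AddrSize=*/8,
+//                     dwarf::DW_TAG_compile_unit);
+//   DIE &CU = Unit.getUnitDie();
+//   DIE &Var = CU.addChild(DIE::get(Alloc, dwarf::DW_TAG_variable));
+//   Var.addValue(Alloc, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag_present,
+//                DIEInteger(1));
+// \endcode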
+
+//===--------------------------------------------------------------------===//
+/// DIELoc - Represents an expression location.
+//
+class DIELoc : public DIEValueList {
+  mutable unsigned Size = 0; // Size in bytes excluding size header.
+
+public:
+  DIELoc() = default;
+
+  /// ComputeSize - Calculate the size of the location expression.
+  ///
+  unsigned ComputeSize(const AsmPrinter *AP) const;
+
+  /// BestForm - Choose the best form for data.
+  ///
+  dwarf::Form BestForm(unsigned DwarfVersion) const {
+    if (DwarfVersion > 3)
+      return dwarf::DW_FORM_exprloc;
+    // Pre-DWARF4 location expressions were blocks and not exprloc.
+    if ((unsigned char)Size == Size)
+      return dwarf::DW_FORM_block1;
+    if ((unsigned short)Size == Size)
+      return dwarf::DW_FORM_block2;
+    if ((unsigned int)Size == Size)
+      return dwarf::DW_FORM_block4;
+    return dwarf::DW_FORM_block;
+  }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEBlock - Represents a block of values.
+//
+class DIEBlock : public DIEValueList {
+  mutable unsigned Size = 0; // Size in bytes excluding size header.
+
+public:
+  DIEBlock() = default;
+
+  /// ComputeSize - Calculate the size of the location expression.
+  ///
+  unsigned ComputeSize(const AsmPrinter *AP) const;
+
+  /// BestForm - Choose the best form for data.
+  ///
+  dwarf::Form BestForm() const {
+    if ((unsigned char)Size == Size)
+      return dwarf::DW_FORM_block1;
+    if ((unsigned short)Size == Size)
+      return dwarf::DW_FORM_block2;
+    if ((unsigned int)Size == Size)
+      return dwarf::DW_FORM_block4;
+    return dwarf::DW_FORM_block;
+  }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/DIEValue.def b/linux-x64/clang/include/llvm/CodeGen/DIEValue.def
new file mode 100644
index 0000000..a3fce9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DIEValue.def
@@ -0,0 +1,47 @@
+//===- llvm/CodeGen/DIEValue.def - DIEValue types ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through all types of DIEValue.
+//
+//===----------------------------------------------------------------------===//
+
+#if !(defined HANDLE_DIEVALUE || defined HANDLE_DIEVALUE_SMALL ||              \
+      defined HANDLE_DIEVALUE_LARGE)
+#error "Missing macro definition of HANDLE_DIEVALUE"
+#endif
+
+// Handler for all values.
+#ifndef HANDLE_DIEVALUE
+#define HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for small values.
+#ifndef HANDLE_DIEVALUE_SMALL
+#define HANDLE_DIEVALUE_SMALL(T) HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for large values.
+#ifndef HANDLE_DIEVALUE_LARGE
+#define HANDLE_DIEVALUE_LARGE(T) HANDLE_DIEVALUE(T)
+#endif
+
+HANDLE_DIEVALUE_SMALL(Integer)
+HANDLE_DIEVALUE_SMALL(String)
+HANDLE_DIEVALUE_SMALL(Expr)
+HANDLE_DIEVALUE_SMALL(Label)
+HANDLE_DIEVALUE_LARGE(Delta)
+HANDLE_DIEVALUE_SMALL(Entry)
+HANDLE_DIEVALUE_LARGE(Block)
+HANDLE_DIEVALUE_LARGE(Loc)
+HANDLE_DIEVALUE_SMALL(LocList)
+HANDLE_DIEVALUE_LARGE(InlineString)
+
+#undef HANDLE_DIEVALUE
+#undef HANDLE_DIEVALUE_SMALL
+#undef HANDLE_DIEVALUE_LARGE
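+
+// Illustrative example (not part of the original file): a consumer defines one
+// of the handlers before including this file. DIE.h, for instance, declares
+// one enum entry per value kind:
+//
+// \code
+//   enum Type {
+//     isNone,
+//   #define HANDLE_DIEVALUE(T) is##T,
+//   #include "llvm/CodeGen/DIEValue.def"
+//   };
+// \endcode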
diff --git a/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
new file mode 100644
index 0000000..e6c0483
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -0,0 +1,53 @@
+//===- llvm/CodeGen/DwarfStringPoolEntry.h - String pool entry --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// Data for a string pool entry.
+struct DwarfStringPoolEntry {
+  MCSymbol *Symbol;
+  unsigned Offset;
+  unsigned Index;
+};
+
+/// String pool entry reference.
+struct DwarfStringPoolEntryRef {
+  const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
+
+public:
+  DwarfStringPoolEntryRef() = default;
+  explicit DwarfStringPoolEntryRef(
+      const StringMapEntry<DwarfStringPoolEntry> &I)
+      : I(&I) {}
+
+  explicit operator bool() const { return I; }
+  MCSymbol *getSymbol() const {
+    assert(I->second.Symbol && "No symbol available!");
+    return I->second.Symbol;
+  }
+  unsigned getOffset() const { return I->second.Offset; }
+  unsigned getIndex() const { return I->second.Index; }
+  StringRef getString() const { return I->first(); }
+  /// Return the entire string pool entry for convenience.
+  DwarfStringPoolEntry getEntry() const { return I->getValue(); }
+
+  bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
+  bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h b/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h
new file mode 100644
index 0000000..c31fad2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h
@@ -0,0 +1,64 @@
+//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
+// edges leaving a machine basic block are in the same bundle, and all edges
+// entering a machine basic block are in the same bundle.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
+#define LLVM_CODEGEN_EDGEBUNDLES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class EdgeBundles : public MachineFunctionPass {
+  const MachineFunction *MF;
+
+  /// EC - Each edge bundle is an equivalence class. The keys are:
+  ///   2*BB->getNumber()   -> Ingoing bundle.
+  ///   2*BB->getNumber()+1 -> Outgoing bundle.
+  IntEqClasses EC;
+
+  /// Blocks - Map each bundle to a list of basic block numbers.
+  SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
+
+public:
+  static char ID;
+  EdgeBundles() : MachineFunctionPass(ID) {}
+
+  /// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
+  /// bundle number for basic block #N
+  unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
+
+  /// getNumBundles - Return the total number of bundles in the CFG.
+  unsigned getNumBundles() const { return EC.getNumClasses(); }
+
+  /// getBlocks - Return an array of blocks that are connected to Bundle.
+  ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
+
+  /// getMachineFunction - Return the last machine function computed.
+  const MachineFunction *getMachineFunction() const { return MF; }
+
+  /// view - Visualize the annotated bipartite CFG with Graphviz.
+  void view() const;
+
+private:
+  bool runOnMachineFunction(MachineFunction&) override;
+  void getAnalysisUsage(AnalysisUsage&) const override;
+};
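+
+// Illustrative sketch (not part of the original header): after this pass has
+// run, clients query bundle numbers per block. Bundles and MBB are assumed:
+//
+// \code
+//   unsigned Ingoing  = Bundles->getBundle(MBB->getNumber(), /*Out=*/false);
+//   unsigned Outgoing = Bundles->getBundle(MBB->getNumber(), /*Out=*/true);
+// \endcode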
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h b/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h
new file mode 100644
index 0000000..338c214
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h
@@ -0,0 +1,213 @@
+//==-- llvm/CodeGen/ExecutionDomainFix.h - Execution Domain Fix -*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Execution Domain Fix pass.
+///
+/// Some X86 SSE instructions like mov, and, or, xor are available in different
+/// variants for different operand types. These variant instructions are
+/// equivalent, but on Nehalem and newer cpus there is extra latency
+/// transferring data between integer and floating point domains.  ARM cores
+/// have similar issues when they are configured with both VFP and NEON
+/// pipelines.
+///
+/// This pass changes the variant instructions to minimize domain crossings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
+#define LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LoopTraversal.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/ReachingDefAnalysis.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+class TargetInstrInfo;
+
+/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
+/// of execution domains.
+///
+/// An open DomainValue represents a set of instructions that can still switch
+/// execution domain. Multiple registers may refer to the same open
+/// DomainValue - they will eventually be collapsed to the same execution
+/// domain.
+///
+/// A collapsed DomainValue represents a single register that has been forced
+/// into one or more execution domains. There is a separate collapsed
+/// DomainValue for each register, but it may contain multiple execution
+/// domains. A register value is initially created in a single execution
+/// domain, but if we were forced to pay the penalty of a domain crossing, we
+/// keep track of the fact that the register is now available in multiple
+/// domains.
+struct DomainValue {
+  /// Basic reference counting.
+  unsigned Refs = 0;
+
+  /// Bitmask of available domains. For an open DomainValue, it is the set of
+  /// domains the value can still be collapsed to. For a collapsed DomainValue
+  /// it is the set of domains where the register is available for free.
+  unsigned AvailableDomains;
+
+  /// Pointer to the next DomainValue in a chain.  When two DomainValues are
+  /// merged, Victim.Next is set to point to Victor, so old DomainValue
+  /// references can be updated by following the chain.
+  DomainValue *Next;
+
+  /// Twiddleable instructions using or defining these registers.
+  SmallVector<MachineInstr *, 8> Instrs;
+
+  DomainValue() { clear(); }
+
+  /// A collapsed DomainValue has no instructions to twiddle - it simply keeps
+  /// track of the domains where the registers are already available.
+  bool isCollapsed() const { return Instrs.empty(); }
+
+  /// Is domain available?
+  bool hasDomain(unsigned domain) const {
+    assert(domain <
+               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
+           "undefined behavior");
+    return AvailableDomains & (1u << domain);
+  }
+
+  /// Mark domain as available.
+  void addDomain(unsigned domain) { AvailableDomains |= 1u << domain; }
+
+  /// Restrict to a single available domain.
+  void setSingleDomain(unsigned domain) { AvailableDomains = 1u << domain; }
+
+  /// Return bitmask of domains that are available and in mask.
+  unsigned getCommonDomains(unsigned mask) const {
+    return AvailableDomains & mask;
+  }
+
+  /// First domain available.
+  unsigned getFirstDomain() const {
+    return countTrailingZeros(AvailableDomains);
+  }
+
+  /// Clear this DomainValue and point to next which has all its data.
+  void clear() {
+    AvailableDomains = 0;
+    Next = nullptr;
+    Instrs.clear();
+  }
+};
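+
+/// Illustrative sketch of the bitmask bookkeeping above, assuming domain 0
+/// stands for the integer domain and domain 1 for the floating point domain
+/// (the numbering is a hypothetical example; targets define their own):
+/// \code
+///   DomainValue DV;
+///   DV.addDomain(0);                      // available in the integer domain
+///   DV.addDomain(1);                      // now available in both domains
+///   bool InFP = DV.hasDomain(1);          // true
+///   DV.setSingleDomain(0);                // collapse choice: integer only
+///   unsigned First = DV.getFirstDomain(); // == 0
+/// \endcode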
+
+class ExecutionDomainFix : public MachineFunctionPass {
+  SpecificBumpPtrAllocator<DomainValue> Allocator;
+  SmallVector<DomainValue *, 16> Avail;
+
+  const TargetRegisterClass *const RC;
+  MachineFunction *MF;
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  std::vector<SmallVector<int, 1>> AliasMap;
+  const unsigned NumRegs;
+  /// Value currently in each register, or NULL when no value is being tracked.
+  /// This counts as a DomainValue reference.
+  using LiveRegsDVInfo = std::vector<DomainValue *>;
+  LiveRegsDVInfo LiveRegs;
+  /// Keeps domain information for all registers. Note that this is different
+  /// from the usual notion of liveness: the CPU doesn't care whether or not
+  /// we consider a register killed.
+  using OutRegsInfoMap = SmallVector<LiveRegsDVInfo, 4>;
+  OutRegsInfoMap MBBOutRegsInfos;
+
+  ReachingDefAnalysis *RDA;
+
+public:
+  ExecutionDomainFix(char &PassID, const TargetRegisterClass &RC)
+      : MachineFunctionPass(PassID), RC(&RC), NumRegs(RC.getNumRegs()) {}
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<ReachingDefAnalysis>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+private:
+  /// Translate TRI register number to a list of indices into our smaller tables
+  /// of interesting registers.
+  iterator_range<SmallVectorImpl<int>::const_iterator>
+  regIndices(unsigned Reg) const;
+
+  /// DomainValue allocation.
+  DomainValue *alloc(int domain = -1);
+
+  /// Add reference to DV.
+  DomainValue *retain(DomainValue *DV) {
+    if (DV)
+      ++DV->Refs;
+    return DV;
+  }
+
+  /// Release a reference to DV.  When the last reference is released,
+  /// collapse if needed.
+  void release(DomainValue *);
+
+  /// Follow the chain of dead DomainValues until a live DomainValue is reached.
+  /// Update the referenced pointer when necessary.
+  DomainValue *resolve(DomainValue *&);
+
+  /// Set LiveRegs[rx] = dv, updating reference counts.
+  void setLiveReg(int rx, DomainValue *DV);
+
+  /// Kill register rx, recycle or collapse any DomainValue.
+  void kill(int rx);
+
+  /// Force register rx into domain.
+  void force(int rx, unsigned domain);
+
+  /// Collapse open DomainValue into given domain. If there are multiple
+  /// registers using dv, they each get a unique collapsed DomainValue.
+  void collapse(DomainValue *dv, unsigned domain);
+
+  /// All instructions and registers in B are moved to A, and B is released.
+  bool merge(DomainValue *A, DomainValue *B);
+
+  /// Set up LiveRegs by merging predecessor live-out values.
+  void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update live-out values.
+  void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Process the given basic block.
+  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Visit the given instruction.
+  bool visitInstr(MachineInstr *);
+
+  /// Update def-ages for registers defined by MI.
+  /// If Kill is set, also kill off DomainValues clobbered by the defs.
+  void processDefs(MachineInstr *, bool Kill);
+
+  /// A soft instruction can be changed to work in other domains given by mask.
+  void visitSoftInstr(MachineInstr *, unsigned mask);
+
+  /// A hard instruction only works in one domain. All input registers will be
+  /// forced into that domain.
+  void visitHardInstr(MachineInstr *, unsigned domain);
+};
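+
+/// A target typically enables this pass through a thin subclass that pins the
+/// register class whose instructions have domain variants. A sketch, loosely
+/// modeled on how an X86-style target might do it (the target and register
+/// class names are illustrative):
+/// \code
+///   struct MyTargetDomainFix : public ExecutionDomainFix {
+///     static char ID;
+///     MyTargetDomainFix() : ExecutionDomainFix(ID, MyTarget::VR128RegClass) {}
+///   };
+/// \endcode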
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h b/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h
new file mode 100644
index 0000000..c6aaaad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h
@@ -0,0 +1,24 @@
+//===----- ExpandReductions.h - Expand experimental reduction intrinsics --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXPANDREDUCTIONS_H
+#define LLVM_CODEGEN_EXPANDREDUCTIONS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class ExpandReductionsPass
+    : public PassInfoMixin<ExpandReductionsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
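+
+/// Illustrative use with the new pass manager (sketch; assumes a
+/// FunctionAnalysisManager FAM has already been set up by the caller):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(ExpandReductionsPass());
+///   PreservedAnalyses PA = FPM.run(F, FAM);
+/// \endcode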
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_EXPANDREDUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FastISel.h b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
new file mode 100644
index 0000000..772bd6c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
@@ -0,0 +1,593 @@
+//===- FastISel.h - Definition of the FastISel class ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the FastISel class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FASTISEL_H
+#define LLVM_CODEGEN_FASTISEL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/MachineValueType.h"
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class Constant;
+class ConstantFP;
+class DataLayout;
+class FunctionLoweringInfo;
+class LoadInst;
+class MachineConstantPool;
+class MachineFrameInfo;
+class MachineFunction;
+class MachineInstr;
+class MachineMemOperand;
+class MachineOperand;
+class MachineRegisterInfo;
+class MCContext;
+class MCInstrDesc;
+class MCSymbol;
+class TargetInstrInfo;
+class TargetLibraryInfo;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class Type;
+class User;
+class Value;
+
+/// \brief This is a fast-path instruction selection class that generates poor
+/// code and doesn't support illegal types or non-trivial lowering, but runs
+/// quickly.
+class FastISel {
+public:
+  using ArgListEntry = TargetLoweringBase::ArgListEntry;
+  using ArgListTy = TargetLoweringBase::ArgListTy;
+  struct CallLoweringInfo {
+    Type *RetTy = nullptr;
+    bool RetSExt : 1;
+    bool RetZExt : 1;
+    bool IsVarArg : 1;
+    bool IsInReg : 1;
+    bool DoesNotReturn : 1;
+    bool IsReturnValueUsed : 1;
+    bool IsPatchPoint : 1;
+
+    /// \brief IsTailCall should be modified by implementations of
+    /// fastLowerCall that perform tail call conversions.
+    bool IsTailCall = false;
+
+    unsigned NumFixedArgs = -1;
+    CallingConv::ID CallConv = CallingConv::C;
+    const Value *Callee = nullptr;
+    MCSymbol *Symbol = nullptr;
+    ArgListTy Args;
+    ImmutableCallSite *CS = nullptr;
+    MachineInstr *Call = nullptr;
+    unsigned ResultReg = 0;
+    unsigned NumResultRegs = 0;
+
+    SmallVector<Value *, 16> OutVals;
+    SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
+    SmallVector<unsigned, 16> OutRegs;
+    SmallVector<ISD::InputArg, 4> Ins;
+    SmallVector<unsigned, 4> InRegs;
+
+    CallLoweringInfo()
+        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
+          DoesNotReturn(false), IsReturnValueUsed(true), IsPatchPoint(false) {}
+
+    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+                                const Value *Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite &Call) {
+      RetTy = ResultTy;
+      Callee = Target;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn = Call.doesNotReturn();
+      IsVarArg = FuncTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      CallConv = Call.getCallingConv();
+      Args = std::move(ArgsList);
+      NumFixedArgs = FuncTy->getNumParams();
+
+      CS = &Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+                                MCSymbol *Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite &Call,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Callee = Call.getCalledValue();
+      Symbol = Target;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn = Call.doesNotReturn();
+      IsVarArg = FuncTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      CallConv = Call.getCallingConv();
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;
+
+      CS = &Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+                                const Value *Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Callee = Target;
+      CallConv = CC;
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
+                                CallingConv::ID CC, Type *ResultTy,
+                                StringRef Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U);
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+                                MCSymbol *Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Symbol = Target;
+      CallConv = CC;
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+      return *this;
+    }
+
+    CallLoweringInfo &setTailCall(bool Value = true) {
+      IsTailCall = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+      IsPatchPoint = Value;
+      return *this;
+    }
+
+    ArgListTy &getArgs() { return Args; }
+
+    void clearOuts() {
+      OutVals.clear();
+      OutFlags.clear();
+      OutRegs.clear();
+    }
+
+    void clearIns() {
+      Ins.clear();
+      InRegs.clear();
+    }
+  };
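+
+  /// The setters above return *this, so a FastISel subclass can typically
+  /// configure and lower a call in one chained expression (illustrative
+  /// sketch; Callee, RetTy and Args are assumed to be in scope):
+  /// \code
+  ///   CallLoweringInfo CLI;
+  ///   CLI.setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
+  ///      .setTailCall(false);
+  ///   bool Success = lowerCallTo(CLI);
+  /// \endcode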
+
+protected:
+  DenseMap<const Value *, unsigned> LocalValueMap;
+  FunctionLoweringInfo &FuncInfo;
+  MachineFunction *MF;
+  MachineRegisterInfo &MRI;
+  MachineFrameInfo &MFI;
+  MachineConstantPool &MCP;
+  DebugLoc DbgLoc;
+  const TargetMachine &TM;
+  const DataLayout &DL;
+  const TargetInstrInfo &TII;
+  const TargetLowering &TLI;
+  const TargetRegisterInfo &TRI;
+  const TargetLibraryInfo *LibInfo;
+  bool SkipTargetIndependentISel;
+
+  /// \brief The position of the last instruction for materializing constants
+  /// for use in the current block. It resets to EmitStartPt when it makes sense
+  /// (for example, it's usually profitable to avoid function calls between the
+  /// definition and the use)
+  MachineInstr *LastLocalValue;
+
+  /// \brief The top most instruction in the current block that is allowed for
+  /// emitting local variables. LastLocalValue resets to EmitStartPt when it
+  /// makes sense (for example, on function calls)
+  MachineInstr *EmitStartPt;
+
+public:
+  virtual ~FastISel();
+
+  /// \brief Return the position of the last instruction emitted for
+  /// materializing constants for use in the current block.
+  MachineInstr *getLastLocalValue() { return LastLocalValue; }
+
+  /// \brief Update the position of the last instruction emitted for
+  /// materializing constants for use in the current block.
+  void setLastLocalValue(MachineInstr *I) {
+    EmitStartPt = I;
+    LastLocalValue = I;
+  }
+
+  /// \brief Set the current block to which generated machine instructions will
+  /// be appended.
+  void startNewBlock();
+
+  /// Flush the local value map and sink local values if possible.
+  void finishBasicBlock();
+
+  /// \brief Return current debug location information.
+  DebugLoc getCurDebugLoc() const { return DbgLoc; }
+
+  /// \brief Do "fast" instruction selection for function arguments and append
+  /// the machine instructions to the current block. Returns true when
+  /// successful.
+  bool lowerArguments();
+
+  /// \brief Do "fast" instruction selection for the given LLVM IR instruction
+  /// and append the generated machine instructions to the current block.
+  /// Returns true if selection was successful.
+  bool selectInstruction(const Instruction *I);
+
+  /// \brief Do "fast" instruction selection for the given LLVM IR operator
+  /// (Instruction or ConstantExpr), and append generated machine instructions
+  /// to the current block. Return true if selection was successful.
+  bool selectOperator(const User *I, unsigned Opcode);
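+
+  /// The expected driver loop looks roughly like this (illustrative sketch;
+  /// the real driver lives in the selection-DAG instruction selector):
+  /// \code
+  ///   FastIS->startNewBlock();
+  ///   for (const Instruction &I : *LLVMBB)
+  ///     if (!FastIS->selectInstruction(&I))
+  ///       break;                  // fall back to the slow path for this BB
+  ///   FastIS->finishBasicBlock();
+  /// \endcode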
+
+  /// \brief Create a virtual register and arrange for it to be assigned the
+  /// value for the given LLVM value.
+  unsigned getRegForValue(const Value *V);
+
+  /// \brief Look up the value to see if it is already cached in a
+  /// register. It may be defined by instructions across blocks or defined
+  /// locally.
+  unsigned lookUpRegForValue(const Value *V);
+
+  /// \brief This is a wrapper around getRegForValue that also takes care of
+  /// truncating or sign-extending the given getelementptr index value.
+  std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+
+  /// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
+  /// that we could have a sequence where multiple LLVM IR instructions are
+  /// folded into the same machineinstr.  For example we could have:
+  ///
+  ///   A: x = load i32 *P
+  ///   B: y = icmp A, 42
+  ///   C: br y, ...
+  ///
+  /// In this scenario, \p LI is "A", and \p FoldInst is "C".  We know about "B"
+  /// (and any other folded instructions) because it is between A and C.
+  ///
+  /// If we succeed folding, return true.
+  bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
+
+  /// \brief The specified machine instr operand is a vreg, and that vreg is
+  /// being provided by the specified load instruction.  If possible, try to
+  /// fold the load as an operand to the instruction, returning true on
+  /// success.
+  ///
+  /// This method should be implemented by targets.
+  virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
+                                   const LoadInst * /*LI*/) {
+    return false;
+  }
+
+  /// \brief Reset InsertPt to prepare for inserting instructions into the
+  /// current block.
+  void recomputeInsertPt();
+
+  /// \brief Remove all dead instructions between I and E.
+  void removeDeadCode(MachineBasicBlock::iterator I,
+                      MachineBasicBlock::iterator E);
+
+  struct SavePoint {
+    MachineBasicBlock::iterator InsertPt;
+    DebugLoc DL;
+  };
+
+  /// \brief Prepare InsertPt to begin inserting instructions into the local
+  /// value area and return the old insert position.
+  SavePoint enterLocalValueArea();
+
+  /// \brief Reset InsertPt to the given old insert position.
+  void leaveLocalValueArea(SavePoint Old);
+
+protected:
+  explicit FastISel(FunctionLoweringInfo &FuncInfo,
+                    const TargetLibraryInfo *LibInfo,
+                    bool SkipTargetIndependentISel = false);
+
+  /// \brief This method is called by target-independent code when the normal
+  /// FastISel process fails to select an instruction. This gives targets a
+  /// chance to emit code for anything that doesn't fit into FastISel's
+  /// framework. It returns true if it was successful.
+  virtual bool fastSelectInstruction(const Instruction *I) = 0;
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific argument lowering. It returns true if it was successful.
+  virtual bool fastLowerArguments();
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific call lowering. It returns true if it was successful.
+  virtual bool fastLowerCall(CallLoweringInfo &CLI);
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific intrinsic lowering. It returns true if it was successful.
+  virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type and opcode be emitted.
+  virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register operand be emitted.
+  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                              bool Op0IsKill);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register operands be emitted.
+  virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                               bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register and immediate
+  /// operands be emitted.
+  virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                               bool Op0IsKill, uint64_t Imm);
+
+  /// \brief This method is a wrapper of fastEmit_ri.
+  ///
+  /// It first tries to emit an instruction with an immediate operand using
+  /// fastEmit_ri.  If that fails, it materializes the immediate into a register
+  /// and tries fastEmit_rr instead.
+  unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
+                        uint64_t Imm, MVT ImmType);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and immediate operand be emitted.
+  virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and floating-point immediate
+  /// operand be emitted.
+  virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
+                              const ConstantFP *FPImm);
+
+  /// \brief Emit a MachineInstr with no operands and a result register in the
+  /// given register class.
+  unsigned fastEmitInst_(unsigned MachineInstOpcode,
+                         const TargetRegisterClass *RC);
+
+  /// \brief Emit a MachineInstr with one register operand and a result register
+  /// in the given register class.
+  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
+                          const TargetRegisterClass *RC, unsigned Op0,
+                          bool Op0IsKill);
+
+  /// \brief Emit a MachineInstr with two register operands and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
+                           const TargetRegisterClass *RC, unsigned Op0,
+                           bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+  /// \brief Emit a MachineInstr with three register operands and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+                            unsigned Op2, bool Op2IsKill);
+
+  /// \brief Emit a MachineInstr with a register operand, an immediate, and a
+  /// result register in the given register class.
+  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
+                           const TargetRegisterClass *RC, unsigned Op0,
+                           bool Op0IsKill, uint64_t Imm);
+
+  /// \brief Emit a MachineInstr with one register operand and two immediate
+  /// operands.
+  unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
+
+  /// \brief Emit a MachineInstr with a floating point immediate, and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_f(unsigned MachineInstOpcode,
+                          const TargetRegisterClass *RC,
+                          const ConstantFP *FPImm);
+
+  /// \brief Emit a MachineInstr with two register operands, an immediate, and a
+  /// result register in the given register class.
+  unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+                            uint64_t Imm);
+
+  /// \brief Emit a MachineInstr with a single immediate operand, and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
+                          const TargetRegisterClass *RC, uint64_t Imm);
+
+  /// \brief Emit a MachineInstr for an extract_subreg from a specified index of
+  /// a superregister to a specified type.
+  unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
+                                      uint32_t Idx);
+
+  /// \brief Emit MachineInstrs to compute the value of Op with all but the
+  /// least significant bit set to zero.
+  unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+
+  /// \brief Emit an unconditional branch to the given block, unless it is the
+  /// immediate (fall-through) successor, and update the CFG.
+  void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL);
+
+  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
+  /// and add TrueMBB and FalseMBB to the successor list.
+  void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
+                        MachineBasicBlock *FalseMBB);
+
+  /// \brief Update the value map to include the new mapping for this
+  /// instruction, or insert an extra copy to get the result in a previously
+  /// determined register.
+  ///
+  /// NOTE: This is only necessary because we might select a block that uses a
+  /// value before we select the block that defines the value. It might be
+  /// possible to fix this by selecting blocks in reverse postorder.
+  void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
+
+  unsigned createResultReg(const TargetRegisterClass *RC);
+
+  /// \brief Try to constrain Op so that it is usable by argument OpNum of the
+  /// provided MCInstrDesc. If this fails, create a new virtual register in the
+  /// correct class and COPY the value there.
+  unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
+                                    unsigned OpNum);
+
+  /// \brief Emit a constant in a register using target-specific logic, such as
+  /// constant pool loads.
+  virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
+
+  /// \brief Emit an alloca address in a register using target-specific logic.
+  virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
+
+  /// \brief Emit the floating-point constant +0.0 in a register using target-
+  /// specific logic.
+  virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
+    return 0;
+  }
+
+  /// \brief Check if \c Add is an add that can be safely folded into \c GEP.
+  ///
+  /// \c Add can be folded into \c GEP if:
+  /// - \c Add is an add,
+  /// - \c Add's size matches \c GEP's,
+  /// - \c Add is in the same basic block as \c GEP, and
+  /// - \c Add has a constant operand.
+  bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
+
+  /// \brief Test whether the given value has exactly one use.
+  bool hasTrivialKill(const Value *V);
+
+  /// \brief Create a machine mem operand from the given instruction.
+  MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
+
+  CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
+
+  bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
+  bool lowerCallTo(const CallInst *CI, const char *SymbolName,
+                   unsigned NumArgs);
+  bool lowerCallTo(CallLoweringInfo &CLI);
+
+  bool isCommutativeIntrinsic(IntrinsicInst const *II) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::sadd_with_overflow:
+    case Intrinsic::uadd_with_overflow:
+    case Intrinsic::smul_with_overflow:
+    case Intrinsic::umul_with_overflow:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  bool lowerCall(const CallInst *I);
+  /// \brief Select and emit code for a binary operator instruction, which has
+  /// an opcode which directly corresponds to the given ISD opcode.
+  bool selectBinaryOp(const User *I, unsigned ISDOpcode);
+  bool selectFNeg(const User *I);
+  bool selectGetElementPtr(const User *I);
+  bool selectStackmap(const CallInst *I);
+  bool selectPatchpoint(const CallInst *I);
+  bool selectCall(const User *Call);
+  bool selectIntrinsicCall(const IntrinsicInst *II);
+  bool selectBitCast(const User *I);
+  bool selectCast(const User *I, unsigned Opcode);
+  bool selectExtractValue(const User *I);
+  bool selectInsertValue(const User *I);
+  bool selectXRayCustomEvent(const CallInst *II);
+
+private:
+  /// \brief Handle PHI nodes in successor blocks.
+  ///
+  /// Emit code to ensure constants are copied into registers when needed.
+  /// Remember the virtual registers that need to be added to the Machine PHI
+  /// nodes as input.  We cannot just directly add them, because expansion might
+  /// result in multiple MBB's for one BB.  As such, the start of the BB might
+  /// correspond to a different MBB than the end.
+  bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
+
+  /// \brief Helper for materializeRegForValue to materialize a constant in a
+  /// target-independent way.
+  unsigned materializeConstant(const Value *V, MVT VT);
+
+  /// \brief Helper for getRegForValue. This function is called when the value
+  /// isn't already available in a register and must be materialized with new
+  /// instructions.
+  unsigned materializeRegForValue(const Value *V, MVT VT);
+
+  /// \brief Clears LocalValueMap and moves the area for the new local variables
+  /// to the beginning of the block. It helps to avoid spilling cached variables
+  /// across heavy instructions like calls.
+  void flushLocalValueMap();
+
+  /// \brief Removes dead local value instructions after SavedLastLocalValue.
+  void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
+
+  struct InstOrderMap {
+    DenseMap<MachineInstr *, unsigned> Orders;
+    MachineInstr *FirstTerminator = nullptr;
+    unsigned FirstTerminatorOrder = std::numeric_limits<unsigned>::max();
+
+    void initialize(MachineBasicBlock *MBB);
+  };
+
+  /// Sinks the local value materialization instruction LocalMI to its first use
+  /// in the basic block, or deletes it if it is not used.
+  void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
+                                     InstOrderMap &OrderMap);
+
+  /// \brief Insertion point before trying to select the current instruction.
+  MachineBasicBlock::iterator SavedInsertPt;
+
+  /// \brief Add a stackmap or patchpoint intrinsic call's live variable
+  /// operands to a stackmap or patchpoint machine instruction.
+  bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
+                           const CallInst *CI, unsigned StartIdx);
+  bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
+                         const Value *Callee, bool ForceRetVoidTy,
+                         CallLoweringInfo &CLI);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FASTISEL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h b/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h
new file mode 100644
index 0000000..55e25c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h
@@ -0,0 +1,218 @@
+//===- FaultMaps.h - The "FaultMaps" section --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FAULTMAPS_H
+#define LLVM_CODEGEN_FAULTMAPS_H
+
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Endian.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class raw_ostream;
+
+class FaultMaps {
+public:
+  enum FaultKind {
+    FaultingLoad = 1,
+    FaultingLoadStore,
+    FaultingStore,
+    FaultKindMax
+  };
+
+  explicit FaultMaps(AsmPrinter &AP);
+
+  static const char *faultTypeToString(FaultKind);
+
+  void recordFaultingOp(FaultKind FaultTy, const MCSymbol *HandlerLabel);
+  void serializeToFaultMapSection();
+  void reset() {
+    FunctionInfos.clear();
+  }
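+
+  /// Typical emission flow in an AsmPrinter (illustrative sketch; AP and
+  /// HandlerLabel are assumed to come from the surrounding printer):
+  /// \code
+  ///   FaultMaps FM(AP);
+  ///   FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
+  ///   // ... one record per faulting operation ...
+  ///   FM.serializeToFaultMapSection(); // once, at the end of the module
+  /// \endcode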
+
+private:
+  static const char *WFMP;
+
+  struct FaultInfo {
+    FaultKind Kind = FaultKindMax;
+    const MCExpr *FaultingOffsetExpr = nullptr;
+    const MCExpr *HandlerOffsetExpr = nullptr;
+
+    FaultInfo() = default;
+
+    explicit FaultInfo(FaultMaps::FaultKind Kind, const MCExpr *FaultingOffset,
+                       const MCExpr *HandlerOffset)
+        : Kind(Kind), FaultingOffsetExpr(FaultingOffset),
+          HandlerOffsetExpr(HandlerOffset) {}
+  };
+
+  using FunctionFaultInfos = std::vector<FaultInfo>;
+
+  // We'd like to keep a stable iteration order for FunctionInfos to help
+  // FileCheck based testing.
+  struct MCSymbolComparator {
+    bool operator()(const MCSymbol *LHS, const MCSymbol *RHS) const {
+      return LHS->getName() < RHS->getName();
+    }
+  };
+
+  std::map<const MCSymbol *, FunctionFaultInfos, MCSymbolComparator>
+      FunctionInfos;
+  AsmPrinter &AP;
+
+  void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
+};
+
+/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
+/// above.  This parser is version locked with the __llvm_faultmaps section
+/// generated by the version of LLVM that includes it.  No guarantees are made
+/// with respect to forward or backward compatibility.
+class FaultMapParser {
+  using FaultMapVersionType = uint8_t;
+  using Reserved0Type = uint8_t;
+  using Reserved1Type = uint16_t;
+  using NumFunctionsType = uint32_t;
+
+  static const size_t FaultMapVersionOffset = 0;
+  static const size_t Reserved0Offset =
+      FaultMapVersionOffset + sizeof(FaultMapVersionType);
+  static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
+  static const size_t NumFunctionsOffset =
+      Reserved1Offset + sizeof(Reserved1Type);
+  static const size_t FunctionInfosOffset =
+      NumFunctionsOffset + sizeof(NumFunctionsType);
+
+  const uint8_t *P;
+  const uint8_t *E;
+
+  template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
+    assert(P + sizeof(T) <= E && "out of bounds read!");
+    return support::endian::read<T, support::little, 1>(P);
+  }
+
+public:
+  class FunctionFaultInfoAccessor {
+    using FaultKindType = uint32_t;
+    using FaultingPCOffsetType = uint32_t;
+    using HandlerPCOffsetType = uint32_t;
+
+    static const size_t FaultKindOffset = 0;
+    static const size_t FaultingPCOffsetOffset =
+        FaultKindOffset + sizeof(FaultKindType);
+    static const size_t HandlerPCOffsetOffset =
+        FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
+
+    const uint8_t *P;
+    const uint8_t *E;
+
+  public:
+    static const size_t Size =
+        HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);
+
+    explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
+        : P(P), E(E) {}
+
+    FaultKindType getFaultKind() const {
+      return read<FaultKindType>(P + FaultKindOffset, E);
+    }
+
+    FaultingPCOffsetType getFaultingPCOffset() const {
+      return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
+    }
+
+    HandlerPCOffsetType getHandlerPCOffset() const {
+      return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
+    }
+  };
+
+  class FunctionInfoAccessor {
+    using FunctionAddrType = uint64_t;
+    using NumFaultingPCsType = uint32_t;
+    using ReservedType = uint32_t;
+
+    static const size_t FunctionAddrOffset = 0;
+    static const size_t NumFaultingPCsOffset =
+        FunctionAddrOffset + sizeof(FunctionAddrType);
+    static const size_t ReservedOffset =
+        NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
+    static const size_t FunctionFaultInfosOffset =
+        ReservedOffset + sizeof(ReservedType);
+    static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
+
+    const uint8_t *P = nullptr;
+    const uint8_t *E = nullptr;
+
+  public:
+    FunctionInfoAccessor() = default;
+
+    explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
+        : P(P), E(E) {}
+
+    FunctionAddrType getFunctionAddr() const {
+      return read<FunctionAddrType>(P + FunctionAddrOffset, E);
+    }
+
+    NumFaultingPCsType getNumFaultingPCs() const {
+      return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
+    }
+
+    FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
+      assert(Index < getNumFaultingPCs() && "index out of bounds!");
+      const uint8_t *Begin = P + FunctionFaultInfosOffset +
+                             FunctionFaultInfoAccessor::Size * Index;
+      return FunctionFaultInfoAccessor(Begin, E);
+    }
+
+    FunctionInfoAccessor getNextFunctionInfo() const {
+      size_t MySize = FunctionInfoHeaderSize +
+                      getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;
+
+      const uint8_t *Begin = P + MySize;
+      assert(Begin < E && "out of bounds!");
+      return FunctionInfoAccessor(Begin, E);
+    }
+  };
+
+  explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
+      : P(Begin), E(End) {}
+
+  FaultMapVersionType getFaultMapVersion() const {
+    auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
+    assert(Version == 1 && "only version 1 supported!");
+    return Version;
+  }
+
+  NumFunctionsType getNumFunctions() const {
+    return read<NumFunctionsType>(P + NumFunctionsOffset, E);
+  }
+
+  FunctionInfoAccessor getFirstFunctionInfo() const {
+    const uint8_t *Begin = P + FunctionInfosOffset;
+    return FunctionInfoAccessor(Begin, E);
+  }
+};
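+
+/// Illustrative walk over a parsed __llvm_faultmaps section (sketch; Data and
+/// Size are assumed to describe the raw section bytes, and handleFault is a
+/// hypothetical callback):
+/// \code
+///   FaultMapParser FMP(Data, Data + Size);
+///   auto FI = FMP.getFirstFunctionInfo();
+///   for (uint32_t I = 0, E = FMP.getNumFunctions(); I != E; ++I) {
+///     for (uint32_t J = 0; J != FI.getNumFaultingPCs(); ++J)
+///       handleFault(FI.getFunctionFaultInfoAt(J));
+///     if (I + 1 != E)
+///       FI = FI.getNextFunctionInfo();
+///   }
+/// \endcode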
+
+raw_ostream &
+operator<<(raw_ostream &OS, const FaultMapParser::FunctionFaultInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS,
+                        const FaultMapParser::FunctionInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FAULTMAPS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
new file mode 100644
index 0000000..2da00b7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -0,0 +1,325 @@
+//===- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements routines for translating functions from LLVM IR into
+// Machine IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/KnownBits.h"
+#include <cassert>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Argument;
+class BasicBlock;
+class BranchProbabilityInfo;
+class Function;
+class Instruction;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class MVT;
+class SelectionDAG;
+class TargetLowering;
+
+//===--------------------------------------------------------------------===//
+/// FunctionLoweringInfo - This contains information that is global to a
+/// function that is used when lowering a region of the function.
+///
+class FunctionLoweringInfo {
+public:
+  const Function *Fn;
+  MachineFunction *MF;
+  const TargetLowering *TLI;
+  MachineRegisterInfo *RegInfo;
+  BranchProbabilityInfo *BPI;
+  /// CanLowerReturn - true iff the function's return value can be lowered to
+  /// registers.
+  bool CanLowerReturn;
+
+  /// True if part of the CSRs will be handled via explicit copies.
+  bool SplitCSR;
+
+  /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
+  /// allocated to hold a pointer to the hidden sret parameter.
+  unsigned DemoteRegister;
+
+  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
+  DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+
+  /// A map from swifterror value in a basic block to the virtual register it is
+  /// currently represented by.
+  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+      SwiftErrorVRegDefMap;
+
+  /// A list of upward exposed vreg uses that need to be satisfied by either a
+  /// copy def or a phi node at the beginning of the basic block representing
+  /// the predecessors' swifterror value.
+  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+      SwiftErrorVRegUpwardsUse;
+
+  /// A map from instructions that define/use a swifterror value to the virtual
+  /// register that represents that def/use.
+  llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
+      SwiftErrorVRegDefUses;
+
+  /// The swifterror argument of the current function.
+  const Value *SwiftErrorArg;
+
+  using SwiftErrorValues = SmallVector<const Value*, 1>;
+  /// A function can only have a single swifterror argument, and if it does
+  /// have one, it must be the first entry in SwiftErrorVals.
+  SwiftErrorValues SwiftErrorVals;
+
+  /// Get or create the swifterror value virtual register in
+  /// SwiftErrorVRegDefMap for this basic block.
+  unsigned getOrCreateSwiftErrorVReg(const MachineBasicBlock *,
+                                     const Value *);
+
+  /// Set the swifterror virtual register in the SwiftErrorVRegDefMap for this
+  /// basic block.
+  void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *,
+                                unsigned);
+
+  /// Get or create the swifterror value virtual register for a def of a
+  /// swifterror by an instruction.
+  std::pair<unsigned, bool> getOrCreateSwiftErrorVRegDefAt(const Instruction *);
+  std::pair<unsigned, bool>
+  getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *,
+                                 const Value *);
+
+  /// ValueMap - Since we emit code for the function a basic block at a time,
+  /// we must remember which virtual registers hold the values for
+  /// cross-basic-block values.
+  DenseMap<const Value *, unsigned> ValueMap;
+
+  /// VirtReg2Value map is needed by the Divergence Analysis driven
+  /// instruction selection. It is the inverse of ValueMap, computed lazily,
+  /// on demand. It is used to get the Value corresponding to a live-in
+  /// virtual register and is queried from
+  /// TargetLowering::isSDNodeSourceOfDivergence.
+  DenseMap<unsigned, const Value*> VirtReg2Value;
+
+  /// This method is called from TargetLowering::isSDNodeSourceOfDivergence
+  /// to get the Value corresponding to the live-in virtual register.
+  const Value * getValueFromVirtualReg(unsigned Vreg);
+
+  /// Track virtual registers created for exception pointers.
+  DenseMap<const Value *, unsigned> CatchPadExceptionPointers;
+
+  /// Keep track of frame indices allocated for statepoints as they could be
+  /// used across basic block boundaries.  This struct is more complex than a
+  /// simple map because the statepoint lowering code de-duplicates gc pointers
+  /// based on their SDValue (so %p and (bitcast %p to T) will get the same
+  /// slot), and we track that here.
+
+  struct StatepointSpillMap {
+    using SlotMapTy = DenseMap<const Value *, Optional<int>>;
+
+    /// Maps uniqued llvm IR values to the slots they were spilled in.  If a
+    /// value is mapped to None it means we visited the value but didn't spill
+    /// it (because it was a constant, for instance).
+    SlotMapTy SlotMap;
+
+    /// Maps llvm IR values to the values they were de-duplicated to.
+    DenseMap<const Value *, const Value *> DuplicateMap;
+
+    SlotMapTy::const_iterator find(const Value *V) const {
+      auto DuplIt = DuplicateMap.find(V);
+      if (DuplIt != DuplicateMap.end())
+        V = DuplIt->second;
+      return SlotMap.find(V);
+    }
+
+    SlotMapTy::const_iterator end() const { return SlotMap.end(); }
+  };
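+
+  /// Sketch of the intended lookup pattern (illustrative; find() transparently
+  /// follows DuplicateMap, so %p and (bitcast %p to T) resolve to one slot):
+  /// \code
+  ///   auto It = SpillMap.find(V);
+  ///   if (It != SpillMap.end() && It->second.hasValue())
+  ///     int FrameIndex = *It->second; // slot the value was spilled to
+  /// \endcode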
+
+  /// Maps gc.statepoint instructions to their corresponding StatepointSpillMap
+  /// instances.
+  DenseMap<const Instruction *, StatepointSpillMap> StatepointSpillMaps;
+
+  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
+  /// the entry block.  This allows the allocas to be efficiently referenced
+  /// anywhere in the function.
+  DenseMap<const AllocaInst*, int> StaticAllocaMap;
+
+  /// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
+  DenseMap<const Argument*, int> ByValArgFrameIndexMap;
+
+  /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
+  /// function arguments that are inserted after scheduling is completed.
+  SmallVector<MachineInstr*, 8> ArgDbgValues;
+
+  /// RegFixups - Registers which need to be replaced after isel is done.
+  DenseMap<unsigned, unsigned> RegFixups;
+
+  DenseSet<unsigned> RegsWithFixups;
+
+  /// StatepointStackSlots - A list of temporary stack slots (frame indices)
+  /// used to spill values at a statepoint.  We store them here to enable
+  /// reuse of the same stack slots across different statepoints in different
+  /// basic blocks.
+  SmallVector<unsigned, 50> StatepointStackSlots;
+
+  /// MBB - The current block.
+  MachineBasicBlock *MBB;
+
+  /// InsertPt - The current insert position inside the current block.
+  MachineBasicBlock::iterator InsertPt;
+
+  struct LiveOutInfo {
+    unsigned NumSignBits : 31;
+    unsigned IsValid : 1;
+    KnownBits Known = 1; // Constructed with a bit width of 1; real widths
+                         // are assigned in AddLiveOutRegInfo.
+
+    LiveOutInfo() : NumSignBits(0), IsValid(true) {}
+  };
+
+  /// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
+  /// for a value.
+  DenseMap<const Value *, ISD::NodeType> PreferredExtendType;
+
+  /// VisitedBBs - The set of basic blocks visited thus far by instruction
+  /// selection.
+  SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
+
+  /// PHINodesToUpdate - A list of phi instructions whose operand list will
+  /// be updated after processing the current basic block.
+  /// TODO: This isn't per-function state, it's per-basic-block state. But
+  /// there's no other convenient place for it to live right now.
+  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+  unsigned OrigNumPHINodesToUpdate;
+
+  /// If the current MBB is a landing pad, the exception pointer and exception
+  /// selector registers are copied into these virtual registers by
+  /// SelectionDAGISel::PrepareEHLandingPad().
+  unsigned ExceptionPointerVirtReg, ExceptionSelectorVirtReg;
+
+  /// set - Initialize this FunctionLoweringInfo with the given Function
+  /// and its associated MachineFunction.
+  ///
+  void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG);
+
+  /// clear - Clear out all the function-specific state. This returns this
+  /// FunctionLoweringInfo to an empty state, ready to be used for a
+  /// different function.
+  void clear();
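+
+  /// Per-function lifecycle, as the instruction selector is expected to drive
+  /// it (illustrative sketch; FuncInfo, Fn, MF and DAG are assumed):
+  /// \code
+  ///   FuncInfo->set(Fn, MF, DAG);  // before selecting the first block
+  ///   // ... select every basic block of Fn ...
+  ///   FuncInfo->clear();           // before reuse on the next function
+  /// \endcode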
+
+  /// isExportedInst - Return true if the specified value is an instruction
+  /// exported from its block.
+  bool isExportedInst(const Value *V) {
+    return ValueMap.count(V);
+  }
+
+  unsigned CreateReg(MVT VT);
+
+  unsigned CreateRegs(Type *Ty);
+
+  unsigned InitializeRegForValue(const Value *V) {
+    // Tokens never live in vregs.
+    if (V->getType()->isTokenTy())
+      return 0;
+    unsigned &R = ValueMap[V];
+    assert(R == 0 && "Already initialized this value register!");
+    return R = CreateRegs(V->getType());
+  }
+
+  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+  /// register is a PHI destination and the PHI's LiveOutInfo is not valid.
+  const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
+    if (!LiveOutRegInfo.inBounds(Reg))
+      return nullptr;
+
+    const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
+    if (!LOI->IsValid)
+      return nullptr;
+
+    return LOI;
+  }
+
+  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+  /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
+  /// the register's LiveOutInfo is for a smaller bit width, it is extended to
+  /// the larger bit width by zero extension. The bit width must be no smaller
+  /// than the LiveOutInfo's existing bit width.
+  const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth);
+
+  /// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
+  void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
+                         const KnownBits &Known) {
+    // Only install this information if it tells us something.
+    if (NumSignBits == 1 && Known.isUnknown())
+      return;
+
+    LiveOutRegInfo.grow(Reg);
+    LiveOutInfo &LOI = LiveOutRegInfo[Reg];
+    LOI.NumSignBits = NumSignBits;
+    LOI.Known.One = Known.One;
+    LOI.Known.Zero = Known.Zero;
+  }
+
+  /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
+  /// register based on the LiveOutInfo of its operands.
+  void ComputePHILiveOutRegInfo(const PHINode*);
+
+  /// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
+  /// called when a block is visited before all of its predecessors.
+  void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
+    // PHIs with no uses have no ValueMap entry.
+    DenseMap<const Value*, unsigned>::const_iterator It = ValueMap.find(PN);
+    if (It == ValueMap.end())
+      return;
+
+    unsigned Reg = It->second;
+    if (Reg == 0)
+      return;
+
+    LiveOutRegInfo.grow(Reg);
+    LiveOutRegInfo[Reg].IsValid = false;
+  }
+
+  /// setArgumentFrameIndex - Record frame index for the byval
+  /// argument.
+  void setArgumentFrameIndex(const Argument *A, int FI);
+
+  /// getArgumentFrameIndex - Get frame index for the byval argument.
+  int getArgumentFrameIndex(const Argument *A);
+
+  unsigned getCatchPadExceptionPointerVReg(const Value *CPI,
+                                           const TargetRegisterClass *RC);
+
+private:
+  void addSEHHandlersForLPads(ArrayRef<const LandingPadInst *> LPads);
+
+  /// LiveOutRegInfo - Information about live out vregs.
+  IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h b/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h
new file mode 100644
index 0000000..ad2599f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h
@@ -0,0 +1,207 @@
+//===- GCMetadata.h - Garbage collector metadata ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
+// used as a communication channel from the target code generator to the target
+// garbage collectors. This interface allows code generators and garbage
+// collectors to be developed independently.
+//
+// The GCFunctionInfo class logs the data necessary to build a type accurate
+// stack map. The code generator outputs:
+//
+//   - Safe points as specified by the GCStrategy's NeededSafePoints.
+//   - Stack offsets for GC roots, as specified by calls to llvm.gcroot
+//
+// As a refinement, liveness analysis calculates the set of live roots at each
+// safe point. Liveness analysis is not presently performed by the code
+// generator, so all roots are assumed live.
+//
+// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
+// they are compiled. This accretion is necessary for collectors which must emit
+// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
+// outlives the MachineFunction from which it is derived and must not refer to
+// any code generator data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATA_H
+#define LLVM_CODEGEN_GCMETADATA_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+class MCSymbol;
+
+/// GCPoint - Metadata for a collector-safe point in machine code.
+///
+struct GCPoint {
+  GC::PointKind Kind; ///< The kind of the safe point.
+  MCSymbol *Label;    ///< A label.
+  DebugLoc Loc;
+
+  GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
+      : Kind(K), Label(L), Loc(std::move(DL)) {}
+};
+
+/// GCRoot - Metadata for a pointer to an object managed by the garbage
+/// collector.
+struct GCRoot {
+  int Num;                  ///< Usually a frame index.
+  int StackOffset = -1;     ///< Offset from the stack pointer.
+  const Constant *Metadata; ///< Metadata straight from the call
+                            ///< to llvm.gcroot.
+
+  GCRoot(int N, const Constant *MD) : Num(N), Metadata(MD) {}
+};
+
+/// Garbage collection metadata for a single function.  Currently, this
+/// information only applies to GCStrategies which use GCRoot.
+class GCFunctionInfo {
+public:
+  using iterator = std::vector<GCPoint>::iterator;
+  using roots_iterator = std::vector<GCRoot>::iterator;
+  using live_iterator = std::vector<GCRoot>::const_iterator;
+
+private:
+  const Function &F;
+  GCStrategy &S;
+  uint64_t FrameSize;
+  std::vector<GCRoot> Roots;
+  std::vector<GCPoint> SafePoints;
+
+  // FIXME: Liveness. A 2D BitVector, perhaps?
+  //
+  //   BitVector Liveness;
+  //
+  //   bool islive(int point, int root) =
+  //     Liveness[point * SafePoints.size() + root]
+  //
+  // The bit vector is the more compact representation where >3.2% of roots
+  // are live per safe point (1.5% on 64-bit hosts).
+
+public:
+  GCFunctionInfo(const Function &F, GCStrategy &S);
+  ~GCFunctionInfo();
+
+  /// getFunction - Return the function to which this metadata applies.
+  const Function &getFunction() const { return F; }
+
+  /// getStrategy - Return the GC strategy for the function.
+  GCStrategy &getStrategy() { return S; }
+
+  /// addStackRoot - Registers a root that lives on the stack. Num is the
+  ///                stack object ID for the alloca (if the code generator is
+  ///                using MachineFrameInfo).
+  void addStackRoot(int Num, const Constant *Metadata) {
+    Roots.push_back(GCRoot(Num, Metadata));
+  }
+
+  /// removeStackRoot - Removes a root.
+  roots_iterator removeStackRoot(roots_iterator position) {
+    return Roots.erase(position);
+  }
+
+  /// addSafePoint - Notes the existence of a safe point. Num is the ID of the
+  /// label just prior to the safe point (if the code generator is using
+  /// MachineModuleInfo).
+  void addSafePoint(GC::PointKind Kind, MCSymbol *Label, const DebugLoc &DL) {
+    SafePoints.emplace_back(Kind, Label, DL);
+  }
+
+  /// getFrameSize/setFrameSize - Records the function's frame size.
+  uint64_t getFrameSize() const { return FrameSize; }
+  void setFrameSize(uint64_t S) { FrameSize = S; }
+
+  /// begin/end - Iterators for safe points.
+  iterator begin() { return SafePoints.begin(); }
+  iterator end() { return SafePoints.end(); }
+  size_t size() const { return SafePoints.size(); }
+
+  /// roots_begin/roots_end - Iterators for all roots in the function.
+  roots_iterator roots_begin() { return Roots.begin(); }
+  roots_iterator roots_end() { return Roots.end(); }
+  size_t roots_size() const { return Roots.size(); }
+
+  /// live_begin/live_end - Iterators for live roots at a given safe point.
+  live_iterator live_begin(const iterator &p) { return roots_begin(); }
+  live_iterator live_end(const iterator &p) { return roots_end(); }
+  size_t live_size(const iterator &p) const { return roots_size(); }
+};
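+
+/// Illustrative traversal of the collected metadata, as a metadata printer
+/// might perform it (sketch; FI is a populated GCFunctionInfo and
+/// emitRootEntry is a hypothetical helper):
+/// \code
+///   for (GCFunctionInfo::iterator PI = FI.begin(), PE = FI.end(); PI != PE;
+///        ++PI)
+///     for (GCFunctionInfo::live_iterator LI = FI.live_begin(PI),
+///                                        LE = FI.live_end(PI);
+///          LI != LE; ++LI)
+///       emitRootEntry(PI->Label, LI->StackOffset);
+/// \endcode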
+
+/// An analysis pass which caches information about the entire Module.
+/// Records both the function level information used by GCRoots and a
+/// cache of the 'active' gc strategy objects for the current Module.
+class GCModuleInfo : public ImmutablePass {
+  /// An owning list of all GCStrategies which have been created
+  SmallVector<std::unique_ptr<GCStrategy>, 1> GCStrategyList;
+  /// A helper map to speed up lookups into the above list
+  StringMap<GCStrategy*> GCStrategyMap;
+
+public:
+  /// Look up the GCStrategy object associated with the given gc name.
+  /// Objects are owned internally; no caller should attempt to delete the
+  /// returned objects.
+  GCStrategy *getGCStrategy(const StringRef Name);
+
+  /// List of per-function info objects.  In theory, each of these
+  /// may be associated with a different GC.
+  using FuncInfoVec = std::vector<std::unique_ptr<GCFunctionInfo>>;
+
+  FuncInfoVec::iterator funcinfo_begin() { return Functions.begin(); }
+  FuncInfoVec::iterator funcinfo_end() { return Functions.end(); }
+
+private:
+  /// Owning list of all GCFunctionInfos associated with this Module
+  FuncInfoVec Functions;
+
+  /// Non-owning map to bypass linear search when finding the GCFunctionInfo
+  /// associated with a particular Function.
+  using finfo_map_type = DenseMap<const Function *, GCFunctionInfo *>;
+  finfo_map_type FInfoMap;
+
+public:
+  using iterator = SmallVector<std::unique_ptr<GCStrategy>, 1>::const_iterator;
+
+  static char ID;
+
+  GCModuleInfo();
+
+  /// clear - Resets the pass. Any pass which uses GCModuleInfo should call
+  /// it in doFinalization().
+  ///
+  void clear();
+
+  /// begin/end - Iterators for used strategies.
+  ///
+  iterator begin() const { return GCStrategyList.begin(); }
+  iterator end() const { return GCStrategyList.end(); }
+
+  /// get - Look up function metadata.  This is currently assumed to
+  /// have the side effect of initializing the associated GCStrategy.  That
+  /// will soon change.
+  GCFunctionInfo &getFunctionInfo(const Function &F);
+};
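+
+// Typical use from a pass (a sketch; assumes the pass has declared a
+// dependency on GCModuleInfo and that F carries a `gc` attribute):
+//
+//   GCModuleInfo &GMI = getAnalysis<GCModuleInfo>();
+//   GCFunctionInfo &FI = GMI.getFunctionInfo(F);
+//   if (FI.getStrategy().usesMetadata())
+//     ... emit tables for FI ...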
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCMETADATA_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h b/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h
new file mode 100644
index 0000000..1cc69a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -0,0 +1,67 @@
+//===- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The abstract base class GCMetadataPrinter supports writing GC metadata tables
+// as assembly code. This is a separate class from GCStrategy in order to allow
+// users of the LLVM JIT to avoid linking with the AsmWriter.
+//
+// Subclasses of GCMetadataPrinter must be registered using the
+// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
+// because these subclasses are logically plugins for the AsmWriter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
+#define LLVM_CODEGEN_GCMETADATAPRINTER_H
+
+#include "llvm/Support/Registry.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class GCMetadataPrinter;
+class GCModuleInfo;
+class GCStrategy;
+class Module;
+
+/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
+/// defaults from Registry.
+using GCMetadataPrinterRegistry = Registry<GCMetadataPrinter>;
+
+/// GCMetadataPrinter - Emits GC metadata as assembly code.  Instances are
+/// created, managed, and owned by the AsmPrinter.
+class GCMetadataPrinter {
+private:
+  friend class AsmPrinter;
+
+  GCStrategy *S;
+
+protected:
+  // May only be subclassed.
+  GCMetadataPrinter();
+
+public:
+  GCMetadataPrinter(const GCMetadataPrinter &) = delete;
+  GCMetadataPrinter &operator=(const GCMetadataPrinter &) = delete;
+  virtual ~GCMetadataPrinter();
+
+  GCStrategy &getStrategy() { return *S; }
+
+  /// Called before the assembly for the module is generated by
+  /// the AsmPrinter (but after target-specific hooks).
+  virtual void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+
+  /// Called after the assembly for the module is generated by
+  /// the AsmPrinter (but before target-specific hooks).
+  virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+};
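+
+// A sketch of a custom printer, registered the same way the built-in ocaml
+// and erlang printers are (MyGCPrinter and "mygc" are hypothetical names):
+//
+//   class MyGCPrinter : public GCMetadataPrinter {
+//   public:
+//     void finishAssembly(Module &M, GCModuleInfo &Info,
+//                         AsmPrinter &AP) override {
+//       // Walk Info.funcinfo_begin()..funcinfo_end() and emit the tables.
+//     }
+//   };
+//   static GCMetadataPrinterRegistry::Add<MyGCPrinter>
+//       X("mygc", "my sample printer");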
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCMETADATAPRINTER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
new file mode 100644
index 0000000..16168e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
@@ -0,0 +1,181 @@
+//===- llvm/CodeGen/GCStrategy.h - Garbage collection -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCStrategy coordinates code generation algorithms and implements some itself
+// in order to generate code compatible with a target code generator as
+// specified in a function's 'gc' attribute. Algorithms are enabled by setting
+// flags in a subclass's constructor, and some virtual methods can be
+// overridden.
+//
+// GCStrategy is relevant for implementations using either gc.root or
+// gc.statepoint based lowering strategies, but is currently focused mostly on
+// options for gc.root.  This will change over time.
+//
+// When requested by a subclass of GCStrategy, the gc.root implementation will
+// populate GCModuleInfo and GCFunctionInfo with information about each
+// Function in the Module that opts in to garbage collection.  Specifically:
+//
+// - Safe points
+//   Garbage collection is generally only possible at certain points in code.
+//   GCStrategy can request that the back-end insert such points:
+//
+//     - At and after any call to a subroutine
+//     - Before returning from the current function
+//     - Before backwards branches (loops)
+//
+// - Roots
+//   When a reference to a GC-allocated object exists on the stack, it must be
+//   stored in an alloca registered with llvm.gcroot.
+//
+// This information can be used to emit the metadata tables which are required
+// by the target garbage collector runtime.
+//
+// When used with gc.statepoint, information about safe points and roots can be
+// found in the binary StackMap section after code generation.  Safepoint
+// placement is currently the responsibility of the frontend, though late
+// insertion support is planned.  gc.statepoint does not currently support
+// custom stack map formats; such formats can be generated by parsing the
+// standard stack map section if desired.
+//
+// The read and write barrier support can be used with either implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCSTRATEGY_H
+#define LLVM_CODEGEN_GCSTRATEGY_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Registry.h"
+#include <string>
+
+namespace llvm {
+
+class Type;
+
+namespace GC {
+
+/// PointKind - Used to indicate whether the address of the call instruction
+/// or the address after the call instruction is listed in the stackmap.  For
+/// most runtimes, PostCall safepoints are appropriate.
+///
+enum PointKind {
+  PreCall, ///< Instr is a call instruction.
+  PostCall ///< Instr is the return address of a call.
+};
+
+} // end namespace GC
+
+/// GCStrategy describes a garbage collector algorithm's code generation
+/// requirements, and provides overridable hooks for those needs which cannot
+/// be abstractly described.  GCStrategy objects must be looked up through
+/// the Function.  The objects themselves are owned by the Context and must
+/// be immutable.
+class GCStrategy {
+private:
+  friend class GCModuleInfo;
+
+  std::string Name;
+
+protected:
+  bool UseStatepoints = false; ///< Uses gc.statepoints as opposed to gc.roots;
+                               ///< if set, none of the other options can be
+                               ///< anything but their default values.
+
+  unsigned NeededSafePoints = 0;    ///< Bitmask of required safe points.
+  bool CustomReadBarriers = false;  ///< Default is to insert loads.
+  bool CustomWriteBarriers = false; ///< Default is to insert stores.
+  bool CustomRoots = false;         ///< Default is to pass through to backend.
+  bool InitRoots = true;            ///< If set, roots are nulled during lowering.
+  bool UsesMetadata = false;        ///< If set, backend must emit metadata tables.
+
+public:
+  GCStrategy();
+  virtual ~GCStrategy() = default;
+
+  /// Return the name of the GC strategy.  This is the value of the collector
+  /// name string specified on functions which use this strategy.
+  const std::string &getName() const { return Name; }
+
+  /// By default, write barriers are replaced with simple store
+  /// instructions. If true, you must provide a custom pass to lower
+  /// calls to @llvm.gcwrite.
+  bool customWriteBarrier() const { return CustomWriteBarriers; }
+
+  /// By default, read barriers are replaced with simple load
+  /// instructions. If true, you must provide a custom pass to lower
+  /// calls to @llvm.gcread.
+  bool customReadBarrier() const { return CustomReadBarriers; }
+
+  /// Returns true if this strategy is expecting the use of gc.statepoints,
+  /// and false otherwise.
+  bool useStatepoints() const { return UseStatepoints; }
+
+  /** @name Statepoint Specific Properties */
+  ///@{
+
+  /// If the type specified can be reliably distinguished, returns true for
+  /// pointers to GC managed locations and false for pointers to non-GC
+  /// managed locations.  Note a GCStrategy can always return 'None' (i.e. an
+  /// empty optional) indicating it can't reliably distinguish.
+  virtual Optional<bool> isGCManagedPointer(const Type *Ty) const {
+    return None;
+  }
+  ///@}
+
+  /** @name GCRoot Specific Properties
+   * These properties and overrides only apply to collector strategies using
+   * GCRoot.
+   */
+  ///@{
+
+  /// True if safe points of any kind are required. By default, none are
+  /// recorded.
+  bool needsSafePoints() const { return NeededSafePoints != 0; }
+
+  /// True if the given kind of safe point is required. By default, none are
+  /// recorded.
+  bool needsSafePoint(GC::PointKind Kind) const {
+    return (NeededSafePoints & 1 << Kind) != 0;
+  }
+
+  /// By default, roots are left for the code generator so it can generate a
+  /// stack map. If true, you must provide a custom pass to lower
+  /// calls to @llvm.gcroot.
+  bool customRoots() const { return CustomRoots; }
+
+  /// If set, gcroot intrinsics should initialize their allocas to null
+  /// before the first use. This is necessary for most GCs and is enabled by
+  /// default.
+  bool initializeRoots() const { return InitRoots; }
+
+  /// If set, appropriate metadata tables must be emitted by the back-end
+  /// (assembler, JIT, or otherwise). For statepoint, this method is
+  /// currently unsupported.  The stackmap information can be found in the
+  /// StackMap section as described in the documentation.
+  bool usesMetadata() const { return UsesMetadata; }
+
+  ///@}
+};
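+
+// A sketch of a strategy subclass: as noted in the file comment, algorithms
+// are enabled by setting flags in the constructor (MyGC is a hypothetical
+// collector):
+//
+//   class MyGC : public GCStrategy {
+//   public:
+//     MyGC() {
+//       NeededSafePoints = 1 << GC::PostCall; // only post-call safe points
+//       UsesMetadata = true;                  // back-end must emit tables
+//       InitRoots = true;                     // null out roots on entry
+//     }
+//   };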
+
+/// Subclasses of GCStrategy are made available for use during compilation by
+/// adding them to the global GCRegistry.  This can be done either within the
+/// LLVM source tree or via a loadable plugin.  An example registration
+/// would be:
+/// static GCRegistry::Add<CustomGC> X("custom-name",
+///        "my custom super fancy gc strategy");
+///
+/// Note that to use a custom GCMetadataPrinter with gc.roots, you must also
+/// register your GCMetadataPrinter subclass with the
+/// GCMetadataPrinterRegistry as well.
+using GCRegistry = Registry<GCStrategy>;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCSTRATEGY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCs.h b/linux-x64/clang/include/llvm/CodeGen/GCs.h
new file mode 100644
index 0000000..5207f80
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCs.h
@@ -0,0 +1,46 @@
+//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains hack functions to force linking in the GC components.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCS_H
+#define LLVM_CODEGEN_GCS_H
+
+namespace llvm {
+class GCStrategy;
+class GCMetadataPrinter;
+
+/// FIXME: Collector instances are not useful on their own. These no longer
+///        serve any purpose except to link in the plugins.
+
+/// Creates a CoreCLR-compatible garbage collector.
+void linkCoreCLRGC();
+
+/// Creates an ocaml-compatible garbage collector.
+void linkOcamlGC();
+
+/// Creates an ocaml-compatible metadata printer.
+void linkOcamlGCPrinter();
+
+/// Creates an erlang-compatible garbage collector.
+void linkErlangGC();
+
+/// Creates an erlang-compatible metadata printer.
+void linkErlangGCPrinter();
+
+/// Creates a shadow stack garbage collector. This collector requires no code
+/// generator support.
+void linkShadowStackGC();
+
+void linkStatepointExampleGC();
+} // end namespace llvm
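+
+// These functions are meant to be called (and their results ignored) from a
+// tool's initialization code so the linker keeps the corresponding
+// components; a sketch:
+//
+//   int main(int argc, char **argv) {
+//     llvm::linkOcamlGC();        // force the ocaml GC strategy in
+//     llvm::linkShadowStackGC();  // force the shadow stack GC in
+//     ...
+//   }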
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
new file mode 100644
index 0000000..8d91cc4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -0,0 +1,215 @@
+//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class MachineIRBuilder;
+class MachineOperand;
+struct MachinePointerInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Type;
+class Value;
+
+class CallLowering {
+  const TargetLowering *TLI;
+
+public:
+  struct ArgInfo {
+    unsigned Reg;
+    Type *Ty;
+    ISD::ArgFlagsTy Flags;
+    bool IsFixed;
+
+    ArgInfo(unsigned Reg, Type *Ty, ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{},
+            bool IsFixed = true)
+        : Reg(Reg), Ty(Ty), Flags(Flags), IsFixed(IsFixed) {}
+  };
+
+  /// Argument handling is mostly uniform between the four places that
+  /// make these decisions: function formal arguments, call
+  /// instruction args, call instruction returns and function
+  /// returns. However, once a decision has been made on where an
+  /// argument should go, exactly what happens can vary slightly. This
+  /// class abstracts the differences.
+  struct ValueHandler {
+    ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                 CCAssignFn *AssignFn)
+      : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn) {}
+
+    virtual ~ValueHandler() = default;
+
+    /// Materialize a VReg containing the address of the specified
+    /// stack-based object. This is either based on a FrameIndex or
+    /// direct SP manipulation, depending on the context. \p MPO
+    /// should be initialized to an appropriate description of the
+    /// address created.
+    virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+                                     MachinePointerInfo &MPO) = 0;
+
+    /// The specified value has been assigned to a physical register; handle
+    /// the appropriate COPY (either to or from) and mark any relevant
+    /// uses/defines as needed.
+    virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+                                  CCValAssign &VA) = 0;
+
+    /// The specified value has been assigned to a stack
+    /// location. Load or store it there, with appropriate extension
+    /// if necessary.
+    virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+                                      uint64_t Size, MachinePointerInfo &MPO,
+                                      CCValAssign &VA) = 0;
+
+    /// Handle custom values, which may be passed into one or more of \p VAs.
+    /// \return The number of \p VAs that have been assigned after the first
+    ///         one, and which should therefore be skipped from further
+    ///         processing.
+    virtual unsigned assignCustomValue(const ArgInfo &Arg,
+                                       ArrayRef<CCValAssign> VAs) {
+      // This is not a pure virtual method because not all targets need to worry
+      // about custom values.
+      llvm_unreachable("Custom values not supported");
+    }
+
+    unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+
+    virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                           CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
+                           CCState &State) {
+      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+    }
+
+    MachineIRBuilder &MIRBuilder;
+    MachineRegisterInfo &MRI;
+    CCAssignFn *AssignFn;
+  };
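+
+  // A sketch of the incoming-arguments case (MyIncomingHandler is a
+  // hypothetical target handler; buildCopy emits a generic COPY):
+  //
+  //   struct MyIncomingHandler : ValueHandler {
+  //     using ValueHandler::ValueHandler;
+  //     void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+  //                           CCValAssign &VA) override {
+  //       MIRBuilder.getMBB().addLiveIn(PhysReg);
+  //       MIRBuilder.buildCopy(ValVReg, PhysReg);
+  //     }
+  //     ... getStackAddress/assignValueToAddress elided ...
+  //   };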
+
+protected:
+  /// Getter for generic TargetLowering class.
+  const TargetLowering *getTLI() const {
+    return TLI;
+  }
+
+  /// Getter for target specific TargetLowering class.
+  template <class XXXTargetLowering>
+  const XXXTargetLowering *getTLI() const {
+    return static_cast<const XXXTargetLowering *>(TLI);
+  }
+
+  template <typename FuncInfoTy>
+  void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+                   const FuncInfoTy &FuncInfo) const;
+
+  /// Invoke Handler::assignArg on each of the given \p Args and then use
+  /// \p Callback to move them to the assigned locations.
+  ///
+  /// \return True if everything has succeeded, false otherwise.
+  bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
+                         ValueHandler &Callback) const;
+
+public:
+  CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
+  virtual ~CallLowering() = default;
+
+  /// This hook must be implemented to lower outgoing return values, described
+  /// by \p Val, into the specified virtual register \p VReg.
+  /// This hook is used by GlobalISel.
+  ///
+  /// \return True if the lowering succeeds, false otherwise.
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
+                           const Value *Val, unsigned VReg) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the incoming (formal)
+  /// arguments, described by \p Args, for GlobalISel. Each argument
+  /// must end up in the related virtual register described by VRegs.
+  /// In other words, the first argument should end up in VRegs[0],
+  /// the second in VRegs[1], and so on.
+  /// \p MIRBuilder is set to the proper insertion point for the argument
+  /// lowering.
+  ///
+  /// \return True if the lowering succeeded, false otherwise.
+  virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+                                    const Function &F,
+                                    ArrayRef<unsigned> VRegs) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the given call instruction,
+  /// including argument and return value marshalling.
+  ///
+  /// \p CallConv is the calling convention to be used for the call.
+  ///
+  /// \p Callee is the destination of the call. It should be either a register,
+  /// globaladdress, or externalsymbol.
+  ///
+  /// \p OrigRet describes the return value: its type and the generic virtual
+  /// register that the returned value should be lowered into.
+  ///
+  /// \p OrigArgs is a list of descriptions (type and virtual register) of each
+  /// argument that needs to be passed; these are used by the target to decide
+  /// which register/stack slot should be allocated.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                         const MachineOperand &Callee, const ArgInfo &OrigRet,
+                         ArrayRef<ArgInfo> OrigArgs) const {
+    return false;
+  }
+
+  /// Lower the given call instruction, including argument and return value
+  /// marshalling.
+  ///
+  /// \p CS is the call/invoke call site.
+  ///
+  /// \p ResReg is a register where the call's return value should be stored (or
+  /// 0 if there is no return value).
+  ///
+  /// \p ArgRegs is a list of virtual registers containing each argument that
+  /// needs to be passed.
+  ///
+  /// \p GetCalleeReg is a callback to materialize a register for the callee if
+  /// the target determines it cannot jump to the destination based purely on
+  /// \p CS. This might be because \p CS is indirect, or because of the limited
+  /// range of an immediate jump.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+                 unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                 std::function<unsigned()> GetCalleeReg) const;
+};
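+
+// Sketch of how a target's lowerFormalArguments typically feeds
+// handleAssignments (MyIncomingHandler and AssignFnFixed are hypothetical;
+// VRegs is the parameter of lowerFormalArguments and DL the module's layout):
+//
+//   SmallVector<ArgInfo, 8> Args;
+//   unsigned Idx = 0;
+//   for (auto &Arg : F.args()) {
+//     ArgInfo AInfo(VRegs[Idx], Arg.getType());
+//     setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);
+//     Args.push_back(AInfo);
+//     ++Idx;
+//   }
+//   MyIncomingHandler Handler(MIRBuilder, MRI, AssignFnFixed);
+//   return handleAssignments(MIRBuilder, Args, Handler);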
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
new file mode 100644
index 0000000..36a33de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -0,0 +1,43 @@
+//===- llvm/CodeGen/GlobalISel/Combiner.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to drive combines. Combiner passes will need to
+/// set up a CombinerInfo and call combineMachineInstrs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineRegisterInfo;
+class CombinerInfo;
+class TargetPassConfig;
+class MachineFunction;
+
+class Combiner {
+public:
+  Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
+
+  bool combineMachineInstrs(MachineFunction &MF);
+
+protected:
+  CombinerInfo &CInfo;
+
+  MachineRegisterInfo *MRI = nullptr;
+  const TargetPassConfig *TPC;
+  MachineIRBuilder Builder;
+};
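+
+// Driving a combine from a pass's runOnMachineFunction (a sketch;
+// MyCombinerInfo is a hypothetical CombinerInfo subclass):
+//
+//   MyCombinerInfo CInfo;
+//   Combiner C(CInfo, &getAnalysis<TargetPassConfig>());
+//   bool Changed = C.combineMachineInstrs(MF);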
+
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
new file mode 100644
index 0000000..5d5b839
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -0,0 +1,44 @@
+//===- llvm/CodeGen/GlobalISel/CombinerHelper.h -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===--------------------------------------------------------------------===//
+//
+/// This contains common combine transformations that may be used in a combine
+/// pass, or by the target elsewhere.
+/// Targets can pick individual opcode transformations from the helper or use
+/// tryCombine which invokes all transformations. All of the transformations
+/// return true if the MachineInstruction changed and false otherwise.
+//
+//===--------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+
+namespace llvm {
+
+class MachineIRBuilder;
+class MachineRegisterInfo;
+class MachineInstr;
+
+class CombinerHelper {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+
+public:
+  CombinerHelper(MachineIRBuilder &B);
+
+  /// If \p MI is a COPY, try to combine it.
+  /// Returns true if MI changed.
+  bool tryCombineCopy(MachineInstr &MI);
+
+  /// Try to transform \p MI by using all of the above
+  /// combine functions. Returns true if changed.
+  bool tryCombine(MachineInstr &MI);
+};
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
new file mode 100644
index 0000000..1d24854
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -0,0 +1,48 @@
+//===- llvm/CodeGen/GlobalISel/CombinerInfo.h -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for targets to specify how and when operations are combined.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+
+#include <cassert>
+namespace llvm {
+
+class LegalizerInfo;
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+// Contains information relevant to enabling/disabling various combines for a
+// pass.
+class CombinerInfo {
+public:
+  CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
+               LegalizerInfo *LInfo)
+      : IllegalOpsAllowed(AllowIllegalOps),
+        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo) {
+    assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
+           "Expecting LegalizerInfo when illegal ops are not allowed");
+  }
+  virtual ~CombinerInfo() = default;
+  /// If \p IllegalOpsAllowed is false, the CombinerHelper will make use of
+  /// the LegalizerInfo to check for legality before each transformation.
+  bool IllegalOpsAllowed; // TODO: Make use of this.
+
+  /// If \p LegalizeIllegalOps is true, the Combiner will also legalize the
+  /// illegal ops that are created.
+  bool LegalizeIllegalOps; // TODO: Make use of this.
+  const LegalizerInfo *LInfo;
+  virtual bool combine(MachineInstr &MI, MachineIRBuilder &B) const = 0;
+};
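+
+// A pass-specific subclass usually just forwards to CombinerHelper (a sketch;
+// MyCombinerInfo is a hypothetical name):
+//
+//   class MyCombinerInfo : public CombinerInfo {
+//   public:
+//     MyCombinerInfo()
+//         : CombinerInfo(/*AllowIllegalOps*/ true,
+//                        /*ShouldLegalizeIllegal*/ false, /*LInfo*/ nullptr) {}
+//     bool combine(MachineInstr &MI, MachineIRBuilder &B) const override {
+//       CombinerHelper Helper(B);
+//       return Helper.tryCombine(MI);
+//     }
+//   };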
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
new file mode 100644
index 0000000..167905d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -0,0 +1,69 @@
+//===- GISelWorkList.h - Worklist for GISel passes --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_GISEL_WORKLIST_H
+#define LLVM_GISEL_WORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+// Worklist which mostly works similarly to InstCombineWorkList, but on
+// MachineInstrs. The main difference with something like a SetVector is that
+// erasing an element doesn't move all elements over one place - instead it
+// just nulls out the element of the vector.
+// FIXME: Does it make sense to factor out common code with the
+// InstCombineWorkList?
+template<unsigned N>
+class GISelWorkList {
+  SmallVector<MachineInstr*, N> Worklist;
+  DenseMap<MachineInstr*, unsigned> WorklistMap;
+
+public:
+  GISelWorkList() = default;
+
+  bool empty() const { return WorklistMap.empty(); }
+
+  unsigned size() const { return WorklistMap.size(); }
+
+  /// insert - Add the specified instruction to the worklist if it isn't
+  /// already in it.
+  void insert(MachineInstr *I) {
+    if (WorklistMap.try_emplace(I, Worklist.size()).second) {
+      Worklist.push_back(I);
+    }
+  }
+
+  /// remove - Remove \p I from the worklist if it exists.
+  void remove(MachineInstr *I) {
+    auto It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.
+    Worklist[It->second] = nullptr;
+
+    WorklistMap.erase(It);
+  }
+
+  MachineInstr *pop_back_val() {
+    MachineInstr *I;
+    do {
+      I = Worklist.pop_back_val();
+    } while (!I);
+    assert(I && "Pop back on empty worklist");
+    WorklistMap.erase(I);
+    return I;
+  }
+};
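+
+// Typical driver loop (a sketch): seed the list, then drain it, re-inserting
+// any instructions a combine may have touched:
+//
+//   GISelWorkList<512> WorkList;
+//   for (MachineBasicBlock &MBB : MF)
+//     for (MachineInstr &MI : MBB)
+//       WorkList.insert(&MI);
+//   while (!WorkList.empty()) {
+//     MachineInstr *MI = WorkList.pop_back_val();
+//     ... try to combine MI, calling WorkList.insert/remove as needed ...
+//   }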
+
+} // end namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
new file mode 100644
index 0000000..7061c01
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -0,0 +1,444 @@
+//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the IRTranslator pass.
+/// This pass is responsible for translating LLVM IR into MachineInstr.
+/// It uses target hooks to lower the ABI, but aside from that, the code
+/// generated by the pass is generic. This is the default translator used for
+/// GlobalISel.
+///
+/// \todo Replace the comments with actual doxygen comments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Types.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/Intrinsics.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class CallLowering;
+class Constant;
+class DataLayout;
+class Instruction;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetPassConfig;
+class User;
+class Value;
+
+// Technically the pass should run on a hypothetical MachineModule,
+// since it should translate Global into some sort of MachineGlobal.
+// The MachineGlobal should ultimately just be a transfer of ownership of
+// the interesting bits that are relevant to represent a global value.
+// That being said, we could investigate what it would cost to just duplicate
+// the information from the LLVM IR.
+// The idea is that ultimately we would be able to free up the memory used
+// by the LLVM IR as soon as the translation is over.
+class IRTranslator : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// Interface used to lower everything related to calls.
+  const CallLowering *CLI;
+
+  /// Mapping of the values of the current LLVM IR function
+  /// to the related virtual registers.
+  ValueToVReg ValToVReg;
+
+  // N.b. it's not completely obvious that this will be sufficient for every
+  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
+  // our lives).
+  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
+
+  // One BasicBlock can be translated to multiple MachineBasicBlocks.  For such
+  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
+  // a mapping between the edges arriving at the BasicBlock to the corresponding
+  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
+  // single MachineBasicBlock may also end up in this Map.
+  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
+  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
+
+  // List of stubbed PHI instructions, for values and basic blocks to be filled
+  // in once all MachineBasicBlocks have been created.
+  SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+
+  /// Record of what frame index has been allocated to specified allocas for
+  /// this function.
+  DenseMap<const AllocaInst *, int> FrameIndices;
+
+  /// \name Methods for translating from LLVM IR to MachineInstr.
+  /// \see ::translate for general information on the translate methods.
+  /// @{
+
+  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
+  /// Insert the newly translated instruction(s) right where the CurBuilder
+  /// is set.
+  ///
+  /// The general algorithm is:
+  /// 1. Look for a virtual register for each operand or
+  ///    create one.
+  /// 2. Update ValToVReg accordingly.
+  /// 2.alt. For constant arguments, if they are compile time constants,
+  ///   produce an immediate in the right operand and do not touch
+  ///   ValToVReg. Actually we will go with a virtual register for each
+  ///   constant because it may be expensive to actually materialize the
+  ///   constant. Moreover, if the constant spans several instructions,
+  ///   CSE may not catch them.
+  ///   => Update ValToVReg and remember that we saw a constant in Constants.
+  ///   We will materialize all the constants in finalize.
+  /// Note: we would need to do something so that we can recognize such
+  ///       operands as constants.
+  /// 3. Create the generic instruction.
+  ///
+  /// \return true if the translation succeeded.
+  bool translate(const Instruction &Inst);
+
+  /// Materialize \p C into virtual-register \p Reg. The generic instructions
+  /// performing this materialization will be inserted into the entry block of
+  /// the function.
+  ///
+  /// \return true if the materialization succeeded.
+  bool translate(const Constant &C, unsigned Reg);
+
+  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
+  /// emitted.
+  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM load instruction into generic IR.
+  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM store instruction into generic IR.
+  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
+  bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
+                        unsigned Intrinsic);
+
+  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+
+  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
+                                  MachineIRBuilder &MIRBuilder);
+
+  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+                               MachineIRBuilder &MIRBuilder);
+
+  bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+
+  /// Translate call instruction.
+  /// \pre \p U is a call instruction.
+  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
+  /// given generic Opcode.
+  bool translateCast(unsigned Opcode, const User &U,
+                     MachineIRBuilder &MIRBuilder);
+
+  /// Translate a phi instruction.
+  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate a comparison (icmp or fcmp) instruction or constant.
+  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an integer compare instruction (or constant).
+  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Translate a floating-point compare instruction (or constant).
+  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Add remaining operands onto phis we've translated. Executed after all
+  /// MachineBasicBlocks for the function have been created.
+  void finishPendingPhis();
+
+  /// Translate \p Inst into a binary operation \p Opcode.
+  /// \pre \p U is a binary operation.
+  bool translateBinaryOp(unsigned Opcode, const User &U,
+                         MachineIRBuilder &MIRBuilder);
+
+  /// Translate branch (br) instruction.
+  /// \pre \p U is a branch instruction.
+  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate return (ret) instruction.
+  /// The target needs to implement CallLowering::lowerReturn for
+  /// this to succeed.
+  /// \pre \p U is a return instruction.
+  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
+  }
+  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
+  }
+  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
+  }
+  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
+  }
+  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
+  }
+  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
+  }
+
+  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
+  }
+  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
+  }
+  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
+  }
+  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
+  }
+  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
+  }
+  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
+  }
+  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
+  }
+  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
+  }
+  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
+  }
+  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
+  }
+  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
+  }
+  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
+  }
+  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
+  }
+  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+    return true;
+  }
+  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
+  }
+
+  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
+  }
+
+  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
+  }
+  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
+  }
+  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
+  }
+
+  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
+  }
+  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
+  }
+  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
+  }
+  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
+  }
+
+  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
+
+  // Stubs to keep the compiler happy while we implement the rest of the
+  // translation.
+  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+
+  /// @}
+
+  // Builder for machine instructions, a la IRBuilder.
+  // I.e., compared to the regular MIBuilder, this one also inserts the
+  // instruction in the current block, it can create blocks, etc., basically a
+  // kind of IRBuilder, but for Machine IR.
+  MachineIRBuilder CurBuilder;
+
+  // Builder set to the entry block (just after ABI lowering instructions). Used
+  // as a convenient location for Constants.
+  MachineIRBuilder EntryBuilder;
+
+  // The MachineFunction currently being translated.
+  MachineFunction *MF;
+
+  /// MachineRegisterInfo used to create virtual registers.
+  MachineRegisterInfo *MRI = nullptr;
+
+  const DataLayout *DL;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+  // * Insert all the code needed to materialize the constants
+  // at the proper place. E.g., Entry block or dominator block
+  // of each constant depending on how fancy we want to be.
+  // * Clear the different maps.
+  void finalizeFunction();
+
+  /// Get the VReg that represents \p Val.
+  /// If such a VReg does not exist, it is created.
+  unsigned getOrCreateVReg(const Value &Val);
+
+  /// Get the frame index that represents \p AI.
+  /// If such a frame index does not exist, it is created.
+  int getOrCreateFrameIndex(const AllocaInst &AI);
+
+  /// Get the alignment of the given memory operation instruction. This will
+  /// either be the explicitly specified value or the ABI-required alignment for
+  /// the type being accessed (according to the Module's DataLayout).
+  unsigned getMemOpAlignment(const Instruction &I);
+
+  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
+  /// returned will be the head of the translated block (suitable for branch
+  /// destinations).
+  MachineBasicBlock &getMBB(const BasicBlock &BB);
+
+  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
+  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
+  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
+  /// represented simply by the IR-level CFG.
+  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
+
+  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
+  /// this is just the single MachineBasicBlock corresponding to the predecessor
+  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
+  /// preceding the original though (e.g. switch instructions).
+  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
+    auto RemappedEdge = MachinePreds.find(Edge);
+    if (RemappedEdge != MachinePreds.end())
+      return RemappedEdge->second;
+    return SmallVector<MachineBasicBlock *, 1>(1, &getMBB(*Edge.first));
+  }
+
+public:
+  // Ctor, nothing fancy.
+  IRTranslator();
+
+  StringRef getPassName() const override { return "IRTranslator"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  // Algo:
+  //   CallLowering = MF.subtarget.getCallLowering()
+  //   F = MF.getParent()
+  //   MIRBuilder.reset(MF)
+  //   getMBB(F.getEntryBB())
+  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
+  //   for each bb in F
+  //     getMBB(bb)
+  //     for each inst in bb
+  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
+  //         report_fatal_error("Don't know how to translate input");
+  //   finalize()
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
new file mode 100644
index 0000000..01521c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -0,0 +1,53 @@
+//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for selecting (possibly generic) machine instructions to
+/// target-specific instructions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+/// This pass is responsible for selecting generic machine instructions to
+/// target-specific instructions.  It relies on the InstructionSelector provided
+/// by the target.
+/// Selection is done by examining blocks in post-order, and instructions in
+/// reverse order.
+///
+/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
+class InstructionSelect : public MachineFunctionPass {
+public:
+  static char ID;
+  StringRef getPassName() const override { return "InstructionSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Selected);
+  }
+
+  InstructionSelect();
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
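+
+// The required/set properties above pin this pass's place in the GlobalISel
+// pipeline; conceptually (a sketch of the pass ordering):
+//
+//   IRTranslator -> Legalizer -> RegBankSelect -> InstructionSelect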
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
new file mode 100644
index 0000000..eacd135
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -0,0 +1,381 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelector.h ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <vector>
+
+namespace llvm {
+
+class APInt;
+class APFloat;
+class LLT;
+class MachineInstr;
+class MachineInstrBuilder;
+class MachineFunction;
+class MachineOperand;
+class MachineRegisterInfo;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Container class for CodeGen predicate results.
+/// This is convenient because std::bitset does not have a constructor
+/// with an initializer list of set bits.
+///
+/// Each InstructionSelector subclass should define a PredicateBitset class
+/// with:
+///   const unsigned MAX_SUBTARGET_PREDICATES = 192;
+///   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+/// and updating the constant to suit the target. Tablegen provides a suitable
+/// definition for the predicates in use in <Target>GenGlobalISel.inc when
+/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
+template <std::size_t MaxPredicates>
+class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+  PredicateBitsetImpl() = default;
+
+  PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
+      : std::bitset<MaxPredicates>(B) {}
+
+  PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
+    for (auto I : Init)
+      std::bitset<MaxPredicates>::set(I);
+  }
+};
+
+enum {
+  /// Begin a try-block to attempt a match and jump to OnFail if it is
+  /// unsuccessful.
+  /// - OnFail - The MatchTable entry at which to resume if the match fails.
+  ///
+  /// FIXME: This ought to take an argument indicating the number of try-blocks
+  ///        to exit on failure. It's usually one but the last match attempt of
+  ///        a block will need more. The (implemented) alternative is to tack a
+  ///        GIM_Reject on the end of each try-block which is simpler but
+  ///        requires an extra opcode and iteration in the interpreter on each
+  ///        failed match.
+  GIM_Try,
+
+  /// Record the specified instruction
+  /// - NewInsnID - Instruction ID to define
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_RecordInsn,
+
+  /// Check the feature bits
+  /// - Expected features
+  GIM_CheckFeatures,
+
+  /// Check the opcode on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - Expected opcode
+  GIM_CheckOpcode,
+  /// Check the instruction has the right number of operands
+  /// - InsnID - Instruction ID
+  /// - Expected number of operands
+  GIM_CheckNumOperands,
+  /// Check an immediate predicate on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckI64ImmPredicate,
+  /// Check an immediate predicate on the specified instruction via an APInt.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPIntImmPredicate,
+  /// Check a floating point immediate predicate on the specified instruction.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPFloatImmPredicate,
+  /// Check a memory operation has the specified atomic ordering.
+  /// - InsnID - Instruction ID
+  /// - Ordering - The AtomicOrdering value
+  GIM_CheckAtomicOrdering,
+  GIM_CheckAtomicOrderingOrStrongerThan,
+  GIM_CheckAtomicOrderingWeakerThan,
+
+  /// Check the type for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected type
+  GIM_CheckType,
+  /// Check the type of a pointer to any address space.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - SizeInBits - The size of the pointer value in bits.
+  GIM_CheckPointerToAny,
+  /// Check the register bank for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected register bank (specified as a register class)
+  GIM_CheckRegBankForClass,
+  /// Check the operand matches a complex predicate
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - RendererID - The renderer to hold the result
+  /// - Complex predicate ID
+  GIM_CheckComplexPattern,
+  /// Check the operand is a specific integer
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckConstantInt,
+  /// Check the operand is a specific literal integer (i.e. MO.isImm() or
+  /// MO.isCImm() is true).
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckLiteralInt,
+  /// Check the operand is a specific intrinsic ID
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected Intrinsic ID
+  GIM_CheckIntrinsicID,
+  /// Check the specified operand is an MBB
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_CheckIsMBB,
+
+  /// Check if the specified operand is safe to fold into the current
+  /// instruction.
+  /// - InsnID - Instruction ID
+  GIM_CheckIsSafeToFold,
+
+  /// Check the specified operands are identical.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - OtherInsnID - Other instruction ID
+  /// - OtherOpIdx - Other operand index
+  GIM_CheckIsSameOperand,
+
+  /// Fail the current try-block, or completely fail to match if there is no
+  /// current try-block.
+  GIM_Reject,
+
+  //=== Renderers ===
+
+  /// Mutate an instruction
+  /// - OldInsnID - Instruction ID to mutate
+  /// - NewInsnID - Instruction ID to define
+  /// - NewOpcode - The new opcode to use
+  GIR_MutateOpcode,
+  /// Build a new instruction
+  /// - InsnID - Instruction ID to define
+  /// - Opcode - The new opcode to use
+  GIR_BuildMI,
+
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  GIR_Copy,
+  /// Copy an operand to the specified instruction or add a zero register if the
+  /// operand is a zero immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - ZeroReg - The zero register to use
+  GIR_CopyOrAddZeroReg,
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - SubRegIdx - The subregister to copy
+  GIR_CopySubReg,
+  /// Add an implicit register def to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitDef,
+  /// Add an implicit register use to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitUse,
+  /// Add a register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddRegister,
+  /// Add a temporary register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - TempRegID - The temporary register ID to add
+  GIR_AddTempRegister,
+  /// Add an immediate to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - Imm - The immediate to add
+  GIR_AddImm,
+  /// Render complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  GIR_ComplexRenderer,
+  /// Render sub-operands of complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  /// - RenderOpID - The suboperand to render.
+  GIR_ComplexSubOperandRenderer,
+  /// Render operands to the specified instruction using a custom function
+  /// - InsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to get the matched operand from
+  /// - RendererFnID - Custom renderer function to call
+  GIR_CustomRenderer,
+
+  /// Render a G_CONSTANT operator as a sign-extended immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// The operand index is implicitly 1.
+  GIR_CopyConstantAsSImm,
+
+  /// Constrain an instruction operand to a register class.
+  /// - InsnID - Instruction ID to modify
+  /// - OpIdx - Operand index
+  /// - RCEnum - Register class enumeration value
+  GIR_ConstrainOperandRC,
+  /// Constrain an instruction's operands according to the instruction
+  /// description.
+  /// - InsnID - Instruction ID to modify
+  GIR_ConstrainSelectedInstOperands,
+  /// Merge all memory operands into instruction.
+  /// - InsnID - Instruction ID to modify
+  /// - MergeInsnID... - One or more Instruction IDs to merge into the result.
+  /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
+  ///                                    merge.
+  GIR_MergeMemOperands,
+  /// Erase from parent.
+  /// - InsnID - Instruction ID to erase
+  GIR_EraseFromParent,
+  /// Create a new temporary register that's not constrained.
+  /// - TempRegID - The temporary register ID to initialize.
+  /// - Expected type
+  GIR_MakeTempReg,
+
+  /// A successful emission
+  GIR_Done,
+
+  /// Increment the rule coverage counter.
+  /// - RuleID - The ID of the rule that was covered.
+  GIR_Coverage,
+};
+
+enum {
+  /// Indicates the end of the variable-length MergeInsnID list in a
+  /// GIR_MergeMemOperands opcode.
+  GIU_MergeMemOperands_EndOfList = -1,
+};
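+
+// A minimal sketch (not part of the original header) of how the opcodes above
+// are laid out: a matcher table is a flat stream of int64_t values, each
+// opcode followed by its operands as documented. Assuming a hypothetical
+// target opcode MyTarget::ADDWrr, one rule might be encoded roughly as:
+//
+//   const int64_t MatchTable[] = {
+//     GIM_Try, /*OnFailResumeAt*/ 15, // Index of the trailing GIM_Reject.
+//       GIM_CheckOpcode, /*InsnID*/ 0, TargetOpcode::G_ADD,
+//       GIM_CheckNumOperands, /*InsnID*/ 0, /*Expected*/ 3,
+//       GIR_MutateOpcode, /*OldInsnID*/ 0, /*NewInsnID*/ 0, MyTarget::ADDWrr,
+//       GIR_ConstrainSelectedInstOperands, /*InsnID*/ 0,
+//       GIR_Done,
+//     GIM_Reject,
+//   };
+//
+// In practice these tables are emitted by TableGen rather than written by
+// hand.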
+
+/// Provides the logic to select generic machine instructions.
+class InstructionSelector {
+public:
+  virtual ~InstructionSelector() = default;
+
+  /// Select the (possibly generic) instruction \p I to only use target-specific
+  /// opcodes. It is OK to insert multiple instructions, but they cannot be
+  /// generic pre-isel instructions.
+  ///
+  /// \returns whether selection succeeded.
+  /// \pre  I.getParent() && I.getParent()->getParent()
+  /// \post
+  ///   if returns true:
+  ///     for I in all mutated/inserted instructions:
+  ///       !isPreISelGenericOpcode(I.getOpcode())
+  virtual bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const = 0;
+
+protected:
+  using ComplexRendererFns =
+      Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+  using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+  using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+  struct MatcherState {
+    std::vector<ComplexRendererFns::value_type> Renderers;
+    RecordedMIVector MIs;
+    DenseMap<unsigned, unsigned> TempRegisters;
+
+    MatcherState(unsigned MaxRenderers);
+  };
+
+public:
+  template <class PredicateBitset, class ComplexMatcherMemFn,
+            class CustomRendererFn>
+  struct ISelInfoTy {
+    const LLT *TypeObjects;
+    const PredicateBitset *FeatureBitsets;
+    const ComplexMatcherMemFn *ComplexPredicates;
+    const CustomRendererFn *CustomRenderers;
+  };
+
+protected:
+  InstructionSelector();
+
+  /// Execute a given matcher table and return true if the match was successful
+  /// and false otherwise.
+  template <class TgtInstructionSelector, class PredicateBitset,
+            class ComplexMatcherMemFn, class CustomRendererFn>
+  bool executeMatchTable(
+      TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+      const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+          &ISelInfo,
+      const int64_t *MatchTable, const TargetInstrInfo &TII,
+      MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+      const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+      CodeGenCoverage &CoverageInfo) const;
+
+  virtual bool testImmPredicate_I64(unsigned, int64_t) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
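+
+  // A minimal sketch (not part of the original header): the TableGen-emitted
+  // selector overrides these hooks. Assuming a hypothetical predicate
+  // enumerator GIPFP_I64_Predicate_simm8, an override might look like:
+  //
+  //   bool MyTargetInstructionSelector::testImmPredicate_I64(
+  //       unsigned PredicateID, int64_t Imm) const {
+  //     if (PredicateID == GIPFP_I64_Predicate_simm8)
+  //       return isInt<8>(Imm);
+  //     llvm_unreachable("Unknown predicate");
+  //   }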
+
+  /// Constrain a register operand of an instruction \p I to a specified
+  /// register class. This could involve inserting COPYs before (for uses) or
+  /// after (for defs) and may replace the operand of \p I.
+  /// \returns whether operand regclass constraining succeeded.
+  bool constrainOperandRegToRegClass(MachineInstr &I, unsigned OpIdx,
+                                     const TargetRegisterClass &RC,
+                                     const TargetInstrInfo &TII,
+                                     const TargetRegisterInfo &TRI,
+                                     const RegisterBankInfo &RBI) const;
+
+  bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
+                         const MachineRegisterInfo &MRI) const;
+
+  /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+  /// right-hand side. GlobalISel's separation of pointer and integer types
+  /// means that we don't need to worry about G_OR with equivalent semantics.
+  bool isBaseWithConstantOffset(const MachineOperand &Root,
+                                const MachineRegisterInfo &MRI) const;
+
+  /// Return true if MI can obviously be folded into IntoMI.
+  /// MI and IntoMI do not need to be in the same basic block, but MI must
+  /// precede IntoMI.
+  bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
+};
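+
+// A minimal sketch (not part of the original header) of how a target wires
+// this up; the MyTarget names and selectManually() are hypothetical, and
+// selectImpl() stands in for the TableGen-generated wrapper around
+// executeMatchTable():
+//
+//   bool MyTargetInstructionSelector::select(
+//       MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
+//     if (selectImpl(I, CoverageInfo))
+//       return true;
+//     return selectManually(I); // Hand-written fallback for unhandled cases.
+//   }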
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 0000000..f7593ba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,769 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+/// GlobalISel PatFrag Predicates
+enum {
+  GIPFP_I64_Invalid = 0,
+  GIPFP_APInt_Invalid = 0,
+  GIPFP_APFloat_Invalid = 0,
+};
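+
+// A TableGen-emitted selector defines its own predicate enumerators starting
+// after these invalid sentinels, e.g. (hypothetical name):
+//   enum { GIPFP_I64_Predicate_simm8 = GIPFP_I64_Invalid + 1 };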
+
+template <class TgtInstructionSelector, class PredicateBitset,
+          class ComplexMatcherMemFn, class CustomRendererFn>
+bool InstructionSelector::executeMatchTable(
+    TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+    const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+        &ISelInfo,
+    const int64_t *MatchTable, const TargetInstrInfo &TII,
+    MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+    const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+    CodeGenCoverage &CoverageInfo) const {
+  uint64_t CurrentIdx = 0;
+  SmallVector<uint64_t, 8> OnFailResumeAt;
+
+  enum RejectAction { RejectAndGiveUp, RejectAndResume };
+  auto handleReject = [&]() -> RejectAction {
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Rejected\n");
+    if (OnFailResumeAt.empty())
+      return RejectAndGiveUp;
+    CurrentIdx = OnFailResumeAt.back();
+    OnFailResumeAt.pop_back();
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+                           << OnFailResumeAt.size() << " try-blocks remain)\n");
+    return RejectAndResume;
+  };
+
+  while (true) {
+    assert(CurrentIdx != ~0ULL && "Invalid MatchTable index");
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_Try: {
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": Begin try-block\n");
+      OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
+      break;
+    }
+
+    case GIM_RecordInsn: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+
+      // As an optimisation we require that MIs[0] is always the root. Refuse
+      // any attempt to modify it.
+      assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg()) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Not a register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+      if (TRI.isPhysicalRegister(MO.getReg())) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Is a physical register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+      if ((size_t)NewInsnID < State.MIs.size())
+        State.MIs[NewInsnID] = NewMI;
+      else {
+        assert((size_t)NewInsnID == State.MIs.size() &&
+               "Expected to store MIs in order");
+        State.MIs.push_back(NewMI);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+                             << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+                             << ")\n");
+      break;
+    }
+
+    case GIM_CheckFeatures: {
+      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckFeatures(ExpectedBitsetID="
+                             << ExpectedBitsetID << ")\n");
+      if ((AvailableFeatures & ISelInfo.FeatureBitsets[ExpectedBitsetID]) !=
+          ISelInfo.FeatureBitsets[ExpectedBitsetID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckOpcode: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      unsigned Opcode = State.MIs[InsnID]->getOpcode();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+                             << "], ExpectedOpcode=" << Expected
+                             << ") // Got=" << Opcode << "\n");
+      if (Opcode != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckNumOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+                             << InsnID << "], Expected=" << Expected << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (State.MIs[InsnID]->getNumOperands() != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckI64ImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckI64ImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
+      int64_t Value = 0;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getSExtValue();
+      else if (State.MIs[InsnID]->getOperand(1).isImm())
+        Value = State.MIs[InsnID]->getOperand(1).getImm();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_I64(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPIntImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
+      APInt Value;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_APInt(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPFloatImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+             "Expected G_FCONSTANT");
+      assert(State.MIs[InsnID]->getOperand(1).isFPImm() &&
+             "Expected FPImm operand");
+      assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
+      APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+      if (!testImmPredicate_APFloat(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAtomicOrdering: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (MMO->getOrdering() != Ordering)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingOrStrongerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingWeakerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isStrongerThan(Ordering, MMO->getOrdering()))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckType: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), TypeID=" << TypeID << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckPointerToAny: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SizeInBits = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), SizeInBits=" << SizeInBits << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      // iPTR must be looked up in the target.
+      if (SizeInBits == 0) {
+        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+        SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+      }
+
+      assert(SizeInBits != 0 && "Pointer size must be known");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        const LLT &Ty = MRI.getType(MO.getReg());
+        if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+    case GIM_CheckRegBankForClass: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), RCEnum=" << RCEnum << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+              RBI.getRegBank(MO.getReg(), MRI, TRI)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckComplexPattern: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+                             << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), ComplexPredicateID=" << ComplexPredicateID
+                             << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      // FIXME: Use std::invoke() when it's available.
+      ComplexRendererFns Renderer =
+          (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
+              State.MIs[InsnID]->getOperand(OpIdx));
+      if (Renderer.hasValue())
+        State.Renderers[RendererID] = Renderer.getValue();
+      else
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckConstantInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+        LLT Ty = MRI.getType(MO.getReg());
+        Value = SignExtend64(Value, Ty.getSizeInBits());
+
+        if (!isOperandImmEqual(MO, Value, MRI)) {
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+        }
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+
+    case GIM_CheckLiteralInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIntrinsicID: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckIsMBB: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx << "))\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIsSafeToFold: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
+                             << InsnID << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckIsSameOperand: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t OtherInsnID = MatchTable[CurrentIdx++];
+      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+                             << InsnID << "][" << OpIdx << "], MIs["
+                             << OtherInsnID << "][" << OtherOpIdx << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
+              State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_Reject:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_Reject");
+      if (handleReject() == RejectAndGiveUp)
+        return false;
+      break;
+
+    case GIR_MutateOpcode: {
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t NewOpcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
+                                              State.MIs[OldInsnID]);
+      OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << NewOpcode << ")\n");
+      break;
+    }
+
+    case GIR_BuildMI: {
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t Opcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+                                  State.MIs[0]->getDebugLoc(), TII.get(Opcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+                             << NewInsnID << "], " << Opcode << ")\n");
+      break;
+    }
+
+    case GIR_Copy: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+                          << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
+      break;
+    }
+
+    case GIR_CopyOrAddZeroReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t ZeroReg = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+      if (isOperandImmEqual(MO, 0, MRI))
+        OutMIs[NewInsnID].addReg(ZeroReg);
+      else
+        OutMIs[NewInsnID].add(MO);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << ZeroReg << ")\n");
+      break;
+    }
+
+    case GIR_CopySubReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SubRegIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+                               0, SubRegIdx);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << SubRegIdx << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitDef: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitUse: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(RegNum);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddTempRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
+                             << InsnID << "], TempRegisters[" << TempRegID
+                             << "], " << TempRegFlags << ")\n");
+      break;
+    }
+
+    case GIR_AddImm: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Imm = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addImm(Imm);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+                             << "], " << Imm << ")\n");
+      break;
+    }
+
+    case GIR_ComplexRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      for (const auto &RenderOpFn : State.Renderers[RendererID])
+        RenderOpFn(OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ")\n");
+      break;
+    }
+    case GIR_ComplexSubOperandRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t RenderOpID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ComplexSubOperandRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ", "
+                             << RenderOpID << ")\n");
+      break;
+    }
+
+    case GIR_CopyConstantAsSImm: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+        OutMIs[NewInsnID].addImm(
+            State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+      } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+        OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+      break;
+    }
+
+    case GIR_CustomRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t RendererFnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
+                             << InsnID << "], MIs[" << OldInsnID << "], "
+                             << RendererFnID << ")\n");
+      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
+                                                     *State.MIs[OldInsnID]);
+      break;
+    }
+    case GIR_ConstrainOperandRC: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
+                                    *TRI.getRegClass(RCEnum), TII, TRI, RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+                             << InsnID << "], " << OpIdx << ", " << RCEnum
+                             << ")\n");
+      break;
+    }
+
+    case GIR_ConstrainSelectedInstOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+                                       RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MergeMemOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+                             << InsnID << "]");
+      int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
+      while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
+             GIU_MergeMemOperands_EndOfList) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << ", MIs[" << MergeInsnID << "]");
+        for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
+          OutMIs[InsnID].addMemOperand(MMO);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
+      break;
+    }
+
+    case GIR_EraseFromParent: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(State.MIs[InsnID] &&
+             "Attempted to erase an undefined instruction");
+      State.MIs[InsnID]->eraseFromParent();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MakeTempReg: {
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+
+      State.TempRegisters[TempRegID] =
+          MRI.createGenericVirtualRegister(ISelInfo.TypeObjects[TypeID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+                             << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+      break;
+    }
+
+    case GIR_Coverage: {
+      int64_t RuleID = MatchTable[CurrentIdx++];
+      CoverageInfo.setCovered(RuleID);
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Coverage(" << RuleID << ")");
+      break;
+    }
+
+    case GIR_Done:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_Done");
+      return true;
+
+    default:
+      llvm_unreachable("Unexpected command");
+    }
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
new file mode 100644
index 0000000..a1f564b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -0,0 +1,287 @@
+//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h --===========//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains some helper functions which try to clean up artifacts
+// such as G_TRUNCs/G_[ZSA]EXTENDS that were created during legalization to make
+// the types match. This file also contains some combines of merges that happen
+// at the end of legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+
+namespace llvm {
+class LegalizationArtifactCombiner {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+
+public:
+  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                               const LegalizerInfo &LI)
+      : Builder(B), MRI(MRI), LI(LI) {}
+
+  bool tryCombineAnyExt(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned SrcReg = DefMI->getOperand(1).getReg();
+      Builder.setInstr(MI);
+      // We get a copy/trunc/extend depending on the sizes
+      Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
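+
+  // A minimal sketch of the rewrite above (virtual register numbers are
+  // illustrative):
+  //   %1(s16) = G_TRUNC %0(s32)
+  //   %2(s64) = G_ANYEXT %1(s16)
+  // becomes
+  //   %2(s64) = G_ANYEXT %0(s32)
+  // (or a COPY/G_TRUNC if the destination is the same size as, or narrower
+  // than, %0), with the G_TRUNC marked dead.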
+
+  bool tryCombineZExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_ZEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned ZExtSrc = MI.getOperand(1).getReg();
+      LLT ZExtSrcTy = MRI.getType(ZExtSrc);
+      APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
+      auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
+      unsigned TruncSrc = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+      Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
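+
+  // A minimal sketch of the rewrite above (register numbers illustrative):
+  //   %1(s16) = G_TRUNC %0(s32)
+  //   %2(s64) = G_ZEXT %1(s16)
+  // becomes
+  //   %3(s64) = G_CONSTANT i64 65535 ; all-ones mask of the zext source width
+  //   %4(s64) = G_ANYEXT %0(s32)
+  //   %2(s64) = G_AND %4, %3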
+
+  bool tryCombineSExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_SEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned SExtSrc = MI.getOperand(1).getReg();
+      LLT SExtSrcTy = MRI.getType(SExtSrc);
+      unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
+      auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
+      unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
+      auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
+                                       SrcCopyExtOrTrunc, SizeDiffMIB);
+      Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
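+
+  // A minimal sketch of the rewrite above (register numbers illustrative):
+  //   %1(s16) = G_TRUNC %0(s32)
+  //   %2(s64) = G_SEXT %1(s16)
+  // becomes
+  //   %3(s64) = G_CONSTANT i64 48 ; 64 - 16, the size difference
+  //   %4(s64) = G_ANYEXT %0(s32)
+  //   %5(s64) = G_SHL %4, %3
+  //   %2(s64) = G_ASHR %5, %3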
+
+  /// Try to fold sb = EXTEND (G_IMPLICIT_DEF sa) -> sb = G_IMPLICIT_DEF
+  bool tryFoldImplicitDef(MachineInstr &MI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    unsigned Opcode = MI.getOpcode();
+    if (Opcode != TargetOpcode::G_ANYEXT && Opcode != TargetOpcode::G_ZEXT &&
+        Opcode != TargetOpcode::G_SEXT)
+      return false;
+
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+      Builder.setInstr(MI);
+      Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineMerges(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+      return false;
+
+    unsigned NumDefs = MI.getNumOperands() - 1;
+    unsigned SrcReg = MI.getOperand(NumDefs).getReg();
+    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+    if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+      return false;
+
+    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+    if (NumMergeRegs < NumDefs) {
+      if (NumDefs % NumMergeRegs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to UNMERGEs, for example
+      //   %1 = G_MERGE_VALUES %4, %5
+      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+      // to
+      //   %9, %10 = G_UNMERGE_VALUES %4
+      //   %11, %12 = G_UNMERGE_VALUES %5
+
+      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+        SmallVector<unsigned, 2> DstRegs;
+        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+             ++j, ++DefIdx)
+          DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+      }
+
+    } else if (NumMergeRegs > NumDefs) {
+      if (NumMergeRegs % NumDefs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to MERGEs
+      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
+      //   %7, %8 = G_UNMERGE_VALUES %6
+      // to
+      //   %7 = G_MERGE_VALUES %17, %18
+      //   %8 = G_MERGE_VALUES %19, %20
+
+      const unsigned NumRegs = NumMergeRegs / NumDefs;
+      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+        SmallVector<unsigned, 2> Regs;
+        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+             ++j, ++Idx)
+          Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+        Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+      }
+
+    } else {
+      // FIXME: is a COPY appropriate if the types mismatch? We know both
+      // registers are allocatable by now.
+      if (MRI.getType(MI.getOperand(0).getReg()) !=
+          MRI.getType(MergeI->getOperand(1).getReg()))
+        return false;
+
+      for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
+        MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
+                           MergeI->getOperand(Idx + 1).getReg());
+    }
+
+    markInstAndDefDead(MI, *MergeI, DeadInsts);
+    return true;
+  }
+
+  /// Try to combine away MI.
+  /// Returns true if it combined away the MI.
+  /// Adds instructions that are dead as a result of the combine
+  /// into DeadInsts, which can include MI.
+  bool tryCombineInstruction(MachineInstr &MI,
+                             SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    switch (MI.getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::G_ANYEXT:
+      return tryCombineAnyExt(MI, DeadInsts);
+    case TargetOpcode::G_ZEXT:
+      return tryCombineZExt(MI, DeadInsts);
+    case TargetOpcode::G_SEXT:
+      return tryCombineSExt(MI, DeadInsts);
+    case TargetOpcode::G_UNMERGE_VALUES:
+      return tryCombineMerges(MI, DeadInsts);
+    case TargetOpcode::G_TRUNC: {
+      bool Changed = false;
+      for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
+        Changed |= tryCombineInstruction(Use, DeadInsts);
+      return Changed;
+    }
+    }
+  }
+
+private:
+  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+  /// dead due to MI being killed, then mark DefMI as dead too.
+  /// Some of the combines (e.g. extend(trunc)) walk through redundant copies
+  /// between the extend and the trunc, and this attempts to collect the
+  /// intervening copies if they're dead.
+  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    DeadInsts.push_back(&MI);
+
+    // Collect all the copy instructions that are made dead due to deleting
+    // this instruction. Collect all of them up to the Trunc (DefMI).
+    // E.g.,
+    // %1(s1) = G_TRUNC %0(s32)
+    // %2(s1) = COPY %1(s1)
+    // %3(s1) = COPY %2(s1)
+    // %4(s32) = G_ANYEXT %3(s1)
+    // In this case, we would have replaced %4 with a copy of %0,
+    // and as a result, %3, %2, %1 are dead.
+    MachineInstr *PrevMI = &MI;
+    while (PrevMI != &DefMI) {
+      // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't
+      // really try to fold copies in between and we can ignore them here.
+      if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
+        break;
+      unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
+      if (MRI.hasOneUse(PrevRegSrc)) {
+        if (TmpDef != &DefMI) {
+          assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
+                 "Expecting copy here");
+          DeadInsts.push_back(TmpDef);
+        }
+      } else
+        break;
+      PrevMI = TmpDef;
+    }
+    if ((PrevMI == &DefMI ||
+         DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
+        MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+      DeadInsts.push_back(&DefMI);
+  }
+
+  /// Checks if the target legalizer info has left the instruction unspecified
+  /// (NotFound) or has explicitly marked it Unsupported.
+  bool isInstUnsupported(const LegalityQuery &Query) const {
+    using namespace LegalizeActions;
+    auto Step = LI.getAction(Query);
+    return Step.Action == Unsupported || Step.Action == NotFound;
+  }
+};
+
+} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
new file mode 100644
index 0000000..8284ab6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -0,0 +1,65 @@
+//== llvm/CodeGen/GlobalISel/Legalizer.h ---------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizeHelper class is where most of the work happens, and is designed
+/// to be callable from other passes that find themselves with an illegal
+/// instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+
+class Legalizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  // Ctor, nothing fancy.
+  Legalizer();
+
+  StringRef getPassName() const override { return "Legalizer"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Legalized);
+  }
+
+  bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
+                       const TargetInstrInfo &TII);
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
new file mode 100644
index 0000000..8bd8a9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -0,0 +1,115 @@
+//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is
+/// designed to be callable from other passes that find themselves with an
+/// illegal instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+// Forward declarations.
+class LegalizerInfo;
+class Legalizer;
+class MachineRegisterInfo;
+
+class LegalizerHelper {
+public:
+  enum LegalizeResult {
+    /// Instruction was already legal and no change was made to the
+    /// MachineFunction.
+    AlreadyLegal,
+
+    /// Instruction has been legalized and the MachineFunction changed.
+    Legalized,
+
+    /// Some kind of error has occurred and we could not legalize this
+    /// instruction.
+    UnableToLegalize,
+  };
+
+  LegalizerHelper(MachineFunction &MF);
+
+  /// Replace \p MI by a sequence of legal instructions that can implement the
+  /// same operation. Note that this means \p MI may be deleted, so any iterator
+  /// steps should be performed before calling this function. This helper must
+  /// have been constructed with the MachineFunction containing \p MI.
+  ///
+  /// Considered as an opaque blob, the legal code will use and define the same
+  /// registers as \p MI.
+  LegalizeResult legalizeInstrStep(MachineInstr &MI);
+
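+  // A minimal driver sketch (illustrative, not part of this header):
+  //   LegalizerHelper Helper(MF);
+  //   switch (Helper.legalizeInstrStep(MI)) {
+  //   case LegalizerHelper::AlreadyLegal:     break; // nothing changed
+  //   case LegalizerHelper::Legalized:        break; // revisit new instrs
+  //   case LegalizerHelper::UnableToLegalize: break; // report an error
+  //   }
+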
+  /// Legalize an instruction by emitting a runtime library call instead.
+  LegalizeResult libcall(MachineInstr &MI);
+
+  /// Legalize an instruction by reducing the width of the underlying scalar
+  /// type.
+  LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+  /// Legalize an instruction by performing the operation on a wider scalar type
+  /// (for example a 16-bit addition can be safely performed at 32-bit
+  /// precision, ignoring the unused bits).
+  LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+
+  /// Legalize an instruction by splitting it into simpler parts, hopefully
+  /// understood by the target.
+  LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+  /// Legalize a vector instruction by splitting into multiple components, each
+  /// acting on the same scalar type as the original but with fewer elements.
+  LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                     LLT NarrowTy);
+
+  /// Legalize a vector instruction by increasing the number of vector elements
+  /// involved and ignoring the added elements later.
+  LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                    LLT WideTy);
+
+  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
+  /// functions.
+  MachineIRBuilder MIRBuilder;
+
+  /// Expose LegalizerInfo so that clients can reuse it.
+  const LegalizerInfo &getLegalizerInfo() const { return LI; }
+
+private:
+  /// Helper function to split a wide generic register into bitwise blocks with
+  /// the given Type (which implies the number of blocks needed). The generic
+  /// registers created are appended to Ops, starting at bit 0 of Reg.
+  void extractParts(unsigned Reg, LLT Ty, int NumParts,
+                    SmallVectorImpl<unsigned> &Ops);
+
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+};
+
+/// Helper function that creates the given libcall.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+              const CallLowering::ArgInfo &Result,
+              ArrayRef<CallLowering::ArgInfo> Args);
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
new file mode 100644
index 0000000..117c791
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -0,0 +1,920 @@
+//===- llvm/CodeGen/GlobalISel/LegalizerInfo.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for Targets to specify which operations they can successfully
+/// select and how the others should be expanded most efficiently.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+#include <cstdint>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> DisableGISelLegalityCheck;
+
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+
+namespace LegalizeActions {
+enum LegalizeAction : std::uint8_t {
+  /// The operation is expected to be selectable directly by the target, and
+  /// no transformation is necessary.
+  Legal,
+
+  /// The operation should be synthesized from multiple instructions acting on
+  /// a narrower scalar base-type. For example a 64-bit add might be
+  /// implemented in terms of 32-bit add-with-carry.
+  NarrowScalar,
+
+  /// The operation should be implemented in terms of a wider scalar
+  /// base-type. For example a <2 x s8> add could be implemented as a <2
+  /// x s32> add (ignoring the high bits).
+  WidenScalar,
+
+  /// The (vector) operation should be implemented by splitting it into
+  /// sub-vectors where the operation is legal. For example a <8 x s64> add
+  /// might be implemented as 4 separate <2 x s64> adds.
+  FewerElements,
+
+  /// The (vector) operation should be implemented by widening the input
+  /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
+  /// rarely legal, but you might perform an <8 x i8> and then only look at
+  /// the first two results.
+  MoreElements,
+
+  /// The operation itself must be expressed in terms of simpler actions on
+  /// this target. E.g. an SREM replaced by an SDIV, a multiply, and a
+  /// subtraction.
+  Lower,
+
+  /// The operation should be implemented as a call to some kind of runtime
+  /// support library. For example this usually happens on machines that don't
+  /// support floating-point operations natively.
+  Libcall,
+
+  /// The target wants to do something special with this combination of
+  /// operand and type. A callback will be issued when it is needed.
+  Custom,
+
+  /// This operation is completely unsupported on the target. A programming
+  /// error has occurred.
+  Unsupported,
+
+  /// Sentinel value for when no action was found in the specified table.
+  NotFound,
+
+  /// Fall back onto the old rules.
+  /// TODO: Remove this once we've migrated
+  UseLegacyRules,
+};
+} // end namespace LegalizeActions
+
+using LegalizeActions::LegalizeAction;
+
+/// Legalization is decided based on an instruction's opcode, which type slot
+/// we're considering, and what the existing type is. These aspects are gathered
+/// together for convenience in the InstrAspect class.
+struct InstrAspect {
+  unsigned Opcode;
+  unsigned Idx = 0;
+  LLT Type;
+
+  InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Type(Type) {}
+  InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
+      : Opcode(Opcode), Idx(Idx), Type(Type) {}
+
+  bool operator==(const InstrAspect &RHS) const {
+    return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
+  }
+};
+
+/// The LegalityQuery object bundles together all the information that's needed
+/// to decide whether a given operation is legal or not.
+/// For efficiency, it doesn't make a copy of Types, so care must be taken not
+/// to free the underlying storage before using the query.
+struct LegalityQuery {
+  unsigned Opcode;
+  ArrayRef<LLT> Types;
+
+  raw_ostream &print(raw_ostream &OS) const;
+};
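+
+// Illustrative only: a query asking how to handle a 64-bit G_ADD could be
+// built roughly as
+//   LLT S64 = LLT::scalar(64);
+//   LegalityQuery Query{TargetOpcode::G_ADD, {S64}};
+// keeping the type storage alive for as long as the query is in use.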
+
+/// The result of a query. It either indicates a final answer of Legal or
+/// Unsupported or describes an action that must be taken to make an operation
+/// more legal.
+struct LegalizeActionStep {
+  /// The action to take or the final answer.
+  LegalizeAction Action;
+  /// If describing an action, the type index to change. Otherwise zero.
+  unsigned TypeIdx;
+  /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
+  LLT NewType;
+
+  LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
+                     const LLT &NewType)
+      : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}
+
+  bool operator==(const LegalizeActionStep &RHS) const {
+    return std::tie(Action, TypeIdx, NewType) ==
+        std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
+  }
+};
+
+using LegalityPredicate = std::function<bool (const LegalityQuery &)>;
+using LegalizeMutation =
+    std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
+
+namespace LegalityPredicates {
+/// True iff P0 and P1 are true.
+LegalityPredicate all(LegalityPredicate P0, LegalityPredicate P1);
+/// True iff the given type index is one of the specified types.
+LegalityPredicate typeInSet(unsigned TypeIdx,
+                            std::initializer_list<LLT> TypesInit);
+/// True iff the types at the given pair of type indexes form one of the
+/// specified type pairs.
+LegalityPredicate
+typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
+              std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the specified type index is a scalar.
+LegalityPredicate isScalar(unsigned TypeIdx);
+/// True iff the specified type index is a scalar that's narrower than the given
+/// size.
+LegalityPredicate narrowerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar that's wider than the given
+/// size.
+LegalityPredicate widerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar whose size is not a power of
+/// 2.
+LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+/// True iff the specified type index is a vector whose element count is not a
+/// power of 2.
+LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+} // end namespace LegalityPredicates
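+
+// Illustrative sketch: predicates compose, e.g. "type index 0 is a scalar
+// narrower than 32 bits" can be written as
+//   LegalityPredicate P = LegalityPredicates::all(
+//       LegalityPredicates::isScalar(0),
+//       LegalityPredicates::narrowerThan(0, 32));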
+
+namespace LegalizeMutations {
+/// Select this specific type for the given type index.
+LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+/// Widen the type for the given type index to the next power of 2.
+LegalizeMutation widenScalarToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+/// Add more elements to the type for the given type index to the next power of
+/// 2.
+LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+} // end namespace LegalizeMutations
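+
+// Illustrative only: a mutation that widens type index 0 to the next power of
+// two, but to no less than 32 bits:
+//   LegalizeMutation M = LegalizeMutations::widenScalarToNextPow2(0, 32);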
+
+/// A single rule in a legalizer info ruleset.
+/// The specified action is chosen when the predicate is true. Where appropriate
+/// for the action (e.g. for WidenScalar) the new type is selected using the
+/// given mutator.
+class LegalizeRule {
+  LegalityPredicate Predicate;
+  LegalizeAction Action;
+  LegalizeMutation Mutation;
+
+public:
+  LegalizeRule(LegalityPredicate Predicate, LegalizeAction Action,
+               LegalizeMutation Mutation = nullptr)
+      : Predicate(Predicate), Action(Action), Mutation(Mutation) {}
+
+  /// Test whether the LegalityQuery matches.
+  bool match(const LegalityQuery &Query) const {
+    return Predicate(Query);
+  }
+
+  LegalizeAction getAction() const { return Action; }
+
+  /// Determine the change to make.
+  std::pair<unsigned, LLT> determineMutation(const LegalityQuery &Query) const {
+    if (Mutation)
+      return Mutation(Query);
+    return std::make_pair(0, LLT{});
+  }
+};
+
+class LegalizeRuleSet {
+  /// When non-zero, the opcode we are an alias of.
+  unsigned AliasOf;
+  /// If true, there is another opcode that aliases this one.
+  bool IsAliasedByAnother;
+  SmallVector<LegalizeRule, 2> Rules;
+
+  void add(const LegalizeRule &Rule) {
+    assert(AliasOf == 0 &&
+           "RuleSet is aliased, change the representative opcode instead");
+    Rules.push_back(Rule);
+  }
+
+  static bool always(const LegalityQuery &) { return true; }
+
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action,
+                            LegalityPredicate Predicate) {
+    add({Predicate, Action});
+    return *this;
+  }
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
+                            LegalizeMutation Mutation) {
+    add({Predicate, Action, Mutation});
+    return *this;
+  }
+  /// Use the given action when type index 0 is any type in the given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typeInSet(0, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 form any type pair in the
+  /// given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionFor(LegalizeAction Action,
+            std::initializer_list<std::pair<LLT, LLT>> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typePairInSet(0, 1, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in the given list.
+  /// That is, the type pair is in the Cartesian product of the list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
+                                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types), typeInSet(1, Types)));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in their
+  /// respective lists.
+  /// That is, the type pair is in the Cartesian product of the lists.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionForCartesianProduct(LegalizeAction Action,
+                            std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types0), typeInSet(1, Types1)));
+  }
+
+public:
+  LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false), Rules() {}
+
+  bool isAliasedByAnother() { return IsAliasedByAnother; }
+  void setIsAliasedByAnother() { IsAliasedByAnother = true; }
+  void aliasTo(unsigned Opcode) {
+    assert((AliasOf == 0 || AliasOf == Opcode) &&
+           "Opcode is already aliased to another opcode");
+    assert(Rules.empty() && "Aliasing will discard rules");
+    AliasOf = Opcode;
+  }
+  unsigned getAlias() const { return AliasOf; }
+
+  /// The instruction is legal if predicate is true.
+  LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Legal, Predicate);
+  }
+  /// The instruction is legal when type index 0 is any type in the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 form any type pair in
+  /// the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in the given
+  /// list. That is, the type pair is in the Cartesian product of the list.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in their
+  /// respective lists.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
+  }
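+
+  // Illustrative sketch of how a target might chain these in its
+  // LegalizerInfo constructor (s32, s64 and p0 are assumed LLT locals, not
+  // part of this header):
+  //   getActionDefinitionsBuilder(G_ADD).legalFor({s32, s64});
+  //   getActionDefinitionsBuilder(G_LOAD)
+  //       .legalForCartesianProduct({s32, s64}, {p0});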
+
+  /// Like legalIf, but for the Libcall action.
+  LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Libcall, Predicate);
+  }
+  LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types0,
+                             std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types0, Types1);
+  }
+
+  /// Widen the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
+                                 LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
+  }
+  /// Narrow the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
+  }
+
+  /// Add more elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
+  }
+  /// Remove elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
+                                   LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
+  }
+
+  /// The instruction is unsupported.
+  LegalizeRuleSet &unsupported() {
+    return actionIf(LegalizeAction::Unsupported, always);
+  }
+  LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Unsupported, Predicate);
+  }
+
+  LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Custom, Predicate);
+  }
+  LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &
+  customForCartesianProduct(std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
+  }
+
+  /// Widen the scalar to the next power of two that is at least MinSize.
+  /// No effect if the type is not a scalar or is a power of two.
+  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize = 0) {
+    using namespace LegalityPredicates;
+    return widenScalarIf(
+        sizeNotPow2(TypeIdx),
+        LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
+  }
+
+  LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
+    using namespace LegalityPredicates;
+    return narrowScalarIf(isScalar(TypeIdx), Mutation);
+  }
+
+  /// Ensure the scalar is at least as wide as Ty.
+  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return widenScalarIf(narrowerThan(TypeIdx, Ty.getSizeInBits()),
+                         changeTo(TypeIdx, Ty));
+  }
+
+  /// Ensure the scalar is at most as wide as Ty.
+  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(widerThan(TypeIdx, Ty.getSizeInBits()),
+                          changeTo(TypeIdx, Ty));
+  }
+
+  /// Conditionally limit the maximum size of the scalar.
+  /// For example, when the maximum size of one type depends on the size of
+  /// another such as extracting N bits from an M bit container.
+  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(
+        [=](const LegalityQuery &Query) {
+          // Note: widerThan returns a predicate, which must be invoked on the
+          // query rather than tested for non-nullness.
+          return widerThan(TypeIdx, Ty.getSizeInBits())(Query) &&
+                 Predicate(Query);
+        },
+        changeTo(TypeIdx, Ty));
+  }
+
+  /// Limit the range of scalar sizes to MinTy and MaxTy.
+  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy, const LLT &MaxTy) {
+    assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
+
+    return minScalar(TypeIdx, MinTy)
+        .maxScalar(TypeIdx, MaxTy);
+  }
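+
+  // For example (an illustrative sketch): a target whose multiplies only
+  // exist at 32 and 64 bits might write
+  //   getActionDefinitionsBuilder(G_MUL)
+  //       .legalFor({s32, s64})
+  //       .clampScalar(0, s32, s64);
+  // so s1..s31 widen to s32 and anything wider than s64 narrows to s64.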
+
+  /// Add more elements to the vector to reach the next power of two.
+  /// No effect if the type is not a vector or the element count is a power of
+  /// two.
+  LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
+    using namespace LegalityPredicates;
+    return moreElementsIf(numElementsNotPow2(TypeIdx),
+                          LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+  }
+
+  /// Ensure the number of elements in EltTy vectors is at least MinElements.
+  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MinElements) {
+    return moreElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() < MinElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MinElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements in EltTy vectors to at most MaxElements.
+  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MaxElements) {
+    return fewerElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() > MaxElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MaxElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements for the given vectors to at least MinTy's
+  /// number of elements and at most MaxTy's number of elements.
+  ///
+  /// No effect if the type is not a vector or does not have the same element
+  /// type as the constraints.
+  /// The element type of MinTy and MaxTy must match.
+  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT &MinTy,
+                                    const LLT &MaxTy) {
+    assert(MinTy.getElementType() == MaxTy.getElementType() &&
+           "Expected element types to agree");
+
+    const LLT &EltTy = MinTy.getElementType();
+    return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
+        .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
+  }
+
+  /// Fall back on the previous implementation. This should only be used while
+  /// porting a rule.
+  LegalizeRuleSet &fallback() {
+    add({always, LegalizeAction::UseLegacyRules});
+    return *this;
+  }
+
+  /// Apply the ruleset to the given LegalityQuery.
+  LegalizeActionStep apply(const LegalityQuery &Query) const;
+};
+
+class LegalizerInfo {
+public:
+  LegalizerInfo();
+  virtual ~LegalizerInfo() = default;
+
+  unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;
+  unsigned getActionDefinitionsIdx(unsigned Opcode) const;
+
+  /// Compute any ancillary tables needed to quickly decide how an operation
+  /// should be handled. This must be called after all "set*Action" methods but
+  /// before any query is made or incorrect results may be returned.
+  void computeTables();
+
+  static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
+    using namespace LegalizeActions;
+    switch (Action) {
+    case NarrowScalar:
+    case WidenScalar:
+    case FewerElements:
+    case MoreElements:
+    case Unsupported:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  using SizeAndAction = std::pair<uint16_t, LegalizeAction>;
+  using SizeAndActionsVec = std::vector<SizeAndAction>;
+  using SizeChangeStrategy =
+      std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;
+
+  /// More friendly way to set an action for common types that have an LLT
+  /// representation.
+  /// The LegalizeAction must be one for which needsLegalizingToDifferentSize
+  /// returns false.
+  void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+    assert(!needsLegalizingToDifferentSize(Action));
+    TablesInitialized = false;
+    const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+    if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
+      SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
+    SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
+  }
+
+  /// The setAction calls record the non-size-changing legalization actions
+  /// to take on specifically-sized types. The SizeChangeStrategy defines what
+  /// to do when the size of the type needs to be changed to reach a legally
+  /// sized type (i.e., one that was defined through a setAction call).
+  /// e.g.
+  /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
+  /// setLegalizeScalarToDifferentSizeStrategy(
+  ///   G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+  /// will end up defining getAction({G_ADD, 0, T}) to return the following 
+  /// actions for different scalar types T:
+  ///  LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
+  ///  LLT::scalar(32):                 {Legal, 0, LLT::scalar(32)}
+  ///  LLT::scalar(33)..:               {NarrowScalar, 0, LLT::scalar(32)}
+  ///
+  /// If no SizeChangeStrategy is defined through this function,
+  /// the default is unsupportedForDifferentSizes.
+  void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
+                                                const unsigned TypeIdx,
+                                                SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// See also setLegalizeScalarToDifferentSizeStrategy.
+  /// This function lets you set the SizeChangeStrategy for vector elements.
+  void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
+                                                       const unsigned TypeIdx,
+                                                       SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a 
+  /// particular operation consists of only supporting a specific set of type
+  /// sizes. E.g.
+  ///   setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
+  ///   setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
+  ///   setLegalizeScalarToDifferentSizeStrategy(
+  ///     G_DIV, 0, unsupportedForDifferentSizes);
+  /// will result in getAction({G_DIV, 0, T}) returning Legal for s32 and s64,
+  /// and Unsupported for all other scalar types T.
+  static SizeAndActionsVec
+  unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
+                                                     Unsupported);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular operation consists of widening the type to a larger legal
+  /// type, unless no such type exists, in which case the type should be
+  /// narrowed to the largest legal type.
+  static SizeAndActionsVec
+  widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     NarrowScalar);
+  }
+
+  static SizeAndActionsVec
+  widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       WidenScalar);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular vector operation consists of adding more elements to the
+  /// vector, up to a type that is legal, unless no such type exists, in which
+  /// case it should be legalized towards the widest vector that is still
+  /// legal. E.g.
+  ///   setAction({G_ADD, LLT::vector(8, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(16, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(2, 32)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(4, 32)}, Legal);
+  ///   setLegalizeVectorElementToDifferentSizeStrategy(
+  ///     G_ADD, 0, moreToWiderTypesAndLessToWidest);
+  /// will result in the following getAction results:
+  ///   * getAction({G_ADD, LLT::vector(8,8)}) returns
+  ///       (Legal, vector(8,8)).
+  ///   * getAction({G_ADD, LLT::vector(9,8)}) returns
+  ///       (MoreElements, vector(16,8)).
+  ///   * getAction({G_ADD, LLT::vector(8,32)}) returns
+  ///       (FewerElements, vector(4,32)).
+  static SizeAndActionsVec
+  moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
+                                                     FewerElements);
+  }
+
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v,
+                                            LegalizeAction IncreaseAction,
+                                            LegalizeAction DecreaseAction);
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v,
+                                              LegalizeAction DecreaseAction,
+                                              LegalizeAction IncreaseAction);
+
+  /// Get the action definitions for the given opcode. Use this to run a
+  /// LegalityQuery through the definitions.
+  const LegalizeRuleSet &getActionDefinitions(unsigned Opcode) const;
+
+  /// Get the action definition builder for the given opcode. Use this to define
+  /// the action definitions.
+  ///
+  /// It is an error to request an opcode that has already been requested by the
+  /// multiple-opcode variant.
+  LegalizeRuleSet &getActionDefinitionsBuilder(unsigned Opcode);
+
+  /// Get the action definition builder for the given set of opcodes. Use this
+  /// to define the action definitions for multiple opcodes at once. The first
+  /// opcode given will be considered the representative opcode and will hold
+  /// the definitions whereas the other opcodes will be configured to refer to
+  /// the representative opcode. This lowers memory requirements and very
+  /// slightly improves performance.
+  ///
+  /// It would be very easy to introduce unexpected side-effects as a result of
+  /// this aliasing if it were permitted to request different but intersecting
+  /// sets of opcodes but that is difficult to keep track of. It is therefore an
+  /// error to request the same opcode twice using this API, to request an
+  /// opcode that already has definitions, or to use the single-opcode API on an
+  /// opcode that has already been requested by this API.
+  LegalizeRuleSet &
+  getActionDefinitionsBuilder(std::initializer_list<unsigned> Opcodes);
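+
+  // Illustrative use: the first opcode is the representative one and the
+  // others alias to it (a sketch, not part of this header):
+  //   getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
+  //       .legalFor({LLT::scalar(32)});
+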
+  void aliasActionDefinitions(unsigned OpcodeTo, unsigned OpcodeFrom);
+
+  /// Determine what action should be taken to legalize the described
+  /// instruction. Requires computeTables to have been called.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const LegalityQuery &Query) const;
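+  // Illustrative use (S7 being a hypothetical LLT::scalar(7) local):
+  //   LegalizeActionStep Step = getAction({TargetOpcode::G_ADD, {S7}});
+  //   if (Step.Action == LegalizeAction::WidenScalar)
+  //     ... // rewrite type index Step.TypeIdx of the G_ADD to Step.NewType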
+
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI) const;
+
+  bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+
+  virtual bool legalizeCustom(MachineInstr &MI,
+                              MachineRegisterInfo &MRI,
+                              MachineIRBuilder &MIRBuilder) const;
+
+private:
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction opcode, type-index and type. Requires computeTables to have
+  /// been called.
+  ///
+  /// \returns a pair consisting of the kind of legalization that should be
+  /// performed and the destination type.
+  std::pair<LegalizeAction, LLT>
+  getAspectAction(const InstrAspect &Aspect) const;
+
+  /// The SizeAndActionsVec is a representation mapping between all natural
+  /// numbers and an Action. The natural number represents the bit size of
+  /// the InstrAspect. For example, for a target with native support for 32-bit
+  /// and 64-bit additions, you'd express that as:
+  /// setScalarAction(G_ADD, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Legal},       // bit sizes [32, 33[
+  ///            {33, WidenScalar}, // bit sizes [33, 64[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, NarrowScalar} // bit sizes [65, +inf[
+  ///           });
+  /// It may be that only 64-bit pointers are supported on your target:
+  /// setPointerAction(G_GEP, 0, /*AddressSpace=*/1,
+  ///           {{1, Unsupported},  // bit sizes [ 1, 64[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, Unsupported}, // bit sizes [65, +inf[
+  ///           });
+  void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
+                       const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+  void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
+                        const unsigned AddressSpace,
+                        const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
+        AddrSpace2PointerActions[OpcodeIdx].end())
+      AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+  /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+  /// (so that there's at least one legal vector with that scalar), then we
+  /// adjust the number of elements in the vector so that it is legal. The
+  /// desired action in the first step is controlled by this function.
+  void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
+                               const SizeAndActionsVec &SizeAndActions) {
+    unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        ScalarInVectorActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// See also setScalarInVectorAction.
+  /// This function lets you specify the number of elements in a vector that
+  /// are legal for a legal element size.
+  void setVectorNumElementAction(const unsigned Opcode,
+                                 const unsigned TypeIndex,
+                                 const unsigned ElementSize,
+                                 const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
+        NumElements2Actions[OpcodeIdx].end())
+      NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
+  /// i.e. it's OK if it doesn't start from size 1.
+  static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
+    using namespace LegalizeActions;
+#ifndef NDEBUG
+    // The sizes should be in increasing order.
+    int prev_size = -1;
+    for (auto SizeAndAction : v) {
+      assert(SizeAndAction.first > prev_size);
+      prev_size = SizeAndAction.first;
+    }
+    // - for every Widen action, there should be a larger bitsize that
+    //   can be legalized towards (e.g. Legal, Lower, Libcall or Custom
+    //   action).
+    // - for every Narrow action, there should be a smaller bitsize that
+    //   can be legalized towards.
+    int SmallestNarrowIdx = -1;
+    int LargestWidenIdx = -1;
+    int SmallestLegalizableToSameSizeIdx = -1;
+    int LargestLegalizableToSameSizeIdx = -1;
+    for (size_t i = 0; i < v.size(); ++i) {
+      switch (v[i].second) {
+        case FewerElements:
+        case NarrowScalar:
+          if (SmallestNarrowIdx == -1)
+            SmallestNarrowIdx = i;
+          break;
+        case WidenScalar:
+        case MoreElements:
+          LargestWidenIdx = i;
+          break;
+        case Unsupported:
+          break;
+        default:
+          if (SmallestLegalizableToSameSizeIdx == -1)
+            SmallestLegalizableToSameSizeIdx = i;
+          LargestLegalizableToSameSizeIdx = i;
+      }
+    }
+    if (SmallestNarrowIdx != -1) {
+      assert(SmallestLegalizableToSameSizeIdx != -1);
+      assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
+    }
+    if (LargestWidenIdx != -1)
+      assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
+#endif
+  }
+
+  /// A full SizeAndActionsVec must cover all bit sizes, i.e. it must start
+  /// from size 1.
+  static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+    // Data structure invariant: The first bit size must be size 1.
+    assert(v.size() >= 1);
+    assert(v[0].first == 1);
+    checkPartialSizeAndActionsVector(v);
+#endif
+  }
+
+  /// Sets actions for all bit sizes on a particular generic opcode, type
+  /// index and scalar or pointer type.
+  void setActions(unsigned TypeIndex,
+                  SmallVector<SizeAndActionsVec, 1> &Actions,
+                  const SizeAndActionsVec &SizeAndActions) {
+    checkFullSizeAndActionsVector(SizeAndActions);
+    if (Actions.size() <= TypeIndex)
+      Actions.resize(TypeIndex + 1);
+    Actions[TypeIndex] = SizeAndActions;
+  }
+
+  static SizeAndAction findAction(const SizeAndActionsVec &Vec,
+                                  const uint32_t Size);
+
+  /// Returns the next action needed to get the scalar or pointer type closer
+  /// to being legal.
+  /// E.g. findScalarLegalAction({G_REM, 13}) should return
+  /// (WidenScalar, 32). After that, findScalarLegalAction({G_REM, 32}) will
+  /// probably be called, which should return (Lower, 32).
+  /// This is assuming the setScalarAction on G_REM was something like:
+  /// setScalarAction(G_REM, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Lower},       // bit sizes [32, 33[
+  ///            {33, NarrowScalar} // bit sizes [33, +inf[
+  ///           });
+  std::pair<LegalizeAction, LLT>
+  findScalarLegalAction(const InstrAspect &Aspect) const;
+
+  /// Returns the next action needed towards legalizing the vector type.
+  std::pair<LegalizeAction, LLT>
+  findVectorLegalAction(const InstrAspect &Aspect) const;
+
+  static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+  static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+
+  // Data structures used temporarily during construction of legality data:
+  using TypeMap = DenseMap<LLT, LegalizeAction>;
+  SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
+  bool TablesInitialized;
+
+  // Data structures used by getAction:
+  SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
+  SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      AddrSpace2PointerActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      NumElements2Actions[LastOp - FirstOp + 1];
+
+  LegalizeRuleSet RulesForOpcode[LastOp - FirstOp + 1];
+};
+
+#ifndef NDEBUG
+/// Checks that MIR is fully legal; returns an illegal instruction if it is
+/// not, and nullptr otherwise.
+const MachineInstr *machineFunctionIsIllegal(const MachineFunction &MF);
+#endif
+
+} // end namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
new file mode 100644
index 0000000..0a46eb9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -0,0 +1,78 @@
+//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the Localizer pass.
+/// This pass moves/duplicates constant-like instructions close to their uses.
+/// Its primary goal is to work around the deficiencies of the fast register
+/// allocator.
+/// With GlobalISel, constants are all materialized in the entry block of
+/// a function. However, the fast allocator cannot rematerialize constants, so
+/// it has many more live-ranges to deal with and will most likely end up
+/// spilling a lot.
+/// By pushing the constants close to their use, we only create small
+/// live-ranges.
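+/// For example (an illustrative gMIR sketch), a value such as
+///   %c(s32) = G_CONSTANT i32 42
+/// defined in the entry block but used in a distant block is re-emitted as a
+/// fresh G_CONSTANT in the using block, keeping each live-range short.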
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+// Forward declarations.
+class MachineRegisterInfo;
+
+/// This pass implements the localization mechanism described at the
+/// top of this file. One specific property of the implementation is that
+/// it will materialize one and only one instance of a constant per
+/// basic block, thus enabling reuse of that constant within that block.
+/// Moreover, it only materializes constants in blocks where they
+/// are used. PHI uses are considered to happen at the end of the
+/// related predecessor block.
+class Localizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI;
+
+  /// Check whether or not \p MI needs to be moved close to its uses.
+  static bool shouldLocalize(const MachineInstr &MI);
+
+  /// Check if \p MOUse is used in the same basic block as \p Def.
+  /// If the use is in the same block, we say it is local.
+  /// When the use is not local, \p InsertMBB will contain the basic
+  /// block where \p Def should be inserted to make the use local.
+  static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+                         MachineBasicBlock *&InsertMBB);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  Localizer();
+
+  StringRef getPassName() const override { return "Localizer"; }
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
new file mode 100644
index 0000000..797f5e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -0,0 +1,327 @@
+//==----- llvm/CodeGen/GlobalISel/MIPatternMatch.h --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Contains matchers for matching SSA Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_GMIR_PATTERNMATCH_H
+#define LLVM_GMIR_PATTERNMATCH_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+namespace MIPatternMatch {
+
+template <typename Reg, typename Pattern>
+bool mi_match(Reg R, MachineRegisterInfo &MRI, Pattern &&P) {
+  return P.match(MRI, R);
+}
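+
+// Illustrative use (DstReg is a hypothetical register, not part of this
+// header):
+//   int64_t Cst;
+//   unsigned Src;
+//   if (mi_match(DstReg, MRI, m_GAdd(m_Reg(Src), m_ICst(Cst)))) {
+//     // DstReg is defined by a G_ADD of register Src and constant Cst.
+//   }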
+
+// TODO: Extend for N uses.
+template <typename SubPatternT> struct OneUse_match {
+  SubPatternT SubPat;
+  OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
+  }
+};
+
+template <typename SubPat>
+inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
+  return SP;
+}
+
+struct ConstantMatch {
+  int64_t &CR;
+  ConstantMatch(int64_t &C) : CR(C) {}
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    if (auto MaybeCst = getConstantVRegVal(Reg, MRI)) {
+      CR = *MaybeCst;
+      return true;
+    }
+    return false;
+  }
+};
+
+inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+
+// TODO: Rework this for different kinds of MachineOperand.
+// Currently assumes the Src for a match is a register.
+// We might want to support taking in some MachineOperands and calling getReg
+// on those.
+
+struct operand_type_match {
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) { return true; }
+  bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
+    return MO->isReg();
+  }
+};
+
+inline operand_type_match m_Reg() { return operand_type_match(); }
+
+/// Matching combinators.
+template <typename... Preds> struct And {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return true;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct And<Pred, Preds...> : And<Preds...> {
+  Pred P;
+  And(Pred &&p, Preds &&... preds)
+      : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
+  }
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) && And<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> struct Or {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return false;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct Or<Pred, Preds...> : Or<Preds...> {
+  Pred P;
+  Or(Pred &&p, Preds &&... preds)
+      : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
+  return And<Preds...>(std::forward<Preds>(preds)...);
+}
+
+template <typename... Preds> Or<Preds...> m_any_of(Preds &&... preds) {
+  return Or<Preds...>(std::forward<Preds>(preds)...);
+}
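+
+// Illustrative only: combinators nest like the IR-level PatternMatch, e.g.
+//   if (mi_match(Reg, MRI,
+//                m_any_of(m_GZExt(m_Reg(Src)), m_GSExt(m_Reg(Src)))))
+//     ... // Reg is either a zext or a sext of Src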
+
+template <typename BindTy> struct bind_helper {
+  static bool bind(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
+    VR = V;
+    return true;
+  }
+};
+
+template <> struct bind_helper<MachineInstr *> {
+  static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
+                   unsigned Reg) {
+    MI = MRI.getVRegDef(Reg);
+    if (MI)
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<LLT> {
+  static bool bind(const MachineRegisterInfo &MRI, LLT &Ty, unsigned Reg) {
+    Ty = MRI.getType(Reg);
+    if (Ty.isValid())
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<const ConstantFP *> {
+  static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
+                   unsigned Reg) {
+    F = getConstantFPVRegVal(Reg, MRI);
+    if (F)
+      return true;
+    return false;
+  }
+};
+
+template <typename Class> struct bind_ty {
+  Class &VR;
+
+  bind_ty(Class &V) : VR(V) {}
+
+  template <typename ITy> bool match(const MachineRegisterInfo &MRI, ITy &&V) {
+    return bind_helper<Class>::bind(MRI, VR, V);
+  }
+};
+
+inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
+inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
+inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
+
+// Helper for matching G_FCONSTANT
+inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }
+
+// General helper for all the binary generic MIs such as G_ADD/G_SUB, etc.
+template <typename LHS_P, typename RHS_P, unsigned Opcode,
+          bool Commutable = false>
+struct BinaryOp_match {
+  LHS_P L;
+  RHS_P R;
+
+  BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
+        return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                R.match(MRI, TmpMI->getOperand(2).getReg())) ||
+               (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                               L.match(MRI, TmpMI->getOperand(2).getReg())));
+      }
+    }
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>
+m_GAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
+                                                            const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>
+m_GMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>
+m_GFAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>
+m_GFMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
+m_GAnd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
+}
+
+// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC), etc.
+template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
+  SrcTy L;
+
+  UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
+        return L.match(MRI, TmpMI->getOperand(1).getReg());
+      }
+    }
+    return false;
+  }
+};
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>
+m_GAnyExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_SEXT> m_GSExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_SEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT> m_GZExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT> m_GFPExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC> m_GTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>
+m_GBitcast(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>
+m_GPtrToInt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>
+m_GIntToPtr(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>
+m_GFPTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FABS> m_GFabs(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FABS>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
+}
+
+// Helper for checking if a Reg is of specific type.
+struct CheckType {
+  LLT Ty;
+  CheckType(const LLT &Ty) : Ty(Ty) {}
+
+  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.getType(Reg) == Ty;
+  }
+};
+
+inline CheckType m_SpecificType(LLT Ty) { return Ty; }
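+
+// Illustrative usage sketch (not part of the API): matchers compose, so a
+// type check can be nested inside an opcode matcher. Assuming MRI and Reg
+// are in scope, this matches Reg = G_TRUNC of any s64-typed register.
+//
+// if (mi_match(Reg, MRI, m_GTrunc(m_SpecificType(LLT::scalar(64))))) {
+//   // ... Reg is a truncation of a 64-bit scalar.
+// }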
+
+} // namespace GMIPatternMatch
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
new file mode 100644
index 0000000..ef4e0ad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -0,0 +1,808 @@
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.h - MIBuilder --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the MachineIRBuilder class.
+/// This is a helper class to build MachineInstr.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/Types.h"
+
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+
+
+namespace llvm {
+
+// Forward declarations.
+class MachineFunction;
+class MachineInstr;
+class TargetInstrInfo;
+
+/// Helper class to build MachineInstr.
+/// It internally keeps the insertion point and debug location used for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilder {
+  /// MachineFunction under construction.
+  MachineFunction *MF;
+  /// Information used to access the description of the opcodes.
+  const TargetInstrInfo *TII;
+  /// Information used to verify types are consistent and to create virtual
+  /// registers.
+  MachineRegisterInfo *MRI;
+  /// Debug location to be set to any instruction we create.
+  DebugLoc DL;
+
+  /// \name Fields describing the insertion point.
+  /// @{
+  MachineBasicBlock *MBB;
+  MachineBasicBlock::iterator II;
+  /// @}
+
+  std::function<void(MachineInstr *)> InsertedInstr;
+
+  const TargetInstrInfo &getTII() {
+    assert(TII && "TargetInstrInfo is not set");
+    return *TII;
+  }
+
+  void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res,
+                                    unsigned Op0, unsigned Op1);
+
+  unsigned getDestFromArg(unsigned Reg) { return Reg; }
+  unsigned getDestFromArg(LLT Ty) {
+    return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+  }
+  unsigned getDestFromArg(const TargetRegisterClass *RC) {
+    return getMF().getRegInfo().createVirtualRegister(RC);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB, unsigned Reg) {
+    MIB.addUse(Reg);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB,
+                     const MachineInstrBuilder &UseMIB) {
+    MIB.addUse(UseMIB->getOperand(0).getReg());
+  }
+
+  void addUsesFromArgs(MachineInstrBuilder &MIB) { }
+  template <typename UseArgTy, typename... UseArgsTy>
+  void addUsesFromArgs(MachineInstrBuilder &MIB, UseArgTy &&Arg1,
+                       UseArgsTy &&... Args) {
+    addUseFromArg(MIB, Arg1);
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+  }
+  unsigned getRegFromArg(unsigned Reg) { return Reg; }
+  unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
+    return MIB->getOperand(0).getReg();
+  }
+
+public:
+  /// Some constructors for easy use.
+  MachineIRBuilder() = default;
+  MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+  MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+    setInstr(MI);
+  }
+
+  /// Getter for the function we currently build.
+  MachineFunction &getMF() {
+    assert(MF && "MachineFunction is not set");
+    return *MF;
+  }
+
+  /// Getter for the basic block we currently build.
+  MachineBasicBlock &getMBB() {
+    assert(MBB && "MachineBasicBlock is not set");
+    return *MBB;
+  }
+
+  /// Current insertion point for new instructions.
+  MachineBasicBlock::iterator getInsertPt() {
+    return II;
+  }
+
+  /// Set the insertion point before the specified position.
+  /// \pre MBB must be in getMF().
+  /// \pre II must be a valid iterator in MBB.
+  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
+
+  /// \name Setters for the insertion point.
+  /// @{
+  /// Set the MachineFunction where to build instructions.
+  void setMF(MachineFunction &);
+
+  /// Set the insertion point to the end of \p MBB.
+  /// \pre \p MBB must be contained by getMF().
+  void setMBB(MachineBasicBlock &MBB);
+
+  /// Set the insertion point to before MI.
+  /// \pre MI must be in getMF().
+  void setInstr(MachineInstr &MI);
+  /// @}
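+
+  // A minimal usage sketch (illustrative only; MF, MBB and MI are assumed to
+  // be in scope):
+  //
+  //   MachineIRBuilder Builder(MF);
+  //   Builder.setMBB(MBB);   // Insert at the end of MBB...
+  //   Builder.setInstr(MI);  // ...or immediately before MI.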
+
+  /// \name Control where instructions we create are recorded (typically for
+  /// visiting again later during legalization).
+  /// @{
+  void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
+  void stopRecordingInsertions();
+  /// @}
+
+  /// Set the debug location to \p DL for all the next build instructions.
+  void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
+
+  /// Get the current instruction's debug location.
+  DebugLoc getDebugLoc() { return DL; }
+
+  /// Build and insert <empty> = \p Opcode <empty>.
+  /// The insertion point is the one set by the last call of either
+  /// setBasicBlock or setMI.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstr(unsigned Opcode);
+
+  /// DAG-like generic method for building arbitrary instructions as above.
+  /// \p Opc is the opcode for the instruction.
+  /// \p Ty is the destination; it may be an LLT, a TargetRegisterClass, or an
+  /// unsigned (an existing register).
+  /// \p Args is a variadic list of uses (unsigned or MachineInstrBuilder).
+  /// Uses of type MachineInstrBuilder will be converted to a register via
+  /// getOperand(0).getReg().
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+                                 UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
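+
+  // Illustrative sketch of the DAG-like form (Src0 and Src1 are assumed to be
+  // registers already in scope): a fresh s32 virtual register is created for
+  // the definition and the uses are appended in order.
+  //
+  //   auto Add = buildInstr(TargetOpcode::G_ADD, LLT::scalar(32), Src0, Src1);
+  //   unsigned Res = Add->getOperand(0).getReg();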
+
+  /// Build but don't insert <empty> = \p Opcode <empty>.
+  ///
+  /// \pre setMF, setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);
+
+  /// Insert an existing instruction at the insertion point.
+  MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
+  MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
+                                          const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
+  /// Expr).
+  MachineInstrBuilder buildIndirectDbgValue(unsigned Reg,
+                                            const MDNode *Variable,
+                                            const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in the stack slot specified by \p FI
+  /// (suitably modified by \p Expr).
+  MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable,
+                                      const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instructions specifying that \p Variable is
+  /// given by \p C (suitably modified by \p Expr).
+  MachineInstrBuilder buildConstDbgValue(const Constant &C,
+                                         const MDNode *Variable,
+                                         const MDNode *Expr);
+
+  /// Build and insert \p Res = G_FRAME_INDEX \p Idx
+  ///
+  /// G_FRAME_INDEX materializes the address of an alloca value or other
+  /// stack-based object.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
+
+  /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
+  ///
+  /// G_GLOBAL_VALUE materializes the address of the specified global
+  /// into \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type
+  ///      in the same address space as \p GV.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
+
+  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+  ///
+  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+  /// truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildAdd(Res, (getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+  ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildSub(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+  ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildMul(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_GEP \p Op0, \p Op1
+  ///
+  /// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p Op1 must be a generic virtual register with scalar type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
+  ///
+  /// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res. If \p Value is zero then no
+  /// G_GEP or G_CONSTANT will be created and \p Op0 will be assigned to
+  /// \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Op0 must be a generic virtual register with pointer type.
+  /// \pre \p ValueTy must be a scalar type.
+  /// \pre \p Res must be 0. This is to detect confusion between
+  ///      materializeGEP() and buildGEP().
+  /// \post \p Res will either be a new generic virtual register of the same
+  ///       type as \p Op0 or \p Op0 itself.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+                                               const LLT &ValueTy,
+                                               uint64_t Value);
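+
+  // Illustrative sketch (Ptr and Offset assumed in scope): Res must start out
+  // as 0; afterwards it holds either a new pointer vreg or Ptr itself when
+  // Offset is zero.
+  //
+  //   unsigned Res = 0;
+  //   auto MIB = materializeGEP(Res, Ptr, LLT::scalar(64), Offset);
+  //   // Use Res here; MIB is None when no G_GEP was needed.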
+
+  /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
+  ///
+  /// G_PTR_MASK clears the low bits of a pointer operand without destroying its
+  /// pointer properties. This has the effect of rounding the address *down* to
+  /// a specified alignment in bits.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p NumBits must be an integer representing the number of low bits to
+  ///      be cleared in \p Op0.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
+                                   uint32_t NumBits);
+
+  /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
+  /// \p Op1, \p CarryIn
+  ///
+  /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
+  /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
+  /// arithmetic.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same scalar type.
+  /// \pre \p CarryOut and \p CarryIn must be generic virtual
+  ///      registers with the same scalar type (typically s1)
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
+                                 unsigned Op1, unsigned CarryIn);
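+
+  // Illustrative sketch (all registers assumed in scope): a 64-bit addition
+  // built from 32-bit halves by chaining the carry through G_UADDE.
+  //
+  //   buildUAdde(ResLo, CarryLo, LHSLo, RHSLo, ZeroIn); // ZeroIn: s1 zero
+  //   buildUAdde(ResHi, CarryHi, LHSHi, RHSHi, CarryLo);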
+
+  /// Build and insert \p Res = G_AND \p Op0, \p Op1
+  ///
+  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_OR \p Op0, \p Op1
+  ///
+  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op0
+  ///
+  /// G_ANYEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are unspecified
+  /// (i.e. this is neither zero nor sign-extension). For a vector register,
+  /// each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildAnyExt(DstType &&Res, ArgType &&Arg) {
+    return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+
+  /// Build and insert \p Res = G_SEXT \p Op
+  ///
+  /// G_SEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are duplicated from the
+  /// high bit of \p Op (i.e. 2s-complement sign extended).
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildSExt(DstType &&Res, ArgType &&Arg) {
+    return buildSExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op
+  ///
+  /// G_ZEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are 0. For a vector
+  /// register, each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildZExt(DstType &&Res, ArgType &&Arg) {
+    return buildZExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildSExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildSExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildZExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildZExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildAnyExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildAnyExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
+  /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
+  /// \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, unsigned Res,
+                                      unsigned Op);
+
+  /// Build and insert an appropriate cast between two registers of equal size.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildCast(DstType &&Res, ArgType &&Arg) {
+    return buildCast(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
+
+  /// Build and insert G_BR \p BB
+  ///
+  /// G_BR is an unconditional branch to \p BB.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBr(MachineBasicBlock &BB);
+
+  /// Build and insert G_BRCOND \p Tst, \p BB
+  ///
+  /// G_BRCOND is a conditional branch to \p BB.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tst must be a generic virtual register with scalar
+  ///      type. At the beginning of legalization, this will be a single
+  ///      bit (s1). Targets with interesting flags registers may change
+  ///      this. For a wider type, whether the branch is taken must only
+  ///      depend on bit 0 (for now).
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
+
+  /// Build and insert G_BRINDIRECT \p Tgt
+  ///
+  /// G_BRINDIRECT is an indirect branch to \p Tgt.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tgt must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBrIndirect(unsigned Tgt);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value. \p
+  /// Val will be extended or truncated to the size of \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or pointer
+  ///      type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
+    return buildConstant(getDestFromArg(Res), Val);
+  }
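+
+  // Illustrative sketch: with the template overload, the destination vreg can
+  // be created on the fly from an LLT.
+  //
+  //   auto FortyTwo = buildConstant(LLT::scalar(32), 42);
+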
+  /// Build and insert \p Res = G_FCONSTANT \p Val
+  ///
+  /// G_FCONSTANT is a floating-point constant with the specified size and
+  /// value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, const ConstantFP &Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, double Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, double Val);
+
+  /// Build and insert \p Res = COPY Op
+  ///
+  /// Register-to-register COPY sets \p Res to \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildCopy(DstType &&Res, SrcType &&Src) {
+    return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert `Res = G_LOAD Addr, MMO`.
+  ///
+  /// Loads the value stored at \p Addr. Puts the result in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
+                                MachineMemOperand &MMO);
+
+  /// Build and insert `G_STORE Val, Addr, MMO`.
+  ///
+  /// Stores the value \p Val to \p Addr.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Val must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
+                                 MachineMemOperand &MMO);
+
+  /// Build and insert `Res = G_EXTRACT Src, Index`.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Src must be generic virtual registers.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
+
+  /// Build and insert \p Res = IMPLICIT_DEF.
+  template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
+    return buildUndef(getDestFromArg(Res));
+  }
+  MachineInstrBuilder buildUndef(unsigned Dst);
+
+  /// Build and insert instructions to put \p Ops together at the specified
+  /// \p Indices to form a larger register.
+  ///
+  /// If the types of the input registers are uniform and cover the entirety of
+  /// \p Res then a G_MERGE_VALUES will be produced. Otherwise an IMPLICIT_DEF
+  /// followed by a sequence of G_INSERT instructions will be produced.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The final element of the sequence must not extend past the end of the
+  ///      destination register.
+  /// \pre The bits defined by each Op (derived from index and scalar size) must
+  ///      not overlap.
+  /// \pre \p Indices must be in ascending order of bit position.
+  void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+                     ArrayRef<uint64_t> Indices);
+
+  /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
+  ///
+  /// G_MERGE_VALUES combines the input elements contiguously into a larger
+  /// register.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Res (and no more) must be covered by the input
+  ///      registers.
+  /// \pre The type of all \p Ops registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
+
+  /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
+  ///
+  /// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
+  /// registers.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Op (and no more) must be covered by the
+  ///      output registers.
+  /// \pre The type of all \p Res registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, unsigned Op);
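+
+  // Illustrative sketch (registers assumed in scope): G_MERGE_VALUES and
+  // G_UNMERGE_VALUES are inverses; two s32 parts form one s64 and back.
+  //
+  //   buildMerge(Wide64, {Lo32, Hi32});
+  //   buildUnmerge({NewLo32, NewHi32}, Wide64);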
+
+  MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
+                                  unsigned Op, unsigned Index);
+
+  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
+  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
+  /// result register definition unless \p Res is NoReg (== 0). The second
+  /// operand will be the intrinsic's ID.
+  ///
+  /// Callers are expected to add the required definitions and uses afterwards.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+                                     bool HasSideEffects);
+
+  /// Build and insert \p Res = G_FPTRUNC \p Op
+  ///
+  /// G_FPTRUNC converts a floating-point value into one with a smaller type.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildFPTrunc(DstType &&Res, SrcType &&Src) {
+    return buildFPTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+  MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_TRUNC \p Op
+  ///
+  /// G_TRUNC extracts the low bits of a type. For a vector type each element is
+  /// truncated independently before being packed into the destination.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildTrunc(DstType &&Res, SrcType &&Src) {
+    return buildTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res. If \p Res is a scalar,
+  ///      \p Op0 must be either a scalar or pointer.
+  /// \pre \p Pred must be an integer predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res (or scalar, if \p Res is
+  ///      scalar).
+  /// \pre \p Pred must be a floating-point predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same type.
+  /// \pre \p Tst must be a generic virtual register with scalar, pointer or
+  ///      vector type. If vector then it must have the same number of
+  ///      elements as the other parameters.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
+                                  unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
+  /// \p Elt, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Val must be generic virtual registers
+  ///      with the same vector type.
+  /// \pre \p Elt and \p Idx must be generic virtual registers
+  ///      with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
+                                               unsigned Elt, unsigned Idx);
+
+  /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  /// \pre \p Val must be a generic virtual register with vector type.
+  /// \pre \p Idx must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
+                                                unsigned Idx);
+
+  /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+  /// MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register of scalar type.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+  ///      registers of the same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
+                                         unsigned CmpVal, unsigned NewVal,
+                                         MachineMemOperand &MMO);
+};
+
+} // End namespace llvm.
+#endif // LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
new file mode 100644
index 0000000..c53ae41
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -0,0 +1,665 @@
+//=- llvm/CodeGen/GlobalISel/RegBankSelect.h - Reg Bank Selector --*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for assigning generic virtual registers to a register bank.
+///
+/// By default, the reg bank selector relies on local decisions to
+/// assign the register bank. In other words, it looks at one instruction
+/// at a time to decide where the operand of that instruction should live.
+///
+/// At higher optimization levels, we could imagine that the reg bank selector
+/// would use more global analysis and do crazier things, like duplicating
+/// instructions and so on. This is future work.
+///
+/// For now, the pass uses a greedy algorithm to decide where the operand
+/// of an instruction should live. It asks the target which banks may be
+/// used for each operand of the instruction and what the cost is. Then,
+/// it chooses the solution which minimizes the cost of the instruction plus
+/// the cost of any move that may be needed to get the values into the right
+/// register bank.
+/// In other words, the cost for an instruction on a register bank RegBank
+/// is: Cost of I on RegBank plus the sum of the cost for bringing the
+/// input operands from their current register bank to RegBank.
+/// This gives the following formula:
+/// cost(I, RegBank) = cost(I.Opcode, RegBank) +
+///    sum(for each arg in I.arguments: costCrossCopy(arg.RegBank, RegBank))
+///
+/// E.g., let's say we are assigning the register bank for the instruction
+/// defining v2:
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// v2 = G_ADD i32 v0, v1 <-- MI
+///
+/// The target may say it can generate G_ADD i32 on register banks A and B
+/// with a cost of 5 and 1, respectively.
+/// Then, let's say the cost of a cross-register-bank copy from A to B is 1.
+/// The reg bank selector would compare the following two costs:
+/// cost(MI, A_REGBANK) = cost(G_ADD, A_REGBANK) + cost(v0.RegBank, A_REGBANK) +
+///    cost(v1.RegBank, A_REGBANK)
+///                     = 5 + cost(A_REGBANK, A_REGBANK) + cost(A_REGBANK,
+///                                                             A_REGBANK)
+///                     = 5 + 0 + 0 = 5
+/// cost(MI, B_REGBANK) = cost(G_ADD, B_REGBANK) + cost(v0.RegBank, B_REGBANK) +
+///    cost(v1.RegBank, B_REGBANK)
+///                     = 1 + cost(A_REGBANK, B_REGBANK) + cost(A_REGBANK,
+///                                                             B_REGBANK)
+///                     = 1 + 1 + 1 = 3
+/// Therefore, in this specific example, the reg bank selector would choose
+/// bank B for MI.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// tmp0(B_REGBANK) = COPY v0
+/// tmp1(B_REGBANK) = COPY v1
+/// v2(B_REGBANK) = G_ADD i32 tmp0, tmp1
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BlockFrequency;
+class MachineBlockFrequencyInfo;
+class MachineBranchProbabilityInfo;
+class MachineOperand;
+class MachineRegisterInfo;
+class Pass;
+class raw_ostream;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// This pass implements the reg bank selector pass used in the GlobalISel
+/// pipeline. At the end of this pass, all register operands have been
+/// assigned to a register bank.
+class RegBankSelect : public MachineFunctionPass {
+public:
+  static char ID;
+
+  /// List of the modes supported by the RegBankSelect pass.
+  enum Mode {
+    /// Assign the register banks as fast as possible (default).
+    Fast,
+    /// Greedily minimize the cost of assigning register banks.
+    /// This should produce code of greater quality, but will
+    /// require more compile time.
+    Greedy
+  };
+
+  /// Abstract class used to represent an insertion point in a CFG.
+  /// This class records an insertion point and materializes it on
+  /// demand.
+  /// It allows reasoning about the frequency of this insertion point,
+  /// without having to logically materialize it (e.g., on an edge),
+  /// before we actually need to insert something.
+  class InsertPoint {
+  protected:
+    /// Whether the insert point has already been materialized.
+    bool WasMaterialized = false;
+
+    /// Materialize the insertion point.
+    ///
+    /// If isSplit() is true, this involves actually splitting
+    /// the block or edge.
+    ///
+    /// \post getPointImpl() returns a valid iterator.
+    /// \post getInsertMBBImpl() returns a valid basic block.
+    /// \post isSplit() == false ; no more splitting should be required.
+    virtual void materialize() = 0;
+
+    /// Return the materialized insertion basic block.
+    /// Code will be inserted into that basic block.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock &getInsertMBBImpl() = 0;
+
+    /// Return the materialized insertion point.
+    /// Code will be inserted before that point.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock::iterator getPointImpl() = 0;
+
+  public:
+    virtual ~InsertPoint() = default;
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+    /// the iterator to that point. I.e., no more splitting will
+    /// occur.
+    ///
+    /// \return The iterator that should be used with
+    /// MachineBasicBlock::insert. I.e., additional code happens
+    /// before that point.
+    MachineBasicBlock::iterator getPoint() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getPointImpl();
+    }
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+    /// the basic block that contains the insertion point.
+    /// I.e., no more splitting will occur.
+    ///
+    /// \return The basic block that should be used with
+    /// MachineBasicBlock::insert and ::getPoint. The new code should
+    /// be placed before that point.
+    MachineBasicBlock &getInsertMBB() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getInsertMBBImpl();
+    }
+
+    /// Insert \p MI just before ::getPoint().
+    MachineBasicBlock::iterator insert(MachineInstr &MI) {
+      return getInsertMBB().insert(getPoint(), &MI);
+    }
+
+    /// Does this point involve splitting an edge or block?
+    /// As soon as ::getPoint is called and thus, the point
+    /// materialized, the point will not require splitting anymore,
+    /// i.e., this will return false.
+    virtual bool isSplit() const { return false; }
+
+    /// Frequency of the insertion point.
+    /// \p P is used to access the various analyses that will help to
+    /// get that information, like MachineBlockFrequencyInfo. If \p P
+    /// does not contain enough information to return the actual frequency,
+    /// this returns 1.
+    virtual uint64_t frequency(const Pass &P) const { return 1; }
+
+    /// Check whether this insertion point can be materialized.
+    /// As soon as ::getPoint is called, and the point is thus materialized,
+    /// calling this method does not make sense.
+    virtual bool canMaterialize() const { return false; }
+  };
+
+  /// Insertion point before or after an instruction.
+  class InstrInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineInstr &Instr;
+
+    /// Whether the insertion point is before or after Instr.
+    bool Before;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      if (Before)
+        return Instr;
+      return Instr.getNextNode() ? *Instr.getNextNode()
+                                 : Instr.getParent()->end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override {
+      return *Instr.getParent();
+    }
+
+  public:
+    /// Create an insertion point before (\p Before=true) or after \p Instr.
+    InstrInsertPoint(MachineInstr &Instr, bool Before = true);
+
+    bool isSplit() const override;
+    uint64_t frequency(const Pass &P) const override;
+
+    // Worst case, we need to split the basic block, but that is still
+    // doable.
+    bool canMaterialize() const override { return true; }
+  };
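+
+  // Illustrative sketch (MI and NewMI assumed in scope): insertion points are
+  // materialized lazily, so insert() triggers any required splitting first.
+  //
+  //   RegBankSelect::InstrInsertPoint Pt(MI, /*Before=*/true);
+  //   Pt.insert(*NewMI);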
+
+  /// Insertion point at the beginning or end of a basic block.
+  class MBBInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineBasicBlock &MBB;
+
+    /// Whether the insertion point is at the beginning or end of MBB.
+    bool Beginning;
+
+    void materialize() override { /* Nothing to do to materialize. */ }
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      return Beginning ? MBB.begin() : MBB.end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return MBB; }
+
+  public:
+    MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
+        : InsertPoint(), MBB(MBB), Beginning(Beginning) {
+      // If we try to insert before phis, we should use the insertion
+      // points on the incoming edges.
+      assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
+             "Invalid beginning point");
+      // If we try to insert after the terminators, we should use the
+      // points on the outgoing edges.
+      assert((Beginning || MBB.getFirstTerminator() == MBB.end()) &&
+             "Invalid end point");
+    }
+
+    bool isSplit() const override { return false; }
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override { return true; }
+  };
+
+  /// Insertion point on an edge.
+  class EdgeInsertPoint : public InsertPoint {
+  private:
+    /// Source of the edge.
+    MachineBasicBlock &Src;
+
+    /// Destination of the edge.
+    /// After the materialization is done, this holds the basic block
+    /// that resulted from the splitting.
+    MachineBasicBlock *DstOrSplit;
+
+    /// P is used to update the analysis passes as applicable.
+    Pass &P;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      // DstOrSplit should be the Split block at this point.
+      // I.e., it should have one predecessor, Src, and one successor,
+      // the original Dst.
+      assert(DstOrSplit && DstOrSplit->isPredecessor(&Src) &&
+             DstOrSplit->pred_size() == 1 && DstOrSplit->succ_size() == 1 &&
+             "Did not split?!");
+      return DstOrSplit->begin();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return *DstOrSplit; }
+
+  public:
+    EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
+        : InsertPoint(), Src(Src), DstOrSplit(&Dst), P(P) {}
+
+    bool isSplit() const override {
+      return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
+    }
+
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override;
+  };
+
+  /// Struct used to represent the placement of a repairing point for
+  /// a given operand.
+  class RepairingPlacement {
+  public:
+    /// Define the kind of action this repairing needs.
+    enum RepairingKind {
+      /// Nothing to repair, just drop this action.
+      None,
+      /// Repairing code needs to happen before the InsertPoints.
+      Insert,
+      /// (Re)assign the register bank of the operand.
+      Reassign,
+      /// Mark this repairing placement as impossible.
+      Impossible
+    };
+
+    /// \name Convenient types for a list of insertion points.
+    /// @{
+    using InsertionPoints = SmallVector<std::unique_ptr<InsertPoint>, 2>;
+    using insertpt_iterator = InsertionPoints::iterator;
+    using const_insertpt_iterator = InsertionPoints::const_iterator;
+    /// @}
+
+  private:
+    /// Kind of repairing.
+    RepairingKind Kind;
+    /// Index of the operand that will be repaired.
+    unsigned OpIdx;
+    /// Are all the insert points materializable?
+    bool CanMaterialize;
+    /// Do any of the insert points need splitting?
+    bool HasSplit = false;
+    /// Insertion points for the repairing code.
+    /// The repairing code needs to happen just before these points.
+    InsertionPoints InsertPoints;
+    /// Some insertion points may need to update the liveness and such.
+    Pass &P;
+
+  public:
+    /// Create a repairing placement for the \p OpIdx-th operand of
+    /// \p MI. \p TRI is used to make some checks on the register aliases
+    /// if the machine operand is a physical register. \p P is used
+    /// to update liveness information and such when materializing the
+    /// points.
+    RepairingPlacement(MachineInstr &MI, unsigned OpIdx,
+                       const TargetRegisterInfo &TRI, Pass &P,
+                       RepairingKind Kind = RepairingKind::Insert);
+
+    /// \name Getters.
+    /// @{
+    RepairingKind getKind() const { return Kind; }
+    unsigned getOpIdx() const { return OpIdx; }
+    bool canMaterialize() const { return CanMaterialize; }
+    bool hasSplit() { return HasSplit; }
+    /// @}
+
+    /// \name Overloaded methods to add an insertion point.
+    /// @{
+    /// Add an MBBInsertPoint to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
+    /// Add an InstrInsertPoint to the list of InsertPoints.
+    void addInsertPoint(MachineInstr &MI, bool Before);
+    /// Add an EdgeInsertPoint (\p Src, \p Dst) to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst);
+    /// Add an InsertPoint to the list of insert points.
+    /// This method takes ownership of \p Point.
+    void addInsertPoint(InsertPoint &Point);
+    /// @}
+
+    /// \name Accessors related to the insertion points.
+    /// @{
+    insertpt_iterator begin() { return InsertPoints.begin(); }
+    insertpt_iterator end() { return InsertPoints.end(); }
+
+    const_insertpt_iterator begin() const { return InsertPoints.begin(); }
+    const_insertpt_iterator end() const { return InsertPoints.end(); }
+
+    unsigned getNumInsertPoints() const { return InsertPoints.size(); }
+    /// @}
+
+    /// Change the type of this repairing placement to \p NewKind.
+    /// It is not possible to switch a repairing placement to
+    /// RepairingKind::Insert. There is no fundamental problem with
+    /// that, but there are no uses either, so we do not support it for now.
+    ///
+    /// \pre NewKind != RepairingKind::Insert
+    /// \post getKind() == NewKind
+    void switchTo(RepairingKind NewKind) {
+      assert(NewKind != Kind && "Already of the right Kind");
+      Kind = NewKind;
+      InsertPoints.clear();
+      CanMaterialize = NewKind != RepairingKind::Impossible;
+      HasSplit = false;
+      assert(NewKind != RepairingKind::Insert &&
+             "We would need more MI to switch to Insert");
+    }
+  };
+
+private:
+  /// Helper class used to represent the cost for mapping an instruction.
+  /// When mapping an instruction, we may introduce some repairing code.
+  /// In most cases, the repairing code is local to the instruction,
+  /// thus, we can omit the basic block frequency from the cost.
+  /// However, some alternatives may produce non-local cost, e.g., when
+  /// repairing a phi, and thus we then need to scale the local cost
+  /// to the non-local cost. This class does this for us.
+  /// \note We could simply always scale the cost. The problem is that
+  /// we would then be more likely to saturate the cost and end
+  /// up having the same cost for actually different alternatives.
+  /// Another option would be to use APInt everywhere.
+  class MappingCost {
+  private:
+    /// Cost of the local instructions.
+    /// This cost is free of basic block frequency.
+    uint64_t LocalCost = 0;
+    /// Cost of the non-local instructions.
+    /// This cost should include the frequency of the related blocks.
+    uint64_t NonLocalCost = 0;
+    /// Frequency of the block where the local instructions live.
+    uint64_t LocalFreq;
+
+    MappingCost(uint64_t LocalCost, uint64_t NonLocalCost, uint64_t LocalFreq)
+        : LocalCost(LocalCost), NonLocalCost(NonLocalCost),
+          LocalFreq(LocalFreq) {}
+
+    /// Check if this cost is saturated.
+    bool isSaturated() const;
+
+  public:
+    /// Create a MappingCost assuming that most of the instructions
+    /// will occur in a basic block with \p LocalFreq frequency.
+    MappingCost(const BlockFrequency &LocalFreq);
+
+    /// Add \p Cost to the local cost.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addLocalCost(uint64_t Cost);
+
+    /// Add \p Cost to the non-local cost.
+    /// Non-local cost should reflect the frequency of their placement.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addNonLocalCost(uint64_t Cost);
+
+    /// Saturate the cost to the maximal representable value.
+    void saturate();
+
+    /// Return an instance of MappingCost that represents an
+    /// impossible mapping.
+    static MappingCost ImpossibleCost();
+
+    /// Check if this is less than \p Cost.
+    bool operator<(const MappingCost &Cost) const;
+    /// Check if this is equal to \p Cost.
+    bool operator==(const MappingCost &Cost) const;
+    /// Check if this is not equal to \p Cost.
+    bool operator!=(const MappingCost &Cost) const { return !(*this == Cost); }
+    /// Check if this is greater than \p Cost.
+    bool operator>(const MappingCost &Cost) const {
+      return *this != Cost && Cost < *this;
+    }
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Overload the stream operator for easy debug printing.
+    friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
+      Cost.print(OS);
+      return OS;
+    }
+  };
+
+  /// Interface to the target lowering info related
+  /// to register banks.
+  const RegisterBankInfo *RBI = nullptr;
+
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI = nullptr;
+
+  /// Information on the register classes for the current function.
+  const TargetRegisterInfo *TRI = nullptr;
+
+  /// Get the frequency of blocks.
+  /// This is required for non-fast mode.
+  MachineBlockFrequencyInfo *MBFI = nullptr;
+
+  /// Get the frequency of the edges.
+  /// This is required for non-fast mode.
+  MachineBranchProbabilityInfo *MBPI = nullptr;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<MachineOptimizationRemarkEmitter> MORE;
+
+  /// Helper class used for every code morphing.
+  MachineIRBuilder MIRBuilder;
+
+  /// Optimization mode of the pass.
+  Mode OptMode;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Assign the register bank of each operand of \p MI.
+  /// \return True on success, false otherwise.
+  bool assignInstr(MachineInstr &MI);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+  /// Check if \p Reg is already assigned what is described by \p ValMapping.
+  /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
+  /// register bank.  I.e., no repairing is necessary to have the
+  /// assignment match.
+  bool assignmentMatch(unsigned Reg,
+                       const RegisterBankInfo::ValueMapping &ValMapping,
+                       bool &OnlyAssign) const;
+
+  /// Insert repairing code for \p Reg as specified by \p ValMapping.
+  /// The repairing placement is specified by \p RepairPt.
+  /// \p NewVRegs contains all the registers required to remap \p Reg.
+  /// In other words, the number of registers in NewVRegs must be equal
+  /// to ValMapping.NumBreakDowns.
+  ///
+  /// The transformation could be sketched as:
+  /// \code
+  /// ... = op Reg
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// <NewRegs> = COPY or extract Reg
+  /// ... = op Reg
+  /// \endcode
+  ///
+  /// and
+  /// \code
+  /// Reg = op ...
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// Reg = op ...
+  /// Reg = COPY or build_sequence <NewRegs>
+  /// \endcode
+  ///
+  /// \pre NewVRegs.size() == ValMapping.NumBreakDowns
+  ///
+  /// \note The caller is supposed to do the rewriting of op if need be.
+  /// I.e., Reg = op ... => <NewRegs> = NewOp ...
+  ///
+  /// \return True if the repairing worked, false otherwise.
+  bool repairReg(MachineOperand &MO,
+                 const RegisterBankInfo::ValueMapping &ValMapping,
+                 RegBankSelect::RepairingPlacement &RepairPt,
+                 const iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+                     &NewVRegs);
+
+  /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
+  /// The cost is free of basic block frequencies.
+  /// \pre MO.isReg()
+  /// \pre MO is assigned to a register bank.
+  /// \pre ValMapping is a valid mapping for MO.
+  uint64_t
+  getRepairCost(const MachineOperand &MO,
+                const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Find the best mapping for \p MI from \p PossibleMappings.
+  /// \return a reference on the best mapping in \p PossibleMappings.
+  const RegisterBankInfo::InstructionMapping &
+  findBestMapping(MachineInstr &MI,
+                  RegisterBankInfo::InstructionMappings &PossibleMappings,
+                  SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+  /// Compute the cost of mapping \p MI with \p InstrMapping and
+  /// compute the repairing placement for such mapping in \p
+  /// RepairPts.
+  /// \p BestCost is used to specify when the cost becomes too high
+  /// and thus it is not worth computing the RepairPts.  Moreover if
+  /// \p BestCost == nullptr, the mapping cost is actually not
+  /// computed.
+  MappingCost
+  computeMapping(MachineInstr &MI,
+                 const RegisterBankInfo::InstructionMapping &InstrMapping,
+                 SmallVectorImpl<RepairingPlacement> &RepairPts,
+                 const MappingCost *BestCost = nullptr);
+
+  /// When \p RepairPt involves splitting to repair \p MO for the
+  /// given \p ValMapping, try to change the way we repair such that
+  /// the splitting is not required anymore.
+  ///
+  /// \pre \p RepairPt.hasSplit()
+  /// \pre \p MO == MO.getParent()->getOperand(\p RepairPt.getOpIdx())
+  /// \pre \p ValMapping is the mapping of \p MO for MO.getParent()
+  ///      that implied \p RepairPt.
+  void tryAvoidingSplit(RegBankSelect::RepairingPlacement &RepairPt,
+                        const MachineOperand &MO,
+                        const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Apply \p InstrMapping to \p MI. \p RepairPts represents the different
+  /// mapping actions that need to happen for the mapping to be
+  /// applied.
+  /// \return True if the mapping was applied successfully, false otherwise.
+  bool applyMapping(MachineInstr &MI,
+                    const RegisterBankInfo::InstructionMapping &InstrMapping,
+                    SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+public:
+  /// Create a RegBankSelect pass with the specified \p RunningMode.
+  RegBankSelect(Mode RunningMode = Fast);
+
+  StringRef getPassName() const override { return "RegBankSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  /// Walk through \p MF and assign a register bank to every virtual register
+  /// that is still mapped to nothing.
+  /// The target needs to provide a RegisterBankInfo and in particular
+  /// override RegisterBankInfo::getInstrMapping.
+  ///
+  /// Simplified algo:
+  /// \code
+  ///   RBI = MF.subtarget.getRegBankInfo()
+  ///   MIRBuilder.setMF(MF)
+  ///   for each bb in MF
+  ///     for each inst in bb
+  ///       MIRBuilder.setInstr(inst)
+  ///       MappingCosts = RBI.getMapping(inst);
+  ///       Idx = findIdxOfMinCost(MappingCosts)
+  ///       CurRegBank = MappingCosts[Idx].RegBank
+  ///       MRI.setRegBank(inst.getOperand(0).getReg(), CurRegBank)
+  ///       for each argument in inst
+  ///         if (CurRegBank != argument.RegBank)
+  ///           ArgReg = argument.getReg()
+  ///           Tmp = MRI.createNewVirtual(MRI.getSize(ArgReg), CurRegBank)
+  ///           MIRBuilder.buildInstr(COPY, Tmp, ArgReg)
+  ///           inst.getOperand(argument.getOperandNo()).setReg(Tmp)
+  /// \endcode
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
new file mode 100644
index 0000000..5d75842
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -0,0 +1,99 @@
+//==-- llvm/CodeGen/GlobalISel/RegisterBank.h - Register Bank ----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+// Forward declarations.
+class RegisterBankInfo;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// This class implements the register bank concept.
+/// Two instances of RegisterBank must have different IDs.
+/// This property is enforced by the RegisterBankInfo class.
+class RegisterBank {
+private:
+  unsigned ID;
+  const char *Name;
+  unsigned Size;
+  BitVector ContainedRegClasses;
+
+  /// Sentinel value used to recognize a register bank that has not been
+  /// properly initialized yet.
+  static const unsigned InvalidID;
+
+  /// Only the RegisterBankInfo can initialize RegisterBank properly.
+  friend RegisterBankInfo;
+
+public:
+  RegisterBank(unsigned ID, const char *Name, unsigned Size,
+               const uint32_t *ContainedRegClasses, unsigned NumRegClasses);
+
+  /// Get the identifier of this register bank.
+  unsigned getID() const { return ID; }
+
+  /// Get a user-friendly name of this register bank.
+  /// Should be used only for debugging purposes.
+  const char *getName() const { return Name; }
+
+  /// Get the maximal size in bits that fits in this register bank.
+  unsigned getSize() const { return Size; }
+
+  /// Check whether this instance is ready to be used.
+  bool isValid() const;
+
+  /// Check if this register bank is valid. In other words,
+  /// if it has been properly constructed.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+
+  /// Check whether this register bank covers \p RC.
+  /// In other words, check if this register bank fully covers
+  /// the registers that \p RC contains.
+  /// \pre isValid()
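+  /// For instance (a sketch; \c GPRBank and \c MyTarget::GPR32RegClass are
+  /// illustrative names):
+  /// \code
+  /// if (GPRBank.covers(MyTarget::GPR32RegClass))
+  ///   ...; // every register of GPR32 fits in GPRBank
+  /// \endcode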
+  bool covers(const TargetRegisterClass &RC) const;
+
+  /// Check whether \p OtherRB is the same as this.
+  bool operator==(const RegisterBank &OtherRB) const;
+  bool operator!=(const RegisterBank &OtherRB) const {
+    return !this->operator==(OtherRB);
+  }
+
+  /// Dump the register mask on dbgs() stream.
+  /// The dump is verbose.
+  void dump(const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Print the register mask on OS.
+  /// If IsForDebug is false, then only the name of the register bank
+  /// is printed. Otherwise, all the fields are printed.
+  /// TRI is then used to print the name of the register classes that
+  /// this register bank covers.
+  void print(raw_ostream &OS, bool IsForDebug = false,
+             const TargetRegisterInfo *TRI = nullptr) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
+  RegBank.print(OS);
+  return OS;
+}
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
new file mode 100644
index 0000000..82fd7ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -0,0 +1,757 @@
+//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the register bank info.
+/// This API is responsible for handling the register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <initializer_list>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class RegisterBank;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Holds all the information related to register banks.
+class RegisterBankInfo {
+public:
+  /// Helper struct that represents how a value is partially mapped
+  /// into a register.
+  /// The StartIdx and Length represent what region of the original
+  /// value this partial mapping covers.
+  /// This can be represented as a Mask of contiguous bits starting
+  /// at the StartIdx-th bit and spanning Length bits.
+  /// StartIdx is counted from the least significant bit.
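+  ///
+  /// For instance, the high 32 bits of a 64-bit value living on a bank
+  /// GPRBank (an illustrative name) would be described as (a sketch):
+  /// \code
+  /// PartialMapping PM(/*StartIdx=*/32, /*Length=*/32, GPRBank);
+  /// // PM.getHighBitIdx() == 63
+  /// \endcode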
+  struct PartialMapping {
+    /// Number of bits at which this partial mapping starts in the
+    /// original value.  The bits are counted from least significant
+    /// bits to most significant bits.
+    unsigned StartIdx;
+
+    /// Length of this mapping in bits. This is how many bits this
+    /// partial mapping covers in the original value:
+    /// from StartIdx to StartIdx + Length - 1.
+    unsigned Length;
+
+    /// Register bank where the partial value lives.
+    const RegisterBank *RegBank;
+
+    PartialMapping() = default;
+
+    /// Provide a shortcut for quickly building PartialMapping.
+    PartialMapping(unsigned StartIdx, unsigned Length,
+                   const RegisterBank &RegBank)
+        : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}
+
+    /// \return the index in the original value of the most
+    /// significant bit that this partial mapping covers.
+    unsigned getHighBitIdx() const { return StartIdx + Length - 1; }
+
+    /// Print this partial mapping on dbgs() stream.
+    void dump() const;
+
+    /// Print this partial mapping on \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Check that the Mask is compatible with the RegBank.
+    /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
+    /// there is no way this mapping is valid.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify() const;
+  };
+
+  /// Helper struct that represents how a value is mapped through
+  /// different register banks.
+  ///
+  /// \note: So far we do not have any users of the complex mappings
+  /// (mappings with more than one partial mapping), but when we do,
+  /// we will need to duplicate the partial mappings.
+  /// The alternative would be to use an array of pointers to partial
+  /// mappings (i.e., PartialMapping **BreakDown) and duplicate the
+  /// pointers instead.
+  ///
+  /// E.g.,
+  /// Let us say we have a 32-bit add and a <2 x 32-bit> vadd. We
+  /// can expand the
+  /// <2 x 32-bit> vadd into two 32-bit adds.
+  ///
+  /// Currently the TableGen-like file would look like:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // PartialMapping duplicated.
+  ///
+  /// ValueMapping[] = {
+  ///   /*plain 32-bit add*/ {&PartialMapping[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// With the array of pointer, we would have:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // No more duplication.
+  ///
+  /// BreakDowns[] = {
+  /// /*AddBreakDown*/ &PartialMapping[0],
+  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
+  /// /*VAddBreakDown*/ &PartialMapping[1]
+  /// }; // Addresses of PartialMapping duplicated (smaller).
+  ///
+  /// ValueMapping[] = {
+  ///   /*plain 32-bit add*/ {&BreakDowns[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// Given that a PartialMapping is actually small, the code size
+  /// impact of the pointer approach is actually a degradation. Moreover,
+  /// the compile time would be hit by the additional indirection.
+  /// If PartialMapping gets bigger, we may reconsider.
+  struct ValueMapping {
+    /// How the value is broken down between the different register banks.
+    const PartialMapping *BreakDown;
+
+    /// Number of partial mappings used to break down this value.
+    unsigned NumBreakDowns;
+
+    /// The default constructor creates an invalid (isValid() == false)
+    /// instance.
+    ValueMapping() : ValueMapping(nullptr, 0) {}
+
+    /// Initialize a ValueMapping with the given parameters.
+    /// \p BreakDown needs to have a lifetime at least as long
+    /// as this instance.
+    ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
+        : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}
+
+    /// Iterators through the PartialMappings.
+    const PartialMapping *begin() const { return BreakDown; }
+    const PartialMapping *end() const { return BreakDown + NumBreakDowns; }
+
+    /// Check if this ValueMapping is valid.
+    bool isValid() const { return BreakDown && NumBreakDowns; }
+
+    /// Verify that this mapping makes sense for a value of
+    /// \p MeaningfulBitWidth.
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(unsigned MeaningfulBitWidth) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Helper class that represents how the value of an instruction may be
+  /// mapped and what the related cost of such a mapping is.
+  class InstructionMapping {
+    /// Identifier of the mapping.
+    /// This is used to communicate between the target and the optimizers
+    /// which mapping should be realized.
+    unsigned ID = InvalidMappingID;
+
+    /// Cost of this mapping.
+    unsigned Cost = 0;
+
+    /// Mapping of all the operands.
+    const ValueMapping *OperandsMapping;
+
+    /// Number of operands.
+    unsigned NumOperands = 0;
+
+    const ValueMapping &getOperandMapping(unsigned i) {
+      assert(i < getNumOperands() && "Out of bound operand");
+      return OperandsMapping[i];
+    }
+
+  public:
+    /// Constructor for the mapping of an instruction.
+    /// \p NumOperands must be equal to the number of operands of
+    /// the related instruction.
+    /// The rationale is that it is more efficient for the optimizers
+    /// to be able to assume that the mapping of the ith operand is
+    /// at the index i.
+    ///
+    /// \pre ID != InvalidMappingID
+    InstructionMapping(unsigned ID, unsigned Cost,
+                       const ValueMapping *OperandsMapping,
+                       unsigned NumOperands)
+        : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
+          NumOperands(NumOperands) {
+      assert(getID() != InvalidMappingID &&
+             "Use the default constructor for invalid mapping");
+    }
+
+    /// Default constructor.
+    /// Use this constructor to express that the mapping is invalid.
+    InstructionMapping() = default;
+
+    /// Get the cost.
+    unsigned getCost() const { return Cost; }
+
+    /// Get the ID.
+    unsigned getID() const { return ID; }
+
+    /// Get the number of operands.
+    unsigned getNumOperands() const { return NumOperands; }
+
+    /// Get the value mapping of the ith operand.
+    /// \pre The mapping for the ith operand has been set.
+    /// \pre The ith operand is a register.
+    const ValueMapping &getOperandMapping(unsigned i) const {
+      const ValueMapping &ValMapping =
+          const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+      return ValMapping;
+    }
+
+    /// Set the mapping for all the operands.
+    /// In other words, OpdsMapping should hold at least getNumOperands
+    /// ValueMapping.
+    void setOperandsMapping(const ValueMapping *OpdsMapping) {
+      OperandsMapping = OpdsMapping;
+    }
+
+    /// Check whether this object is valid.
+    /// This is a lightweight check for obviously wrong instances.
+    bool isValid() const {
+      return getID() != InvalidMappingID && OperandsMapping;
+    }
+
+    /// Verify that this mapping makes sense for \p MI.
+    /// \pre \p MI must be connected to a MachineFunction.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(const MachineInstr &MI) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Convenient type to represent the alternatives for mapping an
+  /// instruction.
+  /// \todo When we move to TableGen this should be an array ref.
+  using InstructionMappings = SmallVector<const InstructionMapping *, 4>;
+
+  /// Helper class used to get/create the virtual registers that will be used
+  /// to replace the MachineOperand when applying a mapping.
+  class OperandsMapper {
+    /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
+    /// OpIdx-th operand start. -1 means we do not have such a mapping yet.
+    /// Note: We use a SmallVector to avoid heap allocation for most cases.
+    SmallVector<int, 8> OpToNewVRegIdx;
+
+    /// Hold the registers that will be used to map MI with InstrMapping.
+    SmallVector<unsigned, 8> NewVRegs;
+
+    /// Current MachineRegisterInfo, used to create new virtual registers.
+    MachineRegisterInfo &MRI;
+
+    /// Instruction being remapped.
+    MachineInstr &MI;
+
+    /// New mapping of the instruction.
+    const InstructionMapping &InstrMapping;
+
+    /// Constant value identifying that the index in OpToNewVRegIdx
+    /// for an operand has not been set yet.
+    static const int DontKnowIdx;
+
+    /// Get the range in NewVRegs to store all the partial
+    /// values for the \p OpIdx-th operand.
+    ///
+    /// \return The iterator range for the space created.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    iterator_range<SmallVectorImpl<unsigned>::iterator>
+    getVRegsMem(unsigned OpIdx);
+
+    /// Get the end iterator for a range starting at \p StartIdx and
+    /// spanning \p NumVal in NewVRegs.
+    /// \pre StartIdx + NumVal <= NewVRegs.size()
+    SmallVectorImpl<unsigned>::const_iterator
+    getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
+    SmallVectorImpl<unsigned>::iterator getNewVRegsEnd(unsigned StartIdx,
+                                                       unsigned NumVal);
+
+  public:
+    /// Create an OperandsMapper that will hold the information to apply \p
+    /// InstrMapping to \p MI.
+    /// \pre InstrMapping.verify(MI)
+    OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
+                   MachineRegisterInfo &MRI);
+
+    /// \name Getters.
+    /// @{
+    /// The MachineInstr being remapped.
+    MachineInstr &getMI() const { return MI; }
+
+    /// The final mapping of the instruction.
+    const InstructionMapping &getInstrMapping() const { return InstrMapping; }
+
+    /// The MachineRegisterInfo we used to realize the mapping.
+    MachineRegisterInfo &getMRI() const { return MRI; }
+    /// @}
+
+    /// Create as many new virtual registers as needed for the mapping of the \p
+    /// OpIdx-th operand.
+    /// The number of registers is determined by the number of breakdowns for the
+    /// related operand in the instruction mapping.
+    /// The type of the new registers is a plain scalar of the right size.
+    /// The proper type is expected to be set when the mapping is applied to
+    /// the instruction(s) that realizes the mapping.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    ///
+    /// \post All the partial mapping of the \p OpIdx-th operand have been
+    /// assigned a new virtual register.
+    void createVRegs(unsigned OpIdx);
+
+    /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
+    /// the OpIdx-th operand to \p NewVReg.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns >
+    /// PartialMapIdx
+    /// \pre NewVReg != 0
+    ///
+    /// \post the \p PartialMapIdx-th register of the value mapping of the \p
+    /// OpIdx-th operand has been set.
+    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, unsigned NewVReg);
+
+    /// Get all the virtual registers required to map the \p OpIdx-th operand of
+    /// the instruction.
+    ///
+    /// This returns an empty range when createVRegs or setVRegs has not been
+    /// called.
+    /// The iterator may be invalidated by a call to setVRegs or createVRegs.
+    ///
+    /// When \p ForDebug is true, we will not check that the list of new virtual
+    /// registers does not contain uninitialized values.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre ForDebug || All partial mappings have been assigned a register
+    iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+    getVRegs(unsigned OpIdx, bool ForDebug = false) const;
+
+    /// Print this operands mapper on dbgs() stream.
+    void dump() const;
+
+    /// Print this operands mapper on \p OS stream.
+    void print(raw_ostream &OS, bool ForDebug = false) const;
+  };
+
+protected:
+  /// Hold the set of supported register banks.
+  RegisterBank **RegBanks;
+
+  /// Total number of register banks.
+  unsigned NumRegBanks;
+
+  /// Keep dynamically allocated PartialMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+      MapOfPartialMappings;
+
+  /// Keep dynamically allocated ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+      MapOfValueMappings;
+
+  /// Keep dynamically allocated array of ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+      MapOfOperandsMappings;
+
+  /// Keep dynamically allocated InstructionMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+      MapOfInstructionMappings;
+
+  /// Getting the minimal register class of a physreg is expensive.
+  /// Cache this information as we get it.
+  mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;
+
+  /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
+  /// RegisterBank instances.
+  RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
+
+  /// This constructor is meaningless.
+  /// It just provides a default constructor that can be used at link time
+  /// when GlobalISel is not built.
+  /// That way, targets can still inherit from this class without doing
+  /// crazy gymnastics to avoid link time failures.
+  /// \note That works because the constructor is inlined.
+  RegisterBankInfo() {
+    llvm_unreachable("This constructor should not be executed");
+  }
+
+  /// Get the register bank identified by \p ID.
+  RegisterBank &getRegBank(unsigned ID) {
+    assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
+    return *RegBanks[ID];
+  }
+
+  /// Get the MinimalPhysRegClass for Reg.
+  /// \pre Reg is a physical register.
+  const TargetRegisterClass &
+  getMinimalPhysRegClass(unsigned Reg, const TargetRegisterInfo &TRI) const;
+
+  /// Try to get the mapping of \p MI.
+  /// See getInstrMapping for more details on what a mapping represents.
+  ///
+  /// Unlike getInstrMapping the returned InstructionMapping may be invalid
+  /// (isValid() == false).
+  /// This means that the target independent code is not smart enough
+  /// to get the mapping of \p MI and thus, the target has to provide the
+  /// information for \p MI.
+  ///
+  /// This implementation is able to get the mapping of:
+  /// - Target specific instructions by looking at the encoding constraints.
+  /// - Any instruction if all the register operands have already been assigned
+  ///   a register, a register class, or a register bank.
+  /// - Copies and phis if at least one of the operands has been assigned a
+  ///   register, a register class, or a register bank.
+  /// In other words, this method will likely fail to find a mapping for
+  /// any generic opcode that has not been lowered by target specific code.
+  const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
+
+  /// Get the uniquely generated PartialMapping for the
+  /// given arguments.
+  const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
+                                          const RegisterBank &RegBank) const;
+
+  /// \name Methods to get a uniquely generated ValueMapping.
+  /// @{
+
+  /// The most common ValueMapping consists of a single PartialMapping.
+  /// Feature a method for that.
+  const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
+                                      const RegisterBank &RegBank) const;
+
+  /// Get the ValueMapping for the given arguments.
+  const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
+                                      unsigned NumBreakDowns) const;
+  /// @}
+
+  /// \name Methods to get a uniquely generated array of ValueMapping.
+  /// @{
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements between \p Begin and \p End.
+  ///
+  /// Elements that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  ///
+  /// \pre The pointers on ValueMapping between \p Begin and \p End
+  /// must uniquely identify a ValueMapping. Otherwise, there is no
+  /// guarantee that the return instance will be unique, i.e., another
+  /// OperandsMapping could have the same content.
+  template <typename Iterator>
+  const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements of \p OpdsMapping.
+  ///
+  /// Elements of \p OpdsMapping that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  const ValueMapping *getOperandsMapping(
+      const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// given arguments.
+  ///
+  /// Arguments that are nullptr will be replaced by invalid
+  /// ValueMapping (ValueMapping::isValid == false).
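+  ///
+  /// For instance, a binary operation whose three 32-bit register operands
+  /// all live on the same bank could be described as (a sketch; \c GPRBank
+  /// is illustrative):
+  /// \code
+  /// const ValueMapping &VM = getValueMapping(0, 32, GPRBank);
+  /// const ValueMapping *Ops = getOperandsMapping({&VM, &VM, &VM});
+  /// \endcode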
+  const ValueMapping *getOperandsMapping(
+      std::initializer_list<const ValueMapping *> OpdsMapping) const;
+  /// @}
+
+  /// \name Methods to get a uniquely generated InstructionMapping.
+  /// @{
+
+private:
+  /// Method to get a uniquely generated InstructionMapping.
+  const InstructionMapping &
+  getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+                            unsigned Cost = 0,
+                            const ValueMapping *OperandsMapping = nullptr,
+                            unsigned NumOperands = 0) const;
+
+public:
+  /// Method to get a uniquely generated InstructionMapping.
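+  ///
+  /// For instance, a target's getInstrMapping override might end with
+  /// (a sketch; \c MyMappingID and \c OpdsMapping are illustrative):
+  /// \code
+  /// return getInstructionMapping(MyMappingID, /*Cost=*/1, OpdsMapping,
+  ///                              MI.getNumOperands());
+  /// \endcode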
+  const InstructionMapping &
+  getInstructionMapping(unsigned ID, unsigned Cost,
+                        const ValueMapping *OperandsMapping,
+                        unsigned NumOperands) const {
+    return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+                                     OperandsMapping, NumOperands);
+  }
+
+  /// Method to get a uniquely generated invalid InstructionMapping.
+  const InstructionMapping &getInvalidInstructionMapping() const {
+    return getInstructionMappingImpl(/*IsInvalid*/ true);
+  }
+  /// @}
+
+  /// Get the register bank for the \p OpIdx-th operand of \p MI from
+  /// the encoding constraints, if any.
+  ///
+  /// \return A register bank that covers the register class of the
+  /// related encoding constraints or nullptr if \p MI did not provide
+  /// enough information to deduce it.
+  const RegisterBank *
+  getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
+                            const TargetInstrInfo &TII,
+                            const TargetRegisterInfo &TRI) const;
+
+  /// Helper method to apply something that is like the default mapping.
+  /// Basically, that means that \p OpdMapper.getMI() is left untouched
+  /// aside from the reassignment of the register operands that have been
+  /// remapped.
+  ///
+  /// The types of all the new registers that have been created by the
+  /// mapper are properly remapped to the types of the original registers
+  /// they replace. In other words, the semantics of the instruction do
+  /// not change, only the register banks.
+  ///
+  /// If the mapping of one of the operands spans several registers, this
+  /// method will abort as this is not like a default mapping anymore.
+  ///
+  /// \pre For OpIdx in {0..\p OpdMapper.getMI().getNumOperands())
+  ///        the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
+  static void applyDefaultMapping(const OperandsMapper &OpdMapper);
+
+  /// See ::applyMapping.
+  virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
+    llvm_unreachable("The target has to implement that part");
+  }
+
+public:
+  virtual ~RegisterBankInfo() = default;
+
+  /// Get the register bank identified by \p ID.
+  const RegisterBank &getRegBank(unsigned ID) const {
+    return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
+  }
+
+  /// Get the register bank of \p Reg.
+  /// If Reg has not been assigned a register, a register class,
+  /// or a register bank, then this returns nullptr.
+  ///
+  /// \pre Reg != 0 (NoRegister)
+  const RegisterBank *getRegBank(unsigned Reg, const MachineRegisterInfo &MRI,
+                                 const TargetRegisterInfo &TRI) const;
+
+  /// Get the total number of register banks.
+  unsigned getNumRegBanks() const { return NumRegBanks; }
+
+  /// Get a register bank that covers \p RC.
+  ///
+  /// \pre \p RC is a user-defined register class (as opposed to one
+  /// generated by TableGen).
+  ///
+  /// \note The mapping RC -> RegBank could be built while adding the
+  /// coverage for the register banks. However, we do not do it, because,
+  /// at least for now, we only need this information for register classes
+  /// that are used in the description of instructions. In other words,
+  /// there are just a handful of them and we do not want to waste space.
+  ///
+  /// \todo This should be TableGen'ed.
+  virtual const RegisterBank &
+  getRegBankFromRegClass(const TargetRegisterClass &RC) const {
+    llvm_unreachable("The target must override this method");
+  }
+
+  /// Get the cost of a copy from \p B to \p A, or put differently,
+  /// get the cost of A = COPY B. Since register banks may cover
+  /// different sizes, \p Size specifies the size in bits
+  /// that will be copied around.
+  ///
+  /// \note Since this is a copy, both registers have the same size.
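+  ///
+  /// A target override might look like (a sketch; the cross-bank cost of 2
+  /// is illustrative):
+  /// \code
+  /// unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+  ///                   unsigned Size) const override {
+  ///   return (&A != &B) ? 2 : 0; // cross-bank copies are not free
+  /// }
+  /// \endcode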
+  virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+                            unsigned Size) const {
+    // Optimistically assume that copies are coalesced. I.e., when
+    // they are on the same bank, they are free.
+    // Otherwise assume a non-zero cost of 1. The targets are supposed
+    // to override that properly anyway if they care.
+    return &A != &B;
+  }
+
+  /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
+  ///
+  /// \pre \p Reg is a virtual register that either has a bank or a class.
+  /// \returns The constrained register class, or nullptr if there is none.
+  /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
+  /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
+  /// purpose, including non-select passes of GlobalISel
+  static const TargetRegisterClass *
+  constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC,
+                           MachineRegisterInfo &MRI);
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by target independent code.
+  /// Make sure not to use that identifier to avoid possible collisions.
+  static const unsigned DefaultMappingID;
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by the default constructor.
+  /// Make sure not to use that identifier.
+  static const unsigned InvalidMappingID;
+
+  /// Get the mapping of the different operands of \p MI
+  /// on the register bank.
+  /// This mapping should be the direct translation of \p MI.
+  /// In other words, when \p MI is mapped with the returned mapping,
+  /// only the register banks of the operands of \p MI need to be updated.
+  /// In particular, neither the opcode nor the type of \p MI needs to be
+  /// updated for this direct mapping.
+  ///
+  /// The target independent implementation gives a mapping based on
+  /// the register classes for the target specific opcode.
+  /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
+  /// Make sure you do not use that ID for the alternative mapping
+  /// for MI. See getInstrAlternativeMappings for the alternative
+  /// mappings.
+  ///
+  /// For instance, if \p MI is a vector add, the mapping should
+  /// not be a scalarization of the add.
+  ///
+  /// \post returnedVal.verify(MI).
+  ///
+  /// \note If returnedVal does not verify MI, this would probably mean
+  /// that the target does not support that instruction.
+  virtual const InstructionMapping &
+  getInstrMapping(const MachineInstr &MI) const;
+
+  /// Get the alternative mappings for \p MI.
+  /// Alternative in the sense that they are different from getInstrMapping.
+  virtual InstructionMappings
+  getInstrAlternativeMappings(const MachineInstr &MI) const;
+
+  /// Get the possible mapping for \p MI.
+  /// A mapping defines where the different operands may live and at what cost.
+  /// For instance, let us consider:
+  /// v0(16) = G_ADD <2 x i8> v1, v2
+  /// The possible mapping could be:
+  ///
+  /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
+  ///                              /*v2*/{(0xFFFF, VPR)}}
+  /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
+  ///
+  /// \note The first alternative of the returned mapping should be the
+  /// direct translation of \p MI's current form.
+  ///
+  /// \post !returnedVal.empty().
+  InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;
+
+  /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
+  /// After this call \p OpdMapper.getMI() may not be valid anymore.
+  /// \p OpdMapper.getInstrMapping().getID() carries the information of
+  /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
+  /// by the various getInstrXXXMapping methods.
+  ///
+  /// Therefore, getting the mapping and applying it should be kept in
+  /// sync.
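+  ///
+  /// A typical use (a sketch; \c RBI is the target's RegisterBankInfo and
+  /// \c MRI the current MachineRegisterInfo):
+  /// \code
+  /// const InstructionMapping &Mapping = RBI.getInstrMapping(MI);
+  /// OperandsMapper OpdMapper(MI, Mapping, MRI);
+  /// RBI.applyMapping(OpdMapper);
+  /// \endcode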
+  void applyMapping(const OperandsMapper &OpdMapper) const {
+    // The only mapping we know how to handle is the default mapping.
+    if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
+      return applyDefaultMapping(OpdMapper);
+    // For other mapping, the target needs to do the right thing.
+    // If that means calling applyDefaultMapping, fine, but this
+    // must be explicitly stated.
+    applyMappingImpl(OpdMapper);
+  }
+
+  /// Get the size in bits of \p Reg.
+  /// Utility method to get the size of any register. Unlike
+  /// MachineRegisterInfo::getSize, the register does not need to be a
+  /// virtual register.
+  ///
+  /// \pre \p Reg != 0 (NoRegister).
+  unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
+                         const TargetRegisterInfo &TRI) const;
+
+  /// Check that the information held by this instance makes sense for the
+  /// given \p TRI.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+};
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::PartialMapping &PartMapping) {
+  PartMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
+  ValMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::InstructionMapping &InstrMapping) {
+  InstrMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
+  OpdMapper.print(OS, /*ForDebug*/ false);
+  return OS;
+}
+
+/// Hashing function for PartialMapping.
+/// It is required for the hashing of ValueMapping.
+hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
new file mode 100644
index 0000000..7b22e34
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
@@ -0,0 +1,34 @@
+//===- llvm/CodeGen/GlobalISel/Types.h - Types used by GISel ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes high level types that are used by several passes or
+/// APIs involved in the GlobalISel pipeline.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_TYPES_H
+#define LLVM_CODEGEN_GLOBALISEL_TYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+class Value;
+
+/// Map a value to a virtual register.
+/// For now, we choose to map aggregate types to one single virtual
+/// register. This might be revisited if it turns out to be inefficient.
+/// PR26161 tracks that.
+/// Note: We need to expose this type to the target hooks for things like
+/// ABI lowering that would be used during IRTranslation.
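+/// A usage sketch (\c Val and \c VReg are illustrative):
+/// \code
+/// ValueToVReg VMap;
+/// VMap[Val] = VReg; // remember which vreg holds this IR value
+/// \endcode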
+using ValueToVReg = DenseMap<const Value *, unsigned>;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_TYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
new file mode 100644
index 0000000..837035f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -0,0 +1,106 @@
+//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of helper functions used throughout the
+/// GlobalISel pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
+#define LLVM_CODEGEN_GLOBALISEL_UTILS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineOptimizationRemarkEmitter;
+class MachineOptimizationRemarkMissed;
+class MachineRegisterInfo;
+class MCInstrDesc;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetPassConfig;
+class TargetRegisterInfo;
+class TargetRegisterClass;
+class Twine;
+class ConstantFP;
+class APFloat;
+
+/// Try to constrain Reg to the specified register class. If this fails,
+/// create a new virtual register in the correct class and insert a COPY before
+/// \p InsertPt. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainRegToClass(MachineRegisterInfo &MRI,
+                             const TargetInstrInfo &TII,
+                             const RegisterBankInfo &RBI,
+                             MachineInstr &InsertPt, unsigned Reg,
+                             const TargetRegisterClass &RegClass);
+
+/// Try to constrain Reg so that it is usable by argument OpIdx of the
+/// provided MCInstrDesc \p II. If this fails, create a new virtual
+/// register in the correct class and insert a COPY before \p InsertPt.
+/// This is equivalent to constrainRegToClass() with RegClass obtained from the
+/// MCInstrDesc. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainOperandRegClass(const MachineFunction &MF,
+                                  const TargetRegisterInfo &TRI,
+                                  MachineRegisterInfo &MRI,
+                                  const TargetInstrInfo &TII,
+                                  const RegisterBankInfo &RBI,
+                                  MachineInstr &InsertPt, const MCInstrDesc &II,
+                                  const MachineOperand &RegMO, unsigned OpIdx);
+
+/// Mutate the newly-selected instruction \p I to constrain its (possibly
+/// generic) virtual register operands to the instruction's register class.
+/// This could involve inserting COPYs before (for uses) or after (for defs).
+/// This requires the number of operands to match the instruction description.
+/// \returns whether operand regclass constraining succeeded.
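+///
+/// A typical use in an instruction selector (a sketch; \c I is the newly
+/// selected instruction):
+/// \code
+/// if (!constrainSelectedInstRegOperands(I, TII, TRI, RBI))
+///   return false; // bail out of selection
+/// \endcode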
+///
+// FIXME: Not all instructions have the same number of operands. We should
+// probably expose a constrain helper per operand and let the target selector
+// constrain individual registers, like fast-isel.
+bool constrainSelectedInstRegOperands(MachineInstr &I,
+                                      const TargetInstrInfo &TII,
+                                      const TargetRegisterInfo &TRI,
+                                      const RegisterBankInfo &RBI);
+/// Check whether an instruction \p MI is dead: it only defines dead virtual
+/// registers, and doesn't have other side effects.
+bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
+
+/// Report an ISel error as a missed optimization remark to the LLVMContext's
+/// diagnostic stream.  Set the FailedISel MachineFunction property.
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        MachineOptimizationRemarkMissed &R);
+
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        const char *PassName, StringRef Msg,
+                        const MachineInstr &MI);
+
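+/// A usage sketch for the constant helpers below (assuming \c VReg may be
+/// defined by a constant):
+/// \code
+/// if (Optional<int64_t> Cst = getConstantVRegVal(VReg, MRI))
+///   ...; // *Cst holds the constant value
+/// \endcode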
+Optional<int64_t> getConstantVRegVal(unsigned VReg,
+                                     const MachineRegisterInfo &MRI);
+const ConstantFP* getConstantFPVRegVal(unsigned VReg,
+                                       const MachineRegisterInfo &MRI);
+
+/// See if Reg is defined by a single def instruction that is
+/// Opcode. Also try to do trivial folding if it's a COPY with
+/// the same types. Returns null otherwise.
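+/// E.g. (a sketch):
+/// \code
+/// if (MachineInstr *Def = getOpcodeDef(TargetOpcode::G_TRUNC, Reg, MRI))
+///   ...; // Reg comes, possibly through copies, from a G_TRUNC
+/// \endcode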
+MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+                           const MachineRegisterInfo &MRI);
+
+/// Returns an APFloat from Val converted to the appropriate size.
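+/// E.g. (a sketch):
+/// \code
+/// APFloat F32 = getAPFloatFromSize(1.0, 32); // single-precision 1.0f
+/// APFloat F64 = getAPFloatFromSize(1.0, 64); // double-precision 1.0
+/// \endcode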
+APFloat getAPFloatFromSize(double Val, unsigned Size);
+} // End namespace llvm.
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
new file mode 100644
index 0000000..ea94871
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
@@ -0,0 +1,997 @@
+//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares codegen opcodes and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ISDOPCODES_H
+#define LLVM_CODEGEN_ISDOPCODES_H
+
+namespace llvm {
+
+/// ISD namespace - This namespace contains an enum which represents all of the
+/// SelectionDAG node types and value types.
+///
+namespace ISD {
+
+  //===--------------------------------------------------------------------===//
+  /// ISD::NodeType enum - This enum defines the target-independent operators
+  /// for a SelectionDAG.
+  ///
+  /// Targets may also define target-dependent operator codes for SDNodes. For
+  /// example, on x86, these are the enum values in the X86ISD namespace.
+  /// Targets should aim to use target-independent operators to model their
+  /// instruction sets as much as possible, and only use target-dependent
+  /// operators when they have special requirements.
+  ///
+  /// Finally, during and after selection proper, SDNodes may use special
+  /// operator codes that correspond directly with MachineInstr opcodes. These
+  /// are used to represent selected instructions. See the isMachineOpcode()
+  /// and getMachineOpcode() member functions of SDNode.
+  ///
+  enum NodeType {
+    /// DELETED_NODE - This is an illegal value that is used to catch
+    /// errors.  This opcode is not a legal opcode for any node.
+    DELETED_NODE,
+
+    /// EntryToken - This is the marker used to indicate the start of a region.
+    EntryToken,
+
+    /// TokenFactor - This node takes multiple tokens as input and produces a
+    /// single token result. This is used to represent the fact that the operand
+    /// operators are independent of each other.
+    TokenFactor,
+
+    /// AssertSext, AssertZext - These nodes record if a register contains a
+    /// value that has already been zero or sign extended from a narrower type.
+    /// These nodes take two operands.  The first is the node that has already
+    /// been extended, and the second is a value type node indicating the width
+    /// of the extension.
+    AssertSext, AssertZext,
+
+    /// Various leaf nodes.
+    BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
+    Constant, ConstantFP,
+    GlobalAddress, GlobalTLSAddress, FrameIndex,
+    JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
+
+    /// The address of the GOT
+    GLOBAL_OFFSET_TABLE,
+
+    /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+    /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
+    /// of the frame or return address to return.  An index of zero corresponds
+    /// to the current function's frame or return address, an index of one to
+    /// the parent's frame or return address, and so on.
+    FRAMEADDR, RETURNADDR, ADDROFRETURNADDR,
+
+    /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
+    /// Materializes the offset from the local object pointer of another
+    /// function to a particular local object passed to llvm.localescape. The
+    /// operand is the MCSymbol label used to represent this offset, since
+    /// typically the offset is not known until after code generation of the
+    /// parent.
+    LOCAL_RECOVER,
+
+    /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on
+    /// the DAG, which implements the named register global variables extension.
+    READ_REGISTER,
+    WRITE_REGISTER,
+
+    /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+    /// first (possible) on-stack argument. This is needed for correct stack
+    /// adjustment during unwind.
+    FRAME_TO_ARGS_OFFSET,
+
+    /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
+    /// Frame Address (CFA), generally the value of the stack pointer at the
+    /// call site in the previous frame.
+    EH_DWARF_CFA,
+
+    /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+    /// 'eh_return' gcc dwarf builtin, which is used to return from
+    /// exception. The general meaning is: adjust stack by OFFSET and pass
+    /// execution to HANDLER. Many platform-related details also :)
+    EH_RETURN,
+
+    /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
+    /// This corresponds to the eh.sjlj.setjmp intrinsic.
+    /// It takes an input chain and a pointer to the jump buffer as inputs
+    /// and returns an outchain.
+    EH_SJLJ_SETJMP,
+
+    /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
+    /// This corresponds to the eh.sjlj.longjmp intrinsic.
+    /// It takes an input chain and a pointer to the jump buffer as inputs
+    /// and returns an outchain.
+    EH_SJLJ_LONGJMP,
+
+    /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
+    /// The target initializes the dispatch table here.
+    EH_SJLJ_SETUP_DISPATCH,
+
+    /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
+    /// simplification, or lowering of the constant. They are used for constants
+    /// which are known to fit in the immediate fields of their users, or for
+    /// carrying magic numbers which are not values which need to be
+    /// materialized in registers.
+    TargetConstant,
+    TargetConstantFP,
+
+    /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+    /// anything else with this node, and this is valid in the target-specific
+    /// dag, turning into a GlobalAddress operand.
+    TargetGlobalAddress,
+    TargetGlobalTLSAddress,
+    TargetFrameIndex,
+    TargetJumpTable,
+    TargetConstantPool,
+    TargetExternalSymbol,
+    TargetBlockAddress,
+
+    MCSymbol,
+
+    /// TargetIndex - Like a constant pool entry, but with completely
+    /// target-dependent semantics. Holds target flags, a 32-bit index, and a
+    /// 64-bit index. Targets can use this however they like.
+    TargetIndex,
+
+    /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
+    /// This node represents a target intrinsic function with no side effects.
+    /// The first operand is the ID number of the intrinsic from the
+    /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
+    /// node returns the result of the intrinsic.
+    INTRINSIC_WO_CHAIN,
+
+    /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
+    /// This node represents a target intrinsic function with side effects that
+    /// returns a result.  The first operand is a chain pointer.  The second is
+    /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
+    /// operands to the intrinsic follow.  The node has two results, the result
+    /// of the intrinsic and an output chain.
+    INTRINSIC_W_CHAIN,
+
+    /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
+    /// This node represents a target intrinsic function with side effects that
+    /// does not return a result.  The first operand is a chain pointer.  The
+    /// second is the ID number of the intrinsic from the llvm::Intrinsic
+    /// namespace.  The operands to the intrinsic follow.
+    INTRINSIC_VOID,
+
+    /// CopyToReg - This node has three operands: a chain, a register number to
+    /// set to this value, and a value.
+    CopyToReg,
+
+    /// CopyFromReg - This node indicates that the input value is a virtual or
+    /// physical register that is defined outside of the scope of this
+    /// SelectionDAG.  The register is available from the RegisterSDNode object.
+    CopyFromReg,
+
+    /// UNDEF - An undefined node.
+    UNDEF,
+
+    /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+    /// a Constant, which is required to be operand #1) half of the integer or
+    /// float value specified as operand #0.  This is only for use before
+    /// legalization, for values that will be broken into multiple registers.
+    EXTRACT_ELEMENT,
+
+    /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
+    /// Given two values of the same integer value type, this produces a value
+    /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
+    /// legalization. The lower part of the composite value should be in
+    /// element 0 and the upper part should be in element 1.
+    BUILD_PAIR,
+
+    /// MERGE_VALUES - This node takes multiple discrete operands and returns
+    /// them all as its individual results.  This node has exactly the same
+    /// number of inputs and outputs. This node is useful for some pieces of the
+    /// code generator that want to think about a single node with multiple
+    /// results, not multiple nodes.
+    MERGE_VALUES,
+
+    /// Simple integer binary arithmetic operators.
+    ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
+
+    /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+    /// a signed/unsigned value of type i[2*N], and return the full value as
+    /// two results, each of type iN.
+    SMUL_LOHI, UMUL_LOHI,
+
+    /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+    /// remainder result.
+    SDIVREM, UDIVREM,
+
+    /// CARRY_FALSE - This node is used when folding other nodes,
+    /// like ADDC/SUBC, which indicate the carry result is always false.
+    CARRY_FALSE,
+
+    /// Carry-setting nodes for multiple precision addition and subtraction.
+    /// These nodes take two operands of the same value type, and produce two
+    /// results.  The first result is the normal add or sub result, the second
+    /// result is the carry flag result.
+    /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
+    /// They are kept around for now to provide a smooth transition path
+    /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
+    ADDC, SUBC,
+
+    /// Carry-using nodes for multiple precision addition and subtraction. These
+    /// nodes take three operands: The first two are the normal lhs and rhs to
+    /// the add or sub, and the third is the input carry flag.  These nodes
+    /// produce two results; the normal result of the add or sub, and the output
+    /// carry flag.  These nodes both read and write a carry flag to allow
+    /// them to be chained together for add and sub of arbitrarily large
+    /// values.
+    ADDE, SUBE,
+
+    /// Carry-using nodes for multiple precision addition and subtraction.
+    /// These nodes take three operands: The first two are the normal lhs and
+    /// rhs to the add or sub, and the third is a boolean indicating if there
+    /// is an incoming carry. These nodes produce two results: the normal
+    /// result of the add or sub, and the output carry so they can be chained
+    /// together. The use of this opcode is preferable to adde/sube if the
+    /// target supports it, as the carry is a regular value rather than a
+    /// glue, which allows further optimisation.
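+    ///
+    /// A 128-bit add can thus be expanded roughly as (an illustrative
+    /// sketch; the i64 half operands are assumed):
+    /// \code
+    ///   SDVTList VTs  = DAG.getVTList(MVT::i64, MVT::i1);
+    ///   SDValue LoSum = DAG.getNode(ISD::UADDO, DL, VTs, LHSLo, RHSLo);
+    ///   SDValue HiSum = DAG.getNode(ISD::ADDCARRY, DL, VTs, LHSHi, RHSHi,
+    ///                               LoSum.getValue(1));
+    /// \endcode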
+    ADDCARRY, SUBCARRY,
+
+    /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+    /// These nodes take two operands: the normal LHS and RHS to the add. They
+    /// produce two results: the normal result of the add, and a boolean that
+    /// indicates if an overflow occurred (*not* a flag, because it may be stored
+    /// to memory, etc.).  If the type of the boolean is not i1 then the high
+    /// bits conform to getBooleanContents.
+    /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
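+    ///
+    /// For example (an illustrative sketch; LHS and RHS are assumed i32
+    /// SDValues):
+    /// \code
+    ///   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
+    ///   SDValue Sum  = DAG.getNode(ISD::SADDO, DL, VTs, LHS, RHS);
+    ///   SDValue Ovf  = Sum.getValue(1); // boolean overflow result
+    /// \endcode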
+    SADDO, UADDO,
+
+    /// Same for subtraction.
+    SSUBO, USUBO,
+
+    /// Same for multiplication.
+    SMULO, UMULO,
+
+    /// Simple binary floating point operators.
+    FADD, FSUB, FMUL, FDIV, FREM,
+
+    /// Constrained versions of the binary floating point operators.
+    /// These will be lowered to the simple operators before final selection.
+    /// They are used to limit optimizations while the DAG is being
+    /// optimized.
+    STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
+    STRICT_FMA,
+
+    /// Constrained versions of libm-equivalent floating point intrinsics.
+    /// These will be lowered to the equivalent non-constrained pseudo-op
+    /// (or expanded to the equivalent library call) before final selection.
+    /// They are used to limit optimizations while the DAG is being optimized.
+    STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
+    STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
+    STRICT_FRINT, STRICT_FNEARBYINT,
+
+    /// FMA - Perform a * b + c with no intermediate rounding step.
+    FMA,
+
+    /// FMAD - Perform a * b + c, while getting the same result as the
+    /// separately rounded operations.
+    FMAD,
+
+    /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
+    /// DAG node does not require that X and Y have the same type, just that
+    /// they are both floating point.  X and the result must have the same type.
+    /// FCOPYSIGN(f32, f64) is allowed.
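+    ///
+    /// For example (an illustrative sketch), the mixed-type form keeps the
+    /// result in the type of the first operand:
+    /// \code
+    ///   // Take the sign of the f64 value Y64; the result stays f32.
+    ///   SDValue R = DAG.getNode(ISD::FCOPYSIGN, DL, MVT::f32, X32, Y64);
+    /// \endcode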
+    FCOPYSIGN,
+
+    /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+    /// value as an integer 0/1 value.
+    FGETSIGN,
+
+    /// Returns platform specific canonical encoding of a floating point number.
+    FCANONICALIZE,
+
+    /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
+    /// specified, possibly variable, elements.  The number of elements is
+    /// required to be a power of two.  The types of the operands must all be
+    /// the same and must match the vector element type, except that integer
+    /// types are allowed to be larger than the element type, in which case
+    /// the operands are implicitly truncated.
+    BUILD_VECTOR,
+
+    /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
+    /// at IDX replaced with VAL.  If the type of VAL is larger than the vector
+    /// element type then VAL is truncated before replacement.
+    INSERT_VECTOR_ELT,
+
+    /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
+    /// identified by the (potentially variable) element number IDX.  If the
+    /// return type is an integer type larger than the element type of the
+    /// vector, the result is extended to the width of the return type. In
+    /// that case, the high bits are undefined.
+    EXTRACT_VECTOR_ELT,
+
+    /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
+    /// vector type with the same length and element type, this produces a
+    /// concatenated vector result value, with length equal to the sum of the
+    /// lengths of the input vectors.
+    CONCAT_VECTORS,
+
+    /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
+    /// with VECTOR2 inserted into VECTOR1 at the (potentially
+    /// variable) element number IDX, which must be a multiple of the
+    /// VECTOR2 vector length.  The elements of VECTOR1 starting at
+    /// IDX are overwritten with VECTOR2.  Elements IDX through
+    /// vector_length(VECTOR2) must be valid VECTOR1 indices.
+    INSERT_SUBVECTOR,
+
+    /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
+    /// vector value) starting with the element number IDX, which must be a
+    /// constant multiple of the result vector length.
+    EXTRACT_SUBVECTOR,
+
+    /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+    /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
+    /// values that indicate which value (or undef) each result element will
+    /// get.  These constant ints are accessible through the
+    /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
+    /// 'vperm' instruction, except that the indices must be constants and are
+    /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
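+    ///
+    /// For example (an illustrative sketch), with 4-element vectors V1 and
+    /// V2 the mask <0, 4, 1, 5> interleaves their low halves:
+    /// \code
+    ///   int Mask[] = {0, 4, 1, 5}; // {V1[0], V2[0], V1[1], V2[1]}
+    ///   SDValue S = DAG.getVectorShuffle(VT, DL, V1, V2, Mask);
+    /// \endcode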
+    VECTOR_SHUFFLE,
+
+    /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
+    /// scalar value into element 0 of the resultant vector type.  The top
+    /// elements 1 to N-1 of the N-element vector are undefined.  The type
+    /// of the operand must match the vector element type, except when they
+    /// are integer types.  In this case the operand is allowed to be wider
+    /// than the vector element type, and is implicitly truncated to it.
+    SCALAR_TO_VECTOR,
+
+    /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
+    /// producing an unsigned/signed value of type i[2*N], then return the top
+    /// part.
+    MULHU, MULHS,
+
+    /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
+    /// integers.
+    SMIN, SMAX, UMIN, UMAX,
+
+    /// Bitwise operators - logical and, logical or, logical xor.
+    AND, OR, XOR,
+
+    /// ABS - Determine the unsigned absolute value of a signed integer value of
+    /// the same bitwidth.
+    /// Note: A value of INT_MIN will return INT_MIN; no saturation or
+    /// overflow is performed.
+    ABS,
+
+    /// Shift and rotation operations.  After legalization, the type of the
+    /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
+    /// the shift amount can be any type, but care must be taken to ensure it is
+    /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
+    /// legalization, types like i1024 can occur and i8 doesn't have enough bits
+    /// to represent the shift amount.
+    /// When the 1st operand is a vector, the shift amount must be in the same
+    /// type. (TLI.getShiftAmountTy() will return the same type when the input
+    /// type is a vector.)
+    SHL, SRA, SRL, ROTL, ROTR,
+
+    /// Byte Swap and Counting operators.
+    BSWAP, CTTZ, CTLZ, CTPOP, BITREVERSE,
+
+    /// Bit counting operators with an undefined result for zero inputs.
+    CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
+
+    /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
+    /// i1 then the high bits must conform to getBooleanContents.
+    SELECT,
+
+    /// Select with a vector condition (op #0) and two vector operands (ops #1
+    /// and #2), returning a vector result.  All vectors have the same length.
+    /// Much like the scalar select and setcc, each bit in the condition selects
+    /// whether the corresponding result element is taken from op #1 or op #2.
+    /// At first, the VSELECT condition is of vXi1 type. Later, targets may
+    /// change the condition type in order to match the VSELECT node using a
+    /// pattern. The condition follows the BooleanContent format of the target.
+    VSELECT,
+
+    /// Select with condition operator - This selects between a true value and
+    /// a false value (ops #2 and #3) based on the boolean result of comparing
+    /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+    /// condition code in op #4, a CondCodeSDNode.
+    SELECT_CC,
+
+    /// SetCC operator - This evaluates to a true value iff the condition is
+    /// true.  If the result value type is not i1 then the high bits conform
+    /// to getBooleanContents.  The operands to this are the left and right
+    /// operands to compare (ops #0, and #1) and the condition code to compare
+    /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
+    /// then the result type must also be a vector type.
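+    ///
+    /// For example (an illustrative sketch), Res = (A < B) ? X : Y can be
+    /// built as a SETCC feeding a SELECT:
+    /// \code
+    ///   SDValue Cond = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETLT);
+    ///   SDValue Res  = DAG.getSelect(DL, VT, Cond, X, Y);
+    /// \endcode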
+    SETCC,
+
+    /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, and
+    /// op #2 is a *carry value*. This operator checks the result of
+    /// "LHS - RHS - Carry", and can be used to compare two wide integers:
+    /// (setcce lhshi rhshi (subc lhslo rhslo) cc). Only valid for integers.
+    /// FIXME: This node is deprecated in favor of SETCCCARRY.
+    /// It is kept around for now to provide a smooth transition path
+    /// toward the use of SETCCCARRY and will eventually be removed.
+    SETCCE,
+
+    /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
+    /// op #2 is a boolean indicating if there is an incoming carry. This
+    /// operator checks the result of "LHS - RHS - Carry", and can be used to
+    /// compare two wide integers:
+    /// (setcccarry lhshi rhshi (subcarry lhslo rhslo) cc).
+    /// Only valid for integers.
+    SETCCCARRY,
+
+    /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+    /// integer shift operations.  The operation ordering is:
+    ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
+    SHL_PARTS, SRA_PARTS, SRL_PARTS,
+
+    /// Conversion operators.  These are all single input single output
+    /// operations.  For all of these, the result type must be strictly
+    /// wider or narrower (depending on the operation) than the source
+    /// type.
+
+    /// SIGN_EXTEND - Used for integer types, replicating the sign bit
+    /// into new bits.
+    SIGN_EXTEND,
+
+    /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
+    ZERO_EXTEND,
+
+    /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
+    ANY_EXTEND,
+
+    /// TRUNCATE - Completely drop the high bits.
+    TRUNCATE,
+
+    /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+    /// depends on the first letter) to floating point.
+    SINT_TO_FP,
+    UINT_TO_FP,
+
+    /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+    /// sign extend a small value in a large integer register (e.g. sign
+    /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
+    /// with the 7th bit).  The size of the smaller type is indicated by
+    /// operand #1, a ValueType node.
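+    ///
+    /// For example (an illustrative sketch; X is an assumed i32 SDValue):
+    /// \code
+    ///   // Sign-extend the low 8 bits of X in place; equivalent to a
+    ///   // shift left by 24 followed by an arithmetic shift right by 24.
+    ///   SDValue R = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, X,
+    ///                           DAG.getValueType(MVT::i8));
+    /// \endcode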
+    SIGN_EXTEND_INREG,
+
+    /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register any-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is any-extended into the corresponding, wider result
+    /// elements with the high bits becoming undef.
+    ANY_EXTEND_VECTOR_INREG,
+
+    /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register sign-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is sign-extended into the corresponding, wider result
+    /// elements.
+    // FIXME: The SIGN_EXTEND_INREG node isn't specifically limited to
+    // scalars, but it also doesn't handle vectors well. Either it should be
+    // restricted to scalars or this node (and its handling) should be merged
+    // into it.
+    SIGN_EXTEND_VECTOR_INREG,
+
+    /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register zero-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is zero-extended into the corresponding, wider result
+    /// elements.
+    ZERO_EXTEND_VECTOR_INREG,
+
+    /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
+    /// integer.
+    FP_TO_SINT,
+    FP_TO_UINT,
+
+    /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
+    /// down to the precision of the destination VT.  TRUNC is a flag, which is
+    /// always an integer that is zero or one.  If TRUNC is 0, this is a
+    /// normal rounding, if it is 1, this FP_ROUND is known to not change the
+    /// value of Y.
+    ///
+    /// The TRUNC = 1 case is used in cases where we know that the value will
+    /// not be modified by the node, because Y is not using any of the extra
+    /// precision of the source type.  This allows certain transformations like
+    /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
+    /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
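+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   // Round an f64 down to f32; the 0 flag means the value may change.
+    ///   SDValue R = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, X,
+    ///                           DAG.getIntPtrConstant(0, DL));
+    /// \endcode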
+    FP_ROUND,
+
+    /// FLT_ROUNDS_ - Returns current rounding mode:
+    /// -1 Undefined
+    ///  0 Round to 0
+    ///  1 Round to nearest
+    ///  2 Round to +inf
+    ///  3 Round to -inf
+    FLT_ROUNDS_,
+
+    /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
+    /// rounds it to a floating point value.  It then promotes it and returns it
+    /// in a register of the same size.  This operation effectively just
+    /// discards excess precision.  The type to round down to is specified by
+    /// the VT operand, a VTSDNode.
+    FP_ROUND_INREG,
+
+    /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
+    FP_EXTEND,
+
+    /// BITCAST - This operator converts between integer, vector and FP
+    /// values, as if the value was stored to memory with one type and loaded
+    /// from the same address with the other type (or equivalently for vector
+    /// format conversions, etc).  The source and result are required to have
+    /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
+    /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
+    /// getNode().
+    ///
+    /// This operator is subtly different from the bitcast instruction from
+    /// LLVM-IR since this node may change the bits in the register. For
+    /// example, this occurs on big-endian NEON and big-endian MSA where the
+    /// layout of the bits in the register depends on the vector type and this
+    /// operator acts as a shuffle operation for some vector type combinations.
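+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   // Reinterpret the bits of an f32 as an i32; no value conversion.
+    ///   SDValue Bits = DAG.getNode(ISD::BITCAST, DL, MVT::i32, FloatVal);
+    /// \endcode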
+    BITCAST,
+
+    /// ADDRSPACECAST - This operator converts between pointers of different
+    /// address spaces.
+    ADDRSPACECAST,
+
+    /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
+    /// and truncation for half-precision (16 bit) floating point numbers. These nodes
+    /// form a semi-softened interface for dealing with f16 (as an i16), which
+    /// is often a storage-only type but has native conversions.
+    FP16_TO_FP, FP_TO_FP16,
+
+    /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+    /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+    /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
+    /// floating point operations. These are inspired by libm.
+    FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+    FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+    FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
+    /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
+    /// values.
+    /// In the case where a single input is NaN, the non-NaN input is returned.
+    ///
+    /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
+    FMINNUM, FMAXNUM,
+    /// FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that
+    /// when a single input is NaN, NaN is returned.
+    FMINNAN, FMAXNAN,
+
+    /// FSINCOS - Compute both fsin and fcos as a single operation.
+    FSINCOS,
+
+    /// LOAD and STORE have token chains as their first operand, then the same
+    /// operands as an LLVM load/store instruction, then an offset node that
+    /// is added / subtracted from the base pointer to form the address (for
+    /// indexed memory ops).
+    LOAD, STORE,
+
+    /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+    /// to a specified boundary.  This node always has two return values: a new
+    /// stack pointer value and a chain. The first operand is the token chain,
+    /// the second is the number of bytes to allocate, and the third is the
+    /// alignment boundary.  The size is guaranteed to be a multiple of the
+    /// stack alignment, and the alignment is guaranteed to be bigger than the
+    /// stack alignment (if required) or 0 to get standard stack alignment.
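+    ///
+    /// For example (an illustrative sketch; Chain, Size, and PtrVT are
+    /// assumed):
+    /// \code
+    ///   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
+    ///   SDValue Ops[] = {Chain, Size, DAG.getConstant(0, DL, PtrVT)};
+    ///   SDValue SP = DAG.getNode(ISD::DYNAMIC_STACKALLOC, DL, VTs, Ops);
+    /// \endcode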
+    DYNAMIC_STACKALLOC,
+
+    /// Control flow instructions.  These all have token chains.
+
+    /// BR - Unconditional branch.  The first operand is the chain
+    /// operand, the second is the MBB to branch to.
+    BR,
+
+    /// BRIND - Indirect branch.  The first operand is the chain, the second
+    /// is the value to branch to, which must be of the same type as the
+    /// target's pointer type.
+    BRIND,
+
+    /// BR_JT - Jumptable branch. The first operand is the chain, the second
+    /// is the jumptable index, the last one is the jumptable entry index.
+    BR_JT,
+
+    /// BRCOND - Conditional branch.  The first operand is the chain, the
+    /// second is the condition, the third is the block to branch to if the
+    /// condition is true.  If the type of the condition is not i1, then the
+    /// high bits must conform to getBooleanContents.
+    BRCOND,
+
+    /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
+    /// that the condition is represented as condition code, and two nodes to
+    /// compare, rather than as a combined SetCC node.  The operands in order
+    /// are chain, cc, lhs, rhs, block to branch to if condition is true.
+    BR_CC,
+
+    /// INLINEASM - Represents an inline asm block.  This node always has two
+    /// return values: a chain and a flag result.  The inputs are as follows:
+    ///   Operand #0  : Input chain.
+    ///   Operand #1  : an ExternalSymbolSDNode with a pointer to the asm string.
+    ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
+    ///   Operand #3  : HasSideEffect, IsAlignStack bits.
+    ///   After this, it is followed by a list of operands with this format:
+    ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
+    ///                     number of operands that follow, etc.  See InlineAsm.h.
+    ///     ... however many operands ...
+    ///   Operand #last: Optional, an incoming flag.
+    ///
+    /// The variable width operands are required to represent target addressing
+    /// modes as a single "operand", even though they may have multiple
+    /// SDOperands.
+    INLINEASM,
+
+    /// EH_LABEL - Represents a label in mid basic block used to track
+    /// locations needed for debug and exception handling tables.  These nodes
+    /// take a chain as input and return a chain.
+    EH_LABEL,
+
+    /// ANNOTATION_LABEL - Represents a mid basic block label used by
+    /// annotations. This should remain within the basic block and be ordered
+    /// with respect to other call instructions, but loads and stores may float
+    /// past it.
+    ANNOTATION_LABEL,
+
+    /// CATCHPAD - Represents a catchpad instruction.
+    CATCHPAD,
+
+    /// CATCHRET - Represents a return from a catch block funclet. Used for
+    /// MSVC compatible exception handling. Takes a chain operand and a
+    /// destination basic block operand.
+    CATCHRET,
+
+    /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
+    /// MSVC compatible exception handling. Takes only a chain operand.
+    CLEANUPRET,
+
+    /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
+    /// value, the same type as the pointer type for the system, and an output
+    /// chain.
+    STACKSAVE,
+
+    /// STACKRESTORE has two operands, an input chain and a pointer to restore
+    /// to. It returns an output chain.
+    STACKRESTORE,
+
+    /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
+    /// of a call sequence, and carry arbitrary information that target might
+    /// want to know.  The first operand is a chain, the rest are specified by
+    /// the target and not touched by the DAG optimizers.
+    /// Targets that may use stack to pass call arguments define additional
+    /// operands:
+    /// - size of the call frame part that must be set up within the
+    ///   CALLSEQ_START..CALLSEQ_END pair,
+    /// - part of the call frame prepared prior to CALLSEQ_START.
+    /// Both these parameters must be constants, their sum is the total call
+    /// frame size.
+    /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+    CALLSEQ_START,  // Beginning of a call sequence
+    CALLSEQ_END,    // End of a call sequence
+
+    /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+    /// and the alignment. It returns a pair of values: the vaarg value and a
+    /// new chain.
+    VAARG,
+
+    /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
+    /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+    /// source.
+    VACOPY,
+
+    /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
+    /// pointer, and a SRCVALUE.
+    VAEND, VASTART,
+
+    /// SRCVALUE - This is a node type that holds a Value* that is used to
+    /// make reference to a value in the LLVM IR.
+    SRCVALUE,
+
+    /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
+    /// reference metadata in the IR.
+    MDNODE_SDNODE,
+
+    /// PCMARKER - This corresponds to the pcmarker intrinsic.
+    PCMARKER,
+
+    /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+    /// It produces a chain and one i64 value. The only operand is a chain.
+    /// If i64 is not legal, the result will be expanded into smaller values.
+    /// Still, it returns an i64, so targets should set legality for i64.
+    /// The result is the content of the architecture-specific cycle
+    /// counter-like register (or other high accuracy low latency clock source).
+    READCYCLECOUNTER,
+
+    /// HANDLENODE node - Used as a handle for various purposes.
+    HANDLENODE,
+
+    /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
+    /// takes as input a token chain, the pointer to the trampoline, the pointer
+    /// to the nested function, the pointer to pass for the 'nest' parameter, a
+    /// SRCVALUE for the trampoline and another for the nested function
+    /// (allowing targets to access the original Function*).
+    /// It produces a token chain as output.
+    INIT_TRAMPOLINE,
+
+    /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
+    /// It takes a pointer to the trampoline and produces a (possibly) new
+    /// pointer to the same trampoline with platform-specific adjustments
+    /// applied.  The pointer it returns points to an executable block of code.
+    ADJUST_TRAMPOLINE,
+
+    /// TRAP - Trapping instruction
+    TRAP,
+
+    /// DEBUGTRAP - Trap intended to get the attention of a debugger.
+    DEBUGTRAP,
+
+    /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
+    /// is the chain.  The other operands are the address to prefetch,
+    /// read / write specifier, locality specifier and instruction / data cache
+    /// specifier.
+    PREFETCH,
+
+    /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
+    /// This corresponds to the fence instruction. It takes an input chain, and
+    /// two integer constants: an AtomicOrdering and a SynchronizationScope.
+    ATOMIC_FENCE,
+
+    /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
+    /// This corresponds to "load atomic" instruction.
+    ATOMIC_LOAD,
+
+    /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
+    /// This corresponds to "store atomic" instruction.
+    ATOMIC_STORE,
+
+    /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+    /// For double-word atomic operations:
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
+    ///                                          swapLo, swapHi)
+    /// This corresponds to the cmpxchg instruction.
+    ATOMIC_CMP_SWAP,
+
+    /// Val, Success, OUTCHAIN
+    ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
+    /// N.b. this is still a strong cmpxchg operation, so
+    /// Success == "Val == cmp".
+    ATOMIC_CMP_SWAP_WITH_SUCCESS,
+
+    /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+    /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+    /// For double-word atomic operations:
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
+    /// These correspond to the atomicrmw instruction.
+    ATOMIC_SWAP,
+    ATOMIC_LOAD_ADD,
+    ATOMIC_LOAD_SUB,
+    ATOMIC_LOAD_AND,
+    ATOMIC_LOAD_CLR,
+    ATOMIC_LOAD_OR,
+    ATOMIC_LOAD_XOR,
+    ATOMIC_LOAD_NAND,
+    ATOMIC_LOAD_MIN,
+    ATOMIC_LOAD_MAX,
+    ATOMIC_LOAD_UMIN,
+    ATOMIC_LOAD_UMAX,
+
+    // Masked load and store - consecutive vector load and store operations
+    // with an additional mask operand that prevents memory accesses to the
+    // masked-off lanes.
+    MLOAD, MSTORE,
+
+    // Masked gather and scatter - load and store operations for a vector of
+    // random addresses with an additional mask operand that prevents memory
+    // accesses to the masked-off lanes.
+    MGATHER, MSCATTER,
+
+    /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
+    /// is the chain and the second operand is the alloca pointer.
+    LIFETIME_START, LIFETIME_END,
+
+    /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
+    /// beginning and end of a GC transition sequence, and carry arbitrary
+    /// information that target might need for lowering.  The first operand is
+    /// a chain, the rest are specified by the target and not touched by the DAG
+    /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
+    /// nested.
+    GC_TRANSITION_START,
+    GC_TRANSITION_END,
+
+    /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
+    /// the most recent dynamic alloca. For most targets that would be 0, but
+    /// for some others (e.g. PowerPC, PowerPC64) it would be a compile-time
+    /// known nonzero constant. The only operand here is the chain.
+    GET_DYNAMIC_AREA_OFFSET,
+
+    /// Generic reduction nodes. These nodes represent horizontal vector
+    /// reduction operations, producing a scalar result.
+    /// The STRICT variants perform reductions in sequential order. The first
+    /// operand is an initial scalar accumulator value, and the second operand
+    /// is the vector to reduce.
+    VECREDUCE_STRICT_FADD, VECREDUCE_STRICT_FMUL,
+    /// These reductions are non-strict, and have a single vector operand.
+    VECREDUCE_FADD, VECREDUCE_FMUL,
+    VECREDUCE_ADD, VECREDUCE_MUL,
+    VECREDUCE_AND, VECREDUCE_OR, VECREDUCE_XOR,
+    VECREDUCE_SMAX, VECREDUCE_SMIN, VECREDUCE_UMAX, VECREDUCE_UMIN,
+    /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
+    VECREDUCE_FMAX, VECREDUCE_FMIN,
+
+    /// BUILTIN_OP_END - This must be the last enum value in this list.
+    /// The target-specific pre-isel opcode values start here.
+    BUILTIN_OP_END
+  };
+
+  /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
+  /// which do not reference a specific memory location should be less than
+  /// this value. Those that do must not be less than this value, and can
+  /// be used with SelectionDAG::getMemIntrinsicNode.
+  static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+400;
+
+  //===--------------------------------------------------------------------===//
+  /// MemIndexedMode enum - This enum defines the load / store indexed
+  /// addressing modes.
+  ///
+  /// UNINDEXED    "Normal" load / store. The effective address is already
+  ///              computed and is available in the base pointer. The offset
+  ///              operand is always undefined. In addition to producing a
+  ///              chain, an unindexed load produces one value (result of the
+  ///              load); an unindexed store does not produce a value.
+  ///
+  /// PRE_INC      Similar to the unindexed mode where the effective address is
+  /// PRE_DEC      the value of the base pointer add / subtract the offset.
+  ///              It considers the computation as being folded into the load /
+  ///              store operation (i.e. the load / store does the address
+  ///              computation as well as performing the memory transaction).
+  ///              The base operand is always undefined. In addition to
+  ///              producing a chain, pre-indexed load produces two values
+  ///              (result of the load and the result of the address
+  ///              computation); a pre-indexed store produces one value (result
+  ///              of the address computation).
+  ///
+  /// POST_INC     The effective address is the value of the base pointer. The
+  /// POST_DEC     value of the offset operand is then added to / subtracted
+  ///              from the base after memory transaction. In addition to
+  ///              producing a chain, post-indexed load produces two values
+  ///              (the result of the load and the result of the base +/- offset
+  ///              computation); a post-indexed store produces one value (the
+  ///              result of the base +/- offset computation).
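+  ///
+  /// As an illustrative sketch (LD, Base, and Inc are assumed SDValues), a
+  /// DAG combine might fold an address increment into a load like so:
+  /// \code
+  ///   SDValue NewLd = DAG.getIndexedLoad(LD, DL, Base, Inc, ISD::PRE_INC);
+  /// \endcode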
+  enum MemIndexedMode {
+    UNINDEXED = 0,
+    PRE_INC,
+    PRE_DEC,
+    POST_INC,
+    POST_DEC
+  };
+
+  static const int LAST_INDEXED_MODE = POST_DEC + 1;
+
+  //===--------------------------------------------------------------------===//
+  /// LoadExtType enum - This enum defines the three variants of LOADEXT
+  /// (load with extension).
+  ///
+  /// SEXTLOAD loads the integer operand and sign extends it to a larger
+  ///          integer result type.
+  /// ZEXTLOAD loads the integer operand and zero extends it to a larger
+  ///          integer result type.
+  /// EXTLOAD  is used for two things: floating point extending loads and
+  ///          integer extending loads [the top bits are undefined].
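+  ///
+  /// For example (an illustrative sketch; Chain and Ptr are assumed), an i8
+  /// in memory can be zero-extended to i32 as part of the load itself:
+  /// \code
+  ///   SDValue L = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, Ptr,
+  ///                              MachinePointerInfo(), MVT::i8);
+  /// \endcode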
+  enum LoadExtType {
+    NON_EXTLOAD = 0,
+    EXTLOAD,
+    SEXTLOAD,
+    ZEXTLOAD
+  };
+
+  static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
+
+  NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
+
+  //===--------------------------------------------------------------------===//
+  /// ISD::CondCode enum - These are ordered carefully to make the bitfields
+  /// below work out, when considering SETFALSE (something that never exists
+  /// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
+  /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
+  /// to.  If the "N" column is 1, the result of the comparison is undefined if
+  /// the input is a NAN.
+  ///
+  /// All of these (except for the 'always folded ops') should be handled for
+  /// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
+  /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
+  ///
+  /// Note that these are laid out in a specific order to allow bit-twiddling
+  /// to transform conditions.
+  enum CondCode {
+    // Opcode          N U L G E       Intuitive operation
+    SETFALSE,      //    0 0 0 0       Always false (always folded)
+    SETOEQ,        //    0 0 0 1       True if ordered and equal
+    SETOGT,        //    0 0 1 0       True if ordered and greater than
+    SETOGE,        //    0 0 1 1       True if ordered and greater than or equal
+    SETOLT,        //    0 1 0 0       True if ordered and less than
+    SETOLE,        //    0 1 0 1       True if ordered and less than or equal
+    SETONE,        //    0 1 1 0       True if ordered and operands are unequal
+    SETO,          //    0 1 1 1       True if ordered (no nans)
+    SETUO,         //    1 0 0 0       True if unordered: isnan(X) | isnan(Y)
+    SETUEQ,        //    1 0 0 1       True if unordered or equal
+    SETUGT,        //    1 0 1 0       True if unordered or greater than
+    SETUGE,        //    1 0 1 1       True if unordered, greater than, or equal
+    SETULT,        //    1 1 0 0       True if unordered or less than
+    SETULE,        //    1 1 0 1       True if unordered, less than, or equal
+    SETUNE,        //    1 1 1 0       True if unordered or not equal
+    SETTRUE,       //    1 1 1 1       Always true (always folded)
+    // Don't care operations: undefined if the input is a nan.
+    SETFALSE2,     //  1 X 0 0 0       Always false (always folded)
+    SETEQ,         //  1 X 0 0 1       True if equal
+    SETGT,         //  1 X 0 1 0       True if greater than
+    SETGE,         //  1 X 0 1 1       True if greater than or equal
+    SETLT,         //  1 X 1 0 0       True if less than
+    SETLE,         //  1 X 1 0 1       True if less than or equal
+    SETNE,         //  1 X 1 1 0       True if not equal
+    SETTRUE2,      //  1 X 1 1 1       Always true (always folded)
+
+    SETCC_INVALID       // Marker value.
+  };
+
+  /// Return true if this is a setcc instruction that performs a signed
+  /// comparison when used with integer operands.
+  inline bool isSignedIntSetCC(CondCode Code) {
+    return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
+  }
+
+  /// Return true if this is a setcc instruction that performs an unsigned
+  /// comparison when used with integer operands.
+  inline bool isUnsignedIntSetCC(CondCode Code) {
+    return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
+  }
+
+  /// Return true if the specified condition returns true if the two operands to
+  /// the condition are equal. Note that if one of the two operands is a NaN,
+  /// this value is meaningless.
+  inline bool isTrueWhenEqual(CondCode Cond) {
+    return ((int)Cond & 1) != 0;
+  }
+
+  /// This function returns 0 if the condition is always false if an operand is
+  /// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
+  /// the condition is undefined if the operand is a NaN.
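+  ///
+  /// For example (an illustrative sketch of the bit layout above):
+  /// \code
+  ///   getUnorderedFlavor(ISD::SETOLT); // 0: always false on NaN input
+  ///   getUnorderedFlavor(ISD::SETULT); // 1: always true on NaN input
+  ///   getUnorderedFlavor(ISD::SETLT);  // 2: undefined on NaN input
+  /// \endcode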
+  inline unsigned getUnorderedFlavor(CondCode Cond) {
+    return ((int)Cond >> 3) & 3;
+  }
+
+  /// Return the operation corresponding to !(X op Y), where 'op' is a valid
+  /// SetCC operation.
+  CondCode getSetCCInverse(CondCode Operation, bool isInteger);
+
+  /// Return the operation corresponding to (Y op X) when given the operation
+  /// for (X op Y).
+  CondCode getSetCCSwappedOperands(CondCode Operation);
+
+  /// Return the result of a logical OR between different comparisons of
+  /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
+  /// SETCC_INVALID if it is not possible to represent the resultant comparison.
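+  ///
+  /// For example (an illustrative sketch): OR-ing an ordered less-than with
+  /// an ordered equality yields an ordered less-or-equal:
+  /// \code
+  ///   ISD::CondCode CC =
+  ///       ISD::getSetCCOrOperation(ISD::SETOLT, ISD::SETOEQ, false);
+  ///   // CC == ISD::SETOLE
+  /// \endcode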
+  CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+  /// Return the result of a logical AND between different comparisons of
+  /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
+  /// SETCC_INVALID if it is not possible to represent the resultant comparison.
+  CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+} // end llvm::ISD namespace
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h b/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h
new file mode 100644
index 0000000..597d684
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h
@@ -0,0 +1,54 @@
+//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IntrinsicLowering interface.  This interface allows
+// addition of domain-specific or front-end specific intrinsics to LLVM without
+// having to modify all of the C backend or interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
+#define LLVM_CODEGEN_INTRINSICLOWERING_H
+
+#include "llvm/IR/Intrinsics.h"
+
+namespace llvm {
+class CallInst;
+class Module;
+class DataLayout;
+
+class IntrinsicLowering {
+  const DataLayout &DL;
+
+  bool Warned;
+
+public:
+  explicit IntrinsicLowering(const DataLayout &DL) : DL(DL), Warned(false) {}
+
+  /// Add all of the prototypes that might be needed by an intrinsic lowering
+  /// implementation to be inserted into the module specified.
+  void AddPrototypes(Module &M);
+
+  /// Replace a call to the specified intrinsic function.
+  /// If an intrinsic function must be implemented by the code generator
+  /// (such as va_start), this function should print a message and abort.
+  ///
+  /// Otherwise, if an intrinsic function call can be lowered, the code to
+  /// implement it (often a call to a non-intrinsic function) is inserted
+  /// _after_ the call instruction and the call is deleted. The caller must
+  /// be capable of handling this kind of change.
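+  ///
+  /// A typical driver looks roughly like this (an illustrative sketch; the
+  /// collection of calls to lower is assumed, not part of this class):
+  /// \code
+  ///   IntrinsicLowering IL(M.getDataLayout());
+  ///   IL.AddPrototypes(M);
+  ///   for (CallInst *CI : IntrinsicCallsToLower)
+  ///     IL.LowerIntrinsicCall(CI);
+  /// \endcode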
+  void LowerIntrinsicCall(CallInst *CI);
+
+  /// Try to replace a call instruction with a call to a bswap intrinsic. Return
+  /// false if the call is not a simple integer bswap.
+  static bool LowerToByteSwap(CallInst *CI);
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
new file mode 100644
index 0000000..988e6d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -0,0 +1,98 @@
+//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LatencyPriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using latency information to
+// reduce the length of the critical path through the basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/ScheduleDAG.h"
+
+namespace llvm {
+  class LatencyPriorityQueue;
+
+  /// Sorting functions for the Available queue.
+  struct latency_sort {
+    LatencyPriorityQueue *PQ;
+    explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
+
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
+
+  class LatencyPriorityQueue : public SchedulingPriorityQueue {
+    // SUnits - The SUnits for the current graph.
+    std::vector<SUnit> *SUnits;
+
+    /// NumNodesSolelyBlocking - This vector contains, for every node in the
+    /// Queue, the number of nodes that the node is the sole unscheduled
+    /// predecessor for.  This is used as a tie-breaker heuristic for better
+    /// mobility.
+    std::vector<unsigned> NumNodesSolelyBlocking;
+
+    /// Queue - The queue.
+    std::vector<SUnit*> Queue;
+    latency_sort Picker;
+
+  public:
+    LatencyPriorityQueue() : Picker(this) {
+    }
+
+    bool isBottomUp() const override { return false; }
+
+    void initNodes(std::vector<SUnit> &sunits) override {
+      SUnits = &sunits;
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void addNode(const SUnit *SU) override {
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void updateNode(const SUnit *SU) override {
+    }
+
+    void releaseState() override {
+      SUnits = nullptr;
+    }
+
+    unsigned getLatency(unsigned NodeNum) const {
+      assert(NodeNum < (*SUnits).size());
+      return (*SUnits)[NodeNum].getHeight();
+    }
+
+    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+      assert(NodeNum < NumNodesSolelyBlocking.size());
+      return NumNodesSolelyBlocking[NodeNum];
+    }
+
+    bool empty() const override { return Queue.empty(); }
+
+    void push(SUnit *U) override;
+
+    SUnit *pop() override;
+
+    void remove(SUnit *SU) override;
+
+    // scheduledNode - As nodes are scheduled, we look to see if there are any
+    // successor nodes that have a single unscheduled predecessor.  If so, that
+    // single predecessor has a higher priority, since scheduling it will make
+    // the node available.
+    void scheduledNode(SUnit *Node) override;
+
+  private:
+    void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
+    SUnit *getSingleUnscheduledPred(SUnit *SU);
+  };
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
new file mode 100644
index 0000000..848ee1d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
@@ -0,0 +1,76 @@
+///===- LazyMachineBlockFrequencyInfo.h - Lazy Block Frequency -*- C++ -*--===//
+///
+///                     The LLVM Compiler Infrastructure
+///
+/// This file is distributed under the University of Illinois Open Source
+/// License. See LICENSE.TXT for details.
+///
+///===---------------------------------------------------------------------===//
+/// \file
+/// This is an alternative analysis pass to MachineBlockFrequencyInfo.  The
+/// difference is that with this pass the block frequencies are not computed
+/// when the analysis pass is executed but rather when the BFI result is
+/// explicitly requested by the analysis client.
+///
+///===---------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYMACHINEBLOCKFREQUENCYINFO_H
+#define LLVM_ANALYSIS_LAZYMACHINEBLOCKFREQUENCYINFO_H
+
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+
+namespace llvm {
+/// \brief This is an alternative analysis pass to MachineBlockFrequencyInfo.
+/// The difference is that with this pass, the block frequencies are not
+/// computed when the analysis pass is executed but rather when the BFI result
+/// is explicitly requested by the analysis client.
+///
+/// This works by querying whether MBFI is available and otherwise generating
+/// MBFI on the fly.  In that case the analyses it depends on (LI, DT) are
+/// also queried before being computed on the fly.
+///
+/// Note that it is expected that we wouldn't need this functionality for the
+/// new PM since with the new PM, analyses are executed on demand.
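+///
+/// A client pass might use it roughly as follows (an illustrative sketch;
+/// the enclosing MachineFunctionPass and MBB are assumed):
+/// \code
+///   const MachineBlockFrequencyInfo &MBFI =
+///       getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI();
+///   BlockFrequency Freq = MBFI.getBlockFreq(&MBB);
+/// \endcode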
+
+class LazyMachineBlockFrequencyInfoPass : public MachineFunctionPass {
+private:
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineBlockFrequencyInfo> OwnedMBFI;
+
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineLoopInfo> OwnedMLI;
+
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineDominatorTree> OwnedMDT;
+
+  /// The function.
+  MachineFunction *MF = nullptr;
+
+  /// \brief Calculate MBFI and all other analyses that are required by BFI
+  /// but not yet available.
+  MachineBlockFrequencyInfo &calculateIfNotAvailable() const;
+
+public:
+  static char ID;
+
+  LazyMachineBlockFrequencyInfoPass();
+
+  /// \brief Compute and return the block frequencies.
+  MachineBlockFrequencyInfo &getBFI() { return calculateIfNotAvailable(); }
+
+  /// \brief Compute and return the block frequencies.
+  const MachineBlockFrequencyInfo &getBFI() const {
+    return calculateIfNotAvailable();
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h b/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h
new file mode 100644
index 0000000..3ba5034
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h
@@ -0,0 +1,258 @@
+//===- LexicalScopes.cpp - Collecting lexical scope info --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LexicalScopes analysis.
+//
+// This pass collects lexical scope information and maps machine instructions
+// to respective lexical scopes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
+#define LLVM_CODEGEN_LEXICALSCOPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include <cassert>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MDNode;
+
+//===----------------------------------------------------------------------===//
+/// InsnRange - This is used to track range of instructions with identical
+/// lexical scope.
+///
+using InsnRange = std::pair<const MachineInstr *, const MachineInstr *>;
+
+//===----------------------------------------------------------------------===//
+/// LexicalScope - This class is used to track scope information.
+///
+class LexicalScope {
+public:
+  LexicalScope(LexicalScope *P, const DILocalScope *D, const DILocation *I,
+               bool A)
+      : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A) {
+    assert(D);
+    assert(D->getSubprogram()->getUnit()->getEmissionKind() !=
+           DICompileUnit::NoDebug &&
+           "Don't build lexical scopes for non-debug locations");
+    assert(D->isResolved() && "Expected resolved node");
+    assert((!I || I->isResolved()) && "Expected resolved node");
+    if (Parent)
+      Parent->addChild(this);
+  }
+
+  // Accessors.
+  LexicalScope *getParent() const { return Parent; }
+  const MDNode *getDesc() const { return Desc; }
+  const DILocation *getInlinedAt() const { return InlinedAtLocation; }
+  const DILocalScope *getScopeNode() const { return Desc; }
+  bool isAbstractScope() const { return AbstractScope; }
+  SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
+  SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
+
+  /// addChild - Add a child scope.
+  void addChild(LexicalScope *S) { Children.push_back(S); }
+
+  /// openInsnRange - This scope covers an instruction range starting from MI.
+  void openInsnRange(const MachineInstr *MI) {
+    if (!FirstInsn)
+      FirstInsn = MI;
+
+    if (Parent)
+      Parent->openInsnRange(MI);
+  }
+
+  /// extendInsnRange - Extend the current instruction range covered by
+  /// this scope.
+  void extendInsnRange(const MachineInstr *MI) {
+    assert(FirstInsn && "MI Range is not open!");
+    LastInsn = MI;
+    if (Parent)
+      Parent->extendInsnRange(MI);
+  }
+
+  /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
+  /// until now. This is used when a new scope is encountered while walking
+  /// machine instructions.
+  void closeInsnRange(LexicalScope *NewScope = nullptr) {
+    assert(LastInsn && "Last insn missing!");
+    Ranges.push_back(InsnRange(FirstInsn, LastInsn));
+    FirstInsn = nullptr;
+    LastInsn = nullptr;
+    // If Parent dominates NewScope then do not close Parent's instruction
+    // range.
+    if (Parent && (!NewScope || !Parent->dominates(NewScope)))
+      Parent->closeInsnRange(NewScope);
+  }
+
+  /// dominates - Return true if current scope dominates given lexical scope.
+  bool dominates(const LexicalScope *S) const {
+    if (S == this)
+      return true;
+    if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
+      return true;
+    return false;
+  }
+
+  // Depth First Search support to walk and manipulate LexicalScope hierarchy.
+  unsigned getDFSOut() const { return DFSOut; }
+  void setDFSOut(unsigned O) { DFSOut = O; }
+  unsigned getDFSIn() const { return DFSIn; }
+  void setDFSIn(unsigned I) { DFSIn = I; }
+
+  /// dump - print lexical scope.
+  void dump(unsigned Indent = 0) const;
+
+private:
+  LexicalScope *Parent;                        // Parent to this scope.
+  const DILocalScope *Desc;                    // Debug info descriptor.
+  const DILocation *InlinedAtLocation;         // Location at which this
+                                               // scope is inlined.
+  bool AbstractScope;                          // Abstract Scope
+  SmallVector<LexicalScope *, 4> Children;     // Scopes defined in scope.
+                                               // Contents not owned.
+  SmallVector<InsnRange, 4> Ranges;
+
+  const MachineInstr *LastInsn = nullptr;  // Last instruction of this scope.
+  const MachineInstr *FirstInsn = nullptr; // First instruction of this scope.
+  unsigned DFSIn = 0; // In & Out Depth use to determine scope nesting.
+  unsigned DFSOut = 0;
+};
+
+//===----------------------------------------------------------------------===//
+/// LexicalScopes - This class provides an interface to collect and use lexical
+/// scoping information from machine instructions.
+///
+class LexicalScopes {
+public:
+  LexicalScopes() = default;
+
+  /// initialize - Scan the machine function and construct the lexical scope
+  /// nest, resetting the instance if necessary.
+  void initialize(const MachineFunction &);
+
+  /// reset - Release memory.
+  void reset();
+
+  /// empty - Return true if there is any lexical scope information available.
+  bool empty() { return CurrentFnLexicalScope == nullptr; }
+
+  /// getCurrentFunctionScope - Return lexical scope for the current function.
+  LexicalScope *getCurrentFunctionScope() const {
+    return CurrentFnLexicalScope;
+  }
+
+  /// getMachineBasicBlocks - Populate the given set with machine basic blocks
+  /// that have machine instructions belonging to the lexical scope identified
+  /// by DebugLoc.
+  void getMachineBasicBlocks(const DILocation *DL,
+                             SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);
+
+  /// dominates - Return true if DebugLoc's lexical scope dominates at least one
+  /// machine instruction's lexical scope in a given machine basic block.
+  bool dominates(const DILocation *DL, MachineBasicBlock *MBB);
+
+  /// findLexicalScope - Find lexical scope, either regular or inlined, for the
+  /// given DebugLoc. Return NULL if not found.
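+  ///
+  /// A debug-info client typically drives this roughly as follows (an
+  /// illustrative sketch; MF and MI are assumed):
+  /// \code
+  ///   LexicalScopes LS;
+  ///   LS.initialize(MF);
+  ///   if (!LS.empty())
+  ///     if (LexicalScope *Scope = LS.findLexicalScope(MI.getDebugLoc()))
+  ///       Scope->dump();
+  /// \endcode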
+  LexicalScope *findLexicalScope(const DILocation *DL);
+
+  /// getAbstractScopesList - Return a reference to list of abstract scopes.
+  ArrayRef<LexicalScope *> getAbstractScopesList() const {
+    return AbstractScopesList;
+  }
+
+  /// findAbstractScope - Find an abstract scope or return null.
+  LexicalScope *findAbstractScope(const DILocalScope *N) {
+    auto I = AbstractScopeMap.find(N);
+    return I != AbstractScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// findInlinedScope - Find an inlined scope for the given scope/inlined-at.
+  LexicalScope *findInlinedScope(const DILocalScope *N, const DILocation *IA) {
+    auto I = InlinedLexicalScopeMap.find(std::make_pair(N, IA));
+    return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// findLexicalScope - Find regular lexical scope or return null.
+  LexicalScope *findLexicalScope(const DILocalScope *N) {
+    auto I = LexicalScopeMap.find(N);
+    return I != LexicalScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// dump - Print data structures to dbgs().
+  void dump() const;
+
+  /// getOrCreateAbstractScope - Find or create an abstract lexical scope.
+  LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);
+
+private:
+  /// getOrCreateLexicalScope - Find lexical scope for the given Scope/IA. If
+  /// not available then create new lexical scope.
+  LexicalScope *getOrCreateLexicalScope(const DILocalScope *Scope,
+                                        const DILocation *IA = nullptr);
+  LexicalScope *getOrCreateLexicalScope(const DILocation *DL) {
+    return DL ? getOrCreateLexicalScope(DL->getScope(), DL->getInlinedAt())
+              : nullptr;
+  }
+
+  /// getOrCreateRegularScope - Find or create a regular lexical scope.
+  LexicalScope *getOrCreateRegularScope(const DILocalScope *Scope);
+
+  /// getOrCreateInlinedScope - Find or create an inlined lexical scope.
+  LexicalScope *getOrCreateInlinedScope(const DILocalScope *Scope,
+                                        const DILocation *InlinedAt);
+
+  /// extractLexicalScopes - Extract instruction ranges for each lexical scope
+  /// in the given machine function.
+  void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
+                            DenseMap<const MachineInstr *, LexicalScope *> &M);
+  void constructScopeNest(LexicalScope *Scope);
+  void
+  assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
+                          DenseMap<const MachineInstr *, LexicalScope *> &M);
+
+  const MachineFunction *MF = nullptr;
+
+  /// LexicalScopeMap - Tracks the scopes in the current function.
+  // Use an unordered_map to ensure value pointer validity over insertion.
+  std::unordered_map<const DILocalScope *, LexicalScope> LexicalScopeMap;
+
+  /// InlinedLexicalScopeMap - Tracks inlined function scopes in current
+  /// function.
+  std::unordered_map<std::pair<const DILocalScope *, const DILocation *>,
+                     LexicalScope,
+                     pair_hash<const DILocalScope *, const DILocation *>>
+      InlinedLexicalScopeMap;
+
+  /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
+  // Use an unordered_map to ensure value pointer validity over insertion.
+  std::unordered_map<const DILocalScope *, LexicalScope> AbstractScopeMap;
+
+  /// AbstractScopesList - Tracks abstract scopes constructed while processing
+  /// a function.
+  SmallVector<LexicalScope *, 4> AbstractScopesList;
+
+  /// CurrentFnLexicalScope - Top-level scope for the current function.
+  LexicalScope *CurrentFnLexicalScope = nullptr;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LEXICALSCOPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
new file mode 100644
index 0000000..c3046da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -0,0 +1,38 @@
+//===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all assembler writer related passes for tools like
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include <cstdlib>
+
+namespace {
+  struct ForceAsmWriterLinking {
+    ForceAsmWriterLinking() {
+      // We must reference the plug-ins in such a way that compilers will not
+      // delete them as dead code, even with whole-program optimization, yet
+      // the reference must effectively be a no-op. As the compiler isn't
+      // smart enough to know that getenv() never returns -1, this does the
+      // job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      llvm::linkOcamlGCPrinter();
+      llvm::linkErlangGCPrinter();
+
+    }
+  } ForceAsmWriterLinking; // Force link by creating a global definition.
+}
+
+#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h b/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h
new file mode 100644
index 0000000..fee131e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -0,0 +1,59 @@
+//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all codegen related passes for tools like lli and
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cstdlib>
+
+namespace {
+  struct ForceCodegenLinking {
+    ForceCodegenLinking() {
+      // We must reference the passes in such a way that compilers will not
+      // delete them as dead code, even with whole-program optimization, yet
+      // the reference must effectively be a no-op. As the compiler isn't
+      // smart enough to know that getenv() never returns -1, this does the
+      // job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      (void) llvm::createFastRegisterAllocator();
+      (void) llvm::createBasicRegisterAllocator();
+      (void) llvm::createGreedyRegisterAllocator();
+      (void) llvm::createDefaultPBQPRegisterAllocator();
+
+      llvm::linkCoreCLRGC();
+      llvm::linkOcamlGC();
+      llvm::linkErlangGC();
+      llvm::linkShadowStackGC();
+      llvm::linkStatepointExampleGC();
+
+      (void) llvm::createBURRListDAGScheduler(nullptr,
+                                              llvm::CodeGenOpt::Default);
+      (void) llvm::createSourceListDAGScheduler(nullptr,
+                                                llvm::CodeGenOpt::Default);
+      (void) llvm::createHybridListDAGScheduler(nullptr,
+                                                llvm::CodeGenOpt::Default);
+      (void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+      (void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
+      (void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+
+    }
+  } ForceCodegenLinking; // Force link by creating a global definition.
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
new file mode 100644
index 0000000..f4fa872
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
@@ -0,0 +1,943 @@
+//===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveRange and LiveInterval classes.  Given some
+// numbering of each of the machine instructions, an interval [i, j) is said to be a
+// live range for register v if there is no instruction with number j' >= j
+// such that v is live at j' and there is no instruction with number i' < i such
+// that v is live at i'. In this implementation ranges can have holes,
+// i.e. a range might look like [1,20), [50,65), [1000,1001).  Each
+// individual segment is represented as an instance of LiveRange::Segment,
+// and the whole range is represented as an instance of LiveRange.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
+#define LLVM_CODEGEN_LIVEINTERVAL_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <set>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+
+  class CoalescerPair;
+  class LiveIntervals;
+  class MachineRegisterInfo;
+  class raw_ostream;
+
+  /// VNInfo - Value Number Information.
+  /// This class holds information about a machine-level value, including
+  /// definition and use points.
+  ///
+  class VNInfo {
+  public:
+    using Allocator = BumpPtrAllocator;
+
+    /// The ID number of this value.
+    unsigned id;
+
+    /// The index of the defining instruction.
+    SlotIndex def;
+
+    /// VNInfo constructor.
+    VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}
+
+    /// VNInfo constructor, copies values from orig, except for the value number.
+    VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}
+
+    /// Copy from the parameter into this VNInfo.
+    void copyFrom(VNInfo &src) {
+      def = src.def;
+    }
+
+    /// Returns true if this value is defined by a PHI instruction (or was;
+    /// PHI instructions may have been eliminated).
+    /// PHI-defs begin at a block boundary, all other defs begin at register or
+    /// EC slots.
+    bool isPHIDef() const { return def.isBlock(); }
+
+    /// Returns true if this value is unused.
+    bool isUnused() const { return !def.isValid(); }
+
+    /// Mark this value as unused.
+    void markUnused() { def = SlotIndex(); }
+  };
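+
+  // Illustrative sketch (VNI is an assumed VNInfo* owned by some LiveRange;
+  // this block is not part of the interface):
+  //
+  //   if (VNI->isPHIDef()) {
+  //     // Defined at a block boundary by a (possibly eliminated) PHI.
+  //   } else if (!VNI->isUnused()) {
+  //     SlotIndex DefIdx = VNI->def; // slot of the defining instruction
+  //   }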
+
+  /// Result of a LiveRange query. This class hides the implementation details
+  /// of live ranges, and it should be used as the primary interface for
+  /// examining live ranges around instructions.
+  class LiveQueryResult {
+    VNInfo *const EarlyVal;
+    VNInfo *const LateVal;
+    const SlotIndex EndPoint;
+    const bool Kill;
+
+  public:
+    LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint,
+                    bool Kill)
+      : EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill)
+    {}
+
+    /// Return the value that is live-in to the instruction. This is the value
+    /// that will be read by the instruction's use operands. Return NULL if no
+    /// value is live-in.
+    VNInfo *valueIn() const {
+      return EarlyVal;
+    }
+
+    /// Return true if the live-in value is killed by this instruction. This
+    /// means that either the live range ends at the instruction, or it changes
+    /// value.
+    bool isKill() const {
+      return Kill;
+    }
+
+    /// Return true if this instruction has a dead def.
+    bool isDeadDef() const {
+      return EndPoint.isDead();
+    }
+
+    /// Return the value leaving the instruction, if any. This can be a
+    /// live-through value, or a live def. A dead def returns NULL.
+    VNInfo *valueOut() const {
+      return isDeadDef() ? nullptr : LateVal;
+    }
+
+    /// Returns the value alive at the end of the instruction, if any. This can
+    /// be a live-through value, a live def or a dead def.
+    VNInfo *valueOutOrDead() const {
+      return LateVal;
+    }
+
+    /// Return the value defined by this instruction, if any. This includes
+    /// dead defs, it is the value created by the instruction's def operands.
+    VNInfo *valueDefined() const {
+      return EarlyVal == LateVal ? nullptr : LateVal;
+    }
+
+    /// Return the end point of the last live range segment to interact with
+    /// the instruction, if any.
+    ///
+    /// The end point is an invalid SlotIndex only if the live range doesn't
+    /// intersect the instruction at all.
+    ///
+    /// The end point may be at or past the end of the instruction's basic
+    /// block. That means the value was live out of the block.
+    SlotIndex endPoint() const {
+      return EndPoint;
+    }
+  };
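+
+  // Illustrative sketch (LR is an assumed LiveRange and Idx an assumed
+  // instruction SlotIndex; LiveRange::Query is defined below):
+  //
+  //   LiveQueryResult LRQ = LR.Query(Idx);
+  //   if (VNInfo *InVNI = LRQ.valueIn()) {
+  //     // A value is read by the instruction's use operands; it is
+  //     // killed here iff LRQ.isKill() is true.
+  //   }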
+
+  /// This class represents the liveness of a register, stack slot, etc.
+  /// It manages an ordered list of Segment objects.
+  /// The Segments are organized in a static single assignment form: At places
+  /// where a new value is defined or different values reach a CFG join a new
+  /// segment with a new value number is used.
+  class LiveRange {
+  public:
+    /// This represents a simple continuous liveness interval for a value.
+    /// The start point is inclusive, the end point exclusive. These intervals
+    /// are rendered as [start,end).
+    struct Segment {
+      SlotIndex start;  // Start point of the interval (inclusive)
+      SlotIndex end;    // End point of the interval (exclusive)
+      VNInfo *valno = nullptr; // identifier for the value contained in this
+                               // segment.
+
+      Segment() = default;
+
+      Segment(SlotIndex S, SlotIndex E, VNInfo *V)
+        : start(S), end(E), valno(V) {
+        assert(S < E && "Cannot create empty or backwards segment");
+      }
+
+      /// Return true if the index is covered by this segment.
+      bool contains(SlotIndex I) const {
+        return start <= I && I < end;
+      }
+
+      /// Return true if the given interval, [S, E), is covered by this segment.
+      bool containsInterval(SlotIndex S, SlotIndex E) const {
+        assert((S < E) && "Backwards interval?");
+        return (start <= S && S < end) && (start < E && E <= end);
+      }
+
+      bool operator<(const Segment &Other) const {
+        return std::tie(start, end) < std::tie(Other.start, Other.end);
+      }
+      bool operator==(const Segment &Other) const {
+        return start == Other.start && end == Other.end;
+      }
+
+      void dump() const;
+    };
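+
+    // Illustrative sketch of the half-open semantics (S is an assumed
+    // Segment covering [start,end)):
+    //
+    //   S.contains(S.start);                 // true: start is inclusive
+    //   S.contains(S.end);                   // false: end is exclusive
+    //   S.containsInterval(S.start, S.end);  // true: a segment covers itself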
+
+    using Segments = SmallVector<Segment, 2>;
+    using VNInfoList = SmallVector<VNInfo *, 2>;
+
+    Segments segments;   // the liveness segments
+    VNInfoList valnos;   // value#'s
+
+    // The segment set is used temporarily to accelerate initial computation
+    // of live ranges of physical registers in computeRegUnitRange.
+    // After that the set is flushed to the segment vector and deleted.
+    using SegmentSet = std::set<Segment>;
+    std::unique_ptr<SegmentSet> segmentSet;
+
+    using iterator = Segments::iterator;
+    using const_iterator = Segments::const_iterator;
+
+    iterator begin() { return segments.begin(); }
+    iterator end()   { return segments.end(); }
+
+    const_iterator begin() const { return segments.begin(); }
+    const_iterator end() const  { return segments.end(); }
+
+    using vni_iterator = VNInfoList::iterator;
+    using const_vni_iterator = VNInfoList::const_iterator;
+
+    vni_iterator vni_begin() { return valnos.begin(); }
+    vni_iterator vni_end()   { return valnos.end(); }
+
+    const_vni_iterator vni_begin() const { return valnos.begin(); }
+    const_vni_iterator vni_end() const   { return valnos.end(); }
+
+    /// Constructs a new LiveRange object.
+    LiveRange(bool UseSegmentSet = false)
+        : segmentSet(UseSegmentSet ? llvm::make_unique<SegmentSet>()
+                                   : nullptr) {}
+
+    /// Constructs a new LiveRange object by copying segments and valnos from
+    /// another LiveRange.
+    LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) {
+      assert(Other.segmentSet == nullptr &&
+             "Copying of LiveRanges with active SegmentSets is not supported");
+      assign(Other, Allocator);
+    }
+
+    /// Copies values numbers and live segments from \p Other into this range.
+    void assign(const LiveRange &Other, BumpPtrAllocator &Allocator) {
+      if (this == &Other)
+        return;
+
+      assert(Other.segmentSet == nullptr &&
+             "Copying of LiveRanges with active SegmentSets is not supported");
+      // Duplicate valnos.
+      for (const VNInfo *VNI : Other.valnos)
+        createValueCopy(VNI, Allocator);
+      // Now we can copy segments and remap their valnos.
+      for (const Segment &S : Other.segments)
+        segments.push_back(Segment(S.start, S.end, valnos[S.valno->id]));
+    }
+
+    /// advanceTo - Advance the specified iterator to point to the Segment
+    /// containing the specified position, or end() if the position is past the
+    /// end of the range.  If no Segment contains this position, but the
+    /// position is in a hole, this method returns an iterator pointing to the
+    /// Segment immediately after the hole.
+    iterator advanceTo(iterator I, SlotIndex Pos) {
+      assert(I != end());
+      if (Pos >= endIndex())
+        return end();
+      while (I->end <= Pos) ++I;
+      return I;
+    }
+
+    const_iterator advanceTo(const_iterator I, SlotIndex Pos) const {
+      assert(I != end());
+      if (Pos >= endIndex())
+        return end();
+      while (I->end <= Pos) ++I;
+      return I;
+    }
+
+    /// find - Return an iterator pointing to the first segment that ends after
+    /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
+    /// when searching large ranges.
+    ///
+    /// If Pos is contained in a Segment, that segment is returned.
+    /// If Pos is in a hole, the following Segment is returned.
+    /// If Pos is beyond endIndex, end() is returned.
+    iterator find(SlotIndex Pos);
+
+    const_iterator find(SlotIndex Pos) const {
+      return const_cast<LiveRange*>(this)->find(Pos);
+    }
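+
+    // Illustrative sketch: visiting every segment that overlaps an assumed
+    // half-open window [WinStart, WinEnd):
+    //
+    //   for (const_iterator I = find(WinStart), E = end();
+    //        I != E && I->start < WinEnd; ++I) {
+    //     // *I ends after WinStart and starts before WinEnd, so it
+    //     // overlaps the window.
+    //   }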
+
+    void clear() {
+      valnos.clear();
+      segments.clear();
+    }
+
+    size_t size() const {
+      return segments.size();
+    }
+
+    bool hasAtLeastOneValue() const { return !valnos.empty(); }
+
+    bool containsOneValue() const { return valnos.size() == 1; }
+
+    unsigned getNumValNums() const { return (unsigned)valnos.size(); }
+
+    /// getValNumInfo - Returns pointer to the specified val#.
+    ///
+    inline VNInfo *getValNumInfo(unsigned ValNo) {
+      return valnos[ValNo];
+    }
+    inline const VNInfo *getValNumInfo(unsigned ValNo) const {
+      return valnos[ValNo];
+    }
+
+    /// containsValue - Returns true if VNI belongs to this range.
+    bool containsValue(const VNInfo *VNI) const {
+      return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
+    }
+
+    /// getNextValue - Create a new value number and return it.  MIIdx specifies
+    /// the instruction that defines the value number.
+    VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
+      valnos.push_back(VNI);
+      return VNI;
+    }
+
+    /// createDeadDef - Make sure the range has a value defined at Def.
+    /// If one already exists, return it. Otherwise allocate a new value and
+    /// add liveness for a dead def.
+    VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+
+    /// Create a def of value @p VNI. Return @p VNI. If there already exists
+    /// a definition at VNI->def, the value defined there must be @p VNI.
+    VNInfo *createDeadDef(VNInfo *VNI);
+
+    /// Create a copy of the given value. The new value will be identical except
+    /// for the Value number.
+    VNInfo *createValueCopy(const VNInfo *orig,
+                            VNInfo::Allocator &VNInfoAllocator) {
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
+      valnos.push_back(VNI);
+      return VNI;
+    }
+
+    /// RenumberValues - Renumber all values in order of appearance and remove
+    /// unused values.
+    void RenumberValues();
+
+    /// MergeValueNumberInto - This method is called when two value numbers
+    /// are found to be equivalent.  This eliminates V1, replacing all
+    /// segments with the V1 value number with the V2 value number.  This can
+    /// cause merging of V1/V2 value numbers and compaction of the value space.
+    VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
+
+    /// Merge all of the live segments of a specific val# in RHS into this live
+    /// range as the specified value number. The segments in RHS are allowed
+    /// to overlap with segments in the current range; the value numbers of
+    /// the overlapped live segments are replaced with the specified value
+    /// number.
+    void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo);
+
+    /// MergeValueInAsValue - Merge all of the segments of a specific val#
+    /// in RHS into this live range as the specified value number.
+    /// The segments in RHS are allowed to overlap with segments in the
+    /// current range, but only if the overlapping segments have the
+    /// specified value number.
+    void MergeValueInAsValue(const LiveRange &RHS,
+                             const VNInfo *RHSValNo, VNInfo *LHSValNo);
+
+    bool empty() const { return segments.empty(); }
+
+    /// beginIndex - Return the lowest numbered slot covered.
+    SlotIndex beginIndex() const {
+      assert(!empty() && "Call to beginIndex() on empty range.");
+      return segments.front().start;
+    }
+
+    /// endIndex - Return the maximum point of the whole range, exclusive.
+    SlotIndex endIndex() const {
+      assert(!empty() && "Call to endIndex() on empty range.");
+      return segments.back().end;
+    }
+
+    bool expiredAt(SlotIndex index) const {
+      return index >= endIndex();
+    }
+
+    bool liveAt(SlotIndex index) const {
+      const_iterator r = find(index);
+      return r != end() && r->start <= index;
+    }
+
+    /// Return the segment that contains the specified index, or null if there
+    /// is none.
+    const Segment *getSegmentContaining(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : &*I;
+    }
+
+    /// Return the live segment that contains the specified index, or null if
+    /// there is none.
+    Segment *getSegmentContaining(SlotIndex Idx) {
+      iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : &*I;
+    }
+
+    /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+    VNInfo *getVNInfoAt(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : I->valno;
+    }
+
+    /// getVNInfoBefore - Return the VNInfo that is live up to but not
+    /// necessarily including Idx, or NULL. Use this to find the reaching def
+    /// used by an instruction at this SlotIndex position.
+    VNInfo *getVNInfoBefore(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
+      return I == end() ? nullptr : I->valno;
+    }
+
+    /// Return an iterator to the segment that contains the specified index, or
+    /// end() if there is none.
+    iterator FindSegmentContaining(SlotIndex Idx) {
+      iterator I = find(Idx);
+      return I != end() && I->start <= Idx ? I : end();
+    }
+
+    const_iterator FindSegmentContaining(SlotIndex Idx) const {
+      const_iterator I = find(Idx);
+      return I != end() && I->start <= Idx ? I : end();
+    }
+
+    /// overlaps - Return true if the intersection of the two live ranges is
+    /// not empty.
+    bool overlaps(const LiveRange &other) const {
+      if (other.empty())
+        return false;
+      return overlapsFrom(other, other.begin());
+    }
+
+    /// overlaps - Return true if the two ranges have overlapping segments
+    /// that are not coalescable according to CP.
+    ///
+    /// Overlapping segments where one range is defined by a coalescable
+    /// copy are allowed.
+    bool overlaps(const LiveRange &Other, const CoalescerPair &CP,
+                  const SlotIndexes&) const;
+
+    /// overlaps - Return true if the live range overlaps an interval specified
+    /// by [Start, End).
+    bool overlaps(SlotIndex Start, SlotIndex End) const;
+
+    /// overlapsFrom - Return true if the intersection of the two live ranges
+    /// is not empty.  The specified iterator is a hint that we can begin
+    /// scanning the Other range starting at I.
+    bool overlapsFrom(const LiveRange &Other, const_iterator I) const;
+
+    /// Returns true if all segments of the @p Other live range are completely
+    /// covered by this live range.
+    /// Adjacent live ranges do not affect the covering: the live range
+    /// [1,5](5,10] covers (3,7].
+    bool covers(const LiveRange &Other) const;
+
+    /// Add the specified Segment to this range, merging segments as
+    /// appropriate.  This returns an iterator to the inserted segment (which
+    /// may have grown since it was inserted).
+    iterator addSegment(Segment S);
+
+    /// Attempt to extend a value defined after @p StartIdx to include @p Use.
+    /// Both @p StartIdx and @p Use should be in the same basic block. In case
+    /// of subranges, an extension could be prevented by an explicit "undef"
+    /// caused by a <def,read-undef> on a non-overlapping lane. The locations
+    /// of such "undefs" should be provided in @p Undefs.
+    /// The return value is a pair: the first element is VNInfo of the value
+    /// that was extended (possibly nullptr), the second is a boolean value
+    /// indicating whether an "undef" was encountered.
+    /// If this range is live before @p Use in the basic block that starts at
+    /// @p StartIdx, and there is no intervening "undef", extend it to be live
+    /// up to @p Use, and return the pair {value, false}. If there is no
+    /// segment before @p Use and there is no "undef" between @p StartIdx and
+    /// @p Use, return {nullptr, false}. If there is an "undef" before @p Use,
+    /// return {nullptr, true}.
+    std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
+        SlotIndex StartIdx, SlotIndex Use);
+
+    /// Simplified version of the above "extendInBlock", which assumes that
+    /// no register lanes are undefined by <def,read-undef> operands.
+    /// If this range is live before @p Kill in the basic block that starts
+    /// at @p StartIdx, extend it to be live up to @p Kill, and return the
+    /// value. If there is no segment before @p Kill, return nullptr.
+    VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Kill);
+
+    /// join - Join two live ranges (this, and other) together.  This applies
+    /// mappings to the value numbers in the LHS/RHS ranges as specified.  If
+    /// the ranges are not joinable, this aborts.
+    void join(LiveRange &Other,
+              const int *ValNoAssignments,
+              const int *RHSValNoAssignments,
+              SmallVectorImpl<VNInfo *> &NewVNInfo);
+
+    /// True iff this live range is a single segment that lies between the
+    /// specified boundaries, exclusively. Vregs live across a backedge are not
+    /// considered local. The boundaries are expected to lie within an extended
+    /// basic block, so vregs that are not live out should contain no holes.
+    bool isLocal(SlotIndex Start, SlotIndex End) const {
+      return beginIndex() > Start.getBaseIndex() &&
+        endIndex() < End.getBoundaryIndex();
+    }
+
+    /// Remove the specified segment from this range.  Note that the segment
+    /// must be a single Segment in its entirety.
+    void removeSegment(SlotIndex Start, SlotIndex End,
+                       bool RemoveDeadValNo = false);
+
+    void removeSegment(Segment S, bool RemoveDeadValNo = false) {
+      removeSegment(S.start, S.end, RemoveDeadValNo);
+    }
+
+    /// Remove segment pointed to by iterator @p I from this range.  This does
+    /// not remove dead value numbers.
+    iterator removeSegment(iterator I) {
+      return segments.erase(I);
+    }
+
+    /// Query Liveness at Idx.
+    /// The sub-instruction slot of Idx doesn't matter, only the instruction
+    /// it refers to is considered.
+    LiveQueryResult Query(SlotIndex Idx) const {
+      // Find the segment that enters the instruction.
+      const_iterator I = find(Idx.getBaseIndex());
+      const_iterator E = end();
+      if (I == E)
+        return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
+
+      // Is this an instruction live-in segment?
+      // If Idx is the start index of a basic block, include live-in segments
+      // that start at Idx.getBaseIndex().
+      VNInfo *EarlyVal = nullptr;
+      VNInfo *LateVal  = nullptr;
+      SlotIndex EndPoint;
+      bool Kill = false;
+      if (I->start <= Idx.getBaseIndex()) {
+        EarlyVal = I->valno;
+        EndPoint = I->end;
+        // Move to the potentially live-out segment.
+        if (SlotIndex::isSameInstr(Idx, I->end)) {
+          Kill = true;
+          if (++I == E)
+            return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+        }
+        // Special case: A PHIDef value can have its def in the middle of a
+        // segment if the value happens to be live out of the layout
+        // predecessor.
+        // Such a value is not live-in.
+        if (EarlyVal->def == Idx.getBaseIndex())
+          EarlyVal = nullptr;
+      }
+      // I now points to the segment that may be live-through, or defined by
+      // this instr. Ignore segments starting after the current instr.
+      if (!SlotIndex::isEarlierInstr(Idx, I->start)) {
+        LateVal = I->valno;
+        EndPoint = I->end;
+      }
+      return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+    }
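+
+    // Illustrative sketch (Idx is an assumed SlotIndex of an instruction):
+    //
+    //   LiveQueryResult LRQ = Query(Idx);
+    //   if (VNInfo *DefVNI = LRQ.valueDefined()) {
+    //     // The instruction defines a value; LRQ.isDeadDef() tells
+    //     // whether that def is dead.
+    //   }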
+
+    /// removeValNo - Remove all the segments defined by the specified value#.
+    /// Also remove the value# from value# list.
+    void removeValNo(VNInfo *ValNo);
+
+    /// Returns true if the live range is zero length, i.e. no live segments
+    /// span instructions. It doesn't pay to spill such a range.
+    bool isZeroLength(SlotIndexes *Indexes) const {
+      for (const Segment &S : segments)
+        if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() <
+            S.end.getBaseIndex())
+          return false;
+      return true;
+    }
+
+    // Returns true if any segment in the live range contains any of the
+    // provided slot indexes.  Slots which occur in holes between
+    // segments will not cause the function to return true.
+    bool isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const;
+
+    bool operator<(const LiveRange& other) const {
+      const SlotIndex &thisIndex = beginIndex();
+      const SlotIndex &otherIndex = other.beginIndex();
+      return thisIndex < otherIndex;
+    }
+
+    /// Returns true if there is an explicit "undef" between @p Begin and
+    /// @p End.
+    bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin,
+                   SlotIndex End) const {
+      return std::any_of(Undefs.begin(), Undefs.end(),
+                [Begin,End] (SlotIndex Idx) -> bool {
+                  return Begin <= Idx && Idx < End;
+                });
+    }
+
+    /// Flush the segment set into the regular segment vector.
+    /// This method must be called after the live range has been created, if
+    /// use of the segment set was activated in the live range's constructor.
+    void flushSegmentSet();
+
+    void print(raw_ostream &OS) const;
+    void dump() const;
+
+    /// \brief Walk the range and assert if any invariants fail to hold.
+    ///
+    /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+    void verify() const {}
+#else
+    void verify() const;
+#endif
+
+  protected:
+    /// Append a segment to the list of segments.
+    void append(const LiveRange::Segment S);
+
+  private:
+    friend class LiveRangeUpdater;
+    void addSegmentToSet(Segment S);
+    void markValNoForDeletion(VNInfo *V);
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
+    LR.print(OS);
+    return OS;
+  }
+
+  /// LiveInterval - This class represents the liveness of a register,
+  /// or stack slot.
+  class LiveInterval : public LiveRange {
+  public:
+    using super = LiveRange;
+
+    /// A live range for subregisters. The LaneMask specifies which parts of the
+    /// super register are covered by the interval.
+    /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
+    class SubRange : public LiveRange {
+    public:
+      SubRange *Next = nullptr;
+      LaneBitmask LaneMask;
+
+      /// Constructs a new SubRange object.
+      SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}
+
+      /// Constructs a new SubRange object by copying liveness from @p Other.
+      SubRange(LaneBitmask LaneMask, const LiveRange &Other,
+               BumpPtrAllocator &Allocator)
+        : LiveRange(Other, Allocator), LaneMask(LaneMask) {}
+
+      void print(raw_ostream &OS) const;
+      void dump() const;
+    };
+
+  private:
+    SubRange *SubRanges = nullptr; ///< Singly linked list of subregister live
+                                   /// ranges.
+
+  public:
+    const unsigned reg;  // the register or stack slot of this interval.
+    float weight;        // weight of this interval
+
+    LiveInterval(unsigned Reg, float Weight) : reg(Reg), weight(Weight) {}
+
+    ~LiveInterval() {
+      clearSubRanges();
+    }
+
+    template<typename T>
+    class SingleLinkedListIterator {
+      T *P;
+
+    public:
+      SingleLinkedListIterator<T>(T *P) : P(P) {}
+
+      SingleLinkedListIterator<T> &operator++() {
+        P = P->Next;
+        return *this;
+      }
+      SingleLinkedListIterator<T> operator++(int) {
+        SingleLinkedListIterator res = *this;
+        ++*this;
+        return res;
+      }
+      bool operator!=(const SingleLinkedListIterator<T> &Other) {
+        return P != Other.operator->();
+      }
+      bool operator==(const SingleLinkedListIterator<T> &Other) {
+        return P == Other.operator->();
+      }
+      T &operator*() const {
+        return *P;
+      }
+      T *operator->() const {
+        return P;
+      }
+    };
+
+    using subrange_iterator = SingleLinkedListIterator<SubRange>;
+    using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;
+
+    subrange_iterator subrange_begin() {
+      return subrange_iterator(SubRanges);
+    }
+    subrange_iterator subrange_end() {
+      return subrange_iterator(nullptr);
+    }
+
+    const_subrange_iterator subrange_begin() const {
+      return const_subrange_iterator(SubRanges);
+    }
+    const_subrange_iterator subrange_end() const {
+      return const_subrange_iterator(nullptr);
+    }
+
+    iterator_range<subrange_iterator> subranges() {
+      return make_range(subrange_begin(), subrange_end());
+    }
+
+    iterator_range<const_subrange_iterator> subranges() const {
+      return make_range(subrange_begin(), subrange_end());
+    }
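+
+    // Illustrative sketch (LI is an assumed LiveInterval carrying
+    // subregister liveness):
+    //
+    //   for (const LiveInterval::SubRange &SR : LI.subranges()) {
+    //     // SR.LaneMask tells which lanes of LI.reg this range covers.
+    //   }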
+
+    /// Creates a new empty subregister live range. The range is added at the
+    /// beginning of the subrange list; subrange iterators stay valid.
+    SubRange *createSubRange(BumpPtrAllocator &Allocator,
+                             LaneBitmask LaneMask) {
+      SubRange *Range = new (Allocator) SubRange(LaneMask);
+      appendSubRange(Range);
+      return Range;
+    }
+
+    /// Like createSubRange() but the new range is filled with a copy of the
+    /// liveness information in @p CopyFrom.
+    SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator,
+                                 LaneBitmask LaneMask,
+                                 const LiveRange &CopyFrom) {
+      SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator);
+      appendSubRange(Range);
+      return Range;
+    }
+
+    /// Returns true if subregister liveness information is available.
+    bool hasSubRanges() const {
+      return SubRanges != nullptr;
+    }
+
+    /// Removes all subregister liveness information.
+    void clearSubRanges();
+
+    /// Removes all subranges without any segments (subranges without segments
+    /// are not considered valid and should only exist temporarily).
+    void removeEmptySubRanges();
+
+    /// getSize - Returns the sum of the sizes of all the LiveRange's
+    /// segments.
+    unsigned getSize() const;
+
+    /// isSpillable - Can this interval be spilled?
+    bool isSpillable() const {
+      return weight != huge_valf;
+    }
+
+    /// markNotSpillable - Mark interval as not spillable
+    void markNotSpillable() {
+      weight = huge_valf;
+    }
+
+    /// For a given lane mask @p LaneMask, compute indexes at which the
+    /// lane is marked undefined by subregister <def,read-undef> definitions.
+    void computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
+                               LaneBitmask LaneMask,
+                               const MachineRegisterInfo &MRI,
+                               const SlotIndexes &Indexes) const;
+
+    /// Refines the subranges to support \p LaneMask. This may only be called
+    /// for LI.hasSubRanges() == true. Subregister ranges are split or created
+    /// until \p LaneMask can be matched exactly. \p Mod is executed on the
+    /// matching subranges.
+    ///
+    /// Example:
+    ///    Given an interval with subranges with lanemasks L0F00, L00F0 and
+    ///    L000F, refining for mask L0018. Will split the L00F0 lane into
+    ///    L00E0 and L0010 and the L000F lane into L0007 and L0008. The Mod
+    ///    function will be applied to the L0010 and L0008 subranges.
+    void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask,
+                         std::function<void(LiveInterval::SubRange&)> Mod);
+
+    bool operator<(const LiveInterval& other) const {
+      const SlotIndex &thisIndex = beginIndex();
+      const SlotIndex &otherIndex = other.beginIndex();
+      return std::tie(thisIndex, reg) < std::tie(otherIndex, other.reg);
+    }
+
+    void print(raw_ostream &OS) const;
+    void dump() const;
+
+    /// \brief Walks the interval and assert if any invariants fail to hold.
+    ///
+    /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+    void verify(const MachineRegisterInfo *MRI = nullptr) const {}
+#else
+    void verify(const MachineRegisterInfo *MRI = nullptr) const;
+#endif
+
+  private:
+    /// Appends @p Range to SubRanges list.
+    void appendSubRange(SubRange *Range) {
+      Range->Next = SubRanges;
+      SubRanges = Range;
+    }
+
+    /// Free memory held by SubRange.
+    void freeSubRange(SubRange *S);
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS,
+                                 const LiveInterval::SubRange &SR) {
+    SR.print(OS);
+    return OS;
+  }
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
+    LI.print(OS);
+    return OS;
+  }
+
+  raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S);
+
+  inline bool operator<(SlotIndex V, const LiveRange::Segment &S) {
+    return V < S.start;
+  }
+
+  inline bool operator<(const LiveRange::Segment &S, SlotIndex V) {
+    return S.start < V;
+  }
+
+  /// Helper class for performant LiveRange bulk updates.
+  ///
+  /// Calling LiveRange::addSegment() repeatedly can be expensive on large
+  /// live ranges because segments after the insertion point may need to be
+  /// shifted. The LiveRangeUpdater class can defer the shifting when adding
+  /// many segments in order.
+  ///
+  /// The LiveRange will be in an invalid state until flush() is called.
+  class LiveRangeUpdater {
+    LiveRange *LR;
+    SlotIndex LastStart;
+    LiveRange::iterator WriteI;
+    LiveRange::iterator ReadI;
+    SmallVector<LiveRange::Segment, 16> Spills;
+    void mergeSpills();
+
+  public:
+    /// Create a LiveRangeUpdater for adding segments to LR.
+    /// LR will temporarily be in an invalid state until flush() is called.
+    LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
+
+    ~LiveRangeUpdater() { flush(); }
+
+    /// Add a segment to LR and coalesce when possible, just like
+    /// LR.addSegment(). Segments should be added in increasing start order for
+    /// best performance.
+    void add(LiveRange::Segment);
+
+    void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
+      add(LiveRange::Segment(Start, End, VNI));
+    }
+
+    /// Return true if the LR is currently in an invalid state, and flush()
+    /// needs to be called.
+    bool isDirty() const { return LastStart.isValid(); }
+
+    /// Flush the updater state to LR so it is valid and contains all added
+    /// segments.
+    void flush();
+
+    /// Select a different destination live range.
+    void setDest(LiveRange *lr) {
+      if (LR != lr && isDirty())
+        flush();
+      LR = lr;
+    }
+
+    /// Get the current destination live range.
+    LiveRange *getDest() const { return LR; }
+
+    void dump() const;
+    void print(raw_ostream&) const;
+  };
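+
+  // Illustrative sketch (LR, VNI, and the slot indexes A < B < C < D are
+  // assumed; segments are added in increasing start order for best
+  // performance):
+  //
+  //   {
+  //     LiveRangeUpdater Updater(&LR);
+  //     Updater.add(A, B, VNI);
+  //     Updater.add(C, D, VNI);
+  //   } // The destructor calls flush(), leaving LR valid again.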
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
+    X.print(OS);
+    return OS;
+  }
+
+  /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
+  /// LiveInterval into equivalence classes of connected components. A
+  /// LiveInterval that has multiple connected components can be broken into
+  /// multiple LiveIntervals.
+  ///
+  /// Given a LiveInterval that may have multiple connected components, run:
+  ///
+  ///   unsigned numComps = ConEQ.Classify(LI);
+  ///   if (numComps > 1) {
+  ///     // allocate numComps-1 new LiveIntervals into LIS[1..]
+  ///     ConEQ.Distribute(LIS);
+  ///   }
+
+  class ConnectedVNInfoEqClasses {
+    LiveIntervals &LIS;
+    IntEqClasses EqClass;
+
+  public:
+    explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}
+
+    /// Classify the values in \p LR into connected components.
+    /// Returns the number of connected components.
+    unsigned Classify(const LiveRange &LR);
+
+    /// getEqClass - Classify creates equivalence classes numbered 0..N. Return
+    /// the equivalence class assigned to the VNI.
+    unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }
+
+    /// Distribute values in \p LI into separate LiveIntervals, one
+    /// for each connected component. LIV must have an empty LiveInterval for
+    /// each additional connected component. The first connected component is
+    /// left in \p LI.
+    void Distribute(LiveInterval &LI, LiveInterval *LIV[],
+                    MachineRegisterInfo &MRI);
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVAL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
new file mode 100644
index 0000000..b922e54
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -0,0 +1,199 @@
+//===- LiveIntervalUnion.h - Live interval union data struct ---*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LiveIntervalUnion is a union of live segments across multiple live virtual
+// registers. This may be used during coalescing to represent a congruence
+// class, or during register allocation to model liveness of a physical
+// register.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
+#define LLVM_CODEGEN_LIVEINTERVALUNION_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+class raw_ostream;
+class TargetRegisterInfo;
+
+#ifndef NDEBUG
+// forward declaration
+template <unsigned Element> class SparseBitVector;
+
+using LiveVirtRegBitSet = SparseBitVector<128>;
+#endif
+
+/// Union of live intervals that are strong candidates for coalescing into a
+/// single register (either physical or virtual depending on the context).  We
+/// expect the constituent live intervals to be disjoint, although we may
+/// eventually make exceptions to handle value-based interference.
+class LiveIntervalUnion {
+  // A set of live virtual register segments that supports fast insertion,
+  // intersection, and removal.
+  // Mapping SlotIndex intervals to virtual register numbers.
+  using LiveSegments = IntervalMap<SlotIndex, LiveInterval*>;
+
+public:
+  // SegmentIter can advance to the next segment ordered by starting position
+  // which may belong to a different live virtual register. We also must be able
+  // to reach the current segment's containing virtual register.
+  using SegmentIter = LiveSegments::iterator;
+
+  /// Const version of SegmentIter.
+  using ConstSegmentIter = LiveSegments::const_iterator;
+
+  // LiveIntervalUnions share an external allocator.
+  using Allocator = LiveSegments::Allocator;
+
+private:
+  unsigned Tag = 0;       // unique tag for current contents.
+  LiveSegments Segments;  // union of virtual reg segments
+
+public:
+  explicit LiveIntervalUnion(Allocator &a) : Segments(a) {}
+
+  // Iterate over all segments in the union of live virtual registers ordered
+  // by their starting position.
+  SegmentIter begin() { return Segments.begin(); }
+  SegmentIter end() { return Segments.end(); }
+  SegmentIter find(SlotIndex x) { return Segments.find(x); }
+  ConstSegmentIter begin() const { return Segments.begin(); }
+  ConstSegmentIter end() const { return Segments.end(); }
+  ConstSegmentIter find(SlotIndex x) const { return Segments.find(x); }
+
+  bool empty() const { return Segments.empty(); }
+  SlotIndex startIndex() const { return Segments.start(); }
+
+  // Provide public access to the underlying map to allow overlap iteration.
+  using Map = LiveSegments;
+  const Map &getMap() const { return Segments; }
+
+  /// getTag - Return an opaque tag representing the current state of the union.
+  unsigned getTag() const { return Tag; }
+
+  /// changedSince - Return true if the union has changed since getTag
+  /// returned tag.
+  bool changedSince(unsigned tag) const { return tag != Tag; }
+
+  // Add a live virtual register to this union and merge its segments.
+  void unify(LiveInterval &VirtReg, const LiveRange &Range);
+
+  // Remove a live virtual register's segments from this union.
+  void extract(LiveInterval &VirtReg, const LiveRange &Range);
+
+  // Remove all inserted virtual registers.
+  void clear() { Segments.clear(); ++Tag; }
+
+  // Print union, using TRI to translate register names
+  void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
+
+#ifndef NDEBUG
+  // Verify the live intervals in this union and add them to the visited set.
+  void verify(LiveVirtRegBitSet& VisitedVRegs);
+#endif
+
+  /// Query interferences between a single live virtual register and a live
+  /// interval union.
+  class Query {
+    const LiveIntervalUnion *LiveUnion = nullptr;
+    const LiveRange *LR = nullptr;
+    LiveRange::const_iterator LRI;  ///< current position in LR
+    ConstSegmentIter LiveUnionI;    ///< current position in LiveUnion
+    SmallVector<LiveInterval*,4> InterferingVRegs;
+    bool CheckedFirstInterference = false;
+    bool SeenAllInterferences = false;
+    unsigned Tag = 0;
+    unsigned UserTag = 0;
+
+    void reset(unsigned NewUserTag, const LiveRange &NewLR,
+               const LiveIntervalUnion &NewLiveUnion) {
+      LiveUnion = &NewLiveUnion;
+      LR = &NewLR;
+      InterferingVRegs.clear();
+      CheckedFirstInterference = false;
+      SeenAllInterferences = false;
+      Tag = NewLiveUnion.getTag();
+      UserTag = NewUserTag;
+    }
+
+  public:
+    Query() = default;
+    Query(const LiveRange &LR, const LiveIntervalUnion &LIU):
+      LiveUnion(&LIU), LR(&LR) {}
+    Query(const Query &) = delete;
+    Query &operator=(const Query &) = delete;
+
+    void init(unsigned NewUserTag, const LiveRange &NewLR,
+              const LiveIntervalUnion &NewLiveUnion) {
+      if (UserTag == NewUserTag && LR == &NewLR && LiveUnion == &NewLiveUnion &&
+          !NewLiveUnion.changedSince(Tag)) {
+        // Retain cached results, e.g. firstInterference.
+        return;
+      }
+      reset(NewUserTag, NewLR, NewLiveUnion);
+    }
+
+    // Does this live virtual register interfere with the union?
+    bool checkInterference() { return collectInterferingVRegs(1); }
+
+    // Count the virtual registers in this union that interfere with this
+    // query's live virtual register, up to maxInterferingRegs.
+    unsigned collectInterferingVRegs(
+        unsigned MaxInterferingRegs = std::numeric_limits<unsigned>::max());
+
+    // Was this virtual register visited during collectInterferingVRegs?
+    bool isSeenInterference(LiveInterval *VReg) const;
+
+    // Did collectInterferingVRegs collect all interferences?
+    bool seenAllInterferences() const { return SeenAllInterferences; }
+
+    // Vector generated by collectInterferingVRegs.
+    const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
+      return InterferingVRegs;
+    }
+  };
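+
+  // Illustrative sketch of an interference check (VirtRegLR, LIU, and
+  // UserTag are assumed to exist):
+  //
+  //   LiveIntervalUnion::Query Q;
+  //   Q.init(UserTag, VirtRegLR, LIU);
+  //   if (Q.checkInterference()) {
+  //     Q.collectInterferingVRegs();
+  //     for (LiveInterval *VReg : Q.interferingVRegs()) {
+  //       // VReg's segments overlap VirtRegLR inside LIU.
+  //     }
+  //   }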
+
+  // Array of LiveIntervalUnions.
+  class Array {
+    unsigned Size = 0;
+    LiveIntervalUnion *LIUs = nullptr;
+
+  public:
+    Array() = default;
+    ~Array() { clear(); }
+
+    // Initialize the array to have Size entries.
+    // Reuse an existing allocation if the size matches.
+    void init(LiveIntervalUnion::Allocator&, unsigned Size);
+
+    unsigned size() const { return Size; }
+
+    void clear();
+
+    LiveIntervalUnion& operator[](unsigned idx) {
+      assert(idx <  Size && "idx out of bounds");
+      return LIUs[idx];
+    }
+
+    const LiveIntervalUnion& operator[](unsigned Idx) const {
+      assert(Idx < Size && "Idx out of bounds");
+      return LIUs[Idx];
+    }
+  };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVALUNION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
new file mode 100644
index 0000000..1150f3c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
@@ -0,0 +1,481 @@
+//===- LiveIntervals.h - Live Interval Analysis -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements the LiveInterval analysis pass.  Given some
+/// numbering of each of the machine instructions (in this implementation,
+/// depth-first order), an interval [i, j) is said to be a live interval for
+/// register v if
+/// there is no instruction with number j' > j such that v is live at j' and
+/// there is no instruction with number i' < i such that v is live at i'. In
+/// this implementation intervals can have holes, i.e. an interval might look
+/// like [1,20), [50,65), [1000,1001).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALS_H
+#define LLVM_CODEGEN_LIVEINTERVALS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> UseSegmentSetForPhysRegs;
+
+class BitVector;
+class LiveRangeCalc;
+class MachineBlockFrequencyInfo;
+class MachineDominatorTree;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class TargetInstrInfo;
+class VirtRegMap;
+
+  class LiveIntervals : public MachineFunctionPass {
+    MachineFunction* MF;
+    MachineRegisterInfo* MRI;
+    const TargetRegisterInfo* TRI;
+    const TargetInstrInfo* TII;
+    AliasAnalysis *AA;
+    SlotIndexes* Indexes;
+    MachineDominatorTree *DomTree = nullptr;
+    LiveRangeCalc *LRCalc = nullptr;
+
+    /// Special pool allocator for VNInfo's (LiveInterval val#).
+    VNInfo::Allocator VNInfoAllocator;
+
+    /// Live interval pointers for all the virtual registers.
+    IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
+
+    /// Sorted list of instructions with register mask operands. Always use
+    /// the 'r' slot; RegMasks are normal clobbers, not early clobbers.
+    SmallVector<SlotIndex, 8> RegMaskSlots;
+
+    /// This vector is parallel to RegMaskSlots, it holds a pointer to the
+    /// corresponding register mask.  This pointer can be recomputed as:
+    ///
+    ///   MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
+    ///   unsigned OpNum = findRegMaskOperand(MI);
+    ///   RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
+    ///
+    /// This is kept in a separate vector partly because some standard
+    /// libraries don't support lower_bound() with mixed objects, partly to
+    /// improve locality when searching in RegMaskSlots.
+    /// Also see the comment in LiveRange::find().
+    SmallVector<const uint32_t*, 8> RegMaskBits;
+
+    /// For each basic block number, keep (begin, size) pairs indexing into the
+    /// RegMaskSlots and RegMaskBits arrays.
+    /// Note that basic block numbers may not be layout contiguous, that's why
+    /// we can't just keep track of the first register mask in each basic
+    /// block.
+    SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
+
+    /// Keeps a live range set for each register unit to track fixed physreg
+    /// interference.
+    SmallVector<LiveRange*, 0> RegUnitRanges;
+
+  public:
+    static char ID;
+
+    LiveIntervals();
+    ~LiveIntervals() override;
+
+    /// Calculate the spill weight to assign to a single instruction.
+    static float getSpillWeight(bool isDef, bool isUse,
+                                const MachineBlockFrequencyInfo *MBFI,
+                                const MachineInstr &Instr);
+
+    /// Calculate the spill weight to assign to a single instruction.
+    static float getSpillWeight(bool isDef, bool isUse,
+                                const MachineBlockFrequencyInfo *MBFI,
+                                const MachineBasicBlock *MBB);
+
+    LiveInterval &getInterval(unsigned Reg) {
+      if (hasInterval(Reg))
+        return *VirtRegIntervals[Reg];
+      else
+        return createAndComputeVirtRegInterval(Reg);
+    }
+
+    const LiveInterval &getInterval(unsigned Reg) const {
+      return const_cast<LiveIntervals*>(this)->getInterval(Reg);
+    }
+
+    bool hasInterval(unsigned Reg) const {
+      return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
+    }
+
+    /// Interval creation.
+    LiveInterval &createEmptyInterval(unsigned Reg) {
+      assert(!hasInterval(Reg) && "Interval already exists!");
+      VirtRegIntervals.grow(Reg);
+      VirtRegIntervals[Reg] = createInterval(Reg);
+      return *VirtRegIntervals[Reg];
+    }
+
+    LiveInterval &createAndComputeVirtRegInterval(unsigned Reg) {
+      LiveInterval &LI = createEmptyInterval(Reg);
+      computeVirtRegInterval(LI);
+      return LI;
+    }
+
+    /// Interval removal.
+    void removeInterval(unsigned Reg) {
+      delete VirtRegIntervals[Reg];
+      VirtRegIntervals[Reg] = nullptr;
+    }
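+
+    // Illustrative sketch of the interval lifecycle for an assumed virtual
+    // register Reg:
+    //
+    //   LiveInterval &LI = getInterval(Reg); // computed on first request
+    //   // ... use LI ...
+    //   removeInterval(Reg);                 // hasInterval(Reg) is now false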
+
+    /// Given a register and an instruction, adds a live segment from that
+    /// instruction to the end of its MBB.
+    LiveInterval::Segment addSegmentToEndOfBlock(unsigned reg,
+                                                 MachineInstr &startInst);
+
+    /// After removing some uses of a register, shrink its live range to just
+    /// the remaining uses. This method does not compute reaching defs for new
+    /// uses, and it doesn't remove dead defs.
+    /// Dead PHIDef values are marked as unused. New dead machine instructions
+    /// are added to the dead vector. Returns true if the interval may have been
+    /// separated into multiple connected components.
+    bool shrinkToUses(LiveInterval *li,
+                      SmallVectorImpl<MachineInstr*> *dead = nullptr);
+
+    /// Specialized version of
+    /// shrinkToUses(LiveInterval *li, SmallVectorImpl<MachineInstr*> *dead)
+    /// that works on a subregister live range and only looks at uses matching
+    /// the lane mask of the subregister range.
+    /// This may leave the subrange empty which needs to be cleaned up with
+    /// LiveInterval::removeEmptySubranges() afterwards.
+    void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
+
+    /// Extend the live range \p LR to reach all points in \p Indices. The
+    /// points in the \p Indices array must be jointly dominated by the union
+    /// of the existing defs in \p LR and points in \p Undefs.
+    ///
+    /// PHI-defs are added as needed to maintain SSA form.
+    ///
+    /// If a SlotIndex in \p Indices is the end index of a basic block, \p LR
+    /// will be extended to be live out of the basic block.
+    /// If a SlotIndex in \p Indices is jointly dominated only by points in
+    /// \p Undefs, the live range will not be extended to that point.
+    ///
+    /// See also LiveRangeCalc::extend().
+    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices,
+                         ArrayRef<SlotIndex> Undefs);
+
+    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices) {
+      extendToIndices(LR, Indices, /*Undefs=*/{});
+    }
+
+    /// If \p LR has a live value at \p Kill, prune its live range by removing
+    /// any liveness reachable from Kill. Add live range end points to
+    /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
+    /// value's live range.
+    ///
+    /// Calling pruneValue() and extendToIndices() can be used to reconstruct
+    /// SSA form after adding defs to a virtual register.
+    void pruneValue(LiveRange &LR, SlotIndex Kill,
+                    SmallVectorImpl<SlotIndex> *EndPoints);
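+
+    // Illustrative sketch of the prune/extend pairing described above
+    // (LR and Kill are assumed):
+    //
+    //   SmallVector<SlotIndex, 8> EndPoints;
+    //   pruneValue(LR, Kill, &EndPoints);
+    //   // ... insert a new def covering Kill ...
+    //   extendToIndices(LR, EndPoints);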
+
+    /// This function should not be used. Its intent is to tell you that
+    /// you are doing something wrong if you call pruneValue directly on a
+    /// LiveInterval. Indeed, you are supposed to call pruneValue on the main
+    /// LiveRange and on the LiveRanges of all the subranges, if any.
+    LLVM_ATTRIBUTE_UNUSED void pruneValue(LiveInterval &, SlotIndex,
+                                          SmallVectorImpl<SlotIndex> *) {
+      llvm_unreachable(
+          "Use pruneValue on the main LiveRange and on each subrange");
+    }
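+
+    // An illustrative sketch of the intended pattern (surrounding context
+    // assumed; the cast selects the LiveRange overload of pruneValue above):
+    //   SmallVector<SlotIndex, 8> EndPoints;
+    //   LIS.pruneValue(*static_cast<LiveRange *>(&LI), Kill, &EndPoints);
+    //   for (LiveInterval::SubRange &S : LI.subranges())
+    //     LIS.pruneValue(S, Kill, nullptr);
+    //   // ... insert the new def at Kill ...
+    //   LIS.extendToIndices(LI, EndPoints);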
+
+    SlotIndexes *getSlotIndexes() const {
+      return Indexes;
+    }
+
+    AliasAnalysis *getAliasAnalysis() const {
+      return AA;
+    }
+
+    /// Returns true if the specified machine instr has been removed or was
+    /// never entered in the map.
+    bool isNotInMIMap(const MachineInstr &Instr) const {
+      return !Indexes->hasIndex(Instr);
+    }
+
+    /// Returns the base index of the given instruction.
+    SlotIndex getInstructionIndex(const MachineInstr &Instr) const {
+      return Indexes->getInstructionIndex(Instr);
+    }
+
+    /// Returns the instruction associated with the given index.
+    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+      return Indexes->getInstructionFromIndex(index);
+    }
+
+    /// Return the first index in the given basic block.
+    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+      return Indexes->getMBBStartIdx(mbb);
+    }
+
+    /// Return the last index in the given basic block.
+    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+      return Indexes->getMBBEndIdx(mbb);
+    }
+
+    bool isLiveInToMBB(const LiveRange &LR,
+                       const MachineBasicBlock *mbb) const {
+      return LR.liveAt(getMBBStartIdx(mbb));
+    }
+
+    bool isLiveOutOfMBB(const LiveRange &LR,
+                        const MachineBasicBlock *mbb) const {
+      return LR.liveAt(getMBBEndIdx(mbb).getPrevSlot());
+    }
+
+    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+      return Indexes->getMBBFromIndex(index);
+    }
+
+    void insertMBBInMaps(MachineBasicBlock *MBB) {
+      Indexes->insertMBBInMaps(MBB);
+      assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
+             "Blocks must be added in order.");
+      RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
+    }
+
+    SlotIndex InsertMachineInstrInMaps(MachineInstr &MI) {
+      return Indexes->insertMachineInstrInMaps(MI);
+    }
+
+    void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
+                                       MachineBasicBlock::iterator E) {
+      for (MachineBasicBlock::iterator I = B; I != E; ++I)
+        Indexes->insertMachineInstrInMaps(*I);
+    }
+
+    void RemoveMachineInstrFromMaps(MachineInstr &MI) {
+      Indexes->removeMachineInstrFromMaps(MI);
+    }
+
+    SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+      return Indexes->replaceMachineInstrInMaps(MI, NewMI);
+    }
+
+    VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override;
+    void releaseMemory() override;
+
+    /// Pass entry point; calculates LiveIntervals.
+    bool runOnMachineFunction(MachineFunction&) override;
+
+    /// Implement the dump method.
+    void print(raw_ostream &O, const Module* = nullptr) const override;
+
+    /// If LI is confined to a single basic block, return a pointer to that
+    /// block.  If LI is live in to or out of any block, return NULL.
+    MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
+
+    /// Returns true if VNI is killed by any PHI-def values in LI.
+    /// This may conservatively return true to avoid expensive computations.
+    bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
+
+    /// Add kill flags to any instruction that kills a virtual register.
+    void addKillFlags(const VirtRegMap*);
+
+    /// Call this method to notify LiveIntervals that instruction \p MI has been
+    /// moved within a basic block. This will update the live intervals for all
+    /// operands of \p MI. Moves between basic blocks are not supported.
+    ///
+    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
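+    ///
+    /// For example (sketch, assumed context):
+    /// \code
+    ///   MBB.splice(InsertPt, &MBB, MI.getIterator()); // move MI within MBB
+    ///   LIS.handleMove(MI);                           // repair liveness
+    /// \endcode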
+    void handleMove(MachineInstr &MI, bool UpdateFlags = false);
+
+    /// Update intervals for operands of \p MI so that they begin/end on the
+    /// SlotIndex for \p BundleStart.
+    ///
+    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+    ///
+    /// Requires MI and BundleStart to have SlotIndexes, and assumes
+    /// existing liveness is accurate. BundleStart should be the first
+    /// instruction in the Bundle.
+    void handleMoveIntoBundle(MachineInstr &MI, MachineInstr &BundleStart,
+                              bool UpdateFlags = false);
+
+    /// Update live intervals for instructions in a range of iterators. It is
+    /// intended for use after target hooks that may insert or remove
+    /// instructions, and is only efficient for a small number of instructions.
+    ///
+    /// OrigRegs is a vector of registers that were originally used by the
+    /// instructions in the range between the two iterators.
+    ///
+    /// Currently, the only changes that are supported are simple removal
+    /// and addition of uses.
+    void repairIntervalsInRange(MachineBasicBlock *MBB,
+                                MachineBasicBlock::iterator Begin,
+                                MachineBasicBlock::iterator End,
+                                ArrayRef<unsigned> OrigRegs);
+
+    // Register mask functions.
+    //
+    // Machine instructions may use a register mask operand to indicate that a
+    // large number of registers are clobbered by the instruction.  This is
+    // typically used for calls.
+    //
+    // For compile time performance reasons, these clobbers are not recorded in
+    // the live intervals for individual physical registers.  Instead,
+    // LiveIntervalAnalysis maintains a sorted list of instructions with
+    // register mask operands.
+
+    /// Returns a sorted array of slot indices of all instructions with
+    /// register mask operands.
+    ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
+
+    /// Returns a sorted array of slot indices of all instructions with register
+    /// mask operands in the basic block numbered \p MBBNum.
+    ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
+      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+      return getRegMaskSlots().slice(P.first, P.second);
+    }
+
+    /// Returns an array of register mask pointers corresponding to
+    /// getRegMaskSlots().
+    ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
+
+    /// Returns an array of mask pointers corresponding to
+    /// getRegMaskSlotsInBlock(MBBNum).
+    ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
+      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+      return getRegMaskBits().slice(P.first, P.second);
+    }
+
+    /// Test if \p LI is live across any register mask instructions, and
+    /// compute a bit mask of physical registers that are not clobbered by any
+    /// of them.
+    ///
+    /// Returns false if \p LI doesn't cross any register mask instructions. In
+    /// that case, the bit vector is not filled in.
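+    ///
+    /// Illustrative use in an allocator loop (names assumed):
+    /// \code
+    ///   BitVector UsableRegs;
+    ///   if (LIS.checkRegMaskInterference(VirtReg, UsableRegs) &&
+    ///       !UsableRegs.test(PhysReg))
+    ///     continue; // PhysReg is clobbered by a regmask inside VirtReg
+    /// \endcode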
+    bool checkRegMaskInterference(LiveInterval &LI,
+                                  BitVector &UsableRegs);
+
+    // Register unit functions.
+    //
+    // Fixed interference occurs when MachineInstrs use physregs directly
+    // instead of virtual registers. This typically happens when passing
+    // arguments to a function call, or when instructions require operands in
+    // fixed registers.
+    //
+    // Each physreg has one or more register units, see MCRegisterInfo. We
+    // track liveness per register unit to handle aliasing registers more
+    // efficiently.
+
+    /// Return the live range for register unit \p Unit. It will be computed if
+    /// it doesn't exist.
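+    ///
+    /// For example (sketch; PhysReg, TRI, and Idx are assumed names): a
+    /// physical register is live at an index if any of its units is live:
+    /// \code
+    ///   for (MCRegUnitIterator Unit(PhysReg, TRI); Unit.isValid(); ++Unit)
+    ///     if (LIS.getRegUnit(*Unit).liveAt(Idx))
+    ///       return true;
+    /// \endcode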
+    LiveRange &getRegUnit(unsigned Unit) {
+      LiveRange *LR = RegUnitRanges[Unit];
+      if (!LR) {
+        // Compute missing ranges on demand.
+        // Use segment set to speed-up initial computation of the live range.
+        RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs);
+        computeRegUnitRange(*LR, Unit);
+      }
+      return *LR;
+    }
+
+    /// Return the live range for register unit \p Unit if it has already been
+    /// computed, or nullptr if it hasn't been computed yet.
+    LiveRange *getCachedRegUnit(unsigned Unit) {
+      return RegUnitRanges[Unit];
+    }
+
+    const LiveRange *getCachedRegUnit(unsigned Unit) const {
+      return RegUnitRanges[Unit];
+    }
+
+    /// Remove computed live range for register unit \p Unit. Subsequent uses
+    /// should rely on on-demand recomputation.
+    void removeRegUnit(unsigned Unit) {
+      delete RegUnitRanges[Unit];
+      RegUnitRanges[Unit] = nullptr;
+    }
+
+    /// Remove value numbers and related live segments starting at position
+    /// \p Pos that are part of any live range of physical register \p Reg or one
+    /// of its subregisters.
+    void removePhysRegDefAt(unsigned Reg, SlotIndex Pos);
+
+    /// Remove value number and related live segments of \p LI and its subranges
+    /// that start at position \p Pos.
+    void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);
+
+    /// Split the connected components in LiveInterval \p LI into separate
+    /// intervals.
+    void splitSeparateComponents(LiveInterval &LI,
+                                 SmallVectorImpl<LiveInterval*> &SplitLIs);
+
+    /// For a live interval \p LI with correct SubRanges, construct matching
+    /// information for the main live range. Expects the main live range to not
+    /// have any segments or value numbers.
+    void constructMainRangeFromSubranges(LiveInterval &LI);
+
+  private:
+    /// Compute live intervals for all virtual registers.
+    void computeVirtRegs();
+
+    /// Compute RegMaskSlots and RegMaskBits.
+    void computeRegMasks();
+
+    /// Walk the values in \p LI and check for dead values:
+    /// - Dead PHIDef values are marked as unused.
+    /// - Dead operands are marked as such.
+    /// - Completely dead machine instructions are added to the \p dead vector
+    ///   if it is not nullptr.
+    /// Returns true if any PHI value numbers have been removed which may
+    /// have separated the interval into multiple connected components.
+    bool computeDeadValues(LiveInterval &LI,
+                           SmallVectorImpl<MachineInstr*> *dead);
+
+    static LiveInterval* createInterval(unsigned Reg);
+
+    void printInstrs(raw_ostream &O) const;
+    void dumpInstrs() const;
+
+    void computeLiveInRegUnits();
+    void computeRegUnitRange(LiveRange&, unsigned Unit);
+    void computeVirtRegInterval(LiveInterval&);
+
+    /// Helper function for repairIntervalsInRange(), walks backwards and
+    /// creates/modifies live segments in \p LR to match the operands found.
+    /// Only full operands or operands with subregisters matching \p LaneMask
+    /// are considered.
+    void repairOldRegInRange(MachineBasicBlock::iterator Begin,
+                             MachineBasicBlock::iterator End,
+                             const SlotIndex endIdx, LiveRange &LR,
+                             unsigned Reg,
+                             LaneBitmask LaneMask = LaneBitmask::getAll());
+
+    class HMEditor;
+  };
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
new file mode 100644
index 0000000..f9aab0d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
@@ -0,0 +1,190 @@
+//===- llvm/CodeGen/LivePhysRegs.h - Live Physical Register Set -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file implements the LivePhysRegs utility for tracking liveness of
+/// physical registers. This can be used for ad-hoc liveness tracking after
+/// register allocation. You can start with the live-ins/live-outs at the
+/// beginning/end of a block and update the information while walking the
+/// instructions inside the block. This implementation tracks the liveness on a
+/// sub-register granularity.
+///
+/// We assume that the high bits of a physical super-register are not preserved
+/// unless the instruction has an implicit-use operand reading the super-
+/// register.
+///
+/// X86 Example:
+/// %ymm0 = ...
+/// %xmm0 = ... (kills %xmm0, all of %xmm0's sub-registers, and %ymm0)
+///
+/// %ymm0 = ...
+/// %xmm0 = ..., implicit %ymm0 (%ymm0 and all its sub-registers are alive)
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
+#define LLVM_CODEGEN_LIVEPHYSREGS_H
+
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cassert>
+#include <utility>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineOperand;
+class MachineRegisterInfo;
+class raw_ostream;
+
+/// \brief A set of physical registers with utility functions to track liveness
+/// when walking backward/forward through a basic block.
+class LivePhysRegs {
+  const TargetRegisterInfo *TRI = nullptr;
+  SparseSet<unsigned> LiveRegs;
+
+public:
+  /// Constructs an uninitialized set. init() needs to be called to initialize it.
+  LivePhysRegs() = default;
+
+  /// Constructs and initializes an empty set.
+  LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
+    LiveRegs.setUniverse(TRI.getNumRegs());
+  }
+
+  LivePhysRegs(const LivePhysRegs&) = delete;
+  LivePhysRegs &operator=(const LivePhysRegs&) = delete;
+
+  /// (re-)initializes and clears the set.
+  void init(const TargetRegisterInfo &TRI) {
+    this->TRI = &TRI;
+    LiveRegs.clear();
+    LiveRegs.setUniverse(TRI.getNumRegs());
+  }
+
+  /// Clears the set.
+  void clear() { LiveRegs.clear(); }
+
+  /// Returns true if the set is empty.
+  bool empty() const { return LiveRegs.empty(); }
+
+  /// Adds a physical register and all its sub-registers to the set.
+  void addReg(unsigned Reg) {
+    assert(TRI && "LivePhysRegs is not initialized.");
+    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+    for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+         SubRegs.isValid(); ++SubRegs)
+      LiveRegs.insert(*SubRegs);
+  }
+
+  /// \brief Removes a physical register, all its sub-registers, and all its
+  /// super-registers from the set.
+  void removeReg(unsigned Reg) {
+    assert(TRI && "LivePhysRegs is not initialized.");
+    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+    for (MCRegAliasIterator R(Reg, TRI, true); R.isValid(); ++R)
+      LiveRegs.erase(*R);
+  }
+
+  /// Removes physical registers clobbered by the regmask operand \p MO.
+  void removeRegsInMask(const MachineOperand &MO,
+        SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
+        nullptr);
+
+  /// \brief Returns true if register \p Reg is contained in the set. This also
+  /// works if only the super register of \p Reg has been defined, because
+  /// addReg() always adds all sub-registers to the set as well.
+  /// Note: This returns false if only some sub-registers are live; use
+  /// available() when searching for a free register.
+  bool contains(unsigned Reg) const { return LiveRegs.count(Reg); }
+
+  /// Returns true if register \p Reg and no aliasing register is in the set.
+  bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
+
+  /// Remove defined registers and regmask kills from the set.
+  void removeDefs(const MachineInstr &MI);
+
+  /// Add uses to the set.
+  void addUses(const MachineInstr &MI);
+
+  /// Simulates liveness when stepping backwards over an instruction (bundle).
+  /// Remove Defs, add uses. This is the recommended way of calculating
+  /// liveness.
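+  ///
+  /// A typical backward scan over a block (sketch):
+  /// \code
+  ///   LiveRegs.addLiveOuts(MBB);
+  ///   for (MachineInstr &MI : llvm::reverse(MBB))
+  ///     LiveRegs.stepBackward(MI);
+  ///   // LiveRegs now contains the registers live-in to MBB.
+  /// \endcode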
+  void stepBackward(const MachineInstr &MI);
+
+  /// Simulates liveness when stepping forward over an instruction (bundle).
+  /// Removes killed uses and adds defs. This is not the recommended way of
+  /// computing liveness, because it depends on accurate kill flags. If
+  /// possible, use stepBackward() instead. The \p Clobbers set will be the
+  /// list of registers either defined or clobbered by a regmask; the operand
+  /// identifies whether each entry is a regmask or a register operand.
+  void stepForward(const MachineInstr &MI,
+        SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
+
+  /// Adds all live-in registers of basic block \p MBB.
+  /// Live in registers are the registers in the blocks live-in list and the
+  /// pristine registers.
+  void addLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds all live-out registers of basic block \p MBB.
+  /// Live out registers are the union of the live-in registers of the successor
+  /// blocks and pristine registers. Live out registers of the end block are the
+  /// callee saved registers.
+  void addLiveOuts(const MachineBasicBlock &MBB);
+
+  /// Adds all live-out registers of basic block \p MBB but skips pristine
+  /// registers.
+  void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
+
+  using const_iterator = SparseSet<unsigned>::const_iterator;
+
+  const_iterator begin() const { return LiveRegs.begin(); }
+  const_iterator end() const { return LiveRegs.end(); }
+
+  /// Prints the currently live registers to \p OS.
+  void print(raw_ostream &OS) const;
+
+  /// Dumps the currently live registers to the debug output.
+  void dump() const;
+
+private:
+  /// \brief Adds live-in registers from basic block \p MBB, taking associated
+  /// lane masks into consideration.
+  void addBlockLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds pristine registers. Pristine registers are callee saved registers
+  /// that are unused in the function.
+  void addPristines(const MachineFunction &MF);
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
+  LR.print(OS);
+  return OS;
+}
+
+/// \brief Computes registers live-in to \p MBB assuming all of its successors
+/// live-in lists are up-to-date. Puts the result into the given LivePhysRegs
+/// instance \p LiveRegs.
+void computeLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB);
+
+/// Recomputes dead and kill flags in \p MBB.
+void recomputeLivenessFlags(MachineBasicBlock &MBB);
+
+/// Adds registers contained in \p LiveRegs to the block live-in list of \p MBB.
+/// Does not add reserved registers.
+void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs);
+
+/// Convenience function combining computeLiveIns() and addLiveIns().
+void computeAndAddLiveIns(LivePhysRegs &LiveRegs,
+                          MachineBasicBlock &MBB);
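+
+// For example (sketch): after creating a new basic block NewMBB (a name
+// assumed here) and wiring up its successors, its live-in list can be
+// populated with:
+//   LivePhysRegs LiveRegs;
+//   computeAndAddLiveIns(LiveRegs, *NewMBB);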
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEPHYSREGS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
new file mode 100644
index 0000000..82b1f0b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
@@ -0,0 +1,258 @@
+//===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//
+// The parent register is never changed. Instead, a number of new virtual
+// registers are created and added to the newRegs vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
+#define LLVM_CODEGEN_LIVERANGEEDIT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineInstr;
+class MachineLoopInfo;
+class MachineOperand;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRangeEdit : private MachineRegisterInfo::Delegate {
+public:
+  /// Callback methods for LiveRangeEdit owners.
+  class Delegate {
+    virtual void anchor();
+
+  public:
+    virtual ~Delegate() = default;
+
+    /// Called immediately before erasing a dead machine instruction.
+    virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
+
+    /// Called when a virtual register is no longer used. Return false to defer
+    /// its deletion from LiveIntervals.
+    virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
+
+    /// Called before shrinking the live range of a virtual register.
+    virtual void LRE_WillShrinkVirtReg(unsigned) {}
+
+    /// Called after cloning a virtual register.
+    /// This is used for new registers representing connected components of Old.
+    virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
+  };
+
+private:
+  LiveInterval *Parent;
+  SmallVectorImpl<unsigned> &NewRegs;
+  MachineRegisterInfo &MRI;
+  LiveIntervals &LIS;
+  VirtRegMap *VRM;
+  const TargetInstrInfo &TII;
+  Delegate *const TheDelegate;
+
+  /// FirstNew - Index of the first register added to NewRegs.
+  const unsigned FirstNew;
+
+  /// ScannedRemattable - true when remattable values have been identified.
+  bool ScannedRemattable = false;
+
+  /// DeadRemats - The saved instructions that are already dead after
+  /// rematerialization but not yet deleted -- to be done in postOptimization.
+  SmallPtrSet<MachineInstr *, 32> *DeadRemats;
+
+  /// Remattable - Values defined by remattable instructions as identified by
+  /// tii.isTriviallyReMaterializable().
+  SmallPtrSet<const VNInfo *, 4> Remattable;
+
+  /// Rematted - Values that were actually rematted, and so need to have their
+  /// live range trimmed or entirely removed.
+  SmallPtrSet<const VNInfo *, 4> Rematted;
+
+  /// scanRemattable - Identify the Parent values that may rematerialize.
+  void scanRemattable(AliasAnalysis *aa);
+
+  /// allUsesAvailableAt - Return true if all registers used by OrigMI at
+  /// OrigIdx are also available with the same value at UseIdx.
+  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+                          SlotIndex UseIdx) const;
+
+  /// foldAsLoad - If LI has a single use and a single def that can be folded as
+  /// a load, eliminate the register by folding the def into the use.
+  bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);
+
+  using ToShrinkSet = SetVector<LiveInterval *, SmallVector<LiveInterval *, 8>,
+                                SmallPtrSet<LiveInterval *, 8>>;
+
+  /// Helper for eliminateDeadDefs.
+  void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
+                        AliasAnalysis *AA);
+
+  /// MachineRegisterInfo callback to notify when new virtual
+  /// registers are created.
+  void MRI_NoteNewVirtualRegister(unsigned VReg) override;
+
+  /// \brief Check if MachineOperand \p MO is a last use/kill either in the
+  /// main live range of \p LI or in one of the matching subregister ranges.
+  bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
+
+  /// Create a new empty interval based on OldReg.
+  LiveInterval &createEmptyIntervalFrom(unsigned OldReg, bool createSubRanges);
+
+public:
+  /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
+  /// @param parent The register being spilled or split.
+  /// @param newRegs List to receive any new registers created. This needn't be
+  ///                empty initially; any existing registers are ignored.
+  /// @param MF The MachineFunction the live range edit is taking place in.
+  /// @param lis The collection of all live intervals in this function.
+  /// @param vrm Map of virtual registers to physical registers for this
+  ///            function.  If NULL, no virtual register map updates will
+  ///            be done.  This could be the case if called before Regalloc.
+  /// @param deadRemats The collection of all instructions that define an
+  ///                   original reg and are dead after remat.
+  LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
+                MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
+                Delegate *delegate = nullptr,
+                SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
+      : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
+        VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
+        FirstNew(newRegs.size()), DeadRemats(deadRemats) {
+    MRI.setDelegate(this);
+  }
+
+  ~LiveRangeEdit() override { MRI.resetDelegate(this); }
+
+  LiveInterval &getParent() const {
+    assert(Parent && "No parent LiveInterval");
+    return *Parent;
+  }
+
+  unsigned getReg() const { return getParent().reg; }
+
+  /// Iterator for accessing the new registers added by this edit.
+  using iterator = SmallVectorImpl<unsigned>::const_iterator;
+  iterator begin() const { return NewRegs.begin() + FirstNew; }
+  iterator end() const { return NewRegs.end(); }
+  unsigned size() const { return NewRegs.size() - FirstNew; }
+  bool empty() const { return size() == 0; }
+  unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
+
+  /// pop_back - Allows LiveRangeEdit users to drop new registers.
+  /// The context: when the original def instruction of a register is dead
+  /// after rematerialization, we still want to keep it for subsequent
+  /// rematerializations. We save the def instruction in DeadRemats and
+  /// replace the original dst register with a new dummy register so that
+  /// the live range of the original dst register can be shrunk normally.
+  /// We don't want to allocate a phys register for the dummy register, so
+  /// we want to drop it from the NewRegs set.
+  void pop_back() { NewRegs.pop_back(); }
+
+  ArrayRef<unsigned> regs() const {
+    return makeArrayRef(NewRegs).slice(FirstNew);
+  }
+
+  /// createFrom - Create a new virtual register based on OldReg.
+  unsigned createFrom(unsigned OldReg);
+
+  /// create - Create a new register with the same class and original slot as
+  /// parent.
+  LiveInterval &createEmptyInterval() {
+    return createEmptyIntervalFrom(getReg(), true);
+  }
+
+  unsigned create() { return createFrom(getReg()); }
+
+  /// anyRematerializable - Return true if any parent values may be
+  /// rematerializable.
+  /// This function must be called before any rematerialization is attempted.
+  bool anyRematerializable(AliasAnalysis *);
+
+  /// checkRematerializable - Manually add VNI to the list of rematerializable
+  /// values if DefMI may be rematerializable.
+  bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
+                             AliasAnalysis *);
+
+  /// Remat - Information needed to rematerialize at a specific location.
+  struct Remat {
+    VNInfo *ParentVNI;              // parent_'s value at the remat location.
+    MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
+                                    // the real expr for remat.
+
+    explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
+  };
+
+  /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
+  /// UseIdx. It is assumed that parent_.getVNInfoAt(UseIdx) == ParentVNI.
+  /// When cheapAsAMove is set, only cheap remats are allowed.
+  bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx,
+                          bool cheapAsAMove);
+
+  /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
+  /// instruction into MBB before MI. The new instruction is mapped, but
+  /// liveness is not updated.
+  /// Return the SlotIndex of the new instruction.
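+  ///
+  /// A sketch of the overall remat flow (surrounding state assumed):
+  /// \code
+  ///   LiveRangeEdit::Remat RM(ParentVNI);
+  ///   if (Edit.canRematerializeAt(RM, OrigVNI, UseIdx, /*cheapAsAMove=*/false)) {
+  ///     SlotIndex DefIdx = Edit.rematerializeAt(MBB, InsertPt, NewReg, RM, TRI);
+  ///     Edit.markRematerialized(RM.ParentVNI);
+  ///   }
+  /// \endcode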
+  SlotIndex rematerializeAt(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI, unsigned DestReg,
+                            const Remat &RM, const TargetRegisterInfo &,
+                            bool Late = false);
+
+  /// markRematerialized - explicitly mark a value as rematerialized after doing
+  /// it manually.
+  void markRematerialized(const VNInfo *ParentVNI) {
+    Rematted.insert(ParentVNI);
+  }
+
+  /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
+  bool didRematerialize(const VNInfo *ParentVNI) const {
+    return Rematted.count(ParentVNI);
+  }
+
+  /// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
+  /// to erase it from LIS.
+  void eraseVirtReg(unsigned Reg);
+
+  /// eliminateDeadDefs - Try to delete machine instructions that are now dead
+  /// (allDefsAreDead returns true). This may cause live intervals to be trimmed
+  /// and further dead defs to be eliminated.
+  /// RegsBeingSpilled lists registers currently being spilled by the register
+  /// allocator.  These registers should not be split into new intervals
+  /// as currently those new intervals are not guaranteed to spill.
+  void eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,
+                         ArrayRef<unsigned> RegsBeingSpilled = None,
+                         AliasAnalysis *AA = nullptr);
+
+  /// calculateRegClassAndHint - Recompute register class and hint for each new
+  /// register.
+  void calculateRegClassAndHint(MachineFunction &, const MachineLoopInfo &,
+                                const MachineBlockFrequencyInfo &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVERANGEEDIT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h b/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h
new file mode 100644
index 0000000..f62a55c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h
@@ -0,0 +1,160 @@
+//===- LiveRegMatrix.h - Track register interference ----------*- C++ -*---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRegMatrix analysis pass keeps track of virtual register interference
+// along two dimensions: Slot indexes and register units. The matrix is used by
+// register allocators to ensure that no interfering virtual registers get
+// assigned to overlapping physical registers.
+//
+// Register units are defined in MCRegisterInfo.h, they represent the smallest
+// unit of interference when dealing with overlapping physical registers. The
+// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
+// a virtual register is assigned to a physical register, the live range for
+// the virtual register is inserted into the LiveIntervalUnion for each regunit
+// in the physreg.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
+#define LLVM_CODEGEN_LIVEREGMATRIX_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <memory>
+
+namespace llvm {
+
+class AnalysisUsage;
+class LiveInterval;
+class LiveIntervals;
+class MachineFunction;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRegMatrix : public MachineFunctionPass {
+  const TargetRegisterInfo *TRI;
+  LiveIntervals *LIS;
+  VirtRegMap *VRM;
+
+  // UserTag changes whenever virtual registers have been modified.
+  unsigned UserTag = 0;
+
+  // The matrix is represented as a LiveIntervalUnion per register unit.
+  LiveIntervalUnion::Allocator LIUAlloc;
+  LiveIntervalUnion::Array Matrix;
+
+  // Cached queries per register unit.
+  std::unique_ptr<LiveIntervalUnion::Query[]> Queries;
+
+  // Cached register mask interference info.
+  unsigned RegMaskTag = 0;
+  unsigned RegMaskVirtReg = 0;
+  BitVector RegMaskUsable;
+
+  // MachineFunctionPass boilerplate.
+  void getAnalysisUsage(AnalysisUsage &) const override;
+  bool runOnMachineFunction(MachineFunction &) override;
+  void releaseMemory() override;
+
+public:
+  static char ID;
+
+  LiveRegMatrix();
+
+  //===--------------------------------------------------------------------===//
+  // High-level interface.
+  //===--------------------------------------------------------------------===//
+  //
+  // Check for interference before assigning virtual registers to physical
+  // registers.
+  //
+
+  /// Invalidate cached interference queries after modifying virtual register
+  /// live ranges. Interference checks may return stale information unless
+  /// caches are invalidated.
+  void invalidateVirtRegs() { ++UserTag; }
+
+  enum InterferenceKind {
+    /// No interference, go ahead and assign.
+    IK_Free = 0,
+
+    /// Virtual register interference. There are interfering virtual registers
+    /// assigned to PhysReg or its aliases. This interference could be resolved
+    /// by unassigning those other virtual registers.
+    IK_VirtReg,
+
+    /// Register unit interference. A fixed live range is in the way, typically
+    /// argument registers for a call. This can't be resolved by unassigning
+    /// other virtual registers.
+    IK_RegUnit,
+
+    /// RegMask interference. The live range is crossing an instruction with a
+    /// regmask operand that doesn't preserve PhysReg. This typically means
+    /// VirtReg is live across a call, and PhysReg isn't call-preserved.
+    IK_RegMask
+  };
+
+  /// Check for interference before assigning VirtReg to PhysReg.
+  /// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
+  /// When there is more than one kind of interference, the InterferenceKind
+  /// with the highest enum value is returned.
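+  ///
+  /// Typical use in an allocator (sketch, names assumed):
+  /// \code
+  ///   if (Matrix.checkInterference(VirtReg, PhysReg) == LiveRegMatrix::IK_Free)
+  ///     Matrix.assign(VirtReg, PhysReg);
+  /// \endcode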
+  InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Check for interference in the segment [Start, End) that may prevent
+  /// assignment to PhysReg. If this function returns true, there is
+  /// interference in the segment [Start, End) of some other interval already
+  /// assigned to PhysReg. If this function returns false, PhysReg is free at
+  /// the segment [Start, End).
+  bool checkInterference(SlotIndex Start, SlotIndex End, unsigned PhysReg);
+
+  /// Assign VirtReg to PhysReg.
+  /// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
+  /// update VirtRegMap. The live range is expected to be available in PhysReg.
+  void assign(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Unassign VirtReg from its PhysReg.
+  /// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
+  /// the assignment and updates VirtRegMap accordingly.
+  void unassign(LiveInterval &VirtReg);
+
+  /// Returns true if the given \p PhysReg has any live intervals assigned.
+  bool isPhysRegUsed(unsigned PhysReg) const;
+
+  //===--------------------------------------------------------------------===//
+  // Low-level interface.
+  //===--------------------------------------------------------------------===//
+  //
+  // Provide access to the underlying LiveIntervalUnions.
+  //
+
+  /// Check for regmask interference only.
+  /// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
+  /// If PhysReg is null, check if VirtReg crosses any regmask operands.
+  bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
+
+  /// Check for regunit interference only.
+  /// Return true if VirtReg overlaps a fixed assignment of one of PhysReg's
+  /// register units.
+  bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Query a line of the assigned virtual register matrix directly.
+  /// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
+  /// This returns a reference to an internal Query data structure that is only
+  /// valid until the next query() call.
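+  ///
+  /// For example (sketch):
+  /// \code
+  ///   for (MCRegUnitIterator Unit(PhysReg, TRI); Unit.isValid(); ++Unit)
+  ///     if (Matrix.query(LR, *Unit).checkInterference())
+  ///       return true; // some unit of PhysReg interferes with LR
+  /// \endcode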
+  LiveIntervalUnion::Query &query(const LiveRange &LR, unsigned RegUnit);
+
+  /// Directly access the live interval unions per regunit.
+  /// This returns an array indexed by the regunit number.
+  LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
new file mode 100644
index 0000000..dc4956d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
@@ -0,0 +1,135 @@
+//===- llvm/CodeGen/LiveRegUnits.h - Register Unit Set ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// A set of register units. It is intended for register liveness tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGUNITS_H
+#define LLVM_CODEGEN_LIVEREGUNITS_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineBasicBlock;
+
+/// A set of register units used to track register liveness.
+class LiveRegUnits {
+  const TargetRegisterInfo *TRI = nullptr;
+  BitVector Units;
+
+public:
+  /// Constructs a new empty LiveRegUnits set.
+  LiveRegUnits() = default;
+
+  /// Constructs and initializes an empty LiveRegUnits set.
+  LiveRegUnits(const TargetRegisterInfo &TRI) {
+    init(TRI);
+  }
+
+  /// Initialize and clear the set.
+  void init(const TargetRegisterInfo &TRI) {
+    this->TRI = &TRI;
+    Units.reset();
+    Units.resize(TRI.getNumRegUnits());
+  }
+
+  /// Clears the set.
+  void clear() { Units.reset(); }
+
+  /// Returns true if the set is empty.
+  bool empty() const { return Units.none(); }
+
+  /// Adds register units covered by physical register \p Reg.
+  void addReg(unsigned Reg) {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
+      Units.set(*Unit);
+  }
+
+  /// \brief Adds register units covered by physical register \p Reg that are
+  /// part of the lanemask \p Mask.
+  void addRegMasked(unsigned Reg, LaneBitmask Mask) {
+    for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
+      LaneBitmask UnitMask = (*Unit).second;
+      if (UnitMask.none() || (UnitMask & Mask).any())
+        Units.set((*Unit).first);
+    }
+  }
+
+  /// Removes all register units covered by physical register \p Reg.
+  void removeReg(unsigned Reg) {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
+      Units.reset(*Unit);
+  }
+
+  /// Removes register units not preserved by the regmask \p RegMask.
+  /// The regmask has the same format as the one in the RegMask machine operand.
+  void removeRegsNotPreserved(const uint32_t *RegMask);
+
+  /// Adds register units not preserved by the regmask \p RegMask.
+  /// The regmask has the same format as the one in the RegMask machine operand.
+  void addRegsInMask(const uint32_t *RegMask);
+
+  /// Returns true if no part of physical register \p Reg is live.
+  bool available(unsigned Reg) const {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
+      if (Units.test(*Unit))
+        return false;
+    }
+    return true;
+  }
+
+  /// Updates liveness when stepping backwards over the instruction \p MI.
+  /// This removes all register units defined or clobbered in \p MI and then
+  /// adds the units used (as in use operands) in \p MI.
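+  ///
+  /// For example (sketch; MI and SomeReg are assumed names): testing whether
+  /// SomeReg is free immediately before MI by scanning back from block end:
+  /// \code
+  ///   LiveRegUnits Units(*TRI);
+  ///   Units.addLiveOuts(MBB);
+  ///   for (MachineInstr &I : llvm::reverse(MBB)) {
+  ///     Units.stepBackward(I);
+  ///     if (&I == &MI)
+  ///       break;
+  ///   }
+  ///   bool Free = Units.available(SomeReg);
+  /// \endcode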
+  void stepBackward(const MachineInstr &MI);
+
+  /// Adds all register units used, defined or clobbered in \p MI.
+  /// This is useful when walking over a range of instructions to find registers
+  /// unused over the whole range.
+  void accumulate(const MachineInstr &MI);
+
+  /// Adds registers living out of block \p MBB.
+  /// Live out registers are the union of the live-in registers of the successor
+  /// blocks and pristine registers. Live out registers of the end block are the
+  /// callee saved registers.
+  void addLiveOuts(const MachineBasicBlock &MBB);
+
+  /// Adds registers living into block \p MBB.
+  void addLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds all register units marked in the bitvector \p RegUnits.
+  void addUnits(const BitVector &RegUnits) {
+    Units |= RegUnits;
+  }
+  /// Removes all register units marked in the bitvector \p RegUnits.
+  void removeUnits(const BitVector &RegUnits) {
+    Units.reset(RegUnits);
+  }
+  /// Return the internal bitvector representation of the set.
+  const BitVector &getBitVector() const {
+    return Units;
+  }
+
+private:
+  /// Adds pristine registers. Pristine registers are callee saved registers
+  /// that are unused in the function.
+  void addPristines(const MachineFunction &MF);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGUNITS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h b/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h
new file mode 100644
index 0000000..44ed785
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h
@@ -0,0 +1,103 @@
+//===- LiveStacks.h - Live Stack Slot Analysis ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the live stack slot analysis pass. It is analogous to
+// live interval analysis except it's analyzing liveness of stack slots rather
+// than registers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVESTACKS_H
+#define LLVM_CODEGEN_LIVESTACKS_H
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Pass.h"
+#include <cassert>
+#include <map>
+#include <unordered_map>
+
+namespace llvm {
+
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+class LiveStacks : public MachineFunctionPass {
+  const TargetRegisterInfo *TRI;
+
+  /// Special pool allocator for VNInfo's (LiveInterval val#).
+  ///
+  VNInfo::Allocator VNInfoAllocator;
+
+  /// S2IMap - Stack slot indices to live interval mapping.
+  using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
+  SS2IntervalMap S2IMap;
+
+  /// S2RCMap - Stack slot indices to register class mapping.
+  std::map<int, const TargetRegisterClass *> S2RCMap;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  LiveStacks() : MachineFunctionPass(ID) {
+    initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+  }
+
+  using iterator = SS2IntervalMap::iterator;
+  using const_iterator = SS2IntervalMap::const_iterator;
+
+  const_iterator begin() const { return S2IMap.begin(); }
+  const_iterator end() const { return S2IMap.end(); }
+  iterator begin() { return S2IMap.begin(); }
+  iterator end() { return S2IMap.end(); }
+
+  unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
+
+  LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);
+
+  LiveInterval &getInterval(int Slot) {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    SS2IntervalMap::iterator I = S2IMap.find(Slot);
+    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+    return I->second;
+  }
+
+  const LiveInterval &getInterval(int Slot) const {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
+    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+    return I->second;
+  }
+
+  bool hasInterval(int Slot) const { return S2IMap.count(Slot); }
+
+  const TargetRegisterClass *getIntervalRegClass(int Slot) const {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    std::map<int, const TargetRegisterClass *>::const_iterator I =
+        S2RCMap.find(Slot);
+    assert(I != S2RCMap.end() &&
+           "Register class info does not exist for stack slot");
+    return I->second;
+  }
+
+  VNInfo::Allocator &getVNInfoAllocator() { return VNInfoAllocator; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void releaseMemory() override;
+
+  /// runOnMachineFunction - pass entry point
+  bool runOnMachineFunction(MachineFunction &) override;
+
+  /// print - Implement the dump method.
+  void print(raw_ostream &O, const Module * = nullptr) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h b/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h
new file mode 100644
index 0000000..ed8da86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h
@@ -0,0 +1,309 @@
+//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveVariables analysis pass.  For each machine
+// instruction in the function, this pass calculates the set of registers that
+// are immediately dead after the instruction (i.e., the instruction calculates
+// the value, but it is never used) and the set of registers that are used by
+// the instruction, but are never used after the instruction (i.e., they are
+// killed).
+//
+// This class computes live variables using a sparse implementation based on
+// the machine code SSA form.  This class computes live variable information for
+// each virtual and _register allocatable_ physical register in a function.  It
+// uses the dominance properties of SSA form to efficiently compute live
+// variables for virtual registers, and assumes that physical registers are only
+// live within a single basic block (allowing it to do a single local analysis
+// to resolve physical register lifetimes in each basic block).  If a physical
+// register is not register allocatable, it is not tracked.  This is useful for
+// things like the stack pointer and condition codes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
+#define LLVM_CODEGEN_LIVEVARIABLES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineRegisterInfo;
+
+class LiveVariables : public MachineFunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  LiveVariables() : MachineFunctionPass(ID) {
+    initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
+  }
+
+  /// VarInfo - This represents the regions where a virtual register is live in
+  /// the program.  We represent this with three different pieces of
+  /// information: the set of blocks in which the instruction is live
+  /// throughout, the set of blocks in which the instruction is actually used,
+  /// and the set of non-phi instructions that are the last users of the value.
+  ///
+  /// In the common case where a value is defined and killed in the same block,
+  /// there is one killing instruction, and AliveBlocks is empty.
+  ///
+  /// Otherwise, the value is live out of the block.  If the value is live
+  /// throughout any blocks, these blocks are listed in AliveBlocks.  Blocks
+  /// where the liveness range ends are not included in AliveBlocks, instead
+  /// being captured by the Kills set.  In these blocks, the value is live into
+  /// the block (unless the value is defined and killed in the same block) and
+  /// lives until the specified instruction.  Note that there cannot ever be a
+  /// value whose Kills set contains two instructions from the same basic block.
+  ///
+  /// PHI nodes complicate things a bit.  If a PHI node is the last user of a
+  /// value in one of its predecessor blocks, it is not listed in the kills set,
+  /// but does include the predecessor block in the AliveBlocks set (unless that
+  /// block also defines the value).  This leads to the (perfectly sensible)
+  /// situation where a value is defined in a block, and the last use is a phi
+  /// node in the successor.  In this case, AliveBlocks is empty (the value is
+  /// not live across any blocks) and Kills is empty (phi nodes are not
+  /// included). This is sensible because the value must be live to the end of
+  /// the block, but is not live in any successor blocks.
+  struct VarInfo {
+    /// AliveBlocks - Set of blocks in which this value is alive completely
+    /// through.  This is a bit set which uses the basic block number as an
+    /// index.
+    ///
+    SparseBitVector<> AliveBlocks;
+
+    /// Kills - List of MachineInstruction's which are the last use of this
+    /// virtual register (kill it) in their basic block.
+    ///
+    std::vector<MachineInstr*> Kills;
+
+    /// removeKill - Delete a kill corresponding to the specified
+    /// machine instruction. Returns true if there was a kill
+    /// corresponding to this instruction, false otherwise.
+    bool removeKill(MachineInstr &MI) {
+      std::vector<MachineInstr *>::iterator I = find(Kills, &MI);
+      if (I == Kills.end())
+        return false;
+      Kills.erase(I);
+      return true;
+    }
+
+    /// findKill - Find a kill instruction in MBB. Return NULL if none is found.
+    MachineInstr *findKill(const MachineBasicBlock *MBB) const;
+
+    /// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
+    /// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
+    /// MBB, it is not considered live in.
+    bool isLiveIn(const MachineBasicBlock &MBB,
+                  unsigned Reg,
+                  MachineRegisterInfo &MRI);
+
+    void dump() const;
+  };
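+
+  // For example (sketch): checking whether MI is recorded as a kill of Reg
+  // (LV, MI, and Reg are assumed names):
+  //   LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
+  //   bool KillsReg = llvm::is_contained(VI.Kills, &MI);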
+
+private:
+  /// VirtRegInfo - This list is a mapping from virtual register number to
+  /// variable information.
+  ///
+  IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
+
+  /// PHIJoins - list of virtual registers that are PHI joins. These registers
+  /// may have multiple definitions, and they require special handling when
+  /// building live intervals.
+  SparseBitVector<> PHIJoins;
+
+private:   // Intermediate data structures
+  MachineFunction *MF;
+
+  MachineRegisterInfo* MRI;
+
+  const TargetRegisterInfo *TRI;
+
+  // PhysRegDef - Keep track of which instruction was the last def of a
+  // physical register. This is a purely local property, because all physical
+  // register references are presumed dead across basic blocks.
+  std::vector<MachineInstr *> PhysRegDef;
+
+  // PhysRegUse - Keep track of which instruction was the last use of a
+  // physical register. This is a purely local property, because all physical
+  // register references are presumed dead across basic blocks.
+  std::vector<MachineInstr *> PhysRegUse;
+
+  std::vector<SmallVector<unsigned, 4>> PHIVarInfo;
+
+  // DistanceMap - Keep track of the distance of an MI from the start of the
+  // current basic block.
+  DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+  /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
+  /// uses. Pay special attention to the sub-register uses which may come below
+  /// the last use of the whole register.
+  bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
+
+  /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
+  void HandleRegMask(const MachineOperand&);
+
+  void HandlePhysRegUse(unsigned Reg, MachineInstr &MI);
+  void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
+                        SmallVectorImpl<unsigned> &Defs);
+  void UpdatePhysRegDefs(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
+
+  /// FindLastRefOrPartRef - Return the last reference or partial reference of
+  /// the specified register.
+  MachineInstr *FindLastRefOrPartRef(unsigned Reg);
+
+  /// FindLastPartialDef - Return the last partial def of the specified
+  /// register. Also returns the sub-registers that are defined by the
+  /// instruction.
+  MachineInstr *FindLastPartialDef(unsigned Reg,
+                                   SmallSet<unsigned,4> &PartDefRegs);
+
+  /// analyzePHINodes - Gather information about the PHI nodes in here. In
+  /// particular, we want to map the variable information of a virtual
+  /// register which is used in a PHI node. We map that to the BB the vreg
+  /// is coming from.
+  void analyzePHINodes(const MachineFunction& Fn);
+
+  void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
+
+  void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
+public:
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  /// RegisterDefIsDead - Return true if the specified instruction defines the
+  /// specified register, but that definition is dead.
+  bool RegisterDefIsDead(MachineInstr &MI, unsigned Reg) const;
+
+  //===--------------------------------------------------------------------===//
+  //  API to update live variable information
+
+  /// replaceKillInstruction - Update register kill info by replacing a kill
+  /// instruction with a new one.
+  void replaceKillInstruction(unsigned Reg, MachineInstr &OldMI,
+                              MachineInstr &NewMI);
+
+  /// addVirtualRegisterKilled - Add information about the fact that the
+  /// specified register is killed after being used by the specified
+  /// instruction. If AddIfNotFound is true, add an implicit operand if it's
+  /// not found.
+  void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr &MI,
+                                bool AddIfNotFound = false) {
+    if (MI.addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
+      getVarInfo(IncomingReg).Kills.push_back(&MI);
+  }
+
+  /// removeVirtualRegisterKilled - Remove the specified kill of the virtual
+  /// register from the live variable information. Returns true if the
+  /// variable was marked as killed by the specified instruction,
+  /// false otherwise.
+  bool removeVirtualRegisterKilled(unsigned reg, MachineInstr &MI) {
+    if (!getVarInfo(reg).removeKill(MI))
+      return false;
+
+    bool Removed = false;
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI.getOperand(i);
+      if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
+        MO.setIsKill(false);
+        Removed = true;
+        break;
+      }
+    }
+
+    assert(Removed && "Register is not used by this instruction!");
+    (void)Removed;
+    return true;
+  }
+
+  /// removeVirtualRegistersKilled - Remove all killed info for the specified
+  /// instruction.
+  void removeVirtualRegistersKilled(MachineInstr &MI);
+
+  /// addVirtualRegisterDead - Add information about the fact that the specified
+  /// register is dead after being used by the specified instruction. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found.
+  void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr &MI,
+                              bool AddIfNotFound = false) {
+    if (MI.addRegisterDead(IncomingReg, TRI, AddIfNotFound))
+      getVarInfo(IncomingReg).Kills.push_back(&MI);
+  }
+
+  /// removeVirtualRegisterDead - Remove the specified dead def of the
+  /// virtual register from the live variable information. Returns true if
+  /// the variable was marked dead at the specified instruction, false
+  /// otherwise.
+  bool removeVirtualRegisterDead(unsigned reg, MachineInstr &MI) {
+    if (!getVarInfo(reg).removeKill(MI))
+      return false;
+
+    bool Removed = false;
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI.getOperand(i);
+      if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
+        MO.setIsDead(false);
+        Removed = true;
+        break;
+      }
+    }
+    assert(Removed && "Register is not defined by this instruction!");
+    (void)Removed;
+    return true;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  void releaseMemory() override {
+    VirtRegInfo.clear();
+  }
+
+  /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
+  /// register.
+  VarInfo &getVarInfo(unsigned RegIdx);
+
+  void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+                               MachineBasicBlock *BB);
+  void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+                               MachineBasicBlock *BB,
+                               std::vector<MachineBasicBlock*> &WorkList);
+  void HandleVirtRegDef(unsigned reg, MachineInstr &MI);
+  void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB, MachineInstr &MI);
+
+  bool isLiveIn(unsigned Reg, const MachineBasicBlock &MBB) {
+    return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
+  }
+
+  /// isLiveOut - Determine if Reg is live out from MBB, when not considering
+  /// PHI nodes. This means that Reg is either killed by a successor block or
+  /// passed through one.
+  bool isLiveOut(unsigned Reg, const MachineBasicBlock &MBB);
+
+  /// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
+  /// variables that are live out of DomBB and live into SuccBB will be marked
+  /// as passing live through BB. This method assumes that the machine code is
+  /// still in SSA form.
+  void addNewBlock(MachineBasicBlock *BB,
+                   MachineBasicBlock *DomBB,
+                   MachineBasicBlock *SuccBB);
+
+  /// isPHIJoin - Return true if Reg is a phi join register.
+  bool isPHIJoin(unsigned Reg) { return PHIJoins.test(Reg); }
+
+  /// setPHIJoin - Mark Reg as a phi join register.
+  void setPHIJoin(unsigned Reg) { PHIJoins.set(Reg); }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
new file mode 100644
index 0000000..a816f6d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
@@ -0,0 +1,116 @@
+//==------ llvm/CodeGen/LoopTraversal.h - Loop Traversal -*- C++ -*---------==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Loop Traversal logic.
+///
+/// This class provides the basic blocks traversal order used by passes like
+/// ReachingDefAnalysis and ExecutionDomainFix.
+/// It identifies basic blocks that are part of loops and should be visited
+/// twice and returns an efficient traversal order for all the blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LOOPTRAVERSAL_H
+#define LLVM_CODEGEN_LOOPTRAVERSAL_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+
+/// This class provides the basic blocks traversal order used by passes like
+/// ReachingDefAnalysis and ExecutionDomainFix.
+/// It identifies basic blocks that are part of loops and should be visited
+/// twice and returns an efficient traversal order for all the blocks.
+///
+/// We want to visit every instruction in every basic block in order to update
+/// its execution domain or collect clearance information. However, for the
+/// clearance calculation, we need to know clearances from all predecessors
+/// (including any backedges), therefore we need to visit some blocks twice.
+/// As an example, consider the following loop.
+///
+///
+///    PH -> A -> B (xmm<Undef> -> xmm<Def>) -> C -> D -> EXIT
+///          ^                                  |
+///          +----------------------------------+
+///
+/// The iteration order this pass will return is as follows:
+/// Optimized: PH A B C A' B' C' D
+///
+/// The basic block order is constructed as follows:
+/// We call a block that is ready for its final round of processing `done`
+/// (isBlockDone), i.e. when all predecessor information is known.
+/// Once we finish processing some block, we update the counters in MBBInfos
+/// and re-process any successors that have now become `done`.
+///
+/// Note that a naive traversal order would be to do two complete passes over
+/// all basic blocks/instructions, the first for recording clearances, the
+/// second for updating clearance based on backedges.
+/// However, for functions without backedges, or functions with a lot of
+/// straight-line code and only a small loop, that would be a lot of
+/// unnecessary work (since only the BBs that are part of the loop require
+/// two passes).
+///
+/// E.g., the naive iteration order for the above example is as follows:
+/// Naive: PH A B C D A' B' C' D'
+///
+/// In the optimized approach we avoid processing D twice, because we
+/// can entirely process the predecessors before getting to D.
+class LoopTraversal {
+private:
+  struct MBBInfo {
+    /// Whether we have gotten to this block in primary processing yet.
+    bool PrimaryCompleted = false;
+
+    /// The number of predecessors for which primary processing has completed.
+    unsigned IncomingProcessed = 0;
+
+    /// The value of `IncomingProcessed` at the start of primary processing.
+    unsigned PrimaryIncoming = 0;
+
+    /// The number of predecessors for which all processing steps are done.
+    unsigned IncomingCompleted = 0;
+
+    MBBInfo() = default;
+  };
+  using MBBInfoMap = SmallVector<MBBInfo, 4>;
+  /// Helps keep track of whether we processed this block and all its
+  /// predecessors.
+  MBBInfoMap MBBInfos;
+
+public:
+  struct TraversedMBBInfo {
+    /// The basic block.
+    MachineBasicBlock *MBB = nullptr;
+
+    /// True if this is the first time we process the basic block.
+    bool PrimaryPass = true;
+
+    /// True if the block is ready for its final round of processing.
+    bool IsDone = true;
+
+    TraversedMBBInfo(MachineBasicBlock *BB = nullptr, bool Primary = true,
+                     bool Done = true)
+        : MBB(BB), PrimaryPass(Primary), IsDone(Done) {}
+  };
+  LoopTraversal() {}
+
+  /// \brief Identifies basic blocks that are part of loops and should be
+  /// visited twice and returns an efficient traversal order for all the
+  /// blocks.
+  typedef SmallVector<TraversedMBBInfo, 4> TraversalOrder;
+  TraversalOrder traverse(MachineFunction &MF);
+
+private:
+  /// Returns true if the block is ready for its final round of processing.
+  bool isBlockDone(MachineBasicBlock *MBB);
+};
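+
+// A minimal usage sketch (collectInfo and finalize are hypothetical helpers;
+// MF is an assumed MachineFunction). traverse() yields each block once, or
+// twice when it is part of a loop, so a pass simply iterates the order:
+//
+//   LoopTraversal Traversal;
+//   LoopTraversal::TraversalOrder Order = Traversal.traverse(MF);
+//   for (const LoopTraversal::TraversedMBBInfo &Info : Order) {
+//     if (Info.PrimaryPass)
+//       collectInfo(*Info.MBB); // first visit: record local information
+//     if (Info.IsDone)
+//       finalize(*Info.MBB);    // final visit: all predecessors processed
+//   }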
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_LOOPTRAVERSAL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h b/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h
new file mode 100644
index 0000000..a3c5c93
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h
@@ -0,0 +1,32 @@
+//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Implement a low-level type suitable for MachineInstr level instruction
+/// selection.
+///
+/// This provides the CodeGen aspects of LowLevelType, such as Type conversion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
+#define LLVM_CODEGEN_LOWLEVELTYPE_H
+
+#include "llvm/Support/LowLevelTypeImpl.h"
+
+namespace llvm {
+
+class DataLayout;
+class Type;
+
+/// Construct a low-level type based on an LLVM type.
+LLT getLLTForType(Type &Ty, const DataLayout &DL);
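+
+// A hedged sketch (F is an illustrative llvm::Function): translating an IR
+// type into an LLT for MachineInstr-level selection.
+//
+//   const DataLayout &DL = F.getParent()->getDataLayout();
+//   LLT Ty = getLLTForType(*F.getReturnType(), DL);
+//   if (Ty.isVector())
+//     ...; // handle vector values separately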
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
new file mode 100644
index 0000000..b631a8c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
@@ -0,0 +1,81 @@
+//===- MIRParser.h - MIR serialization format parser ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This MIR serialization library is currently a work in progress. It can't
+// serialize machine functions at this time.
+//
+// This file declares the functions that parse the MIR serialization format
+// files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+#define LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class StringRef;
+class MIRParserImpl;
+class MachineModuleInfo;
+class SMDiagnostic;
+
+/// This class initializes machine functions by applying the state loaded from
+/// a MIR file.
+class MIRParser {
+  std::unique_ptr<MIRParserImpl> Impl;
+
+public:
+  MIRParser(std::unique_ptr<MIRParserImpl> Impl);
+  MIRParser(const MIRParser &) = delete;
+  ~MIRParser();
+
+  /// Parses the optional LLVM IR module in the MIR file.
+  ///
+  /// A new, empty module is created if the LLVM IR isn't present.
+  /// \returns nullptr if a parsing error occurred.
+  std::unique_ptr<Module> parseIRModule();
+
+  /// \brief Parses MachineFunctions in the MIR file and adds them to the given
+  /// MachineModuleInfo \p MMI.
+  ///
+  /// \returns true if an error occurred.
+  bool parseMachineFunctions(Module &M, MachineModuleInfo &MMI);
+};
+
+/// This function is the main interface to the MIR serialization format parser.
+///
+/// It reads in a MIR file and returns a MIR parser that can parse the embedded
+/// LLVM IR module and initialize the machine functions by parsing the machine
+/// function's state.
+///
+/// \param Filename - The name of the file to parse.
+/// \param Error - Error result info.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser> createMIRParserFromFile(StringRef Filename,
+                                                   SMDiagnostic &Error,
+                                                   LLVMContext &Context);
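+
+// A hedged usage sketch (Filename, Ctx and MMI are assumed to be set up by
+// the caller; error handling abbreviated):
+//
+//   SMDiagnostic Error;
+//   std::unique_ptr<MIRParser> Parser =
+//       createMIRParserFromFile(Filename, Error, Ctx);
+//   if (!Parser)
+//     Error.print("tool", errs());
+//   std::unique_ptr<Module> M = Parser->parseIRModule();
+//   if (!M || Parser->parseMachineFunctions(*M, MMI))
+//     ...; // report the failure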
+
+/// This function is another interface to the MIR serialization format parser.
+///
+/// It returns a MIR parser that works with the given memory buffer and that can
+/// parse the embedded LLVM IR module and initialize the machine functions by
+/// parsing the machine function's state.
+///
+/// \param Contents - The MemoryBuffer containing the machine level IR.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser>
+createMIRParser(std::unique_ptr<MemoryBuffer> Contents, LLVMContext &Context);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
new file mode 100644
index 0000000..c73adc3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
@@ -0,0 +1,46 @@
+//===- MIRPrinter.h - MIR serialization format printer --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the functions that print out the LLVM IR and the machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_MIRPRINTER_H
+#define LLVM_LIB_CODEGEN_MIRPRINTER_H
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class Module;
+class raw_ostream;
+template <typename T> class SmallVectorImpl;
+
+/// Print LLVM IR using the MIR serialization format to the given output stream.
+void printMIR(raw_ostream &OS, const Module &M);
+
+/// Print a machine function using the MIR serialization format to the given
+/// output stream.
+void printMIR(raw_ostream &OS, const MachineFunction &MF);
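+
+// A minimal sketch (M and MF assumed valid): print the module-level IR once,
+// then each machine function as its own YAML document.
+//
+//   printMIR(outs(), M);
+//   printMIR(outs(), MF);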
+
+/// Determine a possible list of successors of a basic block based on the
+/// basic block machine operand being used inside the block. This should give
+/// you the correct list of successor blocks in most cases except for things
+/// like jump tables where the basic block references can't easily be found.
+/// The MIRPrinter will skip printing successors if they match the result of
+/// this function, and the parser will use this function to construct a list
+/// if it is missing.
+void guessSuccessors(const MachineBasicBlock &MBB,
+                     SmallVectorImpl<MachineBasicBlock*> &Successors,
+                     bool &IsFallthrough);
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
new file mode 100644
index 0000000..b75f9c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
@@ -0,0 +1,523 @@
+//===- MIRYamlMapping.h - Describes the mapping between MIR and YAML ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the mapping between various MIR data structures and
+// their corresponding YAML representation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MIRYAMLMAPPING_H
+#define LLVM_CODEGEN_MIRYAMLMAPPING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace yaml {
+
+/// A wrapper around std::string which contains a source range that's being
+/// set during parsing.
+struct StringValue {
+  std::string Value;
+  SMRange SourceRange;
+
+  StringValue() = default;
+  StringValue(std::string Value) : Value(std::move(Value)) {}
+
+  bool operator==(const StringValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct ScalarTraits<StringValue> {
+  static void output(const StringValue &S, void *, raw_ostream &OS) {
+    OS << S.Value;
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, StringValue &S) {
+    S.Value = Scalar.str();
+    if (const auto *Node =
+            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+      S.SourceRange = Node->getSourceRange();
+    return "";
+  }
+
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+struct FlowStringValue : StringValue {
+  FlowStringValue() = default;
+  FlowStringValue(std::string Value) : StringValue(std::move(Value)) {}
+};
+
+template <> struct ScalarTraits<FlowStringValue> {
+  static void output(const FlowStringValue &S, void *, raw_ostream &OS) {
+    return ScalarTraits<StringValue>::output(S, nullptr, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, FlowStringValue &S) {
+    return ScalarTraits<StringValue>::input(Scalar, Ctx, S);
+  }
+
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+struct BlockStringValue {
+  StringValue Value;
+
+  bool operator==(const BlockStringValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct BlockScalarTraits<BlockStringValue> {
+  static void output(const BlockStringValue &S, void *Ctx, raw_ostream &OS) {
+    return ScalarTraits<StringValue>::output(S.Value, Ctx, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, BlockStringValue &S) {
+    return ScalarTraits<StringValue>::input(Scalar, Ctx, S.Value);
+  }
+};
+
+/// A wrapper around unsigned which contains a source range that's being set
+/// during parsing.
+struct UnsignedValue {
+  unsigned Value = 0;
+  SMRange SourceRange;
+
+  UnsignedValue() = default;
+  UnsignedValue(unsigned Value) : Value(Value) {}
+
+  bool operator==(const UnsignedValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct ScalarTraits<UnsignedValue> {
+  static void output(const UnsignedValue &Value, void *Ctx, raw_ostream &OS) {
+    return ScalarTraits<unsigned>::output(Value.Value, Ctx, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, UnsignedValue &Value) {
+    if (const auto *Node =
+            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+      Value.SourceRange = Node->getSourceRange();
+    return ScalarTraits<unsigned>::input(Scalar, Ctx, Value.Value);
+  }
+
+  static QuotingType mustQuote(StringRef Scalar) {
+    return ScalarTraits<unsigned>::mustQuote(Scalar);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<MachineJumpTableInfo::JTEntryKind> {
+  static void enumeration(yaml::IO &IO,
+                          MachineJumpTableInfo::JTEntryKind &EntryKind) {
+    IO.enumCase(EntryKind, "block-address",
+                MachineJumpTableInfo::EK_BlockAddress);
+    IO.enumCase(EntryKind, "gp-rel64-block-address",
+                MachineJumpTableInfo::EK_GPRel64BlockAddress);
+    IO.enumCase(EntryKind, "gp-rel32-block-address",
+                MachineJumpTableInfo::EK_GPRel32BlockAddress);
+    IO.enumCase(EntryKind, "label-difference32",
+                MachineJumpTableInfo::EK_LabelDifference32);
+    IO.enumCase(EntryKind, "inline", MachineJumpTableInfo::EK_Inline);
+    IO.enumCase(EntryKind, "custom32", MachineJumpTableInfo::EK_Custom32);
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::StringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::UnsignedValue)
+
+namespace llvm {
+namespace yaml {
+
+struct VirtualRegisterDefinition {
+  UnsignedValue ID;
+  StringValue Class;
+  StringValue PreferredRegister;
+
+  // TODO: Serialize the target specific register hints.
+
+  bool operator==(const VirtualRegisterDefinition &Other) const {
+    return ID == Other.ID && Class == Other.Class &&
+           PreferredRegister == Other.PreferredRegister;
+  }
+};
+
+template <> struct MappingTraits<VirtualRegisterDefinition> {
+  static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) {
+    YamlIO.mapRequired("id", Reg.ID);
+    YamlIO.mapRequired("class", Reg.Class);
+    YamlIO.mapOptional("preferred-register", Reg.PreferredRegister,
+                       StringValue()); // Don't print out when it's empty.
+  }
+
+  static const bool flow = true;
+};
+
+struct MachineFunctionLiveIn {
+  StringValue Register;
+  StringValue VirtualRegister;
+
+  bool operator==(const MachineFunctionLiveIn &Other) const {
+    return Register == Other.Register &&
+           VirtualRegister == Other.VirtualRegister;
+  }
+};
+
+template <> struct MappingTraits<MachineFunctionLiveIn> {
+  static void mapping(IO &YamlIO, MachineFunctionLiveIn &LiveIn) {
+    YamlIO.mapRequired("reg", LiveIn.Register);
+    YamlIO.mapOptional(
+        "virtual-reg", LiveIn.VirtualRegister,
+        StringValue()); // Don't print the virtual register when it's empty.
+  }
+
+  static const bool flow = true;
+};
+
+/// Serializable representation of a stack object from the MachineFrameInfo
+/// class.
+///
+/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are
+/// determined by the object's type and frame information flags.
+/// Dead stack objects aren't serialized.
+///
+/// The 'isPreallocated' flag is determined by the local offset.
+struct MachineStackObject {
+  enum ObjectType { DefaultType, SpillSlot, VariableSized };
+  UnsignedValue ID;
+  StringValue Name;
+  // TODO: Serialize unnamed LLVM alloca reference.
+  ObjectType Type = DefaultType;
+  int64_t Offset = 0;
+  uint64_t Size = 0;
+  unsigned Alignment = 0;
+  uint8_t StackID = 0;
+  StringValue CalleeSavedRegister;
+  bool CalleeSavedRestored = true;
+  Optional<int64_t> LocalOffset;
+  StringValue DebugVar;
+  StringValue DebugExpr;
+  StringValue DebugLoc;
+
+  bool operator==(const MachineStackObject &Other) const {
+    return ID == Other.ID && Name == Other.Name && Type == Other.Type &&
+           Offset == Other.Offset && Size == Other.Size &&
+           Alignment == Other.Alignment &&
+           StackID == Other.StackID &&
+           CalleeSavedRegister == Other.CalleeSavedRegister &&
+           CalleeSavedRestored == Other.CalleeSavedRestored &&
+           LocalOffset == Other.LocalOffset && DebugVar == Other.DebugVar &&
+           DebugExpr == Other.DebugExpr && DebugLoc == Other.DebugLoc;
+  }
+};
+
+template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> {
+  static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) {
+    IO.enumCase(Type, "default", MachineStackObject::DefaultType);
+    IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot);
+    IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized);
+  }
+};
+
+template <> struct MappingTraits<MachineStackObject> {
+  static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) {
+    YamlIO.mapRequired("id", Object.ID);
+    YamlIO.mapOptional("name", Object.Name,
+                       StringValue()); // Don't print out an empty name.
+    YamlIO.mapOptional(
+        "type", Object.Type,
+        MachineStackObject::DefaultType); // Don't print the default type.
+    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
+    if (Object.Type != MachineStackObject::VariableSized)
+      YamlIO.mapRequired("size", Object.Size);
+    YamlIO.mapOptional("alignment", Object.Alignment, (unsigned)0);
+    YamlIO.mapOptional("stack-id", Object.StackID);
+    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
+                       true);
+    YamlIO.mapOptional("local-offset", Object.LocalOffset, Optional<int64_t>());
+    YamlIO.mapOptional("di-variable", Object.DebugVar,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("di-expression", Object.DebugExpr,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("di-location", Object.DebugLoc,
+                       StringValue()); // Don't print it out when it's empty.
+  }
+
+  static const bool flow = true;
+};
+
+/// Serializable representation of the fixed stack object from the
+/// MachineFrameInfo class.
+struct FixedMachineStackObject {
+  enum ObjectType { DefaultType, SpillSlot };
+  UnsignedValue ID;
+  ObjectType Type = DefaultType;
+  int64_t Offset = 0;
+  uint64_t Size = 0;
+  unsigned Alignment = 0;
+  uint8_t StackID = 0;
+  bool IsImmutable = false;
+  bool IsAliased = false;
+  StringValue CalleeSavedRegister;
+  bool CalleeSavedRestored = true;
+
+  bool operator==(const FixedMachineStackObject &Other) const {
+    return ID == Other.ID && Type == Other.Type && Offset == Other.Offset &&
+           Size == Other.Size && Alignment == Other.Alignment &&
+           StackID == Other.StackID &&
+           IsImmutable == Other.IsImmutable && IsAliased == Other.IsAliased &&
+           CalleeSavedRegister == Other.CalleeSavedRegister &&
+           CalleeSavedRestored == Other.CalleeSavedRestored;
+  }
+};
+
+template <>
+struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> {
+  static void enumeration(yaml::IO &IO,
+                          FixedMachineStackObject::ObjectType &Type) {
+    IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType);
+    IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot);
+  }
+};
+
+template <> struct MappingTraits<FixedMachineStackObject> {
+  static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) {
+    YamlIO.mapRequired("id", Object.ID);
+    YamlIO.mapOptional(
+        "type", Object.Type,
+        FixedMachineStackObject::DefaultType); // Don't print the default type.
+    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
+    YamlIO.mapOptional("size", Object.Size, (uint64_t)0);
+    YamlIO.mapOptional("alignment", Object.Alignment, (unsigned)0);
+    YamlIO.mapOptional("stack-id", Object.StackID);
+    if (Object.Type != FixedMachineStackObject::SpillSlot) {
+      YamlIO.mapOptional("isImmutable", Object.IsImmutable, false);
+      YamlIO.mapOptional("isAliased", Object.IsAliased, false);
+    }
+    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
+                       true);
+  }
+
+  static const bool flow = true;
+};
+
+struct MachineConstantPoolValue {
+  UnsignedValue ID;
+  StringValue Value;
+  unsigned Alignment = 0;
+  bool IsTargetSpecific = false;
+
+  bool operator==(const MachineConstantPoolValue &Other) const {
+    return ID == Other.ID && Value == Other.Value &&
+           Alignment == Other.Alignment &&
+           IsTargetSpecific == Other.IsTargetSpecific;
+  }
+};
+
+template <> struct MappingTraits<MachineConstantPoolValue> {
+  static void mapping(IO &YamlIO, MachineConstantPoolValue &Constant) {
+    YamlIO.mapRequired("id", Constant.ID);
+    YamlIO.mapOptional("value", Constant.Value, StringValue());
+    YamlIO.mapOptional("alignment", Constant.Alignment, (unsigned)0);
+    YamlIO.mapOptional("isTargetSpecific", Constant.IsTargetSpecific, false);
+  }
+};
+
+struct MachineJumpTable {
+  struct Entry {
+    UnsignedValue ID;
+    std::vector<FlowStringValue> Blocks;
+
+    bool operator==(const Entry &Other) const {
+      return ID == Other.ID && Blocks == Other.Blocks;
+    }
+  };
+
+  MachineJumpTableInfo::JTEntryKind Kind = MachineJumpTableInfo::EK_Custom32;
+  std::vector<Entry> Entries;
+
+  bool operator==(const MachineJumpTable &Other) const {
+    return Kind == Other.Kind && Entries == Other.Entries;
+  }
+};
+
+template <> struct MappingTraits<MachineJumpTable::Entry> {
+  static void mapping(IO &YamlIO, MachineJumpTable::Entry &Entry) {
+    YamlIO.mapRequired("id", Entry.ID);
+    YamlIO.mapOptional("blocks", Entry.Blocks, std::vector<FlowStringValue>());
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineFunctionLiveIn)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineConstantPoolValue)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineJumpTable::Entry)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<MachineJumpTable> {
+  static void mapping(IO &YamlIO, MachineJumpTable &JT) {
+    YamlIO.mapRequired("kind", JT.Kind);
+    YamlIO.mapOptional("entries", JT.Entries,
+                       std::vector<MachineJumpTable::Entry>());
+  }
+};
+
+/// Serializable representation of MachineFrameInfo.
+///
+/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and
+/// 'RealignOption' as they are determined by the target and LLVM function
+/// attributes.
+/// It also doesn't serialize attributes like 'NumFixedObject' and
+/// 'HasVarSizedObjects' as they are determined by the frame objects themselves.
+struct MachineFrameInfo {
+  bool IsFrameAddressTaken = false;
+  bool IsReturnAddressTaken = false;
+  bool HasStackMap = false;
+  bool HasPatchPoint = false;
+  uint64_t StackSize = 0;
+  int OffsetAdjustment = 0;
+  unsigned MaxAlignment = 0;
+  bool AdjustsStack = false;
+  bool HasCalls = false;
+  StringValue StackProtector;
+  // TODO: Serialize FunctionContextIdx
+  unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
+  bool HasOpaqueSPAdjustment = false;
+  bool HasVAStart = false;
+  bool HasMustTailInVarArgFunc = false;
+  StringValue SavePoint;
+  StringValue RestorePoint;
+
+  bool operator==(const MachineFrameInfo &Other) const {
+    return IsFrameAddressTaken == Other.IsFrameAddressTaken &&
+           IsReturnAddressTaken == Other.IsReturnAddressTaken &&
+           HasStackMap == Other.HasStackMap &&
+           HasPatchPoint == Other.HasPatchPoint &&
+           StackSize == Other.StackSize &&
+           OffsetAdjustment == Other.OffsetAdjustment &&
+           MaxAlignment == Other.MaxAlignment &&
+           AdjustsStack == Other.AdjustsStack && HasCalls == Other.HasCalls &&
+           StackProtector == Other.StackProtector &&
+           MaxCallFrameSize == Other.MaxCallFrameSize &&
+           HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
+           HasVAStart == Other.HasVAStart &&
+           HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
+           SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint;
+  }
+};
+
+template <> struct MappingTraits<MachineFrameInfo> {
+  static void mapping(IO &YamlIO, MachineFrameInfo &MFI) {
+    YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken, false);
+    YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken, false);
+    YamlIO.mapOptional("hasStackMap", MFI.HasStackMap, false);
+    YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint, false);
+    YamlIO.mapOptional("stackSize", MFI.StackSize, (uint64_t)0);
+    YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment, (int)0);
+    YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment, (unsigned)0);
+    YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack, false);
+    YamlIO.mapOptional("hasCalls", MFI.HasCalls, false);
+    YamlIO.mapOptional("stackProtector", MFI.StackProtector,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, (unsigned)~0);
+    YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment,
+                       false);
+    YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
+    YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc,
+                       false);
+    YamlIO.mapOptional("savePoint", MFI.SavePoint,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("restorePoint", MFI.RestorePoint,
+                       StringValue()); // Don't print it out when it's empty.
+  }
+};
+
+struct MachineFunction {
+  StringRef Name;
+  unsigned Alignment = 0;
+  bool ExposesReturnsTwice = false;
+  // GISel MachineFunctionProperties.
+  bool Legalized = false;
+  bool RegBankSelected = false;
+  bool Selected = false;
+  bool FailedISel = false;
+  // Register information
+  bool TracksRegLiveness = false;
+  std::vector<VirtualRegisterDefinition> VirtualRegisters;
+  std::vector<MachineFunctionLiveIn> LiveIns;
+  Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
+  // TODO: Serialize the various register masks.
+  // Frame information
+  MachineFrameInfo FrameInfo;
+  std::vector<FixedMachineStackObject> FixedStackObjects;
+  std::vector<MachineStackObject> StackObjects;
+  std::vector<MachineConstantPoolValue> Constants; ///< Constant pool.
+  MachineJumpTable JumpTableInfo;
+  BlockStringValue Body;
+};
+
+template <> struct MappingTraits<MachineFunction> {
+  static void mapping(IO &YamlIO, MachineFunction &MF) {
+    YamlIO.mapRequired("name", MF.Name);
+    YamlIO.mapOptional("alignment", MF.Alignment, (unsigned)0);
+    YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice, false);
+    YamlIO.mapOptional("legalized", MF.Legalized, false);
+    YamlIO.mapOptional("regBankSelected", MF.RegBankSelected, false);
+    YamlIO.mapOptional("selected", MF.Selected, false);
+    YamlIO.mapOptional("failedISel", MF.FailedISel, false);
+    YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
+    YamlIO.mapOptional("registers", MF.VirtualRegisters,
+                       std::vector<VirtualRegisterDefinition>());
+    YamlIO.mapOptional("liveins", MF.LiveIns,
+                       std::vector<MachineFunctionLiveIn>());
+    YamlIO.mapOptional("calleeSavedRegisters", MF.CalleeSavedRegisters,
+                       Optional<std::vector<FlowStringValue>>());
+    YamlIO.mapOptional("frameInfo", MF.FrameInfo, MachineFrameInfo());
+    YamlIO.mapOptional("fixedStack", MF.FixedStackObjects,
+                       std::vector<FixedMachineStackObject>());
+    YamlIO.mapOptional("stack", MF.StackObjects,
+                       std::vector<MachineStackObject>());
+    YamlIO.mapOptional("constants", MF.Constants,
+                       std::vector<MachineConstantPoolValue>());
+    if (!YamlIO.outputting() || !MF.JumpTableInfo.Entries.empty())
+      YamlIO.mapOptional("jumpTable", MF.JumpTableInfo, MachineJumpTable());
+    YamlIO.mapOptional("body", MF.Body, BlockStringValue());
+  }
+};
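+
+// A hedged round-trip sketch using the YAML I/O machinery these traits plug
+// into (YamlMF is an assumed yaml::MachineFunction value):
+//
+//   std::string Buf;
+//   {
+//     raw_string_ostream OS(Buf);
+//     yaml::Output YamlOut(OS);
+//     YamlOut << YamlMF; // uses MappingTraits<MachineFunction>::mapping
+//   }
+//   yaml::MachineFunction Parsed;
+//   yaml::Input YamlIn(Buf);
+//   YamlIn >> Parsed;   // the same traits drive parsing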
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MIRYAMLMAPPING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
new file mode 100644
index 0000000..8c9b7a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
@@ -0,0 +1,56 @@
+//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachORelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHORELOCATION_H
+#define LLVM_CODEGEN_MACHORELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+  /// MachORelocation - This struct contains information about each relocation
+  /// that needs to be emitted to the file.
+  /// See <mach-o/reloc.h>.
+  class MachORelocation {
+    uint32_t r_address;   // offset in the section to what is being relocated
+    uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
+    bool     r_pcrel;     // was relocated pc-relative already
+    uint8_t  r_length;    // length = 2 ^ r_length
+    bool     r_extern;    // 1 = relocation refers to an external symbol
+    uint8_t  r_type;      // if not 0, machine-specific relocation type.
+    bool     r_scattered; // 1 = scattered, 0 = non-scattered
+    int32_t  r_value;     // the value the item to be relocated is referring
+                          // to.
+  public:
+    uint32_t getPackedFields() const {
+      if (r_scattered)
+        return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) | 
+          ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
+      else
+        return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
+          (r_extern << 4) | (r_type & 15);
+    }
+    uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
+    uint32_t getRawAddress() const { return r_address; }
+
+    MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
+                    bool ext, uint8_t type, bool scattered = false, 
+                    int32_t value = 0) : 
+      r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
+      r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
+  };
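+
+  // A hedged example (values illustrative only): a 4-byte (r_length == 2)
+  // pc-relative relocation against external symbol index 3 at offset 0x10.
+  //
+  //   MachORelocation R(/*addr=*/0x10, /*index=*/3, /*pcrel=*/true,
+  //                     /*len=*/2, /*ext=*/true, /*type=*/0);
+  //   uint32_t Packed = R.getPackedFields();
+  //   // == (3 << 8) | (1 << 7) | (2 << 5) | (1 << 4)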
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_MACHORELOCATION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
new file mode 100644
index 0000000..f3130b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
@@ -0,0 +1,918 @@
+//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect the sequence of machine instructions for a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
+#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundleIterator.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class MachineFunction;
+class MCSymbol;
+class ModuleSlotTracker;
+class Pass;
+class SlotIndexes;
+class StringRef;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+template <> struct ilist_traits<MachineInstr> {
+private:
+  friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
+
+  MachineBasicBlock *Parent;
+
+  using instr_iterator =
+      simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
+
+public:
+  void addNodeToList(MachineInstr *N);
+  void removeNodeFromList(MachineInstr *N);
+  void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
+                             instr_iterator Last);
+  void deleteNode(MachineInstr *MI);
+};
+
+class MachineBasicBlock
+    : public ilist_node_with_parent<MachineBasicBlock, MachineFunction> {
+public:
+  /// Pair of physical register and lane mask.
+  /// This is not simply a std::pair typedef because the members should be named
+  /// clearly as they both have an integer type.
+  struct RegisterMaskPair {
+  public:
+    MCPhysReg PhysReg;
+    LaneBitmask LaneMask;
+
+    RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
+        : PhysReg(PhysReg), LaneMask(LaneMask) {}
+  };
+
+private:
+  using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
+
+  Instructions Insts;
+  const BasicBlock *BB;
+  int Number;
+  MachineFunction *xParent;
+
+  /// Keep track of the predecessor / successor basic blocks.
+  std::vector<MachineBasicBlock *> Predecessors;
+  std::vector<MachineBasicBlock *> Successors;
+
+  /// Keep track of the probabilities to the successors. This vector has the
+  /// same order as Successors, or it is empty if we don't use it (disable
+  /// optimization).
+  std::vector<BranchProbability> Probs;
+  using probability_iterator = std::vector<BranchProbability>::iterator;
+  using const_probability_iterator =
+      std::vector<BranchProbability>::const_iterator;
+
+  Optional<uint64_t> IrrLoopHeaderWeight;
+
+  /// Keep track of the physical registers that are livein of the basicblock.
+  using LiveInVector = std::vector<RegisterMaskPair>;
+  LiveInVector LiveIns;
+
+  /// Alignment of the basic block. Zero if the basic block does not need to be
+  /// aligned. The alignment is specified as log2(bytes).
+  unsigned Alignment = 0;
+
+  /// Indicate that this basic block is entered via an exception handler.
+  bool IsEHPad = false;
+
+  /// Indicate that this basic block is potentially the target of an indirect
+  /// branch.
+  bool AddressTaken = false;
+
+  /// Indicate that this basic block is the entry block of an EH funclet.
+  bool IsEHFuncletEntry = false;
+
+  /// Indicate that this basic block is the entry block of a cleanup funclet.
+  bool IsCleanupFuncletEntry = false;
+
+  /// \brief Since getSymbol is a relatively heavyweight operation, the symbol
+  /// is only computed once and is cached.
+  mutable MCSymbol *CachedMCSymbol = nullptr;
+
+  // Intrusive list support
+  MachineBasicBlock() = default;
+
+  explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
+
+  ~MachineBasicBlock();
+
+  // MachineBasicBlocks are allocated and owned by MachineFunction.
+  friend class MachineFunction;
+
+public:
+  /// Return the LLVM basic block that this instance corresponded to originally.
+  /// Note that this may be NULL if this instance does not correspond directly
+  /// to an LLVM basic block.
+  const BasicBlock *getBasicBlock() const { return BB; }
+
+  /// Return the name of the corresponding LLVM basic block, or an empty string.
+  StringRef getName() const;
+
+  /// Return a formatted string to identify this block and its parent function.
+  std::string getFullName() const;
+
+  /// Test whether this block is potentially the target of an indirect branch.
+  bool hasAddressTaken() const { return AddressTaken; }
+
+  /// Set this block to reflect that it potentially is the target of an indirect
+  /// branch.
+  void setHasAddressTaken() { AddressTaken = true; }
+
+  /// Return the MachineFunction containing this basic block.
+  const MachineFunction *getParent() const { return xParent; }
+  MachineFunction *getParent() { return xParent; }
+
+  using instr_iterator = Instructions::iterator;
+  using const_instr_iterator = Instructions::const_iterator;
+  using reverse_instr_iterator = Instructions::reverse_iterator;
+  using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
+
+  using iterator = MachineInstrBundleIterator<MachineInstr>;
+  using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
+  using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
+  using const_reverse_iterator =
+      MachineInstrBundleIterator<const MachineInstr, true>;
+
+  unsigned size() const { return (unsigned)Insts.size(); }
+  bool empty() const { return Insts.empty(); }
+
+  MachineInstr       &instr_front()       { return Insts.front(); }
+  MachineInstr       &instr_back()        { return Insts.back();  }
+  const MachineInstr &instr_front() const { return Insts.front(); }
+  const MachineInstr &instr_back()  const { return Insts.back();  }
+
+  MachineInstr       &front()             { return Insts.front(); }
+  MachineInstr       &back()              { return *--end();      }
+  const MachineInstr &front()       const { return Insts.front(); }
+  const MachineInstr &back()        const { return *--end();      }
+
+  instr_iterator                instr_begin()       { return Insts.begin();  }
+  const_instr_iterator          instr_begin() const { return Insts.begin();  }
+  instr_iterator                  instr_end()       { return Insts.end();    }
+  const_instr_iterator            instr_end() const { return Insts.end();    }
+  reverse_instr_iterator       instr_rbegin()       { return Insts.rbegin(); }
+  const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
+  reverse_instr_iterator       instr_rend  ()       { return Insts.rend();   }
+  const_reverse_instr_iterator instr_rend  () const { return Insts.rend();   }
+
+  using instr_range = iterator_range<instr_iterator>;
+  using const_instr_range = iterator_range<const_instr_iterator>;
+  instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
+  const_instr_range instrs() const {
+    return const_instr_range(instr_begin(), instr_end());
+  }
+
+  iterator                begin()       { return instr_begin();  }
+  const_iterator          begin() const { return instr_begin();  }
+  iterator                end  ()       { return instr_end();    }
+  const_iterator          end  () const { return instr_end();    }
+  reverse_iterator rbegin() {
+    return reverse_iterator::getAtBundleBegin(instr_rbegin());
+  }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator::getAtBundleBegin(instr_rbegin());
+  }
+  reverse_iterator rend() { return reverse_iterator(instr_rend()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(instr_rend());
+  }
+
+  /// Support for MachineInstr::getNextNode().
+  static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
+    return &MachineBasicBlock::Insts;
+  }
+
+  inline iterator_range<iterator> terminators() {
+    return make_range(getFirstTerminator(), end());
+  }
+  inline iterator_range<const_iterator> terminators() const {
+    return make_range(getFirstTerminator(), end());
+  }
+
+  /// Returns a range that iterates over the phis in the basic block.
+  inline iterator_range<iterator> phis() {
+    return make_range(begin(), getFirstNonPHI());
+  }
+  inline iterator_range<const_iterator> phis() const {
+    return const_cast<MachineBasicBlock *>(this)->phis();
+  }
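+
+  // Illustrative sketches of the two ranges above (MBB assumed valid):
+  //
+  //   for (MachineInstr &Term : MBB->terminators())
+  //     if (Term.isBranch())
+  //       ...; // rewrite branch targets here
+  //
+  //   for (MachineInstr &Phi : MBB->phis())
+  //     assert(Phi.isPHI() && "phis() covers only the leading PHI nodes");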
+
+  // Machine-CFG iterators
+  using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
+  using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+  using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
+  using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+  using pred_reverse_iterator =
+      std::vector<MachineBasicBlock *>::reverse_iterator;
+  using const_pred_reverse_iterator =
+      std::vector<MachineBasicBlock *>::const_reverse_iterator;
+  using succ_reverse_iterator =
+      std::vector<MachineBasicBlock *>::reverse_iterator;
+  using const_succ_reverse_iterator =
+      std::vector<MachineBasicBlock *>::const_reverse_iterator;
+  pred_iterator        pred_begin()       { return Predecessors.begin(); }
+  const_pred_iterator  pred_begin() const { return Predecessors.begin(); }
+  pred_iterator        pred_end()         { return Predecessors.end();   }
+  const_pred_iterator  pred_end()   const { return Predecessors.end();   }
+  pred_reverse_iterator        pred_rbegin()
+                                          { return Predecessors.rbegin();}
+  const_pred_reverse_iterator  pred_rbegin() const
+                                          { return Predecessors.rbegin();}
+  pred_reverse_iterator        pred_rend()
+                                          { return Predecessors.rend();  }
+  const_pred_reverse_iterator  pred_rend()   const
+                                          { return Predecessors.rend();  }
+  unsigned             pred_size()  const {
+    return (unsigned)Predecessors.size();
+  }
+  bool                 pred_empty() const { return Predecessors.empty(); }
+  succ_iterator        succ_begin()       { return Successors.begin();   }
+  const_succ_iterator  succ_begin() const { return Successors.begin();   }
+  succ_iterator        succ_end()         { return Successors.end();     }
+  const_succ_iterator  succ_end()   const { return Successors.end();     }
+  succ_reverse_iterator        succ_rbegin()
+                                          { return Successors.rbegin();  }
+  const_succ_reverse_iterator  succ_rbegin() const
+                                          { return Successors.rbegin();  }
+  succ_reverse_iterator        succ_rend()
+                                          { return Successors.rend();    }
+  const_succ_reverse_iterator  succ_rend()   const
+                                          { return Successors.rend();    }
+  unsigned             succ_size()  const {
+    return (unsigned)Successors.size();
+  }
+  bool                 succ_empty() const { return Successors.empty();   }
+
+  inline iterator_range<pred_iterator> predecessors() {
+    return make_range(pred_begin(), pred_end());
+  }
+  inline iterator_range<const_pred_iterator> predecessors() const {
+    return make_range(pred_begin(), pred_end());
+  }
+  inline iterator_range<succ_iterator> successors() {
+    return make_range(succ_begin(), succ_end());
+  }
+  inline iterator_range<const_succ_iterator> successors() const {
+    return make_range(succ_begin(), succ_end());
+  }
+
+  // LiveIn management methods.
+
+  /// Adds the specified register as a live in. Note that it is an error to add
+  /// the same register to the same set more than once unless the intention is
+  /// to call sortUniqueLiveIns after all registers are added.
+  void addLiveIn(MCPhysReg PhysReg,
+                 LaneBitmask LaneMask = LaneBitmask::getAll()) {
+    LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
+  }
+  void addLiveIn(const RegisterMaskPair &RegMaskPair) {
+    LiveIns.push_back(RegMaskPair);
+  }
+
+  /// Sorts and uniques the LiveIns vector. It can be significantly faster to do
+  /// this than repeatedly calling isLiveIn before calling addLiveIn for every
+  /// LiveIn insertion.
+  void sortUniqueLiveIns();
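+
+  // A hedged bulk-insert sketch (Regs is an illustrative container of
+  // MCPhysReg values): duplicates are fine as long as they are deduped once.
+  //
+  //   for (MCPhysReg Reg : Regs)
+  //     MBB->addLiveIn(Reg);
+  //   MBB->sortUniqueLiveIns();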
+
+  /// Clear live in list.
+  void clearLiveIns();
+
+  /// Add PhysReg as live in to this block, and ensure that there is a copy of
+  /// PhysReg to a virtual register of class RC. Return the virtual register
+  /// that is a copy of the live in PhysReg.
+  unsigned addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC);
+
+  /// Remove the specified register from the live in set.
+  void removeLiveIn(MCPhysReg Reg,
+                    LaneBitmask LaneMask = LaneBitmask::getAll());
+
+  /// Return true if the specified register is in the live in set.
+  bool isLiveIn(MCPhysReg Reg,
+                LaneBitmask LaneMask = LaneBitmask::getAll()) const;
+
+  // Iteration support for live in sets.  These sets are kept in sorted
+  // order by their register number.
+  using livein_iterator = LiveInVector::const_iterator;
+#ifndef NDEBUG
+  /// Unlike livein_begin, this method does not check that the liveness
+  /// information is accurate. Still for debug purposes it may be useful
+  /// to have iterators that won't assert if the liveness information
+  /// is not current.
+  livein_iterator livein_begin_dbg() const { return LiveIns.begin(); }
+  iterator_range<livein_iterator> liveins_dbg() const {
+    return make_range(livein_begin_dbg(), livein_end());
+  }
+#endif
+  livein_iterator livein_begin() const;
+  livein_iterator livein_end()   const { return LiveIns.end(); }
+  bool            livein_empty() const { return LiveIns.empty(); }
+  iterator_range<livein_iterator> liveins() const {
+    return make_range(livein_begin(), livein_end());
+  }
+
+  /// Remove entry from the livein set and return iterator to the next.
+  livein_iterator removeLiveIn(livein_iterator I);
+
+  /// Get the clobber mask for the start of this basic block. Funclets use this
+  /// to prevent register allocation across funclet transitions.
+  const uint32_t *getBeginClobberMask(const TargetRegisterInfo *TRI) const;
+
+  /// Get the clobber mask for the end of the basic block.
+  /// \see getBeginClobberMask()
+  const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
+
+  /// Return alignment of the basic block. The alignment is specified as
+  /// log2(bytes).
+  unsigned getAlignment() const { return Alignment; }
+
+  /// Set alignment of the basic block. The alignment is specified as
+  /// log2(bytes).
+  void setAlignment(unsigned Align) { Alignment = Align; }
+
+  /// Returns true if the block is a landing pad. That is, this basic block is
+  /// entered via an exception handler.
+  bool isEHPad() const { return IsEHPad; }
+
+  /// Indicates the block is a landing pad. That is, this basic block is
+  /// entered via an exception handler.
+  void setIsEHPad(bool V = true) { IsEHPad = V; }
+
+  bool hasEHPadSuccessor() const;
+
+  /// Returns true if this is the entry block of an EH funclet.
+  bool isEHFuncletEntry() const { return IsEHFuncletEntry; }
+
+  /// Indicates if this is the entry block of an EH funclet.
+  void setIsEHFuncletEntry(bool V = true) { IsEHFuncletEntry = V; }
+
+  /// Returns true if this is the entry block of a cleanup funclet.
+  bool isCleanupFuncletEntry() const { return IsCleanupFuncletEntry; }
+
+  /// Indicates if this is the entry block of a cleanup funclet.
+  void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }
+
+  /// Returns true if it is legal to hoist instructions into this block.
+  bool isLegalToHoistInto() const;
+
+  // Code Layout methods.
+
+  /// Move 'this' block before or after the specified block.  This only moves
+  /// the block, it does not modify the CFG or adjust potential fall-throughs at
+  /// the end of the block.
+  void moveBefore(MachineBasicBlock *NewAfter);
+  void moveAfter(MachineBasicBlock *NewBefore);
+
+  /// Update the terminator instructions in block to account for changes to the
+  /// layout. If the block previously used a fallthrough, it may now need a
+  /// branch, and if it previously used branching it may now be able to use a
+  /// fallthrough.
+  void updateTerminator();
+
+  // Machine-CFG mutators
+
+  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
+  /// of Succ is automatically updated. PROB parameter is stored in
+  /// Probabilities list. The default probability is set as unknown. Mixing
+  /// known and unknown probabilities in successor list is not allowed. When all
+  /// successors have unknown probabilities, 1 / N is returned as the
+  /// probability for each successor, where N is the number of successors.
+  ///
+  /// Note that duplicate Machine CFG edges are not allowed.
+  void addSuccessor(MachineBasicBlock *Succ,
+                    BranchProbability Prob = BranchProbability::getUnknown());
+
+  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
+  /// of Succ is automatically updated. The probability is not provided because
+  /// BPI is not available (e.g. -O0 is used), in which case edge probabilities
+  /// won't be used. Using this interface can save some space.
+  void addSuccessorWithoutProb(MachineBasicBlock *Succ);
+
+  /// Set successor probability of a given iterator.
+  void setSuccProbability(succ_iterator I, BranchProbability Prob);
+
+  /// Normalize probabilities of all successors so that the sum of them becomes
+  /// one. This is usually done when the current update on this MBB is done, and
+  /// the sum of its successors' probabilities is not guaranteed to be one. The
+  /// user is responsible for the correct use of this function.
+  /// MBB::removeSuccessor() has an option to do this automatically.
+  void normalizeSuccProbs() {
+    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
+  }
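+
+  // A hedged sketch (A and B are illustrative successor blocks): after
+  // manual edge surgery the probabilities may not sum to one, so callers
+  // renormalize.
+  //
+  //   MBB->addSuccessor(A, BranchProbability(1, 4));
+  //   MBB->addSuccessor(B, BranchProbability(1, 4));
+  //   MBB->normalizeSuccProbs(); // {1/4, 1/4} -> {1/2, 1/2}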
+
+  /// Validate successors' probabilities and check if the sum of them is
+  /// approximately one. This only works in DEBUG mode.
+  void validateSuccProbs() const;
+
+  /// Remove successor from the successors list of this MachineBasicBlock. The
+  /// Predecessors list of Succ is automatically updated.
+  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+  /// after the successor is removed.
+  void removeSuccessor(MachineBasicBlock *Succ,
+                       bool NormalizeSuccProbs = false);
+
+  /// Remove specified successor from the successors list of this
+  /// MachineBasicBlock. The Predecessors list of Succ is automatically updated.
+  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+  /// after the successor is removed.
+  /// Return the iterator to the element after the one removed.
+  succ_iterator removeSuccessor(succ_iterator I,
+                                bool NormalizeSuccProbs = false);
+
+  /// Replace successor OLD with NEW and update probability info.
+  void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// Copy a successor (and any probability info) from the original block to
+  /// this block. Uses an iterator into the original block's successors.
+  ///
+  /// This is useful when doing a partial clone of successors. Afterward, the
+  /// probabilities may need to be normalized.
+  void copySuccessor(MachineBasicBlock *Orig, succ_iterator I);
+
+  /// Transfers all the successors from MBB to this machine basic block (i.e.,
+  /// copies all the successors of FromMBB and removes all the successors from
+  /// FromMBB).
+  void transferSuccessors(MachineBasicBlock *FromMBB);
+
+  /// Transfers all the successors, as in transferSuccessors, and updates PHI
+  /// operands in the successor blocks which refer to FromMBB to refer to this.
+  void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB);
+
+  /// Return true if any of the successors have probabilities attached to them.
+  bool hasSuccessorProbabilities() const { return !Probs.empty(); }
+
+  /// Return true if the specified MBB is a predecessor of this block.
+  bool isPredecessor(const MachineBasicBlock *MBB) const;
+
+  /// Return true if the specified MBB is a successor of this block.
+  bool isSuccessor(const MachineBasicBlock *MBB) const;
+
+  /// Return true if the specified MBB will be emitted immediately after this
+  /// block, such that if this block exits by falling through, control will
+  /// transfer to the specified MBB. Note that MBB need not be a successor at
+  /// all, for example if this block ends with an unconditional branch to some
+  /// other block.
+  bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
+
+  /// Return the fallthrough block if the block can implicitly
+  /// transfer control to the block after it by falling off the end of
+  /// it.  This should return null if it can reach the block after
+  /// it, but it uses an explicit branch to do so (e.g., a table
+  /// jump).  Non-null return is a conservative answer.
+  MachineBasicBlock *getFallThrough();
+
+  /// Return true if the block can implicitly transfer control to the
+  /// block after it by falling off the end of it.  This should return
+  /// false if it can reach the block after it, but it uses an
+  /// explicit branch to do so (e.g., a table jump).  True is a
+  /// conservative answer.
+  bool canFallThrough();
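+
+  // A hedged sketch (hypothetical layout code; NextBB is the intended layout
+  // successor): decide whether an explicit branch must be inserted after
+  // reordering blocks.
+  //
+  //   if (MBB->canFallThrough() && !MBB->isLayoutSuccessor(NextBB))
+  //     ... // falling off the end would reach the wrong block; add a branch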
+
+  /// Returns a pointer to the first instruction in this block that is not a
+  /// PHINode instruction. When adding instructions to the beginning of the
+  /// basic block, they should be added before the returned value, not before
+  /// the first instruction, which might be a PHI.
+  /// Returns end() if there's no non-PHI instruction.
+  iterator getFirstNonPHI();
+
+  /// Return the first instruction in MBB after I that is not a PHI or a label.
+  /// This is the correct point to insert lowered copies at the beginning of a
+  /// basic block that must be before any debugging information.
+  iterator SkipPHIsAndLabels(iterator I);
+
+  /// Return the first instruction in MBB after I that is not a PHI, label or
+  /// debug.  This is the correct point to insert copies at the beginning of a
+  /// basic block.
+  iterator SkipPHIsLabelsAndDebug(iterator I);
+
+  /// Returns an iterator to the first terminator instruction of this basic
+  /// block. If a terminator does not exist, it returns end().
+  iterator getFirstTerminator();
+  const_iterator getFirstTerminator() const {
+    return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
+  }
+
+  /// Same as getFirstTerminator(), but ignores bundles and returns an
+  /// instr_iterator instead.
+  instr_iterator getFirstInstrTerminator();
+
+  /// Returns an iterator to the first non-debug instruction in the basic block,
+  /// or end().
+  iterator getFirstNonDebugInstr();
+  const_iterator getFirstNonDebugInstr() const {
+    return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr();
+  }
+
+  /// Returns an iterator to the last non-debug instruction in the basic block,
+  /// or end().
+  iterator getLastNonDebugInstr();
+  const_iterator getLastNonDebugInstr() const {
+    return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr();
+  }
+
+  /// Convenience function that returns true if the block ends in a return
+  /// instruction.
+  bool isReturnBlock() const {
+    return !empty() && back().isReturn();
+  }
+
+  /// Split the critical edge from this block to the given successor block, and
+  /// return the newly created block, or null if splitting is not possible.
+  ///
+  /// This function updates LiveVariables, MachineDominatorTree, and
+  /// MachineLoopInfo, as applicable.
+  MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass &P);
+
+  /// Check if the edge between this block and the given successor \p Succ can
+  /// be split. If this returns true, a subsequent call to SplitCriticalEdge
+  /// is guaranteed to return a valid basic block if no changes occurred in
+  /// the meantime.
+  bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const;
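+
+  // A minimal sketch (P and Succ are hypothetical): query splittability
+  // first, then split while the CFG is still unchanged.
+  //
+  //   if (MBB->canSplitCriticalEdge(Succ))
+  //     if (MachineBasicBlock *NewBB = MBB->SplitCriticalEdge(Succ, P))
+  //       ... // NewBB now sits on the former critical edge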
+
+  void pop_front() { Insts.pop_front(); }
+  void pop_back() { Insts.pop_back(); }
+  void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+
+  /// Insert MI into the instruction list before I, possibly inside a bundle.
+  ///
+  /// If the insertion point is inside a bundle, MI will be added to the bundle,
+  /// otherwise MI will not be added to any bundle. That means this function
+  /// alone can't be used to prepend or append instructions to bundles. See
+  /// MIBundleBuilder::insert() for a more reliable way of doing that.
+  instr_iterator insert(instr_iterator I, MachineInstr *M);
+
+  /// Insert a range of instructions into the instruction list before I.
+  template<typename IT>
+  void insert(iterator I, IT S, IT E) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    Insts.insert(I.getInstrIterator(), S, E);
+  }
+
+  /// Insert MI into the instruction list before I.
+  iterator insert(iterator I, MachineInstr *MI) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+           "Cannot insert instruction with bundle flags");
+    return Insts.insert(I.getInstrIterator(), MI);
+  }
+
+  /// Insert MI into the instruction list after I.
+  iterator insertAfter(iterator I, MachineInstr *MI) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+           "Cannot insert instruction with bundle flags");
+    return Insts.insertAfter(I.getInstrIterator(), MI);
+  }
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  instr_iterator erase(instr_iterator I);
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  instr_iterator erase_instr(MachineInstr *I) {
+    return erase(instr_iterator(I));
+  }
+
+  /// Remove a range of instructions from the instruction list and delete them.
+  iterator erase(iterator I, iterator E) {
+    return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
+  }
+
+  /// Remove an instruction or bundle from the instruction list and delete it.
+  ///
+  /// If I points to a bundle of instructions, they are all erased.
+  iterator erase(iterator I) {
+    return erase(I, std::next(I));
+  }
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If I is the head of a bundle of instructions, the whole bundle will be
+  /// erased.
+  iterator erase(MachineInstr *I) {
+    return erase(iterator(I));
+  }
+
+  /// Remove the unbundled instruction from the instruction list without
+  /// deleting it.
+  ///
+  /// This function cannot be used to remove bundled instructions; use
+  /// remove_instr to remove individual instructions from a bundle.
+  MachineInstr *remove(MachineInstr *I) {
+    assert(!I->isBundled() && "Cannot remove bundled instructions");
+    return Insts.remove(instr_iterator(I));
+  }
+
+  /// Remove the possibly bundled instruction from the instruction list
+  /// without deleting it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  MachineInstr *remove_instr(MachineInstr *I);
+
+  void clear() {
+    Insts.clear();
+  }
+
+  /// Take an instruction from MBB 'Other' at the position From, and insert it
+  /// into this MBB right before 'Where'.
+  ///
+  /// If From points to a bundle of instructions, the whole bundle is moved.
+  void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
+    // The range splice() doesn't allow noop moves, but this one does.
+    if (Where != From)
+      splice(Where, Other, From, std::next(From));
+  }
+
+  /// Take a block of instructions from MBB 'Other' in the range [From, To),
+  /// and insert them into this MBB right before 'Where'.
+  ///
+  /// The instruction at 'Where' must not be included in the range of
+  /// instructions to move.
+  void splice(iterator Where, MachineBasicBlock *Other,
+              iterator From, iterator To) {
+    Insts.splice(Where.getInstrIterator(), Other->Insts,
+                 From.getInstrIterator(), To.getInstrIterator());
+  }
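+
+  // A hedged sketch (Other is a hypothetical donor block): move everything
+  // up to Other's first terminator to just before this block's own
+  // terminator.
+  //
+  //   MBB->splice(MBB->getFirstTerminator(), Other,
+  //               Other->begin(), Other->getFirstTerminator());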
+
+  /// This method unlinks 'this' from the containing function, and returns it,
+  /// but does not delete it.
+  MachineBasicBlock *removeFromParent();
+
+  /// This method unlinks 'this' from the containing function and deletes it.
+  void eraseFromParent();
+
+  /// Given a machine basic block that branched to 'Old', change the code and
+  /// CFG so that it branches to 'New' instead.
+  void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// Various pieces of code can cause excess edges in the CFG to be inserted.
+  /// If we have proven that MBB can only branch to DestA and DestB, remove any
+  /// other MBB successors from the CFG. DestA and DestB can be null. Besides
+  /// DestA and DestB, retain other edges leading to LandingPads (currently
+  /// there can be only one; we don't check or require that here). Note it is
+  /// possible that DestA and/or DestB are LandingPads.
+  bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
+                            MachineBasicBlock *DestB,
+                            bool IsCond);
+
+  /// Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE
+  /// instructions.  Return UnknownLoc if there is none.
+  DebugLoc findDebugLoc(instr_iterator MBBI);
+  DebugLoc findDebugLoc(iterator MBBI) {
+    return findDebugLoc(MBBI.getInstrIterator());
+  }
+
+  /// Find the previous valid DebugLoc preceding MBBI, skipping any DBG_VALUE
+  /// instructions.  Return UnknownLoc if there is none.
+  DebugLoc findPrevDebugLoc(instr_iterator MBBI);
+  DebugLoc findPrevDebugLoc(iterator MBBI) {
+    return findPrevDebugLoc(MBBI.getInstrIterator());
+  }
+
+  /// Find and return the merged DebugLoc of the branch instructions of the
+  /// block. Return UnknownLoc if there is none.
+  DebugLoc findBranchDebugLoc();
+
+  /// Possible outcome of a register liveness query to computeRegisterLiveness()
+  enum LivenessQueryResult {
+    LQR_Live,   ///< Register is known to be (at least partially) live.
+    LQR_Dead,   ///< Register is known to be fully dead.
+    LQR_Unknown ///< Register liveness not decidable from local neighborhood.
+  };
+
+  /// Return whether (physical) register \p Reg has been defined and not
+  /// killed as of just before \p Before.
+  ///
+  /// Search is localized to a neighborhood of \p Neighborhood instructions
+  /// before (searching for defs or kills) and \p Neighborhood instructions
+  /// after (searching just for defs) \p Before.
+  ///
+  /// \p Reg must be a physical register.
+  LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
+                                              unsigned Reg,
+                                              const_iterator Before,
+                                              unsigned Neighborhood = 10) const;
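+
+  // A minimal sketch (SomeReg is a hypothetical physical register): check
+  // whether a register can be clobbered just before an instruction.
+  //
+  //   if (MBB->computeRegisterLiveness(TRI, SomeReg, Before) ==
+  //       MachineBasicBlock::LQR_Dead)
+  //     ... // SomeReg is known dead here and may be reused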
+
+  // Debugging methods.
+  void dump() const;
+  void print(raw_ostream &OS, const SlotIndexes * = nullptr,
+             bool IsStandalone = true) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             const SlotIndexes * = nullptr, bool IsStandalone = true) const;
+
+  // Printing method used by LoopInfo.
+  void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
+
+  /// MachineBasicBlocks are uniquely numbered at the function level, unless
+  /// they're not in a MachineFunction yet, in which case this will return -1.
+  int getNumber() const { return Number; }
+  void setNumber(int N) { Number = N; }
+
+  /// Return the MCSymbol for this basic block.
+  MCSymbol *getSymbol() const;
+
+  Optional<uint64_t> getIrrLoopHeaderWeight() const {
+    return IrrLoopHeaderWeight;
+  }
+
+  void setIrrLoopHeaderWeight(uint64_t Weight) {
+    IrrLoopHeaderWeight = Weight;
+  }
+
+private:
+  /// Return probability iterator corresponding to the I successor iterator.
+  probability_iterator getProbabilityIterator(succ_iterator I);
+  const_probability_iterator
+  getProbabilityIterator(const_succ_iterator I) const;
+
+  friend class MachineBranchProbabilityInfo;
+  friend class MIPrinter;
+
+  /// Return probability of the edge from this block to MBB. This method should
+  /// NOT be called directly, but by using getEdgeProbability method from
+  /// MachineBranchProbabilityInfo class.
+  BranchProbability getSuccProbability(const_succ_iterator Succ) const;
+
+  // Methods used to maintain doubly linked list of blocks...
+  friend struct ilist_callback_traits<MachineBasicBlock>;
+
+  // Machine-CFG mutators
+
+  /// Add Pred as a predecessor of this MachineBasicBlock. Don't do this
+  /// unless you know what you're doing, because it doesn't update Pred's
+  /// successors list. Use Pred->addSuccessor instead.
+  void addPredecessor(MachineBasicBlock *Pred);
+
+  /// Remove Pred as a predecessor of this MachineBasicBlock. Don't do this
+  /// unless you know what you're doing, because it doesn't update Pred's
+  /// successors list. Use Pred->removeSuccessor instead.
+  void removePredecessor(MachineBasicBlock *Pred);
+};
+
+raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
+
+/// Prints a machine basic block reference.
+///
+/// The format is:
+///   %bb.5           - a machine basic block with MBB.getNumber() == 5.
+///
+/// Usage: OS << printMBBReference(MBB) << '\n';
+Printable printMBBReference(const MachineBasicBlock &MBB);
+
+// This is useful when building IndexedMaps keyed on basic block pointers.
+struct MBB2NumberFunctor {
+  using argument_type = const MachineBasicBlock *;
+  unsigned operator()(const MachineBasicBlock *MBB) const {
+    return MBB->getNumber();
+  }
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for machine basic block graphs (machine-CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks.
+//
+
+template <> struct GraphTraits<MachineBasicBlock *> {
+  using NodeRef = MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::succ_iterator;
+
+  static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+};
+
+template <> struct GraphTraits<const MachineBasicBlock *> {
+  using NodeRef = const MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
+
+  static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+};
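+
+// A hedged usage sketch: with these specializations, generic graph
+// algorithms (e.g. depth_first from llvm/ADT/DepthFirstIterator.h, which the
+// user must include) work directly on the machine CFG.
+//
+//   for (MachineBasicBlock *MBB : depth_first(&MF.front()))
+//     ... // visits blocks reachable from the entry block of MF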
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks and to walk it
+// in inverse order.  Inverse order for a function is considered
+// to be when traversing the predecessor edges of a MBB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
+  using NodeRef = MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
+    return G.Graph;
+  }
+
+  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+};
+
+template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
+  using NodeRef = const MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
+    return G.Graph;
+  }
+
+  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+};
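+
+// A similar sketch for the inverse graph (ExitBB is hypothetical; also
+// requires llvm/ADT/DepthFirstIterator.h): walk predecessor edges instead.
+//
+//   for (MachineBasicBlock *MBB : inverse_depth_first(ExitBB))
+//     ... // visits blocks that reach ExitBB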
+
+/// MachineInstrSpan provides an interface to get an iteration range
+/// containing the instruction it was initialized with, along with all
+/// those instructions inserted prior to or following that instruction
+/// at some point after the MachineInstrSpan is constructed.
+class MachineInstrSpan {
+  MachineBasicBlock &MBB;
+  MachineBasicBlock::iterator I, B, E;
+
+public:
+  MachineInstrSpan(MachineBasicBlock::iterator I)
+    : MBB(*I->getParent()),
+      I(I),
+      B(I == MBB.begin() ? MBB.end() : std::prev(I)),
+      E(std::next(I)) {}
+
+  MachineBasicBlock::iterator begin() {
+    return B == MBB.end() ? MBB.begin() : std::next(B);
+  }
+  MachineBasicBlock::iterator end() { return E; }
+  bool empty() { return begin() == end(); }
+
+  MachineBasicBlock::iterator getInitial() { return I; }
+};
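+
+// A minimal usage sketch (InsertPt is a hypothetical insertion point):
+// construct the span first, insert instructions, then iterate over exactly
+// the instructions that appeared around the initial one.
+//
+//   MachineInstrSpan MIS(InsertPt);
+//   ... // insert new instructions before InsertPt
+//   for (MachineInstr &MI : make_range(MIS.begin(), MIS.end()))
+//     ... // visits the inserted instructions and *InsertPt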
+
+/// Increment \p It until it points to a non-debug instruction or to \p End
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<typename IterT>
+inline IterT skipDebugInstructionsForward(IterT It, IterT End) {
+  while (It != End && It->isDebugValue())
+    ++It;
+  return It;
+}
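+
+// A minimal sketch: find the first "real" instruction of a block, stepping
+// over any leading DBG_VALUEs.
+//
+//   auto It = skipDebugInstructionsForward(MBB.begin(), MBB.end());
+//   if (It != MBB.end())
+//     ... // *It is the first non-debug instruction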
+
+/// Decrement \p It until it points to a non-debug instruction or to \p Begin
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<class IterT>
+inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
+  while (It != Begin && It->isDebugValue())
+    --It;
+  return It;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
new file mode 100644
index 0000000..5b4b99c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -0,0 +1,85 @@
+//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+template <class BlockT> class BlockFrequencyInfoImpl;
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineLoopInfo;
+class raw_ostream;
+
+/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
+/// to estimate machine basic block frequencies.
+class MachineBlockFrequencyInfo : public MachineFunctionPass {
+  using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
+  std::unique_ptr<ImplType> MBFI;
+
+public:
+  static char ID;
+
+  MachineBlockFrequencyInfo();
+  ~MachineBlockFrequencyInfo() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  /// calculate - compute block frequency info for the given function.
+  void calculate(const MachineFunction &F,
+                 const MachineBranchProbabilityInfo &MBPI,
+                 const MachineLoopInfo &MLI);
+
+  void releaseMemory() override;
+
+  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
+  /// information. Please note that the initial frequency is equal to 1024. It
+  /// means that we should not rely on the value itself, but only on the
+  /// comparison to other block frequencies. We do this to avoid using
+  /// floating point.
+  ///
+  BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
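+
+  // A hedged sketch (A and B are hypothetical blocks): because frequencies
+  // are relative to the entry frequency, compare them instead of reading the
+  // raw value.
+  //
+  //   if (MBFI.getBlockFreq(A) >= MBFI.getBlockFreq(B))
+  //     ... // A is at least as hot as B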
+
+  Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
+  Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
+
+  bool isIrrLoopHeader(const MachineBasicBlock *MBB);
+
+  const MachineFunction *getFunction() const;
+  const MachineBranchProbabilityInfo *getMBPI() const;
+  void view(const Twine &Name, bool isSimple = true) const;
+
+  // Print the block frequency Freq to OS using the current function's entry
+  // frequency to convert Freq into a relative decimal form.
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
+
+  // Convenience method that attempts to look up the frequency associated with
+  // BB and print it to OS.
+  raw_ostream &printBlockFreq(raw_ostream &OS,
+                              const MachineBasicBlock *MBB) const;
+
+  uint64_t getEntryFreq() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
new file mode 100644
index 0000000..81b0524
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -0,0 +1,77 @@
+//=- MachineBranchProbabilityInfo.h - Branch Probability Analysis -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to evaluate branch probabilities on machine basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include <climits>
+#include <numeric>
+
+namespace llvm {
+
+class MachineBranchProbabilityInfo : public ImmutablePass {
+  virtual void anchor();
+
+  // Default weight value. Used when we don't have information about the edge.
+  // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+  // the successors have a weight yet. But it doesn't make sense when providing
+  // weight to an edge that may have siblings with non-zero weights. This can
+  // be handled in various ways, but it's probably fine for an edge with unknown
+  // weight to just "inherit" the non-zero weight of an adjacent successor.
+  static const uint32_t DEFAULT_WEIGHT = 16;
+
+public:
+  static char ID;
+
+  MachineBranchProbabilityInfo() : ImmutablePass(ID) {
+    PassRegistry &Registry = *PassRegistry::getPassRegistry();
+    initializeMachineBranchProbabilityInfoPass(Registry);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  // Return edge probability.
+  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
+                                       const MachineBasicBlock *Dst) const;
+
+  // Same as above, but using a const_succ_iterator from Src. This is faster
+  // when the iterator is already available.
+  BranchProbability
+  getEdgeProbability(const MachineBasicBlock *Src,
+                     MachineBasicBlock::const_succ_iterator Dst) const;
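+
+  // A minimal usage sketch (Src/Dst and the 64-bit Weight are hypothetical):
+  // scale a quantity by an edge probability using fixed-point arithmetic.
+  //
+  //   BranchProbability P = MBPI->getEdgeProbability(Src, Dst);
+  //   uint64_t Scaled = P.scale(Weight);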
+
+  // A 'Hot' edge is an edge whose probability is >= 80%.
+  bool isEdgeHot(const MachineBasicBlock *Src,
+                 const MachineBasicBlock *Dst) const;
+
+  // Return a hot successor for the block BB or null if there isn't one.
+  // NB: This routine's complexity is linear on the number of successors.
+  MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;
+
+  // Print a value between 0 (0% probability) and 1 (100% probability). The
+  // value is never equal to 0, and can be 1 only if the Src block has a
+  // single successor.
+  raw_ostream &printEdgeProbability(raw_ostream &OS,
+                                    const MachineBasicBlock *Src,
+                                    const MachineBasicBlock *Dst) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h b/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h
new file mode 100644
index 0000000..586535f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -0,0 +1,87 @@
+//===- llvm/CodeGen/MachineCombinerPattern.h - Combiner patterns -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the instruction patterns supported by the machine combiner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+
+namespace llvm {
+
+/// These are instruction patterns matched by the machine combiner pass.
+enum class MachineCombinerPattern {
+  // These are commutative variants for reassociating a computation chain. See
+  // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
+  REASSOC_AX_BY,
+  REASSOC_AX_YB,
+  REASSOC_XA_BY,
+  REASSOC_XA_YB,
+
+  // These are multiply-add patterns matched by the AArch64 machine combiner.
+  MULADDW_OP1,
+  MULADDW_OP2,
+  MULSUBW_OP1,
+  MULSUBW_OP2,
+  MULADDWI_OP1,
+  MULSUBWI_OP1,
+  MULADDX_OP1,
+  MULADDX_OP2,
+  MULSUBX_OP1,
+  MULSUBX_OP2,
+  MULADDXI_OP1,
+  MULSUBXI_OP1,
+  // Floating Point
+  FMULADDS_OP1,
+  FMULADDS_OP2,
+  FMULSUBS_OP1,
+  FMULSUBS_OP2,
+  FMULADDD_OP1,
+  FMULADDD_OP2,
+  FMULSUBD_OP1,
+  FMULSUBD_OP2,
+  FNMULSUBS_OP1,
+  FNMULSUBD_OP1,
+  FMLAv1i32_indexed_OP1,
+  FMLAv1i32_indexed_OP2,
+  FMLAv1i64_indexed_OP1,
+  FMLAv1i64_indexed_OP2,
+  FMLAv2f32_OP2,
+  FMLAv2f32_OP1,
+  FMLAv2f64_OP1,
+  FMLAv2f64_OP2,
+  FMLAv2i32_indexed_OP1,
+  FMLAv2i32_indexed_OP2,
+  FMLAv2i64_indexed_OP1,
+  FMLAv2i64_indexed_OP2,
+  FMLAv4f32_OP1,
+  FMLAv4f32_OP2,
+  FMLAv4i32_indexed_OP1,
+  FMLAv4i32_indexed_OP2,
+  FMLSv1i32_indexed_OP2,
+  FMLSv1i64_indexed_OP2,
+  FMLSv2f32_OP1,
+  FMLSv2f32_OP2,
+  FMLSv2f64_OP1,
+  FMLSv2f64_OP2,
+  FMLSv2i32_indexed_OP1,
+  FMLSv2i32_indexed_OP2,
+  FMLSv2i64_indexed_OP1,
+  FMLSv2i64_indexed_OP2,
+  FMLSv4f32_OP1,
+  FMLSv4f32_OP2,
+  FMLSv4i32_indexed_OP1,
+  FMLSv4i32_indexed_OP2
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
new file mode 100644
index 0000000..1705a0f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
@@ -0,0 +1,164 @@
+//===- CodeGen/MachineConstantPool.h - Abstract Constant Pool ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file declares the MachineConstantPool class which is an abstract
+/// constant pool to keep track of constants referenced by a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/MC/SectionKind.h"
+#include <climits>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class DataLayout;
+class FoldingSetNodeID;
+class MachineConstantPool;
+class raw_ostream;
+class Type;
+
+/// Abstract base class for all machine specific constantpool value subclasses.
+///
+class MachineConstantPoolValue {
+  virtual void anchor();
+
+  Type *Ty;
+
+public:
+  explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
+  virtual ~MachineConstantPoolValue() = default;
+
+  /// getType - get type of this MachineConstantPoolValue.
+  ///
+  Type *getType() const { return Ty; }
+
+  virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+                                        unsigned Alignment) = 0;
+
+  virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
+
+  /// print - Implement operator<<
+  virtual void print(raw_ostream &O) const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineConstantPoolValue &V) {
+  V.print(OS);
+  return OS;
+}
+
+/// This class is a data container for one entry in a MachineConstantPool.
+/// It contains a pointer to the value and an offset from the start of
+/// the constant pool.
+/// @brief An entry in a MachineConstantPool
+class MachineConstantPoolEntry {
+public:
+  /// The constant itself.
+  union {
+    const Constant *ConstVal;
+    MachineConstantPoolValue *MachineCPVal;
+  } Val;
+
+  /// The required alignment for this entry. The top bit is set when Val is
+  /// a target specific MachineConstantPoolValue.
+  unsigned Alignment;
+
+  MachineConstantPoolEntry(const Constant *V, unsigned A)
+    : Alignment(A) {
+    Val.ConstVal = V;
+  }
+
+  MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
+      : Alignment(A) {
+    Val.MachineCPVal = V;
+    Alignment |= 1U << (sizeof(unsigned) * CHAR_BIT - 1);
+  }
+
+  /// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
+  /// is indeed a target specific constantpool entry, not a wrapper over a
+  /// Constant.
+  bool isMachineConstantPoolEntry() const {
+    return (int)Alignment < 0;
+  }
+
+  int getAlignment() const {
+    return Alignment & ~(1U << (sizeof(unsigned) * CHAR_BIT - 1));
+  }
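+
+  // A small worked example of the tag encoding above (illustrative only):
+  // with a 32-bit unsigned, a target-specific entry of alignment 8 stores
+  // Alignment == 8 | 0x80000000; isMachineConstantPoolEntry() then sees a
+  // negative value, and getAlignment() masks the tag bit off to recover 8.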
+
+  Type *getType() const;
+
+  /// This method classifies the entry according to whether or not it may
+  /// generate a relocation entry.  This must be conservative, so if it might
+  /// codegen to a relocatable entry, it should say so.
+  bool needsRelocation() const;
+
+  SectionKind getSectionKind(const DataLayout *DL) const;
+};
+
+/// The MachineConstantPool class keeps track of constants referenced by a
+/// function which must be spilled to memory.  This is used for constants which
+/// are unable to be used directly as operands to instructions, which typically
+/// include floating point and large integer constants.
+///
+/// Instructions reference the address of these constant pool constants through
+/// the use of MO_ConstantPoolIndex values.  When emitting assembly or machine
+/// code, these virtual address references are converted to refer to the
+/// address of the function constant pool values.
+/// @brief The machine constant pool.
+class MachineConstantPool {
+  unsigned PoolAlignment;       ///< The alignment for the pool.
+  std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
+  /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
+  DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
+  const DataLayout &DL;
+
+  const DataLayout &getDataLayout() const { return DL; }
+
+public:
+  /// @brief The only constructor.
+  explicit MachineConstantPool(const DataLayout &DL)
+      : PoolAlignment(1), DL(DL) {}
+  ~MachineConstantPool();
+
+  /// getConstantPoolAlignment - Return the alignment required by
+  /// the whole constant pool, of which the first element must be aligned.
+  unsigned getConstantPoolAlignment() const { return PoolAlignment; }
+
+  /// getConstantPoolIndex - Create a new entry in the constant pool or return
+  /// an existing one.  The user must specify the minimum required alignment
+  /// for the object.
+  unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
+  unsigned getConstantPoolIndex(MachineConstantPoolValue *V,
+                                unsigned Alignment);
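+
+  // A hedged usage sketch (C is a hypothetical Constant; DL is the module's
+  // DataLayout): the returned index is what an MO_ConstantPoolIndex operand
+  // refers to.
+  //
+  //   unsigned Align = DL.getPrefTypeAlignment(C->getType());
+  //   unsigned Idx = MCP->getConstantPoolIndex(C, Align);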
+
+  /// isEmpty - Return true if this constant pool contains no constants.
+  bool isEmpty() const { return Constants.empty(); }
+
+  const std::vector<MachineConstantPoolEntry> &getConstants() const {
+    return Constants;
+  }
+
+  /// print - Used by the MachineFunction printer to print information about
+  /// constant pool objects.  Implemented in MachineFunction.cpp
+  void print(raw_ostream &OS) const;
+
+  /// dump - Print the constant pool to dbgs(); intended to be called from
+  /// the debugger.
+  void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECONSTANTPOOL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
new file mode 100644
index 0000000..ffbcc62
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
@@ -0,0 +1,111 @@
+//===- llvm/CodeGen/MachineDominanceFrontier.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/DominanceFrontierImpl.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/GenericDomTree.h"
+#include <vector>
+
+namespace llvm {
+
+class MachineDominanceFrontier : public MachineFunctionPass {
+  ForwardDominanceFrontierBase<MachineBasicBlock> Base;
+
+public:
+  using DomTreeT = DomTreeBase<MachineBasicBlock>;
+  using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
+  using DomSetType = DominanceFrontierBase<MachineBasicBlock, false>::DomSetType;
+  using iterator = DominanceFrontierBase<MachineBasicBlock, false>::iterator;
+  using const_iterator =
+      DominanceFrontierBase<MachineBasicBlock, false>::const_iterator;
+
+  MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
+  MachineDominanceFrontier &
+  operator=(const MachineDominanceFrontier &) = delete;
+
+  static char ID;
+
+  MachineDominanceFrontier();
+
+  DominanceFrontierBase<MachineBasicBlock, false> &getBase() { return Base; }
+
+  const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
+    return Base.getRoots();
+  }
+
+  MachineBasicBlock *getRoot() const {
+    return Base.getRoot();
+  }
+
+  bool isPostDominator() const {
+    return Base.isPostDominator();
+  }
+
+  iterator begin() {
+    return Base.begin();
+  }
+
+  const_iterator begin() const {
+    return Base.begin();
+  }
+
+  iterator end() {
+    return Base.end();
+  }
+
+  const_iterator end() const {
+    return Base.end();
+  }
+
+  iterator find(MachineBasicBlock *B) {
+    return Base.find(B);
+  }
+
+  const_iterator find(MachineBasicBlock *B) const {
+    return Base.find(B);
+  }
+
+  iterator addBasicBlock(MachineBasicBlock *BB, const DomSetType &frontier) {
+    return Base.addBasicBlock(BB, frontier);
+  }
+
+  void removeBlock(MachineBasicBlock *BB) {
+    return Base.removeBlock(BB);
+  }
+
+  void addToFrontier(iterator I, MachineBasicBlock *Node) {
+    return Base.addToFrontier(I, Node);
+  }
+
+  void removeFromFrontier(iterator I, MachineBasicBlock *Node) {
+    return Base.removeFromFrontier(I, Node);
+  }
+
+  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+    return Base.compareDomSet(DS1, DS2);
+  }
+
+  bool compare(DominanceFrontierBase<MachineBasicBlock, false> &Other) const {
+    return Base.compare(Other);
+  }
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  void releaseMemory() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
new file mode 100644
index 0000000..af642d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
@@ -0,0 +1,291 @@
+//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
+// but for target-specific code rather than target-independent IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEDOMINATORS_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
+#include <cassert>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+template <>
+inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
+    MachineBasicBlock *MBB) {
+  this->Roots.push_back(MBB);
+}
+
+extern template class DomTreeNodeBase<MachineBasicBlock>;
+extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
+extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
+
+using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
+
+//===-------------------------------------
+/// MachineDominatorTree - Concrete subclass of DominatorTreeBase that is used
+/// to compute a normal dominator tree for machine basic blocks.
+///
+class MachineDominatorTree : public MachineFunctionPass {
+  /// \brief Helper structure used to hold all the basic blocks
+  /// involved in the split of a critical edge.
+  struct CriticalEdge {
+    MachineBasicBlock *FromBB;
+    MachineBasicBlock *ToBB;
+    MachineBasicBlock *NewBB;
+  };
+
+  /// \brief Pile up all the critical edges to be split.
+  /// The splitting of a critical edge is local and thus, it is possible
+  /// to apply several of those changes at the same time.
+  mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
+
+  /// \brief Remember all the basic blocks that are inserted during
+  /// edge splitting.
+  /// Invariant: NewBBs == all the basic blocks contained in the NewBB
+  /// field of all the elements of CriticalEdgesToSplit.
+  /// I.e., for all elt in CriticalEdgesToSplit, there exists BB in NewBBs
+  /// such that BB == elt.NewBB.
+  mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
+
+  /// The DominatorTreeBase that is used to compute a normal dominator tree
+  std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
+
+  /// \brief Apply all the recorded critical edges to the DT.
+  /// This updates the underlying DT information in a way that uses
+  /// the fast query path of DT as much as possible.
+  ///
+  /// \post CriticalEdgesToSplit.empty().
+  void applySplitCriticalEdges() const;
+
+public:
+  static char ID; // Pass ID, replacement for typeid
+
+  MachineDominatorTree();
+
+  DomTreeBase<MachineBasicBlock> &getBase() {
+    if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
+    applySplitCriticalEdges();
+    return *DT;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// getRoots -  Return the root blocks of the current CFG.  This may include
+  /// multiple blocks if we are computing post dominators.  For forward
+  /// dominators, this will always be a single block (the entry node).
+  ///
+  inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
+    applySplitCriticalEdges();
+    return DT->getRoots();
+  }
+
+  inline MachineBasicBlock *getRoot() const {
+    applySplitCriticalEdges();
+    return DT->getRoot();
+  }
+
+  inline MachineDomTreeNode *getRootNode() const {
+    applySplitCriticalEdges();
+    return DT->getRootNode();
+  }
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  inline bool dominates(const MachineDomTreeNode* A,
+                        const MachineDomTreeNode* B) const {
+    applySplitCriticalEdges();
+    return DT->dominates(A, B);
+  }
+
+  inline bool dominates(const MachineBasicBlock* A,
+                        const MachineBasicBlock* B) const {
+    applySplitCriticalEdges();
+    return DT->dominates(A, B);
+  }
+
+  // dominates - Return true if A dominates B. This performs the
+  // special checks necessary if A and B are in the same basic block.
+  bool dominates(const MachineInstr *A, const MachineInstr *B) const {
+    applySplitCriticalEdges();
+    const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
+    if (BBA != BBB) return DT->dominates(BBA, BBB);
+
+    // Loop through the basic block until we find A or B.
+    MachineBasicBlock::const_iterator I = BBA->begin();
+    for (; &*I != A && &*I != B; ++I)
+      /*empty*/ ;
+
+    // A dominates B if it is found first in the basic block.
+    // (For a post-dominator tree, the condition would instead be &*I == B:
+    // A post-dominates B if B is found first in the basic block.)
+    return &*I == A;
+  }
+
+  inline bool properlyDominates(const MachineDomTreeNode* A,
+                                const MachineDomTreeNode* B) const {
+    applySplitCriticalEdges();
+    return DT->properlyDominates(A, B);
+  }
+
+  inline bool properlyDominates(const MachineBasicBlock* A,
+                                const MachineBasicBlock* B) const {
+    applySplitCriticalEdges();
+    return DT->properlyDominates(A, B);
+  }
+
+  /// findNearestCommonDominator - Find nearest common dominator basic block
+  /// for basic block A and B. If there is no such block then return NULL.
+  inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+                                                       MachineBasicBlock *B) {
+    applySplitCriticalEdges();
+    return DT->findNearestCommonDominator(A, B);
+  }
+
+  inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+    applySplitCriticalEdges();
+    return DT->getNode(BB);
+  }
+
+  /// getNode - return the (Post)DominatorTree node for the specified basic
+  /// block.  This is the same as using operator[] on this class.
+  ///
+  inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+    applySplitCriticalEdges();
+    return DT->getNode(BB);
+  }
+
+  /// addNewBlock - Add a new node to the dominator tree information.  This
+  /// creates a new node as a child of the DomBB dominator node, linking it
+  /// into the children list of the immediate dominator.
+  inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
+                                         MachineBasicBlock *DomBB) {
+    applySplitCriticalEdges();
+    return DT->addNewBlock(BB, DomBB);
+  }
+
+  /// changeImmediateDominator - This method is used to update the dominator
+  /// tree information when a node's immediate dominator changes.
+  ///
+  inline void changeImmediateDominator(MachineBasicBlock *N,
+                                       MachineBasicBlock* NewIDom) {
+    applySplitCriticalEdges();
+    DT->changeImmediateDominator(N, NewIDom);
+  }
+
+  inline void changeImmediateDominator(MachineDomTreeNode *N,
+                                       MachineDomTreeNode* NewIDom) {
+    applySplitCriticalEdges();
+    DT->changeImmediateDominator(N, NewIDom);
+  }
+
+  /// eraseNode - Removes a node from the dominator tree. Block must not
+  /// dominate any other blocks. Removes node from its immediate dominator's
+  /// children list. Deletes dominator node associated with basic block BB.
+  inline void eraseNode(MachineBasicBlock *BB) {
+    applySplitCriticalEdges();
+    DT->eraseNode(BB);
+  }
+
+  /// splitBlock - BB is split and now it has one successor. Update dominator
+  /// tree to reflect this change.
+  inline void splitBlock(MachineBasicBlock* NewBB) {
+    applySplitCriticalEdges();
+    DT->splitBlock(NewBB);
+  }
+
+  /// isReachableFromEntry - Return true if A is dominated by the entry
+  /// block of the function containing it.
+  bool isReachableFromEntry(const MachineBasicBlock *A) {
+    applySplitCriticalEdges();
+    return DT->isReachableFromEntry(A);
+  }
+
+  void releaseMemory() override;
+
+  void verifyAnalysis() const override;
+
+  void print(raw_ostream &OS, const Module*) const override;
+
+  /// \brief Record that the critical edge (FromBB, ToBB) has been
+  /// split with NewBB.
+  /// It is best to use this method instead of updating the underlying
+  /// information directly, because it helps reduce the number of times the
+  /// DT information is invalidated.
+  ///
+  /// \note Do not use this method with regular edges.
+  ///
+  /// \note To benefit from the compile time improvement incurred by this
+  /// method, its users have to limit their queries to the DT interface
+  /// between two edge splittings. In other words, they have to batch the
+  /// splitting of critical edges as much as possible.
+  void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
+                              MachineBasicBlock *ToBB,
+                              MachineBasicBlock *NewBB) {
+    bool Inserted = NewBBs.insert(NewBB).second;
+    (void)Inserted;
+    assert(Inserted &&
+           "A basic block inserted via edge splitting cannot appear twice");
+    CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
+  }
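+
+  // A hedged usage sketch (splitMyEdge is a hypothetical transform that
+  // splits an edge without telling the tree): record each split, then let
+  // the next query apply them all in one batch.
+  //
+  //   MachineBasicBlock *NewBB = splitMyEdge(FromBB, ToBB); // hypothetical
+  //   MDT.recordSplitCriticalEdge(FromBB, ToBB, NewBB);
+  //   ... // record further splits before querying
+  //   MDT.dominates(A, B); // the first query applies all recorded splits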
+};
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterable by generic graph iterators.
+///
+
+template <class Node, class ChildIterator>
+struct MachineDomTreeGraphTraitsBase {
+  using NodeRef = Node *;
+  using ChildIteratorType = ChildIterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+template <class T> struct GraphTraits;
+
+template <>
+struct GraphTraits<MachineDomTreeNode *>
+    : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
+                                           MachineDomTreeNode::iterator> {};
+
+template <>
+struct GraphTraits<const MachineDomTreeNode *>
+    : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
+                                           MachineDomTreeNode::const_iterator> {
+};
+
+template <> struct GraphTraits<MachineDominatorTree*>
+  : public GraphTraits<MachineDomTreeNode *> {
+  static NodeRef getEntryNode(MachineDominatorTree *DT) {
+    return DT->getRootNode();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
new file mode 100644
index 0000000..f887517
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
@@ -0,0 +1,724 @@
+//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The file defines the MachineFrameInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
+#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+class raw_ostream;
+class MachineFunction;
+class MachineBasicBlock;
+class BitVector;
+class AllocaInst;
+
+/// The CalleeSavedInfo class tracks the information needed to locate where a
+/// callee saved register is in the current frame.
+class CalleeSavedInfo {
+  unsigned Reg;
+  int FrameIdx;
+  /// Flag indicating whether the register is actually restored in the epilog.
+  /// In most cases, if a register is saved, it is also restored. There are
+  /// some situations, though, when this is not the case. For example, the
+  /// LR register on ARM is usually saved, but on exit from the function its
+  /// saved value may be loaded directly into PC. Since liveness tracking of
+  /// physical registers treats callee-saved registers as live outside of
+  /// the function, LR would be treated as live-on-exit, even though in these
+  /// scenarios it is not. This flag is added to indicate that the saved
+  /// register described by this object is not restored in the epilog.
+  /// The long-term solution is to model the liveness of callee-saved registers
+  /// by implicit uses on the return instructions, however, the required
+  /// changes in the ARM backend would be quite extensive.
+  bool Restored;
+
+public:
+  explicit CalleeSavedInfo(unsigned R, int FI = 0)
+  : Reg(R), FrameIdx(FI), Restored(true) {}
+
+  // Accessors.
+  unsigned getReg()                        const { return Reg; }
+  int getFrameIdx()                        const { return FrameIdx; }
+  void setFrameIdx(int FI)                       { FrameIdx = FI; }
+  bool isRestored()                        const { return Restored; }
+  void setRestored(bool R)                       { Restored = R; }
+};
+
+/// The MachineFrameInfo class represents an abstract stack frame until
+/// prolog/epilog code is inserted.  This class is key to allowing stack frame
+/// representation optimizations, such as frame pointer elimination.  It also
+/// allows more mundane (but still important) optimizations, such as reordering
+/// of abstract objects on the stack frame.
+///
+/// To support this, the class assigns unique integer identifiers to stack
+/// objects requested by clients.  These identifiers are negative integers for
+/// fixed stack objects (such as arguments passed on the stack) or nonnegative
+/// for objects that may be reordered.  Instructions which refer to stack
+/// objects use a special MO_FrameIndex operand to represent these frame
+/// indexes.
+///
+/// Because this class keeps track of all references to the stack frame, it
+/// knows when a variable sized object is allocated on the stack.  This is the
+/// sole condition which prevents frame pointer elimination, which is an
+/// important optimization on register-poor architectures.  Because original
+/// variable sized allocas in the source program are the only source of
+/// variable sized stack objects, it is safe to decide whether there will be
+/// any variable sized objects before all stack objects are known (for
+/// example, register allocator spill code never needs variable sized
+/// objects).
+///
+/// When prolog/epilog code emission is performed, the final stack frame is
+/// built and the machine instructions are modified to refer to the actual
+/// stack offsets of the object, eliminating all MO_FrameIndex operands from
+/// the program.
+///
+/// @brief Abstract Stack Frame Information
+class MachineFrameInfo {
+
+  // Represent a single object allocated on the stack.
+  struct StackObject {
+    // The offset of this object from the stack pointer on entry to
+    // the function.  This field has no meaning for a variable sized element.
+    int64_t SPOffset;
+
+    // The size of this object on the stack. 0 means a variable sized object,
+    // ~0ULL means a dead object.
+    uint64_t Size;
+
+    // The required alignment of this stack slot.
+    unsigned Alignment;
+
+    // If true, the value of the stack object is set before
+    // entering the function and is not modified inside the function. By
+    // default, fixed objects are immutable unless marked otherwise.
+    bool isImmutable;
+
+    // If true the stack object is used as spill slot. It
+    // cannot alias any other memory objects.
+    bool isSpillSlot;
+
+    /// If true, this stack slot is used to spill a value (could be deopt
+    /// and/or GC related) over a statepoint. We know that the address of the
+    /// slot can't alias any LLVM IR value.  This is very similar to a Spill
+    /// Slot, but is created by statepoint lowering in SelectionDAG, not the
+    /// register allocator.
+    bool isStatepointSpillSlot = false;
+
+    /// Identifier for stack memory type analogous to address space. If this is
+    /// non-0, the meaning is target defined. Offsets cannot be directly
+    /// compared between objects with different stack IDs. The object may not
+    /// necessarily reside in the same contiguous memory block as other stack
+    /// objects. Objects with differing stack IDs should not be merged or
+    /// substituted for each other.
+    uint8_t StackID;
+
+    /// If this stack object is originated from an Alloca instruction
+    /// this value saves the original IR allocation. Can be NULL.
+    const AllocaInst *Alloca;
+
+    // If true, the object was mapped into the local frame
+    // block and doesn't need additional handling for allocation beyond that.
+    bool PreAllocated = false;
+
+    // If true, an LLVM IR value might point to this object.
+    // Normally, spill slots and fixed-offset objects don't alias IR-accessible
+    // objects, but there are exceptions (on PowerPC, for example, some byval
+    // arguments have ABI-prescribed offsets).
+    bool isAliased;
+
+    /// If true, the object has been zero-extended.
+    bool isZExt = false;
+
+    /// If true, the object has been sign-extended.
+    bool isSExt = false;
+
+    StackObject(uint64_t Size, unsigned Alignment, int64_t SPOffset,
+                bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
+                bool IsAliased, uint8_t StackID = 0)
+      : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
+        isImmutable(IsImmutable), isSpillSlot(IsSpillSlot),
+        StackID(StackID), Alloca(Alloca), isAliased(IsAliased) {}
+  };
+
+  /// The alignment of the stack.
+  unsigned StackAlignment;
+
+  /// Can the stack be realigned. This can be false if the target does not
+  /// support stack realignment, or if the user asks us not to realign the
+  /// stack. In this situation, overaligned allocas are all treated as dynamic
+  /// allocations and the target must handle them as part of DYNAMIC_STACKALLOC
+  /// lowering. All non-alloca stack objects have their alignment clamped to the
+  /// base ABI stack alignment.
+  /// FIXME: There is room for improvement in this case, in terms of
+  /// grouping overaligned allocas into a "secondary stack frame" and
+  /// then only using a single alloca to allocate this frame and only a
+  /// single virtual register to access it. Currently, without such an
+  /// optimization, each such alloca gets its own dynamic realignment.
+  bool StackRealignable;
+
+  /// Whether the function has the \c alignstack attribute.
+  bool ForcedRealign;
+
+  /// The list of stack objects allocated.
+  std::vector<StackObject> Objects;
+
+  /// This contains the number of fixed objects contained on
+  /// the stack.  Because fixed objects are stored at a negative index in the
+  /// Objects list, this is also the index to the 0th object in the list.
+  unsigned NumFixedObjects = 0;
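+
+  /// For example, with two fixed objects, frame indices -2 and -1 map to
+  /// Objects[0] and Objects[1], and frame index 0 maps to Objects[2]. The
+  /// accessors below all rely on this mapping (illustrative sketch):
+  /// \code
+  ///   StackObject &SO = Objects[ObjectIdx + NumFixedObjects];
+  /// \endcode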
+
+  /// This boolean keeps track of whether any variable
+  /// sized objects have been allocated yet.
+  bool HasVarSizedObjects = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.frameaddress.
+  bool FrameAddressTaken = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.returnaddress.
+  bool ReturnAddressTaken = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.experimental.stackmap.
+  bool HasStackMap = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.experimental.patchpoint.
+  bool HasPatchPoint = false;
+
+  /// The prolog/epilog code inserter calculates the final stack
+  /// offsets for all of the fixed size objects, updating the Objects list
+  /// above.  It then updates StackSize to contain the number of bytes that need
+  /// to be allocated on entry to the function.
+  uint64_t StackSize = 0;
+
+  /// The amount that a frame offset needs to be adjusted to
+  /// have the actual offset from the stack/frame pointer.  The exact usage of
+  /// this is target-dependent, but it is typically used to adjust between
+  /// SP-relative and FP-relative offsets.  E.G., if objects are accessed via
+  /// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
+  /// to the distance between the initial SP and the value in FP.  For many
+  /// targets, this value is only used when generating debug info (via
+  /// TargetRegisterInfo::getFrameIndexReference); when generating code, the
+  /// corresponding adjustments are performed directly.
+  int OffsetAdjustment = 0;
+
+  /// The prolog/epilog code inserter may process objects that require greater
+  /// alignment than the default alignment the target provides.
+  /// To handle this, MaxAlignment is set to the maximum alignment
+  /// needed by the objects on the current frame.  If this is greater than the
+  /// native alignment maintained by the compiler, dynamic alignment code will
+  /// be needed.
+  ///
+  unsigned MaxAlignment = 0;
+
+  /// Set to true if this function adjusts the stack -- e.g.,
+  /// when calling another function. This is only valid during and after
+  /// prolog/epilog code insertion.
+  bool AdjustsStack = false;
+
+  /// Set to true if this function has any function calls.
+  bool HasCalls = false;
+
+  /// The frame index for the stack protector.
+  int StackProtectorIdx = -1;
+
+  /// The frame index for the function context. Used for SjLj exceptions.
+  int FunctionContextIdx = -1;
+
+  /// This contains the size of the largest call frame if the target uses frame
+  /// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
+  /// class).  This information is important for frame pointer elimination.
+  /// It is only valid during and after prolog/epilog code insertion.
+  unsigned MaxCallFrameSize = ~0u;
+
+  /// The prolog/epilog code inserter fills in this vector with each
+  /// callee saved register saved in the frame.  Beyond its use by the prolog/
+  /// epilog code inserter, this data is used for debug info and exception
+  /// handling.
+  std::vector<CalleeSavedInfo> CSInfo;
+
+  /// Has CSInfo been set yet?
+  bool CSIValid = false;
+
+  /// References to frame indices which are mapped
+  /// into the local frame allocation block. <FrameIdx, LocalOffset>
+  SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
+
+  /// Size of the pre-allocated local frame block.
+  int64_t LocalFrameSize = 0;
+
+  /// Required alignment of the local object blob, which is the strictest
+  /// alignment of any object in it.
+  unsigned LocalFrameMaxAlign = 0;
+
+  /// Whether the local object blob needs to be allocated together. If not,
+  /// PEI should ignore the isPreAllocated flags on the stack objects and
+  /// just allocate them normally.
+  bool UseLocalStackAllocationBlock = false;
+
+  /// True if the function dynamically adjusts the stack pointer through some
+  /// opaque mechanism like inline assembly or Win32 EH.
+  bool HasOpaqueSPAdjustment = false;
+
+  /// True if the function contains operations which will lower down to
+  /// instructions which manipulate the stack pointer.
+  bool HasCopyImplyingStackAdjustment = false;
+
+  /// True if the function contains a call to the llvm.va_start intrinsic.
+  bool HasVAStart = false;
+
+  /// True if this is a varargs function that contains a musttail call.
+  bool HasMustTailInVarArgFunc = false;
+
+  /// True if this function contains a tail call. If so, immutable objects
+  /// like function arguments are no longer truly immutable: a tail call *can*
+  /// overwrite fixed stack objects such as arguments, so we can't treat them
+  /// as immutable.
+  bool HasTailCall = false;
+
+  /// Not null, if shrink-wrapping found a better place for the prologue.
+  MachineBasicBlock *Save = nullptr;
+  /// Not null, if shrink-wrapping found a better place for the epilogue.
+  MachineBasicBlock *Restore = nullptr;
+
+public:
+  explicit MachineFrameInfo(unsigned StackAlignment, bool StackRealignable,
+                            bool ForcedRealign)
+      : StackAlignment(StackAlignment), StackRealignable(StackRealignable),
+        ForcedRealign(ForcedRealign) {}
+
+  /// Return true if there are any stack objects in this function.
+  bool hasStackObjects() const { return !Objects.empty(); }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if the stack frame for this function
+  /// contains any variable sized objects.
+  bool hasVarSizedObjects() const { return HasVarSizedObjects; }
+
+  /// Return the index for the stack protector object.
+  int getStackProtectorIndex() const { return StackProtectorIdx; }
+  void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
+  bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }
+
+  /// Return the index for the function context object.
+  /// This object is used for SjLj exceptions.
+  int getFunctionContextIndex() const { return FunctionContextIdx; }
+  void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to
+  /// \@llvm.frameaddress in this function.
+  bool isFrameAddressTaken() const { return FrameAddressTaken; }
+  void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
+
+  /// This method may be called any time after
+  /// instruction selection is complete to determine if there is a call to
+  /// \@llvm.returnaddress in this function.
+  bool isReturnAddressTaken() const { return ReturnAddressTaken; }
+  void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to builtin
+  /// \@llvm.experimental.stackmap.
+  bool hasStackMap() const { return HasStackMap; }
+  void setHasStackMap(bool s = true) { HasStackMap = s; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to builtin
+  /// \@llvm.experimental.patchpoint.
+  bool hasPatchPoint() const { return HasPatchPoint; }
+  void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }
+
+  /// Return the minimum frame object index.
+  int getObjectIndexBegin() const { return -NumFixedObjects; }
+
+  /// Return one past the maximum frame object index.
+  int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
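+
+  /// A typical walk over every frame object, fixed and non-fixed, skips dead
+  /// entries (a minimal sketch; MFI names a MachineFrameInfo):
+  /// \code
+  ///   for (int FI = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
+  ///        FI != E; ++FI) {
+  ///     if (MFI.isDeadObjectIndex(FI))
+  ///       continue;
+  ///     // Fixed objects have FI < 0; see isFixedObjectIndex().
+  ///   }
+  /// \endcode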
+
+  /// Return the number of fixed objects.
+  unsigned getNumFixedObjects() const { return NumFixedObjects; }
+
+  /// Return the number of objects.
+  unsigned getNumObjects() const { return Objects.size(); }
+
+  /// Map a frame index into the local object block
+  void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
+    LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
+    Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
+  }
+
+  /// Get the local offset mapping for an object.
+  std::pair<int, int64_t> getLocalFrameObjectMap(int i) const {
+    assert(i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
+            "Invalid local object reference!");
+    return LocalFrameObjects[i];
+  }
+
+  /// Return the number of objects allocated into the local object block.
+  int64_t getLocalFrameObjectCount() const { return LocalFrameObjects.size(); }
+
+  /// Set the size of the local object blob.
+  void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
+
+  /// Get the size of the local object blob.
+  int64_t getLocalFrameSize() const { return LocalFrameSize; }
+
+  /// Required alignment of the local object blob,
+  /// which is the strictest alignment of any object in it.
+  void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
+
+  /// Return the required alignment of the local object blob.
+  unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
+
+  /// Get whether the local allocation blob should be allocated together or
+  /// let PEI allocate the locals in it directly.
+  bool getUseLocalStackAllocationBlock() const {
+    return UseLocalStackAllocationBlock;
+  }
+
+  /// setUseLocalStackAllocationBlock - Set whether the local allocation blob
+  /// should be allocated together or let PEI allocate the locals in it
+  /// directly.
+  void setUseLocalStackAllocationBlock(bool v) {
+    UseLocalStackAllocationBlock = v;
+  }
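+
+  /// Taken together, the local-block API above is typically driven roughly as
+  /// follows by a local stack slot allocation pass (illustrative sketch; FI,
+  /// Offset, TotalSize, and MaxAlign are assumed values):
+  /// \code
+  ///   MFI.mapLocalFrameObject(FI, Offset);  // place FI inside the blob
+  ///   MFI.setLocalFrameSize(TotalSize);
+  ///   MFI.setLocalFrameMaxAlign(MaxAlign);
+  ///   MFI.setUseLocalStackAllocationBlock(true);
+  /// \endcode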
+
+  /// Return true if the object was pre-allocated into the local block.
+  bool isObjectPreAllocated(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
+  }
+
+  /// Return the size of the specified object.
+  int64_t getObjectSize(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Size;
+  }
+
+  /// Change the size of the specified stack object.
+  void setObjectSize(int ObjectIdx, int64_t Size) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].Size = Size;
+  }
+
+  /// Return the alignment of the specified stack object.
+  unsigned getObjectAlignment(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Alignment;
+  }
+
+  /// setObjectAlignment - Change the alignment of the specified stack object.
+  void setObjectAlignment(int ObjectIdx, unsigned Align) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
+    ensureMaxAlignment(Align);
+  }
+
+  /// Return the underlying Alloca of the specified
+  /// stack object if it exists. Returns nullptr if none exists.
+  const AllocaInst* getObjectAllocation(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Alloca;
+  }
+
+  /// Return the assigned stack offset of the specified object
+  /// from the incoming stack pointer.
+  int64_t getObjectOffset(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    assert(!isDeadObjectIndex(ObjectIdx) &&
+           "Getting frame offset for a dead object?");
+    return Objects[ObjectIdx+NumFixedObjects].SPOffset;
+  }
+
+  bool isObjectZExt(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isZExt;
+  }
+
+  void setObjectZExt(int ObjectIdx, bool IsZExt) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isZExt = IsZExt;
+  }
+
+  bool isObjectSExt(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isSExt;
+  }
+
+  void setObjectSExt(int ObjectIdx, bool IsSExt) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isSExt = IsSExt;
+  }
+
+  /// Set the stack frame offset of the specified object. The
+  /// offset is relative to the stack pointer on entry to the function.
+  void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    assert(!isDeadObjectIndex(ObjectIdx) &&
+           "Setting frame offset for a dead object?");
+    Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
+  }
+
+  /// Return the number of bytes that must be allocated to hold
+  /// all of the fixed size frame objects.  This is only valid after
+  /// Prolog/Epilog code insertion has finalized the stack frame layout.
+  uint64_t getStackSize() const { return StackSize; }
+
+  /// Set the size of the stack.
+  void setStackSize(uint64_t Size) { StackSize = Size; }
+
+  /// Estimate and return the size of the stack frame.
+  unsigned estimateStackSize(const MachineFunction &MF) const;
+
+  /// Return the correction for frame offsets.
+  int getOffsetAdjustment() const { return OffsetAdjustment; }
+
+  /// Set the correction for frame offsets.
+  void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
+
+  /// Return the alignment in bytes that this function must be aligned to,
+  /// which is greater than the default stack alignment provided by the target.
+  unsigned getMaxAlignment() const { return MaxAlignment; }
+
+  /// Make sure the function is at least Align bytes aligned.
+  void ensureMaxAlignment(unsigned Align);
+
+  /// Return true if this function adjusts the stack -- e.g.,
+  /// when calling another function. This is only valid during and after
+  /// prolog/epilog code insertion.
+  bool adjustsStack() const { return AdjustsStack; }
+  void setAdjustsStack(bool V) { AdjustsStack = V; }
+
+  /// Return true if the current function has any function calls.
+  bool hasCalls() const { return HasCalls; }
+  void setHasCalls(bool V) { HasCalls = V; }
+
+  /// Returns true if the function contains opaque dynamic stack adjustments.
+  bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
+  void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }
+
+  /// Returns true if the function contains operations which will lower down to
+  /// instructions which manipulate the stack pointer.
+  bool hasCopyImplyingStackAdjustment() const {
+    return HasCopyImplyingStackAdjustment;
+  }
+  void setHasCopyImplyingStackAdjustment(bool B) {
+    HasCopyImplyingStackAdjustment = B;
+  }
+
+  /// Returns true if the function calls the llvm.va_start intrinsic.
+  bool hasVAStart() const { return HasVAStart; }
+  void setHasVAStart(bool B) { HasVAStart = B; }
+
+  /// Returns true if the function is variadic and contains a musttail call.
+  bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
+  void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }
+
+  /// Returns true if the function contains a tail call.
+  bool hasTailCall() const { return HasTailCall; }
+  void setHasTailCall() { HasTailCall = true; }
+
+  /// Computes the maximum size of a call frame and the AdjustsStack property.
+  /// This only works for targets defining
+  /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
+  /// and getFrameSize().
+  /// This is usually computed by the prologue epilogue inserter but some
+  /// targets may call this to compute it earlier.
+  void computeMaxCallFrameSize(const MachineFunction &MF);
+
+  /// Return the maximum size of a call frame that must be
+  /// allocated for an outgoing function call.  This is only available if
+  /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
+  /// then only during or after prolog/epilog code insertion.
+  ///
+  unsigned getMaxCallFrameSize() const {
+    // TODO: Enable this assert when targets are fixed.
+    //assert(isMaxCallFrameSizeComputed() && "MaxCallFrameSize not computed yet");
+    if (!isMaxCallFrameSizeComputed())
+      return 0;
+    return MaxCallFrameSize;
+  }
+  bool isMaxCallFrameSizeComputed() const {
+    return MaxCallFrameSize != ~0u;
+  }
+  void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
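+
+  /// Because the value is only meaningful once computed, callers typically
+  /// guard on isMaxCallFrameSizeComputed() (illustrative sketch):
+  /// \code
+  ///   unsigned MaxCFS =
+  ///       MFI.isMaxCallFrameSizeComputed() ? MFI.getMaxCallFrameSize() : 0;
+  /// \endcode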
+
+  /// Create a new object at a fixed location on the stack.
+  /// All fixed objects should be created before other objects are created for
+  /// efficiency. By default, fixed objects are not pointed to by LLVM IR
+  /// values. This returns an index with a negative value.
+  int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable,
+                        bool isAliased = false);
+
+  /// Create a spill slot at a fixed location on the stack.
+  /// Returns an index with a negative value.
+  int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset,
+                                  bool IsImmutable = false);
+
+  /// Returns true if the specified index corresponds to a fixed stack object.
+  bool isFixedObjectIndex(int ObjectIdx) const {
+    return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
+  }
+
+  /// Returns true if the specified index corresponds
+  /// to an object that might be pointed to by an LLVM IR value.
+  bool isAliasedObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isAliased;
+  }
+
+  /// Returns true if the specified index corresponds to an immutable object.
+  bool isImmutableObjectIndex(int ObjectIdx) const {
+    // Tail calling functions can clobber their function arguments.
+    if (HasTailCall)
+      return false;
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isImmutable;
+  }
+
+  /// Marks the immutability of an object.
+  void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isImmutable = IsImmutable;
+  }
+
+  /// Returns true if the specified index corresponds to a spill slot.
+  bool isSpillSlotObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
+  }
+
+  bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot;
+  }
+
+  /// \see StackID
+  uint8_t getStackID(int ObjectIdx) const {
+    return Objects[ObjectIdx+NumFixedObjects].StackID;
+  }
+
+  /// \see StackID
+  void setStackID(int ObjectIdx, uint8_t ID) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].StackID = ID;
+  }
+
+  /// Returns true if the specified index corresponds to a dead object.
+  bool isDeadObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
+  }
+
+  /// Returns true if the specified index corresponds to a variable sized
+  /// object.
+  bool isVariableSizedObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx + NumFixedObjects].Size == 0;
+  }
+
+  void markAsStatepointSpillSlotObjectIndex(int ObjectIdx) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot = true;
+    assert(isStatepointSpillSlotObjectIndex(ObjectIdx) && "inconsistent");
+  }
+
+  /// Create a new statically sized stack object, returning
+  /// a nonnegative identifier to represent it.
+  int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot,
+                        const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
+
+  /// Create a new statically sized stack object that represents a spill slot,
+  /// returning a nonnegative identifier to represent it.
+  int CreateSpillStackObject(uint64_t Size, unsigned Alignment);
+
+  /// Remove or mark dead a statically sized stack object.
+  void RemoveStackObject(int ObjectIdx) {
+    // Mark it dead.
+    Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
+  }
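+
+  /// A spill slot's typical lifecycle, from creation to being marked dead
+  /// (a minimal sketch; Size and Align are assumed values):
+  /// \code
+  ///   int FI = MFI.CreateSpillStackObject(Size, Align);
+  ///   // ... emit loads/stores against frame index FI ...
+  ///   MFI.RemoveStackObject(FI);  // Size becomes ~0ULL
+  ///   assert(MFI.isDeadObjectIndex(FI));
+  /// \endcode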
+
+  /// Notify the MachineFrameInfo object that a variable sized object has been
+  /// created.  This must be called whenever a variable sized object is
+  /// created, whether or not the index returned is actually used.
+  int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca);
+
+  /// Returns a reference to the callee saved info vector for the current function.
+  const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
+    return CSInfo;
+  }
+  /// \copydoc getCalleeSavedInfo()
+  std::vector<CalleeSavedInfo> &getCalleeSavedInfo() { return CSInfo; }
+
+  /// Used by prolog/epilog inserter to set the function's callee saved
+  /// information.
+  void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
+    CSInfo = CSI;
+  }
+
+  /// Has the callee saved info been calculated yet?
+  bool isCalleeSavedInfoValid() const { return CSIValid; }
+
+  void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
+
+  MachineBasicBlock *getSavePoint() const { return Save; }
+  void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
+  MachineBasicBlock *getRestorePoint() const { return Restore; }
+  void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }
+
+  /// Return a set of physical registers that are pristine.
+  ///
+  /// Pristine registers hold a value that is useless to the current function,
+  /// but that must be preserved - they are callee saved registers that are not
+  /// saved.
+  ///
+  /// Before the PrologueEpilogueInserter has placed the CSR spill code, this
+  /// method always returns an empty set.
+  BitVector getPristineRegs(const MachineFunction &MF) const;
+
+  /// Used by the MachineFunction printer to print information about
+  /// stack objects. Implemented in MachineFunction.cpp.
+  void print(const MachineFunction &MF, raw_ostream &OS) const;
+
+  /// dump - Print the function to stderr.
+  void dump(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
new file mode 100644
index 0000000..7d8b7eb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
@@ -0,0 +1,948 @@
+//===- llvm/CodeGen/MachineFunction.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect native machine code for a function.  This class contains a list of
+// MachineBasicBlock instances that make up the current compiled function.
+//
+// This class also contains pointers to various classes which hold
+// target-specific information about the generated code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
+#define LLVM_CODEGEN_MACHINEFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Recycler.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockAddress;
+class DataLayout;
+class DIExpression;
+class DILocalVariable;
+class DILocation;
+class Function;
+class GlobalValue;
+class MachineConstantPool;
+class MachineFrameInfo;
+class MachineFunction;
+class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class MCContext;
+class MCInstrDesc;
+class Pass;
+class PseudoSourceValueManager;
+class raw_ostream;
+class SlotIndexes;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetSubtargetInfo;
+struct WinEHFuncInfo;
+
+template <> struct ilist_alloc_traits<MachineBasicBlock> {
+  void deleteNode(MachineBasicBlock *MBB);
+};
+
+template <> struct ilist_callback_traits<MachineBasicBlock> {
+  void addNodeToList(MachineBasicBlock* MBB);
+  void removeNodeFromList(MachineBasicBlock* MBB);
+
+  template <class Iterator>
+  void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
+    llvm_unreachable("Never transfer between lists");
+  }
+};
+
+/// MachineFunctionInfo - This class can be derived from and used by targets to
+/// hold private target-specific information for each MachineFunction.  Objects
+/// of this type are accessed/created with MF::getInfo and destroyed when the
+/// MachineFunction is destroyed.
+struct MachineFunctionInfo {
+  virtual ~MachineFunctionInfo();
+
+  /// \brief Factory function: default behavior is to call new using the
+  /// supplied allocator.
+  ///
+  /// This function can be overridden in a derived class.
+  template<typename Ty>
+  static Ty *create(BumpPtrAllocator &Allocator, MachineFunction &MF) {
+    return new (Allocator.Allocate<Ty>()) Ty(MF);
+  }
+};
+
+/// Properties which a MachineFunction may have at a given point in time.
+/// Each of these has checking code in the MachineVerifier, and passes can
+/// require that a property be set.
+class MachineFunctionProperties {
+  // Possible TODO: Allow targets to extend this (perhaps by allowing the
+  // constructor to specify the size of the bit vector)
+  // Possible TODO: Allow requiring the negative (e.g. VRegsAllocated could be
+  // stated as the negative of "has vregs").
+
+public:
+  // The properties are stated in "positive" form; i.e. a pass could require
+  // that the property hold, but not that it does not hold.
+
+  // Property descriptions:
+  // IsSSA: True when the machine function is in SSA form and virtual registers
+  //  have a single def.
+  // NoPHIs: The machine function does not contain any PHI instruction.
+  // TracksLiveness: True when tracking register liveness accurately.
+  //  While this property is set, register liveness information in basic block
+  //  live-in lists and machine instruction operands (e.g. kill flags, implicit
+  //  defs) is accurate. This means it can be used to change the code in ways
+  //  that affect the values in registers, for example by the register
+  //  scavenger.
+  //  When this property is clear, liveness is no longer reliable.
+  // NoVRegs: The machine function does not use any virtual registers.
+  // Legalized: In GlobalISel: the MachineLegalizer ran and all pre-isel generic
+  //  instructions have been legalized; i.e., all instructions are now one of:
+  //   - generic and always legal (e.g., COPY)
+  //   - target-specific
+  //   - legal pre-isel generic instructions.
+  // RegBankSelected: In GlobalISel: the RegBankSelect pass ran and all generic
+  //  virtual registers have been assigned to a register bank.
+  // Selected: In GlobalISel: the InstructionSelect pass ran and all pre-isel
+  //  generic instructions have been eliminated; i.e., all instructions are now
+  //  target-specific or non-pre-isel generic instructions (e.g., COPY).
+  //  Since only pre-isel generic instructions can have generic virtual register
+  //  operands, this also means that all generic virtual registers have been
+  //  constrained to virtual registers (assigned to register classes) and that
+  //  all sizes attached to them have been eliminated.
+  enum class Property : unsigned {
+    IsSSA,
+    NoPHIs,
+    TracksLiveness,
+    NoVRegs,
+    FailedISel,
+    Legalized,
+    RegBankSelected,
+    Selected,
+    LastProperty = Selected,
+  };
+
+  bool hasProperty(Property P) const {
+    return Properties[static_cast<unsigned>(P)];
+  }
+
+  MachineFunctionProperties &set(Property P) {
+    Properties.set(static_cast<unsigned>(P));
+    return *this;
+  }
+
+  MachineFunctionProperties &reset(Property P) {
+    Properties.reset(static_cast<unsigned>(P));
+    return *this;
+  }
+
+  /// Reset all the properties.
+  MachineFunctionProperties &reset() {
+    Properties.reset();
+    return *this;
+  }
+
+  MachineFunctionProperties &set(const MachineFunctionProperties &MFP) {
+    Properties |= MFP.Properties;
+    return *this;
+  }
+
+  MachineFunctionProperties &reset(const MachineFunctionProperties &MFP) {
+    Properties.reset(MFP.Properties);
+    return *this;
+  }
+
+  // Returns true if all properties set in V (i.e. required by a pass) are set
+  // in this.
+  bool verifyRequiredProperties(const MachineFunctionProperties &V) const {
+    return !V.Properties.test(Properties);
+  }
+
+  /// Print the MachineFunctionProperties in human-readable form.
+  void print(raw_ostream &OS) const;
+
+private:
+  BitVector Properties =
+      BitVector(static_cast<unsigned>(Property::LastProperty)+1);
+};
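+
+// A MachineFunctionPass typically declares the properties it relies on via
+// getRequiredProperties(), which the pass manager checks against the
+// function's current property set using verifyRequiredProperties().
+// A minimal sketch:
+//
+//   MachineFunctionProperties getRequiredProperties() const override {
+//     return MachineFunctionProperties().set(
+//         MachineFunctionProperties::Property::NoVRegs);
+//   }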
+
+struct SEHHandler {
+  /// Filter or finally function. Null indicates a catch-all.
+  const Function *FilterOrFinally;
+
+  /// Address of block to recover at. Null for a finally handler.
+  const BlockAddress *RecoverBA;
+};
+
+/// This structure is used to retain landing pad info for the current function.
+struct LandingPadInfo {
+  MachineBasicBlock *LandingPadBlock;      // Landing pad block.
+  SmallVector<MCSymbol *, 1> BeginLabels;  // Labels prior to invoke.
+  SmallVector<MCSymbol *, 1> EndLabels;    // Labels after invoke.
+  SmallVector<SEHHandler, 1> SEHHandlers;  // SEH handlers active at this lpad.
+  MCSymbol *LandingPadLabel = nullptr;     // Label at beginning of landing pad.
+  std::vector<int> TypeIds;                // List of type ids (filters negative).
+
+  explicit LandingPadInfo(MachineBasicBlock *MBB)
+      : LandingPadBlock(MBB) {}
+};
+
+class MachineFunction {
+  const Function &F;
+  const TargetMachine &Target;
+  const TargetSubtargetInfo *STI;
+  MCContext &Ctx;
+  MachineModuleInfo &MMI;
+
+  // RegInfo - Information about each register in use in the function.
+  MachineRegisterInfo *RegInfo;
+
+  // Used to keep track of target-specific per-machine function information for
+  // the target implementation.
+  MachineFunctionInfo *MFInfo;
+
+  // Keep track of objects allocated on the stack.
+  MachineFrameInfo *FrameInfo;
+
+  // Keep track of constants which are spilled to memory
+  MachineConstantPool *ConstantPool;
+
+  // Keep track of jump tables for switch instructions
+  MachineJumpTableInfo *JumpTableInfo;
+
+  // Keeps track of Windows exception handling related data. This will be null
+  // for functions that aren't using a funclet-based EH personality.
+  WinEHFuncInfo *WinEHInfo = nullptr;
+
+  // Function-level unique numbering for MachineBasicBlocks.  When a
+  // MachineBasicBlock is inserted into a MachineFunction it is automatically
+  // numbered and this vector keeps track of the mapping from IDs to MBBs.
+  std::vector<MachineBasicBlock*> MBBNumbering;
+
+  // Pool-allocate MachineFunction-lifetime and IR objects.
+  BumpPtrAllocator Allocator;
+
+  // Allocation management for instructions in function.
+  Recycler<MachineInstr> InstructionRecycler;
+
+  // Allocation management for operand arrays on instructions.
+  ArrayRecycler<MachineOperand> OperandRecycler;
+
+  // Allocation management for basic blocks in function.
+  Recycler<MachineBasicBlock> BasicBlockRecycler;
+
+  // List of machine basic blocks in function
+  using BasicBlockListType = ilist<MachineBasicBlock>;
+  BasicBlockListType BasicBlocks;
+
+  /// FunctionNumber - This provides a unique ID for each function emitted in
+  /// this translation unit.
+  ///
+  unsigned FunctionNumber;
+
+  /// Alignment - The alignment of the function.
+  unsigned Alignment;
+
+  /// ExposesReturnsTwice - True if the function calls setjmp or related
+  /// functions with attribute "returns twice", but doesn't have
+  /// the attribute itself.
+  /// This is used to limit optimizations which cannot reason
+  /// about the control flow of such functions.
+  bool ExposesReturnsTwice = false;
+
+  /// True if the function includes any inline assembly.
+  bool HasInlineAsm = false;
+
+  /// True if any WinCFI instructions have been emitted in this function.
+  Optional<bool> HasWinCFI;
+
+  /// Current high-level properties of the IR of the function (e.g. is in SSA
+  /// form or whether registers have been allocated)
+  MachineFunctionProperties Properties;
+
+  // Allocation management for pseudo source values.
+  std::unique_ptr<PseudoSourceValueManager> PSVManager;
+
+  /// List of moves done by a function's prolog.  Used to construct frame maps
+  /// by debug and exception handling consumers.
+  std::vector<MCCFIInstruction> FrameInstructions;
+
+  /// \name Exception Handling
+  /// \{
+
+  /// List of LandingPadInfo describing the landing pad information.
+  std::vector<LandingPadInfo> LandingPads;
+
+  /// Map a landing pad's EH symbol to the call site indexes.
+  DenseMap<MCSymbol*, SmallVector<unsigned, 4>> LPadToCallSiteMap;
+
+  /// Map of invoke call site index values to associated begin EH_LABEL.
+  DenseMap<MCSymbol*, unsigned> CallSiteMap;
+
+  /// CodeView label annotations.
+  std::vector<std::pair<MCSymbol *, MDNode *>> CodeViewAnnotations;
+
+  bool CallsEHReturn = false;
+  bool CallsUnwindInit = false;
+  bool HasEHFunclets = false;
+
+  /// List of C++ TypeInfo used.
+  std::vector<const GlobalValue *> TypeInfos;
+
+  /// List of typeids encoding filters used.
+  std::vector<unsigned> FilterIds;
+
+  /// List of the indices in FilterIds corresponding to filter terminators.
+  std::vector<unsigned> FilterEnds;
+
+  EHPersonality PersonalityTypeCache = EHPersonality::Unknown;
+
+  /// \}
+
+  /// Clear all the members of this MachineFunction, except the ones used to
+  /// initialize the MachineFunction again.
+  /// More specifically, this deallocates all the dynamically allocated
+  /// objects and gets rid of all the XXXInfo data structures, but keeps the
+  /// references to Fn, Target, MMI, and FunctionNumber unchanged.
+  void clear();
+  /// Allocate and initialize the different members.
+  /// In particular, the XXXInfo data structure.
+  /// \pre Fn, Target, MMI, and FunctionNumber are properly set.
+  void init();
+
+public:
+  struct VariableDbgInfo {
+    const DILocalVariable *Var;
+    const DIExpression *Expr;
+    unsigned Slot;
+    const DILocation *Loc;
+
+    VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+                    unsigned Slot, const DILocation *Loc)
+        : Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
+  };
+  using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
+  VariableDbgInfoMapTy VariableDbgInfos;
+
+  MachineFunction(const Function &F, const TargetMachine &TM,
+                  const TargetSubtargetInfo &STI, unsigned FunctionNum,
+                  MachineModuleInfo &MMI);
+  MachineFunction(const MachineFunction &) = delete;
+  MachineFunction &operator=(const MachineFunction &) = delete;
+  ~MachineFunction();
+
+  /// Reset the instance as if it was just created.
+  void reset() {
+    clear();
+    init();
+  }
+
+  MachineModuleInfo &getMMI() const { return MMI; }
+  MCContext &getContext() const { return Ctx; }
+
+  PseudoSourceValueManager &getPSVManager() const { return *PSVManager; }
+
+  /// Return the DataLayout attached to the Module associated to this MF.
+  const DataLayout &getDataLayout() const;
+
+  /// Return the LLVM function that this machine code represents
+  const Function &getFunction() const { return F; }
+
+  /// getName - Return the name of the corresponding LLVM function.
+  StringRef getName() const;
+
+  /// getFunctionNumber - Return a unique ID for the current function.
+  unsigned getFunctionNumber() const { return FunctionNumber; }
+
+  /// getTarget - Return the target machine this machine code is compiled with
+  const TargetMachine &getTarget() const { return Target; }
+
+  /// getSubtarget - Return the subtarget for which this machine code is being
+  /// compiled.
+  const TargetSubtargetInfo &getSubtarget() const { return *STI; }
+  void setSubtarget(const TargetSubtargetInfo *ST) { STI = ST; }
+
+  /// getSubtarget - This method returns a reference to the specified type of
+  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
+  /// returned is of the correct type.
+  template<typename STC> const STC &getSubtarget() const {
+    return *static_cast<const STC *>(STI);
+  }
+
+  /// getRegInfo - Return information about the registers currently in use.
+  MachineRegisterInfo &getRegInfo() { return *RegInfo; }
+  const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
+
+  /// getFrameInfo - Return the frame info object for the current function.
+  /// This object contains information about objects allocated on the stack
+  /// frame of the current function in an abstract way.
+  MachineFrameInfo &getFrameInfo() { return *FrameInfo; }
+  const MachineFrameInfo &getFrameInfo() const { return *FrameInfo; }
+
+  /// getJumpTableInfo - Return the jump table info object for the current
+  /// function.  This object contains information about jump tables in the
+  /// current function.  If the current function has no jump tables, this will
+  /// return null.
+  const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
+  MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
+
+  /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it
+  /// does not already exist, allocate one.
+  MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
+
+  /// getConstantPool - Return the constant pool object for the current
+  /// function.
+  MachineConstantPool *getConstantPool() { return ConstantPool; }
+  const MachineConstantPool *getConstantPool() const { return ConstantPool; }
+
+  /// getWinEHFuncInfo - Return information about how the current function uses
+  /// Windows exception handling. Returns null for functions that don't use
+  /// funclets for exception handling.
+  const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
+  WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
+
+  /// getAlignment - Return the alignment (log2, not bytes) of the function.
+  unsigned getAlignment() const { return Alignment; }
+
+  /// setAlignment - Set the alignment (log2, not bytes) of the function.
+  void setAlignment(unsigned A) { Alignment = A; }
+
+  /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
+  void ensureAlignment(unsigned A) {
+    if (Alignment < A) Alignment = A;
+  }
+
+  /// exposesReturnsTwice - Returns true if the function calls setjmp or
+  /// any other similar functions with attribute "returns twice" without
+  /// having the attribute itself.
+  bool exposesReturnsTwice() const {
+    return ExposesReturnsTwice;
+  }
+
+  /// setExposesReturnsTwice - Set a flag that indicates if there's a call to
+  /// a "returns twice" function.
+  void setExposesReturnsTwice(bool B) {
+    ExposesReturnsTwice = B;
+  }
+
+  /// Returns true if the function contains any inline assembly.
+  bool hasInlineAsm() const {
+    return HasInlineAsm;
+  }
+
+  /// Set a flag that indicates that the function contains inline assembly.
+  void setHasInlineAsm(bool B) {
+    HasInlineAsm = B;
+  }
+
+  bool hasWinCFI() const {
+    assert(HasWinCFI.hasValue() && "HasWinCFI not set yet!");
+    return *HasWinCFI;
+  }
+  void setHasWinCFI(bool v) { HasWinCFI = v; }
+
+  /// Get the function properties
+  const MachineFunctionProperties &getProperties() const { return Properties; }
+  MachineFunctionProperties &getProperties() { return Properties; }
+
+  /// getInfo - Keep track of various per-function pieces of information for
+  /// backends that would like to do so.
+  ///
+  template<typename Ty>
+  Ty *getInfo() {
+    if (!MFInfo)
+      MFInfo = Ty::template create<Ty>(Allocator, *this);
+    return static_cast<Ty*>(MFInfo);
+  }
+
+  template<typename Ty>
+  const Ty *getInfo() const {
+    return const_cast<MachineFunction*>(this)->getInfo<Ty>();
+  }
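+
+  /// For example, a target defines a MachineFunctionInfo subclass and
+  /// retrieves it lazily (illustrative sketch; XYZFunctionInfo and its
+  /// accessor are hypothetical):
+  /// \code
+  ///   auto *FuncInfo = MF.getInfo<XYZFunctionInfo>();
+  ///   FuncInfo->setVarArgsFrameIndex(FI);  // hypothetical accessor
+  /// \endcode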
+
+  /// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
+  /// are inserted into the machine function.  The block number for a machine
+  /// basic block can be found by using the MBB::getNumber method, this method
+  /// provides the inverse mapping.
+  MachineBasicBlock *getBlockNumbered(unsigned N) const {
+    assert(N < MBBNumbering.size() && "Illegal block number");
+    assert(MBBNumbering[N] && "Block was removed from the machine function!");
+    return MBBNumbering[N];
+  }
+
+  /// Should we be emitting segmented stack stuff for the function
+  bool shouldSplitStack() const;
+
+  /// getNumBlockIDs - Return the number of MBB ID's allocated.
+  unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
+
+  /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
+  /// recomputes them.  This guarantees that the MBB numbers are sequential,
+  /// dense, and match the ordering of the blocks within the function.  If a
+  /// specific MachineBasicBlock is specified, only that block and those after
+  /// it are renumbered.
+  void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
+
+  /// print - Print out the MachineFunction in a format suitable for debugging
+  /// to the specified stream.
+  void print(raw_ostream &OS, const SlotIndexes* = nullptr) const;
+
+  /// viewCFG - This function is meant for use from the debugger.  You can just
+  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
+  /// program, displaying the CFG of the current function with the code for each
+  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
+  /// in your path.
+  void viewCFG() const;
+
+  /// viewCFGOnly - This function is meant for use from the debugger.  It works
+  /// just like viewCFG, but it does not include the contents of basic blocks
+  /// into the nodes, just the label.  If you are only interested in the CFG
+  /// this can make the graph smaller.
+  ///
+  void viewCFGOnly() const;
+
+  /// dump - Print the current MachineFunction to cerr, useful for debugger use.
+  void dump() const;
+
+  /// Run the current MachineFunction through the machine code verifier, useful
+  /// for debugger use.
+  /// \returns true if no problems were found.
+  bool verify(Pass *p = nullptr, const char *Banner = nullptr,
+              bool AbortOnError = true) const;
+
+  // Provide accessors for the MachineBasicBlock list...
+  using iterator = BasicBlockListType::iterator;
+  using const_iterator = BasicBlockListType::const_iterator;
+  using const_reverse_iterator = BasicBlockListType::const_reverse_iterator;
+  using reverse_iterator = BasicBlockListType::reverse_iterator;
+
+  /// Support for MachineBasicBlock::getNextNode().
+  static BasicBlockListType MachineFunction::*
+  getSublistAccess(MachineBasicBlock *) {
+    return &MachineFunction::BasicBlocks;
+  }
+
+  /// addLiveIn - Add the specified physical register as a live-in value and
+  /// create a corresponding virtual register for it.
+  unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
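+
+  /// For example, argument lowering commonly does (illustrative sketch;
+  /// PhysReg and RC come from the calling convention):
+  /// \code
+  ///   unsigned VReg = MF.addLiveIn(PhysReg, RC);  // RC: const TargetRegisterClass *
+  /// \endcode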
+
+  //===--------------------------------------------------------------------===//
+  // BasicBlock accessor functions.
+  //
+  iterator                 begin()       { return BasicBlocks.begin(); }
+  const_iterator           begin() const { return BasicBlocks.begin(); }
+  iterator                 end  ()       { return BasicBlocks.end();   }
+  const_iterator           end  () const { return BasicBlocks.end();   }
+
+  reverse_iterator        rbegin()       { return BasicBlocks.rbegin(); }
+  const_reverse_iterator  rbegin() const { return BasicBlocks.rbegin(); }
+  reverse_iterator        rend  ()       { return BasicBlocks.rend();   }
+  const_reverse_iterator  rend  () const { return BasicBlocks.rend();   }
+
+  unsigned                  size() const { return (unsigned)BasicBlocks.size();}
+  bool                     empty() const { return BasicBlocks.empty(); }
+  const MachineBasicBlock &front() const { return BasicBlocks.front(); }
+        MachineBasicBlock &front()       { return BasicBlocks.front(); }
+  const MachineBasicBlock & back() const { return BasicBlocks.back(); }
+        MachineBasicBlock & back()       { return BasicBlocks.back(); }
+
+  void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
+  void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
+  void insert(iterator MBBI, MachineBasicBlock *MBB) {
+    BasicBlocks.insert(MBBI, MBB);
+  }
+  void splice(iterator InsertPt, iterator MBBI) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
+  }
+  void splice(iterator InsertPt, MachineBasicBlock *MBB) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBB);
+  }
+  void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
+  }
+
+  void remove(iterator MBBI) { BasicBlocks.remove(MBBI); }
+  void remove(MachineBasicBlock *MBBI) { BasicBlocks.remove(MBBI); }
+  void erase(iterator MBBI) { BasicBlocks.erase(MBBI); }
+  void erase(MachineBasicBlock *MBBI) { BasicBlocks.erase(MBBI); }
+
+  template <typename Comp>
+  void sort(Comp comp) {
+    BasicBlocks.sort(comp);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Internal functions used to automatically number MachineBasicBlocks
+
+  /// \brief Adds the MBB to the internal numbering. Returns the unique number
+  /// assigned to the MBB.
+  unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
+    MBBNumbering.push_back(MBB);
+    return (unsigned)MBBNumbering.size()-1;
+  }
+
+  /// removeFromMBBNumbering - Remove the specific machine basic block from our
+  /// tracker, this is only really to be used by the MachineBasicBlock
+  /// implementation.
+  void removeFromMBBNumbering(unsigned N) {
+    assert(N < MBBNumbering.size() && "Illegal basic block #");
+    MBBNumbering[N] = nullptr;
+  }
+
+  /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
+  /// of `new MachineInstr'.
+  MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
+                                   bool NoImp = false);
+
+  /// Create a new MachineInstr which is a copy of \p Orig, identical in all
+  /// ways except the instruction has no parent, prev, or next. Bundling flags
+  /// are reset.
+  ///
+  /// Note: Clones a single instruction, not whole instruction bundles.
+  /// Does not perform target specific adjustments; consider using
+  /// TargetInstrInfo::duplicate() instead.
+  MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
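+
+  /// Typical use pairs the clone with an explicit insertion (illustrative
+  /// sketch; MBB and InsertPt are assumed values):
+  /// \code
+  ///   MachineInstr *NewMI = MF.CloneMachineInstr(&OrigMI);
+  ///   MBB.insert(InsertPt, NewMI);
+  /// \endcode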
+
+  /// Clones the instruction or the whole instruction bundle \p Orig and
+  /// inserts it into \p MBB before \p InsertBefore.
+  ///
+  /// Note: Does not perform target specific adjustments; consider using
+  /// TargetInstrInfo::duplicate() instead.
+  MachineInstr &CloneMachineInstrBundle(MachineBasicBlock &MBB,
+      MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig);
+
+  /// DeleteMachineInstr - Delete the given MachineInstr.
+  void DeleteMachineInstr(MachineInstr *MI);
+
+  /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
+  /// instead of `new MachineBasicBlock'.
+  MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
+
+  /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
+  void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
+
+  /// getMachineMemOperand - Allocate a new MachineMemOperand.
+  /// MachineMemOperands are owned by the MachineFunction and need not be
+  /// explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+      unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      const MDNode *Ranges = nullptr,
+      SyncScope::ID SSID = SyncScope::System,
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
+
+  /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
+  /// an existing one, adjusting by an offset and using the given size.
+  /// MachineMemOperands are owned by the MachineFunction and need not be
+  /// explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          int64_t Offset, uint64_t Size);
+
+  /// Allocate a new MachineMemOperand by copying an existing one,
+  /// replacing only AliasAnalysis information. MachineMemOperands are owned
+  /// by the MachineFunction and need not be explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          const AAMDNodes &AAInfo);
+
+  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
+
+  /// Allocate an array of MachineOperands. This is only intended for use by
+  /// internal MachineInstr functions.
+  MachineOperand *allocateOperandArray(OperandCapacity Cap) {
+    return OperandRecycler.allocate(Cap, Allocator);
+  }
+
+  /// Deallocate an array of MachineOperands and recycle the memory. This is
+  /// only intended for use by internal MachineInstr functions.
+  /// Cap must be the same capacity that was used to allocate the array.
+  void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
+    OperandRecycler.deallocate(Cap, Array);
+  }
+
+  /// \brief Allocate and initialize a register mask with @p NumRegister bits.
+  uint32_t *allocateRegisterMask(unsigned NumRegister) {
+    unsigned Size = (NumRegister + 31) / 32;
+    uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
+    for (unsigned i = 0; i != Size; ++i)
+      Mask[i] = 0;
+    return Mask;
+  }
+
+  /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
+  /// pointers.  This array is owned by the MachineFunction.
+  MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
+
+  /// extractLoadMemRefs - Allocate an array and populate it with just the
+  /// load information from the given MachineMemOperand sequence.
+  std::pair<MachineInstr::mmo_iterator,
+            MachineInstr::mmo_iterator>
+    extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
+                       MachineInstr::mmo_iterator End);
+
+  /// extractStoreMemRefs - Allocate an array and populate it with just the
+  /// store information from the given MachineMemOperand sequence.
+  std::pair<MachineInstr::mmo_iterator,
+            MachineInstr::mmo_iterator>
+    extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
+                        MachineInstr::mmo_iterator End);
+
+  /// Allocate a string and populate it with the given external symbol name.
+  const char *createExternalSymbolName(StringRef Name);
+
+  //===--------------------------------------------------------------------===//
+  // Label Manipulation.
+
+  /// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
+  /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
+  /// normal 'L' label is returned.
+  MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
+                         bool isLinkerPrivate = false) const;
+
+  /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
+  /// base.
+  MCSymbol *getPICBaseSymbol() const;
+
+  /// Returns a reference to a list of cfi instructions in the function's
+  /// prologue.  Used to construct frame maps for debug and exception handling
+  /// consumers.
+  const std::vector<MCCFIInstruction> &getFrameInstructions() const {
+    return FrameInstructions;
+  }
+
+  LLVM_NODISCARD unsigned addFrameInst(const MCCFIInstruction &Inst) {
+    FrameInstructions.push_back(Inst);
+    return FrameInstructions.size() - 1;
+  }
+
+  /// \name Exception Handling
+  /// \{
+
+  bool callsEHReturn() const { return CallsEHReturn; }
+  void setCallsEHReturn(bool b) { CallsEHReturn = b; }
+
+  bool callsUnwindInit() const { return CallsUnwindInit; }
+  void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+
+  bool hasEHFunclets() const { return HasEHFunclets; }
+  void setHasEHFunclets(bool V) { HasEHFunclets = V; }
+
+  /// Find or create a LandingPadInfo for the specified MachineBasicBlock.
+  LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
+
+  /// Remap landing pad labels and remove any deleted landing pads.
+  void tidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
+
+  /// Return a reference to the landing pad info for the current function.
+  const std::vector<LandingPadInfo> &getLandingPads() const {
+    return LandingPads;
+  }
+
+  /// Provide the begin and end labels of an invoke style call and associate it
+  /// with a try landing pad block.
+  void addInvoke(MachineBasicBlock *LandingPad,
+                 MCSymbol *BeginLabel, MCSymbol *EndLabel);
+
+  /// Add a new landing pad.  Returns the label ID for the landing pad entry.
+  MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
+
+  /// Provide the catch typeinfo for a landing pad.
+  void addCatchTypeInfo(MachineBasicBlock *LandingPad,
+                        ArrayRef<const GlobalValue *> TyInfo);
+
+  /// Provide the filter typeinfo for a landing pad.
+  void addFilterTypeInfo(MachineBasicBlock *LandingPad,
+                         ArrayRef<const GlobalValue *> TyInfo);
+
+  /// Add a cleanup action for a landing pad.
+  void addCleanup(MachineBasicBlock *LandingPad);
+
+  void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
+                          const BlockAddress *RecoverLabel);
+
+  void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
+                            const Function *Cleanup);
+
+  /// Return the type id for the specified typeinfo.  This is function wide.
+  unsigned getTypeIDFor(const GlobalValue *TI);
+
+  /// Return the id of the filter encoded by TyIds.  This is function wide.
+  int getFilterIDFor(std::vector<unsigned> &TyIds);
+
+  /// Map the landing pad's EH symbol to the call site indexes.
+  void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
+
+  /// Get the call site indexes for a landing pad EH symbol.
+  SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
+    assert(hasCallSiteLandingPad(Sym) &&
+           "missing call site number for landing pad!");
+    return LPadToCallSiteMap[Sym];
+  }
+
+  /// Return true if the landing pad EH symbol has an associated call site.
+  bool hasCallSiteLandingPad(MCSymbol *Sym) {
+    return !LPadToCallSiteMap[Sym].empty();
+  }
+
+  /// Map the begin label for a call site.
+  void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
+    CallSiteMap[BeginLabel] = Site;
+  }
+
+  /// Get the call site number for a begin label.
+  unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+    assert(hasCallSiteBeginLabel(BeginLabel) &&
+           "Missing call site number for EH_LABEL!");
+    return CallSiteMap.lookup(BeginLabel);
+  }
+
+  /// Return true if the begin label has a call site number associated with it.
+  bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+    return CallSiteMap.count(BeginLabel);
+  }
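+
+  // An illustrative sketch of how these fit together during EH table
+  // emission (BeginLabel and CallSiteIndex are hypothetical locals):
+  //   MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
+  //   ...
+  //   if (MF.hasCallSiteBeginLabel(BeginLabel))
+  //     CallSiteIndex = MF.getCallSiteBeginLabel(BeginLabel);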
+
+  /// Record annotations associated with a particular label.
+  void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD) {
+    CodeViewAnnotations.push_back({Label, MD});
+  }
+
+  ArrayRef<std::pair<MCSymbol *, MDNode *>> getCodeViewAnnotations() const {
+    return CodeViewAnnotations;
+  }
+
+  /// Return a reference to the C++ typeinfo for the current function.
+  const std::vector<const GlobalValue *> &getTypeInfos() const {
+    return TypeInfos;
+  }
+
+  /// Return a reference to the typeids encoding filters used in the current
+  /// function.
+  const std::vector<unsigned> &getFilterIds() const {
+    return FilterIds;
+  }
+
+  /// \}
+
+  /// Collect information used to emit debugging information of a variable.
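+  /// For example (an illustrative sketch), once a variable has been spilled
+  /// to a known frame index FI, a pass may record it as:
+  /// \code
+  ///   MF.setVariableDbgInfo(Var, Expr, FI, Loc);
+  /// \endcode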
+  void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+                          unsigned Slot, const DILocation *Loc) {
+    VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
+  }
+
+  VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
+  const VariableDbgInfoMapTy &getVariableDbgInfo() const {
+    return VariableDbgInfos;
+  }
+};
+
+/// \name Exception Handling
+/// \{
+
+/// Extract the exception handling information from the landingpad instruction
+/// and add them to the specified machine module info.
+void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB);
+
+/// \}
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// machine function as a graph of machine basic blocks... these are
+// the same as the machine basic block iterators, except that the root
+// node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<MachineFunction*> :
+  public GraphTraits<MachineBasicBlock*> {
+  static NodeRef getEntryNode(MachineFunction *F) { return &F->front(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<MachineFunction::iterator>;
+
+  static nodes_iterator nodes_begin(MachineFunction *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end(MachineFunction *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static unsigned size(MachineFunction *F) { return F->size(); }
+};
+template <> struct GraphTraits<const MachineFunction*> :
+  public GraphTraits<const MachineBasicBlock*> {
+  static NodeRef getEntryNode(const MachineFunction *F) { return &F->front(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<MachineFunction::const_iterator>;
+
+  static nodes_iterator nodes_begin(const MachineFunction *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end(const MachineFunction *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static unsigned size(const MachineFunction *F) {
+    return F->size();
+  }
+};
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order.  Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineFunction*>> :
+  public GraphTraits<Inverse<MachineBasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
+    return &G.Graph->front();
+  }
+};
+template <> struct GraphTraits<Inverse<const MachineFunction*>> :
+  public GraphTraits<Inverse<const MachineBasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
+    return &G.Graph->front();
+  }
+};
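+
+// With these specializations in place, the generic graph algorithms in
+// llvm/ADT apply to machine functions directly.  An illustrative sketch
+// (assumes llvm/ADT/DepthFirstIterator.h is included):
+//
+//   for (MachineBasicBlock *MBB : depth_first(&MF))
+//     ...;
+//
+// and inverse_depth_first(&MF) visits blocks along predecessor edges instead.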
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEFUNCTION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h
new file mode 100644
index 0000000..6d978da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h
@@ -0,0 +1,81 @@
+//===-- MachineFunctionPass.h - Pass for MachineFunctions -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineFunctionPass class.  MachineFunctionPass's are
+// just FunctionPass's, except they operate on machine code as part of a code
+// generator.  Because they operate on machine code, not the LLVM
+// representation, MachineFunctionPass's are not allowed to modify the LLVM
+// representation.  Due to this limitation, the MachineFunctionPass class takes
+// care of declaring that no LLVM passes are invalidated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// MachineFunctionPass - This class adapts the FunctionPass interface to
+/// allow convenient creation of passes that operate on the MachineFunction
+/// representation. Instead of overriding runOnFunction, subclasses
+/// override runOnMachineFunction.
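+///
+/// An illustrative sketch of a typical subclass (names are hypothetical):
+/// \code
+///   struct MyMachinePass : public MachineFunctionPass {
+///     static char ID;
+///     MyMachinePass() : MachineFunctionPass(ID) {}
+///     bool runOnMachineFunction(MachineFunction &MF) override {
+///       bool Changed = false;
+///       // ... inspect or transform MF here ...
+///       return Changed;
+///     }
+///   };
+/// \endcode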
+class MachineFunctionPass : public FunctionPass {
+public:
+  bool doInitialization(Module&) override {
+    // Cache the properties info at module-init time so we don't have to
+    // construct them for every function.
+    RequiredProperties = getRequiredProperties();
+    SetProperties = getSetProperties();
+    ClearedProperties = getClearedProperties();
+    return false;
+  }
+protected:
+  explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
+
+  /// runOnMachineFunction - This method must be overridden to perform the
+  /// desired machine code transformation or analysis.
+  ///
+  virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
+
+  /// getAnalysisUsage - Subclasses that override getAnalysisUsage
+  /// must call this.
+  ///
+  /// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
+  /// the pass does not modify the MachineBasicBlock CFG.
+  ///
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  virtual MachineFunctionProperties getRequiredProperties() const {
+    return MachineFunctionProperties();
+  }
+  virtual MachineFunctionProperties getSetProperties() const {
+    return MachineFunctionProperties();
+  }
+  virtual MachineFunctionProperties getClearedProperties() const {
+    return MachineFunctionProperties();
+  }
+
+private:
+  MachineFunctionProperties RequiredProperties;
+  MachineFunctionProperties SetProperties;
+  MachineFunctionProperties ClearedProperties;
+
+  /// createPrinterPass - Get a machine function printer pass.
+  Pass *createPrinterPass(raw_ostream &O,
+                          const std::string &Banner) const override;
+
+  bool runOnFunction(Function &F) override;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
new file mode 100644
index 0000000..ea94be0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
@@ -0,0 +1,1407 @@
+//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineInstr class, which is the
+// basic representation for all target dependent machine instructions used by
+// the back end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTR_H
+#define LLVM_CODEGEN_MACHINEINSTR_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class DIExpression;
+class DILocalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineMemOperand;
+class MachineRegisterInfo;
+class ModuleSlotTracker;
+class raw_ostream;
+template <typename T> class SmallVectorImpl;
+class SmallBitVector;
+class StringRef;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+//===----------------------------------------------------------------------===//
+/// Representation of each machine instruction.
+///
+/// This class isn't a POD type, but it must have a trivial destructor. When a
+/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
+/// without having their destructor called.
+///
+class MachineInstr
+    : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
+                                    ilist_sentinel_tracking<true>> {
+public:
+  using mmo_iterator = MachineMemOperand **;
+
+  /// Flags to specify different kinds of comments to output in
+  /// assembly code.  These flags carry semantic information not
+  /// otherwise easily derivable from the IR text.
+  ///
+  enum CommentFlag {
+    ReloadReuse = 0x1,    // higher bits are reserved for target dep comments.
+    NoSchedComment = 0x2,
+    TAsmComments = 0x4    // Target Asm comments should start from this value.
+  };
+
+  enum MIFlag {
+    NoFlags      = 0,
+    FrameSetup   = 1 << 0,              // Instruction is used as a part of
+                                        // function frame setup code.
+    FrameDestroy = 1 << 1,              // Instruction is used as a part of
+                                        // function frame destruction code.
+    BundledPred  = 1 << 2,              // Instruction has bundled predecessors.
+    BundledSucc  = 1 << 3               // Instruction has bundled successors.
+  };
+
+private:
+  const MCInstrDesc *MCID;              // Instruction descriptor.
+  MachineBasicBlock *Parent = nullptr;  // Pointer to the owning basic block.
+
+  // Operands are allocated by an ArrayRecycler.
+  MachineOperand *Operands = nullptr;   // Pointer to the first operand.
+  unsigned NumOperands = 0;             // Number of operands on instruction.
+  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
+  OperandCapacity CapOperands;          // Capacity of the Operands array.
+
+  uint8_t Flags = 0;                    // Various bits of additional
+                                        // information about machine
+                                        // instruction.
+
+  uint8_t AsmPrinterFlags = 0;          // Various bits of information used by
+                                        // the AsmPrinter to emit helpful
+                                        // comments.  This is *not* semantic
+                                        // information.  Do not use this for
+                                        // anything other than to convey comment
+                                        // information to AsmPrinter.
+
+  uint8_t NumMemRefs = 0;               // Information on memory references.
+  // Note that MemRefs == nullptr means 'don't know', not 'no memory access'.
+  // Calling code must treat missing information conservatively.  If the number
+  // of memory operands required to be precise exceeds the maximum value of
+  // NumMemRefs - currently 256 - we remove the operands entirely. Note also
+  // that this is a non-owning reference to a shared copy on write buffer owned
+  // by the MachineFunction and created via MF.allocateMemRefsArray.
+  mmo_iterator MemRefs = nullptr;
+
+  DebugLoc debugLoc;                    // Source line information.
+
+  // Intrusive list support
+  friend struct ilist_traits<MachineInstr>;
+  friend struct ilist_callback_traits<MachineBasicBlock>;
+  void setParent(MachineBasicBlock *P) { Parent = P; }
+
+  /// This constructor creates a copy of the given
+  /// MachineInstr in the given MachineFunction.
+  MachineInstr(MachineFunction &, const MachineInstr &);
+
+  /// This constructor creates a MachineInstr and adds the implicit operands.
+  /// It reserves space for the number of operands specified by the
+  /// MCInstrDesc.  An explicit DebugLoc is supplied.
+  MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
+               bool NoImp = false);
+
+  // MachineInstrs are pool-allocated and owned by MachineFunction.
+  friend class MachineFunction;
+
+public:
+  MachineInstr(const MachineInstr &) = delete;
+  MachineInstr &operator=(const MachineInstr &) = delete;
+  // Use MachineFunction::DeleteMachineInstr() instead.
+  ~MachineInstr() = delete;
+
+  const MachineBasicBlock* getParent() const { return Parent; }
+  MachineBasicBlock* getParent() { return Parent; }
+
+  /// Return the function that contains the basic block that this instruction
+  /// belongs to.
+  ///
+  /// Note: this is undefined behaviour if the instruction does not have a
+  /// parent.
+  const MachineFunction *getMF() const;
+  MachineFunction *getMF() {
+    return const_cast<MachineFunction *>(
+        static_cast<const MachineInstr *>(this)->getMF());
+  }
+
+  /// Return the asm printer flags bitvector.
+  uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
+
+  /// Clear the AsmPrinter bitvector.
+  void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
+
+  /// Return whether an AsmPrinter flag is set.
+  bool getAsmPrinterFlag(CommentFlag Flag) const {
+    return AsmPrinterFlags & Flag;
+  }
+
+  /// Set a flag for the AsmPrinter.
+  void setAsmPrinterFlag(uint8_t Flag) {
+    AsmPrinterFlags |= Flag;
+  }
+
+  /// Clear specific AsmPrinter flags.
+  void clearAsmPrinterFlag(CommentFlag Flag) {
+    AsmPrinterFlags &= ~Flag;
+  }
+
+  /// Return the MI flags bitvector.
+  uint8_t getFlags() const {
+    return Flags;
+  }
+
+  /// Return whether an MI flag is set.
+  bool getFlag(MIFlag Flag) const {
+    return Flags & Flag;
+  }
+
+  /// Set a MI flag.
+  void setFlag(MIFlag Flag) {
+    Flags |= (uint8_t)Flag;
+  }
+
+  void setFlags(unsigned flags) {
+    // Filter out the automatically maintained flags.
+    unsigned Mask = BundledPred | BundledSucc;
+    Flags = (Flags & Mask) | (flags & ~Mask);
+  }
+
+  /// clearFlag - Clear a MI flag.
+  void clearFlag(MIFlag Flag) {
+    Flags &= ~((uint8_t)Flag);
+  }
+
+  /// Return true if MI is in a bundle (but not the first MI in a bundle).
+  ///
+  /// A bundle looks like this before it's finalized:
+  ///   ----------------
+  ///   |      MI      |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  /// In this case, the first MI starts a bundle but is not inside a bundle;
+  /// the next 2 MIs are considered "inside" the bundle.
+  ///
+  /// After a bundle is finalized, it looks like this:
+  ///   ----------------
+  ///   |    Bundle    |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  /// The first instruction has the special opcode "BUNDLE". It's not "inside"
+  /// a bundle, but the next three MIs are.
+  bool isInsideBundle() const {
+    return getFlag(BundledPred);
+  }
+
+  /// Return true if this instruction is part of a bundle. This is true if
+  /// either this instruction or the instruction following it is marked
+  /// "InsideBundle".
+  bool isBundled() const {
+    return isBundledWithPred() || isBundledWithSucc();
+  }
+
+  /// Return true if this instruction is part of a bundle, and it is not the
+  /// first instruction in the bundle.
+  bool isBundledWithPred() const { return getFlag(BundledPred); }
+
+  /// Return true if this instruction is part of a bundle, and it is not the
+  /// last instruction in the bundle.
+  bool isBundledWithSucc() const { return getFlag(BundledSucc); }
+
+  /// Bundle this instruction with its predecessor. This can be an unbundled
+  /// instruction, or it can be the first instruction in a bundle.
+  void bundleWithPred();
+
+  /// Bundle this instruction with its successor. This can be an unbundled
+  /// instruction, or it can be the last instruction in a bundle.
+  void bundleWithSucc();
+
+  /// Break bundle above this instruction.
+  void unbundleFromPred();
+
+  /// Break bundle below this instruction.
+  void unbundleFromSucc();
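+
+  // Note: bundles are normally constructed with the MIBundleBuilder helper
+  // declared in llvm/CodeGen/MachineInstrBundle.h rather than by calling the
+  // methods above directly; an illustrative sketch over an existing range:
+  //   MIBundleBuilder Bundle(MBB, FirstMI, AfterLastMI);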
+
+  /// Returns the debug location of this MachineInstr.
+  const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+  /// Return the debug variable referenced by
+  /// this DBG_VALUE instruction.
+  const DILocalVariable *getDebugVariable() const;
+
+  /// Return the complex address expression referenced by
+  /// this DBG_VALUE instruction.
+  const DIExpression *getDebugExpression() const;
+
+  /// Emit an error referring to the source location of this instruction.
+  /// This should only be used for inline assembly that is somehow
+  /// impossible to compile. Other errors should have been handled much
+  /// earlier.
+  ///
+  /// If this method returns, the caller should try to recover from the error.
+  void emitError(StringRef Msg) const;
+
+  /// Returns the target instruction descriptor of this MachineInstr.
+  const MCInstrDesc &getDesc() const { return *MCID; }
+
+  /// Returns the opcode of this MachineInstr.
+  unsigned getOpcode() const { return MCID->Opcode; }
+
+  /// Access to explicit operands of the instruction.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  const MachineOperand& getOperand(unsigned i) const {
+    assert(i < getNumOperands() && "getOperand() out of range!");
+    return Operands[i];
+  }
+  MachineOperand& getOperand(unsigned i) {
+    assert(i < getNumOperands() && "getOperand() out of range!");
+    return Operands[i];
+  }
+
+  /// Return true if operand \p OpIdx is a subregister index.
+  bool isOperandSubregIdx(unsigned OpIdx) const {
+    assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
+           "Expected MO_Immediate operand type.");
+    if (isExtractSubreg() && OpIdx == 2)
+      return true;
+    if (isInsertSubreg() && OpIdx == 3)
+      return true;
+    if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
+      return true;
+    if (isSubregToReg() && OpIdx == 3)
+      return true;
+    return false;
+  }
+
+  /// Returns the number of non-implicit operands.
+  unsigned getNumExplicitOperands() const;
+
+  /// iterator/begin/end - Iterate over all operands of a machine instruction.
+  using mop_iterator = MachineOperand *;
+  using const_mop_iterator = const MachineOperand *;
+
+  mop_iterator operands_begin() { return Operands; }
+  mop_iterator operands_end() { return Operands + NumOperands; }
+
+  const_mop_iterator operands_begin() const { return Operands; }
+  const_mop_iterator operands_end() const { return Operands + NumOperands; }
+
+  iterator_range<mop_iterator> operands() {
+    return make_range(operands_begin(), operands_end());
+  }
+  iterator_range<const_mop_iterator> operands() const {
+    return make_range(operands_begin(), operands_end());
+  }
+  iterator_range<mop_iterator> explicit_operands() {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+  iterator_range<const_mop_iterator> explicit_operands() const {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+  iterator_range<mop_iterator> implicit_operands() {
+    return make_range(explicit_operands().end(), operands_end());
+  }
+  iterator_range<const_mop_iterator> implicit_operands() const {
+    return make_range(explicit_operands().end(), operands_end());
+  }
+  /// Returns a range over all explicit operands that are register definitions.
+  /// Implicit definitions are not included!
+  iterator_range<mop_iterator> defs() {
+    return make_range(operands_begin(),
+                      operands_begin() + getDesc().getNumDefs());
+  }
+  /// \copydoc defs()
+  iterator_range<const_mop_iterator> defs() const {
+    return make_range(operands_begin(),
+                      operands_begin() + getDesc().getNumDefs());
+  }
+  /// Returns a range that includes all operands that are register uses.
+  /// This may include unrelated operands which are not register uses.
+  iterator_range<mop_iterator> uses() {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_end());
+  }
+  /// \copydoc uses()
+  iterator_range<const_mop_iterator> uses() const {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_end());
+  }
+  iterator_range<mop_iterator> explicit_uses() {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+  iterator_range<const_mop_iterator> explicit_uses() const {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+
+  /// Returns the number of the operand iterator \p I points to.
+  unsigned getOperandNo(const_mop_iterator I) const {
+    return I - operands_begin();
+  }
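+
+  // As an illustrative sketch, a scan over the explicit register uses of an
+  // instruction MI could be written as:
+  //   for (const MachineOperand &MO : MI.explicit_uses())
+  //     if (MO.isReg() && MO.isUse())
+  //       Uses.push_back(MO.getReg()); // 'Uses' is a hypothetical vector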
+
+  /// Access to memory operands of the instruction
+  mmo_iterator memoperands_begin() const { return MemRefs; }
+  mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+  /// Return true if we don't have any memory operands which describe the
+  /// memory access done by this instruction.  If this is true, calling code
+  /// must be conservative.
+  bool memoperands_empty() const { return NumMemRefs == 0; }
+
+  iterator_range<mmo_iterator> memoperands() {
+    return make_range(memoperands_begin(), memoperands_end());
+  }
+  iterator_range<mmo_iterator> memoperands() const {
+    return make_range(memoperands_begin(), memoperands_end());
+  }
+
+  /// Return true if this instruction has exactly one MachineMemOperand.
+  bool hasOneMemOperand() const {
+    return NumMemRefs == 1;
+  }
+
+  /// Return the number of memory operands.
+  unsigned getNumMemOperands() const { return NumMemRefs; }
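+
+  // Illustrative sketch: missing memory operands must be treated
+  // conservatively, e.g. a "may this touch volatile memory?" query:
+  //   if (MI.memoperands_empty())
+  //     return true;                       // unknown - assume the worst
+  //   for (MachineMemOperand *MMO : MI.memoperands())
+  //     if (MMO->isVolatile())
+  //       return true;
+  //   return false;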
+
+  /// API for querying MachineInstr properties. They are the same as MCInstrDesc
+  /// queries but they are bundle aware.
+
+  enum QueryType {
+    IgnoreBundle,    // Ignore bundles
+    AnyInBundle,     // Return true if any instruction in bundle has property
+    AllInBundle      // Return true if all instructions in bundle have property
+  };
+
+  /// Return true if the instruction (or in the case of a bundle,
+  /// the instructions inside the bundle) has the specified property.
+  /// The first argument is the property being queried.
+  /// The second argument indicates whether the query should look inside
+  /// instruction bundles.
+  bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+    // Inline the fast path for unbundled or bundle-internal instructions.
+    if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
+      return getDesc().getFlags() & (1ULL << MCFlag);
+
+    // If this is the first instruction in a bundle, take the slow path.
+    return hasPropertyInBundle(1ULL << MCFlag, Type);
+  }
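+
+  // For example, MI.hasProperty(MCID::MayLoad, AnyInBundle) asks whether any
+  // instruction in MI's bundle may read memory; most of the convenience
+  // predicates below simply forward to this query.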
+
+  /// Return true if this instruction can have a variable number of operands.
+  /// In this case, the variable operands will be after the normal
+  /// operands but before the implicit definitions and uses (if any are
+  /// present).
+  bool isVariadic(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Variadic, Type);
+  }
+
+  /// Set if this instruction has an optional definition, e.g.
+  /// ARM instructions which can set condition code if 's' bit is set.
+  bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::HasOptionalDef, Type);
+  }
+
+  /// Return true if this is a pseudo instruction that doesn't
+  /// correspond to a real machine instruction.
+  bool isPseudo(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Pseudo, Type);
+  }
+
+  bool isReturn(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Return, Type);
+  }
+
+  bool isCall(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Call, Type);
+  }
+
+  /// Returns true if the specified instruction stops control flow
+  /// from executing the instruction immediately following it.  Examples include
+  /// unconditional branches and return instructions.
+  bool isBarrier(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Barrier, Type);
+  }
+
+  /// Returns true if this instruction is part of the terminator for a basic
+  /// block.  Typically these are return and branch instructions.
+  ///
+  /// Various passes use this to insert code into the bottom of a basic block,
+  /// but before control flow occurs.
+  bool isTerminator(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Terminator, Type);
+  }
+
+  /// Returns true if this is a conditional, unconditional, or indirect branch.
+  /// Predicates below can be used to discriminate between
+  /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+  /// get more information.
+  bool isBranch(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Branch, Type);
+  }
+
+  /// Return true if this is an indirect branch, such as a
+  /// branch through a register.
+  bool isIndirectBranch(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::IndirectBranch, Type);
+  }
+
+  /// Return true if this is a branch which may fall
+  /// through to the next instruction or may transfer control flow to some other
+  /// block.  The TargetInstrInfo::AnalyzeBranch method can be used to get more
+  /// information about this branch.
+  bool isConditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
+  }
+
+  /// Return true if this is a branch which always
+  /// transfers control flow to some other block.  The
+  /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+  /// about this branch.
+  bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
+  }
+
+  /// Return true if this instruction has a predicate operand that
+  /// controls execution.  It may be set to 'always', or may be set to other
+  /// values.   There are various methods in TargetInstrInfo that can be used to
+  /// control and modify the predicate in this instruction.
+  bool isPredicable(QueryType Type = AllInBundle) const {
+    // If it's a bundle, then all bundled instructions must be predicable for
+    // this to return true.
+    return hasProperty(MCID::Predicable, Type);
+  }
+
+  /// Return true if this instruction is a comparison.
+  bool isCompare(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Compare, Type);
+  }
+
+  /// Return true if this instruction is a move immediate
+  /// (including conditional moves) instruction.
+  bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::MoveImm, Type);
+  }
+
+  /// Return true if this instruction is a bitcast instruction.
+  bool isBitcast(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Bitcast, Type);
+  }
+
+  /// Return true if this instruction is a select instruction.
+  bool isSelect(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Select, Type);
+  }
+
+  /// Return true if this instruction cannot be safely duplicated.
+  /// For example, if the instruction has a unique labels attached
+  /// to it, duplicating it would cause multiple definition errors.
+  bool isNotDuplicable(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::NotDuplicable, Type);
+  }
+
+  /// Return true if this instruction is convergent.
+  /// Convergent instructions can not be made control-dependent on any
+  /// additional values.
+  bool isConvergent(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_IsConvergent)
+        return true;
+    }
+    return hasProperty(MCID::Convergent, Type);
+  }
+
+  /// Returns true if the specified instruction has a delay slot
+  /// which must be filled by the code generator.
+  bool hasDelaySlot(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::DelaySlot, Type);
+  }
+
+  /// Return true for instructions that can be folded as
+  /// memory operands in other instructions. The most common use for this
+  /// is instructions that are simple loads from memory that don't modify
+  /// the loaded value in any way, but it can also be used for instructions
+  /// that can be expressed as constant-pool loads, such as V_SETALLONES
+  /// on x86, to allow them to be folded when it is beneficial.
+  /// This should only be set on instructions that return a value in their
+  /// only virtual register definition.
+  bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::FoldableAsLoad, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic REG_SEQUENCE instructions.
+  /// E.g., on ARM,
+  /// dX VMOVDRR rY, rZ
+  /// is equivalent to
+  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
+  /// overridden accordingly.
+  bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::RegSequence, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic EXTRACT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// rX, rY VMOVRRD dZ
+  /// is equivalent to two EXTRACT_SUBREG:
+  /// rX = EXTRACT_SUBREG dZ, ssub_0
+  /// rY = EXTRACT_SUBREG dZ, ssub_1
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::ExtractSubreg, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic INSERT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// dX = VSETLNi32 dY, rZ, Imm
+  /// is equivalent to an INSERT_SUBREG:
+  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::InsertSubreg, Type);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Side Effect Analysis
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if this instruction could possibly read memory.
+  /// Instructions with this flag set are not necessarily simple load
+  /// instructions, they may load a value and modify it, for example.
+  bool mayLoad(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_MayLoad)
+        return true;
+    }
+    return hasProperty(MCID::MayLoad, Type);
+  }
+
+  /// Return true if this instruction could possibly modify memory.
+  /// Instructions with this flag set are not necessarily simple store
+  /// instructions, they may store a modified value based on their operands, or
+  /// may not actually modify anything, for example.
+  bool mayStore(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_MayStore)
+        return true;
+    }
+    return hasProperty(MCID::MayStore, Type);
+  }
+
+  /// Return true if this instruction could possibly read or modify memory.
+  bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
+    return mayLoad(Type) || mayStore(Type);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Flags that indicate whether an instruction can be modified by a method.
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if this may be a 2- or 3-address
+  /// instruction (of the form "X = op Y, Z, ..."), which produces the same
+  /// result if Y and Z are exchanged.  If this flag is set, then the
+  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+  /// instruction.
+  ///
+  /// Note that this flag may be set on instructions that are only commutable
+  /// sometimes.  In these cases, the call to commuteInstruction will fail.
+  /// Also note that some instructions require non-trivial modification to
+  /// commute them.
+  bool isCommutable(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Commutable, Type);
+  }
+
+  /// Return true if this is a 2-address instruction
+  /// which can be changed into a 3-address instruction if needed.  Doing this
+  /// transformation can be profitable in the register allocator, because it
+  /// means that the instruction can use a 2-address form if possible, but
+  /// degrade into a less efficient form if the source and dest register cannot
+  /// be assigned to the same register.  For example, this allows the x86
+  /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
+  /// is the same speed as the shift but has bigger code size.
+  ///
+  /// If this returns true, then the target must implement the
+  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+  /// is allowed to fail if the transformation isn't valid for this specific
+  /// instruction (e.g. shl reg, 4 on x86).
+  ///
+  bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::ConvertibleTo3Addr, Type);
+  }
+
+  /// Return true if this instruction requires
+  /// custom insertion support when the DAG scheduler is inserting it into a
+  /// machine basic block.  If this is true for the instruction, it basically
+  /// means that it is a pseudo instruction used at SelectionDAG time that is
+  /// expanded out into magic code by the target when MachineInstrs are formed.
+  ///
+  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+  /// is used to insert this into the MachineBasicBlock.
+  bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::UsesCustomInserter, Type);
+  }
+
+  /// Return true if this instruction requires *adjustment*
+  /// after instruction selection by calling a target hook. For example, this
+  /// can be used to fill in ARM 's' optional operand depending on whether
+  /// the conditional flag register is used.
+  bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::HasPostISelHook, Type);
+  }
+
+  /// Returns true if this instruction is a candidate for remat.
+  /// This flag is deprecated, please don't use it anymore.  If this
+  /// flag is set, the isReallyTriviallyReMaterializable() method is called to
+  /// verify that the instruction is really rematerializable.
+  bool isRematerializable(QueryType Type = AllInBundle) const {
+    // It's only possible to re-mat a bundle if all bundled instructions are
+    // re-materializable.
+    return hasProperty(MCID::Rematerializable, Type);
+  }
+
+  /// Returns true if this instruction has the same cost (or less) than a move
+  /// instruction. This is useful during certain types of optimizations
+  /// (e.g., remat during two-address conversion or machine licm)
+  /// where we would like to remat or hoist the instruction, but not if it costs
+  /// more than moving the instruction into the appropriate register. Note, we
+  /// are not marking copies from and to the same register class with this flag.
+  bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
+    // Only returns true for a bundle if all bundled instructions are cheap.
+    return hasProperty(MCID::CheapAsAMove, Type);
+  }
+
+  /// Returns true if this instruction's source operands have special register
+  /// allocation requirements that are not captured by the operand register
+  /// classes. e.g. ARM::STRD's two source registers must be an
+  /// even / odd pair, ARM::STM registers have to be in ascending order.
+  /// Post-register allocation passes should not attempt to change allocations
+  /// for sources of instructions with this flag.
+  bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
+  }
+
+  /// Returns true if this instruction's def operands have special register
+  /// allocation requirements that are not captured by the operand register
+  /// classes. e.g. ARM::LDRD's two def registers must be an
+  /// even / odd pair, ARM::LDM registers have to be in ascending order.
+  /// Post-register allocation passes should not attempt to change allocations
+  /// for definitions of instructions with this flag.
+  bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::ExtraDefRegAllocReq, Type);
+  }
+
+  enum MICheckType {
+    CheckDefs,      // Check all operands for equality
+    CheckKillDead,  // Check all operands including kill / dead markers
+    IgnoreDefs,     // Ignore all definitions
+    IgnoreVRegDefs  // Ignore virtual register definitions
+  };
+
+  /// Return true if this instruction is identical to \p Other.
+  /// Two instructions are identical if they have the same opcode and all their
+  /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
+  /// Note that this means liveness related flags (dead, undef, kill) do not
+  /// affect the notion of identical.
+  bool isIdenticalTo(const MachineInstr &Other,
+                     MICheckType Check = CheckDefs) const;
+
+  /// Unlink 'this' from the containing basic block, and return it without
+  /// deleting it.
+  ///
+  /// This function can not be used on bundled instructions, use
+  /// removeFromBundle() to remove individual instructions from a bundle.
+  MachineInstr *removeFromParent();
+
+  /// Unlink this instruction from its basic block and return it without
+  /// deleting it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle remain bundled.
+  MachineInstr *removeFromBundle();
+
+  /// Unlink 'this' from the containing basic block and delete it.
+  ///
+  /// If this instruction is the header of a bundle, the whole bundle is erased.
+  /// This function can not be used for instructions inside a bundle, use
+  /// eraseFromBundle() to erase individual bundled instructions.
+  void eraseFromParent();
+
+  /// Unlink 'this' from the containing basic block and delete it.
+  ///
+  /// For all definitions mark their uses in DBG_VALUE nodes
+  /// as undefined. Otherwise like eraseFromParent().
+  void eraseFromParentAndMarkDBGValuesForRemoval();
+
+  /// Unlink 'this' from its basic block and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle remain bundled.
+  void eraseFromBundle();
+
+  bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
+  bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
+  bool isAnnotationLabel() const {
+    return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
+  }
+
+  /// Returns true if the MachineInstr represents a label.
+  bool isLabel() const {
+    return isEHLabel() || isGCLabel() || isAnnotationLabel();
+  }
+
+  bool isCFIInstruction() const {
+    return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
+  }
+
+  // True if the instruction represents a position in the function.
+  bool isPosition() const { return isLabel() || isCFIInstruction(); }
+
+  bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
+
+  /// A DBG_VALUE is indirect iff the first operand is a register and
+  /// the second operand is an immediate.
+  bool isIndirectDebugValue() const {
+    return isDebugValue()
+      && getOperand(0).isReg()
+      && getOperand(1).isImm();
+  }
+
+  bool isPHI() const {
+    return getOpcode() == TargetOpcode::PHI ||
+           getOpcode() == TargetOpcode::G_PHI;
+  }
+  bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
+  bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
+  bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
+
+  bool isMSInlineAsm() const {
+    return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
+  }
+
+  bool isStackAligningInlineAsm() const;
+  InlineAsm::AsmDialect getInlineAsmDialect() const;
+
+  bool isInsertSubreg() const {
+    return getOpcode() == TargetOpcode::INSERT_SUBREG;
+  }
+
+  bool isSubregToReg() const {
+    return getOpcode() == TargetOpcode::SUBREG_TO_REG;
+  }
+
+  bool isRegSequence() const {
+    return getOpcode() == TargetOpcode::REG_SEQUENCE;
+  }
+
+  bool isBundle() const {
+    return getOpcode() == TargetOpcode::BUNDLE;
+  }
+
+  bool isCopy() const {
+    return getOpcode() == TargetOpcode::COPY;
+  }
+
+  bool isFullCopy() const {
+    return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
+  }
+
+  bool isExtractSubreg() const {
+    return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
+  }
+
+  /// Return true if the instruction behaves like a copy.
+  /// This does not include native copy instructions.
+  bool isCopyLike() const {
+    return isCopy() || isSubregToReg();
+  }
+
+  /// Return true if the instruction is an identity copy.
+  bool isIdentityCopy() const {
+    return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+      getOperand(0).getSubReg() == getOperand(1).getSubReg();
+  }
+
+  /// Return true if this instruction doesn't produce any output in the form of
+  /// executable instructions.
+  bool isMetaInstruction() const {
+    switch (getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::IMPLICIT_DEF:
+    case TargetOpcode::KILL:
+    case TargetOpcode::CFI_INSTRUCTION:
+    case TargetOpcode::EH_LABEL:
+    case TargetOpcode::GC_LABEL:
+    case TargetOpcode::DBG_VALUE:
+    case TargetOpcode::LIFETIME_START:
+    case TargetOpcode::LIFETIME_END:
+      return true;
+    }
+  }
+
+  /// Return true if this is a transient instruction that is either very likely
+  /// to be eliminated during register allocation (such as copy-like
+  /// instructions), or if this instruction doesn't have an execution-time cost.
+  bool isTransient() const {
+    switch (getOpcode()) {
+    default:
+      return isMetaInstruction();
+    // Copy-like instructions are usually eliminated during register allocation.
+    case TargetOpcode::PHI:
+    case TargetOpcode::G_PHI:
+    case TargetOpcode::COPY:
+    case TargetOpcode::INSERT_SUBREG:
+    case TargetOpcode::SUBREG_TO_REG:
+    case TargetOpcode::REG_SEQUENCE:
+      return true;
+    }
+  }
+
+  /// Return the number of instructions inside the MI bundle, excluding the
+  /// bundle header.
+  ///
+  /// This is the number of instructions that MachineBasicBlock::iterator
+  /// skips, 0 for unbundled instructions.
+  unsigned getBundleSize() const;
+
+  /// Return true if the MachineInstr reads the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks if there
+  /// is a read of a super-register.
+  /// This does not count partial redefines of virtual registers as reads:
+  ///   %reg1024:6 = OP.
+  bool readsRegister(unsigned Reg,
+                     const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr reads the specified virtual register.
+  /// Take into account that a partial define is a
+  /// read-modify-write operation.
+  bool readsVirtualRegister(unsigned Reg) const {
+    return readsWritesVirtualRegister(Reg).first;
+  }
+
+  /// Return a pair of bools (reads, writes) indicating if this instruction
+  /// reads or writes Reg. This also considers partial defines.
+  /// If Ops is not null, all operand indices for Reg are added.
+  std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
+                                SmallVectorImpl<unsigned> *Ops = nullptr) const;
+
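+  // Illustrative sketch:
+  //   bool Reads, Writes;
+  //   std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);
+  //   if (Writes && !Reads)
+  //     ...; // a full redefinition rather than a read-modify-write
+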
+  /// Return true if the MachineInstr kills the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks if there is
+  /// a kill of a super-register.
+  bool killsRegister(unsigned Reg,
+                     const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr fully defines the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks
+  /// if there is a def of a super-register.
+  /// NOTE: This ignores subreg indices on virtual registers.
+  bool definesRegister(unsigned Reg,
+                       const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr modifies (fully define or partially
+  /// define) the specified register.
+  /// NOTE: This ignores subreg indices on virtual registers.
+  bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
+    return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
+  }
+
+  /// Returns true if the register is dead in this machine instruction.
+  /// If TargetRegisterInfo is passed, then it also checks
+  /// if there is a dead def of a super-register.
+  bool registerDefIsDead(unsigned Reg,
+                         const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
+  }
+
+  /// Returns true if the MachineInstr has an implicit-use operand of exactly
+  /// the given register (not considering sub/super-registers).
+  bool hasRegisterImplicitUseOperand(unsigned Reg) const;
+
+  /// Returns the operand index that is a use of the specified register or -1
+  /// if it is not found. It further tightens the search criteria to a use
+  /// that kills the register if isKill is true.
+  int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
+                                const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Wrapper for findRegisterUseOperandIdx; it returns
+  /// a pointer to the MachineOperand rather than an index.
+  MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
+                                      const TargetRegisterInfo *TRI = nullptr) {
+    int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
+    return (Idx == -1) ? nullptr : &getOperand(Idx);
+  }
+
+  const MachineOperand *findRegisterUseOperand(
+    unsigned Reg, bool isKill = false,
+    const TargetRegisterInfo *TRI = nullptr) const {
+    return const_cast<MachineInstr *>(this)->
+      findRegisterUseOperand(Reg, isKill, TRI);
+  }
+
+  /// Returns the operand index that is a def of the specified register or
+  /// -1 if it is not found. If isDead is true, defs that are not dead are
+  /// skipped. If Overlap is true, then it also looks for defs that merely
+  /// overlap the specified register. If TargetRegisterInfo is non-null,
+  /// then it also checks if there is a def of a super-register.
+  /// This may also return a register mask operand when Overlap is true.
+  int findRegisterDefOperandIdx(unsigned Reg,
+                                bool isDead = false, bool Overlap = false,
+                                const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Wrapper for findRegisterDefOperandIdx; it returns
+  /// a pointer to the MachineOperand rather than an index.
+  MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
+                                      const TargetRegisterInfo *TRI = nullptr) {
+    int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
+    return (Idx == -1) ? nullptr : &getOperand(Idx);
+  }
+
+  /// Find the index of the first operand in the
+  /// operand list that is used to represent the predicate. It returns -1 if
+  /// none is found.
+  int findFirstPredOperandIdx() const;
+
+  /// Find the index of the flag word operand that
+  /// corresponds to operand OpIdx on an inline asm instruction.  Returns -1 if
+  /// getOperand(OpIdx) does not belong to an inline asm operand group.
+  ///
+  /// If GroupNo is not NULL, it will receive the number of the operand group
+  /// containing OpIdx.
+  ///
+  /// The flag operand is an immediate that can be decoded with methods like
+  /// InlineAsm::hasRegClassConstraint().
+  int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
+
+  /// Compute the static register class constraint for operand OpIdx.
+  /// For normal instructions, this is derived from the MCInstrDesc.
+  /// For inline assembly it is derived from the flag words.
+  ///
+  /// Returns NULL if the static register class constraint cannot be
+  /// determined.
+  const TargetRegisterClass*
+  getRegClassConstraint(unsigned OpIdx,
+                        const TargetInstrInfo *TII,
+                        const TargetRegisterInfo *TRI) const;
+
+  /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
+  /// the given \p CurRC.
+  /// If \p ExploreBundle is set and MI is part of a bundle, all the
+  /// instructions inside the bundle will be taken into account. In other words,
+  /// this method accumulates all the constraints of the operand of this MI and
+  /// the related bundle if MI is a bundle or inside a bundle.
+  ///
+  /// Returns the register class that satisfies both \p CurRC and the
+  /// constraints set by MI. Returns NULL if such a register class does not
+  /// exist.
+  ///
+  /// \pre CurRC must not be NULL.
+  const TargetRegisterClass *getRegClassConstraintEffectForVReg(
+      unsigned Reg, const TargetRegisterClass *CurRC,
+      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
+      bool ExploreBundle = false) const;
+
+  /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
+  /// to the given \p CurRC.
+  ///
+  /// Returns the register class that satisfies both \p CurRC and the
+  /// constraints set by \p OpIdx MI. Returns NULL if such a register class
+  /// does not exist.
+  ///
+  /// \pre CurRC must not be NULL.
+  /// \pre The operand at \p OpIdx must be a register.
+  const TargetRegisterClass *
+  getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
+                              const TargetInstrInfo *TII,
+                              const TargetRegisterInfo *TRI) const;
+
+  /// Add a tie between the register operands at DefIdx and UseIdx.
+  /// The tie will cause the register allocator to ensure that the two
+  /// operands are assigned the same physical register.
+  ///
+  /// Tied operands are managed automatically for explicit operands in the
+  /// MCInstrDesc. This method is for exceptional cases like inline asm.
+  void tieOperands(unsigned DefIdx, unsigned UseIdx);
+
+  /// Given the index of a tied register operand, find the
+  /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
+  /// index of the tied operand which must exist.
+  unsigned findTiedOperandIdx(unsigned OpIdx) const;
+
+  /// Given the index of a register def operand,
+  /// check if the register def is tied to a source operand, due to either
+  /// two-address elimination or inline assembly constraints. Returns the
+  /// first tied use operand index by reference if UseOpIdx is not null.
+  bool isRegTiedToUseOperand(unsigned DefOpIdx,
+                             unsigned *UseOpIdx = nullptr) const {
+    const MachineOperand &MO = getOperand(DefOpIdx);
+    if (!MO.isReg() || !MO.isDef() || !MO.isTied())
+      return false;
+    if (UseOpIdx)
+      *UseOpIdx = findTiedOperandIdx(DefOpIdx);
+    return true;
+  }
+
+  /// Return true if the use operand of the specified index is tied to a def
+  /// operand. It also returns the def operand index by reference if DefOpIdx
+  /// is not null.
+  bool isRegTiedToDefOperand(unsigned UseOpIdx,
+                             unsigned *DefOpIdx = nullptr) const {
+    const MachineOperand &MO = getOperand(UseOpIdx);
+    if (!MO.isReg() || !MO.isUse() || !MO.isTied())
+      return false;
+    if (DefOpIdx)
+      *DefOpIdx = findTiedOperandIdx(UseOpIdx);
+    return true;
+  }
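+
+  // Illustrative sketch: the two-address idiom "X = op X, Y" appears as a
+  // def tied to a use, which can be queried as:
+  //   unsigned UseIdx;
+  //   if (MI.isRegTiedToUseOperand(0, &UseIdx))
+  //     assert(MI.getOperand(UseIdx).isUse());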
+
+  /// Clears kill flags on all operands.
+  void clearKillInfo();
+
+  /// Replace all occurrences of FromReg with ToReg:SubIdx,
+  /// properly composing subreg indices where necessary.
+  void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+                          const TargetRegisterInfo &RegInfo);
+
+  /// We have determined MI kills a register. Look for the
+  /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
+  /// add an implicit operand if it's not found. Returns true if the operand
+  /// exists / is added.
+  bool addRegisterKilled(unsigned IncomingReg,
+                         const TargetRegisterInfo *RegInfo,
+                         bool AddIfNotFound = false);
+
+  /// Clear all kill flags affecting Reg.  If RegInfo is provided, this includes
+  /// all aliasing registers.
+  void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
+
+  /// We have determined MI defined a register without a use.
+  /// Look for the operand that defines it and mark it as IsDead. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
+  /// true if the operand exists / is added.
+  bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
+                       bool AddIfNotFound = false);
+
+  /// Clear all dead flags on operands defining register @p Reg.
+  void clearRegisterDeads(unsigned Reg);
+
+  /// Mark all subregister defs of register @p Reg with the undef flag.
+  /// This function is used when we determined to have a subregister def in an
+  /// otherwise undefined super register.
+  void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
+
+  /// We have determined MI defines a register. Make sure there is an operand
+  /// defining Reg.
+  void addRegisterDefined(unsigned Reg,
+                          const TargetRegisterInfo *RegInfo = nullptr);
+
+  /// Mark every physreg used by this instruction as
+  /// dead except those in the UsedRegs list.
+  ///
+  /// On instructions with register mask operands, also add implicit-def
+  /// operands for all registers in UsedRegs.
+  void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
+                             const TargetRegisterInfo &TRI);
+
+  /// Return true if it is safe to move this instruction. If
+  /// SawStore is set to true, it means that there is a store (or call) between
+  /// the instruction's location and its intended destination.
+  bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
+
+  /// Returns true if this instruction's memory access aliases the memory
+  /// access of Other.
+  ///
+  /// Assumes any physical registers used to compute addresses
+  /// have the same value for both instructions.  Returns false if neither
+  /// instruction writes to memory.
+  ///
+  /// @param AA Optional alias analysis, used to compare memory operands.
+  /// @param Other MachineInstr to check aliasing against.
+  /// @param UseTBAA Whether to pass TBAA information to alias analysis.
+  bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
+
+  /// Return true if this instruction may have an ordered
+  /// or volatile memory reference, or if the information describing the memory
+  /// reference is not available. Return false if it is known to have no
+  /// ordered or volatile memory references.
+  bool hasOrderedMemoryRef() const;
+
+  /// Return true if this load instruction never traps and points to a memory
+  /// location whose value doesn't change during the execution of this function.
+  ///
+  /// Examples include loading a value from the constant pool or from the
+  /// argument area of a function (if it does not change).  If the instruction
+  /// does multiple loads, this returns true only if all of the loads are
+  /// dereferenceable and invariant.
+  bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
+
+  /// If the specified instruction is a PHI that always merges together the
+  /// same virtual register, return the register, otherwise return 0.
+  unsigned isConstantValuePHI() const;
+
+  /// Return true if this instruction has side effects that are not modeled
+  /// by mayLoad / mayStore, etc.
+  /// For all instructions, the property is encoded in MCInstrDesc::Flags
+  /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
+  /// INLINEASM instruction, in which case the side effect property is encoded
+  /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
+  ///
+  bool hasUnmodeledSideEffects() const;
+
+  /// Returns true if it is illegal to fold a load across this instruction.
+  bool isLoadFoldBarrier() const;
+
+  /// Return true if all the defs of this instruction are dead.
+  bool allDefsAreDead() const;
+
+  /// Copy implicit register operands from specified
+  /// instruction to this instruction.
+  void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
+
+  /// Debugging support
+  /// @{
+  /// Determine the generic type to be printed (if needed) on uses and defs.
+  LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
+                     const MachineRegisterInfo &MRI) const;
+
+  /// Return true when an instruction has a tied register that can't be
+  /// determined by the instruction's descriptor. This is useful for MIR
+  /// printing, to determine whether we need to print the ties or not.
+  bool hasComplexRegisterTies() const;
+
+  /// Print this MI to \p OS.
+  /// Don't print information that can be inferred from other instructions if
+  /// \p IsStandalone is false. It is usually true when only a fragment of the
+  /// function is printed.
+  /// If \p SkipOpers is true, print only the defs and the opcode.
+  /// Otherwise also print the operands; and unless \p SkipDebugLoc is true,
+  /// additionally print the debug loc, with a terminating newline.
+  /// \p TII is used to print the opcode name.  If it's not present, but the
+  /// MI is in a function, the opcode will be printed using the function's TII.
+  void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
+             bool SkipDebugLoc = false,
+             const TargetInstrInfo *TII = nullptr) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
+             bool SkipOpers = false, bool SkipDebugLoc = false,
+             const TargetInstrInfo *TII = nullptr) const;
+  void dump() const;
+  /// @}
+
+  //===--------------------------------------------------------------------===//
+  // Accessors used to build up machine instructions.
+
+  /// Add the specified operand to the instruction.  If it is an implicit
+  /// operand, it is added to the end of the operand list.  If it is an
+  /// explicit operand it is added at the end of the explicit operand list
+  /// (before the first implicit operand).
+  ///
+  /// MF must be the machine function that was used to allocate this
+  /// instruction.
+  ///
+  /// MachineInstrBuilder provides a more convenient interface for creating
+  /// instructions and adding operands.
+  void addOperand(MachineFunction &MF, const MachineOperand &Op);
+
+  /// Add an operand without providing an MF reference. This only works for
+  /// instructions that are inserted in a basic block.
+  ///
+  /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
+  /// preferred.
+  void addOperand(const MachineOperand &Op);
+
+  /// Replace the instruction descriptor (thus opcode) of
+  /// the current instruction with a new one.
+  void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
+
+  /// Replace the current source location information.
+  /// Avoid using this; passing the location to the constructor is preferable.
+  void setDebugLoc(DebugLoc dl) {
+    debugLoc = std::move(dl);
+    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+  }
+
+  /// Erase an operand from an instruction, leaving it with one
+  /// fewer operand than it started with.
+  void RemoveOperand(unsigned i);
+
+  /// Add a MachineMemOperand to the machine instruction.
+  /// This function should be used only occasionally. The setMemRefs function
+  /// is the primary method for setting up a MachineInstr's MemRefs list.
+  void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
+
+  /// Assign this MachineInstr's memory reference descriptor list.
+  /// This does not transfer ownership.
+  void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+    setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
+  }
+
+  /// Assign this MachineInstr's memory reference descriptor list.  First
+  /// element in the pair is the begin iterator/pointer to the array; the
+  /// second is the number of MemoryOperands.  This does not transfer ownership
+  /// of the underlying memory.
+  void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
+    MemRefs = NewMemRefs.first;
+    NumMemRefs = uint8_t(NewMemRefs.second);
+    assert(NumMemRefs == NewMemRefs.second &&
+           "Too many memrefs - must drop memory operands");
+  }
+
+  /// Return a set of memrefs (begin iterator, size) which conservatively
+  /// describe the memory behavior of both MachineInstrs.  This is appropriate
+  /// for use when merging two MachineInstrs into one. This routine does not
+  /// modify the memrefs of this MachineInstr.
+  std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+
+  /// Return the MIFlags which represent both MachineInstrs. This
+  /// should be used when merging two MachineInstrs into one. This routine does
+  /// not modify the MIFlags of this MachineInstr.
+  uint8_t mergeFlagsWith(const MachineInstr& Other) const;
+
+  /// Clear this MachineInstr's memory reference descriptor list.  This resets
+  /// the memrefs to their most conservative state.  This should be used only
+  /// as a last resort since it greatly pessimizes our knowledge of the memory
+  /// access performed by the instruction.
+  void dropMemRefs() {
+    MemRefs = nullptr;
+    NumMemRefs = 0;
+  }
+
+  /// Break any tie involving OpIdx.
+  void untieRegOperand(unsigned OpIdx) {
+    MachineOperand &MO = getOperand(OpIdx);
+    if (MO.isReg() && MO.isTied()) {
+      getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
+      MO.TiedTo = 0;
+    }
+  }
+
+  /// Add all implicit def and use operands to this instruction.
+  void addImplicitDefUseOperands(MachineFunction &MF);
+
+private:
+  /// If this instruction is embedded into a MachineFunction, return the
+  /// MachineRegisterInfo object for the current function, otherwise
+  /// return null.
+  MachineRegisterInfo *getRegInfo();
+
+  /// Unlink all of the register operands in this instruction from their
+  /// respective use lists.  This requires that the operands already be on their
+  /// use lists.
+  void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
+
+  /// Add all of the register operands in this instruction to their
+  /// respective use lists.  This requires that the operands not be on their
+  /// use lists yet.
+  void AddRegOperandsToUseLists(MachineRegisterInfo&);
+
+  /// Slow path for hasProperty when we're dealing with a bundle.
+  bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
+
+  /// \brief Implements the logic of getRegClassConstraintEffectForVReg for
+  /// this MI and the given operand index \p OpIdx.
+  /// If the related operand does not constrain Reg, this returns CurRC.
+  const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
+      unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
+      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
+};
+
+/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
+/// instruction rather than by pointer value.
+/// The hashing and equality testing functions ignore definitions so this is
+/// useful for CSE, etc.
+struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
+  static inline MachineInstr *getEmptyKey() {
+    return nullptr;
+  }
+
+  static inline MachineInstr *getTombstoneKey() {
+    return reinterpret_cast<MachineInstr*>(-1);
+  }
+
+  static unsigned getHashValue(const MachineInstr* const &MI);
+
+  static bool isEqual(const MachineInstr* const &LHS,
+                      const MachineInstr* const &RHS) {
+    if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
+        LHS == getEmptyKey() || LHS == getTombstoneKey())
+      return LHS == RHS;
+    return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
+  MI.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
new file mode 100644
index 0000000..2df89b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -0,0 +1,560 @@
+//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes a function named BuildMI, which is useful for dramatically
+// simplifying how MachineInstrs are created.  It allows use of code like this:
+//
+//   M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
+//           .addReg(argVal1)
+//           .addReg(argVal2);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class MCInstrDesc;
+class MDNode;
+
+namespace RegState {
+
+  enum {
+    Define         = 0x2,
+    Implicit       = 0x4,
+    Kill           = 0x8,
+    Dead           = 0x10,
+    Undef          = 0x20,
+    EarlyClobber   = 0x40,
+    Debug          = 0x80,
+    InternalRead   = 0x100,
+    Renamable      = 0x200,
+    DefineNoRead   = Define | Undef,
+    ImplicitDefine = Implicit | Define,
+    ImplicitKill   = Implicit | Kill
+  };
+
+} // end namespace RegState
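+
+// RegState flags are meant to be OR'd together and passed to
+// MachineInstrBuilder::addReg and friends. A hedged sketch (the registers
+// and insertion point are illustrative; TargetOpcode::COPY is real):
+//
+//   BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstReg)
+//       .addReg(SrcReg, RegState::Kill);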
+
+class MachineInstrBuilder {
+  MachineFunction *MF = nullptr;
+  MachineInstr *MI = nullptr;
+
+public:
+  MachineInstrBuilder() = default;
+
+  /// Create a MachineInstrBuilder for manipulating an existing instruction.
+  /// F must be the machine function that was used to allocate I.
+  MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
+  MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
+      : MF(&F), MI(&*I) {}
+
+  /// Allow automatic conversion to the machine instruction we are working on.
+  operator MachineInstr*() const { return MI; }
+  MachineInstr *operator->() const { return MI; }
+  operator MachineBasicBlock::iterator() const { return MI; }
+
+  /// If conversion operators fail, use this method to get the MachineInstr
+  /// explicitly.
+  MachineInstr *getInstr() const { return MI; }
+
+  /// Add a new virtual register operand.
+  const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
+                                    unsigned SubReg = 0) const {
+    assert((flags & 0x1) == 0 &&
+           "Passing in 'true' to addReg is forbidden! Use enums instead.");
+    MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
+                                               flags & RegState::Define,
+                                               flags & RegState::Implicit,
+                                               flags & RegState::Kill,
+                                               flags & RegState::Dead,
+                                               flags & RegState::Undef,
+                                               flags & RegState::EarlyClobber,
+                                               SubReg,
+                                               flags & RegState::Debug,
+                                               flags & RegState::InternalRead,
+                                               flags & RegState::Renamable));
+    return *this;
+  }
+
+  /// Add a virtual register definition operand.
+  const MachineInstrBuilder &addDef(unsigned RegNo, unsigned Flags = 0,
+                                    unsigned SubReg = 0) const {
+    return addReg(RegNo, Flags | RegState::Define, SubReg);
+  }
+
+  /// Add a virtual register use operand. It is an error for Flags to contain
+  /// `RegState::Define` when calling this function.
+  const MachineInstrBuilder &addUse(unsigned RegNo, unsigned Flags = 0,
+                                    unsigned SubReg = 0) const {
+    assert(!(Flags & RegState::Define) &&
+           "Misleading addUse defines register, use addReg instead.");
+    return addReg(RegNo, Flags, SubReg);
+  }
+
+  /// Add a new immediate operand.
+  const MachineInstrBuilder &addImm(int64_t Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
+                                    unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addFrameIndex(int Idx) const {
+    MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
+                                                  int Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
+                                                          TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
+                                              int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addExternalSymbol(const char *FnName,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
+                                             int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
+    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
+    MI->addMemOperand(*MF, MMO);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
+                                        MachineInstr::mmo_iterator e) const {
+    MI->setMemRefs(b, e);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
+                                        unsigned> MemOperandsRef) const {
+    MI->setMemRefs(MemOperandsRef);
+    return *this;
+  }
+
+  const MachineInstrBuilder &add(const MachineOperand &MO) const {
+    MI->addOperand(*MF, MO);
+    return *this;
+  }
+
+  const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
+    for (const MachineOperand &MO : MOs) {
+      MI->addOperand(*MF, MO);
+    }
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
+    MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
+    assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
+                               : true) &&
+           "first MDNode argument of a DBG_VALUE not a variable");
+    return *this;
+  }
+
+  const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
+    MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
+    MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
+    MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addSym(MCSymbol *Sym,
+                                    unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
+    MI->setFlags(Flags);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
+    MI->setFlag(Flag);
+    return *this;
+  }
+
+  /// Add a displacement from an existing MachineOperand with an added offset.
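+  ///
+  /// A hedged sketch: re-emit an existing displacement operand with an extra
+  /// offset of 4 (the operand index is illustrative):
+  /// \code
+  ///   MIB.addDisp(OldMI.getOperand(3), 4);
+  /// \endcode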
+  const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
+                                     unsigned char TargetFlags = 0) const {
+    // If caller specifies new TargetFlags then use it, otherwise the
+    // default behavior is to copy the target flags from the existing
+    // MachineOperand. This means if the caller wants to clear the
+    // target flags it needs to do so explicitly.
+    if (0 == TargetFlags)
+      TargetFlags = Disp.getTargetFlags();
+
+    switch (Disp.getType()) {
+      default:
+        llvm_unreachable("Unhandled operand type in addDisp()");
+      case MachineOperand::MO_Immediate:
+        return addImm(Disp.getImm() + off);
+      case MachineOperand::MO_ConstantPoolIndex:
+        return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
+                                    TargetFlags);
+      case MachineOperand::MO_GlobalAddress:
+        return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
+                                TargetFlags);
+    }
+  }
+
+  /// Copy all the implicit operands from OtherMI onto this one.
+  const MachineInstrBuilder &
+  copyImplicitOps(const MachineInstr &OtherMI) const {
+    MI->copyImplicitOps(*MF, OtherMI);
+    return *this;
+  }
+
+  bool constrainAllUses(const TargetInstrInfo &TII,
+                        const TargetRegisterInfo &TRI,
+                        const RegisterBankInfo &RBI) const {
+    return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
+  }
+};
+
+/// Builder interface. Specify how to create the initial instruction itself.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
+}
+
+/// This version of the builder sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID, unsigned DestReg) {
+  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
+           .addReg(DestReg, RegState::Define);
+}
+
+/// This version of the builder inserts the newly-built instruction before
+/// the given position in the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::iterator I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+/// This version of the builder inserts the newly-built instruction before
+/// the given position in the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+///
+/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
+/// added to the same bundle.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::instr_iterator I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  // Calling the overload for instr_iterator is always correct.  However, the
+  // definition is not available in headers, so inline the check.
+  if (I.isInsideBundle())
+    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
+  return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  return BuildMI(BB, *I, DL, MCID, DestReg);
+}
+
+/// This version of the builder inserts the newly-built instruction before the
+/// given position in the given MachineBasicBlock, and does NOT take a
+/// destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::iterator I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::instr_iterator I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  // Calling the overload for instr_iterator is always correct.  However, the
+  // definition is not available in headers, so inline the check.
+  if (I.isInsideBundle())
+    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
+  return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return BuildMI(BB, *I, DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the end
+/// of the given MachineBasicBlock, and does NOT take a destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return BuildMI(*BB, BB->end(), DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the
+/// end of the given MachineBasicBlock, and sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID, unsigned DestReg) {
+  return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
+}
+
+/// This version of the builder builds a DBG_VALUE pseudo-instruction
+/// for either a value in a register or a register-indirect
+/// address.  The convention is that a DBG_VALUE is indirect iff the
+/// second operand is an immediate.
+MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                            const MCInstrDesc &MCID, bool IsIndirect,
+                            unsigned Reg, const MDNode *Variable,
+                            const MDNode *Expr);
+
+/// This version of the builder builds a DBG_VALUE pseudo-instruction
+/// for either a value in a register or a register-indirect
+/// address and inserts it at position I.
+MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                            MachineBasicBlock::iterator I, const DebugLoc &DL,
+                            const MCInstrDesc &MCID, bool IsIndirect,
+                            unsigned Reg, const MDNode *Variable,
+                            const MDNode *Expr);
+
+/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
+MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
+                                    MachineBasicBlock::iterator I,
+                                    const MachineInstr &Orig, int FrameIndex);
+
+/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
+/// modifying an instruction in place while iterating over a basic block.
+void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex);
+
+inline unsigned getDefRegState(bool B) {
+  return B ? RegState::Define : 0;
+}
+inline unsigned getImplRegState(bool B) {
+  return B ? RegState::Implicit : 0;
+}
+inline unsigned getKillRegState(bool B) {
+  return B ? RegState::Kill : 0;
+}
+inline unsigned getDeadRegState(bool B) {
+  return B ? RegState::Dead : 0;
+}
+inline unsigned getUndefRegState(bool B) {
+  return B ? RegState::Undef : 0;
+}
+inline unsigned getInternalReadRegState(bool B) {
+  return B ? RegState::InternalRead : 0;
+}
+inline unsigned getDebugRegState(bool B) {
+  return B ? RegState::Debug : 0;
+}
+inline unsigned getRenamableRegState(bool B) {
+  return B ? RegState::Renamable : 0;
+}
+
+/// Get all register state flags from machine operand \p RegOp.
+inline unsigned getRegState(const MachineOperand &RegOp) {
+  assert(RegOp.isReg() && "Not a register operand");
+  return getDefRegState(RegOp.isDef())                    |
+         getImplRegState(RegOp.isImplicit())              |
+         getKillRegState(RegOp.isKill())                  |
+         getDeadRegState(RegOp.isDead())                  |
+         getUndefRegState(RegOp.isUndef())                |
+         getInternalReadRegState(RegOp.isInternalRead())  |
+         getDebugRegState(RegOp.isDebug())                |
+         getRenamableRegState(
+             TargetRegisterInfo::isPhysicalRegister(RegOp.getReg()) &&
+             RegOp.isRenamable());
+}
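+
+// A common idiom (sketch): rebuild a register operand elsewhere while
+// preserving its state flags. MIB and MO are illustrative names.
+//
+//   MIB.addReg(MO.getReg(), getRegState(MO), MO.getSubReg());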
+
+/// Helper class for constructing bundles of MachineInstrs.
+///
+/// MIBundleBuilder can create a bundle from scratch by inserting new
+/// MachineInstrs one at a time, or it can create a bundle from a sequence of
+/// existing MachineInstrs in a basic block.
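+///
+/// A minimal sketch of building a two-instruction bundle (the opcodes and
+/// registers are illustrative):
+/// \code
+///   MIBundleBuilder Bundle(MBB, InsertPos);
+///   Bundle.append(BuildMI(MF, DL, TII.get(Opc1), Reg0).addReg(Reg1));
+///   Bundle.append(BuildMI(MF, DL, TII.get(Opc2), Reg2).addReg(Reg0));
+/// \endcode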
+class MIBundleBuilder {
+  MachineBasicBlock &MBB;
+  MachineBasicBlock::instr_iterator Begin;
+  MachineBasicBlock::instr_iterator End;
+
+public:
+  /// Create an MIBundleBuilder that inserts instructions into a new bundle in
+  /// BB above the bundle or instruction at Pos.
+  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
+      : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
+
+  /// Create a bundle from the sequence of instructions between B and E.
+  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
+                  MachineBasicBlock::iterator E)
+      : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
+    assert(B != E && "No instructions to bundle");
+    ++B;
+    while (B != E) {
+      MachineInstr &MI = *B;
+      ++B;
+      MI.bundleWithPred();
+    }
+  }
+
+  /// Create an MIBundleBuilder representing an existing instruction or bundle
+  /// that has MI as its head.
+  explicit MIBundleBuilder(MachineInstr *MI)
+      : MBB(*MI->getParent()), Begin(MI),
+        End(getBundleEnd(MI->getIterator())) {}
+
+  /// Return a reference to the basic block containing this bundle.
+  MachineBasicBlock &getMBB() const { return MBB; }
+
+  /// Return true if no instructions have been inserted in this bundle yet.
+  /// Empty bundles aren't representable in a MachineBasicBlock.
+  bool empty() const { return Begin == End; }
+
+  /// Return an iterator to the first bundled instruction.
+  MachineBasicBlock::instr_iterator begin() const { return Begin; }
+
+  /// Return an iterator beyond the last bundled instruction.
+  MachineBasicBlock::instr_iterator end() const { return End; }
+
+  /// Insert MI into this bundle before I which must point to an instruction in
+  /// the bundle, or end().
+  MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
+                          MachineInstr *MI) {
+    MBB.insert(I, MI);
+    if (I == Begin) {
+      if (!empty())
+        MI->bundleWithSucc();
+      Begin = MI->getIterator();
+      return *this;
+    }
+    if (I == End) {
+      MI->bundleWithPred();
+      return *this;
+    }
+    // MI was inserted in the middle of the bundle, so its neighbors' flags are
+    // already fine. Update MI's bundle flags manually.
+    MI->setFlag(MachineInstr::BundledPred);
+    MI->setFlag(MachineInstr::BundledSucc);
+    return *this;
+  }
+
+  /// Insert MI into MBB by prepending it to the instructions in the bundle.
+  /// MI will become the first instruction in the bundle.
+  MIBundleBuilder &prepend(MachineInstr *MI) {
+    return insert(begin(), MI);
+  }
+
+  /// Insert MI into MBB by appending it to the instructions in the bundle.
+  /// MI will become the last instruction in the bundle.
+  MIBundleBuilder &append(MachineInstr *MI) {
+    return insert(end(), MI);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h
new file mode 100644
index 0000000..b5341fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h
@@ -0,0 +1,261 @@
+//===-- CodeGen/MachineInstrBundle.h - MI bundle utilities ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utility functions to manipulate machine instruction
+// bundles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle, it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
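+///
+/// A hedged usage sketch, bundling two adjacent instructions (LastMI is
+/// exclusive, so we pass the iterator one past SecondMI):
+/// \code
+///   finalizeBundle(MBB, FirstMI.getIterator(),
+///                  std::next(SecondMI.getIterator()));
+/// \endcode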
+void finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI,
+                    MachineBasicBlock::instr_iterator LastMI);
+
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with the 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI);
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool finalizeBundles(MachineFunction &MF);
+
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleStart(
+    MachineBasicBlock::instr_iterator I) {
+  while (I->isBundledWithPred())
+    --I;
+  return I;
+}
+
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleStart(
+    MachineBasicBlock::const_instr_iterator I) {
+  while (I->isBundledWithPred())
+    --I;
+  return I;
+}
+
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleEnd(
+    MachineBasicBlock::instr_iterator I) {
+  while (I->isBundledWithSucc())
+    ++I;
+  return ++I;
+}
+
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleEnd(
+    MachineBasicBlock::const_instr_iterator I) {
+  while (I->isBundledWithSucc())
+    ++I;
+  return ++I;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//
+
+/// MachineOperandIteratorBase - Iterator that can visit all operands on a
+/// MachineInstr, or all operands on a bundle of MachineInstrs.  This class is
+/// not intended to be used directly, use one of the sub-classes instead.
+///
+/// Intended use:
+///
+///   for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
+///     if (!MIO->isReg())
+///       continue;
+///     ...
+///   }
+///
+class MachineOperandIteratorBase {
+  MachineBasicBlock::instr_iterator InstrI, InstrE;
+  MachineInstr::mop_iterator OpI, OpE;
+
+  // If the operands on InstrI are exhausted, advance InstrI to the next
+  // bundled instruction with operands.
+  void advance() {
+    while (OpI == OpE) {
+      // Don't advance off the basic block, or into a new bundle.
+      if (++InstrI == InstrE || !InstrI->isInsideBundle())
+        break;
+      OpI = InstrI->operands_begin();
+      OpE = InstrI->operands_end();
+    }
+  }
+
+protected:
+  /// MachineOperandIteratorBase - Create an iterator that visits all operands
+  /// on MI, or all operands on every instruction in the bundle containing MI.
+  ///
+  /// @param MI The instruction to examine.
+  /// @param WholeBundle When true, visit all operands on the entire bundle.
+  ///
+  explicit MachineOperandIteratorBase(MachineInstr &MI, bool WholeBundle) {
+    if (WholeBundle) {
+      InstrI = getBundleStart(MI.getIterator());
+      InstrE = MI.getParent()->instr_end();
+    } else {
+      InstrI = InstrE = MI.getIterator();
+      ++InstrE;
+    }
+    OpI = InstrI->operands_begin();
+    OpE = InstrI->operands_end();
+    if (WholeBundle)
+      advance();
+  }
+
+  MachineOperand &deref() const { return *OpI; }
+
+public:
+  /// isValid - Returns true until all the operands have been visited.
+  bool isValid() const { return OpI != OpE; }
+
+  /// Preincrement.  Move to the next operand.
+  void operator++() {
+    assert(isValid() && "Cannot advance MIOperands beyond the last operand");
+    ++OpI;
+    advance();
+  }
+
+  /// getOperandNo - Returns the number of the current operand relative to its
+  /// instruction.
+  ///
+  unsigned getOperandNo() const {
+    return OpI - InstrI->operands_begin();
+  }
+
+  /// VirtRegInfo - Information about a virtual register used by a set of operands.
+  ///
+  struct VirtRegInfo {
+    /// Reads - One of the operands reads the virtual register.  This does not
+    /// include undef or internal use operands, see MO::readsReg().
+    bool Reads;
+
+    /// Writes - One of the operands writes the virtual register.
+    bool Writes;
+
+    /// Tied - Uses and defs must use the same register. This can be because of
+    /// a two-address constraint, or there may be a partial redefinition of a
+    /// sub-register.
+    bool Tied;
+  };
+
+  /// Information about how a physical register Reg is used by a set of
+  /// operands.
+  struct PhysRegInfo {
+    /// There is a regmask operand indicating Reg is clobbered.
+    /// \see MachineOperand::CreateRegMask().
+    bool Clobbered;
+
+    /// Reg or one of its aliases is defined. The definition may only cover
+    /// parts of the register.
+    bool Defined;
+    /// Reg or a super-register is defined. The definition covers the full
+    /// register.
+    bool FullyDefined;
+
+    /// Reg or one of its aliases is read. The register may only be read
+    /// partially.
+    bool Read;
+    /// Reg or a super-register is read. The full register is read.
+    bool FullyRead;
+
+    /// Either:
+    /// - Reg is FullyDefined and all defs of reg or an overlapping
+    ///   register are dead, or
+    /// - Reg is completely dead because "defined" by a clobber.
+    bool DeadDef;
+
+    /// Reg is Defined and all defs of reg or an overlapping register are
+    /// dead.
+    bool PartialDeadDef;
+
+    /// There is a use operand of reg or a super-register with kill flag set.
+    bool Killed;
+  };
+
+  /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
+  /// virtual register. This function should not be called after operator++();
+  /// it expects a fresh iterator.
+  ///
+  /// @param Reg The virtual register to analyze.
+  /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
+  ///            each operand referring to Reg.
+  /// @returns A filled-in VirtRegInfo struct.
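+  ///
+  /// Sketch (Reg is an illustrative virtual register):
+  /// \code
+  ///   MIOperands MIO(MI);
+  ///   auto RI = MIO.analyzeVirtReg(Reg);
+  ///   if (RI.Writes && !RI.Reads) {
+  ///     // MI writes Reg without reading it.
+  ///   }
+  /// \endcode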
+  VirtRegInfo analyzeVirtReg(unsigned Reg,
+           SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
+
+  /// analyzePhysReg - Analyze how the current instruction or bundle uses a
+  /// physical register. This function should not be called after operator++();
+  /// it expects a fresh iterator.
+  ///
+  /// @param Reg The physical register to analyze.
+  /// @returns A filled-in PhysRegInfo struct.
+  PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
+};
+
+/// MIOperands - Iterate over operands of a single instruction.
+///
+class MIOperands : public MachineOperandIteratorBase {
+public:
+  MIOperands(MachineInstr &MI) : MachineOperandIteratorBase(MI, false) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIOperands - Iterate over operands of a single const instruction.
+///
+class ConstMIOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIOperands(const MachineInstr &MI)
+      : MachineOperandIteratorBase(const_cast<MachineInstr &>(MI), false) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+/// MIBundleOperands - Iterate over all operands in a bundle of machine
+/// instructions.
+///
+class MIBundleOperands : public MachineOperandIteratorBase {
+public:
+  MIBundleOperands(MachineInstr &MI) : MachineOperandIteratorBase(MI, true) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
+/// machine instructions.
+///
+class ConstMIBundleOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIBundleOperands(const MachineInstr &MI)
+      : MachineOperandIteratorBase(const_cast<MachineInstr &>(MI), true) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+} // End llvm namespace
+
+#endif // LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h
new file mode 100644
index 0000000..5fe4964
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h
@@ -0,0 +1,289 @@
+//===- llvm/CodeGen/MachineInstrBundleIterator.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an iterator class that bundles MachineInstr.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/simple_ilist.h"
+#include <cassert>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits;
+template <class T> struct MachineInstrBundleIteratorTraits<T, false> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::iterator;
+  using nonconst_instr_iterator = typename list_type::iterator;
+  using const_instr_iterator = typename list_type::const_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<T, true> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::reverse_iterator;
+  using nonconst_instr_iterator = typename list_type::reverse_iterator;
+  using const_instr_iterator = typename list_type::const_reverse_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, false> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::const_iterator;
+  using nonconst_instr_iterator = typename list_type::iterator;
+  using const_instr_iterator = typename list_type::const_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, true> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::const_reverse_iterator;
+  using nonconst_instr_iterator = typename list_type::reverse_iterator;
+  using const_instr_iterator = typename list_type::const_reverse_iterator;
+};
+
+template <bool IsReverse> struct MachineInstrBundleIteratorHelper;
+template <> struct MachineInstrBundleIteratorHelper<false> {
+  /// Get the beginning of the current bundle.
+  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+    if (!I.isEnd())
+      while (I->isBundledWithPred())
+        --I;
+    return I;
+  }
+
+  /// Get the final node of the current bundle.
+  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+    if (!I.isEnd())
+      while (I->isBundledWithSucc())
+        ++I;
+    return I;
+  }
+
+  /// Increment forward ilist iterator.
+  template <class Iterator> static void increment(Iterator &I) {
+    I = std::next(getBundleFinal(I));
+  }
+
+  /// Decrement forward ilist iterator.
+  template <class Iterator> static void decrement(Iterator &I) {
+    I = getBundleBegin(std::prev(I));
+  }
+};
+
+template <> struct MachineInstrBundleIteratorHelper<true> {
+  /// Get the beginning of the current bundle.
+  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+    return MachineInstrBundleIteratorHelper<false>::getBundleBegin(
+               I.getReverse())
+        .getReverse();
+  }
+
+  /// Get the final node of the current bundle.
+  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+    return MachineInstrBundleIteratorHelper<false>::getBundleFinal(
+               I.getReverse())
+        .getReverse();
+  }
+
+  /// Increment reverse ilist iterator.
+  template <class Iterator> static void increment(Iterator &I) {
+    I = getBundleBegin(std::next(I));
+  }
+
+  /// Decrement reverse ilist iterator.
+  template <class Iterator> static void decrement(Iterator &I) {
+    I = std::prev(getBundleFinal(I));
+  }
+};
+
+/// MachineBasicBlock iterator that automatically skips over MIs that are
+/// inside bundles (i.e., it walks top-level MIs only).
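+///
+/// A sketch of visiting top-level instructions only (each bundle is visited
+/// once, via its head):
+/// \code
+///   for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+///        I != E; ++I)
+///     visit(*I); // visit is a hypothetical callback.
+/// \endcode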
+template <typename Ty, bool IsReverse = false>
+class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
+  using Traits = MachineInstrBundleIteratorTraits<Ty, IsReverse>;
+  using instr_iterator = typename Traits::instr_iterator;
+
+  instr_iterator MII;
+
+public:
+  using value_type = typename instr_iterator::value_type;
+  using difference_type = typename instr_iterator::difference_type;
+  using pointer = typename instr_iterator::pointer;
+  using reference = typename instr_iterator::reference;
+  using const_pointer = typename instr_iterator::const_pointer;
+  using const_reference = typename instr_iterator::const_reference;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+private:
+  using nonconst_instr_iterator = typename Traits::nonconst_instr_iterator;
+  using const_instr_iterator = typename Traits::const_instr_iterator;
+  using nonconst_iterator =
+      MachineInstrBundleIterator<typename nonconst_instr_iterator::value_type,
+                                 IsReverse>;
+  using reverse_iterator = MachineInstrBundleIterator<Ty, !IsReverse>;
+
+public:
+  MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {
+    assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&
+           "It's not legal to initialize MachineInstrBundleIterator with a "
+           "bundled MI");
+  }
+
+  MachineInstrBundleIterator(reference MI) : MII(MI) {
+    assert(!MI.isBundledWithPred() && "It's not legal to initialize "
+                                      "MachineInstrBundleIterator with a "
+                                      "bundled MI");
+  }
+
+  MachineInstrBundleIterator(pointer MI) : MII(MI) {
+    // FIXME: This conversion should be explicit.
+    assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "
+                                                "MachineInstrBundleIterator "
+                                                "with a bundled MI");
+  }
+
+  // Template allows conversion from const to nonconst.
+  template <class OtherTy>
+  MachineInstrBundleIterator(
+      const MachineInstrBundleIterator<OtherTy, IsReverse> &I,
+      typename std::enable_if<std::is_convertible<OtherTy *, Ty *>::value,
+                              void *>::type = nullptr)
+      : MII(I.getInstrIterator()) {}
+
+  MachineInstrBundleIterator() : MII(nullptr) {}
+
+  /// Explicit conversion between forward/reverse iterators.
+  ///
+  /// Translate between forward and reverse iterators without changing range
+  /// boundaries.  The resulting iterator will dereference (and have a handle)
+  /// to the previous node, which is somewhat unexpected; but converting the
+  /// two endpoints in a range will give the same range in reverse.
+  ///
+  /// This matches std::reverse_iterator conversions.
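+  ///
+  /// Sketch: converting the endpoints of a forward range into a reverse
+  /// range (iterator names are illustrative):
+  /// \code
+  ///   auto RBegin = MachineBasicBlock::reverse_iterator(FwdEnd);
+  ///   auto REnd = MachineBasicBlock::reverse_iterator(FwdBegin);
+  /// \endcode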
+  explicit MachineInstrBundleIterator(
+      const MachineInstrBundleIterator<Ty, !IsReverse> &I)
+      : MachineInstrBundleIterator(++I.getReverse()) {}
+
+  /// Get the bundle iterator for the given instruction's bundle.
+  static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
+    return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
+  }
+
+  reference operator*() const { return *MII; }
+  pointer operator->() const { return &operator*(); }
+
+  /// Check for null.
+  bool isValid() const { return MII.getNodePtr(); }
+
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L.MII == R.MII;
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const const_instr_iterator &R) {
+    return L.MII == R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const const_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L == R.MII; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const nonconst_instr_iterator &R) {
+    return L.MII == R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const nonconst_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L == R.MII; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) {
+    return L == const_instr_iterator(R); // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) {
+    return const_instr_iterator(L) == R; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const_reference R) {
+    return L == &R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const_reference L,
+                         const MachineInstrBundleIterator &R) {
+    return &L == R; // Avoid assertion about validity of L.
+  }
+
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const const_instr_iterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const const_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const nonconst_instr_iterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const nonconst_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const_reference R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const_reference L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+
+  // Increment and decrement operators...
+  MachineInstrBundleIterator &operator--() {
+    this->decrement(MII);
+    return *this;
+  }
+  MachineInstrBundleIterator &operator++() {
+    this->increment(MII);
+    return *this;
+  }
+  MachineInstrBundleIterator operator--(int) {
+    MachineInstrBundleIterator Temp = *this;
+    --*this;
+    return Temp;
+  }
+  MachineInstrBundleIterator operator++(int) {
+    MachineInstrBundleIterator Temp = *this;
+    ++*this;
+    return Temp;
+  }
+
+  instr_iterator getInstrIterator() const { return MII; }
+
+  nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }
+
+  /// Get a reverse iterator to the same node.
+  ///
+  /// Gives a reverse iterator that will dereference (and have a handle) to the
+  /// same node.  Converting the endpoint iterators in a range will give a
+  /// different range; for range operations, use the explicit conversions.
+  reverse_iterator getReverse() const { return MII.getReverse(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
new file mode 100644
index 0000000..25a3e6b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -0,0 +1,140 @@
+//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables  --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MachineJumpTableInfo class keeps track of jump tables referenced by
+// lowered switch instructions in the MachineFunction.
+//
+// Instructions reference the address of these jump tables through the use of
+// MO_JumpTableIndex values.  When emitting assembly or machine code, these
+// virtual address references are converted to refer to the address of the
+// function jump tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class DataLayout;
+class raw_ostream;
+
+/// MachineJumpTableEntry - One jump table in the jump table info.
+///
+struct MachineJumpTableEntry {
+  /// MBBs - The vector of basic blocks from which to create the jump table.
+  std::vector<MachineBasicBlock*> MBBs;
+
+  explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
+  : MBBs(M) {}
+};
+
+class MachineJumpTableInfo {
+public:
+  /// JTEntryKind - This enum indicates how each entry of the jump table is
+  /// represented and emitted.
+  enum JTEntryKind {
+    /// EK_BlockAddress - Each entry is a plain address of block, e.g.:
+    ///     .word LBB123
+    EK_BlockAddress,
+
+    /// EK_GPRel64BlockAddress - Each entry is an address of block, encoded
+    /// with a relocation as gp-relative, e.g.:
+    ///     .gpdword LBB123
+    EK_GPRel64BlockAddress,
+
+    /// EK_GPRel32BlockAddress - Each entry is an address of block, encoded
+    /// with a relocation as gp-relative, e.g.:
+    ///     .gprel32 LBB123
+    EK_GPRel32BlockAddress,
+
+    /// EK_LabelDifference32 - Each entry is the address of the block minus
+    /// the address of the jump table.  This is used for PIC jump tables where
+    /// gprel32 is not supported.  e.g.:
+    ///      .word LBB123 - LJTI1_2
+    /// If the .set directive is supported, this is emitted as:
+    ///      .set L4_5_set_123, LBB123 - LJTI1_2
+    ///      .word L4_5_set_123
+    EK_LabelDifference32,
+
+    /// EK_Inline - Jump table entries are emitted inline at their point of
+    /// use. It is the responsibility of the target to emit the entries.
+    EK_Inline,
+
+    /// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
+    /// TargetLowering::LowerCustomJumpTableEntry hook.
+    EK_Custom32
+  };
+private:
+  JTEntryKind EntryKind;
+  std::vector<MachineJumpTableEntry> JumpTables;
+public:
+  explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+
+  JTEntryKind getEntryKind() const { return EntryKind; }
+
+  /// getEntrySize - Return the size of each entry in the jump table.
+  unsigned getEntrySize(const DataLayout &TD) const;
+  /// getEntryAlignment - Return the alignment of each entry in the jump table.
+  unsigned getEntryAlignment(const DataLayout &TD) const;
+
+  /// createJumpTableIndex - Create a new jump table.
+  ///
+  unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
+
+  /// isEmpty - Return true if there are no jump tables.
+  ///
+  bool isEmpty() const { return JumpTables.empty(); }
+
+  const std::vector<MachineJumpTableEntry> &getJumpTables() const {
+    return JumpTables;
+  }
+
+  /// RemoveJumpTable - Mark the specific index as being dead.  This will
+  /// prevent it from being emitted.
+  void RemoveJumpTable(unsigned Idx) {
+    JumpTables[Idx].MBBs.clear();
+  }
+
+  /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
+  /// the jump tables to branch to New instead.
+  bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
+  /// the jump table to branch to New instead.
+  bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
+                             MachineBasicBlock *New);
+
+  /// print - Used by the MachineFunction printer to print information about
+  /// jump tables.  Implemented in MachineFunction.cpp
+  ///
+  void print(raw_ostream &OS) const;
+
+  /// dump - Call to stderr.
+  ///
+  void dump() const;
+};
+
+
+/// Prints a jump table entry reference.
+///
+/// The format is:
+///   %jump-table.5       - a jump table entry with index == 5.
+///
+/// Usage: OS << printJumpTableEntryReference(Idx) << '\n';
+Printable printJumpTableEntryReference(unsigned Idx);
+
+} // End llvm namespace
+
+#endif
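A hedged sketch of the intended call pattern for the interface above, as it might appear inside a pass; MF, Old, New, and DestBBs are assumed inputs, and getOrCreateJumpTableInfo is the usual MachineFunction accessor for this object:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineJumpTableInfo.h"
  #include <vector>
  using namespace llvm;

  void retargetTables(MachineFunction &MF, MachineBasicBlock *Old,
                      MachineBasicBlock *New,
                      const std::vector<MachineBasicBlock *> &DestBBs) {
    MachineJumpTableInfo *JTI =
        MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_LabelDifference32);
    // New tables get a sequential index; instructions refer to it through
    // MO_JumpTableIndex operands.
    unsigned Idx = JTI->createJumpTableIndex(DestBBs);
    (void)Idx;
    // Retarget every table that still branches to Old.  A dead table is only
    // emptied (RemoveJumpTable), never erased, so existing indices stay stable.
    JTI->ReplaceMBBInJumpTables(Old, New);
  }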
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
new file mode 100644
index 0000000..104655e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
@@ -0,0 +1,193 @@
+//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineLoopInfo class that is used to identify natural
+// loops and determine the loop depth of various nodes of the CFG.  Note that
+// natural loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function.  For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+//  * whether there is a preheader for the loop
+//  * the number of back edges to the header
+//  * whether or not a particular block branches out of the loop
+//  * the successor blocks of the loop
+//  * the loop depth
+//  * the trip count
+//  * etc...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
+#define LLVM_CODEGEN_MACHINELOOPINFO_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+// Implementation in LoopInfoImpl.h
+class MachineLoop;
+extern template class LoopBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
+public:
+  /// Return the "top" block in the loop, which is the first block in the linear
+  /// layout, ignoring any parts of the loop not contiguous with the part that
+  /// contains the header.
+  MachineBasicBlock *getTopBlock();
+
+  /// Return the "bottom" block in the loop, which is the last block in the
+  /// linear layout, ignoring any parts of the loop not contiguous with the part
+  /// that contains the header.
+  MachineBasicBlock *getBottomBlock();
+
+  /// \brief Find the block that contains the loop control variable and the
+  /// loop test. This will return the latch block if it's one of the exiting
+  /// blocks. Otherwise, return the exiting block. Return 'null' when
+  /// multiple exiting blocks are present.
+  MachineBasicBlock *findLoopControlBlock();
+
+  /// Return the debug location of the start of this loop.
+  /// This looks for a BB terminating instruction with a known debug
+  /// location by looking at the preheader and header blocks. If it
+  /// cannot find a terminating instruction with location information,
+  /// it returns an unknown location.
+  DebugLoc getStartLoc() const;
+
+  void dump() const;
+
+private:
+  friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+
+  explicit MachineLoop(MachineBasicBlock *MBB)
+    : LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
+
+  MachineLoop() = default;
+};
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoopInfo : public MachineFunctionPass {
+  friend class LoopBase<MachineBasicBlock, MachineLoop>;
+
+  LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  MachineLoopInfo() : MachineFunctionPass(ID) {
+    initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+  }
+  MachineLoopInfo(const MachineLoopInfo &) = delete;
+  MachineLoopInfo &operator=(const MachineLoopInfo &) = delete;
+
+  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
+  /// \brief Find the block that either is the loop preheader, or could
+  /// speculatively be used as the preheader. This is e.g. useful to place
+  /// loop setup code. Code that cannot be speculated should not be placed
+  /// here. SpeculativePreheader controls whether it also tries to find the
+  /// speculative preheader when the regular preheader is not present.
+  MachineBasicBlock *findLoopPreheader(MachineLoop *L,
+                                       bool SpeculativePreheader = false) const;
+
+  /// The iterator interface to the top-level loops in the current function.
+  using iterator = LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator;
+  inline iterator begin() const { return LI.begin(); }
+  inline iterator end() const { return LI.end(); }
+  bool empty() const { return LI.empty(); }
+
+  /// Return the innermost loop that BB lives in. If a basic block is in no loop
+  /// (for example the entry node), null is returned.
+  inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
+    return LI.getLoopFor(BB);
+  }
+
+  /// Same as getLoopFor.
+  inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
+    return LI.getLoopFor(BB);
+  }
+
+  /// Return the loop nesting level of the specified block.
+  inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
+    return LI.getLoopDepth(BB);
+  }
+
+  /// True if the block is a loop header node.
+  inline bool isLoopHeader(const MachineBasicBlock *BB) const {
+    return LI.isLoopHeader(BB);
+  }
+
+  /// Calculate the natural loop information.
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  void releaseMemory() override { LI.releaseMemory(); }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// This removes the specified top-level loop from this loop info object. The
+  /// loop is not deleted, as it will presumably be inserted into another loop.
+  inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }
+
+  /// Change the top-level loop that contains BB to the specified loop. This
+  /// should be used by transformations that restructure the loop hierarchy
+  /// tree.
+  inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
+    LI.changeLoopFor(BB, L);
+  }
+
+  /// Replace the specified loop in the top-level loops list with the indicated
+  /// loop.
+  inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
+    LI.changeTopLevelLoop(OldLoop, NewLoop);
+  }
+
+  /// This adds the specified loop to the collection of top-level loops.
+  inline void addTopLevelLoop(MachineLoop *New) {
+    LI.addTopLevelLoop(New);
+  }
+
+  /// This method completely removes BB from all data structures, including all
+  /// of the Loop objects it is nested in and our mapping from
+  /// MachineBasicBlocks to loops.
+  void removeBlock(MachineBasicBlock *BB) {
+    LI.removeBlock(BB);
+  }
+};
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const MachineLoop*> {
+  using NodeRef = const MachineLoop *;
+  using ChildIteratorType = MachineLoopInfo::iterator;
+
+  static NodeRef getEntryNode(const MachineLoop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+template <> struct GraphTraits<MachineLoop*> {
+  using NodeRef = MachineLoop *;
+  using ChildIteratorType = MachineLoopInfo::iterator;
+
+  static NodeRef getEntryNode(MachineLoop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINELOOPINFO_H
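A sketch of the query side as seen from a client MachineFunctionPass; MyPass is illustrative, and getAnalysisUsage is assumed to have called AU.addRequired<MachineLoopInfo>():

  #include "llvm/CodeGen/MachineLoopInfo.h"
  using namespace llvm;

  bool MyPass::runOnMachineFunction(MachineFunction &MF) {
    MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
    // Walk the top-level loops, then make per-block queries.
    for (MachineLoop *L : MLI)
      (void)L->getHeader();
    for (MachineBasicBlock &MBB : MF) {
      unsigned Depth = MLI.getLoopDepth(&MBB); // 0 when MBB is in no loop
      bool IsHeader = MLI.isLoopHeader(&MBB);
      (void)Depth;
      (void)IsHeader;
    }
    return false; // read-only client: nothing was modified
  }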
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
new file mode 100644
index 0000000..dea0d80
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
@@ -0,0 +1,329 @@
+//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineMemOperand class, which is a
+// description of a memory reference. It is used to help track dependencies
+// in the backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
+#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class FoldingSetNodeID;
+class MDNode;
+class raw_ostream;
+class MachineFunction;
+class ModuleSlotTracker;
+
+/// This class contains a discriminated union of information about pointers in
+/// memory operands, relating them back to LLVM IR or to virtual locations (such
+/// as frame indices) that are exposed during codegen.
+struct MachinePointerInfo {
+  /// This is the IR pointer value for the access, or it is null if unknown.
+  /// If this is null, then the access is to a pointer in the default address
+  /// space.
+  PointerUnion<const Value *, const PseudoSourceValue *> V;
+
+  /// Offset - This is an offset from the base Value*.
+  int64_t Offset;
+
+  /// The stack id for targets that use more than one stack; 0 is the
+  /// default stack.
+  uint8_t StackID;
+
+  unsigned AddrSpace = 0;
+
+  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
+                              uint8_t ID = 0)
+      : V(v), Offset(offset), StackID(ID) {
+    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
+  }
+
+  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
+                              uint8_t ID = 0)
+      : V(v), Offset(offset), StackID(ID) {
+    AddrSpace = v ? v->getAddressSpace() : 0;
+  }
+
+  explicit MachinePointerInfo(unsigned AddressSpace = 0)
+      : V((const Value *)nullptr), Offset(0), StackID(0),
+        AddrSpace(AddressSpace) {}
+
+  explicit MachinePointerInfo(
+    PointerUnion<const Value *, const PseudoSourceValue *> v,
+    int64_t offset = 0,
+    uint8_t ID = 0)
+    : V(v), Offset(offset), StackID(ID) {
+    if (V) {
+      if (const auto *ValPtr = V.dyn_cast<const Value*>())
+        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
+      else
+        AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
+    }
+  }
+
+  MachinePointerInfo getWithOffset(int64_t O) const {
+    if (V.isNull())
+      return MachinePointerInfo(AddrSpace);
+    if (V.is<const Value*>())
+      return MachinePointerInfo(V.get<const Value*>(), Offset+O, StackID);
+    return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O,
+                              StackID);
+  }
+
+  /// Return true if memory region [V, V+Offset+Size) is known to be
+  /// dereferenceable.
+  bool isDereferenceable(unsigned Size, LLVMContext &C,
+                         const DataLayout &DL) const;
+
+  /// Return the LLVM IR address space number that this pointer points into.
+  unsigned getAddrSpace() const;
+
+  /// Return a MachinePointerInfo record that refers to the constant pool.
+  static MachinePointerInfo getConstantPool(MachineFunction &MF);
+
+  /// Return a MachinePointerInfo record that refers to the specified
+  /// FrameIndex.
+  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
+                                          int64_t Offset = 0);
+
+  /// Return a MachinePointerInfo record that refers to a jump table entry.
+  static MachinePointerInfo getJumpTable(MachineFunction &MF);
+
+  /// Return a MachinePointerInfo record that refers to a GOT entry.
+  static MachinePointerInfo getGOT(MachineFunction &MF);
+
+  /// Stack pointer relative access.
+  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
+                                     uint8_t ID = 0);
+
+  /// Stack memory without other information.
+  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
+};
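+
+// Illustrative sketch only (MF and FI stand for a MachineFunction and a frame
+// index from the surrounding pass): accesses are typically described by
+// composing the static factories above with getWithOffset, e.g.
+//
+//   MachinePointerInfo Base = MachinePointerInfo::getFixedStack(MF, FI);
+//   MachinePointerInfo Elem = Base.getWithOffset(4); // StackID is preserved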
+
+
+//===----------------------------------------------------------------------===//
+/// A description of a memory reference used in the backend.
+/// Instead of holding a StoreInst or LoadInst, this class holds the address
+/// Value of the reference along with a byte size and offset. This allows it
+/// to describe lowered loads and stores. Also, the special PseudoSourceValue
+/// objects can be used to represent loads and stores to memory locations
+/// that aren't explicit in the regular LLVM IR.
+///
+class MachineMemOperand {
+public:
+  /// Flags values. These may be or'd together.
+  enum Flags : uint16_t {
+    // No flags set.
+    MONone = 0,
+    /// The memory access reads data.
+    MOLoad = 1u << 0,
+    /// The memory access writes data.
+    MOStore = 1u << 1,
+    /// The memory access is volatile.
+    MOVolatile = 1u << 2,
+    /// The memory access is non-temporal.
+    MONonTemporal = 1u << 3,
+    /// The memory access is dereferenceable (i.e., doesn't trap).
+    MODereferenceable = 1u << 4,
+    /// The memory access always returns the same value (or traps).
+    MOInvariant = 1u << 5,
+
+    // Reserved for use by target-specific passes.
+    // Targets may override getSerializableMachineMemOperandTargetFlags() to
+    // enable MIR serialization/parsing of these flags.  If more of these flags
+    // are added, the MIR printing/parsing code will need to be updated as well.
+    MOTargetFlag1 = 1u << 6,
+    MOTargetFlag2 = 1u << 7,
+    MOTargetFlag3 = 1u << 8,
+
+    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
+  };
+
+private:
+  /// Atomic information for this memory operation.
+  struct MachineAtomicInfo {
+    /// Synchronization scope ID for this memory operation.
+    unsigned SSID : 8;            // SyncScope::ID
+    /// Atomic ordering requirements for this memory operation. For cmpxchg
+    /// atomic operations, atomic ordering requirements when store occurs.
+    unsigned Ordering : 4;        // enum AtomicOrdering
+    /// For cmpxchg atomic operations, atomic ordering requirements when store
+    /// does not occur.
+    unsigned FailureOrdering : 4; // enum AtomicOrdering
+  };
+
+  MachinePointerInfo PtrInfo;
+  uint64_t Size;
+  Flags FlagVals;
+  uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
+  MachineAtomicInfo AtomicInfo;
+  AAMDNodes AAInfo;
+  const MDNode *Ranges;
+
+public:
+  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
+  /// size, and base alignment. For atomic operations the synchronization scope
+  /// and atomic ordering requirements must also be specified. For cmpxchg
+  /// atomic operations the atomic ordering requirements when store does not
+  /// occur must also be specified.
+  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
+                    unsigned base_alignment,
+                    const AAMDNodes &AAInfo = AAMDNodes(),
+                    const MDNode *Ranges = nullptr,
+                    SyncScope::ID SSID = SyncScope::System,
+                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
+
+  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
+
+  /// Return the base address of the memory access. This may either be a normal
+  /// LLVM IR Value, or one of the special values used in CodeGen.
+  /// Special values are those obtained via
+  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
+  /// other PseudoSourceValue member functions which return objects which stand
+  /// for frame/stack pointer relative references and other special references
+  /// which are not representable in the high-level IR.
+  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
+
+  const PseudoSourceValue *getPseudoValue() const {
+    return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
+  }
+
+  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
+
+  /// Return the raw flags of the source value, \see Flags.
+  Flags getFlags() const { return FlagVals; }
+
+  /// Bitwise OR the current flags with the given flags.
+  void setFlags(Flags f) { FlagVals |= f; }
+
+  /// For normal values, this is a byte offset added to the base address.
+  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
+  int64_t getOffset() const { return PtrInfo.Offset; }
+
+  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
+
+  /// Return the size in bytes of the memory reference.
+  uint64_t getSize() const { return Size; }
+
+  /// Return the minimum known alignment in bytes of the actual memory
+  /// reference.
+  uint64_t getAlignment() const;
+
+  /// Return the minimum known alignment in bytes of the base address, without
+  /// the offset.
+  uint64_t getBaseAlignment() const { return (1u << BaseAlignLog2) >> 1; }
+
+  /// Return the AA tags for the memory reference.
+  AAMDNodes getAAInfo() const { return AAInfo; }
+
+  /// Return the range tag for the memory reference.
+  const MDNode *getRanges() const { return Ranges; }
+
+  /// Returns the synchronization scope ID for this memory operation.
+  SyncScope::ID getSyncScopeID() const {
+    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
+  }
+
+  /// Return the atomic ordering requirements for this memory operation. For
+  /// cmpxchg atomic operations, return the atomic ordering requirements when
+  /// store occurs.
+  AtomicOrdering getOrdering() const {
+    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
+  }
+
+  /// For cmpxchg atomic operations, return the atomic ordering requirements
+  /// when store does not occur.
+  AtomicOrdering getFailureOrdering() const {
+    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
+  }
+
+  bool isLoad() const { return FlagVals & MOLoad; }
+  bool isStore() const { return FlagVals & MOStore; }
+  bool isVolatile() const { return FlagVals & MOVolatile; }
+  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
+  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
+  bool isInvariant() const { return FlagVals & MOInvariant; }
+
+  /// Returns true if this operation has an atomic ordering requirement of
+  /// unordered or higher, false otherwise.
+  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }
+
+  /// Returns true if this memory operation doesn't have any ordering
+  /// constraints other than normal aliasing. Volatile and atomic memory
+  /// operations can't be reordered.
+  ///
+  /// Currently, we don't model the difference between volatile and atomic
+  /// operations. They should retain their ordering relative to all memory
+  /// operations.
+  bool isUnordered() const { return !isVolatile(); }
+
+  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
+  /// greater alignment. This must only be used when the new alignment applies
+  /// to all users of this MachineMemOperand.
+  void refineAlignment(const MachineMemOperand *MMO);
+
+  /// Change the SourceValue for this MachineMemOperand. This should only be
+  /// used when an object is being relocated and all references to it are being
+  /// updated.
+  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
+  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
+  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
+
+  /// Profile - Gather unique data for the object.
+  ///
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// Support for operator<<.
+  /// @{
+  void print(raw_ostream &OS) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
+             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
+  /// @}
+
+  friend bool operator==(const MachineMemOperand &LHS,
+                         const MachineMemOperand &RHS) {
+    return LHS.getValue() == RHS.getValue() &&
+           LHS.getPseudoValue() == RHS.getPseudoValue() &&
+           LHS.getSize() == RHS.getSize() &&
+           LHS.getOffset() == RHS.getOffset() &&
+           LHS.getFlags() == RHS.getFlags() &&
+           LHS.getAAInfo() == RHS.getAAInfo() &&
+           LHS.getRanges() == RHS.getRanges() &&
+           LHS.getAlignment() == RHS.getAlignment() &&
+           LHS.getAddrSpace() == RHS.getAddrSpace();
+  }
+
+  friend bool operator!=(const MachineMemOperand &LHS,
+                         const MachineMemOperand &RHS) {
+    return !(LHS == RHS);
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
+  MRO.print(OS);
+  return OS;
+}
+
+} // End llvm namespace
+
+#endif
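A short, hedged sketch of how the flag accessors above are typically consumed; hasOnlyUnorderedAccesses is an illustrative helper, not an LLVM API:

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  using namespace llvm;

  // Conservative check built purely from the flags: may the accesses of MI
  // be reordered relative to other memory operations?  Real passes also
  // consult alias analysis, so treat this as a sketch.
  static bool hasOnlyUnorderedAccesses(const MachineInstr &MI) {
    for (const MachineMemOperand *MMO : MI.memoperands()) {
      // isUnordered() currently folds volatile and atomic into one check,
      // as the comment in the class notes.
      if (!MMO->isUnordered())
        return false;
    }
    return true;
  }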
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
new file mode 100644
index 0000000..6be304f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
@@ -0,0 +1,271 @@
+//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect meta information for a module.  This information should be in a
+// neutral form that can be used by different debugging and exception handling
+// schemes.
+//
+// The organization of information is primarily clustered around the source
+// compile units.  The main exception is source line correspondence where
+// inlining may interleave code from various compile units.
+//
+// The following information can be retrieved from the MachineModuleInfo.
+//
+//  -- Source directories - Directories are uniqued based on their canonical
+//     string and assigned a sequential numeric ID (base 1.)
+//  -- Source files - Files are also uniqued based on their name and directory
+//     ID.  A file ID is sequential number (base 1.)
+//  -- Source line correspondence - A vector of file ID, line#, column# triples.
+//     A DEBUG_LOCATION instruction is generated by the DAG Legalizer
+//     corresponding to each entry in the source line list.  This allows a debug
+//     emitter to generate labels referenced by debug information tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
+#define LLVM_CODEGEN_MACHINEMODULEINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Pass.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class CallInst;
+class Function;
+class MachineFunction;
+class MMIAddrLabelMap;
+class Module;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+/// This class can be derived from and used by targets to hold private
+/// target-specific information for each Module.  Objects of this type are
+/// accessed/created with MMI::getInfo and destroyed when the MachineModuleInfo
+/// is destroyed.
+///
+class MachineModuleInfoImpl {
+public:
+  using StubValueTy = PointerIntPair<MCSymbol *, 1, bool>;
+  using SymbolListTy = std::vector<std::pair<MCSymbol *, StubValueTy>>;
+
+  virtual ~MachineModuleInfoImpl();
+
+protected:
+  /// Return the entries from a DenseMap in a deterministic sorted order.
+  /// Clears the map.
+  static SymbolListTy getSortedStubs(DenseMap<MCSymbol*, StubValueTy>&);
+};
+
+//===----------------------------------------------------------------------===//
+/// This class contains meta information specific to a module.  Queries can be
+/// made by different debugging and exception handling schemes and reformatted
+/// for specific use.
+///
+class MachineModuleInfo : public ImmutablePass {
+  const TargetMachine &TM;
+
+  /// This is the MCContext used for the entire code generator.
+  MCContext Context;
+
+  /// This is the LLVM Module being worked on.
+  const Module *TheModule;
+
+  /// This is the object-file-format-specific implementation of
+  /// MachineModuleInfoImpl, which lets targets accumulate whatever info they
+  /// want.
+  MachineModuleInfoImpl *ObjFileMMI;
+
+  /// \name Exception Handling
+  /// \{
+
+  /// Vector of all personality functions ever seen. Used to emit common EH
+  /// frames.
+  std::vector<const Function *> Personalities;
+
+  /// The current call site index being processed, if any. 0 if none.
+  unsigned CurCallSite;
+
+  /// \}
+
+  /// This map keeps track of which symbol is being used for a given basic
+  /// block's address-of-label.
+  MMIAddrLabelMap *AddrLabelSymbols;
+
+  // TODO: Ideally, what we'd like is to have a switch that allows emitting 
+  // synchronous (precise at call-sites only) CFA into .eh_frame. However,
+  // even under this switch, we'd like .debug_frame to be precise when using
+  // -g. At this moment, there's no way to specify that some CFI directives
+  // go into .eh_frame only, while others go into .debug_frame only.
+
+  /// True if debugging information is available in this module.
+  bool DbgInfoAvailable;
+
+  /// True if this module calls VarArg function with floating-point arguments.
+  /// This is used to emit an undefined reference to _fltused on Windows
+  /// targets.
+  bool UsesVAFloatArgument;
+
+  /// True if the module calls the __morestack function indirectly, as is
+  /// required under the large code model on x86. This is used to emit
+  /// a definition of a symbol, __morestack_addr, containing the address. See
+  /// comments in lib/Target/X86/X86FrameLowering.cpp for more details.
+  bool UsesMorestackAddr;
+
+  /// True if the module contains split-stack functions. This is used to
+  /// emit .note.GNU-split-stack section as required by the linker for
+  /// special handling split-stack function calling no-split-stack function.
+  bool HasSplitStack;
+
+  /// True if the module contains no-split-stack functions. This is used to
+  /// emit .note.GNU-no-split-stack section when it also contains split-stack
+  /// functions.
+  bool HasNosplitStack;
+
+  /// Maps IR Functions to their corresponding MachineFunctions.
+  DenseMap<const Function*, std::unique_ptr<MachineFunction>> MachineFunctions;
+  /// Next unique number available for a MachineFunction.
+  unsigned NextFnNum = 0;
+  const Function *LastRequest = nullptr; ///< Used for shortcut/cache.
+  MachineFunction *LastResult = nullptr; ///< Used for shortcut/cache.
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  explicit MachineModuleInfo(const TargetMachine *TM = nullptr);
+  ~MachineModuleInfo() override;
+
+  // Initialization and Finalization
+  bool doInitialization(Module &) override;
+  bool doFinalization(Module &) override;
+
+  const MCContext &getContext() const { return Context; }
+  MCContext &getContext() { return Context; }
+
+  const Module *getModule() const { return TheModule; }
+
+  /// Returns the MachineFunction constructed for the IR function \p F.
+  /// Creates a new MachineFunction if none exists yet.
+  MachineFunction &getOrCreateMachineFunction(const Function &F);
+
+  /// \brief Returns the MachineFunction associated with the IR function \p F
+  /// if there is one, otherwise nullptr.
+  MachineFunction *getMachineFunction(const Function &F) const;
+
+  /// Delete the MachineFunction for the IR function \p F and remove its entry
+  /// from the Function-to-MachineFunction map.
+  void deleteMachineFunctionFor(Function &F);
+
+  /// Keep track of various per-function pieces of information for backends
+  /// that would like to do so.
+  template<typename Ty>
+  Ty &getObjFileInfo() {
+    if (ObjFileMMI == nullptr)
+      ObjFileMMI = new Ty(*this);
+    return *static_cast<Ty*>(ObjFileMMI);
+  }
+
+  template<typename Ty>
+  const Ty &getObjFileInfo() const {
+    return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
+  }
+
+  /// Returns true if valid debug info is present.
+  bool hasDebugInfo() const { return DbgInfoAvailable; }
+  void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
+
+  bool usesVAFloatArgument() const {
+    return UsesVAFloatArgument;
+  }
+
+  void setUsesVAFloatArgument(bool b) {
+    UsesVAFloatArgument = b;
+  }
+
+  bool usesMorestackAddr() const {
+    return UsesMorestackAddr;
+  }
+
+  void setUsesMorestackAddr(bool b) {
+    UsesMorestackAddr = b;
+  }
+
+  bool hasSplitStack() const {
+    return HasSplitStack;
+  }
+
+  void setHasSplitStack(bool b) {
+    HasSplitStack = b;
+  }
+
+  bool hasNosplitStack() const {
+    return HasNosplitStack;
+  }
+
+  void setHasNosplitStack(bool b) {
+    HasNosplitStack = b;
+  }
+
+  /// Return the symbol to be used for the specified basic block when its
+  /// address is taken.  This cannot be its normal LBB label because the block
+  /// may be accessed outside its containing function.
+  MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
+    return getAddrLabelSymbolToEmit(BB).front();
+  }
+
+  /// Return the symbol to be used for the specified basic block when its
+  /// address is taken.  If other blocks were RAUW'd to this one, we may have
+  /// to emit them as well, return the whole set.
+  ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);
+
+  /// If the specified function has had any references to address-taken blocks
+  /// generated, but the block got deleted, return the symbol now so we can
+  /// emit it.  This prevents emitting a reference to a symbol that has no
+  /// definition.
+  void takeDeletedSymbolsForFunction(const Function *F,
+                                     std::vector<MCSymbol*> &Result);
+
+  /// \name Exception Handling
+  /// \{
+
+  /// Set the call site currently being processed.
+  void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
+
+  /// Get the call site currently being processed, if any.  Returns zero if
+  /// none.
+  unsigned getCurrentCallSite() { return CurCallSite; }
+
+  /// Provide the personality function for the exception information.
+  void addPersonality(const Function *Personality);
+
+  /// Return array of personality functions ever seen.
+  const std::vector<const Function *>& getPersonalities() const {
+    return Personalities;
+  }
+  /// \}
+}; // End class MachineModuleInfo
+
+//===- MMI building helpers -----------------------------------------------===//
+
+/// Determine if any floating-point values are being passed to this variadic
+/// function, and set the MachineModuleInfo's usesVAFloatArgument flag if so.
+/// This flag is used to emit an undefined reference to _fltused on Windows,
+/// which will link in MSVCRT's floating-point support.
+void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEMODULEINFO_H
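A hedged sketch of a typical client; it assumes the pass manager already handed out a MachineModuleInfo (e.g. via getAnalysis<MachineModuleInfo>()), and walkMachineFunctions is illustrative:

  #include "llvm/CodeGen/MachineModuleInfo.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void walkMachineFunctions(MachineModuleInfo &MMI, const Module &M) {
    for (const Function &F : M) {
      if (F.isDeclaration())
        continue;
      // Cached lookup: returns null if codegen has not built one yet; use
      // getOrCreateMachineFunction(F) to force creation instead.
      if (MachineFunction *MF = MMI.getMachineFunction(F))
        (void)MF;
    }
    if (MMI.hasDebugInfo()) {
      // Debug-aware emission paths branch on this flag.
    }
  }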
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
new file mode 100644
index 0000000..6a87fa2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -0,0 +1,85 @@
+//===- llvm/CodeGen/MachineModuleInfoImpls.h --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines object-file format specific implementations of
+// MachineModuleInfoImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+class MCSymbol;
+
+/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
+/// for MachO targets.
+class MachineModuleInfoMachO : public MachineModuleInfoImpl {
+  /// GVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something like
+  /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
+  /// is true if this GV is external.
+  DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+  /// ThreadLocalGVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something
+  /// like "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra
+  /// bit is true if this GV is external.
+  DenseMap<MCSymbol *, StubValueTy> ThreadLocalGVStubs;
+
+  virtual void anchor(); // Out of line virtual method.
+
+public:
+  MachineModuleInfoMachO(const MachineModuleInfo &) {}
+
+  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return GVStubs[Sym];
+  }
+
+  StubValueTy &getThreadLocalGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return ThreadLocalGVStubs[Sym];
+  }
+
+  /// Accessor methods to return the set of stubs in sorted order.
+  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+  SymbolListTy GetThreadLocalGVStubList() {
+    return getSortedStubs(ThreadLocalGVStubs);
+  }
+};
+
+/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
+/// for ELF targets.
+class MachineModuleInfoELF : public MachineModuleInfoImpl {
+  /// GVStubs - These stubs are used to materialize global addresses in PIC
+  /// mode.
+  DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+  virtual void anchor(); // Out of line virtual method.
+
+public:
+  MachineModuleInfoELF(const MachineModuleInfo &) {}
+
+  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return GVStubs[Sym];
+  }
+
+  /// Accessor methods to return the set of stubs in sorted order.
+
+  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
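The getObjFileInfo<Ty>() accessor on MachineModuleInfo lazily constructs one of these impls; a sketch of the Mach-O stub pattern, with recordMachOStub as an illustrative helper:

  #include "llvm/CodeGen/MachineModuleInfoImpls.h"
  using namespace llvm;

  void recordMachOStub(MachineModuleInfo &MMI, MCSymbol *StubSym,
                       MCSymbol *Target, bool IsExternal) {
    auto &MachO = MMI.getObjFileInfo<MachineModuleInfoMachO>();
    // The entry is default-constructed on first access; fill in the target
    // symbol and the "is external" bit packed into StubValueTy.
    MachineModuleInfoImpl::StubValueTy &Entry = MachO.getGVStubEntry(StubSym);
    if (!Entry.getPointer())
      Entry = MachineModuleInfoImpl::StubValueTy(Target, IsExternal);
  }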
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
new file mode 100644
index 0000000..4f0db1c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
@@ -0,0 +1,941 @@
+//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineOperand class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
+#define LLVM_CODEGEN_MACHINEOPERAND_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockAddress;
+class ConstantFP;
+class ConstantInt;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineInstr;
+class MachineRegisterInfo;
+class MCCFIInstruction;
+class MDNode;
+class ModuleSlotTracker;
+class TargetMachine;
+class TargetIntrinsicInfo;
+class TargetRegisterInfo;
+class hash_code;
+class raw_ostream;
+class MCSymbol;
+
+/// MachineOperand class - Representation of each machine instruction operand.
+///
+/// This class isn't a POD type because it has a private constructor, but its
+/// destructor must be trivial. Functions like MachineInstr::addOperand(),
+/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
+/// not having to call the MachineOperand destructor.
+///
+class MachineOperand {
+public:
+  enum MachineOperandType : unsigned char {
+    MO_Register,          ///< Register operand.
+    MO_Immediate,         ///< Immediate operand
+    MO_CImmediate,        ///< Immediate >64bit operand
+    MO_FPImmediate,       ///< Floating-point immediate operand
+    MO_MachineBasicBlock, ///< MachineBasicBlock reference
+    MO_FrameIndex,        ///< Abstract Stack Frame Index
+    MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
+    MO_TargetIndex,       ///< Target-dependent index+offset operand.
+    MO_JumpTableIndex,    ///< Address of indexed Jump Table for switch
+    MO_ExternalSymbol,    ///< Name of external global symbol
+    MO_GlobalAddress,     ///< Address of a global value
+    MO_BlockAddress,      ///< Address of a basic block
+    MO_RegisterMask,      ///< Mask of preserved registers.
+    MO_RegisterLiveOut,   ///< Mask of live-out registers.
+    MO_Metadata,          ///< Metadata reference (for debug info)
+    MO_MCSymbol,          ///< MCSymbol reference (for debug/eh info)
+    MO_CFIIndex,          ///< MCCFIInstruction index.
+    MO_IntrinsicID,       ///< Intrinsic ID for ISel
+    MO_Predicate,         ///< Generic predicate for ISel
+    MO_Last = MO_Predicate,
+  };
+
+private:
+  /// OpKind - Specify what kind of operand this is.  This discriminates the
+  /// union.
+  unsigned OpKind : 8;
+
+  /// Subregister number for MO_Register.  A value of 0 indicates the
+  /// MO_Register has no subReg.
+  ///
+  /// For all other kinds of operands, this field holds target-specific flags.
+  unsigned SubReg_TargetFlags : 12;
+
+  /// TiedTo - Non-zero when this register operand is tied to another register
+  /// operand. The encoding of this field is described in the block comment
+  /// before MachineInstr::tieOperands().
+  unsigned TiedTo : 4;
+
+  /// IsDef - True if this is a def, false if this is a use of the register.
+  /// This is only valid on register operands.
+  ///
+  unsigned IsDef : 1;
+
+  /// IsImp - True if this is an implicit def or use, false if it is explicit.
+  /// This is only valid on register operands.
+  ///
+  unsigned IsImp : 1;
+
+  /// IsDeadOrKill
+  /// For uses: IsKill - True if this instruction is the last use of the
+  /// register on this path through the function.
+  /// For defs: IsDead - True if this register is never used by a subsequent
+  /// instruction.
+  /// This is only valid on register operands.
+  unsigned IsDeadOrKill : 1;
+
+  /// See isRenamable().
+  unsigned IsRenamable : 1;
+
+  /// IsUndef - True if this register operand reads an "undef" value, i.e. the
+  /// read value doesn't matter.  This flag can be set on both use and def
+  /// operands.  On a sub-register def operand, it refers to the part of the
+  /// register that isn't written.  On a full-register def operand, it is a
+  /// noop.  See readsReg().
+  ///
+  /// This is only valid on registers.
+  ///
+  /// Note that an instruction may have multiple <undef> operands referring to
+  /// the same register.  In that case, the instruction may depend on those
+  ///   operands reading the same don't-care value.  For example:
+  ///
+  ///   %1 = XOR undef %2, undef %2
+  ///
+  /// Any register can be used for %2, and its value doesn't matter, but
+  /// the two operands must be the same register.
+  ///
+  unsigned IsUndef : 1;
+
+  /// IsInternalRead - True if this operand reads a value that was defined
+  /// inside the same instruction or bundle.  This flag can be set on both use
+  /// and def operands.  On a sub-register def operand, it refers to the part
+  /// of the register that isn't written.  On a full-register def operand, it
+  /// is a noop.
+  ///
+  /// When this flag is set, the instruction bundle must contain at least one
+  /// other def of the register.  If multiple instructions in the bundle define
+  /// the register, the meaning is target-defined.
+  unsigned IsInternalRead : 1;
+
+  /// IsEarlyClobber - True if this MO_Register 'def' operand is written to
+  /// by the MachineInstr before all input registers are read.  This is used to
+  /// model the GCC inline asm '&' constraint modifier.
+  unsigned IsEarlyClobber : 1;
+
+  /// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
+  /// not a real instruction.  Such uses should be ignored during codegen.
+  unsigned IsDebug : 1;
+
+  /// SmallContents - This really should be part of the Contents union, but
+  /// lives out here so we can get a better packed struct.
+  /// MO_Register: Register number.
+  /// OffsetedInfo: Low bits of offset.
+  union {
+    unsigned RegNo;           // For MO_Register.
+    unsigned OffsetLo;        // Matches Contents.OffsetedInfo.OffsetHi.
+  } SmallContents;
+
+  /// ParentMI - This is the instruction that this operand is embedded into.
+  /// This is valid for all operand types, when the operand is in an instr.
+  MachineInstr *ParentMI;
+
+  /// Contents union - This contains the payload for the various operand types.
+  union {
+    MachineBasicBlock *MBB;  // For MO_MachineBasicBlock.
+    const ConstantFP *CFP;   // For MO_FPImmediate.
+    const ConstantInt *CI;   // For MO_CImmediate. Integers > 64bit.
+    int64_t ImmVal;          // For MO_Immediate.
+    const uint32_t *RegMask; // For MO_RegisterMask and MO_RegisterLiveOut.
+    const MDNode *MD;        // For MO_Metadata.
+    MCSymbol *Sym;           // For MO_MCSymbol.
+    unsigned CFIIndex;       // For MO_CFIIndex.
+    Intrinsic::ID IntrinsicID; // For MO_IntrinsicID.
+    unsigned Pred;           // For MO_Predicate
+
+    struct {                  // For MO_Register.
+      // Register number is in SmallContents.RegNo.
+      MachineOperand *Prev;   // Access list for register. See MRI.
+      MachineOperand *Next;
+    } Reg;
+
+    /// OffsetedInfo - This struct contains an object identifier and an offset;
+    /// together they represent the object with an optional offset from it.
+    struct {
+      union {
+        int Index;                // For MO_*Index - The index itself.
+        const char *SymbolName;   // For MO_ExternalSymbol.
+        const GlobalValue *GV;    // For MO_GlobalAddress.
+        const BlockAddress *BA;   // For MO_BlockAddress.
+      } Val;
+      // Low bits of offset are in SmallContents.OffsetLo.
+      int OffsetHi;               // An offset from the object, high 32 bits.
+    } OffsetedInfo;
+  } Contents;
+
+  explicit MachineOperand(MachineOperandType K)
+    : OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {
+    // Assert that the layout is what we expect. It's easy to grow this object.
+    static_assert(alignof(MachineOperand) <= alignof(int64_t),
+                  "MachineOperand shouldn't be more than 8 byte aligned");
+    static_assert(sizeof(Contents) <= 2 * sizeof(void *),
+                  "Contents should be at most two pointers");
+    static_assert(sizeof(MachineOperand) <=
+                      alignTo<alignof(int64_t)>(2 * sizeof(unsigned) +
+                                                3 * sizeof(void *)),
+                  "MachineOperand too big. Should be Kind, SmallContents, "
+                  "ParentMI, and Contents");
+  }
+
+public:
+  /// getType - Returns the MachineOperandType for this operand.
+  ///
+  MachineOperandType getType() const { return (MachineOperandType)OpKind; }
+
+  unsigned getTargetFlags() const {
+    return isReg() ? 0 : SubReg_TargetFlags;
+  }
+  void setTargetFlags(unsigned F) {
+    assert(!isReg() && "Register operands can't have target flags");
+    SubReg_TargetFlags = F;
+    assert(SubReg_TargetFlags == F && "Target flags out of range");
+  }
+  void addTargetFlag(unsigned F) {
+    assert(!isReg() && "Register operands can't have target flags");
+    SubReg_TargetFlags |= F;
+    assert((SubReg_TargetFlags & F) && "Target flags out of range");
+  }
+
+
+  /// getParent - Return the instruction that this operand belongs to.
+  ///
+  MachineInstr *getParent() { return ParentMI; }
+  const MachineInstr *getParent() const { return ParentMI; }
+
+  /// clearParent - Reset the parent pointer.
+  ///
+  /// The MachineOperand copy constructor also copies ParentMI, expecting the
+  /// original to be deleted. If a MachineOperand is ever stored outside a
+  /// MachineInstr, the parent pointer must be cleared.
+  ///
+  /// Never call clearParent() on an operand in a MachineInstr.
+  ///
+  void clearParent() { ParentMI = nullptr; }
+
+  /// Print a subreg index operand.
+  /// MO_Immediate operands can also be subreg indices. If that is the case, the
+  /// subreg index name will be printed. MachineInstr::isOperandSubregIdx can be
+  /// called to check this.
+  static void printSubRegIdx(raw_ostream &OS, uint64_t Index,
+                             const TargetRegisterInfo *TRI);
+
+  /// Print operand target flags.
+  static void printTargetFlags(raw_ostream& OS, const MachineOperand &Op);
+
+  /// Print a MCSymbol as an operand.
+  static void printSymbol(raw_ostream &OS, MCSymbol &Sym);
+
+  /// Print a stack object reference.
+  static void printStackObjectReference(raw_ostream &OS, unsigned FrameIndex,
+                                        bool IsFixed, StringRef Name);
+
+  /// Print the offset with explicit +/- signs.
+  static void printOperandOffset(raw_ostream &OS, int64_t Offset);
+
+  /// Print an IRSlotNumber.
+  static void printIRSlotNumber(raw_ostream &OS, int Slot);
+
+  /// Print the MachineOperand to \p os.
+  /// Providing a valid \p TRI and \p IntrinsicInfo results in more
+  /// target-specific printing. If \p TRI and \p IntrinsicInfo are null, the
+  /// function will try to pick them up from the parent.
+  void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr,
+             const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
+
+  /// More complex way of printing a MachineOperand.
+  /// \param TypeToPrint specifies the generic type to be printed on uses and
+  /// defs. It can be determined using MachineInstr::getTypeToPrint.
+  /// \param PrintDef - whether we want to print `def` on an operand which
+  /// isDef. Sometimes, if the operand is printed before '=', we don't print
+  /// `def`.
+  /// \param IsStandalone - whether we want a verbose output of the MO. This
+  /// prints extra information that can be easily inferred when printing the
+  /// whole function, but not when printing only a fragment of it.
+  /// \param ShouldPrintRegisterTies - whether we want to print register ties.
+  /// Sometimes they are easily determined by the instruction's descriptor
+  /// (MachineInstr::hasComplexRegisterTies can determine if it's needed).
+  /// \param TiedOperandIdx - if we need to print register ties this needs to
+  /// provide the index of the tied register. If not, it will be ignored.
+  /// \param TRI - provide more target-specific information to the printer.
+  /// Unlike the previous function, this one will not try to get the
+  /// information from its parent.
+  /// \param IntrinsicInfo - same as \p TRI.
+  void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
+             bool PrintDef, bool IsStandalone, bool ShouldPrintRegisterTies,
+             unsigned TiedOperandIdx, const TargetRegisterInfo *TRI,
+             const TargetIntrinsicInfo *IntrinsicInfo) const;
+
+  void dump() const;
+
+  //===--------------------------------------------------------------------===//
+  // Accessors that tell you what kind of MachineOperand you're looking at.
+  //===--------------------------------------------------------------------===//
+
+  /// isReg - Tests if this is a MO_Register operand.
+  bool isReg() const { return OpKind == MO_Register; }
+  /// isImm - Tests if this is a MO_Immediate operand.
+  bool isImm() const { return OpKind == MO_Immediate; }
+  /// isCImm - Test if this is a MO_CImmediate operand.
+  bool isCImm() const { return OpKind == MO_CImmediate; }
+  /// isFPImm - Tests if this is a MO_FPImmediate operand.
+  bool isFPImm() const { return OpKind == MO_FPImmediate; }
+  /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
+  bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
+  /// isFI - Tests if this is a MO_FrameIndex operand.
+  bool isFI() const { return OpKind == MO_FrameIndex; }
+  /// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
+  bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
+  /// isTargetIndex - Tests if this is a MO_TargetIndex operand.
+  bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
+  /// isJTI - Tests if this is a MO_JumpTableIndex operand.
+  bool isJTI() const { return OpKind == MO_JumpTableIndex; }
+  /// isGlobal - Tests if this is a MO_GlobalAddress operand.
+  bool isGlobal() const { return OpKind == MO_GlobalAddress; }
+  /// isSymbol - Tests if this is a MO_ExternalSymbol operand.
+  bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
+  /// isBlockAddress - Tests if this is a MO_BlockAddress operand.
+  bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
+  /// isRegMask - Tests if this is a MO_RegisterMask operand.
+  bool isRegMask() const { return OpKind == MO_RegisterMask; }
+  /// isRegLiveOut - Tests if this is a MO_RegisterLiveOut operand.
+  bool isRegLiveOut() const { return OpKind == MO_RegisterLiveOut; }
+  /// isMetadata - Tests if this is a MO_Metadata operand.
+  bool isMetadata() const { return OpKind == MO_Metadata; }
+  bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
+  bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
+  bool isIntrinsicID() const { return OpKind == MO_IntrinsicID; }
+  bool isPredicate() const { return OpKind == MO_Predicate; }
+  //===--------------------------------------------------------------------===//
+  // Accessors for Register Operands
+  //===--------------------------------------------------------------------===//
+
+  /// getReg - Returns the register number.
+  unsigned getReg() const {
+    assert(isReg() && "This is not a register operand!");
+    return SmallContents.RegNo;
+  }
+
+  unsigned getSubReg() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return SubReg_TargetFlags;
+  }
+
+  bool isUse() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return !IsDef;
+  }
+
+  bool isDef() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDef;
+  }
+
+  bool isImplicit() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsImp;
+  }
+
+  bool isDead() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDeadOrKill & IsDef;
+  }
+
+  bool isKill() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDeadOrKill & !IsDef;
+  }
+
+  bool isUndef() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsUndef;
+  }
+
+  /// isRenamable - Returns true if this register may be renamed, i.e. it does
+  /// not generate a value that is somehow read in a way that is not represented
+  /// by the Machine IR (e.g. to meet an ABI or ISA requirement).  This is only
+  /// valid on physical register operands.  Virtual registers are assumed to
+  /// always be renamable regardless of the value of this field.
+  ///
+  /// Operands that are renamable can freely be changed to any other register
+  /// that is a member of the register class returned by
+  /// MI->getRegClassConstraint().
+  ///
+  /// isRenamable can return false for several different reasons:
+  ///
+  /// - ABI constraints (since liveness is not always precisely modeled).  We
+  ///   conservatively handle these cases by setting all physical register
+  ///   operands that didn't start out as virtual regs to not be renamable.
+  ///   Also any physical register operands created after register allocation or
+  ///   whose register is changed after register allocation will not be
+  ///   renamable.  This state is tracked in the MachineOperand::IsRenamable
+  ///   bit.
+  ///
+  /// - Opcode/target constraints: for opcodes that have complex register class
+  ///   requirements (e.g. that depend on other operands/instructions), we set
+  ///   hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq in the machine opcode
+  ///   description.  Operands belonging to instructions with opcodes that are
+  ///   marked hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq return false from
+  ///   isRenamable().  Additionally, the AllowRegisterRenaming target property
+  ///   prevents any operands from being marked renamable for targets that don't
+  ///   have detailed opcode hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq
+  ///   values.
+  bool isRenamable() const;
+
+  bool isInternalRead() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsInternalRead;
+  }
+
+  bool isEarlyClobber() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsEarlyClobber;
+  }
+
+  bool isTied() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return TiedTo;
+  }
+
+  bool isDebug() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDebug;
+  }
+
+  /// readsReg - Returns true if this operand reads the previous value of its
+  /// register.  A use operand with the <undef> flag set doesn't read its
+  /// register.  A sub-register def implicitly reads the other parts of the
+  /// register being redefined unless the <undef> flag is set.
+  ///
+  /// This refers to reading the register value from before the current
+  /// instruction or bundle. Internal bundle reads are not included.
+  bool readsReg() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
+  }
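+
+  /// Illustrative pseudo-MIR sketch (the subreg index name is hypothetical,
+  /// not part of this header):
+  /// \code
+  ///   %0:sub_lo = COPY %1        ; partial redef: reads the rest of %0
+  ///   undef %0:sub_lo = COPY %1  ; <undef> flag set: does not read %0
+  /// \endcode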
+
+  //===--------------------------------------------------------------------===//
+  // Mutators for Register Operands
+  //===--------------------------------------------------------------------===//
+
+  /// Change the register this operand corresponds to.
+  ///
+  void setReg(unsigned Reg);
+
+  void setSubReg(unsigned subReg) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    SubReg_TargetFlags = subReg;
+    assert(SubReg_TargetFlags == subReg && "SubReg out of range");
+  }
+
+  /// substVirtReg - Substitute the current register with the virtual
+  /// subregister Reg:SubReg. Take any existing SubReg index into account,
+  /// using TargetRegisterInfo to compose the subreg indices if necessary.
+  /// Reg must be a virtual register, SubIdx can be 0.
+  ///
+  void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+  /// substPhysReg - Substitute the current register with the physical register
+  /// Reg, taking any existing SubReg into account. For instance,
+  /// substPhysReg(%eax) will change %reg1024:sub_8bit to %al.
+  ///
+  void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
+  void setIsUse(bool Val = true) { setIsDef(!Val); }
+
+  /// Change a def to a use, or a use to a def.
+  void setIsDef(bool Val = true);
+
+  void setImplicit(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsImp = Val;
+  }
+
+  void setIsKill(bool Val = true) {
+    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
+    assert((!Val || !isDebug()) && "Marking a debug operation as kill");
+    IsDeadOrKill = Val;
+  }
+
+  void setIsDead(bool Val = true) {
+    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
+    IsDeadOrKill = Val;
+  }
+
+  void setIsUndef(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsUndef = Val;
+  }
+
+  void setIsRenamable(bool Val = true);
+
+  void setIsInternalRead(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsInternalRead = Val;
+  }
+
+  void setIsEarlyClobber(bool Val = true) {
+    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
+    IsEarlyClobber = Val;
+  }
+
+  void setIsDebug(bool Val = true) {
+    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
+    IsDebug = Val;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Accessors for various operand types.
+  //===--------------------------------------------------------------------===//
+
+  int64_t getImm() const {
+    assert(isImm() && "Wrong MachineOperand accessor");
+    return Contents.ImmVal;
+  }
+
+  const ConstantInt *getCImm() const {
+    assert(isCImm() && "Wrong MachineOperand accessor");
+    return Contents.CI;
+  }
+
+  const ConstantFP *getFPImm() const {
+    assert(isFPImm() && "Wrong MachineOperand accessor");
+    return Contents.CFP;
+  }
+
+  MachineBasicBlock *getMBB() const {
+    assert(isMBB() && "Wrong MachineOperand accessor");
+    return Contents.MBB;
+  }
+
+  int getIndex() const {
+    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+           "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.Index;
+  }
+
+  const GlobalValue *getGlobal() const {
+    assert(isGlobal() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.GV;
+  }
+
+  const BlockAddress *getBlockAddress() const {
+    assert(isBlockAddress() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.BA;
+  }
+
+  MCSymbol *getMCSymbol() const {
+    assert(isMCSymbol() && "Wrong MachineOperand accessor");
+    return Contents.Sym;
+  }
+
+  unsigned getCFIIndex() const {
+    assert(isCFIIndex() && "Wrong MachineOperand accessor");
+    return Contents.CFIIndex;
+  }
+
+  Intrinsic::ID getIntrinsicID() const {
+    assert(isIntrinsicID() && "Wrong MachineOperand accessor");
+    return Contents.IntrinsicID;
+  }
+
+  unsigned getPredicate() const {
+    assert(isPredicate() && "Wrong MachineOperand accessor");
+    return Contents.Pred;
+  }
+
+  /// Return the offset from the symbol in this operand. This always returns 0
+  /// for ExternalSymbol operands.
+  int64_t getOffset() const {
+    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+            isTargetIndex() || isBlockAddress()) &&
+           "Wrong MachineOperand accessor");
+    return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+           SmallContents.OffsetLo;
+  }
+
+  const char *getSymbolName() const {
+    assert(isSymbol() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.SymbolName;
+  }
+
+  /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
+  /// It is sometimes necessary to detach the register mask pointer from its
+  /// machine operand. This static method can be used for such detached bit
+  /// mask pointers.
+  static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
+    // See TargetRegisterInfo.h.
+    assert(PhysReg < (1u << 30) && "Not a physical register");
+    return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
+  }
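+
+  /// Illustrative sketch: collecting every physreg clobbered by a detached
+  /// mask pointer (TRI, Mask and Clobbered are assumed to exist in the
+  /// caller):
+  /// \code
+  ///   for (unsigned PR = 1, E = TRI->getNumRegs(); PR != E; ++PR)
+  ///     if (MachineOperand::clobbersPhysReg(Mask, PR))
+  ///       Clobbered.push_back(PR);
+  /// \endcode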
+
+  /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
+  bool clobbersPhysReg(unsigned PhysReg) const {
+     return clobbersPhysReg(getRegMask(), PhysReg);
+  }
+
+  /// getRegMask - Returns a bit mask of registers preserved by this RegMask
+  /// operand.
+  const uint32_t *getRegMask() const {
+    assert(isRegMask() && "Wrong MachineOperand accessor");
+    return Contents.RegMask;
+  }
+
+  /// getRegLiveOut - Returns a bit mask of live-out registers.
+  const uint32_t *getRegLiveOut() const {
+    assert(isRegLiveOut() && "Wrong MachineOperand accessor");
+    return Contents.RegMask;
+  }
+
+  const MDNode *getMetadata() const {
+    assert(isMetadata() && "Wrong MachineOperand accessor");
+    return Contents.MD;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Mutators for various operand types.
+  //===--------------------------------------------------------------------===//
+
+  void setImm(int64_t immVal) {
+    assert(isImm() && "Wrong MachineOperand mutator");
+    Contents.ImmVal = immVal;
+  }
+
+  void setFPImm(const ConstantFP *CFP) {
+    assert(isFPImm() && "Wrong MachineOperand mutator");
+    Contents.CFP = CFP;
+  }
+
+  void setOffset(int64_t Offset) {
+    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+            isTargetIndex() || isBlockAddress()) &&
+           "Wrong MachineOperand mutator");
+    SmallContents.OffsetLo = unsigned(Offset);
+    Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
+  }
+
+  void setIndex(int Idx) {
+    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+           "Wrong MachineOperand mutator");
+    Contents.OffsetedInfo.Val.Index = Idx;
+  }
+
+  void setMetadata(const MDNode *MD) {
+    assert(isMetadata() && "Wrong MachineOperand mutator");
+    Contents.MD = MD;
+  }
+
+  void setMBB(MachineBasicBlock *MBB) {
+    assert(isMBB() && "Wrong MachineOperand mutator");
+    Contents.MBB = MBB;
+  }
+
+  /// Sets value of register mask operand referencing Mask.  The
+  /// operand does not take ownership of the memory referenced by Mask, it must
+  /// remain valid for the lifetime of the operand. See CreateRegMask().
+  /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+  void setRegMask(const uint32_t *RegMaskPtr) {
+    assert(isRegMask() && "Wrong MachineOperand mutator");
+    Contents.RegMask = RegMaskPtr;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other methods.
+  //===--------------------------------------------------------------------===//
+
+  /// Returns true if this operand is identical to the specified operand except
+  /// for liveness related flags (isKill, isUndef and isDead). Note that this
+  /// should stay in sync with the hash_value overload below.
+  bool isIdenticalTo(const MachineOperand &Other) const;
+
+  /// \brief MachineOperand hash_value overload.
+  ///
+  /// Note that this includes the same information in the hash that
+  /// isIdenticalTo uses for comparison. It is thus suited for use in hash
+  /// tables which use that function for equality comparisons only. This must
+  /// stay exactly in sync with isIdenticalTo above.
+  friend hash_code hash_value(const MachineOperand &MO);
+
+  /// ChangeToImmediate - Replace this operand with a new immediate operand of
+  /// the specified value.  If an operand is known to be an immediate already,
+  /// the setImm method should be used.
+  void ChangeToImmediate(int64_t ImmVal);
+
+  /// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
+  /// of the specified value.  If an operand is known to be an FP immediate
+  /// already, the setFPImm method should be used.
+  void ChangeToFPImmediate(const ConstantFP *FPImm);
+
+  /// ChangeToES - Replace this operand with a new external symbol operand.
+  void ChangeToES(const char *SymName, unsigned char TargetFlags = 0);
+
+  /// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
+  void ChangeToMCSymbol(MCSymbol *Sym);
+
+  /// Replace this operand with a frame index.
+  void ChangeToFrameIndex(int Idx);
+
+  /// Replace this operand with a target index.
+  void ChangeToTargetIndex(unsigned Idx, int64_t Offset,
+                           unsigned char TargetFlags = 0);
+
+  /// ChangeToRegister - Replace this operand with a new register operand of
+  /// the specified value.  If an operand is known to be a register already,
+  /// the setReg method should be used.
+  void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
+                        bool isKill = false, bool isDead = false,
+                        bool isUndef = false, bool isDebug = false);
+
+  //===--------------------------------------------------------------------===//
+  // Construction methods.
+  //===--------------------------------------------------------------------===//
+
+  static MachineOperand CreateImm(int64_t Val) {
+    MachineOperand Op(MachineOperand::MO_Immediate);
+    Op.setImm(Val);
+    return Op;
+  }
+
+  static MachineOperand CreateCImm(const ConstantInt *CI) {
+    MachineOperand Op(MachineOperand::MO_CImmediate);
+    Op.Contents.CI = CI;
+    return Op;
+  }
+
+  static MachineOperand CreateFPImm(const ConstantFP *CFP) {
+    MachineOperand Op(MachineOperand::MO_FPImmediate);
+    Op.Contents.CFP = CFP;
+    return Op;
+  }
+
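+  /// Illustrative sketch of typical use: appending an explicit def of \p Reg
+  /// to an existing instruction (MI and MF are assumed to exist in the
+  /// caller):
+  /// \code
+  ///   MI->addOperand(MF, MachineOperand::CreateReg(Reg, /*isDef=*/true));
+  /// \endcode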
+  static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
+                                  bool isKill = false, bool isDead = false,
+                                  bool isUndef = false,
+                                  bool isEarlyClobber = false,
+                                  unsigned SubReg = 0, bool isDebug = false,
+                                  bool isInternalRead = false,
+                                  bool isRenamable = false) {
+    assert(!(isDead && !isDef) && "Dead flag on non-def");
+    assert(!(isKill && isDef) && "Kill flag on def");
+    MachineOperand Op(MachineOperand::MO_Register);
+    Op.IsDef = isDef;
+    Op.IsImp = isImp;
+    Op.IsDeadOrKill = isKill | isDead;
+    Op.IsRenamable = isRenamable;
+    Op.IsUndef = isUndef;
+    Op.IsInternalRead = isInternalRead;
+    Op.IsEarlyClobber = isEarlyClobber;
+    Op.TiedTo = 0;
+    Op.IsDebug = isDebug;
+    Op.SmallContents.RegNo = Reg;
+    Op.Contents.Reg.Prev = nullptr;
+    Op.Contents.Reg.Next = nullptr;
+    Op.setSubReg(SubReg);
+    return Op;
+  }
+  static MachineOperand CreateMBB(MachineBasicBlock *MBB,
+                                  unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
+    Op.setMBB(MBB);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateFI(int Idx) {
+    MachineOperand Op(MachineOperand::MO_FrameIndex);
+    Op.setIndex(Idx);
+    return Op;
+  }
+  static MachineOperand CreateCPI(unsigned Idx, int Offset,
+                                  unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
+    Op.setIndex(Idx);
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
+                                          unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_TargetIndex);
+    Op.setIndex(Idx);
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateJTI(unsigned Idx, unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_JumpTableIndex);
+    Op.setIndex(Idx);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_GlobalAddress);
+    Op.Contents.OffsetedInfo.Val.GV = GV;
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateES(const char *SymName,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_ExternalSymbol);
+    Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
+    Op.setOffset(0); // Offset is always 0.
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_BlockAddress);
+    Op.Contents.OffsetedInfo.Val.BA = BA;
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  /// CreateRegMask - Creates a register mask operand referencing Mask.  The
+  /// operand does not take ownership of the memory referenced by Mask, it
+  /// must remain valid for the lifetime of the operand.
+  ///
+  /// A RegMask operand represents a set of non-clobbered physical registers
+  /// on an instruction that clobbers many registers, typically a call.  The
+  /// bit mask has a bit set for each physreg that is preserved by this
+  /// instruction, as described in the documentation for
+  /// TargetRegisterInfo::getCallPreservedMask().
+  ///
+  /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+  ///
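+  /// Illustrative sketch: attaching the call-preserved mask to a call
+  /// instruction being built (MIB, TRI, MF and CC are assumed to exist in
+  /// the caller):
+  /// \code
+  ///   MIB.add(MachineOperand::CreateRegMask(
+  ///       TRI->getCallPreservedMask(MF, CC)));
+  /// \endcode
+  ///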
+  static MachineOperand CreateRegMask(const uint32_t *Mask) {
+    assert(Mask && "Missing register mask");
+    MachineOperand Op(MachineOperand::MO_RegisterMask);
+    Op.Contents.RegMask = Mask;
+    return Op;
+  }
+  static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
+    assert(Mask && "Missing live-out register mask");
+    MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
+    Op.Contents.RegMask = Mask;
+    return Op;
+  }
+  static MachineOperand CreateMetadata(const MDNode *Meta) {
+    MachineOperand Op(MachineOperand::MO_Metadata);
+    Op.Contents.MD = Meta;
+    return Op;
+  }
+
+  static MachineOperand CreateMCSymbol(MCSymbol *Sym,
+                                       unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_MCSymbol);
+    Op.Contents.Sym = Sym;
+    Op.setOffset(0);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+
+  static MachineOperand CreateCFIIndex(unsigned CFIIndex) {
+    MachineOperand Op(MachineOperand::MO_CFIIndex);
+    Op.Contents.CFIIndex = CFIIndex;
+    return Op;
+  }
+
+  static MachineOperand CreateIntrinsicID(Intrinsic::ID ID) {
+    MachineOperand Op(MachineOperand::MO_IntrinsicID);
+    Op.Contents.IntrinsicID = ID;
+    return Op;
+  }
+
+  static MachineOperand CreatePredicate(unsigned Pred) {
+    MachineOperand Op(MachineOperand::MO_Predicate);
+    Op.Contents.Pred = Pred;
+    return Op;
+  }
+
+  friend class MachineInstr;
+  friend class MachineRegisterInfo;
+
+private:
+  // If this operand is currently a register operand, and if this is in a
+  // function, deregister the operand from the register's use/def list.
+  void removeRegFromUses();
+
+  /// Artificial kinds for DenseMap usage.
+  enum : unsigned char {
+    MO_Empty = MO_Last + 1,
+    MO_Tombstone,
+  };
+
+  friend struct DenseMapInfo<MachineOperand>;
+
+  //===--------------------------------------------------------------------===//
+  // Methods for handling register use/def lists.
+  //===--------------------------------------------------------------------===//
+
+  /// isOnRegUseList - Return true if this operand is on a register use/def
+  /// list or false if not.  This can only be called for register operands
+  /// that are part of a machine instruction.
+  bool isOnRegUseList() const {
+    assert(isReg() && "Can only add reg operand to use lists");
+    return Contents.Reg.Prev != nullptr;
+  }
+};
+
+template <> struct DenseMapInfo<MachineOperand> {
+  static MachineOperand getEmptyKey() {
+    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
+        MachineOperand::MO_Empty));
+  }
+  static MachineOperand getTombstoneKey() {
+    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
+        MachineOperand::MO_Tombstone));
+  }
+  static unsigned getHashValue(const MachineOperand &MO) {
+    return hash_value(MO);
+  }
+  static bool isEqual(const MachineOperand &LHS, const MachineOperand &RHS) {
+    if (LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
+                             MachineOperand::MO_Empty) ||
+        LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
+                             MachineOperand::MO_Tombstone))
+      return LHS.getType() == RHS.getType();
+    return LHS.isIdenticalTo(RHS);
+  }
+};
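+
+/// With this specialization a MachineOperand can key a DenseMap directly; an
+/// illustrative sketch:
+/// \code
+///   DenseMap<MachineOperand, unsigned> OperandSeen;
+///   ++OperandSeen[MachineOperand::CreateImm(42)];
+/// \endcode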
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand &MO) {
+  MO.print(OS);
+  return OS;
+}
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with IBM xlC compiler.
+hash_code hash_value(const MachineOperand &MO);
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
new file mode 100644
index 0000000..2fdefbe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
@@ -0,0 +1,224 @@
+///===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*----===//
+///
+///                     The LLVM Compiler Infrastructure
+///
+/// This file is distributed under the University of Illinois Open Source
+/// License. See LICENSE.TXT for details.
+///
+///===---------------------------------------------------------------------===//
+/// \file
+/// Optimization diagnostic interfaces for machine passes.  It's packaged as an
+/// analysis pass so that passes using this service also become dependent on
+/// MBFI.  MBFI is used to compute the "hotness" of the diagnostic message.
+///
+///===---------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H
+#define LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H
+
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineBasicBlock;
+class MachineBlockFrequencyInfo;
+class MachineInstr;
+
+/// \brief Common features for diagnostics dealing with optimization remarks
+/// that are used by machine passes.
+class DiagnosticInfoMIROptimization : public DiagnosticInfoOptimizationBase {
+public:
+  DiagnosticInfoMIROptimization(enum DiagnosticKind Kind, const char *PassName,
+                                StringRef RemarkName,
+                                const DiagnosticLocation &Loc,
+                                const MachineBasicBlock *MBB)
+      : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
+                                       MBB->getParent()->getFunction(), Loc),
+        MBB(MBB) {}
+
+  /// MI-specific kinds of diagnostic Arguments.
+  struct MachineArgument : public DiagnosticInfoOptimizationBase::Argument {
+    /// Print an entire MachineInstr.
+    MachineArgument(StringRef Key, const MachineInstr &MI);
+  };
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() >= DK_FirstMachineRemark &&
+           DI->getKind() <= DK_LastMachineRemark;
+  }
+
+  const MachineBasicBlock *getBlock() const { return MBB; }
+
+private:
+  const MachineBasicBlock *MBB;
+};
+
+/// Diagnostic information for applied optimization remarks.
+class MachineOptimizationRemark : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass=, then the diagnostic will
+  /// be emitted.  \p RemarkName is a textual identifier for the remark.  \p
+  /// Loc is the debug location and \p MBB is the block that the optimization
+  /// operates in.
+  MachineOptimizationRemark(const char *PassName, StringRef RemarkName,
+                            const DiagnosticLocation &Loc,
+                            const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemark, PassName,
+                                      RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemark;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isPassedOptRemarkEnabled(getPassName());
+  }
+};
+
+/// Diagnostic information for missed-optimization remarks.
+class MachineOptimizationRemarkMissed : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-missed=, then the
+  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
+  /// remark.  \p Loc is the debug location and \p MBB is the block that the
+  /// optimization operates in.
+  MachineOptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+                                  const DiagnosticLocation &Loc,
+                                  const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkMissed,
+                                      PassName, RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemarkMissed;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isMissedOptRemarkEnabled(getPassName());
+  }
+};
+
+/// Diagnostic information for optimization analysis remarks.
+class MachineOptimizationRemarkAnalysis : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-analysis=, then the
+  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
+  /// remark.  \p Loc is the debug location and \p MBB is the block that the
+  /// optimization operates in.
+  MachineOptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+                                    const DiagnosticLocation &Loc,
+                                    const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkAnalysis,
+                                      PassName, RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemarkAnalysis;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isAnalysisRemarkEnabled(getPassName());
+  }
+};
+
+/// Extend llvm::ore:: with MI-specific helper names.
+namespace ore {
+using MNV = DiagnosticInfoMIROptimization::MachineArgument;
+}
+
+/// The optimization diagnostic interface.
+///
+/// It allows reporting both when optimizations are performed and when they are
+/// not, along with the reasons.  Hotness information of the corresponding
+/// code region can be included in the remark if DiagnosticsHotnessRequested is
+/// enabled in the LLVM context.
+class MachineOptimizationRemarkEmitter {
+public:
+  MachineOptimizationRemarkEmitter(MachineFunction &MF,
+                                   MachineBlockFrequencyInfo *MBFI)
+      : MF(MF), MBFI(MBFI) {}
+
+  /// Emit an optimization remark.
+  void emit(DiagnosticInfoOptimizationBase &OptDiag);
+
+  /// \brief Whether we allow for extra compile-time budget to perform more
+  /// analysis to be more informative.
+  ///
+  /// This is useful to enable additional missed optimizations to be reported
+  /// that are normally too noisy.  In this mode, we can use the extra analysis
+  /// (1) to filter trivial false positives or (2) to provide more context so
+  /// that non-trivial false positives can be quickly detected by the user.
+  bool allowExtraAnalysis(StringRef PassName) const {
+    return (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+            MF.getFunction().getContext()
+            .getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
+  }
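+
+  /// Illustrative sketch: gating an expensive diagnostic computation on this
+  /// check (DEBUG_TYPE names the calling pass, as is conventional, and
+  /// computeDetailedRemarks is a hypothetical helper):
+  /// \code
+  ///   if (ORE->allowExtraAnalysis(DEBUG_TYPE))
+  ///     computeDetailedRemarks();
+  /// \endcode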
+
+  /// \brief Take a lambda that returns a remark which will be emitted.  Second
+  /// argument is only used to restrict this to functions.
+  template <typename T>
+  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
+    // Avoid building the remark unless we know there are at least *some*
+    // remarks enabled. We can't currently check whether remarks are requested
+    // for the calling pass since that requires actually building the remark.
+
+    if (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+        MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+      auto R = RemarkBuilder();
+      emit((DiagnosticInfoOptimizationBase &)R);
+    }
+  }
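+
+  /// Illustrative sketch of the lambda-based interface (DEBUG_TYPE and MI are
+  /// assumed to exist in the calling pass):
+  /// \code
+  ///   ORE->emit([&]() {
+  ///     return MachineOptimizationRemarkMissed(DEBUG_TYPE, "NotHoisted",
+  ///                                            MI.getDebugLoc(),
+  ///                                            MI.getParent())
+  ///            << "cannot hoist: " << ore::MNV("Instr", MI);
+  ///   });
+  /// \endcode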
+
+private:
+  MachineFunction &MF;
+
+  /// MBFI is only set if hotness is requested.
+  MachineBlockFrequencyInfo *MBFI;
+
+  /// Compute hotness from IR value (currently assumed to be a block) if PGO is
+  /// available.
+  Optional<uint64_t> computeHotness(const MachineBasicBlock &MBB);
+
+  /// Similar but use value from \p OptDiag and update hotness there.
+  void computeHotness(DiagnosticInfoMIROptimization &Remark);
+
+  /// \brief Only allow verbose messages if we know we're filtering by hotness
+  /// (BFI is only set in this case).
+  bool shouldEmitVerbose() { return MBFI != nullptr; }
+};
+
+/// The analysis pass
+///
+/// Note that this pass shouldn't generally be marked as preserved by other
+/// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
+/// could be freed.
+class MachineOptimizationRemarkEmitterPass : public MachineFunctionPass {
+  std::unique_ptr<MachineOptimizationRemarkEmitter> ORE;
+
+public:
+  MachineOptimizationRemarkEmitterPass();
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineOptimizationRemarkEmitter &getORE() {
+    assert(ORE && "pass not run yet");
+    return *ORE;
+  }
+
+  static char ID;
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h b/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h
new file mode 100644
index 0000000..3aba0bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h
@@ -0,0 +1,142 @@
+//===- llvm/CodeGen/MachinePassRegistry.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the mechanics for machine function pass registries.  A
+// function pass registry (MachinePassRegistry) is auto filled by the static
+// constructors of MachinePassRegistryNode.  Further there is a command line
+// parser (RegisterPassParser) which listens to each registry for additions
+// and deletions, so that the appropriate command option is updated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+using MachinePassCtor = void *(*)();
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryListener - Listener for additions to and removals from
+/// the registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryListener {
+  virtual void anchor();
+
+public:
+  MachinePassRegistryListener() = default;
+  virtual ~MachinePassRegistryListener() = default;
+
+  virtual void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) = 0;
+  virtual void NotifyRemove(StringRef N) = 0;
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryNode - Machine pass node stored in registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryNode {
+private:
+  MachinePassRegistryNode *Next = nullptr; // Next function pass in list.
+  StringRef Name;                       // Name of function pass.
+  StringRef Description;                // Description string.
+  MachinePassCtor Ctor;                 // Function pass creator.
+
+public:
+  MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
+      : Name(N), Description(D), Ctor(C) {}
+
+  // Accessors
+  MachinePassRegistryNode *getNext()      const { return Next; }
+  MachinePassRegistryNode **getNextAddress()    { return &Next; }
+  StringRef getName()                   const { return Name; }
+  StringRef getDescription()            const { return Description; }
+  MachinePassCtor getCtor()               const { return Ctor; }
+  void setNext(MachinePassRegistryNode *N)      { Next = N; }
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistry - Track the registration of machine passes.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistry {
+private:
+  MachinePassRegistryNode *List;        // List of registry nodes.
+  MachinePassCtor Default;              // Default function pass creator.
+  MachinePassRegistryListener *Listener; // Listener for list adds and removes.
+
+public:
+  // NO CONSTRUCTOR - we don't want static constructor ordering to mess
+  // with the registry.
+
+  // Accessors.
+  //
+  MachinePassRegistryNode *getList()                    { return List; }
+  MachinePassCtor getDefault()                          { return Default; }
+  void setDefault(MachinePassCtor C)                    { Default = C; }
+  void setDefault(StringRef Name);
+  void setListener(MachinePassRegistryListener *L)      { Listener = L; }
+
+  /// Add - Adds a function pass to the registration list.
+  ///
+  void Add(MachinePassRegistryNode *Node);
+
+  /// Remove - Removes a function pass from the registration list.
+  ///
+  void Remove(MachinePassRegistryNode *Node);
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterPassParser class - Handle the addition of new machine passes.
+///
+//===----------------------------------------------------------------------===//
+template<class RegistryClass>
+class RegisterPassParser : public MachinePassRegistryListener,
+                   public cl::parser<typename RegistryClass::FunctionPassCtor> {
+public:
+  RegisterPassParser(cl::Option &O)
+      : cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
+  ~RegisterPassParser() override { RegistryClass::setListener(nullptr); }
+
+  void initialize() {
+    cl::parser<typename RegistryClass::FunctionPassCtor>::initialize();
+
+    // Add existing passes to option.
+    for (RegistryClass *Node = RegistryClass::getList();
+         Node; Node = Node->getNext()) {
+      this->addLiteralOption(Node->getName(),
+                      (typename RegistryClass::FunctionPassCtor)Node->getCtor(),
+                             Node->getDescription());
+    }
+
+    // Make sure we listen for list changes.
+    RegistryClass::setListener(this);
+  }
+
+  // Implement the MachinePassRegistryListener callbacks.
+  void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) override {
+    this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
+  }
+  void NotifyRemove(StringRef N) override {
+    this->removeLiteralOption(N);
+  }
+};
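+
+/// Illustrative sketch, mirroring how in-tree registries are exposed on the
+/// command line (RegisterScheduler and createDefaultScheduler are examples
+/// from existing LLVM code, not defined in this header):
+/// \code
+///   static cl::opt<RegisterScheduler::FunctionPassCtor, false,
+///                  RegisterPassParser<RegisterScheduler>>
+///       ISHeuristic("pre-RA-sched", cl::init(&createDefaultScheduler),
+///                   cl::desc("Instruction schedulers available:"));
+/// \endcode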
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEPASSREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h b/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h
new file mode 100644
index 0000000..c6a4159
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h
@@ -0,0 +1,86 @@
+//=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post dominance information for
+// target-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+///
+/// MachinePostDominatorTree - a MachineFunctionPass that wraps a
+/// PostDomTreeBase<MachineBasicBlock> and is used to compute the
+/// post-dominator tree for machine code.
+///
+struct MachinePostDominatorTree : public MachineFunctionPass {
+private:
+  PostDomTreeBase<MachineBasicBlock> *DT;
+
+public:
+  static char ID;
+
+  MachinePostDominatorTree();
+
+  ~MachinePostDominatorTree() override;
+
+  FunctionPass *createMachinePostDominatorTreePass();
+
+  const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
+    return DT->getRoots();
+  }
+
+  MachineDomTreeNode *getRootNode() const {
+    return DT->getRootNode();
+  }
+
+  MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  bool dominates(const MachineDomTreeNode *A,
+                 const MachineDomTreeNode *B) const {
+    return DT->dominates(A, B);
+  }
+
+  bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
+    return DT->dominates(A, B);
+  }
+
+  bool properlyDominates(const MachineDomTreeNode *A,
+                         const MachineDomTreeNode *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  bool properlyDominates(const MachineBasicBlock *A,
+                         const MachineBasicBlock *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+                                                MachineBasicBlock *B) {
+    return DT->findNearestCommonDominator(A, B);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
+};
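+
+/// Illustrative sketch of querying the analysis from another
+/// MachineFunctionPass (MBBA/MBBB are assumed blocks, and
+/// hoistIntoPostDominator is a hypothetical helper):
+/// \code
+///   auto &PDT = getAnalysis<MachinePostDominatorTree>();
+///   if (PDT.dominates(MBBA, MBBB))
+///     hoistIntoPostDominator();
+/// \endcode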
+} //end of namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h
new file mode 100644
index 0000000..8394b58
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h
@@ -0,0 +1,182 @@
+//===- llvm/CodeGen/MachineRegionInfo.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
+#define LLVM_CODEGEN_MACHINEREGIONINFO_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+struct MachinePostDominatorTree;
+class MachineRegion;
+class MachineRegionNode;
+class MachineRegionInfo;
+
+template <> struct RegionTraits<MachineFunction> {
+  using FuncT = MachineFunction;
+  using BlockT = MachineBasicBlock;
+  using RegionT = MachineRegion;
+  using RegionNodeT = MachineRegionNode;
+  using RegionInfoT = MachineRegionInfo;
+  using DomTreeT = MachineDominatorTree;
+  using DomTreeNodeT = MachineDomTreeNode;
+  using PostDomTreeT = MachinePostDominatorTree;
+  using DomFrontierT = MachineDominanceFrontier;
+  using InstT = MachineInstr;
+  using LoopT = MachineLoop;
+  using LoopInfoT = MachineLoopInfo;
+
+  static unsigned getNumSuccessors(MachineBasicBlock *BB) {
+    return BB->succ_size();
+  }
+};
+
+class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
+public:
+  inline MachineRegionNode(MachineRegion *Parent, MachineBasicBlock *Entry,
+                           bool isSubRegion = false)
+      : RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry,
+                                                      isSubRegion) {}
+
+  bool operator==(const MachineRegion &RN) const {
+    return this == reinterpret_cast<const MachineRegionNode *>(&RN);
+  }
+};
+
+class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
+public:
+  MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
+                MachineRegionInfo *RI, MachineDominatorTree *DT,
+                MachineRegion *Parent = nullptr);
+  ~MachineRegion();
+
+  bool operator==(const MachineRegionNode &RN) const {
+    return &RN == reinterpret_cast<const MachineRegionNode *>(this);
+  }
+};
+
+class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
+public:
+  explicit MachineRegionInfo();
+  ~MachineRegionInfo() override;
+
+  // updateStatistics - Update statistic about created regions.
+  void updateStatistics(MachineRegion *R) final;
+
+  void recalculate(MachineFunction &F, MachineDominatorTree *DT,
+                   MachinePostDominatorTree *PDT, MachineDominanceFrontier *DF);
+};
+
+class MachineRegionInfoPass : public MachineFunctionPass {
+  MachineRegionInfo RI;
+
+public:
+  static char ID;
+
+  explicit MachineRegionInfoPass();
+  ~MachineRegionInfoPass() override;
+
+  MachineRegionInfo &getRegionInfo() { return RI; }
+
+  const MachineRegionInfo &getRegionInfo() const { return RI; }
+
+  /// @name MachineFunctionPass interface
+  //@{
+  bool runOnMachineFunction(MachineFunction &F) override;
+  void releaseMemory() override;
+  void verifyAnalysis() const override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(raw_ostream &OS, const Module *) const override;
+  void dump() const;
+  //@}
+};
+
+template <>
+template <>
+inline MachineBasicBlock *
+RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>()
+    const {
+  assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
+  return getEntry();
+}
+
+template <>
+template <>
+inline MachineRegion *
+RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>()
+    const {
+  assert(isSubRegion() && "This is not a subregion RegionNode!");
+  auto Unconst =
+      const_cast<RegionNodeBase<RegionTraits<MachineFunction>> *>(this);
+  return reinterpret_cast<MachineRegion *>(Unconst);
+}
+
+RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
+RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock,
+                      MachineRegion);
+
+RegionGraphTraits(MachineRegion, MachineRegionNode);
+RegionGraphTraits(const MachineRegion, const MachineRegionNode);
+
+template <>
+struct GraphTraits<MachineRegionInfo *>
+    : public GraphTraits<FlatIt<MachineRegionNode *>> {
+  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
+                                     false, GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(MachineRegionInfo *RI) {
+    return GraphTraits<FlatIt<MachineRegion *>>::getEntryNode(
+        RI->getTopLevelRegion());
+  }
+
+  static nodes_iterator nodes_begin(MachineRegionInfo *RI) {
+    return nodes_iterator::begin(getEntryNode(RI));
+  }
+
+  static nodes_iterator nodes_end(MachineRegionInfo *RI) {
+    return nodes_iterator::end(getEntryNode(RI));
+  }
+};
+
+template <>
+struct GraphTraits<MachineRegionInfoPass *>
+    : public GraphTraits<MachineRegionInfo *> {
+  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
+                                     false, GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::getEntryNode(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_begin(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::nodes_begin(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::nodes_end(&RI->getRegionInfo());
+  }
+};
+
+extern template class RegionBase<RegionTraits<MachineFunction>>;
+extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
+extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEREGIONINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
new file mode 100644
index 0000000..b0dfd02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -0,0 +1,1197 @@
+//===- llvm/CodeGen/MachineRegisterInfo.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
+#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class PSetIterator;
+
+/// Convenient type to represent either a register class or a register bank.
+using RegClassOrRegBank =
+    PointerUnion<const TargetRegisterClass *, const RegisterBank *>;
+
+/// MachineRegisterInfo - Keep track of information for virtual and physical
+/// registers, including vreg register classes, use/def chains for registers,
+/// etc.
+class MachineRegisterInfo {
+public:
+  class Delegate {
+    virtual void anchor();
+
+  public:
+    virtual ~Delegate() = default;
+
+    virtual void MRI_NoteNewVirtualRegister(unsigned Reg) = 0;
+  };
+
+private:
+  MachineFunction *MF;
+  Delegate *TheDelegate = nullptr;
+
+  /// True if subregister liveness is tracked.
+  const bool TracksSubRegLiveness;
+
+  /// VRegInfo - Information we keep for each virtual register.
+  ///
+  /// Each element in this list contains the register class of the vreg and the
+  /// start of the use/def list for the register.
+  IndexedMap<std::pair<RegClassOrRegBank, MachineOperand *>,
+             VirtReg2IndexFunctor>
+      VRegInfo;
+
+  /// Map for recovering vreg name from vreg number.
+  /// This map is used by the MIR Printer.
+  IndexedMap<std::string, VirtReg2IndexFunctor> VReg2Name;
+
+  /// StringSet that is used to unique vreg names.
+  StringSet<> VRegNames;
+
+  /// The flag is true upon \p UpdatedCSRs initialization
+  /// and false otherwise.
+  bool IsUpdatedCSRsInitialized;
+
+  /// Contains the updated callee saved register list.
+  /// As opposed to the static list defined in register info,
+  /// all registers that were disabled are removed from the list.
+  SmallVector<MCPhysReg, 16> UpdatedCSRs;
+
+  /// RegAllocHints - This vector records register allocation hints for
+  /// virtual registers. For each virtual register, it keeps a pair of hint
+  /// type and hints vector making up the allocation hints. Only the first
+  /// hint may be target specific, and in that case this is reflected by the
+  /// first member of the pair being non-zero. If the hinted register is
+  /// virtual, it means the allocator should prefer the physical register
+  /// allocated to it if any.
+  IndexedMap<std::pair<unsigned, SmallVector<unsigned, 4>>,
+             VirtReg2IndexFunctor> RegAllocHints;
+
+  /// PhysRegUseDefLists - This is an array of the head of the use/def list for
+  /// physical registers.
+  std::unique_ptr<MachineOperand *[]> PhysRegUseDefLists;
+
+  /// getRegUseDefListHead - Return the head pointer for the register use/def
+  /// list for the specified virtual or physical register.
+  MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
+    if (TargetRegisterInfo::isVirtualRegister(RegNo))
+      return VRegInfo[RegNo].second;
+    return PhysRegUseDefLists[RegNo];
+  }
+
+  MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
+    if (TargetRegisterInfo::isVirtualRegister(RegNo))
+      return VRegInfo[RegNo].second;
+    return PhysRegUseDefLists[RegNo];
+  }
+
+  /// Get the next element in the use-def chain.
+  static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
+    assert(MO && MO->isReg() && "This is not a register operand!");
+    return MO->Contents.Reg.Next;
+  }
+
+  /// UsedPhysRegMask - Additional used physregs including aliases.
+  /// This bit vector represents all the registers clobbered by function calls.
+  BitVector UsedPhysRegMask;
+
+  /// ReservedRegs - This is a bit vector of reserved registers.  The target
+  /// may change its mind about which registers should be reserved.  This
+  /// vector is the frozen set of reserved registers when register allocation
+  /// started.
+  BitVector ReservedRegs;
+
+  using VRegToTypeMap = DenseMap<unsigned, LLT>;
+  /// Map generic virtual registers to their low-level type (LLT).
+  mutable std::unique_ptr<VRegToTypeMap> VRegToType;
+
+  /// Keep track of the physical registers that are live in to the function.
+  /// Live in values are typically arguments in registers.  LiveIn values are
+  /// allowed to have virtual registers associated with them, stored in the
+  /// second element.
+  std::vector<std::pair<unsigned, unsigned>> LiveIns;
+
+public:
+  explicit MachineRegisterInfo(MachineFunction *MF);
+  MachineRegisterInfo(const MachineRegisterInfo &) = delete;
+  MachineRegisterInfo &operator=(const MachineRegisterInfo &) = delete;
+
+  const TargetRegisterInfo *getTargetRegisterInfo() const {
+    return MF->getSubtarget().getRegisterInfo();
+  }
+
+  void resetDelegate(Delegate *delegate) {
+    // Ensure another delegate does not take over unless the current
+    // delegate first detaches itself. If we ever need to multicast
+    // notifications, we will need to change to using a list.
+    assert(TheDelegate == delegate &&
+           "Only the current delegate can perform reset!");
+    TheDelegate = nullptr;
+  }
+
+  void setDelegate(Delegate *delegate) {
+    assert(delegate && !TheDelegate &&
+           "Attempted to set delegate to null, or to change it without "
+           "first resetting it!");
+
+    TheDelegate = delegate;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Function State
+  //===--------------------------------------------------------------------===//
+
+  // isSSA - Returns true when the machine function is in SSA form. Early
+  // passes require the machine function to be in SSA form where every virtual
+  // register has a single defining instruction.
+  //
+  // The TwoAddressInstructionPass and PHIElimination passes take the machine
+  // function out of SSA form when they introduce multiple defs per virtual
+  // register.
+  bool isSSA() const {
+    return MF->getProperties().hasProperty(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  // leaveSSA - Indicates that the machine function is no longer in SSA form.
+  void leaveSSA() {
+    MF->getProperties().reset(MachineFunctionProperties::Property::IsSSA);
+  }
+
+  /// tracksLiveness - Returns true when tracking register liveness accurately.
+  /// (see MachineFunctionProperties::Property description for details)
+  bool tracksLiveness() const {
+    return MF->getProperties().hasProperty(
+        MachineFunctionProperties::Property::TracksLiveness);
+  }
+
+  /// invalidateLiveness - Indicates that register liveness is no longer being
+  /// tracked accurately.
+  ///
+  /// This should be called by late passes that invalidate the liveness
+  /// information.
+  void invalidateLiveness() {
+    MF->getProperties().reset(
+        MachineFunctionProperties::Property::TracksLiveness);
+  }
+
+  /// Returns true if liveness for register class @p RC should be tracked at
+  /// the subregister level.
+  bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const {
+    return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
+  }
+  bool shouldTrackSubRegLiveness(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Must pass a VReg");
+    return shouldTrackSubRegLiveness(*getRegClass(VReg));
+  }
+  bool subRegLivenessEnabled() const {
+    return TracksSubRegLiveness;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Register Info
+  //===--------------------------------------------------------------------===//
+
+  /// Returns true if the updated CSR list was initialized and false otherwise.
+  bool isUpdatedCSRsInitialized() const { return IsUpdatedCSRsInitialized; }
+
+  /// Disables the register from the list of CSRs.
+  /// I.e. the register will not appear as part of the CSR mask.
+  /// \see UpdatedCalleeSavedRegs.
+  void disableCalleeSavedRegister(unsigned Reg);
+
+  /// Returns list of callee saved registers.
+  /// The function returns the updated CSR list (after taking into account
+  /// registers that are disabled from the CSR list).
+  const MCPhysReg *getCalleeSavedRegs() const;
+
+  /// Sets the updated Callee Saved Registers list.
+  /// Note that it will override any previously disabled/saved CSRs.
+  void setCalleeSavedRegs(ArrayRef<MCPhysReg> CSRs);
+
+  // Strictly for use by MachineInstr.cpp.
+  void addRegOperandToUseList(MachineOperand *MO);
+
+  // Strictly for use by MachineInstr.cpp.
+  void removeRegOperandFromUseList(MachineOperand *MO);
+
+  // Strictly for use by MachineInstr.cpp.
+  void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
+
+  /// Verify the sanity of the use list for Reg.
+  void verifyUseList(unsigned Reg) const;
+
+  /// Verify the use list of all registers.
+  void verifyUseLists() const;
+
+  /// reg_begin/reg_end - Provide iteration support to walk over all definitions
+  /// and uses of a register within the MachineFunction that corresponds to this
+  /// MachineRegisterInfo object.
+  template<bool Uses, bool Defs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_iterator;
+  template<bool Uses, bool Defs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_instr_iterator;
+
+  // Make it a friend so it can access getNextOperandForReg().
+  template<bool, bool, bool, bool, bool, bool>
+    friend class defusechain_iterator;
+  template<bool, bool, bool, bool, bool, bool>
+    friend class defusechain_instr_iterator;
+
+  /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
+  /// register.
+  using reg_iterator =
+      defusechain_iterator<true, true, false, true, false, false>;
+  reg_iterator reg_begin(unsigned RegNo) const {
+    return reg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_iterator reg_end() { return reg_iterator(nullptr); }
+
+  inline iterator_range<reg_iterator>  reg_operands(unsigned Reg) const {
+    return make_range(reg_begin(Reg), reg_end());
+  }
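+
+  /// Illustrative sketch: visiting every operand that reads or writes
+  /// \p Reg (MRI is an assumed MachineRegisterInfo pointer and NumDefs an
+  /// assumed counter in the caller):
+  /// \code
+  ///   for (MachineOperand &MO : MRI->reg_operands(Reg))
+  ///     if (MO.isDef())
+  ///       ++NumDefs;
+  /// \endcode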
+
+  /// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
+  /// of the specified register, stepping by MachineInstr.
+  using reg_instr_iterator =
+      defusechain_instr_iterator<true, true, false, false, true, false>;
+  reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
+    return reg_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_instr_iterator reg_instr_end() {
+    return reg_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_instr_iterator>
+  reg_instructions(unsigned Reg) const {
+    return make_range(reg_instr_begin(Reg), reg_instr_end());
+  }
+
+  /// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
+  /// of the specified register, stepping by bundle.
+  using reg_bundle_iterator =
+      defusechain_instr_iterator<true, true, false, false, false, true>;
+  reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
+    return reg_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_bundle_iterator reg_bundle_end() {
+    return reg_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
+    return make_range(reg_bundle_begin(Reg), reg_bundle_end());
+  }
+
+  /// reg_empty - Return true if there are no instructions using or defining the
+  /// specified register (it may be live-in).
+  bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
+
+  /// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
+  /// of the specified register, skipping those marked as Debug.
+  using reg_nodbg_iterator =
+      defusechain_iterator<true, true, true, true, false, false>;
+  reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
+    return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_nodbg_iterator reg_nodbg_end() {
+    return reg_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_nodbg_iterator>
+  reg_nodbg_operands(unsigned Reg) const {
+    return make_range(reg_nodbg_begin(Reg), reg_nodbg_end());
+  }
+
+  /// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
+  /// all defs and uses of the specified register, stepping by MachineInstr,
+  /// skipping those marked as Debug.
+  using reg_instr_nodbg_iterator =
+      defusechain_instr_iterator<true, true, true, false, true, false>;
+  reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
+    return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
+    return reg_instr_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_instr_nodbg_iterator>
+  reg_nodbg_instructions(unsigned Reg) const {
+    return make_range(reg_instr_nodbg_begin(Reg), reg_instr_nodbg_end());
+  }
+
+  /// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
+  /// all defs and uses of the specified register, stepping by bundle,
+  /// skipping those marked as Debug.
+  using reg_bundle_nodbg_iterator =
+      defusechain_instr_iterator<true, true, true, false, false, true>;
+  reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
+    return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
+    return reg_bundle_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_bundle_nodbg_iterator>
+  reg_nodbg_bundles(unsigned Reg) const {
+    return make_range(reg_bundle_nodbg_begin(Reg), reg_bundle_nodbg_end());
+  }
+
+  /// reg_nodbg_empty - Return true if the only instructions using or defining
+  /// Reg are Debug instructions.
+  bool reg_nodbg_empty(unsigned RegNo) const {
+    return reg_nodbg_begin(RegNo) == reg_nodbg_end();
+  }
+
+  /// def_iterator/def_begin/def_end - Walk all defs of the specified register.
+  using def_iterator =
+      defusechain_iterator<false, true, false, true, false, false>;
+  def_iterator def_begin(unsigned RegNo) const {
+    return def_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_iterator def_end() { return def_iterator(nullptr); }
+
+  inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
+    return make_range(def_begin(Reg), def_end());
+  }
+
+  /// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
+  /// specified register, stepping by MachineInstr.
+  using def_instr_iterator =
+      defusechain_instr_iterator<false, true, false, false, true, false>;
+  def_instr_iterator def_instr_begin(unsigned RegNo) const {
+    return def_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_instr_iterator def_instr_end() {
+    return def_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<def_instr_iterator>
+  def_instructions(unsigned Reg) const {
+    return make_range(def_instr_begin(Reg), def_instr_end());
+  }
+
+  /// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
+  /// specified register, stepping by bundle.
+  using def_bundle_iterator =
+      defusechain_instr_iterator<false, true, false, false, false, true>;
+  def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
+    return def_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_bundle_iterator def_bundle_end() {
+    return def_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
+    return make_range(def_bundle_begin(Reg), def_bundle_end());
+  }
+
+  /// def_empty - Return true if there are no instructions defining the
+  /// specified register (it may be live-in).
+  bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
+
+  StringRef getVRegName(unsigned Reg) const {
+    return VReg2Name.inBounds(Reg) ? StringRef(VReg2Name[Reg]) : "";
+  }
+
+  void insertVRegByName(StringRef Name, unsigned Reg) {
+    assert((Name.empty() || VRegNames.find(Name) == VRegNames.end()) &&
+           "Named VRegs Must be Unique.");
+    if (!Name.empty()) {
+      VRegNames.insert(Name);
+      VReg2Name.grow(Reg);
+      VReg2Name[Reg] = Name.str();
+    }
+  }
+
+  /// Return true if there is exactly one operand defining the specified
+  /// register.
+  bool hasOneDef(unsigned RegNo) const {
+    def_iterator DI = def_begin(RegNo);
+    if (DI == def_end())
+      return false;
+    return ++DI == def_end();
+  }
+
+  /// use_iterator/use_begin/use_end - Walk all uses of the specified register.
+  using use_iterator =
+      defusechain_iterator<true, false, false, true, false, false>;
+  use_iterator use_begin(unsigned RegNo) const {
+    return use_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_iterator use_end() { return use_iterator(nullptr); }
+
+  inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
+    return make_range(use_begin(Reg), use_end());
+  }
+
+  /// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
+  /// specified register, stepping by MachineInstr.
+  using use_instr_iterator =
+      defusechain_instr_iterator<true, false, false, false, true, false>;
+  use_instr_iterator use_instr_begin(unsigned RegNo) const {
+    return use_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_instr_iterator use_instr_end() {
+    return use_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<use_instr_iterator>
+  use_instructions(unsigned Reg) const {
+    return make_range(use_instr_begin(Reg), use_instr_end());
+  }
+
+  /// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
+  /// specified register, stepping by bundle.
+  using use_bundle_iterator =
+      defusechain_instr_iterator<true, false, false, false, false, true>;
+  use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
+    return use_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_bundle_iterator use_bundle_end() {
+    return use_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
+    return make_range(use_bundle_begin(Reg), use_bundle_end());
+  }
+
+  /// use_empty - Return true if there are no instructions using the specified
+  /// register.
+  bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
+
+  /// hasOneUse - Return true if there is exactly one instruction using the
+  /// specified register.
+  bool hasOneUse(unsigned RegNo) const {
+    use_iterator UI = use_begin(RegNo);
+    if (UI == use_end())
+      return false;
+    return ++UI == use_end();
+  }
+
+  /// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
+  /// specified register, skipping those marked as Debug.
+  using use_nodbg_iterator =
+      defusechain_iterator<true, false, true, true, false, false>;
+  use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
+    return use_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_nodbg_iterator use_nodbg_end() {
+    return use_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_nodbg_iterator>
+  use_nodbg_operands(unsigned Reg) const {
+    return make_range(use_nodbg_begin(Reg), use_nodbg_end());
+  }
+
+  /// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
+  /// all uses of the specified register, stepping by MachineInstr, skipping
+  /// those marked as Debug.
+  using use_instr_nodbg_iterator =
+      defusechain_instr_iterator<true, false, true, false, true, false>;
+  use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
+    return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_instr_nodbg_iterator use_instr_nodbg_end() {
+    return use_instr_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_instr_nodbg_iterator>
+  use_nodbg_instructions(unsigned Reg) const {
+    return make_range(use_instr_nodbg_begin(Reg), use_instr_nodbg_end());
+  }
+
+  /// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
+  /// all uses of the specified register, stepping by bundle, skipping
+  /// those marked as Debug.
+  using use_bundle_nodbg_iterator =
+      defusechain_instr_iterator<true, false, true, false, false, true>;
+  use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
+    return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
+    return use_bundle_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_bundle_nodbg_iterator>
+  use_nodbg_bundles(unsigned Reg) const {
+    return make_range(use_bundle_nodbg_begin(Reg), use_bundle_nodbg_end());
+  }
+
+  /// use_nodbg_empty - Return true if there are no non-Debug instructions
+  /// using the specified register.
+  bool use_nodbg_empty(unsigned RegNo) const {
+    return use_nodbg_begin(RegNo) == use_nodbg_end();
+  }
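+
+  // Example (a hedged sketch): the common "dead apart from debug info" check
+  // before erasing a def.  'DefMI' is an assumed name for the instruction
+  // defining 'Reg':
+  //
+  //   if (MRI.use_nodbg_empty(Reg)) {
+  //     MRI.markUsesInDebugValueAsUndef(Reg);  // keep DBG_VALUEs consistent
+  //     DefMI->eraseFromParent();
+  //   }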
+
+  /// hasOneNonDBGUse - Return true if there is exactly one non-Debug
+  /// instruction using the specified register.
+  bool hasOneNonDBGUse(unsigned RegNo) const;
+
+  /// replaceRegWith - Replace all instances of FromReg with ToReg in the
+  /// machine function.  This is like llvm-level X->replaceAllUsesWith(Y),
+  /// except that it also changes any definitions of the register as well.
+  ///
+  /// Note that it is usually necessary to first constrain ToReg's register
+  /// class and register bank to match the FromReg constraints using one of the
+  /// methods:
+  ///
+  ///   constrainRegClass(ToReg, getRegClass(FromReg))
+  ///   constrainRegAttrs(ToReg, FromReg)
+  ///   RegisterBankInfo::constrainGenericRegister(ToReg,
+  ///       *MRI.getRegClass(FromReg), MRI)
+  ///
+  /// These functions return a null pointer or false, respectively, if the
+  /// virtual registers have incompatible constraints.
+  ///
+  /// Note that if ToReg is a physical register, any subregister indices on
+  /// the rewritten operands are applied to ToReg in order to obtain the
+  /// final/proper physical register.
+  void replaceRegWith(unsigned FromReg, unsigned ToReg);
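+
+  // Example (a hedged sketch): coalescing one virtual register into another,
+  // following the constraint note above.  Both registers are assumed virtual:
+  //
+  //   if (MRI.constrainRegClass(ToReg, MRI.getRegClass(FromReg)))
+  //     MRI.replaceRegWith(FromReg, ToReg);  // every def/use now names ToReg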
+
+  /// getVRegDef - Return the machine instr that defines the specified virtual
+  /// register or null if none is found.  This assumes that the code is in SSA
+  /// form, so there should only be one definition.
+  MachineInstr *getVRegDef(unsigned Reg) const;
+
+  /// getUniqueVRegDef - Return the unique machine instr that defines the
+  /// specified virtual register or null if none is found.  If there are
+  /// multiple definitions or no definition, return null.
+  MachineInstr *getUniqueVRegDef(unsigned Reg) const;
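+
+  // Example (a hedged sketch): walking from a use to its SSA def, e.g. to
+  // look through a COPY.  'Reg' is assumed to be a virtual register:
+  //
+  //   if (MachineInstr *DefMI = MRI.getVRegDef(Reg))
+  //     if (DefMI->isCopy())
+  //       Reg = DefMI->getOperand(1).getReg();  // the COPY source operand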
+
+  /// clearKillFlags - Iterate over all the uses of the given register and
+  /// clear the kill flag from the MachineOperand. This function is used by
+  /// optimization passes which extend register lifetimes and need only
+  /// preserve conservative kill flag information.
+  void clearKillFlags(unsigned Reg) const;
+
+  void dumpUses(unsigned RegNo) const;
+
+  /// Returns true if PhysReg is unallocatable and constant throughout the
+  /// function. Writing to a constant register has no effect.
+  bool isConstantPhysReg(unsigned PhysReg) const;
+
+  /// Returns true if either isConstantPhysReg or TRI->isCallerPreservedPhysReg
+  /// returns true. This is a utility member function.
+  bool isCallerPreservedOrConstPhysReg(unsigned PhysReg) const;
+
+  /// Get an iterator over the pressure sets affected by the given physical or
+  /// virtual register. If RegUnit is physical, it must be a register unit (from
+  /// MCRegUnitIterator).
+  PSetIterator getPressureSets(unsigned RegUnit) const;
+
+  //===--------------------------------------------------------------------===//
+  // Virtual Register Info
+  //===--------------------------------------------------------------------===//
+
+  /// Return the register class of the specified virtual register.
+  /// This shouldn't be used directly unless \p Reg has a register class.
+  /// \see getRegClassOrNull when this might happen.
+  const TargetRegisterClass *getRegClass(unsigned Reg) const {
+    assert(VRegInfo[Reg].first.is<const TargetRegisterClass *>() &&
+           "Register class not set, wrong accessor");
+    return VRegInfo[Reg].first.get<const TargetRegisterClass *>();
+  }
+
+  /// Return the register class of \p Reg, or null if Reg has not been assigned
+  /// a register class yet.
+  ///
+  /// \note A null register class can only happen when these two
+  /// conditions are met:
+  /// 1. Generic virtual registers are created.
+  /// 2. The machine function has not yet gone completely through the
+  ///    instruction selection process.
+  /// Neither condition is possible without GlobalISel for now.
+  /// In other words, if GlobalISel is not used or if the query happens after
+  /// the select pass, using getRegClass is safe.
+  const TargetRegisterClass *getRegClassOrNull(unsigned Reg) const {
+    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
+    return Val.dyn_cast<const TargetRegisterClass *>();
+  }
+
+  /// Return the register bank of \p Reg, or null if Reg has not been assigned
+  /// a register bank or has been assigned a register class.
+  /// \note It is possible to get the register bank from the register class via
+  /// RegisterBankInfo::getRegBankFromRegClass.
+  const RegisterBank *getRegBankOrNull(unsigned Reg) const {
+    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
+    return Val.dyn_cast<const RegisterBank *>();
+  }
+
+  /// Return the register bank or register class of \p Reg.
+  /// \note Before the register bank gets assigned (i.e., before the
+  /// RegBankSelect pass) \p Reg may not have either.
+  const RegClassOrRegBank &getRegClassOrRegBank(unsigned Reg) const {
+    return VRegInfo[Reg].first;
+  }
+
+  /// setRegClass - Set the register class of the specified virtual register.
+  void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
+
+  /// Set the register bank to \p RegBank for \p Reg.
+  void setRegBank(unsigned Reg, const RegisterBank &RegBank);
+
+  void setRegClassOrRegBank(unsigned Reg,
+                            const RegClassOrRegBank &RCOrRB){
+    VRegInfo[Reg].first = RCOrRB;
+  }
+
+  /// constrainRegClass - Constrain the register class of the specified virtual
+  /// register to be a common subclass of RC and the current register class,
+  /// but only if the new class has at least MinNumRegs registers.  Return the
+  /// new register class, or NULL if no such class exists.
+  /// This should only be used when the constraint is known to be trivial, like
+  /// GR32 -> GR32_NOSP. Beware of increasing register pressure.
+  ///
+  /// \note Assumes that the register has a register class assigned.
+  /// Use RegisterBankInfo::constrainGenericRegister in GlobalISel's
+  /// InstructionSelect pass and constrainRegAttrs in every other pass,
+  /// including non-select passes of GlobalISel, instead.
+  const TargetRegisterClass *constrainRegClass(unsigned Reg,
+                                               const TargetRegisterClass *RC,
+                                               unsigned MinNumRegs = 0);
+
+  /// Constrain the register class or the register bank of the virtual register
+  /// \p Reg to be a common subclass and a common bank of both registers
+  /// provided respectively. Do nothing if any of the attributes (classes,
+  /// banks, or low-level types) of the registers are deemed incompatible, or if
+  /// the resulting register will have a class smaller than before and of size
+  /// less than \p MinNumRegs. Return true if such register attributes exist,
+  /// false otherwise.
+  ///
+  /// \note Assumes that each register has either a low-level type or a class
+  /// assigned, but not both. Use this method instead of constrainRegClass and
+  /// RegisterBankInfo::constrainGenericRegister everywhere but SelectionDAG
+  /// ISel / FastISel and GlobalISel's InstructionSelect pass respectively.
+  bool constrainRegAttrs(unsigned Reg, unsigned ConstrainingReg,
+                         unsigned MinNumRegs = 0);
+
+  /// recomputeRegClass - Try to find a legal super-class of Reg's register
+  /// class that still satisfies the constraints from the instructions using
+  /// Reg.  Returns true if Reg was upgraded.
+  ///
+  /// This method can be used after constraints have been removed from a
+  /// virtual register, for example after removing instructions or splitting
+  /// the live range.
+  bool recomputeRegClass(unsigned Reg);
+
+  /// createVirtualRegister - Create and return a new virtual register in the
+  /// function with the specified register class.
+  unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
+
+  /// Accessor for VRegToType. This accessor should only be used
+  /// by global-isel related work.
+  VRegToTypeMap &getVRegToType() const {
+    if (!VRegToType)
+      VRegToType.reset(new VRegToTypeMap);
+    return *VRegToType.get();
+  }
+
+  /// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
+  /// (target independent) virtual register.
+  LLT getType(unsigned VReg) const;
+
+  /// Set the low-level type of \p VReg to \p Ty.
+  void setType(unsigned VReg, LLT Ty);
+
+  /// Create and return a new generic virtual register with low-level
+  /// type \p Ty.
+  unsigned createGenericVirtualRegister(LLT Ty);
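+
+  // Example (a hedged sketch, GlobalISel): creating a generic vreg that
+  // carries a 32-bit scalar low-level type instead of a register class:
+  //
+  //   unsigned GVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));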
+
+  /// Remove all types associated to virtual registers (after instruction
+  /// selection and constraining of all generic virtual registers).
+  void clearVirtRegTypes();
+
+  /// Creates a new virtual register that has no register class, register bank
+  /// or size assigned yet. This is only allowed to be used
+  /// temporarily while constructing machine instructions. Most operations are
+  /// undefined on an incomplete register until one of setRegClass(),
+  /// setRegBank() or setType() has been called on it.
+  unsigned createIncompleteVirtualRegister(StringRef Name = "");
+
+  /// getNumVirtRegs - Return the number of virtual registers created.
+  unsigned getNumVirtRegs() const { return VRegInfo.size(); }
+
+  /// clearVirtRegs - Remove all virtual registers (after physreg assignment).
+  void clearVirtRegs();
+
+  /// setRegAllocationHint - Specify a register allocation hint for the
+  /// specified virtual register. This is typically used by the target, and
+  /// any earlier hint will be overwritten.
+  void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    RegAllocHints[VReg].first  = Type;
+    RegAllocHints[VReg].second.clear();
+    RegAllocHints[VReg].second.push_back(PrefReg);
+  }
+
+  /// addRegAllocationHint - Add a register allocation hint to the hints
+  /// vector for VReg.
+  void addRegAllocationHint(unsigned VReg, unsigned PrefReg) {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    RegAllocHints[VReg].second.push_back(PrefReg);
+  }
+
+  /// Specify the preferred (target independent) register allocation hint for
+  /// the specified virtual register.
+  void setSimpleHint(unsigned VReg, unsigned PrefReg) {
+    setRegAllocationHint(VReg, /*Type=*/0, PrefReg);
+  }
+
+  void clearSimpleHint(unsigned VReg) {
+    assert(RegAllocHints[VReg].first == 0 &&
+            "Expected to clear a non-target hint!");
+    RegAllocHints[VReg].second.clear();
+  }
+
+  /// getRegAllocationHint - Return the register allocation hint for the
+  /// specified virtual register. If there are many hints, this returns the
+  /// one with the greatest weight.
+  std::pair<unsigned, unsigned>
+  getRegAllocationHint(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    unsigned BestHint = (RegAllocHints[VReg].second.size() ?
+                         RegAllocHints[VReg].second[0] : 0);
+    return std::pair<unsigned, unsigned>(RegAllocHints[VReg].first, BestHint);
+  }
+
+  /// getSimpleHint - Same as getRegAllocationHint, except it only returns a
+  /// target-independent hint.
+  unsigned getSimpleHint(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    std::pair<unsigned, unsigned> Hint = getRegAllocationHint(VReg);
+    return Hint.first ? 0 : Hint.second;
+  }
+
+  /// getRegAllocationHints - Return a reference to the vector of all
+  /// register allocation hints for VReg.
+  const std::pair<unsigned, SmallVector<unsigned, 4>>
+  &getRegAllocationHints(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    return RegAllocHints[VReg];
+  }
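+
+  // Example (a hedged sketch): recording a plain copy hint so the allocator
+  // prefers 'PhysReg' for the virtual register 'VReg', then reading it back:
+  //
+  //   MRI.setSimpleHint(VReg, PhysReg);
+  //   unsigned Hint = MRI.getSimpleHint(VReg);  // == PhysReg (Type 0 hint)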
+
+  /// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
+  /// specified register as undefined which causes the DBG_VALUE to be
+  /// deleted during LiveDebugVariables analysis.
+  void markUsesInDebugValueAsUndef(unsigned Reg) const;
+
+  /// Return true if the specified register is modified in this function.
+  /// This checks that no defining machine operands exist for the register or
+  /// any of its aliases. Definitions found in functions marked noreturn are
+  /// ignored by default; to take them into account, pass 'true' for the
+  /// optional parameter SkipNoReturnDef. The register is also considered
+  /// modified when it is set in the UsedPhysRegMask.
+  bool isPhysRegModified(unsigned PhysReg, bool SkipNoReturnDef = false) const;
+
+  /// Return true if the specified register is modified or read in this
+  /// function. This checks that no machine operands exist for the register or
+  /// any of its aliases. The register is also considered used when it is set
+  /// in the UsedPhysRegMask.
+  bool isPhysRegUsed(unsigned PhysReg) const;
+
+  /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
+  /// This corresponds to the bit mask attached to register mask operands.
+  void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
+    UsedPhysRegMask.setBitsNotInMask(RegMask);
+  }
+
+  const BitVector &getUsedPhysRegsMask() const { return UsedPhysRegMask; }
+
+  //===--------------------------------------------------------------------===//
+  // Reserved Register Info
+  //===--------------------------------------------------------------------===//
+  //
+  // The set of reserved registers must be invariant during register
+  // allocation.  For example, the target cannot suddenly decide it needs a
+  // frame pointer when the register allocator has already used the frame
+  // pointer register for something else.
+  //
+  // These methods can be used by target hooks like hasFP() to avoid changing
+  // the reserved register set during register allocation.
+
+  /// freezeReservedRegs - Called by the register allocator to freeze the set
+  /// of reserved registers before allocation begins.
+  void freezeReservedRegs(const MachineFunction&);
+
+  /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
+  /// to ensure the set of reserved registers stays constant.
+  bool reservedRegsFrozen() const {
+    return !ReservedRegs.empty();
+  }
+
+  /// canReserveReg - Returns true if PhysReg can be used as a reserved
+  /// register.  Any register can be reserved before freezeReservedRegs() is
+  /// called.
+  bool canReserveReg(unsigned PhysReg) const {
+    return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
+  }
+
+  /// getReservedRegs - Returns a reference to the frozen set of reserved
+  /// registers. This method should always be preferred to calling
+  /// TRI::getReservedRegs() when possible.
+  const BitVector &getReservedRegs() const {
+    assert(reservedRegsFrozen() &&
+           "Reserved registers haven't been frozen yet. "
+           "Use TRI::getReservedRegs().");
+    return ReservedRegs;
+  }
+
+  /// isReserved - Returns true when PhysReg is a reserved register.
+  ///
+  /// Reserved registers may belong to an allocatable register class, but the
+  /// target has explicitly requested that they are not used.
+  bool isReserved(unsigned PhysReg) const {
+    return getReservedRegs().test(PhysReg);
+  }
+
+  /// Returns true when the given register unit is considered reserved.
+  ///
+  /// Register units are considered reserved when for at least one of their
+  /// root registers, the root register and all super registers are reserved.
+  /// This currently iterates the register hierarchy and may be slower than
+  /// expected.
+  bool isReservedRegUnit(unsigned Unit) const;
+
+  /// isAllocatable - Returns true when PhysReg belongs to an allocatable
+  /// register class and it hasn't been reserved.
+  ///
+  /// Allocatable registers may show up in the allocation order of some virtual
+  /// register, so a register allocator needs to track its liveness and
+  /// availability.
+  bool isAllocatable(unsigned PhysReg) const {
+    return getTargetRegisterInfo()->isInAllocatableClass(PhysReg) &&
+      !isReserved(PhysReg);
+  }
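+
+  // Example (a hedged sketch): a target hook such as hasFP() can key its
+  // answer off this frozen-set API instead of flip-flopping mid-allocation.
+  // 'FPReg' is an assumed name for the frame pointer register:
+  //
+  //   bool MayReserveFP = MRI.canReserveReg(FPReg);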
+
+  //===--------------------------------------------------------------------===//
+  // LiveIn Management
+  //===--------------------------------------------------------------------===//
+
+  /// addLiveIn - Add the specified register as a live-in.  Note that it
+  /// is an error to add the same register to the same set more than once.
+  void addLiveIn(unsigned Reg, unsigned vreg = 0) {
+    LiveIns.push_back(std::make_pair(Reg, vreg));
+  }
+
+  // Iteration support for the live-ins set.  It's kept in sorted order
+  // by register number.
+  using livein_iterator =
+      std::vector<std::pair<unsigned,unsigned>>::const_iterator;
+  livein_iterator livein_begin() const { return LiveIns.begin(); }
+  livein_iterator livein_end()   const { return LiveIns.end(); }
+  bool            livein_empty() const { return LiveIns.empty(); }
+
+  ArrayRef<std::pair<unsigned, unsigned>> liveins() const {
+    return LiveIns;
+  }
+
+  bool isLiveIn(unsigned Reg) const;
+
+  /// getLiveInPhysReg - If VReg is a live-in virtual register, return the
+  /// corresponding live-in physical register.
+  unsigned getLiveInPhysReg(unsigned VReg) const;
+
+  /// getLiveInVirtReg - If PReg is a live-in physical register, return the
+  /// corresponding live-in physical register.
+  unsigned getLiveInVirtReg(unsigned PReg) const;
+
+  /// EmitLiveInCopies - Emit copies to initialize livein virtual registers
+  /// into the given entry block.
+  void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
+                        const TargetRegisterInfo &TRI,
+                        const TargetInstrInfo &TII);
+
+  /// Returns a mask covering all bits that can appear in lane masks of
+  /// subregisters of the virtual register @p Reg.
+  LaneBitmask getMaxLaneMaskForVReg(unsigned Reg) const;
+
+  /// defusechain_iterator - This class provides iterator support for machine
+  /// operands in the function that use or define a specific register.  If
+  /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
+  /// returns defs.  If neither is true, it always
+  /// returns end().  If SkipDebug is true it skips uses marked Debug
+  /// when incrementing.
+  template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_iterator
+    : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+    friend class MachineRegisterInfo;
+
+    MachineOperand *Op = nullptr;
+
+    explicit defusechain_iterator(MachineOperand *op) : Op(op) {
+      // If the first node isn't one we're interested in, advance to one that
+      // we are interested in.
+      if (op) {
+        if ((!ReturnUses && op->isUse()) ||
+            (!ReturnDefs && op->isDef()) ||
+            (SkipDebug && op->isDebug()))
+          advance();
+      }
+    }
+
+    void advance() {
+      assert(Op && "Cannot increment end iterator!");
+      Op = getNextOperandForReg(Op);
+
+      // All defs come before the uses, so stop def_iterator early.
+      if (!ReturnUses) {
+        if (Op) {
+          if (Op->isUse())
+            Op = nullptr;
+          else
+            assert(!Op->isDebug() && "Can't have debug defs");
+        }
+      } else {
+        // If this is an operand we don't care about, skip it.
+        while (Op && ((!ReturnDefs && Op->isDef()) ||
+                      (SkipDebug && Op->isDebug())))
+          Op = getNextOperandForReg(Op);
+      }
+    }
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    MachineInstr, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  MachineInstr, ptrdiff_t>::pointer;
+
+    defusechain_iterator() = default;
+
+    bool operator==(const defusechain_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const defusechain_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// atEnd - return true if this iterator is equal to reg_end() on the value.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only
+    defusechain_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      if (ByOperand)
+        advance();
+      else if (ByInstr) {
+        MachineInstr *P = Op->getParent();
+        do {
+          advance();
+        } while (Op && Op->getParent() == P);
+      } else if (ByBundle) {
+        MachineBasicBlock::instr_iterator P =
+            getBundleStart(Op->getParent()->getIterator());
+        do {
+          advance();
+        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
+      }
+
+      return *this;
+    }
+    defusechain_iterator operator++(int) {        // Postincrement
+      defusechain_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    /// getOperandNo - Return the operand # of this MachineOperand in its
+    /// MachineInstr.
+    unsigned getOperandNo() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op - &Op->getParent()->getOperand(0);
+    }
+
+    // Retrieve a reference to the current operand.
+    MachineOperand &operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return *Op;
+    }
+
+    MachineOperand *operator->() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op;
+    }
+  };
+
+  /// defusechain_instr_iterator - This class provides iterator support for
+  /// machine operands in the function that use or define a specific register.
+  /// Unlike defusechain_iterator, dereferencing yields the owning
+  /// MachineInstr.  If ReturnUses is true it returns uses of registers, if
+  /// ReturnDefs is true it returns defs.  If neither is true, it always
+  /// returns end().  If SkipDebug is true it skips uses marked Debug
+  /// when incrementing.
+  template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_instr_iterator
+    : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+    friend class MachineRegisterInfo;
+
+    MachineOperand *Op = nullptr;
+
+    explicit defusechain_instr_iterator(MachineOperand *op) : Op(op) {
+      // If the first node isn't one we're interested in, advance to one that
+      // we are interested in.
+      if (op) {
+        if ((!ReturnUses && op->isUse()) ||
+            (!ReturnDefs && op->isDef()) ||
+            (SkipDebug && op->isDebug()))
+          advance();
+      }
+    }
+
+    void advance() {
+      assert(Op && "Cannot increment end iterator!");
+      Op = getNextOperandForReg(Op);
+
+      // All defs come before the uses, so stop def_iterator early.
+      if (!ReturnUses) {
+        if (Op) {
+          if (Op->isUse())
+            Op = nullptr;
+          else
+            assert(!Op->isDebug() && "Can't have debug defs");
+        }
+      } else {
+        // If this is an operand we don't care about, skip it.
+        while (Op && ((!ReturnDefs && Op->isDef()) ||
+                      (SkipDebug && Op->isDebug())))
+          Op = getNextOperandForReg(Op);
+      }
+    }
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    MachineInstr, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  MachineInstr, ptrdiff_t>::pointer;
+
+    defusechain_instr_iterator() = default;
+
+    bool operator==(const defusechain_instr_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const defusechain_instr_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// atEnd - return true if this iterator is equal to reg_end() on the value.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only
+    defusechain_instr_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      if (ByOperand)
+        advance();
+      else if (ByInstr) {
+        MachineInstr *P = Op->getParent();
+        do {
+          advance();
+        } while (Op && Op->getParent() == P);
+      } else if (ByBundle) {
+        MachineBasicBlock::instr_iterator P =
+            getBundleStart(Op->getParent()->getIterator());
+        do {
+          advance();
+        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
+      }
+
+      return *this;
+    }
+    defusechain_instr_iterator operator++(int) {        // Postincrement
+      defusechain_instr_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    // Retrieve a reference to the current operand.
+    MachineInstr &operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      if (ByBundle)
+        return *getBundleStart(Op->getParent()->getIterator());
+      return *Op->getParent();
+    }
+
+    MachineInstr *operator->() const { return &operator*(); }
+  };
+};
+
+/// Iterate over the pressure sets affected by the given physical or virtual
+/// register. If Reg is physical, it must be a register unit (from
+/// MCRegUnitIterator).
+class PSetIterator {
+  const int *PSet = nullptr;
+  unsigned Weight = 0;
+
+public:
+  PSetIterator() = default;
+
+  PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
+    const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+    if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
+      const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
+      PSet = TRI->getRegClassPressureSets(RC);
+      Weight = TRI->getRegClassWeight(RC).RegWeight;
+    } else {
+      PSet = TRI->getRegUnitPressureSets(RegUnit);
+      Weight = TRI->getRegUnitWeight(RegUnit);
+    }
+    if (*PSet == -1)
+      PSet = nullptr;
+  }
+
+  bool isValid() const { return PSet; }
+
+  unsigned getWeight() const { return Weight; }
+
+  unsigned operator*() const { return *PSet; }
+
+  void operator++() {
+    assert(isValid() && "Invalid PSetIterator.");
+    ++PSet;
+    if (*PSet == -1)
+      PSet = nullptr;
+  }
+};
+
+inline PSetIterator MachineRegisterInfo::
+getPressureSets(unsigned RegUnit) const {
+  return PSetIterator(RegUnit, this);
+}
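+
+// Example (a hedged sketch): accumulating a virtual register's weight into
+// every pressure set it affects.  'Pressure' is an assumed vector indexed by
+// pressure set ID:
+//
+//   for (PSetIterator PSetI = MRI.getPressureSets(VReg); PSetI.isValid();
+//        ++PSetI)
+//     Pressure[*PSetI] += PSetI.getWeight();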
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEREGISTERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
new file mode 100644
index 0000000..b5ea208
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -0,0 +1,113 @@
+//===- MachineSSAUpdater.h - Unstructured SSA Update Tool -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachineSSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
+#define LLVM_CODEGEN_MACHINESSAUPDATER_H
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+template<typename T> class SmallVectorImpl;
+template<typename T> class SSAUpdaterTraits;
+
+/// MachineSSAUpdater - This class updates SSA form for a set of virtual
+/// registers defined in multiple blocks.  This is used when code duplication
+/// or another unstructured transformation wants to rewrite a set of uses of one
+/// vreg with uses of a set of vregs.
+class MachineSSAUpdater {
+  friend class SSAUpdaterTraits<MachineSSAUpdater>;
+
+private:
+  /// AvailableVals - This keeps track of which value to use on a per-block
+  /// basis.  When we insert PHI nodes, we keep track of them here.
+  //typedef DenseMap<MachineBasicBlock*, unsigned > AvailableValsTy;
+  void *AV = nullptr;
+
+  /// VR - Current virtual register whose uses are being updated.
+  unsigned VR;
+
+  /// VRC - Register class of the current virtual register.
+  const TargetRegisterClass *VRC;
+
+  /// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
+  /// nodes that it creates to the vector.
+  SmallVectorImpl<MachineInstr*> *InsertedPHIs;
+
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo *MRI;
+
+public:
+  /// MachineSSAUpdater constructor.  If InsertedPHIs is specified, it will be
+  /// filled in with all PHI Nodes created by rewriting.
+  explicit MachineSSAUpdater(MachineFunction &MF,
+                        SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
+  MachineSSAUpdater(const MachineSSAUpdater &) = delete;
+  MachineSSAUpdater &operator=(const MachineSSAUpdater &) = delete;
+  ~MachineSSAUpdater();
+
+  /// Initialize - Reset this object to get ready for a new set of SSA
+  /// updates.
+  void Initialize(unsigned V);
+
+  /// AddAvailableValue - Indicate that a rewritten value is available at the
+  /// end of the specified block with the specified value.
+  void AddAvailableValue(MachineBasicBlock *BB, unsigned V);
+
+  /// HasValueForBlock - Return true if the MachineSSAUpdater already has a
+  /// value for the specified block.
+  bool HasValueForBlock(MachineBasicBlock *BB) const;
+
+  /// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
+  /// live at the end of the specified block.
+  unsigned GetValueAtEndOfBlock(MachineBasicBlock *BB);
+
+  /// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
+  /// is live in the middle of the specified block.
+  ///
+  /// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
+  /// important case: if there is a definition of the rewritten value after the
+  /// 'use' in BB.  Consider code like this:
+  ///
+  ///      X1 = ...
+  ///   SomeBB:
+  ///      use(X)
+  ///      X2 = ...
+  ///      br Cond, SomeBB, OutBB
+  ///
+  /// In this case, there are two values (X1 and X2) added to the AvailableVals
+  /// set by the client of the rewriter, and those values are both live out of
+  /// their respective blocks.  However, the use of X happens in the *middle* of
+  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
+  /// merge the appropriate values, and this value isn't live out of the block.
+  unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB);
+
+  /// RewriteUse - Rewrite a use of the symbolic value.  This handles PHI nodes,
+  /// which use their value in the corresponding predecessor.  Note that this
+  /// will not work if the use is supposed to be rewritten to a value defined in
+  /// the same block as the use, but above it.  Any 'AddAvailableValue's added
+  /// for the use's block will be considered to be below it.
+  void RewriteUse(MachineOperand &U);
+
+private:
+  unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
+};
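+
+// Example (a hedged sketch of typical use): after duplication gives 'OldReg'
+// new values 'NewRegA' in block 'BBA' and 'NewRegB' in block 'BBB' (all names
+// assumed), rewrite the remaining uses.  Uses are collected first because
+// RewriteUse mutates the use list being walked:
+//
+//   MachineSSAUpdater SSAUpdate(MF);
+//   SSAUpdate.Initialize(OldReg);
+//   SSAUpdate.AddAvailableValue(BBA, NewRegA);
+//   SSAUpdate.AddAvailableValue(BBB, NewRegB);
+//   SmallVector<MachineOperand *, 8> Uses;
+//   for (MachineOperand &MO : MRI.use_operands(OldReg))
+//     Uses.push_back(&MO);
+//   for (MachineOperand *MO : Uses)
+//     SSAUpdate.RewriteUse(*MO);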
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESSAUPDATER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
new file mode 100644
index 0000000..e327881
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
@@ -0,0 +1,1056 @@
+//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an interface for customizing the standard MachineScheduler
+// pass. Note that the entire pass may be replaced as follows:
+//
+// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
+//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
+//   ...}
+//
+// The MachineScheduler pass is only responsible for choosing the regions to be
+// scheduled. Targets can override the DAG builder and scheduler without
+// replacing the pass as follows:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   return new CustomMachineScheduler(C);
+// }
+//
+// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
+// scheduling while updating the instruction stream, register pressure, and live
+// intervals. Most targets don't need to override the DAG builder and list
+// scheduler, but subtargets that require custom scheduling heuristics may
+// plug in an alternate MachineSchedStrategy. The strategy is responsible for
+// selecting the highest priority node from the list:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   return new ScheduleDAGMILive(C, CustomStrategy(C));
+// }
+//
+// The DAG builder can also be customized by adding DAG mutations that run
+// after DAG building and before list scheduling. DAG mutations can adjust
+// dependencies based on target-specific knowledge or add weak edges
+// to aid heuristics:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
+//   DAG->addMutation(new CustomDAGMutation(...));
+//   return DAG;
+// }
+//
+// A target that supports alternative schedulers can use the
+// MachineSchedRegistry to allow command line selection. This can be done by
+// implementing the following boilerplate:
+//
+// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
+//  return new CustomMachineScheduler(C);
+// }
+// static MachineSchedRegistry
+// SchedCustomRegistry("custom", "Run my target's custom scheduler",
+//                     createCustomMachineSched);
+//
+//
+// Finally, subtargets that don't need to implement custom heuristics but would
+// like to configure the GenericScheduler's policy for a given scheduler region,
+// including scheduling direction and register pressure tracking policy, can do
+// this:
+//
+// void <SubTarget>Subtarget::
+// overrideSchedPolicy(MachineSchedPolicy &Policy,
+//                     unsigned NumRegionInstrs) const {
+//   Policy.<Flag> = true;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
+#define LLVM_CODEGEN_MACHINESCHEDULER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+extern cl::opt<bool> ForceTopDown;
+extern cl::opt<bool> ForceBottomUp;
+
+class LiveIntervals;
+class MachineDominatorTree;
+class MachineFunction;
+class MachineInstr;
+class MachineLoopInfo;
+class RegisterClassInfo;
+class SchedDFSResult;
+class ScheduleHazardRecognizer;
+class TargetInstrInfo;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// MachineSchedContext provides enough context from the MachineScheduler pass
+/// for the target to instantiate a scheduler.
+struct MachineSchedContext {
+  MachineFunction *MF = nullptr;
+  const MachineLoopInfo *MLI = nullptr;
+  const MachineDominatorTree *MDT = nullptr;
+  const TargetPassConfig *PassConfig = nullptr;
+  AliasAnalysis *AA = nullptr;
+  LiveIntervals *LIS = nullptr;
+
+  RegisterClassInfo *RegClassInfo;
+
+  MachineSchedContext();
+  virtual ~MachineSchedContext();
+};
+
+/// MachineSchedRegistry provides a selection of available machine instruction
+/// schedulers.
+class MachineSchedRegistry : public MachinePassRegistryNode {
+public:
+  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);
+
+  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
+  using FunctionPassCtor = ScheduleDAGCtor;
+
+  static MachinePassRegistry Registry;
+
+  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
+    : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+    Registry.Add(this);
+  }
+
+  ~MachineSchedRegistry() { Registry.Remove(this); }
+
+  // Accessors.
+  //
+  MachineSchedRegistry *getNext() const {
+    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
+  }
+
+  static MachineSchedRegistry *getList() {
+    return (MachineSchedRegistry *)Registry.getList();
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
+
+class ScheduleDAGMI;
+
+/// Define a generic scheduling policy for targets that don't provide their own
+/// MachineSchedStrategy. This can be overridden for each scheduling region
+/// before building the DAG.
+struct MachineSchedPolicy {
+  // Allow the scheduler to disable register pressure tracking.
+  bool ShouldTrackPressure = false;
+  /// Track LaneMasks to allow reordering of independent subregister writes
+  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
+  bool ShouldTrackLaneMasks = false;
+
+  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
+  // is true, the scheduler runs in both directions and converges.
+  bool OnlyTopDown = false;
+  bool OnlyBottomUp = false;
+
+  // Disable the heuristic that tries to fetch nodes from long dependency
+  // chains first.
+  bool DisableLatencyHeuristic = false;
+
+  MachineSchedPolicy() = default;
+};
+
+/// MachineSchedStrategy - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Initialization sequence:
+///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
+class MachineSchedStrategy {
+  virtual void anchor();
+
+public:
+  virtual ~MachineSchedStrategy() = default;
+
+  /// Optionally override the per-region scheduling policy.
+  virtual void initPolicy(MachineBasicBlock::iterator Begin,
+                          MachineBasicBlock::iterator End,
+                          unsigned NumRegionInstrs) {}
+
+  virtual void dumpPolicy() const {}
+
+  /// Check if pressure tracking is needed before building the DAG and
+  /// initializing this strategy. Called after initPolicy.
+  virtual bool shouldTrackPressure() const { return true; }
+
+  /// Returns true if lanemasks should be tracked. LaneMask tracking is
+  /// necessary to reorder independent subregister defs for the same vreg.
+  /// This has to be enabled in combination with shouldTrackPressure().
+  virtual bool shouldTrackLaneMasks() const { return false; }
+
+  // If this method returns true, handling of the scheduling regions
+  // themselves (in case of a scheduling boundary in MBB) will be done
+  // beginning with the topmost region of MBB.
+  virtual bool doMBBSchedRegionsTopDown() const { return false; }
+
+  /// Initialize the strategy after building the DAG for a new region.
+  virtual void initialize(ScheduleDAGMI *DAG) = 0;
+
+  /// Tell the strategy that MBB is about to be processed.
+  virtual void enterMBB(MachineBasicBlock *MBB) {}
+
+  /// Tell the strategy that current MBB is done.
+  virtual void leaveMBB() {}
+
+  /// Notify this strategy that all roots have been released (including those
+  /// that depend on EntrySU or ExitSU).
+  virtual void registerRoots() {}
+
+  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
+  /// schedule the node at the top of the unscheduled region. Otherwise it will
+  /// be scheduled at the bottom.
+  virtual SUnit *pickNode(bool &IsTopNode) = 0;
+
+  /// \brief Scheduler callback to notify that a new subtree is scheduled.
+  virtual void scheduleTree(unsigned SubtreeID) {}
+
+  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
+  /// instruction and updated scheduled/remaining flags in the DAG nodes.
+  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
+
+  /// When all predecessor dependencies have been resolved, free this node for
+  /// top-down scheduling.
+  virtual void releaseTopNode(SUnit *SU) = 0;
+
+  /// When all successor dependencies have been resolved, free this node for
+  /// bottom-up scheduling.
+  virtual void releaseBottomNode(SUnit *SU) = 0;
+};
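+
+// A minimal strategy sketch (hedged; 'BottomUpStrategy' is illustrative, not
+// part of LLVM): schedule purely bottom-up in release order by overriding
+// only the pure-virtual hooks:
+//
+//   struct BottomUpStrategy : MachineSchedStrategy {
+//     std::vector<SUnit *> Ready;
+//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
+//     SUnit *pickNode(bool &IsTopNode) override {
+//       if (Ready.empty()) return nullptr;
+//       IsTopNode = false;
+//       SUnit *SU = Ready.back(); Ready.pop_back(); return SU;
+//     }
+//     void schedNode(SUnit *SU, bool IsTopNode) override {}
+//     void releaseTopNode(SUnit *SU) override {}
+//     void releaseBottomNode(SUnit *SU) override { Ready.push_back(SU); }
+//   };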
+
+/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
+/// schedules machine instructions according to the given MachineSchedStrategy
+/// without much extra book-keeping. This is the common functionality between
+/// PreRA and PostRA MachineScheduler.
+class ScheduleDAGMI : public ScheduleDAGInstrs {
+protected:
+  AliasAnalysis *AA;
+  LiveIntervals *LIS;
+  std::unique_ptr<MachineSchedStrategy> SchedImpl;
+
+  /// Topo - A topological ordering for SUnits which permits fast IsReachable
+  /// and similar queries.
+  ScheduleDAGTopologicalSort Topo;
+
+  /// Ordered list of DAG postprocessing steps.
+  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
+
+  /// The top of the unscheduled zone.
+  MachineBasicBlock::iterator CurrentTop;
+
+  /// The bottom of the unscheduled zone.
+  MachineBasicBlock::iterator CurrentBottom;
+
+  /// Record the next node in a scheduled cluster.
+  const SUnit *NextClusterPred = nullptr;
+  const SUnit *NextClusterSucc = nullptr;
+
+#ifndef NDEBUG
+  /// The number of instructions scheduled so far. Used to cut off the
+  /// scheduler at the point determined by misched-cutoff.
+  unsigned NumInstrsScheduled = 0;
+#endif
+
+public:
+  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
+                bool RemoveKillFlags)
+      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
+        LIS(C->LIS), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU) {}
+
+  // Provide a vtable anchor
+  ~ScheduleDAGMI() override;
+
+  /// If this method returns true, handling of the scheduling regions
+  /// themselves (in case of a scheduling boundary in MBB) will be done
+  /// beginning with the topmost region of MBB.
+  bool doMBBSchedRegionsTopDown() const override {
+    return SchedImpl->doMBBSchedRegionsTopDown();
+  }
+
+  // Returns LiveIntervals instance for use in DAG mutators and such.
+  LiveIntervals *getLIS() const { return LIS; }
+
+  /// Return true if this DAG supports VReg liveness and RegPressure.
+  virtual bool hasVRegLiveness() const { return false; }
+
+  /// Add a postprocessing step to the DAG builder.
+  /// Mutations are applied in the order that they are added after normal DAG
+  /// building and before MachineSchedStrategy initialization.
+  ///
+  /// ScheduleDAGMI takes ownership of the Mutation object.
+  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+    if (Mutation)
+      Mutations.push_back(std::move(Mutation));
+  }
+
+  /// \brief True if an edge can be added from PredSU to SuccSU without creating
+  /// a cycle.
+  bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
+
+  /// \brief Add a DAG edge to the given SU with the given predecessor
+  /// dependence data.
+  ///
+  /// \returns true if the edge may be added without creating a cycle OR if an
+  /// equivalent edge already existed (false indicates failure).
+  bool addEdge(SUnit *SuccSU, const SDep &PredDep);
+
+  MachineBasicBlock::iterator top() const { return CurrentTop; }
+  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
+
+  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+  /// region. This covers all instructions in a block, while schedule() may only
+  /// cover a subset.
+  void enterRegion(MachineBasicBlock *bb,
+                   MachineBasicBlock::iterator begin,
+                   MachineBasicBlock::iterator end,
+                   unsigned regioninstrs) override;
+
+  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+  /// reorderable instructions.
+  void schedule() override;
+
+  void startBlock(MachineBasicBlock *bb) override;
+  void finishBlock() override;
+
+  /// Change the position of an instruction within the basic block and update
+  /// live ranges and region boundary iterators.
+  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+
+  const SUnit *getNextClusterPred() const { return NextClusterPred; }
+
+  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
+
+  void viewGraph(const Twine &Name, const Twine &Title) override;
+  void viewGraph() override;
+
+protected:
+  // Top-Level entry points for the schedule() driver...
+
+  /// Apply each ScheduleDAGMutation step in order. This allows different
+  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
+  void postprocessDAG();
+
+  /// Release ExitSU predecessors and setup scheduler queues.
+  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
+
+  /// Update scheduler DAG and queues after scheduling an instruction.
+  void updateQueues(SUnit *SU, bool IsTopNode);
+
+  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
+  void placeDebugValues();
+
+  /// \brief dump the scheduled Sequence.
+  void dumpSchedule() const;
+
+  // Lesser helpers...
+  bool checkSchedLimit();
+
+  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
+                             SmallVectorImpl<SUnit*> &BotRoots);
+
+  void releaseSucc(SUnit *SU, SDep *SuccEdge);
+  void releaseSuccessors(SUnit *SU);
+  void releasePred(SUnit *SU, SDep *PredEdge);
+  void releasePredecessors(SUnit *SU);
+};
+
+/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
+/// machine instructions while updating LiveIntervals and tracking regpressure.
+class ScheduleDAGMILive : public ScheduleDAGMI {
+protected:
+  RegisterClassInfo *RegClassInfo;
+
+  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
+  /// will be empty.
+  SchedDFSResult *DFSResult = nullptr;
+  BitVector ScheduledTrees;
+
+  MachineBasicBlock::iterator LiveRegionEnd;
+
+  /// Maps vregs to the SUnits of their uses in the current scheduling region.
+  VReg2SUnitMultiMap VRegUses;
+
+  // Map each SU to its summary of pressure changes. This array is updated for
+  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
+  // has no effect on the pressure diffs.
+  PressureDiffs SUPressureDiffs;
+
+  /// Register pressure in this region computed by initRegPressure.
+  bool ShouldTrackPressure = false;
+  bool ShouldTrackLaneMasks = false;
+  IntervalPressure RegPressure;
+  RegPressureTracker RPTracker;
+
+  /// List of pressure sets that exceed the target's pressure limit before
+  /// scheduling, listed in increasing set ID order. Each pressure set is paired
+  /// with its max pressure in the currently scheduled regions.
+  std::vector<PressureChange> RegionCriticalPSets;
+
+  /// The top of the unscheduled zone.
+  IntervalPressure TopPressure;
+  RegPressureTracker TopRPTracker;
+
+  /// The bottom of the unscheduled zone.
+  IntervalPressure BotPressure;
+  RegPressureTracker BotRPTracker;
+
+  /// True if disconnected subregister components are already renamed.
+  /// The renaming is only done on demand if lane masks are tracked.
+  bool DisconnectedComponentsRenamed = false;
+
+public:
+  ScheduleDAGMILive(MachineSchedContext *C,
+                    std::unique_ptr<MachineSchedStrategy> S)
+      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
+        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
+        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
+
+  ~ScheduleDAGMILive() override;
+
+  /// Return true if this DAG supports VReg liveness and RegPressure.
+  bool hasVRegLiveness() const override { return true; }
+
+  /// \brief Return true if register pressure tracking is enabled.
+  bool isTrackingPressure() const { return ShouldTrackPressure; }
+
+  /// Get current register pressure for the top scheduled instructions.
+  const IntervalPressure &getTopPressure() const { return TopPressure; }
+  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
+
+  /// Get current register pressure for the bottom scheduled instructions.
+  const IntervalPressure &getBotPressure() const { return BotPressure; }
+  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
+
+  /// Get register pressure for the entire scheduling region before scheduling.
+  const IntervalPressure &getRegPressure() const { return RegPressure; }
+
+  const std::vector<PressureChange> &getRegionCriticalPSets() const {
+    return RegionCriticalPSets;
+  }
+
+  PressureDiff &getPressureDiff(const SUnit *SU) {
+    return SUPressureDiffs[SU->NodeNum];
+  }
+
+  /// Compute a DFSResult after DAG building is complete, and before any
+  /// queue comparisons.
+  void computeDFSResult();
+
+  /// Return a non-null DFS result if the scheduling strategy initialized it.
+  const SchedDFSResult *getDFSResult() const { return DFSResult; }
+
+  BitVector &getScheduledTrees() { return ScheduledTrees; }
+
+  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+  /// region. This covers all instructions in a block, while schedule() may only
+  /// cover a subset.
+  void enterRegion(MachineBasicBlock *bb,
+                   MachineBasicBlock::iterator begin,
+                   MachineBasicBlock::iterator end,
+                   unsigned regioninstrs) override;
+
+  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+  /// reorderable instructions.
+  void schedule() override;
+
+  /// Compute the cyclic critical path through the DAG.
+  unsigned computeCyclicCriticalPath();
+
+protected:
+  // Top-Level entry points for the schedule() driver...
+
+  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
+  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
+  /// region; TopRPTracker and BotRPTracker will be initialized to the top and
+  /// bottom of the DAG region without covering any unscheduled instruction.
+  void buildDAGWithRegPressure();
+
+  /// Release ExitSU predecessors and setup scheduler queues. Re-position
+  /// the Top RP tracker in case the region beginning has changed.
+  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
+
+  /// Move an instruction and update register pressure.
+  void scheduleMI(SUnit *SU, bool IsTopNode);
+
+  // Lesser helpers...
+
+  void initRegPressure();
+
+  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);
+
+  void updateScheduledPressure(const SUnit *SU,
+                               const std::vector<unsigned> &NewMaxPressure);
+
+  void collectVRegUses(SUnit &SU);
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// Helpers for implementing custom MachineSchedStrategy classes. These take
+/// care of the book-keeping associated with list scheduling heuristics.
+///
+//===----------------------------------------------------------------------===//
+
+/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+///
+/// This is a convenience class that may be used by implementations of
+/// MachineSchedStrategy.
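+///
+/// A minimal usage sketch (the queue ID and the SUnit SU are illustrative):
+/// \code
+///   ReadyQueue Q(/*ID=*/1, "TopQ");
+///   Q.push(SU);                 // sets the ID bit in SU->NodeQueueId
+///   ReadyQueue::iterator I = Q.find(SU);
+///   if (I != Q.end())
+///     I = Q.remove(I);          // clears the bit; swap-and-pop removal
+/// \endcode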
+class ReadyQueue {
+  unsigned ID;
+  std::string Name;
+  std::vector<SUnit*> Queue;
+
+public:
+  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+  unsigned getID() const { return ID; }
+
+  StringRef getName() const { return Name; }
+
+  // SU is in this queue if its NodeQueueId bitmask includes this queue's ID.
+  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+  bool empty() const { return Queue.empty(); }
+
+  void clear() { Queue.clear(); }
+
+  unsigned size() const { return Queue.size(); }
+
+  using iterator = std::vector<SUnit*>::iterator;
+
+  iterator begin() { return Queue.begin(); }
+
+  iterator end() { return Queue.end(); }
+
+  ArrayRef<SUnit*> elements() { return Queue; }
+
+  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
+
+  void push(SUnit *SU) {
+    Queue.push_back(SU);
+    SU->NodeQueueId |= ID;
+  }
+
+  iterator remove(iterator I) {
+    // Clear this queue's bit in the node's queue mask, then remove the entry
+    // by swap-and-pop: overwrite *I with the last element and shrink by one.
+    (*I)->NodeQueueId &= ~ID;
+    *I = Queue.back();
+    unsigned idx = I - Queue.begin();
+    Queue.pop_back();
+    return Queue.begin() + idx;
+  }
+
+  void dump() const;
+};
+
+/// Summarize the unscheduled region.
+struct SchedRemainder {
+  // Critical path through the DAG in expected latency.
+  unsigned CriticalPath;
+  unsigned CyclicCritPath;
+
+  // Scaled count of micro-ops left to schedule.
+  unsigned RemIssueCount;
+
+  bool IsAcyclicLatencyLimited;
+
+  // Unscheduled resources
+  SmallVector<unsigned, 16> RemainingCounts;
+
+  SchedRemainder() { reset(); }
+
+  void reset() {
+    CriticalPath = 0;
+    CyclicCritPath = 0;
+    RemIssueCount = 0;
+    IsAcyclicLatencyLimited = false;
+    RemainingCounts.clear();
+  }
+
+  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
+};
+
+/// Each scheduling boundary is associated with ready queues. It tracks the
+/// current cycle in the direction of movement, and maintains the state
+/// of "hazards" and other interlocks at the current cycle.
+class SchedBoundary {
+public:
+  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
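+  /// E.g. an SUnit sitting in both Available queues has NodeQueueId ==
+  /// (TopQID | BotQID) == 3; the Pending queues reuse these IDs shifted left
+  /// by LogMaxQID (see the SchedBoundary constructor below).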
+  enum {
+    TopQID = 1,
+    BotQID = 2,
+    LogMaxQID = 2
+  };
+
+  ScheduleDAGMI *DAG = nullptr;
+  const TargetSchedModel *SchedModel = nullptr;
+  SchedRemainder *Rem = nullptr;
+
+  ReadyQueue Available;
+  ReadyQueue Pending;
+
+  ScheduleHazardRecognizer *HazardRec = nullptr;
+
+private:
+  /// True if the pending Q should be checked/updated before scheduling another
+  /// instruction.
+  bool CheckPending;
+
+  /// Number of cycles it takes to issue the instructions scheduled in this
+  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
+  /// See getStalls().
+  unsigned CurrCycle;
+
+  /// Micro-ops issued in the current cycle
+  unsigned CurrMOps;
+
+  /// MinReadyCycle - Cycle of the soonest available instruction.
+  unsigned MinReadyCycle;
+
+  // The expected latency of the critical path in this scheduled zone.
+  unsigned ExpectedLatency;
+
+  // The latency of dependence chains leading into this zone.
+  // For each node scheduled bottom-up: DLat = max(DLat, N.Depth).
+  // For each cycle scheduled: DLat -= 1.
+  unsigned DependentLatency;
+
+  /// Count the scheduled (issued) micro-ops that can be retired by
+  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
+  unsigned RetiredMOps;
+
+  // Count scheduled resources that have been executed. Resources are
+  // considered executed if they become ready in the time that it takes to
+  // saturate any resource including the one in question. Counts are scaled
+  // for direct comparison with other resources. Counts can be compared with
+  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
+  SmallVector<unsigned, 16> ExecutedResCounts;
+
+  /// Cache the max count for a single resource.
+  unsigned MaxExecutedResCount;
+
+  // Cache the critical resources ID in this scheduled zone.
+  unsigned ZoneCritResIdx;
+
+  // Is the scheduled region resource limited vs. latency limited.
+  bool IsResourceLimited;
+
+  // Record the highest cycle at which each resource has been reserved by a
+  // scheduled instruction.
+  SmallVector<unsigned, 16> ReservedCycles;
+
+#ifndef NDEBUG
+  // Remember the greatest possible stall as an upper bound on the number of
+  // times we should retry the pending queue because of a hazard.
+  unsigned MaxObservedStall;
+#endif
+
+public:
+  /// Pending queues extend the ready queues with the same base ID, shifted
+  /// left by LogMaxQID so the queue bit masks stay distinct.
+  SchedBoundary(unsigned ID, const Twine &Name):
+    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
+    reset();
+  }
+
+  ~SchedBoundary();
+
+  void reset();
+
+  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
+            SchedRemainder *rem);
+
+  bool isTop() const {
+    return Available.getID() == TopQID;
+  }
+
+  /// Number of cycles to issue the instructions scheduled in this zone.
+  unsigned getCurrCycle() const { return CurrCycle; }
+
+  /// Micro-ops issued in the current cycle
+  unsigned getCurrMOps() const { return CurrMOps; }
+
+  // The latency of dependence chains leading into this zone.
+  unsigned getDependentLatency() const { return DependentLatency; }
+
+  /// Get the number of latency cycles "covered" by the scheduled
+  /// instructions. This is the larger of the critical path within the zone
+  /// and the number of cycles required to issue the instructions.
+  unsigned getScheduledLatency() const {
+    return std::max(ExpectedLatency, CurrCycle);
+  }
+
+  unsigned getUnscheduledLatency(SUnit *SU) const {
+    return isTop() ? SU->getHeight() : SU->getDepth();
+  }
+
+  unsigned getResourceCount(unsigned ResIdx) const {
+    return ExecutedResCounts[ResIdx];
+  }
+
+  /// Get the scaled count of scheduled micro-ops and resources, including
+  /// executed resources.
+  unsigned getCriticalCount() const {
+    if (!ZoneCritResIdx)
+      return RetiredMOps * SchedModel->getMicroOpFactor();
+    return getResourceCount(ZoneCritResIdx);
+  }
+
+  /// Get a scaled count for the minimum execution time of the scheduled
+  /// micro-ops that are ready to execute by getExecutedCount. Notice the
+  /// feedback loop.
+  unsigned getExecutedCount() const {
+    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
+                    MaxExecutedResCount);
+  }
+
+  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
+
+  // Is the scheduled region resource limited vs. latency limited.
+  bool isResourceLimited() const { return IsResourceLimited; }
+
+  /// Get the difference between the given SUnit's ready time and the current
+  /// cycle.
+  unsigned getLatencyStallCycles(SUnit *SU);
+
+  unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
+
+  bool checkHazard(SUnit *SU);
+
+  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
+
+  unsigned getOtherResourceCount(unsigned &OtherCritIdx);
+
+  void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+  void bumpCycle(unsigned NextCycle);
+
+  void incExecutedResources(unsigned PIdx, unsigned Count);
+
+  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
+
+  void bumpNode(SUnit *SU);
+
+  void releasePending();
+
+  void removeReady(SUnit *SU);
+
+  /// Call this before applying any other heuristics to the Available queue.
+  /// Updates the Available/Pending Q's if necessary and returns the single
+  /// available instruction, or NULL if there are multiple candidates.
+  SUnit *pickOnlyChoice();
+
+  void dumpScheduledState() const;
+};
+
+/// Base class for GenericScheduler. This class maintains information about
+/// scheduling candidates based on TargetSchedModel, making it easy to
+/// implement heuristics for either preRA or postRA scheduling.
+class GenericSchedulerBase : public MachineSchedStrategy {
+public:
+  /// Represent the type of SchedCandidate found within a single queue.
+  /// pickNodeBidirectional depends on these listed by decreasing priority.
+  enum CandReason : uint8_t {
+    NoCand, Only1, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak,
+    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
+    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
+
+#ifndef NDEBUG
+  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
+#endif
+
+  /// Policy for scheduling the next instruction in the candidate's zone.
+  struct CandPolicy {
+    bool ReduceLatency = false;
+    unsigned ReduceResIdx = 0;
+    unsigned DemandResIdx = 0;
+
+    CandPolicy() = default;
+
+    bool operator==(const CandPolicy &RHS) const {
+      return ReduceLatency == RHS.ReduceLatency &&
+             ReduceResIdx == RHS.ReduceResIdx &&
+             DemandResIdx == RHS.DemandResIdx;
+    }
+    bool operator!=(const CandPolicy &RHS) const {
+      return !(*this == RHS);
+    }
+  };
+
+  /// Status of an instruction's critical resource consumption.
+  struct SchedResourceDelta {
+    // Count critical resources in the scheduled region required by SU.
+    unsigned CritResources = 0;
+
+    // Count critical resources from another region consumed by SU.
+    unsigned DemandedResources = 0;
+
+    SchedResourceDelta() = default;
+
+    bool operator==(const SchedResourceDelta &RHS) const {
+      return CritResources == RHS.CritResources
+        && DemandedResources == RHS.DemandedResources;
+    }
+    bool operator!=(const SchedResourceDelta &RHS) const {
+      return !operator==(RHS);
+    }
+  };
+
+  /// Store the state used by GenericScheduler heuristics, required for the
+  /// lifetime of one invocation of pickNode().
+  struct SchedCandidate {
+    CandPolicy Policy;
+
+    // The best SUnit candidate.
+    SUnit *SU;
+
+    // The reason for this candidate.
+    CandReason Reason;
+
+    // Whether this candidate should be scheduled at top/bottom.
+    bool AtTop;
+
+    // Register pressure values for the best candidate.
+    RegPressureDelta RPDelta;
+
+    // Critical resource consumption of the best candidate.
+    SchedResourceDelta ResDelta;
+
+    SchedCandidate() { reset(CandPolicy()); }
+    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }
+
+    void reset(const CandPolicy &NewPolicy) {
+      Policy = NewPolicy;
+      SU = nullptr;
+      Reason = NoCand;
+      AtTop = false;
+      RPDelta = RegPressureDelta();
+      ResDelta = SchedResourceDelta();
+    }
+
+    bool isValid() const { return SU; }
+
+    // Copy the status of another candidate without changing policy.
+    void setBest(SchedCandidate &Best) {
+      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
+      SU = Best.SU;
+      Reason = Best.Reason;
+      AtTop = Best.AtTop;
+      RPDelta = Best.RPDelta;
+      ResDelta = Best.ResDelta;
+    }
+
+    void initResourceDelta(const ScheduleDAGMI *DAG,
+                           const TargetSchedModel *SchedModel);
+  };
+
+protected:
+  const MachineSchedContext *Context;
+  const TargetSchedModel *SchedModel = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+
+  SchedRemainder Rem;
+
+  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}
+
+  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
+                 SchedBoundary *OtherZone);
+
+#ifndef NDEBUG
+  void traceCandidate(const SchedCandidate &Cand);
+#endif
+};
+
+/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class GenericScheduler : public GenericSchedulerBase {
+public:
+  GenericScheduler(const MachineSchedContext *C):
+    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
+    Bot(SchedBoundary::BotQID, "BotQ") {}
+
+  void initPolicy(MachineBasicBlock::iterator Begin,
+                  MachineBasicBlock::iterator End,
+                  unsigned NumRegionInstrs) override;
+
+  void dumpPolicy() const override;
+
+  bool shouldTrackPressure() const override {
+    return RegionPolicy.ShouldTrackPressure;
+  }
+
+  bool shouldTrackLaneMasks() const override {
+    return RegionPolicy.ShouldTrackLaneMasks;
+  }
+
+  void initialize(ScheduleDAGMI *dag) override;
+
+  SUnit *pickNode(bool &IsTopNode) override;
+
+  void schedNode(SUnit *SU, bool IsTopNode) override;
+
+  void releaseTopNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+
+    Top.releaseNode(SU, SU->TopReadyCycle);
+    TopCand.SU = nullptr;
+  }
+
+  void releaseBottomNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+
+    Bot.releaseNode(SU, SU->BotReadyCycle);
+    BotCand.SU = nullptr;
+  }
+
+  void registerRoots() override;
+
+protected:
+  ScheduleDAGMILive *DAG = nullptr;
+
+  MachineSchedPolicy RegionPolicy;
+
+  // State of the top and bottom scheduled instruction boundaries.
+  SchedBoundary Top;
+  SchedBoundary Bot;
+
+  /// Candidate last picked from Top boundary.
+  SchedCandidate TopCand;
+  /// Candidate last picked from Bot boundary.
+  SchedCandidate BotCand;
+
+  void checkAcyclicLatency();
+
+  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
+                     const RegPressureTracker &RPTracker,
+                     RegPressureTracker &TempTracker);
+
+  void tryCandidate(SchedCandidate &Cand,
+                    SchedCandidate &TryCand,
+                    SchedBoundary *Zone);
+
+  SUnit *pickNodeBidirectional(bool &IsTopNode);
+
+  void pickNodeFromQueue(SchedBoundary &Zone,
+                         const CandPolicy &ZonePolicy,
+                         const RegPressureTracker &RPTracker,
+                         SchedCandidate &Candidate);
+
+  void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+};
+
+/// PostGenericScheduler - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Callbacks from ScheduleDAGMI:
+///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
+class PostGenericScheduler : public GenericSchedulerBase {
+  ScheduleDAGMI *DAG;
+  SchedBoundary Top;
+  SmallVector<SUnit*, 8> BotRoots;
+
+public:
+  PostGenericScheduler(const MachineSchedContext *C):
+    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
+
+  ~PostGenericScheduler() override = default;
+
+  void initPolicy(MachineBasicBlock::iterator Begin,
+                  MachineBasicBlock::iterator End,
+                  unsigned NumRegionInstrs) override {
+    /* no configurable policy */
+  }
+
+  /// PostRA scheduling does not track pressure.
+  bool shouldTrackPressure() const override { return false; }
+
+  void initialize(ScheduleDAGMI *Dag) override;
+
+  void registerRoots() override;
+
+  SUnit *pickNode(bool &IsTopNode) override;
+
+  void scheduleTree(unsigned SubtreeID) override {
+    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
+  }
+
+  void schedNode(SUnit *SU, bool IsTopNode) override;
+
+  void releaseTopNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+    Top.releaseNode(SU, SU->TopReadyCycle);
+  }
+
+  // Only called for roots.
+  void releaseBottomNode(SUnit *SU) override {
+    BotRoots.push_back(SU);
+  }
+
+protected:
+  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
+
+  void pickNodeFromQueue(SchedCandidate &Cand);
+};
+
+/// Create the standard converging machine scheduler. This will be used as the
+/// default scheduler if the target does not set a default.
+/// Adds default DAG mutations.
+ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);
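+
+/// For example, a target might wrap the generic scheduler and append its own
+/// mutations (a sketch; the fusion predicate name is illustrative, and
+/// addMutation is declared on ScheduleDAGMI earlier in this header):
+/// \code
+///   static ScheduleDAGInstrs *createMySchedLive(MachineSchedContext *C) {
+///     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
+///     DAG->addMutation(createMacroFusionDAGMutation(shouldScheduleAdjacent));
+///     return DAG;
+///   }
+/// \endcode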
+
+/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
+ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);
+
+std::unique_ptr<ScheduleDAGMutation>
+createLoadClusterDAGMutation(const TargetInstrInfo *TII,
+                             const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createStoreClusterDAGMutation(const TargetInstrInfo *TII,
+                              const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
+                               const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h b/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h
new file mode 100644
index 0000000..9d8db39
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -0,0 +1,436 @@
+//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the MachineTraceMetrics analysis pass
+// that estimates CPU resource usage and critical data dependency paths through
+// preferred traces. This is useful for super-scalar CPUs where execution speed
+// can be limited both by data dependencies and by limited execution resources.
+//
+// Out-of-order CPUs will often be executing instructions from multiple basic
+// blocks at the same time. This makes it difficult to estimate the resource
+// usage accurately in a single basic block. Resources can be estimated better
+// by looking at a trace through the current basic block.
+//
+// For every block, the MachineTraceMetrics pass will pick a preferred trace
+// that passes through the block. The trace is chosen based on loop structure,
+// branch probabilities, and resource usage. The intention is to pick likely
+// traces that would be the most affected by code transformations.
+//
+// It is expensive to compute a full arbitrary trace for every block, so to
+// save some computations, traces are chosen to be convergent. This means that
+// if the traces through basic blocks A and B ever cross when moving away from
+// A and B, they never diverge again. This applies in both directions: if the
+// traces meet above A and B, they won't diverge when going further back.
+//
+// Traces tend to align with loops. The trace through a block in an inner loop
+// will begin at the loop entry block and end at a back edge. If there are
+// nested loops, the trace may begin and end at those instead.
+//
+// For each trace, we compute the critical path length, which is the number of
+// cycles required to execute the trace when execution is limited by data
+// dependencies only. We also compute the resource height, which is the number
+// of cycles required to execute all instructions in the trace when ignoring
+// data dependencies.
+//
+// Every instruction in the current block has a slack - the number of cycles
+// execution of the instruction can be delayed without extending the critical
+// path.
+//
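+// Concretely, for an instruction MI in the trace center block, the relation
+// is (a sketch; see Trace::getInstrSlack below):
+//
+//   Slack(MI) = CriticalPath - (Depth(MI) + Height(MI))
+//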
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINETRACEMETRICS_H
+#define LLVM_CODEGEN_MACHINETRACEMETRICS_H
+
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+
+namespace llvm {
+
+class AnalysisUsage;
+class MachineFunction;
+class MachineInstr;
+class MachineLoop;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+struct MCSchedClassDesc;
+class raw_ostream;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
+// Keep track of physreg data dependencies by recording each live register unit.
+// Associate each regunit with an instruction operand. Depending on the
+// direction instructions are scanned, it could be the operand that defined the
+// regunit, or the highest operand to read the regunit.
+struct LiveRegUnit {
+  unsigned RegUnit;
+  unsigned Cycle = 0;
+  const MachineInstr *MI = nullptr;
+  unsigned Op = 0;
+
+  unsigned getSparseSetIndex() const { return RegUnit; }
+
+  LiveRegUnit(unsigned RU) : RegUnit(RU) {}
+};
+
+class MachineTraceMetrics : public MachineFunctionPass {
+  const MachineFunction *MF = nullptr;
+  const TargetInstrInfo *TII = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+  const MachineRegisterInfo *MRI = nullptr;
+  const MachineLoopInfo *Loops = nullptr;
+  TargetSchedModel SchedModel;
+
+public:
+  friend class Ensemble;
+  friend class Trace;
+
+  class Ensemble;
+
+  static char ID;
+
+  MachineTraceMetrics();
+
+  void getAnalysisUsage(AnalysisUsage&) const override;
+  bool runOnMachineFunction(MachineFunction&) override;
+  void releaseMemory() override;
+  void verifyAnalysis() const override;
+
+  /// Per-basic block information that doesn't depend on the trace through the
+  /// block.
+  struct FixedBlockInfo {
+    /// The number of non-trivial instructions in the block.
+    /// Doesn't count PHI and COPY instructions that are likely to be removed.
+    unsigned InstrCount = ~0u;
+
+    /// True when the block contains calls.
+    bool HasCalls = false;
+
+    FixedBlockInfo() = default;
+
+    /// Returns true when resource information for this block has been computed.
+    bool hasResources() const { return InstrCount != ~0u; }
+
+    /// Invalidate resource information.
+    void invalidate() { InstrCount = ~0u; }
+  };
+
+  /// Get the fixed resource information about MBB. Compute it on demand.
+  const FixedBlockInfo *getResources(const MachineBasicBlock*);
+
+  /// Get the scaled number of cycles used per processor resource in MBB.
+  /// This is an array with SchedModel.getNumProcResourceKinds() entries.
+  /// The getResources() function above must have been called first.
+  ///
+  /// These numbers have already been scaled by SchedModel.getResourceFactor().
+  ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
+
+  /// A virtual register or regunit required by a basic block or its trace
+  /// successors.
+  struct LiveInReg {
+    /// The virtual register required, or a register unit.
+    unsigned Reg;
+
+    /// For virtual registers: Minimum height of the defining instruction.
+    /// For regunits: Height of the highest user in the trace.
+    unsigned Height;
+
+    LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
+  };
+
+  /// Per-basic block information that relates to a specific trace through the
+  /// block. Convergent traces mean that only one of these is required per
+  /// block in a trace ensemble.
+  struct TraceBlockInfo {
+    /// Trace predecessor, or NULL for the first block in the trace.
+    /// Valid when hasValidDepth().
+    const MachineBasicBlock *Pred = nullptr;
+
+    /// Trace successor, or NULL for the last block in the trace.
+    /// Valid when hasValidHeight().
+    const MachineBasicBlock *Succ = nullptr;
+
+    /// The block number of the head of the trace. (When hasValidDepth()).
+    unsigned Head;
+
+    /// The block number of the tail of the trace. (When hasValidHeight()).
+    unsigned Tail;
+
+    /// Accumulated number of instructions in the trace above this block.
+    /// Does not include instructions in this block.
+    unsigned InstrDepth = ~0u;
+
+    /// Accumulated number of instructions in the trace below this block.
+    /// Includes instructions in this block.
+    unsigned InstrHeight = ~0u;
+
+    TraceBlockInfo() = default;
+
+    /// Returns true if the depth resources have been computed from the trace
+    /// above this block.
+    bool hasValidDepth() const { return InstrDepth != ~0u; }
+
+    /// Returns true if the height resources have been computed from the trace
+    /// below this block.
+    bool hasValidHeight() const { return InstrHeight != ~0u; }
+
+    /// Invalidate depth resources when some block above this one has changed.
+    void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
+
+    /// Invalidate height resources when a block below this one has changed.
+    void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
+
+    /// Assuming that this is a dominator of TBI, determine if it contains
+    /// useful instruction depths. A dominating block can be above the current
+    /// trace head, and any dependencies from such a far away dominator are not
+    /// expected to affect the critical path.
+    ///
+    /// Also returns true when TBI == this.
+    bool isUsefulDominator(const TraceBlockInfo &TBI) const {
+      // The trace for TBI may not even be calculated yet.
+      if (!hasValidDepth() || !TBI.hasValidDepth())
+        return false;
+      // Instruction depths are only comparable if the traces share a head.
+      if (Head != TBI.Head)
+        return false;
+      // It is almost always the case that TBI belongs to the same trace as
+      // this block, but in rare convoluted cases involving irreducible control
+      // flow, a dominator may share a trace head without actually being on the
+      // same trace as TBI. This is not a big problem as long as it doesn't
+      // increase the instruction depth.
+      return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
+    }
+
+    // Data-dependency-related information. Per-instruction depth and height
+    // are computed from data dependencies in the current trace, using
+    // itinerary data.
+
+    /// Instruction depths have been computed. This implies hasValidDepth().
+    bool HasValidInstrDepths = false;
+
+    /// Instruction heights have been computed. This implies hasValidHeight().
+    bool HasValidInstrHeights = false;
+
+    /// Critical path length. This is the number of cycles in the longest data
+    /// dependency chain through the trace. This is only valid when both
+    /// HasValidInstrDepths and HasValidInstrHeights are set.
+    unsigned CriticalPath;
+
+    /// Live-in registers. These registers are defined above the current block
+    /// and used by this block or a block below it.
+    /// This does not include PHI uses in the current block, but it does
+    /// include PHI uses in deeper blocks.
+    SmallVector<LiveInReg, 4> LiveIns;
+
+    void print(raw_ostream&) const;
+  };
+
+  /// InstrCycles represents the cycle height and depth of an instruction in a
+  /// trace.
+  struct InstrCycles {
+    /// Earliest issue cycle as determined by data dependencies and instruction
+    /// latencies from the beginning of the trace. Data dependencies from
+    /// before the trace are not included.
+    unsigned Depth;
+
+    /// Minimum number of cycles from when this instruction is issued to the
+    /// end of the trace, as determined by data dependencies and instruction
+    /// latencies.
+    unsigned Height;
+  };
+
+  /// A trace represents a plausible sequence of executed basic blocks that
+  /// passes through the current basic block once. The Trace class serves as a
+  /// handle to internal cached data structures.
+  class Trace {
+    Ensemble &TE;
+    TraceBlockInfo &TBI;
+
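+    // TBI is an element of TE.BlockInfo, so pointer arithmetic recovers the
+    // block number it was indexed by.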
+    unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
+
+  public:
+    explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
+
+    void print(raw_ostream&) const;
+
+    /// Compute the total number of instructions in the trace.
+    unsigned getInstrCount() const {
+      return TBI.InstrDepth + TBI.InstrHeight;
+    }
+
+    /// Return the resource depth of the top/bottom of the trace center block.
+    /// This is the number of cycles required to execute all instructions from
+    /// the trace head to the trace center block. The resource depth only
+    /// considers execution resources, it ignores data dependencies.
+    /// When Bottom is set, instructions in the trace center block are included.
+    unsigned getResourceDepth(bool Bottom) const;
+
+    /// Return the resource length of the trace. This is the number of cycles
+    /// required to execute the instructions in the trace if they were all
+    /// independent, exposing the maximum instruction-level parallelism.
+    ///
+    /// Any blocks in Extrablocks are included as if they were part of the
+    /// trace. Likewise, extra resources required by the specified scheduling
+    /// classes are included. For the caller to account for extra machine
+    /// instructions, it must first resolve each instruction's scheduling class.
+    unsigned getResourceLength(
+        ArrayRef<const MachineBasicBlock *> Extrablocks = None,
+        ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None,
+        ArrayRef<const MCSchedClassDesc *> RemoveInstrs = None) const;
+
+    /// Return the length of the (data dependency) critical path through the
+    /// trace.
+    unsigned getCriticalPath() const { return TBI.CriticalPath; }
+
+    /// Return the depth and height of MI. The depth is only valid for
+    /// instructions in or above the trace center block. The height is only
+    /// valid for instructions in or below the trace center block.
+    InstrCycles getInstrCycles(const MachineInstr &MI) const {
+      return TE.Cycles.lookup(&MI);
+    }
+
+    /// Return the slack of MI. This is the number of cycles MI can be delayed
+    /// before the critical path becomes longer.
+    /// MI must be an instruction in the trace center block.
+    unsigned getInstrSlack(const MachineInstr &MI) const;
+
+    /// Return the Depth of a PHI instruction in a trace center block successor.
+    /// The PHI does not have to be part of the trace.
+    unsigned getPHIDepth(const MachineInstr &PHI) const;
+
+    /// A dependence is useful if the basic block of the defining instruction
+    /// is part of the trace of the user instruction. It is assumed that DefMI
+    /// dominates UseMI (see also isUsefulDominator).
+    bool isDepInTrace(const MachineInstr &DefMI,
+                      const MachineInstr &UseMI) const;
+  };
+
+  /// A trace ensemble is a collection of traces selected using the same
+  /// strategy, for example 'minimum resource height'. There is one trace for
+  /// every block in the function.
+  class Ensemble {
+    friend class Trace;
+
+    SmallVector<TraceBlockInfo, 4> BlockInfo;
+    DenseMap<const MachineInstr*, InstrCycles> Cycles;
+    SmallVector<unsigned, 0> ProcResourceDepths;
+    SmallVector<unsigned, 0> ProcResourceHeights;
+
+    void computeTrace(const MachineBasicBlock*);
+    void computeDepthResources(const MachineBasicBlock*);
+    void computeHeightResources(const MachineBasicBlock*);
+    unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
+    void computeInstrDepths(const MachineBasicBlock*);
+    void computeInstrHeights(const MachineBasicBlock*);
+    void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
+                    ArrayRef<const MachineBasicBlock*> Trace);
+
+  protected:
+    MachineTraceMetrics &MTM;
+
+    explicit Ensemble(MachineTraceMetrics*);
+
+    virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
+    virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
+    const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
+    const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
+    const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
+    ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
+    ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
+
+  public:
+    virtual ~Ensemble();
+
+    virtual const char *getName() const = 0;
+    void print(raw_ostream&) const;
+    void invalidate(const MachineBasicBlock *MBB);
+    void verify() const;
+
+    /// Get the trace that passes through MBB.
+    /// The trace is computed on demand.
+    Trace getTrace(const MachineBasicBlock *MBB);
+
+    /// Updates the depth of a machine instruction, given RegUnits.
+    void updateDepth(TraceBlockInfo &TBI, const MachineInstr&,
+                     SparseSet<LiveRegUnit> &RegUnits);
+    void updateDepth(const MachineBasicBlock *, const MachineInstr&,
+                     SparseSet<LiveRegUnit> &RegUnits);
+
+    /// Updates the depth of the instructions from Start to End.
+    void updateDepths(MachineBasicBlock::iterator Start,
+                      MachineBasicBlock::iterator End,
+                      SparseSet<LiveRegUnit> &RegUnits);
+  };
+
+  /// Strategies for selecting traces.
+  enum Strategy {
+    /// Select the trace through a block that has the fewest instructions.
+    TS_MinInstrCount,
+
+    TS_NumStrategies
+  };
+
+  /// Get the trace ensemble representing the given trace selection strategy.
+  /// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
+  /// and valid for the lifetime of the analysis pass.
+  Ensemble *getEnsemble(Strategy);
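+
+  /// For example, a pass might query the minimum-instruction-count ensemble
+  /// (a sketch; MTM is the MachineTraceMetrics analysis and MBB some block):
+  /// \code
+  ///   MachineTraceMetrics::Ensemble *MinInstr =
+  ///       MTM.getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+  ///   MachineTraceMetrics::Trace T = MinInstr->getTrace(MBB);
+  ///   unsigned Len = std::max(T.getCriticalPath(), T.getResourceLength());
+  /// \endcode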
+
+  /// Invalidate cached information about MBB. This must be called *before* MBB
+  /// is erased, or the CFG is otherwise changed.
+  ///
+  /// This invalidates per-block information about resource usage for MBB only,
+  /// and it invalidates per-trace information for any trace that passes
+  /// through MBB.
+  ///
+  /// Call Ensemble::getTrace() again to update any trace handles.
+  void invalidate(const MachineBasicBlock *MBB);
+
+private:
+  // One entry per basic block, indexed by block number.
+  SmallVector<FixedBlockInfo, 4> BlockInfo;
+
+  // Cycles consumed on each processor resource per block.
+  // The number of processor resource kinds is constant for a given subtarget,
+  // but it is not known at compile time. The number of cycles consumed by
+  // block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
+  // where Kinds = SchedModel.getNumProcResourceKinds().
+  SmallVector<unsigned, 0> ProcResourceCycles;
+
+  // One ensemble per strategy.
+  Ensemble* Ensembles[TS_NumStrategies];
+
+  // Convert scaled resource usage to a cycle count that can be compared with
+  // latencies.
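+  // E.g. with getLatencyFactor() == 2, a scaled count of 5 rounds up to
+  // (5 + 2 - 1) / 2 == 3 cycles.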
+  unsigned getCycles(unsigned Scaled) {
+    unsigned Factor = SchedModel.getLatencyFactor();
+    return (Scaled + Factor - 1) / Factor;
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineTraceMetrics::Trace &Tr) {
+  Tr.print(OS);
+  return OS;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineTraceMetrics::Ensemble &En) {
+  En.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINETRACEMETRICS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
new file mode 100644
index 0000000..dc105fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
@@ -0,0 +1,50 @@
+//===- MacroFusion.h - Macro Fusion -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file contains the definition of the DAG scheduling mutation to
+/// pair instructions back to back.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACROFUSION_H
+#define LLVM_CODEGEN_MACROFUSION_H
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class ScheduleDAGMutation;
+class TargetInstrInfo;
+class TargetSubtargetInfo;
+
+/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
+/// together. When FirstMI is unspecified, check instead whether SecondMI may
+/// be part of a fused pair at all.
+using ShouldSchedulePredTy = std::function<bool(const TargetInstrInfo &TII,
+                                                const TargetSubtargetInfo &TSI,
+                                                const MachineInstr *FirstMI,
+                                                const MachineInstr &SecondMI)>;
+
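+/// A minimal predicate sketch (the target namespace and opcodes are
+/// hypothetical):
+/// \code
+///   static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
+///                                      const TargetSubtargetInfo &TSI,
+///                                      const MachineInstr *FirstMI,
+///                                      const MachineInstr &SecondMI) {
+///     if (SecondMI.getOpcode() != MyTarget::BRANCH)
+///       return false;
+///     // With FirstMI unspecified, report whether SecondMI can fuse at all.
+///     return !FirstMI || FirstMI->getOpcode() == MyTarget::CMP;
+///   }
+/// \endcode
+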
+/// \brief Create a DAG scheduling mutation to pair instructions back to back
+/// for instructions that benefit according to the target-specific
+/// shouldScheduleAdjacent predicate function.
+std::unique_ptr<ScheduleDAGMutation>
+createMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
+
+/// \brief Create a DAG scheduling mutation to pair branch instructions with one
+/// of their predecessors back to back for instructions that benefit according
+/// to the target-specific shouldScheduleAdjacent predicate function.
+std::unique_ptr<ScheduleDAGMutation>
+createBranchMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACROFUSION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h
new file mode 100644
index 0000000..bde451a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h
@@ -0,0 +1,135 @@
+//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines classes conforming to the PBQP cost value manager concept.
+//
+// Cost value managers are memory managers for PBQP cost values (vectors and
+// matrices). Since PBQP graphs can grow very large (e.g. hundreds of thousands
+// of edges on the largest function in SPEC2006), cost values are pooled and
+// shared rather than stored separately for every node and edge.
+//
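+// A minimal usage sketch, assuming a value type that provides operator== and
+// an overload of hash_value() (as the PBQP Vector and Matrix types do):
+//
+//   ValuePool<Vector> Pool;
+//   auto A = Pool.getValue(SomeVector);  // allocates a pooled entry
+//   auto B = Pool.getValue(SomeVector);  // equal value: returns same entry
+//   // A.get() == B.get(); the entry is freed when the last PoolRef dies.
+//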
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace PBQP {
+
+template <typename ValueT> class ValuePool {
+public:
+  using PoolRef = std::shared_ptr<const ValueT>;
+
+private:
+  class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
+  public:
+    template <typename ValueKeyT>
+    PoolEntry(ValuePool &Pool, ValueKeyT Value)
+        : Pool(Pool), Value(std::move(Value)) {}
+
+    ~PoolEntry() { Pool.removeEntry(this); }
+
+    const ValueT &getValue() const { return Value; }
+
+  private:
+    ValuePool &Pool;
+    ValueT Value;
+  };
+
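+  // DenseSet key information for pooled entries. Hashing and equality are
+  // defined on the pooled value, so getValue() can probe the set by value
+  // key via find_as() without first constructing a PoolEntry.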
+  class PoolEntryDSInfo {
+  public:
+    static inline PoolEntry *getEmptyKey() { return nullptr; }
+
+    static inline PoolEntry *getTombstoneKey() {
+      return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
+    }
+
+    template <typename ValueKeyT>
+    static unsigned getHashValue(const ValueKeyT &C) {
+      return hash_value(C);
+    }
+
+    static unsigned getHashValue(PoolEntry *P) {
+      return getHashValue(P->getValue());
+    }
+
+    static unsigned getHashValue(const PoolEntry *P) {
+      return getHashValue(P->getValue());
+    }
+
+    template <typename ValueKeyT1, typename ValueKeyT2>
+    static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
+      return C1 == C2;
+    }
+
+    template <typename ValueKeyT>
+    static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
+      if (P == getEmptyKey() || P == getTombstoneKey())
+        return false;
+      return isEqual(C, P->getValue());
+    }
+
+    static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
+      if (P1 == getEmptyKey() || P1 == getTombstoneKey())
+        return P1 == P2;
+      return isEqual(P1->getValue(), P2);
+    }
+  };
+
+  using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
+
+  EntrySetT EntrySet;
+
+  void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
+
+public:
+  template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
+    typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
+
+    if (I != EntrySet.end())
+      return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
+
+    auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
+    EntrySet.insert(P.get());
+    // Take the value address before handing P to the aliasing constructor:
+    // argument evaluation order is unspecified, so evaluating &P->getValue()
+    // after P has been moved from would be undefined behavior.
+    const ValueT *Val = &P->getValue();
+    return PoolRef(std::move(P), Val);
+  }
+};
+
+template <typename VectorT, typename MatrixT> class PoolCostAllocator {
+private:
+  using VectorCostPool = ValuePool<VectorT>;
+  using MatrixCostPool = ValuePool<MatrixT>;
+
+public:
+  using Vector = VectorT;
+  using Matrix = MatrixT;
+  using VectorPtr = typename VectorCostPool::PoolRef;
+  using MatrixPtr = typename MatrixCostPool::PoolRef;
+
+  template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
+    return VectorPool.getValue(std::move(v));
+  }
+
+  template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
+    return MatrixPool.getValue(std::move(m));
+  }
+
+private:
+  VectorCostPool VectorPool;
+  MatrixCostPool MatrixPool;
+};
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
new file mode 100644
index 0000000..e94878c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
@@ -0,0 +1,675 @@
+//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Graph class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
+#define LLVM_CODEGEN_PBQP_GRAPH_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <limits>
+#include <vector>
+
+namespace llvm {
+namespace PBQP {
+
+  class GraphBase {
+  public:
+    using NodeId = unsigned;
+    using EdgeId = unsigned;
+
+    /// @brief Returns a value representing an invalid (non-existent) node.
+    static NodeId invalidNodeId() {
+      return std::numeric_limits<NodeId>::max();
+    }
+
+    /// @brief Returns a value representing an invalid (non-existent) edge.
+    static EdgeId invalidEdgeId() {
+      return std::numeric_limits<EdgeId>::max();
+    }
+  };
+
+  /// PBQP Graph class.
+  /// Instances of this class describe PBQP problems.
+  ///
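+  /// A minimal construction sketch (SolverT and the Vector/Matrix
+  /// constructors shown are illustrative):
+  /// \code
+  ///   Graph<MySolver> G;
+  ///   NodeId N1 = G.addNode(Vector(2, 0));            // two options each
+  ///   NodeId N2 = G.addNode(Vector(2, 0));
+  ///   EdgeId E  = G.addEdge(N1, N2, Matrix(2, 2, 0)); // 2x2 edge costs
+  /// \endcode
+  ///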
+  template <typename SolverT>
+  class Graph : public GraphBase {
+  private:
+    using CostAllocator = typename SolverT::CostAllocator;
+
+  public:
+    using RawVector = typename SolverT::RawVector;
+    using RawMatrix = typename SolverT::RawMatrix;
+    using Vector = typename SolverT::Vector;
+    using Matrix = typename SolverT::Matrix;
+    using VectorPtr = typename CostAllocator::VectorPtr;
+    using MatrixPtr = typename CostAllocator::MatrixPtr;
+    using NodeMetadata = typename SolverT::NodeMetadata;
+    using EdgeMetadata = typename SolverT::EdgeMetadata;
+    using GraphMetadata = typename SolverT::GraphMetadata;
+
+  private:
+    class NodeEntry {
+    public:
+      using AdjEdgeList = std::vector<EdgeId>;
+      using AdjEdgeIdx = AdjEdgeList::size_type;
+      using AdjEdgeItr = AdjEdgeList::const_iterator;
+
+      NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
+
+      static AdjEdgeIdx getInvalidAdjEdgeIdx() {
+        return std::numeric_limits<AdjEdgeIdx>::max();
+      }
+
+      AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
+        AdjEdgeIdx Idx = AdjEdgeIds.size();
+        AdjEdgeIds.push_back(EId);
+        return Idx;
+      }
+
+      void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
+        // Swap-and-pop for fast removal.
+        //   1) Update the adj index of the edge currently at back().
+        //   2) Move last Edge down to Idx.
+        //   3) pop_back()
+        // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
+        // redundant, but both operations are cheap.
+        G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
+        AdjEdgeIds[Idx] = AdjEdgeIds.back();
+        AdjEdgeIds.pop_back();
+      }
+
+      const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
+
+      VectorPtr Costs;
+      NodeMetadata Metadata;
+
+    private:
+      AdjEdgeList AdjEdgeIds;
+    };
+
+    class EdgeEntry {
+    public:
+      EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
+          : Costs(std::move(Costs)) {
+        NIds[0] = N1Id;
+        NIds[1] = N2Id;
+        ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
+        ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
+      }
+
+      void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
+        assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
+               "Edge already connected to NIds[NIdx].");
+        NodeEntry &N = G.getNode(NIds[NIdx]);
+        ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
+      }
+
+      void connect(Graph &G, EdgeId ThisEdgeId) {
+        connectToN(G, ThisEdgeId, 0);
+        connectToN(G, ThisEdgeId, 1);
+      }
+
+      void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
+        if (NId == NIds[0])
+          ThisEdgeAdjIdxs[0] = NewIdx;
+        else {
+          assert(NId == NIds[1] && "Edge not connected to NId");
+          ThisEdgeAdjIdxs[1] = NewIdx;
+        }
+      }
+
+      void disconnectFromN(Graph &G, unsigned NIdx) {
+        assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
+               "Edge not connected to NIds[NIdx].");
+        NodeEntry &N = G.getNode(NIds[NIdx]);
+        N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
+        ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
+      }
+
+      void disconnectFrom(Graph &G, NodeId NId) {
+        if (NId == NIds[0])
+          disconnectFromN(G, 0);
+        else {
+          assert(NId == NIds[1] && "Edge does not connect NId");
+          disconnectFromN(G, 1);
+        }
+      }
+
+      NodeId getN1Id() const { return NIds[0]; }
+      NodeId getN2Id() const { return NIds[1]; }
+
+      MatrixPtr Costs;
+      EdgeMetadata Metadata;
+
+    private:
+      NodeId NIds[2];
+      typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
+    };
+
+    // ----- MEMBERS -----
+
+    GraphMetadata Metadata;
+    CostAllocator CostAlloc;
+    SolverT *Solver = nullptr;
+
+    using NodeVector = std::vector<NodeEntry>;
+    using FreeNodeVector = std::vector<NodeId>;
+    NodeVector Nodes;
+    FreeNodeVector FreeNodeIds;
+
+    using EdgeVector = std::vector<EdgeEntry>;
+    using FreeEdgeVector = std::vector<EdgeId>;
+    EdgeVector Edges;
+    FreeEdgeVector FreeEdgeIds;
+
+    Graph(const Graph &Other) = delete;
+
+    // ----- INTERNAL METHODS -----
+
+    NodeEntry &getNode(NodeId NId) {
+      assert(NId < Nodes.size() && "Out of bound NodeId");
+      return Nodes[NId];
+    }
+    const NodeEntry &getNode(NodeId NId) const {
+      assert(NId < Nodes.size() && "Out of bound NodeId");
+      return Nodes[NId];
+    }
+
+    EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
+    const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
+
+    NodeId addConstructedNode(NodeEntry N) {
+      NodeId NId = 0;
+      if (!FreeNodeIds.empty()) {
+        NId = FreeNodeIds.back();
+        FreeNodeIds.pop_back();
+        Nodes[NId] = std::move(N);
+      } else {
+        NId = Nodes.size();
+        Nodes.push_back(std::move(N));
+      }
+      return NId;
+    }
+
+    EdgeId addConstructedEdge(EdgeEntry E) {
+      assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
+             "Attempt to add duplicate edge.");
+      EdgeId EId = 0;
+      if (!FreeEdgeIds.empty()) {
+        EId = FreeEdgeIds.back();
+        FreeEdgeIds.pop_back();
+        Edges[EId] = std::move(E);
+      } else {
+        EId = Edges.size();
+        Edges.push_back(std::move(E));
+      }
+
+      EdgeEntry &NE = getEdge(EId);
+
+      // Add the edge to the adjacency sets of its nodes.
+      NE.connect(*this, EId);
+      return EId;
+    }
+
+    void operator=(const Graph &Other) = delete;
+
+  public:
+    using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
+
+    class NodeItr {
+    public:
+      using iterator_category = std::forward_iterator_tag;
+      using value_type = NodeId;
+      using difference_type = int;
+      using pointer = NodeId *;
+      using reference = NodeId &;
+
+      NodeItr(NodeId CurNId, const Graph &G)
+        : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
+        this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
+      }
+
+      bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
+      bool operator!=(const NodeItr &O) const { return !(*this == O); }
+      NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
+      NodeId operator*() const { return CurNId; }
+
+    private:
+      NodeId findNextInUse(NodeId NId) const {
+        while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
+          ++NId;
+        }
+        return NId;
+      }
+
+      NodeId CurNId, EndNId;
+      const FreeNodeVector &FreeNodeIds;
+    };
+
+    class EdgeItr {
+    public:
+      EdgeItr(EdgeId CurEId, const Graph &G)
+        : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
+        this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
+      }
+
+      bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
+      bool operator!=(const EdgeItr &O) const { return !(*this == O); }
+      EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
+      EdgeId operator*() const { return CurEId; }
+
+    private:
+      EdgeId findNextInUse(EdgeId EId) const {
+        while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
+          ++EId;
+        }
+        return EId;
+      }
+
+      EdgeId CurEId, EndEId;
+      const FreeEdgeVector &FreeEdgeIds;
+    };
+
+    class NodeIdSet {
+    public:
+      NodeIdSet(const Graph &G) : G(G) {}
+
+      NodeItr begin() const { return NodeItr(0, G); }
+      NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
+
+      bool empty() const { return G.Nodes.empty(); }
+
+      typename NodeVector::size_type size() const {
+        return G.Nodes.size() - G.FreeNodeIds.size();
+      }
+
+    private:
+      const Graph& G;
+    };
+
+    class EdgeIdSet {
+    public:
+      EdgeIdSet(const Graph &G) : G(G) {}
+
+      EdgeItr begin() const { return EdgeItr(0, G); }
+      EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
+
+      bool empty() const { return G.Edges.empty(); }
+
+      typename EdgeVector::size_type size() const {
+        return G.Edges.size() - G.FreeEdgeIds.size();
+      }
+
+    private:
+      const Graph& G;
+    };
+
+    class AdjEdgeIdSet {
+    public:
+      AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
+
+      typename NodeEntry::AdjEdgeItr begin() const {
+        return NE.getAdjEdgeIds().begin();
+      }
+
+      typename NodeEntry::AdjEdgeItr end() const {
+        return NE.getAdjEdgeIds().end();
+      }
+
+      bool empty() const { return NE.getAdjEdgeIds().empty(); }
+
+      typename NodeEntry::AdjEdgeList::size_type size() const {
+        return NE.getAdjEdgeIds().size();
+      }
+
+    private:
+      const NodeEntry &NE;
+    };
+
+    /// @brief Construct an empty PBQP graph.
+    Graph() = default;
+
+    /// @brief Construct an empty PBQP graph with the given graph metadata.
+    Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
+
+    /// @brief Get a reference to the graph metadata.
+    GraphMetadata& getMetadata() { return Metadata; }
+
+    /// @brief Get a const-reference to the graph metadata.
+    const GraphMetadata& getMetadata() const { return Metadata; }
+
+    /// @brief Lock this graph to the given solver instance in preparation
+    /// for running the solver. This method will call solver.handleAddNode for
+    /// each node in the graph, and handleAddEdge for each edge, to give the
+    /// solver an opportunity to set up any required metadata.
+    void setSolver(SolverT &S) {
+      assert(!Solver && "Solver already set. Call unsetSolver().");
+      Solver = &S;
+      for (auto NId : nodeIds())
+        Solver->handleAddNode(NId);
+      for (auto EId : edgeIds())
+        Solver->handleAddEdge(EId);
+    }
+
+    /// @brief Release from solver instance.
+    void unsetSolver() {
+      assert(Solver && "Solver not set.");
+      Solver = nullptr;
+    }
+
+    /// @brief Add a node with the given costs.
+    /// @param Costs Cost vector for the new node.
+    /// @return Node iterator for the added node.
+    template <typename OtherVectorT>
+    NodeId addNode(OtherVectorT Costs) {
+      // Get cost vector from the problem domain
+      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+      NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
+      if (Solver)
+        Solver->handleAddNode(NId);
+      return NId;
+    }
+
+    /// @brief Add a node bypassing the cost allocator.
+    /// @param Costs Cost vector ptr for the new node (must be convertible to
+    ///        VectorPtr).
+    /// @return Node iterator for the added node.
+    ///
+    ///   This method allows for fast addition of a node whose costs don't need
+    /// to be passed through the cost allocator. The most common use case for
+    /// this is when duplicating costs from an existing node (when using a
+    /// pooling allocator). These have already been uniqued, so we can avoid
+    /// re-constructing and re-uniquing them by attaching them directly to the
+    /// new node.
+    template <typename OtherVectorPtrT>
+    NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
+      NodeId NId = addConstructedNode(NodeEntry(Costs));
+      if (Solver)
+        Solver->handleAddNode(NId);
+      return NId;
+    }
+
+    /// @brief Add an edge between the given nodes with the given costs.
+    /// @param N1Id First node.
+    /// @param N2Id Second node.
+    /// @param Costs Cost matrix for new edge.
+    /// @return Edge iterator for the added edge.
+    template <typename OtherVectorT>
+    EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherVectorT Costs) {
+      assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
+             getNodeCosts(N2Id).getLength() == Costs.getCols() &&
+             "Matrix dimensions mismatch.");
+      // Get cost matrix from the problem domain.
+      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
+      if (Solver)
+        Solver->handleAddEdge(EId);
+      return EId;
+    }
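+
+    // Usage sketch (illustrative only, assuming a Graph instantiation whose
+    // Vector/Matrix types are PBQP::Vector and PBQP::Matrix):
+    //
+    //   Graph G;
+    //   NodeId N1 = G.addNode(Vector(2, 0));         // 2 options, zero cost
+    //   NodeId N2 = G.addNode(Vector(3, 0));         // 3 options, zero cost
+    //   Matrix M(2, 3, 1);                           // uniform edge cost 1
+    //   M[0][0] = 10;                                // penalize pairing (0,0)
+    //   EdgeId E = G.addEdge(N1, N2, std::move(M));  // rows=N1, cols=N2
+    //
+    // The matrix dimensions must match the two nodes' cost vector lengths,
+    // as the assertion above enforces.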
+
+    /// @brief Add an edge bypassing the cost allocator.
+    /// @param N1Id First node.
+    /// @param N2Id Second node.
+    /// @param Costs Cost matrix for new edge.
+    /// @return Edge iterator for the added edge.
+    ///
+    ///   This method allows for fast addition of an edge whose costs don't need
+    /// to be passed through the cost allocator. The most common use case for
+    /// this is when duplicating costs from an existing edge (when using a
+    /// pooling allocator). These have already been uniqued, so we can avoid
+    /// re-constructing and re-uniquing them by attaching them directly to the
+    /// new edge.
+    template <typename OtherMatrixPtrT>
+    EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
+                                         OtherMatrixPtrT Costs) {
+      assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
+             getNodeCosts(N2Id).getLength() == Costs->getCols() &&
+             "Matrix dimensions mismatch.");
+      // Get cost matrix from the problem domain.
+      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
+      if (Solver)
+        Solver->handleAddEdge(EId);
+      return EId;
+    }
+
+    /// @brief Returns true if the graph is empty.
+    bool empty() const { return NodeIdSet(*this).empty(); }
+
+    NodeIdSet nodeIds() const { return NodeIdSet(*this); }
+    EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
+
+    AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
+
+    /// @brief Get the number of nodes in the graph.
+    /// @return Number of nodes in the graph.
+    unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
+
+    /// @brief Get the number of edges in the graph.
+    /// @return Number of edges in the graph.
+    unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
+
+    /// @brief Set a node's cost vector.
+    /// @param NId Node to update.
+    /// @param Costs New costs to set.
+    template <typename OtherVectorT>
+    void setNodeCosts(NodeId NId, OtherVectorT Costs) {
+      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+      if (Solver)
+        Solver->handleSetNodeCosts(NId, *AllocatedCosts);
+      getNode(NId).Costs = AllocatedCosts;
+    }
+
+    /// @brief Get a VectorPtr to a node's cost vector. Rarely useful - use
+    ///        getNodeCosts where possible.
+    /// @param NId Node id.
+    /// @return VectorPtr to node cost vector.
+    ///
+    ///   This method is primarily useful for duplicating costs quickly by
+    /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
+    /// getNodeCosts when dealing with node cost values.
+    const VectorPtr& getNodeCostsPtr(NodeId NId) const {
+      return getNode(NId).Costs;
+    }
+
+    /// @brief Get a node's cost vector.
+    /// @param NId Node id.
+    /// @return Node cost vector.
+    const Vector& getNodeCosts(NodeId NId) const {
+      return *getNodeCostsPtr(NId);
+    }
+
+    NodeMetadata& getNodeMetadata(NodeId NId) {
+      return getNode(NId).Metadata;
+    }
+
+    const NodeMetadata& getNodeMetadata(NodeId NId) const {
+      return getNode(NId).Metadata;
+    }
+
+    typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
+      return getNode(NId).getAdjEdgeIds().size();
+    }
+
+    /// @brief Update an edge's cost matrix.
+    /// @param EId Edge id.
+    /// @param Costs New cost matrix.
+    template <typename OtherMatrixT>
+    void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
+      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+      if (Solver)
+        Solver->handleUpdateCosts(EId, *AllocatedCosts);
+      getEdge(EId).Costs = AllocatedCosts;
+    }
+
+    /// @brief Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
+    ///        getEdgeCosts where possible.
+    /// @param EId Edge id.
+    /// @return MatrixPtr to edge cost matrix.
+    ///
+    ///   This method is primarily useful for duplicating costs quickly by
+    /// bypassing the cost allocator. See addEdgeBypassingCostAllocator. Prefer
+    /// getEdgeCosts when dealing with edge cost values.
+    const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
+      return getEdge(EId).Costs;
+    }
+
+    /// @brief Get an edge's cost matrix.
+    /// @param EId Edge id.
+    /// @return Edge cost matrix.
+    const Matrix& getEdgeCosts(EdgeId EId) const {
+      return *getEdge(EId).Costs;
+    }
+
+    EdgeMetadata& getEdgeMetadata(EdgeId EId) {
+      return getEdge(EId).Metadata;
+    }
+
+    const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
+      return getEdge(EId).Metadata;
+    }
+
+    /// @brief Get the first node connected to this edge.
+    /// @param EId Edge id.
+    /// @return The first node connected to the given edge.
+    NodeId getEdgeNode1Id(EdgeId EId) const {
+      return getEdge(EId).getN1Id();
+    }
+
+    /// @brief Get the second node connected to this edge.
+    /// @param EId Edge id.
+    /// @return The second node connected to the given edge.
+    NodeId getEdgeNode2Id(EdgeId EId) const {
+      return getEdge(EId).getN2Id();
+    }
+
+    /// @brief Get the "other" node connected to this edge.
+    /// @param EId Edge id.
+    /// @param NId Node id for the "given" node.
+    /// @return The id of the "other" node connected to this edge.
+    NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
+      EdgeEntry &E = getEdge(EId);
+      if (E.getN1Id() == NId)
+        return E.getN2Id();
+      return E.getN1Id();
+    }
+
+    /// @brief Get the edge connecting two nodes.
+    /// @param N1Id First node id.
+    /// @param N2Id Second node id.
+    /// @return An id for edge (N1Id, N2Id) if such an edge exists,
+    ///         otherwise returns an invalid edge id.
+    EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
+      for (auto AEId : adjEdgeIds(N1Id)) {
+        if ((getEdgeNode1Id(AEId) == N2Id) ||
+            (getEdgeNode2Id(AEId) == N2Id)) {
+          return AEId;
+        }
+      }
+      return invalidEdgeId();
+    }
+
+    /// @brief Remove a node from the graph.
+    /// @param NId Node id.
+    void removeNode(NodeId NId) {
+      if (Solver)
+        Solver->handleRemoveNode(NId);
+      NodeEntry &N = getNode(NId);
+      // TODO: Can this be for-each'd?
+      for (AdjEdgeItr AEItr = N.adjEdgesBegin(),
+             AEEnd = N.adjEdgesEnd();
+           AEItr != AEEnd;) {
+        EdgeId EId = *AEItr;
+        ++AEItr;
+        removeEdge(EId);
+      }
+      FreeNodeIds.push_back(NId);
+    }
+
+    /// @brief Disconnect an edge from the given node.
+    ///
+    /// Removes the given edge from the adjacency list of the given node.
+    /// This operation leaves the edge in an 'asymmetric' state: It will no
+    /// longer appear in an iteration over the given node's (NId's) edges, but
+    /// will appear in an iteration over the 'other', unnamed node's edges.
+    ///
+    /// This does not correspond to any normal graph operation, but exists to
+    /// support efficient PBQP graph-reduction based solvers. It is used to
+    /// 'effectively' remove the unnamed node from the graph while the solver
+    /// is performing the reduction. The solver will later call reconnectNode
+    /// to restore the edge in the named node's adjacency list.
+    ///
+    /// Since the degree of a node is the number of connected edges,
+    /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
+    /// drop by 1.
+    ///
+    /// A disconnected edge WILL still appear in an iteration over the graph
+    /// edges.
+    ///
+    /// A disconnected edge should not be removed from the graph, it should be
+    /// reconnected first.
+    ///
+    /// A disconnected edge can be reconnected by calling the reconnectEdge
+    /// method.
+    void disconnectEdge(EdgeId EId, NodeId NId) {
+      if (Solver)
+        Solver->handleDisconnectEdge(EId, NId);
+
+      EdgeEntry &E = getEdge(EId);
+      E.disconnectFrom(*this, NId);
+    }
+
+    /// @brief Convenience method to disconnect all neighbours from the given
+    ///        node.
+    void disconnectAllNeighborsFromNode(NodeId NId) {
+      for (auto AEId : adjEdgeIds(NId))
+        disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
+    }
+
+    /// @brief Re-attach an edge to its nodes.
+    ///
+    /// Adds an edge that had been previously disconnected back into the
+    /// adjacency set of the nodes that the edge connects.
+    void reconnectEdge(EdgeId EId, NodeId NId) {
+      EdgeEntry &E = getEdge(EId);
+      E.connectTo(*this, EId, NId);
+      if (Solver)
+        Solver->handleReconnectEdge(EId, NId);
+    }
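+
+    // Reduction-style usage sketch (names are illustrative): a solver that
+    // wants to 'effectively' remove node NId during reduction, then restore
+    // it during backpropagation, can do:
+    //
+    //   G.disconnectAllNeighborsFromNode(NId); // neighbors' degrees drop
+    //   // ... continue reducing / solve the remaining graph ...
+    //   for (auto AEId : G.adjEdgeIds(NId))    // NId still lists its edges
+    //     G.reconnectEdge(AEId, G.getEdgeOtherNodeId(AEId, NId));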
+
+    /// @brief Remove an edge from the graph.
+    /// @param EId Edge id.
+    void removeEdge(EdgeId EId) {
+      if (Solver)
+        Solver->handleRemoveEdge(EId);
+      EdgeEntry &E = getEdge(EId);
+      E.disconnect();
+      FreeEdgeIds.push_back(EId);
+      Edges[EId].invalidate();
+    }
+
+    /// @brief Remove all nodes and edges from the graph.
+    void clear() {
+      Nodes.clear();
+      FreeNodeIds.clear();
+      Edges.clear();
+      FreeEdgeIds.clear();
+    }
+  };
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_GRAPH_HPP
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
new file mode 100644
index 0000000..ba405e8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
@@ -0,0 +1,292 @@
+//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_MATH_H
+#define LLVM_CODEGEN_PBQP_MATH_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <memory>
+
+namespace llvm {
+namespace PBQP {
+
+using PBQPNum = float;
+
+/// \brief PBQP Vector class.
+class Vector {
+  friend hash_code hash_value(const Vector &);
+
+public:
+  /// \brief Construct a PBQP vector of the given size.
+  explicit Vector(unsigned Length)
+    : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
+
+  /// \brief Construct a PBQP vector with initializer.
+  Vector(unsigned Length, PBQPNum InitVal)
+    : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+    std::fill(Data.get(), Data.get() + Length, InitVal);
+  }
+
+  /// \brief Copy construct a PBQP vector.
+  Vector(const Vector &V)
+    : Length(V.Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+    std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
+  }
+
+  /// \brief Move construct a PBQP vector.
+  Vector(Vector &&V)
+    : Length(V.Length), Data(std::move(V.Data)) {
+    V.Length = 0;
+  }
+
+  /// \brief Comparison operator.
+  bool operator==(const Vector &V) const {
+    assert(Length != 0 && Data && "Invalid vector");
+    if (Length != V.Length)
+      return false;
+    return std::equal(Data.get(), Data.get() + Length, V.Data.get());
+  }
+
+  /// \brief Return the length of the vector
+  unsigned getLength() const {
+    assert(Length != 0 && Data && "Invalid vector");
+    return Length;
+  }
+
+  /// \brief Element access.
+  PBQPNum& operator[](unsigned Index) {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Index < Length && "Vector element access out of bounds.");
+    return Data[Index];
+  }
+
+  /// \brief Const element access.
+  const PBQPNum& operator[](unsigned Index) const {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Index < Length && "Vector element access out of bounds.");
+    return Data[Index];
+  }
+
+  /// \brief Add another vector to this one.
+  Vector& operator+=(const Vector &V) {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Length == V.Length && "Vector length mismatch.");
+    std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
+                   std::plus<PBQPNum>());
+    return *this;
+  }
+
+  /// \brief Returns the index of the minimum value in this vector
+  unsigned minIndex() const {
+    assert(Length != 0 && Data && "Invalid vector");
+    return std::min_element(Data.get(), Data.get() + Length) - Data.get();
+  }
+
+private:
+  unsigned Length;
+  std::unique_ptr<PBQPNum []> Data;
+};
+
+/// \brief Return a hash_value for the given vector.
+inline hash_code hash_value(const Vector &V) {
+  unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
+  unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
+  return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
+}
+
+/// \brief Output a textual representation of the given vector on the given
+///        output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Vector &V) {
+  assert((V.getLength() != 0) && "Zero-length vector badness.");
+
+  OS << "[ " << V[0];
+  for (unsigned i = 1; i < V.getLength(); ++i)
+    OS << ", " << V[i];
+  OS << " ]";
+
+  return OS;
+}
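+
+// Usage sketch (llvm::errs() stands in for any ostream-like stream):
+//
+//   Vector V(3, 1.0);             // [ 1, 1, 1 ]
+//   V[1] = 0.5;
+//   Vector W(3, 2.0);
+//   V += W;                       // [ 3, 2.5, 3 ]
+//   unsigned Best = V.minIndex(); // 1 (the 2.5 entry)
+//   llvm::errs() << V << "\n";    // prints "[ 3, 2.5, 3 ]"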
+
+/// \brief PBQP Matrix class
+class Matrix {
+private:
+  friend hash_code hash_value(const Matrix &);
+
+public:
+  /// \brief Construct a PBQP Matrix with the given dimensions.
+  Matrix(unsigned Rows, unsigned Cols) :
+    Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+  }
+
+  /// \brief Construct a PBQP Matrix with the given dimensions and initial
+  /// value.
+  Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
+    : Rows(Rows), Cols(Cols),
+      Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+    std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
+  }
+
+  /// \brief Copy construct a PBQP matrix.
+  Matrix(const Matrix &M)
+    : Rows(M.Rows), Cols(M.Cols),
+      Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+    std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
+  }
+
+  /// \brief Move construct a PBQP matrix.
+  Matrix(Matrix &&M)
+    : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
+    M.Rows = M.Cols = 0;
+  }
+
+  /// \brief Comparison operator.
+  bool operator==(const Matrix &M) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    if (Rows != M.Rows || Cols != M.Cols)
+      return false;
+    return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
+  }
+
+  /// \brief Return the number of rows in this matrix.
+  unsigned getRows() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    return Rows;
+  }
+
+  /// \brief Return the number of cols in this matrix.
+  unsigned getCols() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    return Cols;
+  }
+
+  /// \brief Matrix element access.
+  PBQPNum* operator[](unsigned R) {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(R < Rows && "Row out of bounds.");
+    return Data.get() + (R * Cols);
+  }
+
+  /// \brief Matrix element access.
+  const PBQPNum* operator[](unsigned R) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(R < Rows && "Row out of bounds.");
+    return Data.get() + (R * Cols);
+  }
+
+  /// \brief Returns the given row as a vector.
+  Vector getRowAsVector(unsigned R) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Vector V(Cols);
+    for (unsigned C = 0; C < Cols; ++C)
+      V[C] = (*this)[R][C];
+    return V;
+  }
+
+  /// \brief Returns the given column as a vector.
+  Vector getColAsVector(unsigned C) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Vector V(Rows);
+    for (unsigned R = 0; R < Rows; ++R)
+      V[R] = (*this)[R][C];
+    return V;
+  }
+
+  /// \brief Matrix transpose.
+  Matrix transpose() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Matrix M(Cols, Rows);
+    for (unsigned r = 0; r < Rows; ++r)
+      for (unsigned c = 0; c < Cols; ++c)
+        M[c][r] = (*this)[r][c];
+    return M;
+  }
+
+  /// \brief Add the given matrix to this one.
+  Matrix& operator+=(const Matrix &M) {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(Rows == M.Rows && Cols == M.Cols &&
+           "Matrix dimensions mismatch.");
+    std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
+                   Data.get(), std::plus<PBQPNum>());
+    return *this;
+  }
+
+  Matrix operator+(const Matrix &M) {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Matrix Tmp(*this);
+    Tmp += M;
+    return Tmp;
+  }
+
+private:
+  unsigned Rows, Cols;
+  std::unique_ptr<PBQPNum []> Data;
+};
+
+/// \brief Return a hash_code for the given matrix.
+inline hash_code hash_value(const Matrix &M) {
+  unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
+  unsigned *MEnd =
+    reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
+  return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
+}
+
+/// \brief Output a textual representation of the given matrix on the given
+///        output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Matrix &M) {
+  assert((M.getRows() != 0) && "Zero-row matrix badness.");
+  for (unsigned i = 0; i < M.getRows(); ++i)
+    OS << M.getRowAsVector(i) << "\n";
+  return OS;
+}
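+
+// Usage sketch:
+//
+//   Matrix M(2, 3, 0);               // 2x3, all zeros
+//   M[0][2] = 5;
+//   Matrix T = M.transpose();        // 3x2; T[2][0] == 5
+//   Vector R0 = M.getRowAsVector(0); // [ 0, 0, 5 ]
+//   Vector C2 = M.getColAsVector(2); // [ 5, 0 ]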
+
+template <typename Metadata>
+class MDVector : public Vector {
+public:
+  MDVector(const Vector &v) : Vector(v), md(*this) {}
+  MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
+
+  const Metadata& getMetadata() const { return md; }
+
+private:
+  Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDVector<Metadata> &V) {
+  return hash_value(static_cast<const Vector&>(V));
+}
+
+template <typename Metadata>
+class MDMatrix : public Matrix {
+public:
+  MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
+  MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
+
+  const Metadata& getMetadata() const { return md; }
+
+private:
+  Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDMatrix<Metadata> &M) {
+  return hash_value(static_cast<const Matrix&>(M));
+}
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_MATH_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
new file mode 100644
index 0000000..8aeb519
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
@@ -0,0 +1,223 @@
+//===- ReductionRules.h - Reduction Rules -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reduction Rules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+#define LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+
+#include "Graph.h"
+#include "Math.h"
+#include "Solution.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+namespace PBQP {
+
+  /// \brief Reduce a node of degree one.
+  ///
+  /// Propagate costs from the given node, which must be of degree one, to its
+  /// neighbor. Notify the problem domain.
+  template <typename GraphT>
+  void applyR1(GraphT &G, typename GraphT::NodeId NId) {
+    using NodeId = typename GraphT::NodeId;
+    using EdgeId = typename GraphT::EdgeId;
+    using Vector = typename GraphT::Vector;
+    using Matrix = typename GraphT::Matrix;
+    using RawVector = typename GraphT::RawVector;
+
+    assert(G.getNodeDegree(NId) == 1 &&
+           "R1 applied to node with degree != 1.");
+
+    EdgeId EId = *G.adjEdgeIds(NId).begin();
+    NodeId MId = G.getEdgeOtherNodeId(EId, NId);
+
+    const Matrix &ECosts = G.getEdgeCosts(EId);
+    const Vector &XCosts = G.getNodeCosts(NId);
+    RawVector YCosts = G.getNodeCosts(MId);
+
+    // Duplicate a little to avoid transposing matrices.
+    if (NId == G.getEdgeNode1Id(EId)) {
+      for (unsigned j = 0; j < YCosts.getLength(); ++j) {
+        PBQPNum Min = ECosts[0][j] + XCosts[0];
+        for (unsigned i = 1; i < XCosts.getLength(); ++i) {
+          PBQPNum C = ECosts[i][j] + XCosts[i];
+          if (C < Min)
+            Min = C;
+        }
+        YCosts[j] += Min;
+      }
+    } else {
+      for (unsigned i = 0; i < YCosts.getLength(); ++i) {
+        PBQPNum Min = ECosts[i][0] + XCosts[0];
+        for (unsigned j = 1; j < XCosts.getLength(); ++j) {
+          PBQPNum C = ECosts[i][j] + XCosts[j];
+          if (C < Min)
+            Min = C;
+        }
+        YCosts[i] += Min;
+      }
+    }
+    G.setNodeCosts(MId, YCosts);
+    G.disconnectEdge(EId, MId);
+  }
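+
+  // Worked example (numbers chosen for illustration): let node N have costs
+  // X = [ 1, 4 ], let its single edge have cost matrix (N indexing the rows)
+  //
+  //   E = [ 0 3 ]
+  //       [ 2 0 ]
+  //
+  // and let the neighbor M start with costs Y = [ 0, 0 ]. For each column j
+  // the rule adds min over i of (E[i][j] + X[i]):
+  //
+  //   j = 0: min(0 + 1, 2 + 4) = 1
+  //   j = 1: min(3 + 1, 0 + 4) = 4
+  //
+  // so M's costs become [ 1, 4 ] and the edge is disconnected from M.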
+
+  template <typename GraphT>
+  void applyR2(GraphT &G, typename GraphT::NodeId NId) {
+    using NodeId = typename GraphT::NodeId;
+    using EdgeId = typename GraphT::EdgeId;
+    using Vector = typename GraphT::Vector;
+    using Matrix = typename GraphT::Matrix;
+    using RawMatrix = typename GraphT::RawMatrix;
+
+    assert(G.getNodeDegree(NId) == 2 &&
+           "R2 applied to node with degree != 2.");
+
+    const Vector &XCosts = G.getNodeCosts(NId);
+
+    typename GraphT::AdjEdgeItr AEItr = G.adjEdgeIds(NId).begin();
+    EdgeId YXEId = *AEItr,
+           ZXEId = *(++AEItr);
+
+    NodeId YNId = G.getEdgeOtherNodeId(YXEId, NId),
+           ZNId = G.getEdgeOtherNodeId(ZXEId, NId);
+
+    bool FlipEdge1 = (G.getEdgeNode1Id(YXEId) == NId),
+         FlipEdge2 = (G.getEdgeNode1Id(ZXEId) == NId);
+
+    const Matrix *YXECosts = FlipEdge1 ?
+      new Matrix(G.getEdgeCosts(YXEId).transpose()) :
+      &G.getEdgeCosts(YXEId);
+
+    const Matrix *ZXECosts = FlipEdge2 ?
+      new Matrix(G.getEdgeCosts(ZXEId).transpose()) :
+      &G.getEdgeCosts(ZXEId);
+
+    unsigned XLen = XCosts.getLength(),
+      YLen = YXECosts->getRows(),
+      ZLen = ZXECosts->getRows();
+
+    RawMatrix Delta(YLen, ZLen);
+
+    for (unsigned i = 0; i < YLen; ++i) {
+      for (unsigned j = 0; j < ZLen; ++j) {
+        PBQPNum Min = (*YXECosts)[i][0] + (*ZXECosts)[j][0] + XCosts[0];
+        for (unsigned k = 1; k < XLen; ++k) {
+          PBQPNum C = (*YXECosts)[i][k] + (*ZXECosts)[j][k] + XCosts[k];
+          if (C < Min) {
+            Min = C;
+          }
+        }
+        Delta[i][j] = Min;
+      }
+    }
+
+    if (FlipEdge1)
+      delete YXECosts;
+
+    if (FlipEdge2)
+      delete ZXECosts;
+
+    EdgeId YZEId = G.findEdge(YNId, ZNId);
+
+    if (YZEId == G.invalidEdgeId()) {
+      YZEId = G.addEdge(YNId, ZNId, Delta);
+    } else {
+      const Matrix &YZECosts = G.getEdgeCosts(YZEId);
+      if (YNId == G.getEdgeNode1Id(YZEId)) {
+        G.updateEdgeCosts(YZEId, Delta + YZECosts);
+      } else {
+        G.updateEdgeCosts(YZEId, Delta.transpose() + YZECosts);
+      }
+    }
+
+    G.disconnectEdge(YXEId, YNId);
+    G.disconnectEdge(ZXEId, ZNId);
+
+    // TODO: Try to normalize newly added/modified edge.
+  }
+
+#ifndef NDEBUG
+  // Does this cost vector have any register options?
+  template <typename VectorT>
+  bool hasRegisterOptions(const VectorT &V) {
+    unsigned VL = V.getLength();
+
+    // An empty or spill only cost vector does not provide any register option.
+    if (VL <= 1)
+      return false;
+
+    // If there are registers in the cost vector, but all of them have infinite
+    // costs, then ... there is no available register.
+    for (unsigned i = 1; i < VL; ++i)
+      if (V[i] != std::numeric_limits<PBQP::PBQPNum>::infinity())
+        return true;
+
+    return false;
+  }
+#endif
+
+  // \brief Find a solution to a fully reduced graph by backpropagation.
+  //
+  // Given a graph and a reduction order, pop each node from the reduction
+  // order and greedily compute a minimum solution based on the node costs, and
+  // the dependent costs due to previously solved nodes.
+  //
+  // Note - This does not return the graph to its original (pre-reduction)
+  //        state: the existing solvers destructively alter the node and edge
+  //        costs. Given that, the backpropagate function doesn't attempt to
+  //        replace the edges either, but leaves the graph in its reduced
+  //        state.
+  template <typename GraphT, typename StackT>
+  Solution backpropagate(GraphT& G, StackT stack) {
+    using NodeId = GraphBase::NodeId;
+    using Matrix = typename GraphT::Matrix;
+    using RawVector = typename GraphT::RawVector;
+
+    Solution s;
+
+    while (!stack.empty()) {
+      NodeId NId = stack.back();
+      stack.pop_back();
+
+      RawVector v = G.getNodeCosts(NId);
+
+#ifndef NDEBUG
+      // Although a conservatively allocatable node can be allocated to a register,
+      // spilling it may provide a lower cost solution. Assert here that spilling
+      // is done by choice, not because there were no registers available.
+      if (G.getNodeMetadata(NId).wasConservativelyAllocatable())
+        assert(hasRegisterOptions(v) && "A conservatively allocatable node "
+                                        "must have available register options");
+#endif
+
+      for (auto EId : G.adjEdgeIds(NId)) {
+        const Matrix& edgeCosts = G.getEdgeCosts(EId);
+        if (NId == G.getEdgeNode1Id(EId)) {
+          NodeId mId = G.getEdgeNode2Id(EId);
+          v += edgeCosts.getColAsVector(s.getSelection(mId));
+        } else {
+          NodeId mId = G.getEdgeNode1Id(EId);
+          v += edgeCosts.getRowAsVector(s.getSelection(mId));
+        }
+      }
+
+      s.setSelection(NId, v.minIndex());
+    }
+
+    return s;
+  }
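+
+  // Usage sketch (assuming the reduction loop recorded nodes in a
+  // std::vector<GraphBase::NodeId> in the order they were reduced):
+  //
+  //   std::vector<GraphBase::NodeId> ReductionOrder;
+  //   // ... repeatedly pick NId, call applyR1/applyR2, and then:
+  //   //     ReductionOrder.push_back(NId);
+  //   Solution S = backpropagate(G, std::move(ReductionOrder));
+  //   unsigned Choice = S.getSelection(SomeNId);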
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
new file mode 100644
index 0000000..6a24727
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
@@ -0,0 +1,56 @@
+//===- Solution.h - PBQP Solution -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Solution class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_SOLUTION_H
+#define LLVM_CODEGEN_PBQP_SOLUTION_H
+
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include <cassert>
+#include <map>
+
+namespace llvm {
+namespace PBQP {
+
+  /// \brief Represents a solution to a PBQP problem.
+  ///
+  /// To get the selection for each node in the problem, use the getSelection
+  /// method.
+  class Solution {
+  private:
+    using SelectionsMap = std::map<GraphBase::NodeId, unsigned>;
+    SelectionsMap selections;
+
+  public:
+    /// \brief Initialise an empty solution.
+    Solution() = default;
+
+    /// \brief Set the selection for a given node.
+    /// @param nodeId Node id.
+    /// @param selection Selection for nodeId.
+    void setSelection(GraphBase::NodeId nodeId, unsigned selection) {
+      selections[nodeId] = selection;
+    }
+
+    /// \brief Get a node's selection.
+    /// @param nodeId Node id.
+    /// @return The selection for nodeId.
+    unsigned getSelection(GraphBase::NodeId nodeId) const {
+      SelectionsMap::const_iterator sItr = selections.find(nodeId);
+      assert(sItr != selections.end() && "No selection for node.");
+      return sItr->second;
+    }
+  };
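+
+  // Usage sketch:
+  //
+  //   Solution S;
+  //   S.setSelection(/*nodeId=*/0, /*selection=*/2);
+  //   unsigned Sel = S.getSelection(0); // 2; asserts if no selection exists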
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_SOLUTION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
new file mode 100644
index 0000000..269b7a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
@@ -0,0 +1,71 @@
+//===- PBQPRAConstraint.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPRAConstraint interface, for classes which apply
+// constraints (e.g. spill costs, interference, coalescing) to PBQP register
+// allocation problems, and the PBQPRAConstraintList composer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
+#define LLVM_CODEGEN_PBQPRACONSTRAINT_H
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace PBQP {
+namespace RegAlloc {
+
+// Forward declare PBQP graph class.
+class PBQPRAGraph;
+
+} // end namespace RegAlloc
+} // end namespace PBQP
+
+using PBQPRAGraph = PBQP::RegAlloc::PBQPRAGraph;
+
+/// @brief Abstract base for classes implementing PBQP register allocation
+///        constraints (e.g. spill costs, interference, coalescing).
+class PBQPRAConstraint {
+public:
+  virtual ~PBQPRAConstraint() = 0;
+  virtual void apply(PBQPRAGraph &G) = 0;
+
+private:
+  virtual void anchor();
+};
+
+/// @brief PBQP register allocation constraint composer.
+///
+///   Constraints added to this list will be applied, in the order that they are
+/// added, to the PBQP graph.
+class PBQPRAConstraintList : public PBQPRAConstraint {
+public:
+  void apply(PBQPRAGraph &G) override {
+    for (auto &C : Constraints)
+      C->apply(G);
+  }
+
+  void addConstraint(std::unique_ptr<PBQPRAConstraint> C) {
+    if (C)
+      Constraints.push_back(std::move(C));
+  }
+
+private:
+  std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;
+
+  void anchor() override;
+};
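+
+// Usage sketch (SpillCosts and Interference stand in for concrete constraint
+// implementations defined elsewhere):
+//
+//   auto CL = llvm::make_unique<PBQPRAConstraintList>();
+//   CL->addConstraint(llvm::make_unique<SpillCosts>());
+//   CL->addConstraint(llvm::make_unique<Interference>());
+//   CL->apply(G);  // runs each constraint over G, in insertion order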
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQPRACONSTRAINT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
new file mode 100644
index 0000000..14ef0ec
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
@@ -0,0 +1,48 @@
+//===-- llvm/CodeGen/ParallelCG.h - Parallel code generation ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header declares functions that can be used for parallel code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PARALLELCG_H
+#define LLVM_CODEGEN_PARALLELCG_H
+
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <functional>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class Module;
+class TargetOptions;
+class raw_pwrite_stream;
+
+/// Split M into OSs.size() partitions, and generate code for each. Takes a
+/// factory function for the TargetMachine TMFactory. Writes OSs.size() output
+/// files to the output streams in OSs. The resulting output files, if linked
+/// together, are intended to be equivalent to the single output file that
+/// would have been generated by code-generating M as a whole.
+///
+/// Writes bitcode for individual partitions into output streams in BCOSs, if
+/// BCOSs is not empty.
+///
+/// \returns M if OSs.size() == 1, otherwise returns std::unique_ptr<Module>().
+std::unique_ptr<Module>
+splitCodeGen(std::unique_ptr<Module> M, ArrayRef<raw_pwrite_stream *> OSs,
+             ArrayRef<llvm::raw_pwrite_stream *> BCOSs,
+             const std::function<std::unique_ptr<TargetMachine>()> &TMFactory,
+             TargetMachine::CodeGenFileType FT = TargetMachine::CGFT_ObjectFile,
+             bool PreserveLocals = false);
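+
+// Usage sketch (BuildTM is a hypothetical helper returning a configured
+// TargetMachine; the output streams are assumed to be open already):
+//
+//   std::vector<raw_pwrite_stream *> OSs = {&OS0, &OS1}; // two partitions
+//   splitCodeGen(std::move(M), OSs, /*BCOSs=*/{},
+//                [&] { return std::unique_ptr<TargetMachine>(BuildTM()); });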
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/Passes.h b/linux-x64/clang/include/llvm/CodeGen/Passes.h
new file mode 100644
index 0000000..68fd04b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/Passes.h
@@ -0,0 +1,439 @@
+//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces to access the target independent code generation
+// passes provided by the LLVM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PASSES_H
+#define LLVM_CODEGEN_PASSES_H
+
+#include <functional>
+#include <string>
+
+namespace llvm {
+
+class FunctionPass;
+class MachineFunction;
+class MachineFunctionPass;
+class ModulePass;
+class Pass;
+class TargetMachine;
+class TargetRegisterClass;
+class raw_ostream;
+
+} // End llvm namespace
+
+/// List of target independent CodeGen pass IDs.
+namespace llvm {
+  FunctionPass *createAtomicExpandPass();
+
+  /// createUnreachableBlockEliminationPass - The LLVM code generator does not
+  /// work well with unreachable basic blocks (what live ranges make sense for a
+  /// block that cannot be reached?).  As such, a code generator should either
+  /// not instruction select unreachable blocks, or run this pass as its
+  /// last LLVM modifying pass to clean up blocks that are not reachable from
+  /// the entry block.
+  FunctionPass *createUnreachableBlockEliminationPass();
+
+  /// MachineFunctionPrinter pass - This pass prints out the machine function to
+  /// the given stream as a debugging tool.
+  MachineFunctionPass *
+  createMachineFunctionPrinterPass(raw_ostream &OS,
+                                   const std::string &Banner ="");
+
+  /// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
+  /// using the MIR serialization format.
+  MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+
+  /// This pass resets a MachineFunction when it has the FailedISel property
+  /// as if it was just created.
+  /// If EmitFallbackDiag is true, the pass will emit a
+  /// DiagnosticInfoISelFallback for every MachineFunction it resets.
+  /// If AbortOnFailedISel is true, abort compilation instead of resetting.
+  MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
+                                                      bool AbortOnFailedISel);
+
+  /// createCodeGenPreparePass - Transform the code to expose more pattern
+  /// matching during instruction selection.
+  FunctionPass *createCodeGenPreparePass();
+
+  /// createScalarizeMaskedMemIntrinPass - Replace masked load, store, gather
+  /// and scatter intrinsics with scalar code when target doesn't support them.
+  FunctionPass *createScalarizeMaskedMemIntrinPass();
+
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// or load-linked/store-conditional loops.
+  extern char &AtomicExpandID;
+
+  /// MachineLoopInfo - This pass is a loop analysis pass.
+  extern char &MachineLoopInfoID;
+
+  /// MachineDominators - This pass is a machine dominators analysis pass.
+  extern char &MachineDominatorsID;
+
+  /// MachineDominanceFrontier - This pass is a machine dominance frontier
+  /// analysis pass.
+  extern char &MachineDominanceFrontierID;
+
+  /// MachineRegionInfo - This pass computes SESE regions for machine functions.
+  extern char &MachineRegionInfoPassID;
+
+  /// EdgeBundles analysis - Bundle machine CFG edges.
+  extern char &EdgeBundlesID;
+
+  /// LiveVariables pass - This pass computes the set of blocks in which each
+  /// variable is live and sets machine operand kill flags.
+  extern char &LiveVariablesID;
+
+  /// PHIElimination - This pass eliminates machine instruction PHI nodes
+  /// by inserting copy instructions.  This destroys SSA information, but is the
+  /// desired input for some register allocators.  This pass is "required" by
+  /// these register allocators like this: AU.addRequiredID(PHIEliminationID);
+  extern char &PHIEliminationID;
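+
+  // For example (illustrative), a pass that requires PHI elimination to run
+  // first would declare:
+  //
+  //   void getAnalysisUsage(AnalysisUsage &AU) const override {
+  //     AU.addRequiredID(PHIEliminationID);
+  //     MachineFunctionPass::getAnalysisUsage(AU);
+  //   }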
+
+  /// LiveIntervals - This analysis keeps track of the live ranges of virtual
+  /// and physical registers.
+  extern char &LiveIntervalsID;
+
+  /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+  extern char &LiveStacksID;
+
+  /// TwoAddressInstruction - This pass reduces two-address instructions to
+  /// use two operands. This destroys SSA information but it is desired by
+  /// register allocators.
+  extern char &TwoAddressInstructionPassID;
+
+  /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
+  extern char &ProcessImplicitDefsID;
+
+  /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+  extern char &RegisterCoalescerID;
+
+  /// MachineScheduler - This pass schedules machine instructions.
+  extern char &MachineSchedulerID;
+
+  /// PostMachineScheduler - This pass schedules machine instructions postRA.
+  extern char &PostMachineSchedulerID;
+
+  /// SpillPlacement analysis. Suggest optimal placement of spill code between
+  /// basic blocks.
+  extern char &SpillPlacementID;
+
+  /// ShrinkWrap pass. Look for the best place to insert save and restore
+  /// instructions and update the MachineFunctionInfo with that information.
+  extern char &ShrinkWrapID;
+
+  /// LiveRangeShrink pass. Move instruction close to its definition to shrink
+  /// the definition's live range.
+  extern char &LiveRangeShrinkID;
+
+  /// Greedy register allocator.
+  extern char &RAGreedyID;
+
+  /// Basic register allocator.
+  extern char &RABasicID;
+
+  /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+  /// assigned in VirtRegMap.
+  extern char &VirtRegRewriterID;
+
+  /// UnreachableMachineBlockElimination - This pass removes unreachable
+  /// machine basic blocks.
+  extern char &UnreachableMachineBlockElimID;
+
+  /// DeadMachineInstructionElim - This pass removes dead machine instructions.
+  extern char &DeadMachineInstructionElimID;
+
+  /// This pass adds dead/undef flags after analyzing subregister lanes.
+  extern char &DetectDeadLanesID;
+
+  /// This pass performs post-RA machine sinking for COPY instructions.
+  extern char &PostRAMachineSinkingID;
+
+  /// FastRegisterAllocation Pass - This pass register allocates as fast as
+  /// possible. It is best suited for debug code where live ranges are short.
+  ///
+  FunctionPass *createFastRegisterAllocator();
+
+  /// BasicRegisterAllocation Pass - This pass implements a degenerate global
+  /// register allocator using the basic regalloc framework.
+  ///
+  FunctionPass *createBasicRegisterAllocator();
+
+  /// Greedy register allocation pass - This pass implements a global register
+  /// allocator for optimized builds.
+  ///
+  FunctionPass *createGreedyRegisterAllocator();
+
+  /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
+  /// Quadratic Programming (PBQP) based register allocator.
+  ///
+  FunctionPass *createDefaultPBQPRegisterAllocator();
+
+  /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
+  /// and eliminates abstract frame references.
+  extern char &PrologEpilogCodeInserterID;
+  MachineFunctionPass *createPrologEpilogInserterPass();
+
+  /// ExpandPostRAPseudos - This pass expands pseudo instructions after
+  /// register allocation.
+  extern char &ExpandPostRAPseudosID;
+
+  /// createPostRAHazardRecognizer - This pass runs the post-ra hazard
+  /// recognizer.
+  extern char &PostRAHazardRecognizerID;
+
+  /// createPostRAScheduler - This pass performs post register allocation
+  /// scheduling.
+  extern char &PostRASchedulerID;
+
+  /// BranchFolding - This pass performs machine code CFG based
+  /// optimizations to delete branches to branches, eliminate branches to
+  /// successor blocks (creating fall throughs), and eliminate branches over
+  /// branches.
+  extern char &BranchFolderPassID;
+
+  /// BranchRelaxation - This pass replaces branches that need to jump further
+  /// than is supported by a branch instruction.
+  extern char &BranchRelaxationPassID;
+
+  /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+  extern char &MachineFunctionPrinterPassID;
+
+  /// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
+  /// serialization format.
+  extern char &MIRPrintingPassID;
+
+  /// TailDuplicate - Duplicate blocks with unconditional branches
+  /// into tails of their predecessors.
+  extern char &TailDuplicateID;
+
+  /// Duplicate blocks with unconditional branches into tails of their
+  /// predecessors. Variant that works before register allocation.
+  extern char &EarlyTailDuplicateID;
+
+  /// MachineTraceMetrics - This pass computes critical path and CPU resource
+  /// usage in an ensemble of traces.
+  extern char &MachineTraceMetricsID;
+
+  /// EarlyIfConverter - This pass performs if-conversion on SSA form by
+  /// inserting cmov instructions.
+  extern char &EarlyIfConverterID;
+
+  /// This pass performs instruction combining using trace metrics to estimate
+  /// critical-path and resource depth.
+  extern char &MachineCombinerID;
+
+  /// StackColoring - This pass performs stack coloring and merging.
+  /// It merges disjoint allocas to reduce the stack size.
+  extern char &StackColoringID;
+
+  /// IfConverter - This pass performs machine code if conversion.
+  extern char &IfConverterID;
+
+  FunctionPass *createIfConverter(
+      std::function<bool(const MachineFunction &)> Ftor);
+
+  /// MachineBlockPlacement - This pass places basic blocks based on branch
+  /// probabilities.
+  extern char &MachineBlockPlacementID;
+
+  /// MachineBlockPlacementStats - This pass collects statistics about the
+  /// basic block placement using branch probabilities and block frequency
+  /// information.
+  extern char &MachineBlockPlacementStatsID;
+
+  /// GCLowering Pass - Used by gc.root to perform its default lowering
+  /// operations.
+  FunctionPass *createGCLoweringPass();
+
+  /// ShadowStackGCLowering - Implements the custom lowering mechanism
+  /// used by the shadow stack GC.  Only runs on functions which opt in to
+  /// the shadow stack collector.
+  FunctionPass *createShadowStackGCLoweringPass();
+
+  /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+  /// in machine code. Must be added very late during code generation, just
+  /// prior to output, and importantly after all CFG transformations (such as
+  /// branch folding).
+  extern char &GCMachineCodeAnalysisID;
+
+  /// Creates a pass to print GC metadata.
+  ///
+  FunctionPass *createGCInfoPrinter(raw_ostream &OS);
+
+  /// MachineCSE - This pass performs global CSE on machine instructions.
+  extern char &MachineCSEID;
+
+  /// ImplicitNullChecks - This pass folds null pointer checks into nearby
+  /// memory operations.
+  extern char &ImplicitNullChecksID;
+
+  /// This pass performs loop invariant code motion on machine instructions.
+  extern char &MachineLICMID;
+
+  /// This pass performs loop invariant code motion on machine instructions.
+  /// This variant works before register allocation. \see MachineLICMID.
+  extern char &EarlyMachineLICMID;
+
+  /// MachineSinking - This pass performs sinking on machine instructions.
+  extern char &MachineSinkingID;
+
+  /// MachineCopyPropagation - This pass performs copy propagation on
+  /// machine instructions.
+  extern char &MachineCopyPropagationID;
+
+  /// PeepholeOptimizer - This pass performs peephole optimizations -
+  /// like extension and comparison eliminations.
+  extern char &PeepholeOptimizerID;
+
+  /// OptimizePHIs - This pass optimizes machine instruction PHIs
+  /// to take advantage of opportunities created during DAG legalization.
+  extern char &OptimizePHIsID;
+
+  /// StackSlotColoring - This pass performs stack slot coloring.
+  extern char &StackSlotColoringID;
+
+  /// \brief This pass lays out funclets contiguously.
+  extern char &FuncletLayoutID;
+
+  /// This pass inserts the XRay instrumentation sleds if they are supported by
+  /// the target platform.
+  extern char &XRayInstrumentationID;
+
+  /// This pass inserts FEntry calls
+  extern char &FEntryInserterID;
+
+  /// \brief This pass implements the "patchable-function" attribute.
+  extern char &PatchableFunctionID;
+
+  /// createStackProtectorPass - This pass adds stack protectors to functions.
+  ///
+  FunctionPass *createStackProtectorPass();
+
+  /// createMachineVerifierPass - This pass verifies generated machine code
+  /// instructions for correctness.
+  ///
+  FunctionPass *createMachineVerifierPass(const std::string& Banner);
+
+  /// createDwarfEHPass - This pass mulches exception handling code into a form
+  /// adapted to code generation.  Required if using dwarf exception handling.
+  FunctionPass *createDwarfEHPass();
+
+  /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
+  /// in addition to the Itanium LSDA based personalities.
+  FunctionPass *createWinEHPass();
+
+  /// createSjLjEHPreparePass - This pass adapts exception handling code to use
+  /// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
+  ///
+  FunctionPass *createSjLjEHPreparePass();
+
+  /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+  /// slots relative to one another and allocates base registers to access them
+  /// when it is estimated by the target to be out of range of normal frame
+  /// pointer or stack pointer index addressing.
+  extern char &LocalStackSlotAllocationID;
+
+  /// ExpandISelPseudos - This pass expands pseudo-instructions.
+  extern char &ExpandISelPseudosID;
+
+  /// UnpackMachineBundles - This pass unpacks machine instruction bundles.
+  extern char &UnpackMachineBundlesID;
+
+  FunctionPass *
+  createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
+
+  /// FinalizeMachineBundles - This pass finalizes machine instruction
+  /// bundles (created earlier, e.g. during pre-RA scheduling).
+  extern char &FinalizeMachineBundlesID;
+
+  /// StackMapLiveness - This pass analyses the register live-out set of
+  /// stackmap/patchpoint intrinsics and attaches the calculated information to
+  /// the intrinsic for later emission to the StackMap.
+  extern char &StackMapLivenessID;
+
+  /// LiveDebugValues pass
+  extern char &LiveDebugValuesID;
+
+  /// createJumpInstrTables - This pass creates jump-instruction tables.
+  ModulePass *createJumpInstrTablesPass();
+
+  /// createForwardControlFlowIntegrityPass - This pass adds control-flow
+  /// integrity.
+  ModulePass *createForwardControlFlowIntegrityPass();
+
+  /// InterleavedAccess Pass - This pass identifies and matches interleaved
+  /// memory accesses to target specific intrinsics.
+  ///
+  FunctionPass *createInterleavedAccessPass();
+
+  /// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
+  /// TLS variables for the emulated TLS model.
+  ///
+  ModulePass *createLowerEmuTLSPass();
+
+  /// This pass lowers the @llvm.load.relative intrinsic to instructions.
+  /// This is unsafe to do earlier because a pass may combine the constant
+  /// initializer into the load, which may result in an overflowing evaluation.
+  ModulePass *createPreISelIntrinsicLoweringPass();
+
+  /// GlobalMerge - This pass merges internal (by default) globals into structs
+  /// to enable reuse of a base pointer by indexed addressing modes.
+  /// It can also be configured to focus on size optimizations only.
+  ///
+  Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+                              bool OnlyOptimizeForSize = false,
+                              bool MergeExternalByDefault = false);
+
+  /// This pass splits the stack into a safe stack and an unsafe stack to
+  /// protect against stack-based overflow vulnerabilities.
+  FunctionPass *createSafeStackPass();
+
+  /// This pass detects subregister lanes in a virtual register that are used
+  /// independently of other lanes and splits them into separate virtual
+  /// registers.
+  extern char &RenameIndependentSubregsID;
+
+  /// This pass is executed POST-RA to collect which physical registers are
+  /// preserved by given machine function.
+  FunctionPass *createRegUsageInfoCollector();
+
+  /// Return a MachineFunction pass that identifies call sites
+  /// and propagates register usage information of callee to caller
+  /// if available with the PhysicalRegisterUsageInfo pass.
+  FunctionPass *createRegUsageInfoPropPass();
+
+  /// This pass performs software pipelining on machine instructions.
+  extern char &MachinePipelinerID;
+
+  /// This pass frees the memory occupied by the MachineFunction.
+  FunctionPass *createFreeMachineFunctionPass();
+
+  /// This pass performs outlining on machine instructions directly before
+  /// printing assembly.
+  ModulePass *createMachineOutlinerPass(bool OutlineFromLinkOnceODRs = false);
+
+  /// This pass expands the experimental reduction intrinsics into sequences of
+  /// shuffles.
+  FunctionPass *createExpandReductionsPass();
+
+  // This pass expands memcmp() to load/stores.
+  FunctionPass *createExpandMemCmpPass();
+
+  /// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
+  FunctionPass *createBreakFalseDeps();
+
+  // This pass expands indirectbr instructions.
+  FunctionPass *createIndirectBrExpandPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h b/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h
new file mode 100644
index 0000000..7a007eb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h
@@ -0,0 +1,29 @@
+//===- PreISelIntrinsicLowering.h - Pre-ISel intrinsic lowering pass ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements IR lowering for the llvm.load.relative intrinsic.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
+#define LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+struct PreISelIntrinsicLoweringPass
+    : PassInfoMixin<PreISelIntrinsicLoweringPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
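+
+// Usage sketch with the new pass manager (the analysis-manager setup is
+// elided; MAM is assumed to be a configured ModuleAnalysisManager):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(PreISelIntrinsicLoweringPass());
+//   MPM.run(M, MAM);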
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
new file mode 100644
index 0000000..bdf0bb7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
@@ -0,0 +1,198 @@
+//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PseudoSourceValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueMap.h"
+#include <map>
+
+namespace llvm {
+
+class MachineFrameInfo;
+class MachineMemOperand;
+class raw_ostream;
+class TargetInstrInfo;
+
+raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MMO);
+class PseudoSourceValue;
+raw_ostream &operator<<(raw_ostream &OS, const PseudoSourceValue* PSV);
+
+/// Special value supplied for machine level alias analysis. It indicates that
+/// a memory access references the function's stack frame (e.g., a spill
+/// slot), the area below the stack frame (e.g., argument space), or the
+/// constant pool.
+class PseudoSourceValue {
+public:
+  enum PSVKind {
+    Stack,
+    GOT,
+    JumpTable,
+    ConstantPool,
+    FixedStack,
+    GlobalValueCallEntry,
+    ExternalSymbolCallEntry,
+    TargetCustom
+  };
+
+private:
+  PSVKind Kind;
+  unsigned AddressSpace;
+  friend raw_ostream &llvm::operator<<(raw_ostream &OS,
+                                       const PseudoSourceValue* PSV);
+
+  friend class MachineMemOperand; // For printCustom().
+
+  /// Implement printing for PseudoSourceValue. This is called from
+  /// Value::print or Value's operator<<.
+  virtual void printCustom(raw_ostream &O) const;
+
+public:
+  explicit PseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+
+  virtual ~PseudoSourceValue();
+
+  PSVKind kind() const { return Kind; }
+
+  bool isStack() const { return Kind == Stack; }
+  bool isGOT() const { return Kind == GOT; }
+  bool isConstantPool() const { return Kind == ConstantPool; }
+  bool isJumpTable() const { return Kind == JumpTable; }
+
+  unsigned getAddressSpace() const { return AddressSpace; }
+
+  unsigned getTargetCustom() const {
+    return (Kind >= TargetCustom) ? ((Kind+1) - TargetCustom) : 0;
+  }
+
+  /// Test whether the memory pointed to by this PseudoSourceValue has a
+  /// constant value.
+  virtual bool isConstant(const MachineFrameInfo *) const;
+
+  /// Test whether the memory pointed to by this PseudoSourceValue may also be
+  /// pointed to by an LLVM IR Value.
+  virtual bool isAliased(const MachineFrameInfo *) const;
+
+  /// Return true if the memory pointed to by this PseudoSourceValue can ever
+  /// alias an LLVM IR Value.
+  virtual bool mayAlias(const MachineFrameInfo *) const;
+};
+
+/// A specialized PseudoSourceValue for holding FixedStack values, which must
+/// include a frame index.
+class FixedStackPseudoSourceValue : public PseudoSourceValue {
+  const int FI;
+
+public:
+  explicit FixedStackPseudoSourceValue(int FI, const TargetInstrInfo &TII)
+      : PseudoSourceValue(FixedStack, TII), FI(FI) {}
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == FixedStack;
+  }
+
+  bool isConstant(const MachineFrameInfo *MFI) const override;
+
+  bool isAliased(const MachineFrameInfo *MFI) const override;
+
+  bool mayAlias(const MachineFrameInfo *) const override;
+
+  void printCustom(raw_ostream &OS) const override;
+
+  int getFrameIndex() const { return FI; }
+};
+
+class CallEntryPseudoSourceValue : public PseudoSourceValue {
+protected:
+  CallEntryPseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+
+public:
+  bool isConstant(const MachineFrameInfo *) const override;
+  bool isAliased(const MachineFrameInfo *) const override;
+  bool mayAlias(const MachineFrameInfo *) const override;
+};
+
+/// A specialized pseudo source value for holding GlobalValue values.
+class GlobalValuePseudoSourceValue : public CallEntryPseudoSourceValue {
+  const GlobalValue *GV;
+
+public:
+  GlobalValuePseudoSourceValue(const GlobalValue *GV,
+                               const TargetInstrInfo &TII);
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == GlobalValueCallEntry;
+  }
+
+  const GlobalValue *getValue() const { return GV; }
+};
+
+/// A specialized pseudo source value for holding external symbol values.
+class ExternalSymbolPseudoSourceValue : public CallEntryPseudoSourceValue {
+  const char *ES;
+
+public:
+  ExternalSymbolPseudoSourceValue(const char *ES, const TargetInstrInfo &TII);
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == ExternalSymbolCallEntry;
+  }
+
+  const char *getSymbol() const { return ES; }
+};
+
+/// Manages creation of pseudo source values.
+class PseudoSourceValueManager {
+  const TargetInstrInfo &TII;
+  const PseudoSourceValue StackPSV, GOTPSV, JumpTablePSV, ConstantPoolPSV;
+  std::map<int, std::unique_ptr<FixedStackPseudoSourceValue>> FSValues;
+  StringMap<std::unique_ptr<const ExternalSymbolPseudoSourceValue>>
+      ExternalCallEntries;
+  ValueMap<const GlobalValue *,
+           std::unique_ptr<const GlobalValuePseudoSourceValue>>
+      GlobalCallEntries;
+
+public:
+  PseudoSourceValueManager(const TargetInstrInfo &TII);
+
+  /// Return a pseudo source value referencing the area below the stack frame of
+  /// a function, e.g., the argument space.
+  const PseudoSourceValue *getStack();
+
+  /// Return a pseudo source value referencing the global offset table
+  /// (or something like it).
+  const PseudoSourceValue *getGOT();
+
+  /// Return a pseudo source value referencing the constant pool. Since constant
+  /// pools are constant, this doesn't need to identify a specific constant
+  /// pool entry.
+  const PseudoSourceValue *getConstantPool();
+
+  /// Return a pseudo source value referencing a jump table. Since jump tables
+  /// are constant, this doesn't need to identify a specific jump table.
+  const PseudoSourceValue *getJumpTable();
+
+  /// Return a pseudo source value referencing a fixed stack frame entry,
+  /// e.g., a spill slot.
+  const PseudoSourceValue *getFixedStack(int FI);
+
+  const PseudoSourceValue *getGlobalValueCallEntry(const GlobalValue *GV);
+
+  const PseudoSourceValue *getExternalSymbolCallEntry(const char *ES);
+};
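+
+// Example (editor's sketch, not part of the original header): a client with a
+// MachineFunction MF, frame index FI, access size Size and alignment Align
+// (all placeholders) could attach a fixed-stack PSV to a memory operand:
+//
+//   PseudoSourceValueManager &PSVMgr = MF.getPSVManager();
+//   const PseudoSourceValue *PSV = PSVMgr.getFixedStack(FI);
+//   MachineMemOperand *MMO = MF.getMachineMemOperand(
+//       MachinePointerInfo(PSV), MachineMemOperand::MOLoad, Size, Align);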
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h b/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h
new file mode 100644
index 0000000..b21b745
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h
@@ -0,0 +1,118 @@
+//===- llvm/CodeGen/ReachingDefAnalysis.h - Reaching Def Analysis -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Reaching Defs Analysis pass.
+///
+/// This pass tracks, for each instruction, the "closest" reaching def of a
+/// given register. It is used by BreakFalseDeps (for clearance calculation)
+/// and ExecutionDomainFix (for arbitrating conflicting domains).
+///
+/// Note that this is different from the usual notion of liveness. The CPU
+/// doesn't care whether or not we consider a register killed.
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
+#define LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LoopTraversal.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+
+/// This class provides the reaching def analysis.
+class ReachingDefAnalysis : public MachineFunctionPass {
+private:
+  MachineFunction *MF;
+  const TargetRegisterInfo *TRI;
+  unsigned NumRegUnits;
+  /// Instruction that defined each register, relative to the beginning of the
+  /// current basic block.  When a LiveRegsDefInfo is used to represent a
+  /// live-out register, this value is relative to the end of the basic block,
+  /// so it will be a negative number.
+  using LiveRegsDefInfo = std::vector<int>;
+  LiveRegsDefInfo LiveRegs;
+
+  /// Keeps clearance information for all registers. Note that this is
+  /// different from the usual notion of liveness. The CPU doesn't care
+  /// whether or not we consider a register killed.
+  using OutRegsInfoMap = SmallVector<LiveRegsDefInfo, 4>;
+  OutRegsInfoMap MBBOutRegsInfos;
+
+  /// Current instruction number.
+  /// The first instruction in each basic block is 0.
+  int CurInstr;
+
+  /// Maps instructions to their instruction Ids, relative to the beginning of
+  /// their basic blocks.
+  DenseMap<MachineInstr *, int> InstIds;
+
+  /// All reaching defs of a given RegUnit for a given MBB.
+  using MBBRegUnitDefs = SmallVector<int, 1>;
+  /// All reaching defs of all reg units for a given MBB.
+  using MBBDefsInfo = std::vector<MBBRegUnitDefs>;
+  /// All reaching defs of all reg units for all MBBs.
+  using MBBReachingDefsInfo = SmallVector<MBBDefsInfo, 4>;
+  MBBReachingDefsInfo MBBReachingDefs;
+
+  /// Default values are 'nothing happened a long time ago'.
+  const int ReachingDefDefaultVal = -(1 << 20);
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  ReachingDefAnalysis() : MachineFunctionPass(ID) {
+    initializeReachingDefAnalysisPass(*PassRegistry::getPassRegistry());
+  }
+  void releaseMemory() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+  /// Provides the instruction id of the closest reaching def instruction of
+  /// PhysReg that reaches MI, relative to the beginning of MI's basic block.
+  int getReachingDef(MachineInstr *MI, int PhysReg);
+
+  /// Provides the clearance - the number of instructions since the closest
+  /// reaching def instruction of PhysReg that reaches MI.
+  int getClearance(MachineInstr *MI, MCPhysReg PhysReg);
+
+private:
+  /// Set up LiveRegs by merging predecessor live-out values.
+  void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update live-out values.
+  void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Process the given basic block.
+  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update def-ages for registers defined by MI.
+  /// Also break dependencies on partial defs and undef uses.
+  void processDefs(MachineInstr *);
+};
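+
+// Example (editor's sketch): a client pass such as BreakFalseDeps might query
+// the analysis roughly as follows; MI, PhysReg and Threshold are placeholders.
+//
+//   auto &RDA = getAnalysis<ReachingDefAnalysis>();
+//   if (RDA.getClearance(&MI, PhysReg) < Threshold) {
+//     // Too few instructions since the last def of PhysReg: consider
+//     // inserting a dependency-breaking instruction before MI.
+//   }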
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
new file mode 100644
index 0000000..5b34286
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
@@ -0,0 +1,536 @@
+//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPBuilder interface, for classes which build PBQP
+// instances to represent register allocation problems, and the RegAllocPBQP
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
+#define LLVM_CODEGEN_REGALLOCPBQP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/CodeGen/PBQP/CostAllocator.h"
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Math.h"
+#include "llvm/CodeGen/PBQP/ReductionRules.h"
+#include "llvm/CodeGen/PBQP/Solution.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class FunctionPass;
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineFunction;
+class raw_ostream;
+
+namespace PBQP {
+namespace RegAlloc {
+
+/// @brief Spill option index.
+inline unsigned getSpillOptionIdx() { return 0; }
+
+/// \brief Metadata to speed allocatability test.
+///
+/// Keeps track of the number of infinities in each row and column.
+class MatrixMetadata {
+public:
+  MatrixMetadata(const Matrix& M)
+    : UnsafeRows(new bool[M.getRows() - 1]()),
+      UnsafeCols(new bool[M.getCols() - 1]()) {
+    unsigned* ColCounts = new unsigned[M.getCols() - 1]();
+
+    for (unsigned i = 1; i < M.getRows(); ++i) {
+      unsigned RowCount = 0;
+      for (unsigned j = 1; j < M.getCols(); ++j) {
+        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
+          ++RowCount;
+          ++ColCounts[j - 1];
+          UnsafeRows[i - 1] = true;
+          UnsafeCols[j - 1] = true;
+        }
+      }
+      WorstRow = std::max(WorstRow, RowCount);
+    }
+    unsigned WorstColCountForCurRow =
+      *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
+    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
+    delete[] ColCounts;
+  }
+
+  MatrixMetadata(const MatrixMetadata &) = delete;
+  MatrixMetadata &operator=(const MatrixMetadata &) = delete;
+
+  unsigned getWorstRow() const { return WorstRow; }
+  unsigned getWorstCol() const { return WorstCol; }
+  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
+  const bool* getUnsafeCols() const { return UnsafeCols.get(); }
+
+private:
+  unsigned WorstRow = 0;
+  unsigned WorstCol = 0;
+  std::unique_ptr<bool[]> UnsafeRows;
+  std::unique_ptr<bool[]> UnsafeCols;
+};
+
+/// \brief Holds a vector of the allowed physical regs for a vreg.
+class AllowedRegVector {
+  friend hash_code hash_value(const AllowedRegVector &);
+
+public:
+  AllowedRegVector() = default;
+  AllowedRegVector(AllowedRegVector &&) = default;
+
+  AllowedRegVector(const std::vector<unsigned> &OptVec)
+    : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
+    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
+  }
+
+  unsigned size() const { return NumOpts; }
+  unsigned operator[](size_t I) const { return Opts[I]; }
+
+  bool operator==(const AllowedRegVector &Other) const {
+    if (NumOpts != Other.NumOpts)
+      return false;
+    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
+  }
+
+  bool operator!=(const AllowedRegVector &Other) const {
+    return !(*this == Other);
+  }
+
+private:
+  unsigned NumOpts = 0;
+  std::unique_ptr<unsigned[]> Opts;
+};
+
+inline hash_code hash_value(const AllowedRegVector &OptRegs) {
+  unsigned *OStart = OptRegs.Opts.get();
+  unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
+  return hash_combine(OptRegs.NumOpts,
+                      hash_combine_range(OStart, OEnd));
+}
+
+/// \brief Holds graph-level metadata relevant to PBQP RA problems.
+class GraphMetadata {
+private:
+  using AllowedRegVecPool = ValuePool<AllowedRegVector>;
+
+public:
+  using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
+
+  GraphMetadata(MachineFunction &MF,
+                LiveIntervals &LIS,
+                MachineBlockFrequencyInfo &MBFI)
+    : MF(MF), LIS(LIS), MBFI(MBFI) {}
+
+  MachineFunction &MF;
+  LiveIntervals &LIS;
+  MachineBlockFrequencyInfo &MBFI;
+
+  void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
+    VRegToNodeId[VReg] = NId;
+  }
+
+  GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
+    auto VRegItr = VRegToNodeId.find(VReg);
+    if (VRegItr == VRegToNodeId.end())
+      return GraphBase::invalidNodeId();
+    return VRegItr->second;
+  }
+
+  AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
+    return AllowedRegVecs.getValue(std::move(Allowed));
+  }
+
+private:
+  DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
+  AllowedRegVecPool AllowedRegVecs;
+};
+
+/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
+class NodeMetadata {
+public:
+  using AllowedRegVector = RegAlloc::AllowedRegVector;
+
+  // The node's reduction state. The order in this enum is important,
+  // as it is assumed nodes can only progress up (i.e. towards being
+  // optimally reducible) when reducing the graph.
+  using ReductionState = enum {
+    Unprocessed,
+    NotProvablyAllocatable,
+    ConservativelyAllocatable,
+    OptimallyReducible
+  };
+
+  NodeMetadata() = default;
+
+  NodeMetadata(const NodeMetadata &Other)
+    : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
+      OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
+      AllowedRegs(Other.AllowedRegs)
+#ifndef NDEBUG
+      , everConservativelyAllocatable(Other.everConservativelyAllocatable)
+#endif
+  {
+    if (NumOpts > 0) {
+      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
+                &OptUnsafeEdges[0]);
+    }
+  }
+
+  NodeMetadata(NodeMetadata &&) = default;
+  NodeMetadata& operator=(NodeMetadata &&) = default;
+
+  void setVReg(unsigned VReg) { this->VReg = VReg; }
+  unsigned getVReg() const { return VReg; }
+
+  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
+    this->AllowedRegs = std::move(AllowedRegs);
+  }
+  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
+
+  void setup(const Vector& Costs) {
+    NumOpts = Costs.getLength() - 1;
+    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
+  }
+
+  ReductionState getReductionState() const { return RS; }
+  void setReductionState(ReductionState RS) {
+    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
+    this->RS = RS;
+
+#ifndef NDEBUG
+    // Remember this state to assert later that a non-infinite register
+    // option was available.
+    if (RS == ConservativelyAllocatable)
+      everConservativelyAllocatable = true;
+#endif
+  }
+
+  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
+    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
+    const bool* UnsafeOpts =
+      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+    for (unsigned i = 0; i < NumOpts; ++i)
+      OptUnsafeEdges[i] += UnsafeOpts[i];
+  }
+
+  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
+    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
+    const bool* UnsafeOpts =
+      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+    for (unsigned i = 0; i < NumOpts; ++i)
+      OptUnsafeEdges[i] -= UnsafeOpts[i];
+  }
+
+  bool isConservativelyAllocatable() const {
+    return (DeniedOpts < NumOpts) ||
+      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
+       &OptUnsafeEdges[NumOpts]);
+  }
+
+#ifndef NDEBUG
+  bool wasConservativelyAllocatable() const {
+    return everConservativelyAllocatable;
+  }
+#endif
+
+private:
+  ReductionState RS = Unprocessed;
+  unsigned NumOpts = 0;
+  unsigned DeniedOpts = 0;
+  std::unique_ptr<unsigned[]> OptUnsafeEdges;
+  unsigned VReg = 0;
+  GraphMetadata::AllowedRegVecRef AllowedRegs;
+
+#ifndef NDEBUG
+  bool everConservativelyAllocatable = false;
+#endif
+};
+
+class RegAllocSolverImpl {
+private:
+  using RAMatrix = MDMatrix<MatrixMetadata>;
+
+public:
+  using RawVector = PBQP::Vector;
+  using RawMatrix = PBQP::Matrix;
+  using Vector = PBQP::Vector;
+  using Matrix = RAMatrix;
+  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;
+
+  using NodeId = GraphBase::NodeId;
+  using EdgeId = GraphBase::EdgeId;
+
+  using NodeMetadata = RegAlloc::NodeMetadata;
+  struct EdgeMetadata {};
+  using GraphMetadata = RegAlloc::GraphMetadata;
+
+  using Graph = PBQP::Graph<RegAllocSolverImpl>;
+
+  RegAllocSolverImpl(Graph &G) : G(G) {}
+
+  Solution solve() {
+    G.setSolver(*this);
+    Solution S;
+    setup();
+    S = backpropagate(G, reduce());
+    G.unsetSolver();
+    return S;
+  }
+
+  void handleAddNode(NodeId NId) {
+    assert(G.getNodeCosts(NId).getLength() > 1 &&
+           "PBQP Graph should not contain single or zero-option nodes");
+    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
+  }
+
+  void handleRemoveNode(NodeId NId) {}
+  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
+
+  void handleAddEdge(EdgeId EId) {
+    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
+    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
+  }
+
+  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
+    NodeMetadata& NMd = G.getNodeMetadata(NId);
+    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
+    promote(NId, NMd);
+  }
+
+  void handleReconnectEdge(EdgeId EId, NodeId NId) {
+    NodeMetadata& NMd = G.getNodeMetadata(NId);
+    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
+  }
+
+  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
+    NodeId N1Id = G.getEdgeNode1Id(EId);
+    NodeId N2Id = G.getEdgeNode2Id(EId);
+    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
+    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
+    bool Transpose = N1Id != G.getEdgeNode1Id(EId);
+
+    // Metadata are computed incrementally. First, update them
+    // by removing the old cost.
+    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
+    N1Md.handleRemoveEdge(OldMMd, Transpose);
+    N2Md.handleRemoveEdge(OldMMd, !Transpose);
+
+    // And update now the metadata with the new cost.
+    const MatrixMetadata& MMd = NewCosts.getMetadata();
+    N1Md.handleAddEdge(MMd, Transpose);
+    N2Md.handleAddEdge(MMd, !Transpose);
+
+    // As the metadata may have changed with the update, the nodes may have
+    // become ConservativelyAllocatable or OptimallyReducible.
+    promote(N1Id, N1Md);
+    promote(N2Id, N2Md);
+  }
+
+private:
+  void promote(NodeId NId, NodeMetadata& NMd) {
+    if (G.getNodeDegree(NId) == 3) {
+      // This node is becoming optimally reducible.
+      moveToOptimallyReducibleNodes(NId);
+    } else if (NMd.getReductionState() ==
+               NodeMetadata::NotProvablyAllocatable &&
+               NMd.isConservativelyAllocatable()) {
+      // This node just became conservatively allocatable.
+      moveToConservativelyAllocatableNodes(NId);
+    }
+  }
+
+  void removeFromCurrentSet(NodeId NId) {
+    switch (G.getNodeMetadata(NId).getReductionState()) {
+    case NodeMetadata::Unprocessed: break;
+    case NodeMetadata::OptimallyReducible:
+      assert(OptimallyReducibleNodes.find(NId) !=
+             OptimallyReducibleNodes.end() &&
+             "Node not in optimally reducible set.");
+      OptimallyReducibleNodes.erase(NId);
+      break;
+    case NodeMetadata::ConservativelyAllocatable:
+      assert(ConservativelyAllocatableNodes.find(NId) !=
+             ConservativelyAllocatableNodes.end() &&
+             "Node not in conservatively allocatable set.");
+      ConservativelyAllocatableNodes.erase(NId);
+      break;
+    case NodeMetadata::NotProvablyAllocatable:
+      assert(NotProvablyAllocatableNodes.find(NId) !=
+             NotProvablyAllocatableNodes.end() &&
+             "Node not in not-provably-allocatable set.");
+      NotProvablyAllocatableNodes.erase(NId);
+      break;
+    }
+  }
+
+  void moveToOptimallyReducibleNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    OptimallyReducibleNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::OptimallyReducible);
+  }
+
+  void moveToConservativelyAllocatableNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    ConservativelyAllocatableNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::ConservativelyAllocatable);
+  }
+
+  void moveToNotProvablyAllocatableNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    NotProvablyAllocatableNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::NotProvablyAllocatable);
+  }
+
+  void setup() {
+    // Set up worklists.
+    for (auto NId : G.nodeIds()) {
+      if (G.getNodeDegree(NId) < 3)
+        moveToOptimallyReducibleNodes(NId);
+      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
+        moveToConservativelyAllocatableNodes(NId);
+      else
+        moveToNotProvablyAllocatableNodes(NId);
+    }
+  }
+
+  // Compute a reduction order for the graph by iteratively applying PBQP
+  // reduction rules. Locally optimal rules are applied whenever possible (R0,
+  // R1, R2). If no locally-optimal rules apply then any conservatively
+  // allocatable node is reduced. Finally, if no conservatively allocatable
+  // node exists then the node with the lowest spill-cost:degree ratio is
+  // selected.
+  std::vector<GraphBase::NodeId> reduce() {
+    assert(!G.empty() && "Cannot reduce empty graph.");
+
+    using NodeId = GraphBase::NodeId;
+    std::vector<NodeId> NodeStack;
+
+    // Consume worklists.
+    while (true) {
+      if (!OptimallyReducibleNodes.empty()) {
+        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
+        NodeId NId = *NItr;
+        OptimallyReducibleNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        switch (G.getNodeDegree(NId)) {
+        case 0:
+          break;
+        case 1:
+          applyR1(G, NId);
+          break;
+        case 2:
+          applyR2(G, NId);
+          break;
+        default: llvm_unreachable("Not an optimally reducible node.");
+        }
+      } else if (!ConservativelyAllocatableNodes.empty()) {
+        // Conservatively allocatable nodes will never spill. For now just
+        // take the first node in the set and push it on the stack. When we
+        // start optimizing more heavily for register preferencing, it may
+        // be better to push nodes with lower 'expected' or worst-case
+        // register costs first (since early nodes are the most
+        // constrained).
+        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
+        NodeId NId = *NItr;
+        ConservativelyAllocatableNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        G.disconnectAllNeighborsFromNode(NId);
+      } else if (!NotProvablyAllocatableNodes.empty()) {
+        NodeSet::iterator NItr =
+          std::min_element(NotProvablyAllocatableNodes.begin(),
+                           NotProvablyAllocatableNodes.end(),
+                           SpillCostComparator(G));
+        NodeId NId = *NItr;
+        NotProvablyAllocatableNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        G.disconnectAllNeighborsFromNode(NId);
+      } else
+        break;
+    }
+
+    return NodeStack;
+  }
+
+  class SpillCostComparator {
+  public:
+    SpillCostComparator(const Graph& G) : G(G) {}
+
+    bool operator()(NodeId N1Id, NodeId N2Id) {
+      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
+      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
+      if (N1SC == N2SC)
+        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
+      return N1SC < N2SC;
+    }
+
+  private:
+    const Graph& G;
+  };
+
+  Graph& G;
+  using NodeSet = std::set<NodeId>;
+  NodeSet OptimallyReducibleNodes;
+  NodeSet ConservativelyAllocatableNodes;
+  NodeSet NotProvablyAllocatableNodes;
+};
+
+class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
+private:
+  using BaseT = PBQP::Graph<RegAllocSolverImpl>;
+
+public:
+  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
+
+  /// @brief Dump this graph to dbgs().
+  void dump() const;
+
+  /// @brief Dump this graph to an output stream.
+  /// @param OS Output stream to print on.
+  void dump(raw_ostream &OS) const;
+
+  /// @brief Print a representation of this graph in DOT format.
+  /// @param OS Output stream to print on.
+  void printDot(raw_ostream &OS) const;
+};
+
+inline Solution solve(PBQPRAGraph& G) {
+  if (G.empty())
+    return Solution();
+  RegAllocSolverImpl RegAllocSolver(G);
+  return RegAllocSolver.solve();
+}
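+
+// Example (editor's sketch): how an allocator might drive the solver once the
+// graph is built; NId is a placeholder node id and graph construction (one
+// node per vreg, one edge per interference) is elided.
+//
+//   PBQPRAGraph G(GraphMetadata(MF, LIS, MBFI));
+//   ... // add node/edge costs
+//   Solution S = solve(G);
+//   unsigned Opt = S.getSelection(NId); // getSpillOptionIdx() means "spill"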
+
+} // end namespace RegAlloc
+} // end namespace PBQP
+
+/// @brief Create a PBQP register allocator instance.
+FunctionPass *
+createPBQPRegisterAllocator(char *customPassID = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGALLOCPBQP_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h b/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h
new file mode 100644
index 0000000..481747d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h
@@ -0,0 +1,66 @@
+//===- llvm/CodeGen/RegAllocRegistry.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the register allocator function
+// pass registry (RegisterRegAlloc).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
+#define LLVM_CODEGEN_REGALLOCREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+class FunctionPass;
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterRegAlloc class - Track the registration of register allocators.
+///
+//===----------------------------------------------------------------------===//
+class RegisterRegAlloc : public MachinePassRegistryNode {
+public:
+  using FunctionPassCtor = FunctionPass *(*)();
+
+  static MachinePassRegistry Registry;
+
+  RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
+      : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+    Registry.Add(this);
+  }
+
+  ~RegisterRegAlloc() { Registry.Remove(this); }
+
+  // Accessors.
+  RegisterRegAlloc *getNext() const {
+    return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
+  }
+
+  static RegisterRegAlloc *getList() {
+    return (RegisterRegAlloc *)Registry.getList();
+  }
+
+  static FunctionPassCtor getDefault() {
+    return (FunctionPassCtor)Registry.getDefault();
+  }
+
+  static void setDefault(FunctionPassCtor C) {
+    Registry.setDefault((MachinePassCtor)C);
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
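+
+// Example (editor's sketch): an allocator typically registers itself with a
+// static object so it can be selected via -regalloc=demo; the name and
+// factory below are illustrative.
+//
+//   static FunctionPass *createDemoRegAlloc(); // factory defined elsewhere
+//   static RegisterRegAlloc
+//       demoRegAlloc("demo", "demo register allocator", createDemoRegAlloc);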
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGALLOCREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h b/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h
new file mode 100644
index 0000000..97113c5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h
@@ -0,0 +1,150 @@
+//===- RegisterClassInfo.h - Dynamic Register Class Info --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RegisterClassInfo class which provides dynamic
+// information about target register classes. Callee saved and reserved
+// registers depend on calling conventions and other dynamic information, so
+// some things cannot be determined statically.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
+#define LLVM_CODEGEN_REGISTERCLASSINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class RegisterClassInfo {
+  struct RCInfo {
+    unsigned Tag = 0;
+    unsigned NumRegs = 0;
+    bool ProperSubClass = false;
+    uint8_t MinCost = 0;
+    uint16_t LastCostChange = 0;
+    std::unique_ptr<MCPhysReg[]> Order;
+
+    RCInfo() = default;
+
+    operator ArrayRef<MCPhysReg>() const {
+      return makeArrayRef(Order.get(), NumRegs);
+    }
+  };
+
+  // Brief cached information for each register class.
+  std::unique_ptr<RCInfo[]> RegClass;
+
+  // Tag changes whenever cached information needs to be recomputed. An RCInfo
+  // entry is valid when its tag matches.
+  unsigned Tag = 0;
+
+  const MachineFunction *MF = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+
+  // Callee saved registers of last MF. Assumed to be valid until the next
+  // runOnMachineFunction() call.
+  // Used only to determine if an update was made to CalleeSavedAliases.
+  const MCPhysReg *CalleeSavedRegs = nullptr;
+
+  // Map register alias to the callee saved register.
+  SmallVector<MCPhysReg, 4> CalleeSavedAliases;
+
+  // Reserved registers in the current MF.
+  BitVector Reserved;
+
+  std::unique_ptr<unsigned[]> PSetLimits;
+
+  // Compute all information about RC.
+  void compute(const TargetRegisterClass *RC) const;
+
+  // Return an up-to-date RCInfo for RC.
+  const RCInfo &get(const TargetRegisterClass *RC) const {
+    const RCInfo &RCI = RegClass[RC->getID()];
+    if (Tag != RCI.Tag)
+      compute(RC);
+    return RCI;
+  }
+
+public:
+  RegisterClassInfo();
+
+  /// runOnMachineFunction - Prepare to answer questions about MF. This must
+  /// be called before any other methods are used.
+  void runOnMachineFunction(const MachineFunction &MF);
+
+  /// getNumAllocatableRegs - Returns the number of actually allocatable
+  /// registers in RC in the current function.
+  unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
+    return get(RC).NumRegs;
+  }
+
+  /// getOrder - Returns the preferred allocation order for RC. The order
+  /// contains no reserved registers, and registers that alias callee saved
+  /// registers come last.
+  ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
+    return get(RC);
+  }
+
+  /// isProperSubClass - Returns true if RC has a legal super-class with more
+  /// allocatable registers.
+  ///
+  /// Register classes like GR32_NOSP are not proper sub-classes because %esp
+  /// is not allocatable.  Similarly, tGPR is not a proper sub-class in Thumb
+  /// mode because the GPR super-class is not legal.
+  bool isProperSubClass(const TargetRegisterClass *RC) const {
+    return get(RC).ProperSubClass;
+  }
+
+  /// getLastCalleeSavedAlias - Returns the last callee saved register that
+  /// overlaps PhysReg, or 0 if PhysReg doesn't overlap any callee saved
+  /// register.
+  unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
+    assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
+    if (PhysReg < CalleeSavedAliases.size())
+      return CalleeSavedAliases[PhysReg];
+    return 0;
+  }
+
+  /// Get the minimum register cost in RC's allocation order.
+  /// This is the smallest value returned by TRI->getCostPerUse(Reg) for all
+  /// the registers in getOrder(RC).
+  unsigned getMinCost(const TargetRegisterClass *RC) {
+    return get(RC).MinCost;
+  }
+
+  /// Get the position of the last cost change in getOrder(RC).
+  ///
+  /// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
+  /// same cost according to TRI->getCostPerUse().
+  unsigned getLastCostChange(const TargetRegisterClass *RC) {
+    return get(RC).LastCostChange;
+  }
+
+  /// Get the register unit limit for the given pressure set index.
+  ///
+  /// RegisterClassInfo adjusts this limit for reserved registers.
+  unsigned getRegPressureSetLimit(unsigned Idx) const {
+    if (!PSetLimits[Idx])
+      PSetLimits[Idx] = computePSetLimit(Idx);
+    return PSetLimits[Idx];
+  }
+
+protected:
+  unsigned computePSetLimit(unsigned Idx) const;
+};
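+
+// Example (editor's sketch): typical use from a scheduler or allocator; MF
+// and RC are placeholders.
+//
+//   RegisterClassInfo RCI;
+//   RCI.runOnMachineFunction(MF);
+//   for (MCPhysReg PhysReg : RCI.getOrder(RC)) {
+//     // Reserved registers are already filtered out, and registers
+//     // aliasing callee saved registers come last.
+//   }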
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERCLASSINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
new file mode 100644
index 0000000..2b14b78
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
@@ -0,0 +1,576 @@
+//===- RegisterPressure.h - Dynamic Register Pressure -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegisterPressure class which can be used to track
+// MachineInstr level register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
+#define LLVM_CODEGEN_REGISTERPRESSURE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <vector>
+
+namespace llvm {
+
+class LiveIntervals;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class RegisterClassInfo;
+
+struct RegisterMaskPair {
+  unsigned RegUnit; ///< Virtual register or register unit.
+  LaneBitmask LaneMask;
+
+  RegisterMaskPair(unsigned RegUnit, LaneBitmask LaneMask)
+      : RegUnit(RegUnit), LaneMask(LaneMask) {}
+};
+
+/// Base class for register pressure results.
+struct RegisterPressure {
+  /// Map of max reg pressure indexed by pressure set ID, not class ID.
+  std::vector<unsigned> MaxSetPressure;
+
+  /// List of live in virtual registers or physical register units.
+  SmallVector<RegisterMaskPair,8> LiveInRegs;
+  SmallVector<RegisterMaskPair,8> LiveOutRegs;
+
+  void dump(const TargetRegisterInfo *TRI) const;
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopIdx and BottomIdx.  During pressure computation, the maximum pressure per
+/// register pressure set is increased. Once pressure within a region is fully
+/// computed, the live-in and live-out sets are recorded.
+///
+/// This is preferable to RegionPressure when LiveIntervals are available,
+/// because delimiting regions by SlotIndex is more robust and convenient than
+/// holding block iterators. The block contents can change without invalidating
+/// the pressure result.
+struct IntervalPressure : RegisterPressure {
+  /// Record the boundary of the region being tracked.
+  SlotIndex TopIdx;
+  SlotIndex BottomIdx;
+
+  void reset();
+
+  void openTop(SlotIndex NextTop);
+
+  void openBottom(SlotIndex PrevBottom);
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
+/// use when LiveIntervals are unavailable.
+struct RegionPressure : RegisterPressure {
+  /// Record the boundary of the region being tracked.
+  MachineBasicBlock::const_iterator TopPos;
+  MachineBasicBlock::const_iterator BottomPos;
+
+  void reset();
+
+  void openTop(MachineBasicBlock::const_iterator PrevTop);
+
+  void openBottom(MachineBasicBlock::const_iterator PrevBottom);
+};
+
+/// Capture a change in pressure for a single pressure set. UnitInc may be
+/// expressed in terms of upward or downward pressure depending on the client
+/// and will be dynamically adjusted for current liveness.
+///
+/// Pressure increments are tiny, typically 1-2 units, and this is only for
+/// heuristics, so we don't check UnitInc overflow. Instead, we may have a
+/// higher level assert that pressure is consistent within a region. We also
+/// effectively ignore dead defs which don't affect heuristics much.
+class PressureChange {
+  uint16_t PSetID = 0; // ID+1. 0=Invalid.
+  int16_t UnitInc = 0;
+
+public:
+  PressureChange() = default;
+  PressureChange(unsigned id): PSetID(id + 1) {
+    assert(id < std::numeric_limits<uint16_t>::max() && "PSetID overflow.");
+  }
+
+  bool isValid() const { return PSetID > 0; }
+
+  unsigned getPSet() const {
+    assert(isValid() && "invalid PressureChange");
+    return PSetID - 1;
+  }
+
+  // If PSetID is invalid, return UINT16_MAX to give it lowest priority.
+  unsigned getPSetOrMax() const {
+    return (PSetID - 1) & std::numeric_limits<uint16_t>::max();
+  }
+
+  int getUnitInc() const { return UnitInc; }
+
+  void setUnitInc(int Inc) { UnitInc = Inc; }
+
+  bool operator==(const PressureChange &RHS) const {
+    return PSetID == RHS.PSetID && UnitInc == RHS.UnitInc;
+  }
+};
+
+template <> struct isPodLike<PressureChange> {
+   static const bool value = true;
+};
+
+/// List of PressureChanges in order of increasing, unique PSetID.
+///
+/// Use a small fixed number, because we can fit more PressureChanges in an
+/// empty SmallVector than ever need to be tracked per register class. If more
+/// PSets are affected, then we only track the most constrained.
+class PressureDiff {
+  // The initial design was for MaxPSets=4, but that requires PSet partitions,
+  // which are not yet implemented. (PSet partitions are equivalent PSets given
+  // the register classes actually in use within the scheduling region.)
+  enum { MaxPSets = 16 };
+
+  PressureChange PressureChanges[MaxPSets];
+
+  using iterator = PressureChange *;
+
+  iterator nonconst_begin() { return &PressureChanges[0]; }
+  iterator nonconst_end() { return &PressureChanges[MaxPSets]; }
+
+public:
+  using const_iterator = const PressureChange *;
+
+  const_iterator begin() const { return &PressureChanges[0]; }
+  const_iterator end() const { return &PressureChanges[MaxPSets]; }
+
+  void addPressureChange(unsigned RegUnit, bool IsDec,
+                         const MachineRegisterInfo *MRI);
+
+  void dump(const TargetRegisterInfo &TRI) const;
+};
+
+/// List of registers defined and used by a machine instruction.
+class RegisterOperands {
+public:
+  /// List of virtual registers and register units read by the instruction.
+  SmallVector<RegisterMaskPair, 8> Uses;
+  /// \brief List of virtual registers and register units defined by the
+  /// instruction which are not dead.
+  SmallVector<RegisterMaskPair, 8> Defs;
+  /// \brief List of virtual registers and register units defined by the
+  /// instruction but dead.
+  SmallVector<RegisterMaskPair, 8> DeadDefs;
+
+  /// Analyze the given instruction \p MI and fill in the Uses, Defs and
+  /// DeadDefs list based on the MachineOperand flags.
+  void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI,
+               const MachineRegisterInfo &MRI, bool TrackLaneMasks,
+               bool IgnoreDead);
+
+  /// Use liveness information to find dead defs not marked with a dead flag
+  /// and move them to the DeadDefs vector.
+  void detectDeadDefs(const MachineInstr &MI, const LiveIntervals &LIS);
+
+  /// Use liveness information to find out which uses/defs are partially
+  /// undefined/dead and adjust the RegisterMaskPairs accordingly.
+  /// If \p AddFlagsMI is given then missing read-undef and dead flags will be
+  /// added to the instruction.
+  void adjustLaneLiveness(const LiveIntervals &LIS,
+                          const MachineRegisterInfo &MRI, SlotIndex Pos,
+                          MachineInstr *AddFlagsMI = nullptr);
+};
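+
+// Example (editor's sketch): collecting an instruction's operands before
+// handing them to a pressure tracker; MI, TRI, MRI and LIS are placeholders.
+//
+//   RegisterOperands RegOpers;
+//   RegOpers.collect(MI, *TRI, *MRI, /*TrackLaneMasks=*/false,
+//                    /*IgnoreDead=*/false);
+//   RegOpers.detectDeadDefs(MI, *LIS);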
+
+/// Array of PressureDiffs.
+class PressureDiffs {
+  PressureDiff *PDiffArray = nullptr;
+  unsigned Size = 0;
+  unsigned Max = 0;
+
+public:
+  PressureDiffs() = default;
+  ~PressureDiffs() { free(PDiffArray); }
+
+  void clear() { Size = 0; }
+
+  void init(unsigned N);
+
+  PressureDiff &operator[](unsigned Idx) {
+    assert(Idx < Size && "PressureDiff index out of bounds");
+    return PDiffArray[Idx];
+  }
+  const PressureDiff &operator[](unsigned Idx) const {
+    return const_cast<PressureDiffs*>(this)->operator[](Idx);
+  }
+
+  /// \brief Record pressure difference induced by the given operand list to
+  /// node with index \p Idx.
+  void addInstruction(unsigned Idx, const RegisterOperands &RegOpers,
+                      const MachineRegisterInfo &MRI);
+};
+
+/// Store the effects of a change in pressure on things that MI scheduler cares
+/// about.
+///
+/// Excess records the value of the largest difference in register units beyond
+/// the target's pressure limits across the affected pressure sets, where
+/// largest is defined as the absolute value of the difference. Negative
+/// ExcessUnits indicates a reduction in pressure that had already exceeded the
+/// target's limits.
+///
+/// CriticalMax records the largest increase in the tracker's max pressure that
+/// exceeds the critical limit for some pressure set determined by the client.
+///
+/// CurrentMax records the largest increase in the tracker's max pressure that
+/// exceeds the current limit for some pressure set determined by the client.
+struct RegPressureDelta {
+  PressureChange Excess;
+  PressureChange CriticalMax;
+  PressureChange CurrentMax;
+
+  RegPressureDelta() = default;
+
+  bool operator==(const RegPressureDelta &RHS) const {
+    return Excess == RHS.Excess && CriticalMax == RHS.CriticalMax
+      && CurrentMax == RHS.CurrentMax;
+  }
+  bool operator!=(const RegPressureDelta &RHS) const {
+    return !operator==(RHS);
+  }
+};
+
+/// A set of live virtual registers and physical register units.
+///
+/// This is a wrapper around a SparseSet which deals with mapping register unit
+/// and virtual register indexes to an index usable by the sparse set.
+class LiveRegSet {
+private:
+  struct IndexMaskPair {
+    unsigned Index;
+    LaneBitmask LaneMask;
+
+    IndexMaskPair(unsigned Index, LaneBitmask LaneMask)
+        : Index(Index), LaneMask(LaneMask) {}
+
+    unsigned getSparseSetIndex() const {
+      return Index;
+    }
+  };
+
+  using RegSet = SparseSet<IndexMaskPair>;
+  RegSet Regs;
+  unsigned NumRegUnits;
+
+  unsigned getSparseIndexFromReg(unsigned Reg) const {
+    if (TargetRegisterInfo::isVirtualRegister(Reg))
+      return TargetRegisterInfo::virtReg2Index(Reg) + NumRegUnits;
+    assert(Reg < NumRegUnits);
+    return Reg;
+  }
+
+  unsigned getRegFromSparseIndex(unsigned SparseIndex) const {
+    if (SparseIndex >= NumRegUnits)
+      return TargetRegisterInfo::index2VirtReg(SparseIndex-NumRegUnits);
+    return SparseIndex;
+  }
+
+public:
+  void clear();
+  void init(const MachineRegisterInfo &MRI);
+
+  LaneBitmask contains(unsigned Reg) const {
+    unsigned SparseIndex = getSparseIndexFromReg(Reg);
+    RegSet::const_iterator I = Regs.find(SparseIndex);
+    if (I == Regs.end())
+      return LaneBitmask::getNone();
+    return I->LaneMask;
+  }
+
+  /// Mark the \p Pair.LaneMask lanes of \p Pair.Reg as live.
+  /// Returns the previously live lanes of \p Pair.Reg.
+  LaneBitmask insert(RegisterMaskPair Pair) {
+    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
+    auto InsertRes = Regs.insert(IndexMaskPair(SparseIndex, Pair.LaneMask));
+    if (!InsertRes.second) {
+      LaneBitmask PrevMask = InsertRes.first->LaneMask;
+      InsertRes.first->LaneMask |= Pair.LaneMask;
+      return PrevMask;
+    }
+    return LaneBitmask::getNone();
+  }
+
+  /// Clears the \p Pair.LaneMask lanes of \p Pair.Reg (mark them as dead).
+  /// Returns the previously live lanes of \p Pair.Reg.
+  LaneBitmask erase(RegisterMaskPair Pair) {
+    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
+    RegSet::iterator I = Regs.find(SparseIndex);
+    if (I == Regs.end())
+      return LaneBitmask::getNone();
+    LaneBitmask PrevMask = I->LaneMask;
+    I->LaneMask &= ~Pair.LaneMask;
+    return PrevMask;
+  }
+
+  size_t size() const {
+    return Regs.size();
+  }
+
+  template<typename ContainerT>
+  void appendTo(ContainerT &To) const {
+    for (const IndexMaskPair &P : Regs) {
+      unsigned Reg = getRegFromSparseIndex(P.Index);
+      if (P.LaneMask.any())
+        To.push_back(RegisterMaskPair(Reg, P.LaneMask));
+    }
+  }
+};
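+
+// Example (editor's sketch): insert() and erase() return the lanes that were
+// live before the update, which lets callers detect liveness transitions;
+// MRI, Reg and Mask are placeholders.
+//
+//   LiveRegSet Live;
+//   Live.init(MRI);
+//   LaneBitmask Prev = Live.insert(RegisterMaskPair(Reg, Mask));
+//   if (Prev.none()) {
+//     // Reg just became live in some lane.
+//   }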
+
+/// Track the current register pressure at some position in the instruction
+/// stream, and remember the high water mark within the region traversed. This
+/// does not automatically consider live-through ranges. The client may
+/// independently adjust for global liveness.
+///
+/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
+/// be tracked across a larger region by storing a RegisterPressure result at
+/// each block boundary and explicitly adjusting pressure to account for block
+/// live-in and live-out register sets.
+///
+/// RegPressureTracker holds a reference to a RegisterPressure result that it
+/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
+/// is invalid until it reaches the end of the block or closeRegion() is
+/// explicitly called. Similarly, P.TopIdx is invalid during upward
+/// tracking. Changing direction has the side effect of closing the region,
+/// and traversing past TopIdx or BottomIdx reopens it.
+class RegPressureTracker {
+  const MachineFunction *MF = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+  const RegisterClassInfo *RCI = nullptr;
+  const MachineRegisterInfo *MRI;
+  const LiveIntervals *LIS = nullptr;
+
+  /// We currently only allow pressure tracking within a block.
+  const MachineBasicBlock *MBB = nullptr;
+
+  /// Track the max pressure within the region traversed so far.
+  RegisterPressure &P;
+
+  /// Run in two modes depending on whether constructed with IntervalPressure
+  /// or RegionPressure. If RequireIntervals is false, LIS is ignored.
+  bool RequireIntervals;
+
+  /// True if UntiedDefs will be populated.
+  bool TrackUntiedDefs = false;
+
+  /// True if lanemasks should be tracked.
+  bool TrackLaneMasks = false;
+
+  /// Register pressure corresponds to liveness before this instruction
+  /// iterator. It may point to the end of the block or a DebugValue rather than
+  /// an instruction.
+  MachineBasicBlock::const_iterator CurrPos;
+
+  /// Pressure map indexed by pressure set ID, not class ID.
+  std::vector<unsigned> CurrSetPressure;
+
+  /// Set of live registers.
+  LiveRegSet LiveRegs;
+
+  /// Set of vreg defs that start a live range.
+  SparseSet<unsigned, VirtReg2IndexFunctor> UntiedDefs;
+  /// Live-through pressure.
+  std::vector<unsigned> LiveThruPressure;
+
+public:
+  RegPressureTracker(IntervalPressure &rp) : P(rp), RequireIntervals(true) {}
+  RegPressureTracker(RegionPressure &rp) : P(rp), RequireIntervals(false) {}
+
+  void reset();
+
+  void init(const MachineFunction *mf, const RegisterClassInfo *rci,
+            const LiveIntervals *lis, const MachineBasicBlock *mbb,
+            MachineBasicBlock::const_iterator pos,
+            bool TrackLaneMasks, bool TrackUntiedDefs);
+
+  /// Force liveness of virtual registers or physical register
+  /// units. Particularly useful to initialize the livein/out state of the
+  /// tracker before the first call to advance/recede.
+  void addLiveRegs(ArrayRef<RegisterMaskPair> Regs);
+
+  /// Get the MI position corresponding to this register pressure.
+  MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
+
+  // Reset the MI position corresponding to the register pressure. This allows
+  // schedulers to move instructions above the RegPressureTracker's
+  // CurrPos. Since the pressure is computed before CurrPos, the iterator
+  // position changes while pressure does not.
+  void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
+
+  /// Recede across the previous instruction.
+  void recede(SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);
+
+  /// Recede across the previous instruction.
+  /// This "low-level" variant assumes that recedeSkipDebugValues() was
+  /// called previously and takes precomputed RegisterOperands for the
+  /// instruction.
+  void recede(const RegisterOperands &RegOpers,
+              SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);
+
+  /// Recede until we find an instruction which is not a DebugValue.
+  void recedeSkipDebugValues();
+
+  /// Advance across the current instruction.
+  void advance();
+
+  /// Advance across the current instruction.
+  /// This is a "low-level" variant of advance() which takes precomputed
+  /// RegisterOperands of the instruction.
+  void advance(const RegisterOperands &RegOpers);
+
+  /// Finalize the region boundaries and record live ins and live outs.
+  void closeRegion();
+
+  /// Initialize the LiveThru pressure set based on the untied defs found in
+  /// RPTracker.
+  void initLiveThru(const RegPressureTracker &RPTracker);
+
+  /// Copy an existing live thru pressure result.
+  void initLiveThru(ArrayRef<unsigned> PressureSet) {
+    LiveThruPressure.assign(PressureSet.begin(), PressureSet.end());
+  }
+
+  ArrayRef<unsigned> getLiveThru() const { return LiveThruPressure; }
+
+  /// Get the resulting register pressure over the traversed region.
+  /// This result is complete if closeRegion() was explicitly invoked.
+  RegisterPressure &getPressure() { return P; }
+  const RegisterPressure &getPressure() const { return P; }
+
+  /// Get the register set pressure at the current position, which may be less
+  /// than the pressure across the traversed region.
+  const std::vector<unsigned> &getRegSetPressureAtPos() const {
+    return CurrSetPressure;
+  }
+
+  bool isTopClosed() const;
+  bool isBottomClosed() const;
+
+  void closeTop();
+  void closeBottom();
+
+  /// Consider the pressure increase caused by traversing this instruction
+  /// bottom-up. Find the pressure set with the most change beyond its pressure
+  /// limit based on the tracker's current pressure, and record the number of
+  /// excess register units of that pressure set introduced by this instruction.
+  void getMaxUpwardPressureDelta(const MachineInstr *MI,
+                                 PressureDiff *PDiff,
+                                 RegPressureDelta &Delta,
+                                 ArrayRef<PressureChange> CriticalPSets,
+                                 ArrayRef<unsigned> MaxPressureLimit);
+
+  void getUpwardPressureDelta(const MachineInstr *MI,
+                              /*const*/ PressureDiff &PDiff,
+                              RegPressureDelta &Delta,
+                              ArrayRef<PressureChange> CriticalPSets,
+                              ArrayRef<unsigned> MaxPressureLimit) const;
+
+  /// Consider the pressure increase caused by traversing this instruction
+  /// top-down. Find the pressure set with the most change beyond its pressure
+  /// limit based on the tracker's current pressure, and record the number of
+  /// excess register units of that pressure set introduced by this instruction.
+  void getMaxDownwardPressureDelta(const MachineInstr *MI,
+                                   RegPressureDelta &Delta,
+                                   ArrayRef<PressureChange> CriticalPSets,
+                                   ArrayRef<unsigned> MaxPressureLimit);
+
+  /// Find the pressure set with the most change beyond its pressure limit after
+  /// traversing this instruction either upward or downward depending on the
+  /// closed end of the current region.
+  void getMaxPressureDelta(const MachineInstr *MI,
+                           RegPressureDelta &Delta,
+                           ArrayRef<PressureChange> CriticalPSets,
+                           ArrayRef<unsigned> MaxPressureLimit) {
+    if (isTopClosed())
+      return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
+                                         MaxPressureLimit);
+
+    assert(isBottomClosed() && "Uninitialized pressure tracker");
+    return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
+                                     MaxPressureLimit);
+  }
+
+  /// Get the pressure of each PSet after traversing this instruction bottom-up.
+  void getUpwardPressure(const MachineInstr *MI,
+                         std::vector<unsigned> &PressureResult,
+                         std::vector<unsigned> &MaxPressureResult);
+
+  /// Get the pressure of each PSet after traversing this instruction top-down.
+  void getDownwardPressure(const MachineInstr *MI,
+                           std::vector<unsigned> &PressureResult,
+                           std::vector<unsigned> &MaxPressureResult);
+
+  void getPressureAfterInst(const MachineInstr *MI,
+                            std::vector<unsigned> &PressureResult,
+                            std::vector<unsigned> &MaxPressureResult) {
+    if (isTopClosed())
+      return getUpwardPressure(MI, PressureResult, MaxPressureResult);
+
+    assert(isBottomClosed() && "Uninitialized pressure tracker");
+    return getDownwardPressure(MI, PressureResult, MaxPressureResult);
+  }
+
+  bool hasUntiedDef(unsigned VirtReg) const {
+    return UntiedDefs.count(VirtReg);
+  }
+
+  void dump() const;
+
+protected:
+  /// Add Reg to the live out set and increase max pressure.
+  void discoverLiveOut(RegisterMaskPair Pair);
+  /// Add Reg to the live in set and increase max pressure.
+  void discoverLiveIn(RegisterMaskPair Pair);
+
+  /// \brief Get the SlotIndex for the first nondebug instruction including or
+  /// after the current position.
+  SlotIndex getCurrSlot() const;
+
+  void increaseRegPressure(unsigned RegUnit, LaneBitmask PreviousMask,
+                           LaneBitmask NewMask);
+  void decreaseRegPressure(unsigned RegUnit, LaneBitmask PreviousMask,
+                           LaneBitmask NewMask);
+
+  void bumpDeadDefs(ArrayRef<RegisterMaskPair> DeadDefs);
+
+  void bumpUpwardPressure(const MachineInstr *MI);
+  void bumpDownwardPressure(const MachineInstr *MI);
+
+  void discoverLiveInOrOut(RegisterMaskPair Pair,
+                           SmallVectorImpl<RegisterMaskPair> &LiveInOrOut);
+
+  LaneBitmask getLastUsedLanes(unsigned RegUnit, SlotIndex Pos) const;
+  LaneBitmask getLiveLanesAt(unsigned RegUnit, SlotIndex Pos) const;
+  LaneBitmask getLiveThroughAt(unsigned RegUnit, SlotIndex Pos) const;
+};
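+
+// Example (editor's sketch): tracking pressure bottom-up across a whole
+// block; MF, RCI, LIS and MBB are placeholders.
+//
+//   IntervalPressure RP;
+//   RegPressureTracker RPT(RP);
+//   RPT.init(&MF, &RCI, &LIS, &MBB, MBB.end(),
+//            /*TrackLaneMasks=*/false, /*TrackUntiedDefs=*/false);
+//   while (RPT.getPos() != MBB.begin())
+//     RPT.recede();
+//   RPT.closeRegion(); // RP now holds live-ins/outs and max set pressure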
+
+void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
+                        const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERPRESSURE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
new file mode 100644
index 0000000..489c72b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
@@ -0,0 +1,231 @@
+//===- RegisterScavenging.h - Machine register scavenging -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file declares the machine register scavenger class. It can provide
+/// information such as unused registers at any point in a machine basic block.
+/// It also provides a mechanism to make registers available by evicting them
+/// to spill slots.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
+#define LLVM_CODEGEN_REGISTERSCAVENGING_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+
+namespace llvm {
+
+class MachineInstr;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+class RegScavenger {
+  const TargetRegisterInfo *TRI;
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo* MRI;
+  MachineBasicBlock *MBB = nullptr;
+  MachineBasicBlock::iterator MBBI;
+  unsigned NumRegUnits = 0;
+
+  /// True if RegScavenger is currently tracking the liveness of registers.
+  bool Tracking = false;
+
+  /// Information on scavenged registers (held in a spill slot).
+  struct ScavengedInfo {
+    ScavengedInfo(int FI = -1) : FrameIndex(FI) {}
+
+    /// A spill slot used for scavenging a register post register allocation.
+    int FrameIndex;
+
+    /// If non-zero, the specific register is currently being
+    /// scavenged. That is, it is spilled to this scavenging stack slot.
+    unsigned Reg = 0;
+
+    /// The instruction that restores the scavenged register from stack.
+    const MachineInstr *Restore = nullptr;
+  };
+
+  /// A vector of information on scavenged registers.
+  SmallVector<ScavengedInfo, 2> Scavenged;
+
+  LiveRegUnits LiveUnits;
+
+  // These BitVectors are only used internally to forward(). They are members
+  // to avoid frequent reallocations.
+  BitVector KillRegUnits, DefRegUnits;
+  BitVector TmpRegUnits;
+
+public:
+  RegScavenger() = default;
+
+  /// Start tracking liveness from the beginning of basic block \p MBB.
+  void enterBasicBlock(MachineBasicBlock &MBB);
+
+  /// Start tracking liveness from the end of basic block \p MBB.
+  /// Use backward() to move towards the beginning of the block. This is
+  /// preferred to enterBasicBlock() and forward() because it does not depend
+  /// on the presence of kill flags.
+  void enterBasicBlockEnd(MachineBasicBlock &MBB);
+
+  /// Move the internal MBB iterator and update register states.
+  void forward();
+
+  /// Move the internal MBB iterator and update register states until
+  /// it has processed the specific iterator.
+  void forward(MachineBasicBlock::iterator I) {
+    if (!Tracking && MBB->begin() != I) forward();
+    while (MBBI != I) forward();
+  }
+
+  /// Invert the behavior of forward() on the current instruction (undo the
+  /// changes to the available registers made by forward()).
+  void unprocess();
+
+  /// Unprocess instructions until you reach the provided iterator.
+  void unprocess(MachineBasicBlock::iterator I) {
+    while (MBBI != I) unprocess();
+  }
+
+  /// Update internal register state and move MBB iterator backwards.
+  /// Contrary to unprocess() this method gives precise results even in the
+  /// absence of kill flags.
+  void backward();
+
+  /// Call backward() as long as the internal iterator does not point to \p I.
+  void backward(MachineBasicBlock::iterator I) {
+    while (MBBI != I)
+      backward();
+  }
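+
+  // A sketch of the intended backward walk (hypothetical caller code):
+  //
+  //   RS.enterBasicBlockEnd(MBB);
+  //   for (MachineBasicBlock::iterator I = std::prev(MBB.end()),
+  //                                    B = MBB.begin(); I != B; --I) {
+  //     RS.backward(I); // update liveness as the iterator moves to I
+  //     // ... query RS.isRegUsed(Reg) at this point ...
+  //   }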
+
+  /// Move the internal MBB iterator but do not update register states.
+  void skipTo(MachineBasicBlock::iterator I) {
+    if (I == MachineBasicBlock::iterator(nullptr))
+      Tracking = false;
+    MBBI = I;
+  }
+
+  MachineBasicBlock::iterator getCurrentPosition() const { return MBBI; }
+
+  /// Return true if a specific register is currently in use.
+  bool isRegUsed(unsigned Reg, bool includeReserved = true) const;
+
+  /// Return all available registers in the register class in Mask.
+  BitVector getRegsAvailable(const TargetRegisterClass *RC);
+
+  /// Find an unused register of the specified register class.
+  /// Return 0 if none is found.
+  unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
+
+  /// Add a scavenging frame index.
+  void addScavengingFrameIndex(int FI) {
+    Scavenged.push_back(ScavengedInfo(FI));
+  }
+
+  /// Query whether a frame index is a scavenging frame index.
+  bool isScavengingFrameIndex(int FI) const {
+    for (const ScavengedInfo &SI : Scavenged)
+      if (SI.FrameIndex == FI)
+        return true;
+
+    return false;
+  }
+
+  /// Get an array of scavenging frame indices.
+  void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
+    for (const ScavengedInfo &SI : Scavenged)
+      if (SI.FrameIndex >= 0)
+        A.push_back(SI.FrameIndex);
+  }
+
+  /// Make a register of the specified register class
+  /// available and do the appropriate bookkeeping. SPAdj is the stack
+  /// adjustment due to the call frame; it is passed along to
+  /// eliminateFrameIndex().
+  /// Returns the scavenged register.
+  /// This is deprecated as it depends on the quality of the kill flags being
+  /// present; use scavengeRegisterBackwards() instead!
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+                            MachineBasicBlock::iterator I, int SPAdj);
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
+    return scavengeRegister(RegClass, MBBI, SPAdj);
+  }
+
+  /// Make a register of the specified register class available from the current
+  /// position backwards to the place before \p To. If \p RestoreAfter is true
+  /// this includes the instruction following the current position.
+  /// SPAdj is the stack adjustment due to call frame, it's passed along to
+  /// eliminateFrameIndex().
+  /// Returns the scavenged register.
+  unsigned scavengeRegisterBackwards(const TargetRegisterClass &RC,
+                                     MachineBasicBlock::iterator To,
+                                     bool RestoreAfter, int SPAdj);
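+
+  // For example (a sketch; the names are illustrative), a target that runs
+  // out of registers during frame-index elimination might write:
+  //
+  //   unsigned Scratch = RS.scavengeRegisterBackwards(
+  //       RegClass, MI.getIterator(), /*RestoreAfter=*/false, SPAdj);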
+
+  /// Tell the scavenger a register is used.
+  void setRegUsed(unsigned Reg, LaneBitmask LaneMask = LaneBitmask::getAll());
+
+private:
+  /// Returns true if a register is reserved. It is never "unused".
+  bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
+
+  /// setUsed / setUnused - Mark the state of one or a number of register units.
+  ///
+  void setUsed(const BitVector &RegUnits) {
+    LiveUnits.addUnits(RegUnits);
+  }
+  void setUnused(const BitVector &RegUnits) {
+    LiveUnits.removeUnits(RegUnits);
+  }
+
+  /// Processes the current instruction and fills the KillRegUnits and
+  /// DefRegUnits bit vectors.
+  void determineKillsAndDefs();
+
+  /// Add all Reg Units that Reg contains to BV.
+  void addRegUnits(BitVector &BV, unsigned Reg);
+
+  /// Remove all Reg Units that \p Reg contains from \p BV.
+  void removeRegUnits(BitVector &BV, unsigned Reg);
+
+  /// Return the candidate register that is unused for the longest after
+  /// StartMI. UseMI is set to the instruction where the search stopped.
+  ///
+  /// No more than InstrLimit instructions are inspected.
+  unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
+                           BitVector &Candidates,
+                           unsigned InstrLimit,
+                           MachineBasicBlock::iterator &UseMI);
+
+  /// Initialize RegisterScavenger.
+  void init(MachineBasicBlock &MBB);
+
+  /// Mark live-in registers of basic block as used.
+  void setLiveInsUsed(const MachineBasicBlock &MBB);
+
+  /// Spill a register after position \p After and reload it before position
+  /// \p UseMI.
+  ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
+                       MachineBasicBlock::iterator After,
+                       MachineBasicBlock::iterator &UseMI);
+};
+
+/// Replaces all frame index virtual registers with physical registers. Uses the
+/// register scavenger to find an appropriate register to use.
+void scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERSCAVENGING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
new file mode 100644
index 0000000..eabadd8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
@@ -0,0 +1,77 @@
+//==- RegisterUsageInfo.h - Register Usage Information Storage ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass is required to take advantage of the interprocedural register
+/// allocation infrastructure.
+///
+/// This is a simple immutable pass which keeps RegMasks (calculated based on
+/// the actual register allocation) for the functions in a module, and provides
+/// a simple API to query this information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
+#define LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Pass.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+
+class PhysicalRegisterUsageInfo : public ImmutablePass {
+  virtual void anchor();
+
+public:
+  static char ID;
+
+  PhysicalRegisterUsageInfo() : ImmutablePass(ID) {
+    PassRegistry &Registry = *PassRegistry::getPassRegistry();
+    initializePhysicalRegisterUsageInfoPass(Registry);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  /// Set the TargetMachine*, which is used to print the
+  /// analysis when the command line option -print-regusage is used.
+  void setTargetMachine(const TargetMachine *TM_) { TM = TM_; }
+
+  bool doInitialization(Module &M) override;
+
+  bool doFinalization(Module &M) override;
+
+  /// Store the RegMask for the given Function*.
+  void storeUpdateRegUsageInfo(const Function *FP,
+                               std::vector<uint32_t> RegMask);
+
+  /// Query the stored RegMask for the given Function*; returns nullptr if the
+  /// function is not known.
+  const std::vector<uint32_t> *getRegUsageInfo(const Function *FP);
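+
+  // For example, a caller might test whether a register Reg survives calls
+  // to a function F (a sketch; the bit layout follows the usual regmask
+  // convention, cf. MachineOperand::clobbersPhysReg):
+  //
+  //   if (const std::vector<uint32_t> *Mask = PRUI->getRegUsageInfo(&F))
+  //     bool Preserved = (*Mask)[Reg / 32] & (1u << (Reg % 32));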
+
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+private:
+  /// A dense map from Function* to RegMask.
+  /// In the RegMask, 0 means the register is used (clobbered) by the function,
+  /// and 1 means the contents of the register are preserved across the call.
+  DenseMap<const Function *, std::vector<uint32_t>> RegMasks;
+
+  const TargetMachine *TM;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
new file mode 100644
index 0000000..03166cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -0,0 +1,136 @@
+//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+
+namespace llvm {
+  class ResourcePriorityQueue;
+
+  /// Sorting functions for the Available queue.
+  struct resource_sort {
+    ResourcePriorityQueue *PQ;
+    explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
+
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
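+
+  // Picker (an instance of this functor, declared below) orders the
+  // Available queue; e.g. pop() can select the best candidate with
+  // (a sketch): std::max_element(Queue.begin(), Queue.end(), Picker).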
+
+  class ResourcePriorityQueue : public SchedulingPriorityQueue {
+    /// SUnits - The SUnits for the current graph.
+    std::vector<SUnit> *SUnits;
+
+    /// NumNodesSolelyBlocking - This vector contains, for every node in the
+    /// Queue, the number of nodes that the node is the sole unscheduled
+    /// predecessor for.  This is used as a tie-breaker heuristic for better
+    /// mobility.
+    std::vector<unsigned> NumNodesSolelyBlocking;
+
+    /// Queue - The queue of available SUnits.
+    std::vector<SUnit*> Queue;
+
+    /// RegPressure - Tracking current reg pressure per register class.
+    ///
+    std::vector<unsigned> RegPressure;
+
+    /// RegLimit - Tracking the number of allocatable registers per register
+    /// class.
+    std::vector<unsigned> RegLimit;
+
+    resource_sort Picker;
+    const TargetRegisterInfo *TRI;
+    const TargetLowering *TLI;
+    const TargetInstrInfo *TII;
+    const InstrItineraryData* InstrItins;
+    /// ResourcesModel - Represents VLIW state.
+    /// Not limited to VLIW targets per se, but assumes that
+    /// the target defines a DFA.
+    std::unique_ptr<DFAPacketizer> ResourcesModel;
+
+    /// Resource model - packet/bundle model. Purely
+    /// internal at this time.
+    std::vector<SUnit*> Packet;
+
+    /// Heuristics for estimating register pressure.
+    unsigned ParallelLiveRanges;
+    int HorizontalVerticalBalance;
+
+  public:
+    ResourcePriorityQueue(SelectionDAGISel *IS);
+
+    bool isBottomUp() const override { return false; }
+
+    void initNodes(std::vector<SUnit> &sunits) override;
+
+    void addNode(const SUnit *SU) override {
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void updateNode(const SUnit *SU) override {}
+
+    void releaseState() override {
+      SUnits = nullptr;
+    }
+
+    unsigned getLatency(unsigned NodeNum) const {
+      assert(NodeNum < (*SUnits).size());
+      return (*SUnits)[NodeNum].getHeight();
+    }
+
+    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+      assert(NodeNum < NumNodesSolelyBlocking.size());
+      return NumNodesSolelyBlocking[NodeNum];
+    }
+
+    /// Single cost function reflecting benefit of scheduling SU
+    /// in the current cycle.
+    int SUSchedulingCost (SUnit *SU);
+
+    /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
+    ///
+    void initNumRegDefsLeft(SUnit *SU);
+    void updateNumRegDefsLeft(SUnit *SU);
+    int regPressureDelta(SUnit *SU, bool RawPressure = false);
+    int rawRegPressureDelta (SUnit *SU, unsigned RCId);
+
+    bool empty() const override { return Queue.empty(); }
+
+    void push(SUnit *U) override;
+
+    SUnit *pop() override;
+
+    void remove(SUnit *SU) override;
+
+    /// scheduledNode - Main resource tracking point.
+    void scheduledNode(SUnit *Node) override;
+    bool isResourceAvailable(SUnit *SU);
+    void reserveResources(SUnit *SU);
+
+  private:
+    void adjustPriorityOfUnscheduledPreds(SUnit *SU);
+    SUnit *getSingleUnscheduledPred(SUnit *SU);
+    unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
+    unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
+  };
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
new file mode 100644
index 0000000..7ed90d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
@@ -0,0 +1,527 @@
+//===-- llvm/RuntimeLibcalls.def - File that describes libcalls -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the runtime library calls the backend can emit.
+// The various long double types cannot be merged, because 80-bit library
+// functions use "xf" and 128-bit use "tf".
+//
+// When adding PPCF128 functions here, note that their names generally need
+// to be overridden for Darwin with the xxx$LDBL128 form.  See
+// PPCISelLowering.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+
+// Declare the enumerator for each libcall, along with its default name. Some
+// libcalls have different names on particular OSes or architectures. These
+// are set in InitLibcallNames() in TargetLoweringBase.cpp and/or by targets
+// using TargetLoweringBase::setLibcallName()
+#ifndef HANDLE_LIBCALL
+#error "HANDLE_LIBCALL must be defined"
+#endif
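+
+// For example, a user of this file can expand each entry into a table of
+// default names with the usual X-macro pattern (a sketch):
+//
+//   static const char *const LibcallNames[] = {
+//   #define HANDLE_LIBCALL(code, name) name,
+//   #include "llvm/CodeGen/RuntimeLibcalls.def"
+//   };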
+
+// Integer
+HANDLE_LIBCALL(SHL_I16, "__ashlhi3")
+HANDLE_LIBCALL(SHL_I32, "__ashlsi3")
+HANDLE_LIBCALL(SHL_I64, "__ashldi3")
+HANDLE_LIBCALL(SHL_I128, "__ashlti3")
+HANDLE_LIBCALL(SRL_I16, "__lshrhi3")
+HANDLE_LIBCALL(SRL_I32, "__lshrsi3")
+HANDLE_LIBCALL(SRL_I64, "__lshrdi3")
+HANDLE_LIBCALL(SRL_I128, "__lshrti3")
+HANDLE_LIBCALL(SRA_I16, "__ashrhi3")
+HANDLE_LIBCALL(SRA_I32, "__ashrsi3")
+HANDLE_LIBCALL(SRA_I64, "__ashrdi3")
+HANDLE_LIBCALL(SRA_I128, "__ashrti3")
+HANDLE_LIBCALL(MUL_I8, "__mulqi3")
+HANDLE_LIBCALL(MUL_I16, "__mulhi3")
+HANDLE_LIBCALL(MUL_I32, "__mulsi3")
+HANDLE_LIBCALL(MUL_I64, "__muldi3")
+HANDLE_LIBCALL(MUL_I128, "__multi3")
+HANDLE_LIBCALL(MULO_I32, "__mulosi4")
+HANDLE_LIBCALL(MULO_I64, "__mulodi4")
+HANDLE_LIBCALL(MULO_I128, "__muloti4")
+HANDLE_LIBCALL(SDIV_I8, "__divqi3")
+HANDLE_LIBCALL(SDIV_I16, "__divhi3")
+HANDLE_LIBCALL(SDIV_I32, "__divsi3")
+HANDLE_LIBCALL(SDIV_I64, "__divdi3")
+HANDLE_LIBCALL(SDIV_I128, "__divti3")
+HANDLE_LIBCALL(UDIV_I8, "__udivqi3")
+HANDLE_LIBCALL(UDIV_I16, "__udivhi3")
+HANDLE_LIBCALL(UDIV_I32, "__udivsi3")
+HANDLE_LIBCALL(UDIV_I64, "__udivdi3")
+HANDLE_LIBCALL(UDIV_I128, "__udivti3")
+HANDLE_LIBCALL(SREM_I8, "__modqi3")
+HANDLE_LIBCALL(SREM_I16, "__modhi3")
+HANDLE_LIBCALL(SREM_I32, "__modsi3")
+HANDLE_LIBCALL(SREM_I64, "__moddi3")
+HANDLE_LIBCALL(SREM_I128, "__modti3")
+HANDLE_LIBCALL(UREM_I8, "__umodqi3")
+HANDLE_LIBCALL(UREM_I16, "__umodhi3")
+HANDLE_LIBCALL(UREM_I32, "__umodsi3")
+HANDLE_LIBCALL(UREM_I64, "__umoddi3")
+HANDLE_LIBCALL(UREM_I128, "__umodti3")
+HANDLE_LIBCALL(SDIVREM_I8, nullptr)
+HANDLE_LIBCALL(SDIVREM_I16, nullptr)
+HANDLE_LIBCALL(SDIVREM_I32, nullptr)
+HANDLE_LIBCALL(SDIVREM_I64, nullptr)
+HANDLE_LIBCALL(SDIVREM_I128, nullptr)
+HANDLE_LIBCALL(UDIVREM_I8, nullptr)
+HANDLE_LIBCALL(UDIVREM_I16, nullptr)
+HANDLE_LIBCALL(UDIVREM_I32, nullptr)
+HANDLE_LIBCALL(UDIVREM_I64, nullptr)
+HANDLE_LIBCALL(UDIVREM_I128, nullptr)
+HANDLE_LIBCALL(NEG_I32, "__negsi2")
+HANDLE_LIBCALL(NEG_I64, "__negdi2")
+
+// Floating-point
+HANDLE_LIBCALL(ADD_F32, "__addsf3")
+HANDLE_LIBCALL(ADD_F64, "__adddf3")
+HANDLE_LIBCALL(ADD_F80, "__addxf3")
+HANDLE_LIBCALL(ADD_F128, "__addtf3")
+HANDLE_LIBCALL(ADD_PPCF128, "__gcc_qadd")
+HANDLE_LIBCALL(SUB_F32, "__subsf3")
+HANDLE_LIBCALL(SUB_F64, "__subdf3")
+HANDLE_LIBCALL(SUB_F80, "__subxf3")
+HANDLE_LIBCALL(SUB_F128, "__subtf3")
+HANDLE_LIBCALL(SUB_PPCF128, "__gcc_qsub")
+HANDLE_LIBCALL(MUL_F32, "__mulsf3")
+HANDLE_LIBCALL(MUL_F64, "__muldf3")
+HANDLE_LIBCALL(MUL_F80, "__mulxf3")
+HANDLE_LIBCALL(MUL_F128, "__multf3")
+HANDLE_LIBCALL(MUL_PPCF128, "__gcc_qmul")
+HANDLE_LIBCALL(DIV_F32, "__divsf3")
+HANDLE_LIBCALL(DIV_F64, "__divdf3")
+HANDLE_LIBCALL(DIV_F80, "__divxf3")
+HANDLE_LIBCALL(DIV_F128, "__divtf3")
+HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
+HANDLE_LIBCALL(REM_F32, "fmodf")
+HANDLE_LIBCALL(REM_F64, "fmod")
+HANDLE_LIBCALL(REM_F80, "fmodl")
+HANDLE_LIBCALL(REM_F128, "fmodl")
+HANDLE_LIBCALL(REM_PPCF128, "fmodl")
+HANDLE_LIBCALL(FMA_F32, "fmaf")
+HANDLE_LIBCALL(FMA_F64, "fma")
+HANDLE_LIBCALL(FMA_F80, "fmal")
+HANDLE_LIBCALL(FMA_F128, "fmal")
+HANDLE_LIBCALL(FMA_PPCF128, "fmal")
+HANDLE_LIBCALL(POWI_F32, "__powisf2")
+HANDLE_LIBCALL(POWI_F64, "__powidf2")
+HANDLE_LIBCALL(POWI_F80, "__powixf2")
+HANDLE_LIBCALL(POWI_F128, "__powitf2")
+HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
+HANDLE_LIBCALL(SQRT_F32, "sqrtf")
+HANDLE_LIBCALL(SQRT_F64, "sqrt")
+HANDLE_LIBCALL(SQRT_F80, "sqrtl")
+HANDLE_LIBCALL(SQRT_F128, "sqrtl")
+HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
+HANDLE_LIBCALL(LOG_F32, "logf")
+HANDLE_LIBCALL(LOG_F64, "log")
+HANDLE_LIBCALL(LOG_F80, "logl")
+HANDLE_LIBCALL(LOG_F128, "logl")
+HANDLE_LIBCALL(LOG_PPCF128, "logl")
+HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
+HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
+HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
+HANDLE_LIBCALL(LOG2_F32, "log2f")
+HANDLE_LIBCALL(LOG2_F64, "log2")
+HANDLE_LIBCALL(LOG2_F80, "log2l")
+HANDLE_LIBCALL(LOG2_F128, "log2l")
+HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
+HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
+HANDLE_LIBCALL(LOG10_F32, "log10f")
+HANDLE_LIBCALL(LOG10_F64, "log10")
+HANDLE_LIBCALL(LOG10_F80, "log10l")
+HANDLE_LIBCALL(LOG10_F128, "log10l")
+HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
+HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
+HANDLE_LIBCALL(EXP_F32, "expf")
+HANDLE_LIBCALL(EXP_F64, "exp")
+HANDLE_LIBCALL(EXP_F80, "expl")
+HANDLE_LIBCALL(EXP_F128, "expl")
+HANDLE_LIBCALL(EXP_PPCF128, "expl")
+HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
+HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
+HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
+HANDLE_LIBCALL(EXP2_F32, "exp2f")
+HANDLE_LIBCALL(EXP2_F64, "exp2")
+HANDLE_LIBCALL(EXP2_F80, "exp2l")
+HANDLE_LIBCALL(EXP2_F128, "exp2l")
+HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
+HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
+HANDLE_LIBCALL(SIN_F32, "sinf")
+HANDLE_LIBCALL(SIN_F64, "sin")
+HANDLE_LIBCALL(SIN_F80, "sinl")
+HANDLE_LIBCALL(SIN_F128, "sinl")
+HANDLE_LIBCALL(SIN_PPCF128, "sinl")
+HANDLE_LIBCALL(COS_F32, "cosf")
+HANDLE_LIBCALL(COS_F64, "cos")
+HANDLE_LIBCALL(COS_F80, "cosl")
+HANDLE_LIBCALL(COS_F128, "cosl")
+HANDLE_LIBCALL(COS_PPCF128, "cosl")
+HANDLE_LIBCALL(SINCOS_F32, nullptr)
+HANDLE_LIBCALL(SINCOS_F64, nullptr)
+HANDLE_LIBCALL(SINCOS_F80, nullptr)
+HANDLE_LIBCALL(SINCOS_F128, nullptr)
+HANDLE_LIBCALL(SINCOS_PPCF128, nullptr)
+HANDLE_LIBCALL(SINCOS_STRET_F32, nullptr)
+HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
+HANDLE_LIBCALL(POW_F32, "powf")
+HANDLE_LIBCALL(POW_F64, "pow")
+HANDLE_LIBCALL(POW_F80, "powl")
+HANDLE_LIBCALL(POW_F128, "powl")
+HANDLE_LIBCALL(POW_PPCF128, "powl")
+HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
+HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
+HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
+HANDLE_LIBCALL(CEIL_F32, "ceilf")
+HANDLE_LIBCALL(CEIL_F64, "ceil")
+HANDLE_LIBCALL(CEIL_F80, "ceill")
+HANDLE_LIBCALL(CEIL_F128, "ceill")
+HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
+HANDLE_LIBCALL(TRUNC_F32, "truncf")
+HANDLE_LIBCALL(TRUNC_F64, "trunc")
+HANDLE_LIBCALL(TRUNC_F80, "truncl")
+HANDLE_LIBCALL(TRUNC_F128, "truncl")
+HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
+HANDLE_LIBCALL(RINT_F32, "rintf")
+HANDLE_LIBCALL(RINT_F64, "rint")
+HANDLE_LIBCALL(RINT_F80, "rintl")
+HANDLE_LIBCALL(RINT_F128, "rintl")
+HANDLE_LIBCALL(RINT_PPCF128, "rintl")
+HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
+HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
+HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
+HANDLE_LIBCALL(ROUND_F32, "roundf")
+HANDLE_LIBCALL(ROUND_F64, "round")
+HANDLE_LIBCALL(ROUND_F80, "roundl")
+HANDLE_LIBCALL(ROUND_F128, "roundl")
+HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
+HANDLE_LIBCALL(FLOOR_F32, "floorf")
+HANDLE_LIBCALL(FLOOR_F64, "floor")
+HANDLE_LIBCALL(FLOOR_F80, "floorl")
+HANDLE_LIBCALL(FLOOR_F128, "floorl")
+HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
+HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
+HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
+HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
+HANDLE_LIBCALL(FMIN_F32, "fminf")
+HANDLE_LIBCALL(FMIN_F64, "fmin")
+HANDLE_LIBCALL(FMIN_F80, "fminl")
+HANDLE_LIBCALL(FMIN_F128, "fminl")
+HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
+HANDLE_LIBCALL(FMAX_F32, "fmaxf")
+HANDLE_LIBCALL(FMAX_F64, "fmax")
+HANDLE_LIBCALL(FMAX_F80, "fmaxl")
+HANDLE_LIBCALL(FMAX_F128, "fmaxl")
+HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
+
+// Conversion
+HANDLE_LIBCALL(FPEXT_F32_PPCF128, "__gcc_stoq")
+HANDLE_LIBCALL(FPEXT_F64_PPCF128, "__gcc_dtoq")
+HANDLE_LIBCALL(FPEXT_F80_F128, "__extendxftf2")
+HANDLE_LIBCALL(FPEXT_F64_F128, "__extenddftf2")
+HANDLE_LIBCALL(FPEXT_F32_F128, "__extendsftf2")
+HANDLE_LIBCALL(FPEXT_F32_F64, "__extendsfdf2")
+HANDLE_LIBCALL(FPEXT_F16_F32, "__gnu_h2f_ieee")
+HANDLE_LIBCALL(FPROUND_F32_F16, "__gnu_f2h_ieee")
+HANDLE_LIBCALL(FPROUND_F64_F16, "__truncdfhf2")
+HANDLE_LIBCALL(FPROUND_F80_F16, "__truncxfhf2")
+HANDLE_LIBCALL(FPROUND_F128_F16, "__trunctfhf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F16, "__trunctfhf2")
+HANDLE_LIBCALL(FPROUND_F64_F32, "__truncdfsf2")
+HANDLE_LIBCALL(FPROUND_F80_F32, "__truncxfsf2")
+HANDLE_LIBCALL(FPROUND_F128_F32, "__trunctfsf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F32, "__gcc_qtos")
+HANDLE_LIBCALL(FPROUND_F80_F64, "__truncxfdf2")
+HANDLE_LIBCALL(FPROUND_F128_F64, "__trunctfdf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F64, "__gcc_qtod")
+HANDLE_LIBCALL(FPROUND_F128_F80, "__trunctfxf2")
+HANDLE_LIBCALL(FPTOSINT_F32_I32, "__fixsfsi")
+HANDLE_LIBCALL(FPTOSINT_F32_I64, "__fixsfdi")
+HANDLE_LIBCALL(FPTOSINT_F32_I128, "__fixsfti")
+HANDLE_LIBCALL(FPTOSINT_F64_I32, "__fixdfsi")
+HANDLE_LIBCALL(FPTOSINT_F64_I64, "__fixdfdi")
+HANDLE_LIBCALL(FPTOSINT_F64_I128, "__fixdfti")
+HANDLE_LIBCALL(FPTOSINT_F80_I32, "__fixxfsi")
+HANDLE_LIBCALL(FPTOSINT_F80_I64, "__fixxfdi")
+HANDLE_LIBCALL(FPTOSINT_F80_I128, "__fixxfti")
+HANDLE_LIBCALL(FPTOSINT_F128_I32, "__fixtfsi")
+HANDLE_LIBCALL(FPTOSINT_F128_I64, "__fixtfdi")
+HANDLE_LIBCALL(FPTOSINT_F128_I128, "__fixtfti")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I32, "__gcc_qtou")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I64, "__fixtfdi")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I128, "__fixtfti")
+HANDLE_LIBCALL(FPTOUINT_F32_I32, "__fixunssfsi")
+HANDLE_LIBCALL(FPTOUINT_F32_I64, "__fixunssfdi")
+HANDLE_LIBCALL(FPTOUINT_F32_I128, "__fixunssfti")
+HANDLE_LIBCALL(FPTOUINT_F64_I32, "__fixunsdfsi")
+HANDLE_LIBCALL(FPTOUINT_F64_I64, "__fixunsdfdi")
+HANDLE_LIBCALL(FPTOUINT_F64_I128, "__fixunsdfti")
+HANDLE_LIBCALL(FPTOUINT_F80_I32, "__fixunsxfsi")
+HANDLE_LIBCALL(FPTOUINT_F80_I64, "__fixunsxfdi")
+HANDLE_LIBCALL(FPTOUINT_F80_I128, "__fixunsxfti")
+HANDLE_LIBCALL(FPTOUINT_F128_I32, "__fixunstfsi")
+HANDLE_LIBCALL(FPTOUINT_F128_I64, "__fixunstfdi")
+HANDLE_LIBCALL(FPTOUINT_F128_I128, "__fixunstfti")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I32, "__fixunstfsi")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I64, "__fixunstfdi")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I128, "__fixunstfti")
+HANDLE_LIBCALL(SINTTOFP_I32_F32, "__floatsisf")
+HANDLE_LIBCALL(SINTTOFP_I32_F64, "__floatsidf")
+HANDLE_LIBCALL(SINTTOFP_I32_F80, "__floatsixf")
+HANDLE_LIBCALL(SINTTOFP_I32_F128, "__floatsitf")
+HANDLE_LIBCALL(SINTTOFP_I32_PPCF128, "__gcc_itoq")
+HANDLE_LIBCALL(SINTTOFP_I64_F32, "__floatdisf")
+HANDLE_LIBCALL(SINTTOFP_I64_F64, "__floatdidf")
+HANDLE_LIBCALL(SINTTOFP_I64_F80, "__floatdixf")
+HANDLE_LIBCALL(SINTTOFP_I64_F128, "__floatditf")
+HANDLE_LIBCALL(SINTTOFP_I64_PPCF128, "__floatditf")
+HANDLE_LIBCALL(SINTTOFP_I128_F32, "__floattisf")
+HANDLE_LIBCALL(SINTTOFP_I128_F64, "__floattidf")
+HANDLE_LIBCALL(SINTTOFP_I128_F80, "__floattixf")
+HANDLE_LIBCALL(SINTTOFP_I128_F128, "__floattitf")
+HANDLE_LIBCALL(SINTTOFP_I128_PPCF128, "__floattitf")
+HANDLE_LIBCALL(UINTTOFP_I32_F32, "__floatunsisf")
+HANDLE_LIBCALL(UINTTOFP_I32_F64, "__floatunsidf")
+HANDLE_LIBCALL(UINTTOFP_I32_F80, "__floatunsixf")
+HANDLE_LIBCALL(UINTTOFP_I32_F128, "__floatunsitf")
+HANDLE_LIBCALL(UINTTOFP_I32_PPCF128, "__gcc_utoq")
+HANDLE_LIBCALL(UINTTOFP_I64_F32, "__floatundisf")
+HANDLE_LIBCALL(UINTTOFP_I64_F64, "__floatundidf")
+HANDLE_LIBCALL(UINTTOFP_I64_F80, "__floatundixf")
+HANDLE_LIBCALL(UINTTOFP_I64_F128, "__floatunditf")
+HANDLE_LIBCALL(UINTTOFP_I64_PPCF128, "__floatunditf")
+HANDLE_LIBCALL(UINTTOFP_I128_F32, "__floatuntisf")
+HANDLE_LIBCALL(UINTTOFP_I128_F64, "__floatuntidf")
+HANDLE_LIBCALL(UINTTOFP_I128_F80, "__floatuntixf")
+HANDLE_LIBCALL(UINTTOFP_I128_F128, "__floatuntitf")
+HANDLE_LIBCALL(UINTTOFP_I128_PPCF128, "__floatuntitf")
+
+// Comparison
+HANDLE_LIBCALL(OEQ_F32, "__eqsf2")
+HANDLE_LIBCALL(OEQ_F64, "__eqdf2")
+HANDLE_LIBCALL(OEQ_F128, "__eqtf2")
+HANDLE_LIBCALL(OEQ_PPCF128, "__gcc_qeq")
+HANDLE_LIBCALL(UNE_F32, "__nesf2")
+HANDLE_LIBCALL(UNE_F64, "__nedf2")
+HANDLE_LIBCALL(UNE_F128, "__netf2")
+HANDLE_LIBCALL(UNE_PPCF128, "__gcc_qne")
+HANDLE_LIBCALL(OGE_F32, "__gesf2")
+HANDLE_LIBCALL(OGE_F64, "__gedf2")
+HANDLE_LIBCALL(OGE_F128, "__getf2")
+HANDLE_LIBCALL(OGE_PPCF128, "__gcc_qge")
+HANDLE_LIBCALL(OLT_F32, "__ltsf2")
+HANDLE_LIBCALL(OLT_F64, "__ltdf2")
+HANDLE_LIBCALL(OLT_F128, "__lttf2")
+HANDLE_LIBCALL(OLT_PPCF128, "__gcc_qlt")
+HANDLE_LIBCALL(OLE_F32, "__lesf2")
+HANDLE_LIBCALL(OLE_F64, "__ledf2")
+HANDLE_LIBCALL(OLE_F128, "__letf2")
+HANDLE_LIBCALL(OLE_PPCF128, "__gcc_qle")
+HANDLE_LIBCALL(OGT_F32, "__gtsf2")
+HANDLE_LIBCALL(OGT_F64, "__gtdf2")
+HANDLE_LIBCALL(OGT_F128, "__gttf2")
+HANDLE_LIBCALL(OGT_PPCF128, "__gcc_qgt")
+HANDLE_LIBCALL(UO_F32, "__unordsf2")
+HANDLE_LIBCALL(UO_F64, "__unorddf2")
+HANDLE_LIBCALL(UO_F128, "__unordtf2")
+HANDLE_LIBCALL(UO_PPCF128, "__gcc_qunord")
+HANDLE_LIBCALL(O_F32, "__unordsf2")
+HANDLE_LIBCALL(O_F64, "__unorddf2")
+HANDLE_LIBCALL(O_F128, "__unordtf2")
+HANDLE_LIBCALL(O_PPCF128, "__gcc_qunord")
+
+// Memory
+HANDLE_LIBCALL(MEMCPY, "memcpy")
+HANDLE_LIBCALL(MEMMOVE, "memmove")
+HANDLE_LIBCALL(MEMSET, "memset")
+HANDLE_LIBCALL(BZERO, nullptr)
+
+// Element-wise unordered-atomic memory of different sizes
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memcpy_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memcpy_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memcpy_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memcpy_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memcpy_element_unordered_atomic_16")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memmove_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memmove_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memmove_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memmove_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memmove_element_unordered_atomic_16")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memset_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memset_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memset_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memset_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memset_element_unordered_atomic_16")
+
+// Exception handling
+HANDLE_LIBCALL(UNWIND_RESUME, "_Unwind_Resume")
+
+// Note: there are two sets of atomics libcalls; see
+// <https://llvm.org/docs/Atomics.html> for more info on the
+// difference between them.
+
+// Atomic '__sync_*' libcalls.
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_1, "__sync_val_compare_and_swap_1")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_2, "__sync_val_compare_and_swap_2")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_4, "__sync_val_compare_and_swap_4")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_8, "__sync_val_compare_and_swap_8")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_16, "__sync_val_compare_and_swap_16")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_1, "__sync_lock_test_and_set_1")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_2, "__sync_lock_test_and_set_2")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_4, "__sync_lock_test_and_set_4")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_8, "__sync_lock_test_and_set_8")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_16, "__sync_lock_test_and_set_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_1, "__sync_fetch_and_max_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_2, "__sync_fetch_and_max_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_4, "__sync_fetch_and_max_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_8, "__sync_fetch_and_max_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_16, "__sync_fetch_and_max_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_1, "__sync_fetch_and_umax_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_2, "__sync_fetch_and_umax_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_4, "__sync_fetch_and_umax_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_8, "__sync_fetch_and_umax_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_16, "__sync_fetch_and_umax_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_1, "__sync_fetch_and_min_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_2, "__sync_fetch_and_min_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_4, "__sync_fetch_and_min_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_8, "__sync_fetch_and_min_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_16, "__sync_fetch_and_min_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_1, "__sync_fetch_and_umin_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_2, "__sync_fetch_and_umin_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_4, "__sync_fetch_and_umin_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_8, "__sync_fetch_and_umin_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_16, "__sync_fetch_and_umin_16")
+
+// Atomic `__atomic_*' libcalls.
+HANDLE_LIBCALL(ATOMIC_LOAD, "__atomic_load")
+HANDLE_LIBCALL(ATOMIC_LOAD_1, "__atomic_load_1")
+HANDLE_LIBCALL(ATOMIC_LOAD_2, "__atomic_load_2")
+HANDLE_LIBCALL(ATOMIC_LOAD_4, "__atomic_load_4")
+HANDLE_LIBCALL(ATOMIC_LOAD_8, "__atomic_load_8")
+HANDLE_LIBCALL(ATOMIC_LOAD_16, "__atomic_load_16")
+
+HANDLE_LIBCALL(ATOMIC_STORE, "__atomic_store")
+HANDLE_LIBCALL(ATOMIC_STORE_1, "__atomic_store_1")
+HANDLE_LIBCALL(ATOMIC_STORE_2, "__atomic_store_2")
+HANDLE_LIBCALL(ATOMIC_STORE_4, "__atomic_store_4")
+HANDLE_LIBCALL(ATOMIC_STORE_8, "__atomic_store_8")
+HANDLE_LIBCALL(ATOMIC_STORE_16, "__atomic_store_16")
+
+HANDLE_LIBCALL(ATOMIC_EXCHANGE, "__atomic_exchange")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_1, "__atomic_exchange_1")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_2, "__atomic_exchange_2")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_4, "__atomic_exchange_4")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_8, "__atomic_exchange_8")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_16, "__atomic_exchange_16")
+
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE, "__atomic_compare_exchange")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_1, "__atomic_compare_exchange_1")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_2, "__atomic_compare_exchange_2")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_4, "__atomic_compare_exchange_4")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_8, "__atomic_compare_exchange_8")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_16, "__atomic_compare_exchange_16")
+
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_1, "__atomic_fetch_add_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_2, "__atomic_fetch_add_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_4, "__atomic_fetch_add_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_8, "__atomic_fetch_add_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_16, "__atomic_fetch_add_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_1, "__atomic_fetch_sub_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_2, "__atomic_fetch_sub_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_4, "__atomic_fetch_sub_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_8, "__atomic_fetch_sub_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_16, "__atomic_fetch_sub_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_2, "__atomic_fetch_and_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_4, "__atomic_fetch_and_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_8, "__atomic_fetch_and_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_16, "__atomic_fetch_and_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_2, "__atomic_fetch_or_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_4, "__atomic_fetch_or_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_8, "__atomic_fetch_or_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_16, "__atomic_fetch_or_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_1, "__atomic_fetch_xor_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_2, "__atomic_fetch_xor_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_4, "__atomic_fetch_xor_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_8, "__atomic_fetch_xor_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_16, "__atomic_fetch_xor_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_1, "__atomic_fetch_nand_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_2, "__atomic_fetch_nand_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_4, "__atomic_fetch_nand_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_8, "__atomic_fetch_nand_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_16, "__atomic_fetch_nand_16")
+
+// Stack Protector Fail
+HANDLE_LIBCALL(STACKPROTECTOR_CHECK_FAIL, "__stack_chk_fail")
+
+// Deoptimization
+HANDLE_LIBCALL(DEOPTIMIZE, "__llvm_deoptimize")
+
+HANDLE_LIBCALL(UNKNOWN_LIBCALL, nullptr)
+
+#undef HANDLE_LIBCALL
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
new file mode 100644
index 0000000..016bef1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -0,0 +1,82 @@
+//===-- CodeGen/RuntimeLibcalls.h - Runtime Library Calls -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enum representing the list of runtime library calls
+// the backend may emit during code generation, and also some helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
+#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+namespace RTLIB {
+  /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
+  /// the backend can emit.  The various long double types cannot be merged,
+  /// because 80-bit library functions use "xf" and 128-bit use "tf".
+  ///
+  /// When adding PPCF128 functions here, note that their names generally need
+  /// to be overridden for Darwin with the xxx$LDBL128 form.  See
+  /// PPCISelLowering.cpp.
+  ///
+  enum Libcall {
+#define HANDLE_LIBCALL(code, name) code,
+    #include "RuntimeLibcalls.def"
+#undef HANDLE_LIBCALL
+  };
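+
+  // Backends typically consume these values through TargetLoweringBase,
+  // e.g. (a sketch): const char *Name = TLI.getLibcallName(RTLIB::MEMCPY);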
+
+  /// getFPEXT - Return the FPEXT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPEXT(EVT OpVT, EVT RetVT);
+
+  /// getFPROUND - Return the FPROUND_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPROUND(EVT OpVT, EVT RetVT);
+
+  /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPTOSINT(EVT OpVT, EVT RetVT);
+
+  /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPTOUINT(EVT OpVT, EVT RetVT);
+
+  /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getSINTTOFP(EVT OpVT, EVT RetVT);
+
+  /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getUINTTOFP(EVT OpVT, EVT RetVT);
+
+  /// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getSYNC(unsigned Opc, MVT VT);
+
+  /// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return
+  /// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+  /// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return
+  /// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+  /// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return
+  /// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+} // end namespace RTLIB
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_RUNTIMELIBCALLS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td b/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td
new file mode 100644
index 0000000..83bbab2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td
@@ -0,0 +1,34 @@
+//===- SDNodeProperties.td - Common code for DAG isels ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class SDNodeProperty;
+
+// Selection DAG Pattern Operations
+class SDPatternOperator {
+  list<SDNodeProperty> Properties = [];
+}
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node Properties.
+//
+// Note: These are hard coded into tblgen.
+//
+def SDNPCommutative : SDNodeProperty;   // X op Y == Y op X
+def SDNPAssociative : SDNodeProperty;   // (X op Y) op Z == X op (Y op Z)
+def SDNPHasChain    : SDNodeProperty;   // R/W chain operand and result
+def SDNPOutGlue     : SDNodeProperty;   // Write a flag result
+def SDNPInGlue      : SDNodeProperty;   // Read a flag operand
+def SDNPOptInGlue   : SDNodeProperty;   // Optionally read a flag operand
+def SDNPMayStore    : SDNodeProperty;   // May write to memory, sets 'mayStore'.
+def SDNPMayLoad     : SDNodeProperty;   // May read memory, sets 'mayLoad'.
+def SDNPSideEffect  : SDNodeProperty;   // Sets 'HasUnmodelledSideEffects'.
+def SDNPMemOperand  : SDNodeProperty;   // Touches memory, has assoc MemOperand
+def SDNPVariadic    : SDNodeProperty;   // Node has variable arguments.
+def SDNPWantRoot    : SDNodeProperty;   // ComplexPattern gets the root of match
+def SDNPWantParent  : SDNodeProperty;   // ComplexPattern gets the parent
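+
+// For example, a node definition elsewhere in the tree attaches these
+// properties roughly like so (a sketch, following TargetSelectionDAG.td):
+//
+//   def add : SDNode<"ISD::ADD", SDTIntBinOp,
+//                    [SDNPCommutative, SDNPAssociative]>;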
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
new file mode 100644
index 0000000..f3f2f05
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
@@ -0,0 +1,764 @@
+//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements the ScheduleDAG class, which is used as the common base
+/// class for instruction schedulers. This encapsulates the scheduling DAG,
+/// which is shared between SelectionDAG and MachineInstr scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
+#define LLVM_CODEGEN_SCHEDULEDAG_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+template<class Graph> class GraphWriter;
+class MachineFunction;
+class MachineRegisterInfo;
+class MCInstrDesc;
+struct MCSchedClassDesc;
+class ScheduleDAG;
+class SDNode;
+class SUnit;
+class TargetInstrInfo;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+  /// Scheduling dependency. This represents one direction of an edge in the
+  /// scheduling DAG.
+  class SDep {
+  public:
+    /// These are the different kinds of scheduling dependencies.
+    enum Kind {
+      Data,        ///< Regular data dependence (aka true-dependence).
+      Anti,        ///< A register anti-dependence (aka WAR).
+      Output,      ///< A register output-dependence (aka WAW).
+      Order        ///< Any other ordering dependency.
+    };
+
+    // Strong dependencies must be respected by the scheduler. Artificial
+    // dependencies may be removed only if they are redundant with another
+    // strong dependence.
+    //
+    // Weak dependencies may be violated by the scheduling strategy, but only if
+    // the strategy can prove it is correct to do so.
+    //
+    // Strong OrderKinds must occur before "Weak".
+    // Weak OrderKinds must occur after "Weak".
+    enum OrderKind {
+      Barrier,      ///< An unknown scheduling barrier.
+      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
+      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
+      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
+      Weak,         ///< Arbitrary weak DAG edge.
+      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
+    };
+
+  private:
+    /// \brief A pointer to the depending/depended-on SUnit, and an enum
+    /// indicating the kind of the dependency.
+    PointerIntPair<SUnit *, 2, Kind> Dep;
+
+    /// A union discriminated by the dependence kind.
+    union {
+      /// For Data, Anti, and Output dependencies, the associated register. For
+      /// Data dependencies that don't currently have a register assigned, this
+      /// is set to zero.
+      unsigned Reg;
+
+      /// Additional information about Order dependencies.
+      unsigned OrdKind; // enum OrderKind
+    } Contents;
+
+    /// The time associated with this edge. Often this is just the value of the
+    /// Latency field of the predecessor, however advanced models may provide
+    /// additional information about specific edges.
+    unsigned Latency;
+
+  public:
+    /// Constructs a null SDep. This is only for use by container classes which
+    /// require default constructors. SUnits may not have null SDep edges.
+    SDep() : Dep(nullptr, Data) {}
+
+    /// Constructs an SDep with the specified values.
+    SDep(SUnit *S, Kind kind, unsigned Reg)
+      : Dep(S, kind), Contents() {
+      switch (kind) {
+      default:
+        llvm_unreachable("Reg given for non-register dependence!");
+      case Anti:
+      case Output:
+        assert(Reg != 0 &&
+               "SDep::Anti and SDep::Output must use a non-zero Reg!");
+        Contents.Reg = Reg;
+        Latency = 0;
+        break;
+      case Data:
+        Contents.Reg = Reg;
+        Latency = 1;
+        break;
+      }
+    }
+
+    SDep(SUnit *S, OrderKind kind)
+      : Dep(S, Order), Contents(), Latency(0) {
+      Contents.OrdKind = kind;
+    }
+
+    /// Returns true if the specified SDep is equivalent except for latency.
+    bool overlaps(const SDep &Other) const;
+
+    bool operator==(const SDep &Other) const {
+      return overlaps(Other) && Latency == Other.Latency;
+    }
+
+    bool operator!=(const SDep &Other) const {
+      return !operator==(Other);
+    }
+
+    /// \brief Returns the latency value for this edge, which roughly means the
+    /// minimum number of cycles that must elapse between the predecessor and
+    /// the successor, given that they have this edge between them.
+    unsigned getLatency() const {
+      return Latency;
+    }
+
+    /// Sets the latency for this edge.
+    void setLatency(unsigned Lat) {
+      Latency = Lat;
+    }
+
+    /// Returns the SUnit to which this edge points.
+    SUnit *getSUnit() const;
+
+    /// Assigns the SUnit to which this edge points.
+    void setSUnit(SUnit *SU);
+
+    /// Returns an enum value representing the kind of the dependence.
+    Kind getKind() const;
+
+    /// Shorthand for getKind() != SDep::Data.
+    bool isCtrl() const {
+      return getKind() != Data;
+    }
+
+    /// \brief Tests if this is an Order dependence between two memory accesses
+    /// where both sides of the dependence access memory in non-volatile and
+    /// fully modeled ways.
+    bool isNormalMemory() const {
+      return getKind() == Order && (Contents.OrdKind == MayAliasMem
+                                    || Contents.OrdKind == MustAliasMem);
+    }
+
+    /// Tests if this is an Order dependence that is marked as a barrier.
+    bool isBarrier() const {
+      return getKind() == Order && Contents.OrdKind == Barrier;
+    }
+
+    /// Tests if this could be any kind of memory dependence.
+    bool isNormalMemoryOrBarrier() const {
+      return (isNormalMemory() || isBarrier());
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as
+    /// "must alias", meaning that the SUnits at either end of the edge have a
+    /// memory dependence on a known memory location.
+    bool isMustAlias() const {
+      return getKind() == Order && Contents.OrdKind == MustAliasMem;
+    }
+
+    /// Tests if this a weak dependence. Weak dependencies are considered DAG
+    /// edges for height computation and other heuristics, but do not force
+    /// ordering. Breaking a weak edge may require the scheduler to compensate,
+    /// for example by inserting a copy.
+    bool isWeak() const {
+      return getKind() == Order && Contents.OrdKind >= Weak;
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as
+    /// "artificial", meaning it isn't necessary for correctness.
+    bool isArtificial() const {
+      return getKind() == Order && Contents.OrdKind == Artificial;
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as "cluster",
+    /// meaning it is artificial and wants to be adjacent.
+    bool isCluster() const {
+      return getKind() == Order && Contents.OrdKind == Cluster;
+    }
+
+    /// Tests if this is a Data dependence that is associated with a register.
+    bool isAssignedRegDep() const {
+      return getKind() == Data && Contents.Reg != 0;
+    }
+
+    /// Returns the register associated with this edge. This is only valid on
+    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
+    /// meaning there is no associated register.
+    unsigned getReg() const {
+      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+             "getReg called on non-register dependence edge!");
+      return Contents.Reg;
+    }
+
+    /// Assigns the associated register for this edge. This is only valid on
+    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
+    /// not be zero. On Data edges, the value may be zero, which would mean that
+    /// no specific register is associated with this edge.
+    void setReg(unsigned Reg) {
+      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+             "setReg called on non-register dependence edge!");
+      assert((getKind() != Anti || Reg != 0) &&
+             "SDep::Anti edge cannot use the zero register!");
+      assert((getKind() != Output || Reg != 0) &&
+             "SDep::Output edge cannot use the zero register!");
+      Contents.Reg = Reg;
+    }
+
+    raw_ostream &print(raw_ostream &O,
+                       const TargetRegisterInfo *TRI = nullptr) const;
+  };
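+
+  // For example (a sketch; addEdge is provided by ScheduleDAGInstrs), a
+  // DAG mutation can ask the scheduler to keep two nodes adjacent:
+  //
+  //   DAG->addEdge(&SecondSU, SDep(&FirstSU, SDep::Cluster));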
+
+  template <>
+  struct isPodLike<SDep> { static const bool value = true; };
+
+  /// Scheduling unit. This is a node in the scheduling DAG.
+  class SUnit {
+  private:
+    enum : unsigned { BoundaryID = ~0u };
+
+    SDNode *Node = nullptr;        ///< Representative node.
+    MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
+
+  public:
+    SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
+                               /// was cloned. (SD scheduling only)
+
+    const MCSchedClassDesc *SchedClass =
+        nullptr; ///< nullptr or resolved SchedClass.
+
+    SmallVector<SDep, 4> Preds;  ///< All sunit predecessors.
+    SmallVector<SDep, 4> Succs;  ///< All sunit successors.
+
+    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
+    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
+    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
+    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
+
+    unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
+    unsigned NodeQueueId = 0;          ///< Queue id of node.
+    unsigned NumPreds = 0;             ///< # of SDep::Data preds.
+    unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
+    unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
+    unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
+    unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
+    unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
+    unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
+    unsigned short Latency = 0;        ///< Node latency.
+    bool isVRegCycle      : 1;         ///< May use and def the same vreg.
+    bool isCall           : 1;         ///< Is a function call.
+    bool isCallOp         : 1;         ///< Is a function call operand.
+    bool isTwoAddress     : 1;         ///< Is a two-address instruction.
+    bool isCommutable     : 1;         ///< Is a commutable instruction.
+    bool hasPhysRegUses   : 1;         ///< Has physreg uses.
+    bool hasPhysRegDefs   : 1;         ///< Has physreg defs that are being used.
+    bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
+    bool isPending        : 1;         ///< True once pending.
+    bool isAvailable      : 1;         ///< True once available.
+    bool isScheduled      : 1;         ///< True once scheduled.
+    bool isScheduleHigh   : 1;         ///< True if preferable to schedule high.
+    bool isScheduleLow    : 1;         ///< True if preferable to schedule low.
+    bool isCloned         : 1;         ///< True if this node has been cloned.
+    bool isUnbuffered     : 1;         ///< Uses an unbuffered resource.
+    bool hasReservedResource : 1;      ///< Uses a reserved resource.
+    Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.
+
+  private:
+    bool isDepthCurrent   : 1;         ///< True if Depth is current.
+    bool isHeightCurrent  : 1;         ///< True if Height is current.
+    unsigned Depth = 0;                ///< Node depth.
+    unsigned Height = 0;               ///< Node height.
+
+  public:
+    unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
+    unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.
+
+    const TargetRegisterClass *CopyDstRC =
+        nullptr; ///< Is a special copy node if != nullptr.
+    const TargetRegisterClass *CopySrcRC = nullptr;
+
+    /// \brief Constructs an SUnit for pre-regalloc scheduling to represent an
+    /// SDNode and any nodes flagged to it.
+    SUnit(SDNode *node, unsigned nodenum)
+      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
+        isCallOp(false), isTwoAddress(false), isCommutable(false),
+        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+        isPending(false), isAvailable(false), isScheduled(false),
+        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
+        isHeightCurrent(false) {}
+
+    /// \brief Constructs an SUnit for post-regalloc scheduling to represent a
+    /// MachineInstr.
+    SUnit(MachineInstr *instr, unsigned nodenum)
+      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
+        isCallOp(false), isTwoAddress(false), isCommutable(false),
+        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+        isPending(false), isAvailable(false), isScheduled(false),
+        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
+        isHeightCurrent(false) {}
+
+    /// \brief Constructs a placeholder SUnit.
+    SUnit()
+      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
+        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
+        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
+        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
+        isCloned(false), isUnbuffered(false), hasReservedResource(false),
+        isDepthCurrent(false), isHeightCurrent(false) {}
+
+    /// \brief Boundary nodes are placeholders for the boundary of the
+    /// scheduling region.
+    ///
+    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
+    /// correspond to schedulable entities (e.g. instructions) and do not have a
+    /// valid ID. Consequently, always check for boundary nodes before accessing
+    /// an associative data structure keyed on node ID.
+    bool isBoundaryNode() const { return NodeNum == BoundaryID; }
+
+    /// Assigns the representative SDNode for this SUnit. This may be used
+    /// during pre-regalloc scheduling.
+    void setNode(SDNode *N) {
+      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
+      Node = N;
+    }
+
+    /// Returns the representative SDNode for this SUnit. This may be used
+    /// during pre-regalloc scheduling.
+    SDNode *getNode() const {
+      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
+      return Node;
+    }
+
+    /// \brief Returns true if this SUnit refers to a machine instruction as
+    /// opposed to an SDNode.
+    bool isInstr() const { return Instr; }
+
+    /// Assigns the instruction for the SUnit. This may be used during
+    /// post-regalloc scheduling.
+    void setInstr(MachineInstr *MI) {
+      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
+      Instr = MI;
+    }
+
+    /// Returns the representative MachineInstr for this SUnit. This may be used
+    /// during post-regalloc scheduling.
+    MachineInstr *getInstr() const {
+      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
+      return Instr;
+    }
+
+    /// Adds the specified edge as a pred of the current node if not already.
+    /// It also adds the current node as a successor of the specified node.
+    bool addPred(const SDep &D, bool Required = true);
+
+    /// \brief Adds a barrier edge to SU by calling addPred(), with latency 0
+    /// generally or latency 1 for a store followed by a load.
+    bool addPredBarrier(SUnit *SU) {
+      SDep Dep(SU, SDep::Barrier);
+      unsigned TrueMemOrderLatency =
+        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
+      Dep.setLatency(TrueMemOrderLatency);
+      return addPred(Dep);
+    }
+
+    /// Removes the specified edge as a pred of the current node if it exists.
+    /// It also removes the current node as a successor of the specified node.
+    void removePred(const SDep &D);
+
+    /// Returns the depth of this node, which is the length of the maximum path
+    /// up to any node which has no predecessors.
+    unsigned getDepth() const {
+      if (!isDepthCurrent)
+        const_cast<SUnit *>(this)->ComputeDepth();
+      return Depth;
+    }
+
+    /// \brief Returns the height of this node, which is the length of the
+    /// maximum path down to any node which has no successors.
+    unsigned getHeight() const {
+      if (!isHeightCurrent)
+        const_cast<SUnit *>(this)->ComputeHeight();
+      return Height;
+    }
+
+    /// \brief If NewDepth is greater than this node's depth value, sets it to
+    /// be the new depth value. This also recursively marks successor nodes
+    /// dirty.
+    void setDepthToAtLeast(unsigned NewDepth);
+
+    /// \brief If NewHeight is greater than this node's height value, sets it
+    /// to be the new height value. This also recursively marks predecessor
+    /// nodes dirty.
+    void setHeightToAtLeast(unsigned NewHeight);
+
+    /// \brief Sets a flag in this node to indicate that its stored Depth value
+    /// will require recomputation the next time getDepth() is called.
+    void setDepthDirty();
+
+    /// \brief Sets a flag in this node to indicate that its stored Height value
+    /// will require recomputation the next time getHeight() is called.
+    void setHeightDirty();
+
+    /// Tests if node N is a predecessor of this node.
+    bool isPred(const SUnit *N) const {
+      for (const SDep &Pred : Preds)
+        if (Pred.getSUnit() == N)
+          return true;
+      return false;
+    }
+
+    /// Tests if node N is a successor of this node.
+    bool isSucc(const SUnit *N) const {
+      for (const SDep &Succ : Succs)
+        if (Succ.getSUnit() == N)
+          return true;
+      return false;
+    }
+
+    bool isTopReady() const {
+      return NumPredsLeft == 0;
+    }
+    bool isBottomReady() const {
+      return NumSuccsLeft == 0;
+    }
+
+    /// \brief Orders this node's predecessor edges such that the critical path
+    /// edge occurs first.
+    void biasCriticalPath();
+
+    void dump(const ScheduleDAG *G) const;
+    void dumpAll(const ScheduleDAG *G) const;
+    raw_ostream &print(raw_ostream &O,
+                       const SUnit *N = nullptr,
+                       const SUnit *X = nullptr) const;
+    raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
+
+  private:
+    void ComputeDepth();
+    void ComputeHeight();
+  };
+
+  /// Returns true if the specified SDep is equivalent except for latency.
+  inline bool SDep::overlaps(const SDep &Other) const {
+    if (Dep != Other.Dep)
+      return false;
+    switch (Dep.getInt()) {
+    case Data:
+    case Anti:
+    case Output:
+      return Contents.Reg == Other.Contents.Reg;
+    case Order:
+      return Contents.OrdKind == Other.Contents.OrdKind;
+    }
+    llvm_unreachable("Invalid dependency kind!");
+  }
+
+  /// Returns the SUnit to which this edge points.
+  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }
+
+  /// Assigns the SUnit to which this edge points.
+  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }
+
+  /// Returns an enum value representing the kind of the dependence.
+  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
+
+  //===--------------------------------------------------------------------===//
+
+  /// \brief This interface is used to plug different priorities computation
+  /// algorithms into the list scheduler. It implements the interface of a
+  /// standard priority queue, where nodes are inserted in arbitrary order and
+  /// returned in priority order.  The computation of the priority and the
+  /// representation of the queue are totally up to the implementation to
+  /// decide.
+  class SchedulingPriorityQueue {
+    virtual void anchor();
+
+    unsigned CurCycle = 0;
+    bool HasReadyFilter;
+
+  public:
+    SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}
+
+    virtual ~SchedulingPriorityQueue() = default;
+
+    virtual bool isBottomUp() const = 0;
+
+    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
+    virtual void addNode(const SUnit *SU) = 0;
+    virtual void updateNode(const SUnit *SU) = 0;
+    virtual void releaseState() = 0;
+
+    virtual bool empty() const = 0;
+
+    bool hasReadyFilter() const { return HasReadyFilter; }
+
+    virtual bool tracksRegPressure() const { return false; }
+
+    virtual bool isReady(SUnit *) const {
+      assert(!HasReadyFilter && "The ready filter must override isReady()");
+      return true;
+    }
+
+    virtual void push(SUnit *U) = 0;
+
+    void push_all(const std::vector<SUnit *> &Nodes) {
+      for (SUnit *SU : Nodes)
+        push(SU);
+    }
+
+    virtual SUnit *pop() = 0;
+
+    virtual void remove(SUnit *SU) = 0;
+
+    virtual void dump(ScheduleDAG *) const {}
+
+    /// As each node is scheduled, this method is invoked.  This allows the
+    /// priority function to adjust the priority of related unscheduled nodes,
+    /// for example.
+    virtual void scheduledNode(SUnit *) {}
+
+    virtual void unscheduledNode(SUnit *) {}
+
+    void setCurCycle(unsigned Cycle) {
+      CurCycle = Cycle;
+    }
+
+    unsigned getCurCycle() const {
+      return CurCycle;
+    }
+  };
+
+  class ScheduleDAG {
+  public:
+    const TargetMachine &TM;            ///< Target processor
+    const TargetInstrInfo *TII;         ///< Target instruction information
+    const TargetRegisterInfo *TRI;      ///< Target processor register info
+    MachineFunction &MF;                ///< Machine function
+    MachineRegisterInfo &MRI;           ///< Virtual/real register map
+    std::vector<SUnit> SUnits;          ///< The scheduling units.
+    SUnit EntrySU;                      ///< Special node for the region entry.
+    SUnit ExitSU;                       ///< Special node for the region exit.
+
+#ifdef NDEBUG
+    static const bool StressSched = false;
+#else
+    bool StressSched;
+#endif
+
+    explicit ScheduleDAG(MachineFunction &mf);
+
+    virtual ~ScheduleDAG();
+
+    /// Clears the DAG state (between regions).
+    void clearDAG();
+
+    /// Returns the MCInstrDesc of this SUnit.
+    /// Returns NULL for SDNodes without a machine opcode.
+    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
+      if (SU->isInstr()) return &SU->getInstr()->getDesc();
+      return getNodeDesc(SU->getNode());
+    }
+
+    /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
+    virtual void viewGraph(const Twine &Name, const Twine &Title);
+    virtual void viewGraph();
+
+    virtual void dumpNode(const SUnit *SU) const = 0;
+
+    /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
+    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
+
+    /// Returns a label for the region of code covered by the DAG.
+    virtual std::string getDAGName() const = 0;
+
+    /// Adds custom features for a visualization of the ScheduleDAG.
+    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
+
+#ifndef NDEBUG
+    /// \brief Verifies that all SUnits were scheduled and that their state is
+    /// consistent. Returns the number of scheduled SUnits.
+    unsigned VerifyScheduledDAG(bool isBottomUp);
+#endif
+
+  private:
+    /// Returns the MCInstrDesc of this SDNode or NULL.
+    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
+  };
+
+  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
+                                             SUnit, ptrdiff_t> {
+    SUnit *Node;
+    unsigned Operand;
+
+    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
+
+  public:
+    bool operator==(const SUnitIterator& x) const {
+      return Operand == x.Operand;
+    }
+    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
+
+    pointer operator*() const {
+      return Node->Preds[Operand].getSUnit();
+    }
+    pointer operator->() const { return operator*(); }
+
+    SUnitIterator& operator++() {                // Preincrement
+      ++Operand;
+      return *this;
+    }
+    SUnitIterator operator++(int) { // Postincrement
+      SUnitIterator tmp = *this; ++*this; return tmp;
+    }
+
+    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
+    static SUnitIterator end  (SUnit *N) {
+      return SUnitIterator(N, (unsigned)N->Preds.size());
+    }
+
+    unsigned getOperand() const { return Operand; }
+    const SUnit *getNode() const { return Node; }
+
+    /// Tests if this is not an SDep::Data dependence.
+    bool isCtrlDep() const {
+      return getSDep().isCtrl();
+    }
+    bool isArtificialDep() const {
+      return getSDep().isArtificial();
+    }
+    const SDep &getSDep() const {
+      return Node->Preds[Operand];
+    }
+  };
+
+  template <> struct GraphTraits<SUnit*> {
+    typedef SUnit *NodeRef;
+    typedef SUnitIterator ChildIteratorType;
+    static NodeRef getEntryNode(SUnit *N) { return N; }
+    static ChildIteratorType child_begin(NodeRef N) {
+      return SUnitIterator::begin(N);
+    }
+    static ChildIteratorType child_end(NodeRef N) {
+      return SUnitIterator::end(N);
+    }
+  };
+
+  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
+    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
+    static nodes_iterator nodes_begin(ScheduleDAG *G) {
+      return nodes_iterator(G->SUnits.begin());
+    }
+    static nodes_iterator nodes_end(ScheduleDAG *G) {
+      return nodes_iterator(G->SUnits.end());
+    }
+  };
+
+  /// This class can compute a topological ordering for SUnits and provides
+  /// methods for dynamically updating the ordering as new edges are added.
+  ///
+  /// This allows a very fast implementation of IsReachable, for example.
+  class ScheduleDAGTopologicalSort {
+    /// A reference to the ScheduleDAG's SUnits.
+    std::vector<SUnit> &SUnits;
+    SUnit *ExitSU;
+
+    /// Maps topological index to the node number.
+    std::vector<int> Index2Node;
+    /// Maps the node number to its topological index.
+    std::vector<int> Node2Index;
+    /// A set of nodes visited during a DFS traversal.
+    BitVector Visited;
+
+    /// Performs a DFS traversal and marks all nodes affected by the edge
+    /// insertion. These nodes will later get new topological indexes by means
+    /// of the Shift method.
+    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
+
+    /// \brief Reassigns topological indexes for the nodes in the DAG to
+    /// preserve the topological ordering.
+    void Shift(BitVector& Visited, int LowerBound, int UpperBound);
+
+    /// Assigns the topological index to the node n.
+    void Allocate(int n, int index);
+
+  public:
+    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
+
+    /// Creates the initial topological ordering from the DAG to be scheduled.
+    void InitDAGTopologicalSorting();
+
+    /// Returns an array of SUs that are both in the successor
+    /// subtree of StartSU and in the predecessor subtree of TargetSU.
+    /// StartSU and TargetSU are not in the array.
+    /// Success is false if TargetSU is not in the successor subtree of
+    /// StartSU, else it is true.
+    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
+                                 bool &Success);
+
+    /// Checks if \p SU is reachable from \p TargetSU.
+    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
+
+    /// Returns true if addPred(TargetSU, SU) creates a cycle.
+    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
+
+    /// \brief Updates the topological ordering to accommodate an edge to be
+    /// added from SUnit \p X to SUnit \p Y.
+    void AddPred(SUnit *Y, SUnit *X);
+
+    /// \brief Updates the topological ordering to accommodate the removal of
+    /// an edge: the specified node \p N is removed from the predecessors of
+    /// the current node \p M.
+    void RemovePred(SUnit *M, SUnit *N);
+
+    typedef std::vector<int>::iterator iterator;
+    typedef std::vector<int>::const_iterator const_iterator;
+    iterator begin() { return Index2Node.begin(); }
+    const_iterator begin() const { return Index2Node.begin(); }
+    iterator end() { return Index2Node.end(); }
+    const_iterator end() const { return Index2Node.end(); }
+
+    typedef std::vector<int>::reverse_iterator reverse_iterator;
+    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
+    reverse_iterator rbegin() { return Index2Node.rbegin(); }
+    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
+    reverse_iterator rend() { return Index2Node.rend(); }
+    const_reverse_iterator rend() const { return Index2Node.rend(); }
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
new file mode 100644
index 0000000..1488220
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -0,0 +1,384 @@
+//===- ScheduleDAGInstrs.h - MachineInstr Scheduling ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements the ScheduleDAGInstrs class, which implements scheduling
+/// for a MachineInstr-based dependency graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+#define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseMultiSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstdint>
+#include <list>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+  class LiveIntervals;
+  class MachineFrameInfo;
+  class MachineFunction;
+  class MachineInstr;
+  class MachineLoopInfo;
+  class MachineOperand;
+  struct MCSchedClassDesc;
+  class PressureDiffs;
+  class PseudoSourceValue;
+  class RegPressureTracker;
+  class UndefValue;
+  class Value;
+
+  /// An individual mapping from virtual register number to SUnit.
+  struct VReg2SUnit {
+    unsigned VirtReg;
+    LaneBitmask LaneMask;
+    SUnit *SU;
+
+    VReg2SUnit(unsigned VReg, LaneBitmask LaneMask, SUnit *SU)
+      : VirtReg(VReg), LaneMask(LaneMask), SU(SU) {}
+
+    unsigned getSparseSetIndex() const {
+      return TargetRegisterInfo::virtReg2Index(VirtReg);
+    }
+  };
+
+  /// Mapping from virtual register to SUnit including an operand index.
+  struct VReg2SUnitOperIdx : public VReg2SUnit {
+    unsigned OperandIndex;
+
+    VReg2SUnitOperIdx(unsigned VReg, LaneBitmask LaneMask,
+                      unsigned OperandIndex, SUnit *SU)
+      : VReg2SUnit(VReg, LaneMask, SU), OperandIndex(OperandIndex) {}
+  };
+
+  /// Record a physical register access.
+  /// For non-data-dependent uses, OpIdx == -1.
+  struct PhysRegSUOper {
+    SUnit *SU;
+    int OpIdx;
+    unsigned Reg;
+
+    PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
+
+    unsigned getSparseSetIndex() const { return Reg; }
+  };
+
+  /// Use a SparseMultiSet to track physical registers. Storage is only
+  /// allocated once for the pass. It can be cleared in constant time and reused
+  /// without any frees.
+  using Reg2SUnitsMap =
+      SparseMultiSet<PhysRegSUOper, identity<unsigned>, uint16_t>;
+
+  /// Use SparseSet as a SparseMap by relying on the fact that it never
+  /// compares ValueT's, only unsigned keys. This allows the set to be cleared
+  /// between scheduling regions in constant time as long as ValueT does not
+  /// require a destructor.
+  using VReg2SUnitMap = SparseSet<VReg2SUnit, VirtReg2IndexFunctor>;
+
+  /// Track local uses of virtual registers. These uses are gathered by the DAG
+  /// builder and may be consulted by the scheduler to avoid iterating an entire
+  /// vreg use list.
+  using VReg2SUnitMultiMap = SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor>;
+
+  using VReg2SUnitOperIdxMultiMap =
+      SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>;
+
+  using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;
+
+  struct UnderlyingObject : PointerIntPair<ValueType, 1, bool> {
+    UnderlyingObject(ValueType V, bool MayAlias)
+        : PointerIntPair<ValueType, 1, bool>(V, MayAlias) {}
+
+    ValueType getValue() const { return getPointer(); }
+    bool mayAlias() const { return getInt(); }
+  };
+
+  using UnderlyingObjectsVector = SmallVector<UnderlyingObject, 4>;
+
+  /// A ScheduleDAG for scheduling lists of MachineInstr.
+  class ScheduleDAGInstrs : public ScheduleDAG {
+  protected:
+    const MachineLoopInfo *MLI;
+    const MachineFrameInfo &MFI;
+
+    /// TargetSchedModel provides an interface to the machine model.
+    TargetSchedModel SchedModel;
+
+    /// True if the DAG builder should remove kill flags (in preparation for
+    /// rescheduling).
+    bool RemoveKillFlags;
+
+    /// The standard DAG builder does not normally include terminators as DAG
+    /// nodes because it does not create the necessary dependencies to prevent
+    /// reordering. A specialized scheduler can override
+    /// TargetInstrInfo::isSchedulingBoundary and then enable this flag to
+    /// indicate it has taken responsibility for scheduling the terminator
+    /// correctly.
+    bool CanHandleTerminators = false;
+
+    /// Whether lane masks should get tracked.
+    bool TrackLaneMasks = false;
+
+    // State specific to the current scheduling region.
+    // ------------------------------------------------
+
+    /// The block in which to insert instructions.
+    MachineBasicBlock *BB;
+
+    /// The beginning of the range to be scheduled.
+    MachineBasicBlock::iterator RegionBegin;
+
+    /// The end of the range to be scheduled.
+    MachineBasicBlock::iterator RegionEnd;
+
+    /// Instructions in this region (distance(RegionBegin, RegionEnd)).
+    unsigned NumRegionInstrs;
+
+    /// After calling BuildSchedGraph, each machine instruction in the current
+    /// scheduling region is mapped to an SUnit.
+    DenseMap<MachineInstr*, SUnit*> MISUnitMap;
+
+    // State internal to DAG building.
+    // -------------------------------
+
+    /// Defs, Uses - Remember where defs and uses of each register are as we
+    /// iterate upward through the instructions. This is allocated here instead
+    /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+    /// destructed for each block.
+    Reg2SUnitsMap Defs;
+    Reg2SUnitsMap Uses;
+
+    /// Tracks the last instruction(s) in this region defining each virtual
+    /// register. There may be multiple current definitions for a register with
+    /// disjoint lane masks.
+    VReg2SUnitMultiMap CurrentVRegDefs;
+    /// Tracks the last instructions in this region using each virtual register.
+    VReg2SUnitOperIdxMultiMap CurrentVRegUses;
+
+    AliasAnalysis *AAForDep = nullptr;
+
+    /// Remember a generic side-effecting instruction as we proceed.
+    /// No other SU ever gets scheduled around it (except in the special
+    /// case of a huge region that gets reduced).
+    SUnit *BarrierChain = nullptr;
+
+  public:
+    /// A list of SUnits, used in Value2SUsMap, during DAG construction.
+    /// Note: to gain speed it might be worth investigating an optimized
+    /// implementation of this data structure, such as a singly linked list
+    /// with a memory pool (SmallVector was tried but proved slow, and
+    /// SparseSet is not applicable).
+    using SUList = std::list<SUnit *>;
+
+  protected:
+    /// \brief A map from ValueType to SUList, used during DAG construction, as
+    /// a means of remembering which SUs depend on which memory locations.
+    class Value2SUsMap;
+
+    /// Reduces maps in FIFO order, by N SUs. This is better than turning
+    /// every Nth memory SU into BarrierChain in buildSchedGraph(), since
+    /// it avoids unnecessary edges between seen SUs above the new BarrierChain,
+    /// and those below it.
+    void reduceHugeMemNodeMaps(Value2SUsMap &stores,
+                               Value2SUsMap &loads, unsigned N);
+
+    /// \brief Adds a chain edge between SUa and SUb, but only if both
+    /// AliasAnalysis and Target fail to deny the dependency.
+    void addChainDependency(SUnit *SUa, SUnit *SUb,
+                            unsigned Latency = 0);
+
+    /// Adds dependencies as needed from all SUs in list to SU.
+    void addChainDependencies(SUnit *SU, SUList &SUs, unsigned Latency) {
+      for (SUnit *Entry : SUs)
+        addChainDependency(SU, Entry, Latency);
+    }
+
+    /// Adds dependencies as needed from all SUs in map, to SU.
+    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap);
+
+    /// Adds dependencies as needed to SU, from all SUs mapped to V.
+    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap,
+                              ValueType V);
+
+    /// Adds barrier chain edges from all SUs in map, and then clears the map.
+    /// This is equivalent to insertBarrierChain(), but optimized for the common
+    /// case where the new BarrierChain (a global memory object) has a higher
+    /// NodeNum than all SUs in map. It is assumed BarrierChain has been set
+    /// before calling this.
+    void addBarrierChain(Value2SUsMap &map);
+
+    /// Inserts a barrier chain in a huge region, far below current SU.
+    /// Adds barrier chain edges from all SUs in map with higher NodeNums than
+    /// this new BarrierChain, and removes them from the map. It is assumed
+    /// BarrierChain has been set before calling this.
+    void insertBarrierChain(Value2SUsMap &map);
+
+    /// For an unanalyzable memory access, this Value is used in maps.
+    UndefValue *UnknownValue;
+
+    using DbgValueVector =
+        std::vector<std::pair<MachineInstr *, MachineInstr *>>;
+    /// Remember instruction that precedes DBG_VALUE.
+    /// These are generated by buildSchedGraph but persist so they can be
+    /// referenced when emitting the final schedule.
+    DbgValueVector DbgValues;
+    MachineInstr *FirstDbgValue = nullptr;
+
+    /// Set of live physical registers for updating kill flags.
+    LivePhysRegs LiveRegs;
+
+  public:
+    explicit ScheduleDAGInstrs(MachineFunction &mf,
+                               const MachineLoopInfo *mli,
+                               bool RemoveKillFlags = false);
+
+    ~ScheduleDAGInstrs() override = default;
+
+    /// Gets the machine model for instruction scheduling.
+    const TargetSchedModel *getSchedModel() const { return &SchedModel; }
+
+    /// Resolves and caches the scheduling class for an SUnit.
+    const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
+      if (!SU->SchedClass && SchedModel.hasInstrSchedModel())
+        SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
+      return SU->SchedClass;
+    }
+
+    /// Returns an iterator to the top of the current scheduling region.
+    MachineBasicBlock::iterator begin() const { return RegionBegin; }
+
+    /// Returns an iterator to the bottom of the current scheduling region.
+    MachineBasicBlock::iterator end() const { return RegionEnd; }
+
+    /// Creates a new SUnit and returns a pointer to it.
+    SUnit *newSUnit(MachineInstr *MI);
+
+    /// Returns an existing SUnit for this MI, or nullptr.
+    SUnit *getSUnit(MachineInstr *MI) const;
+
+    /// If this method returns true, handling of the scheduling regions
+    /// themselves (in case of a scheduling boundary in MBB) will be done
+    /// beginning with the topmost region of MBB.
+    virtual bool doMBBSchedRegionsTopDown() const { return false; }
+
+    /// Prepares to perform scheduling in the given block.
+    virtual void startBlock(MachineBasicBlock *BB);
+
+    /// Cleans up after scheduling in the given block.
+    virtual void finishBlock();
+
+    /// \brief Initialize the DAG and common scheduler state for a new
+    /// scheduling region. This does not actually create the DAG, only clears
+    /// it. The scheduling driver may call BuildSchedGraph multiple times per
+    /// scheduling region.
+    virtual void enterRegion(MachineBasicBlock *bb,
+                             MachineBasicBlock::iterator begin,
+                             MachineBasicBlock::iterator end,
+                             unsigned regioninstrs);
+
+    /// Called when the scheduler has finished scheduling the current region.
+    virtual void exitRegion();
+
+    /// Builds SUnits for the current region.
+    /// If \p RPTracker is non-null, compute register pressure as a side effect.
+    /// The DAG builder is an efficient place to do it because it already visits
+    /// operands.
+    void buildSchedGraph(AliasAnalysis *AA,
+                         RegPressureTracker *RPTracker = nullptr,
+                         PressureDiffs *PDiffs = nullptr,
+                         LiveIntervals *LIS = nullptr,
+                         bool TrackLaneMasks = false);
+
+    /// \brief Adds dependencies from instructions in the current list of
+    /// instructions being scheduled to the scheduling barrier. We want to make
+    /// sure instructions which define registers that are either used by the
+    /// terminator or are live-out are properly scheduled. This is especially
+    /// important when the definition latency of the return value(s) is too
+    /// high to be hidden by the branch, or when live-out registers are used by
+    /// instructions in the fallthrough block.
+    void addSchedBarrierDeps();
+
+    /// Orders nodes according to selected style.
+    ///
+    /// Typically, a scheduling algorithm will implement schedule() without
+    /// overriding enterRegion() or exitRegion().
+    virtual void schedule() = 0;
+
+    /// Allow targets to perform final scheduling actions at the level of the
+    /// whole MachineFunction. By default does nothing.
+    virtual void finalizeSchedule() {}
+
+    void dumpNode(const SUnit *SU) const override;
+
+    /// Returns a label for a DAG node that points to an instruction.
+    std::string getGraphNodeLabel(const SUnit *SU) const override;
+
+    /// Returns a label for the region of code covered by the DAG.
+    std::string getDAGName() const override;
+
+    /// Fixes register kill flags that scheduling has made invalid.
+    void fixupKills(MachineBasicBlock &MBB);
+
+  protected:
+    void initSUnits();
+    void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
+    void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
+
+    /// Initializes register live-range state for updating kills.
+    /// PostRA helper for rewriting kill flags.
+    void startBlockForKills(MachineBasicBlock *BB);
+
+    /// Toggles a register operand kill flag.
+    ///
+    /// Other adjustments may be made to the instruction if necessary. Return
+    /// true if the operand has been deleted, false if not.
+    void toggleKillFlag(MachineInstr &MI, MachineOperand &MO);
+
+    /// Returns a mask for which lanes get read/written by the given (register)
+    /// machine operand.
+    LaneBitmask getLaneMaskForMO(const MachineOperand &MO) const;
+  };
+
+  /// Creates a new SUnit and returns a pointer to it.
+  inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
+#ifndef NDEBUG
+    const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
+#endif
+    SUnits.emplace_back(MI, (unsigned)SUnits.size());
+    assert((Addr == nullptr || Addr == &SUnits[0]) &&
+           "SUnits std::vector reallocated on the fly!");
+    return &SUnits.back();
+  }
+
+  /// Returns an existing SUnit for this MI, or nullptr.
+  inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
+    DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
+    if (I == MISUnitMap.end())
+      return nullptr;
+    return I->second;
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h
new file mode 100644
index 0000000..5c23642
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h
@@ -0,0 +1,34 @@
+//===- ScheduleDAGMutation.h - MachineInstr Scheduling ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAGMutation class, which represents
+// a target-specific mutation of the dependency graph for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
+#define LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
+
+namespace llvm {
+
+class ScheduleDAGInstrs;
+
+/// Mutate the DAG as a postpass after normal DAG building.
+class ScheduleDAGMutation {
+  virtual void anchor();
+
+public:
+  virtual ~ScheduleDAGMutation() = default;
+
+  virtual void apply(ScheduleDAGInstrs *DAG) = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
new file mode 100644
index 0000000..d6a8c79
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
@@ -0,0 +1,194 @@
+//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of an ILP metric for machine level instruction scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
+#define LLVM_CODEGEN_SCHEDULEDFS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// \brief Represent the ILP of the subDAG rooted at a DAG node.
+///
+/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
+/// valid for all nodes regardless of their subtree membership.
+///
+/// When computed using bottom-up DFS, this metric assumes that the DAG is a
+/// forest of trees with roots at the bottom of the schedule branching upward.
+struct ILPValue {
+  unsigned InstrCount;
+  /// Length may correspond to either depth or height, depending on direction,
+  /// and to either cycles or nodes, depending on context.
+  unsigned Length;
+
+  ILPValue(unsigned count, unsigned length):
+    InstrCount(count), Length(length) {}
+
+  // Order by the ILP metric's value.
+  bool operator<(ILPValue RHS) const {
+    return (uint64_t)InstrCount * RHS.Length
+      < (uint64_t)Length * RHS.InstrCount;
+  }
+  bool operator>(ILPValue RHS) const {
+    return RHS < *this;
+  }
+  bool operator<=(ILPValue RHS) const {
+    return (uint64_t)InstrCount * RHS.Length
+      <= (uint64_t)Length * RHS.InstrCount;
+  }
+  bool operator>=(ILPValue RHS) const {
+    return RHS <= *this;
+  }
+
+  void print(raw_ostream &OS) const;
+
+  void dump() const;
+};
+
+/// \brief Compute the values of each DAG node for various metrics during DFS.
+class SchedDFSResult {
+  friend class SchedDFSImpl;
+
+  static const unsigned InvalidSubtreeID = ~0u;
+
+  /// \brief Per-SUnit data computed during DFS for various metrics.
+  ///
+  /// A node's SubtreeID is set to itself when it is visited to indicate that it
+  /// is the root of a subtree. Later it is set to its parent to indicate an
+  /// interior node. Finally, it is set to a representative subtree ID during
+  /// finalization.
+  struct NodeData {
+    unsigned InstrCount = 0;
+    unsigned SubtreeID = InvalidSubtreeID;
+
+    NodeData() = default;
+  };
+
+  /// \brief Per-Subtree data computed during DFS.
+  struct TreeData {
+    unsigned ParentTreeID = InvalidSubtreeID;
+    unsigned SubInstrCount = 0;
+
+    TreeData() = default;
+  };
+
+  /// \brief Record a connection between subtrees and the connection level.
+  struct Connection {
+    unsigned TreeID;
+    unsigned Level;
+
+    Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
+  };
+
+  bool IsBottomUp;
+  unsigned SubtreeLimit;
+  /// DFS results for each SUnit in this DAG.
+  std::vector<NodeData> DFSNodeData;
+
+  // Store per-tree data indexed on tree ID.
+  SmallVector<TreeData, 16> DFSTreeData;
+
+  // For each subtree discovered during DFS, record its connections to other
+  // subtrees.
+  std::vector<SmallVector<Connection, 4>> SubtreeConnections;
+
+  /// Cache the current connection level of each subtree.
+  /// This mutable array is updated during scheduling.
+  std::vector<unsigned> SubtreeConnectLevels;
+
+public:
+  SchedDFSResult(bool IsBU, unsigned lim)
+    : IsBottomUp(IsBU), SubtreeLimit(lim) {}
+
+  /// \brief Get the node cutoff before subtrees are considered significant.
+  unsigned getSubtreeLimit() const { return SubtreeLimit; }
+
+  /// \brief Return true if this DFSResult is uninitialized.
+  ///
+  /// resize() initializes DFSResult, while compute() populates it.
+  bool empty() const { return DFSNodeData.empty(); }
+
+  /// \brief Clear the results.
+  void clear() {
+    DFSNodeData.clear();
+    DFSTreeData.clear();
+    SubtreeConnections.clear();
+    SubtreeConnectLevels.clear();
+  }
+
+  /// \brief Initialize the result data with the size of the DAG.
+  void resize(unsigned NumSUnits) {
+    DFSNodeData.resize(NumSUnits);
+  }
+
+  /// \brief Compute various metrics for the DAG with given roots.
+  void compute(ArrayRef<SUnit> SUnits);
+
+  /// \brief Get the number of instructions in the given subtree and its
+  /// children.
+  unsigned getNumInstrs(const SUnit *SU) const {
+    return DFSNodeData[SU->NodeNum].InstrCount;
+  }
+
+  /// \brief Get the number of instructions in the given subtree not including
+  /// children.
+  unsigned getNumSubInstrs(unsigned SubtreeID) const {
+    return DFSTreeData[SubtreeID].SubInstrCount;
+  }
+
+  /// \brief Get the ILP value for a DAG node.
+  ///
+  /// A leaf node has an ILP of 1/1.
+  ILPValue getILP(const SUnit *SU) const {
+    return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
+  }
+
+  /// \brief The number of subtrees detected in this DAG.
+  unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
+
+  /// \brief Get the ID of the subtree the given DAG node belongs to.
+  ///
+  /// For convenience, if DFSResults have not been computed yet, give everything
+  /// tree ID 0.
+  unsigned getSubtreeID(const SUnit *SU) const {
+    if (empty())
+      return 0;
+    assert(SU->NodeNum < DFSNodeData.size() && "New Node");
+    return DFSNodeData[SU->NodeNum].SubtreeID;
+  }
+
+  /// \brief Get the connection level of a subtree.
+  ///
+  /// For bottom-up trees, the connection level is the latency depth (in cycles)
+  /// of the deepest connection to another subtree.
+  unsigned getSubtreeLevel(unsigned SubtreeID) const {
+    return SubtreeConnectLevels[SubtreeID];
+  }
+
+  /// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
+  /// initially scheduled.
+  void scheduleTree(unsigned SubtreeID);
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDFS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h
new file mode 100644
index 0000000..ace4a2d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -0,0 +1,122 @@
+//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleHazardRecognizer class, which implements
+// hazard-avoidance heuristics for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+
+namespace llvm {
+
+class MachineInstr;
+class SUnit;
+
+/// HazardRecognizer - This determines whether or not an instruction can be
+/// issued this cycle, and whether or not a noop needs to be inserted to handle
+/// the hazard.
+class ScheduleHazardRecognizer {
+protected:
+  /// MaxLookAhead - Indicate the number of cycles in the scoreboard
+  /// state. Important to restore the state after backtracking. Additionally,
+  /// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
+  /// bypass virtual calls. Currently the PostRA scheduler ignores it.
+  unsigned MaxLookAhead = 0;
+
+public:
+  ScheduleHazardRecognizer() = default;
+  virtual ~ScheduleHazardRecognizer();
+
+  enum HazardType {
+    NoHazard,      // This instruction can be emitted at this cycle.
+    Hazard,        // This instruction can't be emitted at this cycle.
+    NoopHazard     // This instruction can't be emitted, and needs noops.
+  };
+
+  unsigned getMaxLookAhead() const { return MaxLookAhead; }
+
+  bool isEnabled() const { return MaxLookAhead != 0; }
+
+  /// atIssueLimit - Return true if no more instructions may be issued in this
+  /// cycle.
+  ///
+  /// FIXME: remove this once MachineScheduler is the only client.
+  virtual bool atIssueLimit() const { return false; }
+
+  /// getHazardType - Return the hazard type of emitting this node.  There are
+  /// three possible results.  Either:
+  ///  * NoHazard: it is legal to issue this instruction on this cycle.
+  ///  * Hazard: issuing this instruction would stall the machine.  If some
+  ///     other instruction is available, issue it first.
+  ///  * NoopHazard: issuing this instruction would break the program.  If
+  ///     some other instruction can be issued, do so, otherwise issue a noop.
+  virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
+    return NoHazard;
+  }
+
+  /// Reset - This callback is invoked when a new block of
+  /// instructions is about to be scheduled. The hazard state should be
+  /// set to an initialized state.
+  virtual void Reset() {}
+
+  /// EmitInstruction - This callback is invoked when an instruction is
+  /// emitted, to advance the hazard state.
+  virtual void EmitInstruction(SUnit *) {}
+
+  /// This overload will be used when the hazard recognizer is being used
+  /// by a non-scheduling pass, which does not use SUnits.
+  virtual void EmitInstruction(MachineInstr *) {}
+
+  /// PreEmitNoops - This callback is invoked prior to emitting an instruction.
+  /// It should return the number of noops to emit prior to the provided
+  /// instruction.
+  /// Note: This is only used during PostRA scheduling. EmitNoop is not called
+  /// for these noops.
+  virtual unsigned PreEmitNoops(SUnit *) {
+    return 0;
+  }
+
+  /// This overload will be used when the hazard recognizer is being used
+  /// by a non-scheduling pass, which does not use SUnits.
+  virtual unsigned PreEmitNoops(MachineInstr *) {
+    return 0;
+  }
+
+  /// ShouldPreferAnother - This callback may be invoked if getHazardType
+  /// returns NoHazard. If, even though there is no hazard, it would be better to
+  /// schedule another available instruction, this callback should return true.
+  virtual bool ShouldPreferAnother(SUnit *) {
+    return false;
+  }
+
+  /// AdvanceCycle - This callback is invoked whenever the next top-down
+  /// instruction to be scheduled cannot issue in the current cycle, either
+  /// because of latency or resource conflicts.  This should increment the
+  /// internal state of the hazard recognizer so that previously "Hazard"
+  /// instructions will now not be hazards.
+  virtual void AdvanceCycle() {}
+
+  /// RecedeCycle - This callback is invoked whenever the next bottom-up
+  /// instruction to be scheduled cannot issue in the current cycle, either
+  /// because of latency or resource conflicts.
+  virtual void RecedeCycle() {}
+
+  /// EmitNoop - This callback is invoked when a noop was added to the
+  /// instruction stream.
+  virtual void EmitNoop() {
+    // Default implementation: count it as a cycle.
+    AdvanceCycle();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h b/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h
new file mode 100644
index 0000000..badf927
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h
@@ -0,0 +1,105 @@
+//===- llvm/CodeGen/SchedulerRegistry.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation for instruction scheduler function
+// pass registry (RegisterScheduler).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
+#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterScheduler class - Track the registration of instruction schedulers.
+///
+//===----------------------------------------------------------------------===//
+
+class ScheduleDAGSDNodes;
+class SelectionDAGISel;
+
+class RegisterScheduler : public MachinePassRegistryNode {
+public:
+  using FunctionPassCtor = ScheduleDAGSDNodes *(*)(SelectionDAGISel*,
+                                                   CodeGenOpt::Level);
+
+  static MachinePassRegistry Registry;
+
+  RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
+  : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
+  { Registry.Add(this); }
+  ~RegisterScheduler() { Registry.Remove(this); }
+
+  // Accessors.
+  RegisterScheduler *getNext() const {
+    return (RegisterScheduler *)MachinePassRegistryNode::getNext();
+  }
+
+  static RegisterScheduler *getList() {
+    return (RegisterScheduler *)Registry.getList();
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
+
+/// createBURRListDAGScheduler - This creates a bottom up register usage
+/// reduction list scheduler.
+ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
+                                               CodeGenOpt::Level OptLevel);
+
+/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
+/// schedules nodes in source code order when possible.
+ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
+                                                 CodeGenOpt::Level OptLevel);
+
+/// createHybridListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that makes use of latency information to avoid stalls
+/// for long latency instructions in low register pressure mode. In high
+/// register pressure mode it schedules to reduce register pressure.
+ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
+                                                 CodeGenOpt::Level);
+
+/// createILPListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that tries to increase instruction level parallelism
+/// in low register pressure mode. In high register pressure mode it schedules
+/// to reduce register pressure.
+ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
+                                              CodeGenOpt::Level);
+
+/// createFastDAGScheduler - This creates a "fast" scheduler.
+ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates top down
+/// DFA driven list scheduler with clustering heuristic to control
+/// register pressure.
+ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createDefaultScheduler - This creates an instruction scheduler appropriate
+/// for the target.
+ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
+/// linearizes the DAG using topological order.
+ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
+                                        CodeGenOpt::Level OptLevel);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULERREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
new file mode 100644
index 0000000..466ab53
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -0,0 +1,128 @@
+//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScoreboardHazardRecognizer class, which
+// encapsulates hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+
+namespace llvm {
+
+class InstrItineraryData;
+class ScheduleDAG;
+class SUnit;
+
+class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
+  // Scoreboard to track function unit usage. Scoreboard[0] is a
+  // mask of the FUs in use in the cycle currently being
+  // schedule. Scoreboard[1] is a mask for the next cycle. The
+  // Scoreboard is used as a circular buffer with the current cycle
+  // indicated by Head.
+  //
+  // Scoreboard always counts cycles in forward execution order. If used by a
+  // bottom-up scheduler, then the scoreboard cycles are the inverse of the
+  // scheduler's cycles.
+  class Scoreboard {
+    unsigned *Data = nullptr;
+
+    // The maximum number of cycles monitored by the Scoreboard. This
+    // value is determined based on the target itineraries to ensure
+    // that all hazards can be tracked.
+    size_t Depth = 0;
+
+  // Index into the Scoreboard that represents the current cycle.
+    size_t Head = 0;
+
+  public:
+    Scoreboard() = default;
+
+    ~Scoreboard() {
+      delete[] Data;
+    }
+
+    size_t getDepth() const { return Depth; }
+
+    unsigned& operator[](size_t idx) const {
+      // Depth is expected to be a power-of-2.
+      assert(Depth && !(Depth & (Depth - 1)) &&
+             "Scoreboard was not initialized properly!");
+
+      return Data[(Head + idx) & (Depth-1)];
+    }
+
+    void reset(size_t d = 1) {
+      if (!Data) {
+        Depth = d;
+        Data = new unsigned[Depth];
+      }
+
+      memset(Data, 0, Depth * sizeof(Data[0]));
+      Head = 0;
+    }
+
+    void advance() {
+      Head = (Head + 1) & (Depth-1);
+    }
+
+    void recede() {
+      Head = (Head - 1) & (Depth-1);
+    }
+
+    // Print the scoreboard.
+    void dump() const;
+  };
+
+  // Support for tracing ScoreboardHazardRecognizer as a component within
+  // another module.
+  const char *DebugType;
+
+  // Itinerary data for the target.
+  const InstrItineraryData *ItinData;
+
+  const ScheduleDAG *DAG;
+
+  /// IssueWidth - Max issue per cycle. 0=Unknown.
+  unsigned IssueWidth = 0;
+
+  /// IssueCount - Count instructions issued in this cycle.
+  unsigned IssueCount = 0;
+
+  Scoreboard ReservedScoreboard;
+  Scoreboard RequiredScoreboard;
+
+public:
+  ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+                             const ScheduleDAG *DAG,
+                             const char *ParentDebugType = "");
+
+  /// atIssueLimit - Return true if no more instructions may be issued in this
+  /// cycle.
+  bool atIssueLimit() const override;
+
+  // Stalls provides a cycle offset at which SU will be scheduled. It will be
+  // negative for bottom-up scheduling.
+  HazardType getHazardType(SUnit *SU, int Stalls) override;
+  void Reset() override;
+  void EmitInstruction(SUnit *SU) override;
+  void AdvanceCycle() override;
+  void RecedeCycle() override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
new file mode 100644
index 0000000..af43c9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
@@ -0,0 +1,1611 @@
+//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAG class, and transitively defines the
+// SDNode class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAG_H
+#define LLVM_CODEGEN_SELECTIONDAG_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BlockAddress;
+class Constant;
+class ConstantFP;
+class ConstantInt;
+class DataLayout;
+struct fltSemantics;
+class GlobalValue;
+struct KnownBits;
+class LLVMContext;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MCSymbol;
+class OptimizationRemarkEmitter;
+class SDDbgValue;
+class SelectionDAG;
+class SelectionDAGTargetInfo;
+class TargetLibraryInfo;
+class TargetLowering;
+class TargetMachine;
+class TargetSubtargetInfo;
+class Value;
+
+class SDVTListNode : public FoldingSetNode {
+  friend struct FoldingSetTrait<SDVTListNode>;
+
+  /// A reference to an interned FoldingSetNodeID for this node.
+  /// The Allocator in SelectionDAG holds the data.
+  /// SDVTList contains all types that are frequently accessed in SelectionDAG.
+  /// The size of this list is not expected to be big, so it won't introduce
+  /// a memory penalty.
+  FoldingSetNodeIDRef FastID;
+  const EVT *VTs;
+  unsigned int NumVTs;
+  /// The hash value for SDVTList is fixed, so cache it to avoid
+  /// hash calculation.
+  unsigned HashValue;
+
+public:
+  SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
+      FastID(ID), VTs(VT), NumVTs(Num) {
+    HashValue = ID.ComputeHash();
+  }
+
+  SDVTList getSDVTList() {
+    SDVTList result = {VTs, NumVTs};
+    return result;
+  }
+};
+
+/// Specialize FoldingSetTrait for SDVTListNode
+/// to avoid computing temp FoldingSetNodeID and hash value.
+template <>
+struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
+  static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
+    ID = X.FastID;
+  }
+
+  static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
+                     unsigned IDHash, FoldingSetNodeID &TempID) {
+    if (X.HashValue != IDHash)
+      return false;
+    return ID == X.FastID;
+  }
+
+  static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
+    return X.HashValue;
+  }
+};
+
+template <> struct ilist_alloc_traits<SDNode> {
+  static void deleteNode(SDNode *) {
+    llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
+  }
+};
+
+/// Keeps track of dbg_value information through SDISel.  We do
+/// not build SDNodes for these so as not to perturb the generated code;
+/// instead the info is kept off to the side in this structure. Each SDNode may
+/// have one or more associated dbg_value entries. This information is kept in
+/// DbgValMap.
+/// Byval parameters are handled separately because they don't use allocas,
+/// which breaks the normal mechanism.  There is good reason for handling all
+/// parameters separately:  they may not have code generated for them, they
+/// should always go at the beginning of the function regardless of other code
+/// motion, and debug info for them is potentially useful even if the parameter
+/// is unused.  Right now only byval parameters are handled separately.
+class SDDbgInfo {
+  BumpPtrAllocator Alloc;
+  SmallVector<SDDbgValue*, 32> DbgValues;
+  SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
+  using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
+  DbgValMapType DbgValMap;
+
+public:
+  SDDbgInfo() = default;
+  SDDbgInfo(const SDDbgInfo &) = delete;
+  SDDbgInfo &operator=(const SDDbgInfo &) = delete;
+
+  void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
+    if (isParameter)
+      ByvalParmDbgValues.push_back(V);
+    else
+      DbgValues.push_back(V);
+    if (Node)
+      DbgValMap[Node].push_back(V);
+  }
+
+  /// \brief Invalidate all DbgValues attached to the node and remove
+  /// it from the Node-to-DbgValues map.
+  void erase(const SDNode *Node);
+
+  void clear() {
+    DbgValMap.clear();
+    DbgValues.clear();
+    ByvalParmDbgValues.clear();
+    Alloc.Reset();
+  }
+
+  BumpPtrAllocator &getAlloc() { return Alloc; }
+
+  bool empty() const {
+    return DbgValues.empty() && ByvalParmDbgValues.empty();
+  }
+
+  ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
+    DbgValMapType::iterator I = DbgValMap.find(Node);
+    if (I != DbgValMap.end())
+      return I->second;
+    return ArrayRef<SDDbgValue*>();
+  }
+
+  using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
+
+  DbgIterator DbgBegin() { return DbgValues.begin(); }
+  DbgIterator DbgEnd()   { return DbgValues.end(); }
+  DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
+  DbgIterator ByvalParmDbgEnd()   { return ByvalParmDbgValues.end(); }
+};
+
+void checkForCycles(const SelectionDAG *DAG, bool force = false);
+
+/// This is used to represent a portion of an LLVM function in a low-level
+/// Data Dependence DAG representation suitable for instruction selection.
+/// This DAG is constructed as the first step of instruction selection in order
+/// to allow implementation of machine-specific optimizations
+/// and code simplifications.
+///
+/// The representation used by the SelectionDAG is a target-independent
+/// representation, which has some similarities to the GCC RTL representation,
+/// but is significantly simpler and more powerful, and is a graph form rather
+/// than a linear form.
+///
+class SelectionDAG {
+  const TargetMachine &TM;
+  const SelectionDAGTargetInfo *TSI = nullptr;
+  const TargetLowering *TLI = nullptr;
+  const TargetLibraryInfo *LibInfo = nullptr;
+  MachineFunction *MF;
+  Pass *SDAGISelPass = nullptr;
+  LLVMContext *Context;
+  CodeGenOpt::Level OptLevel;
+
+  DivergenceAnalysis *DA = nullptr;
+  FunctionLoweringInfo *FLI = nullptr;
+
+  /// The function-level optimization remark emitter.  Used to emit remarks
+  /// whenever manipulating the DAG.
+  OptimizationRemarkEmitter *ORE;
+
+  /// The starting token.
+  SDNode EntryNode;
+
+  /// The root of the entire DAG.
+  SDValue Root;
+
+  /// A linked list of nodes in the current DAG.
+  ilist<SDNode> AllNodes;
+
+  /// The AllocatorType for allocating SDNodes. We use
+  /// pool allocation with recycling.
+  using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
+                                               sizeof(LargestSDNode),
+                                               alignof(MostAlignedSDNode)>;
+
+  /// Pool allocation for nodes.
+  NodeAllocatorType NodeAllocator;
+
+  /// This structure is used to memoize nodes, automatically performing
+  /// CSE with existing nodes when a duplicate is requested.
+  FoldingSet<SDNode> CSEMap;
+
+  /// Pool allocation for machine-opcode SDNode operands.
+  BumpPtrAllocator OperandAllocator;
+  ArrayRecycler<SDUse> OperandRecycler;
+
+  /// Pool allocation for misc. objects that are created once per SelectionDAG.
+  BumpPtrAllocator Allocator;
+
+  /// Tracks dbg_value information through SDISel.
+  SDDbgInfo *DbgInfo;
+
+  uint16_t NextPersistentId = 0;
+
+public:
+  /// Clients of various APIs that cause global effects on
+  /// the DAG can optionally implement this interface.  This allows the clients
+  /// to handle the various sorts of updates that happen.
+  ///
+  /// A DAGUpdateListener automatically registers itself with the DAG when it
+  /// is constructed, and removes itself when destroyed in RAII fashion.
+  struct DAGUpdateListener {
+    DAGUpdateListener *const Next;
+    SelectionDAG &DAG;
+
+    explicit DAGUpdateListener(SelectionDAG &D)
+      : Next(D.UpdateListeners), DAG(D) {
+      DAG.UpdateListeners = this;
+    }
+
+    virtual ~DAGUpdateListener() {
+      assert(DAG.UpdateListeners == this &&
+             "DAGUpdateListeners must be destroyed in LIFO order");
+      DAG.UpdateListeners = Next;
+    }
+
+    /// The node N that was deleted and, if E is not null, an
+    /// equivalent node E that replaced it.
+    virtual void NodeDeleted(SDNode *N, SDNode *E);
+
+    /// The node N that was updated.
+    virtual void NodeUpdated(SDNode *N);
+  };
+
+  struct DAGNodeDeletedListener : public DAGUpdateListener {
+    std::function<void(SDNode *, SDNode *)> Callback;
+
+    DAGNodeDeletedListener(SelectionDAG &DAG,
+                           std::function<void(SDNode *, SDNode *)> Callback)
+        : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
+
+    void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
+  };
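+
+  // A minimal usage sketch (illustrative only). As a DAGUpdateListener, the
+  // object registers itself on construction and unregisters itself when it
+  // goes out of scope:
+  //   DAGNodeDeletedListener Listener(DAG, [](SDNode *N, SDNode *E) {
+  //     // N was deleted; E, if non-null, is the node that replaced it.
+  //   });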
+
+  /// When true, additional steps are taken to
+  /// ensure that getConstant() and similar functions return DAG nodes that
+  /// have legal types. This is important after type legalization since
+  /// any illegally typed nodes generated after this point will not experience
+  /// type legalization.
+  bool NewNodesMustHaveLegalTypes = false;
+
+private:
+  /// DAGUpdateListener is a friend so it can manipulate the listener stack.
+  friend struct DAGUpdateListener;
+
+  /// Linked list of registered DAGUpdateListener instances.
+  /// This stack is maintained by DAGUpdateListener RAII.
+  DAGUpdateListener *UpdateListeners = nullptr;
+
+  /// Implementation of setSubgraphColor.
+  /// Return whether we had to truncate the search.
+  bool setSubgraphColorHelper(SDNode *N, const char *Color,
+                              DenseSet<SDNode *> &visited,
+                              int level, bool &printed);
+
+  template <typename SDNodeT, typename... ArgTypes>
+  SDNodeT *newSDNode(ArgTypes &&... Args) {
+    return new (NodeAllocator.template Allocate<SDNodeT>())
+        SDNodeT(std::forward<ArgTypes>(Args)...);
+  }
+
+  /// Build a synthetic SDNodeT with the given args and extract its subclass
+  /// data as an integer (e.g. for use in a folding set).
+  ///
+  /// The args to this function are the same as the args to SDNodeT's
+  /// constructor, except the second arg (assumed to be a const DebugLoc&) is
+  /// omitted.
+  template <typename SDNodeT, typename... ArgTypes>
+  static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
+                                               ArgTypes &&... Args) {
+    // The compiler can reduce this expression to a constant iff we pass an
+    // empty DebugLoc.  Thankfully, the debug location doesn't have any bearing
+    // on the subclass data.
+    return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
+        .getRawSubclassData();
+  }
+
+  template <typename SDNodeTy>
+  static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
+                                               SDVTList VTs, EVT MemoryVT,
+                                               MachineMemOperand *MMO) {
+    return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
+        .getRawSubclassData();
+  }
+
+  void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
+
+  void removeOperands(SDNode *Node) {
+    if (!Node->OperandList)
+      return;
+    OperandRecycler.deallocate(
+        ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
+        Node->OperandList);
+    Node->NumOperands = 0;
+    Node->OperandList = nullptr;
+  }
+  void CreateTopologicalOrder(std::vector<SDNode *> &Order);
+
+public:
+  explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
+  SelectionDAG(const SelectionDAG &) = delete;
+  SelectionDAG &operator=(const SelectionDAG &) = delete;
+  ~SelectionDAG();
+
+  /// Prepare this SelectionDAG to process code in the given MachineFunction.
+  void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
+            Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
+            DivergenceAnalysis *DA);
+
+  void setFunctionLoweringInfo(FunctionLoweringInfo *FuncInfo) {
+    FLI = FuncInfo;
+  }
+
+  /// Clear state and free memory necessary to make this
+  /// SelectionDAG ready to process a new block.
+  void clear();
+
+  MachineFunction &getMachineFunction() const { return *MF; }
+  const Pass *getPass() const { return SDAGISelPass; }
+
+  const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
+  const TargetMachine &getTarget() const { return TM; }
+  const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
+  const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
+  const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
+  const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
+  LLVMContext *getContext() const { return Context; }
+  OptimizationRemarkEmitter &getORE() const { return *ORE; }
+
+  /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
+  void viewGraph(const std::string &Title);
+  void viewGraph();
+
+#ifndef NDEBUG
+  std::map<const SDNode *, std::string> NodeGraphAttrs;
+#endif
+
+  /// Clear all previously defined node graph attributes.
+  /// Intended to be used from a debugging tool (e.g. gdb).
+  void clearGraphAttrs();
+
+  /// Set graph attributes for a node. (e.g. "color=red")
+  void setGraphAttrs(const SDNode *N, const char *Attrs);
+
+  /// Get graph attributes for a node. (e.g. "color=red")
+  /// Used from getNodeAttributes.
+  const std::string getGraphAttrs(const SDNode *N) const;
+
+  /// Convenience for setting node color attribute.
+  void setGraphColor(const SDNode *N, const char *Color);
+
+  /// Convenience for setting subgraph color attribute.
+  void setSubgraphColor(SDNode *N, const char *Color);
+
+  using allnodes_const_iterator = ilist<SDNode>::const_iterator;
+
+  allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
+  allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
+
+  using allnodes_iterator = ilist<SDNode>::iterator;
+
+  allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
+  allnodes_iterator allnodes_end() { return AllNodes.end(); }
+
+  ilist<SDNode>::size_type allnodes_size() const {
+    return AllNodes.size();
+  }
+
+  iterator_range<allnodes_iterator> allnodes() {
+    return make_range(allnodes_begin(), allnodes_end());
+  }
+  iterator_range<allnodes_const_iterator> allnodes() const {
+    return make_range(allnodes_begin(), allnodes_end());
+  }
+
+  /// Return the root tag of the SelectionDAG.
+  const SDValue &getRoot() const { return Root; }
+
+  /// Return the token chain corresponding to the entry of the function.
+  SDValue getEntryNode() const {
+    return SDValue(const_cast<SDNode *>(&EntryNode), 0);
+  }
+
+  /// Set the current root tag of the SelectionDAG.
+  ///
+  const SDValue &setRoot(SDValue N) {
+    assert((!N.getNode() || N.getValueType() == MVT::Other) &&
+           "DAG root value is not a chain!");
+    if (N.getNode())
+      checkForCycles(N.getNode(), this);
+    Root = N;
+    if (N.getNode())
+      checkForCycles(this);
+    return Root;
+  }
+
+  void VerifyDAGDiverence();
+
+  /// This iterates over the nodes in the SelectionDAG, folding
+  /// certain types of nodes together, or eliminating superfluous nodes.  The
+  /// Level argument controls whether Combine is allowed to produce nodes and
+  /// types that are illegal on the target.
+  void Combine(CombineLevel Level, AliasAnalysis *AA,
+               CodeGenOpt::Level OptLevel);
+
+  /// This transforms the SelectionDAG into a SelectionDAG that
+  /// only uses types natively supported by the target.
+  /// Returns "true" if it made any changes.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  bool LegalizeTypes();
+
+  /// This transforms the SelectionDAG into a SelectionDAG that is
+  /// compatible with the target instruction selector, as indicated by the
+  /// TargetLowering object.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  void Legalize();
+
+  /// \brief Transforms a SelectionDAG node and any operands to it into a node
+  /// that is compatible with the target instruction selector, as indicated by
+  /// the TargetLowering object.
+  ///
+  /// \returns true if \c N is a valid, legal node after calling this.
+  ///
+  /// This essentially runs a single recursive walk of the \c Legalize process
+  /// over the given node (and its operands). This can be used to incrementally
+  /// legalize the DAG. All of the nodes which are directly replaced,
+  /// potentially including N, are added to the output parameter \c
+  /// UpdatedNodes so that the delta to the DAG can be understood by the
+  /// caller.
+  ///
+  /// When this returns false, N has been legalized in a way that makes the
+  /// pointer passed in no longer valid. It may have even been deleted from the
+  /// DAG, and so it shouldn't be used further. When this returns true, the
+  /// N passed in is a legal node, and can be immediately processed as such.
+  /// This may still have done some work on the DAG, and will still populate
+  /// UpdatedNodes with any new nodes replacing those originally in the DAG.
+  bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
+
+  /// This transforms the SelectionDAG into a SelectionDAG
+  /// that only uses vector math operations supported by the target.  This is
+  /// necessary as a separate step from Legalize because unrolling a vector
+  /// operation can introduce illegal types, which requires running
+  /// LegalizeTypes again.
+  ///
+  /// This returns true if it made any changes; in that case, LegalizeTypes
+  /// is called again before Legalize.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  bool LegalizeVectors();
+
+  /// This method deletes all unreachable nodes in the SelectionDAG.
+  void RemoveDeadNodes();
+
+  /// Remove the specified node from the system.  This node must
+  /// have no referrers.
+  void DeleteNode(SDNode *N);
+
+  /// Return an SDVTList that represents the list of values specified.
+  SDVTList getVTList(EVT VT);
+  SDVTList getVTList(EVT VT1, EVT VT2);
+  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
+  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
+  SDVTList getVTList(ArrayRef<EVT> VTs);
+
+  //===--------------------------------------------------------------------===//
+  // Node creation methods.
+
+  /// \brief Create a ConstantSDNode wrapping a constant value.
+  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
+  ///
+  /// If only legal types can be produced, this does the necessary
+  /// transformations (e.g., if the vector element type is illegal).
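+  ///
+  /// A minimal sketch (assuming \c DAG and \c DL are in scope):
+  /// \code
+  ///   SDValue Imm = DAG.getConstant(42, DL, MVT::i32);
+  /// \endcode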
+  /// @{
+  SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+  SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+
+  SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
+                             bool IsOpaque = false) {
+    return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
+                       VT, IsTarget, IsOpaque);
+  }
+
+  SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+  SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
+                            bool isTarget = false);
+  SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+  SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+  SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+
+  /// \brief Create a true or false constant of type \p VT using the target's
+  /// BooleanContent for type \p OpVT.
+  SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
+  /// @}
+
+  /// \brief Create a ConstantFPSDNode wrapping a constant value.
+  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
+  ///
+  /// If only legal types can be produced, this does the necessary
+  /// transformations (e.g., if the vector element type is illegal).
+  /// The forms that take a double should only be used for simple constants
+  /// that can be exactly represented in VT.  No checks are made.
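+  ///
+  /// A minimal sketch (assuming \c DAG and \c DL are in scope):
+  /// \code
+  ///   SDValue FPImm = DAG.getConstantFP(1.0, DL, MVT::f32);
+  /// \endcode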
+  /// @{
+  SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getConstantFP(const ConstantFP &CF, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  /// @}
+
+  SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
+                           int64_t offset = 0, bool isTargetGA = false,
+                           unsigned char TargetFlags = 0);
+  SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
+                                 int64_t offset = 0,
+                                 unsigned char TargetFlags = 0) {
+    return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
+  }
+  SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
+  SDValue getTargetFrameIndex(int FI, EVT VT) {
+    return getFrameIndex(FI, VT, true);
+  }
+  SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
+                       unsigned char TargetFlags = 0);
+  SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
+    return getJumpTable(JTI, VT, true, TargetFlags);
+  }
+  SDValue getConstantPool(const Constant *C, EVT VT,
+                          unsigned Align = 0, int Offs = 0, bool isT = false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetConstantPool(const Constant *C, EVT VT,
+                                unsigned Align = 0, int Offset = 0,
+                                unsigned char TargetFlags = 0) {
+    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+  }
+  SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
+                          unsigned Align = 0, int Offs = 0, bool isT = false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
+                                unsigned Align = 0, int Offset = 0,
+                                unsigned char TargetFlags = 0) {
+    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+  }
+  SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
+                         unsigned char TargetFlags = 0);
+  // When generating a branch to a BB, we don't in general know enough
+  // to provide debug info for the BB at that time, so keep this one around.
+  SDValue getBasicBlock(MachineBasicBlock *MBB);
+  SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
+  SDValue getExternalSymbol(const char *Sym, EVT VT);
+  SDValue getExternalSymbol(const char *Sym, const SDLoc &dl, EVT VT);
+  SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
+                                  unsigned char TargetFlags = 0);
+  SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
+
+  SDValue getValueType(EVT);
+  SDValue getRegister(unsigned Reg, EVT VT);
+  SDValue getRegisterMask(const uint32_t *RegMask);
+  SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
+  SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
+                       MCSymbol *Label);
+  SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
+                          int64_t Offset = 0, bool isTarget = false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
+                                int64_t Offset = 0,
+                                unsigned char TargetFlags = 0) {
+    return getBlockAddress(BA, VT, Offset, true, TargetFlags);
+  }
+
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
+                       SDValue N) {
+    return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
+                   getRegister(Reg, N.getValueType()), N);
+  }
+
+  // This version of the getCopyToReg method takes an extra operand, which
+  // indicates that there is potentially an incoming glue value (if Glue is not
+  // null) and that there should be a glue result.
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
+                       SDValue Glue) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
+    return getNode(ISD::CopyToReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+  }
+
+  // Similar to last getCopyToReg() except parameter Reg is a SDValue
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
+                       SDValue Glue) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, Reg, N, Glue };
+    return getNode(ISD::CopyToReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+  }
+
+  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
+    SDVTList VTs = getVTList(VT, MVT::Other);
+    SDValue Ops[] = { Chain, getRegister(Reg, VT) };
+    return getNode(ISD::CopyFromReg, dl, VTs, Ops);
+  }
+
+  // This version of the getCopyFromReg method takes an extra operand, which
+  // indicates that there is potentially an incoming glue value (if Glue is not
+  // null) and that there should be a glue result.
+  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
+                         SDValue Glue) {
+    SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
+    return getNode(ISD::CopyFromReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
+  }
+
+  SDValue getCondCode(ISD::CondCode Cond);
+
+  /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
+  /// which must be a vector type, must match the number of elements in Mask.
+  /// An integer mask element equal to -1 is treated as undefined.
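+  ///
+  /// A minimal sketch (assuming \c DAG, \c dl, \c N1, and \c N2 are in
+  /// scope, with N1 and N2 of type v4i32):
+  /// \code
+  ///   int Mask[] = {0, 5, 2, 7}; // even lanes from N1, odd lanes from N2
+  ///   SDValue S = DAG.getVectorShuffle(MVT::v4i32, dl, N1, N2, Mask);
+  /// \endcode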
+  SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
+                           ArrayRef<int> Mask);
+
+  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
+  /// which must be a vector type, must match the number of operands in Ops.
+  /// The operands must have the same type as (or, for integers, a type wider
+  /// than) VT's element type.
+  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
+
+  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
+  /// which must be a vector type, must match the number of operands in Ops.
+  /// The operands must have the same type as (or, for integers, a type wider
+  /// than) VT's element type.
+  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
+
+  /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
+  /// elements. VT must be a vector type. Op's type must be the same as (or,
+  /// for integers, a type wider than) VT's element type.
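+  ///
+  /// A minimal sketch (assuming \c DAG and \c DL are in scope):
+  /// \code
+  ///   SDValue One = DAG.getConstant(1, DL, MVT::i32);
+  ///   SDValue Ones = DAG.getSplatBuildVector(MVT::v4i32, DL, One);
+  /// \endcode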
+  SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    if (Op.getOpcode() == ISD::UNDEF) {
+      assert((VT.getVectorElementType() == Op.getValueType() ||
+              (VT.isInteger() &&
+               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
+             "A splatted value must have a width equal or (for integers) "
+             "greater than the vector element type!");
+      return getNode(ISD::UNDEF, SDLoc(), VT);
+    }
+
+    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
+
+  /// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
+  /// the shuffle node in input but with swapped operands.
+  ///
+  /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
+  SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
+
+  /// Convert Op, which must be of float type, to the
+  /// float type VT, by either extending or rounding (by truncation).
+  SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either any-extending or truncating it.
+  SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either sign-extending or truncating it.
+  SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either zero-extending or truncating it.
+  SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return the expression required to zero extend the Op
+  /// value assuming it was the smaller SrcTy value.
+  SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT SrcTy);
+
+  /// Return an operation which will any-extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by any-extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return an operation which will sign extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by sign extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return an operation which will zero extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by zero extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the integer type VT,
+  /// by using an extension appropriate for the target's
+  /// BooleanContent for type OpVT or truncating it.
+  SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
+
+  /// Create a bitwise NOT operation as (XOR Val, -1).
+  SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
+
+  /// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
+  SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
+
+  /// \brief Create an add instruction with appropriate flags when used for
+  /// addressing some offset of an object. i.e. if a load is split into multiple
+  /// components, create an add nuw from the base pointer to the offset.
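+  ///
+  /// For example, the pointer to the high half of a split 64-bit load might
+  /// be formed as (sketch; \c DAG, \c DL, and \c BasePtr assumed in scope):
+  /// \code
+  ///   SDValue HiPtr = DAG.getObjectPtrOffset(DL, BasePtr, 4);
+  /// \endcode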
+  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, int64_t Offset) {
+    EVT VT = Op.getValueType();
+    return getObjectPtrOffset(SL, Op, getConstant(Offset, SL, VT));
+  }
+
+  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, SDValue Offset) {
+    EVT VT = Op.getValueType();
+
+    // The object itself can't wrap around the address space, so it shouldn't be
+    // possible for the adds of the offsets to the split parts to overflow.
+    SDNodeFlags Flags;
+    Flags.setNoUnsignedWrap(true);
+    return getNode(ISD::ADD, SL, VT, Op, Offset, Flags);
+  }
+
+  /// Return a new CALLSEQ_START node, which starts a new call frame, in which
+  /// InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END sequence
+  /// and OutSize specifies the part of the frame set up prior to the sequence.
+  SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
+                           const SDLoc &DL) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain,
+                      getIntPtrConstant(InSize, DL, true),
+                      getIntPtrConstant(OutSize, DL, true) };
+    return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
+  }
+
+  /// Return a new CALLSEQ_END node, which always must have a
+  /// glue result (to ensure it's not CSE'd).
+  /// CALLSEQ_END does not have a useful SDLoc.
+  SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
+                         SDValue InGlue, const SDLoc &DL) {
+    SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
+    SmallVector<SDValue, 4> Ops;
+    Ops.push_back(Chain);
+    Ops.push_back(Op1);
+    Ops.push_back(Op2);
+    if (InGlue.getNode())
+      Ops.push_back(InGlue);
+    return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
+  }
+
+  /// Return true if the result of this operation is always undefined.
+  bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
+
+  /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
+  SDValue getUNDEF(EVT VT) {
+    return getNode(ISD::UNDEF, SDLoc(), VT);
+  }
+
+  /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
+  SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
+    return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
+  }
+
+  /// Gets or creates the specified node.
+  ///
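+  /// A minimal sketch of building an integer add (assuming \c DAG, \c DL,
+  /// \c LHS, and \c RHS are in scope):
+  /// \code
+  ///   SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i32, LHS, RHS);
+  /// \endcode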
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+                  ArrayRef<SDUse> Ops);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+                  ArrayRef<SDValue> Ops,
+                  const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
+                  ArrayRef<SDValue> Ops);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
+                  ArrayRef<SDValue> Ops);
+
+  // Specialize based on number of operands.
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N,
+                  const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);
+
+  // Specialize again based on number of operands for nodes with a VTList
+  // rather than a single VT.
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);
+
+  /// Compute a TokenFactor to force all the incoming stack arguments to be
+  /// loaded from the stack. This is used in tail call lowering to protect
+  /// stack arguments from being clobbered.
+  SDValue getStackArgumentTokenFactor(SDValue Chain);
+
+  SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                    SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
+                    bool isTailCall, MachinePointerInfo DstPtrInfo,
+                    MachinePointerInfo SrcPtrInfo);
+
+  SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                     SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+                     MachinePointerInfo DstPtrInfo,
+                     MachinePointerInfo SrcPtrInfo);
+
+  SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                    SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+                    MachinePointerInfo DstPtrInfo);
+
+  /// Helper function to make it easier to build SetCC's if you just
+  /// have an ISD::CondCode instead of an SDValue.
+  ///
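+  /// A minimal sketch (assuming \c DAG, \c DL, \c A, and \c B are in scope):
+  /// \code
+  ///   SDValue Lt = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETLT);
+  /// \endcode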
+  SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
+                   ISD::CondCode Cond) {
+    assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
+      "Cannot compare scalars to vectors");
+    assert(LHS.getValueType().isVector() == VT.isVector() &&
+      "Cannot compare scalars to vectors");
+    assert(Cond != ISD::SETCC_INVALID &&
+        "Cannot create a setCC of an invalid node.");
+    return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+  }
+
+  /// Helper function to make it easier to build Select's if you just
+  /// have operands and don't want to check for vector.
+  SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
+                    SDValue RHS) {
+    assert(LHS.getValueType() == RHS.getValueType() &&
+           "Cannot use select on differing types");
+    assert(VT.isVector() == LHS.getValueType().isVector() &&
+           "Cannot mix vectors and scalars");
+    return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT,
+                   DL, VT, Cond, LHS, RHS);
+  }
+
+  /// Helper function to make it easier to build SelectCC's if you
+  /// just have an ISD::CondCode instead of an SDValue.
+  ///
+  SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
+                      SDValue False, ISD::CondCode Cond) {
+    return getNode(ISD::SELECT_CC, DL, True.getValueType(),
+                   LHS, RHS, True, False, getCondCode(Cond));
+  }
+
+  /// VAArg produces a result and token chain, and takes a pointer
+  /// and a source value as input.
+  SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                   SDValue SV, unsigned Align);
+
+  /// Gets a node for an atomic cmpxchg op. There are two
+  /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
+  /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
+  /// a success flag (initially i1), and a chain.
+  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                           SDVTList VTs, SDValue Chain, SDValue Ptr,
+                           SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
+                           unsigned Alignment, AtomicOrdering SuccessOrdering,
+                           AtomicOrdering FailureOrdering,
+                           SyncScope::ID SSID);
+  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                           SDVTList VTs, SDValue Chain, SDValue Ptr,
+                           SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces result (if relevant)
+  /// and chain and takes 2 operands.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
+                    SDValue Ptr, SDValue Val, const Value *PtrVal,
+                    unsigned Alignment, AtomicOrdering Ordering,
+                    SyncScope::ID SSID);
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
+                    SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces result and chain and
+  /// takes 1 operand.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
+                    SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces result and chain and takes N
+  /// operands.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                    SDVTList VTList, ArrayRef<SDValue> Ops,
+                    MachineMemOperand *MMO);
+
+  /// Creates a MemIntrinsicNode that may produce a
+  /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
+  /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
+  /// less than FIRST_TARGET_MEMORY_OPCODE.
+  SDValue getMemIntrinsicNode(
+      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
+      EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align = 0,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
+                                       MachineMemOperand::MOStore,
+      unsigned Size = 0);
+
+  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
+                              ArrayRef<SDValue> Ops, EVT MemVT,
+                              MachineMemOperand *MMO);
+
+  /// Create a MERGE_VALUES node from the given operands.
+  SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
+
+  /// Loads are not normal binary operators: their result type is not
+  /// determined by their operands, and they produce a value AND a token chain.
+  ///
+  /// This function will set the MOLoad flag on MMOFlags, but you can set it if
+  /// you want.  The MOStore flag must not be set.
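+  ///
+  /// A minimal sketch (assuming \c DAG, \c DL, \c Chain, \c Ptr, and
+  /// \c PtrInfo are in scope):
+  /// \code
+  ///   SDValue L = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);
+  ///   SDValue Loaded = L.getValue(0);    // the loaded value
+  ///   SDValue OutChain = L.getValue(1);  // the output token chain
+  /// \endcode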
+  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                  MachinePointerInfo PtrInfo, unsigned Alignment = 0,
+                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                  const AAMDNodes &AAInfo = AAMDNodes(),
+                  const MDNode *Ranges = nullptr);
+  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                  MachineMemOperand *MMO);
+  SDValue
+  getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
+             SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
+             unsigned Alignment = 0,
+             MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+             const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
+                     SDValue Chain, SDValue Ptr, EVT MemVT,
+                     MachineMemOperand *MMO);
+  SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
+                         SDValue Offset, ISD::MemIndexedMode AM);
+  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
+                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
+                  MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment = 0,
+                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                  const AAMDNodes &AAInfo = AAMDNodes(),
+                  const MDNode *Ranges = nullptr);
+  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
+                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
+                  EVT MemVT, MachineMemOperand *MMO);
+
+  /// Helper function to build ISD::STORE nodes.
+  ///
+  /// This function will set the MOStore flag on MMOFlags, but you can set it if
+  /// you want.  The MOLoad and MOInvariant flags must not be set.
+  SDValue
+  getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+           MachinePointerInfo PtrInfo, unsigned Alignment = 0,
+           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+           const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+                   MachineMemOperand *MMO);
+  SDValue
+  getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+                MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment = 0,
+                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
+                        SDValue Ptr, EVT TVT, MachineMemOperand *MMO);
+  SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
+                          SDValue Offset, ISD::MemIndexedMode AM);
+
+  /// Returns the sum of the base pointer and offset.
+  SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, const SDLoc &DL);
+
+  SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                        SDValue Mask, SDValue Src0, EVT MemVT,
+                        MachineMemOperand *MMO, ISD::LoadExtType,
+                        bool IsExpanding = false);
+  SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
+                         SDValue Ptr, SDValue Mask, EVT MemVT,
+                         MachineMemOperand *MMO, bool IsTruncating = false,
+                         bool IsCompressing = false);
+  SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
+                          ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+  SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
+                           ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+
+  /// Return a target-specific node, creating a new one or finding an
+  /// existing equivalent node. TargetMemSDNode should be a class derived
+  /// from MemSDNode.
+  template <class TargetMemSDNode>
+  SDValue getTargetMemSDNode(SDVTList VTs, ArrayRef<SDValue> Ops,
+                             const SDLoc &dl, EVT MemVT,
+                             MachineMemOperand *MMO);
+
+  /// Construct a node to track a Value* through the backend.
+  SDValue getSrcValue(const Value *v);
+
+  /// Return an MDNodeSDNode which holds an MDNode.
+  SDValue getMDNode(const MDNode *MD);
+
+  /// Return a bitcast using the SDLoc of the value operand, and casting to the
+  /// provided type. Use getNode to set a custom SDLoc.
+  SDValue getBitcast(EVT VT, SDValue V);
+
+  /// Return an AddrSpaceCastSDNode.
+  SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
+                           unsigned DestAS);
+
+  /// Return the specified value casted to
+  /// the target's desired shift amount type.
+  SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
+
+  /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
+  SDValue expandVAArg(SDNode *Node);
+
+  /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
+  SDValue expandVACopy(SDNode *Node);
+
+  /// *Mutate* the specified node in-place to have the
+  /// specified operands.  If the resultant node already exists in the DAG,
+  /// this does not modify the specified node, instead it returns the node that
+  /// already exists.  If the resultant node does not exist in the DAG, the
+  /// input node is returned.  As a degenerate case, if you specify the same
+  /// input operands as the node already has, the input node is returned.
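+  ///
+  /// A minimal sketch (assuming \c DAG, \c N, \c NewLHS, and \c NewRHS are
+  /// in scope):
+  /// \code
+  ///   SDNode *M = DAG.UpdateNodeOperands(N, NewLHS, NewRHS);
+  ///   // M == N unless an equivalent node already existed in the DAG.
+  /// \endcode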
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3, SDValue Op4);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3, SDValue Op4, SDValue Op5);
+  SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
+
+  /// Propagates the change in divergence to users.
+  void updateDivergence(SDNode *N);
+
+  /// These are used for target selectors to *mutate* the
+  /// specified node to have the specified return type, Target opcode, and
+  /// operands.  Note that target opcodes are stored as
+  /// ~TargetOpcode in the node opcode field.  The resultant node is returned.
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT, SDValue Op1);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       SDValue Op1, SDValue Op2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       SDValue Op1, SDValue Op2, SDValue Op3);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, SDValue Op1);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, SDValue Op1, SDValue Op2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
+                       ArrayRef<SDValue> Ops);
+
+  /// This *mutates* the specified node to have the specified
+  /// return type, opcode, and operands.
+  SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
+                      ArrayRef<SDValue> Ops);
+
+  /// Mutate the specified strict FP node to its non-strict equivalent,
+  /// unlinking the node from its chain and dropping the metadata arguments.
+  /// The node must be a strict FP node.
+  SDNode *mutateStrictFPToFP(SDNode *Node);
+
+  /// These are used for target selectors to create a new node
+  /// with specified return type(s), MachineInstr opcode, and operands.
+  ///
+  /// Note that getMachineNode returns the resultant node.  If there is already
+  /// a node of the specified opcode and operands, it returns that node instead
+  /// of the current one.
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1, SDValue Op2, SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
+                                SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
+                                ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
+                                ArrayRef<SDValue> Ops);
+
+  /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
+  SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
+                                 SDValue Operand);
+
+  /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
+  SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
+                                SDValue Operand, SDValue Subreg);
+
+  /// Get the specified node if it's already available, or else return NULL.
+  SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
+                          const SDNodeFlags Flags = SDNodeFlags());
+
+  /// Creates a SDDbgValue node.
+  SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
+                          unsigned R, bool IsIndirect, const DebugLoc &DL,
+                          unsigned O);
+
+  /// Creates a constant SDDbgValue node.
+  SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
+                                  const Value *C, const DebugLoc &DL,
+                                  unsigned O);
+
+  /// Creates a FrameIndex SDDbgValue node.
+  SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
+                                    unsigned FI, const DebugLoc &DL,
+                                    unsigned O);
+
+  /// Transfer debug values from one node to another, while optionally
+  /// generating fragment expressions for split-up values. If \p InvalidateDbg
+  /// is set, debug values are invalidated after they are transferred.
+  void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
+                         unsigned SizeInBits = 0, bool InvalidateDbg = true);
+
+  /// Remove the specified node from the system. If any of its
+  /// operands then becomes dead, remove them as well. Inform UpdateListener
+  /// for each node deleted.
+  void RemoveDeadNode(SDNode *N);
+
+  /// This method deletes the unreachable nodes in the
+  /// given list, and any nodes that become unreachable as a result.
+  void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
+
+  /// Modify anything using 'From' to use 'To' instead.
+  /// This can cause recursive merging of nodes in the DAG.  Use the first
+  /// version if 'From' is known to have a single result, use the second
+  /// if you have two nodes with identical results (or if 'To' has a superset
+  /// of the results of 'From'), use the third otherwise.
+  ///
+  /// These methods all take an optional UpdateListener, which (if not null) is
+  /// informed about nodes that are deleted and modified due to recursive
+  /// changes in the dag.
+  ///
+  /// These functions only replace all existing uses. It's possible that as
+  /// these replacements are being performed, CSE may cause the From node
+  /// to be given new uses. These new uses of From are left in place, and
+  /// not automatically transferred to To.
+  ///
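+  /// A minimal sketch (assuming \c From is a single-result SDNode* and
+  /// \c NewVal an SDValue, both in scope):
+  /// \code
+  ///   DAG.ReplaceAllUsesWith(SDValue(From, 0), NewVal);
+  /// \endcode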
+  void ReplaceAllUsesWith(SDValue From, SDValue Op);
+  void ReplaceAllUsesWith(SDNode *From, SDNode *To);
+  void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
+
+  /// Replace any uses of From with To, leaving
+  /// uses of other values produced by From.getNode() alone.
+  void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
+
+  /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
+  /// This correctly handles the case where
+  /// there is an overlap between the From values and the To values.
+  void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
+                                  unsigned Num);
+
+  /// If an existing load has uses of its chain, create a token factor node with
+  /// that chain and the new memory node's chain and update users of the old
+  /// chain to the token factor. This ensures that the new memory node will have
+  /// the same relative memory dependency position as the old load. Returns the
+  /// new merged load chain.
+  SDValue makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
+
+  /// Topologically sort the AllNodes list and
+  /// assign a unique node id to each node in the DAG based on its
+  /// topological order. Returns the number of nodes.
+  unsigned AssignTopologicalOrder();
+
+  /// Move node N in the AllNodes list to be immediately
+  /// before the given iterator Position. This may be used to update the
+  /// topological ordering when the list of nodes is modified.
+  void RepositionNode(allnodes_iterator Position, SDNode *N) {
+    AllNodes.insert(Position, AllNodes.remove(N));
+  }
+
+  /// Returns an APFloat semantics tag appropriate for the given type. If VT is
+  /// a vector type, the element semantics are returned.
+  static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
+    switch (VT.getScalarType().getSimpleVT().SimpleTy) {
+    default: llvm_unreachable("Unknown FP format");
+    case MVT::f16:     return APFloat::IEEEhalf();
+    case MVT::f32:     return APFloat::IEEEsingle();
+    case MVT::f64:     return APFloat::IEEEdouble();
+    case MVT::f80:     return APFloat::x87DoubleExtended();
+    case MVT::f128:    return APFloat::IEEEquad();
+    case MVT::ppcf128: return APFloat::PPCDoubleDouble();
+    }
+  }
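+
+  // For example, the returned semantics can seed an APFloat of matching
+  // precision (a sketch, assuming a float-typed VT):
+  //   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(MVT::f32);
+  //   APFloat Half(Sem, "0.5"); // IEEE single-precision 0.5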
+
+  /// Add a dbg_value SDNode. If SD is non-null, the
+  /// value is produced by SD.
+  void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
+
+  /// Get the debug values which reference the given SDNode.
+  ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
+    return DbgInfo->getSDDbgValues(SD);
+  }
+
+public:
+  /// Return true if there are any SDDbgValue nodes associated
+  /// with this SelectionDAG.
+  bool hasDebugValues() const { return !DbgInfo->empty(); }
+
+  SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
+  SDDbgInfo::DbgIterator DbgEnd()   { return DbgInfo->DbgEnd(); }
+
+  SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
+    return DbgInfo->ByvalParmDbgBegin();
+  }
+
+  SDDbgInfo::DbgIterator ByvalParmDbgEnd()   {
+    return DbgInfo->ByvalParmDbgEnd();
+  }
+
+  /// To be invoked on an SDNode that is slated to be erased. This
+  /// function mirrors \c llvm::salvageDebugInfo.
+  void salvageDebugInfo(SDNode &N);
+
+  void dump() const;
+
+  /// Create a stack temporary, suitable for holding the specified value type.
+  /// If minAlign is specified, the slot will have at least that alignment.
+  SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
+
+  /// Create a stack temporary suitable for holding either of the specified
+  /// value types.
+  SDValue CreateStackTemporary(EVT VT1, EVT VT2);
+
+  SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
+                           const GlobalAddressSDNode *GA,
+                           const SDNode *N2);
+
+  SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                 SDNode *Cst1, SDNode *Cst2);
+
+  SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                 const ConstantSDNode *Cst1,
+                                 const ConstantSDNode *Cst2);
+
+  SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                       ArrayRef<SDValue> Ops,
+                                       const SDNodeFlags Flags = SDNodeFlags());
+
+  /// Constant fold a setcc to true or false.
+  SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
+                    const SDLoc &dl);
+
+  /// See if the specified operand can be simplified with the knowledge that only
+  /// the bits specified by Mask are used.  If so, return the simpler operand,
+  /// otherwise return a null SDValue.
+  ///
+  /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
+  /// simplify nodes with multiple uses more aggressively.)
+  SDValue GetDemandedBits(SDValue V, const APInt &Mask);
+
+  /// Return true if the sign bit of Op is known to be zero.
+  /// We use this predicate to simplify operations downstream.
+  bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
+
+  /// Return true if 'Op & Mask' is known to be zero.  We
+  /// use this predicate to simplify operations downstream.  Op and Mask are
+  /// known to be the same type.
+  bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
+    const;
+
+  /// Determine which bits of Op are known to be either zero or one and return
+  /// them in Known. For vectors, the known bits are those that are shared by
+  /// every vector element.
+  /// Targets can implement the computeKnownBitsForTargetNode method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  void computeKnownBits(SDValue Op, KnownBits &Known, unsigned Depth = 0) const;
+
+  /// Determine which bits of Op are known to be either zero or one and return
+  /// them in Known. The DemandedElts argument allows us to only collect the
+  /// known bits that are shared by the requested vector elements.
+  /// Targets can implement the computeKnownBitsForTargetNode method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+                        unsigned Depth = 0) const;
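+
+  // A minimal sketch of the out-parameter form used by this snapshot,
+  // assuming Op is an SDValue in this DAG:
+  //   KnownBits Known;
+  //   DAG.computeKnownBits(Op, Known);
+  //   if (Known.isNonNegative())
+  //     ; // the sign bit of Op is known to be zero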
+
+  /// Used to represent the possible overflow behavior of an operation.
+  /// Never: the operation cannot overflow.
+  /// Always: the operation will always overflow.
+  /// Sometime: the operation may or may not overflow.
+  enum OverflowKind {
+    OFK_Never,
+    OFK_Sometime,
+    OFK_Always,
+  };
+
+  /// Determine if the result of the addition of two nodes can overflow.
+  OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
+
+  /// Test if the given value is known to have exactly one bit set. This differs
+  /// from computeKnownBits in that it doesn't necessarily determine which bit
+  /// is set.
+  bool isKnownToBeAPowerOfTwo(SDValue Val) const;
+
+  /// Return the number of times the sign bit of the register is replicated into
+  /// the other bits. We know that at least 1 bit is always equal to the sign
+  /// bit (itself), but other cases can give us information. For example,
+  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
+  /// to each other, so we return 3. Targets can implement the
+  /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
+  /// target nodes to be understood.
+  unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
+
+  /// Return the number of times the sign bit of the register is replicated into
+  /// the other bits. We know that at least 1 bit is always equal to the sign
+  /// bit (itself), but other cases can give us information. For example,
+  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
+  /// to each other, so we return 3. The DemandedElts argument allows
+  /// us to only collect the minimum sign bits of the requested vector elements.
+  /// Targets can implement the ComputeNumSignBitsForTarget method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
+                              unsigned Depth = 0) const;
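+
+  // For instance (a sketch, assuming Op is an integer-typed SDValue):
+  //   if (DAG.ComputeNumSignBits(Op) == Op.getValueSizeInBits())
+  //     ; // every bit equals the sign bit, so Op is either 0 or -1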
+
+  /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
+  /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
+  /// is guaranteed to have the same semantics as an ADD. This handles the
+  /// equivalence:
+  ///     X|Cst == X+Cst iff X&Cst = 0.
+  bool isBaseWithConstantOffset(SDValue Op) const;
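+
+  // Concretely: if X is known to have its low four bits clear, then
+  // (X | 5) == (X + 5) because the OR cannot carry into the upper bits,
+  // so this predicate also accepts such an ISD::OR as base-plus-offset.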
+
+  /// Test whether the given SDValue is known to never be NaN.
+  bool isKnownNeverNaN(SDValue Op) const;
+
+  /// Test whether the given SDValue is known to never be positive or negative
+  /// zero.
+  bool isKnownNeverZero(SDValue Op) const;
+
+  /// Test whether two SDValues are known to compare equal. This
+  /// is true if they are the same value, or if one is negative zero and the
+  /// other positive zero.
+  bool isEqualTo(SDValue A, SDValue B) const;
+
+  /// Return true if A and B have no common bits set. As an example, this can
+  /// allow an 'add' to be transformed into an 'or'.
+  bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
+
+  /// Utility function used by legalize and lowering to
+  /// "unroll" a vector operation by splitting out the scalars and operating
+  /// on each element individually.  If ResNE is 0, fully unroll the vector
+  /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
+  /// If ResNE is greater than the width of the vector op, unroll the
+  /// vector op and fill the end of the resulting vector with UNDEFs.
+  SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
+
+  /// Return true if loads are next to each other and can be
+  /// merged. Check that both are nonvolatile and that LD is loading
+  /// 'Bytes' bytes from a location that is 'Dist' units away from the
+  /// location that the 'Base' load is loading from.
+  bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
+                                      unsigned Bytes, int Dist) const;
+
+  /// Infer alignment of a load / store address. Return 0 if
+  /// it cannot be inferred.
+  unsigned InferPtrAlignment(SDValue Ptr) const;
+
+  /// Compute the VTs needed for the low/hi parts of a type
+  /// which is split (or expanded) into two not necessarily identical pieces.
+  std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
+
+  /// Split the vector with EXTRACT_SUBVECTOR using the provided
+  /// VTs and return the low/high part.
+  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
+                                          const EVT &LoVT, const EVT &HiVT);
+
+  /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
+  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
+    EVT LoVT, HiVT;
+    std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
+    return SplitVector(N, DL, LoVT, HiVT);
+  }
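+
+  // Typical use (a sketch, assuming Vec is a vector-typed SDValue):
+  //   SDValue Lo, Hi;
+  //   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL); // e.g. v8i32 -> 2 x v4i32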
+
+  /// Split the node's operand with EXTRACT_SUBVECTOR and
+  /// return the low/high part.
+  std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N,
+                                                 unsigned OpNo) {
+    return SplitVector(N->getOperand(OpNo), SDLoc(N));
+  }
+
+  /// Append the extracted elements from Start to Count out of the vector Op
+  /// in Args. If Count is 0, all of the elements will be extracted.
+  void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
+                             unsigned Start = 0, unsigned Count = 0);
+
+  /// Compute the default alignment value for the given type.
+  unsigned getEVTAlignment(EVT MemoryVT) const;
+
+  /// Test whether the given value is a constant int or similar node.
+  SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N);
+
+  /// Test whether the given value is a constant FP or similar node.
+  SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N);
+
+  /// \returns true if \p N is any kind of constant or build_vector of
+  /// constants, int or float. If a vector, it may not necessarily be a splat.
+  inline bool isConstantValueOfAnyType(SDValue N) {
+    return isConstantIntBuildVectorOrConstantInt(N) ||
+           isConstantFPBuildVectorOrConstantFP(N);
+  }
+
+private:
+  void InsertNode(SDNode *N);
+  bool RemoveNodeFromCSEMaps(SDNode *N);
+  void AddModifiedNodeToCSEMaps(SDNode *N);
+  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
+  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
+                               void *&InsertPos);
+  SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
+                               void *&InsertPos);
+  SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
+
+  void DeleteNodeNotInCSEMaps(SDNode *N);
+  void DeallocateNode(SDNode *N);
+
+  void allnodes_clear();
+
+  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
+  /// not, return the insertion token that will make insertion faster.  This
+  /// overload is for nodes other than Constant or ConstantFP, use the other one
+  /// for those.
+  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
+  /// not, return the insertion token that will make insertion faster.  Performs
+  /// additional processing for constant nodes.
+  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
+                              void *&InsertPos);
+
+  /// List of non-single value types.
+  FoldingSet<SDVTListNode> VTListMap;
+
+  /// Maps to auto-CSE operations.
+  std::vector<CondCodeSDNode*> CondCodeNodes;
+
+  std::vector<SDNode*> ValueTypeNodes;
+  std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
+  StringMap<SDNode*> ExternalSymbols;
+
+  std::map<std::pair<std::string, unsigned char>,SDNode*> TargetExternalSymbols;
+  DenseMap<MCSymbol *, SDNode *> MCSymbols;
+};
+
+template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
+  using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
+
+  static nodes_iterator nodes_begin(SelectionDAG *G) {
+    return nodes_iterator(G->allnodes_begin());
+  }
+
+  static nodes_iterator nodes_end(SelectionDAG *G) {
+    return nodes_iterator(G->allnodes_end());
+  }
+};
+
+template <class TargetMemSDNode>
+SDValue SelectionDAG::getTargetMemSDNode(SDVTList VTs,
+                                         ArrayRef<SDValue> Ops,
+                                         const SDLoc &dl, EVT MemVT,
+                                         MachineMemOperand *MMO) {
+  // Compose node ID and try to find an existing node.
+  FoldingSetNodeID ID;
+  unsigned Opcode =
+    TargetMemSDNode(dl.getIROrder(), DebugLoc(), VTs, MemVT, MMO).getOpcode();
+  ID.AddInteger(Opcode);
+  ID.AddPointer(VTs.VTs);
+  for (auto& Op : Ops) {
+    ID.AddPointer(Op.getNode());
+    ID.AddInteger(Op.getResNo());
+  }
+  ID.AddInteger(MemVT.getRawBits());
+  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
+  ID.AddInteger(getSyntheticNodeSubclassData<TargetMemSDNode>(
+    dl.getIROrder(), VTs, MemVT, MMO));
+
+  void *IP = nullptr;
+  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
+    cast<TargetMemSDNode>(E)->refineAlignment(MMO);
+    return SDValue(E, 0);
+  }
+
+  // Existing node was not found. Create a new one.
+  auto *N = newSDNode<TargetMemSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
+                                       MemVT, MMO);
+  createOperands(N, Ops);
+  CSEMap.InsertNode(N, IP);
+  InsertNode(N);
+  return SDValue(N, 0);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
new file mode 100644
index 0000000..5806064
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -0,0 +1,64 @@
+//===- SelectionDAGAddressAnalysis.h - DAG Address Analysis -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
+#define LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
+
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include <cstdint>
+
+namespace llvm {
+
+class SelectionDAG;
+
+/// Helper struct to parse and store a memory address as base + index + offset.
+/// We ignore sign extensions when it is safe to do so.
+/// The following two expressions are not equivalent. To differentiate we need
+/// to store whether there was a sign extension involved in the index
+/// computation.
+///  (load (i64 add (i64 copyfromreg %c)
+///                 (i64 signextend (add (i8 load %index)
+///                                      (i8 1)))))
+/// vs
+///
+/// (load (i64 add (i64 copyfromreg %c)
+///                (i64 signextend (i32 add (i32 signextend (i8 load %index))
+///                                         (i32 1)))))
+class BaseIndexOffset {
+private:
+  SDValue Base;
+  SDValue Index;
+  int64_t Offset = 0;
+  bool IsIndexSignExt = false;
+
+public:
+  BaseIndexOffset() = default;
+  BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
+                  bool IsIndexSignExt)
+      : Base(Base), Index(Index), Offset(Offset),
+        IsIndexSignExt(IsIndexSignExt) {}
+
+  SDValue getBase() { return Base; }
+  SDValue getIndex() { return Index; }
+
+  bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG) {
+    int64_t Off;
+    return equalBaseIndex(Other, DAG, Off);
+  }
+
+  bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG,
+                      int64_t &Off);
+
+  /// Parses tree in Ptr for base, index, offset addresses.
+  static BaseIndexOffset match(LSBaseSDNode *N, const SelectionDAG &DAG);
+};
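+
+// A minimal usage sketch (assuming St0 and St1 are StoreSDNode pointers in
+// DAG; StoreSDNode derives from LSBaseSDNode):
+//   BaseIndexOffset B0 = BaseIndexOffset::match(St0, DAG);
+//   BaseIndexOffset B1 = BaseIndexOffset::match(St1, DAG);
+//   int64_t Off;
+//   if (B0.equalBaseIndex(B1, DAG, Off))
+//     ; // the two addresses share base+index and differ by exactly Off bytes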
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
new file mode 100644
index 0000000..e56eafc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
@@ -0,0 +1,348 @@
+//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SelectionDAGISel class, which is used as the common
+// base class for SelectionDAG-based instruction selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
+#define LLVM_CODEGEN_SELECTIONDAGISEL_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+  class FastISel;
+  class SelectionDAGBuilder;
+  class SDValue;
+  class MachineRegisterInfo;
+  class MachineBasicBlock;
+  class MachineFunction;
+  class MachineInstr;
+  class OptimizationRemarkEmitter;
+  class TargetLowering;
+  class TargetLibraryInfo;
+  class FunctionLoweringInfo;
+  class ScheduleHazardRecognizer;
+  class GCFunctionInfo;
+  class ScheduleDAGSDNodes;
+  class LoadInst;
+
+/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
+/// pattern-matching instruction selectors.
+class SelectionDAGISel : public MachineFunctionPass {
+public:
+  TargetMachine &TM;
+  const TargetLibraryInfo *LibInfo;
+  FunctionLoweringInfo *FuncInfo;
+  MachineFunction *MF;
+  MachineRegisterInfo *RegInfo;
+  SelectionDAG *CurDAG;
+  SelectionDAGBuilder *SDB;
+  AliasAnalysis *AA;
+  GCFunctionInfo *GFI;
+  CodeGenOpt::Level OptLevel;
+  const TargetInstrInfo *TII;
+  const TargetLowering *TLI;
+  bool FastISelFailed;
+  SmallPtrSet<const Instruction *, 4> ElidedArgCopyInstrs;
+
+  /// Current optimization remark emitter.
+  /// Used to report things like combines and FastISel failures.
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+  static char ID;
+
+  explicit SelectionDAGISel(TargetMachine &tm,
+                            CodeGenOpt::Level OL = CodeGenOpt::Default);
+  ~SelectionDAGISel() override;
+
+  const TargetLowering *getTargetLowering() const { return TLI; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  virtual void EmitFunctionEntryCode() {}
+
+  /// PreprocessISelDAG - This hook allows targets to hack on the graph before
+  /// instruction selection starts.
+  virtual void PreprocessISelDAG() {}
+
+  /// PostprocessISelDAG() - This hook allows the target to hack on the graph
+  /// right after selection.
+  virtual void PostprocessISelDAG() {}
+
+  /// Main hook for targets to transform nodes into machine nodes.
+  virtual void Select(SDNode *N) = 0;
+
+  /// SelectInlineAsmMemoryOperand - Select the specified address as a target
+  /// addressing mode, according to the specified constraint.  If this does
+  /// not match or is not implemented, return true.  The resultant operands
+  /// (which will appear in the machine instruction) should be added to the
+  /// OutOps vector.
+  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+                                            unsigned ConstraintID,
+                                            std::vector<SDValue> &OutOps) {
+    return true;
+  }
+
+  /// IsProfitableToFold - Returns true if it's profitable to fold the specific
+  /// operand node N of U during instruction selection that starts at Root.
+  virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
+
+  /// IsLegalToFold - Returns true if the specific operand node N of
+  /// U can be folded during instruction selection that starts at Root.
+  /// FIXME: This is a static member function because the MSP430 and X86
+  /// targets use it during isel.  This could become a proper member.
+  static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
+                            CodeGenOpt::Level OptLevel,
+                            bool IgnoreChains = false);
+
+  static void InvalidateNodeId(SDNode *N);
+  static int getUninvalidatedNodeId(SDNode *N);
+
+  static void EnforceNodeIdInvariant(SDNode *N);
+
+  // Opcodes used by the DAG state machine:
+  enum BuiltinOpcodes {
+    OPC_Scope,
+    OPC_RecordNode,
+    OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
+    OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
+    OPC_RecordMemRef,
+    OPC_CaptureGlueInput,
+    OPC_MoveChild,
+    OPC_MoveChild0, OPC_MoveChild1, OPC_MoveChild2, OPC_MoveChild3,
+    OPC_MoveChild4, OPC_MoveChild5, OPC_MoveChild6, OPC_MoveChild7,
+    OPC_MoveParent,
+    OPC_CheckSame,
+    OPC_CheckChild0Same, OPC_CheckChild1Same,
+    OPC_CheckChild2Same, OPC_CheckChild3Same,
+    OPC_CheckPatternPredicate,
+    OPC_CheckPredicate,
+    OPC_CheckOpcode,
+    OPC_SwitchOpcode,
+    OPC_CheckType,
+    OPC_CheckTypeRes,
+    OPC_SwitchType,
+    OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
+    OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
+    OPC_CheckChild6Type, OPC_CheckChild7Type,
+    OPC_CheckInteger,
+    OPC_CheckChild0Integer, OPC_CheckChild1Integer, OPC_CheckChild2Integer,
+    OPC_CheckChild3Integer, OPC_CheckChild4Integer,
+    OPC_CheckCondCode,
+    OPC_CheckValueType,
+    OPC_CheckComplexPat,
+    OPC_CheckAndImm, OPC_CheckOrImm,
+    OPC_CheckFoldableChainNode,
+
+    OPC_EmitInteger,
+    OPC_EmitRegister,
+    OPC_EmitRegister2,
+    OPC_EmitConvertToTarget,
+    OPC_EmitMergeInputChains,
+    OPC_EmitMergeInputChains1_0,
+    OPC_EmitMergeInputChains1_1,
+    OPC_EmitMergeInputChains1_2,
+    OPC_EmitCopyToReg,
+    OPC_EmitNodeXForm,
+    OPC_EmitNode,
+    // Space-optimized forms that implicitly encode number of result VTs.
+    OPC_EmitNode0, OPC_EmitNode1, OPC_EmitNode2,
+    OPC_MorphNodeTo,
+    // Space-optimized forms that implicitly encode number of result VTs.
+    OPC_MorphNodeTo0, OPC_MorphNodeTo1, OPC_MorphNodeTo2,
+    OPC_CompleteMatch,
+    // Contains offset in table for pattern being selected
+    OPC_Coverage
+  };
+
+  enum {
+    OPFL_None       = 0,  // Node has no chain or glue input and isn't variadic.
+    OPFL_Chain      = 1,     // Node has a chain input.
+    OPFL_GlueInput  = 2,     // Node has a glue input.
+    OPFL_GlueOutput = 4,     // Node has a glue output.
+    OPFL_MemRefs    = 8,     // Node gets accumulated MemRefs.
+    OPFL_Variadic0  = 1<<4,  // Node is variadic, root has 0 fixed inputs.
+    OPFL_Variadic1  = 2<<4,  // Node is variadic, root has 1 fixed input.
+    OPFL_Variadic2  = 3<<4,  // Node is variadic, root has 2 fixed inputs.
+    OPFL_Variadic3  = 4<<4,  // Node is variadic, root has 3 fixed inputs.
+    OPFL_Variadic4  = 5<<4,  // Node is variadic, root has 4 fixed inputs.
+    OPFL_Variadic5  = 6<<4,  // Node is variadic, root has 5 fixed inputs.
+    OPFL_Variadic6  = 7<<4,  // Node is variadic, root has 6 fixed inputs.
+
+    OPFL_VariadicInfo = OPFL_Variadic6
+  };
+
+  /// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
+  /// number of fixed arity values that should be skipped when copying from the
+  /// root.
+  static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
+    return ((Flags&OPFL_VariadicInfo) >> 4)-1;
+  }
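+
+  // Worked example: for OPFL_Variadic2 (3<<4), ((3<<4) & OPFL_VariadicInfo)
+  // >> 4 yields 3; subtracting 1 gives the 2 fixed inputs the flag encodes.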
+
+
+protected:
+  /// DAGSize - Size of DAG being instruction selected.
+  ///
+  unsigned DAGSize;
+
+  /// ReplaceUses - replace all uses of the old node F with the use
+  /// of the new node T.
+  void ReplaceUses(SDValue F, SDValue T) {
+    CurDAG->ReplaceAllUsesOfValueWith(F, T);
+    EnforceNodeIdInvariant(T.getNode());
+  }
+
+  /// ReplaceUses - replace all uses of the old nodes F with the use
+  /// of the new nodes T.
+  void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
+    CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
+    for (unsigned i = 0; i < Num; ++i)
+      EnforceNodeIdInvariant(T[i].getNode());
+  }
+
+  /// ReplaceUses - replace all uses of the old node F with the use
+  /// of the new node T.
+  void ReplaceUses(SDNode *F, SDNode *T) {
+    CurDAG->ReplaceAllUsesWith(F, T);
+    EnforceNodeIdInvariant(T);
+  }
+
+  /// Replace all uses of \c F with \c T, then remove \c F from the DAG.
+  void ReplaceNode(SDNode *F, SDNode *T) {
+    CurDAG->ReplaceAllUsesWith(F, T);
+    EnforceNodeIdInvariant(T);
+    CurDAG->RemoveDeadNode(F);
+  }
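+
+  // A typical pattern in a target's Select() implementation (a sketch;
+  // MyTargetOpc is a hypothetical machine opcode):
+  //   ReplaceNode(N, CurDAG->getMachineNode(MyTargetOpc, SDLoc(N),
+  //                                         N->getValueType(0), Ops));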
+
+  /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
+  /// by tblgen.  Others should not call it.
+  void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
+                                     const SDLoc &DL);
+
+  /// getPatternForIndex - Return the pattern that tablegen selected for the
+  /// given index during ISel.
+  virtual StringRef getPatternForIndex(unsigned index) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  /// getIncludePathForIndex - Get the .td source location where the pattern
+  /// for the given index was instantiated.
+  virtual StringRef getIncludePathForIndex(unsigned index) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+public:
+  // Calls to these predicates are generated by tblgen.
+  bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
+                    int64_t DesiredMaskS) const;
+  bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
+                    int64_t DesiredMaskS) const;
+
+
+  /// CheckPatternPredicate - This function is generated by tblgen in the
+  /// target.  It runs the specified pattern predicate and returns true if it
+  /// succeeds or false if it fails.  The number is a private implementation
+  /// detail to the code tblgen produces.
+  virtual bool CheckPatternPredicate(unsigned PredNo) const {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  /// CheckNodePredicate - This function is generated by tblgen in the target.
+  /// It runs node predicate number PredNo and returns true if it succeeds or
+  /// false if it fails.  The number is a private implementation
+  /// detail to the code tblgen produces.
+  virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
+                                   unsigned PatternNo,
+                        SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
+    llvm_unreachable("Tblgen should generate this!");
+  }
+
+  void SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
+                        unsigned TableSize);
+
+  /// \brief Return true if complex patterns for this target can mutate the
+  /// DAG.
+  virtual bool ComplexPatternFuncMutatesDAG() const {
+    return false;
+  }
+
+  bool isOrEquivalentToAdd(const SDNode *N) const;
+
+private:
+
+  // Calls to these functions are generated by tblgen.
+  void Select_INLINEASM(SDNode *N);
+  void Select_READ_REGISTER(SDNode *N);
+  void Select_WRITE_REGISTER(SDNode *N);
+  void Select_UNDEF(SDNode *N);
+  void CannotYetSelect(SDNode *N);
+
+private:
+  void DoInstructionSelection();
+  SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
+                    ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
+
+  SDNode *MutateStrictFPToFP(SDNode *Node, unsigned NewOpc);
+
+  /// Prepares the landing pad to take incoming values or do other EH
+  /// personality specific tasks. Returns true if the block should be
+  /// instruction selected, false if no code should be emitted for it.
+  bool PrepareEHLandingPad();
+
+  /// \brief Perform instruction selection on all basic blocks in the function.
+  void SelectAllBasicBlocks(const Function &Fn);
+
+  /// \brief Perform instruction selection on a single basic block, for
+  /// instructions between \p Begin and \p End.  \p HadTailCall will be set
+  /// to true if a call in the block was translated as a tail call.
+  void SelectBasicBlock(BasicBlock::const_iterator Begin,
+                        BasicBlock::const_iterator End,
+                        bool &HadTailCall);
+  void FinishBasicBlock();
+
+  void CodeGenAndEmitDAG();
+
+  /// \brief Generate instructions for lowering the incoming arguments of the
+  /// given function.
+  void LowerArguments(const Function &F);
+
+  void ComputeLiveOutVRegInfo();
+
+  /// Create the scheduler. If a specific scheduler was specified
+  /// via the SchedulerRegistry, use it; otherwise, select the
+  /// one preferred by the target.
+  ///
+  ScheduleDAGSDNodes *CreateScheduler();
+
+  /// OpcodeOffset - This is a cache used to dispatch efficiently into isel
+  /// state machines that start with an OPC_SwitchOpcode node.
+  std::vector<unsigned> OpcodeOffset;
+
+  void UpdateChains(SDNode *NodeToMatch, SDValue InputChain,
+                    SmallVectorImpl<SDNode *> &ChainNodesMatched,
+                    bool isMorphNodeTo);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGISEL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
new file mode 100644
index 0000000..ffb5c00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -0,0 +1,2399 @@
+//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SDNode class and derived classes, which are used to
+// represent the nodes and operations present in a SelectionDAG.  These nodes
+// and operations are machine code level operations, with some similarities to
+// the GCC RTL representation.
+//
+// Clients should include the SelectionDAG.h file instead of this file directly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
+#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <tuple>
+
+namespace llvm {
+
+class APInt;
+class Constant;
+template <typename T> struct DenseMapInfo;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MCSymbol;
+class raw_ostream;
+class SDNode;
+class SelectionDAG;
+class Type;
+class Value;
+
+void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
+                    bool force = false);
+
+/// This represents a list of ValueTypes that has been intern'd by
+/// a SelectionDAG.  Instances of this simple value class are returned by
+/// SelectionDAG::getVTList(...).
+///
+struct SDVTList {
+  const EVT *VTs;
+  unsigned int NumVTs;
+};
+
+namespace ISD {
+
+  /// Node predicates
+
+  /// If N is a BUILD_VECTOR node whose elements are all the same constant or
+  /// undefined, return true and return the constant value in \p SplatValue.
+  bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
+
+  /// Return true if the specified node is a BUILD_VECTOR where all of the
+  /// elements are ~0 or undef.
+  bool isBuildVectorAllOnes(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR where all of the
+  /// elements are 0 or undef.
+  bool isBuildVectorAllZeros(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR node of all
+  /// ConstantSDNode or undef.
+  bool isBuildVectorOfConstantSDNodes(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR node of all
+  /// ConstantFPSDNode or undef.
+  bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
+
+  /// Return true if the node has at least one operand and all operands of the
+  /// specified node are ISD::UNDEF.
+  bool allOperandsUndef(const SDNode *N);
+
+} // end namespace ISD
+
+//===----------------------------------------------------------------------===//
+/// Unlike LLVM values, Selection DAG nodes may return multiple
+/// values as the result of a computation.  Many nodes return multiple values,
+/// from loads (which define a token and a return value) to ADDC (which returns
+/// a result and a carry value), to calls (which may return an arbitrary number
+/// of values).
+///
+/// As such, each use of a SelectionDAG computation must indicate the node that
+/// computes it as well as which return value to use from that node.  This pair
+/// of information is represented with the SDValue value type.
+///
+class SDValue {
+  friend struct DenseMapInfo<SDValue>;
+
+  SDNode *Node = nullptr; // The node defining the value we are using.
+  unsigned ResNo = 0;     // Which return value of the node we are using.
+
+public:
+  SDValue() = default;
+  SDValue(SDNode *node, unsigned resno);
+
+  /// get the index which selects a specific result in the SDNode
+  unsigned getResNo() const { return ResNo; }
+
+  /// get the SDNode which holds the desired result
+  SDNode *getNode() const { return Node; }
+
+  /// set the SDNode
+  void setNode(SDNode *N) { Node = N; }
+
+  inline SDNode *operator->() const { return Node; }
+
+  bool operator==(const SDValue &O) const {
+    return Node == O.Node && ResNo == O.ResNo;
+  }
+  bool operator!=(const SDValue &O) const {
+    return !operator==(O);
+  }
+  bool operator<(const SDValue &O) const {
+    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
+  }
+  explicit operator bool() const {
+    return Node != nullptr;
+  }
+
+  SDValue getValue(unsigned R) const {
+    return SDValue(Node, R);
+  }
+
+  /// Return true if this node is an operand of N.
+  bool isOperandOf(const SDNode *N) const;
+
+  /// Return the ValueType of the referenced return value.
+  inline EVT getValueType() const;
+
+  /// Return the simple ValueType of the referenced return value.
+  MVT getSimpleValueType() const {
+    return getValueType().getSimpleVT();
+  }
+
+  /// Returns the size of the value in bits.
+  unsigned getValueSizeInBits() const {
+    return getValueType().getSizeInBits();
+  }
+
+  unsigned getScalarValueSizeInBits() const {
+    return getValueType().getScalarType().getSizeInBits();
+  }
+
+  // Forwarding methods - These forward to the corresponding methods in SDNode.
+  inline unsigned getOpcode() const;
+  inline unsigned getNumOperands() const;
+  inline const SDValue &getOperand(unsigned i) const;
+  inline uint64_t getConstantOperandVal(unsigned i) const;
+  inline bool isTargetMemoryOpcode() const;
+  inline bool isTargetOpcode() const;
+  inline bool isMachineOpcode() const;
+  inline bool isUndef() const;
+  inline unsigned getMachineOpcode() const;
+  inline const DebugLoc &getDebugLoc() const;
+  inline void dump() const;
+  inline void dump(const SelectionDAG *G) const;
+  inline void dumpr() const;
+  inline void dumpr(const SelectionDAG *G) const;
+
+  /// Return true if this operand (which must be a chain) reaches the
+  /// specified operand without crossing any side-effecting instructions.
+  /// In practice, this looks through token factors and non-volatile loads.
+  /// In order to remain efficient, this only
+  /// looks a couple of nodes in; it does not do an exhaustive search.
+  bool reachesChainWithoutSideEffects(SDValue Dest,
+                                      unsigned Depth = 2) const;
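+
+  // Example check (a sketch, assuming LD is a LoadSDNode whose chain result
+  // is value #1 and Chain is the chain operand being considered):
+  //   if (Chain.reachesChainWithoutSideEffects(SDValue(LD, 1)))
+  //     ; // no side-effecting node sits between Chain and the load's chain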
+
+  /// Return true if there are no nodes using value ResNo of Node.
+  inline bool use_empty() const;
+
+  /// Return true if there is exactly one node using value ResNo of Node.
+  inline bool hasOneUse() const;
+};
+
+template<> struct DenseMapInfo<SDValue> {
+  static inline SDValue getEmptyKey() {
+    SDValue V;
+    V.ResNo = -1U;
+    return V;
+  }
+
+  static inline SDValue getTombstoneKey() {
+    SDValue V;
+    V.ResNo = -2U;
+    return V;
+  }
+
+  static unsigned getHashValue(const SDValue &Val) {
+    return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
+            (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
+  }
+
+  static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
+    return LHS == RHS;
+  }
+};
+template <> struct isPodLike<SDValue> { static const bool value = true; };
+
+/// Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDValue> {
+  using SimpleType = SDNode *;
+
+  static SimpleType getSimplifiedValue(SDValue &Val) {
+    return Val.getNode();
+  }
+};
+template<> struct simplify_type<const SDValue> {
+  using SimpleType = /*const*/ SDNode *;
+
+  static SimpleType getSimplifiedValue(const SDValue &Val) {
+    return Val.getNode();
+  }
+};
+
+/// Represents a use of an SDNode. This class holds an SDValue,
+/// which records the SDNode being used and the result number, a
+/// pointer to the SDNode using the value, and Next and Prev pointers,
+/// which link together all the uses of an SDNode.
+///
+class SDUse {
+  /// Val - The value being used.
+  SDValue Val;
+  /// User - The user of this value.
+  SDNode *User = nullptr;
+  /// Prev, Next - Pointers to the uses list of the SDNode referred by
+  /// this operand.
+  SDUse **Prev = nullptr;
+  SDUse *Next = nullptr;
+
+public:
+  SDUse() = default;
+  SDUse(const SDUse &U) = delete;
+  SDUse &operator=(const SDUse &) = delete;
+
+  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
+  operator const SDValue&() const { return Val; }
+
+  /// If implicit conversion to SDValue doesn't work, the get() method returns
+  /// the SDValue.
+  const SDValue &get() const { return Val; }
+
+  /// This returns the SDNode that contains this Use.
+  SDNode *getUser() { return User; }
+
+  /// Get the next SDUse in the use list.
+  SDUse *getNext() const { return Next; }
+
+  /// Convenience function for get().getNode().
+  SDNode *getNode() const { return Val.getNode(); }
+  /// Convenience function for get().getResNo().
+  unsigned getResNo() const { return Val.getResNo(); }
+  /// Convenience function for get().getValueType().
+  EVT getValueType() const { return Val.getValueType(); }
+
+  /// Convenience function for get().operator==
+  bool operator==(const SDValue &V) const {
+    return Val == V;
+  }
+
+  /// Convenience function for get().operator!=
+  bool operator!=(const SDValue &V) const {
+    return Val != V;
+  }
+
+  /// Convenience function for get().operator<
+  bool operator<(const SDValue &V) const {
+    return Val < V;
+  }
+
+private:
+  friend class SelectionDAG;
+  friend class SDNode;
+  // TODO: unfriend HandleSDNode once we fix its operand handling.
+  friend class HandleSDNode;
+
+  void setUser(SDNode *p) { User = p; }
+
+  /// Remove this use from its existing use list, assign it the
+  /// given value, and add it to the new value's node's use list.
+  inline void set(const SDValue &V);
+  /// Like set, but only supports initializing a newly-allocated
+  /// SDUse with a non-null value.
+  inline void setInitial(const SDValue &V);
+  /// Like set, but only sets the Node portion of the value,
+  /// leaving the ResNo portion unmodified.
+  inline void setNode(SDNode *N);
+
+  void addToList(SDUse **List) {
+    Next = *List;
+    if (Next) Next->Prev = &Next;
+    Prev = List;
+    *List = this;
+  }
+
+  void removeFromList() {
+    *Prev = Next;
+    if (Next) Next->Prev = Prev;
+  }
+};
+
+/// simplify_type specializations - Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDUse> {
+  using SimpleType = SDNode *;
+
+  static SimpleType getSimplifiedValue(SDUse &Val) {
+    return Val.getNode();
+  }
+};
+
+/// These are IR-level optimization flags that may be propagated to SDNodes.
+/// TODO: This data structure should be shared by the IR optimizer and the
+/// backend.
+struct SDNodeFlags {
+private:
+  // This bit is used to determine if the flags are in a defined state.
+  // Flag bits can only be masked out during intersection if the masking flags
+  // are defined.
+  bool AnyDefined : 1;
+
+  bool NoUnsignedWrap : 1;
+  bool NoSignedWrap : 1;
+  bool Exact : 1;
+  bool UnsafeAlgebra : 1;
+  bool NoNaNs : 1;
+  bool NoInfs : 1;
+  bool NoSignedZeros : 1;
+  bool AllowReciprocal : 1;
+  bool VectorReduction : 1;
+  bool AllowContract : 1;
+
+public:
+  /// Default constructor turns off all optimization flags.
+  SDNodeFlags()
+      : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
+        Exact(false), UnsafeAlgebra(false), NoNaNs(false), NoInfs(false),
+        NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
+        AllowContract(false) {}
+
+  /// Sets the state of the flags to the defined state.
+  void setDefined() { AnyDefined = true; }
+  /// Returns true if the flags are in a defined state.
+  bool isDefined() const { return AnyDefined; }
+
+  // These are mutators for each flag.
+  void setNoUnsignedWrap(bool b) {
+    setDefined();
+    NoUnsignedWrap = b;
+  }
+  void setNoSignedWrap(bool b) {
+    setDefined();
+    NoSignedWrap = b;
+  }
+  void setExact(bool b) {
+    setDefined();
+    Exact = b;
+  }
+  void setUnsafeAlgebra(bool b) {
+    setDefined();
+    UnsafeAlgebra = b;
+  }
+  void setNoNaNs(bool b) {
+    setDefined();
+    NoNaNs = b;
+  }
+  void setNoInfs(bool b) {
+    setDefined();
+    NoInfs = b;
+  }
+  void setNoSignedZeros(bool b) {
+    setDefined();
+    NoSignedZeros = b;
+  }
+  void setAllowReciprocal(bool b) {
+    setDefined();
+    AllowReciprocal = b;
+  }
+  void setVectorReduction(bool b) {
+    setDefined();
+    VectorReduction = b;
+  }
+  void setAllowContract(bool b) {
+    setDefined();
+    AllowContract = b;
+  }
+
+  // These are accessors for each flag.
+  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
+  bool hasNoSignedWrap() const { return NoSignedWrap; }
+  bool hasExact() const { return Exact; }
+  bool hasUnsafeAlgebra() const { return UnsafeAlgebra; }
+  bool hasNoNaNs() const { return NoNaNs; }
+  bool hasNoInfs() const { return NoInfs; }
+  bool hasNoSignedZeros() const { return NoSignedZeros; }
+  bool hasAllowReciprocal() const { return AllowReciprocal; }
+  bool hasVectorReduction() const { return VectorReduction; }
+  bool hasAllowContract() const { return AllowContract; }
+
+  /// Clear any flags in this flag set that aren't also set in Flags.
+  /// If the given Flags are undefined then don't do anything.
+  void intersectWith(const SDNodeFlags Flags) {
+    if (!Flags.isDefined())
+      return;
+    NoUnsignedWrap &= Flags.NoUnsignedWrap;
+    NoSignedWrap &= Flags.NoSignedWrap;
+    Exact &= Flags.Exact;
+    UnsafeAlgebra &= Flags.UnsafeAlgebra;
+    NoNaNs &= Flags.NoNaNs;
+    NoInfs &= Flags.NoInfs;
+    NoSignedZeros &= Flags.NoSignedZeros;
+    AllowReciprocal &= Flags.AllowReciprocal;
+    VectorReduction &= Flags.VectorReduction;
+    AllowContract &= Flags.AllowContract;
+  }
+};
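+
+// A minimal sketch of combining flags when merging two nodes (assuming N0
+// and N1 are SDNode pointers, using SDNode's getFlags() accessor):
+//   SDNodeFlags Flags = N0->getFlags();
+//   Flags.intersectWith(N1->getFlags()); // keep only flags set on both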
+
+/// Represents one node in the SelectionDAG.
+///
+class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
+private:
+  /// The operation that this node performs.
+  int16_t NodeType;
+
+protected:
+  // We define a set of mini-helper classes to help us interpret the bits in our
+  // SubclassData.  These are designed to fit within a uint16_t so they pack
+  // with NodeType.
+
+  class SDNodeBitfields {
+    friend class SDNode;
+    friend class MemIntrinsicSDNode;
+    friend class MemSDNode;
+    friend class SelectionDAG;
+
+    uint16_t HasDebugValue : 1;
+    uint16_t IsMemIntrinsic : 1;
+    uint16_t IsDivergent : 1;
+  };
+  enum { NumSDNodeBits = 3 };
+
+  class ConstantSDNodeBitfields {
+    friend class ConstantSDNode;
+
+    uint16_t : NumSDNodeBits;
+
+    uint16_t IsOpaque : 1;
+  };
+
+  class MemSDNodeBitfields {
+    friend class MemSDNode;
+    friend class MemIntrinsicSDNode;
+    friend class AtomicSDNode;
+
+    uint16_t : NumSDNodeBits;
+
+    uint16_t IsVolatile : 1;
+    uint16_t IsNonTemporal : 1;
+    uint16_t IsDereferenceable : 1;
+    uint16_t IsInvariant : 1;
+  };
+  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
+
+  class LSBaseSDNodeBitfields {
+    friend class LSBaseSDNode;
+
+    uint16_t : NumMemSDNodeBits;
+
+    uint16_t AddressingMode : 3; // enum ISD::MemIndexedMode
+  };
+  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
+
+  class LoadSDNodeBitfields {
+    friend class LoadSDNode;
+    friend class MaskedLoadSDNode;
+
+    uint16_t : NumLSBaseSDNodeBits;
+
+    uint16_t ExtTy : 2; // enum ISD::LoadExtType
+    uint16_t IsExpanding : 1;
+  };
+
+  class StoreSDNodeBitfields {
+    friend class StoreSDNode;
+    friend class MaskedStoreSDNode;
+
+    uint16_t : NumLSBaseSDNodeBits;
+
+    uint16_t IsTruncating : 1;
+    uint16_t IsCompressing : 1;
+  };
+
+  union {
+    char RawSDNodeBits[sizeof(uint16_t)];
+    SDNodeBitfields SDNodeBits;
+    ConstantSDNodeBitfields ConstantSDNodeBits;
+    MemSDNodeBitfields MemSDNodeBits;
+    LSBaseSDNodeBitfields LSBaseSDNodeBits;
+    LoadSDNodeBitfields LoadSDNodeBits;
+    StoreSDNodeBitfields StoreSDNodeBits;
+  };
+
+  // RawSDNodeBits must cover the entirety of the union.  This means that all of
+  // the union's members must have size <= RawSDNodeBits.  We write the RHS as
+  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
+  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(LoadSDNodeBitfields) <= 4, "field too wide");
+  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
+
+private:
+  friend class SelectionDAG;
+  // TODO: unfriend HandleSDNode once we fix its operand handling.
+  friend class HandleSDNode;
+
+  /// Unique id per SDNode in the DAG.
+  int NodeId = -1;
+
+  /// The values that are used by this operation.
+  SDUse *OperandList = nullptr;
+
+  /// The types of the values this node defines.  SDNodes may
+  /// define multiple values simultaneously.
+  const EVT *ValueList;
+
+  /// List of uses for this SDNode.
+  SDUse *UseList = nullptr;
+
+  /// The number of entries in the Operand/Value list.
+  unsigned short NumOperands = 0;
+  unsigned short NumValues;
+
+  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
+  // original LLVM instructions.
+  // This is used for turning off scheduling, because we'll forgo
+  // the normal scheduling algorithms and output the instructions according to
+  // this ordering.
+  unsigned IROrder;
+
+  /// Source line information.
+  DebugLoc debugLoc;
+
+  /// Return a pointer to the specified value type.
+  static const EVT *getValueTypeList(EVT VT);
+
+  SDNodeFlags Flags;
+
+public:
+  /// Unique and persistent id per SDNode in the DAG.
+  /// Used for debug printing.
+  uint16_t PersistentId;
+
+  //===--------------------------------------------------------------------===//
+  //  Accessors
+  //
+
+  /// Return the SelectionDAG opcode value for this node. For
+  /// pre-isel nodes (those for which isMachineOpcode returns false), these
+  /// are the opcode values in the ISD and <target>ISD namespaces. For
+  /// post-isel opcodes, see getMachineOpcode.
+  unsigned getOpcode()  const { return (unsigned short)NodeType; }
+
+  /// Test if this node has a target-specific opcode (in the
+  /// \<target\>ISD namespace).
+  bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
+
+  /// Test if this node has a target-specific
+  /// memory-referencing opcode (in the \<target\>ISD namespace and
+  /// greater than FIRST_TARGET_MEMORY_OPCODE).
+  bool isTargetMemoryOpcode() const {
+    return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
+  }
+
+  /// Return true if the node's type is undefined.
+  bool isUndef() const { return NodeType == ISD::UNDEF; }
+
+  /// Test if this node is a memory intrinsic (with valid pointer information).
+  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
+  /// non-memory intrinsics (with chains) that are not really instances of
+  /// MemSDNode. For such nodes, we need some extra state to determine the
+  /// proper classof relationship.
+  bool isMemIntrinsic() const {
+    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
+            NodeType == ISD::INTRINSIC_VOID) &&
+           SDNodeBits.IsMemIntrinsic;
+  }
+
+  /// Test if this node is a strict floating point pseudo-op.
+  bool isStrictFPOpcode() {
+    switch (NodeType) {
+      default:
+        return false;
+      case ISD::STRICT_FADD:
+      case ISD::STRICT_FSUB:
+      case ISD::STRICT_FMUL:
+      case ISD::STRICT_FDIV:
+      case ISD::STRICT_FREM:
+      case ISD::STRICT_FMA:
+      case ISD::STRICT_FSQRT:
+      case ISD::STRICT_FPOW:
+      case ISD::STRICT_FPOWI:
+      case ISD::STRICT_FSIN:
+      case ISD::STRICT_FCOS:
+      case ISD::STRICT_FEXP:
+      case ISD::STRICT_FEXP2:
+      case ISD::STRICT_FLOG:
+      case ISD::STRICT_FLOG10:
+      case ISD::STRICT_FLOG2:
+      case ISD::STRICT_FRINT:
+      case ISD::STRICT_FNEARBYINT:
+        return true;
+    }
+  }
+
+  /// Test if this node has a post-isel opcode, directly
+  /// corresponding to a MachineInstr opcode.
+  bool isMachineOpcode() const { return NodeType < 0; }
+
+  /// This may only be called if isMachineOpcode returns
+  /// true. It returns the MachineInstr opcode value that the node's opcode
+  /// corresponds to.
+  unsigned getMachineOpcode() const {
+    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
+    return ~NodeType;
+  }
+
+  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
+  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
+
+  bool isDivergent() const { return SDNodeBits.IsDivergent; }
+
+  /// Return true if there are no uses of this node.
+  bool use_empty() const { return UseList == nullptr; }
+
+  /// Return true if there is exactly one use of this node.
+  bool hasOneUse() const {
+    return !use_empty() && std::next(use_begin()) == use_end();
+  }
+
+  /// Return the number of uses of this node. This method takes
+  /// time proportional to the number of uses.
+  size_t use_size() const { return std::distance(use_begin(), use_end()); }
+
+  /// Return the unique node id.
+  int getNodeId() const { return NodeId; }
+
+  /// Set unique node id.
+  void setNodeId(int Id) { NodeId = Id; }
+
+  /// Return the node ordering.
+  unsigned getIROrder() const { return IROrder; }
+
+  /// Set the node ordering.
+  void setIROrder(unsigned Order) { IROrder = Order; }
+
+  /// Return the source location info.
+  const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+  /// Set source location info.  Try to avoid this, putting
+  /// it in the constructor is preferable.
+  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
+
+  /// This class provides iterator support for SDUse
+  /// operands that use a specific SDNode.
+  class use_iterator
+    : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
+    friend class SDNode;
+
+    SDUse *Op = nullptr;
+
+    explicit use_iterator(SDUse *op) : Op(op) {}
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    SDUse, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  SDUse, ptrdiff_t>::pointer;
+
+    use_iterator() = default;
+    use_iterator(const use_iterator &I) : Op(I.Op) {}
+
+    bool operator==(const use_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const use_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// Return true if this iterator is at the end of uses list.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only.
+    use_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      Op = Op->getNext();
+      return *this;
+    }
+
+    use_iterator operator++(int) {        // Postincrement
+      use_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    /// Retrieve a pointer to the current user node.
+    SDNode *operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op->getUser();
+    }
+
+    SDNode *operator->() const { return operator*(); }
+
+    SDUse &getUse() const { return *Op; }
+
+    /// Retrieve the operand # of this use in its user.
+    unsigned getOperandNo() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return (unsigned)(Op - Op->getUser()->OperandList);
+    }
+  };
+
+  /// Provide iteration support to walk over all uses of an SDNode.
+  use_iterator use_begin() const {
+    return use_iterator(UseList);
+  }
+
+  static use_iterator use_end() { return use_iterator(nullptr); }
+
+  inline iterator_range<use_iterator> uses() {
+    return make_range(use_begin(), use_end());
+  }
+  inline iterator_range<use_iterator> uses() const {
+    return make_range(use_begin(), use_end());
+  }
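+
+  // Range-based iteration visits the user node of each use (a sketch,
+  // assuming N is an SDNode pointer):
+  //   for (SDNode *U : N->uses())
+  //     ; // U is a node that uses some result of N (once per use)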
+
+  /// Return true if there are exactly NUses uses of the indicated value.
+  /// This method ignores uses of other values defined by this operation.
+  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
+
+  /// Return true if there are any uses of the indicated value.
+  /// This method ignores uses of other values defined by this operation.
+  bool hasAnyUseOfValue(unsigned Value) const;
+
+  /// Return true if this node is the only use of N.
+  bool isOnlyUserOf(const SDNode *N) const;
+
+  /// Return true if this node is an operand of N.
+  bool isOperandOf(const SDNode *N) const;
+
+  /// Return true if this node is a predecessor of N.
+  /// NOTE: Implemented on top of hasPredecessor and every bit as
+  /// expensive. Use carefully.
+  bool isPredecessorOf(const SDNode *N) const {
+    return N->hasPredecessor(this);
+  }
+
+  /// Return true if N is a predecessor of this node.
+  /// N is either an operand of this node, or can be reached by recursively
+  /// traversing up the operands.
+  /// NOTE: This is an expensive method. Use it carefully.
+  bool hasPredecessor(const SDNode *N) const;
+
+  /// Returns true if N is a predecessor of any node in Worklist. This
+  /// helper keeps Visited and Worklist sets externally so that searches
+  /// can be unioned and performed in parallel, results can be cached
+  /// across queries, and nodes can be added to Worklist incrementally.
+  /// Stops early if N is found, but can resume on later calls. Remember
+  /// to clear Visited and Worklist if the DAG changes. MaxSteps gives a
+  /// maximum number of nodes to visit before giving up. The
+  /// TopologicalPrune flag signals that positive NodeIds are topologically
+  /// ordered (operands have strictly smaller node ids) and the search can
+  /// be pruned by leveraging this.
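+  ///
+  /// A hedged usage sketch (N and M are assumed to be nodes in one DAG):
+  /// \code
+  ///   SmallPtrSet<const SDNode *, 32> Visited;
+  ///   SmallVector<const SDNode *, 16> Worklist;
+  ///   Worklist.push_back(M);
+  ///   bool NPrecedesM = SDNode::hasPredecessorHelper(N, Visited, Worklist);
+  /// \endcode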
+  static bool hasPredecessorHelper(const SDNode *N,
+                                   SmallPtrSetImpl<const SDNode *> &Visited,
+                                   SmallVectorImpl<const SDNode *> &Worklist,
+                                   unsigned int MaxSteps = 0,
+                                   bool TopologicalPrune = false) {
+    SmallVector<const SDNode *, 8> DeferredNodes;
+    if (Visited.count(N))
+      return true;
+
+    // Node Ids are assigned in three places: as a topological
+    // ordering (> 0), during legalization (values set to 0), and for
+    // new nodes (set to -1). If N has a topological id then we know
+    // that all nodes with smaller ids cannot be successors, so we need
+    // not check them. Filter out all nodes that can't be matches; we
+    // add them back to the worklist before exiting in case of multiple
+    // calls. Note that during selection the topological ordering may be
+    // violated if a node's predecessor is selected before it. We mark
+    // this at selection time by negating the ids of unselected
+    // successors and restricting topological pruning to positive ids.
+
+    int NId = N->getNodeId();
+    // If the Id was invalidated, reconstruct the original NId.
+    if (NId < -1)
+      NId = -(NId + 1);
+
+    bool Found = false;
+    while (!Worklist.empty()) {
+      const SDNode *M = Worklist.pop_back_val();
+      int MId = M->getNodeId();
+      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
+          (MId > 0) && (MId < NId)) {
+        DeferredNodes.push_back(M);
+        continue;
+      }
+      for (const SDValue &OpV : M->op_values()) {
+        SDNode *Op = OpV.getNode();
+        if (Visited.insert(Op).second)
+          Worklist.push_back(Op);
+        if (Op == N)
+          Found = true;
+      }
+      if (Found)
+        break;
+      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
+        break;
+    }
+    // Push deferred nodes back on worklist.
+    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
+    // If we bailed out early, conservatively return true, since a
+    // predecessor may exist among the unvisited nodes.
+    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
+      return true;
+    return Found;
+  }
+
+  /// Return true if all the users of N are contained in Nodes.
+  /// NOTE: N must have at least one use, but not every node in Nodes
+  /// needs to be a user of N.
+  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
+
+  /// Return the number of values used by this operation.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  /// Helper method returns the integer value of a ConstantSDNode operand.
+  inline uint64_t getConstantOperandVal(unsigned Num) const;
+
+  const SDValue &getOperand(unsigned Num) const {
+    assert(Num < NumOperands && "Invalid child # of SDNode!");
+    return OperandList[Num];
+  }
+
+  using op_iterator = SDUse *;
+
+  op_iterator op_begin() const { return OperandList; }
+  op_iterator op_end() const { return OperandList+NumOperands; }
+  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
+
+  /// Iterator for directly iterating over the operand SDValue's.
+  struct value_op_iterator
+      : iterator_adaptor_base<value_op_iterator, op_iterator,
+                              std::random_access_iterator_tag, SDValue,
+                              ptrdiff_t, value_op_iterator *,
+                              value_op_iterator *> {
+    explicit value_op_iterator(SDUse *U = nullptr)
+      : iterator_adaptor_base(U) {}
+
+    const SDValue &operator*() const { return I->get(); }
+  };
+
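+  /// Range over the operand SDValues; a minimal sketch (N is assumed to
+  /// be a valid SDNode*):
+  /// \code
+  ///   unsigned NumUndefOps = 0;
+  ///   for (const SDValue &Op : N->op_values())
+  ///     if (Op.isUndef())
+  ///       ++NumUndefOps;
+  /// \endcode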
+  iterator_range<value_op_iterator> op_values() const {
+    return make_range(value_op_iterator(op_begin()),
+                      value_op_iterator(op_end()));
+  }
+
+  SDVTList getVTList() const {
+    SDVTList X = { ValueList, NumValues };
+    return X;
+  }
+
+  /// If this node has a glue operand, return the node
+  /// to which the glue operand points. Otherwise return NULL.
+  SDNode *getGluedNode() const {
+    if (getNumOperands() != 0 &&
+        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
+      return getOperand(getNumOperands()-1).getNode();
+    return nullptr;
+  }
+
+  /// If this node has a glue value with a user, return
+  /// the user (there is at most one). Otherwise return NULL.
+  SDNode *getGluedUser() const {
+    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+      if (UI.getUse().get().getValueType() == MVT::Glue)
+        return *UI;
+    return nullptr;
+  }
+
+  const SDNodeFlags getFlags() const { return Flags; }
+  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
+
+  /// Clear any flags in this node that aren't also set in Flags.
+  /// If Flags is not in a defined state then this has no effect.
+  void intersectFlagsWith(const SDNodeFlags Flags);
+
+  /// Return the number of values defined/returned by this operator.
+  unsigned getNumValues() const { return NumValues; }
+
+  /// Return the type of a specified result.
+  EVT getValueType(unsigned ResNo) const {
+    assert(ResNo < NumValues && "Illegal result number!");
+    return ValueList[ResNo];
+  }
+
+  /// Return the type of a specified result as a simple type.
+  MVT getSimpleValueType(unsigned ResNo) const {
+    return getValueType(ResNo).getSimpleVT();
+  }
+
+  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
+  unsigned getValueSizeInBits(unsigned ResNo) const {
+    return getValueType(ResNo).getSizeInBits();
+  }
+
+  using value_iterator = const EVT *;
+
+  value_iterator value_begin() const { return ValueList; }
+  value_iterator value_end() const { return ValueList+NumValues; }
+
+  /// Return the opcode of this operation for printing.
+  std::string getOperationName(const SelectionDAG *G = nullptr) const;
+  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
+  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
+  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
+  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+
+  /// Print a SelectionDAG node and all children down to
+  /// the leaves.  The given SelectionDAG allows target-specific nodes
+  /// to be printed in human-readable form.  Unlike printr, this will
+  /// print the whole DAG, including children that appear multiple
+  /// times.
+  ///
+  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
+
+  /// Print a SelectionDAG node and children up to
+  /// depth "depth."  The given SelectionDAG allows target-specific
+  /// nodes to be printed in human-readable form.  Unlike printr, this
+  /// will print children that appear multiple times wherever they are
+  /// used.
+  ///
+  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
+                       unsigned depth = 100) const;
+
+  /// Dump this node, for debugging.
+  void dump() const;
+
+  /// Dump (recursively) this node and its use-def subgraph.
+  void dumpr() const;
+
+  /// Dump this node, for debugging.
+  /// The given SelectionDAG allows target-specific nodes to be printed
+  /// in human-readable form.
+  void dump(const SelectionDAG *G) const;
+
+  /// Dump (recursively) this node and its use-def subgraph.
+  /// The given SelectionDAG allows target-specific nodes to be printed
+  /// in human-readable form.
+  void dumpr(const SelectionDAG *G) const;
+
+  /// printrFull to dbgs().  The given SelectionDAG allows
+  /// target-specific nodes to be printed in human-readable form.
+  /// Unlike dumpr, this will print the whole DAG, including children
+  /// that appear multiple times.
+  void dumprFull(const SelectionDAG *G = nullptr) const;
+
+  /// printrWithDepth to dbgs().  The given
+  /// SelectionDAG allows target-specific nodes to be printed in
+  /// human-readable form.  Unlike dumpr, this will print children
+  /// that appear multiple times wherever they are used.
+  ///
+  void dumprWithDepth(const SelectionDAG *G = nullptr,
+                      unsigned depth = 100) const;
+
+  /// Gather unique data for the node.
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// This method should only be used by the SDUse class.
+  void addUse(SDUse &U) { U.addToList(&UseList); }
+
+protected:
+  static SDVTList getSDVTList(EVT VT) {
+    SDVTList Ret = { getValueTypeList(VT), 1 };
+    return Ret;
+  }
+
+  /// Create an SDNode.
+  ///
+  /// SDNodes are created without any operands, and never own the operand
+  /// storage. To add operands, see SelectionDAG::createOperands.
+  SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
+      : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
+        IROrder(Order), debugLoc(std::move(dl)) {
+    memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
+    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+    assert(NumValues == VTs.NumVTs &&
+           "NumValues wasn't wide enough for its operands!");
+  }
+
+  /// Release the operands and set this node to have zero operands.
+  void DropOperands();
+};
+
+/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
+/// into SDNode creation functions.
+/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
+/// from the original Instruction, and IROrder is the ordinal position of
+/// the instruction.
+/// When an SDNode is created after the DAG is being built, both DebugLoc and
+/// the IROrder are propagated from the original SDNode.
+/// So SDLoc class provides two constructors besides the default one, one to
+/// be used by the DAGBuilder, the other to be used by others.
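+///
+/// A minimal sketch (OldNode, DAG, VT, LHS and RHS are assumed to exist):
+/// \code
+///   SDLoc DL(OldNode);   // inherit location info from an existing node
+///   SDValue Sum = DAG.getNode(ISD::ADD, DL, VT, LHS, RHS);
+/// \endcode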
+class SDLoc {
+private:
+  DebugLoc DL;
+  int IROrder = 0;
+
+public:
+  SDLoc() = default;
+  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
+  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
+  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
+    assert(Order >= 0 && "bad IROrder");
+    if (I)
+      DL = I->getDebugLoc();
+  }
+
+  unsigned getIROrder() const { return IROrder; }
+  const DebugLoc &getDebugLoc() const { return DL; }
+};
+
+// Define inline functions from the SDValue class.
+
+inline SDValue::SDValue(SDNode *node, unsigned resno)
+    : Node(node), ResNo(resno) {
+  // Explicitly check for !ResNo to avoid use-after-free, because there are
+  // callers that use SDValue(N, 0) with a deleted N to indicate successful
+  // combines.
+  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
+         "Invalid result number for the given node!");
+  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
+}
+
+inline unsigned SDValue::getOpcode() const {
+  return Node->getOpcode();
+}
+
+inline EVT SDValue::getValueType() const {
+  return Node->getValueType(ResNo);
+}
+
+inline unsigned SDValue::getNumOperands() const {
+  return Node->getNumOperands();
+}
+
+inline const SDValue &SDValue::getOperand(unsigned i) const {
+  return Node->getOperand(i);
+}
+
+inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
+  return Node->getConstantOperandVal(i);
+}
+
+inline bool SDValue::isTargetOpcode() const {
+  return Node->isTargetOpcode();
+}
+
+inline bool SDValue::isTargetMemoryOpcode() const {
+  return Node->isTargetMemoryOpcode();
+}
+
+inline bool SDValue::isMachineOpcode() const {
+  return Node->isMachineOpcode();
+}
+
+inline unsigned SDValue::getMachineOpcode() const {
+  return Node->getMachineOpcode();
+}
+
+inline bool SDValue::isUndef() const {
+  return Node->isUndef();
+}
+
+inline bool SDValue::use_empty() const {
+  return !Node->hasAnyUseOfValue(ResNo);
+}
+
+inline bool SDValue::hasOneUse() const {
+  return Node->hasNUsesOfValue(1, ResNo);
+}
+
+inline const DebugLoc &SDValue::getDebugLoc() const {
+  return Node->getDebugLoc();
+}
+
+inline void SDValue::dump() const {
+  return Node->dump();
+}
+
+inline void SDValue::dump(const SelectionDAG *G) const {
+  return Node->dump(G);
+}
+
+inline void SDValue::dumpr() const {
+  return Node->dumpr();
+}
+
+inline void SDValue::dumpr(const SelectionDAG *G) const {
+  return Node->dumpr(G);
+}
+
+// Define inline functions from the SDUse class.
+
+inline void SDUse::set(const SDValue &V) {
+  if (Val.getNode()) removeFromList();
+  Val = V;
+  if (V.getNode()) V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setInitial(const SDValue &V) {
+  Val = V;
+  V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setNode(SDNode *N) {
+  if (Val.getNode()) removeFromList();
+  Val.setNode(N);
+  if (N) N->addUse(*this);
+}
+
+/// This class is used to form a handle around another node that
+/// is persistent and is updated across invocations of replaceAllUsesWith on its
+/// operand.  This node should be directly created by end-users and not added to
+/// the AllNodes list.
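+///
+/// A typical use is protecting an SDValue across a call that may delete
+/// nodes; a hedged sketch (DAG, Old and New are assumed to be valid):
+/// \code
+///   HandleSDNode Handle(Old);
+///   DAG.ReplaceAllUsesWith(Old, New);   // may CSE or delete nodes
+///   SDValue Survivor = Handle.getValue();
+/// \endcode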
+class HandleSDNode : public SDNode {
+  SDUse Op;
+
+public:
+  explicit HandleSDNode(SDValue X)
+    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
+    // HandleSDNodes are never inserted into the DAG, so they won't be
+    // auto-numbered. Use ID 65535 as a sentinel.
+    PersistentId = 0xffff;
+
+    // Manually set up the operand list. This node type is special in that it's
+    // always stack allocated and SelectionDAG does not manage its operands.
+    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
+    // be so special.
+    Op.setUser(this);
+    Op.setInitial(X);
+    NumOperands = 1;
+    OperandList = &Op;
+  }
+  ~HandleSDNode();
+
+  const SDValue &getValue() const { return Op; }
+};
+
+class AddrSpaceCastSDNode : public SDNode {
+private:
+  unsigned SrcAddrSpace;
+  unsigned DestAddrSpace;
+
+public:
+  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
+                      unsigned SrcAS, unsigned DestAS);
+
+  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
+  unsigned getDestAddressSpace() const { return DestAddrSpace; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ADDRSPACECAST;
+  }
+};
+
+/// This is an abstract virtual class for memory operations.
+class MemSDNode : public SDNode {
+private:
+  // VT of in-memory value.
+  EVT MemoryVT;
+
+protected:
+  /// Memory reference information.
+  MachineMemOperand *MMO;
+
+public:
+  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
+            EVT MemoryVT, MachineMemOperand *MMO);
+
+  bool readMem() const { return MMO->isLoad(); }
+  bool writeMem() const { return MMO->isStore(); }
+
+  /// Returns the alignment of the memory access.
+  unsigned getOriginalAlignment() const {
+    return MMO->getBaseAlignment();
+  }
+  unsigned getAlignment() const {
+    return MMO->getAlignment();
+  }
+
+  /// Return the SubclassData value, without HasDebugValue. This contains an
+  /// encoding of the volatile flag, as well as bits used by subclasses. This
+  /// function should only be used to compute a FoldingSetNodeID value.
+  /// The HasDebugValue bit is masked out because the CSE map needs to
+  /// match nodes with debug info against nodes without debug info. The
+  /// same applies to the IsDivergent bit.
+  unsigned getRawSubclassData() const {
+    uint16_t Data;
+    union {
+      char RawSDNodeBits[sizeof(uint16_t)];
+      SDNodeBitfields SDNodeBits;
+    };
+    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
+    SDNodeBits.HasDebugValue = 0;
+    SDNodeBits.IsDivergent = false;
+    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
+    return Data;
+  }
+
+  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
+  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
+  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
+  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
+
+  // Returns the offset from the location of the access.
+  int64_t getSrcValueOffset() const { return MMO->getOffset(); }
+
+  /// Returns the AA info that describes the dereference.
+  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
+
+  /// Returns the Ranges that describes the dereference.
+  const MDNode *getRanges() const { return MMO->getRanges(); }
+
+  /// Returns the synchronization scope ID for this memory operation.
+  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
+
+  /// Return the atomic ordering requirements for this memory operation. For
+  /// cmpxchg atomic operations, return the atomic ordering requirements when
+  /// store occurs.
+  AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
+
+  /// Return the type of the in-memory value.
+  EVT getMemoryVT() const { return MemoryVT; }
+
+  /// Return a MachineMemOperand object describing the memory
+  /// reference performed by operation.
+  MachineMemOperand *getMemOperand() const { return MMO; }
+
+  const MachinePointerInfo &getPointerInfo() const {
+    return MMO->getPointerInfo();
+  }
+
+  /// Return the address space for the associated pointer
+  unsigned getAddressSpace() const {
+    return getPointerInfo().getAddrSpace();
+  }
+
+  /// Update this MemSDNode's MachineMemOperand information
+  /// to reflect the alignment of NewMMO, if it has a greater alignment.
+  /// This must only be used when the new alignment applies to all users of
+  /// this MachineMemOperand.
+  void refineAlignment(const MachineMemOperand *NewMMO) {
+    MMO->refineAlignment(NewMMO);
+  }
+
+  const SDValue &getChain() const { return getOperand(0); }
+  const SDValue &getBasePtr() const {
+    return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
+    // with either an intrinsic or a target opcode.
+    return N->getOpcode() == ISD::LOAD                ||
+           N->getOpcode() == ISD::STORE               ||
+           N->getOpcode() == ISD::PREFETCH            ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+           N->getOpcode() == ISD::ATOMIC_SWAP         ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD         ||
+           N->getOpcode() == ISD::ATOMIC_STORE        ||
+           N->getOpcode() == ISD::MLOAD               ||
+           N->getOpcode() == ISD::MSTORE              ||
+           N->getOpcode() == ISD::MGATHER             ||
+           N->getOpcode() == ISD::MSCATTER            ||
+           N->isMemIntrinsic()                        ||
+           N->isTargetMemoryOpcode();
+  }
+};
+
+/// This is an SDNode representing atomic operations.
+class AtomicSDNode : public MemSDNode {
+public:
+  AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
+               EVT MemVT, MachineMemOperand *MMO)
+      : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {}
+
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getVal() const { return getOperand(2); }
+
+  /// Returns true if this SDNode represents cmpxchg atomic operation, false
+  /// otherwise.
+  bool isCompareAndSwap() const {
+    unsigned Op = getOpcode();
+    return Op == ISD::ATOMIC_CMP_SWAP ||
+           Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
+  }
+
+  /// For cmpxchg atomic operations, return the atomic ordering requirements
+  /// when store does not occur.
+  AtomicOrdering getFailureOrdering() const {
+    assert(isCompareAndSwap() && "Must be cmpxchg operation");
+    return MMO->getFailureOrdering();
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+           N->getOpcode() == ISD::ATOMIC_SWAP         ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD         ||
+           N->getOpcode() == ISD::ATOMIC_STORE;
+  }
+};
+
+/// This SDNode is used for target intrinsics that touch
+/// memory and need an associated MachineMemOperand. Its opcode may be
+/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
+/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
+class MemIntrinsicSDNode : public MemSDNode {
+public:
+  MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
+                     SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
+      : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
+    SDNodeBits.IsMemIntrinsic = true;
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    // We lower some target intrinsics to their target opcode early, so a
+    // node with a target opcode can be of this class.
+    return N->isMemIntrinsic()             ||
+           N->getOpcode() == ISD::PREFETCH ||
+           N->isTargetMemoryOpcode();
+  }
+};
+
+/// This SDNode is used to implement the code generator
+/// support for the llvm IR shufflevector instruction.  It combines elements
+/// from two input vectors into a new input vector, with the selection and
+/// ordering of elements determined by an array of integers, referred to as
+/// the shuffle mask.  For input vectors of width N, mask indices of 0..N-1
+/// refer to elements from the LHS input, and indices from N to 2N-1 refer
+/// to elements from the RHS input.
+/// An index of -1 is treated as undef, such that the code generator may put
+/// any value in the corresponding element of the result.
+class ShuffleVectorSDNode : public SDNode {
+  // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
+  // is freed when the SelectionDAG object is destroyed.
+  const int *Mask;
+
+protected:
+  friend class SelectionDAG;
+
+  ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
+      : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
+
+public:
+  ArrayRef<int> getMask() const {
+    EVT VT = getValueType(0);
+    return makeArrayRef(Mask, VT.getVectorNumElements());
+  }
+
+  int getMaskElt(unsigned Idx) const {
+    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
+    return Mask[Idx];
+  }
+
+  bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
+
+  int  getSplatIndex() const {
+    assert(isSplat() && "Cannot get splat index for non-splat!");
+    EVT VT = getValueType(0);
+    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+      if (Mask[i] >= 0)
+        return Mask[i];
+    }
+    llvm_unreachable("Splat with all undef indices?");
+  }
+
+  static bool isSplatMask(const int *Mask, EVT VT);
+
+  /// Change values in a shuffle permute mask assuming
+  /// the two vector operands have swapped position.
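+  /// For example, with 4-element inputs the mask [0, 5, 2, 7] becomes
+  /// [4, 1, 6, 3]; undef entries (-1) are left unchanged.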
+  static void commuteMask(MutableArrayRef<int> Mask) {
+    unsigned NumElems = Mask.size();
+    for (unsigned i = 0; i != NumElems; ++i) {
+      int idx = Mask[i];
+      if (idx < 0)
+        continue;
+      else if (idx < (int)NumElems)
+        Mask[i] = idx + NumElems;
+      else
+        Mask[i] = idx - NumElems;
+    }
+  }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::VECTOR_SHUFFLE;
+  }
+};
+
+class ConstantSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const ConstantInt *Value;
+
+  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
+                 const DebugLoc &DL, EVT VT)
+      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DL,
+               getSDVTList(VT)),
+        Value(val) {
+    ConstantSDNodeBits.IsOpaque = isOpaque;
+  }
+
+public:
+  const ConstantInt *getConstantIntValue() const { return Value; }
+  const APInt &getAPIntValue() const { return Value->getValue(); }
+  uint64_t getZExtValue() const { return Value->getZExtValue(); }
+  int64_t getSExtValue() const { return Value->getSExtValue(); }
+  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
+    return Value->getLimitedValue(Limit);
+  }
+
+  bool isOne() const { return Value->isOne(); }
+  bool isNullValue() const { return Value->isZero(); }
+  bool isAllOnesValue() const { return Value->isMinusOne(); }
+
+  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::Constant ||
+           N->getOpcode() == ISD::TargetConstant;
+  }
+};
+
+uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
+  return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
+}
+
+class ConstantFPSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const ConstantFP *Value;
+
+  ConstantFPSDNode(bool isTarget, const ConstantFP *val, const DebugLoc &DL,
+                   EVT VT)
+      : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, DL,
+               getSDVTList(VT)),
+        Value(val) {}
+
+public:
+  const APFloat& getValueAPF() const { return Value->getValueAPF(); }
+  const ConstantFP *getConstantFPValue() const { return Value; }
+
+  /// Return true if the value is positive or negative zero.
+  bool isZero() const { return Value->isZero(); }
+
+  /// Return true if the value is a NaN.
+  bool isNaN() const { return Value->isNaN(); }
+
+  /// Return true if the value is an infinity
+  bool isInfinity() const { return Value->isInfinity(); }
+
+  /// Return true if the value is negative.
+  bool isNegative() const { return Value->isNegative(); }
+
+  /// We don't rely on operator== working on double values, as
+  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+  /// As such, this method can be used to do an exact bit-for-bit comparison of
+  /// two floating point values.
+
+  /// We leave the version with the double argument here because it's just so
+  /// convenient to write "2.0" and the like.  Without this function we'd
+  /// have to duplicate its logic everywhere it's called.
+  bool isExactlyValue(double V) const {
+    return Value->getValueAPF().isExactlyValue(V);
+  }
+  bool isExactlyValue(const APFloat& V) const;
+
+  static bool isValueValidForType(EVT VT, const APFloat& Val);
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ConstantFP ||
+           N->getOpcode() == ISD::TargetConstantFP;
+  }
+};
+
+/// Returns true if \p V is a constant integer zero.
+bool isNullConstant(SDValue V);
+
+/// Returns true if \p V is an FP constant with a value of positive zero.
+bool isNullFPConstant(SDValue V);
+
+/// Returns true if \p V is an integer constant with all bits set.
+bool isAllOnesConstant(SDValue V);
+
+/// Returns true if \p V is a constant integer one.
+bool isOneConstant(SDValue V);
+
+/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
+/// constant is canonicalized to be operand 1.
+bool isBitwiseNot(SDValue V);
+
+/// Returns the SDNode if it is a constant splat BuildVector or constant int.
+ConstantSDNode *isConstOrConstSplat(SDValue V);
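+// A hedged sketch of the usual query pattern (Op is assumed to be an
+// SDValue):
+//   bool SplatPow2 = false;
+//   if (ConstantSDNode *C = isConstOrConstSplat(Op))
+//     SplatPow2 = C->getAPIntValue().isPowerOf2();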
+
+/// Returns the SDNode if it is a constant splat BuildVector or constant float.
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue V);
+
+class GlobalAddressSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const GlobalValue *TheGlobal;
+  int64_t Offset;
+  unsigned char TargetFlags;
+
+  GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
+                      const GlobalValue *GA, EVT VT, int64_t o,
+                      unsigned char TargetFlags);
+
+public:
+  const GlobalValue *getGlobal() const { return TheGlobal; }
+  int64_t getOffset() const { return Offset; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+  // Return the address space this GlobalAddress belongs to.
+  unsigned getAddressSpace() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::GlobalAddress ||
+           N->getOpcode() == ISD::TargetGlobalAddress ||
+           N->getOpcode() == ISD::GlobalTLSAddress ||
+           N->getOpcode() == ISD::TargetGlobalTLSAddress;
+  }
+};
+
+class FrameIndexSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  int FI;
+
+  FrameIndexSDNode(int fi, EVT VT, bool isTarg)
+    : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
+      0, DebugLoc(), getSDVTList(VT)), FI(fi) {
+  }
+
+public:
+  int getIndex() const { return FI; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::FrameIndex ||
+           N->getOpcode() == ISD::TargetFrameIndex;
+  }
+};
+
+class JumpTableSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  int JTI;
+  unsigned char TargetFlags;
+
+  JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
+    : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
+      0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
+  }
+
+public:
+  int getIndex() const { return JTI; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::JumpTable ||
+           N->getOpcode() == ISD::TargetJumpTable;
+  }
+};
+
+class ConstantPoolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  union {
+    const Constant *ConstVal;
+    MachineConstantPoolValue *MachineCPVal;
+  } Val;
+  int Offset;  // It's a MachineConstantPoolValue if top bit is set.
+  unsigned Alignment;  // Minimum alignment requirement of CP (not log2 value).
+  unsigned char TargetFlags;
+
+  ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
+                     unsigned Align, unsigned char TF)
+    : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+             DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+             TargetFlags(TF) {
+    assert(Offset >= 0 && "Offset is too large");
+    Val.ConstVal = c;
+  }
+
+  ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
+                     EVT VT, int o, unsigned Align, unsigned char TF)
+    : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+             DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+             TargetFlags(TF) {
+    assert(Offset >= 0 && "Offset is too large");
+    Val.MachineCPVal = v;
+    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+  }
+
+public:
+  bool isMachineConstantPoolEntry() const {
+    return Offset < 0;
+  }
+
+  const Constant *getConstVal() const {
+    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
+    return Val.ConstVal;
+  }
+
+  MachineConstantPoolValue *getMachineCPVal() const {
+    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
+    return Val.MachineCPVal;
+  }
+
+  int getOffset() const {
+    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
+  }
+
+  // Return the alignment of this constant pool object, which is either 0 (for
+  // default alignment) or the desired value.
+  unsigned getAlignment() const { return Alignment; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  Type *getType() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ConstantPool ||
+           N->getOpcode() == ISD::TargetConstantPool;
+  }
+};
+
+/// Completely target-dependent object reference.
+class TargetIndexSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  unsigned char TargetFlags;
+  int Index;
+  int64_t Offset;
+
+public:
+  TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
+    : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
+      TargetFlags(TF), Index(Idx), Offset(Ofs) {}
+
+  unsigned char getTargetFlags() const { return TargetFlags; }
+  int getIndex() const { return Index; }
+  int64_t getOffset() const { return Offset; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::TargetIndex;
+  }
+};
+
+class BasicBlockSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MachineBasicBlock *MBB;
+
+  /// Debug info is meaningful and potentially useful here, but we create
+  /// blocks out of order when they're jumped to, which makes it a bit
+  /// harder.  Let's see if we need it first.
+  explicit BasicBlockSDNode(MachineBasicBlock *mbb)
+    : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
+  {}
+
+public:
+  MachineBasicBlock *getBasicBlock() const { return MBB; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BasicBlock;
+  }
+};
+
+/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
+class BuildVectorSDNode : public SDNode {
+public:
+  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
+  explicit BuildVectorSDNode() = delete;
+
+  /// Check if this is a constant splat, and if so, find the
+  /// smallest element size that splats the vector.  If MinSplatBits is
+  /// nonzero, the element size must be at least that large.  Note that the
+  /// splat element may be the entire vector (i.e., a one element vector).
+  /// Returns the splat element value in SplatValue.  Any undefined bits in
+  /// that value are zero, and the corresponding bits in the SplatUndef mask
+  /// are set.  The SplatBitSize value is set to the splat element size in
+  /// bits.  HasAnyUndefs is set to true if any bits in the vector are
+  /// undefined.  isBigEndian describes the endianness of the target.
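+  ///
+  /// A minimal sketch (BV is assumed to be a valid BuildVectorSDNode*):
+  /// \code
+  ///   APInt SplatBits, SplatUndef;
+  ///   unsigned SplatBitSize;
+  ///   bool HasAnyUndefs;
+  ///   bool IsSplat = BV->isConstantSplat(SplatBits, SplatUndef,
+  ///                                      SplatBitSize, HasAnyUndefs,
+  ///                                      /*MinSplatBits=*/8);
+  /// \endcode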
+  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
+                       unsigned &SplatBitSize, bool &HasAnyUndefs,
+                       unsigned MinSplatBits = 0,
+                       bool isBigEndian = false) const;
+
+  /// \brief Returns the splatted value or a null value if this is not a splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
+
+  /// \brief Returns the splatted constant or null if this is not a constant
+  /// splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  ConstantSDNode *
+  getConstantSplatNode(BitVector *UndefElements = nullptr) const;
+
+  /// \brief Returns the splatted constant FP or null if this is not a constant
+  /// FP splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  ConstantFPSDNode *
+  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
+
+  /// \brief If this is a constant FP splat and the splatted constant FP is an
+  /// exact power of 2, return the log base 2 integer value.  Otherwise,
+  /// return -1.
+  ///
+  /// The BitWidth specifies the necessary bit precision.
+  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
+                                          uint32_t BitWidth) const;
+
+  bool isConstant() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BUILD_VECTOR;
+  }
+};
+
+/// An SDNode that holds an arbitrary LLVM IR Value. This is
+/// used when the SelectionDAG needs to make a simple reference to something
+/// in the LLVM IR representation.
+///
+class SrcValueSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const Value *V;
+
+  /// Create a SrcValue for a general value.
+  explicit SrcValueSDNode(const Value *v)
+    : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
+
+public:
+  /// Return the contained Value.
+  const Value *getValue() const { return V; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::SRCVALUE;
+  }
+};
+
+class MDNodeSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const MDNode *MD;
+
+  explicit MDNodeSDNode(const MDNode *md)
+  : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
+  {}
+
+public:
+  const MDNode *getMD() const { return MD; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MDNODE_SDNODE;
+  }
+};
+
+class RegisterSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  unsigned Reg;
+
+  RegisterSDNode(unsigned reg, EVT VT)
+    : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
+
+public:
+  unsigned getReg() const { return Reg; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::Register;
+  }
+};
+
+class RegisterMaskSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  // The memory for RegMask is not owned by the node.
+  const uint32_t *RegMask;
+
+  RegisterMaskSDNode(const uint32_t *mask)
+    : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
+      RegMask(mask) {}
+
+public:
+  const uint32_t *getRegMask() const { return RegMask; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::RegisterMask;
+  }
+};
+
+class BlockAddressSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const BlockAddress *BA;
+  int64_t Offset;
+  unsigned char TargetFlags;
+
+  BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
+                     int64_t o, unsigned char Flags)
+    : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
+             BA(ba), Offset(o), TargetFlags(Flags) {}
+
+public:
+  const BlockAddress *getBlockAddress() const { return BA; }
+  int64_t getOffset() const { return Offset; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BlockAddress ||
+           N->getOpcode() == ISD::TargetBlockAddress;
+  }
+};
+
+class LabelSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MCSymbol *Label;
+
+  LabelSDNode(unsigned Order, const DebugLoc &dl, MCSymbol *L)
+      : SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {}
+
+public:
+  MCSymbol *getLabel() const { return Label; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::EH_LABEL ||
+           N->getOpcode() == ISD::ANNOTATION_LABEL;
+  }
+};
+
+class ExternalSymbolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const char *Symbol;
+  unsigned char TargetFlags;
+
+  ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
+    : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
+             0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {}
+
+public:
+  const char *getSymbol() const { return Symbol; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ExternalSymbol ||
+           N->getOpcode() == ISD::TargetExternalSymbol;
+  }
+};
+
+class MCSymbolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MCSymbol *Symbol;
+
+  MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
+      : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
+
+public:
+  MCSymbol *getMCSymbol() const { return Symbol; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MCSymbol;
+  }
+};
+
+class CondCodeSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  ISD::CondCode Condition;
+
+  explicit CondCodeSDNode(ISD::CondCode Cond)
+    : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+      Condition(Cond) {}
+
+public:
+  ISD::CondCode get() const { return Condition; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::CONDCODE;
+  }
+};
+
+/// This class is used to represent EVT's, which are used
+/// to parameterize some operations.
+class VTSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  EVT ValueType;
+
+  explicit VTSDNode(EVT VT)
+    : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+      ValueType(VT) {}
+
+public:
+  EVT getVT() const { return ValueType; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::VALUETYPE;
+  }
+};
+
+/// Base class for LoadSDNode and StoreSDNode
+class LSBaseSDNode : public MemSDNode {
+public:
+  LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
+               SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
+               MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
+    LSBaseSDNodeBits.AddressingMode = AM;
+    assert(getAddressingMode() == AM && "Value truncated");
+  }
+
+  const SDValue &getOffset() const {
+    return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
+  }
+
+  /// Return the addressing mode for this load or store:
+  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
+  ISD::MemIndexedMode getAddressingMode() const {
+    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
+  }
+
+  /// Return true if this is a pre/post inc/dec load/store.
+  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
+
+  /// Return true if this is NOT a pre/post inc/dec load/store.
+  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::LOAD ||
+           N->getOpcode() == ISD::STORE;
+  }
+};
+
+/// This class is used to represent ISD::LOAD nodes.
+class LoadSDNode : public LSBaseSDNode {
+  friend class SelectionDAG;
+
+  LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+             ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
+             MachineMemOperand *MMO)
+      : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
+    LoadSDNodeBits.ExtTy = ETy;
+    assert(readMem() && "Load MachineMemOperand is not a load!");
+    assert(!writeMem() && "Load MachineMemOperand is a store!");
+  }
+
+public:
+  /// Return whether this is a plain node,
+  /// or one of the varieties of value-extending loads.
+  ISD::LoadExtType getExtensionType() const {
+    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
+  }
+
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getOffset() const { return getOperand(2); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::LOAD;
+  }
+};
+
+/// This class is used to represent ISD::STORE nodes.
+class StoreSDNode : public LSBaseSDNode {
+  friend class SelectionDAG;
+
+  StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+              ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
+              MachineMemOperand *MMO)
+      : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
+    StoreSDNodeBits.IsTruncating = isTrunc;
+    assert(!readMem() && "Store MachineMemOperand is a load!");
+    assert(writeMem() && "Store MachineMemOperand is not a store!");
+  }
+
+public:
+  /// Return true if the op does a truncation before store.
+  /// For integers this is the same as doing a TRUNCATE and storing the result.
+  /// For floats, it is the same as doing an FP_ROUND and storing the result.
+  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+  void setTruncatingStore(bool Truncating) {
+    StoreSDNodeBits.IsTruncating = Truncating;
+  }
+
+  const SDValue &getValue() const { return getOperand(1); }
+  const SDValue &getBasePtr() const { return getOperand(2); }
+  const SDValue &getOffset() const { return getOperand(3); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::STORE;
+  }
+};
+
+/// This base class is used to represent MLOAD and MSTORE nodes
+class MaskedLoadStoreSDNode : public MemSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
+                        const DebugLoc &dl, SDVTList VTs, EVT MemVT,
+                        MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
+
+  // In both nodes the address is Op1 and the mask is Op2:
+  //   MaskedLoadSDNode  (Chain, ptr, mask, src0), src0 is a passthru value
+  //   MaskedStoreSDNode (Chain, ptr, mask, data)
+  // The mask is a vector of i1 elements.
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getMask() const    { return getOperand(2); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MLOAD ||
+           N->getOpcode() == ISD::MSTORE;
+  }
+};
+
+/// This class is used to represent an MLOAD node
+class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                   ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT,
+                   MachineMemOperand *MMO)
+      : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, MemVT, MMO) {
+    LoadSDNodeBits.ExtTy = ETy;
+    LoadSDNodeBits.IsExpanding = IsExpanding;
+  }
+
+  ISD::LoadExtType getExtensionType() const {
+    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
+  }
+
+  const SDValue &getSrc0() const { return getOperand(3); }
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MLOAD;
+  }
+
+  bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
+};
+
+/// This class is used to represent an MSTORE node
+class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                    bool isTrunc, bool isCompressing, EVT MemVT,
+                    MachineMemOperand *MMO)
+      : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
+    StoreSDNodeBits.IsTruncating = isTrunc;
+    StoreSDNodeBits.IsCompressing = isCompressing;
+  }
+
+  /// Return true if the op does a truncation before store.
+  /// For integers this is the same as doing a TRUNCATE and storing the result.
+  /// For floats, it is the same as doing an FP_ROUND and storing the result.
+  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+
+  /// Returns true if the op does a compression to the vector before storing.
+  /// The node contiguously stores the active elements (integers or floats)
+  /// in src (those with their respective bit set in writemask k) to unaligned
+  /// memory at base_addr.
+  bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
+
+  const SDValue &getValue() const { return getOperand(3); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MSTORE;
+  }
+};
+
+/// This is a base class used to represent
+/// MGATHER and MSCATTER nodes
+///
+class MaskedGatherScatterSDNode : public MemSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
+                            const DebugLoc &dl, SDVTList VTs, EVT MemVT,
+                            MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
+
+  // In both nodes the value/passthru is Op1 and the mask is Op2:
+  //   MaskedGatherSDNode  (Chain, passthru, mask, base, index, scale)
+  //   MaskedScatterSDNode (Chain, value, mask, base, index, scale)
+  // The mask is a vector of i1 elements.
+  const SDValue &getBasePtr() const { return getOperand(3); }
+  const SDValue &getIndex()   const { return getOperand(4); }
+  const SDValue &getMask()    const { return getOperand(2); }
+  const SDValue &getValue()   const { return getOperand(1); }
+  const SDValue &getScale()   const { return getOperand(5); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MGATHER ||
+           N->getOpcode() == ISD::MSCATTER;
+  }
+};
+
+/// This class is used to represent an MGATHER node
+///
+class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                     EVT MemVT, MachineMemOperand *MMO)
+      : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MGATHER;
+  }
+};
+
+/// This class is used to represent an MSCATTER node
+///
+class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                      EVT MemVT, MachineMemOperand *MMO)
+      : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MSCATTER;
+  }
+};
+
+/// An SDNode that represents everything that will be needed
+/// to construct a MachineInstr. These nodes are created during the
+/// instruction selection proper phase.
+class MachineSDNode : public SDNode {
+public:
+  using mmo_iterator = MachineMemOperand **;
+
+private:
+  friend class SelectionDAG;
+
+  MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
+      : SDNode(Opc, Order, DL, VTs) {}
+
+  /// Memory reference descriptions for this instruction.
+  mmo_iterator MemRefs = nullptr;
+  mmo_iterator MemRefsEnd = nullptr;
+
+public:
+  mmo_iterator memoperands_begin() const { return MemRefs; }
+  mmo_iterator memoperands_end() const { return MemRefsEnd; }
+  bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+
+  /// Assign this MachineSDNodes's memory reference descriptor
+  /// list. This does not transfer ownership.
+  void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+    for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
+      assert(*MMI && "Null mem ref detected!");
+    MemRefs = NewMemRefs;
+    MemRefsEnd = NewMemRefsEnd;
+  }
+
+  static bool classof(const SDNode *N) {
+    return N->isMachineOpcode();
+  }
+};
+
+class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
+                                            SDNode, ptrdiff_t> {
+  const SDNode *Node;
+  unsigned Operand;
+
+  SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+
+public:
+  bool operator==(const SDNodeIterator& x) const {
+    return Operand == x.Operand;
+  }
+  bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
+
+  pointer operator*() const {
+    return Node->getOperand(Operand).getNode();
+  }
+  pointer operator->() const { return operator*(); }
+
+  SDNodeIterator& operator++() {                // Preincrement
+    ++Operand;
+    return *this;
+  }
+  SDNodeIterator operator++(int) { // Postincrement
+    SDNodeIterator tmp = *this; ++*this; return tmp;
+  }
+  size_t operator-(SDNodeIterator Other) const {
+    assert(Node == Other.Node &&
+           "Cannot compare iterators of two different nodes!");
+    return Operand - Other.Operand;
+  }
+
+  static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
+  static SDNodeIterator end  (const SDNode *N) {
+    return SDNodeIterator(N, N->getNumOperands());
+  }
+
+  unsigned getOperand() const { return Operand; }
+  const SDNode *getNode() const { return Node; }
+};
+
+template <> struct GraphTraits<SDNode*> {
+  using NodeRef = SDNode *;
+  using ChildIteratorType = SDNodeIterator;
+
+  static NodeRef getEntryNode(SDNode *N) { return N; }
+
+  static ChildIteratorType child_begin(NodeRef N) {
+    return SDNodeIterator::begin(N);
+  }
+
+  static ChildIteratorType child_end(NodeRef N) {
+    return SDNodeIterator::end(N);
+  }
+};
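+
+// The specialization above lets generic graph algorithms walk the operand
+// graph; a hedged sketch (assumes "llvm/ADT/DepthFirstIterator.h" is
+// included; N is assumed to be a valid SDNode*):
+//   for (SDNode *M : depth_first(N))
+//     M->dump();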
+
+/// A representation of the largest SDNode, for use in sizeof().
+///
+/// This needs to be a union because the largest node differs on 32 bit systems
+/// with 4 and 8 byte pointer alignment, respectively.
+using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
+                                            BlockAddressSDNode,
+                                            GlobalAddressSDNode>;
+
+/// The SDNode class with the greatest alignment requirement.
+using MostAlignedSDNode = GlobalAddressSDNode;
+
+namespace ISD {
+
+  /// Returns true if the specified node is a non-extending and unindexed load.
+  inline bool isNormalLoad(const SDNode *N) {
+    const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
+    return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
+      Ld->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-extending load.
+  inline bool isNON_EXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+  }
+
+  /// Returns true if the specified node is an EXTLOAD.
+  inline bool isEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+  }
+
+  /// Returns true if the specified node is a SEXTLOAD.
+  inline bool isSEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+  }
+
+  /// Returns true if the specified node is a ZEXTLOAD.
+  inline bool isZEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+  }
+
+  /// Returns true if the specified node is an unindexed load.
+  inline bool isUNINDEXEDLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-truncating
+  /// and unindexed store.
+  inline bool isNormalStore(const SDNode *N) {
+    const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
+    return St && !St->isTruncatingStore() &&
+      St->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-truncating store.
+  inline bool isNON_TRUNCStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
+  }
+
+  /// Returns true if the specified node is a truncating store.
+  inline bool isTRUNCStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
+  }
+
+  /// Returns true if the specified node is an unindexed store.
+  inline bool isUNINDEXEDStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) &&
+      cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Attempt to match a unary predicate against a scalar/splat constant or
+  /// every element of a constant BUILD_VECTOR.
+  bool matchUnaryPredicate(SDValue Op,
+                           std::function<bool(ConstantSDNode *)> Match);
+
+  /// Attempt to match a binary predicate against a pair of scalar/splat
+  /// constants or every element of a pair of constant BUILD_VECTORs.
+  bool matchBinaryPredicate(
+      SDValue LHS, SDValue RHS,
+      std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match);
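+
+  // Example (illustrative): matchUnaryPredicate can check a property of every
+  // constant element at once; 'Op' is an assumed SDValue.
+  // \code
+  //   bool AllNonZero = ISD::matchUnaryPredicate(
+  //       Op, [](ConstantSDNode *C) { return !C->isNullValue(); });
+  // \endcode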
+
+} // end namespace ISD
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h
new file mode 100644
index 0000000..45c1df4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h
@@ -0,0 +1,160 @@
+//==- llvm/CodeGen/SelectionDAGTargetInfo.h - SelectionDAG Info --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAGTargetInfo class, which targets can
+// subclass to parameterize the SelectionDAG lowering and instruction
+// selection process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
+#define LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
+
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Support/CodeGen.h"
+#include <utility>
+
+namespace llvm {
+
+class SelectionDAG;
+
+//===----------------------------------------------------------------------===//
+/// Targets can subclass this to parameterize the
+/// SelectionDAG lowering and instruction selection process.
+///
+class SelectionDAGTargetInfo {
+public:
+  explicit SelectionDAGTargetInfo() = default;
+  SelectionDAGTargetInfo(const SelectionDAGTargetInfo &) = delete;
+  SelectionDAGTargetInfo &operator=(const SelectionDAGTargetInfo &) = delete;
+  virtual ~SelectionDAGTargetInfo();
+
+  /// Emit target-specific code that performs a memcpy.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple loads/stores and can be
+  /// more efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  ///
+  /// If AlwaysInline is true, the size is constant and the target should not
+  /// emit any calls and is strongly encouraged to attempt to emit inline code
+  /// even if it is beyond the usual threshold because this intrinsic is being
+  /// expanded in a place where calls are not feasible (e.g. within the prologue
+  /// for another call). If the target chooses to decline an AlwaysInline
+  /// request here, legalize will resort to using simple loads and stores.
+  virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+                                          SDValue Chain, SDValue Op1,
+                                          SDValue Op2, SDValue Op3,
+                                          unsigned Align, bool isVolatile,
+                                          bool AlwaysInline,
+                                          MachinePointerInfo DstPtrInfo,
+                                          MachinePointerInfo SrcPtrInfo) const {
+    return SDValue();
+  }
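+
+  // Example (illustrative sketch; 'MyTargetSelectionDAGInfo' is a
+  // hypothetical subclass): a target overrides the hooks it can lower
+  // specially and returns a null SDValue to fall back to the generic
+  // expansion.
+  // \code
+  //   class MyTargetSelectionDAGInfo : public SelectionDAGTargetInfo {
+  //     SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+  //                                     SDValue Chain, SDValue Dst,
+  //                                     SDValue Src, SDValue Size,
+  //                                     unsigned Align, bool isVolatile,
+  //                                     bool AlwaysInline,
+  //                                     MachinePointerInfo DstPtrInfo,
+  //                                     MachinePointerInfo SrcPtrInfo)
+  //         const override {
+  //       return SDValue(); // Decline: use the default lowering.
+  //     }
+  //   };
+  // \endcode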
+
+  /// Emit target-specific code that performs a memmove.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple loads/stores and can be
+  /// more efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  virtual SDValue EmitTargetCodeForMemmove(
+      SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1,
+      SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile,
+      MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
+    return SDValue();
+  }
+
+  /// Emit target-specific code that performs a memset.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple stores and can be more
+  /// efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+                                          SDValue Chain, SDValue Op1,
+                                          SDValue Op2, SDValue Op3,
+                                          unsigned Align, bool isVolatile,
+                                          MachinePointerInfo DstPtrInfo) const {
+    return SDValue();
+  }
+
+  /// Emit target-specific code that performs a memcmp, in cases where that is
+  /// faster than a libcall. The first returned SDValue is the result of the
+  /// memcmp and the second is the chain. Both SDValues can be null if a normal
+  /// libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Op1, SDValue Op2, SDValue Op3,
+                          MachinePointerInfo Op1PtrInfo,
+                          MachinePointerInfo Op2PtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a memchr, in cases where that is
+  /// faster than a libcall. The first returned SDValue is the result of the
+  /// memchr and the second is the chain. Both SDValues can be null if a normal
+  /// libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Src, SDValue Char, SDValue Length,
+                          MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a strcpy or stpcpy, in cases
+  /// where that is faster than a libcall.
+  /// The first returned SDValue is the result of the copy (the start
+  /// of the destination string for strcpy, a pointer to the null terminator
+  /// for stpcpy) and the second is the chain.  Both SDValues can be null
+  /// if a normal libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                          SDValue Dest, SDValue Src,
+                          MachinePointerInfo DestPtrInfo,
+                          MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a strcmp, in cases where that is
+  /// faster than a libcall.
+  /// The first returned SDValue is the result of the strcmp and the second is
+  /// the chain. Both SDValues can be null if a normal libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Op1, SDValue Op2,
+                          MachinePointerInfo Op1PtrInfo,
+                          MachinePointerInfo Op2PtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
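+  /// Emit target-specific code that performs a strlen, in cases where that
+  /// is faster than a libcall.
+  /// The first returned SDValue is the result and the second is the chain.
+  /// Both SDValues can be null if a normal libcall should be used.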
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                          SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
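+  /// Emit target-specific code that performs a strnlen, in cases where that
+  /// is faster than a libcall.
+  /// The first returned SDValue is the result and the second is the chain.
+  /// Both SDValues can be null if a normal libcall should be used.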
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                           SDValue Src, SDValue MaxLength,
+                           MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
+  // than FMUL and ADD is delegated to the machine combiner.
+  virtual bool generateFMAsInMachineCombiner(CodeGenOpt::Level OptLevel) const {
+    return false;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
new file mode 100644
index 0000000..3a91e36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
@@ -0,0 +1,714 @@
+//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements SlotIndex and related classes. The purpose of SlotIndex
+// is to describe a position at which a register can become live, or cease to
+// be live.
+//
+// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
+// is held is LiveIntervals and provides the real numbering. This allows
+// LiveIntervals to perform largely transparent renumbering.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SLOTINDEXES_H
+#define LLVM_CODEGEN_SLOTINDEXES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+  /// This class represents an entry in the slot index list held in the
+  /// SlotIndexes pass. It should not be used directly. See the
+  /// SlotIndex & SlotIndexes classes for the public interface to this
+  /// information.
+  class IndexListEntry : public ilist_node<IndexListEntry> {
+    MachineInstr *mi;
+    unsigned index;
+
+  public:
+    IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}
+
+    MachineInstr* getInstr() const { return mi; }
+    void setInstr(MachineInstr *mi) {
+      this->mi = mi;
+    }
+
+    unsigned getIndex() const { return index; }
+    void setIndex(unsigned index) {
+      this->index = index;
+    }
+
+#ifdef EXPENSIVE_CHECKS
+    // When EXPENSIVE_CHECKS is defined, "erased" index list entries will
+    // actually be moved to a "graveyard" list, and have their pointers
+    // poisoned, so that dangling SlotIndex access can be reliably detected.
+    void setPoison() {
+      intptr_t tmp = reinterpret_cast<intptr_t>(mi);
+      assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
+      tmp |= 0x1;
+      mi = reinterpret_cast<MachineInstr*>(tmp);
+    }
+
+    bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
+#endif // EXPENSIVE_CHECKS
+  };
+
+  template <>
+  struct ilist_alloc_traits<IndexListEntry>
+      : public ilist_noalloc_traits<IndexListEntry> {};
+
+  /// SlotIndex - An opaque wrapper around machine indexes.
+  class SlotIndex {
+    friend class SlotIndexes;
+
+    enum Slot {
+      /// Basic block boundary.  Used for live ranges entering and leaving a
+      /// block without being live in the layout neighbor.  Also used as the
+      /// def slot of PHI-defs.
+      Slot_Block,
+
+      /// Early-clobber register use/def slot.  A live range defined at
+      /// Slot_EarlyClobber interferes with normal live ranges killed at
+      /// Slot_Register.  Also used as the kill slot for live ranges tied to an
+      /// early-clobber def.
+      Slot_EarlyClobber,
+
+      /// Normal register use/def slot.  Normal instructions kill and define
+      /// register live ranges at this slot.
+      Slot_Register,
+
+      /// Dead def kill point.  Kill slot for a live range that is defined by
+      /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
+      /// used anywhere.
+      Slot_Dead,
+
+      Slot_Count
+    };
+
+    PointerIntPair<IndexListEntry*, 2, unsigned> lie;
+
+    SlotIndex(IndexListEntry *entry, unsigned slot)
+      : lie(entry, slot) {}
+
+    IndexListEntry* listEntry() const {
+      assert(isValid() && "Attempt to compare reserved index.");
+#ifdef EXPENSIVE_CHECKS
+      assert(!lie.getPointer()->isPoisoned() &&
+             "Attempt to access deleted list-entry.");
+#endif // EXPENSIVE_CHECKS
+      return lie.getPointer();
+    }
+
+    unsigned getIndex() const {
+      return listEntry()->getIndex() | getSlot();
+    }
+
+    /// Returns the slot for this SlotIndex.
+    Slot getSlot() const {
+      return static_cast<Slot>(lie.getInt());
+    }
+
+  public:
+    enum {
+      /// The default distance between instructions as returned by distance().
+      /// This may vary as instructions are inserted and removed.
+      InstrDist = 4 * Slot_Count
+    };
+
+    /// Construct an invalid index.
+    SlotIndex() = default;
+
+    /// Construct a new slot index from the given one, and set the slot.
+    SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
+      assert(lie.getPointer() != nullptr &&
+             "Attempt to construct index with 0 pointer.");
+    }
+
+    /// Returns true if this is a valid index. Invalid indices do
+    /// not point into an index table, and cannot be compared.
+    bool isValid() const {
+      return lie.getPointer();
+    }
+
+    /// Return true for a valid index.
+    explicit operator bool() const { return isValid(); }
+
+    /// Print this index to the given raw_ostream.
+    void print(raw_ostream &os) const;
+
+    /// Dump this index to stderr.
+    void dump() const;
+
+    /// Compare two SlotIndex objects for equality.
+    bool operator==(SlotIndex other) const {
+      return lie == other.lie;
+    }
+    /// Compare two SlotIndex objects for inequality.
+    bool operator!=(SlotIndex other) const {
+      return lie != other.lie;
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is strictly lower than the second.
+    bool operator<(SlotIndex other) const {
+      return getIndex() < other.getIndex();
+    }
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is lower than, or equal to, the second.
+    bool operator<=(SlotIndex other) const {
+      return getIndex() <= other.getIndex();
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is greater than the second.
+    bool operator>(SlotIndex other) const {
+      return getIndex() > other.getIndex();
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is greater than, or equal to, the second.
+    bool operator>=(SlotIndex other) const {
+      return getIndex() >= other.getIndex();
+    }
+
+    /// isSameInstr - Return true if A and B refer to the same instruction.
+    static bool isSameInstr(SlotIndex A, SlotIndex B) {
+      return A.lie.getPointer() == B.lie.getPointer();
+    }
+
+    /// isEarlierInstr - Return true if A refers to an instruction earlier than
+    /// B. This is equivalent to A < B && !isSameInstr(A, B).
+    static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
+      return A.listEntry()->getIndex() < B.listEntry()->getIndex();
+    }
+
+    /// Return true if A refers to the same instruction as B or an earlier one.
+    /// This is equivalent to !isEarlierInstr(B, A).
+    static bool isEarlierEqualInstr(SlotIndex A, SlotIndex B) {
+      return !isEarlierInstr(B, A);
+    }
+
+    /// Return the distance from this index to the given one.
+    int distance(SlotIndex other) const {
+      return other.getIndex() - getIndex();
+    }
+
+    /// Return the scaled distance from this index to the given one, where all
+    /// slots on the same instruction have zero distance.
+    int getInstrDistance(SlotIndex other) const {
+      return (other.listEntry()->getIndex() - listEntry()->getIndex())
+        / Slot_Count;
+    }
+
+    /// isBlock - Returns true if this is a block boundary slot.
+    bool isBlock() const { return getSlot() == Slot_Block; }
+
+    /// isEarlyClobber - Returns true if this is an early-clobber slot.
+    bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
+
+    /// isRegister - Returns true if this is a normal register use/def slot.
+    /// Note that early-clobber slots may also be used for uses and defs.
+    bool isRegister() const { return getSlot() == Slot_Register; }
+
+    /// isDead - Returns true if this is a dead def kill slot.
+    bool isDead() const { return getSlot() == Slot_Dead; }
+
+    /// Returns the base index associated with this index. The base index
+    /// is the one associated with the Slot_Block slot for the instruction
+    /// pointed to by this index.
+    SlotIndex getBaseIndex() const {
+      return SlotIndex(listEntry(), Slot_Block);
+    }
+
+    /// Returns the boundary index associated with this index. The boundary
+    /// index is the one associated with the Slot_Dead slot for the instruction
+    /// pointed to by this index.
+    SlotIndex getBoundaryIndex() const {
+      return SlotIndex(listEntry(), Slot_Dead);
+    }
+
+    /// Returns the register use/def slot in the current instruction for a
+    /// normal or early-clobber def.
+    SlotIndex getRegSlot(bool EC = false) const {
+      return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
+    }
+
+    /// Returns the dead def kill slot for the current instruction.
+    SlotIndex getDeadSlot() const {
+      return SlotIndex(listEntry(), Slot_Dead);
+    }
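+
+    // Example (illustrative): the four slots of one instruction compare as
+    // getBaseIndex() < getRegSlot(true) < getRegSlot() < getDeadSlot(), and
+    // isSameInstr() holds across all of them; 'Idx' is an assumed valid
+    // SlotIndex.
+    // \code
+    //   SlotIndex EC  = Idx.getRegSlot(true); // early-clobber slot
+    //   SlotIndex Def = Idx.getRegSlot();     // normal use/def slot
+    //   assert(SlotIndex::isSameInstr(EC, Def) && EC < Def);
+    // \endcode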
+
+    /// Returns the next slot in the index list. This could be either the
+    /// next slot for the instruction pointed to by this index or, if this
+    /// index is a Slot_Dead, the first slot for the next instruction.
+    /// WARNING: This method is considerably more expensive than the methods
+    /// that return specific slots (getRegSlot(), etc.). If you can, please
+    /// use one of those methods.
+    SlotIndex getNextSlot() const {
+      Slot s = getSlot();
+      if (s == Slot_Dead) {
+        return SlotIndex(&*++listEntry()->getIterator(), Slot_Block);
+      }
+      return SlotIndex(listEntry(), s + 1);
+    }
+
+    /// Returns the next index. This is the index corresponding to this
+    /// index's slot, but for the next instruction.
+    SlotIndex getNextIndex() const {
+      return SlotIndex(&*++listEntry()->getIterator(), getSlot());
+    }
+
+    /// Returns the previous slot in the index list. This could be either the
+    /// previous slot for the instruction pointed to by this index or, if this
+    /// index is a Slot_Block, the last slot for the previous instruction.
+    /// WARNING: This method is considerably more expensive than the methods
+    /// that return specific slots (getRegSlot(), etc.). If you can, please
+    /// use one of those methods.
+    SlotIndex getPrevSlot() const {
+      Slot s = getSlot();
+      if (s == Slot_Block) {
+        return SlotIndex(&*--listEntry()->getIterator(), Slot_Dead);
+      }
+      return SlotIndex(listEntry(), s - 1);
+    }
+
+    /// Returns the previous index. This is the index corresponding to this
+    /// index's slot, but for the previous instruction.
+    SlotIndex getPrevIndex() const {
+      return SlotIndex(&*--listEntry()->getIterator(), getSlot());
+    }
+  };
+
+  template <> struct isPodLike<SlotIndex> { static const bool value = true; };
+
+  inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
+    li.print(os);
+    return os;
+  }
+
+  using IdxMBBPair = std::pair<SlotIndex, MachineBasicBlock *>;
+
+  inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
+    return V < IM.first;
+  }
+
+  inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
+    return IM.first < V;
+  }
+
+  struct Idx2MBBCompare {
+    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
+      return LHS.first < RHS.first;
+    }
+  };
+
+  /// SlotIndexes pass.
+  ///
+  /// This pass assigns indexes to each instruction.
+  class SlotIndexes : public MachineFunctionPass {
+  private:
+    // IndexListEntry allocator.
+    BumpPtrAllocator ileAllocator;
+
+    using IndexList = ilist<IndexListEntry>;
+    IndexList indexList;
+
+#ifdef EXPENSIVE_CHECKS
+    IndexList graveyardList;
+#endif // EXPENSIVE_CHECKS
+
+    MachineFunction *mf;
+
+    using Mi2IndexMap = DenseMap<const MachineInstr *, SlotIndex>;
+    Mi2IndexMap mi2iMap;
+
+    /// MBBRanges - Map MBB number to (start, stop) indexes.
+    SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;
+
+    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
+    /// and MBB id.
+    SmallVector<IdxMBBPair, 8> idx2MBBMap;
+
+    IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
+      IndexListEntry *entry =
+          static_cast<IndexListEntry *>(ileAllocator.Allocate(
+              sizeof(IndexListEntry), alignof(IndexListEntry)));
+
+      new (entry) IndexListEntry(mi, index);
+
+      return entry;
+    }
+
+    /// Renumber locally after inserting curItr.
+    void renumberIndexes(IndexList::iterator curItr);
+
+  public:
+    static char ID;
+
+    SlotIndexes() : MachineFunctionPass(ID) {
+      initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+    }
+
+    ~SlotIndexes() override {
+      // The indexList's nodes are all allocated in the BumpPtrAllocator.
+      indexList.clearAndLeakNodesUnsafely();
+    }
+
+    void getAnalysisUsage(AnalysisUsage &au) const override;
+    void releaseMemory() override;
+
+    bool runOnMachineFunction(MachineFunction &fn) override;
+
+    /// Dump the indexes.
+    void dump() const;
+
+    /// Renumber the index list, providing space for new instructions.
+    void renumberIndexes();
+
+    /// Repair indexes after adding and removing instructions.
+    void repairIndexesInRange(MachineBasicBlock *MBB,
+                              MachineBasicBlock::iterator Begin,
+                              MachineBasicBlock::iterator End);
+
+    /// Returns the zero index for this analysis.
+    SlotIndex getZeroIndex() {
+      assert(indexList.front().getIndex() == 0 && "First index is not 0?");
+      return SlotIndex(&indexList.front(), 0);
+    }
+
+    /// Returns the base index of the last slot in this analysis.
+    SlotIndex getLastIndex() {
+      return SlotIndex(&indexList.back(), 0);
+    }
+
+    /// Returns true if the given machine instr is mapped to an index,
+    /// otherwise returns false.
+    bool hasIndex(const MachineInstr &instr) const {
+      return mi2iMap.count(&instr);
+    }
+
+    /// Returns the base index for the given instruction.
+    SlotIndex getInstructionIndex(const MachineInstr &MI) const {
+      // Instructions inside a bundle have the same number as the bundle itself.
+      const MachineInstr &BundleStart = *getBundleStart(MI.getIterator());
+      Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleStart);
+      assert(itr != mi2iMap.end() && "Instruction not found in maps.");
+      return itr->second;
+    }
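+
+    // Example (illustrative): a pass holding this analysis can order two
+    // instructions within a function; 'A' and 'B' are assumed MachineInstrs.
+    // \code
+    //   SlotIndexes &SI = getAnalysis<SlotIndexes>();
+    //   bool ABeforeB =
+    //       SI.getInstructionIndex(A) < SI.getInstructionIndex(B);
+    // \endcode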
+
+    /// Returns the instruction for the given index, or null if the given
+    /// index has no instruction associated with it.
+    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+      return index.isValid() ? index.listEntry()->getInstr() : nullptr;
+    }
+
+    /// Returns the next non-null index, if one exists.
+    /// Otherwise returns getLastIndex().
+    SlotIndex getNextNonNullIndex(SlotIndex Index) {
+      IndexList::iterator I = Index.listEntry()->getIterator();
+      IndexList::iterator E = indexList.end();
+      while (++I != E)
+        if (I->getInstr())
+          return SlotIndex(&*I, Index.getSlot());
+      // We reached the end of the function.
+      return getLastIndex();
+    }
+
+    /// getIndexBefore - Returns the index of the last indexed instruction
+    /// before MI, or the start index of its basic block.
+    /// MI is not required to have an index.
+    SlotIndex getIndexBefore(const MachineInstr &MI) const {
+      const MachineBasicBlock *MBB = MI.getParent();
+      assert(MBB && "MI must be inserted inna basic block");
+      MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
+      while (true) {
+        if (I == B)
+          return getMBBStartIdx(MBB);
+        --I;
+        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
+        if (MapItr != mi2iMap.end())
+          return MapItr->second;
+      }
+    }
+
+    /// getIndexAfter - Returns the index of the first indexed instruction
+    /// after MI, or the end index of its basic block.
+    /// MI is not required to have an index.
+    SlotIndex getIndexAfter(const MachineInstr &MI) const {
+      const MachineBasicBlock *MBB = MI.getParent();
+      assert(MBB && "MI must be inserted inna basic block");
+      MachineBasicBlock::const_iterator I = MI, E = MBB->end();
+      while (true) {
+        ++I;
+        if (I == E)
+          return getMBBEndIdx(MBB);
+        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
+        if (MapItr != mi2iMap.end())
+          return MapItr->second;
+      }
+    }
+
+    /// Return the (start,end) range of the given basic block number.
+    const std::pair<SlotIndex, SlotIndex> &
+    getMBBRange(unsigned Num) const {
+      return MBBRanges[Num];
+    }
+
+    /// Return the (start,end) range of the given basic block.
+    const std::pair<SlotIndex, SlotIndex> &
+    getMBBRange(const MachineBasicBlock *MBB) const {
+      return getMBBRange(MBB->getNumber());
+    }
+
+    /// Returns the first index in the given basic block number.
+    SlotIndex getMBBStartIdx(unsigned Num) const {
+      return getMBBRange(Num).first;
+    }
+
+    /// Returns the first index in the given basic block.
+    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+      return getMBBRange(mbb).first;
+    }
+
+    /// Returns the last index in the given basic block number.
+    SlotIndex getMBBEndIdx(unsigned Num) const {
+      return getMBBRange(Num).second;
+    }
+
+    /// Returns the last index in the given basic block.
+    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+      return getMBBRange(mbb).second;
+    }
+
+    /// Iterator over the idx2MBBMap (sorted pairs of slot index of basic block
+    /// begin and basic block)
+    using MBBIndexIterator = SmallVectorImpl<IdxMBBPair>::const_iterator;
+
+    /// Move iterator to the next IdxMBBPair where the SlotIndex is greater or
+    /// equal to \p To.
+    MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const {
+      return std::lower_bound(I, idx2MBBMap.end(), To);
+    }
+
+    /// Get an iterator pointing to the first IdxMBBPair with a SlotIndex that
+    /// is greater than or equal to \p Idx.
+    MBBIndexIterator findMBBIndex(SlotIndex Idx) const {
+      return advanceMBBIndex(idx2MBBMap.begin(), Idx);
+    }
+
+    /// Returns an iterator for the begin of the idx2MBBMap.
+    MBBIndexIterator MBBIndexBegin() const {
+      return idx2MBBMap.begin();
+    }
+
+    /// Return an iterator for the end of the idx2MBBMap.
+    MBBIndexIterator MBBIndexEnd() const {
+      return idx2MBBMap.end();
+    }
+
+    /// Returns the basic block which the given index falls in.
+    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+      if (MachineInstr *MI = getInstructionFromIndex(index))
+        return MI->getParent();
+
+      MBBIndexIterator I = findMBBIndex(index);
+      // Take the pair containing the index
+      MBBIndexIterator J =
+        ((I != MBBIndexEnd() && I->first > index) ||
+         (I == MBBIndexEnd() && !idx2MBBMap.empty())) ? std::prev(I) : I;
+
+      assert(J != MBBIndexEnd() && J->first <= index &&
+             index < getMBBEndIdx(J->second) &&
+             "index does not correspond to an MBB");
+      return J->second;
+    }
+
+    /// Returns the MBB covering the given range, or null if the range covers
+    /// more than one basic block.
+    MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
+      assert(start < end && "Backwards ranges not allowed.");
+      MBBIndexIterator itr = findMBBIndex(start);
+      if (itr == MBBIndexEnd()) {
+        itr = std::prev(itr);
+        return itr->second;
+      }
+
+      // Check that we don't cross the boundary into this block.
+      if (itr->first < end)
+        return nullptr;
+
+      itr = std::prev(itr);
+
+      if (itr->first <= start)
+        return itr->second;
+
+      return nullptr;
+    }
+
+    /// Insert the given machine instruction into the mapping. Returns the
+    /// assigned index.
+    /// If Late is set and there are null indexes between mi's neighboring
+    /// instructions, create the new index after the null indexes instead of
+    /// before them.
+    SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late = false) {
+      assert(!MI.isInsideBundle() &&
+             "Instructions inside bundles should use bundle start's slot.");
+      assert(mi2iMap.find(&MI) == mi2iMap.end() && "Instr already indexed.");
+      // Numbering DBG_VALUE instructions could cause code generation to be
+      // affected by debug information.
+      assert(!MI.isDebugValue() && "Cannot number DBG_VALUE instructions.");
+
+      assert(MI.getParent() != nullptr && "Instr must be added to function.");
+
+      // Get the entries where MI should be inserted.
+      IndexList::iterator prevItr, nextItr;
+      if (Late) {
+        // Insert MI's index immediately before the following instruction.
+        nextItr = getIndexAfter(MI).listEntry()->getIterator();
+        prevItr = std::prev(nextItr);
+      } else {
+        // Insert MI's index immediately after the preceding instruction.
+        prevItr = getIndexBefore(MI).listEntry()->getIterator();
+        nextItr = std::next(prevItr);
+      }
+
+      // Get a number for the new instr, or 0 if there's no room currently.
+      // In the latter case we'll force a renumber later.
+      unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
+      unsigned newNumber = prevItr->getIndex() + dist;
+
+      // Insert a new list entry for MI.
+      IndexList::iterator newItr =
+          indexList.insert(nextItr, createEntry(&MI, newNumber));
+
+      // Renumber locally if we need to.
+      if (dist == 0)
+        renumberIndexes(newItr);
+
+      SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
+      mi2iMap.insert(std::make_pair(&MI, newIndex));
+      return newIndex;
+    }
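+
+    // Worked example (illustrative): if the neighboring entries are numbered
+    // 16 and 32, dist = ((32 - 16) / 2) & ~3u = 8 and the new entry gets
+    // index 24, a multiple of Slot_Count. If the neighbors are 16 and 20,
+    // dist is 0 and renumberIndexes() is invoked to make room.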
+
+    /// Removes machine instruction (bundle) \p MI from the mapping.
+    /// This should be called before MachineInstr::eraseFromParent() is used to
+    /// remove a whole bundle or an unbundled instruction.
+    void removeMachineInstrFromMaps(MachineInstr &MI);
+
+    /// Removes a single machine instruction \p MI from the mapping.
+    /// This should be called before MachineInstr::eraseFromBundle() is used to
+    /// remove a single instruction (out of a bundle).
+    void removeSingleMachineInstrFromMaps(MachineInstr &MI);
+
+    /// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
+    /// maps used by register allocator. \returns the index where the new
+    /// instruction was inserted.
+    SlotIndex replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+      Mi2IndexMap::iterator mi2iItr = mi2iMap.find(&MI);
+      if (mi2iItr == mi2iMap.end())
+        return SlotIndex();
+      SlotIndex replaceBaseIndex = mi2iItr->second;
+      IndexListEntry *miEntry(replaceBaseIndex.listEntry());
+      assert(miEntry->getInstr() == &MI &&
+             "Mismatched instruction in index tables.");
+      miEntry->setInstr(&NewMI);
+      mi2iMap.erase(mi2iItr);
+      mi2iMap.insert(std::make_pair(&NewMI, replaceBaseIndex));
+      return replaceBaseIndex;
+    }
+
+    /// Add the given MachineBasicBlock into the maps.
+    void insertMBBInMaps(MachineBasicBlock *mbb) {
+      MachineFunction::iterator nextMBB =
+        std::next(MachineFunction::iterator(mbb));
+
+      IndexListEntry *startEntry = nullptr;
+      IndexListEntry *endEntry = nullptr;
+      IndexList::iterator newItr;
+      if (nextMBB == mbb->getParent()->end()) {
+        startEntry = &indexList.back();
+        endEntry = createEntry(nullptr, 0);
+        newItr = indexList.insertAfter(startEntry->getIterator(), endEntry);
+      } else {
+        startEntry = createEntry(nullptr, 0);
+        endEntry = getMBBStartIdx(&*nextMBB).listEntry();
+        newItr = indexList.insert(endEntry->getIterator(), startEntry);
+      }
+
+      SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
+      SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
+
+      MachineFunction::iterator prevMBB(mbb);
+      assert(prevMBB != mbb->getParent()->end() &&
+             "Can't insert a new block at the beginning of a function.");
+      --prevMBB;
+      MBBRanges[prevMBB->getNumber()].second = startIdx;
+
+      assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
+             "Blocks must be added in order");
+      MBBRanges.push_back(std::make_pair(startIdx, endIdx));
+      idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+      renumberIndexes(newItr);
+      std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+    }
+
+    /// \brief Free the resources that were required to maintain a SlotIndex.
+    ///
+    /// Once an index is no longer needed (for instance because the instruction
+    /// at that index has been moved), the resources required to maintain the
+    /// index can be relinquished to reduce memory use and improve renumbering
+    /// performance. Any remaining SlotIndex objects that point to the same
+    /// index are left 'dangling' (much the same as a dangling pointer to a
+    /// freed object) and should not be accessed, except to destruct them.
+    ///
+    /// Like dangling pointers, access to dangling SlotIndexes can cause
+    /// painful-to-track-down bugs, especially if the memory for the index
+    /// previously pointed to has been re-used. To detect dangling SlotIndex
+    /// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
+    /// be retained in a graveyard instead of being freed. Operations on indexes
+    /// in the graveyard will trigger an assertion.
+    void eraseIndex(SlotIndex index) {
+      IndexListEntry *entry = index.listEntry();
+#ifdef EXPENSIVE_CHECKS
+      indexList.remove(entry);
+      graveyardList.push_back(entry);
+      entry->setPoison();
+#else
+      indexList.erase(entry);
+#endif
+    }
+  };
+
+  // Specialize IntervalMapInfo for half-open slot index intervals.
+  template <>
+  struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
+  };
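+
+  // Example (illustrative): this enables IntervalMap keyed by SlotIndex with
+  // half-open [start, stop) intervals; 'Start' and 'Stop' are assumed valid
+  // SlotIndexes.
+  // \code
+  //   IntervalMap<SlotIndex, unsigned>::Allocator Alloc;
+  //   IntervalMap<SlotIndex, unsigned> LiveVals(Alloc);
+  //   LiveVals.insert(Start, Stop, /*Value=*/1);
+  // \endcode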
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SLOTINDEXES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackMaps.h b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
new file mode 100644
index 0000000..4407114
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
@@ -0,0 +1,332 @@
+//===- StackMaps.h - StackMaps ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKMAPS_H
+#define LLVM_CODEGEN_STACKMAPS_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class MCStreamer;
+class raw_ostream;
+class TargetRegisterInfo;
+
+/// \brief MI-level stackmap operands.
+///
+/// MI stackmap operations take the form:
+/// <id>, <numBytes>, live args...
+class StackMapOpers {
+public:
+  /// Enumerate the meta operands.
+  enum { IDPos, NBytesPos };
+
+private:
+  const MachineInstr* MI;
+
+public:
+  explicit StackMapOpers(const MachineInstr *MI);
+
+  /// Return the ID for the given stackmap.
+  uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given stackmap should emit.
+  uint32_t getNumPatchBytes() const {
+    return MI->getOperand(NBytesPos).getImm();
+  }
+
+  /// Get the operand index of the variable list of non-argument operands.
+  /// These hold the "live state".
+  unsigned getVarIdx() const {
+    // Skip ID, nShadowBytes.
+    return 2;
+  }
+};
+
+/// \brief MI-level patchpoint operands.
+///
+/// MI patchpoint operations take the form:
+/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+///
+/// IR patchpoint intrinsics do not have the <cc> operand because calling
+/// convention is part of the subclass data.
+///
+/// SD patchpoint nodes do not have a def operand because it is part of the
+/// SDValue.
+///
+/// Patchpoints following the anyregcc convention are handled specially. For
+/// these, the stack map also records the location of the return value and
+/// arguments.
+class PatchPointOpers {
+public:
+  /// Enumerate the meta operands.
+  enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd };
+
+private:
+  const MachineInstr *MI;
+  bool HasDef;
+
+  unsigned getMetaIdx(unsigned Pos = 0) const {
+    assert(Pos < MetaEnd && "Meta operand index out of range.");
+    return (HasDef ? 1 : 0) + Pos;
+  }
+
+  const MachineOperand &getMetaOper(unsigned Pos) const {
+    return MI->getOperand(getMetaIdx(Pos));
+  }
+
+public:
+  explicit PatchPointOpers(const MachineInstr *MI);
+
+  bool isAnyReg() const { return (getCallingConv() == CallingConv::AnyReg); }
+  bool hasDef() const { return HasDef; }
+
+  /// Return the ID for the given patchpoint.
+  uint64_t getID() const { return getMetaOper(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given patchpoint should emit.
+  uint32_t getNumPatchBytes() const {
+    return getMetaOper(NBytesPos).getImm();
+  }
+
+  /// Returns the target of the underlying call.
+  const MachineOperand &getCallTarget() const {
+    return getMetaOper(TargetPos);
+  }
+
+  /// Returns the calling convention.
+  CallingConv::ID getCallingConv() const {
+    return getMetaOper(CCPos).getImm();
+  }
+
+  unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }
+
+  /// Return the number of call arguments
+  uint32_t getNumCallArgs() const {
+    return MI->getOperand(getMetaIdx(NArgPos)).getImm();
+  }
+
+  /// Get the operand index of the variable list of non-argument operands.
+  /// These hold the "live state".
+  unsigned getVarIdx() const {
+    return getMetaIdx() + MetaEnd + getNumCallArgs();
+  }
+
+  /// Get the index at which stack map locations will be recorded.
+  /// Arguments are not recorded unless the anyregcc convention is used.
+  unsigned getStackMapStartIdx() const {
+    if (isAnyReg())
+      return getArgIdx();
+    return getVarIdx();
+  }
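+
+  // Worked example (illustrative): for a patchpoint with a def and two call
+  // arguments, getMetaIdx() == 1, getArgIdx() == 1 + MetaEnd == 6, and
+  // getVarIdx() == 6 + 2 == 8, so stack map locations start at operand 6
+  // under anyregcc and at operand 8 otherwise.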
+
+  /// \brief Get the next scratch register operand index.
+  unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
+};
+
+/// MI-level Statepoint operands
+///
+/// Statepoint operands take the form:
+///   <id>, <num patch bytes >, <num call arguments>, <call target>,
+///   [call arguments...],
+///   <StackMaps::ConstantOp>, <calling convention>,
+///   <StackMaps::ConstantOp>, <statepoint flags>,
+///   <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
+///   <gc base/derived pairs...> <gc allocas...>
+/// Note that the last two sets of arguments are not currently length
+///   prefixed.
+class StatepointOpers {
+  // TODO: we should change the STATEPOINT representation so that CC and
+  // Flags are part of the meta operands, with args, deopt operands, and gc
+  // operands all prefixed by their length and a type code. This would be
+  // much more consistent.
+public:
+  // These values are absolute offsets into the operands of the statepoint
+  // instruction.
+  enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };
+
+  // These values are relative offsets from the start of the statepoint meta
+  // arguments (i.e. the end of the call arguments).
+  enum { CCOffset = 1, FlagsOffset = 3, NumDeoptOperandsOffset = 5 };
+
+  explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {}
+
+  /// Get starting index of non call related arguments
+  /// (calling convention, statepoint flags, vm state and gc state).
+  unsigned getVarIdx() const {
+    return MI->getOperand(NCallArgsPos).getImm() + MetaEnd;
+  }
+
+  /// Return the ID for the given statepoint.
+  uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given statepoint should emit.
+  uint32_t getNumPatchBytes() const {
+    return MI->getOperand(NBytesPos).getImm();
+  }
+
+  /// Returns the target of the underlying call.
+  const MachineOperand &getCallTarget() const {
+    return MI->getOperand(CallTargetPos);
+  }
+
+private:
+  const MachineInstr *MI;
+};
+
+class StackMaps {
+public:
+  struct Location {
+    enum LocationType {
+      Unprocessed,
+      Register,
+      Direct,
+      Indirect,
+      Constant,
+      ConstantIndex
+    };
+    LocationType Type = Unprocessed;
+    unsigned Size = 0;
+    unsigned Reg = 0;
+    int64_t Offset = 0;
+
+    Location() = default;
+    Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset)
+        : Type(Type), Size(Size), Reg(Reg), Offset(Offset) {}
+  };
+
+  struct LiveOutReg {
+    unsigned short Reg = 0;
+    unsigned short DwarfRegNum = 0;
+    unsigned short Size = 0;
+
+    LiveOutReg() = default;
+    LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum,
+               unsigned short Size)
+        : Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {}
+  };
+
+  // OpTypes are used to encode information about the following logical
+  // operand (which may consist of several MachineOperands) for the
+  // OpParser.
+  using OpType = enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp };
+
+  StackMaps(AsmPrinter &AP);
+
+  void reset() {
+    CSInfos.clear();
+    ConstPool.clear();
+    FnInfos.clear();
+  }
+
+  /// \brief Generate a stackmap record for a stackmap instruction.
+  ///
+  /// MI must be a raw STACKMAP, not a PATCHPOINT.
+  void recordStackMap(const MachineInstr &MI);
+
+  /// \brief Generate a stackmap record for a patchpoint instruction.
+  void recordPatchPoint(const MachineInstr &MI);
+
+  /// \brief Generate a stackmap record for a statepoint instruction.
+  void recordStatepoint(const MachineInstr &MI);
+
+  /// If there is any stack map data, create a stack map section and serialize
+  /// the map info into it. This clears the stack map data structures
+  /// afterwards.
+  void serializeToStackMapSection();
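+
+  // Example (illustrative): an AsmPrinter-based flow records each stackmap
+  // as it is lowered, then serializes once at the end; 'AP' and 'MI' are
+  // assumed to be available from the surrounding emitter.
+  // \code
+  //   StackMaps SM(AP);
+  //   SM.recordStackMap(MI);           // per STACKMAP instruction
+  //   SM.serializeToStackMapSection(); // once, after all functions
+  // \endcode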
+
+private:
+  static const char *WSMP;
+
+  using LocationVec = SmallVector<Location, 8>;
+  using LiveOutVec = SmallVector<LiveOutReg, 8>;
+  using ConstantPool = MapVector<uint64_t, uint64_t>;
+
+  struct FunctionInfo {
+    uint64_t StackSize = 0;
+    uint64_t RecordCount = 1;
+
+    FunctionInfo() = default;
+    explicit FunctionInfo(uint64_t StackSize) : StackSize(StackSize) {}
+  };
+
+  struct CallsiteInfo {
+    const MCExpr *CSOffsetExpr = nullptr;
+    uint64_t ID = 0;
+    LocationVec Locations;
+    LiveOutVec LiveOuts;
+
+    CallsiteInfo() = default;
+    CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
+                 LocationVec &&Locations, LiveOutVec &&LiveOuts)
+        : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)),
+          LiveOuts(std::move(LiveOuts)) {}
+  };
+
+  using FnInfoMap = MapVector<const MCSymbol *, FunctionInfo>;
+  using CallsiteInfoList = std::vector<CallsiteInfo>;
+
+  AsmPrinter &AP;
+  CallsiteInfoList CSInfos;
+  ConstantPool ConstPool;
+  FnInfoMap FnInfos;
+
+  MachineInstr::const_mop_iterator
+  parseOperand(MachineInstr::const_mop_iterator MOI,
+               MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
+               LiveOutVec &LiveOuts) const;
+
+  /// \brief Create a live-out register record for the given register @p Reg.
+  LiveOutReg createLiveOutReg(unsigned Reg,
+                              const TargetRegisterInfo *TRI) const;
+
+  /// \brief Parse the register live-out mask and return a vector of live-out
+  /// registers that need to be recorded in the stackmap.
+  LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;
+
+  /// This should be called by the MC lowering code _immediately_ before
+  /// lowering the MI to an MCInst. It records where the operands for the
+  /// instruction are stored, and outputs a label to record the offset of
+  /// the call from the start of the text section. In special cases (e.g. AnyReg
+  /// calling convention) the return register is also recorded if requested.
+  void recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
+                           MachineInstr::const_mop_iterator MOI,
+                           MachineInstr::const_mop_iterator MOE,
+                           bool recordResult = false);
+
+  /// \brief Emit the stackmap header.
+  void emitStackmapHeader(MCStreamer &OS);
+
+  /// \brief Emit the function frame record for each function.
+  void emitFunctionFrameRecords(MCStreamer &OS);
+
+  /// \brief Emit the constant pool.
+  void emitConstantPoolEntries(MCStreamer &OS);
+
+  /// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
+  void emitCallsiteEntries(MCStreamer &OS);
+
+  void print(raw_ostream &OS);
+  void debug() { print(dbgs()); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKMAPS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackProtector.h b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
new file mode 100644
index 0000000..72de212
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
@@ -0,0 +1,138 @@
+//===- StackProtector.h - Stack Protector Insertion -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts stack protectors into functions which need them. A variable
+// with a random value in it is stored onto the stack before the local variables
+// are allocated. Upon exiting the block, the stored value is checked. If it's
+// changed, then there was some sort of violation and the program aborts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKPROTECTOR_H
+#define LLVM_CODEGEN_STACKPROTECTOR_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class BasicBlock;
+class DominatorTree;
+class Function;
+class Instruction;
+class Module;
+class TargetLoweringBase;
+class TargetMachine;
+class Type;
+
+class StackProtector : public FunctionPass {
+public:
+  /// SSPLayoutKind.  Stack Smashing Protection (SSP) rules require that
+  /// vulnerable stack allocations are located close to the stack protector.
+  enum SSPLayoutKind {
+    SSPLK_None,       ///< Did not trigger a stack protector.  No effect on data
+                      ///< layout.
+    SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size.  Closest
+                      ///< to the stack protector.
+    SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
+                      ///< to the stack protector.
+    SSPLK_AddrOf      ///< The address of this allocation is exposed and
+                      ///< triggered protection.  3rd closest to the protector.
+  };
+
+  /// A mapping of AllocaInsts to their required SSP layout.
+  using SSPLayoutMap = ValueMap<const AllocaInst *, SSPLayoutKind>;
+
+private:
+  const TargetMachine *TM = nullptr;
+
+  /// TLI - Keep a pointer of a TargetLowering to consult for determining
+  /// target type sizes.
+  const TargetLoweringBase *TLI = nullptr;
+  Triple Trip;
+
+  Function *F;
+  Module *M;
+
+  DominatorTree *DT;
+
+  /// Layout - Mapping of allocations to the required SSPLayoutKind.
+  /// StackProtector analysis will update this map when determining if an
+  /// AllocaInst triggers a stack protector.
+  SSPLayoutMap Layout;
+
+  /// \brief The minimum size of buffers that will receive stack smashing
+  /// protection when -fstack-protector is used.
+  unsigned SSPBufferSize = 0;
+
+  /// VisitedPHIs - The set of PHI nodes visited when determining
+  /// if a variable's reference has been taken.  This set
+  /// is maintained to ensure we don't visit the same PHI node multiple
+  /// times.
+  SmallPtrSet<const PHINode *, 16> VisitedPHIs;
+
+  // True once the stack-guard prologue has been generated.
+  bool HasPrologue = false;
+
+  // True once IR-level stack-guard checking code has been generated.
+  bool HasIRCheck = false;
+
+  /// InsertStackProtectors - Insert code into the prologue and epilogue of
+  /// the function.
+  ///
+  ///  - The prologue code loads and stores the stack guard onto the stack.
+  ///  - The epilogue checks the value stored in the prologue against the
+  ///    original value. It calls __stack_chk_fail if they differ.
+  bool InsertStackProtectors();
+
+  /// CreateFailBB - Create a basic block to jump to when the stack protector
+  /// check fails.
+  BasicBlock *CreateFailBB();
+
+  /// ContainsProtectableArray - Check whether the type either is an array or
+  /// contains an array of sufficient size so that we need stack protectors
+  /// for it.
+  /// \param [out] IsLarge is set to true if a protectable array is found and
+  /// it is "large" ( >= ssp-buffer-size).  In the case of a structure with
+  /// multiple arrays, this gets set if any of them is large.
+  bool ContainsProtectableArray(Type *Ty, bool &IsLarge, bool Strong = false,
+                                bool InStruct = false) const;
+
+  /// \brief Check whether a stack allocation has its address taken.
+  bool HasAddressTaken(const Instruction *AI);
+
+  /// RequiresStackProtector - Check whether or not this function needs a
+  /// stack protector based upon the stack protector level.
+  bool RequiresStackProtector();
+
+public:
+  static char ID; // Pass identification, replacement for typeid.
+
+  StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
+    initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  SSPLayoutKind getSSPLayout(const AllocaInst *AI) const;
+
+  // Return true if StackProtector is supposed to be handled by SelectionDAG.
+  bool shouldEmitSDCheck(const BasicBlock &BB) const;
+
+  void adjustForColoring(const AllocaInst *From, const AllocaInst *To);
+
+  bool runOnFunction(Function &Fn) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKPROTECTOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h b/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h
new file mode 100644
index 0000000..be6562c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h
@@ -0,0 +1,128 @@
+//===- llvm/CodeGen/TailDuplicator.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TailDuplicator class. Used by the
+// TailDuplication pass, and MachineBlockPlacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TAILDUPLICATOR_H
+#define LLVM_CODEGEN_TAILDUPLICATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineInstr;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+
+/// Utility class to perform tail duplication.
+class TailDuplicator {
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  const MachineBranchProbabilityInfo *MBPI;
+  const MachineModuleInfo *MMI;
+  MachineRegisterInfo *MRI;
+  MachineFunction *MF;
+  bool PreRegAlloc;
+  bool LayoutMode;
+  unsigned TailDupSize;
+
+  // A list of virtual registers for which to update SSA form.
+  SmallVector<unsigned, 16> SSAUpdateVRs;
+
+  // For each virtual register in SSAUpdateVals keep a list of source virtual
+  // registers.
+  using AvailableValsTy = std::vector<std::pair<MachineBasicBlock *, unsigned>>;
+
+  DenseMap<unsigned, AvailableValsTy> SSAUpdateVals;
+
+public:
+  /// Prepare to run on a specific machine function.
+  /// @param MF - Function that will be processed
+  /// @param PreRegAlloc - true if used before register allocation
+  /// @param MBPI - Branch Probability Info. Used to propagate correct
+  ///     probabilities when modifying the CFG.
+  /// @param LayoutMode - When true, don't use the existing layout to make
+  ///     decisions.
+  /// @param TailDupSize - Maximum size of blocks to tail-duplicate. Zero
+  ///     implies using the default command-line value for TailDupSize.
+  void initMF(MachineFunction &MF, bool PreRegAlloc,
+              const MachineBranchProbabilityInfo *MBPI,
+              bool LayoutMode, unsigned TailDupSize = 0);
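+
+  // Example (illustrative): typical use from a pass; 'MF' and 'MBPI' are
+  // assumed to come from the pass's analyses.
+  // \code
+  //   TailDuplicator TD;
+  //   TD.initMF(MF, /*PreRegAlloc=*/true, MBPI, /*LayoutMode=*/false);
+  //   bool Changed = TD.tailDuplicateBlocks();
+  // \endcode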
+
+  bool tailDuplicateBlocks();
+  static bool isSimpleBB(MachineBasicBlock *TailBB);
+  bool shouldTailDuplicate(bool IsSimple, MachineBasicBlock &TailBB);
+
+  /// Returns true if TailBB can successfully be duplicated into PredBB
+  bool canTailDuplicate(MachineBasicBlock *TailBB, MachineBasicBlock *PredBB);
+
+  /// Tail duplicate a single basic block into its predecessors, and then clean
+  /// up.
+  /// If \p DuplicatedPreds is not null, it will be updated to contain the
+  /// list of predecessors that received a copy of \p MBB.
+  /// If \p RemovalCallback is non-null, it will be called before MBB is
+  /// deleted.
+  bool tailDuplicateAndUpdate(
+      bool IsSimple, MachineBasicBlock *MBB,
+      MachineBasicBlock *ForcedLayoutPred,
+      SmallVectorImpl<MachineBasicBlock*> *DuplicatedPreds = nullptr,
+      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
+
+private:
+  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;
+
+  void addSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
+                         MachineBasicBlock *BB);
+  void processPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
+                  MachineBasicBlock *PredBB,
+                  DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
+                  SmallVectorImpl<std::pair<unsigned, RegSubRegPair>> &Copies,
+                  const DenseSet<unsigned> &UsedByPhi, bool Remove);
+  void duplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB,
+                            MachineBasicBlock *PredBB,
+                            DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
+                            const DenseSet<unsigned> &UsedByPhi);
+  void updateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
+                            SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                            SmallSetVector<MachineBasicBlock *, 8> &Succs);
+  bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
+  bool duplicateSimpleBB(MachineBasicBlock *TailBB,
+                         SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                         const DenseSet<unsigned> &RegsUsedByPhi,
+                         SmallVectorImpl<MachineInstr *> &Copies);
+  bool tailDuplicate(bool IsSimple,
+                     MachineBasicBlock *TailBB,
+                     MachineBasicBlock *ForcedLayoutPred,
+                     SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                     SmallVectorImpl<MachineInstr *> &Copies);
+  void appendCopies(MachineBasicBlock *MBB,
+                 SmallVectorImpl<std::pair<unsigned,RegSubRegPair>> &CopyInfos,
+                 SmallVectorImpl<MachineInstr *> &Copies);
+
+  void removeDeadBlock(
+      MachineBasicBlock *MBB,
+      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TAILDUPLICATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h b/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h
new file mode 100644
index 0000000..7d138f5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h
@@ -0,0 +1,204 @@
+//===-- llvm/CodeGen/TargetCallingConv.h - Calling Convention ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETCALLINGCONV_H
+#define LLVM_CODEGEN_TARGETCALLINGCONV_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+
+namespace llvm {
+namespace ISD {
+
+  struct ArgFlagsTy {
+  private:
+    unsigned IsZExt : 1;     ///< Zero extended
+    unsigned IsSExt : 1;     ///< Sign extended
+    unsigned IsInReg : 1;    ///< Passed in register
+    unsigned IsSRet : 1;     ///< Hidden struct-ret ptr
+    unsigned IsByVal : 1;    ///< Struct passed by value
+    unsigned IsNest : 1;     ///< Nested fn static chain
+    unsigned IsReturned : 1; ///< Always returned
+    unsigned IsSplit : 1;
+    unsigned IsInAlloca : 1;   ///< Passed with inalloca
+    unsigned IsSplitEnd : 1;   ///< Last part of a split
+    unsigned IsSwiftSelf : 1;  ///< Swift self parameter
+    unsigned IsSwiftError : 1; ///< Swift error parameter
+    unsigned IsHva : 1;        ///< HVA (vector aggregate) field
+    unsigned IsHvaStart : 1;   ///< HVA structure start
+    unsigned IsSecArgPass : 1; ///< Passed as second argument
+    unsigned ByValAlign : 4;   ///< Log 2 of byval alignment
+    unsigned OrigAlign : 5;    ///< Log 2 of original alignment
+    unsigned IsInConsecutiveRegsLast : 1;
+    unsigned IsInConsecutiveRegs : 1;
+    unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
+
+    unsigned ByValSize; ///< Byval struct size
+
+  public:
+    ArgFlagsTy()
+        : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsNest(0),
+          IsReturned(0), IsSplit(0), IsInAlloca(0), IsSplitEnd(0),
+          IsSwiftSelf(0), IsSwiftError(0), IsHva(0), IsHvaStart(0),
+          IsSecArgPass(0), ByValAlign(0), OrigAlign(0),
+          IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
+          IsCopyElisionCandidate(0), ByValSize(0) {
+      static_assert(sizeof(*this) == 2 * sizeof(unsigned), "flags are too big");
+    }
+
+    bool isZExt() const { return IsZExt; }
+    void setZExt() { IsZExt = 1; }
+
+    bool isSExt() const { return IsSExt; }
+    void setSExt() { IsSExt = 1; }
+
+    bool isInReg() const { return IsInReg; }
+    void setInReg() { IsInReg = 1; }
+
+    bool isSRet() const { return IsSRet; }
+    void setSRet() { IsSRet = 1; }
+
+    bool isByVal() const { return IsByVal; }
+    void setByVal() { IsByVal = 1; }
+
+    bool isInAlloca() const { return IsInAlloca; }
+    void setInAlloca() { IsInAlloca = 1; }
+
+    bool isSwiftSelf() const { return IsSwiftSelf; }
+    void setSwiftSelf() { IsSwiftSelf = 1; }
+
+    bool isSwiftError() const { return IsSwiftError; }
+    void setSwiftError() { IsSwiftError = 1; }
+
+    bool isHva() const { return IsHva; }
+    void setHva() { IsHva = 1; }
+
+    bool isHvaStart() const { return IsHvaStart; }
+    void setHvaStart() { IsHvaStart = 1; }
+
+    bool isSecArgPass() const { return IsSecArgPass; }
+    void setSecArgPass() { IsSecArgPass = 1; }
+
+    bool isNest() const { return IsNest; }
+    void setNest() { IsNest = 1; }
+
+    bool isReturned() const { return IsReturned; }
+    void setReturned() { IsReturned = 1; }
+
+    bool isInConsecutiveRegs()  const { return IsInConsecutiveRegs; }
+    void setInConsecutiveRegs() { IsInConsecutiveRegs = 1; }
+
+    bool isInConsecutiveRegsLast() const { return IsInConsecutiveRegsLast; }
+    void setInConsecutiveRegsLast() { IsInConsecutiveRegsLast = 1; }
+
+    bool isSplit()   const { return IsSplit; }
+    void setSplit()  { IsSplit = 1; }
+
+    bool isSplitEnd()   const { return IsSplitEnd; }
+    void setSplitEnd()  { IsSplitEnd = 1; }
+
+    bool isCopyElisionCandidate()  const { return IsCopyElisionCandidate; }
+    void setCopyElisionCandidate() { IsCopyElisionCandidate = 1; }
+
+    unsigned getByValAlign() const { return (1U << ByValAlign) / 2; }
+    void setByValAlign(unsigned A) {
+      ByValAlign = Log2_32(A) + 1;
+      assert(getByValAlign() == A && "bitfield overflow");
+    }
+
+    unsigned getOrigAlign() const { return (1U << OrigAlign) / 2; }
+    void setOrigAlign(unsigned A) {
+      OrigAlign = Log2_32(A) + 1;
+      assert(getOrigAlign() == A && "bitfield overflow");
+    }
+
+    unsigned getByValSize() const { return ByValSize; }
+    void setByValSize(unsigned S) { ByValSize = S; }
+  };
+
+  /// InputArg - This struct carries flags and type information about a
+  /// single incoming (formal) argument or incoming (from the perspective
+  /// of the caller) return value virtual register.
+  ///
+  struct InputArg {
+    ArgFlagsTy Flags;
+    MVT VT = MVT::Other;
+    EVT ArgVT;
+    bool Used = false;
+
+    /// Index of the original Function's argument.
+    unsigned OrigArgIndex;
+    /// Sentinel value for implicit machine-level input arguments.
+    static const unsigned NoArgIndex = UINT_MAX;
+
+    /// Offset in bytes of the current input value relative to the beginning
+    /// of the original argument. E.g., if the argument is split into four
+    /// 32-bit registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
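+    ///
+    /// A sketch of that case (argument types illustrative): lowering an i128
+    /// argument on a 32-bit target could create
+    ///     InputArg(Flags, MVT::i32, EVT(MVT::i128), /*used=*/true, 0, 0);
+    ///     InputArg(Flags, MVT::i32, EVT(MVT::i128), /*used=*/true, 0, 4);
+    /// and likewise for PartOffsets 8 and 12.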
+    unsigned PartOffset;
+
+    InputArg() = default;
+    InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
+             unsigned origIdx, unsigned partOffs)
+      : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
+      VT = vt.getSimpleVT();
+      ArgVT = argvt;
+    }
+
+    bool isOrigArg() const {
+      return OrigArgIndex != NoArgIndex;
+    }
+
+    unsigned getOrigArgIndex() const {
+      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+      return OrigArgIndex;
+    }
+  };
+
+  /// OutputArg - This struct carries flags and a value for a
+  /// single outgoing (actual) argument or outgoing (from the perspective
+  /// of the caller) return value virtual register.
+  ///
+  struct OutputArg {
+    ArgFlagsTy Flags;
+    MVT VT;
+    EVT ArgVT;
+
+    /// IsFixed - Is this a "fixed" value, i.e. not passed through a
+    /// vararg "...".
+    bool IsFixed = false;
+
+    /// Index of the original Function's argument.
+    unsigned OrigArgIndex;
+
+    /// Offset in bytes of the current output value relative to the beginning
+    /// of the original argument. E.g., if the argument is split into four
+    /// 32-bit registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
+    unsigned PartOffset;
+
+    OutputArg() = default;
+    OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed,
+              unsigned origIdx, unsigned partOffs)
+      : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+        PartOffset(partOffs) {
+      VT = vt.getSimpleVT();
+      ArgVT = argvt;
+    }
+  };
+
+} // end namespace ISD
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETCALLINGCONV_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
new file mode 100644
index 0000000..61f1cf0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
@@ -0,0 +1,348 @@
+//===-- llvm/CodeGen/TargetFrameLowering.h ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETFRAMELOWERING_H
+#define LLVM_CODEGEN_TARGETFRAMELOWERING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include <utility>
+#include <vector>
+
+namespace llvm {
+  class BitVector;
+  class CalleeSavedInfo;
+  class MachineFunction;
+  class RegScavenger;
+
+/// Information about stack frame layout on the target.  It holds the direction
+/// of stack growth, the known stack alignment on entry to each function, and
+/// the offset to the locals area.
+///
+/// The offset to the local area is the offset from the stack pointer on
+/// function entry to the first location where function data (local variables,
+/// spill locations) can be stored.
+class TargetFrameLowering {
+public:
+  enum StackDirection {
+    StackGrowsUp,        // Adding to the stack increases the stack address
+    StackGrowsDown       // Adding to the stack decreases the stack address
+  };
+
+  // Maps a callee saved register to a stack slot with a fixed offset.
+  struct SpillSlot {
+    unsigned Reg;
+    int Offset; // Offset relative to stack pointer on function entry.
+  };
+private:
+  StackDirection StackDir;
+  unsigned StackAlignment;
+  unsigned TransientStackAlignment;
+  int LocalAreaOffset;
+  bool StackRealignable;
+public:
+  TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
+                      unsigned TransAl = 1, bool StackReal = true)
+    : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
+      LocalAreaOffset(LAO), StackRealignable(StackReal) {}
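+
+  /// For example, a hypothetical target with a downward-growing stack,
+  /// 16-byte alignment, and no local-area offset could be described as:
+  ///     MyTargetFrameLowering() : TargetFrameLowering(StackGrowsDown, 16, 0) {}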
+
+  virtual ~TargetFrameLowering();
+
+  // These methods return information that describes the abstract stack layout
+  // of the target machine.
+
+  /// getStackGrowthDirection - Return the direction the stack grows
+  ///
+  StackDirection getStackGrowthDirection() const { return StackDir; }
+
+  /// getStackAlignment - This method returns the number of bytes to which the
+  /// stack pointer must be aligned on entry to a function.  Typically, this
+  /// is the largest alignment for any data object in the target.
+  ///
+  unsigned getStackAlignment() const { return StackAlignment; }
+
+  /// alignSPAdjust - This method aligns the stack adjustment to the correct
+  /// alignment.
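+  ///
+  /// For example, with a stack alignment of 16, alignSPAdjust(20) yields 32
+  /// and alignSPAdjust(-20) yields -32: adjustments round away from zero to
+  /// the next multiple of the alignment.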
+  ///
+  int alignSPAdjust(int SPAdj) const {
+    if (SPAdj < 0) {
+      SPAdj = -alignTo(-SPAdj, StackAlignment);
+    } else {
+      SPAdj = alignTo(SPAdj, StackAlignment);
+    }
+    return SPAdj;
+  }
+
+  /// getTransientStackAlignment - This method returns the number of bytes to
+  /// which the stack pointer must be aligned at all times, even between
+  /// calls.
+  ///
+  unsigned getTransientStackAlignment() const {
+    return TransientStackAlignment;
+  }
+
+  /// isStackRealignable - This method returns whether the stack can be
+  /// realigned.
+  bool isStackRealignable() const {
+    return StackRealignable;
+  }
+
+  /// Return the skew that has to be applied to stack alignment under
+  /// certain conditions (e.g. stack was adjusted before function \p MF
+  /// was called).
+  virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;
+
+  /// getOffsetOfLocalArea - This method returns the offset of the local area
+  /// from the stack pointer on entrance to a function.
+  ///
+  int getOffsetOfLocalArea() const { return LocalAreaOffset; }
+
+  /// isFPCloseToIncomingSP - Return true if the frame pointer is close to
+  /// the incoming stack pointer, false if it is close to the post-prologue
+  /// stack pointer.
+  virtual bool isFPCloseToIncomingSP() const { return true; }
+
+  /// assignCalleeSavedSpillSlots - Allows target to override spill slot
+  /// assignment logic.  If implemented, assignCalleeSavedSpillSlots() should
+  /// assign frame slots to all CSI entries and return true.  If this method
+  /// returns false, spill slots will be assigned using generic implementation.
+  /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of
+  /// CSI.
+  virtual bool
+  assignCalleeSavedSpillSlots(MachineFunction &MF,
+                              const TargetRegisterInfo *TRI,
+                              std::vector<CalleeSavedInfo> &CSI) const {
+    return false;
+  }
+
+  /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
+  /// pairs, that contains an entry for each callee saved register that must be
+  /// spilled to a particular stack location if it is spilled.
+  ///
+  /// Each entry in this array contains a <register,offset> pair, indicating the
+  /// fixed offset from the incoming stack pointer that each register should be
+  /// spilled at. If a register is not listed here, the code generator is
+  /// allowed to spill it anywhere it chooses.
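+  ///
+  /// A hypothetical override (register name illustrative) might be:
+  ///     static const SpillSlot Offsets[] = {{MyTarget::LR, -4}};
+  ///     NumEntries = array_lengthof(Offsets);
+  ///     return Offsets;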
+  ///
+  virtual const SpillSlot *
+  getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+    NumEntries = 0;
+    return nullptr;
+  }
+
+  /// targetHandlesStackFrameRounding - Returns true if the target is
+  /// responsible for rounding up the stack frame (probably at emitPrologue
+  /// time).
+  virtual bool targetHandlesStackFrameRounding() const {
+    return false;
+  }
+
+  /// Returns true if the target will correctly handle shrink wrapping.
+  virtual bool enableShrinkWrapping(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the stack slot holes in the fixed and callee-save stack
+  /// area should be used when allocating other stack locations to reduce stack
+  /// size.
+  virtual bool enableStackSlotScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+  /// the function.
+  virtual void emitPrologue(MachineFunction &MF,
+                            MachineBasicBlock &MBB) const = 0;
+  virtual void emitEpilogue(MachineFunction &MF,
+                            MachineBasicBlock &MBB) const = 0;
+
+  /// Replace a StackProbe stub (if any) with the actual probe code inline
+  virtual void inlineStackProbe(MachineFunction &MF,
+                                MachineBasicBlock &PrologueMBB) const {}
+
+  /// Adjust the prologue to have the function use segmented stacks. This works
+  /// by adding a check even before the "normal" function prologue.
+  virtual void adjustForSegmentedStacks(MachineFunction &MF,
+                                        MachineBasicBlock &PrologueMBB) const {}
+
+  /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
+  /// the assembly prologue to explicitly handle the stack.
+  virtual void adjustForHiPEPrologue(MachineFunction &MF,
+                                     MachineBasicBlock &PrologueMBB) const {}
+
+  /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of store instructions via
+  /// storeRegToStackSlot(). Returns false otherwise.
+  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator MI,
+                                        const std::vector<CalleeSavedInfo> &CSI,
+                                         const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of load instructions via loadRegToStackSlot().
+  /// If it returns true, and any of the registers in CSI is not restored,
+  /// it sets the corresponding Restored flag in CSI to false.
+  /// Returns false otherwise.
+  virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MI,
+                                           std::vector<CalleeSavedInfo> &CSI,
+                                        const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// Return true if the target needs to disable frame pointer elimination.
+  virtual bool noFramePointerElim(const MachineFunction &MF) const;
+
+  /// hasFP - Return true if the specified function should have a dedicated
+  /// frame pointer register. For most targets this is true only if the function
+  /// has variable sized allocas or if frame pointer elimination is disabled.
+  virtual bool hasFP(const MachineFunction &MF) const = 0;
+
+  /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+  /// not required, we reserve argument space for call sites in the function
+  /// immediately on entry to the current function. This eliminates the need for
+  /// add/sub sp brackets around call sites. Returns true if the call frame is
+  /// included as part of the stack frame.
+  virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
+    return !hasFP(MF);
+  }
+
+  /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
+  /// call frame pseudo ops before doing frame index elimination. This is
+  /// possible only when frame index references between the pseudos won't
+  /// need adjusting for the call frame adjustments. Normally, that's true
+  /// if the function has a reserved call frame or a frame pointer. Some
+  /// targets (Thumb2, for example) may have more complicated criteria,
+  /// however, and can override this behavior.
+  virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+    return hasReservedCallFrame(MF) || hasFP(MF);
+  }
+
+  // needsFrameIndexResolution - Do we need to perform FI resolution for
+  // this function. Normally, this is required only when the function
+  // has any stack objects. However, targets may want to override this.
+  virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;
+
+  /// getFrameIndexReference - This method should return the base register
+  /// and offset used to reference a frame index location. The offset is
+  /// returned directly, and the base register is returned via FrameReg.
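+  ///
+  /// Typical use (sketch; TFI is a pointer to this class):
+  ///     unsigned FrameReg;
+  ///     int Offset = TFI->getFrameIndexReference(MF, FI, FrameReg);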
+  virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
+                                     unsigned &FrameReg) const;
+
+  /// Same as \c getFrameIndexReference, except that the stack pointer (as
+  /// opposed to the frame pointer) will be the preferred value for \p
+  /// FrameReg. This is generally used for emitting statepoint or EH tables that
+  /// use offsets from RSP.  If \p IgnoreSPUpdates is true, the returned
+  /// offset is only guaranteed to be valid with respect to the value of SP at
+  /// the end of the prologue.
+  virtual int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
+                                             unsigned &FrameReg,
+                                             bool IgnoreSPUpdates) const {
+    // Always safe to dispatch to getFrameIndexReference.
+    return getFrameIndexReference(MF, FI, FrameReg);
+  }
+
+  /// This method determines which of the registers reported by
+  /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
+  /// The default implementation populates the \p SavedRegs bitset with
+  /// all registers which are modified in the function; targets may override
+  /// this function to save additional registers.
+  /// This method also sets up the register scavenger ensuring there is a free
+  /// register or a frameindex available.
+  virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+                                    RegScavenger *RS = nullptr) const;
+
+  /// processFunctionBeforeFrameFinalized - This method is called immediately
+  /// before the specified function's frame layout (MF.getFrameInfo()) is
+  /// finalized.  Once the frame is finalized, MO_FrameIndex operands are
+  /// replaced with direct constants.  This method is optional.
+  ///
+  virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+                                             RegScavenger *RS = nullptr) const {
+  }
+
+  virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const {
+    report_fatal_error("WinEH not implemented for this target");
+  }
+
+  /// This method is called during prolog/epilog code insertion to eliminate
+  /// call frame setup and destroy pseudo instructions (but only if the Target
+  /// is using them).  It is responsible for eliminating these instructions,
+  /// replacing them with concrete instructions.  This method need only be
+  /// implemented if using call frame setup/destroy pseudo instructions.
+  /// Returns an iterator pointing to the instruction after the replaced one.
+  virtual MachineBasicBlock::iterator
+  eliminateCallFramePseudoInstr(MachineFunction &MF,
+                                MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const {
+    llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
+                     "target!");
+  }
+
+  /// Order the symbols in the local stack frame.
+  /// The list of objects that we want to order is in \p objectsToAllocate as
+  /// indices into the MachineFrameInfo. The array can be reordered in any way
+  /// upon return. The contents of the array, however, may not be modified (i.e.
+  /// only their order may be changed).
+  /// By default, just maintain the original order.
+  virtual void
+  orderFrameObjects(const MachineFunction &MF,
+                    SmallVectorImpl<int> &objectsToAllocate) const {
+  }
+
+  /// Check whether or not the given \p MBB can be used as a prologue
+  /// for the target.
+  /// The prologue will be inserted first in this basic block.
+  /// This method is used by the shrink-wrapping pass to decide if
+  /// \p MBB will be correctly handled by the target.
+  /// If the target enables shrink-wrapping without overriding this
+  /// method, each basic block is assumed to be a valid prologue.
+  virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const {
+    return true;
+  }
+
+  /// Check whether or not the given \p MBB can be used as an epilogue
+  /// for the target.
+  /// The epilogue will be inserted before the first terminator of that block.
+  /// This method is used by the shrink-wrapping pass to decide if
+  /// \p MBB will be correctly handled by the target.
+  /// If the target enables shrink-wrapping without overriding this
+  /// method, each basic block is assumed to be a valid epilogue.
+  virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
+    return true;
+  }
+
+  /// Check if given function is safe for not having callee saved registers.
+  /// This is used when interprocedural register allocation is enabled.
+  static bool isSafeForNoCSROpt(const Function &F) {
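+    // Local linkage, no taken address, and the norecurse attribute together
+    // guarantee that every call site is visible here and that the function
+    // cannot re-enter itself, so skipping CSR saves is unobservable.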
+    if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
+        !F.hasFnAttribute(Attribute::NoRecurse))
+      return false;
+    // Function should not be optimized as tail call.
+    for (const User *U : F.users())
+      if (auto CS = ImmutableCallSite(U))
+        if (CS.isTailCall())
+          return false;
+    return true;
+  }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
new file mode 100644
index 0000000..5c2a530
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
@@ -0,0 +1,1710 @@
+//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target machine instruction set to the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETINSTRINFO_H
+#define LLVM_TARGET_TARGETINSTRINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/None.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class DFAPacketizer;
+class InstrItineraryData;
+class LiveIntervals;
+class LiveVariables;
+class MachineMemOperand;
+class MachineRegisterInfo;
+class MCAsmInfo;
+class MCInst;
+struct MCSchedModel;
+class Module;
+class ScheduleDAG;
+class ScheduleHazardRecognizer;
+class SDNode;
+class SelectionDAG;
+class RegScavenger;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSchedModel;
+class TargetSubtargetInfo;
+
+template <class T> class SmallVectorImpl;
+
+//---------------------------------------------------------------------------
+///
+/// TargetInstrInfo - Interface to description of machine instruction set
+///
+class TargetInstrInfo : public MCInstrInfo {
+public:
+  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
+                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
+      : CallFrameSetupOpcode(CFSetupOpcode),
+        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
+        ReturnOpcode(ReturnOpcode) {}
+  TargetInstrInfo(const TargetInstrInfo &) = delete;
+  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
+  virtual ~TargetInstrInfo();
+
+  static bool isGenericOpcode(unsigned Opc) {
+    return Opc <= TargetOpcode::GENERIC_OP_END;
+  }
+
+  /// Given a machine instruction descriptor, returns the register
+  /// class constraint for OpNum, or NULL.
+  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
+                                         const TargetRegisterInfo *TRI,
+                                         const MachineFunction &MF) const;
+
+  /// Return true if the instruction is trivially rematerializable, meaning it
+  /// has no side effects and requires no operands that aren't always available.
+  /// This means the only allowed uses are constants and unallocatable physical
+  /// registers so that the instruction's result is independent of its place
+  /// in the function.
+  bool isTriviallyReMaterializable(const MachineInstr &MI,
+                                   AliasAnalysis *AA = nullptr) const {
+    return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+           (MI.getDesc().isRematerializable() &&
+            (isReallyTriviallyReMaterializable(MI, AA) ||
+             isReallyTriviallyReMaterializableGeneric(MI, AA)));
+  }
+
+protected:
+  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+  /// set, this hook lets the target specify whether the instruction is actually
+  /// trivially rematerializable, taking into consideration its operands. This
+  /// predicate must return false if the instruction has any side effects other
+  /// than producing a value, or if it requires any address registers that
+  /// are not always available.
+  /// Requirements must be checked as stated in isTriviallyReMaterializable().
+  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
+                                                 AliasAnalysis *AA) const {
+    return false;
+  }
+
+  /// This method commutes the operands of the given machine instruction MI.
+  /// The operands to be commuted are specified by their indices OpIdx1 and
+  /// OpIdx2.
+  ///
+  /// If a target has any instructions that are commutable but require
+  /// converting to different instructions or making non-trivial changes
+  /// to commute them, this method can be overloaded to do that.
+  /// The default implementation simply swaps the commutable operands.
+  ///
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned.
+  ///
+  /// Do not call this method for a non-commutable instruction.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+                                               unsigned OpIdx1,
+                                               unsigned OpIdx2) const;
+
+  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
+  /// operand indices to (ResultIdx1, ResultIdx2).
+  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
+  /// predefined to some indices or be undefined (designated by the special
+  /// value 'CommuteAnyOperandIndex').
+  /// The predefined result indices cannot be re-defined.
+  /// The function returns true iff, after the result pair is redefined,
+  /// the resulting pair is equal or equivalent to the source pair of
+  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
+  /// the pairs (x,y) and (y,x) are equivalent.
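+  ///
+  /// For example (sketch): given CommutableOpIdx1 == 1 and
+  /// CommutableOpIdx2 == 2, calling this with ResultIdx1 ==
+  /// CommuteAnyOperandIndex and ResultIdx2 == 2 fixes ResultIdx1 to 1 and
+  /// returns true, while ResultIdx1 == 0, ResultIdx2 == 2 cannot match
+  /// the pair (1, 2) and returns false.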
+  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
+                                   unsigned CommutableOpIdx1,
+                                   unsigned CommutableOpIdx2);
+
+private:
+  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+  /// set and the target hook isReallyTriviallyReMaterializable returns false,
+  /// this function does target-independent tests to determine if the
+  /// instruction is really trivially rematerializable.
+  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
+                                                AliasAnalysis *AA) const;
+
+public:
+  /// These methods return the opcode of the frame setup/destroy instructions
+  /// if they exist (-1 otherwise).  Some targets use pseudo instructions in
+  /// order to abstract away the difference between operating with a frame
+  /// pointer and operating without, through the use of these two instructions.
+  ///
+  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
+  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
+
+  /// Returns true if the argument is a frame pseudo instruction.
+  bool isFrameInstr(const MachineInstr &I) const {
+    return I.getOpcode() == getCallFrameSetupOpcode() ||
+           I.getOpcode() == getCallFrameDestroyOpcode();
+  }
+
+  /// Returns true if the argument is a frame setup pseudo instruction.
+  bool isFrameSetup(const MachineInstr &I) const {
+    return I.getOpcode() == getCallFrameSetupOpcode();
+  }
+
+  /// Returns the size of the frame associated with the given frame
+  /// instruction. For a frame setup instruction this is the size of the
+  /// frame that is set up after the instruction. For a frame destroy
+  /// instruction this is the frame freed by the caller.
+  /// Note, in some cases a call frame (or a part of it) may be prepared prior
+  /// to the frame setup instruction. It occurs in the calls that involve
+  /// inalloca arguments. This function reports only the size of the frame part
+  /// that is set up between the frame setup and destroy pseudo instructions.
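+  ///
+  /// A sketch of a typical call sequence (pseudo opcodes vary by target):
+  ///     ADJCALLSTACKDOWN 16, 0, ...
+  ///     ...call and argument stores...
+  ///     ADJCALLSTACKUP 16, 0, ...
+  /// Here getFrameSize() returns 16 for both pseudo instructions.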
+  int64_t getFrameSize(const MachineInstr &I) const {
+    assert(isFrameInstr(I) && "Not a frame instruction");
+    assert(I.getOperand(0).getImm() >= 0);
+    return I.getOperand(0).getImm();
+  }
+
+  /// Returns the total frame size, which is made up of the space set up inside
+  /// the pair of frame start-stop instructions and the space that is set up
+  /// prior to the pair.
+  int64_t getFrameTotalSize(const MachineInstr &I) const {
+    if (isFrameSetup(I)) {
+      assert(I.getOperand(1).getImm() >= 0 &&
+             "Frame size must not be negative");
+      return getFrameSize(I) + I.getOperand(1).getImm();
+    }
+    return getFrameSize(I);
+  }
+
+  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
+  unsigned getReturnOpcode() const { return ReturnOpcode; }
+
+  /// Returns the actual stack pointer adjustment made by an instruction
+  /// as part of a call sequence. By default, only call frame setup/destroy
+  /// instructions adjust the stack, but targets may want to override this
+  /// to enable more fine-grained adjustment, or adjust by a different value.
+  virtual int getSPAdjust(const MachineInstr &MI) const;
+
+  /// Return true if the instruction is a "coalescable" extension instruction.
+  /// That is, it's like a copy where it's legal for the source to overlap the
+  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
+  /// expected the pre-extension value is available as a subreg of the result
+  /// register. This also returns the sub-register index in SubIdx.
+  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+                                     unsigned &DstReg, unsigned &SubIdx) const {
+    return false;
+  }
+
+  /// If the specified machine instruction is a direct
+  /// load from a stack slot, return the virtual or physical register number of
+  /// the destination along with the FrameIndex of the loaded stack slot.  If
+  /// not, return 0.  This predicate must return 0 if the instruction has
+  /// any side effects other than loading from the stack slot.
+  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
+                                       int &FrameIndex) const {
+    return 0;
+  }
+
+  /// Check for post-frame ptr elimination stack locations as well.
+  /// This uses a heuristic so it isn't reliable for correctness.
+  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
+                                             int &FrameIndex) const {
+    return 0;
+  }
+
+  /// If the specified machine instruction has a load from a stack slot,
+  /// return true along with the FrameIndex of the loaded stack slot and the
+  /// machine mem operand containing the reference.
+  /// If not, return false.  Unlike isLoadFromStackSlot, this returns true for
+  /// any instruction that loads from the stack.  This is just a hint, as some
+  /// cases may be missed.
+  virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
+                                    const MachineMemOperand *&MMO,
+                                    int &FrameIndex) const;
+
+  /// If the specified machine instruction is a direct
+  /// store to a stack slot, return the virtual or physical register number of
+  /// the source reg along with the FrameIndex of the stack slot stored to.  If
+  /// not, return 0.  This predicate must return 0 if the instruction has
+  /// any side effects other than storing to the stack slot.
+  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
+                                      int &FrameIndex) const {
+    return 0;
+  }
+
+  /// Check for post-frame ptr elimination stack locations as well.
+  /// This uses a heuristic, so it isn't reliable for correctness.
+  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
+                                            int &FrameIndex) const {
+    return 0;
+  }
+
+  /// If the specified machine instruction has a store to a stack slot,
+  /// return true along with the FrameIndex of the stack slot stored to and the
+  /// machine mem operand containing the reference.
+  /// If not, return false.  Unlike isStoreToStackSlot,
+  /// this returns true for any instruction that stores to the
+  /// stack.  This is just a hint, as some cases may be missed.
+  virtual bool hasStoreToStackSlot(const MachineInstr &MI,
+                                   const MachineMemOperand *&MMO,
+                                   int &FrameIndex) const;
+
+  /// Return true if the specified machine instruction
+  /// is a copy of one stack slot to another and has no other effect.
+  /// Provide the identity of the two frame indices.
+  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
+                               int &SrcFrameIndex) const {
+    return false;
+  }
+
+  /// Compute the size in bytes and offset within a stack slot of a spilled
+  /// register or subregister.
+  ///
+  /// \param [out] Size in bytes of the spilled value.
+  /// \param [out] Offset in bytes within the stack slot.
+  /// \returns true if both Size and Offset are successfully computed.
+  ///
+  /// Not all subregisters have computable spill slots. For example,
+  /// subregisters may not be byte-sized, and a pair of discontiguous
+  /// subregisters has no single offset.
+  ///
+  /// Targets with nontrivial big-endian implementations may need to override
+  /// this, particularly to support spilled vector registers.
+  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
+                                 unsigned &Size, unsigned &Offset,
+                                 const MachineFunction &MF) const;
+
+  /// Returns the size in bytes of the specified MachineInstr, or ~0U
+  /// when this function is not implemented by a target.
+  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
+    return ~0U;
+  }
+
+  /// Return true if the instruction is as cheap as a move instruction.
+  ///
+  /// Targets for different archs need to override this, and different
+  /// micro-architectures can also be finely tuned inside.
+  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
+    return MI.isAsCheapAsAMove();
+  }
+
+  /// Return true if the instruction should be sunk by MachineSink.
+  ///
+  /// MachineSink determines on its own whether the instruction is safe to sink;
+  /// this gives the target a hook to override the default behavior with regards
+  /// to which instructions should be sunk.
+  virtual bool shouldSink(const MachineInstr &MI) const { return true; }
+
+  /// Re-issue the specified 'original' instruction at the
+  /// specific location targeting a new destination register.
+  /// The register in Orig->getOperand(0).getReg() will be substituted by
+  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+  /// SubIdx.
+  virtual void reMaterialize(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator MI, unsigned DestReg,
+                             unsigned SubIdx, const MachineInstr &Orig,
+                             const TargetRegisterInfo &TRI) const;
+
+  /// \brief Clones instruction or the whole instruction bundle \p Orig and
+  /// insert into \p MBB before \p InsertBefore. The target may update operands
+  /// that are required to be unique.
+  ///
+  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
+  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator InsertBefore,
+                                  const MachineInstr &Orig) const;
+
+  /// This method must be implemented by targets that
+  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
+  /// may be able to convert a two-address instruction into one or more true
+  /// three-address instructions on demand.  This allows the X86 target (for
+  /// example) to convert ADD and SHL instructions into LEA instructions if they
+  /// would require register copies due to two-addressness.
+  ///
+  /// This method returns a null pointer if the transformation cannot be
+  /// performed, otherwise it returns the last new instruction.
+  ///
+  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+                                              MachineInstr &MI,
+                                              LiveVariables *LV) const {
+    return nullptr;
+  }
+
+  // This constant can be used as an input value of operand index passed to
+  // the method findCommutedOpIndices() to tell the method that the
+  // corresponding operand index is not pre-defined and that the method
+  // can pick any commutable operand.
+  static const unsigned CommuteAnyOperandIndex = ~0U;
+
+  /// This method commutes the operands of the given machine instruction MI.
+  ///
+  /// The operands to be commuted are specified by their indices OpIdx1 and
+  /// OpIdx2. The OpIdx1 and OpIdx2 arguments may be set to the special value
+  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
+  /// any commutable operand. If both arguments are set to
+  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
+  /// operands and commutes them if such operands can be found.
+  ///
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned.
+  ///
+  /// Do not call this method for a non-commutable instruction or
+  /// for non-commutable operands.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
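+  ///
+  /// Typical use (sketch; with the default arguments MI is modified in
+  /// place and returned on success):
+  ///     if (MachineInstr *Commuted = TII->commuteInstruction(MI))
+  ///       ... operands of MI have been swapped ...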
+  MachineInstr *
+  commuteInstruction(MachineInstr &MI, bool NewMI = false,
+                     unsigned OpIdx1 = CommuteAnyOperandIndex,
+                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;
+
+  /// Returns true iff the routine could find two commutable operands in the
+  /// given machine instruction.
+  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
+  /// If any of the INPUT values is set to the special value
+  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
+  /// operand, then returns its index in the corresponding argument.
+  /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
+  /// looks for 2 commutable operands.
+  /// If INPUT values refer to some operands of MI, then the method simply
+  /// returns true if the corresponding operands are commutable and returns
+  /// false otherwise.
+  ///
+  /// For example, calling this method this way:
+  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
+  ///     findCommutedOpIndices(MI, Op1, Op2);
+  /// can be interpreted as a query asking to find an operand that would be
+  /// commutable with the operand#1.
+  virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
+                                     unsigned &SrcOpIdx2) const;
+
+  /// A pair composed of a register and a sub-register index.
+  /// Used to give some type checking when modeling Reg:SubReg.
+  struct RegSubRegPair {
+    unsigned Reg;
+    unsigned SubReg;
+
+    RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
+        : Reg(Reg), SubReg(SubReg) {}
+  };
+
+  /// A pair composed of a pair of a register and a sub-register index,
+  /// and another sub-register index.
+  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
+  struct RegSubRegPairAndIdx : RegSubRegPair {
+    unsigned SubIdx;
+
+    RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
+                        unsigned SubIdx = 0)
+        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
+  };
+
+  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
+  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
+  /// flag are not added to this list.
+  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
+  /// two elements:
+  /// - %1:sub1, sub0
+  /// - %2<:0>, sub1
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isRegSequenceLike(). In other words, one has to override
+  /// getRegSequenceLikeInputs for target specific instructions.
+  bool
+  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
+                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
+
+  /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
+  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+  /// - %1:sub1, sub0
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
+  /// False otherwise.
+  ///
+  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isExtractSubregLike(). In other words, one has to override
+  /// getExtractSubregLikeInputs for target specific instructions.
+  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+                              RegSubRegPairAndIdx &InputReg) const;
+
+  /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] BaseReg and \p [out] InsertedReg contain
+  /// the equivalent inputs of INSERT_SUBREG.
+  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+  /// - BaseReg: %0:sub0
+  /// - InsertedReg: %1:sub1, sub3
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
+  /// False otherwise.
+  ///
+  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isInsertSubregLike(). In other words, one has to override
+  /// getInsertSubregLikeInputs for target specific instructions.
+  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+                             RegSubRegPair &BaseReg,
+                             RegSubRegPairAndIdx &InsertedReg) const;
+
+  /// Return true if two machine instructions would produce identical values.
+  /// By default, this is only true when the two instructions
+  /// are deemed identical except for defs. If this function is called when the
+  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
+  /// aggressive checks.
+  virtual bool produceSameValue(const MachineInstr &MI0,
+                                const MachineInstr &MI1,
+                                const MachineRegisterInfo *MRI = nullptr) const;
+
+  /// \returns true if a branch from an instruction with opcode \p BranchOpc
+  /// is capable of jumping to a position \p BrOffset bytes away.
+  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
+                                     int64_t BrOffset) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// \returns The block that branch instruction \p MI jumps to.
+  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// Insert an unconditional indirect branch at the end of \p MBB to \p
+  /// NewDestBB.  \p BrOffset indicates the offset of \p NewDestBB relative to
+  /// the offset of the position to insert the new branch.
+  ///
+  /// \returns The number of bytes added to the block.
+  virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
+                                        MachineBasicBlock &NewDestBB,
+                                        const DebugLoc &DL,
+                                        int64_t BrOffset = 0,
+                                        RegScavenger *RS = nullptr) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// Analyze the branching code at the end of MBB, returning
+  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
+  /// implemented for a target).  Upon success, this returns false and returns
+  /// with the following information in various cases:
+  ///
+  /// 1. If this block ends with no branches (it just falls through to its succ)
+  ///    just return false, leaving TBB/FBB null.
+  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
+  ///    the destination block.
+  /// 3. If this block ends with a conditional branch and it falls through to a
+  ///    successor block, it sets TBB to be the branch destination block and a
+  ///    list of operands that evaluate the condition. These operands can be
+  ///    passed to other TargetInstrInfo methods to create new branches.
+  /// 4. If this block ends with a conditional branch followed by an
+  ///    unconditional branch, it returns the 'true' destination in TBB, the
+  ///    'false' destination in FBB, and a list of operands that evaluate the
+  ///    condition.  These operands can be passed to other TargetInstrInfo
+  ///    methods to create new branches.
+  ///
+  /// Note that removeBranch and insertBranch must be implemented to support
+  /// cases where this method returns success.
+  ///
+  /// If AllowModify is true, then this routine is allowed to modify the basic
+  /// block (e.g. delete instructions after the unconditional branch).
+  ///
+  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+  /// before calling this function.
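+  ///
+  /// Typical use (sketch; TII is the target's TargetInstrInfo):
+  ///     MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+  ///     SmallVector<MachineOperand, 4> Cond;
+  ///     if (!TII->analyzeBranch(MBB, TBB, FBB, Cond))
+  ///       ... TBB, FBB and Cond now describe the block's terminators ...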
+  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                             MachineBasicBlock *&FBB,
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             bool AllowModify = false) const {
+    return true;
+  }
+
+  /// Represents a predicate at the MachineFunction level.  The control flow a
+  /// MachineBranchPredicate represents is:
+  ///
+  ///  Reg = LHS `Predicate` RHS         == ConditionDef
+  ///  if Reg then goto TrueDest else goto FalseDest
+  ///
+  struct MachineBranchPredicate {
+    enum ComparePredicate {
+      PRED_EQ,     // True if two values are equal
+      PRED_NE,     // True if two values are not equal
+      PRED_INVALID // Sentinel value
+    };
+
+    ComparePredicate Predicate = PRED_INVALID;
+    MachineOperand LHS = MachineOperand::CreateImm(0);
+    MachineOperand RHS = MachineOperand::CreateImm(0);
+    MachineBasicBlock *TrueDest = nullptr;
+    MachineBasicBlock *FalseDest = nullptr;
+    MachineInstr *ConditionDef = nullptr;
+
+    /// SingleUseCondition is true if ConditionDef is dead except for the
+    /// branch(es) at the end of the basic block.
+    ///
+    bool SingleUseCondition = false;
+
+    explicit MachineBranchPredicate() = default;
+  };
+
+  /// Analyze the branching code at the end of MBB and parse it into the
+  /// MachineBranchPredicate structure if possible.  Returns false on success
+  /// and true on failure.
+  ///
+  /// If AllowModify is true, then this routine is allowed to modify the basic
+  /// block (e.g. delete instructions after the unconditional branch).
+  ///
+  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
+                                      MachineBranchPredicate &MBP,
+                                      bool AllowModify = false) const {
+    return true;
+  }
+
+  /// Remove the branching code at the end of the specific MBB.
+  /// This is only invoked in cases where AnalyzeBranch returns success. It
+  /// returns the number of instructions that were removed.
+  /// If \p BytesRemoved is non-null, report the change in code size from the
+  /// removed instructions.
+  virtual unsigned removeBranch(MachineBasicBlock &MBB,
+                                int *BytesRemoved = nullptr) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
+  }
+
+  /// Insert branch code into the end of the specified MachineBasicBlock. The
+  /// operands to this method are the same as those returned by AnalyzeBranch.
+  /// This is only invoked in cases where AnalyzeBranch returns success. It
+  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
+  /// report the change in code size from the added instructions.
+  ///
+  /// It is also invoked by tail merging to add unconditional branches in
+  /// cases where AnalyzeBranch doesn't apply because there was no original
+  /// branch to analyze.  At least this much must be implemented, else tail
+  /// merging needs to be disabled.
+  ///
+  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+  /// before calling this function.
+  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                                MachineBasicBlock *FBB,
+                                ArrayRef<MachineOperand> Cond,
+                                const DebugLoc &DL,
+                                int *BytesAdded = nullptr) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
+  }
+
+  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
+                                     MachineBasicBlock *DestBB,
+                                     const DebugLoc &DL,
+                                     int *BytesAdded = nullptr) const {
+    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
+                        BytesAdded);
+  }
+
+  /// Analyze the loop code, returning true if it cannot be understood. Upon
+  /// success, this function returns false and returns information about the
+  /// induction variable and compare instruction used at the end.
+  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
+                           MachineInstr *&CmpInst) const {
+    return true;
+  }
+
+  /// Generate code to reduce the loop iteration by one and check if the loop
+  /// is finished.  Return the value/register of the new loop count.  We need
+  /// this function when peeling off one or more iterations of a loop. This
+  /// function assumes the nth iteration is peeled first.
+  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
+                                   MachineInstr &Cmp,
+                                   SmallVectorImpl<MachineOperand> &Cond,
+                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
+                                   unsigned Iter, unsigned MaxIter) const {
+    llvm_unreachable("Target didn't implement ReduceLoopCount");
+  }
+
+  /// Delete the instruction OldInst and everything after it, replacing it with
+  /// an unconditional branch to NewDest. This is used by the tail merging pass.
+  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                                       MachineBasicBlock *NewDest) const;
+
+  /// Return true if it's legal to split the given basic
+  /// block at the specified instruction (i.e. instruction would be the start
+  /// of a new basic block).
+  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MBBI) const {
+    return true;
+  }
+
+  /// Return true if it's profitable to predicate
+  /// instructions with accumulated instruction latency of "NumCycles"
+  /// of the specified basic block, where the probability of the instructions
+  /// being executed is given by Probability.
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+                                   unsigned ExtraPredCycles,
+                                   BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Second variant of isProfitableToIfCvt. This one
+  /// checks for the case where the two basic blocks from the true and false
+  /// paths of an if-then-else (diamond) are predicated on mutually exclusive
+  /// predicates, where the probability of the true path being taken is given
+  /// by Probability.
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
+                                   unsigned ExtraTCycles,
+                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
+                                   unsigned ExtraFCycles,
+                                   BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Return true if it's profitable for the if-converter to duplicate
+  /// instructions of specified accumulated instruction latencies in the
+  /// specified MBB to enable if-conversion.
+  /// The probability of the instructions being executed is given by
+  /// Probability.
+  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
+                                         unsigned NumCycles,
+                                         BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Return true if it's profitable to unpredicate
+  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
+  /// exclusive predicates.
+  /// e.g.
+  ///   subeq  r0, r1, #1
+  ///   addne  r0, r1, #1
+  /// =>
+  ///   sub    r0, r1, #1
+  ///   addne  r0, r1, #1
+  ///
+  /// This may be profitable if conditional instructions are always executed.
+  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+                                         MachineBasicBlock &FMBB) const {
+    return false;
+  }
+
+  /// Return true if it is possible to insert a select
+  /// instruction that chooses between TrueReg and FalseReg based on the
+  /// condition code in Cond.
+  ///
+  /// When successful, also return the latency in cycles from TrueReg,
+  /// FalseReg, and Cond to the destination register. In most cases, a select
+  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
+  ///
+  /// Some x86 implementations have 2-cycle cmov instructions.
+  ///
+  /// @param MBB         Block where select instruction would be inserted.
+  /// @param Cond        Condition returned by AnalyzeBranch.
+  /// @param TrueReg     Virtual register to select when Cond is true.
+  /// @param FalseReg    Virtual register to select when Cond is false.
+  /// @param CondCycles  Latency from Cond+Branch to select output.
+  /// @param TrueCycles  Latency from TrueReg to select output.
+  /// @param FalseCycles Latency from FalseReg to select output.
+  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
+                               ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+                               unsigned FalseReg, int &CondCycles,
+                               int &TrueCycles, int &FalseCycles) const {
+    return false;
+  }
+
+  /// Insert a select instruction into MBB before I that will copy TrueReg to
+  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
+  ///
+  /// This function can only be called after canInsertSelect() returned true.
+  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
+  /// that the same flags or registers required by Cond are available at the
+  /// insertion point.
+  ///
+  /// @param MBB      Block where select instruction should be inserted.
+  /// @param I        Insertion point.
+  /// @param DL       Source location for debugging.
+  /// @param DstReg   Virtual register to be defined by select instruction.
+  /// @param Cond     Condition as computed by AnalyzeBranch.
+  /// @param TrueReg  Virtual register to copy when Cond is true.
+  /// @param FalseReg Virtual register to copy when Cond is false.
+  virtual void insertSelect(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator I, const DebugLoc &DL,
+                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
+                            unsigned TrueReg, unsigned FalseReg) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
+  }
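+
+  /// Illustrative client sketch (hedged; InsertPt, DL, and the registers are
+  /// hypothetical client values): insertSelect must be guarded by
+  /// canInsertSelect.
+  ///
+  ///   int CondCycles, TrueCycles, FalseCycles;
+  ///   if (TII->canInsertSelect(MBB, Cond, TrueReg, FalseReg, CondCycles,
+  ///                            TrueCycles, FalseCycles))
+  ///     TII->insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);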
+
+  /// Analyze the given select instruction, returning true if
+  /// it cannot be understood. It is assumed that MI->isSelect() is true.
+  ///
+  /// When successful, return the controlling condition and the operands that
+  /// determine the true and false result values.
+  ///
+  ///   Result = SELECT Cond, TrueOp, FalseOp
+  ///
+  /// Some targets can optimize select instructions, for example by predicating
+  /// the instruction defining one of the operands. Such targets should set
+  /// Optimizable.
+  ///
+  /// @param         MI Select instruction to analyze.
+  /// @param Cond    Condition controlling the select.
+  /// @param TrueOp  Operand number of the value selected when Cond is true.
+  /// @param FalseOp Operand number of the value selected when Cond is false.
+  /// @param Optimizable Returned as true if MI is optimizable.
+  /// @returns False on success.
+  virtual bool analyzeSelect(const MachineInstr &MI,
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             unsigned &TrueOp, unsigned &FalseOp,
+                             bool &Optimizable) const {
+    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
+    return true;
+  }
+
+  /// Given a select instruction that was understood by
+  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
+  /// merging it with one of its operands. Returns NULL on failure.
+  ///
+  /// When successful, returns the new select instruction. The client is
+  /// responsible for deleting MI.
+  ///
+  /// If both sides of the select can be optimized, PreferFalse is used to pick
+  /// a side.
+  ///
+  /// @param MI          Optimizable select instruction.
+  /// @param NewMIs      Set that records all MIs in the basic block up to \p
+  /// MI. It has to be updated with any newly created MI or deleted ones.
+  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
+  /// @returns Optimized instruction or NULL.
+  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
+                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
+                                       bool PreferFalse = false) const {
+    // This function must be implemented if Optimizable is ever set.
+    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
+  }
+
+  /// Emit instructions to copy a pair of physical registers.
+  ///
+  /// This function should support copies within any legal register class as
+  /// well as any cross-class copies created during instruction selection.
+  ///
+  /// The source and destination registers may overlap, which may require a
+  /// careful implementation when multiple copy instructions are required for
+  /// large registers. See for example the ARM target.
+  virtual void copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
+  }
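+
+  /// A minimal sketch of a target override, assuming a hypothetical XYZ
+  /// target whose MOVrr instruction copies within a single register class:
+  ///
+  ///   void XYZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+  ///                                  MachineBasicBlock::iterator MI,
+  ///                                  const DebugLoc &DL, unsigned DestReg,
+  ///                                  unsigned SrcReg, bool KillSrc) const {
+  ///     BuildMI(MBB, MI, DL, get(XYZ::MOVrr), DestReg)
+  ///         .addReg(SrcReg, getKillRegState(KillSrc));
+  ///   }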
+
+  /// Store the specified register of the given register class to the specified
+  /// stack frame index. The store instruction is to be added to the given
+  /// machine basic block before the specified machine instruction. If isKill
+  /// is true, the register operand is the last use and must be marked kill.
+  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI,
+                                   unsigned SrcReg, bool isKill, int FrameIndex,
+                                   const TargetRegisterClass *RC,
+                                   const TargetRegisterInfo *TRI) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::storeRegToStackSlot!");
+  }
+
+  /// Load the specified register of the given register class from the specified
+  /// stack frame index. The load instruction is to be added to the given
+  /// machine basic block before the specified machine instruction.
+  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator MI,
+                                    unsigned DestReg, int FrameIndex,
+                                    const TargetRegisterClass *RC,
+                                    const TargetRegisterInfo *TRI) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::loadRegFromStackSlot!");
+  }
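+
+  /// Hedged sketch of the bodies of the two hooks above for a hypothetical
+  /// XYZ target with reg+imm store/load opcodes STRri/LDRri:
+  ///
+  ///   // storeRegToStackSlot:
+  ///   BuildMI(MBB, MI, DebugLoc(), get(XYZ::STRri))
+  ///       .addReg(SrcReg, getKillRegState(isKill))
+  ///       .addFrameIndex(FrameIndex)
+  ///       .addImm(0);
+  ///   // loadRegFromStackSlot:
+  ///   BuildMI(MBB, MI, DebugLoc(), get(XYZ::LDRri), DestReg)
+  ///       .addFrameIndex(FrameIndex)
+  ///       .addImm(0);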
+
+  /// This function is called for all pseudo instructions
+  /// that remain after register allocation. Many pseudo instructions are
+  /// created to help register allocation. This is the place to convert them
+  /// into real instructions. The target can edit MI in place, or it can insert
+  /// new instructions and erase MI. The function should return true if
+  /// anything was changed.
+  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
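+
+  /// Sketch of a typical override (XYZ::RET_PSEUDO and XYZ::RET are
+  /// hypothetical opcodes):
+  ///
+  ///   bool XYZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+  ///     if (MI.getOpcode() != XYZ::RET_PSEUDO)
+  ///       return false;
+  ///     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(XYZ::RET));
+  ///     MI.eraseFromParent();
+  ///     return true;
+  ///   }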
+
+  /// Check whether the target can fold a load that feeds a subreg operand
+  /// (or a subreg operand that feeds a store).
+  /// For example, X86 may want to return true if it can fold
+  /// movl (%esp), %eax
+  /// subb, %al, ...
+  /// Into:
+  /// subb (%esp), ...
+  ///
+  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
+  /// reject subregs - but since this behavior used to be enforced in the
+  /// target-independent code, moving this responsibility to the targets
+  /// has the potential of causing nasty silent breakage in out-of-tree targets.
+  virtual bool isSubregFoldable() const { return false; }
+
+  /// Attempt to fold a load or store of the specified stack
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned.
+  /// The new instruction is inserted before MI, and the client is responsible
+  /// for removing the old instruction.
+  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+                                  int FrameIndex,
+                                  LiveIntervals *LIS = nullptr) const;
+
+  /// Same as the previous version except it allows folding of any load and
+  /// store from / to any address, not just from a specific stack slot.
+  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+                                  MachineInstr &LoadMI,
+                                  LiveIntervals *LIS = nullptr) const;
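+
+  /// Illustrative client use (sketch; Ops and FrameIndex come from the
+  /// caller), showing the contract that the caller removes the old
+  /// instruction:
+  ///
+  ///   if (MachineInstr *FoldedMI =
+  ///           TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
+  ///     (void)FoldedMI; // New instruction was inserted before MI.
+  ///     MI.eraseFromParent();
+  ///   }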
+
+  /// Return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in \p Root. All potential patterns are
+  /// returned in the \p Patterns vector. Patterns should be sorted in priority
+  /// order since the pattern evaluator stops checking as soon as it finds a
+  /// faster sequence.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param Patterns - Vector of possible combination patterns
+  virtual bool getMachineCombinerPatterns(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
+
+  /// Return true when a code sequence can improve throughput. It
+  /// should be called only for instructions in loops.
+  /// \param Pattern - combiner pattern
+  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
+
+  /// Return true if the input \p Inst is part of a chain of dependent ops
+  /// that are suitable for reassociation, otherwise return false.
+  /// If the instruction's operands must be commuted to have a previous
+  /// instruction of the same type define the first source operand, \p Commuted
+  /// will be set to true.
+  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
+
+  /// Return true when \p Inst is both associative and commutative.
+  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
+    return false;
+  }
+
+  /// Return true when \p Inst has reassociable operands in the same \p MBB.
+  virtual bool hasReassociableOperands(const MachineInstr &Inst,
+                                       const MachineBasicBlock *MBB) const;
+
+  /// Return true when \p Inst has a reassociable sibling.
+  bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
+
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence. The client
+  /// has to decide whether the actual replacement is beneficial or not.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param Pattern - Combination pattern for Root
+  /// \param InsInstrs - Vector of new instructions that implement \p Pattern
+  /// \param DelInstrs - Old instructions, including Root, that could be
+  /// replaced by InsInstrs
+  /// \param InstrIdxForVirtReg - map of virtual register to instruction in
+  /// InsInstrs that defines it
+  virtual void genAlternativeCodeSequence(
+      MachineInstr &Root, MachineCombinerPattern Pattern,
+      SmallVectorImpl<MachineInstr *> &InsInstrs,
+      SmallVectorImpl<MachineInstr *> &DelInstrs,
+      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
+  /// Attempt to reassociate \p Root and \p Prev according to \p Pattern to
+  /// reduce critical path length.
+  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
+                      MachineCombinerPattern Pattern,
+                      SmallVectorImpl<MachineInstr *> &InsInstrs,
+                      SmallVectorImpl<MachineInstr *> &DelInstrs,
+                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
+  /// This is an architecture-specific helper function of reassociateOps.
+  /// Set special operand attributes for new instructions after reassociation.
+  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
+                                     MachineInstr &NewMI1,
+                                     MachineInstr &NewMI2) const {}
+
+  /// Return true when a target supports MachineCombiner.
+  virtual bool useMachineCombiner() const { return false; }
+
+  /// Return true if the given SDNode can be copied during scheduling
+  /// even if it has glue.
+  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
+
+  /// Remember what registers the specified instruction uses and modifies.
+  virtual void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
+                                BitVector &UsedRegs,
+                                const TargetRegisterInfo *TRI) const;
+
+protected:
+  /// Target-dependent implementation for foldMemoryOperand.
+  /// Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *
+  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+                        ArrayRef<unsigned> Ops,
+                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
+                        LiveIntervals *LIS = nullptr) const {
+    return nullptr;
+  }
+
+  /// Target-dependent implementation for foldMemoryOperand.
+  /// Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+      LiveIntervals *LIS = nullptr) const {
+    return nullptr;
+  }
+
+  /// \brief Target-dependent implementation of getRegSequenceInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isRegSequenceLike().
+  ///
+  /// \see TargetInstrInfo::getRegSequenceInputs.
+  virtual bool getRegSequenceLikeInputs(
+      const MachineInstr &MI, unsigned DefIdx,
+      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
+    return false;
+  }
+
+  /// \brief Target-dependent implementation of getExtractSubregInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isExtractSubregLike().
+  ///
+  /// \see TargetInstrInfo::getExtractSubregInputs.
+  virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
+                                          unsigned DefIdx,
+                                          RegSubRegPairAndIdx &InputReg) const {
+    return false;
+  }
+
+  /// \brief Target-dependent implementation of getInsertSubregInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isInsertSubregLike().
+  ///
+  /// \see TargetInstrInfo::getInsertSubregInputs.
+  virtual bool
+  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
+                            RegSubRegPair &BaseReg,
+                            RegSubRegPairAndIdx &InsertedReg) const {
+    return false;
+  }
+
+public:
+  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
+  /// (e.g. stack), return the corresponding address space.
+  virtual unsigned
+  getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const {
+    return 0;
+  }
+
+  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
+  /// a store or a load and a store into two or more instructions. If this is
+  /// possible, returns true and returns the new instructions by reference.
+  virtual bool
+  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
+                      bool UnfoldLoad, bool UnfoldStore,
+                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
+    return false;
+  }
+
+  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                                   SmallVectorImpl<SDNode *> &NewNodes) const {
+    return false;
+  }
+
+  /// Returns the opcode of the would-be new
+  /// instruction after load / store are unfolded from an instruction of the
+  /// specified opcode. It returns zero if the specified unfolding is not
+  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
+  /// index of the operand which will hold the register holding the loaded
+  /// value.
+  virtual unsigned
+  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
+                             unsigned *LoadRegIndex = nullptr) const {
+    return 0;
+  }
+
+  /// This is used by the pre-regalloc scheduler to determine if two loads are
+  /// loading from the same base address. It should only return true if the base
+  /// pointers are the same and the only difference between the two addresses
+  /// is the offset. It also returns the offsets by reference.
+  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+                                       int64_t &Offset1,
+                                       int64_t &Offset2) const {
+    return false;
+  }
+
+  /// This is used by the pre-regalloc scheduler to determine (in conjunction
+  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
+  /// On some targets if two loads are loading from
+  /// addresses in the same cache line, it's better if they are scheduled
+  /// together. This function takes two integers that represent the load offsets
+  /// from the common base address. It returns true if it decides it's desirable
+  /// to schedule the two loads together. "NumLoads" is the number of loads that
+  /// have already been scheduled after Load1.
+  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                       int64_t Offset1, int64_t Offset2,
+                                       unsigned NumLoads) const {
+    return false;
+  }
+
+  /// Get the base register and byte offset of an instruction that reads/writes
+  /// memory.
+  virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
+                                     int64_t &Offset,
+                                     const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// Return true if the instruction contains a base register and offset. If
+  /// true, the function also sets the operand position in the instruction
+  /// for the base register and offset.
+  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
+                                        unsigned &BasePos,
+                                        unsigned &OffsetPos) const {
+    return false;
+  }
+
+  /// If the instruction is an increment of a constant value, return true and
+  /// set \p Value to the amount.
+  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
+    return false;
+  }
+
+  /// Returns true if the two given memory operations should be scheduled
+  /// adjacent. Note that you have to add:
+  ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+  /// or
+  ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
+  /// to TargetPassConfig::createMachineScheduler() to have an effect.
+  virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
+                                   MachineInstr &SecondLdSt, unsigned BaseReg2,
+                                   unsigned NumLoads) const {
+    llvm_unreachable("target did not implement shouldClusterMemOps()");
+  }
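+
+  /// For example, a target's pass config (hypothetical XYZPassConfig) could
+  /// enable load clustering like this:
+  ///
+  ///   ScheduleDAGInstrs *
+  ///   XYZPassConfig::createMachineScheduler(MachineSchedContext *C) const {
+  ///     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
+  ///     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+  ///     return DAG;
+  ///   }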
+
+  /// Reverses the branch condition of the specified condition list,
+  /// returning false on success and true if it cannot be reversed.
+  virtual bool
+  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+    return true;
+  }
+
+  /// Insert a noop into the instruction stream at the specified point.
+  virtual void insertNoop(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator MI) const;
+
+  /// Set \p NopInst to the target's preferred noop instruction.
+  virtual void getNoop(MCInst &NopInst) const;
+
+  /// Return true for post-incremented instructions.
+  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
+
+  /// Returns true if the instruction is already predicated.
+  virtual bool isPredicated(const MachineInstr &MI) const { return false; }
+
+  /// Returns true if the instruction is a
+  /// terminator instruction that has not been predicated.
+  virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
+
+  /// Returns true if MI is an unconditional tail call.
+  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
+    return false;
+  }
+
+  /// Returns true if the tail call can be made conditional on BranchCond.
+  virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
+                                          const MachineInstr &TailCall) const {
+    return false;
+  }
+
+  /// Replace the conditional branch in MBB with a conditional tail call.
+  virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
+                                         SmallVectorImpl<MachineOperand> &Cond,
+                                         const MachineInstr &TailCall) const {
+    llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
+  }
+
+  /// Convert the instruction into a predicated instruction.
+  /// It returns true if the operation was successful.
+  virtual bool PredicateInstruction(MachineInstr &MI,
+                                    ArrayRef<MachineOperand> Pred) const;
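+
+  /// Typical if-converter usage (sketch; Cond usually comes from
+  /// analyzeBranch):
+  ///
+  ///   if (!TII->isPredicated(MI) && TII->isPredicable(MI))
+  ///     TII->PredicateInstruction(MI, Cond);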
+
+  /// Returns true if the first specified predicate
+  /// subsumes the second, e.g. GE subsumes GT.
+  virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+                                 ArrayRef<MachineOperand> Pred2) const {
+    return false;
+  }
+
+  /// If the specified instruction defines any predicate
+  /// or condition code register(s) used for predication, returns true as well
+  /// as the definition predicate(s) by reference.
+  virtual bool DefinesPredicate(MachineInstr &MI,
+                                std::vector<MachineOperand> &Pred) const {
+    return false;
+  }
+
+  /// Return true if the specified instruction can be predicated.
+  /// By default, this returns true for every instruction with a
+  /// PredicateOperand.
+  virtual bool isPredicable(const MachineInstr &MI) const {
+    return MI.getDesc().isPredicable();
+  }
+
+  /// Return true if it's safe to move a machine
+  /// instruction that defines the specified register class.
+  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+    return true;
+  }
+
+  /// Test if the given instruction should be considered a scheduling boundary.
+  /// This primarily includes labels and terminators.
+  virtual bool isSchedulingBoundary(const MachineInstr &MI,
+                                    const MachineBasicBlock *MBB,
+                                    const MachineFunction &MF) const;
+
+  /// Measure the specified inline asm to determine an approximation of its
+  /// length.
+  virtual unsigned getInlineAsmLength(const char *Str,
+                                      const MCAsmInfo &MAI) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions before register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+                               const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions before register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetMIHazardRecognizer(const InstrItineraryData *,
+                                 const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions after register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
+                                     const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer for use by non-scheduling
+  /// passes.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
+    return nullptr;
+  }
+
+  /// Provide a global flag for disabling the PreRA hazard recognizer that
+  /// targets may choose to honor.
+  bool usePreRAHazardRecognizer() const;
+
+  /// For a comparison instruction, return the source registers
+  /// in SrcReg and SrcReg2 if it has two register operands, and the mask and
+  /// value it compares against in Mask and Value. Return true if the
+  /// comparison instruction can be analyzed.
+  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+                              unsigned &SrcReg2, int &Mask, int &Value) const {
+    return false;
+  }
+
+  /// See if the comparison instruction can be converted
+  /// into something more efficient. E.g., on ARM most instructions can set the
+  /// flags register, obviating the need for a separate CMP.
+  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+                                    unsigned SrcReg2, int Mask, int Value,
+                                    const MachineRegisterInfo *MRI) const {
+    return false;
+  }
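+
+  /// Peephole-style usage sketch pairing the two hooks above (CmpMI and MRI
+  /// are client values):
+  ///
+  ///   unsigned SrcReg, SrcReg2;
+  ///   int Mask, Value;
+  ///   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value))
+  ///     TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI);
+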
+  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
+
+  /// Try to remove the load by folding it to a register operand at the use.
+  /// We fold the load instruction if and only if the
+  /// def and use are in the same BB. We only look at one load and see
+  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+  /// defined by the load we are trying to fold. DefMI returns the machine
+  /// instruction that defines FoldAsLoadDefReg, and the function returns
+  /// the machine instruction generated due to folding.
+  virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
+                                          const MachineRegisterInfo *MRI,
+                                          unsigned &FoldAsLoadDefReg,
+                                          MachineInstr *&DefMI) const {
+    return nullptr;
+  }
+
+  /// 'Reg' is known to be defined by a move immediate instruction,
+  /// try to fold the immediate into the use instruction.
+  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
+  /// then the caller may assume that DefMI has been erased from its parent
+  /// block. The caller may assume that it will not be erased by this
+  /// function otherwise.
+  virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
+                             unsigned Reg, MachineRegisterInfo *MRI) const {
+    return false;
+  }
+
+  /// Return the number of micro-operations the given machine
+  /// instruction will be decoded to on the target CPU. The itinerary's
+  /// IssueWidth is the number of microops that can be dispatched each
+  /// cycle. An instruction with zero microops takes no dispatch resources.
+  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+                                  const MachineInstr &MI) const;
+
+  /// Return true for pseudo instructions that don't consume any
+  /// machine resources in their current form. These are common cases that the
+  /// scheduler should consider free, rather than conservatively handling them
+  /// as instructions with no itinerary.
+  bool isZeroCost(unsigned Opcode) const {
+    return Opcode <= TargetOpcode::COPY;
+  }
+
+  virtual int getOperandLatency(const InstrItineraryData *ItinData,
+                                SDNode *DefNode, unsigned DefIdx,
+                                SDNode *UseNode, unsigned UseIdx) const;
+
+  /// Compute and return the use operand latency of a given pair of def and use.
+  /// In most cases, the static scheduling itinerary is enough to determine the
+  /// operand latency, but that may not be possible for instructions with a
+  /// variable number of defs / uses.
+  ///
+  /// This is a raw interface to the itinerary that may be directly overridden
+  /// by a target. Use computeOperandLatency to get the best estimate of
+  /// latency.
+  virtual int getOperandLatency(const InstrItineraryData *ItinData,
+                                const MachineInstr &DefMI, unsigned DefIdx,
+                                const MachineInstr &UseMI,
+                                unsigned UseIdx) const;
+
+  /// Compute the instruction latency of a given instruction.
+  /// If the instruction has higher cost when predicated, it's returned via
+  /// PredCost.
+  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
+                                   const MachineInstr &MI,
+                                   unsigned *PredCost = nullptr) const;
+
+  virtual unsigned getPredicationCost(const MachineInstr &MI) const;
+
+  virtual int getInstrLatency(const InstrItineraryData *ItinData,
+                              SDNode *Node) const;
+
+  /// Return the default expected latency for a def based on its opcode.
+  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
+                             const MachineInstr &DefMI) const;
+
+  int computeDefOperandLatency(const InstrItineraryData *ItinData,
+                               const MachineInstr &DefMI) const;
+
+  /// Return true if this opcode has high latency to its result.
+  virtual bool isHighLatencyDef(int opc) const { return false; }
+
+  /// Compute operand latency between a def of 'Reg'
+  /// and a use in the current loop. Return true if the target considered
+  /// it 'high'. This is used by optimization passes such as machine LICM to
+  /// determine whether it makes sense to hoist an instruction out even in a
+  /// high register pressure situation.
+  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
+                                     const MachineRegisterInfo *MRI,
+                                     const MachineInstr &DefMI, unsigned DefIdx,
+                                     const MachineInstr &UseMI,
+                                     unsigned UseIdx) const {
+    return false;
+  }
+
+  /// Compute operand latency of a def of 'Reg'. Return true
+  /// if the target considered it 'low'.
+  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+                                const MachineInstr &DefMI,
+                                unsigned DefIdx) const;
+
+  /// Perform target-specific instruction verification.
+  virtual bool verifyInstruction(const MachineInstr &MI,
+                                 StringRef &ErrInfo) const {
+    return true;
+  }
+
+  /// Return the current execution domain and bit mask of
+  /// possible domains for instruction.
+  ///
+  /// Some micro-architectures have multiple execution domains, and multiple
+  /// opcodes that perform the same operation in different domains.  For
+  /// example, the x86 architecture provides the por, orps, and orpd
+  /// instructions that all do the same thing.  There is a latency penalty if a
+  /// register is written in one domain and read in another.
+  ///
+  /// This function returns a pair (domain, mask) containing the execution
+  /// domain of MI, and a bit mask of possible domains.  The setExecutionDomain
+  /// function can be used to change the opcode to one of the domains in the
+  /// bit mask.  Instructions whose execution domain can't be changed should
+  /// return a 0 mask.
+  ///
+  /// The execution domain numbers don't have any special meaning except domain
+  /// 0 is used for instructions that are not associated with any interesting
+  /// execution domain.
+  ///
+  virtual std::pair<uint16_t, uint16_t>
+  getExecutionDomain(const MachineInstr &MI) const {
+    return std::make_pair(0, 0);
+  }
+
+  /// Change the opcode of MI to execute in Domain.
+  ///
+  /// The bit (1 << Domain) must be set in the mask returned from
+  /// getExecutionDomain(MI).
+  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
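+
+  /// Combined usage sketch, in the style of an execution-domain fixup pass
+  /// (PreferredDomain is a hypothetical client value):
+  ///
+  ///   std::pair<uint16_t, uint16_t> DomMask = TII->getExecutionDomain(MI);
+  ///   if (DomMask.second & (1u << PreferredDomain))
+  ///     TII->setExecutionDomain(MI, PreferredDomain);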
+
+  /// Returns the preferred minimum clearance
+  /// before an instruction with an unwanted partial register update.
+  ///
+  /// Some instructions only write part of a register, and implicitly need to
+  /// read the other parts of the register.  This may cause unwanted stalls
+  /// preventing otherwise unrelated instructions from executing in parallel in
+  /// an out-of-order CPU.
+  ///
+  /// For example, the x86 instruction cvtsi2ss writes its result to bits
+  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
+  /// the instruction needs to wait for the old value of the register to become
+  /// available:
+  ///
+  ///   addps %xmm1, %xmm0
+  ///   movaps %xmm0, (%rax)
+  ///   cvtsi2ss %rbx, %xmm0
+  ///
+  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
+  /// instruction before it can issue, even though the high bits of %xmm0
+  /// probably aren't needed.
+  ///
+  /// This hook returns the preferred clearance before MI, measured in
+  /// instructions.  Other defs of MI's operand OpNum are avoided in the last N
+  /// instructions before MI.  It should only return a positive value for
+  /// unwanted dependencies.  If the old bits of the defined register have
+  /// useful values, or if MI is determined to otherwise read the dependency,
+  /// the hook should return 0.
+  ///
+  /// The unwanted dependency may be handled by:
+  ///
+  /// 1. Allocating the same register for an MI def and use.  That makes the
+  ///    unwanted dependency identical to a required dependency.
+  ///
+  /// 2. Allocating a register for the def that has no defs in the previous N
+  ///    instructions.
+  ///
+  /// 3. Calling breakPartialRegDependency() with the same arguments.  This
+  ///    allows the target to insert a dependency breaking instruction.
+  ///
+  virtual unsigned
+  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
+                               const TargetRegisterInfo *TRI) const {
+    // The default implementation returns 0 for no partial register dependency.
+    return 0;
+  }
+
+  /// \brief Return the minimum clearance before an instruction that reads an
+  /// unused register.
+  ///
+  /// For example, AVX instructions may copy part of a register operand into
+  /// the unused high bits of the destination register.
+  ///
+  ///   vcvtsi2sdq %rax, undef %xmm0, %xmm14
+  ///
+  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
+  /// false dependence on any previous write to %xmm0.
+  ///
+  /// This hook works similarly to getPartialRegUpdateClearance, except that it
+  /// does not take an operand index. Instead, it sets \p OpNum to the index of
+  /// the unused register.
+  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
+                                        const TargetRegisterInfo *TRI) const {
+    // The default implementation returns 0 for no undef register dependency.
+    return 0;
+  }
+
+  /// Insert a dependency-breaking instruction
+  /// before MI to eliminate an unwanted dependency on OpNum.
+  ///
+  /// If it wasn't possible to avoid a def in the last N instructions before MI
+  /// (see getPartialRegUpdateClearance), this hook will be called to break the
+  /// unwanted dependency.
+  ///
+  /// On x86, an xorps instruction can be used as a dependency breaker:
+  ///
+  ///   addps %xmm1, %xmm0
+  ///   movaps %xmm0, (%rax)
+  ///   xorps %xmm0, %xmm0
+  ///   cvtsi2ss %rbx, %xmm0
+  ///
+  /// An <imp-kill> operand should be added to MI if an instruction was
+  /// inserted.  This ties the instructions together in the post-ra scheduler.
+  ///
+  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
+                                         const TargetRegisterInfo *TRI) const {}
+
+  /// Create machine specific model for scheduling.
+  virtual DFAPacketizer *
+  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
+    return nullptr;
+  }
+
+  /// Sometimes, it is possible for the target
+  /// to tell, even without aliasing information, that two MIs access different
+  /// memory addresses. This function returns true if two MIs access different
+  /// memory addresses and false otherwise.
+  ///
+  /// Assumes any physical registers used to compute addresses have the same
+  /// value for both instructions. (This is the most useful assumption for
+  /// post-RA scheduling.)
+  ///
+  /// See also MachineInstr::mayAlias, which is implemented on top of this
+  /// function.
+  virtual bool
+  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+                                  AliasAnalysis *AA = nullptr) const {
+    assert((MIa.mayLoad() || MIa.mayStore()) &&
+           "MIa must load from or modify a memory location");
+    assert((MIb.mayLoad() || MIb.mayStore()) &&
+           "MIb must load from or modify a memory location");
+    return false;
+  }
+
+  /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
+  /// which is a heuristic used for CSE'ing phys reg defs.
+  virtual unsigned getMachineCSELookAheadLimit() const {
+    // The default lookahead is small to prevent unprofitable quadratic
+    // behavior.
+    return 5;
+  }
+
+  /// Return an array that contains the ids of the target indices (used for the
+  /// TargetIndex machine operand) and their names.
+  ///
+  /// MIR Serialization is able to serialize only the target indices that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<int, const char *>>
+  getSerializableTargetIndices() const {
+    return None;
+  }
+
+  /// Decompose the machine operand's target flags into two values - the direct
+  /// target flag value and any of bit flags that are applied.
+  virtual std::pair<unsigned, unsigned>
+  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
+    return std::make_pair(0u, 0u);
+  }
+
+  /// Return an array that contains the direct target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableDirectMachineOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Return an array that contains the bitmask target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableBitmaskMachineOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Return an array that contains the MMO target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the MMO target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+  getSerializableMachineMemOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Determines whether \p Inst is a tail call instruction. Override this
+  /// method on targets that do not properly set MCID::Return and MCID::Call on
+  /// tail call instructions.
+  virtual bool isTailCall(const MachineInstr &Inst) const {
+    return Inst.isReturn() && Inst.isCall();
+  }
+
+  /// True if the instruction is bound to the top of its basic block and no
+  /// other instructions shall be inserted before it. This can be implemented
+  /// to prevent the register allocator from inserting spills before such
+  /// instructions.
+  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
+    return false;
+  }
+
+  /// \brief Describes the number of instructions that it will take to call and
+  /// construct a frame for a given outlining candidate.
+  struct MachineOutlinerInfo {
+    /// Number of instructions to call an outlined function for this candidate.
+    unsigned CallOverhead;
+
+    /// \brief Number of instructions to construct an outlined function frame
+    /// for this candidate.
+    unsigned FrameOverhead;
+
+    /// \brief Represents the specific instructions that must be emitted to
+    /// construct a call to this candidate.
+    unsigned CallConstructionID;
+
+    /// \brief Represents the specific instructions that must be emitted to
+    /// construct a frame for this candidate's outlined function.
+    unsigned FrameConstructionID;
+
+    MachineOutlinerInfo() {}
+    MachineOutlinerInfo(unsigned CallOverhead, unsigned FrameOverhead,
+                        unsigned CallConstructionID,
+                        unsigned FrameConstructionID)
+        : CallOverhead(CallOverhead), FrameOverhead(FrameOverhead),
+          CallConstructionID(CallConstructionID),
+          FrameConstructionID(FrameConstructionID) {}
+  };
+
+  /// \brief Returns a \p MachineOutlinerInfo struct containing target-specific
+  /// information for a set of outlining candidates.
+  virtual MachineOutlinerInfo getOutlininingCandidateInfo(
+      std::vector<
+          std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
+          &RepeatedSequenceLocs) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::getOutlininingCandidateInfo!");
+  }
+
+  /// Represents how an instruction should be mapped by the outliner.
+  /// \p Legal instructions are those which are safe to outline.
+  /// \p Illegal instructions are those which cannot be outlined.
+  /// \p Invisible instructions are instructions which can be outlined, but
+  /// shouldn't actually impact the outlining result.
+  enum MachineOutlinerInstrType { Legal, Illegal, Invisible };
+
+  /// Returns how or if \p MI should be outlined.
+  virtual MachineOutlinerInstrType
+  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::getOutliningType!");
+  }
+
+  /// \brief Returns target-defined flags defining properties of the MBB for
+  /// the outliner.
+  virtual unsigned getMachineOutlinerMBBFlags(MachineBasicBlock &MBB) const {
+    return 0x0;
+  }
+
+  /// Insert a custom epilogue for outlined functions.
+  /// This may be empty, in which case no epilogue or return statement will be
+  /// emitted.
+  virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
+                                      MachineFunction &MF,
+                                      const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
+  }
+
+  /// Insert a call to an outlined function into the program.
+  /// Returns an iterator to the spot where we inserted the call. This must be
+  /// implemented by the target.
+  virtual MachineBasicBlock::iterator
+  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
+                     MachineBasicBlock::iterator &It, MachineFunction &MF,
+                     const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
+  }
+
+  /// Insert a custom prologue for outlined functions.
+  /// This may be empty, in which case no prologue will be emitted.
+  virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
+                                      MachineFunction &MF,
+                                      const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
+  }
+
+  /// Return true if the function can safely be outlined from.
+  /// A function \p MF is considered safe for outlining if an outlined function
+  /// produced from its instructions will produce a program with the same
+  /// output for any set of given inputs.
+  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
+                                           bool OutlineFromLinkOnceODRs) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
+  }
+
+private:
+  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
+  unsigned CatchRetOpcode;
+  unsigned ReturnOpcode;
+};
+
+/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
+template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
+  using RegInfo = DenseMapInfo<unsigned>;
+
+  static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
+    return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
+                                          RegInfo::getEmptyKey());
+  }
+
+  static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
+    return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
+                                          RegInfo::getTombstoneKey());
+  }
+
+  /// \brief Reuse getHashValue implementation from
+  /// std::pair<unsigned, unsigned>.
+  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
+    std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
+    return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
+  }
+
+  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
+                      const TargetInstrInfo::RegSubRegPair &RHS) {
+    return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
+           RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
+  }
+};
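+
+// With the DenseMapInfo specialization above, RegSubRegPair can serve
+// directly as a DenseMap key, e.g. (illustrative sketch; Reg, SubReg and
+// DefMI are client values):
+//
+//   DenseMap<TargetInstrInfo::RegSubRegPair, MachineInstr *> DefMap;
+//   DefMap[TargetInstrInfo::RegSubRegPair(Reg, SubReg)] = &DefMI;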
+
+} // end namespace llvm
+
+#endif // LLVM_TARGET_TARGETINSTRINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
new file mode 100644
index 0000000..483223a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
@@ -0,0 +1,3615 @@
+//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM code to machine code.  This has three
+/// main components:
+///
+///  1. Which ValueTypes are natively supported by the target.
+///  2. Which operations are supported for supported ValueTypes.
+///  3. Cost thresholds for alternative implementations of certain operations.
+///
+/// In addition it has a few other components, like information about FP
+/// immediates.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERING_H
+#define LLVM_CODEGEN_TARGETLOWERING_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BranchProbability;
+class CCState;
+class CCValAssign;
+class Constant;
+class FastISel;
+class FunctionLoweringInfo;
+class GlobalValue;
+class IntrinsicInst;
+struct KnownBits;
+class LLVMContext;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineJumpTableInfo;
+class MachineLoop;
+class MachineRegisterInfo;
+class MCContext;
+class MCExpr;
+class Module;
+class TargetRegisterClass;
+class TargetLibraryInfo;
+class TargetRegisterInfo;
+class Value;
+
+namespace Sched {
+
+  enum Preference {
+    None,             // No preference
+    Source,           // Follow source order.
+    RegPressure,      // Scheduling for lowest register pressure.
+    Hybrid,           // Scheduling for both latency and register pressure.
+    ILP,              // Scheduling for ILP in low register pressure mode.
+    VLIW              // Scheduling for VLIW targets.
+  };
+
+} // end namespace Sched
+
+/// This base class for TargetLowering contains the SelectionDAG-independent
+/// parts that can be used from the rest of CodeGen.
+class TargetLoweringBase {
+public:
+  /// This enum indicates whether operations are valid for a target, and if not,
+  /// what action should be used to make them valid.
+  enum LegalizeAction : uint8_t {
+    Legal,      // The target natively supports this operation.
+    Promote,    // This operation should be executed in a larger type.
+    Expand,     // Try to expand this to other ops, otherwise use a libcall.
+    LibCall,    // Don't try to expand this to other ops, always use a libcall.
+    Custom      // Use the LowerOperation hook to implement custom lowering.
+  };
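+
+  /// Targets typically record these actions in their TargetLowering
+  /// constructor via setOperationAction, e.g. (hypothetical choices for an
+  /// XYZ target):
+  ///
+  ///   setOperationAction(ISD::SDIV,  MVT::i32, Expand);  // ops or libcall
+  ///   setOperationAction(ISD::FSIN,  MVT::f64, LibCall); // always a libcall
+  ///   setOperationAction(ISD::BR_CC, MVT::i32, Custom);  // LowerOperation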
+
+  /// This enum indicates whether types are legal for a target, and if not,
+  /// what action should be taken to make them valid.
+  enum LegalizeTypeAction : uint8_t {
+    TypeLegal,           // The target natively supports this type.
+    TypePromoteInteger,  // Replace this integer with a larger one.
+    TypeExpandInteger,   // Split this integer into two of half the size.
+    TypeSoftenFloat,     // Convert this float to a same size integer type,
+                         // if an operation is not supported in target HW.
+    TypeExpandFloat,     // Split this float into two of half the size.
+    TypeScalarizeVector, // Replace this one-element vector with its element.
+    TypeSplitVector,     // Split this vector into two of half the size.
+    TypeWidenVector,     // This vector should be widened into a larger vector.
+    TypePromoteFloat     // Replace this float with a larger one.
+  };
+
+  /// LegalizeKind holds the legalization kind that needs to happen to EVT
+  /// in order to type-legalize it.
+  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
+
+  /// Enum that describes how the target represents true/false values.
+  enum BooleanContent {
+    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
+    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
+    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
+  };
+
+  /// Enum that describes what type of support for selects the target has.
+  enum SelectSupportKind {
+    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
+    ScalarCondVectorVal,  // The target supports selects with a scalar condition
+                          // and vector values (ex: cmov).
+    VectorMaskSelect      // The target supports vector selects with a vector
+                          // mask (ex: x86 blends).
+  };
+
+  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
+  /// to, if at all. Exists because different targets have different levels of
+  /// support for these atomic instructions, and also have different options
+  /// w.r.t. what they should expand to.
+  enum class AtomicExpansionKind {
+    None,    // Don't expand the instruction.
+    LLSC,    // Expand the instruction into loadlinked/storeconditional; used
+             // by ARM/AArch64.
+    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
+             // greater atomic guarantees than a normal load.
+    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+  };
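+
+  /// Targets select an expansion through hooks such as
+  /// shouldExpandAtomicRMWInIR, e.g. (sketch for a hypothetical LL/SC
+  /// target):
+  ///
+  ///   TargetLowering::AtomicExpansionKind
+  ///   XYZTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+  ///     return AtomicExpansionKind::LLSC; // emit LL/SC loops in IR
+  ///   }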
+
+  /// Enum that specifies when a multiplication should be expanded.
+  enum class MulExpansionKind {
+    Always,            // Always expand the instruction.
+    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
+                       // or custom.
+  };
+
+  class ArgListEntry {
+  public:
+    Value *Val = nullptr;
+    SDValue Node = SDValue();
+    Type *Ty = nullptr;
+    bool IsSExt : 1;
+    bool IsZExt : 1;
+    bool IsInReg : 1;
+    bool IsSRet : 1;
+    bool IsNest : 1;
+    bool IsByVal : 1;
+    bool IsInAlloca : 1;
+    bool IsReturned : 1;
+    bool IsSwiftSelf : 1;
+    bool IsSwiftError : 1;
+    uint16_t Alignment = 0;
+
+    ArgListEntry()
+        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
+          IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
+          IsSwiftSelf(false), IsSwiftError(false) {}
+
+    void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
+  };
+  using ArgListTy = std::vector<ArgListEntry>;
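+
+  /// Sketch of hand-building an argument list, as done when lowering calls
+  /// manually (Op and IntPtrTy are hypothetical client values):
+  ///
+  ///   TargetLowering::ArgListTy Args;
+  ///   TargetLowering::ArgListEntry Entry;
+  ///   Entry.Node = Op;
+  ///   Entry.Ty = IntPtrTy;
+  ///   Args.push_back(Entry);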
+
+  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
+                                     ArgListTy &Args) const {}
+
+  static ISD::NodeType getExtendForContent(BooleanContent Content) {
+    switch (Content) {
+    case UndefinedBooleanContent:
+      // Extend by adding rubbish bits.
+      return ISD::ANY_EXTEND;
+    case ZeroOrOneBooleanContent:
+      // Extend by adding zero bits.
+      return ISD::ZERO_EXTEND;
+    case ZeroOrNegativeOneBooleanContent:
+      // Extend by copying the sign bit.
+      return ISD::SIGN_EXTEND;
+    }
+    llvm_unreachable("Invalid content kind");
+  }
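+
+  // Illustrative usage sketch (not part of the original header): callers
+  // typically use this to widen a boolean produced by SETCC. Assuming a
+  // TargetLoweringBase reference `TLI`, a SelectionDAG `DAG`, an SDLoc `dl`,
+  // and a boolean SDValue `Bool` of type VT:
+  //   ISD::NodeType ExtOp =
+  //       TargetLoweringBase::getExtendForContent(TLI.getBooleanContents(VT));
+  //   SDValue Wide = DAG.getNode(ExtOp, dl, WideVT, Bool);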
+
+  /// NOTE: The TargetMachine owns TLOF.
+  explicit TargetLoweringBase(const TargetMachine &TM);
+  TargetLoweringBase(const TargetLoweringBase &) = delete;
+  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
+  virtual ~TargetLoweringBase() = default;
+
+protected:
+  /// \brief Initialize all of the actions to default values.
+  void initActions();
+
+public:
+  const TargetMachine &getTargetMachine() const { return TM; }
+
+  virtual bool useSoftFloat() const { return false; }
+
+  /// Return the pointer type for the given address space, defaults to
+  /// the pointer type from the data layout.
+  /// FIXME: The default needs to be removed once all the code is updated.
+  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
+    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
+  }
+
+  /// Return the type for frame index, which is determined by
+  /// the alloca address space specified through the data layout.
+  MVT getFrameIndexTy(const DataLayout &DL) const {
+    return getPointerTy(DL, DL.getAllocaAddrSpace());
+  }
+
+  /// Return the type for operands of fence.
+  /// TODO: Let fence operands be of i32 type and remove this.
+  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
+    return getPointerTy(DL);
+  }
+
+  /// EVT is not used in-tree, but is used by out-of-tree targets.
+  /// Documentation for this function would be nice...
+  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
+
+  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
+                       bool LegalTypes = true) const;
+
+  /// Returns the type to be used for the index operand of:
+  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
+  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
+  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
+    return getPointerTy(DL);
+  }
+
+  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
+    return true;
+  }
+
+  /// Return true if multiple condition registers are available.
+  bool hasMultipleConditionRegisters() const {
+    return HasMultipleConditionRegisters;
+  }
+
+  /// Return true if the target has BitExtract instructions.
+  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
+
+  /// Return the preferred vector type legalization action.
+  virtual TargetLoweringBase::LegalizeTypeAction
+  getPreferredVectorAction(EVT VT) const {
+    // The default action for one-element vectors is to scalarize.
+    if (VT.getVectorNumElements() == 1)
+      return TypeScalarizeVector;
+    // The default action for other vectors is to promote.
+    return TypePromoteInteger;
+  }
+
+  // There are two general methods for expanding a BUILD_VECTOR node:
+  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
+  //     them together.
+  //  2. Build the vector on the stack and then load it.
+  // If this function returns true, then method (1) will be used, subject to
+  // the constraint that all of the necessary shuffles are legal (as determined
+  // by isShuffleMaskLegal). If this function returns false, then method (2) is
+  // always used. The vector type, and the number of defined values, are
+  // provided.
+  virtual bool
+  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
+                                      unsigned DefinedValues) const {
+    return DefinedValues < 3;
+  }
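+
+  // Hypothetical override sketch (illustrative only): a target with cheap
+  // shuffles might accept more defined values before falling back to the
+  // stack-based expansion:
+  //   bool MyTargetLowering::shouldExpandBuildVectorWithShuffles(
+  //       EVT VT, unsigned DefinedValues) const {
+  //     return DefinedValues < 8; // assumed threshold, for illustration
+  //   }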
+
+  /// Return true if integer divide is usually cheaper than a sequence of
+  /// several shifts, adds, and multiplies for this target.
+  /// The definition of "cheaper" may depend on whether we're optimizing
+  /// for speed or for size.
+  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
+
+  /// Return true if the target can handle a standalone remainder operation.
+  virtual bool hasStandaloneRem(EVT VT) const {
+    return true;
+  }
+
+  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
+  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
+    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
+    return false;
+  }
+
+  /// Reciprocal estimate status values used by the functions below.
+  enum ReciprocalEstimate : int {
+    Unspecified = -1,
+    Disabled = 0,
+    Enabled = 1
+  };
+
+  /// Return a ReciprocalEstimate enum value for a square root of the given type
+  /// based on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
+
+  /// Return a ReciprocalEstimate enum value for a division of the given type
+  /// based on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
+
+  /// Return the refinement step count for a square root of the given type based
+  /// on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
+
+  /// Return the refinement step count for a division of the given type based
+  /// on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
+
+  /// Returns true if target has indicated at least one type should be bypassed.
+  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+  /// Returns the map of slow types for division or remainder with the
+  /// corresponding fast types.
+  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+    return BypassSlowDivWidths;
+  }
+
+  /// Return true if flow control is an expensive operation that should be
+  /// avoided.
+  bool isJumpExpensive() const { return JumpIsExpensive; }
+
+  /// Return true if selects are only cheaper than branches if the branch is
+  /// unlikely to be predicted right.
+  bool isPredictableSelectExpensive() const {
+    return PredictableSelectIsExpensive;
+  }
+
+  /// If a branch or a select condition is skewed in one direction by more than
+  /// this factor, it is very likely to be predicted correctly.
+  virtual BranchProbability getPredictableBranchThreshold() const;
+
+  /// Return true if the following transform is beneficial:
+  /// fold (conv (load x)) -> (load (conv*)x)
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading it is more efficient; however, this can be undone by
+  /// optimizations in the DAG combiner.
+  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
+                                       EVT BitcastVT) const {
+    // Don't do this if we could do an indexed load on the original type, but
+    // not on the new one.
+    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
+      return true;
+
+    MVT LoadMVT = LoadVT.getSimpleVT();
+
+    // Don't bother doing this if it's just going to be promoted again later, as
+    // doing so might interfere with other combines.
+    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
+        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
+      return false;
+
+    return true;
+  }
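+
+  // For example (sketch): when this returns true, the DAG combiner may turn
+  //   (v4i32 (bitcast (v2i64 (load x))))
+  // into
+  //   (v4i32 (load x))
+  // so the conversion happens through the load instead of after it.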
+
+  /// Return true if the following transform is beneficial:
+  /// (store (conv x), y*) -> (store x, (conv*)y)
+  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
+    // Default to the same logic as loads.
+    return isLoadBitCastBeneficial(StoreVT, BitcastVT);
+  }
+
+  /// Return true if it is expected to be cheaper to do a store of a non-zero
+  /// vector constant with the given size and type for the address space than to
+  /// store the individual scalar element constants.
+  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
+                                            unsigned NumElem,
+                                            unsigned AddrSpace) const {
+    return false;
+  }
+
+  /// Allow store merging after legalization in addition to before legalization.
+  /// This may catch stores that do not exist earlier (e.g., stores created from
+  /// intrinsics).
+  virtual bool mergeStoresAfterLegalization() const { return true; }
+
+  /// Returns true if it's reasonable to merge stores to MemVT size.
+  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
+                                const SelectionDAG &DAG) const {
+    return true;
+  }
+
+  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
+  virtual bool isCheapToSpeculateCttz() const {
+    return false;
+  }
+
+  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
+  virtual bool isCheapToSpeculateCtlz() const {
+    return false;
+  }
+
+  /// \brief Return true if ctlz instruction is fast.
+  virtual bool isCtlzFast() const {
+    return false;
+  }
+
+  /// Return true if it is safe to transform an integer-domain bitwise operation
+  /// into the equivalent floating-point operation. This should be set to true
+  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
+  /// type.
+  virtual bool hasBitPreservingFPLogic(EVT VT) const {
+    return false;
+  }
+
+  /// \brief Return true if it is cheaper to split the store of a merged integer
+  /// value from a pair of smaller values into multiple stores.
+  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
+    return false;
+  }
+
+  /// \brief Return if the target supports combining a
+  /// chain like:
+  /// \code
+  ///   %andResult = and %val1, #mask
+  ///   %icmpResult = icmp %andResult, 0
+  /// \endcode
+  /// into a single machine instruction of a form like:
+  /// \code
+  ///   cc = test %register, #mask
+  /// \endcode
+  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
+    return false;
+  }
+
+  /// Use bitwise logic to make pairs of compares more efficient. For example:
+  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
+  /// This should be true when it takes more than one instruction to lower
+  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
+  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
+  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
+    return false;
+  }
+
+  /// Return the preferred operand type if the target has a quick way to compare
+  /// integer values of the given size. Assume that any legal integer type can
+  /// be compared efficiently. Targets may override this to allow illegal wide
+  /// types to return a vector type if there is support to compare that type.
+  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
+    MVT VT = MVT::getIntegerVT(NumBits);
+    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
+  }
+
+  /// Return true if the target should transform:
+  /// (X & Y) == Y ---> (~X & Y) == 0
+  /// (X & Y) != Y ---> (~X & Y) != 0
+  ///
+  /// This may be profitable if the target has a bitwise and-not operation that
+  /// sets comparison flags. A target may want to limit the transformation based
+  /// on the type of Y or if Y is a constant.
+  ///
+  /// Note that the transform will not occur if Y is known to be a power-of-2
+  /// because a mask and compare of a single bit can be handled by inverting the
+  /// predicate, for example:
+  /// (X & 8) == 8 ---> (X & 8) != 0
+  virtual bool hasAndNotCompare(SDValue Y) const {
+    return false;
+  }
+
+  /// Return true if the target has a bitwise and-not operation:
+  /// X = ~A & B
+  /// This can be used to simplify select or other instructions.
+  virtual bool hasAndNot(SDValue X) const {
+    // If the target has the more complex version of this operation, assume that
+    // it has this operation too.
+    return hasAndNotCompare(X);
+  }
+
+  /// \brief Return true if the target wants to use the optimization that
+  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
+  /// promotedInst1(...(promotedInstN(ext(load)))).
+  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
+
+  /// Return true if the target can combine store(extractelement VectorTy,
+  /// Idx).
+  /// \p Cost[out] gives the cost of that transformation when this is true.
+  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
+                                         unsigned &Cost) const {
+    return false;
+  }
+
+  /// Return true if target supports floating point exceptions.
+  bool hasFloatingPointExceptions() const {
+    return HasFloatingPointExceptions;
+  }
+
+  /// Return true if the target always benefits from combining into FMA for a
+  /// given value type. This should typically return false on targets where FMA
+  /// takes more cycles to execute than FADD.
+  virtual bool enableAggressiveFMAFusion(EVT VT) const {
+    return false;
+  }
+
+  /// Return the ValueType of the result of SETCC operations.
+  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+                                 EVT VT) const;
+
+  /// Return the ValueType for comparison libcalls. Comparison libcalls include
+  /// floating-point comparison calls, and Ordered/Unordered check calls on
+  /// floating-point numbers.
+  virtual
+  MVT::SimpleValueType getCmpLibcallReturnType() const;
+
+  /// For targets without i1 registers, this gives the nature of the high-bits
+  /// of boolean values held in types wider than i1.
+  ///
+  /// "Boolean values" are special true/false values produced by nodes like
+  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
+  /// Not to be confused with general values promoted from i1.  Some cpus
+  /// distinguish between vectors of boolean and scalars; the isVec parameter
+  /// selects between the two kinds.  For example on X86 a scalar boolean should
+  /// be zero extended from i1, while the elements of a vector of booleans
+  /// should be sign extended from i1.
+  ///
+  /// Some cpus also treat floating point types the same way as they treat
+  /// vectors instead of the way they treat scalars.
+  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
+    if (isVec)
+      return BooleanVectorContents;
+    return isFloat ? BooleanFloatContents : BooleanContents;
+  }
+
+  BooleanContent getBooleanContents(EVT Type) const {
+    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
+  }
+
+  /// Return target scheduling preference.
+  Sched::Preference getSchedulingPreference() const {
+    return SchedPreferenceInfo;
+  }
+
+  /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
+  /// for different nodes. This function returns the preference (or none) for
+  /// the given node.
+  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
+    return Sched::None;
+  }
+
+  /// Return the register class that should be used for the specified value
+  /// type.
+  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
+    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
+    assert(RC && "This value type is not natively supported!");
+    return RC;
+  }
+
+  /// Return the 'representative' register class for the specified value
+  /// type.
+  ///
+  /// The 'representative' register class is the largest legal super-reg
+  /// register class for the register class of the value type.  For example, on
+  /// i386 the rep register class for i8, i16, and i32 is GR32; while the rep
+  /// register class is GR64 on x86_64.
+  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
+    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
+    return RC;
+  }
+
+  /// Return the cost of the 'representative' register class for the specified
+  /// value type.
+  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
+    return RepRegClassCostForVT[VT.SimpleTy];
+  }
+
+  /// Return true if the target has native support for the specified value type.
+  /// This means that it has a register that directly holds it without
+  /// promotions or expansions.
+  bool isTypeLegal(EVT VT) const {
+    assert(!VT.isSimple() ||
+           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
+    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
+  }
+
+  class ValueTypeActionImpl {
+    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
+    /// that indicates how instruction selection should deal with the type.
+    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
+
+  public:
+    ValueTypeActionImpl() {
+      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
+                TypeLegal);
+    }
+
+    LegalizeTypeAction getTypeAction(MVT VT) const {
+      return ValueTypeActions[VT.SimpleTy];
+    }
+
+    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
+      ValueTypeActions[VT.SimpleTy] = Action;
+    }
+  };
+
+  const ValueTypeActionImpl &getValueTypeActions() const {
+    return ValueTypeActions;
+  }
+
+  /// Return how we should legalize values of this type, either it is already
+  /// legal (return 'Legal') or we need to promote it to a larger type (return
+  /// 'Promote'), or we need to expand it into multiple registers of smaller
+  /// integer type (return 'Expand').  'Custom' is not an option.
+  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
+    return getTypeConversion(Context, VT).first;
+  }
+  LegalizeTypeAction getTypeAction(MVT VT) const {
+    return ValueTypeActions.getTypeAction(VT);
+  }
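+
+  // Usage sketch (illustrative): querying how i64 is handled on a 32-bit
+  // target, assuming a TargetLoweringBase reference `TLI` and an LLVMContext
+  // `Ctx`:
+  //   if (TLI.getTypeAction(Ctx, MVT::i64) ==
+  //       TargetLoweringBase::TypeExpandInteger)
+  //     ; // i64 will be split into two registers of half the size.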
+
+  /// For types supported by the target, this is an identity function.  For
+  /// types that must be promoted to larger types, this returns the larger type
+  /// to promote to.  For integer types that are larger than the largest integer
+  /// register, this contains one step in the expansion to get to the smaller
+  /// register. For illegal floating point types, this returns the integer type
+  /// to transform to.
+  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
+    return getTypeConversion(Context, VT).second;
+  }
+
+  /// For types supported by the target, this is an identity function.  For
+  /// types that must be expanded (i.e. integer types that are larger than the
+  /// largest integer register or illegal floating point types), this returns
+  /// the largest legal type it will be expanded to.
+  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
+    assert(!VT.isVector());
+    while (true) {
+      switch (getTypeAction(Context, VT)) {
+      case TypeLegal:
+        return VT;
+      case TypeExpandInteger:
+        VT = getTypeToTransformTo(Context, VT);
+        break;
+      default:
+        llvm_unreachable("Type is not legal nor is it to be expanded!");
+      }
+    }
+  }
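+
+  // E.g. (illustrative): on a target whose widest legal integer is i32, an
+  // i128 expands in steps i128 -> i64 -> i32, so the loop above runs twice
+  // and returns i32.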
+
+  /// Vector types are broken down into some number of legal first class types.
+  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
+  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
+  /// turns into 4 EVT::i32 values with both PPC and X86.
+  ///
+  /// This method returns the number of registers needed, and the VT for each
+  /// register.  It also returns the VT and quantity of the intermediate values
+  /// before they are promoted/expanded.
+  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
+                                  EVT &IntermediateVT,
+                                  unsigned &NumIntermediates,
+                                  MVT &RegisterVT) const;
+
+  /// Certain targets such as MIPS require that some types such as vectors are
+  /// always broken down into scalars in some contexts. This occurs even if the
+  /// vector type is legal.
+  virtual unsigned getVectorTypeBreakdownForCallingConv(
+      LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+      unsigned &NumIntermediates, MVT &RegisterVT) const {
+    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
+                                  RegisterVT);
+  }
+
+  struct IntrinsicInfo {
+    unsigned     opc = 0;          // target opcode
+    EVT          memVT;            // memory VT
+
+    // value representing memory location
+    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
+
+    int          offset = 0;       // offset off of ptrVal
+    unsigned     size = 0;         // the size of the memory location
+                                   // (taken from memVT if zero)
+    unsigned     align = 1;        // alignment
+
+    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
+    IntrinsicInfo() = default;
+  };
+
+  /// Given an intrinsic, checks if on the target the intrinsic will need to map
+  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
+  /// true and stores the intrinsic information into the IntrinsicInfo that was
+  /// passed to the function.
+  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
+                                  MachineFunction &,
+                                  unsigned /*Intrinsic*/) const {
+    return false;
+  }
+
+  /// Returns true if the target can instruction select the specified FP
+  /// immediate natively. If false, the legalizer will materialize the FP
+  /// immediate as a load from a constant pool.
+  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
+    return false;
+  }
+
+  /// Targets can use this to indicate that they only support *some*
+  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
+  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
+  /// legal.
+  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
+    return true;
+  }
+
+  /// Returns true if the operation can trap for the value type.
+  ///
+  /// VT must be a legal type. By default, we optimistically assume most
+  /// operations don't trap except for integer divide and remainder.
+  virtual bool canOpTrap(unsigned Op, EVT VT) const;
+
+  /// Similar to isShuffleMaskLegal. Targets can use this to indicate whether
+  /// there is a suitable VECTOR_SHUFFLE that can be used to replace a VAND
+  /// with a constant pool entry.
+  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
+                                      EVT /*VT*/) const {
+    return false;
+  }
+
+  /// Return how this operation should be treated: either it is legal, needs to
+  /// be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
+  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
+    if (VT.isExtended()) return Expand;
+    // If a target-specific SDNode requires legalization, require the target
+    // to provide custom legalization for it.
+    if (Op >= array_lengthof(OpActions[0])) return Custom;
+    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal with custom lowering. This is used to help guide high-level
+  /// lowering decisions.
+  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Custom);
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal using promotion. This is used to help guide high-level lowering
+  /// decisions.
+  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Promote);
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal with custom lowering or using promotion. This is used to help
+  /// guide high-level lowering decisions.
+  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Custom ||
+       getOperationAction(Op, VT) == Promote);
+  }
+
+  /// Return true if the operation uses custom lowering, regardless of whether
+  /// the type is legal or not.
+  bool isOperationCustom(unsigned Op, EVT VT) const {
+    return getOperationAction(Op, VT) == Custom;
+  }
+
+  /// Return true if lowering to a jump table is allowed.
+  virtual bool areJTsAllowed(const Function *Fn) const {
+    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
+      return false;
+
+    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+  }
+
+  /// Check whether the range [Low,High] fits in a machine word.
+  bool rangeFitsInWord(const APInt &Low, const APInt &High,
+                       const DataLayout &DL) const {
+    // FIXME: Using the pointer type doesn't seem ideal.
+    uint64_t BW = DL.getIndexSizeInBits(0u);
+    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
+    return Range <= BW;
+  }
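+
+  // Worked example (illustrative): with a 64-bit index type, Low = 0 and
+  // High = 63 give Range = (63 - 0) + 1 = 64 <= 64, so the range fits in a
+  // machine word; High = 64 would give Range = 65 and would not.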
+
+  /// Return true if lowering to a jump table is suitable for a set of case
+  /// clusters which may contain \p NumCases cases spanning \p Range values.
+  /// FIXME: This function checks the maximum table size and density, but the
+  /// minimum size is not checked. It would be nice if the minimum size check
+  /// were also folded into this function. Currently, it is performed in
+  /// findJumpTable() in SelectionDAGBuilder and
+  /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
+  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
+                                      uint64_t Range) const {
+    const bool OptForSize = SI->getParent()->getParent()->optForSize();
+    const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
+    const unsigned MaxJumpTableSize =
+        OptForSize || getMaximumJumpTableSize() == 0
+            ? UINT_MAX
+            : getMaximumJumpTableSize();
+    // Check whether a range of clusters is dense enough for a jump table.
+    if (Range <= MaxJumpTableSize &&
+        (NumCases * 100 >= Range * MinDensity)) {
+      return true;
+    }
+    return false;
+  }
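+
+  // Worked example (illustrative, assuming MinDensity = 40): NumCases = 4
+  // over Range = 10 gives 4 * 100 = 400 >= 10 * 40 = 400, so a jump table is
+  // deemed suitable; NumCases = 3 over the same range (300 < 400) is not.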
+
+  /// Return true if lowering to a bit test is suitable for a set of case
+  /// clusters which contains \p NumDests unique destinations, \p Low and
+  /// \p High as its lowest and highest case values, and expects \p NumCmps
+  /// case value comparisons. Check if the number of destinations, comparison
+  /// metric, and range are all suitable.
+  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
+                             const APInt &Low, const APInt &High,
+                             const DataLayout &DL) const {
+    // FIXME: I don't think NumCmps is the correct metric: a single case and a
+    // range of cases both require only one branch to lower. Just looking at the
+    // number of clusters and destinations should be enough to decide whether to
+    // build bit tests.
+
+    // To lower a range with bit tests, the range must fit the bitwidth of a
+    // machine word.
+    if (!rangeFitsInWord(Low, High, DL))
+      return false;
+
+    // Decide whether it's profitable to lower this range with bit tests. Each
+    // destination requires a bit test and branch, and there is an overall range
+    // check branch. For a small number of clusters, separate comparisons might
+    // be cheaper, and for many destinations, splitting the range might be
+    // better.
+    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
+           (NumDests == 3 && NumCmps >= 6);
+  }
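+
+  // For example (sketch): one destination reached via three case values
+  // (NumDests == 1, NumCmps >= 3) can be lowered to a single mask test,
+  // roughly:
+  //   switch (x) { case 1: case 4: case 9: foo(); }
+  // becomes, after a range check on x:
+  //   if ((1u << x) & 0b1000010010u) foo();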
+
+  /// Return true if the specified operation is illegal on this target or
+  /// unlikely to be made legal with custom lowering. This is used to help guide
+  /// high-level lowering decisions.
+  bool isOperationExpand(unsigned Op, EVT VT) const {
+    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+  }
+
+  /// Return true if the specified operation is legal on this target.
+  bool isOperationLegal(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+           getOperationAction(Op, VT) == Legal;
+  }
+
+  /// Return how this load with extension should be treated: either it is legal,
+  /// needs to be promoted to a larger size, needs to be expanded to some other
+  /// code sequence, or the target has a custom expander for it.
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
+    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
+           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
+    unsigned Shift = 4 * ExtType;
+    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
+  }
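+
+  // Encoding note (descriptive): each LoadExtActions[ValI][MemI] entry packs
+  // one 4-bit LegalizeAction per extension type, so ExtType selects the
+  // nibble at bit offset 4 * ExtType, mirroring the shift above.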
+
+  /// Return true if the specified load with extension is legal on this target.
+  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
+  }
+
+  /// Return true if the specified load with extension is legal or custom
+  /// on this target.
+  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
+           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
+  }
+
+  /// Return how this store with truncation should be treated: either it is
+  /// legal, needs to be promoted to a larger size, needs to be expanded to some
+  /// other code sequence, or the target has a custom expander for it.
+  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
+    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
+           "Table isn't big enough!");
+    return TruncStoreActions[ValI][MemI];
+  }
+
+  /// Return true if the specified store with truncation is legal on this
+  /// target.
+  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
+    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
+  }
+
+  /// Return true if the specified store with truncation is legal or custom on
+  /// this target.
+  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
+    return isTypeLegal(ValVT) &&
+      (getTruncStoreAction(ValVT, MemVT) == Legal ||
+       getTruncStoreAction(ValVT, MemVT) == Custom);
+  }
+
+  /// Return how the indexed load should be treated: either it is legal, needs
+  /// to be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
+  LegalizeAction
+  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
+    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+           "Table isn't big enough!");
+    unsigned Ty = (unsigned)VT.SimpleTy;
+    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
+  }
+
+  /// Return true if the specified indexed load is legal on this target.
+  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
+    return VT.isSimple() &&
+      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
+       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
+  }
+
+  /// Return how the indexed store should be treated: either it is legal, needs
+  /// to be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
+  LegalizeAction
+  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
+    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+           "Table isn't big enough!");
+    unsigned Ty = (unsigned)VT.SimpleTy;
+    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
+  }
+
+  /// Return true if the specified indexed store is legal on this target.
+  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
+    return VT.isSimple() &&
+      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
+       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
+  }
+
+  /// Return how the condition code should be treated: either it is legal, needs
+  /// to be expanded to some other code sequence, or the target has a custom
+  /// expander for it.
+  LegalizeAction
+  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
+    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
+           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
+           "Table isn't big enough!");
+    // See setCondCodeAction for how this is encoded.
+    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
+    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
+    assert(Action != Promote && "Can't promote condition code!");
+    return Action;
+  }
+
+  /// Return true if the specified condition code is legal on this target.
+  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
+    return getCondCodeAction(CC, VT) == Legal;
+  }
+
+  /// Return true if the specified condition code is legal or custom on this
+  /// target.
+  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
+    return getCondCodeAction(CC, VT) == Legal ||
+           getCondCodeAction(CC, VT) == Custom;
+  }
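+
+  // Usage sketch (illustrative), assuming a TargetLoweringBase reference
+  // `TLI`:
+  //   if (!TLI.isCondCodeLegalOrCustom(ISD::SETUGT, MVT::i32))
+  //     ; // e.g. rewrite the comparison in terms of a supported predicate.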
+
+  /// If the action for this operation is to promote, this method returns the
+  /// ValueType to promote to.
+  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
+    assert(getOperationAction(Op, VT) == Promote &&
+           "This operation isn't promoted!");
+
+    // See if this has an explicit type specified.
+    std::map<std::pair<unsigned, MVT::SimpleValueType>,
+             MVT::SimpleValueType>::const_iterator PTTI =
+      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
+    if (PTTI != PromoteToType.end()) return PTTI->second;
+
+    assert((VT.isInteger() || VT.isFloatingPoint()) &&
+           "Cannot autopromote this type, add it with AddPromotedToType.");
+
+    MVT NVT = VT;
+    do {
+      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
+      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
+             "Didn't find type to promote to!");
+    } while (!isTypeLegal(NVT) ||
+              getOperationAction(Op, NVT) == Promote);
+    return NVT;
+  }
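+
+  // E.g. (sketch): if a target marked (ISD::AND, MVT::i16) as Promote with
+  // no explicit PromoteToType entry, the loop above walks i16 -> i32 -> ...
+  // until it reaches a legal type whose AND action is not itself Promote.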
+
+  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
+  /// operations except for the pointer size.  If AllowUnknown is true, this
+  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
+  /// otherwise it will assert.
+  EVT getValueType(const DataLayout &DL, Type *Ty,
+                   bool AllowUnknown = false) const {
+    // Lower scalar pointers to native pointer types.
+    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+      return getPointerTy(DL, PTy->getAddressSpace());
+
+    if (Ty->isVectorTy()) {
+      VectorType *VTy = cast<VectorType>(Ty);
+      Type *Elm = VTy->getElementType();
+      // Lower vectors of pointers to native pointer types.
+      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
+        EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
+        Elm = PointerTy.getTypeForEVT(Ty->getContext());
+      }
+
+      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
+                       VTy->getNumElements());
+    }
+    return EVT::getEVT(Ty, AllowUnknown);
+  }
+
+  /// Return the MVT corresponding to this LLVM type. See getValueType.
+  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
+                         bool AllowUnknown = false) const {
+    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
+  }
+
+  /// Return the desired alignment for ByVal or InAlloca aggregate function
+  /// arguments in the caller parameter area.  This is the actual alignment, not
+  /// its logarithm.
+  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
+
+  /// Return the type of registers that this ValueType will eventually require.
+  MVT getRegisterType(MVT VT) const {
+    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
+    return RegisterTypeForVT[VT.SimpleTy];
+  }
+
+  /// Return the type of registers that this ValueType will eventually require.
+  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
+    if (VT.isSimple()) {
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
+                array_lengthof(RegisterTypeForVT));
+      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
+    }
+    if (VT.isVector()) {
+      EVT VT1;
+      MVT RegisterVT;
+      unsigned NumIntermediates;
+      (void)getVectorTypeBreakdown(Context, VT, VT1,
+                                   NumIntermediates, RegisterVT);
+      return RegisterVT;
+    }
+    if (VT.isInteger()) {
+      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
+    }
+    llvm_unreachable("Unsupported extended type!");
+  }
+
+  /// Return the number of registers that this ValueType will eventually
+  /// require.
+  ///
+  /// This is one for any types promoted to live in larger registers, but may be
+  /// more than one for types (like i64) that are split into pieces.  For types
+  /// like i140, which are first promoted then expanded, it is the number of
+  /// registers needed to hold all the bits of the original type.  For an i140
+  /// on a 32 bit machine this means 5 registers.
+  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
+    if (VT.isSimple()) {
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
+                array_lengthof(NumRegistersForVT));
+      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
+    }
+    if (VT.isVector()) {
+      EVT VT1;
+      MVT VT2;
+      unsigned NumIntermediates;
+      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
+    }
+    if (VT.isInteger()) {
+      unsigned BitWidth = VT.getSizeInBits();
+      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
+      return (BitWidth + RegWidth - 1) / RegWidth;
+    }
+    llvm_unreachable("Unsupported extended type!");
+  }
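+
+  // Worked example (illustrative): for an extended i140 with 32-bit
+  // registers, BitWidth = 140 and RegWidth = 32, so (140 + 32 - 1) / 32 = 5
+  // registers, matching the i140 example above.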
+
+  /// Certain combinations of ABIs, targets, and features require that types
+  /// are legal for some operations and not for other operations.
+  /// For MIPS, all vector types must be passed through the integer register set.
+  virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
+    return getRegisterType(VT);
+  }
+
+  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+                                            EVT VT) const {
+    return getRegisterType(Context, VT);
+  }
+
+  /// Certain targets require unusual breakdowns of certain types. For MIPS,
+  /// this occurs when a vector type is used, as vectors are passed through the
+  /// integer register set.
+  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                                 EVT VT) const {
+    return getNumRegisters(Context, VT);
+  }
+
+  /// Certain targets have context-sensitive alignment requirements, where one
+  /// type has the alignment requirement of another type.
+  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
+                                                 DataLayout DL) const {
+    return DL.getABITypeAlignment(ArgTy);
+  }
+
+  /// If true, then instruction selection should seek to shrink the FP constant
+  /// of the specified type to a smaller type in order to save space and/or
+  /// reduce runtime.
+  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
+
+  // Return true if it is profitable to reduce the given load node to a smaller
+  // type.
+  //
+  // e.g. (i16 (trunc (i32 (load x)))) -> i16 load x should be performed
+  virtual bool shouldReduceLoadWidth(SDNode *Load,
+                                     ISD::LoadExtType ExtTy,
+                                     EVT NewVT) const {
+    return true;
+  }
+
+  /// When splitting a value of the specified type into parts, does the Lo
+  /// or Hi part come first?  This usually follows the endianness, except
+  /// for ppcf128, where the Hi part always comes first.
+  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
+    return DL.isBigEndian() || VT == MVT::ppcf128;
+  }
+
+  /// If true, the target has custom DAG combine transformations that it can
+  /// perform for the specified node.
+  bool hasTargetDAGCombine(ISD::NodeType NT) const {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
+  }
+
+  unsigned getGatherAllAliasesMaxDepth() const {
+    return GatherAllAliasesMaxDepth;
+  }
+
+  /// Returns the size of the platform's va_list object.
+  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
+    return getPointerTy(DL).getSizeInBits();
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memset
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memset. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemset(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memcpy
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memcpy. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
+  }
+
+  /// Get maximum # of load operations permitted for memcmp
+  ///
+  /// This function returns the maximum number of load operations permitted
+  /// to replace a call to memcmp. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
+    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
+  }
+
+  /// For memcmp expansion when the memcmp result is only compared equal or
+  /// not-equal to 0, allow up to this number of load pairs per block. As an
+  /// example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
+  ///   a0 = load2bytes &a[0]
+  ///   b0 = load2bytes &b[0]
+  ///   a2 = load1byte  &a[2]
+  ///   b2 = load1byte  &b[2]
+  ///   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
+  virtual unsigned getMemcmpEqZeroLoadsPerBlock() const {
+    return 1;
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memmove
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memmove. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemmove(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
+  }
+
+  /// \brief Determine if the target supports unaligned memory accesses.
+  ///
+  /// This function returns true if the target allows unaligned memory accesses
+  /// of the specified type in the given address space. If true, it also returns
+  /// whether the unaligned memory access is "fast" in the last argument by
+  /// reference. This is used, for example, in situations where an array
+  /// copy/move/set is converted to a sequence of store operations. Its use
+  /// helps to ensure that such replacements don't generate code that causes an
+  /// alignment error (trap) on the target machine.
+  virtual bool allowsMisalignedMemoryAccesses(EVT,
+                                              unsigned AddrSpace = 0,
+                                              unsigned Align = 1,
+                                              bool * /*Fast*/ = nullptr) const {
+    return false;
+  }
+
+  /// Return true if the target supports a memory access of this type for the
+  /// given address space and alignment. If the access is allowed, the optional
+  /// final parameter returns if the access is also fast (as defined by the
+  /// target).
+  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+                          unsigned AddrSpace = 0, unsigned Alignment = 1,
+                          bool *Fast = nullptr) const;
+
+  /// Returns the target specific optimal type for load and store operations as
+  /// a result of memset, memcpy, and memmove lowering.
+  ///
+  /// If DstAlign is zero, that means the destination's alignment can satisfy
+  /// any constraint. Similarly, if SrcAlign is zero it means there isn't
+  /// a need to check it against alignment requirement, probably because the
+  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
+  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
+  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
+  /// does not need to be loaded.  It returns EVT::Other if the type should be
+  /// determined using generic target-independent logic.
+  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
+                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
+                                  bool /*IsMemset*/,
+                                  bool /*ZeroMemset*/,
+                                  bool /*MemcpyStrSrc*/,
+                                  MachineFunction &/*MF*/) const {
+    return MVT::Other;
+  }
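+
+  // Hypothetical override sketch (illustrative only): a target with fast
+  // 16-byte vector stores might prefer a vector type for large memsets:
+  //   EVT MyTargetLowering::getOptimalMemOpType(
+  //       uint64_t Size, unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
+  //       bool IsMemset, bool /*ZeroMemset*/, bool /*MemcpyStrSrc*/,
+  //       MachineFunction &/*MF*/) const {
+  //     return (IsMemset && Size >= 16) ? EVT(MVT::v4i32) : EVT(MVT::Other);
+  //   }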
+
+  /// Returns true if it's safe to use load / store of the specified type to
+  /// expand memcpy / memset inline.
+  ///
+  /// This is mostly true for all types except for some special cases. For
+  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
+  /// fstpl which also does type conversion. Note the specified type doesn't
+  /// have to be legal as the hook is used before type legalization.
+  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
+
+  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
+  bool usesUnderscoreSetJmp() const {
+    return UseUnderscoreSetJmp;
+  }
+
+  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
+  bool usesUnderscoreLongJmp() const {
+    return UseUnderscoreLongJmp;
+  }
+
+  /// Return the lower limit for the number of entries in a jump table.
+  virtual unsigned getMinimumJumpTableEntries() const;
+
+  /// Return lower limit of the density in a jump table.
+  unsigned getMinimumJumpTableDensity(bool OptForSize) const;
+
+  /// Return upper limit for number of entries in a jump table.
+  /// Zero if no limit.
+  unsigned getMaximumJumpTableSize() const;
+
+  virtual bool isJumpTableRelative() const {
+    return TM.isPositionIndependent();
+  }
+
+  /// If a physical register, this specifies the register that
+  /// llvm.stacksave/llvm.stackrestore should save and restore.
+  unsigned getStackPointerRegisterToSaveRestore() const {
+    return StackPointerRegisterToSaveRestore;
+  }
+
+  /// If a physical register, this returns the register that receives the
+  /// exception address on entry to an EH pad.
+  virtual unsigned
+  getExceptionPointerRegister(const Constant *PersonalityFn) const {
+    // 0 is guaranteed to be the NoRegister value on all targets
+    return 0;
+  }
+
+  /// If a physical register, this returns the register that receives the
+  /// exception typeid on entry to a landing pad.
+  virtual unsigned
+  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
+    // 0 is guaranteed to be the NoRegister value on all targets
+    return 0;
+  }
+
+  virtual bool needsFixedCatchObjects() const {
+    report_fatal_error("Funclet EH is not implemented for this target");
+  }
+
+  /// Returns the target's jmp_buf size in bytes (if never set, the default is
+  /// 200)
+  unsigned getJumpBufSize() const {
+    return JumpBufSize;
+  }
+
+  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
+  /// is 0)
+  unsigned getJumpBufAlignment() const {
+    return JumpBufAlignment;
+  }
+
+  /// Return the minimum stack alignment of an argument.
+  unsigned getMinStackArgumentAlignment() const {
+    return MinStackArgumentAlignment;
+  }
+
+  /// Return the minimum function alignment.
+  unsigned getMinFunctionAlignment() const {
+    return MinFunctionAlignment;
+  }
+
+  /// Return the preferred function alignment.
+  unsigned getPrefFunctionAlignment() const {
+    return PrefFunctionAlignment;
+  }
+
+  /// Return the preferred loop alignment.
+  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopAlignment;
+  }
+
+  /// If the target has a standard location for the stack protector guard,
+  /// returns the address of that location. Otherwise, returns nullptr.
+  /// DEPRECATED: please override useLoadStackGuardNode and customize
+  ///             LOAD_STACK_GUARD, or customize @llvm.stackguard().
+  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
+
+  /// Inserts necessary declarations for SSP (stack protection) purpose.
+  /// Should be used only when getIRStackGuard returns nullptr.
+  virtual void insertSSPDeclarations(Module &M) const;
+
+  /// Return the variable that's previously inserted by insertSSPDeclarations,
+  /// if any, otherwise return nullptr. Should be used only when
+  /// getIRStackGuard returns nullptr.
+  virtual Value *getSDagStackGuard(const Module &M) const;
+
+  /// If this function returns true, stack protection checks should XOR the
+  /// frame pointer (or whichever pointer is used to address locals) into the
+  /// stack guard value before checking it. getIRStackGuard must return nullptr
+  /// if this returns true.
+  virtual bool useStackGuardXorFP() const { return false; }
+
+  /// If the target has a standard stack protection check function that
+  /// performs validation and error handling, returns the function. Otherwise,
+  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
+  /// Should be used only when getIRStackGuard returns nullptr.
+  virtual Value *getSSPStackGuardCheck(const Module &M) const;
+
+protected:
+  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
+                                            bool UseTLS) const;
+
+public:
+  /// Returns the target-specific address of the unsafe stack pointer.
+  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
+
+  /// Returns the name of the symbol used to emit stack probes or the empty
+  /// string if not applicable.
+  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
+    return "";
+  }
+
+  /// Returns true if a cast between SrcAS and DestAS is a noop.
+  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+    return false;
+  }
+
+  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
+  /// are happy to sink it into basic blocks.
+  virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+    return isNoopAddrSpaceCast(SrcAS, DestAS);
+  }
+
+  /// Return true if the pointer arguments to CI should be aligned by aligning
+  /// the object whose address is being passed. If so then MinSize is set to the
+  /// minimum size the object must be to be aligned and PrefAlign is set to the
+  /// preferred alignment.
+  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+                                      unsigned & /*PrefAlign*/) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// \name Helpers for TargetTransformInfo implementations
+  /// @{
+
+  /// Get the ISD node that corresponds to the Instruction class opcode.
+  int InstructionOpcodeToISD(unsigned Opcode) const;
+
+  /// Estimate the cost of type-legalization and the legalized type.
+  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
+                                              Type *Ty) const;
+
+  /// @}
+
+  //===--------------------------------------------------------------------===//
+  /// \name Helpers for atomic expansion.
+  /// @{
+
+  /// Returns the maximum atomic operation size (in bits) supported by
+  /// the backend. Atomic operations greater than this size (as well
+  /// as ones that are not naturally aligned), will be expanded by
+  /// AtomicExpandPass into an __atomic_* library call.
+  unsigned getMaxAtomicSizeInBitsSupported() const {
+    return MaxAtomicSizeInBitsSupported;
+  }
+
+  /// Returns the size of the smallest cmpxchg or ll/sc instruction
+  /// the backend supports.  Any smaller operations are widened in
+  /// AtomicExpandPass.
+  ///
+  /// Note that *unlike* operations above the maximum size, atomic ops
+  /// are still natively supported below the minimum; they just
+  /// require a more complex expansion.
+  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
+
+  /// Whether the target supports unaligned atomic operations.
+  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
+
+  /// Whether AtomicExpandPass should automatically insert fences and reduce
+  /// ordering for this atomic. This should be true for most architectures with
+  /// weak memory ordering. Defaults to false.
+  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
+    return false;
+  }
+
+  /// Perform a load-linked operation on Addr, returning a "Value *" with the
+  /// corresponding pointee type. This may entail some non-trivial operations to
+  /// truncate or reconstruct types that will be illegal in the backend. See
+  /// ARMISelLowering for an example implementation.
+  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+                                AtomicOrdering Ord) const {
+    llvm_unreachable("Load linked unimplemented on this target");
+  }
+
+  /// Perform a store-conditional operation to Addr. Return the status of the
+  /// store. This should be 0 if the store succeeded, non-zero otherwise.
+  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+                                      Value *Addr, AtomicOrdering Ord) const {
+    llvm_unreachable("Store conditional unimplemented on this target");
+  }
+
+  /// Inserts in the IR a target-specific intrinsic specifying a fence.
+  /// It is called by AtomicExpandPass before expanding an
+  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
+  ///   if shouldInsertFencesForAtomic returns true.
+  ///
+  /// Inst is the original atomic instruction, prior to other expansions that
+  /// may be performed.
+  ///
+  /// This function should either return nullptr, or a pointer to an IR-level
+  ///   Instruction*. Even complex fence sequences can be represented by a
+  ///   single Instruction* through an intrinsic to be lowered later.
+  /// Backends should override this method to produce target-specific
+  ///   intrinsics for their fences.
+  /// FIXME: Please note that the default implementation here in terms of
+  ///   IR-level fences exists for historical/compatibility reasons and is
+  ///   *unsound*! Fences cannot, in general, be used to restore sequential
+  ///   consistency. Consider, for example:
+  /// atomic<int> x = y = 0;
+  /// int r1, r2, r3, r4;
+  /// Thread 0:
+  ///   x.store(1);
+  /// Thread 1:
+  ///   y.store(1);
+  /// Thread 2:
+  ///   r1 = x.load();
+  ///   r2 = y.load();
+  /// Thread 3:
+  ///   r3 = y.load();
+  ///   r4 = x.load();
+  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
+  ///  seq_cst. But if they are lowered to monotonic accesses, no amount of
+  ///  IR-level fences can prevent it.
+  /// @{
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                        AtomicOrdering Ord) const {
+    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
+      return Builder.CreateFence(Ord);
+    else
+      return nullptr;
+  }
+
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         Instruction *Inst,
+                                         AtomicOrdering Ord) const {
+    if (isAcquireOrStronger(Ord))
+      return Builder.CreateFence(Ord);
+    else
+      return nullptr;
+  }
+  /// @}
+
+  // Emits code that executes when the comparison result in the ll/sc
+  // expansion of a cmpxchg instruction is such that the store-conditional will
+  // not execute.  This makes it possible to balance out the load-linked with
+  // a dedicated instruction, if desired.
+  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
+  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
+  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
+
+  /// Returns true if the given (atomic) store should be expanded by the
+  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
+  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+    return false;
+  }
+
+  /// Returns true if arguments should be sign-extended in lib calls.
+  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+    return IsSigned;
+  }
+
+  /// Returns how the given (atomic) load should be expanded by the
+  /// IR-level AtomicExpand pass.
+  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+    return AtomicExpansionKind::None;
+  }
+
+  /// Returns true if the given atomic cmpxchg should be expanded by the
+  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
+  /// (through emitLoadLinked() and emitStoreConditional()).
+  virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+    return false;
+  }
+
+  /// Returns how the IR-level AtomicExpand pass should expand the given
+  /// AtomicRMW, if at all. Default is to never expand.
+  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+    return AtomicExpansionKind::None;
+  }
+
+  /// On some platforms, an AtomicRMW that never actually modifies the value
+  /// (such as fetch_add of 0) can be turned into a fence followed by an
+  /// atomic load. This may sound useless, but it makes it possible for the
+  /// processor to keep the cacheline shared, dramatically improving
+  /// performance. And such idempotent RMWs are useful for implementing some
+  /// kinds of locks, see for example (justification + benchmarks):
+  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+  /// This method tries doing that transformation, returning the atomic load if
+  /// it succeeds, and nullptr otherwise.
+  /// If shouldExpandAtomicLoadInIR returns anything other than
+  /// AtomicExpansionKind::None for that load, it will undergo another round of
+  /// expansion.
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+    return nullptr;
+  }
+
+  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
+  /// SIGN_EXTEND, or ANY_EXTEND).
+  virtual ISD::NodeType getExtendForAtomicOps() const {
+    return ISD::ZERO_EXTEND;
+  }
+
+  /// @}
+
+  /// Returns true if we should normalize
+  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
+  /// that it saves us from materializing N0 and N1 in an integer register.
+  /// Targets that are able to perform and/or on flags should return false here.
+  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
+                                               EVT VT) const {
+    // If a target has multiple condition registers, then it likely has logical
+    // operations on those registers.
+    if (hasMultipleConditionRegisters())
+      return false;
+    // Only do the transform if the value won't be split into multiple
+    // registers.
+    LegalizeTypeAction Action = getTypeAction(Context, VT);
+    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
+      Action != TypeSplitVector;
+  }
+
+  /// Return true if a select of constants (select Cond, C1, C2) should be
+  /// transformed into simple math ops with the condition value. For example:
+  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
+  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // TargetLowering Configuration Methods - These methods should be invoked by
+  // the derived class constructor to configure this object for the target.
+  //
+protected:
+  /// Specify how the target extends the result of integer and floating point
+  /// boolean values from i1 to a wider type.  See getBooleanContents.
+  void setBooleanContents(BooleanContent Ty) {
+    BooleanContents = Ty;
+    BooleanFloatContents = Ty;
+  }
+
+  /// Specify how the target extends the result of integer and floating point
+  /// boolean values from i1 to a wider type.  See getBooleanContents.
+  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
+    BooleanContents = IntTy;
+    BooleanFloatContents = FloatTy;
+  }
+
+  /// Specify how the target extends the result of a vector boolean value from a
+  /// vector of i1 to a wider type.  See getBooleanContents.
+  void setBooleanVectorContents(BooleanContent Ty) {
+    BooleanVectorContents = Ty;
+  }
+
+  /// Specify the target scheduling preference.
+  void setSchedulingPreference(Sched::Preference Pref) {
+    SchedPreferenceInfo = Pref;
+  }
+
+  /// Indicate whether this target prefers to use _setjmp to implement
+  /// llvm.setjmp or the version without _.  Defaults to false.
+  void setUseUnderscoreSetJmp(bool Val) {
+    UseUnderscoreSetJmp = Val;
+  }
+
+  /// Indicate whether this target prefers to use _longjmp to implement
+  /// llvm.longjmp or the version without _.  Defaults to false.
+  void setUseUnderscoreLongJmp(bool Val) {
+    UseUnderscoreLongJmp = Val;
+  }
+
+  /// Indicate the minimum number of case blocks required to generate a jump
+  /// table.
+  void setMinimumJumpTableEntries(unsigned Val);
+
+  /// Indicate the maximum number of entries in jump tables.
+  /// Set to zero to generate unlimited jump tables.
+  void setMaximumJumpTableSize(unsigned);
+
+  /// If set to a physical register, this specifies the register that
+  /// llvm.stacksave/llvm.stackrestore should save and restore.
+  void setStackPointerRegisterToSaveRestore(unsigned R) {
+    StackPointerRegisterToSaveRestore = R;
+  }
+
+  /// Tells the code generator that the target has multiple (allocatable)
+  /// condition registers that can be used to store the results of comparisons
+  /// for use by selects and conditional branches. With multiple condition
+  /// registers, the code generator will not aggressively sink comparisons into
+  /// the blocks of their users.
+  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
+    HasMultipleConditionRegisters = hasManyRegs;
+  }
+
+  /// Tells the code generator that the target has BitExtract instructions.
+  /// The code generator will aggressively sink "shift"s into the blocks of
+  /// their users if the users will generate "and" instructions which can be
+  /// combined with "shift" to BitExtract instructions.
+  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
+    HasExtractBitsInsn = hasExtractInsn;
+  }
+
+  /// Tells the code generator not to expand logic operations on comparison
+  /// predicates into separate sequences that increase the amount of flow
+  /// control.
+  void setJumpIsExpensive(bool isExpensive = true);
+
+  /// Tells the code generator that this target supports floating point
+  /// exceptions and cares about preserving floating point exception behavior.
+  void setHasFloatingPointExceptions(bool FPExceptions = true) {
+    HasFloatingPointExceptions = FPExceptions;
+  }
+
+  /// Tells the code generator which bitwidths to bypass.
+  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+  }
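+
+  /// As a sketch (not taken from any in-tree target), a constructor that
+  /// wants 32-bit div/rem bypassed with 8-bit operations would call:
+  /// \code
+  ///   addBypassSlowDiv(32, 8); // Try 8-bit div/rem when operands are small.
+  /// \endcode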
+
+  /// Add the specified register class as an available regclass for the
+  /// specified value type. This indicates the selector can handle values of
+  /// that class natively.
+  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
+    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
+    RegClassForVT[VT.SimpleTy] = RC;
+  }
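+
+  /// For example (the register class name is illustrative), a target whose
+  /// 32-bit GPRs hold i32 values natively would call:
+  /// \code
+  ///   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
+  /// \endcode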
+
+  /// Return the largest legal super-reg register class of the register class
+  /// for the specified type and its associated "cost".
+  virtual std::pair<const TargetRegisterClass *, uint8_t>
+  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
+
+  /// Once all of the register classes are added, this allows us to compute
+  /// derived properties we expose.
+  void computeRegisterProperties(const TargetRegisterInfo *TRI);
+
+  /// Indicate that the specified operation does not work with the specified
+  /// type and indicate what to do about it. Note that VT may refer to either
+  /// the type of a result or that of an operand of Op.
+  void setOperationAction(unsigned Op, MVT VT,
+                          LegalizeAction Action) {
+    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
+    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
+  }
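+
+  /// A typical use, sketched for an imaginary target with no native i32
+  /// division, is to request expansion in the constructor:
+  /// \code
+  ///   setOperationAction(ISD::SDIV, MVT::i32, Expand);   // libcall/sequence
+  ///   setOperationAction(ISD::SELECT, MVT::i32, Custom); // LowerOperation
+  /// \endcode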
+
+  /// Indicate that the specified load with extension does not work with the
+  /// specified type and indicate what to do about it.
+  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
+                        LegalizeAction Action) {
+    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
+           MemVT.isValid() && "Table isn't big enough!");
+    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+    unsigned Shift = 4 * ExtType;
+    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
+    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
+  }
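+
+  /// For instance, a target without an extending f32->f64 load would mark it
+  /// Expand (a sketch; many in-tree targets do something similar):
+  /// \code
+  ///   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+  /// \endcode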
+
+  /// Indicate that the specified truncating store does not work with the
+  /// specified type and indicate what to do about it.
+  void setTruncStoreAction(MVT ValVT, MVT MemVT,
+                           LegalizeAction Action) {
+    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
+    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
+  }
+
+  /// Indicate that the specified indexed load does or does not work with the
+  /// specified type and indicate what to do about it.
+  ///
+  /// NOTE: All indexed mode loads are initialized to Expand in
+  /// TargetLowering.cpp
+  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
+                            LegalizeAction Action) {
+    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+           (unsigned)Action < 0xf && "Table isn't big enough!");
+    // Load actions are kept in the upper half.
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
+  }
+
+  /// Indicate that the specified indexed store does or does not work with the
+  /// specified type and indicate what to do about it.
+  ///
+  /// NOTE: All indexed mode stores are initialized to Expand in
+  /// TargetLowering.cpp
+  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
+                             LegalizeAction Action) {
+    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+           (unsigned)Action < 0xf && "Table isn't big enough!");
+    // Store actions are kept in the lower half.
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
+  }
+
+  /// Indicate that the specified condition code is or isn't supported on the
+  /// target and indicate what to do about it.
+  void setCondCodeAction(ISD::CondCode CC, MVT VT,
+                         LegalizeAction Action) {
+    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
+           "Table isn't big enough!");
+    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+    // The lower 3 bits of SimpleTy select the Nth 4-bit chunk within a 32-bit
+    // value, and the remaining upper bits index into the second dimension of
+    // the array to select which 32-bit value to use.
+    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
+    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
+  }
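+
+  /// To make the packing concrete with an example value: for SimpleTy == 13,
+  /// Shift == 4 * (13 & 0x7) == 20, so the action occupies bits [20,24) of the
+  /// 32-bit word CondCodeActions[CC][13 >> 3], i.e. CondCodeActions[CC][1].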
+
+  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
+  /// to trying a larger integer/fp until it can find one that works. If that
+  /// default is insufficient, this method can be used by the target to override
+  /// the default.
+  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
+    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
+  }
+
+  /// Convenience method to set an operation to Promote and specify the type
+  /// in a single call.
+  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
+    setOperationAction(Opc, OrigVT, Promote);
+    AddPromotedToType(Opc, OrigVT, DestVT);
+  }
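+
+  /// E.g. a target that only implements a vector operation at one width might
+  /// write (types chosen purely for illustration):
+  /// \code
+  ///   setOperationPromotedToType(ISD::AND, MVT::v4i32, MVT::v2i64);
+  /// \endcode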
+
+  /// Targets should invoke this method for each target independent node that
+  /// they want to provide a custom DAG combiner for by implementing the
+  /// PerformDAGCombine virtual method.
+  void setTargetDAGCombine(ISD::NodeType NT) {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
+  }
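+
+  /// A target constructor typically registers each opcode it wants to see in
+  /// PerformDAGCombine, e.g. (the chosen nodes are illustrative):
+  /// \code
+  ///   setTargetDAGCombine(ISD::ADD);
+  ///   setTargetDAGCombine(ISD::ZERO_EXTEND);
+  /// \endcode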
+
+  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
+  void setJumpBufSize(unsigned Size) {
+    JumpBufSize = Size;
+  }
+
+  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
+  /// 0
+  void setJumpBufAlignment(unsigned Align) {
+    JumpBufAlignment = Align;
+  }
+
+  /// Set the target's minimum function alignment (in log2(bytes))
+  void setMinFunctionAlignment(unsigned Align) {
+    MinFunctionAlignment = Align;
+  }
+
+  /// Set the target's preferred function alignment.  This should be set if
+  /// there is a performance benefit to higher-than-minimum alignment (in
+  /// log2(bytes))
+  void setPrefFunctionAlignment(unsigned Align) {
+    PrefFunctionAlignment = Align;
+  }
+
+  /// Set the target's preferred loop alignment. The default alignment is
+  /// zero, which means the target does not care about loop alignment.  The
+  /// alignment is specified in log2(bytes). The target may also override
+  /// getPrefLoopAlignment to provide per-loop values.
+  void setPrefLoopAlignment(unsigned Align) {
+    PrefLoopAlignment = Align;
+  }
+
+  /// Set the minimum stack alignment of an argument (in log2(bytes)).
+  void setMinStackArgumentAlignment(unsigned Align) {
+    MinStackArgumentAlignment = Align;
+  }
+
+  /// Set the maximum atomic operation size supported by the
+  /// backend. Atomic operations greater than this size (as well as
+  /// ones that are not naturally aligned) will be expanded by
+  /// AtomicExpandPass into an __atomic_* library call.
+  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
+    MaxAtomicSizeInBitsSupported = SizeInBits;
+  }
+
+  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
+  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
+    MinCmpXchgSizeInBits = SizeInBits;
+  }
+
+  /// Sets whether unaligned atomic operations are supported.
+  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
+    SupportsUnalignedAtomics = UnalignedSupported;
+  }
+
+public:
+  //===--------------------------------------------------------------------===//
+  // Addressing mode description hooks (used by LSR etc).
+  //
+
+  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
+  /// instructions reading the address. This allows as much computation as
+  /// possible to be done in the address mode for that operand. This hook also
+  /// lets targets indicate when this should be done for intrinsics that
+  /// load/store.
+  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
+                                    SmallVectorImpl<Value*> &/*Ops*/,
+                                    Type *&/*AccessTy*/) const {
+    return false;
+  }
+
+  /// This represents an addressing mode of:
+  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+  /// If BaseGV is null,  there is no BaseGV.
+  /// If BaseOffs is zero, there is no base offset.
+  /// If HasBaseReg is false, there is no base register.
+  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
+  /// no scale.
+  struct AddrMode {
+    GlobalValue *BaseGV = nullptr;
+    int64_t      BaseOffs = 0;
+    bool         HasBaseReg = false;
+    int64_t      Scale = 0;
+    AddrMode() = default;
+  };
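+
+  /// As an example, an x86-style operand such as [%reg1 + 4*%reg2 + 16] would
+  /// correspond to an AddrMode with BaseGV == nullptr, BaseOffs == 16,
+  /// HasBaseReg == true, and Scale == 4 (the assembly syntax is illustrative).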
+
+  /// Return true if the addressing mode represented by AM is legal for this
+  /// target, for a load/store of the specified type.
+  ///
+  /// The type may be VoidTy, in which case only return true if the addressing
+  /// mode is legal for a load/store of any legal type.  TODO: Handle
+  /// pre/postinc as well.
+  ///
+  /// If the address space cannot be determined, it will be -1.
+  ///
+  /// TODO: Remove default argument
+  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
+                                     Type *Ty, unsigned AddrSpace,
+                                     Instruction *I = nullptr) const;
+
+  /// \brief Return the cost of the scaling factor used in the addressing mode
+  /// represented by AM for this target, for a load/store of the specified type.
+  ///
+  /// If the AM is supported, the return value must be >= 0.
+  /// If the AM is not supported, it returns a negative value.
+  /// TODO: Handle pre/postinc as well.
+  /// TODO: Remove default argument
+  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
+                                   Type *Ty, unsigned AS = 0) const {
+    // Default: assume that any scaling factor used in a legal AM is free.
+    if (isLegalAddressingMode(DL, AM, Ty, AS))
+      return 0;
+    return -1;
+  }
+
+  /// Return true if the specified immediate is a legal icmp immediate, that
+  /// is, the target has icmp instructions which can compare a register against
+  /// the immediate without having to materialize the immediate into a
+  /// register.
+  virtual bool isLegalICmpImmediate(int64_t) const {
+    return true;
+  }
+
+  /// Return true if the specified immediate is a legal add immediate, that
+  /// is, the target has add instructions which can add a register with the
+  /// immediate without having to materialize the immediate into a register.
+  virtual bool isLegalAddImmediate(int64_t) const {
+    return true;
+  }
+
+  /// Return true if it's significantly cheaper to shift a vector by a uniform
+  /// scalar than by an amount which will vary across each lane. On x86, for
+  /// example, there is a "psllw" instruction for the former case, but no simple
+  /// instruction for a general "a << b" operation on vectors.
+  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
+    return false;
+  }
+
+  /// Returns true if the opcode is a commutative binary operation.
+  virtual bool isCommutativeBinOp(unsigned Opcode) const {
+    // FIXME: This should get its info from the td file.
+    switch (Opcode) {
+    case ISD::ADD:
+    case ISD::SMIN:
+    case ISD::SMAX:
+    case ISD::UMIN:
+    case ISD::UMAX:
+    case ISD::MUL:
+    case ISD::MULHU:
+    case ISD::MULHS:
+    case ISD::SMUL_LOHI:
+    case ISD::UMUL_LOHI:
+    case ISD::FADD:
+    case ISD::FMUL:
+    case ISD::AND:
+    case ISD::OR:
+    case ISD::XOR:
+    case ISD::SADDO:
+    case ISD::UADDO:
+    case ISD::ADDC:
+    case ISD::ADDE:
+    case ISD::FMINNUM:
+    case ISD::FMAXNUM:
+    case ISD::FMINNAN:
+    case ISD::FMAXNAN:
+      return true;
+    default: return false;
+    }
+  }
+
+  /// Return true if it's free to truncate a value of type FromTy to type
+  /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to
+  /// i16 by referencing its sub-register AX.
+  /// Targets must return false when FromTy <= ToTy.
+  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
+  /// whether a call is in tail position. Typically this means that both results
+  /// would be assigned to the same register or stack slot, but it could mean
+  /// the target performs adequate checks of its own before proceeding with the
+  /// tail call.  Targets must return false when FromTy <= ToTy.
+  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
+    return false;
+  }
+
+  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
+
+  /// Return true if the extension represented by \p I is free.
+  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
+  /// this method can use the context provided by \p I to decide
+  /// whether or not \p I is free.
+  /// This method extends the behavior of the is[Z|FP]ExtFree family.
+  /// In other words, if is[Z|FP]ExtFree returns true, then this method
+  /// returns true as well. The converse is not true.
+  /// The target can perform the adequate checks by overriding isExtFreeImpl.
+  /// \pre \p I must be a sign, zero, or fp extension.
+  bool isExtFree(const Instruction *I) const {
+    switch (I->getOpcode()) {
+    case Instruction::FPExt:
+      if (isFPExtFree(EVT::getEVT(I->getType()),
+                      EVT::getEVT(I->getOperand(0)->getType())))
+        return true;
+      break;
+    case Instruction::ZExt:
+      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
+        return true;
+      break;
+    case Instruction::SExt:
+      break;
+    default:
+      llvm_unreachable("Instruction is not an extension");
+    }
+    return isExtFreeImpl(I);
+  }
+
+  /// Return true if \p Load and \p Ext can form an ExtLoad.
+  /// For example, in AArch64
+  ///   %L = load i8, i8* %ptr
+  ///   %E = zext i8 %L to i32
+  /// can be lowered into one load instruction
+  ///   ldrb w0, [x0]
+  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
+                 const DataLayout &DL) const {
+    EVT VT = getValueType(DL, Ext->getType());
+    EVT LoadVT = getValueType(DL, Load->getType());
+
+    // If the load has other users and the truncate is not free, the ext
+    // probably isn't free.
+    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
+        !isTruncateFree(Ext->getType(), Load->getType()))
+      return false;
+
+    // Check whether the target supports casts folded into loads.
+    unsigned LType;
+    if (isa<ZExtInst>(Ext))
+      LType = ISD::ZEXTLOAD;
+    else {
+      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
+      LType = ISD::SEXTLOAD;
+    }
+
+    return isLoadExtLegal(LType, VT, LoadVT);
+  }
+
+  /// Return true if any actual instruction that defines a value of type FromTy
+  /// implicitly zero-extends the value to ToTy in the result register.
+  ///
+  /// The function should return true when it is likely that the truncate can
+  /// be freely folded with an instruction defining a value of FromTy. If
+  /// the defining instruction is unknown (because you're looking at a
+  /// function argument, PHI, etc.) then the target may require an
+  /// explicit truncate, which is not necessarily free, but this function
+  /// does not deal with those cases.
+  /// Targets must return false when FromTy >= ToTy.
+  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
+    return false;
+  }
+
+  /// Return true if the target supplies paired loads and can combine into one
+  /// two loaded values of type LoadedType that sit next to each other in
+  /// memory. RequiredAlignment gives the minimal alignment constraints that
+  /// must be met to be able to select this paired load.
+  ///
+  /// This information is *not* used to generate actual paired loads, but it is
+  /// used to generate a sequence of loads that is easier to combine into a
+  /// paired load.
+  /// For instance, something like this:
+  /// a = load i64* addr
+  /// b = trunc i64 a to i32
+  /// c = lshr i64 a, 32
+  /// d = trunc i64 c to i32
+  /// will be optimized into:
+  /// b = load i32* addr1
+  /// d = load i32* addr2
+  /// Where addr1 = addr2 +/- sizeof(i32).
+  ///
+  /// In other words, unless the target performs a post-isel load combining,
+  /// this information should not be provided because it will generate more
+  /// loads.
+  virtual bool hasPairedLoad(EVT /*LoadedType*/,
+                             unsigned & /*RequiredAlignment*/) const {
+    return false;
+  }
+
+  /// Return true if the target has a vector blend instruction.
+  virtual bool hasVectorBlend() const { return false; }
+
+  /// \brief Get the maximum supported factor for interleaved memory accesses.
+  /// Default to be the minimum interleave factor: 2.
+  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
+
+  /// \brief Lower an interleaved load to target specific intrinsics. Return
+  /// true on success.
+  ///
+  /// \p LI is the vector load instruction.
+  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
+  /// \p Indices is the corresponding indices for each shufflevector.
+  /// \p Factor is the interleave factor.
+  virtual bool lowerInterleavedLoad(LoadInst *LI,
+                                    ArrayRef<ShuffleVectorInst *> Shuffles,
+                                    ArrayRef<unsigned> Indices,
+                                    unsigned Factor) const {
+    return false;
+  }
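+
+  /// For a factor-2 sketch, the pass would invoke this hook with Factor == 2,
+  /// Indices == {0, 1}, and the two shufflevectors below:
+  ///   %wide = load <8 x i32>, <8 x i32>* %ptr
+  ///   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
+  ///                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  ///   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
+  ///                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>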
+
+  /// \brief Lower an interleaved store to target specific intrinsics. Return
+  /// true on success.
+  ///
+  /// \p SI is the vector store instruction.
+  /// \p SVI is the shufflevector to RE-interleave the stored vector.
+  /// \p Factor is the interleave factor.
+  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+                                     unsigned Factor) const {
+    return false;
+  }
+
+  /// Return true if zero-extending the specific node Val to type VT2 is free
+  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
+  /// because it's folded such as X86 zero-extending loads).
+  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
+    return isZExtFree(Val.getValueType(), VT2);
+  }
+
+  /// Return true if an fpext operation is free (for instance, because
+  /// single-precision floating-point numbers are implicitly extended to
+  /// double-precision).
+  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
+    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
+           "invalid fpext types");
+    return false;
+  }
+
+  /// Return true if an fpext operation input to an \p Opcode operation is free
+  /// (for instance, because half-precision floating-point numbers are
+  /// implicitly extended to float-precision) for an FMA instruction.
+  virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
+    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
+           "invalid fpext types");
+    return isFPExtFree(DestVT, SrcVT);
+  }
+
+  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
+  /// extend node) is profitable.
+  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
+
+  /// Return true if an fneg operation is free to the point where it is never
+  /// worthwhile to replace it with a bitwise operation.
+  virtual bool isFNegFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
+    return false;
+  }
+
+  /// Return true if an fabs operation is free to the point where it is never
+  /// worthwhile to replace it with a bitwise operation.
+  virtual bool isFAbsFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
+    return false;
+  }
+
+  /// Return true if an FMA operation is faster than a pair of fmul and fadd
+  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
+  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
+  ///
+  /// NOTE: This may be called before legalization on types for which FMAs are
+  /// not legal, but should return true if those types will eventually legalize
+  /// to types that support FMAs. After legalization, it will only be called on
+  /// types that support FMAs (via Legal or Custom actions)
+  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
+    return false;
+  }
+
+  /// Return true if it's profitable to narrow operations of type VT1 to
+  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
+  /// i32 to i16.
+  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
+    return false;
+  }
+
+  /// \brief Return true if it is beneficial to convert a load of a constant to
+  /// just the constant itself.
+  /// On some targets it might be more efficient to use a combination of
+  /// arithmetic instructions to materialize the constant instead of loading it
+  /// from a constant pool.
+  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+                                                 Type *Ty) const {
+    return false;
+  }
+
+  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
+  /// from this source type with this index. This is needed because
+  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
+  /// the first element, and only the target knows which lowering is cheap.
+  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
+                                       unsigned Index) const {
+    return false;
+  }
+
+  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
+  // even if the vector itself has multiple uses.
+  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Runtime Library hooks
+  //
+
+  /// Rename the default libcall routine name for the specified libcall.
+  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
+    LibcallRoutineNames[Call] = Name;
+  }
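+
+  /// For example, a target shipping its own runtime could redirect a libcall
+  /// (the routine name here is hypothetical):
+  /// \code
+  ///   setLibcallName(RTLIB::SDIV_I32, "__my_divsi3");
+  /// \endcode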
+
+  /// Get the libcall routine name for the specified libcall.
+  const char *getLibcallName(RTLIB::Libcall Call) const {
+    return LibcallRoutineNames[Call];
+  }
+
+  /// Override the default CondCode to be used to test the result of the
+  /// comparison libcall against zero.
+  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
+    CmpLibcallCCs[Call] = CC;
+  }
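+
+  /// Soft-float comparison libcalls commonly report "equal" as zero, so a
+  /// target might configure (a representative sketch):
+  /// \code
+  ///   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ); // result == 0 -> equal
+  /// \endcode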
+
+  /// Get the CondCode that's to be used to test the result of the comparison
+  /// libcall against zero.
+  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
+    return CmpLibcallCCs[Call];
+  }
+
+  /// Set the CallingConv that should be used for the specified libcall.
+  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
+    LibcallCallingConvs[Call] = CC;
+  }
+
+  /// Get the CallingConv that should be used for the specified libcall.
+  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
+    return LibcallCallingConvs[Call];
+  }
+
+  /// Execute target specific actions to finalize target lowering.
+  /// This is used to set extra flags in MachineFrameInfo and to freeze the
+  /// set of reserved registers.
+  /// The default implementation just freezes the set of reserved registers.
+  virtual void finalizeLowering(MachineFunction &MF) const;
+
+private:
+  const TargetMachine &TM;
+
+  /// Tells the code generator that the target has multiple (allocatable)
+  /// condition registers that can be used to store the results of comparisons
+  /// for use by selects and conditional branches. With multiple condition
+  /// registers, the code generator will not aggressively sink comparisons into
+  /// the blocks of their users.
+  bool HasMultipleConditionRegisters;
+
+  /// Tells the code generator that the target has BitExtract instructions.
+  /// The code generator will aggressively sink "shift"s into the blocks of
+  /// their users if the users will generate "and" instructions which can be
+  /// combined with "shift" to BitExtract instructions.
+  bool HasExtractBitsInsn;
+
+  /// Tells the code generator to bypass slow divide or remainder
+  /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
+  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
+  /// div/rem when the operands are positive and less than 256.
+  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
+
+  /// Tells the code generator that it shouldn't generate extra flow control
+  /// instructions and should attempt to combine flow control instructions via
+  /// predication.
+  bool JumpIsExpensive;
+
+  /// Whether the target supports or cares about preserving floating point
+  /// exception behavior.
+  bool HasFloatingPointExceptions;
+
+  /// This target prefers to use _setjmp to implement llvm.setjmp.
+  ///
+  /// Defaults to false.
+  bool UseUnderscoreSetJmp;
+
+  /// This target prefers to use _longjmp to implement llvm.longjmp.
+  ///
+  /// Defaults to false.
+  bool UseUnderscoreLongJmp;
+
+  /// Information about the contents of the high-bits in boolean values held in
+  /// a type wider than i1. See getBooleanContents.
+  BooleanContent BooleanContents;
+
+  /// Information about the contents of the high-bits in boolean values held in
+  /// a type wider than i1. See getBooleanContents.
+  BooleanContent BooleanFloatContents;
+
+  /// Information about the contents of the high-bits in boolean vector values
+  /// when the element type is wider than i1. See getBooleanContents.
+  BooleanContent BooleanVectorContents;
+
+  /// The target scheduling preference: shortest possible total cycles or lowest
+  /// register usage.
+  Sched::Preference SchedPreferenceInfo;
+
+  /// The size, in bytes, of the target's jmp_buf buffers
+  unsigned JumpBufSize;
+
+  /// The alignment, in bytes, of the target's jmp_buf buffers
+  unsigned JumpBufAlignment;
+
+  /// The minimum alignment that any argument on the stack needs to have.
+  unsigned MinStackArgumentAlignment;
+
+  /// The minimum function alignment (used when optimizing for size, and to
+  /// prevent explicitly provided alignment from leading to incorrect code).
+  unsigned MinFunctionAlignment;
+
+  /// The preferred function alignment (used when alignment unspecified and
+  /// optimizing for speed).
+  unsigned PrefFunctionAlignment;
+
+  /// The preferred loop alignment.
+  unsigned PrefLoopAlignment;
+
+  /// Size in bits of the maximum atomics size the backend supports.
+  /// Accesses larger than this will be expanded by AtomicExpandPass.
+  unsigned MaxAtomicSizeInBitsSupported;
+
+  /// Size in bits of the minimum cmpxchg or ll/sc operation the
+  /// backend supports.
+  unsigned MinCmpXchgSizeInBits;
+
+  /// This indicates if the target supports unaligned atomic operations.
+  bool SupportsUnalignedAtomics;
+
+  /// If set to a physical register, this specifies the register that
+  /// llvm.stacksave/llvm.stackrestore should save and restore.
+  unsigned StackPointerRegisterToSaveRestore;
+
+  /// This indicates the default register class to use for each ValueType the
+  /// target supports natively.
+  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
+  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
+  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+
+  /// This indicates the "representative" register class to use for each
+  /// ValueType the target supports natively. This information is used by the
+  /// scheduler to track register pressure. By default, the representative
+  /// register class is the largest legal super-reg register class of the
+  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
+  /// representative class would be GR32.
+  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+
+  /// This indicates the "cost" of the "representative" register class for each
+  /// ValueType. The cost is used by the scheduler to approximate register
+  /// pressure.
+  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+
+  /// For any value types we are promoting or expanding, this contains the value
+  /// type that we are changing to.  For Expanded types, this contains one step
+  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
+  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
+  /// the same type (e.g. i32 -> i32).
+  MVT TransformToType[MVT::LAST_VALUETYPE];
+
+  /// For each operation and each value type, keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the operation.  Most
+  /// operations are Legal (aka, supported natively by the target), but
+  /// operations that are not should be described.  Note that operations on
+  /// non-legal value types are not described here.
+  LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
+
+  /// For each load extension type and each value type, keep a LegalizeAction
+  /// that indicates how instruction selection should deal with a load of a
+  /// specific value type and extension type. Uses 4 bits to store the action
+  /// for each of the 4 load ext types.
+  uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+
+  /// For each value type pair keep a LegalizeAction that indicates whether a
+  /// truncating store of a specific value type and truncating type is legal.
+  LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+
+  /// For each indexed mode and each value type, keep a pair of LegalizeAction
+  /// that indicates how instruction selection should deal with the load /
+  /// store.
+  ///
+  /// The first dimension is the value_type for the reference. The second
+  /// dimension represents the various modes for load / store.
+  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
+
+  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the condition code.
+  ///
+  /// Because each CC action takes up 4 bits, we need to have the array size be
+  /// large enough to fit all of the value types. This can be done by rounding
+  /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
+  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
+
+protected:
+  ValueTypeActionImpl ValueTypeActions;
+
+private:
+  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
+
+  /// Targets can specify ISD nodes that they would like PerformDAGCombine
+  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
+  /// array.
+  unsigned char
+  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
+
+  /// For operations that must be promoted to a specific type, this holds the
+  /// destination type.  This map should be sparse, so don't hold it as an
+  /// array.
+  ///
+  /// Targets add entries to this map with AddPromotedToType(..), clients access
+  /// this with getTypeToPromoteTo(..).
+  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
+    PromoteToType;
+
+  /// Stores the name of each libcall.
+  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
+
+  /// The ISD::CondCode that should be used to test the result of each of the
+  /// comparison libcall against zero.
+  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
+
+  /// Stores the CallingConv that should be used for each libcall.
+  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
+
+  /// Set default libcall names and calling conventions.
+  void InitLibcalls(const Triple &TT);
+
+protected:
+  /// Return true if the extension represented by \p I is free.
+  /// \pre \p I is a sign, zero, or fp extension and
+  ///      is[Z|FP]ExtFree of the related types is not true.
+  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
+
+  /// Depth that GatherAllAliases should continue looking for chain
+  /// dependencies when trying to find a more preferable chain. As an
+  /// approximation, this should be more than the number of consecutive stores
+  /// expected to be merged.
+  unsigned GatherAllAliasesMaxDepth;
+
+  /// \brief Specify maximum number of store instructions per memset call.
+  ///
+  /// When lowering \@llvm.memset this field specifies the maximum number of
+  /// store operations that may be substituted for the call to memset. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memset will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
+  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
+  /// store.  This only applies to setting a constant array of a constant size.
+  unsigned MaxStoresPerMemset;
+
+  /// Maximum number of store operations that may be substituted for the call
+  /// to memset, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemsetOptSize;
+
+  /// \brief Specify maximum number of store instructions per memcpy call.
+  ///
+  /// When lowering \@llvm.memcpy this field specifies the maximum number of
+  /// store operations that may be substituted for a call to memcpy. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memcpy will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
+  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
+  /// and one 1-byte store. This only applies to copying a constant array of
+  /// constant size.
+  unsigned MaxStoresPerMemcpy;
+
+  /// Maximum number of store operations that may be substituted for a call to
+  /// memcpy, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemcpyOptSize;
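+
+  /// Maximum number of load operations that may be substituted for a call to
+  /// memcmp, with a separate limit for functions with the OptSize attribute.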
+  unsigned MaxLoadsPerMemcmp;
+  unsigned MaxLoadsPerMemcmpOptSize;
+
+  /// \brief Specify maximum number of store instructions per memmove call.
+  ///
+  /// When lowering \@llvm.memmove this field specifies the maximum number of
+  /// store instructions that may be substituted for a call to memmove. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memmove will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
+  /// with 8-bit alignment would result in nine 1-byte stores.  This only
+  /// applies to copying a constant array of constant size.
+  unsigned MaxStoresPerMemmove;
+
+  /// Maximum number of store instructions that may be substituted for a call to
+  /// memmove, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemmoveOptSize;
+
+  /// Tells the code generator that select is more expensive than a branch if
+  /// the branch is usually predicted right.
+  bool PredictableSelectIsExpensive;
+
+  /// \see enableExtLdPromotion.
+  bool EnableExtLdPromotion;
+
+  /// Return true if the value types that can be represented by the specified
+  /// register class are all legal.
+  bool isLegalRC(const TargetRegisterInfo &TRI,
+                 const TargetRegisterClass &RC) const;
+
+  /// Replace/modify any TargetFrameIndex operands with a target-dependent
+  /// sequence of memory operands that is recognized by PrologEpilogInserter.
+  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
+                                    MachineBasicBlock *MBB) const;
+
+  /// Replace/modify the XRay custom event operands with target-dependent
+  /// details.
+  MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
+                                         MachineBasicBlock *MBB) const;
+};
+
+/// This class defines information used to lower LLVM code to legal SelectionDAG
+/// operators that the target instruction selector can accept natively.
+///
+/// This class also defines callbacks that targets must implement to lower
+/// target-specific constructs to SelectionDAG operators.
+class TargetLowering : public TargetLoweringBase {
+public:
+  struct DAGCombinerInfo;
+
+  TargetLowering(const TargetLowering &) = delete;
+  TargetLowering &operator=(const TargetLowering &) = delete;
+
+  /// NOTE: The TargetMachine owns TLOF.
+  explicit TargetLowering(const TargetMachine &TM);
+
+  bool isPositionIndependent() const;
+
+  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
+                                          FunctionLoweringInfo *FLI,
+                                          DivergenceAnalysis *DA) const {
+    return false;
+  }
+
+  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
+    return false;
+  }
+
+  /// Returns true by value, and returns the base pointer, offset pointer and
+  /// addressing mode by reference, if the node's address can be legally
+  /// represented as a pre-indexed load / store address.
+  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
+                                         SDValue &/*Offset*/,
+                                         ISD::MemIndexedMode &/*AM*/,
+                                         SelectionDAG &/*DAG*/) const {
+    return false;
+  }
+
+  /// Returns true by value, and returns the base pointer, offset pointer and
+  /// addressing mode by reference, if this node can be combined with a load /
+  /// store to form a post-indexed load / store.
+  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
+                                          SDValue &/*Base*/,
+                                          SDValue &/*Offset*/,
+                                          ISD::MemIndexedMode &/*AM*/,
+                                          SelectionDAG &/*DAG*/) const {
+    return false;
+  }
+
+  /// Return the entry encoding for a jump table in the current function.  The
+  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
+  virtual unsigned getJumpTableEncoding() const;
+
+  virtual const MCExpr *
+  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
+                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
+                            MCContext &/*Ctx*/) const {
+    llvm_unreachable("Need to implement this hook if target has custom JTIs");
+  }
+
+  /// Returns relocation base for the given PIC jumptable.
+  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
+                                           SelectionDAG &DAG) const;
+
+  /// This returns the relocation base for the given PIC jumptable, the same as
+  /// getPICJumpTableRelocBase, but as an MCExpr.
+  virtual const MCExpr *
+  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
+                               unsigned JTI, MCContext &Ctx) const;
+
+  /// Return true if folding a constant offset with the given GlobalAddress is
+  /// legal.  It is frequently not legal in PIC relocation models.
+  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+
+  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
+                            SDValue &Chain) const;
+
+  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
+                           SDValue &NewRHS, ISD::CondCode &CCCode,
+                           const SDLoc &DL) const;
+
+  /// Returns a pair of (return value, chain).
+  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
+  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
+                                          EVT RetVT, ArrayRef<SDValue> Ops,
+                                          bool isSigned, const SDLoc &dl,
+                                          bool doesNotReturn = false,
+                                          bool isReturnValueUsed = true) const;
+
+  /// Check whether parameters to a call that are passed in callee saved
+  /// registers are the same as from the calling function.  This needs to be
+  /// checked for tail call eligibility.
+  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
+      const uint32_t *CallerPreservedMask,
+      const SmallVectorImpl<CCValAssign> &ArgLocs,
+      const SmallVectorImpl<SDValue> &OutVals) const;
+
+  //===--------------------------------------------------------------------===//
+  // TargetLowering Optimization Methods
+  //
+
+  /// A convenience struct that encapsulates a DAG, and two SDValues for
+  /// returning information from TargetLowering to its clients that want to
+  /// combine.
+  struct TargetLoweringOpt {
+    SelectionDAG &DAG;
+    bool LegalTys;
+    bool LegalOps;
+    SDValue Old;
+    SDValue New;
+
+    explicit TargetLoweringOpt(SelectionDAG &InDAG,
+                               bool LT, bool LO) :
+      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
+
+    bool LegalTypes() const { return LegalTys; }
+    bool LegalOperations() const { return LegalOps; }
+
+    bool CombineTo(SDValue O, SDValue N) {
+      Old = O;
+      New = N;
+      return true;
+    }
+  };
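+
+  /// In a target combine, a typical (sketched) use constructs a
+  /// TargetLoweringOpt, records a replacement, and lets the caller commit it:
+  /// \code
+  ///   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+  ///                         !DCI.isBeforeLegalizeOps());
+  ///   if (ShrinkDemandedConstant(Op, Demanded, TLO))
+  ///     DCI.CommitTargetLoweringOpt(TLO);
+  /// \endcode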
+
+  /// Check to see if the specified operand of the specified instruction is a
+  /// constant integer.  If so, check to see if there are any bits set in the
+  /// constant that are not demanded.  If so, shrink the constant and return
+  /// true.
+  bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                              TargetLoweringOpt &TLO) const;
+
+  // Target hook to do target-specific const optimization, which is called by
+  // ShrinkDemandedConstant. This function should return true if the target
+  // doesn't want ShrinkDemandedConstant to further optimize the constant.
+  virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                                            TargetLoweringOpt &TLO) const {
+    return false;
+  }
+
+  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
+  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
+  /// generalized for targets with other types of implicit widening casts.
+  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
+                        TargetLoweringOpt &TLO) const;
+
+  /// Helper for SimplifyDemandedBits that can simplify an operation with
+  /// multiple uses.  This function simplifies operand \p OpIdx of \p User and
+  /// then updates \p User with the simplified version. No other uses of
+  /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
+  /// function behaves exactly like function SimplifyDemandedBits declared
+  /// below except that it also updates the DAG by calling
+  /// DCI.CommitTargetLoweringOpt.
+  bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
+                            DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
+
+  /// Look at Op.  At this point, we know that only the DemandedMask bits of the
+  /// result of Op are ever used downstream.  If we can use this information to
+  /// simplify Op, create a new simplified DAG node and return true, returning
+  /// the original and new nodes in Old and New.  Otherwise, analyze the
+  /// expression and return a mask of KnownOne and KnownZero bits for the
+  /// expression (used to simplify the caller).  The KnownZero/One bits may only
+  /// be accurate for those bits in the DemandedMask.
+  /// \p AssumeSingleUse When this parameter is true, this function will
+  ///    attempt to simplify \p Op even if there are multiple uses.
+  ///    Callers are responsible for correctly updating the DAG based on the
+  ///    results of this function, because simply replacing TLO.Old
+  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
+  ///    has multiple uses.
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+                            KnownBits &Known,
+                            TargetLoweringOpt &TLO,
+                            unsigned Depth = 0,
+                            bool AssumeSingleUse = false) const;
+
+  /// Helper wrapper around SimplifyDemandedBits
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+                            DAGCombinerInfo &DCI) const;
+
+  /// Look at Vector Op. At this point, we know that only the DemandedElts
+  /// elements of the result of Op are ever used downstream.  If we can use
+  /// this information to simplify Op, create a new simplified DAG node and
+  /// return true, storing the original and new nodes in TLO.
+  /// Otherwise, analyze the expression and return a mask of KnownUndef and
+  /// KnownZero elements for the expression (used to simplify the caller).
+  /// The KnownUndef/Zero elements may only be accurate for those bits
+  /// in the DemandedMask.
+  /// \p AssumeSingleUse When this parameter is true, this function will
+  ///    attempt to simplify \p Op even if there are multiple uses.
+  ///    Callers are responsible for correctly updating the DAG based on the
+  ///    results of this function, because simply replacing TLO.Old
+  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
+  ///    has multiple uses.
+  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
+                                  APInt &KnownUndef, APInt &KnownZero,
+                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
+                                  bool AssumeSingleUse = false) const;
+
+  /// Helper wrapper around SimplifyDemandedVectorElts
+  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
+                                  APInt &KnownUndef, APInt &KnownZero,
+                                  DAGCombinerInfo &DCI) const;
+
+  /// Determine which of the bits specified in Mask are known to be either zero
+  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
+  /// argument allows us to only collect the known bits that are shared by the
+  /// requested vector elements.
+  virtual void computeKnownBitsForTargetNode(const SDValue Op,
+                                             KnownBits &Known,
+                                             const APInt &DemandedElts,
+                                             const SelectionDAG &DAG,
+                                             unsigned Depth = 0) const;
+
+  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
+  /// Default implementation computes low bits based on alignment
+  /// information. This should preserve known bits passed into it.
+  virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
+                                             KnownBits &Known,
+                                             const APInt &DemandedElts,
+                                             const SelectionDAG &DAG,
+                                             unsigned Depth = 0) const;
+
+  /// This method can be implemented by targets that want to expose additional
+  /// information about sign bits to the DAG Combiner. The DemandedElts
+  /// argument allows us to only collect the minimum sign bits that are shared
+  /// by the requested vector elements.
+  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+                                                   const APInt &DemandedElts,
+                                                   const SelectionDAG &DAG,
+                                                   unsigned Depth = 0) const;
+
+  /// Attempt to simplify any target nodes based on the demanded vector
+  /// elements, returning true on success. Otherwise, analyze the expression and
+  /// return a mask of KnownUndef and KnownZero elements for the expression
+  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
+  /// accurate for those elements in the DemandedElts.
+  virtual bool SimplifyDemandedVectorEltsForTargetNode(
+      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
+      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
+
+  struct DAGCombinerInfo {
+    void *DC;  // The DAG Combiner object.
+    CombineLevel Level;
+    bool CalledByLegalizer;
+
+  public:
+    SelectionDAG &DAG;
+
+    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
+      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
+
+    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
+    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
+    bool isAfterLegalizeDAG() const {
+      return Level == AfterLegalizeDAG;
+    }
+    CombineLevel getDAGCombineLevel() { return Level; }
+    bool isCalledByLegalizer() const { return CalledByLegalizer; }
+
+    void AddToWorklist(SDNode *N);
+    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
+    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
+    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
+
+    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
+  };
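+
+  // A combine typically reports its replacement through this object, e.g.
+  // (sketch; tryMyFold is a hypothetical helper that builds the new value):
+  //   if (SDValue NewOp = tryMyFold(N, DCI.DAG))
+  //     return DCI.CombineTo(N, NewOp, /*AddTo=*/true);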
+
+  /// Return true if N is a constant or constant vector equal to the true value
+  /// from getBooleanContents().
+  bool isConstTrueVal(const SDNode *N) const;
+
+  /// Return true if N is a constant or constant vector equal to the false value
+  /// from getBooleanContents().
+  bool isConstFalseVal(const SDNode *N) const;
+
+  /// Return if \p N is a True value when extended to \p VT.
+  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
+
+  /// Try to simplify a setcc built with the specified operands and cc. If it is
+  /// unable to simplify it, return a null SDValue.
+  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
+                        bool foldBooleans, DAGCombinerInfo &DCI,
+                        const SDLoc &dl) const;
+
+  // For targets which wrap address, unwrap for analysis.
+  virtual SDValue unwrapAddress(SDValue N) const { return N; }
+
+  /// Returns true (and the GlobalValue and the offset) if the node is a
+  /// GlobalAddress + offset.
+  virtual bool
+  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+
+  /// This method will be invoked for all target nodes and for any
+  /// target-independent nodes that the target has registered (via
+  /// setTargetDAGCombine) to have it invoked for.
+  ///
+  /// The semantics are as follows:
+  /// Return Value:
+  ///   SDValue.getNode() == nullptr - No change was made.
+  ///   SDValue.getNode() == N       - N was replaced, is dead, and is already
+  ///                                  handled.
+  ///   otherwise                    - N should be replaced by the returned
+  ///                                  operand.
+  ///
+  /// In addition, methods provided by DAGCombinerInfo may be used to perform
+  /// more complex transformations.
+  ///
+  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
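+
+  /// A minimal override honoring this contract might look like the following
+  /// (sketch; MyTargetLowering and combineADD are illustrative names):
+  ///   SDValue
+  ///   MyTargetLowering::PerformDAGCombine(SDNode *N,
+  ///                                       DAGCombinerInfo &DCI) const {
+  ///     switch (N->getOpcode()) {
+  ///     default: return SDValue();          // no change was made
+  ///     case ISD::ADD: return combineADD(N, DCI);
+  ///     }
+  ///   }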
+
+  /// Return true if it is profitable to move a following shift through this
+  /// node, adjusting any immediate operands as necessary to preserve semantics.
+  /// This transformation may not be desirable if it disrupts a particularly
+  /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
+  /// By default, it returns true.
+  virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+    return true;
+  }
+
+  /// Return true if it is profitable to combine a BUILD_VECTOR with a
+  /// stride-pattern to a shuffle and a truncate.
+  /// Example of such a combine:
+  /// v4i32 build_vector((extract_elt V, 1),
+  ///                    (extract_elt V, 3),
+  ///                    (extract_elt V, 5),
+  ///                    (extract_elt V, 7))
+  ///  -->
+  /// v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
+  virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
+      ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
+    return false;
+  }
+
+  /// Return true if the target has native support for the specified value type
+  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
+  /// i16 is legal, but undesirable since i16 instruction encodings are longer
+  /// and some i16 instructions are slow.
+  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
+    // By default, assume all legal types are desirable.
+    return isTypeLegal(VT);
+  }
+
+  /// Return true if it is profitable for the dag combiner to transform a
+  /// floating point op of the specified opcode to an equivalent op of an
+  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
+  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
+                                                 EVT /*VT*/) const {
+    return false;
+  }
+
+  /// This method queries the target whether it is beneficial for the dag
+  /// combiner to promote the specified node. If true, it should return the
+  /// desired promotion type by reference.
+  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
+    return false;
+  }
+
+  /// Return true if the target supports the swifterror attribute. It optimizes
+  /// loads and stores into reads and writes of a specific register.
+  virtual bool supportSwiftError() const {
+    return false;
+  }
+
+  /// Return true if the target supports that a subset of CSRs for the given
+  /// machine function is handled explicitly via copies.
+  virtual bool supportSplitCSR(MachineFunction *MF) const {
+    return false;
+  }
+
+  /// Perform necessary initialization to handle a subset of CSRs explicitly
+  /// via copies. This function is called at the beginning of instruction
+  /// selection.
+  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Insert explicit copies in entry and exit blocks. We copy a subset of
+  /// CSRs to virtual registers in the entry block, and copy them back to
+  /// physical registers in the exit blocks. This function is called at the end
+  /// of instruction selection.
+  virtual void insertCopiesSplitCSR(
+      MachineBasicBlock *Entry,
+      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Lowering methods - These methods must be implemented by targets so that
+  // the SelectionDAGBuilder code knows how to lower these.
+  //
+
+  /// This hook must be implemented to lower the incoming (formal) arguments,
+  /// described by the Ins array, into the specified DAG. The implementation
+  /// should fill in the InVals array with legal-type argument values, and
+  /// return the resulting token chain value.
+  virtual SDValue LowerFormalArguments(
+      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
+      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
+      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// This structure contains all information that is necessary for lowering
+  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
+  /// needs to lower a call, and targets will see this struct in their LowerCall
+  /// implementation.
+  struct CallLoweringInfo {
+    SDValue Chain;
+    Type *RetTy = nullptr;
+    bool RetSExt           : 1;
+    bool RetZExt           : 1;
+    bool IsVarArg          : 1;
+    bool IsInReg           : 1;
+    bool DoesNotReturn     : 1;
+    bool IsReturnValueUsed : 1;
+    bool IsConvergent      : 1;
+    bool IsPatchPoint      : 1;
+
+    // IsTailCall should be modified by implementations of
+    // TargetLowering::LowerCall that perform tail call conversions.
+    bool IsTailCall = false;
+
+    // Whether call lowering is done after SelectionDAG type legalization.
+    bool IsPostTypeLegalization = false;
+
+    unsigned NumFixedArgs = -1;
+    CallingConv::ID CallConv = CallingConv::C;
+    SDValue Callee;
+    ArgListTy Args;
+    SelectionDAG &DAG;
+    SDLoc DL;
+    ImmutableCallSite CS;
+    SmallVector<ISD::OutputArg, 32> Outs;
+    SmallVector<SDValue, 32> OutVals;
+    SmallVector<ISD::InputArg, 32> Ins;
+    SmallVector<SDValue, 4> InVals;
+
+    CallLoweringInfo(SelectionDAG &DAG)
+        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
+          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
+          IsPatchPoint(false), DAG(DAG) {}
+
+    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
+      DL = dl;
+      return *this;
+    }
+
+    CallLoweringInfo &setChain(SDValue InChain) {
+      Chain = InChain;
+      return *this;
+    }
+
+    // setCallee with target/module-specific attributes
+    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
+                                   SDValue Target, ArgListTy &&ArgsList) {
+      RetTy = ResultType;
+      Callee = Target;
+      CallConv = CC;
+      NumFixedArgs = ArgsList.size();
+      Args = std::move(ArgsList);
+
+      DAG.getTargetLoweringInfo().markLibCallAttributes(
+          &(DAG.getMachineFunction()), CC, Args);
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
+                                SDValue Target, ArgListTy &&ArgsList) {
+      RetTy = ResultType;
+      Callee = Target;
+      CallConv = CC;
+      NumFixedArgs = ArgsList.size();
+      Args = std::move(ArgsList);
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
+                                SDValue Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite Call) {
+      RetTy = ResultType;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn =
+          Call.doesNotReturn() ||
+          (!Call.isInvoke() &&
+           isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
+      IsVarArg = FTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      Callee = Target;
+
+      CallConv = Call.getCallingConv();
+      NumFixedArgs = FTy->getNumParams();
+      Args = std::move(ArgsList);
+
+      CS = Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setInRegister(bool Value = true) {
+      IsInReg = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setNoReturn(bool Value = true) {
+      DoesNotReturn = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setVarArg(bool Value = true) {
+      IsVarArg = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setTailCall(bool Value = true) {
+      IsTailCall = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setDiscardResult(bool Value = true) {
+      IsReturnValueUsed = !Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setConvergent(bool Value = true) {
+      IsConvergent = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setSExtResult(bool Value = true) {
+      RetSExt = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setZExtResult(bool Value = true) {
+      RetZExt = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+      IsPatchPoint = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
+      IsPostTypeLegalization = Value;
+      return *this;
+    }
+
+    ArgListTy &getArgs() {
+      return Args;
+    }
+  };
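+
+  // The setters above are designed to chain. A typical libcall lowering
+  // builds its CallLoweringInfo like this before handing it to LowerCallTo
+  // below (sketch; Chain, Callee, RetTy, Args and dl come from the caller):
+  //   TargetLowering::CallLoweringInfo CLI(DAG);
+  //   CLI.setDebugLoc(dl)
+  //       .setChain(Chain)
+  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
+  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);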
+
+  /// This function lowers an abstract call to a function into an actual call.
+  /// This returns a pair of operands.  The first element is the return value
+  /// for the function (if RetTy is not VoidTy).  The second element is the
+  /// outgoing token chain. It calls LowerCall to do the actual lowering.
+  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
+
+  /// This hook must be implemented to lower calls into the specified
+  /// DAG. The outgoing arguments to the call are described by the Outs array,
+  /// and the values to be returned by the call are described by the Ins
+  /// array. The implementation should fill in the InVals array with legal-type
+  /// return values from the call, and return the resulting token chain value.
+  virtual SDValue
+    LowerCall(CallLoweringInfo &/*CLI*/,
+              SmallVectorImpl<SDValue> &/*InVals*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Target-specific cleanup for formal ByVal parameters.
+  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
+
+  /// This hook should be implemented to check whether the return values
+  /// described by the Outs array can fit into the return registers.  If false
+  /// is returned, an sret-demotion is performed.
+  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
+                              MachineFunction &/*MF*/, bool /*isVarArg*/,
+               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
+               LLVMContext &/*Context*/) const
+  {
+    // Return true by default to get preexisting behavior.
+    return true;
+  }
+
+  /// This hook must be implemented to lower outgoing return values, described
+  /// by the Outs array, into the specified DAG. The implementation should
+  /// return the resulting token chain value.
+  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+                              bool /*isVarArg*/,
+                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
+                              const SmallVectorImpl<SDValue> & /*OutVals*/,
+                              const SDLoc & /*dl*/,
+                              SelectionDAG & /*DAG*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Return true if the result of the specified node is used only by a return
+  /// node. It also computes and returns the input chain for the tail call.
+  ///
+  /// This is used to determine whether it is possible to codegen a libcall as
+  /// tail call at legalization time.
+  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
+    return false;
+  }
+
+  /// Return true if the target may be able to emit the call instruction as a
+  /// tail call. This is used by optimization passes to determine if it's
+  /// profitable to duplicate return instructions to enable tailcall
+  /// optimization.
+  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
+    return false;
+  }
+
+  /// Return the builtin name for the __builtin___clear_cache intrinsic.
+  /// The default is to invoke the clear cache library call.
+  virtual const char * getClearCacheBuiltinName() const {
+    return "__clear_cache";
+  }
+
+  /// Return the register ID of the name passed in. Used by the named register
+  /// global variables extension. There is no target-independent behaviour,
+  /// so the default action is to report a fatal error.
+  virtual unsigned getRegisterByName(const char* RegName, EVT VT,
+                                     SelectionDAG &DAG) const {
+    report_fatal_error("Named registers not implemented for this target");
+  }
+
+  /// Return the type that should be used to zero or sign extend a
+  /// zeroext/signext integer return value.  FIXME: Some C calling conventions
+  /// require the return type to be promoted, but this is not true all the time,
+  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
+  /// conventions. The frontend should handle this and include all of the
+  /// necessary information.
+  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+                                       ISD::NodeType /*ExtendKind*/) const {
+    EVT MinVT = getRegisterType(Context, MVT::i32);
+    return VT.bitsLT(MinVT) ? MinVT : VT;
+  }
+
+  /// For some targets, an LLVM struct type must be broken down into multiple
+  /// simple types, but the calling convention specifies that the entire struct
+  /// must be passed in a block of consecutive registers.
+  virtual bool
+  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
+                                            bool isVarArg) const {
+    return false;
+  }
+
+  /// Returns a 0 terminated array of registers that can be safely used as
+  /// scratch registers.
+  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
+    return nullptr;
+  }
+
+  /// This callback is used to prepare for a volatile or atomic load.
+  /// It takes a chain node as input and returns the chain for the load itself.
+  ///
+  /// Having a callback like this is necessary for targets like SystemZ,
+  /// which allows a CPU to reuse the result of a previous load indefinitely,
+  /// even if a cache-coherent store is performed by another CPU.  The default
+  /// implementation does nothing.
+  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
+                                              SelectionDAG &DAG) const {
+    return Chain;
+  }
+
+  /// This callback is used to inspect load/store instructions and add
+  /// target-specific MachineMemOperand flags to them.  The default
+  /// implementation does nothing.
+  virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
+    return MachineMemOperand::MONone;
+  }
+
+  /// This callback is invoked by the type legalizer to legalize nodes with an
+  /// illegal operand type but legal result types.  It replaces the
+  /// LowerOperation callback in the type legalizer.  The reason we cannot do
+  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
+  /// use this callback.
+  ///
+  /// TODO: Consider merging with ReplaceNodeResults.
+  ///
+  /// The target places new result values for the node in Results (their number
+  /// and types must exactly match those of the original return values of
+  /// the node), or leaves Results empty, which indicates that the node is not
+  /// to be custom lowered after all.
+  /// The default implementation calls LowerOperation.
+  virtual void LowerOperationWrapper(SDNode *N,
+                                     SmallVectorImpl<SDValue> &Results,
+                                     SelectionDAG &DAG) const;
+
+  /// This callback is invoked for operations that are unsupported by the
+  /// target, which are registered to use 'custom' lowering, and whose defined
+  /// values are all legal.  If the target has no operations that require custom
+  /// lowering, it need not implement this.  The default implementation of this
+  /// aborts.
+  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+  /// This callback is invoked when a node result type is illegal for the
+  /// target, and the operation was registered to use 'custom' lowering for that
+  /// result type.  The target places new result values for the node in Results
+  /// (their number and types must exactly match those of the original return
+  /// values of the node), or leaves Results empty, which indicates that the
+  /// node is not to be custom lowered after all.
+  ///
+  /// If the target has no operations that require custom lowering, it need not
+  /// implement this.  The default implementation aborts.
+  virtual void ReplaceNodeResults(SDNode * /*N*/,
+                                  SmallVectorImpl<SDValue> &/*Results*/,
+                                  SelectionDAG &/*DAG*/) const {
+    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
+  }
+
+  /// This method returns the name of a target specific DAG node.
+  virtual const char *getTargetNodeName(unsigned Opcode) const;
+
+  /// This method returns a target specific FastISel object, or null if the
+  /// target does not support "fast" ISel.
+  virtual FastISel *createFastISel(FunctionLoweringInfo &,
+                                   const TargetLibraryInfo *) const {
+    return nullptr;
+  }
+
+  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
+                                             SelectionDAG &DAG) const;
+
+  //===--------------------------------------------------------------------===//
+  // Inline Asm Support hooks
+  //
+
+  /// This hook allows the target to expand an inline asm call to be explicit
+  /// llvm code if it wants to.  This is useful for turning simple inline asms
+  /// into LLVM intrinsics, which gives the compiler more information about the
+  /// behavior of the code.
+  virtual bool ExpandInlineAsm(CallInst *) const {
+    return false;
+  }
+
+  enum ConstraintType {
+    C_Register,            // Constraint represents specific register(s).
+    C_RegisterClass,       // Constraint represents any of register(s) in class.
+    C_Memory,              // Memory constraint.
+    C_Other,               // Something else.
+    C_Unknown              // Unsupported constraint.
+  };
+
+  enum ConstraintWeight {
+    // Generic weights.
+    CW_Invalid  = -1,     // No match.
+    CW_Okay     = 0,      // Acceptable.
+    CW_Good     = 1,      // Good weight.
+    CW_Better   = 2,      // Better weight.
+    CW_Best     = 3,      // Best weight.
+
+    // Well-known weights.
+    CW_SpecificReg  = CW_Okay,    // Specific register operands.
+    CW_Register     = CW_Good,    // Register operands.
+    CW_Memory       = CW_Better,  // Memory operands.
+    CW_Constant     = CW_Best,    // Constant operand.
+    CW_Default      = CW_Okay     // Default or don't know type.
+  };
+
+  /// This contains information for each constraint that we are lowering.
+  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
+    /// This contains the actual string for the code, like "m".  TargetLowering
+    /// picks the 'best' code from ConstraintInfo::Codes that most closely
+    /// matches the operand.
+    std::string ConstraintCode;
+
+    /// Information about the constraint code, e.g. Register, RegisterClass,
+    /// Memory, Other, Unknown.
+    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
+
+    /// If this is the result output operand or a clobber, this is null,
+    /// otherwise it is the incoming operand to the CallInst.  This gets
+    /// modified as the asm is processed.
+    Value *CallOperandVal = nullptr;
+
+    /// The ValueType for the operand value.
+    MVT ConstraintVT = MVT::Other;
+
+    /// Copy constructor for copying from a ConstraintInfo.
+    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
+        : InlineAsm::ConstraintInfo(std::move(Info)) {}
+
+    /// Return true if this is an input operand that is a matching constraint
+    /// like "4".
+    bool isMatchingInputConstraint() const;
+
+    /// If this is an input matching constraint, this method returns the output
+    /// operand it matches.
+    unsigned getMatchedOperand() const;
+  };
+
+  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
+
+  /// Split up the constraint string from the inline assembly value into the
+  /// specific constraints and their prefixes, and also tie in the associated
+  /// operand values.  If this returns an empty vector, and if the constraint
+  /// string itself isn't empty, there was an error parsing.
+  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
+                                                const TargetRegisterInfo *TRI,
+                                                ImmutableCallSite CS) const;
+
+  /// Examine constraint type and operand type and determine a weight value.
+  /// The operand object must already have been set up with the operand type.
+  virtual ConstraintWeight getMultipleConstraintMatchWeight(
+      AsmOperandInfo &info, int maIndex) const;
+
+  /// Examine constraint string and operand type and determine a weight value.
+  /// The operand object must already have been set up with the operand type.
+  virtual ConstraintWeight getSingleConstraintMatchWeight(
+      AsmOperandInfo &info, const char *constraint) const;
+
+  /// Determines the constraint code and constraint type to use for the specific
+  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
+  /// If the actual operand being passed in is available, it can be passed in as
+  /// Op, otherwise an empty SDValue can be passed.
+  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
+                                      SDValue Op,
+                                      SelectionDAG *DAG = nullptr) const;
+
+  /// Given a constraint, return the type of constraint it is for this target.
+  virtual ConstraintType getConstraintType(StringRef Constraint) const;
+
+  /// Given a physical register constraint (e.g.  {edx}), return the register
+  /// number and the register class for the register.
+  ///
+  /// Given a register class constraint, like 'r', if this corresponds directly
+  /// to an LLVM register class, return a register of 0 and the register class
+  /// pointer.
+  ///
+  /// This should only be used for C_Register constraints.  On error, this
+  /// returns a register number of 0 and a null register class pointer.
+  virtual std::pair<unsigned, const TargetRegisterClass *>
+  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                               StringRef Constraint, MVT VT) const;
+
+  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+    if (ConstraintCode == "i")
+      return InlineAsm::Constraint_i;
+    else if (ConstraintCode == "m")
+      return InlineAsm::Constraint_m;
+    return InlineAsm::Constraint_Unknown;
+  }
+
+  /// Try to replace an X constraint, which matches anything, with another that
+  /// has more specific requirements based on the type of the corresponding
+  /// operand.  This returns null if there is no replacement to make.
+  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+
+  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
+  /// add anything to Ops.
+  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+                                            std::vector<SDValue> &Ops,
+                                            SelectionDAG &DAG) const;
+
+  //===--------------------------------------------------------------------===//
+  // Div utility functions
+  //
+  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                    bool IsAfterLegalization,
+                    std::vector<SDNode *> *Created) const;
+  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                    bool IsAfterLegalization,
+                    std::vector<SDNode *> *Created) const;
+
+  /// Targets may override this function to provide custom SDIV lowering for
+  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
+  /// assumes SDIV is expensive and replaces it with a series of other integer
+  /// operations.
+  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+                                SelectionDAG &DAG,
+                                std::vector<SDNode *> *Created) const;
+
+  /// Indicate whether this target prefers to combine FDIVs with the same
+  /// divisor. If the transform should never be done, return zero. If the
+  /// transform should be done, return the minimum number of divisor uses
+  /// that must exist.
+  virtual unsigned combineRepeatedFPDivisors() const {
+    return 0;
+  }
+
+  /// Hooks for building estimates in place of slower divisions and square
+  /// roots.
+
+  /// Return either a square root or its reciprocal estimate value for the input
+  /// operand.
+  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+  /// 'Enabled' as set by a potential default override attribute.
+  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+  /// refinement iterations required to generate a sufficient (though not
+  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
+  /// algorithm implementation that uses either one or two constants.
+  /// The boolean Reciprocal is used to select whether the estimate is for the
+  /// square root of the input operand or the reciprocal of its square root.
+  /// A target may choose to implement its own refinement within this function.
+  /// If that's true, then return '0' as the number of RefinementSteps to avoid
+  /// any further refinement of the estimate.
+  /// An empty SDValue return means no estimate sequence can be created.
+  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+                                  int Enabled, int &RefinementSteps,
+                                  bool &UseOneConstNR, bool Reciprocal) const {
+    return SDValue();
+  }
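+
+  /// A target with a hardware reciprocal square root approximation might
+  /// override this roughly as follows (sketch; MyISD::FRSQRTE is an
+  /// illustrative target node):
+  ///   SDValue MyTargetLowering::getSqrtEstimate(
+  ///       SDValue Operand, SelectionDAG &DAG, int Enabled,
+  ///       int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const {
+  ///     EVT VT = Operand.getValueType();
+  ///     if (VT != MVT::f32)
+  ///       return SDValue();            // no estimate sequence available
+  ///     if (RefinementSteps == ReciprocalEstimate::Unspecified)
+  ///       RefinementSteps = 1;
+  ///     UseOneConstNR = true;
+  ///     return DAG.getNode(MyISD::FRSQRTE, SDLoc(Operand), VT, Operand);
+  ///   }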
+
+  /// Return a reciprocal estimate value for the input operand.
+  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+  /// 'Enabled' as set by a potential default override attribute.
+  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+  /// refinement iterations required to generate a sufficient (though not
+  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+  /// A target may choose to implement its own refinement within this function.
+  /// If that's true, then return '0' as the number of RefinementSteps to avoid
+  /// any further refinement of the estimate.
+  /// An empty SDValue return means no estimate sequence can be created.
+  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
+                                   int Enabled, int &RefinementSteps) const {
+    return SDValue();
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Legalization utility functions
+  //
+
+  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
+  /// respectively, each computing an n/2-bit part of the result.
+  /// \param Result A vector that will be filled with the parts of the result
+  ///        in little-endian order.
+  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
+  ///        if you want to control how low bits are extracted from the LHS.
+  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
+  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
+  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
+  /// \returns true if the node has been expanded, false if it has not.
+  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
+                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
+                      SelectionDAG &DAG, MulExpansionKind Kind,
+                      SDValue LL = SDValue(), SDValue LH = SDValue(),
+                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;
+
+  /// Expand a MUL into two nodes.  One that computes the high bits of
+  /// the result and one that computes the low bits.
+  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
+  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
+  ///        if you want to control how low bits are extracted from the LHS.
+  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
+  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
+  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
+  /// \returns true if the node has been expanded, false if it has not.
+  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+                 SelectionDAG &DAG, MulExpansionKind Kind,
+                 SDValue LL = SDValue(), SDValue LH = SDValue(),
+                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
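+
+  // For example, when an n-bit multiply is illegal, it can be expanded into
+  // operations on the legal half-width type roughly like this (sketch; N and
+  // HiLoVT come from the caller):
+  //   SDValue Lo, Hi;
+  //   bool Expanded = expandMUL(N, Lo, Hi, HiLoVT, DAG,
+  //                             MulExpansionKind::OnlyLegalOrCustom);
+  //   // On success, Lo and Hi hold the halves of the full-width product.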
+
+  /// Expand float(f32) to SINT(i64) conversion.
+  /// \param N Node to expand
+  /// \param Result output after conversion
+  /// \returns true if the expansion was successful, false otherwise
+  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+  /// Turn load of vector type into a load of the individual elements.
+  /// \param LD load to expand
+  /// \returns MERGE_VALUEs of the scalar loads with their chains.
+  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
+
+  /// Turn a store of a vector type into stores of the individual elements.
+  /// \param ST Store with a vector value type
+  /// \returns MERGE_VALUEs of the individual store chains.
+  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
+
+  /// Expands an unaligned load to 2 half-size loads for an integer, and
+  /// possibly more for vectors.
+  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
+                                                  SelectionDAG &DAG) const;
+
+  /// Expands an unaligned store to 2 half-size stores for integer values, and
+  /// possibly more for vectors.
+  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
+
+  /// Increments memory address \p Addr according to the type of the value
+  /// \p DataVT that should be stored. If the data is stored in compressed
+  /// form, the memory address should be incremented according to the number of
+  /// the stored elements. This number is equal to the number of '1's bits
+  /// in the \p Mask.
+  /// \p DataVT is a vector type. \p Mask is a vector value.
+  /// \p DataVT and \p Mask have the same number of vector elements.
+  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
+                                 EVT DataVT, SelectionDAG &DAG,
+                                 bool IsCompressedMemory) const;
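+
+  // For example, with DataVT = v8i32 and Mask = <1,0,1,1,0,0,1,0>, a
+  // compressed store advances Addr by popcount(Mask) * 4 = 16 bytes,
+  // whereas a regular masked store advances it by the full 32 bytes.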
+
+  /// Get a pointer to vector element \p Idx located in memory for a vector of
+  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
+  /// bounds the returned pointer is unspecified, but will be within the vector
+  /// bounds.
+  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
+                                  SDValue Idx) const;
+
+  //===--------------------------------------------------------------------===//
+  // Instruction Emitting Hooks
+  //
+
+  /// This method should be implemented by targets that mark instructions with
+  /// the 'usesCustomInserter' flag.  These instructions are special in various
+  /// ways, which require special support to insert.  The specified MachineInstr
+  /// is created but not inserted into any basic blocks, and this method is
+  /// called to expand it into a sequence of instructions, potentially also
+  /// creating new basic blocks and control flow.
+  /// As long as the returned basic block is different (i.e., we created a new
+  /// one), the custom inserter is free to modify the rest of \p MBB.
+  virtual MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
+
+  /// This method should be implemented by targets that mark instructions with
+  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
+  /// instruction selection by target hooks.  e.g. To fill in optional defs for
+  /// ARM 's' setting instructions.
+  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
+                                             SDNode *Node) const;
+
+  /// If this function returns true, SelectionDAGBuilder emits a
+  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
+  virtual bool useLoadStackGuardNode() const {
+    return false;
+  }
+
+  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
+                                      const SDLoc &DL) const {
+    llvm_unreachable("not implemented for this target");
+  }
+
+  /// Lower TLS global address SDNode for target independent emulated TLS model.
+  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
+                                          SelectionDAG &DAG) const;
+
+  /// Expands a target-specific indirect branch for the case of jump table
+  /// expansion.
+  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
+                                         SDValue Addr,
+                                         SelectionDAG &DAG) const {
+    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
+  }
+
+  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
+  // If we're comparing for equality to zero and isCtlzFast is true, expose the
+  // fact that this can be implemented as a ctlz/srl pair, so that the dag
+  // combiner can fold the new nodes.
+  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
+
+private:
+  SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
+                               ISD::CondCode Cond, DAGCombinerInfo &DCI,
+                               const SDLoc &DL) const;
+};
+
+/// Given an LLVM IR type and return type attributes, compute the return value
+/// EVTs and flags, and optionally also the offsets, if the return value is
+/// being lowered to memory.
+void GetReturnInfo(Type *ReturnType, AttributeList attr,
+                   SmallVectorImpl<ISD::OutputArg> &Outs,
+                   const TargetLowering &TLI, const DataLayout &DL);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
new file mode 100644
index 0000000..78da77f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -0,0 +1,200 @@
+//==- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes used to handle lowerings specific to common
+// object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+namespace llvm {
+
+class GlobalValue;
+class MachineModuleInfo;
+class Mangler;
+class MCContext;
+class MCSection;
+class MCSymbol;
+class TargetMachine;
+
+class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
+  bool UseInitArray = false;
+  mutable unsigned NextUniqueID = 1;  // ID 0 is reserved for execute-only sections
+
+protected:
+  MCSymbolRefExpr::VariantKind PLTRelativeVariantKind =
+      MCSymbolRefExpr::VK_None;
+
+public:
+  TargetLoweringObjectFileELF() = default;
+  ~TargetLoweringObjectFileELF() override = default;
+
+  /// Emit Obj-C garbage collection and linker options.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
+                            const MCSymbol *Sym) const override;
+
+  /// Given a constant with the SectionKind, return a section that it should be
+  /// placed in.
+  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+                                   const Constant *C,
+                                   unsigned &Align) const override;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  MCSection *getSectionForJumpTable(const Function &F,
+                                    const TargetMachine &TM) const override;
+
+  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+                                           const Function &F) const override;
+
+  /// Return an MCExpr to use for a reference to the specified type info global
+  /// variable from exception handling information.
+  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+                                        unsigned Encoding,
+                                        const TargetMachine &TM,
+                                        MachineModuleInfo *MMI,
+                                        MCStreamer &Streamer) const override;
+
+  // The symbol that gets passed to .cfi_personality.
+  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+                                    const TargetMachine &TM,
+                                    MachineModuleInfo *MMI) const override;
+
+  void InitializeELF(bool UseInitArray_);
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+                                       const GlobalValue *RHS,
+                                       const TargetMachine &TM) const override;
+};
+
+class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
+public:
+  TargetLoweringObjectFileMachO();
+  ~TargetLoweringObjectFileMachO() override = default;
+
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+  /// Emit the module flags that specify the garbage collection information.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+                                   const Constant *C,
+                                   unsigned &Align) const override;
+
+  /// The Mach-O version of this method defaults to returning a stub reference.
+  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+                                        unsigned Encoding,
+                                        const TargetMachine &TM,
+                                        MachineModuleInfo *MMI,
+                                        MCStreamer &Streamer) const override;
+
+  // The symbol that gets passed to .cfi_personality.
+  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+                                    const TargetMachine &TM,
+                                    MachineModuleInfo *MMI) const override;
+
+  /// Get MachO PC relative GOT entry relocation
+  const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+                                          const MCValue &MV, int64_t Offset,
+                                          MachineModuleInfo *MMI,
+                                          MCStreamer &Streamer) const override;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+                         const TargetMachine &TM) const override;
+};
+
+class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
+  mutable unsigned NextUniqueID = 0;
+
+public:
+  ~TargetLoweringObjectFileCOFF() override = default;
+
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+                         const TargetMachine &TM) const override;
+
+  MCSection *getSectionForJumpTable(const Function &F,
+                                    const TargetMachine &TM) const override;
+
+  /// Emit Obj-C garbage collection and linker options.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  void emitLinkerFlagsForGlobal(raw_ostream &OS,
+                                const GlobalValue *GV) const override;
+
+  void emitLinkerFlagsForUsed(raw_ostream &OS,
+                              const GlobalValue *GV) const override;
+};
+
+class TargetLoweringObjectFileWasm : public TargetLoweringObjectFile {
+  mutable unsigned NextUniqueID = 0;
+
+public:
+  TargetLoweringObjectFileWasm() = default;
+  ~TargetLoweringObjectFileWasm() override = default;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+                                           const Function &F) const override;
+
+  void InitializeWasm();
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+                                       const GlobalValue *RHS,
+                                       const TargetMachine &TM) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h
new file mode 100644
index 0000000..d0d959c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h
@@ -0,0 +1,42 @@
+//===-- llvm/CodeGen/TargetOpcodes.h - Target Indep Opcodes -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target independent instruction opcodes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETOPCODES_H
+#define LLVM_CODEGEN_TARGETOPCODES_H
+
+namespace llvm {
+
+/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+namespace TargetOpcode {
+enum {
+#define HANDLE_TARGET_OPCODE(OPC) OPC,
+#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
+#include "llvm/Support/TargetOpcodes.def"
+};
+} // end namespace TargetOpcode
+
+/// Check whether the given Opcode is a generic opcode that is not supposed
+/// to appear after ISel.
+inline bool isPreISelGenericOpcode(unsigned Opcode) {
+  return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START &&
+         Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+}
+
+/// Check whether the given Opcode is a target-specific opcode.
+inline bool isTargetSpecificOpcode(unsigned Opcode) {
+  return Opcode > TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+}
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
new file mode 100644
index 0000000..5918c52
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
@@ -0,0 +1,433 @@
+//===- TargetPassConfig.h - Code Generation pass options --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Target-Independent Code Generator Pass Configuration Options pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETPASSCONFIG_H
+#define LLVM_CODEGEN_TARGETPASSCONFIG_H
+
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+
+class LLVMTargetMachine;
+struct MachineSchedContext;
+class PassConfigImpl;
+class ScheduleDAGInstrs;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+
+class PassManagerBase;
+
+} // end namespace legacy
+
+using legacy::PassManagerBase;
+
+/// Discriminated union of Pass ID types.
+///
+/// The PassConfig API prefers dealing with IDs because they are safer and more
+/// efficient. IDs decouple configuration from instantiation. This way, when a
+/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe
+/// to refer to a Pass pointer after adding it to a pass manager, which deletes
+/// redundant pass instances.
+///
+/// However, it is convenient to directly instantiate target passes with
+/// non-default ctors. These often don't have a registered PassInfo. Rather than
+/// force all target passes to implement the pass registry boilerplate, allow
+/// the PassConfig API to handle either type.
+///
+/// AnalysisID is sadly char*, so PointerIntPair won't work.
+class IdentifyingPassPtr {
+  union {
+    AnalysisID ID;
+    Pass *P;
+  };
+  bool IsInstance = false;
+
+public:
+  IdentifyingPassPtr() : P(nullptr) {}
+  IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr) {}
+  IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
+
+  bool isValid() const { return P; }
+  bool isInstance() const { return IsInstance; }
+
+  AnalysisID getID() const {
+    assert(!IsInstance && "Not a Pass ID");
+    return ID;
+  }
+
+  Pass *getInstance() const {
+    assert(IsInstance && "Not a Pass Instance");
+    return P;
+  }
+};
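+
+// Both flavors sit behind the same interface, e.g. (sketch;
+// createMyTargetPass is an illustrative factory function):
+//   IdentifyingPassPtr ByID(&MachineSchedulerID);    // registered pass ID
+//   IdentifyingPassPtr ByObj(createMyTargetPass());  // concrete instance
+//   Pass *P = ByObj.isInstance() ? ByObj.getInstance() : nullptr;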
+
+template <> struct isPodLike<IdentifyingPassPtr> {
+  static const bool value = true;
+};
+
+/// Target-Independent Code Generator Pass Configuration Options.
+///
+/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
+/// to the internals of other CodeGen passes.
+class TargetPassConfig : public ImmutablePass {
+private:
+  PassManagerBase *PM = nullptr;
+  AnalysisID StartBefore = nullptr;
+  AnalysisID StartAfter = nullptr;
+  AnalysisID StopBefore = nullptr;
+  AnalysisID StopAfter = nullptr;
+  bool Started = true;
+  bool Stopped = false;
+  bool AddingMachinePasses = false;
+
+  /// Set the StartAfter, StartBefore, StopAfter and StopBefore passes to allow
+  /// running only a portion of the normal code-gen pass sequence.
+  ///
+  /// If the StartAfter and StartBefore pass ID is zero, then compilation will
+  /// begin at the normal point; otherwise, clear the Started flag to indicate
+  /// that passes should not be added until the starting pass is seen.  If the
+  /// Stop pass ID is zero, then compilation will continue to the end.
+  ///
+  /// This function expects that at least one of the StartAfter or the
+  /// StartBefore pass IDs is null.
+  void setStartStopPasses();
+
+protected:
+  LLVMTargetMachine *TM;
+  PassConfigImpl *Impl = nullptr; // Internal data structures
+  bool Initialized = false; // Flagged after all passes are configured.
+
+  // Target Pass Options
+  // Targets provide a default setting, user flags override.
+  bool DisableVerify = false;
+
+  /// Default setting for -enable-tail-merge on this target.
+  bool EnableTailMerge = true;
+
+  /// Require processing of functions such that callees are generated before
+  /// callers.
+  bool RequireCodeGenSCCOrder = false;
+
+  /// Add the actual instruction selection passes. This does not include
+  /// preparation passes on IR.
+  bool addCoreISelPasses();
+
+public:
+  TargetPassConfig(LLVMTargetMachine &TM, PassManagerBase &pm);
+  // Dummy constructor.
+  TargetPassConfig();
+
+  ~TargetPassConfig() override;
+
+  static char ID;
+
+  /// Get the right type of TargetMachine for this target.
+  template<typename TMC> TMC &getTM() const {
+    return *static_cast<TMC*>(TM);
+  }
+
+  void setInitialized() { Initialized = true; }
+
+  CodeGenOpt::Level getOptLevel() const;
+
+  /// Describe the status of the codegen
+  /// pipeline set by this target pass config.
+  /// Having a limited codegen pipeline means that options
+  /// have been used to restrict what codegen is doing.
+  /// In particular, that means that codegen won't emit
+  /// assembly code.
+  bool hasLimitedCodeGenPipeline() const;
+
+  /// If hasLimitedCodeGenPipeline is true, this method
+  /// returns a string with the name of the options, separated
+  /// by \p Separator that caused this pipeline to be limited.
+  std::string
+  getLimitedCodeGenPipelineReason(const char *Separator = "/") const;
+
+  /// Check if the codegen pipeline is limited in such a way that it
+  /// won't be complete. When the codegen pipeline is not complete,
+  /// this means it may not be possible to generate assembly from it.
+  bool willCompleteCodeGenPipeline() const {
+    return !hasLimitedCodeGenPipeline() || (!StopAfter && !StopBefore);
+  }
+
+  void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
+
+  bool getEnableTailMerge() const { return EnableTailMerge; }
+  void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
+
+  bool requiresCodeGenSCCOrder() const { return RequireCodeGenSCCOrder; }
+  void setRequiresCodeGenSCCOrder(bool Enable = true) {
+    setOpt(RequireCodeGenSCCOrder, Enable);
+  }
+
+  /// Allow the target to override a specific pass without overriding the pass
+  /// pipeline. When passes are added to the standard pipeline at the
+  /// point where StandardID is expected, add TargetID in its place.
+  void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
+
+  /// Insert InsertedPassID pass after TargetPassID pass.
+  void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
+                  bool VerifyAfter = true, bool PrintAfter = true);
+
+  /// Allow the target to enable a specific standard pass by default.
+  void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
+
+  /// Allow the target to disable a specific standard pass by default.
+  void disablePass(AnalysisID PassID) {
+    substitutePass(PassID, IdentifyingPassPtr());
+  }
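+
+  /// For example, a target's pass-config subclass constructor might tailor
+  /// the standard pipeline like this (sketch; MyPostRASchedID is an
+  /// illustrative target pass ID):
+  ///   substitutePass(&PostRASchedulerID, &MyPostRASchedID);
+  ///   disablePass(&EarlyIfConverterID);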
+
+  /// Return the pass substituted for StandardID by the target.
+  /// If no substitution exists, return StandardID.
+  IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
+
+  /// Return true if the pass has been substituted by the target or
+  /// overridden on the command line.
+  bool isPassSubstitutedOrOverridden(AnalysisID ID) const;
+
+  /// Return true if the optimized regalloc pipeline is enabled.
+  bool getOptimizeRegAlloc() const;
+
+  /// Return true if the default global register allocator is in use and
+  /// has not been overridden on the command line with '-regalloc=...'.
+  bool usingDefaultRegAlloc() const;
+
+  /// High level function that adds all passes necessary to go from the LLVM
+  /// IR representation to the MI representation.
+  /// Adds IR-based lowering and target-specific optimization passes, and
+  /// finally the core instruction selection passes.
+  /// \returns true if an error occurred, false otherwise.
+  bool addISelPasses();
+
+  /// Add common target configurable passes that perform LLVM IR to IR
+  /// transforms following machine independent optimization.
+  virtual void addIRPasses();
+
+  /// Add passes to lower exception handling for the code generator.
+  void addPassesToHandleExceptions();
+
+  /// Add pass to prepare the LLVM IR for code generation. This should be done
+  /// before exception handling preparation passes.
+  virtual void addCodeGenPrepare();
+
+  /// Add common passes that perform LLVM IR to IR transforms in preparation for
+  /// instruction selection.
+  virtual void addISelPrepare();
+
+  /// addInstSelector - This method should install an instruction selector pass,
+  /// which converts from LLVM code to machine instructions.
+  virtual bool addInstSelector() {
+    return true;
+  }
+
+  /// This method should install an IR translator pass, which converts from
+  /// LLVM code to machine instructions with possibly generic opcodes.
+  virtual bool addIRTranslator() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before legalization.
+  virtual void addPreLegalizeMachineIR() {}
+
+  /// This method should install a legalize pass, which converts the instruction
+  /// sequence into one that can be selected by the target.
+  virtual bool addLegalizeMachineIR() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before the register bank selection.
+  virtual void addPreRegBankSelect() {}
+
+  /// This method should install a register bank selector pass, which
+  /// assigns register banks to virtual registers without a register
+  /// class or register banks.
+  virtual bool addRegBankSelect() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before the (global) instruction selection.
+  virtual void addPreGlobalInstructionSelect() {}
+
+  /// This method should install a (global) instruction selector pass, which
+  /// converts possibly generic instructions to fully target-specific
+  /// instructions, thereby constraining all generic virtual registers to
+  /// register classes.
+  virtual bool addGlobalInstructionSelect() { return true; }
+
+  /// Add the complete, standard set of LLVM CodeGen passes.
+  /// Fully developed targets will not generally override this.
+  virtual void addMachinePasses();
+
+  /// Create an instance of ScheduleDAGInstrs to be run within the standard
+  /// MachineScheduler pass for this function and target at the current
+  /// optimization level.
+  ///
+  /// This can also be used to plug a new MachineSchedStrategy into an instance
+  /// of the standard ScheduleDAGMI:
+  ///   return new ScheduleDAGMI(C, make_unique<MyStrategy>(C), /*RemoveKillFlags=*/false)
+  ///
+  /// Return NULL to select the default (generic) machine scheduler.
+  virtual ScheduleDAGInstrs *
+  createMachineScheduler(MachineSchedContext *C) const {
+    return nullptr;
+  }
+
+  /// Similar to createMachineScheduler but used when postRA machine scheduling
+  /// is enabled.
+  virtual ScheduleDAGInstrs *
+  createPostMachineScheduler(MachineSchedContext *C) const {
+    return nullptr;
+  }
+
+  /// printAndVerify - Add a pass to dump then verify the machine function, if
+  /// those steps are enabled.
+  void printAndVerify(const std::string &Banner);
+
+  /// Add a pass to print the machine function if printing is enabled.
+  void addPrintPass(const std::string &Banner);
+
+  /// Add a pass to perform basic verification of the machine function if
+  /// verification is enabled.
+  void addVerifyPass(const std::string &Banner);
+
+  /// Check whether or not GlobalISel should abort on error.
+  /// When this is disabled, GlobalISel will fall back on SDISel instead of
+  /// erroring out.
+  bool isGlobalISelAbortEnabled() const;
+
+  /// Check whether or not a diagnostic should be emitted when GlobalISel
+  /// uses the fallback path. In other words, it will emit a diagnostic
+  /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
+  virtual bool reportDiagnosticWhenGlobalISelFallback() const;
+
+protected:
+  // Helper to verify the analysis is really immutable.
+  void setOpt(bool &Opt, bool Val);
+
+  /// Methods with trivial inline returns are convenient points in the common
+  /// codegen pass pipeline where targets may insert passes. Methods with
+  /// out-of-line standard implementations are major CodeGen stages called by
+  /// addMachinePasses. Some targets may override major stages when inserting
+  /// passes is insufficient, but maintaining overridden stages is more work.
+  ///
+
+  /// addPreISel - This method should add any "last minute" LLVM->LLVM
+  /// passes (which are run just before instruction selector).
+  virtual bool addPreISel() {
+    return true;
+  }
+
+  /// addMachineSSAOptimization - Add standard passes that optimize machine
+  /// instructions in SSA form.
+  virtual void addMachineSSAOptimization();
+
+  /// Add passes that optimize instruction level parallelism for out-of-order
+  /// targets. These passes are run while the machine code is still in SSA
+  /// form, so they can use MachineTraceMetrics to control their heuristics.
+  ///
+  /// All passes added here should preserve the MachineDominatorTree,
+  /// MachineLoopInfo, and MachineTraceMetrics analyses.
+  virtual bool addILPOpts() {
+    return false;
+  }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before register allocation.
+  virtual void addPreRegAlloc() { }
+
+  /// createTargetRegisterAllocator - Create the register allocator pass for
+  /// this target at the current optimization level.
+  virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
+
+  /// addFastRegAlloc - Add the minimum set of target-independent passes that
+  /// are required for fast register allocation.
+  virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
+
+  /// addOptimizedRegAlloc - Add passes related to register allocation.
+  /// LLVMTargetMachine provides standard regalloc passes for most targets.
+  virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+
+  /// addPreRewrite - Add passes to the optimized register allocation pipeline
+  /// after register allocation is complete, but before virtual registers are
+  /// rewritten to physical registers.
+  ///
+  /// These passes must preserve VirtRegMap and LiveIntervals, and when running
+  /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
+  /// When these passes run, VirtRegMap contains legal physreg assignments for
+  /// all virtual registers.
+  virtual bool addPreRewrite() {
+    return false;
+  }
+
+  /// This method may be implemented by targets that want to run passes after
+  /// register allocation pass pipeline but before prolog-epilog insertion.
+  virtual void addPostRegAlloc() { }
+
+  /// Add passes that optimize machine instructions after register allocation.
+  virtual void addMachineLateOptimization();
+
+  /// This method may be implemented by targets that want to run passes after
+  /// prolog-epilog insertion and before the second instruction scheduling pass.
+  virtual void addPreSched2() { }
+
+  /// addGCPasses - Add late codegen passes that analyze code for garbage
+  /// collection. This should return true if GC info should be printed after
+  /// these passes.
+  virtual bool addGCPasses();
+
+  /// Add standard basic block placement passes.
+  virtual void addBlockPlacement();
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before machine code is emitted.
+  virtual void addPreEmitPass() { }
+
+  /// Targets may add passes immediately before machine code is emitted in this
+  /// callback. This is called even later than `addPreEmitPass`.
+  // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
+  // position and remove the `2` suffix here as this callback is what
+  // `addPreEmitPass` *should* be but in reality isn't.
+  virtual void addPreEmitPass2() {}
+
+  /// Utilities for targets to add passes to the pass manager.
+  ///
+
+  /// Add a CodeGen pass at this point in the pipeline after checking overrides.
+  /// Return the pass that was added, or zero if no pass was added.
+  /// @p printAfter    if true and adding a machine function pass add an extra
+  ///                  machine printer pass afterwards
+  /// @p verifyAfter   if true and adding a machine function pass add an extra
+  ///                  machine verification pass afterwards.
+  AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true,
+                     bool printAfter = true);
+
+  /// Add a pass to the PassManager if that pass is supposed to be run, as
+  /// determined by the StartAfter and StopAfter options. Takes ownership of the
+  /// pass.
+  /// @p printAfter    if true and adding a machine function pass add an extra
+  ///                  machine printer pass afterwards
+  /// @p verifyAfter   if true and adding a machine function pass add an extra
+  ///                  machine verification pass afterwards.
+  void addPass(Pass *P, bool verifyAfter = true, bool printAfter = true);
+
+  /// addMachinePasses helper to create the target-selected or overridden
+  /// regalloc pass.
+  FunctionPass *createRegAllocPass(bool Optimized);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETPASSCONFIG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
new file mode 100644
index 0000000..ea47a24
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -0,0 +1,1188 @@
+//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes an abstract interface used to get information about a
+// target machine's register file.  This information is used for a variety of
+// purposes, especially register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
+#define LLVM_CODEGEN_TARGETREGISTERINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class BitVector;
+class LiveRegMatrix;
+class MachineFunction;
+class MachineInstr;
+class RegScavenger;
+class VirtRegMap;
+class LiveIntervals;
+
+class TargetRegisterClass {
+public:
+  using iterator = const MCPhysReg *;
+  using const_iterator = const MCPhysReg *;
+  using sc_iterator = const TargetRegisterClass* const *;
+
+  // Instance variables filled by tablegen, do not use!
+  const MCRegisterClass *MC;
+  const uint32_t *SubClassMask;
+  const uint16_t *SuperRegIndices;
+  const LaneBitmask LaneMask;
+  /// Classes with a higher priority value are assigned first by register
+  /// allocators using a greedy heuristic. The value is in the range [0,63].
+  const uint8_t AllocationPriority;
+  /// Whether the class supports two (or more) disjunct subregister indices.
+  const bool HasDisjunctSubRegs;
+  /// Whether a combination of subregisters can cover every register in the
+  /// class. See also the CoveredBySubRegs description in Target.td.
+  const bool CoveredBySubRegs;
+  const sc_iterator SuperClasses;
+  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
+
+  /// Return the register class ID number.
+  unsigned getID() const { return MC->getID(); }
+
+  /// begin/end - Return all of the registers in this class.
+  ///
+  iterator       begin() const { return MC->begin(); }
+  iterator         end() const { return MC->end(); }
+
+  /// Return the number of registers in this class.
+  unsigned getNumRegs() const { return MC->getNumRegs(); }
+
+  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
+  getRegisters() const {
+    return make_range(MC->begin(), MC->end());
+  }
+
+  /// Return the specified register in the class.
+  unsigned getRegister(unsigned i) const {
+    return MC->getRegister(i);
+  }
+
+  /// Return true if the specified register is included in this register class.
+  /// This does not include virtual registers.
+  bool contains(unsigned Reg) const {
+    return MC->contains(Reg);
+  }
+
+  /// Return true if both registers are in this class.
+  bool contains(unsigned Reg1, unsigned Reg2) const {
+    return MC->contains(Reg1, Reg2);
+  }
+
+  /// Return the cost of copying a value between two registers in this class.
+  /// A negative number means the register class is very expensive
+  /// to copy, e.g. status flag register classes.
+  int getCopyCost() const { return MC->getCopyCost(); }
+
+  /// Return true if this register class may be used to create virtual
+  /// registers.
+  bool isAllocatable() const { return MC->isAllocatable(); }
+
+  /// Return true if the specified TargetRegisterClass
+  /// is a proper sub-class of this TargetRegisterClass.
+  bool hasSubClass(const TargetRegisterClass *RC) const {
+    return RC != this && hasSubClassEq(RC);
+  }
+
+  /// Returns true if RC is a sub-class of or equal to this class.
+  bool hasSubClassEq(const TargetRegisterClass *RC) const {
+    unsigned ID = RC->getID();
+    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
+  }
+
+  /// Return true if the specified TargetRegisterClass is a
+  /// proper super-class of this TargetRegisterClass.
+  bool hasSuperClass(const TargetRegisterClass *RC) const {
+    return RC->hasSubClass(this);
+  }
+
+  /// Returns true if RC is a super-class of or equal to this class.
+  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
+    return RC->hasSubClassEq(this);
+  }
+
+  /// Returns a bit vector of subclasses, including this one.
+  /// The vector is indexed by class IDs.
+  ///
+  /// To use it, consider the returned array as a chunk of memory that
+  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
+  /// contains a bitset of the ID of the subclasses in big-endian style.
+  ///
+  /// I.e., the representation of the memory from left to right at the
+  /// bit level looks like:
+  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
+  ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
+  /// where each number represents a class ID and XXX marks bits that
+  /// should be ignored.
+  ///
+  /// See the implementation of hasSubClassEq for an example of how it
+  /// can be used.
+  const uint32_t *getSubClassMask() const {
+    return SubClassMask;
+  }
+
+  /// Returns a 0-terminated list of sub-register indices that project some
+  /// super-register class into this register class. The list has an entry for
+  /// each Idx such that:
+  ///
+  ///   There exists SuperRC where:
+  ///     For all Reg in SuperRC:
+  ///       this->contains(Reg:Idx)
+  const uint16_t *getSuperRegIndices() const {
+    return SuperRegIndices;
+  }
+
+  /// Returns a NULL-terminated list of super-classes.  The
+  /// classes are ordered by ID which is also a topological ordering from large
+  /// to small classes.  The list does NOT include the current class.
+  sc_iterator getSuperClasses() const {
+    return SuperClasses;
+  }
+
+  /// Return true if this TargetRegisterClass is a subset
+  /// class of at least one other TargetRegisterClass.
+  bool isASubClass() const {
+    return SuperClasses[0] != nullptr;
+  }
+
+  /// Returns the preferred order for allocating registers from this register
+  /// class in MF. The raw order comes directly from the .td file and may
+  /// include reserved registers that are not allocatable.
+  /// Register allocators should also make sure to allocate
+  /// callee-saved registers only after all the volatiles are used. The
+  /// RegisterClassInfo class provides filtered allocation orders with
+  /// callee-saved registers moved to the end.
+  ///
+  /// The MachineFunction argument can be used to tune the allocatable
+  /// registers based on the characteristics of the function, subtarget, or
+  /// other criteria.
+  ///
+  /// By default, this method returns all registers in the class.
+  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
+    return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
+  }
+
+  /// Returns the combination of all lane masks of registers in this class.
+  /// The lane masks of the registers are the combination of all lane masks
+  /// of their subregisters. Returns 1 if there are no subregisters.
+  LaneBitmask getLaneMask() const {
+    return LaneMask;
+  }
+};
+
+/// Extra information, not in MCRegisterDesc, about registers.
+/// These are used by codegen, not by MC.
+struct TargetRegisterInfoDesc {
+  unsigned CostPerUse;          // Extra cost of instructions using register.
+  bool inAllocatableClass;      // Register belongs to an allocatable regclass.
+};
+
+/// Each TargetRegisterClass has a per-register weight and a weight limit,
+/// which must be less than the limits of its pressure sets.
+struct RegClassWeight {
+  unsigned RegWeight;
+  unsigned WeightLimit;
+};
+
+/// TargetRegisterInfo base class - We assume that the target defines a static
+/// array of TargetRegisterDesc objects that represent all of the machine
+/// registers that the target has.  As such, we simply have to track a pointer
+/// to this array so that we can turn register number into a register
+/// descriptor.
+///
+class TargetRegisterInfo : public MCRegisterInfo {
+public:
+  using regclass_iterator = const TargetRegisterClass * const *;
+  using vt_iterator = const MVT::SimpleValueType *;
+  struct RegClassInfo {
+    unsigned RegSize, SpillSize, SpillAlignment;
+    vt_iterator VTList;
+  };
+private:
+  const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
+  const char *const *SubRegIndexNames;        // Names of subreg indexes.
+  // Pointer to array of lane masks, one per sub-reg index.
+  const LaneBitmask *SubRegIndexLaneMasks;
+
+  regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
+  LaneBitmask CoveringLanes;
+  const RegClassInfo *const RCInfos;
+  unsigned HwMode;
+
+protected:
+  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
+                     regclass_iterator RegClassBegin,
+                     regclass_iterator RegClassEnd,
+                     const char *const *SRINames,
+                     const LaneBitmask *SRILaneMasks,
+                     LaneBitmask CoveringLanes,
+                     const RegClassInfo *const RSI,
+                     unsigned Mode = 0);
+  virtual ~TargetRegisterInfo();
+
+public:
+  // Register numbers can represent physical registers, virtual registers, and
+  // sometimes stack slots. The unsigned values are divided into these ranges:
+  //
+  //   0           Not a register, can be used as a sentinel.
+  //   [1;2^30)    Physical registers assigned by TableGen.
+  //   [2^30;2^31) Stack slots. (Rarely used.)
+  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
+  //
+  // Further sentinels can be allocated from the small negative integers.
+  // DenseMapInfo<unsigned> uses -1u and -2u.
+
+  /// isStackSlot - Sometimes it is useful to be able to store a non-negative
+  /// frame index in a variable that normally holds a register. isStackSlot()
+  /// returns true if Reg is in the range used for stack slots.
+  ///
+  /// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
+  /// slots, so if a variable may contain a stack slot, always check
+  /// isStackSlot() first.
+  ///
+  static bool isStackSlot(unsigned Reg) {
+    return int(Reg) >= (1 << 30);
+  }
+
+  /// Compute the frame index from a register value representing a stack slot.
+  static int stackSlot2Index(unsigned Reg) {
+    assert(isStackSlot(Reg) && "Not a stack slot");
+    return int(Reg - (1u << 30));
+  }
+
+  /// Convert a non-negative frame index to a stack slot register value.
+  static unsigned index2StackSlot(int FI) {
+    assert(FI >= 0 && "Cannot hold a negative frame index.");
+    return FI + (1u << 30);
+  }
+
+  /// Return true if the specified register number is in
+  /// the physical register namespace.
+  static bool isPhysicalRegister(unsigned Reg) {
+    assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+    return int(Reg) > 0;
+  }
+
+  /// Return true if the specified register number is in
+  /// the virtual register namespace.
+  static bool isVirtualRegister(unsigned Reg) {
+    assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+    return int(Reg) < 0;
+  }
+
+  /// Convert a virtual register number to a 0-based index.
+  /// The first virtual register in a function will get the index 0.
+  static unsigned virtReg2Index(unsigned Reg) {
+    assert(isVirtualRegister(Reg) && "Not a virtual register");
+    return Reg & ~(1u << 31);
+  }
+
+  /// Convert a 0-based index to a virtual register number.
+  /// This is the inverse operation of VirtReg2IndexFunctor below.
+  static unsigned index2VirtReg(unsigned Index) {
+    return Index | (1u << 31);
+  }
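+
+  // Illustrative sketch of the encoding round-trips described above:
+  //
+  //   unsigned VReg = TargetRegisterInfo::index2VirtReg(0);   // First vreg.
+  //   assert(TargetRegisterInfo::isVirtualRegister(VReg));
+  //   assert(TargetRegisterInfo::virtReg2Index(VReg) == 0);
+  //
+  //   unsigned Slot = TargetRegisterInfo::index2StackSlot(3); // Frame index 3.
+  //   assert(TargetRegisterInfo::isStackSlot(Slot));
+  //   assert(TargetRegisterInfo::stackSlot2Index(Slot) == 3);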
+
+  /// Return the size in bits of a register from class RC.
+  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).RegSize;
+  }
+
+  /// Return the size in bytes of the stack slot allocated to hold a spilled
+  /// copy of a register from class RC.
+  unsigned getSpillSize(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).SpillSize / 8;
+  }
+
+  /// Return the minimum required alignment in bytes for a spill slot for
+  /// a register of this class.
+  unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).SpillAlignment / 8;
+  }
+
+  /// Return true if the given TargetRegisterClass has the ValueType T.
+  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
+    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
+      if (MVT(*I) == T)
+        return true;
+    return false;
+  }
+
+  /// Loop over all of the value types that can be represented by values
+  /// in the given register class.
+  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).VTList;
+  }
+
+  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
+    vt_iterator I = legalclasstypes_begin(RC);
+    while (*I != MVT::Other)
+      ++I;
+    return I;
+  }
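+
+  // Illustrative sketch: listing every legal value type for a class RC
+  // (TRI and RC are assumed to be in scope):
+  //
+  //   for (auto I = TRI->legalclasstypes_begin(*RC),
+  //             E = TRI->legalclasstypes_end(*RC); I != E; ++I)
+  //     dbgs() << EVT(*I).getEVTString() << '\n';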
+
+  /// Returns the register class of a physical register of the given type,
+  /// picking the smallest (most specialized) register class of the right type
+  /// that contains this physreg.
+  const TargetRegisterClass *
+    getMinimalPhysRegClass(unsigned Reg, MVT VT = MVT::Other) const;
+
+  /// Return the maximal subclass of the given register class that is
+  /// allocatable or NULL.
+  const TargetRegisterClass *
+    getAllocatableClass(const TargetRegisterClass *RC) const;
+
+  /// Returns a bitset indexed by register number indicating if a register is
+  /// allocatable or not. If a register class is specified, returns the subset
+  /// for the class.
+  BitVector getAllocatableSet(const MachineFunction &MF,
+                              const TargetRegisterClass *RC = nullptr) const;
+
+  /// Return the additional cost of using this register instead
+  /// of other registers in its class.
+  unsigned getCostPerUse(unsigned RegNo) const {
+    return InfoDesc[RegNo].CostPerUse;
+  }
+
+  /// Return true if the register belongs to any allocatable register class.
+  bool isInAllocatableClass(unsigned RegNo) const {
+    return InfoDesc[RegNo].inAllocatableClass;
+  }
+
+  /// Return the human-readable symbolic target-specific
+  /// name for the specified SubRegIndex.
+  const char *getSubRegIndexName(unsigned SubIdx) const {
+    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+           "This is not a subregister index");
+    return SubRegIndexNames[SubIdx-1];
+  }
+
+  /// Return a bitmask representing the parts of a register that are covered by
+  /// SubIdx \see LaneBitmask.
+  ///
+  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
+  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
+    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+    return SubRegIndexLaneMasks[SubIdx];
+  }
+
+  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
+  /// used to determine if sub-registers overlap - they can't be used to
+  /// determine if a set of sub-registers completely cover another
+  /// sub-register.
+  ///
+  /// The X86 general purpose registers have two lanes corresponding to the
+  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
+  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
+  /// sub_32bit sub-register.
+  ///
+  /// On the other hand, the ARM NEON lanes fully cover their registers: The
+  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
+  /// This is related to the CoveredBySubRegs property on register definitions.
+  ///
+  /// This function returns a bit mask of lanes that completely cover their
+  /// sub-registers. More precisely, given:
+  ///
+  ///   Covering = getCoveringLanes();
+  ///   MaskA = getSubRegIndexLaneMask(SubA);
+  ///   MaskB = getSubRegIndexLaneMask(SubB);
+  ///
+  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
+  /// SubB.
+  LaneBitmask getCoveringLanes() const { return CoveringLanes; }
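+
+  // Illustrative sketch of the covering test described above (SubA and SubB
+  // are assumed sub-register indices):
+  //
+  //   LaneBitmask Covering = TRI->getCoveringLanes();
+  //   LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(SubA);
+  //   LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(SubB);
+  //   bool SubACoveredBySubB = (MaskA & ~(MaskB & Covering)).none();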
+
+  /// Returns true if the two registers are equal or alias each other.
+  /// The registers may be virtual registers.
+  bool regsOverlap(unsigned regA, unsigned regB) const {
+    if (regA == regB) return true;
+    if (isVirtualRegister(regA) || isVirtualRegister(regB))
+      return false;
+
+    // Regunits are numerically ordered. Find a common unit.
+    MCRegUnitIterator RUA(regA, this);
+    MCRegUnitIterator RUB(regB, this);
+    do {
+      if (*RUA == *RUB) return true;
+      if (*RUA < *RUB) ++RUA;
+      else             ++RUB;
+    } while (RUA.isValid() && RUB.isValid());
+    return false;
+  }
+
+  /// Returns true if Reg contains RegUnit.
+  bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
+    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
+      if (*Units == RegUnit)
+        return true;
+    return false;
+  }
+
+  /// Returns the original SrcReg unless it is the target of a copy-like
+  /// operation, in which case we chain backwards through all such operations
+  /// to the ultimate source register.  If a physical register is encountered,
+  /// we stop the search.
+  virtual unsigned lookThruCopyLike(unsigned SrcReg,
+                                    const MachineRegisterInfo *MRI) const;
+
+  /// Return a null-terminated list of all of the callee-saved registers on
+  /// this target. The registers should be in the order of desired callee-save
+  /// stack frame offset. The first register is closest to the incoming stack
+  /// pointer if stack grows down, and vice versa.
+  /// Notice: This function does not take into account disabled CSRs.
+  ///         In most cases you will want to use the getCalleeSavedRegs
+  ///         function implemented in MachineRegisterInfo instead.
+  virtual const MCPhysReg*
+  getCalleeSavedRegs(const MachineFunction *MF) const = 0;
+
+  /// Return a mask of call-preserved registers for the given calling convention
+  /// on the current function. The mask should include all call-preserved
+  /// aliases. This is used by the register allocator to determine which
+  /// registers can be live across a call.
+  ///
+  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
+  /// A set bit indicates that all bits of the corresponding register are
+  /// preserved across the function call.  The bit mask is expected to be
+  /// sub-register complete, i.e. if A is preserved, so are all its
+  /// sub-registers.
+  ///
+  /// Bits are numbered from the LSB, so the bit for physical register Reg can
+  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
+  ///
+  /// A NULL pointer means that no register mask will be used, and call
+  /// instructions should use implicit-def operands to indicate call clobbered
+  /// registers.
+  ///
+  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+                                               CallingConv::ID) const {
+    // The default mask clobbers everything.  All targets should override.
+    return nullptr;
+  }
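+
+  // Illustrative sketch: testing whether physical register Reg is preserved
+  // across a call site with calling convention CC, using the bit layout
+  // documented above.
+  //
+  //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CC);
+  //   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);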
+
+  /// Return a register mask that clobbers everything.
+  virtual const uint32_t *getNoPreservedMask() const {
+    llvm_unreachable("target does not provide no preserved mask");
+  }
+
+  /// Return true if all bits that are set in mask \p mask0 are also set in
+  /// \p mask1.
+  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
+
+  /// Return all the call-preserved register masks defined for this target.
+  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
+  virtual ArrayRef<const char *> getRegMaskNames() const = 0;
+
+  /// Returns a bitset indexed by physical register number indicating if a
+  /// register is a special register that has particular uses and should be
+  /// considered unavailable at all times, e.g. stack pointer, return address.
+  /// A reserved register:
+  /// - is not allocatable
+  /// - is considered always live
+  /// - is ignored by liveness tracking
+  /// It is often necessary to reserve the super registers of a reserved
+  /// register as well, to avoid them getting allocated indirectly. You may use
+  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
+  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+
+  /// Returns true if PhysReg is unallocatable and constant throughout the
+  /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
+  virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
+
+  /// Physical registers that may be modified within a function but are
+  /// guaranteed to be restored before any uses. This is useful for targets that
+  /// have call sequences where a GOT register may be updated by the caller
+  /// prior to a call and is guaranteed to be restored (also by the caller)
+  /// after the call. 
+  virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
+                                        const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Prior to adding the live-out mask to a stackmap or patchpoint
+  /// instruction, provide the target the opportunity to adjust it (mainly to
+  /// remove pseudo-registers that should be ignored).
+  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
+
+  /// Return a super-register of the specified register
+  /// Reg so its sub-register of index SubIdx is Reg.
+  unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+                               const TargetRegisterClass *RC) const {
+    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
+  }
+
+  /// Return a subclass of the specified register
+  /// class A so that each register in it has a sub-register of the
+  /// specified sub-register index which is in the specified register class B.
+  ///
+  /// TableGen will synthesize missing A sub-classes.
+  virtual const TargetRegisterClass *
+  getMatchingSuperRegClass(const TargetRegisterClass *A,
+                           const TargetRegisterClass *B, unsigned Idx) const;
+
+  // For a copy-like instruction that defines a register of class DefRC with
+  // subreg index DefSubReg, reading from another source with class SrcRC and
+  // subregister SrcSubReg, return true if this is a preferable copy
+  // instruction or if an earlier use should be used.
+  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
+                                    unsigned DefSubReg,
+                                    const TargetRegisterClass *SrcRC,
+                                    unsigned SrcSubReg) const;
+
+  /// Returns the largest legal sub-class of RC that
+  /// supports the sub-register index Idx.
+  /// If no such sub-class exists, return NULL.
+  /// If all registers in RC already have an Idx sub-register, return RC.
+  ///
+  /// TableGen generates a version of this function that is good enough in most
+  /// cases.  Targets can override if they have constraints that TableGen
+  /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
+  /// supported by the full GR32 register class in 64-bit mode, but only by the
+  /// GR32_ABCD register class in 32-bit mode.
+  ///
+  /// TableGen will synthesize missing RC sub-classes.
+  virtual const TargetRegisterClass *
+  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
+    assert(Idx == 0 && "Target has no sub-registers");
+    return RC;
+  }
+
+  /// Return the subregister index you get from composing
+  /// two subregister indices.
+  ///
+  /// The special null sub-register index composes as the identity.
+  ///
+  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+  /// returns c. Note that composeSubRegIndices does not tell you about illegal
+  /// compositions. If R does not have a subreg a, or R:a does not have a subreg
+  /// b, composeSubRegIndices doesn't tell you.
+  ///
+  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+  /// ssub_0:S0 - ssub_3:S3 subregs.
+  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+    if (!a) return b;
+    if (!b) return a;
+    return composeSubRegIndicesImpl(a, b);
+  }
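+
+  // Illustrative sketch of the ARM example above (ARM::dsub_1 etc. are
+  // target-defined sub-register index enums):
+  //
+  //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
+  //   // Idx == ARM::ssub_2: Q0:dsub_1 is D1, and D1:ssub_0 is S2 == Q0:ssub_2.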
+
+  /// Transforms a LaneMask computed for one subregister to the lanemask that
+  /// would have been computed when composing the subsubregisters with IdxA
+  /// first. @sa composeSubRegIndices()
+  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
+                                         LaneBitmask Mask) const {
+    if (!IdxA)
+      return Mask;
+    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
+  }
+
+  /// Transform a lanemask given for a virtual register to the corresponding
+  /// lanemask before using subregister with index \p IdxA.
+  /// This is the reverse of composeSubRegIndexLaneMask(); assuming Mask is a
+  /// valid lane mask (no invalid bits set), the following holds:
+  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
+  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
+  /// => X1 == Mask
+  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
+                                                LaneBitmask LaneMask) const {
+    if (!IdxA)
+      return LaneMask;
+    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
+  }
+
+  /// Debugging helper: dump register in human readable form to dbgs() stream.
+  static void dumpReg(unsigned Reg, unsigned SubRegIndex = 0,
+                      const TargetRegisterInfo* TRI = nullptr);
+
+protected:
+  /// Overridden by TableGen in targets that have sub-registers.
+  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+  /// Overridden by TableGen in targets that have sub-registers.
+  virtual LaneBitmask
+  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
+                                                            LaneBitmask) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+public:
+  /// Find a common super-register class if it exists.
+  ///
+  /// Find a register class, SuperRC and two sub-register indices, PreA and
+  /// PreB, such that:
+  ///
+  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
+  ///
+  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
+  ///
+  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
+  ///
+  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
+  /// requirements, and there is no register class with a smaller spill size
+  /// that satisfies the requirements.
+  ///
+  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
+  ///
+  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
+  /// that case, the returned register class will be a sub-class of the
+  /// corresponding argument register class.
+  ///
+  /// The function returns NULL if no register class can be found.
+  const TargetRegisterClass*
+  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
+                         const TargetRegisterClass *RCB, unsigned SubB,
+                         unsigned &PreA, unsigned &PreB) const;
+
+  //===--------------------------------------------------------------------===//
+  // Register Class Information
+  //
+protected:
+  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
+    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
+  }
+
+public:
+  /// Register class iterators
+  regclass_iterator regclass_begin() const { return RegClassBegin; }
+  regclass_iterator regclass_end() const { return RegClassEnd; }
+  iterator_range<regclass_iterator> regclasses() const {
+    return make_range(regclass_begin(), regclass_end());
+  }
+
+  unsigned getNumRegClasses() const {
+    return (unsigned)(regclass_end()-regclass_begin());
+  }
+
+  /// Returns the register class associated with the enumeration value.
+  /// See class MCOperandInfo.
+  const TargetRegisterClass *getRegClass(unsigned i) const {
+    assert(i < getNumRegClasses() && "Register Class ID out of range");
+    return RegClassBegin[i];
+  }
+
+  /// Returns the name of the register class.
+  const char *getRegClassName(const TargetRegisterClass *Class) const {
+    return MCRegisterInfo::getRegClassName(Class->MC);
+  }
+
+  /// Find the largest common subclass of A and B.
+  /// Return NULL if there is no common subclass.
+  /// The common subclass should contain
+  /// simple value type SVT if it is not the Any type.
+  const TargetRegisterClass *
+  getCommonSubClass(const TargetRegisterClass *A,
+                    const TargetRegisterClass *B,
+                    const MVT::SimpleValueType SVT =
+                    MVT::SimpleValueType::Any) const;
+
+  /// Returns a TargetRegisterClass used for pointer values.
+  /// If a target supports multiple different pointer register classes,
+  /// kind specifies which one is indicated.
+  virtual const TargetRegisterClass *
+  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
+    llvm_unreachable("Target didn't implement getPointerRegClass!");
+  }
+
+  /// Returns a legal register class to copy a register in the specified class
+  /// to or from. If it is possible to copy the register directly without using
+  /// a cross register class copy, return the specified RC. Returns NULL if it
+  /// is not possible to copy between two registers of the specified class.
+  virtual const TargetRegisterClass *
+  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
+    return RC;
+  }
+
+  /// Returns the largest super class of RC that is legal to use in the current
+  /// sub-target and has the same spill size.
+  /// The returned register class can be used to create virtual registers which
+  /// means that all its registers can be copied and spilled.
+  virtual const TargetRegisterClass *
+  getLargestLegalSuperClass(const TargetRegisterClass *RC,
+                            const MachineFunction &) const {
+    /// The default implementation is very conservative and doesn't allow the
+    /// register allocator to inflate register classes.
+    return RC;
+  }
+
+  /// Return the register pressure "high water mark" for the specific register
+  /// class. The scheduler is in high register pressure mode (for the specific
+  /// register class) if it goes over the limit.
+  ///
+  /// Note: this is the old register pressure model that relies on a manually
+  /// specified representative register class per value type.
+  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+                                       MachineFunction &MF) const {
+    return 0;
+  }
+
+  /// Return a heuristic for the machine scheduler to compare the profitability
+  /// of increasing one register pressure set versus another.  The scheduler
+  /// will prefer increasing the register pressure of the set which returns
+  /// the largest value for this function.
+  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
+                                          unsigned PSetID) const {
+    return PSetID;
+  }
+
+  /// Get the weight in units of pressure for this register class.
+  virtual const RegClassWeight &getRegClassWeight(
+    const TargetRegisterClass *RC) const = 0;
+
+  /// Returns size in bits of a phys/virtual/generic register.
+  unsigned getRegSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI) const;
+
+  /// Get the weight in units of pressure for this register unit.
+  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
+
+  /// Get the number of dimensions of register pressure.
+  virtual unsigned getNumRegPressureSets() const = 0;
+
+  /// Get the name of this register unit pressure set.
+  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
+
+  /// Get the register unit pressure limit for this dimension.
+  /// This limit must be adjusted dynamically for reserved registers.
+  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
+                                          unsigned Idx) const = 0;
+
+  /// Get the dimensions of register pressure impacted by this register class.
+  /// Returns a -1 terminated array of pressure set IDs.
+  virtual const int *getRegClassPressureSets(
+    const TargetRegisterClass *RC) const = 0;
+
+  /// Get the dimensions of register pressure impacted by this register unit.
+  /// Returns a -1 terminated array of pressure set IDs.
+  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
+
+  /// Get a list of 'hint' registers that the register allocator should try
+  /// first when allocating a physical register for the virtual register
+  /// VirtReg. These registers are effectively moved to the front of the
+  /// allocation order. If true is returned, regalloc will try to only use
+  /// hints to the greatest extent possible even if it means spilling.
+  ///
+  /// The Order argument is the allocation order for VirtReg's register class
+  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
+  /// come from Order, and they must not be reserved.
+  ///
+  /// The default implementation of this function will only add target
+  /// independent register allocation hints. Targets that override this
+  /// function should typically call this default implementation as well and
+  /// expect to see generic copy hints added.
+  virtual bool getRegAllocationHints(unsigned VirtReg,
+                                     ArrayRef<MCPhysReg> Order,
+                                     SmallVectorImpl<MCPhysReg> &Hints,
+                                     const MachineFunction &MF,
+                                     const VirtRegMap *VRM = nullptr,
+                                     const LiveRegMatrix *Matrix = nullptr)
+    const;
+
+  /// A callback to allow the target a chance to update register allocation
+  /// hints when a register is "changed" (e.g. coalesced) to another register.
+  /// E.g. on ARM, some virtual registers should target register pairs: if one
+  /// of a pair is coalesced to another register, the allocation hint of the
+  /// other half of the pair should be changed to point to the new register.
+  virtual void updateRegAllocHint(unsigned Reg, unsigned NewReg,
+                                  MachineFunction &MF) const {
+    // Do nothing.
+  }
+
+  /// The creation of multiple copy hints has been implemented in
+  /// weightCalcHelper(), but since this affects so many tests for many
+  /// targets, it is temporarily disabled by default. THIS SHOULD BE
+  /// "GENERAL GOODNESS" and hopefully all targets will update their tests
+  /// and enable this soon. This hook should then be removed.
+  virtual bool enableMultipleCopyHints() const { return false; }
+
+  /// Allow the target to reverse allocation order of local live ranges. This
+  /// will generally allocate shorter local live ranges first. For targets with
+  /// many registers, this could reduce regalloc compile time by a large
+  /// factor. It is disabled by default for three reasons:
+  /// (1) Top-down allocation is simpler and easier to debug for targets that
+  /// don't benefit from reversing the order.
+  /// (2) Bottom-up allocation could result in poor eviction decisions on some
+  /// targets affecting the performance of compiled code.
+  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
+  virtual bool reverseLocalAssignment() const { return false; }
+
+  /// Allow the target to override the cost of using a callee-saved register for
+  /// the first time. Default value of 0 means we will use a callee-saved
+  /// register if it is available.
+  virtual unsigned getCSRFirstUseCost() const { return 0; }
+
+  /// Returns true if the target requires (and can make use of) the register
+  /// scavenger.
+  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target wants to use frame pointer based accesses to
+  /// spill to the scavenger emergency spill slot.
+  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
+    return true;
+  }
+
+  /// Returns true if the target requires post PEI scavenging of registers for
+  /// materializing frame index constants.
+  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target requires using the RegScavenger directly for
+  /// frame elimination despite using requiresFrameIndexScavenging.
+  virtual bool requiresFrameIndexReplacementScavenging(
+      const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target wants the LocalStackAllocation pass to be run
+  /// and virtual base registers used for more efficient stack access.
+  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Return true if target has reserved a spill slot in the stack frame of
+  /// the given function for the specified register. e.g. On x86, if the frame
+  /// register is required, the first fixed stack object is reserved as its
+  /// spill slot. This tells PEI not to create a new stack frame
+  /// object for the given register. It should be called only after
+  /// determineCalleeSaves().
+  virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
+                                    int &FrameIdx) const {
+    return false;
+  }
+
+  /// Returns true if the live-ins should be tracked after register allocation.
+  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// True if the stack can be realigned for the target.
+  virtual bool canRealignStack(const MachineFunction &MF) const;
+
+  /// True if storage within the function requires the stack pointer to be
+  /// aligned more than the normal calling convention calls for.
+  /// This cannot be overridden by the target, but canRealignStack can be
+  /// overridden.
+  bool needsStackRealignment(const MachineFunction &MF) const;
+
+  /// Get the offset from the referenced frame index in the instruction,
+  /// if there is one.
+  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+                                           int Idx) const {
+    return 0;
+  }
+
+  /// Returns true if the instruction's frame index reference would be better
+  /// served by a base register other than FP or SP.
+  /// Used by LocalStackFrameAllocation to determine which frame index
+  /// references it should create new base registers for.
+  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+    return false;
+  }
+
+  /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
+  /// before insertion point I.
+  virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
+                                            unsigned BaseReg, int FrameIdx,
+                                            int64_t Offset) const {
+    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
+                     "target");
+  }
+
+  /// Resolve a frame index operand of an instruction
+  /// to reference the indicated base register plus offset instead.
+  virtual void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+                                 int64_t Offset) const {
+    llvm_unreachable("resolveFrameIndex does not exist on this target");
+  }
+
+  /// Determine whether a given base register plus offset immediate is
+  /// encodable to resolve a frame index.
+  virtual bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
+                                  int64_t Offset) const {
+    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
+  }
+
+  /// Spill the register so it can be used by the register scavenger.
+  /// Return true if the register was spilled, false otherwise.
+  /// If this function does not spill the register, the scavenger
+  /// will instead spill it to the emergency spill slot.
+  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator I,
+                                     MachineBasicBlock::iterator &UseMI,
+                                     const TargetRegisterClass *RC,
+                                     unsigned Reg) const {
+    return false;
+  }
+
+  /// This method must be overridden to eliminate abstract frame indices from
+  /// instructions which may use them. The instruction referenced by the
+  /// iterator contains an MO_FrameIndex operand which must be eliminated by
+  /// this method. This method may modify or replace the specified instruction,
+  /// as long as it keeps the iterator pointing at the finished product.
+  /// SPAdj is the SP adjustment due to call frame setup instruction.
+  /// FIOperandNum is the FI operand number.
+  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+                                   int SPAdj, unsigned FIOperandNum,
+                                   RegScavenger *RS = nullptr) const = 0;
+
+  /// Return the assembly name for \p Reg.
+  virtual StringRef getRegAsmName(unsigned Reg) const {
+    // FIXME: We are assuming that the assembly name is equal to the TableGen
+    // name converted to lower case
+    //
+    // The TableGen name is the name of the definition for this register in the
+    // target's tablegen files.  For example, the TableGen name of
+    // def EAX : Register <...>; is "EAX"
+    return StringRef(getName(Reg));
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// Subtarget Hooks
+
+  /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true.
+  virtual bool shouldCoalesce(MachineInstr *MI,
+                              const TargetRegisterClass *SrcRC,
+                              unsigned SubReg,
+                              const TargetRegisterClass *DstRC,
+                              unsigned DstSubReg,
+                              const TargetRegisterClass *NewRC,
+                              LiveIntervals &LIS) const
+  { return true; }
+
+  //===--------------------------------------------------------------------===//
+  /// Debug information queries.
+
+  /// getFrameRegister - This method should return the register used as a base
+  /// for values allocated in the current stack frame.
+  virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
+
+  /// Mark a register and all its aliases as reserved in the given set.
+  void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
+
+  /// Returns true if for every register in the set all super registers are part
+  /// of the set as well.
+  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
+      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
+};
+
+//===----------------------------------------------------------------------===//
+//                           SuperRegClassIterator
+//===----------------------------------------------------------------------===//
+//
+// Iterate over the possible super-registers for a given register class. The
+// iterator will visit a list of pairs (Idx, Mask) corresponding to the
+// possible classes of super-registers.
+//
+// Each bit mask will have at least one set bit, and each set bit in Mask
+// corresponds to a SuperRC such that:
+//
+//   For all Reg in SuperRC: Reg:Idx is in RC.
+//
+// The iterator can include (0, RC->getSubClassMask()) as the first entry which
+// also satisfies the above requirement, assuming Reg:0 == Reg.
+//
+class SuperRegClassIterator {
+  const unsigned RCMaskWords;
+  unsigned SubReg = 0;
+  const uint16_t *Idx;
+  const uint32_t *Mask;
+
+public:
+  /// Create a SuperRegClassIterator that visits all the super-register classes
+  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
+  SuperRegClassIterator(const TargetRegisterClass *RC,
+                        const TargetRegisterInfo *TRI,
+                        bool IncludeSelf = false)
+    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
+      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
+    if (!IncludeSelf)
+      ++*this;
+  }
+
+  /// Returns true if this iterator is still pointing at a valid entry.
+  bool isValid() const { return Idx; }
+
+  /// Returns the current sub-register index.
+  unsigned getSubReg() const { return SubReg; }
+
+  /// Returns the bit mask of register classes that getSubReg() projects into
+  /// RC.
+  /// See TargetRegisterClass::getSubClassMask() for how to use it.
+  const uint32_t *getMask() const { return Mask; }
+
+  /// Advance iterator to the next entry.
+  void operator++() {
+    assert(isValid() && "Cannot move iterator past end.");
+    Mask += RCMaskWords;
+    SubReg = *Idx++;
+    if (!SubReg)
+      Idx = nullptr;
+  }
+};
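+
+// Illustrative sketch: walking the super-register classes of RC (RC and TRI
+// are assumed to be in scope).
+//
+//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
+//     unsigned SubIdx = SRI.getSubReg();
+//     const uint32_t *Mask = SRI.getMask();
+//     // Each set bit in Mask names a SuperRC such that, for all Reg in
+//     // SuperRC, Reg:SubIdx is in RC.
+//   }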
+
+//===----------------------------------------------------------------------===//
+//                           BitMaskClassIterator
+//===----------------------------------------------------------------------===//
+/// This class encapsulates the logic to iterate over a bitmask returned by
+/// the various RegClass related APIs.
+/// E.g., this class can be used to iterate over the subclasses provided by
+/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
+class BitMaskClassIterator {
+  /// Total number of register classes.
+  const unsigned NumRegClasses;
+  /// Base index of CurrentChunk.
+  /// In other words, the number of bits we read to get to the
+  /// beginning of that chunk.
+  unsigned Base = 0;
+  /// Adjusted base index of CurrentChunk:
+  /// the base index plus how many bits we have read within CurrentChunk.
+  unsigned Idx = 0;
+  /// Current register class ID.
+  unsigned ID = 0;
+  /// Mask we are iterating over.
+  const uint32_t *Mask;
+  /// Current chunk of the Mask we are traversing.
+  uint32_t CurrentChunk;
+
+  /// Move ID to the next set bit.
+  void moveToNextID() {
+    // If the current chunk of memory is empty, move to the next one,
+    // while making sure we do not go past the number of register
+    // classes.
+    while (!CurrentChunk) {
+      // Move to the next chunk.
+      Base += 32;
+      if (Base >= NumRegClasses) {
+        ID = NumRegClasses;
+        return;
+      }
+      CurrentChunk = *++Mask;
+      Idx = Base;
+    }
+    // Otherwise look for the first bit set from the right
+    // (representation of the class ID is big endian).
+    // See getSubClassMask for more details on the representation.
+    unsigned Offset = countTrailingZeros(CurrentChunk);
+    // Add the Offset to the adjusted base number of this chunk: Idx.
+    // This is the ID of the register class.
+    ID = Idx + Offset;
+
+    // Consume the zeros, if any, and the bit we just read
+    // so that we are at the right spot for the next call.
+    // Do not do Offset + 1, because Offset may be 31, and shifting
+    // by 32 would be UB. We could instead have made the chunk equal
+    // to 0 in that case, but that would have required an extra if
+    // statement.
+    moveNBits(Offset);
+    moveNBits(1);
+  }
+
+  /// Move \p NumBits Bits forward in CurrentChunk.
+  void moveNBits(unsigned NumBits) {
+    assert(NumBits < 32 && "Undefined behavior spotted!");
+    // Consume the bit we read for the next call.
+    CurrentChunk >>= NumBits;
+    // Adjust the base for the chunk.
+    Idx += NumBits;
+  }
+
+public:
+  /// Create a BitMaskClassIterator that visits all the register classes
+  /// represented by \p Mask.
+  ///
+  /// \pre \p Mask != nullptr
+  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
+      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
+    // Move to the first ID.
+    moveToNextID();
+  }
+
+  /// Returns true if this iterator is still pointing at a valid entry.
+  bool isValid() const { return getID() != NumRegClasses; }
+
+  /// Returns the current register class ID.
+  unsigned getID() const { return ID; }
+
+  /// Advance iterator to the next entry.
+  void operator++() {
+    assert(isValid() && "Cannot move iterator past end.");
+    moveToNextID();
+  }
+};
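+
+// A minimal usage sketch: visiting every register class ID whose bit is set
+// in a mask, e.g. a sub-class mask whose first chunk is 0b101 yields IDs 0
+// and 2. RC and TRI are assumed to come from the surrounding target code.
+//
+// \code
+//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
+//        ++It) {
+//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
+//     // ... use SubRC ...
+//   }
+// \endcode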
+
+// This is useful when building IndexedMaps keyed on virtual registers.
+struct VirtReg2IndexFunctor {
+  using argument_type = unsigned;
+  unsigned operator()(unsigned Reg) const {
+    return TargetRegisterInfo::virtReg2Index(Reg);
+  }
+};
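+
+// For example, a minimal sketch of an IndexedMap keyed on virtual registers
+// (Reg is assumed to be a virtual register):
+//
+// \code
+//   IndexedMap<unsigned, VirtReg2IndexFunctor> Map;
+//   Map.grow(Reg);    // Ensure capacity for Reg's index.
+//   Map[Reg] = 42;    // Indexed via virtReg2Index(Reg) under the hood.
+// \endcode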
+
+/// Prints virtual and physical registers with or without a TRI instance.
+///
+/// The format is:
+///   %noreg          - NoRegister
+///   %5              - a virtual register.
+///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
+///   %eax            - a physical register
+///   %physreg17      - a physical register when no TRI instance given.
+///
+/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
+Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI = nullptr,
+                   unsigned SubRegIdx = 0,
+                   const MachineRegisterInfo *MRI = nullptr);
+
+/// Create Printable object to print register units on a \ref raw_ostream.
+///
+/// Register units are named after their root registers:
+///
+///   al      - Single root.
+///   fp0~st7 - Dual roots.
+///
+/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
+Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
+
+/// \brief Create Printable object to print virtual registers and physical
+/// registers on a \ref raw_ostream.
+Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
+
+/// \brief Create Printable object to print register classes or register banks
+/// on a \ref raw_ostream.
+Printable printRegClassOrBank(unsigned Reg, const MachineRegisterInfo &RegInfo,
+                              const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
new file mode 100644
index 0000000..1044f0b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
@@ -0,0 +1,204 @@
+//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper around MCSchedModel that allows the interface to
+// benefit from information currently only available in TargetInstrInfo.
+// Ideally, the scheduling interface would be fully defined in the MC layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
+#define LLVM_CODEGEN_TARGETSCHEDULE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+
+namespace llvm {
+
+class MachineInstr;
+class TargetInstrInfo;
+
+/// Provide an instruction scheduling machine model to CodeGen passes.
+class TargetSchedModel {
+  // For efficiency, hold a copy of the statically defined MCSchedModel for this
+  // processor.
+  MCSchedModel SchedModel;
+  InstrItineraryData InstrItins;
+  const TargetSubtargetInfo *STI = nullptr;
+  const TargetInstrInfo *TII = nullptr;
+
+  SmallVector<unsigned, 16> ResourceFactors;
+  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
+  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.
+
+  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
+
+public:
+  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}
+
+  /// \brief Initialize the machine model for instruction scheduling.
+  ///
+  /// The machine model API keeps a copy of the top-level MCSchedModel table
+  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
+  /// dynamic properties.
+  void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
+            const TargetInstrInfo *tii);
+
+  /// Return the MCSchedClassDesc for this instruction.
+  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+
+  /// \brief TargetSubtargetInfo getter.
+  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }
+
+  /// \brief TargetInstrInfo getter.
+  const TargetInstrInfo *getInstrInfo() const { return TII; }
+
+  /// \brief Return true if this machine model includes an instruction-level
+  /// scheduling model.
+  ///
+  /// This is more detailed than the coarse-grained IssueWidth and default
+  /// latency properties, but separate from the per-cycle itinerary data.
+  bool hasInstrSchedModel() const;
+
+  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
+
+  /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+  /// data.
+  ///
+  /// This models scheduling at each stage in the processor pipeline.
+  bool hasInstrItineraries() const;
+
+  const InstrItineraryData *getInstrItineraries() const {
+    if (hasInstrItineraries())
+      return &InstrItins;
+    return nullptr;
+  }
+
+  /// \brief Return true if this machine model includes an instruction-level
+  /// scheduling model or cycle-to-cycle itinerary data.
+  bool hasInstrSchedModelOrItineraries() const {
+    return hasInstrSchedModel() || hasInstrItineraries();
+  }
+
+  /// \brief Identify the processor corresponding to the current subtarget.
+  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
+
+  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
+  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
+
+  /// \brief Return true if a new group must begin.
+  bool mustBeginGroup(const MachineInstr *MI,
+                      const MCSchedClassDesc *SC = nullptr) const;
+
+  /// \brief Return true if the current group must end.
+  bool mustEndGroup(const MachineInstr *MI,
+                    const MCSchedClassDesc *SC = nullptr) const;
+
+  /// \brief Return the number of issue slots required for this MI.
+  unsigned getNumMicroOps(const MachineInstr *MI,
+                          const MCSchedClassDesc *SC = nullptr) const;
+
+  /// \brief Get the number of kinds of resources for this target.
+  unsigned getNumProcResourceKinds() const {
+    return SchedModel.getNumProcResourceKinds();
+  }
+
+  /// \brief Get a processor resource by ID for convenience.
+  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx);
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  const char *getResourceName(unsigned PIdx) const {
+    if (!PIdx)
+      return "MOps";
+    return SchedModel.getProcResource(PIdx)->Name;
+  }
+#endif
+
+  using ProcResIter = const MCWriteProcResEntry *;
+
+  // \brief Get an iterator into the processor resources consumed by this
+  // scheduling class.
+  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
+    // The subtarget holds a single resource table for all processors.
+    return STI->getWriteProcResBegin(SC);
+  }
+  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
+    return STI->getWriteProcResEnd(SC);
+  }
+
+  /// \brief Multiply the number of units consumed for a resource by this factor
+  /// to normalize it relative to other resources.
+  unsigned getResourceFactor(unsigned ResIdx) const {
+    return ResourceFactors[ResIdx];
+  }
+
+  /// \brief Multiply number of micro-ops by this factor to normalize it
+  /// relative to other resources.
+  unsigned getMicroOpFactor() const {
+    return MicroOpFactor;
+  }
+
+  /// \brief Multiply cycle count by this factor to normalize it relative to
+  /// other resources. This is the number of resource units per cycle.
+  unsigned getLatencyFactor() const {
+    return ResourceLCM;
+  }
+
+  /// \brief Number of micro-ops that may be buffered for OOO execution.
+  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
+
+  /// \brief Number of resource units that may be buffered for OOO execution.
+  /// \return The buffer size in resource units or -1 for unlimited.
+  int getResourceBufferSize(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx)->BufferSize;
+  }
+
+  /// \brief Compute operand latency based on the available machine model.
+  ///
+  /// Compute and return the latency of the given data dependent def and use
+  /// when the operand indices are already known. UseMI may be NULL for an
+  /// unknown user.
+  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+                                 const MachineInstr *UseMI, unsigned UseOperIdx)
+    const;
+
+  /// \brief Compute the instruction latency based on the available machine
+  /// model.
+  ///
+  /// Compute and return the expected latency of this instruction independent of
+  /// a particular use. computeOperandLatency is the preferred API, but this is
+  /// occasionally useful to help estimate instruction cost.
+  ///
+  /// If UseDefaultDefLatency is false and no new machine sched model is
+  /// present, this method falls back to TII->getInstrLatency with an empty
+  /// instruction itinerary (this is so we preserve the previous behavior of
+  /// the if converter after moving it to TargetSchedModel).
+  unsigned computeInstrLatency(const MachineInstr *MI,
+                               bool UseDefaultDefLatency = true) const;
+  unsigned computeInstrLatency(unsigned Opcode) const;
+
+  /// \brief Output dependency latency of a pair of defs of the same register.
+  ///
+  /// This is typically one cycle.
+  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+                                const MachineInstr *DepMI) const;
+
+  /// \brief Compute the reciprocal throughput of the given instruction.
+  Optional<double> computeInstrRThroughput(const MachineInstr *MI) const;
+  Optional<double> computeInstrRThroughput(unsigned Opcode) const;
+};
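+
+// A minimal usage sketch: initialize the model from a subtarget and query
+// per-instruction properties. ST (a TargetSubtargetInfo) and MI (a
+// MachineInstr*) are assumed to come from the pass context.
+//
+// \code
+//   TargetSchedModel SchedModel;
+//   SchedModel.init(ST.getSchedModel(), &ST, ST.getInstrInfo());
+//   if (SchedModel.hasInstrSchedModel()) {
+//     unsigned Latency = SchedModel.computeInstrLatency(MI);
+//     unsigned MicroOps = SchedModel.getNumMicroOps(MI);
+//   }
+// \endcode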
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETSCHEDULE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
new file mode 100644
index 0000000..5e5faac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -0,0 +1,261 @@
+//===- llvm/CodeGen/TargetSubtargetInfo.h - Target Information --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the subtarget options of a Target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+#define LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/PBQPRAConstraint.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class CallLowering;
+class InstrItineraryData;
+struct InstrStage;
+class InstructionSelector;
+class LegalizerInfo;
+class MachineInstr;
+struct MachineSchedPolicy;
+struct MCReadAdvanceEntry;
+struct MCWriteLatencyEntry;
+struct MCWriteProcResEntry;
+class RegisterBankInfo;
+class SDep;
+class SelectionDAGTargetInfo;
+struct SubtargetFeatureKV;
+struct SubtargetInfoKV;
+class SUnit;
+class TargetFrameLowering;
+class TargetInstrInfo;
+class TargetLowering;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSchedModel;
+class Triple;
+
+//===----------------------------------------------------------------------===//
+///
+/// TargetSubtargetInfo - Generic base class for all target subtargets.  All
+/// target-specific options that control code generation and printing should
+/// be exposed through a TargetSubtargetInfo-derived class.
+///
+class TargetSubtargetInfo : public MCSubtargetInfo {
+protected: // Can only create subclasses...
+  TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
+                      ArrayRef<SubtargetFeatureKV> PF,
+                      ArrayRef<SubtargetFeatureKV> PD,
+                      const SubtargetInfoKV *ProcSched,
+                      const MCWriteProcResEntry *WPR,
+                      const MCWriteLatencyEntry *WL,
+                      const MCReadAdvanceEntry *RA, const InstrStage *IS,
+                      const unsigned *OC, const unsigned *FP);
+
+public:
+  // AntiDepBreakMode - Type of anti-dependence breaking that should
+  // be performed before post-RA scheduling.
+  using AntiDepBreakMode = enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL };
+  using RegClassVector = SmallVectorImpl<const TargetRegisterClass *>;
+
+  TargetSubtargetInfo() = delete;
+  TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
+  TargetSubtargetInfo &operator=(const TargetSubtargetInfo &) = delete;
+  ~TargetSubtargetInfo() override;
+
+  virtual bool isXRaySupported() const { return false; }
+
+  // Interfaces to the major aspects of target machine information:
+  //
+  // -- Instruction opcode and operand information
+  // -- Pipelines and scheduling information
+  // -- Stack frame information
+  // -- Selection DAG lowering information
+  // -- Call lowering information
+  //
+  // N.B. These objects may change during compilation. It's not safe to cache
+  // them between functions.
+  virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
+  virtual const TargetFrameLowering *getFrameLowering() const {
+    return nullptr;
+  }
+  virtual const TargetLowering *getTargetLowering() const { return nullptr; }
+  virtual const SelectionDAGTargetInfo *getSelectionDAGInfo() const {
+    return nullptr;
+  }
+  virtual const CallLowering *getCallLowering() const { return nullptr; }
+
+  // FIXME: This lets targets specialize the selector by subtarget (which lets
+  // us do things like a dedicated avx512 selector).  However, we might want
+  // to also specialize selectors by MachineFunction, which would let us be
+  // aware of optsize/optnone and such.
+  virtual const InstructionSelector *getInstructionSelector() const {
+    return nullptr;
+  }
+
+  virtual unsigned getHwMode() const { return 0; }
+
+  /// Target can subclass this hook to select a different DAG scheduler.
+  virtual RegisterScheduler::FunctionPassCtor
+      getDAGScheduler(CodeGenOpt::Level) const {
+    return nullptr;
+  }
+
+  virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
+
+  /// getRegisterInfo - If register information is available, return it.  If
+  /// not, return null.
+  virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
+
+  /// If the information for the register banks is available, return it.
+  /// Otherwise return nullptr.
+  virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr; }
+
+  /// getInstrItineraryData - Returns instruction itinerary data for the target
+  /// or specific subtarget.
+  virtual const InstrItineraryData *getInstrItineraryData() const {
+    return nullptr;
+  }
+
+  /// Resolve a SchedClass at runtime, where SchedClass identifies an
+  /// MCSchedClassDesc with the isVariant property. This may return the ID of
+  /// another variant SchedClass, but repeated invocation must quickly terminate
+  /// in a nonvariant SchedClass.
+  virtual unsigned resolveSchedClass(unsigned SchedClass,
+                                     const MachineInstr *MI,
+                                     const TargetSchedModel *SchedModel) const {
+    return 0;
+  }
+
+  /// \brief True if the subtarget should run MachineScheduler after aggressive
+  /// coalescing.
+  ///
+  /// This currently replaces the SelectionDAG scheduler with the "source" order
+  /// scheduler (though see below for an option to turn this off and use the
+  /// TargetLowering preference). It does not yet disable the postRA scheduler.
+  virtual bool enableMachineScheduler() const;
+
+  /// \brief Support printing of [latency:throughput] comment in output .S file.
+  virtual bool supportPrintSchedInfo() const { return false; }
+
+  /// \brief True if the machine scheduler should disable the TLI preference
+  /// for preRA scheduling with the source level scheduler.
+  virtual bool enableMachineSchedDefaultSched() const { return true; }
+
+  /// \brief True if the subtarget should enable joining global copies.
+  ///
+  /// By default this is enabled if the machine scheduler is enabled, but
+  /// can be overridden.
+  virtual bool enableJoinGlobalCopies() const;
+
+  /// True if the subtarget should run a scheduler after register allocation.
+  ///
+  /// By default this queries the PostRAScheduling bit in the scheduling model
+  /// which is the preferred way to influence this.
+  virtual bool enablePostRAScheduler() const;
+
+  /// \brief True if the subtarget should run the atomic expansion pass.
+  virtual bool enableAtomicExpand() const;
+
+  /// True if the subtarget should run the indirectbr expansion pass.
+  virtual bool enableIndirectBrExpand() const;
+
+  /// \brief Override generic scheduling policy within a region.
+  ///
+  /// This is a convenient way for targets that don't provide any custom
+  /// scheduling heuristics (no custom MachineSchedStrategy) to make
+  /// changes to the generic scheduling policy.
+  virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
+                                   unsigned NumRegionInstrs) const {}
+
+  // \brief Perform target specific adjustments to the latency of a schedule
+  // dependency.
+  virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep &dep) const {}
+
+  // For use with PostRAScheduling: get the anti-dependence breaking that should
+  // be performed before post-RA scheduling.
+  virtual AntiDepBreakMode getAntiDepBreakMode() const { return ANTIDEP_NONE; }
+
+  // For use with PostRAScheduling: in CriticalPathRCs, return any register
+  // classes that should only be considered for anti-dependence breaking if they
+  // are on the critical path.
+  virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
+    CriticalPathRCs.clear();
+  }
+
+  // \brief Provide an ordered list of schedule DAG mutations for the post-RA
+  // scheduler.
+  virtual void getPostRAMutations(
+      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+  }
+
+  // \brief Provide an ordered list of schedule DAG mutations for the machine
+  // pipeliner.
+  virtual void getSMSMutations(
+      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+  }
+
+  // For use with PostRAScheduling: get the minimum optimization level needed
+  // to enable post-RA scheduling.
+  virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
+    return CodeGenOpt::Default;
+  }
+
+  /// \brief True if the subtarget should run the local reassignment
+  /// heuristic of the register allocator.
+  /// This heuristic may be compile time intensive, \p OptLevel provides
+  /// a finer grain to tune the register allocator.
+  virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;
+
+  /// \brief True if the subtarget should consider the cost of local intervals
+  /// created by a split candidate when choosing the best split candidate. This
+  /// heuristic may be compile time intensive.
+  virtual bool enableAdvancedRASplitCost() const;
+
+  /// \brief Enable use of alias analysis during code generation (during MI
+  /// scheduling, DAGCombine, etc.).
+  virtual bool useAA() const;
+
+  /// \brief Enable the use of the early if conversion pass.
+  virtual bool enableEarlyIfConversion() const { return false; }
+
+  /// \brief Return PBQPConstraint(s) for the target.
+  ///
+  /// Override to provide custom PBQP constraints.
+  virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
+    return nullptr;
+  }
+
+  /// Enable tracking of subregister liveness in register allocator.
+  /// Please use MachineRegisterInfo::subRegLivenessEnabled() instead where
+  /// possible.
+  virtual bool enableSubRegLiveness() const { return false; }
+
+  /// Returns string representation of scheduler comment
+  std::string getSchedInfoStr(const MachineInstr &MI) const override;
+  std::string getSchedInfoStr(MCInst const &MCI) const override;
+
+  /// This is called after a .mir file was loaded.
+  virtual void mirFileLoaded(MachineFunction &MF) const;
+};
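+
+// A minimal sketch of a target-specific subclass overriding some of the hooks
+// above. The "MyTarget" names are hypothetical, and the call to the protected
+// base-class constructor is elided for brevity.
+//
+// \code
+//   class MyTargetSubtarget : public TargetSubtargetInfo {
+//   public:
+//     const TargetInstrInfo *getInstrInfo() const override { return &TII; }
+//     bool enablePostRAScheduler() const override { return true; }
+//     bool useAA() const override { return true; }
+//   private:
+//     MyTargetInstrInfo TII; // Hypothetical TargetInstrInfo subclass.
+//   };
+// \endcode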
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETSUBTARGETINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h b/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h
new file mode 100644
index 0000000..3e7afd4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h
@@ -0,0 +1,37 @@
+//===-- UnreachableBlockElim.h - Remove unreachable blocks for codegen --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is an extremely simple version of the SimplifyCFG pass.  Its sole
+// job is to delete LLVM basic blocks that are not reachable from the entry
+// node.  To do this, it performs a simple depth first traversal of the CFG,
+// then deletes any unvisited nodes.
+//
+// Note that this pass is really a hack.  In particular, the instruction
+// selectors for various targets should just not generate code for unreachable
+// blocks.  Until LLVM has a more systematic way of defining instruction
+// selectors, however, we cannot really expect them to handle additional
+// complexity.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
+#define LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class UnreachableBlockElimPass
+    : public PassInfoMixin<UnreachableBlockElimPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
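+
+// A minimal usage sketch with the new pass manager. F and FAM are assumed to
+// be an existing Function and FunctionAnalysisManager.
+//
+// \code
+//   FunctionPassManager FPM;
+//   FPM.addPass(UnreachableBlockElimPass());
+//   PreservedAnalyses PA = FPM.run(F, FAM);
+// \endcode
+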
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h
new file mode 100644
index 0000000..d2ef4a9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h
@@ -0,0 +1,437 @@
+//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of low-level target independent types that values
+// in the code generator can have.  This allows the target specific behavior
+// of instructions to be described to target independent passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VALUETYPES_H
+#define LLVM_CODEGEN_VALUETYPES_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+  class LLVMContext;
+  class Type;
+
+  /// Extended Value Type. Capable of holding value types which are not native
+  /// for any processor (such as the i12345 type), as well as the types an MVT
+  /// can represent.
+  struct EVT {
+  private:
+    MVT V = MVT::INVALID_SIMPLE_VALUE_TYPE;
+    Type *LLVMTy = nullptr;
+
+  public:
+    constexpr EVT() = default;
+    constexpr EVT(MVT::SimpleValueType SVT) : V(SVT) {}
+    constexpr EVT(MVT S) : V(S) {}
+
+    bool operator==(EVT VT) const {
+      return !(*this != VT);
+    }
+    bool operator!=(EVT VT) const {
+      if (V.SimpleTy != VT.V.SimpleTy)
+        return true;
+      if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return LLVMTy != VT.LLVMTy;
+      return false;
+    }
+
+    /// Returns the EVT that represents a floating-point type with the given
+    /// number of bits. There are two floating-point types with 128 bits - this
+    /// returns f128 rather than ppcf128.
+    static EVT getFloatingPointVT(unsigned BitWidth) {
+      return MVT::getFloatingPointVT(BitWidth);
+    }
+
+    /// Returns the EVT that represents an integer with the given number of
+    /// bits.
+    static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
+      MVT M = MVT::getIntegerVT(BitWidth);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+      return getExtendedIntegerVT(Context, BitWidth);
+    }
+
+    /// Returns the EVT that represents a vector NumElements in length, where
+    /// each element is of type VT.
+    static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
+                           bool IsScalable = false) {
+      MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+
+      assert(!IsScalable && "We don't support extended scalable types yet");
+      return getExtendedVectorVT(Context, VT, NumElements);
+    }
+
+    /// Returns the EVT that represents a vector EC.Min elements in length,
+    /// where each element is of type VT.
+    static EVT getVectorVT(LLVMContext &Context, EVT VT, MVT::ElementCount EC) {
+      MVT M = MVT::getVectorVT(VT.V, EC);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+      assert (!EC.Scalable && "We don't support extended scalable types yet");
+      return getExtendedVectorVT(Context, VT, EC.Min);
+    }
+
+    /// Return a vector with the same number of elements as this vector, but
+    /// with the element type converted to an integer type with the same
+    /// bitwidth.
+    EVT changeVectorElementTypeToInteger() const {
+      if (!isSimple()) {
+        assert (!isScalableVector() &&
+                "We don't support extended scalable types yet");
+        return changeExtendedVectorElementTypeToInteger();
+      }
+      MVT EltTy = getSimpleVT().getVectorElementType();
+      unsigned BitWidth = EltTy.getSizeInBits();
+      MVT IntTy = MVT::getIntegerVT(BitWidth);
+      MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements(),
+                                   isScalableVector());
+      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
+             "Simple vector VT not representable by simple integer vector VT!");
+      return VecTy;
+    }
+
+    /// Return the type converted to an equivalently sized integer or vector
+    /// with integer element type. Similar to changeVectorElementTypeToInteger,
+    /// but also handles scalars.
+    EVT changeTypeToInteger() {
+      if (isVector())
+        return changeVectorElementTypeToInteger();
+
+      if (isSimple())
+        return MVT::getIntegerVT(getSizeInBits());
+
+      return changeExtendedTypeToInteger();
+    }
+
+    /// Test if the given EVT is simple (as opposed to being extended).
+    bool isSimple() const {
+      return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
+    }
+
+    /// Test if the given EVT is extended (as opposed to being simple).
+    bool isExtended() const {
+      return !isSimple();
+    }
+
+    /// Return true if this is a FP or a vector FP type.
+    bool isFloatingPoint() const {
+      return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
+    }
+
+    /// Return true if this is an integer or a vector integer type.
+    bool isInteger() const {
+      return isSimple() ? V.isInteger() : isExtendedInteger();
+    }
+
+    /// Return true if this is an integer, but not a vector.
+    bool isScalarInteger() const {
+      return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
+    }
+
+    /// Return true if this is a vector value type.
+    bool isVector() const {
+      return isSimple() ? V.isVector() : isExtendedVector();
+    }
+
+    /// Return true if this is a vector type where the runtime
+    /// length is machine dependent.
+    bool isScalableVector() const {
+      // FIXME: We don't support extended scalable types yet, because the
+      // matching IR type doesn't exist. Once it has been added, this can
+      // be changed to call isExtendedScalableVector.
+      if (!isSimple())
+        return false;
+      return V.isScalableVector();
+    }
+
+    /// Return true if this is a 16-bit vector type.
+    bool is16BitVector() const {
+      return isSimple() ? V.is16BitVector() : isExtended16BitVector();
+    }
+
+    /// Return true if this is a 32-bit vector type.
+    bool is32BitVector() const {
+      return isSimple() ? V.is32BitVector() : isExtended32BitVector();
+    }
+
+    /// Return true if this is a 64-bit vector type.
+    bool is64BitVector() const {
+      return isSimple() ? V.is64BitVector() : isExtended64BitVector();
+    }
+
+    /// Return true if this is a 128-bit vector type.
+    bool is128BitVector() const {
+      return isSimple() ? V.is128BitVector() : isExtended128BitVector();
+    }
+
+    /// Return true if this is a 256-bit vector type.
+    bool is256BitVector() const {
+      return isSimple() ? V.is256BitVector() : isExtended256BitVector();
+    }
+
+    /// Return true if this is a 512-bit vector type.
+    bool is512BitVector() const {
+      return isSimple() ? V.is512BitVector() : isExtended512BitVector();
+    }
+
+    /// Return true if this is a 1024-bit vector type.
+    bool is1024BitVector() const {
+      return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
+    }
+
+    /// Return true if this is a 2048-bit vector type.
+    bool is2048BitVector() const {
+      return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
+    }
+
+    /// Return true if this is an overloaded type for TableGen.
+    bool isOverloaded() const {
+      return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
+    }
+
+    /// Return true if the bit size is a multiple of 8.
+    bool isByteSized() const {
+      return (getSizeInBits() & 7) == 0;
+    }
+
+    /// Return true if the size is a power-of-two number of bytes.
+    bool isRound() const {
+      unsigned BitSize = getSizeInBits();
+      return BitSize >= 8 && !(BitSize & (BitSize - 1));
+    }
+
+    /// Return true if this has the same number of bits as VT.
+    bool bitsEq(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() == VT.getSizeInBits();
+    }
+
+    /// Return true if this has more bits than VT.
+    bool bitsGT(EVT VT) const {
+      if (EVT::operator==(VT)) return false;
+      return getSizeInBits() > VT.getSizeInBits();
+    }
+
+    /// Return true if this has no less bits than VT.
+    bool bitsGE(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() >= VT.getSizeInBits();
+    }
+
+    /// Return true if this has less bits than VT.
+    bool bitsLT(EVT VT) const {
+      if (EVT::operator==(VT)) return false;
+      return getSizeInBits() < VT.getSizeInBits();
+    }
+
+    /// Return true if this has no more bits than VT.
+    bool bitsLE(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() <= VT.getSizeInBits();
+    }
+
+    /// Return the SimpleValueType held in the specified simple EVT.
+    MVT getSimpleVT() const {
+      assert(isSimple() && "Expected a SimpleValueType!");
+      return V;
+    }
+
+    /// If this is a vector type, return the element type, otherwise return
+    /// this.
+    EVT getScalarType() const {
+      return isVector() ? getVectorElementType() : *this;
+    }
+
+    /// Given a vector type, return the type of each element.
+    EVT getVectorElementType() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorElementType();
+      return getExtendedVectorElementType();
+    }
+
+    /// Given a vector type, return the number of elements it contains.
+    unsigned getVectorNumElements() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorNumElements();
+      return getExtendedVectorNumElements();
+    }
+
+    // Given a (possibly scalable) vector type, return the ElementCount.
+    MVT::ElementCount getVectorElementCount() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorElementCount();
+
+      assert(!isScalableVector() &&
+             "We don't support extended scalable types yet");
+      return {getExtendedVectorNumElements(), false};
+    }
+
+    /// Return the size of the specified value type in bits.
+    unsigned getSizeInBits() const {
+      if (isSimple())
+        return V.getSizeInBits();
+      return getExtendedSizeInBits();
+    }
+
+    unsigned getScalarSizeInBits() const {
+      return getScalarType().getSizeInBits();
+    }
+
+    /// Return the number of bytes overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSize() const {
+      return (getSizeInBits() + 7) / 8;
+    }
+
+    /// Return the number of bits overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSizeInBits() const {
+      return getStoreSize() * 8;
+    }
+
+    /// Rounds the bit-width of the given integer EVT up to the nearest power of
+    /// two (and at least to eight), and returns the integer EVT with that
+    /// number of bits.
+    EVT getRoundIntegerType(LLVMContext &Context) const {
+      assert(isInteger() && !isVector() && "Invalid integer type!");
+      unsigned BitWidth = getSizeInBits();
+      if (BitWidth <= 8)
+        return EVT(MVT::i8);
+      return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
+    }
+
+    /// Finds the smallest simple value type that is greater than or equal to
+    /// half the width of this EVT. If no simple value type can be found, an
+    /// extended integer value type of half the size (rounded up) is returned.
+    EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
+      assert(isInteger() && !isVector() && "Invalid integer type!");
+      unsigned EVTSize = getSizeInBits();
+      for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
+          IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
+        EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
+        if (HalfVT.getSizeInBits() * 2 >= EVTSize)
+          return HalfVT;
+      }
+      return getIntegerVT(Context, (EVTSize + 1) / 2);
+    }
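+
+    // Worked example (for an assumed extended i20): the loop above finds i16,
+    // since 16 * 2 >= 20, so getHalfSizedIntegerVT(i20) == i16. By contrast,
+    // getRoundIntegerType(i20) rounds the width up to a power of two, giving
+    // i32.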
+
+    /// Return a VT for an integer vector type with the size of the
+    /// elements doubled. The type returned may be an extended type.
+    EVT widenIntegerVectorElementType(LLVMContext &Context) const {
+      EVT EltVT = getVectorElementType();
+      EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
+      return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
+    }
+
+    // Return a VT for a vector type with the same element type but
+    // half the number of elements. The type returned may be an
+    // extended type.
+    EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
+      EVT EltVT = getVectorElementType();
+      auto EltCnt = getVectorElementCount();
+      assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!");
+      return EVT::getVectorVT(Context, EltVT, EltCnt / 2);
+    }
+
+    /// Returns true if the number of elements in the vector is a power of 2.
+    bool isPow2VectorType() const {
+      unsigned NElts = getVectorNumElements();
+      return !(NElts & (NElts - 1));
+    }
+
+    /// Widens the length of the given vector EVT up to the nearest power of 2
+    /// and returns that type.
+    EVT getPow2VectorType(LLVMContext &Context) const {
+      if (!isPow2VectorType()) {
+        unsigned NElts = getVectorNumElements();
+        unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+        return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts,
+                                isScalableVector());
+      } else {
+        return *this;
+      }
+    }
+
+    /// This function returns value type as a string, e.g. "i32".
+    std::string getEVTString() const;
+
+    /// This method returns an LLVM type corresponding to the specified EVT.
+    /// For integer types, this returns an unsigned type. Note that this will
+    /// abort for types that cannot be represented.
+    Type *getTypeForEVT(LLVMContext &Context) const;
+
+    /// Return the value type corresponding to the specified type.
+    /// This returns all pointers as iPTR.  If HandleUnknown is true, unknown
+    /// types are returned as Other, otherwise they are invalid.
+    static EVT getEVT(Type *Ty, bool HandleUnknown = false);
+
+    intptr_t getRawBits() const {
+      if (isSimple())
+        return V.SimpleTy;
+      else
+        return (intptr_t)(LLVMTy);
+    }
+
+    /// A meaningless but well-behaved order, useful for constructing
+    /// containers.
+    struct compareRawBits {
+      bool operator()(EVT L, EVT R) const {
+        if (L.V.SimpleTy == R.V.SimpleTy)
+          return L.LLVMTy < R.LLVMTy;
+        else
+          return L.V.SimpleTy < R.V.SimpleTy;
+      }
+    };
+
+  private:
+    // Methods for handling the Extended-type case in functions above.
+    // These are all out-of-line to prevent users of this header file
+    // from having a dependency on Type.h.
+    EVT changeExtendedTypeToInteger() const;
+    EVT changeExtendedVectorElementTypeToInteger() const;
+    static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
+    static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
+                                   unsigned NumElements);
+    bool isExtendedFloatingPoint() const LLVM_READONLY;
+    bool isExtendedInteger() const LLVM_READONLY;
+    bool isExtendedScalarInteger() const LLVM_READONLY;
+    bool isExtendedVector() const LLVM_READONLY;
+    bool isExtended16BitVector() const LLVM_READONLY;
+    bool isExtended32BitVector() const LLVM_READONLY;
+    bool isExtended64BitVector() const LLVM_READONLY;
+    bool isExtended128BitVector() const LLVM_READONLY;
+    bool isExtended256BitVector() const LLVM_READONLY;
+    bool isExtended512BitVector() const LLVM_READONLY;
+    bool isExtended1024BitVector() const LLVM_READONLY;
+    bool isExtended2048BitVector() const LLVM_READONLY;
+    EVT getExtendedVectorElementType() const;
+    unsigned getExtendedVectorNumElements() const LLVM_READONLY;
+    unsigned getExtendedSizeInBits() const LLVM_READONLY;
+  };
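+
+  // A minimal usage sketch: simple types map directly to an MVT, while
+  // oddly-sized integers become extended types backed by an LLVM IR type.
+  // Ctx is assumed to be an existing LLVMContext.
+  //
+  // \code
+  //   EVT Simple = EVT::getIntegerVT(Ctx, 32);      // MVT::i32, isSimple()
+  //   EVT Odd    = EVT::getIntegerVT(Ctx, 12345);   // extended, !isSimple()
+  //   unsigned Bits  = Odd.getSizeInBits();         // 12345
+  //   unsigned Bytes = Odd.getStoreSize();          // (12345 + 7) / 8 = 1544
+  // \endcode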
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_VALUETYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
new file mode 100644
index 0000000..673eec9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
@@ -0,0 +1,169 @@
+//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Value types - These values correspond to the register types defined in the
+// ValueTypes.h file.  If you update anything here, you must update it there as
+// well!
+//
+//===----------------------------------------------------------------------===//
+
+class ValueType<int size, int value> {
+  string Namespace = "MVT";
+  int Size = size;
+  int Value = value;
+}
+
+def OtherVT: ValueType<0  ,  1>;   // "Other" value
+def i1     : ValueType<1  ,  2>;   // One bit boolean value
+def i8     : ValueType<8  ,  3>;   // 8-bit integer value
+def i16    : ValueType<16 ,  4>;   // 16-bit integer value
+def i32    : ValueType<32 ,  5>;   // 32-bit integer value
+def i64    : ValueType<64 ,  6>;   // 64-bit integer value
+def i128   : ValueType<128,  7>;   // 128-bit integer value
+def f16    : ValueType<16 ,  8>;   // 16-bit floating point value
+def f32    : ValueType<32 ,  9>;   // 32-bit floating point value
+def f64    : ValueType<64 , 10>;   // 64-bit floating point value
+def f80    : ValueType<80 , 11>;   // 80-bit floating point value
+def f128   : ValueType<128, 12>;   // 128-bit floating point value
+def ppcf128: ValueType<128, 13>;   // PPC 128-bit floating point value
+
+def v1i1   : ValueType<1 ,  14>;   //   1 x i1 vector value
+def v2i1   : ValueType<2 ,  15>;   //   2 x i1 vector value
+def v4i1   : ValueType<4 ,  16>;   //   4 x i1 vector value
+def v8i1   : ValueType<8 ,  17>;   //   8 x i1 vector value
+def v16i1  : ValueType<16,  18>;   //  16 x i1 vector value
+def v32i1  : ValueType<32 , 19>;   //  32 x i1 vector value
+def v64i1  : ValueType<64 , 20>;   //  64 x i1 vector value
+def v128i1 : ValueType<128, 21>;   // 128 x i1 vector value
+def v512i1 : ValueType<512, 22>;   // 512 x i1 vector value
+def v1024i1: ValueType<1024,23>;   //1024 x i1 vector value
+
+def v1i8   : ValueType<8,   24>;   //  1 x i8  vector value
+def v2i8   : ValueType<16 , 25>;   //  2 x i8  vector value
+def v4i8   : ValueType<32 , 26>;   //  4 x i8  vector value
+def v8i8   : ValueType<64 , 27>;   //  8 x i8  vector value
+def v16i8  : ValueType<128, 28>;   // 16 x i8  vector value
+def v32i8  : ValueType<256, 29>;   // 32 x i8  vector value
+def v64i8  : ValueType<512, 30>;   // 64 x i8  vector value
+def v128i8 : ValueType<1024,31>;   //128 x i8  vector value
+def v256i8 : ValueType<2048,32>;   //256 x i8  vector value
+
+def v1i16  : ValueType<16 , 33>;   //  1 x i16 vector value
+def v2i16  : ValueType<32 , 34>;   //  2 x i16 vector value
+def v4i16  : ValueType<64 , 35>;   //  4 x i16 vector value
+def v8i16  : ValueType<128, 36>;   //  8 x i16 vector value
+def v16i16 : ValueType<256, 37>;   // 16 x i16 vector value
+def v32i16 : ValueType<512, 38>;   // 32 x i16 vector value
+def v64i16 : ValueType<1024,39>;   // 64 x i16 vector value
+def v128i16: ValueType<2048,40>;   //128 x i16 vector value
+
+def v1i32  : ValueType<32 , 41>;   //  1 x i32 vector value
+def v2i32  : ValueType<64 , 42>;   //  2 x i32 vector value
+def v4i32  : ValueType<128, 43>;   //  4 x i32 vector value
+def v8i32  : ValueType<256, 44>;   //  8 x i32 vector value
+def v16i32 : ValueType<512, 45>;   // 16 x i32 vector value
+def v32i32 : ValueType<1024,46>;   // 32 x i32 vector value
+def v64i32 : ValueType<2048,47>;   // 64 x i32 vector value
+
+def v1i64  : ValueType<64 , 48>;   //  1 x i64 vector value
+def v2i64  : ValueType<128, 49>;   //  2 x i64 vector value
+def v4i64  : ValueType<256, 50>;   //  4 x i64 vector value
+def v8i64  : ValueType<512, 51>;   //  8 x i64 vector value
+def v16i64 : ValueType<1024,52>;   // 16 x i64 vector value
+def v32i64 : ValueType<2048,53>;   // 32 x i64 vector value
+
+def v1i128 : ValueType<128, 54>;   //  1 x i128 vector value
+
+def nxv1i1  : ValueType<1,   55>;  // n x  1 x i1  vector value
+def nxv2i1  : ValueType<2,   56>;  // n x  2 x i1  vector value
+def nxv4i1  : ValueType<4,   57>;  // n x  4 x i1  vector value
+def nxv8i1  : ValueType<8,   58>;  // n x  8 x i1  vector value
+def nxv16i1 : ValueType<16,  59>;  // n x 16 x i1  vector value
+def nxv32i1 : ValueType<32,  60>;  // n x 32 x i1  vector value
+
+def nxv1i8  : ValueType<8,   61>;  // n x  1 x i8  vector value
+def nxv2i8  : ValueType<16,  62>;  // n x  2 x i8  vector value
+def nxv4i8  : ValueType<32,  63>;  // n x  4 x i8  vector value
+def nxv8i8  : ValueType<64,  64>;  // n x  8 x i8  vector value
+def nxv16i8 : ValueType<128, 65>;  // n x 16 x i8  vector value
+def nxv32i8 : ValueType<256, 66>;  // n x 32 x i8  vector value
+
+def nxv1i16 : ValueType<16,  67>;  // n x  1 x i16 vector value
+def nxv2i16 : ValueType<32,  68>;  // n x  2 x i16 vector value
+def nxv4i16 : ValueType<64,  69>;  // n x  4 x i16 vector value
+def nxv8i16 : ValueType<128, 70>;  // n x  8 x i16 vector value
+def nxv16i16: ValueType<256, 71>;  // n x 16 x i16 vector value
+def nxv32i16: ValueType<512, 72>;  // n x 32 x i16 vector value
+
+def nxv1i32 : ValueType<32,  73>;  // n x  1 x i32 vector value
+def nxv2i32 : ValueType<64,  74>;  // n x  2 x i32 vector value
+def nxv4i32 : ValueType<128, 75>;  // n x  4 x i32 vector value
+def nxv8i32 : ValueType<256, 76>;  // n x  8 x i32 vector value
+def nxv16i32: ValueType<512, 77>;  // n x 16 x i32 vector value
+def nxv32i32: ValueType<1024,78>;  // n x 32 x i32 vector value
+
+def nxv1i64 : ValueType<64,  79>;  // n x  1 x i64 vector value
+def nxv2i64 : ValueType<128, 80>;  // n x  2 x i64 vector value
+def nxv4i64 : ValueType<256, 81>;  // n x  4 x i64 vector value
+def nxv8i64 : ValueType<512, 82>;  // n x  8 x i64 vector value
+def nxv16i64: ValueType<1024,83>;  // n x 16 x i64 vector value
+def nxv32i64: ValueType<2048,84>;  // n x 32 x i64 vector value
+
+def v2f16  : ValueType<32 , 85>;   //  2 x f16 vector value
+def v4f16  : ValueType<64 , 86>;   //  4 x f16 vector value
+def v8f16  : ValueType<128, 87>;   //  8 x f16 vector value
+def v1f32  : ValueType<32 , 88>;   //  1 x f32 vector value
+def v2f32  : ValueType<64 , 89>;   //  2 x f32 vector value
+def v4f32  : ValueType<128, 90>;   //  4 x f32 vector value
+def v8f32  : ValueType<256, 91>;   //  8 x f32 vector value
+def v16f32 : ValueType<512, 92>;   // 16 x f32 vector value
+def v1f64  : ValueType<64,  93>;   //  1 x f64 vector value
+def v2f64  : ValueType<128, 94>;   //  2 x f64 vector value
+def v4f64  : ValueType<256, 95>;   //  4 x f64 vector value
+def v8f64  : ValueType<512, 96>;   //  8 x f64 vector value
+
+def nxv2f16  : ValueType<32 ,  97>; // n x  2 x f16 vector value
+def nxv4f16  : ValueType<64 ,  98>; // n x  4 x f16 vector value
+def nxv8f16  : ValueType<128,  99>; // n x  8 x f16 vector value
+def nxv1f32  : ValueType<32 , 100>; // n x  1 x f32 vector value
+def nxv2f32  : ValueType<64 , 101>; // n x  2 x f32 vector value
+def nxv4f32  : ValueType<128, 102>; // n x  4 x f32 vector value
+def nxv8f32  : ValueType<256, 103>; // n x  8 x f32 vector value
+def nxv16f32 : ValueType<512, 104>; // n x 16 x f32 vector value
+def nxv1f64  : ValueType<64,  105>; // n x  1 x f64 vector value
+def nxv2f64  : ValueType<128, 106>; // n x  2 x f64 vector value
+def nxv4f64  : ValueType<256, 107>; // n x  4 x f64 vector value
+def nxv8f64  : ValueType<512, 108>; // n x  8 x f64 vector value
+
+def x86mmx : ValueType<64 , 109>;   // X86 MMX value
+def FlagVT : ValueType<0  , 110>;   // Pre-RA sched glue
+def isVoid : ValueType<0  , 111>;   // Produces no value
+def untyped: ValueType<8  , 112>;   // Produces an untyped value
+def ExceptRef: ValueType<0, 113>;   // WebAssembly's except_ref type
+def token  : ValueType<0  , 248>;   // TokenTy
+def MetadataVT: ValueType<0, 249>;  // Metadata
+
+// Pseudo valuetype mapped to the current pointer size, for any address space.
+// Should only be used in TableGen.
+def iPTRAny   : ValueType<0, 250>;
+
+// Pseudo valuetype to represent "vector of any size"
+def vAny   : ValueType<0  , 251>;
+
+// Pseudo valuetype to represent "float of any format"
+def fAny   : ValueType<0  , 252>;
+
+// Pseudo valuetype to represent "integer of any bit width"
+def iAny   : ValueType<0  , 253>;
+
+// Pseudo valuetype mapped to the current pointer size.
+def iPTR   : ValueType<0  , 254>;
+
+// Pseudo valuetype to represent "any type of any size".
+def Any    : ValueType<0  , 255>;
diff --git a/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
new file mode 100644
index 0000000..3b06f03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
@@ -0,0 +1,188 @@
+//===- llvm/CodeGen/VirtRegMap.h - Virtual Register Map ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a virtual register map. This maps virtual registers to
+// physical registers and virtual registers to stack slots. It is created and
+// updated by a register allocator and then used by a machine code rewriter that
+// adds spill code and rewrites virtual into physical register references.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VIRTREGMAP_H
+#define LLVM_CODEGEN_VIRTREGMAP_H
+
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include <cassert>
+
+namespace llvm {
+
+class MachineFunction;
+class MachineRegisterInfo;
+class raw_ostream;
+class TargetInstrInfo;
+
+  class VirtRegMap : public MachineFunctionPass {
+  public:
+    enum {
+      NO_PHYS_REG = 0,
+      NO_STACK_SLOT = (1L << 30)-1,
+      MAX_STACK_SLOT = (1L << 18)-1
+    };
+
+  private:
+    MachineRegisterInfo *MRI;
+    const TargetInstrInfo *TII;
+    const TargetRegisterInfo *TRI;
+    MachineFunction *MF;
+
+    /// Virt2PhysMap - This is a virtual to physical register
+    /// mapping. Each virtual register is required to have an entry in
+    /// it; even spilled virtual registers (the register mapped to a
+    /// spilled register is the temporary used to load it from the
+    /// stack).
+    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
+
+    /// Virt2StackSlotMap - This is virtual register to stack slot
+    /// mapping. Each spilled virtual register has an entry in it
+    /// which corresponds to the stack slot this register is spilled
+    /// at.
+    IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+
+    /// Virt2SplitMap - This is a virtual register to split virtual register
+    /// mapping.
+    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
+
+    /// createSpillSlot - Allocate a spill slot for RC from MFI.
+    unsigned createSpillSlot(const TargetRegisterClass *RC);
+
+  public:
+    static char ID;
+
+    VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
+                   Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) {}
+    VirtRegMap(const VirtRegMap &) = delete;
+    VirtRegMap &operator=(const VirtRegMap &) = delete;
+
+    bool runOnMachineFunction(MachineFunction &MF) override;
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      AU.setPreservesAll();
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
+
+    MachineFunction &getMachineFunction() const {
+      assert(MF && "getMachineFunction called before runOnMachineFunction");
+      return *MF;
+    }
+
+    MachineRegisterInfo &getRegInfo() const { return *MRI; }
+    const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }
+
+    void grow();
+
+    /// @brief returns true if the specified virtual register is
+    /// mapped to a physical register
+    bool hasPhys(unsigned virtReg) const {
+      return getPhys(virtReg) != NO_PHYS_REG;
+    }
+
+    /// @brief returns the physical register mapped to the specified
+    /// virtual register
+    unsigned getPhys(unsigned virtReg) const {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      return Virt2PhysMap[virtReg];
+    }
+
+    /// @brief creates a mapping for the specified virtual register to
+    /// the specified physical register
+    void assignVirt2Phys(unsigned virtReg, MCPhysReg physReg);
+
+    /// @brief clears the specified virtual register's physical
+    /// register mapping
+    void clearVirt(unsigned virtReg) {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
+             "attempt to clear a not assigned virtual register");
+      Virt2PhysMap[virtReg] = NO_PHYS_REG;
+    }
+
+    /// @brief clears all virtual to physical register mappings
+    void clearAllVirt() {
+      Virt2PhysMap.clear();
+      grow();
+    }
+
+    /// @brief returns true if VirtReg is assigned to its preferred physreg.
+    bool hasPreferredPhys(unsigned VirtReg);
+
+    /// @brief returns true if VirtReg has a known preferred register.
+    /// This returns false if VirtReg has a preference that is a virtual
+    /// register that hasn't been assigned yet.
+    bool hasKnownPreference(unsigned VirtReg);
+
+    /// @brief records virtReg is a split live interval from SReg.
+    void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
+      Virt2SplitMap[virtReg] = SReg;
+    }
+
+    /// @brief returns the live interval virtReg is split from.
+    unsigned getPreSplitReg(unsigned virtReg) const {
+      return Virt2SplitMap[virtReg];
+    }
+
+    /// getOriginal - Return the original virtual register that VirtReg descends
+    /// from through splitting.
+    /// A register that was not created by splitting is its own original.
+    /// This operation is idempotent.
+    unsigned getOriginal(unsigned VirtReg) const {
+      unsigned Orig = getPreSplitReg(VirtReg);
+      return Orig ? Orig : VirtReg;
+    }
+
+    /// @brief returns true if the specified virtual register is not
+    /// mapped to a stack slot or rematerialized.
+    bool isAssignedReg(unsigned virtReg) const {
+      if (getStackSlot(virtReg) == NO_STACK_SLOT)
+        return true;
+      // Split register can be assigned a physical register as well as a
+      // stack slot or remat id.
+      return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
+    }
+
+    /// @brief returns the stack slot mapped to the specified virtual
+    /// register
+    int getStackSlot(unsigned virtReg) const {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      return Virt2StackSlotMap[virtReg];
+    }
+
+    /// @brief create a mapping for the specified virtual register to
+    /// the next available stack slot
+    int assignVirt2StackSlot(unsigned virtReg);
+
+    /// @brief create a mapping for the specified virtual register to
+    /// the specified stack slot
+    void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
+
+    void print(raw_ostream &OS, const Module* M = nullptr) const override;
+    void dump() const;
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
+    VRM.print(OS);
+    return OS;
+  }
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_VIRTREGMAP_H
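A minimal usage sketch for the map above, using only accessors declared in this header; the function and parameter names are illustrative, and VReg must be a virtual register (the accessors assert this):

```cpp
#include "llvm/CodeGen/VirtRegMap.h"

void demoVirtRegMap(llvm::VirtRegMap &VRM, unsigned VReg,
                    llvm::MCPhysReg PReg) {
  if (!VRM.hasPhys(VReg))
    VRM.assignVirt2Phys(VReg, PReg); // record the vreg -> preg assignment

  // Splitting bookkeeping: getOriginal follows the split chain to its root.
  unsigned Orig = VRM.getOriginal(VReg);
  (void)Orig;

  // Spilled vregs carry a frame index; NO_STACK_SLOT means "not spilled".
  if (!VRM.isAssignedReg(VReg))
    (void)VRM.getStackSlot(VReg);
}
```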
diff --git a/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h b/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h
new file mode 100644
index 0000000..8043024
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h
@@ -0,0 +1,129 @@
+//===- llvm/CodeGen/WinEHFuncInfo.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures and associated state for Windows exception handling schemes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_WINEHFUNCINFO_H
+#define LLVM_CODEGEN_WINEHFUNCINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class FuncletPadInst;
+class Function;
+class GlobalVariable;
+class Instruction;
+class InvokeInst;
+class MachineBasicBlock;
+class MCSymbol;
+
+// The following structs represent the .xdata tables for various
+// Windows-related EH personalities.
+
+using MBBOrBasicBlock = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
+
+struct CxxUnwindMapEntry {
+  int ToState;
+  MBBOrBasicBlock Cleanup;
+};
+
+/// Similar to CxxUnwindMapEntry, but supports SEH filters.
+struct SEHUnwindMapEntry {
+  /// If unwinding continues through this handler, transition to the handler at
+  /// this state. This indexes into SEHUnwindMap.
+  int ToState = -1;
+
+  bool IsFinally = false;
+
+  /// Holds the filter expression function.
+  const Function *Filter = nullptr;
+
+  /// Holds the __except or __finally basic block.
+  MBBOrBasicBlock Handler;
+};
+
+struct WinEHHandlerType {
+  int Adjectives;
+  /// The CatchObj starts out life as an LLVM alloca and is eventually turned
+  /// into a frame index.
+  union {
+    const AllocaInst *Alloca;
+    int FrameIndex;
+  } CatchObj = {};
+  GlobalVariable *TypeDescriptor;
+  MBBOrBasicBlock Handler;
+};
+
+struct WinEHTryBlockMapEntry {
+  int TryLow = -1;
+  int TryHigh = -1;
+  int CatchHigh = -1;
+  SmallVector<WinEHHandlerType, 1> HandlerArray;
+};
+
+enum class ClrHandlerType { Catch, Finally, Fault, Filter };
+
+struct ClrEHUnwindMapEntry {
+  MBBOrBasicBlock Handler;
+  uint32_t TypeToken;
+  int HandlerParentState; ///< Outer handler enclosing this entry's handler
+  int TryParentState; ///< Outer try region enclosing this entry's try region,
+                      ///< treating later catches on same try as "outer"
+  ClrHandlerType HandlerType;
+};
+
+struct WinEHFuncInfo {
+  DenseMap<const Instruction *, int> EHPadStateMap;
+  DenseMap<const FuncletPadInst *, int> FuncletBaseStateMap;
+  DenseMap<const InvokeInst *, int> InvokeStateMap;
+  DenseMap<MCSymbol *, std::pair<int, MCSymbol *>> LabelToStateMap;
+  SmallVector<CxxUnwindMapEntry, 4> CxxUnwindMap;
+  SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
+  SmallVector<SEHUnwindMapEntry, 4> SEHUnwindMap;
+  SmallVector<ClrEHUnwindMapEntry, 4> ClrEHUnwindMap;
+  int UnwindHelpFrameIdx = std::numeric_limits<int>::max();
+  int PSPSymFrameIdx = std::numeric_limits<int>::max();
+
+  int getLastStateNumber() const { return CxxUnwindMap.size() - 1; }
+
+  void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin,
+                         MCSymbol *InvokeEnd);
+
+  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
+  int EHRegNodeEndOffset = std::numeric_limits<int>::max();
+  int EHGuardFrameIndex = std::numeric_limits<int>::max();
+  int SEHSetFrameOffset = std::numeric_limits<int>::max();
+
+  WinEHFuncInfo();
+};
+
+/// Analyze the IR in ParentFn and its handlers to build WinEHFuncInfo, which
+/// describes the state numbers and tables used by __CxxFrameHandler3. This
+/// analysis assumes that WinEHPrepare has already been run.
+void calculateWinCXXEHStateNumbers(const Function *ParentFn,
+                                   WinEHFuncInfo &FuncInfo);
+
+void calculateSEHStateNumbers(const Function *ParentFn,
+                              WinEHFuncInfo &FuncInfo);
+
+void calculateClrEHStateNumbers(const Function *Fn, WinEHFuncInfo &FuncInfo);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_WINEHFUNCINFO_H
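A short sketch of how the analysis entry points above are typically invoked; it assumes WinEHPrepare has already run on F, as the comment requires, and demoStateNumbers is an illustrative name:

```cpp
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Function.h"

void demoStateNumbers(const llvm::Function &F) {
  llvm::WinEHFuncInfo FuncInfo;
  llvm::calculateWinCXXEHStateNumbers(&F, FuncInfo);

  // The analysis fills in the .xdata tables declared above; for example,
  // TryBlockMap describes each try region and CxxUnwindMap the unwind states.
  int LastState = FuncInfo.getLastStateNumber();
  (void)LastState;
}
```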
diff --git a/linux-x64/clang/include/llvm/Config/AsmParsers.def b/linux-x64/clang/include/llvm/Config/AsmParsers.def
new file mode 100644
index 0000000..931883b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/AsmParsers.def
@@ -0,0 +1,33 @@
+/*===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file enumerates all of the assembly-language parsers                  *|
+|* supported by this build of LLVM. Clients of this file should define        *|
+|* the LLVM_ASM_PARSER macro to be a function-like macro with a               *|
+|* single parameter (the name of the target whose assembly can be             *|
+|* parsed); including this file will then enumerate all of the                *|
+|* targets with assembly parsers.                                             *|
+|*                                                                            *|
+|* The set of targets supported by LLVM is generated at configuration         *|
+|* time, at which point this header is generated. Do not modify this          *|
+|* header directly.                                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_ASM_PARSER
+#  error Please define the macro LLVM_ASM_PARSER(TargetName)
+#endif
+
+LLVM_ASM_PARSER(AArch64)
+LLVM_ASM_PARSER(ARM)
+LLVM_ASM_PARSER(BPF)
+LLVM_ASM_PARSER(X86)
+
+
+#undef LLVM_ASM_PARSER
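A sketch of how clients consume this X-macro file — the same pattern applies to AsmPrinters.def, Disassemblers.def, and Targets.def below. The LLVMInitialize<Target>AsmParser functions are the C-API initializers declared in llvm-c/Target.h; initAllAsmParsers is an illustrative name:

```cpp
#include "llvm-c/Target.h" // declares the LLVMInitialize*AsmParser functions

static inline void initAllAsmParsers() {
#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def" // the .def #undefs the macro itself
}
```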
diff --git a/linux-x64/clang/include/llvm/Config/AsmPrinters.def b/linux-x64/clang/include/llvm/Config/AsmPrinters.def
new file mode 100644
index 0000000..e01fe2b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/AsmPrinters.def
@@ -0,0 +1,33 @@
+/*===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file enumerates all of the assembly-language printers                 *|
+|* supported by this build of LLVM. Clients of this file should define        *|
+|* the LLVM_ASM_PRINTER macro to be a function-like macro with a              *|
+|* single parameter (the name of the target whose assembly can be             *|
+|* generated); including this file will then enumerate all of the             *|
+|* targets with assembly printers.                                            *|
+|*                                                                            *|
+|* The set of targets supported by LLVM is generated at configuration         *|
+|* time, at which point this header is generated. Do not modify this          *|
+|* header directly.                                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_ASM_PRINTER
+#  error Please define the macro LLVM_ASM_PRINTER(TargetName)
+#endif
+
+LLVM_ASM_PRINTER(AArch64)
+LLVM_ASM_PRINTER(ARM)
+LLVM_ASM_PRINTER(BPF)
+LLVM_ASM_PRINTER(X86)
+
+
+#undef LLVM_ASM_PRINTER
diff --git a/linux-x64/clang/include/llvm/Config/Disassemblers.def b/linux-x64/clang/include/llvm/Config/Disassemblers.def
new file mode 100644
index 0000000..21f4b0c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/Disassemblers.def
@@ -0,0 +1,33 @@
+/*===- llvm/Config/Disassemblers.def - LLVM Disassemblers ------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file enumerates all of the disassemblers                              *|
+|* supported by this build of LLVM. Clients of this file should define        *|
+|* the LLVM_DISASSEMBLER macro to be a function-like macro with a             *|
+|* single parameter (the name of the target whose machine code can be         *|
+|* disassembled); including this file will then enumerate all of the          *|
+|* targets with disassemblers.                                                *|
+|*                                                                            *|
+|* The set of targets supported by LLVM is generated at configuration         *|
+|* time, at which point this header is generated. Do not modify this          *|
+|* header directly.                                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_DISASSEMBLER
+#  error Please define the macro LLVM_DISASSEMBLER(TargetName)
+#endif
+
+LLVM_DISASSEMBLER(AArch64)
+LLVM_DISASSEMBLER(ARM)
+LLVM_DISASSEMBLER(BPF)
+LLVM_DISASSEMBLER(X86)
+
+
+#undef LLVM_DISASSEMBLER
diff --git a/linux-x64/clang/include/llvm/Config/Targets.def b/linux-x64/clang/include/llvm/Config/Targets.def
new file mode 100644
index 0000000..02725f3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/Targets.def
@@ -0,0 +1,32 @@
+/*===- llvm/Config/Targets.def - LLVM Target Architectures ------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This file enumerates all of the target architectures supported by          *|
+|* this build of LLVM. Clients of this file should define the                 *|
+|* LLVM_TARGET macro to be a function-like macro with a single                *|
+|* parameter (the name of the target); including this file will then          *|
+|* enumerate all of the targets.                                              *|
+|*                                                                            *|
+|* The set of targets supported by LLVM is generated at configuration         *|
+|* time, at which point this header is generated. Do not modify this          *|
+|* header directly.                                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_TARGET
+#  error Please define the macro LLVM_TARGET(TargetName)
+#endif
+
+LLVM_TARGET(AArch64)
+LLVM_TARGET(ARM)
+LLVM_TARGET(BPF)
+LLVM_TARGET(X86)
+
+
+#undef LLVM_TARGET
diff --git a/linux-x64/clang/include/llvm/Config/abi-breaking.h b/linux-x64/clang/include/llvm/Config/abi-breaking.h
new file mode 100644
index 0000000..d7f82e1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/abi-breaking.h
@@ -0,0 +1,50 @@
+/*===------- llvm/Config/abi-breaking.h - llvm configuration -------*- C -*-===*/
+/*                                                                            */
+/*                     The LLVM Compiler Infrastructure                       */
+/*                                                                            */
+/* This file is distributed under the University of Illinois Open Source      */
+/* License. See LICENSE.TXT for details.                                      */
+/*                                                                            */
+/*===----------------------------------------------------------------------===*/
+
+/* This file controls the C++ ABI break introduced in LLVM public headers. */
+
+#ifndef LLVM_ABI_BREAKING_CHECKS_H
+#define LLVM_ABI_BREAKING_CHECKS_H
+
+/* Define to enable checks that alter the LLVM C++ ABI */
+#define LLVM_ENABLE_ABI_BREAKING_CHECKS 0
+
+/* Define to enable reverse iteration of unordered llvm containers */
+#define LLVM_ENABLE_REVERSE_ITERATION 0
+
+/* Allow selectively disabling link-time mismatch checking so that header-only
+   ADT content from LLVM can be used without linking libSupport. */
+#if !LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+// ABI_BREAKING_CHECKS protection: provides a link-time failure when a client's
+// build configuration does not match LLVM's.
+#if defined(_MSC_VER)
+// Use pragma with MSVC
+#define LLVM_XSTR(s) LLVM_STR(s)
+#define LLVM_STR(s) #s
+#pragma detect_mismatch("LLVM_ENABLE_ABI_BREAKING_CHECKS", LLVM_XSTR(LLVM_ENABLE_ABI_BREAKING_CHECKS))
+#undef LLVM_XSTR
+#undef LLVM_STR
+#elif defined(_WIN32) || defined(__CYGWIN__) // Win32 w/o #pragma detect_mismatch
+// FIXME: Implement checks without weak.
+#elif defined(__cplusplus)
+namespace llvm {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+extern int EnableABIBreakingChecks;
+__attribute__((weak, visibility ("hidden"))) int *VerifyEnableABIBreakingChecks = &EnableABIBreakingChecks;
+#else
+extern int DisableABIBreakingChecks;
+__attribute__((weak, visibility ("hidden"))) int *VerifyDisableABIBreakingChecks = &DisableABIBreakingChecks;
+#endif
+}
+#endif // _MSC_VER
+
+#endif // LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+#endif
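A sketch of the other half of the weak-symbol check: libSupport defines exactly one of the two symbols (in LLVM the definitions live in lib/Support/ABIBreak.cpp), so an object compiled with the opposite setting references a symbol that is never defined and the link fails:

```cpp
// What libSupport provides, schematically; the header's weak pointer above
// then resolves against whichever symbol this build actually defines.
namespace llvm {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
int EnableABIBreakingChecks;    // only present in builds with checks on
#else
int DisableABIBreakingChecks;   // only present in builds with checks off
#endif
} // end namespace llvm
```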
diff --git a/linux-x64/clang/include/llvm/Config/llvm-config.h b/linux-x64/clang/include/llvm/Config/llvm-config.h
new file mode 100644
index 0000000..07fa1c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Config/llvm-config.h
@@ -0,0 +1,85 @@
+/*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
+/*                                                                            */
+/*                     The LLVM Compiler Infrastructure                       */
+/*                                                                            */
+/* This file is distributed under the University of Illinois Open Source      */
+/* License. See LICENSE.TXT for details.                                      */
+/*                                                                            */
+/*===----------------------------------------------------------------------===*/
+
+/* This file enumerates variables from the LLVM configuration so that they
+   can be used in exported headers without overriding package-specific
+   directives. This is a C header that can be included in the llvm-c headers. */
+
+#ifndef LLVM_CONFIG_H
+#define LLVM_CONFIG_H
+
+/* Define if LLVM_ENABLE_DUMP is enabled */
+/* #undef LLVM_ENABLE_DUMP */
+
+/* Define if we link Polly to the tools */
+/* #undef LINK_POLLY_INTO_TOOLS */
+
+/* Target triple LLVM will generate code for by default */
+#define LLVM_DEFAULT_TARGET_TRIPLE "x86_64-unknown-linux-gnu"
+
+/* Define if threads enabled */
+#define LLVM_ENABLE_THREADS 1
+
+/* Has gcc/MSVC atomic intrinsics */
+#define LLVM_HAS_ATOMICS 1
+
+/* Host triple LLVM will be executed on */
+#define LLVM_HOST_TRIPLE "x86_64-unknown-linux-gnu"
+
+/* LLVM architecture name for the native architecture, if available */
+#define LLVM_NATIVE_ARCH X86
+
+/* LLVM name for the native AsmParser init function, if available */
+#define LLVM_NATIVE_ASMPARSER LLVMInitializeX86AsmParser
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
+
+/* LLVM name for the native Disassembler init function, if available */
+#define LLVM_NATIVE_DISASSEMBLER LLVMInitializeX86Disassembler
+
+/* LLVM name for the native Target init function, if available */
+#define LLVM_NATIVE_TARGET LLVMInitializeX86Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#define LLVM_NATIVE_TARGETINFO LLVMInitializeX86TargetInfo
+
+/* LLVM name for the native target MC init function, if available */
+#define LLVM_NATIVE_TARGETMC LLVMInitializeX86TargetMC
+
+/* Define if this is a Unixish platform */
+#define LLVM_ON_UNIX 1
+
+/* Define if this is a Win32ish platform */
+/* #undef LLVM_ON_WIN32 */
+
+/* Define if we have the Intel JIT API runtime support library */
+#define LLVM_USE_INTEL_JITEVENTS 0
+
+/* Define if we have the oprofile JIT-support library */
+#define LLVM_USE_OPROFILE 0
+
+/* Major version of the LLVM API */
+#define LLVM_VERSION_MAJOR 7
+
+/* Minor version of the LLVM API */
+#define LLVM_VERSION_MINOR 0
+
+/* Patch version of the LLVM API */
+#define LLVM_VERSION_PATCH 2
+
+/* LLVM version string */
+#define LLVM_VERSION_STRING "7.0.2svn"
+
+/* Whether LLVM records statistics for use with GetStatistics(),
+ * PrintStatistics() or PrintStatisticsJSON()
+ */
+#define LLVM_FORCE_ENABLE_STATS 0
+
+#endif
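A sketch showing the intended use of the LLVM_NATIVE_* macros: initializing only the host backend. With the values above these calls expand to LLVMInitializeX86TargetInfo() and friends; this mirrors llvm::InitializeNativeTarget() from llvm/Support/TargetSelect.h, and initNativeTarget is an illustrative name:

```cpp
#include "llvm/Config/llvm-config.h"

extern "C" {
void LLVM_NATIVE_TARGETINFO(void); // expands to LLVMInitializeX86TargetInfo
void LLVM_NATIVE_TARGET(void);     // expands to LLVMInitializeX86Target
void LLVM_NATIVE_TARGETMC(void);   // expands to LLVMInitializeX86TargetMC
}

static void initNativeTarget() {
  LLVM_NATIVE_TARGETINFO();
  LLVM_NATIVE_TARGET();
  LLVM_NATIVE_TARGETMC();
}
```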
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h
new file mode 100644
index 0000000..bd17435
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h
@@ -0,0 +1,70 @@
+//===- AppendingTypeTableBuilder.h -------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class ContinuationRecordBuilder;
+
+class AppendingTypeTableBuilder : public TypeCollection {
+
+  BumpPtrAllocator &RecordStorage;
+  SimpleTypeSerializer SimpleSerializer;
+
+  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
+  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
+
+public:
+  explicit AppendingTypeTableBuilder(BumpPtrAllocator &Storage);
+  ~AppendingTypeTableBuilder();
+
+  // TypeCollection overrides
+  Optional<TypeIndex> getFirst() override;
+  Optional<TypeIndex> getNext(TypeIndex Prev) override;
+  CVType getType(TypeIndex Index) override;
+  StringRef getTypeName(TypeIndex Index) override;
+  bool contains(TypeIndex Index) override;
+  uint32_t size() override;
+  uint32_t capacity() override;
+
+  // public interface
+  void reset();
+  TypeIndex nextTypeIndex() const;
+
+  BumpPtrAllocator &getAllocator() { return RecordStorage; }
+
+  ArrayRef<ArrayRef<uint8_t>> records() const;
+  TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
+  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);
+
+  template <typename T> TypeIndex writeLeafType(T &Record) {
+    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
+    return insertRecordBytes(Data);
+  }
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H
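A usage sketch for the builder above; RecordBytes is assumed to hold one complete, valid leaf record, and appendOne is an illustrative name. Note that insertRecordBytes takes its ArrayRef by non-const reference, matching the declaration:

```cpp
#include "llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h"
#include "llvm/Support/Allocator.h"

llvm::codeview::TypeIndex appendOne(llvm::ArrayRef<uint8_t> &RecordBytes) {
  llvm::BumpPtrAllocator Alloc; // backs the builder's record storage
  llvm::codeview::AppendingTypeTableBuilder Builder(Alloc);
  // The returned index is what nextTypeIndex() reported before insertion.
  return Builder.insertRecordBytes(RecordBytes);
}
```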
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVRecord.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVRecord.h
new file mode 100644
index 0000000..9dbeb43
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVRecord.h
@@ -0,0 +1,126 @@
+//===- CVRecord.h ------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_RECORDITERATOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_RECORDITERATOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace codeview {
+
+template <typename Kind> class CVRecord {
+public:
+  CVRecord() : Type(static_cast<Kind>(0)) {}
+
+  CVRecord(Kind K, ArrayRef<uint8_t> Data) : Type(K), RecordData(Data) {}
+
+  bool valid() const { return Type != static_cast<Kind>(0); }
+
+  uint32_t length() const { return RecordData.size(); }
+  Kind kind() const { return Type; }
+  ArrayRef<uint8_t> data() const { return RecordData; }
+  StringRef str_data() const {
+    return StringRef(reinterpret_cast<const char *>(RecordData.data()),
+                     RecordData.size());
+  }
+
+  ArrayRef<uint8_t> content() const {
+    return RecordData.drop_front(sizeof(RecordPrefix));
+  }
+
+  Optional<uint32_t> hash() const { return Hash; }
+
+  void setHash(uint32_t Value) { Hash = Value; }
+
+  Kind Type;
+  ArrayRef<uint8_t> RecordData;
+  Optional<uint32_t> Hash;
+};
+
+template <typename Kind> struct RemappedRecord {
+  explicit RemappedRecord(const CVRecord<Kind> &R) : OriginalRecord(R) {}
+
+  CVRecord<Kind> OriginalRecord;
+  SmallVector<std::pair<uint32_t, TypeIndex>, 8> Mappings;
+};
+
+template <typename Record, typename Func>
+Error forEachCodeViewRecord(ArrayRef<uint8_t> StreamBuffer, Func F) {
+  while (!StreamBuffer.empty()) {
+    if (StreamBuffer.size() < sizeof(RecordPrefix))
+      return make_error<CodeViewError>(cv_error_code::corrupt_record);
+
+    const RecordPrefix *Prefix =
+        reinterpret_cast<const RecordPrefix *>(StreamBuffer.data());
+
+    size_t RealLen = Prefix->RecordLen + 2;
+    if (StreamBuffer.size() < RealLen)
+      return make_error<CodeViewError>(cv_error_code::corrupt_record);
+
+    ArrayRef<uint8_t> Data = StreamBuffer.take_front(RealLen);
+    StreamBuffer = StreamBuffer.drop_front(RealLen);
+
+    Record R(static_cast<decltype(Record::Type)>((uint16_t)Prefix->RecordKind),
+             Data);
+    if (auto EC = F(R))
+      return EC;
+  }
+  return Error::success();
+}
+
+/// Read a complete record from a stream at an arbitrary offset.
+template <typename Kind>
+inline Expected<CVRecord<Kind>> readCVRecordFromStream(BinaryStreamRef Stream,
+                                                       uint32_t Offset) {
+  const RecordPrefix *Prefix = nullptr;
+  BinaryStreamReader Reader(Stream);
+  Reader.setOffset(Offset);
+
+  if (auto EC = Reader.readObject(Prefix))
+    return std::move(EC);
+  if (Prefix->RecordLen < 2)
+    return make_error<CodeViewError>(cv_error_code::corrupt_record);
+  Kind K = static_cast<Kind>(uint16_t(Prefix->RecordKind));
+
+  Reader.setOffset(Offset);
+  ArrayRef<uint8_t> RawData;
+  if (auto EC = Reader.readBytes(RawData, Prefix->RecordLen + sizeof(uint16_t)))
+    return std::move(EC);
+  return codeview::CVRecord<Kind>(K, RawData);
+}
+
+} // end namespace codeview
+
+template <typename Kind>
+struct VarStreamArrayExtractor<codeview::CVRecord<Kind>> {
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   codeview::CVRecord<Kind> &Item) {
+    auto ExpectedRec = codeview::readCVRecordFromStream<Kind>(Stream, 0);
+    if (!ExpectedRec)
+      return ExpectedRec.takeError();
+    Item = *ExpectedRec;
+    Len = ExpectedRec->length();
+    return Error::success();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_RECORDITERATOR_H
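A sketch of iterating a raw record buffer with forEachCodeViewRecord; Buffer is an assumed input of correctly prefixed records, and countRecords is an illustrative name. The raw CVRecord<TypeLeafKind> instantiation is used to stay within what this header declares:

```cpp
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"

llvm::Error countRecords(llvm::ArrayRef<uint8_t> Buffer,
                         unsigned &NumRecords) {
  using namespace llvm::codeview;
  NumRecords = 0;
  return forEachCodeViewRecord<CVRecord<TypeLeafKind>>(
      Buffer, [&](const CVRecord<TypeLeafKind> &R) -> llvm::Error {
        ++NumRecords; // R.kind() and R.content() expose the record payload
        return llvm::Error::success();
      });
}
```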
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h
new file mode 100644
index 0000000..7c8cd12
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h
@@ -0,0 +1,39 @@
+//===- CVSymbolVisitor.h ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H
+
+#include "llvm/DebugInfo/CodeView/CVRecord.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
+#include "llvm/Support/ErrorOr.h"
+
+namespace llvm {
+namespace codeview {
+class SymbolVisitorCallbacks;
+
+class CVSymbolVisitor {
+public:
+  CVSymbolVisitor(SymbolVisitorCallbacks &Callbacks);
+
+  Error visitSymbolRecord(CVSymbol &Record);
+  Error visitSymbolRecord(CVSymbol &Record, uint32_t Offset);
+  Error visitSymbolStream(const CVSymbolArray &Symbols);
+  Error visitSymbolStream(const CVSymbolArray &Symbols, uint32_t InitialOffset);
+
+private:
+  SymbolVisitorCallbacks &Callbacks;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
new file mode 100644
index 0000000..b765ba1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
@@ -0,0 +1,53 @@
+//===- CVTypeVisitor.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
+
+#include "llvm/DebugInfo/CodeView/CVRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class TypeCollection;
+class TypeVisitorCallbacks;
+
+enum VisitorDataSource {
+  VDS_BytesPresent, // The record bytes are passed into the visitation
+                    // function.  The algorithm should first deserialize them
+                    // before passing them on through the pipeline.
+  VDS_BytesExternal // The record bytes are not present, and it is the
+                    // responsibility of the visitor callback interface to
+                    // supply the bytes.
+};
+
+Error visitTypeRecord(CVType &Record, TypeIndex Index,
+                      TypeVisitorCallbacks &Callbacks,
+                      VisitorDataSource Source = VDS_BytesPresent);
+Error visitTypeRecord(CVType &Record, TypeVisitorCallbacks &Callbacks,
+                      VisitorDataSource Source = VDS_BytesPresent);
+
+Error visitMemberRecord(CVMemberRecord Record, TypeVisitorCallbacks &Callbacks,
+                        VisitorDataSource Source = VDS_BytesPresent);
+Error visitMemberRecord(TypeLeafKind Kind, ArrayRef<uint8_t> Record,
+                        TypeVisitorCallbacks &Callbacks);
+
+Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
+                              TypeVisitorCallbacks &Callbacks);
+
+Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
+                      VisitorDataSource Source = VDS_BytesPresent);
+Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks);
+Error visitTypeStream(TypeCollection &Types, TypeVisitorCallbacks &Callbacks);
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
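A sketch of driving visitTypeStream with a minimal callback; the TypeVisitorCallbacks base class and its visitTypeBegin hook are assumed from TypeVisitorCallbacks.h, and CountingCallbacks/countTypes are illustrative names:

```cpp
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"

namespace {
struct CountingCallbacks : public llvm::codeview::TypeVisitorCallbacks {
  unsigned Count = 0;
  llvm::Error visitTypeBegin(llvm::codeview::CVType &) override {
    ++Count; // called once per record before any known-record dispatch
    return llvm::Error::success();
  }
};
} // end anonymous namespace

llvm::Error countTypes(llvm::codeview::TypeCollection &Types, unsigned &N) {
  CountingCallbacks CB;
  if (auto EC = llvm::codeview::visitTypeStream(Types, CB))
    return EC;
  N = CB.Count;
  return llvm::Error::success();
}
```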
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeView.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeView.h
new file mode 100644
index 0000000..301e4f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -0,0 +1,580 @@
+//===- CodeView.h -----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines constants and basic types describing CodeView debug information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
+#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
+
+#include <cinttypes>
+#include <type_traits>
+
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace codeview {
+
+/// Distinguishes individual records in the .debug$T section or PDB type
+/// stream. The documentation and headers talk about this as the "leaf" type.
+enum class TypeRecordKind : uint16_t {
+#define TYPE_RECORD(lf_ename, value, name) name = value,
+#include "CodeViewTypes.def"
+};
+
+/// Duplicate copy of the above enum, but using the official CV names. Useful
+/// for reference purposes and when dealing with unknown record types.
+enum TypeLeafKind : uint16_t {
+#define CV_TYPE(name, val) name = val,
+#include "CodeViewTypes.def"
+};
+
+/// Distinguishes individual records in the Symbols subsection of a .debug$S
+/// section. Equivalent to SYM_ENUM_e in cvinfo.h.
+enum class SymbolRecordKind : uint16_t {
+#define SYMBOL_RECORD(lf_ename, value, name) name = value,
+#include "CodeViewSymbols.def"
+};
+
+/// Duplicate copy of the above enum, but using the official CV names. Useful
+/// for reference purposes and when dealing with unknown record types.
+enum SymbolKind : uint16_t {
+#define CV_SYMBOL(name, val) name = val,
+#include "CodeViewSymbols.def"
+};
+
+#define CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(Class)                            \
+  inline Class operator|(Class a, Class b) {                                   \
+    return static_cast<Class>(                                                 \
+        static_cast<std::underlying_type<Class>::type>(a) |                    \
+        static_cast<std::underlying_type<Class>::type>(b));                    \
+  }                                                                            \
+  inline Class operator&(Class a, Class b) {                                   \
+    return static_cast<Class>(                                                 \
+        static_cast<std::underlying_type<Class>::type>(a) &                    \
+        static_cast<std::underlying_type<Class>::type>(b));                    \
+  }                                                                            \
+  inline Class operator~(Class a) {                                            \
+    return static_cast<Class>(                                                 \
+        ~static_cast<std::underlying_type<Class>::type>(a));                   \
+  }                                                                            \
+  inline Class &operator|=(Class &a, Class b) {                                \
+    a = a | b;                                                                 \
+    return a;                                                                  \
+  }                                                                            \
+  inline Class &operator&=(Class &a, Class b) {                                \
+    a = a & b;                                                                 \
+    return a;                                                                  \
+  }
+
+/// These values correspond to the CV_CPU_TYPE_e enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+enum class CPUType : uint16_t {
+  Intel8080 = 0x0,
+  Intel8086 = 0x1,
+  Intel80286 = 0x2,
+  Intel80386 = 0x3,
+  Intel80486 = 0x4,
+  Pentium = 0x5,
+  PentiumPro = 0x6,
+  Pentium3 = 0x7,
+  MIPS = 0x10,
+  MIPS16 = 0x11,
+  MIPS32 = 0x12,
+  MIPS64 = 0x13,
+  MIPSI = 0x14,
+  MIPSII = 0x15,
+  MIPSIII = 0x16,
+  MIPSIV = 0x17,
+  MIPSV = 0x18,
+  M68000 = 0x20,
+  M68010 = 0x21,
+  M68020 = 0x22,
+  M68030 = 0x23,
+  M68040 = 0x24,
+  Alpha = 0x30,
+  Alpha21164 = 0x31,
+  Alpha21164A = 0x32,
+  Alpha21264 = 0x33,
+  Alpha21364 = 0x34,
+  PPC601 = 0x40,
+  PPC603 = 0x41,
+  PPC604 = 0x42,
+  PPC620 = 0x43,
+  PPCFP = 0x44,
+  PPCBE = 0x45,
+  SH3 = 0x50,
+  SH3E = 0x51,
+  SH3DSP = 0x52,
+  SH4 = 0x53,
+  SHMedia = 0x54,
+  ARM3 = 0x60,
+  ARM4 = 0x61,
+  ARM4T = 0x62,
+  ARM5 = 0x63,
+  ARM5T = 0x64,
+  ARM6 = 0x65,
+  ARM_XMAC = 0x66,
+  ARM_WMMX = 0x67,
+  ARM7 = 0x68,
+  ARM64 = 0x69,
+  Omni = 0x70,
+  Ia64 = 0x80,
+  Ia64_2 = 0x81,
+  CEE = 0x90,
+  AM33 = 0xa0,
+  M32R = 0xb0,
+  TriCore = 0xc0,
+  X64 = 0xd0,
+  EBC = 0xe0,
+  Thumb = 0xf0,
+  ARMNT = 0xf4,
+  D3D11_Shader = 0x100,
+};
+
+/// These values correspond to the CV_CFL_LANG enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/bw3aekw6.aspx
+enum SourceLanguage : uint8_t {
+  C = 0x00,
+  Cpp = 0x01,
+  Fortran = 0x02,
+  Masm = 0x03,
+  Pascal = 0x04,
+  Basic = 0x05,
+  Cobol = 0x06,
+  Link = 0x07,
+  Cvtres = 0x08,
+  Cvtpgd = 0x09,
+  CSharp = 0x0a,
+  VB = 0x0b,
+  ILAsm = 0x0c,
+  Java = 0x0d,
+  JScript = 0x0e,
+  MSIL = 0x0f,
+  HLSL = 0x10,
+
+  /// The DMD compiler emits 'D' for the CV source language. Microsoft doesn't
+  /// have an enumerator for it yet.
+  D = 'D',
+};
+
+/// These values correspond to the CV_call_e enumeration, and are documented
+/// at the following locations:
+///   https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+///   https://msdn.microsoft.com/en-us/library/windows/desktop/ms680207(v=vs.85).aspx
+///
+enum class CallingConvention : uint8_t {
+  NearC = 0x00,       // near right to left push, caller pops stack
+  FarC = 0x01,        // far right to left push, caller pops stack
+  NearPascal = 0x02,  // near left to right push, callee pops stack
+  FarPascal = 0x03,   // far left to right push, callee pops stack
+  NearFast = 0x04,    // near left to right push with regs, callee pops stack
+  FarFast = 0x05,     // far left to right push with regs, callee pops stack
+  NearStdCall = 0x07, // near standard call
+  FarStdCall = 0x08,  // far standard call
+  NearSysCall = 0x09, // near sys call
+  FarSysCall = 0x0a,  // far sys call
+  ThisCall = 0x0b,    // this call (this passed in register)
+  MipsCall = 0x0c,    // Mips call
+  Generic = 0x0d,     // Generic call sequence
+  AlphaCall = 0x0e,   // Alpha call
+  PpcCall = 0x0f,     // PPC call
+  SHCall = 0x10,      // Hitachi SuperH call
+  ArmCall = 0x11,     // ARM call
+  AM33Call = 0x12,    // AM33 call
+  TriCall = 0x13,     // TriCore Call
+  SH5Call = 0x14,     // Hitachi SuperH-5 call
+  M32RCall = 0x15,    // M32R Call
+  ClrCall = 0x16,     // clr call
+  Inline =
+      0x17, // Marker for routines always inlined and thus lacking a convention
+  NearVector = 0x18 // near left to right push with regs, callee pops stack
+};
+
+enum class ClassOptions : uint16_t {
+  None = 0x0000,
+  Packed = 0x0001,
+  HasConstructorOrDestructor = 0x0002,
+  HasOverloadedOperator = 0x0004,
+  Nested = 0x0008,
+  ContainsNestedClass = 0x0010,
+  HasOverloadedAssignmentOperator = 0x0020,
+  HasConversionOperator = 0x0040,
+  ForwardReference = 0x0080,
+  Scoped = 0x0100,
+  HasUniqueName = 0x0200,
+  Sealed = 0x0400,
+  Intrinsic = 0x2000
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ClassOptions)
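+// Illustrative example, not part of the original header: the macro above
+// gives ClassOptions the usual bitwise operators, so values compose and
+// test like plain bitmasks:
+//   ClassOptions Want = ClassOptions::Packed | ClassOptions::Nested;
+//   bool Both = (Opts & Want) == Want;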
+
+enum class FrameProcedureOptions : uint32_t {
+  None = 0x00000000,
+  HasAlloca = 0x00000001,
+  HasSetJmp = 0x00000002,
+  HasLongJmp = 0x00000004,
+  HasInlineAssembly = 0x00000008,
+  HasExceptionHandling = 0x00000010,
+  MarkedInline = 0x00000020,
+  HasStructuredExceptionHandling = 0x00000040,
+  Naked = 0x00000080,
+  SecurityChecks = 0x00000100,
+  AsynchronousExceptionHandling = 0x00000200,
+  NoStackOrderingForSecurityChecks = 0x00000400,
+  Inlined = 0x00000800,
+  StrictSecurityChecks = 0x00001000,
+  SafeBuffers = 0x00002000,
+  ProfileGuidedOptimization = 0x00040000,
+  ValidProfileCounts = 0x00080000,
+  OptimizedForSpeed = 0x00100000,
+  GuardCfg = 0x00200000,
+  GuardCfw = 0x00400000
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(FrameProcedureOptions)
+
+enum class FunctionOptions : uint8_t {
+  None = 0x00,
+  CxxReturnUdt = 0x01,
+  Constructor = 0x02,
+  ConstructorWithVirtualBases = 0x04
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(FunctionOptions)
+
+enum class HfaKind : uint8_t {
+  None = 0x00,
+  Float = 0x01,
+  Double = 0x02,
+  Other = 0x03
+};
+
+/// Source-level access specifier. (CV_access_e)
+enum class MemberAccess : uint8_t {
+  None = 0,
+  Private = 1,
+  Protected = 2,
+  Public = 3
+};
+
+/// Part of member attribute flags. (CV_methodprop_e)
+enum class MethodKind : uint8_t {
+  Vanilla = 0x00,
+  Virtual = 0x01,
+  Static = 0x02,
+  Friend = 0x03,
+  IntroducingVirtual = 0x04,
+  PureVirtual = 0x05,
+  PureIntroducingVirtual = 0x06
+};
+
+/// Equivalent to CV_fldattr_t bitfield.
+enum class MethodOptions : uint16_t {
+  None = 0x0000,
+  AccessMask = 0x0003,
+  MethodKindMask = 0x001c,
+  Pseudo = 0x0020,
+  NoInherit = 0x0040,
+  NoConstruct = 0x0080,
+  CompilerGenerated = 0x0100,
+  Sealed = 0x0200
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(MethodOptions)
+
+/// Equivalent to CV_LABEL_TYPE_e.
+enum class LabelType : uint16_t {
+  Near = 0x0,
+  Far = 0x4,
+};
+
+/// Equivalent to CV_modifier_t.
+/// TODO: Add flag for _Atomic modifier
+enum class ModifierOptions : uint16_t {
+  None = 0x0000,
+  Const = 0x0001,
+  Volatile = 0x0002,
+  Unaligned = 0x0004
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ModifierOptions)
+
+enum class DebugSubsectionKind : uint32_t {
+  None = 0,
+  Symbols = 0xf1,
+  Lines = 0xf2,
+  StringTable = 0xf3,
+  FileChecksums = 0xf4,
+  FrameData = 0xf5,
+  InlineeLines = 0xf6,
+  CrossScopeImports = 0xf7,
+  CrossScopeExports = 0xf8,
+
+  // These appear to relate to .Net assembly info.
+  ILLines = 0xf9,
+  FuncMDTokenMap = 0xfa,
+  TypeMDTokenMap = 0xfb,
+  MergedAssemblyInput = 0xfc,
+
+  CoffSymbolRVA = 0xfd,
+};
+
+/// Equivalent to CV_ptrtype_e.
+enum class PointerKind : uint8_t {
+  Near16 = 0x00,                // 16 bit pointer
+  Far16 = 0x01,                 // 16:16 far pointer
+  Huge16 = 0x02,                // 16:16 huge pointer
+  BasedOnSegment = 0x03,        // based on segment
+  BasedOnValue = 0x04,          // based on value of base
+  BasedOnSegmentValue = 0x05,   // based on segment value of base
+  BasedOnAddress = 0x06,        // based on address of base
+  BasedOnSegmentAddress = 0x07, // based on segment address of base
+  BasedOnType = 0x08,           // based on type
+  BasedOnSelf = 0x09,           // based on self
+  Near32 = 0x0a,                // 32 bit pointer
+  Far32 = 0x0b,                 // 16:32 pointer
+  Near64 = 0x0c                 // 64 bit pointer
+};
+
+/// Equivalent to CV_ptrmode_e.
+enum class PointerMode : uint8_t {
+  Pointer = 0x00,                 // "normal" pointer
+  LValueReference = 0x01,         // "old" reference
+  PointerToDataMember = 0x02,     // pointer to data member
+  PointerToMemberFunction = 0x03, // pointer to member function
+  RValueReference = 0x04          // r-value reference
+};
+
+/// Equivalent to misc lfPointerAttr bitfields.
+enum class PointerOptions : uint32_t {
+  None = 0x00000000,
+  Flat32 = 0x00000100,
+  Volatile = 0x00000200,
+  Const = 0x00000400,
+  Unaligned = 0x00000800,
+  Restrict = 0x00001000,
+  WinRTSmartPointer = 0x00080000
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PointerOptions)
+
+/// Equivalent to CV_pmtype_e.
+enum class PointerToMemberRepresentation : uint16_t {
+  Unknown = 0x00,                     // not specified (pre VC8)
+  SingleInheritanceData = 0x01,       // member data, single inheritance
+  MultipleInheritanceData = 0x02,     // member data, multiple inheritance
+  VirtualInheritanceData = 0x03,      // member data, virtual inheritance
+  GeneralData = 0x04,                 // member data, most general
+  SingleInheritanceFunction = 0x05,   // member function, single inheritance
+  MultipleInheritanceFunction = 0x06, // member function, multiple inheritance
+  VirtualInheritanceFunction = 0x07,  // member function, virtual inheritance
+  GeneralFunction = 0x08              // member function, most general
+};
+
+enum class VFTableSlotKind : uint8_t {
+  Near16 = 0x00,
+  Far16 = 0x01,
+  This = 0x02,
+  Outer = 0x03,
+  Meta = 0x04,
+  Near = 0x05,
+  Far = 0x06
+};
+
+enum class WindowsRTClassKind : uint8_t {
+  None = 0x00,
+  RefClass = 0x01,
+  ValueClass = 0x02,
+  Interface = 0x03
+};
+
+/// Corresponds to CV_LVARFLAGS bitfield.
+enum class LocalSymFlags : uint16_t {
+  None = 0,
+  IsParameter = 1 << 0,
+  IsAddressTaken = 1 << 1,
+  IsCompilerGenerated = 1 << 2,
+  IsAggregate = 1 << 3,
+  IsAggregated = 1 << 4,
+  IsAliased = 1 << 5,
+  IsAlias = 1 << 6,
+  IsReturnValue = 1 << 7,
+  IsOptimizedOut = 1 << 8,
+  IsEnregisteredGlobal = 1 << 9,
+  IsEnregisteredStatic = 1 << 10,
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(LocalSymFlags)
+
+/// Corresponds to the CV_PUBSYMFLAGS bitfield.
+enum class PublicSymFlags : uint32_t {
+  None = 0,
+  Code = 1 << 0,
+  Function = 1 << 1,
+  Managed = 1 << 2,
+  MSIL = 1 << 3,
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PublicSymFlags)
+
+/// Corresponds to the CV_PROCFLAGS bitfield.
+enum class ProcSymFlags : uint8_t {
+  None = 0,
+  HasFP = 1 << 0,
+  HasIRET = 1 << 1,
+  HasFRET = 1 << 2,
+  IsNoReturn = 1 << 3,
+  IsUnreachable = 1 << 4,
+  HasCustomCallingConv = 1 << 5,
+  IsNoInline = 1 << 6,
+  HasOptimizedDebugInfo = 1 << 7,
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ProcSymFlags)
+
+/// Corresponds to COMPILESYM2::Flags bitfield.
+enum class CompileSym2Flags : uint32_t {
+  None = 0,
+  SourceLanguageMask = 0xFF,
+  EC = 1 << 8,
+  NoDbgInfo = 1 << 9,
+  LTCG = 1 << 10,
+  NoDataAlign = 1 << 11,
+  ManagedPresent = 1 << 12,
+  SecurityChecks = 1 << 13,
+  HotPatch = 1 << 14,
+  CVTCIL = 1 << 15,
+  MSILModule = 1 << 16,
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym2Flags)
+
+/// Corresponds to COMPILESYM3::Flags bitfield.
+enum class CompileSym3Flags : uint32_t {
+  None = 0,
+  SourceLanguageMask = 0xFF,
+  EC = 1 << 8,
+  NoDbgInfo = 1 << 9,
+  LTCG = 1 << 10,
+  NoDataAlign = 1 << 11,
+  ManagedPresent = 1 << 12,
+  SecurityChecks = 1 << 13,
+  HotPatch = 1 << 14,
+  CVTCIL = 1 << 15,
+  MSILModule = 1 << 16,
+  Sdl = 1 << 17,
+  PGO = 1 << 18,
+  Exp = 1 << 19,
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym3Flags)
+
+enum class ExportFlags : uint16_t {
+  None = 0,
+  IsConstant = 1 << 0,
+  IsData = 1 << 1,
+  IsPrivate = 1 << 2,
+  HasNoName = 1 << 3,
+  HasExplicitOrdinal = 1 << 4,
+  IsForwarder = 1 << 5
+};
+CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ExportFlags)
+
+// Corresponds to BinaryAnnotationOpcode enum.
+enum class BinaryAnnotationsOpCode : uint32_t {
+  Invalid,
+  CodeOffset,
+  ChangeCodeOffsetBase,
+  ChangeCodeOffset,
+  ChangeCodeLength,
+  ChangeFile,
+  ChangeLineOffset,
+  ChangeLineEndDelta,
+  ChangeRangeKind,
+  ChangeColumnStart,
+  ChangeColumnEndDelta,
+  ChangeCodeOffsetAndLineOffset,
+  ChangeCodeLengthAndCodeOffset,
+  ChangeColumnEnd,
+};
+
+// Corresponds to CV_cookietype_e enum.
+enum class FrameCookieKind : uint8_t {
+  Copy,
+  XorStackPointer,
+  XorFramePointer,
+  XorR13,
+};
+
+// Corresponds to CV_HREG_e enum.
+enum class RegisterId : uint16_t {
+#define CV_REGISTER(name, value) name = value,
+#include "CodeViewRegisters.def"
+#undef CV_REGISTER
+};
+
+/// These values correspond to the THUNK_ORDINAL enumeration.
+enum class ThunkOrdinal : uint8_t {
+  Standard,
+  ThisAdjustor,
+  Vcall,
+  Pcode,
+  UnknownLoad,
+  TrampIncremental,
+  BranchIsland
+};
+
+enum class TrampolineType : uint16_t { TrampIncremental, BranchIsland };
+
+// These values correspond to the CV_SourceChksum_t enumeration.
+enum class FileChecksumKind : uint8_t { None, MD5, SHA1, SHA256 };
+
+enum LineFlags : uint16_t {
+  LF_None = 0,
+  LF_HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
+};
+
+/// Data in the SUBSEC_FRAMEDATA subsection.
+struct FrameData {
+  support::ulittle32_t RvaStart;
+  support::ulittle32_t CodeSize;
+  support::ulittle32_t LocalSize;
+  support::ulittle32_t ParamsSize;
+  support::ulittle32_t MaxStackSize;
+  support::ulittle32_t FrameFunc;
+  support::ulittle16_t PrologSize;
+  support::ulittle16_t SavedRegsSize;
+  support::ulittle32_t Flags;
+  enum : uint32_t {
+    HasSEH = 1 << 0,
+    HasEH = 1 << 1,
+    IsFunctionStart = 1 << 2,
+  };
+};
+
+// Corresponds to LocalIdAndGlobalIdPair structure.
+// The information in this structure allows cross-referencing between PDBs.  For
+// example, when a PDB is being built during compilation it is not yet known
+// what other modules may end up in the PDB at link time.  So certain types of
+// IDs may clash between the various compile time PDBs.  For each affected
+// module, a subsection would be put into the PDB containing a mapping from its
+// local IDs to a single ID namespace for all items in the PDB file.
+struct CrossModuleExport {
+  support::ulittle32_t Local;
+  support::ulittle32_t Global;
+};
+
+struct CrossModuleImport {
+  support::ulittle32_t ModuleNameOffset;
+  support::ulittle32_t Count; // Number of elements
+  // support::ulittle32_t ids[Count]; // id from referenced module
+};
+
+enum class CodeViewContainer { ObjectFile, Pdb };
+
+inline uint32_t alignOf(CodeViewContainer Container) {
+  if (Container == CodeViewContainer::ObjectFile)
+    return 1;
+  return 4;
+}
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewError.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewError.h
new file mode 100644
index 0000000..586a720
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewError.h
@@ -0,0 +1,46 @@
+//===- CodeViewError.h - Error extensions for CodeView ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_CODEVIEW_CODEVIEWERROR_H
+#define LLVM_DEBUGINFO_PDB_CODEVIEW_CODEVIEWERROR_H
+
+#include "llvm/Support/Error.h"
+
+#include <string>
+
+namespace llvm {
+namespace codeview {
+enum class cv_error_code {
+  unspecified = 1,
+  insufficient_buffer,
+  operation_unsupported,
+  corrupt_record,
+  no_records,
+  unknown_member_record,
+};
+
+/// Base class for errors originating when parsing raw PDB files
+class CodeViewError : public ErrorInfo<CodeViewError> {
+public:
+  static char ID;
+  CodeViewError(cv_error_code C);
+  CodeViewError(const std::string &Context);
+  CodeViewError(cv_error_code C, const std::string &Context);
+
+  void log(raw_ostream &OS) const override;
+  const std::string &getErrorMessage() const;
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+  cv_error_code Code;
+};
+} // end namespace codeview
+} // end namespace llvm
+#endif // LLVM_DEBUGINFO_PDB_CODEVIEW_CODEVIEWERROR_H
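A sketch of producing and consuming this error type through the standard llvm::Error machinery; checkedParse is an illustrative name:

```cpp
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/Support/Error.h"

llvm::Error checkedParse(bool TooSmall) {
  using namespace llvm::codeview;
  if (TooSmall)
    return llvm::make_error<CodeViewError>(cv_error_code::insufficient_buffer,
                                           "record prefix is truncated");
  return llvm::Error::success();
}

// Callers can pattern-match the concrete error type:
//   llvm::handleAllErrors(checkedParse(true),
//                         [](const llvm::codeview::CodeViewError &E) {
//                           llvm::errs() << E.getErrorMessage() << '\n';
//                         });
```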
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h
new file mode 100644
index 0000000..94f104f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h
@@ -0,0 +1,171 @@
+//===- CodeViewRecordIO.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
+#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+namespace codeview {
+
+class CodeViewRecordIO {
+  uint32_t getCurrentOffset() const {
+    return (isWriting()) ? Writer->getOffset() : Reader->getOffset();
+  }
+
+public:
+  explicit CodeViewRecordIO(BinaryStreamReader &Reader) : Reader(&Reader) {}
+  explicit CodeViewRecordIO(BinaryStreamWriter &Writer) : Writer(&Writer) {}
+
+  Error beginRecord(Optional<uint32_t> MaxLength);
+  Error endRecord();
+
+  Error mapInteger(TypeIndex &TypeInd);
+
+  bool isReading() const { return Reader != nullptr; }
+  bool isWriting() const { return !isReading(); }
+
+  uint32_t maxFieldLength() const;
+
+  template <typename T> Error mapObject(T &Value) {
+    if (isWriting())
+      return Writer->writeObject(Value);
+
+    const T *ValuePtr;
+    if (auto EC = Reader->readObject(ValuePtr))
+      return EC;
+    Value = *ValuePtr;
+    return Error::success();
+  }
+
+  template <typename T> Error mapInteger(T &Value) {
+    if (isWriting())
+      return Writer->writeInteger(Value);
+
+    return Reader->readInteger(Value);
+  }
+
+  template <typename T> Error mapEnum(T &Value) {
+    if (sizeof(Value) > maxFieldLength())
+      return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
+
+    using U = typename std::underlying_type<T>::type;
+    U X;
+    if (isWriting())
+      X = static_cast<U>(Value);
+
+    if (auto EC = mapInteger(X))
+      return EC;
+    if (isReading())
+      Value = static_cast<T>(X);
+    return Error::success();
+  }
+
+  Error mapEncodedInteger(int64_t &Value);
+  Error mapEncodedInteger(uint64_t &Value);
+  Error mapEncodedInteger(APSInt &Value);
+  Error mapStringZ(StringRef &Value);
+  Error mapGuid(GUID &Guid);
+
+  Error mapStringZVectorZ(std::vector<StringRef> &Value);
+
+  template <typename SizeType, typename T, typename ElementMapper>
+  Error mapVectorN(T &Items, const ElementMapper &Mapper) {
+    SizeType Size;
+    if (isWriting()) {
+      Size = static_cast<SizeType>(Items.size());
+      if (auto EC = Writer->writeInteger(Size))
+        return EC;
+
+      for (auto &X : Items) {
+        if (auto EC = Mapper(*this, X))
+          return EC;
+      }
+    } else {
+      if (auto EC = Reader->readInteger(Size))
+        return EC;
+      for (SizeType I = 0; I < Size; ++I) {
+        typename T::value_type Item;
+        if (auto EC = Mapper(*this, Item))
+          return EC;
+        Items.push_back(Item);
+      }
+    }
+
+    return Error::success();
+  }
+
+  template <typename T, typename ElementMapper>
+  Error mapVectorTail(T &Items, const ElementMapper &Mapper) {
+    if (isWriting()) {
+      for (auto &Item : Items) {
+        if (auto EC = Mapper(*this, Item))
+          return EC;
+      }
+    } else {
+      typename T::value_type Field;
+      // Stop when we run out of bytes or we hit record padding bytes.
+      while (!Reader->empty() && Reader->peek() < 0xf0 /* LF_PAD0 */) {
+        if (auto EC = Mapper(*this, Field))
+          return EC;
+        Items.push_back(Field);
+      }
+    }
+    return Error::success();
+  }
+
+  Error mapByteVectorTail(ArrayRef<uint8_t> &Bytes);
+  Error mapByteVectorTail(std::vector<uint8_t> &Bytes);
+
+  Error padToAlignment(uint32_t Align);
+  Error skipPadding();
+
+private:
+  Error writeEncodedSignedInteger(const int64_t &Value);
+  Error writeEncodedUnsignedInteger(const uint64_t &Value);
+
+  struct RecordLimit {
+    uint32_t BeginOffset;
+    Optional<uint32_t> MaxLength;
+
+    Optional<uint32_t> bytesRemaining(uint32_t CurrentOffset) const {
+      if (!MaxLength.hasValue())
+        return None;
+      assert(CurrentOffset >= BeginOffset);
+
+      uint32_t BytesUsed = CurrentOffset - BeginOffset;
+      if (BytesUsed >= *MaxLength)
+        return 0;
+      return *MaxLength - BytesUsed;
+    }
+  };
+
+  SmallVector<RecordLimit, 2> Limits;
+
+  BinaryStreamReader *Reader = nullptr;
+  BinaryStreamWriter *Writer = nullptr;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
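A sketch of the symmetric mapping style this class enables: one function describes a record's wire layout and serves both reading and writing, depending on which constructor built the CodeViewRecordIO. DemoRecord and mapDemoRecord are illustrative, not library names:

```cpp
#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"

struct DemoRecord {
  uint16_t Count;
  llvm::codeview::TypeIndex Type;
  llvm::StringRef Name;
};

llvm::Error mapDemoRecord(llvm::codeview::CodeViewRecordIO &IO,
                          DemoRecord &R) {
  if (auto EC = IO.mapInteger(R.Count)) // reads or writes a uint16_t
    return EC;
  if (auto EC = IO.mapInteger(R.Type))  // TypeIndex overload declared above
    return EC;
  return IO.mapStringZ(R.Name);         // null-terminated string
}
```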
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def
new file mode 100644
index 0000000..3f06602
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def
@@ -0,0 +1,268 @@
+//===-- CodeViewRegisters.def - CodeView registers --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See CV_HREG_e in cvconst.h. This should match the constants there.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CV_REGISTER
+#define CV_REGISTER(name, value)
+#endif
+
+// This currently only contains the "register subset shared by all processor
+// types" (ERR etc.) and the x86 registers.
+
+CV_REGISTER(ERR, 30000)
+CV_REGISTER(TEB, 30001)
+CV_REGISTER(TIMER, 30002)
+CV_REGISTER(EFAD1, 30003)
+CV_REGISTER(EFAD2, 30004)
+CV_REGISTER(EFAD3, 30005)
+CV_REGISTER(VFRAME, 30006)
+CV_REGISTER(HANDLE, 30007)
+CV_REGISTER(PARAMS, 30008)
+CV_REGISTER(LOCALS, 30009)
+CV_REGISTER(TID, 30010)
+CV_REGISTER(ENV, 30011)
+CV_REGISTER(CMDLN, 30012)
+
+CV_REGISTER(NONE, 0)
+CV_REGISTER(AL, 1)
+CV_REGISTER(CL, 2)
+CV_REGISTER(DL, 3)
+CV_REGISTER(BL, 4)
+CV_REGISTER(AH, 5)
+CV_REGISTER(CH, 6)
+CV_REGISTER(DH, 7)
+CV_REGISTER(BH, 8)
+CV_REGISTER(AX, 9)
+CV_REGISTER(CX, 10)
+CV_REGISTER(DX, 11)
+CV_REGISTER(BX, 12)
+CV_REGISTER(SP, 13)
+CV_REGISTER(BP, 14)
+CV_REGISTER(SI, 15)
+CV_REGISTER(DI, 16)
+CV_REGISTER(EAX, 17)
+CV_REGISTER(ECX, 18)
+CV_REGISTER(EDX, 19)
+CV_REGISTER(EBX, 20)
+CV_REGISTER(ESP, 21)
+CV_REGISTER(EBP, 22)
+CV_REGISTER(ESI, 23)
+CV_REGISTER(EDI, 24)
+CV_REGISTER(ES, 25)
+CV_REGISTER(CS, 26)
+CV_REGISTER(SS, 27)
+CV_REGISTER(DS, 28)
+CV_REGISTER(FS, 29)
+CV_REGISTER(GS, 30)
+CV_REGISTER(IP, 31)
+CV_REGISTER(FLAGS, 32)
+CV_REGISTER(EIP, 33)
+CV_REGISTER(EFLAGS, 34)
+CV_REGISTER(TEMP, 40)
+CV_REGISTER(TEMPH, 41)
+CV_REGISTER(QUOTE, 42)
+CV_REGISTER(PCDR3, 43)
+CV_REGISTER(PCDR4, 44)
+CV_REGISTER(PCDR5, 45)
+CV_REGISTER(PCDR6, 46)
+CV_REGISTER(PCDR7, 47)
+CV_REGISTER(CR0, 80)
+CV_REGISTER(CR1, 81)
+CV_REGISTER(CR2, 82)
+CV_REGISTER(CR3, 83)
+CV_REGISTER(CR4, 84)
+CV_REGISTER(DR0, 90)
+CV_REGISTER(DR1, 91)
+CV_REGISTER(DR2, 92)
+CV_REGISTER(DR3, 93)
+CV_REGISTER(DR4, 94)
+CV_REGISTER(DR5, 95)
+CV_REGISTER(DR6, 96)
+CV_REGISTER(DR7, 97)
+CV_REGISTER(GDTR, 110)
+CV_REGISTER(GDTL, 111)
+CV_REGISTER(IDTR, 112)
+CV_REGISTER(IDTL, 113)
+CV_REGISTER(LDTR, 114)
+CV_REGISTER(TR, 115)
+
+CV_REGISTER(PSEUDO1, 116)
+CV_REGISTER(PSEUDO2, 117)
+CV_REGISTER(PSEUDO3, 118)
+CV_REGISTER(PSEUDO4, 119)
+CV_REGISTER(PSEUDO5, 120)
+CV_REGISTER(PSEUDO6, 121)
+CV_REGISTER(PSEUDO7, 122)
+CV_REGISTER(PSEUDO8, 123)
+CV_REGISTER(PSEUDO9, 124)
+
+CV_REGISTER(ST0, 128)
+CV_REGISTER(ST1, 129)
+CV_REGISTER(ST2, 130)
+CV_REGISTER(ST3, 131)
+CV_REGISTER(ST4, 132)
+CV_REGISTER(ST5, 133)
+CV_REGISTER(ST6, 134)
+CV_REGISTER(ST7, 135)
+CV_REGISTER(CTRL, 136)
+CV_REGISTER(STAT, 137)
+CV_REGISTER(TAG, 138)
+CV_REGISTER(FPIP, 139)
+CV_REGISTER(FPCS, 140)
+CV_REGISTER(FPDO, 141)
+CV_REGISTER(FPDS, 142)
+CV_REGISTER(ISEM, 143)
+CV_REGISTER(FPEIP, 144)
+CV_REGISTER(FPEDO, 145)
+
+CV_REGISTER(MM0, 146)
+CV_REGISTER(MM1, 147)
+CV_REGISTER(MM2, 148)
+CV_REGISTER(MM3, 149)
+CV_REGISTER(MM4, 150)
+CV_REGISTER(MM5, 151)
+CV_REGISTER(MM6, 152)
+CV_REGISTER(MM7, 153)
+
+CV_REGISTER(XMM0, 154)
+CV_REGISTER(XMM1, 155)
+CV_REGISTER(XMM2, 156)
+CV_REGISTER(XMM3, 157)
+CV_REGISTER(XMM4, 158)
+CV_REGISTER(XMM5, 159)
+CV_REGISTER(XMM6, 160)
+CV_REGISTER(XMM7, 161)
+
+CV_REGISTER(MXCSR, 211)
+
+CV_REGISTER(EDXEAX, 212)
+
+CV_REGISTER(EMM0L, 220)
+CV_REGISTER(EMM1L, 221)
+CV_REGISTER(EMM2L, 222)
+CV_REGISTER(EMM3L, 223)
+CV_REGISTER(EMM4L, 224)
+CV_REGISTER(EMM5L, 225)
+CV_REGISTER(EMM6L, 226)
+CV_REGISTER(EMM7L, 227)
+
+CV_REGISTER(EMM0H, 228)
+CV_REGISTER(EMM1H, 229)
+CV_REGISTER(EMM2H, 230)
+CV_REGISTER(EMM3H, 231)
+CV_REGISTER(EMM4H, 232)
+CV_REGISTER(EMM5H, 233)
+CV_REGISTER(EMM6H, 234)
+CV_REGISTER(EMM7H, 235)
+
+CV_REGISTER(MM00, 236)
+CV_REGISTER(MM01, 237)
+CV_REGISTER(MM10, 238)
+CV_REGISTER(MM11, 239)
+CV_REGISTER(MM20, 240)
+CV_REGISTER(MM21, 241)
+CV_REGISTER(MM30, 242)
+CV_REGISTER(MM31, 243)
+CV_REGISTER(MM40, 244)
+CV_REGISTER(MM41, 245)
+CV_REGISTER(MM50, 246)
+CV_REGISTER(MM51, 247)
+CV_REGISTER(MM60, 248)
+CV_REGISTER(MM61, 249)
+CV_REGISTER(MM70, 250)
+CV_REGISTER(MM71, 251)
+
+CV_REGISTER(BND0, 396)
+CV_REGISTER(BND1, 397)
+CV_REGISTER(BND2, 398)
+
+
+CV_REGISTER(XMM8, 252)
+CV_REGISTER(XMM9, 253)
+CV_REGISTER(XMM10, 254)
+CV_REGISTER(XMM11, 255)
+CV_REGISTER(XMM12, 256)
+CV_REGISTER(XMM13, 257)
+CV_REGISTER(XMM14, 258)
+CV_REGISTER(XMM15, 259)
+
+
+CV_REGISTER(SIL, 324)
+CV_REGISTER(DIL, 325)
+CV_REGISTER(BPL, 326)
+CV_REGISTER(SPL, 327)
+
+CV_REGISTER(RAX, 328)
+CV_REGISTER(RBX, 329)
+CV_REGISTER(RCX, 330)
+CV_REGISTER(RDX, 331)
+CV_REGISTER(RSI, 332)
+CV_REGISTER(RDI, 333)
+CV_REGISTER(RBP, 334)
+CV_REGISTER(RSP, 335)
+
+CV_REGISTER(R8, 336)
+CV_REGISTER(R9, 337)
+CV_REGISTER(R10, 338)
+CV_REGISTER(R11, 339)
+CV_REGISTER(R12, 340)
+CV_REGISTER(R13, 341)
+CV_REGISTER(R14, 342)
+CV_REGISTER(R15, 343)
+
+CV_REGISTER(R8B, 344)
+CV_REGISTER(R9B, 345)
+CV_REGISTER(R10B, 346)
+CV_REGISTER(R11B, 347)
+CV_REGISTER(R12B, 348)
+CV_REGISTER(R13B, 349)
+CV_REGISTER(R14B, 350)
+CV_REGISTER(R15B, 351)
+
+CV_REGISTER(R8W, 352)
+CV_REGISTER(R9W, 353)
+CV_REGISTER(R10W, 354)
+CV_REGISTER(R11W, 355)
+CV_REGISTER(R12W, 356)
+CV_REGISTER(R13W, 357)
+CV_REGISTER(R14W, 358)
+CV_REGISTER(R15W, 359)
+
+CV_REGISTER(R8D, 360)
+CV_REGISTER(R9D, 361)
+CV_REGISTER(R10D, 362)
+CV_REGISTER(R11D, 363)
+CV_REGISTER(R12D, 364)
+CV_REGISTER(R13D, 365)
+CV_REGISTER(R14D, 366)
+CV_REGISTER(R15D, 367)
+
+
+// cvconst.h defines both CV_REG_YMM0 (252) and CV_AMD64_YMM0 (368). Keep the
+// original prefix to distinguish them.
+
+CV_REGISTER(AMD64_YMM0, 368)
+CV_REGISTER(AMD64_YMM1, 369)
+CV_REGISTER(AMD64_YMM2, 370)
+CV_REGISTER(AMD64_YMM3, 371)
+CV_REGISTER(AMD64_YMM4, 372)
+CV_REGISTER(AMD64_YMM5, 373)
+CV_REGISTER(AMD64_YMM6, 374)
+CV_REGISTER(AMD64_YMM7, 375)
+CV_REGISTER(AMD64_YMM8, 376)
+CV_REGISTER(AMD64_YMM9, 377)
+CV_REGISTER(AMD64_YMM10, 378)
+CV_REGISTER(AMD64_YMM11, 379)
+CV_REGISTER(AMD64_YMM12, 380)
+CV_REGISTER(AMD64_YMM13, 381)
+CV_REGISTER(AMD64_YMM14, 382)
+CV_REGISTER(AMD64_YMM15, 383)
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewSymbols.def b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewSymbols.def
new file mode 100644
index 0000000..41c5380
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewSymbols.def
@@ -0,0 +1,261 @@
+//===-- CodeViewSymbols.def - All CodeView leaf types -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See LEAF_ENUM_e in cvinfo.h. This should match the constants there.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CV_SYMBOL
+#define CV_SYMBOL(ename, value)
+#endif
+
+#ifndef SYMBOL_RECORD
+#define SYMBOL_RECORD(lf_ename, value, name) CV_SYMBOL(lf_ename, value)
+#endif
+
+#ifndef SYMBOL_RECORD_ALIAS
+#define SYMBOL_RECORD_ALIAS(lf_ename, value, name, alias_name)                   \
+  SYMBOL_RECORD(lf_ename, value, name)
+#endif
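+
+// The same X-macro style can also generate dispatch code.  A sketch
+// (SymbolKind is the enum built from CV_SYMBOL; visitKnownRecord and
+// visitUnknownRecord are hypothetical helpers):
+//
+//   switch (Kind) {
+//   #define SYMBOL_RECORD(EnumName, EnumVal, Name)                          \
+//     case SymbolKind::EnumName:                                            \
+//       return visitKnownRecord<Name>(Record);
+//   #include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+//   default:
+//     return visitUnknownRecord(Record);
+//   }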
+
+// 16 bit symbol types. Not very useful, provided only for reference.
+CV_SYMBOL(S_COMPILE       , 0x0001)
+CV_SYMBOL(S_REGISTER_16t  , 0x0002)
+CV_SYMBOL(S_CONSTANT_16t  , 0x0003)
+CV_SYMBOL(S_UDT_16t       , 0x0004)
+CV_SYMBOL(S_SSEARCH       , 0x0005)
+CV_SYMBOL(S_SKIP          , 0x0007)
+CV_SYMBOL(S_CVRESERVE     , 0x0008)
+CV_SYMBOL(S_OBJNAME_ST    , 0x0009)
+CV_SYMBOL(S_ENDARG        , 0x000a)
+CV_SYMBOL(S_COBOLUDT_16t  , 0x000b)
+CV_SYMBOL(S_MANYREG_16t   , 0x000c)
+CV_SYMBOL(S_RETURN        , 0x000d)
+CV_SYMBOL(S_ENTRYTHIS     , 0x000e)
+CV_SYMBOL(S_BPREL16       , 0x0100)
+CV_SYMBOL(S_LDATA16       , 0x0101)
+CV_SYMBOL(S_GDATA16       , 0x0102)
+CV_SYMBOL(S_PUB16         , 0x0103)
+CV_SYMBOL(S_LPROC16       , 0x0104)
+CV_SYMBOL(S_GPROC16       , 0x0105)
+CV_SYMBOL(S_THUNK16       , 0x0106)
+CV_SYMBOL(S_BLOCK16       , 0x0107)
+CV_SYMBOL(S_WITH16        , 0x0108)
+CV_SYMBOL(S_LABEL16       , 0x0109)
+CV_SYMBOL(S_CEXMODEL16    , 0x010a)
+CV_SYMBOL(S_VFTABLE16     , 0x010b)
+CV_SYMBOL(S_REGREL16      , 0x010c)
+CV_SYMBOL(S_BPREL32_16t   , 0x0200)
+CV_SYMBOL(S_LDATA32_16t   , 0x0201)
+CV_SYMBOL(S_GDATA32_16t   , 0x0202)
+CV_SYMBOL(S_PUB32_16t     , 0x0203)
+CV_SYMBOL(S_LPROC32_16t   , 0x0204)
+CV_SYMBOL(S_GPROC32_16t   , 0x0205)
+CV_SYMBOL(S_THUNK32_ST    , 0x0206)
+CV_SYMBOL(S_BLOCK32_ST    , 0x0207)
+CV_SYMBOL(S_WITH32_ST     , 0x0208)
+CV_SYMBOL(S_LABEL32_ST    , 0x0209)
+CV_SYMBOL(S_CEXMODEL32    , 0x020a)
+CV_SYMBOL(S_VFTABLE32_16t , 0x020b)
+CV_SYMBOL(S_REGREL32_16t  , 0x020c)
+CV_SYMBOL(S_LTHREAD32_16t , 0x020d)
+CV_SYMBOL(S_GTHREAD32_16t , 0x020e)
+CV_SYMBOL(S_SLINK32       , 0x020f)
+CV_SYMBOL(S_LPROCMIPS_16t , 0x0300)
+CV_SYMBOL(S_GPROCMIPS_16t , 0x0301)
+CV_SYMBOL(S_PROCREF_ST    , 0x0400)
+CV_SYMBOL(S_DATAREF_ST    , 0x0401)
+CV_SYMBOL(S_ALIGN         , 0x0402)
+CV_SYMBOL(S_LPROCREF_ST   , 0x0403)
+CV_SYMBOL(S_OEM           , 0x0404)
+
+// All post 16 bit symbol types have the 0x1000 bit set.
+CV_SYMBOL(S_TI16_MAX      , 0x1000)
+
+// Mostly unused "start" symbol types.
+CV_SYMBOL(S_REGISTER_ST   , 0x1001)
+CV_SYMBOL(S_CONSTANT_ST   , 0x1002)
+CV_SYMBOL(S_UDT_ST        , 0x1003)
+CV_SYMBOL(S_COBOLUDT_ST   , 0x1004)
+CV_SYMBOL(S_MANYREG_ST    , 0x1005)
+CV_SYMBOL(S_BPREL32_ST    , 0x1006)
+CV_SYMBOL(S_LDATA32_ST    , 0x1007)
+CV_SYMBOL(S_GDATA32_ST    , 0x1008)
+CV_SYMBOL(S_PUB32_ST      , 0x1009)
+CV_SYMBOL(S_LPROC32_ST    , 0x100a)
+CV_SYMBOL(S_GPROC32_ST    , 0x100b)
+CV_SYMBOL(S_VFTABLE32     , 0x100c)
+CV_SYMBOL(S_REGREL32_ST   , 0x100d)
+CV_SYMBOL(S_LTHREAD32_ST  , 0x100e)
+CV_SYMBOL(S_GTHREAD32_ST  , 0x100f)
+CV_SYMBOL(S_LPROCMIPS_ST  , 0x1010)
+CV_SYMBOL(S_GPROCMIPS_ST  , 0x1011)
+
+CV_SYMBOL(S_COMPILE2_ST   , 0x1013)
+CV_SYMBOL(S_MANYREG2_ST   , 0x1014)
+CV_SYMBOL(S_LPROCIA64_ST  , 0x1015)
+CV_SYMBOL(S_GPROCIA64_ST  , 0x1016)
+CV_SYMBOL(S_LOCALSLOT_ST  , 0x1017)
+CV_SYMBOL(S_PARAMSLOT_ST  , 0x1018)
+CV_SYMBOL(S_ANNOTATION    , 0x1019)
+CV_SYMBOL(S_GMANPROC_ST   , 0x101a)
+CV_SYMBOL(S_LMANPROC_ST   , 0x101b)
+CV_SYMBOL(S_RESERVED1     , 0x101c)
+CV_SYMBOL(S_RESERVED2     , 0x101d)
+CV_SYMBOL(S_RESERVED3     , 0x101e)
+CV_SYMBOL(S_RESERVED4     , 0x101f)
+CV_SYMBOL(S_LMANDATA_ST   , 0x1020)
+CV_SYMBOL(S_GMANDATA_ST   , 0x1021)
+CV_SYMBOL(S_MANFRAMEREL_ST, 0x1022)
+CV_SYMBOL(S_MANREGISTER_ST, 0x1023)
+CV_SYMBOL(S_MANSLOT_ST    , 0x1024)
+CV_SYMBOL(S_MANMANYREG_ST , 0x1025)
+CV_SYMBOL(S_MANREGREL_ST  , 0x1026)
+CV_SYMBOL(S_MANMANYREG2_ST, 0x1027)
+CV_SYMBOL(S_MANTYPREF     , 0x1028)
+CV_SYMBOL(S_UNAMESPACE_ST , 0x1029)
+
+// End of S_*_ST symbols, which do not appear to be generated by modern
+// compilers.
+CV_SYMBOL(S_ST_MAX        , 0x1100)
+
+
+CV_SYMBOL(S_WITH32        , 0x1104)
+CV_SYMBOL(S_MANYREG       , 0x110a)
+CV_SYMBOL(S_LPROCMIPS     , 0x1114)
+CV_SYMBOL(S_GPROCMIPS     , 0x1115)
+CV_SYMBOL(S_MANYREG2      , 0x1117)
+CV_SYMBOL(S_LPROCIA64     , 0x1118)
+CV_SYMBOL(S_GPROCIA64     , 0x1119)
+CV_SYMBOL(S_LOCALSLOT     , 0x111a)
+CV_SYMBOL(S_PARAMSLOT     , 0x111b)
+
+// Managed code symbols.
+CV_SYMBOL(S_MANFRAMEREL   , 0x111e)
+CV_SYMBOL(S_MANREGISTER   , 0x111f)
+CV_SYMBOL(S_MANSLOT       , 0x1120)
+CV_SYMBOL(S_MANMANYREG    , 0x1121)
+CV_SYMBOL(S_MANREGREL     , 0x1122)
+CV_SYMBOL(S_MANMANYREG2   , 0x1123)
+CV_SYMBOL(S_UNAMESPACE    , 0x1124)
+CV_SYMBOL(S_DATAREF       , 0x1126)
+CV_SYMBOL(S_ANNOTATIONREF , 0x1128)
+CV_SYMBOL(S_TOKENREF      , 0x1129)
+CV_SYMBOL(S_GMANPROC      , 0x112a)
+CV_SYMBOL(S_LMANPROC      , 0x112b)
+CV_SYMBOL(S_ATTR_FRAMEREL , 0x112e)
+CV_SYMBOL(S_ATTR_REGISTER , 0x112f)
+CV_SYMBOL(S_ATTR_REGREL   , 0x1130)
+CV_SYMBOL(S_ATTR_MANYREG  , 0x1131)
+
+
+CV_SYMBOL(S_SEPCODE       , 0x1132)
+CV_SYMBOL(S_LOCAL_2005    , 0x1133)
+CV_SYMBOL(S_DEFRANGE_2005 , 0x1134)
+CV_SYMBOL(S_DEFRANGE2_2005, 0x1135)
+CV_SYMBOL(S_DISCARDED     , 0x113b)
+
+// Current symbol types for most procedures as of this writing.
+CV_SYMBOL(S_LPROCMIPS_ID   , 0x1148)
+CV_SYMBOL(S_GPROCMIPS_ID   , 0x1149)
+CV_SYMBOL(S_LPROCIA64_ID   , 0x114a)
+CV_SYMBOL(S_GPROCIA64_ID   , 0x114b)
+
+CV_SYMBOL(S_DEFRANGE_HLSL  , 0x1150)
+CV_SYMBOL(S_GDATA_HLSL     , 0x1151)
+CV_SYMBOL(S_LDATA_HLSL     , 0x1152)
+CV_SYMBOL(S_LOCAL_DPC_GROUPSHARED, 0x1154)
+CV_SYMBOL(S_DEFRANGE_DPC_PTR_TAG, 0x1157)
+CV_SYMBOL(S_DPC_SYM_TAG_MAP, 0x1158)
+CV_SYMBOL(S_ARMSWITCHTABLE , 0x1159)
+CV_SYMBOL(S_POGODATA       , 0x115c)
+CV_SYMBOL(S_INLINESITE2    , 0x115d)
+CV_SYMBOL(S_MOD_TYPEREF    , 0x115f)
+CV_SYMBOL(S_REF_MINIPDB    , 0x1160)
+CV_SYMBOL(S_PDBMAP         , 0x1161)
+CV_SYMBOL(S_GDATA_HLSL32   , 0x1162)
+CV_SYMBOL(S_LDATA_HLSL32   , 0x1163)
+CV_SYMBOL(S_GDATA_HLSL32_EX, 0x1164)
+CV_SYMBOL(S_LDATA_HLSL32_EX, 0x1165)
+
+CV_SYMBOL(S_FASTLINK, 0x1167) // Undocumented
+SYMBOL_RECORD_ALIAS(S_INLINEES, 0x1168, InlineesSym, CallerSym) // Undocumented
+
+// Known symbol types
+SYMBOL_RECORD(S_END                  , 0x0006, ScopeEndSym)
+SYMBOL_RECORD_ALIAS(S_INLINESITE_END , 0x114e, InlineSiteEnd, ScopeEndSym)
+SYMBOL_RECORD_ALIAS(S_PROC_ID_END    , 0x114f, ProcEnd, ScopeEndSym)
+
+SYMBOL_RECORD(S_THUNK32       , 0x1102, Thunk32Sym)
+SYMBOL_RECORD(S_TRAMPOLINE    , 0x112c, TrampolineSym)
+SYMBOL_RECORD(S_SECTION       , 0x1136, SectionSym)
+SYMBOL_RECORD(S_COFFGROUP     , 0x1137, CoffGroupSym)
+SYMBOL_RECORD(S_EXPORT        , 0x1138, ExportSym)
+
+SYMBOL_RECORD(S_LPROC32       , 0x110f, ProcSym)
+SYMBOL_RECORD_ALIAS(S_GPROC32       , 0x1110, GlobalProcSym, ProcSym)
+SYMBOL_RECORD_ALIAS(S_LPROC32_ID     , 0x1146, ProcIdSym, ProcSym)
+SYMBOL_RECORD_ALIAS(S_GPROC32_ID     , 0x1147, GlobalProcIdSym, ProcSym)
+SYMBOL_RECORD_ALIAS(S_LPROC32_DPC    , 0x1155, DPCProcSym, ProcSym)
+SYMBOL_RECORD_ALIAS(S_LPROC32_DPC_ID , 0x1156, DPCProcIdSym, ProcSym)
+
+SYMBOL_RECORD(S_REGISTER      , 0x1106, RegisterSym)
+SYMBOL_RECORD(S_PUB32         , 0x110e, PublicSym32)
+
+SYMBOL_RECORD(S_PROCREF       , 0x1125, ProcRefSym)
+SYMBOL_RECORD_ALIAS(S_LPROCREF, 0x1127, LocalProcRef, ProcRefSym)
+
+
+SYMBOL_RECORD(S_ENVBLOCK      , 0x113d, EnvBlockSym)
+
+SYMBOL_RECORD(S_INLINESITE     , 0x114d, InlineSiteSym)
+SYMBOL_RECORD(S_LOCAL         , 0x113e, LocalSym)
+SYMBOL_RECORD(S_DEFRANGE      , 0x113f, DefRangeSym)
+SYMBOL_RECORD(S_DEFRANGE_SUBFIELD, 0x1140, DefRangeSubfieldSym)
+SYMBOL_RECORD(S_DEFRANGE_REGISTER, 0x1141, DefRangeRegisterSym)
+SYMBOL_RECORD(S_DEFRANGE_FRAMEPOINTER_REL, 0x1142, DefRangeFramePointerRelSym)
+SYMBOL_RECORD(S_DEFRANGE_SUBFIELD_REGISTER, 0x1143, DefRangeSubfieldRegisterSym)
+SYMBOL_RECORD(S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE, 0x1144, DefRangeFramePointerRelFullScopeSym)
+SYMBOL_RECORD(S_DEFRANGE_REGISTER_REL, 0x1145, DefRangeRegisterRelSym)
+SYMBOL_RECORD(S_BLOCK32       , 0x1103, BlockSym)
+SYMBOL_RECORD(S_LABEL32       , 0x1105, LabelSym)
+SYMBOL_RECORD(S_OBJNAME       , 0x1101, ObjNameSym)
+SYMBOL_RECORD(S_COMPILE2      , 0x1116, Compile2Sym)
+SYMBOL_RECORD(S_COMPILE3      , 0x113c, Compile3Sym)
+SYMBOL_RECORD(S_FRAMEPROC     , 0x1012, FrameProcSym)
+SYMBOL_RECORD(S_CALLSITEINFO  , 0x1139, CallSiteInfoSym)
+SYMBOL_RECORD(S_FILESTATIC     , 0x1153, FileStaticSym)
+SYMBOL_RECORD(S_HEAPALLOCSITE  , 0x115e, HeapAllocationSiteSym)
+SYMBOL_RECORD(S_FRAMECOOKIE   , 0x113a, FrameCookieSym)
+
+SYMBOL_RECORD(S_CALLEES        , 0x115a, CallerSym)
+SYMBOL_RECORD_ALIAS(S_CALLERS, 0x115b, CalleeSym, CallerSym)
+
+SYMBOL_RECORD(S_UDT           , 0x1108, UDTSym)
+SYMBOL_RECORD_ALIAS(S_COBOLUDT      , 0x1109, CobolUDT, UDTSym)
+
+SYMBOL_RECORD(S_BUILDINFO      , 0x114c, BuildInfoSym)
+SYMBOL_RECORD(S_BPREL32       , 0x110b, BPRelativeSym)
+SYMBOL_RECORD(S_REGREL32      , 0x1111, RegRelativeSym)
+
+SYMBOL_RECORD(S_CONSTANT      , 0x1107, ConstantSym)
+SYMBOL_RECORD_ALIAS(S_MANCONSTANT   , 0x112d, ManagedConstant, ConstantSym)
+
+SYMBOL_RECORD(S_LDATA32       , 0x110c, DataSym)
+SYMBOL_RECORD_ALIAS(S_GDATA32       , 0x110d, GlobalData, DataSym)
+SYMBOL_RECORD_ALIAS(S_LMANDATA      , 0x111c, ManagedLocalData, DataSym)
+SYMBOL_RECORD_ALIAS(S_GMANDATA      , 0x111d, ManagedGlobalData, DataSym)
+
+SYMBOL_RECORD(S_LTHREAD32     , 0x1112, ThreadLocalDataSym)
+SYMBOL_RECORD_ALIAS(S_GTHREAD32     , 0x1113, GlobalTLS, ThreadLocalDataSym)
+
+
+#undef CV_SYMBOL
+#undef SYMBOL_RECORD
+#undef SYMBOL_RECORD_ALIAS
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewTypes.def b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewTypes.def
new file mode 100644
index 0000000..69ce960
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/CodeViewTypes.def
@@ -0,0 +1,250 @@
+//===-- CodeViewTypes.def - All CodeView leaf types -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See LEAF_ENUM_e in cvinfo.h. This should match the constants there.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CV_TYPE
+#define CV_TYPE(lf_ename, value)
+#endif
+
+// If the type is known, then we have a record describing it in TypeRecord.h.
+#ifndef TYPE_RECORD
+#define TYPE_RECORD(lf_ename, value, name) CV_TYPE(lf_ename, value)
+#endif
+
+#ifndef TYPE_RECORD_ALIAS
+#define TYPE_RECORD_ALIAS(lf_ename, value, name, alias_name)                   \
+  TYPE_RECORD(lf_ename, value, name)
+#endif
+
+#ifndef MEMBER_RECORD
+#define MEMBER_RECORD(lf_ename, value, name) TYPE_RECORD(lf_ename, value, name)
+#endif
+
+#ifndef MEMBER_RECORD_ALIAS
+#define MEMBER_RECORD_ALIAS(lf_ename, value, name, alias_name)                 \
+  MEMBER_RECORD(lf_ename, value, name)
+#endif
+
+TYPE_RECORD(LF_POINTER, 0x1002, Pointer)
+TYPE_RECORD(LF_MODIFIER, 0x1001, Modifier)
+TYPE_RECORD(LF_PROCEDURE, 0x1008, Procedure)
+TYPE_RECORD(LF_MFUNCTION, 0x1009, MemberFunction)
+TYPE_RECORD(LF_LABEL, 0x000e, Label)
+TYPE_RECORD(LF_ARGLIST, 0x1201, ArgList)
+
+TYPE_RECORD(LF_FIELDLIST, 0x1203, FieldList)
+
+TYPE_RECORD(LF_ARRAY, 0x1503, Array)
+TYPE_RECORD(LF_CLASS, 0x1504, Class)
+TYPE_RECORD_ALIAS(LF_STRUCTURE, 0x1505, Struct, Class)
+TYPE_RECORD_ALIAS(LF_INTERFACE, 0x1519, Interface, Class)
+TYPE_RECORD(LF_UNION, 0x1506, Union)
+TYPE_RECORD(LF_ENUM, 0x1507, Enum)
+TYPE_RECORD(LF_TYPESERVER2, 0x1515, TypeServer2)
+TYPE_RECORD(LF_VFTABLE, 0x151d, VFTable)
+TYPE_RECORD(LF_VTSHAPE, 0x000a, VFTableShape)
+
+TYPE_RECORD(LF_BITFIELD, 0x1205, BitField)
+
+// Member type records. These are generally not length prefixed, and appear
+// inside of a field list record.
+MEMBER_RECORD(LF_BCLASS, 0x1400, BaseClass)
+MEMBER_RECORD_ALIAS(LF_BINTERFACE, 0x151a, BaseInterface, BaseClass)
+
+MEMBER_RECORD(LF_VBCLASS, 0x1401, VirtualBaseClass)
+MEMBER_RECORD_ALIAS(LF_IVBCLASS, 0x1402, IndirectVirtualBaseClass,
+                    VirtualBaseClass)
+
+MEMBER_RECORD(LF_VFUNCTAB, 0x1409, VFPtr)
+MEMBER_RECORD(LF_STMEMBER, 0x150e, StaticDataMember)
+MEMBER_RECORD(LF_METHOD, 0x150f, OverloadedMethod)
+MEMBER_RECORD(LF_MEMBER, 0x150d, DataMember)
+MEMBER_RECORD(LF_NESTTYPE, 0x1510, NestedType)
+MEMBER_RECORD(LF_ONEMETHOD, 0x1511, OneMethod)
+MEMBER_RECORD(LF_ENUMERATE, 0x1502, Enumerator)
+MEMBER_RECORD(LF_INDEX, 0x1404, ListContinuation)
+
+// ID leaf records. Subsequent leaf types may be referenced from .debug$S.
+TYPE_RECORD(LF_FUNC_ID, 0x1601, FuncId)
+TYPE_RECORD(LF_MFUNC_ID, 0x1602, MemberFuncId)
+TYPE_RECORD(LF_BUILDINFO, 0x1603, BuildInfo)
+TYPE_RECORD(LF_SUBSTR_LIST, 0x1604, StringList)
+TYPE_RECORD(LF_STRING_ID, 0x1605, StringId)
+TYPE_RECORD(LF_UDT_SRC_LINE, 0x1606, UdtSourceLine)
+TYPE_RECORD(LF_UDT_MOD_SRC_LINE, 0x1607, UdtModSourceLine)
+
+
+TYPE_RECORD(LF_METHODLIST, 0x1206, MethodOverloadList)
+
+
+// 16 bit type records.
+CV_TYPE(LF_MODIFIER_16t, 0x0001)
+CV_TYPE(LF_POINTER_16t, 0x0002)
+CV_TYPE(LF_ARRAY_16t, 0x0003)
+CV_TYPE(LF_CLASS_16t, 0x0004)
+CV_TYPE(LF_STRUCTURE_16t, 0x0005)
+CV_TYPE(LF_UNION_16t, 0x0006)
+CV_TYPE(LF_ENUM_16t, 0x0007)
+CV_TYPE(LF_PROCEDURE_16t, 0x0008)
+CV_TYPE(LF_MFUNCTION_16t, 0x0009)
+CV_TYPE(LF_COBOL0_16t, 0x000b)
+CV_TYPE(LF_COBOL1, 0x000c)
+CV_TYPE(LF_BARRAY_16t, 0x000d)
+CV_TYPE(LF_NULLLEAF, 0x000f) // LF_NULL
+CV_TYPE(LF_NOTTRAN, 0x0010)
+CV_TYPE(LF_DIMARRAY_16t, 0x0011)
+CV_TYPE(LF_VFTPATH_16t, 0x0012)
+CV_TYPE(LF_PRECOMP_16t, 0x0013)
+CV_TYPE(LF_ENDPRECOMP, 0x0014)
+CV_TYPE(LF_OEM_16t, 0x0015)
+CV_TYPE(LF_TYPESERVER_ST, 0x0016)
+
+CV_TYPE(LF_SKIP_16t, 0x0200)
+CV_TYPE(LF_ARGLIST_16t, 0x0201)
+CV_TYPE(LF_DEFARG_16t, 0x0202)
+CV_TYPE(LF_LIST, 0x0203)
+CV_TYPE(LF_FIELDLIST_16t, 0x0204)
+CV_TYPE(LF_DERIVED_16t, 0x0205)
+CV_TYPE(LF_BITFIELD_16t, 0x0206)
+CV_TYPE(LF_METHODLIST_16t, 0x0207)
+CV_TYPE(LF_DIMCONU_16t, 0x0208)
+CV_TYPE(LF_DIMCONLU_16t, 0x0209)
+CV_TYPE(LF_DIMVARU_16t, 0x020a)
+CV_TYPE(LF_DIMVARLU_16t, 0x020b)
+CV_TYPE(LF_REFSYM, 0x020c)
+
+// 16 bit member types. Generally not length prefixed.
+CV_TYPE(LF_BCLASS_16t, 0x0400)
+CV_TYPE(LF_VBCLASS_16t, 0x0401)
+CV_TYPE(LF_IVBCLASS_16t, 0x0402)
+CV_TYPE(LF_ENUMERATE_ST, 0x0403)
+CV_TYPE(LF_FRIENDFCN_16t, 0x0404)
+CV_TYPE(LF_INDEX_16t, 0x0405)
+CV_TYPE(LF_MEMBER_16t, 0x0406)
+CV_TYPE(LF_STMEMBER_16t, 0x0407)
+CV_TYPE(LF_METHOD_16t, 0x0408)
+CV_TYPE(LF_NESTTYPE_16t, 0x0409)
+CV_TYPE(LF_VFUNCTAB_16t, 0x040a)
+CV_TYPE(LF_FRIENDCLS_16t, 0x040b)
+CV_TYPE(LF_ONEMETHOD_16t, 0x040c)
+CV_TYPE(LF_VFUNCOFF_16t, 0x040d)
+
+CV_TYPE(LF_TI16_MAX, 0x1000)
+
+CV_TYPE(LF_ARRAY_ST, 0x1003)
+CV_TYPE(LF_CLASS_ST, 0x1004)
+CV_TYPE(LF_STRUCTURE_ST, 0x1005)
+CV_TYPE(LF_UNION_ST, 0x1006)
+CV_TYPE(LF_ENUM_ST, 0x1007)
+CV_TYPE(LF_COBOL0, 0x100a)
+CV_TYPE(LF_BARRAY, 0x100b)
+CV_TYPE(LF_DIMARRAY_ST, 0x100c)
+CV_TYPE(LF_VFTPATH, 0x100d)
+CV_TYPE(LF_PRECOMP_ST, 0x100e)
+CV_TYPE(LF_OEM, 0x100f)
+CV_TYPE(LF_ALIAS_ST, 0x1010)
+CV_TYPE(LF_OEM2, 0x1011)
+
+CV_TYPE(LF_SKIP, 0x1200)
+CV_TYPE(LF_DEFARG_ST, 0x1202)
+CV_TYPE(LF_DERIVED, 0x1204)
+CV_TYPE(LF_DIMCONU, 0x1207)
+CV_TYPE(LF_DIMCONLU, 0x1208)
+CV_TYPE(LF_DIMVARU, 0x1209)
+CV_TYPE(LF_DIMVARLU, 0x120a)
+
+// Member type records. These are generally not length prefixed, and appear
+// inside of a field list record.
+CV_TYPE(LF_FRIENDFCN_ST, 0x1403)
+CV_TYPE(LF_MEMBER_ST, 0x1405)
+CV_TYPE(LF_STMEMBER_ST, 0x1406)
+CV_TYPE(LF_METHOD_ST, 0x1407)
+CV_TYPE(LF_NESTTYPE_ST, 0x1408)
+CV_TYPE(LF_FRIENDCLS, 0x140a)
+CV_TYPE(LF_ONEMETHOD_ST, 0x140b)
+CV_TYPE(LF_VFUNCOFF, 0x140c)
+CV_TYPE(LF_NESTTYPEEX_ST, 0x140d)
+CV_TYPE(LF_MEMBERMODIFY_ST, 0x140e)
+CV_TYPE(LF_MANAGED_ST, 0x140f)
+
+CV_TYPE(LF_ST_MAX, 0x1500)
+CV_TYPE(LF_TYPESERVER, 0x1501)
+CV_TYPE(LF_DIMARRAY, 0x1508)
+CV_TYPE(LF_PRECOMP, 0x1509)
+CV_TYPE(LF_ALIAS, 0x150a)
+CV_TYPE(LF_DEFARG, 0x150b)
+CV_TYPE(LF_FRIENDFCN, 0x150c)
+CV_TYPE(LF_NESTTYPEEX, 0x1512)
+CV_TYPE(LF_MEMBERMODIFY, 0x1513)
+CV_TYPE(LF_MANAGED, 0x1514)
+CV_TYPE(LF_STRIDED_ARRAY, 0x1516)
+CV_TYPE(LF_HLSL, 0x1517)
+CV_TYPE(LF_MODIFIER_EX, 0x1518)
+CV_TYPE(LF_VECTOR, 0x151b)
+CV_TYPE(LF_MATRIX, 0x151c)
+
+// ID leaf records. Subsequent leaf types may be referenced from .debug$S.
+
+// Numeric leaf types. These are generally contained in other records, and not
+// encountered in the main type stream.
+
+CV_TYPE(LF_NUMERIC, 0x8000)
+CV_TYPE(LF_CHAR, 0x8000)
+CV_TYPE(LF_SHORT, 0x8001)
+CV_TYPE(LF_USHORT, 0x8002)
+CV_TYPE(LF_LONG, 0x8003)
+CV_TYPE(LF_ULONG, 0x8004)
+CV_TYPE(LF_REAL32, 0x8005)
+CV_TYPE(LF_REAL64, 0x8006)
+CV_TYPE(LF_REAL80, 0x8007)
+CV_TYPE(LF_REAL128, 0x8008)
+CV_TYPE(LF_QUADWORD, 0x8009)
+CV_TYPE(LF_UQUADWORD, 0x800a)
+CV_TYPE(LF_REAL48, 0x800b)
+CV_TYPE(LF_COMPLEX32, 0x800c)
+CV_TYPE(LF_COMPLEX64, 0x800d)
+CV_TYPE(LF_COMPLEX80, 0x800e)
+CV_TYPE(LF_COMPLEX128, 0x800f)
+CV_TYPE(LF_VARSTRING, 0x8010)
+CV_TYPE(LF_OCTWORD, 0x8017)
+CV_TYPE(LF_UOCTWORD, 0x8018)
+CV_TYPE(LF_DECIMAL, 0x8019)
+CV_TYPE(LF_DATE, 0x801a)
+CV_TYPE(LF_UTF8STRING, 0x801b)
+CV_TYPE(LF_REAL16, 0x801c)
+
+// Padding bytes. These are emitted into alignment bytes in the type stream.
+
+CV_TYPE(LF_PAD0, 0xf0)
+CV_TYPE(LF_PAD1, 0xf1)
+CV_TYPE(LF_PAD2, 0xf2)
+CV_TYPE(LF_PAD3, 0xf3)
+CV_TYPE(LF_PAD4, 0xf4)
+CV_TYPE(LF_PAD5, 0xf5)
+CV_TYPE(LF_PAD6, 0xf6)
+CV_TYPE(LF_PAD7, 0xf7)
+CV_TYPE(LF_PAD8, 0xf8)
+CV_TYPE(LF_PAD9, 0xf9)
+CV_TYPE(LF_PAD10, 0xfa)
+CV_TYPE(LF_PAD11, 0xfb)
+CV_TYPE(LF_PAD12, 0xfc)
+CV_TYPE(LF_PAD13, 0xfd)
+CV_TYPE(LF_PAD14, 0xfe)
+CV_TYPE(LF_PAD15, 0xff)
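+
+// A padding byte's low nibble gives the number of padding bytes remaining,
+// counting itself: three bytes of alignment padding are emitted as 0xf3 0xf2
+// 0xf1, so a reader can detect and skip padding one byte at a time.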
+
+#undef CV_TYPE
+#undef TYPE_RECORD
+#undef TYPE_RECORD_ALIAS
+#undef MEMBER_RECORD
+#undef MEMBER_RECORD_ALIAS
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h
new file mode 100644
index 0000000..7f851a2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h
@@ -0,0 +1,65 @@
+//===- ContinuationRecordBuilder.h ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CONTINUATIONRECORDBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_CONTINUATIONRECORDBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+enum class ContinuationRecordKind { FieldList, MethodOverloadList };
+
+class ContinuationRecordBuilder {
+  SmallVector<uint32_t, 4> SegmentOffsets;
+  Optional<ContinuationRecordKind> Kind;
+  AppendingBinaryByteStream Buffer;
+  BinaryStreamWriter SegmentWriter;
+  TypeRecordMapping Mapping;
+  ArrayRef<uint8_t> InjectedSegmentBytes;
+
+  uint32_t getCurrentSegmentLength() const;
+
+  void insertSegmentEnd(uint32_t Offset);
+  CVType createSegmentRecord(uint32_t OffBegin, uint32_t OffEnd,
+                             Optional<TypeIndex> RefersTo);
+
+public:
+  ContinuationRecordBuilder();
+  ~ContinuationRecordBuilder();
+
+  void begin(ContinuationRecordKind RecordKind);
+
+  // This template is explicitly instantiated in the implementation file for all
+  // supported types.  The method itself is ugly, so inlining it into the header
+  // file clutters an otherwise straightforward interface.
+  template <typename RecordType> void writeMemberType(RecordType &Record);
+
+  std::vector<CVType> end(TypeIndex Index);
+};
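+
+// A usage sketch (Members and NextIndex are hypothetical): build one logical
+// field list and let the builder split it into continuation records as the
+// maximum record size is reached.
+//
+//   ContinuationRecordBuilder CRB;
+//   CRB.begin(ContinuationRecordKind::FieldList);
+//   for (DataMemberRecord &M : Members)
+//     CRB.writeMemberType(M);
+//   std::vector<CVType> Records = CRB.end(NextIndex);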
+} // namespace codeview
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CONTINUATIONRECORDBUILDER_H
\ No newline at end of file
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h
new file mode 100644
index 0000000..78b2845
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h
@@ -0,0 +1,104 @@
+//===- DebugChecksumsSubsection.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+namespace codeview {
+
+class DebugStringTableSubsection;
+
+struct FileChecksumEntry {
+  uint32_t FileNameOffset;    // Byte offset of filename in global string table.
+  FileChecksumKind Kind;      // The type of checksum.
+  ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
+};
+
+} // end namespace codeview
+
+template <> struct VarStreamArrayExtractor<codeview::FileChecksumEntry> {
+public:
+  using ContextType = void;
+
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   codeview::FileChecksumEntry &Item);
+};
+
+namespace codeview {
+
+class DebugChecksumsSubsectionRef final : public DebugSubsectionRef {
+  using FileChecksumArray = VarStreamArray<codeview::FileChecksumEntry>;
+  using Iterator = FileChecksumArray::Iterator;
+
+public:
+  DebugChecksumsSubsectionRef()
+      : DebugSubsectionRef(DebugSubsectionKind::FileChecksums) {}
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::FileChecksums;
+  }
+
+  bool valid() const { return Checksums.valid(); }
+
+  Error initialize(BinaryStreamReader Reader);
+  Error initialize(BinaryStreamRef Stream);
+
+  Iterator begin() const { return Checksums.begin(); }
+  Iterator end() const { return Checksums.end(); }
+
+  const FileChecksumArray &getArray() const { return Checksums; }
+
+private:
+  FileChecksumArray Checksums;
+};
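+
+// A reading sketch (Contents is a hypothetical stream over the subsection):
+//
+//   DebugChecksumsSubsectionRef CSR;
+//   if (auto EC = CSR.initialize(Contents))
+//     return EC;
+//   for (const FileChecksumEntry &FC : CSR)
+//     ; // use FC.FileNameOffset, FC.Kind, FC.Checksum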
+
+class DebugChecksumsSubsection final : public DebugSubsection {
+public:
+  explicit DebugChecksumsSubsection(DebugStringTableSubsection &Strings);
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::FileChecksums;
+  }
+
+  void addChecksum(StringRef FileName, FileChecksumKind Kind,
+                   ArrayRef<uint8_t> Bytes);
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+  uint32_t mapChecksumOffset(StringRef FileName) const;
+
+private:
+  DebugStringTableSubsection &Strings;
+
+  DenseMap<uint32_t, uint32_t> OffsetMap;
+  uint32_t SerializedSize = 0;
+  BumpPtrAllocator Storage;
+  std::vector<FileChecksumEntry> Checksums;
+};
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h
new file mode 100644
index 0000000..2f9e981
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h
@@ -0,0 +1,68 @@
+//===- DebugCrossExSubsection.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <map>
+
+namespace llvm {
+namespace codeview {
+
+class DebugCrossModuleExportsSubsectionRef final : public DebugSubsectionRef {
+  using ReferenceArray = FixedStreamArray<CrossModuleExport>;
+  using Iterator = ReferenceArray::Iterator;
+
+public:
+  DebugCrossModuleExportsSubsectionRef()
+      : DebugSubsectionRef(DebugSubsectionKind::CrossScopeExports) {}
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::CrossScopeExports;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+  Error initialize(BinaryStreamRef Stream);
+
+  Iterator begin() const { return References.begin(); }
+  Iterator end() const { return References.end(); }
+
+private:
+  FixedStreamArray<CrossModuleExport> References;
+};
+
+class DebugCrossModuleExportsSubsection final : public DebugSubsection {
+public:
+  DebugCrossModuleExportsSubsection()
+      : DebugSubsection(DebugSubsectionKind::CrossScopeExports) {}
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::CrossScopeExports;
+  }
+
+  void addMapping(uint32_t Local, uint32_t Global);
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+private:
+  std::map<uint32_t, uint32_t> Mappings;
+};
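+
+// A writing sketch (the ids are hypothetical): each mapping pairs a
+// module-local id with its global cross-scope id.
+//
+//   DebugCrossModuleExportsSubsection Exports;
+//   Exports.addMapping(/*Local=*/0x1001, /*Global=*/0x2040);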
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h
new file mode 100644
index 0000000..8be7ef2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h
@@ -0,0 +1,95 @@
+//===- DebugCrossImpSubsection.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+namespace codeview {
+
+struct CrossModuleImportItem {
+  const CrossModuleImport *Header = nullptr;
+  FixedStreamArray<support::ulittle32_t> Imports;
+};
+
+} // end namespace codeview
+
+template <> struct VarStreamArrayExtractor<codeview::CrossModuleImportItem> {
+public:
+  using ContextType = void;
+
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   codeview::CrossModuleImportItem &Item);
+};
+
+namespace codeview {
+
+class DebugStringTableSubsection;
+
+class DebugCrossModuleImportsSubsectionRef final : public DebugSubsectionRef {
+  using ReferenceArray = VarStreamArray<CrossModuleImportItem>;
+  using Iterator = ReferenceArray::Iterator;
+
+public:
+  DebugCrossModuleImportsSubsectionRef()
+      : DebugSubsectionRef(DebugSubsectionKind::CrossScopeImports) {}
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::CrossScopeImports;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+  Error initialize(BinaryStreamRef Stream);
+
+  Iterator begin() const { return References.begin(); }
+  Iterator end() const { return References.end(); }
+
+private:
+  ReferenceArray References;
+};
+
+class DebugCrossModuleImportsSubsection final : public DebugSubsection {
+public:
+  explicit DebugCrossModuleImportsSubsection(
+      DebugStringTableSubsection &Strings)
+      : DebugSubsection(DebugSubsectionKind::CrossScopeImports),
+        Strings(Strings) {}
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::CrossScopeImports;
+  }
+
+  void addImport(StringRef Module, uint32_t ImportId);
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+private:
+  DebugStringTableSubsection &Strings;
+  StringMap<std::vector<support::ulittle32_t>> Mappings;
+};
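+
+// A writing sketch (the module name and id are hypothetical):
+//
+//   DebugCrossModuleImportsSubsection Imports(Strings);
+//   Imports.addImport("other.obj", /*ImportId=*/0x1000);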
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
new file mode 100644
index 0000000..1e329c7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
@@ -0,0 +1,60 @@
+//===- DebugFrameDataSubsection.h ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class DebugFrameDataSubsectionRef final : public DebugSubsectionRef {
+public:
+  DebugFrameDataSubsectionRef()
+      : DebugSubsectionRef(DebugSubsectionKind::FrameData) {}
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::FrameData;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+
+  FixedStreamArray<FrameData>::Iterator begin() const { return Frames.begin(); }
+  FixedStreamArray<FrameData>::Iterator end() const { return Frames.end(); }
+
+  const void *getRelocPtr() const { return RelocPtr; }
+
+private:
+  const uint32_t *RelocPtr = nullptr;
+  FixedStreamArray<FrameData> Frames;
+};
+
+class DebugFrameDataSubsection final : public DebugSubsection {
+public:
+  DebugFrameDataSubsection()
+      : DebugSubsection(DebugSubsectionKind::FrameData) {}
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::FrameData;
+  }
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+  void addFrameData(const FrameData &Frame);
+  void setFrames(ArrayRef<FrameData> Frames);
+
+private:
+  std::vector<FrameData> Frames;
+};
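+
+// A writing sketch: FrameData is a fixed-size entry whose fields the
+// producer fills in; zero-initialization here is only illustrative.
+//
+//   DebugFrameDataSubsection FDS;
+//   FrameData FD = {};
+//   FDS.addFrameData(FD);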
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h
new file mode 100644
index 0000000..b88c0ea
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h
@@ -0,0 +1,121 @@
+//===- DebugInlineeLinesSubsection.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+namespace codeview {
+
+class DebugChecksumsSubsection;
+
+enum class InlineeLinesSignature : uint32_t {
+  Normal,    // CV_INLINEE_SOURCE_LINE_SIGNATURE
+  ExtraFiles // CV_INLINEE_SOURCE_LINE_SIGNATURE_EX
+};
+
+struct InlineeSourceLineHeader {
+  TypeIndex Inlinee;                  // ID of the function that was inlined.
+  support::ulittle32_t FileID;        // Offset into FileChecksums subsection.
+  support::ulittle32_t SourceLineNum; // First line of inlined code.
+                                      // If extra files present:
+                                      //   ulittle32_t ExtraFileCount;
+                                      //   ulittle32_t Files[];
+};
+
+struct InlineeSourceLine {
+  const InlineeSourceLineHeader *Header;
+  FixedStreamArray<support::ulittle32_t> ExtraFiles;
+};
+
+} // end namespace codeview
+
+template <> struct VarStreamArrayExtractor<codeview::InlineeSourceLine> {
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   codeview::InlineeSourceLine &Item);
+
+  bool HasExtraFiles = false;
+};
+
+namespace codeview {
+
+class DebugInlineeLinesSubsectionRef final : public DebugSubsectionRef {
+  using LinesArray = VarStreamArray<InlineeSourceLine>;
+  using Iterator = LinesArray::Iterator;
+
+public:
+  DebugInlineeLinesSubsectionRef();
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::InlineeLines;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+  bool hasExtraFiles() const;
+
+  Iterator begin() const { return Lines.begin(); }
+  Iterator end() const { return Lines.end(); }
+
+private:
+  InlineeLinesSignature Signature;
+  VarStreamArray<InlineeSourceLine> Lines;
+};
+
+class DebugInlineeLinesSubsection final : public DebugSubsection {
+public:
+  struct Entry {
+    std::vector<support::ulittle32_t> ExtraFiles;
+    InlineeSourceLineHeader Header;
+  };
+
+  DebugInlineeLinesSubsection(DebugChecksumsSubsection &Checksums,
+                              bool HasExtraFiles = false);
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::InlineeLines;
+  }
+
+  Error commit(BinaryStreamWriter &Writer) const override;
+  uint32_t calculateSerializedSize() const override;
+
+  void addInlineSite(TypeIndex FuncId, StringRef FileName, uint32_t SourceLine);
+  void addExtraFile(StringRef FileName);
+
+  bool hasExtraFiles() const { return HasExtraFiles; }
+  void setHasExtraFiles(bool Has) { HasExtraFiles = Has; }
+
+  std::vector<Entry>::const_iterator begin() const { return Entries.begin(); }
+  std::vector<Entry>::const_iterator end() const { return Entries.end(); }
+
+private:
+  DebugChecksumsSubsection &Checksums;
+  bool HasExtraFiles = false;
+  uint32_t ExtraFileCount = 0;
+  std::vector<Entry> Entries;
+};
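+
+// A writing sketch (FuncId and the file names are hypothetical): one entry
+// per inlined function, plus extra file ids when the subsection was created
+// with HasExtraFiles.
+//
+//   DebugInlineeLinesSubsection Inlinees(Checksums, /*HasExtraFiles=*/true);
+//   Inlinees.addInlineSite(FuncId, "foo.cpp", /*SourceLine=*/42);
+//   Inlinees.addExtraFile("foo.inl");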
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h
new file mode 100644
index 0000000..53044b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h
@@ -0,0 +1,150 @@
+//===- DebugLinesSubsection.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class DebugChecksumsSubsection;
+class DebugStringTableSubsection;
+
+// Corresponds to the `CV_DebugSLinesHeader_t` structure.
+struct LineFragmentHeader {
+  support::ulittle32_t RelocOffset;  // Code offset of line contribution.
+  support::ulittle16_t RelocSegment; // Code segment of line contribution.
+  support::ulittle16_t Flags;        // See LineFlags enumeration.
+  support::ulittle32_t CodeSize;     // Code size of this line contribution.
+};
+
+// Corresponds to the `CV_DebugSLinesFileBlockHeader_t` structure.
+struct LineBlockFragmentHeader {
+  support::ulittle32_t NameIndex; // Offset of FileChecksum entry in File
+                                  // checksums buffer.  The checksum entry then
+                                  // contains another offset into the string
+                                  // table of the actual name.
+  support::ulittle32_t NumLines;  // Number of lines
+  support::ulittle32_t BlockSize; // Code size of block, in bytes.
+  // The following two variable length arrays appear immediately after the
+  // header.  The structure definitions follow.
+  // LineNumberEntry   Lines[NumLines];
+  // ColumnNumberEntry Columns[NumLines];
+};
+
+// Corresponds to `CV_Line_t` structure
+struct LineNumberEntry {
+  support::ulittle32_t Offset; // Offset to start of code bytes for line number
+  support::ulittle32_t Flags;  // Start:24, End:7, IsStatement:1
+};
+
+// Corresponds to `CV_Column_t` structure
+struct ColumnNumberEntry {
+  support::ulittle16_t StartColumn;
+  support::ulittle16_t EndColumn;
+};
+
+struct LineColumnEntry {
+  support::ulittle32_t NameIndex;
+  FixedStreamArray<LineNumberEntry> LineNumbers;
+  FixedStreamArray<ColumnNumberEntry> Columns;
+};
+
+class LineColumnExtractor {
+public:
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   LineColumnEntry &Item);
+
+  const LineFragmentHeader *Header = nullptr;
+};
+
+class DebugLinesSubsectionRef final : public DebugSubsectionRef {
+  friend class LineColumnExtractor;
+
+  using LineInfoArray = VarStreamArray<LineColumnEntry, LineColumnExtractor>;
+  using Iterator = LineInfoArray::Iterator;
+
+public:
+  DebugLinesSubsectionRef();
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::Lines;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+
+  Iterator begin() const { return LinesAndColumns.begin(); }
+  Iterator end() const { return LinesAndColumns.end(); }
+
+  const LineFragmentHeader *header() const { return Header; }
+
+  bool hasColumnInfo() const;
+
+private:
+  const LineFragmentHeader *Header = nullptr;
+  LineInfoArray LinesAndColumns;
+};
+
+class DebugLinesSubsection final : public DebugSubsection {
+  struct Block {
+    Block(uint32_t ChecksumBufferOffset)
+        : ChecksumBufferOffset(ChecksumBufferOffset) {}
+
+    uint32_t ChecksumBufferOffset;
+    std::vector<LineNumberEntry> Lines;
+    std::vector<ColumnNumberEntry> Columns;
+  };
+
+public:
+  DebugLinesSubsection(DebugChecksumsSubsection &Checksums,
+                       DebugStringTableSubsection &Strings);
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::Lines;
+  }
+
+  void createBlock(StringRef FileName);
+  void addLineInfo(uint32_t Offset, const LineInfo &Line);
+  void addLineAndColumnInfo(uint32_t Offset, const LineInfo &Line,
+                            uint32_t ColStart, uint32_t ColEnd);
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+  void setRelocationAddress(uint16_t Segment, uint32_t Offset);
+  void setCodeSize(uint32_t Size);
+  void setFlags(LineFlags Flags);
+
+  bool hasColumnInfo() const;
+
+private:
+  DebugChecksumsSubsection &Checksums;
+  uint32_t RelocOffset = 0;
+  uint16_t RelocSegment = 0;
+  uint32_t CodeSize = 0;
+  LineFlags Flags = LF_None;
+  std::vector<Block> Blocks;
+};
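+
+// A writing sketch (segment/offset values are hypothetical): one block per
+// contributing file, then individual line (and optionally column) entries.
+//
+//   DebugLinesSubsection Lines(Checksums, Strings);
+//   Lines.setRelocationAddress(/*Segment=*/1, /*Offset=*/0x1000);
+//   Lines.createBlock("main.cpp");
+//   Lines.addLineAndColumnInfo(/*Offset=*/0x10, LineInfo(7, 7, true),
+//                              /*ColStart=*/1, /*ColEnd=*/5);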
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h
new file mode 100644
index 0000000..bebc960
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h
@@ -0,0 +1,97 @@
+//===- DebugStringTableSubsection.h - CodeView String Table -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BinaryStreamReader;
+
+namespace codeview {
+
+/// Represents a read-only view of a CodeView string table.  This is a very
+/// simple flat buffer consisting of null-terminated strings, where strings
+/// are retrieved by their offset in the buffer.  DebugStringTableSubsectionRef
+/// does not own the underlying storage for the buffer.
+class DebugStringTableSubsectionRef : public DebugSubsectionRef {
+public:
+  DebugStringTableSubsectionRef();
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::StringTable;
+  }
+
+  Error initialize(BinaryStreamRef Contents);
+  Error initialize(BinaryStreamReader &Reader);
+
+  Expected<StringRef> getString(uint32_t Offset) const;
+
+  bool valid() const { return Stream.valid(); }
+
+  BinaryStreamRef getBuffer() const { return Stream; }
+
+private:
+  BinaryStreamRef Stream;
+};
+
+/// Represents a read-write view of a CodeView string table.
+/// DebugStringTableSubsection owns the underlying storage for the table, and is
+/// capable of serializing the string table into a format understood by
+/// DebugStringTableSubsectionRef.
+class DebugStringTableSubsection : public DebugSubsection {
+public:
+  DebugStringTableSubsection();
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::StringTable;
+  }
+
+  // If string S does not exist in the string table, insert it.
+  // Returns the ID for S.
+  uint32_t insert(StringRef S);
+
+  // Return the ID for string S.  Assumes S exists in the table.
+  uint32_t getIdForString(StringRef S) const;
+
+  StringRef getStringForId(uint32_t Id) const;
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+  uint32_t size() const;
+
+  StringMap<uint32_t>::const_iterator begin() const {
+    return StringToId.begin();
+  }
+
+  StringMap<uint32_t>::const_iterator end() const { return StringToId.end(); }
+
+  std::vector<uint32_t> sortedIds() const;
+
+private:
+  DenseMap<uint32_t, StringRef> IdToString;
+  StringMap<uint32_t> StringToId;
+  uint32_t StringSize = 1;
+};
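+
+// A usage sketch: ids are byte offsets into the eventual serialized buffer,
+// so inserting the same string twice returns the same id.
+//
+//   DebugStringTableSubsection Strings;
+//   uint32_t Id = Strings.insert("main.cpp");
+//   assert(Id == Strings.getIdForString("main.cpp"));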
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsection.h
new file mode 100644
index 0000000..e427e00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsection.h
@@ -0,0 +1,52 @@
+//===- DebugSubsection.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+namespace codeview {
+
+class DebugSubsectionRef {
+public:
+  explicit DebugSubsectionRef(DebugSubsectionKind Kind) : Kind(Kind) {}
+  virtual ~DebugSubsectionRef();
+
+  static bool classof(const DebugSubsectionRef *S) { return true; }
+
+  DebugSubsectionKind kind() const { return Kind; }
+
+protected:
+  DebugSubsectionKind Kind;
+};
+
+class DebugSubsection {
+public:
+  explicit DebugSubsection(DebugSubsectionKind Kind) : Kind(Kind) {}
+  virtual ~DebugSubsection();
+
+  static bool classof(const DebugSubsection *S) { return true; }
+
+  DebugSubsectionKind kind() const { return Kind; }
+
+  virtual Error commit(BinaryStreamWriter &Writer) const = 0;
+  virtual uint32_t calculateSerializedSize() const = 0;
+
+protected:
+  DebugSubsectionKind Kind;
+};
+
+} // namespace codeview
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h
new file mode 100644
index 0000000..fc0cf0d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h
@@ -0,0 +1,103 @@
+//===- DebugSubsectionRecord.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MathExtras.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BinaryStreamWriter;
+
+namespace codeview {
+
+class DebugSubsection;
+
+// Corresponds to the `CV_DebugSSubsectionHeader_t` structure.
+struct DebugSubsectionHeader {
+  support::ulittle32_t Kind;   // codeview::DebugSubsectionKind enum
+  support::ulittle32_t Length; // number of bytes occupied by this record.
+};
+
+class DebugSubsectionRecord {
+public:
+  DebugSubsectionRecord();
+  DebugSubsectionRecord(DebugSubsectionKind Kind, BinaryStreamRef Data,
+                        CodeViewContainer Container);
+
+  static Error initialize(BinaryStreamRef Stream, DebugSubsectionRecord &Info,
+                          CodeViewContainer Container);
+
+  uint32_t getRecordLength() const;
+  DebugSubsectionKind kind() const;
+  BinaryStreamRef getRecordData() const;
+
+private:
+  CodeViewContainer Container = CodeViewContainer::ObjectFile;
+  DebugSubsectionKind Kind = DebugSubsectionKind::None;
+  BinaryStreamRef Data;
+};
+
+class DebugSubsectionRecordBuilder {
+public:
+  DebugSubsectionRecordBuilder(std::shared_ptr<DebugSubsection> Subsection,
+                               CodeViewContainer Container);
+
+  /// Use this to copy existing subsections directly from source to destination.
+  /// For example, line table subsections in an object file only need to be
+  /// relocated before being copied into the PDB.
+  DebugSubsectionRecordBuilder(const DebugSubsectionRecord &Contents,
+                               CodeViewContainer Container);
+
+  uint32_t calculateSerializedLength();
+  Error commit(BinaryStreamWriter &Writer) const;
+
+private:
+  /// The subsection to build. Will be null if Contents is non-empty.
+  std::shared_ptr<DebugSubsection> Subsection;
+
+  /// The bytes of the subsection. Only non-empty if Subsection is null.
+  DebugSubsectionRecord Contents;
+
+  CodeViewContainer Container;
+};
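+
+// A sketch of the two construction modes (Subsection and Existing are
+// hypothetical): wrap a new subsection for serialization, or forward an
+// already-serialized record unchanged.
+//
+//   DebugSubsectionRecordBuilder B1(std::move(Subsection),
+//                                   CodeViewContainer::Pdb);
+//   DebugSubsectionRecordBuilder B2(Existing, CodeViewContainer::Pdb);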
+
+} // end namespace codeview
+
+template <> struct VarStreamArrayExtractor<codeview::DebugSubsectionRecord> {
+  Error operator()(BinaryStreamRef Stream, uint32_t &Length,
+                   codeview::DebugSubsectionRecord &Info) {
+    // FIXME: We need to pass the container type through to this function.  In
+    // practice this isn't super important since the subsection header describes
+    // its length and we can just skip it.  It's more important when writing.
+    if (auto EC = codeview::DebugSubsectionRecord::initialize(
+            Stream, Info, codeview::CodeViewContainer::Pdb))
+      return EC;
+    Length = alignTo(Info.getRecordLength(), 4);
+    return Error::success();
+  }
+};
+
+namespace codeview {
+
+using DebugSubsectionArray = VarStreamArray<DebugSubsectionRecord>;
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h
new file mode 100644
index 0000000..75f749d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h
@@ -0,0 +1,114 @@
+//===- DebugSubsectionVisitor.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace codeview {
+
+class DebugChecksumsSubsectionRef;
+class DebugSubsectionRecord;
+class DebugInlineeLinesSubsectionRef;
+class DebugCrossModuleExportsSubsectionRef;
+class DebugCrossModuleImportsSubsectionRef;
+class DebugFrameDataSubsectionRef;
+class DebugLinesSubsectionRef;
+class DebugStringTableSubsectionRef;
+class DebugSymbolRVASubsectionRef;
+class DebugSymbolsSubsectionRef;
+class DebugUnknownSubsectionRef;
+class StringsAndChecksumsRef;
+
+class DebugSubsectionVisitor {
+public:
+  virtual ~DebugSubsectionVisitor() = default;
+
+  virtual Error visitUnknown(DebugUnknownSubsectionRef &Unknown) {
+    return Error::success();
+  }
+  virtual Error visitLines(DebugLinesSubsectionRef &Lines,
+                           const StringsAndChecksumsRef &State) = 0;
+  virtual Error visitFileChecksums(DebugChecksumsSubsectionRef &Checksums,
+                                   const StringsAndChecksumsRef &State) = 0;
+  virtual Error visitInlineeLines(DebugInlineeLinesSubsectionRef &Inlinees,
+                                  const StringsAndChecksumsRef &State) = 0;
+  virtual Error
+  visitCrossModuleExports(DebugCrossModuleExportsSubsectionRef &CSE,
+                          const StringsAndChecksumsRef &State) = 0;
+  virtual Error
+  visitCrossModuleImports(DebugCrossModuleImportsSubsectionRef &CSE,
+                          const StringsAndChecksumsRef &State) = 0;
+
+  virtual Error visitStringTable(DebugStringTableSubsectionRef &ST,
+                                 const StringsAndChecksumsRef &State) = 0;
+
+  virtual Error visitSymbols(DebugSymbolsSubsectionRef &CSE,
+                             const StringsAndChecksumsRef &State) = 0;
+
+  virtual Error visitFrameData(DebugFrameDataSubsectionRef &FD,
+                               const StringsAndChecksumsRef &State) = 0;
+  virtual Error visitCOFFSymbolRVAs(DebugSymbolRVASubsectionRef &RVAs,
+                                    const StringsAndChecksumsRef &State) = 0;
+};
+
+Error visitDebugSubsection(const DebugSubsectionRecord &R,
+                           DebugSubsectionVisitor &V,
+                           const StringsAndChecksumsRef &State);
+
+namespace detail {
+template <typename T>
+Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
+                            StringsAndChecksumsRef &State) {
+  State.initialize(std::forward<T>(FragmentRange));
+
+  for (const DebugSubsectionRecord &L : FragmentRange) {
+    if (auto EC = visitDebugSubsection(L, V, State))
+      return EC;
+  }
+  return Error::success();
+}
+} // namespace detail
+
+template <typename T>
+Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
+  StringsAndChecksumsRef State;
+  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
+                                       State);
+}
+
+template <typename T>
+Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
+                            const DebugStringTableSubsectionRef &Strings) {
+  StringsAndChecksumsRef State(Strings);
+  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
+                                       State);
+}
+
+template <typename T>
+Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
+                            const DebugStringTableSubsectionRef &Strings,
+                            const DebugChecksumsSubsectionRef &Checksums) {
+  StringsAndChecksumsRef State(Strings, Checksums);
+  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
+                                       State);
+}
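+
+// Illustrative driver (an editor's sketch, not part of the original header).
+// MyVisitor is a hypothetical DebugSubsectionVisitor subclass, and
+// SubsectionStream is an assumed BinaryStreamRef over a module's subsection
+// data:
+//
+//   DebugSubsectionArray Subsections;
+//   BinaryStreamReader Reader(SubsectionStream);
+//   if (auto EC = Reader.readArray(Subsections, Reader.getLength()))
+//     return EC;
+//   MyVisitor V;
+//   if (auto EC = visitDebugSubsections(Subsections, V, Strings, Checksums))
+//     return EC;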
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h
new file mode 100644
index 0000000..a4c04b5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h
@@ -0,0 +1,67 @@
+//===- DebugSymbolRVASubsection.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class BinaryStreamReader;
+
+namespace codeview {
+
+class DebugSymbolRVASubsectionRef final : public DebugSubsectionRef {
+public:
+  using ArrayType = FixedStreamArray<support::ulittle32_t>;
+
+  DebugSymbolRVASubsectionRef();
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::CoffSymbolRVA;
+  }
+
+  ArrayType::Iterator begin() const { return RVAs.begin(); }
+  ArrayType::Iterator end() const { return RVAs.end(); }
+
+  Error initialize(BinaryStreamReader &Reader);
+
+private:
+  ArrayType RVAs;
+};
+
+class DebugSymbolRVASubsection final : public DebugSubsection {
+public:
+  DebugSymbolRVASubsection();
+
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::CoffSymbolRVA;
+  }
+
+  Error commit(BinaryStreamWriter &Writer) const override;
+  uint32_t calculateSerializedSize() const override;
+
+  void addRVA(uint32_t RVA) { RVAs.push_back(support::ulittle32_t(RVA)); }
+
+private:
+  std::vector<support::ulittle32_t> RVAs;
+};
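+
+// Illustrative usage (an editor's sketch, not part of the original header).
+// Writer is an assumed BinaryStreamWriter:
+//
+//   DebugSymbolRVASubsection RVASub;
+//   RVASub.addRVA(0x1000);
+//   RVASub.addRVA(0x2040);
+//   uint32_t Size = RVASub.calculateSerializedSize(); // 8: one ulittle32_t each
+//   if (auto EC = RVASub.commit(Writer))
+//     return EC;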
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h
new file mode 100644
index 0000000..dfda7de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h
@@ -0,0 +1,56 @@
+//===- DebugSymbolsSubsection.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLSSUBSECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLSSUBSECTION_H
+
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class DebugSymbolsSubsectionRef final : public DebugSubsectionRef {
+public:
+  DebugSymbolsSubsectionRef()
+      : DebugSubsectionRef(DebugSubsectionKind::Symbols) {}
+
+  static bool classof(const DebugSubsectionRef *S) {
+    return S->kind() == DebugSubsectionKind::Symbols;
+  }
+
+  Error initialize(BinaryStreamReader Reader);
+
+  CVSymbolArray::Iterator begin() const { return Records.begin(); }
+  CVSymbolArray::Iterator end() const { return Records.end(); }
+
+private:
+  CVSymbolArray Records;
+};
+
+class DebugSymbolsSubsection final : public DebugSubsection {
+public:
+  DebugSymbolsSubsection() : DebugSubsection(DebugSubsectionKind::Symbols) {}
+  static bool classof(const DebugSubsection *S) {
+    return S->kind() == DebugSubsectionKind::Symbols;
+  }
+
+  uint32_t calculateSerializedSize() const override;
+  Error commit(BinaryStreamWriter &Writer) const override;
+
+  void addSymbol(CVSymbol Symbol);
+
+private:
+  uint32_t Length = 0;
+  std::vector<CVSymbol> Records;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h
new file mode 100644
index 0000000..ea9a96c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h
@@ -0,0 +1,32 @@
+//===- DebugUnknownSubsection.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGUNKNOWNFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGUNKNOWNFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/Support/BinaryStreamRef.h"
+
+namespace llvm {
+namespace codeview {
+
+class DebugUnknownSubsectionRef final : public DebugSubsectionRef {
+public:
+  DebugUnknownSubsectionRef(DebugSubsectionKind Kind, BinaryStreamRef Data)
+      : DebugSubsectionRef(Kind), Data(Data) {}
+
+  BinaryStreamRef getData() const { return Data; }
+
+private:
+  BinaryStreamRef Data;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/EnumTables.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/EnumTables.h
new file mode 100644
index 0000000..ee0f0f7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/EnumTables.h
@@ -0,0 +1,45 @@
+//===- EnumTables.h - Enum to string conversion tables ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H
+#define LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include <cstdint>
+
+namespace llvm {
+namespace codeview {
+
+ArrayRef<EnumEntry<SymbolKind>> getSymbolTypeNames();
+ArrayRef<EnumEntry<TypeLeafKind>> getTypeLeafNames();
+ArrayRef<EnumEntry<uint16_t>> getRegisterNames();
+ArrayRef<EnumEntry<uint32_t>> getPublicSymFlagNames();
+ArrayRef<EnumEntry<uint8_t>> getProcSymFlagNames();
+ArrayRef<EnumEntry<uint16_t>> getLocalFlagNames();
+ArrayRef<EnumEntry<uint8_t>> getFrameCookieKindNames();
+ArrayRef<EnumEntry<SourceLanguage>> getSourceLanguageNames();
+ArrayRef<EnumEntry<uint32_t>> getCompileSym2FlagNames();
+ArrayRef<EnumEntry<uint32_t>> getCompileSym3FlagNames();
+ArrayRef<EnumEntry<uint32_t>> getFileChecksumNames();
+ArrayRef<EnumEntry<unsigned>> getCPUTypeNames();
+ArrayRef<EnumEntry<uint32_t>> getFrameProcSymFlagNames();
+ArrayRef<EnumEntry<uint16_t>> getExportSymFlagNames();
+ArrayRef<EnumEntry<uint32_t>> getModuleSubstreamKindNames();
+ArrayRef<EnumEntry<uint8_t>> getThunkOrdinalNames();
+ArrayRef<EnumEntry<uint16_t>> getTrampolineNames();
+ArrayRef<EnumEntry<COFF::SectionCharacteristics>>
+getImageSectionCharacteristicNames();
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/Formatters.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/Formatters.h
new file mode 100644
index 0000000..278ad02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/Formatters.h
@@ -0,0 +1,73 @@
+//===- Formatters.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H
+#define LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/GUID.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/FormatAdapters.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace codeview {
+
+namespace detail {
+
+class GuidAdapter final : public FormatAdapter<ArrayRef<uint8_t>> {
+  ArrayRef<uint8_t> Guid;
+
+public:
+  explicit GuidAdapter(ArrayRef<uint8_t> Guid);
+  explicit GuidAdapter(StringRef Guid);
+
+  void format(raw_ostream &Stream, StringRef Style) override;
+};
+
+} // end namespace detail
+
+inline detail::GuidAdapter fmt_guid(StringRef Item) {
+  return detail::GuidAdapter(Item);
+}
+
+inline detail::GuidAdapter fmt_guid(ArrayRef<uint8_t> Item) {
+  return detail::GuidAdapter(Item);
+}
+
+} // end namespace codeview
+
+template <> struct format_provider<codeview::TypeIndex> {
+public:
+  static void format(const codeview::TypeIndex &V, raw_ostream &Stream,
+                     StringRef Style) {
+    if (V.isNoneType())
+      Stream << "<no type>";
+    else {
+      Stream << formatv("{0:X+4}", V.getIndex());
+      if (V.isSimple())
+        Stream << " (" << codeview::TypeIndex::simpleTypeName(V) << ")";
+    }
+  }
+};
+
+template <> struct format_provider<codeview::GUID> {
+  static void format(const codeview::GUID &V, llvm::raw_ostream &Stream,
+                     StringRef Style) {
+    Stream << V;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/FunctionId.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/FunctionId.h
new file mode 100644
index 0000000..1af3da8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/FunctionId.h
@@ -0,0 +1,56 @@
+//===- FunctionId.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+#define LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+class FunctionId {
+public:
+  FunctionId() : Index(0) {}
+
+  explicit FunctionId(uint32_t Index) : Index(Index) {}
+
+  uint32_t getIndex() const { return Index; }
+
+private:
+  uint32_t Index;
+};
+
+inline bool operator==(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() == B.getIndex();
+}
+
+inline bool operator!=(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() != B.getIndex();
+}
+
+inline bool operator<(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() < B.getIndex();
+}
+
+inline bool operator<=(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() <= B.getIndex();
+}
+
+inline bool operator>(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() > B.getIndex();
+}
+
+inline bool operator>=(const FunctionId &A, const FunctionId &B) {
+  return A.getIndex() >= B.getIndex();
+}
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/GUID.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/GUID.h
new file mode 100644
index 0000000..a055ce9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/GUID.h
@@ -0,0 +1,55 @@
+//===- GUID.h ---------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_GUID_H
+#define LLVM_DEBUGINFO_CODEVIEW_GUID_H
+
+#include <cstdint>
+#include <cstring>
+
+namespace llvm {
+class raw_ostream;
+
+namespace codeview {
+
+/// This represents the 'GUID' type from windows.h.
+struct GUID {
+  uint8_t Guid[16];
+};
+
+inline bool operator==(const GUID &LHS, const GUID &RHS) {
+  return 0 == ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid));
+}
+
+inline bool operator<(const GUID &LHS, const GUID &RHS) {
+  return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) < 0;
+}
+
+inline bool operator<=(const GUID &LHS, const GUID &RHS) {
+  return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) <= 0;
+}
+
+inline bool operator>(const GUID &LHS, const GUID &RHS) {
+  return !(LHS <= RHS);
+}
+
+inline bool operator>=(const GUID &LHS, const GUID &RHS) {
+  return !(LHS < RHS);
+}
+
+inline bool operator!=(const GUID &LHS, const GUID &RHS) {
+  return !(LHS == RHS);
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const GUID &Guid);
+
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h
new file mode 100644
index 0000000..c470416
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h
@@ -0,0 +1,100 @@
+//===- GlobalTypeTableBuilder.h ----------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeHashing.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class ContinuationRecordBuilder;
+
+class GlobalTypeTableBuilder : public TypeCollection {
+  /// Storage for records.  These need to outlive the TypeTableBuilder.
+  BumpPtrAllocator &RecordStorage;
+
+  /// A serializer that can write non-continuation leaf types.  Only used as
+  /// a convenience function so that we can provide an interface method to
+  /// write an unserialized record.
+  SimpleTypeSerializer SimpleSerializer;
+
+  /// Hash table.
+  DenseMap<GloballyHashedType, TypeIndex> HashedRecords;
+
+  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
+  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
+
+  /// Contains a list of all hash values indexed by TypeIndex.toArrayIndex().
+  SmallVector<GloballyHashedType, 2> SeenHashes;
+
+public:
+  explicit GlobalTypeTableBuilder(BumpPtrAllocator &Storage);
+  ~GlobalTypeTableBuilder();
+
+  // TypeTableCollection overrides
+  Optional<TypeIndex> getFirst() override;
+  Optional<TypeIndex> getNext(TypeIndex Prev) override;
+  CVType getType(TypeIndex Index) override;
+  StringRef getTypeName(TypeIndex Index) override;
+  bool contains(TypeIndex Index) override;
+  uint32_t size() override;
+  uint32_t capacity() override;
+
+  // public interface
+  void reset();
+  TypeIndex nextTypeIndex() const;
+
+  BumpPtrAllocator &getAllocator() { return RecordStorage; }
+
+  ArrayRef<ArrayRef<uint8_t>> records() const;
+  ArrayRef<GloballyHashedType> hashes() const;
+
+  template <typename CreateFunc>
+  TypeIndex insertRecordAs(GloballyHashedType Hash, size_t RecordSize,
+                           CreateFunc Create) {
+    auto Result = HashedRecords.try_emplace(Hash, nextTypeIndex());
+
+    if (LLVM_UNLIKELY(Result.second)) {
+      uint8_t *Stable = RecordStorage.Allocate<uint8_t>(RecordSize);
+      MutableArrayRef<uint8_t> Data(Stable, RecordSize);
+      SeenRecords.push_back(Create(Data));
+      SeenHashes.push_back(Hash);
+    }
+
+    // Return the index assigned to this record, whether it was newly
+    // inserted above or already present in the hash table.
+    return Result.first->second;
+  }
+
+  TypeIndex insertRecordBytes(ArrayRef<uint8_t> Data);
+  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);
+
+  template <typename T> TypeIndex writeLeafType(T &Record) {
+    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
+    return insertRecordBytes(Data);
+  }
+};
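+
+// Illustrative dedup flow (an editor's sketch, not part of the original
+// header).  If Hash was seen before, the existing TypeIndex is returned and
+// Create is never invoked; otherwise Create must fill the stable storage and
+// return it.  SourceBytes is an assumed ArrayRef<uint8_t> of serialized
+// record bytes:
+//
+//   TypeIndex TI = Builder.insertRecordAs(
+//       Hash, SourceBytes.size(), [&](MutableArrayRef<uint8_t> Storage) {
+//         std::copy(SourceBytes.begin(), SourceBytes.end(), Storage.begin());
+//         return ArrayRef<uint8_t>(Storage);
+//       });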
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h
new file mode 100644
index 0000000..16d7869
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h
@@ -0,0 +1,116 @@
+//===- LazyRandomTypeCollection.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/StringSaver.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+/// \brief Provides amortized O(1) random access to a CodeView type stream.
+/// Normally to access a type from a type stream, you must know its byte
+/// offset into the type stream, because type records are variable-length.
+/// However, this is not the way we prefer to access them.  For example, given
+/// a symbol record one of the fields may be the TypeIndex of the symbol's
+/// type record.  Or given a type record such as an array type, there might
+/// be a TypeIndex for the element type.  Sequential access is perfect when
+/// we're just dumping every entry, but it's very poor for real world usage.
+///
+/// Type streams in PDBs contain an additional field which is a list of pairs
+/// containing indices and their corresponding offsets, roughly every ~8KB of
+/// record data.  This general idea need not be confined to PDBs though.  By
+/// supplying such an array, the producer of a type stream can allow the
+/// consumer much better access time, because the consumer can find the nearest
+/// index in this array, and do a linear scan forward only from there.
+///
+/// LazyRandomTypeCollection implements this algorithm, but additionally goes
+/// one step further by caching offsets of every record that has been visited at
+/// least once.  This way, even repeated visits of the same record will never
+/// require more than one linear scan.  For a type stream of N elements divided
+/// into M chunks of roughly equal size, this yields a worst case lookup time
+/// of O(N/M) and an amortized time of O(1).
+class LazyRandomTypeCollection : public TypeCollection {
+  using PartialOffsetArray = FixedStreamArray<TypeIndexOffset>;
+
+  struct CacheEntry {
+    CVType Type;
+    uint32_t Offset;
+    StringRef Name;
+  };
+
+public:
+  explicit LazyRandomTypeCollection(uint32_t RecordCountHint);
+  LazyRandomTypeCollection(StringRef Data, uint32_t RecordCountHint);
+  LazyRandomTypeCollection(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
+  LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint,
+                           PartialOffsetArray PartialOffsets);
+  LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint);
+
+  void reset(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
+  void reset(StringRef Data, uint32_t RecordCountHint);
+  void reset(BinaryStreamReader &Reader, uint32_t RecordCountHint);
+
+  uint32_t getOffsetOfType(TypeIndex Index);
+
+  Optional<CVType> tryGetType(TypeIndex Index);
+
+  CVType getType(TypeIndex Index) override;
+  StringRef getTypeName(TypeIndex Index) override;
+  bool contains(TypeIndex Index) override;
+  uint32_t size() override;
+  uint32_t capacity() override;
+  Optional<TypeIndex> getFirst() override;
+  Optional<TypeIndex> getNext(TypeIndex Prev) override;
+
+private:
+  Error ensureTypeExists(TypeIndex Index);
+  void ensureCapacityFor(TypeIndex Index);
+
+  Error visitRangeForType(TypeIndex TI);
+  Error fullScanForType(TypeIndex TI);
+  void visitRange(TypeIndex Begin, uint32_t BeginOffset, TypeIndex End);
+
+  /// Number of actual records.
+  uint32_t Count = 0;
+
+  /// The largest type index which we've visited.
+  TypeIndex LargestTypeIndex = TypeIndex::None();
+
+  BumpPtrAllocator Allocator;
+  StringSaver NameStorage;
+
+  /// The type array to allow random access visitation of.
+  CVTypeArray Types;
+
+  std::vector<CacheEntry> Records;
+
+  /// An array of index offsets for the given type stream, allowing log(N)
+  /// lookups of a type record by index.  Similar to KnownOffsets but only
+  /// contains offsets for some type indices, some of which may never have
+  /// been visited.
+  PartialOffsetArray PartialOffsets;
+};
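+
+// Illustrative usage (an editor's sketch, not part of the original header).
+// TypeStream (a CVTypeArray) and PartialOffsets are assumed to come from a
+// PDB's type stream:
+//
+//   LazyRandomTypeCollection Types(TypeStream, /*RecordCountHint=*/4096,
+//                                  PartialOffsets);
+//   TypeIndex TI(0x1000);              // first non-simple type index
+//   CVType Record = Types.getType(TI); // scans forward from nearest offset
+//   CVType Again = Types.getType(TI);  // now served from the visited cache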
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/Line.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/Line.h
new file mode 100644
index 0000000..ac229c3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/Line.h
@@ -0,0 +1,133 @@
+//===- Line.h ---------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_LINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_LINE_H
+
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/Endian.h"
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+using llvm::support::ulittle32_t;
+
+class LineInfo {
+public:
+  enum : uint32_t {
+    AlwaysStepIntoLineNumber = 0xfeefee,
+    NeverStepIntoLineNumber = 0xf00f00
+  };
+
+  enum : int { EndLineDeltaShift = 24 };
+
+  enum : uint32_t {
+    StartLineMask = 0x00ffffff,
+    EndLineDeltaMask = 0x7f000000,
+    StatementFlag = 0x80000000u
+  };
+
+  LineInfo(uint32_t StartLine, uint32_t EndLine, bool IsStatement);
+  LineInfo(uint32_t LineData) : LineData(LineData) {}
+
+  uint32_t getStartLine() const { return LineData & StartLineMask; }
+
+  uint32_t getLineDelta() const {
+    return (LineData & EndLineDeltaMask) >> EndLineDeltaShift;
+  }
+
+  uint32_t getEndLine() const { return getStartLine() + getLineDelta(); }
+
+  bool isStatement() const { return (LineData & StatementFlag) != 0; }
+
+  uint32_t getRawData() const { return LineData; }
+
+  bool isAlwaysStepInto() const {
+    return getStartLine() == AlwaysStepIntoLineNumber;
+  }
+
+  bool isNeverStepInto() const {
+    return getStartLine() == NeverStepIntoLineNumber;
+  }
+
+private:
+  uint32_t LineData;
+};
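+
+// Worked example of the bit packing (an editor's sketch, not part of the
+// original header):
+//
+//   LineInfo LI(/*StartLine=*/10, /*EndLine=*/12, /*IsStatement=*/true);
+//   LI.getStartLine(); // 10   (bits 0-23)
+//   LI.getLineDelta(); // 2    (bits 24-30)
+//   LI.isStatement();  // true (bit 31)
+//   LI.getRawData();   // 0x8200000A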
+
+class ColumnInfo {
+private:
+  static const uint32_t StartColumnMask = 0x0000ffffu;
+  static const uint32_t EndColumnMask = 0xffff0000u;
+  static const int EndColumnShift = 16;
+
+public:
+  ColumnInfo(uint16_t StartColumn, uint16_t EndColumn) {
+    ColumnData =
+        (static_cast<uint32_t>(StartColumn) & StartColumnMask) |
+        ((static_cast<uint32_t>(EndColumn) << EndColumnShift) & EndColumnMask);
+  }
+
+  uint16_t getStartColumn() const {
+    return static_cast<uint16_t>(ColumnData & StartColumnMask);
+  }
+
+  uint16_t getEndColumn() const {
+    return static_cast<uint16_t>((ColumnData & EndColumnMask) >>
+                                 EndColumnShift);
+  }
+
+  uint32_t getRawData() const { return ColumnData; }
+
+private:
+  uint32_t ColumnData;
+};
+
+class Line {
+private:
+  int32_t CodeOffset;
+  LineInfo LineInf;
+  ColumnInfo ColumnInf;
+
+public:
+  Line(int32_t CodeOffset, uint32_t StartLine, uint32_t EndLine,
+       uint16_t StartColumn, uint16_t EndColumn, bool IsStatement)
+      : CodeOffset(CodeOffset), LineInf(StartLine, EndLine, IsStatement),
+        ColumnInf(StartColumn, EndColumn) {}
+
+  Line(int32_t CodeOffset, LineInfo LineInf, ColumnInfo ColumnInf)
+      : CodeOffset(CodeOffset), LineInf(LineInf), ColumnInf(ColumnInf) {}
+
+  LineInfo getLineInfo() const { return LineInf; }
+
+  ColumnInfo getColumnInfo() const { return ColumnInf; }
+
+  int32_t getCodeOffset() const { return CodeOffset; }
+
+  uint32_t getStartLine() const { return LineInf.getStartLine(); }
+
+  uint32_t getLineDelta() const { return LineInf.getLineDelta(); }
+
+  uint32_t getEndLine() const { return LineInf.getEndLine(); }
+
+  uint16_t getStartColumn() const { return ColumnInf.getStartColumn(); }
+
+  uint16_t getEndColumn() const { return ColumnInf.getEndColumn(); }
+
+  bool isStatement() const { return LineInf.isStatement(); }
+
+  bool isAlwaysStepInto() const { return LineInf.isAlwaysStepInto(); }
+
+  bool isNeverStepInto() const { return LineInf.isNeverStepInto(); }
+};
+
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h
new file mode 100644
index 0000000..9030918
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h
@@ -0,0 +1,81 @@
+//===- MergingTypeTableBuilder.h ---------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeHashing.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class ContinuationRecordBuilder;
+
+class MergingTypeTableBuilder : public TypeCollection {
+  /// Storage for records.  These need to outlive the TypeTableBuilder.
+  BumpPtrAllocator &RecordStorage;
+
+  /// A serializer that can write non-continuation leaf types.  Only used as
+  /// a convenience function so that we can provide an interface method to
+  /// write an unserialized record.
+  SimpleTypeSerializer SimpleSerializer;
+
+  /// Hash table.
+  DenseMap<LocallyHashedType, TypeIndex> HashedRecords;
+
+  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
+  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
+
+public:
+  explicit MergingTypeTableBuilder(BumpPtrAllocator &Storage);
+  ~MergingTypeTableBuilder();
+
+  // TypeTableCollection overrides
+  Optional<TypeIndex> getFirst() override;
+  Optional<TypeIndex> getNext(TypeIndex Prev) override;
+  CVType getType(TypeIndex Index) override;
+  StringRef getTypeName(TypeIndex Index) override;
+  bool contains(TypeIndex Index) override;
+  uint32_t size() override;
+  uint32_t capacity() override;
+
+  // public interface
+  void reset();
+  TypeIndex nextTypeIndex() const;
+
+  BumpPtrAllocator &getAllocator() { return RecordStorage; }
+
+  ArrayRef<ArrayRef<uint8_t>> records() const;
+
+  TypeIndex insertRecordAs(hash_code Hash, ArrayRef<uint8_t> &Record);
+  TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
+  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);
+
+  template <typename T> TypeIndex writeLeafType(T &Record) {
+    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
+    return insertRecordBytes(Data);
+  }
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordName.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordName.h
new file mode 100644
index 0000000..b022108
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordName.h
@@ -0,0 +1,24 @@
+//===- RecordName.h ------------------------------------------- *- C++ --*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_RECORDNAME_H
+#define LLVM_DEBUGINFO_CODEVIEW_RECORDNAME_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+namespace llvm {
+namespace codeview {
+std::string computeTypeName(TypeCollection &Types, TypeIndex Index);
+StringRef getSymbolName(CVSymbol Sym);
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordSerialization.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordSerialization.h
new file mode 100644
index 0000000..58449c2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/RecordSerialization.h
@@ -0,0 +1,251 @@
+//===- RecordSerialization.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_RECORDSERIALIZATION_H
+#define LLVM_DEBUGINFO_CODEVIEW_RECORDSERIALIZATION_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cinttypes>
+#include <tuple>
+
+namespace llvm {
+namespace codeview {
+using llvm::support::little32_t;
+using llvm::support::ulittle16_t;
+using llvm::support::ulittle32_t;
+
+/// Limit on the size of all codeview symbol and type records, including the
+/// RecordPrefix. MSVC does not emit any records larger than this.
+enum : unsigned { MaxRecordLength = 0xFF00 };
+
+struct RecordPrefix {
+  ulittle16_t RecordLen;  // Record length, starting from &RecordKind.
+  ulittle16_t RecordKind; // Record kind enum (SymRecordKind or TypeRecordKind)
+};
+
+/// Reinterpret a byte array as an array of characters. Does not interpret as
+/// a C string, as StringRef has several helpers (split) that make that easy.
+StringRef getBytesAsCharacters(ArrayRef<uint8_t> LeafData);
+StringRef getBytesAsCString(ArrayRef<uint8_t> LeafData);
+
+inline Error consume(BinaryStreamReader &Reader) { return Error::success(); }
+
+/// Decodes a numeric "leaf" value. These are integer literals encountered in
+/// the type stream. If the value is positive and less than LF_NUMERIC (1 <<
+/// 15), it is emitted directly in Data. Otherwise, it has a tag like LF_CHAR
+/// that indicates the bitwidth and sign of the numeric data.
+Error consume(BinaryStreamReader &Reader, APSInt &Num);
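+// For example (an editor's illustration of the encoding; little-endian bytes,
+// tag values as in the Microsoft cvinfo.h definitions):
+//   42      ->  2A 00               fits below 1 << 15, emitted directly
+//   100000  ->  04 80 A0 86 01 00   LF_ULONG tag (0x8004), then 4-byte payload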
+
+/// Decodes a numeric leaf value that is known to be a particular type.
+Error consume_numeric(BinaryStreamReader &Reader, uint64_t &Value);
+
+/// Decodes signed and unsigned fixed-length integers.
+Error consume(BinaryStreamReader &Reader, uint32_t &Item);
+Error consume(BinaryStreamReader &Reader, int32_t &Item);
+
+/// Decodes a null terminated string.
+Error consume(BinaryStreamReader &Reader, StringRef &Item);
+
+Error consume(StringRef &Data, APSInt &Num);
+Error consume(StringRef &Data, uint32_t &Item);
+
+/// Decodes an arbitrary object whose layout matches that of the underlying
+/// byte sequence, and returns a pointer to the object.
+template <typename T> Error consume(BinaryStreamReader &Reader, T *&Item) {
+  return Reader.readObject(Item);
+}
+
+template <typename T, typename U> struct serialize_conditional_impl {
+  serialize_conditional_impl(T &Item, U Func) : Item(Item), Func(Func) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    if (!Func())
+      return Error::success();
+    return consume(Reader, Item);
+  }
+
+  T &Item;
+  U Func;
+};
+
+template <typename T, typename U>
+serialize_conditional_impl<T, U> serialize_conditional(T &Item, U Func) {
+  return serialize_conditional_impl<T, U>(Item, Func);
+}
+
+template <typename T, typename U> struct serialize_array_impl {
+  serialize_array_impl(ArrayRef<T> &Item, U Func) : Item(Item), Func(Func) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    return Reader.readArray(Item, Func());
+  }
+
+  ArrayRef<T> &Item;
+  U Func;
+};
+
+template <typename T> struct serialize_vector_tail_impl {
+  serialize_vector_tail_impl(std::vector<T> &Item) : Item(Item) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    T Field;
+    // Stop when we run out of bytes or we hit record padding bytes.
+    while (!Reader.empty() && Reader.peek() < LF_PAD0) {
+      if (auto EC = consume(Reader, Field))
+        return EC;
+      Item.push_back(Field);
+    }
+    return Error::success();
+  }
+
+  std::vector<T> &Item;
+};
+
+struct serialize_null_term_string_array_impl {
+  serialize_null_term_string_array_impl(std::vector<StringRef> &Item)
+      : Item(Item) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    if (Reader.empty())
+      return make_error<CodeViewError>(cv_error_code::insufficient_buffer,
+                                       "Null terminated string is empty!");
+
+    while (Reader.peek() != 0) {
+      StringRef Field;
+      if (auto EC = Reader.readCString(Field))
+        return EC;
+      Item.push_back(Field);
+    }
+    return Reader.skip(1);
+  }
+
+  std::vector<StringRef> &Item;
+};
+
+template <typename T> struct serialize_arrayref_tail_impl {
+  serialize_arrayref_tail_impl(ArrayRef<T> &Item) : Item(Item) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    uint32_t Count = Reader.bytesRemaining() / sizeof(T);
+    return Reader.readArray(Item, Count);
+  }
+
+  ArrayRef<T> &Item;
+};
+
+template <typename T> struct serialize_numeric_impl {
+  serialize_numeric_impl(T &Item) : Item(Item) {}
+
+  Error deserialize(BinaryStreamReader &Reader) const {
+    return consume_numeric(Reader, Item);
+  }
+
+  T &Item;
+};
+
+template <typename T, typename U>
+serialize_array_impl<T, U> serialize_array(ArrayRef<T> &Item, U Func) {
+  return serialize_array_impl<T, U>(Item, Func);
+}
+
+inline serialize_null_term_string_array_impl
+serialize_null_term_string_array(std::vector<StringRef> &Item) {
+  return serialize_null_term_string_array_impl(Item);
+}
+
+template <typename T>
+serialize_vector_tail_impl<T> serialize_array_tail(std::vector<T> &Item) {
+  return serialize_vector_tail_impl<T>(Item);
+}
+
+template <typename T>
+serialize_arrayref_tail_impl<T> serialize_array_tail(ArrayRef<T> &Item) {
+  return serialize_arrayref_tail_impl<T>(Item);
+}
+
+template <typename T> serialize_numeric_impl<T> serialize_numeric(T &Item) {
+  return serialize_numeric_impl<T>(Item);
+}
+
+// This field is only present in the byte record if the condition is true.  The
+// condition is evaluated lazily, so it can depend on items that were
+// deserialized earlier.
+#define CV_CONDITIONAL_FIELD(I, C)                                             \
+  serialize_conditional(I, [&]() { return !!(C); })
+
+// This is an array of N items, where N is evaluated lazily, so it can refer
+// to a field deserialized earlier.
+#define CV_ARRAY_FIELD_N(I, N) serialize_array(I, [&]() { return N; })
+
+// This is an array that exhausts the remainder of the input buffer.
+#define CV_ARRAY_FIELD_TAIL(I) serialize_array_tail(I)
+
+// This is an array that consumes null terminated strings until a double null
+// is encountered.
+#define CV_STRING_ARRAY_NULL_TERM(I) serialize_null_term_string_array(I)
+
+#define CV_NUMERIC_FIELD(I) serialize_numeric(I)
+
+template <typename T, typename U>
+Error consume(BinaryStreamReader &Reader,
+              const serialize_conditional_impl<T, U> &Item) {
+  return Item.deserialize(Reader);
+}
+
+template <typename T, typename U>
+Error consume(BinaryStreamReader &Reader,
+              const serialize_array_impl<T, U> &Item) {
+  return Item.deserialize(Reader);
+}
+
+inline Error consume(BinaryStreamReader &Reader,
+                     const serialize_null_term_string_array_impl &Item) {
+  return Item.deserialize(Reader);
+}
+
+template <typename T>
+Error consume(BinaryStreamReader &Reader,
+              const serialize_vector_tail_impl<T> &Item) {
+  return Item.deserialize(Reader);
+}
+
+template <typename T>
+Error consume(BinaryStreamReader &Reader,
+              const serialize_arrayref_tail_impl<T> &Item) {
+  return Item.deserialize(Reader);
+}
+
+template <typename T>
+Error consume(BinaryStreamReader &Reader,
+              const serialize_numeric_impl<T> &Item) {
+  return Item.deserialize(Reader);
+}
+
+template <typename T, typename U, typename... Args>
+Error consume(BinaryStreamReader &Reader, T &&X, U &&Y, Args &&... Rest) {
+  if (auto EC = consume(Reader, X))
+    return EC;
+  return consume(Reader, Y, std::forward<Args>(Rest)...);
+}
+
+#define CV_DESERIALIZE(...)                                                    \
+  if (auto EC = consume(__VA_ARGS__))                                          \
+    return std::move(EC);
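+
+// Illustrative use of the macros above (an editor's sketch, not part of the
+// original header).  Count is read first, so the lazily-evaluated array bound
+// can refer to it:
+//
+//   Error deserializeExample(BinaryStreamReader &Reader) {
+//     uint32_t Count;
+//     ArrayRef<ulittle32_t> Items;
+//     StringRef Name;
+//     CV_DESERIALIZE(Reader, Count, CV_ARRAY_FIELD_N(Items, Count), Name);
+//     return Error::success();
+//   }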
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h
new file mode 100644
index 0000000..a85d927
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h
@@ -0,0 +1,53 @@
+//===- SimpleTypeSerializer.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class SimpleTypeSerializer {
+  std::vector<uint8_t> ScratchBuffer;
+
+public:
+  SimpleTypeSerializer();
+  ~SimpleTypeSerializer();
+
+  // This template is explicitly instantiated in the implementation file for all
+  // supported types.  The method itself is ugly, so inlining it into the header
+  // file clutters an otherwise straightforward interface.
+  template <typename T> ArrayRef<uint8_t> serialize(T &Record);
+
+  // Don't allow serialization of field list records using this interface.
+  ArrayRef<uint8_t> serialize(const FieldListRecord &Record) = delete;
+};
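+
+// Illustrative usage (an editor's sketch, not part of the original header).
+// ModifiedType is an assumed TypeIndex; ModifierRecord comes from
+// TypeRecord.h, included above:
+//
+//   SimpleTypeSerializer Serializer;
+//   ModifierRecord MR(ModifiedType, ModifierOptions::Const);
+//   ArrayRef<uint8_t> Bytes = Serializer.serialize(MR);
+//   // Bytes aliases the internal scratch buffer; copy it before the next
+//   // serialize() call.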
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h
new file mode 100644
index 0000000..22a333e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h
@@ -0,0 +1,107 @@
+//===- StringsAndChecksums.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H
+#define LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include <memory>
+
+namespace llvm {
+namespace codeview {
+
+class StringsAndChecksumsRef {
+public:
+  // If no subsections are known about initially, we find as much as we can.
+  StringsAndChecksumsRef();
+
+  // If only a string table subsection is given, we find a checksums subsection.
+  explicit StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings);
+
+  // If both subsections are given, we don't need to find anything.
+  StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings,
+                         const DebugChecksumsSubsectionRef &Checksums);
+
+  void setStrings(const DebugStringTableSubsectionRef &Strings);
+  void setChecksums(const DebugChecksumsSubsectionRef &CS);
+
+  void reset();
+  void resetStrings();
+  void resetChecksums();
+
+  template <typename T> void initialize(T &&FragmentRange) {
+    for (const DebugSubsectionRecord &R : FragmentRange) {
+      if (Strings && Checksums)
+        return;
+      if (R.kind() == DebugSubsectionKind::FileChecksums) {
+        initializeChecksums(R);
+        continue;
+      }
+      if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
+        // While in practice we should never encounter a string table
+        // subsection when the string table is already initialized, in theory
+        // it's possible.  PDBs are supposed to have one global string table,
+        // in which case this subsection should not appear, whereas object
+        // files are supposed to contain this subsection exactly once.
+        // However, for testing purposes it's nice to be able to test this
+        // subsection independently of one format or the other, so some tests
+        // manually construct a PDB that contains this subsection in addition
+        // to a global string table.
+        initializeStrings(R);
+        continue;
+      }
+    }
+  }
+
+  const DebugStringTableSubsectionRef &strings() const { return *Strings; }
+  const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }
+
+  bool hasStrings() const { return Strings != nullptr; }
+  bool hasChecksums() const { return Checksums != nullptr; }
+
+private:
+  void initializeStrings(const DebugSubsectionRecord &SR);
+  void initializeChecksums(const DebugSubsectionRecord &FCR);
+
+  std::shared_ptr<DebugStringTableSubsectionRef> OwnedStrings;
+  std::shared_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;
+
+  const DebugStringTableSubsectionRef *Strings = nullptr;
+  const DebugChecksumsSubsectionRef *Checksums = nullptr;
+};
+
+class StringsAndChecksums {
+public:
+  using StringsPtr = std::shared_ptr<DebugStringTableSubsection>;
+  using ChecksumsPtr = std::shared_ptr<DebugChecksumsSubsection>;
+
+  // If no subsections are known about initially, we find as much as we can.
+  StringsAndChecksums() = default;
+
+  void setStrings(const StringsPtr &SP) { Strings = SP; }
+  void setChecksums(const ChecksumsPtr &CP) { Checksums = CP; }
+
+  const StringsPtr &strings() const { return Strings; }
+  const ChecksumsPtr &checksums() const { return Checksums; }
+
+  bool hasStrings() const { return Strings != nullptr; }
+  bool hasChecksums() const { return Checksums != nullptr; }
+
+private:
+  StringsPtr Strings;
+  ChecksumsPtr Checksums;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
new file mode 100644
index 0000000..b5479db
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
@@ -0,0 +1,100 @@
+//===- SymbolDeserializer.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class SymbolVisitorDelegate;
+class SymbolDeserializer : public SymbolVisitorCallbacks {
+  struct MappingInfo {
+    MappingInfo(ArrayRef<uint8_t> RecordData, CodeViewContainer Container)
+        : Stream(RecordData, llvm::support::little), Reader(Stream),
+          Mapping(Reader, Container) {}
+
+    BinaryByteStream Stream;
+    BinaryStreamReader Reader;
+    SymbolRecordMapping Mapping;
+  };
+
+public:
+  template <typename T> static Error deserializeAs(CVSymbol Symbol, T &Record) {
+    // If we're just deserializing one record, then don't worry about alignment
+    // as there's nothing that comes after.
+    SymbolDeserializer S(nullptr, CodeViewContainer::ObjectFile);
+    if (auto EC = S.visitSymbolBegin(Symbol))
+      return EC;
+    if (auto EC = S.visitKnownRecord(Symbol, Record))
+      return EC;
+    if (auto EC = S.visitSymbolEnd(Symbol))
+      return EC;
+    return Error::success();
+  }
+  template <typename T> static Expected<T> deserializeAs(CVSymbol Symbol) {
+    T Record(Symbol.kind());
+    if (auto EC = deserializeAs<T>(Symbol, Record))
+      return std::move(EC);
+    return Record;
+  }
+
+  explicit SymbolDeserializer(SymbolVisitorDelegate *Delegate,
+                              CodeViewContainer Container)
+      : Delegate(Delegate), Container(Container) {}
+
+  Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) override {
+    return visitSymbolBegin(Record);
+  }
+
+  Error visitSymbolBegin(CVSymbol &Record) override {
+    assert(!Mapping && "Already in a symbol mapping!");
+    Mapping = llvm::make_unique<MappingInfo>(Record.content(), Container);
+    return Mapping->Mapping.visitSymbolBegin(Record);
+  }
+  Error visitSymbolEnd(CVSymbol &Record) override {
+    assert(Mapping && "Not in a symbol mapping!");
+    auto EC = Mapping->Mapping.visitSymbolEnd(Record);
+    Mapping.reset();
+    return EC;
+  }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
+    return visitKnownRecordImpl(CVR, Record);                                  \
+  }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+
+private:
+  template <typename T> Error visitKnownRecordImpl(CVSymbol &CVR, T &Record) {
+    Record.RecordOffset =
+        Delegate ? Delegate->getRecordOffset(Mapping->Reader) : 0;
+    if (auto EC = Mapping->Mapping.visitKnownRecord(CVR, Record))
+      return EC;
+    return Error::success();
+  }
+
+  SymbolVisitorDelegate *Delegate;
+  CodeViewContainer Container;
+  std::unique_ptr<MappingInfo> Mapping;
+};
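+
+// Illustrative usage (an editor's sketch, not part of the original header).
+// Sym is an assumed CVSymbol known to hold an S_GPROC32 record:
+//
+//   Expected<ProcSym> Proc = SymbolDeserializer::deserializeAs<ProcSym>(Sym);
+//   if (!Proc)
+//     return Proc.takeError();
+//   StringRef Name = Proc->Name;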
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h
new file mode 100644
index 0000000..823636c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h
@@ -0,0 +1,35 @@
+//===-- SymbolDumpDelegate.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
+#include <cstdint>
+
+namespace llvm {
+namespace codeview {
+
+class SymbolDumpDelegate : public SymbolVisitorDelegate {
+public:
+  ~SymbolDumpDelegate() override = default;
+
+  virtual void printRelocatedField(StringRef Label, uint32_t RelocOffset,
+                                   uint32_t Offset,
+                                   StringRef *RelocSym = nullptr) = 0;
+  virtual void printBinaryBlockWithRelocs(StringRef Label,
+                                          ArrayRef<uint8_t> Block) = 0;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumper.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumper.h
new file mode 100644
index 0000000..293daa8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolDumper.h
@@ -0,0 +1,57 @@
+//===-- SymbolDumper.h - CodeView symbol info dumper ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/DebugInfo/CodeView/SymbolDumpDelegate.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+namespace llvm {
+class ScopedPrinter;
+
+namespace codeview {
+class TypeCollection;
+
+/// Dumper for CodeView symbol streams found in COFF object files and PDB files.
+class CVSymbolDumper {
+public:
+  CVSymbolDumper(ScopedPrinter &W, TypeCollection &Types,
+                 CodeViewContainer Container,
+                 std::unique_ptr<SymbolDumpDelegate> ObjDelegate,
+                 bool PrintRecordBytes)
+      : W(W), Types(Types), Container(Container),
+        ObjDelegate(std::move(ObjDelegate)),
+        PrintRecordBytes(PrintRecordBytes) {}
+
+  /// Dumps one symbol record.  Returns an error if the record could not be
+  /// parsed.  This should be called in order, since the dumper maintains
+  /// state about previous records which is necessary for cross-type
+  /// references.
+  Error dump(CVRecord<SymbolKind> &Record);
+
+  /// Dumps all symbol records in the array.  Returns an error if the stream
+  /// could not be parsed.
+  Error dump(const CVSymbolArray &Symbols);
+
+private:
+  ScopedPrinter &W;
+  TypeCollection &Types;
+  CodeViewContainer Container;
+  std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
+
+  bool PrintRecordBytes;
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecord.h
new file mode 100644
index 0000000..cf267f2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -0,0 +1,956 @@
+//===- SymbolRecord.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/CodeView/CVRecord.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Endian.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class SymbolRecord {
+protected:
+  explicit SymbolRecord(SymbolRecordKind Kind) : Kind(Kind) {}
+
+public:
+  SymbolRecordKind getKind() const { return Kind; }
+
+  SymbolRecordKind Kind;
+};
+
+// S_GPROC32, S_LPROC32, S_GPROC32_ID, S_LPROC32_ID, S_LPROC32_DPC or
+// S_LPROC32_DPC_ID
+class ProcSym : public SymbolRecord {
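+  // Note: the relocated field is CodeOffset; the 4-byte record prefix plus the
+  // seven 4-byte fields preceding it place it 32 bytes into the record.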
+  static constexpr uint32_t RelocationOffset = 32;
+
+public:
+  explicit ProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  ProcSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t Parent = 0;
+  uint32_t End = 0;
+  uint32_t Next = 0;
+  uint32_t CodeSize = 0;
+  uint32_t DbgStart = 0;
+  uint32_t DbgEnd = 0;
+  TypeIndex FunctionType;
+  uint32_t CodeOffset = 0;
+  uint16_t Segment = 0;
+  ProcSymFlags Flags = ProcSymFlags::None;
+  StringRef Name;
+
+  uint32_t RecordOffset = 0;
+};
+
+// S_THUNK32
+class Thunk32Sym : public SymbolRecord {
+public:
+  explicit Thunk32Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  Thunk32Sym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  uint32_t Parent;
+  uint32_t End;
+  uint32_t Next;
+  uint32_t Offset;
+  uint16_t Segment;
+  uint16_t Length;
+  ThunkOrdinal Thunk;
+  StringRef Name;
+  ArrayRef<uint8_t> VariantData;
+
+  uint32_t RecordOffset;
+};
+
+// S_TRAMPOLINE
+class TrampolineSym : public SymbolRecord {
+public:
+  explicit TrampolineSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  TrampolineSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  TrampolineType Type;
+  uint16_t Size;
+  uint32_t ThunkOffset;
+  uint32_t TargetOffset;
+  uint16_t ThunkSection;
+  uint16_t TargetSection;
+
+  uint32_t RecordOffset;
+};
+
+// S_SECTION
+class SectionSym : public SymbolRecord {
+public:
+  explicit SectionSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  SectionSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  uint16_t SectionNumber;
+  uint8_t Alignment;
+  uint32_t Rva;
+  uint32_t Length;
+  uint32_t Characteristics;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_COFFGROUP
+class CoffGroupSym : public SymbolRecord {
+public:
+  explicit CoffGroupSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  CoffGroupSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  uint32_t Size;
+  uint32_t Characteristics;
+  uint32_t Offset;
+  uint16_t Segment;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+class ScopeEndSym : public SymbolRecord {
+public:
+  explicit ScopeEndSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  ScopeEndSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  uint32_t RecordOffset;
+};
+
+class CallerSym : public SymbolRecord {
+public:
+  explicit CallerSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  CallerSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+
+  std::vector<TypeIndex> Indices;
+
+  uint32_t RecordOffset;
+};
+
+struct BinaryAnnotationIterator {
+  struct AnnotationData {
+    BinaryAnnotationsOpCode OpCode;
+    StringRef Name;
+    uint32_t U1;
+    uint32_t U2;
+    int32_t S1;
+  };
+
+  BinaryAnnotationIterator() = default;
+  BinaryAnnotationIterator(ArrayRef<uint8_t> Annotations) : Data(Annotations) {}
+  BinaryAnnotationIterator(const BinaryAnnotationIterator &Other)
+      : Data(Other.Data) {}
+
+  bool operator==(BinaryAnnotationIterator Other) const {
+    return Data == Other.Data;
+  }
+
+  bool operator!=(const BinaryAnnotationIterator &Other) const {
+    return !(*this == Other);
+  }
+
+  BinaryAnnotationIterator &operator=(const BinaryAnnotationIterator Other) {
+    Data = Other.Data;
+    return *this;
+  }
+
+  BinaryAnnotationIterator &operator++() {
+    if (!ParseCurrentAnnotation()) {
+      *this = BinaryAnnotationIterator();
+      return *this;
+    }
+    Data = Next;
+    Next = ArrayRef<uint8_t>();
+    Current.reset();
+    return *this;
+  }
+
+  BinaryAnnotationIterator operator++(int) {
+    BinaryAnnotationIterator Orig(*this);
+    ++(*this);
+    return Orig;
+  }
+
+  const AnnotationData &operator*() {
+    ParseCurrentAnnotation();
+    return Current.getValue();
+  }
+
+private:
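+  // Decodes CodeView's variable-width unsigned encoding.  The top bits of the
+  // first byte select the width; 0xFFFFFFFF is returned as an error sentinel.
+  // Illustrative decodes (hand-worked from the logic below):
+  //   {0x05}                   -> 0x05        (1-byte form, top bit 0)
+  //   {0x81, 0x23}             -> 0x0123      (2-byte form, top bits 10)
+  //   {0xC1, 0x02, 0x03, 0x04} -> 0x01020304  (4-byte form, top bits 110)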
+  static uint32_t GetCompressedAnnotation(ArrayRef<uint8_t> &Annotations) {
+    if (Annotations.empty())
+      return -1;
+
+    uint8_t FirstByte = Annotations.front();
+    Annotations = Annotations.drop_front();
+
+    if ((FirstByte & 0x80) == 0x00)
+      return FirstByte;
+
+    if (Annotations.empty())
+      return -1;
+
+    uint8_t SecondByte = Annotations.front();
+    Annotations = Annotations.drop_front();
+
+    if ((FirstByte & 0xC0) == 0x80)
+      return ((FirstByte & 0x3F) << 8) | SecondByte;
+
+    if (Annotations.empty())
+      return -1;
+
+    uint8_t ThirdByte = Annotations.front();
+    Annotations = Annotations.drop_front();
+
+    if (Annotations.empty())
+      return -1;
+
+    uint8_t FourthByte = Annotations.front();
+    Annotations = Annotations.drop_front();
+
+    if ((FirstByte & 0xE0) == 0xC0)
+      return ((FirstByte & 0x1F) << 24) | (SecondByte << 16) |
+             (ThirdByte << 8) | FourthByte;
+
+    return -1;
+  }
+
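+  // The sign is folded into the low bit: an operand of 6 decodes to +3, and an
+  // operand of 7 decodes to -3.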
+  static int32_t DecodeSignedOperand(uint32_t Operand) {
+    if (Operand & 1)
+      return -(Operand >> 1);
+    return Operand >> 1;
+  }
+
+  static int32_t DecodeSignedOperand(ArrayRef<uint8_t> &Annotations) {
+    return DecodeSignedOperand(GetCompressedAnnotation(Annotations));
+  }
+
+  bool ParseCurrentAnnotation() {
+    if (Current.hasValue())
+      return true;
+
+    Next = Data;
+    uint32_t Op = GetCompressedAnnotation(Next);
+    AnnotationData Result;
+    Result.OpCode = static_cast<BinaryAnnotationsOpCode>(Op);
+    switch (Result.OpCode) {
+    case BinaryAnnotationsOpCode::Invalid:
+      Result.Name = "Invalid";
+      Next = ArrayRef<uint8_t>();
+      break;
+    case BinaryAnnotationsOpCode::CodeOffset:
+      Result.Name = "CodeOffset";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeCodeOffsetBase:
+      Result.Name = "ChangeCodeOffsetBase";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeCodeOffset:
+      Result.Name = "ChangeCodeOffset";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeCodeLength:
+      Result.Name = "ChangeCodeLength";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeFile:
+      Result.Name = "ChangeFile";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeLineEndDelta:
+      Result.Name = "ChangeLineEndDelta";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeRangeKind:
+      Result.Name = "ChangeRangeKind";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeColumnStart:
+      Result.Name = "ChangeColumnStart";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeColumnEnd:
+      Result.Name = "ChangeColumnEnd";
+      Result.U1 = GetCompressedAnnotation(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeLineOffset:
+      Result.Name = "ChangeLineOffset";
+      Result.S1 = DecodeSignedOperand(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeColumnEndDelta:
+      Result.Name = "ChangeColumnEndDelta";
+      Result.S1 = DecodeSignedOperand(Next);
+      break;
+    case BinaryAnnotationsOpCode::ChangeCodeOffsetAndLineOffset: {
+      Result.Name = "ChangeCodeOffsetAndLineOffset";
+      uint32_t Annotation = GetCompressedAnnotation(Next);
+      Result.S1 = DecodeSignedOperand(Annotation >> 4);
+      Result.U1 = Annotation & 0xf;
+      break;
+    }
+    case BinaryAnnotationsOpCode::ChangeCodeLengthAndCodeOffset: {
+      Result.Name = "ChangeCodeLengthAndCodeOffset";
+      Result.U1 = GetCompressedAnnotation(Next);
+      Result.U2 = GetCompressedAnnotation(Next);
+      break;
+    }
+    }
+    Current = Result;
+    return true;
+  }
+
+  Optional<AnnotationData> Current;
+  ArrayRef<uint8_t> Data;
+  ArrayRef<uint8_t> Next;
+};
+
+// S_INLINESITE
+class InlineSiteSym : public SymbolRecord {
+public:
+  explicit InlineSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  InlineSiteSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::InlineSiteSym),
+        RecordOffset(RecordOffset) {}
+
+  iterator_range<BinaryAnnotationIterator> annotations() const {
+    return make_range(BinaryAnnotationIterator(AnnotationData),
+                      BinaryAnnotationIterator());
+  }
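+  // Illustrative iteration (hypothetical InlineSiteSym `Site`):
+  //   for (const auto &Annot : Site.annotations())
+  //     ... inspect Annot.Name, Annot.U1, Annot.S1 ...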
+
+  uint32_t Parent;
+  uint32_t End;
+  TypeIndex Inlinee;
+  std::vector<uint8_t> AnnotationData;
+
+  uint32_t RecordOffset;
+};
+
+// S_PUB32
+class PublicSym32 : public SymbolRecord {
+public:
+  explicit PublicSym32(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit PublicSym32(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::PublicSym32),
+        RecordOffset(RecordOffset) {}
+
+  PublicSymFlags Flags = PublicSymFlags::None;
+  uint32_t Offset = 0;
+  uint16_t Segment = 0;
+  StringRef Name;
+
+  uint32_t RecordOffset = 0;
+};
+
+// S_REGISTER
+class RegisterSym : public SymbolRecord {
+public:
+  explicit RegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  RegisterSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::RegisterSym),
+        RecordOffset(RecordOffset) {}
+
+  TypeIndex Index;
+  RegisterId Register;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_PROCREF, S_LPROCREF
+class ProcRefSym : public SymbolRecord {
+public:
+  explicit ProcRefSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit ProcRefSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::ProcRefSym), RecordOffset(RecordOffset) {
+  }
+
+  uint32_t SumName;
+  uint32_t SymOffset;
+  uint16_t Module;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_LOCAL
+class LocalSym : public SymbolRecord {
+public:
+  explicit LocalSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit LocalSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::LocalSym), RecordOffset(RecordOffset) {}
+
+  TypeIndex Type;
+  LocalSymFlags Flags;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+struct LocalVariableAddrRange {
+  uint32_t OffsetStart;
+  uint16_t ISectStart;
+  uint16_t Range;
+};
+
+struct LocalVariableAddrGap {
+  uint16_t GapStartOffset;
+  uint16_t Range;
+};
+
+enum : uint16_t { MaxDefRange = 0xf000 };
+
+// S_DEFRANGE
+class DefRangeSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 8;
+
+public:
+  explicit DefRangeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit DefRangeSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t Program;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_SUBFIELD
+class DefRangeSubfieldSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 12;
+
+public:
+  explicit DefRangeSubfieldSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  DefRangeSubfieldSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeSubfieldSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t Program;
+  uint16_t OffsetInParent;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_REGISTER
+class DefRangeRegisterSym : public SymbolRecord {
+public:
+  struct Header {
+    ulittle16_t Register;
+    ulittle16_t MayHaveNoName;
+  };
+
+  explicit DefRangeRegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  DefRangeRegisterSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeRegisterSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
+
+  Header Hdr;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_SUBFIELD_REGISTER
+class DefRangeSubfieldRegisterSym : public SymbolRecord {
+public:
+  struct Header {
+    ulittle16_t Register;
+    ulittle16_t MayHaveNoName;
+    ulittle32_t OffsetInParent;
+  };
+
+  explicit DefRangeSubfieldRegisterSym(SymbolRecordKind Kind)
+      : SymbolRecord(Kind) {}
+  DefRangeSubfieldRegisterSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeSubfieldRegisterSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
+
+  Header Hdr;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_FRAMEPOINTER_REL
+class DefRangeFramePointerRelSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 8;
+
+public:
+  explicit DefRangeFramePointerRelSym(SymbolRecordKind Kind)
+      : SymbolRecord(Kind) {}
+  DefRangeFramePointerRelSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  int32_t Offset;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_REGISTER_REL
+class DefRangeRegisterRelSym : public SymbolRecord {
+public:
+  struct Header {
+    ulittle16_t Register;
+    ulittle16_t Flags;
+    little32_t BasePointerOffset;
+  };
+
+  explicit DefRangeRegisterRelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit DefRangeRegisterRelSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeRegisterRelSym),
+        RecordOffset(RecordOffset) {}
+
+  // The flags implement this notional bitfield:
+  //   uint16_t IsSubfield : 1;
+  //   uint16_t Padding : 3;
+  //   uint16_t OffsetInParent : 12;
+  enum : uint16_t {
+    IsSubfieldFlag = 1,
+    OffsetInParentShift = 4,
+  };
+
+  bool hasSpilledUDTMember() const { return Hdr.Flags & IsSubfieldFlag; }
+  uint16_t offsetInParent() const { return Hdr.Flags >> OffsetInParentShift; }
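+  // Illustrative decode: Hdr.Flags == 0x0031 gives hasSpilledUDTMember() ==
+  // true and offsetInParent() == 0x3.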
+
+  uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
+
+  Header Hdr;
+  LocalVariableAddrRange Range;
+  std::vector<LocalVariableAddrGap> Gaps;
+
+  uint32_t RecordOffset;
+};
+
+// S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE
+class DefRangeFramePointerRelFullScopeSym : public SymbolRecord {
+public:
+  explicit DefRangeFramePointerRelFullScopeSym(SymbolRecordKind Kind)
+      : SymbolRecord(Kind) {}
+  explicit DefRangeFramePointerRelFullScopeSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelFullScopeSym),
+        RecordOffset(RecordOffset) {}
+
+  int32_t Offset;
+
+  uint32_t RecordOffset;
+};
+
+// S_BLOCK32
+class BlockSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 16;
+
+public:
+  explicit BlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit BlockSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::BlockSym), RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t Parent;
+  uint32_t End;
+  uint32_t CodeSize;
+  uint32_t CodeOffset;
+  uint16_t Segment;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_LABEL32
+class LabelSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 4;
+
+public:
+  explicit LabelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit LabelSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::LabelSym), RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t CodeOffset;
+  uint16_t Segment;
+  ProcSymFlags Flags;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_OBJNAME
+class ObjNameSym : public SymbolRecord {
+public:
+  explicit ObjNameSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  ObjNameSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::ObjNameSym), RecordOffset(RecordOffset) {
+  }
+
+  uint32_t Signature;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_ENVBLOCK
+class EnvBlockSym : public SymbolRecord {
+public:
+  explicit EnvBlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  EnvBlockSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::EnvBlockSym),
+        RecordOffset(RecordOffset) {}
+
+  std::vector<StringRef> Fields;
+
+  uint32_t RecordOffset;
+};
+
+// S_EXPORT
+class ExportSym : public SymbolRecord {
+public:
+  explicit ExportSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  ExportSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::ExportSym), RecordOffset(RecordOffset) {}
+
+  uint16_t Ordinal;
+  ExportFlags Flags;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_FILESTATIC
+class FileStaticSym : public SymbolRecord {
+public:
+  explicit FileStaticSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  FileStaticSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::FileStaticSym),
+        RecordOffset(RecordOffset) {}
+
+  TypeIndex Index;
+  uint32_t ModFilenameOffset;
+  LocalSymFlags Flags;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_COMPILE2
+class Compile2Sym : public SymbolRecord {
+public:
+  explicit Compile2Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  Compile2Sym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::Compile2Sym),
+        RecordOffset(RecordOffset) {}
+
+  CompileSym2Flags Flags;
+  CPUType Machine;
+  uint16_t VersionFrontendMajor;
+  uint16_t VersionFrontendMinor;
+  uint16_t VersionFrontendBuild;
+  uint16_t VersionBackendMajor;
+  uint16_t VersionBackendMinor;
+  uint16_t VersionBackendBuild;
+  StringRef Version;
+  std::vector<StringRef> ExtraStrings;
+
+  uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
+  uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
+
+  uint32_t RecordOffset;
+};
+
+// S_COMPILE3
+class Compile3Sym : public SymbolRecord {
+public:
+  explicit Compile3Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  Compile3Sym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::Compile3Sym),
+        RecordOffset(RecordOffset) {}
+
+  CompileSym3Flags Flags;
+  CPUType Machine;
+  uint16_t VersionFrontendMajor;
+  uint16_t VersionFrontendMinor;
+  uint16_t VersionFrontendBuild;
+  uint16_t VersionFrontendQFE;
+  uint16_t VersionBackendMajor;
+  uint16_t VersionBackendMinor;
+  uint16_t VersionBackendBuild;
+  uint16_t VersionBackendQFE;
+  StringRef Version;
+
+  void setLanguage(SourceLanguage Lang) {
+    Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
+  }
+
+  uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
+  uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
+
+  uint32_t RecordOffset;
+};
+
+// S_FRAMEPROC
+class FrameProcSym : public SymbolRecord {
+public:
+  explicit FrameProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit FrameProcSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::FrameProcSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t TotalFrameBytes;
+  uint32_t PaddingFrameBytes;
+  uint32_t OffsetToPadding;
+  uint32_t BytesOfCalleeSavedRegisters;
+  uint32_t OffsetOfExceptionHandler;
+  uint16_t SectionIdOfExceptionHandler;
+  FrameProcedureOptions Flags;
+
+  uint32_t RecordOffset;
+};
+
+// S_CALLSITEINFO
+class CallSiteInfoSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 4;
+
+public:
+  explicit CallSiteInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit CallSiteInfoSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::CallSiteInfoSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t CodeOffset;
+  uint16_t Segment;
+  TypeIndex Type;
+
+  uint32_t RecordOffset;
+};
+
+// S_HEAPALLOCSITE
+class HeapAllocationSiteSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 4;
+
+public:
+  explicit HeapAllocationSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit HeapAllocationSiteSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::HeapAllocationSiteSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t CodeOffset;
+  uint16_t Segment;
+  uint16_t CallInstructionSize;
+  TypeIndex Type;
+
+  uint32_t RecordOffset;
+};
+
+// S_FRAMECOOKIE
+class FrameCookieSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 4;
+
+public:
+  explicit FrameCookieSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit FrameCookieSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::FrameCookieSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  uint32_t CodeOffset;
+  uint16_t Register;
+  FrameCookieKind CookieKind;
+  uint8_t Flags;
+
+  uint32_t RecordOffset;
+};
+
+// S_UDT, S_COBOLUDT
+class UDTSym : public SymbolRecord {
+public:
+  explicit UDTSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit UDTSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::UDTSym),
+        RecordOffset(RecordOffset) {}
+
+  TypeIndex Type;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_BUILDINFO
+class BuildInfoSym : public SymbolRecord {
+public:
+  explicit BuildInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  BuildInfoSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::BuildInfoSym),
+        RecordOffset(RecordOffset) {}
+
+  TypeIndex BuildId;
+
+  uint32_t RecordOffset;
+};
+
+// S_BPREL32
+class BPRelativeSym : public SymbolRecord {
+public:
+  explicit BPRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit BPRelativeSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::BPRelativeSym),
+        RecordOffset(RecordOffset) {}
+
+  int32_t Offset;
+  TypeIndex Type;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_REGREL32
+class RegRelativeSym : public SymbolRecord {
+public:
+  explicit RegRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit RegRelativeSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::RegRelativeSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t Offset;
+  TypeIndex Type;
+  RegisterId Register;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_CONSTANT, S_MANCONSTANT
+class ConstantSym : public SymbolRecord {
+public:
+  explicit ConstantSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  ConstantSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::ConstantSym),
+        RecordOffset(RecordOffset) {}
+
+  TypeIndex Type;
+  APSInt Value;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_LDATA32, S_GDATA32, S_LMANDATA, S_GMANDATA
+class DataSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 8;
+
+public:
+  explicit DataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  DataSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::DataSym), RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  TypeIndex Type;
+  uint32_t DataOffset;
+  uint16_t Segment;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_LTHREAD32, S_GTHREAD32
+class ThreadLocalDataSym : public SymbolRecord {
+  static constexpr uint32_t RelocationOffset = 8;
+
+public:
+  explicit ThreadLocalDataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit ThreadLocalDataSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::ThreadLocalDataSym),
+        RecordOffset(RecordOffset) {}
+
+  uint32_t getRelocationOffset() const {
+    return RecordOffset + RelocationOffset;
+  }
+
+  TypeIndex Type;
+  uint32_t DataOffset;
+  uint16_t Segment;
+  StringRef Name;
+
+  uint32_t RecordOffset;
+};
+
+// S_ANNOTATION is not yet represented by a record class in this header.
+
+using CVSymbol = CVRecord<SymbolKind>;
+using CVSymbolArray = VarStreamArray<CVSymbol>;
+
+Expected<CVSymbol> readSymbolFromStream(BinaryStreamRef Stream,
+                                        uint32_t Offset);
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h
new file mode 100644
index 0000000..391e8f1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h
@@ -0,0 +1,47 @@
+//===- SymbolRecordMapping.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
+
+#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+
+namespace llvm {
+class BinaryStreamReader;
+class BinaryStreamWriter;
+
+namespace codeview {
+class SymbolRecordMapping : public SymbolVisitorCallbacks {
+public:
+  explicit SymbolRecordMapping(BinaryStreamReader &Reader,
+                               CodeViewContainer Container)
+      : IO(Reader), Container(Container) {}
+  explicit SymbolRecordMapping(BinaryStreamWriter &Writer,
+                               CodeViewContainer Container)
+      : IO(Writer), Container(Container) {}
+
+  Error visitSymbolBegin(CVSymbol &Record) override;
+  Error visitSymbolEnd(CVSymbol &Record) override;
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override;
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+
+private:
+  Optional<SymbolKind> Kind;
+
+  CodeViewRecordIO IO;
+  CodeViewContainer Container;
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolSerializer.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolSerializer.h
new file mode 100644
index 0000000..f4d8ab0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolSerializer.h
@@ -0,0 +1,84 @@
+//===- SymbolSerializer.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class SymbolSerializer : public SymbolVisitorCallbacks {
+  BumpPtrAllocator &Storage;
+  // Since the maximum record length is a known constant, use a stack-allocated
+  // buffer.  This yields a measurable performance increase over repeated heap
+  // allocations when serializing many independent records via writeOneSymbol.
+  std::array<uint8_t, MaxRecordLength> RecordBuffer;
+  MutableBinaryByteStream Stream;
+  BinaryStreamWriter Writer;
+  SymbolRecordMapping Mapping;
+  Optional<SymbolKind> CurrentSymbol;
+
+  Error writeRecordPrefix(SymbolKind Kind) {
+    RecordPrefix Prefix;
+    Prefix.RecordKind = Kind;
+    Prefix.RecordLen = 0;
+    if (auto EC = Writer.writeObject(Prefix))
+      return EC;
+    return Error::success();
+  }
+
+public:
+  SymbolSerializer(BumpPtrAllocator &Storage, CodeViewContainer Container);
+
+  template <typename SymType>
+  static CVSymbol writeOneSymbol(SymType &Sym, BumpPtrAllocator &Storage,
+                                 CodeViewContainer Container) {
+    CVSymbol Result;
+    Result.Type = static_cast<SymbolKind>(Sym.Kind);
+    SymbolSerializer Serializer(Storage, Container);
+    consumeError(Serializer.visitSymbolBegin(Result));
+    consumeError(Serializer.visitKnownRecord(Result, Sym));
+    consumeError(Serializer.visitSymbolEnd(Result));
+    return Result;
+  }
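+  // Illustrative one-shot serialization (hypothetical values):
+  //   BumpPtrAllocator Alloc;
+  //   PublicSym32 Sym(SymbolRecordKind::PublicSym32);
+  //   Sym.Name = "main";
+  //   CVSymbol Serialized =
+  //       SymbolSerializer::writeOneSymbol(Sym, Alloc, CodeViewContainer::Pdb);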
+
+  Error visitSymbolBegin(CVSymbol &Record) override;
+  Error visitSymbolEnd(CVSymbol &Record) override;
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
+    return visitKnownRecordImpl(CVR, Record);                                  \
+  }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+
+private:
+  template <typename RecordKind>
+  Error visitKnownRecordImpl(CVSymbol &CVR, RecordKind &Record) {
+    return Mapping.visitKnownRecord(CVR, Record);
+  }
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h
new file mode 100644
index 0000000..e29511a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h
@@ -0,0 +1,79 @@
+//===- SymbolVisitorCallbackPipeline.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class SymbolVisitorCallbackPipeline : public SymbolVisitorCallbacks {
+public:
+  SymbolVisitorCallbackPipeline() = default;
+
+  Error visitUnknownSymbol(CVSymbol &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitUnknownSymbol(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitSymbolBegin(Record, Offset))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitSymbolBegin(CVSymbol &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitSymbolBegin(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitSymbolEnd(CVSymbol &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitSymbolEnd(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  void addCallbackToPipeline(SymbolVisitorCallbacks &Callbacks) {
+    Pipeline.push_back(&Callbacks);
+  }
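+  // Callbacks run in the order they were added; a deserializer is typically
+  // registered before a dumper, e.g. (hypothetical visitors):
+  //   Pipeline.addCallbackToPipeline(Deserializer);
+  //   Pipeline.addCallbackToPipeline(Dumper);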
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
+    for (auto Visitor : Pipeline) {                                            \
+      if (auto EC = Visitor->visitKnownRecord(CVR, Record))                    \
+        return EC;                                                             \
+    }                                                                          \
+    return Error::success();                                                   \
+  }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+
+private:
+  std::vector<SymbolVisitorCallbacks *> Pipeline;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h
new file mode 100644
index 0000000..0816f7c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h
@@ -0,0 +1,50 @@
+//===- SymbolVisitorCallbacks.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class SymbolVisitorCallbacks {
+  friend class CVSymbolVisitor;
+
+public:
+  virtual ~SymbolVisitorCallbacks() = default;
+
+  /// Action to take on unknown symbols. By default, they are ignored.
+  virtual Error visitUnknownSymbol(CVSymbol &Record) {
+    return Error::success();
+  }
+
+  /// Paired begin/end actions for all symbols. Receives all record data,
+  /// including the fixed-length record prefix.  visitSymbolBegin() should
+  /// determine the kind of the symbol, returning an error if it cannot.
+  virtual Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) {
+    return Error::success();
+  }
+  virtual Error visitSymbolBegin(CVSymbol &Record) { return Error::success(); }
+  virtual Error visitSymbolEnd(CVSymbol &Record) { return Error::success(); }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
+  virtual Error visitKnownRecord(CVSymbol &CVR, Name &Record) {                \
+    return Error::success();                                                   \
+  }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h
new file mode 100644
index 0000000..a2a3c6f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h
@@ -0,0 +1,37 @@
+//===-- SymbolVisitorDelegate.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
+
+#include "llvm/ADT/StringRef.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BinaryStreamReader;
+
+namespace codeview {
+
+class DebugStringTableSubsectionRef;
+
+class SymbolVisitorDelegate {
+public:
+  virtual ~SymbolVisitorDelegate() = default;
+
+  virtual uint32_t getRecordOffset(BinaryStreamReader Reader) = 0;
+  virtual StringRef getFileNameForFileOffset(uint32_t FileOffset) = 0;
+  virtual DebugStringTableSubsectionRef getStringTable() = 0;
+};
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeCollection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeCollection.h
new file mode 100644
index 0000000..e9fc9b0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeCollection.h
@@ -0,0 +1,48 @@
+//===- TypeCollection.h - A collection of CodeView type records -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
+
+#include "llvm/ADT/StringRef.h"
+
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+
+namespace llvm {
+namespace codeview {
+class TypeCollection {
+public:
+  virtual ~TypeCollection() = default;
+
+  bool empty() { return size() == 0; }
+
+  virtual Optional<TypeIndex> getFirst() = 0;
+  virtual Optional<TypeIndex> getNext(TypeIndex Prev) = 0;
+
+  virtual CVType getType(TypeIndex Index) = 0;
+  virtual StringRef getTypeName(TypeIndex Index) = 0;
+  virtual bool contains(TypeIndex Index) = 0;
+  virtual uint32_t size() = 0;
+  virtual uint32_t capacity() = 0;
+
+  template <typename TFunc> void ForEachRecord(TFunc Func) {
+    Optional<TypeIndex> Next = getFirst();
+
+    while (Next.hasValue()) {
+      TypeIndex N = *Next;
+      Func(N, getType(N));
+      Next = getNext(N);
+    }
+  }
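+  // Illustrative usage (hypothetical lambda):
+  //   Types.ForEachRecord([](TypeIndex TI, const CVType &Type) {
+  //     /* inspect TI and Type */
+  //   });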
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDeserializer.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
new file mode 100644
index 0000000..9887d90
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
@@ -0,0 +1,166 @@
+//===- TypeDeserializer.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace codeview {
+
+class TypeDeserializer : public TypeVisitorCallbacks {
+  struct MappingInfo {
+    explicit MappingInfo(ArrayRef<uint8_t> RecordData)
+        : Stream(RecordData, llvm::support::little), Reader(Stream),
+          Mapping(Reader) {}
+
+    BinaryByteStream Stream;
+    BinaryStreamReader Reader;
+    TypeRecordMapping Mapping;
+  };
+
+public:
+  TypeDeserializer() = default;
+
+  template <typename T> static Error deserializeAs(CVType &CVT, T &Record) {
+    Record.Kind = static_cast<TypeRecordKind>(CVT.kind());
+    MappingInfo I(CVT.content());
+    if (auto EC = I.Mapping.visitTypeBegin(CVT))
+      return EC;
+    if (auto EC = I.Mapping.visitKnownRecord(CVT, Record))
+      return EC;
+    if (auto EC = I.Mapping.visitTypeEnd(CVT))
+      return EC;
+    return Error::success();
+  }
+
+  template <typename T>
+  static Expected<T> deserializeAs(ArrayRef<uint8_t> Data) {
+    const RecordPrefix *Prefix =
+        reinterpret_cast<const RecordPrefix *>(Data.data());
+    TypeRecordKind K =
+        static_cast<TypeRecordKind>(uint16_t(Prefix->RecordKind));
+    T Record(K);
+    CVType CVT(static_cast<TypeLeafKind>(K), Data);
+    if (auto EC = deserializeAs<T>(CVT, Record))
+      return std::move(EC);
+    return Record;
+  }
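+  // Illustrative usage (hypothetical CVType `CVT` of a pointer record):
+  //   PointerRecord PR(TypeRecordKind::Pointer);
+  //   if (auto EC = TypeDeserializer::deserializeAs(CVT, PR))
+  //     return EC;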
+
+  Error visitTypeBegin(CVType &Record) override {
+    assert(!Mapping && "Already in a type mapping!");
+    Mapping = llvm::make_unique<MappingInfo>(Record.content());
+    return Mapping->Mapping.visitTypeBegin(Record);
+  }
+
+  Error visitTypeBegin(CVType &Record, TypeIndex Index) override {
+    return visitTypeBegin(Record);
+  }
+
+  Error visitTypeEnd(CVType &Record) override {
+    assert(Mapping && "Not in a type mapping!");
+    auto EC = Mapping->Mapping.visitTypeEnd(Record);
+    Mapping.reset();
+    return EC;
+  }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
+  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override {         \
+    return visitKnownRecordImpl<Name##Record>(CVR, Record);                    \
+  }
+#define MEMBER_RECORD(EnumName, EnumVal, Name)
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+
+private:
+  template <typename RecordType>
+  Error visitKnownRecordImpl(CVType &CVR, RecordType &Record) {
+    return Mapping->Mapping.visitKnownRecord(CVR, Record);
+  }
+
+  std::unique_ptr<MappingInfo> Mapping;
+};
+
+class FieldListDeserializer : public TypeVisitorCallbacks {
+  struct MappingInfo {
+    explicit MappingInfo(BinaryStreamReader &R)
+        : Reader(R), Mapping(Reader), StartOffset(0) {}
+
+    BinaryStreamReader &Reader;
+    TypeRecordMapping Mapping;
+    uint32_t StartOffset;
+  };
+
+public:
+  explicit FieldListDeserializer(BinaryStreamReader &Reader) : Mapping(Reader) {
+    CVType FieldList;
+    FieldList.Type = TypeLeafKind::LF_FIELDLIST;
+    consumeError(Mapping.Mapping.visitTypeBegin(FieldList));
+  }
+
+  ~FieldListDeserializer() override {
+    CVType FieldList;
+    FieldList.Type = TypeLeafKind::LF_FIELDLIST;
+    consumeError(Mapping.Mapping.visitTypeEnd(FieldList));
+  }
+
+  Error visitMemberBegin(CVMemberRecord &Record) override {
+    Mapping.StartOffset = Mapping.Reader.getOffset();
+    return Mapping.Mapping.visitMemberBegin(Record);
+  }
+
+  Error visitMemberEnd(CVMemberRecord &Record) override {
+    if (auto EC = Mapping.Mapping.visitMemberEnd(Record))
+      return EC;
+    return Error::success();
+  }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)
+#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override { \
+    return visitKnownMemberImpl<Name##Record>(CVR, Record);                    \
+  }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+
+private:
+  template <typename RecordType>
+  Error visitKnownMemberImpl(CVMemberRecord &CVR, RecordType &Record) {
+    if (auto EC = Mapping.Mapping.visitKnownMember(CVR, Record))
+      return EC;
+
+    uint32_t EndOffset = Mapping.Reader.getOffset();
+    uint32_t RecordLength = EndOffset - Mapping.StartOffset;
+    Mapping.Reader.setOffset(Mapping.StartOffset);
+    if (auto EC = Mapping.Reader.readBytes(CVR.Data, RecordLength))
+      return EC;
+    assert(Mapping.Reader.getOffset() == EndOffset);
+    return Error::success();
+  }
+  MappingInfo Mapping;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h
new file mode 100644
index 0000000..afb8b36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h
@@ -0,0 +1,86 @@
+//===-- TypeDumpVisitor.h - CodeView type info dumper -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+
+namespace llvm {
+class ScopedPrinter;
+
+namespace codeview {
+
+class TypeCollection;
+
+/// Dumper for CodeView type streams found in COFF object files and PDB files.
+class TypeDumpVisitor : public TypeVisitorCallbacks {
+public:
+  TypeDumpVisitor(TypeCollection &TpiTypes, ScopedPrinter *W,
+                  bool PrintRecordBytes)
+      : W(W), PrintRecordBytes(PrintRecordBytes), TpiTypes(TpiTypes) {}
+
+  /// When dumping types from an IPI stream in a PDB, a type index may refer to
+  /// a type or an item ID. The dumper will look up the "name" of the index in
+  /// the item collection if appropriate. If IpiTypes is not set, the dumper
+  /// uses TpiTypes, which is correct when dumping types from an object file
+  /// (/Z7).
+  void setIpiTypes(TypeCollection &Types) { IpiTypes = &Types; }
+
+  void printTypeIndex(StringRef FieldName, TypeIndex TI) const;
+
+  void printItemIndex(StringRef FieldName, TypeIndex TI) const;
+
+  /// Action to take on unknown types. By default, they are ignored.
+  Error visitUnknownType(CVType &Record) override;
+  Error visitUnknownMember(CVMemberRecord &Record) override;
+
+  /// Paired begin/end actions for all types. Receives all record data,
+  /// including the fixed-length record prefix.
+  Error visitTypeBegin(CVType &Record) override;
+  Error visitTypeBegin(CVType &Record, TypeIndex Index) override;
+  Error visitTypeEnd(CVType &Record) override;
+  Error visitMemberBegin(CVMemberRecord &Record) override;
+  Error visitMemberEnd(CVMemberRecord &Record) override;
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
+  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
+#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+
+private:
+  void printMemberAttributes(MemberAttributes Attrs);
+  void printMemberAttributes(MemberAccess Access, MethodKind Kind,
+                             MethodOptions Options);
+
+  /// Get the type collection for the stream that we are dumping. If IpiTypes
+  /// is set, then we must be dumping an item (IPI) stream, and it is also the
+  /// appropriate collection for printing item names.
+  TypeCollection &getSourceTypes() const {
+    return IpiTypes ? *IpiTypes : TpiTypes;
+  }
+
+  ScopedPrinter *W;
+
+  bool PrintRecordBytes = false;
+
+  TypeCollection &TpiTypes;
+  TypeCollection *IpiTypes = nullptr;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeHashing.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeHashing.h
new file mode 100644
index 0000000..7413375
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeHashing.h
@@ -0,0 +1,204 @@
+//===- TypeHashing.h ---------------------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEHASHING_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEHASHING_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/Hashing.h"
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+#include "llvm/Support/FormatProviders.h"
+
+#include <type_traits>
+
+namespace llvm {
+namespace codeview {
+
+/// A locally hashed type represents a straightforward hash code of a serialized
+/// record.  The record is simply serialized, and then the bytes are hashed by
+/// a standard algorithm.  This is sufficient for the case of de-duplicating
+/// records within a single sequence of types, because if two records both have
+/// a back-reference to the same type in the same stream, they will both have
+/// the same numeric value for the TypeIndex of the back reference.
+struct LocallyHashedType {
+  hash_code Hash;
+  ArrayRef<uint8_t> RecordData;
+
+  /// Given a type, compute its local hash.
+  static LocallyHashedType hashType(ArrayRef<uint8_t> RecordData);
+
+  /// Given a sequence of types, compute all of the local hashes.
+  template <typename Range>
+  static std::vector<LocallyHashedType> hashTypes(Range &&Records) {
+    std::vector<LocallyHashedType> Hashes;
+    Hashes.reserve(std::distance(std::begin(Records), std::end(Records)));
+    for (const auto &R : Records)
+      Hashes.push_back(hashType(R));
+
+    return Hashes;
+  }
+
+  static std::vector<LocallyHashedType>
+  hashTypeCollection(TypeCollection &Types) {
+    std::vector<LocallyHashedType> Hashes;
+    Types.ForEachRecord([&Hashes](TypeIndex TI, const CVType &Type) {
+      Hashes.push_back(hashType(Type.RecordData));
+    });
+    return Hashes;
+  }
+};
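+
+// Because DenseMapInfo specializations are provided at the end of this file,
+// hashed types can key a DenseMap directly, e.g. (illustrative)
+// DenseMap<LocallyHashedType, TypeIndex> when de-duplicating a type stream.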
+
+enum class GlobalTypeHashAlg : uint16_t { SHA1 = 0 };
+
+/// A globally hashed type represents a hash value that is sufficient to
+/// uniquely identify a record across multiple type streams or type sequences.
+/// This works by, for any given record A which references B, replacing the
+/// TypeIndex that refers to B with a previously-computed global hash for B.  As
+/// this is a recursive algorithm (e.g. the global hash of B also depends on the
+/// global hashes of the types that B refers to), a global hash can uniquely
+/// identify that A occurs in another stream that has a completely
+/// different graph structure.  Although the hash itself is slower to compute,
+/// probing is much faster with a globally hashed type, because the hash itself
+/// is considered "as good as" the original type.  Since type records can be
+/// quite large, this makes the equality comparison of the hash much faster than
+/// equality comparison of a full record.
+struct GloballyHashedType {
+  GloballyHashedType() = default;
+  GloballyHashedType(StringRef H)
+      : GloballyHashedType(ArrayRef<uint8_t>(H.bytes_begin(), H.bytes_end())) {}
+  GloballyHashedType(ArrayRef<uint8_t> H) {
+    assert(H.size() == 20);
+    ::memcpy(Hash.data(), H.data(), 20);
+  }
+  std::array<uint8_t, 20> Hash;
+
+  /// Given a sequence of bytes representing a record, compute a global hash for
+  /// this record.  Due to the nature of global hashes incorporating the hashes
+  /// of referenced records, this function requires a list of types and ids
+  /// that RecordData might reference, indexable by TypeIndex.
+  static GloballyHashedType hashType(ArrayRef<uint8_t> RecordData,
+                                     ArrayRef<GloballyHashedType> PreviousTypes,
+                                     ArrayRef<GloballyHashedType> PreviousIds);
+
+  /// Given a sequence of bytes representing a record, compute a global hash for
+  /// this record.  Due to the nature of global hashes incorporating the hashes
+  /// of referenced records, this function requires a list of types and ids
+  /// that RecordData might reference, indexable by TypeIndex.
+  static GloballyHashedType hashType(CVType Type,
+                                     ArrayRef<GloballyHashedType> PreviousTypes,
+                                     ArrayRef<GloballyHashedType> PreviousIds) {
+    return hashType(Type.RecordData, PreviousTypes, PreviousIds);
+  }
+
+  /// Given a sequence of combined type and ID records, compute global hashes
+  /// for each of them, returning the results in a vector of hashed types.
+  template <typename Range>
+  static std::vector<GloballyHashedType> hashTypes(Range &&Records) {
+    std::vector<GloballyHashedType> Hashes;
+    for (const auto &R : Records)
+      Hashes.push_back(hashType(R, Hashes, Hashes));
+
+    return Hashes;
+  }
+
+  /// Given a sequence of ID records, compute global hashes for each of them,
+  /// resolving cross-stream type references against the given type hashes.
+  template <typename Range>
+  static std::vector<GloballyHashedType>
+  hashIds(Range &&Records, ArrayRef<GloballyHashedType> TypeHashes) {
+    std::vector<GloballyHashedType> IdHashes;
+    for (const auto &R : Records)
+      IdHashes.push_back(hashType(R, TypeHashes, IdHashes));
+
+    return IdHashes;
+  }
+
+  static std::vector<GloballyHashedType>
+  hashTypeCollection(TypeCollection &Types) {
+    std::vector<GloballyHashedType> Hashes;
+    Types.ForEachRecord([&Hashes](TypeIndex TI, const CVType &Type) {
+      Hashes.push_back(hashType(Type.RecordData, Hashes, Hashes));
+    });
+    return Hashes;
+  }
+};
+#if defined(_MSC_VER)
+// is_trivially_copyable is not available in older versions of libc++, but it is
+// available in all supported versions of MSVC, so at least this gives us some
+// coverage.
+static_assert(std::is_trivially_copyable<GloballyHashedType>::value,
+              "GloballyHashedType must be trivially copyable so that we can "
+              "reinterpret_cast arrays of hash data to arrays of "
+              "GloballyHashedType");
+#endif
+} // namespace codeview
+
+template <> struct DenseMapInfo<codeview::LocallyHashedType> {
+  static codeview::LocallyHashedType Empty;
+  static codeview::LocallyHashedType Tombstone;
+
+  static codeview::LocallyHashedType getEmptyKey() { return Empty; }
+
+  static codeview::LocallyHashedType getTombstoneKey() { return Tombstone; }
+
+  static unsigned getHashValue(codeview::LocallyHashedType Val) {
+    return Val.Hash;
+  }
+
+  static bool isEqual(codeview::LocallyHashedType LHS,
+                      codeview::LocallyHashedType RHS) {
+    if (LHS.Hash != RHS.Hash)
+      return false;
+    return LHS.RecordData == RHS.RecordData;
+  }
+};
+
+template <> struct DenseMapInfo<codeview::GloballyHashedType> {
+  static codeview::GloballyHashedType Empty;
+  static codeview::GloballyHashedType Tombstone;
+
+  static codeview::GloballyHashedType getEmptyKey() { return Empty; }
+
+  static codeview::GloballyHashedType getTombstoneKey() { return Tombstone; }
+
+  static unsigned getHashValue(codeview::GloballyHashedType Val) {
+    return *reinterpret_cast<const unsigned *>(Val.Hash.data());
+  }
+
+  static bool isEqual(codeview::GloballyHashedType LHS,
+                      codeview::GloballyHashedType RHS) {
+    return LHS.Hash == RHS.Hash;
+  }
+};
+
+template <> struct format_provider<codeview::LocallyHashedType> {
+  static void format(const codeview::LocallyHashedType &V,
+                     llvm::raw_ostream &Stream, StringRef Style) {
+    write_hex(Stream, V.Hash, HexPrintStyle::Upper, 8);
+  }
+};
+
+template <> struct format_provider<codeview::GloballyHashedType> {
+  static void format(const codeview::GloballyHashedType &V,
+                     llvm::raw_ostream &Stream, StringRef Style) {
+    for (uint8_t B : V.Hash) {
+      write_hex(Stream, B, HexPrintStyle::Upper, 2);
+    }
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndex.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndex.h
new file mode 100644
index 0000000..c71281d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -0,0 +1,291 @@
+//===- TypeIndex.h ----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/Endian.h"
+#include <cassert>
+#include <cinttypes>
+#include <functional>
+
+namespace llvm {
+
+class ScopedPrinter;
+
+namespace codeview {
+
+class TypeCollection;
+
+enum class SimpleTypeKind : uint32_t {
+  None = 0x0000,          // uncharacterized type (no type)
+  Void = 0x0003,          // void
+  NotTranslated = 0x0007, // type not translated by cvpack
+  HResult = 0x0008,       // OLE/COM HRESULT
+
+  SignedCharacter = 0x0010,   // 8 bit signed
+  UnsignedCharacter = 0x0020, // 8 bit unsigned
+  NarrowCharacter = 0x0070,   // really a char
+  WideCharacter = 0x0071,     // wide char
+  Character16 = 0x007a,       // char16_t
+  Character32 = 0x007b,       // char32_t
+
+  SByte = 0x0068,       // 8 bit signed int
+  Byte = 0x0069,        // 8 bit unsigned int
+  Int16Short = 0x0011,  // 16 bit signed
+  UInt16Short = 0x0021, // 16 bit unsigned
+  Int16 = 0x0072,       // 16 bit signed int
+  UInt16 = 0x0073,      // 16 bit unsigned int
+  Int32Long = 0x0012,   // 32 bit signed
+  UInt32Long = 0x0022,  // 32 bit unsigned
+  Int32 = 0x0074,       // 32 bit signed int
+  UInt32 = 0x0075,      // 32 bit unsigned int
+  Int64Quad = 0x0013,   // 64 bit signed
+  UInt64Quad = 0x0023,  // 64 bit unsigned
+  Int64 = 0x0076,       // 64 bit signed int
+  UInt64 = 0x0077,      // 64 bit unsigned int
+  Int128Oct = 0x0014,   // 128 bit signed int
+  UInt128Oct = 0x0024,  // 128 bit unsigned int
+  Int128 = 0x0078,      // 128 bit signed int
+  UInt128 = 0x0079,     // 128 bit unsigned int
+
+  Float16 = 0x0046,                 // 16 bit real
+  Float32 = 0x0040,                 // 32 bit real
+  Float32PartialPrecision = 0x0045, // 32 bit PP real
+  Float48 = 0x0044,                 // 48 bit real
+  Float64 = 0x0041,                 // 64 bit real
+  Float80 = 0x0042,                 // 80 bit real
+  Float128 = 0x0043,                // 128 bit real
+
+  Complex16 = 0x0056,                 // 16 bit complex
+  Complex32 = 0x0050,                 // 32 bit complex
+  Complex32PartialPrecision = 0x0055, // 32 bit PP complex
+  Complex48 = 0x0054,                 // 48 bit complex
+  Complex64 = 0x0051,                 // 64 bit complex
+  Complex80 = 0x0052,                 // 80 bit complex
+  Complex128 = 0x0053,                // 128 bit complex
+
+  Boolean8 = 0x0030,   // 8 bit boolean
+  Boolean16 = 0x0031,  // 16 bit boolean
+  Boolean32 = 0x0032,  // 32 bit boolean
+  Boolean64 = 0x0033,  // 64 bit boolean
+  Boolean128 = 0x0034, // 128 bit boolean
+};
+
+enum class SimpleTypeMode : uint32_t {
+  Direct = 0x00000000,        // Not a pointer
+  NearPointer = 0x00000100,   // Near pointer
+  FarPointer = 0x00000200,    // Far pointer
+  HugePointer = 0x00000300,   // Huge pointer
+  NearPointer32 = 0x00000400, // 32 bit near pointer
+  FarPointer32 = 0x00000500,  // 32 bit far pointer
+  NearPointer64 = 0x00000600, // 64 bit near pointer
+  NearPointer128 = 0x00000700 // 128 bit near pointer
+};
+
+/// A 32-bit type reference. Types are indexed by their order of appearance in
+/// .debug$T plus 0x1000. Type indices less than 0x1000 are "simple" types,
+/// composed of a SimpleTypeMode byte followed by a SimpleTypeKind byte.
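+///
+/// A small sketch of the encoding, using the constants and constructors
+/// declared below (the values chosen are only illustrative):
+/// \code
+///   TypeIndex First = TypeIndex::fromArrayIndex(0); // first .debug$T record
+///   TypeIndex P(SimpleTypeKind::Int32, SimpleTypeMode::NearPointer64);
+///   assert(First.getIndex() == 0x1000 && !First.isSimple() && P.isSimple());
+/// \endcode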
+class TypeIndex {
+public:
+  static const uint32_t FirstNonSimpleIndex = 0x1000;
+  static const uint32_t SimpleKindMask = 0x000000ff;
+  static const uint32_t SimpleModeMask = 0x00000700;
+  static const uint32_t DecoratedItemIdMask = 0x80000000;
+
+public:
+  TypeIndex() : Index(static_cast<uint32_t>(SimpleTypeKind::None)) {}
+  explicit TypeIndex(uint32_t Index) : Index(Index) {}
+  explicit TypeIndex(SimpleTypeKind Kind)
+      : Index(static_cast<uint32_t>(Kind)) {}
+  TypeIndex(SimpleTypeKind Kind, SimpleTypeMode Mode)
+      : Index(static_cast<uint32_t>(Kind) | static_cast<uint32_t>(Mode)) {}
+
+  uint32_t getIndex() const { return Index; }
+  void setIndex(uint32_t I) { Index = I; }
+  bool isSimple() const { return Index < FirstNonSimpleIndex; }
+  bool isDecoratedItemId() const { return !!(Index & DecoratedItemIdMask); }
+
+  bool isNoneType() const { return *this == None(); }
+
+  uint32_t toArrayIndex() const {
+    assert(!isSimple());
+    return getIndex() - FirstNonSimpleIndex;
+  }
+
+  static TypeIndex fromArrayIndex(uint32_t Index) {
+    return TypeIndex(Index + FirstNonSimpleIndex);
+  }
+
+  SimpleTypeKind getSimpleKind() const {
+    assert(isSimple());
+    return static_cast<SimpleTypeKind>(Index & SimpleKindMask);
+  }
+
+  SimpleTypeMode getSimpleMode() const {
+    assert(isSimple());
+    return static_cast<SimpleTypeMode>(Index & SimpleModeMask);
+  }
+
+  static TypeIndex None() { return TypeIndex(SimpleTypeKind::None); }
+  static TypeIndex Void() { return TypeIndex(SimpleTypeKind::Void); }
+  static TypeIndex VoidPointer32() {
+    return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer32);
+  }
+  static TypeIndex VoidPointer64() {
+    return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer64);
+  }
+
+  static TypeIndex SignedCharacter() {
+    return TypeIndex(SimpleTypeKind::SignedCharacter);
+  }
+  static TypeIndex UnsignedCharacter() {
+    return TypeIndex(SimpleTypeKind::UnsignedCharacter);
+  }
+  static TypeIndex NarrowCharacter() {
+    return TypeIndex(SimpleTypeKind::NarrowCharacter);
+  }
+  static TypeIndex WideCharacter() {
+    return TypeIndex(SimpleTypeKind::WideCharacter);
+  }
+  static TypeIndex Int16Short() {
+    return TypeIndex(SimpleTypeKind::Int16Short);
+  }
+  static TypeIndex UInt16Short() {
+    return TypeIndex(SimpleTypeKind::UInt16Short);
+  }
+  static TypeIndex Int32() { return TypeIndex(SimpleTypeKind::Int32); }
+  static TypeIndex UInt32() { return TypeIndex(SimpleTypeKind::UInt32); }
+  static TypeIndex Int32Long() { return TypeIndex(SimpleTypeKind::Int32Long); }
+  static TypeIndex UInt32Long() {
+    return TypeIndex(SimpleTypeKind::UInt32Long);
+  }
+  static TypeIndex Int64() { return TypeIndex(SimpleTypeKind::Int64); }
+  static TypeIndex UInt64() { return TypeIndex(SimpleTypeKind::UInt64); }
+  static TypeIndex Int64Quad() { return TypeIndex(SimpleTypeKind::Int64Quad); }
+  static TypeIndex UInt64Quad() {
+    return TypeIndex(SimpleTypeKind::UInt64Quad);
+  }
+
+  static TypeIndex Float32() { return TypeIndex(SimpleTypeKind::Float32); }
+  static TypeIndex Float64() { return TypeIndex(SimpleTypeKind::Float64); }
+
+  TypeIndex &operator+=(unsigned N) {
+    Index += N;
+    return *this;
+  }
+
+  TypeIndex &operator++() {
+    Index += 1;
+    return *this;
+  }
+
+  TypeIndex operator++(int) {
+    TypeIndex Copy = *this;
+    operator++();
+    return Copy;
+  }
+
+  TypeIndex &operator-=(unsigned N) {
+    assert(Index >= N);
+    Index -= N;
+    return *this;
+  }
+
+  TypeIndex &operator--() {
+    Index -= 1;
+    return *this;
+  }
+
+  TypeIndex operator--(int) {
+    TypeIndex Copy = *this;
+    operator--();
+    return Copy;
+  }
+
+  friend inline bool operator==(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() == B.getIndex();
+  }
+
+  friend inline bool operator!=(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() != B.getIndex();
+  }
+
+  friend inline bool operator<(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() < B.getIndex();
+  }
+
+  friend inline bool operator<=(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() <= B.getIndex();
+  }
+
+  friend inline bool operator>(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() > B.getIndex();
+  }
+
+  friend inline bool operator>=(const TypeIndex &A, const TypeIndex &B) {
+    return A.getIndex() >= B.getIndex();
+  }
+
+  friend inline TypeIndex operator+(const TypeIndex &A, uint32_t N) {
+    TypeIndex Result(A);
+    Result += N;
+    return Result;
+  }
+
+  friend inline TypeIndex operator-(const TypeIndex &A, uint32_t N) {
+    assert(A.getIndex() >= N);
+    TypeIndex Result(A);
+    Result -= N;
+    return Result;
+  }
+
+  friend inline uint32_t operator-(const TypeIndex &A, const TypeIndex &B) {
+    assert(A >= B);
+    return A.toArrayIndex() - B.toArrayIndex();
+  }
+
+  static StringRef simpleTypeName(TypeIndex TI);
+
+private:
+  support::ulittle32_t Index;
+};
+
+// Used for pseudo-indexing an array of type records.  An array of such records
+// sorted by TypeIndex can allow log(N) lookups even though such a type record
+// stream does not provide random access.
+struct TypeIndexOffset {
+  TypeIndex Type;
+  support::ulittle32_t Offset;
+};
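+
+// A minimal lookup sketch, assuming `Offsets` is an ArrayRef<TypeIndexOffset>
+// sorted by Type: binary-search the pseudo-index for the entry at or before
+// `TI`, then scan forward from that entry's stream offset.
+//
+// \code
+//   auto It = std::lower_bound(Offsets.begin(), Offsets.end(), TI,
+//                              [](const TypeIndexOffset &TIO, TypeIndex TI) {
+//                                return TIO.Type < TI;
+//                              });
+//   // `It` is the first entry whose Type is >= TI; when It->Type != TI,
+//   // scanning for TI begins at the preceding entry's Offset.
+// \endcode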
+
+void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName, TypeIndex TI,
+                    TypeCollection &Types);
+} // namespace codeview
+
+template <> struct DenseMapInfo<codeview::TypeIndex> {
+  static inline codeview::TypeIndex getEmptyKey() {
+    return codeview::TypeIndex{DenseMapInfo<uint32_t>::getEmptyKey()};
+  }
+  static inline codeview::TypeIndex getTombstoneKey() {
+    return codeview::TypeIndex{DenseMapInfo<uint32_t>::getTombstoneKey()};
+  }
+  static unsigned getHashValue(const codeview::TypeIndex &TI) {
+    return DenseMapInfo<uint32_t>::getHashValue(TI.getIndex());
+  }
+  static bool isEqual(const codeview::TypeIndex &LHS,
+                      const codeview::TypeIndex &RHS) {
+    return LHS == RHS;
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h
new file mode 100644
index 0000000..c424a09
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h
@@ -0,0 +1,47 @@
+//===- TypeIndexDiscovery.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+enum class TiRefKind { TypeRef, IndexRef };
+struct TiReference {
+  TiRefKind Kind;
+  uint32_t Offset;
+  uint32_t Count;
+};
+
+void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
+                         SmallVectorImpl<TiReference> &Refs);
+void discoverTypeIndices(const CVType &Type,
+                         SmallVectorImpl<TiReference> &Refs);
+void discoverTypeIndices(const CVType &Type,
+                         SmallVectorImpl<TypeIndex> &Indices);
+void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
+                         SmallVectorImpl<TypeIndex> &Indices);
+
+/// Discover type indices in symbol records. Returns false if this is an unknown
+/// record.
+bool discoverTypeIndicesInSymbol(const CVSymbol &Symbol,
+                                 SmallVectorImpl<TiReference> &Refs);
+bool discoverTypeIndicesInSymbol(ArrayRef<uint8_t> RecordData,
+                                 SmallVectorImpl<TiReference> &Refs);
+bool discoverTypeIndicesInSymbol(ArrayRef<uint8_t> RecordData,
+                                 SmallVectorImpl<TypeIndex> &Indices);
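+
+// A short sketch of consuming discovered references, assuming `RecordData`
+// is the serialized payload of one type record:
+//
+// \code
+//   SmallVector<TiReference, 4> Refs;
+//   discoverTypeIndices(RecordData, Refs);
+//   for (const TiReference &Ref : Refs) {
+//     // Ref.Offset is the byte offset of Ref.Count consecutive 32-bit
+//     // indices; Ref.Kind says whether they refer to types or ids.
+//   }
+// \endcode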
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecord.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecord.h
new file mode 100644
index 0000000..55f2822
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -0,0 +1,902 @@
+//===- TypeRecord.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/CodeView/CVRecord.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/GUID.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Endian.h"
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+using support::little32_t;
+using support::ulittle16_t;
+using support::ulittle32_t;
+
+using CVType = CVRecord<TypeLeafKind>;
+using RemappedType = RemappedRecord<TypeLeafKind>;
+
+struct CVMemberRecord {
+  TypeLeafKind Kind;
+  ArrayRef<uint8_t> Data;
+};
+using CVTypeArray = VarStreamArray<CVType>;
+using CVTypeRange = iterator_range<CVTypeArray::Iterator>;
+
+/// Equivalent to CV_fldattr_t in cvinfo.h.
+struct MemberAttributes {
+  uint16_t Attrs = 0;
+
+  enum {
+    MethodKindShift = 2,
+  };
+
+  MemberAttributes() = default;
+
+  explicit MemberAttributes(MemberAccess Access)
+      : Attrs(static_cast<uint16_t>(Access)) {}
+
+  MemberAttributes(MemberAccess Access, MethodKind Kind, MethodOptions Flags) {
+    Attrs = static_cast<uint16_t>(Access);
+    Attrs |= (static_cast<uint16_t>(Kind) << MethodKindShift);
+    Attrs |= static_cast<uint16_t>(Flags);
+  }
+
+  /// Get the access specifier. Valid for any kind of member.
+  MemberAccess getAccess() const {
+    return MemberAccess(unsigned(Attrs) & unsigned(MethodOptions::AccessMask));
+  }
+
+  /// Indicates if a method is defined with friend, virtual, static, etc.
+  MethodKind getMethodKind() const {
+    return MethodKind(
+        (unsigned(Attrs) & unsigned(MethodOptions::MethodKindMask)) >>
+        MethodKindShift);
+  }
+
+  /// Get the flags that are not included in access control or method
+  /// properties.
+  MethodOptions getFlags() const {
+    return MethodOptions(
+        unsigned(Attrs) &
+        ~unsigned(MethodOptions::AccessMask | MethodOptions::MethodKindMask));
+  }
+
+  /// Returns true if this method is virtual.
+  bool isVirtual() const {
+    auto MP = getMethodKind();
+    return MP != MethodKind::Vanilla && MP != MethodKind::Friend &&
+           MP != MethodKind::Static;
+  }
+
+  /// Returns true if this member introduces a new virtual method.
+  bool isIntroducedVirtual() const {
+    auto MP = getMethodKind();
+    return MP == MethodKind::IntroducingVirtual ||
+           MP == MethodKind::PureIntroducingVirtual;
+  }
+};
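+
+// A small sketch of how the packed bits round-trip (the names are from this
+// header; the values chosen are only illustrative):
+//
+// \code
+//   MemberAttributes MA(MemberAccess::Public, MethodKind::IntroducingVirtual,
+//                       MethodOptions::None);
+//   assert(MA.getAccess() == MemberAccess::Public);
+//   assert(MA.isVirtual() && MA.isIntroducedVirtual());
+// \endcode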
+
+// Does not correspond to any tag, this is the tail of an LF_POINTER record
+// if it represents a member pointer.
+class MemberPointerInfo {
+public:
+  MemberPointerInfo() = default;
+
+  MemberPointerInfo(TypeIndex ContainingType,
+                    PointerToMemberRepresentation Representation)
+      : ContainingType(ContainingType), Representation(Representation) {}
+
+  TypeIndex getContainingType() const { return ContainingType; }
+  PointerToMemberRepresentation getRepresentation() const {
+    return Representation;
+  }
+
+  TypeIndex ContainingType;
+  PointerToMemberRepresentation Representation;
+};
+
+class TypeRecord {
+protected:
+  TypeRecord() = default;
+  explicit TypeRecord(TypeRecordKind Kind) : Kind(Kind) {}
+
+public:
+  TypeRecordKind getKind() const { return Kind; }
+
+  TypeRecordKind Kind;
+};
+
+// LF_MODIFIER
+class ModifierRecord : public TypeRecord {
+public:
+  ModifierRecord() = default;
+  explicit ModifierRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  ModifierRecord(TypeIndex ModifiedType, ModifierOptions Modifiers)
+      : TypeRecord(TypeRecordKind::Modifier), ModifiedType(ModifiedType),
+        Modifiers(Modifiers) {}
+
+  TypeIndex getModifiedType() const { return ModifiedType; }
+  ModifierOptions getModifiers() const { return Modifiers; }
+
+  TypeIndex ModifiedType;
+  ModifierOptions Modifiers;
+};
+
+// LF_PROCEDURE
+class ProcedureRecord : public TypeRecord {
+public:
+  ProcedureRecord() = default;
+  explicit ProcedureRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  ProcedureRecord(TypeIndex ReturnType, CallingConvention CallConv,
+                  FunctionOptions Options, uint16_t ParameterCount,
+                  TypeIndex ArgumentList)
+      : TypeRecord(TypeRecordKind::Procedure), ReturnType(ReturnType),
+        CallConv(CallConv), Options(Options), ParameterCount(ParameterCount),
+        ArgumentList(ArgumentList) {}
+
+  TypeIndex getReturnType() const { return ReturnType; }
+  CallingConvention getCallConv() const { return CallConv; }
+  FunctionOptions getOptions() const { return Options; }
+  uint16_t getParameterCount() const { return ParameterCount; }
+  TypeIndex getArgumentList() const { return ArgumentList; }
+
+  TypeIndex ReturnType;
+  CallingConvention CallConv;
+  FunctionOptions Options;
+  uint16_t ParameterCount;
+  TypeIndex ArgumentList;
+};
+
+// LF_MFUNCTION
+class MemberFunctionRecord : public TypeRecord {
+public:
+  MemberFunctionRecord() = default;
+  explicit MemberFunctionRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
+  MemberFunctionRecord(TypeIndex ReturnType, TypeIndex ClassType,
+                       TypeIndex ThisType, CallingConvention CallConv,
+                       FunctionOptions Options, uint16_t ParameterCount,
+                       TypeIndex ArgumentList, int32_t ThisPointerAdjustment)
+      : TypeRecord(TypeRecordKind::MemberFunction), ReturnType(ReturnType),
+        ClassType(ClassType), ThisType(ThisType), CallConv(CallConv),
+        Options(Options), ParameterCount(ParameterCount),
+        ArgumentList(ArgumentList),
+        ThisPointerAdjustment(ThisPointerAdjustment) {}
+
+  TypeIndex getReturnType() const { return ReturnType; }
+  TypeIndex getClassType() const { return ClassType; }
+  TypeIndex getThisType() const { return ThisType; }
+  CallingConvention getCallConv() const { return CallConv; }
+  FunctionOptions getOptions() const { return Options; }
+  uint16_t getParameterCount() const { return ParameterCount; }
+  TypeIndex getArgumentList() const { return ArgumentList; }
+  int32_t getThisPointerAdjustment() const { return ThisPointerAdjustment; }
+
+  TypeIndex ReturnType;
+  TypeIndex ClassType;
+  TypeIndex ThisType;
+  CallingConvention CallConv;
+  FunctionOptions Options;
+  uint16_t ParameterCount;
+  TypeIndex ArgumentList;
+  int32_t ThisPointerAdjustment;
+};
+
+// LF_LABEL
+class LabelRecord : public TypeRecord {
+public:
+  LabelRecord() = default;
+  explicit LabelRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
+  LabelRecord(LabelType Mode) : TypeRecord(TypeRecordKind::Label), Mode(Mode) {}
+
+  LabelType Mode;
+};
+
+// LF_MFUNC_ID
+class MemberFuncIdRecord : public TypeRecord {
+public:
+  MemberFuncIdRecord() = default;
+  explicit MemberFuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  MemberFuncIdRecord(TypeIndex ClassType, TypeIndex FunctionType,
+                     StringRef Name)
+      : TypeRecord(TypeRecordKind::MemberFuncId), ClassType(ClassType),
+        FunctionType(FunctionType), Name(Name) {}
+
+  TypeIndex getClassType() const { return ClassType; }
+  TypeIndex getFunctionType() const { return FunctionType; }
+  StringRef getName() const { return Name; }
+
+  TypeIndex ClassType;
+  TypeIndex FunctionType;
+  StringRef Name;
+};
+
+// LF_ARGLIST
+class ArgListRecord : public TypeRecord {
+public:
+  ArgListRecord() = default;
+  explicit ArgListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
+  ArgListRecord(TypeRecordKind Kind, ArrayRef<TypeIndex> Indices)
+      : TypeRecord(Kind), ArgIndices(Indices) {}
+
+  ArrayRef<TypeIndex> getIndices() const { return ArgIndices; }
+
+  std::vector<TypeIndex> ArgIndices;
+};
+
+// LF_SUBSTR_LIST
+class StringListRecord : public TypeRecord {
+public:
+  StringListRecord() = default;
+  explicit StringListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
+  StringListRecord(TypeRecordKind Kind, ArrayRef<TypeIndex> Indices)
+      : TypeRecord(Kind), StringIndices(Indices) {}
+
+  ArrayRef<TypeIndex> getIndices() const { return StringIndices; }
+
+  std::vector<TypeIndex> StringIndices;
+};
+
+// LF_POINTER
+class PointerRecord : public TypeRecord {
+public:
+  static const uint32_t PointerKindShift = 0;
+  static const uint32_t PointerKindMask = 0x1F;
+
+  static const uint32_t PointerModeShift = 5;
+  static const uint32_t PointerModeMask = 0x07;
+
+  static const uint32_t PointerOptionMask = 0xFF;
+
+  static const uint32_t PointerSizeShift = 13;
+  static const uint32_t PointerSizeMask = 0xFF;
+
+  PointerRecord() = default;
+  explicit PointerRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
+  PointerRecord(TypeIndex ReferentType, uint32_t Attrs)
+      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+        Attrs(Attrs) {}
+
+  PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
+                PointerOptions PO, uint8_t Size)
+      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+        Attrs(calcAttrs(PK, PM, PO, Size)) {}
+
+  PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
+                PointerOptions PO, uint8_t Size, const MemberPointerInfo &MPI)
+      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+        Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(MPI) {}
+
+  TypeIndex getReferentType() const { return ReferentType; }
+
+  PointerKind getPointerKind() const {
+    return static_cast<PointerKind>((Attrs >> PointerKindShift) &
+                                    PointerKindMask);
+  }
+
+  PointerMode getMode() const {
+    return static_cast<PointerMode>((Attrs >> PointerModeShift) &
+                                    PointerModeMask);
+  }
+
+  PointerOptions getOptions() const {
+    return static_cast<PointerOptions>(Attrs);
+  }
+
+  uint8_t getSize() const {
+    return (Attrs >> PointerSizeShift) & PointerSizeMask;
+  }
+
+  MemberPointerInfo getMemberInfo() const { return *MemberInfo; }
+
+  bool isPointerToMember() const {
+    return getMode() == PointerMode::PointerToDataMember ||
+           getMode() == PointerMode::PointerToMemberFunction;
+  }
+
+  bool isFlat() const { return !!(Attrs & uint32_t(PointerOptions::Flat32)); }
+  bool isConst() const { return !!(Attrs & uint32_t(PointerOptions::Const)); }
+
+  bool isVolatile() const {
+    return !!(Attrs & uint32_t(PointerOptions::Volatile));
+  }
+
+  bool isUnaligned() const {
+    return !!(Attrs & uint32_t(PointerOptions::Unaligned));
+  }
+
+  bool isRestrict() const {
+    return !!(Attrs & uint32_t(PointerOptions::Restrict));
+  }
+
+  TypeIndex ReferentType;
+  uint32_t Attrs;
+  Optional<MemberPointerInfo> MemberInfo;
+
+  void setAttrs(PointerKind PK, PointerMode PM, PointerOptions PO,
+                uint8_t Size) {
+    Attrs = calcAttrs(PK, PM, PO, Size);
+  }
+
+private:
+  static uint32_t calcAttrs(PointerKind PK, PointerMode PM, PointerOptions PO,
+                            uint8_t Size) {
+    uint32_t A = 0;
+    A |= static_cast<uint32_t>(PK);
+    A |= static_cast<uint32_t>(PO);
+    A |= (static_cast<uint32_t>(PM) << PointerModeShift);
+    A |= (static_cast<uint32_t>(Size) << PointerSizeShift);
+    return A;
+  }
+};
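+
+// A small sketch of building and querying a pointer record (the enumerator
+// values come from CodeView.h; the choice of a const 8-byte near pointer is
+// only illustrative):
+//
+// \code
+//   PointerRecord PR(TypeIndex::UInt32(), PointerKind::Near64,
+//                    PointerMode::Pointer, PointerOptions::Const, 8);
+//   assert(PR.getSize() == 8 && PR.isConst() && !PR.isPointerToMember());
+// \endcode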
+
+// LF_NESTTYPE
+class NestedTypeRecord : public TypeRecord {
+public:
+  NestedTypeRecord() = default;
+  explicit NestedTypeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  NestedTypeRecord(TypeIndex Type, StringRef Name)
+      : TypeRecord(TypeRecordKind::NestedType), Type(Type), Name(Name) {}
+
+  TypeIndex getNestedType() const { return Type; }
+  StringRef getName() const { return Name; }
+
+  TypeIndex Type;
+  StringRef Name;
+};
+
+// LF_FIELDLIST
+class FieldListRecord : public TypeRecord {
+public:
+  FieldListRecord() = default;
+  explicit FieldListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  explicit FieldListRecord(ArrayRef<uint8_t> Data)
+      : TypeRecord(TypeRecordKind::FieldList), Data(Data) {}
+
+  ArrayRef<uint8_t> Data;
+};
+
+// LF_ARRAY
+class ArrayRecord : public TypeRecord {
+public:
+  ArrayRecord() = default;
+  explicit ArrayRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  ArrayRecord(TypeIndex ElementType, TypeIndex IndexType, uint64_t Size,
+              StringRef Name)
+      : TypeRecord(TypeRecordKind::Array), ElementType(ElementType),
+        IndexType(IndexType), Size(Size), Name(Name) {}
+
+  TypeIndex getElementType() const { return ElementType; }
+  TypeIndex getIndexType() const { return IndexType; }
+  uint64_t getSize() const { return Size; }
+  StringRef getName() const { return Name; }
+
+  TypeIndex ElementType;
+  TypeIndex IndexType;
+  uint64_t Size;
+  StringRef Name;
+};
+
+class TagRecord : public TypeRecord {
+protected:
+  TagRecord() = default;
+  explicit TagRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  TagRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
+            TypeIndex FieldList, StringRef Name, StringRef UniqueName)
+      : TypeRecord(Kind), MemberCount(MemberCount), Options(Options),
+        FieldList(FieldList), Name(Name), UniqueName(UniqueName) {}
+
+public:
+  static const int HfaKindShift = 11;
+  static const int HfaKindMask = 0x1800;
+  static const int WinRTKindShift = 14;
+  static const int WinRTKindMask = 0xC000;
+
+  bool hasUniqueName() const {
+    return (Options & ClassOptions::HasUniqueName) != ClassOptions::None;
+  }
+
+  bool isNested() const {
+    return (Options & ClassOptions::Nested) != ClassOptions::None;
+  }
+
+  bool isForwardRef() const {
+    return (Options & ClassOptions::ForwardReference) != ClassOptions::None;
+  }
+
+  uint16_t getMemberCount() const { return MemberCount; }
+  ClassOptions getOptions() const { return Options; }
+  TypeIndex getFieldList() const { return FieldList; }
+  StringRef getName() const { return Name; }
+  StringRef getUniqueName() const { return UniqueName; }
+
+  uint16_t MemberCount;
+  ClassOptions Options;
+  TypeIndex FieldList;
+  StringRef Name;
+  StringRef UniqueName;
+};
+
+// LF_CLASS, LF_STRUCTURE, LF_INTERFACE
+class ClassRecord : public TagRecord {
+public:
+  ClassRecord() = default;
+  explicit ClassRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
+  ClassRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
+              TypeIndex FieldList, TypeIndex DerivationList,
+              TypeIndex VTableShape, uint64_t Size, StringRef Name,
+              StringRef UniqueName)
+      : TagRecord(Kind, MemberCount, Options, FieldList, Name, UniqueName),
+        DerivationList(DerivationList), VTableShape(VTableShape), Size(Size) {}
+
+  HfaKind getHfa() const {
+    uint16_t Value = static_cast<uint16_t>(Options);
+    Value = (Value & HfaKindMask) >> HfaKindShift;
+    return static_cast<HfaKind>(Value);
+  }
+
+  WindowsRTClassKind getWinRTKind() const {
+    uint16_t Value = static_cast<uint16_t>(Options);
+    Value = (Value & WinRTKindMask) >> WinRTKindShift;
+    return static_cast<WindowsRTClassKind>(Value);
+  }
+
+  TypeIndex getDerivationList() const { return DerivationList; }
+  TypeIndex getVTableShape() const { return VTableShape; }
+  uint64_t getSize() const { return Size; }
+
+  TypeIndex DerivationList;
+  TypeIndex VTableShape;
+  uint64_t Size;
+};
+
+// LF_UNION
+struct UnionRecord : public TagRecord {
+  UnionRecord() = default;
+  explicit UnionRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
+  UnionRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
+              uint64_t Size, StringRef Name, StringRef UniqueName)
+      : TagRecord(TypeRecordKind::Union, MemberCount, Options, FieldList, Name,
+                  UniqueName),
+        Size(Size) {}
+
+  HfaKind getHfa() const {
+    uint16_t Value = static_cast<uint16_t>(Options);
+    Value = (Value & HfaKindMask) >> HfaKindShift;
+    return static_cast<HfaKind>(Value);
+  }
+
+  uint64_t getSize() const { return Size; }
+
+  uint64_t Size;
+};
+
+// LF_ENUM
+class EnumRecord : public TagRecord {
+public:
+  EnumRecord() = default;
+  explicit EnumRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
+  EnumRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
+             StringRef Name, StringRef UniqueName, TypeIndex UnderlyingType)
+      : TagRecord(TypeRecordKind::Enum, MemberCount, Options, FieldList, Name,
+                  UniqueName),
+        UnderlyingType(UnderlyingType) {}
+
+  TypeIndex getUnderlyingType() const { return UnderlyingType; }
+
+  TypeIndex UnderlyingType;
+};
+
+// LF_BITFIELD
+class BitFieldRecord : public TypeRecord {
+public:
+  BitFieldRecord() = default;
+  explicit BitFieldRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  BitFieldRecord(TypeIndex Type, uint8_t BitSize, uint8_t BitOffset)
+      : TypeRecord(TypeRecordKind::BitField), Type(Type), BitSize(BitSize),
+        BitOffset(BitOffset) {}
+
+  TypeIndex getType() const { return Type; }
+  uint8_t getBitOffset() const { return BitOffset; }
+  uint8_t getBitSize() const { return BitSize; }
+
+  TypeIndex Type;
+  uint8_t BitSize;
+  uint8_t BitOffset;
+};
+
+// LF_VTSHAPE
+class VFTableShapeRecord : public TypeRecord {
+public:
+  VFTableShapeRecord() = default;
+  explicit VFTableShapeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  explicit VFTableShapeRecord(ArrayRef<VFTableSlotKind> Slots)
+      : TypeRecord(TypeRecordKind::VFTableShape), SlotsRef(Slots) {}
+  explicit VFTableShapeRecord(std::vector<VFTableSlotKind> Slots)
+      : TypeRecord(TypeRecordKind::VFTableShape), Slots(std::move(Slots)) {}
+
+  ArrayRef<VFTableSlotKind> getSlots() const {
+    if (!SlotsRef.empty())
+      return SlotsRef;
+    return Slots;
+  }
+
+  uint32_t getEntryCount() const { return getSlots().size(); }
+
+  ArrayRef<VFTableSlotKind> SlotsRef;
+  std::vector<VFTableSlotKind> Slots;
+};
+
+// LF_TYPESERVER2
+class TypeServer2Record : public TypeRecord {
+public:
+  TypeServer2Record() = default;
+  explicit TypeServer2Record(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  TypeServer2Record(StringRef GuidStr, uint32_t Age, StringRef Name)
+      : TypeRecord(TypeRecordKind::TypeServer2), Age(Age), Name(Name) {
+    assert(GuidStr.size() == 16 && "guid isn't 16 bytes");
+    ::memcpy(Guid.Guid, GuidStr.data(), 16);
+  }
+
+  const GUID &getGuid() const { return Guid; }
+  uint32_t getAge() const { return Age; }
+  StringRef getName() const { return Name; }
+
+  GUID Guid;
+  uint32_t Age;
+  StringRef Name;
+};
+
+// LF_STRING_ID
+class StringIdRecord : public TypeRecord {
+public:
+  StringIdRecord() = default;
+  explicit StringIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  StringIdRecord(TypeIndex Id, StringRef String)
+      : TypeRecord(TypeRecordKind::StringId), Id(Id), String(String) {}
+
+  TypeIndex getId() const { return Id; }
+  StringRef getString() const { return String; }
+
+  TypeIndex Id;
+  StringRef String;
+};
+
+// LF_FUNC_ID
+class FuncIdRecord : public TypeRecord {
+public:
+  FuncIdRecord() = default;
+  explicit FuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  FuncIdRecord(TypeIndex ParentScope, TypeIndex FunctionType, StringRef Name)
+      : TypeRecord(TypeRecordKind::FuncId), ParentScope(ParentScope),
+        FunctionType(FunctionType), Name(Name) {}
+
+  TypeIndex getParentScope() const { return ParentScope; }
+  TypeIndex getFunctionType() const { return FunctionType; }
+  StringRef getName() const { return Name; }
+
+  TypeIndex ParentScope;
+  TypeIndex FunctionType;
+  StringRef Name;
+};
+
+// LF_UDT_SRC_LINE
+class UdtSourceLineRecord : public TypeRecord {
+public:
+  UdtSourceLineRecord() = default;
+  explicit UdtSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  UdtSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile, uint32_t LineNumber)
+      : TypeRecord(TypeRecordKind::UdtSourceLine), UDT(UDT),
+        SourceFile(SourceFile), LineNumber(LineNumber) {}
+
+  TypeIndex getUDT() const { return UDT; }
+  TypeIndex getSourceFile() const { return SourceFile; }
+  uint32_t getLineNumber() const { return LineNumber; }
+
+  TypeIndex UDT;
+  TypeIndex SourceFile;
+  uint32_t LineNumber;
+};
+
+// LF_UDT_MOD_SRC_LINE
+class UdtModSourceLineRecord : public TypeRecord {
+public:
+  UdtModSourceLineRecord() = default;
+  explicit UdtModSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  UdtModSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile,
+                         uint32_t LineNumber, uint16_t Module)
+      : TypeRecord(TypeRecordKind::UdtModSourceLine), UDT(UDT),
+        SourceFile(SourceFile), LineNumber(LineNumber), Module(Module) {}
+
+  TypeIndex getUDT() const { return UDT; }
+  TypeIndex getSourceFile() const { return SourceFile; }
+  uint32_t getLineNumber() const { return LineNumber; }
+  uint16_t getModule() const { return Module; }
+
+  TypeIndex UDT;
+  TypeIndex SourceFile;
+  uint32_t LineNumber;
+  uint16_t Module;
+};
+
+// LF_BUILDINFO
+class BuildInfoRecord : public TypeRecord {
+public:
+  BuildInfoRecord() = default;
+  explicit BuildInfoRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  BuildInfoRecord(ArrayRef<TypeIndex> ArgIndices)
+      : TypeRecord(TypeRecordKind::BuildInfo),
+        ArgIndices(ArgIndices.begin(), ArgIndices.end()) {}
+
+  ArrayRef<TypeIndex> getArgs() const { return ArgIndices; }
+
+  SmallVector<TypeIndex, 4> ArgIndices;
+};
+
+// LF_VFTABLE
+class VFTableRecord : public TypeRecord {
+public:
+  VFTableRecord() = default;
+  explicit VFTableRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  VFTableRecord(TypeIndex CompleteClass, TypeIndex OverriddenVFTable,
+                uint32_t VFPtrOffset, StringRef Name,
+                ArrayRef<StringRef> Methods)
+      : TypeRecord(TypeRecordKind::VFTable), CompleteClass(CompleteClass),
+        OverriddenVFTable(OverriddenVFTable), VFPtrOffset(VFPtrOffset) {
+    MethodNames.push_back(Name);
+    MethodNames.insert(MethodNames.end(), Methods.begin(), Methods.end());
+  }
+
+  TypeIndex getCompleteClass() const { return CompleteClass; }
+  TypeIndex getOverriddenVTable() const { return OverriddenVFTable; }
+  uint32_t getVFPtrOffset() const { return VFPtrOffset; }
+  StringRef getName() const { return makeArrayRef(MethodNames).front(); }
+
+  ArrayRef<StringRef> getMethodNames() const {
+    return makeArrayRef(MethodNames).drop_front();
+  }
+
+  TypeIndex CompleteClass;
+  TypeIndex OverriddenVFTable;
+  uint32_t VFPtrOffset;
+  std::vector<StringRef> MethodNames;
+};
+
+// LF_ONEMETHOD
+class OneMethodRecord : public TypeRecord {
+public:
+  OneMethodRecord() = default;
+  explicit OneMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  OneMethodRecord(TypeIndex Type, MemberAttributes Attrs, int32_t VFTableOffset,
+                  StringRef Name)
+      : TypeRecord(TypeRecordKind::OneMethod), Type(Type), Attrs(Attrs),
+        VFTableOffset(VFTableOffset), Name(Name) {}
+  OneMethodRecord(TypeIndex Type, MemberAccess Access, MethodKind MK,
+                  MethodOptions Options, int32_t VFTableOffset, StringRef Name)
+      : TypeRecord(TypeRecordKind::OneMethod), Type(Type),
+        Attrs(Access, MK, Options), VFTableOffset(VFTableOffset), Name(Name) {}
+
+  TypeIndex getType() const { return Type; }
+  MethodKind getMethodKind() const { return Attrs.getMethodKind(); }
+  MethodOptions getOptions() const { return Attrs.getFlags(); }
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  int32_t getVFTableOffset() const { return VFTableOffset; }
+  StringRef getName() const { return Name; }
+
+  bool isIntroducingVirtual() const {
+    return getMethodKind() == MethodKind::IntroducingVirtual ||
+           getMethodKind() == MethodKind::PureIntroducingVirtual;
+  }
+
+  TypeIndex Type;
+  MemberAttributes Attrs;
+  int32_t VFTableOffset;
+  StringRef Name;
+};
+
+// LF_METHODLIST
+class MethodOverloadListRecord : public TypeRecord {
+public:
+  MethodOverloadListRecord() = default;
+  explicit MethodOverloadListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  MethodOverloadListRecord(ArrayRef<OneMethodRecord> Methods)
+      : TypeRecord(TypeRecordKind::MethodOverloadList), Methods(Methods) {}
+
+  ArrayRef<OneMethodRecord> getMethods() const { return Methods; }
+
+  std::vector<OneMethodRecord> Methods;
+};
+
+/// For method overload sets.  LF_METHOD
+class OverloadedMethodRecord : public TypeRecord {
+public:
+  OverloadedMethodRecord() = default;
+  explicit OverloadedMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  OverloadedMethodRecord(uint16_t NumOverloads, TypeIndex MethodList,
+                         StringRef Name)
+      : TypeRecord(TypeRecordKind::OverloadedMethod),
+        NumOverloads(NumOverloads), MethodList(MethodList), Name(Name) {}
+
+  uint16_t getNumOverloads() const { return NumOverloads; }
+  TypeIndex getMethodList() const { return MethodList; }
+  StringRef getName() const { return Name; }
+
+  uint16_t NumOverloads;
+  TypeIndex MethodList;
+  StringRef Name;
+};
+
+// LF_MEMBER
+class DataMemberRecord : public TypeRecord {
+public:
+  DataMemberRecord() = default;
+  explicit DataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  DataMemberRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset,
+                   StringRef Name)
+      : TypeRecord(TypeRecordKind::DataMember), Attrs(Attrs), Type(Type),
+        FieldOffset(Offset), Name(Name) {}
+  DataMemberRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset,
+                   StringRef Name)
+      : TypeRecord(TypeRecordKind::DataMember), Attrs(Access), Type(Type),
+        FieldOffset(Offset), Name(Name) {}
+
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  TypeIndex getType() const { return Type; }
+  uint64_t getFieldOffset() const { return FieldOffset; }
+  StringRef getName() const { return Name; }
+
+  MemberAttributes Attrs;
+  TypeIndex Type;
+  uint64_t FieldOffset;
+  StringRef Name;
+};
+
+// LF_STMEMBER
+class StaticDataMemberRecord : public TypeRecord {
+public:
+  StaticDataMemberRecord() = default;
+  explicit StaticDataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  StaticDataMemberRecord(MemberAttributes Attrs, TypeIndex Type, StringRef Name)
+      : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Attrs), Type(Type),
+        Name(Name) {}
+  StaticDataMemberRecord(MemberAccess Access, TypeIndex Type, StringRef Name)
+      : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Access), Type(Type),
+        Name(Name) {}
+
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  TypeIndex getType() const { return Type; }
+  StringRef getName() const { return Name; }
+
+  MemberAttributes Attrs;
+  TypeIndex Type;
+  StringRef Name;
+};
+
+// LF_ENUMERATE
+class EnumeratorRecord : public TypeRecord {
+public:
+  EnumeratorRecord() = default;
+  explicit EnumeratorRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  EnumeratorRecord(MemberAttributes Attrs, APSInt Value, StringRef Name)
+      : TypeRecord(TypeRecordKind::Enumerator), Attrs(Attrs),
+        Value(std::move(Value)), Name(Name) {}
+  EnumeratorRecord(MemberAccess Access, APSInt Value, StringRef Name)
+      : TypeRecord(TypeRecordKind::Enumerator), Attrs(Access),
+        Value(std::move(Value)), Name(Name) {}
+
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  APSInt getValue() const { return Value; }
+  StringRef getName() const { return Name; }
+
+  MemberAttributes Attrs;
+  APSInt Value;
+  StringRef Name;
+};
+
+// LF_VFUNCTAB
+class VFPtrRecord : public TypeRecord {
+public:
+  VFPtrRecord() = default;
+  explicit VFPtrRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  VFPtrRecord(TypeIndex Type)
+      : TypeRecord(TypeRecordKind::VFPtr), Type(Type) {}
+
+  TypeIndex getType() const { return Type; }
+
+  TypeIndex Type;
+};
+
+// LF_BCLASS, LF_BINTERFACE
+class BaseClassRecord : public TypeRecord {
+public:
+  BaseClassRecord() = default;
+  explicit BaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  BaseClassRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset)
+      : TypeRecord(TypeRecordKind::BaseClass), Attrs(Attrs), Type(Type),
+        Offset(Offset) {}
+  BaseClassRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset)
+      : TypeRecord(TypeRecordKind::BaseClass), Attrs(Access), Type(Type),
+        Offset(Offset) {}
+
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  TypeIndex getBaseType() const { return Type; }
+  uint64_t getBaseOffset() const { return Offset; }
+
+  MemberAttributes Attrs;
+  TypeIndex Type;
+  uint64_t Offset;
+};
+
+// LF_VBCLASS, LF_IVBCLASS
+class VirtualBaseClassRecord : public TypeRecord {
+public:
+  VirtualBaseClassRecord() = default;
+  explicit VirtualBaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  VirtualBaseClassRecord(TypeRecordKind Kind, MemberAttributes Attrs,
+                         TypeIndex BaseType, TypeIndex VBPtrType,
+                         uint64_t Offset, uint64_t Index)
+      : TypeRecord(Kind), Attrs(Attrs), BaseType(BaseType),
+        VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}
+  VirtualBaseClassRecord(TypeRecordKind Kind, MemberAccess Access,
+                         TypeIndex BaseType, TypeIndex VBPtrType,
+                         uint64_t Offset, uint64_t Index)
+      : TypeRecord(Kind), Attrs(Access), BaseType(BaseType),
+        VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}
+
+  MemberAccess getAccess() const { return Attrs.getAccess(); }
+  TypeIndex getBaseType() const { return BaseType; }
+  TypeIndex getVBPtrType() const { return VBPtrType; }
+  uint64_t getVBPtrOffset() const { return VBPtrOffset; }
+  uint64_t getVTableIndex() const { return VTableIndex; }
+
+  MemberAttributes Attrs;
+  TypeIndex BaseType;
+  TypeIndex VBPtrType;
+  uint64_t VBPtrOffset;
+  uint64_t VTableIndex;
+};
+
+/// LF_INDEX - Used to chain two large LF_FIELDLIST or LF_METHODLIST records
+/// together. The first will end in an LF_INDEX record that points to the next.
+class ListContinuationRecord : public TypeRecord {
+public:
+  ListContinuationRecord() = default;
+  explicit ListContinuationRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+  ListContinuationRecord(TypeIndex ContinuationIndex)
+      : TypeRecord(TypeRecordKind::ListContinuation),
+        ContinuationIndex(ContinuationIndex) {}
+
+  TypeIndex getContinuationIndex() const { return ContinuationIndex; }
+
+  TypeIndex ContinuationIndex;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h
new file mode 100644
index 0000000..cbe8d60
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h
@@ -0,0 +1,52 @@
+//===- TypeRecordMapping.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+class BinaryStreamReader;
+class BinaryStreamWriter;
+
+namespace codeview {
+class TypeRecordMapping : public TypeVisitorCallbacks {
+public:
+  explicit TypeRecordMapping(BinaryStreamReader &Reader) : IO(Reader) {}
+  explicit TypeRecordMapping(BinaryStreamWriter &Writer) : IO(Writer) {}
+
+  using TypeVisitorCallbacks::visitTypeBegin;
+  Error visitTypeBegin(CVType &Record) override;
+  Error visitTypeEnd(CVType &Record) override;
+
+  Error visitMemberBegin(CVMemberRecord &Record) override;
+  Error visitMemberEnd(CVMemberRecord &Record) override;
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
+  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
+#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+
+private:
+  Optional<TypeLeafKind> TypeKind;
+  Optional<TypeLeafKind> MemberKind;
+
+  CodeViewRecordIO IO;
+};
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
new file mode 100644
index 0000000..59e216a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
@@ -0,0 +1,107 @@
+//===- TypeStreamMerger.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class TypeIndex;
+struct GloballyHashedType;
+class GlobalTypeTableBuilder;
+class MergingTypeTableBuilder;
+
+/// \brief Merge one set of type records into another.  This method assumes
+/// that all records are type records, and there are no Id records present.
+///
+/// \param Dest The table to store the re-written type records into.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// type stream, that contains the index of the corresponding type record
+/// in the destination stream.
+///
+/// \param Types The collection of types to merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeTypeRecords(MergingTypeTableBuilder &Dest,
+                       SmallVectorImpl<TypeIndex> &SourceToDest,
+                       const CVTypeArray &Types);
+
+/// \brief Merge one set of id records into another.  This method assumes
+/// that all records are id records, and there are no Type records present.
+/// However, since Id records can refer back to Type records, this method
+/// assumes that the referenced type records have also been merged into
+/// another type stream (for example using the above method), and accepts
+/// the mapping from source to dest for that stream so that it can re-write
+/// the type record mappings accordingly.
+///
+/// \param Dest The table to store the re-written id records into.
+///
+/// \param Types The mapping to use for the type records that these id
+/// records refer to.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// id stream, that contains the index of the corresponding id record
+/// in the destination stream.
+///
+/// \param Ids The collection of id records to merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeIdRecords(MergingTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
+                     SmallVectorImpl<TypeIndex> &SourceToDest,
+                     const CVTypeArray &Ids);
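+
+// A minimal two-step sketch, assuming `TypeTable` and `IdTable` are
+// MergingTypeTableBuilder instances and `Types`/`Ids` are the source
+// CVTypeArrays: merge types first so the resulting source-to-dest mapping
+// can rewrite the type references inside the id records.
+//
+// \code
+//   SmallVector<TypeIndex, 128> TypeMap, IdMap;
+//   if (Error E = mergeTypeRecords(TypeTable, TypeMap, Types))
+//     return E;
+//   if (Error E = mergeIdRecords(IdTable, TypeMap, IdMap, Ids))
+//     return E;
+// \endcode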
+
+/// \brief Merge a unified set of type and id records, splitting them into
+/// separate output streams.
+///
+/// \param DestIds The table to store the re-written id records into.
+///
+/// \param DestTypes the table to store the re-written type records into.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// id stream, that contains the index of the corresponding id record
+/// in the destination stream.
+///
+/// \param IdsAndTypes The combined collection of id and type records to
+/// merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeTypeAndIdRecords(MergingTypeTableBuilder &DestIds,
+                            MergingTypeTableBuilder &DestTypes,
+                            SmallVectorImpl<TypeIndex> &SourceToDest,
+                            const CVTypeArray &IdsAndTypes);
+
+Error mergeTypeAndIdRecords(GlobalTypeTableBuilder &DestIds,
+                            GlobalTypeTableBuilder &DestTypes,
+                            SmallVectorImpl<TypeIndex> &SourceToDest,
+                            const CVTypeArray &IdsAndTypes,
+                            ArrayRef<GloballyHashedType> Hashes);
+
+Error mergeTypeRecords(GlobalTypeTableBuilder &Dest,
+                       SmallVectorImpl<TypeIndex> &SourceToDest,
+                       const CVTypeArray &Types,
+                       ArrayRef<GloballyHashedType> Hashes);
+
+Error mergeIdRecords(GlobalTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
+                     SmallVectorImpl<TypeIndex> &SourceToDest,
+                     const CVTypeArray &Ids,
+                     ArrayRef<GloballyHashedType> Hashes);
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
new file mode 100644
index 0000000..dfba83d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
@@ -0,0 +1,38 @@
+//===- TypeSymbolEmitter.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+namespace llvm {
+class StringRef;
+
+namespace codeview {
+
+class TypeSymbolEmitter {
+private:
+  TypeSymbolEmitter(const TypeSymbolEmitter &) = delete;
+  TypeSymbolEmitter &operator=(const TypeSymbolEmitter &) = delete;
+
+protected:
+  TypeSymbolEmitter() {}
+
+public:
+  virtual ~TypeSymbolEmitter() {}
+
+public:
+  virtual void writeUserDefinedType(TypeIndex TI, StringRef Name) = 0;
+};
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeTableCollection.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeTableCollection.h
new file mode 100644
index 0000000..80326a0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeTableCollection.h
@@ -0,0 +1,43 @@
+//===- TypeTableCollection.h ---------------------------------- *- C++ --*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H
+
+#include "llvm/DebugInfo/CodeView/TypeCollection.h"
+#include "llvm/Support/StringSaver.h"
+
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class TypeTableCollection : public TypeCollection {
+public:
+  explicit TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records);
+
+  Optional<TypeIndex> getFirst() override;
+  Optional<TypeIndex> getNext(TypeIndex Prev) override;
+
+  CVType getType(TypeIndex Index) override;
+  StringRef getTypeName(TypeIndex Index) override;
+  bool contains(TypeIndex Index) override;
+  uint32_t size() override;
+  uint32_t capacity() override;
+
+private:
+  BumpPtrAllocator Allocator;
+  StringSaver NameStorage;
+  std::vector<StringRef> Names;
+  ArrayRef<ArrayRef<uint8_t>> Records;
+};
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h
new file mode 100644
index 0000000..126fb8a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h
@@ -0,0 +1,122 @@
+//===- TypeVisitorCallbackPipeline.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class TypeVisitorCallbackPipeline : public TypeVisitorCallbacks {
+public:
+  TypeVisitorCallbackPipeline() = default;
+
+  Error visitUnknownType(CVType &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitUnknownType(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitUnknownMember(CVMemberRecord &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitUnknownMember(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitTypeBegin(CVType &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitTypeBegin(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitTypeBegin(CVType &Record, TypeIndex Index) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitTypeBegin(Record, Index))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitTypeEnd(CVType &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitTypeEnd(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitMemberBegin(CVMemberRecord &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitMemberBegin(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  Error visitMemberEnd(CVMemberRecord &Record) override {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitMemberEnd(Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  void addCallbackToPipeline(TypeVisitorCallbacks &Callbacks) {
+    Pipeline.push_back(&Callbacks);
+  }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
+  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override {         \
+    return visitKnownRecordImpl(CVR, Record);                                  \
+  }
+#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
+  Error visitKnownMember(CVMemberRecord &CVMR, Name##Record &Record)           \
+      override {                                                               \
+    return visitKnownMemberImpl(CVMR, Record);                                 \
+  }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+
+private:
+  template <typename T> Error visitKnownRecordImpl(CVType &CVR, T &Record) {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitKnownRecord(CVR, Record))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  template <typename T>
+  Error visitKnownMemberImpl(CVMemberRecord &CVMR, T &Record) {
+    for (auto Visitor : Pipeline) {
+      if (auto EC = Visitor->visitKnownMember(CVMR, Record))
+        return EC;
+    }
+    return Error::success();
+  }
+  std::vector<TypeVisitorCallbacks *> Pipeline;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h
new file mode 100644
index 0000000..d7a4733
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h
@@ -0,0 +1,71 @@
+//===- TypeVisitorCallbacks.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
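+/// Base class for CodeView type stream visitors. Every callback defaults to a
+/// no-op returning success, so subclasses override only what they need. A
+/// hypothetical sketch (PointerCounter is illustrative only):
+/// \code
+///   struct PointerCounter : public TypeVisitorCallbacks {
+///     unsigned Count = 0;
+///     Error visitKnownRecord(CVType &CVR, PointerRecord &PR) override {
+///       ++Count;
+///       return Error::success();
+///     }
+///   };
+/// \endcode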
+class TypeVisitorCallbacks {
+public:
+  virtual ~TypeVisitorCallbacks() = default;
+
+  /// Action to take on unknown types. By default, they are ignored.
+  virtual Error visitUnknownType(CVType &Record) { return Error::success(); }
+  /// Paired begin/end actions for all types. Receives all record data,
+  /// including the fixed-length record prefix.  visitTypeBegin() should return
+  /// the type of the Record, or an error if it cannot be determined.  Exactly
+  /// one of the two visitTypeBegin methods will be called, depending on whether
+  /// records are being visited sequentially or randomly.  An implementation
+  /// should be prepared to handle both (or assert if it can't handle random
+  /// access visitation).
+  virtual Error visitTypeBegin(CVType &Record) { return Error::success(); }
+  virtual Error visitTypeBegin(CVType &Record, TypeIndex Index) {
+    return Error::success();
+  }
+  virtual Error visitTypeEnd(CVType &Record) { return Error::success(); }
+
+  virtual Error visitUnknownMember(CVMemberRecord &Record) {
+    return Error::success();
+  }
+
+  virtual Error visitMemberBegin(CVMemberRecord &Record) {
+    return Error::success();
+  }
+
+  virtual Error visitMemberEnd(CVMemberRecord &Record) {
+    return Error::success();
+  }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
+  virtual Error visitKnownRecord(CVType &CVR, Name##Record &Record) {          \
+    return Error::success();                                                   \
+  }
+#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
+  virtual Error visitKnownMember(CVMemberRecord &CVM, Name##Record &Record) {  \
+    return Error::success();                                                   \
+  }
+
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+#undef TYPE_RECORD
+#undef TYPE_RECORD_ALIAS
+#undef MEMBER_RECORD
+#undef MEMBER_RECORD_ALIAS
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DIContext.h b/linux-x64/clang/include/llvm/DebugInfo/DIContext.h
new file mode 100644
index 0000000..f89eb34
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DIContext.h
@@ -0,0 +1,273 @@
+//===- DIContext.h ----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DIContext, an abstract data structure that holds
+// debug information data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DICONTEXT_H
+#define LLVM_DEBUGINFO_DICONTEXT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+
+/// A format-neutral container for source line information.
+struct DILineInfo {
+  std::string FileName;
+  std::string FunctionName;
+  Optional<StringRef> Source;
+  uint32_t Line = 0;
+  uint32_t Column = 0;
+  uint32_t StartLine = 0;
+
+  // DWARF-specific.
+  uint32_t Discriminator = 0;
+
+  DILineInfo() : FileName("<invalid>"), FunctionName("<invalid>") {}
+
+  bool operator==(const DILineInfo &RHS) const {
+    return Line == RHS.Line && Column == RHS.Column &&
+           FileName == RHS.FileName && FunctionName == RHS.FunctionName &&
+           StartLine == RHS.StartLine && Discriminator == RHS.Discriminator;
+  }
+
+  bool operator!=(const DILineInfo &RHS) const {
+    return !(*this == RHS);
+  }
+
+  bool operator<(const DILineInfo &RHS) const {
+    return std::tie(FileName, FunctionName, Line, Column, StartLine,
+                    Discriminator) <
+           std::tie(RHS.FileName, RHS.FunctionName, RHS.Line, RHS.Column,
+                    RHS.StartLine, RHS.Discriminator);
+  }
+
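+  /// Returns true if this instance carries any real information, i.e. if any
+  /// field differs from its value in a default-constructed ("<invalid>")
+  /// DILineInfo.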
+  explicit operator bool() const { return *this != DILineInfo(); }
+
+  void dump(raw_ostream &OS) {
+    OS << "Line info: ";
+    if (FileName != "<invalid>")
+      OS << "file '" << FileName << "', ";
+    if (FunctionName != "<invalid>")
+      OS << "function '" << FunctionName << "', ";
+    OS << "line " << Line << ", ";
+    OS << "column " << Column << ", ";
+    OS << "start line " << StartLine << '\n';
+  }
+};
+
+using DILineInfoTable = SmallVector<std::pair<uint64_t, DILineInfo>, 16>;
+
+/// A format-neutral container for inlined code description.
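+///
+/// A sketch of dumping every frame (Info is an already-populated instance;
+/// frame 0 is the innermost inlined frame):
+/// \code
+///   for (uint32_t I = 0, E = Info.getNumberOfFrames(); I != E; ++I)
+///     Info.getFrame(I).dump(llvm::errs());
+/// \endcode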
+class DIInliningInfo {
+  SmallVector<DILineInfo, 4> Frames;
+
+public:
+  DIInliningInfo() = default;
+
+  DILineInfo getFrame(unsigned Index) const {
+    assert(Index < Frames.size());
+    return Frames[Index];
+  }
+
+  DILineInfo *getMutableFrame(unsigned Index) {
+    assert(Index < Frames.size());
+    return &Frames[Index];
+  }
+
+  uint32_t getNumberOfFrames() const {
+    return Frames.size();
+  }
+
+  void addFrame(const DILineInfo &Frame) {
+    Frames.push_back(Frame);
+  }
+};
+
+/// Container for description of a global variable.
+struct DIGlobal {
+  std::string Name;
+  uint64_t Start = 0;
+  uint64_t Size = 0;
+
+  DIGlobal() : Name("<invalid>") {}
+};
+
+/// A DINameKind is passed to name search methods to specify a
+/// preference regarding the type of name resolution the caller wants.
+enum class DINameKind { None, ShortName, LinkageName };
+
+/// Controls which fields of DILineInfo container should be filled
+/// with data.
+struct DILineInfoSpecifier {
+  enum class FileLineInfoKind { None, Default, AbsoluteFilePath };
+  using FunctionNameKind = DINameKind;
+
+  FileLineInfoKind FLIKind;
+  FunctionNameKind FNKind;
+
+  DILineInfoSpecifier(FileLineInfoKind FLIKind = FileLineInfoKind::Default,
+                      FunctionNameKind FNKind = FunctionNameKind::None)
+      : FLIKind(FLIKind), FNKind(FNKind) {}
+};
+
+/// This is just a helper to programmatically construct DIDumpType.
+enum DIDumpTypeCounter {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME) \
+  DIDT_ID_##ENUM_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+  DIDT_ID_UUID,
+  DIDT_ID_Count
+};
+static_assert(DIDT_ID_Count <= 32, "section types overflow storage");
+
+/// Selects which debug sections get dumped.
+enum DIDumpType : unsigned {
+  DIDT_Null,
+  DIDT_All             = ~0U,
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME) \
+  DIDT_##ENUM_NAME = 1U << DIDT_ID_##ENUM_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+  DIDT_UUID = 1 << DIDT_ID_UUID,
+};
+
+/// Container for dump options that control which debug information will be
+/// dumped.
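+///
+/// A hypothetical configuration sketch (Ctx is some DIContext):
+/// \code
+///   DIDumpOptions Opts;
+///   Opts.DumpType = DIDT_DebugInfo; // restrict output to .debug_info
+///   Opts.ShowChildren = true;
+///   Ctx.dump(llvm::outs(), Opts);
+/// \endcode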
+struct DIDumpOptions {
+  unsigned DumpType = DIDT_All;
+  unsigned RecurseDepth = -1U;
+  bool ShowAddresses = true;
+  bool ShowChildren = false;
+  bool ShowParents = false;
+  bool ShowForm = false;
+  bool SummarizeTypes = false;
+  bool Verbose = false;
+  bool DisplayRawContents = false;
+
+  /// Return default option set for printing a single DIE without children.
+  static DIDumpOptions getForSingleDIE() {
+    DIDumpOptions Opts;
+    Opts.RecurseDepth = 0;
+    return Opts;
+  }
+
+  /// Return the options with RecurseDepth set to 0 unless explicitly required.
+  DIDumpOptions noImplicitRecursion() const {
+    DIDumpOptions Opts = *this;
+    if (RecurseDepth == -1U && !ShowChildren)
+      Opts.RecurseDepth = 0;
+    return Opts;
+  }
+};
+
+class DIContext {
+public:
+  enum DIContextKind {
+    CK_DWARF,
+    CK_PDB
+  };
+
+  DIContext(DIContextKind K) : Kind(K) {}
+  virtual ~DIContext() = default;
+
+  DIContextKind getKind() const { return Kind; }
+
+  virtual void dump(raw_ostream &OS, DIDumpOptions DumpOpts) = 0;
+
+  virtual bool verify(raw_ostream &OS, DIDumpOptions DumpOpts = {}) {
+    // No verifier? Just say things went well.
+    return true;
+  }
+
+  virtual DILineInfo getLineInfoForAddress(uint64_t Address,
+      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+  virtual DILineInfoTable getLineInfoForAddressRange(uint64_t Address,
+      uint64_t Size, DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+  virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+
+private:
+  const DIContextKind Kind;
+};
+
+/// An interface for inquiring about the load address of a loaded object file,
+/// to be used by the DIContext implementations when applying relocations
+/// on the fly.
+class LoadedObjectInfo {
+protected:
+  LoadedObjectInfo() = default;
+  LoadedObjectInfo(const LoadedObjectInfo &) = default;
+
+public:
+  virtual ~LoadedObjectInfo() = default;
+
+  /// Obtain the Load Address of a section by SectionRef.
+  ///
+  /// Calculate the address of the given section.
+  /// The section need not be present in the local address space. The addresses
+  /// need to be consistent with the addresses used to query the DIContext and
+  /// the output of this function should be deterministic, i.e. repeated calls
+  /// with the same Sec should give the same address.
+  virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const {
+    return 0;
+  }
+
+  /// If conveniently available, return the content of the given Section.
+  ///
+  /// When the section is available in the local address space, in relocated
+  /// (loaded) form, e.g. because it was relocated by a JIT for execution, this
+  /// function should provide the contents of said section in `Data`. If the
+  /// loaded section is not available, or the cost of retrieving it would be
+  /// prohibitive, this function should return false. In that case, relocations
+  /// will be read from the local (unrelocated) object file and applied on the
+  /// fly. Note that this method is used purely for optimization purposes in the
+  /// common case of JITting in the local address space, so returning false
+  /// should always be correct.
+  virtual bool getLoadedSectionContents(const object::SectionRef &Sec,
+                                        StringRef &Data) const {
+    return false;
+  }
+
+  // FIXME: This is untested and unused anywhere in the LLVM project, it's
+  // used/needed by Julia (an external project). It should have some coverage
+  // (at least tests, but ideally example functionality).
+  /// Obtain a copy of this LoadedObjectInfo.
+  virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
+};
+
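+/// CRTP convenience base that implements LoadedObjectInfo::clone() by
+/// copy-constructing the Derived class. A hypothetical sketch (MyObjectInfo
+/// is illustrative only):
+/// \code
+///   struct MyObjectInfo : LoadedObjectInfoHelper<MyObjectInfo> {
+///     // Override getSectionLoadAddress() etc. here; clone() comes for free.
+///   };
+/// \endcode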
+template <typename Derived, typename Base = LoadedObjectInfo>
+struct LoadedObjectInfoHelper : Base {
+protected:
+  LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
+  LoadedObjectInfoHelper() = default;
+
+public:
+  template <typename... Ts>
+  LoadedObjectInfoHelper(Ts &&... Args) : Base(std::forward<Ts>(Args)...) {}
+
+  std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
+    return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DICONTEXT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
new file mode 100644
index 0000000..84b2339
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
@@ -0,0 +1,184 @@
+//===- DWARFAbbreviationDeclaration.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
+#define LLVM_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+class DWARFFormValue;
+class DWARFUnit;
+class raw_ostream;
+
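+/// Represents one entry of the .debug_abbrev section: an abbreviation code, a
+/// tag, a has-children flag, and a list of attribute specifications. A
+/// hypothetical iteration sketch (AbbrevDecl and handle() are illustrative
+/// only):
+/// \code
+///   for (const auto &Spec : AbbrevDecl.attributes())
+///     handle(Spec.Attr, Spec.Form);
+/// \endcode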
+class DWARFAbbreviationDeclaration {
+public:
+  struct AttributeSpec {
+    AttributeSpec(dwarf::Attribute A, dwarf::Form F, int64_t Value)
+        : Attr(A), Form(F), Value(Value) {
+      assert(isImplicitConst());
+    }
+    AttributeSpec(dwarf::Attribute A, dwarf::Form F, Optional<uint8_t> ByteSize)
+        : Attr(A), Form(F) {
+      assert(!isImplicitConst());
+      this->ByteSize.HasByteSize = ByteSize.hasValue();
+      if (this->ByteSize.HasByteSize)
+        this->ByteSize.ByteSize = *ByteSize;
+    }
+
+    dwarf::Attribute Attr;
+    dwarf::Form Form;
+
+  private:
+    /// The following field is used for ByteSize for non-implicit_const
+    /// attributes and as value for implicit_const ones, indicated by
+    /// Form == DW_FORM_implicit_const.
+    /// The following cases are distinguished:
+    /// * Form != DW_FORM_implicit_const and HasByteSize is true:
+    ///     ByteSize contains the fixed size in bytes for the Form in this
+    ///     object.
+    /// * Form != DW_FORM_implicit_const and HasByteSize is false:
+    ///     the byte size of the Form either varies according to the DWARFUnit
+    ///     that it is contained in, or the value size varies and must be
+    ///     decoded from the debug information in order to determine its size.
+    /// * Form == DW_FORM_implicit_const:
+    ///     Value contains value for the implicit_const attribute.
+    struct ByteSizeStorage {
+      bool HasByteSize;
+      uint8_t ByteSize;
+    };
+    union {
+      ByteSizeStorage ByteSize;
+      int64_t Value;
+    };
+
+  public:
+    bool isImplicitConst() const {
+      return Form == dwarf::DW_FORM_implicit_const;
+    }
+
+    int64_t getImplicitConstValue() const {
+      assert(isImplicitConst());
+      return Value;
+    }
+
+    /// Get the fixed byte size of this Form if possible. This function might
+    /// use the DWARFUnit to calculate the size of the Form, like for
+    /// DW_FORM_addr and DW_FORM_ref_addr, so this isn't just an accessor for
+    /// the ByteSize member.
+    Optional<int64_t> getByteSize(const DWARFUnit &U) const;
+  };
+  using AttributeSpecVector = SmallVector<AttributeSpec, 8>;
+
+  DWARFAbbreviationDeclaration();
+
+  uint32_t getCode() const { return Code; }
+  uint8_t getCodeByteSize() const { return CodeByteSize; }
+  dwarf::Tag getTag() const { return Tag; }
+  bool hasChildren() const { return HasChildren; }
+
+  using attr_iterator_range =
+      iterator_range<AttributeSpecVector::const_iterator>;
+
+  attr_iterator_range attributes() const {
+    return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
+  }
+
+  dwarf::Form getFormByIndex(uint32_t idx) const {
+    assert(idx < AttributeSpecs.size());
+    return AttributeSpecs[idx].Form;
+  }
+
+  size_t getNumAttributes() const {
+    return AttributeSpecs.size();
+  }
+
+  dwarf::Attribute getAttrByIndex(uint32_t idx) const {
+    assert(idx < AttributeSpecs.size());
+    return AttributeSpecs[idx].Attr;
+  }
+
+  /// Get the index of the specified attribute.
+  ///
+  /// Searches this abbreviation declaration for the index of the specified
+  /// attribute.
+  ///
+  /// \param attr DWARF attribute to search for.
+  /// \returns Optional index of the attribute if found, None otherwise.
+  Optional<uint32_t> findAttributeIndex(dwarf::Attribute attr) const;
+
+  /// Extract a DWARF form value from a DIE specified by DIE offset.
+  ///
+  /// Extract an attribute value for a DWARFUnit given the DIE offset and the
+  /// attribute.
+  ///
+  /// \param DIEOffset the DIE offset that points to the ULEB128 abbreviation
+  /// code in the .debug_info data.
+  /// \param Attr DWARF attribute to search for.
+  /// \param U the DWARFUnit that contains the DIE.
+  /// \returns Optional DWARF form value if the attribute was extracted.
+  Optional<DWARFFormValue> getAttributeValue(const uint32_t DIEOffset,
+                                             const dwarf::Attribute Attr,
+                                             const DWARFUnit &U) const;
+
+  bool extract(DataExtractor Data, uint32_t* OffsetPtr);
+  void dump(raw_ostream &OS) const;
+
+  /// Return an optional byte size of all attribute data in this abbreviation
+  /// if a constant byte size can be calculated given a DWARFUnit. This allows
+  /// DWARF parsing to be faster as many DWARF DIEs have a fixed byte size.
+  Optional<size_t> getFixedAttributesByteSize(const DWARFUnit &U) const;
+
+private:
+  void clear();
+
+  /// A helper structure that can quickly determine the size in bytes of an
+  /// abbreviation declaration.
+  struct FixedSizeInfo {
+    /// The fixed byte size for fixed size forms.
+    uint16_t NumBytes = 0;
+    /// Number of DW_FORM_addr forms in this abbreviation declaration.
+    uint8_t NumAddrs = 0;
+    /// Number of DW_FORM_ref_addr forms in this abbreviation declaration.
+    uint8_t NumRefAddrs = 0;
+    /// Number of forms that are 4 bytes in DWARF32 and 8 bytes in DWARF64.
+    uint8_t NumDwarfOffsets = 0;
+
+    FixedSizeInfo() = default;
+
+    /// Calculate the fixed size in bytes given a DWARFUnit.
+    ///
+    /// \param U the DWARFUnit to use when determining the byte size.
+    /// \returns the size in bytes for all attribute data in this abbreviation.
+    /// The returned size does not include bytes for the ULEB128 abbreviation
+    /// code.
+    size_t getByteSize(const DWARFUnit &U) const;
+  };
+
+  uint32_t Code;
+  dwarf::Tag Tag;
+  uint8_t CodeByteSize;
+  bool HasChildren;
+  AttributeSpecVector AttributeSpecs;
+  /// If this abbreviation has a fixed byte size then FixedAttributeSize member
+  /// variable below will have a value.
+  Optional<FixedSizeInfo> FixedAttributeSize;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
new file mode 100644
index 0000000..27f11ca
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
@@ -0,0 +1,510 @@
+//===- DWARFAcceleratorTable.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFACCELERATORTABLE_H
+#define LLVM_DEBUGINFO_DWARFACCELERATORTABLE_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+class ScopedPrinter;
+
+/// The accelerator tables are designed to allow efficient random access
+/// (using a symbol name as a key) into debug info by providing an index of the
+/// debug info DIEs. This class implements the common functionality of Apple and
+/// DWARF 5 accelerator tables.
+/// TODO: Generalize the rest of the AppleAcceleratorTable interface and move it
+/// to this class.
+class DWARFAcceleratorTable {
+protected:
+  DWARFDataExtractor AccelSection;
+  DataExtractor StringSection;
+
+public:
+  /// An abstract class representing a single entry in the accelerator tables.
+  class Entry {
+  protected:
+    SmallVector<DWARFFormValue, 3> Values;
+
+    Entry() = default;
+
+    // Make these protected so only (final) subclasses can be copied around.
+    Entry(const Entry &) = default;
+    Entry(Entry &&) = default;
+    Entry &operator=(const Entry &) = default;
+    Entry &operator=(Entry &&) = default;
+    ~Entry() = default;
+
+  public:
+    /// Returns the Offset of the Compilation Unit associated with this
+    /// Accelerator Entry or None if the Compilation Unit offset is not recorded
+    /// in this Accelerator Entry.
+    virtual Optional<uint64_t> getCUOffset() const = 0;
+
+    /// Returns the Section Offset of the Debug Info Entry associated with this
+    /// Accelerator Entry or None if the DIE offset is not recorded in this
+    /// Accelerator Entry. The returned offset is relative to the start of the
+    /// Section containing the DIE.
+    virtual Optional<uint64_t> getDIESectionOffset() const = 0;
+
+    /// Returns the Tag of the Debug Info Entry associated with this
+    /// Accelerator Entry or None if the Tag is not recorded in this
+    /// Accelerator Entry.
+    virtual Optional<dwarf::Tag> getTag() const = 0;
+
+    /// Returns the raw values of fields in the Accelerator Entry. In general,
+    /// these can only be interpreted with the help of the metadata in the
+    /// owning Accelerator Table.
+    ArrayRef<DWARFFormValue> getValues() const { return Values; }
+  };
+
+  DWARFAcceleratorTable(const DWARFDataExtractor &AccelSection,
+                        DataExtractor StringSection)
+      : AccelSection(AccelSection), StringSection(StringSection) {}
+  virtual ~DWARFAcceleratorTable();
+
+  virtual llvm::Error extract() = 0;
+  virtual void dump(raw_ostream &OS) const = 0;
+
+  DWARFAcceleratorTable(const DWARFAcceleratorTable &) = delete;
+  void operator=(const DWARFAcceleratorTable &) = delete;
+};
+
+/// This implements the Apple accelerator table format, a precursor of the
+/// DWARF 5 accelerator table format.
+class AppleAcceleratorTable : public DWARFAcceleratorTable {
+  struct Header {
+    uint32_t Magic;
+    uint16_t Version;
+    uint16_t HashFunction;
+    uint32_t BucketCount;
+    uint32_t HashCount;
+    uint32_t HeaderDataLength;
+
+    void dump(ScopedPrinter &W) const;
+  };
+
+  struct HeaderData {
+    using AtomType = uint16_t;
+    using Form = dwarf::Form;
+
+    uint32_t DIEOffsetBase;
+    SmallVector<std::pair<AtomType, Form>, 3> Atoms;
+
+    Optional<uint64_t> extractOffset(Optional<DWARFFormValue> Value) const;
+  };
+
+  struct Header Hdr;
+  struct HeaderData HdrData;
+  bool IsValid = false;
+
+  /// Returns true if we should continue scanning for entries or false if we've
+  /// reached the last (sentinel) entry or encountered a parsing error.
+  bool dumpName(ScopedPrinter &W, SmallVectorImpl<DWARFFormValue> &AtomForms,
+                uint32_t *DataOffset) const;
+
+public:
+  /// Apple-specific implementation of an Accelerator Entry.
+  class Entry final : public DWARFAcceleratorTable::Entry {
+    const HeaderData *HdrData = nullptr;
+
+    Entry(const HeaderData &Data);
+    Entry() = default;
+
+    void extract(const AppleAcceleratorTable &AccelTable, uint32_t *Offset);
+
+  public:
+    Optional<uint64_t> getCUOffset() const override;
+    Optional<uint64_t> getDIESectionOffset() const override;
+    Optional<dwarf::Tag> getTag() const override;
+
+    /// Returns the value of the Atom in this Accelerator Entry, if the Entry
+    /// contains such an Atom.
+    Optional<DWARFFormValue> lookup(HeaderData::AtomType Atom) const;
+
+    friend class AppleAcceleratorTable;
+    friend class ValueIterator;
+  };
+
+  class ValueIterator : public std::iterator<std::input_iterator_tag, Entry> {
+    const AppleAcceleratorTable *AccelTable = nullptr;
+    Entry Current;           ///< The current entry.
+    unsigned DataOffset = 0; ///< Offset into the section.
+    unsigned Data = 0; ///< Current data entry.
+    unsigned NumData = 0; ///< Number of data entries.
+
+    /// Advance the iterator.
+    void Next();
+  public:
+    /// Construct a new iterator for the entries at \p DataOffset.
+    ValueIterator(const AppleAcceleratorTable &AccelTable, unsigned DataOffset);
+    /// End marker.
+    ValueIterator() = default;
+
+    const Entry &operator*() const { return Current; }
+    ValueIterator &operator++() { Next(); return *this; }
+    ValueIterator operator++(int) {
+      ValueIterator I = *this;
+      Next();
+      return I;
+    }
+    friend bool operator==(const ValueIterator &A, const ValueIterator &B) {
+      return A.NumData == B.NumData && A.DataOffset == B.DataOffset;
+    }
+    friend bool operator!=(const ValueIterator &A, const ValueIterator &B) {
+      return !(A == B);
+    }
+  };
+
+  AppleAcceleratorTable(const DWARFDataExtractor &AccelSection,
+                        DataExtractor StringSection)
+      : DWARFAcceleratorTable(AccelSection, StringSection) {}
+
+  llvm::Error extract() override;
+  uint32_t getNumBuckets();
+  uint32_t getNumHashes();
+  uint32_t getSizeHdr();
+  uint32_t getHeaderDataLength();
+
+  /// Return the Atom description, which can be used to interpret the raw values
+  /// of the Accelerator Entries in this table.
+  ArrayRef<std::pair<HeaderData::AtomType, HeaderData::Form>> getAtomsDesc();
+  bool validateForms();
+
+  /// Return information related to the DWARF DIE we're looking for when
+  /// performing a lookup by name.
+  ///
+  /// \param HashDataOffset an offset into the hash data table
+  /// \returns <DieOffset, DieTag>
+  /// DieOffset is the offset into the .debug_info section for the DIE
+  /// related to the input hash data offset.
+  /// DieTag is the tag of the DIE
+  std::pair<uint32_t, dwarf::Tag> readAtoms(uint32_t &HashDataOffset);
+  void dump(raw_ostream &OS) const override;
+
+  /// Look up all entries in the accelerator table matching \c Key.
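+  ///
+  /// A hypothetical lookup sketch (Table and process() are illustrative only):
+  /// \code
+  ///   for (const auto &Entry : Table.equal_range("main"))
+  ///     if (Optional<uint64_t> Off = Entry.getDIESectionOffset())
+  ///       process(*Off);
+  /// \endcode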
+  iterator_range<ValueIterator> equal_range(StringRef Key) const;
+};
+
+/// The .debug_names section consists of one or more units. Each unit starts
+/// with a header, which is followed by a list of compilation units and of
+/// local and foreign type units.
+///
+/// These may be followed by an (optional) hash lookup table, which consists of
+/// an array of buckets and hashes similar to the Apple tables above. The only
+/// difference is that the hashes array is 1-based, and consequently an empty
+/// bucket is denoted by 0 and not UINT32_MAX.
+///
+/// Next is the name table, which consists of an array of names and array of
+/// entry offsets. This is different from the Apple tables, which store names
+/// next to the actual entries.
+///
+/// The structure of the entries is described by an abbreviations table, which
+/// comes after the name table. Unlike the Apple tables, which have a uniform
+/// entry structure described in the header, each .debug_names entry may have
+/// different index attributes (DW_IDX_???) attached to it.
+///
+/// The last segment consists of a list of entries, which is a 0-terminated list
+/// referenced by the name table and interpreted with the help of the
+/// abbreviation table.
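+///
+/// A hypothetical lookup sketch, mirroring the Apple tables above (Names and
+/// consume() are illustrative only):
+/// \code
+///   for (const auto &Entry : Names.equal_range("main"))
+///     consume(Entry.tag());
+/// \endcode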
+class DWARFDebugNames : public DWARFAcceleratorTable {
+  /// The fixed-size part of a Dwarf 5 Name Index header
+  struct HeaderPOD {
+    uint32_t UnitLength;
+    uint16_t Version;
+    uint16_t Padding;
+    uint32_t CompUnitCount;
+    uint32_t LocalTypeUnitCount;
+    uint32_t ForeignTypeUnitCount;
+    uint32_t BucketCount;
+    uint32_t NameCount;
+    uint32_t AbbrevTableSize;
+    uint32_t AugmentationStringSize;
+  };
+
+public:
+  class NameIndex;
+  class ValueIterator;
+
+  /// Dwarf 5 Name Index header.
+  struct Header : public HeaderPOD {
+    SmallString<8> AugmentationString;
+
+    Error extract(const DWARFDataExtractor &AS, uint32_t *Offset);
+    void dump(ScopedPrinter &W) const;
+  };
+
+  /// Index attribute and its encoding.
+  struct AttributeEncoding {
+    dwarf::Index Index;
+    dwarf::Form Form;
+
+    constexpr AttributeEncoding(dwarf::Index Index, dwarf::Form Form)
+        : Index(Index), Form(Form) {}
+
+    friend bool operator==(const AttributeEncoding &LHS,
+                           const AttributeEncoding &RHS) {
+      return LHS.Index == RHS.Index && LHS.Form == RHS.Form;
+    }
+  };
+
+  /// Abbreviation describing the encoding of Name Index entries.
+  struct Abbrev {
+    uint32_t Code;  ///< Abbreviation code
+    dwarf::Tag Tag; ///< Dwarf Tag of the described entity.
+    std::vector<AttributeEncoding> Attributes; ///< List of index attributes.
+
+    Abbrev(uint32_t Code, dwarf::Tag Tag,
+           std::vector<AttributeEncoding> Attributes)
+        : Code(Code), Tag(Tag), Attributes(std::move(Attributes)) {}
+
+    void dump(ScopedPrinter &W) const;
+  };
+
+  /// DWARF v5-specific implementation of an Accelerator Entry.
+  class Entry final : public DWARFAcceleratorTable::Entry {
+    const NameIndex *NameIdx;
+    const Abbrev *Abbr;
+
+    Entry(const NameIndex &NameIdx, const Abbrev &Abbr);
+
+    /// Returns the Index into the Compilation Unit list of the owning Name
+    /// Index or None if this Accelerator Entry does not have an associated
+    /// Compilation Unit. It is up to the user to verify that the returned Index
+    /// is valid in the owning NameIndex (or use getCUOffset(), which will
+    /// handle that check itself). Note that entries in NameIndexes which index
+    /// just a single Compilation Unit are implicitly associated with that unit,
+    /// so this function will return 0 even without an explicit
+    /// DW_IDX_compile_unit attribute.
+    Optional<uint64_t> getCUIndex() const;
+
+  public:
+    Optional<uint64_t> getCUOffset() const override;
+    Optional<uint64_t> getDIESectionOffset() const override;
+    Optional<dwarf::Tag> getTag() const override { return tag(); }
+
+    /// .debug_names-specific getter, which always succeeds (DWARF v5 index
+    /// entries always have a tag).
+    dwarf::Tag tag() const { return Abbr->Tag; }
+
+    /// Returns the Offset of the DIE within the containing CU or TU.
+    Optional<uint64_t> getDIEUnitOffset() const;
+
+    /// Return the Abbreviation that can be used to interpret the raw values of
+    /// this Accelerator Entry.
+    const Abbrev &getAbbrev() const { return *Abbr; }
+
+    /// Returns the value of the Index Attribute in this Accelerator Entry, if
+    /// the Entry contains such an Attribute.
+    Optional<DWARFFormValue> lookup(dwarf::Index Index) const;
+
+    void dump(ScopedPrinter &W) const;
+
+    friend class NameIndex;
+    friend class ValueIterator;
+  };
+
+private:
+  /// Error returned by NameIndex::getEntry to report it has reached the end of
+  /// the entry list.
+  class SentinelError : public ErrorInfo<SentinelError> {
+  public:
+    static char ID;
+
+    void log(raw_ostream &OS) const override { OS << "Sentinel"; }
+    std::error_code convertToErrorCode() const override;
+  };
+
+  /// DenseMapInfo for struct Abbrev.
+  struct AbbrevMapInfo {
+    static Abbrev getEmptyKey();
+    static Abbrev getTombstoneKey();
+    static unsigned getHashValue(uint32_t Code) {
+      return DenseMapInfo<uint32_t>::getHashValue(Code);
+    }
+    static unsigned getHashValue(const Abbrev &Abbr) {
+      return getHashValue(Abbr.Code);
+    }
+    static bool isEqual(uint32_t LHS, const Abbrev &RHS) {
+      return LHS == RHS.Code;
+    }
+    static bool isEqual(const Abbrev &LHS, const Abbrev &RHS) {
+      return LHS.Code == RHS.Code;
+    }
+  };
+
+public:
+  /// A single entry in the Name Table (Dwarf 5 sect. 6.1.1.4.6) of the Name
+  /// Index.
+  struct NameTableEntry {
+    uint32_t StringOffset; ///< Offset of the name of the described entities.
+    uint32_t EntryOffset;  ///< Offset of the first Entry in the list.
+  };
+
+  /// Represents a single accelerator table within the Dwarf 5 .debug_names
+  /// section.
+  class NameIndex {
+    DenseSet<Abbrev, AbbrevMapInfo> Abbrevs;
+    struct Header Hdr;
+    const DWARFDebugNames &Section;
+
+    // Base of the whole unit and of various important tables, as offsets from
+    // the start of the section.
+    uint32_t Base;
+    uint32_t CUsBase;
+    uint32_t BucketsBase;
+    uint32_t HashesBase;
+    uint32_t StringOffsetsBase;
+    uint32_t EntryOffsetsBase;
+    uint32_t EntriesBase;
+
+    Expected<Entry> getEntry(uint32_t *Offset) const;
+
+    void dumpCUs(ScopedPrinter &W) const;
+    void dumpLocalTUs(ScopedPrinter &W) const;
+    void dumpForeignTUs(ScopedPrinter &W) const;
+    void dumpAbbreviations(ScopedPrinter &W) const;
+    bool dumpEntry(ScopedPrinter &W, uint32_t *Offset) const;
+    void dumpName(ScopedPrinter &W, uint32_t Index,
+                  Optional<uint32_t> Hash) const;
+    void dumpBucket(ScopedPrinter &W, uint32_t Bucket) const;
+
+    Expected<AttributeEncoding> extractAttributeEncoding(uint32_t *Offset);
+
+    Expected<std::vector<AttributeEncoding>>
+    extractAttributeEncodings(uint32_t *Offset);
+
+    Expected<Abbrev> extractAbbrev(uint32_t *Offset);
+
+  public:
+    NameIndex(const DWARFDebugNames &Section, uint32_t Base)
+        : Section(Section), Base(Base) {}
+
+    /// Reads offset of compilation unit CU. CU is 0-based.
+    uint32_t getCUOffset(uint32_t CU) const;
+    uint32_t getCUCount() const { return Hdr.CompUnitCount; }
+
+    /// Reads offset of local type unit TU. TU is 0-based.
+    uint32_t getLocalTUOffset(uint32_t TU) const;
+    uint32_t getLocalTUCount() const { return Hdr.LocalTypeUnitCount; }
+
+    /// Reads signature of foreign type unit TU. TU is 0-based.
+    uint64_t getForeignTUSignature(uint32_t TU) const;
+    uint32_t getForeignTUCount() const { return Hdr.ForeignTypeUnitCount; }
+
+    /// Reads an entry in the Bucket Array for the given Bucket. The returned
+    /// value is a (1-based) index into the Names, StringOffsets and
+    /// EntryOffsets arrays. The input Bucket index is 0-based.
+    uint32_t getBucketArrayEntry(uint32_t Bucket) const;
+    uint32_t getBucketCount() const { return Hdr.BucketCount; }
+
+    /// Reads an entry in the Hash Array for the given Index. The input Index
+    /// is 1-based.
+    uint32_t getHashArrayEntry(uint32_t Index) const;
+
+    /// Reads an entry in the Name Table for the given Index. The Name Table
+    /// consists of two arrays -- String Offsets and Entry Offsets. The returned
+    /// offsets are relative to the starts of respective sections. Input Index
+    /// is 1-based.
+    NameTableEntry getNameTableEntry(uint32_t Index) const;
+
+    uint32_t getNameCount() const { return Hdr.NameCount; }
+
+    const DenseSet<Abbrev, AbbrevMapInfo> &getAbbrevs() const {
+      return Abbrevs;
+    }
+
+    llvm::Error extract();
+    uint32_t getUnitOffset() const { return Base; }
+    uint32_t getNextUnitOffset() const { return Base + 4 + Hdr.UnitLength; }
+    void dump(ScopedPrinter &W) const;
+
+    friend class DWARFDebugNames;
+  };
+
+  class ValueIterator : public std::iterator<std::input_iterator_tag, Entry> {
+
+    /// The Name Index we are currently iterating through. The implementation
+    /// relies on the fact that this can also be used as an iterator into the
+    /// "NameIndices" vector in the Accelerator section.
+    const NameIndex *CurrentIndex = nullptr;
+
+    Optional<Entry> CurrentEntry;
+    unsigned DataOffset = 0; ///< Offset into the section.
+    std::string Key;         ///< The Key we are searching for.
+    Optional<uint32_t> Hash; ///< Hash of Key, if it has been computed.
+
+    bool getEntryAtCurrentOffset();
+    Optional<uint32_t> findEntryOffsetInCurrentIndex();
+    bool findInCurrentIndex();
+    void searchFromStartOfCurrentIndex();
+    void next();
+
+    /// Set the iterator to the "end" state.
+    void setEnd() { *this = ValueIterator(); }
+
+  public:
+    /// Create a "begin" iterator for looping over all entries in the
+    /// accelerator table matching Key. The iterator will run through all Name
+    /// Indexes in the section in sequence.
+    ValueIterator(const DWARFDebugNames &AccelTable, StringRef Key);
+
+    /// End marker.
+    ValueIterator() = default;
+
+    const Entry &operator*() const { return *CurrentEntry; }
+    ValueIterator &operator++() {
+      next();
+      return *this;
+    }
+    ValueIterator operator++(int) {
+      ValueIterator I = *this;
+      next();
+      return I;
+    }
+
+    friend bool operator==(const ValueIterator &A, const ValueIterator &B) {
+      return A.CurrentIndex == B.CurrentIndex && A.DataOffset == B.DataOffset;
+    }
+    friend bool operator!=(const ValueIterator &A, const ValueIterator &B) {
+      return !(A == B);
+    }
+  };
+
+private:
+  SmallVector<NameIndex, 0> NameIndices;
+
+public:
+  DWARFDebugNames(const DWARFDataExtractor &AccelSection,
+                  DataExtractor StringSection)
+      : DWARFAcceleratorTable(AccelSection, StringSection) {}
+
+  llvm::Error extract() override;
+  void dump(raw_ostream &OS) const override;
+
+  /// Look up all entries in the accelerator table matching \c Key.
+  iterator_range<ValueIterator> equal_range(StringRef Key) const;
+
+  using const_iterator = SmallVector<NameIndex, 0>::const_iterator;
+  const_iterator begin() const { return NameIndices.begin(); }
+  const_iterator end() const { return NameIndices.end(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFACCELERATORTABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAddressRange.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAddressRange.h
new file mode 100644
index 0000000..5a7df5c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAddressRange.h
@@ -0,0 +1,68 @@
+//===- DWARFAddressRange.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H
+#define LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include <cstdint>
+#include <tuple>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
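+/// A half-open range of addresses [LowPC, HighPC) plus the index of the
+/// section the range belongs to. For example:
+/// \code
+///   DWARFAddressRange A(0x10, 0x20), B(0x20, 0x30), C(0x10, 0x30);
+///   A.intersects(B); // false: the ranges only touch at 0x20.
+///   C.contains(A);   // true.
+/// \endcode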
+struct DWARFAddressRange {
+  uint64_t LowPC;
+  uint64_t HighPC;
+  uint64_t SectionIndex;
+
+  DWARFAddressRange() = default;
+
+  /// Used for unit testing.
+  DWARFAddressRange(uint64_t LowPC, uint64_t HighPC, uint64_t SectionIndex = 0)
+      : LowPC(LowPC), HighPC(HighPC), SectionIndex(SectionIndex) {}
+
+  /// Returns true if LowPC is less than or equal to HighPC. This accounts for
+  /// dead-stripped ranges.
+  bool valid() const { return LowPC <= HighPC; }
+
+  /// Returns true if [LowPC, HighPC) intersects with [RHS.LowPC, RHS.HighPC).
+  bool intersects(const DWARFAddressRange &RHS) const {
+    assert(valid() && RHS.valid());
+    // Empty ranges can't intersect.
+    if (LowPC == HighPC || RHS.LowPC == RHS.HighPC)
+      return false;
+    return LowPC < RHS.HighPC && RHS.LowPC < HighPC;
+  }
+
+  /// Returns true if [LowPC, HighPC) fully contains [RHS.LowPC, RHS.HighPC).
+  bool contains(const DWARFAddressRange &RHS) const {
+    assert(valid() && RHS.valid());
+    return LowPC <= RHS.LowPC && RHS.HighPC <= HighPC;
+  }
+
+  void dump(raw_ostream &OS, uint32_t AddressSize,
+            DIDumpOptions DumpOpts = {}) const;
+};
+
+static inline bool operator<(const DWARFAddressRange &LHS,
+                             const DWARFAddressRange &RHS) {
+  return std::tie(LHS.LowPC, LHS.HighPC) < std::tie(RHS.LowPC, RHS.HighPC);
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const DWARFAddressRange &R);
+
+/// DWARFAddressRangesVector - represents a set of absolute address ranges.
+using DWARFAddressRangesVector = std::vector<DWARFAddressRange>;
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAttribute.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAttribute.h
new file mode 100644
index 0000000..f0672bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFAttribute.h
@@ -0,0 +1,56 @@
+//===- DWARFAttribute.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFATTRIBUTE_H
+#define LLVM_DEBUGINFO_DWARFATTRIBUTE_H
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include <cstdint>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// Encapsulates a DWARF attribute value and all of the data required to
+/// describe the attribute value.
+///
+/// This class is designed to be used by clients that want to iterate across all
+/// attributes in a DWARFDie.
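+///
+/// A hypothetical iteration sketch over a DWARFDie (Die and dispatch() are
+/// illustrative only):
+/// \code
+///   for (const DWARFAttribute &A : Die.attributes())
+///     dispatch(A.Attr, A.Value);
+/// \endcode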
+struct DWARFAttribute {
+  /// The debug info/types offset for this attribute.
+  uint32_t Offset = 0;
+  /// The debug info/types section byte size of the data for this attribute.
+  uint32_t ByteSize = 0;
+  /// The attribute enumeration of this attribute.
+  dwarf::Attribute Attr;
+  /// The form and value for this attribute.
+  DWARFFormValue Value;
+
+  DWARFAttribute(uint32_t O, dwarf::Attribute A = dwarf::Attribute(0),
+                 dwarf::Form F = dwarf::Form(0))
+      : Offset(O), Attr(A), Value(F) {}
+
+  bool isValid() const {
+    return Offset != 0 && Attr != dwarf::Attribute(0);
+  }
+
+  explicit operator bool() const {
+    return isValid();
+  }
+
+  void clear() {
+    Offset = 0;
+    ByteSize = 0;
+    Attr = dwarf::Attribute(0);
+    Value = DWARFFormValue();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFATTRIBUTE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
new file mode 100644
index 0000000..a18adf8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
@@ -0,0 +1,39 @@
+//===- DWARFCompileUnit.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFCOMPILEUNIT_H
+#define LLVM_DEBUGINFO_DWARFCOMPILEUNIT_H
+
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+
+namespace llvm {
+
+class DWARFCompileUnit : public DWARFUnit {
+public:
+  DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
+                   const DWARFDebugAbbrev *DA, const DWARFSection *RS,
+                   StringRef SS, const DWARFSection &SOS,
+                   const DWARFSection *AOS, const DWARFSection &LS, bool LE,
+                   bool IsDWO, const DWARFUnitSectionBase &UnitSection,
+                   const DWARFUnitIndex::Entry *Entry)
+      : DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
+                  UnitSection, Entry) {}
+
+  // VTable anchor.
+  ~DWARFCompileUnit() override;
+
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts);
+
+  static const DWARFSectionKind Section = DW_SECT_INFO;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFCOMPILEUNIT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFContext.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFContext.h
new file mode 100644
index 0000000..e842cf2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -0,0 +1,327 @@
+//===- DWARFContext.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
+#define LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
+#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugAranges.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugFrame.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugMacro.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+#include "llvm/DebugInfo/DWARF/DWARFGdbIndex.h"
+#include "llvm/DebugInfo/DWARF/DWARFObject.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFTypeUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Host.h"
+#include <cstdint>
+#include <deque>
+#include <map>
+#include <memory>
+
+namespace llvm {
+
+class MCRegisterInfo;
+class MemoryBuffer;
+class raw_ostream;
+
+/// Used as a return value for an error callback passed to the DWARF context.
+/// The callback should return Halt if the client application wants to stop
+/// object parsing, or Continue otherwise.
+enum class ErrorPolicy { Halt, Continue };
+
+/// DWARFContext
+/// This data structure is the top-level entity that deals with DWARF debug
+/// information parsing. The actual data is supplied through the DWARFObject.
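+///
+/// A hypothetical construction-and-query sketch (Obj is an
+/// object::ObjectFile):
+/// \code
+///   std::unique_ptr<DWARFContext> Ctx = DWARFContext::create(Obj);
+///   for (const auto &CU : Ctx->compile_units())
+///     CU->dump(llvm::outs(), DIDumpOptions());
+///   DILineInfo Info = Ctx->getLineInfoForAddress(0x401000);
+/// \endcode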
+class DWARFContext : public DIContext {
+  DWARFUnitSection<DWARFCompileUnit> CUs;
+  std::deque<DWARFUnitSection<DWARFTypeUnit>> TUs;
+  std::unique_ptr<DWARFUnitIndex> CUIndex;
+  std::unique_ptr<DWARFGdbIndex> GdbIndex;
+  std::unique_ptr<DWARFUnitIndex> TUIndex;
+  std::unique_ptr<DWARFDebugAbbrev> Abbrev;
+  std::unique_ptr<DWARFDebugLoc> Loc;
+  std::unique_ptr<DWARFDebugAranges> Aranges;
+  std::unique_ptr<DWARFDebugLine> Line;
+  std::unique_ptr<DWARFDebugFrame> DebugFrame;
+  std::unique_ptr<DWARFDebugFrame> EHFrame;
+  std::unique_ptr<DWARFDebugMacro> Macro;
+  std::unique_ptr<DWARFDebugNames> Names;
+  std::unique_ptr<AppleAcceleratorTable> AppleNames;
+  std::unique_ptr<AppleAcceleratorTable> AppleTypes;
+  std::unique_ptr<AppleAcceleratorTable> AppleNamespaces;
+  std::unique_ptr<AppleAcceleratorTable> AppleObjC;
+
+  DWARFUnitSection<DWARFCompileUnit> DWOCUs;
+  std::deque<DWARFUnitSection<DWARFTypeUnit>> DWOTUs;
+  std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
+  std::unique_ptr<DWARFDebugLocDWO> LocDWO;
+
+  /// The maximum DWARF version of all units.
+  unsigned MaxVersion = 0;
+
+  struct DWOFile {
+    object::OwningBinary<object::ObjectFile> File;
+    std::unique_ptr<DWARFContext> Context;
+  };
+  StringMap<std::weak_ptr<DWOFile>> DWOFiles;
+  std::weak_ptr<DWOFile> DWP;
+  bool CheckedForDWP = false;
+  std::string DWPName;
+
+  std::unique_ptr<MCRegisterInfo> RegInfo;
+
+  /// Read compile units from the debug_info section (if necessary)
+  /// and store them in CUs.
+  void parseCompileUnits();
+
+  /// Read type units from the debug_types sections (if necessary)
+  /// and store them in TUs.
+  void parseTypeUnits();
+
+  /// Read compile units from the debug_info.dwo section (if necessary)
+  /// and store them in DWOCUs.
+  void parseDWOCompileUnits();
+
+  /// Read type units from the debug_types.dwo section (if necessary)
+  /// and store them in DWOTUs.
+  void parseDWOTypeUnits();
+
+protected:
+  std::unique_ptr<const DWARFObject> DObj;
+
+public:
+  DWARFContext(std::unique_ptr<const DWARFObject> DObj,
+               std::string DWPName = "");
+  ~DWARFContext();
+
+  DWARFContext(DWARFContext &) = delete;
+  DWARFContext &operator=(DWARFContext &) = delete;
+
+  const DWARFObject &getDWARFObj() const { return *DObj; }
+
+  static bool classof(const DIContext *DICtx) {
+    return DICtx->getKind() == CK_DWARF;
+  }
+
+  /// Dump a textual representation to \p OS. If any \p DumpOffsets are present,
+  /// dump only the record at the specified offset.
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
+            std::array<Optional<uint64_t>, DIDT_ID_Count> DumpOffsets);
+
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) override {
+    std::array<Optional<uint64_t>, DIDT_ID_Count> DumpOffsets;
+    dump(OS, DumpOpts, DumpOffsets);
+  }
+
+  bool verify(raw_ostream &OS, DIDumpOptions DumpOpts = {}) override;
+
+  using cu_iterator_range = DWARFUnitSection<DWARFCompileUnit>::iterator_range;
+  using tu_iterator_range = DWARFUnitSection<DWARFTypeUnit>::iterator_range;
+  using tu_section_iterator_range = iterator_range<decltype(TUs)::iterator>;
+
+  /// Get compile units in this context.
+  cu_iterator_range compile_units() {
+    parseCompileUnits();
+    return cu_iterator_range(CUs.begin(), CUs.end());
+  }
+
+  /// Get type units in this context.
+  tu_section_iterator_range type_unit_sections() {
+    parseTypeUnits();
+    return tu_section_iterator_range(TUs.begin(), TUs.end());
+  }
+
+  /// Get compile units in the DWO context.
+  cu_iterator_range dwo_compile_units() {
+    parseDWOCompileUnits();
+    return cu_iterator_range(DWOCUs.begin(), DWOCUs.end());
+  }
+
+  /// Get type units in the DWO context.
+  tu_section_iterator_range dwo_type_unit_sections() {
+    parseDWOTypeUnits();
+    return tu_section_iterator_range(DWOTUs.begin(), DWOTUs.end());
+  }
+
+  /// Get the number of compile units in this context.
+  unsigned getNumCompileUnits() {
+    parseCompileUnits();
+    return CUs.size();
+  }
+
+  /// Get the number of type units in this context.
+  unsigned getNumTypeUnits() {
+    parseTypeUnits();
+    return TUs.size();
+  }
+
+  /// Get the number of compile units in the DWO context.
+  unsigned getNumDWOCompileUnits() {
+    parseDWOCompileUnits();
+    return DWOCUs.size();
+  }
+
+  /// Get the number of type units in the DWO context.
+  unsigned getNumDWOTypeUnits() {
+    parseDWOTypeUnits();
+    return DWOTUs.size();
+  }
+
+  /// Get the compile unit at the specified index in this context.
+  DWARFCompileUnit *getCompileUnitAtIndex(unsigned index) {
+    parseCompileUnits();
+    return CUs[index].get();
+  }
+
+  /// Get the compile unit at the specified index for the DWO compile units.
+  DWARFCompileUnit *getDWOCompileUnitAtIndex(unsigned index) {
+    parseDWOCompileUnits();
+    return DWOCUs[index].get();
+  }
+
+  DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
+
+  /// Get a DIE given an exact offset.
+  DWARFDie getDIEForOffset(uint32_t Offset);
+
+  unsigned getMaxVersion() const { return MaxVersion; }
+
+  void setMaxVersionIfGreater(unsigned Version) {
+    if (Version > MaxVersion)
+      MaxVersion = Version;
+  }
+
+  const DWARFUnitIndex &getCUIndex();
+  DWARFGdbIndex &getGdbIndex();
+  const DWARFUnitIndex &getTUIndex();
+
+  /// Get a pointer to the parsed DebugAbbrev object.
+  const DWARFDebugAbbrev *getDebugAbbrev();
+
+  /// Get a pointer to the parsed DebugLoc object.
+  const DWARFDebugLoc *getDebugLoc();
+
+  /// Get a pointer to the parsed DWO abbreviations object.
+  const DWARFDebugAbbrev *getDebugAbbrevDWO();
+
+  /// Get a pointer to the parsed DebugLocDWO object.
+  const DWARFDebugLocDWO *getDebugLocDWO();
+
+  /// Get a pointer to the parsed DebugAranges object.
+  const DWARFDebugAranges *getDebugAranges();
+
+  /// Get a pointer to the parsed frame information object.
+  const DWARFDebugFrame *getDebugFrame();
+
+  /// Get a pointer to the parsed eh frame information object.
+  const DWARFDebugFrame *getEHFrame();
+
+  /// Get a pointer to the parsed DebugMacro object.
+  const DWARFDebugMacro *getDebugMacro();
+
+  /// Get a reference to the parsed .debug_names accelerator table.
+  const DWARFDebugNames &getDebugNames();
+
+  /// Get a reference to the parsed .apple_names accelerator table.
+  const AppleAcceleratorTable &getAppleNames();
+
+  /// Get a reference to the parsed .apple_types accelerator table.
+  const AppleAcceleratorTable &getAppleTypes();
+
+  /// Get a reference to the parsed .apple_namespaces accelerator table.
+  const AppleAcceleratorTable &getAppleNamespaces();
+
+  /// Get a reference to the parsed .apple_objc accelerator table.
+  const AppleAcceleratorTable &getAppleObjC();
+
+  /// Get a pointer to a parsed line table corresponding to a compile unit.
+  const DWARFDebugLine::LineTable *getLineTableForUnit(DWARFUnit *cu);
+
+  DataExtractor getStringExtractor() const {
+    return DataExtractor(DObj->getStringSection(), false, 0);
+  }
+  DataExtractor getLineStringExtractor() const {
+    return DataExtractor(DObj->getLineStringSection(), false, 0);
+  }
+
+  /// Wraps the returned DIEs for a given address.
+  struct DIEsForAddress {
+    DWARFCompileUnit *CompileUnit = nullptr;
+    DWARFDie FunctionDIE;
+    DWARFDie BlockDIE;
+    explicit operator bool() const { return CompileUnit != nullptr; }
+  };
+
+  /// Get the compilation unit, the function DIE and lexical block DIE for the
+  /// given address where applicable.
+  DIEsForAddress getDIEsForAddress(uint64_t Address);
+
+  DILineInfo getLineInfoForAddress(uint64_t Address,
+      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+  DILineInfoTable getLineInfoForAddressRange(uint64_t Address, uint64_t Size,
+      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+  DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+
+  bool isLittleEndian() const { return DObj->isLittleEndian(); }
+  static bool isSupportedVersion(unsigned version) {
+    return version == 2 || version == 3 || version == 4 || version == 5;
+  }
+
+  std::shared_ptr<DWARFContext> getDWOContext(StringRef AbsolutePath);
+
+  const MCRegisterInfo *getRegisterInfo() const { return RegInfo.get(); }
+
+  /// Function used to implement the default error reporting policy. Prints an
+  /// error message and returns Continue, so the DWARF context ignores the
+  /// error.
+  static ErrorPolicy defaultErrorHandler(Error E);
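+
+  /// Create a DWARFContext from an object file. A minimal sketch (assuming
+  /// \c Obj is a loaded object::ObjectFile and \c Addr is hypothetical):
+  /// \code
+  ///   std::unique_ptr<DWARFContext> DICtx = DWARFContext::create(Obj);
+  ///   DILineInfo Info = DICtx->getLineInfoForAddress(Addr);
+  /// \endcode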
+  static std::unique_ptr<DWARFContext>
+  create(const object::ObjectFile &Obj, const LoadedObjectInfo *L = nullptr,
+         function_ref<ErrorPolicy(Error)> HandleError = defaultErrorHandler,
+         std::string DWPName = "");
+
+  static std::unique_ptr<DWARFContext>
+  create(const StringMap<std::unique_ptr<MemoryBuffer>> &Sections,
+         uint8_t AddrSize, bool isLittleEndian = sys::IsLittleEndianHost);
+
+  /// Loads register info for the architecture of the provided object file.
+  /// Improves readability of dumped DWARF expressions. Requires the caller to
+  /// have initialized the relevant target descriptions.
+  Error loadRegisterInfo(const object::ObjectFile &Obj);
+
+private:
+  /// Return the compile unit that includes an offset (relative to .debug_info).
+  DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
+
+  /// Return the compile unit which contains the instruction with the
+  /// provided address.
+  DWARFCompileUnit *getCompileUnitForAddress(uint64_t Address);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h
new file mode 100644
index 0000000..10e146b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h
@@ -0,0 +1,58 @@
+//===- DWARFDataExtractor.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDATAEXTRACTOR_H
+#define LLVM_DEBUGINFO_DWARFDATAEXTRACTOR_H
+
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/Support/DataExtractor.h"
+
+namespace llvm {
+class DWARFObject;
+
+/// A DataExtractor (typically for an in-memory copy of an object-file section)
+/// plus a relocation map for that section, if there is one.
+class DWARFDataExtractor : public DataExtractor {
+  const DWARFObject *Obj = nullptr;
+  const DWARFSection *Section = nullptr;
+
+public:
+  /// Constructor for the normal case of extracting data from a DWARF section.
+  /// The DWARFSection's lifetime must be at least as long as the extractor's.
+  DWARFDataExtractor(const DWARFObject &Obj, const DWARFSection &Section,
+                     bool IsLittleEndian, uint8_t AddressSize)
+      : DataExtractor(Section.Data, IsLittleEndian, AddressSize), Obj(&Obj),
+        Section(&Section) {}
+
+  /// Constructor for cases when there are no relocations.
+  DWARFDataExtractor(StringRef Data, bool IsLittleEndian, uint8_t AddressSize)
+    : DataExtractor(Data, IsLittleEndian, AddressSize) {}
+
+  /// Extracts a value and applies a relocation to the result if
+  /// one exists for the given offset.
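+  ///
+  /// A short sketch (assuming \c DE is a DWARFDataExtractor over some DWARF
+  /// section; the names are hypothetical):
+  /// \code
+  ///   uint32_t Off = 0;
+  ///   uint64_t SectionIndex;
+  ///   uint64_t Value = DE.getRelocatedValue(4, &Off, &SectionIndex);
+  /// \endcode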
+  uint64_t getRelocatedValue(uint32_t Size, uint32_t *Off,
+                             uint64_t *SectionIndex = nullptr) const;
+
+  /// Extracts an address-sized value and applies a relocation to the result if
+  /// one exists for the given offset.
+  uint64_t getRelocatedAddress(uint32_t *Off, uint64_t *SecIx = nullptr) const {
+    return getRelocatedValue(getAddressSize(), Off, SecIx);
+  }
+
+  /// Extracts a DWARF-encoded pointer in \p Offset using \p Encoding.
+  /// Some DWARF encodings use a PC-relative adjustment; for those values,
+  /// \p AbsPosOffset, which should reflect the absolute address of this
+  /// pointer, is used to compute the final value.
+  Optional<uint64_t> getEncodedPointer(uint32_t *Offset, uint8_t Encoding,
+                                       uint64_t AbsPosOffset = 0) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDATAEXTRACTOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
new file mode 100644
index 0000000..d277ec3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
@@ -0,0 +1,88 @@
+//===- DWARFDebugAbbrev.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGABBREV_H
+#define LLVM_DEBUGINFO_DWARFDEBUGABBREV_H
+
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFAbbreviationDeclarationSet {
+  uint32_t Offset;
+  /// Code of the first abbreviation, if all abbreviations in the set have
+  /// consecutive codes. UINT32_MAX otherwise.
+  uint32_t FirstAbbrCode;
+  std::vector<DWARFAbbreviationDeclaration> Decls;
+
+  using const_iterator =
+      std::vector<DWARFAbbreviationDeclaration>::const_iterator;
+
+public:
+  DWARFAbbreviationDeclarationSet();
+
+  uint32_t getOffset() const { return Offset; }
+  void dump(raw_ostream &OS) const;
+  bool extract(DataExtractor Data, uint32_t *OffsetPtr);
+
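+  /// Look up the declaration for a given abbreviation code. A minimal sketch
+  /// (assuming \c Set has been extracted from .debug_abbrev and \c Code is a
+  /// hypothetical abbreviation code):
+  /// \code
+  ///   if (const DWARFAbbreviationDeclaration *D =
+  ///           Set.getAbbreviationDeclaration(Code))
+  ///     D->dump(llvm::outs());
+  /// \endcode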
+  const DWARFAbbreviationDeclaration *
+  getAbbreviationDeclaration(uint32_t AbbrCode) const;
+
+  const_iterator begin() const {
+    return Decls.begin();
+  }
+
+  const_iterator end() const {
+    return Decls.end();
+  }
+
+private:
+  void clear();
+};
+
+class DWARFDebugAbbrev {
+  using DWARFAbbreviationDeclarationSetMap =
+      std::map<uint64_t, DWARFAbbreviationDeclarationSet>;
+
+  mutable DWARFAbbreviationDeclarationSetMap AbbrDeclSets;
+  mutable DWARFAbbreviationDeclarationSetMap::const_iterator PrevAbbrOffsetPos;
+  mutable Optional<DataExtractor> Data;
+
+public:
+  DWARFDebugAbbrev();
+
+  const DWARFAbbreviationDeclarationSet *
+  getAbbreviationDeclarationSet(uint64_t CUAbbrOffset) const;
+
+  void dump(raw_ostream &OS) const;
+  void parse() const;
+  void extract(DataExtractor Data);
+
+  DWARFAbbreviationDeclarationSetMap::const_iterator begin() const {
+    parse();
+    return AbbrDeclSets.begin();
+  }
+
+  DWARFAbbreviationDeclarationSetMap::const_iterator end() const {
+    return AbbrDeclSets.end();
+  }
+
+private:
+  void clear();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGABBREV_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
new file mode 100644
index 0000000..ab46fac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
@@ -0,0 +1,76 @@
+//===- DWARFDebugArangeSet.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGARANGESET_H
+#define LLVM_DEBUGINFO_DWARFDEBUGARANGESET_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugArangeSet {
+public:
+  struct Header {
+    /// The total length of the entries for that set, not including the length
+    /// field itself.
+    uint32_t Length;
+    /// The offset from the beginning of the .debug_info section of the
+    /// compilation unit entry referenced by the table.
+    uint32_t CuOffset;
+    /// The DWARF version number.
+    uint16_t Version;
+    /// The size in bytes of an address on the target architecture. For segmented
+    /// addressing, this is the size of the offset portion of the address.
+    uint8_t AddrSize;
+    /// The size in bytes of a segment descriptor on the target architecture.
+    /// If the target system uses a flat address space, this value is 0.
+    uint8_t SegSize;
+  };
+
+  struct Descriptor {
+    uint64_t Address;
+    uint64_t Length;
+
+    uint64_t getEndAddress() const { return Address + Length; }
+    void dump(raw_ostream &OS, uint32_t AddressSize) const;
+  };
+
+private:
+  using DescriptorColl = std::vector<Descriptor>;
+  using desc_iterator_range = iterator_range<DescriptorColl::const_iterator>;
+
+  uint32_t Offset;
+  Header HeaderData;
+  DescriptorColl ArangeDescriptors;
+
+public:
+  DWARFDebugArangeSet() { clear(); }
+
+  void clear();
+  bool extract(DataExtractor data, uint32_t *offset_ptr);
+  void dump(raw_ostream &OS) const;
+
+  uint32_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; }
+
+  const Header &getHeader() const { return HeaderData; }
+
+  desc_iterator_range descriptors() const {
+    return desc_iterator_range(ArangeDescriptors.begin(),
+                               ArangeDescriptors.end());
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGARANGESET_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h
new file mode 100644
index 0000000..ea71a50
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h
@@ -0,0 +1,89 @@
+//===- DWARFDebugAranges.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGARANGES_H
+#define LLVM_DEBUGINFO_DWARFDEBUGARANGES_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class DWARFContext;
+
+class DWARFDebugAranges {
+public:
+  void generate(DWARFContext *CTX);
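+
+  /// Find the offset of the compile unit whose range contains an address.
+  /// A minimal sketch (assuming \c Ctx is a DWARFContext and \c Addr is a
+  /// hypothetical address):
+  /// \code
+  ///   DWARFDebugAranges Aranges;
+  ///   Aranges.generate(&Ctx);
+  ///   uint32_t CUOffset = Aranges.findAddress(Addr);
+  /// \endcode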
+  uint32_t findAddress(uint64_t Address) const;
+
+private:
+  void clear();
+  void extract(DataExtractor DebugArangesData);
+
+  /// Call appendRange multiple times and then call construct.
+  void appendRange(uint32_t CUOffset, uint64_t LowPC, uint64_t HighPC);
+  void construct();
+
+  struct Range {
+    explicit Range(uint64_t LowPC = -1ULL, uint64_t HighPC = -1ULL,
+                   uint32_t CUOffset = -1U)
+      : LowPC(LowPC), Length(HighPC - LowPC), CUOffset(CUOffset) {}
+
+    void setHighPC(uint64_t HighPC) {
+      if (HighPC == -1ULL || HighPC <= LowPC)
+        Length = 0;
+      else
+        Length = HighPC - LowPC;
+    }
+
+    uint64_t HighPC() const {
+      if (Length)
+        return LowPC + Length;
+      return -1ULL;
+    }
+
+    bool containsAddress(uint64_t Address) const {
+      return LowPC <= Address && Address < HighPC();
+    }
+
+    bool operator<(const Range &other) const {
+      return LowPC < other.LowPC;
+    }
+
+    uint64_t LowPC;    /// Start of the address range.
+    uint32_t Length;   /// Length of the address range; the end address
+                       /// (LowPC + Length) is excluded from the range.
+    uint32_t CUOffset; /// Offset of the compile unit or DIE.
+  };
+
+  struct RangeEndpoint {
+    uint64_t Address;
+    uint32_t CUOffset;
+    bool IsRangeStart;
+
+    RangeEndpoint(uint64_t Address, uint32_t CUOffset, bool IsRangeStart)
+        : Address(Address), CUOffset(CUOffset), IsRangeStart(IsRangeStart) {}
+
+    bool operator<(const RangeEndpoint &Other) const {
+      return Address < Other.Address;
+    }
+  };
+
+  using RangeColl = std::vector<Range>;
+  using RangeCollIterator = RangeColl::const_iterator;
+
+  std::vector<RangeEndpoint> Endpoints;
+  RangeColl Aranges;
+  DenseSet<uint32_t> ParsedCUOffsets;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGARANGES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
new file mode 100644
index 0000000..ff1c7fb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
@@ -0,0 +1,301 @@
+//===- DWARFDebugFrame.h - Parsing of .debug_frame --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFExpression.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace dwarf {
+
+/// Represent a sequence of Call Frame Information instructions that, when read
+/// in order, construct a table mapping PC to frame state. This can also be
+/// referred to as "CFI rules" in DWARF literature to avoid confusion with
+/// computer programs in the broader sense, and in this context each instruction
+/// would be a rule to establish the mapping. Refer to pg. 172 in the DWARF5
+/// manual, "6.4.1 Structure of Call Frame Information".
+class CFIProgram {
+public:
+  typedef SmallVector<uint64_t, 2> Operands;
+
+  /// An instruction consists of a DWARF CFI opcode and an optional sequence of
+  /// operands. If it refers to an expression, then this expression has its own
+  /// sequence of operations and operands handled separately by DWARFExpression.
+  struct Instruction {
+    Instruction(uint8_t Opcode) : Opcode(Opcode) {}
+
+    uint8_t Opcode;
+    Operands Ops;
+    // Associated DWARF expression in case this instruction refers to one
+    Optional<DWARFExpression> Expression;
+  };
+
+  using InstrList = std::vector<Instruction>;
+  using iterator = InstrList::iterator;
+  using const_iterator = InstrList::const_iterator;
+
+  iterator begin() { return Instructions.begin(); }
+  const_iterator begin() const { return Instructions.begin(); }
+  iterator end() { return Instructions.end(); }
+  const_iterator end() const { return Instructions.end(); }
+
+  unsigned size() const { return (unsigned)Instructions.size(); }
+  bool empty() const { return Instructions.empty(); }
+
+  CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor)
+      : CodeAlignmentFactor(CodeAlignmentFactor),
+        DataAlignmentFactor(DataAlignmentFactor) {}
+
+  /// Parse and store a sequence of CFI instructions from Data,
+  /// starting at *Offset and ending at EndOffset. *Offset is updated
+  /// to EndOffset upon successful parsing, or indicates the offset
+  /// where a problem occurred in case an error is returned.
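+  ///
+  /// A minimal sketch (assuming \c CFIs is a CFIProgram and \c Data covers
+  /// one instruction stream ending at \c EndOff; names are hypothetical):
+  /// \code
+  ///   uint32_t Off = 0;
+  ///   if (Error E = CFIs.parse(Data, &Off, EndOff))
+  ///     consumeError(std::move(E)); // or report the error
+  /// \endcode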
+  Error parse(DataExtractor Data, uint32_t *Offset, uint32_t EndOffset);
+
+  void dump(raw_ostream &OS, const MCRegisterInfo *MRI, bool IsEH,
+            unsigned IndentLevel = 1) const;
+
+private:
+  std::vector<Instruction> Instructions;
+  const uint64_t CodeAlignmentFactor;
+  const int64_t DataAlignmentFactor;
+
+  /// Convenience method to add a new instruction with the given opcode.
+  void addInstruction(uint8_t Opcode) {
+    Instructions.push_back(Instruction(Opcode));
+  }
+
+  /// Add a new single-operand instruction.
+  void addInstruction(uint8_t Opcode, uint64_t Operand1) {
+    Instructions.push_back(Instruction(Opcode));
+    Instructions.back().Ops.push_back(Operand1);
+  }
+
+  /// Add a new instruction that has two operands.
+  void addInstruction(uint8_t Opcode, uint64_t Operand1, uint64_t Operand2) {
+    Instructions.push_back(Instruction(Opcode));
+    Instructions.back().Ops.push_back(Operand1);
+    Instructions.back().Ops.push_back(Operand2);
+  }
+
+  /// Types of operands to CFI instructions.
+  /// In DWARF, this type is implicitly tied to a CFI instruction opcode and
+  /// thus this type doesn't need to be explicitly written to the file (this
+  /// is not a DWARF encoding). The relationship of instructions to operand
+  /// types can be obtained from getOperandTypes() and is only used to
+  /// simplify instruction printing.
+  enum OperandType {
+    OT_Unset,
+    OT_None,
+    OT_Address,
+    OT_Offset,
+    OT_FactoredCodeOffset,
+    OT_SignedFactDataOffset,
+    OT_UnsignedFactDataOffset,
+    OT_Register,
+    OT_Expression
+  };
+
+  /// Retrieve the array describing the types of operands according to the enum
+  /// above. This is indexed by opcode.
+  static ArrayRef<OperandType[2]> getOperandTypes();
+
+  /// Print \p Opcode's operand number \p OperandIdx which has value \p Operand.
+  void printOperand(raw_ostream &OS, const MCRegisterInfo *MRI, bool IsEH,
+                    const Instruction &Instr, unsigned OperandIdx,
+                    uint64_t Operand) const;
+};
+
+/// An entry in either debug_frame or eh_frame. This entry can be a CIE or an
+/// FDE.
+class FrameEntry {
+public:
+  enum FrameKind { FK_CIE, FK_FDE };
+
+  FrameEntry(FrameKind K, uint64_t Offset, uint64_t Length, uint64_t CodeAlign,
+             int64_t DataAlign)
+      : Kind(K), Offset(Offset), Length(Length), CFIs(CodeAlign, DataAlign) {}
+
+  virtual ~FrameEntry() {}
+
+  FrameKind getKind() const { return Kind; }
+  uint64_t getOffset() const { return Offset; }
+  uint64_t getLength() const { return Length; }
+  const CFIProgram &cfis() const { return CFIs; }
+  CFIProgram &cfis() { return CFIs; }
+
+  /// Dump the instructions in this CFI fragment
+  virtual void dump(raw_ostream &OS, const MCRegisterInfo *MRI,
+                    bool IsEH) const = 0;
+
+protected:
+  const FrameKind Kind;
+
+  /// Offset of this entry in the section.
+  const uint64_t Offset;
+
+  /// Entry length as specified in DWARF.
+  const uint64_t Length;
+
+  CFIProgram CFIs;
+};
+
+/// DWARF Common Information Entry (CIE)
+class CIE : public FrameEntry {
+public:
+  // CIEs (and FDEs) are simply container classes, so the only sensible way to
+  // create them is by providing the full parsed contents in the constructor.
+  CIE(uint64_t Offset, uint64_t Length, uint8_t Version,
+      SmallString<8> Augmentation, uint8_t AddressSize,
+      uint8_t SegmentDescriptorSize, uint64_t CodeAlignmentFactor,
+      int64_t DataAlignmentFactor, uint64_t ReturnAddressRegister,
+      SmallString<8> AugmentationData, uint32_t FDEPointerEncoding,
+      uint32_t LSDAPointerEncoding, Optional<uint64_t> Personality,
+      Optional<uint32_t> PersonalityEnc)
+      : FrameEntry(FK_CIE, Offset, Length, CodeAlignmentFactor,
+                   DataAlignmentFactor),
+        Version(Version), Augmentation(std::move(Augmentation)),
+        AddressSize(AddressSize), SegmentDescriptorSize(SegmentDescriptorSize),
+        CodeAlignmentFactor(CodeAlignmentFactor),
+        DataAlignmentFactor(DataAlignmentFactor),
+        ReturnAddressRegister(ReturnAddressRegister),
+        AugmentationData(std::move(AugmentationData)),
+        FDEPointerEncoding(FDEPointerEncoding),
+        LSDAPointerEncoding(LSDAPointerEncoding), Personality(Personality),
+        PersonalityEnc(PersonalityEnc) {}
+
+  static bool classof(const FrameEntry *FE) { return FE->getKind() == FK_CIE; }
+
+  StringRef getAugmentationString() const { return Augmentation; }
+  uint64_t getCodeAlignmentFactor() const { return CodeAlignmentFactor; }
+  int64_t getDataAlignmentFactor() const { return DataAlignmentFactor; }
+  uint8_t getVersion() const { return Version; }
+  uint64_t getReturnAddressRegister() const { return ReturnAddressRegister; }
+  Optional<uint64_t> getPersonalityAddress() const { return Personality; }
+  Optional<uint32_t> getPersonalityEncoding() const { return PersonalityEnc; }
+
+  uint32_t getFDEPointerEncoding() const { return FDEPointerEncoding; }
+
+  uint32_t getLSDAPointerEncoding() const { return LSDAPointerEncoding; }
+
+  void dump(raw_ostream &OS, const MCRegisterInfo *MRI,
+            bool IsEH) const override;
+
+private:
+  /// The following fields are defined in section 6.4.1 of the DWARF standard v4
+  const uint8_t Version;
+  const SmallString<8> Augmentation;
+  const uint8_t AddressSize;
+  const uint8_t SegmentDescriptorSize;
+  const uint64_t CodeAlignmentFactor;
+  const int64_t DataAlignmentFactor;
+  const uint64_t ReturnAddressRegister;
+
+  // The following are used when the CIE represents an EH frame entry.
+  const SmallString<8> AugmentationData;
+  const uint32_t FDEPointerEncoding;
+  const uint32_t LSDAPointerEncoding;
+  const Optional<uint64_t> Personality;
+  const Optional<uint32_t> PersonalityEnc;
+};
+
+/// DWARF Frame Description Entry (FDE)
+class FDE : public FrameEntry {
+public:
+  // Each FDE has a CIE it's "linked to". Our FDE is constructed with an
+  // offset to that CIE (provided by parsing the FDE header). The CIE itself
+  // is obtained lazily once it's actually required.
+  FDE(uint64_t Offset, uint64_t Length, int64_t LinkedCIEOffset,
+      uint64_t InitialLocation, uint64_t AddressRange, CIE *Cie,
+      Optional<uint64_t> LSDAAddress)
+      : FrameEntry(FK_FDE, Offset, Length,
+                   Cie ? Cie->getCodeAlignmentFactor() : 0,
+                   Cie ? Cie->getDataAlignmentFactor() : 0),
+        LinkedCIEOffset(LinkedCIEOffset), InitialLocation(InitialLocation),
+        AddressRange(AddressRange), LinkedCIE(Cie), LSDAAddress(LSDAAddress) {}
+
+  ~FDE() override = default;
+
+  const CIE *getLinkedCIE() const { return LinkedCIE; }
+  uint64_t getInitialLocation() const { return InitialLocation; }
+  uint64_t getAddressRange() const { return AddressRange; }
+  Optional<uint64_t> getLSDAAddress() const { return LSDAAddress; }
+
+  void dump(raw_ostream &OS, const MCRegisterInfo *MRI,
+            bool IsEH) const override;
+
+  static bool classof(const FrameEntry *FE) { return FE->getKind() == FK_FDE; }
+
+private:
+  /// The following fields are defined in section 6.4.1 of the DWARF standard v3
+  const uint64_t LinkedCIEOffset;
+  const uint64_t InitialLocation;
+  const uint64_t AddressRange;
+  const CIE *LinkedCIE;
+  const Optional<uint64_t> LSDAAddress;
+};
+
+} // end namespace dwarf
+
+/// A parsed .debug_frame or .eh_frame section
+class DWARFDebugFrame {
+  // True if this is parsing an eh_frame section.
+  const bool IsEH;
+  // Nonzero for sane pointer values coming out of eh_frame.
+  const uint64_t EHFrameAddress;
+
+  std::vector<std::unique_ptr<dwarf::FrameEntry>> Entries;
+  using iterator = pointee_iterator<decltype(Entries)::const_iterator>;
+
+  /// Return the entry at the given offset or nullptr.
+  dwarf::FrameEntry *getEntryAtOffset(uint64_t Offset) const;
+
+public:
+  // If IsEH is true, assume it is a .eh_frame section. Otherwise, it is a
+  // .debug_frame section. EHFrameAddress should be nonzero for correct
+  // parsing of .eh_frame addresses when they use a PC-relative encoding.
+  DWARFDebugFrame(bool IsEH = false, uint64_t EHFrameAddress = 0);
+  ~DWARFDebugFrame();
+
+  /// Dump the section data into the given stream.
+  void dump(raw_ostream &OS, const MCRegisterInfo *MRI,
+            Optional<uint64_t> Offset) const;
+
+  /// Parse the section from raw data. \p Data is assumed to contain the whole
+  /// frame section contents to be parsed.
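+  ///
+  /// A minimal sketch (assuming \c Data covers the whole .debug_frame
+  /// section):
+  /// \code
+  ///   DWARFDebugFrame DF(/*IsEH=*/false);
+  ///   DF.parse(Data);
+  ///   for (const dwarf::FrameEntry &E : DF.entries())
+  ///     E.dump(llvm::outs(), nullptr, /*IsEH=*/false);
+  /// \endcode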
+  void parse(DWARFDataExtractor Data);
+
+  /// Return whether the section has any entries.
+  bool empty() const { return Entries.empty(); }
+
+  /// DWARF Frame entries accessors
+  iterator begin() const { return Entries.begin(); }
+  iterator end() const { return Entries.end(); }
+  iterator_range<iterator> entries() const {
+    return iterator_range<iterator>(Entries.begin(), Entries.end());
+  }
+
+  uint64_t getEHFrameAddress() const { return EHFrameAddress; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
new file mode 100644
index 0000000..88c8f57
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
@@ -0,0 +1,63 @@
+//===- DWARFDebugInfoEntry.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H
+#define LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include <cstdint>
+
+namespace llvm {
+
+class DataExtractor;
+class DWARFUnit;
+
+/// DWARFDebugInfoEntry - A DIE with only the minimum required data.
+class DWARFDebugInfoEntry {
+  /// Offset within the .debug_info of the start of this entry.
+  uint32_t Offset = 0;
+
+  /// The integer depth of this DIE within the compile unit DIEs where the
+  /// compile/type unit DIE has a depth of zero.
+  uint32_t Depth = 0;
+
+  const DWARFAbbreviationDeclaration *AbbrevDecl = nullptr;
+
+public:
+  DWARFDebugInfoEntry() = default;
+
+  /// Extracts a debug info entry, which is a child of a given unit,
+  /// starting at a given offset. If DIE can't be extracted, returns false and
+  /// doesn't change OffsetPtr.
+  bool extractFast(const DWARFUnit &U, uint32_t *OffsetPtr);
+
+  /// High performance extraction should use this call.
+  bool extractFast(const DWARFUnit &U, uint32_t *OffsetPtr,
+                   const DWARFDataExtractor &DebugInfoData, uint32_t UEndOffset,
+                   uint32_t Depth);
+
+  uint32_t getOffset() const { return Offset; }
+  uint32_t getDepth() const { return Depth; }
+
+  dwarf::Tag getTag() const {
+    return AbbrevDecl ? AbbrevDecl->getTag() : dwarf::DW_TAG_null;
+  }
+
+  bool hasChildren() const { return AbbrevDecl && AbbrevDecl->hasChildren(); }
+
+  const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
+    return AbbrevDecl;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
new file mode 100644
index 0000000..c24364a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
@@ -0,0 +1,296 @@
+//===- DWARFDebugLine.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGLINE_H
+#define LLVM_DEBUGINFO_DWARFDEBUGLINE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/Support/MD5.h"
+#include <cstdint>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class DWARFUnit;
+class raw_ostream;
+
+class DWARFDebugLine {
+public:
+  struct FileNameEntry {
+    FileNameEntry() = default;
+
+    DWARFFormValue Name;
+    uint64_t DirIdx = 0;
+    uint64_t ModTime = 0;
+    uint64_t Length = 0;
+    MD5::MD5Result Checksum;
+    DWARFFormValue Source;
+  };
+
+  /// Tracks which optional content types are present in a DWARF file name
+  /// entry format.
+  struct ContentTypeTracker {
+    ContentTypeTracker() = default;
+
+    /// Whether filename entries provide a modification timestamp.
+    bool HasModTime = false;
+    /// Whether filename entries provide a file size.
+    bool HasLength = false;
+    /// For v5, whether filename entries provide an MD5 checksum.
+    bool HasMD5 = false;
+    /// For v5, whether filename entries provide source text.
+    bool HasSource = false;
+
+    /// Update tracked content types with \p ContentType.
+    void trackContentType(dwarf::LineNumberEntryFormat ContentType);
+  };
+
+  struct Prologue {
+    Prologue();
+
+    /// The size in bytes of the statement information for this compilation unit
+    /// (not including the total_length field itself).
+    uint64_t TotalLength;
+    /// Version, address size (starting in v5), and DWARF32/64 format; these
+    /// parameters affect interpretation of forms (used in the directory and
+    /// file tables starting with v5).
+    dwarf::FormParams FormParams;
+    /// The number of bytes following the prologue_length field to the beginning
+    /// of the first byte of the statement program itself.
+    uint64_t PrologueLength;
+    /// In v5, size in bytes of a segment selector.
+    uint8_t SegSelectorSize;
+    /// The size in bytes of the smallest target machine instruction. Statement
+    /// program opcodes that alter the address register first multiply their
+    /// operands by this value.
+    uint8_t MinInstLength;
+    /// The maximum number of individual operations that may be encoded in an
+    /// instruction.
+    uint8_t MaxOpsPerInst;
+    /// The initial value of the is_stmt register.
+    uint8_t DefaultIsStmt;
+    /// This parameter affects the meaning of the special opcodes. See below.
+    int8_t LineBase;
+    /// This parameter affects the meaning of the special opcodes. See below.
+    uint8_t LineRange;
+    /// The number assigned to the first special opcode.
+    uint8_t OpcodeBase;
+    /// This tracks which optional file format content types are present.
+    ContentTypeTracker ContentTypes;
+    std::vector<uint8_t> StandardOpcodeLengths;
+    std::vector<DWARFFormValue> IncludeDirectories;
+    std::vector<FileNameEntry> FileNames;
+
+    const dwarf::FormParams getFormParams() const { return FormParams; }
+    uint16_t getVersion() const { return FormParams.Version; }
+    uint8_t getAddressSize() const { return FormParams.AddrSize; }
+    bool isDWARF64() const { return FormParams.Format == dwarf::DWARF64; }
+
+    uint32_t sizeofTotalLength() const { return isDWARF64() ? 12 : 4; }
+
+    uint32_t sizeofPrologueLength() const { return isDWARF64() ? 8 : 4; }
+
+    /// Length of the prologue in bytes.
+    uint32_t getLength() const {
+      return PrologueLength + sizeofTotalLength() + sizeof(getVersion()) +
+             sizeofPrologueLength();
+    }
+
+    /// Length of the line table data in bytes (not including the prologue).
+    uint32_t getStatementTableLength() const {
+      return TotalLength + sizeofTotalLength() - getLength();
+    }
+
+    int32_t getMaxLineIncrementForSpecialOpcode() const {
+      return LineBase + (int8_t)LineRange - 1;
+    }
+
+    void clear();
+    void dump(raw_ostream &OS, DIDumpOptions DumpOptions) const;
+    bool parse(const DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr,
+               const DWARFContext &Ctx, const DWARFUnit *U = nullptr);
+  };
+
+  /// Standard .debug_line state machine structure.
+  struct Row {
+    explicit Row(bool DefaultIsStmt = false);
+
+    /// Called after a row is appended to the matrix.
+    void postAppend();
+    void reset(bool DefaultIsStmt);
+    void dump(raw_ostream &OS) const;
+
+    static void dumpTableHeader(raw_ostream &OS);
+
+    static bool orderByAddress(const Row &LHS, const Row &RHS) {
+      return LHS.Address < RHS.Address;
+    }
+
+    /// The program-counter value corresponding to a machine instruction
+    /// generated by the compiler.
+    uint64_t Address;
+    /// An unsigned integer indicating a source line number. Lines are numbered
+    /// beginning at 1. The compiler may emit the value 0 in cases where an
+    /// instruction cannot be attributed to any source line.
+    uint32_t Line;
+    /// An unsigned integer indicating a column number within a source line.
+    /// Columns are numbered beginning at 1. The value 0 is reserved to indicate
+    /// that a statement begins at the 'left edge' of the line.
+    uint16_t Column;
+    /// An unsigned integer indicating the identity of the source file
+    /// corresponding to a machine instruction.
+    uint16_t File;
+    /// An unsigned integer representing the DWARF path discriminator value
+    /// for this location.
+    uint32_t Discriminator;
+    /// An unsigned integer whose value encodes the applicable instruction set
+    /// architecture for the current instruction.
+    uint8_t Isa;
+    /// A boolean indicating that the current instruction is the beginning of a
+    /// statement.
+    uint8_t IsStmt : 1,
+        /// A boolean indicating that the current instruction is the
+        /// beginning of a basic block.
+        BasicBlock : 1,
+        /// A boolean indicating that the current address is that of the
+        /// first byte after the end of a sequence of target machine
+        /// instructions.
+        EndSequence : 1,
+        /// A boolean indicating that the current address is one (of possibly
+        /// many) where execution should be suspended for an entry breakpoint
+        /// of a function.
+        PrologueEnd : 1,
+        /// A boolean indicating that the current address is one (of possibly
+        /// many) where execution should be suspended for an exit breakpoint
+        /// of a function.
+        EpilogueBegin : 1;
+  };
+
+  /// Represents a series of contiguous machine instructions. The line table
+  /// for each compilation unit may consist of multiple sequences, which are
+  /// not guaranteed to be in ascending instruction-address order.
+  struct Sequence {
+    Sequence();
+
+    /// Sequence describes instructions at address range [LowPC, HighPC)
+    /// and is described by line table rows [FirstRowIndex, LastRowIndex).
+    uint64_t LowPC;
+    uint64_t HighPC;
+    unsigned FirstRowIndex;
+    unsigned LastRowIndex;
+    bool Empty;
+
+    void reset();
+
+    static bool orderByLowPC(const Sequence &LHS, const Sequence &RHS) {
+      return LHS.LowPC < RHS.LowPC;
+    }
+
+    bool isValid() const {
+      return !Empty && (LowPC < HighPC) && (FirstRowIndex < LastRowIndex);
+    }
+
+    bool containsPC(uint64_t PC) const { return (LowPC <= PC && PC < HighPC); }
+  };
+
+  struct LineTable {
+    LineTable();
+
+    /// Represents an invalid row
+    const uint32_t UnknownRowIndex = UINT32_MAX;
+
+    void appendRow(const DWARFDebugLine::Row &R) { Rows.push_back(R); }
+
+    void appendSequence(const DWARFDebugLine::Sequence &S) {
+      Sequences.push_back(S);
+    }
+
+    /// Returns the index of the row with file/line info for a given address,
+    /// or UnknownRowIndex if there is no such row.
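+    ///
+    /// A lookup sketch (assuming \c LT is a parsed LineTable and \c PC is a
+    /// hypothetical program counter):
+    /// \code
+    ///   uint32_t RowIndex = LT.lookupAddress(PC);
+    ///   if (RowIndex != LT.UnknownRowIndex)
+    ///     LT.Rows[RowIndex].dump(llvm::outs());
+    /// \endcode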
+    uint32_t lookupAddress(uint64_t Address) const;
+
+    bool lookupAddressRange(uint64_t Address, uint64_t Size,
+                            std::vector<uint32_t> &Result) const;
+
+    bool hasFileAtIndex(uint64_t FileIndex) const;
+
+    /// Extracts filename by its index in filename table in prologue.
+    /// Returns true on success.
+    bool getFileNameByIndex(uint64_t FileIndex, const char *CompDir,
+                            DILineInfoSpecifier::FileLineInfoKind Kind,
+                            std::string &Result) const;
+
+    /// Fills the Result argument with the file and line information
+    /// corresponding to Address. Returns true on success.
+    bool getFileLineInfoForAddress(uint64_t Address, const char *CompDir,
+                                   DILineInfoSpecifier::FileLineInfoKind Kind,
+                                   DILineInfo &Result) const;
+
+    void dump(raw_ostream &OS, DIDumpOptions DumpOptions) const;
+    void clear();
+
+    /// Parse prologue and all rows.
+    bool parse(DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr,
+               const DWARFContext &Ctx, const DWARFUnit *U,
+               raw_ostream *OS = nullptr);
+
+    using RowVector = std::vector<Row>;
+    using RowIter = RowVector::const_iterator;
+    using SequenceVector = std::vector<Sequence>;
+    using SequenceIter = SequenceVector::const_iterator;
+
+    struct Prologue Prologue;
+    RowVector Rows;
+    SequenceVector Sequences;
+
+  private:
+    uint32_t findRowInSeq(const DWARFDebugLine::Sequence &Seq,
+                          uint64_t Address) const;
+    Optional<StringRef>
+    getSourceByIndex(uint64_t FileIndex,
+                     DILineInfoSpecifier::FileLineInfoKind Kind) const;
+  };
+
+  const LineTable *getLineTable(uint32_t Offset) const;
+  const LineTable *getOrParseLineTable(DWARFDataExtractor &DebugLineData,
+                                       uint32_t Offset, const DWARFContext &C,
+                                       const DWARFUnit *U);
+
+private:
+  struct ParsingState {
+    ParsingState(struct LineTable *LT);
+
+    void resetRowAndSequence();
+    void appendRowToMatrix(uint32_t Offset);
+
+    /// Line table we're currently parsing.
+    struct LineTable *LineTable;
+    /// The row number that starts at zero for the prologue, and increases for
+    /// each row added to the matrix.
+    unsigned RowNumber = 0;
+    struct Row Row;
+    struct Sequence Sequence;
+  };
+
+  using LineTableMapTy = std::map<uint32_t, LineTable>;
+  using LineTableIter = LineTableMapTy::iterator;
+  using LineTableConstIter = LineTableMapTy::const_iterator;
+
+  LineTableMapTy LineTableMap;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGLINE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
new file mode 100644
index 0000000..a6d319a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
@@ -0,0 +1,113 @@
+//===- DWARFDebugLoc.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include <cstdint>
+
+namespace llvm {
+class DWARFUnit;
+class MCRegisterInfo;
+class raw_ostream;
+
+class DWARFDebugLoc {
+public:
+  /// A single location within a location list.
+  struct Entry {
+    /// The beginning address of the instruction range.
+    uint64_t Begin;
+    /// The ending address of the instruction range.
+    uint64_t End;
+    /// The location of the variable within the specified range.
+    SmallVector<char, 4> Loc;
+  };
+
+  /// A list of locations that contain one variable.
+  struct LocationList {
+    /// The beginning offset where this location list is stored in the debug_loc
+    /// section.
+    unsigned Offset;
+    /// All the locations in which the variable is stored.
+    SmallVector<Entry, 2> Entries;
+    /// Dump this list on OS.
+    void dump(raw_ostream &OS, bool IsLittleEndian, unsigned AddressSize,
+              const MCRegisterInfo *MRI, unsigned Indent) const;
+  };
+
+private:
+  using LocationLists = SmallVector<LocationList, 4>;
+
+  /// A list of all the variables in the debug_loc section, each one describing
+  /// the locations in which the variable is stored.
+  LocationLists Locations;
+
+  unsigned AddressSize;
+
+  bool IsLittleEndian;
+
+public:
+  /// Print the location lists found within the debug_loc section.
+  void dump(raw_ostream &OS, const MCRegisterInfo *RegInfo,
+            Optional<uint64_t> Offset) const;
+
+  /// Parse the debug_loc section accessible via the 'data' parameter using the
+  /// address size also given in 'data' to interpret the address ranges.
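+  ///
+  /// A minimal sketch (assuming \c Data is a DWARFDataExtractor over the
+  /// .debug_loc section):
+  /// \code
+  ///   DWARFDebugLoc Loc;
+  ///   Loc.parse(Data);
+  ///   Loc.dump(llvm::outs(), nullptr, llvm::None);
+  /// \endcode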
+  void parse(const DWARFDataExtractor &data);
+
+  /// Return the location list at the given offset or nullptr.
+  LocationList const *getLocationListAtOffset(uint64_t Offset) const;
+
+  Optional<LocationList> parseOneLocationList(DWARFDataExtractor Data,
+                                              uint32_t *Offset);
+};
+
+class DWARFDebugLocDWO {
+public:
+  struct Entry {
+    uint64_t Start;
+    uint32_t Length;
+    SmallVector<char, 4> Loc;
+  };
+
+  struct LocationList {
+    unsigned Offset;
+    SmallVector<Entry, 2> Entries;
+    void dump(raw_ostream &OS, bool IsLittleEndian, unsigned AddressSize,
+              const MCRegisterInfo *RegInfo, unsigned Indent) const;
+  };
+
+private:
+  using LocationLists = SmallVector<LocationList, 4>;
+
+  LocationLists Locations;
+
+  unsigned AddressSize;
+
+  bool IsLittleEndian;
+
+public:
+  void parse(DataExtractor data);
+  void dump(raw_ostream &OS, const MCRegisterInfo *RegInfo,
+            Optional<uint64_t> Offset) const;
+
+  /// Return the location list at the given offset or nullptr.
+  LocationList const *getLocationListAtOffset(uint64_t Offset) const;
+
+  static Optional<LocationList> parseOneLocationList(DataExtractor Data,
+                                                     uint32_t *Offset);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
new file mode 100644
index 0000000..bfe2fc3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
@@ -0,0 +1,63 @@
+//===- DWARFDebugMacro.h ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugMacro {
+  /// A single macro entry within a macro list.
+  struct Entry {
+    /// The type of the macro entry.
+    uint32_t Type;
+    union {
+      /// The source line where the macro is defined.
+      uint64_t Line;
+      /// Vendor extension constant value.
+      uint64_t ExtConstant;
+    };
+
+    union {
+      /// The string (name, value) of the macro entry.
+      const char *MacroStr;
+      /// An unsigned integer indicating the identity of the source file.
+      uint64_t File;
+      /// Vendor extension string.
+      const char *ExtStr;
+    };
+  };
+
+  using MacroList = SmallVector<Entry, 4>;
+
+  /// A list of all the macro entries in the debug_macinfo section.
+  MacroList Macros;
+
+public:
+  DWARFDebugMacro() = default;
+
+  /// Print the macro list found within the debug_macinfo section.
+  void dump(raw_ostream &OS) const;
+
+  /// Parse the debug_macinfo section accessible via the 'data' parameter.
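+  ///
+  /// A minimal sketch (assuming \c Data holds the raw .debug_macinfo
+  /// contents):
+  /// \code
+  ///   DWARFDebugMacro Macro;
+  ///   Macro.parse(Data);
+  ///   if (!Macro.empty())
+  ///     Macro.dump(llvm::outs());
+  /// \endcode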
+  void parse(DataExtractor data);
+
+  /// Return whether the section has any entries.
+  bool empty() const { return Macros.empty(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
new file mode 100644
index 0000000..761871d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
@@ -0,0 +1,79 @@
+//===- DWARFDebugPubTable.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// Represents a structure for holding and parsing .debug_pub* tables.
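+///
+/// A construction sketch (assuming \c Data holds the raw .debug_pubnames
+/// contents):
+/// \code
+///   DWARFDebugPubTable PubNames(Data, /*LittleEndian=*/true,
+///                               /*GnuStyle=*/false);
+///   PubNames.dump(llvm::outs());
+/// \endcode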
+class DWARFDebugPubTable {
+public:
+  struct Entry {
+    /// Section offset from the beginning of the compilation unit.
+    uint32_t SecOffset;
+
+    /// An entry of the various gnu_pub* debug sections.
+    dwarf::PubIndexEntryDescriptor Descriptor;
+
+    /// The name of the object as given by the DW_AT_name attribute of the
+    /// referenced DIE.
+    const char *Name;
+  };
+
+  /// Each table consists of sets of variable-length entries. Each set describes
+  /// the names of global objects and functions, or global types, respectively,
+  /// whose definitions are represented by debugging information entries owned
+  /// by a single compilation unit.
+  struct Set {
+    /// The total length of the entries for that set, not including the length
+    /// field itself.
+    uint32_t Length;
+
+    /// This number is specific to the name lookup table and is independent of
+    /// the DWARF version number.
+    uint16_t Version;
+
+    /// The offset from the beginning of the .debug_info section of the
+    /// compilation unit header referenced by the set.
+    uint32_t Offset;
+
+    /// The size in bytes of the contents of the .debug_info section generated
+    /// to represent that compilation unit.
+    uint32_t Size;
+
+    std::vector<Entry> Entries;
+  };
+
+private:
+  std::vector<Set> Sets;
+
+  /// GNU-style tables contain additional information.
+  /// This flag determines whether the section we parse is a debug_gnu* table.
+  bool GnuStyle;
+
+public:
+  DWARFDebugPubTable(StringRef Data, bool LittleEndian, bool GnuStyle);
+
+  void dump(raw_ostream &OS) const;
+
+  ArrayRef<Set> getData() { return Sets; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
new file mode 100644
index 0000000..38b7f22
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
@@ -0,0 +1,86 @@
+//===- DWARFDebugRangeList.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H
+
+#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+struct BaseAddress;
+class raw_ostream;
+
+class DWARFDebugRangeList {
+public:
+  struct RangeListEntry {
+    /// A beginning address offset. This address offset has the size of an
+    /// address and is relative to the applicable base address of the
+    /// compilation unit referencing this range list. It marks the beginning
+    /// of an address range.
+    uint64_t StartAddress;
+    /// An ending address offset. This address offset again has the size of
+    /// an address and is relative to the applicable base address of the
+    /// compilation unit referencing this range list. It marks the first
+    /// address past the end of the address range. The ending address must
+    /// be greater than or equal to the beginning address.
+    uint64_t EndAddress;
+    /// A section index this range belongs to.
+    uint64_t SectionIndex;
+
+    /// The end of any given range list is marked by an end of list entry,
+    /// which consists of a 0 for the beginning address offset
+    /// and a 0 for the ending address offset.
+    bool isEndOfListEntry() const {
+      return (StartAddress == 0) && (EndAddress == 0);
+    }
+
+    /// A base address selection entry consists of:
+    /// 1. The value of the largest representable address offset
+    /// (for example, 0xffffffff when the size of an address is 32 bits).
+    /// 2. An address, which defines the appropriate base address for
+    /// use in interpreting the beginning and ending address offsets of
+    /// subsequent entries of the range list.
+    bool isBaseAddressSelectionEntry(uint8_t AddressSize) const {
+      assert(AddressSize == 4 || AddressSize == 8);
+      if (AddressSize == 4)
+        return StartAddress == -1U;
+      else
+        return StartAddress == -1ULL;
+    }
+  };
+
+private:
+  /// Offset in .debug_ranges section.
+  uint32_t Offset;
+  uint8_t AddressSize;
+  std::vector<RangeListEntry> Entries;
+
+public:
+  DWARFDebugRangeList() { clear(); }
+
+  void clear();
+  void dump(raw_ostream &OS) const;
+  bool extract(const DWARFDataExtractor &data, uint32_t *offset_ptr);
+  const std::vector<RangeListEntry> &getEntries() { return Entries; }
+
+  /// getAbsoluteRanges - Returns the absolute address ranges defined by this
+  /// range list. Has to be passed the base address of the compile unit
+  /// referencing this range list.
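+  ///
+  /// A minimal sketch (assuming \c RL has been extracted and \c BaseAddr is
+  /// the referencing compile unit's base address, possibly llvm::None):
+  /// \code
+  ///   DWARFAddressRangesVector Ranges = RL.getAbsoluteRanges(BaseAddr);
+  /// \endcode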
+  DWARFAddressRangesVector
+  getAbsoluteRanges(llvm::Optional<BaseAddress> BaseAddr) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h
new file mode 100644
index 0000000..7579def
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h
@@ -0,0 +1,81 @@
+//===- DWARFDebugRnglists.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGRNGLISTS_H
+#define LLVM_DEBUGINFO_DWARFDEBUGRNGLISTS_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class Error;
+class raw_ostream;
+
+class DWARFDebugRnglists {
+private:
+  struct Header {
+    /// The total length of the entries for this table, not including the length
+    /// field itself.
+    uint32_t Length = 0;
+    /// The DWARF version number.
+    uint16_t Version;
+    /// The size in bytes of an address on the target architecture. For
+    /// segmented addressing, this is the size of the offset portion of the
+    /// address.
+    uint8_t AddrSize;
+    /// The size in bytes of a segment selector on the target architecture.
+    /// If the target system uses a flat address space, this value is 0.
+    uint8_t SegSize;
+    /// The number of offsets that follow the header before the range lists.
+    uint32_t OffsetEntryCount;
+  };
+
+public:
+  struct RangeListEntry {
+    /// The offset at which the entry is located in the section.
+    const uint32_t Offset;
+    /// The DWARF encoding (DW_RLE_*).
+    const uint8_t EntryKind;
+    /// The values making up the range list entry. Most represent a range with
+    /// a start and end address or a start address and a length. Others are
+    /// single value base addresses or end-of-list with no values. The unneeded
+    /// values are semantically undefined, but initialized to 0.
+    const uint64_t Value0;
+    const uint64_t Value1;
+  };
+
+  using DWARFRangeList = std::vector<RangeListEntry>;
+
+private:
+  uint32_t HeaderOffset;
+  Header HeaderData;
+  std::vector<uint32_t> Offsets;
+  std::vector<DWARFRangeList> Ranges;
+  // The length of the longest encoding string we encountered during parsing.
+  uint8_t MaxEncodingStringLength = 0;
+
+public:
+  void clear();
+  Error extract(DWARFDataExtractor Data, uint32_t *OffsetPtr);
+  uint32_t getHeaderOffset() const { return HeaderOffset; }
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const;
+
+  /// Returns the length of this table, including the length field, or 0 if the
+  /// length has not been determined (e.g. because the table has not yet been
+  /// parsed, or there was a problem in parsing).
+  uint32_t length() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGRNGLISTS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDie.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDie.h
new file mode 100644
index 0000000..39a3dd3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -0,0 +1,353 @@
+//===- DWARFDie.h -----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDIE_H
+#define LLVM_DEBUGINFO_DWARFDIE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
+#include "llvm/DebugInfo/DWARF/DWARFAttribute.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+class DWARFUnit;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+/// Utility class that carries the DWARF compile/type unit and the debug info
+/// entry in an object.
+///
+/// When accessing information from a debug info entry we always need the
+/// DWARF compile/type unit in order to extract the info correctly, as some
+/// information is relative to the compile/type unit. Prior to this class the
+/// DWARFUnit and the DWARFDebugInfoEntry were passed around separately and
+/// there was the possibility for error if the wrong DWARFUnit was used to
+/// extract a unit-relative offset. This class helps to ensure that this
+/// doesn't happen and also simplifies the attribute extraction calls by not
+/// having to specify the DWARFUnit for each call.
+class DWARFDie {
+  DWARFUnit *U = nullptr;
+  const DWARFDebugInfoEntry *Die = nullptr;
+
+public:
+  DWARFDie() = default;
+  DWARFDie(DWARFUnit *Unit, const DWARFDebugInfoEntry *D) : U(Unit), Die(D) {}
+
+  bool isValid() const { return U && Die; }
+  explicit operator bool() const { return isValid(); }
+  const DWARFDebugInfoEntry *getDebugInfoEntry() const { return Die; }
+  DWARFUnit *getDwarfUnit() const { return U; }
+
+  /// Get the abbreviation declaration for this DIE.
+  ///
+  /// \returns the abbreviation declaration or NULL for null tags.
+  const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
+    assert(isValid() && "must check validity prior to calling");
+    return Die->getAbbreviationDeclarationPtr();
+  }
+
+  /// Get the absolute offset into the debug info or types section.
+  ///
+  /// \returns the DIE offset or -1U if invalid.
+  uint32_t getOffset() const {
+    assert(isValid() && "must check validity prior to calling");
+    return Die->getOffset();
+  }
+
+  dwarf::Tag getTag() const {
+    auto AbbrevDecl = getAbbreviationDeclarationPtr();
+    if (AbbrevDecl)
+      return AbbrevDecl->getTag();
+    return dwarf::DW_TAG_null;
+  }
+
+  bool hasChildren() const {
+    assert(isValid() && "must check validity prior to calling");
+    return Die->hasChildren();
+  }
+
+  /// Returns true for a valid DIE that terminates a sibling chain.
+  bool isNULL() const {
+    return getAbbreviationDeclarationPtr() == nullptr;
+  }
+
+  /// Returns true if DIE represents a subprogram (not inlined).
+  bool isSubprogramDIE() const;
+
+  /// Returns true if DIE represents a subprogram or an inlined subroutine.
+  bool isSubroutineDIE() const;
+
+  /// Get the parent of this DIE object.
+  ///
+  /// \returns a valid DWARFDie instance if this object has a parent or an
+  /// invalid DWARFDie instance if it doesn't.
+  DWARFDie getParent() const;
+
+  /// Get the sibling of this DIE object.
+  ///
+  /// \returns a valid DWARFDie instance if this object has a sibling or an
+  /// invalid DWARFDie instance if it doesn't.
+  DWARFDie getSibling() const;
+
+  /// Get the first child of this DIE object.
+  ///
+  /// \returns a valid DWARFDie instance if this object has children or an
+  /// invalid DWARFDie instance if it doesn't.
+  DWARFDie getFirstChild() const;
+
+  /// Dump the DIE and all of its attributes to the supplied stream.
+  ///
+  /// \param OS the stream to use for output.
+  /// \param indent the number of characters to indent each line that is output.
+  void dump(raw_ostream &OS, unsigned indent = 0,
+            DIDumpOptions DumpOpts = DIDumpOptions()) const;
+
+  /// Convenience zero-argument overload for debugging.
+  LLVM_DUMP_METHOD void dump() const;
+
+  /// Extract the specified attribute from this DIE.
+  ///
+  /// Extract an attribute value from this DIE only. This call doesn't look
+  /// for the attribute value in any DW_AT_specification or
+  /// DW_AT_abstract_origin referenced DIEs.
+  ///
+  /// \param Attr the attribute to extract.
+  /// \returns an optional DWARFFormValue that will have the form value if the
+  /// attribute was successfully extracted.
+  Optional<DWARFFormValue> find(dwarf::Attribute Attr) const;
+
+  /// Extract the first value of any attribute in Attrs from this DIE.
+  ///
+  /// Extract the first attribute that matches from this DIE only. This call
+  /// doesn't look for the attribute value in any DW_AT_specification or
+  /// DW_AT_abstract_origin referenced DIEs. The attributes will be searched
+  /// linearly in the order they are specified within Attrs.
+  ///
+  /// \param Attrs an array of DWARF attributes to look for.
+  /// \returns an optional that has a valid DWARFFormValue for the first
+  /// matching attribute in Attrs, or None if none of the attributes in Attrs
+  /// exist in this DIE.
+  Optional<DWARFFormValue> find(ArrayRef<dwarf::Attribute> Attrs) const;
+
+  /// Extract the first value of any attribute in Attrs from this DIE and
+  /// recurse into any DW_AT_specification or DW_AT_abstract_origin referenced
+  /// DIEs.
+  ///
+  /// \param Attrs an array of DWARF attributes to look for.
+  /// \returns an optional that has a valid DWARFFormValue for the first
+  /// matching attribute in Attrs, or None if none of the attributes in Attrs
+  /// exist in this DIE or in any DW_AT_specification or DW_AT_abstract_origin
+  /// DIEs.
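+  ///
+  /// A call-site sketch (illustrative; the dwarf::toString helper is declared
+  /// in DWARFFormValue.h):
+  /// \code
+  ///   const char *Name = dwarf::toString(
+  ///       Die.findRecursively({dwarf::DW_AT_linkage_name, dwarf::DW_AT_name}),
+  ///       "<unknown>");
+  /// \endcode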
+  Optional<DWARFFormValue>
+  findRecursively(ArrayRef<dwarf::Attribute> Attrs) const;
+
+  /// Extract the specified attribute from this DIE as the referenced DIE.
+  ///
+  /// Regardless of the reference type, return the correct DWARFDie instance if
+  /// the attribute exists. The returned DWARFDie object might be from another
+  /// DWARFUnit, but that is all encapsulated in the new DWARFDie object.
+  ///
+  /// Extract an attribute value from this DIE only. This call doesn't look
+  /// for the attribute value in any DW_AT_specification or
+  /// DW_AT_abstract_origin referenced DIEs.
+  ///
+  /// \param Attr the attribute to extract.
+  /// \returns a valid DWARFDie instance if the attribute exists, or an invalid
+  /// DWARFDie object if it doesn't.
+  DWARFDie getAttributeValueAsReferencedDie(dwarf::Attribute Attr) const;
+
+  /// Extract the range base attribute from this DIE as absolute section offset.
+  ///
+  /// This is a utility function that checks for either the DW_AT_rnglists_base
+  /// or DW_AT_GNU_ranges_base attribute.
+  ///
+  /// \returns an optional absolute section offset value for the attribute.
+  Optional<uint64_t> getRangesBaseAttribute() const;
+
+  /// Get the DW_AT_high_pc attribute value as an address.
+  ///
+  /// In DWARF version 4 and later the high PC can be encoded as an offset from
+  /// the DW_AT_low_pc. This function takes care of extracting the value as an
+  /// address or offset and adds it to the low PC if needed and returns the
+  /// value as an optional in case the DIE doesn't have a DW_AT_high_pc
+  /// attribute.
+  ///
+  /// \param LowPC the low PC that might be needed to calculate the high PC.
+  /// \returns an optional address value for the attribute.
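+  ///
+  /// For instance (sketch), the [LowPC, HighPC) interval can be computed as:
+  /// \code
+  ///   if (auto LowPC = dwarf::toAddress(Die.find(dwarf::DW_AT_low_pc)))
+  ///     if (auto HighPC = Die.getHighPC(*LowPC))
+  ///       (void)*HighPC; // the DIE covers [*LowPC, *HighPC)
+  /// \endcode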
+  Optional<uint64_t> getHighPC(uint64_t LowPC) const;
+
+  /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
+  /// Returns true if both attributes are present.
+  bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
+                       uint64_t &SectionIndex) const;
+
+  /// Get the address ranges for this DIE.
+  ///
+  /// Get the hi/low PC range if both attributes are available, or extract the
+  /// non-contiguous address ranges from the DW_AT_ranges attribute.
+  ///
+  /// Extracts the range information from this DIE only. This call doesn't look
+  /// for the range in any DW_AT_specification or DW_AT_abstract_origin DIEs.
+  ///
+  /// \returns an address range vector that might be empty if no address range
+  /// information is available.
+  DWARFAddressRangesVector getAddressRanges() const;
+
+  /// Get all address ranges for any DW_TAG_subprogram DIEs in this DIE or any
+  /// of its children.
+  ///
+  /// Get the hi/low PC range if both attributes are available, or extract the
+  /// non-contiguous address ranges from the DW_AT_ranges attribute for this
+  /// DIE and all children.
+  ///
+  /// \param Ranges the address range vector to fill in.
+  void collectChildrenAddressRanges(DWARFAddressRangesVector &Ranges) const;
+
+  bool addressRangeContainsAddress(const uint64_t Address) const;
+
+  /// If a DIE represents a subprogram (or inlined subroutine), returns its
+  /// mangled name (or short name, if mangled is missing). This name may be
+  /// fetched from specification or abstract origin for this subprogram.
+  /// Returns null if no name is found.
+  const char *getSubroutineName(DINameKind Kind) const;
+
+  /// Return the DIE name, resolving DW_AT_specification or DW_AT_abstract_origin
+  /// references if necessary. Returns null if no name is found.
+  const char *getName(DINameKind Kind) const;
+
+  /// Returns the declaration line (start line) for a DIE, assuming it specifies
+  /// a subprogram. This may be fetched from specification or abstract origin
+  /// for this subprogram by resolving DW_AT_specification or
+  /// DW_AT_abstract_origin references if necessary.
+  uint64_t getDeclLine() const;
+
+  /// Retrieves values of DW_AT_call_file, DW_AT_call_line and DW_AT_call_column
+  /// from DIE (or zeroes if they are missing). This function looks for
+  /// DW_AT_call attributes in this DIE only, it will not resolve the attribute
+  /// values in any DW_AT_specification or DW_AT_abstract_origin DIEs.
+  /// \param CallFile filled in with non-zero if successful, zero if there is no
+  /// DW_AT_call_file attribute in this DIE.
+  /// \param CallLine filled in with non-zero if successful, zero if there is no
+  /// DW_AT_call_line attribute in this DIE.
+  /// \param CallColumn filled in with non-zero if successful, zero if there is
+  /// no DW_AT_call_column attribute in this DIE.
+  /// \param CallDiscriminator filled in with non-zero if successful, zero if
+  /// there is no DW_AT_GNU_discriminator attribute in this DIE.
+  void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine,
+                      uint32_t &CallColumn, uint32_t &CallDiscriminator) const;
+
+  class attribute_iterator;
+
+  /// Get an iterator range to all attributes in the current DIE only.
+  ///
+  /// \returns an iterator range for the attributes of the current DIE.
+  iterator_range<attribute_iterator> attributes() const;
+
+  class iterator;
+
+  iterator begin() const;
+  iterator end() const;
+  iterator_range<iterator> children() const;
+};
+
+class DWARFDie::attribute_iterator :
+    public iterator_facade_base<attribute_iterator, std::forward_iterator_tag,
+                                const DWARFAttribute> {
+  /// The DWARF DIE we are extracting attributes from.
+  DWARFDie Die;
+  /// The value vended to clients via the operator*() or operator->().
+  DWARFAttribute AttrValue;
+  /// The attribute index within the abbreviation declaration in Die.
+  uint32_t Index;
+
+  /// Update the attribute index and attempt to read the attribute value. If
+  /// the attribute can be read, update AttrValue and the Index member
+  /// variable. If the attribute value cannot be read, the iterator is set to
+  /// the end value so iteration stops.
+  void updateForIndex(const DWARFAbbreviationDeclaration &AbbrDecl, uint32_t I);
+
+public:
+  attribute_iterator() = delete;
+  explicit attribute_iterator(DWARFDie D, bool End);
+
+  attribute_iterator &operator++();
+  explicit operator bool() const { return AttrValue.isValid(); }
+  const DWARFAttribute &operator*() const { return AttrValue; }
+  bool operator==(const attribute_iterator &X) const { return Index == X.Index; }
+};
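+
+// A sketch (illustrative) of walking a DIE's attributes with this iterator
+// via DWARFDie::attributes():
+//
+//   for (const DWARFAttribute &Attr : Die.attributes()) {
+//     // Attr.Attr is the attribute code; Attr.Value is its DWARFFormValue.
+//   }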
+
+inline bool operator==(const DWARFDie &LHS, const DWARFDie &RHS) {
+  return LHS.getDebugInfoEntry() == RHS.getDebugInfoEntry() &&
+      LHS.getDwarfUnit() == RHS.getDwarfUnit();
+}
+
+inline bool operator!=(const DWARFDie &LHS, const DWARFDie &RHS) {
+  return !(LHS == RHS);
+}
+
+inline bool operator<(const DWARFDie &LHS, const DWARFDie &RHS) {
+  return LHS.getOffset() < RHS.getOffset();
+}
+
+class DWARFDie::iterator : public iterator_facade_base<iterator,
+                                                      std::forward_iterator_tag,
+                                                      const DWARFDie> {
+  DWARFDie Die;
+  void skipNull() {
+    if (Die && Die.isNULL())
+      Die = DWARFDie();
+  }
+public:
+  iterator() = default;
+
+  explicit iterator(DWARFDie D) : Die(D) {
+    // If we start out with only a Null DIE then invalidate.
+    skipNull();
+  }
+
+  iterator &operator++() {
+    Die = Die.getSibling();
+    // Don't include the NULL die when iterating.
+    skipNull();
+    return *this;
+  }
+
+  explicit operator bool() const { return Die.isValid(); }
+  const DWARFDie &operator*() const { return Die; }
+  bool operator==(const iterator &X) const { return Die == X.Die; }
+};
+
+// These inline functions must follow the DWARFDie::iterator definition above
+// as they use functions from that class.
+inline DWARFDie::iterator DWARFDie::begin() const {
+  return iterator(getFirstChild());
+}
+
+inline DWARFDie::iterator DWARFDie::end() const {
+  return iterator();
+}
+
+inline iterator_range<DWARFDie::iterator> DWARFDie::children() const {
+  return make_range(begin(), end());
+}
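+
+// For example (illustrative sketch), a depth-first traversal of a DIE tree
+// can be written directly against these iterators:
+//
+//   void visitDepthFirst(DWARFDie Die) {
+//     // ... process Die here ...
+//     for (DWARFDie Child : Die.children())
+//       visitDepthFirst(Child);
+//   }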
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDIE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFExpression.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFExpression.h
new file mode 100644
index 0000000..3fad68a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFExpression.h
@@ -0,0 +1,153 @@
+//===--- DWARFExpression.h - DWARF Expression handling ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFEXPRESSION_H
+#define LLVM_DEBUGINFO_DWARFEXPRESSION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/DataExtractor.h"
+
+namespace llvm {
+class DWARFUnit;
+class MCRegisterInfo;
+class raw_ostream;
+
+class DWARFExpression {
+public:
+  class iterator;
+
+  /// This class represents an Operation in the Expression. Each operation can
+  /// have up to 2 operands.
+  ///
+  /// An Operation can be in an Error state (check with isError()). This
+  /// means that it couldn't be decoded successfully; if that is the
+  /// case, all other fields contain undefined values.
+  class Operation {
+  public:
+    /// Size and signedness of expression operations' operands.
+    enum Encoding : uint8_t {
+      Size1 = 0,
+      Size2 = 1,
+      Size4 = 2,
+      Size8 = 3,
+      SizeLEB = 4,
+      SizeAddr = 5,
+      SizeRefAddr = 6,
+      SizeBlock = 7, ///< Preceding operand contains block size
+      SignBit = 0x8,
+      SignedSize1 = SignBit | Size1,
+      SignedSize2 = SignBit | Size2,
+      SignedSize4 = SignBit | Size4,
+      SignedSize8 = SignBit | Size8,
+      SignedSizeLEB = SignBit | SizeLEB,
+      SizeNA = 0xFF ///< Unused operands get this encoding.
+    };
+
+    enum DwarfVersion : uint8_t {
+      DwarfNA, ///< Serves as a marker for unused entries
+      Dwarf2 = 2,
+      Dwarf3,
+      Dwarf4
+    };
+
+    /// Description of the encoding of one expression Op.
+    struct Description {
+      DwarfVersion Version; ///< Dwarf version where the Op was introduced.
+      Encoding Op[2];       ///< Encoding for Op operands, or SizeNA.
+
+      Description(DwarfVersion Version = DwarfNA, Encoding Op1 = SizeNA,
+                  Encoding Op2 = SizeNA)
+          : Version(Version) {
+        Op[0] = Op1;
+        Op[1] = Op2;
+      }
+    };
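+
+    // For example (illustrative): an opcode introduced in DWARF v2 that takes
+    // a single unsigned LEB128 operand would be described as
+    //   Description D(Dwarf2, SizeLEB); // Op[1] stays SizeNA (unused)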
+
+  private:
+    friend class DWARFExpression::iterator;
+    uint8_t Opcode; ///< The Op Opcode, DW_OP_<something>.
+    Description Desc;
+    bool Error;
+    uint32_t EndOffset;
+    uint64_t Operands[2];
+
+  public:
+    Description &getDescription() { return Desc; }
+    uint8_t getCode() { return Opcode; }
+    uint64_t getRawOperand(unsigned Idx) { return Operands[Idx]; }
+    uint32_t getEndOffset() { return EndOffset; }
+    bool extract(DataExtractor Data, uint16_t Version, uint8_t AddressSize,
+                 uint32_t Offset);
+    bool isError() { return Error; }
+    bool print(raw_ostream &OS, const DWARFExpression *U,
+               const MCRegisterInfo *RegInfo, bool isEH);
+  };
+
+  /// An iterator to go through the expression operations.
+  class iterator
+      : public iterator_facade_base<iterator, std::forward_iterator_tag,
+                                    Operation> {
+    friend class DWARFExpression;
+    const DWARFExpression *Expr;
+    uint32_t Offset;
+    Operation Op;
+    iterator(const DWARFExpression *Expr, uint32_t Offset)
+        : Expr(Expr), Offset(Offset) {
+      Op.Error =
+          Offset >= Expr->Data.getData().size() ||
+          !Op.extract(Expr->Data, Expr->Version, Expr->AddressSize, Offset);
+    }
+
+  public:
+    class Operation &operator++() {
+      Offset = Op.isError() ? Expr->Data.getData().size() : Op.EndOffset;
+      Op.Error =
+          Offset >= Expr->Data.getData().size() ||
+          !Op.extract(Expr->Data, Expr->Version, Expr->AddressSize, Offset);
+      return Op;
+    }
+
+    class Operation &operator*() {
+      return Op;
+    }
+
+    // Comparison operators are provided out of line.
+    friend bool operator==(const iterator &, const iterator &);
+  };
+
+  DWARFExpression(DataExtractor Data, uint16_t Version, uint8_t AddressSize)
+      : Data(Data), Version(Version), AddressSize(AddressSize) {
+    assert(AddressSize == 8 || AddressSize == 4);
+  }
+
+  iterator begin() const { return iterator(this, 0); }
+  iterator end() const { return iterator(this, Data.getData().size()); }
+
+  void print(raw_ostream &OS, const MCRegisterInfo *RegInfo,
+             bool IsEH = false) const;
+
+private:
+  DataExtractor Data;
+  uint16_t Version;
+  uint8_t AddressSize;
+};
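+
+// A minimal decoding sketch (illustrative; `Data` is assumed to hold the raw
+// expression bytes for a DWARF v4, 8-byte-address target):
+//
+//   DWARFExpression Expr(Data, /*Version=*/4, /*AddressSize=*/8);
+//   for (auto I = Expr.begin(), E = Expr.end(); I != E; ++I) {
+//     if ((*I).isError())
+//       break;
+//     uint8_t Opcode = (*I).getCode(); // DW_OP_* opcode of this operation
+//   }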
+
+inline bool operator==(const DWARFExpression::iterator &LHS,
+                       const DWARFExpression::iterator &RHS) {
+  return LHS.Expr == RHS.Expr && LHS.Offset == RHS.Offset;
+}
+
+inline bool operator!=(const DWARFExpression::iterator &LHS,
+                       const DWARFExpression::iterator &RHS) {
+  return !(LHS == RHS);
+}
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFEXPRESSION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
new file mode 100644
index 0000000..1b5f71c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -0,0 +1,288 @@
+//===- DWARFFormValue.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFFORMVALUE_H
+#define LLVM_DEBUGINFO_DWARFFORMVALUE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include <cstdint>
+
+namespace llvm {
+
+class DWARFContext;
+class DWARFUnit;
+class raw_ostream;
+
+class DWARFFormValue {
+public:
+  enum FormClass {
+    FC_Unknown,
+    FC_Address,
+    FC_Block,
+    FC_Constant,
+    FC_String,
+    FC_Flag,
+    FC_Reference,
+    FC_Indirect,
+    FC_SectionOffset,
+    FC_Exprloc
+  };
+
+private:
+  struct ValueType {
+    ValueType() { uval = 0; }
+
+    union {
+      uint64_t uval;
+      int64_t sval;
+      const char *cstr;
+    };
+    const uint8_t *data = nullptr;
+    uint64_t SectionIndex;      /// Section index for reference forms.
+  };
+
+  dwarf::Form Form;             /// Form for this value.
+  ValueType Value;              /// Contains all data for the form.
+  const DWARFUnit *U = nullptr; /// Remember the DWARFUnit at extract time.
+  const DWARFContext *C = nullptr; /// Context for extract time.
+public:
+  DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F) {}
+
+  dwarf::Form getForm() const { return Form; }
+  uint64_t getRawUValue() const { return Value.uval; }
+  uint64_t getSectionIndex() const { return Value.SectionIndex; }
+  void setForm(dwarf::Form F) { Form = F; }
+  void setUValue(uint64_t V) { Value.uval = V; }
+  void setSValue(int64_t V) { Value.sval = V; }
+  void setPValue(const char *V) { Value.cstr = V; }
+
+  void setBlockValue(const ArrayRef<uint8_t> &Data) {
+    Value.data = Data.data();
+    setUValue(Data.size());
+  }
+
+  bool isFormClass(FormClass FC) const;
+  const DWARFUnit *getUnit() const { return U; }
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts = DIDumpOptions()) const;
+
+  /// Extracts a value in \p Data at offset \p *OffsetPtr. The information
+  /// in \p FormParams is needed to interpret some forms. The optional
+  /// \p Context and \p Unit allow extracting information if the form refers
+  /// to other sections (e.g., .debug_str).
+  bool extractValue(const DWARFDataExtractor &Data, uint32_t *OffsetPtr,
+                    dwarf::FormParams FormParams,
+                    const DWARFContext *Context = nullptr,
+                    const DWARFUnit *Unit = nullptr);
+
+  bool extractValue(const DWARFDataExtractor &Data, uint32_t *OffsetPtr,
+                    dwarf::FormParams FormParams, const DWARFUnit *U) {
+    return extractValue(Data, OffsetPtr, FormParams, nullptr, U);
+  }
+
+  bool isInlinedCStr() const {
+    return Value.data != nullptr && Value.data == (const uint8_t *)Value.cstr;
+  }
+
+  /// The getAsFoo functions below return the extracted value as Foo if the
+  /// DWARFFormValue's form class is suitable for representing Foo.
+  Optional<uint64_t> getAsReference() const;
+  Optional<uint64_t> getAsUnsignedConstant() const;
+  Optional<int64_t> getAsSignedConstant() const;
+  Optional<const char *> getAsCString() const;
+  Optional<uint64_t> getAsAddress() const;
+  Optional<uint64_t> getAsSectionOffset() const;
+  Optional<ArrayRef<uint8_t>> getAsBlock() const;
+  Optional<uint64_t> getAsCStringOffset() const;
+  Optional<uint64_t> getAsReferenceUVal() const;
+
+  /// Skip a form's value in \p DebugInfoData at the offset specified by
+  /// \p OffsetPtr.
+  ///
+  /// Skips the bytes for the current form and updates the offset.
+  ///
+  /// \param DebugInfoData The data where we want to skip the value.
+  /// \param OffsetPtr A reference to the offset that will be updated.
+  /// \param Params DWARF parameters to help interpret forms.
+  /// \returns true on success, false if the form was not skipped.
+  bool skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
+                 const dwarf::FormParams Params) const {
+    return DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, Params);
+  }
+
+  /// Skip a form's value in \p DebugInfoData at the offset specified by
+  /// \p OffsetPtr.
+  ///
+  /// Skips the bytes for the specified form and updates the offset.
+  ///
+  /// \param Form The DW_FORM enumeration that indicates the form to skip.
+  /// \param DebugInfoData The data where we want to skip the value.
+  /// \param OffsetPtr A reference to the offset that will be updated.
+  /// \param FormParams DWARF parameters to help interpret forms.
+  /// \returns true on success, false if the form was not skipped.
+  static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+                        uint32_t *OffsetPtr,
+                        const dwarf::FormParams FormParams);
+
+private:
+  void dumpString(raw_ostream &OS) const;
+};
+
+namespace dwarf {
+
+/// Take an optional DWARFFormValue and try to extract a string value from it.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and was a string.
+inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsCString();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract a string value from it.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the string value or Default if V doesn't have a value or the
+/// form value's encoding wasn't a string.
+inline const char *toString(const Optional<DWARFFormValue> &V,
+                            const char *Default) {
+  return toString(V).getValueOr(Default);
+}
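+
+// Typical call-site sketch (illustrative; `Die` is a hypothetical DWARFDie):
+//
+//   const char *Name = toString(Die.find(DW_AT_name), "<anonymous>");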
+
+/// Take an optional DWARFFormValue and try to extract an unsigned constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has an unsigned constant form.
+inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsUnsignedConstant();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract an unsigned constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted unsigned value or Default if V doesn't have a
+/// value or the form value's encoding wasn't an unsigned constant form.
+inline uint64_t toUnsigned(const Optional<DWARFFormValue> &V,
+                           uint64_t Default) {
+  return toUnsigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a reference.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a reference form.
+inline Optional<uint64_t> toReference(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsReference();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract a reference.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted reference value or Default if V doesn't have a
+/// value or the form value's encoding wasn't a reference form.
+inline uint64_t toReference(const Optional<DWARFFormValue> &V,
+                            uint64_t Default) {
+  return toReference(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a signed constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a signed constant form.
+inline Optional<int64_t> toSigned(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsSignedConstant();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract a signed integer.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted signed integer value or Default if V doesn't
+/// have a value or the form value's encoding wasn't a signed integer form.
+inline int64_t toSigned(const Optional<DWARFFormValue> &V, int64_t Default) {
+  return toSigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an address.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has an address form.
+inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsAddress();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract an address.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted address value or Default if V doesn't have a
+/// value or the form value's encoding wasn't an address form.
+inline uint64_t toAddress(const Optional<DWARFFormValue> &V, uint64_t Default) {
+  return toAddress(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a section offset.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a section offset form.
+inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsSectionOffset();
+  return None;
+}
+
+/// Take an optional DWARFFormValue and extract a section offset.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted section offset value or Default if V doesn't
+/// have a value or the form value's encoding wasn't a section offset form.
+inline uint64_t toSectionOffset(const Optional<DWARFFormValue> &V,
+                                uint64_t Default) {
+  return toSectionOffset(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract block data.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a block form.
+inline Optional<ArrayRef<uint8_t>> toBlock(const Optional<DWARFFormValue> &V) {
+  if (V)
+    return V->getAsBlock();
+  return None;
+}
+
+} // end namespace dwarf
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFFORMVALUE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
new file mode 100644
index 0000000..8d1ac5c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
@@ -0,0 +1,74 @@
+//===- DWARFGdbIndex.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H
+#define LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFGdbIndex {
+  uint32_t Version;
+
+  uint32_t CuListOffset;
+  uint32_t AddressAreaOffset;
+  uint32_t SymbolTableOffset;
+  uint32_t ConstantPoolOffset;
+
+  struct CompUnitEntry {
+    uint64_t Offset; /// Offset of a CU in the .debug_info section.
+    uint64_t Length; /// Length of that CU.
+  };
+  SmallVector<CompUnitEntry, 0> CuList;
+
+  struct AddressEntry {
+    uint64_t LowAddress;  /// The low address.
+    uint64_t HighAddress; /// The high address.
+    uint32_t CuIndex;     /// The CU index.
+  };
+  SmallVector<AddressEntry, 0> AddressArea;
+
+  struct SymTableEntry {
+    uint32_t NameOffset; /// Offset of the symbol's name in the constant pool.
+    uint32_t VecOffset;  /// Offset of the CU vector in the constant pool.
+  };
+  SmallVector<SymTableEntry, 0> SymbolTable;
+
+  /// Each value is CU index + attributes.
+  SmallVector<std::pair<uint32_t, SmallVector<uint32_t, 0>>, 0>
+      ConstantPoolVectors;
+
+  StringRef ConstantPoolStrings;
+  uint32_t StringPoolOffset;
+
+  void dumpCUList(raw_ostream &OS) const;
+  void dumpAddressArea(raw_ostream &OS) const;
+  void dumpSymbolTable(raw_ostream &OS) const;
+  void dumpConstantPool(raw_ostream &OS) const;
+
+  bool parseImpl(DataExtractor Data);
+
+public:
+  void dump(raw_ostream &OS);
+  void parse(DataExtractor Data);
+
+  bool HasContent = false;
+  bool HasError = false;
+};
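+
+// A parsing sketch (illustrative; `GdbIndexData` is assumed to be the raw
+// contents of the .gdb_index section):
+//
+//   DWARFGdbIndex Index;
+//   Index.parse(DataExtractor(GdbIndexData, /*IsLittleEndian=*/true,
+//                             /*AddressSize=*/8));
+//   if (Index.HasContent && !Index.HasError)
+//     Index.dump(llvm::outs());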
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFObject.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFObject.h
new file mode 100644
index 0000000..795eddd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFObject.h
@@ -0,0 +1,82 @@
+//===- DWARFObject.h --------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFOBJECT_H
+#define LLVM_DEBUGINFO_DWARF_DWARFOBJECT_H
+
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/Object/ObjectFile.h"
+
+namespace llvm {
+// This class is responsible for low-level access to the object file. It
+// knows how to find the required sections and compute relocated values.
+// The default implementations of the get<Section> methods return dummy
+// values. This is to allow clients that only need some of those sections to
+// implement just the ones they need. We can't use llvm_unreachable in most of
+// these cases because the parser implementation is eager and will call some
+// of these methods even if the result is not used.
+class DWARFObject {
+  DWARFSection Dummy;
+
+public:
+  virtual ~DWARFObject() = default;
+  virtual StringRef getFileName() const { llvm_unreachable("unimplemented"); }
+  virtual const object::ObjectFile *getFile() const { return nullptr; }
+  virtual ArrayRef<SectionName> getSectionNames() const { return {}; }
+  virtual bool isLittleEndian() const = 0;
+  virtual uint8_t getAddressSize() const { llvm_unreachable("unimplemented"); }
+  virtual const DWARFSection &getInfoSection() const { return Dummy; }
+  virtual void
+  forEachTypesSections(function_ref<void(const DWARFSection &)> F) const {}
+  virtual StringRef getAbbrevSection() const { return ""; }
+  virtual const DWARFSection &getLocSection() const { return Dummy; }
+  virtual StringRef getARangeSection() const { return ""; }
+  virtual StringRef getDebugFrameSection() const { return ""; }
+  virtual StringRef getEHFrameSection() const { return ""; }
+  virtual const DWARFSection &getLineSection() const { return Dummy; }
+  virtual StringRef getLineStringSection() const { return ""; }
+  virtual StringRef getStringSection() const { return ""; }
+  virtual const DWARFSection &getRangeSection() const { return Dummy; }
+  virtual const DWARFSection &getRnglistsSection() const { return Dummy; }
+  virtual StringRef getMacinfoSection() const { return ""; }
+  virtual StringRef getPubNamesSection() const { return ""; }
+  virtual StringRef getPubTypesSection() const { return ""; }
+  virtual StringRef getGnuPubNamesSection() const { return ""; }
+  virtual StringRef getGnuPubTypesSection() const { return ""; }
+  virtual const DWARFSection &getStringOffsetSection() const { return Dummy; }
+  virtual const DWARFSection &getInfoDWOSection() const { return Dummy; }
+  virtual void
+  forEachTypesDWOSections(function_ref<void(const DWARFSection &)> F) const {}
+  virtual StringRef getAbbrevDWOSection() const { return ""; }
+  virtual const DWARFSection &getLineDWOSection() const { return Dummy; }
+  virtual const DWARFSection &getLocDWOSection() const { return Dummy; }
+  virtual StringRef getStringDWOSection() const { return ""; }
+  virtual const DWARFSection &getStringOffsetDWOSection() const {
+    return Dummy;
+  }
+  virtual const DWARFSection &getRangeDWOSection() const { return Dummy; }
+  virtual const DWARFSection &getAddrSection() const { return Dummy; }
+  virtual const DWARFSection &getAppleNamesSection() const { return Dummy; }
+  virtual const DWARFSection &getAppleTypesSection() const { return Dummy; }
+  virtual const DWARFSection &getAppleNamespacesSection() const {
+    return Dummy;
+  }
+  virtual const DWARFSection &getDebugNamesSection() const { return Dummy; }
+  virtual const DWARFSection &getAppleObjCSection() const { return Dummy; }
+  virtual StringRef getCUIndexSection() const { return ""; }
+  virtual StringRef getGdbIndexSection() const { return ""; }
+  virtual StringRef getTUIndexSection() const { return ""; }
+  virtual Optional<RelocAddrEntry> find(const DWARFSection &Sec,
+                                        uint64_t Pos) const = 0;
+};
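+
+// A minimal concrete subclass sketch (illustrative): only the pure virtual
+// methods need overriding; every other getter keeps its dummy default.
+//
+//   class InMemoryDWARFObject final : public DWARFObject {
+//     DWARFSection Info;
+//   public:
+//     bool isLittleEndian() const override { return true; }
+//     const DWARFSection &getInfoSection() const override { return Info; }
+//     Optional<RelocAddrEntry> find(const DWARFSection &,
+//                                   uint64_t) const override {
+//       return None; // no relocations in this in-memory example
+//     }
+//   };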
+
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
new file mode 100644
index 0000000..f518384
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
@@ -0,0 +1,34 @@
+//===- DWARFRelocMap.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H
+#define LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include <cstdint>
+
+namespace llvm {
+
+/// RelocAddrEntry contains the relocated value and the section index.
+/// The section index is -1LL if the relocation points to an absolute symbol.
+struct RelocAddrEntry {
+  uint64_t SectionIndex;
+  uint64_t Value;
+};
+
+/// Instead of applying the relocations to the data we've read from disk, we
+/// keep a separate mapping table on the side and consult it at the locations
+/// in the DWARF where we expect relocated values. This adds a bit of
+/// complexity to the DWARF parsing/extraction, with the benefit of not
+/// allocating memory for the entire size of the debug info sections.
+using RelocAddrMap = DenseMap<uint64_t, RelocAddrEntry>;
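+
+// A lookup sketch (illustrative): consult the side table before trusting a
+// value read directly from the section data.
+//
+//   inline uint64_t getRelocatedValue(const RelocAddrMap &Map, uint64_t Offset,
+//                                     uint64_t RawValue) {
+//     auto It = Map.find(Offset);
+//     return It == Map.end() ? RawValue : It->second.Value;
+//   }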
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFSection.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFSection.h
new file mode 100644
index 0000000..77045f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFSection.h
@@ -0,0 +1,28 @@
+//===- DWARFSection.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
+#define LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+struct DWARFSection {
+  StringRef Data;
+};
+
+struct SectionName {
+  StringRef Name;
+  bool IsNameUnique;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
new file mode 100644
index 0000000..a659a63
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
@@ -0,0 +1,55 @@
+//===- DWARFTypeUnit.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H
+#define LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+
+namespace llvm {
+
+class DWARFContext;
+class DWARFDebugAbbrev;
+struct DWARFSection;
+class raw_ostream;
+
+class DWARFTypeUnit : public DWARFUnit {
+private:
+  uint64_t TypeHash;
+  uint32_t TypeOffset;
+
+public:
+  DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
+                const DWARFDebugAbbrev *DA, const DWARFSection *RS,
+                StringRef SS, const DWARFSection &SOS, const DWARFSection *AOS,
+                const DWARFSection &LS, bool LE, bool IsDWO,
+                const DWARFUnitSectionBase &UnitSection,
+                const DWARFUnitIndex::Entry *Entry)
+      : DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
+                  UnitSection, Entry) {}
+
+  uint32_t getHeaderSize() const override {
+    return DWARFUnit::getHeaderSize() + 12;
+  }
+
+  void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {});
+  static const DWARFSectionKind Section = DW_SECT_TYPES;
+
+protected:
+  bool extractImpl(const DWARFDataExtractor &debug_info,
+                   uint32_t *offset_ptr) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnit.h
new file mode 100644
index 0000000..fe3f573
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -0,0 +1,488 @@
+//===- DWARFUnit.h ----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFUNIT_H
+#define LLVM_DEBUGINFO_DWARF_DWARFUNIT_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include "llvm/Support/DataExtractor.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class DWARFAbbreviationDeclarationSet;
+class DWARFContext;
+class DWARFDebugAbbrev;
+class DWARFUnit;
+
+/// Base class for all DWARFUnitSection classes. This provides the
+/// functionality common to all unit types.
+class DWARFUnitSectionBase {
+public:
+  /// Returns the Unit from this section that contains the given section
+  /// offset.
+  virtual DWARFUnit *getUnitForOffset(uint32_t Offset) const = 0;
+  virtual DWARFUnit *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E) = 0;
+
+  void parse(DWARFContext &C, const DWARFSection &Section);
+  void parseDWO(DWARFContext &C, const DWARFSection &DWOSection,
+                bool Lazy = false);
+
+protected:
+  ~DWARFUnitSectionBase() = default;
+
+  virtual void parseImpl(DWARFContext &Context, const DWARFObject &Obj,
+                         const DWARFSection &Section,
+                         const DWARFDebugAbbrev *DA, const DWARFSection *RS,
+                         StringRef SS, const DWARFSection &SOS,
+                         const DWARFSection *AOS, const DWARFSection &LS,
+                         bool isLittleEndian, bool isDWO, bool Lazy) = 0;
+};
+
+const DWARFUnitIndex &getDWARFUnitIndex(DWARFContext &Context,
+                                        DWARFSectionKind Kind);
+
+/// Concrete instance of DWARFUnitSection, specialized for one Unit type.
+template<typename UnitType>
+class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>,
+                               public DWARFUnitSectionBase {
+  bool Parsed = false;
+  std::function<std::unique_ptr<UnitType>(uint32_t)> Parser;
+
+public:
+  using UnitVector = SmallVectorImpl<std::unique_ptr<UnitType>>;
+  using iterator = typename UnitVector::iterator;
+  using iterator_range = llvm::iterator_range<typename UnitVector::iterator>;
+
+  UnitType *getUnitForOffset(uint32_t Offset) const override {
+    auto *CU = std::upper_bound(
+        this->begin(), this->end(), Offset,
+        [](uint32_t LHS, const std::unique_ptr<UnitType> &RHS) {
+          return LHS < RHS->getNextUnitOffset();
+        });
+    if (CU != this->end() && (*CU)->getOffset() <= Offset)
+      return CU->get();
+    return nullptr;
+  }
+  UnitType *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E) override {
+    const auto *CUOff = E.getOffset(DW_SECT_INFO);
+    if (!CUOff)
+      return nullptr;
+
+    auto Offset = CUOff->Offset;
+
+    auto *CU = std::upper_bound(
+        this->begin(), this->end(), CUOff->Offset,
+        [](uint32_t LHS, const std::unique_ptr<UnitType> &RHS) {
+          return LHS < RHS->getNextUnitOffset();
+        });
+    if (CU != this->end() && (*CU)->getOffset() <= Offset)
+      return CU->get();
+
+    if (!Parser)
+      return nullptr;
+
+    auto U = Parser(Offset);
+    if (!U)
+      U = nullptr;
+
+    auto *NewCU = U.get();
+    this->insert(CU, std::move(U));
+    return NewCU;
+  }
+
+private:
+  void parseImpl(DWARFContext &Context, const DWARFObject &Obj,
+                 const DWARFSection &Section, const DWARFDebugAbbrev *DA,
+                 const DWARFSection *RS, StringRef SS, const DWARFSection &SOS,
+                 const DWARFSection *AOS, const DWARFSection &LS, bool LE,
+                 bool IsDWO, bool Lazy) override {
+    if (Parsed)
+      return;
+    DWARFDataExtractor Data(Obj, Section, LE, 0);
+    if (!Parser) {
+      const DWARFUnitIndex *Index = nullptr;
+      if (IsDWO)
+        Index = &getDWARFUnitIndex(Context, UnitType::Section);
+      Parser = [=, &Context, &Section, &SOS,
+                &LS](uint32_t Offset) -> std::unique_ptr<UnitType> {
+        if (!Data.isValidOffset(Offset))
+          return nullptr;
+        auto U = llvm::make_unique<UnitType>(
+            Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO, *this,
+            Index ? Index->getFromOffset(Offset) : nullptr);
+        if (!U->extract(Data, &Offset))
+          return nullptr;
+        return U;
+      };
+    }
+    if (Lazy)
+      return;
+    auto I = this->begin();
+    uint32_t Offset = 0;
+    while (Data.isValidOffset(Offset)) {
+      if (I != this->end() && (*I)->getOffset() == Offset) {
+        ++I;
+        continue;
+      }
+      auto U = Parser(Offset);
+      if (!U)
+        break;
+      Offset = U->getNextUnitOffset();
+      I = std::next(this->insert(I, std::move(U)));
+    }
+    Parsed = true;
+  }
+};
+
+/// Represents the base address of the CU.
+struct BaseAddress {
+  uint64_t Address;
+  uint64_t SectionIndex;
+};
+
+/// Represents a unit's contribution to the string offsets table.
+struct StrOffsetsContributionDescriptor {
+  uint64_t Base = 0;
+  uint64_t Size = 0;
+  /// Format and version.
+  dwarf::FormParams FormParams = {0, 0, dwarf::DwarfFormat::DWARF32};
+
+  StrOffsetsContributionDescriptor(uint64_t Base, uint64_t Size,
+                                   uint8_t Version, dwarf::DwarfFormat Format)
+      : Base(Base), Size(Size), FormParams({Version, 0, Format}) {}
+
+  uint8_t getVersion() const { return FormParams.Version; }
+  dwarf::DwarfFormat getFormat() const { return FormParams.Format; }
+  uint8_t getDwarfOffsetByteSize() const {
+    return FormParams.getDwarfOffsetByteSize();
+  }
+  /// Determine whether a contribution to the string offsets table is
+  /// consistent with the relevant section size and whether its length is
+  /// a multiple of the size of one of its entries.
+  Optional<StrOffsetsContributionDescriptor>
+  validateContributionSize(DWARFDataExtractor &DA);
+};
+
+class DWARFUnit {
+  DWARFContext &Context;
+  /// Section containing this DWARFUnit.
+  const DWARFSection &InfoSection;
+
+  const DWARFDebugAbbrev *Abbrev;
+  const DWARFSection *RangeSection;
+  uint32_t RangeSectionBase;
+  const DWARFSection &LineSection;
+  StringRef StringSection;
+  const DWARFSection &StringOffsetSection;
+  const DWARFSection *AddrOffsetSection;
+  uint32_t AddrOffsetSectionBase = 0;
+  bool isLittleEndian;
+  bool isDWO;
+  const DWARFUnitSectionBase &UnitSection;
+
+  // Version, address size, and DWARF format.
+  dwarf::FormParams FormParams;
+  /// Start, length, and DWARF format of the unit's contribution to the string
+  /// offsets table (DWARF v5).
+  Optional<StrOffsetsContributionDescriptor> StringOffsetsTableContribution;
+
+  uint32_t Offset;
+  uint32_t Length;
+  mutable const DWARFAbbreviationDeclarationSet *Abbrevs;
+  uint64_t AbbrOffset;
+  uint8_t UnitType;
+  llvm::Optional<BaseAddress> BaseAddr;
+  /// The compile unit debug information entry items.
+  std::vector<DWARFDebugInfoEntry> DieArray;
+
+  /// Map from range's start address to end address and corresponding DIE.
+  /// IntervalMap does not support range removal; as a result, we use
+  /// std::map::upper_bound for address range lookup.
+  std::map<uint64_t, std::pair<uint64_t, DWARFDie>> AddrDieMap;
+
+  using die_iterator_range =
+      iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>;
+
+  std::shared_ptr<DWARFUnit> DWO;
+
+  const DWARFUnitIndex::Entry *IndexEntry;
+
+  uint32_t getDIEIndex(const DWARFDebugInfoEntry *Die) {
+    auto First = DieArray.data();
+    assert(Die >= First && Die < First + DieArray.size());
+    return Die - First;
+  }
+
+protected:
+  virtual bool extractImpl(const DWARFDataExtractor &debug_info,
+                           uint32_t *offset_ptr);
+
+  /// Size in bytes of the unit header.
+  virtual uint32_t getHeaderSize() const { return getVersion() <= 4 ? 11 : 12; }
+
+  /// Find the unit's contribution to the string offsets table and determine its
+  /// length and form. The given offset is expected to be derived from the unit
+  /// DIE's DW_AT_str_offsets_base attribute.
+  Optional<StrOffsetsContributionDescriptor>
+  determineStringOffsetsTableContribution(DWARFDataExtractor &DA,
+                                          uint64_t Offset);
+
+  /// Find the unit's contribution to the string offsets table and determine its
+  /// length and form. The given offset is expected to be 0 in a dwo file or,
+  /// in a dwp file, the start of the unit's contribution to the string offsets
+  /// table section (as determined by the index table).
+  Optional<StrOffsetsContributionDescriptor>
+  determineStringOffsetsTableContributionDWO(DWARFDataExtractor &DA,
+                                             uint64_t Offset);
+
+public:
+  DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
+            const DWARFDebugAbbrev *DA, const DWARFSection *RS, StringRef SS,
+            const DWARFSection &SOS, const DWARFSection *AOS,
+            const DWARFSection &LS, bool LE, bool IsDWO,
+            const DWARFUnitSectionBase &UnitSection,
+            const DWARFUnitIndex::Entry *IndexEntry = nullptr);
+
+  virtual ~DWARFUnit();
+
+  DWARFContext& getContext() const { return Context; }
+
+  const DWARFSection &getLineSection() const { return LineSection; }
+  StringRef getStringSection() const { return StringSection; }
+  const DWARFSection &getStringOffsetSection() const {
+    return StringOffsetSection;
+  }
+
+  void setAddrOffsetSection(const DWARFSection *AOS, uint32_t Base) {
+    AddrOffsetSection = AOS;
+    AddrOffsetSectionBase = Base;
+  }
+
+  /// Recursively update the address-to-DIE map.
+  void updateAddressDieMap(DWARFDie Die);
+
+  void setRangesSection(const DWARFSection *RS, uint32_t Base) {
+    RangeSection = RS;
+    RangeSectionBase = Base;
+  }
+
+  bool getAddrOffsetSectionItem(uint32_t Index, uint64_t &Result) const;
+  bool getStringOffsetSectionItem(uint32_t Index, uint64_t &Result) const;
+
+  DWARFDataExtractor getDebugInfoExtractor() const;
+
+  DataExtractor getStringExtractor() const {
+    return DataExtractor(StringSection, false, 0);
+  }
+
+  bool extract(const DWARFDataExtractor &debug_info, uint32_t *offset_ptr);
+
+  /// extractRangeList - extracts the range list referenced by this compile
+  /// unit from the .debug_ranges section. Returns true on success.
+  /// Requires that the compile unit has already been extracted.
+  bool extractRangeList(uint32_t RangeListOffset,
+                        DWARFDebugRangeList &RangeList) const;
+  void clear();
+  uint32_t getOffset() const { return Offset; }
+  uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
+  uint32_t getLength() const { return Length; }
+
+  const Optional<StrOffsetsContributionDescriptor> &
+  getStringOffsetsTableContribution() const {
+    return StringOffsetsTableContribution;
+  }
+  const dwarf::FormParams &getFormParams() const { return FormParams; }
+  uint16_t getVersion() const { return FormParams.Version; }
+  dwarf::DwarfFormat getFormat() const { return FormParams.Format; }
+  uint8_t getAddressByteSize() const { return FormParams.AddrSize; }
+  uint8_t getRefAddrByteSize() const { return FormParams.getRefAddrByteSize(); }
+  uint8_t getDwarfOffsetByteSize() const {
+    return FormParams.getDwarfOffsetByteSize();
+  }
+
+  uint8_t getDwarfStringOffsetsByteSize() const {
+    assert(StringOffsetsTableContribution);
+    return StringOffsetsTableContribution->getDwarfOffsetByteSize();
+  }
+
+  uint64_t getStringOffsetsBase() const {
+    assert(StringOffsetsTableContribution);
+    return StringOffsetsTableContribution->Base;
+  }
+
+  const DWARFAbbreviationDeclarationSet *getAbbreviations() const;
+
+  uint8_t getUnitType() const { return UnitType; }
+
+  static bool isMatchingUnitTypeAndTag(uint8_t UnitType, dwarf::Tag Tag) {
+    switch (UnitType) {
+    case dwarf::DW_UT_compile:
+      return Tag == dwarf::DW_TAG_compile_unit;
+    case dwarf::DW_UT_type:
+      return Tag == dwarf::DW_TAG_type_unit;
+    case dwarf::DW_UT_partial:
+      return Tag == dwarf::DW_TAG_partial_unit;
+    case dwarf::DW_UT_skeleton:
+      return Tag == dwarf::DW_TAG_skeleton_unit;
+    case dwarf::DW_UT_split_compile:
+    case dwarf::DW_UT_split_type:
+      return dwarf::isUnitType(Tag);
+    }
+    return false;
+  }
+
+  /// \brief Return the number of bytes for the header of a unit of
+  /// UnitType type.
+  ///
+  /// This function must be called with a valid unit type which in
+  /// DWARF5 is defined as one of the following six types.
+  static uint32_t getDWARF5HeaderSize(uint8_t UnitType) {
+    switch (UnitType) {
+    case dwarf::DW_UT_compile:
+    case dwarf::DW_UT_partial:
+      return 12;
+    case dwarf::DW_UT_skeleton:
+    case dwarf::DW_UT_split_compile:
+      return 20;
+    case dwarf::DW_UT_type:
+    case dwarf::DW_UT_split_type:
+      return 24;
+    }
+    llvm_unreachable("Invalid UnitType.");
+  }
+
+  llvm::Optional<BaseAddress> getBaseAddress() const { return BaseAddr; }
+
+  void setBaseAddress(BaseAddress BaseAddr) { this->BaseAddr = BaseAddr; }
+
+  DWARFDie getUnitDIE(bool ExtractUnitDIEOnly = true) {
+    extractDIEsIfNeeded(ExtractUnitDIEOnly);
+    if (DieArray.empty())
+      return DWARFDie();
+    return DWARFDie(this, &DieArray[0]);
+  }
+
+  const char *getCompilationDir();
+  Optional<uint64_t> getDWOId();
+
+  void collectAddressRanges(DWARFAddressRangesVector &CURanges);
+
+  /// Returns subprogram DIE with address range encompassing the provided
+  /// address. The pointer is alive as long as parsed compile unit DIEs are not
+  /// cleared.
+  DWARFDie getSubroutineForAddress(uint64_t Address);
+
+  /// getInlinedChainForAddress - fetches inlined chain for a given address.
+  /// Returns empty chain if there is no subprogram containing address. The
+  /// chain is valid as long as parsed compile unit DIEs are not cleared.
+  void getInlinedChainForAddress(uint64_t Address,
+                                 SmallVectorImpl<DWARFDie> &InlinedChain);
+
+  /// getUnitSection - Return the DWARFUnitSection containing this unit.
+  const DWARFUnitSectionBase &getUnitSection() const { return UnitSection; }
+
+  /// \brief Returns the number of DIEs in the unit. Parses the unit
+  /// if necessary.
+  unsigned getNumDIEs() {
+    extractDIEsIfNeeded(false);
+    return DieArray.size();
+  }
+
+  /// \brief Return the index of a DIE inside the unit's DIE vector.
+  ///
+  /// It is illegal to call this method with a DIE that hasn't been
+  /// created by this unit. In other words, it's illegal to call this
+  /// method on a DIE that isn't accessible by following
+  /// children/sibling links starting from this unit's getUnitDIE().
+  uint32_t getDIEIndex(const DWARFDie &D) {
+    return getDIEIndex(D.getDebugInfoEntry());
+  }
+
+  /// \brief Return the DIE object at the given index.
+  DWARFDie getDIEAtIndex(unsigned Index) {
+    assert(Index < DieArray.size());
+    return DWARFDie(this, &DieArray[Index]);
+  }
+
+  DWARFDie getParent(const DWARFDebugInfoEntry *Die);
+  DWARFDie getSibling(const DWARFDebugInfoEntry *Die);
+  DWARFDie getFirstChild(const DWARFDebugInfoEntry *Die);
+
+  /// \brief Return the DIE object for a given offset inside the
+  /// unit's DIE vector.
+  ///
+  /// The unit needs to have its DIEs extracted for this method to work.
+  DWARFDie getDIEForOffset(uint32_t Offset) {
+    extractDIEsIfNeeded(false);
+    assert(!DieArray.empty());
+    auto it = std::lower_bound(
+        DieArray.begin(), DieArray.end(), Offset,
+        [](const DWARFDebugInfoEntry &LHS, uint32_t Offset) {
+          return LHS.getOffset() < Offset;
+        });
+    if (it != DieArray.end() && it->getOffset() == Offset)
+      return DWARFDie(this, &*it);
+    return DWARFDie();
+  }
+
+  uint32_t getLineTableOffset() const {
+    if (IndexEntry)
+      if (const auto *Contrib = IndexEntry->getOffset(DW_SECT_LINE))
+        return Contrib->Offset;
+    return 0;
+  }
+
+  die_iterator_range dies() {
+    extractDIEsIfNeeded(false);
+    return die_iterator_range(DieArray.begin(), DieArray.end());
+  }
+
+private:
+  /// Size in bytes of the .debug_info data associated with this compile unit.
+  size_t getDebugInfoSize() const { return Length + 4 - getHeaderSize(); }
+
+  /// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it
+  /// hasn't already been done. Returns the number of DIEs parsed at this call.
+  size_t extractDIEsIfNeeded(bool CUDieOnly);
+
+  /// extractDIEsToVector - Appends all parsed DIEs to a vector.
+  void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
+                           std::vector<DWARFDebugInfoEntry> &DIEs) const;
+
+  /// clearDIEs - Clear parsed DIEs to keep memory usage low.
+  void clearDIEs(bool KeepCUDie);
+
+  /// parseDWO - Parses .dwo file for current compile unit. Returns true if
+  /// it was actually constructed.
+  bool parseDWO();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFUNIT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
new file mode 100644
index 0000000..49ed4bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
@@ -0,0 +1,105 @@
+//===- DWARFUnitIndex.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H
+#define LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataExtractor.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class raw_ostream;
+
+enum DWARFSectionKind {
+  DW_SECT_INFO = 1,
+  DW_SECT_TYPES,
+  DW_SECT_ABBREV,
+  DW_SECT_LINE,
+  DW_SECT_LOC,
+  DW_SECT_STR_OFFSETS,
+  DW_SECT_MACINFO,
+  DW_SECT_MACRO,
+};
+
+class DWARFUnitIndex {
+  struct Header {
+    uint32_t Version;
+    uint32_t NumColumns;
+    uint32_t NumUnits;
+    uint32_t NumBuckets = 0;
+
+    bool parse(DataExtractor IndexData, uint32_t *OffsetPtr);
+    void dump(raw_ostream &OS) const;
+  };
+
+public:
+  class Entry {
+  public:
+    struct SectionContribution {
+      uint32_t Offset;
+      uint32_t Length;
+    };
+
+  private:
+    const DWARFUnitIndex *Index;
+    uint64_t Signature;
+    std::unique_ptr<SectionContribution[]> Contributions;
+    friend class DWARFUnitIndex;
+
+  public:
+    const SectionContribution *getOffset(DWARFSectionKind Sec) const;
+    const SectionContribution *getOffset() const;
+
+    const SectionContribution *getOffsets() const {
+      return Contributions.get();
+    }
+
+    uint64_t getSignature() const { return Signature; }
+  };
+
+private:
+  struct Header Header;
+
+  DWARFSectionKind InfoColumnKind;
+  int InfoColumn = -1;
+  std::unique_ptr<DWARFSectionKind[]> ColumnKinds;
+  std::unique_ptr<Entry[]> Rows;
+
+  static StringRef getColumnHeader(DWARFSectionKind DS);
+
+  bool parseImpl(DataExtractor IndexData);
+
+public:
+  DWARFUnitIndex(DWARFSectionKind InfoColumnKind)
+      : InfoColumnKind(InfoColumnKind) {}
+
+  explicit operator bool() const { return Header.NumBuckets; }
+
+  bool parse(DataExtractor IndexData);
+  void dump(raw_ostream &OS) const;
+
+  const Entry *getFromOffset(uint32_t Offset) const;
+  const Entry *getFromHash(uint64_t Offset) const;
+
+  ArrayRef<DWARFSectionKind> getColumnKinds() const {
+    return makeArrayRef(ColumnKinds.get(), Header.NumColumns);
+  }
+
+  ArrayRef<Entry> getRows() const {
+    return makeArrayRef(Rows.get(), Header.NumBuckets);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFVerifier.h b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
new file mode 100644
index 0000000..afaa299
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
@@ -0,0 +1,309 @@
+//===- DWARFVerifier.h ----------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H
+#define LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
+#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+
+#include <cstdint>
+#include <map>
+#include <set>
+
+namespace llvm {
+class raw_ostream;
+struct DWARFAttribute;
+class DWARFContext;
+class DWARFDie;
+class DWARFUnit;
+class DWARFDataExtractor;
+class DWARFDebugAbbrev;
+class DataExtractor;
+struct DWARFSection;
+
+/// A class that verifies DWARF debug information given a DWARF Context.
+class DWARFVerifier {
+public:
+  /// A class that keeps the address range information for a single DIE.
+  struct DieRangeInfo {
+    DWARFDie Die;
+
+    /// Sorted DWARFAddressRanges.
+    std::vector<DWARFAddressRange> Ranges;
+
+    /// Sorted DieRangeInfo children.
+    std::set<DieRangeInfo> Children;
+
+    DieRangeInfo() = default;
+    DieRangeInfo(DWARFDie Die) : Die(Die) {}
+
+    /// Used for unit testing.
+    DieRangeInfo(std::vector<DWARFAddressRange> Ranges)
+        : Ranges(std::move(Ranges)) {}
+
+    typedef std::vector<DWARFAddressRange>::const_iterator
+        address_range_iterator;
+    typedef std::set<DieRangeInfo>::const_iterator die_range_info_iterator;
+
+    /// Inserts the address range. If the range overlaps with an existing
+    /// range, the range is *not* added and an iterator to the overlapping
+    /// range is returned.
+    ///
+    /// This is used for finding overlapping ranges within the same DIE.
+    address_range_iterator insert(const DWARFAddressRange &R);
+
+    /// Finds an address range in the sorted vector of ranges.
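+    ///
+    /// Uses std::upper_bound to locate the first range greater than \p R,
+    /// then steps back one position so the returned iterator is the closest
+    /// candidate that could contain or precede \p R.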
+    address_range_iterator findRange(const DWARFAddressRange &R) const {
+      auto Begin = Ranges.begin();
+      auto End = Ranges.end();
+      auto Iter = std::upper_bound(Begin, End, R);
+      if (Iter != Begin)
+        --Iter;
+      return Iter;
+    }
+
+    /// Inserts the address range info. If any of its ranges overlaps with
+    /// a range in an existing range info, the range info is *not* added
+    /// and an iterator to the overlapping range info is returned.
+    ///
+    /// This is used for finding overlapping children of the same DIE.
+    die_range_info_iterator insert(const DieRangeInfo &RI);
+
+    /// Return true if the ranges in this object contain all ranges within RHS.
+    bool contains(const DieRangeInfo &RHS) const;
+
+    /// Return true if any range in this object intersects with any range in
+    /// RHS.
+    bool intersects(const DieRangeInfo &RHS) const;
+  };
+
+private:
+  raw_ostream &OS;
+  DWARFContext &DCtx;
+  DIDumpOptions DumpOpts;
+  /// A map that tracks all references (converted to absolute references) so
+  /// we can verify each reference points to a valid DIE and not an offset
+  /// that lies between two valid DIEs.
+  uint32_t NumDebugLineErrors = 0;
+
+  raw_ostream &error() const;
+  raw_ostream &warn() const;
+  raw_ostream &note() const;
+
+  /// Verifies the abbreviations section.
+  ///
+  /// This function currently checks that:
+  /// - No abbreviation declaration has more than one attribute with the same
+  /// name.
+  ///
+  /// \param Abbrev Pointer to the abbreviations section we are verifying
+  /// Abbrev can be a pointer to either .debug_abbrev or .debug_abbrev.dwo.
+  ///
+  /// \returns The number of errors that occurred during verification.
+  unsigned verifyAbbrevSection(const DWARFDebugAbbrev *Abbrev);
+
+  /// Verifies the header of a unit in the .debug_info section.
+  ///
+  /// This function currently checks for:
+  /// - Unit is in 32-bit DWARF format. The function can be modified to
+  /// support 64-bit format.
+  /// - The DWARF version is valid
+  /// - The unit type is valid (if unit is in version >=5)
+  /// - The unit doesn't extend beyond .debug_info section
+  /// - The address size is valid
+  /// - The offset in the .debug_abbrev section is valid
+  ///
+  /// \param DebugInfoData The .debug_info section data
+  /// \param Offset A pointer to the offset of the start of the unit. The
+  /// offset will be updated to point to the next unit in .debug_info
+  /// \param UnitIndex The index of the unit to be verified
+  /// \param UnitType A reference to the type of the unit
+  /// \param isUnitDWARF64 A reference to a flag that shows whether the unit is
+  /// in 64-bit format.
+  ///
+  /// \returns true if the header is verified successfully, false otherwise.
+  bool verifyUnitHeader(const DWARFDataExtractor DebugInfoData,
+                        uint32_t *Offset, unsigned UnitIndex, uint8_t &UnitType,
+                        bool &isUnitDWARF64);
+
+  /// Verifies the contents of a unit in the .debug_info section.
+  ///
+  /// This function currently verifies:
+  ///  - The debug info attributes.
+  ///  - The debug info forms.
+  ///  - The presence of a root DIE.
+  ///  - That the root DIE is a unit DIE.
+  ///  - If a unit type is provided, that the unit DIE matches the unit type.
+  ///  - The DIE ranges.
+  ///
+  /// \param Unit      The DWARF Unit to verify.
+  /// \param UnitType  An optional unit type which will be used to verify the
+  ///                  type of the unit DIE.
+  ///
+  /// \returns true if the content is verified successfully, false otherwise.
+  bool verifyUnitContents(DWARFUnit &Unit, uint8_t UnitType = 0);
+
+  /// Verify that all Die ranges are valid.
+  ///
+  /// This function currently checks for:
+  /// - cases in which lowPC >= highPC
+  ///
+  /// \returns Number of errors that occurred during verification.
+  unsigned verifyDieRanges(const DWARFDie &Die, DieRangeInfo &ParentRI);
+
+  /// Verifies the attribute's DWARF attribute and its value.
+  ///
+  /// This function currently checks for:
+  /// - DW_AT_ranges values is a valid .debug_ranges offset
+  /// - DW_AT_stmt_list is a valid .debug_line offset
+  ///
+  /// \param Die          The DWARF DIE that owns the attribute value
+  /// \param AttrValue    The DWARF attribute value to check
+  ///
+  /// \returns The number of errors that occurred during verification of the
+  /// attributes' values in a .debug_info section unit
+  unsigned verifyDebugInfoAttribute(const DWARFDie &Die,
+                                    DWARFAttribute &AttrValue);
+
+  /// Verifies the attribute's DWARF form.
+  ///
+  /// This function currently checks for:
+  /// - All DW_FORM_ref values that are CU relative have valid CU offsets
+  /// - All DW_FORM_ref_addr values have valid .debug_info offsets
+  /// - All DW_FORM_strp values have valid .debug_str offsets
+  ///
+  /// \param Die          The DWARF DIE that owns the attribute value
+  /// \param AttrValue    The DWARF attribute value to check
+  ///
+  /// \returns The number of errors that occurred during verification of the
+  /// attributes' forms in a .debug_info section unit
+  unsigned verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
+
+  /// Verifies all valid references that were found when iterating through
+  /// all of the DIE attributes.
+  ///
+  /// This function verifies that all references point to DIEs whose offsets
+  /// match. This helps ensure that if a DWARF link phase moved things
+  /// around, it did not create invalid references by failing to relocate
+  /// CU relative and absolute references.
+  ///
+  /// \returns The number of errors that occurred during verification of
+  /// references for the .debug_info section
+  unsigned verifyDebugInfoReferences();
+
+  /// Verify the DW_AT_stmt_list encoding and value and ensure that no two
+  /// compile units have the same DW_AT_stmt_list value.
+  void verifyDebugLineStmtOffsets();
+
+  /// Verify that all of the rows in the line table are valid.
+  ///
+  /// This function currently checks for:
+  /// - addresses within a sequence that decrease in value
+  /// - invalid file indexes
+  void verifyDebugLineRows();
+
+  /// Verify that an Apple-style accelerator table is valid.
+  ///
+  /// This function currently checks that:
+  /// - The fixed part of the header fits in the section
+  /// - The size of the section is as large as what the header describes
+  /// - There is at least one atom
+  /// - The form for each atom is valid
+  /// - The tag for each DIE in the table is valid
+  /// - The buckets have a valid index, or they are empty
+  /// - Each hashdata offset is valid
+  /// - Each DIE is valid
+  ///
+  /// \param AccelSection pointer to the section containing the acceleration table
+  /// \param StrData pointer to the string section
+  /// \param SectionName the name of the table we're verifying
+  ///
+  /// \returns The number of errors that occurred during verification
+  unsigned verifyAppleAccelTable(const DWARFSection *AccelSection,
+                                 DataExtractor *StrData,
+                                 const char *SectionName);
+
+  unsigned verifyDebugNamesCULists(const DWARFDebugNames &AccelTable);
+  unsigned verifyNameIndexBuckets(const DWARFDebugNames::NameIndex &NI,
+                                  const DataExtractor &StrData);
+  unsigned verifyNameIndexAbbrevs(const DWARFDebugNames::NameIndex &NI);
+  unsigned verifyNameIndexAttribute(const DWARFDebugNames::NameIndex &NI,
+                                    const DWARFDebugNames::Abbrev &Abbr,
+                                    DWARFDebugNames::AttributeEncoding AttrEnc);
+
+  /// Verify that the DWARF v5 accelerator table is valid.
+  ///
+  /// This function currently checks that:
+  /// - Headers of individual Name Indices fit into the section and can be parsed.
+  /// - Abbreviation tables can be parsed and contain valid index attributes
+  ///   with correct form encodings.
+  /// - The CU lists reference existing compile units.
+  /// - The buckets have a valid index, or they are empty.
+  /// - All names are reachable via the hash table (they have the correct hash,
+  ///   and the hash is in the correct bucket).
+  ///
+  /// \param AccelSection section containing the acceleration table
+  /// \param StrData string section
+  ///
+  /// \returns The number of errors that occurred during verification
+  unsigned verifyDebugNames(const DWARFSection &AccelSection,
+                            const DataExtractor &StrData);
+
+public:
+  DWARFVerifier(raw_ostream &S, DWARFContext &D,
+                DIDumpOptions DumpOpts = DIDumpOptions::getForSingleDIE())
+      : OS(S), DCtx(D), DumpOpts(std::move(DumpOpts)) {}
+  /// Verify the information in any of the following sections, if available:
+  /// .debug_abbrev, .debug_abbrev.dwo
+  ///
+  /// Any errors are reported to the stream that this object was
+  /// constructed with.
+  ///
+  /// \returns true if .debug_abbrev and .debug_abbrev.dwo verify successfully,
+  /// false otherwise.
+  bool handleDebugAbbrev();
+
+  /// Verify the information in the .debug_info section.
+  ///
+  /// Any errors are reported to the stream that this object was
+  /// constructed with.
+  ///
+  /// \returns true if the .debug_info verifies successfully, false otherwise.
+  bool handleDebugInfo();
+
+  /// Verify the information in the .debug_line section.
+  ///
+  /// Any errors are reported to the stream that this object was
+  /// constructed with.
+  ///
+  /// \returns true if the .debug_line verifies successfully, false otherwise.
+  bool handleDebugLine();
+
+  /// Verify the information in accelerator tables, if they exist.
+  ///
+  /// Any errors are reported to the stream that this object was
+  /// constructed with.
+  ///
+  /// \returns true if the existing Apple-style accelerator tables verify
+  /// successfully, false otherwise.
+  bool handleAccelTables();
+};
+
+static inline bool operator<(const DWARFVerifier::DieRangeInfo &LHS,
+                             const DWARFVerifier::DieRangeInfo &RHS) {
+  return std::tie(LHS.Ranges, LHS.Die) < std::tie(RHS.Ranges, RHS.Die);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/MSF/IMSFFile.h b/linux-x64/clang/include/llvm/DebugInfo/MSF/IMSFFile.h
new file mode 100644
index 0000000..f98e715
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/MSF/IMSFFile.h
@@ -0,0 +1,42 @@
+//===- IMSFFile.h - Abstract base class for an MSF file ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_IMSFFILE_H
+#define LLVM_DEBUGINFO_MSF_IMSFFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+namespace msf {
+
+class IMSFFile {
+public:
+  virtual ~IMSFFile() = default;
+
+  virtual uint32_t getBlockSize() const = 0;
+  virtual uint32_t getBlockCount() const = 0;
+
+  virtual uint32_t getNumStreams() const = 0;
+  virtual uint32_t getStreamByteSize(uint32_t StreamIndex) const = 0;
+  virtual ArrayRef<support::ulittle32_t>
+  getStreamBlockList(uint32_t StreamIndex) const = 0;
+
+  virtual Expected<ArrayRef<uint8_t>> getBlockData(uint32_t BlockIndex,
+                                                   uint32_t NumBytes) const = 0;
+  virtual Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
+                             ArrayRef<uint8_t> Data) const = 0;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_IMSFFILE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFBuilder.h
new file mode 100644
index 0000000..19e5c31
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFBuilder.h
@@ -0,0 +1,140 @@
+//===- MSFBuilder.h - MSF Directory & Metadata Builder ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MSFBUILDER_H
+#define LLVM_DEBUGINFO_MSF_MSFBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+
+class MSFBuilder {
+public:
+  /// \brief Create a new `MSFBuilder`.
+  ///
+  /// \param BlockSize The internal block size used by the PDB file.  See
+  /// isValidBlockSize() for a list of valid block sizes.
+  ///
+  /// \param MinBlockCount Causes the builder to reserve space up front for
+  /// at least `MinBlockCount` blocks.  This is useful when using `MSFBuilder`
+  /// to read an existing MSF that you want to write back out later.  The
+  /// original MSF file's SuperBlock contains the exact number of blocks used
+  /// by the file, so it is a good hint as to how many blocks the new MSF file
+  /// will contain.  Furthermore, it is actually necessary in this case.  To
+  /// preserve stability of the file's layout, it is helpful to try to keep
+  /// all streams mapped to their original block numbers.  To ensure that this
+  /// is possible, space for all blocks must be allocated beforehand so that
+  /// streams can be assigned to them.
+  ///
+  /// \param CanGrow If true, any operation which results in an attempt to
+  /// locate a free block when all available blocks have been exhausted will
+  /// allocate a new block, thereby growing the size of the final MSF file.
+  /// When false, any such attempt will result in an error.  This is especially
+  /// useful in testing scenarios when you know your test isn't going to do
+  /// anything to increase the size of the file, so having an Error returned if
+  /// it were to happen would catch a programming error.
+  ///
+  /// \returns the `MSFBuilder` on success, or an error.  Currently the only
+  /// way this can fail is if an invalid block size is specified, or
+  /// `MinBlockCount` does not leave enough room for the mandatory reserved
+  /// blocks required by an MSF file.
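+  ///
+  /// A minimal usage sketch (the block size and stream size are arbitrary):
+  /// \code
+  ///   BumpPtrAllocator Alloc;
+  ///   Expected<MSFBuilder> Msf = MSFBuilder::create(Alloc, 4096);
+  ///   if (!Msf)
+  ///     return Msf.takeError();
+  ///   Expected<uint32_t> StreamIdx = Msf->addStream(/*Size=*/1024);
+  /// \endcode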
+  static Expected<MSFBuilder> create(BumpPtrAllocator &Allocator,
+                                     uint32_t BlockSize,
+                                     uint32_t MinBlockCount = 0,
+                                     bool CanGrow = true);
+
+  /// Request the block map to be at a specific block address.  This is useful
+  /// when editing an MSF and you want the layout to be as stable as possible.
+  Error setBlockMapAddr(uint32_t Addr);
+  Error setDirectoryBlocksHint(ArrayRef<uint32_t> DirBlocks);
+  void setFreePageMap(uint32_t Fpm);
+  void setUnknown1(uint32_t Unk1);
+
+  /// Add a stream to the MSF file with the given size, occupying the given
+  /// list of blocks.  This is useful when reading an MSF file and you want a
+  /// particular stream to occupy the original set of blocks.  If the given
+  /// blocks are already allocated, or if the number of blocks specified is
+  /// incorrect for the given stream size, this function will return an Error.
+  Expected<uint32_t> addStream(uint32_t Size, ArrayRef<uint32_t> Blocks);
+
+  /// Add a stream to the MSF file with the given size, occupying any available
+  /// blocks that the builder decides to use.  This is useful when building a
+  /// new PDB file from scratch and you don't care what blocks a stream occupies
+  /// but you just want it to work.
+  Expected<uint32_t> addStream(uint32_t Size);
+
+  /// Update the size of an existing stream.  This will allocate or deallocate
+  /// blocks as needed to match the requested size.  This can fail if `CanGrow`
+  /// was set to false when initializing the `MSFBuilder`.
+  Error setStreamSize(uint32_t Idx, uint32_t Size);
+
+  /// Get the total number of streams in the MSF layout.  This increases by 1
+  /// with every successful call to `addStream`.
+  uint32_t getNumStreams() const;
+
+  /// Get the size of a stream by index.
+  uint32_t getStreamSize(uint32_t StreamIdx) const;
+
+  /// Get the list of blocks allocated to a particular stream.
+  ArrayRef<uint32_t> getStreamBlocks(uint32_t StreamIdx) const;
+
+  /// Get the total number of blocks that will be allocated to actual data in
+  /// this MSF file.
+  uint32_t getNumUsedBlocks() const;
+
+  /// Get the total number of blocks that exist in the MSF file but are not
+  /// allocated to any valid data.
+  uint32_t getNumFreeBlocks() const;
+
+  /// Get the total number of blocks in the MSF file.  In practice this is equal
+  /// to `getNumUsedBlocks() + getNumFreeBlocks()`.
+  uint32_t getTotalBlockCount() const;
+
+  /// Check whether a particular block is allocated or free.
+  bool isBlockFree(uint32_t Idx) const;
+
+  /// Finalize the layout and build the headers and structures that describe the
+  /// MSF layout and can be written directly to the MSF file.
+  Expected<MSFLayout> build();
+
+  BumpPtrAllocator &getAllocator() { return Allocator; }
+
+private:
+  MSFBuilder(uint32_t BlockSize, uint32_t MinBlockCount, bool CanGrow,
+             BumpPtrAllocator &Allocator);
+
+  Error allocateBlocks(uint32_t NumBlocks, MutableArrayRef<uint32_t> Blocks);
+  uint32_t computeDirectoryByteSize() const;
+
+  using BlockList = std::vector<uint32_t>;
+
+  BumpPtrAllocator &Allocator;
+
+  bool IsGrowable;
+  uint32_t FreePageMap;
+  uint32_t Unknown1 = 0;
+  uint32_t BlockSize;
+  uint32_t BlockMapAddr;
+  BitVector FreeBlocks;
+  std::vector<uint32_t> DirectoryBlocks;
+  std::vector<std::pair<uint32_t, BlockList>> StreamData;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFBUILDER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFCommon.h b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFCommon.h
new file mode 100644
index 0000000..dd53264
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFCommon.h
@@ -0,0 +1,162 @@
+//===- MSFCommon.h - Common types and functions for MSF files ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MSFCOMMON_H
+#define LLVM_DEBUGINFO_MSF_MSFCOMMON_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MathExtras.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+
+static const char Magic[] = {'M',  'i',  'c',    'r', 'o', 's',  'o',  'f',
+                             't',  ' ',  'C',    '/', 'C', '+',  '+',  ' ',
+                             'M',  'S',  'F',    ' ', '7', '.',  '0',  '0',
+                             '\r', '\n', '\x1a', 'D', 'S', '\0', '\0', '\0'};
+
+// The superblock is overlaid at the beginning of the file (offset 0).
+// It starts with a magic header and is followed by information which
+// describes the layout of the file system.
+struct SuperBlock {
+  char MagicBytes[sizeof(Magic)];
+  // The file system is split into a variable number of fixed size elements.
+  // These elements are referred to as blocks.  The size of a block may vary
+  // from system to system.
+  support::ulittle32_t BlockSize;
+  // The index of the free block map.
+  support::ulittle32_t FreeBlockMapBlock;
+  // This contains the number of blocks resident in the file system.  In
+  // practice, NumBlocks * BlockSize is equivalent to the size of the MSF
+  // file.
+  support::ulittle32_t NumBlocks;
+  // This contains the number of bytes which make up the directory.
+  support::ulittle32_t NumDirectoryBytes;
+  // This field's purpose is not yet known.
+  support::ulittle32_t Unknown1;
+  // This contains the block # of the block map.
+  support::ulittle32_t BlockMapAddr;
+};
+
+struct MSFLayout {
+  MSFLayout() = default;
+
+  uint32_t mainFpmBlock() const {
+    assert(SB->FreeBlockMapBlock == 1 || SB->FreeBlockMapBlock == 2);
+    return SB->FreeBlockMapBlock;
+  }
+
+  uint32_t alternateFpmBlock() const {
+    // If mainFpmBlock is 1, this is 2.  If mainFpmBlock is 2, this is 1.
+    return 3U - mainFpmBlock();
+  }
+
+  const SuperBlock *SB = nullptr;
+  BitVector FreePageMap;
+  ArrayRef<support::ulittle32_t> DirectoryBlocks;
+  ArrayRef<support::ulittle32_t> StreamSizes;
+  std::vector<ArrayRef<support::ulittle32_t>> StreamMap;
+};
+
+/// \brief Describes the layout of a stream in an MSF layout.  A "stream" here
+/// is defined as any logical unit of data which may be arranged inside the MSF
+/// file as a sequence of (possibly discontiguous) blocks.  When we want to read
+/// from a particular MSF Stream, we fill out a stream layout structure and the
+/// reader uses it to determine which blocks in the underlying MSF file contain
+/// the data, so that it can be pieced together in the right order.
+class MSFStreamLayout {
+public:
+  uint32_t Length;
+  std::vector<support::ulittle32_t> Blocks;
+};
+
+/// \brief Determine the layout of the FPM stream, given the MSF layout.  An FPM
+/// stream spans 1 or more blocks, each at equally spaced intervals throughout
+/// the file.
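+///
+/// For example, with a block size of 4096, the main FPM occupies blocks
+/// 1, 4097, 8193, ..., and the alternate FPM occupies blocks 2, 4098,
+/// 8194, ... (see MSFLayout::mainFpmBlock() and getFpmIntervalLength()).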
+MSFStreamLayout getFpmStreamLayout(const MSFLayout &Msf,
+                                   bool IncludeUnusedFpmData = false,
+                                   bool AltFpm = false);
+
+inline bool isValidBlockSize(uint32_t Size) {
+  switch (Size) {
+  case 512:
+  case 1024:
+  case 2048:
+  case 4096:
+    return true;
+  }
+  return false;
+}
+
+// Super Block, Fpm0, Fpm1, and Block Map
+inline uint32_t getMinimumBlockCount() { return 4; }
+
+// Super Block, Fpm0, and Fpm1 are reserved.  The Block Map, although
+// required, need not be at block 3.
+inline uint32_t getFirstUnreservedBlock() { return 3; }
+
+inline uint64_t bytesToBlocks(uint64_t NumBytes, uint64_t BlockSize) {
+  return divideCeil(NumBytes, BlockSize);
+}
+
+inline uint64_t blockToOffset(uint64_t BlockNumber, uint64_t BlockSize) {
+  return BlockNumber * BlockSize;
+}
+
+inline uint32_t getFpmIntervalLength(const MSFLayout &L) {
+  return L.SB->BlockSize;
+}
+
+/// Given an MSF with the specified block size and number of blocks, determine
+/// how many pieces the specified Fpm is split into.
+/// \p BlockSize - the block size of the MSF
+/// \p NumBlocks - the total number of blocks in the MSF
+/// \p IncludeUnusedFpmData - When true, this will count every block that is
+///    both in the file and matches the form of an FPM block, even if some of
+///    those FPM blocks are unused (a single FPM block can describe the
+///    allocation status of up to 32,768 blocks, although one appears only
+///    every 4,096 blocks).  So there are 8x as many blocks that match the
+///    form as there are blocks that are necessary to describe the allocation
+///    status of the file.  When this parameter is false, these extraneous
+///    trailing blocks are not counted.
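+///
+///    For example, with BlockSize = 4096 and NumBlocks = 10000, only
+///    divideCeil(10000, 8 * 4096) = 1 interval is needed to describe every
+///    block, but blocks of the form 4096 * k + 1 occur
+///    divideCeil(10000 - 1, 4096) = 3 times within the file.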
+inline uint32_t getNumFpmIntervals(uint32_t BlockSize, uint32_t NumBlocks,
+                                   bool IncludeUnusedFpmData, int FpmNumber) {
+  assert(FpmNumber == 1 || FpmNumber == 2);
+  if (IncludeUnusedFpmData) {
+    // This calculation determines how many times a number of the form
+    // BlockSize * k + FpmNumber appears in the range [0, NumBlocks).  We
+    // only need to do this when unused data is included, since the number
+    // of blocks dwarfs the number of fpm blocks.
+    return divideCeil(NumBlocks - FpmNumber, BlockSize);
+  }
+
+  // We want the minimum number of intervals required, where each interval can
+  // represent BlockSize * 8 blocks.
+  return divideCeil(NumBlocks, 8 * BlockSize);
+}
+
+inline uint32_t getNumFpmIntervals(const MSFLayout &L,
+                                   bool IncludeUnusedFpmData = false,
+                                   bool AltFpm = false) {
+  return getNumFpmIntervals(L.SB->BlockSize, L.SB->NumBlocks,
+                            IncludeUnusedFpmData,
+                            AltFpm ? L.alternateFpmBlock() : L.mainFpmBlock());
+}
+
+Error validateSuperBlock(const SuperBlock &SB);
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFCOMMON_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFError.h b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFError.h
new file mode 100644
index 0000000..e66aeca
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/MSF/MSFError.h
@@ -0,0 +1,47 @@
+//===- MSFError.h - Error extensions for MSF Files --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MSFERROR_H
+#define LLVM_DEBUGINFO_MSF_MSFERROR_H
+
+#include "llvm/Support/Error.h"
+
+#include <string>
+
+namespace llvm {
+namespace msf {
+enum class msf_error_code {
+  unspecified = 1,
+  insufficient_buffer,
+  not_writable,
+  no_stream,
+  invalid_format,
+  block_in_use
+};
+
+/// Base class for errors originating while parsing raw PDB files.
+class MSFError : public ErrorInfo<MSFError> {
+public:
+  static char ID;
+  MSFError(msf_error_code C);
+  MSFError(const std::string &Context);
+  MSFError(msf_error_code C, const std::string &Context);
+
+  void log(raw_ostream &OS) const override;
+  const std::string &getErrorMessage() const;
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+  msf_error_code Code;
+};
+} // namespace msf
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFERROR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/MSF/MappedBlockStream.h b/linux-x64/clang/include/llvm/DebugInfo/MSF/MappedBlockStream.h
new file mode 100644
index 0000000..f65e529
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/MSF/MappedBlockStream.h
@@ -0,0 +1,163 @@
+//==- MappedBlockStream.h - Discontiguous stream data in an MSF --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
+#define LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+
+struct MSFLayout;
+
+/// MappedBlockStream represents data stored in an MSF file, split into chunks
+/// of a particular size (called the Block Size), whose chunks are not
+/// necessarily contiguous.  The arrangement of these chunks within the file
+/// is described by some other metadata contained within the MSF file.  In
+/// the case of a standard MSF Stream, the layout of the stream's blocks
+/// is described by the MSF "directory", but in the case of the directory
+/// itself, the layout is described by an array at a fixed location within
+/// the MSF.  MappedBlockStream provides methods for reading from and writing
+/// to one of these streams transparently, as if it were a contiguous sequence
+/// of bytes.
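+///
+/// A typical read, sketched (Layout, MsfData, and Alloc are assumed to
+/// describe an already-parsed MSF; the stream index and sizes are
+/// arbitrary):
+/// \code
+///   std::unique_ptr<MappedBlockStream> S =
+///       MappedBlockStream::createIndexedStream(Layout, MsfData,
+///                                              /*StreamIndex=*/1, Alloc);
+///   ArrayRef<uint8_t> Bytes;
+///   if (Error E = S->readBytes(/*Offset=*/0, /*Size=*/16, Bytes))
+///     return E; // offset out of range or not enough data
+/// \endcode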
+class MappedBlockStream : public BinaryStream {
+  friend class WritableMappedBlockStream;
+
+public:
+  static std::unique_ptr<MappedBlockStream>
+  createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
+               BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<MappedBlockStream>
+  createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
+                      uint32_t StreamIndex, BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<MappedBlockStream>
+  createFpmStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
+                  BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<MappedBlockStream>
+  createDirectoryStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
+                        BumpPtrAllocator &Allocator);
+
+  support::endianness getEndian() const override {
+    return support::little;
+  }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override;
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override;
+
+  uint32_t getLength() override;
+
+  BumpPtrAllocator &getAllocator() { return Allocator; }
+
+  void invalidateCache();
+
+  uint32_t getBlockSize() const { return BlockSize; }
+  uint32_t getNumBlocks() const { return StreamLayout.Blocks.size(); }
+  uint32_t getStreamLength() const { return StreamLayout.Length; }
+
+protected:
+  MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
+                    BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
+
+private:
+  const MSFStreamLayout &getStreamLayout() const { return StreamLayout; }
+  void fixCacheAfterWrite(uint32_t Offset, ArrayRef<uint8_t> Data) const;
+
+  Error readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer);
+  bool tryReadContiguously(uint32_t Offset, uint32_t Size,
+                           ArrayRef<uint8_t> &Buffer);
+
+  const uint32_t BlockSize;
+  const MSFStreamLayout StreamLayout;
+  BinaryStreamRef MsfData;
+
+  using CacheEntry = MutableArrayRef<uint8_t>;
+
+  // We just store the allocator by reference.  We use this to allocate
+  // contiguous memory for things like arrays or strings that cross a block
+  // boundary, and this memory is expected to outlive the stream.  For example,
+  // someone could create a stream, read some stuff, then close the stream, and
+  // we would like outstanding references to fields to remain valid since the
+  // entire file is mapped anyway.  Because of that, the user must supply the
+  // allocator to allocate broken records from.
+  BumpPtrAllocator &Allocator;
+  DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
+};
+
+class WritableMappedBlockStream : public WritableBinaryStream {
+public:
+  static std::unique_ptr<WritableMappedBlockStream>
+  createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
+               WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<WritableMappedBlockStream>
+  createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
+                      uint32_t StreamIndex, BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<WritableMappedBlockStream>
+  createDirectoryStream(const MSFLayout &Layout,
+                        WritableBinaryStreamRef MsfData,
+                        BumpPtrAllocator &Allocator);
+
+  static std::unique_ptr<WritableMappedBlockStream>
+  createFpmStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
+                  BumpPtrAllocator &Allocator, bool AltFpm = false);
+
+  support::endianness getEndian() const override {
+    return support::little;
+  }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override;
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override;
+  uint32_t getLength() override;
+
+  Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) override;
+
+  Error commit() override;
+
+  const MSFStreamLayout &getStreamLayout() const {
+    return ReadInterface.getStreamLayout();
+  }
+
+  uint32_t getBlockSize() const { return ReadInterface.getBlockSize(); }
+  uint32_t getNumBlocks() const { return ReadInterface.getNumBlocks(); }
+  uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
+
+protected:
+  WritableMappedBlockStream(uint32_t BlockSize,
+                            const MSFStreamLayout &StreamLayout,
+                            WritableBinaryStreamRef MsfData,
+                            BumpPtrAllocator &Allocator);
+
+private:
+  MappedBlockStream ReadInterface;
+  WritableBinaryStreamRef WriteInterface;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
new file mode 100644
index 0000000..9713dce
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
@@ -0,0 +1,59 @@
+//===- ConcreteSymbolEnumerator.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
+#define LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
+
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+
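+/// Wraps an IPDBEnumSymbols and narrows each enumerated PDBSymbol to a
+/// concrete subclass via unique_dyn_cast_or_null.  A sketch, assuming the
+/// wrapped enumerator RawEnum yields only function symbols:
+/// \code
+///   ConcreteSymbolEnumerator<PDBSymbolFunc> Funcs(std::move(RawEnum));
+///   while (std::unique_ptr<PDBSymbolFunc> F = Funcs.getNext())
+///     process(*F); // hypothetical handler
+/// \endcode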
+template <typename ChildType>
+class ConcreteSymbolEnumerator : public IPDBEnumChildren<ChildType> {
+public:
+  ConcreteSymbolEnumerator(std::unique_ptr<IPDBEnumSymbols> SymbolEnumerator)
+      : Enumerator(std::move(SymbolEnumerator)) {}
+
+  ~ConcreteSymbolEnumerator() override = default;
+
+  uint32_t getChildCount() const override {
+    return Enumerator->getChildCount();
+  }
+
+  std::unique_ptr<ChildType> getChildAtIndex(uint32_t Index) const override {
+    std::unique_ptr<PDBSymbol> Child = Enumerator->getChildAtIndex(Index);
+    return unique_dyn_cast_or_null<ChildType>(Child);
+  }
+
+  std::unique_ptr<ChildType> getNext() override {
+    return unique_dyn_cast_or_null<ChildType>(Enumerator->getNext());
+  }
+
+  void reset() override { Enumerator->reset(); }
+
+  ConcreteSymbolEnumerator<ChildType> *clone() const override {
+    std::unique_ptr<IPDBEnumSymbols> WrappedClone(Enumerator->clone());
+    return new ConcreteSymbolEnumerator<ChildType>(std::move(WrappedClone));
+  }
+
+private:
+  std::unique_ptr<IPDBEnumSymbols> Enumerator;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
new file mode 100644
index 0000000..930bea6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
@@ -0,0 +1,35 @@
+//===- DIADataStream.h - DIA implementation of IPDBDataStream ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
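+  /// Evaluates to true once a non-empty index has been parsed;
+  /// Header.NumBuckets defaults to 0 until then.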
+#define LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
+
+namespace llvm {
+namespace pdb {
+class DIADataStream : public IPDBDataStream {
+public:
+  explicit DIADataStream(CComPtr<IDiaEnumDebugStreamData> DiaStreamData);
+
+  uint32_t getRecordCount() const override;
+  std::string getName() const override;
+  llvm::Optional<RecordType> getItemAtIndex(uint32_t Index) const override;
+  bool getNext(RecordType &Record) override;
+  void reset() override;
+  DIADataStream *clone() const override;
+
+private:
+  CComPtr<IDiaEnumDebugStreamData> StreamData;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
new file mode 100644
index 0000000..ffae664
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
@@ -0,0 +1,38 @@
+//==- DIAEnumDebugStreams.h - DIA Debug Stream Enumerator impl ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+namespace pdb {
+
+class IPDBDataStream;
+
+class DIAEnumDebugStreams : public IPDBEnumChildren<IPDBDataStream> {
+public:
+  explicit DIAEnumDebugStreams(CComPtr<IDiaEnumDebugStreams> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+  ChildTypePtr getNext() override;
+  void reset() override;
+  DIAEnumDebugStreams *clone() const override;
+
+private:
+  CComPtr<IDiaEnumDebugStreams> Enumerator;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
new file mode 100644
index 0000000..39490a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
@@ -0,0 +1,40 @@
+//==- DIAEnumInjectedSources.h - DIA Injected Sources Enumerator -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumInjectedSources : public IPDBEnumChildren<IPDBInjectedSource> {
+public:
+  explicit DIAEnumInjectedSources(
+      const DIASession &PDBSession,
+      CComPtr<IDiaEnumInjectedSources> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+  ChildTypePtr getNext() override;
+  void reset() override;
+  DIAEnumInjectedSources *clone() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaEnumInjectedSources> Enumerator;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
new file mode 100644
index 0000000..08f0de1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
@@ -0,0 +1,37 @@
+//==- DIAEnumLineNumbers.h - DIA Line Number Enumerator impl -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBLineNumber;
+
+class DIAEnumLineNumbers : public IPDBEnumChildren<IPDBLineNumber> {
+public:
+  explicit DIAEnumLineNumbers(CComPtr<IDiaEnumLineNumbers> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+  ChildTypePtr getNext() override;
+  void reset() override;
+  DIAEnumLineNumbers *clone() const override;
+
+private:
+  CComPtr<IDiaEnumLineNumbers> Enumerator;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
new file mode 100644
index 0000000..5c37d9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
@@ -0,0 +1,40 @@
+//==- DIAEnumSectionContribs.h ----------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSectionContribs : public IPDBEnumChildren<IPDBSectionContrib> {
+public:
+  explicit DIAEnumSectionContribs(
+      const DIASession &PDBSession,
+      CComPtr<IDiaEnumSectionContribs> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+  ChildTypePtr getNext() override;
+  void reset() override;
+  DIAEnumSectionContribs *clone() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaEnumSectionContribs> Enumerator;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
new file mode 100644
index 0000000..e69d18f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
@@ -0,0 +1,39 @@
+//==- DIAEnumSourceFiles.h - DIA Source File Enumerator impl -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSourceFiles : public IPDBEnumChildren<IPDBSourceFile> {
+public:
+  explicit DIAEnumSourceFiles(const DIASession &PDBSession,
+                              CComPtr<IDiaEnumSourceFiles> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+  ChildTypePtr getNext() override;
+  void reset() override;
+  DIAEnumSourceFiles *clone() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaEnumSourceFiles> Enumerator;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
new file mode 100644
index 0000000..f779cd1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
@@ -0,0 +1,39 @@
+//==- DIAEnumSymbols.h - DIA Symbol Enumerator impl --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSymbols : public IPDBEnumChildren<PDBSymbol> {
+public:
+  explicit DIAEnumSymbols(const DIASession &Session,
+                          CComPtr<IDiaEnumSymbols> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+  std::unique_ptr<PDBSymbol> getNext() override;
+  void reset() override;
+  DIAEnumSymbols *clone() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaEnumSymbols> Enumerator;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
new file mode 100644
index 0000000..926fcfe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
@@ -0,0 +1,37 @@
+//===- DIAEnumTables.h - DIA Tables Enumerator Impl -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBTable.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBTable;
+
+class DIAEnumTables : public IPDBEnumChildren<IPDBTable> {
+public:
+  explicit DIAEnumTables(CComPtr<IDiaEnumTables> DiaEnumerator);
+
+  uint32_t getChildCount() const override;
+  std::unique_ptr<IPDBTable> getChildAtIndex(uint32_t Index) const override;
+  std::unique_ptr<IPDBTable> getNext() override;
+  void reset() override;
+  DIAEnumTables *clone() const override;
+
+private:
+  CComPtr<IDiaEnumTables> Enumerator;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAError.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAError.h
new file mode 100644
index 0000000..35a39a0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAError.h
@@ -0,0 +1,45 @@
+//===- DIAError.h - Error extensions for PDB DIA implementation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+enum class dia_error_code {
+  unspecified = 1,
+  could_not_create_impl,
+  invalid_file_format,
+  invalid_parameter,
+  already_loaded,
+  debug_info_mismatch,
+};
+
+/// Base class for errors originating in DIA SDK, e.g. COM calls
+class DIAError : public ErrorInfo<DIAError> {
+public:
+  static char ID;
+  DIAError(dia_error_code C);
+  DIAError(StringRef Context);
+  DIAError(dia_error_code C, StringRef Context);
+
+  void log(raw_ostream &OS) const override;
+  StringRef getErrorMessage() const;
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+  dia_error_code Code;
+};
+}
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h
new file mode 100644
index 0000000..635508d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h
@@ -0,0 +1,38 @@
+//===- DIAInjectedSource.h - DIA impl for IPDBInjectedSource ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAInjectedSource : public IPDBInjectedSource {
+public:
+  explicit DIAInjectedSource(CComPtr<IDiaInjectedSource> DiaSourceFile);
+
+  uint32_t getCrc32() const override;
+  uint64_t getCodeByteSize() const override;
+  std::string getFileName() const override;
+  std::string getObjectFileName() const override;
+  std::string getVirtualFileName() const override;
+  PDB_SourceCompression getCompression() const override;
+  std::string getCode() const override;
+
+private:
+  CComPtr<IDiaInjectedSource> SourceFile;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
new file mode 100644
index 0000000..a59e3a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
@@ -0,0 +1,40 @@
+//===- DIALineNumber.h - DIA implementation of IPDBLineNumber ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
+
+namespace llvm {
+namespace pdb {
+class DIALineNumber : public IPDBLineNumber {
+public:
+  explicit DIALineNumber(CComPtr<IDiaLineNumber> DiaLineNumber);
+
+  uint32_t getLineNumber() const override;
+  uint32_t getLineNumberEnd() const override;
+  uint32_t getColumnNumber() const override;
+  uint32_t getColumnNumberEnd() const override;
+  uint32_t getAddressSection() const override;
+  uint32_t getAddressOffset() const override;
+  uint32_t getRelativeVirtualAddress() const override;
+  uint64_t getVirtualAddress() const override;
+  uint32_t getLength() const override;
+  uint32_t getSourceFileId() const override;
+  uint32_t getCompilandId() const override;
+  bool isStatement() const override;
+
+private:
+  CComPtr<IDiaLineNumber> LineNumber;
+};
+}
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
new file mode 100644
index 0000000..dfb3564
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
@@ -0,0 +1,233 @@
+//===- DIARawSymbol.h - DIA implementation of IPDBRawSymbol ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+class DIARawSymbol : public IPDBRawSymbol {
+public:
+  DIARawSymbol(const DIASession &PDBSession, CComPtr<IDiaSymbol> DiaSymbol);
+
+  void dump(raw_ostream &OS, int Indent) const override;
+
+  CComPtr<IDiaSymbol> getDiaSymbol() const { return Symbol; }
+
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type, StringRef Name,
+               PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByAddr(PDB_SymType Type, StringRef Name,
+                     PDB_NameSearchFlags Flags,
+                     uint32_t Section, uint32_t Offset) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+                   uint64_t VA) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+                    uint32_t RVA) const override;
+
+  std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByRVA(uint32_t RVA) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByVA(uint64_t VA) const override;
+
+  std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
+                         uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;
+
+  void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const override;
+  void getFrontEndVersion(VersionInfo &Version) const override;
+  void getBackEndVersion(VersionInfo &Version) const override;
+  PDB_MemberAccess getAccess() const override;
+  uint32_t getAddressOffset() const override;
+  uint32_t getAddressSection() const override;
+  uint32_t getAge() const override;
+  uint32_t getArrayIndexTypeId() const override;
+  uint32_t getBaseDataOffset() const override;
+  uint32_t getBaseDataSlot() const override;
+  uint32_t getBaseSymbolId() const override;
+  PDB_BuiltinType getBuiltinType() const override;
+  uint32_t getBitPosition() const override;
+  PDB_CallingConv getCallingConvention() const override;
+  uint32_t getClassParentId() const override;
+  std::string getCompilerName() const override;
+  uint32_t getCount() const override;
+  uint32_t getCountLiveRanges() const override;
+  PDB_Lang getLanguage() const override;
+  uint32_t getLexicalParentId() const override;
+  std::string getLibraryName() const override;
+  uint32_t getLiveRangeStartAddressOffset() const override;
+  uint32_t getLiveRangeStartAddressSection() const override;
+  uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
+  codeview::RegisterId getLocalBasePointerRegisterId() const override;
+  uint32_t getLowerBoundId() const override;
+  uint32_t getMemorySpaceKind() const override;
+  std::string getName() const override;
+  uint32_t getNumberOfAcceleratorPointerTags() const override;
+  uint32_t getNumberOfColumns() const override;
+  uint32_t getNumberOfModifiers() const override;
+  uint32_t getNumberOfRegisterIndices() const override;
+  uint32_t getNumberOfRows() const override;
+  std::string getObjectFileName() const override;
+  uint32_t getOemId() const override;
+  uint32_t getOemSymbolId() const override;
+  uint32_t getOffsetInUdt() const override;
+  PDB_Cpu getPlatform() const override;
+  uint32_t getRank() const override;
+  codeview::RegisterId getRegisterId() const override;
+  uint32_t getRegisterType() const override;
+  uint32_t getRelativeVirtualAddress() const override;
+  uint32_t getSamplerSlot() const override;
+  uint32_t getSignature() const override;
+  uint32_t getSizeInUdt() const override;
+  uint32_t getSlot() const override;
+  std::string getSourceFileName() const override;
+  std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
+  uint32_t getStride() const override;
+  uint32_t getSubTypeId() const override;
+  std::string getSymbolsFileName() const override;
+  uint32_t getSymIndexId() const override;
+  uint32_t getTargetOffset() const override;
+  uint32_t getTargetRelativeVirtualAddress() const override;
+  uint64_t getTargetVirtualAddress() const override;
+  uint32_t getTargetSection() const override;
+  uint32_t getTextureSlot() const override;
+  uint32_t getTimeStamp() const override;
+  uint32_t getToken() const override;
+  uint32_t getTypeId() const override;
+  uint32_t getUavSlot() const override;
+  std::string getUndecoratedName() const override;
+  std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
+  uint32_t getUnmodifiedTypeId() const override;
+  uint32_t getUpperBoundId() const override;
+  Variant getValue() const override;
+  uint32_t getVirtualBaseDispIndex() const override;
+  uint32_t getVirtualBaseOffset() const override;
+  uint32_t getVirtualTableShapeId() const override;
+  std::unique_ptr<PDBSymbolTypeBuiltin>
+  getVirtualBaseTableType() const override;
+  PDB_DataKind getDataKind() const override;
+  PDB_SymType getSymTag() const override;
+  codeview::GUID getGuid() const override;
+  int32_t getOffset() const override;
+  int32_t getThisAdjust() const override;
+  int32_t getVirtualBasePointerOffset() const override;
+  PDB_LocType getLocationType() const override;
+  PDB_Machine getMachineType() const override;
+  codeview::ThunkOrdinal getThunkOrdinal() const override;
+  uint64_t getLength() const override;
+  uint64_t getLiveRangeLength() const override;
+  uint64_t getVirtualAddress() const override;
+  PDB_UdtType getUdtKind() const override;
+  bool hasConstructor() const override;
+  bool hasCustomCallingConvention() const override;
+  bool hasFarReturn() const override;
+  bool isCode() const override;
+  bool isCompilerGenerated() const override;
+  bool isConstType() const override;
+  bool isEditAndContinueEnabled() const override;
+  bool isFunction() const override;
+  bool getAddressTaken() const override;
+  bool getNoStackOrdering() const override;
+  bool hasAlloca() const override;
+  bool hasAssignmentOperator() const override;
+  bool hasCTypes() const override;
+  bool hasCastOperator() const override;
+  bool hasDebugInfo() const override;
+  bool hasEH() const override;
+  bool hasEHa() const override;
+  bool hasInlAsm() const override;
+  bool hasInlineAttribute() const override;
+  bool hasInterruptReturn() const override;
+  bool hasFramePointer() const override;
+  bool hasLongJump() const override;
+  bool hasManagedCode() const override;
+  bool hasNestedTypes() const override;
+  bool hasNoInlineAttribute() const override;
+  bool hasNoReturnAttribute() const override;
+  bool hasOptimizedCodeDebugInfo() const override;
+  bool hasOverloadedOperator() const override;
+  bool hasSEH() const override;
+  bool hasSecurityChecks() const override;
+  bool hasSetJump() const override;
+  bool hasStrictGSCheck() const override;
+  bool isAcceleratorGroupSharedLocal() const override;
+  bool isAcceleratorPointerTagLiveRange() const override;
+  bool isAcceleratorStubFunction() const override;
+  bool isAggregated() const override;
+  bool isIntroVirtualFunction() const override;
+  bool isCVTCIL() const override;
+  bool isConstructorVirtualBase() const override;
+  bool isCxxReturnUdt() const override;
+  bool isDataAligned() const override;
+  bool isHLSLData() const override;
+  bool isHotpatchable() const override;
+  bool isIndirectVirtualBaseClass() const override;
+  bool isInterfaceUdt() const override;
+  bool isIntrinsic() const override;
+  bool isLTCG() const override;
+  bool isLocationControlFlowDependent() const override;
+  bool isMSILNetmodule() const override;
+  bool isMatrixRowMajor() const override;
+  bool isManagedCode() const override;
+  bool isMSILCode() const override;
+  bool isMultipleInheritance() const override;
+  bool isNaked() const override;
+  bool isNested() const override;
+  bool isOptimizedAway() const override;
+  bool isPacked() const override;
+  bool isPointerBasedOnSymbolValue() const override;
+  bool isPointerToDataMember() const override;
+  bool isPointerToMemberFunction() const override;
+  bool isPureVirtual() const override;
+  bool isRValueReference() const override;
+  bool isRefUdt() const override;
+  bool isReference() const override;
+  bool isRestrictedType() const override;
+  bool isReturnValue() const override;
+  bool isSafeBuffers() const override;
+  bool isScoped() const override;
+  bool isSdl() const override;
+  bool isSingleInheritance() const override;
+  bool isSplitted() const override;
+  bool isStatic() const override;
+  bool hasPrivateSymbols() const override;
+  bool isUnalignedType() const override;
+  bool isUnreached() const override;
+  bool isValueUdt() const override;
+  bool isVirtual() const override;
+  bool isVirtualBaseClass() const override;
+  bool isVirtualInheritance() const override;
+  bool isVolatileType() const override;
+  bool wasInlined() const override;
+  std::string getUnused() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaSymbol> Symbol;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h
new file mode 100644
index 0000000..7bc28e3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h
@@ -0,0 +1,55 @@
+//===- DIASectionContrib.h - DIA Impl. of IPDBSectionContrib ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIASectionContrib : public IPDBSectionContrib {
+public:
+  explicit DIASectionContrib(const DIASession &PDBSession,
+                             CComPtr<IDiaSectionContrib> DiaSection);
+
+  std::unique_ptr<PDBSymbolCompiland> getCompiland() const override;
+  uint32_t getAddressSection() const override;
+  uint32_t getAddressOffset() const override;
+  uint32_t getRelativeVirtualAddress() const override;
+  uint64_t getVirtualAddress() const override;
+  uint32_t getLength() const override;
+  bool isNotPaged() const override;
+  bool hasCode() const override;
+  bool hasCode16Bit() const override;
+  bool hasInitializedData() const override;
+  bool hasUninitializedData() const override;
+  bool isRemoved() const override;
+  bool hasComdat() const override;
+  bool isDiscardable() const override;
+  bool isNotCached() const override;
+  bool isShared() const override;
+  bool isExecutable() const override;
+  bool isReadable() const override;
+  bool isWritable() const override;
+  uint32_t getDataCrc32() const override;
+  uint32_t getRelocationsCrc32() const override;
+  uint32_t getCompilandId() const override;
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaSectionContrib> Section;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASession.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASession.h
new file mode 100644
index 0000000..4f3d728
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASession.h
@@ -0,0 +1,88 @@
+//===- DIASession.h - DIA implementation of IPDBSession ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include "llvm/Support/Error.h"
+
+#include <system_error>
+
+namespace llvm {
+class StringRef;
+
+namespace pdb {
+class DIASession : public IPDBSession {
+public:
+  explicit DIASession(CComPtr<IDiaSession> DiaSession);
+
+  static Error createFromPdb(StringRef Path,
+                             std::unique_ptr<IPDBSession> &Session);
+  static Error createFromExe(StringRef Path,
+                             std::unique_ptr<IPDBSession> &Session);
+
+  uint64_t getLoadAddress() const override;
+  bool setLoadAddress(uint64_t Address) override;
+  std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
+  std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const override;
+
+  bool addressForVA(uint64_t VA, uint32_t &Section,
+                    uint32_t &Offset) const override;
+  bool addressForRVA(uint32_t RVA, uint32_t &Section,
+                     uint32_t &Offset) const override;
+
+  std::unique_ptr<PDBSymbol>
+  findSymbolByAddress(uint64_t Address, PDB_SymType Type) const override;
+
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbers(const PDBSymbolCompiland &Compiland,
+                  const IPDBSourceFile &File) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
+                              uint32_t Length) const override;
+
+  std::unique_ptr<IPDBEnumSourceFiles>
+  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
+                  PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBSourceFile>
+  findOneSourceFile(const PDBSymbolCompiland *Compiland,
+                    llvm::StringRef Pattern,
+                    PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+  findCompilandsForSourceFile(llvm::StringRef Pattern,
+                              PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<PDBSymbolCompiland>
+  findOneCompilandForSourceFile(llvm::StringRef Pattern,
+                                PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
+  std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
+      const PDBSymbolCompiland &Compiland) const override;
+  std::unique_ptr<IPDBSourceFile>
+  getSourceFileById(uint32_t FileId) const override;
+
+  std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;
+
+  std::unique_ptr<IPDBEnumTables> getEnumTables() const override;
+
+  std::unique_ptr<IPDBEnumInjectedSources> getInjectedSources() const override;
+
+  std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;
+
+private:
+  CComPtr<IDiaSession> Session;
+};
+} // namespace pdb
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
new file mode 100644
index 0000000..1088ea5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
@@ -0,0 +1,41 @@
+//===- DIASourceFile.h - DIA implementation of IPDBSourceFile ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIASourceFile : public IPDBSourceFile {
+public:
+  explicit DIASourceFile(const DIASession &Session,
+                         CComPtr<IDiaSourceFile> DiaSourceFile);
+
+  std::string getFileName() const override;
+  uint32_t getUniqueId() const override;
+  std::string getChecksum() const override;
+  PDB_Checksum getChecksumType() const override;
+  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+  getCompilands() const override;
+
+  CComPtr<IDiaSourceFile> getDiaFile() const { return SourceFile; }
+
+private:
+  const DIASession &Session;
+  CComPtr<IDiaSourceFile> SourceFile;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASupport.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
new file mode 100644
index 0000000..3b4a348
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
@@ -0,0 +1,44 @@
+//===- DIASupport.h - Common header includes for DIA ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Common defines and header includes for all of LLVMDebugInfoPDBDIA.  The
+// definitions here configure the necessary #defines and include system headers
+// in the proper order for using DIA.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+
+// Require at least Vista
+#define NTDDI_VERSION NTDDI_VISTA
+#define _WIN32_WINNT _WIN32_WINNT_VISTA
+#define WINVER _WIN32_WINNT_VISTA
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+// llvm/Support/Debug.h unconditionally #defines DEBUG as a macro.
+// DIA headers #define it if it is not already defined, so we have an
+// include-ordering problem.  The real fix is to make LLVM use something
+// less generic than DEBUG, such as LLVM_DEBUG(), but the DEBUG name is
+// fairly pervasive.  So for now, we save the definition state here and
+// restore it afterwards.
+#pragma push_macro("DEBUG")
+
+// atlbase.h has to come before windows.h
+#include <atlbase.h>
+#include <windows.h>
+
+// DIA headers must come after windows headers.
+#include <cvconst.h>
+#include <dia2.h>
+#include <diacreate.h>
+
+#pragma pop_macro("DEBUG")
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
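The push_macro/pop_macro pair above is a general preprocessor technique for surviving macro collisions between headers. A self-contained illustration (the names and values here are made up):

  #define DEBUG 1               // definition we must not clobber
  #pragma push_macro("DEBUG")   // save the current definition
  #undef DEBUG
  #define DEBUG 2               // temporary, conflicting definition
  // ... code that needs the temporary DEBUG ...
  #pragma pop_macro("DEBUG")    // DEBUG expands to 1 again from here on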
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIATable.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIATable.h
new file mode 100644
index 0000000..ce93fa0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIATable.h
@@ -0,0 +1,32 @@
+//===- DIATable.h - DIA implementation of IPDBTable -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBTable.h"
+
+namespace llvm {
+namespace pdb {
+class DIATable : public IPDBTable {
+public:
+  explicit DIATable(CComPtr<IDiaTable> DiaTable);
+
+  uint32_t getItemCount() const override;
+  std::string getName() const override;
+  PDB_TableType getTableType() const override;
+
+private:
+  CComPtr<IDiaTable> Table;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h
new file mode 100644
index 0000000..aa843e0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h
@@ -0,0 +1,31 @@
+//===- DIAUtils.h - Utility functions for working with DIA ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ConvertUTF.h"
+
+template <typename Obj>
+std::string invokeBstrMethod(Obj &Object,
+                             HRESULT (__stdcall Obj::*Func)(BSTR *)) {
+  CComBSTR Str16;
+  HRESULT Result = (Object.*Func)(&Str16);
+  if (S_OK != Result)
+    return std::string();
+
+  std::string Str8;
+  llvm::ArrayRef<char> StrBytes(reinterpret_cast<char *>(Str16.m_str),
+                                Str16.ByteLength());
+  llvm::convertUTF16ToUTF8String(StrBytes, Str8);
+  return Str8;
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
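invokeBstrMethod abstracts the recurring DIA pattern of a COM getter that fills a BSTR out-parameter, converting the UTF-16 result to UTF-8. A hedged usage sketch against the real IDiaSymbol::get_name method (the wrapper function itself is hypothetical):

  // Sketch: fetch a symbol's name as a UTF-8 std::string.
  std::string getSymbolName(IDiaSymbol &Symbol) {
    return invokeBstrMethod(Symbol, &IDiaSymbol::get_name);
  }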
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/GenericError.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/GenericError.h
new file mode 100644
index 0000000..03205a9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/GenericError.h
@@ -0,0 +1,44 @@
+//===- GenericError.h - system_error extensions for PDB --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_ERROR_H
+#define LLVM_DEBUGINFO_PDB_ERROR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+
+enum class generic_error_code {
+  invalid_path = 1,
+  dia_sdk_not_present,
+  type_server_not_found,
+  unspecified,
+};
+
+/// Base class for errors originating while parsing raw PDB files.
+class GenericError : public ErrorInfo<GenericError> {
+public:
+  static char ID;
+  GenericError(generic_error_code C);
+  GenericError(StringRef Context);
+  GenericError(generic_error_code C, StringRef Context);
+
+  void log(raw_ostream &OS) const override;
+  StringRef getErrorMessage() const;
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+  generic_error_code Code;
+};
+}
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBDataStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBDataStream.h
new file mode 100644
index 0000000..67b5a06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBDataStream.h
@@ -0,0 +1,41 @@
+//===- IPDBDataStream.h - base interface for a PDB data stream --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
+#define LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+namespace pdb {
+
+/// IPDBDataStream defines an interface used to represent a stream consisting
+/// of a name and a series of records whose formats depend on the particular
+/// stream type.
+class IPDBDataStream {
+public:
+  using RecordType = SmallVector<uint8_t, 32>;
+
+  virtual ~IPDBDataStream();
+
+  virtual uint32_t getRecordCount() const = 0;
+  virtual std::string getName() const = 0;
+  virtual Optional<RecordType> getItemAtIndex(uint32_t Index) const = 0;
+  virtual bool getNext(RecordType &Record) = 0;
+  virtual void reset() = 0;
+  virtual IPDBDataStream *clone() const = 0;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
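getNext/reset give the interface forward-iteration semantics, while getItemAtIndex offers fallible random access. A minimal sketch of draining a stream under those declarations:

  // Sketch: visit every raw record in a debug data stream.
  void visitRecords(llvm::pdb::IPDBDataStream &Stream) {
    Stream.reset();                             // rewind to the first record
    llvm::pdb::IPDBDataStream::RecordType Record;
    while (Stream.getNext(Record)) {
      // Record holds one record's raw bytes (SmallVector<uint8_t, 32>);
      // their interpretation depends on the stream type.
    }
  }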
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
new file mode 100644
index 0000000..b6b7d95
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
@@ -0,0 +1,36 @@
+//===- IPDBEnumChildren.h - base interface for child enumerator -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
+#define LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
+
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+
+template <typename ChildType> class IPDBEnumChildren {
+public:
+  using ChildTypePtr = std::unique_ptr<ChildType>;
+  using MyType = IPDBEnumChildren<ChildType>;
+
+  virtual ~IPDBEnumChildren() = default;
+
+  virtual uint32_t getChildCount() const = 0;
+  virtual ChildTypePtr getChildAtIndex(uint32_t Index) const = 0;
+  virtual ChildTypePtr getNext() = 0;
+  virtual void reset() = 0;
+  virtual MyType *clone() const = 0;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
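Every IPDBEnum* alias used across these headers instantiates this template, so all enumerators share one consumption idiom. A minimal sketch (process() is a hypothetical callback):

  // Sketch: getNext() yields unique_ptrs and returns null when exhausted.
  void forEachCompiland(
      llvm::pdb::IPDBEnumChildren<llvm::pdb::PDBSymbolCompiland> &Compilands) {
    while (auto Compiland = Compilands.getNext())
      process(*Compiland);   // hypothetical per-compiland work
    Compilands.reset();      // enumerators can be rewound and reused
  }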
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBInjectedSource.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBInjectedSource.h
new file mode 100644
index 0000000..e75d64a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBInjectedSource.h
@@ -0,0 +1,42 @@
+//===- IPDBInjectedSource.h - base class for PDB injected file --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H
+#define LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H
+
+#include "PDBTypes.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+class raw_ostream;
+
+namespace pdb {
+
+/// IPDBInjectedSource defines an interface used to represent source files
+/// which were injected directly into the PDB file during the compilation
+/// process.  This is used, for example, to add natvis files to a PDB, but
+/// in theory could be used to add arbitrary source code.
+class IPDBInjectedSource {
+public:
+  virtual ~IPDBInjectedSource();
+
+  virtual uint32_t getCrc32() const = 0;
+  virtual uint64_t getCodeByteSize() const = 0;
+  virtual std::string getFileName() const = 0;
+  virtual std::string getObjectFileName() const = 0;
+  virtual std::string getVirtualFileName() const = 0;
+  virtual PDB_SourceCompression getCompression() const = 0;
+  virtual std::string getCode() const = 0;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H
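Together with IPDBSession::getInjectedSources(), declared later in this patch, injected sources are consumed through the usual enumerator idiom. A hedged sketch:

  // Sketch: list the name and size of every injected source in a session.
  void listInjectedSources(const llvm::pdb::IPDBSession &Session) {
    auto Sources = Session.getInjectedSources();
    if (!Sources)
      return;
    while (auto IS = Sources->getNext())
      llvm::outs() << IS->getFileName() << " (" << IS->getCodeByteSize()
                   << " bytes)\n";
  }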
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBLineNumber.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBLineNumber.h
new file mode 100644
index 0000000..e20080f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBLineNumber.h
@@ -0,0 +1,37 @@
+//===- IPDBLineNumber.h - base interface for PDB line no. info --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H
+#define LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBLineNumber {
+public:
+  virtual ~IPDBLineNumber();
+
+  virtual uint32_t getLineNumber() const = 0;
+  virtual uint32_t getLineNumberEnd() const = 0;
+  virtual uint32_t getColumnNumber() const = 0;
+  virtual uint32_t getColumnNumberEnd() const = 0;
+  virtual uint32_t getAddressSection() const = 0;
+  virtual uint32_t getAddressOffset() const = 0;
+  virtual uint32_t getRelativeVirtualAddress() const = 0;
+  virtual uint64_t getVirtualAddress() const = 0;
+  virtual uint32_t getLength() const = 0;
+  virtual uint32_t getSourceFileId() const = 0;
+  virtual uint32_t getCompilandId() const = 0;
+  virtual bool isStatement() const = 0;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
new file mode 100644
index 0000000..bcb2eaa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
@@ -0,0 +1,243 @@
+//===- IPDBRawSymbol.h - base interface for PDB symbol types ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
+
+#include "PDBTypes.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include <memory>
+
+namespace llvm {
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolTypeVTable;
+class PDBSymbolTypeVTableShape;
+
+/// IPDBRawSymbol defines an interface used to represent an arbitrary symbol.
+/// It exposes a monolithic interface consisting of accessors for the union of
+/// all properties that are valid for any symbol type.  This interface is then
+/// wrapped by a concrete class which exposes only the set of methods valid
+/// for that particular symbol type.  See PDBSymbol.h for more details.
+class IPDBRawSymbol {
+public:
+  virtual ~IPDBRawSymbol();
+
+  virtual void dump(raw_ostream &OS, int Indent) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type, StringRef Name,
+               PDB_NameSearchFlags Flags) const = 0;
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByAddr(PDB_SymType Type, StringRef Name,
+                     PDB_NameSearchFlags Flags,
+                     uint32_t Section, uint32_t Offset) const = 0;
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+                   uint64_t VA) const = 0;
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+                    uint32_t RVA) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const = 0;
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByRVA(uint32_t RVA) const = 0;
+  virtual std::unique_ptr<IPDBEnumSymbols>
+  findInlineFramesByVA(uint64_t VA) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
+                         uint32_t Length) const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const = 0;
+
+  virtual void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const = 0;
+  virtual void getBackEndVersion(VersionInfo &Version) const = 0;
+  virtual PDB_MemberAccess getAccess() const = 0;
+  virtual uint32_t getAddressOffset() const = 0;
+  virtual uint32_t getAddressSection() const = 0;
+  virtual uint32_t getAge() const = 0;
+  virtual uint32_t getArrayIndexTypeId() const = 0;
+  virtual uint32_t getBaseDataOffset() const = 0;
+  virtual uint32_t getBaseDataSlot() const = 0;
+  virtual uint32_t getBaseSymbolId() const = 0;
+  virtual PDB_BuiltinType getBuiltinType() const = 0;
+  virtual uint32_t getBitPosition() const = 0;
+  virtual PDB_CallingConv getCallingConvention() const = 0;
+  virtual uint32_t getClassParentId() const = 0;
+  virtual std::string getCompilerName() const = 0;
+  virtual uint32_t getCount() const = 0;
+  virtual uint32_t getCountLiveRanges() const = 0;
+  virtual void getFrontEndVersion(VersionInfo &Version) const = 0;
+  virtual PDB_Lang getLanguage() const = 0;
+  virtual uint32_t getLexicalParentId() const = 0;
+  virtual std::string getLibraryName() const = 0;
+  virtual uint32_t getLiveRangeStartAddressOffset() const = 0;
+  virtual uint32_t getLiveRangeStartAddressSection() const = 0;
+  virtual uint32_t getLiveRangeStartRelativeVirtualAddress() const = 0;
+  virtual codeview::RegisterId getLocalBasePointerRegisterId() const = 0;
+  virtual uint32_t getLowerBoundId() const = 0;
+  virtual uint32_t getMemorySpaceKind() const = 0;
+  virtual std::string getName() const = 0;
+  virtual uint32_t getNumberOfAcceleratorPointerTags() const = 0;
+  virtual uint32_t getNumberOfColumns() const = 0;
+  virtual uint32_t getNumberOfModifiers() const = 0;
+  virtual uint32_t getNumberOfRegisterIndices() const = 0;
+  virtual uint32_t getNumberOfRows() const = 0;
+  virtual std::string getObjectFileName() const = 0;
+  virtual uint32_t getOemId() const = 0;
+  virtual uint32_t getOemSymbolId() const = 0;
+  virtual uint32_t getOffsetInUdt() const = 0;
+  virtual PDB_Cpu getPlatform() const = 0;
+  virtual uint32_t getRank() const = 0;
+  virtual codeview::RegisterId getRegisterId() const = 0;
+  virtual uint32_t getRegisterType() const = 0;
+  virtual uint32_t getRelativeVirtualAddress() const = 0;
+  virtual uint32_t getSamplerSlot() const = 0;
+  virtual uint32_t getSignature() const = 0;
+  virtual uint32_t getSizeInUdt() const = 0;
+  virtual uint32_t getSlot() const = 0;
+  virtual std::string getSourceFileName() const = 0;
+  virtual std::unique_ptr<IPDBLineNumber>
+  getSrcLineOnTypeDefn() const = 0;
+  virtual uint32_t getStride() const = 0;
+  virtual uint32_t getSubTypeId() const = 0;
+  virtual std::string getSymbolsFileName() const = 0;
+  virtual uint32_t getSymIndexId() const = 0;
+  virtual uint32_t getTargetOffset() const = 0;
+  virtual uint32_t getTargetRelativeVirtualAddress() const = 0;
+  virtual uint64_t getTargetVirtualAddress() const = 0;
+  virtual uint32_t getTargetSection() const = 0;
+  virtual uint32_t getTextureSlot() const = 0;
+  virtual uint32_t getTimeStamp() const = 0;
+  virtual uint32_t getToken() const = 0;
+  virtual uint32_t getTypeId() const = 0;
+  virtual uint32_t getUavSlot() const = 0;
+  virtual std::string getUndecoratedName() const = 0;
+  virtual std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const = 0;
+  virtual uint32_t getUnmodifiedTypeId() const = 0;
+  virtual uint32_t getUpperBoundId() const = 0;
+  virtual Variant getValue() const = 0;
+  virtual uint32_t getVirtualBaseDispIndex() const = 0;
+  virtual uint32_t getVirtualBaseOffset() const = 0;
+  virtual std::unique_ptr<PDBSymbolTypeBuiltin>
+  getVirtualBaseTableType() const = 0;
+  virtual uint32_t getVirtualTableShapeId() const = 0;
+  virtual PDB_DataKind getDataKind() const = 0;
+  virtual PDB_SymType getSymTag() const = 0;
+  virtual codeview::GUID getGuid() const = 0;
+  virtual int32_t getOffset() const = 0;
+  virtual int32_t getThisAdjust() const = 0;
+  virtual int32_t getVirtualBasePointerOffset() const = 0;
+  virtual PDB_LocType getLocationType() const = 0;
+  virtual PDB_Machine getMachineType() const = 0;
+  virtual codeview::ThunkOrdinal getThunkOrdinal() const = 0;
+  virtual uint64_t getLength() const = 0;
+  virtual uint64_t getLiveRangeLength() const = 0;
+  virtual uint64_t getVirtualAddress() const = 0;
+  virtual PDB_UdtType getUdtKind() const = 0;
+  virtual bool hasConstructor() const = 0;
+  virtual bool hasCustomCallingConvention() const = 0;
+  virtual bool hasFarReturn() const = 0;
+  virtual bool isCode() const = 0;
+  virtual bool isCompilerGenerated() const = 0;
+  virtual bool isConstType() const = 0;
+  virtual bool isEditAndContinueEnabled() const = 0;
+  virtual bool isFunction() const = 0;
+  virtual bool getAddressTaken() const = 0;
+  virtual bool getNoStackOrdering() const = 0;
+  virtual bool hasAlloca() const = 0;
+  virtual bool hasAssignmentOperator() const = 0;
+  virtual bool hasCTypes() const = 0;
+  virtual bool hasCastOperator() const = 0;
+  virtual bool hasDebugInfo() const = 0;
+  virtual bool hasEH() const = 0;
+  virtual bool hasEHa() const = 0;
+  virtual bool hasFramePointer() const = 0;
+  virtual bool hasInlAsm() const = 0;
+  virtual bool hasInlineAttribute() const = 0;
+  virtual bool hasInterruptReturn() const = 0;
+  virtual bool hasLongJump() const = 0;
+  virtual bool hasManagedCode() const = 0;
+  virtual bool hasNestedTypes() const = 0;
+  virtual bool hasNoInlineAttribute() const = 0;
+  virtual bool hasNoReturnAttribute() const = 0;
+  virtual bool hasOptimizedCodeDebugInfo() const = 0;
+  virtual bool hasOverloadedOperator() const = 0;
+  virtual bool hasSEH() const = 0;
+  virtual bool hasSecurityChecks() const = 0;
+  virtual bool hasSetJump() const = 0;
+  virtual bool hasStrictGSCheck() const = 0;
+  virtual bool isAcceleratorGroupSharedLocal() const = 0;
+  virtual bool isAcceleratorPointerTagLiveRange() const = 0;
+  virtual bool isAcceleratorStubFunction() const = 0;
+  virtual bool isAggregated() const = 0;
+  virtual bool isIntroVirtualFunction() const = 0;
+  virtual bool isCVTCIL() const = 0;
+  virtual bool isConstructorVirtualBase() const = 0;
+  virtual bool isCxxReturnUdt() const = 0;
+  virtual bool isDataAligned() const = 0;
+  virtual bool isHLSLData() const = 0;
+  virtual bool isHotpatchable() const = 0;
+  virtual bool isIndirectVirtualBaseClass() const = 0;
+  virtual bool isInterfaceUdt() const = 0;
+  virtual bool isIntrinsic() const = 0;
+  virtual bool isLTCG() const = 0;
+  virtual bool isLocationControlFlowDependent() const = 0;
+  virtual bool isMSILNetmodule() const = 0;
+  virtual bool isMatrixRowMajor() const = 0;
+  virtual bool isManagedCode() const = 0;
+  virtual bool isMSILCode() const = 0;
+  virtual bool isMultipleInheritance() const = 0;
+  virtual bool isNaked() const = 0;
+  virtual bool isNested() const = 0;
+  virtual bool isOptimizedAway() const = 0;
+  virtual bool isPacked() const = 0;
+  virtual bool isPointerBasedOnSymbolValue() const = 0;
+  virtual bool isPointerToDataMember() const = 0;
+  virtual bool isPointerToMemberFunction() const = 0;
+  virtual bool isPureVirtual() const = 0;
+  virtual bool isRValueReference() const = 0;
+  virtual bool isRefUdt() const = 0;
+  virtual bool isReference() const = 0;
+  virtual bool isRestrictedType() const = 0;
+  virtual bool isReturnValue() const = 0;
+  virtual bool isSafeBuffers() const = 0;
+  virtual bool isScoped() const = 0;
+  virtual bool isSdl() const = 0;
+  virtual bool isSingleInheritance() const = 0;
+  virtual bool isSplitted() const = 0;
+  virtual bool isStatic() const = 0;
+  virtual bool hasPrivateSymbols() const = 0;
+  virtual bool isUnalignedType() const = 0;
+  virtual bool isUnreached() const = 0;
+  virtual bool isValueUdt() const = 0;
+  virtual bool isVirtual() const = 0;
+  virtual bool isVirtualBaseClass() const = 0;
+  virtual bool isVirtualInheritance() const = 0;
+  virtual bool isVolatileType() const = 0;
+  virtual bool wasInlined() const = 0;
+  virtual std::string getUnused() const = 0;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSectionContrib.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSectionContrib.h
new file mode 100644
index 0000000..4fda624
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSectionContrib.h
@@ -0,0 +1,50 @@
+//==- IPDBSectionContrib.h - Interfaces for PDB SectionContribs --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H
+#define LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+
+/// IPDBSectionContrib defines an interface used to represent section
+/// contributions whose information is stored in the PDB.
+class IPDBSectionContrib {
+public:
+  virtual ~IPDBSectionContrib();
+
+  virtual std::unique_ptr<PDBSymbolCompiland> getCompiland() const = 0;
+  virtual uint32_t getAddressSection() const = 0;
+  virtual uint32_t getAddressOffset() const = 0;
+  virtual uint32_t getRelativeVirtualAddress() const = 0;
+  virtual uint64_t getVirtualAddress() const = 0;
+  virtual uint32_t getLength() const = 0;
+  virtual bool isNotPaged() const = 0;
+  virtual bool hasCode() const = 0;
+  virtual bool hasCode16Bit() const = 0;
+  virtual bool hasInitializedData() const = 0;
+  virtual bool hasUninitializedData() const = 0;
+  virtual bool isRemoved() const = 0;
+  virtual bool hasComdat() const = 0;
+  virtual bool isDiscardable() const = 0;
+  virtual bool isNotCached() const = 0;
+  virtual bool isShared() const = 0;
+  virtual bool isExecutable() const = 0;
+  virtual bool isReadable() const = 0;
+  virtual bool isWritable() const = 0;
+  virtual uint32_t getDataCrc32() const = 0;
+  virtual uint32_t getRelocationsCrc32() const = 0;
+  virtual uint32_t getCompilandId() const = 0;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSession.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSession.h
new file mode 100644
index 0000000..695d62c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSession.h
@@ -0,0 +1,92 @@
+//===- IPDBSession.h - base interface for a PDB symbol context --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSESSION_H
+#define LLVM_DEBUGINFO_PDB_IPDBSESSION_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+class PDBSymbolCompiland;
+class PDBSymbolExe;
+
+/// IPDBSession defines an interface used to provide a context for querying
+/// debug information from a debug data source (for example, a PDB).
+class IPDBSession {
+public:
+  virtual ~IPDBSession();
+
+  virtual uint64_t getLoadAddress() const = 0;
+  virtual bool setLoadAddress(uint64_t Address) = 0;
+  virtual std::unique_ptr<PDBSymbolExe> getGlobalScope() = 0;
+  virtual std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const = 0;
+
+  virtual bool addressForVA(uint64_t VA, uint32_t &Section,
+                            uint32_t &Offset) const = 0;
+  virtual bool addressForRVA(uint32_t RVA, uint32_t &Section,
+                             uint32_t &Offset) const = 0;
+
+  template <typename T>
+  std::unique_ptr<T> getConcreteSymbolById(uint32_t SymbolId) const {
+    return unique_dyn_cast_or_null<T>(getSymbolById(SymbolId));
+  }
+
+  virtual std::unique_ptr<PDBSymbol>
+  findSymbolByAddress(uint64_t Address, PDB_SymType Type) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbers(const PDBSymbolCompiland &Compiland,
+                  const IPDBSourceFile &File) const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const = 0;
+  virtual std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
+                              uint32_t Length) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSourceFiles>
+  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
+                  PDB_NameSearchFlags Flags) const = 0;
+  virtual std::unique_ptr<IPDBSourceFile>
+  findOneSourceFile(const PDBSymbolCompiland *Compiland,
+                    llvm::StringRef Pattern,
+                    PDB_NameSearchFlags Flags) const = 0;
+  virtual std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+  findCompilandsForSourceFile(llvm::StringRef Pattern,
+                              PDB_NameSearchFlags Flags) const = 0;
+  virtual std::unique_ptr<PDBSymbolCompiland>
+  findOneCompilandForSourceFile(llvm::StringRef Pattern,
+                                PDB_NameSearchFlags Flags) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const = 0;
+  virtual std::unique_ptr<IPDBEnumSourceFiles>
+  getSourceFilesForCompiland(const PDBSymbolCompiland &Compiland) const = 0;
+  virtual std::unique_ptr<IPDBSourceFile>
+  getSourceFileById(uint32_t FileId) const = 0;
+
+  virtual std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const = 0;
+
+  virtual std::unique_ptr<IPDBEnumTables> getEnumTables() const = 0;
+
+  virtual std::unique_ptr<IPDBEnumInjectedSources>
+  getInjectedSources() const = 0;
+
+  virtual std::unique_ptr<IPDBEnumSectionContribs>
+  getSectionContribs() const = 0;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
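getConcreteSymbolById is the one non-virtual query here: it layers unique_dyn_cast_or_null over getSymbolById, so an unknown id and a mismatched symbol type both come back as null. A short usage sketch (PDBSymbolFunc is one of the concrete symbol classes in llvm::pdb):

  // Sketch: look up an id and require that it denotes a function symbol.
  void inspect(const llvm::pdb::IPDBSession &Session, uint32_t SymbolId) {
    auto Func =
        Session.getConcreteSymbolById<llvm::pdb::PDBSymbolFunc>(SymbolId);
    if (!Func)
      return; // unknown id, or the symbol was not a PDBSymbolFunc
    // ... use Func like any other PDBSymbolFunc ...
  }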
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSourceFile.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSourceFile.h
new file mode 100644
index 0000000..3676c40
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBSourceFile.h
@@ -0,0 +1,40 @@
+//===- IPDBSourceFile.h - base interface for a PDB source file --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H
+#define LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H
+
+#include "PDBTypes.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+class raw_ostream;
+
+namespace pdb {
+
+/// IPDBSourceFile defines an interface used to represent source files whose
+/// information is stored in the PDB.
+class IPDBSourceFile {
+public:
+  virtual ~IPDBSourceFile();
+
+  void dump(raw_ostream &OS, int Indent) const;
+
+  virtual std::string getFileName() const = 0;
+  virtual uint32_t getUniqueId() const = 0;
+  virtual std::string getChecksum() const = 0;
+  virtual PDB_Checksum getChecksumType() const = 0;
+  virtual std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+  getCompilands() const = 0;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBTable.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBTable.h
new file mode 100644
index 0000000..4561c4e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/IPDBTable.h
@@ -0,0 +1,28 @@
+//===- IPDBTable.h - base interface for a PDB table ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBTABLE_H
+#define LLVM_DEBUGINFO_PDB_IPDBTABLE_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBTable {
+public:
+  virtual ~IPDBTable();
+
+  virtual std::string getName() const = 0;
+  virtual uint32_t getItemCount() const = 0;
+  virtual PDB_TableType getTableType() const = 0;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBTABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
new file mode 100644
index 0000000..8200f51
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
@@ -0,0 +1,70 @@
+//===- DbiModuleDescriptor.h - PDB module information -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+namespace pdb {
+
+class DbiModuleDescriptor {
+  friend class DbiStreamBuilder;
+
+public:
+  DbiModuleDescriptor();
+  DbiModuleDescriptor(const DbiModuleDescriptor &Info);
+  ~DbiModuleDescriptor();
+
+  static Error initialize(BinaryStreamRef Stream, DbiModuleDescriptor &Info);
+
+  bool hasECInfo() const;
+  uint16_t getTypeServerIndex() const;
+  uint16_t getModuleStreamIndex() const;
+  uint32_t getSymbolDebugInfoByteSize() const;
+  uint32_t getC11LineInfoByteSize() const;
+  uint32_t getC13LineInfoByteSize() const;
+  uint32_t getNumberOfFiles() const;
+  uint32_t getSourceFileNameIndex() const;
+  uint32_t getPdbFilePathNameIndex() const;
+
+  StringRef getModuleName() const;
+  StringRef getObjFileName() const;
+
+  uint32_t getRecordLength() const;
+
+private:
+  StringRef ModuleName;
+  StringRef ObjFileName;
+  const ModuleInfoHeader *Layout = nullptr;
+};
+
+} // end namespace pdb
+
+template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {
+  Error operator()(BinaryStreamRef Stream, uint32_t &Length,
+                   pdb::DbiModuleDescriptor &Info) {
+    if (auto EC = pdb::DbiModuleDescriptor::initialize(Stream, Info))
+      return EC;
+    Length = Info.getRecordLength();
+    return Error::success();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
new file mode 100644
index 0000000..c918a5d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -0,0 +1,105 @@
+//===- DbiModuleDescriptorBuilder.h - PDB module information ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugLinesSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace llvm {
+class BinaryStreamWriter;
+
+namespace codeview {
+class DebugSubsectionRecordBuilder;
+}
+
+namespace msf {
+class MSFBuilder;
+struct MSFLayout;
+}
+namespace pdb {
+
+class DbiModuleDescriptorBuilder {
+  friend class DbiStreamBuilder;
+
+public:
+  DbiModuleDescriptorBuilder(StringRef ModuleName, uint32_t ModIndex,
+                             msf::MSFBuilder &Msf);
+  ~DbiModuleDescriptorBuilder();
+
+  DbiModuleDescriptorBuilder(const DbiModuleDescriptorBuilder &) = delete;
+  DbiModuleDescriptorBuilder &
+  operator=(const DbiModuleDescriptorBuilder &) = delete;
+
+  void setPdbFilePathNI(uint32_t NI);
+  void setObjFileName(StringRef Name);
+  void addSymbol(codeview::CVSymbol Symbol);
+
+  void
+  addDebugSubsection(std::shared_ptr<codeview::DebugSubsection> Subsection);
+
+  void
+  addDebugSubsection(const codeview::DebugSubsectionRecord &SubsectionContents);
+
+  uint16_t getStreamIndex() const;
+  StringRef getModuleName() const { return ModuleName; }
+  StringRef getObjFileName() const { return ObjFileName; }
+
+  unsigned getModuleIndex() const { return Layout.Mod; }
+
+  ArrayRef<std::string> source_files() const {
+    return makeArrayRef(SourceFiles);
+  }
+
+  uint32_t calculateSerializedLength() const;
+
+  /// Return the offset within the module symbol stream of the next symbol
+  /// record passed to addSymbol; four is added to account for the signature.
+  uint32_t getNextSymbolOffset() const { return SymbolByteSize + 4; }
+
+  void finalize();
+  Error finalizeMsfLayout();
+
+  Error commit(BinaryStreamWriter &ModiWriter, const msf::MSFLayout &MsfLayout,
+               WritableBinaryStreamRef MsfBuffer);
+
+private:
+  uint32_t calculateC13DebugInfoSize() const;
+
+  void addSourceFile(StringRef Path);
+  msf::MSFBuilder &MSF;
+
+  uint32_t SymbolByteSize = 0;
+  uint32_t PdbFilePathNI = 0;
+  std::string ModuleName;
+  std::string ObjFileName;
+  std::vector<std::string> SourceFiles;
+  std::vector<codeview::CVSymbol> Symbols;
+
+  std::vector<std::unique_ptr<codeview::DebugSubsectionRecordBuilder>>
+      C13Builders;
+
+  ModuleInfoHeader Layout;
+};
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
new file mode 100644
index 0000000..5f6e7ab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
@@ -0,0 +1,118 @@
+//===- DbiModuleList.h - PDB module information list ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class DbiModuleList;
+struct FileInfoSubstreamHeader;
+
+class DbiModuleSourceFilesIterator
+    : public iterator_facade_base<DbiModuleSourceFilesIterator,
+                                  std::random_access_iterator_tag, StringRef> {
+  using BaseType =
+      iterator_facade_base<DbiModuleSourceFilesIterator,
+                           std::random_access_iterator_tag, StringRef>;
+
+public:
+  DbiModuleSourceFilesIterator(const DbiModuleList &Modules, uint32_t Modi,
+                               uint16_t Filei);
+  DbiModuleSourceFilesIterator() = default;
+  DbiModuleSourceFilesIterator &
+  operator=(const DbiModuleSourceFilesIterator &R) = default;
+
+  bool operator==(const DbiModuleSourceFilesIterator &R) const;
+
+  const StringRef &operator*() const { return ThisValue; }
+  StringRef &operator*() { return ThisValue; }
+
+  bool operator<(const DbiModuleSourceFilesIterator &RHS) const;
+  std::ptrdiff_t operator-(const DbiModuleSourceFilesIterator &R) const;
+  DbiModuleSourceFilesIterator &operator+=(std::ptrdiff_t N);
+  DbiModuleSourceFilesIterator &operator-=(std::ptrdiff_t N);
+
+private:
+  void setValue();
+
+  bool isEnd() const;
+  bool isCompatible(const DbiModuleSourceFilesIterator &R) const;
+  bool isUniversalEnd() const;
+
+  StringRef ThisValue;
+  const DbiModuleList *Modules{nullptr};
+  uint32_t Modi{0};
+  uint16_t Filei{0};
+};
+
+class DbiModuleList {
+  friend DbiModuleSourceFilesIterator;
+
+public:
+  Error initialize(BinaryStreamRef ModInfo, BinaryStreamRef FileInfo);
+
+  Expected<StringRef> getFileName(uint32_t Index) const;
+  uint32_t getModuleCount() const;
+  uint32_t getSourceFileCount() const;
+  uint16_t getSourceFileCount(uint32_t Modi) const;
+
+  iterator_range<DbiModuleSourceFilesIterator>
+  source_files(uint32_t Modi) const;
+
+  DbiModuleDescriptor getModuleDescriptor(uint32_t Modi) const;
+
+private:
+  Error initializeModInfo(BinaryStreamRef ModInfo);
+  Error initializeFileInfo(BinaryStreamRef FileInfo);
+
+  VarStreamArray<DbiModuleDescriptor> Descriptors;
+
+  FixedStreamArray<support::little32_t> FileNameOffsets;
+  FixedStreamArray<support::ulittle16_t> ModFileCountArray;
+
+  // Each module has multiple filenames, identified by file index.  A file
+  // index is used to index into the FileNameOffsets array, which holds the
+  // absolute offset of the file name within NamesBuffer.  Thus, for each
+  // module we store its first index into the FileNameOffsets array; the
+  // number of files for that module is stored in ModFileCountArray.
+  std::vector<uint32_t> ModuleInitialFileIndex;
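+  // Worked example (hypothetical values): if ModuleInitialFileIndex[Modi] is
+  // 5 and we want the module's third file (Filei == 2), its name starts at
+  // NamesBuffer[FileNameOffsets[5 + 2]].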
+
+  // In order to provide random access into the Descriptors array, we iterate it
+  // once up front to find the offsets of the individual items and store them in
+  // this array.
+  std::vector<uint32_t> ModuleDescriptorOffsets;
+
+  const FileInfoSubstreamHeader *FileInfoHeader = nullptr;
+
+  BinaryStreamRef ModInfoSubstream;
+  BinaryStreamRef FileInfoSubstream;
+  BinaryStreamRef NamesBuffer;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStream.h
new file mode 100644
index 0000000..760d19a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -0,0 +1,129 @@
+//===- DbiStream.h - PDB Dbi Stream (Stream 3) Access -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H
+
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
+#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace object {
+struct FpoData;
+struct coff_section;
+}
+
+namespace pdb {
+class DbiStreamBuilder;
+class PDBFile;
+class ISectionContribVisitor;
+
+class DbiStream {
+  friend class DbiStreamBuilder;
+
+public:
+  DbiStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
+  ~DbiStream();
+  Error reload();
+
+  PdbRaw_DbiVer getDbiVersion() const;
+  uint32_t getAge() const;
+  uint16_t getPublicSymbolStreamIndex() const;
+  uint16_t getGlobalSymbolStreamIndex() const;
+
+  uint16_t getFlags() const;
+  bool isIncrementallyLinked() const;
+  bool hasCTypes() const;
+  bool isStripped() const;
+
+  uint16_t getBuildNumber() const;
+  uint16_t getBuildMajorVersion() const;
+  uint16_t getBuildMinorVersion() const;
+
+  uint16_t getPdbDllRbld() const;
+  uint32_t getPdbDllVersion() const;
+
+  uint32_t getSymRecordStreamIndex() const;
+
+  PDB_Machine getMachineType() const;
+
+  const DbiStreamHeader *getHeader() const { return Header; }
+
+  BinarySubstreamRef getSectionContributionData() const;
+  BinarySubstreamRef getSecMapSubstreamData() const;
+  BinarySubstreamRef getModiSubstreamData() const;
+  BinarySubstreamRef getFileInfoSubstreamData() const;
+  BinarySubstreamRef getTypeServerMapSubstreamData() const;
+  BinarySubstreamRef getECSubstreamData() const;
+
+  /// If the given stream type is present, returns its stream index. If it is
+  /// not present, returns InvalidStreamIndex.
+  uint32_t getDebugStreamIndex(DbgHeaderType Type) const;
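+  // Usage sketch (illustrative; `Dbi` stands for a loaded DbiStream):
+  //   uint32_t SI = Dbi.getDebugStreamIndex(DbgHeaderType::SectionHdr);
+  //   if (SI != kInvalidStreamIndex) { /* stream present; safe to open */ }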
+
+  const DbiModuleList &modules() const;
+
+  FixedStreamArray<object::coff_section> getSectionHeaders();
+
+  FixedStreamArray<object::FpoData> getFpoRecords();
+
+  FixedStreamArray<SecMapEntry> getSectionMap() const;
+  void visitSectionContributions(ISectionContribVisitor &Visitor) const;
+
+  Expected<StringRef> getECName(uint32_t NI) const;
+
+private:
+  Error initializeSectionContributionData();
+  Error initializeSectionHeadersData();
+  Error initializeSectionMapData();
+  Error initializeFpoRecords();
+
+  PDBFile &Pdb;
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+
+  PDBStringTable ECNames;
+
+  BinarySubstreamRef SecContrSubstream;
+  BinarySubstreamRef SecMapSubstream;
+  BinarySubstreamRef ModiSubstream;
+  BinarySubstreamRef FileInfoSubstream;
+  BinarySubstreamRef TypeServerMapSubstream;
+  BinarySubstreamRef ECSubstream;
+
+  DbiModuleList Modules;
+
+  FixedStreamArray<support::ulittle16_t> DbgStreams;
+
+  PdbRaw_DbiSecContribVer SectionContribVersion =
+      PdbRaw_DbiSecContribVer::DbiSecContribVer60;
+  FixedStreamArray<SectionContrib> SectionContribs;
+  FixedStreamArray<SectionContrib2> SectionContribs2;
+  FixedStreamArray<SecMapEntry> SectionMap;
+
+  std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
+  FixedStreamArray<object::coff_section> SectionHeaders;
+
+  std::unique_ptr<msf::MappedBlockStream> FpoStream;
+  FixedStreamArray<object::FpoData> FpoRecords;
+
+  const DbiStreamHeader *Header;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
new file mode 100644
index 0000000..daea062
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -0,0 +1,129 @@
+//===- DbiStreamBuilder.h - PDB Dbi Stream Creation -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAMBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Error.h"
+
+#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace msf {
+class MSFBuilder;
+}
+namespace object {
+struct coff_section;
+}
+namespace pdb {
+class DbiStream;
+struct DbiStreamHeader;
+class DbiModuleDescriptorBuilder;
+class PDBFile;
+
+class DbiStreamBuilder {
+public:
+  DbiStreamBuilder(msf::MSFBuilder &Msf);
+  ~DbiStreamBuilder();
+
+  DbiStreamBuilder(const DbiStreamBuilder &) = delete;
+  DbiStreamBuilder &operator=(const DbiStreamBuilder &) = delete;
+
+  void setVersionHeader(PdbRaw_DbiVer V);
+  void setAge(uint32_t A);
+  void setBuildNumber(uint16_t B);
+  void setPdbDllVersion(uint16_t V);
+  void setPdbDllRbld(uint16_t R);
+  void setFlags(uint16_t F);
+  void setMachineType(PDB_Machine M);
+  void setSectionMap(ArrayRef<SecMapEntry> SecMap);
+
+  // Add the given bytes as a new stream.
+  Error addDbgStream(pdb::DbgHeaderType Type, ArrayRef<uint8_t> Data);
+
+  uint32_t addECName(StringRef Name);
+
+  uint32_t calculateSerializedLength() const;
+
+  void setGlobalsStreamIndex(uint32_t Index);
+  void setPublicsStreamIndex(uint32_t Index);
+  void setSymbolRecordStreamIndex(uint32_t Index);
+
+  Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
+  Error addModuleSourceFile(DbiModuleDescriptorBuilder &Module, StringRef File);
+  Expected<uint32_t> getSourceFileNameIndex(StringRef FileName);
+
+  Error finalizeMsfLayout();
+
+  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef MsfBuffer);
+
+  void addSectionContrib(const SectionContrib &SC) {
+    SectionContribs.emplace_back(SC);
+  }
+
+  // A helper function to create a Section Map from COFF section headers.
+  static std::vector<SecMapEntry>
+  createSectionMap(ArrayRef<llvm::object::coff_section> SecHdrs);
+
+private:
+  struct DebugStream {
+    ArrayRef<uint8_t> Data;
+    uint16_t StreamNumber = kInvalidStreamIndex;
+  };
+
+  Error finalize();
+  uint32_t calculateModiSubstreamSize() const;
+  uint32_t calculateNamesOffset() const;
+  uint32_t calculateSectionContribsStreamSize() const;
+  uint32_t calculateSectionMapStreamSize() const;
+  uint32_t calculateFileInfoSubstreamSize() const;
+  uint32_t calculateNamesBufferSize() const;
+  uint32_t calculateDbgStreamsSize() const;
+
+  Error generateFileInfoSubstream();
+
+  msf::MSFBuilder &Msf;
+  BumpPtrAllocator &Allocator;
+
+  Optional<PdbRaw_DbiVer> VerHeader;
+  uint32_t Age;
+  uint16_t BuildNumber;
+  uint16_t PdbDllVersion;
+  uint16_t PdbDllRbld;
+  uint16_t Flags;
+  PDB_Machine MachineType;
+  uint32_t GlobalsStreamIndex = kInvalidStreamIndex;
+  uint32_t PublicsStreamIndex = kInvalidStreamIndex;
+  uint32_t SymRecordStreamIndex = kInvalidStreamIndex;
+
+  const DbiStreamHeader *Header;
+
+  std::vector<std::unique_ptr<DbiModuleDescriptorBuilder>> ModiList;
+
+  StringMap<uint32_t> SourceFileNames;
+
+  PDBStringTableBuilder ECNamesBuilder;
+  WritableBinaryStreamRef NamesBuffer;
+  MutableBinaryByteStream FileInfoBuffer;
+  std::vector<SectionContrib> SectionContribs;
+  ArrayRef<SecMapEntry> SectionMap;
+  std::array<Optional<DebugStream>, (int)DbgHeaderType::Max> DbgStreams;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/EnumTables.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/EnumTables.h
new file mode 100644
index 0000000..c018445
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/EnumTables.h
@@ -0,0 +1,22 @@
+//===- EnumTables.h - Enum to string conversion tables ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_ENUMTABLES_H
+#define LLVM_DEBUGINFO_PDB_RAW_ENUMTABLES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ScopedPrinter.h"
+
+namespace llvm {
+namespace pdb {
+ArrayRef<EnumEntry<uint16_t>> getOMFSegMapDescFlagNames();
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_ENUMTABLES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Formatters.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Formatters.h
new file mode 100644
index 0000000..7d5eab2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Formatters.h
@@ -0,0 +1,45 @@
+//===- Formatters.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/Formatters.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/FormatProviders.h"
+
+#define FORMAT_CASE(Value, Name)                                               \
+  case Value:                                                                  \
+    Stream << Name;                                                            \
+    break;
+
+namespace llvm {
+template <> struct format_provider<pdb::PdbRaw_ImplVer> {
+  static void format(const pdb::PdbRaw_ImplVer &V, llvm::raw_ostream &Stream,
+                     StringRef Style) {
+    switch (V) {
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC110, "VC110")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC140, "VC140")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC2, "VC2")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC4, "VC4")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC41, "VC41")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC50, "VC50")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70, "VC70")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70Dep, "VC70Dep")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC80, "VC80")
+      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC98, "VC98")
+    }
+  }
+};
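+
+// Usage sketch (illustrative): with this specialization in scope, formatv
+// (from llvm/Support/FormatVariadic.h) can print the enum directly:
+//   llvm::formatv("{0}", pdb::PdbRaw_ImplVer::PdbImplVC70) renders as "VC70".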
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h
new file mode 100644
index 0000000..1a4f89d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h
@@ -0,0 +1,82 @@
+//===- GSIStreamBuilder.h - PDB Publics/Globals Stream Creation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_GSISTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_GSISTREAMBUILDER_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/PDB/Native/GlobalsStream.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryItemStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+template <> struct BinaryItemTraits<codeview::CVSymbol> {
+  static size_t length(const codeview::CVSymbol &Item) {
+    return Item.RecordData.size();
+  }
+  static ArrayRef<uint8_t> bytes(const codeview::CVSymbol &Item) {
+    return Item.RecordData;
+  }
+};
+
+namespace msf {
+class MSFBuilder;
+struct MSFLayout;
+} // namespace msf
+namespace pdb {
+struct GSIHashStreamBuilder;
+
+class GSIStreamBuilder {
+
+public:
+  explicit GSIStreamBuilder(msf::MSFBuilder &Msf);
+  ~GSIStreamBuilder();
+
+  GSIStreamBuilder(const GSIStreamBuilder &) = delete;
+  GSIStreamBuilder &operator=(const GSIStreamBuilder &) = delete;
+
+  Error finalizeMsfLayout();
+
+  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);
+
+  uint32_t getPublicsStreamIndex() const;
+  uint32_t getGlobalsStreamIndex() const;
+  uint32_t getRecordStreamIdx() const { return RecordStreamIdx; }
+
+  void addPublicSymbol(const codeview::PublicSym32 &Pub);
+
+  void addGlobalSymbol(const codeview::ProcRefSym &Sym);
+  void addGlobalSymbol(const codeview::DataSym &Sym);
+  void addGlobalSymbol(const codeview::ConstantSym &Sym);
+  void addGlobalSymbol(const codeview::UDTSym &Sym);
+  void addGlobalSymbol(const codeview::CVSymbol &Sym);
+
+private:
+  uint32_t calculatePublicsHashStreamSize() const;
+  uint32_t calculateGlobalsHashStreamSize() const;
+  Error commitSymbolRecordStream(WritableBinaryStreamRef Stream);
+  Error commitPublicsHashStream(WritableBinaryStreamRef Stream);
+  Error commitGlobalsHashStream(WritableBinaryStreamRef Stream);
+
+  uint32_t RecordStreamIdx = kInvalidStreamIndex;
+  msf::MSFBuilder &Msf;
+  std::unique_ptr<GSIHashStreamBuilder> PSH;
+  std::unique_ptr<GSIHashStreamBuilder> GSH;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h
new file mode 100644
index 0000000..fdc58dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h
@@ -0,0 +1,84 @@
+//===- GlobalsStream.h - PDB Index of Symbols by Name -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
+
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Error.h"
+#include "llvm/ADT/iterator.h"
+
+namespace llvm {
+namespace pdb {
+class DbiStream;
+class PDBFile;
+
+/// Iterator over hash records producing symbol record offsets. Abstracts away
+/// the fact that symbol record offsets on disk are off-by-one.
+class GSIHashIterator
+    : public iterator_adaptor_base<
+          GSIHashIterator, FixedStreamArrayIterator<PSHashRecord>,
+          std::random_access_iterator_tag, const uint32_t> {
+public:
+  GSIHashIterator() = default;
+
+  template <typename T>
+  GSIHashIterator(T &&v)
+      : GSIHashIterator::iterator_adaptor_base(std::forward<T>(v)) {}
+
+  uint32_t operator*() const {
+    uint32_t Off = this->I->Off;
+    return --Off;
+  }
+};
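+
+// Worked example (illustrative): a PSHashRecord whose on-disk Off field reads
+// 9 dereferences to 8 here, the actual byte offset into the symbol records.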
+
+/// From https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.cpp
+enum : unsigned { IPHR_HASH = 4096 };
+
+/// A readonly view of a hash table used in the globals and publics streams.
+/// Most clients will only want to iterate this to get symbol record offsets
+/// into the PDB symbol stream.
+class GSIHashTable {
+public:
+  const GSIHashHeader *HashHdr;
+  FixedStreamArray<PSHashRecord> HashRecords;
+  ArrayRef<uint8_t> HashBitmap;
+  FixedStreamArray<support::ulittle32_t> HashBuckets;
+
+  Error read(BinaryStreamReader &Reader);
+
+  uint32_t getVerSignature() const { return HashHdr->VerSignature; }
+  uint32_t getVerHeader() const { return HashHdr->VerHdr; }
+  uint32_t getHashRecordSize() const { return HashHdr->HrSize; }
+  uint32_t getNumBuckets() const { return HashHdr->NumBuckets; }
+
+  typedef GSIHashIterator iterator;
+  GSIHashIterator begin() const { return GSIHashIterator(HashRecords.begin()); }
+  GSIHashIterator end() const { return GSIHashIterator(HashRecords.end()); }
+};
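+
+// Usage sketch (illustrative; `Table` is a populated GSIHashTable and
+// visitSymbolRecordAt a hypothetical consumer):
+//   for (uint32_t Off : Table)  // GSIHashIterator corrects the off-by-one
+//     visitSymbolRecordAt(Off);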
+
+class GlobalsStream {
+public:
+  explicit GlobalsStream(std::unique_ptr<msf::MappedBlockStream> Stream);
+  ~GlobalsStream();
+  const GSIHashTable &getGlobalsTable() const { return GlobalsTable; }
+  Error reload();
+
+private:
+  GSIHashTable GlobalsTable;
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Hash.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Hash.h
new file mode 100644
index 0000000..1f11d43
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/Hash.h
@@ -0,0 +1,27 @@
+//===- Hash.h - PDB hash functions ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_HASH_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_HASH_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstdint>
+
+namespace llvm {
+namespace pdb {
+
+uint32_t hashStringV1(StringRef Str);
+uint32_t hashStringV2(StringRef Str);
+uint32_t hashBufferV8(ArrayRef<uint8_t> Data);
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_HASH_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/HashTable.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/HashTable.h
new file mode 100644
index 0000000..34cc617
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/HashTable.h
@@ -0,0 +1,335 @@
+//===- HashTable.h - PDB Hash Table -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H
+
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/DebugInfo/PDB/Native/RawError.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <iterator>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BinaryStreamReader;
+class BinaryStreamWriter;
+
+namespace pdb {
+
+Error readSparseBitVector(BinaryStreamReader &Stream, SparseBitVector<> &V);
+Error writeSparseBitVector(BinaryStreamWriter &Writer, SparseBitVector<> &Vec);
+
+template <typename ValueT, typename TraitsT> class HashTable;
+
+template <typename ValueT, typename TraitsT>
+class HashTableIterator
+    : public iterator_facade_base<HashTableIterator<ValueT, TraitsT>,
+                                  std::forward_iterator_tag,
+                                  std::pair<uint32_t, ValueT>> {
+  friend HashTable<ValueT, TraitsT>;
+
+  HashTableIterator(const HashTable<ValueT, TraitsT> &Map, uint32_t Index,
+                    bool IsEnd)
+      : Map(&Map), Index(Index), IsEnd(IsEnd) {}
+
+public:
+  HashTableIterator(const HashTable<ValueT, TraitsT> &Map) : Map(&Map) {
+    int I = Map.Present.find_first();
+    if (I == -1) {
+      Index = 0;
+      IsEnd = true;
+    } else {
+      Index = static_cast<uint32_t>(I);
+      IsEnd = false;
+    }
+  }
+
+  HashTableIterator &operator=(const HashTableIterator &R) {
+    Map = R.Map;
+    Index = R.Index;
+    IsEnd = R.IsEnd;
+    return *this;
+  }
+  bool operator==(const HashTableIterator &R) const {
+    if (IsEnd && R.IsEnd)
+      return true;
+    if (IsEnd != R.IsEnd)
+      return false;
+
+    return (Map == R.Map) && (Index == R.Index);
+  }
+  const std::pair<uint32_t, ValueT> &operator*() const {
+    assert(Map->Present.test(Index));
+    return Map->Buckets[Index];
+  }
+  HashTableIterator &operator++() {
+    while (Index < Map->Buckets.size()) {
+      ++Index;
+      if (Map->Present.test(Index))
+        return *this;
+    }
+
+    IsEnd = true;
+    return *this;
+  }
+
+private:
+  bool isEnd() const { return IsEnd; }
+  uint32_t index() const { return Index; }
+
+  const HashTable<ValueT, TraitsT> *Map;
+  uint32_t Index;
+  bool IsEnd;
+};
+
+template <typename T> struct PdbHashTraits {};
+
+template <> struct PdbHashTraits<uint32_t> {
+  uint32_t hashLookupKey(uint32_t N) const { return N; }
+  uint32_t storageKeyToLookupKey(uint32_t N) const { return N; }
+  uint32_t lookupKeyToStorageKey(uint32_t N) { return N; }
+};
+
+template <typename ValueT, typename TraitsT = PdbHashTraits<ValueT>>
+class HashTable {
+  using iterator = HashTableIterator<ValueT, TraitsT>;
+  friend iterator;
+
+  struct Header {
+    support::ulittle32_t Size;
+    support::ulittle32_t Capacity;
+  };
+
+  using BucketList = std::vector<std::pair<uint32_t, ValueT>>;
+
+public:
+  HashTable() { Buckets.resize(8); }
+
+  explicit HashTable(TraitsT Traits) : HashTable(8, std::move(Traits)) {}
+  HashTable(uint32_t Capacity, TraitsT Traits) : Traits(Traits) {
+    Buckets.resize(Capacity);
+  }
+
+  Error load(BinaryStreamReader &Stream) {
+    const Header *H;
+    if (auto EC = Stream.readObject(H))
+      return EC;
+    if (H->Capacity == 0)
+      return make_error<RawError>(raw_error_code::corrupt_file,
+                                  "Invalid Hash Table Capacity");
+    if (H->Size > maxLoad(H->Capacity))
+      return make_error<RawError>(raw_error_code::corrupt_file,
+                                  "Invalid Hash Table Size");
+
+    Buckets.resize(H->Capacity);
+
+    if (auto EC = readSparseBitVector(Stream, Present))
+      return EC;
+    if (Present.count() != H->Size)
+      return make_error<RawError>(raw_error_code::corrupt_file,
+                                  "Present bit vector does not match size!");
+
+    if (auto EC = readSparseBitVector(Stream, Deleted))
+      return EC;
+    if (Present.intersects(Deleted))
+      return make_error<RawError>(raw_error_code::corrupt_file,
+                                  "Present bit vector intersects deleted!");
+
+    for (uint32_t P : Present) {
+      if (auto EC = Stream.readInteger(Buckets[P].first))
+        return EC;
+      const ValueT *Value;
+      if (auto EC = Stream.readObject(Value))
+        return EC;
+      Buckets[P].second = *Value;
+    }
+
+    return Error::success();
+  }
+
+  uint32_t calculateSerializedLength() const {
+    uint32_t Size = sizeof(Header);
+
+    constexpr int BitsPerWord = 8 * sizeof(uint32_t);
+
+    int NumBitsP = Present.find_last() + 1;
+    int NumBitsD = Deleted.find_last() + 1;
+
+    uint32_t NumWordsP = alignTo(NumBitsP, BitsPerWord) / BitsPerWord;
+    uint32_t NumWordsD = alignTo(NumBitsD, BitsPerWord) / BitsPerWord;
+
+    // Number of words in the Present bit vector (4 bytes), followed by that
+    // many actual words (4 bytes each).
+    Size += sizeof(uint32_t);
+    Size += NumWordsP * sizeof(uint32_t);
+
+    // Number of words in the Deleted bit vector (4 bytes), followed by that
+    // many actual words (4 bytes each).
+    Size += sizeof(uint32_t);
+    Size += NumWordsD * sizeof(uint32_t);
+
+    // One (Key, ValueT) pair for each present entry.
+    Size += (sizeof(uint32_t) + sizeof(ValueT)) * size();
+
+    return Size;
+  }
+
+  Error commit(BinaryStreamWriter &Writer) const {
+    Header H;
+    H.Size = size();
+    H.Capacity = capacity();
+    if (auto EC = Writer.writeObject(H))
+      return EC;
+
+    if (auto EC = writeSparseBitVector(Writer, Present))
+      return EC;
+
+    if (auto EC = writeSparseBitVector(Writer, Deleted))
+      return EC;
+
+    for (const auto &Entry : *this) {
+      if (auto EC = Writer.writeInteger(Entry.first))
+        return EC;
+      if (auto EC = Writer.writeObject(Entry.second))
+        return EC;
+    }
+    return Error::success();
+  }
+
+  void clear() {
+    Buckets.resize(8);
+    Present.clear();
+    Deleted.clear();
+  }
+
+  bool empty() const { return size() == 0; }
+  uint32_t capacity() const { return Buckets.size(); }
+  uint32_t size() const { return Present.count(); }
+
+  iterator begin() const { return iterator(*this); }
+  iterator end() const { return iterator(*this, 0, true); }
+
+  /// Find the entry with the specified lookup key, using the specified traits
+  /// to hash the key and to translate storage keys for equality comparison.
+  template <typename Key> iterator find_as(const Key &K) const {
+    uint32_t H = Traits.hashLookupKey(K) % capacity();
+    uint32_t I = H;
+    Optional<uint32_t> FirstUnused;
+    do {
+      if (isPresent(I)) {
+        if (Traits.storageKeyToLookupKey(Buckets[I].first) == K)
+          return iterator(*this, I, false);
+      } else {
+        if (!FirstUnused)
+          FirstUnused = I;
+        // Insertion probes linearly from the slot hint and places the new
+        // entry at the first empty / deleted location.  Therefore, if we are
+        // probing and find a location that is neither present nor deleted,
+        // nothing can ever have been inserted there, and no matching value
+        // can occur later in the probe sequence.
+        if (!isDeleted(I))
+          break;
+      }
+      I = (I + 1) % capacity();
+    } while (I != H);
+
+    // The only way FirstUnused would not be set is if every single entry in the
+    // table were Present.  But this would violate the load factor constraints
+    // that we impose, so it should never happen.
+    assert(FirstUnused);
+    return iterator(*this, *FirstUnused, true);
+  }
+
+  /// Set the entry for a lookup key that the specified Traits can convert
+  /// into an internal storage key.
+  template <typename Key> bool set_as(const Key &K, ValueT V) {
+    return set_as_internal(K, std::move(V), None);
+  }
+
+  template <typename Key> ValueT get(const Key &K) const {
+    auto Iter = find_as(K);
+    assert(Iter != end());
+    return (*Iter).second;
+  }
+
+protected:
+  bool isPresent(uint32_t K) const { return Present.test(K); }
+  bool isDeleted(uint32_t K) const { return Deleted.test(K); }
+
+  TraitsT Traits;
+  BucketList Buckets;
+  mutable SparseBitVector<> Present;
+  mutable SparseBitVector<> Deleted;
+
+private:
+  /// Set the entry for a lookup key that the specified Traits can convert
+  /// into an internal storage key.
+  template <typename Key>
+  bool set_as_internal(const Key &K, ValueT V, Optional<uint32_t> InternalKey) {
+    auto Entry = find_as(K);
+    if (Entry != end()) {
+      assert(isPresent(Entry.index()));
+      assert(Traits.storageKeyToLookupKey(Buckets[Entry.index()].first) == K);
+      // We're updating, no need to do anything special.
+      Buckets[Entry.index()].second = V;
+      return false;
+    }
+
+    auto &B = Buckets[Entry.index()];
+    assert(!isPresent(Entry.index()));
+    assert(Entry.isEnd());
+    B.first = InternalKey ? *InternalKey : Traits.lookupKeyToStorageKey(K);
+    B.second = V;
+    Present.set(Entry.index());
+    Deleted.reset(Entry.index());
+
+    grow();
+
+    assert((find_as(K)) != end());
+    return true;
+  }
+
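+  // Grow threshold: the table is grown once size reaches roughly two thirds
+  // of capacity, so a completely full table can never occur.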
+  static uint32_t maxLoad(uint32_t capacity) { return capacity * 2 / 3 + 1; }
+
+  void grow() {
+    uint32_t S = size();
+    uint32_t MaxLoad = maxLoad(capacity());
+    if (S < MaxLoad)
+      return;
+    assert(capacity() != UINT32_MAX && "Can't grow Hash table!");
+
+    uint32_t NewCapacity = (capacity() <= INT32_MAX) ? MaxLoad * 2 : UINT32_MAX;
+
+    // Growing requires rebuilding the table and re-hashing every item.  Make a
+    // copy with a larger capacity, insert everything into the copy, then swap
+    // it in.
+    HashTable NewMap(NewCapacity, Traits);
+    for (auto I : Present) {
+      auto LookupKey = Traits.storageKeyToLookupKey(Buckets[I].first);
+      NewMap.set_as_internal(LookupKey, Buckets[I].second, Buckets[I].first);
+    }
+
+    Buckets.swap(NewMap.Buckets);
+    std::swap(Present, NewMap.Present);
+    std::swap(Deleted, NewMap.Deleted);
+    assert(capacity() == NewCapacity);
+    assert(size() == S);
+  }
+};
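+
+// Usage sketch (illustrative): a numeric map using the built-in uint32_t
+// traits, which hash and store keys unchanged.
+//   HashTable<uint32_t> Table;
+//   Table.set_as(42u, 7u);        // insert key 42 -> value 7
+//   uint32_t V = Table.get(42u);  // V == 7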
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h
new file mode 100644
index 0000000..fb00d6a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h
@@ -0,0 +1,30 @@
+//===- ISectionContribVisitor.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_ISECTIONCONTRIBVISITOR_H
+#define LLVM_DEBUGINFO_PDB_RAW_ISECTIONCONTRIBVISITOR_H
+
+namespace llvm {
+namespace pdb {
+
+struct SectionContrib;
+struct SectionContrib2;
+
+class ISectionContribVisitor {
+public:
+  virtual ~ISectionContribVisitor() = default;
+
+  virtual void visit(const SectionContrib &C) = 0;
+  virtual void visit(const SectionContrib2 &C) = 0;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_ISECTIONCONTRIBVISITOR_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStream.h
new file mode 100644
index 0000000..caeb423
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStream.h
@@ -0,0 +1,75 @@
+//===- InfoStream.h - PDB Info Stream (Stream 1) Access ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBINFOSTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBINFOSTREAM_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/DebugInfo/CodeView/GUID.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+class InfoStreamBuilder;
+class PDBFile;
+
+class InfoStream {
+  friend class InfoStreamBuilder;
+
+public:
+  InfoStream(std::unique_ptr<msf::MappedBlockStream> Stream);
+
+  Error reload();
+
+  uint32_t getStreamSize() const;
+
+  const InfoStreamHeader *getHeader() const { return Header; }
+
+  bool containsIdStream() const;
+  PdbRaw_ImplVer getVersion() const;
+  uint32_t getSignature() const;
+  uint32_t getAge() const;
+  codeview::GUID getGuid() const;
+  uint32_t getNamedStreamMapByteSize() const;
+
+  PdbRaw_Features getFeatures() const;
+  ArrayRef<PdbRaw_FeatureSig> getFeatureSignatures() const;
+
+  const NamedStreamMap &getNamedStreams() const;
+
+  BinarySubstreamRef getNamedStreamsBuffer() const;
+
+  uint32_t getNamedStreamIndex(llvm::StringRef Name) const;
+  StringMap<uint32_t> named_streams() const;
+
+private:
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+
+  const InfoStreamHeader *Header;
+
+  BinarySubstreamRef SubNamedStreams;
+
+  std::vector<PdbRaw_FeatureSig> FeatureSignatures;
+  PdbRaw_Features Features = PdbFeatureNone;
+
+  uint32_t NamedStreamMapByteSize = 0;
+
+  NamedStreamMap NamedStreams;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h
new file mode 100644
index 0000000..419e8ad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h
@@ -0,0 +1,68 @@
+//===- InfoStreamBuilder.h - PDB Info Stream Creation -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBINFOSTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBINFOSTREAMBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Error.h"
+
+#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
+#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+namespace llvm {
+class WritableBinaryStreamRef;
+
+namespace msf {
+class MSFBuilder;
+}
+namespace pdb {
+class PDBFile;
+class NamedStreamMap;
+
+class InfoStreamBuilder {
+public:
+  InfoStreamBuilder(msf::MSFBuilder &Msf, NamedStreamMap &NamedStreams);
+  InfoStreamBuilder(const InfoStreamBuilder &) = delete;
+  InfoStreamBuilder &operator=(const InfoStreamBuilder &) = delete;
+
+  void setVersion(PdbRaw_ImplVer V);
+  void setSignature(uint32_t S);
+  void setAge(uint32_t A);
+  void setGuid(codeview::GUID G);
+  void addFeature(PdbRaw_FeatureSig Sig);
+
+  uint32_t getAge() const { return Age; }
+  codeview::GUID getGuid() const { return Guid; }
+  Optional<uint32_t> getSignature() const { return Signature; }
+
+  uint32_t finalize();
+
+  Error finalizeMsfLayout();
+
+  Error commit(const msf::MSFLayout &Layout,
+               WritableBinaryStreamRef Buffer) const;
+
+private:
+  msf::MSFBuilder &Msf;
+
+  std::vector<PdbRaw_FeatureSig> Features;
+  PdbRaw_ImplVer Ver;
+  uint32_t Age;
+  Optional<uint32_t> Signature;
+  codeview::GUID Guid;
+
+  NamedStreamMap &NamedStreams;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
new file mode 100644
index 0000000..6602264
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
@@ -0,0 +1,86 @@
+//===- ModuleDebugStream.h - PDB Module Info Stream Access ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+
+class DbiModuleDescriptor;
+
+class ModuleDebugStreamRef {
+  using DebugSubsectionIterator = codeview::DebugSubsectionArray::Iterator;
+
+public:
+  ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
+                       std::unique_ptr<msf::MappedBlockStream> Stream);
+  ModuleDebugStreamRef(ModuleDebugStreamRef &&Other) = default;
+  ModuleDebugStreamRef(const ModuleDebugStreamRef &Other) = default;
+  ~ModuleDebugStreamRef();
+
+  Error reload();
+
+  uint32_t signature() const { return Signature; }
+
+  iterator_range<codeview::CVSymbolArray::Iterator>
+  symbols(bool *HadError) const;
+
+  const codeview::CVSymbolArray &getSymbolArray() const { return SymbolArray; }
+
+  BinarySubstreamRef getSymbolsSubstream() const;
+  BinarySubstreamRef getC11LinesSubstream() const;
+  BinarySubstreamRef getC13LinesSubstream() const;
+  BinarySubstreamRef getGlobalRefsSubstream() const;
+
+  ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = default;
+
+  iterator_range<DebugSubsectionIterator> subsections() const;
+  codeview::DebugSubsectionArray getSubsectionsArray() const {
+    return Subsections;
+  }
+
+  bool hasDebugSubsections() const;
+
+  Error commit();
+
+  Expected<codeview::DebugChecksumsSubsectionRef>
+  findChecksumsSubsection() const;
+
+private:
+  const DbiModuleDescriptor &Mod;
+
+  uint32_t Signature;
+
+  std::shared_ptr<msf::MappedBlockStream> Stream;
+
+  codeview::CVSymbolArray SymbolArray;
+
+  BinarySubstreamRef SymbolsSubstream;
+  BinarySubstreamRef C11LinesSubstream;
+  BinarySubstreamRef C13LinesSubstream;
+  BinarySubstreamRef GlobalRefsSubstream;
+
+  codeview::DebugSubsectionArray Subsections;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
new file mode 100644
index 0000000..01b8f1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
@@ -0,0 +1,73 @@
+//===- NamedStreamMap.h - PDB Named Stream Map ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/PDB/Native/HashTable.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BinaryStreamReader;
+class BinaryStreamWriter;
+
+namespace pdb {
+
+class NamedStreamMap;
+
+struct NamedStreamMapTraits {
+  NamedStreamMap *NS;
+
+  explicit NamedStreamMapTraits(NamedStreamMap &NS);
+  uint16_t hashLookupKey(StringRef S) const;
+  StringRef storageKeyToLookupKey(uint32_t Offset) const;
+  uint32_t lookupKeyToStorageKey(StringRef S);
+};
+
+class NamedStreamMap {
+  friend class NamedStreamMapBuilder;
+
+public:
+  NamedStreamMap();
+
+  Error load(BinaryStreamReader &Stream);
+  Error commit(BinaryStreamWriter &Writer) const;
+  uint32_t calculateSerializedLength() const;
+
+  uint32_t size() const;
+  bool get(StringRef Stream, uint32_t &StreamNo) const;
+  void set(StringRef Stream, uint32_t StreamNo);
+
+  uint32_t appendStringData(StringRef S);
+  StringRef getString(uint32_t Offset) const;
+  uint32_t hashString(uint32_t Offset) const;
+
+  StringMap<uint32_t> entries() const;
+
+private:
+  NamedStreamMapTraits HashTraits;
+  /// Closed hash table from Offset -> StreamNumber, where Offset is the offset
+  /// of the stream name in NamesBuffer.
+  HashTable<support::ulittle32_t, NamedStreamMapTraits> OffsetIndexMap;
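+  // Worked example (hypothetical values): if the name "/names" is stored at
+  // offset 12 of NamesBuffer and refers to stream 5, the table maps 12 -> 5;
+  // get("/names", StreamNo) then recovers 5 via the traits' key conversion.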
+
+  /// Buffer of string data.
+  std::vector<char> NamesBuffer;
+};
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
new file mode 100644
index 0000000..4f532c6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
@@ -0,0 +1,49 @@
+//===- NativeBuiltinSymbol.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeBuiltinSymbol : public NativeRawSymbol {
+public:
+  NativeBuiltinSymbol(NativeSession &PDBSession, SymIndexId Id,
+                      PDB_BuiltinType T, uint64_t L);
+  ~NativeBuiltinSymbol() override;
+
+  std::unique_ptr<NativeRawSymbol> clone() const override;
+
+  void dump(raw_ostream &OS, int Indent) const override;
+
+  PDB_SymType getSymTag() const override;
+
+  PDB_BuiltinType getBuiltinType() const override;
+  bool isConstType() const override;
+  uint64_t getLength() const override;
+  bool isUnalignedType() const override;
+  bool isVolatileType() const override;
+
+protected:
+  NativeSession &Session;
+  PDB_BuiltinType Type;
+  uint64_t Length;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
new file mode 100644
index 0000000..bd5c09e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -0,0 +1,39 @@
+//===- NativeCompilandSymbol.h - native impl for compiland syms -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H
+
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeCompilandSymbol : public NativeRawSymbol {
+public:
+  NativeCompilandSymbol(NativeSession &Session, SymIndexId SymbolId,
+                        DbiModuleDescriptor MI);
+
+  std::unique_ptr<NativeRawSymbol> clone() const override;
+
+  PDB_SymType getSymTag() const override;
+  bool isEditAndContinueEnabled() const override;
+  uint32_t getLexicalParentId() const override;
+  std::string getLibraryName() const override;
+  std::string getName() const override;
+
+private:
+  DbiModuleDescriptor Module;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
new file mode 100644
index 0000000..6aa1460
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
@@ -0,0 +1,41 @@
+//==- NativeEnumModules.h - Native Module Enumerator impl --------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H
+
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+namespace llvm {
+namespace pdb {
+
+class DbiModuleList;
+class NativeSession;
+
+class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
+public:
+  NativeEnumModules(NativeSession &Session, const DbiModuleList &Modules,
+                    uint32_t Index = 0);
+
+  uint32_t getChildCount() const override;
+  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+  std::unique_ptr<PDBSymbol> getNext() override;
+  void reset() override;
+  NativeEnumModules *clone() const override;
+
+private:
+  NativeSession &Session;
+  const DbiModuleList &Modules;
+  uint32_t Index;
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h
new file mode 100644
index 0000000..41b7b78
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h
@@ -0,0 +1,60 @@
+//===- NativeEnumSymbol.h - info about enum type ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeEnumSymbol : public NativeRawSymbol,
+                         public codeview::TypeVisitorCallbacks {
+public:
+  NativeEnumSymbol(NativeSession &Session, SymIndexId Id,
+                   const codeview::CVType &CV);
+  ~NativeEnumSymbol() override;
+
+  std::unique_ptr<NativeRawSymbol> clone() const override;
+
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type) const override;
+
+  Error visitKnownRecord(codeview::CVType &CVR,
+                         codeview::EnumRecord &Record) override;
+  Error visitKnownMember(codeview::CVMemberRecord &CVM,
+                         codeview::EnumeratorRecord &Record) override;
+
+  PDB_SymType getSymTag() const override;
+  uint32_t getClassParentId() const override;
+  uint32_t getUnmodifiedTypeId() const override;
+  bool hasConstructor() const override;
+  bool hasAssignmentOperator() const override;
+  bool hasCastOperator() const override;
+  uint64_t getLength() const override;
+  std::string getName() const override;
+  bool isNested() const override;
+  bool hasOverloadedOperator() const override;
+  bool isPacked() const override;
+  bool isScoped() const override;
+  uint32_t getTypeId() const override;
+
+protected:
+  codeview::CVType CV;
+  codeview::EnumRecord Record;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h
new file mode 100644
index 0000000..e0a5c8d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h
@@ -0,0 +1,51 @@
+//==- NativeEnumTypes.h - Native Type Enumerator impl ------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMTYPES_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMTYPES_H
+
+#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeEnumTypes : public IPDBEnumChildren<PDBSymbol> {
+public:
+  NativeEnumTypes(NativeSession &Session,
+                  codeview::LazyRandomTypeCollection &TypeCollection,
+                  codeview::TypeLeafKind Kind);
+
+  uint32_t getChildCount() const override;
+  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+  std::unique_ptr<PDBSymbol> getNext() override;
+  void reset() override;
+  NativeEnumTypes *clone() const override;
+
+private:
+  NativeEnumTypes(NativeSession &Session,
+                  const std::vector<codeview::TypeIndex> &Matches,
+                  codeview::TypeLeafKind Kind);
+
+  std::vector<codeview::TypeIndex> Matches;
+  uint32_t Index;
+  NativeSession &Session;
+  codeview::TypeLeafKind Kind;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
new file mode 100644
index 0000000..587c7ff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
@@ -0,0 +1,41 @@
+//===- NativeExeSymbol.h - native impl for PDBSymbolExe ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEEXESYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEEXESYMBOL_H
+
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeExeSymbol : public NativeRawSymbol {
+public:
+  NativeExeSymbol(NativeSession &Session, SymIndexId SymbolId);
+
+  std::unique_ptr<NativeRawSymbol> clone() const override;
+
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type) const override;
+
+  uint32_t getAge() const override;
+  std::string getSymbolsFileName() const override;
+  codeview::GUID getGuid() const override;
+  bool hasCTypes() const override;
+  bool hasPrivateSymbols() const override;
+
+private:
+  PDBFile &File;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
new file mode 100644
index 0000000..5b70ecf
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
@@ -0,0 +1,239 @@
+//==- NativeRawSymbol.h - Native implementation of IPDBRawSymbol -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H
+
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+typedef uint32_t SymIndexId;
+
+class NativeRawSymbol : public IPDBRawSymbol {
+public:
+  NativeRawSymbol(NativeSession &PDBSession, SymIndexId SymbolId);
+
+  virtual std::unique_ptr<NativeRawSymbol> clone() const = 0;
+
+  void dump(raw_ostream &OS, int Indent) const override;
+
+  std::unique_ptr<IPDBEnumSymbols>
+    findChildren(PDB_SymType Type) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findChildren(PDB_SymType Type, StringRef Name,
+      PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findChildrenByAddr(PDB_SymType Type, StringRef Name,
+                       PDB_NameSearchFlags Flags,
+                       uint32_t Section, uint32_t Offset) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+                     uint64_t VA) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+      uint32_t RVA) const override;
+
+  std::unique_ptr<IPDBEnumSymbols>
+    findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findInlineFramesByRVA(uint32_t RVA) const override;
+  std::unique_ptr<IPDBEnumSymbols>
+    findInlineFramesByVA(uint64_t VA) const override;
+
+  std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+    findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
+                           uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+    findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+    findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;
+
+  void getDataBytes(SmallVector<uint8_t, 32> &Bytes) const override;
+  void getFrontEndVersion(VersionInfo &Version) const override;
+  void getBackEndVersion(VersionInfo &Version) const override;
+  PDB_MemberAccess getAccess() const override;
+  uint32_t getAddressOffset() const override;
+  uint32_t getAddressSection() const override;
+  uint32_t getAge() const override;
+  uint32_t getArrayIndexTypeId() const override;
+  uint32_t getBaseDataOffset() const override;
+  uint32_t getBaseDataSlot() const override;
+  uint32_t getBaseSymbolId() const override;
+  PDB_BuiltinType getBuiltinType() const override;
+  uint32_t getBitPosition() const override;
+  PDB_CallingConv getCallingConvention() const override;
+  uint32_t getClassParentId() const override;
+  std::string getCompilerName() const override;
+  uint32_t getCount() const override;
+  uint32_t getCountLiveRanges() const override;
+  PDB_Lang getLanguage() const override;
+  uint32_t getLexicalParentId() const override;
+  std::string getLibraryName() const override;
+  uint32_t getLiveRangeStartAddressOffset() const override;
+  uint32_t getLiveRangeStartAddressSection() const override;
+  uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
+  codeview::RegisterId getLocalBasePointerRegisterId() const override;
+  uint32_t getLowerBoundId() const override;
+  uint32_t getMemorySpaceKind() const override;
+  std::string getName() const override;
+  uint32_t getNumberOfAcceleratorPointerTags() const override;
+  uint32_t getNumberOfColumns() const override;
+  uint32_t getNumberOfModifiers() const override;
+  uint32_t getNumberOfRegisterIndices() const override;
+  uint32_t getNumberOfRows() const override;
+  std::string getObjectFileName() const override;
+  uint32_t getOemId() const override;
+  uint32_t getOemSymbolId() const override;
+  uint32_t getOffsetInUdt() const override;
+  PDB_Cpu getPlatform() const override;
+  uint32_t getRank() const override;
+  codeview::RegisterId getRegisterId() const override;
+  uint32_t getRegisterType() const override;
+  uint32_t getRelativeVirtualAddress() const override;
+  uint32_t getSamplerSlot() const override;
+  uint32_t getSignature() const override;
+  uint32_t getSizeInUdt() const override;
+  uint32_t getSlot() const override;
+  std::string getSourceFileName() const override;
+  std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
+  uint32_t getStride() const override;
+  uint32_t getSubTypeId() const override;
+  std::string getSymbolsFileName() const override;
+  uint32_t getSymIndexId() const override;
+  uint32_t getTargetOffset() const override;
+  uint32_t getTargetRelativeVirtualAddress() const override;
+  uint64_t getTargetVirtualAddress() const override;
+  uint32_t getTargetSection() const override;
+  uint32_t getTextureSlot() const override;
+  uint32_t getTimeStamp() const override;
+  uint32_t getToken() const override;
+  uint32_t getTypeId() const override;
+  uint32_t getUavSlot() const override;
+  std::string getUndecoratedName() const override;
+  std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
+  uint32_t getUnmodifiedTypeId() const override;
+  uint32_t getUpperBoundId() const override;
+  Variant getValue() const override;
+  uint32_t getVirtualBaseDispIndex() const override;
+  uint32_t getVirtualBaseOffset() const override;
+  uint32_t getVirtualTableShapeId() const override;
+  std::unique_ptr<PDBSymbolTypeBuiltin>
+  getVirtualBaseTableType() const override;
+  PDB_DataKind getDataKind() const override;
+  PDB_SymType getSymTag() const override;
+  codeview::GUID getGuid() const override;
+  int32_t getOffset() const override;
+  int32_t getThisAdjust() const override;
+  int32_t getVirtualBasePointerOffset() const override;
+  PDB_LocType getLocationType() const override;
+  PDB_Machine getMachineType() const override;
+  codeview::ThunkOrdinal getThunkOrdinal() const override;
+  uint64_t getLength() const override;
+  uint64_t getLiveRangeLength() const override;
+  uint64_t getVirtualAddress() const override;
+  PDB_UdtType getUdtKind() const override;
+  bool hasConstructor() const override;
+  bool hasCustomCallingConvention() const override;
+  bool hasFarReturn() const override;
+  bool isCode() const override;
+  bool isCompilerGenerated() const override;
+  bool isConstType() const override;
+  bool isEditAndContinueEnabled() const override;
+  bool isFunction() const override;
+  bool getAddressTaken() const override;
+  bool getNoStackOrdering() const override;
+  bool hasAlloca() const override;
+  bool hasAssignmentOperator() const override;
+  bool hasCTypes() const override;
+  bool hasCastOperator() const override;
+  bool hasDebugInfo() const override;
+  bool hasEH() const override;
+  bool hasEHa() const override;
+  bool hasInlAsm() const override;
+  bool hasInlineAttribute() const override;
+  bool hasInterruptReturn() const override;
+  bool hasFramePointer() const override;
+  bool hasLongJump() const override;
+  bool hasManagedCode() const override;
+  bool hasNestedTypes() const override;
+  bool hasNoInlineAttribute() const override;
+  bool hasNoReturnAttribute() const override;
+  bool hasOptimizedCodeDebugInfo() const override;
+  bool hasOverloadedOperator() const override;
+  bool hasSEH() const override;
+  bool hasSecurityChecks() const override;
+  bool hasSetJump() const override;
+  bool hasStrictGSCheck() const override;
+  bool isAcceleratorGroupSharedLocal() const override;
+  bool isAcceleratorPointerTagLiveRange() const override;
+  bool isAcceleratorStubFunction() const override;
+  bool isAggregated() const override;
+  bool isIntroVirtualFunction() const override;
+  bool isCVTCIL() const override;
+  bool isConstructorVirtualBase() const override;
+  bool isCxxReturnUdt() const override;
+  bool isDataAligned() const override;
+  bool isHLSLData() const override;
+  bool isHotpatchable() const override;
+  bool isIndirectVirtualBaseClass() const override;
+  bool isInterfaceUdt() const override;
+  bool isIntrinsic() const override;
+  bool isLTCG() const override;
+  bool isLocationControlFlowDependent() const override;
+  bool isMSILNetmodule() const override;
+  bool isMatrixRowMajor() const override;
+  bool isManagedCode() const override;
+  bool isMSILCode() const override;
+  bool isMultipleInheritance() const override;
+  bool isNaked() const override;
+  bool isNested() const override;
+  bool isOptimizedAway() const override;
+  bool isPacked() const override;
+  bool isPointerBasedOnSymbolValue() const override;
+  bool isPointerToDataMember() const override;
+  bool isPointerToMemberFunction() const override;
+  bool isPureVirtual() const override;
+  bool isRValueReference() const override;
+  bool isRefUdt() const override;
+  bool isReference() const override;
+  bool isRestrictedType() const override;
+  bool isReturnValue() const override;
+  bool isSafeBuffers() const override;
+  bool isScoped() const override;
+  bool isSdl() const override;
+  bool isSingleInheritance() const override;
+  bool isSplitted() const override;
+  bool isStatic() const override;
+  bool hasPrivateSymbols() const override;
+  bool isUnalignedType() const override;
+  bool isUnreached() const override;
+  bool isValueUdt() const override;
+  bool isVirtual() const override;
+  bool isVirtualBaseClass() const override;
+  bool isVirtualInheritance() const override;
+  bool isVolatileType() const override;
+  bool wasInlined() const override;
+  std::string getUnused() const override;
+
+protected:
+  NativeSession &Session;
+  SymIndexId SymbolId;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeSession.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeSession.h
new file mode 100644
index 0000000..60a94d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/NativeSession.h
@@ -0,0 +1,114 @@
+//===- NativeSession.h - Native implementation of IPDBSession ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+class MemoryBuffer;
+namespace pdb {
+class PDBFile;
+
+class NativeSession : public IPDBSession {
+public:
+  NativeSession(std::unique_ptr<PDBFile> PdbFile,
+                std::unique_ptr<BumpPtrAllocator> Allocator);
+  ~NativeSession() override;
+
+  static Error createFromPdb(std::unique_ptr<MemoryBuffer> MB,
+                             std::unique_ptr<IPDBSession> &Session);
+  static Error createFromExe(StringRef Path,
+                             std::unique_ptr<IPDBSession> &Session);
+
+  std::unique_ptr<PDBSymbolCompiland>
+  createCompilandSymbol(DbiModuleDescriptor MI);
+
+  std::unique_ptr<PDBSymbolTypeEnum>
+  createEnumSymbol(codeview::TypeIndex Index);
+
+  std::unique_ptr<IPDBEnumSymbols>
+  createTypeEnumerator(codeview::TypeLeafKind Kind);
+
+  SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI);
+
+  uint64_t getLoadAddress() const override;
+  bool setLoadAddress(uint64_t Address) override;
+  std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
+  std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const override;
+
+  bool addressForVA(uint64_t VA, uint32_t &Section,
+                    uint32_t &Offset) const override;
+  bool addressForRVA(uint32_t RVA, uint32_t &Section,
+                     uint32_t &Offset) const override;
+
+  std::unique_ptr<PDBSymbol>
+  findSymbolByAddress(uint64_t Address, PDB_SymType Type) const override;
+
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbers(const PDBSymbolCompiland &Compiland,
+                  const IPDBSourceFile &File) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const override;
+  std::unique_ptr<IPDBEnumLineNumbers>
+  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
+                              uint32_t Length) const override;
+
+  std::unique_ptr<IPDBEnumSourceFiles>
+  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
+                  PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBSourceFile>
+  findOneSourceFile(const PDBSymbolCompiland *Compiland,
+                    llvm::StringRef Pattern,
+                    PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+  findCompilandsForSourceFile(llvm::StringRef Pattern,
+                              PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<PDBSymbolCompiland>
+  findOneCompilandForSourceFile(llvm::StringRef Pattern,
+                                PDB_NameSearchFlags Flags) const override;
+  std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
+  std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
+      const PDBSymbolCompiland &Compiland) const override;
+  std::unique_ptr<IPDBSourceFile>
+  getSourceFileById(uint32_t FileId) const override;
+
+  std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;
+
+  std::unique_ptr<IPDBEnumTables> getEnumTables() const override;
+
+  std::unique_ptr<IPDBEnumInjectedSources> getInjectedSources() const override;
+
+  std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;
+
+  PDBFile &getPDBFile() { return *Pdb; }
+  const PDBFile &getPDBFile() const { return *Pdb; }
+
+private:
+  std::unique_ptr<PDBFile> Pdb;
+  std::unique_ptr<BumpPtrAllocator> Allocator;
+  std::vector<std::unique_ptr<NativeRawSymbol>> SymbolCache;
+  DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
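
The two static factories are the intended entry points for the native reader. A minimal sketch of opening a PDB from disk, assuming only what this header declares (the helper name and error handling are illustrative):

```cpp
// Sketch: open a PDB with the native reader.
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/Support/MemoryBuffer.h"

static llvm::Error openPdb(llvm::StringRef Path,
                           std::unique_ptr<llvm::pdb::IPDBSession> &Session) {
  auto BufOrErr = llvm::MemoryBuffer::getFile(Path, /*FileSize=*/-1,
                                              /*RequiresNullTerminator=*/false);
  if (!BufOrErr)
    return llvm::errorCodeToError(BufOrErr.getError());
  // On success the session takes ownership of the buffer.
  return llvm::pdb::NativeSession::createFromPdb(std::move(*BufOrErr), Session);
}
```
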
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFile.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFile.h
new file mode 100644
index 0000000..5e39ac3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFile.h
@@ -0,0 +1,144 @@
+//===- PDBFile.h - Low level interface to a PDB file ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBFILE_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBFILE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DebugInfo/MSF/IMSFFile.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <memory>
+
+namespace llvm {
+
+class BinaryStream;
+
+namespace msf {
+class MappedBlockStream;
+}
+
+namespace pdb {
+class DbiStream;
+class GlobalsStream;
+class InfoStream;
+class PDBStringTable;
+class PDBFileBuilder;
+class PublicsStream;
+class SymbolStream;
+class TpiStream;
+
+class PDBFile : public msf::IMSFFile {
+  friend PDBFileBuilder;
+
+public:
+  PDBFile(StringRef Path, std::unique_ptr<BinaryStream> PdbFileBuffer,
+          BumpPtrAllocator &Allocator);
+  ~PDBFile() override;
+
+  StringRef getFileDirectory() const;
+  StringRef getFilePath() const;
+
+  uint32_t getFreeBlockMapBlock() const;
+  uint32_t getUnknown1() const;
+
+  uint32_t getBlockSize() const override;
+  uint32_t getBlockCount() const override;
+  uint32_t getNumDirectoryBytes() const;
+  uint32_t getBlockMapIndex() const;
+  uint32_t getNumDirectoryBlocks() const;
+  uint64_t getBlockMapOffset() const;
+
+  uint32_t getNumStreams() const override;
+  uint32_t getMaxStreamSize() const;
+  uint32_t getStreamByteSize(uint32_t StreamIndex) const override;
+  ArrayRef<support::ulittle32_t>
+  getStreamBlockList(uint32_t StreamIndex) const override;
+  uint32_t getFileSize() const;
+
+  Expected<ArrayRef<uint8_t>> getBlockData(uint32_t BlockIndex,
+                                           uint32_t NumBytes) const override;
+  Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
+                     ArrayRef<uint8_t> Data) const override;
+
+  ArrayRef<support::ulittle32_t> getStreamSizes() const {
+    return ContainerLayout.StreamSizes;
+  }
+  ArrayRef<ArrayRef<support::ulittle32_t>> getStreamMap() const {
+    return ContainerLayout.StreamMap;
+  }
+
+  const msf::MSFLayout &getMsfLayout() const { return ContainerLayout; }
+  BinaryStreamRef getMsfBuffer() const { return *Buffer; }
+
+  ArrayRef<support::ulittle32_t> getDirectoryBlockArray() const;
+
+  std::unique_ptr<msf::MappedBlockStream> createIndexedStream(uint16_t SN);
+
+  msf::MSFStreamLayout getStreamLayout(uint32_t StreamIdx) const;
+  msf::MSFStreamLayout getFpmStreamLayout() const;
+
+  Error parseFileHeaders();
+  Error parseStreamData();
+
+  Expected<InfoStream &> getPDBInfoStream();
+  Expected<DbiStream &> getPDBDbiStream();
+  Expected<GlobalsStream &> getPDBGlobalsStream();
+  Expected<TpiStream &> getPDBTpiStream();
+  Expected<TpiStream &> getPDBIpiStream();
+  Expected<PublicsStream &> getPDBPublicsStream();
+  Expected<SymbolStream &> getPDBSymbolStream();
+  Expected<PDBStringTable &> getStringTable();
+
+  BumpPtrAllocator &getAllocator() { return Allocator; }
+
+  bool hasPDBDbiStream() const;
+  bool hasPDBGlobalsStream();
+  bool hasPDBInfoStream() const;
+  bool hasPDBIpiStream() const;
+  bool hasPDBPublicsStream();
+  bool hasPDBSymbolStream();
+  bool hasPDBTpiStream() const;
+  bool hasPDBStringTable();
+
+  uint32_t getPointerSize();
+
+private:
+  Expected<std::unique_ptr<msf::MappedBlockStream>>
+  safelyCreateIndexedStream(const msf::MSFLayout &Layout,
+                            BinaryStreamRef MsfData,
+                            uint32_t StreamIndex) const;
+
+  std::string FilePath;
+  BumpPtrAllocator &Allocator;
+
+  std::unique_ptr<BinaryStream> Buffer;
+
+  msf::MSFLayout ContainerLayout;
+
+  std::unique_ptr<GlobalsStream> Globals;
+  std::unique_ptr<InfoStream> Info;
+  std::unique_ptr<DbiStream> Dbi;
+  std::unique_ptr<TpiStream> Tpi;
+  std::unique_ptr<TpiStream> Ipi;
+  std::unique_ptr<PublicsStream> Publics;
+  std::unique_ptr<SymbolStream> Symbols;
+  std::unique_ptr<msf::MappedBlockStream> DirectoryStream;
+  std::unique_ptr<msf::MappedBlockStream> StringTableStream;
+  std::unique_ptr<PDBStringTable> Strings;
+};
+}
+}
+
+#endif
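
Nearly every stream accessor returns `Expected<...>` because streams are parsed lazily on first request. A hedged sketch of interrogating an already-open file (for example one obtained from `NativeSession::getPDBFile()`):

```cpp
// Sketch: basic queries against an open PDBFile.
static llvm::Error describe(llvm::pdb::PDBFile &File, llvm::raw_ostream &OS) {
  OS << "block size:  " << File.getBlockSize() << "\n";
  OS << "num streams: " << File.getNumStreams() << "\n";

  // Streams materialize on demand; failures surface as llvm::Error.
  auto InfoOrErr = File.getPDBInfoStream();
  if (!InfoOrErr)
    return InfoOrErr.takeError();
  return llvm::Error::success();
}
```
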
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
new file mode 100644
index 0000000..58dda71
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
@@ -0,0 +1,109 @@
+//===- PDBFileBuilder.h - PDB File Creation ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBFILEBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBFILEBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
+#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+class MSFBuilder;
+}
+namespace pdb {
+class DbiStreamBuilder;
+class InfoStreamBuilder;
+class GSIStreamBuilder;
+class TpiStreamBuilder;
+
+class PDBFileBuilder {
+public:
+  explicit PDBFileBuilder(BumpPtrAllocator &Allocator);
+  ~PDBFileBuilder();
+  PDBFileBuilder(const PDBFileBuilder &) = delete;
+  PDBFileBuilder &operator=(const PDBFileBuilder &) = delete;
+
+  Error initialize(uint32_t BlockSize);
+
+  msf::MSFBuilder &getMsfBuilder();
+  InfoStreamBuilder &getInfoBuilder();
+  DbiStreamBuilder &getDbiBuilder();
+  TpiStreamBuilder &getTpiBuilder();
+  TpiStreamBuilder &getIpiBuilder();
+  PDBStringTableBuilder &getStringTableBuilder();
+  GSIStreamBuilder &getGsiBuilder();
+
+  Error commit(StringRef Filename);
+
+  Expected<uint32_t> getNamedStreamIndex(StringRef Name) const;
+  Error addNamedStream(StringRef Name, StringRef Data);
+  void addInjectedSource(StringRef Name, std::unique_ptr<MemoryBuffer> Buffer);
+
+private:
+  struct InjectedSourceDescriptor {
+    // The full name of the stream that contains the contents of this injected
+    // source.  This is built as a concatenation of the literal "/src/files"
+    // plus the "vname".
+    std::string StreamName;
+
+    // The string table index of the file name, exactly as the user specified it.
+    uint32_t NameIndex;
+
+    // The string table index of the "vname" of the file.  As far as we
+    // understand, this is the same as the name, except it is lowercased and
+    // forward slashes are converted to backslashes.
+    uint32_t VNameIndex;
+    std::unique_ptr<MemoryBuffer> Content;
+  };
+
+  Expected<msf::MSFLayout> finalizeMsfLayout();
+  Expected<uint32_t> allocateNamedStream(StringRef Name, uint32_t Size);
+
+  void commitFpm(WritableBinaryStream &MsfBuffer, const msf::MSFLayout &Layout);
+  void commitInjectedSources(WritableBinaryStream &MsfBuffer,
+                             const msf::MSFLayout &Layout);
+  void commitSrcHeaderBlock(WritableBinaryStream &MsfBuffer,
+                            const msf::MSFLayout &Layout);
+
+  BumpPtrAllocator &Allocator;
+
+  std::unique_ptr<msf::MSFBuilder> Msf;
+  std::unique_ptr<InfoStreamBuilder> Info;
+  std::unique_ptr<DbiStreamBuilder> Dbi;
+  std::unique_ptr<GSIStreamBuilder> Gsi;
+  std::unique_ptr<TpiStreamBuilder> Tpi;
+  std::unique_ptr<TpiStreamBuilder> Ipi;
+
+  PDBStringTableBuilder Strings;
+  StringTableHashTraits InjectedSourceHashTraits;
+  HashTable<SrcHeaderBlockEntry, StringTableHashTraits> InjectedSourceTable;
+
+  SmallVector<InjectedSourceDescriptor, 2> InjectedSources;
+
+  NamedStreamMap NamedStreams;
+  DenseMap<uint32_t, std::string> NamedStreamData;
+};
+}
+}
+
+#endif
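
The builder workflow is initialize, configure, commit: pick a block size, populate the sub-builders, then write everything in one shot. A sketch of the skeleton only; a PDB that tools will accept normally needs the info, DBI, TPI and IPI builders populated before `commit()`, and the output path and stream name here are hypothetical:

```cpp
// Sketch: skeleton of the PDBFileBuilder workflow.
#include "llvm/DebugInfo/PDB/Native/PDBFileBuilder.h"

static llvm::Error buildSkeletonPdb(llvm::StringRef OutPath) {
  llvm::BumpPtrAllocator Alloc;
  llvm::pdb::PDBFileBuilder Builder(Alloc);

  if (auto E = Builder.initialize(/*BlockSize=*/4096))
    return E;

  // Named streams carry auxiliary data; "/example" is hypothetical.
  if (auto E = Builder.addNamedStream("/example", "hello"))
    return E;

  return Builder.commit(OutPath);
}
```
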
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
new file mode 100644
index 0000000..29167c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
@@ -0,0 +1,65 @@
+//===- PDBStringTable.h - PDB String Table ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLE_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+class BinaryStreamReader;
+
+namespace msf {
+class MappedBlockStream;
+}
+
+namespace pdb {
+
+struct PDBStringTableHeader;
+
+class PDBStringTable {
+public:
+  Error reload(BinaryStreamReader &Reader);
+
+  uint32_t getByteSize() const;
+  uint32_t getNameCount() const;
+  uint32_t getHashVersion() const;
+  uint32_t getSignature() const;
+
+  Expected<StringRef> getStringForID(uint32_t ID) const;
+  Expected<uint32_t> getIDForString(StringRef Str) const;
+
+  FixedStreamArray<support::ulittle32_t> name_ids() const;
+
+  const codeview::DebugStringTableSubsectionRef &getStringTable() const;
+
+private:
+  Error readHeader(BinaryStreamReader &Reader);
+  Error readStrings(BinaryStreamReader &Reader);
+  Error readHashTable(BinaryStreamReader &Reader);
+  Error readEpilogue(BinaryStreamReader &Reader);
+
+  const PDBStringTableHeader *Header = nullptr;
+  codeview::DebugStringTableSubsectionRef Strings;
+  FixedStreamArray<support::ulittle32_t> IDs;
+  uint32_t NameCount = 0;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLE_H
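
Reading from the `/names` stream is a two-step `Expected` chain: fetch the table from the file, then resolve an ID. A minimal sketch:

```cpp
// Sketch: resolve one string ID from the /names table of an open file.
static llvm::Error dumpName(llvm::pdb::PDBFile &File, uint32_t ID,
                            llvm::raw_ostream &OS) {
  auto StringsOrErr = File.getStringTable();
  if (!StringsOrErr)
    return StringsOrErr.takeError();

  auto StrOrErr = StringsOrErr->getStringForID(ID);
  if (!StrOrErr)
    return StrOrErr.takeError();

  OS << "name " << ID << " = " << *StrOrErr << "\n";
  return llvm::Error::success();
}
```
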
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h
new file mode 100644
index 0000000..0f81c18
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h
@@ -0,0 +1,72 @@
+//===- PDBStringTableBuilder.h - PDB String Table Builder -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file creates the "/names" stream.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLEBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLEBUILDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+class BinaryStreamWriter;
+class WritableBinaryStreamRef;
+
+namespace msf {
+struct MSFLayout;
+}
+
+namespace pdb {
+
+class PDBFileBuilder;
+class PDBStringTableBuilder;
+
+struct StringTableHashTraits {
+  PDBStringTableBuilder *Table;
+
+  explicit StringTableHashTraits(PDBStringTableBuilder &Table);
+  uint32_t hashLookupKey(StringRef S) const;
+  StringRef storageKeyToLookupKey(uint32_t Offset) const;
+  uint32_t lookupKeyToStorageKey(StringRef S);
+};
+
+class PDBStringTableBuilder {
+public:
+  // If string S does not exist in the string table, insert it.
+  // Returns the ID for S.
+  uint32_t insert(StringRef S);
+
+  uint32_t getIdForString(StringRef S) const;
+  StringRef getStringForId(uint32_t Id) const;
+
+  uint32_t calculateSerializedSize() const;
+  Error commit(BinaryStreamWriter &Writer) const;
+
+  void setStrings(const codeview::DebugStringTableSubsection &Strings);
+
+private:
+  uint32_t calculateHashTableSize() const;
+  Error writeHeader(BinaryStreamWriter &Writer) const;
+  Error writeStrings(BinaryStreamWriter &Writer) const;
+  Error writeHashTable(BinaryStreamWriter &Writer) const;
+  Error writeEpilogue(BinaryStreamWriter &Writer) const;
+
+  codeview::DebugStringTableSubsection Strings;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_PDBSTRINGTABLEBUILDER_H
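
The builder side mirrors the reader: `insert()` returns the ID a string will have in the serialized table, and `commit()` writes the header, strings, hash table and epilogue through one `BinaryStreamWriter`. A sketch that serializes a tiny table into a byte vector:

```cpp
// Sketch: serialize a minimal /names table into a byte vector.
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamWriter.h"

static llvm::Error writeNames(std::vector<uint8_t> &Out) {
  llvm::pdb::PDBStringTableBuilder Builder;
  uint32_t Id = Builder.insert("foo.cpp"); // ID of "foo.cpp" in the table
  (void)Id;

  Out.resize(Builder.calculateSerializedSize());
  llvm::MutableBinaryByteStream Stream(Out, llvm::support::little);
  llvm::BinaryStreamWriter Writer(Stream);
  return Builder.commit(Writer);
}
```
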
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PublicsStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
new file mode 100644
index 0000000..2d0222a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
@@ -0,0 +1,60 @@
+//===- PublicsStream.h - PDB Public Symbol Stream ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PUBLICSSTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_PUBLICSSTREAM_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/PDB/Native/GlobalsStream.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+class DbiStream;
+struct GSIHashHeader;
+class PDBFile;
+
+class PublicsStream {
+public:
+  PublicsStream(std::unique_ptr<msf::MappedBlockStream> Stream);
+  ~PublicsStream();
+  Error reload();
+
+  uint32_t getSymHash() const;
+  uint16_t getThunkTableSection() const;
+  uint32_t getThunkTableOffset() const;
+  const GSIHashTable &getPublicsTable() const { return PublicsTable; }
+  FixedStreamArray<support::ulittle32_t> getAddressMap() const {
+    return AddressMap;
+  }
+  FixedStreamArray<support::ulittle32_t> getThunkMap() const {
+    return ThunkMap;
+  }
+  FixedStreamArray<SectionOffset> getSectionOffsets() const {
+    return SectionOffsets;
+  }
+
+private:
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+  GSIHashTable PublicsTable;
+  FixedStreamArray<support::ulittle32_t> AddressMap;
+  FixedStreamArray<support::ulittle32_t> ThunkMap;
+  FixedStreamArray<SectionOffset> SectionOffsets;
+
+  const PublicsStreamHeader *Header;
+};
+}
+}
+
+#endif
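
A short sketch of the accessors once `reload()` has succeeded; all three maps come straight from the stream header and fixed arrays above:

```cpp
// Sketch: inspect a reloaded publics stream.
static void describePublics(llvm::pdb::PublicsStream &Publics,
                            llvm::raw_ostream &OS) {
  OS << "sym hash size: " << Publics.getSymHash() << "\n";
  OS << "address map:   " << Publics.getAddressMap().size() << " entries\n";
  OS << "thunk section: " << Publics.getThunkTableSection() << "\n";
}
```
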
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawConstants.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawConstants.h
new file mode 100644
index 0000000..fbbd331
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawConstants.h
@@ -0,0 +1,119 @@
+//===- RawConstants.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBRAWCONSTANTS_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBRAWCONSTANTS_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include <cstdint>
+
+namespace llvm {
+namespace pdb {
+
+const uint16_t kInvalidStreamIndex = 0xFFFF;
+
+enum PdbRaw_ImplVer : uint32_t {
+  PdbImplVC2 = 19941610,
+  PdbImplVC4 = 19950623,
+  PdbImplVC41 = 19950814,
+  PdbImplVC50 = 19960307,
+  PdbImplVC98 = 19970604,
+  PdbImplVC70Dep = 19990604, // deprecated
+  PdbImplVC70 = 20000404,
+  PdbImplVC80 = 20030901,
+  PdbImplVC110 = 20091201,
+  PdbImplVC140 = 20140508,
+};
+
+enum class PdbRaw_SrcHeaderBlockVer : uint32_t { SrcVerOne = 19980827 };
+
+enum class PdbRaw_FeatureSig : uint32_t {
+  VC110 = PdbImplVC110,
+  VC140 = PdbImplVC140,
+  NoTypeMerge = 0x4D544F4E,
+  MinimalDebugInfo = 0x494E494D,
+};
+
+enum PdbRaw_Features : uint32_t {
+  PdbFeatureNone = 0x0,
+  PdbFeatureContainsIdStream = 0x1,
+  PdbFeatureMinimalDebugInfo = 0x2,
+  PdbFeatureNoTypeMerging = 0x4,
+  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ PdbFeatureNoTypeMerging)
+};
+
+enum PdbRaw_DbiVer : uint32_t {
+  PdbDbiVC41 = 930803,
+  PdbDbiV50 = 19960307,
+  PdbDbiV60 = 19970606,
+  PdbDbiV70 = 19990903,
+  PdbDbiV110 = 20091201
+};
+
+enum PdbRaw_TpiVer : uint32_t {
+  PdbTpiV40 = 19950410,
+  PdbTpiV41 = 19951122,
+  PdbTpiV50 = 19961031,
+  PdbTpiV70 = 19990903,
+  PdbTpiV80 = 20040203,
+};
+
+enum PdbRaw_DbiSecContribVer : uint32_t {
+  DbiSecContribVer60 = 0xeffe0000 + 19970605,
+  DbiSecContribV2 = 0xeffe0000 + 20140516
+};
+
+enum SpecialStream : uint32_t {
+  // Stream 0 contains a copy of the previous version of the MSF directory.
+  // We are not currently using it, but technically, if the main MSF
+  // directory turns out to be corrupted, we could fall back to it.
+  OldMSFDirectory = 0,
+
+  StreamPDB = 1,
+  StreamTPI = 2,
+  StreamDBI = 3,
+  StreamIPI = 4,
+
+  kSpecialStreamCount
+};
+
+enum class DbgHeaderType : uint16_t {
+  FPO,
+  Exception,
+  Fixup,
+  OmapToSrc,
+  OmapFromSrc,
+  SectionHdr,
+  TokenRidMap,
+  Xdata,
+  Pdata,
+  NewFPO,
+  SectionHdrOrig,
+  Max
+};
+
+enum class OMFSegDescFlags : uint16_t {
+  None = 0,
+  Read = 1 << 0,              // Segment is readable.
+  Write = 1 << 1,             // Segment is writable.
+  Execute = 1 << 2,           // Segment is executable.
+  AddressIs32Bit = 1 << 3,    // Descriptor describes a 32-bit linear address.
+  IsSelector = 1 << 8,        // Frame represents a selector.
+  IsAbsoluteAddress = 1 << 9, // Frame represents an absolute address.
+  IsGroup = 1 << 10,          // If set, descriptor represents a group.
+  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ IsGroup)
+};
+
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_PDBRAWCONSTANTS_H
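
`LLVM_MARK_AS_BITMASK_ENUM` plus the namespace-level enabler above is what makes the ordinary bitwise operators work on `OMFSegDescFlags` without casts. For instance:

```cpp
// Sketch: bitmask-enum operators on OMFSegDescFlags.
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"

static bool isReadWrite(llvm::pdb::OMFSegDescFlags F) {
  using llvm::pdb::OMFSegDescFlags;
  OMFSegDescFlags RW = OMFSegDescFlags::Read | OMFSegDescFlags::Write;
  return (F & RW) == RW;
}
```
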
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawError.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawError.h
new file mode 100644
index 0000000..3624a76
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawError.h
@@ -0,0 +1,53 @@
+//===- RawError.h - Error extensions for raw PDB implementation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_RAWERROR_H
+#define LLVM_DEBUGINFO_PDB_RAW_RAWERROR_H
+
+#include "llvm/Support/Error.h"
+
+#include <string>
+
+namespace llvm {
+namespace pdb {
+enum class raw_error_code {
+  unspecified = 1,
+  feature_unsupported,
+  invalid_format,
+  corrupt_file,
+  insufficient_buffer,
+  no_stream,
+  index_out_of_bounds,
+  invalid_block_address,
+  duplicate_entry,
+  no_entry,
+  not_writable,
+  stream_too_long,
+  invalid_tpi_hash,
+};
+
+/// Base class for errors originating when parsing raw PDB files
+class RawError : public ErrorInfo<RawError> {
+public:
+  static char ID;
+  RawError(raw_error_code C);
+  RawError(const std::string &Context);
+  RawError(raw_error_code C, const std::string &Context);
+
+  void log(raw_ostream &OS) const override;
+  const std::string &getErrorMessage() const;
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+  raw_error_code Code;
+};
+}
+}
+#endif
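
Like every `ErrorInfo` subclass, `RawError` is raised through `llvm::make_error`. A sketch of the usual pattern (the check and message are illustrative):

```cpp
// Sketch: raising a RawError from a parser-style function.
#include "llvm/DebugInfo/PDB/Native/RawError.h"

static llvm::Error checkMagic(bool MagicOk) {
  if (!MagicOk)
    return llvm::make_error<llvm::pdb::RawError>(
        llvm::pdb::raw_error_code::corrupt_file, "bad MSF magic");
  return llvm::Error::success();
}
```
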
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawTypes.h
new file mode 100644
index 0000000..5cc8821
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -0,0 +1,361 @@
+//===- RawTypes.h -----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_RAWTYPES_H
+#define LLVM_DEBUGINFO_PDB_RAW_RAWTYPES_H
+
+#include "llvm/DebugInfo/CodeView/GUID.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace pdb {
+// This struct is defined as "SO" in langapi/include/pdb.h.
+struct SectionOffset {
+  support::ulittle32_t Off;
+  support::ulittle16_t Isect;
+  char Padding[2];
+};
+
+/// Header of the hash tables found in the globals and publics sections.
+/// Based on GSIHashHdr in
+/// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
+struct GSIHashHeader {
+  enum : unsigned {
+    HdrSignature = ~0U,
+    HdrVersion = 0xeffe0000 + 19990810,
+  };
+  support::ulittle32_t VerSignature;
+  support::ulittle32_t VerHdr;
+  support::ulittle32_t HrSize;
+  support::ulittle32_t NumBuckets;
+};
+
+// This corresponds to `HRFile` in the reference implementation.
+struct PSHashRecord {
+  support::ulittle32_t Off; // Offset in the symbol record stream
+  support::ulittle32_t CRef;
+};
+
+// This struct is defined as `SC` in include/dbicommon.h
+struct SectionContrib {
+  support::ulittle16_t ISect;
+  char Padding[2];
+  support::little32_t Off;
+  support::little32_t Size;
+  support::ulittle32_t Characteristics;
+  support::ulittle16_t Imod;
+  char Padding2[2];
+  support::ulittle32_t DataCrc;
+  support::ulittle32_t RelocCrc;
+};
+
+// This struct is defined as `SC2` in include/dbicommon.h
+struct SectionContrib2 {
+  // To guarantee SectionContrib2 is standard layout, we cannot use inheritance.
+  SectionContrib Base;
+  support::ulittle32_t ISectCoff;
+};
+
+// This corresponds to the `OMFSegMap` structure.
+struct SecMapHeader {
+  support::ulittle16_t SecCount;    // Number of segment descriptors in table
+  support::ulittle16_t SecCountLog; // Number of logical segment descriptors
+};
+
+// This corresponds to the `OMFSegMapDesc` structure.  The definition is not
+// present in the reference implementation, but the layout is derived from
+// code that accesses the fields.
+struct SecMapEntry {
+  support::ulittle16_t Flags; // Descriptor flags.  See OMFSegDescFlags
+  support::ulittle16_t Ovl;   // Logical overlay number.
+  support::ulittle16_t Group; // Group index into descriptor array.
+  support::ulittle16_t Frame;
+  support::ulittle16_t SecName;       // Byte index of the segment or group name
+                                      // in the sstSegName table, or 0xFFFF.
+  support::ulittle16_t ClassName;     // Byte index of the class name in the
+                                      // sstSegName table, or 0xFFFF.
+  support::ulittle32_t Offset;        // Byte offset of the logical segment
+                                      // within the specified physical segment.
+                                      // If group is set in flags, offset is the
+                                      // offset of the group.
+  support::ulittle32_t SecByteLength; // Byte count of the segment or group.
+};
+
+/// Some of the values are stored in bitfields.  Since this needs to be portable
+/// across compilers and architectures (big / little endian in particular) we
+/// can't use the actual structures below, but must instead do the shifting
+/// and masking ourselves.  The struct definitions are provided for reference.
+struct DbiFlags {
+  ///  uint16_t IncrementalLinking : 1; // True if linked incrementally
+  ///  uint16_t IsStripped : 1;         // True if private symbols were
+  ///  stripped.
+  ///  uint16_t HasCTypes : 1;          // True if linked with /debug:ctypes.
+  ///  uint16_t Reserved : 13;
+  static const uint16_t FlagIncrementalMask = 0x0001;
+  static const uint16_t FlagStrippedMask = 0x0002;
+  static const uint16_t FlagHasCTypesMask = 0x0004;
+};
+
+struct DbiBuildNo {
+  ///  uint16_t MinorVersion : 8;
+  ///  uint16_t MajorVersion : 7;
+  ///  uint16_t NewVersionFormat : 1;
+  static const uint16_t BuildMinorMask = 0x00FF;
+  static const uint16_t BuildMinorShift = 0;
+
+  static const uint16_t BuildMajorMask = 0x7F00;
+  static const uint16_t BuildMajorShift = 8;
+};
+
+/// The fixed size header that appears at the beginning of the DBI Stream.
+struct DbiStreamHeader {
+  support::little32_t VersionSignature;
+  support::ulittle32_t VersionHeader;
+
+  /// How "old" is this DBI Stream. Should match the age of the PDB InfoStream.
+  support::ulittle32_t Age;
+
+  /// Global symbol stream #
+  support::ulittle16_t GlobalSymbolStreamIndex;
+
+  /// See DbiBuildNo structure.
+  support::ulittle16_t BuildNumber;
+
+  /// Public symbols stream #
+  support::ulittle16_t PublicSymbolStreamIndex;
+
+  /// version of mspdbNNN.dll
+  support::ulittle16_t PdbDllVersion;
+
+  /// Symbol records stream #
+  support::ulittle16_t SymRecordStreamIndex;
+
+  /// rbld number of mspdbNNN.dll
+  support::ulittle16_t PdbDllRbld;
+
+  /// Size of module info stream
+  support::little32_t ModiSubstreamSize;
+
+  /// Size of sec. contrib stream
+  support::little32_t SecContrSubstreamSize;
+
+  /// Size of sec. map substream
+  support::little32_t SectionMapSize;
+
+  /// Size of file info substream
+  support::little32_t FileInfoSize;
+
+  /// Size of type server map
+  support::little32_t TypeServerSize;
+
+  /// Index of MFC Type Server
+  support::ulittle32_t MFCTypeServerIndex;
+
+  /// Size of DbgHeader info
+  support::little32_t OptionalDbgHdrSize;
+
+  /// Size of EC stream (what is EC?)
+  support::little32_t ECSubstreamSize;
+
+  /// See DbiFlags enum.
+  support::ulittle16_t Flags;
+
+  /// See PDB_MachineType enum.
+  support::ulittle16_t MachineType;
+
+  /// Pad to 64 bytes
+  support::ulittle32_t Reserved;
+};
+static_assert(sizeof(DbiStreamHeader) == 64, "Invalid DbiStreamHeader size!");
+
+struct SectionContribEntry {
+  support::ulittle16_t Section;
+  char Padding1[2];
+  support::little32_t Offset;
+  support::little32_t Size;
+  support::ulittle32_t Characteristics;
+  support::ulittle16_t ModuleIndex;
+  char Padding2[2];
+  support::ulittle32_t DataCrc;
+  support::ulittle32_t RelocCrc;
+};
+
+/// The header preceding the File Info Substream of the DBI stream.
+struct FileInfoSubstreamHeader {
+  /// Total # of modules, should match number of records in the ModuleInfo
+  /// substream.
+  support::ulittle16_t NumModules;
+
+  /// Total # of source files. This value is not accurate because PDB actually
+  /// supports more than 64k source files, so we ignore it and compute the value
+  /// from other stream fields.
+  support::ulittle16_t NumSourceFiles;
+
+  /// Following this header the File Info Substream is laid out as follows:
+  ///   ulittle16_t ModIndices[NumModules];
+  ///   ulittle16_t ModFileCounts[NumModules];
+  ///   ulittle32_t FileNameOffsets[NumSourceFiles];
+  ///   char Names[][NumSourceFiles];
+  /// with the caveat that `NumSourceFiles` cannot be trusted, so
+  /// it is computed by summing the `ModFileCounts` array.
+};
+
+struct ModInfoFlags {
+  ///  uint16_t fWritten : 1;   // True if DbiModuleDescriptor is dirty
+  ///  uint16_t fECEnabled : 1; // Is EC symbolic info present?  (What is EC?)
+  ///  uint16_t unused : 6;     // Reserved
+  ///  uint16_t iTSM : 8;       // Type Server Index for this module
+  static const uint16_t HasECFlagMask = 0x2;
+
+  static const uint16_t TypeServerIndexMask = 0xFF00;
+  static const uint16_t TypeServerIndexShift = 8;
+};
+
+/// The header preceding each entry in the Module Info substream of the DBI
+/// stream.  Corresponds to the type MODI in the reference implementation.
+struct ModuleInfoHeader {
+  /// Currently opened module. This field is a pointer in the reference
+  /// implementation, but that won't work on 64-bit systems, and anyway it
+  /// doesn't make sense to read a pointer from a file. For now it is unused,
+  /// so just ignore it.
+  support::ulittle32_t Mod;
+
+  /// First section contribution of this module.
+  SectionContribEntry SC;
+
+  /// See ModInfoFlags definition.
+  support::ulittle16_t Flags;
+
+  /// Stream Number of module debug info
+  support::ulittle16_t ModDiStream;
+
+  /// Size of local symbol debug info in above stream
+  support::ulittle32_t SymBytes;
+
+  /// Size of C11 line number info in above stream
+  support::ulittle32_t C11Bytes;
+
+  /// Size of C13 line number info in above stream
+  support::ulittle32_t C13Bytes;
+
+  /// Number of files contributing to this module
+  support::ulittle16_t NumFiles;
+
+  /// Padding so the next field is 4-byte aligned.
+  char Padding1[2];
+
+  /// Array of [0..NumFiles) DBI name buffer offsets.  In the reference
+  /// implementation this field is a pointer.  But since you can't portably
+  /// serialize a pointer, on 64-bit platforms they copy all the values except
+  /// this one into the 32-bit version of the struct and use that for
+  /// serialization.  Regardless, this field is unused; it is only there to
+  /// store a pointer that can be accessed at runtime.
+  support::ulittle32_t FileNameOffs;
+
+  /// Name Index for src file name
+  support::ulittle32_t SrcFileNameNI;
+
+  /// Name Index for path to compiler PDB
+  support::ulittle32_t PdbFilePathNI;
+
+  /// Following this header are two zero terminated strings.
+  /// char ModuleName[];
+  /// char ObjFileName[];
+};
+
+// This is the PSGSIHDR struct defined in
+// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
+struct PublicsStreamHeader {
+  support::ulittle32_t SymHash;
+  support::ulittle32_t AddrMap;
+  support::ulittle32_t NumThunks;
+  support::ulittle32_t SizeOfThunk;
+  support::ulittle16_t ISectThunkTable;
+  char Padding[2];
+  support::ulittle32_t OffThunkTable;
+  support::ulittle32_t NumSections;
+};
+
+// The header preceding the global TPI stream.
+// This corresponds to `HDR` in PDB/dbi/tpi.h.
+struct TpiStreamHeader {
+  struct EmbeddedBuf {
+    support::little32_t Off;
+    support::ulittle32_t Length;
+  };
+
+  support::ulittle32_t Version;
+  support::ulittle32_t HeaderSize;
+  support::ulittle32_t TypeIndexBegin;
+  support::ulittle32_t TypeIndexEnd;
+  support::ulittle32_t TypeRecordBytes;
+
+  // The following members correspond to `TpiHash` in PDB/dbi/tpi.h.
+  support::ulittle16_t HashStreamIndex;
+  support::ulittle16_t HashAuxStreamIndex;
+  support::ulittle32_t HashKeySize;
+  support::ulittle32_t NumHashBuckets;
+
+  EmbeddedBuf HashValueBuffer;
+  EmbeddedBuf IndexOffsetBuffer;
+  EmbeddedBuf HashAdjBuffer;
+};
+
+const uint32_t MinTpiHashBuckets = 0x1000;
+const uint32_t MaxTpiHashBuckets = 0x40000;
+
+/// The header preceding the global PDB Stream (Stream 1)
+struct InfoStreamHeader {
+  support::ulittle32_t Version;
+  support::ulittle32_t Signature;
+  support::ulittle32_t Age;
+  codeview::GUID Guid;
+};
+
+/// The header preceding the /names stream.
+struct PDBStringTableHeader {
+  support::ulittle32_t Signature;   // PDBStringTableSignature
+  support::ulittle32_t HashVersion; // 1 or 2
+  support::ulittle32_t ByteSize;    // Number of bytes of names buffer.
+};
+
+const uint32_t PDBStringTableSignature = 0xEFFEEFFE;
+
+/// The header preceding the /src/headerblock stream.
+struct SrcHeaderBlockHeader {
+  support::ulittle32_t Version; // PdbRaw_SrcHeaderBlockVer enumeration.
+  support::ulittle32_t Size;    // Size of entire stream.
+  uint64_t FileTime;            // Time stamp (Windows FILETIME format).
+  support::ulittle32_t Age;     // Age
+  uint8_t Padding[44];          // Pad to 64 bytes.
+};
+static_assert(sizeof(SrcHeaderBlockHeader) == 64, "Incorrect struct size!");
+
+/// A single file record entry within the /src/headerblock stream.
+struct SrcHeaderBlockEntry {
+  support::ulittle32_t Size;     // Record Length.
+  support::ulittle32_t Version;  // PdbRaw_SrcHeaderBlockVer enumeration.
+  support::ulittle32_t CRC;      // CRC of the original file contents.
+  support::ulittle32_t FileSize; // Size of original source file.
+  support::ulittle32_t FileNI;   // String table index of file name.
+  support::ulittle32_t ObjNI;    // String table index of object name.
+  support::ulittle32_t VFileNI;  // String table index of virtual file name.
+  uint8_t Compression;           // PDB_SourceCompression enumeration.
+  uint8_t IsVirtual;             // Is this a virtual file (injected)?
+  short Padding;                 // Pad to 4 bytes.
+  char Reserved[8];
+};
+
+static_assert(sizeof(SrcHeaderBlockEntry) == 40, "Incorrect struct size!");
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
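
Because the on-disk bitfields must be decoded by hand, code reads `DbiStreamHeader::BuildNumber` with the masks and shifts in `DbiBuildNo`. A sketch; the 0x8000 new-version-format bit is inferred from the bitfield comment rather than spelled out as a named mask:

```cpp
// Sketch: manual decode of DbiStreamHeader::BuildNumber.
static void decodeBuildNumber(uint16_t BuildNumber) {
  using llvm::pdb::DbiBuildNo;
  uint16_t Minor = (BuildNumber & DbiBuildNo::BuildMinorMask) >>
                   DbiBuildNo::BuildMinorShift;
  uint16_t Major = (BuildNumber & DbiBuildNo::BuildMajorMask) >>
                   DbiBuildNo::BuildMajorShift;
  bool NewFormat = (BuildNumber & 0x8000) != 0; // inferred, not a named mask
  (void)Minor; (void)Major; (void)NewFormat;
}
```
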
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/SymbolStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/SymbolStream.h
new file mode 100644
index 0000000..ae9f7d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/SymbolStream.h
@@ -0,0 +1,48 @@
+//===- SymbolStream.h - PDB Symbol Stream Access ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBSYMBOLSTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBSYMBOLSTREAM_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace msf {
+class MappedBlockStream;
+}
+namespace pdb {
+class PDBFile;
+
+class SymbolStream {
+public:
+  SymbolStream(std::unique_ptr<msf::MappedBlockStream> Stream);
+  ~SymbolStream();
+  Error reload();
+
+  const codeview::CVSymbolArray &getSymbolArray() const {
+    return SymbolRecords;
+  }
+
+  codeview::CVSymbol readRecord(uint32_t Offset) const;
+
+  iterator_range<codeview::CVSymbolArray::Iterator>
+  getSymbols(bool *HadError) const;
+
+  Error commit();
+
+private:
+  codeview::CVSymbolArray SymbolRecords;
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+};
+}
+}
+
+#endif
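
After `reload()`, the record array is iterable like any `VarStreamArray`. A sketch that simply counts records:

```cpp
// Sketch: walk the reloaded symbol record stream.
static void countSymbols(llvm::pdb::SymbolStream &Syms, llvm::raw_ostream &OS) {
  uint32_t N = 0;
  for (const llvm::codeview::CVSymbol &Sym : Syms.getSymbolArray()) {
    (void)Sym.kind(); // record kind, e.g. S_PUB32 for a public symbol
    ++N;
  }
  OS << N << " symbol records\n";
}
```
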
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiHashing.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiHashing.h
new file mode 100644
index 0000000..c1edec7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiHashing.h
@@ -0,0 +1,24 @@
+//===- TpiHashing.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_TPIHASHING_H
+#define LLVM_DEBUGINFO_PDB_TPIHASHING_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+
+Expected<uint32_t> hashTypeRecord(const llvm::codeview::CVType &Type);
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_TPIHASHING_H
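
The hash feeds the TPI hash-stream buckets; a caller typically reduces it modulo the bucket count from the TPI header. A sketch:

```cpp
// Sketch: map one type record to its hash bucket.
static llvm::Expected<uint32_t>
bucketFor(const llvm::codeview::CVType &Type, uint32_t NumHashBuckets) {
  auto HashOrErr = llvm::pdb::hashTypeRecord(Type);
  if (!HashOrErr)
    return HashOrErr.takeError();
  return *HashOrErr % NumHashBuckets;
}
```
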
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStream.h
new file mode 100644
index 0000000..b779399
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStream.h
@@ -0,0 +1,85 @@
+//===- TpiStream.h - PDB Type Info (TPI) Stream 2 Access --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAM_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/HashTable.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class LazyRandomTypeCollection;
+}
+namespace msf {
+class MappedBlockStream;
+}
+namespace pdb {
+class PDBFile;
+
+class TpiStream {
+  friend class TpiStreamBuilder;
+
+public:
+  TpiStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
+  ~TpiStream();
+  Error reload();
+
+  PdbRaw_TpiVer getTpiVersion() const;
+
+  uint32_t TypeIndexBegin() const;
+  uint32_t TypeIndexEnd() const;
+  uint32_t getNumTypeRecords() const;
+  uint16_t getTypeHashStreamIndex() const;
+  uint16_t getTypeHashStreamAuxIndex() const;
+
+  uint32_t getHashKeySize() const;
+  uint32_t getNumHashBuckets() const;
+  FixedStreamArray<support::ulittle32_t> getHashValues() const;
+  FixedStreamArray<codeview::TypeIndexOffset> getTypeIndexOffsets() const;
+  HashTable<support::ulittle32_t> &getHashAdjusters();
+
+  codeview::CVTypeRange types(bool *HadError) const;
+  const codeview::CVTypeArray &typeArray() const { return TypeRecords; }
+
+  codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }
+
+  BinarySubstreamRef getTypeRecordsSubstream() const;
+
+  Error commit();
+
+private:
+  PDBFile &Pdb;
+  std::unique_ptr<msf::MappedBlockStream> Stream;
+
+  std::unique_ptr<codeview::LazyRandomTypeCollection> Types;
+
+  BinarySubstreamRef TypeRecordsSubstream;
+
+  codeview::CVTypeArray TypeRecords;
+
+  std::unique_ptr<BinaryStream> HashStream;
+  FixedStreamArray<support::ulittle32_t> HashValues;
+  FixedStreamArray<codeview::TypeIndexOffset> TypeIndexOffsets;
+  HashTable<support::ulittle32_t> HashAdjusters;
+
+  const TpiStreamHeader *Header;
+};
+}
+}
+
+#endif
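
Type indices form a half-open range, so the stream can be summarized without touching individual records. A sketch of the cheap queries after `reload()`:

```cpp
// Sketch: summarize a reloaded TPI stream.
static void describeTpi(llvm::pdb::TpiStream &Tpi, llvm::raw_ostream &OS) {
  OS << "version:      " << uint32_t(Tpi.getTpiVersion()) << "\n";
  OS << "type records: " << Tpi.getNumTypeRecords() << "\n";
  OS << "index range:  [" << Tpi.TypeIndexBegin() << ", " << Tpi.TypeIndexEnd()
     << ")\n";
}
```
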
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h
new file mode 100644
index 0000000..411720d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h
@@ -0,0 +1,90 @@
+//===- TpiStreamBuilder.h - PDB Tpi Stream Creation -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAMBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryItemStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+
+#include <vector>
+
+namespace llvm {
+class BinaryByteStream;
+class WritableBinaryStreamRef;
+
+template <> struct BinaryItemTraits<llvm::codeview::CVType> {
+  static size_t length(const codeview::CVType &Item) { return Item.length(); }
+  static ArrayRef<uint8_t> bytes(const codeview::CVType &Item) {
+    return Item.data();
+  }
+};
+
+namespace codeview {
+class TypeRecord;
+}
+namespace msf {
+class MSFBuilder;
+struct MSFLayout;
+}
+namespace pdb {
+class PDBFile;
+class TpiStream;
+struct TpiStreamHeader;
+
+class TpiStreamBuilder {
+public:
+  explicit TpiStreamBuilder(msf::MSFBuilder &Msf, uint32_t StreamIdx);
+  ~TpiStreamBuilder();
+
+  TpiStreamBuilder(const TpiStreamBuilder &) = delete;
+  TpiStreamBuilder &operator=(const TpiStreamBuilder &) = delete;
+
+  void setVersionHeader(PdbRaw_TpiVer Version);
+  void addTypeRecord(ArrayRef<uint8_t> Type, Optional<uint32_t> Hash);
+
+  Error finalizeMsfLayout();
+
+  uint32_t getRecordCount() const { return TypeRecords.size(); }
+
+  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);
+
+  uint32_t calculateSerializedLength();
+
+private:
+  uint32_t calculateHashBufferSize() const;
+  uint32_t calculateIndexOffsetSize() const;
+  Error finalize();
+
+  msf::MSFBuilder &Msf;
+  BumpPtrAllocator &Allocator;
+
+  size_t TypeRecordBytes = 0;
+
+  PdbRaw_TpiVer VerHeader = PdbRaw_TpiVer::PdbTpiV80;
+  std::vector<ArrayRef<uint8_t>> TypeRecords;
+  std::vector<uint32_t> TypeHashes;
+  std::vector<codeview::TypeIndexOffset> TypeIndexOffsets;
+  uint32_t HashStreamIndex = kInvalidStreamIndex;
+  std::unique_ptr<BinaryByteStream> HashValueStream;
+
+  const TpiStreamHeader *Header;
+  uint32_t Idx;
+};
+}
+}
+
+#endif
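
Records arrive pre-serialized; the builder only concatenates them and bookkeeps sizes, hashes and index offsets. A heavily hedged sketch, since producing valid record bytes is a separate concern (here `Record` is assumed to already hold one well-formed CodeView type record):

```cpp
// Sketch: feed one pre-serialized record to the TPI builder.
static void addOneRecord(llvm::pdb::TpiStreamBuilder &Tpi,
                         llvm::ArrayRef<uint8_t> Record) {
  Tpi.setVersionHeader(llvm::pdb::PdbRaw_TpiVer::PdbTpiV80);
  // The optional hash would normally come from hashTypeRecord(); passing
  // None simply records no hash for this entry in this sketch.
  Tpi.addTypeRecord(Record, llvm::None);
}
```
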
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDB.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDB.h
new file mode 100644
index 0000000..9f9da39
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDB.h
@@ -0,0 +1,32 @@
+//===- PDB.h - base header file for creating a PDB reader -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDB_H
+#define LLVM_DEBUGINFO_PDB_PDB_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+
+namespace llvm {
+namespace pdb {
+
+class IPDBSession;
+
+Error loadDataForPDB(PDB_ReaderType Type, StringRef Path,
+                     std::unique_ptr<IPDBSession> &Session);
+
+Error loadDataForEXE(PDB_ReaderType Type, StringRef Path,
+                     std::unique_ptr<IPDBSession> &Session);
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDB_H
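
These two functions are the high-level front door; `PDB_ReaderType` (declared in PDBTypes.h) selects between the Windows-only DIA reader and the portable native one. For example:

```cpp
// Sketch: open a PDB through the generic entry point.
#include "llvm/DebugInfo/PDB/PDB.h"

static llvm::Error openNative(llvm::StringRef Path,
                              std::unique_ptr<llvm::pdb::IPDBSession> &S) {
  return llvm::pdb::loadDataForPDB(llvm::pdb::PDB_ReaderType::Native, Path, S);
}
```
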
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBContext.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBContext.h
new file mode 100644
index 0000000..0ce49f5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBContext.h
@@ -0,0 +1,65 @@
+//===-- PDBContext.h --------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
+#define LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+namespace object {
+class COFFObjectFile;
+} // end namespace object
+
+namespace pdb {
+
+  /// PDBContext
+  /// This data structure is the top level entity that deals with PDB debug
+  /// information parsing.  This data structure exists only when there is a
+  /// need for a transparent interface to different debug information formats
+  /// (e.g. PDB and DWARF).  More control over debug information access can
+  /// be had by using the PDB interfaces directly.
+  class PDBContext : public DIContext {
+  public:
+    PDBContext(const object::COFFObjectFile &Object,
+               std::unique_ptr<IPDBSession> PDBSession);
+    PDBContext(PDBContext &) = delete;
+    PDBContext &operator=(PDBContext &) = delete;
+
+    static bool classof(const DIContext *DICtx) {
+      return DICtx->getKind() == CK_PDB;
+    }
+
+    void dump(raw_ostream &OS, DIDumpOptions DIDumpOpts) override;
+
+    DILineInfo getLineInfoForAddress(
+        uint64_t Address,
+        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+    DILineInfoTable getLineInfoForAddressRange(
+        uint64_t Address, uint64_t Size,
+        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+    DIInliningInfo getInliningInfoForAddress(
+        uint64_t Address,
+        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+
+  private:
+    std::string getFunctionName(uint64_t Address, DINameKind NameKind) const;
+    std::unique_ptr<IPDBSession> Session;
+  };
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
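
A sketch of the transparent-interface use case the class comment describes: symbolizing an address through the generic DIContext API. Obj (a loaded COFFObjectFile) and Session (obtained via loadDataForEXE on the same binary) are placeholders.

    // Hedged sketch; Obj and Session are placeholders set up elsewhere.
    PDBContext Ctx(Obj, std::move(Session));
    DILineInfo Info = Ctx.getLineInfoForAddress(0x401000); // placeholder address
    outs() << Info.FileName << ':' << Info.Line << '\n';
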
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBExtras.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBExtras.h
new file mode 100644
index 0000000..3c9a198
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBExtras.h
@@ -0,0 +1,48 @@
+//===- PDBExtras.h - helper functions and classes for PDBs ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
+#define LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include <unordered_map>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+using TagStats = std::unordered_map<PDB_SymType, int>;
+
+raw_ostream &operator<<(raw_ostream &OS, const PDB_VariantType &Value);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_CallingConv &Conv);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_DataKind &Data);
+raw_ostream &operator<<(raw_ostream &OS, const codeview::RegisterId &Reg);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_LocType &Loc);
+raw_ostream &operator<<(raw_ostream &OS, const codeview::ThunkOrdinal &Thunk);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_Checksum &Checksum);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_Lang &Lang);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_SymType &Tag);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_MemberAccess &Access);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_UdtType &Type);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_Machine &Machine);
+raw_ostream &operator<<(raw_ostream &OS,
+                        const PDB_SourceCompression &Compression);
+
+raw_ostream &operator<<(raw_ostream &OS, const Variant &Value);
+raw_ostream &operator<<(raw_ostream &OS, const VersionInfo &Version);
+raw_ostream &operator<<(raw_ostream &OS, const TagStats &Stats);
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
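
These stream operators make the PDB enumerations printable directly, which is what keeps dumper code terse. A small sketch:

    PDB_SymType Tag = PDB_SymType::Function;
    outs() << "tag = " << Tag << '\n';   // prints a human-readable tag name

    TagStats Stats;
    ++Stats[PDB_SymType::Function];
    ++Stats[PDB_SymType::Data];
    outs() << Stats << '\n';             // per-tag child counts
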
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymDumper.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymDumper.h
new file mode 100644
index 0000000..c976935
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymDumper.h
@@ -0,0 +1,79 @@
+//===- PDBSymDumper.h - base interface for PDB symbol dumper ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymDumper {
+public:
+  PDBSymDumper(bool ShouldRequireImpl);
+  virtual ~PDBSymDumper();
+
+  virtual void dump(const PDBSymbolAnnotation &Symbol);
+  virtual void dump(const PDBSymbolBlock &Symbol);
+  virtual void dump(const PDBSymbolCompiland &Symbol);
+  virtual void dump(const PDBSymbolCompilandDetails &Symbol);
+  virtual void dump(const PDBSymbolCompilandEnv &Symbol);
+  virtual void dump(const PDBSymbolCustom &Symbol);
+  virtual void dump(const PDBSymbolData &Symbol);
+  virtual void dump(const PDBSymbolExe &Symbol);
+  virtual void dump(const PDBSymbolFunc &Symbol);
+  virtual void dump(const PDBSymbolFuncDebugEnd &Symbol);
+  virtual void dump(const PDBSymbolFuncDebugStart &Symbol);
+  virtual void dump(const PDBSymbolLabel &Symbol);
+  virtual void dump(const PDBSymbolPublicSymbol &Symbol);
+  virtual void dump(const PDBSymbolThunk &Symbol);
+  virtual void dump(const PDBSymbolTypeArray &Symbol);
+  virtual void dump(const PDBSymbolTypeBaseClass &Symbol);
+  virtual void dump(const PDBSymbolTypeBuiltin &Symbol);
+  virtual void dump(const PDBSymbolTypeCustom &Symbol);
+  virtual void dump(const PDBSymbolTypeDimension &Symbol);
+  virtual void dump(const PDBSymbolTypeEnum &Symbol);
+  virtual void dump(const PDBSymbolTypeFriend &Symbol);
+  virtual void dump(const PDBSymbolTypeFunctionArg &Symbol);
+  virtual void dump(const PDBSymbolTypeFunctionSig &Symbol);
+  virtual void dump(const PDBSymbolTypeManaged &Symbol);
+  virtual void dump(const PDBSymbolTypePointer &Symbol);
+  virtual void dump(const PDBSymbolTypeTypedef &Symbol);
+  virtual void dump(const PDBSymbolTypeUDT &Symbol);
+  virtual void dump(const PDBSymbolTypeVTable &Symbol);
+  virtual void dump(const PDBSymbolTypeVTableShape &Symbol);
+  virtual void dump(const PDBSymbolUnknown &Symbol);
+  virtual void dump(const PDBSymbolUsingNamespace &Symbol);
+
+  virtual void dumpRight(const PDBSymbolTypeArray &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeBaseClass &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeBuiltin &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeCustom &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeDimension &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeEnum &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeFriend &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeFunctionArg &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeFunctionSig &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeManaged &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypePointer &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeTypedef &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeUDT &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeVTable &Symbol) {}
+  virtual void dumpRight(const PDBSymbolTypeVTableShape &Symbol) {}
+
+private:
+  bool RequireImpl;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
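
PDBSymDumper's ShouldRequireImpl flag controls what an unoverridden dump() overload does: with false it is a silent no-op, with true a missing override is treated as a hard error. A sketch of a dumper that only handles functions:

    #include "llvm/DebugInfo/PDB/PDBSymDumper.h"
    #include "llvm/DebugInfo/PDB/PDBSymbolFunc.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::pdb;

    // Prints function names and silently ignores every other symbol kind.
    class FuncNameDumper : public PDBSymDumper {
    public:
      FuncNameDumper() : PDBSymDumper(/*ShouldRequireImpl=*/false) {}
      void dump(const PDBSymbolFunc &Symbol) override {
        outs() << Symbol.getName() << '\n';
      }
    };
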
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbol.h
new file mode 100644
index 0000000..0437346
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbol.h
@@ -0,0 +1,140 @@
+//===- PDBSymbol.h - base class for user-facing symbol types ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOL_H
+
+#include "ConcreteSymbolEnumerator.h"
+#include "IPDBRawSymbol.h"
+#include "PDBExtras.h"
+#include "PDBTypes.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+
+#define FORWARD_SYMBOL_METHOD(MethodName)                                      \
+  auto MethodName() const->decltype(RawSymbol->MethodName()) {                 \
+    return RawSymbol->MethodName();                                            \
+  }
+
+#define FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(ConcreteType, PrivateName, \
+                                                    PublicName)                \
+  auto PublicName##Id() const->decltype(RawSymbol->PrivateName##Id()) {        \
+    return RawSymbol->PrivateName##Id();                                       \
+  }                                                                            \
+  std::unique_ptr<ConcreteType> PublicName() const {                           \
+    uint32_t Id = PublicName##Id();                                            \
+    return getConcreteSymbolByIdHelper<ConcreteType>(Id);                      \
+  }
+
+#define FORWARD_SYMBOL_ID_METHOD_WITH_NAME(PrivateName, PublicName)            \
+  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbol, PrivateName,          \
+                                              PublicName)
+
+#define FORWARD_SYMBOL_ID_METHOD(MethodName)                                   \
+  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(MethodName, MethodName)
+
+namespace llvm {
+
+class StringRef;
+class raw_ostream;
+
+namespace pdb {
+class IPDBRawSymbol;
+class IPDBSession;
+
+#define DECLARE_PDB_SYMBOL_CONCRETE_TYPE(TagValue)                             \
+  static const PDB_SymType Tag = TagValue;                                     \
+  static bool classof(const PDBSymbol *S) { return S->getSymTag() == Tag; }
+
+/// PDBSymbol defines the base of the inheritance hierarchy for concrete symbol
+/// types (e.g. functions, executables, vtables, etc).  All concrete symbol
+/// types inherit from PDBSymbol and expose the exact set of methods that are
+/// valid for that particular symbol type, as described in the Microsoft
+/// reference "Lexical and Class Hierarchy of Symbol Types":
+/// https://msdn.microsoft.com/en-us/library/370hs6k4.aspx
+class PDBSymbol {
+protected:
+  PDBSymbol(const IPDBSession &PDBSession,
+            std::unique_ptr<IPDBRawSymbol> Symbol);
+  PDBSymbol(PDBSymbol &Symbol);
+
+public:
+  static std::unique_ptr<PDBSymbol>
+  create(const IPDBSession &PDBSession, std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  virtual ~PDBSymbol();
+
+  /// Dumps the contents of a symbol to a raw_ostream.  By default this will
+  /// just call dump() on the underlying RawSymbol, which allows us to
+  /// discover unknown properties, but individual implementations of PDBSymbol
+  /// may override the behavior to only dump known fields.
+  virtual void dump(PDBSymDumper &Dumper) const = 0;
+
+  /// For certain PDBSymbolTypes, dumps additional information for the type that
+  /// normally goes on the right side of the symbol.
+  virtual void dumpRight(PDBSymDumper &Dumper) const {}
+
+  void defaultDump(raw_ostream &OS, int Indent) const;
+  void dumpProperties() const;
+  void dumpChildStats() const;
+
+  PDB_SymType getSymTag() const;
+  uint32_t getSymIndexId() const;
+
+  template <typename T> std::unique_ptr<T> findOneChild() const {
+    auto Enumerator(findAllChildren<T>());
+    if (!Enumerator)
+      return nullptr;
+    return Enumerator->getNext();
+  }
+
+  std::unique_ptr<PDBSymbol> clone() const;
+
+  template <typename T>
+  std::unique_ptr<ConcreteSymbolEnumerator<T>> findAllChildren() const {
+    auto BaseIter = RawSymbol->findChildren(T::Tag);
+    if (!BaseIter)
+      return nullptr;
+    return llvm::make_unique<ConcreteSymbolEnumerator<T>>(std::move(BaseIter));
+  }
+  std::unique_ptr<IPDBEnumSymbols> findAllChildren(PDB_SymType Type) const;
+  std::unique_ptr<IPDBEnumSymbols> findAllChildren() const;
+
+  std::unique_ptr<IPDBEnumSymbols>
+  findChildren(PDB_SymType Type, StringRef Name,
+               PDB_NameSearchFlags Flags) const;
+  std::unique_ptr<IPDBEnumSymbols> findChildrenByRVA(PDB_SymType Type,
+                                                     StringRef Name,
+                                                     PDB_NameSearchFlags Flags,
+                                                     uint32_t RVA) const;
+  std::unique_ptr<IPDBEnumSymbols> findInlineFramesByRVA(uint32_t RVA) const;
+
+  const IPDBRawSymbol &getRawSymbol() const { return *RawSymbol; }
+  IPDBRawSymbol &getRawSymbol() { return *RawSymbol; }
+
+  const IPDBSession &getSession() const { return Session; }
+
+  std::unique_ptr<IPDBEnumSymbols> getChildStats(TagStats &Stats) const;
+
+protected:
+  std::unique_ptr<PDBSymbol> getSymbolByIdHelper(uint32_t Id) const;
+
+  template <typename ConcreteType>
+  std::unique_ptr<ConcreteType> getConcreteSymbolByIdHelper(uint32_t Id) const {
+    return unique_dyn_cast_or_null<ConcreteType>(getSymbolByIdHelper(Id));
+  }
+
+  const IPDBSession &Session;
+  std::unique_ptr<IPDBRawSymbol> RawSymbol;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
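
The templated findAllChildren is the main traversal primitive: it filters children by T::Tag and yields concrete symbol objects. A sketch, where Scope is any PDBSymbol (typically the global PDBSymbolExe scope) and FuncNameDumper is the hypothetical dumper sketched earlier:

    FuncNameDumper Dumper;                          // hypothetical, from above
    if (auto Funcs = Scope.findAllChildren<PDBSymbolFunc>())
      while (auto Func = Funcs->getNext())          // unique_ptr<PDBSymbolFunc>
        Func->dump(Dumper);                         // dispatches to the dumper
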
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
new file mode 100644
index 0000000..3169146
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
@@ -0,0 +1,39 @@
+//===- PDBSymbolAnnotation.h - Accessors for querying PDB annotations ---*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolAnnotation : public PDBSymbol {
+public:
+  PDBSymbolAnnotation(const IPDBSession &PDBSession,
+                      std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Annotation)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(getDataKind)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  // FORWARD_SYMBOL_METHOD(getValue)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
new file mode 100644
index 0000000..d81da1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
@@ -0,0 +1,42 @@
+//===- PDBSymbolBlock.h - Accessors for querying PDB blocks -------------*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolBlock : public PDBSymbol {
+public:
+  PDBSymbolBlock(const IPDBSession &PDBSession,
+                 std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Block)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
new file mode 100644
index 0000000..9549089
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
@@ -0,0 +1,42 @@
+//===- PDBSymbolCompiland.h - Accessors for querying PDB compilands -----*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolCompiland : public PDBSymbol {
+public:
+  PDBSymbolCompiland(const IPDBSession &PDBSession,
+                     std::unique_ptr<IPDBRawSymbol> CompilandSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Compiland)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLibraryName)
+  FORWARD_SYMBOL_METHOD(getName)
+
+  std::string getSourceFileName() const;
+  std::string getSourceFileFullPath() const;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
new file mode 100644
index 0000000..dba50c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
@@ -0,0 +1,57 @@
+//===- PDBSymbolCompilandDetails.h - PDB compiland details ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolCompilandDetails : public PDBSymbol {
+public:
+  PDBSymbolCompilandDetails(const IPDBSession &PDBSession,
+                            std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandDetails)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  void getFrontEndVersion(VersionInfo &Version) const {
+    RawSymbol->getFrontEndVersion(Version);
+  }
+
+  void getBackEndVersion(VersionInfo &Version) const {
+    RawSymbol->getBackEndVersion(Version);
+  }
+
+  FORWARD_SYMBOL_METHOD(getCompilerName)
+  FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
+  FORWARD_SYMBOL_METHOD(hasDebugInfo)
+  FORWARD_SYMBOL_METHOD(hasManagedCode)
+  FORWARD_SYMBOL_METHOD(hasSecurityChecks)
+  FORWARD_SYMBOL_METHOD(isCVTCIL)
+  FORWARD_SYMBOL_METHOD(isDataAligned)
+  FORWARD_SYMBOL_METHOD(isHotpatchable)
+  FORWARD_SYMBOL_METHOD(isLTCG)
+  FORWARD_SYMBOL_METHOD(isMSILNetmodule)
+  FORWARD_SYMBOL_METHOD(getLanguage)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getPlatform)
+  FORWARD_SYMBOL_METHOD(getSourceFileName)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
new file mode 100644
index 0000000..7868f04
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolCompilandEnv.h - compiland environment variables -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+class PDBSymbolCompilandEnv : public PDBSymbol {
+public:
+  PDBSymbolCompilandEnv(const IPDBSession &PDBSession,
+                        std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandEnv)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  std::string getValue() const;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
new file mode 100644
index 0000000..54f0894
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
@@ -0,0 +1,40 @@
+//===- PDBSymbolCustom.h - compiler-specific types --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+/// PDBSymbolCustom represents symbols that are compiler-specific and do not
+/// fit anywhere else in the lexical hierarchy.
+/// https://msdn.microsoft.com/en-us/library/d88sf09h.aspx
+class PDBSymbolCustom : public PDBSymbol {
+public:
+  PDBSymbolCustom(const IPDBSession &PDBSession,
+                  std::unique_ptr<IPDBRawSymbol> CustomSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Custom)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes);
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolData.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolData.h
new file mode 100644
index 0000000..76b14bf
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolData.h
@@ -0,0 +1,64 @@
+//===- PDBSymbolData.h - PDB data (e.g. variable) accessors -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
+
+#include "IPDBLineNumber.h"
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolData : public PDBSymbol {
+public:
+  PDBSymbolData(const IPDBSession &PDBSession,
+                std::unique_ptr<IPDBRawSymbol> DataSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Data)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAccess)
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(getAddressTaken)
+  FORWARD_SYMBOL_METHOD(getBitPosition)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(isCompilerGenerated)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getDataKind)
+  FORWARD_SYMBOL_METHOD(isAggregated)
+  FORWARD_SYMBOL_METHOD(isSplitted)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(getRegisterId)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getSlot)
+  FORWARD_SYMBOL_METHOD(getToken)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(getValue)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+
+  std::unique_ptr<IPDBEnumLineNumbers> getLineNumbers() const;
+  uint32_t getCompilandId() const;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
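
A sketch of enumerating data symbols beneath a placeholder executable scope Exe; getValue() returns a Variant, which is printable via the operator declared in PDBExtras.h:

    // Exe is a placeholder PDBSymbolExe obtained from the session.
    if (auto Data = Exe->findAllChildren<PDBSymbolData>())
      while (auto Var = Data->getNext())
        outs() << Var->getName() << " = " << Var->getValue() << '\n';
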
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolExe.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
new file mode 100644
index 0000000..2c2d746
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
@@ -0,0 +1,49 @@
+//===- PDBSymbolExe.h - Accessors for querying executables in a PDB ----*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolExe : public PDBSymbol {
+public:
+  PDBSymbolExe(const IPDBSession &PDBSession,
+               std::unique_ptr<IPDBRawSymbol> ExeSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Exe)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAge)
+  FORWARD_SYMBOL_METHOD(getGuid)
+  FORWARD_SYMBOL_METHOD(hasCTypes)
+  FORWARD_SYMBOL_METHOD(hasPrivateSymbols)
+  FORWARD_SYMBOL_METHOD(getMachineType)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getSignature)
+  FORWARD_SYMBOL_METHOD(getSymbolsFileName)
+
+  uint32_t getPointerByteSize() const;
+
+private:
+  void dumpChildren(raw_ostream &OS, StringRef Label, PDB_SymType ChildType,
+                    int Indent) const;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
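
The global scope of a session is a PDBSymbolExe. A sketch, assuming IPDBSession exposes its usual getGlobalScope() accessor:

    // Session is a placeholder std::unique_ptr<IPDBSession>.
    auto Exe = Session->getGlobalScope();           // unique_ptr<PDBSymbolExe>
    outs() << "exe: " << Exe->getName()
           << " (age " << Exe->getAge() << ")\n";
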
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
new file mode 100644
index 0000000..d6013e2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
@@ -0,0 +1,88 @@
+//===- PDBSymbolFunc.h - class representing a function instance -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
+
+#include "IPDBLineNumber.h"
+#include "PDBSymbol.h"
+#include "PDBSymbolTypeFunctionSig.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolFunc : public PDBSymbol {
+public:
+  PDBSymbolFunc(const IPDBSession &PDBSession,
+                std::unique_ptr<IPDBRawSymbol> FuncSymbol);
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  bool isDestructor() const;
+
+  std::unique_ptr<IPDBEnumChildren<PDBSymbolData>> getArguments() const;
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Function)
+
+  FORWARD_SYMBOL_METHOD(getAccess)
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(isCompilerGenerated)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+  FORWARD_SYMBOL_METHOD(hasFarReturn)
+  FORWARD_SYMBOL_METHOD(hasAlloca)
+  FORWARD_SYMBOL_METHOD(hasEH)
+  FORWARD_SYMBOL_METHOD(hasEHa)
+  FORWARD_SYMBOL_METHOD(hasInlAsm)
+  FORWARD_SYMBOL_METHOD(hasLongJump)
+  FORWARD_SYMBOL_METHOD(hasSEH)
+  FORWARD_SYMBOL_METHOD(hasSecurityChecks)
+  FORWARD_SYMBOL_METHOD(hasSetJump)
+  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+  FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
+  FORWARD_SYMBOL_METHOD(hasInlineAttribute)
+  FORWARD_SYMBOL_METHOD(isNaked)
+  FORWARD_SYMBOL_METHOD(isStatic)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocalBasePointerRegisterId)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(hasFramePointer)
+  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+  FORWARD_SYMBOL_METHOD(isUnreached)
+  FORWARD_SYMBOL_METHOD(getNoStackOrdering)
+  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+  FORWARD_SYMBOL_METHOD(isPureVirtual)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getToken)
+  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbolTypeFunctionSig, getType,
+                                              getSignature)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(getUndecoratedName)
+  FORWARD_SYMBOL_METHOD(isVirtual)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+
+  std::unique_ptr<IPDBEnumLineNumbers> getLineNumbers() const;
+  uint32_t getCompilandId() const;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
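
A sketch of inspecting one function symbol through the forwarded accessors above; getSignature() resolves the symbol's type id into a concrete PDBSymbolTypeFunctionSig:

    void printFunc(const PDBSymbolFunc &Func) {
      outs() << Func.getName()
             << " rva=" << Func.getRelativeVirtualAddress()
             << " len=" << Func.getLength() << '\n';
      if (auto Sig = Func.getSignature())           // may be null
        outs() << "  " << Sig->getCount() << " argument(s)\n";
    }
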
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
new file mode 100644
index 0000000..3341bd9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
@@ -0,0 +1,51 @@
+//===- PDBSymbolFuncDebugEnd.h - function end bounds info -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolFuncDebugEnd : public PDBSymbol {
+public:
+  PDBSymbolFuncDebugEnd(const IPDBSession &PDBSession,
+                        std::unique_ptr<IPDBRawSymbol> FuncDebugEndSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugEnd)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+  FORWARD_SYMBOL_METHOD(hasFarReturn)
+  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+  FORWARD_SYMBOL_METHOD(isStatic)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+  FORWARD_SYMBOL_METHOD(isUnreached)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
new file mode 100644
index 0000000..6729838
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
@@ -0,0 +1,50 @@
+//===- PDBSymbolFuncDebugStart.h - function start bounds info ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolFuncDebugStart : public PDBSymbol {
+public:
+  PDBSymbolFuncDebugStart(const IPDBSession &PDBSession,
+                          std::unique_ptr<IPDBRawSymbol> FuncDebugStartSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugStart)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+  FORWARD_SYMBOL_METHOD(hasFarReturn)
+  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+  FORWARD_SYMBOL_METHOD(isStatic)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+  FORWARD_SYMBOL_METHOD(isUnreached)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
new file mode 100644
index 0000000..c2b1c28
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
@@ -0,0 +1,50 @@
+//===- PDBSymbolLabel.h - label info ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolLabel : public PDBSymbol {
+public:
+  PDBSymbolLabel(const IPDBSession &PDBSession,
+                 std::unique_ptr<IPDBRawSymbol> LabelSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Label)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+  FORWARD_SYMBOL_METHOD(hasFarReturn)
+  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+  FORWARD_SYMBOL_METHOD(isUnreached)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
new file mode 100644
index 0000000..c9e6ee6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
@@ -0,0 +1,48 @@
+//===- PDBSymbolPublicSymbol.h - public symbol info -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolPublicSymbol : public PDBSymbol {
+public:
+  PDBSymbolPublicSymbol(const IPDBSession &PDBSession,
+                        std::unique_ptr<IPDBRawSymbol> PublicSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PublicSymbol)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_METHOD(isCode)
+  FORWARD_SYMBOL_METHOD(isFunction)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getLocationType)
+  FORWARD_SYMBOL_METHOD(isManagedCode)
+  FORWARD_SYMBOL_METHOD(isMSILCode)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getUndecoratedName)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
new file mode 100644
index 0000000..614fad8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
@@ -0,0 +1,57 @@
+//===- PDBSymbolThunk.h - Support for querying PDB thunks ---------------*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolThunk : public PDBSymbol {
+public:
+  PDBSymbolThunk(const IPDBSession &PDBSession,
+                 std::unique_ptr<IPDBRawSymbol> ThunkSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Thunk)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAccess)
+  FORWARD_SYMBOL_METHOD(getAddressOffset)
+  FORWARD_SYMBOL_METHOD(getAddressSection)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
+  FORWARD_SYMBOL_METHOD(isStatic)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(isPureVirtual)
+  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getTargetOffset)
+  FORWARD_SYMBOL_METHOD(getTargetRelativeVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getTargetVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getTargetSection)
+  FORWARD_SYMBOL_METHOD(getThunkOrdinal)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVirtual)
+  FORWARD_SYMBOL_METHOD(getVirtualAddress)
+  FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
new file mode 100644
index 0000000..39b7d3b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
@@ -0,0 +1,45 @@
+//===- PDBSymbolTypeArray.h - array type information ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeArray : public PDBSymbol {
+public:
+  PDBSymbolTypeArray(const IPDBSession &PDBSession,
+                     std::unique_ptr<IPDBRawSymbol> ArrayTypeSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ArrayType)
+
+  void dump(PDBSymDumper &Dumper) const override;
+  void dumpRight(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getArrayIndexType)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getCount)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getRank)
+  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getElementType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
new file mode 100644
index 0000000..d607a3d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
@@ -0,0 +1,64 @@
+//===- PDBSymbolTypeBaseClass.h - base class type information ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeBaseClass : public PDBSymbol {
+public:
+  PDBSymbolTypeBaseClass(const IPDBSession &PDBSession,
+                         std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BaseClass)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getAccess)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(hasConstructor)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+  FORWARD_SYMBOL_METHOD(hasCastOperator)
+  FORWARD_SYMBOL_METHOD(hasNestedTypes)
+  FORWARD_SYMBOL_METHOD(isIndirectVirtualBaseClass)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(isNested)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+  FORWARD_SYMBOL_METHOD(isPacked)
+  FORWARD_SYMBOL_METHOD(isScoped)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+  FORWARD_SYMBOL_METHOD(getUdtKind)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+
+  FORWARD_SYMBOL_METHOD(isVirtualBaseClass)
+  FORWARD_SYMBOL_METHOD(getVirtualBaseDispIndex)
+  FORWARD_SYMBOL_METHOD(getVirtualBasePointerOffset)
+  // FORWARD_SYMBOL_METHOD(getVirtualBaseTableType)
+  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
new file mode 100644
index 0000000..5b1863c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
@@ -0,0 +1,41 @@
+//===- PDBSymbolTypeBuiltin.h - builtin type information --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeBuiltin : public PDBSymbol {
+public:
+  PDBSymbolTypeBuiltin(const IPDBSession &PDBSession,
+                       std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BuiltinType)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getBuiltinType)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
new file mode 100644
index 0000000..199b3f8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolTypeCustom.h - custom compiler type information -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeCustom : public PDBSymbol {
+public:
+  PDBSymbolTypeCustom(const IPDBSession &PDBSession,
+                      std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CustomType)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getOemId)
+  FORWARD_SYMBOL_METHOD(getOemSymbolId)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
new file mode 100644
index 0000000..e635eb5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolTypeDimension.h - array dimension type info -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeDimension : public PDBSymbol {
+public:
+  PDBSymbolTypeDimension(const IPDBSession &PDBSession,
+                         std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Dimension)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getLowerBoundId)
+  FORWARD_SYMBOL_METHOD(getUpperBoundId)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
new file mode 100644
index 0000000..ddbe7e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
@@ -0,0 +1,56 @@
+//===- PDBSymbolTypeEnum.h - enum type info ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
+
+#include "IPDBLineNumber.h"
+#include "PDBSymbol.h"
+#include "PDBSymbolTypeBuiltin.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeEnum : public PDBSymbol {
+public:
+  PDBSymbolTypeEnum(const IPDBSession &PDBSession,
+                    std::unique_ptr<IPDBRawSymbol> EnumTypeSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Enum)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getBuiltinType)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(hasConstructor)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+  FORWARD_SYMBOL_METHOD(hasCastOperator)
+  FORWARD_SYMBOL_METHOD(hasNestedTypes)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getSrcLineOnTypeDefn)
+  FORWARD_SYMBOL_METHOD(isNested)
+  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+  FORWARD_SYMBOL_METHOD(isPacked)
+  FORWARD_SYMBOL_METHOD(isScoped)
+  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbolTypeBuiltin, getType,
+                                              getUnderlyingType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
new file mode 100644
index 0000000..24c1312
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
@@ -0,0 +1,38 @@
+//===- PDBSymbolTypeFriend.h - friend type info -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeFriend : public PDBSymbol {
+public:
+  PDBSymbolTypeFriend(const IPDBSession &PDBSession,
+                      std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Friend)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
new file mode 100644
index 0000000..3855999
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
@@ -0,0 +1,38 @@
+//===- PDBSymbolTypeFunctionArg.h - function arg type info ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeFunctionArg : public PDBSymbol {
+public:
+  PDBSymbolTypeFunctionArg(const IPDBSession &PDBSession,
+                           std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionArg)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
new file mode 100644
index 0000000..abd4cf5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
@@ -0,0 +1,52 @@
+//===- PDBSymbolTypeFunctionSig.h - function signature type info -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeFunctionSig : public PDBSymbol {
+public:
+  PDBSymbolTypeFunctionSig(const IPDBSession &PDBSession,
+                           std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionSig)
+
+  std::unique_ptr<IPDBEnumSymbols> getArguments() const;
+
+  void dump(PDBSymDumper &Dumper) const override;
+  void dumpRight(PDBSymDumper &Dumper) const override;
+  void dumpArgList(raw_ostream &OS) const;
+
+  bool isCVarArgs() const;
+
+  FORWARD_SYMBOL_METHOD(getCallingConvention)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getCount)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  // FORWARD_SYMBOL_METHOD(getObjectPointerType)
+  FORWARD_SYMBOL_METHOD(getThisAdjust)
+  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getReturnType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
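
A sketch of walking a placeholder signature Sig: getArguments() yields a generic symbol enumerator, and defaultDump() from PDBSymbol.h prints each entry's raw properties:

    // Sig is a placeholder PDBSymbolTypeFunctionSig reference.
    outs() << Sig.getCount() << " argument(s), varargs: "
           << (Sig.isCVarArgs() ? "yes" : "no") << '\n';
    if (auto Args = Sig.getArguments())
      while (auto Arg = Args->getNext())            // unique_ptr<PDBSymbol>
        Arg->defaultDump(outs(), /*Indent=*/2);
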
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
new file mode 100644
index 0000000..31cf536
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
@@ -0,0 +1,36 @@
+//===- PDBSymbolTypeManaged.h - managed type info ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeManaged : public PDBSymbol {
+public:
+  PDBSymbolTypeManaged(const IPDBSession &PDBSession,
+                       std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ManagedType)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getName)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
new file mode 100644
index 0000000..7612eba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
@@ -0,0 +1,47 @@
+//===- PDBSymbolTypePointer.h - pointer type info ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypePointer : public PDBSymbol {
+public:
+  PDBSymbolTypePointer(const IPDBSession &PDBSession,
+                       std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PointerType)
+
+  void dump(PDBSymDumper &Dumper) const override;
+  void dumpRight(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(isReference)
+  FORWARD_SYMBOL_METHOD(isRValueReference)
+  FORWARD_SYMBOL_METHOD(isPointerToDataMember)
+  FORWARD_SYMBOL_METHOD(isPointerToMemberFunction)
+  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getPointeeType)
+  FORWARD_SYMBOL_METHOD(isRestrictedType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
new file mode 100644
index 0000000..16c1d1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
@@ -0,0 +1,55 @@
+//===- PDBSymbolTypeTypedef.h - typedef type info ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeTypedef : public PDBSymbol {
+public:
+  PDBSymbolTypeTypedef(const IPDBSession &PDBSession,
+                       std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Typedef)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(getBuiltinType)
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(hasConstructor)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+  FORWARD_SYMBOL_METHOD(hasCastOperator)
+  FORWARD_SYMBOL_METHOD(hasNestedTypes)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(isNested)
+  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+  FORWARD_SYMBOL_METHOD(isPacked)
+  FORWARD_SYMBOL_METHOD(isReference)
+  FORWARD_SYMBOL_METHOD(isScoped)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+  FORWARD_SYMBOL_METHOD(getUdtKind)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
new file mode 100644
index 0000000..e259b6d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
@@ -0,0 +1,63 @@
+//===- PDBSymbolTypeUDT.h - UDT type info -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
+
+#include "IPDBLineNumber.h"
+#include "IPDBSession.h"
+#include "PDBSymbol.h"
+#include "PDBSymbolTypeBaseClass.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace pdb {
+
+class PDBSymbolTypeUDT : public PDBSymbol {
+public:
+  PDBSymbolTypeUDT(const IPDBSession &PDBSession,
+                   std::unique_ptr<IPDBRawSymbol> UDTSymbol);
+
+  std::unique_ptr<PDBSymbolTypeUDT> clone() const {
+    return getSession().getConcreteSymbolById<PDBSymbolTypeUDT>(
+        getSymIndexId());
+  }
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UDT)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
+  FORWARD_SYMBOL_METHOD(hasConstructor)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+  FORWARD_SYMBOL_METHOD(hasCastOperator)
+  FORWARD_SYMBOL_METHOD(hasNestedTypes)
+  FORWARD_SYMBOL_METHOD(getLength)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+  FORWARD_SYMBOL_METHOD(getSrcLineOnTypeDefn)
+  FORWARD_SYMBOL_METHOD(isNested)
+  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+  FORWARD_SYMBOL_METHOD(isPacked)
+  FORWARD_SYMBOL_METHOD(isScoped)
+  FORWARD_SYMBOL_METHOD(getUdtKind)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+  FORWARD_SYMBOL_METHOD(getAccess)
+};
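+
+// Illustrative sketch (not part of the upstream header): clone() re-queries
+// the owning session by symbol index, so the copy is an independent handle to
+// the same UDT. `UDT` is a hypothetical existing PDBSymbolTypeUDT:
+//
+//   std::unique_ptr<PDBSymbolTypeUDT> Copy = UDT.clone();
+//   assert(Copy->getSymIndexId() == UDT.getSymIndexId());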
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
new file mode 100644
index 0000000..e270c2b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
@@ -0,0 +1,42 @@
+//===- PDBSymbolTypeVTable.h - VTable type info -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeVTable : public PDBSymbol {
+public:
+  PDBSymbolTypeVTable(const IPDBSession &PDBSession,
+                      std::unique_ptr<IPDBRawSymbol> VtblSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTable)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getClassParent)
+  FORWARD_SYMBOL_METHOD(getOffset)
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_ID_METHOD(getType)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
new file mode 100644
index 0000000..8acaabe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
@@ -0,0 +1,40 @@
+//===- PDBSymbolTypeVTableShape.h - VTable shape info -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolTypeVTableShape : public PDBSymbol {
+public:
+  PDBSymbolTypeVTableShape(const IPDBSession &PDBSession,
+                           std::unique_ptr<IPDBRawSymbol> VtblShapeSymbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTableShape)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_METHOD(isConstType)
+  FORWARD_SYMBOL_METHOD(getCount)
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(isUnalignedType)
+  FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
new file mode 100644
index 0000000..de43e47
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
@@ -0,0 +1,36 @@
+//===- PDBSymbolUnknown.h - unknown symbol type -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
+
+#include "PDBSymbol.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolUnknown : public PDBSymbol {
+public:
+  PDBSymbolUnknown(const IPDBSession &PDBSession,
+                   std::unique_ptr<IPDBRawSymbol> UnknownSymbol);
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  static bool classof(const PDBSymbol *S) {
+    return (S->getSymTag() == PDB_SymType::None ||
+            S->getSymTag() >= PDB_SymType::Max);
+  }
+};
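+
+// Illustrative sketch (not part of the upstream header): the classof above
+// lets LLVM-style RTTI route unrecognized symbol tags to this class. `Sym`
+// and `report` are placeholders:
+//
+//   if (const auto *Unk = llvm::dyn_cast<PDBSymbolUnknown>(Sym))
+//     report(Unk->getSymIndexId());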
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
new file mode 100644
index 0000000..70fbd5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolUsingNamespace.h - using namespace info ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+namespace pdb {
+
+class PDBSymbolUsingNamespace : public PDBSymbol {
+public:
+  PDBSymbolUsingNamespace(const IPDBSession &PDBSession,
+                          std::unique_ptr<IPDBRawSymbol> Symbol);
+
+  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UsingNamespace)
+
+  void dump(PDBSymDumper &Dumper) const override;
+
+  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+  FORWARD_SYMBOL_METHOD(getName)
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBTypes.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBTypes.h
new file mode 100644
index 0000000..bc6233a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/PDBTypes.h
@@ -0,0 +1,408 @@
+//===- PDBTypes.h - Defines enums for various fields in PDB ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBTYPES_H
+#define LLVM_DEBUGINFO_PDB_PDBTYPES_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include <cctype>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+
+namespace llvm {
+namespace pdb {
+
+class IPDBDataStream;
+class IPDBInjectedSource;
+class IPDBLineNumber;
+class IPDBSectionContrib;
+class IPDBSourceFile;
+class IPDBTable;
+class PDBSymDumper;
+class PDBSymbol;
+class PDBSymbolExe;
+class PDBSymbolCompiland;
+class PDBSymbolCompilandDetails;
+class PDBSymbolCompilandEnv;
+class PDBSymbolFunc;
+class PDBSymbolBlock;
+class PDBSymbolData;
+class PDBSymbolAnnotation;
+class PDBSymbolLabel;
+class PDBSymbolPublicSymbol;
+class PDBSymbolTypeUDT;
+class PDBSymbolTypeEnum;
+class PDBSymbolTypeFunctionSig;
+class PDBSymbolTypePointer;
+class PDBSymbolTypeArray;
+class PDBSymbolTypeBuiltin;
+class PDBSymbolTypeTypedef;
+class PDBSymbolTypeBaseClass;
+class PDBSymbolTypeFriend;
+class PDBSymbolTypeFunctionArg;
+class PDBSymbolFuncDebugStart;
+class PDBSymbolFuncDebugEnd;
+class PDBSymbolUsingNamespace;
+class PDBSymbolTypeVTableShape;
+class PDBSymbolTypeVTable;
+class PDBSymbolCustom;
+class PDBSymbolThunk;
+class PDBSymbolTypeCustom;
+class PDBSymbolTypeManaged;
+class PDBSymbolTypeDimension;
+class PDBSymbolUnknown;
+
+using IPDBEnumSymbols = IPDBEnumChildren<PDBSymbol>;
+using IPDBEnumSourceFiles = IPDBEnumChildren<IPDBSourceFile>;
+using IPDBEnumDataStreams = IPDBEnumChildren<IPDBDataStream>;
+using IPDBEnumLineNumbers = IPDBEnumChildren<IPDBLineNumber>;
+using IPDBEnumTables = IPDBEnumChildren<IPDBTable>;
+using IPDBEnumInjectedSources = IPDBEnumChildren<IPDBInjectedSource>;
+using IPDBEnumSectionContribs = IPDBEnumChildren<IPDBSectionContrib>;
+
+/// Specifies which PDB reader implementation is to be used.  Only a value
+/// of PDB_ReaderType::DIA is currently supported, but Native is in the works.
+enum class PDB_ReaderType {
+  DIA = 0,
+  Native = 1,
+};
+
+/// An enumeration indicating the type of data contained in this table.
+enum class PDB_TableType {
+  TableInvalid = 0,
+  Symbols,
+  SourceFiles,
+  LineNumbers,
+  SectionContribs,
+  Segments,
+  InjectedSources,
+  FrameData,
+  InputAssemblyFiles,
+  Dbg
+};
+
+/// Defines flags used for enumerating child symbols.  This corresponds to the
+/// NameSearchOptions enumeration which is documented here:
+/// https://msdn.microsoft.com/en-us/library/yat28ads.aspx
+enum PDB_NameSearchFlags {
+  NS_Default = 0x0,
+  NS_CaseSensitive = 0x1,
+  NS_CaseInsensitive = 0x2,
+  NS_FileNameExtMatch = 0x4,
+  NS_Regex = 0x8,
+  NS_UndecoratedName = 0x10,
+
+  // For backward compatibility.
+  NS_CaseInFileNameExt = NS_CaseInsensitive | NS_FileNameExtMatch,
+  NS_CaseRegex = NS_Regex | NS_CaseSensitive,
+  NS_CaseInRex = NS_Regex | NS_CaseInsensitive
+};
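+
+// Illustrative note (not part of the upstream header): these are bit flags
+// and compose with bitwise OR; the compatibility aliases above are simply
+// precomposed combinations, e.g.
+//
+//   int Flags = NS_Regex | NS_CaseInsensitive;   // same value as NS_CaseInRex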
+
+/// Specifies the hash algorithm that a source file from a PDB was hashed with.
+/// This corresponds to the CV_SourceChksum_t enumeration, which is documented
+/// here: https://msdn.microsoft.com/en-us/library/e96az21x.aspx
+enum class PDB_Checksum { None = 0, MD5 = 1, SHA1 = 2 };
+
+/// These values correspond to the CV_CPU_TYPE_e enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+using PDB_Cpu = codeview::CPUType;
+
+enum class PDB_Machine {
+  Invalid = 0xffff,
+  Unknown = 0x0,
+  Am33 = 0x13,
+  Amd64 = 0x8664,
+  Arm = 0x1C0,
+  ArmNT = 0x1C4,
+  Ebc = 0xEBC,
+  x86 = 0x14C,
+  Ia64 = 0x200,
+  M32R = 0x9041,
+  Mips16 = 0x266,
+  MipsFpu = 0x366,
+  MipsFpu16 = 0x466,
+  PowerPC = 0x1F0,
+  PowerPCFP = 0x1F1,
+  R4000 = 0x166,
+  SH3 = 0x1A2,
+  SH3DSP = 0x1A3,
+  SH4 = 0x1A6,
+  SH5 = 0x1A8,
+  Thumb = 0x1C2,
+  WceMipsV2 = 0x169
+};
+
+enum class PDB_SourceCompression {
+  None,
+  RunLengthEncoded,
+  Huffman,
+  LZ,
+};
+
+/// These values correspond to the CV_call_e enumeration, and are documented
+/// at the following locations:
+///   https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+///   https://msdn.microsoft.com/en-us/library/windows/desktop/ms680207(v=vs.85).aspx
+using PDB_CallingConv = codeview::CallingConvention;
+
+/// These values correspond to the CV_CFL_LANG enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/bw3aekw6.aspx
+using PDB_Lang = codeview::SourceLanguage;
+
+/// These values correspond to the DataKind enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/b2x2t313.aspx
+enum class PDB_DataKind {
+  Unknown,
+  Local,
+  StaticLocal,
+  Param,
+  ObjectPtr,
+  FileStatic,
+  Global,
+  Member,
+  StaticMember,
+  Constant
+};
+
+/// These values correspond to the SymTagEnum enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/bkedss5f.aspx
+enum class PDB_SymType {
+  None,
+  Exe,
+  Compiland,
+  CompilandDetails,
+  CompilandEnv,
+  Function,
+  Block,
+  Data,
+  Annotation,
+  Label,
+  PublicSymbol,
+  UDT,
+  Enum,
+  FunctionSig,
+  PointerType,
+  ArrayType,
+  BuiltinType,
+  Typedef,
+  BaseClass,
+  Friend,
+  FunctionArg,
+  FuncDebugStart,
+  FuncDebugEnd,
+  UsingNamespace,
+  VTableShape,
+  VTable,
+  Custom,
+  Thunk,
+  CustomType,
+  ManagedType,
+  Dimension,
+  Max
+};
+
+/// These values correspond to the LocationType enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/f57kaez3.aspx
+enum class PDB_LocType {
+  Null,
+  Static,
+  TLS,
+  RegRel,
+  ThisRel,
+  Enregistered,
+  BitField,
+  Slot,
+  IlRel,
+  MetaData,
+  Constant,
+  Max
+};
+
+/// These values correspond to the UdtKind enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/wcstk66t.aspx
+enum class PDB_UdtType { Struct, Class, Union, Interface };
+
+/// These values correspond to the StackFrameTypeEnum enumeration, and are
+/// documented here: https://msdn.microsoft.com/en-us/library/bc5207xw.aspx.
+enum class PDB_StackFrameType { FPO, KernelTrap, KernelTSS, EBP, FrameData };
+
+/// These values correspond to the MemoryTypeEnum enumeration.
+enum class PDB_MemoryType { Code, Data, Stack, HeapCode };
+
+/// These values correspond to the BasicType enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/4szdtzc3.aspx
+enum class PDB_BuiltinType {
+  None = 0,
+  Void = 1,
+  Char = 2,
+  WCharT = 3,
+  Int = 6,
+  UInt = 7,
+  Float = 8,
+  BCD = 9,
+  Bool = 10,
+  Long = 13,
+  ULong = 14,
+  Currency = 25,
+  Date = 26,
+  Variant = 27,
+  Complex = 28,
+  Bitfield = 29,
+  BSTR = 30,
+  HResult = 31,
+  Char16 = 32,
+  Char32 = 33
+};
+
+/// These values correspond to the flags that can be combined to control the
+/// return of an undecorated name for a C++ decorated name, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/kszfk0fs.aspx
+enum PDB_UndnameFlags: uint32_t {
+  Undname_Complete = 0x0,
+  Undname_NoLeadingUnderscores = 0x1,
+  Undname_NoMsKeywords = 0x2,
+  Undname_NoFuncReturns = 0x4,
+  Undname_NoAllocModel = 0x8,
+  Undname_NoAllocLang = 0x10,
+  Undname_Reserved1 = 0x20,
+  Undname_Reserved2 = 0x40,
+  Undname_NoThisType = 0x60,
+  Undname_NoAccessSpec = 0x80,
+  Undname_NoThrowSig = 0x100,
+  Undname_NoMemberType = 0x200,
+  Undname_NoReturnUDTModel = 0x400,
+  Undname_32BitDecode = 0x800,
+  Undname_NameOnly = 0x1000,
+  Undname_TypeOnly = 0x2000,
+  Undname_HaveParams = 0x4000,
+  Undname_NoECSU = 0x8000,
+  Undname_NoIdentCharCheck = 0x10000,
+  Undname_NoPTR64 = 0x20000
+};
+
+enum class PDB_MemberAccess { Private = 1, Protected = 2, Public = 3 };
+
+struct VersionInfo {
+  uint32_t Major;
+  uint32_t Minor;
+  uint32_t Build;
+  uint32_t QFE;
+};
+
+enum PDB_VariantType {
+  Empty,
+  Unknown,
+  Int8,
+  Int16,
+  Int32,
+  Int64,
+  Single,
+  Double,
+  UInt8,
+  UInt16,
+  UInt32,
+  UInt64,
+  Bool,
+  String
+};
+
+struct Variant {
+  Variant() = default;
+
+  Variant(const Variant &Other) {
+    *this = Other;
+  }
+
+  ~Variant() {
+    if (Type == PDB_VariantType::String)
+      delete[] Value.String;
+  }
+
+  PDB_VariantType Type = PDB_VariantType::Empty;
+  union {
+    bool Bool;
+    int8_t Int8;
+    int16_t Int16;
+    int32_t Int32;
+    int64_t Int64;
+    float Single;
+    double Double;
+    uint8_t UInt8;
+    uint16_t UInt16;
+    uint32_t UInt32;
+    uint64_t UInt64;
+    char *String;
+  } Value;
+
+#define VARIANT_EQUAL_CASE(Enum)                                               \
+  case PDB_VariantType::Enum:                                                  \
+    return Value.Enum == Other.Value.Enum;
+
+  bool operator==(const Variant &Other) const {
+    if (Type != Other.Type)
+      return false;
+    switch (Type) {
+      VARIANT_EQUAL_CASE(Bool)
+      VARIANT_EQUAL_CASE(Int8)
+      VARIANT_EQUAL_CASE(Int16)
+      VARIANT_EQUAL_CASE(Int32)
+      VARIANT_EQUAL_CASE(Int64)
+      VARIANT_EQUAL_CASE(Single)
+      VARIANT_EQUAL_CASE(Double)
+      VARIANT_EQUAL_CASE(UInt8)
+      VARIANT_EQUAL_CASE(UInt16)
+      VARIANT_EQUAL_CASE(UInt32)
+      VARIANT_EQUAL_CASE(UInt64)
+      VARIANT_EQUAL_CASE(String)
+    default:
+      return true;
+    }
+  }
+
+#undef VARIANT_EQUAL_CASE
+
+  bool operator!=(const Variant &Other) const { return !(*this == Other); }
+  Variant &operator=(const Variant &Other) {
+    if (this == &Other)
+      return *this;
+    if (Type == PDB_VariantType::String)
+      delete[] Value.String;
+    Type = Other.Type;
+    Value = Other.Value;
+    if (Other.Type == PDB_VariantType::String &&
+        Other.Value.String != nullptr) {
+      Value.String = new char[strlen(Other.Value.String) + 1];
+      ::strcpy(Value.String, Other.Value.String);
+    }
+    return *this;
+  }
+};
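+
+// Illustrative sketch (not part of the upstream header): assignment deep
+// copies string payloads and equality compares only the active member:
+//
+//   Variant A, B;                 // both default to PDB_VariantType::Empty
+//   A.Type = PDB_VariantType::Int32;
+//   A.Value.Int32 = 42;
+//   B = A;                        // for String, a fresh buffer would be made
+//   bool Same = (A == B);         // true: same type and same Int32 payload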
+
+} // end namespace pdb
+} // end namespace llvm
+
+namespace std {
+
+template <> struct hash<llvm::pdb::PDB_SymType> {
+  using argument_type = llvm::pdb::PDB_SymType;
+  using result_type = std::size_t;
+
+  result_type operator()(const argument_type &Arg) const {
+    return std::hash<int>()(static_cast<int>(Arg));
+  }
+};
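+
+// Illustrative sketch (not part of the upstream header): this specialization
+// lets PDB_SymType key the standard unordered containers:
+//
+//   std::unordered_set<llvm::pdb::PDB_SymType> Seen;
+//   Seen.insert(llvm::pdb::PDB_SymType::Function);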
+
+} // end namespace std
+
+#endif // LLVM_DEBUGINFO_PDB_PDBTYPES_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/PDB/UDTLayout.h b/linux-x64/clang/include/llvm/DebugInfo/PDB/UDTLayout.h
new file mode 100644
index 0000000..c4234c1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/PDB/UDTLayout.h
@@ -0,0 +1,182 @@
+//===- UDTLayout.h - UDT layout info ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_UDTLAYOUT_H
+#define LLVM_DEBUGINFO_PDB_UDTLAYOUT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolData.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h"
+#include "llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class BaseClassLayout;
+class ClassLayout;
+class UDTLayoutBase;
+
+class LayoutItemBase {
+public:
+  LayoutItemBase(const UDTLayoutBase *Parent, const PDBSymbol *Symbol,
+                 const std::string &Name, uint32_t OffsetInParent,
+                 uint32_t Size, bool IsElided);
+  virtual ~LayoutItemBase() = default;
+
+  uint32_t deepPaddingSize() const;
+  virtual uint32_t immediatePadding() const { return 0; }
+  virtual uint32_t tailPadding() const;
+
+  const UDTLayoutBase *getParent() const { return Parent; }
+  StringRef getName() const { return Name; }
+  uint32_t getOffsetInParent() const { return OffsetInParent; }
+  uint32_t getSize() const { return SizeOf; }
+  uint32_t getLayoutSize() const { return LayoutSize; }
+  const PDBSymbol *getSymbol() const { return Symbol; }
+  const BitVector &usedBytes() const { return UsedBytes; }
+  bool isElided() const { return IsElided; }
+  virtual bool isVBPtr() const { return false; }
+
+  bool containsOffset(uint32_t Off) const {
+    uint32_t Begin = getOffsetInParent();
+    uint32_t End = Begin + getSize();
+    return (Off >= Begin && Off < End);
+  }
+
+protected:
+  const PDBSymbol *Symbol = nullptr;
+  const UDTLayoutBase *Parent = nullptr;
+  BitVector UsedBytes;
+  std::string Name;
+  uint32_t OffsetInParent = 0;
+  uint32_t SizeOf = 0;
+  uint32_t LayoutSize = 0;
+  bool IsElided = false;
+};
+
+class VBPtrLayoutItem : public LayoutItemBase {
+public:
+  VBPtrLayoutItem(const UDTLayoutBase &Parent,
+                  std::unique_ptr<PDBSymbolTypeBuiltin> Sym, uint32_t Offset,
+                  uint32_t Size);
+
+  bool isVBPtr() const override { return true; }
+
+private:
+  std::unique_ptr<PDBSymbolTypeBuiltin> Type;
+};
+
+class DataMemberLayoutItem : public LayoutItemBase {
+public:
+  DataMemberLayoutItem(const UDTLayoutBase &Parent,
+                       std::unique_ptr<PDBSymbolData> DataMember);
+
+  const PDBSymbolData &getDataMember();
+  bool hasUDTLayout() const;
+  const ClassLayout &getUDTLayout() const;
+
+private:
+  std::unique_ptr<PDBSymbolData> DataMember;
+  std::unique_ptr<ClassLayout> UdtLayout;
+};
+
+class VTableLayoutItem : public LayoutItemBase {
+public:
+  VTableLayoutItem(const UDTLayoutBase &Parent,
+                   std::unique_ptr<PDBSymbolTypeVTable> VTable);
+
+  uint32_t getElementSize() const { return ElementSize; }
+
+private:
+  uint32_t ElementSize = 0;
+  std::unique_ptr<PDBSymbolTypeVTable> VTable;
+};
+
+class UDTLayoutBase : public LayoutItemBase {
+  template <typename T> using UniquePtrVector = std::vector<std::unique_ptr<T>>;
+
+public:
+  UDTLayoutBase(const UDTLayoutBase *Parent, const PDBSymbol &Sym,
+                const std::string &Name, uint32_t OffsetInParent, uint32_t Size,
+                bool IsElided);
+
+  uint32_t tailPadding() const override;
+  ArrayRef<LayoutItemBase *> layout_items() const { return LayoutItems; }
+  ArrayRef<BaseClassLayout *> bases() const { return AllBases; }
+  ArrayRef<BaseClassLayout *> regular_bases() const { return NonVirtualBases; }
+  ArrayRef<BaseClassLayout *> virtual_bases() const { return VirtualBases; }
+  uint32_t directVirtualBaseCount() const { return DirectVBaseCount; }
+  ArrayRef<std::unique_ptr<PDBSymbolFunc>> funcs() const { return Funcs; }
+  ArrayRef<std::unique_ptr<PDBSymbol>> other_items() const { return Other; }
+
+protected:
+  bool hasVBPtrAtOffset(uint32_t Off) const;
+  void initializeChildren(const PDBSymbol &Sym);
+
+  void addChildToLayout(std::unique_ptr<LayoutItemBase> Child);
+
+  uint32_t DirectVBaseCount = 0;
+
+  UniquePtrVector<PDBSymbol> Other;
+  UniquePtrVector<PDBSymbolFunc> Funcs;
+  UniquePtrVector<LayoutItemBase> ChildStorage;
+  std::vector<LayoutItemBase *> LayoutItems;
+
+  std::vector<BaseClassLayout *> AllBases;
+  ArrayRef<BaseClassLayout *> NonVirtualBases;
+  ArrayRef<BaseClassLayout *> VirtualBases;
+
+  VTableLayoutItem *VTable = nullptr;
+  VBPtrLayoutItem *VBPtr = nullptr;
+};
+
+class BaseClassLayout : public UDTLayoutBase {
+public:
+  BaseClassLayout(const UDTLayoutBase &Parent, uint32_t OffsetInParent,
+                  bool Elide, std::unique_ptr<PDBSymbolTypeBaseClass> Base);
+
+  const PDBSymbolTypeBaseClass &getBase() const { return *Base; }
+  bool isVirtualBase() const { return IsVirtualBase; }
+  bool isEmptyBase() const { return SizeOf == 1 && LayoutSize == 0; }
+
+private:
+  std::unique_ptr<PDBSymbolTypeBaseClass> Base;
+  bool IsVirtualBase;
+};
+
+class ClassLayout : public UDTLayoutBase {
+public:
+  explicit ClassLayout(const PDBSymbolTypeUDT &UDT);
+  explicit ClassLayout(std::unique_ptr<PDBSymbolTypeUDT> UDT);
+
+  ClassLayout(ClassLayout &&Other) = default;
+
+  const PDBSymbolTypeUDT &getClass() const { return UDT; }
+  uint32_t immediatePadding() const override;
+
+private:
+  BitVector ImmediateUsedBytes;
+  std::unique_ptr<PDBSymbolTypeUDT> OwnedStorage;
+  const PDBSymbolTypeUDT &UDT;
+};
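+
+// Illustrative sketch (not part of the upstream header): a layout is built
+// from a UDT symbol and can then report wasted space. `UDT` is a hypothetical
+// std::unique_ptr<PDBSymbolTypeUDT> obtained from a session:
+//
+//   ClassLayout Layout(std::move(UDT));
+//   uint32_t Padding = Layout.deepPaddingSize();  // padding bytes, recursive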
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_UDTLAYOUT_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/Symbolize/DIPrinter.h b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/DIPrinter.h
new file mode 100644
index 0000000..ab82be3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/DIPrinter.h
@@ -0,0 +1,53 @@
+//===- llvm/DebugInfo/Symbolize/DIPrinter.h ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the DIPrinter class, which is responsible for printing
+// structures defined in DebugInfo/DIContext.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+struct DILineInfo;
+class DIInliningInfo;
+struct DIGlobal;
+
+namespace symbolize {
+
+class DIPrinter {
+  raw_ostream &OS;
+  bool PrintFunctionNames;
+  bool PrintPretty;
+  int PrintSourceContext;
+  bool Verbose;
+
+  void print(const DILineInfo &Info, bool Inlined);
+  void printContext(const std::string &FileName, int64_t Line);
+
+public:
+  DIPrinter(raw_ostream &OS, bool PrintFunctionNames = true,
+            bool PrintPretty = false, int PrintSourceContext = 0,
+            bool Verbose = false)
+      : OS(OS), PrintFunctionNames(PrintFunctionNames),
+        PrintPretty(PrintPretty), PrintSourceContext(PrintSourceContext),
+        Verbose(Verbose) {}
+
+  DIPrinter &operator<<(const DILineInfo &Info);
+  DIPrinter &operator<<(const DIInliningInfo &Info);
+  DIPrinter &operator<<(const DIGlobal &Global);
+};
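+
+// Illustrative sketch (not part of the upstream header): a printer wraps a
+// stream and consumes the DIContext result structures, e.g. for a DILineInfo
+// `Info` produced by a symbolizer query:
+//
+//   DIPrinter Printer(llvm::errs(), /*PrintFunctionNames=*/true);
+//   Printer << Info;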
+} // end namespace symbolize
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h
new file mode 100644
index 0000000..e576a91
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h
@@ -0,0 +1,47 @@
+//===- SymbolizableModule.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolizableModule interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include <cstdint>
+
+namespace llvm {
+namespace symbolize {
+
+using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;
+
+class SymbolizableModule {
+public:
+  virtual ~SymbolizableModule() = default;
+
+  virtual DILineInfo symbolizeCode(uint64_t ModuleOffset,
+                                   FunctionNameKind FNKind,
+                                   bool UseSymbolTable) const = 0;
+  virtual DIInliningInfo symbolizeInlinedCode(uint64_t ModuleOffset,
+                                              FunctionNameKind FNKind,
+                                              bool UseSymbolTable) const = 0;
+  virtual DIGlobal symbolizeData(uint64_t ModuleOffset) const = 0;
+
+  // Return true if this is a 32-bit x86 PE COFF module.
+  virtual bool isWin32Module() const = 0;
+
+  // Returns the preferred base of the module, i.e. where the loader would place
+  // it in memory assuming there were no conflicts.
+  virtual uint64_t getModulePreferredBase() const = 0;
+};
+
+} // end namespace symbolize
+} // end namespace llvm
+
+#endif  // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
diff --git a/linux-x64/clang/include/llvm/DebugInfo/Symbolize/Symbolize.h b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/Symbolize.h
new file mode 100644
index 0000000..6480aef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/DebugInfo/Symbolize/Symbolize.h
@@ -0,0 +1,123 @@
+//===- Symbolize.h ----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Header for LLVM symbolization library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
+
+#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+namespace symbolize {
+
+using namespace object;
+
+using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;
+
+class LLVMSymbolizer {
+public:
+  struct Options {
+    FunctionNameKind PrintFunctions;
+    bool UseSymbolTable : 1;
+    bool Demangle : 1;
+    bool RelativeAddresses : 1;
+    std::string DefaultArch;
+    std::vector<std::string> DsymHints;
+
+    Options(FunctionNameKind PrintFunctions = FunctionNameKind::LinkageName,
+            bool UseSymbolTable = true, bool Demangle = true,
+            bool RelativeAddresses = false, std::string DefaultArch = "")
+        : PrintFunctions(PrintFunctions), UseSymbolTable(UseSymbolTable),
+          Demangle(Demangle), RelativeAddresses(RelativeAddresses),
+          DefaultArch(std::move(DefaultArch)) {}
+  };
+
+  LLVMSymbolizer(const Options &Opts = Options()) : Opts(Opts) {}
+
+  ~LLVMSymbolizer() {
+    flush();
+  }
+
+  Expected<DILineInfo> symbolizeCode(const std::string &ModuleName,
+                                     uint64_t ModuleOffset,
+                                     StringRef DWPName = "");
+  Expected<DIInliningInfo> symbolizeInlinedCode(const std::string &ModuleName,
+                                                uint64_t ModuleOffset,
+                                                StringRef DWPName = "");
+  Expected<DIGlobal> symbolizeData(const std::string &ModuleName,
+                                   uint64_t ModuleOffset);
+  void flush();
+
+  static std::string
+  DemangleName(const std::string &Name,
+               const SymbolizableModule *DbiModuleDescriptor);
+
+private:
+  // Bundles together object file with code/data and object file with
+  // corresponding debug info. These objects can be the same.
+  using ObjectPair = std::pair<ObjectFile *, ObjectFile *>;
+
+  /// Returns a SymbolizableModule or an error if loading debug info failed.
+  /// Only one attempt is made to load a module, and errors during loading are
+  /// only reported once. Subsequent calls to get module info for a module that
+  /// failed to load will return nullptr.
+  Expected<SymbolizableModule *>
+  getOrCreateModuleInfo(const std::string &ModuleName, StringRef DWPName = "");
+
+  ObjectFile *lookUpDsymFile(const std::string &Path,
+                             const MachOObjectFile *ExeObj,
+                             const std::string &ArchName);
+  ObjectFile *lookUpDebuglinkObject(const std::string &Path,
+                                    const ObjectFile *Obj,
+                                    const std::string &ArchName);
+
+  /// \brief Returns a pair of pointers to the object and the debug object.
+  Expected<ObjectPair> getOrCreateObjectPair(const std::string &Path,
+                                            const std::string &ArchName);
+
+  /// \brief Return a pointer to object file at specified path, for a specified
+  /// architecture (e.g. if path refers to a Mach-O universal binary, only one
+  /// object file from it will be returned).
+  Expected<ObjectFile *> getOrCreateObject(const std::string &Path,
+                                          const std::string &ArchName);
+
+  std::map<std::string, std::unique_ptr<SymbolizableModule>> Modules;
+
+  /// \brief Contains cached results of getOrCreateObjectPair().
+  std::map<std::pair<std::string, std::string>, ObjectPair>
+      ObjectPairForPathArch;
+
+  /// \brief Contains parsed binary for each path, or parsing error.
+  std::map<std::string, OwningBinary<Binary>> BinaryForPath;
+
+  /// \brief Parsed object file for each path/architecture pair, where "path"
+  /// refers to a Mach-O universal binary.
+  std::map<std::pair<std::string, std::string>, std::unique_ptr<ObjectFile>>
+      ObjectForUBPathAndArch;
+
+  Options Opts;
+};
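+
+// Illustrative sketch (not part of the upstream header): typical use maps a
+// module-relative offset back to source info; the binary name and offset are
+// placeholders:
+//
+//   LLVMSymbolizer Symbolizer;
+//   auto InfoOrErr = Symbolizer.symbolizeCode("a.out", 0x401000);
+//   if (InfoOrErr)
+//     llvm::outs() << InfoOrErr->FunctionName << '\n';
+//   else
+//     llvm::consumeError(InfoOrErr.takeError());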
+
+} // end namespace symbolize
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
diff --git a/linux-x64/clang/include/llvm/Demangle/Compiler.h b/linux-x64/clang/include/llvm/Demangle/Compiler.h
new file mode 100644
index 0000000..c996f9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Demangle/Compiler.h
@@ -0,0 +1,506 @@
+//===-- llvm/Demangle/Compiler.h - Compiler abstraction support -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several macros, based on the current compiler.  This allows
+// use of compiler-specific features in a way that remains portable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMPILER_H
+#define LLVM_SUPPORT_COMPILER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#if defined(_MSC_VER)
+#include <sal.h>
+#endif
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#ifndef __has_extension
+# define __has_extension(x) 0
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#ifndef __has_cpp_attribute
+# define __has_cpp_attribute(x) 0
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/// \macro LLVM_GNUC_PREREQ
+/// \brief Extend the default __GNUC_PREREQ even if glibc's features.h isn't
+/// available.
+#ifndef LLVM_GNUC_PREREQ
+# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#  define LLVM_GNUC_PREREQ(maj, min, patch) \
+    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
+     ((maj) << 20) + ((min) << 10) + (patch))
+# elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#  define LLVM_GNUC_PREREQ(maj, min, patch) \
+    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
+# else
+#  define LLVM_GNUC_PREREQ(maj, min, patch) 0
+# endif
+#endif
+
+/// \macro LLVM_MSC_PREREQ
+/// \brief Is the compiler MSVC of at least the specified version?
+/// Common version values to check for are:
+///  * 1900: Microsoft Visual Studio 2015 / 14.0
+#ifdef _MSC_VER
+#define LLVM_MSC_PREREQ(version) (_MSC_VER >= (version))
+
+// We require at least MSVC 2015.
+#if !LLVM_MSC_PREREQ(1900)
+#error LLVM requires at least MSVC 2015.
+#endif
+
+#else
+#define LLVM_MSC_PREREQ(version) 0
+#endif
+
+/// \brief Does the compiler support ref-qualifiers for *this?
+///
+/// Sadly, this is separate from just rvalue reference support because GCC
+/// and MSVC implemented this later than everything else.
+#if __has_feature(cxx_rvalue_references) || LLVM_GNUC_PREREQ(4, 8, 1)
+#define LLVM_HAS_RVALUE_REFERENCE_THIS 1
+#else
+#define LLVM_HAS_RVALUE_REFERENCE_THIS 0
+#endif
+
+/// Expands to '&' if ref-qualifiers for *this are supported.
+///
+/// This can be used to provide lvalue/rvalue overrides of member functions.
+/// The rvalue override should be guarded by LLVM_HAS_RVALUE_REFERENCE_THIS
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+#define LLVM_LVALUE_FUNCTION &
+#else
+#define LLVM_LVALUE_FUNCTION
+#endif
+
+/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
+/// into a shared library, then the class should be private to the library and
+/// not accessible from outside it.  Can also be used to mark variables and
+/// functions, making them private to any shared library they are linked into.
+/// On PE/COFF targets, library visibility is the default, so this isn't needed.
+#if (__has_attribute(visibility) || LLVM_GNUC_PREREQ(4, 0, 0)) &&              \
+    !defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32)
+#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
+#else
+#define LLVM_LIBRARY_VISIBILITY
+#endif
+
+#if defined(__GNUC__)
+#define LLVM_PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
+#else
+#define LLVM_PREFETCH(addr, rw, locality)
+#endif
+
+#if __has_attribute(used) || LLVM_GNUC_PREREQ(3, 1, 0)
+#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
+#else
+#define LLVM_ATTRIBUTE_USED
+#endif
+
+/// LLVM_NODISCARD - Warn if a type or return value is discarded.
+#if __cplusplus > 201402L && __has_cpp_attribute(nodiscard)
+#define LLVM_NODISCARD [[nodiscard]]
+#elif !__cplusplus
+// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
+// error when __has_cpp_attribute is given a scoped attribute in C mode.
+#define LLVM_NODISCARD
+#elif __has_cpp_attribute(clang::warn_unused_result)
+#define LLVM_NODISCARD [[clang::warn_unused_result]]
+#else
+#define LLVM_NODISCARD
+#endif
+
+// Some compilers warn about unused functions. When a function is sometimes
+// used or not depending on build settings (e.g. a function only called from
+// within "assert"), this attribute can be used to suppress such warnings.
+//
+// However, it shouldn't be used for unused *variables*, as those have a much
+// more portable solution:
+//   (void)unused_var_name;
+// Prefer cast-to-void wherever it is sufficient.
+#if __has_attribute(unused) || LLVM_GNUC_PREREQ(3, 1, 0)
+#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define LLVM_ATTRIBUTE_UNUSED
+#endif
+
+// FIXME: Provide this for PE/COFF targets.
+#if (__has_attribute(weak) || LLVM_GNUC_PREREQ(4, 0, 0)) &&                    \
+    (!defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32))
+#define LLVM_ATTRIBUTE_WEAK __attribute__((__weak__))
+#else
+#define LLVM_ATTRIBUTE_WEAK
+#endif
+
+// Prior to clang 3.2, clang did not accept any spelling of
+// __has_attribute(const), so assume it is supported.
+#if defined(__clang__) || defined(__GNUC__)
+// aka 'CONST' but following LLVM Conventions.
+#define LLVM_READNONE __attribute__((__const__))
+#else
+#define LLVM_READNONE
+#endif
+
+#if __has_attribute(pure) || defined(__GNUC__)
+// aka 'PURE' but following LLVM Conventions.
+#define LLVM_READONLY __attribute__((__pure__))
+#else
+#define LLVM_READONLY
+#endif
+
+#if __has_builtin(__builtin_expect) || LLVM_GNUC_PREREQ(4, 0, 0)
+#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
+#else
+#define LLVM_LIKELY(EXPR) (EXPR)
+#define LLVM_UNLIKELY(EXPR) (EXPR)
+#endif
+
+/// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
+/// mark a method "not for inlining".
+#if __has_attribute(noinline) || LLVM_GNUC_PREREQ(3, 4, 0)
+#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+#define LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+/// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
+/// so, mark a method "always inline" because it is performance sensitive. GCC
+/// 3.4 supported this but was buggy in various cases and produced "sorry,
+/// unimplemented" errors, so it is only used with GCC 4.0 and later.
+#if __has_attribute(always_inline) || LLVM_GNUC_PREREQ(4, 0, 0)
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
+#else
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#ifdef __GNUC__
+#define LLVM_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define LLVM_ATTRIBUTE_NORETURN
+#endif
+
+#if __has_attribute(returns_nonnull) || LLVM_GNUC_PREREQ(4, 9, 0)
+#define LLVM_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_RETURNS_NONNULL _Ret_notnull_
+#else
+#define LLVM_ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+/// \macro LLVM_ATTRIBUTE_RETURNS_NOALIAS Used to mark a function as returning a
+/// pointer that does not alias any other valid pointer.
+#ifdef __GNUC__
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
+#else
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS
+#endif
+
+/// LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
+#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
+#define LLVM_FALLTHROUGH [[fallthrough]]
+#elif __has_cpp_attribute(gnu::fallthrough)
+#define LLVM_FALLTHROUGH [[gnu::fallthrough]]
+#elif !__cplusplus
+// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
+// error when __has_cpp_attribute is given a scoped attribute in C mode.
+#define LLVM_FALLTHROUGH
+#elif __has_cpp_attribute(clang::fallthrough)
+#define LLVM_FALLTHROUGH [[clang::fallthrough]]
+#else
+#define LLVM_FALLTHROUGH
+#endif
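+
+// Illustrative usage (not part of the upstream header); Read/Write and the
+// helpers are placeholders:
+//
+//   switch (Kind) {
+//   case Read:
+//     prepare();
+//     LLVM_FALLTHROUGH;   // deliberate: Read also does the Write work
+//   case Write:
+//     commit();
+//     break;
+//   }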
+
+/// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
+/// pedantic diagnostics.
+#ifdef __GNUC__
+#define LLVM_EXTENSION __extension__
+#else
+#define LLVM_EXTENSION
+#endif
+
+// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
+#if __has_feature(attribute_deprecated_with_message)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+  decl __attribute__((deprecated(message)))
+#elif defined(__GNUC__)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+  decl __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+  __declspec(deprecated(message)) decl
+#else
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+  decl
+#endif
+
+/// LLVM_BUILTIN_UNREACHABLE - On compilers which support it, expands
+/// to an expression which states that it is undefined behavior for the
+/// compiler to reach this point.  Otherwise it is not defined.
+#if __has_builtin(__builtin_unreachable) || LLVM_GNUC_PREREQ(4, 5, 0)
+# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define LLVM_BUILTIN_UNREACHABLE __assume(false)
+#endif
+
+/// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
+/// which causes the program to exit abnormally.
+#if __has_builtin(__builtin_trap) || LLVM_GNUC_PREREQ(4, 3, 0)
+# define LLVM_BUILTIN_TRAP __builtin_trap()
+#elif defined(_MSC_VER)
+// The __debugbreak intrinsic is supported by MSVC, does not require forward
+// declarations involving platform-specific typedefs (unlike RaiseException),
+// results in a call to vectored exception handlers, and encodes to a short
+// instruction that still causes the trapping behavior we want.
+# define LLVM_BUILTIN_TRAP __debugbreak()
+#else
+# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
+#endif
+
+/// LLVM_BUILTIN_DEBUGTRAP - On compilers which support it, expands to
+/// an expression which causes the program to break while running
+/// under a debugger.
+#if __has_builtin(__builtin_debugtrap)
+# define LLVM_BUILTIN_DEBUGTRAP __builtin_debugtrap()
+#elif defined(_MSC_VER)
+// The __debugbreak intrinsic is supported by MSVC and breaks while
+// running under the debugger, and also supports invoking a debugger
+// when the OS is configured appropriately.
+# define LLVM_BUILTIN_DEBUGTRAP __debugbreak()
+#else
+// Just continue execution when built with compilers that have no
+// support. This is a debugging aid and not intended to force the
+// program to abort if encountered.
+# define LLVM_BUILTIN_DEBUGTRAP
+#endif
+
+/// \macro LLVM_ASSUME_ALIGNED
+/// \brief Returns a pointer with an assumed alignment.
+#if __has_builtin(__builtin_assume_aligned) || LLVM_GNUC_PREREQ(4, 7, 0)
+# define LLVM_ASSUME_ALIGNED(p, a) __builtin_assume_aligned(p, a)
+#elif defined(LLVM_BUILTIN_UNREACHABLE)
+// As of today, clang does not support __builtin_assume_aligned.
+# define LLVM_ASSUME_ALIGNED(p, a) \
+           (((uintptr_t(p) % (a)) == 0) ? (p) : (LLVM_BUILTIN_UNREACHABLE, (p)))
+#else
+# define LLVM_ASSUME_ALIGNED(p, a) (p)
+#endif
+
+/// \macro LLVM_ALIGNAS
+/// \brief Used to specify a minimum alignment for a structure or variable.
+#if __GNUC__ && !__has_feature(cxx_alignas) && !LLVM_GNUC_PREREQ(4, 8, 1)
+# define LLVM_ALIGNAS(x) __attribute__((aligned(x)))
+#else
+# define LLVM_ALIGNAS(x) alignas(x)
+#endif
+
+/// \macro LLVM_PACKED
+/// \brief Used to specify a packed structure.
+/// LLVM_PACKED(
+///    struct A {
+///      int i;
+///      int j;
+///      int k;
+///      long long l;
+///   });
+///
+/// LLVM_PACKED_START
+/// struct B {
+///   int i;
+///   int j;
+///   int k;
+///   long long l;
+/// };
+/// LLVM_PACKED_END
+#ifdef _MSC_VER
+# define LLVM_PACKED(d) __pragma(pack(push, 1)) d __pragma(pack(pop))
+# define LLVM_PACKED_START __pragma(pack(push, 1))
+# define LLVM_PACKED_END   __pragma(pack(pop))
+#else
+# define LLVM_PACKED(d) d __attribute__((packed))
+# define LLVM_PACKED_START _Pragma("pack(push, 1)")
+# define LLVM_PACKED_END   _Pragma("pack(pop)")
+#endif
+
+/// \macro LLVM_PTR_SIZE
+/// \brief A constant integer equivalent to the value of sizeof(void*).
+/// Generally used in combination with LLVM_ALIGNAS or when doing computation in
+/// the preprocessor.
+#ifdef __SIZEOF_POINTER__
+# define LLVM_PTR_SIZE __SIZEOF_POINTER__
+#elif defined(_WIN64)
+# define LLVM_PTR_SIZE 8
+#elif defined(_WIN32)
+# define LLVM_PTR_SIZE 4
+#elif defined(_MSC_VER)
+# error "could not determine LLVM_PTR_SIZE as a constant int for MSVC"
+#else
+# define LLVM_PTR_SIZE sizeof(void *)
+#endif
+
+/// \macro LLVM_MEMORY_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with MemorySanitizer instrumentation.
+#if __has_feature(memory_sanitizer)
+# define LLVM_MEMORY_SANITIZER_BUILD 1
+# include <sanitizer/msan_interface.h>
+#else
+# define LLVM_MEMORY_SANITIZER_BUILD 0
+# define __msan_allocated_memory(p, size)
+# define __msan_unpoison(p, size)
+#endif
+
+/// \macro LLVM_ADDRESS_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with AddressSanitizer instrumentation.
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# define LLVM_ADDRESS_SANITIZER_BUILD 1
+# include <sanitizer/asan_interface.h>
+#else
+# define LLVM_ADDRESS_SANITIZER_BUILD 0
+# define __asan_poison_memory_region(p, size)
+# define __asan_unpoison_memory_region(p, size)
+#endif
+
+/// \macro LLVM_THREAD_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with ThreadSanitizer instrumentation.
+#if __has_feature(thread_sanitizer) || defined(__SANITIZE_THREAD__)
+# define LLVM_THREAD_SANITIZER_BUILD 1
+#else
+# define LLVM_THREAD_SANITIZER_BUILD 0
+#endif
+
+#if LLVM_THREAD_SANITIZER_BUILD
+// Thread Sanitizer is a tool that finds races in code.
+// See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations .
+// tsan detects these exact functions by name.
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateHappensAfter(const char *file, int line, const volatile void *cv);
+void AnnotateHappensBefore(const char *file, int line, const volatile void *cv);
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+#ifdef __cplusplus
+}
+#endif
+
+// This marker is used to define a happens-before arc. The race detector will
+// infer an arc from the begin to the end when they share the same pointer
+// argument.
+# define TsanHappensBefore(cv) AnnotateHappensBefore(__FILE__, __LINE__, cv)
+
+// This marker defines the destination of a happens-before arc.
+# define TsanHappensAfter(cv) AnnotateHappensAfter(__FILE__, __LINE__, cv)
+
+// Ignore any races on writes between here and the next TsanIgnoreWritesEnd.
+# define TsanIgnoreWritesBegin() AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+// Resume checking for racy writes.
+# define TsanIgnoreWritesEnd() AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+#else
+# define TsanHappensBefore(cv)
+# define TsanHappensAfter(cv)
+# define TsanIgnoreWritesBegin()
+# define TsanIgnoreWritesEnd()
+#endif
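+
+// Illustrative usage (not part of the upstream header): pairing the two
+// annotations on the same address describes a hand-rolled ordering to TSan;
+// `Ready` is a placeholder synchronization variable:
+//
+//   TsanHappensBefore(&Ready);   // publisher, after initializing the data
+//   TsanHappensAfter(&Ready);    // consumer, after observing Ready set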
+
+/// \macro LLVM_NO_SANITIZE
+/// \brief Disable a particular sanitizer for a function.
+#if __has_attribute(no_sanitize)
+#define LLVM_NO_SANITIZE(KIND) __attribute__((no_sanitize(KIND)))
+#else
+#define LLVM_NO_SANITIZE(KIND)
+#endif
+
+/// \brief Mark debug helper function definitions like dump() that should not be
+/// stripped from debug builds.
+/// Note that you should also surround dump() functions with
+/// `#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)` so they are
+/// stripped from release builds.
+// FIXME: Move this to a private config.h as it's not usable in public headers.
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
+#else
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+/// \macro LLVM_PRETTY_FUNCTION
+/// \brief Gets a user-friendly looking function signature for the current scope
+/// using the best available method on each platform.  The exact format of the
+/// resulting string is implementation specific and non-portable, so this should
+/// only be used, for example, for logging or diagnostics.
+#if defined(_MSC_VER)
+#define LLVM_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__) || defined(__clang__)
+#define LLVM_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define LLVM_PRETTY_FUNCTION __func__
+#endif
+
+/// \macro LLVM_THREAD_LOCAL
+/// \brief A thread-local storage specifier which can be used with globals,
+/// extern globals, and static globals.
+///
+/// This is essentially an extremely restricted analog to C++11's thread_local
+/// support, and uses that when available. However, it falls back on
+/// platform-specific or vendor-provided extensions when necessary. These
+/// extensions don't support many of the C++11 thread_local's features. You
+/// should only use this for PODs that you can statically initialize to
+/// some constant value. In almost all circumstances this is most appropriate
+/// for use with a pointer, integer, or small aggregation of pointers and
+/// integers.
+#if LLVM_ENABLE_THREADS
+#if __has_feature(cxx_thread_local)
+#define LLVM_THREAD_LOCAL thread_local
+#elif defined(_MSC_VER)
+// MSVC supports this with a __declspec.
+#define LLVM_THREAD_LOCAL __declspec(thread)
+#else
+// Clang, GCC, and other compatible compilers used __thread prior to C++11 and
+// we only need the restricted functionality that it provides.
+#define LLVM_THREAD_LOCAL __thread
+#endif
+#else // !LLVM_ENABLE_THREADS
+// If threading is disabled entirely, this compiles to nothing and you get
+// a normal global variable.
+#define LLVM_THREAD_LOCAL
+#endif
+
+/// \macro LLVM_ENABLE_EXCEPTIONS
+/// \brief Whether LLVM is built with exception support.
+#if __has_feature(cxx_exceptions)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(__GNUC__) && defined(__EXCEPTIONS)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(_MSC_VER) && defined(_CPPUNWIND)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#endif
+
+#endif // LLVM_SUPPORT_COMPILER_H
diff --git a/linux-x64/clang/include/llvm/Demangle/Demangle.h b/linux-x64/clang/include/llvm/Demangle/Demangle.h
new file mode 100644
index 0000000..d2eb56b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Demangle/Demangle.h
@@ -0,0 +1,28 @@
+//===--- Demangle.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_DEMANGLE_H
+#define LLVM_DEMANGLE_DEMANGLE_H
+
+#include <cstddef>
+
+namespace llvm {
+/// This is an LLVM-local version of __cxa_demangle. Other than the name and
+/// being in the llvm namespace, it is identical.
+///
+/// The mangled_name is demangled into buf and returned. If the buffer is not
+/// large enough, realloc is used to expand it.
+///
+/// The *status will be set to
+///   unknown_error: -4
+///   invalid_args:  -3
+///   invalid_mangled_name: -2
+///   memory_alloc_failure: -1
+///   success: 0
+
+char *itaniumDemangle(const char *mangled_name, char *buf, size_t *n,
+                      int *status);
+}
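+
+// Illustrative call sequence (a sketch, not part of the original header),
+// following the __cxa_demangle contract documented above; the mangled name
+// is an arbitrary example:
+//
+//   int Status = 0;
+//   char *Demangled =
+//       llvm::itaniumDemangle("_Z3foov", nullptr, nullptr, &Status);
+//   if (Status == 0) {   // success
+//     puts(Demangled);   // prints "foo()"
+//     free(Demangled);   // the buffer is malloc'd/realloc'd by the callee
+//   }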
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h b/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h
new file mode 100644
index 0000000..7932688
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -0,0 +1,667 @@
+//===- ExecutionEngine.h - Abstract Execution Engine Interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the abstract interface that implements execution support
+// for LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+#define LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+struct GenericValue;
+class GlobalValue;
+class GlobalVariable;
+class JITEventListener;
+class MCJITMemoryManager;
+class ObjectCache;
+class RTDyldMemoryManager;
+class Triple;
+class Type;
+
+namespace object {
+
+class Archive;
+class ObjectFile;
+
+} // end namespace object
+
+/// \brief Helper class for synchronizing access to the global address map
+/// table.  Access to this class should be serialized under a mutex.
+class ExecutionEngineState {
+public:
+  using GlobalAddressMapTy = StringMap<uint64_t>;
+
+private:
+  /// GlobalAddressMap - A mapping from LLVM global symbol names to the
+  /// addresses at which they have been actualized.
+  GlobalAddressMapTy GlobalAddressMap;
+
+  /// GlobalAddressReverseMap - This is the reverse mapping of GlobalAddressMap,
+  /// used to convert raw addresses into the LLVM global value that is emitted
+  /// at the address.  This map is not computed unless getGlobalValueAtAddress
+  /// is called at some point.
+  std::map<uint64_t, std::string> GlobalAddressReverseMap;
+
+public:
+  GlobalAddressMapTy &getGlobalAddressMap() {
+    return GlobalAddressMap;
+  }
+
+  std::map<uint64_t, std::string> &getGlobalAddressReverseMap() {
+    return GlobalAddressReverseMap;
+  }
+
+  /// \brief Erase an entry from the mapping table.
+  ///
+  /// \returns The address that \p Name was mapped to.
+  uint64_t RemoveMapping(StringRef Name);
+};
+
+using FunctionCreator = std::function<void *(const std::string &)>;
+
+/// \brief Abstract interface for the execution of LLVM modules, designed to
+/// support both interpreter and just-in-time (JIT) compiler implementations.
+class ExecutionEngine {
+  /// The state object holding the global address mapping, which must be
+  /// accessed synchronously.
+  //
+  // FIXME: There is no particular reason the entire map needs to be
+  // synchronized.  Wouldn't a reader-writer design be better here?
+  ExecutionEngineState EEState;
+
+  /// The target data for the platform for which execution is being performed.
+  ///
+  /// Note: the DataLayout is LLVMContext specific because it has an
+  /// internal cache based on type pointers. This makes it unsafe to reuse the
+  /// ExecutionEngine across contexts; we don't enforce this rule, but
+  /// undefined behavior can occur if the user tries to do it.
+  const DataLayout DL;
+
+  /// Whether lazy JIT compilation is enabled.
+  bool CompilingLazily;
+
+  /// Whether JIT compilation of external global variables is allowed.
+  bool GVCompilationDisabled;
+
+  /// Whether the JIT should perform lookups of external symbols (e.g.,
+  /// using dlsym).
+  bool SymbolSearchingDisabled;
+
+  /// Whether the JIT should verify IR modules during compilation.
+  bool VerifyModules;
+
+  friend class EngineBuilder;  // To allow access to JITCtor and InterpCtor.
+
+protected:
+  /// The list of Modules that we are JIT'ing from.  We use a SmallVector to
+  /// optimize for the case where there is only one module.
+  SmallVector<std::unique_ptr<Module>, 1> Modules;
+
+  /// getMemoryForGV - Allocate memory for a global variable.
+  virtual char *getMemoryForGV(const GlobalVariable *GV);
+
+  static ExecutionEngine *(*MCJITCtor)(
+      std::unique_ptr<Module> M, std::string *ErrorStr,
+      std::shared_ptr<MCJITMemoryManager> MM,
+      std::shared_ptr<LegacyJITSymbolResolver> SR,
+      std::unique_ptr<TargetMachine> TM);
+
+  static ExecutionEngine *(*OrcMCJITReplacementCtor)(
+      std::string *ErrorStr, std::shared_ptr<MCJITMemoryManager> MM,
+      std::shared_ptr<LegacyJITSymbolResolver> SR,
+      std::unique_ptr<TargetMachine> TM);
+
+  static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
+                                        std::string *ErrorStr);
+
+  /// LazyFunctionCreator - If an unknown function is needed, this function
+  /// pointer is invoked to create it.  If this returns null, the JIT will
+  /// abort.
+  FunctionCreator LazyFunctionCreator;
+
+  /// getMangledName - Get the mangled name of the given global value.
+  std::string getMangledName(const GlobalValue *GV);
+
+public:
+  /// lock - This lock protects the ExecutionEngine and MCJIT classes. It must
+  /// be held while changing the internal state of any of those classes.
+  sys::Mutex lock;
+
+  //===--------------------------------------------------------------------===//
+  //  ExecutionEngine Startup
+  //===--------------------------------------------------------------------===//
+
+  virtual ~ExecutionEngine();
+
+  /// Add a Module to the list of modules that we can JIT from.
+  virtual void addModule(std::unique_ptr<Module> M) {
+    Modules.push_back(std::move(M));
+  }
+
+  /// addObjectFile - Add an ObjectFile to the execution engine.
+  ///
+  /// This method is only supported by MCJIT.  MCJIT will immediately load the
+  /// object into memory and add its symbols to the list used to resolve
+  /// external symbols while preparing other objects for execution.
+  ///
+  /// Objects added using this function will not be made executable until
+  /// needed by another object.
+  ///
+  /// MCJIT will take ownership of the ObjectFile.
+  virtual void addObjectFile(std::unique_ptr<object::ObjectFile> O);
+  virtual void addObjectFile(object::OwningBinary<object::ObjectFile> O);
+
+  /// addArchive - Add an Archive to the execution engine.
+  ///
+  /// This method is only supported by MCJIT.  MCJIT will use the archive to
+  /// resolve external symbols in objects it is loading.  If a symbol is found
+  /// in the Archive the contained object file will be extracted (in memory)
+  /// and loaded for possible execution.
+  virtual void addArchive(object::OwningBinary<object::Archive> A);
+
+  //===--------------------------------------------------------------------===//
+
+  const DataLayout &getDataLayout() const { return DL; }
+
+  /// removeModule - Removes a Module from the list of modules, but does not
+  /// free the module's memory. Returns true if M is found, in which case the
+  /// caller assumes responsibility for deleting the module.
+  //
+  // FIXME: This stealth ownership transfer is horrible. This will probably be
+  //        fixed by deleting ExecutionEngine.
+  virtual bool removeModule(Module *M);
+
+  /// FindFunctionNamed - Search all of the active modules to find the
+  /// function that defines FnName.  This is a very slow operation and
+  /// shouldn't be used for general code.
+  virtual Function *FindFunctionNamed(StringRef FnName);
+
+  /// FindGlobalVariableNamed - Search all of the active modules to find the
+  /// global variable that defines Name.  This is a very slow operation and
+  /// shouldn't be used for general code.
+  virtual GlobalVariable *FindGlobalVariableNamed(StringRef Name, bool AllowInternal = false);
+
+  /// runFunction - Execute the specified function with the specified arguments,
+  /// and return the result.
+  ///
+  /// For MCJIT execution engines, clients are encouraged to use the
+  /// "GetFunctionAddress" method (rather than runFunction) and cast the
+  /// returned uint64_t to the desired function pointer type. However, for
+  /// backwards compatibility MCJIT's implementation can execute 'main-like'
+  /// functions (i.e. those returning void or int, and taking either no
+  /// arguments or (int, char*[])).
+  virtual GenericValue runFunction(Function *F,
+                                   ArrayRef<GenericValue> ArgValues) = 0;
+
+  /// getPointerToNamedFunction - This method returns the address of the
+  /// specified function by using the dlsym function call.  As such it is only
+  /// useful for resolving library symbols, not code generated symbols.
+  ///
+  /// If AbortOnFailure is false and no function with the given name is
+  /// found, this function silently returns a null pointer. Otherwise,
+  /// it prints a message to stderr and aborts.
+  ///
+  /// This function is deprecated for the MCJIT execution engine.
+  virtual void *getPointerToNamedFunction(StringRef Name,
+                                          bool AbortOnFailure = true) = 0;
+
+  /// mapSectionAddress - map a section to its target address space value.
+  /// Map the address of a JIT section as returned from the memory manager
+  /// to the address in the target process as the running code will see it.
+  /// This is the address which will be used for relocation resolution.
+  virtual void mapSectionAddress(const void *LocalAddress,
+                                 uint64_t TargetAddress) {
+    llvm_unreachable("Re-mapping of section addresses not supported with this "
+                     "EE!");
+  }
+
+  /// generateCodeForModule - Run code generation for the specified module and
+  /// load it into memory.
+  ///
+  /// When this function has completed, all code and data for the specified
+  /// module, and any module on which this module depends, will be generated
+  /// and loaded into memory, but relocations will not yet have been applied
+  /// and all memory will be readable and writable but not executable.
+  ///
+  /// This function is primarily useful when generating code for an external
+  /// target, allowing the client an opportunity to remap section addresses
+  /// before relocations are applied.  Clients that intend to execute code
+  /// locally can use the getFunctionAddress call, which will generate code
+  /// and apply final preparations all in one step.
+  ///
+  /// This method has no effect for the interpreter.
+  virtual void generateCodeForModule(Module *M) {}
+
+  /// finalizeObject - ensure the module is fully processed and is usable.
+  ///
+  /// It is the user-level function for completing the process of making the
+  /// object usable for execution.  It should be called after sections within an
+  /// object have been relocated using mapSectionAddress.  When this method is
+  /// called the MCJIT execution engine will reapply relocations for a loaded
+  /// object.  This method has no effect for the interpreter.
+  virtual void finalizeObject() {}
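+
+  // An illustrative remote-target flow (a sketch; EE, M, LocalPtr, and the
+  // target address are hypothetical):
+  //
+  //   EE->generateCodeForModule(M);               // codegen, no relocations
+  //   EE->mapSectionAddress(LocalPtr, 0x10000);   // remap a section
+  //   EE->finalizeObject();                       // (re)apply relocations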
+
+  /// runStaticConstructorsDestructors - This method is used to execute all of
+  /// the static constructors or destructors for a program.
+  ///
+  /// \param isDtors - Run the destructors instead of constructors.
+  virtual void runStaticConstructorsDestructors(bool isDtors);
+
+  /// This method is used to execute all of the static constructors or
+  /// destructors for a particular module.
+  ///
+  /// \param isDtors - Run the destructors instead of constructors.
+  void runStaticConstructorsDestructors(Module &module, bool isDtors);
+
+
+  /// runFunctionAsMain - This is a helper function which wraps runFunction to
+  /// handle the common task of starting up main with the specified argc, argv,
+  /// and envp parameters.
+  int runFunctionAsMain(Function *Fn, const std::vector<std::string> &argv,
+                        const char * const * envp);
+
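+  // Illustrative call (a sketch; EE and MainFn are hypothetical):
+  //
+  //   std::vector<std::string> Args = {"prog", "--flag"};
+  //   const char *const Envp[] = {nullptr};
+  //   int RC = EE->runFunctionAsMain(MainFn, Args, Envp);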
+
+  /// addGlobalMapping - Tell the execution engine that the specified global is
+  /// at the specified location.  This is used internally as functions are JIT'd
+  /// and as global variables are laid out in memory.  It can and should also be
+  /// used by clients of the EE that want an LLVM global to overlay existing
+  /// data in memory. Values to be mapped should be named, and have
+  /// external or weak linkage. Mappings are automatically removed when their
+  /// GlobalValue is destroyed.
+  void addGlobalMapping(const GlobalValue *GV, void *Addr);
+  void addGlobalMapping(StringRef Name, uint64_t Addr);
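+
+  // Illustrative use (a sketch; EE, GV, and HostBuffer are hypothetical):
+  // overlay an external global onto existing host memory.
+  //
+  //   static int HostBuffer[16];
+  //   EE->addGlobalMapping(GV, HostBuffer);                 // by value
+  //   EE->addGlobalMapping("my_buf", (uint64_t)(uintptr_t)HostBuffer);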
+
+  /// clearAllGlobalMappings - Clear all global mappings and start over again,
+  /// for use in dynamic compilation scenarios to move globals.
+  void clearAllGlobalMappings();
+
+  /// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+  /// particular module, because it has been removed from the JIT.
+  void clearGlobalMappingsFromModule(Module *M);
+
+  /// updateGlobalMapping - Replace an existing mapping for GV with a new
+  /// address.  This updates both maps as required.  If "Addr" is null, the
+  /// entry for the global is removed from the mappings.  This returns the old
+  /// value of the pointer, or null if it was not in the map.
+  uint64_t updateGlobalMapping(const GlobalValue *GV, void *Addr);
+  uint64_t updateGlobalMapping(StringRef Name, uint64_t Addr);
+
+  /// getAddressToGlobalIfAvailable - This returns the address of the specified
+  /// global symbol.
+  uint64_t getAddressToGlobalIfAvailable(StringRef S);
+
+  /// getPointerToGlobalIfAvailable - This returns the address of the specified
+  /// global value if it has already been codegen'd; otherwise it returns
+  /// null.
+  void *getPointerToGlobalIfAvailable(StringRef S);
+  void *getPointerToGlobalIfAvailable(const GlobalValue *GV);
+
+  /// getPointerToGlobal - This returns the address of the specified global
+  /// value. This may involve code generation if it's a function.
+  ///
+  /// This function is deprecated for the MCJIT execution engine.  Use
+  /// getGlobalValueAddress instead.
+  void *getPointerToGlobal(const GlobalValue *GV);
+
+  /// getPointerToFunction - The different EE's represent function bodies in
+  /// different ways.  They should each implement this to say what a function
+  /// pointer should look like.  When F is destroyed, the ExecutionEngine will
+  /// remove its global mapping and free any machine code.  Be sure no threads
+  /// are running inside F when that happens.
+  ///
+  /// This function is deprecated for the MCJIT execution engine.  Use
+  /// getFunctionAddress instead.
+  virtual void *getPointerToFunction(Function *F) = 0;
+
+  /// getPointerToFunctionOrStub - If the specified function has been
+  /// code-gen'd, return a pointer to the function.  If not, compile it, or use
+  /// a stub to implement lazy compilation if available.  See
+  /// getPointerToFunction for the requirements on destroying F.
+  ///
+  /// This function is deprecated for the MCJIT execution engine.  Use
+  /// getFunctionAddress instead.
+  virtual void *getPointerToFunctionOrStub(Function *F) {
+    // Default implementation, just codegen the function.
+    return getPointerToFunction(F);
+  }
+
+  /// getGlobalValueAddress - Return the address of the specified global
+  /// value. This may involve code generation.
+  ///
+  /// This function should not be called with the interpreter engine.
+  virtual uint64_t getGlobalValueAddress(const std::string &Name) {
+    // Default implementation for the interpreter.  MCJIT will override this.
+    // JIT and interpreter clients should use getPointerToGlobal instead.
+    return 0;
+  }
+
+  /// getFunctionAddress - Return the address of the specified function.
+  /// This may involve code generation.
+  virtual uint64_t getFunctionAddress(const std::string &Name) {
+    // Default implementation for the interpreter.  MCJIT will override this.
+    // Interpreter clients should use getPointerToFunction instead.
+    return 0;
+  }
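+
+  // Illustrative MCJIT use (a sketch; EE is hypothetical): cast the returned
+  // address to the function's real type before calling it, as encouraged in
+  // the runFunction documentation above.
+  //
+  //   auto Add =
+  //       (int (*)(int, int))(uintptr_t)EE->getFunctionAddress("add");
+  //   int Sum = Add ? Add(2, 3) : 0;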
+
+  /// getGlobalValueAtAddress - Return the LLVM global value object that starts
+  /// at the specified address.
+  ///
+  const GlobalValue *getGlobalValueAtAddress(void *Addr);
+
+  /// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr.
+  /// Ptr is the address of the memory at which to store Val, cast to
+  /// GenericValue *.  It is not a pointer to a GenericValue containing the
+  /// address at which to store Val.
+  void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
+                          Type *Ty);
+
+  void InitializeMemory(const Constant *Init, void *Addr);
+
+  /// getOrEmitGlobalVariable - Return the address of the specified global
+  /// variable, possibly emitting it to memory if needed.  This is used by the
+  /// Emitter.
+  ///
+  /// This function is deprecated for the MCJIT execution engine.  Use
+  /// getGlobalValueAddress instead.
+  virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
+    return getPointerToGlobal((const GlobalValue *)GV);
+  }
+
+  /// Registers a listener to be called back on various events within
+  /// the JIT.  See JITEventListener.h for more details.  Does not
+  /// take ownership of the argument.  The argument may be NULL, in
+  /// which case these functions do nothing.
+  virtual void RegisterJITEventListener(JITEventListener *) {}
+  virtual void UnregisterJITEventListener(JITEventListener *) {}
+
+  /// Sets the pre-compiled object cache.  The ownership of the ObjectCache is
+  /// not changed.  Supported by MCJIT but not the interpreter.
+  virtual void setObjectCache(ObjectCache *) {
+    llvm_unreachable("No support for an object cache");
+  }
+
+  /// setProcessAllSections (MCJIT Only): By default, only sections that are
+  /// "required for execution" are passed to the RTDyldMemoryManager, and other
+  /// sections are discarded. Passing 'true' to this method will cause
+  /// RuntimeDyld to pass all sections to its RTDyldMemoryManager regardless
+  /// of whether they are "required to execute" in the usual sense.
+  ///
+  /// Rationale: Some MCJIT clients want to be able to inspect metadata
+  /// sections (e.g. Dwarf, Stack-maps) to enable functionality or analyze
+  /// performance. Passing these sections to the memory manager allows the
+  /// client to make policy about the relevant sections, rather than having
+  /// MCJIT do it.
+  virtual void setProcessAllSections(bool ProcessAllSections) {
+    llvm_unreachable("No support for ProcessAllSections option");
+  }
+
+  /// Return the target machine (if available).
+  virtual TargetMachine *getTargetMachine() { return nullptr; }
+
+  /// DisableLazyCompilation - When lazy compilation is off (the default), the
+  /// JIT will eagerly compile every function reachable from the argument to
+  /// getPointerToFunction.  If lazy compilation is turned on, the JIT will only
+  /// compile the one function and emit stubs to compile the rest when they're
+  /// first called.  If lazy compilation is turned off again while some lazy
+  /// stubs are still around, and one of those stubs is called, the program will
+  /// abort.
+  ///
+  /// In order to safely compile lazily in a threaded program, the user must
+  /// ensure that 1) only one thread at a time can call any particular lazy
+  /// stub, and 2) any thread modifying LLVM IR must hold the JIT's lock
+  /// (ExecutionEngine::lock) or otherwise ensure that no other thread calls a
+  /// lazy stub.  See http://llvm.org/PR5184 for details.
+  void DisableLazyCompilation(bool Disabled = true) {
+    CompilingLazily = !Disabled;
+  }
+  bool isCompilingLazily() const {
+    return CompilingLazily;
+  }
+
+  /// DisableGVCompilation - If called, the JIT will abort if it's asked to
+  /// allocate space and populate a GlobalVariable that is not internal to
+  /// the module.
+  void DisableGVCompilation(bool Disabled = true) {
+    GVCompilationDisabled = Disabled;
+  }
+  bool isGVCompilationDisabled() const {
+    return GVCompilationDisabled;
+  }
+
+  /// DisableSymbolSearching - If called, the JIT will not try to look up
+  /// unknown symbols with dlsym.  A client can still use
+  /// InstallLazyFunctionCreator to resolve symbols in a custom way.
+  void DisableSymbolSearching(bool Disabled = true) {
+    SymbolSearchingDisabled = Disabled;
+  }
+  bool isSymbolSearchingDisabled() const {
+    return SymbolSearchingDisabled;
+  }
+
+  /// Enable/Disable IR module verification.
+  ///
+  /// Note: Module verification is enabled by default in Debug builds, and
+  /// disabled by default in Release. Use this method to override the default.
+  void setVerifyModules(bool Verify) {
+    VerifyModules = Verify;
+  }
+  bool getVerifyModules() const {
+    return VerifyModules;
+  }
+
+  /// InstallLazyFunctionCreator - If an unknown function is needed, the
+  /// specified function pointer is invoked to create it.  If it returns null,
+  /// the JIT will abort.
+  void InstallLazyFunctionCreator(FunctionCreator C) {
+    LazyFunctionCreator = std::move(C);
+  }
+
+protected:
+  ExecutionEngine(DataLayout DL) : DL(std::move(DL)) {}
+  explicit ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M);
+  explicit ExecutionEngine(std::unique_ptr<Module> M);
+
+  void emitGlobals();
+
+  void EmitGlobalVariable(const GlobalVariable *GV);
+
+  GenericValue getConstantValue(const Constant *C);
+  void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
+                           Type *Ty);
+
+private:
+  void Init(std::unique_ptr<Module> M);
+};
+
+namespace EngineKind {
+
+  // These are actually bitmasks that get or-ed together.
+  enum Kind {
+    JIT         = 0x1,
+    Interpreter = 0x2
+  };
+  const static Kind Either = (Kind)(JIT | Interpreter);
+
+} // end namespace EngineKind
+
+/// Builder class for ExecutionEngines. Use this by stack-allocating a builder,
+/// chaining the various set* methods, and terminating it with a .create()
+/// call.
+class EngineBuilder {
+private:
+  std::unique_ptr<Module> M;
+  EngineKind::Kind WhichEngine;
+  std::string *ErrorStr;
+  CodeGenOpt::Level OptLevel;
+  std::shared_ptr<MCJITMemoryManager> MemMgr;
+  std::shared_ptr<LegacyJITSymbolResolver> Resolver;
+  TargetOptions Options;
+  Optional<Reloc::Model> RelocModel;
+  Optional<CodeModel::Model> CMModel;
+  std::string MArch;
+  std::string MCPU;
+  SmallVector<std::string, 4> MAttrs;
+  bool VerifyModules;
+  bool UseOrcMCJITReplacement;
+  bool EmulatedTLS = true;
+
+public:
+  /// Default constructor for EngineBuilder.
+  EngineBuilder();
+
+  /// Constructor for EngineBuilder.
+  EngineBuilder(std::unique_ptr<Module> M);
+
+  // Out-of-line since we don't have the def'n of RTDyldMemoryManager here.
+  ~EngineBuilder();
+
+  /// setEngineKind - Controls whether the user wants the interpreter, the JIT,
+  /// or whichever engine works.  This option defaults to EngineKind::Either.
+  EngineBuilder &setEngineKind(EngineKind::Kind w) {
+    WhichEngine = w;
+    return *this;
+  }
+
+  /// setMCJITMemoryManager - Sets the MCJIT memory manager to use. This allows
+  /// clients to customize their memory allocation policies for the MCJIT. This
+  /// is only appropriate for the MCJIT; setting this and configuring the builder
+  /// to create anything other than MCJIT will cause a runtime error. If create()
+  /// is called and is successful, the created engine takes ownership of the
+  /// memory manager. This option defaults to NULL.
+  EngineBuilder &setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);
+
+  EngineBuilder&
+  setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
+
+  EngineBuilder &setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR);
+
+  /// setErrorStr - Set the error string to write to on error.  This option
+  /// defaults to NULL.
+  EngineBuilder &setErrorStr(std::string *e) {
+    ErrorStr = e;
+    return *this;
+  }
+
+  /// setOptLevel - Set the optimization level for the JIT.  This option
+  /// defaults to CodeGenOpt::Default.
+  EngineBuilder &setOptLevel(CodeGenOpt::Level l) {
+    OptLevel = l;
+    return *this;
+  }
+
+  /// setTargetOptions - Set the target options that the ExecutionEngine
+  /// target is using. Defaults to TargetOptions().
+  EngineBuilder &setTargetOptions(const TargetOptions &Opts) {
+    Options = Opts;
+    return *this;
+  }
+
+  /// setRelocationModel - Set the relocation model that the ExecutionEngine
+  /// target is using. Defaults to the target-specific default,
+  /// Reloc::Default.
+  EngineBuilder &setRelocationModel(Reloc::Model RM) {
+    RelocModel = RM;
+    return *this;
+  }
+
+  /// setCodeModel - Set the CodeModel that the ExecutionEngine target
+  /// data is using. Defaults to the target-specific default,
+  /// CodeModel::JITDefault.
+  EngineBuilder &setCodeModel(CodeModel::Model M) {
+    CMModel = M;
+    return *this;
+  }
+
+  /// setMArch - Override the architecture set by the Module's triple.
+  EngineBuilder &setMArch(StringRef march) {
+    MArch.assign(march.begin(), march.end());
+    return *this;
+  }
+
+  /// setMCPU - Target a specific cpu type.
+  EngineBuilder &setMCPU(StringRef mcpu) {
+    MCPU.assign(mcpu.begin(), mcpu.end());
+    return *this;
+  }
+
+  /// setVerifyModules - Set whether the JIT implementation should verify
+  /// IR modules during compilation.
+  EngineBuilder &setVerifyModules(bool Verify) {
+    VerifyModules = Verify;
+    return *this;
+  }
+
+  /// setMAttrs - Set cpu-specific attributes.
+  template<typename StringSequence>
+  EngineBuilder &setMAttrs(const StringSequence &mattrs) {
+    MAttrs.clear();
+    MAttrs.append(mattrs.begin(), mattrs.end());
+    return *this;
+  }
+
+  /// \brief Use OrcMCJITReplacement instead of MCJIT. Off by default.
+  void setUseOrcMCJITReplacement(bool UseOrcMCJITReplacement) {
+    this->UseOrcMCJITReplacement = UseOrcMCJITReplacement;
+  }
+
+  void setEmulatedTLS(bool EmulatedTLS) {
+    this->EmulatedTLS = EmulatedTLS;
+  }
+
+  TargetMachine *selectTarget();
+
+  /// selectTarget - Pick a target either via -march or by guessing the native
+  /// arch.  Add any CPU features specified via -mcpu or -mattr.
+  TargetMachine *selectTarget(const Triple &TargetTriple,
+                              StringRef MArch,
+                              StringRef MCPU,
+                              const SmallVectorImpl<std::string>& MAttrs);
+
+  ExecutionEngine *create() {
+    return create(selectTarget());
+  }
+
+  ExecutionEngine *create(TargetMachine *TM);
+};
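+
+// Illustrative builder usage (a sketch; M is a hypothetical
+// std::unique_ptr<Module>), following the chaining pattern described above:
+//
+//   std::string Err;
+//   ExecutionEngine *EE = EngineBuilder(std::move(M))
+//                             .setEngineKind(EngineKind::JIT)
+//                             .setErrorStr(&Err)
+//                             .setOptLevel(CodeGenOpt::Default)
+//                             .create();
+//   if (!EE)
+//     errs() << "engine creation failed: " << Err << "\n";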
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionEngine, LLVMExecutionEngineRef)
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h b/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h
new file mode 100644
index 0000000..504e30a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h
@@ -0,0 +1,55 @@
+//===- GenericValue.h - Represent any type of LLVM value --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The GenericValue class is used to represent an LLVM value of arbitrary type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_GENERICVALUE_H
+#define LLVM_EXECUTIONENGINE_GENERICVALUE_H
+
+#include "llvm/ADT/APInt.h"
+#include <vector>
+
+namespace llvm {
+
+using PointerTy = void *;
+
+struct GenericValue {
+  struct IntPair {
+    unsigned int first;
+    unsigned int second;
+  };
+  union {
+    double DoubleVal;
+    float FloatVal;
+    PointerTy PointerVal;
+    struct IntPair UIntPairVal;
+    unsigned char Untyped[8];
+  };
+  APInt IntVal; // also used for long doubles.
+  // For aggregate data types.
+  std::vector<GenericValue> AggregateVal;
+
+  // To make code faster, zeroing the GenericValue could be omitted, but that
+  // can potentially cause problems, since the GenericValue would then store
+  // garbage instead of zero.
+  GenericValue() : IntVal(1, 0) {
+    UIntPairVal.first = 0;
+    UIntPairVal.second = 0;
+  }
+  explicit GenericValue(void *V) : PointerVal(V), IntVal(1, 0) {}
+};
+
+inline GenericValue PTOGV(void *P) { return GenericValue(P); }
+inline void *GVTOP(const GenericValue &GV) { return GV.PointerVal; }
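+
+// Illustrative round trips (a sketch, not part of the original header):
+//
+//   int X = 42;
+//   GenericValue PV = PTOGV(&X);                  // wrap a host pointer
+//   int *P = static_cast<int *>(GVTOP(PV));       // unwrap it again
+//
+//   GenericValue IV;                              // integer payload
+//   IV.IntVal = APInt(32, 7);                     // 32-bit value 7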
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_GENERICVALUE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h b/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h
new file mode 100644
index 0000000..a147078
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h
@@ -0,0 +1,28 @@
+//===-- Interpreter.h - Abstract Execution Engine Interface -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the interpreter to link in on certain operating systems
+// (e.g. Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_INTERPRETER_H
+#define LLVM_EXECUTIONENGINE_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+
+extern "C" void LLVMLinkInInterpreter();
+
+namespace {
+  struct ForceInterpreterLinking {
+    ForceInterpreterLinking() { LLVMLinkInInterpreter(); }
+  } ForceInterpreterLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h b/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h
new file mode 100644
index 0000000..ff7840f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h
@@ -0,0 +1,124 @@
+//===- JITEventListener.h - Exposes events from JIT compilation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the JITEventListener interface, which lets users get
+// callbacks when significant events happen during the JIT compilation process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+#define LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DebugLoc.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class IntelJITEventsWrapper;
+class MachineFunction;
+class OProfileWrapper;
+
+namespace object {
+
+class ObjectFile;
+
+} // end namespace object
+
+/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
+/// about a generated machine code function.
+struct JITEvent_EmittedFunctionDetails {
+  struct LineStart {
+    /// The address at which the current line changes.
+    uintptr_t Address;
+
+    /// The new location information.  These can be translated to DebugLocTuples
+    /// using MF->getDebugLocTuple().
+    DebugLoc Loc;
+  };
+
+  /// The machine function the struct contains information for.
+  const MachineFunction *MF;
+
+  /// The list of line boundary information, sorted by address.
+  std::vector<LineStart> LineStarts;
+};
+
+/// JITEventListener - Abstract interface for use by the JIT to notify clients
+/// about significant events during compilation. For example, it can notify
+/// profilers and debuggers that need to know where functions have been
+/// emitted.
+///
+/// The default implementation of each method does nothing.
+class JITEventListener {
+public:
+  using EmittedFunctionDetails = JITEvent_EmittedFunctionDetails;
+
+public:
+  JITEventListener() = default;
+  virtual ~JITEventListener() = default;
+
+  /// NotifyObjectEmitted - Called after an object has been successfully
+  /// emitted to memory.  NotifyFunctionEmitted will not be called for
+  /// individual functions in the object.
+  ///
+  /// ELF-specific information
+  /// The ObjectImage contains the generated object image
+  /// with section headers updated to reflect the address at which sections
+  /// were loaded and with relocations performed in-place on debug sections.
+  virtual void NotifyObjectEmitted(const object::ObjectFile &Obj,
+                                   const RuntimeDyld::LoadedObjectInfo &L) {}
+
+  /// NotifyFreeingObject - Called just before the memory associated with
+  /// a previously emitted object is released.
+  virtual void NotifyFreeingObject(const object::ObjectFile &Obj) {}
+
+  // Get a pointer to the GDB debugger registration listener.
+  static JITEventListener *createGDBRegistrationListener();
+
+#if LLVM_USE_INTEL_JITEVENTS
+  // Construct an IntelJITEventListener
+  static JITEventListener *createIntelJITEventListener();
+
+  // Construct an IntelJITEventListener with a test Intel JIT API implementation
+  static JITEventListener *createIntelJITEventListener(
+                                      IntelJITEventsWrapper* AlternativeImpl);
+#else
+  static JITEventListener *createIntelJITEventListener() { return nullptr; }
+
+  static JITEventListener *createIntelJITEventListener(
+                                      IntelJITEventsWrapper* AlternativeImpl) {
+    return nullptr;
+  }
+#endif // LLVM_USE_INTEL_JITEVENTS
+
+#if LLVM_USE_OPROFILE
+  // Construct an OProfileJITEventListener
+  static JITEventListener *createOProfileJITEventListener();
+
+  // Construct an OProfileJITEventListener with a test opagent implementation
+  static JITEventListener *createOProfileJITEventListener(
+                                      OProfileWrapper* AlternativeImpl);
+#else
+  static JITEventListener *createOProfileJITEventListener() { return nullptr; }
+
+  static JITEventListener *createOProfileJITEventListener(
+                                      OProfileWrapper* AlternativeImpl) {
+    return nullptr;
+  }
+#endif // LLVM_USE_OPROFILE
+
+private:
+  virtual void anchor();
+};
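+
+// An illustrative listener (a sketch; the class name and counter are
+// hypothetical) overriding the object-emission callbacks above:
+//
+//   class CountingListener : public JITEventListener {
+//     unsigned NumObjects = 0;
+//
+//     void NotifyObjectEmitted(const object::ObjectFile &Obj,
+//                              const RuntimeDyld::LoadedObjectInfo &L)
+//         override {
+//       ++NumObjects;
+//     }
+//   };
+//
+// Register it via ExecutionEngine::RegisterJITEventListener; the engine does
+// not take ownership of the listener.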
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h b/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h
new file mode 100644
index 0000000..86ab173
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h
@@ -0,0 +1,344 @@
+//===- JITSymbol.h - JIT symbol abstraction ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstraction for target process addresses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITSYMBOL_H
+#define LLVM_EXECUTIONENGINE_JITSYMBOL_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <set>
+#include <string>
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+class GlobalValue;
+
+namespace object {
+
+class BasicSymbolRef;
+
+} // end namespace object
+
+/// @brief Represents an address in the target process's address space.
+using JITTargetAddress = uint64_t;
+
+/// @brief Flags for symbols in the JIT.
+class JITSymbolFlags {
+public:
+  using UnderlyingType = uint8_t;
+  using TargetFlagsType = uint64_t;
+
+  enum FlagNames : UnderlyingType {
+    None = 0,
+    HasError = 1U << 0,
+    Weak = 1U << 1,
+    Common = 1U << 2,
+    Absolute = 1U << 3,
+    Exported = 1U << 4,
+    NotMaterialized = 1U << 5
+  };
+
+  static JITSymbolFlags stripTransientFlags(JITSymbolFlags Orig) {
+    return static_cast<FlagNames>(Orig.Flags & ~NotMaterialized);
+  }
+
+  /// @brief Default-construct a JITSymbolFlags instance.
+  JITSymbolFlags() = default;
+
+  /// @brief Construct a JITSymbolFlags instance from the given flags.
+  JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}
+
+  /// @brief Construct a JITSymbolFlags instance from the given flags and target
+  ///        flags.
+  JITSymbolFlags(FlagNames Flags, TargetFlagsType TargetFlags)
+    : Flags(Flags), TargetFlags(TargetFlags) {}
+
+  /// @brief Return true if there was an error retrieving this symbol.
+  bool hasError() const {
+    return (Flags & HasError) == HasError;
+  }
+
+  /// @brief Returns true if this symbol has been fully materialized (i.e. is
+  ///        callable).
+  bool isMaterialized() const { return !(Flags & NotMaterialized); }
+
+  /// @brief Returns true if the Weak flag is set.
+  bool isWeak() const {
+    return (Flags & Weak) == Weak;
+  }
+
+  /// @brief Returns true if the Common flag is set.
+  bool isCommon() const {
+    return (Flags & Common) == Common;
+  }
+
+  /// @brief Returns true if the symbol isn't weak or common.
+  bool isStrong() const {
+    return !isWeak() && !isCommon();
+  }
+
+  /// @brief Returns true if the Exported flag is set.
+  bool isExported() const {
+    return (Flags & Exported) == Exported;
+  }
+
+  /// @brief Implicitly convert to the underlying flags type.
+  operator UnderlyingType&() { return Flags; }
+
+  /// @brief Implicitly convert to the underlying flags type.
+  operator const UnderlyingType&() const { return Flags; }
+
+  /// @brief Return a reference to the target-specific flags.
+  TargetFlagsType& getTargetFlags() { return TargetFlags; }
+
+  /// @brief Return a reference to the target-specific flags.
+  const TargetFlagsType& getTargetFlags() const { return TargetFlags; }
+
+  /// Construct a JITSymbolFlags value based on the flags of the given global
+  /// value.
+  static JITSymbolFlags fromGlobalValue(const GlobalValue &GV);
+
+  /// Construct a JITSymbolFlags value based on the flags of the given libobject
+  /// symbol.
+  static JITSymbolFlags fromObjectSymbol(const object::BasicSymbolRef &Symbol);
+
+private:
+  UnderlyingType Flags = None;
+  TargetFlagsType TargetFlags = 0;
+};
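+
+// Illustrative flag composition (a sketch, not from the original header):
+//
+//   JITSymbolFlags Flags(static_cast<JITSymbolFlags::FlagNames>(
+//       JITSymbolFlags::Weak | JITSymbolFlags::Exported));
+//   assert(Flags.isWeak() && Flags.isExported() && !Flags.isCommon());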
+
+/// @brief ARM-specific JIT symbol flags.
+/// FIXME: This should be moved into a target-specific header.
+class ARMJITSymbolFlags {
+public:
+  ARMJITSymbolFlags() = default;
+
+  enum FlagNames {
+    None = 0,
+    Thumb = 1 << 0
+  };
+
+  operator JITSymbolFlags::TargetFlagsType&() { return Flags; }
+
+  static ARMJITSymbolFlags fromObjectSymbol(
+                                           const object::BasicSymbolRef &Symbol);
+private:
+  JITSymbolFlags::TargetFlagsType Flags = 0;
+};
+
+/// @brief Represents a symbol that has been evaluated to an address already.
+class JITEvaluatedSymbol {
+public:
+  JITEvaluatedSymbol() = default;
+
+  /// @brief Create a 'null' symbol.
+  JITEvaluatedSymbol(std::nullptr_t) {}
+
+  /// @brief Create a symbol for the given address and flags.
+  JITEvaluatedSymbol(JITTargetAddress Address, JITSymbolFlags Flags)
+      : Address(Address), Flags(Flags) {}
+
+  /// @brief An evaluated symbol converts to 'true' if its address is non-zero.
+  explicit operator bool() const { return Address != 0; }
+
+  /// @brief Return the address of this symbol.
+  JITTargetAddress getAddress() const { return Address; }
+
+  /// @brief Return the flags for this symbol.
+  JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+  JITTargetAddress Address = 0;
+  JITSymbolFlags Flags;
+};
+
+/// @brief Represents a symbol in the JIT.
+class JITSymbol {
+public:
+  using GetAddressFtor = std::function<Expected<JITTargetAddress>()>;
+
+  /// @brief Create a 'null' symbol, used to represent a "symbol not found"
+  ///        result from a successful (non-erroneous) lookup.
+  JITSymbol(std::nullptr_t)
+      : CachedAddr(0) {}
+
+  /// @brief Create a JITSymbol representing an error in the symbol lookup
+  ///        process (e.g. a network failure during a remote lookup).
+  JITSymbol(Error Err)
+    : Err(std::move(Err)), Flags(JITSymbolFlags::HasError) {}
+
+  /// @brief Create a symbol for a definition with a known address.
+  JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
+      : CachedAddr(Addr), Flags(Flags) {}
+
+  /// @brief Construct a JITSymbol from a JITEvaluatedSymbol.
+  JITSymbol(JITEvaluatedSymbol Sym)
+      : CachedAddr(Sym.getAddress()), Flags(Sym.getFlags()) {}
+
+  /// @brief Create a symbol for a definition that doesn't have a known address
+  ///        yet.
+  /// @param GetAddress A functor to materialize a definition (fixing the
+  ///        address) on demand.
+  ///
+  ///   This constructor allows a JIT layer to provide a reference to a symbol
+  /// definition without actually materializing the definition up front. The
+  /// user can materialize the definition at any time by calling the getAddress
+  /// method.
+  JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
+      : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}
+
+  JITSymbol(const JITSymbol&) = delete;
+  JITSymbol& operator=(const JITSymbol&) = delete;
+
+  JITSymbol(JITSymbol &&Other)
+    : GetAddress(std::move(Other.GetAddress)), Flags(std::move(Other.Flags)) {
+    if (Flags.hasError())
+      Err = std::move(Other.Err);
+    else
+      CachedAddr = std::move(Other.CachedAddr);
+  }
+
+  JITSymbol& operator=(JITSymbol &&Other) {
+    GetAddress = std::move(Other.GetAddress);
+    Flags = std::move(Other.Flags);
+    if (Flags.hasError())
+      Err = std::move(Other.Err);
+    else
+      CachedAddr = std::move(Other.CachedAddr);
+    return *this;
+  }
+
+  ~JITSymbol() {
+    if (Flags.hasError())
+      Err.~Error();
+    else
+      CachedAddr.~JITTargetAddress();
+  }
+
+  /// @brief Returns true if the symbol exists, false otherwise.
+  explicit operator bool() const {
+    return !Flags.hasError() && (CachedAddr || GetAddress);
+  }
+
+  /// @brief Move the error field value out of this JITSymbol.
+  Error takeError() {
+    if (Flags.hasError())
+      return std::move(Err);
+    return Error::success();
+  }
+
+  /// @brief Get the address of the symbol in the target address space. Returns
+  ///        '0' if the symbol does not exist.
+  Expected<JITTargetAddress> getAddress() {
+    assert(!Flags.hasError() && "getAddress called on error value");
+    if (GetAddress) {
+      if (auto CachedAddrOrErr = GetAddress()) {
+        GetAddress = nullptr;
+        CachedAddr = *CachedAddrOrErr;
+        assert(CachedAddr && "Symbol could not be materialized.");
+      } else
+        return CachedAddrOrErr.takeError();
+    }
+    return CachedAddr;
+  }
+
+  JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+  GetAddressFtor GetAddress;
+  union {
+    JITTargetAddress CachedAddr;
+    Error Err;
+  };
+  JITSymbolFlags Flags;
+};
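+
+// An illustrative deferred-address symbol (a sketch; the materializer body
+// and address are hypothetical), using the functor constructor above:
+//
+//   JITSymbol Sym(
+//       []() -> Expected<JITTargetAddress> {
+//         return static_cast<JITTargetAddress>(0x1000);  // compile here
+//       },
+//       JITSymbolFlags::Exported);
+//   if (auto Addr = Sym.getAddress())
+//     (void)*Addr;                       // materialized on first use
+//   else
+//     consumeError(Addr.takeError());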
+
+/// @brief Symbol resolution interface.
+///
+/// Allows symbol flags and addresses to be looked up by name.
+/// Symbol queries are done in bulk (i.e. you request resolution of a set of
+/// symbols, rather than a single one) to reduce IPC overhead in the case of
+/// remote JITing, and expose opportunities for parallel compilation.
+class JITSymbolResolver {
+public:
+  using LookupSet = std::set<StringRef>;
+  using LookupResult = std::map<StringRef, JITEvaluatedSymbol>;
+  using LookupFlagsResult = std::map<StringRef, JITSymbolFlags>;
+
+  virtual ~JITSymbolResolver() = default;
+
+  /// @brief Returns the fully resolved address and flags for each of the given
+  ///        symbols.
+  ///
+  /// This method will return an error if any of the given symbols can not be
+  /// resolved, or if the resolution process itself triggers an error.
+  virtual Expected<LookupResult> lookup(const LookupSet &Symbols) = 0;
+
+  /// @brief Returns the symbol flags for each of the given symbols.
+  ///
+  /// This method does NOT return an error if any of the given symbols is
+  /// missing. Instead, that symbol will be left out of the result map.
+  virtual Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) = 0;
+
+private:
+  virtual void anchor();
+};
+
+/// \brief Legacy symbol resolution interface.
+class LegacyJITSymbolResolver : public JITSymbolResolver {
+public:
+  /// @brief Performs lookup by, for each symbol, first calling
+  ///        findSymbolInLogicalDylib and if that fails calling
+  ///        findSymbol.
+  Expected<LookupResult> lookup(const LookupSet &Symbols) final;
+
+  /// @brief Performs flags lookup by calling findSymbolInLogicalDylib and
+  ///        returning the flags value for that symbol.
+  Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) final;
+
+  /// This method returns the address of the specified symbol if it exists
+  /// within the logical dynamic library represented by this JITSymbolResolver.
+  /// Unlike findSymbol, queries through this interface should return addresses
+  /// for hidden symbols.
+  ///
+  /// This is of particular importance for the Orc JIT APIs, which support lazy
+  /// compilation by breaking up modules: Each of those broken out modules
+  /// must be able to resolve hidden symbols provided by the others. Clients
+  /// writing memory managers for MCJIT can usually ignore this method.
+  ///
+  /// This method will be queried by RuntimeDyld when checking for previous
+  /// definitions of common symbols.
+  virtual JITSymbol findSymbolInLogicalDylib(const std::string &Name) = 0;
+
+  /// This method returns the address of the specified function or variable.
+  /// It is used to resolve symbols during module linking.
+  ///
+  /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
+  /// skip all relocations for that symbol, and the client will be responsible
+  /// for handling them manually.
+  virtual JITSymbol findSymbol(const std::string &Name) = 0;
+
+private:
+  virtual void anchor();
+};
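+
+// An illustrative resolver (a sketch; the class name and the dlsym fallback,
+// which would require <dlfcn.h>, are hypothetical):
+//
+//   class InProcessResolver : public LegacyJITSymbolResolver {
+//     JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
+//       return nullptr;                  // nothing JIT'd locally yet
+//     }
+//     JITSymbol findSymbol(const std::string &Name) override {
+//       if (void *Addr = dlsym(RTLD_DEFAULT, Name.c_str()))
+//         return JITSymbol((JITTargetAddress)(uintptr_t)Addr,
+//                          JITSymbolFlags::Exported);
+//       return nullptr;
+//     }
+//   };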
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h b/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h
new file mode 100644
index 0000000..66ddb7c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h
@@ -0,0 +1,38 @@
+//===-- MCJIT.h - MC-Based Just-In-Time Execution Engine --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the MCJIT to link in on certain operating systems
+// (e.g. Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_MCJIT_H
+#define LLVM_EXECUTIONENGINE_MCJIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInMCJIT();
+
+namespace {
+  struct ForceMCJITLinking {
+    ForceMCJITLinking() {
+      // We must reference MCJIT in such a way that compilers will not
+      // delete it all as dead code, even with whole program optimization,
+      // yet it is effectively a NO-OP. As the compiler isn't smart enough
+      // to know that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      LLVMLinkInMCJIT();
+    }
+  } ForceMCJITLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h b/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h
new file mode 100644
index 0000000..05da594
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h
@@ -0,0 +1,124 @@
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines an OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+#define LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+  typedef  op_agent_t    (*op_open_agent_ptr_t)();
+  typedef  int           (*op_close_agent_ptr_t)(op_agent_t);
+  typedef  int           (*op_write_native_code_ptr_t)(op_agent_t,
+                                                const char*,
+                                                uint64_t,
+                                                void const*,
+                                                const unsigned int);
+  typedef  int           (*op_write_debug_line_info_ptr_t)(op_agent_t,
+                                                void const*,
+                                                size_t,
+                                                struct debug_line_info const*);
+  typedef  int           (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+  // Also used for the op_minor_version function, which has the same signature.
+  typedef  int           (*op_major_version_ptr_t)();
+
+  // This is not a part of the opagent API, but is useful nonetheless
+  typedef  bool          (*IsOProfileRunningPtrT)();
+
+
+  op_agent_t                      Agent;
+  op_open_agent_ptr_t             OpenAgentFunc;
+  op_close_agent_ptr_t            CloseAgentFunc;
+  op_write_native_code_ptr_t      WriteNativeCodeFunc;
+  op_write_debug_line_info_ptr_t  WriteDebugLineInfoFunc;
+  op_unload_native_code_ptr_t     UnloadNativeCodeFunc;
+  op_major_version_ptr_t          MajorVersionFunc;
+  op_major_version_ptr_t          MinorVersionFunc;
+  IsOProfileRunningPtrT           IsOProfileRunningFunc;
+
+  bool Initialized;
+
+public:
+  OProfileWrapper();
+
+  // For testing with a mock opagent implementation, skips the dynamic load and
+  // the function resolution.
+  OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+                  op_close_agent_ptr_t CloseAgentImpl,
+                  op_write_native_code_ptr_t WriteNativeCodeImpl,
+                  op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+                  op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+                  op_major_version_ptr_t MajorVersionImpl,
+                  op_major_version_ptr_t MinorVersionImpl,
+                  IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+  : OpenAgentFunc(OpenAgentImpl),
+    CloseAgentFunc(CloseAgentImpl),
+    WriteNativeCodeFunc(WriteNativeCodeImpl),
+    WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+    UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+    MajorVersionFunc(MajorVersionImpl),
+    MinorVersionFunc(MinorVersionImpl),
+    IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+    Initialized(true)
+  {
+  }
+
+  // Calls op_open_agent in the oprofile JIT library and saves the returned
+  // op_agent_t handle internally so it can be used when calling all the other
+  // op_* functions. Callers of this class do not need to keep track of
+  // op_agent_t objects.
+  bool op_open_agent();
+
+  int op_close_agent();
+  int op_write_native_code(const char* name,
+                           uint64_t addr,
+                           void const* code,
+                           const unsigned int size);
+  int op_write_debug_line_info(void const* code,
+                               size_t num_entries,
+                               struct debug_line_info const* info);
+  int op_unload_native_code(uint64_t addr);
+  int op_major_version();
+  int op_minor_version();
+
+  // Returns true if the oprofiled process is running, the opagent library is
+  // loaded and a connection to the agent has been established, and false
+  // otherwise.
+  bool isAgentAvailable();
+
+private:
+  // Loads the libopagent library and initializes this wrapper if the oprofile
+  // daemon is running
+  bool initialize();
+
+  // Searches /proc for the oprofile daemon and returns true if the process is
+  // found, or false otherwise.
+  bool checkForOProfileProcEntry();
+
+  bool isOProfileRunning();
+};
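+
+// Illustrative agent lifecycle (a sketch; FnAddr, FnCode, and FnSize are
+// hypothetical values describing an emitted function):
+//
+//   OProfileWrapper Wrapper;
+//   if (Wrapper.isAgentAvailable() && Wrapper.op_open_agent()) {
+//     Wrapper.op_write_native_code("jit_fn", FnAddr, FnCode, FnSize);
+//     Wrapper.op_close_agent();
+//   }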
+
+} // namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h
new file mode 100644
index 0000000..0770444
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h
@@ -0,0 +1,42 @@
+//===-- ObjectCache.h - Class definition for the ObjectCache ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+#define LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+
+/// This is the base ObjectCache type which can be provided to an
+/// ExecutionEngine for the purpose of avoiding compilation for Modules that
+/// have already been compiled and an object file is available.
+class ObjectCache {
+  virtual void anchor();
+
+public:
+  ObjectCache() = default;
+
+  virtual ~ObjectCache() = default;
+
+  /// notifyObjectCompiled - Provides a pointer to compiled code for Module M.
+  virtual void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) = 0;
+
+  /// Returns a pointer to a newly allocated MemoryBuffer that contains the
+  /// object which corresponds with Module M, or 0 if an object is not
+  /// available.
+  virtual std::unique_ptr<MemoryBuffer> getObject(const Module* M) = 0;
+};
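+
+// An illustrative in-memory cache (a sketch; the class name is hypothetical,
+// and ADT/StringMap.h would need to be included) implementing both hooks:
+//
+//   class MapObjectCache : public ObjectCache {
+//     StringMap<std::unique_ptr<MemoryBuffer>> Cached;
+//
+//     void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj)
+//         override {
+//       Cached[M->getModuleIdentifier()] =
+//           MemoryBuffer::getMemBufferCopy(Obj.getBuffer());
+//     }
+//     std::unique_ptr<MemoryBuffer> getObject(const Module *M) override {
+//       auto I = Cached.find(M->getModuleIdentifier());
+//       if (I == Cached.end())
+//         return nullptr;
+//       return MemoryBuffer::getMemBufferCopy(I->second->getBuffer());
+//     }
+//   };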
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OBJECTCACHE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
new file mode 100644
index 0000000..0f00ad0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
@@ -0,0 +1,63 @@
+//===- ObjectMemoryBuffer.h - SmallVector-backed MemoryBuffer ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a wrapper class to hold the memory into which an
+// object will be generated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+#define LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// \brief SmallVector-backed MemoryBuffer instance.
+///
+/// This class enables efficient construction of MemoryBuffers from SmallVector
+/// instances. This is useful for MCJIT and Orc, where object files are streamed
+/// into SmallVectors, then inspected using ObjectFile (which takes a
+/// MemoryBuffer).
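+///
+/// For illustration, an object file can be streamed into a SmallVector via
+/// raw_svector_ostream and then wrapped without copying (a sketch):
+/// \code
+///   SmallVector<char, 0> ObjStorage;
+///   raw_svector_ostream ObjStream(ObjStorage);
+///   // ... emit the object into ObjStream ...
+///   std::unique_ptr<MemoryBuffer> ObjBuffer =
+///       llvm::make_unique<ObjectMemoryBuffer>(std::move(ObjStorage));
+/// \endcode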
+class ObjectMemoryBuffer : public MemoryBuffer {
+public:
+
+  /// \brief Construct an ObjectMemoryBuffer from the given SmallVector r-value.
+  ///
+  /// FIXME: It'd be nice for this to be a non-templated constructor taking a
+  /// SmallVectorImpl here instead of a templated one taking a SmallVector<N>,
+  /// but SmallVector's move-construction/assignment currently only take
+  /// SmallVectors. If/when that is fixed we can simplify this constructor and
+  /// the following one.
+  ObjectMemoryBuffer(SmallVectorImpl<char> &&SV)
+    : SV(std::move(SV)), BufferName("<in-memory object>") {
+    init(this->SV.begin(), this->SV.end(), false);
+  }
+
+  /// \brief Construct a named ObjectMemoryBuffer from the given SmallVector
+  ///        r-value and StringRef.
+  ObjectMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name)
+    : SV(std::move(SV)), BufferName(Name) {
+    init(this->SV.begin(), this->SV.end(), false);
+  }
+
+  StringRef getBufferIdentifier() const override { return BufferName; }
+
+  BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+
+private:
+  SmallVector<char, 0> SV;
+  std::string BufferName;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
new file mode 100644
index 0000000..a64a6dd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -0,0 +1,681 @@
+//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// JIT layer for breaking up modules and inserting callbacks to allow
+// individual functions to be compiled on demand.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Value;
+
+namespace orc {
+
+/// @brief Compile-on-demand layer.
+///
+///   When a module is added to this layer, a stub is created for each of its
+/// function definitions. The stubs and other global values are immediately
+/// added to the layer below. When a stub is called it triggers the extraction
+/// of the function body from the original module. The extracted body is then
+/// compiled and executed.
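+///
+///   For illustration, a per-function partitioning setup might look like the
+/// sketch below (CompileLayer, GetResolver, SetResolver, CCMgr and
+/// CreateStubsMgr are assumed to be supplied by the client):
+/// @code
+///   CompileOnDemandLayer<decltype(CompileLayer)> CODLayer(
+///       ES, CompileLayer, GetResolver, SetResolver,
+///       [](Function &F) { return std::set<Function*>({&F}); },
+///       CCMgr, CreateStubsMgr);
+///   if (auto Err = CODLayer.addModule(ES.allocateVModule(), std::move(M)))
+///     logAllUnhandledErrors(std::move(Err), errs(), "COD error: ");
+/// @endcode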
+template <typename BaseLayerT,
+          typename CompileCallbackMgrT = JITCompileCallbackManager,
+          typename IndirectStubsMgrT = IndirectStubsManager>
+class CompileOnDemandLayer {
+private:
+  template <typename MaterializerFtor>
+  class LambdaMaterializer final : public ValueMaterializer {
+  public:
+    LambdaMaterializer(MaterializerFtor M) : M(std::move(M)) {}
+
+    Value *materialize(Value *V) final { return M(V); }
+
+  private:
+    MaterializerFtor M;
+  };
+
+  template <typename MaterializerFtor>
+  LambdaMaterializer<MaterializerFtor>
+  createLambdaMaterializer(MaterializerFtor M) {
+    return LambdaMaterializer<MaterializerFtor>(std::move(M));
+  }
+
+  // Provide type-erasure for the Modules and MemoryManagers.
+  template <typename ResourceT>
+  class ResourceOwner {
+  public:
+    ResourceOwner() = default;
+    ResourceOwner(const ResourceOwner &) = delete;
+    ResourceOwner &operator=(const ResourceOwner &) = delete;
+    virtual ~ResourceOwner() = default;
+
+    virtual ResourceT& getResource() const = 0;
+  };
+
+  template <typename ResourceT, typename ResourcePtrT>
+  class ResourceOwnerImpl : public ResourceOwner<ResourceT> {
+  public:
+    ResourceOwnerImpl(ResourcePtrT ResourcePtr)
+      : ResourcePtr(std::move(ResourcePtr)) {}
+
+    ResourceT& getResource() const override { return *ResourcePtr; }
+
+  private:
+    ResourcePtrT ResourcePtr;
+  };
+
+  template <typename ResourceT, typename ResourcePtrT>
+  std::unique_ptr<ResourceOwner<ResourceT>>
+  wrapOwnership(ResourcePtrT ResourcePtr) {
+    using RO = ResourceOwnerImpl<ResourceT, ResourcePtrT>;
+    return llvm::make_unique<RO>(std::move(ResourcePtr));
+  }
+
+  class StaticGlobalRenamer {
+  public:
+    StaticGlobalRenamer() = default;
+    StaticGlobalRenamer(StaticGlobalRenamer &&) = default;
+    StaticGlobalRenamer &operator=(StaticGlobalRenamer &&) = default;
+
+    void rename(Module &M) {
+      for (auto &F : M)
+        if (F.hasLocalLinkage())
+          F.setName("$static." + Twine(NextId++));
+      for (auto &G : M.globals())
+        if (G.hasLocalLinkage())
+          G.setName("$static." + Twine(NextId++));
+    }
+
+  private:
+    unsigned NextId = 0;
+  };
+
+  struct LogicalDylib {
+    struct SourceModuleEntry {
+      std::unique_ptr<Module> SourceMod;
+      std::set<Function*> StubsToClone;
+    };
+
+    using SourceModulesList = std::vector<SourceModuleEntry>;
+    using SourceModuleHandle = typename SourceModulesList::size_type;
+
+    LogicalDylib() = default;
+
+    LogicalDylib(VModuleKey K, std::shared_ptr<SymbolResolver> BackingResolver,
+                 std::unique_ptr<IndirectStubsMgrT> StubsMgr)
+        : K(std::move(K)), BackingResolver(std::move(BackingResolver)),
+          StubsMgr(std::move(StubsMgr)) {}
+
+    SourceModuleHandle addSourceModule(std::unique_ptr<Module> M) {
+      SourceModuleHandle H = SourceModules.size();
+      SourceModules.push_back(SourceModuleEntry());
+      SourceModules.back().SourceMod = std::move(M);
+      return H;
+    }
+
+    Module& getSourceModule(SourceModuleHandle H) {
+      return *SourceModules[H].SourceMod;
+    }
+
+    std::set<Function*>& getStubsToClone(SourceModuleHandle H) {
+      return SourceModules[H].StubsToClone;
+    }
+
+    JITSymbol findSymbol(BaseLayerT &BaseLayer, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+      if (auto Sym = StubsMgr->findStub(Name, ExportedSymbolsOnly))
+        return Sym;
+      for (auto BLK : BaseLayerVModuleKeys)
+        if (auto Sym = BaseLayer.findSymbolIn(BLK, Name, ExportedSymbolsOnly))
+          return Sym;
+        else if (auto Err = Sym.takeError())
+          return std::move(Err);
+      return nullptr;
+    }
+
+    Error removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+      for (auto &BLK : BaseLayerVModuleKeys)
+        if (auto Err = BaseLayer.removeModule(BLK))
+          return Err;
+      return Error::success();
+    }
+
+    VModuleKey K;
+    std::shared_ptr<SymbolResolver> BackingResolver;
+    std::unique_ptr<IndirectStubsMgrT> StubsMgr;
+    StaticGlobalRenamer StaticRenamer;
+    SourceModulesList SourceModules;
+    std::vector<VModuleKey> BaseLayerVModuleKeys;
+  };
+
+public:
+
+  /// @brief Module partitioning functor.
+  using PartitioningFtor = std::function<std::set<Function*>(Function&)>;
+
+  /// @brief Builder for IndirectStubsManagers.
+  using IndirectStubsManagerBuilderT =
+      std::function<std::unique_ptr<IndirectStubsMgrT>()>;
+
+  using SymbolResolverGetter =
+      std::function<std::shared_ptr<SymbolResolver>(VModuleKey K)>;
+
+  using SymbolResolverSetter =
+      std::function<void(VModuleKey K, std::shared_ptr<SymbolResolver> R)>;
+
+  /// @brief Construct a compile-on-demand layer instance.
+  CompileOnDemandLayer(ExecutionSession &ES, BaseLayerT &BaseLayer,
+                       SymbolResolverGetter GetSymbolResolver,
+                       SymbolResolverSetter SetSymbolResolver,
+                       PartitioningFtor Partition,
+                       CompileCallbackMgrT &CallbackMgr,
+                       IndirectStubsManagerBuilderT CreateIndirectStubsManager,
+                       bool CloneStubsIntoPartitions = true)
+      : ES(ES), BaseLayer(BaseLayer),
+        GetSymbolResolver(std::move(GetSymbolResolver)),
+        SetSymbolResolver(std::move(SetSymbolResolver)),
+        Partition(std::move(Partition)), CompileCallbackMgr(CallbackMgr),
+        CreateIndirectStubsManager(std::move(CreateIndirectStubsManager)),
+        CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
+
+  ~CompileOnDemandLayer() {
+    // FIXME: Report errors to the log.
+    while (!LogicalDylibs.empty())
+      consumeError(removeModule(LogicalDylibs.begin()->first));
+  }
+
+  /// @brief Add a module to the compile-on-demand layer.
+  Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+
+    assert(!LogicalDylibs.count(K) && "VModuleKey K already in use");
+    auto I = LogicalDylibs.insert(
+        LogicalDylibs.end(),
+        std::make_pair(K, LogicalDylib(K, GetSymbolResolver(K),
+                                       CreateIndirectStubsManager())));
+
+    return addLogicalModule(I->second, std::move(M));
+  }
+
+  /// @brief Add extra modules to an existing logical module.
+  Error addExtraModule(VModuleKey K, std::unique_ptr<Module> M) {
+    return addLogicalModule(LogicalDylibs[K], std::move(M));
+  }
+
+  /// @brief Remove the module represented by the given key.
+  ///
+  ///   This will remove all modules in the layers below that were derived from
+  /// the module represented by K.
+  Error removeModule(VModuleKey K) {
+    auto I = LogicalDylibs.find(K);
+    assert(I != LogicalDylibs.end() && "VModuleKey K not valid here");
+    auto Err = I->second.removeModulesFromBaseLayer(BaseLayer);
+    LogicalDylibs.erase(I);
+    return Err;
+  }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+    for (auto &KV : LogicalDylibs) {
+      if (auto Sym = KV.second.StubsMgr->findStub(Name, ExportedSymbolsOnly))
+        return Sym;
+      if (auto Sym = findSymbolIn(KV.first, Name, ExportedSymbolsOnly))
+        return Sym;
+      else if (auto Err = Sym.takeError())
+        return std::move(Err);
+    }
+    return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Get the address of a symbol provided by this layer, or some layer
+  ///        below this one.
+  JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    assert(LogicalDylibs.count(K) && "VModuleKey K is not valid here");
+    return LogicalDylibs[K].findSymbol(BaseLayer, Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Update the stub for the given function to point at FnBodyAddr.
+  /// This can be used to support re-optimization.
+  /// @return Error::success() if the function exists and the stub was
+  ///         updated, an Error otherwise.
+  //
+  // FIXME: We should track and free associated resources (unused compile
+  //        callbacks, uncompiled IR, and no-longer-needed/reachable function
+  //        implementations).
+  Error updatePointer(std::string FuncName, JITTargetAddress FnBodyAddr) {
+    // Find out which logical dylib contains our symbol.
+    for (auto &KV : LogicalDylibs) {
+      LogicalDylib &LD = KV.second;
+      if (LD.SourceModules.empty())
+        continue;
+      // All modules in a logical dylib share a data layout, so the first
+      // source module's layout can be used to mangle the name.
+      std::string CalledFnName =
+          mangle(FuncName, LD.getSourceModule(0).getDataLayout());
+      if (auto Sym = LD.StubsMgr->findStub(CalledFnName, false)) {
+        if (auto Err = LD.StubsMgr->updatePointer(CalledFnName, FnBodyAddr))
+          return Err;
+        return Error::success();
+      } else if (auto Err = Sym.takeError())
+        return Err;
+    }
+    return make_error<JITSymbolNotFound>(FuncName);
+  }
+
+private:
+  Error addLogicalModule(LogicalDylib &LD, std::unique_ptr<Module> SrcMPtr) {
+
+    // Rename all static functions / globals to $static.X :
+    // This will unique the names across all modules in the logical dylib,
+    // simplifying symbol lookup.
+    LD.StaticRenamer.rename(*SrcMPtr);
+
+    // Bump the linkage and rename any anonymous/private members in SrcM to
+    // ensure that everything will resolve properly after we partition SrcM.
+    makeAllSymbolsExternallyAccessible(*SrcMPtr);
+
+    // Create a logical module handle for SrcM within the logical dylib.
+    Module &SrcM = *SrcMPtr;
+    auto LMId = LD.addSourceModule(std::move(SrcMPtr));
+
+    // Create stub functions.
+    const DataLayout &DL = SrcM.getDataLayout();
+    {
+      typename IndirectStubsMgrT::StubInitsMap StubInits;
+      for (auto &F : SrcM) {
+        // Skip declarations.
+        if (F.isDeclaration())
+          continue;
+
+        // Skip weak functions for which we already have definitions.
+        auto MangledName = mangle(F.getName(), DL);
+        if (F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
+          if (auto Sym = LD.findSymbol(BaseLayer, MangledName, false))
+            continue;
+          else if (auto Err = Sym.takeError())
+            return std::move(Err);
+        }
+
+        // Record all functions defined by this module.
+        if (CloneStubsIntoPartitions)
+          LD.getStubsToClone(LMId).insert(&F);
+
+        // Create a callback, associate it with the stub for the function,
+        // and set the compile action to compile the partition containing the
+        // function.
+        if (auto CCInfoOrErr = CompileCallbackMgr.getCompileCallback()) {
+          auto &CCInfo = *CCInfoOrErr;
+          StubInits[MangledName] =
+            std::make_pair(CCInfo.getAddress(),
+                           JITSymbolFlags::fromGlobalValue(F));
+          CCInfo.setCompileAction([this, &LD, LMId, &F]() -> JITTargetAddress {
+              if (auto FnImplAddrOrErr = this->extractAndCompile(LD, LMId, F))
+                return *FnImplAddrOrErr;
+              else {
+                // FIXME: Report error, return to 'abort' or something similar.
+                consumeError(FnImplAddrOrErr.takeError());
+                return 0;
+              }
+            });
+        } else
+          return CCInfoOrErr.takeError();
+      }
+
+      if (auto Err = LD.StubsMgr->createStubs(StubInits))
+        return Err;
+    }
+
+    // If this module doesn't contain any globals, aliases, or module flags then
+    // we can bail out early and avoid the overhead of creating and managing an
+    // empty globals module.
+    if (SrcM.global_empty() && SrcM.alias_empty() &&
+        !SrcM.getModuleFlagsMetadata())
+      return Error::success();
+
+    // Create the GlobalValues module.
+    auto GVsM = llvm::make_unique<Module>((SrcM.getName() + ".globals").str(),
+                                          SrcM.getContext());
+    GVsM->setDataLayout(DL);
+
+    ValueToValueMapTy VMap;
+
+    // Clone global variable decls.
+    for (auto &GV : SrcM.globals())
+      if (!GV.isDeclaration() && !VMap.count(&GV))
+        cloneGlobalVariableDecl(*GVsM, GV, &VMap);
+
+    // And the aliases.
+    for (auto &A : SrcM.aliases())
+      if (!VMap.count(&A))
+        cloneGlobalAliasDecl(*GVsM, A, VMap);
+
+    // Clone the module flags.
+    cloneModuleFlagsMetadata(*GVsM, SrcM, VMap);
+
+    // Now we need to clone the GV and alias initializers.
+
+    // Initializers may refer to functions declared (but not defined) in this
+    // module. Build a materializer to clone decls on demand.
+    Error MaterializerErrors = Error::success();
+    auto Materializer = createLambdaMaterializer(
+      [&LD, &GVsM, &MaterializerErrors](Value *V) -> Value* {
+        if (auto *F = dyn_cast<Function>(V)) {
+          // Decls in the original module just get cloned.
+          if (F->isDeclaration())
+            return cloneFunctionDecl(*GVsM, *F);
+
+          // Definitions in the original module (which we have emitted stubs
+          // for at this point) get turned into a constant alias to the stub
+          // instead.
+          const DataLayout &DL = GVsM->getDataLayout();
+          std::string FName = mangle(F->getName(), DL);
+          unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
+          JITTargetAddress StubAddr = 0;
+
+          // Get the address for the stub. If we encounter an error while
+          // doing so, stash it in the MaterializerErrors variable and use a
+          // null address as a placeholder.
+          if (auto StubSym = LD.StubsMgr->findStub(FName, false)) {
+            if (auto StubAddrOrErr = StubSym.getAddress())
+              StubAddr = *StubAddrOrErr;
+            else
+              MaterializerErrors = joinErrors(std::move(MaterializerErrors),
+                                              StubAddrOrErr.takeError());
+          }
+
+          ConstantInt *StubAddrCI =
+            ConstantInt::get(GVsM->getContext(), APInt(PtrBitWidth, StubAddr));
+          Constant *Init = ConstantExpr::getCast(Instruction::IntToPtr,
+                                                 StubAddrCI, F->getType());
+          return GlobalAlias::create(F->getFunctionType(),
+                                     F->getType()->getAddressSpace(),
+                                     F->getLinkage(), F->getName(),
+                                     Init, GVsM.get());
+        }
+        // else....
+        return nullptr;
+      });
+
+    // Clone the global variable initializers.
+    for (auto &GV : SrcM.globals())
+      if (!GV.isDeclaration())
+        moveGlobalVariableInitializer(GV, VMap, &Materializer);
+
+    // Clone the global alias initializers.
+    for (auto &A : SrcM.aliases()) {
+      auto *NewA = cast<GlobalAlias>(VMap[&A]);
+      assert(NewA && "Alias not cloned?");
+      Value *Init = MapValue(A.getAliasee(), VMap, RF_None, nullptr,
+                             &Materializer);
+      NewA->setAliasee(cast<Constant>(Init));
+    }
+
+    if (MaterializerErrors)
+      return MaterializerErrors;
+
+    // Build a resolver for the globals module and add it to the base layer.
+    auto LegacyLookup = [this, &LD](const std::string &Name) -> JITSymbol {
+      if (auto Sym = LD.StubsMgr->findStub(Name, false))
+        return Sym;
+      else if (auto Err = Sym.takeError())
+        return std::move(Err);
+
+      if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
+        return Sym;
+      else if (auto Err = Sym.takeError())
+        return std::move(Err);
+
+      return nullptr;
+    };
+
+    auto GVsResolver = createSymbolResolver(
+        [&LD, LegacyLookup](SymbolFlagsMap &SymbolFlags,
+                            const SymbolNameSet &Symbols) {
+          auto NotFoundViaLegacyLookup =
+              lookupFlagsWithLegacyFn(SymbolFlags, Symbols, LegacyLookup);
+
+          if (!NotFoundViaLegacyLookup) {
+            logAllUnhandledErrors(NotFoundViaLegacyLookup.takeError(), errs(),
+                                  "CODLayer/GVsResolver flags lookup failed: ");
+            SymbolFlags.clear();
+            return SymbolNameSet();
+          }
+
+          return LD.BackingResolver->lookupFlags(SymbolFlags,
+                                                 *NotFoundViaLegacyLookup);
+        },
+        [&LD, LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Query,
+                            SymbolNameSet Symbols) {
+          auto NotFoundViaLegacyLookup =
+              lookupWithLegacyFn(*Query, Symbols, LegacyLookup);
+          return LD.BackingResolver->lookup(Query, NotFoundViaLegacyLookup);
+        });
+
+    SetSymbolResolver(LD.K, std::move(GVsResolver));
+
+    if (auto Err = BaseLayer.addModule(LD.K, std::move(GVsM)))
+      return Err;
+
+    LD.BaseLayerVModuleKeys.push_back(LD.K);
+
+    return Error::success();
+  }
+
+  static std::string mangle(StringRef Name, const DataLayout &DL) {
+    std::string MangledName;
+    {
+      raw_string_ostream MangledNameStream(MangledName);
+      Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+    }
+    return MangledName;
+  }
+
+  Expected<JITTargetAddress>
+  extractAndCompile(LogicalDylib &LD,
+                    typename LogicalDylib::SourceModuleHandle LMId,
+                    Function &F) {
+    Module &SrcM = LD.getSourceModule(LMId);
+
+    // If F is a declaration we must already have compiled it.
+    if (F.isDeclaration())
+      return 0;
+
+    // Grab the name of the function being called here.
+    std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
+
+    JITTargetAddress CalledAddr = 0;
+    auto Part = Partition(F);
+    if (auto PartKeyOrErr = emitPartition(LD, LMId, Part)) {
+      auto &PartKey = *PartKeyOrErr;
+      for (auto *SubF : Part) {
+        std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
+        if (auto FnBodySym = BaseLayer.findSymbolIn(PartKey, FnName, false)) {
+          if (auto FnBodyAddrOrErr = FnBodySym.getAddress()) {
+            JITTargetAddress FnBodyAddr = *FnBodyAddrOrErr;
+
+            // If this is the function we're calling, record the address so we
+            // can return it from this function.
+            if (SubF == &F)
+              CalledAddr = FnBodyAddr;
+
+            // Update the function body pointer for the stub.
+            // Propagate stub-update failures rather than silently dropping
+            // the (must-be-checked) Error and returning a null address.
+            if (auto Err = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
+              return std::move(Err);
+
+          } else
+            return FnBodyAddrOrErr.takeError();
+        } else if (auto Err = FnBodySym.takeError())
+          return std::move(Err);
+        else
+          llvm_unreachable("Function not emitted for partition");
+      }
+
+      LD.BaseLayerVModuleKeys.push_back(PartKey);
+    } else
+      return PartKeyOrErr.takeError();
+
+    return CalledAddr;
+  }
+
+  template <typename PartitionT>
+  Expected<VModuleKey>
+  emitPartition(LogicalDylib &LD,
+                typename LogicalDylib::SourceModuleHandle LMId,
+                const PartitionT &Part) {
+    Module &SrcM = LD.getSourceModule(LMId);
+
+    // Create the module.
+    std::string NewName = SrcM.getName();
+    for (auto *F : Part) {
+      NewName += ".";
+      NewName += F->getName();
+    }
+
+    auto M = llvm::make_unique<Module>(NewName, SrcM.getContext());
+    M->setDataLayout(SrcM.getDataLayout());
+    ValueToValueMapTy VMap;
+
+    auto Materializer = createLambdaMaterializer([&LD, &LMId,
+                                                  &M](Value *V) -> Value * {
+      if (auto *GV = dyn_cast<GlobalVariable>(V))
+        return cloneGlobalVariableDecl(*M, *GV);
+
+      if (auto *F = dyn_cast<Function>(V)) {
+        // Check whether we want to clone an available_externally definition.
+        if (!LD.getStubsToClone(LMId).count(F))
+          return cloneFunctionDecl(*M, *F);
+
+        // Ok - we want an inlinable stub. For that to work we need a decl
+        // for the stub pointer.
+        auto *StubPtr = createImplPointer(*F->getType(), *M,
+                                          F->getName() + "$stub_ptr", nullptr);
+        auto *ClonedF = cloneFunctionDecl(*M, *F);
+        makeStub(*ClonedF, *StubPtr);
+        ClonedF->setLinkage(GlobalValue::AvailableExternallyLinkage);
+        ClonedF->addFnAttr(Attribute::AlwaysInline);
+        return ClonedF;
+      }
+
+      if (auto *A = dyn_cast<GlobalAlias>(V)) {
+        auto *Ty = A->getValueType();
+        if (Ty->isFunctionTy())
+          return Function::Create(cast<FunctionType>(Ty),
+                                  GlobalValue::ExternalLinkage, A->getName(),
+                                  M.get());
+
+        return new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
+                                  nullptr, A->getName(), nullptr,
+                                  GlobalValue::NotThreadLocal,
+                                  A->getType()->getAddressSpace());
+      }
+
+      return nullptr;
+    });
+
+    // Create decls in the new module.
+    for (auto *F : Part)
+      cloneFunctionDecl(*M, *F, &VMap);
+
+    // Move the function bodies.
+    for (auto *F : Part)
+      moveFunctionBody(*F, VMap, &Materializer);
+
+    auto K = ES.allocateVModule();
+
+    auto LegacyLookup = [this, &LD](const std::string &Name) -> JITSymbol {
+      return LD.findSymbol(BaseLayer, Name, false);
+    };
+
+    // Create memory manager and symbol resolver.
+    auto Resolver = createSymbolResolver(
+        [&LD, LegacyLookup](SymbolFlagsMap &SymbolFlags,
+                            const SymbolNameSet &Symbols) {
+          auto NotFoundViaLegacyLookup =
+              lookupFlagsWithLegacyFn(SymbolFlags, Symbols, LegacyLookup);
+          if (!NotFoundViaLegacyLookup) {
+            logAllUnhandledErrors(NotFoundViaLegacyLookup.takeError(), errs(),
+                                  "CODLayer/SubResolver flags lookup failed: ");
+            SymbolFlags.clear();
+            return SymbolNameSet();
+          }
+          return LD.BackingResolver->lookupFlags(SymbolFlags,
+                                                 *NotFoundViaLegacyLookup);
+        },
+        [&LD, LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Q,
+                            SymbolNameSet Symbols) {
+          auto NotFoundViaLegacyLookup =
+              lookupWithLegacyFn(*Q, Symbols, LegacyLookup);
+          return LD.BackingResolver->lookup(Q,
+                                            std::move(NotFoundViaLegacyLookup));
+        });
+    SetSymbolResolver(K, std::move(Resolver));
+
+    if (auto Err = BaseLayer.addModule(std::move(K), std::move(M)))
+      return std::move(Err);
+
+    return K;
+  }
+
+  ExecutionSession &ES;
+  BaseLayerT &BaseLayer;
+  SymbolResolverGetter GetSymbolResolver;
+  SymbolResolverSetter SetSymbolResolver;
+  PartitioningFtor Partition;
+  CompileCallbackMgrT &CompileCallbackMgr;
+  IndirectStubsManagerBuilderT CreateIndirectStubsManager;
+
+  std::map<VModuleKey, LogicalDylib> LogicalDylibs;
+  bool CloneStubsIntoPartitions;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h
new file mode 100644
index 0000000..a8050ff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -0,0 +1,121 @@
+//===- CompileUtils.h - Utilities for compiling IR in the JIT ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for compiling IR to object files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+class MCContext;
+class Module;
+
+namespace orc {
+
+/// @brief Simple compile functor: Takes a single IR module and returns a
+///        MemoryBuffer containing the corresponding object file.
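+///
+/// For illustration (a sketch; TM and M are assumed to exist):
+/// @code
+///   SimpleCompiler Compile(*TM);
+///   std::unique_ptr<MemoryBuffer> Obj = Compile(*M);
+/// @endcode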
+class SimpleCompiler {
+private:
+  class SmallVectorMemoryBuffer : public MemoryBuffer {
+  public:
+    SmallVectorMemoryBuffer(SmallVector<char, 0> Buffer)
+        : Buffer(std::move(Buffer)) {
+      init(this->Buffer.data(), this->Buffer.data() + this->Buffer.size(),
+           false);
+    }
+
+    BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+
+  private:
+    SmallVector<char, 0> Buffer;
+  };
+
+public:
+  using CompileResult = std::unique_ptr<MemoryBuffer>;
+
+  /// @brief Construct a simple compile functor with the given target.
+  SimpleCompiler(TargetMachine &TM, ObjectCache *ObjCache = nullptr)
+    : TM(TM), ObjCache(ObjCache) {}
+
+  /// @brief Set an ObjectCache to query before compiling.
+  void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
+
+  /// @brief Compile a Module to an object file, returned as a MemoryBuffer.
+  CompileResult operator()(Module &M) {
+    CompileResult CachedObject = tryToLoadFromObjectCache(M);
+    if (CachedObject)
+      return CachedObject;
+
+    SmallVector<char, 0> ObjBufferSV;
+
+    {
+      raw_svector_ostream ObjStream(ObjBufferSV);
+
+      legacy::PassManager PM;
+      MCContext *Ctx;
+      if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+        llvm_unreachable("Target does not support MC emission.");
+      PM.run(M);
+    }
+
+    auto ObjBuffer =
+        llvm::make_unique<SmallVectorMemoryBuffer>(std::move(ObjBufferSV));
+    auto Obj =
+        object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+    if (Obj) {
+      notifyObjectCompiled(M, *ObjBuffer);
+      return std::move(ObjBuffer);
+    }
+
+    // TODO: Actually report errors helpfully.
+    consumeError(Obj.takeError());
+    return nullptr;
+  }
+
+private:
+
+  CompileResult tryToLoadFromObjectCache(const Module &M) {
+    if (!ObjCache)
+      return CompileResult();
+
+    return ObjCache->getObject(&M);
+  }
+
+  void notifyObjectCompiled(const Module &M, const MemoryBuffer &ObjBuffer) {
+    if (ObjCache)
+      ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+  }
+
+  TargetMachine &TM;
+  ObjectCache *ObjCache = nullptr;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h
new file mode 100644
index 0000000..26fec8b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h
@@ -0,0 +1,394 @@
+//===------ Core.h -- Core ORC APIs (Layer, JITDylib, etc.) -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains core ORC APIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_CORE_H
+#define LLVM_EXECUTIONENGINE_ORC_CORE_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+/// VModuleKey provides a unique identifier (allocated and managed by
+/// ExecutionSessions) for a module added to the JIT.
+using VModuleKey = uint64_t;
+
+class VSO;
+
+/// @brief A set of symbol names (represented by SymbolStringPtrs for
+///        efficiency).
+using SymbolNameSet = std::set<SymbolStringPtr>;
+
+/// @brief A map from symbol names (as SymbolStringPtrs) to JITSymbols
+///        (address/flags pairs).
+using SymbolMap = std::map<SymbolStringPtr, JITEvaluatedSymbol>;
+
+/// @brief A map from symbol names (as SymbolStringPtrs) to JITSymbolFlags.
+using SymbolFlagsMap = std::map<SymbolStringPtr, JITSymbolFlags>;
+
+/// @brief A symbol query that returns results via a callback when results are
+///        ready.
+///
+/// The query makes its notify-resolved callback once all requested symbols
+/// have been resolved, and its notify-ready callback once they are all ready
+/// for execution.
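+///
+/// For illustration, a query whose callbacks just consume results (a sketch;
+/// Names is an assumed SymbolNameSet):
+/// @code
+///   auto Query = std::make_shared<AsynchronousSymbolQuery>(
+///       Names,
+///       [](Expected<SymbolMap> Result) {
+///         if (!Result)
+///           consumeError(Result.takeError());
+///       },
+///       [](Error Err) { consumeError(std::move(Err)); });
+/// @endcode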
+class AsynchronousSymbolQuery {
+public:
+  /// @brief Callback to notify client that symbols have been resolved.
+  using SymbolsResolvedCallback = std::function<void(Expected<SymbolMap>)>;
+
+  /// @brief Callback to notify client that symbols are ready for execution.
+  using SymbolsReadyCallback = std::function<void(Error)>;
+
+  /// @brief Create a query for the given symbols, notify-resolved and
+  ///        notify-ready callbacks.
+  AsynchronousSymbolQuery(const SymbolNameSet &Symbols,
+                          SymbolsResolvedCallback NotifySymbolsResolved,
+                          SymbolsReadyCallback NotifySymbolsReady);
+
+  /// @brief Notify client that the query failed.
+  ///
+  /// If the notify-resolved callback has not been made yet, then it is called
+  /// with the given error, and the notify-ready callback is never made.
+  ///
+  /// If the notify-resolved callback has already been made, then the
+  /// notify-ready callback is called with the given error.
+  ///
+  /// It is illegal to call setFailed after both callbacks have been made.
+  void setFailed(Error Err);
+
+  /// @brief Set the resolved symbol information for the given symbol name.
+  ///
+  /// If this symbol was the last one not resolved, this will trigger a call to
+  /// the notify-resolved callback passing the completed symbol map.
+  void setDefinition(SymbolStringPtr Name, JITEvaluatedSymbol Sym);
+
+  /// @brief Notify the query that a requested symbol is ready for execution.
+  ///
+  /// This decrements the query's internal count of not-yet-ready symbols. If
+  /// this call to notifySymbolFinalized sets the counter to zero, it will call
+  /// the notify-ready callback with Error::success as the value.
+  void notifySymbolFinalized();
+
+private:
+  SymbolMap Symbols;
+  size_t OutstandingResolutions = 0;
+  size_t OutstandingFinalizations = 0;
+  SymbolsResolvedCallback NotifySymbolsResolved;
+  SymbolsReadyCallback NotifySymbolsReady;
+};
+
+/// @brief SymbolResolver is a composable interface for looking up symbol flags
+///        and addresses using the AsynchronousSymbolQuery type. It will
+///        eventually replace the LegacyJITSymbolResolver interface as the
+///        standard ORC symbol resolver type.
+class SymbolResolver {
+public:
+  virtual ~SymbolResolver() = default;
+
+  /// @brief Returns the flags for each symbol in Symbols that can be found,
+  ///        along with the set of symbols that could not be found.
+  virtual SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+                                    const SymbolNameSet &Symbols) = 0;
+
+  /// @brief For each symbol in Symbols that can be found, assigns that symbol's
+  ///        value in Query. Returns the set of symbols that could not be found.
+  virtual SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+                               SymbolNameSet Symbols) = 0;
+
+private:
+  virtual void anchor();
+};
+
+/// @brief Implements SymbolResolver with a pair of supplied function objects
+///        for convenience. See createSymbolResolver.
+template <typename LookupFlagsFn, typename LookupFn>
+class LambdaSymbolResolver final : public SymbolResolver {
+public:
+  template <typename LookupFlagsFnRef, typename LookupFnRef>
+  LambdaSymbolResolver(LookupFlagsFnRef &&LookupFlags, LookupFnRef &&Lookup)
+      : LookupFlags(std::forward<LookupFlagsFnRef>(LookupFlags)),
+        Lookup(std::forward<LookupFnRef>(Lookup)) {}
+
+  SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+                            const SymbolNameSet &Symbols) final {
+    return LookupFlags(Flags, Symbols);
+  }
+
+  SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+                       SymbolNameSet Symbols) final {
+    return Lookup(std::move(Query), std::move(Symbols));
+  }
+
+private:
+  LookupFlagsFn LookupFlags;
+  LookupFn Lookup;
+};
+
+/// @brief Creates a SymbolResolver implementation from the pair of supplied
+///        function objects.
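+///
+/// For illustration, a resolver that forwards both operations to a VSO V (a
+/// sketch; any materialization work returned by the lookup is ignored for
+/// brevity):
+/// @code
+///   auto Resolver = createSymbolResolver(
+///       [&](SymbolFlagsMap &Flags, const SymbolNameSet &Symbols) {
+///         return V.lookupFlags(Flags, Symbols);
+///       },
+///       [&](std::shared_ptr<AsynchronousSymbolQuery> Q, SymbolNameSet Symbols) {
+///         auto R = V.lookup(std::move(Q), std::move(Symbols));
+///         return std::move(R.UnresolvedSymbols);
+///       });
+/// @endcode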
+template <typename LookupFlagsFn, typename LookupFn>
+std::unique_ptr<LambdaSymbolResolver<
+    typename std::remove_cv<
+        typename std::remove_reference<LookupFlagsFn>::type>::type,
+    typename std::remove_cv<
+        typename std::remove_reference<LookupFn>::type>::type>>
+createSymbolResolver(LookupFlagsFn &&LookupFlags, LookupFn &&Lookup) {
+  using LambdaSymbolResolverImpl = LambdaSymbolResolver<
+      typename std::remove_cv<
+          typename std::remove_reference<LookupFlagsFn>::type>::type,
+      typename std::remove_cv<
+          typename std::remove_reference<LookupFn>::type>::type>;
+  return llvm::make_unique<LambdaSymbolResolverImpl>(
+      std::forward<LookupFlagsFn>(LookupFlags), std::forward<LookupFn>(Lookup));
+}
+
+/// @brief A MaterializationUnit represents a set of symbol definitions that can
+///        be materialized as a group, or individually discarded (when
+///        overriding definitions are encountered).
+///
+/// MaterializationUnits are used when providing lazy definitions of symbols to
+/// VSOs. The VSO will call materialize when the address of a symbol is
+/// requested via the lookup method. The VSO will call discard if a stronger
+/// definition is added or already present.
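+///
+/// For illustration, a trivial unit that materializes a single pre-computed
+/// symbol might look like this sketch:
+/// @code
+///   class TrivialMaterializationUnit : public MaterializationUnit {
+///   public:
+///     TrivialMaterializationUnit(SymbolStringPtr Name, JITEvaluatedSymbol Sym)
+///         : Name(std::move(Name)), Sym(Sym) {}
+///     SymbolFlagsMap getSymbols() override {
+///       return SymbolFlagsMap({{Name, Sym.getFlags()}});
+///     }
+///     Error materialize(VSO &V) override {
+///       V.resolve(SymbolMap({{Name, Sym}}));
+///       V.finalize(SymbolNameSet({Name}));
+///       return Error::success();
+///     }
+///     void discard(VSO &V, SymbolStringPtr S) override {}
+///   private:
+///     SymbolStringPtr Name;
+///     JITEvaluatedSymbol Sym;
+///   };
+/// @endcode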
+class MaterializationUnit {
+public:
+  virtual ~MaterializationUnit() {}
+
+  /// @brief Return the set of symbols that this source provides.
+  virtual SymbolFlagsMap getSymbols() = 0;
+
+  /// @brief Implementations of this method should materialize all symbols
+  ///        in the materialization unit, except for those that have been
+  ///        previously discarded.
+  virtual Error materialize(VSO &V) = 0;
+
+  /// @brief Implementations of this method should discard the given symbol
+  ///        from the source (e.g. if the source is an LLVM IR Module and the
+  ///        symbol is a function, delete the function body or mark it available
+  ///        externally).
+  virtual void discard(VSO &V, SymbolStringPtr Name) = 0;
+
+private:
+  virtual void anchor();
+};
+
+/// @brief Represents a dynamic linkage unit in a JIT process.
+///
+/// VSO acts as a symbol table (symbol definitions can be set and the dylib
+/// queried to find symbol addresses) and as a key for tracking resources
+/// (since a VSO's address is fixed).
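+///
+/// For illustration, defining a single absolute symbol (a sketch; SSP and
+/// FooAddr are assumed to exist):
+/// @code
+///   VSO V;
+///   auto Foo = SSP.intern("foo");
+///   if (auto Err = V.define(SymbolMap(
+///           {{Foo, JITEvaluatedSymbol(FooAddr, JITSymbolFlags::Exported)}})))
+///     logAllUnhandledErrors(std::move(Err), errs(), "define failed: ");
+/// @endcode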
+class VSO {
+  friend class ExecutionSession;
+
+public:
+  enum RelativeLinkageStrength {
+    NewDefinitionIsStronger,
+    DuplicateDefinition,
+    ExistingDefinitionIsStronger
+  };
+
+  using SetDefinitionsResult =
+      std::map<SymbolStringPtr, RelativeLinkageStrength>;
+
+  using MaterializationUnitList =
+      std::vector<std::unique_ptr<MaterializationUnit>>;
+
+  struct LookupResult {
+    MaterializationUnitList MaterializationUnits;
+    SymbolNameSet UnresolvedSymbols;
+  };
+
+  VSO() = default;
+
+  VSO(const VSO &) = delete;
+  VSO &operator=(const VSO &) = delete;
+  VSO(VSO &&) = delete;
+  VSO &operator=(VSO &&) = delete;
+
+  /// @brief Compare new linkage with existing linkage.
+  static RelativeLinkageStrength
+  compareLinkage(Optional<JITSymbolFlags> OldFlags, JITSymbolFlags NewFlags);
+
+  /// @brief Compare new linkage with an existing symbol's linkage.
+  RelativeLinkageStrength compareLinkage(SymbolStringPtr Name,
+                                         JITSymbolFlags NewFlags) const;
+
+  /// @brief Adds the given symbols to the mapping as resolved, finalized
+  ///        symbols.
+  ///
+  /// FIXME: We can take this by const-ref once symbol-based laziness is
+  ///        removed.
+  Error define(SymbolMap NewSymbols);
+
+  /// @brief Adds the given symbols to the mapping as lazy symbols.
+  Error defineLazy(std::unique_ptr<MaterializationUnit> Source);
+
+  /// @brief Add the given symbol/address mappings to the dylib, but do not
+  ///        mark the symbols as finalized yet.
+  void resolve(SymbolMap SymbolValues);
+
+  /// @brief Finalize the given symbols.
+  void finalize(SymbolNameSet SymbolsToFinalize);
+
+  /// @brief Look up the flags for the given symbols.
+  ///
+  /// Returns the flags for the given symbols, together with the set of symbols
+  /// not found.
+  SymbolNameSet lookupFlags(SymbolFlagsMap &Flags, SymbolNameSet Symbols);
+
+  /// @brief Apply the given query to the given symbols in this VSO.
+  ///
+  /// For symbols in this VSO that have already been materialized, their address
+  /// will be set in the query immediately.
+  ///
+  /// For symbols in this VSO that have not been materialized, the query will be
+  /// recorded and the source for those symbols (plus the set of symbols to be
+  /// materialized by that source) will be returned in the MaterializationUnits
+  /// field of the LookupResult.
+  ///
+  /// Any symbols not found in this VSO will be returned in the
+  /// UnresolvedSymbols field of the LookupResult.
+  LookupResult lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+                      SymbolNameSet Symbols);
+
+private:
+  class MaterializationInfo {
+  public:
+    using QueryList = std::vector<std::shared_ptr<AsynchronousSymbolQuery>>;
+
+    MaterializationInfo(size_t SymbolsRemaining,
+                        std::unique_ptr<MaterializationUnit> MU);
+
+    uint64_t SymbolsRemaining;
+    std::unique_ptr<MaterializationUnit> MU;
+    SymbolMap Symbols;
+    std::map<SymbolStringPtr, QueryList> PendingResolution;
+    std::map<SymbolStringPtr, QueryList> PendingFinalization;
+  };
+
+  using MaterializationInfoSet = std::set<std::unique_ptr<MaterializationInfo>>;
+
+  using MaterializationInfoIterator = MaterializationInfoSet::iterator;
+
+  class SymbolTableEntry {
+  public:
+    SymbolTableEntry(JITSymbolFlags SymbolFlags,
+                     MaterializationInfoIterator MaterializationInfoItr);
+    SymbolTableEntry(JITEvaluatedSymbol Sym);
+    SymbolTableEntry(SymbolTableEntry &&Other);
+    ~SymbolTableEntry();
+
+    SymbolTableEntry &operator=(JITEvaluatedSymbol Sym);
+
+    JITSymbolFlags getFlags() const;
+    void replaceWith(VSO &V, SymbolStringPtr Name, JITSymbolFlags Flags,
+                     MaterializationInfoIterator NewMaterializationInfoItr);
+    std::unique_ptr<MaterializationUnit>
+    query(SymbolStringPtr Name, std::shared_ptr<AsynchronousSymbolQuery> Query);
+    void resolve(VSO &V, SymbolStringPtr Name, JITEvaluatedSymbol Sym);
+    void finalize(VSO &V, SymbolStringPtr Name);
+    void discard(VSO &V, SymbolStringPtr Name);
+
+  private:
+    void destroy();
+
+    JITSymbolFlags Flags;
+    MaterializationInfoIterator MII;
+    union {
+      JITTargetAddress Address;
+      MaterializationInfoIterator MaterializationInfoItr;
+    };
+  };
+
+  std::map<SymbolStringPtr, SymbolTableEntry> Symbols;
+  MaterializationInfoSet MaterializationInfos;
+};
+
+/// @brief An ExecutionSession represents a running JIT program.
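+///
+/// For illustration (a sketch):
+/// @code
+///   SymbolStringPool SSP;
+///   ExecutionSession ES(SSP);
+///   ES.setErrorReporter([](Error Err) {
+///     logAllUnhandledErrors(std::move(Err), errs(), "JIT error: ");
+///   });
+///   VModuleKey K = ES.allocateVModule();
+/// @endcode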
+class ExecutionSession {
+public:
+  using ErrorReporter = std::function<void(Error)>;
+
+  /// @brief Construct an ExecutionSession.
+  ///
+  /// SymbolStringPools may be shared between ExecutionSessions.
+  ExecutionSession(SymbolStringPool &SSP);
+
+  /// @brief Returns the SymbolStringPool for this ExecutionSession.
+  SymbolStringPool &getSymbolStringPool() const { return SSP; }
+
+  /// @brief Set the error reporter function.
+  void setErrorReporter(ErrorReporter ReportError) {
+    this->ReportError = std::move(ReportError);
+  }
+
+  /// @brief Report an error for this execution session.
+  ///
+  /// Unhandled errors can be sent here to log them.
+  void reportError(Error Err) { ReportError(std::move(Err)); }
+
+  /// @brief Allocate a module key for a new module to add to the JIT.
+  VModuleKey allocateVModule();
+
+  /// @brief Return a module key to the ExecutionSession so that it can be
+  ///        re-used. This should only be done once all resources associated
+  ///        with the original key have been released.
+  void releaseVModule(VModuleKey Key);
+
+private:
+  static void logErrorsToStdErr(Error Err);
+
+  SymbolStringPool &SSP;
+  VModuleKey LastKey = 0;
+  ErrorReporter ReportError = logErrorsToStdErr;
+};
+
+/// Runs Materializers on the current thread and reports errors to the given
+/// ExecutionSession.
+class MaterializeOnCurrentThread {
+public:
+  MaterializeOnCurrentThread(ExecutionSession &ES) : ES(ES) {}
+
+  void operator()(VSO &V, std::unique_ptr<MaterializationUnit> MU) {
+    if (auto Err = MU->materialize(V))
+      ES.reportError(std::move(Err));
+  }
+
+private:
+  ExecutionSession &ES;
+};
+
+/// Materialization function object wrapper for the lookup method.
+using MaterializationDispatcher =
+    std::function<void(VSO &V, std::unique_ptr<MaterializationUnit> S)>;
+
+/// @brief Look up a set of symbols by searching a list of VSOs.
+///
+/// All VSOs in the list should be non-null.
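+///
+/// For illustration, a blocking lookup that materializes on the calling
+/// thread (a sketch):
+/// @code
+///   auto Symbols = lookup({&V}, Names, MaterializeOnCurrentThread(ES));
+///   if (!Symbols)
+///     logAllUnhandledErrors(Symbols.takeError(), errs(), "lookup failed: ");
+/// @endcode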
+Expected<SymbolMap> lookup(const std::vector<VSO *> &VSOs, SymbolNameSet Names,
+                           MaterializationDispatcher DispatchMaterialization);
+
+/// @brief Look up a symbol by searching a list of VSOs.
+Expected<JITEvaluatedSymbol>
+lookup(const std::vector<VSO *> &VSOs, SymbolStringPtr Name,
+       MaterializationDispatcher DispatchMaterialization);
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_CORE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
new file mode 100644
index 0000000..d466df8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -0,0 +1,190 @@
+//===- ExecutionUtils.h - Utilities for executing code in Orc ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for executing code in Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class ConstantArray;
+class GlobalVariable;
+class Function;
+class Module;
+class Value;
+
+namespace orc {
+
+/// @brief This iterator provides a convenient way to iterate over the elements
+///        of an llvm.global_ctors/llvm.global_dtors instance.
+///
+///   The easiest way to get hold of instances of this class is to use the
+/// getConstructors/getDestructors functions.
+class CtorDtorIterator {
+public:
+  /// @brief Accessor for an element of the global_ctors/global_dtors array.
+  ///
+  ///   This class provides a read-only view of the element with any casts on
+  /// the function stripped away.
+  struct Element {
+    Element(unsigned Priority, Function *Func, Value *Data)
+      : Priority(Priority), Func(Func), Data(Data) {}
+
+    unsigned Priority;
+    Function *Func;
+    Value *Data;
+  };
+
+  /// @brief Construct an iterator instance. If End is true then this iterator
+  ///        acts as the end of the range, otherwise it is the beginning.
+  CtorDtorIterator(const GlobalVariable *GV, bool End);
+
+  /// @brief Test iterators for equality.
+  bool operator==(const CtorDtorIterator &Other) const;
+
+  /// @brief Test iterators for inequality.
+  bool operator!=(const CtorDtorIterator &Other) const;
+
+  /// @brief Pre-increment iterator.
+  CtorDtorIterator& operator++();
+
+  /// @brief Post-increment iterator.
+  CtorDtorIterator operator++(int);
+
+  /// @brief Dereference iterator. The resulting value provides a read-only view
+  ///        of this element of the global_ctors/global_dtors list.
+  Element operator*() const;
+
+private:
+  const ConstantArray *InitList;
+  unsigned I;
+};
+
+/// @brief Create an iterator range over the entries of the llvm.global_ctors
+///        array.
+iterator_range<CtorDtorIterator> getConstructors(const Module &M);
+
+/// @brief Create an iterator range over the entries of the llvm.global_dtors
+///        array.
+iterator_range<CtorDtorIterator> getDestructors(const Module &M);
+
+/// @brief Convenience class for recording constructor/destructor names for
+///        later execution.
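+///
+/// For illustration (a sketch; Mangle is an assumed client-side name mangling
+/// helper, CompileLayer is an assumed JIT layer, and K is the key the module
+/// will be added under):
+/// @code
+///   std::vector<std::string> CtorNames;
+///   for (auto Ctor : getConstructors(*M))
+///     CtorNames.push_back(Mangle(Ctor.Func->getName()));
+///   CtorDtorRunner<decltype(CompileLayer)> CtorRunner(std::move(CtorNames), K);
+///   // ... add the module under K and emit it ...
+///   if (auto Err = CtorRunner.runViaLayer(CompileLayer))
+///     logAllUnhandledErrors(std::move(Err), errs(), "ctor error: ");
+/// @endcode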
+template <typename JITLayerT>
+class CtorDtorRunner {
+public:
+  /// @brief Construct a CtorDtorRunner for the given list of mangled
+  ///        constructor/destructor names and module key.
+  CtorDtorRunner(std::vector<std::string> CtorDtorNames, VModuleKey K)
+      : CtorDtorNames(std::move(CtorDtorNames)), K(K) {}
+
+  /// @brief Run the recorded constructors/destructors through the given JIT
+  ///        layer.
+  Error runViaLayer(JITLayerT &JITLayer) const {
+    using CtorDtorTy = void (*)();
+
+    for (const auto &CtorDtorName : CtorDtorNames) {
+      if (auto CtorDtorSym = JITLayer.findSymbolIn(K, CtorDtorName, false)) {
+        if (auto AddrOrErr = CtorDtorSym.getAddress()) {
+          CtorDtorTy CtorDtor =
+            reinterpret_cast<CtorDtorTy>(static_cast<uintptr_t>(*AddrOrErr));
+          CtorDtor();
+        } else
+          return AddrOrErr.takeError();
+      } else {
+        if (auto Err = CtorDtorSym.takeError())
+          return Err;
+        else
+          return make_error<JITSymbolNotFound>(CtorDtorName);
+      }
+    }
+    return Error::success();
+  }
+
+private:
+  std::vector<std::string> CtorDtorNames;
+  orc::VModuleKey K;
+};
+
+/// @brief Support class for static dtor execution. For hosted (in-process) JITs
+///        only!
+///
+///   If a __cxa_atexit function isn't found, C++ programs that use static
+/// destructors will fail to link. However, we don't want to use the host
+/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
+/// after the JIT has been torn down, which is no good. This class makes it easy
+/// to override __cxa_atexit (and the related __dso_handle).
+///
+///   To use, clients should manually call searchOverrides from their symbol
+/// resolver. This should generally be done after attempting symbol resolution
+/// inside the JIT, but before searching the host process's symbol table. When
+/// the client determines that destructors should be run (generally at JIT
+/// teardown or after a return from main), the runDestructors method should be
+/// called.
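+///
+///   For illustration (a sketch; Mangle is an assumed client-side mangling
+/// helper, and the lookup shown would live inside the client's resolver):
+/// @code
+///   LocalCXXRuntimeOverrides CXXOverrides(Mangle);
+///   // Inside the resolver, after JIT-internal resolution fails:
+///   if (auto Sym = CXXOverrides.searchOverrides(Name))
+///     return Sym;
+///   // ... otherwise fall back to the host process's symbol table ...
+///   // At JIT teardown or after main returns:
+///   CXXOverrides.runDestructors();
+/// @endcode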
+class LocalCXXRuntimeOverrides {
+public:
+  /// Create a runtime-overrides class.
+  template <typename MangleFtorT>
+  LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
+    addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
+    addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
+  }
+
+  /// Search the overridden symbols.
+  JITEvaluatedSymbol searchOverrides(const std::string &Name) {
+    auto I = CXXRuntimeOverrides.find(Name);
+    if (I != CXXRuntimeOverrides.end())
+      return JITEvaluatedSymbol(I->second, JITSymbolFlags::Exported);
+    return nullptr;
+  }
+
+  /// Run any destructors recorded by the overridden __cxa_atexit function
+  /// (CXAAtExitOverride).
+  void runDestructors();
+
+private:
+  template <typename PtrTy>
+  JITTargetAddress toTargetAddress(PtrTy* P) {
+    return static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(P));
+  }
+
+  void addOverride(const std::string &Name, JITTargetAddress Addr) {
+    CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
+  }
+
+  StringMap<JITTargetAddress> CXXRuntimeOverrides;
+
+  using DestructorPtr = void (*)(void *);
+  using CXXDestructorDataPair = std::pair<DestructorPtr, void *>;
+  using CXXDestructorDataPairList = std::vector<CXXDestructorDataPair>;
+  CXXDestructorDataPairList DSOHandleOverride;
+  static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
+                               void *DSOHandle);
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
new file mode 100644
index 0000000..8a48c36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -0,0 +1,112 @@
+//===- GlobalMappingLayer.h - Run all IR through a functor ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Convenience layer for injecting symbols that will appear in calls to
+// findSymbol.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include <map>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class Module;
+class JITSymbolResolver;
+
+namespace orc {
+
+/// @brief Global mapping layer.
+///
+///   This layer overrides the findSymbol method to first search a local symbol
+/// table that the client can define. It can be used to inject new symbol
+/// mappings into the JIT. Beware, however: symbols within a single IR module or
+/// object file will still resolve locally (via RuntimeDyld's symbol table) -
+/// such internal references cannot be overridden via this layer.
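+///
+/// For illustration, injecting a mapping for the host process's puts (a
+/// sketch; the unmangled name shown is platform-dependent):
+/// @code
+///   GlobalMappingLayer<decltype(CompileLayer)> MapLayer(CompileLayer);
+///   MapLayer.setGlobalMapping(
+///       "puts", static_cast<JITTargetAddress>(
+///                   reinterpret_cast<uintptr_t>(&puts)));
+/// @endcode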
+template <typename BaseLayerT>
+class GlobalMappingLayer {
+public:
+
+  /// @brief Handle to an added module.
+  using ModuleHandleT = typename BaseLayerT::ModuleHandleT;
+
+  /// @brief Construct a GlobalMappingLayer with the given BaseLayer.
+  GlobalMappingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+  /// @brief Add the given module to the JIT.
+  /// @return A handle for the added module.
+  Expected<ModuleHandleT>
+  addModule(std::shared_ptr<Module> M,
+            std::shared_ptr<JITSymbolResolver> Resolver) {
+    return BaseLayer.addModule(std::move(M), std::move(Resolver));
+  }
+
+  /// @brief Remove the module associated with the handle H.
+  Error removeModule(ModuleHandleT H) { return BaseLayer.removeModule(H); }
+
+  /// @brief Manually set the address to return for the given symbol.
+  void setGlobalMapping(const std::string &Name, JITTargetAddress Addr) {
+    SymbolTable[Name] = Addr;
+  }
+
+  /// @brief Remove the given symbol from the global mapping.
+  void eraseGlobalMapping(const std::string &Name) {
+    SymbolTable.erase(Name);
+  }
+
+  /// @brief Search for the given named symbol.
+  ///
+  ///          This method will first search the local symbol table, returning
+  ///        any symbol found there. If the symbol is not found in the local
+  ///        table then this call will be passed through to the base layer.
+  ///
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+    auto I = SymbolTable.find(Name);
+    if (I != SymbolTable.end())
+      return JITSymbol(I->second, JITSymbolFlags::Exported);
+    return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Get the address of the given symbol in the context of the
+  ///        module represented by the handle H. This call is forwarded to the
+  ///        base layer's implementation.
+  /// @param H The handle for the module to search in.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it is found in the
+  ///         given module.
+  JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Immediately emit and finalize the module represented by the
+  ///        given handle.
+  /// @param H Handle for the module to emit/finalize.
+  Error emitAndFinalize(ModuleHandleT H) {
+    return BaseLayer.emitAndFinalize(H);
+  }
+
+private:
+  BaseLayerT &BaseLayer;
+  std::map<std::string, JITTargetAddress> SymbolTable;
+};
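+
+// Example usage (illustrative sketch; assumes an existing base layer named
+// CompileLayer and a target-process address MallocAddr obtained elsewhere):
+//
+//   GlobalMappingLayer<decltype(CompileLayer)> MappingLayer(CompileLayer);
+//   MappingLayer.setGlobalMapping("malloc", MallocAddr);
+//   // Lookups for "malloc" now resolve to MallocAddr; all other names fall
+//   // through to CompileLayer.findSymbol.
+//   auto Sym = MappingLayer.findSymbol("malloc", /*ExportedSymbolsOnly=*/true);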
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
new file mode 100644
index 0000000..a7f9416
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -0,0 +1,107 @@
+//===- IRCompileLayer.h -- Eagerly compile IR for JIT -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a basic, eagerly compiling layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class Module;
+
+namespace orc {
+
+/// @brief Eager IR compiling layer.
+///
+///   This layer immediately compiles each IR module added via addModule to an
+/// object file and adds the resulting object file to the layer below, which
+/// must implement the object layer concept.
+template <typename BaseLayerT, typename CompileFtor>
+class IRCompileLayer {
+public:
+  /// @brief Callback type for notifications when modules are compiled.
+  using NotifyCompiledCallback =
+      std::function<void(VModuleKey K, std::unique_ptr<Module>)>;
+
+  /// @brief Construct an IRCompileLayer with the given BaseLayer, which must
+  ///        implement the ObjectLayer concept.
+  IRCompileLayer(
+      BaseLayerT &BaseLayer, CompileFtor Compile,
+      NotifyCompiledCallback NotifyCompiled = NotifyCompiledCallback())
+      : BaseLayer(BaseLayer), Compile(std::move(Compile)),
+        NotifyCompiled(std::move(NotifyCompiled)) {}
+
+  /// @brief Get a reference to the compiler functor.
+  CompileFtor& getCompiler() { return Compile; }
+
+  /// @brief (Re)set the NotifyCompiled callback.
+  void setNotifyCompiled(NotifyCompiledCallback NotifyCompiled) {
+    this->NotifyCompiled = std::move(NotifyCompiled);
+  }
+
+  /// @brief Compile the module, and add the resulting object to the base layer
+  ///        under the given VModuleKey.
+  Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+    if (auto Err = BaseLayer.addObject(std::move(K), Compile(*M)))
+      return Err;
+    if (NotifyCompiled)
+      NotifyCompiled(std::move(K), std::move(M));
+    return Error::success();
+  }
+
+  /// @brief Remove the module associated with the VModuleKey K.
+  Error removeModule(VModuleKey K) { return BaseLayer.removeObject(K); }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Get the address of the given symbol in compiled module represented
+  ///        by the handle H. This call is forwarded to the base layer's
+  ///        implementation.
+  /// @param K The VModuleKey for the module to search in.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it is found in the
+  ///         given module.
+  JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Immediately emit and finalize the module represented by the given
+  ///        handle.
+  /// @param K The VModuleKey for the module to emit/finalize.
+  Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+private:
+  BaseLayerT &BaseLayer;
+  CompileFtor Compile;
+  NotifyCompiledCallback NotifyCompiled;
+};
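+
+// Example usage (illustrative sketch; assumes an object layer ObjLayer, a
+// TargetMachine TM, SimpleCompiler from Orc/CompileUtils.h, and a VModuleKey K
+// obtained from the ExecutionSession):
+//
+//   IRCompileLayer<decltype(ObjLayer), SimpleCompiler>
+//       CompileLayer(ObjLayer, SimpleCompiler(TM));
+//   if (auto Err = CompileLayer.addModule(K, std::move(M)))
+//     return Err; // Propagate failures from compilation / the object layer.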
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
new file mode 100644
index 0000000..4f1fe7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -0,0 +1,90 @@
+//===- IRTransformLayer.h - Run all IR through a functor --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all IR passed in through a user-supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+class Module;
+namespace orc {
+
+/// @brief IR mutating layer.
+///
+///   This layer applies a user-supplied transform to each module that is added,
+/// then adds the transformed module to the layer below.
+template <typename BaseLayerT, typename TransformFtor>
+class IRTransformLayer {
+public:
+
+  /// @brief Construct an IRTransformLayer with the given BaseLayer
+  IRTransformLayer(BaseLayerT &BaseLayer,
+                   TransformFtor Transform = TransformFtor())
+    : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+  /// @brief Apply the transform functor to the module, then add the module to
+  ///        the layer below.
+  ///
+  /// @return Any Error produced by the layer below.
+  Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+    return BaseLayer.addModule(std::move(K), Transform(std::move(M)));
+  }
+
+  /// @brief Remove the module associated with the VModuleKey K.
+  Error removeModule(VModuleKey K) { return BaseLayer.removeModule(K); }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Get the address of the given symbol in the context of the module
+  ///        represented by the VModuleKey K. This call is forwarded to the base
+  ///        layer's implementation.
+  /// @param K The VModuleKey for the module to search in.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it is found in the
+  ///         given module.
+  JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Immediately emit and finalize the module represented by the given
+  ///        VModuleKey.
+  /// @param K The VModuleKey for the module to emit/finalize.
+  Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+  /// @brief Access the transform functor directly.
+  TransformFtor& getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const version).
+  const TransformFtor& getTransform() const { return Transform; }
+
+private:
+  BaseLayerT &BaseLayer;
+  TransformFtor Transform;
+};
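+
+// Example usage (illustrative sketch; assumes an existing CompileLayer). The
+// transform functor may be any callable taking and returning a
+// std::unique_ptr<Module>:
+//
+//   auto OptimizeModule = [](std::unique_ptr<Module> M) {
+//     // Run a legacy FunctionPassManager or other IR passes over *M here.
+//     return M;
+//   };
+//   IRTransformLayer<decltype(CompileLayer), decltype(OptimizeModule)>
+//       TransformLayer(CompileLayer, std::move(OptimizeModule));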
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
new file mode 100644
index 0000000..029b86a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -0,0 +1,451 @@
+//===- IndirectionUtils.h - Utilities for adding indirections ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for adding indirections and breaking up modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+class FunctionType;
+class GlobalAlias;
+class GlobalVariable;
+class Module;
+class PointerType;
+class Triple;
+class Value;
+
+namespace orc {
+
+/// @brief Target-independent base class for compile callback management.
+class JITCompileCallbackManager {
+public:
+  using CompileFtor = std::function<JITTargetAddress()>;
+
+  /// @brief Handle to a newly created compile callback. Can be used to get an
+  ///        IR constant representing the address of the trampoline, and to set
+  ///        the compile action for the callback.
+  class CompileCallbackInfo {
+  public:
+    CompileCallbackInfo(JITTargetAddress Addr, CompileFtor &Compile)
+        : Addr(Addr), Compile(Compile) {}
+
+    JITTargetAddress getAddress() const { return Addr; }
+    void setCompileAction(CompileFtor Compile) {
+      this->Compile = std::move(Compile);
+    }
+
+  private:
+    JITTargetAddress Addr;
+    CompileFtor &Compile;
+  };
+
+  /// @brief Construct a JITCompileCallbackManager.
+  /// @param ErrorHandlerAddress The address of an error handler in the target
+  ///                            process to be used if a compile callback fails.
+  JITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
+      : ErrorHandlerAddress(ErrorHandlerAddress) {}
+
+  virtual ~JITCompileCallbackManager() = default;
+
+  /// @brief Execute the callback for the given trampoline id. Called by the JIT
+  ///        to compile functions on demand.
+  JITTargetAddress executeCompileCallback(JITTargetAddress TrampolineAddr) {
+    auto I = ActiveTrampolines.find(TrampolineAddr);
+    // FIXME: Also raise an error in the Orc error-handler when we finally have
+    //        one.
+    if (I == ActiveTrampolines.end())
+      return ErrorHandlerAddress;
+
+    // Found a callback handler. Yank this trampoline out of the active list and
+    // put it back in the available trampolines list, then try to run the
+    // handler's compile and update actions.
+    // Moving the trampoline ID back to the available list first means there's
+    // at least one available trampoline if the compile action triggers a
+    // request for a new one.
+    auto Compile = std::move(I->second);
+    ActiveTrampolines.erase(I);
+    AvailableTrampolines.push_back(TrampolineAddr);
+
+    if (auto Addr = Compile())
+      return Addr;
+
+    return ErrorHandlerAddress;
+  }
+
+  /// @brief Reserve a compile callback.
+  Expected<CompileCallbackInfo> getCompileCallback() {
+    if (auto TrampolineAddrOrErr = getAvailableTrampolineAddr()) {
+      const auto &TrampolineAddr = *TrampolineAddrOrErr;
+      auto &Compile = this->ActiveTrampolines[TrampolineAddr];
+      return CompileCallbackInfo(TrampolineAddr, Compile);
+    } else
+      return TrampolineAddrOrErr.takeError();
+  }
+
+  /// @brief Get a CompileCallbackInfo for an existing callback.
+  CompileCallbackInfo getCompileCallbackInfo(JITTargetAddress TrampolineAddr) {
+    auto I = ActiveTrampolines.find(TrampolineAddr);
+    assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+    return CompileCallbackInfo(I->first, I->second);
+  }
+
+  /// @brief Release a compile callback.
+  ///
+  ///   Note: Callbacks are auto-released after they execute. This method should
+  /// only be called to manually release a callback that is not going to
+  /// execute.
+  void releaseCompileCallback(JITTargetAddress TrampolineAddr) {
+    auto I = ActiveTrampolines.find(TrampolineAddr);
+    assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+    ActiveTrampolines.erase(I);
+    AvailableTrampolines.push_back(TrampolineAddr);
+  }
+
+protected:
+  JITTargetAddress ErrorHandlerAddress;
+
+  using TrampolineMapT = std::map<JITTargetAddress, CompileFtor>;
+  TrampolineMapT ActiveTrampolines;
+  std::vector<JITTargetAddress> AvailableTrampolines;
+
+private:
+  Expected<JITTargetAddress> getAvailableTrampolineAddr() {
+    if (this->AvailableTrampolines.empty())
+      if (auto Err = grow())
+        return std::move(Err);
+    assert(!this->AvailableTrampolines.empty() &&
+           "Failed to grow available trampolines.");
+    JITTargetAddress TrampolineAddr = this->AvailableTrampolines.back();
+    this->AvailableTrampolines.pop_back();
+    return TrampolineAddr;
+  }
+
+  // Create new trampolines - to be implemented in subclasses.
+  virtual Error grow() = 0;
+
+  virtual void anchor();
+};
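+
+// Example usage (illustrative sketch; CCMgr is any JITCompileCallbackManager
+// subclass, and compileMyFunction is a placeholder for client compilation
+// logic):
+//
+//   if (auto CCInfoOrErr = CCMgr.getCompileCallback()) {
+//     auto &CCInfo = *CCInfoOrErr;
+//     CCInfo.setCompileAction(
+//         []() -> JITTargetAddress { return compileMyFunction(); });
+//     // Point a stub or call site at CCInfo.getAddress(); the first call
+//     // through it will run the compile action, then land in the result.
+//   } else
+//     return CCInfoOrErr.takeError();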
+
+/// @brief Manage compile callbacks for in-process JITs.
+template <typename TargetT>
+class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
+public:
+  /// @brief Construct a LocalJITCompileCallbackManager.
+  /// @param ErrorHandlerAddress The address of an error handler in the target
+  ///                            process to be used if a compile callback fails.
+  LocalJITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
+      : JITCompileCallbackManager(ErrorHandlerAddress) {
+    // Set up the resolver block.
+    std::error_code EC;
+    ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+        TargetT::ResolverCodeSize, nullptr,
+        sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+    assert(!EC && "Failed to allocate resolver block");
+
+    TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+                               &reenter, this);
+
+    EC = sys::Memory::protectMappedMemory(ResolverBlock.getMemoryBlock(),
+                                          sys::Memory::MF_READ |
+                                              sys::Memory::MF_EXEC);
+    assert(!EC && "Failed to mprotect resolver block");
+  }
+
+private:
+  static JITTargetAddress reenter(void *CCMgr, void *TrampolineId) {
+    JITCompileCallbackManager *Mgr =
+        static_cast<JITCompileCallbackManager *>(CCMgr);
+    return Mgr->executeCompileCallback(
+        static_cast<JITTargetAddress>(
+            reinterpret_cast<uintptr_t>(TrampolineId)));
+  }
+
+  Error grow() override {
+    assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
+
+    std::error_code EC;
+    auto TrampolineBlock =
+        sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+            sys::Process::getPageSize(), nullptr,
+            sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+    if (EC)
+      return errorCodeToError(EC);
+
+    unsigned NumTrampolines =
+        (sys::Process::getPageSize() - TargetT::PointerSize) /
+        TargetT::TrampolineSize;
+
+    uint8_t *TrampolineMem = static_cast<uint8_t *>(TrampolineBlock.base());
+    TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+                              NumTrampolines);
+
+    for (unsigned I = 0; I < NumTrampolines; ++I)
+      this->AvailableTrampolines.push_back(
+          static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(
+              TrampolineMem + (I * TargetT::TrampolineSize))));
+
+    if (auto EC = sys::Memory::protectMappedMemory(
+                    TrampolineBlock.getMemoryBlock(),
+                    sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+      return errorCodeToError(EC);
+
+    TrampolineBlocks.push_back(std::move(TrampolineBlock));
+    return Error::success();
+  }
+
+  sys::OwningMemoryBlock ResolverBlock;
+  std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+};
+
+/// @brief Base class for managing collections of named indirect stubs.
+class IndirectStubsManager {
+public:
+  /// @brief Map type for initializing the manager. See init.
+  using StubInitsMap = StringMap<std::pair<JITTargetAddress, JITSymbolFlags>>;
+
+  virtual ~IndirectStubsManager() = default;
+
+  /// @brief Create a single stub with the given name, target address and flags.
+  virtual Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+                           JITSymbolFlags StubFlags) = 0;
+
+  /// @brief Create StubInits.size() stubs with the given names, target
+  ///        addresses, and flags.
+  virtual Error createStubs(const StubInitsMap &StubInits) = 0;
+
+  /// @brief Find the stub with the given name. If ExportedStubsOnly is true,
+  ///        this will only return a result if the stub's flags indicate that it
+  ///        is exported.
+  virtual JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) = 0;
+
+  /// @brief Find the implementation-pointer for the stub.
+  virtual JITSymbol findPointer(StringRef Name) = 0;
+
+  /// @brief Change the value of the implementation pointer for the stub.
+  virtual Error updatePointer(StringRef Name, JITTargetAddress NewAddr) = 0;
+
+private:
+  virtual void anchor();
+};
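+
+// Example usage (illustrative sketch; ISM is any IndirectStubsManager
+// implementation, and FooBodyAddr/NewFooBodyAddr are placeholder target
+// addresses):
+//
+//   if (auto Err = ISM.createStub("foo", FooBodyAddr, JITSymbolFlags::Exported))
+//     return Err;
+//   // Calls through the stub land at FooBodyAddr until it is retargeted:
+//   if (auto Err = ISM.updatePointer("foo", NewFooBodyAddr))
+//     return Err;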
+
+/// @brief IndirectStubsManager implementation for the host architecture, e.g.
+///        OrcX86_64. (See OrcABISupport.h.)
+template <typename TargetT>
+class LocalIndirectStubsManager : public IndirectStubsManager {
+public:
+  Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+                   JITSymbolFlags StubFlags) override {
+    if (auto Err = reserveStubs(1))
+      return Err;
+
+    createStubInternal(StubName, StubAddr, StubFlags);
+
+    return Error::success();
+  }
+
+  Error createStubs(const StubInitsMap &StubInits) override {
+    if (auto Err = reserveStubs(StubInits.size()))
+      return Err;
+
+    for (auto &Entry : StubInits)
+      createStubInternal(Entry.first(), Entry.second.first,
+                         Entry.second.second);
+
+    return Error::success();
+  }
+
+  JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+    auto I = StubIndexes.find(Name);
+    if (I == StubIndexes.end())
+      return nullptr;
+    auto Key = I->second.first;
+    void *StubAddr = IndirectStubsInfos[Key.first].getStub(Key.second);
+    assert(StubAddr && "Missing stub address");
+    auto StubTargetAddr =
+        static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(StubAddr));
+    auto StubSymbol = JITSymbol(StubTargetAddr, I->second.second);
+    if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
+      return nullptr;
+    return StubSymbol;
+  }
+
+  JITSymbol findPointer(StringRef Name) override {
+    auto I = StubIndexes.find(Name);
+    if (I == StubIndexes.end())
+      return nullptr;
+    auto Key = I->second.first;
+    void *PtrAddr = IndirectStubsInfos[Key.first].getPtr(Key.second);
+    assert(PtrAddr && "Missing pointer address");
+    auto PtrTargetAddr =
+        static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(PtrAddr));
+    return JITSymbol(PtrTargetAddr, I->second.second);
+  }
+
+  Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
+    auto I = StubIndexes.find(Name);
+    assert(I != StubIndexes.end() && "No stub pointer for symbol");
+    auto Key = I->second.first;
+    *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+        reinterpret_cast<void *>(static_cast<uintptr_t>(NewAddr));
+    return Error::success();
+  }
+
+private:
+  Error reserveStubs(unsigned NumStubs) {
+    if (NumStubs <= FreeStubs.size())
+      return Error::success();
+
+    unsigned NewStubsRequired = NumStubs - FreeStubs.size();
+    unsigned NewBlockId = IndirectStubsInfos.size();
+    typename TargetT::IndirectStubsInfo ISI;
+    if (auto Err =
+            TargetT::emitIndirectStubsBlock(ISI, NewStubsRequired, nullptr))
+      return Err;
+    for (unsigned I = 0; I < ISI.getNumStubs(); ++I)
+      FreeStubs.push_back(std::make_pair(NewBlockId, I));
+    IndirectStubsInfos.push_back(std::move(ISI));
+    return Error::success();
+  }
+
+  void createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
+                          JITSymbolFlags StubFlags) {
+    auto Key = FreeStubs.back();
+    FreeStubs.pop_back();
+    *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+        reinterpret_cast<void *>(static_cast<uintptr_t>(InitAddr));
+    StubIndexes[StubName] = std::make_pair(Key, StubFlags);
+  }
+
+  std::vector<typename TargetT::IndirectStubsInfo> IndirectStubsInfos;
+  using StubKey = std::pair<uint16_t, uint16_t>;
+  std::vector<StubKey> FreeStubs;
+  StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
+};
+
+/// @brief Create a local compile callback manager.
+///
+/// The given target triple will determine the ABI, and the given
+/// ErrorHandlerAddress will be used by the resulting compile callback
+/// manager if a compile callback fails.
+std::unique_ptr<JITCompileCallbackManager>
+createLocalCompileCallbackManager(const Triple &T,
+                                  JITTargetAddress ErrorHandlerAddress);
+
+/// @brief Create a local indirect stubs manager builder.
+///
+/// The given target triple will determine the ABI.
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T);
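+
+// Example usage of the two factories above (illustrative sketch; both key off
+// the process triple for in-process JITing):
+//
+//   auto CCMgr = createLocalCompileCallbackManager(
+//       Triple(sys::getProcessTriple()), /*ErrorHandlerAddress=*/0);
+//   auto ISMBuilder =
+//       createLocalIndirectStubsManagerBuilder(Triple(sys::getProcessTriple()));
+//   auto ISM = ISMBuilder();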
+
+/// @brief Build a function pointer of FunctionType with the given constant
+///        address.
+///
+///   Usage example: Turn a trampoline address into a function pointer constant
+/// for use in a stub.
+Constant *createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr);
+
+/// @brief Create a function pointer with the given type, name, and initializer
+///        in the given Module.
+GlobalVariable *createImplPointer(PointerType &PT, Module &M, const Twine &Name,
+                                  Constant *Initializer);
+
+/// @brief Turn a function declaration into a stub function that makes an
+///        indirect call using the given function pointer.
+void makeStub(Function &F, Value &ImplPointer);
+
+/// @brief Raise linkage types and rename as necessary to ensure that all
+///        symbols are accessible for other modules.
+///
+///   This should be called before partitioning a module to ensure that the
+/// partitions retain access to each other's symbols.
+void makeAllSymbolsExternallyAccessible(Module &M);
+
+/// @brief Clone a function declaration into a new module.
+///
+///   This function can be used as the first step towards creating a callback
+/// stub (see makeStub), or moving a function body (see moveFunctionBody).
+///
+///   If the VMap argument is non-null, a mapping will be added between F and
+/// the new declaration, and between each of F's arguments and the new
+/// declaration's arguments. This map can then be passed in to moveFunction to
+/// move the function body if required. Note: When moving functions between
+/// modules with these utilities, all decls should be cloned (and added to a
+/// single VMap) before any bodies are moved. This will ensure that references
+/// between functions all refer to the versions in the new module.
+Function *cloneFunctionDecl(Module &Dst, const Function &F,
+                            ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move the body of function 'F' to a cloned function declaration in a
+///        different module (See related cloneFunctionDecl).
+///
+///   If the target function declaration is not supplied via the NewF parameter
+/// then it will be looked up via the VMap.
+///
+///   This will delete the body of function 'F' from its original parent module,
+/// but leave its declaration.
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+                      ValueMaterializer *Materializer = nullptr,
+                      Function *NewF = nullptr);
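+
+// Example usage (illustrative sketch): the intended decl-then-body pattern for
+// moving function F from its original module into module DstM:
+//
+//   ValueToValueMapTy VMap;
+//   Function *NewFDecl = cloneFunctionDecl(DstM, F, &VMap);
+//   // Clone any other required declarations into DstM (and VMap) first, then:
+//   moveFunctionBody(F, VMap);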
+
+/// @brief Clone a global variable declaration into a new module.
+GlobalVariable *cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+                                        ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move global variable GV from its parent module to cloned global
+///        declaration in a different module.
+///
+///   If the target global declaration is not supplied via the NewGV parameter
+/// then it will be looked up via the VMap.
+///
+///   This will delete the initializer of GV from its original parent module,
+/// but leave its declaration.
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+                                   ValueToValueMapTy &VMap,
+                                   ValueMaterializer *Materializer = nullptr,
+                                   GlobalVariable *NewGV = nullptr);
+
+/// @brief Clone a global alias declaration into a new module.
+GlobalAlias *cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+                                  ValueToValueMapTy &VMap);
+
+/// @brief Clone module flags metadata into the destination module.
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+                              ValueToValueMapTy &VMap);
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
new file mode 100644
index 0000000..7b6f3d2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -0,0 +1,59 @@
+//===- LambdaResolver.h - Redirect symbol lookup via a functor --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//   Defines a LegacyJITSymbolResolver subclass that uses a user-supplied
+// functor for symbol resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+template <typename DylibLookupFtorT, typename ExternalLookupFtorT>
+class LambdaResolver : public LegacyJITSymbolResolver {
+public:
+  LambdaResolver(DylibLookupFtorT DylibLookupFtor,
+                 ExternalLookupFtorT ExternalLookupFtor)
+      : DylibLookupFtor(DylibLookupFtor),
+        ExternalLookupFtor(ExternalLookupFtor) {}
+
+  JITSymbol findSymbolInLogicalDylib(const std::string &Name) final {
+    return DylibLookupFtor(Name);
+  }
+
+  JITSymbol findSymbol(const std::string &Name) final {
+    return ExternalLookupFtor(Name);
+  }
+
+private:
+  DylibLookupFtorT DylibLookupFtor;
+  ExternalLookupFtorT ExternalLookupFtor;
+};
+
+template <typename DylibLookupFtorT,
+          typename ExternalLookupFtorT>
+std::shared_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
+createLambdaResolver(DylibLookupFtorT DylibLookupFtor,
+                     ExternalLookupFtorT ExternalLookupFtor) {
+  using LR = LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>;
+  return make_unique<LR>(std::move(DylibLookupFtor),
+                         std::move(ExternalLookupFtor));
+}
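+
+// Example usage (illustrative sketch; assumes an existing CompileLayer, and
+// uses RTDyldMemoryManager::getSymbolAddressInProcess from
+// ExecutionEngine/RTDyldMemoryManager.h as the external fallback):
+//
+//   auto Resolver = createLambdaResolver(
+//       [&](const std::string &Name) { // Look in JIT'd symbols first.
+//         return CompileLayer.findSymbol(Name, false);
+//       },
+//       [](const std::string &Name) -> JITSymbol { // Then the host process.
+//         if (auto Addr =
+//                 RTDyldMemoryManager::getSymbolAddressInProcess(Name))
+//           return JITSymbol(Addr, JITSymbolFlags::Exported);
+//         return nullptr;
+//       });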
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
new file mode 100644
index 0000000..4117a92
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -0,0 +1,261 @@
+//===- LazyEmittingLayer.h - Lazily emit IR to lower JIT layers -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a lazy-emitting layer for the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <list>
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Lazy-emitting IR layer.
+///
+///   This layer accepts LLVM IR Modules (via addModule), but does not
+/// immediately emit them to the layer below. Instead, emission to the base layer
+/// is deferred until the first time the client requests the address (via
+/// JITSymbol::getAddress) for a symbol contained in this layer.
+template <typename BaseLayerT> class LazyEmittingLayer {
+private:
+  class EmissionDeferredModule {
+  public:
+    EmissionDeferredModule(VModuleKey K, std::unique_ptr<Module> M)
+        : K(std::move(K)), M(std::move(M)) {}
+
+    JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
+      switch (EmitState) {
+      case NotEmitted:
+        if (auto GV = searchGVs(Name, ExportedSymbolsOnly)) {
+          // Create a std::string version of Name to capture here - the argument
+          // (a StringRef) may go away before the lambda is executed.
+          // FIXME: Use capture-init when we move to C++14.
+          std::string PName = Name;
+          JITSymbolFlags Flags = JITSymbolFlags::fromGlobalValue(*GV);
+          auto GetAddress =
+            [this, ExportedSymbolsOnly, PName, &B]() -> Expected<JITTargetAddress> {
+              if (this->EmitState == Emitting)
+                return 0;
+              else if (this->EmitState == NotEmitted) {
+                this->EmitState = Emitting;
+                if (auto Err = this->emitToBaseLayer(B))
+                  return std::move(Err);
+                this->EmitState = Emitted;
+              }
+              if (auto Sym = B.findSymbolIn(K, PName, ExportedSymbolsOnly))
+                return Sym.getAddress();
+              else if (auto Err = Sym.takeError())
+                return std::move(Err);
+              else
+                llvm_unreachable("Successful symbol lookup should return "
+                                 "definition address here");
+          };
+          return JITSymbol(std::move(GetAddress), Flags);
+        } else
+          return nullptr;
+      case Emitting:
+        // Calling "emit" can trigger a recursive call to 'find' (e.g. to check
+        // for pre-existing definitions of common symbols), but any symbol in
+        // this module would already have been found internally (in the
+        // RuntimeDyld that did the lookup), so just return a nullptr here.
+        return nullptr;
+      case Emitted:
+        return B.findSymbolIn(K, Name, ExportedSymbolsOnly);
+      }
+      llvm_unreachable("Invalid emit-state.");
+    }
+
+    Error removeModuleFromBaseLayer(BaseLayerT& BaseLayer) {
+      return EmitState != NotEmitted ? BaseLayer.removeModule(K)
+                                     : Error::success();
+    }
+
+    void emitAndFinalize(BaseLayerT &BaseLayer) {
+      assert(EmitState != Emitting &&
+             "Cannot emitAndFinalize while already emitting");
+      if (EmitState == NotEmitted) {
+        EmitState = Emitting;
+        emitToBaseLayer(BaseLayer);
+        EmitState = Emitted;
+      }
+      BaseLayer.emitAndFinalize(K);
+    }
+
+  private:
+
+    const GlobalValue* searchGVs(StringRef Name,
+                                 bool ExportedSymbolsOnly) const {
+      // FIXME: We could clean all this up if we had a way to reliably demangle
+      //        names: We could just demangle name and search, rather than
+      //        mangling everything else.
+
+      // If we have already built the mangled name set then just search it.
+      if (MangledSymbols) {
+        auto VI = MangledSymbols->find(Name);
+        if (VI == MangledSymbols->end())
+          return nullptr;
+        auto GV = VI->second;
+        if (!ExportedSymbolsOnly || GV->hasDefaultVisibility())
+          return GV;
+        return nullptr;
+      }
+
+      // If we haven't built the mangled name set yet, try to build it. As an
+      // optimization this will leave MangledSymbols set to nullptr if we find
+      // Name in the process of building the set.
+      return buildMangledSymbols(Name, ExportedSymbolsOnly);
+    }
+
+    Error emitToBaseLayer(BaseLayerT &BaseLayer) {
+      // We don't need the mangled names set any more: Once we've emitted this
+      // to the base layer we'll just look for symbols there.
+      MangledSymbols.reset();
+      return BaseLayer.addModule(std::move(K), std::move(M));
+    }
+
+    // If the mangled name of the given GlobalValue matches the given search
+    // name (and its visibility conforms to the ExportedSymbolsOnly flag) then
+    // return the symbol. Otherwise, add the mangled name to the Names map and
+    // return nullptr.
+    const GlobalValue* addGlobalValue(StringMap<const GlobalValue*> &Names,
+                                      const GlobalValue &GV,
+                                      const Mangler &Mang, StringRef SearchName,
+                                      bool ExportedSymbolsOnly) const {
+      // Modules don't "provide" decls or common symbols.
+      if (GV.isDeclaration() || GV.hasCommonLinkage())
+        return nullptr;
+
+      // Mangle the GV name.
+      std::string MangledName;
+      {
+        raw_string_ostream MangledNameStream(MangledName);
+        Mang.getNameWithPrefix(MangledNameStream, &GV, false);
+      }
+
+      // Check whether this is the name we were searching for, and if it is then
+      // bail out early.
+      if (MangledName == SearchName)
+        if (!ExportedSymbolsOnly || GV.hasDefaultVisibility())
+          return &GV;
+
+      // Otherwise add this to the map for later.
+      Names[MangledName] = &GV;
+      return nullptr;
+    }
+
+    // Build the MangledSymbols map. Bails out early (with MangledSymbols left set
+    // to nullptr) if the given SearchName is found while building the map.
+    const GlobalValue* buildMangledSymbols(StringRef SearchName,
+                                           bool ExportedSymbolsOnly) const {
+      assert(!MangledSymbols && "Mangled symbols map already exists?");
+
+      auto Symbols = llvm::make_unique<StringMap<const GlobalValue*>>();
+
+      Mangler Mang;
+
+      for (const auto &GO : M->global_objects())
+          if (auto GV = addGlobalValue(*Symbols, GO, Mang, SearchName,
+                                       ExportedSymbolsOnly))
+            return GV;
+
+      MangledSymbols = std::move(Symbols);
+      return nullptr;
+    }
+
+    enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
+    VModuleKey K;
+    std::unique_ptr<Module> M;
+    mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
+  };
+
+  BaseLayerT &BaseLayer;
+  std::map<VModuleKey, std::unique_ptr<EmissionDeferredModule>> ModuleMap;
+
+public:
+
+  /// @brief Construct a lazy emitting layer.
+  LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+  /// @brief Add the given module to the lazy emitting layer.
+  Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+    assert(!ModuleMap.count(K) && "VModuleKey K already in use");
+    ModuleMap[K] =
+        llvm::make_unique<EmissionDeferredModule>(std::move(K), std::move(M));
+    return Error::success();
+  }
+
+  /// @brief Remove the module represented by the given handle.
+  ///
+  ///   This method will free the memory associated with the given module, both
+  /// in this layer, and the base layer.
+  Error removeModule(VModuleKey K) {
+    auto I = ModuleMap.find(K);
+    assert(I != ModuleMap.end() && "VModuleKey K not valid here");
+    auto EDM = std::move(I->second);
+    ModuleMap.erase(I);
+    return EDM->removeModuleFromBaseLayer(BaseLayer);
+  }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+    // Look for the symbol among existing definitions.
+    if (auto Symbol = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+      return Symbol;
+
+    // If not found then search the deferred modules. If any of these contain a
+    // definition of 'Name' then they will return a JITSymbol that will emit
+    // the corresponding module when the symbol address is requested.
+    for (auto &KV : ModuleMap)
+      if (auto Symbol = KV.second->find(Name, ExportedSymbolsOnly, BaseLayer))
+        return Symbol;
+
+    // If no definition found anywhere return a null symbol.
+    return nullptr;
+  }
+
+  /// @brief Get the address of the given symbol in the context of the
+  ///        module represented by the key K.
+  JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    assert(ModuleMap.count(K) && "VModuleKey K not valid here");
+    return ModuleMap[K]->find(Name, ExportedSymbolsOnly, BaseLayer);
+  }
+
+  /// @brief Immediately emit and finalize the module represented by the given
+  ///        key.
+  Error emitAndFinalize(VModuleKey K) {
+    assert(ModuleMap.count(K) && "VModuleKey K not valid here");
+    return ModuleMap[K]->emitAndFinalize(BaseLayer);
+  }
+};
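+
+// Example usage (illustrative sketch; assumes an existing CompileLayer and a
+// VModuleKey K):
+//
+//   LazyEmittingLayer<decltype(CompileLayer)> LazyLayer(CompileLayer);
+//   cantFail(LazyLayer.addModule(K, std::move(M))); // No compilation yet.
+//   auto Sym = LazyLayer.findSymbol("main", /*ExportedSymbolsOnly=*/true);
+//   auto AddrOrErr = Sym.getAddress(); // Triggers emission to CompileLayer.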
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h
new file mode 100644
index 0000000..b2b389a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h
@@ -0,0 +1,138 @@
+//===--- Legacy.h -- Adapters for ExecutionEngine API interop ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains adapters for interoperating between legacy ExecutionEngine symbol
+// resolution interfaces and the core ORC APIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LEGACY_H
+#define LLVM_EXECUTIONENGINE_ORC_LEGACY_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+namespace llvm {
+namespace orc {
+
+class JITSymbolResolverAdapter : public JITSymbolResolver {
+public:
+  JITSymbolResolverAdapter(ExecutionSession &ES, SymbolResolver &R);
+  Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) override;
+  Expected<LookupResult> lookup(const LookupSet &Symbols) override;
+
+private:
+  ExecutionSession &ES;
+  std::set<SymbolStringPtr> ResolvedStrings;
+  SymbolResolver &R;
+};
+
+/// @brief Use the given legacy-style FindSymbol function (i.e. a function that
+///        takes a const std::string& or StringRef and returns a JITSymbol) to
+///        find the flags for each symbol in Symbols and store their flags in
+///        SymbolFlags. If any JITSymbol returned by FindSymbol is in an error
+///        state the function returns immediately with that error, otherwise it
+///        returns the set of symbols not found.
+///
+/// Useful for implementing lookupFlags bodies that query legacy resolvers.
+template <typename FindSymbolFn>
+Expected<SymbolNameSet> lookupFlagsWithLegacyFn(SymbolFlagsMap &SymbolFlags,
+                                                const SymbolNameSet &Symbols,
+                                                FindSymbolFn FindSymbol) {
+  SymbolNameSet SymbolsNotFound;
+
+  for (auto &S : Symbols) {
+    if (JITSymbol Sym = FindSymbol(*S))
+      SymbolFlags[S] = Sym.getFlags();
+    else if (auto Err = Sym.takeError())
+      return std::move(Err);
+    else
+      SymbolsNotFound.insert(S);
+  }
+
+  return SymbolsNotFound;
+}
+
+/// @brief Use the given legacy-style FindSymbol function (i.e. a function that
+///        takes a const std::string& or StringRef and returns a JITSymbol) to
+///        find the address and flags for each symbol in Symbols and store the
+///        result in Query. If any JITSymbol returned by FindSymbol is in an
+///        error state then Query.setFailed(...) is called with that error and the
+///        function returns immediately. On success, returns the set of symbols
+///        not found.
+///
+/// Useful for implementing lookup bodies that query legacy resolvers.
+template <typename FindSymbolFn>
+SymbolNameSet lookupWithLegacyFn(AsynchronousSymbolQuery &Query,
+                                 const SymbolNameSet &Symbols,
+                                 FindSymbolFn FindSymbol) {
+  SymbolNameSet SymbolsNotFound;
+
+  for (auto &S : Symbols) {
+    if (JITSymbol Sym = FindSymbol(*S)) {
+      if (auto Addr = Sym.getAddress()) {
+        Query.setDefinition(S, JITEvaluatedSymbol(*Addr, Sym.getFlags()));
+        Query.notifySymbolFinalized();
+      } else {
+        Query.setFailed(Addr.takeError());
+        return SymbolNameSet();
+      }
+    } else if (auto Err = Sym.takeError()) {
+      Query.setFailed(std::move(Err));
+      return SymbolNameSet();
+    } else
+      SymbolsNotFound.insert(S);
+  }
+
+  return SymbolsNotFound;
+}
+
+/// @brief An ORC SymbolResolver implementation that uses a legacy
+///        findSymbol-like function to perform lookup.
+template <typename LegacyLookupFn>
+class LegacyLookupFnResolver final : public SymbolResolver {
+public:
+  using ErrorReporter = std::function<void(Error)>;
+
+  LegacyLookupFnResolver(LegacyLookupFn LegacyLookup, ErrorReporter ReportError)
+      : LegacyLookup(std::move(LegacyLookup)),
+        ReportError(std::move(ReportError)) {}
+
+  SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+                            const SymbolNameSet &Symbols) final {
+    if (auto RemainingSymbols =
+            lookupFlagsWithLegacyFn(Flags, Symbols, LegacyLookup))
+      return std::move(*RemainingSymbols);
+    else {
+      ReportError(RemainingSymbols.takeError());
+      return Symbols;
+    }
+  }
+
+  SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+                       SymbolNameSet Symbols) final {
+    return lookupWithLegacyFn(*Query, Symbols, LegacyLookup);
+  }
+
+private:
+  LegacyLookupFn LegacyLookup;
+  ErrorReporter ReportError;
+};
+
+template <typename LegacyLookupFn>
+std::shared_ptr<LegacyLookupFnResolver<LegacyLookupFn>>
+createLegacyLookupResolver(LegacyLookupFn LegacyLookup,
+                           std::function<void(Error)> ErrorReporter) {
+  return std::make_shared<LegacyLookupFnResolver<LegacyLookupFn>>(
+      std::move(LegacyLookup), std::move(ErrorReporter));
+}
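+
+// Example usage (illustrative sketch; assumes an existing CompileLayer):
+//
+//   auto Resolver = createLegacyLookupResolver(
+//       [&](const std::string &Name) {
+//         return CompileLayer.findSymbol(Name, false);
+//       },
+//       [](Error Err) {
+//         logAllUnhandledErrors(std::move(Err), errs(), "lookup failed: ");
+//       });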
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LEGACY_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h
new file mode 100644
index 0000000..eba9b95
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h
@@ -0,0 +1,45 @@
+//===------ NullResolver.h - Reject symbol lookup requests ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//   Defines SymbolResolver subclasses that reject all symbol resolution
+// requests, for clients that have no cross-object fixups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+
+namespace llvm {
+namespace orc {
+
+class NullResolver : public SymbolResolver {
+public:
+  SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+                            const SymbolNameSet &Symbols) override;
+
+  SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+                       SymbolNameSet Symbols) override;
+};
+
+/// SymbolResolver implementation that rejects all resolution requests.
+/// Useful for clients that have no cross-object fixups.
+class NullLegacyResolver : public LegacyJITSymbolResolver {
+public:
+  JITSymbol findSymbol(const std::string &Name) final;
+
+  JITSymbol findSymbolInLogicalDylib(const std::string &Name) final;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
new file mode 100644
index 0000000..cfc3922
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -0,0 +1,97 @@
+//===- ObjectTransformLayer.h - Run all objects through functor -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all objects passed in through a user-supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include <algorithm>
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Object mutating layer.
+///
+///   This layer accepts objects (via addObject), immediately applies the
+/// user-supplied functor to each one, then adds the transformed object to the
+/// layer below.
+template <typename BaseLayerT, typename TransformFtor>
+class ObjectTransformLayer {
+public:
+  /// @brief Construct an ObjectTransformLayer with the given BaseLayer
+  ObjectTransformLayer(BaseLayerT &BaseLayer,
+                       TransformFtor Transform = TransformFtor())
+      : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+  /// @brief Apply the transform functor to the object, then add the
+  ///        transformed object to the base layer.
+  ///
+  /// @return Any Error produced by the layer below.
+  template <typename ObjectPtr> Error addObject(VModuleKey K, ObjectPtr Obj) {
+    return BaseLayer.addObject(std::move(K), Transform(std::move(Obj)));
+  }
+
+  /// @brief Remove the object set associated with the VModuleKey K.
+  Error removeObject(VModuleKey K) { return BaseLayer.removeObject(K); }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Get the address of the given symbol in the context of the set of
+  ///        objects represented by the VModuleKey K. This call is forwarded to
+  ///        the base layer's implementation.
+  /// @param K The VModuleKey associated with the object set to search in.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it is found in the
+  ///         given object set.
+  JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) {
+    return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Immediately emit and finalize the object set represented by the
+  ///        given VModuleKey K.
+  Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+  /// @brief Map section addresses for the objects associated with the
+  /// VModuleKey K.
+  void mapSectionAddress(VModuleKey K, const void *LocalAddress,
+                         JITTargetAddress TargetAddr) {
+    BaseLayer.mapSectionAddress(K, LocalAddress, TargetAddr);
+  }
+
+  /// @brief Access the transform functor directly.
+  TransformFtor &getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const version).
+  const TransformFtor &getTransform() const { return Transform; }
+
+private:
+  BaseLayerT &BaseLayer;
+  TransformFtor Transform;
+};
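+
+// Example usage (illustrative sketch; ObjT is a placeholder for the base
+// layer's object pointer type, e.g. LinkLayer's ObjectPtr):
+//
+//   auto Identity = [](ObjT Obj) {
+//     // Inspect or dump the object here before handing it on unchanged.
+//     return Obj;
+//   };
+//   ObjectTransformLayer<decltype(LinkLayer), decltype(Identity)>
+//       TransformLayer(LinkLayer, std::move(Identity));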
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
new file mode 100644
index 0000000..e1b5564
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -0,0 +1,244 @@
+//===- OrcABISupport.h - ABI support code -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ABI specific code for Orc, e.g. callback assembly.
+//
+// ABI classes should be part of the JIT *target* process, not the host
+// process (except where you're doing hosted JITing and the two are one and the
+// same).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Memory.h"
+#include <algorithm>
+#include <cstdint>
+
+namespace llvm {
+namespace orc {
+
+/// Generic ORC ABI support.
+///
+/// This class can be substituted as the target architecture support class for
+/// ORC templates that require one (e.g. IndirectStubsManagers). It does not,
+/// however, support lazy JITing; any attempt to use that functionality will
+/// result in a call to llvm_unreachable.
+class OrcGenericABI {
+public:
+  static const unsigned PointerSize = sizeof(uintptr_t);
+  static const unsigned TrampolineSize = 1;
+  static const unsigned ResolverCodeSize = 1;
+
+  using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+                                            void *TrampolineId);
+
+  static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+                                void *CallbackMgr) {
+    llvm_unreachable("writeResolverCode is not supported by the generic host "
+                     "support class");
+  }
+
+  static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+                               unsigned NumTrampolines) {
+    llvm_unreachable("writeTrampolines is not supported by the generic host "
+                     "support class");
+  }
+
+  class IndirectStubsInfo {
+  public:
+    const static unsigned StubSize = 1;
+
+    unsigned getNumStubs() const { llvm_unreachable("Not supported"); }
+    void *getStub(unsigned Idx) const { llvm_unreachable("Not supported"); }
+    void **getPtr(unsigned Idx) const { llvm_unreachable("Not supported"); }
+  };
+
+  static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+                                      unsigned MinStubs, void *InitialPtrVal) {
+    llvm_unreachable("emitIndirectStubsBlock is not supported by the generic "
+                     "host support class");
+  }
+};
+
+/// @brief Provide information about stub blocks generated by the
+///        makeIndirectStubsBlock function.
+template <unsigned StubSizeVal> class GenericIndirectStubsInfo {
+public:
+  const static unsigned StubSize = StubSizeVal;
+
+  GenericIndirectStubsInfo() = default;
+  GenericIndirectStubsInfo(unsigned NumStubs, sys::OwningMemoryBlock StubsMem)
+      : NumStubs(NumStubs), StubsMem(std::move(StubsMem)) {}
+  GenericIndirectStubsInfo(GenericIndirectStubsInfo &&Other)
+      : NumStubs(Other.NumStubs), StubsMem(std::move(Other.StubsMem)) {
+    Other.NumStubs = 0;
+  }
+
+  GenericIndirectStubsInfo &operator=(GenericIndirectStubsInfo &&Other) {
+    NumStubs = Other.NumStubs;
+    Other.NumStubs = 0;
+    StubsMem = std::move(Other.StubsMem);
+    return *this;
+  }
+
+  /// @brief Number of stubs in this block.
+  unsigned getNumStubs() const { return NumStubs; }
+
+  /// @brief Get a pointer to the stub at the given index, which must be in
+  ///        the range 0 .. getNumStubs() - 1.
+  void *getStub(unsigned Idx) const {
+    return static_cast<char *>(StubsMem.base()) + Idx * StubSize;
+  }
+
+  /// @brief Get a pointer to the implementation-pointer at the given index,
+  ///        which must be in the range 0 .. getNumStubs() - 1.
+  void **getPtr(unsigned Idx) const {
+    char *PtrsBase = static_cast<char *>(StubsMem.base()) + NumStubs * StubSize;
+    return reinterpret_cast<void **>(PtrsBase) + Idx;
+  }
+
+private:
+  unsigned NumStubs = 0;
+  sys::OwningMemoryBlock StubsMem;
+};
+
+class OrcAArch64 {
+public:
+  static const unsigned PointerSize = 8;
+  static const unsigned TrampolineSize = 12;
+  static const unsigned ResolverCodeSize = 0x120;
+
+  using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+  using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+                                            void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+  ///        responsible for allocating the memory and setting permissions.
+  static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+                                void *CallbackMgr);
+
+  /// @brief Write the requested number of trampolines into the given memory,
+  ///        which must be big enough to hold 1 pointer, plus NumTrampolines
+  ///        trampolines.
+  static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+                               unsigned NumTrampolines);
+
+  /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+  ///        the nearest page size.
+  ///
+  ///   E.g. Asking for 4 stubs on AArch64, where stubs are 8 bytes, with 4k
+  /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+  /// will return a block of 1024 (two pages' worth).
+  static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+                                      unsigned MinStubs, void *InitialPtrVal);
+};
+
+/// @brief X86_64 code that's common to all ABIs.
+///
+/// X86_64 supports lazy JITing.
+class OrcX86_64_Base {
+public:
+  static const unsigned PointerSize = 8;
+  static const unsigned TrampolineSize = 8;
+
+  using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+  /// @brief Write the requested number of trampolines into the given memory,
+  ///        which must be big enough to hold 1 pointer, plus NumTrampolines
+  ///        trampolines.
+  static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+                               unsigned NumTrampolines);
+
+  /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+  ///        the nearest page size.
+  ///
+  ///   E.g. Asking for 4 stubs on x86-64, where stubs are 8 bytes, with 4k
+  /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+  /// will return a block of 1024 (two pages' worth).
+  static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+                                      unsigned MinStubs, void *InitialPtrVal);
+};
+
+/// @brief X86_64 support for SysV ABI (Linux, MacOSX).
+///
+/// X86_64_SysV supports lazy JITing.
+class OrcX86_64_SysV : public OrcX86_64_Base {
+public:
+  static const unsigned ResolverCodeSize = 0x6C;
+
+  using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+                                            void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+  ///        responsible for allocating the memory and setting permissions.
+  static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+                                void *CallbackMgr);
+};
+
+/// @brief X86_64 support for Win32.
+///
+/// X86_64_Win32 supports lazy JITing.
+class OrcX86_64_Win32 : public OrcX86_64_Base {
+public:
+  static const unsigned ResolverCodeSize = 0x74;
+
+  using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+                                            void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+  ///        responsible for allocating the memory and setting permissions.
+  static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+                                void *CallbackMgr);
+};
+
+/// @brief I386 support.
+///
+/// I386 supports lazy JITing.
+class OrcI386 {
+public:
+  static const unsigned PointerSize = 4;
+  static const unsigned TrampolineSize = 8;
+  static const unsigned ResolverCodeSize = 0x4a;
+
+  using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+  using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+                                            void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+  ///        responsible for allocating the memory and setting permissions.
+  static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+                                void *CallbackMgr);
+
+  /// @brief Write the requested number of trampolines into the given memory,
+  ///        which must be big enough to hold 1 pointer, plus NumTrampolines
+  ///        trampolines.
+  static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+                               unsigned NumTrampolines);
+
+  /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+  ///        the nearest page size.
+  ///
+  ///   E.g. Asking for 4 stubs on i386, where stubs are 8 bytes, with 4k
+  /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+  /// will return a block of 1024 (two pages' worth).
+  static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+                                      unsigned MinStubs, void *InitialPtrVal);
+};
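+
+// Example (illustrative sketch): clients typically select one of the ABI
+// classes above at compile time and use it as the TargetT argument of
+// templates such as OrcRemoteTargetServer:
+//
+//   #if defined(__x86_64__) && !defined(_WIN32)
+//   using HostABI = OrcX86_64_SysV;
+//   #elif defined(__aarch64__)
+//   using HostABI = OrcAArch64;
+//   #endif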
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h
new file mode 100644
index 0000000..c2ff41e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h
@@ -0,0 +1,70 @@
+//===------ OrcError.h - Error codes and utilities for Orc --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//   Define an error category, error codes, and helper utilities for Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
+
+#include "llvm/Support/Error.h"
+#include <system_error>
+
+namespace llvm {
+namespace orc {
+
+enum class OrcErrorCode : int {
+  // RPC Errors
+  DuplicateDefinition = 1,
+  JITSymbolNotFound,
+  RemoteAllocatorDoesNotExist,
+  RemoteAllocatorIdAlreadyInUse,
+  RemoteMProtectAddrUnrecognized,
+  RemoteIndirectStubsOwnerDoesNotExist,
+  RemoteIndirectStubsOwnerIdAlreadyInUse,
+  RPCConnectionClosed,
+  RPCCouldNotNegotiateFunction,
+  RPCResponseAbandoned,
+  UnexpectedRPCCall,
+  UnexpectedRPCResponse,
+  UnknownErrorCodeFromRemote,
+  UnknownResourceHandle
+};
+
+std::error_code orcError(OrcErrorCode ErrCode);
+
+class DuplicateDefinition : public ErrorInfo<DuplicateDefinition> {
+public:
+  static char ID;
+
+  DuplicateDefinition(std::string SymbolName);
+  std::error_code convertToErrorCode() const override;
+  void log(raw_ostream &OS) const override;
+  const std::string &getSymbolName() const;
+private:
+  std::string SymbolName;
+};
+
+class JITSymbolNotFound : public ErrorInfo<JITSymbolNotFound> {
+public:
+  static char ID;
+
+  JITSymbolNotFound(std::string SymbolName);
+  std::error_code convertToErrorCode() const override;
+  void log(raw_ostream &OS) const override;
+  const std::string &getSymbolName() const;
+private:
+  std::string SymbolName;
+};
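+
+// Example (illustrative sketch): these typed errors can be matched with
+// llvm::handleAllErrors, with a generic handler as the fallback:
+//
+//   handleAllErrors(std::move(Err),
+//                   [](const JITSymbolNotFound &E) {
+//                     errs() << "missing: " << E.getSymbolName() << "\n";
+//                   },
+//                   [](const ErrorInfoBase &EIB) { EIB.log(errs()); });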
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
new file mode 100644
index 0000000..7179e5f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -0,0 +1,672 @@
+//===- OrcRemoteTargetClient.h - Orc Remote-target Client -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrcRemoteTargetClient class and helpers. This class
+// can be used to communicate over a RawByteChannel with an
+// OrcRemoteTargetServer instance to support remote-JITing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#define DEBUG_TYPE "orc-remote"
+
+namespace llvm {
+namespace orc {
+namespace remote {
+
+/// This class provides utilities (including memory manager, indirect stubs
+/// manager, and compile callback manager types) that support remote JITing
+/// in ORC.
+///
+/// Each of the utility classes talks to a JIT server (an instance of the
+/// OrcRemoteTargetServer class) via an RPC system (see RPCUtils.h) to carry out
+/// its actions.
+class OrcRemoteTargetClient
+    : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+  /// Remote-mapped RuntimeDyld-compatible memory manager.
+  class RemoteRTDyldMemoryManager : public RuntimeDyld::MemoryManager {
+    friend class OrcRemoteTargetClient;
+
+  public:
+    ~RemoteRTDyldMemoryManager() {
+      Client.destroyRemoteAllocator(Id);
+      DEBUG(dbgs() << "Destroyed remote allocator " << Id << "\n");
+    }
+
+    RemoteRTDyldMemoryManager(const RemoteRTDyldMemoryManager &) = delete;
+    RemoteRTDyldMemoryManager &
+    operator=(const RemoteRTDyldMemoryManager &) = delete;
+    RemoteRTDyldMemoryManager(RemoteRTDyldMemoryManager &&) = default;
+    RemoteRTDyldMemoryManager &
+    operator=(RemoteRTDyldMemoryManager &&) = default;
+
+    uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                                 unsigned SectionID,
+                                 StringRef SectionName) override {
+      Unmapped.back().CodeAllocs.emplace_back(Size, Alignment);
+      uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+          Unmapped.back().CodeAllocs.back().getLocalAddress());
+      DEBUG(dbgs() << "Allocator " << Id << " allocated code for "
+                   << SectionName << ": " << Alloc << " (" << Size
+                   << " bytes, alignment " << Alignment << ")\n");
+      return Alloc;
+    }
+
+    uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                                 unsigned SectionID, StringRef SectionName,
+                                 bool IsReadOnly) override {
+      if (IsReadOnly) {
+        Unmapped.back().RODataAllocs.emplace_back(Size, Alignment);
+        uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+            Unmapped.back().RODataAllocs.back().getLocalAddress());
+        DEBUG(dbgs() << "Allocator " << Id << " allocated ro-data for "
+                     << SectionName << ": " << Alloc << " (" << Size
+                     << " bytes, alignment " << Alignment << ")\n");
+        return Alloc;
+      } // else...
+
+      Unmapped.back().RWDataAllocs.emplace_back(Size, Alignment);
+      uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+          Unmapped.back().RWDataAllocs.back().getLocalAddress());
+      DEBUG(dbgs() << "Allocator " << Id << " allocated rw-data for "
+                   << SectionName << ": " << Alloc << " (" << Size
+                   << " bytes, alignment " << Alignment << ")\n");
+      return Alloc;
+    }
+
+    void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+                                uintptr_t RODataSize, uint32_t RODataAlign,
+                                uintptr_t RWDataSize,
+                                uint32_t RWDataAlign) override {
+      Unmapped.push_back(ObjectAllocs());
+
+      DEBUG(dbgs() << "Allocator " << Id << " reserved:\n");
+
+      if (CodeSize != 0) {
+        Unmapped.back().RemoteCodeAddr =
+            Client.reserveMem(Id, CodeSize, CodeAlign);
+
+        DEBUG(dbgs() << "  code: "
+                     << format("0x%016x", Unmapped.back().RemoteCodeAddr)
+                     << " (" << CodeSize << " bytes, alignment " << CodeAlign
+                     << ")\n");
+      }
+
+      if (RODataSize != 0) {
+        Unmapped.back().RemoteRODataAddr =
+            Client.reserveMem(Id, RODataSize, RODataAlign);
+
+        DEBUG(dbgs() << "  ro-data: "
+                     << format("0x%016x", Unmapped.back().RemoteRODataAddr)
+                     << " (" << RODataSize << " bytes, alignment "
+                     << RODataAlign << ")\n");
+      }
+
+      if (RWDataSize != 0) {
+        Unmapped.back().RemoteRWDataAddr =
+            Client.reserveMem(Id, RWDataSize, RWDataAlign);
+
+        DEBUG(dbgs() << "  rw-data: "
+                     << format("0x%016x", Unmapped.back().RemoteRWDataAddr)
+                     << " (" << RWDataSize << " bytes, alignment "
+                     << RWDataAlign << ")\n");
+      }
+    }
+
+    bool needsToReserveAllocationSpace() override { return true; }
+
+    void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+                          size_t Size) override {
+      UnfinalizedEHFrames.push_back({LoadAddr, Size});
+    }
+
+    void deregisterEHFrames() override {
+      for (auto &Frame : RegisteredEHFrames) {
+        // FIXME: Add error poll.
+        Client.deregisterEHFrames(Frame.Addr, Frame.Size);
+      }
+    }
+
+    void notifyObjectLoaded(RuntimeDyld &Dyld,
+                            const object::ObjectFile &Obj) override {
+      DEBUG(dbgs() << "Allocator " << Id << " applied mappings:\n");
+      for (auto &ObjAllocs : Unmapped) {
+        mapAllocsToRemoteAddrs(Dyld, ObjAllocs.CodeAllocs,
+                               ObjAllocs.RemoteCodeAddr);
+        mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RODataAllocs,
+                               ObjAllocs.RemoteRODataAddr);
+        mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RWDataAllocs,
+                               ObjAllocs.RemoteRWDataAddr);
+        Unfinalized.push_back(std::move(ObjAllocs));
+      }
+      Unmapped.clear();
+    }
+
+    bool finalizeMemory(std::string *ErrMsg = nullptr) override {
+      DEBUG(dbgs() << "Allocator " << Id << " finalizing:\n");
+
+      for (auto &ObjAllocs : Unfinalized) {
+        if (copyAndProtect(ObjAllocs.CodeAllocs, ObjAllocs.RemoteCodeAddr,
+                           sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+          return true;
+
+        if (copyAndProtect(ObjAllocs.RODataAllocs, ObjAllocs.RemoteRODataAddr,
+                           sys::Memory::MF_READ))
+          return true;
+
+        if (copyAndProtect(ObjAllocs.RWDataAllocs, ObjAllocs.RemoteRWDataAddr,
+                           sys::Memory::MF_READ | sys::Memory::MF_WRITE))
+          return true;
+      }
+      Unfinalized.clear();
+
+      for (auto &EHFrame : UnfinalizedEHFrames) {
+        if (auto Err = Client.registerEHFrames(EHFrame.Addr, EHFrame.Size)) {
+          // FIXME: Replace this once finalizeMemory can return an Error.
+          handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+            if (ErrMsg) {
+              raw_string_ostream ErrOut(*ErrMsg);
+              EIB.log(ErrOut);
+            }
+          });
+          return true; // finalizeMemory reports failure by returning true.
+        }
+      }
+      RegisteredEHFrames = std::move(UnfinalizedEHFrames);
+      UnfinalizedEHFrames = {};
+
+      return false;
+    }
+
+  private:
+    class Alloc {
+    public:
+      Alloc(uint64_t Size, unsigned Align)
+          : Size(Size), Align(Align), Contents(new char[Size + Align - 1]) {}
+
+      Alloc(const Alloc &) = delete;
+      Alloc &operator=(const Alloc &) = delete;
+      Alloc(Alloc &&) = default;
+      Alloc &operator=(Alloc &&) = default;
+
+      uint64_t getSize() const { return Size; }
+
+      unsigned getAlign() const { return Align; }
+
+      char *getLocalAddress() const {
+        uintptr_t LocalAddr = reinterpret_cast<uintptr_t>(Contents.get());
+        LocalAddr = alignTo(LocalAddr, Align);
+        return reinterpret_cast<char *>(LocalAddr);
+      }
+
+      void setRemoteAddress(JITTargetAddress RemoteAddr) {
+        this->RemoteAddr = RemoteAddr;
+      }
+
+      JITTargetAddress getRemoteAddress() const { return RemoteAddr; }
+
+    private:
+      uint64_t Size;
+      unsigned Align;
+      std::unique_ptr<char[]> Contents;
+      JITTargetAddress RemoteAddr = 0;
+    };
+
+    struct ObjectAllocs {
+      ObjectAllocs() = default;
+      ObjectAllocs(const ObjectAllocs &) = delete;
+      ObjectAllocs &operator=(const ObjectAllocs &) = delete;
+      ObjectAllocs(ObjectAllocs &&) = default;
+      ObjectAllocs &operator=(ObjectAllocs &&) = default;
+
+      JITTargetAddress RemoteCodeAddr = 0;
+      JITTargetAddress RemoteRODataAddr = 0;
+      JITTargetAddress RemoteRWDataAddr = 0;
+      std::vector<Alloc> CodeAllocs, RODataAllocs, RWDataAllocs;
+    };
+
+    RemoteRTDyldMemoryManager(OrcRemoteTargetClient &Client,
+                              ResourceIdMgr::ResourceId Id)
+        : Client(Client), Id(Id) {
+      DEBUG(dbgs() << "Created remote allocator " << Id << "\n");
+    }
+
+    // Maps all allocations in Allocs to aligned blocks
+    void mapAllocsToRemoteAddrs(RuntimeDyld &Dyld, std::vector<Alloc> &Allocs,
+                                JITTargetAddress NextAddr) {
+      for (auto &Alloc : Allocs) {
+        NextAddr = alignTo(NextAddr, Alloc.getAlign());
+        Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextAddr);
+        DEBUG(dbgs() << "     " << static_cast<void *>(Alloc.getLocalAddress())
+                     << " -> " << format("0x%016x", NextAddr) << "\n");
+        Alloc.setRemoteAddress(NextAddr);
+
+        // Only advance NextAddr if it was non-null to begin with,
+        // otherwise leave it as null.
+        if (NextAddr)
+          NextAddr += Alloc.getSize();
+      }
+    }
+
+    // Copies data for each alloc in the list, then sets permissions on the
+    // segment.
+    bool copyAndProtect(const std::vector<Alloc> &Allocs,
+                        JITTargetAddress RemoteSegmentAddr,
+                        unsigned Permissions) {
+      if (RemoteSegmentAddr) {
+        assert(!Allocs.empty() && "No sections in allocated segment");
+
+        for (auto &Alloc : Allocs) {
+          DEBUG(dbgs() << "  copying section: "
+                       << static_cast<void *>(Alloc.getLocalAddress()) << " -> "
+                       << format("0x%016x", Alloc.getRemoteAddress()) << " ("
+                       << Alloc.getSize() << " bytes)\n";);
+
+          if (Client.writeMem(Alloc.getRemoteAddress(), Alloc.getLocalAddress(),
+                              Alloc.getSize()))
+            return true;
+        }
+
+        DEBUG(dbgs() << "  setting "
+                     << (Permissions & sys::Memory::MF_READ ? 'R' : '-')
+                     << (Permissions & sys::Memory::MF_WRITE ? 'W' : '-')
+                     << (Permissions & sys::Memory::MF_EXEC ? 'X' : '-')
+                     << " permissions on block: "
+                     << format("0x%016x", RemoteSegmentAddr) << "\n");
+        if (Client.setProtections(Id, RemoteSegmentAddr, Permissions))
+          return true;
+      }
+      return false;
+    }
+
+    OrcRemoteTargetClient &Client;
+    ResourceIdMgr::ResourceId Id;
+    std::vector<ObjectAllocs> Unmapped;
+    std::vector<ObjectAllocs> Unfinalized;
+
+    struct EHFrame {
+      JITTargetAddress Addr;
+      uint64_t Size;
+    };
+    std::vector<EHFrame> UnfinalizedEHFrames;
+    std::vector<EHFrame> RegisteredEHFrames;
+  };
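+
+  // Example (illustrative sketch): this memory manager is obtained from the
+  // client and handed to a RuntimeDyld instance, which then drives the
+  // allocate / notifyObjectLoaded / finalizeMemory sequence above. Assuming
+  // Client and a JITSymbolResolver named Resolver already exist:
+  //
+  //   auto MemMgr = cantFail(Client.createRemoteMemoryManager());
+  //   RuntimeDyld Dyld(*MemMgr, Resolver);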
+
+  /// Remote indirect stubs manager.
+  class RemoteIndirectStubsManager : public IndirectStubsManager {
+  public:
+    RemoteIndirectStubsManager(OrcRemoteTargetClient &Client,
+                               ResourceIdMgr::ResourceId Id)
+        : Client(Client), Id(Id) {}
+
+    ~RemoteIndirectStubsManager() override {
+      Client.destroyIndirectStubsManager(Id);
+    }
+
+    Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+                     JITSymbolFlags StubFlags) override {
+      if (auto Err = reserveStubs(1))
+        return Err;
+
+      return createStubInternal(StubName, StubAddr, StubFlags);
+    }
+
+    Error createStubs(const StubInitsMap &StubInits) override {
+      if (auto Err = reserveStubs(StubInits.size()))
+        return Err;
+
+      for (auto &Entry : StubInits)
+        if (auto Err = createStubInternal(Entry.first(), Entry.second.first,
+                                          Entry.second.second))
+          return Err;
+
+      return Error::success();
+    }
+
+    JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+      auto I = StubIndexes.find(Name);
+      if (I == StubIndexes.end())
+        return nullptr;
+      auto Key = I->second.first;
+      auto Flags = I->second.second;
+      auto StubSymbol = JITSymbol(getStubAddr(Key), Flags);
+      if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
+        return nullptr;
+      return StubSymbol;
+    }
+
+    JITSymbol findPointer(StringRef Name) override {
+      auto I = StubIndexes.find(Name);
+      if (I == StubIndexes.end())
+        return nullptr;
+      auto Key = I->second.first;
+      auto Flags = I->second.second;
+      return JITSymbol(getPtrAddr(Key), Flags);
+    }
+
+    Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
+      auto I = StubIndexes.find(Name);
+      assert(I != StubIndexes.end() && "No stub pointer for symbol");
+      auto Key = I->second.first;
+      return Client.writePointer(getPtrAddr(Key), NewAddr);
+    }
+
+  private:
+    struct RemoteIndirectStubsInfo {
+      JITTargetAddress StubBase;
+      JITTargetAddress PtrBase;
+      unsigned NumStubs;
+    };
+
+    using StubKey = std::pair<uint16_t, uint16_t>;
+
+    Error reserveStubs(unsigned NumStubs) {
+      if (NumStubs <= FreeStubs.size())
+        return Error::success();
+
+      unsigned NewStubsRequired = NumStubs - FreeStubs.size();
+      JITTargetAddress StubBase;
+      JITTargetAddress PtrBase;
+      unsigned NumStubsEmitted;
+
+      if (auto StubInfoOrErr = Client.emitIndirectStubs(Id, NewStubsRequired))
+        std::tie(StubBase, PtrBase, NumStubsEmitted) = *StubInfoOrErr;
+      else
+        return StubInfoOrErr.takeError();
+
+      unsigned NewBlockId = RemoteIndirectStubsInfos.size();
+      RemoteIndirectStubsInfos.push_back({StubBase, PtrBase, NumStubsEmitted});
+
+      for (unsigned I = 0; I < NumStubsEmitted; ++I)
+        FreeStubs.push_back(std::make_pair(NewBlockId, I));
+
+      return Error::success();
+    }
+
+    Error createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
+                             JITSymbolFlags StubFlags) {
+      auto Key = FreeStubs.back();
+      FreeStubs.pop_back();
+      StubIndexes[StubName] = std::make_pair(Key, StubFlags);
+      return Client.writePointer(getPtrAddr(Key), InitAddr);
+    }
+
+    JITTargetAddress getStubAddr(StubKey K) {
+      assert(RemoteIndirectStubsInfos[K.first].StubBase != 0 &&
+             "Missing stub address");
+      return RemoteIndirectStubsInfos[K.first].StubBase +
+             K.second * Client.getIndirectStubSize();
+    }
+
+    JITTargetAddress getPtrAddr(StubKey K) {
+      assert(RemoteIndirectStubsInfos[K.first].PtrBase != 0 &&
+             "Missing pointer address");
+      return RemoteIndirectStubsInfos[K.first].PtrBase +
+             K.second * Client.getPointerSize();
+    }
+
+    OrcRemoteTargetClient &Client;
+    ResourceIdMgr::ResourceId Id;
+    std::vector<RemoteIndirectStubsInfo> RemoteIndirectStubsInfos;
+    std::vector<StubKey> FreeStubs;
+    StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
+  };
+
+  /// Remote compile callback manager.
+  class RemoteCompileCallbackManager : public JITCompileCallbackManager {
+  public:
+    RemoteCompileCallbackManager(OrcRemoteTargetClient &Client,
+                                 JITTargetAddress ErrorHandlerAddress)
+        : JITCompileCallbackManager(ErrorHandlerAddress), Client(Client) {}
+
+  private:
+    Error grow() override {
+      JITTargetAddress BlockAddr = 0;
+      uint32_t NumTrampolines = 0;
+      if (auto TrampolineInfoOrErr = Client.emitTrampolineBlock())
+        std::tie(BlockAddr, NumTrampolines) = *TrampolineInfoOrErr;
+      else
+        return TrampolineInfoOrErr.takeError();
+
+      uint32_t TrampolineSize = Client.getTrampolineSize();
+      for (unsigned I = 0; I < NumTrampolines; ++I)
+        this->AvailableTrampolines.push_back(BlockAddr + (I * TrampolineSize));
+
+      return Error::success();
+    }
+
+    OrcRemoteTargetClient &Client;
+  };
+
+  /// Create an OrcRemoteTargetClient.
+  /// Channel is the RawByteChannel to communicate on. It is assumed that
+  /// the channel is ready to be read from and written to.
+  static Expected<std::unique_ptr<OrcRemoteTargetClient>>
+  Create(rpc::RawByteChannel &Channel, std::function<void(Error)> ReportError) {
+    Error Err = Error::success();
+    auto Client = std::unique_ptr<OrcRemoteTargetClient>(
+        new OrcRemoteTargetClient(Channel, std::move(ReportError), Err));
+    if (Err)
+      return std::move(Err);
+    return std::move(Client);
+  }
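+
+  // Example (illustrative sketch): a minimal client setup, assuming a
+  // connected rpc::RawByteChannel named Channel and the address MainAddr of a
+  // main-like function already available on the remote:
+  //
+  //   auto Client = cantFail(OrcRemoteTargetClient::Create(
+  //       Channel, [](Error Err) {
+  //         logAllUnhandledErrors(std::move(Err), errs(), "client: ");
+  //       }));
+  //   int Result = cantFail(Client->callMain(MainAddr, {"arg1", "arg2"}));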
+
+  /// Call the int(void) function at the given address in the target and return
+  /// its result.
+  Expected<int> callIntVoid(JITTargetAddress Addr) {
+    DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr) << "\n");
+    return callB<exec::CallIntVoid>(Addr);
+  }
+
+  /// Call the int(int, char*[]) function at the given address in the target and
+  /// return its result.
+  Expected<int> callMain(JITTargetAddress Addr,
+                         const std::vector<std::string> &Args) {
+    DEBUG(dbgs() << "Calling int(*)(int, char*[]) " << format("0x%016x", Addr)
+                 << "\n");
+    return callB<exec::CallMain>(Addr, Args);
+  }
+
+  /// Call the void() function at the given address in the target and wait for
+  /// it to finish.
+  Error callVoidVoid(JITTargetAddress Addr) {
+    DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
+                 << "\n");
+    return callB<exec::CallVoidVoid>(Addr);
+  }
+
+  /// Create a RemoteRTDyldMemoryManager which will allocate its memory on
+  /// the remote target.
+  Expected<std::unique_ptr<RemoteRTDyldMemoryManager>>
+  createRemoteMemoryManager() {
+    auto Id = AllocatorIds.getNext();
+    if (auto Err = callB<mem::CreateRemoteAllocator>(Id))
+      return std::move(Err);
+    return std::unique_ptr<RemoteRTDyldMemoryManager>(
+        new RemoteRTDyldMemoryManager(*this, Id));
+  }
+
+  /// Create a RemoteIndirectStubsManager that will allocate stubs on the
+  /// remote target.
+  Expected<std::unique_ptr<RemoteIndirectStubsManager>>
+  createIndirectStubsManager() {
+    auto Id = IndirectStubOwnerIds.getNext();
+    if (auto Err = callB<stubs::CreateIndirectStubsOwner>(Id))
+      return std::move(Err);
+    return llvm::make_unique<RemoteIndirectStubsManager>(*this, Id);
+  }
+
+  Expected<RemoteCompileCallbackManager &>
+  enableCompileCallbacks(JITTargetAddress ErrorHandlerAddress) {
+    // Emit the resolver block on the JIT server.
+    if (auto Err = callB<stubs::EmitResolverBlock>())
+      return std::move(Err);
+
+    // Create the callback manager.
+    CallbackManager.emplace(*this, ErrorHandlerAddress);
+    RemoteCompileCallbackManager &Mgr = *CallbackManager;
+    return Mgr;
+  }
+
+  /// Search for symbols in the remote process. Note: This should be used by
+  /// symbol resolvers *after* they've searched the local symbol table in the
+  /// JIT stack.
+  Expected<JITTargetAddress> getSymbolAddress(StringRef Name) {
+    return callB<utils::GetSymbolAddress>(Name);
+  }
+
+  /// Get the triple for the remote target.
+  const std::string &getTargetTriple() const { return RemoteTargetTriple; }
+
+  Error terminateSession() { return callB<utils::TerminateSession>(); }
+
+private:
+  OrcRemoteTargetClient(rpc::RawByteChannel &Channel,
+                        std::function<void(Error)> ReportError, Error &Err)
+      : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(Channel, true),
+        ReportError(std::move(ReportError)) {
+    ErrorAsOutParameter EAO(&Err);
+
+    addHandler<utils::RequestCompile>(
+        [this](JITTargetAddress Addr) -> JITTargetAddress {
+          if (CallbackManager)
+            return CallbackManager->executeCompileCallback(Addr);
+          return 0;
+        });
+
+    if (auto RIOrErr = callB<utils::GetRemoteInfo>()) {
+      std::tie(RemoteTargetTriple, RemotePointerSize, RemotePageSize,
+               RemoteTrampolineSize, RemoteIndirectStubSize) = *RIOrErr;
+      Err = Error::success();
+    } else
+      Err = RIOrErr.takeError();
+  }
+
+  void deregisterEHFrames(JITTargetAddress Addr, uint32_t Size) {
+    if (auto Err = callB<eh::DeregisterEHFrames>(Addr, Size))
+      ReportError(std::move(Err));
+  }
+
+  void destroyRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+    if (auto Err = callB<mem::DestroyRemoteAllocator>(Id)) {
+      // FIXME: This will be triggered by a removeModuleSet call: Propagate
+      //        error return up through that.
+      llvm_unreachable("Failed to destroy remote allocator.");
+    }
+    AllocatorIds.release(Id);
+  }
+
+  void destroyIndirectStubsManager(ResourceIdMgr::ResourceId Id) {
+    IndirectStubOwnerIds.release(Id);
+    if (auto Err = callB<stubs::DestroyIndirectStubsOwner>(Id))
+      ReportError(std::move(Err));
+  }
+
+  Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
+  emitIndirectStubs(ResourceIdMgr::ResourceId Id, uint32_t NumStubsRequired) {
+    return callB<stubs::EmitIndirectStubs>(Id, NumStubsRequired);
+  }
+
+  Expected<std::tuple<JITTargetAddress, uint32_t>> emitTrampolineBlock() {
+    return callB<stubs::EmitTrampolineBlock>();
+  }
+
+  uint32_t getIndirectStubSize() const { return RemoteIndirectStubSize; }
+  uint32_t getPageSize() const { return RemotePageSize; }
+  uint32_t getPointerSize() const { return RemotePointerSize; }
+
+  uint32_t getTrampolineSize() const { return RemoteTrampolineSize; }
+
+  Expected<std::vector<uint8_t>> readMem(char *Dst, JITTargetAddress Src,
+                                         uint64_t Size) {
+    return callB<mem::ReadMem>(Src, Size);
+  }
+
+  Error registerEHFrames(JITTargetAddress &RAddr, uint32_t Size) {
+    // FIXME: Duplicate error and report it via ReportError too?
+    return callB<eh::RegisterEHFrames>(RAddr, Size);
+  }
+
+  JITTargetAddress reserveMem(ResourceIdMgr::ResourceId Id, uint64_t Size,
+                              uint32_t Align) {
+    if (auto AddrOrErr = callB<mem::ReserveMem>(Id, Size, Align))
+      return *AddrOrErr;
+    else {
+      ReportError(AddrOrErr.takeError());
+      return 0;
+    }
+  }
+
+  bool setProtections(ResourceIdMgr::ResourceId Id,
+                      JITTargetAddress RemoteSegAddr, unsigned ProtFlags) {
+    if (auto Err = callB<mem::SetProtections>(Id, RemoteSegAddr, ProtFlags)) {
+      ReportError(std::move(Err));
+      return true;
+    } else
+      return false;
+  }
+
+  bool writeMem(JITTargetAddress Addr, const char *Src, uint64_t Size) {
+    if (auto Err = callB<mem::WriteMem>(DirectBufferWriter(Src, Addr, Size))) {
+      ReportError(std::move(Err));
+      return true;
+    } else
+      return false;
+  }
+
+  Error writePointer(JITTargetAddress Addr, JITTargetAddress PtrVal) {
+    return callB<mem::WritePtr>(Addr, PtrVal);
+  }
+
+  static Error doNothing() { return Error::success(); }
+
+  std::function<void(Error)> ReportError;
+  std::string RemoteTargetTriple;
+  uint32_t RemotePointerSize = 0;
+  uint32_t RemotePageSize = 0;
+  uint32_t RemoteTrampolineSize = 0;
+  uint32_t RemoteIndirectStubSize = 0;
+  ResourceIdMgr AllocatorIds, IndirectStubOwnerIds;
+  Optional<RemoteCompileCallbackManager> CallbackManager;
+};
+
+} // end namespace remote
+} // end namespace orc
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
new file mode 100644
index 0000000..bc0da0f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
@@ -0,0 +1,377 @@
+//===- OrcRemoteTargetRPCAPI.h - Orc Remote-target RPC API ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Orc remote-target RPC API. It should not be used
+// directly, but is used by the RemoteTargetClient and RemoteTargetServer
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/RPCUtils.h"
+#include "llvm/ExecutionEngine/Orc/RawByteChannel.h"
+
+namespace llvm {
+namespace orc {
+
+namespace remote {
+
+/// Template error for missing resources.
+template <typename ResourceIdT>
+class ResourceNotFound
+  : public ErrorInfo<ResourceNotFound<ResourceIdT>> {
+public:
+  static char ID;
+
+  ResourceNotFound(ResourceIdT ResourceId,
+                   std::string ResourceDescription = "")
+    : ResourceId(std::move(ResourceId)),
+      ResourceDescription(std::move(ResourceDescription)) {}
+
+  std::error_code convertToErrorCode() const override {
+    return orcError(OrcErrorCode::UnknownResourceHandle);
+  }
+
+  void log(raw_ostream &OS) const override {
+    OS << (ResourceDescription.empty()
+               ? "Remote resource with id"
+               : ResourceDescription)
+       << " " << ResourceId << " not found";
+  }
+
+private:
+  ResourceIdT ResourceId;
+  std::string ResourceDescription;
+};
+
+template <typename ResourceIdT>
+char ResourceNotFound<ResourceIdT>::ID = 0;
+
+class DirectBufferWriter {
+public:
+  DirectBufferWriter() = default;
+  DirectBufferWriter(const char *Src, JITTargetAddress Dst, uint64_t Size)
+      : Src(Src), Dst(Dst), Size(Size) {}
+
+  const char *getSrc() const { return Src; }
+  JITTargetAddress getDst() const { return Dst; }
+  uint64_t getSize() const { return Size; }
+
+private:
+  const char *Src = nullptr;
+  JITTargetAddress Dst = 0;
+  uint64_t Size = 0;
+};
+
+} // end namespace remote
+
+namespace rpc {
+
+template <>
+class RPCTypeName<JITSymbolFlags> {
+public:
+  static const char *getName() { return "JITSymbolFlags"; }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, JITSymbolFlags> {
+public:
+  static Error serialize(ChannelT &C, const JITSymbolFlags &Flags) {
+    return serializeSeq(C, static_cast<JITSymbolFlags::UnderlyingType>(Flags),
+                        Flags.getTargetFlags());
+  }
+
+  static Error deserialize(ChannelT &C, JITSymbolFlags &Flags) {
+    JITSymbolFlags::UnderlyingType JITFlags;
+    JITSymbolFlags::TargetFlagsType TargetFlags;
+    if (auto Err = deserializeSeq(C, JITFlags, TargetFlags))
+      return Err;
+    Flags = JITSymbolFlags(static_cast<JITSymbolFlags::FlagNames>(JITFlags),
+                           TargetFlags);
+    return Error::success();
+  }
+};
+
+template <> class RPCTypeName<remote::DirectBufferWriter> {
+public:
+  static const char *getName() { return "DirectBufferWriter"; }
+};
+
+template <typename ChannelT>
+class SerializationTraits<
+    ChannelT, remote::DirectBufferWriter, remote::DirectBufferWriter,
+    typename std::enable_if<
+        std::is_base_of<RawByteChannel, ChannelT>::value>::type> {
+public:
+  static Error serialize(ChannelT &C, const remote::DirectBufferWriter &DBW) {
+    if (auto EC = serializeSeq(C, DBW.getDst()))
+      return EC;
+    if (auto EC = serializeSeq(C, DBW.getSize()))
+      return EC;
+    return C.appendBytes(DBW.getSrc(), DBW.getSize());
+  }
+
+  static Error deserialize(ChannelT &C, remote::DirectBufferWriter &DBW) {
+    JITTargetAddress Dst;
+    if (auto EC = deserializeSeq(C, Dst))
+      return EC;
+    uint64_t Size;
+    if (auto EC = deserializeSeq(C, Size))
+      return EC;
+    char *Addr = reinterpret_cast<char *>(static_cast<uintptr_t>(Dst));
+
+    DBW = remote::DirectBufferWriter(nullptr, Dst, Size);
+
+    return C.readBytes(Addr, Size);
+  }
+};
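+
+// Example (illustrative): the wire format produced by serialize() above is
+// simply the destination address, the size, and the raw bytes; deserialize()
+// then writes the bytes directly to the destination address, so no
+// intermediate Size-byte buffer is ever materialized:
+//
+//   [ uint64_t Dst ][ uint64_t Size ][ Size raw bytes ]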
+
+} // end namespace rpc
+
+namespace remote {
+
+class ResourceIdMgr {
+public:
+  using ResourceId = uint64_t;
+  static const ResourceId InvalidId = ~0ULL;
+
+  ResourceIdMgr() = default;
+  explicit ResourceIdMgr(ResourceId FirstValidId)
+    : NextId(FirstValidId) {}
+
+  ResourceId getNext() {
+    if (!FreeIds.empty()) {
+      ResourceId I = FreeIds.back();
+      FreeIds.pop_back();
+      return I;
+    }
+    assert(NextId + 1 != ~0ULL && "All ids allocated");
+    return NextId++;
+  }
+
+  void release(ResourceId I) { FreeIds.push_back(I); }
+
+private:
+  ResourceId NextId = 1;
+  std::vector<ResourceId> FreeIds;
+};
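+
+// Example (illustrative sketch): ids are handed out sequentially and recycled
+// on release, so the sequence below yields 1, 2, then 1 again:
+//
+//   ResourceIdMgr Mgr;
+//   auto A = Mgr.getNext(); // 1
+//   auto B = Mgr.getNext(); // 2
+//   Mgr.release(A);
+//   auto C = Mgr.getNext(); // 1 (recycled)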
+
+/// RPC functions for registering and deregistering EH frames on the remote.
+namespace eh {
+
+  /// Registers EH frames on the remote.
+  class RegisterEHFrames
+      : public rpc::Function<RegisterEHFrames,
+                             void(JITTargetAddress Addr, uint32_t Size)> {
+  public:
+    static const char *getName() { return "RegisterEHFrames"; }
+  };
+
+  /// Deregisters EH frames on the remote.
+  class DeregisterEHFrames
+      : public rpc::Function<DeregisterEHFrames,
+                             void(JITTargetAddress Addr, uint32_t Size)> {
+  public:
+    static const char *getName() { return "DeregisterEHFrames"; }
+  };
+
+} // end namespace eh
+
+/// RPC functions for executing remote code.
+namespace exec {
+
+  /// Call an 'int32_t()'-type function on the remote, returns the called
+  /// function's return value.
+  class CallIntVoid
+      : public rpc::Function<CallIntVoid, int32_t(JITTargetAddress Addr)> {
+  public:
+    static const char *getName() { return "CallIntVoid"; }
+  };
+
+  /// Call an 'int32_t(int32_t, char**)'-type function on the remote, returns the
+  /// called function's return value.
+  class CallMain
+      : public rpc::Function<CallMain, int32_t(JITTargetAddress Addr,
+                                               std::vector<std::string> Args)> {
+  public:
+    static const char *getName() { return "CallMain"; }
+  };
+
+  /// Calls a 'void()'-type function on the remote, returns when the called
+  /// function completes.
+  class CallVoidVoid
+      : public rpc::Function<CallVoidVoid, void(JITTargetAddress FnAddr)> {
+  public:
+    static const char *getName() { return "CallVoidVoid"; }
+  };
+
+} // end namespace exec
+
+/// RPC functions for remote memory management / inspection / modification.
+namespace mem {
+
+  /// Creates a memory allocator on the remote.
+  class CreateRemoteAllocator
+      : public rpc::Function<CreateRemoteAllocator,
+                             void(ResourceIdMgr::ResourceId AllocatorID)> {
+  public:
+    static const char *getName() { return "CreateRemoteAllocator"; }
+  };
+
+  /// Destroys a remote allocator, freeing any memory allocated by it.
+  class DestroyRemoteAllocator
+      : public rpc::Function<DestroyRemoteAllocator,
+                             void(ResourceIdMgr::ResourceId AllocatorID)> {
+  public:
+    static const char *getName() { return "DestroyRemoteAllocator"; }
+  };
+
+  /// Read a remote memory block.
+  class ReadMem
+      : public rpc::Function<ReadMem, std::vector<uint8_t>(JITTargetAddress Src,
+                                                           uint64_t Size)> {
+  public:
+    static const char *getName() { return "ReadMem"; }
+  };
+
+  /// Reserve a block of memory on the remote via the given allocator.
+  class ReserveMem
+      : public rpc::Function<ReserveMem,
+                             JITTargetAddress(ResourceIdMgr::ResourceId AllocID,
+                                              uint64_t Size, uint32_t Align)> {
+  public:
+    static const char *getName() { return "ReserveMem"; }
+  };
+
+  /// Set the memory protection on a memory block.
+  class SetProtections
+      : public rpc::Function<SetProtections,
+                             void(ResourceIdMgr::ResourceId AllocID,
+                                  JITTargetAddress Dst, uint32_t ProtFlags)> {
+  public:
+    static const char *getName() { return "SetProtections"; }
+  };
+
+  /// Write to a remote memory block.
+  class WriteMem
+      : public rpc::Function<WriteMem, void(remote::DirectBufferWriter DB)> {
+  public:
+    static const char *getName() { return "WriteMem"; }
+  };
+
+  /// Write to a remote pointer.
+  class WritePtr : public rpc::Function<WritePtr, void(JITTargetAddress Dst,
+                                                       JITTargetAddress Val)> {
+  public:
+    static const char *getName() { return "WritePtr"; }
+  };
+
+} // end namespace mem
+
+/// RPC functions for remote stub and trampoline management.
+namespace stubs {
+
+  /// Creates an indirect stub owner on the remote.
+  class CreateIndirectStubsOwner
+      : public rpc::Function<CreateIndirectStubsOwner,
+                             void(ResourceIdMgr::ResourceId StubOwnerID)> {
+  public:
+    static const char *getName() { return "CreateIndirectStubsOwner"; }
+  };
+
+  /// RPC function for destroying an indirect stubs owner.
+  class DestroyIndirectStubsOwner
+      : public rpc::Function<DestroyIndirectStubsOwner,
+                             void(ResourceIdMgr::ResourceId StubsOwnerID)> {
+  public:
+    static const char *getName() { return "DestroyIndirectStubsOwner"; }
+  };
+
+  /// EmitIndirectStubs result is (StubsBase, PtrsBase, NumStubsEmitted).
+  class EmitIndirectStubs
+      : public rpc::Function<
+            EmitIndirectStubs,
+            std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>(
+                ResourceIdMgr::ResourceId StubsOwnerID,
+                uint32_t NumStubsRequired)> {
+  public:
+    static const char *getName() { return "EmitIndirectStubs"; }
+  };
+
+  /// RPC function to emit the resolver block and return its address.
+  class EmitResolverBlock : public rpc::Function<EmitResolverBlock, void()> {
+  public:
+    static const char *getName() { return "EmitResolverBlock"; }
+  };
+
+  /// EmitTrampolineBlock result is (BlockAddr, NumTrampolines).
+  class EmitTrampolineBlock
+      : public rpc::Function<EmitTrampolineBlock,
+                             std::tuple<JITTargetAddress, uint32_t>()> {
+  public:
+    static const char *getName() { return "EmitTrampolineBlock"; }
+  };
+
+} // end namespace stubs
+
+/// Miscellaneous RPC functions for dealing with remotes.
+namespace utils {
+
+  /// GetRemoteInfo result is (Triple, PointerSize, PageSize, TrampolineSize,
+  ///                          IndirectStubsSize).
+  class GetRemoteInfo
+      : public rpc::Function<
+            GetRemoteInfo,
+            std::tuple<std::string, uint32_t, uint32_t, uint32_t, uint32_t>()> {
+  public:
+    static const char *getName() { return "GetRemoteInfo"; }
+  };
+
+  /// Get the address of a remote symbol.
+  class GetSymbolAddress
+      : public rpc::Function<GetSymbolAddress,
+                             JITTargetAddress(std::string SymbolName)> {
+  public:
+    static const char *getName() { return "GetSymbolAddress"; }
+  };
+
+  /// Request that the host execute a compile callback.
+  class RequestCompile
+      : public rpc::Function<
+            RequestCompile, JITTargetAddress(JITTargetAddress TrampolineAddr)> {
+  public:
+    static const char *getName() { return "RequestCompile"; }
+  };
+
+  /// Notify the remote and terminate the session.
+  class TerminateSession : public rpc::Function<TerminateSession, void()> {
+  public:
+    static const char *getName() { return "TerminateSession"; }
+  };
+
+} // namespace utils
+
+class OrcRemoteTargetRPCAPI
+    : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+  // FIXME: Remove constructors once MSVC supports synthesizing move-ops.
+  OrcRemoteTargetRPCAPI(rpc::RawByteChannel &C)
+      : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(C, true) {}
+};
+
+} // end namespace remote
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
new file mode 100644
index 0000000..cf419d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
@@ -0,0 +1,447 @@
+//===- OrcRemoteTargetServer.h - Orc Remote-target Server -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrcRemoteTargetServer class. It can be used to build a
+// JIT server that can execute code sent from an OrcRemoteTargetClient.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+#define DEBUG_TYPE "orc-remote"
+
+namespace llvm {
+namespace orc {
+namespace remote {
+
+template <typename ChannelT, typename TargetT>
+class OrcRemoteTargetServer
+    : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+  using SymbolLookupFtor =
+      std::function<JITTargetAddress(const std::string &Name)>;
+
+  using EHFrameRegistrationFtor =
+      std::function<void(uint8_t *Addr, uint32_t Size)>;
+
+  OrcRemoteTargetServer(ChannelT &Channel, SymbolLookupFtor SymbolLookup,
+                        EHFrameRegistrationFtor EHFramesRegister,
+                        EHFrameRegistrationFtor EHFramesDeregister)
+      : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(Channel, true),
+        SymbolLookup(std::move(SymbolLookup)),
+        EHFramesRegister(std::move(EHFramesRegister)),
+        EHFramesDeregister(std::move(EHFramesDeregister)) {
+    using ThisT = typename std::remove_reference<decltype(*this)>::type;
+    addHandler<exec::CallIntVoid>(*this, &ThisT::handleCallIntVoid);
+    addHandler<exec::CallMain>(*this, &ThisT::handleCallMain);
+    addHandler<exec::CallVoidVoid>(*this, &ThisT::handleCallVoidVoid);
+    addHandler<mem::CreateRemoteAllocator>(*this,
+                                           &ThisT::handleCreateRemoteAllocator);
+    addHandler<mem::DestroyRemoteAllocator>(
+        *this, &ThisT::handleDestroyRemoteAllocator);
+    addHandler<mem::ReadMem>(*this, &ThisT::handleReadMem);
+    addHandler<mem::ReserveMem>(*this, &ThisT::handleReserveMem);
+    addHandler<mem::SetProtections>(*this, &ThisT::handleSetProtections);
+    addHandler<mem::WriteMem>(*this, &ThisT::handleWriteMem);
+    addHandler<mem::WritePtr>(*this, &ThisT::handleWritePtr);
+    addHandler<eh::RegisterEHFrames>(*this, &ThisT::handleRegisterEHFrames);
+    addHandler<eh::DeregisterEHFrames>(*this, &ThisT::handleDeregisterEHFrames);
+    addHandler<stubs::CreateIndirectStubsOwner>(
+        *this, &ThisT::handleCreateIndirectStubsOwner);
+    addHandler<stubs::DestroyIndirectStubsOwner>(
+        *this, &ThisT::handleDestroyIndirectStubsOwner);
+    addHandler<stubs::EmitIndirectStubs>(*this,
+                                         &ThisT::handleEmitIndirectStubs);
+    addHandler<stubs::EmitResolverBlock>(*this,
+                                         &ThisT::handleEmitResolverBlock);
+    addHandler<stubs::EmitTrampolineBlock>(*this,
+                                           &ThisT::handleEmitTrampolineBlock);
+    addHandler<utils::GetSymbolAddress>(*this, &ThisT::handleGetSymbolAddress);
+    addHandler<utils::GetRemoteInfo>(*this, &ThisT::handleGetRemoteInfo);
+    addHandler<utils::TerminateSession>(*this, &ThisT::handleTerminateSession);
+  }
+
+  // FIXME: Remove move/copy ops once MSVC supports synthesizing move ops.
+  OrcRemoteTargetServer(const OrcRemoteTargetServer &) = delete;
+  OrcRemoteTargetServer &operator=(const OrcRemoteTargetServer &) = delete;
+
+  OrcRemoteTargetServer(OrcRemoteTargetServer &&Other) = default;
+  OrcRemoteTargetServer &operator=(OrcRemoteTargetServer &&) = delete;
+
+  Expected<JITTargetAddress> requestCompile(JITTargetAddress TrampolineAddr) {
+    return callB<utils::RequestCompile>(TrampolineAddr);
+  }
+
+  bool receivedTerminate() const { return TerminateFlag; }
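+
+  // Example (illustrative sketch): a typical server loop, using handleOne()
+  // inherited from SingleThreadedRPCEndpoint:
+  //
+  //   while (!Server.receivedTerminate())
+  //     if (auto Err = Server.handleOne()) {
+  //       logAllUnhandledErrors(std::move(Err), errs(), "server: ");
+  //       break;
+  //     }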
+
+private:
+  struct Allocator {
+    Allocator() = default;
+    Allocator(Allocator &&Other) : Allocs(std::move(Other.Allocs)) {}
+
+    Allocator &operator=(Allocator &&Other) {
+      Allocs = std::move(Other.Allocs);
+      return *this;
+    }
+
+    ~Allocator() {
+      for (auto &Alloc : Allocs)
+        sys::Memory::releaseMappedMemory(Alloc.second);
+    }
+
+    Error allocate(void *&Addr, size_t Size, uint32_t Align) {
+      std::error_code EC;
+      sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
+          Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
+      if (EC)
+        return errorCodeToError(EC);
+
+      Addr = MB.base();
+      assert(Allocs.find(MB.base()) == Allocs.end() && "Duplicate alloc");
+      Allocs[MB.base()] = std::move(MB);
+      return Error::success();
+    }
+
+    Error setProtections(void *Block, unsigned Flags) {
+      auto I = Allocs.find(Block);
+      if (I == Allocs.end())
+        return errorCodeToError(
+            orcError(OrcErrorCode::RemoteMProtectAddrUnrecognized));
+      return errorCodeToError(
+          sys::Memory::protectMappedMemory(I->second, Flags));
+    }
+
+  private:
+    std::map<void *, sys::MemoryBlock> Allocs;
+  };
+
+  static Error doNothing() { return Error::success(); }
+
+  static JITTargetAddress reenter(void *CallbackMgr, void *TrampolineAddr) {
+    auto T = static_cast<OrcRemoteTargetServer *>(CallbackMgr);
+    auto AddrOrErr = T->requestCompile(static_cast<JITTargetAddress>(
+        reinterpret_cast<uintptr_t>(TrampolineAddr)));
+    // FIXME: Allow customizable failure substitution functions.
+    assert(AddrOrErr && "Compile request failed");
+    return *AddrOrErr;
+  }
+
+  Expected<int32_t> handleCallIntVoid(JITTargetAddress Addr) {
+    using IntVoidFnTy = int (*)();
+
+    IntVoidFnTy Fn =
+        reinterpret_cast<IntVoidFnTy>(static_cast<uintptr_t>(Addr));
+
+    DEBUG(dbgs() << "  Calling " << format("0x%016x", Addr) << "\n");
+    int Result = Fn();
+    DEBUG(dbgs() << "  Result = " << Result << "\n");
+
+    return Result;
+  }
+
+  Expected<int32_t> handleCallMain(JITTargetAddress Addr,
+                                   std::vector<std::string> Args) {
+    using MainFnTy = int (*)(int, const char *[]);
+
+    MainFnTy Fn = reinterpret_cast<MainFnTy>(static_cast<uintptr_t>(Addr));
+    int ArgC = Args.size() + 1;
+    int Idx = 1;
+    std::unique_ptr<const char *[]> ArgV(new const char *[ArgC + 1]);
+    ArgV[0] = "<jit process>";
+    for (auto &Arg : Args)
+      ArgV[Idx++] = Arg.c_str();
+    ArgV[ArgC] = nullptr;
+    DEBUG(
+      for (int Idx = 0; Idx < ArgC; ++Idx) {
+        llvm::dbgs() << "Arg " << Idx << ": " << ArgV[Idx] << "\n";
+      }
+    );
+
+    DEBUG(dbgs() << "  Calling " << format("0x%016x", Addr) << "\n");
+    int Result = Fn(ArgC, ArgV.get());
+    DEBUG(dbgs() << "  Result = " << Result << "\n");
+
+    return Result;
+  }
+
+  Error handleCallVoidVoid(JITTargetAddress Addr) {
+    using VoidVoidFnTy = void (*)();
+
+    VoidVoidFnTy Fn =
+        reinterpret_cast<VoidVoidFnTy>(static_cast<uintptr_t>(Addr));
+
+    DEBUG(dbgs() << "  Calling " << format("0x%016x", Addr) << "\n");
+    Fn();
+    DEBUG(dbgs() << "  Complete.\n");
+
+    return Error::success();
+  }
+
+  Error handleCreateRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+    auto I = Allocators.find(Id);
+    if (I != Allocators.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteAllocatorIdAlreadyInUse));
+    DEBUG(dbgs() << "  Created allocator " << Id << "\n");
+    Allocators[Id] = Allocator();
+    return Error::success();
+  }
+
+  Error handleCreateIndirectStubsOwner(ResourceIdMgr::ResourceId Id) {
+    auto I = IndirectStubsOwners.find(Id);
+    if (I != IndirectStubsOwners.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse));
+    DEBUG(dbgs() << "  Create indirect stubs owner " << Id << "\n");
+    IndirectStubsOwners[Id] = ISBlockOwnerList();
+    return Error::success();
+  }
+
+  Error handleDeregisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
+    uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
+    DEBUG(dbgs() << "  Registering EH frames at " << format("0x%016x", TAddr)
+                 << ", Size = " << Size << " bytes\n");
+    EHFramesDeregister(Addr, Size);
+    return Error::success();
+  }
+
+  Error handleDestroyRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+    auto I = Allocators.find(Id);
+    if (I == Allocators.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+    Allocators.erase(I);
+    DEBUG(dbgs() << "  Destroyed allocator " << Id << "\n");
+    return Error::success();
+  }
+
+  Error handleDestroyIndirectStubsOwner(ResourceIdMgr::ResourceId Id) {
+    auto I = IndirectStubsOwners.find(Id);
+    if (I == IndirectStubsOwners.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist));
+    IndirectStubsOwners.erase(I);
+    return Error::success();
+  }
+
+  Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
+  handleEmitIndirectStubs(ResourceIdMgr::ResourceId Id,
+                          uint32_t NumStubsRequired) {
+    DEBUG(dbgs() << "  ISMgr " << Id << " request " << NumStubsRequired
+                 << " stubs.\n");
+
+    auto StubOwnerItr = IndirectStubsOwners.find(Id);
+    if (StubOwnerItr == IndirectStubsOwners.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist));
+
+    typename TargetT::IndirectStubsInfo IS;
+    if (auto Err =
+            TargetT::emitIndirectStubsBlock(IS, NumStubsRequired, nullptr))
+      return std::move(Err);
+
+    JITTargetAddress StubsBase = static_cast<JITTargetAddress>(
+        reinterpret_cast<uintptr_t>(IS.getStub(0)));
+    JITTargetAddress PtrsBase = static_cast<JITTargetAddress>(
+        reinterpret_cast<uintptr_t>(IS.getPtr(0)));
+    uint32_t NumStubsEmitted = IS.getNumStubs();
+
+    auto &BlockList = StubOwnerItr->second;
+    BlockList.push_back(std::move(IS));
+
+    return std::make_tuple(StubsBase, PtrsBase, NumStubsEmitted);
+  }
+
+  Error handleEmitResolverBlock() {
+    std::error_code EC;
+    ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+        TargetT::ResolverCodeSize, nullptr,
+        sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+    if (EC)
+      return errorCodeToError(EC);
+
+    TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+                               &reenter, this);
+
+    return errorCodeToError(sys::Memory::protectMappedMemory(
+        ResolverBlock.getMemoryBlock(),
+        sys::Memory::MF_READ | sys::Memory::MF_EXEC));
+  }
+
+  Expected<std::tuple<JITTargetAddress, uint32_t>> handleEmitTrampolineBlock() {
+    std::error_code EC;
+    auto TrampolineBlock =
+        sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+            sys::Process::getPageSize(), nullptr,
+            sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+    if (EC)
+      return errorCodeToError(EC);
+
+    uint32_t NumTrampolines =
+        (sys::Process::getPageSize() - TargetT::PointerSize) /
+        TargetT::TrampolineSize;
+
+    uint8_t *TrampolineMem = static_cast<uint8_t *>(TrampolineBlock.base());
+    TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+                              NumTrampolines);
+
+    EC = sys::Memory::protectMappedMemory(TrampolineBlock.getMemoryBlock(),
+                                          sys::Memory::MF_READ |
+                                              sys::Memory::MF_EXEC);
+    if (EC)
+      return errorCodeToError(EC);
+
+    TrampolineBlocks.push_back(std::move(TrampolineBlock));
+
+    auto TrampolineBaseAddr = static_cast<JITTargetAddress>(
+        reinterpret_cast<uintptr_t>(TrampolineMem));
+
+    return std::make_tuple(TrampolineBaseAddr, NumTrampolines);
+  }
+
+  Expected<JITTargetAddress> handleGetSymbolAddress(const std::string &Name) {
+    JITTargetAddress Addr = SymbolLookup(Name);
+    DEBUG(dbgs() << "  Symbol '" << Name << "' =  " << format("0x%016x", Addr)
+                 << "\n");
+    return Addr;
+  }
+
+  Expected<std::tuple<std::string, uint32_t, uint32_t, uint32_t, uint32_t>>
+  handleGetRemoteInfo() {
+    std::string ProcessTriple = sys::getProcessTriple();
+    uint32_t PointerSize = TargetT::PointerSize;
+    uint32_t PageSize = sys::Process::getPageSize();
+    uint32_t TrampolineSize = TargetT::TrampolineSize;
+    uint32_t IndirectStubSize = TargetT::IndirectStubsInfo::StubSize;
+    DEBUG(dbgs() << "  Remote info:\n"
+                 << "    triple             = '" << ProcessTriple << "'\n"
+                 << "    pointer size       = " << PointerSize << "\n"
+                 << "    page size          = " << PageSize << "\n"
+                 << "    trampoline size    = " << TrampolineSize << "\n"
+                 << "    indirect stub size = " << IndirectStubSize << "\n");
+    return std::make_tuple(ProcessTriple, PointerSize, PageSize, TrampolineSize,
+                           IndirectStubSize);
+  }
+
+  Expected<std::vector<uint8_t>> handleReadMem(JITTargetAddress RSrc,
+                                               uint64_t Size) {
+    uint8_t *Src = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(RSrc));
+
+    DEBUG(dbgs() << "  Reading " << Size << " bytes from "
+                 << format("0x%016x", RSrc) << "\n");
+
+    std::vector<uint8_t> Buffer;
+    Buffer.reserve(Size);
+    for (uint8_t *P = Src; Size != 0; --Size)
+      Buffer.push_back(*P++);
+
+    return Buffer;
+  }
+
+  Error handleRegisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
+    uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
+    DEBUG(dbgs() << "  Registering EH frames at " << format("0x%016x", TAddr)
+                 << ", Size = " << Size << " bytes\n");
+    EHFramesRegister(Addr, Size);
+    return Error::success();
+  }
+
+  Expected<JITTargetAddress> handleReserveMem(ResourceIdMgr::ResourceId Id,
+                                              uint64_t Size, uint32_t Align) {
+    auto I = Allocators.find(Id);
+    if (I == Allocators.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+    auto &Allocator = I->second;
+    void *LocalAllocAddr = nullptr;
+    if (auto Err = Allocator.allocate(LocalAllocAddr, Size, Align))
+      return std::move(Err);
+
+    DEBUG(dbgs() << "  Allocator " << Id << " reserved " << LocalAllocAddr
+                 << " (" << Size << " bytes, alignment " << Align << ")\n");
+
+    JITTargetAddress AllocAddr = static_cast<JITTargetAddress>(
+        reinterpret_cast<uintptr_t>(LocalAllocAddr));
+
+    return AllocAddr;
+  }
+
+  Error handleSetProtections(ResourceIdMgr::ResourceId Id,
+                             JITTargetAddress Addr, uint32_t Flags) {
+    auto I = Allocators.find(Id);
+    if (I == Allocators.end())
+      return errorCodeToError(
+               orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+    auto &Allocator = I->second;
+    void *LocalAddr = reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
+    DEBUG(dbgs() << "  Allocator " << Id << " set permissions on " << LocalAddr
+                 << " to " << (Flags & sys::Memory::MF_READ ? 'R' : '-')
+                 << (Flags & sys::Memory::MF_WRITE ? 'W' : '-')
+                 << (Flags & sys::Memory::MF_EXEC ? 'X' : '-') << "\n");
+    return Allocator.setProtections(LocalAddr, Flags);
+  }
+
+  Error handleTerminateSession() {
+    TerminateFlag = true;
+    return Error::success();
+  }
+
+  Error handleWriteMem(DirectBufferWriter DBW) {
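+    // Note: no explicit copy is performed here. The DirectBufferWriter
+    // argument is assumed to have written the incoming bytes directly to the
+    // destination address during deserialization, so this handler only logs
+    // the write.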
+    DEBUG(dbgs() << "  Writing " << DBW.getSize() << " bytes to "
+                 << format("0x%016x", DBW.getDst()) << "\n");
+    return Error::success();
+  }
+
+  Error handleWritePtr(JITTargetAddress Addr, JITTargetAddress PtrVal) {
+    DEBUG(dbgs() << "  Writing pointer *" << format("0x%016x", Addr) << " = "
+                 << format("0x%016x", PtrVal) << "\n");
+    uintptr_t *Ptr =
+        reinterpret_cast<uintptr_t *>(static_cast<uintptr_t>(Addr));
+    *Ptr = static_cast<uintptr_t>(PtrVal);
+    return Error::success();
+  }
+
+  SymbolLookupFtor SymbolLookup;
+  EHFrameRegistrationFtor EHFramesRegister, EHFramesDeregister;
+  std::map<ResourceIdMgr::ResourceId, Allocator> Allocators;
+  using ISBlockOwnerList = std::vector<typename TargetT::IndirectStubsInfo>;
+  std::map<ResourceIdMgr::ResourceId, ISBlockOwnerList> IndirectStubsOwners;
+  sys::OwningMemoryBlock ResolverBlock;
+  std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+  bool TerminateFlag = false;
+};
+
+} // end namespace remote
+} // end namespace orc
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
new file mode 100644
index 0000000..569c506
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
@@ -0,0 +1,609 @@
+//===- llvm/ExecutionEngine/Orc/RPCSerialization.h --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+#define LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+
+#include "OrcError.h"
+#include "llvm/Support/thread.h"
+#include <map>
+#include <mutex>
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+template <typename T>
+class RPCTypeName;
+
+/// TypeNameSequence is a utility for rendering sequences of types to a string
+/// by rendering each type, separated by ", ".
+template <typename... ArgTs> class RPCTypeNameSequence {};
+
+/// Render an empty TypeNameSequence to an ostream.
+template <typename OStream>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<> &V) {
+  return OS;
+}
+
+/// Render a TypeNameSequence of a single type to an ostream.
+template <typename OStream, typename ArgT>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<ArgT> &V) {
+  OS << RPCTypeName<ArgT>::getName();
+  return OS;
+}
+
+/// Render a TypeNameSequence of more than one type to an ostream.
+template <typename OStream, typename ArgT1, typename ArgT2, typename... ArgTs>
+OStream&
+operator<<(OStream &OS, const RPCTypeNameSequence<ArgT1, ArgT2, ArgTs...> &V) {
+  OS << RPCTypeName<ArgT1>::getName() << ", "
+     << RPCTypeNameSequence<ArgT2, ArgTs...>();
+  return OS;
+}
+
+template <>
+class RPCTypeName<void> {
+public:
+  static const char* getName() { return "void"; }
+};
+
+template <>
+class RPCTypeName<int8_t> {
+public:
+  static const char* getName() { return "int8_t"; }
+};
+
+template <>
+class RPCTypeName<uint8_t> {
+public:
+  static const char* getName() { return "uint8_t"; }
+};
+
+template <>
+class RPCTypeName<int16_t> {
+public:
+  static const char* getName() { return "int16_t"; }
+};
+
+template <>
+class RPCTypeName<uint16_t> {
+public:
+  static const char* getName() { return "uint16_t"; }
+};
+
+template <>
+class RPCTypeName<int32_t> {
+public:
+  static const char* getName() { return "int32_t"; }
+};
+
+template <>
+class RPCTypeName<uint32_t> {
+public:
+  static const char* getName() { return "uint32_t"; }
+};
+
+template <>
+class RPCTypeName<int64_t> {
+public:
+  static const char* getName() { return "int64_t"; }
+};
+
+template <>
+class RPCTypeName<uint64_t> {
+public:
+  static const char* getName() { return "uint64_t"; }
+};
+
+template <>
+class RPCTypeName<bool> {
+public:
+  static const char* getName() { return "bool"; }
+};
+
+template <>
+class RPCTypeName<std::string> {
+public:
+  static const char* getName() { return "std::string"; }
+};
+
+template <>
+class RPCTypeName<Error> {
+public:
+  static const char* getName() { return "Error"; }
+};
+
+template <typename T>
+class RPCTypeName<Expected<T>> {
+public:
+  static const char* getName() {
+    std::lock_guard<std::mutex> Lock(NameMutex);
+    if (Name.empty())
+      raw_string_ostream(Name) << "Expected<"
+                               << RPCTypeNameSequence<T>()
+                               << ">";
+    return Name.data();
+  }
+
+private:
+  static std::mutex NameMutex;
+  static std::string Name;
+};
+
+template <typename T>
+std::mutex RPCTypeName<Expected<T>>::NameMutex;
+
+template <typename T>
+std::string RPCTypeName<Expected<T>>::Name;
+
+template <typename T1, typename T2>
+class RPCTypeName<std::pair<T1, T2>> {
+public:
+  static const char* getName() {
+    std::lock_guard<std::mutex> Lock(NameMutex);
+    if (Name.empty())
+      raw_string_ostream(Name) << "std::pair<" << RPCTypeNameSequence<T1, T2>()
+                               << ">";
+    return Name.data();
+  }
+private:
+  static std::mutex NameMutex;
+  static std::string Name;
+};
+
+template <typename T1, typename T2>
+std::mutex RPCTypeName<std::pair<T1, T2>>::NameMutex;
+template <typename T1, typename T2>
+std::string RPCTypeName<std::pair<T1, T2>>::Name;
+
+template <typename... ArgTs>
+class RPCTypeName<std::tuple<ArgTs...>> {
+public:
+  static const char* getName() {
+    std::lock_guard<std::mutex> Lock(NameMutex);
+    if (Name.empty())
+      raw_string_ostream(Name) << "std::tuple<"
+                               << RPCTypeNameSequence<ArgTs...>() << ">";
+    return Name.data();
+  }
+private:
+  static std::mutex NameMutex;
+  static std::string Name;
+};
+
+template <typename... ArgTs>
+std::mutex RPCTypeName<std::tuple<ArgTs...>>::NameMutex;
+template <typename... ArgTs>
+std::string RPCTypeName<std::tuple<ArgTs...>>::Name;
+
+template <typename T>
+class RPCTypeName<std::vector<T>> {
+public:
+  static const char* getName() {
+    std::lock_guard<std::mutex> Lock(NameMutex);
+    if (Name.empty())
+      raw_string_ostream(Name) << "std::vector<" << RPCTypeName<T>::getName()
+                               << ">";
+    return Name.data();
+  }
+
+private:
+  static std::mutex NameMutex;
+  static std::string Name;
+};
+
+template <typename T>
+std::mutex RPCTypeName<std::vector<T>>::NameMutex;
+template <typename T>
+std::string RPCTypeName<std::vector<T>>::Name;
+
+
+/// The SerializationTraits<ChannelT, T> class describes how to serialize and
+/// deserialize an instance of type T to/from an abstract channel of type
+/// ChannelT. It also provides a representation of the type's name via the
+/// getName method.
+///
+/// Specializations of this class should provide the following functions:
+///
+///   @code{.cpp}
+///
+///   static const char* getName();
+///   static Error serialize(ChannelT&, const T&);
+///   static Error deserialize(ChannelT&, T&);
+///
+///   @endcode
+///
+/// The fourth template argument of SerializationTraits is intended to
+/// support SFINAE. E.g.:
+///
+///   @code{.cpp}
+///
+///   class MyVirtualChannel { ... };
+///
+///   template <typename DerivedChannelT>
+///   class SerializationTraits<DerivedChannelT, bool, bool,
+///         typename std::enable_if<
+///           std::is_base_of<MyVirtualChannel, DerivedChannelT>::value
+///         >::type> {
+///   public:
+///     static const char* getName() { ... }
+///   };
+///
+///   @endcode
+template <typename ChannelT, typename WireType,
+          typename ConcreteType = WireType, typename = void>
+class SerializationTraits;
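+
+// Illustrative sketch (not part of this header's API surface): a minimal
+// SerializationTraits specialization for uint32_t over a hypothetical
+// 'ByteStreamChannel'. The channel type and its writeBytes/readBytes members
+// are assumptions for demonstration only.
+//
+//   class ByteStreamChannel; // hypothetical channel exposing:
+//                            //   Error writeBytes(const char *Src, size_t N);
+//                            //   Error readBytes(char *Dst, size_t N);
+//
+//   template <>
+//   class SerializationTraits<ByteStreamChannel, uint32_t> {
+//   public:
+//     static const char* getName() { return "uint32_t"; }
+//     static Error serialize(ByteStreamChannel &C, uint32_t V) {
+//       return C.writeBytes(reinterpret_cast<const char *>(&V), sizeof(V));
+//     }
+//     static Error deserialize(ByteStreamChannel &C, uint32_t &V) {
+//       return C.readBytes(reinterpret_cast<char *>(&V), sizeof(V));
+//     }
+//   };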
+
+template <typename ChannelT>
+class SequenceTraits {
+public:
+  static Error emitSeparator(ChannelT &C) { return Error::success(); }
+  static Error consumeSeparator(ChannelT &C) { return Error::success(); }
+};
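+
+// Illustrative sketch: a textual channel could specialize SequenceTraits to
+// emit and consume a delimiter between sequence elements. 'TextChannel' and
+// its write/expect members are hypothetical:
+//
+//   template <>
+//   class SequenceTraits<TextChannel> {
+//   public:
+//     static Error emitSeparator(TextChannel &C) { return C.write(", "); }
+//     static Error consumeSeparator(TextChannel &C) { return C.expect(", "); }
+//   };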
+
+/// Utility class for serializing sequences of values of varying types.
+/// Specializations of this class contain 'serialize' and 'deserialize' methods
+/// for the given channel. The ArgTs... list determines the "over-the-wire"
+/// types to be serialized. The serialize and deserialize methods take a list
+/// CArgTs... ("caller arg types") which must be the same length as ArgTs...,
+/// but may differ from ArgTs, provided that for each (ArgT, CArgT) pair there
+/// is a SerializationTraits specialization
+/// SerializationTraits<ChannelT, ArgT, CArgT> with methods that can serialize
+/// the caller argument to the over-the-wire value.
+template <typename ChannelT, typename... ArgTs>
+class SequenceSerialization;
+
+template <typename ChannelT>
+class SequenceSerialization<ChannelT> {
+public:
+  static Error serialize(ChannelT &C) { return Error::success(); }
+  static Error deserialize(ChannelT &C) { return Error::success(); }
+};
+
+template <typename ChannelT, typename ArgT>
+class SequenceSerialization<ChannelT, ArgT> {
+public:
+
+  template <typename CArgT>
+  static Error serialize(ChannelT &C, CArgT &&CArg) {
+    return SerializationTraits<ChannelT, ArgT,
+                               typename std::decay<CArgT>::type>::
+             serialize(C, std::forward<CArgT>(CArg));
+  }
+
+  template <typename CArgT>
+  static Error deserialize(ChannelT &C, CArgT &CArg) {
+    return SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg);
+  }
+};
+
+template <typename ChannelT, typename ArgT, typename... ArgTs>
+class SequenceSerialization<ChannelT, ArgT, ArgTs...> {
+public:
+
+  template <typename CArgT, typename... CArgTs>
+  static Error serialize(ChannelT &C, CArgT &&CArg,
+                         CArgTs &&... CArgs) {
+    if (auto Err =
+        SerializationTraits<ChannelT, ArgT, typename std::decay<CArgT>::type>::
+          serialize(C, std::forward<CArgT>(CArg)))
+      return Err;
+    if (auto Err = SequenceTraits<ChannelT>::emitSeparator(C))
+      return Err;
+    return SequenceSerialization<ChannelT, ArgTs...>::
+             serialize(C, std::forward<CArgTs>(CArgs)...);
+  }
+
+  template <typename CArgT, typename... CArgTs>
+  static Error deserialize(ChannelT &C, CArgT &CArg,
+                           CArgTs &... CArgs) {
+    if (auto Err =
+        SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg))
+      return Err;
+    if (auto Err = SequenceTraits<ChannelT>::consumeSeparator(C))
+      return Err;
+    return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, CArgs...);
+  }
+};
+
+template <typename ChannelT, typename... ArgTs>
+Error serializeSeq(ChannelT &C, ArgTs &&... Args) {
+  return SequenceSerialization<ChannelT, typename std::decay<ArgTs>::type...>::
+           serialize(C, std::forward<ArgTs>(Args)...);
+}
+
+template <typename ChannelT, typename... ArgTs>
+Error deserializeSeq(ChannelT &C, ArgTs &... Args) {
+  return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, Args...);
+}
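+
+// Illustrative usage sketch: given a channel C with SerializationTraits for
+// uint32_t and std::string, a sequence of values round-trips as follows
+// (variable names are assumptions):
+//
+//   uint32_t Id = 42;
+//   std::string Msg = "hello";
+//   if (auto Err = serializeSeq(C, Id, Msg))
+//     return Err;
+//   ...
+//   uint32_t IdIn;
+//   std::string MsgIn;
+//   if (auto Err = deserializeSeq(C, IdIn, MsgIn))
+//     return Err;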
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, Error> {
+public:
+
+  using WrappedErrorSerializer =
+    std::function<Error(ChannelT &C, const ErrorInfoBase&)>;
+
+  using WrappedErrorDeserializer =
+    std::function<Error(ChannelT &C, Error &Err)>;
+
+  template <typename ErrorInfoT, typename SerializeFtor,
+            typename DeserializeFtor>
+  static void registerErrorType(std::string Name, SerializeFtor Serialize,
+                                DeserializeFtor Deserialize) {
+    assert(!Name.empty() &&
+           "The empty string is reserved for the Success value");
+
+    const std::string *KeyName = nullptr;
+    {
+      // We're abusing the stability of std::map here: We take a reference to the
+      // key of the deserializers map to save us from duplicating the string in
+      // the serializer. This should be changed to use a stringpool if we switch
+      // to a map type that may move keys in memory.
+      std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);
+      auto I =
+        Deserializers.insert(Deserializers.begin(),
+                             std::make_pair(std::move(Name),
+                                            std::move(Deserialize)));
+      KeyName = &I->first;
+    }
+
+    {
+      assert(KeyName != nullptr && "No keyname pointer");
+      std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
+      // FIXME: Move capture Serialize once we have C++14.
+      Serializers[ErrorInfoT::classID()] =
+          [KeyName, Serialize](ChannelT &C, const ErrorInfoBase &EIB) -> Error {
+        assert(EIB.dynamicClassID() == ErrorInfoT::classID() &&
+               "Serializer called for wrong error type");
+        if (auto Err = serializeSeq(C, *KeyName))
+          return Err;
+        return Serialize(C, static_cast<const ErrorInfoT &>(EIB));
+      };
+    }
+  }
+
+  static Error serialize(ChannelT &C, Error &&Err) {
+    std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
+
+    if (!Err)
+      return serializeSeq(C, std::string());
+
+    return handleErrors(std::move(Err),
+                        [&C](const ErrorInfoBase &EIB) {
+                          auto SI = Serializers.find(EIB.dynamicClassID());
+                          if (SI == Serializers.end())
+                            return serializeAsStringError(C, EIB);
+                          return (SI->second)(C, EIB);
+                        });
+  }
+
+  static Error deserialize(ChannelT &C, Error &Err) {
+    std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);
+
+    std::string Key;
+    if (auto Err = deserializeSeq(C, Key))
+      return Err;
+
+    if (Key.empty()) {
+      ErrorAsOutParameter EAO(&Err);
+      Err = Error::success();
+      return Error::success();
+    }
+
+    auto DI = Deserializers.find(Key);
+    assert(DI != Deserializers.end() && "No deserializer for error type");
+    return (DI->second)(C, Err);
+  }
+
+private:
+
+  static Error serializeAsStringError(ChannelT &C, const ErrorInfoBase &EIB) {
+    std::string ErrMsg;
+    {
+      raw_string_ostream ErrMsgStream(ErrMsg);
+      EIB.log(ErrMsgStream);
+    }
+    return serialize(C, make_error<StringError>(std::move(ErrMsg),
+                                                inconvertibleErrorCode()));
+  }
+
+  static std::recursive_mutex SerializersMutex;
+  static std::recursive_mutex DeserializersMutex;
+  static std::map<const void*, WrappedErrorSerializer> Serializers;
+  static std::map<std::string, WrappedErrorDeserializer> Deserializers;
+};
+
+template <typename ChannelT>
+std::recursive_mutex SerializationTraits<ChannelT, Error>::SerializersMutex;
+
+template <typename ChannelT>
+std::recursive_mutex SerializationTraits<ChannelT, Error>::DeserializersMutex;
+
+template <typename ChannelT>
+std::map<const void*,
+         typename SerializationTraits<ChannelT, Error>::WrappedErrorSerializer>
+SerializationTraits<ChannelT, Error>::Serializers;
+
+template <typename ChannelT>
+std::map<std::string,
+         typename SerializationTraits<ChannelT, Error>::WrappedErrorDeserializer>
+SerializationTraits<ChannelT, Error>::Deserializers;
+
+/// Registers a serializer and deserializer for the given error type on the
+/// given channel type.
+template <typename ChannelT, typename ErrorInfoT, typename SerializeFtor,
+          typename DeserializeFtor>
+void registerErrorSerialization(std::string Name, SerializeFtor &&Serialize,
+                                DeserializeFtor &&Deserialize) {
+  SerializationTraits<ChannelT, Error>::template registerErrorType<ErrorInfoT>(
+    std::move(Name),
+    std::forward<SerializeFtor>(Serialize),
+    std::forward<DeserializeFtor>(Deserialize));
+}
+
+/// Registers serialization/deserialization for StringError.
+template <typename ChannelT>
+void registerStringError() {
+  static bool AlreadyRegistered = false;
+  if (!AlreadyRegistered) {
+    registerErrorSerialization<ChannelT, StringError>(
+      "StringError",
+      [](ChannelT &C, const StringError &SE) {
+        return serializeSeq(C, SE.getMessage());
+      },
+      [](ChannelT &C, Error &Err) -> Error {
+        ErrorAsOutParameter EAO(&Err);
+        std::string Msg;
+        if (auto E2 = deserializeSeq(C, Msg))
+          return E2;
+        Err =
+          make_error<StringError>(std::move(Msg),
+                                  orcError(
+                                    OrcErrorCode::UnknownErrorCodeFromRemote));
+        return Error::success();
+      });
+    AlreadyRegistered = true;
+  }
+}
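+
+// Illustrative sketch: a channel implementation would typically call this
+// once during setup so that StringError values can cross the wire, e.g.:
+//
+//   registerStringError<MyChannel>(); // 'MyChannel' is a hypothetical type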
+
+/// SerializationTraits for Expected<T1> from an Expected<T2>.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, Expected<T1>, Expected<T2>> {
+public:
+
+  static Error serialize(ChannelT &C, Expected<T2> &&ValOrErr) {
+    if (ValOrErr) {
+      if (auto Err = serializeSeq(C, true))
+        return Err;
+      return SerializationTraits<ChannelT, T1, T2>::serialize(C, *ValOrErr);
+    }
+    if (auto Err = serializeSeq(C, false))
+      return Err;
+    return serializeSeq(C, ValOrErr.takeError());
+  }
+
+  static Error deserialize(ChannelT &C, Expected<T2> &ValOrErr) {
+    ExpectedAsOutParameter<T2> EAO(&ValOrErr);
+    bool HasValue;
+    if (auto Err = deserializeSeq(C, HasValue))
+      return Err;
+    if (HasValue)
+      return SerializationTraits<ChannelT, T1, T2>::deserialize(C, *ValOrErr);
+    Error Err = Error::success();
+    if (auto E2 = deserializeSeq(C, Err))
+      return E2;
+    ValOrErr = std::move(Err);
+    return Error::success();
+  }
+};
+
+/// SerializationTraits for Expected<T1> from a T2.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, Expected<T1>, T2> {
+public:
+
+  static Error serialize(ChannelT &C, T2 &&Val) {
+    return serializeSeq(C, Expected<T2>(std::forward<T2>(Val)));
+  }
+};
+
+/// SerializationTraits for Expected<T1> from an Error.
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, Expected<T>, Error> {
+public:
+
+  static Error serialize(ChannelT &C, Error &&Err) {
+    return serializeSeq(C, Expected<T>(std::move(Err)));
+  }
+};
+
+/// SerializationTraits default specialization for std::pair.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, std::pair<T1, T2>> {
+public:
+  static Error serialize(ChannelT &C, const std::pair<T1, T2> &V) {
+    return serializeSeq(C, V.first, V.second);
+  }
+
+  static Error deserialize(ChannelT &C, std::pair<T1, T2> &V) {
+    return deserializeSeq(C, V.first, V.second);
+  }
+};
+
+/// SerializationTraits default specialization for std::tuple.
+template <typename ChannelT, typename... ArgTs>
+class SerializationTraits<ChannelT, std::tuple<ArgTs...>> {
+public:
+
+  /// RPC channel serialization for std::tuple.
+  static Error serialize(ChannelT &C, const std::tuple<ArgTs...> &V) {
+    return serializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+  }
+
+  /// RPC channel deserialization for std::tuple.
+  static Error deserialize(ChannelT &C, std::tuple<ArgTs...> &V) {
+    return deserializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+  }
+
+private:
+  // Serialization helper for std::tuple.
+  template <size_t... Is>
+  static Error serializeTupleHelper(ChannelT &C, const std::tuple<ArgTs...> &V,
+                                    llvm::index_sequence<Is...> _) {
+    return serializeSeq(C, std::get<Is>(V)...);
+  }
+
+  // Deserialization helper for std::tuple.
+  template <size_t... Is>
+  static Error deserializeTupleHelper(ChannelT &C, std::tuple<ArgTs...> &V,
+                                      llvm::index_sequence<Is...> _) {
+    return deserializeSeq(C, std::get<Is>(V)...);
+  }
+};
+
+/// SerializationTraits default specialization for std::vector.
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, std::vector<T>> {
+public:
+
+  /// Serialize a std::vector<T> to the channel.
+  static Error serialize(ChannelT &C, const std::vector<T> &V) {
+    if (auto Err = serializeSeq(C, static_cast<uint64_t>(V.size())))
+      return Err;
+
+    for (const auto &E : V)
+      if (auto Err = serializeSeq(C, E))
+        return Err;
+
+    return Error::success();
+  }
+
+  /// Deserialize a std::vector<T> from the channel.
+  static Error deserialize(ChannelT &C, std::vector<T> &V) {
+    uint64_t Count = 0;
+    if (auto Err = deserializeSeq(C, Count))
+      return Err;
+
+    V.resize(Count);
+    for (auto &E : V)
+      if (auto Err = deserializeSeq(C, E))
+        return Err;
+
+    return Error::success();
+  }
+};
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h
new file mode 100644
index 0000000..c278cb1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h
@@ -0,0 +1,1752 @@
+//===------- RPCUtils.h - Utilities for building RPC APIs -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities to support construction of simple RPC APIs.
+//
+// The RPC utilities aim for ease of use (minimal conceptual overhead) for C++
+// programmers, high performance, low memory overhead, and efficient use of the
+// communications channel.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RPCUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_RPCUTILS_H
+
+#include <map>
+#include <thread>
+#include <vector>
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
+
+#include <future>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+/// Base class of all fatal RPC errors (those that necessarily result in the
+/// termination of the RPC session).
+class RPCFatalError : public ErrorInfo<RPCFatalError> {
+public:
+  static char ID;
+};
+
+/// ConnectionClosed is returned from RPC operations if the RPC connection
+/// has already been closed due to either an error or graceful disconnection.
+class ConnectionClosed : public ErrorInfo<ConnectionClosed> {
+public:
+  static char ID;
+  std::error_code convertToErrorCode() const override;
+  void log(raw_ostream &OS) const override;
+};
+
+/// BadFunctionCall is returned from handleOne when the remote makes a call with
+/// an unrecognized function id.
+///
+/// This error is fatal because Orc RPC needs to know how to parse a function
+/// call to know where the next call starts, and if it doesn't recognize the
+/// function id it cannot parse the call.
+template <typename FnIdT, typename SeqNoT>
+class BadFunctionCall
+  : public ErrorInfo<BadFunctionCall<FnIdT, SeqNoT>, RPCFatalError> {
+public:
+  static char ID;
+
+  BadFunctionCall(FnIdT FnId, SeqNoT SeqNo)
+      : FnId(std::move(FnId)), SeqNo(std::move(SeqNo)) {}
+
+  std::error_code convertToErrorCode() const override {
+    return orcError(OrcErrorCode::UnexpectedRPCCall);
+  }
+
+  void log(raw_ostream &OS) const override {
+    OS << "Call to invalid RPC function id '" << FnId << "' with "
+          "sequence number " << SeqNo;
+  }
+
+private:
+  FnIdT FnId;
+  SeqNoT SeqNo;
+};
+
+template <typename FnIdT, typename SeqNoT>
+char BadFunctionCall<FnIdT, SeqNoT>::ID = 0;
+
+/// InvalidSequenceNumberForResponse is returned from handleOne when a response
+/// call arrives with a sequence number that doesn't correspond to any in-flight
+/// function call.
+///
+/// This error is fatal because Orc RPC needs to know how to parse the rest of
+/// the response call to know where the next call starts, and if it doesn't have
+/// a result parser for this sequence number it can't do that.
+template <typename SeqNoT>
+class InvalidSequenceNumberForResponse
+    : public ErrorInfo<InvalidSequenceNumberForResponse<SeqNoT>, RPCFatalError> {
+public:
+  static char ID;
+
+  InvalidSequenceNumberForResponse(SeqNoT SeqNo)
+      : SeqNo(std::move(SeqNo)) {}
+
+  std::error_code convertToErrorCode() const override {
+    return orcError(OrcErrorCode::UnexpectedRPCCall);
+  }
+
+  void log(raw_ostream &OS) const override {
+    OS << "Response has unknown sequence number " << SeqNo;
+  }
+private:
+  SeqNoT SeqNo;
+};
+
+template <typename SeqNoT>
+char InvalidSequenceNumberForResponse<SeqNoT>::ID = 0;
+
+/// This non-fatal error will be passed to asynchronous result handlers in place
+/// of a result if the connection goes down before a result returns, or if the
+/// function to be called cannot be negotiated with the remote.
+class ResponseAbandoned : public ErrorInfo<ResponseAbandoned> {
+public:
+  static char ID;
+
+  std::error_code convertToErrorCode() const override;
+  void log(raw_ostream &OS) const override;
+};
+
+/// This error is returned if the remote does not have a handler installed for
+/// the given RPC function.
+class CouldNotNegotiate : public ErrorInfo<CouldNotNegotiate> {
+public:
+  static char ID;
+
+  CouldNotNegotiate(std::string Signature);
+  std::error_code convertToErrorCode() const override;
+  void log(raw_ostream &OS) const override;
+  const std::string &getSignature() const { return Signature; }
+private:
+  std::string Signature;
+};
+
+template <typename DerivedFunc, typename FnT> class Function;
+
+// RPC Function class.
+// DerivedFunc should be a user defined class with a static 'getName()' method
+// returning a const char* representing the function's name.
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+class Function<DerivedFunc, RetT(ArgTs...)> {
+public:
+  /// User defined function type.
+  using Type = RetT(ArgTs...);
+
+  /// Return type.
+  using ReturnType = RetT;
+
+  /// Returns the full function prototype as a string.
+  static const char *getPrototype() {
+    std::lock_guard<std::mutex> Lock(NameMutex);
+    if (Name.empty())
+      raw_string_ostream(Name)
+          << RPCTypeName<RetT>::getName() << " " << DerivedFunc::getName()
+          << "(" << llvm::orc::rpc::RPCTypeNameSequence<ArgTs...>() << ")";
+    return Name.data();
+  }
+
+private:
+  static std::mutex NameMutex;
+  static std::string Name;
+};
+
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::mutex Function<DerivedFunc, RetT(ArgTs...)>::NameMutex;
+
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::string Function<DerivedFunc, RetT(ArgTs...)>::Name;
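+
+// Illustrative sketch: a user-defined RPC function is declared by deriving
+// from Function with the desired signature ('AddInts' is a made-up example):
+//
+//   class AddInts : public Function<AddInts, int32_t(int32_t, int32_t)> {
+//   public:
+//     static const char *getName() { return "AddInts"; }
+//   };
+//
+// AddInts::getPrototype() then renders "int32_t AddInts(int32_t, int32_t)".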
+
+/// Allocates RPC function ids during autonegotiation.
+/// Specializations of this class must provide four members:
+///
+/// static T getInvalidId():
+///   Should return a reserved id that will be used to represent missing
+/// functions during autonegotiation.
+///
+/// static T getResponseId():
+///   Should return a reserved id that will be used to send function responses
+/// (return values).
+///
+/// static T getNegotiateId():
+///   Should return a reserved id for the negotiate function, which will be used
+/// to negotiate ids for user defined functions.
+///
+/// template <typename Func> T allocate():
+///   Allocate a unique id for function Func.
+template <typename T, typename = void> class RPCFunctionIdAllocator;
+
+/// This specialization of RPCFunctionIdAllocator provides a default
+/// implementation for integral types.
+template <typename T>
+class RPCFunctionIdAllocator<
+    T, typename std::enable_if<std::is_integral<T>::value>::type> {
+public:
+  static T getInvalidId() { return T(0); }
+  static T getResponseId() { return T(1); }
+  static T getNegotiateId() { return T(2); }
+
+  template <typename Func> T allocate() { return NextId++; }
+
+private:
+  T NextId = 3;
+};
+
+namespace detail {
+
+// FIXME: Remove MSVCPError/MSVCPExpected once MSVC's future implementation
+//        supports classes without default constructors.
+#ifdef _MSC_VER
+
+namespace msvc_hacks {
+
+// Work around MSVC's future implementation's use of default constructors:
+// A default constructed value in the promise will be overwritten when the
+// real error is set - so the default constructed Error has to be checked
+// already.
+class MSVCPError : public Error {
+public:
+  MSVCPError() { (void)!!*this; }
+
+  MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}
+
+  MSVCPError &operator=(MSVCPError Other) {
+    Error::operator=(std::move(Other));
+    return *this;
+  }
+
+  MSVCPError(Error Err) : Error(std::move(Err)) {}
+};
+
+// Work around MSVC's future implementation, similar to MSVCPError.
+template <typename T> class MSVCPExpected : public Expected<T> {
+public:
+  MSVCPExpected()
+      : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
+    consumeError(this->takeError());
+  }
+
+  MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}
+
+  MSVCPExpected &operator=(MSVCPExpected &&Other) {
+    Expected<T>::operator=(std::move(Other));
+    return *this;
+  }
+
+  MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}
+
+  template <typename OtherT>
+  MSVCPExpected(
+      OtherT &&Val,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr)
+      : Expected<T>(std::move(Val)) {}
+
+  template <class OtherT>
+  MSVCPExpected(
+      Expected<OtherT> &&Other,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr)
+      : Expected<T>(std::move(Other)) {}
+
+  template <class OtherT>
+  explicit MSVCPExpected(
+      Expected<OtherT> &&Other,
+      typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+          nullptr)
+      : Expected<T>(std::move(Other)) {}
+};
+
+} // end namespace msvc_hacks
+
+#endif // _MSC_VER
+
+/// Provides a typedef for a tuple containing the decayed argument types.
+template <typename T> class FunctionArgsTuple;
+
+template <typename RetT, typename... ArgTs>
+class FunctionArgsTuple<RetT(ArgTs...)> {
+public:
+  using Type = std::tuple<typename std::decay<
+      typename std::remove_reference<ArgTs>::type>::type...>;
+};
+
+// ResultTraits provides typedefs and utilities specific to the return type
+// of functions.
+template <typename RetT> class ResultTraits {
+public:
+  // The return type wrapped in llvm::Expected.
+  using ErrorReturnType = Expected<RetT>;
+
+#ifdef _MSC_VER
+  // The ErrorReturnType wrapped in a std::promise.
+  using ReturnPromiseType = std::promise<msvc_hacks::MSVCPExpected<RetT>>;
+
+  // The ErrorReturnType wrapped in a std::future.
+  using ReturnFutureType = std::future<msvc_hacks::MSVCPExpected<RetT>>;
+#else
+  // The ErrorReturnType wrapped in a std::promise.
+  using ReturnPromiseType = std::promise<ErrorReturnType>;
+
+  // The ErrorReturnType wrapped in a std::future.
+  using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
+
+  // Create a 'blank' value of the ErrorReturnType, ready and safe to
+  // overwrite.
+  static ErrorReturnType createBlankErrorReturnValue() {
+    return ErrorReturnType(RetT());
+  }
+
+  // Consume an abandoned ErrorReturnType.
+  static void consumeAbandoned(ErrorReturnType RetOrErr) {
+    consumeError(RetOrErr.takeError());
+  }
+};
+
+// ResultTraits specialization for void functions.
+template <> class ResultTraits<void> {
+public:
+  // For void functions, ErrorReturnType is llvm::Error.
+  using ErrorReturnType = Error;
+
+#ifdef _MSC_VER
+  // The ErrorReturnType wrapped in a std::promise.
+  using ReturnPromiseType = std::promise<msvc_hacks::MSVCPError>;
+
+  // The ErrorReturnType wrapped in a std::future.
+  using ReturnFutureType = std::future<msvc_hacks::MSVCPError>;
+#else
+  // The ErrorReturnType wrapped in a std::promise.
+  using ReturnPromiseType = std::promise<ErrorReturnType>;
+
+  // The ErrorReturnType wrapped in a std::future.
+  using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
+
+  // Create a 'blank' value of the ErrorReturnType, ready and safe to
+  // overwrite.
+  static ErrorReturnType createBlankErrorReturnValue() {
+    return ErrorReturnType::success();
+  }
+
+  // Consume an abandoned ErrorReturnType.
+  static void consumeAbandoned(ErrorReturnType Err) {
+    consumeError(std::move(Err));
+  }
+};
+
+// ResultTraits<Error> is equivalent to ResultTraits<void>. This allows
+// handlers for void RPC functions to return either void (in which case they
+// implicitly succeed) or Error (in which case their error return is
+// propagated). See usage in HandlerTraits::runHandlerHelper.
+template <> class ResultTraits<Error> : public ResultTraits<void> {};
+
+// ResultTraits<Expected<T>> is equivalent to ResultTraits<T>. This allows
+// handlers for RPC functions returning a T to return either a T (in which
+// case they implicitly succeed) or Expected<T> (in which case their error
+// return is propagated). See usage in HandlerTraits::runHandlerHelper.
+template <typename RetT>
+class ResultTraits<Expected<RetT>> : public ResultTraits<RetT> {};
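+
+// Illustrative sketch: thanks to the equivalences above, a handler for a
+// void RPC function may be written either way:
+//
+//   auto H1 = []() {};                                     // implicit success
+//   auto H2 = []() -> Error { return Error::success(); };  // explicit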
+
+// Determines whether an RPC function's declared return type supports error
+// returns (i.e. is Error or Expected<T>).
+template <typename T>
+class SupportsErrorReturn {
+public:
+  static const bool value = false;
+};
+
+template <>
+class SupportsErrorReturn<Error> {
+public:
+  static const bool value = true;
+};
+
+template <typename T>
+class SupportsErrorReturn<Expected<T>> {
+public:
+  static const bool value = true;
+};
+
+// RespondHelper packages return values based on whether or not the declared
+// RPC function return type supports error returns.
+template <bool FuncSupportsErrorReturn>
+class RespondHelper;
+
+// RespondHelper specialization for functions that support error returns.
+template <>
+class RespondHelper<true> {
+public:
+
+  // Send Expected<T>.
+  template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+            typename FunctionIdT, typename SequenceNumberT>
+  static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+                          SequenceNumberT SeqNo,
+                          Expected<HandlerRetT> ResultOrErr) {
+    if (!ResultOrErr && ResultOrErr.template errorIsA<RPCFatalError>())
+      return ResultOrErr.takeError();
+
+    // Open the response message.
+    if (auto Err = C.startSendMessage(ResponseId, SeqNo))
+      return Err;
+
+    // Serialize the result.
+    if (auto Err =
+        SerializationTraits<ChannelT, WireRetT,
+                            Expected<HandlerRetT>>::serialize(
+                                                     C, std::move(ResultOrErr)))
+      return Err;
+
+    // Close the response message.
+    return C.endSendMessage();
+  }
+
+  template <typename ChannelT, typename FunctionIdT, typename SequenceNumberT>
+  static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+                          SequenceNumberT SeqNo, Error Err) {
+    if (Err && Err.isA<RPCFatalError>())
+      return Err;
+    if (auto Err2 = C.startSendMessage(ResponseId, SeqNo))
+      return Err2;
+    if (auto Err2 = serializeSeq(C, std::move(Err)))
+      return Err2;
+    return C.endSendMessage();
+  }
+
+};
+
+// RespondHelper specialization for functions that do not support error returns.
+template <>
+class RespondHelper<false> {
+public:
+
+  template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+            typename FunctionIdT, typename SequenceNumberT>
+  static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+                          SequenceNumberT SeqNo,
+                          Expected<HandlerRetT> ResultOrErr) {
+    if (auto Err = ResultOrErr.takeError())
+      return Err;
+
+    // Open the response message.
+    if (auto Err = C.startSendMessage(ResponseId, SeqNo))
+      return Err;
+
+    // Serialize the result.
+    if (auto Err =
+        SerializationTraits<ChannelT, WireRetT, HandlerRetT>::serialize(
+                                                               C, *ResultOrErr))
+      return Err;
+
+    // Close the response message.
+    return C.endSendMessage();
+  }
+
+  template <typename ChannelT, typename FunctionIdT, typename SequenceNumberT>
+  static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+                          SequenceNumberT SeqNo, Error Err) {
+    if (Err)
+      return Err;
+    if (auto Err2 = C.startSendMessage(ResponseId, SeqNo))
+      return Err2;
+    return C.endSendMessage();
+  }
+
+};
+
+
+// Send a response of the given wire return type (WireRetT) over the
+// channel, with the given sequence number.
+template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+          typename FunctionIdT, typename SequenceNumberT>
+Error respond(ChannelT &C, const FunctionIdT &ResponseId,
+              SequenceNumberT SeqNo, Expected<HandlerRetT> ResultOrErr) {
+  return RespondHelper<SupportsErrorReturn<WireRetT>::value>::
+    template sendResult<WireRetT>(C, ResponseId, SeqNo, std::move(ResultOrErr));
+}
+
+// Send an empty response message on the given channel to indicate that
+// the handler ran.
+template <typename WireRetT, typename ChannelT, typename FunctionIdT,
+          typename SequenceNumberT>
+Error respond(ChannelT &C, const FunctionIdT &ResponseId, SequenceNumberT SeqNo,
+              Error Err) {
+  return RespondHelper<SupportsErrorReturn<WireRetT>::value>::
+    sendResult(C, ResponseId, SeqNo, std::move(Err));
+}
+
+// Converts a given type to the equivalent error return type.
+template <typename T> class WrappedHandlerReturn {
+public:
+  using Type = Expected<T>;
+};
+
+template <typename T> class WrappedHandlerReturn<Expected<T>> {
+public:
+  using Type = Expected<T>;
+};
+
+template <> class WrappedHandlerReturn<void> {
+public:
+  using Type = Error;
+};
+
+template <> class WrappedHandlerReturn<Error> {
+public:
+  using Type = Error;
+};
+
+template <> class WrappedHandlerReturn<ErrorSuccess> {
+public:
+  using Type = Error;
+};
+
+// Traits class that strips the response function from the list of handler
+// arguments.
+template <typename FnT> class AsyncHandlerTraits;
+
+template <typename ResultT, typename... ArgTs>
+class AsyncHandlerTraits<Error(std::function<Error(Expected<ResultT>)>, ArgTs...)> {
+public:
+  using Type = Error(ArgTs...);
+  using ResultType = Expected<ResultT>;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<Error(std::function<Error(Error)>, ArgTs...)> {
+public:
+  using Type = Error(ArgTs...);
+  using ResultType = Error;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<ErrorSuccess(std::function<Error(Error)>, ArgTs...)> {
+public:
+  using Type = Error(ArgTs...);
+  using ResultType = Error;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<void(std::function<Error(Error)>, ArgTs...)> {
+public:
+  using Type = Error(ArgTs...);
+  using ResultType = Error;
+};
+
+template <typename ResponseHandlerT, typename... ArgTs>
+class AsyncHandlerTraits<Error(ResponseHandlerT, ArgTs...)> :
+    public AsyncHandlerTraits<Error(typename std::decay<ResponseHandlerT>::type,
+                                    ArgTs...)> {};
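+
+// Illustrative sketch: an async handler for an RPC function returning
+// int32_t takes a responder as its first argument and forwards the result
+// through it (names are assumptions):
+//
+//   auto Handler = [](std::function<Error(Expected<int32_t>)> Respond,
+//                     int32_t X) -> Error {
+//     return Respond(X * 2); // send the result back to the caller
+//   };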
+
+// This template class provides utilities related to RPC function handlers.
+// The base case applies to non-function types (the template class is
+// specialized for function types) and inherits from the appropriate
+// specialization for the given non-function type's call operator.
+template <typename HandlerT>
+class HandlerTraits : public HandlerTraits<decltype(
+                          &std::remove_reference<HandlerT>::type::operator())> {
+};
+
+// Traits for handlers with a given function type.
+template <typename RetT, typename... ArgTs>
+class HandlerTraits<RetT(ArgTs...)> {
+public:
+  // Function type of the handler.
+  using Type = RetT(ArgTs...);
+
+  // Return type of the handler.
+  using ReturnType = RetT;
+
+  // Call the given handler with the given arguments.
+  template <typename HandlerT, typename... TArgTs>
+  static typename WrappedHandlerReturn<RetT>::Type
+  unpackAndRun(HandlerT &Handler, std::tuple<TArgTs...> &Args) {
+    return unpackAndRunHelper(Handler, Args,
+                              llvm::index_sequence_for<TArgTs...>());
+  }
+
+  // Call the given handler with the given arguments.
+  template <typename HandlerT, typename ResponderT, typename... TArgTs>
+  static Error unpackAndRunAsync(HandlerT &Handler, ResponderT &Responder,
+                                 std::tuple<TArgTs...> &Args) {
+    return unpackAndRunAsyncHelper(Handler, Responder, Args,
+                                   llvm::index_sequence_for<TArgTs...>());
+  }
+
+  // Call the given handler with the given arguments.
+  template <typename HandlerT>
+  static typename std::enable_if<
+      std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+      Error>::type
+  run(HandlerT &Handler, ArgTs &&... Args) {
+    Handler(std::move(Args)...);
+    return Error::success();
+  }
+
+  template <typename HandlerT, typename... TArgTs>
+  static typename std::enable_if<
+      !std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+      typename HandlerTraits<HandlerT>::ReturnType>::type
+  run(HandlerT &Handler, TArgTs... Args) {
+    return Handler(std::move(Args)...);
+  }
+
+  // Serialize arguments to the channel.
+  template <typename ChannelT, typename... CArgTs>
+  static Error serializeArgs(ChannelT &C, const CArgTs... CArgs) {
+    return SequenceSerialization<ChannelT, ArgTs...>::serialize(C, CArgs...);
+  }
+
+  // Deserialize arguments from the channel.
+  template <typename ChannelT, typename... CArgTs>
+  static Error deserializeArgs(ChannelT &C, std::tuple<CArgTs...> &Args) {
+    return deserializeArgsHelper(C, Args,
+                                 llvm::index_sequence_for<CArgTs...>());
+  }
+
+private:
+  template <typename ChannelT, typename... CArgTs, size_t... Indexes>
+  static Error deserializeArgsHelper(ChannelT &C, std::tuple<CArgTs...> &Args,
+                                     llvm::index_sequence<Indexes...> _) {
+    return SequenceSerialization<ChannelT, ArgTs...>::deserialize(
+        C, std::get<Indexes>(Args)...);
+  }
+
+  template <typename HandlerT, typename ArgTuple, size_t... Indexes>
+  static typename WrappedHandlerReturn<
+      typename HandlerTraits<HandlerT>::ReturnType>::Type
+  unpackAndRunHelper(HandlerT &Handler, ArgTuple &Args,
+                     llvm::index_sequence<Indexes...>) {
+    return run(Handler, std::move(std::get<Indexes>(Args))...);
+  }
+
+
+  template <typename HandlerT, typename ResponderT, typename ArgTuple,
+            size_t... Indexes>
+  static typename WrappedHandlerReturn<
+      typename HandlerTraits<HandlerT>::ReturnType>::Type
+  unpackAndRunAsyncHelper(HandlerT &Handler, ResponderT &Responder,
+                          ArgTuple &Args,
+                          llvm::index_sequence<Indexes...>) {
+    return run(Handler, Responder, std::move(std::get<Indexes>(Args))...);
+  }
+};
+
+// Handler traits for free functions.
+template <typename RetT, typename... ArgTs>
+class HandlerTraits<RetT(*)(ArgTs...)>
+  : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Handler traits for class methods (especially call operators for lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...)>
+    : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Handler traits for const class methods (especially call operators for
+// lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...) const>
+    : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Utility to peel the Expected wrapper off a response handler error type.
+template <typename HandlerT> class ResponseHandlerArg;
+
+template <typename ArgT> class ResponseHandlerArg<Error(Expected<ArgT>)> {
+public:
+  using ArgType = Expected<ArgT>;
+  using UnwrappedArgType = ArgT;
+};
+
+template <typename ArgT>
+class ResponseHandlerArg<ErrorSuccess(Expected<ArgT>)> {
+public:
+  using ArgType = Expected<ArgT>;
+  using UnwrappedArgType = ArgT;
+};
+
+template <> class ResponseHandlerArg<Error(Error)> {
+public:
+  using ArgType = Error;
+};
+
+template <> class ResponseHandlerArg<ErrorSuccess(Error)> {
+public:
+  using ArgType = Error;
+};
+
+// ResponseHandler represents a handler for a not-yet-received function call
+// result.
+template <typename ChannelT> class ResponseHandler {
+public:
+  virtual ~ResponseHandler() {}
+
+  // Reads the function result off the wire and acts on it. The meaning of
+  // "act" will depend on how this method is implemented in any given
+  // ResponseHandler subclass but could, for example, mean running a
+  // user-specified handler or setting a promise value.
+  virtual Error handleResponse(ChannelT &C) = 0;
+
+  // Abandons this outstanding result.
+  virtual void abandon() = 0;
+
+  // Create an error instance representing an abandoned response.
+  static Error createAbandonedResponseError() {
+    return make_error<ResponseAbandoned>();
+  }
+};
+
+// ResponseHandler subclass for RPC functions with non-void returns.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+class ResponseHandlerImpl : public ResponseHandler<ChannelT> {
+public:
+  ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+  // Handle the result by deserializing it from the channel then passing it
+  // to the user defined handler.
+  Error handleResponse(ChannelT &C) override {
+    using UnwrappedArgType = typename ResponseHandlerArg<
+        typename HandlerTraits<HandlerT>::Type>::UnwrappedArgType;
+    UnwrappedArgType Result;
+    if (auto Err =
+            SerializationTraits<ChannelT, FuncRetT,
+                                UnwrappedArgType>::deserialize(C, Result))
+      return Err;
+    if (auto Err = C.endReceiveMessage())
+      return Err;
+    return Handler(std::move(Result));
+  }
+
+  // Abandon this response by calling the handler with an 'abandoned response'
+  // error.
+  void abandon() override {
+    if (auto Err = Handler(this->createAbandonedResponseError())) {
+      // Handlers should not fail when passed an abandoned response error.
+      report_fatal_error(std::move(Err));
+    }
+  }
+
+private:
+  HandlerT Handler;
+};
+
+// ResponseHandler subclass for RPC functions with void returns.
+template <typename ChannelT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, void, HandlerT>
+    : public ResponseHandler<ChannelT> {
+public:
+  ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+  // Handle the result (no actual value, just a notification that the function
+  // has completed on the remote end) by calling the user-defined handler with
+  // Error::success().
+  Error handleResponse(ChannelT &C) override {
+    if (auto Err = C.endReceiveMessage())
+      return Err;
+    return Handler(Error::success());
+  }
+
+  // Abandon this response by calling the handler with an 'abandoned response'
+  // error.
+  void abandon() override {
+    if (auto Err = Handler(this->createAbandonedResponseError())) {
+      // Handlers should not fail when passed an abandoned response error.
+      report_fatal_error(std::move(Err));
+    }
+  }
+
+private:
+  HandlerT Handler;
+};
+
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, Expected<FuncRetT>, HandlerT>
+    : public ResponseHandler<ChannelT> {
+public:
+  ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+  // Handle the result by deserializing it from the channel then passing it
+  // to the user defined handler.
+  Error handleResponse(ChannelT &C) override {
+    using HandlerArgType = typename ResponseHandlerArg<
+        typename HandlerTraits<HandlerT>::Type>::ArgType;
+    HandlerArgType Result((typename HandlerArgType::value_type()));
+
+    if (auto Err =
+            SerializationTraits<ChannelT, Expected<FuncRetT>,
+                                HandlerArgType>::deserialize(C, Result))
+      return Err;
+    if (auto Err = C.endReceiveMessage())
+      return Err;
+    return Handler(std::move(Result));
+  }
+
+  // Abandon this response by calling the handler with an 'abandoned response'
+  // error.
+  void abandon() override {
+    if (auto Err = Handler(this->createAbandonedResponseError())) {
+      // Handlers should not fail when passed an abandoned response error.
+      report_fatal_error(std::move(Err));
+    }
+  }
+
+private:
+  HandlerT Handler;
+};
+
+template <typename ChannelT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, Error, HandlerT>
+    : public ResponseHandler<ChannelT> {
+public:
+  ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+  // Handle the result by deserializing it from the channel then passing it
+  // to the user defined handler.
+  Error handleResponse(ChannelT &C) override {
+    Error Result = Error::success();
+    if (auto Err =
+            SerializationTraits<ChannelT, Error, Error>::deserialize(C, Result))
+      return Err;
+    if (auto Err = C.endReceiveMessage())
+      return Err;
+    return Handler(std::move(Result));
+  }
+
+  // Abandon this response by calling the handler with an 'abandoned response'
+  // error.
+  void abandon() override {
+    if (auto Err = Handler(this->createAbandonedResponseError())) {
+      // Handlers should not fail when passed an abandoned response error.
+      report_fatal_error(std::move(Err));
+    }
+  }
+
+private:
+  HandlerT Handler;
+};
+
+// Create a ResponseHandler from a given user handler.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+std::unique_ptr<ResponseHandler<ChannelT>> createResponseHandler(HandlerT H) {
+  return llvm::make_unique<ResponseHandlerImpl<ChannelT, FuncRetT, HandlerT>>(
+      std::move(H));
+}
+
+// Helper for wrapping member functions up as functors. This is useful for
+// installing methods as result handlers.
+template <typename ClassT, typename RetT, typename... ArgTs>
+class MemberFnWrapper {
+public:
+  using MethodT = RetT (ClassT::*)(ArgTs...);
+  MemberFnWrapper(ClassT &Instance, MethodT Method)
+      : Instance(Instance), Method(Method) {}
+  RetT operator()(ArgTs &&... Args) {
+    return (Instance.*Method)(std::move(Args)...);
+  }
+
+private:
+  ClassT &Instance;
+  MethodT Method;
+};
+
+// Helper that provides a Functor for deserializing arguments.
+template <typename... ArgTs> class ReadArgs {
+public:
+  Error operator()() { return Error::success(); }
+};
+
+template <typename ArgT, typename... ArgTs>
+class ReadArgs<ArgT, ArgTs...> : public ReadArgs<ArgTs...> {
+public:
+  ReadArgs(ArgT &Arg, ArgTs &... Args)
+      : ReadArgs<ArgTs...>(Args...), Arg(Arg) {}
+
+  Error operator()(ArgT &ArgVal, ArgTs &... ArgVals) {
+    this->Arg = std::move(ArgVal);
+    return ReadArgs<ArgTs...>::operator()(ArgVals...);
+  }
+
+private:
+  ArgT &Arg;
+};
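+
+// Illustrative sketch: ReadArgs bundles references to locals so deserialized
+// argument values land directly in them (names are assumptions):
+//
+//   int32_t X;
+//   std::string S;
+//   ReadArgs<int32_t, std::string> Args(X, S);
+//   // Invoking Args(IncomingX, IncomingS) moves the incoming values into X
+//   // and S and returns Error::success().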
+
+// Manage sequence numbers.
+template <typename SequenceNumberT> class SequenceNumberManager {
+public:
+  // Reset, making all sequence numbers available.
+  void reset() {
+    std::lock_guard<std::mutex> Lock(SeqNoLock);
+    NextSequenceNumber = 0;
+    FreeSequenceNumbers.clear();
+  }
+
+  // Get the next available sequence number. Will re-use numbers that have
+  // been released.
+  SequenceNumberT getSequenceNumber() {
+    std::lock_guard<std::mutex> Lock(SeqNoLock);
+    if (FreeSequenceNumbers.empty())
+      return NextSequenceNumber++;
+    auto SequenceNumber = FreeSequenceNumbers.back();
+    FreeSequenceNumbers.pop_back();
+    return SequenceNumber;
+  }
+
+  // Release a sequence number, making it available for re-use.
+  void releaseSequenceNumber(SequenceNumberT SequenceNumber) {
+    std::lock_guard<std::mutex> Lock(SeqNoLock);
+    FreeSequenceNumbers.push_back(SequenceNumber);
+  }
+
+private:
+  std::mutex SeqNoLock;
+  SequenceNumberT NextSequenceNumber = 0;
+  std::vector<SequenceNumberT> FreeSequenceNumbers;
+};
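+
+// Example (a minimal sketch): released sequence numbers are recycled before
+// new ones are minted, so a release followed by an allocation observes the
+// released number again:
+//
+//   SequenceNumberManager<uint32_t> Mgr;
+//   auto A = Mgr.getSequenceNumber(); // 0
+//   auto B = Mgr.getSequenceNumber(); // 1
+//   Mgr.releaseSequenceNumber(A);
+//   auto C = Mgr.getSequenceNumber(); // 0 again, re-used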
+
+// Checks that predicate P holds for each corresponding pair of type arguments
+// from T1 and T2 tuple.
+template <template <class, class> class P, typename T1Tuple, typename T2Tuple>
+class RPCArgTypeCheckHelper;
+
+template <template <class, class> class P>
+class RPCArgTypeCheckHelper<P, std::tuple<>, std::tuple<>> {
+public:
+  static const bool value = true;
+};
+
+template <template <class, class> class P, typename T, typename... Ts,
+          typename U, typename... Us>
+class RPCArgTypeCheckHelper<P, std::tuple<T, Ts...>, std::tuple<U, Us...>> {
+public:
+  static const bool value =
+      P<T, U>::value &&
+      RPCArgTypeCheckHelper<P, std::tuple<Ts...>, std::tuple<Us...>>::value;
+};
+
+template <template <class, class> class P, typename T1Sig, typename T2Sig>
+class RPCArgTypeCheck {
+public:
+  using T1Tuple = typename FunctionArgsTuple<T1Sig>::Type;
+  using T2Tuple = typename FunctionArgsTuple<T2Sig>::Type;
+
+  static_assert(std::tuple_size<T1Tuple>::value >=
+                    std::tuple_size<T2Tuple>::value,
+                "Too many arguments to RPC call");
+  static_assert(std::tuple_size<T1Tuple>::value <=
+                    std::tuple_size<T2Tuple>::value,
+                "Too few arguments to RPC call");
+
+  static const bool value = RPCArgTypeCheckHelper<P, T1Tuple, T2Tuple>::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanSerialize {
+private:
+  using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+  template <typename T>
+  static std::true_type
+  check(typename std::enable_if<
+        std::is_same<decltype(T::serialize(std::declval<ChannelT &>(),
+                                           std::declval<const ConcreteT &>())),
+                     Error>::value,
+        void *>::type);
+
+  template <typename> static std::false_type check(...);
+
+public:
+  static const bool value = decltype(check<S>(0))::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanDeserialize {
+private:
+  using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+  template <typename T>
+  static std::true_type
+  check(typename std::enable_if<
+        std::is_same<decltype(T::deserialize(std::declval<ChannelT &>(),
+                                             std::declval<ConcreteT &>())),
+                     Error>::value,
+        void *>::type);
+
+  template <typename> static std::false_type check(...);
+
+public:
+  static const bool value = decltype(check<S>(0))::value;
+};
+
+/// Contains primitive utilities for defining, calling and handling calls to
+/// remote procedures. ChannelT is a bidirectional stream conforming to the
+/// RPCChannel interface (see RPCChannel.h), FunctionIdT is a procedure
+/// identifier type that must be serializable on ChannelT, and SequenceNumberT
+/// is an integral type that will be used to number in-flight function calls.
+///
+/// These utilities support the construction of very primitive RPC utilities.
+/// Their intent is to ensure correct serialization and deserialization of
+/// procedure arguments, and to keep the client and server's view of the API in
+/// sync.
+template <typename ImplT, typename ChannelT, typename FunctionIdT,
+          typename SequenceNumberT>
+class RPCEndpointBase {
+protected:
+  class OrcRPCInvalid : public Function<OrcRPCInvalid, void()> {
+  public:
+    static const char *getName() { return "__orc_rpc$invalid"; }
+  };
+
+  class OrcRPCResponse : public Function<OrcRPCResponse, void()> {
+  public:
+    static const char *getName() { return "__orc_rpc$response"; }
+  };
+
+  class OrcRPCNegotiate
+      : public Function<OrcRPCNegotiate, FunctionIdT(std::string)> {
+  public:
+    static const char *getName() { return "__orc_rpc$negotiate"; }
+  };
+
+  // Helper predicate for testing for the presence of SerializeTraits
+  // serializers.
+  template <typename WireT, typename ConcreteT>
+  class CanSerializeCheck : detail::CanSerialize<ChannelT, WireT, ConcreteT> {
+  public:
+    using detail::CanSerialize<ChannelT, WireT, ConcreteT>::value;
+
+    static_assert(value, "Missing serializer for argument (Can't serialize the "
+                         "first template type argument of CanSerializeCheck "
+                         "from the second)");
+  };
+
+  // Helper predicate for testing for the presence of SerializeTraits
+  // deserializers.
+  template <typename WireT, typename ConcreteT>
+  class CanDeserializeCheck
+      : detail::CanDeserialize<ChannelT, WireT, ConcreteT> {
+  public:
+    using detail::CanDeserialize<ChannelT, WireT, ConcreteT>::value;
+
+    static_assert(value, "Missing deserializer for argument (Can't deserialize "
+                         "the second template type argument of "
+                         "CanDeserializeCheck from the first)");
+  };
+
+public:
+  /// Construct an RPC instance on a channel.
+  RPCEndpointBase(ChannelT &C, bool LazyAutoNegotiation)
+      : C(C), LazyAutoNegotiation(LazyAutoNegotiation) {
+    // Hold ResponseId in a special variable, since we expect Response to be
+    // called relatively frequently, and want to avoid the map lookup.
+    ResponseId = FnIdAllocator.getResponseId();
+    RemoteFunctionIds[OrcRPCResponse::getPrototype()] = ResponseId;
+
+    // Register the negotiate function id and handler.
+    auto NegotiateId = FnIdAllocator.getNegotiateId();
+    RemoteFunctionIds[OrcRPCNegotiate::getPrototype()] = NegotiateId;
+    Handlers[NegotiateId] = wrapHandler<OrcRPCNegotiate>(
+        [this](const std::string &Name) { return handleNegotiate(Name); });
+  }
+
+
+  /// Negotiate a function id for Func with the other end of the channel.
+  template <typename Func> Error negotiateFunction(bool Retry = false) {
+    return getRemoteFunctionId<Func>(true, Retry).takeError();
+  }
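+
+  // Example (a minimal sketch): eagerly verify that the remote implements a
+  // function before making any calls. Ping is a hypothetical function, e.g.
+  // `class Ping : public Function<Ping, void()>`:
+  //
+  //   if (auto Err = EP.negotiateFunction<Ping>())
+  //     /* remote does not export Ping; handle Err */;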
+
+  /// Append a call to Func. This does not call send on the channel.
+  /// The first argument specifies a user-defined handler to be run when the
+  /// function returns. The handler should take an Expected<Func::ReturnType>,
+  /// or an Error (if Func::ReturnType is void). The handler will be called
+  /// with an error if the return value is abandoned due to a channel error.
+  template <typename Func, typename HandlerT, typename... ArgTs>
+  Error appendCallAsync(HandlerT Handler, const ArgTs &... Args) {
+
+    static_assert(
+        detail::RPCArgTypeCheck<CanSerializeCheck, typename Func::Type,
+                                void(ArgTs...)>::value,
+        "");
+
+    // Look up the function ID.
+    FunctionIdT FnId;
+    if (auto FnIdOrErr = getRemoteFunctionId<Func>(LazyAutoNegotiation, false))
+      FnId = *FnIdOrErr;
+    else {
+      // Negotiation failed. Notify the handler then return the negotiate-failed
+      // error.
+      cantFail(Handler(make_error<ResponseAbandoned>()));
+      return FnIdOrErr.takeError();
+    }
+
+    SequenceNumberT SeqNo; // initialized in locked scope below.
+    {
+      // Lock the pending responses map and sequence number manager.
+      std::lock_guard<std::mutex> Lock(ResponsesMutex);
+
+      // Allocate a sequence number.
+      SeqNo = SequenceNumberMgr.getSequenceNumber();
+      assert(!PendingResponses.count(SeqNo) &&
+             "Sequence number already allocated");
+
+      // Install the user handler.
+      PendingResponses[SeqNo] =
+        detail::createResponseHandler<ChannelT, typename Func::ReturnType>(
+            std::move(Handler));
+    }
+
+    // Open the function call message.
+    if (auto Err = C.startSendMessage(FnId, SeqNo)) {
+      abandonPendingResponses();
+      return Err;
+    }
+
+    // Serialize the call arguments.
+    if (auto Err = detail::HandlerTraits<typename Func::Type>::serializeArgs(
+            C, Args...)) {
+      abandonPendingResponses();
+      return Err;
+    }
+
+    // Close the function call message.
+    if (auto Err = C.endSendMessage()) {
+      abandonPendingResponses();
+      return Err;
+    }
+
+    return Error::success();
+  }
+
+  /// Send any calls previously appended with appendCallAsync.
+  Error sendAppendedCalls() { return C.send(); }
+
+  template <typename Func, typename HandlerT, typename... ArgTs>
+  Error callAsync(HandlerT Handler, const ArgTs &... Args) {
+    if (auto Err = appendCallAsync<Func>(std::move(Handler), Args...))
+      return Err;
+    return C.send();
+  }
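+
+  // Example (a minimal sketch): issue an asynchronous call and handle the
+  // result in a lambda. Add is a hypothetical function, e.g.
+  // `class Add : public Function<Add, int32_t(int32_t, int32_t)>`, and EP is
+  // one of the concrete endpoints defined below:
+  //
+  //   if (auto Err = EP.callAsync<Add>(
+  //           [](Expected<int32_t> Sum) -> Error {
+  //             if (!Sum)
+  //               return Sum.takeError();
+  //             outs() << "1 + 2 = " << *Sum << "\n";
+  //             return Error::success();
+  //           },
+  //           1, 2))
+  //     /* handle channel/serialization error */;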
+
+  /// Handle one incoming call.
+  Error handleOne() {
+    FunctionIdT FnId;
+    SequenceNumberT SeqNo;
+    if (auto Err = C.startReceiveMessage(FnId, SeqNo)) {
+      abandonPendingResponses();
+      return Err;
+    }
+    if (FnId == ResponseId)
+      return handleResponse(SeqNo);
+    auto I = Handlers.find(FnId);
+    if (I != Handlers.end())
+      return I->second(C, SeqNo);
+
+    // No handler found: report a bad function call error to the caller.
+    return make_error<BadFunctionCall<FunctionIdT, SequenceNumberT>>(FnId,
+                                                                     SeqNo);
+  }
+
+  /// Helper for handling setter procedures - this method returns a functor that
+  /// sets the variables referred to by Args... to values deserialized from the
+  /// channel.
+  /// E.g.
+  ///
+  ///   class Func1 : public Function<Func1, void(bool, int)> {
+  ///   public:
+  ///     static const char *getName() { return "Func1"; }
+  ///   };
+  ///
+  ///   ...
+  ///   bool B;
+  ///   int I;
+  ///   if (auto Err = expect<Func1>(Channel, readArgs(B, I)))
+  ///     /* Handle Args */ ;
+  ///
+  template <typename... ArgTs>
+  static detail::ReadArgs<ArgTs...> readArgs(ArgTs &... Args) {
+    return detail::ReadArgs<ArgTs...>(Args...);
+  }
+
+  /// Abandon all outstanding result handlers.
+  ///
+  /// This will call all currently registered result handlers to receive an
+  /// "abandoned" error as their argument. This is used internally by the RPC
+  /// in error situations, but can also be called directly by clients who are
+  /// disconnecting from the remote and don't or can't expect responses to their
+  /// outstanding calls. (Especially for outstanding blocking calls, calling
+  /// this function may be necessary to avoid dead threads).
+  void abandonPendingResponses() {
+    // Lock the pending responses map and sequence number manager.
+    std::lock_guard<std::mutex> Lock(ResponsesMutex);
+
+    for (auto &KV : PendingResponses)
+      KV.second->abandon();
+    PendingResponses.clear();
+    SequenceNumberMgr.reset();
+  }
+
+  /// Remove the handler for the given function.
+  /// A handler must currently be registered for this function.
+  template <typename Func>
+  void removeHandler() {
+    auto IdItr = LocalFunctionIds.find(Func::getPrototype());
+    assert(IdItr != LocalFunctionIds.end() &&
+           "Function does not have a registered handler");
+    auto HandlerItr = Handlers.find(IdItr->second);
+    assert(HandlerItr != Handlers.end() &&
+           "Function does not have a registered handler");
+    Handlers.erase(HandlerItr);
+  }
+
+  /// Clear all handlers.
+  void clearHandlers() {
+    Handlers.clear();
+  }
+
+protected:
+
+  FunctionIdT getInvalidFunctionId() const {
+    return FnIdAllocator.getInvalidId();
+  }
+
+  /// Add the given handler to the handler map and make it available for
+  /// autonegotiation and execution.
+  template <typename Func, typename HandlerT>
+  void addHandlerImpl(HandlerT Handler) {
+
+    static_assert(detail::RPCArgTypeCheck<
+                      CanDeserializeCheck, typename Func::Type,
+                      typename detail::HandlerTraits<HandlerT>::Type>::value,
+                  "");
+
+    FunctionIdT NewFnId = FnIdAllocator.template allocate<Func>();
+    LocalFunctionIds[Func::getPrototype()] = NewFnId;
+    Handlers[NewFnId] = wrapHandler<Func>(std::move(Handler));
+  }
+
+  template <typename Func, typename HandlerT>
+  void addAsyncHandlerImpl(HandlerT Handler) {
+
+    static_assert(detail::RPCArgTypeCheck<
+                      CanDeserializeCheck, typename Func::Type,
+                      typename detail::AsyncHandlerTraits<
+                        typename detail::HandlerTraits<HandlerT>::Type
+                      >::Type>::value,
+                  "");
+
+    FunctionIdT NewFnId = FnIdAllocator.template allocate<Func>();
+    LocalFunctionIds[Func::getPrototype()] = NewFnId;
+    Handlers[NewFnId] = wrapAsyncHandler<Func>(std::move(Handler));
+  }
+
+  Error handleResponse(SequenceNumberT SeqNo) {
+    using Handler = typename decltype(PendingResponses)::mapped_type;
+    Handler PRHandler;
+
+    {
+      // Lock the pending responses map and sequence number manager.
+      std::unique_lock<std::mutex> Lock(ResponsesMutex);
+      auto I = PendingResponses.find(SeqNo);
+
+      if (I != PendingResponses.end()) {
+        PRHandler = std::move(I->second);
+        PendingResponses.erase(I);
+        SequenceNumberMgr.releaseSequenceNumber(SeqNo);
+      } else {
+        // Unlock the pending results map to prevent recursive lock.
+        Lock.unlock();
+        abandonPendingResponses();
+        return make_error<
+                 InvalidSequenceNumberForResponse<SequenceNumberT>>(SeqNo);
+      }
+    }
+
+    assert(PRHandler &&
+           "If we didn't find a response handler we should have bailed out");
+
+    if (auto Err = PRHandler->handleResponse(C)) {
+      abandonPendingResponses();
+      return Err;
+    }
+
+    return Error::success();
+  }
+
+  FunctionIdT handleNegotiate(const std::string &Name) {
+    auto I = LocalFunctionIds.find(Name);
+    if (I == LocalFunctionIds.end())
+      return getInvalidFunctionId();
+    return I->second;
+  }
+
+  // Find the remote FunctionId for the given function.
+  template <typename Func>
+  Expected<FunctionIdT> getRemoteFunctionId(bool NegotiateIfNotInMap,
+                                            bool NegotiateIfInvalid) {
+    bool DoNegotiate;
+
+    // Check if we already have a function id...
+    auto I = RemoteFunctionIds.find(Func::getPrototype());
+    if (I != RemoteFunctionIds.end()) {
+      // If it's valid there's nothing left to do.
+      if (I->second != getInvalidFunctionId())
+        return I->second;
+      DoNegotiate = NegotiateIfInvalid;
+    } else
+      DoNegotiate = NegotiateIfNotInMap;
+
+    // We don't have a valid function id for Func yet. Try to negotiate one
+    // if the caller allowed it.
+    if (DoNegotiate) {
+      auto &Impl = static_cast<ImplT &>(*this);
+      if (auto RemoteIdOrErr =
+          Impl.template callB<OrcRPCNegotiate>(Func::getPrototype())) {
+        RemoteFunctionIds[Func::getPrototype()] = *RemoteIdOrErr;
+        if (*RemoteIdOrErr == getInvalidFunctionId())
+          return make_error<CouldNotNegotiate>(Func::getPrototype());
+        return *RemoteIdOrErr;
+      } else
+        return RemoteIdOrErr.takeError();
+    }
+
+    // No key was available in the map and we weren't allowed to try to
+    // negotiate one, so return an unknown function error.
+    return make_error<CouldNotNegotiate>(Func::getPrototype());
+  }
+
+  using WrappedHandlerFn = std::function<Error(ChannelT &, SequenceNumberT)>;
+
+  // Wrap the given user handler in the necessary argument-deserialization code,
+  // result-serialization code, and call to the launch policy (if present).
+  template <typename Func, typename HandlerT>
+  WrappedHandlerFn wrapHandler(HandlerT Handler) {
+    return [this, Handler](ChannelT &Channel,
+                           SequenceNumberT SeqNo) mutable -> Error {
+      // Start by deserializing the arguments.
+      using ArgsTuple =
+          typename detail::FunctionArgsTuple<
+            typename detail::HandlerTraits<HandlerT>::Type>::Type;
+      auto Args = std::make_shared<ArgsTuple>();
+
+      if (auto Err =
+              detail::HandlerTraits<typename Func::Type>::deserializeArgs(
+                  Channel, *Args))
+        return Err;
+
+      // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
+      // for Args. Void cast Args to work around this for now.
+      // FIXME: Remove this workaround once we can assume a working GCC version.
+      (void)Args;
+
+      // End receive message, unlocking the channel for reading.
+      if (auto Err = Channel.endReceiveMessage())
+        return Err;
+
+      using HTraits = detail::HandlerTraits<HandlerT>;
+      using FuncReturn = typename Func::ReturnType;
+      return detail::respond<FuncReturn>(Channel, ResponseId, SeqNo,
+                                         HTraits::unpackAndRun(Handler, *Args));
+    };
+  }
+
+  // Wrap the given user handler in the necessary argument-deserialization code,
+  // result-serialization code, and call to the launch policy (if present).
+  template <typename Func, typename HandlerT>
+  WrappedHandlerFn wrapAsyncHandler(HandlerT Handler) {
+    return [this, Handler](ChannelT &Channel,
+                           SequenceNumberT SeqNo) mutable -> Error {
+      // Start by deserializing the arguments.
+      using AHTraits = detail::AsyncHandlerTraits<
+                         typename detail::HandlerTraits<HandlerT>::Type>;
+      using ArgsTuple =
+          typename detail::FunctionArgsTuple<typename AHTraits::Type>::Type;
+      auto Args = std::make_shared<ArgsTuple>();
+
+      if (auto Err =
+              detail::HandlerTraits<typename Func::Type>::deserializeArgs(
+                  Channel, *Args))
+        return Err;
+
+      // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
+      // for Args. Void cast Args to work around this for now.
+      // FIXME: Remove this workaround once we can assume a working GCC version.
+      (void)Args;
+
+      // End receive message, unlocking the channel for reading.
+      if (auto Err = Channel.endReceiveMessage())
+        return Err;
+
+      using HTraits = detail::HandlerTraits<HandlerT>;
+      using FuncReturn = typename Func::ReturnType;
+      auto Responder =
+        [this, SeqNo](typename AHTraits::ResultType RetVal) -> Error {
+          return detail::respond<FuncReturn>(C, ResponseId, SeqNo,
+                                             std::move(RetVal));
+        };
+
+      return HTraits::unpackAndRunAsync(Handler, Responder, *Args);
+    };
+  }
+
+  ChannelT &C;
+
+  bool LazyAutoNegotiation;
+
+  RPCFunctionIdAllocator<FunctionIdT> FnIdAllocator;
+
+  FunctionIdT ResponseId;
+  std::map<std::string, FunctionIdT> LocalFunctionIds;
+  std::map<const char *, FunctionIdT> RemoteFunctionIds;
+
+  std::map<FunctionIdT, WrappedHandlerFn> Handlers;
+
+  std::mutex ResponsesMutex;
+  detail::SequenceNumberManager<SequenceNumberT> SequenceNumberMgr;
+  std::map<SequenceNumberT, std::unique_ptr<detail::ResponseHandler<ChannelT>>>
+      PendingResponses;
+};
+
+} // end namespace detail
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+          typename SequenceNumberT = uint32_t>
+class MultiThreadedRPCEndpoint
+    : public detail::RPCEndpointBase<
+          MultiThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+          ChannelT, FunctionIdT, SequenceNumberT> {
+private:
+  using BaseClass =
+      detail::RPCEndpointBase<
+        MultiThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+        ChannelT, FunctionIdT, SequenceNumberT>;
+
+public:
+  MultiThreadedRPCEndpoint(ChannelT &C, bool LazyAutoNegotiation)
+      : BaseClass(C, LazyAutoNegotiation) {}
+
+  /// Add a handler for the given RPC function.
+  /// This installs the given handler functor for the given RPC Function, and
+  /// makes the RPC function available for negotiation/calling from the remote.
+  template <typename Func, typename HandlerT>
+  void addHandler(HandlerT Handler) {
+    return this->template addHandlerImpl<Func>(std::move(Handler));
+  }
+
+  /// Add a class-method as a handler.
+  template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+  void addHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+    addHandler<Func>(
+      detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+  }
+
+  template <typename Func, typename HandlerT>
+  void addAsyncHandler(HandlerT Handler) {
+    return this->template addAsyncHandlerImpl<Func>(std::move(Handler));
+  }
+
+  /// Add a class-method as a handler.
+  template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+  void addAsyncHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+    addAsyncHandler<Func>(
+      detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+  }
+
+  /// Return type for non-blocking call primitives.
+  template <typename Func>
+  using NonBlockingCallResult = typename detail::ResultTraits<
+      typename Func::ReturnType>::ReturnFutureType;
+
+  /// Call Func on Channel C. Does not block and does not call send. Returns a
+  /// future for the result, or an Error if the call could not be appended.
+  ///
+  /// The caller is responsible for flushing the channel (e.g. via
+  /// sendAppendedCalls) so that the appended call is actually transmitted, and
+  /// for ensuring that incoming responses are handled (e.g. by a thread
+  /// running handlerLoop), otherwise the returned future will never be ready.
+  template <typename Func, typename... ArgTs>
+  Expected<NonBlockingCallResult<Func>> appendCallNB(const ArgTs &... Args) {
+    using RTraits = detail::ResultTraits<typename Func::ReturnType>;
+    using ErrorReturn = typename RTraits::ErrorReturnType;
+    using ErrorReturnPromise = typename RTraits::ReturnPromiseType;
+
+    // FIXME: Stack allocate and move this into the handler once LLVM builds
+    //        with C++14.
+    auto Promise = std::make_shared<ErrorReturnPromise>();
+    auto FutureResult = Promise->get_future();
+
+    if (auto Err = this->template appendCallAsync<Func>(
+            [Promise](ErrorReturn RetOrErr) {
+              Promise->set_value(std::move(RetOrErr));
+              return Error::success();
+            },
+            Args...)) {
+      RTraits::consumeAbandoned(FutureResult.get());
+      return std::move(Err);
+    }
+    return std::move(FutureResult);
+  }
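+
+  // Example (a minimal sketch, re-using the hypothetical Add function from
+  // above): append a non-blocking call, flush manually, then wait on the
+  // returned future. This assumes another thread is servicing responses,
+  // e.g. by running handlerLoop():
+  //
+  //   auto FutureSum = cantFail(EP.appendCallNB<Add>(3, 4));
+  //   cantFail(EP.sendAppendedCalls());
+  //   Expected<int32_t> Sum = FutureSum.get(); // blocks until the response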
+
+  /// The same as appendCallNB, except that it calls C.send() to flush the
+  /// channel after serializing the call.
+  template <typename Func, typename... ArgTs>
+  Expected<NonBlockingCallResult<Func>> callNB(const ArgTs &... Args) {
+    auto Result = appendCallNB<Func>(Args...);
+    if (!Result)
+      return Result;
+    if (auto Err = this->C.send()) {
+      this->abandonPendingResponses();
+      detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+          std::move(Result->get()));
+      return std::move(Err);
+    }
+    return Result;
+  }
+
+  /// Call Func on Channel C. Blocks waiting for a result. Returns an Error
+  /// for void functions or an Expected<T> for functions returning a T.
+  ///
+  /// This function is for use in threaded code where another thread is
+  /// handling responses and incoming calls.
+  template <typename Func, typename... ArgTs,
+            typename AltRetT = typename Func::ReturnType>
+  typename detail::ResultTraits<AltRetT>::ErrorReturnType
+  callB(const ArgTs &... Args) {
+    if (auto FutureResOrErr = callNB<Func>(Args...))
+      return FutureResOrErr->get();
+    else
+      return FutureResOrErr.takeError();
+  }
+
+  /// Handle incoming RPC calls.
+  Error handlerLoop() {
+    while (true)
+      if (auto Err = this->handleOne())
+        return Err;
+    return Error::success();
+  }
+};
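+
+// Example (a minimal sketch, assuming a user-defined QueueChannel that models
+// the RawByteChannel interface and the hypothetical Add function from above):
+// each endpoint runs a handler thread so that blocking calls made on other
+// threads can complete.
+//
+//   MultiThreadedRPCEndpoint<QueueChannel> Server(ServerC, true);
+//   Server.addHandler<Add>([](int32_t X, int32_t Y) { return X + Y; });
+//   std::thread ServerThread([&]() { consumeError(Server.handlerLoop()); });
+//
+//   MultiThreadedRPCEndpoint<QueueChannel> Client(ClientC, true);
+//   std::thread ClientThread([&]() { consumeError(Client.handlerLoop()); });
+//   Expected<int32_t> Sum = Client.callB<Add>(1, 2); // blocks for the result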
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+          typename SequenceNumberT = uint32_t>
+class SingleThreadedRPCEndpoint
+    : public detail::RPCEndpointBase<
+          SingleThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+          ChannelT, FunctionIdT, SequenceNumberT> {
+private:
+  using BaseClass =
+      detail::RPCEndpointBase<
+        SingleThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+        ChannelT, FunctionIdT, SequenceNumberT>;
+
+public:
+  SingleThreadedRPCEndpoint(ChannelT &C, bool LazyAutoNegotiation)
+      : BaseClass(C, LazyAutoNegotiation) {}
+
+  template <typename Func, typename HandlerT>
+  void addHandler(HandlerT Handler) {
+    return this->template addHandlerImpl<Func>(std::move(Handler));
+  }
+
+  template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+  void addHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+    addHandler<Func>(
+        detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+  }
+
+  template <typename Func, typename HandlerT>
+  void addAsyncHandler(HandlerT Handler) {
+    return this->template addAsyncHandlerImpl<Func>(std::move(Handler));
+  }
+
+  /// Add a class-method as a handler.
+  template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+  void addAsyncHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+    addAsyncHandler<Func>(
+      detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+  }
+
+  template <typename Func, typename... ArgTs,
+            typename AltRetT = typename Func::ReturnType>
+  typename detail::ResultTraits<AltRetT>::ErrorReturnType
+  callB(const ArgTs &... Args) {
+    bool ReceivedResponse = false;
+    using ResultType = typename detail::ResultTraits<AltRetT>::ErrorReturnType;
+    auto Result = detail::ResultTraits<AltRetT>::createBlankErrorReturnValue();
+
+    // We have to 'check' Result (which we know is in a success state at this
+    // point) so that it can safely be overwritten in the async handler.
+    (void)!!Result;
+
+    if (auto Err = this->template appendCallAsync<Func>(
+            [&](ResultType R) {
+              Result = std::move(R);
+              ReceivedResponse = true;
+              return Error::success();
+            },
+            Args...)) {
+      detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+          std::move(Result));
+      return std::move(Err);
+    }
+
+    while (!ReceivedResponse) {
+      if (auto Err = this->handleOne()) {
+        detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+            std::move(Result));
+        return std::move(Err);
+      }
+    }
+
+    return Result;
+  }
+};
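+
+// Example (a minimal sketch, re-using the hypothetical QueueChannel and Add):
+// a single-threaded endpoint needs no handler thread; callB pumps handleOne
+// itself until its own response arrives, servicing any other incoming work
+// along the way:
+//
+//   SingleThreadedRPCEndpoint<QueueChannel> Client(ClientC, true);
+//   if (Expected<int32_t> Sum = Client.callB<Add>(1, 2))
+//     /* use *Sum */;
+//   else
+//     consumeError(Sum.takeError());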
+
+/// Asynchronous dispatch for a function on an RPC endpoint.
+template <typename RPCClass, typename Func>
+class RPCAsyncDispatch {
+public:
+  RPCAsyncDispatch(RPCClass &Endpoint) : Endpoint(Endpoint) {}
+
+  template <typename HandlerT, typename... ArgTs>
+  Error operator()(HandlerT Handler, const ArgTs &... Args) const {
+    return Endpoint.template appendCallAsync<Func>(std::move(Handler), Args...);
+  }
+
+private:
+  RPCClass &Endpoint;
+};
+
+/// Construct an asynchronous dispatcher from an RPC endpoint and a Func.
+template <typename Func, typename RPCEndpointT>
+RPCAsyncDispatch<RPCEndpointT, Func> rpcAsyncDispatch(RPCEndpointT &Endpoint) {
+  return RPCAsyncDispatch<RPCEndpointT, Func>(Endpoint);
+}
+
+/// \brief Allows a set of asynchronous calls to be dispatched, and then
+///        waited on as a group.
+class ParallelCallGroup {
+public:
+
+  ParallelCallGroup() = default;
+  ParallelCallGroup(const ParallelCallGroup &) = delete;
+  ParallelCallGroup &operator=(const ParallelCallGroup &) = delete;
+
+  /// \brief Make an asynchronous call.
+  template <typename AsyncDispatcher, typename HandlerT, typename... ArgTs>
+  Error call(const AsyncDispatcher &AsyncDispatch, HandlerT Handler,
+             const ArgTs &... Args) {
+    // Increment the count of outstanding calls. This has to happen before
+    // we invoke the call, as the handler may (depending on scheduling)
+    // be run immediately on another thread, and we don't want the decrement
+    // in the wrapped handler below to run before the increment.
+    {
+      std::unique_lock<std::mutex> Lock(M);
+      ++NumOutstandingCalls;
+    }
+
+    // Wrap the user handler in a lambda that will decrement the
+    // outstanding calls count, then poke the condition variable.
+    using ArgType = typename detail::ResponseHandlerArg<
+        typename detail::HandlerTraits<HandlerT>::Type>::ArgType;
+    // FIXME: Move handler into wrapped handler once we have C++14.
+    auto WrappedHandler = [this, Handler](ArgType Arg) {
+      auto Err = Handler(std::move(Arg));
+      std::unique_lock<std::mutex> Lock(M);
+      --NumOutstandingCalls;
+      CV.notify_all();
+      return Err;
+    };
+
+    return AsyncDispatch(std::move(WrappedHandler), Args...);
+  }
+
+  /// \brief Blocks until all calls have been completed and their return value
+  ///        handlers run.
+  void wait() {
+    std::unique_lock<std::mutex> Lock(M);
+    while (NumOutstandingCalls > 0)
+      CV.wait(Lock);
+  }
+
+private:
+  std::mutex M;
+  std::condition_variable CV;
+  uint32_t NumOutstandingCalls = 0;
+};
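+
+// Example (a minimal sketch, re-using the hypothetical Add function and a
+// Client endpoint from above): dispatch several calls as a batch, then block
+// until every result handler has run.
+//
+//   ParallelCallGroup PCG;
+//   auto AsyncAdd = rpcAsyncDispatch<Add>(Client);
+//   for (int32_t I = 0; I != 4; ++I)
+//     cantFail(PCG.call(AsyncAdd,
+//                       [](Expected<int32_t> R) -> Error {
+//                         if (!R)
+//                           return R.takeError();
+//                         return Error::success();
+//                       },
+//                       I, I));
+//   cantFail(Client.sendAppendedCalls());
+//   PCG.wait();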
+
+/// @brief Convenience class for grouping RPC Functions into APIs that can be
+///        negotiated as a block.
+///
+template <typename... Funcs>
+class APICalls {
+public:
+
+  /// @brief Test whether this API contains Function F.
+  template <typename F>
+  class Contains {
+  public:
+    static const bool value = false;
+  };
+
+  /// @brief Negotiate all functions in this API.
+  template <typename RPCEndpoint>
+  static Error negotiate(RPCEndpoint &R) {
+    return Error::success();
+  }
+};
+
+template <typename Func, typename... Funcs>
+class APICalls<Func, Funcs...> {
+public:
+
+  template <typename F>
+  class Contains {
+  public:
+    static const bool value = std::is_same<F, Func>::value ||
+                              APICalls<Funcs...>::template Contains<F>::value;
+  };
+
+  template <typename RPCEndpoint>
+  static Error negotiate(RPCEndpoint &R) {
+    if (auto Err = R.template negotiateFunction<Func>())
+      return Err;
+    return APICalls<Funcs...>::negotiate(R);
+  }
+
+};
+
+template <typename... InnerFuncs, typename... Funcs>
+class APICalls<APICalls<InnerFuncs...>, Funcs...> {
+public:
+
+  template <typename F>
+  class Contains {
+  public:
+    static const bool value =
+      APICalls<InnerFuncs...>::template Contains<F>::value ||
+      APICalls<Funcs...>::template Contains<F>::value;
+  };
+
+  template <typename RPCEndpoint>
+  static Error negotiate(RPCEndpoint &R) {
+    if (auto Err = APICalls<InnerFuncs...>::negotiate(R))
+      return Err;
+    return APICalls<Funcs...>::negotiate(R);
+  }
+
+};
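+
+// Example (a minimal sketch, with hypothetical functions Add, Mul and Ping):
+// group functions into APIs, which may nest, and negotiate them as a block:
+//
+//   using MathAPI = APICalls<Add, Mul>;
+//   using FullAPI = APICalls<MathAPI, Ping>;
+//   static_assert(FullAPI::Contains<Mul>::value, "Mul should be in FullAPI");
+//
+//   if (auto Err = FullAPI::negotiate(Client))
+//     /* remote is missing at least one function; handle Err */;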
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
new file mode 100644
index 0000000..8f0d9fa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -0,0 +1,349 @@
+//===- RTDyldObjectLinkingLayer.h - RTDyld-based jit linking  ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for an RTDyld-based, in-process object linking layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Legacy.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+class RTDyldObjectLinkingLayerBase {
+public:
+  using ObjectPtr = std::unique_ptr<MemoryBuffer>;
+
+protected:
+
+  /// @brief Holds an object to be allocated/linked as a unit in the JIT.
+  ///
+  /// An instance of this class will be created for each object added
+  /// via JITObjectLayer::addObject. Deleting the instance (via
+  /// removeObject) frees its memory, removing all symbol definitions that
+  /// had been provided by this instance. Higher level layers are responsible
+  /// for taking any action required to handle the missing symbols.
+  class LinkedObject {
+  public:
+    LinkedObject() = default;
+    LinkedObject(const LinkedObject&) = delete;
+    void operator=(const LinkedObject&) = delete;
+    virtual ~LinkedObject() = default;
+
+    virtual Error finalize() = 0;
+
+    virtual JITSymbol::GetAddressFtor
+    getSymbolMaterializer(std::string Name) = 0;
+
+    virtual void mapSectionAddress(const void *LocalAddress,
+                                   JITTargetAddress TargetAddr) const = 0;
+
+    JITSymbol getSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+      auto SymEntry = SymbolTable.find(Name);
+      if (SymEntry == SymbolTable.end())
+        return nullptr;
+      if (!SymEntry->second.getFlags().isExported() && ExportedSymbolsOnly)
+        return nullptr;
+      if (!Finalized)
+        return JITSymbol(getSymbolMaterializer(Name),
+                         SymEntry->second.getFlags());
+      return JITSymbol(SymEntry->second);
+    }
+
+  protected:
+    StringMap<JITEvaluatedSymbol> SymbolTable;
+    bool Finalized = false;
+  };
+};
+
+/// @brief Bare bones object linking layer.
+///
+///   This class is intended to be used as the base layer for a JIT. It allows
+/// object files to be loaded into memory, linked, and the addresses of their
+/// symbols queried. All objects added to this layer can see each other's
+/// symbols.
+class RTDyldObjectLinkingLayer : public RTDyldObjectLinkingLayerBase {
+public:
+
+  using RTDyldObjectLinkingLayerBase::ObjectPtr;
+
+  /// @brief Functor for receiving object-loaded notifications.
+  using NotifyLoadedFtor =
+      std::function<void(VModuleKey, const object::ObjectFile &Obj,
+                         const RuntimeDyld::LoadedObjectInfo &)>;
+
+  /// @brief Functor for receiving finalization notifications.
+  using NotifyFinalizedFtor = std::function<void(VModuleKey)>;
+
+private:
+  using OwnedObject = object::OwningBinary<object::ObjectFile>;
+
+  template <typename MemoryManagerPtrT>
+  class ConcreteLinkedObject : public LinkedObject {
+  public:
+    ConcreteLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+                         OwnedObject Obj, MemoryManagerPtrT MemMgr,
+                         std::shared_ptr<SymbolResolver> Resolver,
+                         bool ProcessAllSections)
+        : MemMgr(std::move(MemMgr)),
+          PFC(llvm::make_unique<PreFinalizeContents>(
+              Parent, std::move(K), std::move(Obj), std::move(Resolver),
+              ProcessAllSections)) {
+      buildInitialSymbolTable(PFC->Obj);
+    }
+
+    ~ConcreteLinkedObject() override {
+      MemMgr->deregisterEHFrames();
+    }
+
+    Error finalize() override {
+      assert(PFC && "finalize called on already-finalized LinkedObject");
+
+      JITSymbolResolverAdapter ResolverAdapter(PFC->Parent.ES, *PFC->Resolver);
+      PFC->RTDyld = llvm::make_unique<RuntimeDyld>(*MemMgr, ResolverAdapter);
+      PFC->RTDyld->setProcessAllSections(PFC->ProcessAllSections);
+
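+      // Mark this object finalized now (before loading): a symbol query that
+      // lands while the object is loading would otherwise re-enter finalize()
+      // through the materializer returned by getSymbol.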
+      Finalized = true;
+
+      std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info =
+          PFC->RTDyld->loadObject(*PFC->Obj.getBinary());
+
+      // Copy the symbol table out of the RuntimeDyld instance.
+      {
+        auto SymTab = PFC->RTDyld->getSymbolTable();
+        for (auto &KV : SymTab)
+          SymbolTable[KV.first] = KV.second;
+      }
+
+      if (PFC->Parent.NotifyLoaded)
+        PFC->Parent.NotifyLoaded(PFC->K, *PFC->Obj.getBinary(), *Info);
+
+      PFC->RTDyld->finalizeWithMemoryManagerLocking();
+
+      if (PFC->RTDyld->hasError())
+        return make_error<StringError>(PFC->RTDyld->getErrorString(),
+                                       inconvertibleErrorCode());
+
+      if (PFC->Parent.NotifyFinalized)
+        PFC->Parent.NotifyFinalized(PFC->K);
+
+      // Release resources.
+      PFC = nullptr;
+      return Error::success();
+    }
+
+    JITSymbol::GetAddressFtor getSymbolMaterializer(std::string Name) override {
+      return [this, Name]() -> Expected<JITTargetAddress> {
+        // The symbol may be materialized between the creation of this lambda
+        // and its execution, so we need to double check.
+        if (!this->Finalized)
+          if (auto Err = this->finalize())
+            return std::move(Err);
+        return this->getSymbol(Name, false).getAddress();
+      };
+    }
+
+    void mapSectionAddress(const void *LocalAddress,
+                           JITTargetAddress TargetAddr) const override {
+      assert(PFC && "mapSectionAddress called on finalized LinkedObject");
+      assert(PFC->RTDyld && "mapSectionAddress called on raw LinkedObject");
+      PFC->RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
+    }
+
+  private:
+    void buildInitialSymbolTable(const OwnedObject &Obj) {
+      for (auto &Symbol : Obj.getBinary()->symbols()) {
+        if (Symbol.getFlags() & object::SymbolRef::SF_Undefined)
+          continue;
+        Expected<StringRef> SymbolName = Symbol.getName();
+        // FIXME: Raise an error for bad symbols.
+        if (!SymbolName) {
+          consumeError(SymbolName.takeError());
+          continue;
+        }
+        auto Flags = JITSymbolFlags::fromObjectSymbol(Symbol);
+        SymbolTable.insert(
+          std::make_pair(*SymbolName, JITEvaluatedSymbol(0, Flags)));
+      }
+    }
+
+    // Contains the information needed prior to finalization: the object files,
+    // memory manager, resolver, and flags needed for RuntimeDyld.
+    struct PreFinalizeContents {
+      PreFinalizeContents(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+                          OwnedObject Obj,
+                          std::shared_ptr<SymbolResolver> Resolver,
+                          bool ProcessAllSections)
+          : Parent(Parent), K(std::move(K)), Obj(std::move(Obj)),
+            Resolver(std::move(Resolver)),
+            ProcessAllSections(ProcessAllSections) {}
+
+      RTDyldObjectLinkingLayer &Parent;
+      VModuleKey K;
+      OwnedObject Obj;
+      std::shared_ptr<SymbolResolver> Resolver;
+      bool ProcessAllSections;
+      std::unique_ptr<RuntimeDyld> RTDyld;
+    };
+
+    MemoryManagerPtrT MemMgr;
+    std::unique_ptr<PreFinalizeContents> PFC;
+  };
+
+  template <typename MemoryManagerPtrT>
+  std::unique_ptr<ConcreteLinkedObject<MemoryManagerPtrT>>
+  createLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+                     OwnedObject Obj, MemoryManagerPtrT MemMgr,
+                     std::shared_ptr<SymbolResolver> Resolver,
+                     bool ProcessAllSections) {
+    using LOS = ConcreteLinkedObject<MemoryManagerPtrT>;
+    return llvm::make_unique<LOS>(Parent, std::move(K), std::move(Obj),
+                                  std::move(MemMgr), std::move(Resolver),
+                                  ProcessAllSections);
+  }
+
+public:
+  struct Resources {
+    std::shared_ptr<RuntimeDyld::MemoryManager> MemMgr;
+    std::shared_ptr<SymbolResolver> Resolver;
+  };
+
+  using ResourcesGetter = std::function<Resources(VModuleKey)>;
+
+  /// @brief Construct an RTDyldObjectLinkingLayer with the given NotifyLoaded
+  ///        and NotifyFinalized functors.
+  RTDyldObjectLinkingLayer(
+      ExecutionSession &ES, ResourcesGetter GetResources,
+      NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
+      NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
+      : ES(ES), GetResources(std::move(GetResources)),
+        NotifyLoaded(std::move(NotifyLoaded)),
+        NotifyFinalized(std::move(NotifyFinalized)), ProcessAllSections(false) {
+  }
+
+  /// @brief Set the 'ProcessAllSections' flag.
+  ///
+  /// If set to true, all sections in each object file will be allocated using
+  /// the memory manager, rather than just the sections required for execution.
+  ///
+  /// This is kludgy, and may be removed in the future.
+  void setProcessAllSections(bool ProcessAllSections) {
+    this->ProcessAllSections = ProcessAllSections;
+  }
+
+  /// @brief Add an object to the JIT.
+  Error addObject(VModuleKey K, ObjectPtr ObjBuffer) {
+
+    auto Obj =
+        object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+    if (!Obj)
+      return Obj.takeError();
+
+    assert(!LinkedObjects.count(K) && "VModuleKey already in use");
+
+    auto R = GetResources(K);
+
+    LinkedObjects[K] = createLinkedObject(
+        *this, K, OwnedObject(std::move(*Obj), std::move(ObjBuffer)),
+        std::move(R.MemMgr), std::move(R.Resolver), ProcessAllSections);
+
+    return Error::success();
+  }
+
+  /// @brief Remove the object associated with VModuleKey K.
+  ///
+  ///   All memory allocated for the object will be freed, and the sections and
+  /// symbols it provided will no longer be available. No attempt is made to
+  /// re-emit the missing symbols, and any use of these symbols (directly or
+  /// indirectly) will result in undefined behavior. If dependence tracking is
+  /// required to detect or resolve such issues it should be added at a higher
+  /// layer.
+  Error removeObject(VModuleKey K) {
+    assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+    // FIXME: How do we invalidate the symbols that the object keyed by K
+    //        provided?
+    LinkedObjects.erase(K);
+    return Error::success();
+  }
+
+  /// @brief Search for the given named symbol.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it exists.
+  JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+    for (auto &KV : LinkedObjects)
+      if (auto Sym = KV.second->getSymbol(Name, ExportedSymbolsOnly))
+        return Sym;
+      else if (auto Err = Sym.takeError())
+        return std::move(Err);
+
+    return nullptr;
+  }
+
+  /// @brief Search for the given named symbol in the context of the loaded
+  ///        object represented by the VModuleKey K.
+  /// @param K The VModuleKey for the object to search in.
+  /// @param Name The name of the symbol to search for.
+  /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+  /// @return A handle for the given named symbol, if it is found in the
+  ///         given object.
+  JITSymbol findSymbolIn(VModuleKey K, StringRef Name,
+                         bool ExportedSymbolsOnly) {
+    assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+    return LinkedObjects[K]->getSymbol(Name, ExportedSymbolsOnly);
+  }
+
+  /// @brief Map section addresses for the object associated with the
+  ///        VModuleKey K.
+  void mapSectionAddress(VModuleKey K, const void *LocalAddress,
+                         JITTargetAddress TargetAddr) {
+    assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+    LinkedObjects[K]->mapSectionAddress(LocalAddress, TargetAddr);
+  }
+
+  /// @brief Immediately emit and finalize the object represented by the given
+  ///        VModuleKey.
+  /// @param K VModuleKey for object to emit/finalize.
+  Error emitAndFinalize(VModuleKey K) {
+    assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+    return LinkedObjects[K]->finalize();
+  }
+
+private:
+  ExecutionSession &ES;
+
+  std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
+  ResourcesGetter GetResources;
+  NotifyLoadedFtor NotifyLoaded;
+  NotifyFinalizedFtor NotifyFinalized;
+  bool ProcessAllSections = false;
+};
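+
+// Example (a minimal sketch, assuming an existing ExecutionSession ES, a
+// shared SymbolResolver Resolver, and an object buffer ObjBuffer): hand each
+// module a fresh SectionMemoryManager, then add and query an object.
+//
+//   RTDyldObjectLinkingLayer ObjLayer(ES, [&](VModuleKey) {
+//     return RTDyldObjectLinkingLayer::Resources{
+//         std::make_shared<SectionMemoryManager>(), Resolver};
+//   });
+//
+//   VModuleKey K = ES.allocateVModule();
+//   cantFail(ObjLayer.addObject(K, std::move(ObjBuffer)));
+//   if (auto Sym = ObjLayer.findSymbol("main", /*ExportedSymbolsOnly=*/true))
+//     JITTargetAddress Addr = cantFail(Sym.getAddress());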
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
new file mode 100644
index 0000000..db810f4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
@@ -0,0 +1,185 @@
+//===- llvm/ExecutionEngine/Orc/RawByteChannel.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+#define LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <mutex>
+#include <string>
+#include <type_traits>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+/// Interface for byte-streams to be used with RPC.
+class RawByteChannel {
+public:
+  virtual ~RawByteChannel() = default;
+
+  /// Read Size bytes from the stream into *Dst.
+  virtual Error readBytes(char *Dst, unsigned Size) = 0;
+
+  /// Read size bytes from *Src and append them to the stream.
+  virtual Error appendBytes(const char *Src, unsigned Size) = 0;
+
+  /// Flush the stream if possible.
+  virtual Error send() = 0;
+
+  /// Notify the channel that we're starting a message send.
+  /// Locks the channel for writing.
+  template <typename FunctionIdT, typename SequenceIdT>
+  Error startSendMessage(const FunctionIdT &FnId, const SequenceIdT &SeqNo) {
+    writeLock.lock();
+    if (auto Err = serializeSeq(*this, FnId, SeqNo)) {
+      writeLock.unlock();
+      return Err;
+    }
+    return Error::success();
+  }
+
+  /// Notify the channel that we're ending a message send.
+  /// Unlocks the channel for writing.
+  Error endSendMessage() {
+    writeLock.unlock();
+    return Error::success();
+  }
+
+  /// Notify the channel that we're starting a message receive.
+  /// Locks the channel for reading.
+  template <typename FunctionIdT, typename SequenceNumberT>
+  Error startReceiveMessage(FunctionIdT &FnId, SequenceNumberT &SeqNo) {
+    readLock.lock();
+    if (auto Err = deserializeSeq(*this, FnId, SeqNo)) {
+      readLock.unlock();
+      return Err;
+    }
+    return Error::success();
+  }
+
+  /// Notify the channel that we're ending a message receive.
+  /// Unlocks the channel for reading.
+  Error endReceiveMessage() {
+    readLock.unlock();
+    return Error::success();
+  }
+
+  /// Get the lock for stream reading.
+  std::mutex &getReadLock() { return readLock; }
+
+  /// Get the lock for stream writing.
+  std::mutex &getWriteLock() { return writeLock; }
+
+private:
+  std::mutex readLock, writeLock;
+};
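+
+// Example (a minimal sketch): a same-process channel that buffers writes in
+// memory. A production channel would block in readBytes until data arrives;
+// this sketch assumes reads only happen after the matching writes.
+//
+//   class BufferChannel : public RawByteChannel {
+//   public:
+//     Error readBytes(char *Dst, unsigned Size) override {
+//       if (Buffer.size() - ReadIdx < Size)
+//         return make_error<StringError>("short read",
+//                                        inconvertibleErrorCode());
+//       memcpy(Dst, Buffer.data() + ReadIdx, Size);
+//       ReadIdx += Size;
+//       return Error::success();
+//     }
+//     Error appendBytes(const char *Src, unsigned Size) override {
+//       Buffer.append(Src, Size);
+//       return Error::success();
+//     }
+//     Error send() override { return Error::success(); }
+//   private:
+//     std::string Buffer;
+//     size_t ReadIdx = 0;
+//   };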
+
+template <typename ChannelT, typename T>
+class SerializationTraits<
+    ChannelT, T, T,
+    typename std::enable_if<
+        std::is_base_of<RawByteChannel, ChannelT>::value &&
+        (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
+         std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
+         std::is_same<T, uint32_t>::value || std::is_same<T, int32_t>::value ||
+         std::is_same<T, uint64_t>::value || std::is_same<T, int64_t>::value ||
+         std::is_same<T, char>::value)>::type> {
+public:
+  static Error serialize(ChannelT &C, T V) {
+    // Convert to big-endian wire order before writing. Note that byte_swap
+    // returns the swapped value, so the result must be assigned back.
+    V = support::endian::byte_swap<T, support::big>(V);
+    return C.appendBytes(reinterpret_cast<const char *>(&V), sizeof(T));
+  }
+
+  static Error deserialize(ChannelT &C, T &V) {
+    if (auto Err = C.readBytes(reinterpret_cast<char *>(&V), sizeof(T)))
+      return Err;
+    // Convert from big-endian wire order back to the host representation.
+    V = support::endian::byte_swap<T, support::big>(V);
+    return Error::success();
+  }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, bool, bool,
+                          typename std::enable_if<std::is_base_of<
+                              RawByteChannel, ChannelT>::value>::type> {
+public:
+  static Error serialize(ChannelT &C, bool V) {
+    uint8_t Tmp = V ? 1 : 0;
+    if (auto Err =
+          C.appendBytes(reinterpret_cast<const char *>(&Tmp), 1))
+      return Err;
+    return Error::success();
+  }
+
+  static Error deserialize(ChannelT &C, bool &V) {
+    uint8_t Tmp = 0;
+    if (auto Err = C.readBytes(reinterpret_cast<char *>(&Tmp), 1))
+      return Err;
+    V = Tmp != 0;
+    return Error::success();
+  }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, StringRef,
+                          typename std::enable_if<std::is_base_of<
+                              RawByteChannel, ChannelT>::value>::type> {
+public:
+  /// RPC channel serialization for StringRefs (sent as std::strings).
+  static Error serialize(RawByteChannel &C, StringRef S) {
+    if (auto Err = serializeSeq(C, static_cast<uint64_t>(S.size())))
+      return Err;
+    return C.appendBytes((const char *)S.data(), S.size());
+  }
+};
+
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, std::string, T,
+                          typename std::enable_if<
+                            std::is_base_of<RawByteChannel, ChannelT>::value &&
+                            (std::is_same<T, const char*>::value ||
+                             std::is_same<T, char*>::value)>::type> {
+public:
+  static Error serialize(RawByteChannel &C, const char *S) {
+    return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+                                                                            S);
+  }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, std::string,
+                          typename std::enable_if<std::is_base_of<
+                              RawByteChannel, ChannelT>::value>::type> {
+public:
+  /// RPC channel serialization for std::strings.
+  static Error serialize(RawByteChannel &C, const std::string &S) {
+    return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+                                                                            S);
+  }
+
+  /// RPC channel deserialization for std::strings.
+  static Error deserialize(RawByteChannel &C, std::string &S) {
+    uint64_t Count = 0;
+    if (auto Err = deserializeSeq(C, Count))
+      return Err;
+    S.resize(Count);
+    return C.readBytes(&S[0], Count);
+  }
+};
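+
+// Example (a minimal sketch): user types become wire-serializable by
+// specializing SerializationTraits (in namespace llvm::orc::rpc) in terms of
+// fields that already have serializers. Point is a hypothetical type:
+//
+//   struct Point { int32_t X, Y; };
+//
+//   template <typename ChannelT>
+//   class SerializationTraits<
+//       ChannelT, Point, Point,
+//       typename std::enable_if<
+//           std::is_base_of<RawByteChannel, ChannelT>::value>::type> {
+//   public:
+//     static Error serialize(ChannelT &C, const Point &P) {
+//       return serializeSeq(C, P.X, P.Y);
+//     }
+//     static Error deserialize(ChannelT &C, Point &P) {
+//       return deserializeSeq(C, P.X, P.Y);
+//     }
+//   };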
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h
new file mode 100644
index 0000000..b95faaa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h
@@ -0,0 +1,529 @@
+//===------ RemoteObjectLayer.h - Forwards objs to a remote -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Forwards objects to a remote object layer via RPC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
+
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include <map>
+
+namespace llvm {
+namespace orc {
+
+/// RPC API needed by RemoteObjectClientLayer and RemoteObjectServerLayer.
+class RemoteObjectLayerAPI {
+public:
+
+  using ObjHandleT = remote::ResourceIdMgr::ResourceId;
+
+protected:
+
+  using RemoteSymbolId = remote::ResourceIdMgr::ResourceId;
+  using RemoteSymbol = std::pair<RemoteSymbolId, JITSymbolFlags>;
+
+public:
+
+  using BadSymbolHandleError = remote::ResourceNotFound<RemoteSymbolId>;
+  using BadObjectHandleError = remote::ResourceNotFound<ObjHandleT>;
+
+protected:
+
+  static const ObjHandleT InvalidObjectHandleId = 0;
+  static const RemoteSymbolId NullSymbolId = 0;
+
+  class AddObject
+    : public rpc::Function<AddObject, Expected<ObjHandleT>(std::string)> {
+  public:
+    static const char *getName() { return "AddObject"; }
+  };
+
+  class RemoveObject
+    : public rpc::Function<RemoveObject, Error(ObjHandleT)> {
+  public:
+    static const char *getName() { return "RemoveObject"; }
+  };
+
+  class FindSymbol
+    : public rpc::Function<FindSymbol, Expected<RemoteSymbol>(std::string,
+                                                              bool)> {
+  public:
+    static const char *getName() { return "FindSymbol"; }
+  };
+
+  class FindSymbolIn
+    : public rpc::Function<FindSymbolIn,
+                           Expected<RemoteSymbol>(ObjHandleT, std::string,
+                                                  bool)> {
+  public:
+    static const char *getName() { return "FindSymbolIn"; }
+  };
+
+  class EmitAndFinalize
+    : public rpc::Function<EmitAndFinalize,
+                           Error(ObjHandleT)> {
+  public:
+    static const char *getName() { return "EmitAndFinalize"; }
+  };
+
+  class Lookup
+    : public rpc::Function<Lookup,
+                           Expected<RemoteSymbol>(ObjHandleT, std::string)> {
+  public:
+    static const char *getName() { return "Lookup"; }
+  };
+
+  class LookupInLogicalDylib
+    : public rpc::Function<LookupInLogicalDylib,
+                           Expected<RemoteSymbol>(ObjHandleT, std::string)> {
+  public:
+    static const char *getName() { return "LookupInLogicalDylib"; }
+  };
+
+  class ReleaseRemoteSymbol
+    : public rpc::Function<ReleaseRemoteSymbol, Error(RemoteSymbolId)> {
+  public:
+    static const char *getName() { return "ReleaseRemoteSymbol"; }
+  };
+
+  class MaterializeRemoteSymbol
+    : public rpc::Function<MaterializeRemoteSymbol,
+                           Expected<JITTargetAddress>(RemoteSymbolId)> {
+  public:
+    static const char *getName() { return "MaterializeRemoteSymbol"; }
+  };
+};
+
+/// Base class containing common utilities for RemoteObjectClientLayer and
+/// RemoteObjectServerLayer.
+template <typename RPCEndpoint>
+class RemoteObjectLayer : public RemoteObjectLayerAPI {
+public:
+
+  RemoteObjectLayer(RPCEndpoint &Remote,
+                    std::function<void(Error)> ReportError)
+      : Remote(Remote), ReportError(std::move(ReportError)),
+        SymbolIdMgr(NullSymbolId + 1) {
+    using ThisT = RemoteObjectLayer<RPCEndpoint>;
+    Remote.template addHandler<ReleaseRemoteSymbol>(
+             *this, &ThisT::handleReleaseRemoteSymbol);
+    Remote.template addHandler<MaterializeRemoteSymbol>(
+             *this, &ThisT::handleMaterializeRemoteSymbol);
+  }
+
+protected:
+
+  /// This class is used as the symbol materializer for JITSymbols returned by
+  /// RemoteObjectLayerClient/RemoteObjectLayerServer -- the materializer knows
+  /// how to call back to the other RPC endpoint to get the address when
+  /// requested.
+  class RemoteSymbolMaterializer {
+  public:
+
+    /// Construct a RemoteSymbolMaterializer for the given RemoteObjectLayer
+    /// with the given Id.
+    RemoteSymbolMaterializer(RemoteObjectLayer &C,
+                             RemoteSymbolId Id)
+      : C(C), Id(Id) {}
+
+    RemoteSymbolMaterializer(const RemoteSymbolMaterializer &Other)
+      : C(Other.C), Id(Other.Id) {
+      // FIXME: This is a horrible, auto_ptr-style, copy-as-move operation.
+      //        It should be removed as soon as LLVM has C++14's generalized
+      //        lambda capture (at which point the materializer can be moved
+      //        into the lambda in remoteToJITSymbol below).
+      const_cast<RemoteSymbolMaterializer&>(Other).Id = 0;
+    }
+
+    RemoteSymbolMaterializer&
+    operator=(const RemoteSymbolMaterializer&) = delete;
+
+    /// Release the remote symbol.
+    ~RemoteSymbolMaterializer() {
+      if (Id)
+        C.releaseRemoteSymbol(Id);
+    }
+
+    /// Materialize the symbol on the remote and get its address.
+    Expected<JITTargetAddress> materialize() {
+      auto Addr = C.materializeRemoteSymbol(Id);
+      Id = 0;
+      return Addr;
+    }
+
+  private:
+    RemoteObjectLayer &C;
+    RemoteSymbolId Id;
+  };
+
+  /// Convenience function for getting a null remote symbol value.
+  RemoteSymbol nullRemoteSymbol() {
+    return RemoteSymbol(0, JITSymbolFlags());
+  }
+
+  /// Creates a StringError that contains a copy of Err's log message, then
+  /// sends that StringError to ReportError.
+  ///
+  /// This allows us to locally log error messages for errors that will actually
+  /// be delivered to the remote.
+  Error teeLog(Error Err) {
+    return handleErrors(std::move(Err),
+                        [this](std::unique_ptr<ErrorInfoBase> EIB) {
+                          ReportError(make_error<StringError>(
+                                        EIB->message(),
+                                        EIB->convertToErrorCode()));
+                          return Error(std::move(EIB));
+                        });
+  }
+
+  Error badRemoteSymbolIdError(RemoteSymbolId Id) {
+    return make_error<BadSymbolHandleError>(Id, "Remote JIT Symbol");
+  }
+
+  Error badObjectHandleError(ObjHandleT H) {
+    return make_error<RemoteObjectLayerAPI::BadObjectHandleError>(
+             H, "Bad object handle");
+  }
+
+  /// Create a RemoteSymbol wrapping the given JITSymbol.
+  Expected<RemoteSymbol> jitSymbolToRemote(JITSymbol Sym) {
+    if (Sym) {
+      auto Id = SymbolIdMgr.getNext();
+      auto Flags = Sym.getFlags();
+      assert(!InUseSymbols.count(Id) && "Symbol id already in use");
+      InUseSymbols.insert(std::make_pair(Id, std::move(Sym)));
+      return RemoteSymbol(Id, Flags);
+    } else if (auto Err = Sym.takeError())
+      return teeLog(std::move(Err));
+    // else...
+    return nullRemoteSymbol();
+  }
+
+  /// Convert an Expected<RemoteSymbol> to a JITSymbol.
+  JITSymbol remoteToJITSymbol(Expected<RemoteSymbol> RemoteSymOrErr) {
+    if (RemoteSymOrErr) {
+      auto &RemoteSym = *RemoteSymOrErr;
+      if (RemoteSym == nullRemoteSymbol())
+        return nullptr;
+      // else...
+      RemoteSymbolMaterializer RSM(*this, RemoteSym.first);
+      auto Sym =
+        JITSymbol([RSM]() mutable { return RSM.materialize(); },
+                  RemoteSym.second);
+      return Sym;
+    } else
+      return RemoteSymOrErr.takeError();
+  }
+
+  RPCEndpoint &Remote;
+  std::function<void(Error)> ReportError;
+
+private:
+
+  /// Notify the remote to release the given JITSymbol.
+  void releaseRemoteSymbol(RemoteSymbolId Id) {
+    if (auto Err = Remote.template callB<ReleaseRemoteSymbol>(Id))
+      ReportError(std::move(Err));
+  }
+
+  /// Notify the remote to materialize the JITSymbol with the given Id and
+  /// return its address.
+  Expected<JITTargetAddress> materializeRemoteSymbol(RemoteSymbolId Id) {
+    return Remote.template callB<MaterializeRemoteSymbol>(Id);
+  }
+
+  /// Release the JITSymbol with the given Id.
+  Error handleReleaseRemoteSymbol(RemoteSymbolId Id) {
+    auto SI = InUseSymbols.find(Id);
+    if (SI != InUseSymbols.end()) {
+      InUseSymbols.erase(SI);
+      return Error::success();
+    } else
+      return teeLog(badRemoteSymbolIdError(Id));
+  }
+
+  /// Run the materializer for the JITSymbol with the given Id and return its
+  /// address.
+  Expected<JITTargetAddress> handleMaterializeRemoteSymbol(RemoteSymbolId Id) {
+    auto SI = InUseSymbols.find(Id);
+    if (SI != InUseSymbols.end()) {
+      auto AddrOrErr = SI->second.getAddress();
+      InUseSymbols.erase(SI);
+      SymbolIdMgr.release(Id);
+      if (AddrOrErr)
+        return *AddrOrErr;
+      else
+        return teeLog(AddrOrErr.takeError());
+    } else {
+      return teeLog(badRemoteSymbolIdError(Id));
+    }
+  }
+
+  remote::ResourceIdMgr SymbolIdMgr;
+  std::map<RemoteSymbolId, JITSymbol> InUseSymbols;
+};
+
+/// RemoteObjectClientLayer forwards the ORC Object Layer API over an RPC
+/// connection.
+///
+/// This class can be used as the base layer of a JIT stack on the client and
+/// will forward operations to a corresponding RemoteObjectServerLayer on the
+/// server (which can be composed on top of a "real" object layer like
+/// RTDyldObjectLinkingLayer to actually carry out the operations).
+///
+/// Sending relocatable objects to the server (rather than fully relocated
+/// bits) allows JIT'd code to be cached on the server side and re-used in
+/// subsequent JIT sessions.
+template <typename RPCEndpoint>
+class RemoteObjectClientLayer : public RemoteObjectLayer<RPCEndpoint> {
+private:
+
+  using AddObject = RemoteObjectLayerAPI::AddObject;
+  using RemoveObject = RemoteObjectLayerAPI::RemoveObject;
+  using FindSymbol = RemoteObjectLayerAPI::FindSymbol;
+  using FindSymbolIn = RemoteObjectLayerAPI::FindSymbolIn;
+  using EmitAndFinalize = RemoteObjectLayerAPI::EmitAndFinalize;
+  using Lookup = RemoteObjectLayerAPI::Lookup;
+  using LookupInLogicalDylib = RemoteObjectLayerAPI::LookupInLogicalDylib;
+
+  using RemoteObjectLayer<RPCEndpoint>::teeLog;
+  using RemoteObjectLayer<RPCEndpoint>::badObjectHandleError;
+  using RemoteObjectLayer<RPCEndpoint>::remoteToJITSymbol;
+
+public:
+
+  using ObjHandleT = RemoteObjectLayerAPI::ObjHandleT;
+  using RemoteSymbol = RemoteObjectLayerAPI::RemoteSymbol;
+
+  using ObjectPtr = std::unique_ptr<MemoryBuffer>;
+
+  /// Create a RemoteObjectClientLayer that communicates with a
+  /// RemoteObjectServerLayer instance via the given RPCEndpoint.
+  ///
+  /// The ReportError functor can be used to locally log errors that are
+  /// intended to be sent to the remote.
+  RemoteObjectClientLayer(RPCEndpoint &Remote,
+                          std::function<void(Error)> ReportError)
+      : RemoteObjectLayer<RPCEndpoint>(Remote, std::move(ReportError)) {
+    using ThisT = RemoteObjectClientLayer<RPCEndpoint>;
+    Remote.template addHandler<Lookup>(*this, &ThisT::lookup);
+    Remote.template addHandler<LookupInLogicalDylib>(
+            *this, &ThisT::lookupInLogicalDylib);
+  }
+
+  /// @brief Add an object to the JIT.
+  ///
+  /// @return A handle that can be used to refer to the loaded object (for
+  ///         symbol searching, finalization, freeing memory, etc.).
+  Expected<ObjHandleT>
+  addObject(ObjectPtr ObjBuffer,
+            std::shared_ptr<LegacyJITSymbolResolver> Resolver) {
+    if (auto HandleOrErr =
+            this->Remote.template callB<AddObject>(ObjBuffer->getBuffer())) {
+      auto &Handle = *HandleOrErr;
+      // FIXME: Return an error for this:
+      assert(!Resolvers.count(Handle) && "Handle already in use?");
+      Resolvers[Handle] = std::move(Resolver);
+      return Handle;
+    } else
+      return HandleOrErr.takeError();
+  }
+
+  /// @brief Remove the given object from the JIT.
+  Error removeObject(ObjHandleT H) {
+    return this->Remote.template callB<RemoveObject>(H);
+  }
+
+  /// @brief Search for the given named symbol.
+  JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+    return remoteToJITSymbol(
+             this->Remote.template callB<FindSymbol>(Name,
+                                                     ExportedSymbolsOnly));
+  }
+
+  /// @brief Search for the given named symbol within the given context.
+  JITSymbol findSymbolIn(ObjHandleT H, StringRef Name,
+                         bool ExportedSymbolsOnly) {
+    return remoteToJITSymbol(
+             this->Remote.template callB<FindSymbolIn>(H, Name,
+                                                       ExportedSymbolsOnly));
+  }
+
+  /// @brief Immediately emit and finalize the object with the given handle.
+  Error emitAndFinalize(ObjHandleT H) {
+    return this->Remote.template callB<EmitAndFinalize>(H);
+  }
+
+private:
+
+  Expected<RemoteSymbol> lookup(ObjHandleT H, const std::string &Name) {
+    auto RI = Resolvers.find(H);
+    if (RI != Resolvers.end()) {
+      return this->jitSymbolToRemote(RI->second->findSymbol(Name));
+    } else
+      return teeLog(badObjectHandleError(H));
+  }
+
+  Expected<RemoteSymbol> lookupInLogicalDylib(ObjHandleT H,
+                                              const std::string &Name) {
+    auto RI = Resolvers.find(H);
+    if (RI != Resolvers.end())
+      return this->jitSymbolToRemote(
+               RI->second->findSymbolInLogicalDylib(Name));
+    else
+      return teeLog(badObjectHandleError(H));
+  }
+
+  std::map<remote::ResourceIdMgr::ResourceId,
+           std::shared_ptr<LegacyJITSymbolResolver>>
+      Resolvers;
+};
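+// Example (illustrative sketch, not part of this header): wiring a
+// RemoteObjectClientLayer into a client-side JIT stack. The endpoint type
+// `MyRPCEndpoint`, its construction, and the `Resolver` object are
+// assumptions supplied by the embedding application; only the layer API
+// itself comes from the declarations above.
+//
+//   MyRPCEndpoint Remote = connectToServer();              // hypothetical
+//   RemoteObjectClientLayer<MyRPCEndpoint> Client(
+//       Remote, [](Error Err) {
+//         logAllUnhandledErrors(std::move(Err), errs(), "JIT client: ");
+//       });
+//   auto Obj = MemoryBuffer::getFile("foo.o");             // check for error
+//   if (auto H = Client.addObject(std::move(*Obj), Resolver)) {
+//     if (auto Err = Client.emitAndFinalize(*H))
+//       /* handle Err */;
+//     JITSymbol Main = Client.findSymbolIn(*H, "main", true);
+//   } // else: consume the error with H.takeError()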
+
+/// RemoteObjectServerLayer acts as a server, handling RPC calls for the
+/// object layer API from the given RPC connection.
+///
+/// This class can be composed on top of a 'real' object layer (e.g.
+/// RTDyldObjectLinkingLayer) to do the actual work of relocating objects
+/// and making them executable.
+template <typename BaseLayerT, typename RPCEndpoint>
+class RemoteObjectServerLayer : public RemoteObjectLayer<RPCEndpoint> {
+private:
+
+  using ObjHandleT = RemoteObjectLayerAPI::ObjHandleT;
+  using RemoteSymbol = RemoteObjectLayerAPI::RemoteSymbol;
+
+  using AddObject = RemoteObjectLayerAPI::AddObject;
+  using RemoveObject = RemoteObjectLayerAPI::RemoveObject;
+  using FindSymbol = RemoteObjectLayerAPI::FindSymbol;
+  using FindSymbolIn = RemoteObjectLayerAPI::FindSymbolIn;
+  using EmitAndFinalize = RemoteObjectLayerAPI::EmitAndFinalize;
+  using Lookup = RemoteObjectLayerAPI::Lookup;
+  using LookupInLogicalDylib = RemoteObjectLayerAPI::LookupInLogicalDylib;
+
+  using RemoteObjectLayer<RPCEndpoint>::teeLog;
+  using RemoteObjectLayer<RPCEndpoint>::badObjectHandleError;
+  using RemoteObjectLayer<RPCEndpoint>::remoteToJITSymbol;
+
+public:
+
+  /// Create a RemoteObjectServerLayer with the given base layer (which must be
+  /// an object layer), RPC endpoint, and error reporter function.
+  RemoteObjectServerLayer(BaseLayerT &BaseLayer,
+                          RPCEndpoint &Remote,
+                          std::function<void(Error)> ReportError)
+    : RemoteObjectLayer<RPCEndpoint>(Remote, std::move(ReportError)),
+      BaseLayer(BaseLayer), HandleIdMgr(1) {
+    using ThisT = RemoteObjectServerLayer<BaseLayerT, RPCEndpoint>;
+
+    Remote.template addHandler<AddObject>(*this, &ThisT::addObject);
+    Remote.template addHandler<RemoveObject>(*this, &ThisT::removeObject);
+    Remote.template addHandler<FindSymbol>(*this, &ThisT::findSymbol);
+    Remote.template addHandler<FindSymbolIn>(*this, &ThisT::findSymbolIn);
+    Remote.template addHandler<EmitAndFinalize>(*this, &ThisT::emitAndFinalize);
+  }
+
+private:
+
+  class StringMemoryBuffer : public MemoryBuffer {
+  public:
+    StringMemoryBuffer(std::string Buffer)
+      : Buffer(std::move(Buffer)) {
+      init(this->Buffer.data(), this->Buffer.data() + this->Buffer.size(),
+           false);
+    }
+
+    BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+  private:
+    std::string Buffer;
+  };
+
+  JITSymbol lookup(ObjHandleT Id, const std::string &Name) {
+    return remoteToJITSymbol(
+             this->Remote.template callB<Lookup>(Id, Name));
+  }
+
+  JITSymbol lookupInLogicalDylib(ObjHandleT Id, const std::string &Name) {
+    return remoteToJITSymbol(
+             this->Remote.template callB<LookupInLogicalDylib>(Id, Name));
+  }
+
+  Expected<ObjHandleT> addObject(std::string ObjBuffer) {
+    auto Buffer = llvm::make_unique<StringMemoryBuffer>(std::move(ObjBuffer));
+    auto Id = HandleIdMgr.getNext();
+    assert(!BaseLayerHandles.count(Id) && "Id already in use?");
+
+    auto Resolver = createLambdaResolver(
+        [this, Id](const std::string &Name) { return lookup(Id, Name); },
+        [this, Id](const std::string &Name) {
+          return lookupInLogicalDylib(Id, Name);
+        });
+
+    if (auto HandleOrErr =
+            BaseLayer.addObject(std::move(Buffer), std::move(Resolver))) {
+      BaseLayerHandles[Id] = std::move(*HandleOrErr);
+      return Id;
+    } else
+      return teeLog(HandleOrErr.takeError());
+  }
+
+  Error removeObject(ObjHandleT H) {
+    auto HI = BaseLayerHandles.find(H);
+    if (HI != BaseLayerHandles.end()) {
+      if (auto Err = BaseLayer.removeObject(HI->second))
+        return teeLog(std::move(Err));
+      return Error::success();
+    } else
+      return teeLog(badObjectHandleError(H));
+  }
+
+  Expected<RemoteSymbol> findSymbol(const std::string &Name,
+                                    bool ExportedSymbolsOnly) {
+    if (auto Sym = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+      return this->jitSymbolToRemote(std::move(Sym));
+    else if (auto Err = Sym.takeError())
+      return teeLog(std::move(Err));
+    return this->nullRemoteSymbol();
+  }
+
+  Expected<RemoteSymbol> findSymbolIn(ObjHandleT H, const std::string &Name,
+                                      bool ExportedSymbolsOnly) {
+    auto HI = BaseLayerHandles.find(H);
+    if (HI != BaseLayerHandles.end()) {
+      if (auto Sym =
+              BaseLayer.findSymbolIn(HI->second, Name, ExportedSymbolsOnly))
+        return this->jitSymbolToRemote(std::move(Sym));
+      else if (auto Err = Sym.takeError())
+        return teeLog(std::move(Err));
+      return this->nullRemoteSymbol();
+    } else
+      return teeLog(badObjectHandleError(H));
+  }
+
+  Error emitAndFinalize(ObjHandleT H) {
+    auto HI = BaseLayerHandles.find(H);
+    if (HI != BaseLayerHandles.end()) {
+      if (auto Err = BaseLayer.emitAndFinalize(HI->second))
+        return teeLog(std::move(Err));
+      return Error::success();
+    } else
+      return teeLog(badObjectHandleError(H));
+  }
+
+  BaseLayerT &BaseLayer;
+  remote::ResourceIdMgr HandleIdMgr;
+  std::map<ObjHandleT, typename BaseLayerT::ObjHandleT> BaseLayerHandles;
+};
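+// Example (illustrative sketch): composing a RemoteObjectServerLayer on top
+// of a "real" object layer on the server side. `MyRPCEndpoint`, the
+// `acceptClient` helper, and the serve loop are assumptions; the base layer
+// construction is elided since it depends on the concrete layer chosen.
+//
+//   RTDyldObjectLinkingLayer BaseLayer(/* construction elided */);
+//   MyRPCEndpoint Remote = acceptClient();                 // hypothetical
+//   RemoteObjectServerLayer<decltype(BaseLayer), MyRPCEndpoint> Server(
+//       BaseLayer, Remote, [](Error Err) {
+//         logAllUnhandledErrors(std::move(Err), errs(), "JIT server: ");
+//       });
+//   // Serve RPC calls, assuming the endpoint exposes a handleOne()-style
+//   // dispatch method:
+//   while (serverIsRunning())                              // hypothetical
+//     if (auto Err = Remote.handleOne())
+//       logAllUnhandledErrors(std::move(Err), errs(), "RPC: ");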
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
new file mode 100644
index 0000000..da40d1c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
@@ -0,0 +1,137 @@
+//===- SymbolStringPool.h - Multi-threaded pool for JIT symbols -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains a multi-threaded string pool suitable for use with ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
+#define LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
+
+#include "llvm/ADT/StringMap.h"
+#include <atomic>
+#include <mutex>
+
+namespace llvm {
+namespace orc {
+
+class SymbolStringPtr;
+
+/// @brief String pool for symbol names used by the JIT.
+class SymbolStringPool {
+  friend class SymbolStringPtr;
+public:
+  /// @brief Create a symbol string pointer from the given string.
+  SymbolStringPtr intern(StringRef S);
+
+  /// @brief Remove from the pool any entries that are no longer referenced.
+  void clearDeadEntries();
+
+  /// @brief Returns true if the pool is empty.
+  bool empty() const;
+private:
+  using RefCountType = std::atomic<size_t>;
+  using PoolMap = StringMap<RefCountType>;
+  using PoolMapEntry = StringMapEntry<RefCountType>;
+  mutable std::mutex PoolMutex;
+  PoolMap Pool;
+};
+
+/// @brief Pointer to a pooled string representing a symbol name.
+class SymbolStringPtr {
+  friend class SymbolStringPool;
+  friend bool operator==(const SymbolStringPtr &LHS,
+                         const SymbolStringPtr &RHS);
+  friend bool operator<(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS);
+
+public:
+  SymbolStringPtr() = default;
+  SymbolStringPtr(const SymbolStringPtr &Other)
+    : S(Other.S) {
+    if (S)
+      ++S->getValue();
+  }
+
+  SymbolStringPtr& operator=(const SymbolStringPtr &Other) {
+    if (S)
+      --S->getValue();
+    S = Other.S;
+    if (S)
+      ++S->getValue();
+    return *this;
+  }
+
+  SymbolStringPtr(SymbolStringPtr &&Other) : S(nullptr) {
+    std::swap(S, Other.S);
+  }
+
+  SymbolStringPtr& operator=(SymbolStringPtr &&Other) {
+    if (S)
+      --S->getValue();
+    S = nullptr;
+    std::swap(S, Other.S);
+    return *this;
+  }
+
+  ~SymbolStringPtr() {
+    if (S)
+      --S->getValue();
+  }
+
+  StringRef operator*() const { return S->first(); }
+
+private:
+
+  SymbolStringPtr(SymbolStringPool::PoolMapEntry *S)
+      : S(S) {
+    if (S)
+      ++S->getValue();
+  }
+
+  SymbolStringPool::PoolMapEntry *S = nullptr;
+};
+
+inline bool operator==(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+  return LHS.S == RHS.S;
+}
+
+inline bool operator!=(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+  return !(LHS == RHS);
+}
+
+inline bool operator<(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+  return LHS.S < RHS.S;
+}
+
+inline SymbolStringPtr SymbolStringPool::intern(StringRef S) {
+  std::lock_guard<std::mutex> Lock(PoolMutex);
+  PoolMap::iterator I;
+  bool Added;
+  std::tie(I, Added) = Pool.try_emplace(S, 0);
+  return SymbolStringPtr(&*I);
+}
+
+inline void SymbolStringPool::clearDeadEntries() {
+  std::lock_guard<std::mutex> Lock(PoolMutex);
+  for (auto I = Pool.begin(), E = Pool.end(); I != E;) {
+    auto Tmp = I++;
+    if (Tmp->second == 0)
+      Pool.erase(Tmp);
+  }
+}
+
+inline bool SymbolStringPool::empty() const {
+  std::lock_guard<std::mutex> Lock(PoolMutex);
+  return Pool.empty();
+}
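+// Example (illustrative): the pool hands out reference-counted handles, so
+// interning the same string twice yields handles that compare equal, and an
+// entry can be reclaimed once every SymbolStringPtr referencing it is gone.
+//
+//   SymbolStringPool Pool;
+//   {
+//     SymbolStringPtr A = Pool.intern("main");
+//     SymbolStringPtr B = Pool.intern("main");
+//     assert(A == B && "identical strings share one pool entry");
+//   }
+//   Pool.clearDeadEntries(); // both handles destroyed; entry removed
+//   assert(Pool.empty());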
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h b/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
new file mode 100644
index 0000000..4cd5648
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
@@ -0,0 +1,38 @@
+//===---- OrcMCJITReplacement.h - Orc-based MCJIT replacement ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces OrcMCJITReplacement to link in on certain operating
+// systems (currently Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+#define LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInOrcMCJITReplacement();
+
+namespace {
+  struct ForceOrcMCJITReplacementLinking {
+    ForceOrcMCJITReplacementLinking() {
+      // We must reference OrcMCJITReplacement in such a way that compilers will
+      // not delete it all as dead code, even with whole program optimization,
+      // yet is effectively a NO-OP. As the compiler isn't smart enough to know
+      // that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      LLVMLinkInOrcMCJITReplacement();
+    }
+  } ForceOrcMCJITReplacementLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h b/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
new file mode 100644
index 0000000..ee75202
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
@@ -0,0 +1,153 @@
+//===-- RTDyldMemoryManager.h - Memory manager for MC-JIT -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class ExecutionEngine;
+
+namespace object {
+  class ObjectFile;
+} // end namespace object
+
+class MCJITMemoryManager : public RuntimeDyld::MemoryManager {
+public:
+  // Don't hide the notifyObjectLoaded method from RuntimeDyld::MemoryManager.
+  using RuntimeDyld::MemoryManager::notifyObjectLoaded;
+
+  /// This method is called after an object has been loaded into memory but
+  /// before relocations are applied to the loaded sections.  The object load
+  /// may have been initiated by MCJIT to resolve an external symbol for another
+  /// object that is being finalized.  In that case, the object about which
+  /// the memory manager is being notified will be finalized immediately after
+  /// the memory manager returns from this call.
+  ///
+  /// Memory managers which are preparing code for execution in an external
+  /// address space can use this call to remap the section addresses for the
+  /// newly loaded object.
+  virtual void notifyObjectLoaded(ExecutionEngine *EE,
+                                  const object::ObjectFile &) {}
+};
+
+// RuntimeDyld clients often want to handle the memory management of
+// what gets placed where. For JIT clients, this is the subset of
+// JITMemoryManager required for dynamic loading of binaries.
+//
+// FIXME: As the RuntimeDyld fills out, additional routines will be needed
+//        for the varying types of objects to be allocated.
+class RTDyldMemoryManager : public MCJITMemoryManager,
+                            public LegacyJITSymbolResolver {
+public:
+  RTDyldMemoryManager() = default;
+  RTDyldMemoryManager(const RTDyldMemoryManager&) = delete;
+  void operator=(const RTDyldMemoryManager&) = delete;
+  ~RTDyldMemoryManager() override;
+
+  /// Register EH frames in the current process.
+  static void registerEHFramesInProcess(uint8_t *Addr, size_t Size);
+
+  /// Deregister EH frames in the current process.
+  static void deregisterEHFramesInProcess(uint8_t *Addr, size_t Size);
+
+  void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;
+  void deregisterEHFrames() override;
+
+  /// This method returns the address of the specified function or variable in
+  /// the current process.
+  static uint64_t getSymbolAddressInProcess(const std::string &Name);
+
+  /// Legacy symbol lookup -- DEPRECATED! Please override findSymbol instead.
+  ///
+  /// This method returns the address of the specified function or variable.
+  /// It is used to resolve symbols during module linking.
+  virtual uint64_t getSymbolAddress(const std::string &Name) {
+    return getSymbolAddressInProcess(Name);
+  }
+
+  /// This method returns a RuntimeDyld::SymbolInfo for the specified function
+  /// or variable. It is used to resolve symbols during module linking.
+  ///
+  /// By default this falls back on the legacy lookup method:
+  /// 'getSymbolAddress'. The address returned by getSymbolAddress is treated as
+  /// a strong, exported symbol, consistent with historical treatment by
+  /// RuntimeDyld.
+  ///
+  /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+  /// this method and return a SymbolInfo with the flags set correctly. This is
+  /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+  JITSymbol findSymbol(const std::string &Name) override {
+    return JITSymbol(getSymbolAddress(Name), JITSymbolFlags::Exported);
+  }
+
+  /// Legacy symbol lookup -- DEPRECATED! Please override
+  /// findSymbolInLogicalDylib instead.
+  ///
+  /// Default to treating all modules as separate.
+  virtual uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) {
+    return 0;
+  }
+
+  /// Default to treating all modules as separate.
+  ///
+  /// By default this falls back on the legacy lookup method:
+  /// 'getSymbolAddressInLogicalDylib'. The address returned by
+  /// getSymbolAddressInLogicalDylib is treated as a strong, exported symbol,
+  /// consistent with historical treatment by RuntimeDyld.
+  ///
+  /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+  /// this method and return a SymbolInfo with the flags set correctly. This is
+  /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+  JITSymbol
+  findSymbolInLogicalDylib(const std::string &Name) override {
+    return JITSymbol(getSymbolAddressInLogicalDylib(Name),
+                     JITSymbolFlags::Exported);
+  }
+
+  /// This method returns the address of the specified function. As such it is
+  /// only useful for resolving library symbols, not code generated symbols.
+  ///
+  /// If \p AbortOnFailure is false and no function with the given name is
+  /// found, this function returns a null pointer. Otherwise, it prints a
+  /// message to stderr and aborts.
+  ///
+  /// This function is deprecated for memory managers to be used with
+  /// MCJIT or RuntimeDyld.  Use getSymbolAddress instead.
+  virtual void *getPointerToNamedFunction(const std::string &Name,
+                                          bool AbortOnFailure = true);
+
+protected:
+  struct EHFrame {
+    uint8_t *Addr;
+    size_t Size;
+  };
+  typedef std::vector<EHFrame> EHFrameInfos;
+  EHFrameInfos EHFrames;
+};
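+// Example (illustrative sketch): a minimal custom memory manager that
+// overrides findSymbol, as the comments above recommend, so that symbol
+// flags are reported accurately. `lookupInMyTable` is a hypothetical
+// application-side lookup; the allocate*/finalizeMemory overrides required
+// by RuntimeDyld::MemoryManager are elided.
+//
+//   class MyMemoryManager : public RTDyldMemoryManager {
+//   public:
+//     JITSymbol findSymbol(const std::string &Name) override {
+//       if (JITSymbol Sym = lookupInMyTable(Name))         // hypothetical
+//         return Sym;                  // flags come from the table entry
+//       // Fall back to in-process lookup for library symbols.
+//       return JITSymbol(getSymbolAddressInProcess(Name),
+//                        JITSymbolFlags::Exported);
+//     }
+//   };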
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(
+    RTDyldMemoryManager, LLVMMCJITMemoryManagerRef)
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h
new file mode 100644
index 0000000..14da5af
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -0,0 +1,264 @@
+//===- RuntimeDyld.h - Run-time dynamic linker for MC-JIT -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the runtime dynamic linker facilities of the MC-JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+namespace object {
+
+template <typename T> class OwningBinary;
+
+} // end namespace object
+
+/// Base class for errors originating in RuntimeDyld, e.g. missing relocation
+/// support.
+class RuntimeDyldError : public ErrorInfo<RuntimeDyldError> {
+public:
+  static char ID;
+
+  RuntimeDyldError(std::string ErrMsg) : ErrMsg(std::move(ErrMsg)) {}
+
+  void log(raw_ostream &OS) const override;
+  const std::string &getErrorMessage() const { return ErrMsg; }
+  std::error_code convertToErrorCode() const override;
+
+private:
+  std::string ErrMsg;
+};
+
+class RuntimeDyldCheckerImpl;
+class RuntimeDyldImpl;
+
+class RuntimeDyld {
+  friend class RuntimeDyldCheckerImpl;
+
+protected:
+  // Change the address associated with a section when resolving relocations.
+  // Any relocations already associated with the symbol will be re-resolved.
+  void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+public:
+  /// \brief Information about the loaded object.
+  class LoadedObjectInfo : public llvm::LoadedObjectInfo {
+    friend class RuntimeDyldImpl;
+
+  public:
+    using ObjSectionToIDMap = std::map<object::SectionRef, unsigned>;
+
+    LoadedObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+        : RTDyld(RTDyld), ObjSecToIDMap(std::move(ObjSecToIDMap)) {}
+
+    virtual object::OwningBinary<object::ObjectFile>
+    getObjectForDebug(const object::ObjectFile &Obj) const = 0;
+
+    uint64_t
+    getSectionLoadAddress(const object::SectionRef &Sec) const override;
+
+  protected:
+    virtual void anchor();
+
+    RuntimeDyldImpl &RTDyld;
+    ObjSectionToIDMap ObjSecToIDMap;
+  };
+
+  /// \brief Memory Management.
+  class MemoryManager {
+    friend class RuntimeDyld;
+
+  public:
+    MemoryManager() = default;
+    virtual ~MemoryManager() = default;
+
+    /// Allocate a memory block of (at least) the given size suitable for
+    /// executable code. The SectionID is a unique identifier assigned by the
+    /// RuntimeDyld instance, and optionally recorded by the memory manager to
+    /// access a loaded section.
+    virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                                         unsigned SectionID,
+                                         StringRef SectionName) = 0;
+
+    /// Allocate a memory block of (at least) the given size suitable for data.
+    /// The SectionID is a unique identifier assigned by the JIT engine, and
+    /// optionally recorded by the memory manager to access a loaded section.
+    virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                                         unsigned SectionID,
+                                         StringRef SectionName,
+                                         bool IsReadOnly) = 0;
+
+    /// Inform the memory manager about the total amount of memory required to
+    /// allocate all sections to be loaded:
+    /// \p CodeSize - the total size of all code sections
+    /// \p RODataSize - the total size of all read-only data sections
+    /// \p RWDataSize - the total size of all read-write data sections
+    ///
+    /// Note that by default the callback is disabled. To enable it
+    /// redefine the method needsToReserveAllocationSpace to return true.
+    virtual void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+                                        uintptr_t RODataSize,
+                                        uint32_t RODataAlign,
+                                        uintptr_t RWDataSize,
+                                        uint32_t RWDataAlign) {}
+
+    /// Override to return true to enable the reserveAllocationSpace callback.
+    virtual bool needsToReserveAllocationSpace() { return false; }
+
+    /// Register the EH frames with the runtime so that C++ exceptions work.
+    ///
+    /// \p Addr parameter provides the local address of the EH frame section
+    /// data, while \p LoadAddr provides the address of the data in the target
+    /// address space.  If the section has not been remapped (which will usually
+    /// be the case for local execution) these two values will be the same.
+    virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+                                  size_t Size) = 0;
+    virtual void deregisterEHFrames() = 0;
+
+    /// This method is called when object loading is complete and section page
+    /// permissions can be applied.  It is up to the memory manager implementation
+    /// to decide whether or not to act on this method.  The memory manager will
+    /// typically allocate all sections as read-write and then apply specific
+    /// permissions when this method is called.  Code sections cannot be executed
+    /// until this function has been called.  In addition, any cache coherency
+    /// operations needed to reliably use the memory are also performed.
+    ///
+    /// Returns true if an error occurred, false otherwise.
+    virtual bool finalizeMemory(std::string *ErrMsg = nullptr) = 0;
+
+    /// This method is called after an object has been loaded into memory but
+    /// before relocations are applied to the loaded sections.
+    ///
+    /// Memory managers which are preparing code for execution in an external
+    /// address space can use this call to remap the section addresses for the
+    /// newly loaded object.
+    ///
+    /// For clients that do not need access to an ExecutionEngine instance this
+    /// method should be preferred to its cousin
+    /// MCJITMemoryManager::notifyObjectLoaded as this method is compatible with
+    /// ORC JIT stacks.
+    virtual void notifyObjectLoaded(RuntimeDyld &RTDyld,
+                                    const object::ObjectFile &Obj) {}
+
+  private:
+    virtual void anchor();
+
+    bool FinalizationLocked = false;
+  };
+
+  /// \brief Construct a RuntimeDyld instance.
+  RuntimeDyld(MemoryManager &MemMgr, JITSymbolResolver &Resolver);
+  RuntimeDyld(const RuntimeDyld &) = delete;
+  RuntimeDyld &operator=(const RuntimeDyld &) = delete;
+  ~RuntimeDyld();
+
+  /// Add the referenced object file to the list of objects to be loaded and
+  /// relocated.
+  std::unique_ptr<LoadedObjectInfo> loadObject(const object::ObjectFile &O);
+
+  /// Get the address of our local copy of the symbol. This may or may not
+  /// be the address used for relocation (clients can copy the data around
+/// and resolve relocations based on where they put it).
+  void *getSymbolLocalAddress(StringRef Name) const;
+
+  /// Get the target address and flags for the named symbol.
+  /// This address is the one used for relocation.
+  JITEvaluatedSymbol getSymbol(StringRef Name) const;
+
+/// Returns a copy of the symbol table. This can be used by callbacks run
+/// on finalization to extract the symbol table before throwing away the
+  /// RuntimeDyld instance. Because the map keys (StringRefs) are backed by
+  /// strings inside the RuntimeDyld instance, the map should be processed
+  /// before the RuntimeDyld instance is discarded.
+  std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const;
+
+  /// Resolve the relocations for all symbols we currently know about.
+  void resolveRelocations();
+
+  /// Map a section to its target address space value.
+  /// Map the address of a JIT section as returned from the memory manager
+  /// to the address in the target process as the running code will see it.
+  /// This is the address which will be used for relocation resolution.
+  void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+  /// Register any EH frame sections that have been loaded but not previously
+  /// registered with the memory manager.  Note, RuntimeDyld is responsible
+  /// for identifying the EH frame and calling the memory manager with the
+  /// EH frame section data.  However, the memory manager itself will handle
+  /// the actual target-specific EH frame registration.
+  void registerEHFrames();
+
+  void deregisterEHFrames();
+
+  bool hasError();
+  StringRef getErrorString();
+
+  /// By default, only sections that are "required for execution" are passed to
+  /// the RTDyldMemoryManager, and other sections are discarded. Passing 'true'
+  /// to this method will cause RuntimeDyld to pass all sections to its
+  /// memory manager regardless of whether they are "required to execute" in the
+  /// usual sense. This is useful for inspecting metadata sections that may not
+  /// contain relocations, e.g. debug info or stackmaps.
+  ///
+  /// Must be called before the first object file is loaded.
+  void setProcessAllSections(bool ProcessAllSections) {
+    assert(!Dyld && "setProcessAllSections must be called before loadObject.");
+    this->ProcessAllSections = ProcessAllSections;
+  }
+
+  /// Perform all actions needed to make the code owned by this RuntimeDyld
+  /// instance executable:
+  ///
+  /// 1) Apply relocations.
+  /// 2) Register EH frames.
+  /// 3) Update memory permissions*.
+  ///
+  /// * Finalization is potentially recursive**, and the 3rd step will only be
+  ///   applied by the outermost call to finalize. This allows different
+  ///   RuntimeDyld instances to share a memory manager without the innermost
+  ///   finalization locking the memory and causing relocation fixup errors in
+  ///   outer instances.
+  ///
+  /// ** Recursive finalization occurs when one RuntimeDyld instance needs the
+  ///   address of a symbol owned by some other instance in order to apply
+  ///   relocations.
+  ///
+  void finalizeWithMemoryManagerLocking();
+
+private:
+  // RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
+  // interface.
+  std::unique_ptr<RuntimeDyldImpl> Dyld;
+  MemoryManager &MemMgr;
+  JITSymbolResolver &Resolver;
+  bool ProcessAllSections;
+  RuntimeDyldCheckerImpl *Checker;
+};
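+// Example (illustrative sketch): the typical load/finalize sequence. `Obj`
+// is assumed to be an already-loaded object::ObjectFile. SectionMemoryManager
+// (declared in SectionMemoryManager.h) can serve as both the memory manager
+// and the resolver, since it derives from RTDyldMemoryManager, which is also
+// a LegacyJITSymbolResolver.
+//
+//   SectionMemoryManager MemMgr;
+//   RuntimeDyld Dyld(MemMgr, MemMgr);
+//   auto Info = Dyld.loadObject(Obj);   // unique_ptr<LoadedObjectInfo>
+//   Dyld.finalizeWithMemoryManagerLocking(); // relocate, register EH frames,
+//                                            // apply page permissions
+//   if (Dyld.hasError())
+//     report_fatal_error(Dyld.getErrorString());
+//   JITEvaluatedSymbol Main = Dyld.getSymbol("main");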
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
new file mode 100644
index 0000000..de89f40
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -0,0 +1,112 @@
+//===---- RuntimeDyldChecker.h - RuntimeDyld tester framework -----*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+
+#include "llvm/ADT/Optional.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class StringRef;
+class MCDisassembler;
+class MemoryBuffer;
+class MCInstPrinter;
+class RuntimeDyld;
+class RuntimeDyldCheckerImpl;
+class raw_ostream;
+
+/// \brief RuntimeDyld invariant checker for verifying that RuntimeDyld has
+///        correctly applied relocations.
+///
+/// The RuntimeDyldChecker class evaluates expressions against an attached
+/// RuntimeDyld instance to verify that relocations have been applied
+/// correctly.
+///
+/// The expression language supports basic pointer arithmetic and bit-masking,
+/// and has limited disassembler integration for accessing instruction
+/// operands and the next PC (program counter) address for each instruction.
+///
+/// The language syntax is:
+///
+/// check = expr '=' expr
+///
+/// expr = binary_expr
+///      | sliceable_expr
+///
+/// sliceable_expr = '*{' number '}' load_addr_expr [slice]
+///                | '(' expr ')' [slice]
+///                | ident_expr [slice]
+///                | number [slice]
+///
+/// slice = '[' high-bit-index ':' low-bit-index ']'
+///
+/// load_addr_expr = symbol
+///                | '(' symbol '+' number ')'
+///                | '(' symbol '-' number ')'
+///
+/// ident_expr = 'decode_operand' '(' symbol ',' operand-index ')'
+///            | 'next_pc'        '(' symbol ')'
+///            | 'stub_addr' '(' file-name ',' section-name ',' symbol ')'
+///            | symbol
+///
+/// binary_expr = expr '+' expr
+///             | expr '-' expr
+///             | expr '&' expr
+///             | expr '|' expr
+///             | expr '<<' expr
+///             | expr '>>' expr
+///
+class RuntimeDyldChecker {
+public:
+  RuntimeDyldChecker(RuntimeDyld &RTDyld, MCDisassembler *Disassembler,
+                     MCInstPrinter *InstPrinter, raw_ostream &ErrStream);
+  ~RuntimeDyldChecker();
+
+  // \brief Get the associated RTDyld instance.
+  RuntimeDyld& getRTDyld();
+
+  // \brief Get the associated RTDyld instance.
+  const RuntimeDyld& getRTDyld() const;
+
+  /// \brief Check a single expression against the attached RuntimeDyld
+  ///        instance.
+  bool check(StringRef CheckExpr) const;
+
+  /// \brief Scan the given memory buffer for lines beginning with the string
+  ///        in RulePrefix. The remainder of the line is passed to the check
+  ///        method to be evaluated as an expression.
+  bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+  /// \brief Returns the address of the requested section (or an error message
+  ///        in the second element of the pair if the address cannot be found).
+  ///
+  /// If 'LocalAddress' is true, this returns the address of the section
+  /// within the linker's memory. If 'LocalAddress' is false, it returns the
+  /// address within the target process (i.e. the load address).
+  std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+                                                  StringRef SectionName,
+                                                  bool LocalAddress);
+
+  /// \brief If there is a section at the given local address, return its load
+  ///        address, otherwise return none.
+  Optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;
+
+private:
+  std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
+};
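+// Example (illustrative): rule lines as consumed by checkAllRulesInBuffer,
+// written in the expression grammar documented above. The "rtdyld-check"
+// prefix is the convention used by the in-tree tests; any RulePrefix works.
+//
+//   # rtdyld-check: decode_operand(foo_call, 0) = foo
+//   # rtdyld-check: *{8}(bar + 8)[63:32] = 0xdeadbeef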
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h b/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h
new file mode 100644
index 0000000..d76e371
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -0,0 +1,193 @@
+//===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of a section-based memory manager used by
+// the MCJIT execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Memory.h"
+#include <cstdint>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+/// This is a simple memory manager which implements the methods called by
+/// the RuntimeDyld class to allocate memory for section-based loading of
+/// objects, usually those generated by the MCJIT execution engine.
+///
+/// This memory manager allocates all section memory as read-write.  The
+/// RuntimeDyld will copy JITed section memory into these allocated blocks
+/// and perform any necessary linking and relocations.
+///
+/// Any client using this memory manager MUST ensure that section-specific
+/// page permissions have been applied before attempting to execute functions
+/// in the JITed object.  Permissions can be applied either by calling
+/// MCJIT::finalizeObject or by calling SectionMemoryManager::finalizeMemory
+/// directly.  Clients of MCJIT should call MCJIT::finalizeObject.
+class SectionMemoryManager : public RTDyldMemoryManager {
+public:
+  /// This enum describes the various reasons to allocate pages from
+  /// allocateMappedMemory.
+  enum class AllocationPurpose {
+    Code,
+    ROData,
+    RWData,
+  };
+
+  /// Implementations of this interface are used by SectionMemoryManager to
+  /// request pages from the operating system.
+  class MemoryMapper {
+  public:
+    /// This method attempts to allocate \p NumBytes bytes of virtual memory for
+    /// \p Purpose.  \p NearBlock may point to an existing allocation, in which
+    /// case an attempt is made to allocate more memory near the existing block.
+    /// The actual allocated address is not guaranteed to be near the requested
+    /// address.  \p Flags is used to set the initial protection flags for the
+    /// block of the memory.  \p EC [out] returns an object describing any error
+    /// that occurs.
+    ///
+    /// This method may allocate more than the number of bytes requested.  The
+    /// actual number of bytes allocated is indicated in the returned
+    /// MemoryBlock.
+    ///
+    /// The start of the allocated block must be aligned with the system
+    /// allocation granularity (64K on Windows, page size on Linux).  If the
+    /// address following \p NearBlock is not so aligned, it will be rounded up
+    /// to the next allocation granularity boundary.
+    ///
+    /// \returns a non-null MemoryBlock if the function was successful,
+    /// otherwise a null MemoryBlock with \p EC describing the error.
+    virtual sys::MemoryBlock
+    allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes,
+                         const sys::MemoryBlock *const NearBlock,
+                         unsigned Flags, std::error_code &EC) = 0;
+
+    /// This method sets the protection flags for a block of memory to the state
+    /// specified by \p Flags.  The behavior is not specified if the memory was
+    /// not allocated using the allocateMappedMemory method.
+    /// \p Block describes the memory block to be protected.
+    /// \p Flags specifies the new protection state to be assigned to the block.
+    ///
+    /// If \p Flags is MF_WRITE, the actual behavior varies with the operating
+    /// system (i.e. MF_READ | MF_WRITE on Windows) and the target architecture
+    /// (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
+    ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+    virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+                                                unsigned Flags) = 0;
+
+    /// This method releases a block of memory that was allocated with the
+    /// allocateMappedMemory method. It should not be used to release any memory
+    /// block allocated any other way.
+    /// \p Block describes the memory to be released.
+    ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+    virtual std::error_code releaseMappedMemory(sys::MemoryBlock &Block) = 0;
+
+    virtual ~MemoryMapper();
+  };
+
+  /// Creates a SectionMemoryManager instance with \p MM as the associated
+  /// memory mapper.  If \p MM is nullptr then a default memory mapper is used
+  /// that directly calls into the operating system.
+  SectionMemoryManager(MemoryMapper *MM = nullptr);
+  SectionMemoryManager(const SectionMemoryManager &) = delete;
+  void operator=(const SectionMemoryManager &) = delete;
+  ~SectionMemoryManager() override;
+
+  /// \brief Allocates a memory block of (at least) the given size suitable for
+  /// executable code.
+  ///
+  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
+  /// a default alignment of 16 will be used.
+  uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID,
+                               StringRef SectionName) override;
+
+  /// \brief Allocates a memory block of (at least) the given size suitable for
+  /// data.
+  ///
+  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
+  /// a default alignment of 16 will be used.
+  uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID, StringRef SectionName,
+                               bool isReadOnly) override;
+
+  /// \brief Update section-specific memory permissions and other attributes.
+  ///
+  /// This method is called when object loading is complete and section page
+  /// permissions can be applied.  It is up to the memory manager implementation
+  /// to decide whether or not to act on this method.  The memory manager will
+  /// typically allocate all sections as read-write and then apply specific
+  /// permissions when this method is called.  Code sections cannot be executed
+  /// until this function has been called.  In addition, any cache coherency
+  /// operations needed to reliably use the memory are also performed.
+  ///
+  /// \returns true if an error occurred, false otherwise.
+  bool finalizeMemory(std::string *ErrMsg = nullptr) override;
+
+  /// \brief Invalidate instruction cache for code sections.
+  ///
+  /// Some platforms with separate data cache and instruction cache require
+  /// explicit cache flush, otherwise JIT code manipulations (like resolved
+  /// relocations) will get to the data cache but not to the instruction cache.
+  ///
+  /// This method is called from finalizeMemory.
+  virtual void invalidateInstructionCache();
+
+private:
+  struct FreeMemBlock {
+    // The actual block of free memory
+    sys::MemoryBlock Free;
+    // If there is a pending allocation from the same reservation right before
+    // this block, store its index in PendingMem, to be able to update the
+    // pending region if part of this block is allocated, rather than having to
+    // create a new one
+    unsigned PendingPrefixIndex;
+  };
+
+  struct MemoryGroup {
+    // PendingMem contains all blocks of memory (subblocks of AllocatedMem)
+    // which have not yet had their permissions applied, but have been given
+    // out to the user. FreeMem contains all blocks of memory which have
+    // neither had their permissions applied, nor been given out to the user.
+    SmallVector<sys::MemoryBlock, 16> PendingMem;
+    SmallVector<FreeMemBlock, 16> FreeMem;
+
+    // All memory blocks that have been requested from the system
+    SmallVector<sys::MemoryBlock, 16> AllocatedMem;
+
+    sys::MemoryBlock Near;
+  };
+
+  uint8_t *allocateSection(AllocationPurpose Purpose, uintptr_t Size,
+                           unsigned Alignment);
+
+  std::error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+                                              unsigned Permissions);
+
+  MemoryGroup CodeMem;
+  MemoryGroup RWDataMem;
+  MemoryGroup RODataMem;
+  MemoryMapper &MMapper;
+};
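+// Example (illustrative sketch): routing allocation through a custom mapper.
+// `ShadowMapper` is a hypothetical implementation of the MemoryMapper
+// interface above; passing nullptr instead uses the operating system
+// directly, as documented on the constructor.
+//
+//   ShadowMapper Mapper;                        // hypothetical MemoryMapper
+//   SectionMemoryManager MemMgr(&Mapper);
+//   // ... hand MemMgr to RuntimeDyld / MCJIT; after loading objects:
+//   std::string ErrMsg;
+//   if (MemMgr.finalizeMemory(&ErrMsg))         // true indicates failure
+//     report_fatal_error(ErrMsg);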
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/FuzzerCLI.h b/linux-x64/clang/include/llvm/FuzzMutate/FuzzerCLI.h
new file mode 100644
index 0000000..3333e96
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/FuzzerCLI.h
@@ -0,0 +1,79 @@
+//===-- FuzzerCLI.h - Common logic for CLIs of fuzzers ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common logic needed to implement LLVM's fuzz targets' CLIs - including LLVM
+// concepts like cl::opt and libFuzzer concepts like -ignore_remaining_args=1.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_FUZZER_CLI_H
+#define LLVM_FUZZMUTATE_FUZZER_CLI_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// Parse cl::opts from a fuzz target commandline.
+///
+/// This handles all arguments after -ignore_remaining_args=1 as cl::opts.
+void parseFuzzerCLOpts(int ArgC, char *ArgV[]);
+
+/// Handle backend options that are encoded in the executable name.
+///
+/// Parses some common backend options out of a specially crafted executable
+/// name (argv[0]). For example, a name like llvm-foo-fuzzer--aarch64-gisel
+/// might set up an AArch64 triple and the Global ISel selector. This should be
+/// called *before* parseFuzzerCLOpts if calling both.
+///
+/// This is meant to be used for environments like OSS-Fuzz that aren't capable
+/// of passing in command line arguments in the normal way.
+void handleExecNameEncodedBEOpts(StringRef ExecName);
+
+/// Handle optimizer options which are encoded in the executable name.
+/// Same semantics as in 'handleExecNameEncodedBEOpts'.
+void handleExecNameEncodedOptimizerOpts(StringRef ExecName);
+
+using FuzzerTestFun = int (*)(const uint8_t *Data, size_t Size);
+using FuzzerInitFun = int (*)(int *argc, char ***argv);
+
+/// Runs a fuzz target on the inputs specified on the command line.
+///
+/// Useful for testing fuzz targets without linking to libFuzzer. Finds inputs
+/// in the argument list in a libFuzzer compatible way.
+int runFuzzerOnInputs(int ArgC, char *ArgV[], FuzzerTestFun TestOne,
+                      FuzzerInitFun Init = [](int *, char ***) { return 0; });
+
+/// Fuzzer friendly interface for the llvm bitcode parser.
+///
+/// \param Data Bitcode we are going to parse
+/// \param Size Size of the 'Data' in bytes
+/// \return New module or nullptr in case of error
+std::unique_ptr<Module> parseModule(const uint8_t *Data, size_t Size,
+                                    LLVMContext &Context);
+
+/// Fuzzer friendly interface for the llvm bitcode printer.
+///
+/// \param M Module to print
+/// \param Dest Location to store serialized module
+/// \param MaxSize Size of the destination buffer
+/// \return Number of bytes that were written. When module size exceeds MaxSize
+///         returns 0 and leaves Dest unchanged.
+size_t writeModule(const Module &M, uint8_t *Dest, size_t MaxSize);
+
+/// Try to parse a module and verify it. May output verification errors to
+/// errs().
+/// \return New module or nullptr in case of error.
+std::unique_ptr<Module> parseAndVerify(const uint8_t *Data, size_t Size,
+                                       LLVMContext &Context);
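+
+// Example (illustrative sketch): a fuzz target driver built from the helpers
+// above. LLVMFuzzerTestOneInput is the standard libFuzzer entry point; the
+// main() shim is only used when running the target standalone on inputs
+// named on the command line. `mutateAndEmit` is a hypothetical consumer.
+//
+//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+//     LLVMContext Context;
+//     if (std::unique_ptr<Module> M = parseAndVerify(Data, Size, Context))
+//       mutateAndEmit(*M);                                 // hypothetical
+//     return 0;
+//   }
+//
+//   int main(int argc, char *argv[]) {
+//     handleExecNameEncodedBEOpts(argv[0]);  // before parseFuzzerCLOpts
+//     parseFuzzerCLOpts(argc, argv);
+//     return runFuzzerOnInputs(argc, argv, LLVMFuzzerTestOneInput);
+//   }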
+
+} // end llvm namespace
+
+#endif // LLVM_FUZZMUTATE_FUZZER_CLI_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/IRMutator.h b/linux-x64/clang/include/llvm/FuzzMutate/IRMutator.h
new file mode 100644
index 0000000..9aa9d6d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/IRMutator.h
@@ -0,0 +1,108 @@
+//===-- IRMutator.h - Mutation engine for fuzzing IR ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides the IRMutator class, which drives mutations on IR based on a
+// configurable set of strategies. Some common strategies are also included
+// here.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_IRMUTATOR_H
+#define LLVM_FUZZMUTATE_IRMUTATOR_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/FuzzMutate/OpDescriptor.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class BasicBlock;
+class Function;
+class Instruction;
+class Module;
+
+struct RandomIRBuilder;
+
+/// Base class for describing how to mutate a module. Mutation functions for
+/// each IR unit forward to the contained unit.
+class IRMutationStrategy {
+public:
+  virtual ~IRMutationStrategy() = default;
+
+  /// Provide a weight to bias towards choosing this strategy for a mutation.
+  ///
+  /// The value of the weight is arbitrary, but a good default is "the number of
+  /// distinct ways in which this strategy can mutate a unit". This can also be
+  /// used to prefer strategies that shrink the overall size of the result when
+  /// we start getting close to \c MaxSize.
+  virtual uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
+                             uint64_t CurrentWeight) = 0;
+
+  /// @{
+  /// Mutators for each IR unit. By default these forward to a contained
+  /// instance of the next smaller unit.
+  virtual void mutate(Module &M, RandomIRBuilder &IB);
+  virtual void mutate(Function &F, RandomIRBuilder &IB);
+  virtual void mutate(BasicBlock &BB, RandomIRBuilder &IB);
+  virtual void mutate(Instruction &I, RandomIRBuilder &IB) {
+    llvm_unreachable("Strategy does not implement any mutators");
+  }
+  /// @}
+};
+
+using TypeGetter = std::function<Type *(LLVMContext &)>;
+
+/// Entry point for configuring and running IR mutations.
+class IRMutator {
+  std::vector<TypeGetter> AllowedTypes;
+  std::vector<std::unique_ptr<IRMutationStrategy>> Strategies;
+
+public:
+  IRMutator(std::vector<TypeGetter> &&AllowedTypes,
+            std::vector<std::unique_ptr<IRMutationStrategy>> &&Strategies)
+      : AllowedTypes(std::move(AllowedTypes)),
+        Strategies(std::move(Strategies)) {}
+
+  void mutateModule(Module &M, int Seed, size_t CurSize, size_t MaxSize);
+};
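+
+// A configuration sketch (illustrative, not part of the original header):
+// wire up the two strategies declared below over a couple of integer types,
+// assuming a Module M, an int Seed, and size bounds CurSize/MaxSize in scope.
+//
+// \code
+//   std::vector<TypeGetter> Types{
+//       [](LLVMContext &C) { return Type::getInt32Ty(C); },
+//       [](LLVMContext &C) { return Type::getInt64Ty(C); }};
+//   std::vector<std::unique_ptr<IRMutationStrategy>> Strategies;
+//   Strategies.push_back(llvm::make_unique<InjectorIRStrategy>(
+//       InjectorIRStrategy::getDefaultOps()));
+//   Strategies.push_back(llvm::make_unique<InstDeleterIRStrategy>());
+//   IRMutator Mutator(std::move(Types), std::move(Strategies));
+//   Mutator.mutateModule(M, Seed, CurSize, MaxSize);
+// \endcode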
+
+/// Strategy that injects operations into the function.
+class InjectorIRStrategy : public IRMutationStrategy {
+  std::vector<fuzzerop::OpDescriptor> Operations;
+
+  Optional<fuzzerop::OpDescriptor> chooseOperation(Value *Src,
+                                                   RandomIRBuilder &IB);
+
+public:
+  InjectorIRStrategy(std::vector<fuzzerop::OpDescriptor> &&Operations)
+      : Operations(std::move(Operations)) {}
+  static std::vector<fuzzerop::OpDescriptor> getDefaultOps();
+
+  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
+                     uint64_t CurrentWeight) override {
+    return Operations.size();
+  }
+
+  using IRMutationStrategy::mutate;
+  void mutate(Function &F, RandomIRBuilder &IB) override;
+  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
+};
+
+class InstDeleterIRStrategy : public IRMutationStrategy {
+public:
+  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
+                     uint64_t CurrentWeight) override;
+
+  using IRMutationStrategy::mutate;
+  void mutate(Function &F, RandomIRBuilder &IB) override;
+  void mutate(Instruction &Inst, RandomIRBuilder &IB) override;
+};
+
+} // end llvm namespace
+
+#endif // LLVM_FUZZMUTATE_IRMUTATOR_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/OpDescriptor.h b/linux-x64/clang/include/llvm/FuzzMutate/OpDescriptor.h
new file mode 100644
index 0000000..dd30fda
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/OpDescriptor.h
@@ -0,0 +1,223 @@
+//===-- OpDescriptor.h - Fuzzer operation descriptors -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides the fuzzerop::Descriptor class and related tools for describing
+// operations an IR fuzzer can work with.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_OPDESCRIPTOR_H
+#define LLVM_FUZZMUTATE_OPDESCRIPTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include <functional>
+
+namespace llvm {
+namespace fuzzerop {
+
+/// @{
+/// Populate a small list of potentially interesting constants of a given type.
+void makeConstantsWithType(Type *T, std::vector<Constant *> &Cs);
+std::vector<Constant *> makeConstantsWithType(Type *T);
+/// @}
+
+/// A matcher/generator for finding suitable values for the next source in an
+/// operation's partially completed argument list.
+///
+/// Given that we're building some operation X and may have already filled some
+/// subset of its operands, this predicate determines if some value New is
+/// suitable for the next operand or generates a set of values that are
+/// suitable.
+class SourcePred {
+public:
+  /// Given a list of already selected operands, returns whether a given new
+  /// operand is suitable for the next operand.
+  using PredT = std::function<bool(ArrayRef<Value *> Cur, const Value *New)>;
+  /// Given a list of already selected operands and a set of valid base types
+  /// for a fuzzer, generates a list of constants that could be used for the
+  /// next operand.
+  using MakeT = std::function<std::vector<Constant *>(
+      ArrayRef<Value *> Cur, ArrayRef<Type *> BaseTypes)>;
+
+private:
+  PredT Pred;
+  MakeT Make;
+
+public:
+  /// Create a fully general source predicate.
+  SourcePred(PredT Pred, MakeT Make) : Pred(Pred), Make(Make) {}
+  SourcePred(PredT Pred, NoneType) : Pred(Pred) {
+    Make = [Pred](ArrayRef<Value *> Cur, ArrayRef<Type *> BaseTypes) {
+      // Default filter just calls Pred on each of the base types.
+      std::vector<Constant *> Result;
+      for (Type *T : BaseTypes) {
+        Constant *V = UndefValue::get(T);
+        if (Pred(Cur, V))
+          makeConstantsWithType(T, Result);
+      }
+      if (Result.empty())
+        report_fatal_error("Predicate does not match for base types");
+      return Result;
+    };
+  }
+
+  /// Returns true if \c New is compatible as the argument after \c Cur.
+  bool matches(ArrayRef<Value *> Cur, const Value *New) {
+    return Pred(Cur, New);
+  }
+
+  /// Generates a list of potential values for the argument after \c Cur.
+  std::vector<Constant *> generate(ArrayRef<Value *> Cur,
+                                   ArrayRef<Type *> BaseTypes) {
+    return Make(Cur, BaseTypes);
+  }
+};
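+
+// An illustrative sketch (not part of the original header): a predicate that
+// accepts only 32-bit integers and falls back to the default constant
+// generator; SomeValue stands for any Value* the caller has at hand.
+//
+// \code
+//   SourcePred OnlyI32(
+//       [](ArrayRef<Value *>, const Value *V) {
+//         return V->getType()->isIntegerTy(32);
+//       },
+//       None);
+//   bool Usable = OnlyI32.matches({}, SomeValue);
+// \endcode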
+
+/// A description of some operation we can build while fuzzing IR.
+struct OpDescriptor {
+  unsigned Weight;
+  SmallVector<SourcePred, 2> SourcePreds;
+  std::function<Value *(ArrayRef<Value *>, Instruction *)> BuilderFunc;
+};
+
+static inline SourcePred onlyType(Type *Only) {
+  auto Pred = [Only](ArrayRef<Value *>, const Value *V) {
+    return V->getType() == Only;
+  };
+  auto Make = [Only](ArrayRef<Value *>, ArrayRef<Type *>) {
+    return makeConstantsWithType(Only);
+  };
+  return {Pred, Make};
+}
+
+static inline SourcePred anyType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    return !V->getType()->isVoidTy();
+  };
+  auto Make = None;
+  return {Pred, Make};
+}
+
+static inline SourcePred anyIntType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    return V->getType()->isIntegerTy();
+  };
+  auto Make = None;
+  return {Pred, Make};
+}
+
+static inline SourcePred anyFloatType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    return V->getType()->isFloatingPointTy();
+  };
+  auto Make = None;
+  return {Pred, Make};
+}
+
+static inline SourcePred anyPtrType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    return V->getType()->isPointerTy() && !V->isSwiftError();
+  };
+  auto Make = [](ArrayRef<Value *>, ArrayRef<Type *> Ts) {
+    std::vector<Constant *> Result;
+    // TODO: Should these point at something?
+    for (Type *T : Ts)
+      Result.push_back(UndefValue::get(PointerType::getUnqual(T)));
+    return Result;
+  };
+  return {Pred, Make};
+}
+
+static inline SourcePred sizedPtrType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    if (V->isSwiftError())
+      return false;
+
+    if (const auto *PtrT = dyn_cast<PointerType>(V->getType()))
+      return PtrT->getElementType()->isSized();
+    return false;
+  };
+  auto Make = [](ArrayRef<Value *>, ArrayRef<Type *> Ts) {
+    std::vector<Constant *> Result;
+
+    for (Type *T : Ts)
+      if (T->isSized())
+        Result.push_back(UndefValue::get(PointerType::getUnqual(T)));
+
+    return Result;
+  };
+  return {Pred, Make};
+}
+
+static inline SourcePred anyAggregateType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    // We can't index zero-sized arrays.
+    if (isa<ArrayType>(V->getType()))
+      return V->getType()->getArrayNumElements() > 0;
+
+    // Structs can also be zero-sized, i.e., opaque types.
+    if (isa<StructType>(V->getType()))
+      return V->getType()->getStructNumElements() > 0;
+
+    return V->getType()->isAggregateType();
+  };
+  // TODO: For now we only find aggregates in BaseTypes. It might be better to
+  // manufacture them out of the base types in some cases.
+  auto Find = None;
+  return {Pred, Find};
+}
+
+static inline SourcePred anyVectorType() {
+  auto Pred = [](ArrayRef<Value *>, const Value *V) {
+    return V->getType()->isVectorTy();
+  };
+  // TODO: For now we only find vectors in BaseTypes. It might be better to
+  // manufacture vectors out of the base types, but it's tricky to be sure
+  // that's actually a reasonable type.
+  auto Make = None;
+  return {Pred, Make};
+}
+
+/// Match values that have the same type as the first source.
+static inline SourcePred matchFirstType() {
+  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+    assert(!Cur.empty() && "No first source yet");
+    return V->getType() == Cur[0]->getType();
+  };
+  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
+    assert(!Cur.empty() && "No first source yet");
+    return makeConstantsWithType(Cur[0]->getType());
+  };
+  return {Pred, Make};
+}
+
+/// Match values that have the first source's scalar type.
+static inline SourcePred matchScalarOfFirstType() {
+  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+    assert(!Cur.empty() && "No first source yet");
+    return V->getType() == Cur[0]->getType()->getScalarType();
+  };
+  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
+    assert(!Cur.empty() && "No first source yet");
+    return makeConstantsWithType(Cur[0]->getType()->getScalarType());
+  };
+  return {Pred, Make};
+}
+
+} // end fuzzerop namespace
+} // end llvm namespace
+
+#endif // LLVM_FUZZMUTATE_OPDESCRIPTOR_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/Operations.h b/linux-x64/clang/include/llvm/FuzzMutate/Operations.h
new file mode 100644
index 0000000..668bd95
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/Operations.h
@@ -0,0 +1,54 @@
+//===-- Operations.h - Common fuzzer operations -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of common fuzzer operation descriptors for building an IR
+// mutator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_OPERATIONS_H
+#define LLVM_FUZZMUTATE_OPERATIONS_H
+
+#include "llvm/FuzzMutate/OpDescriptor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+/// Getters for the default sets of operations, per general category.
+/// @{
+void describeFuzzerIntOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+void describeFuzzerFloatOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+void describeFuzzerControlFlowOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+void describeFuzzerPointerOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+void describeFuzzerAggregateOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+void describeFuzzerVectorOps(std::vector<fuzzerop::OpDescriptor> &Ops);
+/// @}
+
+namespace fuzzerop {
+
+/// Descriptors for individual operations.
+/// @{
+OpDescriptor binOpDescriptor(unsigned Weight, Instruction::BinaryOps Op);
+OpDescriptor cmpOpDescriptor(unsigned Weight, Instruction::OtherOps CmpOp,
+                             CmpInst::Predicate Pred);
+OpDescriptor splitBlockDescriptor(unsigned Weight);
+OpDescriptor gepDescriptor(unsigned Weight);
+OpDescriptor extractValueDescriptor(unsigned Weight);
+OpDescriptor insertValueDescriptor(unsigned Weight);
+OpDescriptor extractElementDescriptor(unsigned Weight);
+OpDescriptor insertElementDescriptor(unsigned Weight);
+OpDescriptor shuffleVectorDescriptor(unsigned Weight);
+/// @}
+
+} // end fuzzerop namespace
+
+} // end llvm namespace
+
+#endif // LLVM_FUZZMUTATE_OPERATIONS_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/Random.h b/linux-x64/clang/include/llvm/FuzzMutate/Random.h
new file mode 100644
index 0000000..3a5f46a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/Random.h
@@ -0,0 +1,97 @@
+//===--- Random.h - Utilities for random sampling ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities for random sampling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_RANDOM_H
+#define LLVM_FUZZMUTATE_RANDOM_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <random>
+
+namespace llvm {
+
+/// Return a uniformly distributed random value between \c Min and \c Max,
+/// inclusive.
+template <typename T, typename GenT> T uniform(GenT &Gen, T Min, T Max) {
+  return std::uniform_int_distribution<T>(Min, Max)(Gen);
+}
+
+/// Return a uniformly distributed random value of type \c T.
+template <typename T, typename GenT> T uniform(GenT &Gen) {
+  return uniform<T>(Gen, std::numeric_limits<T>::min(),
+                    std::numeric_limits<T>::max());
+}
+
+/// Randomly selects an item by sampling from a stream with an unknown number
+/// of elements, each of which may be weighted to be a more likely choice.
+template <typename T, typename GenT> class ReservoirSampler {
+  GenT &RandGen;
+  typename std::remove_const<T>::type Selection = {};
+  uint64_t TotalWeight = 0;
+
+public:
+  ReservoirSampler(GenT &RandGen) : RandGen(RandGen) {}
+
+  uint64_t totalWeight() const { return TotalWeight; }
+  bool isEmpty() const { return TotalWeight == 0; }
+
+  const T &getSelection() const {
+    assert(!isEmpty() && "Nothing selected");
+    return Selection;
+  }
+
+  explicit operator bool() const { return !isEmpty(); }
+  const T &operator*() const { return getSelection(); }
+
+  /// Sample each item in \c Items with unit weight
+  template <typename RangeT> ReservoirSampler &sample(RangeT &&Items) {
+    for (auto &I : Items)
+      sample(I, 1);
+    return *this;
+  }
+
+  /// Sample a single item with the given weight.
+  ReservoirSampler &sample(const T &Item, uint64_t Weight) {
+    if (!Weight)
+      // If the weight is zero, do nothing.
+      return *this;
+    TotalWeight += Weight;
+    // Consider switching from the current element to this one.
+    if (uniform<uint64_t>(RandGen, 1, TotalWeight) <= Weight)
+      Selection = Item;
+    return *this;
+  }
+};
+
+template <typename GenT, typename RangeT,
+          typename ElT = typename std::remove_reference<
+              decltype(*std::begin(std::declval<RangeT>()))>::type>
+ReservoirSampler<ElT, GenT> makeSampler(GenT &RandGen, RangeT &&Items) {
+  ReservoirSampler<ElT, GenT> RS(RandGen);
+  RS.sample(Items);
+  return RS;
+}
+
+template <typename GenT, typename T>
+ReservoirSampler<T, GenT> makeSampler(GenT &RandGen, const T &Item,
+                                      uint64_t Weight) {
+  ReservoirSampler<T, GenT> RS(RandGen);
+  RS.sample(Item, Weight);
+  return RS;
+}
+
+template <typename T, typename GenT>
+ReservoirSampler<T, GenT> makeSampler(GenT &RandGen) {
+  return ReservoirSampler<T, GenT>(RandGen);
+}
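+
+// A usage sketch (illustrative, not part of the original header): weighted
+// selection where the second item is nine times as likely as the first; Seed
+// is any deterministic seed value the caller chooses.
+//
+// \code
+//   std::mt19937 Gen(Seed);
+//   auto Sampler = makeSampler<int>(Gen);
+//   Sampler.sample(1, /*Weight=*/1);
+//   Sampler.sample(2, /*Weight=*/9);
+//   int Picked = *Sampler; // 2 with probability 0.9.
+// \endcode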
+
+} // End llvm namespace
+
+#endif // LLVM_FUZZMUTATE_RANDOM_H
diff --git a/linux-x64/clang/include/llvm/FuzzMutate/RandomIRBuilder.h b/linux-x64/clang/include/llvm/FuzzMutate/RandomIRBuilder.h
new file mode 100644
index 0000000..5cf3f0b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/FuzzMutate/RandomIRBuilder.h
@@ -0,0 +1,62 @@
+//===-- RandomIRBuilder.h - Utils for randomly mutating IR ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides the RandomIRBuilder class, which is used to randomly build and
+// mutate IR for fuzzing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_RANDOMIRBUILDER_H
+#define LLVM_FUZZMUTATE_RANDOMIRBUILDER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/FuzzMutate/IRMutator.h"
+#include "llvm/FuzzMutate/Random.h"
+#include <random>
+
+namespace llvm {
+
+using RandomEngine = std::mt19937;
+
+struct RandomIRBuilder {
+  RandomEngine Rand;
+  SmallVector<Type *, 16> KnownTypes;
+
+  RandomIRBuilder(int Seed, ArrayRef<Type *> AllowedTypes)
+      : Rand(Seed), KnownTypes(AllowedTypes.begin(), AllowedTypes.end()) {}
+
+  // TODO: Try to make this a bit less of a random mishmash of functions.
+
+  /// Find a "source" for some operation, which will be used in one of the
+  /// operation's operands. This either selects an instruction in \c Insts or
+  /// returns some new arbitrary Value.
+  Value *findOrCreateSource(BasicBlock &BB, ArrayRef<Instruction *> Insts);
+  /// Find a "source" for some operation, which will be used in one of the
+  /// operation's operands. This either selects an instruction in \c Insts that
+  /// matches \c Pred, or returns some new Value that matches \c Pred. The
+  /// values in \c Srcs should be source operands that have already been
+  /// selected.
+  Value *findOrCreateSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
+                            ArrayRef<Value *> Srcs, fuzzerop::SourcePred Pred);
+  /// Create some Value suitable as a source for some operation.
+  Value *newSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
+                   ArrayRef<Value *> Srcs, fuzzerop::SourcePred Pred);
+  /// Find a viable user for \c V in \c Insts, which should all be contained in
+  /// \c BB. This may also create some new instruction in \c BB and use that.
+  void connectToSink(BasicBlock &BB, ArrayRef<Instruction *> Insts, Value *V);
+  /// Create a user for \c V in \c BB.
+  void newSink(BasicBlock &BB, ArrayRef<Instruction *> Insts, Value *V);
+  Value *findPointer(BasicBlock &BB, ArrayRef<Instruction *> Insts,
+                     ArrayRef<Value *> Srcs, fuzzerop::SourcePred Pred);
+  Type *chooseType(LLVMContext &Context, ArrayRef<Value *> Srcs,
+                   fuzzerop::SourcePred Pred);
+};
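+
+// A usage sketch (illustrative, not part of the original header; Ctx, Seed,
+// a BasicBlock BB, and an instruction list Insts are assumed to be in scope):
+// seed a builder with a few base types and pull a source value out of a block.
+//
+// \code
+//   Type *Types[] = {Type::getInt32Ty(Ctx), Type::getFloatTy(Ctx)};
+//   RandomIRBuilder IB(Seed, Types);
+//   Value *Src = IB.findOrCreateSource(BB, Insts);
+// \endcode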
+
+} // end llvm namespace
+
+#endif // LLVM_FUZZMUTATE_RANDOMIRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/IR/Argument.h b/linux-x64/clang/include/llvm/IR/Argument.h
new file mode 100644
index 0000000..497dca4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Argument.h
@@ -0,0 +1,130 @@
+//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Argument class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ARGUMENT_H
+#define LLVM_IR_ARGUMENT_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+
+/// This class represents an incoming formal argument to a Function. A formal
+/// argument, since it is ``formal'', does not contain an actual value but
+/// instead represents the type, argument number, and attributes of an argument
+/// for a specific function. When used in the body of said function, the
+/// argument of course represents the value of the actual argument that the
+/// function was called with.
+class Argument final : public Value {
+  Function *Parent;
+  unsigned ArgNo;
+
+  friend class Function;
+  void setParent(Function *parent);
+
+public:
+  /// Argument constructor.
+  explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr,
+                    unsigned ArgNo = 0);
+
+  inline const Function *getParent() const { return Parent; }
+  inline       Function *getParent()       { return Parent; }
+
+  /// Return the index of this formal argument in its containing function.
+  ///
+  /// For example in "void foo(int a, float b)" a is 0 and b is 1.
+  unsigned getArgNo() const {
+    assert(Parent && "can't get number of unparented arg");
+    return ArgNo;
+  }
+
+  /// Return true if this argument has the nonnull attribute. Also returns true
+  /// if at least one byte is known to be dereferenceable and the pointer is in
+  /// addrspace(0).
+  bool hasNonNullAttr() const;
+
+  /// If this argument has the dereferenceable attribute, return the number of
+  /// bytes known to be dereferenceable. Otherwise, zero is returned.
+  uint64_t getDereferenceableBytes() const;
+
+  /// If this argument has the dereferenceable_or_null attribute, return the
+  /// number of bytes known to be dereferenceable. Otherwise, zero is returned.
+  uint64_t getDereferenceableOrNullBytes() const;
+
+  /// Return true if this argument has the byval attribute.
+  bool hasByValAttr() const;
+
+  /// Return true if this argument has the swiftself attribute.
+  bool hasSwiftSelfAttr() const;
+
+  /// Return true if this argument has the swifterror attribute.
+  bool hasSwiftErrorAttr() const;
+
+  /// Return true if this argument has the byval attribute or inalloca
+  /// attribute. These attributes represent arguments being passed by value.
+  bool hasByValOrInAllocaAttr() const;
+
+  /// If this is a byval or inalloca argument, return its alignment.
+  unsigned getParamAlignment() const;
+
+  /// Return true if this argument has the nest attribute.
+  bool hasNestAttr() const;
+
+  /// Return true if this argument has the noalias attribute.
+  bool hasNoAliasAttr() const;
+
+  /// Return true if this argument has the nocapture attribute.
+  bool hasNoCaptureAttr() const;
+
+  /// Return true if this argument has the sret attribute.
+  bool hasStructRetAttr() const;
+
+  /// Return true if this argument has the returned attribute.
+  bool hasReturnedAttr() const;
+
+  /// Return true if this argument has the readonly or readnone attribute.
+  bool onlyReadsMemory() const;
+
+  /// Return true if this argument has the inalloca attribute.
+  bool hasInAllocaAttr() const;
+
+  /// Return true if this argument has the zext attribute.
+  bool hasZExtAttr() const;
+
+  /// Return true if this argument has the sext attribute.
+  bool hasSExtAttr() const;
+
+  /// Add attributes to an argument.
+  void addAttrs(AttrBuilder &B);
+
+  void addAttr(Attribute::AttrKind Kind);
+
+  void addAttr(Attribute Attr);
+
+  /// Remove attributes from an argument.
+  void removeAttr(Attribute::AttrKind Kind);
+
+  /// Check if an argument has a given attribute.
+  bool hasAttribute(Attribute::AttrKind Kind) const;
+
+  /// Method to support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return V->getValueID() == ArgumentVal;
+  }
+};
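+
+// An illustrative sketch (not part of the original header): counting the
+// pass-by-value arguments of some Function F.
+//
+// \code
+//   unsigned NumByVal = 0;
+//   for (const Argument &A : F.args())
+//     if (A.hasByValOrInAllocaAttr())
+//       ++NumByVal;
+// \endcode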
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/AssemblyAnnotationWriter.h b/linux-x64/clang/include/llvm/IR/AssemblyAnnotationWriter.h
new file mode 100644
index 0000000..6e1f5c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/AssemblyAnnotationWriter.h
@@ -0,0 +1,62 @@
+//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Clients of the assembly writer can use this interface to add their own
+// special-purpose annotations to LLVM assembly language printouts.  Note that
+// the assembly parser generally won't be able to parse these annotations, so
+// implementations are advised to emit them as LLVM comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
+#define LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
+
+namespace llvm {
+
+class Function;
+class BasicBlock;
+class Instruction;
+class Value;
+class formatted_raw_ostream;
+
+class AssemblyAnnotationWriter {
+public:
+  virtual ~AssemblyAnnotationWriter();
+
+  /// emitFunctionAnnot - This may be implemented to emit a string right before
+  /// the start of a function.
+  virtual void emitFunctionAnnot(const Function *,
+                                 formatted_raw_ostream &) {}
+
+  /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
+  /// after the basic block label, but before the first instruction in the
+  /// block.
+  virtual void emitBasicBlockStartAnnot(const BasicBlock *,
+                                        formatted_raw_ostream &) {
+  }
+
+  /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
+  /// after the basic block.
+  virtual void emitBasicBlockEndAnnot(const BasicBlock *,
+                                      formatted_raw_ostream &) {
+  }
+
+  /// emitInstructionAnnot - This may be implemented to emit a string right
+  /// before an instruction is emitted.
+  virtual void emitInstructionAnnot(const Instruction *,
+                                    formatted_raw_ostream &) {}
+
+  /// printInfoComment - This may be implemented to emit a comment to the
+  /// right of an instruction or global value.
+  virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
+};
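+
+// An illustrative subclass sketch (not part of the original header): annotate
+// every instruction with its opcode name when printing some Module M.
+//
+// \code
+//   struct OpcodeAnnotator : AssemblyAnnotationWriter {
+//     void emitInstructionAnnot(const Instruction *I,
+//                               formatted_raw_ostream &OS) override {
+//       OS << "  ; opcode: " << I->getOpcodeName() << '\n';
+//     }
+//   };
+//
+//   OpcodeAnnotator Annotator;
+//   M.print(outs(), &Annotator);
+// \endcode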
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Attributes.gen b/linux-x64/clang/include/llvm/IR/Attributes.gen
new file mode 100644
index 0000000..b1fb805
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Attributes.gen
@@ -0,0 +1,502 @@
+#ifdef GET_ATTR_ENUM
+#undef GET_ATTR_ENUM
+Alignment,
+AllocSize,
+AlwaysInline,
+ArgMemOnly,
+Builtin,
+ByVal,
+Cold,
+Convergent,
+Dereferenceable,
+DereferenceableOrNull,
+InAlloca,
+InReg,
+InaccessibleMemOnly,
+InaccessibleMemOrArgMemOnly,
+InlineHint,
+JumpTable,
+MinSize,
+Naked,
+Nest,
+NoAlias,
+NoBuiltin,
+NoCapture,
+NoCfCheck,
+NoDuplicate,
+NoImplicitFloat,
+NoInline,
+NoRecurse,
+NoRedZone,
+NoReturn,
+NoUnwind,
+NonLazyBind,
+NonNull,
+OptForFuzzing,
+OptimizeForSize,
+OptimizeNone,
+ReadNone,
+ReadOnly,
+Returned,
+ReturnsTwice,
+SExt,
+SafeStack,
+SanitizeAddress,
+SanitizeHWAddress,
+SanitizeMemory,
+SanitizeThread,
+Speculatable,
+StackAlignment,
+StackProtect,
+StackProtectReq,
+StackProtectStrong,
+StrictFP,
+StructRet,
+SwiftError,
+SwiftSelf,
+UWTable,
+WriteOnly,
+ZExt,
+#endif
+#ifdef GET_ATTR_KIND_FROM_NAME
+#undef GET_ATTR_KIND_FROM_NAME
+static Attribute::AttrKind getAttrKindFromName(StringRef AttrName) {
+  return StringSwitch<Attribute::AttrKind>(AttrName)
+    .Case("align", Attribute::Alignment)
+    .Case("allocsize", Attribute::AllocSize)
+    .Case("alwaysinline", Attribute::AlwaysInline)
+    .Case("argmemonly", Attribute::ArgMemOnly)
+    .Case("builtin", Attribute::Builtin)
+    .Case("byval", Attribute::ByVal)
+    .Case("cold", Attribute::Cold)
+    .Case("convergent", Attribute::Convergent)
+    .Case("dereferenceable", Attribute::Dereferenceable)
+    .Case("dereferenceable_or_null", Attribute::DereferenceableOrNull)
+    .Case("inalloca", Attribute::InAlloca)
+    .Case("inreg", Attribute::InReg)
+    .Case("inaccessiblememonly", Attribute::InaccessibleMemOnly)
+    .Case("inaccessiblemem_or_argmemonly", Attribute::InaccessibleMemOrArgMemOnly)
+    .Case("inlinehint", Attribute::InlineHint)
+    .Case("jumptable", Attribute::JumpTable)
+    .Case("minsize", Attribute::MinSize)
+    .Case("naked", Attribute::Naked)
+    .Case("nest", Attribute::Nest)
+    .Case("noalias", Attribute::NoAlias)
+    .Case("nobuiltin", Attribute::NoBuiltin)
+    .Case("nocapture", Attribute::NoCapture)
+    .Case("nocf_check", Attribute::NoCfCheck)
+    .Case("noduplicate", Attribute::NoDuplicate)
+    .Case("noimplicitfloat", Attribute::NoImplicitFloat)
+    .Case("noinline", Attribute::NoInline)
+    .Case("norecurse", Attribute::NoRecurse)
+    .Case("noredzone", Attribute::NoRedZone)
+    .Case("noreturn", Attribute::NoReturn)
+    .Case("nounwind", Attribute::NoUnwind)
+    .Case("nonlazybind", Attribute::NonLazyBind)
+    .Case("nonnull", Attribute::NonNull)
+    .Case("optforfuzzing", Attribute::OptForFuzzing)
+    .Case("optsize", Attribute::OptimizeForSize)
+    .Case("optnone", Attribute::OptimizeNone)
+    .Case("readnone", Attribute::ReadNone)
+    .Case("readonly", Attribute::ReadOnly)
+    .Case("returned", Attribute::Returned)
+    .Case("returns_twice", Attribute::ReturnsTwice)
+    .Case("signext", Attribute::SExt)
+    .Case("safestack", Attribute::SafeStack)
+    .Case("sanitize_address", Attribute::SanitizeAddress)
+    .Case("sanitize_hwaddress", Attribute::SanitizeHWAddress)
+    .Case("sanitize_memory", Attribute::SanitizeMemory)
+    .Case("sanitize_thread", Attribute::SanitizeThread)
+    .Case("speculatable", Attribute::Speculatable)
+    .Case("alignstack", Attribute::StackAlignment)
+    .Case("ssp", Attribute::StackProtect)
+    .Case("sspreq", Attribute::StackProtectReq)
+    .Case("sspstrong", Attribute::StackProtectStrong)
+    .Case("strictfp", Attribute::StrictFP)
+    .Case("sret", Attribute::StructRet)
+    .Case("swifterror", Attribute::SwiftError)
+    .Case("swiftself", Attribute::SwiftSelf)
+    .Case("uwtable", Attribute::UWTable)
+    .Case("writeonly", Attribute::WriteOnly)
+    .Case("zeroext", Attribute::ZExt)
+    .Default(Attribute::None);
+}
+
+#endif
+#ifdef GET_ATTR_COMPAT_FUNC
+#undef GET_ATTR_COMPAT_FUNC
+struct EnumAttr {
+  static bool isSet(const Function &Fn,
+                    Attribute::AttrKind Kind) {
+    return Fn.hasFnAttribute(Kind);
+  }
+
+  static void set(Function &Fn,
+                  Attribute::AttrKind Kind, bool Val) {
+    if (Val)
+      Fn.addFnAttr(Kind);
+    else
+      Fn.removeFnAttr(Kind);
+  }
+};
+
+struct StrBoolAttr {
+  static bool isSet(const Function &Fn,
+                    StringRef Kind) {
+    auto A = Fn.getFnAttribute(Kind);
+    return A.getValueAsString().equals("true");
+  }
+
+  static void set(Function &Fn,
+                  StringRef Kind, bool Val) {
+    Fn.addFnAttr(Kind, Val ? "true" : "false");
+  }
+};
+
+// EnumAttr classes
+struct AlignmentAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Alignment;
+  }
+};
+struct AllocSizeAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::AllocSize;
+  }
+};
+struct AlwaysInlineAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::AlwaysInline;
+  }
+};
+struct ArgMemOnlyAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ArgMemOnly;
+  }
+};
+struct BuiltinAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Builtin;
+  }
+};
+struct ByValAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ByVal;
+  }
+};
+struct ColdAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Cold;
+  }
+};
+struct ConvergentAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Convergent;
+  }
+};
+struct DereferenceableAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Dereferenceable;
+  }
+};
+struct DereferenceableOrNullAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::DereferenceableOrNull;
+  }
+};
+struct InAllocaAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::InAlloca;
+  }
+};
+struct InRegAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::InReg;
+  }
+};
+struct InaccessibleMemOnlyAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::InaccessibleMemOnly;
+  }
+};
+struct InaccessibleMemOrArgMemOnlyAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::InaccessibleMemOrArgMemOnly;
+  }
+};
+struct InlineHintAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::InlineHint;
+  }
+};
+struct JumpTableAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::JumpTable;
+  }
+};
+struct MinSizeAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::MinSize;
+  }
+};
+struct NakedAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Naked;
+  }
+};
+struct NestAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Nest;
+  }
+};
+struct NoAliasAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoAlias;
+  }
+};
+struct NoBuiltinAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoBuiltin;
+  }
+};
+struct NoCaptureAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoCapture;
+  }
+};
+struct NoCfCheckAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoCfCheck;
+  }
+};
+struct NoDuplicateAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoDuplicate;
+  }
+};
+struct NoImplicitFloatAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoImplicitFloat;
+  }
+};
+struct NoInlineAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoInline;
+  }
+};
+struct NoRecurseAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoRecurse;
+  }
+};
+struct NoRedZoneAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoRedZone;
+  }
+};
+struct NoReturnAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoReturn;
+  }
+};
+struct NoUnwindAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NoUnwind;
+  }
+};
+struct NonLazyBindAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NonLazyBind;
+  }
+};
+struct NonNullAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::NonNull;
+  }
+};
+struct OptForFuzzingAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::OptForFuzzing;
+  }
+};
+struct OptimizeForSizeAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::OptimizeForSize;
+  }
+};
+struct OptimizeNoneAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::OptimizeNone;
+  }
+};
+struct ReadNoneAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ReadNone;
+  }
+};
+struct ReadOnlyAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ReadOnly;
+  }
+};
+struct ReturnedAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Returned;
+  }
+};
+struct ReturnsTwiceAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ReturnsTwice;
+  }
+};
+struct SExtAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SExt;
+  }
+};
+struct SafeStackAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SafeStack;
+  }
+};
+struct SanitizeAddressAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SanitizeAddress;
+  }
+};
+struct SanitizeHWAddressAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SanitizeHWAddress;
+  }
+};
+struct SanitizeMemoryAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SanitizeMemory;
+  }
+};
+struct SanitizeThreadAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SanitizeThread;
+  }
+};
+struct SpeculatableAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::Speculatable;
+  }
+};
+struct StackAlignmentAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StackAlignment;
+  }
+};
+struct StackProtectAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StackProtect;
+  }
+};
+struct StackProtectReqAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StackProtectReq;
+  }
+};
+struct StackProtectStrongAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StackProtectStrong;
+  }
+};
+struct StrictFPAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StrictFP;
+  }
+};
+struct StructRetAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::StructRet;
+  }
+};
+struct SwiftErrorAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SwiftError;
+  }
+};
+struct SwiftSelfAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::SwiftSelf;
+  }
+};
+struct UWTableAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::UWTable;
+  }
+};
+struct WriteOnlyAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::WriteOnly;
+  }
+};
+struct ZExtAttr : EnumAttr {
+  static enum Attribute::AttrKind getKind() {
+    return llvm::Attribute::ZExt;
+  }
+};
+
+// StrBoolAttr classes
+struct LessPreciseFPMADAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "less-precise-fpmad";
+  }
+};
+struct NoInfsFPMathAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "no-infs-fp-math";
+  }
+};
+struct NoJumpTablesAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "no-jump-tables";
+  }
+};
+struct NoNansFPMathAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "no-nans-fp-math";
+  }
+};
+struct ProfileSampleAccurateAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "profile-sample-accurate";
+  }
+};
+struct UnsafeFPMathAttr : StrBoolAttr {
+  static StringRef getKind() {
+    return "unsafe-fp-math";
+  }
+};
+
+static inline bool hasCompatibleFnAttrs(const Function &Caller,
+                                        const Function &Callee) {
+  bool Ret = true;
+
+  Ret &= isEqual<SanitizeAddressAttr>(Caller, Callee);
+  Ret &= isEqual<SanitizeThreadAttr>(Caller, Callee);
+  Ret &= isEqual<SanitizeMemoryAttr>(Caller, Callee);
+  Ret &= isEqual<SanitizeHWAddressAttr>(Caller, Callee);
+  Ret &= isEqual<SafeStackAttr>(Caller, Callee);
+
+  return Ret;
+}
+
+static inline void mergeFnAttrs(Function &Caller,
+                                const Function &Callee) {
+  setOR<NoJumpTablesAttr>(Caller, Callee);
+  setOR<ProfileSampleAccurateAttr>(Caller, Callee);
+  adjustCallerSSPLevel(Caller, Callee);
+  adjustCallerStackProbes(Caller, Callee);
+  adjustCallerStackProbeSize(Caller, Callee);
+  setAND<LessPreciseFPMADAttr>(Caller, Callee);
+  setAND<NoInfsFPMathAttr>(Caller, Callee);
+  setAND<NoNansFPMathAttr>(Caller, Callee);
+  setAND<UnsafeFPMathAttr>(Caller, Callee);
+  setOR<NoImplicitFloatAttr>(Caller, Callee);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Attributes.h b/linux-x64/clang/include/llvm/IR/Attributes.h
new file mode 100644
index 0000000..660fc58
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Attributes.h
@@ -0,0 +1,839 @@
+//===- llvm/Attributes.h - Container for Attributes -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief This file contains the simple types necessary to represent the
+/// attributes associated with functions and their calls.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ATTRIBUTES_H
+#define LLVM_IR_ATTRIBUTES_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <bitset>
+#include <cassert>
+#include <cstdint>
+#include <map>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class AttrBuilder;
+class AttributeImpl;
+class AttributeListImpl;
+class AttributeSetNode;
+template<typename T> struct DenseMapInfo;
+class Function;
+class LLVMContext;
+class Type;
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief Functions, function parameters, and return types can have attributes
+/// to indicate how they should be treated by optimizations and code
+/// generation. This class represents one of those attributes. It's light-weight
+/// and should be passed around by-value.
+class Attribute {
+public:
+  /// This enumeration lists the attributes that can be associated with
+  /// parameters, function results, or the function itself.
+  ///
+  /// Note: The `uwtable' attribute is about the ABI or the user mandating an
+  /// entry in the unwind table. The `nounwind' attribute is about whether an
+  /// exception can pass through the function.
+  ///
+  /// In a theoretical system that uses tables for profiling and SjLj for
+  /// exceptions, they would be fully independent. In a normal system that uses
+  /// tables for both, the semantics are:
+  ///
+  /// nil                = Needs an entry because an exception might pass by.
+  /// nounwind           = No need for an entry
+  /// uwtable            = Needs an entry because the ABI says so and because
+  ///                      an exception might pass by.
+  /// uwtable + nounwind = Needs an entry because the ABI says so.
+
+  enum AttrKind {
+    // IR-Level Attributes
+    None,                  ///< No attributes have been set
+    #define GET_ATTR_ENUM
+    #include "llvm/IR/Attributes.gen"
+    EndAttrKinds           ///< Sentinel value useful for loops
+  };
+
+private:
+  AttributeImpl *pImpl = nullptr;
+
+  Attribute(AttributeImpl *A) : pImpl(A) {}
+
+public:
+  Attribute() = default;
+
+  //===--------------------------------------------------------------------===//
+  // Attribute Construction
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return a uniquified Attribute object.
+  static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val = 0);
+  static Attribute get(LLVMContext &Context, StringRef Kind,
+                       StringRef Val = StringRef());
+
+  /// \brief Return a uniquified Attribute object that has the specific
+  /// alignment set.
+  static Attribute getWithAlignment(LLVMContext &Context, uint64_t Align);
+  static Attribute getWithStackAlignment(LLVMContext &Context, uint64_t Align);
+  static Attribute getWithDereferenceableBytes(LLVMContext &Context,
+                                              uint64_t Bytes);
+  static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context,
+                                                     uint64_t Bytes);
+  static Attribute getWithAllocSizeArgs(LLVMContext &Context,
+                                        unsigned ElemSizeArg,
+                                        const Optional<unsigned> &NumElemsArg);
+
+  //===--------------------------------------------------------------------===//
+  // Attribute Accessors
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return true if the attribute is an Attribute::AttrKind type.
+  bool isEnumAttribute() const;
+
+  /// \brief Return true if the attribute is an integer attribute.
+  bool isIntAttribute() const;
+
+  /// \brief Return true if the attribute is a string (target-dependent)
+  /// attribute.
+  bool isStringAttribute() const;
+
+  /// \brief Return true if the attribute is present.
+  bool hasAttribute(AttrKind Val) const;
+
+  /// \brief Return true if the target-dependent attribute is present.
+  bool hasAttribute(StringRef Val) const;
+
+  /// \brief Return the attribute's kind as an enum (Attribute::AttrKind). This
+  /// requires the attribute to be an enum or integer attribute.
+  Attribute::AttrKind getKindAsEnum() const;
+
+  /// \brief Return the attribute's value as an integer. This requires that the
+  /// attribute be an integer attribute.
+  uint64_t getValueAsInt() const;
+
+  /// \brief Return the attribute's kind as a string. This requires the
+  /// attribute to be a string attribute.
+  StringRef getKindAsString() const;
+
+  /// \brief Return the attribute's value as a string. This requires the
+  /// attribute to be a string attribute.
+  StringRef getValueAsString() const;
+
+  /// \brief Returns the alignment field of an attribute as a byte alignment
+  /// value.
+  unsigned getAlignment() const;
+
+  /// \brief Returns the stack alignment field of an attribute as a byte
+  /// alignment value.
+  unsigned getStackAlignment() const;
+
+  /// \brief Returns the number of dereferenceable bytes from the
+  /// dereferenceable attribute.
+  uint64_t getDereferenceableBytes() const;
+
+  /// \brief Returns the number of dereferenceable_or_null bytes from the
+  /// dereferenceable_or_null attribute.
+  uint64_t getDereferenceableOrNullBytes() const;
+
+  /// Returns the argument numbers for the allocsize attribute (or pair(0, 0)
+  /// if not known).
+  std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+
+  /// \brief Convert the attribute to its equivalent mnemonic string, primarily
+  /// for use by the assembly writer.
+  std::string getAsString(bool InAttrGrp = false) const;
+
+  /// \brief Equality and non-equality operators.
+  bool operator==(Attribute A) const { return pImpl == A.pImpl; }
+  bool operator!=(Attribute A) const { return pImpl != A.pImpl; }
+
+  /// \brief Less-than operator. Useful for sorting the attributes list.
+  bool operator<(Attribute A) const;
+
+  void Profile(FoldingSetNodeID &ID) const {
+    ID.AddPointer(pImpl);
+  }
+
+  /// \brief Return a raw pointer that uniquely identifies this attribute.
+  void *getRawPointer() const {
+    return pImpl;
+  }
+
+  /// \brief Get an attribute from a raw pointer created by getRawPointer.
+  static Attribute fromRawPointer(void *RawPtr) {
+    return Attribute(reinterpret_cast<AttributeImpl*>(RawPtr));
+  }
+};
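+
+// A construction sketch (illustrative, not part of the original header; Ctx is
+// an LLVMContext assumed to be in scope, and the string key/value below are
+// arbitrary placeholders):
+//
+// \code
+//   Attribute Align = Attribute::getWithAlignment(Ctx, 16);
+//   assert(Align.getAlignment() == 16);
+//
+//   Attribute Tag = Attribute::get(Ctx, "my-key", "my-value");
+//   assert(Tag.isStringAttribute() && Tag.getValueAsString() == "my-value");
+// \endcode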
+
+// Specialized opaque value conversions.
+inline LLVMAttributeRef wrap(Attribute Attr) {
+  return reinterpret_cast<LLVMAttributeRef>(Attr.getRawPointer());
+}
+
+// Specialized opaque value conversions.
+inline Attribute unwrap(LLVMAttributeRef Attr) {
+  return Attribute::fromRawPointer(Attr);
+}
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// This class holds the attributes for a particular argument, parameter,
+/// function, or return value. It is an immutable value type that is cheap to
+/// copy. Adding and removing enum attributes is intended to be fast, but adding
+/// and removing string or integer attributes involves a FoldingSet lookup.
+class AttributeSet {
+  friend AttributeListImpl;
+  template <typename Ty> friend struct DenseMapInfo;
+
+  // TODO: Extract AvailableAttrs from AttributeSetNode and store them here.
+  // This will allow an efficient implementation of addAttribute and
+  // removeAttribute for enum attrs.
+
+  /// Private implementation pointer.
+  AttributeSetNode *SetNode = nullptr;
+
+private:
+  explicit AttributeSet(AttributeSetNode *ASN) : SetNode(ASN) {}
+
+public:
+  /// AttributeSet is a trivially copyable value type.
+  AttributeSet() = default;
+  AttributeSet(const AttributeSet &) = default;
+  ~AttributeSet() = default;
+
+  static AttributeSet get(LLVMContext &C, const AttrBuilder &B);
+  static AttributeSet get(LLVMContext &C, ArrayRef<Attribute> Attrs);
+
+  bool operator==(const AttributeSet &O) const { return SetNode == O.SetNode; }
+  bool operator!=(const AttributeSet &O) const { return !(*this == O); }
+
+  /// Add an argument attribute. Returns a new set because attribute sets are
+  /// immutable.
+  AttributeSet addAttribute(LLVMContext &C, Attribute::AttrKind Kind) const;
+
+  /// Add a target-dependent attribute. Returns a new set because attribute sets
+  /// are immutable.
+  AttributeSet addAttribute(LLVMContext &C, StringRef Kind,
+                            StringRef Value = StringRef()) const;
+
+  /// Add attributes to the attribute set. Returns a new set because attribute
+  /// sets are immutable.
+  AttributeSet addAttributes(LLVMContext &C, AttributeSet AS) const;
+
+  /// Remove the specified attribute from this set. Returns a new set because
+  /// attribute sets are immutable.
+  AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const;
+
+  /// Remove the specified attribute from this set. Returns a new set because
+  /// attribute sets are immutable.
+  AttributeSet removeAttribute(LLVMContext &C, StringRef Kind) const;
+
+  /// Remove the specified attributes from this set. Returns a new set because
+  /// attribute sets are immutable.
+  AttributeSet removeAttributes(LLVMContext &C,
+                                const AttrBuilder &AttrsToRemove) const;
+
+  /// Return the number of attributes in this set.
+  unsigned getNumAttributes() const;
+
+  /// Return true if any attributes exist in this set.
+  bool hasAttributes() const { return SetNode != nullptr; }
+
+  /// Return true if the attribute exists in this set.
+  bool hasAttribute(Attribute::AttrKind Kind) const;
+
+  /// Return true if the attribute exists in this set.
+  bool hasAttribute(StringRef Kind) const;
+
+  /// Return the attribute object.
+  Attribute getAttribute(Attribute::AttrKind Kind) const;
+
+  /// Return the target-dependent attribute object.
+  Attribute getAttribute(StringRef Kind) const;
+
+  unsigned getAlignment() const;
+  unsigned getStackAlignment() const;
+  uint64_t getDereferenceableBytes() const;
+  uint64_t getDereferenceableOrNullBytes() const;
+  std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+  std::string getAsString(bool InAttrGrp = false) const;
+
+  using iterator = const Attribute *;
+
+  iterator begin() const;
+  iterator end() const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  void dump() const;
+#endif
+};
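+
+// An immutability sketch (illustrative, not part of the original header; Ctx
+// assumed in scope): mutation-style calls return a fresh set and leave the
+// original untouched.
+//
+// \code
+//   AttributeSet AS = AttributeSet::get(
+//       Ctx, {Attribute::get(Ctx, Attribute::NoAlias)});
+//   AttributeSet AS2 = AS.addAttribute(Ctx, Attribute::NonNull);
+//   assert(!AS.hasAttribute(Attribute::NonNull));
+//   assert(AS2.hasAttribute(Attribute::NonNull));
+// \endcode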
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief Provide DenseMapInfo for AttributeSet.
+template <> struct DenseMapInfo<AttributeSet> {
+  static AttributeSet getEmptyKey() {
+    auto Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
+    return AttributeSet(reinterpret_cast<AttributeSetNode *>(Val));
+  }
+
+  static AttributeSet getTombstoneKey() {
+    auto Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
+    return AttributeSet(reinterpret_cast<AttributeSetNode *>(Val));
+  }
+
+  static unsigned getHashValue(AttributeSet AS) {
+    return (unsigned((uintptr_t)AS.SetNode) >> 4) ^
+           (unsigned((uintptr_t)AS.SetNode) >> 9);
+  }
+
+  static bool isEqual(AttributeSet LHS, AttributeSet RHS) { return LHS == RHS; }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class holds the attributes for a function, its return value, and
+/// its parameters. You access the attributes for each of them via an index into
+/// the AttributeList object. The function attributes are at index
+/// `AttributeList::FunctionIndex', the return value is at index
+/// `AttributeList::ReturnIndex', and the attributes for the parameters start at
+/// index `AttributeList::FirstArgIndex'.
+class AttributeList {
+public:
+  enum AttrIndex : unsigned {
+    ReturnIndex = 0U,
+    FunctionIndex = ~0U,
+    FirstArgIndex = 1,
+  };
+
+private:
+  friend class AttrBuilder;
+  friend class AttributeListImpl;
+  friend class AttributeSet;
+  friend class AttributeSetNode;
+  template <typename Ty> friend struct DenseMapInfo;
+
+  /// \brief The attributes that we are managing. This can be null to represent
+  /// the empty attributes list.
+  AttributeListImpl *pImpl = nullptr;
+
+public:
+  /// \brief Create an AttributeList with the specified parameters in it.
+  static AttributeList get(LLVMContext &C,
+                           ArrayRef<std::pair<unsigned, Attribute>> Attrs);
+  static AttributeList get(LLVMContext &C,
+                           ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
+
+  /// \brief Create an AttributeList from attribute sets for a function, its
+  /// return value, and all of its arguments.
+  static AttributeList get(LLVMContext &C, AttributeSet FnAttrs,
+                           AttributeSet RetAttrs,
+                           ArrayRef<AttributeSet> ArgAttrs);
+
+private:
+  explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {}
+
+  static AttributeList getImpl(LLVMContext &C, ArrayRef<AttributeSet> AttrSets);
+
+public:
+  AttributeList() = default;
+
+  //===--------------------------------------------------------------------===//
+  // AttributeList Construction and Mutation
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return an AttributeList with the specified parameters in it.
+  static AttributeList get(LLVMContext &C, ArrayRef<AttributeList> Attrs);
+  static AttributeList get(LLVMContext &C, unsigned Index,
+                           ArrayRef<Attribute::AttrKind> Kinds);
+  static AttributeList get(LLVMContext &C, unsigned Index,
+                           ArrayRef<StringRef> Kind);
+  static AttributeList get(LLVMContext &C, unsigned Index,
+                           const AttrBuilder &B);
+
+  /// \brief Add an attribute to the attribute set at the given index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAttribute(LLVMContext &C, unsigned Index,
+                             Attribute::AttrKind Kind) const;
+
+  /// \brief Add an attribute to the attribute set at the given index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAttribute(LLVMContext &C, unsigned Index, StringRef Kind,
+                             StringRef Value = StringRef()) const;
+
+  /// Add an attribute to the attribute set at the given index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAttribute(LLVMContext &C, unsigned Index, Attribute A) const;
+
+  /// \brief Add attributes to the attribute set at the given index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAttributes(LLVMContext &C, unsigned Index,
+                              const AttrBuilder &B) const;
+
+  /// Add an argument attribute to the list. Returns a new list because
+  /// attribute lists are immutable.
+  AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo,
+                                  Attribute::AttrKind Kind) const {
+    return addAttribute(C, ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// Add an argument attribute to the list. Returns a new list because
+  /// attribute lists are immutable.
+  AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo,
+                                  StringRef Kind,
+                                  StringRef Value = StringRef()) const {
+    return addAttribute(C, ArgNo + FirstArgIndex, Kind, Value);
+  }
+
+  /// Add an attribute to the attribute list at the given arg indices. Returns a
+  /// new list because attribute lists are immutable.
+  AttributeList addParamAttribute(LLVMContext &C, ArrayRef<unsigned> ArgNos,
+                                  Attribute A) const;
+
+  /// Add an argument attribute to the list. Returns a new list because
+  /// attribute lists are immutable.
+  AttributeList addParamAttributes(LLVMContext &C, unsigned ArgNo,
+                                   const AttrBuilder &B) const {
+    return addAttributes(C, ArgNo + FirstArgIndex, B);
+  }
+
+  /// \brief Remove the specified attribute at the specified index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeAttribute(LLVMContext &C, unsigned Index,
+                                Attribute::AttrKind Kind) const;
+
+  /// \brief Remove the specified attribute at the specified index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeAttribute(LLVMContext &C, unsigned Index,
+                                StringRef Kind) const;
+
+  /// \brief Remove the specified attributes at the specified index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeAttributes(LLVMContext &C, unsigned Index,
+                                 const AttrBuilder &AttrsToRemove) const;
+
+  /// \brief Remove all attributes at the specified index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeAttributes(LLVMContext &C, unsigned Index) const;
+
+  /// \brief Remove the specified attribute at the specified arg index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeParamAttribute(LLVMContext &C, unsigned ArgNo,
+                                     Attribute::AttrKind Kind) const {
+    return removeAttribute(C, ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Remove the specified attribute at the specified arg index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeParamAttribute(LLVMContext &C, unsigned ArgNo,
+                                     StringRef Kind) const {
+    return removeAttribute(C, ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Remove the specified attribute at the specified arg index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo,
+                                      const AttrBuilder &AttrsToRemove) const {
+    return removeAttributes(C, ArgNo + FirstArgIndex, AttrsToRemove);
+  }
+
+  /// \brief Remove all attributes at the specified arg index from this
+  /// attribute list. Returns a new list because attribute lists are immutable.
+  AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo) const {
+    return removeAttributes(C, ArgNo + FirstArgIndex);
+  }
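+
+  /// Removal likewise returns a fresh list; e.g. a sketch stripping every
+  /// attribute from parameter 1 (\c Ctx and \c AL assumed):
+  /// \code
+  ///   AL = AL.removeParamAttributes(Ctx, 1);
+  /// \endcode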
+
+  /// \brief Add the dereferenceable attribute to the attribute set at the given
+  /// index. Returns a new list because attribute lists are immutable.
+  AttributeList addDereferenceableAttr(LLVMContext &C, unsigned Index,
+                                       uint64_t Bytes) const;
+
+  /// \brief Add the dereferenceable attribute to the attribute set at the given
+  /// arg index. Returns a new list because attribute lists are immutable.
+  AttributeList addDereferenceableParamAttr(LLVMContext &C, unsigned ArgNo,
+                                            uint64_t Bytes) const {
+    return addDereferenceableAttr(C, ArgNo + FirstArgIndex, Bytes);
+  }
+
+  /// \brief Add the dereferenceable_or_null attribute to the attribute set at
+  /// the given index. Returns a new list because attribute lists are immutable.
+  AttributeList addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index,
+                                             uint64_t Bytes) const;
+
+  /// \brief Add the dereferenceable_or_null attribute to the attribute set at
+  /// the given arg index. Returns a new list because attribute lists are
+  /// immutable.
+  AttributeList addDereferenceableOrNullParamAttr(LLVMContext &C,
+                                                  unsigned ArgNo,
+                                                  uint64_t Bytes) const {
+    return addDereferenceableOrNullAttr(C, ArgNo + FirstArgIndex, Bytes);
+  }
+
+  /// Add the allocsize attribute to the attribute set at the given index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAllocSizeAttr(LLVMContext &C, unsigned Index,
+                                 unsigned ElemSizeArg,
+                                 const Optional<unsigned> &NumElemsArg);
+
+  /// Add the allocsize attribute to the attribute set at the given arg index.
+  /// Returns a new list because attribute lists are immutable.
+  AttributeList addAllocSizeParamAttr(LLVMContext &C, unsigned ArgNo,
+                                      unsigned ElemSizeArg,
+                                      const Optional<unsigned> &NumElemsArg) {
+    return addAllocSizeAttr(C, ArgNo + FirstArgIndex, ElemSizeArg, NumElemsArg);
+  }
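+
+  /// As a sketch, annotating parameter 0 of an existing list \c AL as
+  /// dereferenceable for 16 bytes (\c Ctx assumed):
+  /// \code
+  ///   AL = AL.addDereferenceableParamAttr(Ctx, 0, 16);
+  /// \endcode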
+
+  //===--------------------------------------------------------------------===//
+  // AttributeList Accessors
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Retrieve the LLVM context.
+  LLVMContext &getContext() const;
+
+  /// \brief The attributes for the specified index are returned.
+  AttributeSet getAttributes(unsigned Index) const;
+
+  /// \brief The attributes for the argument or parameter at the given index are
+  /// returned.
+  AttributeSet getParamAttributes(unsigned ArgNo) const;
+
+  /// \brief The attributes for the ret value are returned.
+  AttributeSet getRetAttributes() const;
+
+  /// \brief The function attributes are returned.
+  AttributeSet getFnAttributes() const;
+
+  /// \brief Return true if the attribute exists at the given index.
+  bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const;
+
+  /// \brief Return true if the attribute exists at the given index.
+  bool hasAttribute(unsigned Index, StringRef Kind) const;
+
+  /// \brief Return true if any attribute exists at the given index.
+  bool hasAttributes(unsigned Index) const;
+
+  /// \brief Return true if the attribute exists for the given argument.
+  bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    return hasAttribute(ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Return true if the attribute exists for the given argument.
+  bool hasParamAttr(unsigned ArgNo, StringRef Kind) const {
+    return hasAttribute(ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Return true if any attributes exist for the given argument.
+  bool hasParamAttrs(unsigned ArgNo) const {
+    return hasAttributes(ArgNo + FirstArgIndex);
+  }
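+
+  /// A typical query sketch (\c AL assumed): test for a parameter attribute
+  /// before fetching the attribute object itself.
+  /// \code
+  ///   if (AL.hasParamAttr(0, Attribute::ByVal))
+  ///     Attribute A = AL.getParamAttr(0, Attribute::ByVal);
+  /// \endcode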
+
+  /// \brief Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but
+  /// may be faster.
+  bool hasFnAttribute(Attribute::AttrKind Kind) const;
+
+  /// \brief Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but
+  /// may be faster.
+  bool hasFnAttribute(StringRef Kind) const;
+
+  /// \brief Equivalent to hasAttribute(ArgNo + FirstArgIndex, Kind).
+  bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const;
+
+  /// \brief Return true if the specified attribute is set for at least one
+  /// parameter or for the return value. If Index is not nullptr, the index
+  /// of a parameter with the specified attribute is provided.
+  bool hasAttrSomewhere(Attribute::AttrKind Kind,
+                        unsigned *Index = nullptr) const;
+
+  /// \brief Return the attribute object that exists at the given index.
+  Attribute getAttribute(unsigned Index, Attribute::AttrKind Kind) const;
+
+  /// \brief Return the attribute object that exists at the given index.
+  Attribute getAttribute(unsigned Index, StringRef Kind) const;
+
+  /// \brief Return the attribute object that exists at the arg index.
+  Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    return getAttribute(ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Return the attribute object that exists at the given index.
+  Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
+    return getAttribute(ArgNo + FirstArgIndex, Kind);
+  }
+
+  /// \brief Return the alignment of the return value.
+  unsigned getRetAlignment() const;
+
+  /// \brief Return the alignment for the specified function parameter.
+  unsigned getParamAlignment(unsigned ArgNo) const;
+
+  /// \brief Get the stack alignment.
+  unsigned getStackAlignment(unsigned Index) const;
+
+  /// \brief Get the number of dereferenceable bytes (or zero if unknown).
+  uint64_t getDereferenceableBytes(unsigned Index) const;
+
+  /// \brief Get the number of dereferenceable bytes (or zero if unknown) of an
+  /// arg.
+  uint64_t getParamDereferenceableBytes(unsigned ArgNo) const {
+    return getDereferenceableBytes(ArgNo + FirstArgIndex);
+  }
+
+  /// \brief Get the number of dereferenceable_or_null bytes (or zero if
+  /// unknown).
+  uint64_t getDereferenceableOrNullBytes(unsigned Index) const;
+
+  /// \brief Get the number of dereferenceable_or_null bytes (or zero if
+  /// unknown) of an arg.
+  uint64_t getParamDereferenceableOrNullBytes(unsigned ArgNo) const {
+    return getDereferenceableOrNullBytes(ArgNo + FirstArgIndex);
+  }
+
+  /// Get the allocsize argument numbers (or pair(0, 0) if unknown).
+  std::pair<unsigned, Optional<unsigned>>
+  getAllocSizeArgs(unsigned Index) const;
+
+  /// \brief Return the attributes at the index as a string.
+  std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
+
+  //===--------------------------------------------------------------------===//
+  // AttributeList Introspection
+  //===--------------------------------------------------------------------===//
+
+  using iterator = const AttributeSet *;
+
+  iterator begin() const;
+  iterator end() const;
+
+  unsigned getNumAttrSets() const;
+
+  /// Use these to iterate over the valid attribute indices.
+  unsigned index_begin() const { return AttributeList::FunctionIndex; }
+  unsigned index_end() const { return getNumAttrSets() - 1; }
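+
+  /// A sketch of the usual iteration idiom (\c AL assumed). index_begin() is
+  /// FunctionIndex, so the unsigned increment deliberately wraps around to
+  /// the return-value index before reaching the argument indices:
+  /// \code
+  ///   for (unsigned I = AL.index_begin(), E = AL.index_end(); I != E; ++I)
+  ///     AttributeSet AS = AL.getAttributes(I);
+  /// \endcode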
+
+  /// operator==/!= - Provide equality predicates.
+  bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; }
+  bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; }
+
+  /// \brief Return a raw pointer that uniquely identifies this attribute list.
+  void *getRawPointer() const {
+    return pImpl;
+  }
+
+  /// \brief Return true if there are no attributes.
+  bool isEmpty() const { return pImpl == nullptr; }
+
+  void dump() const;
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief Provide DenseMapInfo for AttributeList.
+template <> struct DenseMapInfo<AttributeList> {
+  static AttributeList getEmptyKey() {
+    auto Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
+    return AttributeList(reinterpret_cast<AttributeListImpl *>(Val));
+  }
+
+  static AttributeList getTombstoneKey() {
+    auto Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
+    return AttributeList(reinterpret_cast<AttributeListImpl *>(Val));
+  }
+
+  static unsigned getHashValue(AttributeList AS) {
+    return (unsigned((uintptr_t)AS.pImpl) >> 4) ^
+           (unsigned((uintptr_t)AS.pImpl) >> 9);
+  }
+
+  static bool isEqual(AttributeList LHS, AttributeList RHS) {
+    return LHS == RHS;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class is used in conjunction with the Attribute::get method to
+/// create an Attribute object. The object itself is uniquified. The Builder's
+/// value, however, is not. So this can be used as a quick way to test for
+/// equality, presence of attributes, etc.
+class AttrBuilder {
+  std::bitset<Attribute::EndAttrKinds> Attrs;
+  std::map<std::string, std::string> TargetDepAttrs;
+  uint64_t Alignment = 0;
+  uint64_t StackAlignment = 0;
+  uint64_t DerefBytes = 0;
+  uint64_t DerefOrNullBytes = 0;
+  uint64_t AllocSizeArgs = 0;
+
+public:
+  AttrBuilder() = default;
+
+  AttrBuilder(const Attribute &A) {
+    addAttribute(A);
+  }
+
+  AttrBuilder(AttributeList AS, unsigned Idx);
+  AttrBuilder(AttributeSet AS);
+
+  void clear();
+
+  /// \brief Add an attribute to the builder.
+  AttrBuilder &addAttribute(Attribute::AttrKind Val);
+
+  /// \brief Add the Attribute object to the builder.
+  AttrBuilder &addAttribute(Attribute A);
+
+  /// \brief Add the target-dependent attribute to the builder.
+  AttrBuilder &addAttribute(StringRef A, StringRef V = StringRef());
+
+  /// \brief Remove an attribute from the builder.
+  AttrBuilder &removeAttribute(Attribute::AttrKind Val);
+
+  /// \brief Remove the attributes at the given index of \p A from the builder.
+  AttrBuilder &removeAttributes(AttributeList A, uint64_t WithoutIndex);
+
+  /// \brief Remove the target-dependent attribute from the builder.
+  AttrBuilder &removeAttribute(StringRef A);
+
+  /// \brief Add the attributes from \p B to this builder.
+  AttrBuilder &merge(const AttrBuilder &B);
+
+  /// \brief Remove the attributes in \p B from this builder.
+  AttrBuilder &remove(const AttrBuilder &B);
+
+  /// \brief Return true if the builder has any attribute that's in the
+  /// specified builder.
+  bool overlaps(const AttrBuilder &B) const;
+
+  /// \brief Return true if the builder has the specified attribute.
+  bool contains(Attribute::AttrKind A) const {
+    assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
+    return Attrs[A];
+  }
+
+  /// \brief Return true if the builder has the specified target-dependent
+  /// attribute.
+  bool contains(StringRef A) const;
+
+  /// \brief Return true if the builder has IR-level attributes.
+  bool hasAttributes() const;
+
+  /// \brief Return true if the builder has any attribute that's present at
+  /// the given index of the specified attribute list.
+  bool hasAttributes(AttributeList A, uint64_t Index) const;
+
+  /// \brief Return true if the builder has an alignment attribute.
+  bool hasAlignmentAttr() const;
+
+  /// \brief Retrieve the alignment attribute, if it exists.
+  uint64_t getAlignment() const { return Alignment; }
+
+  /// \brief Retrieve the stack alignment attribute, if it exists.
+  uint64_t getStackAlignment() const { return StackAlignment; }
+
+  /// \brief Retrieve the number of dereferenceable bytes, if the
+  /// dereferenceable attribute exists (zero is returned otherwise).
+  uint64_t getDereferenceableBytes() const { return DerefBytes; }
+
+  /// \brief Retrieve the number of dereferenceable_or_null bytes, if the
+  /// dereferenceable_or_null attribute exists (zero is returned otherwise).
+  uint64_t getDereferenceableOrNullBytes() const { return DerefOrNullBytes; }
+
+  /// Retrieve the allocsize args, if the allocsize attribute exists.  If it
+  /// doesn't exist, pair(0, 0) is returned.
+  std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+
+  /// \brief This turns an int alignment (which must be a power of 2) into the
+  /// form used internally in Attribute.
+  AttrBuilder &addAlignmentAttr(unsigned Align);
+
+  /// \brief This turns an int stack alignment (which must be a power of 2) into
+  /// the form used internally in Attribute.
+  AttrBuilder &addStackAlignmentAttr(unsigned Align);
+
+  /// \brief This turns the number of dereferenceable bytes into the form used
+  /// internally in Attribute.
+  AttrBuilder &addDereferenceableAttr(uint64_t Bytes);
+
+  /// \brief This turns the number of dereferenceable_or_null bytes into the
+  /// form used internally in Attribute.
+  AttrBuilder &addDereferenceableOrNullAttr(uint64_t Bytes);
+
+  /// This turns one (or two) ints into the form used internally in Attribute.
+  AttrBuilder &addAllocSizeAttr(unsigned ElemSizeArg,
+                                const Optional<unsigned> &NumElemsArg);
+
+  /// Add an allocsize attribute, using the representation returned by
+  /// Attribute.getIntValue().
+  AttrBuilder &addAllocSizeAttrFromRawRepr(uint64_t RawAllocSizeRepr);
+
+  /// \brief Return true if the builder contains no target-independent
+  /// attributes.
+  bool empty() const { return Attrs.none(); }
+
+  // Iterators for target-dependent attributes.
+  using td_type = std::pair<std::string, std::string>;
+  using td_iterator = std::map<std::string, std::string>::iterator;
+  using td_const_iterator = std::map<std::string, std::string>::const_iterator;
+  using td_range = iterator_range<td_iterator>;
+  using td_const_range = iterator_range<td_const_iterator>;
+
+  td_iterator td_begin() { return TargetDepAttrs.begin(); }
+  td_iterator td_end() { return TargetDepAttrs.end(); }
+
+  td_const_iterator td_begin() const { return TargetDepAttrs.begin(); }
+  td_const_iterator td_end() const { return TargetDepAttrs.end(); }
+
+  td_range td_attrs() { return td_range(td_begin(), td_end()); }
+
+  td_const_range td_attrs() const {
+    return td_const_range(td_begin(), td_end());
+  }
+
+  bool td_empty() const { return TargetDepAttrs.empty(); }
+
+  bool operator==(const AttrBuilder &B);
+  bool operator!=(const AttrBuilder &B) {
+    return !(*this == B);
+  }
+};
+
+namespace AttributeFuncs {
+
+/// \brief Which attributes cannot be applied to a type.
+AttrBuilder typeIncompatible(Type *Ty);
+
+/// \returns True if the two functions have compatible target-independent
+/// attributes for inlining purposes.
+bool areInlineCompatible(const Function &Caller, const Function &Callee);
+
+/// \brief Merge caller's and callee's attributes.
+void mergeAttributesForInlining(Function &Caller, const Function &Callee);
+
+} // end namespace AttributeFuncs
+
+} // end namespace llvm
+
+#endif // LLVM_IR_ATTRIBUTES_H
diff --git a/linux-x64/clang/include/llvm/IR/Attributes.td b/linux-x64/clang/include/llvm/IR/Attributes.td
new file mode 100644
index 0000000..554f0df
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Attributes.td
@@ -0,0 +1,233 @@
+/// Attribute base class.
+class Attr<string S> {
+  // String representation of this attribute in the IR.
+  string AttrString = S;
+}
+
+/// Enum attribute.
+class EnumAttr<string S> : Attr<S>;
+
+/// StringBool attribute.
+class StrBoolAttr<string S> : Attr<S>;
+
+/// Target-independent enum attributes.
+
+/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
+/// 0 means unaligned (different from align(1)).
+def Alignment : EnumAttr<"align">;
+
+/// The result of the function is guaranteed to point to a number of bytes that
+/// we can determine if we know the value of the function's arguments.
+def AllocSize : EnumAttr<"allocsize">;
+
+/// inline=always.
+def AlwaysInline : EnumAttr<"alwaysinline">;
+
+/// Function can access memory only using pointers based on its arguments.
+def ArgMemOnly : EnumAttr<"argmemonly">;
+
+/// Callee is recognized as a builtin, despite nobuiltin attribute on its
+/// declaration.
+def Builtin : EnumAttr<"builtin">;
+
+/// Pass structure by value.
+def ByVal : EnumAttr<"byval">;
+
+/// Marks function as being in a cold path.
+def Cold : EnumAttr<"cold">;
+
+/// Can only be moved to control-equivalent blocks.
+def Convergent : EnumAttr<"convergent">;
+
+/// Pointer is known to be dereferenceable.
+def Dereferenceable : EnumAttr<"dereferenceable">;
+
+/// Pointer is either null or dereferenceable.
+def DereferenceableOrNull : EnumAttr<"dereferenceable_or_null">;
+
+/// Function may only access memory that is inaccessible from IR.
+def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly">;
+
+/// Function may only access memory that is either inaccessible from the IR,
+/// or pointed to by its pointer arguments.
+def InaccessibleMemOrArgMemOnly : EnumAttr<"inaccessiblemem_or_argmemonly">;
+
+/// Pass structure in an alloca.
+def InAlloca : EnumAttr<"inalloca">;
+
+/// Source said inlining was desirable.
+def InlineHint : EnumAttr<"inlinehint">;
+
+/// Force argument to be passed in register.
+def InReg : EnumAttr<"inreg">;
+
+/// Build jump-instruction tables and replace refs.
+def JumpTable : EnumAttr<"jumptable">;
+
+/// Function must be optimized for size first.
+def MinSize : EnumAttr<"minsize">;
+
+/// Naked function.
+def Naked : EnumAttr<"naked">;
+
+/// Nested function static chain.
+def Nest : EnumAttr<"nest">;
+
+/// Considered to not alias after call.
+def NoAlias : EnumAttr<"noalias">;
+
+/// Callee isn't recognized as a builtin.
+def NoBuiltin : EnumAttr<"nobuiltin">;
+
+/// Function creates no aliases of pointer.
+def NoCapture : EnumAttr<"nocapture">;
+
+/// Call cannot be duplicated.
+def NoDuplicate : EnumAttr<"noduplicate">;
+
+/// Disable implicit floating point insts.
+def NoImplicitFloat : EnumAttr<"noimplicitfloat">;
+
+/// inline=never.
+def NoInline : EnumAttr<"noinline">;
+
+/// Function is called early and/or often, so lazy binding isn't worthwhile.
+def NonLazyBind : EnumAttr<"nonlazybind">;
+
+/// Pointer is known to be not null.
+def NonNull : EnumAttr<"nonnull">;
+
+/// The function does not recurse.
+def NoRecurse : EnumAttr<"norecurse">;
+
+/// Disable redzone.
+def NoRedZone : EnumAttr<"noredzone">;
+
+/// Mark the function as not returning.
+def NoReturn : EnumAttr<"noreturn">;
+
+/// Disable Indirect Branch Tracking.
+def NoCfCheck : EnumAttr<"nocf_check">;
+
+/// Function doesn't unwind stack.
+def NoUnwind : EnumAttr<"nounwind">;
+
+/// Select optimizations for best fuzzing signal.
+def OptForFuzzing : EnumAttr<"optforfuzzing">;
+
+/// opt_size.
+def OptimizeForSize : EnumAttr<"optsize">;
+
+/// Function must not be optimized.
+def OptimizeNone : EnumAttr<"optnone">;
+
+/// Function does not access memory.
+def ReadNone : EnumAttr<"readnone">;
+
+/// Function only reads from memory.
+def ReadOnly : EnumAttr<"readonly">;
+
+/// Return value is always equal to this argument.
+def Returned : EnumAttr<"returned">;
+
+/// Function can return twice.
+def ReturnsTwice : EnumAttr<"returns_twice">;
+
+/// Safe Stack protection.
+def SafeStack : EnumAttr<"safestack">;
+
+/// Sign extended before/after call.
+def SExt : EnumAttr<"signext">;
+
+/// Alignment of stack for function (3 bits) stored as log2 of alignment with
+/// +1 bias. 0 means unaligned (different from alignstack=(1)).
+def StackAlignment : EnumAttr<"alignstack">;
+
+/// Function can be speculated.
+def Speculatable : EnumAttr<"speculatable">;
+
+/// Stack protection.
+def StackProtect : EnumAttr<"ssp">;
+
+/// Stack protection required.
+def StackProtectReq : EnumAttr<"sspreq">;
+
+/// Strong Stack protection.
+def StackProtectStrong : EnumAttr<"sspstrong">;
+
+/// Function was called in a scope requiring strict floating point semantics.
+def StrictFP : EnumAttr<"strictfp">;
+
+/// Hidden pointer to structure to return.
+def StructRet : EnumAttr<"sret">;
+
+/// AddressSanitizer is on.
+def SanitizeAddress : EnumAttr<"sanitize_address">;
+
+/// ThreadSanitizer is on.
+def SanitizeThread : EnumAttr<"sanitize_thread">;
+
+/// MemorySanitizer is on.
+def SanitizeMemory : EnumAttr<"sanitize_memory">;
+
+/// HWAddressSanitizer is on.
+def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress">;
+
+/// Argument is swift error.
+def SwiftError : EnumAttr<"swifterror">;
+
+/// Argument is swift self/context.
+def SwiftSelf : EnumAttr<"swiftself">;
+
+/// Function must be in an unwind table.
+def UWTable : EnumAttr<"uwtable">;
+
+/// Function only writes to memory.
+def WriteOnly : EnumAttr<"writeonly">;
+
+/// Zero extended before/after call.
+def ZExt : EnumAttr<"zeroext">;
+
+/// Target-independent string attributes.
+def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
+def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
+def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
+def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
+def NoJumpTables : StrBoolAttr<"no-jump-tables">;
+def ProfileSampleAccurate : StrBoolAttr<"profile-sample-accurate">;
+
+class CompatRule<string F> {
+  // The name of the function called to check the attribute of the caller and
+  // callee and decide whether inlining should be allowed. The function's
+  // signature must match "bool(const Function&, const Function &)", where the
+  // first parameter is the reference to the caller and the second parameter is
+  // the reference to the callee. It must return false if the attributes of the
+  // caller and callee are incompatible, and true otherwise.
+  string CompatFunc = F;
+}
+
+def : CompatRule<"isEqual<SanitizeAddressAttr>">;
+def : CompatRule<"isEqual<SanitizeThreadAttr>">;
+def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
+def : CompatRule<"isEqual<SanitizeHWAddressAttr>">;
+def : CompatRule<"isEqual<SafeStackAttr>">;
+
+class MergeRule<string F> {
+  // The name of the function called to merge the attributes of the caller and
+  // callee. The function's signature must match
+  // "void(Function&, const Function &)", where the first parameter is the
+  // reference to the caller and the second parameter is the reference to the
+  // callee.
+  string MergeFunc = F;
+}
+
+def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
+def : MergeRule<"setAND<NoInfsFPMathAttr>">;
+def : MergeRule<"setAND<NoNansFPMathAttr>">;
+def : MergeRule<"setAND<UnsafeFPMathAttr>">;
+def : MergeRule<"setOR<NoImplicitFloatAttr>">;
+def : MergeRule<"setOR<NoJumpTablesAttr>">;
+def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
+def : MergeRule<"adjustCallerSSPLevel">;
+def : MergeRule<"adjustCallerStackProbes">;
+def : MergeRule<"adjustCallerStackProbeSize">;
diff --git a/linux-x64/clang/include/llvm/IR/AutoUpgrade.h b/linux-x64/clang/include/llvm/IR/AutoUpgrade.h
new file mode 100644
index 0000000..3f406f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/AutoUpgrade.h
@@ -0,0 +1,85 @@
+//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  These functions are implemented by lib/IR/AutoUpgrade.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_AUTOUPGRADE_H
+#define LLVM_IR_AUTOUPGRADE_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+  class CallInst;
+  class Constant;
+  class Function;
+  class Instruction;
+  class MDNode;
+  class Module;
+  class GlobalVariable;
+  class Type;
+  class Value;
+
+  /// This is a more granular function that simply checks an intrinsic function
+  /// for upgrading, and returns true if it requires upgrading. It may return
+  /// null in NewFn if the all calls to the original intrinsic function
+  /// should be transformed to non-function-call instructions.
+  bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);
+
+  /// This is the complement to the above, replacing a specific call to an
+  /// intrinsic function with a call to the specified new function.
+  void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);
+
+  /// This is an auto-upgrade hook for any old intrinsic function syntaxes
+  /// which need to have both the function updated as well as all calls updated
+  /// to the new function. This should only be run in a post-processing fashion
+  /// so that it can update all calls to the old function.
+  void UpgradeCallsToIntrinsic(Function* F);
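+
+  /// A sketch of a whole-module post-processing loop (a Module \c M is
+  /// assumed; the iterator is advanced before the call because upgrading
+  /// may erase the function):
+  /// \code
+  ///   for (auto It = M.begin(), E = M.end(); It != E;)
+  ///     UpgradeCallsToIntrinsic(&*It++);
+  /// \endcode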
+
+  /// This checks for global variables which should be upgraded. It returns true
+  /// if it requires upgrading.
+  bool UpgradeGlobalVariable(GlobalVariable *GV);
+
+  /// This checks for module flags which should be upgraded. It returns true if
+  /// module is modified.
+  bool UpgradeModuleFlags(Module &M);
+
+  void UpgradeSectionAttributes(Module &M);
+
+  /// If the given TBAA tag uses the scalar TBAA format, create a new node
+  /// corresponding to the upgrade to the struct-path aware TBAA format.
+  /// Otherwise return the \p TBAANode itself.
+  MDNode *UpgradeTBAANode(MDNode &TBAANode);
+
+  /// This is an auto-upgrade for bitcast between pointers with different
+  /// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
+  Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
+                                  Instruction *&Temp);
+
+  /// This is an auto-upgrade for bitcast constant expression between pointers
+  /// with different address spaces: the instruction is replaced by a pair
+  /// ptrtoint+inttoptr.
+  Value *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);
+
+  /// Check the debug info version number; if it is outdated, drop the debug
+  /// info. Return true if module is modified.
+  bool UpgradeDebugInfo(Module &M);
+
+  /// Check whether a string looks like an old loop attachment tag.
+  inline bool mayBeOldLoopAttachmentTag(StringRef Name) {
+    return Name.startswith("llvm.vectorizer.");
+  }
+
+  /// Upgrade the loop attachment metadata node.
+  MDNode *upgradeInstructionLoopAttachment(MDNode &N);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/BasicBlock.h b/linux-x64/clang/include/llvm/IR/BasicBlock.h
new file mode 100644
index 0000000..77cfc97
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/BasicBlock.h
@@ -0,0 +1,427 @@
+//===- llvm/BasicBlock.h - Represent a basic block in the VM ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the BasicBlock class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_BASICBLOCK_H
+#define LLVM_IR_BASICBLOCK_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+class CallInst;
+class Function;
+class LandingPadInst;
+class LLVMContext;
+class Module;
+class PHINode;
+class TerminatorInst;
+class ValueSymbolTable;
+
+/// \brief LLVM Basic Block Representation
+///
+/// This represents a single basic block in LLVM. A basic block is simply a
+/// container of instructions that execute sequentially. Basic blocks are Values
+/// because they are referenced by instructions such as branches and switch
+/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block
+/// represents a label to which a branch can jump.
+///
+/// A well-formed basic block consists of a list of non-terminating
+/// instructions followed by a single TerminatorInst instruction.
+/// TerminatorInsts may not occur in the middle of basic blocks, and must
+/// terminate the blocks. The BasicBlock class allows malformed basic blocks to
+/// occur because it may be useful in the intermediate stage of constructing or
+/// modifying a program. However, the verifier will ensure that basic blocks
+/// are "well formed".
+class BasicBlock final : public Value, // Basic blocks are data objects also
+                         public ilist_node_with_parent<BasicBlock, Function> {
+public:
+  using InstListType = SymbolTableList<Instruction>;
+
+private:
+  friend class BlockAddress;
+  friend class SymbolTableListTraits<BasicBlock>;
+
+  InstListType InstList;
+  Function *Parent;
+
+  void setParent(Function *parent);
+
+  /// \brief Constructor.
+  ///
+  /// If the function parameter is specified, the basic block is automatically
+  /// inserted at either the end of the function (if InsertBefore is null), or
+  /// before the specified basic block.
+  explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
+                      Function *Parent = nullptr,
+                      BasicBlock *InsertBefore = nullptr);
+
+public:
+  BasicBlock(const BasicBlock &) = delete;
+  BasicBlock &operator=(const BasicBlock &) = delete;
+  ~BasicBlock();
+
+  /// \brief Get the context in which this basic block lives.
+  LLVMContext &getContext() const;
+
+  /// Instruction iterators...
+  using iterator = InstListType::iterator;
+  using const_iterator = InstListType::const_iterator;
+  using reverse_iterator = InstListType::reverse_iterator;
+  using const_reverse_iterator = InstListType::const_reverse_iterator;
+
+  /// \brief Creates a new BasicBlock.
+  ///
+  /// If the Parent parameter is specified, the basic block is automatically
+  /// inserted at either the end of the function (if InsertBefore is 0), or
+  /// before the specified basic block.
+  static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
+                            Function *Parent = nullptr,
+                            BasicBlock *InsertBefore = nullptr) {
+    return new BasicBlock(Context, Name, Parent, InsertBefore);
+  }
+
+  /// \brief Return the enclosing method, or null if none.
+  const Function *getParent() const { return Parent; }
+        Function *getParent()       { return Parent; }
+
+  /// \brief Return the module owning the function this basic block belongs to,
+  /// or nullptr if the function does not have a module.
+  ///
+  /// Note: this is undefined behavior if the block does not have a parent.
+  const Module *getModule() const;
+  Module *getModule() {
+    return const_cast<Module *>(
+                            static_cast<const BasicBlock *>(this)->getModule());
+  }
+
+  /// \brief Returns the terminator instruction if the block is well formed or
+  /// null if the block is not well formed.
+  const TerminatorInst *getTerminator() const LLVM_READONLY;
+  TerminatorInst *getTerminator() {
+    return const_cast<TerminatorInst *>(
+                        static_cast<const BasicBlock *>(this)->getTerminator());
+  }
+
+  /// \brief Returns the call instruction calling @llvm.experimental.deoptimize
+  /// prior to the terminating return instruction of this basic block, if such a
+  /// call is present.  Otherwise, returns null.
+  const CallInst *getTerminatingDeoptimizeCall() const;
+  CallInst *getTerminatingDeoptimizeCall() {
+    return const_cast<CallInst *>(
+         static_cast<const BasicBlock *>(this)->getTerminatingDeoptimizeCall());
+  }
+
+  /// \brief Returns the call instruction marked 'musttail' prior to the
+  /// terminating return instruction of this basic block, if such a call is
+  /// present.  Otherwise, returns null.
+  const CallInst *getTerminatingMustTailCall() const;
+  CallInst *getTerminatingMustTailCall() {
+    return const_cast<CallInst *>(
+           static_cast<const BasicBlock *>(this)->getTerminatingMustTailCall());
+  }
+
+  /// \brief Returns a pointer to the first instruction in this block that is
+  /// not a PHINode instruction.
+  ///
+  /// When adding instructions to the beginning of the basic block, they should
+  /// be added before the returned value, not before the first instruction,
+  /// which might be a PHI. Returns null if there's no non-PHI instruction.
+  const Instruction* getFirstNonPHI() const;
+  Instruction* getFirstNonPHI() {
+    return const_cast<Instruction *>(
+                       static_cast<const BasicBlock *>(this)->getFirstNonPHI());
+  }
+
+  /// \brief Returns a pointer to the first instruction in this block that is not
+  /// a PHINode or a debug intrinsic.
+  const Instruction* getFirstNonPHIOrDbg() const;
+  Instruction* getFirstNonPHIOrDbg() {
+    return const_cast<Instruction *>(
+                  static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbg());
+  }
+
+  /// \brief Returns a pointer to the first instruction in this block that is not
+  /// a PHINode, a debug intrinsic, or a lifetime intrinsic.
+  const Instruction* getFirstNonPHIOrDbgOrLifetime() const;
+  Instruction* getFirstNonPHIOrDbgOrLifetime() {
+    return const_cast<Instruction *>(
+        static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbgOrLifetime());
+  }
+
+  /// \brief Returns an iterator to the first instruction in this block that is
+  /// suitable for inserting a non-PHI instruction.
+  ///
+  /// In particular, it skips all PHIs and LandingPad instructions.
+  const_iterator getFirstInsertionPt() const;
+  iterator getFirstInsertionPt() {
+    return static_cast<const BasicBlock *>(this)
+                                          ->getFirstInsertionPt().getNonConst();
+  }
+
+  /// \brief Unlink 'this' from the containing function, but do not delete it.
+  void removeFromParent();
+
+  /// \brief Unlink 'this' from the containing function and delete it.
+  ///
+  /// \returns an iterator pointing to the element after the erased one.
+  SymbolTableList<BasicBlock>::iterator eraseFromParent();
+
+  /// \brief Unlink this basic block from its current function and insert it
+  /// into the function that \p MovePos lives in, right before \p MovePos.
+  void moveBefore(BasicBlock *MovePos);
+
+  /// \brief Unlink this basic block from its current function and insert it
+  /// right after \p MovePos in the function \p MovePos lives in.
+  void moveAfter(BasicBlock *MovePos);
+
+  /// \brief Insert unlinked basic block into a function.
+  ///
+  /// Inserts an unlinked basic block into \c Parent.  If \c InsertBefore is
+  /// provided, inserts before that basic block, otherwise inserts at the end.
+  ///
+  /// \pre \a getParent() is \c nullptr.
+  void insertInto(Function *Parent, BasicBlock *InsertBefore = nullptr);
+
+  /// \brief Return the predecessor of this block if it has a single predecessor
+  /// block. Otherwise return a null pointer.
+  const BasicBlock *getSinglePredecessor() const;
+  BasicBlock *getSinglePredecessor() {
+    return const_cast<BasicBlock *>(
+                 static_cast<const BasicBlock *>(this)->getSinglePredecessor());
+  }
+
+  /// \brief Return the predecessor of this block if it has a unique predecessor
+  /// block. Otherwise return a null pointer.
+  ///
+  /// Note that a unique predecessor doesn't mean a single edge; there can be
+  /// multiple edges from the unique predecessor to this block (for example a
+  /// switch statement with multiple cases having the same destination).
+  const BasicBlock *getUniquePredecessor() const;
+  BasicBlock *getUniquePredecessor() {
+    return const_cast<BasicBlock *>(
+                 static_cast<const BasicBlock *>(this)->getUniquePredecessor());
+  }
+
+  /// \brief Return the successor of this block if it has a single successor.
+  /// Otherwise return a null pointer.
+  ///
+  /// This method is analogous to getSinglePredecessor above.
+  const BasicBlock *getSingleSuccessor() const;
+  BasicBlock *getSingleSuccessor() {
+    return const_cast<BasicBlock *>(
+                   static_cast<const BasicBlock *>(this)->getSingleSuccessor());
+  }
+
+  /// \brief Return the successor of this block if it has a unique successor.
+  /// Otherwise return a null pointer.
+  ///
+  /// This method is analogous to getUniquePredecessor above.
+  const BasicBlock *getUniqueSuccessor() const;
+  BasicBlock *getUniqueSuccessor() {
+    return const_cast<BasicBlock *>(
+                   static_cast<const BasicBlock *>(this)->getUniqueSuccessor());
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// Instruction iterator methods
+  ///
+  inline iterator                begin()       { return InstList.begin(); }
+  inline const_iterator          begin() const { return InstList.begin(); }
+  inline iterator                end  ()       { return InstList.end();   }
+  inline const_iterator          end  () const { return InstList.end();   }
+
+  inline reverse_iterator        rbegin()       { return InstList.rbegin(); }
+  inline const_reverse_iterator  rbegin() const { return InstList.rbegin(); }
+  inline reverse_iterator        rend  ()       { return InstList.rend();   }
+  inline const_reverse_iterator  rend  () const { return InstList.rend();   }
+
+  inline size_t                   size() const { return InstList.size();  }
+  inline bool                    empty() const { return InstList.empty(); }
+  inline const Instruction      &front() const { return InstList.front(); }
+  inline       Instruction      &front()       { return InstList.front(); }
+  inline const Instruction       &back() const { return InstList.back();  }
+  inline       Instruction       &back()       { return InstList.back();  }
+
+  /// Iterator to walk just the phi nodes in the basic block.
+  template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
+  class phi_iterator_impl
+      : public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
+                                    std::forward_iterator_tag, PHINodeT> {
+    friend BasicBlock;
+
+    PHINodeT *PN;
+
+    phi_iterator_impl(PHINodeT *PN) : PN(PN) {}
+
+  public:
+    // Allow default construction to build variables, but this doesn't build
+    // a useful iterator.
+    phi_iterator_impl() = default;
+
+    // Allow conversion between instantiations where valid.
+    template <typename PHINodeU, typename BBIteratorU>
+    phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
+        : PN(Arg.PN) {}
+
+    bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }
+
+    PHINodeT &operator*() const { return *PN; }
+
+    using phi_iterator_impl::iterator_facade_base::operator++;
+    phi_iterator_impl &operator++() {
+      assert(PN && "Cannot increment the end iterator!");
+      PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
+      return *this;
+    }
+  };
+  using phi_iterator = phi_iterator_impl<>;
+  using const_phi_iterator =
+      phi_iterator_impl<const PHINode, BasicBlock::const_iterator>;
+
+  /// Returns a range that iterates over the phis in the basic block.
+  ///
+  /// Note that this cannot be used with basic blocks that have no terminator.
+  iterator_range<const_phi_iterator> phis() const {
+    return const_cast<BasicBlock *>(this)->phis();
+  }
+  iterator_range<phi_iterator> phis();
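+
+  /// For example (a sketch, assuming a well-formed block \c BB):
+  /// \code
+  ///   for (PHINode &PN : BB->phis())
+  ///     if (PN.getNumIncomingValues() == 1)
+  ///       ; // single-entry PHI, a common folding candidate
+  /// \endcode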
+
+  /// \brief Return the underlying instruction list container.
+  ///
+  /// Currently you need to access the underlying instruction list container
+  /// directly if you want to modify it.
+  const InstListType &getInstList() const { return InstList; }
+        InstListType &getInstList()       { return InstList; }
+
+  /// \brief Returns a pointer to a member of the instruction list.
+  static InstListType BasicBlock::*getSublistAccess(Instruction*) {
+    return &BasicBlock::InstList;
+  }
+
+  /// \brief Returns a pointer to the symbol table if one exists.
+  ValueSymbolTable *getValueSymbolTable();
+
+  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::BasicBlockVal;
+  }
+
+  /// \brief Cause all subinstructions to "let go" of all the references that
+  /// said subinstructions are maintaining.
+  ///
+  /// This allows one to 'delete' a whole class at a time, even though there may
+  /// be circular references... first all references are dropped, and all use
+  /// counts go to zero.  Then everything is delete'd for real.  Note that no
+  /// operations are valid on an object that has "dropped all references",
+  /// except operator delete.
+  void dropAllReferences();
+
+  /// \brief Notify the BasicBlock that the predecessor \p Pred is no longer
+  /// able to reach it.
+  ///
+  /// This does not update the predecessor list itself; it updates the PHI
+  /// nodes that reside in the block.  Note that this
+  /// should be called while the predecessor still refers to this block.
+  void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs = false);
+
+  bool canSplitPredecessors() const;
+
+  /// \brief Split the basic block into two basic blocks at the specified
+  /// instruction.
+  ///
+  /// Note that all instructions BEFORE the specified iterator stay as part of
+  /// the original basic block, an unconditional branch is added to the original
+  /// BB, and the rest of the instructions in the BB are moved to the new BB,
+  /// including the old terminator.  The newly formed BasicBlock is returned.
+  /// This function invalidates the specified iterator.
+  ///
+  /// Note that this only works on well formed basic blocks (must have a
+  /// terminator), and 'I' must not be the end of the instruction list (which
+  /// would
+  /// cause a degenerate basic block to be formed, having a terminator inside of
+  /// the basic block).
+  ///
+  /// Also note that this doesn't preserve any passes. To split blocks while
+  /// keeping loop information consistent, use the SplitBlock utility function.
+  BasicBlock *splitBasicBlock(iterator I, const Twine &BBName = "");
+  BasicBlock *splitBasicBlock(Instruction *I, const Twine &BBName = "") {
+    return splitBasicBlock(I->getIterator(), BBName);
+  }
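+
+  /// As a sketch, splitting this block right after an instruction \c I that
+  /// is assumed to live in it (and not be the terminator):
+  /// \code
+  ///   BasicBlock *Tail = BB->splitBasicBlock(std::next(I->getIterator()));
+  /// \endcode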
+
+  /// \brief Returns true if there are any uses of this basic block other than
+  /// direct branches, switches, etc. to it.
+  bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; }
+
+  /// \brief Update all phi nodes in this basic block's successors to refer to
+  /// basic block \p New instead of to it.
+  void replaceSuccessorsPhiUsesWith(BasicBlock *New);
+
+  /// \brief Return true if this basic block is an exception handling block.
+  bool isEHPad() const { return getFirstNonPHI()->isEHPad(); }
+
+  /// \brief Return true if this basic block is a landing pad.
+  ///
+  /// Being a ``landing pad'' means that the basic block is the destination of
+  /// the 'unwind' edge of an invoke instruction.
+  bool isLandingPad() const;
+
+  /// \brief Return the landingpad instruction associated with the landing pad.
+  const LandingPadInst *getLandingPadInst() const;
+  LandingPadInst *getLandingPadInst() {
+    return const_cast<LandingPadInst *>(
+                    static_cast<const BasicBlock *>(this)->getLandingPadInst());
+  }
+
+  /// \brief Return true if it is legal to hoist instructions into this block.
+  bool isLegalToHoistInto() const;
+
+  Optional<uint64_t> getIrrLoopHeaderWeight() const;
+
+private:
+  /// \brief Increment the internal refcount of the number of BlockAddresses
+  /// referencing this BasicBlock by \p Amt.
+  ///
+  /// This is almost always 0, occasionally 1, almost never 2, and
+  /// inconceivably 3 or more.
+  void AdjustBlockAddressRefCount(int Amt) {
+    setValueSubclassData(getSubclassDataFromValue()+Amt);
+    assert((int)(signed char)getSubclassDataFromValue() >= 0 &&
+           "Refcount wrap-around");
+  }
+
+  /// \brief Shadow Value::setValueSubclassData with a private forwarding method
+  /// so that any future subclasses cannot accidentally use it.
+  void setValueSubclassData(unsigned short D) {
+    Value::setValueSubclassData(D);
+  }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_BASICBLOCK_H
diff --git a/linux-x64/clang/include/llvm/IR/CFG.h b/linux-x64/clang/include/llvm/IR/CFG.h
new file mode 100644
index 0000000..e259e42
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/CFG.h
@@ -0,0 +1,266 @@
+//===- CFG.h - Process LLVM structures as graphs ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines specializations of GraphTraits that allow Function and
+// BasicBlock graphs to be treated as proper graphs for generic algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CFG_H
+#define LLVM_IR_CFG_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// BasicBlock pred_iterator definition
+//===----------------------------------------------------------------------===//
+
+template <class Ptr, class USE_iterator> // Predecessor Iterator
+class PredIterator : public std::iterator<std::forward_iterator_tag,
+                                          Ptr, ptrdiff_t, Ptr*, Ptr*> {
+  using super =
+      std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*, Ptr*>;
+  using Self = PredIterator<Ptr, USE_iterator>;
+  USE_iterator It;
+
+  inline void advancePastNonTerminators() {
+    // Loop to ignore non-terminator uses (for example BlockAddresses).
+    while (!It.atEnd() && !isa<TerminatorInst>(*It))
+      ++It;
+  }
+
+public:
+  using pointer = typename super::pointer;
+  using reference = typename super::reference;
+
+  PredIterator() = default;
+  explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
+    advancePastNonTerminators();
+  }
+  inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}
+
+  inline bool operator==(const Self& x) const { return It == x.It; }
+  inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+  inline reference operator*() const {
+    assert(!It.atEnd() && "pred_iterator out of range!");
+    return cast<TerminatorInst>(*It)->getParent();
+  }
+  inline pointer *operator->() const { return &operator*(); }
+
+  inline Self& operator++() {   // Preincrement
+    assert(!It.atEnd() && "pred_iterator out of range!");
+    ++It; advancePastNonTerminators();
+    return *this;
+  }
+
+  inline Self operator++(int) { // Postincrement
+    Self tmp = *this; ++*this; return tmp;
+  }
+
+  /// getOperandNo - Return the operand number in the predecessor's
+  /// terminator of the successor.
+  unsigned getOperandNo() const {
+    return It.getOperandNo();
+  }
+
+  /// getUse - Return the operand Use in the predecessor's terminator
+  /// of the successor.
+  Use &getUse() const {
+    return It.getUse();
+  }
+};
+
+using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
+using const_pred_iterator =
+    PredIterator<const BasicBlock, Value::const_user_iterator>;
+using pred_range = iterator_range<pred_iterator>;
+using pred_const_range = iterator_range<const_pred_iterator>;
+
+inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
+inline const_pred_iterator pred_begin(const BasicBlock *BB) {
+  return const_pred_iterator(BB);
+}
+inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
+inline const_pred_iterator pred_end(const BasicBlock *BB) {
+  return const_pred_iterator(BB, true);
+}
+inline bool pred_empty(const BasicBlock *BB) {
+  return pred_begin(BB) == pred_end(BB);
+}
+inline pred_range predecessors(BasicBlock *BB) {
+  return pred_range(pred_begin(BB), pred_end(BB));
+}
+inline pred_const_range predecessors(const BasicBlock *BB) {
+  return pred_const_range(pred_begin(BB), pred_end(BB));
+}
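+
+// A usage sketch, assuming a BasicBlock \c BB; range-based iteration over
+// the predecessors is the common idiom:
+//
+//   unsigned NumPreds = 0;
+//   for (BasicBlock *Pred : predecessors(BB))
+//     ++NumPreds; // or inspect Pred's terminator, etc.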
+
+//===----------------------------------------------------------------------===//
+// BasicBlock succ_iterator helpers
+//===----------------------------------------------------------------------===//
+
+using succ_iterator =
+    TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>;
+using succ_const_iterator =
+    TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>;
+using succ_range = iterator_range<succ_iterator>;
+using succ_const_range = iterator_range<succ_const_iterator>;
+
+inline succ_iterator succ_begin(BasicBlock *BB) {
+  return succ_iterator(BB->getTerminator());
+}
+inline succ_const_iterator succ_begin(const BasicBlock *BB) {
+  return succ_const_iterator(BB->getTerminator());
+}
+inline succ_iterator succ_end(BasicBlock *BB) {
+  return succ_iterator(BB->getTerminator(), true);
+}
+inline succ_const_iterator succ_end(const BasicBlock *BB) {
+  return succ_const_iterator(BB->getTerminator(), true);
+}
+inline bool succ_empty(const BasicBlock *BB) {
+  return succ_begin(BB) == succ_end(BB);
+}
+inline succ_range successors(BasicBlock *BB) {
+  return succ_range(succ_begin(BB), succ_end(BB));
+}
+inline succ_const_range successors(const BasicBlock *BB) {
+  return succ_const_range(succ_begin(BB), succ_end(BB));
+}
+
+template <typename T, typename U>
+struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
+  static const bool value = isPodLike<T>::value;
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks...
+
+template <> struct GraphTraits<BasicBlock*> {
+  using NodeRef = BasicBlock *;
+  using ChildIteratorType = succ_iterator;
+
+  static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
+  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
+};
+
+template <> struct GraphTraits<const BasicBlock*> {
+  using NodeRef = const BasicBlock *;
+  using ChildIteratorType = succ_const_iterator;
+
+  static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
+
+  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
+};
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order.  Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<BasicBlock*>> {
+  using NodeRef = BasicBlock *;
+  using ChildIteratorType = pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
+  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
+};
+
+template <> struct GraphTraits<Inverse<const BasicBlock*>> {
+  using NodeRef = const BasicBlock *;
+  using ChildIteratorType = const_pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
+  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... these are the same as the basic block iterators,
+// except that the root node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
+  static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<Function::iterator>;
+
+  static nodes_iterator nodes_begin(Function *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end(Function *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static size_t size(Function *F) { return F->size(); }
+};
+template <> struct GraphTraits<const Function*> :
+  public GraphTraits<const BasicBlock*> {
+  static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<Function::const_iterator>;
+
+  static nodes_iterator nodes_begin(const Function *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end(const Function *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static size_t size(const Function *F) { return F->size(); }
+};
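+
+// A sketch of what these specializations enable: the generic graph
+// algorithms (e.g. depth_first from llvm/ADT/DepthFirstIterator.h) can walk
+// a function's CFG directly, assuming a Function \c F:
+//
+//   unsigned NumBlocks = 0;
+//   for (BasicBlock *BB : depth_first(&F))
+//     ++NumBlocks; // blocks reachable from the entry, in DFS order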
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order.  Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<Function*>> :
+  public GraphTraits<Inverse<BasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<Function *> G) {
+    return &G.Graph->getEntryBlock();
+  }
+};
+template <> struct GraphTraits<Inverse<const Function*>> :
+  public GraphTraits<Inverse<const BasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<const Function *> G) {
+    return &G.Graph->getEntryBlock();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CFG_H
diff --git a/linux-x64/clang/include/llvm/IR/CallSite.h b/linux-x64/clang/include/llvm/IR/CallSite.h
new file mode 100644
index 0000000..5b10da8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/CallSite.h
@@ -0,0 +1,725 @@
+//===- CallSite.h - Abstract Call & Invoke instrs ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallSite class, which is a handy wrapper for code that
+// wants to treat Call and Invoke instructions in a generic way. When in non-
+// mutation context (e.g. an analysis) ImmutableCallSite should be used.
+// Finally, when some degree of customization is necessary between these two
+// extremes, CallSiteBase<> can be supplied with fine-tuned parameters.
+//
+// NOTE: These classes are supposed to have "value semantics". So they should be
+// passed by value, not by reference; they should not be "new"ed or "delete"d.
+// They are efficiently copyable, assignable and constructible, with cost
+// equivalent to copying a pointer (notice that they have only a single data
+// member). The internal representation carries a flag which indicates which of
+// the two variants is enclosed. This allows for cheaper checks when various
+// accessors of CallSite are employed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CALLSITE_H
+#define LLVM_IR_CALLSITE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+namespace Intrinsic {
+enum ID : unsigned;
+}
+
+template <typename FunTy = const Function,
+          typename BBTy = const BasicBlock,
+          typename ValTy = const Value,
+          typename UserTy = const User,
+          typename UseTy = const Use,
+          typename InstrTy = const Instruction,
+          typename CallTy = const CallInst,
+          typename InvokeTy = const InvokeInst,
+          typename IterTy = User::const_op_iterator>
+class CallSiteBase {
+protected:
+  PointerIntPair<InstrTy*, 1, bool> I;
+
+  CallSiteBase() = default;
+  CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
+  CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
+  explicit CallSiteBase(ValTy *II) { *this = get(II); }
+
+private:
+  /// This static method is like a constructor. It will create an appropriate
+  /// call site for a Call or Invoke instruction, but it can also create a
+  /// null-initialized CallSiteBase object for something which is NOT a call
+  /// site.
+  static CallSiteBase get(ValTy *V) {
+    if (InstrTy *II = dyn_cast<InstrTy>(V)) {
+      if (II->getOpcode() == Instruction::Call)
+        return CallSiteBase(static_cast<CallTy*>(II));
+      else if (II->getOpcode() == Instruction::Invoke)
+        return CallSiteBase(static_cast<InvokeTy*>(II));
+    }
+    return CallSiteBase();
+  }
+
+public:
+  /// Return true if a CallInst is enclosed. Note that !isCall() does not mean
+  /// an InvokeInst is enclosed. It may also signify a NULL instruction pointer.
+  bool isCall() const { return I.getInt(); }
+
+  /// Return true if an InvokeInst is enclosed.
+  bool isInvoke() const { return getInstruction() && !I.getInt(); }
+
+  InstrTy *getInstruction() const { return I.getPointer(); }
+  InstrTy *operator->() const { return I.getPointer(); }
+  explicit operator bool() const { return I.getPointer(); }
+
+  /// Get the basic block containing the call site.
+  BBTy* getParent() const { return getInstruction()->getParent(); }
+
+  /// Return the pointer to function that is being called.
+  ValTy *getCalledValue() const {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    return *getCallee();
+  }
+
+  /// Return the function being called if this is a direct call, otherwise
+  /// return null (if it's an indirect call).
+  FunTy *getCalledFunction() const {
+    return dyn_cast<FunTy>(getCalledValue());
+  }
+
+  /// Return true if the callsite is an indirect call.
+  bool isIndirectCall() const {
+    const Value *V = getCalledValue();
+    if (!V)
+      return false;
+    if (isa<FunTy>(V) || isa<Constant>(V))
+      return false;
+    if (const CallInst *CI = dyn_cast<CallInst>(getInstruction())) {
+      if (CI->isInlineAsm())
+        return false;
+    }
+    return true;
+  }
+
+  /// Set the callee to the specified value.
+  void setCalledFunction(Value *V) {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    *getCallee() = V;
+  }
+
+  /// Return the intrinsic ID of the intrinsic called by this CallSite,
+  /// or Intrinsic::not_intrinsic if the called function is not an
+  /// intrinsic, or if this CallSite is an indirect call.
+  Intrinsic::ID getIntrinsicID() const {
+    if (auto *F = getCalledFunction())
+      return F->getIntrinsicID();
+    // Don't use Intrinsic::not_intrinsic, as it will require pulling
+    // Intrinsics.h into every header that uses CallSite.
+    return static_cast<Intrinsic::ID>(0);
+  }
+
+  /// Determine whether the passed iterator points to the callee operand's Use.
+  bool isCallee(Value::const_user_iterator UI) const {
+    return isCallee(&UI.getUse());
+  }
+
+  /// Determine whether this Use is the callee operand's Use.
+  bool isCallee(const Use *U) const { return getCallee() == U; }
+
+  /// Determine whether the passed iterator points to an argument operand.
+  bool isArgOperand(Value::const_user_iterator UI) const {
+    return isArgOperand(&UI.getUse());
+  }
+
+  /// Determine whether the passed use points to an argument operand.
+  bool isArgOperand(const Use *U) const {
+    assert(getInstruction() == U->getUser());
+    return arg_begin() <= U && U < arg_end();
+  }
+
+  /// Determine whether the passed iterator points to a bundle operand.
+  bool isBundleOperand(Value::const_user_iterator UI) const {
+    return isBundleOperand(&UI.getUse());
+  }
+
+  /// Determine whether the passed use points to a bundle operand.
+  bool isBundleOperand(const Use *U) const {
+    assert(getInstruction() == U->getUser());
+    if (!hasOperandBundles())
+      return false;
+    unsigned OperandNo = U - (*this)->op_begin();
+    return getBundleOperandsStartIndex() <= OperandNo &&
+           OperandNo < getBundleOperandsEndIndex();
+  }
+
+  /// Determine whether the passed iterator points to a data operand.
+  bool isDataOperand(Value::const_user_iterator UI) const {
+    return isDataOperand(&UI.getUse());
+  }
+
+  /// Determine whether the passed use points to a data operand.
+  bool isDataOperand(const Use *U) const {
+    return data_operands_begin() <= U && U < data_operands_end();
+  }
+
+  ValTy *getArgument(unsigned ArgNo) const {
+    assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+    return *(arg_begin() + ArgNo);
+  }
+
+  void setArgument(unsigned ArgNo, Value* newVal) {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+    getInstruction()->setOperand(ArgNo, newVal);
+  }
+
+  /// Given a value use iterator, returns the argument that corresponds to it.
+  /// Iterator must actually correspond to an argument.
+  unsigned getArgumentNo(Value::const_user_iterator I) const {
+    return getArgumentNo(&I.getUse());
+  }
+
+  /// Given a use for an argument, get the argument number that corresponds to
+  /// it.
+  unsigned getArgumentNo(const Use *U) const {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    assert(isArgOperand(U) && "Argument # out of range!");
+    return U - arg_begin();
+  }
+
+  /// The type of iterator to use when looping over actual arguments at this
+  /// call site.
+  using arg_iterator = IterTy;
+
+  iterator_range<IterTy> args() const {
+    return make_range(arg_begin(), arg_end());
+  }
+  bool arg_empty() const { return arg_end() == arg_begin(); }
+  unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }
+
+  /// Given a value use iterator, return the data operand corresponding to it.
+  /// Iterator must actually correspond to a data operand.
+  unsigned getDataOperandNo(Value::const_user_iterator UI) const {
+    return getDataOperandNo(&UI.getUse());
+  }
+
+  /// Given a use for a data operand, get the data operand number that
+  /// corresponds to it.
+  unsigned getDataOperandNo(const Use *U) const {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    assert(isDataOperand(U) && "Data operand # out of range!");
+    return U - data_operands_begin();
+  }
+
+  /// Type of iterator to use when looping over data operands at this call site
+  /// (see below).
+  using data_operand_iterator = IterTy;
+
+  /// data_operands_begin/data_operands_end - Return iterators iterating over
+  /// the call / invoke argument list and bundle operands.  For invokes, this is
+  /// the set of instruction operands except the invoke target and the two
+  /// successor blocks; and for calls this is the set of instruction operands
+  /// except the call target.
+
+  IterTy data_operands_begin() const {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    return (*this)->op_begin();
+  }
+  IterTy data_operands_end() const {
+    assert(getInstruction() && "Not a call or invoke instruction!");
+    return (*this)->op_end() - (isCall() ? 1 : 3);
+  }
+  iterator_range<IterTy> data_ops() const {
+    return make_range(data_operands_begin(), data_operands_end());
+  }
+  bool data_operands_empty() const {
+    return data_operands_end() == data_operands_begin();
+  }
+  unsigned data_operands_size() const {
+    return std::distance(data_operands_begin(), data_operands_end());
+  }
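+
+  // For example, for a call  %r = call i32 @f(i32 %x)  the operand list is
+  // [%x, @f] and the data operands are just [%x]; for an invoke
+  //   %r = invoke i32 @f(i32 %x) to label %ok unwind label %err
+  // the operand list is [%x, @f, %ok, %err] and the data operands are again
+  // just [%x], which is what the (isCall() ? 1 : 3) adjustment above computes.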
+
+  /// Return the type of the instruction that generated this call site.
+  Type *getType() const { return (*this)->getType(); }
+
+  /// Return the caller function for this call site.
+  FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
+
+  /// Tests if this call site must be tail call optimized. Only a CallInst can
+  /// be tail call optimized.
+  bool isMustTailCall() const {
+    return isCall() && cast<CallInst>(getInstruction())->isMustTailCall();
+  }
+
+  /// Tests if this call site is marked as a tail call.
+  bool isTailCall() const {
+    return isCall() && cast<CallInst>(getInstruction())->isTailCall();
+  }
+
+#define CALLSITE_DELEGATE_GETTER(METHOD) \
+  InstrTy *II = getInstruction();    \
+  return isCall()                        \
+    ? cast<CallInst>(II)->METHOD         \
+    : cast<InvokeInst>(II)->METHOD
+
+#define CALLSITE_DELEGATE_SETTER(METHOD) \
+  InstrTy *II = getInstruction();    \
+  if (isCall())                          \
+    cast<CallInst>(II)->METHOD;          \
+  else                                   \
+    cast<InvokeInst>(II)->METHOD
+
+  unsigned getNumArgOperands() const {
+    CALLSITE_DELEGATE_GETTER(getNumArgOperands());
+  }
+
+  ValTy *getArgOperand(unsigned i) const {
+    CALLSITE_DELEGATE_GETTER(getArgOperand(i));
+  }
+
+  ValTy *getReturnedArgOperand() const {
+    CALLSITE_DELEGATE_GETTER(getReturnedArgOperand());
+  }
+
+  bool isInlineAsm() const {
+    if (isCall())
+      return cast<CallInst>(getInstruction())->isInlineAsm();
+    return false;
+  }
+
+  /// Get the calling convention of the call.
+  CallingConv::ID getCallingConv() const {
+    CALLSITE_DELEGATE_GETTER(getCallingConv());
+  }
+  /// Set the calling convention of the call.
+  void setCallingConv(CallingConv::ID CC) {
+    CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
+  }
+
+  FunctionType *getFunctionType() const {
+    CALLSITE_DELEGATE_GETTER(getFunctionType());
+  }
+
+  void mutateFunctionType(FunctionType *Ty) const {
+    CALLSITE_DELEGATE_SETTER(mutateFunctionType(Ty));
+  }
+
+  /// Get the parameter attributes of the call.
+  AttributeList getAttributes() const {
+    CALLSITE_DELEGATE_GETTER(getAttributes());
+  }
+  /// Set the parameter attributes of the call.
+  void setAttributes(AttributeList PAL) {
+    CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
+  }
+
+  void addAttribute(unsigned i, Attribute::AttrKind Kind) {
+    CALLSITE_DELEGATE_SETTER(addAttribute(i, Kind));
+  }
+
+  void addAttribute(unsigned i, Attribute Attr) {
+    CALLSITE_DELEGATE_SETTER(addAttribute(i, Attr));
+  }
+
+  void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+    CALLSITE_DELEGATE_SETTER(addParamAttr(ArgNo, Kind));
+  }
+
+  void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
+    CALLSITE_DELEGATE_SETTER(removeAttribute(i, Kind));
+  }
+
+  void removeAttribute(unsigned i, StringRef Kind) {
+    CALLSITE_DELEGATE_SETTER(removeAttribute(i, Kind));
+  }
+
+  void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+    CALLSITE_DELEGATE_SETTER(removeParamAttr(ArgNo, Kind));
+  }
+
+  /// Return true if this function has the given attribute.
+  bool hasFnAttr(Attribute::AttrKind Kind) const {
+    CALLSITE_DELEGATE_GETTER(hasFnAttr(Kind));
+  }
+
+  /// Return true if this function has the given attribute.
+  bool hasFnAttr(StringRef Kind) const {
+    CALLSITE_DELEGATE_GETTER(hasFnAttr(Kind));
+  }
+
+  /// Return true if this return value has the given attribute.
+  bool hasRetAttr(Attribute::AttrKind Kind) const {
+    CALLSITE_DELEGATE_GETTER(hasRetAttr(Kind));
+  }
+
+  /// Return true if the call or the callee has the given attribute.
+  bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    CALLSITE_DELEGATE_GETTER(paramHasAttr(ArgNo, Kind));
+  }
+
+  Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+    CALLSITE_DELEGATE_GETTER(getAttribute(i, Kind));
+  }
+
+  Attribute getAttribute(unsigned i, StringRef Kind) const {
+    CALLSITE_DELEGATE_GETTER(getAttribute(i, Kind));
+  }
+
+  /// Return true if the data operand at index \p i directly or indirectly has
+  /// the attribute \p A.
+  ///
+  /// Normal call or invoke arguments have per operand attributes, as specified
+  /// in the attribute set attached to this instruction, while operand bundle
+  /// operands may have some attributes implied by the type of their containing
+  /// operand bundle.
+  bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
+    CALLSITE_DELEGATE_GETTER(dataOperandHasImpliedAttr(i, Kind));
+  }
+
+  /// Extract the alignment of the return value.
+  unsigned getRetAlignment() const {
+    CALLSITE_DELEGATE_GETTER(getRetAlignment());
+  }
+
+  /// Extract the alignment for a call or parameter (0=unknown).
+  unsigned getParamAlignment(unsigned ArgNo) const {
+    CALLSITE_DELEGATE_GETTER(getParamAlignment(ArgNo));
+  }
+
+  /// Extract the number of dereferenceable bytes for a call or parameter
+  /// (0=unknown).
+  uint64_t getDereferenceableBytes(unsigned i) const {
+    CALLSITE_DELEGATE_GETTER(getDereferenceableBytes(i));
+  }
+
+  /// Extract the number of dereferenceable_or_null bytes for a call or
+  /// parameter (0=unknown).
+  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+    CALLSITE_DELEGATE_GETTER(getDereferenceableOrNullBytes(i));
+  }
+
+  /// Determine if the return value is marked with NoAlias attribute.
+  bool returnDoesNotAlias() const {
+    CALLSITE_DELEGATE_GETTER(returnDoesNotAlias());
+  }
+
+  /// Return true if the call should not be treated as a call to a builtin.
+  bool isNoBuiltin() const {
+    CALLSITE_DELEGATE_GETTER(isNoBuiltin());
+  }
+
+  /// Return true if the call requires strict floating point semantics.
+  bool isStrictFP() const {
+    CALLSITE_DELEGATE_GETTER(isStrictFP());
+  }
+
+  /// Return true if the call should not be inlined.
+  bool isNoInline() const {
+    CALLSITE_DELEGATE_GETTER(isNoInline());
+  }
+  void setIsNoInline(bool Value = true) {
+    CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
+  }
+
+  /// Determine if the call does not access memory.
+  bool doesNotAccessMemory() const {
+    CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
+  }
+  void setDoesNotAccessMemory() {
+    CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
+  }
+
+  /// Determine if the call does not access or only reads memory.
+  bool onlyReadsMemory() const {
+    CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
+  }
+  void setOnlyReadsMemory() {
+    CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
+  }
+
+  /// Determine if the call does not access or only writes memory.
+  bool doesNotReadMemory() const {
+    CALLSITE_DELEGATE_GETTER(doesNotReadMemory());
+  }
+  void setDoesNotReadMemory() {
+    CALLSITE_DELEGATE_SETTER(setDoesNotReadMemory());
+  }
+
+  /// Determine if the call can access memory only using pointers based
+  /// on its arguments.
+  bool onlyAccessesArgMemory() const {
+    CALLSITE_DELEGATE_GETTER(onlyAccessesArgMemory());
+  }
+  void setOnlyAccessesArgMemory() {
+    CALLSITE_DELEGATE_SETTER(setOnlyAccessesArgMemory());
+  }
+
+  /// Determine if the function may only access memory that is
+  /// inaccessible from the IR.
+  bool onlyAccessesInaccessibleMemory() const {
+    CALLSITE_DELEGATE_GETTER(onlyAccessesInaccessibleMemory());
+  }
+  void setOnlyAccessesInaccessibleMemory() {
+    CALLSITE_DELEGATE_SETTER(setOnlyAccessesInaccessibleMemory());
+  }
+
+  /// Determine if the function may only access memory that is
+  /// either inaccessible from the IR or pointed to by its arguments.
+  bool onlyAccessesInaccessibleMemOrArgMem() const {
+    CALLSITE_DELEGATE_GETTER(onlyAccessesInaccessibleMemOrArgMem());
+  }
+  void setOnlyAccessesInaccessibleMemOrArgMem() {
+    CALLSITE_DELEGATE_SETTER(setOnlyAccessesInaccessibleMemOrArgMem());
+  }
+
+  /// Determine if the call cannot return.
+  bool doesNotReturn() const {
+    CALLSITE_DELEGATE_GETTER(doesNotReturn());
+  }
+  void setDoesNotReturn() {
+    CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
+  }
+
+  /// Determine if the call cannot unwind.
+  bool doesNotThrow() const {
+    CALLSITE_DELEGATE_GETTER(doesNotThrow());
+  }
+  void setDoesNotThrow() {
+    CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
+  }
+
+  /// Determine if the call can be duplicated.
+  bool cannotDuplicate() const {
+    CALLSITE_DELEGATE_GETTER(cannotDuplicate());
+  }
+  void setCannotDuplicate() {
+    CALLSITE_DELEGATE_SETTER(setCannotDuplicate());
+  }
+
+  /// Determine if the call is convergent.
+  bool isConvergent() const {
+    CALLSITE_DELEGATE_GETTER(isConvergent());
+  }
+  void setConvergent() {
+    CALLSITE_DELEGATE_SETTER(setConvergent());
+  }
+  void setNotConvergent() {
+    CALLSITE_DELEGATE_SETTER(setNotConvergent());
+  }
+
+  unsigned getNumOperandBundles() const {
+    CALLSITE_DELEGATE_GETTER(getNumOperandBundles());
+  }
+
+  bool hasOperandBundles() const {
+    CALLSITE_DELEGATE_GETTER(hasOperandBundles());
+  }
+
+  unsigned getBundleOperandsStartIndex() const {
+    CALLSITE_DELEGATE_GETTER(getBundleOperandsStartIndex());
+  }
+
+  unsigned getBundleOperandsEndIndex() const {
+    CALLSITE_DELEGATE_GETTER(getBundleOperandsEndIndex());
+  }
+
+  unsigned getNumTotalBundleOperands() const {
+    CALLSITE_DELEGATE_GETTER(getNumTotalBundleOperands());
+  }
+
+  OperandBundleUse getOperandBundleAt(unsigned Index) const {
+    CALLSITE_DELEGATE_GETTER(getOperandBundleAt(Index));
+  }
+
+  Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
+    CALLSITE_DELEGATE_GETTER(getOperandBundle(Name));
+  }
+
+  Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
+    CALLSITE_DELEGATE_GETTER(getOperandBundle(ID));
+  }
+
+  unsigned countOperandBundlesOfType(uint32_t ID) const {
+    CALLSITE_DELEGATE_GETTER(countOperandBundlesOfType(ID));
+  }
+
+  bool isBundleOperand(unsigned Idx) const {
+    CALLSITE_DELEGATE_GETTER(isBundleOperand(Idx));
+  }
+
+  IterTy arg_begin() const {
+    CALLSITE_DELEGATE_GETTER(arg_begin());
+  }
+
+  IterTy arg_end() const {
+    CALLSITE_DELEGATE_GETTER(arg_end());
+  }
+
+#undef CALLSITE_DELEGATE_GETTER
+#undef CALLSITE_DELEGATE_SETTER
+
+  void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
+    const Instruction *II = getInstruction();
+    // Since this is actually a getter that "looks like" a setter, don't use the
+    // above macros to avoid confusion.
+    if (isCall())
+      cast<CallInst>(II)->getOperandBundlesAsDefs(Defs);
+    else
+      cast<InvokeInst>(II)->getOperandBundlesAsDefs(Defs);
+  }
+
+  /// Determine whether this data operand is not captured.
+  bool doesNotCapture(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
+  }
+
+  /// Determine whether this argument is passed by value.
+  bool isByValArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::ByVal);
+  }
+
+  /// Determine whether this argument is passed in an alloca.
+  bool isInAllocaArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::InAlloca);
+  }
+
+  /// Determine whether this argument is passed by value or in an alloca.
+  bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::ByVal) ||
+           paramHasAttr(ArgNo, Attribute::InAlloca);
+  }
+
+  /// Determine if there is an inalloca argument. Only the last argument can
+  /// have the inalloca attribute.
+  bool hasInAllocaArgument() const {
+    return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
+  }
+
+  bool doesNotAccessMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
+  bool onlyReadsMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
+           dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
+  bool doesNotReadMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
+           dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
+  /// Return true if the return value is known to be not null.
+  /// This may be because it has the nonnull attribute, or because at least
+  /// one byte is dereferenceable and the pointer is in addrspace(0).
+  bool isReturnNonNull() const {
+    if (hasRetAttr(Attribute::NonNull))
+      return true;
+    else if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
+             getType()->getPointerAddressSpace() == 0)
+      return true;
+
+    return false;
+  }
+
+  /// Returns true if this CallSite passes the given Value* as an argument to
+  /// the called function.
+  bool hasArgument(const Value *Arg) const {
+    for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
+         ++AI)
+      if (AI->get() == Arg)
+        return true;
+    return false;
+  }
+
+private:
+  IterTy getCallee() const {
+    if (isCall()) // Skip Callee
+      return cast<CallInst>(getInstruction())->op_end() - 1;
+    else // Skip BB, BB, Callee
+      return cast<InvokeInst>(getInstruction())->op_end() - 3;
+  }
+};
+
+class CallSite : public CallSiteBase<Function, BasicBlock, Value, User, Use,
+                                     Instruction, CallInst, InvokeInst,
+                                     User::op_iterator> {
+public:
+  CallSite() = default;
+  CallSite(CallSiteBase B) : CallSiteBase(B) {}
+  CallSite(CallInst *CI) : CallSiteBase(CI) {}
+  CallSite(InvokeInst *II) : CallSiteBase(II) {}
+  explicit CallSite(Instruction *II) : CallSiteBase(II) {}
+  explicit CallSite(Value *V) : CallSiteBase(V) {}
+
+  bool operator==(const CallSite &CS) const { return I == CS.I; }
+  bool operator!=(const CallSite &CS) const { return I != CS.I; }
+  bool operator<(const CallSite &CS) const {
+    return getInstruction() < CS.getInstruction();
+  }
+
+private:
+  friend struct DenseMapInfo<CallSite>;
+
+  User::op_iterator getCallee() const;
+};
+
+template <> struct DenseMapInfo<CallSite> {
+  using BaseInfo = DenseMapInfo<decltype(CallSite::I)>;
+
+  static CallSite getEmptyKey() {
+    CallSite CS;
+    CS.I = BaseInfo::getEmptyKey();
+    return CS;
+  }
+
+  static CallSite getTombstoneKey() {
+    CallSite CS;
+    CS.I = BaseInfo::getTombstoneKey();
+    return CS;
+  }
+
+  static unsigned getHashValue(const CallSite &CS) {
+    return BaseInfo::getHashValue(CS.I);
+  }
+
+  static bool isEqual(const CallSite &LHS, const CallSite &RHS) {
+    return LHS == RHS;
+  }
+};
+
+/// Establish a view to a call site for examination.
+class ImmutableCallSite : public CallSiteBase<> {
+public:
+  ImmutableCallSite() = default;
+  ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {}
+  ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {}
+  explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {}
+  explicit ImmutableCallSite(const Value *V) : CallSiteBase(V) {}
+  ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {}
+};
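+
+// Example usage (a minimal sketch; `I` stands for some llvm::Instruction
+// visited by the surrounding code, and errs() comes from
+// llvm/Support/raw_ostream.h):
+//
+//   CallSite CS(&I);
+//   if (!CS)
+//     return;                              // not a call or invoke
+//   if (Function *Callee = CS.getCalledFunction())
+//     errs() << Callee->getName() << " called with "
+//            << CS.arg_size() << " arguments\n";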
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CALLSITE_H
diff --git a/linux-x64/clang/include/llvm/IR/CallingConv.h b/linux-x64/clang/include/llvm/IR/CallingConv.h
new file mode 100644
index 0000000..84fe836
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/CallingConv.h
@@ -0,0 +1,231 @@
+//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines LLVM's set of calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CALLINGCONV_H
+#define LLVM_IR_CALLINGCONV_H
+
+namespace llvm {
+
+/// CallingConv Namespace - This namespace contains an enum with a value for
+/// each of the well-known calling conventions.
+///
+namespace CallingConv {
+
+  /// LLVM IR allows arbitrary numbers to be used as calling convention
+  /// identifiers.
+  using ID = unsigned;
+
+  /// A set of enums which specify the assigned numeric values for known llvm
+  /// calling conventions.
+  /// @brief LLVM Calling Convention Representation
+  enum {
+    /// C - The default llvm calling convention, compatible with C.  This
+    /// convention is the only calling convention that supports varargs calls.
+    /// As with typical C calling conventions, the callee/caller have to
+    /// tolerate certain amounts of prototype mismatch.
+    C = 0,
+
+    // Generic LLVM calling conventions.  None of these calling conventions
+    // support varargs calls, and all assume that the caller and callee
+    // prototype exactly match.
+
+    /// Fast - This calling convention attempts to make calls as fast as
+    /// possible (e.g. by passing things in registers).
+    Fast = 8,
+
+    // Cold - This calling convention attempts to make code in the caller as
+    // efficient as possible under the assumption that the call is not commonly
+    // executed.  As such, these calls often preserve all registers so that the
+    // call does not break any live ranges on the caller side.
+    Cold = 9,
+
+    // GHC - Calling convention used by the Glasgow Haskell Compiler (GHC).
+    GHC = 10,
+
+    // HiPE - Calling convention used by the High-Performance Erlang Compiler
+    // (HiPE).
+    HiPE = 11,
+
+    // WebKit JS - Calling convention for stack based JavaScript calls
+    WebKit_JS = 12,
+
+    // AnyReg - Calling convention for dynamic register based calls (e.g.
+    // stackmap and patchpoint intrinsics).
+    AnyReg = 13,
+
+    // PreserveMost - Calling convention for runtime calls that preserves most
+    // registers.
+    PreserveMost = 14,
+
+    // PreserveAll - Calling convention for runtime calls that preserves
+    // (almost) all registers.
+    PreserveAll = 15,
+
+    // Swift - Calling convention for Swift.
+    Swift = 16,
+
+    // CXX_FAST_TLS - Calling convention for access functions.
+    CXX_FAST_TLS = 17,
+
+    // Target - This is the start of the target-specific calling conventions,
+    // e.g. fastcall and thiscall on X86.
+    FirstTargetCC = 64,
+
+    /// X86_StdCall - stdcall is the calling convention mostly used by the
+    /// Win32 API. It is basically the same as the C convention, with the
+    /// difference that the callee is responsible for popping the arguments
+    /// from the stack.
+    X86_StdCall = 64,
+
+    /// X86_FastCall - 'fast' analog of X86_StdCall. Passes the first two
+    /// arguments in the ECX and EDX registers; the rest go on the stack. The
+    /// callee is responsible for stack cleanup.
+    X86_FastCall = 65,
+
+    /// ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete,
+    /// but still used on some targets).
+    ARM_APCS = 66,
+
+    /// ARM_AAPCS - ARM Architecture Procedure Calling Standard calling
+    /// convention (aka EABI). Soft float variant.
+    ARM_AAPCS = 67,
+
+    /// ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
+    ARM_AAPCS_VFP = 68,
+
+    /// MSP430_INTR - Calling convention used for MSP430 interrupt routines.
+    MSP430_INTR = 69,
+
+    /// X86_ThisCall - Similar to X86_StdCall. Passes the first argument in ECX
+    /// and the rest via the stack. The callee is responsible for stack cleanup.
+    /// MSVC uses this by default for methods in its ABI.
+    X86_ThisCall = 70,
+
+    /// PTX_Kernel - Call to a PTX kernel.
+    /// Passes all arguments in parameter space.
+    PTX_Kernel = 71,
+
+    /// PTX_Device - Call to a PTX device function.
+    /// Passes all arguments in register or parameter space.
+    PTX_Device = 72,
+
+    /// SPIR_FUNC - Calling convention for SPIR non-kernel device functions.
+    /// No lowering or expansion of arguments.
+    /// Structures are passed as a pointer to a struct with the byval attribute.
+    /// Functions can only call SPIR_FUNC and SPIR_KERNEL functions.
+    /// Functions can only have zero or one return values.
+    /// Variable arguments are not allowed, except for printf.
+    /// How arguments/return values are lowered is not specified.
+    /// Functions are only visible to the devices.
+    SPIR_FUNC = 75,
+
+    /// SPIR_KERNEL - Calling convention for SPIR kernel functions.
+    /// Inherits the restrictions of SPIR_FUNC, except
+    /// Cannot have non-void return values.
+    /// Cannot have variable arguments.
+    /// Can also be called by the host.
+    /// Is externally visible.
+    SPIR_KERNEL = 76,
+
+    /// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins
+    Intel_OCL_BI = 77,
+
+    /// \brief The C convention as specified in the x86-64 supplement to the
+    /// System V ABI, used on most non-Windows systems.
+    X86_64_SysV = 78,
+
+    /// \brief The C convention as implemented on Windows/x86-64 and
+    /// AArch64. This convention differs from the more common
+    /// \c X86_64_SysV convention in a number of ways, most notably in
+    /// that XMM registers used to pass arguments are shadowed by GPRs,
+    /// and vice versa.
+    /// On AArch64, this is identical to the normal C (AAPCS) calling
+    /// convention for normal functions, but floats are passed in integer
+    /// registers to variadic functions.
+    Win64 = 79,
+
+    /// \brief MSVC calling convention that passes vectors and vector aggregates
+    /// in SSE registers.
+    X86_VectorCall = 80,
+
+    /// \brief Calling convention used by HipHop Virtual Machine (HHVM) to
+    /// perform calls to and from translation cache, and for calling PHP
+    /// functions.
+    /// HHVM calling convention supports tail/sibling call elimination.
+    HHVM = 81,
+
+    /// \brief HHVM calling convention for invoking C/C++ helpers.
+    HHVM_C = 82,
+
+    /// X86_INTR - x86 hardware interrupt context. The callee may take one or
+    /// two parameters, where the first is a pointer to the hardware context
+    /// frame and the second is the hardware error code; the presence of the
+    /// latter depends on the interrupt vector taken. Valid for both 32- and
+    /// 64-bit subtargets.
+    X86_INTR = 83,
+
+    /// Used for AVR interrupt routines.
+    AVR_INTR = 84,
+
+    /// Calling convention used for AVR signal routines.
+    AVR_SIGNAL = 85,
+
+    /// Calling convention used for special AVR rtlib functions
+    /// which have an "optimized" convention to preserve registers.
+    AVR_BUILTIN = 86,
+
+    /// Calling convention used for Mesa vertex shaders, or AMDPAL last shader
+    /// stage before rasterization (vertex shader if tessellation and geometry
+    /// are not in use, or otherwise copy shader if one is needed).
+    AMDGPU_VS = 87,
+
+    /// Calling convention used for Mesa/AMDPAL geometry shaders.
+    AMDGPU_GS = 88,
+
+    /// Calling convention used for Mesa/AMDPAL pixel shaders.
+    AMDGPU_PS = 89,
+
+    /// Calling convention used for Mesa/AMDPAL compute shaders.
+    AMDGPU_CS = 90,
+
+    /// Calling convention for AMDGPU code object kernels.
+    AMDGPU_KERNEL = 91,
+
+    /// Register calling convention used for parameter transfer optimization.
+    X86_RegCall = 92,
+
+    /// Calling convention used for Mesa/AMDPAL hull shaders (= tessellation
+    /// control shaders).
+    AMDGPU_HS = 93,
+
+    /// Calling convention used for special MSP430 rtlib functions
+    /// which have an "optimized" convention using additional registers.
+    MSP430_BUILTIN = 94,
+
+    /// Calling convention used for AMDPAL vertex shader if tessellation is in
+    /// use.
+    AMDGPU_LS = 95,
+
+    /// Calling convention used for AMDPAL shader stage before geometry shader
+    /// if geometry is in use. So either the domain (= tessellation evaluation)
+    /// shader if tessellation is in use, or otherwise the vertex shader.
+    AMDGPU_ES = 96,
+
+    /// The highest possible calling convention ID. Must be some 2^k - 1.
+    MaxID = 1023
+  };
+
+} // end namespace CallingConv
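+
+// Example usage (a minimal sketch, using CallSite from llvm/IR/CallSite.h):
+// the convention must be set both on the function and on each of its call
+// sites, e.g. when making an internal function use the fast convention:
+//
+//   F->setCallingConv(CallingConv::Fast);
+//   for (Use &U : F->uses())
+//     if (CallSite CS = CallSite(U.getUser()))
+//       if (CS.isCallee(&U))              // skip uses of F as an argument
+//         CS.setCallingConv(CallingConv::Fast);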
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CALLINGCONV_H
diff --git a/linux-x64/clang/include/llvm/IR/Comdat.h b/linux-x64/clang/include/llvm/IR/Comdat.h
new file mode 100644
index 0000000..555121e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Comdat.h
@@ -0,0 +1,71 @@
+//===- llvm/IR/Comdat.h - Comdat definitions --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declaration of the Comdat class, which represents a
+/// single COMDAT in LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_COMDAT_H
+#define LLVM_IR_COMDAT_H
+
+#include "llvm-c/Types.h"
+#include "llvm/Support/CBindingWrapping.h"
+
+namespace llvm {
+
+class raw_ostream;
+class StringRef;
+template <typename ValueTy> class StringMapEntry;
+
+// This is a (Name, SelectionKind) pair. The reason for having this be an
+// independent object instead of just adding the name and the SelectionKind
+// to a GlobalObject is that it is invalid to have two Comdats with the same
+// name but different SelectionKind. This structure makes that unrepresentable.
+class Comdat {
+public:
+  enum SelectionKind {
+    Any,          ///< The linker may choose any COMDAT.
+    ExactMatch,   ///< The data referenced by the COMDAT must be the same.
+    Largest,      ///< The linker will choose the largest COMDAT.
+    NoDuplicates, ///< No other Module may specify this COMDAT.
+    SameSize,     ///< The data referenced by the COMDAT must be the same size.
+  };
+
+  Comdat(const Comdat &) = delete;
+  Comdat(Comdat &&C);
+
+  SelectionKind getSelectionKind() const { return SK; }
+  void setSelectionKind(SelectionKind Val) { SK = Val; }
+  StringRef getName() const;
+  void print(raw_ostream &OS, bool IsForDebug = false) const;
+  void dump() const;
+
+private:
+  friend class Module;
+
+  Comdat();
+
+  // Points to the map in Module.
+  StringMapEntry<Comdat> *Name = nullptr;
+  SelectionKind SK = Any;
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Comdat, LLVMComdatRef)
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) {
+  C.print(OS);
+  return OS;
+}
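+
+// Example usage (a minimal sketch; M is a Module and GV a GlobalVariable or
+// Function in it):
+//
+//   Comdat *C = M.getOrInsertComdat("foo");
+//   C->setSelectionKind(Comdat::Any);
+//   GV->setComdat(C);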
+
+} // end namespace llvm
+
+#endif // LLVM_IR_COMDAT_H
diff --git a/linux-x64/clang/include/llvm/IR/Constant.h b/linux-x64/clang/include/llvm/IR/Constant.h
new file mode 100644
index 0000000..6048160
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Constant.h
@@ -0,0 +1,181 @@
+//===-- llvm/Constant.h - Constant class definition -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Constant class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANT_H
+#define LLVM_IR_CONSTANT_H
+
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+class APInt;
+
+/// This is an important base class in LLVM. It provides the common facilities
+/// of all constant values in an LLVM program. A constant is a value that is
+/// immutable at runtime. Functions are constants because their address is
+/// immutable. The same is true of global variables.
+///
+/// All constants share the capabilities provided in this class. All constants
+/// can have a null value. They can have an operand list. Constants can be
+/// simple (integer and floating point values), complex (arrays and structures),
+/// or expression based (computations yielding a constant value composed of
+/// only certain operators and other constant values).
+///
+/// Note that Constants are immutable (once created they never change)
+/// and are fully shared by structural equivalence.  This means that two
+/// structurally equivalent constants will always have the same address.
+/// Constants are created on demand as needed and never deleted: thus clients
+/// don't have to worry about the lifetime of the objects.
+/// @brief LLVM Constant Representation
+class Constant : public User {
+protected:
+  Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
+    : User(ty, vty, Ops, NumOps) {}
+
+public:
+  void operator=(const Constant &) = delete;
+  Constant(const Constant &) = delete;
+
+  /// Return true if this is the value that would be returned by getNullValue.
+  bool isNullValue() const;
+
+  /// Returns true if the value is one.
+  bool isOneValue() const;
+
+  /// Return true if this is the value that would be returned by
+  /// getAllOnesValue.
+  bool isAllOnesValue() const;
+
+  /// Return true if the value is what would be returned by
+  /// getZeroValueForNegation.
+  bool isNegativeZeroValue() const;
+
+  /// Return true if the value is negative zero or null value.
+  bool isZeroValue() const;
+
+  /// Return true if the value is not the smallest signed value.
+  bool isNotMinSignedValue() const;
+
+  /// Return true if the value is the smallest signed value.
+  bool isMinSignedValue() const;
+
+  /// Return true if this is a finite and non-zero floating-point scalar
+  /// constant or a vector constant with all finite and non-zero elements.
+  bool isFiniteNonZeroFP() const;
+
+  /// Return true if this is a normal (as opposed to denormal) floating-point
+  /// scalar constant or a vector constant with all normal elements.
+  bool isNormalFP() const;
+
+  /// Return true if this scalar has an exact multiplicative inverse or this
+  /// vector has an exact multiplicative inverse for each element in the vector.
+  bool hasExactInverseFP() const;
+
+  /// Return true if this is a floating-point NaN constant or a vector
+  /// floating-point constant with all NaN elements.
+  bool isNaN() const;
+
+  /// Return true if evaluation of this constant could trap. This is true for
+  /// things like constant expressions that could divide by zero.
+  bool canTrap() const;
+
+  /// Return true if the value can vary between threads.
+  bool isThreadDependent() const;
+
+  /// Return true if the value is dependent on a dllimport variable.
+  bool isDLLImportDependent() const;
+
+  /// Return true if the constant has users other than constant expressions and
+  /// other dangling things.
+  bool isConstantUsed() const;
+
+  /// This method classifies the entry according to whether or not it may
+  /// generate a relocation entry.  This must be conservative, so if it might
+  /// codegen to a relocatable entry, it should say so.
+  ///
+  /// FIXME: This really should not be in IR.
+  bool needsRelocation() const;
+
+  /// For aggregates (struct/array/vector) return the constant that corresponds
+  /// to the specified element if possible, or null if not. This can return null
+  /// if the element index is a ConstantExpr, or if 'this' is a constant expr.
+  Constant *getAggregateElement(unsigned Elt) const;
+  Constant *getAggregateElement(Constant *Elt) const;
+
+  /// If this is a splat vector constant, meaning that all of the elements have
+  /// the same value, return that value. Otherwise return 0.
+  Constant *getSplatValue() const;
+
+  /// If C is a constant integer then return its value, otherwise C must be a
+  /// vector of constant integers, all equal, and the common value is returned.
+  const APInt &getUniqueInteger() const;
+
+  /// Called if some element of this constant is no longer valid.
+  /// At this point only other constants may be on the use_list for this
+  /// constant.  Any constants on our Use list must also be destroy'd.  The
+  /// implementation must be sure to remove the constant from the list of
+  /// available cached constants.  Implementations should implement
+  /// destroyConstantImpl to remove constants from any pools/maps they are
+  /// contained in.
+  void destroyConstant();
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    static_assert(ConstantFirstVal == 0, "V->getValueID() >= ConstantFirstVal always succeeds");
+    return V->getValueID() <= ConstantLastVal;
+  }
+
+  /// This method is a special form of User::replaceUsesOfWith
+  /// (which does not work on constants) that does work
+  /// on constants.  Basically this method goes through the trouble of building
+  /// a new constant that is equivalent to the current one, with all uses of
+  /// From replaced with uses of To.  After this construction is completed, all
+  /// of the users of 'this' are updated to use the new constant, and then
+  /// 'this' is deleted.  In general, you should not call this method, instead,
+  /// use Value::replaceAllUsesWith, which automatically dispatches to this
+  /// method as needed.
+  ///
+  void handleOperandChange(Value *, Value *);
+
+  static Constant *getNullValue(Type* Ty);
+
+  /// @returns the value for an integer or vector of integer constant of the
+  /// given type that has all its bits set to true.
+  /// @brief Get the all ones value
+  static Constant *getAllOnesValue(Type* Ty);
+
+  /// Return the value for an integer or pointer constant, or a vector thereof,
+  /// with the given scalar value.
+  static Constant *getIntegerValue(Type *Ty, const APInt &V);
+
+  /// If there are any dead constant users dangling off of this constant, remove
+  /// them. This method is useful for clients that want to check to see if a
+  /// global is unused, but don't want to deal with potentially dead constants
+  /// hanging off of the globals.
+  void removeDeadConstantUsers() const;
+
+  const Constant *stripPointerCasts() const {
+    return cast<Constant>(Value::stripPointerCasts());
+  }
+
+  Constant *stripPointerCasts() {
+    return const_cast<Constant*>(
+                      static_cast<const Constant *>(this)->stripPointerCasts());
+  }
+};
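+
+// Example usage (a minimal sketch; Ctx is an LLVMContext):
+//
+//   Type *I32 = Type::getInt32Ty(Ctx);
+//   Constant *Zero = Constant::getNullValue(I32);            // i32 0
+//   Constant *Ones = Constant::getAllOnesValue(I32);         // i32 -1
+//   Constant *K    = Constant::getIntegerValue(I32, APInt(32, 42));
+//   assert(Zero->isNullValue() && Ones->isAllOnesValue());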
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CONSTANT_H
diff --git a/linux-x64/clang/include/llvm/IR/ConstantFolder.h b/linux-x64/clang/include/llvm/IR/ConstantFolder.h
new file mode 100644
index 0000000..da5bba7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ConstantFolder.h
@@ -0,0 +1,277 @@
+//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstantFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants
+// with minimal folding.  For general constant creation and folding,
+// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTFOLDER_H
+#define LLVM_IR_CONSTANTFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+/// ConstantFolder - Create constants with minimal, target-independent folding.
+class ConstantFolder {
+public:
+  explicit ConstantFolder() = default;
+
+  //===--------------------------------------------------------------------===//
+  // Binary Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateAdd(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
+  }
+
+  Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getFAdd(LHS, RHS);
+  }
+
+  Constant *CreateSub(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
+  }
+
+  Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getFSub(LHS, RHS);
+  }
+
+  Constant *CreateMul(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
+  }
+
+  Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getFMul(LHS, RHS);
+  }
+
+  Constant *CreateUDiv(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return ConstantExpr::getUDiv(LHS, RHS, isExact);
+  }
+
+  Constant *CreateSDiv(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return ConstantExpr::getSDiv(LHS, RHS, isExact);
+  }
+
+  Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getFDiv(LHS, RHS);
+  }
+
+  Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getURem(LHS, RHS);
+  }
+
+  Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getSRem(LHS, RHS);
+  }
+
+  Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getFRem(LHS, RHS);
+  }
+
+  Constant *CreateShl(Constant *LHS, Constant *RHS,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
+  }
+
+  Constant *CreateLShr(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return ConstantExpr::getLShr(LHS, RHS, isExact);
+  }
+
+  Constant *CreateAShr(Constant *LHS, Constant *RHS,
+                       bool isExact = false) const {
+    return ConstantExpr::getAShr(LHS, RHS, isExact);
+  }
+
+  Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getAnd(LHS, RHS);
+  }
+
+  Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getOr(LHS, RHS);
+  }
+
+  Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::getXor(LHS, RHS);
+  }
+
+  Constant *CreateBinOp(Instruction::BinaryOps Opc,
+                        Constant *LHS, Constant *RHS) const {
+    return ConstantExpr::get(Opc, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Unary Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateNeg(Constant *C,
+                      bool HasNUW = false, bool HasNSW = false) const {
+    return ConstantExpr::getNeg(C, HasNUW, HasNSW);
+  }
+
+  Constant *CreateFNeg(Constant *C) const {
+    return ConstantExpr::getFNeg(C);
+  }
+
+  Constant *CreateNot(Constant *C) const {
+    return ConstantExpr::getNot(C);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Memory Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+                                ArrayRef<Constant *> IdxList) const {
+    return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+  }
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstantExpr::getGetElementPtr(Ty, C, Idx);
+  }
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+                                ArrayRef<Value *> IdxList) const {
+    return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        ArrayRef<Constant *> IdxList) const {
+    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        ArrayRef<Value *> IdxList) const {
+    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Cast/Conversion Operators
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateCast(Instruction::CastOps Op, Constant *C,
+                       Type *DestTy) const {
+    return ConstantExpr::getCast(Op, C, DestTy);
+  }
+
+  Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+    return ConstantExpr::getPointerCast(C, DestTy);
+  }
+
+  Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+                                                Type *DestTy) const {
+    return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
+  }
+
+  Constant *CreateIntCast(Constant *C, Type *DestTy,
+                          bool isSigned) const {
+    return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
+  }
+
+  Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+    return ConstantExpr::getFPCast(C, DestTy);
+  }
+
+  Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::BitCast, C, DestTy);
+  }
+
+  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::IntToPtr, C, DestTy);
+  }
+
+  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::PtrToInt, C, DestTy);
+  }
+
+  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+    return ConstantExpr::getZExtOrBitCast(C, DestTy);
+  }
+
+  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+    return ConstantExpr::getSExtOrBitCast(C, DestTy);
+  }
+
+  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+    return ConstantExpr::getTruncOrBitCast(C, DestTy);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Compare Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+                       Constant *RHS) const {
+    return ConstantExpr::getCompare(P, LHS, RHS);
+  }
+
+  Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+                       Constant *RHS) const {
+    return ConstantExpr::getCompare(P, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+    return ConstantExpr::getSelect(C, True, False);
+  }
+
+  Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+    return ConstantExpr::getExtractElement(Vec, Idx);
+  }
+
+  Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
+                                Constant *Idx) const {
+    return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
+  }
+
+  Constant *CreateShuffleVector(Constant *V1, Constant *V2,
+                                Constant *Mask) const {
+    return ConstantExpr::getShuffleVector(V1, V2, Mask);
+  }
+
+  Constant *CreateExtractValue(Constant *Agg,
+                               ArrayRef<unsigned> IdxList) const {
+    return ConstantExpr::getExtractValue(Agg, IdxList);
+  }
+
+  Constant *CreateInsertValue(Constant *Agg, Constant *Val,
+                              ArrayRef<unsigned> IdxList) const {
+    return ConstantExpr::getInsertValue(Agg, Val, IdxList);
+  }
+};
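+
+// Example usage (a minimal sketch; Ctx is an LLVMContext): ConstantFolder is
+// the default folder parameter of IRBuilder, but it can also be used directly:
+//
+//   ConstantFolder Folder;
+//   Type *I32 = Type::getInt32Ty(Ctx);
+//   Constant *L = ConstantInt::get(I32, 2);
+//   Constant *R = ConstantInt::get(I32, 3);
+//   Constant *S = Folder.CreateAdd(L, R);   // folds to i32 5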
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CONSTANTFOLDER_H
diff --git a/linux-x64/clang/include/llvm/IR/ConstantRange.h b/linux-x64/clang/include/llvm/IR/ConstantRange.h
new file mode 100644
index 0000000..6889e26
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ConstantRange.h
@@ -0,0 +1,346 @@
+//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Represent a range of possible values that an integral value may take when
+// the program is run.  This keeps track of a lower and upper bound for the
+// constant, which MAY wrap around the end of the numeric range.  To do this, it
+// keeps track of a [lower, upper) bound, which specifies an interval just like
+// STL iterators.  When used with boolean values, the following are important
+// ranges:
+//
+//  [F, F) = {}     = Empty set
+//  [T, F) = {T}
+//  [F, T) = {F}
+//  [T, T) = {F, T} = Full set
+//
+// The other integral ranges use min/max values for special range values. For
+// example, 8-bit types use:
+// [0, 0)     = {}       = Empty set
+// [255, 255) = {0..255} = Full Set
+//
+// Note that ConstantRange can be used to represent either signed or
+// unsigned ranges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTRANGE_H
+#define LLVM_IR_CONSTANTRANGE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MDNode;
+class raw_ostream;
+
+/// This class represents a range of values.
+class LLVM_NODISCARD ConstantRange {
+  APInt Lower, Upper;
+
+public:
+  /// Initialize a full (the default) or empty set for the specified bit width.
+  explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true);
+
+  /// Initialize a range to hold the single specified value.
+  ConstantRange(APInt Value);
+
+  /// @brief Initialize a range of values explicitly. This will assert if
+  /// Lower == Upper but that value is neither the minimum nor the maximum
+  /// value for the type (i.e. the range is neither empty nor full). It will
+  /// also assert if the two APInts are not the same bit width.
+  ConstantRange(APInt Lower, APInt Upper);
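+
+  // Illustrative usage sketch (editor's addition, not part of the upstream
+  // header): constructing ranges with the constructors above.
+  //   ConstantRange Full(8);                       // i8 full set {0..255}
+  //   ConstantRange Empty(8, /*isFullSet=*/false); // i8 empty set
+  //   ConstantRange Single(APInt(8, 42));          // the singleton {42}
+  //   ConstantRange Wrapped(APInt(8, 250), APInt(8, 5)); // {250..255, 0..4}
+  //   bool B = Wrapped.contains(APInt(8, 2));      // true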
+
+  /// Produce the smallest range such that all values that may satisfy the given
+  /// predicate with any value contained within Other are contained in the
+  /// returned range.  Formally, this returns a superset of
+  /// 'union over all y in Other . { x : icmp op x y is true }'.  If the exact
+  /// answer is not representable as a ConstantRange, the return value will be a
+  /// proper superset of the above.
+  ///
+  /// Example: Pred = ult and Other = i8 [2, 5) returns Result = [0, 4)
+  static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred,
+                                             const ConstantRange &Other);
+
+  /// Produce the largest range such that all values in the returned range
+  /// satisfy the given predicate with all values contained within Other.
+  /// Formally, this returns a subset of
+  /// 'intersection over all y in Other . { x : icmp op x y is true }'.  If the
+  /// exact answer is not representable as a ConstantRange, the return value
+  /// will be a proper subset of the above.
+  ///
+  /// Example: Pred = ult and Other = i8 [2, 5) returns [0, 2)
+  static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
+                                                const ConstantRange &Other);
+
+  /// Produce the exact range such that all values in the returned range satisfy
+  /// the given predicate with any value contained within Other.  Formally, this
+  /// returns the exact answer when the superset of
+  /// 'union over all y in Other . { x : icmp op x y is true }' is exactly the
+  /// same as the subset of
+  /// 'intersection over all y in Other . { x : icmp op x y is true }'.
+  ///
+  /// Example: Pred = ult and Other = i8 3 returns [0, 3)
+  static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred,
+                                           const APInt &Other);
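+
+  // Illustrative sketch (editor's addition): the allowed vs. exact regions,
+  // matching the documented examples above.
+  //   // x values for which "icmp ult x, y" holds for SOME y in [2, 5):
+  //   ConstantRange Allowed = ConstantRange::makeAllowedICmpRegion(
+  //       CmpInst::ICMP_ULT, ConstantRange(APInt(8, 2), APInt(8, 5)));
+  //   // Allowed == [0, 4)
+  //   // x values for which "icmp ult x, 3" holds:
+  //   ConstantRange Exact = ConstantRange::makeExactICmpRegion(
+  //       CmpInst::ICMP_ULT, APInt(8, 3));   // Exact == [0, 3)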
+
+  /// Return the largest range containing all X such that "X BinOpC Y" is
+  /// guaranteed not to wrap (overflow) for all Y in Other.
+  ///
+  /// NB! The returned set does *not* contain **all** possible values of X for
+  /// which "X BinOpC Y" does not wrap -- some viable values of X may be
+  /// missing, so you cannot use this to constrain X's range.  E.g. in the
+  /// fourth example, "(-2) + 1" is both nsw and nuw (so the "X" could be -2),
+  /// but (-2) is not in the set returned.
+  ///
+  /// Examples:
+  ///  typedef OverflowingBinaryOperator OBO;
+  ///  #define MGNR makeGuaranteedNoWrapRegion
+  ///  MGNR(Add, [i8 1, 2), OBO::NoSignedWrap) == [-128, 127)
+  ///  MGNR(Add, [i8 1, 2), OBO::NoUnsignedWrap) == [0, -1)
+  ///  MGNR(Add, [i8 0, 1), OBO::NoUnsignedWrap) == Full Set
+  ///  MGNR(Add, [i8 1, 2), OBO::NoUnsignedWrap | OBO::NoSignedWrap)
+  ///    == [0,INT_MAX)
+  ///  MGNR(Add, [i8 -1, 6), OBO::NoSignedWrap) == [INT_MIN+1, INT_MAX-4)
+  ///  MGNR(Sub, [i8 1, 2), OBO::NoSignedWrap) == [-127, 128)
+  ///  MGNR(Sub, [i8 1, 2), OBO::NoUnsignedWrap) == [1, 0)
+  ///  MGNR(Sub, [i8 1, 2), OBO::NoUnsignedWrap | OBO::NoSignedWrap)
+  ///    == [1,INT_MAX)
+  static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
+                                                  const ConstantRange &Other,
+                                                  unsigned NoWrapKind);
+
+  /// Set up \p Pred and \p RHS such that
+  /// ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.  Return true if
+  /// successful.
+  bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const;
+
+  /// Return the lower value for this range.
+  const APInt &getLower() const { return Lower; }
+
+  /// Return the upper value for this range.
+  const APInt &getUpper() const { return Upper; }
+
+  /// Get the bit width of this ConstantRange.
+  uint32_t getBitWidth() const { return Lower.getBitWidth(); }
+
+  /// Return true if this set contains all of the elements possible
+  /// for this data-type.
+  bool isFullSet() const;
+
+  /// Return true if this set contains no members.
+  bool isEmptySet() const;
+
+  /// Return true if this set wraps around the top of the range.
+  /// For example: [100, 8).
+  bool isWrappedSet() const;
+
+  /// Return true if this set wraps around the INT_MIN of
+  /// its bitwidth. For example: i8 [120, 140).
+  bool isSignWrappedSet() const;
+
+  /// Return true if the specified value is in the set.
+  bool contains(const APInt &Val) const;
+
+  /// Return true if the other range is a subset of this one.
+  bool contains(const ConstantRange &CR) const;
+
+  /// If this set contains a single element, return it, otherwise return null.
+  const APInt *getSingleElement() const {
+    if (Upper == Lower + 1)
+      return &Lower;
+    return nullptr;
+  }
+
+  /// If this set contains all but a single element, return it, otherwise return
+  /// null.
+  const APInt *getSingleMissingElement() const {
+    if (Lower == Upper + 1)
+      return &Upper;
+    return nullptr;
+  }
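+
+  // Illustrative sketch (editor's addition):
+  //   ConstantRange R(APInt(8, 7));              // the singleton {7}
+  //   if (const APInt *Only = R.getSingleElement())
+  //     ...;                                     // *Only == 7 here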
+
+  /// Return true if this set contains exactly one member.
+  bool isSingleElement() const { return getSingleElement() != nullptr; }
+
+  /// Return the number of elements in this set.
+  APInt getSetSize() const;
+
+  /// Return true if the set size of this range is strictly smaller than the
+  /// set size of the range \p CR.
+  bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;
+
+  /// Return true if the set size of this range is larger than \p MaxSize.
+  bool isSizeLargerThan(uint64_t MaxSize) const;
+
+  /// Return the largest unsigned value contained in the ConstantRange.
+  APInt getUnsignedMax() const;
+
+  /// Return the smallest unsigned value contained in the ConstantRange.
+  APInt getUnsignedMin() const;
+
+  /// Return the largest signed value contained in the ConstantRange.
+  APInt getSignedMax() const;
+
+  /// Return the smallest signed value contained in the ConstantRange.
+  APInt getSignedMin() const;
+
+  /// Return true if this range is equal to another range.
+  bool operator==(const ConstantRange &CR) const {
+    return Lower == CR.Lower && Upper == CR.Upper;
+  }
+  bool operator!=(const ConstantRange &CR) const {
+    return !operator==(CR);
+  }
+
+  /// Subtract the specified constant from the endpoints of this constant range.
+  ConstantRange subtract(const APInt &CI) const;
+
+  /// Subtract the specified range from this range (aka relative complement of
+  /// the sets).
+  ConstantRange difference(const ConstantRange &CR) const;
+
+  /// Return the range that results from the intersection of
+  /// this range with another range.  The resultant range is guaranteed to
+  /// include all elements contained in both input ranges, and to have the
+  /// smallest possible set size that does so.  Because there may be two
+  /// intersections with the same set size, A.intersectWith(B) might not
+  /// be equal to B.intersectWith(A).
+  ConstantRange intersectWith(const ConstantRange &CR) const;
+
+  /// Return the range that results from the union of this range
+  /// with another range.  The resultant range is guaranteed to include the
+  /// elements of both sets, but may contain more.  For example, [3, 9) union
+  /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
+  /// in either set before.
+  ConstantRange unionWith(const ConstantRange &CR) const;
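+
+  // Illustrative sketch (editor's addition), mirroring the example above:
+  //   ConstantRange A(APInt(8, 3), APInt(8, 9));   // [3, 9)
+  //   ConstantRange B(APInt(8, 12), APInt(8, 15)); // [12, 15)
+  //   ConstantRange U = A.unionWith(B);            // [3, 15), a superset
+  //   ConstantRange I = A.intersectWith(B);        // empty set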
+
+  /// Return a new range representing the possible values resulting
+  /// from an application of the specified cast operator to this range. \p
+  /// BitWidth is the target bitwidth of the cast.  For casts which don't
+  /// change bitwidth, it must be the same as the source bitwidth.  For casts
+  /// which do change bitwidth, the bitwidth must be consistent with the
+  /// requested cast and source bitwidth.
+  ConstantRange castOp(Instruction::CastOps CastOp,
+                       uint32_t BitWidth) const;
+
+  /// Return a new range in the specified integer type, which must
+  /// be strictly larger than the current type.  The returned range will
+  /// correspond to the possible range of values if the source range had been
+  /// zero extended to BitWidth.
+  ConstantRange zeroExtend(uint32_t BitWidth) const;
+
+  /// Return a new range in the specified integer type, which must
+  /// be strictly larger than the current type.  The returned range will
+  /// correspond to the possible range of values if the source range had been
+  /// sign extended to BitWidth.
+  ConstantRange signExtend(uint32_t BitWidth) const;
+
+  /// Return a new range in the specified integer type, which must be
+  /// strictly smaller than the current type.  The returned range will
+  /// correspond to the possible range of values if the source range had been
+  /// truncated to the specified type.
+  ConstantRange truncate(uint32_t BitWidth) const;
+
+  /// Make this range have the bit width given by \p BitWidth. The
+  /// value is zero extended, truncated, or left alone to make it that width.
+  ConstantRange zextOrTrunc(uint32_t BitWidth) const;
+
+  /// Make this range have the bit width given by \p BitWidth. The
+  /// value is sign extended, truncated, or left alone to make it that width.
+  ConstantRange sextOrTrunc(uint32_t BitWidth) const;
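+
+  // Illustrative sketch (editor's addition): widening and narrowing.
+  //   ConstantRange R(APInt(8, 200), APInt(8, 210)); // i8 [200, 210)
+  //   ConstantRange Z = R.zeroExtend(16);  // i16 [200, 210)
+  //   ConstantRange S = R.signExtend(16);  // i16 [-56, -46) as signed values
+  //   ConstantRange T = Z.truncate(8);     // back to an i8 range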
+
+  /// Return a new range representing the possible values resulting
+  /// from an application of the specified binary operator to a left-hand side
+  /// of this range and a right-hand side of \p Other.
+  ConstantRange binaryOp(Instruction::BinaryOps BinOp,
+                         const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from an addition of a value in this range and a value in \p Other.
+  ConstantRange add(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting from a
+  /// known NSW addition of a value in this range and \p Other constant.
+  ConstantRange addWithNoSignedWrap(const APInt &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a subtraction of a value in this range and a value in \p Other.
+  ConstantRange sub(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a multiplication of a value in this range and a value in \p Other,
+  /// treating both this and \p Other as unsigned ranges.
+  ConstantRange multiply(const ConstantRange &Other) const;
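+
+  // Illustrative sketch (editor's addition): interval arithmetic. The results
+  // are conservative supersets of the exact value sets.
+  //   ConstantRange X(APInt(8, 1), APInt(8, 3));   // {1, 2}
+  //   ConstantRange Y(APInt(8, 10), APInt(8, 12)); // {10, 11}
+  //   ConstantRange Sum = X.add(Y);        // contains {11, 12, 13}
+  //   ConstantRange Prod = X.multiply(Y);  // contains {10, 11, 20, 22}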
+
+  /// Return a new range representing the possible values resulting
+  /// from a signed maximum of a value in this range and a value in \p Other.
+  ConstantRange smax(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from an unsigned maximum of a value in this range and a value in \p Other.
+  ConstantRange umax(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a signed minimum of a value in this range and a value in \p Other.
+  ConstantRange smin(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from an unsigned minimum of a value in this range and a value in \p Other.
+  ConstantRange umin(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from an unsigned division of a value in this range and a value in
+  /// \p Other.
+  ConstantRange udiv(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a binary-and of a value in this range and a value in \p Other.
+  ConstantRange binaryAnd(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a binary-or of a value in this range and a value in \p Other.
+  ConstantRange binaryOr(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting
+  /// from a left shift of a value in this range by a value in \p Other.
+  /// TODO: This isn't fully implemented yet.
+  ConstantRange shl(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting from a
+  /// logical right shift of a value in this range and a value in \p Other.
+  ConstantRange lshr(const ConstantRange &Other) const;
+
+  /// Return a new range representing the possible values resulting from an
+  /// arithmetic right shift of a value in this range and a value in \p Other.
+  ConstantRange ashr(const ConstantRange &Other) const;
+
+  /// Return a new range that is the set-wise complement of the current range.
+  ConstantRange inverse() const;
+
+  /// Print out the bounds to a stream.
+  void print(raw_ostream &OS) const;
+
+  /// Allow printing from a debugger easily.
+  void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
+  CR.print(OS);
+  return OS;
+}
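+
+// Illustrative sketch (editor's addition): printing a range.
+//   ConstantRange CR(APInt(8, 100), APInt(8, 8)); // wrapped i8 set
+//   errs() << CR << '\n';                         // prints the bounds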
+
+/// Parse out a conservative ConstantRange from !range metadata.
+///
+/// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
+ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD);
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CONSTANTRANGE_H
diff --git a/linux-x64/clang/include/llvm/IR/Constants.h b/linux-x64/clang/include/llvm/IR/Constants.h
new file mode 100644
index 0000000..1a7596d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Constants.h
@@ -0,0 +1,1299 @@
+//===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for the subclasses of Constant,
+/// which represent the different flavors of constant values that live in LLVM.
+/// Note that Constants are immutable (once created they never change) and are
+/// fully shared by structural equivalence.  This means that two structurally
+/// equivalent constants will always have the same address.  Constants are
+/// created on demand as needed and never deleted: thus clients don't have to
+/// worry about the lifetime of the objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTS_H
+#define LLVM_IR_CONSTANTS_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+class ArrayType;
+class IntegerType;
+class PointerType;
+class SequentialType;
+class StructType;
+class VectorType;
+template <class ConstantClass> struct ConstantAggrKeyType;
+
+/// Base class for constants with no operands.
+///
+/// These constants have no operands; they represent their data directly.
+/// Since they can be in use by unrelated modules (and are never based on
+/// GlobalValues), it never makes sense to RAUW them.
+class ConstantData : public Constant {
+  friend class Constant;
+
+  Value *handleOperandChangeImpl(Value *From, Value *To) {
+    llvm_unreachable("Constant data does not have operands!");
+  }
+
+protected:
+  explicit ConstantData(Type *Ty, ValueTy VT) : Constant(Ty, VT, nullptr, 0) {}
+
+  void *operator new(size_t s) { return User::operator new(s, 0); }
+
+public:
+  ConstantData(const ConstantData &) = delete;
+
+  /// Methods to support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return V->getValueID() >= ConstantDataFirstVal &&
+           V->getValueID() <= ConstantDataLastVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// This is the shared class of boolean and integer constants; it represents
+/// both boolean and integral constant values.
+/// @brief Class for constant integers.
+class ConstantInt final : public ConstantData {
+  friend class Constant;
+
+  APInt Val;
+
+  ConstantInt(IntegerType *Ty, const APInt& V);
+
+  void destroyConstantImpl();
+
+public:
+  ConstantInt(const ConstantInt &) = delete;
+
+  static ConstantInt *getTrue(LLVMContext &Context);
+  static ConstantInt *getFalse(LLVMContext &Context);
+  static Constant *getTrue(Type *Ty);
+  static Constant *getFalse(Type *Ty);
+
+  /// If Ty is a vector type, return a Constant with a splat of the given
+  /// value. Otherwise return a ConstantInt for the given value.
+  static Constant *get(Type *Ty, uint64_t V, bool isSigned = false);
+
+  /// Return a ConstantInt with the specified integer value for the specified
+  /// type. If the type is wider than 64 bits, the value will be zero-extended
+  /// to fit the type, unless isSigned is true, in which case the value will
+  /// be interpreted as a 64-bit signed integer and sign-extended to fit
+  /// the type.
+  /// @brief Get a ConstantInt for a specific value.
+  static ConstantInt *get(IntegerType *Ty, uint64_t V,
+                          bool isSigned = false);
+
+  /// Return a ConstantInt with the specified value for the specified type. The
+  /// value V will be canonicalized to an unsigned APInt. Accessing it with
+  /// either getSExtValue() or getZExtValue() will yield a correctly sized and
+  /// signed value for the type Ty.
+  /// @brief Get a ConstantInt for a specific signed value.
+  static ConstantInt *getSigned(IntegerType *Ty, int64_t V);
+  static Constant *getSigned(Type *Ty, int64_t V);
+
+  /// Return a ConstantInt with the specified value and an implied Type. The
+  /// type is the integer type that corresponds to the bit width of the value.
+  static ConstantInt *get(LLVMContext &Context, const APInt &V);
+
+  /// Return a ConstantInt constructed from the string Str with the given
+  /// radix.
+  static ConstantInt *get(IntegerType *Ty, StringRef Str,
+                          uint8_t radix);
+
+  /// If Ty is a vector type, return a Constant with a splat of the given
+  /// value. Otherwise return a ConstantInt for the given value.
+  static Constant *get(Type* Ty, const APInt& V);
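+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  //   ConstantInt *One    = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
+  //   ConstantInt *NegOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
+  //   ConstantInt *Wide   = ConstantInt::get(Ctx, APInt(128, 42));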
+
+  /// Return the constant as an APInt value reference. This allows clients to
+  /// obtain a full-precision copy of the value.
+  /// @brief Return the constant's value.
+  inline const APInt &getValue() const {
+    return Val;
+  }
+
+  /// getBitWidth - Return the bitwidth of this constant.
+  unsigned getBitWidth() const { return Val.getBitWidth(); }
+
+  /// Return the constant as a 64-bit unsigned integer value after it
+  /// has been zero extended as appropriate for the type of this constant. Note
+  /// that this method can assert if the value does not fit in 64 bits.
+  /// @brief Return the zero extended value.
+  inline uint64_t getZExtValue() const {
+    return Val.getZExtValue();
+  }
+
+  /// Return the constant as a 64-bit integer value after it has been sign
+  /// extended as appropriate for the type of this constant. Note that
+  /// this method can assert if the value does not fit in 64 bits.
+  /// @brief Return the sign extended value.
+  inline int64_t getSExtValue() const {
+    return Val.getSExtValue();
+  }
+
+  /// A helper method that can be used to determine if the constant contained
+  /// within is equal to a constant.  This only works for very small values,
+  /// because this is all that can be represented with all types.
+  /// @brief Determine if this constant's value is the same as an unsigned char.
+  bool equalsInt(uint64_t V) const {
+    return Val == V;
+  }
+
+  /// getType - Specialize the getType() method to always return an IntegerType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  ///
+  inline IntegerType *getType() const {
+    return cast<IntegerType>(Value::getType());
+  }
+
+  /// This static method returns true if the type Ty is big enough to
+  /// represent the value V. This can be used to avoid having the get method
+  /// assert when V is larger than Ty can represent. Note that there are two
+  /// versions of this method, one for unsigned and one for signed integers.
+  /// Although ConstantInt canonicalizes everything to an unsigned integer,
+  /// the signed version avoids callers having to convert a signed quantity
+  /// to the appropriate unsigned type before calling the method.
+  /// @returns true if V is a valid value for type Ty
+  /// @brief Determine if the value is in range for the given type.
+  static bool isValueValidForType(Type *Ty, uint64_t V);
+  static bool isValueValidForType(Type *Ty, int64_t V);
+
+  bool isNegative() const { return Val.isNegative(); }
+
+  /// This is just a convenience method to make client code smaller for a
+  /// common case. It also correctly performs the comparison without the
+  /// potential for an assertion from getZExtValue().
+  bool isZero() const {
+    return Val.isNullValue();
+  }
+
+  /// This is just a convenience method to make client code smaller for a
+  /// common case. It also correctly performs the comparison without the
+  /// potential for an assertion from getZExtValue().
+  /// @brief Determine if the value is one.
+  bool isOne() const {
+    return Val.isOneValue();
+  }
+
+  /// This function will return true iff every bit in this constant is set
+  /// to true.
+  /// @returns true iff this constant's bits are all set to true.
+  /// @brief Determine if the value is all ones.
+  bool isMinusOne() const {
+    return Val.isAllOnesValue();
+  }
+
+  /// This function will return true iff this constant represents the largest
+  /// value that may be represented by the constant's type.
+  /// @returns true iff this is the largest value that may be represented
+  /// by this type.
+  /// @brief Determine if the value is maximal.
+  bool isMaxValue(bool isSigned) const {
+    if (isSigned)
+      return Val.isMaxSignedValue();
+    else
+      return Val.isMaxValue();
+  }
+
+  /// This function will return true iff this constant represents the smallest
+  /// value that may be represented by this constant's type.
+  /// @returns true if this is the smallest value that may be represented by
+  /// this type.
+  /// @brief Determine if the value is minimal.
+  bool isMinValue(bool isSigned) const {
+    if (isSigned)
+      return Val.isMinSignedValue();
+    else
+      return Val.isMinValue();
+  }
+
+  /// This function will return true iff this constant represents a value with
+  /// active bits bigger than 64 bits or a value greater than or equal to the
+  /// given uint64_t value.
+  /// @returns true iff this constant is greater than or equal to the given number.
+  /// @brief Determine if the value is greater than or equal to the given number.
+  bool uge(uint64_t Num) const {
+    return Val.uge(Num);
+  }
+
+  /// getLimitedValue - If the value is smaller than the specified limit,
+  /// return it, otherwise return the limit value.  This causes the value
+  /// to saturate to the limit.
+  /// @returns the min of the value of the constant and the specified value
+  /// @brief Get the constant's value with a saturation limit
+  uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const {
+    return Val.getLimitedValue(Limit);
+  }
+
+  /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantIntVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantFP - Floating Point Values [float, double]
+///
+class ConstantFP final : public ConstantData {
+  friend class Constant;
+
+  APFloat Val;
+
+  ConstantFP(Type *Ty, const APFloat& V);
+
+  void destroyConstantImpl();
+
+public:
+  ConstantFP(const ConstantFP &) = delete;
+
+  /// Floating point negation must be implemented with f(x) = -0.0 - x. This
+  /// method returns the negative zero constant for floating point or vector
+  /// floating point types; for all other types, it returns the null value.
+  static Constant *getZeroValueForNegation(Type *Ty);
+
+  /// This returns a ConstantFP, or a vector containing a splat of a ConstantFP,
+  /// for the specified value in the specified type. This should only be used
+  /// for simple constant values like 2.0/1.0, etc., that are known-valid both as
+  /// host double and as the target format.
+  static Constant *get(Type* Ty, double V);
+
+  /// If Ty is a vector type, return a Constant with a splat of the given
+  /// value. Otherwise return a ConstantFP for the given value.
+  static Constant *get(Type *Ty, const APFloat &V);
+
+  static Constant *get(Type* Ty, StringRef Str);
+  static ConstantFP *get(LLVMContext &Context, const APFloat &V);
+  static Constant *getNaN(Type *Ty, bool Negative = false, unsigned type = 0);
+  static Constant *getNegativeZero(Type *Ty);
+  static Constant *getInfinity(Type *Ty, bool Negative = false);
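+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  //   Constant *Pi   = ConstantFP::get(Type::getDoubleTy(Ctx), 3.14159);
+  //   Constant *NegZ = ConstantFP::getNegativeZero(Type::getFloatTy(Ctx));
+  //   Constant *Inf  = ConstantFP::getInfinity(Type::getDoubleTy(Ctx));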
+
+  /// Return true if Ty is big enough to represent V.
+  static bool isValueValidForType(Type *Ty, const APFloat &V);
+  inline const APFloat &getValueAPF() const { return Val; }
+
+  /// Return true if the value is positive or negative zero.
+  bool isZero() const { return Val.isZero(); }
+
+  /// Return true if the sign bit is set.
+  bool isNegative() const { return Val.isNegative(); }
+
+  /// Return true if the value is infinity.
+  bool isInfinity() const { return Val.isInfinity(); }
+
+  /// Return true if the value is a NaN.
+  bool isNaN() const { return Val.isNaN(); }
+
+  /// We don't rely on operator== working on double values, as it returns true
+  /// for things that are clearly not equal, like -0.0 and 0.0.
+  /// As such, this method can be used to do an exact bit-for-bit comparison of
+  /// two floating point values.  The version with a double operand is retained
+  /// because it's so convenient to write isExactlyValue(2.0), but please use
+  /// it only for simple constants.
+  bool isExactlyValue(const APFloat &V) const;
+
+  bool isExactlyValue(double V) const {
+    bool ignored;
+    APFloat FV(V);
+    FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
+    return isExactlyValue(FV);
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantFPVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// All zero aggregate value
+///
+class ConstantAggregateZero final : public ConstantData {
+  friend class Constant;
+
+  explicit ConstantAggregateZero(Type *Ty)
+      : ConstantData(Ty, ConstantAggregateZeroVal) {}
+
+  void destroyConstantImpl();
+
+public:
+  ConstantAggregateZero(const ConstantAggregateZero &) = delete;
+
+  static ConstantAggregateZero *get(Type *Ty);
+
+  /// If this CAZ has array or vector type, return a zero with the right element
+  /// type.
+  Constant *getSequentialElement() const;
+
+  /// If this CAZ has struct type, return a zero with the right element type for
+  /// the specified element.
+  Constant *getStructElement(unsigned Elt) const;
+
+  /// Return a zero of the right value for the specified GEP index if we can,
+  /// otherwise return null (e.g. if C is a ConstantExpr).
+  Constant *getElementValue(Constant *C) const;
+
+  /// Return a zero of the right value for the specified GEP index.
+  Constant *getElementValue(unsigned Idx) const;
+
+  /// Return the number of elements in the array, vector, or struct.
+  unsigned getNumElements() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  ///
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantAggregateZeroVal;
+  }
+};
+
+/// Base class for aggregate constants (with operands).
+///
+/// These constants are aggregates of other constants, which are stored as
+/// operands.
+///
+/// Subclasses are \a ConstantStruct, \a ConstantArray, and \a
+/// ConstantVector.
+///
+/// \note Some subclasses of \a ConstantData are semantically aggregates --
+/// such as \a ConstantDataArray -- but are not subclasses of this because they
+/// do not use operands.
+class ConstantAggregate : public Constant {
+protected:
+  ConstantAggregate(CompositeType *T, ValueTy VT, ArrayRef<Constant *> V);
+
+public:
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() >= ConstantAggregateFirstVal &&
+           V->getValueID() <= ConstantAggregateLastVal;
+  }
+};
+
+template <>
+struct OperandTraits<ConstantAggregate>
+    : public VariadicOperandTraits<ConstantAggregate> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantAggregate, Constant)
+
+//===----------------------------------------------------------------------===//
+/// ConstantArray - Constant Array Declarations
+///
+class ConstantArray final : public ConstantAggregate {
+  friend struct ConstantAggrKeyType<ConstantArray>;
+  friend class Constant;
+
+  ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+public:
+  // ConstantArray accessors
+  static Constant *get(ArrayType *T, ArrayRef<Constant*> V);
+
+private:
+  static Constant *getImpl(ArrayType *T, ArrayRef<Constant *> V);
+
+public:
+  /// Specialize the getType() method to always return an ArrayType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  inline ArrayType *getType() const {
+    return cast<ArrayType>(Value::getType());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantArrayVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Constant Struct Declarations
+//
+class ConstantStruct final : public ConstantAggregate {
+  friend struct ConstantAggrKeyType<ConstantStruct>;
+  friend class Constant;
+
+  ConstantStruct(StructType *T, ArrayRef<Constant *> Val);
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+public:
+  // ConstantStruct accessors
+  static Constant *get(StructType *T, ArrayRef<Constant*> V);
+
+  template <typename... Csts>
+  static typename std::enable_if<are_base_of<Constant, Csts...>::value,
+                                 Constant *>::type
+  get(StructType *T, Csts *... Vs) {
+    SmallVector<Constant *, 8> Values({Vs...});
+    return get(T, Values);
+  }
+
+  /// Return an anonymous struct that has the specified elements.
+  /// If the struct is possibly empty, then you must specify a context.
+  static Constant *getAnon(ArrayRef<Constant*> V, bool Packed = false) {
+    return get(getTypeForElements(V, Packed), V);
+  }
+  static Constant *getAnon(LLVMContext &Ctx,
+                           ArrayRef<Constant*> V, bool Packed = false) {
+    return get(getTypeForElements(Ctx, V, Packed), V);
+  }
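+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  //   Constant *I = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
+  //   Constant *F = ConstantFP::get(Type::getFloatTy(Ctx), 1.0);
+  //   Constant *S = ConstantStruct::getAnon({I, F}); // type {i32, float}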
+
+  /// Return an anonymous struct type to use for a constant with the specified
+  /// set of elements. The list must not be empty.
+  static StructType *getTypeForElements(ArrayRef<Constant*> V,
+                                        bool Packed = false);
+  /// This version of the method allows an empty list.
+  static StructType *getTypeForElements(LLVMContext &Ctx,
+                                        ArrayRef<Constant*> V,
+                                        bool Packed = false);
+
+  /// Specialization - reduce amount of casting.
+  inline StructType *getType() const {
+    return cast<StructType>(Value::getType());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantStructVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// Constant Vector Declarations
+///
+class ConstantVector final : public ConstantAggregate {
+  friend struct ConstantAggrKeyType<ConstantVector>;
+  friend class Constant;
+
+  ConstantVector(VectorType *T, ArrayRef<Constant *> Val);
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+public:
+  // ConstantVector accessors
+  static Constant *get(ArrayRef<Constant*> V);
+
+private:
+  static Constant *getImpl(ArrayRef<Constant *> V);
+
+public:
+  /// Return a ConstantVector with the specified constant in each element.
+  static Constant *getSplat(unsigned NumElts, Constant *Elt);
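+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  //   Constant *Elt = ConstantInt::get(Type::getInt32Ty(Ctx), 5);
+  //   Constant *V = ConstantVector::getSplat(4, Elt); // <4 x i32> of all 5s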
+
+  /// Specialize the getType() method to always return a VectorType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  inline VectorType *getType() const {
+    return cast<VectorType>(Value::getType());
+  }
+
+  /// If this is a splat constant, meaning that all of the elements have the
+  /// same value, return that value. Otherwise return NULL.
+  Constant *getSplatValue() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantVectorVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// A constant pointer value that points to null
+///
+class ConstantPointerNull final : public ConstantData {
+  friend class Constant;
+
+  explicit ConstantPointerNull(PointerType *T)
+      : ConstantData(T, Value::ConstantPointerNullVal) {}
+
+  void destroyConstantImpl();
+
+public:
+  ConstantPointerNull(const ConstantPointerNull &) = delete;
+
+  /// Static factory methods - Return objects of the specified value
+  static ConstantPointerNull *get(PointerType *T);
+
+  /// Specialize the getType() method to always return a PointerType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  inline PointerType *getType() const {
+    return cast<PointerType>(Value::getType());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantPointerNullVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataSequential - A vector or array constant whose element type is a
+/// simple 1/2/4/8-byte integer or float/double, and whose elements are just
+/// simple data values (i.e. ConstantInt/ConstantFP).  This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+///
+/// This is the common base class of ConstantDataArray and ConstantDataVector.
+///
+class ConstantDataSequential : public ConstantData {
+  friend class LLVMContextImpl;
+  friend class Constant;
+
+  /// A pointer to the bytes underlying this constant (which is owned by the
+  /// uniquing StringMap).
+  const char *DataElements;
+
+  /// This forms a linked list of ConstantDataSequential nodes that have
+  /// the same value but different type.  For example, 0,0,0,1 could be a 4
+  /// element array of i8, or a 1-element array of i32.  They'll both end up in
+  /// the same StringMap bucket, linked up.
+  ConstantDataSequential *Next;
+
+  void destroyConstantImpl();
+
+protected:
+  explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
+      : ConstantData(ty, VT), DataElements(Data), Next(nullptr) {}
+  ~ConstantDataSequential() { delete Next; }
+
+  static Constant *getImpl(StringRef Bytes, Type *Ty);
+
+public:
+  ConstantDataSequential(const ConstantDataSequential &) = delete;
+
+  /// Return true if a ConstantDataSequential can be formed with a vector or
+  /// array of the specified element type.
+  /// ConstantDataArray only works with normal float and int types that are
+  /// stored densely in memory, not with things like i42 or x86_f80.
+  static bool isElementTypeCompatible(Type *Ty);
+
+  /// If this is a sequential container of integers (of any size), return the
+  /// specified element in the low bits of a uint64_t.
+  uint64_t getElementAsInteger(unsigned i) const;
+
+  /// If this is a sequential container of integers (of any size), return the
+  /// specified element as an APInt.
+  APInt getElementAsAPInt(unsigned i) const;
+
+  /// If this is a sequential container of floating point type, return the
+  /// specified element as an APFloat.
+  APFloat getElementAsAPFloat(unsigned i) const;
+
+  /// If this is a sequential container of floats, return the specified element
+  /// as a float.
+  float getElementAsFloat(unsigned i) const;
+
+  /// If this is a sequential container of doubles, return the specified
+  /// element as a double.
+  double getElementAsDouble(unsigned i) const;
+
+  /// Return a Constant for a specified index's element.
+  /// Note that this has to compute a new constant to return, so it isn't as
+  /// efficient as getElementAsInteger/Float/Double.
+  Constant *getElementAsConstant(unsigned i) const;
+
+  /// Specialize the getType() method to always return a SequentialType, which
+  /// reduces the amount of casting needed in parts of the compiler.
+  inline SequentialType *getType() const {
+    return cast<SequentialType>(Value::getType());
+  }
+
+  /// Return the element type of the array/vector.
+  Type *getElementType() const;
+
+  /// Return the number of elements in the array or vector.
+  unsigned getNumElements() const;
+
+  /// Return the size (in bytes) of each element in the array/vector.
+  /// The size of the elements is known to be a multiple of one byte.
+  uint64_t getElementByteSize() const;
+
+  /// This method returns true if this is an array of \p CharSize integers.
+  bool isString(unsigned CharSize = 8) const;
+
+  /// This method returns true if the array "isString", ends with a null byte,
+  /// and does not contain any other null bytes.
+  bool isCString() const;
+
+  /// If this array is isString(), then this method returns the array as a
+  /// StringRef. Otherwise, it asserts out.
+  StringRef getAsString() const {
+    assert(isString() && "Not a string");
+    return getRawDataValues();
+  }
+
+  /// If this array is isCString(), then this method returns the array (without
+  /// the trailing null byte) as a StringRef. Otherwise, it asserts out.
+  StringRef getAsCString() const {
+    assert(isCString() && "Isn't a C string");
+    StringRef Str = getAsString();
+    return Str.substr(0, Str.size()-1);
+  }
+
+  /// Return the raw, underlying, bytes of this data. Note that this is an
+  /// extremely tricky thing to work with, as it exposes the host endianness of
+  /// the data elements.
+  StringRef getRawDataValues() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantDataArrayVal ||
+           V->getValueID() == ConstantDataVectorVal;
+  }
+
+private:
+  const char *getElementPointer(unsigned Elt) const;
+};
+
+//===----------------------------------------------------------------------===//
+/// An array constant whose element type is a simple 1/2/4/8-byte integer or
+/// float/double, and whose elements are just simple data values
+/// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it
+/// stores all of the elements of the constant as densely packed data, instead
+/// of as Value*'s.
+class ConstantDataArray final : public ConstantDataSequential {
+  friend class ConstantDataSequential;
+
+  explicit ConstantDataArray(Type *ty, const char *Data)
+      : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
+
+public:
+  ConstantDataArray(const ConstantDataArray &) = delete;
+
+  /// get() constructor - Return a constant with array type with an element
+  /// count and element type matching the ArrayRef passed in.  Note that this
+  /// can return a ConstantAggregateZero object.
+  template <typename ElementTy>
+  static Constant *get(LLVMContext &Context, ArrayRef<ElementTy> Elts) {
+    const char *Data = reinterpret_cast<const char *>(Elts.data());
+    Type *Ty =
+        ArrayType::get(Type::getScalarTy<ElementTy>(Context), Elts.size());
+    return getImpl(StringRef(Data, Elts.size() * sizeof(ElementTy)), Ty);
+  }
+
+  /// get() constructor - ArrayTy needs to be compatible with
+  /// ArrayRef<ElementTy>. Calls get(LLVMContext, ArrayRef<ElementTy>).
+  template <typename ArrayTy>
+  static Constant *get(LLVMContext &Context, ArrayTy &Elts) {
+    return ConstantDataArray::get(Context, makeArrayRef(Elts));
+  }
+
+  /// getFP() constructors - Return a constant with array type with an element
+  /// count and element type of float with precision matching the number of
+  /// bits in the ArrayRef passed in (i.e. half for 16 bits, float for 32 bits,
+  /// double for 64 bits).  Note that this can return a ConstantAggregateZero
+  /// object.
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+
+  /// This method constructs a CDS and initializes it with a text string.
+  /// The default behavior (AddNull==true) causes a null terminator to
+  /// be placed at the end of the array (increasing the length of the string by
+  /// one more than the StringRef would normally indicate).  Pass AddNull=false
+  /// to disable this behavior.
+  static Constant *getString(LLVMContext &Context, StringRef Initializer,
+                             bool AddNull = true);
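+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  //   Constant *CStr = ConstantDataArray::getString(Ctx, "hi");
+  //   // [3 x i8] c"hi\00" -- NUL-terminated by default
+  //   Constant *Raw = ConstantDataArray::getString(Ctx, "hi", /*AddNull=*/false);
+  //   // [2 x i8] c"hi"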
+
+  /// Specialize the getType() method to always return an ArrayType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  inline ArrayType *getType() const {
+    return cast<ArrayType>(Value::getType());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantDataArrayVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// A vector constant whose element type is a simple 1/2/4/8-byte integer or
+/// float/double, and whose elements are just simple data values
+/// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it
+/// stores all of the elements of the constant as densely packed data, instead
+/// of as Value*'s.
+class ConstantDataVector final : public ConstantDataSequential {
+  friend class ConstantDataSequential;
+
+  explicit ConstantDataVector(Type *ty, const char *Data)
+      : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
+
+public:
+  ConstantDataVector(const ConstantDataVector &) = delete;
+
+  /// get() constructors - Return a constant with vector type with an element
+  /// count and element type matching the ArrayRef passed in.  Note that this
+  /// can return a ConstantAggregateZero object.
+  static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
+  static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+  static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+  static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+  static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
+  static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
+
+  /// getFP() constructors - Return a constant with vector type with an element
+  /// count and element type of float with the precision matching the number of
+  /// bits in the ArrayRef passed in (i.e. half for 16 bits, float for 32 bits,
+  /// double for 64 bits).  Note that this can return a ConstantAggregateZero
+  /// object.
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+  static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+
+  /// Return a ConstantVector with the specified constant in each element.
+  /// The specified constant has to be of a compatible type (i8/i16/
+  /// i32/i64/float/double) and must be a ConstantFP or ConstantInt.
+  static Constant *getSplat(unsigned NumElts, Constant *Elt);
+
+  /// Returns true if this is a splat constant, meaning that all elements have
+  /// the same value.
+  bool isSplat() const;
+
+  /// If this is a splat constant, meaning that all of the elements have the
+  /// same value, return that value. Otherwise return NULL.
+  Constant *getSplatValue() const;
+
+  /// Specialize the getType() method to always return a VectorType,
+  /// which reduces the amount of casting needed in parts of the compiler.
+  inline VectorType *getType() const {
+    return cast<VectorType>(Value::getType());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantDataVectorVal;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// A constant token which is empty
+///
+class ConstantTokenNone final : public ConstantData {
+  friend class Constant;
+
+  explicit ConstantTokenNone(LLVMContext &Context)
+      : ConstantData(Type::getTokenTy(Context), ConstantTokenNoneVal) {}
+
+  void destroyConstantImpl();
+
+public:
+  ConstantTokenNone(const ConstantTokenNone &) = delete;
+
+  /// Return the ConstantTokenNone.
+  static ConstantTokenNone *get(LLVMContext &Context);
+
+  /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantTokenNoneVal;
+  }
+};
+
+/// The address of a basic block.
+///
+class BlockAddress final : public Constant {
+  friend class Constant;
+
+  BlockAddress(Function *F, BasicBlock *BB);
+
+  void *operator new(size_t s) { return User::operator new(s, 2); }
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+public:
+  /// Return a BlockAddress for the specified function and basic block.
+  static BlockAddress *get(Function *F, BasicBlock *BB);
+
+  /// Return a BlockAddress for the specified basic block.  The basic
+  /// block must be embedded into a function.
+  static BlockAddress *get(BasicBlock *BB);
+
+  /// Lookup an existing \c BlockAddress constant for the given BasicBlock.
+  ///
+  /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress.
+  static BlockAddress *lookup(const BasicBlock *BB);
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  Function *getFunction() const { return (Function*)Op<0>().get(); }
+  BasicBlock *getBasicBlock() const { return (BasicBlock*)Op<1>().get(); }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == BlockAddressVal;
+  }
+};
+
+template <>
+struct OperandTraits<BlockAddress> :
+  public FixedNumOperandTraits<BlockAddress, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
+
+//===----------------------------------------------------------------------===//
+/// A constant value that is initialized with an expression using
+/// other constant values.
+///
+/// This class uses the standard Instruction opcodes to define the various
+/// constant expressions.  The Opcode field for the ConstantExpr class is
+/// maintained in the Value::SubclassData field.
+class ConstantExpr : public Constant {
+  friend struct ConstantExprKeyType;
+  friend class Constant;
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+protected:
+  ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
+      : Constant(ty, ConstantExprVal, Ops, NumOps) {
+    // Operation type (an Instruction opcode) is stored as the SubclassData.
+    setValueSubclassData(Opcode);
+  }
+
+public:
+  // Static methods to construct a ConstantExpr of different kinds.  Note that
+  // these methods may return an object that is not an instance of the
+  // ConstantExpr class, because they will attempt to fold the constant
+  // expression into something simpler if possible.
+
+  /// getAlignOf constant expr - computes the alignment of a type in a target
+  /// independent way (Note: the return type is an i64).
+  static Constant *getAlignOf(Type *Ty);
+
+  /// getSizeOf constant expr - computes the (alloc) size of a type (in
+  /// address-units, not bits) in a target independent way (Note: the return
+  /// type is an i64).
+  ///
+  static Constant *getSizeOf(Type *Ty);
+
+  /// getOffsetOf constant expr - computes the offset of a struct field in a
+  /// target independent way (Note: the return type is an i64).
+  ///
+  static Constant *getOffsetOf(StructType *STy, unsigned FieldNo);
+
+  /// getOffsetOf constant expr - This is a generalized form of getOffsetOf,
+  /// which supports any aggregate type, and any Constant index.
+  ///
+  static Constant *getOffsetOf(Type *Ty, Constant *FieldNo);
+
+  static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW =false);
+  static Constant *getFNeg(Constant *C);
+  static Constant *getNot(Constant *C);
+  static Constant *getAdd(Constant *C1, Constant *C2,
+                          bool HasNUW = false, bool HasNSW = false);
+  static Constant *getFAdd(Constant *C1, Constant *C2);
+  static Constant *getSub(Constant *C1, Constant *C2,
+                          bool HasNUW = false, bool HasNSW = false);
+  static Constant *getFSub(Constant *C1, Constant *C2);
+  static Constant *getMul(Constant *C1, Constant *C2,
+                          bool HasNUW = false, bool HasNSW = false);
+  static Constant *getFMul(Constant *C1, Constant *C2);
+  static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false);
+  static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false);
+  static Constant *getFDiv(Constant *C1, Constant *C2);
+  static Constant *getURem(Constant *C1, Constant *C2);
+  static Constant *getSRem(Constant *C1, Constant *C2);
+  static Constant *getFRem(Constant *C1, Constant *C2);
+  static Constant *getAnd(Constant *C1, Constant *C2);
+  static Constant *getOr(Constant *C1, Constant *C2);
+  static Constant *getXor(Constant *C1, Constant *C2);
+  static Constant *getShl(Constant *C1, Constant *C2,
+                          bool HasNUW = false, bool HasNSW = false);
+  static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
+  static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
+  static Constant *getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getSExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getZExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getFPTrunc(Constant *C, Type *Ty,
+                              bool OnlyIfReduced = false);
+  static Constant *getFPExtend(Constant *C, Type *Ty,
+                               bool OnlyIfReduced = false);
+  static Constant *getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+  static Constant *getPtrToInt(Constant *C, Type *Ty,
+                               bool OnlyIfReduced = false);
+  static Constant *getIntToPtr(Constant *C, Type *Ty,
+                               bool OnlyIfReduced = false);
+  static Constant *getBitCast(Constant *C, Type *Ty,
+                              bool OnlyIfReduced = false);
+  static Constant *getAddrSpaceCast(Constant *C, Type *Ty,
+                                    bool OnlyIfReduced = false);
+
+  static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
+  static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
+
+  static Constant *getNSWAdd(Constant *C1, Constant *C2) {
+    return getAdd(C1, C2, false, true);
+  }
+
+  static Constant *getNUWAdd(Constant *C1, Constant *C2) {
+    return getAdd(C1, C2, true, false);
+  }
+
+  static Constant *getNSWSub(Constant *C1, Constant *C2) {
+    return getSub(C1, C2, false, true);
+  }
+
+  static Constant *getNUWSub(Constant *C1, Constant *C2) {
+    return getSub(C1, C2, true, false);
+  }
+
+  static Constant *getNSWMul(Constant *C1, Constant *C2) {
+    return getMul(C1, C2, false, true);
+  }
+
+  static Constant *getNUWMul(Constant *C1, Constant *C2) {
+    return getMul(C1, C2, true, false);
+  }
+
+  static Constant *getNSWShl(Constant *C1, Constant *C2) {
+    return getShl(C1, C2, false, true);
+  }
+
+  static Constant *getNUWShl(Constant *C1, Constant *C2) {
+    return getShl(C1, C2, true, false);
+  }
+
+  static Constant *getExactSDiv(Constant *C1, Constant *C2) {
+    return getSDiv(C1, C2, true);
+  }
+
+  static Constant *getExactUDiv(Constant *C1, Constant *C2) {
+    return getUDiv(C1, C2, true);
+  }
+
+  static Constant *getExactAShr(Constant *C1, Constant *C2) {
+    return getAShr(C1, C2, true);
+  }
+
+  static Constant *getExactLShr(Constant *C1, Constant *C2) {
+    return getLShr(C1, C2, true);
+  }
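+
+  // Illustrative sketch (editor's addition); assumes a valid LLVMContext Ctx.
+  // Because these factories constant-fold, the result here is a plain
+  // ConstantInt, not a ConstantExpr.
+  //   Constant *Two   = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
+  //   Constant *Three = ConstantInt::get(Type::getInt32Ty(Ctx), 3);
+  //   Constant *Five  = ConstantExpr::getAdd(Two, Three); // i32 5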
+
+  /// Return the identity for the given binary operation,
+  /// i.e. a constant C such that X op C = X and C op X = X for every X.  It
+  /// returns null if the operator doesn't have an identity.
+  static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty);
+
+  /// Return the absorbing element for the given binary
+  /// operation, i.e. a constant C such that X op C = C and C op X = C for
+  /// every X.  For example, this returns zero for integer multiplication.
+  /// It returns null if the operator doesn't have an absorbing element.
+  static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty);
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+  /// \brief Convenience function for getting a Cast operation.
+  ///
+  /// \param ops The opcode for the conversion
+  /// \param C  The constant to be converted
+  /// \param Ty The type to which the constant is converted
+  /// \param OnlyIfReduced see \a getWithOperands() docs.
+  static Constant *getCast(unsigned ops, Constant *C, Type *Ty,
+                           bool OnlyIfReduced = false);
+
+  /// @brief Create a ZExt or BitCast cast constant expression
+  static Constant *getZExtOrBitCast(
+    Constant *C,   ///< The constant to zext or bitcast
+    Type *Ty ///< The type to zext or bitcast C to
+  );
+
+  /// @brief Create a SExt or BitCast cast constant expression
+  static Constant *getSExtOrBitCast(
+    Constant *C,   ///< The constant to sext or bitcast
+    Type *Ty ///< The type to sext or bitcast C to
+  );
+
+  /// @brief Create a Trunc or BitCast cast constant expression
+  static Constant *getTruncOrBitCast(
+    Constant *C,   ///< The constant to trunc or bitcast
+    Type *Ty ///< The type to trunc or bitcast C to
+  );
+
+  /// @brief Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant
+  /// expression.
+  static Constant *getPointerCast(
+    Constant *C,   ///< The pointer value to be casted (operand 0)
+    Type *Ty ///< The type to which cast should be made
+  );
+
+  /// @brief Create a BitCast or AddrSpaceCast for a pointer type depending on
+  /// the address space.
+  static Constant *getPointerBitCastOrAddrSpaceCast(
+    Constant *C,   ///< The constant to addrspacecast or bitcast
+    Type *Ty ///< The type to bitcast or addrspacecast C to
+  );
+
+  /// @brief Create a ZExt, Bitcast or Trunc for integer -> integer casts
+  static Constant *getIntegerCast(
+    Constant *C,    ///< The integer constant to be casted
+    Type *Ty, ///< The integer type to cast to
+    bool isSigned   ///< Whether C should be treated as signed or not
+  );
+
+  /// @brief Create a FPExt, Bitcast or FPTrunc for fp -> fp casts
+  static Constant *getFPCast(
+    Constant *C,    ///< The integer constant to be casted
+    Type *Ty ///< The integer type to cast to
+  );
+
+  /// @brief Return true if this is a convert constant expression
+  bool isCast() const;
+
+  /// @brief Return true if this is a compare constant expression
+  bool isCompare() const;
+
+  /// @brief Return true if this is an insertvalue or extractvalue expression,
+  /// and the getIndices() method may be used.
+  bool hasIndices() const;
+
+  /// @brief Return true if this is a getelementptr expression and all
+  /// the index operands are compile-time known integers within the
+  /// corresponding notional static array extents. Note that this is
+  /// not equivalent to, a subset of, or a superset of the "inbounds"
+  /// property.
+  bool isGEPWithNoNotionalOverIndexing() const;
+
+  /// Select constant expr
+  ///
+  /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+  static Constant *getSelect(Constant *C, Constant *V1, Constant *V2,
+                             Type *OnlyIfReducedTy = nullptr);
+
+  /// get - Return a binary or shift operator constant expression,
+  /// folding if possible.
+  ///
+  /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+  static Constant *get(unsigned Opcode, Constant *C1, Constant *C2,
+                       unsigned Flags = 0, Type *OnlyIfReducedTy = nullptr);
+
+  /// \brief Return an ICmp or FCmp comparison operator constant expression.
+  ///
+  /// \param OnlyIfReduced see \a getWithOperands() docs.
+  static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2,
+                              bool OnlyIfReduced = false);
+
+  /// get* - Return some common constants without having to
+  /// specify the full Instruction::OPCODE identifier.
+  ///
+  static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS,
+                           bool OnlyIfReduced = false);
+  static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS,
+                           bool OnlyIfReduced = false);
+
+  /// Getelementptr form.  Value* is only accepted for convenience;
+  /// all elements must be Constants.
+  ///
+  /// \param InRangeIndex the inrange index if present or None.
+  /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+  static Constant *getGetElementPtr(Type *Ty, Constant *C,
+                                    ArrayRef<Constant *> IdxList,
+                                    bool InBounds = false,
+                                    Optional<unsigned> InRangeIndex = None,
+                                    Type *OnlyIfReducedTy = nullptr) {
+    return getGetElementPtr(
+        Ty, C, makeArrayRef((Value * const *)IdxList.data(), IdxList.size()),
+        InBounds, InRangeIndex, OnlyIfReducedTy);
+  }
+  static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx,
+                                    bool InBounds = false,
+                                    Optional<unsigned> InRangeIndex = None,
+                                    Type *OnlyIfReducedTy = nullptr) {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex,
+                            OnlyIfReducedTy);
+  }
+  static Constant *getGetElementPtr(Type *Ty, Constant *C,
+                                    ArrayRef<Value *> IdxList,
+                                    bool InBounds = false,
+                                    Optional<unsigned> InRangeIndex = None,
+                                    Type *OnlyIfReducedTy = nullptr);
+
+  /// Create an "inbounds" getelementptr. See the documentation for the
+  /// "inbounds" flag in LangRef.html for details.
+  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                            ArrayRef<Constant *> IdxList) {
+    return getGetElementPtr(Ty, C, IdxList, true);
+  }
+  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                            Constant *Idx) {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return getGetElementPtr(Ty, C, Idx, true);
+  }
+  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                            ArrayRef<Value *> IdxList) {
+    return getGetElementPtr(Ty, C, IdxList, true);
+  }
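+
+  // An illustrative sketch (hypothetical names): the constant equivalent of
+  // &G[0][2] for a global G of type [4 x i8], assuming ArrTy, G, and Int64Ty
+  // already exist:
+  //   Constant *Idxs[] = {ConstantInt::get(Int64Ty, 0),
+  //                       ConstantInt::get(Int64Ty, 2)};
+  //   Constant *Elt = ConstantExpr::getInBoundsGetElementPtr(ArrTy, G, Idxs);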
+
+  static Constant *getExtractElement(Constant *Vec, Constant *Idx,
+                                     Type *OnlyIfReducedTy = nullptr);
+  static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx,
+                                    Type *OnlyIfReducedTy = nullptr);
+  static Constant *getShuffleVector(Constant *V1, Constant *V2, Constant *Mask,
+                                    Type *OnlyIfReducedTy = nullptr);
+  static Constant *getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs,
+                                   Type *OnlyIfReducedTy = nullptr);
+  static Constant *getInsertValue(Constant *Agg, Constant *Val,
+                                  ArrayRef<unsigned> Idxs,
+                                  Type *OnlyIfReducedTy = nullptr);
+
+  /// Return the opcode at the root of this constant expression
+  unsigned getOpcode() const { return getSubclassDataFromValue(); }
+
+  /// Return the ICMP or FCMP predicate value. Assert if this is not an ICMP or
+  /// FCMP constant expression.
+  unsigned getPredicate() const;
+
+  /// Assert that this is an insertvalue or extractvalue
+  /// expression and return the list of indices.
+  ArrayRef<unsigned> getIndices() const;
+
+  /// Return a string representation for an opcode.
+  const char *getOpcodeName() const;
+
+  /// Return a constant expression identical to this one, but with the specified
+  /// operand set to the specified value.
+  Constant *getWithOperandReplaced(unsigned OpNo, Constant *Op) const;
+
+  /// This returns the current constant expression with the operands replaced
+  /// with the specified values. The specified array must have the same number
+  /// of operands as our current one.
+  Constant *getWithOperands(ArrayRef<Constant*> Ops) const {
+    return getWithOperands(Ops, getType());
+  }
+
+  /// Get the current expression with the operands replaced.
+  ///
+  /// Return the current constant expression with the operands replaced with \c
+  /// Ops and the type with \c Ty.  The new operands must have the same number
+  /// as the current ones.
+  ///
+  /// If \c OnlyIfReduced is \c true, nullptr will be returned unless something
+  /// gets constant-folded, the type changes, or the expression is otherwise
+  /// canonicalized.  This parameter should almost always be \c false.
+  Constant *getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
+                            bool OnlyIfReduced = false,
+                            Type *SrcTy = nullptr) const;
+
+  /// Returns an Instruction which implements the same operation as this
+  /// ConstantExpr. The instruction is not linked to any basic block.
+  ///
+  /// A better approach to this could be to have a constructor for Instruction
+  /// which would take a ConstantExpr parameter, but that would have spread
+  /// implementation details of ConstantExpr outside of Constants.cpp, which
+  /// would make it harder to remove ConstantExprs altogether.
+  Instruction *getAsInstruction();
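+
+  // An illustrative sketch: lowering a constant expression into a
+  // free-standing instruction, assuming a Value *V and an existing
+  // insertion point InsertPt:
+  //   if (auto *CE = dyn_cast<ConstantExpr>(V)) {
+  //     Instruction *I = CE->getAsInstruction();
+  //     I->insertBefore(InsertPt);
+  //   }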
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == ConstantExprVal;
+  }
+
+private:
+  // Shadow Value::setValueSubclassData with a private forwarding method so that
+  // subclasses cannot accidentally use it.
+  void setValueSubclassData(unsigned short D) {
+    Value::setValueSubclassData(D);
+  }
+};
+
+template <>
+struct OperandTraits<ConstantExpr> :
+  public VariadicOperandTraits<ConstantExpr, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
+
+//===----------------------------------------------------------------------===//
+/// 'undef' values are things that do not have specified contents.
+/// These are used for a variety of purposes, including global variable
+/// initializers and operands to instructions.  'undef' values can occur with
+/// any first-class type.
+///
+/// Undef values aren't exactly constants; if they have multiple uses, they
+/// can appear to have different bit patterns at each use. See
+/// LangRef.html#undefvalues for details.
+///
+class UndefValue final : public ConstantData {
+  friend class Constant;
+
+  explicit UndefValue(Type *T) : ConstantData(T, UndefValueVal) {}
+
+  void destroyConstantImpl();
+
+public:
+  UndefValue(const UndefValue &) = delete;
+
+  /// Static factory methods - Return an 'undef' object of the specified type.
+  static UndefValue *get(Type *T);
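+
+  // An illustrative sketch, assuming an existing LLVMContext &Ctx:
+  //   UndefValue *U = UndefValue::get(Type::getInt32Ty(Ctx));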
+
+  /// If this Undef has array or vector type, return an undef with the right
+  /// element type.
+  UndefValue *getSequentialElement() const;
+
+  /// If this undef has struct type, return an undef with the right element type
+  /// for the specified element.
+  UndefValue *getStructElement(unsigned Elt) const;
+
+  /// Return an undef of the right value for the specified GEP index if we can,
+  /// otherwise return null (e.g. if C is a ConstantExpr).
+  UndefValue *getElementValue(Constant *C) const;
+
+  /// Return an undef of the right value for the specified GEP index.
+  UndefValue *getElementValue(unsigned Idx) const;
+
+  /// Return the number of elements in the array, vector, or struct.
+  unsigned getNumElements() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == UndefValueVal;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_CONSTANTS_H
diff --git a/linux-x64/clang/include/llvm/IR/DIBuilder.h b/linux-x64/clang/include/llvm/IR/DIBuilder.h
new file mode 100644
index 0000000..aa8a8ec
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DIBuilder.h
@@ -0,0 +1,847 @@
+//===- DIBuilder.h - Debug Information Builder ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DIBuilder that is useful for creating debugging
+// information entries in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIBUILDER_H
+#define LLVM_IR_DIBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cstdint>
+
+namespace llvm {
+
+  class BasicBlock;
+  class Constant;
+  class Function;
+  class Instruction;
+  class LLVMContext;
+  class Module;
+  class Value;
+
+  class DIBuilder {
+    Module &M;
+    LLVMContext &VMContext;
+
+    DICompileUnit *CUNode;   ///< The one compile unit created by this DIBuilder.
+    Function *DeclareFn;     ///< llvm.dbg.declare
+    Function *ValueFn;       ///< llvm.dbg.value
+
+    SmallVector<Metadata *, 4> AllEnumTypes;
+    /// Track the RetainTypes, since they can be updated later on.
+    SmallVector<TrackingMDNodeRef, 4> AllRetainTypes;
+    SmallVector<Metadata *, 4> AllSubprograms;
+    SmallVector<Metadata *, 4> AllGVs;
+    SmallVector<TrackingMDNodeRef, 4> AllImportedModules;
+    /// Map Macro parent (which can be DIMacroFile or nullptr) to a list of
+    /// Metadata all of type DIMacroNode.
+    /// DIMacroNode's with nullptr parent are DICompileUnit direct children.
+    MapVector<MDNode *, SetVector<Metadata *>> AllMacrosPerParent;
+
+    /// Track nodes that may be unresolved.
+    SmallVector<TrackingMDNodeRef, 4> UnresolvedNodes;
+    bool AllowUnresolvedNodes;
+
+    /// Each subprogram's preserved local variables.
+    ///
+    /// Do not use a std::vector.  Some versions of libc++ apparently copy
+    /// instead of move on grow operations, and TrackingMDRef is expensive to
+    /// copy.
+    DenseMap<MDNode *, SmallVector<TrackingMDNodeRef, 1>> PreservedVariables;
+
+    /// Create a temporary.
+    ///
+    /// Create a temporary node and track it in \a UnresolvedNodes.
+    void trackIfUnresolved(MDNode *N);
+
+    /// Internal helper for insertDeclare.
+    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
+                               DIExpression *Expr, const DILocation *DL,
+                               BasicBlock *InsertBB, Instruction *InsertBefore);
+
+    /// Internal helper for insertDbgValueIntrinsic.
+    Instruction *
+    insertDbgValueIntrinsic(llvm::Value *Val, DILocalVariable *VarInfo,
+                            DIExpression *Expr, const DILocation *DL,
+                            BasicBlock *InsertBB, Instruction *InsertBefore);
+
+  public:
+    /// Construct a builder for a module.
+    ///
+    /// If \c AllowUnresolved, collect unresolved nodes attached to the module
+    /// in order to resolve cycles during \a finalize().
+    ///
+    /// If \p CU is given a value other than nullptr, then set \p CUNode to CU.
+    explicit DIBuilder(Module &M, bool AllowUnresolved = true,
+                       DICompileUnit *CU = nullptr);
+    DIBuilder(const DIBuilder &) = delete;
+    DIBuilder &operator=(const DIBuilder &) = delete;
+
+    /// Construct any deferred debug info descriptors.
+    void finalize();
+
+    /// Finalize a specific subprogram - no new variables may be added to this
+    /// subprogram afterwards.
+    void finalizeSubprogram(DISubprogram *SP);
+
+    /// A CompileUnit provides an anchor for all debugging
+    /// information generated during this instance of compilation.
+    /// \param Lang          Source programming language, e.g. dwarf::DW_LANG_C99
+    /// \param File          File info.
+    /// \param Producer      Identify the producer of debugging information
+    ///                      and code.  Usually this is a compiler
+    ///                      version string.
+    /// \param isOptimized   A boolean flag which indicates whether optimization
+    ///                      is enabled or not.
+    /// \param Flags         This string lists command line options. This
+    ///                      string is directly embedded in debug info
+    ///                      output which may be used by a tool
+    ///                      analyzing generated debugging information.
+    /// \param RV            This indicates runtime version for languages like
+    ///                      Objective-C.
+    /// \param SplitName     The name of the file that we'll split debug info
+    ///                      out into.
+    /// \param Kind          The kind of debug information to generate.
+    /// \param DWOId         The DWOId if this is a split skeleton compile unit.
+    /// \param SplitDebugInlining    Whether to emit inline debug info.
+    /// \param DebugInfoForProfiling Whether to emit extra debug info for
+    ///                              profile collection.
+    /// \param GnuPubnames   Whether to emit .debug_gnu_pubnames section instead
+    ///                      of .debug_pubnames.
+    DICompileUnit *
+    createCompileUnit(unsigned Lang, DIFile *File, StringRef Producer,
+                      bool isOptimized, StringRef Flags, unsigned RV,
+                      StringRef SplitName = StringRef(),
+                      DICompileUnit::DebugEmissionKind Kind =
+                          DICompileUnit::DebugEmissionKind::FullDebug,
+                      uint64_t DWOId = 0, bool SplitDebugInlining = true,
+                      bool DebugInfoForProfiling = false,
+                      bool GnuPubnames = false);
+
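+    // An illustrative sketch (not part of the original header), assuming an
+    // existing Module M; finalize() must be called once all debug info has
+    // been created:
+    //   DIBuilder DIB(M);
+    //   DIFile *F = DIB.createFile("a.c", "/tmp");
+    //   DICompileUnit *CU = DIB.createCompileUnit(
+    //       dwarf::DW_LANG_C99, F, "example-frontend", /*isOptimized=*/false,
+    //       /*Flags=*/"", /*RV=*/0);
+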
+    /// Create a file descriptor to hold debugging information for a file.
+    /// \param Filename  File name.
+    /// \param Directory Directory.
+    /// \param Checksum  Optional checksum kind (e.g. CSK_MD5, CSK_SHA1, etc.)
+    ///                  and value.
+    /// \param Source    Optional source text.
+    DIFile *
+    createFile(StringRef Filename, StringRef Directory,
+               Optional<DIFile::ChecksumInfo<StringRef>> Checksum = None,
+               Optional<StringRef> Source = None);
+
+    /// Create debugging information entry for a macro.
+    /// \param Parent     Macro parent (could be nullptr).
+    /// \param Line       Source line number where the macro is defined.
+    /// \param MacroType  DW_MACINFO_define or DW_MACINFO_undef.
+    /// \param Name       Macro name.
+    /// \param Value      Macro value.
+    DIMacro *createMacro(DIMacroFile *Parent, unsigned Line, unsigned MacroType,
+                         StringRef Name, StringRef Value = StringRef());
+
+    /// Create debugging information temporary entry for a macro file.
+    /// The list of macro node direct children will be calculated by DIBuilder,
+    /// using the \p Parent relationship.
+    /// \param Parent     Macro file parent (could be nullptr).
+    /// \param Line       Source line number where the macro file is included.
+    /// \param File       File descriptor containing the name of the macro file.
+    DIMacroFile *createTempMacroFile(DIMacroFile *Parent, unsigned Line,
+                                     DIFile *File);
+
+    /// Create a single enumerator value.
+    DIEnumerator *createEnumerator(StringRef Name, int64_t Val,
+                                   bool IsUnsigned = false);
+
+    /// Create a DWARF unspecified type.
+    DIBasicType *createUnspecifiedType(StringRef Name);
+
+    /// Create C++11 nullptr type.
+    DIBasicType *createNullPtrType();
+
+    /// Create debugging information entry for a basic
+    /// type.
+    /// \param Name        Type name.
+    /// \param SizeInBits  Size of the type.
+    /// \param Encoding    DWARF encoding code, e.g. dwarf::DW_ATE_float.
+    DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits,
+                                 unsigned Encoding);
+
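+    // An illustrative sketch, reusing DIB from the sketch above:
+    //   DIBasicType *IntTy =
+    //       DIB.createBasicType("int", 32, dwarf::DW_ATE_signed);
+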
+    /// Create debugging information entry for a qualified
+    /// type, e.g. 'const int'.
+    /// \param Tag         Tag identifying type, e.g. dwarf::DW_TAG_volatile_type
+    /// \param FromTy      Base Type.
+    DIDerivedType *createQualifiedType(unsigned Tag, DIType *FromTy);
+
+    /// Create debugging information entry for a pointer.
+    /// \param PointeeTy         Type pointed by this pointer.
+    /// \param SizeInBits        Size.
+    /// \param AlignInBits       Alignment. (optional)
+    /// \param DWARFAddressSpace DWARF address space. (optional)
+    /// \param Name              Pointer type name. (optional)
+    DIDerivedType *createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
+                                     uint32_t AlignInBits = 0,
+                                     Optional<unsigned> DWARFAddressSpace =
+                                         None,
+                                     StringRef Name = "");
+
+    /// Create debugging information entry for a pointer to member.
+    /// \param PointeeTy Type pointed to by this pointer.
+    /// \param SizeInBits  Size.
+    /// \param AlignInBits Alignment. (optional)
+    /// \param Class Type for which this pointer points to members of.
+    DIDerivedType *
+    createMemberPointerType(DIType *PointeeTy, DIType *Class,
+                            uint64_t SizeInBits, uint32_t AlignInBits = 0,
+                            DINode::DIFlags Flags = DINode::FlagZero);
+
+    /// Create debugging information entry for a c++
+    /// style reference or rvalue reference type.
+    DIDerivedType *createReferenceType(unsigned Tag, DIType *RTy,
+                                       uint64_t SizeInBits = 0,
+                                       uint32_t AlignInBits = 0,
+                                       Optional<unsigned> DWARFAddressSpace =
+                                           None);
+
+    /// Create debugging information entry for a typedef.
+    /// \param Ty          Original type.
+    /// \param Name        Typedef name.
+    /// \param File        File where this type is defined.
+    /// \param LineNo      Line number.
+    /// \param Context     The surrounding context for the typedef.
+    DIDerivedType *createTypedef(DIType *Ty, StringRef Name, DIFile *File,
+                                 unsigned LineNo, DIScope *Context);
+
+    /// Create debugging information entry for a 'friend'.
+    DIDerivedType *createFriend(DIType *Ty, DIType *FriendTy);
+
+    /// Create debugging information entry to establish
+    /// inheritance relationship between two types.
+    /// \param Ty           Original type.
+    /// \param BaseTy       Base type; \c Ty inherits from it.
+    /// \param BaseOffset   Base offset.
+    /// \param Flags        Flags to describe inheritance attribute,
+    ///                     e.g. private
+    DIDerivedType *createInheritance(DIType *Ty, DIType *BaseTy,
+                                     uint64_t BaseOffset,
+                                     DINode::DIFlags Flags);
+
+    /// Create debugging information entry for a member.
+    /// \param Scope        Member scope.
+    /// \param Name         Member name.
+    /// \param File         File where this member is defined.
+    /// \param LineNo       Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param OffsetInBits Member offset.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Ty           Parent type.
+    DIDerivedType *createMemberType(DIScope *Scope, StringRef Name,
+                                    DIFile *File, unsigned LineNo,
+                                    uint64_t SizeInBits,
+                                    uint32_t AlignInBits,
+                                    uint64_t OffsetInBits,
+                                    DINode::DIFlags Flags, DIType *Ty);
+
+    /// Create debugging information entry for a variant.  A variant
+    /// normally should be a member of a variant part.
+    /// \param Scope        Member scope.
+    /// \param Name         Member name.
+    /// \param File         File where this member is defined.
+    /// \param LineNo       Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param OffsetInBits Member offset.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Discriminant The discriminant for this branch; null for
+    ///                     the default branch
+    /// \param Ty           Parent type.
+    DIDerivedType *
+    createVariantMemberType(DIScope *Scope, StringRef Name, DIFile *File,
+                            unsigned LineNo, uint64_t SizeInBits,
+                            uint32_t AlignInBits, uint64_t OffsetInBits,
+                            Constant *Discriminant, DINode::DIFlags Flags,
+                            DIType *Ty);
+
+    /// Create debugging information entry for a bit field member.
+    /// \param Scope               Member scope.
+    /// \param Name                Member name.
+    /// \param File                File where this member is defined.
+    /// \param LineNo              Line number.
+    /// \param SizeInBits          Member size.
+    /// \param OffsetInBits        Member offset.
+    /// \param StorageOffsetInBits Member storage offset.
+    /// \param Flags               Flags to encode member attribute.
+    /// \param Ty                  Parent type.
+    DIDerivedType *createBitFieldMemberType(
+        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo,
+        uint64_t SizeInBits, uint64_t OffsetInBits,
+        uint64_t StorageOffsetInBits, DINode::DIFlags Flags, DIType *Ty);
+
+    /// Create debugging information entry for a
+    /// C++ static data member.
+    /// \param Scope      Member scope.
+    /// \param Name       Member name.
+    /// \param File       File where this member is declared.
+    /// \param LineNo     Line number.
+    /// \param Ty         Type of the static member.
+    /// \param Flags      Flags to encode member attribute, e.g. private.
+    /// \param Val        Const initializer of the member.
+    /// \param AlignInBits  Member alignment.
+    DIDerivedType *createStaticMemberType(DIScope *Scope, StringRef Name,
+                                          DIFile *File, unsigned LineNo,
+                                          DIType *Ty, DINode::DIFlags Flags,
+                                          Constant *Val,
+                                          uint32_t AlignInBits = 0);
+
+    /// Create debugging information entry for Objective-C
+    /// instance variable.
+    /// \param Name         Member name.
+    /// \param File         File where this member is defined.
+    /// \param LineNo       Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param OffsetInBits Member offset.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Ty           Parent type.
+    /// \param PropertyNode Property associated with this ivar.
+    DIDerivedType *createObjCIVar(StringRef Name, DIFile *File, unsigned LineNo,
+                                  uint64_t SizeInBits, uint32_t AlignInBits,
+                                  uint64_t OffsetInBits, DINode::DIFlags Flags,
+                                  DIType *Ty, MDNode *PropertyNode);
+
+    /// Create debugging information entry for Objective-C
+    /// property.
+    /// \param Name         Property name.
+    /// \param File         File where this property is defined.
+    /// \param LineNumber   Line number.
+    /// \param GetterName   Name of the Objective C property getter selector.
+    /// \param SetterName   Name of the Objective C property setter selector.
+    /// \param PropertyAttributes Objective C property attributes.
+    /// \param Ty           Type.
+    DIObjCProperty *createObjCProperty(StringRef Name, DIFile *File,
+                                       unsigned LineNumber,
+                                       StringRef GetterName,
+                                       StringRef SetterName,
+                                       unsigned PropertyAttributes, DIType *Ty);
+
+    /// Create debugging information entry for a class.
+    /// \param Scope        Scope in which this class is defined.
+    /// \param Name         class name.
+    /// \param File         File where this member is defined.
+    /// \param LineNumber   Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param OffsetInBits Member offset.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Elements     class members.
+    /// \param VTableHolder Debug info of the base class that contains vtable
+    ///                     for this type. This is used in
+    ///                     DW_AT_containing_type. See DWARF documentation
+    ///                     for more info.
+    /// \param TemplateParms Template type parameters.
+    /// \param UniqueIdentifier A unique identifier for the class.
+    DICompositeType *createClassType(
+        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+        uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+        DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements,
+        DIType *VTableHolder = nullptr, MDNode *TemplateParms = nullptr,
+        StringRef UniqueIdentifier = "");
+
+    /// Create debugging information entry for a struct.
+    /// \param Scope        Scope in which this struct is defined.
+    /// \param Name         Struct name.
+    /// \param File         File where this member is defined.
+    /// \param LineNumber   Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Elements     Struct elements.
+    /// \param RunTimeLang  Optional parameter, Objective-C runtime version.
+    /// \param UniqueIdentifier A unique identifier for the struct.
+    DICompositeType *createStructType(
+        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+        uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags,
+        DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang = 0,
+        DIType *VTableHolder = nullptr, StringRef UniqueIdentifier = "");
+
+    /// Create debugging information entry for a union.
+    /// \param Scope        Scope in which this union is defined.
+    /// \param Name         Union name.
+    /// \param File         File where this member is defined.
+    /// \param LineNumber   Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Elements     Union elements.
+    /// \param RunTimeLang  Optional parameter, Objective-C runtime version.
+    /// \param UniqueIdentifier A unique identifier for the union.
+    DICompositeType *createUnionType(DIScope *Scope, StringRef Name,
+                                     DIFile *File, unsigned LineNumber,
+                                     uint64_t SizeInBits, uint32_t AlignInBits,
+                                     DINode::DIFlags Flags,
+                                     DINodeArray Elements,
+                                     unsigned RunTimeLang = 0,
+                                     StringRef UniqueIdentifier = "");
+
+    /// Create debugging information entry for a variant part.  A
+    /// variant part normally has a discriminator (though this is not
+    /// required) and a number of variant children.
+    /// \param Scope        Scope in which this variant part is defined.
+    /// \param Name         Variant part name.
+    /// \param File         File where this member is defined.
+    /// \param LineNumber   Line number.
+    /// \param SizeInBits   Member size.
+    /// \param AlignInBits  Member alignment.
+    /// \param Flags        Flags to encode member attribute, e.g. private
+    /// \param Discriminator Discriminant member
+    /// \param Elements     Variant elements.
+    /// \param UniqueIdentifier A unique identifier for the variant part.
+    DICompositeType *createVariantPart(DIScope *Scope, StringRef Name,
+                                       DIFile *File, unsigned LineNumber,
+                                       uint64_t SizeInBits,
+                                       uint32_t AlignInBits,
+                                       DINode::DIFlags Flags,
+                                       DIDerivedType *Discriminator,
+                                       DINodeArray Elements,
+                                       StringRef UniqueIdentifier = "");
+
+    /// Create debugging information for template
+    /// type parameter.
+    /// \param Scope        Scope in which this type is defined.
+    /// \param Name         Type parameter name.
+    /// \param Ty           Parameter type.
+    DITemplateTypeParameter *
+    createTemplateTypeParameter(DIScope *Scope, StringRef Name, DIType *Ty);
+
+    /// Create debugging information for template
+    /// value parameter.
+    /// \param Scope        Scope in which this type is defined.
+    /// \param Name         Value parameter name.
+    /// \param Ty           Parameter type.
+    /// \param Val          Constant parameter value.
+    DITemplateValueParameter *createTemplateValueParameter(DIScope *Scope,
+                                                           StringRef Name,
+                                                           DIType *Ty,
+                                                           Constant *Val);
+
+    /// Create debugging information for a template template parameter.
+    /// \param Scope        Scope in which this type is defined.
+    /// \param Name         Value parameter name.
+    /// \param Ty           Parameter type.
+    /// \param Val          The fully qualified name of the template.
+    DITemplateValueParameter *createTemplateTemplateParameter(DIScope *Scope,
+                                                              StringRef Name,
+                                                              DIType *Ty,
+                                                              StringRef Val);
+
+    /// Create debugging information for a template parameter pack.
+    /// \param Scope        Scope in which this type is defined.
+    /// \param Name         Value parameter name.
+    /// \param Ty           Parameter type.
+    /// \param Val          An array of types in the pack.
+    DITemplateValueParameter *createTemplateParameterPack(DIScope *Scope,
+                                                          StringRef Name,
+                                                          DIType *Ty,
+                                                          DINodeArray Val);
+
+    /// Create debugging information entry for an array.
+    /// \param Size         Array size.
+    /// \param AlignInBits  Alignment.
+    /// \param Ty           Element type.
+    /// \param Subscripts   Subscripts.
+    DICompositeType *createArrayType(uint64_t Size, uint32_t AlignInBits,
+                                     DIType *Ty, DINodeArray Subscripts);
+
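+    // An illustrative sketch: debug info for "int[10]", assuming IntTy from
+    // the earlier sketch (10 x 32 bits = 320 bits total):
+    //   Metadata *Sub = DIB.getOrCreateSubrange(0, 10);
+    //   DICompositeType *ArrTy =
+    //       DIB.createArrayType(320, 32, IntTy, DIB.getOrCreateArray({Sub}));
+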
+    /// Create debugging information entry for a vector type.
+    /// \param Size         Array size.
+    /// \param AlignInBits  Alignment.
+    /// \param Ty           Element type.
+    /// \param Subscripts   Subscripts.
+    DICompositeType *createVectorType(uint64_t Size, uint32_t AlignInBits,
+                                      DIType *Ty, DINodeArray Subscripts);
+
+    /// Create debugging information entry for an
+    /// enumeration.
+    /// \param Scope          Scope in which this enumeration is defined.
+    /// \param Name           Enumeration name.
+    /// \param File           File where this member is defined.
+    /// \param LineNumber     Line number.
+    /// \param SizeInBits     Member size.
+    /// \param AlignInBits    Member alignment.
+    /// \param Elements       Enumeration elements.
+    /// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
+    /// \param UniqueIdentifier A unique identifier for the enum.
+    /// \param IsFixed        Boolean flag indicating whether this is a
+    ///                       C++11/ObjC fixed enum.
+    DICompositeType *createEnumerationType(
+        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+        uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
+        DIType *UnderlyingType, StringRef UniqueIdentifier = "", bool IsFixed = false);
+
+    /// Create subroutine type.
+    /// \param ParameterTypes  An array of subroutine parameter types. This
+    ///                        includes return type at 0th index.
+    /// \param Flags           E.g.: LValueReference.
+    ///                        These flags are used to emit dwarf attributes.
+    /// \param CC              Calling convention, e.g. dwarf::DW_CC_normal
+    DISubroutineType *
+    createSubroutineType(DITypeRefArray ParameterTypes,
+                         DINode::DIFlags Flags = DINode::FlagZero,
+                         unsigned CC = 0);
+
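+    // An illustrative sketch: the type of "int f(int)"; index 0 holds the
+    // return type, assuming IntTy from the earlier sketch:
+    //   DITypeRefArray Params = DIB.getOrCreateTypeArray({IntTy, IntTy});
+    //   DISubroutineType *FnTy = DIB.createSubroutineType(Params);
+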
+    /// Create a new DIType* with "artificial" flag set.
+    DIType *createArtificialType(DIType *Ty);
+
+    /// Create a new DIType* with the "object pointer"
+    /// flag set.
+    DIType *createObjectPointerType(DIType *Ty);
+
+    /// Create a permanent forward-declared type.
+    DICompositeType *createForwardDecl(unsigned Tag, StringRef Name,
+                                       DIScope *Scope, DIFile *F, unsigned Line,
+                                       unsigned RuntimeLang = 0,
+                                       uint64_t SizeInBits = 0,
+                                       uint32_t AlignInBits = 0,
+                                       StringRef UniqueIdentifier = "");
+
+    /// Create a temporary forward-declared type.
+    DICompositeType *createReplaceableCompositeType(
+        unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
+        unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
+        uint32_t AlignInBits = 0, DINode::DIFlags Flags = DINode::FlagFwdDecl,
+        StringRef UniqueIdentifier = "");
+
+    /// Retain DIScope* in a module even if it is not referenced
+    /// through debug info anchors.
+    void retainType(DIScope *T);
+
+    /// Create unspecified parameter type
+    /// for a subroutine type.
+    DIBasicType *createUnspecifiedParameter();
+
+    /// Get a DINodeArray, create one if required.
+    DINodeArray getOrCreateArray(ArrayRef<Metadata *> Elements);
+
+    /// Get a DIMacroNodeArray, create one if required.
+    DIMacroNodeArray getOrCreateMacroArray(ArrayRef<Metadata *> Elements);
+
+    /// Get a DITypeRefArray, create one if required.
+    DITypeRefArray getOrCreateTypeArray(ArrayRef<Metadata *> Elements);
+
+    /// Create a descriptor for a value range.  This
+    /// implicitly uniques the values returned.
+    DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);
+    DISubrange *getOrCreateSubrange(int64_t Lo, Metadata *CountNode);
+
+    /// Create a new descriptor for the specified variable.
+    /// \param Context     Variable scope.
+    /// \param Name        Name of the variable.
+    /// \param LinkageName Mangled name of the variable.
+    /// \param File        File where this variable is defined.
+    /// \param LineNo      Line number.
+    /// \param Ty          Variable Type.
+    /// \param isLocalToUnit Boolean flag indicating whether this variable is
+    ///                      externally visible or not.
+    /// \param Expr        The location of the global relative to the attached
+    ///                    GlobalVariable.
+    /// \param Decl        Reference to the corresponding declaration.
+    /// \param AlignInBits Variable alignment (or 0 if no alignment attribute
+    ///                    was specified).
+    DIGlobalVariableExpression *createGlobalVariableExpression(
+        DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+        unsigned LineNo, DIType *Ty, bool isLocalToUnit,
+        DIExpression *Expr = nullptr, MDNode *Decl = nullptr,
+        uint32_t AlignInBits = 0);
+
+    /// Identical to createGlobalVariable
+    /// except that the resulting DbgNode is temporary and meant to be RAUWed.
+    DIGlobalVariable *createTempGlobalVariableFwdDecl(
+        DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+        unsigned LineNo, DIType *Ty, bool isLocalToUnit, MDNode *Decl = nullptr,
+        uint32_t AlignInBits = 0);
+
+    /// Create a new descriptor for an auto variable.  This is a local variable
+    /// that is not a subprogram parameter.
+    ///
+    /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
+    /// leads to a \a DISubprogram.
+    ///
+    /// If \c AlwaysPreserve, this variable will be referenced from its
+    /// containing subprogram, and will survive some optimizations.
+    DILocalVariable *
+    createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File,
+                       unsigned LineNo, DIType *Ty, bool AlwaysPreserve = false,
+                       DINode::DIFlags Flags = DINode::FlagZero,
+                       uint32_t AlignInBits = 0);
+
+    /// Create a new descriptor for a parameter variable.
+    ///
+    /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
+    /// leads to a \a DISubprogram.
+    ///
+    /// \c ArgNo is the index (starting from \c 1) of this variable in the
+    /// subprogram parameters.  \c ArgNo should not conflict with other
+    /// parameters of the same subprogram.
+    ///
+    /// If \c AlwaysPreserve, this variable will be referenced from its
+    /// containing subprogram, and will survive some optimizations.
+    DILocalVariable *
+    createParameterVariable(DIScope *Scope, StringRef Name, unsigned ArgNo,
+                            DIFile *File, unsigned LineNo, DIType *Ty,
+                            bool AlwaysPreserve = false,
+                            DINode::DIFlags Flags = DINode::FlagZero);
+
+    /// Create a new descriptor for the specified
+    /// variable which has a complex address expression for its address.
+    /// \param Addr        An array of complex address operations.
+    DIExpression *createExpression(ArrayRef<uint64_t> Addr = None);
+    DIExpression *createExpression(ArrayRef<int64_t> Addr);
+
+    /// Create an expression for a variable that does not have an address, but
+    /// does have a constant value.
+    DIExpression *createConstantValueExpression(uint64_t Val) {
+      return DIExpression::get(
+          VMContext, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_stack_value});
+    }
+
+    /// Create a new descriptor for the specified subprogram.
+    /// See comments in DISubprogram* for descriptions of these fields.
+    /// \param Scope         Function scope.
+    /// \param Name          Function name.
+    /// \param LinkageName   Mangled function name.
+    /// \param File          File where this variable is defined.
+    /// \param LineNo        Line number.
+    /// \param Ty            Function type.
+    /// \param isLocalToUnit True if this function is not externally visible.
+    /// \param isDefinition  True if this is a function definition.
+    /// \param ScopeLine     Set to the beginning of the scope this starts
+    /// \param Flags         e.g. is this function prototyped or not.
+    ///                      These flags are used to emit dwarf attributes.
+    /// \param isOptimized   True if optimization is ON.
+    /// \param TParams       Function template parameters.
+    /// \param ThrownTypes   Exception types this function may throw.
+    DISubprogram *createFunction(
+        DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+        unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+        bool isDefinition, unsigned ScopeLine,
+        DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+        DITemplateParameterArray TParams = nullptr,
+        DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
+
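+    // An illustrative sketch, assuming CU, F, and FnTy from the earlier
+    // sketches and an llvm::Function *Fn being compiled:
+    //   DISubprogram *SP = DIB.createFunction(
+    //       CU, "f", /*LinkageName=*/"f", F, /*LineNo=*/1, FnTy,
+    //       /*isLocalToUnit=*/false, /*isDefinition=*/true, /*ScopeLine=*/1);
+    //   Fn->setSubprogram(SP);
+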
+    /// Identical to createFunction,
+    /// except that the resulting DbgNode is meant to be RAUWed.
+    DISubprogram *createTempFunctionFwdDecl(
+        DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+        unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+        bool isDefinition, unsigned ScopeLine,
+        DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+        DITemplateParameterArray TParams = nullptr,
+        DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
+
+    /// Create a new descriptor for the specified C++ method.
+    /// See comments in \a DISubprogram* for descriptions of these fields.
+    /// \param Scope         Function scope.
+    /// \param Name          Function name.
+    /// \param LinkageName   Mangled function name.
+    /// \param File          File where this variable is defined.
+    /// \param LineNo        Line number.
+    /// \param Ty            Function type.
+    /// \param isLocalToUnit True if this function is not externally visible.
+    /// \param isDefinition  True if this is a function definition.
+    /// \param Virtuality    Attributes describing virtualness, e.g. pure
+    ///                      virtual function.
+    /// \param VTableIndex   Index of this method in the virtual table, or -1u
+    ///                      if unrepresentable.
+    /// \param ThisAdjustment
+    ///                      MS ABI-specific adjustment of 'this' that occurs
+    ///                      in the prologue.
+    /// \param VTableHolder  Type that holds vtable.
+    /// \param Flags         e.g. is this function prototyped or not.
+    ///                      These flags are used to emit dwarf attributes.
+    /// \param isOptimized   True if optimization is ON.
+    /// \param TParams       Function template parameters.
+    /// \param ThrownTypes   Exception types this function may throw.
+    DISubprogram *createMethod(
+        DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+        unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+        bool isDefinition, unsigned Virtuality = 0, unsigned VTableIndex = 0,
+        int ThisAdjustment = 0, DIType *VTableHolder = nullptr,
+        DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+        DITemplateParameterArray TParams = nullptr,
+        DITypeArray ThrownTypes = nullptr);
+
+    /// This creates a new descriptor for a namespace with the specified
+    /// parent scope.
+    /// \param Scope       Namespace scope
+    /// \param Name        Name of this namespace
+    /// \param ExportSymbols True for C++ inline namespaces.
+    DINamespace *createNameSpace(DIScope *Scope, StringRef Name,
+                                 bool ExportSymbols);
+
+    /// This creates a new descriptor for a module with the specified
+    /// parent scope.
+    /// \param Scope       Parent scope
+    /// \param Name        Name of this module
+    /// \param ConfigurationMacros
+    ///                    A space-separated shell-quoted list of -D macro
+    ///                    definitions as they would appear on a command line.
+    /// \param IncludePath The path to the module map file.
+    /// \param ISysRoot    The clang system root (value of -isysroot).
+    DIModule *createModule(DIScope *Scope, StringRef Name,
+                           StringRef ConfigurationMacros,
+                           StringRef IncludePath,
+                           StringRef ISysRoot);
+
+    /// This creates a descriptor for a lexical block with a new file
+    /// attached. This merely extends the existing
+    /// lexical block as it crosses a file.
+    /// \param Scope       Lexical block.
+    /// \param File        Source file.
+    /// \param Discriminator DWARF path discriminator value.
+    DILexicalBlockFile *createLexicalBlockFile(DIScope *Scope, DIFile *File,
+                                               unsigned Discriminator = 0);
+
+    /// This creates a descriptor for a lexical block with the
+    /// specified parent context.
+    /// \param Scope         Parent lexical scope.
+    /// \param File          Source file.
+    /// \param Line          Line number.
+    /// \param Col           Column number.
+    DILexicalBlock *createLexicalBlock(DIScope *Scope, DIFile *File,
+                                       unsigned Line, unsigned Col);
+
+    /// Create a descriptor for an imported module.
+    /// \param Context The scope this module is imported into
+    /// \param NS      The namespace being imported here.
+    /// \param File    File where the declaration is located.
+    /// \param Line    Line number of the declaration.
+    DIImportedEntity *createImportedModule(DIScope *Context, DINamespace *NS,
+                                           DIFile *File, unsigned Line);
+
+    /// Create a descriptor for an imported module.
+    /// \param Context The scope this module is imported into.
+    /// \param NS      An aliased namespace.
+    /// \param File    File where the declaration is located.
+    /// \param Line    Line number of the declaration.
+    DIImportedEntity *createImportedModule(DIScope *Context,
+                                           DIImportedEntity *NS, DIFile *File,
+                                           unsigned Line);
+
+    /// Create a descriptor for an imported module.
+    /// \param Context The scope this module is imported into.
+    /// \param M       The module being imported here
+    /// \param File    File where the declaration is located.
+    /// \param Line    Line number of the declaration.
+    DIImportedEntity *createImportedModule(DIScope *Context, DIModule *M,
+                                           DIFile *File, unsigned Line);
+
+    /// Create a descriptor for an imported function.
+    /// \param Context The scope this module is imported into.
+    /// \param Decl    The declaration (or definition) of a function, type, or
+    ///                variable.
+    /// \param File    File where the declaration is located.
+    /// \param Line    Line number of the declaration.
+    DIImportedEntity *createImportedDeclaration(DIScope *Context, DINode *Decl,
+                                                DIFile *File, unsigned Line,
+                                                StringRef Name = "");
+
+    /// Insert a new llvm.dbg.declare intrinsic call.
+    /// \param Storage     llvm::Value of the variable
+    /// \param VarInfo     Variable's debug info descriptor.
+    /// \param Expr        A complex location expression.
+    /// \param DL          Debug info location.
+    /// \param InsertAtEnd Location for the new intrinsic.
+    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
+                               DIExpression *Expr, const DILocation *DL,
+                               BasicBlock *InsertAtEnd);
+
+    /// Insert a new llvm.dbg.declare intrinsic call.
+    /// \param Storage      llvm::Value of the variable
+    /// \param VarInfo      Variable's debug info descriptor.
+    /// \param Expr         A complex location expression.
+    /// \param DL           Debug info location.
+    /// \param InsertBefore Location for the new intrinsic.
+    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
+                               DIExpression *Expr, const DILocation *DL,
+                               Instruction *InsertBefore);
+
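+    // An illustrative sketch, assuming SP, F, and IntTy from the earlier
+    // sketches, an AllocaInst *Alloca, and a DILocation *Loc scoped within
+    // SP:
+    //   DILocalVariable *Var =
+    //       DIB.createAutoVariable(SP, "x", F, /*LineNo=*/2, IntTy);
+    //   DIB.insertDeclare(Alloca, Var, DIB.createExpression(), Loc,
+    //                     Alloca->getParent());
+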
+    /// Insert a new llvm.dbg.value intrinsic call.
+    /// \param Val          llvm::Value of the variable
+    /// \param VarInfo      Variable's debug info descriptor.
+    /// \param Expr         A complex location expression.
+    /// \param DL           Debug info location.
+    /// \param InsertAtEnd  Location for the new intrinsic.
+    Instruction *insertDbgValueIntrinsic(llvm::Value *Val,
+                                         DILocalVariable *VarInfo,
+                                         DIExpression *Expr,
+                                         const DILocation *DL,
+                                         BasicBlock *InsertAtEnd);
+
+    /// Insert a new llvm.dbg.value intrinsic call.
+    /// \param Val          llvm::Value of the variable
+    /// \param VarInfo      Variable's debug info descriptor.
+    /// \param Expr         A complex location expression.
+    /// \param DL           Debug info location.
+    /// \param InsertBefore Location for the new intrinsic.
+    Instruction *insertDbgValueIntrinsic(llvm::Value *Val,
+                                         DILocalVariable *VarInfo,
+                                         DIExpression *Expr,
+                                         const DILocation *DL,
+                                         Instruction *InsertBefore);
+
+    /// Replace the vtable holder in the given type.
+    ///
+    /// If this creates a self reference, it may orphan some unresolved cycles
+    /// in the operands of \c T, so \a DIBuilder needs to track that.
+    void replaceVTableHolder(DICompositeType *&T,
+                             DIType *VTableHolder);
+
+    /// Replace arrays on a composite type.
+    ///
+    /// If \c T is resolved, but the arrays aren't -- which can happen if \c T
+    /// has a self-reference -- \a DIBuilder needs to track the array to
+    /// resolve cycles.
+    void replaceArrays(DICompositeType *&T, DINodeArray Elements,
+                       DINodeArray TParams = DINodeArray());
+
+    /// Replace a temporary node.
+    ///
+    /// Call \a MDNode::replaceAllUsesWith() on \c N, replacing it with \c
+    /// Replacement.
+    ///
+    /// If \c Replacement is the same as \c N.get(), instead call \a
+    /// MDNode::replaceWithUniqued().  In this case, the uniqued node could
+    /// have a different address, so we return the final address.
+    template <class NodeTy>
+    NodeTy *replaceTemporary(TempMDNode &&N, NodeTy *Replacement) {
+      if (N.get() == Replacement)
+        return cast<NodeTy>(MDNode::replaceWithUniqued(std::move(N)));
+
+      N->replaceAllUsesWith(Replacement);
+      return Replacement;
+    }
+  };
+
+  // Create wrappers for C Binding types (see CBindingWrapping.h).
+  DEFINE_ISA_CONVERSION_FUNCTIONS(DIBuilder, LLVMDIBuilderRef)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DIBUILDER_H
diff --git a/linux-x64/clang/include/llvm/IR/DataLayout.h b/linux-x64/clang/include/llvm/IR/DataLayout.h
new file mode 100644
index 0000000..c48e140
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DataLayout.h
@@ -0,0 +1,602 @@
+//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines layout properties related to datatype size/offset/alignment
+// information.  It uses lazy annotations to cache information about how
+// structure types are laid out and used.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct and then passed around by const&.  None of the member functions
+// require modification to the object.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DATALAYOUT_H
+#define LLVM_IR_DATALAYOUT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+// This needs to be outside of the namespace, to avoid conflict with llvm-c
+// decl.
+using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
+
+namespace llvm {
+
+class GlobalVariable;
+class LLVMContext;
+class Module;
+class StructLayout;
+class Triple;
+class Value;
+
+/// Enum used to categorize the alignment types stored by LayoutAlignElem
+enum AlignTypeEnum {
+  INVALID_ALIGN = 0,
+  INTEGER_ALIGN = 'i',
+  VECTOR_ALIGN = 'v',
+  FLOAT_ALIGN = 'f',
+  AGGREGATE_ALIGN = 'a'
+};
+
+// FIXME: Currently the DataLayout string carries a "preferred alignment"
+// for types. As the DataLayout is module/global, this should likely be
+// sunk down to an FTTI element that is queried rather than a global
+// preference.
+
+/// \brief Layout alignment element.
+///
+/// Stores the alignment data associated with a given alignment type (integer,
+/// vector, float) and type bit width.
+///
+/// \note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct LayoutAlignElem {
+  /// \brief Alignment type from \c AlignTypeEnum
+  unsigned AlignType : 8;
+  unsigned TypeBitWidth : 24;
+  unsigned ABIAlign : 16;
+  unsigned PrefAlign : 16;
+
+  static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+                             unsigned pref_align, uint32_t bit_width);
+
+  bool operator==(const LayoutAlignElem &rhs) const;
+};
+
+/// \brief Layout pointer alignment element.
+///
+/// Stores the alignment data associated with a given pointer and address space.
+///
+/// \note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct PointerAlignElem {
+  unsigned ABIAlign;
+  unsigned PrefAlign;
+  uint32_t TypeByteWidth;
+  uint32_t AddressSpace;
+  uint32_t IndexWidth;
+
+  /// Initializer
+  static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
+                              unsigned PrefAlign, uint32_t TypeByteWidth,
+                              uint32_t IndexWidth);
+
+  bool operator==(const PointerAlignElem &rhs) const;
+};
+
+/// \brief A parsed version of the target data layout string and methods for
+/// querying it.
+///
+/// The target data layout string is specified *by the target* - a frontend
+/// generating LLVM IR is required to generate the right target data for the
+/// target being codegen'd to.
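+///
+/// An illustrative example (not from the original header): a typical layout
+/// string for a little-endian x86-64 target is
+/// "e-m:e-i64:64-f80:128-n8:16:32:64-S128", which can be parsed with
+/// DataLayout DL(Str).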
+class DataLayout {
+private:
+  /// Defaults to false.
+  bool BigEndian;
+
+  unsigned AllocaAddrSpace;
+  unsigned StackNaturalAlign;
+  unsigned ProgramAddrSpace;
+
+  enum ManglingModeT {
+    MM_None,
+    MM_ELF,
+    MM_MachO,
+    MM_WinCOFF,
+    MM_WinCOFFX86,
+    MM_Mips
+  };
+  ManglingModeT ManglingMode;
+
+  SmallVector<unsigned char, 8> LegalIntWidths;
+
+  /// \brief Primitive type alignment data. This is sorted by type and bit
+  /// width during construction.
+  using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
+  AlignmentsTy Alignments;
+
+  AlignmentsTy::const_iterator
+  findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth) const {
+    return const_cast<DataLayout *>(this)->findAlignmentLowerBound(AlignType,
+                                                                   BitWidth);
+  }
+
+  AlignmentsTy::iterator
+  findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth);
+
+  /// \brief The string representation used to create this DataLayout
+  std::string StringRepresentation;
+
+  using PointersTy = SmallVector<PointerAlignElem, 8>;
+  PointersTy Pointers;
+
+  PointersTy::const_iterator
+  findPointerLowerBound(uint32_t AddressSpace) const {
+    return const_cast<DataLayout *>(this)->findPointerLowerBound(AddressSpace);
+  }
+
+  PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
+
+  // The StructType -> StructLayout map.
+  mutable void *LayoutMap = nullptr;
+
+  /// Pointers in these address spaces are non-integral, and don't have a
+  /// well-defined bitwise representation.
+  SmallVector<unsigned, 8> NonIntegralAddressSpaces;
+
+  void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+                    unsigned pref_align, uint32_t bit_width);
+  unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+                            bool ABIAlign, Type *Ty) const;
+  void setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
+                           unsigned PrefAlign, uint32_t TypeByteWidth,
+                           uint32_t IndexWidth);
+
+  /// Internal helper method that returns requested alignment for type.
+  unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
+
+  /// Parses a target data specification string. Asserts if the string is
+  /// malformed.
+  void parseSpecifier(StringRef LayoutDescription);
+
+  // Free all internal data structures.
+  void clear();
+
+public:
+  /// Constructs a DataLayout from a specification string. See reset().
+  explicit DataLayout(StringRef LayoutDescription) {
+    reset(LayoutDescription);
+  }
+
+  /// Initialize target data from properties stored in the module.
+  explicit DataLayout(const Module *M);
+
+  DataLayout(const DataLayout &DL) { *this = DL; }
+
+  ~DataLayout(); // Not virtual, do not subclass this class
+
+  DataLayout &operator=(const DataLayout &DL) {
+    clear();
+    StringRepresentation = DL.StringRepresentation;
+    BigEndian = DL.isBigEndian();
+    AllocaAddrSpace = DL.AllocaAddrSpace;
+    StackNaturalAlign = DL.StackNaturalAlign;
+    ProgramAddrSpace = DL.ProgramAddrSpace;
+    ManglingMode = DL.ManglingMode;
+    LegalIntWidths = DL.LegalIntWidths;
+    Alignments = DL.Alignments;
+    Pointers = DL.Pointers;
+    NonIntegralAddressSpaces = DL.NonIntegralAddressSpaces;
+    return *this;
+  }
+
+  bool operator==(const DataLayout &Other) const;
+  bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
+
+  void init(const Module *M);
+
+  /// Parse a data layout string (with fallback to default values).
+  void reset(StringRef LayoutDescription);
+
+  /// Layout endianness...
+  bool isLittleEndian() const { return !BigEndian; }
+  bool isBigEndian() const { return BigEndian; }
+
+  /// \brief Returns the string representation of the DataLayout.
+  ///
+  /// This representation is in the same format accepted by the string
+  /// constructor above. It should not be used to compare two DataLayouts,
+  /// since different strings can represent the same layout.
+  const std::string &getStringRepresentation() const {
+    return StringRepresentation;
+  }
+
+  /// \brief Test if the DataLayout was constructed from an empty string.
+  bool isDefault() const { return StringRepresentation.empty(); }
+
+  /// \brief Returns true if the specified type is known to be a native integer
+  /// type supported by the CPU.
+  ///
+  /// For example, i64 is not native on most 32-bit CPUs and i37 is not native
+  /// on any known one. This returns false if the integer width is not legal.
+  ///
+  /// The width is specified in bits.
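+  ///
+  /// A sketch (hedged; \c DL is a DataLayout whose "n" spec includes 32):
+  /// \code
+  ///   bool OK  = DL.isLegalInteger(32); // true on such a target
+  ///   bool Bad = DL.isLegalInteger(37); // false on any known target
+  /// \endcode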
+  bool isLegalInteger(uint64_t Width) const {
+    for (unsigned LegalIntWidth : LegalIntWidths)
+      if (LegalIntWidth == Width)
+        return true;
+    return false;
+  }
+
+  bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
+
+  /// Returns true if the given alignment exceeds the natural stack alignment.
+  bool exceedsNaturalStackAlignment(unsigned Align) const {
+    return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
+  }
+
+  unsigned getStackAlignment() const { return StackNaturalAlign; }
+  unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }
+
+  unsigned getProgramAddressSpace() const { return ProgramAddrSpace; }
+
+  bool hasMicrosoftFastStdCallMangling() const {
+    return ManglingMode == MM_WinCOFFX86;
+  }
+
+  /// Returns true if symbols with leading question marks should not receive IR
+  /// mangling. True for Windows mangling modes.
+  bool doNotMangleLeadingQuestionMark() const {
+    return ManglingMode == MM_WinCOFF || ManglingMode == MM_WinCOFFX86;
+  }
+
+  bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }
+
+  StringRef getLinkerPrivateGlobalPrefix() const {
+    if (ManglingMode == MM_MachO)
+      return "l";
+    return "";
+  }
+
+  char getGlobalPrefix() const {
+    switch (ManglingMode) {
+    case MM_None:
+    case MM_ELF:
+    case MM_Mips:
+    case MM_WinCOFF:
+      return '\0';
+    case MM_MachO:
+    case MM_WinCOFFX86:
+      return '_';
+    }
+    llvm_unreachable("invalid mangling mode");
+  }
+
+  StringRef getPrivateGlobalPrefix() const {
+    switch (ManglingMode) {
+    case MM_None:
+      return "";
+    case MM_ELF:
+    case MM_WinCOFF:
+      return ".L";
+    case MM_Mips:
+      return "$";
+    case MM_MachO:
+    case MM_WinCOFFX86:
+      return "L";
+    }
+    llvm_unreachable("invalid mangling mode");
+  }
+
+  static const char *getManglingComponent(const Triple &T);
+
+  /// \brief Returns true if the specified type fits in a native integer type
+  /// supported by the CPU.
+  ///
+  /// For example, if the CPU only supports i32 as a native integer type, then
+  /// i27 fits in a legal integer type but i45 does not.
+  bool fitsInLegalInteger(unsigned Width) const {
+    for (unsigned LegalIntWidth : LegalIntWidths)
+      if (Width <= LegalIntWidth)
+        return true;
+    return false;
+  }
+
+  /// Layout pointer alignment
+  unsigned getPointerABIAlignment(unsigned AS) const;
+
+  /// Return the target's alignment for stack-based pointers.
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerPrefAlignment(unsigned AS = 0) const;
+
+  /// Layout pointer size
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSize(unsigned AS = 0) const;
+
+  /// Index size used for address calculation.
+  unsigned getIndexSize(unsigned AS) const;
+
+  /// Return the address spaces containing non-integral pointers.  Pointers in
+  /// this address space don't have a well-defined bitwise representation.
+  ArrayRef<unsigned> getNonIntegralAddressSpaces() const {
+    return NonIntegralAddressSpaces;
+  }
+
+  bool isNonIntegralPointerType(PointerType *PT) const {
+    ArrayRef<unsigned> NonIntegralSpaces = getNonIntegralAddressSpaces();
+    return find(NonIntegralSpaces, PT->getAddressSpace()) !=
+           NonIntegralSpaces.end();
+  }
+
+  bool isNonIntegralPointerType(Type *Ty) const {
+    auto *PTy = dyn_cast<PointerType>(Ty);
+    return PTy && isNonIntegralPointerType(PTy);
+  }
+
+  /// Layout pointer size, in bits
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSizeInBits(unsigned AS = 0) const {
+    return getPointerSize(AS) * 8;
+  }
+
+  /// Size in bits of index used for address calculation in getelementptr.
+  unsigned getIndexSizeInBits(unsigned AS) const {
+    return getIndexSize(AS) * 8;
+  }
+
+  /// Layout pointer size, in bits, based on the type.  If this function is
+  /// called with a pointer type, then the type size of the pointer is
+  /// returned.  If this function is called with a vector of pointers, then the
+  /// type size of a pointer element is returned.  This should only be called
+  /// with a pointer or vector of pointers.
+  unsigned getPointerTypeSizeInBits(Type *) const;
+
+  /// Layout size of the index used in GEP calculation.
+  /// The function should be called with pointer or vector of pointers type.
+  unsigned getIndexTypeSizeInBits(Type *Ty) const;
+
+  unsigned getPointerTypeSize(Type *Ty) const {
+    return getPointerTypeSizeInBits(Ty) / 8;
+  }
+
+  /// Size examples:
+  ///
+  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
+  /// ----        ----------  ---------------  ---------------
+  ///  i1            1           8                8
+  ///  i8            8           8                8
+  ///  i19          19          24               32
+  ///  i32          32          32               32
+  ///  i100        100         104              128
+  ///  i128        128         128              128
+  ///  Float        32          32               32
+  ///  Double       64          64               64
+  ///  X86_FP80     80          80               96
+  ///
+  /// [*] The alloc size depends on the alignment, and thus on the target.
+  ///     These values are for x86-32 linux.
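+  ///
+  /// A query sketch matching the table above (hedged; \c Ctx and \c DL are
+  /// assumed to be an LLVMContext and a DataLayout for such a target):
+  /// \code
+  ///   Type *I19 = Type::getIntNTy(Ctx, 19);
+  ///   uint64_t SizeBits = DL.getTypeSizeInBits(I19); // 19
+  ///   uint64_t StoreSz  = DL.getTypeStoreSize(I19);  // 3 bytes (24 bits)
+  ///   uint64_t AllocSz  = DL.getTypeAllocSize(I19);  // 4 bytes (32 bits)
+  /// \endcode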
+
+  /// \brief Returns the number of bits necessary to hold the specified type.
+  ///
+  /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
+  /// have a size (Type::isSized() must return true).
+  uint64_t getTypeSizeInBits(Type *Ty) const;
+
+  /// \brief Returns the maximum number of bytes that may be overwritten by
+  /// storing the specified type.
+  ///
+  /// For example, returns 5 for i36 and 10 for x86_fp80.
+  uint64_t getTypeStoreSize(Type *Ty) const {
+    return (getTypeSizeInBits(Ty) + 7) / 8;
+  }
+
+  /// \brief Returns the maximum number of bits that may be overwritten by
+  /// storing the specified type; always a multiple of 8.
+  ///
+  /// For example, returns 40 for i36 and 80 for x86_fp80.
+  uint64_t getTypeStoreSizeInBits(Type *Ty) const {
+    return 8 * getTypeStoreSize(Ty);
+  }
+
+  /// \brief Returns the offset in bytes between successive objects of the
+  /// specified type, including alignment padding.
+  ///
+  /// This is the amount that alloca reserves for this type. For example,
+  /// returns 12 or 16 for x86_fp80, depending on alignment.
+  uint64_t getTypeAllocSize(Type *Ty) const {
+    // Round up to the next alignment boundary.
+    return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
+  }
+
+  /// \brief Returns the offset in bits between successive objects of the
+  /// specified type, including alignment padding; always a multiple of 8.
+  ///
+  /// This is the amount that alloca reserves for this type. For example,
+  /// returns 96 or 128 for x86_fp80, depending on alignment.
+  uint64_t getTypeAllocSizeInBits(Type *Ty) const {
+    return 8 * getTypeAllocSize(Ty);
+  }
+
+  /// \brief Returns the minimum ABI-required alignment for the specified type.
+  unsigned getABITypeAlignment(Type *Ty) const;
+
+  /// \brief Returns the minimum ABI-required alignment for an integer type of
+  /// the specified bitwidth.
+  unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
+
+  /// \brief Returns the preferred stack/global alignment for the specified
+  /// type.
+  ///
+  /// This is always at least as good as the ABI alignment.
+  unsigned getPrefTypeAlignment(Type *Ty) const;
+
+  /// \brief Returns the preferred alignment for the specified type, returned as
+  /// log2 of the value (a shift amount).
+  unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
+
+  /// \brief Returns an integer type with size at least as big as that of a
+  /// pointer in the given address space.
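+  ///
+  /// A sketch (hedged; \c Ctx is assumed to be an LLVMContext):
+  /// \code
+  ///   IntegerType *IntPtrTy = DL.getIntPtrType(Ctx); // e.g. i64 on LP64
+  /// \endcode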
+  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
+
+  /// \brief Returns an integer (vector of integer) type with size at least as
+  /// big as that of a pointer of the given pointer (vector of pointer) type.
+  Type *getIntPtrType(Type *) const;
+
+  /// \brief Returns the smallest integer type with size at least as big as
+  /// Width bits.
+  Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;
+
+  /// \brief Returns the largest legal integer type, or null if none are set.
+  Type *getLargestLegalIntType(LLVMContext &C) const {
+    unsigned LargestSize = getLargestLegalIntTypeSizeInBits();
+    return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
+  }
+
+  /// \brief Returns the size, in bits, of the largest legal integer type, or 0
+  /// if none are set.
+  unsigned getLargestLegalIntTypeSizeInBits() const;
+
+  /// \brief Returns the type of a GEP index.
+  /// If it was not specified explicitly, it will be the integer type of the
+  /// pointer width, i.e. IntPtrType.
+  Type *getIndexType(Type *PtrTy) const;
+
+  /// \brief Returns the offset from the beginning of the type for the specified
+  /// indices.
+  ///
+  /// Note that this takes the element type, not the pointer type.
+  /// This is used to implement getelementptr.
+  int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef<Value *> Indices) const;
+
+  /// \brief Returns a StructLayout object, indicating the alignment of the
+  /// struct, its size, and the offsets of its fields.
+  ///
+  /// Note that this information is lazily cached.
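+  ///
+  /// A usage sketch (hedged; \c STy is assumed to be a StructType* at hand):
+  /// \code
+  ///   const StructLayout *SL = DL.getStructLayout(STy);
+  ///   uint64_t Size = SL->getSizeInBytes();
+  ///   uint64_t Off1 = SL->getElementOffset(1);
+  /// \endcode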
+  const StructLayout *getStructLayout(StructType *Ty) const;
+
+  /// \brief Returns the preferred alignment of the specified global.
+  ///
+  /// This includes an explicitly requested alignment (if the global has one).
+  unsigned getPreferredAlignment(const GlobalVariable *GV) const;
+
+  /// \brief Returns the preferred alignment of the specified global, returned
+  /// in log form.
+  ///
+  /// This includes an explicitly requested alignment (if the global has one).
+  unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
+};
+
+inline DataLayout *unwrap(LLVMTargetDataRef P) {
+  return reinterpret_cast<DataLayout *>(P);
+}
+
+inline LLVMTargetDataRef wrap(const DataLayout *P) {
+  return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
+}
+
+/// Used to lazily calculate structure layout information for a target machine,
+/// based on the DataLayout structure.
+class StructLayout {
+  uint64_t StructSize;
+  unsigned StructAlignment;
+  unsigned IsPadded : 1;
+  unsigned NumElements : 31;
+  uint64_t MemberOffsets[1]; // variable sized array!
+
+public:
+  uint64_t getSizeInBytes() const { return StructSize; }
+
+  uint64_t getSizeInBits() const { return 8 * StructSize; }
+
+  unsigned getAlignment() const { return StructAlignment; }
+
+  /// Returns whether the struct has padding between its fields.
+  /// Note that padding within nested elements is not taken into account.
+  bool hasPadding() const { return IsPadded; }
+
+  /// \brief Given a valid byte offset into the structure, returns the structure
+  /// index that contains it.
+  unsigned getElementContainingOffset(uint64_t Offset) const;
+
+  uint64_t getElementOffset(unsigned Idx) const {
+    assert(Idx < NumElements && "Invalid element idx!");
+    return MemberOffsets[Idx];
+  }
+
+  uint64_t getElementOffsetInBits(unsigned Idx) const {
+    return getElementOffset(Idx) * 8;
+  }
+
+private:
+  friend class DataLayout; // Only DataLayout can create this class
+
+  StructLayout(StructType *ST, const DataLayout &DL);
+};
+
+// The implementation of this method is provided inline as it is particularly
+// well suited to constant folding when called on a specific Type subclass.
+inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
+  assert(Ty->isSized() && "Cannot getTypeSizeInBits() on a type that is unsized!");
+  switch (Ty->getTypeID()) {
+  case Type::LabelTyID:
+    return getPointerSizeInBits(0);
+  case Type::PointerTyID:
+    return getPointerSizeInBits(Ty->getPointerAddressSpace());
+  case Type::ArrayTyID: {
+    ArrayType *ATy = cast<ArrayType>(Ty);
+    return ATy->getNumElements() *
+           getTypeAllocSizeInBits(ATy->getElementType());
+  }
+  case Type::StructTyID:
+    // Get the layout annotation... which is lazily created on demand.
+    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
+  case Type::IntegerTyID:
+    return Ty->getIntegerBitWidth();
+  case Type::HalfTyID:
+    return 16;
+  case Type::FloatTyID:
+    return 32;
+  case Type::DoubleTyID:
+  case Type::X86_MMXTyID:
+    return 64;
+  case Type::PPC_FP128TyID:
+  case Type::FP128TyID:
+    return 128;
+  // In memory objects this is always aligned to a higher boundary, but
+  // only 80 bits contain information.
+  case Type::X86_FP80TyID:
+    return 80;
+  case Type::VectorTyID: {
+    VectorType *VTy = cast<VectorType>(Ty);
+    return VTy->getNumElements() * getTypeSizeInBits(VTy->getElementType());
+  }
+  default:
+    llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DATALAYOUT_H
diff --git a/linux-x64/clang/include/llvm/IR/DebugInfo.h b/linux-x64/clang/include/llvm/IR/DebugInfo.h
new file mode 100644
index 0000000..1d8e7e2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DebugInfo.h
@@ -0,0 +1,138 @@
+//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a bunch of datatypes that are useful for creating and
+// walking debug info in LLVM IR form. They essentially provide wrappers around
+// the information in the global variables that's needed when constructing the
+// DWARF information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGINFO_H
+#define LLVM_IR_DEBUGINFO_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+
+namespace llvm {
+
+class DbgDeclareInst;
+class DbgValueInst;
+class Module;
+
+/// \brief Find subprogram that is enclosing this scope.
+DISubprogram *getDISubprogram(const MDNode *Scope);
+
+/// \brief Strip debug info in the module if it exists.
+///
+/// To do this, we remove all calls to the debugger intrinsics and any named
+/// metadata for debugging. We also remove debug locations for instructions.
+/// Return true if module is modified.
+bool StripDebugInfo(Module &M);
+bool stripDebugInfo(Function &F);
+
+/// Downgrade the debug info in a module to contain only line table information.
+///
+/// In order to convert debug info to what -gline-tables-only would have
+/// created, this does the following:
+///   1) Delete all debug intrinsics.
+///   2) Delete all non-CU named metadata debug info nodes.
+///   3) Create new DebugLocs for each instruction.
+///   4) Create a new CU debug info, and similarly for every metadata node
+///      that's reachable from the CU debug info.
+///   All debug type metadata nodes are unreachable and garbage collected.
+bool stripNonLineTableDebugInfo(Module &M);
+
+/// \brief Return Debug Info Metadata Version by checking module flags.
+unsigned getDebugMetadataVersionFromModule(const Module &M);
+
+/// \brief Utility to find all debug info in a module.
+///
+/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
+/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
+/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
+/// DbgValueInst and DbgLoc attached to instructions. processModule will go
+/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
+/// used by the CUs.
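+///
+/// A minimal usage sketch (hedged; \c M is assumed to be a Module):
+/// \code
+///   DebugInfoFinder Finder;
+///   Finder.processModule(M);
+///   for (DICompileUnit *CU : Finder.compile_units())
+///     (void)CU; // inspect each compile unit
+/// \endcode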
+class DebugInfoFinder {
+public:
+  /// \brief Process entire module and collect debug info anchors.
+  void processModule(const Module &M);
+
+  /// \brief Process DbgDeclareInst.
+  void processDeclare(const Module &M, const DbgDeclareInst *DDI);
+  /// \brief Process DbgValueInst.
+  void processValue(const Module &M, const DbgValueInst *DVI);
+  /// \brief Process debug info location.
+  void processLocation(const Module &M, const DILocation *Loc);
+
+  /// \brief Clear all lists.
+  void reset();
+
+private:
+  void InitializeTypeMap(const Module &M);
+
+  void processType(DIType *DT);
+  void processSubprogram(DISubprogram *SP);
+  void processScope(DIScope *Scope);
+  bool addCompileUnit(DICompileUnit *CU);
+  bool addGlobalVariable(DIGlobalVariableExpression *DIG);
+  bool addSubprogram(DISubprogram *SP);
+  bool addType(DIType *DT);
+  bool addScope(DIScope *Scope);
+
+public:
+  using compile_unit_iterator =
+      SmallVectorImpl<DICompileUnit *>::const_iterator;
+  using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
+  using global_variable_expression_iterator =
+      SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
+  using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
+  using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;
+
+  iterator_range<compile_unit_iterator> compile_units() const {
+    return make_range(CUs.begin(), CUs.end());
+  }
+
+  iterator_range<subprogram_iterator> subprograms() const {
+    return make_range(SPs.begin(), SPs.end());
+  }
+
+  iterator_range<global_variable_expression_iterator> global_variables() const {
+    return make_range(GVs.begin(), GVs.end());
+  }
+
+  iterator_range<type_iterator> types() const {
+    return make_range(TYs.begin(), TYs.end());
+  }
+
+  iterator_range<scope_iterator> scopes() const {
+    return make_range(Scopes.begin(), Scopes.end());
+  }
+
+  unsigned compile_unit_count() const { return CUs.size(); }
+  unsigned global_variable_count() const { return GVs.size(); }
+  unsigned subprogram_count() const { return SPs.size(); }
+  unsigned type_count() const { return TYs.size(); }
+  unsigned scope_count() const { return Scopes.size(); }
+
+private:
+  SmallVector<DICompileUnit *, 8> CUs;
+  SmallVector<DISubprogram *, 8> SPs;
+  SmallVector<DIGlobalVariableExpression *, 8> GVs;
+  SmallVector<DIType *, 8> TYs;
+  SmallVector<DIScope *, 8> Scopes;
+  SmallPtrSet<const MDNode *, 32> NodesSeen;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DEBUGINFO_H
diff --git a/linux-x64/clang/include/llvm/IR/DebugInfoFlags.def b/linux-x64/clang/include/llvm/IR/DebugInfoFlags.def
new file mode 100644
index 0000000..676b978
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DebugInfoFlags.def
@@ -0,0 +1,62 @@
+//===- llvm/IR/DebugInfoFlags.def - Debug info flag definitions -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through debug info flags.
+//
+//===----------------------------------------------------------------------===//
+
+// TODO: Add other DW-based macros.
+#ifndef HANDLE_DI_FLAG
+#error "Missing macro definition of HANDLE_DI_FLAG"
+#endif
+
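+// A typical inclusion sketch (this mirrors how DINode::DIFlags in
+// llvm/IR/DebugInfoMetadata.h consumes this file):
+//   #define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
+//   #include "llvm/IR/DebugInfoFlags.def"
+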
+HANDLE_DI_FLAG(0, Zero) // Use it as zero value.
+                        // For example: void foo(DIFlags Flags = FlagZero).
+HANDLE_DI_FLAG(1, Private)
+HANDLE_DI_FLAG(2, Protected)
+HANDLE_DI_FLAG(3, Public)
+HANDLE_DI_FLAG((1 << 2), FwdDecl)
+HANDLE_DI_FLAG((1 << 3), AppleBlock)
+HANDLE_DI_FLAG((1 << 4), BlockByrefStruct)
+HANDLE_DI_FLAG((1 << 5), Virtual)
+HANDLE_DI_FLAG((1 << 6), Artificial)
+HANDLE_DI_FLAG((1 << 7), Explicit)
+HANDLE_DI_FLAG((1 << 8), Prototyped)
+HANDLE_DI_FLAG((1 << 9), ObjcClassComplete)
+HANDLE_DI_FLAG((1 << 10), ObjectPointer)
+HANDLE_DI_FLAG((1 << 11), Vector)
+HANDLE_DI_FLAG((1 << 12), StaticMember)
+HANDLE_DI_FLAG((1 << 13), LValueReference)
+HANDLE_DI_FLAG((1 << 14), RValueReference)
+// 15 was formerly ExternalTypeRef, but this was never used.
+HANDLE_DI_FLAG((1 << 15), Reserved)
+HANDLE_DI_FLAG((1 << 16), SingleInheritance)
+HANDLE_DI_FLAG((2 << 16), MultipleInheritance)
+HANDLE_DI_FLAG((3 << 16), VirtualInheritance)
+HANDLE_DI_FLAG((1 << 18), IntroducedVirtual)
+HANDLE_DI_FLAG((1 << 19), BitField)
+HANDLE_DI_FLAG((1 << 20), NoReturn)
+HANDLE_DI_FLAG((1 << 21), MainSubprogram)
+HANDLE_DI_FLAG((1 << 22), TypePassByValue)
+HANDLE_DI_FLAG((1 << 23), TypePassByReference)
+HANDLE_DI_FLAG((1 << 24), FixedEnum)
+
+// To avoid needing a dedicated value for IndirectVirtualBase, we use
+// the bitwise or of Virtual and FwdDecl, which does not otherwise
+// make sense for inheritance.
+HANDLE_DI_FLAG((1 << 2) | (1 << 5), IndirectVirtualBase)
+
+#ifdef DI_FLAG_LARGEST_NEEDED
+// Intended to be used with ADT/BitmaskEnum.h.
+// NOTE: This must always be equal to the largest flag; update it when adding
+// new flags.
+HANDLE_DI_FLAG((1 << 24), Largest)
+#undef DI_FLAG_LARGEST_NEEDED
+#endif
+
+#undef HANDLE_DI_FLAG
diff --git a/linux-x64/clang/include/llvm/IR/DebugInfoMetadata.h b/linux-x64/clang/include/llvm/IR/DebugInfoMetadata.h
new file mode 100644
index 0000000..e2210bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DebugInfoMetadata.h
@@ -0,0 +1,2961 @@
+//===- llvm/IR/DebugInfoMetadata.h - Debug info metadata --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declarations for metadata specific to debug info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGINFOMETADATA_H
+#define LLVM_IR_DEBUGINFOMETADATA_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+// Helper macros for defining get() overrides.
+#define DEFINE_MDNODE_GET_UNPACK_IMPL(...) __VA_ARGS__
+#define DEFINE_MDNODE_GET_UNPACK(ARGS) DEFINE_MDNODE_GET_UNPACK_IMPL ARGS
+#define DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS)              \
+  static CLASS *getDistinct(LLVMContext &Context,                              \
+                            DEFINE_MDNODE_GET_UNPACK(FORMAL)) {                \
+    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Distinct);         \
+  }                                                                            \
+  static Temp##CLASS getTemporary(LLVMContext &Context,                        \
+                                  DEFINE_MDNODE_GET_UNPACK(FORMAL)) {          \
+    return Temp##CLASS(                                                        \
+        getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Temporary));          \
+  }
+#define DEFINE_MDNODE_GET(CLASS, FORMAL, ARGS)                                 \
+  static CLASS *get(LLVMContext &Context, DEFINE_MDNODE_GET_UNPACK(FORMAL)) {  \
+    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued);          \
+  }                                                                            \
+  static CLASS *getIfExists(LLVMContext &Context,                              \
+                            DEFINE_MDNODE_GET_UNPACK(FORMAL)) {                \
+    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued,           \
+                   /* ShouldCreate */ false);                                  \
+  }                                                                            \
+  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS)
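+
+// For instance, DEFINE_MDNODE_GET(DISubrange, (int64_t Count), (Count)) would
+// declare get(), getIfExists(), getDistinct(), and getTemporary() overloads,
+// each taking an LLVMContext plus the given formal parameters.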
+
+namespace llvm {
+
+/// Holds a subclass of DINode.
+///
+/// FIXME: This class doesn't currently make much sense.  Previously it was a
+/// union between MDString (for ODR-uniqued types) and things like DIType.  To
+/// support CodeView work, it wasn't deleted outright when MDString-based type
+/// references were deleted; we'll soon need a similar concept for CodeView
+/// DITypeIndex.
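+///
+/// A resolve sketch (hedged; \c MD is assumed to be a Metadata* wrapping a
+/// DIType):
+/// \code
+///   DITypeRef Ref(MD);
+///   if (DIType *Ty = Ref.resolve())
+///     (void)Ty->getName();
+/// \endcode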
+template <class T> class TypedDINodeRef {
+  const Metadata *MD = nullptr;
+
+public:
+  TypedDINodeRef() = default;
+  TypedDINodeRef(std::nullptr_t) {}
+  TypedDINodeRef(const T *MD) : MD(MD) {}
+
+  explicit TypedDINodeRef(const Metadata *MD) : MD(MD) {
+    assert((!MD || isa<T>(MD)) && "Expected valid type ref");
+  }
+
+  template <class U>
+  TypedDINodeRef(
+      const TypedDINodeRef<U> &X,
+      typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
+          nullptr)
+      : MD(X) {}
+
+  operator Metadata *() const { return const_cast<Metadata *>(MD); }
+
+  T *resolve() const { return const_cast<T *>(cast_or_null<T>(MD)); }
+
+  bool operator==(const TypedDINodeRef<T> &X) const { return MD == X.MD; }
+  bool operator!=(const TypedDINodeRef<T> &X) const { return MD != X.MD; }
+};
+
+using DINodeRef = TypedDINodeRef<DINode>;
+using DIScopeRef = TypedDINodeRef<DIScope>;
+using DITypeRef = TypedDINodeRef<DIType>;
+
+class DITypeRefArray {
+  const MDTuple *N = nullptr;
+
+public:
+  DITypeRefArray() = default;
+  DITypeRefArray(const MDTuple *N) : N(N) {}
+
+  explicit operator bool() const { return get(); }
+  explicit operator MDTuple *() const { return get(); }
+
+  MDTuple *get() const { return const_cast<MDTuple *>(N); }
+  MDTuple *operator->() const { return get(); }
+  MDTuple &operator*() const { return *get(); }
+
+  // FIXME: Fix callers and remove condition on N.
+  unsigned size() const { return N ? N->getNumOperands() : 0u; }
+  DITypeRef operator[](unsigned I) const { return DITypeRef(N->getOperand(I)); }
+
+  class iterator : std::iterator<std::input_iterator_tag, DITypeRef,
+                                 std::ptrdiff_t, void, DITypeRef> {
+    MDNode::op_iterator I = nullptr;
+
+  public:
+    iterator() = default;
+    explicit iterator(MDNode::op_iterator I) : I(I) {}
+
+    DITypeRef operator*() const { return DITypeRef(*I); }
+
+    iterator &operator++() {
+      ++I;
+      return *this;
+    }
+
+    iterator operator++(int) {
+      iterator Temp(*this);
+      ++I;
+      return Temp;
+    }
+
+    bool operator==(const iterator &X) const { return I == X.I; }
+    bool operator!=(const iterator &X) const { return I != X.I; }
+  };
+
+  // FIXME: Fix callers and remove condition on N.
+  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
+  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
+};
+
+/// Tagged DWARF-like metadata node.
+///
+/// A metadata node with a DWARF tag (i.e., a constant named \c DW_TAG_*,
+/// defined in llvm/BinaryFormat/Dwarf.h).  Called \a DINode because it's
+/// potentially used for non-DWARF output.
+class DINode : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+protected:
+  DINode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None)
+      : MDNode(C, ID, Storage, Ops1, Ops2) {
+    assert(Tag < 1u << 16);
+    SubclassData16 = Tag;
+  }
+  ~DINode() = default;
+
+  template <class Ty> Ty *getOperandAs(unsigned I) const {
+    return cast_or_null<Ty>(getOperand(I));
+  }
+
+  StringRef getStringOperand(unsigned I) const {
+    if (auto *S = getOperandAs<MDString>(I))
+      return S->getString();
+    return StringRef();
+  }
+
+  static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
+    if (S.empty())
+      return nullptr;
+    return MDString::get(Context, S);
+  }
+
+  /// Allow subclasses to mutate the tag.
+  void setTag(unsigned Tag) { SubclassData16 = Tag; }
+
+public:
+  unsigned getTag() const { return SubclassData16; }
+
+  /// Debug info flags.
+  ///
+  /// The three accessibility flags are mutually exclusive and rolled together
+  /// in the first two bits.
+  enum DIFlags : uint32_t {
+#define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
+#define DI_FLAG_LARGEST_NEEDED
+#include "llvm/IR/DebugInfoFlags.def"
+    FlagAccessibility = FlagPrivate | FlagProtected | FlagPublic,
+    FlagPtrToMemberRep = FlagSingleInheritance | FlagMultipleInheritance |
+                         FlagVirtualInheritance,
+    LLVM_MARK_AS_BITMASK_ENUM(FlagLargest)
+  };
+
+  static DIFlags getFlag(StringRef Flag);
+  static StringRef getFlagString(DIFlags Flag);
+
+  /// Split up a flags bitfield.
+  ///
+  /// Split \c Flags into \c SplitFlags, a vector of its components.  Returns
+  /// any remaining (unrecognized) bits.
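+  ///
+  /// A sketch (hedged; \c Flags is some DIFlags value):
+  /// \code
+  ///   SmallVector<DINode::DIFlags, 8> Split;
+  ///   DINode::DIFlags Rest = DINode::splitFlags(Flags, Split);
+  /// \endcode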
+  static DIFlags splitFlags(DIFlags Flags,
+                            SmallVectorImpl<DIFlags> &SplitFlags);
+
+  static bool classof(const Metadata *MD) {
+    switch (MD->getMetadataID()) {
+    default:
+      return false;
+    case GenericDINodeKind:
+    case DISubrangeKind:
+    case DIEnumeratorKind:
+    case DIBasicTypeKind:
+    case DIDerivedTypeKind:
+    case DICompositeTypeKind:
+    case DISubroutineTypeKind:
+    case DIFileKind:
+    case DICompileUnitKind:
+    case DISubprogramKind:
+    case DILexicalBlockKind:
+    case DILexicalBlockFileKind:
+    case DINamespaceKind:
+    case DITemplateTypeParameterKind:
+    case DITemplateValueParameterKind:
+    case DIGlobalVariableKind:
+    case DILocalVariableKind:
+    case DIObjCPropertyKind:
+    case DIImportedEntityKind:
+    case DIModuleKind:
+      return true;
+    }
+  }
+};
+
+template <class T> struct simplify_type<const TypedDINodeRef<T>> {
+  using SimpleType = Metadata *;
+
+  static SimpleType getSimplifiedValue(const TypedDINodeRef<T> &MD) {
+    return MD;
+  }
+};
+
+template <class T>
+struct simplify_type<TypedDINodeRef<T>>
+    : simplify_type<const TypedDINodeRef<T>> {};
+
+/// Generic tagged DWARF-like metadata node.
+///
+/// An un-specialized DWARF-like metadata node.  The first operand is a
+/// (possibly empty) null-separated \a MDString header that contains arbitrary
+/// fields.  The remaining operands are \a dwarf_operands(), and are pointers
+/// to other metadata.
+class GenericDINode : public DINode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  GenericDINode(LLVMContext &C, StorageType Storage, unsigned Hash,
+                unsigned Tag, ArrayRef<Metadata *> Ops1,
+                ArrayRef<Metadata *> Ops2)
+      : DINode(C, GenericDINodeKind, Storage, Tag, Ops1, Ops2) {
+    setHash(Hash);
+  }
+  ~GenericDINode() { dropAllReferences(); }
+
+  void setHash(unsigned Hash) { SubclassData32 = Hash; }
+  void recalculateHash();
+
+  static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
+                                StringRef Header, ArrayRef<Metadata *> DwarfOps,
+                                StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Tag, getCanonicalMDString(Context, Header),
+                   DwarfOps, Storage, ShouldCreate);
+  }
+
+  static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
+                                MDString *Header, ArrayRef<Metadata *> DwarfOps,
+                                StorageType Storage, bool ShouldCreate = true);
+
+  TempGenericDINode cloneImpl() const {
+    return getTemporary(
+        getContext(), getTag(), getHeader(),
+        SmallVector<Metadata *, 4>(dwarf_op_begin(), dwarf_op_end()));
+  }
+
+public:
+  unsigned getHash() const { return SubclassData32; }
+
+  DEFINE_MDNODE_GET(GenericDINode, (unsigned Tag, StringRef Header,
+                                    ArrayRef<Metadata *> DwarfOps),
+                    (Tag, Header, DwarfOps))
+  DEFINE_MDNODE_GET(GenericDINode, (unsigned Tag, MDString *Header,
+                                    ArrayRef<Metadata *> DwarfOps),
+                    (Tag, Header, DwarfOps))
+
+  /// Return a (temporary) clone of this.
+  TempGenericDINode clone() const { return cloneImpl(); }
+
+  unsigned getTag() const { return SubclassData16; }
+  StringRef getHeader() const { return getStringOperand(0); }
+  MDString *getRawHeader() const { return getOperandAs<MDString>(0); }
+
+  op_iterator dwarf_op_begin() const { return op_begin() + 1; }
+  op_iterator dwarf_op_end() const { return op_end(); }
+  op_range dwarf_operands() const {
+    return op_range(dwarf_op_begin(), dwarf_op_end());
+  }
+
+  unsigned getNumDwarfOperands() const { return getNumOperands() - 1; }
+  const MDOperand &getDwarfOperand(unsigned I) const {
+    return getOperand(I + 1);
+  }
+  void replaceDwarfOperandWith(unsigned I, Metadata *New) {
+    replaceOperandWith(I + 1, New);
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == GenericDINodeKind;
+  }
+};
+
+/// Array subrange.
+///
+/// TODO: Merge into node for DW_TAG_array_type, which should have a custom
+/// type.
+class DISubrange : public DINode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  int64_t LowerBound;
+
+  DISubrange(LLVMContext &C, StorageType Storage, Metadata *Node,
+             int64_t LowerBound, ArrayRef<Metadata *> Ops)
+      : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, Ops),
+        LowerBound(LowerBound) {}
+
+  ~DISubrange() = default;
+
+  static DISubrange *getImpl(LLVMContext &Context, int64_t Count,
+                             int64_t LowerBound, StorageType Storage,
+                             bool ShouldCreate = true);
+
+  static DISubrange *getImpl(LLVMContext &Context, Metadata *CountNode,
+                             int64_t LowerBound, StorageType Storage,
+                             bool ShouldCreate = true);
+
+  TempDISubrange cloneImpl() const {
+    return getTemporary(getContext(), getRawCountNode(), getLowerBound());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DISubrange, (int64_t Count, int64_t LowerBound = 0),
+                    (Count, LowerBound))
+
+  DEFINE_MDNODE_GET(DISubrange, (Metadata *CountNode, int64_t LowerBound = 0),
+                    (CountNode, LowerBound))
+
+  TempDISubrange clone() const { return cloneImpl(); }
+
+  int64_t getLowerBound() const { return LowerBound; }
+
+  Metadata *getRawCountNode() const {
+    return getOperand(0).get();
+  }
+
+  typedef PointerUnion<ConstantInt*, DIVariable*> CountType;
+
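+  /// A sketch of inspecting the count, which is either a ConstantInt or a
+  /// DIVariable (hedged; \c Subrange is assumed to be a DISubrange*):
+  /// \code
+  ///   if (auto *CI = Subrange->getCount().dyn_cast<ConstantInt *>())
+  ///     (void)CI->getZExtValue();
+  /// \endcode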
+  CountType getCount() const {
+    if (auto *MD = dyn_cast<ConstantAsMetadata>(getRawCountNode()))
+      return CountType(cast<ConstantInt>(MD->getValue()));
+
+    if (auto *DV = dyn_cast<DIVariable>(getRawCountNode()))
+      return CountType(DV);
+
+    return CountType();
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DISubrangeKind;
+  }
+};
+
+/// Enumeration value.
+///
+/// TODO: Add a pointer to the context (DW_TAG_enumeration_type) once that no
+/// longer creates a type cycle.
+class DIEnumerator : public DINode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  int64_t Value;
+  DIEnumerator(LLVMContext &C, StorageType Storage, int64_t Value,
+               bool IsUnsigned, ArrayRef<Metadata *> Ops)
+      : DINode(C, DIEnumeratorKind, Storage, dwarf::DW_TAG_enumerator, Ops),
+        Value(Value) {
+    SubclassData32 = IsUnsigned;
+  }
+  ~DIEnumerator() = default;
+
+  static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+                               bool IsUnsigned, StringRef Name,
+                               StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Value, IsUnsigned,
+                   getCanonicalMDString(Context, Name), Storage, ShouldCreate);
+  }
+  static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+                               bool IsUnsigned, MDString *Name,
+                               StorageType Storage, bool ShouldCreate = true);
+
+  TempDIEnumerator cloneImpl() const {
+    return getTemporary(getContext(), getValue(), isUnsigned(), getName());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, bool IsUnsigned, StringRef Name),
+                    (Value, IsUnsigned, Name))
+  DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, bool IsUnsigned, MDString *Name),
+                    (Value, IsUnsigned, Name))
+
+  TempDIEnumerator clone() const { return cloneImpl(); }
+
+  int64_t getValue() const { return Value; }
+  bool isUnsigned() const { return SubclassData32; }
+  StringRef getName() const { return getStringOperand(0); }
+
+  MDString *getRawName() const { return getOperandAs<MDString>(0); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIEnumeratorKind;
+  }
+};
+
+/// Base class for scope-like contexts.
+///
+/// Base class for lexical scopes and types (which are also declaration
+/// contexts).
+///
+/// TODO: Separate the concepts of declaration contexts and lexical scopes.
+class DIScope : public DINode {
+protected:
+  DIScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+          ArrayRef<Metadata *> Ops)
+      : DINode(C, ID, Storage, Tag, Ops) {}
+  ~DIScope() = default;
+
+public:
+  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+
+  inline StringRef getFilename() const;
+  inline StringRef getDirectory() const;
+  inline Optional<StringRef> getSource() const;
+
+  StringRef getName() const;
+  DIScopeRef getScope() const;
+
+  /// Return the raw underlying file.
+  ///
+  /// A \a DIFile is a \a DIScope, but it doesn't point at a separate file (it
+  /// \em is the file).  If \c this is a \a DIFile, we need to return \c this.
+  /// Otherwise, return the first operand, which is where all other subclasses
+  /// store their file pointer.
+  Metadata *getRawFile() const {
+    return isa<DIFile>(this) ? const_cast<DIScope *>(this)
+                             : static_cast<Metadata *>(getOperand(0));
+  }
+
+  static bool classof(const Metadata *MD) {
+    switch (MD->getMetadataID()) {
+    default:
+      return false;
+    case DIBasicTypeKind:
+    case DIDerivedTypeKind:
+    case DICompositeTypeKind:
+    case DISubroutineTypeKind:
+    case DIFileKind:
+    case DICompileUnitKind:
+    case DISubprogramKind:
+    case DILexicalBlockKind:
+    case DILexicalBlockFileKind:
+    case DINamespaceKind:
+    case DIModuleKind:
+      return true;
+    }
+  }
+};
+
+/// File.
+///
+/// TODO: Merge with directory/file node (including users).
+/// TODO: Canonicalize paths on creation.
+class DIFile : public DIScope {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+public:
+  /// Which algorithm (e.g. MD5) a checksum was generated with.
+  ///
+  /// The encoding is explicit because it is used directly in Bitcode. The
+  /// value 0 is reserved to indicate the absence of a checksum in Bitcode.
+  enum ChecksumKind {
+    // The first variant was originally CSK_None, encoded as 0. The new
+    // internal representation removes the need for this by wrapping the
+    // ChecksumInfo in an Optional, but to preserve Bitcode compatibility the 0
+    // encoding is reserved.
+    CSK_MD5 = 1,
+    CSK_SHA1 = 2,
+    CSK_Last = CSK_SHA1 // Should be last enumeration.
+  };
+
+  /// A single checksum, represented by a \a Kind and a \a Value (a string).
+  template <typename T>
+  struct ChecksumInfo {
+    /// The kind of checksum which \a Value encodes.
+    ChecksumKind Kind;
+    /// The string value of the checksum.
+    T Value;
+
+    ChecksumInfo(ChecksumKind Kind, T Value) : Kind(Kind), Value(Value) { }
+    ~ChecksumInfo() = default;
+    bool operator==(const ChecksumInfo<T> &X) const {
+      return Kind == X.Kind && Value == X.Value;
+    }
+    bool operator!=(const ChecksumInfo<T> &X) const { return !(*this == X); }
+    StringRef getKindAsString() const { return getChecksumKindAsString(Kind); }
+  };
+
+private:
+  Optional<ChecksumInfo<MDString *>> Checksum;
+  Optional<MDString *> Source;
+
+  DIFile(LLVMContext &C, StorageType Storage,
+         Optional<ChecksumInfo<MDString *>> CS, Optional<MDString *> Src,
+         ArrayRef<Metadata *> Ops)
+      : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops),
+        Checksum(CS), Source(Src) {}
+  ~DIFile() = default;
+
+  static DIFile *getImpl(LLVMContext &Context, StringRef Filename,
+                         StringRef Directory,
+                         Optional<ChecksumInfo<StringRef>> CS,
+                         Optional<StringRef> Source,
+                         StorageType Storage, bool ShouldCreate = true) {
+    Optional<ChecksumInfo<MDString *>> MDChecksum;
+    if (CS)
+      MDChecksum.emplace(CS->Kind, getCanonicalMDString(Context, CS->Value));
+    return getImpl(Context, getCanonicalMDString(Context, Filename),
+                   getCanonicalMDString(Context, Directory), MDChecksum,
+                   Source ? Optional<MDString *>(getCanonicalMDString(Context, *Source)) : None,
+                   Storage, ShouldCreate);
+  }
+  static DIFile *getImpl(LLVMContext &Context, MDString *Filename,
+                         MDString *Directory,
+                         Optional<ChecksumInfo<MDString *>> CS,
+                         Optional<MDString *> Source, StorageType Storage,
+                         bool ShouldCreate = true);
+
+  TempDIFile cloneImpl() const {
+    return getTemporary(getContext(), getFilename(), getDirectory(),
+                        getChecksum(), getSource());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIFile, (StringRef Filename, StringRef Directory,
+                             Optional<ChecksumInfo<StringRef>> CS = None,
+                             Optional<StringRef> Source = None),
+                    (Filename, Directory, CS, Source))
+  DEFINE_MDNODE_GET(DIFile, (MDString * Filename, MDString *Directory,
+                             Optional<ChecksumInfo<MDString *>> CS = None,
+                             Optional<MDString *> Source = None),
+                    (Filename, Directory, CS, Source))
+
+  TempDIFile clone() const { return cloneImpl(); }
+
+  StringRef getFilename() const { return getStringOperand(0); }
+  StringRef getDirectory() const { return getStringOperand(1); }
+  Optional<ChecksumInfo<StringRef>> getChecksum() const {
+    Optional<ChecksumInfo<StringRef>> StringRefChecksum;
+    if (Checksum)
+      StringRefChecksum.emplace(Checksum->Kind, Checksum->Value->getString());
+    return StringRefChecksum;
+  }
+  Optional<StringRef> getSource() const {
+    return Source ? Optional<StringRef>((*Source)->getString()) : None;
+  }
+
+  MDString *getRawFilename() const { return getOperandAs<MDString>(0); }
+  MDString *getRawDirectory() const { return getOperandAs<MDString>(1); }
+  Optional<ChecksumInfo<MDString *>> getRawChecksum() const { return Checksum; }
+  Optional<MDString *> getRawSource() const { return Source; }
+
+  static StringRef getChecksumKindAsString(ChecksumKind CSKind);
+  static Optional<ChecksumKind> getChecksumKind(StringRef CSKindStr);
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIFileKind;
+  }
+};
+
+StringRef DIScope::getFilename() const {
+  if (auto *F = getFile())
+    return F->getFilename();
+  return "";
+}
+
+StringRef DIScope::getDirectory() const {
+  if (auto *F = getFile())
+    return F->getDirectory();
+  return "";
+}
+
+Optional<StringRef> DIScope::getSource() const {
+  if (auto *F = getFile())
+    return F->getSource();
+  return None;
+}
+
+/// Base class for types.
+///
+/// TODO: Remove the hardcoded name and context, since many types don't use
+/// them.
+/// TODO: Split up flags.
+class DIType : public DIScope {
+  unsigned Line;
+  DIFlags Flags;
+  uint64_t SizeInBits;
+  uint64_t OffsetInBits;
+  uint32_t AlignInBits;
+
+protected:
+  DIType(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+         unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+         uint64_t OffsetInBits, DIFlags Flags, ArrayRef<Metadata *> Ops)
+      : DIScope(C, ID, Storage, Tag, Ops) {
+    init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
+  }
+  ~DIType() = default;
+
+  void init(unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+            uint64_t OffsetInBits, DIFlags Flags) {
+    this->Line = Line;
+    this->Flags = Flags;
+    this->SizeInBits = SizeInBits;
+    this->AlignInBits = AlignInBits;
+    this->OffsetInBits = OffsetInBits;
+  }
+
+  /// Change fields in place.
+  void mutate(unsigned Tag, unsigned Line, uint64_t SizeInBits,
+              uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags) {
+    assert(isDistinct() && "Only distinct nodes can mutate");
+    setTag(Tag);
+    init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
+  }
+
+public:
+  TempDIType clone() const {
+    return TempDIType(cast<DIType>(MDNode::clone().release()));
+  }
+
+  unsigned getLine() const { return Line; }
+  uint64_t getSizeInBits() const { return SizeInBits; }
+  uint32_t getAlignInBits() const { return AlignInBits; }
+  uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
+  uint64_t getOffsetInBits() const { return OffsetInBits; }
+  DIFlags getFlags() const { return Flags; }
+
+  DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
+  StringRef getName() const { return getStringOperand(2); }
+
+  Metadata *getRawScope() const { return getOperand(1); }
+  MDString *getRawName() const { return getOperandAs<MDString>(2); }
+
+  void setFlags(DIFlags NewFlags) {
+    assert(!isUniqued() && "Cannot set flags on uniqued nodes");
+    Flags = NewFlags;
+  }
+
+  bool isPrivate() const {
+    return (getFlags() & FlagAccessibility) == FlagPrivate;
+  }
+  bool isProtected() const {
+    return (getFlags() & FlagAccessibility) == FlagProtected;
+  }
+  bool isPublic() const {
+    return (getFlags() & FlagAccessibility) == FlagPublic;
+  }
+  bool isForwardDecl() const { return getFlags() & FlagFwdDecl; }
+  bool isAppleBlockExtension() const { return getFlags() & FlagAppleBlock; }
+  bool isBlockByrefStruct() const { return getFlags() & FlagBlockByrefStruct; }
+  bool isVirtual() const { return getFlags() & FlagVirtual; }
+  bool isArtificial() const { return getFlags() & FlagArtificial; }
+  bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
+  bool isObjcClassComplete() const {
+    return getFlags() & FlagObjcClassComplete;
+  }
+  bool isVector() const { return getFlags() & FlagVector; }
+  bool isBitField() const { return getFlags() & FlagBitField; }
+  bool isStaticMember() const { return getFlags() & FlagStaticMember; }
+  bool isLValueReference() const { return getFlags() & FlagLValueReference; }
+  bool isRValueReference() const { return getFlags() & FlagRValueReference; }
+  bool isTypePassByValue() const { return getFlags() & FlagTypePassByValue; }
+  bool isTypePassByReference() const {
+    return getFlags() & FlagTypePassByReference;
+  }
+
+  static bool classof(const Metadata *MD) {
+    switch (MD->getMetadataID()) {
+    default:
+      return false;
+    case DIBasicTypeKind:
+    case DIDerivedTypeKind:
+    case DICompositeTypeKind:
+    case DISubroutineTypeKind:
+      return true;
+    }
+  }
+};
+
+/// Basic type, like 'int' or 'float'.
+///
+/// TODO: Split out DW_TAG_unspecified_type.
+/// TODO: Drop unused accessors.
+class DIBasicType : public DIType {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Encoding;
+
+  DIBasicType(LLVMContext &C, StorageType Storage, unsigned Tag,
+              uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding,
+              ArrayRef<Metadata *> Ops)
+      : DIType(C, DIBasicTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
+               FlagZero, Ops),
+        Encoding(Encoding) {}
+  ~DIBasicType() = default;
+
+  static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
+                              StringRef Name, uint64_t SizeInBits,
+                              uint32_t AlignInBits, unsigned Encoding,
+                              StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
+                   SizeInBits, AlignInBits, Encoding, Storage, ShouldCreate);
+  }
+  static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
+                              MDString *Name, uint64_t SizeInBits,
+                              uint32_t AlignInBits, unsigned Encoding,
+                              StorageType Storage, bool ShouldCreate = true);
+
+  TempDIBasicType cloneImpl() const {
+    return getTemporary(getContext(), getTag(), getName(), getSizeInBits(),
+                        getAlignInBits(), getEncoding());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIBasicType, (unsigned Tag, StringRef Name),
+                    (Tag, Name, 0, 0, 0))
+  DEFINE_MDNODE_GET(DIBasicType,
+                    (unsigned Tag, StringRef Name, uint64_t SizeInBits,
+                     uint32_t AlignInBits, unsigned Encoding),
+                    (Tag, Name, SizeInBits, AlignInBits, Encoding))
+  DEFINE_MDNODE_GET(DIBasicType,
+                    (unsigned Tag, MDString *Name, uint64_t SizeInBits,
+                     uint32_t AlignInBits, unsigned Encoding),
+                    (Tag, Name, SizeInBits, AlignInBits, Encoding))
+
+  TempDIBasicType clone() const { return cloneImpl(); }
+
+  unsigned getEncoding() const { return Encoding; }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIBasicTypeKind;
+  }
+};
+
+/// Derived types.
+///
+/// This includes qualified types, pointers, references, friends, typedefs, and
+/// class members.
+///
+/// TODO: Split out members (inheritance, fields, methods, etc.).
+class DIDerivedType : public DIType {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  /// \brief The DWARF address space of the memory pointed to or referenced by a
+  /// pointer or reference type respectively.
+  Optional<unsigned> DWARFAddressSpace;
+
+  DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag,
+                unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+                uint64_t OffsetInBits, Optional<unsigned> DWARFAddressSpace,
+                DIFlags Flags, ArrayRef<Metadata *> Ops)
+      : DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits,
+               AlignInBits, OffsetInBits, Flags, Ops),
+        DWARFAddressSpace(DWARFAddressSpace) {}
+  ~DIDerivedType() = default;
+
+  static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
+                                StringRef Name, DIFile *File, unsigned Line,
+                                DIScopeRef Scope, DITypeRef BaseType,
+                                uint64_t SizeInBits, uint32_t AlignInBits,
+                                uint64_t OffsetInBits,
+                                Optional<unsigned> DWARFAddressSpace,
+                                DIFlags Flags, Metadata *ExtraData,
+                                StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
+                   Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits,
+                   DWARFAddressSpace, Flags, ExtraData, Storage, ShouldCreate);
+  }
+  static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
+                                MDString *Name, Metadata *File, unsigned Line,
+                                Metadata *Scope, Metadata *BaseType,
+                                uint64_t SizeInBits, uint32_t AlignInBits,
+                                uint64_t OffsetInBits,
+                                Optional<unsigned> DWARFAddressSpace,
+                                DIFlags Flags, Metadata *ExtraData,
+                                StorageType Storage, bool ShouldCreate = true);
+
+  TempDIDerivedType cloneImpl() const {
+    return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(),
+                        getScope(), getBaseType(), getSizeInBits(),
+                        getAlignInBits(), getOffsetInBits(),
+                        getDWARFAddressSpace(), getFlags(), getExtraData());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIDerivedType,
+                    (unsigned Tag, MDString *Name, Metadata *File,
+                     unsigned Line, Metadata *Scope, Metadata *BaseType,
+                     uint64_t SizeInBits, uint32_t AlignInBits,
+                     uint64_t OffsetInBits,
+                     Optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+                     Metadata *ExtraData = nullptr),
+                    (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+                     AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
+                     ExtraData))
+  DEFINE_MDNODE_GET(DIDerivedType,
+                    (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
+                     DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
+                     uint32_t AlignInBits, uint64_t OffsetInBits,
+                     Optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+                     Metadata *ExtraData = nullptr),
+                    (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+                     AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
+                     ExtraData))
+
+  TempDIDerivedType clone() const { return cloneImpl(); }
+
+  /// Get the base type this is derived from.
+  DITypeRef getBaseType() const { return DITypeRef(getRawBaseType()); }
+  Metadata *getRawBaseType() const { return getOperand(3); }
+
+  /// \returns The DWARF address space of the memory pointed to or referenced
+  /// by a pointer or reference type, respectively.
+  Optional<unsigned> getDWARFAddressSpace() const { return DWARFAddressSpace; }
+
+  /// Get extra data associated with this derived type.
+  ///
+  /// Class type for pointer-to-members, Objective-C property node for ivars,
+  /// or global constant wrapper for static members.
+  ///
+  /// TODO: Separate out types that need this extra operand: pointer-to-member
+  /// types and member fields (static members and ivars).
+  Metadata *getExtraData() const { return getRawExtraData(); }
+  Metadata *getRawExtraData() const { return getOperand(4); }
+
+  /// Get casted version of extra data.
+  /// @{
+  DITypeRef getClassType() const {
+    assert(getTag() == dwarf::DW_TAG_ptr_to_member_type);
+    return DITypeRef(getExtraData());
+  }
+
+  DIObjCProperty *getObjCProperty() const {
+    return dyn_cast_or_null<DIObjCProperty>(getExtraData());
+  }
+
+  Constant *getStorageOffsetInBits() const {
+    assert(getTag() == dwarf::DW_TAG_member && isBitField());
+    if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+      return C->getValue();
+    return nullptr;
+  }
+
+  Constant *getConstant() const {
+    assert(getTag() == dwarf::DW_TAG_member && isStaticMember());
+    if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+      return C->getValue();
+    return nullptr;
+  }
+  Constant *getDiscriminantValue() const {
+    assert(getTag() == dwarf::DW_TAG_member && !isStaticMember());
+    if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+      return C->getValue();
+    return nullptr;
+  }
+  /// @}
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIDerivedTypeKind;
+  }
+};
+
+/// Composite types.
+///
+/// TODO: Detach from DerivedTypeBase (split out MDEnumType?).
+/// TODO: Create a custom, unrelated node for DW_TAG_array_type.
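+///
+/// For example, a simple struct might be written in textual IR as
+/// (illustrative operands):
+/// \code
+///   !DICompositeType(tag: DW_TAG_structure_type, name: "S", file: !1,
+///                    line: 3, size: 64, elements: !4)
+/// \endcode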
+class DICompositeType : public DIType {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned RuntimeLang;
+
+  DICompositeType(LLVMContext &C, StorageType Storage, unsigned Tag,
+                  unsigned Line, unsigned RuntimeLang, uint64_t SizeInBits,
+                  uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
+                  ArrayRef<Metadata *> Ops)
+      : DIType(C, DICompositeTypeKind, Storage, Tag, Line, SizeInBits,
+               AlignInBits, OffsetInBits, Flags, Ops),
+        RuntimeLang(RuntimeLang) {}
+  ~DICompositeType() = default;
+
+  /// Change fields in place.
+  void mutate(unsigned Tag, unsigned Line, unsigned RuntimeLang,
+              uint64_t SizeInBits, uint32_t AlignInBits,
+              uint64_t OffsetInBits, DIFlags Flags) {
+    assert(isDistinct() && "Only distinct nodes can mutate");
+    assert(getRawIdentifier() && "Only ODR-uniqued nodes should mutate");
+    this->RuntimeLang = RuntimeLang;
+    DIType::mutate(Tag, Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
+  }
+
+  static DICompositeType *
+  getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, Metadata *File,
+          unsigned Line, DIScopeRef Scope, DITypeRef BaseType,
+          uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+          DIFlags Flags, DINodeArray Elements, unsigned RuntimeLang,
+          DITypeRef VTableHolder, DITemplateParameterArray TemplateParams,
+          StringRef Identifier, DIDerivedType *Discriminator,
+          StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(
+        Context, Tag, getCanonicalMDString(Context, Name), File, Line, Scope,
+        BaseType, SizeInBits, AlignInBits, OffsetInBits, Flags, Elements.get(),
+        RuntimeLang, VTableHolder, TemplateParams.get(),
+        getCanonicalMDString(Context, Identifier), Discriminator, Storage,
+        ShouldCreate);
+  }
+  static DICompositeType *
+  getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+          unsigned Line, Metadata *Scope, Metadata *BaseType,
+          uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+          DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
+          Metadata *VTableHolder, Metadata *TemplateParams,
+          MDString *Identifier, Metadata *Discriminator,
+          StorageType Storage, bool ShouldCreate = true);
+
+  TempDICompositeType cloneImpl() const {
+    return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(),
+                        getScope(), getBaseType(), getSizeInBits(),
+                        getAlignInBits(), getOffsetInBits(), getFlags(),
+                        getElements(), getRuntimeLang(), getVTableHolder(),
+                        getTemplateParams(), getIdentifier(), getDiscriminator());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DICompositeType,
+                    (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
+                     DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
+                     uint32_t AlignInBits, uint64_t OffsetInBits,
+                     DIFlags Flags, DINodeArray Elements, unsigned RuntimeLang,
+                     DITypeRef VTableHolder,
+                     DITemplateParameterArray TemplateParams = nullptr,
+                     StringRef Identifier = "", DIDerivedType *Discriminator = nullptr),
+                    (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+                     AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+                     VTableHolder, TemplateParams, Identifier, Discriminator))
+  DEFINE_MDNODE_GET(DICompositeType,
+                    (unsigned Tag, MDString *Name, Metadata *File,
+                     unsigned Line, Metadata *Scope, Metadata *BaseType,
+                     uint64_t SizeInBits, uint32_t AlignInBits,
+                     uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
+                     unsigned RuntimeLang, Metadata *VTableHolder,
+                     Metadata *TemplateParams = nullptr,
+                     MDString *Identifier = nullptr,
+                     Metadata *Discriminator = nullptr),
+                    (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+                     AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+                     VTableHolder, TemplateParams, Identifier, Discriminator))
+
+  TempDICompositeType clone() const { return cloneImpl(); }
+
+  /// Get a DICompositeType with the given ODR identifier.
+  ///
+  /// If \a LLVMContext::isODRUniquingDebugTypes(), gets the mapped
+  /// DICompositeType for the given ODR \c Identifier.  If none exists, creates
+  /// a new node.
+  ///
+  /// Otherwise, returns \c nullptr.
+  static DICompositeType *
+  getODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
+             MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
+             Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
+             uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
+             unsigned RuntimeLang, Metadata *VTableHolder,
+             Metadata *TemplateParams, Metadata *Discriminator);
+  static DICompositeType *getODRTypeIfExists(LLVMContext &Context,
+                                             MDString &Identifier);
+
+  /// Build a DICompositeType with the given ODR identifier.
+  ///
+  /// Looks up the mapped DICompositeType for the given ODR \c Identifier.  If
+  /// it doesn't exist, creates a new one.  If it does exist and \a
+  /// isForwardDecl(), and the new arguments would be a definition, mutates the
+  /// type in place.  In either case, returns the type.
+  ///
+  /// If not \a LLVMContext::isODRUniquingDebugTypes(), this function returns
+  /// nullptr.
+  static DICompositeType *
+  buildODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
+               MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
+               Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
+               uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
+               unsigned RuntimeLang, Metadata *VTableHolder,
+               Metadata *TemplateParams, Metadata *Discriminator);
+
+  DITypeRef getBaseType() const { return DITypeRef(getRawBaseType()); }
+  DINodeArray getElements() const {
+    return cast_or_null<MDTuple>(getRawElements());
+  }
+  DITypeRef getVTableHolder() const { return DITypeRef(getRawVTableHolder()); }
+  DITemplateParameterArray getTemplateParams() const {
+    return cast_or_null<MDTuple>(getRawTemplateParams());
+  }
+  StringRef getIdentifier() const { return getStringOperand(7); }
+  unsigned getRuntimeLang() const { return RuntimeLang; }
+
+  Metadata *getRawBaseType() const { return getOperand(3); }
+  Metadata *getRawElements() const { return getOperand(4); }
+  Metadata *getRawVTableHolder() const { return getOperand(5); }
+  Metadata *getRawTemplateParams() const { return getOperand(6); }
+  MDString *getRawIdentifier() const { return getOperandAs<MDString>(7); }
+  Metadata *getRawDiscriminator() const { return getOperand(8); }
+  DIDerivedType *getDiscriminator() const {
+    return getOperandAs<DIDerivedType>(8);
+  }
+
+  /// Replace operands.
+  ///
+  /// If this \a isUniqued() and not \a isResolved(), on a uniquing collision
+  /// this will be RAUW'ed and deleted.  Use a \a TrackingMDRef to keep track
+  /// of its movement if necessary.
+  /// @{
+  void replaceElements(DINodeArray Elements) {
+#ifndef NDEBUG
+    for (DINode *Op : getElements())
+      assert(is_contained(Elements->operands(), Op) &&
+             "Lost a member during member list replacement");
+#endif
+    replaceOperandWith(4, Elements.get());
+  }
+
+  void replaceVTableHolder(DITypeRef VTableHolder) {
+    replaceOperandWith(5, VTableHolder);
+  }
+
+  void replaceTemplateParams(DITemplateParameterArray TemplateParams) {
+    replaceOperandWith(6, TemplateParams.get());
+  }
+  /// @}
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DICompositeTypeKind;
+  }
+};
+
+/// Type array for a subprogram.
+///
+/// TODO: Fold the array of types in directly as operands.
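+///
+/// In textual IR the return type is the first element of the type array and
+/// \c null denotes \c void (illustrative operands):
+/// \code
+///   !8 = !DISubroutineType(types: !9)
+///   !9 = !{null, !10}  ; void return, one parameter of type !10
+/// \endcode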
+class DISubroutineType : public DIType {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  /// The calling convention used with DW_AT_calling_convention. Actually of
+  /// type dwarf::CallingConvention.
+  uint8_t CC;
+
+  DISubroutineType(LLVMContext &C, StorageType Storage, DIFlags Flags,
+                   uint8_t CC, ArrayRef<Metadata *> Ops)
+      : DIType(C, DISubroutineTypeKind, Storage, dwarf::DW_TAG_subroutine_type,
+               0, 0, 0, 0, Flags, Ops),
+        CC(CC) {}
+  ~DISubroutineType() = default;
+
+  static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
+                                   uint8_t CC, DITypeRefArray TypeArray,
+                                   StorageType Storage,
+                                   bool ShouldCreate = true) {
+    return getImpl(Context, Flags, CC, TypeArray.get(), Storage, ShouldCreate);
+  }
+  static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
+                                   uint8_t CC, Metadata *TypeArray,
+                                   StorageType Storage,
+                                   bool ShouldCreate = true);
+
+  TempDISubroutineType cloneImpl() const {
+    return getTemporary(getContext(), getFlags(), getCC(), getTypeArray());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DISubroutineType,
+                    (DIFlags Flags, uint8_t CC, DITypeRefArray TypeArray),
+                    (Flags, CC, TypeArray))
+  DEFINE_MDNODE_GET(DISubroutineType,
+                    (DIFlags Flags, uint8_t CC, Metadata *TypeArray),
+                    (Flags, CC, TypeArray))
+
+  TempDISubroutineType clone() const { return cloneImpl(); }
+
+  uint8_t getCC() const { return CC; }
+
+  DITypeRefArray getTypeArray() const {
+    return cast_or_null<MDTuple>(getRawTypeArray());
+  }
+
+  Metadata *getRawTypeArray() const { return getOperand(3); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DISubroutineTypeKind;
+  }
+};
+
+/// Compile unit.
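+///
+/// Compile units are always \c distinct; a minimal one in textual IR might
+/// look like (illustrative operands):
+/// \code
+///   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1,
+///                                producer: "clang", isOptimized: false,
+///                                runtimeVersion: 0, emissionKind: FullDebug)
+/// \endcode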
+class DICompileUnit : public DIScope {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+public:
+  enum DebugEmissionKind : unsigned {
+    NoDebug = 0,
+    FullDebug,
+    LineTablesOnly,
+    LastEmissionKind = LineTablesOnly
+  };
+
+  static Optional<DebugEmissionKind> getEmissionKind(StringRef Str);
+  static const char *EmissionKindString(DebugEmissionKind EK);
+
+private:
+  unsigned SourceLanguage;
+  bool IsOptimized;
+  unsigned RuntimeVersion;
+  unsigned EmissionKind;
+  uint64_t DWOId;
+  bool SplitDebugInlining;
+  bool DebugInfoForProfiling;
+  bool GnuPubnames;
+
+  DICompileUnit(LLVMContext &C, StorageType Storage, unsigned SourceLanguage,
+                bool IsOptimized, unsigned RuntimeVersion,
+                unsigned EmissionKind, uint64_t DWOId, bool SplitDebugInlining,
+                bool DebugInfoForProfiling, bool GnuPubnames,
+                ArrayRef<Metadata *> Ops)
+      : DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops),
+        SourceLanguage(SourceLanguage), IsOptimized(IsOptimized),
+        RuntimeVersion(RuntimeVersion), EmissionKind(EmissionKind),
+        DWOId(DWOId), SplitDebugInlining(SplitDebugInlining),
+        DebugInfoForProfiling(DebugInfoForProfiling), GnuPubnames(GnuPubnames) {
+    assert(Storage != Uniqued);
+  }
+  ~DICompileUnit() = default;
+
+  static DICompileUnit *
+  getImpl(LLVMContext &Context, unsigned SourceLanguage, DIFile *File,
+          StringRef Producer, bool IsOptimized, StringRef Flags,
+          unsigned RuntimeVersion, StringRef SplitDebugFilename,
+          unsigned EmissionKind, DICompositeTypeArray EnumTypes,
+          DIScopeArray RetainedTypes,
+          DIGlobalVariableExpressionArray GlobalVariables,
+          DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
+          uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
+          bool GnuPubnames, StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(
+        Context, SourceLanguage, File, getCanonicalMDString(Context, Producer),
+        IsOptimized, getCanonicalMDString(Context, Flags), RuntimeVersion,
+        getCanonicalMDString(Context, SplitDebugFilename), EmissionKind,
+        EnumTypes.get(), RetainedTypes.get(), GlobalVariables.get(),
+        ImportedEntities.get(), Macros.get(), DWOId, SplitDebugInlining,
+        DebugInfoForProfiling, GnuPubnames, Storage, ShouldCreate);
+  }
+  static DICompileUnit *
+  getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
+          MDString *Producer, bool IsOptimized, MDString *Flags,
+          unsigned RuntimeVersion, MDString *SplitDebugFilename,
+          unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
+          Metadata *GlobalVariables, Metadata *ImportedEntities,
+          Metadata *Macros, uint64_t DWOId, bool SplitDebugInlining,
+          bool DebugInfoForProfiling, bool GnuPubnames, StorageType Storage,
+          bool ShouldCreate = true);
+
+  TempDICompileUnit cloneImpl() const {
+    return getTemporary(getContext(), getSourceLanguage(), getFile(),
+                        getProducer(), isOptimized(), getFlags(),
+                        getRuntimeVersion(), getSplitDebugFilename(),
+                        getEmissionKind(), getEnumTypes(), getRetainedTypes(),
+                        getGlobalVariables(), getImportedEntities(),
+                        getMacros(), DWOId, getSplitDebugInlining(),
+                        getDebugInfoForProfiling(), getGnuPubnames());
+  }
+
+public:
+  static void get() = delete;
+  static void getIfExists() = delete;
+
+  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
+      DICompileUnit,
+      (unsigned SourceLanguage, DIFile *File, StringRef Producer,
+       bool IsOptimized, StringRef Flags, unsigned RuntimeVersion,
+       StringRef SplitDebugFilename, DebugEmissionKind EmissionKind,
+       DICompositeTypeArray EnumTypes, DIScopeArray RetainedTypes,
+       DIGlobalVariableExpressionArray GlobalVariables,
+       DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
+       uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
+       bool GnuPubnames),
+      (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
+       SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
+       GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
+       DebugInfoForProfiling, GnuPubnames))
+  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
+      DICompileUnit,
+      (unsigned SourceLanguage, Metadata *File, MDString *Producer,
+       bool IsOptimized, MDString *Flags, unsigned RuntimeVersion,
+       MDString *SplitDebugFilename, unsigned EmissionKind, Metadata *EnumTypes,
+       Metadata *RetainedTypes, Metadata *GlobalVariables,
+       Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
+       bool SplitDebugInlining, bool DebugInfoForProfiling, bool GnuPubnames),
+      (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
+       SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
+       GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
+       DebugInfoForProfiling, GnuPubnames))
+
+  TempDICompileUnit clone() const { return cloneImpl(); }
+
+  unsigned getSourceLanguage() const { return SourceLanguage; }
+  bool isOptimized() const { return IsOptimized; }
+  unsigned getRuntimeVersion() const { return RuntimeVersion; }
+  DebugEmissionKind getEmissionKind() const {
+    return (DebugEmissionKind)EmissionKind;
+  }
+  bool getDebugInfoForProfiling() const { return DebugInfoForProfiling; }
+  bool getGnuPubnames() const { return GnuPubnames; }
+  StringRef getProducer() const { return getStringOperand(1); }
+  StringRef getFlags() const { return getStringOperand(2); }
+  StringRef getSplitDebugFilename() const { return getStringOperand(3); }
+  DICompositeTypeArray getEnumTypes() const {
+    return cast_or_null<MDTuple>(getRawEnumTypes());
+  }
+  DIScopeArray getRetainedTypes() const {
+    return cast_or_null<MDTuple>(getRawRetainedTypes());
+  }
+  DIGlobalVariableExpressionArray getGlobalVariables() const {
+    return cast_or_null<MDTuple>(getRawGlobalVariables());
+  }
+  DIImportedEntityArray getImportedEntities() const {
+    return cast_or_null<MDTuple>(getRawImportedEntities());
+  }
+  DIMacroNodeArray getMacros() const {
+    return cast_or_null<MDTuple>(getRawMacros());
+  }
+  uint64_t getDWOId() const { return DWOId; }
+  void setDWOId(uint64_t DwoId) { DWOId = DwoId; }
+  bool getSplitDebugInlining() const { return SplitDebugInlining; }
+  void setSplitDebugInlining(bool SplitDebugInlining) {
+    this->SplitDebugInlining = SplitDebugInlining;
+  }
+
+  MDString *getRawProducer() const { return getOperandAs<MDString>(1); }
+  MDString *getRawFlags() const { return getOperandAs<MDString>(2); }
+  MDString *getRawSplitDebugFilename() const {
+    return getOperandAs<MDString>(3);
+  }
+  Metadata *getRawEnumTypes() const { return getOperand(4); }
+  Metadata *getRawRetainedTypes() const { return getOperand(5); }
+  Metadata *getRawGlobalVariables() const { return getOperand(6); }
+  Metadata *getRawImportedEntities() const { return getOperand(7); }
+  Metadata *getRawMacros() const { return getOperand(8); }
+
+  /// Replace arrays.
+  ///
+  /// If this \a isUniqued() and not \a isResolved(), it will be RAUW'ed and
+  /// deleted on a uniquing collision.  In practice, uniquing collisions on \a
+  /// DICompileUnit should be fairly rare.
+  /// @{
+  void replaceEnumTypes(DICompositeTypeArray N) {
+    replaceOperandWith(4, N.get());
+  }
+  void replaceRetainedTypes(DITypeArray N) {
+    replaceOperandWith(5, N.get());
+  }
+  void replaceGlobalVariables(DIGlobalVariableExpressionArray N) {
+    replaceOperandWith(6, N.get());
+  }
+  void replaceImportedEntities(DIImportedEntityArray N) {
+    replaceOperandWith(7, N.get());
+  }
+  void replaceMacros(DIMacroNodeArray N) { replaceOperandWith(8, N.get()); }
+  /// @}
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DICompileUnitKind;
+  }
+};
+
+/// A scope for locals.
+///
+/// A legal scope for lexical blocks, local variables, and debug info
+/// locations.  Subclasses are \a DISubprogram, \a DILexicalBlock, and \a
+/// DILexicalBlockFile.
+class DILocalScope : public DIScope {
+protected:
+  DILocalScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+               ArrayRef<Metadata *> Ops)
+      : DIScope(C, ID, Storage, Tag, Ops) {}
+  ~DILocalScope() = default;
+
+public:
+  /// Get the subprogram for this scope.
+  ///
+  /// Return this if it's a \a DISubprogram; otherwise, look up the scope
+  /// chain.
+  DISubprogram *getSubprogram() const;
+
+  /// Get the first non-DILexicalBlockFile scope of this scope.
+  ///
+  /// Return this if it's not a \a DILexicalBlockFile; otherwise, look up the
+  /// scope chain.
+  DILocalScope *getNonLexicalBlockFileScope() const;
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DISubprogramKind ||
+           MD->getMetadataID() == DILexicalBlockKind ||
+           MD->getMetadataID() == DILexicalBlockFileKind;
+  }
+};
+
+/// Debug location.
+///
+/// A debug location in source code, used for debug info and otherwise.
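+///
+/// In textual IR (illustrative operands):
+/// \code
+///   !DILocation(line: 2, column: 7, scope: !4, inlinedAt: !6)
+/// \endcode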
+class DILocation : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
+             unsigned Column, ArrayRef<Metadata *> MDs);
+  ~DILocation() { dropAllReferences(); }
+
+  static DILocation *getImpl(LLVMContext &Context, unsigned Line,
+                             unsigned Column, Metadata *Scope,
+                             Metadata *InlinedAt, StorageType Storage,
+                             bool ShouldCreate = true);
+  static DILocation *getImpl(LLVMContext &Context, unsigned Line,
+                             unsigned Column, DILocalScope *Scope,
+                             DILocation *InlinedAt, StorageType Storage,
+                             bool ShouldCreate = true) {
+    return getImpl(Context, Line, Column, static_cast<Metadata *>(Scope),
+                   static_cast<Metadata *>(InlinedAt), Storage, ShouldCreate);
+  }
+
+  /// Given an unsigned int \p U, encode it in up to 13 bits:
+  /// old_bit 1~5  --> new_bit 1~5
+  /// old_bit 6~12 --> new_bit 7~13
+  /// new_bit_6 is 0 if higher bits (7~13) are all 0
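+  ///
+  /// For example (a worked check of the mapping): U = 0x25 encodes to
+  /// ((0x25 & 0xfe0) << 1) | (0x25 & 0x1f) | 0x20 == 0x65, and
+  /// \a getUnsignedFromPrefixEncoding(0x65) recovers 0x25.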
+  static unsigned getPrefixEncodingFromUnsigned(unsigned U) {
+    U &= 0xfff;
+    return U > 0x1f ? (((U & 0xfe0) << 1) | (U & 0x1f) | 0x20) : U;
+  }
+
+  /// The inverse of \a getPrefixEncodingFromUnsigned().
+  static unsigned getUnsignedFromPrefixEncoding(unsigned U) {
+    return (U & 0x20) ? (((U >> 1) & 0xfe0) | (U & 0x1f)) : (U & 0x1f);
+  }
+
+  /// Returns the next component stored in the discriminator.
+  static unsigned getNextComponentInDiscriminator(unsigned D) {
+    if ((D & 1) == 0)
+      return D >> ((D & 0x40) ? 14 : 7);
+    else
+      return D >> 1;
+  }
+
+  TempDILocation cloneImpl() const {
+    // Get the raw scope/inlinedAt since it is possible to invoke this on
+    // a DILocation containing temporary metadata.
+    return getTemporary(getContext(), getLine(), getColumn(), getRawScope(),
+                        getRawInlinedAt());
+  }
+
+public:
+  // Disallow replacing operands.
+  void replaceOperandWith(unsigned I, Metadata *New) = delete;
+
+  DEFINE_MDNODE_GET(DILocation,
+                    (unsigned Line, unsigned Column, Metadata *Scope,
+                     Metadata *InlinedAt = nullptr),
+                    (Line, Column, Scope, InlinedAt))
+  DEFINE_MDNODE_GET(DILocation,
+                    (unsigned Line, unsigned Column, DILocalScope *Scope,
+                     DILocation *InlinedAt = nullptr),
+                    (Line, Column, Scope, InlinedAt))
+
+  /// Return a (temporary) clone of this.
+  TempDILocation clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return SubclassData32; }
+  unsigned getColumn() const { return SubclassData16; }
+  DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }
+
+  DILocation *getInlinedAt() const {
+    return cast_or_null<DILocation>(getRawInlinedAt());
+  }
+
+  DIFile *getFile() const { return getScope()->getFile(); }
+  StringRef getFilename() const { return getScope()->getFilename(); }
+  StringRef getDirectory() const { return getScope()->getDirectory(); }
+  Optional<StringRef> getSource() const { return getScope()->getSource(); }
+
+  /// Get the scope where this is inlined.
+  ///
+  /// Walk through \a getInlinedAt() and return \a getScope() from the deepest
+  /// location.
+  DILocalScope *getInlinedAtScope() const {
+    if (auto *IA = getInlinedAt())
+      return IA->getInlinedAtScope();
+    return getScope();
+  }
+
+  /// Check whether this can be discriminated from another location.
+  ///
+  /// Check whether \c this can be discriminated from \c RHS in a linetable
+  /// entry. Scope and inlined-at chains are not recorded in the linetable, so
+  /// they cannot be used to distinguish basic blocks.
+  bool canDiscriminate(const DILocation &RHS) const {
+    return getLine() != RHS.getLine() ||
+           getColumn() != RHS.getColumn() ||
+           getDiscriminator() != RHS.getDiscriminator() ||
+           getFilename() != RHS.getFilename() ||
+           getDirectory() != RHS.getDirectory();
+  }
+
+  /// Get the DWARF discriminator.
+  ///
+  /// DWARF discriminators distinguish identical file locations between
+  /// instructions that are on different basic blocks.
+  ///
+  /// There are three components stored in the discriminator, starting from
+  /// the lowest bits:
+  ///
+  /// Base discriminator: assigned by the AddDiscriminators pass to
+  ///                     distinguish IR that comes from the same source
+  ///                     line but different basic blocks.
+  /// Duplication factor: assigned by optimizations that will scale down
+  ///                     the execution frequency of the original IR.
+  /// Copy identifier: assigned by optimizations that clone the IR.
+  ///                  Each copy of the IR is assigned an identifier.
+  ///
+  /// Encoding:
+  ///
+  /// The three components above are encoded, in order, into a 32-bit
+  /// unsigned integer. If the lowest bit is 1, the current component is
+  /// empty, and the next component starts in the next bit. Otherwise, the
+  /// current component is non-empty, and its content starts in the next
+  /// bit. Each component is either 5 or 12 bits long: if the 7th bit is 0,
+  /// bits 2~6 (5 bits) represent the component; if the 7th bit is 1, bits
+  /// 2~6 (5 bits) and bits 8~14 (7 bits) are combined to represent the
+  /// component.
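+  ///
+  /// For example (a worked sketch): encoding only a base discriminator of 3
+  /// yields (3 << 1) = 0x6. Decoding 0x6: the low bit is 0, so the first
+  /// component is non-empty and holds 3; the remaining components decode as
+  /// a duplication factor of 1 and a copy identifier of 0.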
+
+  inline unsigned getDiscriminator() const;
+
+  /// Returns a new DILocation with updated \p Discriminator.
+  inline const DILocation *cloneWithDiscriminator(unsigned Discriminator) const;
+
+  /// Returns a new DILocation with updated base discriminator \p BD.
+  inline const DILocation *setBaseDiscriminator(unsigned BD) const;
+
+  /// Returns the duplication factor stored in the discriminator.
+  inline unsigned getDuplicationFactor() const;
+
+  /// Returns the copy identifier stored in the discriminator.
+  inline unsigned getCopyIdentifier() const;
+
+  /// Returns the base discriminator stored in the discriminator.
+  inline unsigned getBaseDiscriminator() const;
+
+  /// Returns a new DILocation with duplication factor \p DF encoded in the
+  /// discriminator.
+  inline const DILocation *cloneWithDuplicationFactor(unsigned DF) const;
+
+  /// When two instructions are combined into a single instruction we also
+  /// need to combine the original locations into a single location.
+  ///
+  /// When the locations are the same we can use either location. When they
+  /// differ, we need a third location which is distinct from either. If they
+  /// have the same file/line but different discriminators, we could create a
+  /// location with a new discriminator. If they are from different files or
+  /// lines, the location is ambiguous and can't be represented in a single
+  /// line entry.  In that case no location should be set, unless the merged
+  /// instruction is a call: then the merged debug location is set to line 0
+  /// of the nearest common scope the two locations are inlined from. This
+  /// only applies to Instruction; MachineInstruction is post-inline, so a
+  /// call instruction is treated the same way as any other instruction.
+  ///
+  /// \p ForInst: The Instruction the merged DILocation is for. If the
+  /// Instruction is unavailable or non-existent, use nullptr.
+  static const DILocation *
+  getMergedLocation(const DILocation *LocA, const DILocation *LocB,
+                    const Instruction *ForInst = nullptr);
+
+  /// Returns the base discriminator for a given encoded discriminator \p D.
+  static unsigned getBaseDiscriminatorFromDiscriminator(unsigned D) {
+    if ((D & 1) == 0)
+      return getUnsignedFromPrefixEncoding(D >> 1);
+    else
+      return 0;
+  }
+
+  /// Returns the duplication factor for a given encoded discriminator \p D.
+  static unsigned getDuplicationFactorFromDiscriminator(unsigned D) {
+    D = getNextComponentInDiscriminator(D);
+    if (D == 0 || (D & 1))
+      return 1;
+    else
+      return getUnsignedFromPrefixEncoding(D >> 1);
+  }
+
+  /// Returns the copy identifier for a given encoded discriminator \p D.
+  static unsigned getCopyIdentifierFromDiscriminator(unsigned D) {
+    return getUnsignedFromPrefixEncoding(getNextComponentInDiscriminator(
+        getNextComponentInDiscriminator(D)));
+  }
+
+  Metadata *getRawScope() const { return getOperand(0); }
+  Metadata *getRawInlinedAt() const {
+    if (getNumOperands() == 2)
+      return getOperand(1);
+    return nullptr;
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILocationKind;
+  }
+};
+
+/// Subprogram description.
+///
+/// TODO: Remove DisplayName.  It's always equal to Name.
+/// TODO: Split up flags.
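+///
+/// A subprogram definition is \c distinct in textual IR and might look like
+/// (illustrative operands):
+/// \code
+///   distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 4,
+///                          type: !7, isDefinition: true, scopeLine: 4,
+///                          unit: !0)
+/// \endcode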
+class DISubprogram : public DILocalScope {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+  unsigned ScopeLine;
+  unsigned VirtualIndex;
+
+  /// In the MS ABI, the implicit 'this' parameter is adjusted in the prologue
+  /// of method overrides from secondary bases by this amount. It may be
+  /// negative.
+  int ThisAdjustment;
+
+  // Virtuality can only assume three values, so we can pack
+  // it in 2 bits (none/virtual/pure_virtual).
+  unsigned Virtuality : 2;
+
+  // These are boolean flags so one bit is enough.
+  // MSVC starts a new container field every time the base
+  // type changes so we can't use 'bool' to ensure these bits
+  // are packed.
+  unsigned IsLocalToUnit : 1;
+  unsigned IsDefinition : 1;
+  unsigned IsOptimized : 1;
+
+  unsigned Padding : 3;
+
+  DIFlags Flags;
+
+  DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
+               unsigned ScopeLine, unsigned Virtuality, unsigned VirtualIndex,
+               int ThisAdjustment, DIFlags Flags, bool IsLocalToUnit,
+               bool IsDefinition, bool IsOptimized, ArrayRef<Metadata *> Ops)
+      : DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram,
+                     Ops),
+        Line(Line), ScopeLine(ScopeLine), VirtualIndex(VirtualIndex),
+        ThisAdjustment(ThisAdjustment), Virtuality(Virtuality),
+        IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition),
+        IsOptimized(IsOptimized), Flags(Flags) {
+    static_assert(dwarf::DW_VIRTUALITY_max < 4, "Virtuality out of range");
+    assert(Virtuality < 4 && "Virtuality out of range");
+  }
+  ~DISubprogram() = default;
+
+  static DISubprogram *
+  getImpl(LLVMContext &Context, DIScopeRef Scope, StringRef Name,
+          StringRef LinkageName, DIFile *File, unsigned Line,
+          DISubroutineType *Type, bool IsLocalToUnit, bool IsDefinition,
+          unsigned ScopeLine, DITypeRef ContainingType, unsigned Virtuality,
+          unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
+          bool IsOptimized, DICompileUnit *Unit,
+          DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
+          DILocalVariableArray Variables, DITypeArray ThrownTypes,
+          StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+                   getCanonicalMDString(Context, LinkageName), File, Line, Type,
+                   IsLocalToUnit, IsDefinition, ScopeLine, ContainingType,
+                   Virtuality, VirtualIndex, ThisAdjustment, Flags, IsOptimized,
+                   Unit, TemplateParams.get(), Declaration, Variables.get(),
+                   ThrownTypes.get(), Storage, ShouldCreate);
+  }
+  static DISubprogram *
+  getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+          MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+          bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+          Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
+          int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
+          Metadata *TemplateParams, Metadata *Declaration, Metadata *Variables,
+          Metadata *ThrownTypes, StorageType Storage, bool ShouldCreate = true);
+
+  TempDISubprogram cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
+                        getFile(), getLine(), getType(), isLocalToUnit(),
+                        isDefinition(), getScopeLine(), getContainingType(),
+                        getVirtuality(), getVirtualIndex(), getThisAdjustment(),
+                        getFlags(), isOptimized(), getUnit(),
+                        getTemplateParams(), getDeclaration(), getVariables(),
+                        getThrownTypes());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DISubprogram,
+                    (DIScopeRef Scope, StringRef Name, StringRef LinkageName,
+                     DIFile *File, unsigned Line, DISubroutineType *Type,
+                     bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+                     DITypeRef ContainingType, unsigned Virtuality,
+                     unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
+                     bool IsOptimized, DICompileUnit *Unit,
+                     DITemplateParameterArray TemplateParams = nullptr,
+                     DISubprogram *Declaration = nullptr,
+                     DILocalVariableArray Variables = nullptr,
+                     DITypeArray ThrownTypes = nullptr),
+                    (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+                     IsDefinition, ScopeLine, ContainingType, Virtuality,
+                     VirtualIndex, ThisAdjustment, Flags, IsOptimized, Unit,
+                     TemplateParams, Declaration, Variables, ThrownTypes))
+  DEFINE_MDNODE_GET(
+      DISubprogram,
+      (Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
+       unsigned Line, Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
+       unsigned ScopeLine, Metadata *ContainingType, unsigned Virtuality,
+       unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
+       bool IsOptimized, Metadata *Unit, Metadata *TemplateParams = nullptr,
+       Metadata *Declaration = nullptr, Metadata *Variables = nullptr,
+       Metadata *ThrownTypes = nullptr),
+      (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
+       ScopeLine, ContainingType, Virtuality, VirtualIndex, ThisAdjustment,
+       Flags, IsOptimized, Unit, TemplateParams, Declaration, Variables,
+       ThrownTypes))
+
+  TempDISubprogram clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return Line; }
+  unsigned getVirtuality() const { return Virtuality; }
+  unsigned getVirtualIndex() const { return VirtualIndex; }
+  int getThisAdjustment() const { return ThisAdjustment; }
+  unsigned getScopeLine() const { return ScopeLine; }
+  DIFlags getFlags() const { return Flags; }
+  bool isLocalToUnit() const { return IsLocalToUnit; }
+  bool isDefinition() const { return IsDefinition; }
+  bool isOptimized() const { return IsOptimized; }
+
+  bool isArtificial() const { return getFlags() & FlagArtificial; }
+  bool isPrivate() const {
+    return (getFlags() & FlagAccessibility) == FlagPrivate;
+  }
+  bool isProtected() const {
+    return (getFlags() & FlagAccessibility) == FlagProtected;
+  }
+  bool isPublic() const {
+    return (getFlags() & FlagAccessibility) == FlagPublic;
+  }
+  bool isExplicit() const { return getFlags() & FlagExplicit; }
+  bool isPrototyped() const { return getFlags() & FlagPrototyped; }
+  bool isMainSubprogram() const { return getFlags() & FlagMainSubprogram; }
+
+  /// Check if this is reference-qualified.
+  ///
+  /// Return true if this subprogram is a C++11 reference-qualified non-static
+  /// member function (void foo() &).
+  bool isLValueReference() const { return getFlags() & FlagLValueReference; }
+
+  /// Check if this is rvalue-reference-qualified.
+  ///
+  /// Return true if this subprogram is a C++11 rvalue-reference-qualified
+  /// non-static member function (void foo() &&).
+  bool isRValueReference() const { return getFlags() & FlagRValueReference; }
+
+  /// Check if this is marked as noreturn.
+  ///
+  /// Return true if this subprogram is C++11 noreturn or C11 _Noreturn.
+  bool isNoReturn() const { return getFlags() & FlagNoReturn; }
+
+  DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
+
+  StringRef getName() const { return getStringOperand(2); }
+  StringRef getLinkageName() const { return getStringOperand(3); }
+
+  DISubroutineType *getType() const {
+    return cast_or_null<DISubroutineType>(getRawType());
+  }
+  DITypeRef getContainingType() const {
+    return DITypeRef(getRawContainingType());
+  }
+
+  DICompileUnit *getUnit() const {
+    return cast_or_null<DICompileUnit>(getRawUnit());
+  }
+  void replaceUnit(DICompileUnit *CU) { replaceOperandWith(5, CU); }
+  DITemplateParameterArray getTemplateParams() const {
+    return cast_or_null<MDTuple>(getRawTemplateParams());
+  }
+  DISubprogram *getDeclaration() const {
+    return cast_or_null<DISubprogram>(getRawDeclaration());
+  }
+  DILocalVariableArray getVariables() const {
+    return cast_or_null<MDTuple>(getRawVariables());
+  }
+  DITypeArray getThrownTypes() const {
+    return cast_or_null<MDTuple>(getRawThrownTypes());
+  }
+
+  Metadata *getRawScope() const { return getOperand(1); }
+  MDString *getRawName() const { return getOperandAs<MDString>(2); }
+  MDString *getRawLinkageName() const { return getOperandAs<MDString>(3); }
+  Metadata *getRawType() const { return getOperand(4); }
+  Metadata *getRawUnit() const { return getOperand(5); }
+  Metadata *getRawDeclaration() const { return getOperand(6); }
+  Metadata *getRawVariables() const { return getOperand(7); }
+  Metadata *getRawContainingType() const {
+    return getNumOperands() > 8 ? getOperandAs<Metadata>(8) : nullptr;
+  }
+  Metadata *getRawTemplateParams() const {
+    return getNumOperands() > 9 ? getOperandAs<Metadata>(9) : nullptr;
+  }
+  Metadata *getRawThrownTypes() const {
+    return getNumOperands() > 10 ? getOperandAs<Metadata>(10) : nullptr;
+  }
+
+  /// Check if this subprogram describes the given function.
+  ///
+  /// FIXME: Should this be looking through bitcasts?
+  bool describes(const Function *F) const;
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DISubprogramKind;
+  }
+};
+
+class DILexicalBlockBase : public DILocalScope {
+protected:
+  DILexicalBlockBase(LLVMContext &C, unsigned ID, StorageType Storage,
+                     ArrayRef<Metadata *> Ops)
+      : DILocalScope(C, ID, Storage, dwarf::DW_TAG_lexical_block, Ops) {}
+  ~DILexicalBlockBase() = default;
+
+public:
+  DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }
+
+  Metadata *getRawScope() const { return getOperand(1); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILexicalBlockKind ||
+           MD->getMetadataID() == DILexicalBlockFileKind;
+  }
+};
+
+class DILexicalBlock : public DILexicalBlockBase {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+  uint16_t Column;
+
+  DILexicalBlock(LLVMContext &C, StorageType Storage, unsigned Line,
+                 unsigned Column, ArrayRef<Metadata *> Ops)
+      : DILexicalBlockBase(C, DILexicalBlockKind, Storage, Ops), Line(Line),
+        Column(Column) {
+    assert(Column < (1u << 16) && "Expected 16-bit column");
+  }
+  ~DILexicalBlock() = default;
+
+  static DILexicalBlock *getImpl(LLVMContext &Context, DILocalScope *Scope,
+                                 DIFile *File, unsigned Line, unsigned Column,
+                                 StorageType Storage,
+                                 bool ShouldCreate = true) {
+    return getImpl(Context, static_cast<Metadata *>(Scope),
+                   static_cast<Metadata *>(File), Line, Column, Storage,
+                   ShouldCreate);
+  }
+
+  static DILexicalBlock *getImpl(LLVMContext &Context, Metadata *Scope,
+                                 Metadata *File, unsigned Line, unsigned Column,
+                                 StorageType Storage, bool ShouldCreate = true);
+
+  TempDILexicalBlock cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getFile(), getLine(),
+                        getColumn());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DILexicalBlock, (DILocalScope * Scope, DIFile *File,
+                                     unsigned Line, unsigned Column),
+                    (Scope, File, Line, Column))
+  DEFINE_MDNODE_GET(DILexicalBlock, (Metadata * Scope, Metadata *File,
+                                     unsigned Line, unsigned Column),
+                    (Scope, File, Line, Column))
+
+  TempDILexicalBlock clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return Line; }
+  unsigned getColumn() const { return Column; }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILexicalBlockKind;
+  }
+};
+
+class DILexicalBlockFile : public DILexicalBlockBase {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Discriminator;
+
+  DILexicalBlockFile(LLVMContext &C, StorageType Storage,
+                     unsigned Discriminator, ArrayRef<Metadata *> Ops)
+      : DILexicalBlockBase(C, DILexicalBlockFileKind, Storage, Ops),
+        Discriminator(Discriminator) {}
+  ~DILexicalBlockFile() = default;
+
+  static DILexicalBlockFile *getImpl(LLVMContext &Context, DILocalScope *Scope,
+                                     DIFile *File, unsigned Discriminator,
+                                     StorageType Storage,
+                                     bool ShouldCreate = true) {
+    return getImpl(Context, static_cast<Metadata *>(Scope),
+                   static_cast<Metadata *>(File), Discriminator, Storage,
+                   ShouldCreate);
+  }
+
+  static DILexicalBlockFile *getImpl(LLVMContext &Context, Metadata *Scope,
+                                     Metadata *File, unsigned Discriminator,
+                                     StorageType Storage,
+                                     bool ShouldCreate = true);
+
+  TempDILexicalBlockFile cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getFile(),
+                        getDiscriminator());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DILexicalBlockFile, (DILocalScope * Scope, DIFile *File,
+                                         unsigned Discriminator),
+                    (Scope, File, Discriminator))
+  DEFINE_MDNODE_GET(DILexicalBlockFile,
+                    (Metadata * Scope, Metadata *File, unsigned Discriminator),
+                    (Scope, File, Discriminator))
+
+  TempDILexicalBlockFile clone() const { return cloneImpl(); }
+
+  // TODO: Remove these once they're gone from DILexicalBlockBase.
+  unsigned getLine() const = delete;
+  unsigned getColumn() const = delete;
+
+  unsigned getDiscriminator() const { return Discriminator; }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILexicalBlockFileKind;
+  }
+};
+
+unsigned DILocation::getDiscriminator() const {
+  if (auto *F = dyn_cast<DILexicalBlockFile>(getScope()))
+    return F->getDiscriminator();
+  return 0;
+}
+
+const DILocation *
+DILocation::cloneWithDiscriminator(unsigned Discriminator) const {
+  DIScope *Scope = getScope();
+  // Skip all parent DILexicalBlockFiles that already have a discriminator
+  // assigned. We do not want nested DILexicalBlockFiles with multiple
+  // discriminators, because only the leaf DILexicalBlockFile's
+  // discriminator will be used.
+  for (auto *LBF = dyn_cast<DILexicalBlockFile>(Scope);
+       LBF && LBF->getDiscriminator() != 0;
+       LBF = dyn_cast<DILexicalBlockFile>(Scope))
+    Scope = LBF->getScope();
+  DILexicalBlockFile *NewScope =
+      DILexicalBlockFile::get(getContext(), Scope, getFile(), Discriminator);
+  return DILocation::get(getContext(), getLine(), getColumn(), NewScope,
+                         getInlinedAt());
+}
+
+unsigned DILocation::getBaseDiscriminator() const {
+  return getBaseDiscriminatorFromDiscriminator(getDiscriminator());
+}
+
+unsigned DILocation::getDuplicationFactor() const {
+  return getDuplicationFactorFromDiscriminator(getDiscriminator());
+}
+
+unsigned DILocation::getCopyIdentifier() const {
+  return getCopyIdentifierFromDiscriminator(getDiscriminator());
+}
+
+const DILocation *DILocation::setBaseDiscriminator(unsigned D) const {
+  if (D == 0)
+    return this;
+  else
+    return cloneWithDiscriminator(getPrefixEncodingFromUnsigned(D) << 1);
+}
+
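+// Worked example (illustrative): starting from a location with no
+// discriminator, cloneWithDuplicationFactor(2) encodes an empty base
+// discriminator (low bit set), a duplication factor of 2, and an empty copy
+// identifier, producing (getPrefixEncodingFromUnsigned(2) << 2) | 1 == 0x9.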
+const DILocation *DILocation::cloneWithDuplicationFactor(unsigned DF) const {
+  DF *= getDuplicationFactor();
+  if (DF <= 1)
+    return this;
+
+  unsigned BD = getBaseDiscriminator();
+  unsigned CI = getCopyIdentifier() << (DF > 0x1f ? 14 : 7);
+  unsigned D = CI | (getPrefixEncodingFromUnsigned(DF) << 1);
+
+  if (BD == 0)
+    D = (D << 1) | 1;
+  else
+    D = (D << (BD > 0x1f ? 14 : 7)) | (getPrefixEncodingFromUnsigned(BD) << 1);
+
+  return cloneWithDiscriminator(D);
+}
+
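+/// Debug info for C++ namespaces.
+///
+/// In textual IR a namespace looks roughly like (illustrative operands):
+/// \code
+///   !DINamespace(name: "N", scope: !2)
+/// \endcode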
+class DINamespace : public DIScope {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned ExportSymbols : 1;
+
+  DINamespace(LLVMContext &Context, StorageType Storage, bool ExportSymbols,
+              ArrayRef<Metadata *> Ops)
+      : DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace,
+                Ops),
+        ExportSymbols(ExportSymbols) {}
+  ~DINamespace() = default;
+
+  static DINamespace *getImpl(LLVMContext &Context, DIScope *Scope,
+                              StringRef Name, bool ExportSymbols,
+                              StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+                   ExportSymbols, Storage, ShouldCreate);
+  }
+  static DINamespace *getImpl(LLVMContext &Context, Metadata *Scope,
+                              MDString *Name, bool ExportSymbols,
+                              StorageType Storage, bool ShouldCreate = true);
+
+  TempDINamespace cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getName(),
+                        getExportSymbols());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DINamespace,
+                    (DIScope *Scope, StringRef Name, bool ExportSymbols),
+                    (Scope, Name, ExportSymbols))
+  DEFINE_MDNODE_GET(DINamespace,
+                    (Metadata *Scope, MDString *Name, bool ExportSymbols),
+                    (Scope, Name, ExportSymbols))
+
+  TempDINamespace clone() const { return cloneImpl(); }
+
+  bool getExportSymbols() const { return ExportSymbols; }
+  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+  StringRef getName() const { return getStringOperand(2); }
+
+  Metadata *getRawScope() const { return getOperand(1); }
+  MDString *getRawName() const { return getOperandAs<MDString>(2); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DINamespaceKind;
+  }
+};
+
+/// A (clang) module that has been imported by the compile unit.
+///
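+/// In textual IR (illustrative operands; field names assume this vintage of
+/// the IR parser):
+/// \code
+///   !DIModule(scope: !0, name: "Mod", configMacros: "-DNDEBUG",
+///             includePath: "/usr/include", isysroot: "/")
+/// \endcode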
+class DIModule : public DIScope {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  DIModule(LLVMContext &Context, StorageType Storage, ArrayRef<Metadata *> Ops)
+      : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops) {}
+  ~DIModule() = default;
+
+  static DIModule *getImpl(LLVMContext &Context, DIScope *Scope,
+                           StringRef Name, StringRef ConfigurationMacros,
+                           StringRef IncludePath, StringRef ISysRoot,
+                           StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+                   getCanonicalMDString(Context, ConfigurationMacros),
+                   getCanonicalMDString(Context, IncludePath),
+                   getCanonicalMDString(Context, ISysRoot),
+                   Storage, ShouldCreate);
+  }
+  static DIModule *getImpl(LLVMContext &Context, Metadata *Scope,
+                           MDString *Name, MDString *ConfigurationMacros,
+                           MDString *IncludePath, MDString *ISysRoot,
+                           StorageType Storage, bool ShouldCreate = true);
+
+  TempDIModule cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getName(),
+                        getConfigurationMacros(), getIncludePath(),
+                        getISysRoot());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIModule,
+                    (DIScope *Scope, StringRef Name,
+                     StringRef ConfigurationMacros, StringRef IncludePath,
+                     StringRef ISysRoot),
+                    (Scope, Name, ConfigurationMacros, IncludePath, ISysRoot))
+  DEFINE_MDNODE_GET(DIModule,
+                    (Metadata *Scope, MDString *Name,
+                     MDString *ConfigurationMacros, MDString *IncludePath,
+                     MDString *ISysRoot),
+                    (Scope, Name, ConfigurationMacros, IncludePath, ISysRoot))
+
+  TempDIModule clone() const { return cloneImpl(); }
+
+  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+  StringRef getName() const { return getStringOperand(1); }
+  StringRef getConfigurationMacros() const { return getStringOperand(2); }
+  StringRef getIncludePath() const { return getStringOperand(3); }
+  StringRef getISysRoot() const { return getStringOperand(4); }
+
+  Metadata *getRawScope() const { return getOperand(0); }
+  MDString *getRawName() const { return getOperandAs<MDString>(1); }
+  MDString *getRawConfigurationMacros() const {
+    return getOperandAs<MDString>(2);
+  }
+  MDString *getRawIncludePath() const { return getOperandAs<MDString>(3); }
+  MDString *getRawISysRoot() const { return getOperandAs<MDString>(4); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIModuleKind;
+  }
+};
+
+/// Base class for template parameters.
+class DITemplateParameter : public DINode {
+protected:
+  DITemplateParameter(LLVMContext &Context, unsigned ID, StorageType Storage,
+                      unsigned Tag, ArrayRef<Metadata *> Ops)
+      : DINode(Context, ID, Storage, Tag, Ops) {}
+  ~DITemplateParameter() = default;
+
+public:
+  StringRef getName() const { return getStringOperand(0); }
+  DITypeRef getType() const { return DITypeRef(getRawType()); }
+
+  MDString *getRawName() const { return getOperandAs<MDString>(0); }
+  Metadata *getRawType() const { return getOperand(1); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DITemplateTypeParameterKind ||
+           MD->getMetadataID() == DITemplateValueParameterKind;
+  }
+};
+
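+/// A template type parameter, e.g. \c T in \c template<typename T>.
+///
+/// In textual IR (illustrative operands):
+/// \code
+///   !DITemplateTypeParameter(name: "T", type: !9)
+/// \endcode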
+class DITemplateTypeParameter : public DITemplateParameter {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  DITemplateTypeParameter(LLVMContext &Context, StorageType Storage,
+                          ArrayRef<Metadata *> Ops)
+      : DITemplateParameter(Context, DITemplateTypeParameterKind, Storage,
+                            dwarf::DW_TAG_template_type_parameter, Ops) {}
+  ~DITemplateTypeParameter() = default;
+
+  static DITemplateTypeParameter *getImpl(LLVMContext &Context, StringRef Name,
+                                          DITypeRef Type, StorageType Storage,
+                                          bool ShouldCreate = true) {
+    return getImpl(Context, getCanonicalMDString(Context, Name), Type, Storage,
+                   ShouldCreate);
+  }
+  static DITemplateTypeParameter *getImpl(LLVMContext &Context, MDString *Name,
+                                          Metadata *Type, StorageType Storage,
+                                          bool ShouldCreate = true);
+
+  TempDITemplateTypeParameter cloneImpl() const {
+    return getTemporary(getContext(), getName(), getType());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DITemplateTypeParameter, (StringRef Name, DITypeRef Type),
+                    (Name, Type))
+  DEFINE_MDNODE_GET(DITemplateTypeParameter, (MDString * Name, Metadata *Type),
+                    (Name, Type))
+
+  TempDITemplateTypeParameter clone() const { return cloneImpl(); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DITemplateTypeParameterKind;
+  }
+};
+
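+/// A template value parameter, e.g. \c N in \c template<int N>.
+///
+/// In textual IR (illustrative operands):
+/// \code
+///   !DITemplateValueParameter(name: "N", type: !9, value: i32 7)
+/// \endcode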
+class DITemplateValueParameter : public DITemplateParameter {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  DITemplateValueParameter(LLVMContext &Context, StorageType Storage,
+                           unsigned Tag, ArrayRef<Metadata *> Ops)
+      : DITemplateParameter(Context, DITemplateValueParameterKind, Storage, Tag,
+                            Ops) {}
+  ~DITemplateValueParameter() = default;
+
+  static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
+                                           StringRef Name, DITypeRef Type,
+                                           Metadata *Value, StorageType Storage,
+                                           bool ShouldCreate = true) {
+    return getImpl(Context, Tag, getCanonicalMDString(Context, Name), Type,
+                   Value, Storage, ShouldCreate);
+  }
+  static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
+                                           MDString *Name, Metadata *Type,
+                                           Metadata *Value, StorageType Storage,
+                                           bool ShouldCreate = true);
+
+  TempDITemplateValueParameter cloneImpl() const {
+    return getTemporary(getContext(), getTag(), getName(), getType(),
+                        getValue());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DITemplateValueParameter, (unsigned Tag, StringRef Name,
+                                               DITypeRef Type, Metadata *Value),
+                    (Tag, Name, Type, Value))
+  DEFINE_MDNODE_GET(DITemplateValueParameter, (unsigned Tag, MDString *Name,
+                                               Metadata *Type, Metadata *Value),
+                    (Tag, Name, Type, Value))
+
+  TempDITemplateValueParameter clone() const { return cloneImpl(); }
+
+  Metadata *getValue() const { return getOperand(2); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DITemplateValueParameterKind;
+  }
+};
+
+/// Base class for variables.
+class DIVariable : public DINode {
+  unsigned Line;
+  uint32_t AlignInBits;
+
+protected:
+  DIVariable(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Line,
+             ArrayRef<Metadata *> Ops, uint32_t AlignInBits = 0)
+      : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line),
+        AlignInBits(AlignInBits) {}
+  ~DIVariable() = default;
+
+public:
+  unsigned getLine() const { return Line; }
+  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+  StringRef getName() const { return getStringOperand(1); }
+  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+  DITypeRef getType() const { return DITypeRef(getRawType()); }
+  uint32_t getAlignInBits() const { return AlignInBits; }
+  uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
+  /// Determines the size of the variable's type.
+  Optional<uint64_t> getSizeInBits() const;
+
+  StringRef getFilename() const {
+    if (auto *F = getFile())
+      return F->getFilename();
+    return "";
+  }
+
+  StringRef getDirectory() const {
+    if (auto *F = getFile())
+      return F->getDirectory();
+    return "";
+  }
+
+  Optional<StringRef> getSource() const {
+    if (auto *F = getFile())
+      return F->getSource();
+    return None;
+  }
+
+  Metadata *getRawScope() const { return getOperand(0); }
+  MDString *getRawName() const { return getOperandAs<MDString>(1); }
+  Metadata *getRawFile() const { return getOperand(2); }
+  Metadata *getRawType() const { return getOperand(3); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILocalVariableKind ||
+           MD->getMetadataID() == DIGlobalVariableKind;
+  }
+};
+
+/// DWARF expression.
+///
+/// This is (almost) a DWARF expression that modifies the location of a
+/// variable, or the location of a single piece of a variable, or (when using
+/// DW_OP_stack_value) computes the variable's constant value.
+///
+/// TODO: Co-allocate the expression elements.
+/// TODO: Separate from MDNode, or otherwise drop Distinct and Temporary
+/// storage types.
+class DIExpression : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  std::vector<uint64_t> Elements;
+
+  DIExpression(LLVMContext &C, StorageType Storage, ArrayRef<uint64_t> Elements)
+      : MDNode(C, DIExpressionKind, Storage, None),
+        Elements(Elements.begin(), Elements.end()) {}
+  ~DIExpression() = default;
+
+  static DIExpression *getImpl(LLVMContext &Context,
+                               ArrayRef<uint64_t> Elements, StorageType Storage,
+                               bool ShouldCreate = true);
+
+  TempDIExpression cloneImpl() const {
+    return getTemporary(getContext(), getElements());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIExpression, (ArrayRef<uint64_t> Elements), (Elements))
+
+  TempDIExpression clone() const { return cloneImpl(); }
+
+  ArrayRef<uint64_t> getElements() const { return Elements; }
+
+  unsigned getNumElements() const { return Elements.size(); }
+
+  uint64_t getElement(unsigned I) const {
+    assert(I < Elements.size() && "Index out of range");
+    return Elements[I];
+  }
+
+  /// Determine whether this represents a standalone constant value.
+  bool isConstant() const;
+
+  using element_iterator = ArrayRef<uint64_t>::iterator;
+
+  element_iterator elements_begin() const { return getElements().begin(); }
+  element_iterator elements_end() const { return getElements().end(); }
+
+  /// A lightweight wrapper around an expression operand.
+  ///
+  /// TODO: Store arguments directly and change \a DIExpression to store a
+  /// range of these.
+  class ExprOperand {
+    const uint64_t *Op = nullptr;
+
+  public:
+    ExprOperand() = default;
+    explicit ExprOperand(const uint64_t *Op) : Op(Op) {}
+
+    const uint64_t *get() const { return Op; }
+
+    /// Get the operand code.
+    uint64_t getOp() const { return *Op; }
+
+    /// Get an argument to the operand.
+    ///
+    /// Never returns the operand itself.
+    uint64_t getArg(unsigned I) const { return Op[I + 1]; }
+
+    unsigned getNumArgs() const { return getSize() - 1; }
+
+    /// Return the size of the operand.
+    ///
+    /// Return the number of elements in the operand (1 + args).
+    unsigned getSize() const;
+  };
+
+  /// An iterator for expression operands.
+  class expr_op_iterator
+      : public std::iterator<std::input_iterator_tag, ExprOperand> {
+    ExprOperand Op;
+
+  public:
+    expr_op_iterator() = default;
+    explicit expr_op_iterator(element_iterator I) : Op(I) {}
+
+    element_iterator getBase() const { return Op.get(); }
+    const ExprOperand &operator*() const { return Op; }
+    const ExprOperand *operator->() const { return &Op; }
+
+    expr_op_iterator &operator++() {
+      increment();
+      return *this;
+    }
+    expr_op_iterator operator++(int) {
+      expr_op_iterator T(*this);
+      increment();
+      return T;
+    }
+
+    /// Get the next iterator.
+    ///
+    /// \a std::next() doesn't work because this is technically an
+    /// input_iterator, but it's a perfectly valid operation.  This is an
+    /// accessor to provide the same functionality.
+    expr_op_iterator getNext() const { return ++expr_op_iterator(*this); }
+
+    bool operator==(const expr_op_iterator &X) const {
+      return getBase() == X.getBase();
+    }
+    bool operator!=(const expr_op_iterator &X) const {
+      return getBase() != X.getBase();
+    }
+
+  private:
+    void increment() { Op = ExprOperand(getBase() + Op.getSize()); }
+  };
+
+  /// Visit the elements via ExprOperand wrappers.
+  ///
+  /// These range iterators visit elements through \a ExprOperand wrappers.
+  /// This is not guaranteed to be a valid range unless \a isValid() gives \c
+  /// true.
+  ///
+  /// \pre \a isValid() gives \c true.
+  /// @{
+  expr_op_iterator expr_op_begin() const {
+    return expr_op_iterator(elements_begin());
+  }
+  expr_op_iterator expr_op_end() const {
+    return expr_op_iterator(elements_end());
+  }
+  iterator_range<expr_op_iterator> expr_ops() const {
+    return {expr_op_begin(), expr_op_end()};
+  }
+  /// @}
+
+  bool isValid() const;
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIExpressionKind;
+  }
+
+  /// Return whether the first element is a DW_OP_deref.
+  bool startsWithDeref() const {
+    return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_deref;
+  }
+
+  /// Holds the characteristics of one fragment of a larger variable.
+  struct FragmentInfo {
+    uint64_t SizeInBits;
+    uint64_t OffsetInBits;
+  };
+
+  /// Retrieve the details of this fragment expression.
+  static Optional<FragmentInfo> getFragmentInfo(expr_op_iterator Start,
+                                                expr_op_iterator End);
+
+  /// Retrieve the details of this fragment expression.
+  Optional<FragmentInfo> getFragmentInfo() const {
+    return getFragmentInfo(expr_op_begin(), expr_op_end());
+  }
+
+  /// Return whether this is a piece of an aggregate variable.
+  bool isFragment() const { return getFragmentInfo().hasValue(); }
+
+  /// Append to \p Ops the operations needed to apply \p Offset.
+  static void appendOffset(SmallVectorImpl<uint64_t> &Ops, int64_t Offset);
+
+  /// If this is a constant offset, extract it. If there is no expression,
+  /// return true with an offset of zero.
+  bool extractIfOffset(int64_t &Offset) const;
+
+  /// Constants for DIExpression::prepend.
+  enum { NoDeref = false, WithDeref = true, WithStackValue = true };
+
+  /// Prepend \p DIExpr with a deref and offset operation and optionally turn it
+  /// into a stack value.
+  static DIExpression *prepend(const DIExpression *DIExpr, bool DerefBefore,
+                               int64_t Offset = 0, bool DerefAfter = false,
+                               bool StackValue = false);
+
+  /// Prepend \p DIExpr with the given opcodes and optionally turn it into a
+  /// stack value.
+  static DIExpression *doPrepend(const DIExpression *DIExpr,
+                                 SmallVectorImpl<uint64_t> &Ops,
+                                 bool StackValue = false);
+
+  /// Create a DIExpression to describe one part of an aggregate variable that
+  /// is fragmented across multiple Values. The DW_OP_LLVM_fragment operation
+  /// will be appended to the elements of \c Expr. If \c Expr already contains
+  /// a \c DW_OP_LLVM_fragment, \c OffsetInBits is interpreted as an offset
+  /// into the existing fragment.
+  ///
+  /// \param OffsetInBits Offset of the piece in bits.
+  /// \param SizeInBits   Size of the piece in bits.
+  /// \return             Creating a fragment expression may fail if \c Expr
+  ///                     contains arithmetic operations that would be truncated.
+  static Optional<DIExpression *>
+  createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits,
+                           unsigned SizeInBits);
+
+  /// Determine the relative position of the fragments described by this
+  /// DIExpression and \p Other.
+  /// Returns -1 if this is entirely before Other, 0 if this and Other overlap,
+  /// 1 if this is entirely after Other.
+  int fragmentCmp(const DIExpression *Other) const {
+    auto Fragment1 = *getFragmentInfo();
+    auto Fragment2 = *Other->getFragmentInfo();
+    uint64_t l1 = Fragment1.OffsetInBits;
+    uint64_t l2 = Fragment2.OffsetInBits;
+    uint64_t r1 = l1 + Fragment1.SizeInBits;
+    uint64_t r2 = l2 + Fragment2.SizeInBits;
+    if (r1 <= l2)
+      return -1;
+    else if (r2 <= l1)
+      return 1;
+    else
+      return 0;
+  }
+
+  /// Check if fragments overlap between this DIExpression and \p Other.
+  bool fragmentsOverlap(const DIExpression *Other) const {
+    if (!isFragment() || !Other->isFragment())
+      return true;
+    return fragmentCmp(Other) == 0;
+  }
+};
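+
+// Illustrative sketch (not part of the upstream header): building an empty
+// expression and deriving a fragment for its low 32 bits via the factory
+// functions declared above.  `Ctx` is an assumed pre-existing LLVMContext.
+//
+//   DIExpression *Empty = DIExpression::get(Ctx, {});
+//   if (Optional<DIExpression *> Frag = DIExpression::createFragmentExpression(
+//           Empty, /*OffsetInBits=*/0, /*SizeInBits=*/32))
+//     assert((*Frag)->isFragment() && "expected a DW_OP_LLVM_fragment");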
+
+/// Global variables.
+///
+/// TODO: Remove DisplayName.  It's always equal to Name.
+class DIGlobalVariable : public DIVariable {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  bool IsLocalToUnit;
+  bool IsDefinition;
+
+  DIGlobalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+                   bool IsLocalToUnit, bool IsDefinition, uint32_t AlignInBits,
+                   ArrayRef<Metadata *> Ops)
+      : DIVariable(C, DIGlobalVariableKind, Storage, Line, Ops, AlignInBits),
+        IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
+  ~DIGlobalVariable() = default;
+
+  static DIGlobalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
+                                   StringRef Name, StringRef LinkageName,
+                                   DIFile *File, unsigned Line, DITypeRef Type,
+                                   bool IsLocalToUnit, bool IsDefinition,
+                                   DIDerivedType *StaticDataMemberDeclaration,
+                                   uint32_t AlignInBits, StorageType Storage,
+                                   bool ShouldCreate = true) {
+    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+                   getCanonicalMDString(Context, LinkageName), File, Line, Type,
+                   IsLocalToUnit, IsDefinition, StaticDataMemberDeclaration,
+                   AlignInBits, Storage, ShouldCreate);
+  }
+  static DIGlobalVariable *
+  getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+          MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+          bool IsLocalToUnit, bool IsDefinition,
+          Metadata *StaticDataMemberDeclaration, uint32_t AlignInBits,
+          StorageType Storage, bool ShouldCreate = true);
+
+  TempDIGlobalVariable cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
+                        getFile(), getLine(), getType(), isLocalToUnit(),
+                        isDefinition(), getStaticDataMemberDeclaration(),
+                        getAlignInBits());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIGlobalVariable,
+                    (DIScope * Scope, StringRef Name, StringRef LinkageName,
+                     DIFile *File, unsigned Line, DITypeRef Type,
+                     bool IsLocalToUnit, bool IsDefinition,
+                     DIDerivedType *StaticDataMemberDeclaration,
+                     uint32_t AlignInBits),
+                    (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+                     IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+  DEFINE_MDNODE_GET(DIGlobalVariable,
+                    (Metadata * Scope, MDString *Name, MDString *LinkageName,
+                     Metadata *File, unsigned Line, Metadata *Type,
+                     bool IsLocalToUnit, bool IsDefinition,
+                     Metadata *StaticDataMemberDeclaration,
+                     uint32_t AlignInBits),
+                    (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+                     IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+
+  TempDIGlobalVariable clone() const { return cloneImpl(); }
+
+  bool isLocalToUnit() const { return IsLocalToUnit; }
+  bool isDefinition() const { return IsDefinition; }
+  StringRef getDisplayName() const { return getStringOperand(4); }
+  StringRef getLinkageName() const { return getStringOperand(5); }
+  DIDerivedType *getStaticDataMemberDeclaration() const {
+    return cast_or_null<DIDerivedType>(getRawStaticDataMemberDeclaration());
+  }
+
+  MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
+  Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(6); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIGlobalVariableKind;
+  }
+};
+
+/// Local variable.
+///
+/// TODO: Split up flags.
+class DILocalVariable : public DIVariable {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Arg : 16;
+  DIFlags Flags;
+
+  DILocalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+                  unsigned Arg, DIFlags Flags, uint32_t AlignInBits,
+                  ArrayRef<Metadata *> Ops)
+      : DIVariable(C, DILocalVariableKind, Storage, Line, Ops, AlignInBits),
+        Arg(Arg), Flags(Flags) {
+    assert(Arg < (1 << 16) && "DILocalVariable: Arg out of range");
+  }
+  ~DILocalVariable() = default;
+
+  static DILocalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
+                                  StringRef Name, DIFile *File, unsigned Line,
+                                  DITypeRef Type, unsigned Arg, DIFlags Flags,
+                                  uint32_t AlignInBits, StorageType Storage,
+                                  bool ShouldCreate = true) {
+    return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
+                   Line, Type, Arg, Flags, AlignInBits, Storage, ShouldCreate);
+  }
+  static DILocalVariable *getImpl(LLVMContext &Context, Metadata *Scope,
+                                  MDString *Name, Metadata *File, unsigned Line,
+                                  Metadata *Type, unsigned Arg, DIFlags Flags,
+                                  uint32_t AlignInBits, StorageType Storage,
+                                  bool ShouldCreate = true);
+
+  TempDILocalVariable cloneImpl() const {
+    return getTemporary(getContext(), getScope(), getName(), getFile(),
+                        getLine(), getType(), getArg(), getFlags(),
+                        getAlignInBits());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DILocalVariable,
+                    (DILocalScope * Scope, StringRef Name, DIFile *File,
+                     unsigned Line, DITypeRef Type, unsigned Arg,
+                     DIFlags Flags, uint32_t AlignInBits),
+                    (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits))
+  DEFINE_MDNODE_GET(DILocalVariable,
+                    (Metadata * Scope, MDString *Name, Metadata *File,
+                     unsigned Line, Metadata *Type, unsigned Arg,
+                     DIFlags Flags, uint32_t AlignInBits),
+                    (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits))
+
+  TempDILocalVariable clone() const { return cloneImpl(); }
+
+  /// Get the local scope for this variable.
+  ///
+  /// Variables must be defined in a local scope.
+  DILocalScope *getScope() const {
+    return cast<DILocalScope>(DIVariable::getScope());
+  }
+
+  bool isParameter() const { return Arg; }
+  unsigned getArg() const { return Arg; }
+  DIFlags getFlags() const { return Flags; }
+
+  bool isArtificial() const { return getFlags() & FlagArtificial; }
+  bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
+
+  /// Check that a location is valid for this variable.
+  ///
+  /// Check that \c DL exists, is in the same subprogram, and has the same
+  /// inlined-at location as \c this.  (Otherwise, it's not a valid attachment
+  /// to a \a DbgInfoIntrinsic.)
+  bool isValidLocationForIntrinsic(const DILocation *DL) const {
+    return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DILocalVariableKind;
+  }
+};
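+
+// Illustrative sketch (not part of the upstream header): describing the first
+// formal parameter of a function.  `Ctx`, `SP` (a DISubprogram *), `File`
+// (a DIFile *), and `Ty` (a DITypeRef) are assumed to exist already.
+//
+//   DILocalVariable *Param =
+//       DILocalVariable::get(Ctx, SP, "argc", File, /*Line=*/1, Ty,
+//                            /*Arg=*/1, DINode::FlagZero, /*AlignInBits=*/0);
+//   assert(Param->isParameter() && Param->getArg() == 1);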
+
+class DIObjCProperty : public DINode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+  unsigned Attributes;
+
+  DIObjCProperty(LLVMContext &C, StorageType Storage, unsigned Line,
+                 unsigned Attributes, ArrayRef<Metadata *> Ops)
+      : DINode(C, DIObjCPropertyKind, Storage, dwarf::DW_TAG_APPLE_property,
+               Ops),
+        Line(Line), Attributes(Attributes) {}
+  ~DIObjCProperty() = default;
+
+  static DIObjCProperty *
+  getImpl(LLVMContext &Context, StringRef Name, DIFile *File, unsigned Line,
+          StringRef GetterName, StringRef SetterName, unsigned Attributes,
+          DITypeRef Type, StorageType Storage, bool ShouldCreate = true) {
+    return getImpl(Context, getCanonicalMDString(Context, Name), File, Line,
+                   getCanonicalMDString(Context, GetterName),
+                   getCanonicalMDString(Context, SetterName), Attributes, Type,
+                   Storage, ShouldCreate);
+  }
+  static DIObjCProperty *getImpl(LLVMContext &Context, MDString *Name,
+                                 Metadata *File, unsigned Line,
+                                 MDString *GetterName, MDString *SetterName,
+                                 unsigned Attributes, Metadata *Type,
+                                 StorageType Storage, bool ShouldCreate = true);
+
+  TempDIObjCProperty cloneImpl() const {
+    return getTemporary(getContext(), getName(), getFile(), getLine(),
+                        getGetterName(), getSetterName(), getAttributes(),
+                        getType());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIObjCProperty,
+                    (StringRef Name, DIFile *File, unsigned Line,
+                     StringRef GetterName, StringRef SetterName,
+                     unsigned Attributes, DITypeRef Type),
+                    (Name, File, Line, GetterName, SetterName, Attributes,
+                     Type))
+  DEFINE_MDNODE_GET(DIObjCProperty,
+                    (MDString * Name, Metadata *File, unsigned Line,
+                     MDString *GetterName, MDString *SetterName,
+                     unsigned Attributes, Metadata *Type),
+                    (Name, File, Line, GetterName, SetterName, Attributes,
+                     Type))
+
+  TempDIObjCProperty clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return Line; }
+  unsigned getAttributes() const { return Attributes; }
+  StringRef getName() const { return getStringOperand(0); }
+  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+  StringRef getGetterName() const { return getStringOperand(2); }
+  StringRef getSetterName() const { return getStringOperand(3); }
+  DITypeRef getType() const { return DITypeRef(getRawType()); }
+
+  StringRef getFilename() const {
+    if (auto *F = getFile())
+      return F->getFilename();
+    return "";
+  }
+
+  StringRef getDirectory() const {
+    if (auto *F = getFile())
+      return F->getDirectory();
+    return "";
+  }
+
+  Optional<StringRef> getSource() const {
+    if (auto *F = getFile())
+      return F->getSource();
+    return None;
+  }
+
+  MDString *getRawName() const { return getOperandAs<MDString>(0); }
+  Metadata *getRawFile() const { return getOperand(1); }
+  MDString *getRawGetterName() const { return getOperandAs<MDString>(2); }
+  MDString *getRawSetterName() const { return getOperandAs<MDString>(3); }
+  Metadata *getRawType() const { return getOperand(4); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIObjCPropertyKind;
+  }
+};
+
+/// An imported module (C++ using directive or similar).
+class DIImportedEntity : public DINode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+
+  DIImportedEntity(LLVMContext &C, StorageType Storage, unsigned Tag,
+                   unsigned Line, ArrayRef<Metadata *> Ops)
+      : DINode(C, DIImportedEntityKind, Storage, Tag, Ops), Line(Line) {}
+  ~DIImportedEntity() = default;
+
+  static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
+                                   DIScope *Scope, DINodeRef Entity,
+                                   DIFile *File, unsigned Line, StringRef Name,
+                                   StorageType Storage,
+                                   bool ShouldCreate = true) {
+    return getImpl(Context, Tag, Scope, Entity, File, Line,
+                   getCanonicalMDString(Context, Name), Storage, ShouldCreate);
+  }
+  static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
+                                   Metadata *Scope, Metadata *Entity,
+                                   Metadata *File, unsigned Line,
+                                   MDString *Name, StorageType Storage,
+                                   bool ShouldCreate = true);
+
+  TempDIImportedEntity cloneImpl() const {
+    return getTemporary(getContext(), getTag(), getScope(), getEntity(),
+                        getFile(), getLine(), getName());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIImportedEntity,
+                    (unsigned Tag, DIScope *Scope, DINodeRef Entity,
+                     DIFile *File, unsigned Line, StringRef Name = ""),
+                    (Tag, Scope, Entity, File, Line, Name))
+  DEFINE_MDNODE_GET(DIImportedEntity,
+                    (unsigned Tag, Metadata *Scope, Metadata *Entity,
+                     Metadata *File, unsigned Line, MDString *Name),
+                    (Tag, Scope, Entity, File, Line, Name))
+
+  TempDIImportedEntity clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return Line; }
+  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+  DINodeRef getEntity() const { return DINodeRef(getRawEntity()); }
+  StringRef getName() const { return getStringOperand(2); }
+  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+
+  Metadata *getRawScope() const { return getOperand(0); }
+  Metadata *getRawEntity() const { return getOperand(1); }
+  MDString *getRawName() const { return getOperandAs<MDString>(2); }
+  Metadata *getRawFile() const { return getOperand(3); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIImportedEntityKind;
+  }
+};
+
+/// A pair of DIGlobalVariable and DIExpression.
+class DIGlobalVariableExpression : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  DIGlobalVariableExpression(LLVMContext &C, StorageType Storage,
+                             ArrayRef<Metadata *> Ops)
+      : MDNode(C, DIGlobalVariableExpressionKind, Storage, Ops) {}
+  ~DIGlobalVariableExpression() = default;
+
+  static DIGlobalVariableExpression *
+  getImpl(LLVMContext &Context, Metadata *Variable, Metadata *Expression,
+          StorageType Storage, bool ShouldCreate = true);
+
+  TempDIGlobalVariableExpression cloneImpl() const {
+    return getTemporary(getContext(), getVariable(), getExpression());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIGlobalVariableExpression,
+                    (Metadata * Variable, Metadata *Expression),
+                    (Variable, Expression))
+
+  TempDIGlobalVariableExpression clone() const { return cloneImpl(); }
+
+  Metadata *getRawVariable() const { return getOperand(0); }
+
+  DIGlobalVariable *getVariable() const {
+    return cast_or_null<DIGlobalVariable>(getRawVariable());
+  }
+
+  Metadata *getRawExpression() const { return getOperand(1); }
+
+  DIExpression *getExpression() const {
+    return cast<DIExpression>(getRawExpression());
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIGlobalVariableExpressionKind;
+  }
+};
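+
+// Illustrative sketch (not part of the upstream header): pairing a global
+// variable's debug node with an (empty) location expression.  `Ctx` and `GV`
+// (a DIGlobalVariable *) are assumed to exist already.
+//
+//   DIExpression *Expr = DIExpression::get(Ctx, {});
+//   DIGlobalVariableExpression *GVE =
+//       DIGlobalVariableExpression::get(Ctx, GV, Expr);
+//   assert(GVE->getVariable() == GV && GVE->getExpression() == Expr);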
+
+/// Macro Info DWARF-like metadata node.
+///
+/// A metadata node with a DWARF macro info (i.e., a constant named
+/// \c DW_MACINFO_*, defined in llvm/BinaryFormat/Dwarf.h).  Called \a
+/// DIMacroNode because it's potentially used for non-DWARF output.
+class DIMacroNode : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+protected:
+  DIMacroNode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned MIType,
+              ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None)
+      : MDNode(C, ID, Storage, Ops1, Ops2) {
+    assert(MIType < 1u << 16);
+    SubclassData16 = MIType;
+  }
+  ~DIMacroNode() = default;
+
+  template <class Ty> Ty *getOperandAs(unsigned I) const {
+    return cast_or_null<Ty>(getOperand(I));
+  }
+
+  StringRef getStringOperand(unsigned I) const {
+    if (auto *S = getOperandAs<MDString>(I))
+      return S->getString();
+    return StringRef();
+  }
+
+  static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
+    if (S.empty())
+      return nullptr;
+    return MDString::get(Context, S);
+  }
+
+public:
+  unsigned getMacinfoType() const { return SubclassData16; }
+
+  static bool classof(const Metadata *MD) {
+    switch (MD->getMetadataID()) {
+    default:
+      return false;
+    case DIMacroKind:
+    case DIMacroFileKind:
+      return true;
+    }
+  }
+};
+
+class DIMacro : public DIMacroNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+
+  DIMacro(LLVMContext &C, StorageType Storage, unsigned MIType, unsigned Line,
+          ArrayRef<Metadata *> Ops)
+      : DIMacroNode(C, DIMacroKind, Storage, MIType, Ops), Line(Line) {}
+  ~DIMacro() = default;
+
+  static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
+                          StringRef Name, StringRef Value, StorageType Storage,
+                          bool ShouldCreate = true) {
+    return getImpl(Context, MIType, Line, getCanonicalMDString(Context, Name),
+                   getCanonicalMDString(Context, Value), Storage, ShouldCreate);
+  }
+  static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
+                          MDString *Name, MDString *Value, StorageType Storage,
+                          bool ShouldCreate = true);
+
+  TempDIMacro cloneImpl() const {
+    return getTemporary(getContext(), getMacinfoType(), getLine(), getName(),
+                        getValue());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIMacro, (unsigned MIType, unsigned Line, StringRef Name,
+                              StringRef Value = ""),
+                    (MIType, Line, Name, Value))
+  DEFINE_MDNODE_GET(DIMacro, (unsigned MIType, unsigned Line, MDString *Name,
+                              MDString *Value),
+                    (MIType, Line, Name, Value))
+
+  TempDIMacro clone() const { return cloneImpl(); }
+
+  unsigned getLine() const { return Line; }
+
+  StringRef getName() const { return getStringOperand(0); }
+  StringRef getValue() const { return getStringOperand(1); }
+
+  MDString *getRawName() const { return getOperandAs<MDString>(0); }
+  MDString *getRawValue() const { return getOperandAs<MDString>(1); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIMacroKind;
+  }
+};
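+
+// Illustrative sketch (not part of the upstream header): recording a
+// `#define FOO 1` seen on line 1.  `Ctx` is an assumed pre-existing
+// LLVMContext, and the name/value split shown here is an assumption about
+// how a frontend might populate the node.
+//
+//   DIMacro *M =
+//       DIMacro::get(Ctx, dwarf::DW_MACINFO_define, /*Line=*/1, "FOO", "1");
+//   assert(M->getName() == "FOO" && M->getValue() == "1");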
+
+class DIMacroFile : public DIMacroNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  unsigned Line;
+
+  DIMacroFile(LLVMContext &C, StorageType Storage, unsigned MIType,
+              unsigned Line, ArrayRef<Metadata *> Ops)
+      : DIMacroNode(C, DIMacroFileKind, Storage, MIType, Ops), Line(Line) {}
+  ~DIMacroFile() = default;
+
+  static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
+                              unsigned Line, DIFile *File,
+                              DIMacroNodeArray Elements, StorageType Storage,
+                              bool ShouldCreate = true) {
+    return getImpl(Context, MIType, Line, static_cast<Metadata *>(File),
+                   Elements.get(), Storage, ShouldCreate);
+  }
+
+  static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
+                              unsigned Line, Metadata *File, Metadata *Elements,
+                              StorageType Storage, bool ShouldCreate = true);
+
+  TempDIMacroFile cloneImpl() const {
+    return getTemporary(getContext(), getMacinfoType(), getLine(), getFile(),
+                        getElements());
+  }
+
+public:
+  DEFINE_MDNODE_GET(DIMacroFile, (unsigned MIType, unsigned Line, DIFile *File,
+                                  DIMacroNodeArray Elements),
+                    (MIType, Line, File, Elements))
+  DEFINE_MDNODE_GET(DIMacroFile, (unsigned MIType, unsigned Line,
+                                  Metadata *File, Metadata *Elements),
+                    (MIType, Line, File, Elements))
+
+  TempDIMacroFile clone() const { return cloneImpl(); }
+
+  void replaceElements(DIMacroNodeArray Elements) {
+#ifndef NDEBUG
+    for (DIMacroNode *Op : getElements())
+      assert(is_contained(Elements->operands(), Op) &&
+             "Lost a macro node during macro node list replacement");
+#endif
+    replaceOperandWith(1, Elements.get());
+  }
+
+  unsigned getLine() const { return Line; }
+  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+
+  DIMacroNodeArray getElements() const {
+    return cast_or_null<MDTuple>(getRawElements());
+  }
+
+  Metadata *getRawFile() const { return getOperand(0); }
+  Metadata *getRawElements() const { return getOperand(1); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == DIMacroFileKind;
+  }
+};
+
+} // end namespace llvm
+
+#undef DEFINE_MDNODE_GET_UNPACK_IMPL
+#undef DEFINE_MDNODE_GET_UNPACK
+#undef DEFINE_MDNODE_GET
+
+#endif // LLVM_IR_DEBUGINFOMETADATA_H
diff --git a/linux-x64/clang/include/llvm/IR/DebugLoc.h b/linux-x64/clang/include/llvm/IR/DebugLoc.h
new file mode 100644
index 0000000..eef1212
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DebugLoc.h
@@ -0,0 +1,126 @@
+//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a number of light weight data structures used
+// to describe and track debug location information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGLOC_H
+#define LLVM_IR_DEBUGLOC_H
+
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+  class LLVMContext;
+  class raw_ostream;
+  class DILocation;
+
+  /// \brief A debug info location.
+  ///
+  /// This class is a wrapper around a tracking reference to an \a DILocation
+  /// pointer.
+  ///
+  /// To avoid extra includes, \a DebugLoc doubles the \a DILocation API with
+  /// one based on relatively opaque \a MDNode pointers.
+  class DebugLoc {
+    TrackingMDNodeRef Loc;
+
+  public:
+    DebugLoc() = default;
+
+    /// \brief Construct from an \a DILocation.
+    DebugLoc(const DILocation *L);
+
+    /// \brief Construct from an \a MDNode.
+    ///
+    /// Note: if \c N is not an \a DILocation, a verifier check will fail, and
+    /// accessors will crash.  However, construction from other nodes is
+    /// supported in order to handle forward references when reading textual
+    /// IR.
+    explicit DebugLoc(const MDNode *N);
+
+    /// \brief Get the underlying \a DILocation.
+    ///
+    /// \pre !*this or \c isa<DILocation>(getAsMDNode()).
+    /// @{
+    DILocation *get() const;
+    operator DILocation *() const { return get(); }
+    DILocation *operator->() const { return get(); }
+    DILocation &operator*() const { return *get(); }
+    /// @}
+
+    /// \brief Check for null.
+    ///
+    /// Check for null in a way that is safe with broken debug info.  Unlike
+    /// the conversion to \c DILocation, this doesn't require that \c Loc is of
+    /// the right type.  Important for cases like \a llvm::StripDebugInfo() and
+    /// \a Instruction::hasMetadata().
+    explicit operator bool() const { return Loc; }
+
+    /// \brief Check whether this has a trivial destructor.
+    bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); }
+
+    /// \brief Create a new DebugLoc.
+    ///
+    /// Create a new DebugLoc at the specified line/col and scope/inline.  This
+    /// forwards to \a DILocation::get().
+    ///
+    /// If \c !Scope, returns a default-constructed \a DebugLoc.
+    ///
+    /// FIXME: Remove this.  Users should use DILocation::get().
+    static DebugLoc get(unsigned Line, unsigned Col, const MDNode *Scope,
+                        const MDNode *InlinedAt = nullptr);
+
+    enum { ReplaceLastInlinedAt = true };
+    /// Rebuild the entire inlined-at chain for this instruction so that the top of
+    /// the chain now is inlined-at the new call site.
+    /// \param   InlinedAt    The new outermost inlined-at in the chain.
+    /// \param   ReplaceLast  Replace the last location in the inlined-at chain.
+    static DebugLoc appendInlinedAt(DebugLoc DL, DILocation *InlinedAt,
+                                    LLVMContext &Ctx,
+                                    DenseMap<const MDNode *, MDNode *> &Cache,
+                                    bool ReplaceLast = false);
+
+    unsigned getLine() const;
+    unsigned getCol() const;
+    MDNode *getScope() const;
+    DILocation *getInlinedAt() const;
+
+    /// \brief Get the fully inlined-at scope for a DebugLoc.
+    ///
+    /// Gets the inlined-at scope for a DebugLoc.
+    MDNode *getInlinedAtScope() const;
+
+    /// \brief Find the debug info location for the start of the function.
+    ///
+    /// Walk up the scope chain of given debug loc and find line number info
+    /// for the function.
+    ///
+    /// FIXME: Remove this.  Users should use DILocation/DILocalScope API to
+    /// find the subprogram, and then DILocation::get().
+    DebugLoc getFnDebugLoc() const;
+
+    /// \brief Return \c this as a bare \a MDNode.
+    MDNode *getAsMDNode() const { return Loc; }
+
+    bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
+    bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }
+
+    void dump() const;
+
+    /// \brief prints source location /path/to/file.exe:line:col @[inlined at]
+    void print(raw_ostream &OS) const;
+  };
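+
+  // Illustrative sketch (not part of the upstream header): constructing and
+  // inspecting a DebugLoc.  `Scope` is an assumed MDNode describing the
+  // enclosing local scope, as DebugLoc::get() requires.
+  //
+  //   DebugLoc DL = DebugLoc::get(/*Line=*/42, /*Col=*/7, Scope);
+  //   if (DL) {
+  //     unsigned Line = DL.getLine(); // 42
+  //     (void)Line;
+  //   }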
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DEBUGLOC_H
diff --git a/linux-x64/clang/include/llvm/IR/DerivedTypes.h b/linux-x64/clang/include/llvm/IR/DerivedTypes.h
new file mode 100644
index 0000000..6e5e085
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DerivedTypes.h
@@ -0,0 +1,509 @@
+//===- llvm/DerivedTypes.h - Classes for handling data types ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of classes that represent "derived
+// types".  These are things like "arrays of x" or "structure of x, y, z" or
+// "function returning x taking (y,z) as parameters", etc...
+//
+// The implementations of these classes live in the Type.cpp file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DERIVEDTYPES_H
+#define LLVM_IR_DERIVEDTYPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+class Value;
+class APInt;
+class LLVMContext;
+
+/// Class to represent integer types. Note that this class is also used to
+/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
+/// Int64Ty.
+/// @brief Integer representation type
+class IntegerType : public Type {
+  friend class LLVMContextImpl;
+
+protected:
+  explicit IntegerType(LLVMContext &C, unsigned NumBits)
+      : Type(C, IntegerTyID) {
+    setSubclassData(NumBits);
+  }
+
+public:
+  /// This enum is just used to hold constants we need for IntegerType.
+  enum {
+    MIN_INT_BITS = 1,        ///< Minimum number of bits that can be specified
+    MAX_INT_BITS = (1<<24)-1 ///< Maximum number of bits that can be specified
+      ///< Note that bit width is stored in the Type class's SubclassData
+      ///< field, which has 24 bits. This yields a maximum bit width of
+      ///< 16,777,215 bits.
+  };
+
+  /// This static method is the primary way of constructing an IntegerType.
+  /// If an IntegerType with the same NumBits value was previously instantiated,
+  /// that instance will be returned. Otherwise a new one will be created. Only
+  /// one instance with a given NumBits value is ever created.
+  /// @brief Get or create an IntegerType instance.
+  static IntegerType *get(LLVMContext &C, unsigned NumBits);
+
+  /// @brief Get the number of bits in this IntegerType
+  unsigned getBitWidth() const { return getSubclassData(); }
+
+  /// Return a bitmask with ones set for all of the bits that can be set by an
+  /// unsigned version of this type. This is 0xFF for i8, 0xFFFF for i16, etc.
+  uint64_t getBitMask() const {
+    return ~uint64_t(0UL) >> (64-getBitWidth());
+  }
+
+  /// Return a uint64_t with just the most significant bit set (the sign bit, if
+  /// the value is treated as a signed number).
+  uint64_t getSignBit() const {
+    return 1ULL << (getBitWidth()-1);
+  }
+
+  /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
+  /// @returns a bit mask with ones set for all the bits of this type.
+  /// @brief Get a bit mask for this type.
+  APInt getMask() const;
+
+  /// This method determines if the width of this IntegerType is a power-of-2
+  /// in terms of 8 bit bytes.
+  /// @returns true if this is a power-of-2 byte width.
+  /// @brief Is this a power-of-2 byte-width IntegerType?
+  bool isPowerOf2ByteWidth() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == IntegerTyID;
+  }
+};
+
+unsigned Type::getIntegerBitWidth() const {
+  return cast<IntegerType>(this)->getBitWidth();
+}
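+
+// Illustrative sketch (not part of the upstream header): obtaining the
+// uniqued i32 type and querying its masks.  `Ctx` is an assumed pre-existing
+// LLVMContext.
+//
+//   IntegerType *I32 = IntegerType::get(Ctx, 32);
+//   uint64_t Mask = I32->getBitMask(); // 0xFFFFFFFF
+//   uint64_t Sign = I32->getSignBit(); // 0x80000000
+//   assert(IntegerType::get(Ctx, 32) == I32 && "integer types are uniqued");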
+
+/// Class to represent function types
+///
+class FunctionType : public Type {
+  FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
+
+public:
+  FunctionType(const FunctionType &) = delete;
+  FunctionType &operator=(const FunctionType &) = delete;
+
+  /// This static method is the primary way of constructing a FunctionType.
+  static FunctionType *get(Type *Result,
+                           ArrayRef<Type*> Params, bool isVarArg);
+
+  /// Create a FunctionType taking no parameters.
+  static FunctionType *get(Type *Result, bool isVarArg);
+
+  /// Return true if the specified type is valid as a return type.
+  static bool isValidReturnType(Type *RetTy);
+
+  /// Return true if the specified type is valid as an argument type.
+  static bool isValidArgumentType(Type *ArgTy);
+
+  bool isVarArg() const { return getSubclassData()!=0; }
+  Type *getReturnType() const { return ContainedTys[0]; }
+
+  using param_iterator = Type::subtype_iterator;
+
+  param_iterator param_begin() const { return ContainedTys + 1; }
+  param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
+  ArrayRef<Type *> params() const {
+    return makeArrayRef(param_begin(), param_end());
+  }
+
+  /// Parameter type accessors.
+  Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }
+
+  /// Return the number of fixed parameters this function type requires.
+  /// This does not consider varargs.
+  unsigned getNumParams() const { return NumContainedTys - 1; }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == FunctionTyID;
+  }
+};
+static_assert(alignof(FunctionType) >= alignof(Type *),
+              "Alignment sufficient for objects appended to FunctionType");
+
+bool Type::isFunctionVarArg() const {
+  return cast<FunctionType>(this)->isVarArg();
+}
+
+Type *Type::getFunctionParamType(unsigned i) const {
+  return cast<FunctionType>(this)->getParamType(i);
+}
+
+unsigned Type::getFunctionNumParams() const {
+  return cast<FunctionType>(this)->getNumParams();
+}
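+
+// Illustrative sketch (not part of the upstream header): the function type
+// `i32 (i32, i32)`.  `Ctx` is an assumed pre-existing LLVMContext.
+//
+//   Type *I32 = Type::getInt32Ty(Ctx);
+//   FunctionType *FTy = FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
+//   assert(FTy->getNumParams() == 2 && !FTy->isVarArg());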
+
+/// Common super class of ArrayType, StructType and VectorType.
+class CompositeType : public Type {
+protected:
+  explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
+
+public:
+  /// Given an index value into the type, return the type of the element.
+  Type *getTypeAtIndex(const Value *V) const;
+  Type *getTypeAtIndex(unsigned Idx) const;
+  bool indexValid(const Value *V) const;
+  bool indexValid(unsigned Idx) const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == ArrayTyID ||
+           T->getTypeID() == StructTyID ||
+           T->getTypeID() == VectorTyID;
+  }
+};
+
+/// Class to represent struct types. There are two different kinds of struct
+/// types: Literal structs and Identified structs.
+///
+/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
+/// always have a body when created.  You can get one of these by using one of
+/// the StructType::get() forms.
+///
+/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
+/// uniqued.  The names for identified structs are managed at the LLVMContext
+/// level, so there can only be a single identified struct with a given name in
+/// a particular LLVMContext.  Identified structs may also optionally be opaque
+/// (have no body specified).  You get one of these by using one of the
+/// StructType::create() forms.
+///
+/// Independent of what kind of struct you have, the body of a struct type is
+/// laid out in memory consecutively with the elements directly one after the
+/// other (if the struct is packed) or (if not packed) with padding between the
+/// elements as defined by DataLayout (which is required to match what the code
+/// generator for a target expects).
+///
+class StructType : public CompositeType {
+  StructType(LLVMContext &C) : CompositeType(C, StructTyID) {}
+
+  enum {
+    /// This is the contents of the SubClassData field.
+    SCDB_HasBody = 1,
+    SCDB_Packed = 2,
+    SCDB_IsLiteral = 4,
+    SCDB_IsSized = 8
+  };
+
+  /// For a named struct that actually has a name, this is a pointer to the
+  /// symbol table entry (maintained by LLVMContext) for the struct.
+  /// This is null if the type is a literal struct or if it is an identified
+  /// type that has an empty name.
+  void *SymbolTableEntry = nullptr;
+
+public:
+  StructType(const StructType &) = delete;
+  StructType &operator=(const StructType &) = delete;
+
+  /// This creates an identified struct.
+  static StructType *create(LLVMContext &Context, StringRef Name);
+  static StructType *create(LLVMContext &Context);
+
+  static StructType *create(ArrayRef<Type *> Elements, StringRef Name,
+                            bool isPacked = false);
+  static StructType *create(ArrayRef<Type *> Elements);
+  static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements,
+                            StringRef Name, bool isPacked = false);
+  static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
+  template <class... Tys>
+  static typename std::enable_if<are_base_of<Type, Tys...>::value,
+                                 StructType *>::type
+  create(StringRef Name, Type *elt1, Tys *... elts) {
+    assert(elt1 && "Cannot create a struct type with no elements with this");
+    SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
+    return create(StructFields, Name);
+  }
+
+  /// This static method is the primary way to create a literal StructType.
+  static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
+                         bool isPacked = false);
+
+  /// Create an empty structure type.
+  static StructType *get(LLVMContext &Context, bool isPacked = false);
+
+  /// This static method is a convenience method for creating structure types by
+  /// specifying the elements as arguments. Note that this method always returns
+  /// a non-packed struct, and requires at least one element type.
+  template <class... Tys>
+  static typename std::enable_if<are_base_of<Type, Tys...>::value,
+                                 StructType *>::type
+  get(Type *elt1, Tys *... elts) {
+    assert(elt1 && "Cannot create a struct type with no elements with this");
+    LLVMContext &Ctx = elt1->getContext();
+    SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
+    return llvm::StructType::get(Ctx, StructFields);
+  }
+
+  bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }
+
+  /// Return true if this type is uniqued by structural equivalence, false if it
+  /// is a struct definition.
+  bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }
+
+  /// Return true if this is a type with an identity that has no body specified
+  /// yet. This prints as 'opaque' in .ll files.
+  bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
+
+  /// isSized - Return true if this is a sized type.
+  bool isSized(SmallPtrSetImpl<Type *> *Visited = nullptr) const;
+
+  /// Return true if this is a named struct that has a non-empty name.
+  bool hasName() const { return SymbolTableEntry != nullptr; }
+
+  /// Return the name for this struct type if it has an identity.
+  /// This may return an empty string for an unnamed struct type.  Do not call
+  /// this on a literal type.
+  StringRef getName() const;
+
+  /// Change the name of this type to the specified name, or to a name with a
+  /// suffix if there is a collision. Do not call this on a literal type.
+  void setName(StringRef Name);
+
+  /// Specify a body for an opaque identified type.
+  void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
+
+  template <typename... Tys>
+  typename std::enable_if<are_base_of<Type, Tys...>::value, void>::type
+  setBody(Type *elt1, Tys *... elts) {
+    assert(elt1 && "Cannot create a struct type with no elements with this");
+    SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
+    setBody(StructFields);
+  }
+
+  /// Return true if the specified type is valid as an element type.
+  static bool isValidElementType(Type *ElemTy);
+
+  // Iterator access to the elements.
+  using element_iterator = Type::subtype_iterator;
+
+  element_iterator element_begin() const { return ContainedTys; }
+  element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
+  ArrayRef<Type *> const elements() const {
+    return makeArrayRef(element_begin(), element_end());
+  }
+
+  /// Return true if this is layout identical to the specified struct.
+  bool isLayoutIdentical(StructType *Other) const;
+
+  /// Random access to the elements
+  unsigned getNumElements() const { return NumContainedTys; }
+  Type *getElementType(unsigned N) const {
+    assert(N < NumContainedTys && "Element number out of range!");
+    return ContainedTys[N];
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == StructTyID;
+  }
+};
+
+StringRef Type::getStructName() const {
+  return cast<StructType>(this)->getName();
+}
+
+unsigned Type::getStructNumElements() const {
+  return cast<StructType>(this)->getNumElements();
+}
+
+Type *Type::getStructElementType(unsigned N) const {
+  return cast<StructType>(this)->getElementType(N);
+}
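+
+// Illustrative sketch (not part of the upstream header): the two kinds of
+// struct described above.  `Ctx` is an assumed pre-existing LLVMContext.
+//
+//   // Literal struct: uniqued structurally, body required up front.
+//   StructType *Literal =
+//       StructType::get(Type::getInt32Ty(Ctx), Type::getInt8Ty(Ctx));
+//   // Identified struct: named, not uniqued, opaque until given a body.
+//   StructType *Named = StructType::create(Ctx, "foo");
+//   assert(Named->isOpaque());
+//   Named->setBody(Type::getInt32Ty(Ctx), Type::getInt8Ty(Ctx));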
+
+/// This is the superclass of the array and vector type classes. Both of these
+/// represent "arrays" in memory. The array type represents a specifically sized
+/// array, and the vector type represents a specifically sized array that allows
+/// for use of SIMD instructions. SequentialType holds the common features of
+/// both, which stem from the fact that both lay their components out in memory
+/// identically.
+class SequentialType : public CompositeType {
+  Type *ContainedType;               ///< Storage for the single contained type.
+  uint64_t NumElements;
+
+protected:
+  SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
+    : CompositeType(ElType->getContext(), TID), ContainedType(ElType),
+      NumElements(NumElements) {
+    ContainedTys = &ContainedType;
+    NumContainedTys = 1;
+  }
+
+public:
+  SequentialType(const SequentialType &) = delete;
+  SequentialType &operator=(const SequentialType &) = delete;
+
+  uint64_t getNumElements() const { return NumElements; }
+  Type *getElementType() const { return ContainedType; }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
+  }
+};
+
+/// Class to represent array types.
+class ArrayType : public SequentialType {
+  ArrayType(Type *ElType, uint64_t NumEl);
+
+public:
+  ArrayType(const ArrayType &) = delete;
+  ArrayType &operator=(const ArrayType &) = delete;
+
+  /// This static method is the primary way to construct an ArrayType
+  static ArrayType *get(Type *ElementType, uint64_t NumElements);
+
+  /// Return true if the specified type is valid as an element type.
+  static bool isValidElementType(Type *ElemTy);
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == ArrayTyID;
+  }
+};
+
+uint64_t Type::getArrayNumElements() const {
+  return cast<ArrayType>(this)->getNumElements();
+}
+
+/// Class to represent vector types.
+class VectorType : public SequentialType {
+  VectorType(Type *ElType, unsigned NumEl);
+
+public:
+  VectorType(const VectorType &) = delete;
+  VectorType &operator=(const VectorType &) = delete;
+
+  /// This static method is the primary way to construct a VectorType.
+  static VectorType *get(Type *ElementType, unsigned NumElements);
+
+  /// This static method gets a VectorType with the same number of elements as
+  /// the input type, and the element type is an integer type of the same width
+  /// as the input element type.
+  static VectorType *getInteger(VectorType *VTy) {
+    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    assert(EltBits && "Element size must be of a non-zero size");
+    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
+    return VectorType::get(EltTy, VTy->getNumElements());
+  }
+
+  /// This static method is like getInteger except that the element types are
+  /// twice as wide as the elements in the input type.
+  static VectorType *getExtendedElementVectorType(VectorType *VTy) {
+    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
+    return VectorType::get(EltTy, VTy->getNumElements());
+  }
+
+  /// This static method is like getInteger except that the element types are
+  /// half as wide as the elements in the input type.
+  static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
+    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    assert((EltBits & 1) == 0 &&
+           "Cannot truncate vector element with odd bit-width");
+    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
+    return VectorType::get(EltTy, VTy->getNumElements());
+  }
+
+  /// This static method returns a VectorType with half as many elements as the
+  /// input type and the same element type.
+  static VectorType *getHalfElementsVectorType(VectorType *VTy) {
+    unsigned NumElts = VTy->getNumElements();
+    assert ((NumElts & 1) == 0 &&
+            "Cannot halve vector with odd number of elements.");
+    return VectorType::get(VTy->getElementType(), NumElts/2);
+  }
+
+  /// This static method returns a VectorType with twice as many elements as the
+  /// input type and the same element type.
+  static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
+    unsigned NumElts = VTy->getNumElements();
+    return VectorType::get(VTy->getElementType(), NumElts*2);
+  }
+
+  /// Return true if the specified type is valid as an element type.
+  static bool isValidElementType(Type *ElemTy);
+
+  /// Return the number of bits in the Vector type.
+  /// Returns zero when the vector is a vector of pointers.
+  unsigned getBitWidth() const {
+    return getNumElements() * getElementType()->getPrimitiveSizeInBits();
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == VectorTyID;
+  }
+};
+
+unsigned Type::getVectorNumElements() const {
+  return cast<VectorType>(this)->getNumElements();
+}
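+
+// Illustrative sketch (not part of the upstream header): deriving related
+// vector types from `<4 x i32>` with the helpers above.  `Ctx` is an assumed
+// pre-existing LLVMContext.
+//
+//   VectorType *V4I32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
+//   VectorType *V4I64 = VectorType::getExtendedElementVectorType(V4I32);
+//   VectorType *V2I32 = VectorType::getHalfElementsVectorType(V4I32);
+//   assert(V4I32->getBitWidth() == 128 && V4I64->getBitWidth() == 256);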
+
+/// Class to represent pointers.
+class PointerType : public Type {
+  explicit PointerType(Type *ElType, unsigned AddrSpace);
+
+  Type *PointeeTy;
+
+public:
+  PointerType(const PointerType &) = delete;
+  PointerType &operator=(const PointerType &) = delete;
+
+  /// This constructs a pointer to an object of the specified type in a numbered
+  /// address space.
+  static PointerType *get(Type *ElementType, unsigned AddressSpace);
+
+  /// This constructs a pointer to an object of the specified type in the
+  /// generic address space (address space zero).
+  static PointerType *getUnqual(Type *ElementType) {
+    return PointerType::get(ElementType, 0);
+  }
+
+  Type *getElementType() const { return PointeeTy; }
+
+  /// Return true if the specified type is valid as an element type.
+  static bool isValidElementType(Type *ElemTy);
+
+  /// Return true if we can load or store from a pointer to this type.
+  static bool isLoadableOrStorableType(Type *ElemTy);
+
+  /// Return the address space of the Pointer type.
+  inline unsigned getAddressSpace() const { return getSubclassData(); }
+
+  /// Implement support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Type *T) {
+    return T->getTypeID() == PointerTyID;
+  }
+};
+
+unsigned Type::getPointerAddressSpace() const {
+  return cast<PointerType>(getScalarType())->getAddressSpace();
+}
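+
+// Illustrative sketch (not part of the upstream header): i8 pointers in the
+// default and in a numbered address space.  `Ctx` is an assumed pre-existing
+// LLVMContext.
+//
+//   PointerType *P0 = PointerType::getUnqual(Type::getInt8Ty(Ctx));
+//   PointerType *P1 = PointerType::get(Type::getInt8Ty(Ctx), /*AddrSpace=*/1);
+//   assert(P0->getAddressSpace() == 0 && P1->getAddressSpace() == 1);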
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DERIVEDTYPES_H
diff --git a/linux-x64/clang/include/llvm/IR/DerivedUser.h b/linux-x64/clang/include/llvm/IR/DerivedUser.h
new file mode 100644
index 0000000..67c483d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DerivedUser.h
@@ -0,0 +1,45 @@
+//===- DerivedUser.h - Base for non-IR Users --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DERIVEDUSER_H
+#define LLVM_IR_DERIVEDUSER_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+
+class Type;
+class Use;
+
+/// Extension point for the Value hierarchy. All classes outside of lib/IR
+/// that wish to inherit from User should inherit from DerivedUser
+/// instead. Inheriting from this class is discouraged.
+///
+/// Generally speaking, Value is the base of a closed class hierarchy
+/// that can't be extended by code outside of lib/IR. This class creates a
+/// loophole that allows classes outside of lib/IR to extend User to leverage
+/// its use/def list machinery.
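+///
+/// A minimal sketch of hooking in (illustrative only; \c MyUser and
+/// \c deleteMe are hypothetical names):
+/// \code
+///   class MyUser : public DerivedUser {
+///     static void deleteMe(DerivedUser *Self) {
+///       delete static_cast<MyUser *>(Self);
+///     }
+///   public:
+///     MyUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps)
+///         : DerivedUser(Ty, VK, U, NumOps, deleteMe) {}
+///   };
+/// \endcode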
+class DerivedUser : public User {
+protected:
+  using DeleteValueTy = void (*)(DerivedUser *);
+
+private:
+  friend class Value;
+
+  DeleteValueTy DeleteValue;
+
+public:
+  DerivedUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps,
+              DeleteValueTy DeleteValue)
+      : User(Ty, VK, U, NumOps), DeleteValue(DeleteValue) {}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DERIVEDUSER_H
diff --git a/linux-x64/clang/include/llvm/IR/DiagnosticHandler.h b/linux-x64/clang/include/llvm/IR/DiagnosticHandler.h
new file mode 100644
index 0000000..9256d48
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DiagnosticHandler.h
@@ -0,0 +1,75 @@
+//===- DiagnosticHandler.h - DiagnosticHandler class for LLVM -*- C++ ---*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Base DiagnosticHandler class declaration. Derive from this class to provide
+// custom diagnostic reporting.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICHANDLER_H
+#define LLVM_IR_DIAGNOSTICHANDLER_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class DiagnosticInfo;
+
+/// \brief This is the base class for diagnostic handling in LLVM.
+/// The handleDiagnostics method must be overridden by subclasses to handle
+/// diagnostics. The *RemarkEnabled methods can be overridden to control
+/// which remarks are enabled.
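+///
+/// A minimal sketch of a custom handler (\c MyHandler is a hypothetical name):
+/// \code
+///   struct MyHandler final : DiagnosticHandler {
+///     bool handleDiagnostics(const DiagnosticInfo &DI) override {
+///       // Inspect DI.getSeverity() / DI.getKind() and report the diagnostic.
+///       return true; // Fully handled; suppress the default printing.
+///     }
+///   };
+/// \endcode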
+struct DiagnosticHandler {
+  void *DiagnosticContext = nullptr;
+  DiagnosticHandler(void *DiagContext = nullptr)
+      : DiagnosticContext(DiagContext) {}
+  virtual ~DiagnosticHandler() = default;
+
+  using DiagnosticHandlerTy = void (*)(const DiagnosticInfo &DI, void *Context);
+
+  /// DiagHandlerCallback is settable from the C API, and the base
+  /// implementation of DiagnosticHandler calls it from handleDiagnostics().
+  /// Derived classes of DiagnosticHandler should not use this callback but
+  /// should implement handleDiagnostics() instead.
+  DiagnosticHandlerTy DiagHandlerCallback = nullptr;
+
+  /// Override handleDiagnostics to provide a custom implementation.
+  /// Return true if the diagnostic has been handled; otherwise return false
+  /// to make LLVMContext::diagnose() print the message with a prefix based
+  /// on the severity.
+  virtual bool handleDiagnostics(const DiagnosticInfo &DI) {
+    if (DiagHandlerCallback) {
+      DiagHandlerCallback(DI, DiagnosticContext);
+      return true;
+    }
+    return false;
+  }
+
+  /// Return true if analysis remarks are enabled; override to provide a
+  /// different implementation.
+  virtual bool isAnalysisRemarkEnabled(StringRef PassName) const;
+
+  /// Return true if missed optimization remarks are enabled; override to
+  /// provide a different implementation.
+  virtual bool isMissedOptRemarkEnabled(StringRef PassName) const;
+
+  /// Return true if passed optimization remarks are enabled; override to
+  /// provide a different implementation.
+  virtual bool isPassedOptRemarkEnabled(StringRef PassName) const;
+
+  /// Return true if any type of remarks are enabled for this pass.
+  bool isAnyRemarkEnabled(StringRef PassName) const {
+    return (isMissedOptRemarkEnabled(PassName) ||
+            isPassedOptRemarkEnabled(PassName) ||
+            isAnalysisRemarkEnabled(PassName));
+  }
+
+  /// Return true if any type of remarks are enabled for any pass.
+  virtual bool isAnyRemarkEnabled() const;
+};
+} // namespace llvm
+
+#endif // LLVM_IR_DIAGNOSTICHANDLER_H
diff --git a/linux-x64/clang/include/llvm/IR/DiagnosticInfo.h b/linux-x64/clang/include/llvm/IR/DiagnosticInfo.h
new file mode 100644
index 0000000..bfec2be
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DiagnosticInfo.h
@@ -0,0 +1,999 @@
+//===- llvm/IR/DiagnosticInfo.h - Diagnostic Declaration --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICINFO_H
+#define LLVM_IR_DIAGNOSTICINFO_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <string>
+
+namespace llvm {
+
+// Forward declarations.
+class DiagnosticPrinter;
+class Function;
+class Instruction;
+class LLVMContext;
+class Module;
+class SMDiagnostic;
+
+/// \brief Defines the different supported severities of a diagnostic.
+enum DiagnosticSeverity : char {
+  DS_Error,
+  DS_Warning,
+  DS_Remark,
+  // A note attaches additional information to one of the previous diagnostic
+  // types.
+  DS_Note
+};
+
+/// \brief Defines the different supported kinds of diagnostics.
+/// This enum should be extended with a new ID for each added concrete subclass.
+enum DiagnosticKind {
+  DK_InlineAsm,
+  DK_ResourceLimit,
+  DK_StackSize,
+  DK_Linker,
+  DK_DebugMetadataVersion,
+  DK_DebugMetadataInvalid,
+  DK_ISelFallback,
+  DK_SampleProfile,
+  DK_OptimizationRemark,
+  DK_OptimizationRemarkMissed,
+  DK_OptimizationRemarkAnalysis,
+  DK_OptimizationRemarkAnalysisFPCommute,
+  DK_OptimizationRemarkAnalysisAliasing,
+  DK_OptimizationFailure,
+  DK_FirstRemark = DK_OptimizationRemark,
+  DK_LastRemark = DK_OptimizationFailure,
+  DK_MachineOptimizationRemark,
+  DK_MachineOptimizationRemarkMissed,
+  DK_MachineOptimizationRemarkAnalysis,
+  DK_FirstMachineRemark = DK_MachineOptimizationRemark,
+  DK_LastMachineRemark = DK_MachineOptimizationRemarkAnalysis,
+  DK_MIRParser,
+  DK_PGOProfile,
+  DK_Unsupported,
+  DK_FirstPluginKind
+};
+
+/// \brief Get the next available kind ID for a plugin diagnostic.
+/// Each time this function is called, it returns a different number.
+/// Therefore, a plugin that wants to "identify" its own classes
+/// with a dynamic identifier just has to use this method to get a new ID
+/// and assign it to each of its classes.
+/// The returned ID will be greater than or equal to DK_FirstPluginKind.
+/// Thus, the plugin identifiers will not conflict with the
+/// DiagnosticKind values.
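+///
+/// For example (a sketch), a plugin can cache its kind ID once:
+/// \code
+///   static const int MyPluginKind = getNextAvailablePluginDiagnosticKind();
+/// \endcode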
+int getNextAvailablePluginDiagnosticKind();
+
+/// \brief This is the base abstract class for diagnostic reporting in
+/// the backend.
+/// The print method must be overridden by subclasses to print a
+/// user-friendly message in the client of the backend (let us call it a
+/// frontend).
+class DiagnosticInfo {
+private:
+  /// Kind defines the kind of report this is about.
+  const /* DiagnosticKind */ int Kind;
+  /// Severity gives the severity of the diagnostic.
+  const DiagnosticSeverity Severity;
+
+public:
+  DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
+      : Kind(Kind), Severity(Severity) {}
+
+  virtual ~DiagnosticInfo() = default;
+
+  /* DiagnosticKind */ int getKind() const { return Kind; }
+  DiagnosticSeverity getSeverity() const { return Severity; }
+
+  /// Print using the given \p DP a user-friendly message.
+  /// This is the default message that will be printed to the user.
+  /// It is used when the frontend does not directly take advantage
+  /// of the information contained in fields of the subclasses.
+  /// The printed message must not end with '.' nor start with a severity
+  /// keyword.
+  virtual void print(DiagnosticPrinter &DP) const = 0;
+};
+
+using DiagnosticHandlerFunction = std::function<void(const DiagnosticInfo &)>;
+
+/// Diagnostic information for inline asm reporting.
+/// This is basically a message and an optional location.
+class DiagnosticInfoInlineAsm : public DiagnosticInfo {
+private:
+  /// Optional line information. 0 if not set.
+  unsigned LocCookie = 0;
+  /// Message to be reported.
+  const Twine &MsgStr;
+  /// Optional origin of the problem.
+  const Instruction *Instr = nullptr;
+
+public:
+  /// \p MsgStr is the message to be reported to the frontend.
+  /// This class does not copy \p MsgStr, therefore the reference must be valid
+  /// for the whole life time of the Diagnostic.
+  DiagnosticInfoInlineAsm(const Twine &MsgStr,
+                          DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_InlineAsm, Severity), MsgStr(MsgStr) {}
+
+  /// \p LocCookie if non-zero gives the line number for this report.
+  /// \p MsgStr gives the message.
+  /// This class does not copy \p MsgStr, therefore the reference must be valid
+  /// for the whole life time of the Diagnostic.
+  DiagnosticInfoInlineAsm(unsigned LocCookie, const Twine &MsgStr,
+                          DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie),
+        MsgStr(MsgStr) {}
+
+  /// \p Instr gives the original instruction that triggered the diagnostic.
+  /// \p MsgStr gives the message.
+  /// This class does not copy \p MsgStr, therefore the reference must be valid
+  /// for the whole life time of the Diagnostic.
+  /// Same for \p I.
+  DiagnosticInfoInlineAsm(const Instruction &I, const Twine &MsgStr,
+                          DiagnosticSeverity Severity = DS_Error);
+
+  unsigned getLocCookie() const { return LocCookie; }
+  const Twine &getMsgStr() const { return MsgStr; }
+  const Instruction *getInstruction() const { return Instr; }
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_InlineAsm;
+  }
+};
+
+/// Diagnostic information for stack size etc. reporting.
+/// This is basically a function and a size.
+class DiagnosticInfoResourceLimit : public DiagnosticInfo {
+private:
+  /// The function that is concerned by this resource limit diagnostic.
+  const Function &Fn;
+
+  /// Description of the resource type (e.g. stack size).
+  const char *ResourceName;
+
+  /// The computed size usage.
+  uint64_t ResourceSize;
+
+  /// The threshold that was passed.
+  uint64_t ResourceLimit;
+
+public:
+  /// \p Fn is the function concerned by this resource limit diagnostic.
+  /// \p ResourceSize is the computed size usage.
+  DiagnosticInfoResourceLimit(const Function &Fn, const char *ResourceName,
+                              uint64_t ResourceSize,
+                              DiagnosticSeverity Severity = DS_Warning,
+                              DiagnosticKind Kind = DK_ResourceLimit,
+                              uint64_t ResourceLimit = 0)
+      : DiagnosticInfo(Kind, Severity), Fn(Fn), ResourceName(ResourceName),
+        ResourceSize(ResourceSize), ResourceLimit(ResourceLimit) {}
+
+  const Function &getFunction() const { return Fn; }
+  const char *getResourceName() const { return ResourceName; }
+  uint64_t getResourceSize() const { return ResourceSize; }
+  uint64_t getResourceLimit() const { return ResourceLimit; }
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_ResourceLimit || DI->getKind() == DK_StackSize;
+  }
+};
+
+class DiagnosticInfoStackSize : public DiagnosticInfoResourceLimit {
+public:
+  DiagnosticInfoStackSize(const Function &Fn, uint64_t StackSize,
+                          DiagnosticSeverity Severity = DS_Warning,
+                          uint64_t StackLimit = 0)
+      : DiagnosticInfoResourceLimit(Fn, "stack size", StackSize, Severity,
+                                    DK_StackSize, StackLimit) {}
+
+  uint64_t getStackSize() const { return getResourceSize(); }
+  uint64_t getStackLimit() const { return getResourceLimit(); }
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_StackSize;
+  }
+};
+
+/// Diagnostic information for debug metadata version reporting.
+/// This is basically a module and a version.
+class DiagnosticInfoDebugMetadataVersion : public DiagnosticInfo {
+private:
+  /// The module that is concerned by this debug metadata version diagnostic.
+  const Module &M;
+  /// The actual metadata version.
+  unsigned MetadataVersion;
+
+public:
+  /// \p M is the module concerned by this debug metadata version diagnostic.
+  /// \p MetadataVersion is the actual metadata version.
+  DiagnosticInfoDebugMetadataVersion(const Module &M, unsigned MetadataVersion,
+                                     DiagnosticSeverity Severity = DS_Warning)
+      : DiagnosticInfo(DK_DebugMetadataVersion, Severity), M(M),
+        MetadataVersion(MetadataVersion) {}
+
+  const Module &getModule() const { return M; }
+  unsigned getMetadataVersion() const { return MetadataVersion; }
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_DebugMetadataVersion;
+  }
+};
+
+/// Diagnostic information for stripping invalid debug metadata.
+class DiagnosticInfoIgnoringInvalidDebugMetadata : public DiagnosticInfo {
+private:
+  /// The module concerned by this invalid debug metadata diagnostic.
+  const Module &M;
+
+public:
+  /// \p M is the module concerned by this invalid debug metadata diagnostic.
+  DiagnosticInfoIgnoringInvalidDebugMetadata(
+      const Module &M, DiagnosticSeverity Severity = DS_Warning)
+      : DiagnosticInfo(DK_DebugMetadataInvalid, Severity), M(M) {}
+
+  const Module &getModule() const { return M; }
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_DebugMetadataInvalid;
+  }
+};
+
+/// Diagnostic information for the sample profiler.
+class DiagnosticInfoSampleProfile : public DiagnosticInfo {
+public:
+  DiagnosticInfoSampleProfile(StringRef FileName, unsigned LineNum,
+                              const Twine &Msg,
+                              DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+        LineNum(LineNum), Msg(Msg) {}
+  DiagnosticInfoSampleProfile(StringRef FileName, const Twine &Msg,
+                              DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+        Msg(Msg) {}
+  DiagnosticInfoSampleProfile(const Twine &Msg,
+                              DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_SampleProfile, Severity), Msg(Msg) {}
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_SampleProfile;
+  }
+
+  StringRef getFileName() const { return FileName; }
+  unsigned getLineNum() const { return LineNum; }
+  const Twine &getMsg() const { return Msg; }
+
+private:
+  /// Name of the input file associated with this diagnostic.
+  StringRef FileName;
+
+  /// Line number where the diagnostic occurred. If 0, no line number will
+  /// be emitted in the message.
+  unsigned LineNum = 0;
+
+  /// Message to report.
+  const Twine &Msg;
+};
+
+/// Diagnostic information for the PGO profiler.
+class DiagnosticInfoPGOProfile : public DiagnosticInfo {
+public:
+  DiagnosticInfoPGOProfile(const char *FileName, const Twine &Msg,
+                           DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfo(DK_PGOProfile, Severity), FileName(FileName), Msg(Msg) {}
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_PGOProfile;
+  }
+
+  const char *getFileName() const { return FileName; }
+  const Twine &getMsg() const { return Msg; }
+
+private:
+  /// Name of the input file associated with this diagnostic.
+  const char *FileName;
+
+  /// Message to report.
+  const Twine &Msg;
+};
+
+class DiagnosticLocation {
+  StringRef Filename;
+  unsigned Line = 0;
+  unsigned Column = 0;
+
+public:
+  DiagnosticLocation() = default;
+  DiagnosticLocation(const DebugLoc &DL);
+  DiagnosticLocation(const DISubprogram *SP);
+
+  bool isValid() const { return !Filename.empty(); }
+  StringRef getFilename() const { return Filename; }
+  unsigned getLine() const { return Line; }
+  unsigned getColumn() const { return Column; }
+};
+
+/// Common features for diagnostics with an associated location.
+class DiagnosticInfoWithLocationBase : public DiagnosticInfo {
+public:
+  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
+  /// the location information to use in the diagnostic.
+  DiagnosticInfoWithLocationBase(enum DiagnosticKind Kind,
+                                 enum DiagnosticSeverity Severity,
+                                 const Function &Fn,
+                                 const DiagnosticLocation &Loc)
+      : DiagnosticInfo(Kind, Severity), Fn(Fn), Loc(Loc) {}
+
+  /// Return true if location information is available for this diagnostic.
+  bool isLocationAvailable() const { return Loc.isValid(); }
+
+  /// Return a string with the location information for this diagnostic
+  /// in the format "file:line:col". If location information is not available,
+  /// it returns "<unknown>:0:0".
+  const std::string getLocationStr() const;
+
+  /// Return location information for this diagnostic in three parts:
+  /// the source file name, line number and column.
+  void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const;
+
+  const Function &getFunction() const { return Fn; }
+  DiagnosticLocation getLocation() const { return Loc; }
+
+private:
+  /// Function where this diagnostic is triggered.
+  const Function &Fn;
+
+  /// Debug location where this diagnostic is triggered.
+  DiagnosticLocation Loc;
+};
+
+/// \brief Common features for diagnostics dealing with optimization remarks
+/// that are used by both IR and MIR passes.
+class DiagnosticInfoOptimizationBase : public DiagnosticInfoWithLocationBase {
+public:
+  /// \brief Used to set IsVerbose via the stream interface.
+  struct setIsVerbose {};
+
+  /// \brief When an instance of this is inserted into the stream, the arguments
+  /// following will not appear in the remark printed in the compiler output
+  /// (-Rpass) but only in the optimization record file
+  /// (-fsave-optimization-record).
+  struct setExtraArgs {};
+
+  /// \brief Used in the streaming interface as the general argument type.  It
+  /// internally converts everything into a key-value pair.
+  struct Argument {
+    std::string Key;
+    std::string Val;
+    // If set, the debug location corresponding to the value.
+    DiagnosticLocation Loc;
+
+    explicit Argument(StringRef Str = "") : Key("String"), Val(Str) {}
+    Argument(StringRef Key, const Value *V);
+    Argument(StringRef Key, const Type *T);
+    Argument(StringRef Key, StringRef S);
+    Argument(StringRef Key, int N);
+    Argument(StringRef Key, float N);
+    Argument(StringRef Key, long N);
+    Argument(StringRef Key, long long N);
+    Argument(StringRef Key, unsigned N);
+    Argument(StringRef Key, unsigned long N);
+    Argument(StringRef Key, unsigned long long N);
+    Argument(StringRef Key, bool B) : Key(Key), Val(B ? "true" : "false") {}
+    Argument(StringRef Key, DebugLoc dl);
+  };
+
+  /// \p PassName is the name of the pass emitting this diagnostic. \p
+  /// RemarkName is a textual identifier for the remark (single-word,
+  /// camel-case). \p Fn is the function where the diagnostic is being emitted.
+  /// \p Loc is the location information to use in the diagnostic. If line table
+  /// information is available, the diagnostic will include the source code
+  /// location.
+  DiagnosticInfoOptimizationBase(enum DiagnosticKind Kind,
+                                 enum DiagnosticSeverity Severity,
+                                 const char *PassName, StringRef RemarkName,
+                                 const Function &Fn,
+                                 const DiagnosticLocation &Loc)
+      : DiagnosticInfoWithLocationBase(Kind, Severity, Fn, Loc),
+        PassName(PassName), RemarkName(RemarkName) {}
+
+  void insert(StringRef S);
+  void insert(Argument A);
+  void insert(setIsVerbose V);
+  void insert(setExtraArgs EA);
+
+  /// \see DiagnosticInfo::print.
+  void print(DiagnosticPrinter &DP) const override;
+
+  /// Return true if this optimization remark is enabled by one of
+  /// the LLVM command line flags (-pass-remarks, -pass-remarks-missed,
+  /// or -pass-remarks-analysis). Note that this only handles the LLVM
+  /// flags. We cannot access Clang flags from here (they are handled
+  /// in BackendConsumer::OptimizationRemarkHandler).
+  virtual bool isEnabled() const = 0;
+
+  StringRef getPassName() const { return PassName; }
+  std::string getMsg() const;
+  Optional<uint64_t> getHotness() const { return Hotness; }
+  void setHotness(Optional<uint64_t> H) { Hotness = H; }
+
+  bool isVerbose() const { return IsVerbose; }
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return (DI->getKind() >= DK_FirstRemark &&
+            DI->getKind() <= DK_LastRemark) ||
+           (DI->getKind() >= DK_FirstMachineRemark &&
+            DI->getKind() <= DK_LastMachineRemark);
+  }
+
+  bool isPassed() const {
+    return (getKind() == DK_OptimizationRemark ||
+            getKind() == DK_MachineOptimizationRemark);
+  }
+
+  bool isMissed() const {
+    return (getKind() == DK_OptimizationRemarkMissed ||
+            getKind() == DK_MachineOptimizationRemarkMissed);
+  }
+
+  bool isAnalysis() const {
+    return (getKind() == DK_OptimizationRemarkAnalysis ||
+            getKind() == DK_MachineOptimizationRemarkAnalysis);
+  }
+
+protected:
+  /// Name of the pass that triggers this report. If this matches the
+  /// regular expression given in -Rpass=regexp, then the remark will
+  /// be emitted.
+  const char *PassName;
+
+  /// Textual identifier for the remark (single-word, camel-case). Can be used
+  /// by external tools reading the YAML output file for optimization remarks to
+  /// identify the remark.
+  StringRef RemarkName;
+
+  /// If profile information is available, this is the number of times the
+  /// corresponding code was executed in a profile instrumentation run.
+  Optional<uint64_t> Hotness;
+
+  /// Arguments collected via the streaming interface.
+  SmallVector<Argument, 4> Args;
+
+  /// The remark is expected to be noisy.
+  bool IsVerbose = false;
+
+  /// \brief If positive, the index of the first argument that only appears in
+  /// the optimization records and not in the remark printed in the compiler
+  /// output.
+  int FirstExtraArgIndex = -1;
+
+  friend struct yaml::MappingTraits<DiagnosticInfoOptimizationBase *>;
+};
+
+/// Allow the insertion operator to return the actual remark type rather than a
+/// common base class.  This allows returning the result of the insertion
+/// directly by value, e.g. return OptimizationRemarkAnalysis(...) << "blah".
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               StringRef>::type S) {
+  R.insert(S);
+  return R;
+}
+
+/// Also accept an r-value for the remark, so that insertion works on a
+/// temporarily-constructed remark.
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &&R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               StringRef>::type S) {
+  R.insert(S);
+  return R;
+}
+
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               DiagnosticInfoOptimizationBase::Argument>::type A) {
+  R.insert(A);
+  return R;
+}
+
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &&R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               DiagnosticInfoOptimizationBase::Argument>::type A) {
+  R.insert(A);
+  return R;
+}
+
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               DiagnosticInfoOptimizationBase::setIsVerbose>::type V) {
+  R.insert(V);
+  return R;
+}
+
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &&R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               DiagnosticInfoOptimizationBase::setIsVerbose>::type V) {
+  R.insert(V);
+  return R;
+}
+
+template <class RemarkT>
+RemarkT &
+operator<<(RemarkT &R,
+           typename std::enable_if<
+               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
+               DiagnosticInfoOptimizationBase::setExtraArgs>::type EA) {
+  R.insert(EA);
+  return R;
+}
+
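+/// Putting the stream operators together, a remark might be built like this
+/// (a sketch; DEBUG_TYPE and \c Inst are hypothetical values in scope):
+/// \code
+///   OptimizationRemark R(DEBUG_TYPE, "Vectorized", Inst);
+///   R << "vectorized loop with width "
+///     << DiagnosticInfoOptimizationBase::Argument("Width", 4);
+/// \endcode
+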
+/// \brief Common features for diagnostics dealing with optimization remarks
+/// that are used by IR passes.
+class DiagnosticInfoIROptimization : public DiagnosticInfoOptimizationBase {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. \p
+  /// RemarkName is a textual identifier for the remark (single-word,
+  /// camel-case). \p Fn is the function where the diagnostic is being emitted.
+  /// \p Loc is the location information to use in the diagnostic. If line table
+  /// information is available, the diagnostic will include the source code
+  /// location. \p CodeRegion is the IR value (currently a basic block) that
+  /// the optimization operates on. This is currently used to provide run-time
+  /// hotness information with PGO.
+  DiagnosticInfoIROptimization(enum DiagnosticKind Kind,
+                               enum DiagnosticSeverity Severity,
+                               const char *PassName, StringRef RemarkName,
+                               const Function &Fn,
+                               const DiagnosticLocation &Loc,
+                               const Value *CodeRegion = nullptr)
+      : DiagnosticInfoOptimizationBase(Kind, Severity, PassName, RemarkName, Fn,
+                                       Loc),
+        CodeRegion(CodeRegion) {}
+
+  /// \brief This ctor variant allows a pass to build an optimization remark
+  /// from an existing remark.
+  ///
+  /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
+  /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
+  /// remark.  The string \p Prepend will be emitted before the original
+  /// message.
+  DiagnosticInfoIROptimization(const char *PassName, StringRef Prepend,
+                               const DiagnosticInfoIROptimization &Orig)
+      : DiagnosticInfoOptimizationBase(
+            (DiagnosticKind)Orig.getKind(), Orig.getSeverity(), PassName,
+            Orig.RemarkName, Orig.getFunction(), Orig.getLocation()),
+        CodeRegion(Orig.getCodeRegion()) {
+    *this << Prepend;
+    std::copy(Orig.Args.begin(), Orig.Args.end(), std::back_inserter(Args));
+  }
+
+  /// Legacy interface.
+  /// \p PassName is the name of the pass emitting this diagnostic.
+  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
+  /// the location information to use in the diagnostic. If line table
+  /// information is available, the diagnostic will include the source code
+  /// location. \p Msg is the message to show. Note that this class does not
+  /// copy this message, so this reference must be valid for the whole life time
+  /// of the diagnostic.
+  DiagnosticInfoIROptimization(enum DiagnosticKind Kind,
+                               enum DiagnosticSeverity Severity,
+                               const char *PassName, const Function &Fn,
+                               const DiagnosticLocation &Loc, const Twine &Msg)
+      : DiagnosticInfoOptimizationBase(Kind, Severity, PassName, "", Fn, Loc) {
+    *this << Msg.str();
+  }
+
+  const Value *getCodeRegion() const { return CodeRegion; }
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() >= DK_FirstRemark && DI->getKind() <= DK_LastRemark;
+  }
+
+private:
+  /// The IR value (currently basic block) that the optimization operates on.
+  /// This is currently used to provide run-time hotness information with PGO.
+  const Value *CodeRegion;
+};
+
+/// Diagnostic information for applied optimization remarks.
+class OptimizationRemark : public DiagnosticInfoIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass=, then the diagnostic will
+  /// be emitted. \p RemarkName is a textual identifier for the remark (single-
+  /// word, camel-case). \p Loc is the debug location and \p CodeRegion is the
+  /// region that the optimization operates on (currently only block is
+  /// supported).
+  OptimizationRemark(const char *PassName, StringRef RemarkName,
+                     const DiagnosticLocation &Loc, const Value *CodeRegion);
+
+  /// Same as above, but the debug location and code region are derived from \p
+  /// Instr.
+  OptimizationRemark(const char *PassName, StringRef RemarkName,
+                     const Instruction *Inst);
+
+  /// Same as above, but the debug location and code region are derived from \p
+  /// Func.
+  OptimizationRemark(const char *PassName, StringRef RemarkName,
+                     const Function *Func);
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationRemark;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override;
+
+private:
+  /// This is deprecated now and only used by the function API below.
+  /// \p PassName is the name of the pass emitting this diagnostic. If
+  /// this name matches the regular expression given in -Rpass=, then the
+  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+  /// is being emitted. \p Loc is the location information to use in the
+  /// diagnostic. If line table information is available, the diagnostic
+  /// will include the source code location. \p Msg is the message to show.
+  /// Note that this class does not copy this message, so this reference
+  /// must be valid for the whole life time of the diagnostic.
+  OptimizationRemark(const char *PassName, const Function &Fn,
+                     const DiagnosticLocation &Loc, const Twine &Msg)
+      : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
+                                     Fn, Loc, Msg) {}
+};
+
+/// Diagnostic information for missed-optimization remarks.
+class OptimizationRemarkMissed : public DiagnosticInfoIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-missed=, then the
+  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+  /// remark (single-word, camel-case). \p Loc is the debug location and \p
+  /// CodeRegion is the region that the optimization operates on (currently only
+  /// block is supported).
+  OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+                           const DiagnosticLocation &Loc,
+                           const Value *CodeRegion);
+
+  /// \brief Same as above but \p Inst is used to derive code region and debug
+  /// location.
+  OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+                           const Instruction *Inst);
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationRemarkMissed;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override;
+
+private:
+  /// This is deprecated now and only used by the function API below.
+  /// \p PassName is the name of the pass emitting this diagnostic. If
+  /// this name matches the regular expression given in -Rpass-missed=, then the
+  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+  /// is being emitted. \p Loc is the location information to use in the
+  /// diagnostic. If line table information is available, the diagnostic
+  /// will include the source code location. \p Msg is the message to show.
+  /// Note that this class does not copy this message, so this reference
+  /// must be valid for the whole life time of the diagnostic.
+  OptimizationRemarkMissed(const char *PassName, const Function &Fn,
+                           const DiagnosticLocation &Loc, const Twine &Msg)
+      : DiagnosticInfoIROptimization(DK_OptimizationRemarkMissed, DS_Remark,
+                                     PassName, Fn, Loc, Msg) {}
+};
+
+/// Diagnostic information for optimization analysis remarks.
+class OptimizationRemarkAnalysis : public DiagnosticInfoIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-analysis=, then the
+  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+  /// remark (single-word, camel-case). \p Loc is the debug location and \p
+  /// CodeRegion is the region that the optimization operates on (currently only
+  /// block is supported).
+  OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+                             const DiagnosticLocation &Loc,
+                             const Value *CodeRegion);
+
+  /// \brief This ctor variant allows a pass to build an optimization remark
+  /// from an existing remark.
+  ///
+  /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
+  /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
+  /// remark.  The string \p Prepend will be emitted before the original
+  /// message.
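+  ///
+  /// For example (a sketch; \c LAARemark is a hypothetical
+  /// OptimizationRemarkAnalysis produced by loop access analysis):
+  /// \code
+  ///   OptimizationRemarkAnalysis R("loop-vectorize",
+  ///                                "loop not vectorized: ", LAARemark);
+  /// \endcode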
+  OptimizationRemarkAnalysis(const char *PassName, StringRef Prepend,
+                             const OptimizationRemarkAnalysis &Orig)
+      : DiagnosticInfoIROptimization(PassName, Prepend, Orig) {}
+
+  /// \brief Same as above but \p Inst is used to derive code region and debug
+  /// location.
+  OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+                             const Instruction *Inst);
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationRemarkAnalysis;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override;
+
+  static const char *AlwaysPrint;
+
+  bool shouldAlwaysPrint() const { return getPassName() == AlwaysPrint; }
+
+protected:
+  OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
+                             const Function &Fn, const DiagnosticLocation &Loc,
+                             const Twine &Msg)
+      : DiagnosticInfoIROptimization(Kind, DS_Remark, PassName, Fn, Loc, Msg) {}
+
+  OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
+                             StringRef RemarkName,
+                             const DiagnosticLocation &Loc,
+                             const Value *CodeRegion);
+
+private:
+  /// This is deprecated now and only used by the function API below.
+  /// \p PassName is the name of the pass emitting this diagnostic. If
+  /// this name matches the regular expression given in -Rpass-analysis=, then
+  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+  /// is being emitted. \p Loc is the location information to use in the
+  /// diagnostic. If line table information is available, the diagnostic will
+  /// include the source code location. \p Msg is the message to show. Note that
+  /// this class does not copy this message, so this reference must be valid for
+  /// the whole life time of the diagnostic.
+  OptimizationRemarkAnalysis(const char *PassName, const Function &Fn,
+                             const DiagnosticLocation &Loc, const Twine &Msg)
+      : DiagnosticInfoIROptimization(DK_OptimizationRemarkAnalysis, DS_Remark,
+                                     PassName, Fn, Loc, Msg) {}
+};
+
+/// Diagnostic information for optimization analysis remarks related to
+/// floating-point non-commutativity.
+class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-analysis=, then the
+  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+  /// remark (single-word, camel-case). \p Loc is the debug location and \p
+  /// CodeRegion is the region that the optimization operates on (currently only
+  /// block is supported). The front-end will append its own message related to
+  /// options that address floating-point non-commutativity.
+  OptimizationRemarkAnalysisFPCommute(const char *PassName,
+                                      StringRef RemarkName,
+                                      const DiagnosticLocation &Loc,
+                                      const Value *CodeRegion)
+      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
+                                   PassName, RemarkName, Loc, CodeRegion) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationRemarkAnalysisFPCommute;
+  }
+
+private:
+  /// This is deprecated now and only used by the function API below.
+  /// \p PassName is the name of the pass emitting this diagnostic. If
+  /// this name matches the regular expression given in -Rpass-analysis=, then
+  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+  /// is being emitted. \p Loc is the location information to use in the
+  /// diagnostic. If line table information is available, the diagnostic will
+  /// include the source code location. \p Msg is the message to show. The
+  /// front-end will append its own message related to options that address
+  /// floating-point non-commutativity. Note that this class does not copy this
+  /// message, so this reference must be valid for the whole life time of the
+  /// diagnostic.
+  OptimizationRemarkAnalysisFPCommute(const char *PassName, const Function &Fn,
+                                      const DiagnosticLocation &Loc,
+                                      const Twine &Msg)
+      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
+                                   PassName, Fn, Loc, Msg) {}
+};
+
+/// Diagnostic information for optimization analysis remarks related to
+/// pointer aliasing.
+class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-analysis=, then the
+  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+  /// remark (single-word, camel-case). \p Loc is the debug location and \p
+  /// CodeRegion is the region that the optimization operates on (currently only
+  /// block is supported). The front-end will append its own message related to
+  /// options that address pointer aliasing legality.
+  OptimizationRemarkAnalysisAliasing(const char *PassName, StringRef RemarkName,
+                                     const DiagnosticLocation &Loc,
+                                     const Value *CodeRegion)
+      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
+                                   PassName, RemarkName, Loc, CodeRegion) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationRemarkAnalysisAliasing;
+  }
+
+private:
+  /// This is deprecated now and only used by the function API below.
+  /// \p PassName is the name of the pass emitting this diagnostic. If
+  /// this name matches the regular expression given in -Rpass-analysis=, then
+  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+  /// is being emitted. \p Loc is the location information to use in the
+  /// diagnostic. If line table information is available, the diagnostic will
+  /// include the source code location. \p Msg is the message to show. The
+  /// front-end will append its own message related to options that address
+  /// pointer aliasing legality. Note that this class does not copy this
+  /// message, so this reference must be valid for the whole life time of the
+  /// diagnostic.
+  OptimizationRemarkAnalysisAliasing(const char *PassName, const Function &Fn,
+                                     const DiagnosticLocation &Loc,
+                                     const Twine &Msg)
+      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
+                                   PassName, Fn, Loc, Msg) {}
+};
+
+/// Diagnostic information for machine IR parser.
+class DiagnosticInfoMIRParser : public DiagnosticInfo {
+  const SMDiagnostic &Diagnostic;
+
+public:
+  DiagnosticInfoMIRParser(DiagnosticSeverity Severity,
+                          const SMDiagnostic &Diagnostic)
+      : DiagnosticInfo(DK_MIRParser, Severity), Diagnostic(Diagnostic) {}
+
+  const SMDiagnostic &getDiagnostic() const { return Diagnostic; }
+
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MIRParser;
+  }
+};
+
+/// Diagnostic information for ISel fallback path.
+class DiagnosticInfoISelFallback : public DiagnosticInfo {
+  /// The function that is concerned by this diagnostic.
+  const Function &Fn;
+
+public:
+  DiagnosticInfoISelFallback(const Function &Fn,
+                             DiagnosticSeverity Severity = DS_Warning)
+      : DiagnosticInfo(DK_ISelFallback, Severity), Fn(Fn) {}
+
+  const Function &getFunction() const { return Fn; }
+
+  void print(DiagnosticPrinter &DP) const override;
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_ISelFallback;
+  }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)
+
+/// Diagnostic information for optimization failures.
+class DiagnosticInfoOptimizationFailure : public DiagnosticInfoIROptimization {
+public:
+  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
+  /// the location information to use in the diagnostic. If line table
+  /// information is available, the diagnostic will include the source code
+  /// location. \p Msg is the message to show. Note that this class does not
+  /// copy this message, so this reference must be valid for the whole life time
+  /// of the diagnostic.
+  DiagnosticInfoOptimizationFailure(const Function &Fn,
+                                    const DiagnosticLocation &Loc,
+                                    const Twine &Msg)
+      : DiagnosticInfoIROptimization(DK_OptimizationFailure, DS_Warning,
+                                     nullptr, Fn, Loc, Msg) {}
+
+  /// \p PassName is the name of the pass emitting this diagnostic.  \p
+  /// RemarkName is a textual identifier for the remark (single-word,
+  /// camel-case).  \p Loc is the debug location and \p CodeRegion is the
+  /// region that the optimization operates on (currently basic block is
+  /// supported).
+  DiagnosticInfoOptimizationFailure(const char *PassName, StringRef RemarkName,
+                                    const DiagnosticLocation &Loc,
+                                    const Value *CodeRegion);
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_OptimizationFailure;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override;
+};
+
+/// Diagnostic information for unsupported feature in backend.
+class DiagnosticInfoUnsupported : public DiagnosticInfoWithLocationBase {
+private:
+  Twine Msg;
+
+public:
+  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
+  /// the location information to use in the diagnostic. If line table
+  /// information is available, the diagnostic will include the source code
+  /// location. \p Msg is the message to show. Note that this class does not
+  /// copy this message, so this reference must be valid for the whole life time
+  /// of the diagnostic.
+  DiagnosticInfoUnsupported(
+      const Function &Fn, const Twine &Msg,
+      const DiagnosticLocation &Loc = DiagnosticLocation(),
+      DiagnosticSeverity Severity = DS_Error)
+      : DiagnosticInfoWithLocationBase(DK_Unsupported, Severity, Fn, Loc),
+        Msg(Msg) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_Unsupported;
+  }
+
+  const Twine &getMessage() const { return Msg; }
+
+  void print(DiagnosticPrinter &DP) const override;
+};
+
+namespace yaml {
+template <> struct MappingTraits<DiagnosticInfoOptimizationBase *> {
+  static void mapping(IO &io, DiagnosticInfoOptimizationBase *&OptDiag);
+};
+} // namespace yaml
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DIAGNOSTICINFO_H
diff --git a/linux-x64/clang/include/llvm/IR/DiagnosticPrinter.h b/linux-x64/clang/include/llvm/IR/DiagnosticPrinter.h
new file mode 100644
index 0000000..59c8329
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/DiagnosticPrinter.h
@@ -0,0 +1,96 @@
+//===- llvm/Support/DiagnosticPrinter.h - Diagnostic Printer ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the main interface for printing backend diagnostics.
+//
+// Clients of the backend diagnostics should overload this interface based
+// on their needs.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
+#define LLVM_IR_DIAGNOSTICPRINTER_H
+
+#include <string>
+
+namespace llvm {
+
+// Forward declarations.
+class Module;
+class raw_ostream;
+class SMDiagnostic;
+class StringRef;
+class Twine;
+class Value;
+
+/// \brief Interface for custom diagnostic printing.
+class DiagnosticPrinter {
+public:
+  virtual ~DiagnosticPrinter() = default;
+
+  // Simple types.
+  virtual DiagnosticPrinter &operator<<(char C) = 0;
+  virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
+  virtual DiagnosticPrinter &operator<<(signed char C) = 0;
+  virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
+  virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
+  virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
+  virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
+  virtual DiagnosticPrinter &operator<<(long N) = 0;
+  virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
+  virtual DiagnosticPrinter &operator<<(long long N) = 0;
+  virtual DiagnosticPrinter &operator<<(const void *P) = 0;
+  virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
+  virtual DiagnosticPrinter &operator<<(int N) = 0;
+  virtual DiagnosticPrinter &operator<<(double N) = 0;
+  virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;
+
+  // IR related types.
+  virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
+  virtual DiagnosticPrinter &operator<<(const Module &M) = 0;
+
+  // Other types.
+  virtual DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) = 0;
+};
+
+/// \brief Basic diagnostic printer that uses an underlying raw_ostream.
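+///
+/// For instance (a sketch; \c DI is some DiagnosticInfo, and errs() is the
+/// standard error stream):
+/// \code
+///   DiagnosticPrinterRawOStream DP(errs());
+///   DI.print(DP);
+/// \endcode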
+class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
+protected:
+  raw_ostream &Stream;
+
+public:
+  DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}
+
+  // Simple types.
+  DiagnosticPrinter &operator<<(char C) override;
+  DiagnosticPrinter &operator<<(unsigned char C) override;
+  DiagnosticPrinter &operator<<(signed char C) override;
+  DiagnosticPrinter &operator<<(StringRef Str) override;
+  DiagnosticPrinter &operator<<(const char *Str) override;
+  DiagnosticPrinter &operator<<(const std::string &Str) override;
+  DiagnosticPrinter &operator<<(unsigned long N) override;
+  DiagnosticPrinter &operator<<(long N) override;
+  DiagnosticPrinter &operator<<(unsigned long long N) override;
+  DiagnosticPrinter &operator<<(long long N) override;
+  DiagnosticPrinter &operator<<(const void *P) override;
+  DiagnosticPrinter &operator<<(unsigned int N) override;
+  DiagnosticPrinter &operator<<(int N) override;
+  DiagnosticPrinter &operator<<(double N) override;
+  DiagnosticPrinter &operator<<(const Twine &Str) override;
+
+  // IR related types.
+  DiagnosticPrinter &operator<<(const Value &V) override;
+  DiagnosticPrinter &operator<<(const Module &M) override;
+
+  // Other types.
+  DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DIAGNOSTICPRINTER_H
diff --git a/linux-x64/clang/include/llvm/IR/Dominators.h b/linux-x64/clang/include/llvm/IR/Dominators.h
new file mode 100644
index 0000000..f6811bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Dominators.h
@@ -0,0 +1,369 @@
+//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominatorTree class, which provides fast and efficient
+// dominance queries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DOMINATORS_H
+#define LLVM_IR_DOMINATORS_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/GenericDomTree.h"
+#include <utility>
+
+namespace llvm {
+
+class Function;
+class Instruction;
+class Module;
+class raw_ostream;
+
+extern template class DomTreeNodeBase<BasicBlock>;
+extern template class DominatorTreeBase<BasicBlock, false>; // DomTree
+extern template class DominatorTreeBase<BasicBlock, true>; // PostDomTree
+
+namespace DomTreeBuilder {
+using BBDomTree = DomTreeBase<BasicBlock>;
+using BBPostDomTree = PostDomTreeBase<BasicBlock>;
+
+extern template struct Update<BasicBlock *>;
+
+using BBUpdates = ArrayRef<Update<BasicBlock *>>;
+
+extern template void Calculate<BBDomTree>(BBDomTree &DT);
+extern template void Calculate<BBPostDomTree>(BBPostDomTree &DT);
+
+extern template void InsertEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
+                                           BasicBlock *To);
+extern template void InsertEdge<BBPostDomTree>(BBPostDomTree &DT,
+                                               BasicBlock *From,
+                                               BasicBlock *To);
+
+extern template void DeleteEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
+                                           BasicBlock *To);
+extern template void DeleteEdge<BBPostDomTree>(BBPostDomTree &DT,
+                                               BasicBlock *From,
+                                               BasicBlock *To);
+
+extern template void ApplyUpdates<BBDomTree>(BBDomTree &DT, BBUpdates);
+extern template void ApplyUpdates<BBPostDomTree>(BBPostDomTree &DT, BBUpdates);
+
+extern template bool Verify<BBDomTree>(const BBDomTree &DT,
+                                       BBDomTree::VerificationLevel VL);
+extern template bool Verify<BBPostDomTree>(const BBPostDomTree &DT,
+                                           BBPostDomTree::VerificationLevel VL);
+}  // namespace DomTreeBuilder
+
+using DomTreeNode = DomTreeNodeBase<BasicBlock>;
+
+class BasicBlockEdge {
+  const BasicBlock *Start;
+  const BasicBlock *End;
+
+public:
+  BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
+    Start(Start_), End(End_) {}
+
+  BasicBlockEdge(const std::pair<BasicBlock *, BasicBlock *> &Pair)
+      : Start(Pair.first), End(Pair.second) {}
+
+  BasicBlockEdge(const std::pair<const BasicBlock *, const BasicBlock *> &Pair)
+      : Start(Pair.first), End(Pair.second) {}
+
+  const BasicBlock *getStart() const {
+    return Start;
+  }
+
+  const BasicBlock *getEnd() const {
+    return End;
+  }
+
+  /// Check if this is the only edge between Start and End.
+  bool isSingleEdge() const;
+};
+
+template <> struct DenseMapInfo<BasicBlockEdge> {
+  using BBInfo = DenseMapInfo<const BasicBlock *>;
+
+  static unsigned getHashValue(const BasicBlockEdge *V);
+
+  static inline BasicBlockEdge getEmptyKey() {
+    return BasicBlockEdge(BBInfo::getEmptyKey(), BBInfo::getEmptyKey());
+  }
+
+  static inline BasicBlockEdge getTombstoneKey() {
+    return BasicBlockEdge(BBInfo::getTombstoneKey(), BBInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const BasicBlockEdge &Edge) {
+    return hash_combine(BBInfo::getHashValue(Edge.getStart()),
+                        BBInfo::getHashValue(Edge.getEnd()));
+  }
+
+  static bool isEqual(const BasicBlockEdge &LHS, const BasicBlockEdge &RHS) {
+    return BBInfo::isEqual(LHS.getStart(), RHS.getStart()) &&
+           BBInfo::isEqual(LHS.getEnd(), RHS.getEnd());
+  }
+};
+
+/// \brief Concrete subclass of DominatorTreeBase that is used to compute a
+/// normal dominator tree.
+///
+/// Definition: A block is said to be forward statically reachable if there is
+/// a path from the entry of the function to the block.  A statically reachable
+/// block may become statically unreachable during optimization.
+///
+/// A forward unreachable block may appear in the dominator tree, or it may
+/// not.  If it does, dominance queries will return results as if all reachable
+/// blocks dominate it.  When asking for a Node corresponding to a potentially
+/// unreachable block, calling code must handle the case where the block was
+/// unreachable and the result of getNode() is nullptr.
+///
+/// Generally, a block known to be unreachable when the dominator tree is
+/// constructed will not be in the tree.  One which becomes unreachable after
+/// the dominator tree is initially constructed may still exist in the tree,
+/// even if the tree is properly updated. Calling code should not rely on the
+/// preceding statements; this is stated only to assist human understanding.
+class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
+ public:
+  using Base = DominatorTreeBase<BasicBlock, false>;
+
+  DominatorTree() = default;
+  explicit DominatorTree(Function &F) { recalculate(F); }
+
+  /// Handle invalidation explicitly.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &);
+
+  // Ensure base-class overloads are visible.
+  using Base::dominates;
+
+  /// \brief Return true if Def dominates a use in User.
+  ///
+  /// This performs the special checks necessary if Def and User are in the same
+  /// basic block. Note that Def doesn't dominate a use in Def itself!
+  bool dominates(const Instruction *Def, const Use &U) const;
+  bool dominates(const Instruction *Def, const Instruction *User) const;
+  bool dominates(const Instruction *Def, const BasicBlock *BB) const;
+
+  /// Return true if an edge dominates a use.
+  ///
+  /// If BBE is not the unique edge between its start and end blocks, it can
+  /// never dominate the use.
+  bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
+  bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
+
+  // Ensure base class overloads are visible.
+  using Base::isReachableFromEntry;
+
+  /// \brief Provide an overload for a Use.
+  bool isReachableFromEntry(const Use &U) const;
+
+  // Pop up a GraphViz/gv window with the Dominator Tree rendered using `dot`.
+  void viewGraph(const Twine &Name, const Twine &Title);
+  void viewGraph();
+};
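+
+// A minimal usage sketch; F is a Function, and Def and UserInst are
+// Instructions obtained by the surrounding code:
+//
+// \code
+//   DominatorTree DT(F);
+//   if (DT.dominates(Def, UserInst)) {
+//     // Every path from the entry to UserInst executes Def first.
+//   }
+// \endcode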
+
+//===-------------------------------------
+// DominatorTree GraphTraits specializations so the DominatorTree can be
+// iterable by generic graph iterators.
+
+template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
+  using NodeRef = Node *;
+  using ChildIteratorType = ChildIterator;
+  using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+
+  static nodes_iterator nodes_begin(NodeRef N) {
+    return df_begin(getEntryNode(N));
+  }
+
+  static nodes_iterator nodes_end(NodeRef N) { return df_end(getEntryNode(N)); }
+};
+
+template <>
+struct GraphTraits<DomTreeNode *>
+    : public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::iterator> {};
+
+template <>
+struct GraphTraits<const DomTreeNode *>
+    : public DomTreeGraphTraitsBase<const DomTreeNode,
+                                    DomTreeNode::const_iterator> {};
+
+template <> struct GraphTraits<DominatorTree*>
+  : public GraphTraits<DomTreeNode*> {
+  static NodeRef getEntryNode(DominatorTree *DT) { return DT->getRootNode(); }
+
+  static nodes_iterator nodes_begin(DominatorTree *N) {
+    return df_begin(getEntryNode(N));
+  }
+
+  static nodes_iterator nodes_end(DominatorTree *N) {
+    return df_end(getEntryNode(N));
+  }
+};
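+
+// With these traits, generic graph algorithms can walk the tree. A sketch of
+// a depth-first visit (depth_first comes from llvm/ADT/DepthFirstIterator.h;
+// DT is a DominatorTree built elsewhere):
+//
+// \code
+//   for (DomTreeNode *N : depth_first(DT.getRootNode())) {
+//     BasicBlock *BB = N->getBlock(); // the block this node represents
+//   }
+// \endcode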
+
+/// \brief Analysis pass which computes a \c DominatorTree.
+class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
+  friend AnalysisInfoMixin<DominatorTreeAnalysis>;
+  static AnalysisKey Key;
+
+public:
+  /// \brief Provide the result typedef for this analysis pass.
+  using Result = DominatorTree;
+
+  /// \brief Run the analysis pass over a function and produce a dominator tree.
+  DominatorTree run(Function &F, FunctionAnalysisManager &);
+};
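+
+// In a pass written for the new pass manager, the tree is obtained from the
+// analysis manager. A sketch (MyPass is a placeholder name):
+//
+// \code
+//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
+//     DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
+//     // ... use DT ...
+//     return PreservedAnalyses::all();
+//   }
+// \endcode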
+
+/// \brief Printer pass for the \c DominatorTree.
+class DominatorTreePrinterPass
+    : public PassInfoMixin<DominatorTreePrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit DominatorTreePrinterPass(raw_ostream &OS);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for the \c DominatorTree.
+struct DominatorTreeVerifierPass : PassInfoMixin<DominatorTreeVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Legacy analysis pass which computes a \c DominatorTree.
+class DominatorTreeWrapperPass : public FunctionPass {
+  DominatorTree DT;
+
+public:
+  static char ID;
+
+  DominatorTreeWrapperPass() : FunctionPass(ID) {
+    initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  DominatorTree &getDomTree() { return DT; }
+  const DominatorTree &getDomTree() const { return DT; }
+
+  bool runOnFunction(Function &F) override;
+
+  void verifyAnalysis() const override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  void releaseMemory() override { DT.releaseMemory(); }
+
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+};
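+
+// Under the legacy pass manager, a pass declares its dependency and then
+// queries the wrapper. A sketch (MyLegacyPass is a placeholder name):
+//
+// \code
+//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
+//     AU.addRequired<DominatorTreeWrapperPass>();
+//   }
+//
+//   bool MyLegacyPass::runOnFunction(Function &F) {
+//     DominatorTree &DT =
+//         getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+//     // ... use DT ...
+//     return false;
+//   }
+// \endcode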
+
+//===-------------------------------------
+/// \brief Class to defer updates to a DominatorTree.
+///
+/// Applying updates on every edge insertion and deletion is expensive and
+/// often unnecessary. When the DominatorTree is needed for analysis, flush()
+/// can be called to perform one larger batch update. This has the advantage
+/// of letting the DominatorTree inspect the whole set of updates and discard
+/// duplicates and unnecessary subtree updates.
+///
+/// DeferredDominance operates at the level of a single Function.
+///
+/// The caller does not need to scrub the updates for duplicates or for
+/// updates that point to the same block (Delete, BB_A, BB_A). Nonetheless,
+/// when duplicate edge requests are likely, performance can be gained by
+/// batching updates and submitting them through applyUpdates(ArrayRef).
+///
+/// It is required for the state of the LLVM IR to be applied *before*
+/// submitting updates. The update routines must analyze the current state
+/// between a pair of (From, To) basic blocks to determine if the update
+/// needs to be queued.
+/// Example (good):
+///     TerminatorInstructionBB->removeFromParent();
+///     DDT->deleteEdge(BB, Successor);
+/// Example (bad):
+///     DDT->deleteEdge(BB, Successor);
+///     TerminatorInstructionBB->removeFromParent();
+class DeferredDominance {
+public:
+  DeferredDominance(DominatorTree &DT_) : DT(DT_) {}
+
+  /// \brief Queues multiple updates and discards duplicates.
+  void applyUpdates(ArrayRef<DominatorTree::UpdateType> Updates);
+
+  /// \brief Helper method for a single edge insertion. It's almost always
+  /// better to batch updates and call applyUpdates to quickly remove duplicate
+  /// edges. This is best used when there is only a single insertion needed to
+  /// update Dominators.
+  void insertEdge(BasicBlock *From, BasicBlock *To);
+
+  /// \brief Helper method for a single edge deletion. It's almost always better
+  /// to batch updates and call applyUpdates to quickly remove duplicate edges.
+  /// This is best used when there is only a single deletion needed to update
+  /// Dominators.
+  void deleteEdge(BasicBlock *From, BasicBlock *To);
+
+  /// \brief Delays the deletion of a basic block until a flush() event.
+  void deleteBB(BasicBlock *DelBB);
+
+  /// \brief Returns true if DelBB is awaiting deletion at a flush() event.
+  bool pendingDeletedBB(BasicBlock *DelBB);
+
+  /// \brief Returns true if pending DT updates are queued for a flush() event.
+  bool pending();
+
+  /// \brief Flushes all pending updates and block deletions. Returns a
+  /// correct DominatorTree reference to be used by the caller for analysis.
+  DominatorTree &flush();
+
+  /// \brief Drops all internal state and forces a (slow) recalculation of the
+  /// DominatorTree based on the current state of the LLVM IR in F. This should
+  /// only be used in corner cases such as the Entry block of F being deleted.
+  void recalculate(Function &F);
+
+  /// \brief Debug method to help view the state of pending updates.
+  LLVM_DUMP_METHOD void dump() const;
+
+private:
+  DominatorTree &DT;
+  SmallVector<DominatorTree::UpdateType, 16> PendUpdates;
+  SmallPtrSet<BasicBlock *, 8> DeletedBBs;
+
+  /// Apply an update (Kind, From, To) to the internal queued updates. The
+  /// update is only added when determined to be necessary. Checks for
+  /// self-domination, unnecessary updates, duplicate requests, and balanced
+  /// pairs of requests are all performed. Returns true if the update is
+  /// queued and false if it is discarded.
+  bool applyUpdate(DominatorTree::UpdateKind Kind, BasicBlock *From,
+                   BasicBlock *To);
+
+  /// Performs all pending basic block deletions. We have to defer the deletion
+  /// of these blocks until after the DominatorTree updates are applied. The
+  /// internal workings of the DominatorTree code expect every update's From
+  /// and To blocks to exist and to be a member of the same Function.
+  bool flushDelBB();
+};
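+
+// A sketch of the intended usage; BB and Succ are placeholders for blocks in
+// the function being transformed:
+//
+// \code
+//   DeferredDominance DDT(DT);
+//   // Mutate the IR first, then queue the matching update.
+//   BB->getTerminator()->eraseFromParent();
+//   DDT.deleteEdge(BB, Succ);
+//   // ... more CFG changes and queued updates ...
+//   DominatorTree &UpToDate = DDT.flush(); // Applies all pending updates.
+// \endcode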
+
+} // end namespace llvm
+
+#endif // LLVM_IR_DOMINATORS_H
diff --git a/linux-x64/clang/include/llvm/IR/Function.h b/linux-x64/clang/include/llvm/IR/Function.h
new file mode 100644
index 0000000..ec9d370
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Function.h
@@ -0,0 +1,798 @@
+//===- llvm/Function.h - Class to represent a single function ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Function class, which represents a
+// single function/procedure in LLVM.
+//
+// A function basically consists of a list of basic blocks, a list of arguments,
+// and a symbol table.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_FUNCTION_H
+#define LLVM_IR_FUNCTION_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+namespace Intrinsic {
+enum ID : unsigned;
+}
+
+class AssemblyAnnotationWriter;
+class Constant;
+class DISubprogram;
+class LLVMContext;
+class Module;
+template <typename T> class Optional;
+class raw_ostream;
+class Type;
+class User;
+
+class Function : public GlobalObject, public ilist_node<Function> {
+public:
+  using BasicBlockListType = SymbolTableList<BasicBlock>;
+
+  // BasicBlock iterators...
+  using iterator = BasicBlockListType::iterator;
+  using const_iterator = BasicBlockListType::const_iterator;
+
+  using arg_iterator = Argument *;
+  using const_arg_iterator = const Argument *;
+
+private:
+  // Important things that make up a function!
+  BasicBlockListType BasicBlocks;         ///< The basic blocks
+  mutable Argument *Arguments = nullptr;  ///< The formal arguments
+  size_t NumArgs;
+  std::unique_ptr<ValueSymbolTable>
+      SymTab;                             ///< Symbol table of args/instructions
+  AttributeList AttributeSets;            ///< Parameter attributes
+
+  /*
+   * Value::SubclassData
+   *
+   * bit 0      : HasLazyArguments
+   * bit 1      : HasPrefixData
+   * bit 2      : HasPrologueData
+   * bit 3      : HasPersonalityFn
+   * bits 4-13  : CallingConvention
+   * bit 14     : HasGC
+   * bit 15     : [reserved]
+   */
+
+  /// Bits from GlobalObject::GlobalObjectSubclassData.
+  enum {
+    /// Whether this function is materializable.
+    IsMaterializableBit = 0,
+  };
+
+  friend class SymbolTableListTraits<Function>;
+
+  /// hasLazyArguments/CheckLazyArguments - The argument list of a function is
+  /// built on demand, so that the list isn't allocated until the first client
+  /// needs it.  The hasLazyArguments predicate returns true if the arg list
+  /// hasn't been set up yet.
+public:
+  bool hasLazyArguments() const {
+    return getSubclassDataFromValue() & (1<<0);
+  }
+
+private:
+  void CheckLazyArguments() const {
+    if (hasLazyArguments())
+      BuildLazyArguments();
+  }
+
+  void BuildLazyArguments() const;
+
+  void clearArguments();
+
+  /// Function ctor - If the (optional) Module argument is specified, the
+  /// function is automatically inserted into the end of the function list for
+  /// the module.
+  ///
+  Function(FunctionType *Ty, LinkageTypes Linkage,
+           const Twine &N = "", Module *M = nullptr);
+
+public:
+  Function(const Function&) = delete;
+  void operator=(const Function&) = delete;
+  ~Function();
+
+  // This is here to help easily convert from FunctionT * (Function * or
+  // MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
+  // FunctionT->getFunction().
+  const Function &getFunction() const { return *this; }
+
+  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
+                          const Twine &N = "", Module *M = nullptr) {
+    return new Function(Ty, Linkage, N, M);
+  }
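+
+  // A creation sketch; Ctx is an LLVMContext and M a Module owned by the
+  // caller:
+  //
+  // \code
+  //   FunctionType *FTy =
+  //       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
+  //   Function *F =
+  //       Function::Create(FTy, Function::ExternalLinkage, "foo", &M);
+  // \endcode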
+
+  // Provide fast operand accessors.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Returns the FunctionType for me.
+  FunctionType *getFunctionType() const {
+    return cast<FunctionType>(getValueType());
+  }
+
+  /// Returns the type of the ret val.
+  Type *getReturnType() const { return getFunctionType()->getReturnType(); }
+
+  /// getContext - Return a reference to the LLVMContext associated with this
+  /// function.
+  LLVMContext &getContext() const;
+
+  /// isVarArg - Return true if this function takes a variable number of
+  /// arguments.
+  bool isVarArg() const { return getFunctionType()->isVarArg(); }
+
+  bool isMaterializable() const {
+    return getGlobalObjectSubClassData() & (1 << IsMaterializableBit);
+  }
+  void setIsMaterializable(bool V) {
+    unsigned Mask = 1 << IsMaterializableBit;
+    setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
+                                (V ? Mask : 0u));
+  }
+
+  /// getIntrinsicID - This method returns the ID number of this function, or
+  /// Intrinsic::not_intrinsic if the function is not an intrinsic.  This value
+  /// is always defined to be zero to allow easy checking for whether a
+  /// function is an intrinsic or not.
+  /// The particular intrinsic functions which correspond to this value are
+  /// defined in llvm/Intrinsics.h.
+  Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }
+
+  /// isIntrinsic - Returns true if the function's name starts with "llvm.".
+  /// It's possible for this function to return true while getIntrinsicID()
+  /// returns Intrinsic::not_intrinsic!
+  bool isIntrinsic() const { return HasLLVMReservedName; }
+
+  static Intrinsic::ID lookupIntrinsicID(StringRef Name);
+
+  /// \brief Recalculate the ID for this function if it is an Intrinsic defined
+  /// in llvm/Intrinsics.h.  Sets the intrinsic ID to Intrinsic::not_intrinsic
+  /// if the name of this function does not match an intrinsic in that header.
+  /// Note, this method does not need to be called directly, as it is called
+  /// from Value::setName() whenever the name of this function changes.
+  void recalculateIntrinsicID();
+
+  /// getCallingConv()/setCallingConv(CC) - These methods get and set the
+  /// calling convention of this function.  The enum values for the known
+  /// calling conventions are defined in CallingConv.h.
+  CallingConv::ID getCallingConv() const {
+    return static_cast<CallingConv::ID>((getSubclassDataFromValue() >> 4) &
+                                        CallingConv::MaxID);
+  }
+  void setCallingConv(CallingConv::ID CC) {
+    auto ID = static_cast<unsigned>(CC);
+    assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+    setValueSubclassData((getSubclassDataFromValue() & 0xc00f) | (ID << 4));
+  }
+
+  /// @brief Return the attribute list for this Function.
+  AttributeList getAttributes() const { return AttributeSets; }
+
+  /// @brief Set the attribute list for this Function.
+  void setAttributes(AttributeList Attrs) { AttributeSets = Attrs; }
+
+  /// @brief Add function attributes to this function.
+  void addFnAttr(Attribute::AttrKind Kind) {
+    addAttribute(AttributeList::FunctionIndex, Kind);
+  }
+
+  /// @brief Add function attributes to this function.
+  void addFnAttr(StringRef Kind, StringRef Val = StringRef()) {
+    addAttribute(AttributeList::FunctionIndex,
+                 Attribute::get(getContext(), Kind, Val));
+  }
+
+  /// @brief Add function attributes to this function.
+  void addFnAttr(Attribute Attr) {
+    addAttribute(AttributeList::FunctionIndex, Attr);
+  }
+
+  /// @brief Remove function attributes from this function.
+  void removeFnAttr(Attribute::AttrKind Kind) {
+    removeAttribute(AttributeList::FunctionIndex, Kind);
+  }
+
+  /// @brief Remove function attribute from this function.
+  void removeFnAttr(StringRef Kind) {
+    setAttributes(getAttributes().removeAttribute(
+        getContext(), AttributeList::FunctionIndex, Kind));
+  }
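+
+  // An attribute-manipulation sketch on some Function &F:
+  //
+  // \code
+  //   F.addFnAttr(Attribute::NoUnwind);    // enum attribute
+  //   F.addFnAttr("target-cpu", "x86-64"); // string attribute
+  //   F.removeFnAttr(Attribute::NoUnwind);
+  // \endcode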
+
+  enum ProfileCountType { PCT_Invalid, PCT_Real, PCT_Synthetic };
+
+  /// Class to represent profile counts.
+  ///
+  /// This class represents both real and synthetic profile counts.
+  class ProfileCount {
+  private:
+    uint64_t Count;
+    ProfileCountType PCT;
+    static ProfileCount Invalid;
+
+  public:
+    ProfileCount() : Count(-1), PCT(PCT_Invalid) {}
+    ProfileCount(uint64_t Count, ProfileCountType PCT)
+        : Count(Count), PCT(PCT) {}
+    bool hasValue() const { return PCT != PCT_Invalid; }
+    uint64_t getCount() const { return Count; }
+    ProfileCountType getType() const { return PCT; }
+    bool isSynthetic() const { return PCT == PCT_Synthetic; }
+    explicit operator bool() { return hasValue(); }
+    bool operator!() const { return !hasValue(); }
+    // Update the count retaining the same profile count type.
+    ProfileCount &setCount(uint64_t C) {
+      Count = C;
+      return *this;
+    }
+    static ProfileCount getInvalid() { return ProfileCount(-1, PCT_Invalid); }
+  };
+
+  /// \brief Set the entry count for this function.
+  ///
+  /// Entry count is the number of times this function was executed based on
+  /// pgo data. \p Imports points to a set of GUIDs that needs to
+  /// be imported by the function for sample PGO, to enable the same inlines as
+  /// the profiled optimized binary.
+  void setEntryCount(ProfileCount Count,
+                     const DenseSet<GlobalValue::GUID> *Imports = nullptr);
+
+  /// A convenience wrapper for setting entry count
+  void setEntryCount(uint64_t Count, ProfileCountType Type = PCT_Real,
+                     const DenseSet<GlobalValue::GUID> *Imports = nullptr);
+
+  /// \brief Get the entry count for this function.
+  ///
+  /// Entry count is the number of times the function was executed based on
+  /// PGO data.
+  ProfileCount getEntryCount() const;
+
+  /// Return true if the function is annotated with profile data.
+  ///
+  /// Presence of entry counts from a profile run implies the function has
+  /// profile annotations.
+  bool hasProfileData() const { return getEntryCount().hasValue(); }
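+
+  // An entry-count sketch on some Function &F:
+  //
+  // \code
+  //   F.setEntryCount(1000, Function::PCT_Real);
+  //   Function::ProfileCount PC = F.getEntryCount();
+  //   uint64_t N = PC.hasValue() ? PC.getCount() : 0;
+  // \endcode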
+
+  /// Returns the set of GUIDs that need to be imported to the function for
+  /// sample PGO, to enable the same inlines as the profiled optimized binary.
+  DenseSet<GlobalValue::GUID> getImportGUIDs() const;
+
+  /// Set the section prefix for this function.
+  void setSectionPrefix(StringRef Prefix);
+
+  /// Get the section prefix for this function.
+  Optional<StringRef> getSectionPrefix() const;
+
+  /// @brief Return true if the function has the attribute.
+  bool hasFnAttribute(Attribute::AttrKind Kind) const {
+    return AttributeSets.hasFnAttribute(Kind);
+  }
+
+  /// @brief Return true if the function has the attribute.
+  bool hasFnAttribute(StringRef Kind) const {
+    return AttributeSets.hasFnAttribute(Kind);
+  }
+
+  /// @brief Return the attribute for the given attribute kind.
+  Attribute getFnAttribute(Attribute::AttrKind Kind) const {
+    return getAttribute(AttributeList::FunctionIndex, Kind);
+  }
+
+  /// @brief Return the attribute for the given attribute kind.
+  Attribute getFnAttribute(StringRef Kind) const {
+    return getAttribute(AttributeList::FunctionIndex, Kind);
+  }
+
+  /// \brief Return the stack alignment for the function.
+  unsigned getFnStackAlignment() const {
+    if (!hasFnAttribute(Attribute::StackAlignment))
+      return 0;
+    return AttributeSets.getStackAlignment(AttributeList::FunctionIndex);
+  }
+
+  /// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
+  ///                             to use during code generation.
+  bool hasGC() const {
+    return getSubclassDataFromValue() & (1<<14);
+  }
+  const std::string &getGC() const;
+  void setGC(std::string Str);
+  void clearGC();
+
+  /// @brief adds the attribute to the list of attributes.
+  void addAttribute(unsigned i, Attribute::AttrKind Kind);
+
+  /// @brief adds the attribute to the list of attributes.
+  void addAttribute(unsigned i, Attribute Attr);
+
+  /// @brief adds the attributes to the list of attributes.
+  void addAttributes(unsigned i, const AttrBuilder &Attrs);
+
+  /// @brief adds the attribute to the list of attributes for the given arg.
+  void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
+
+  /// @brief adds the attribute to the list of attributes for the given arg.
+  void addParamAttr(unsigned ArgNo, Attribute Attr);
+
+  /// @brief adds the attributes to the list of attributes for the given arg.
+  void addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs);
+
+  /// @brief removes the attribute from the list of attributes.
+  void removeAttribute(unsigned i, Attribute::AttrKind Kind);
+
+  /// @brief removes the attribute from the list of attributes.
+  void removeAttribute(unsigned i, StringRef Kind);
+
+  /// @brief removes the attributes from the list of attributes.
+  void removeAttributes(unsigned i, const AttrBuilder &Attrs);
+
+  /// @brief removes the attribute from the list of attributes.
+  void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
+
+  /// @brief removes the attribute from the list of attributes.
+  void removeParamAttr(unsigned ArgNo, StringRef Kind);
+
+  /// @brief removes the attribute from the list of attributes.
+  void removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs);
+
+  /// @brief check if an attribute is in the list of attributes.
+  bool hasAttribute(unsigned i, Attribute::AttrKind Kind) const {
+    return getAttributes().hasAttribute(i, Kind);
+  }
+
+  /// @brief check if an attribute is in the list of attributes.
+  bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    return getAttributes().hasParamAttribute(ArgNo, Kind);
+  }
+
+  /// @brief gets the attribute from the list of attributes.
+  Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+    return AttributeSets.getAttribute(i, Kind);
+  }
+
+  /// @brief gets the attribute from the list of attributes.
+  Attribute getAttribute(unsigned i, StringRef Kind) const {
+    return AttributeSets.getAttribute(i, Kind);
+  }
+
+  /// @brief adds the dereferenceable attribute to the list of attributes.
+  void addDereferenceableAttr(unsigned i, uint64_t Bytes);
+
+  /// @brief adds the dereferenceable attribute to the list of attributes for
+  /// the given arg.
+  void addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes);
+
+  /// @brief adds the dereferenceable_or_null attribute to the list of
+  /// attributes.
+  void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
+
+  /// @brief adds the dereferenceable_or_null attribute to the list of
+  /// attributes for the given arg.
+  void addDereferenceableOrNullParamAttr(unsigned ArgNo, uint64_t Bytes);
+
+  /// @brief Extract the alignment for a call or parameter (0=unknown).
+  unsigned getParamAlignment(unsigned ArgNo) const {
+    return AttributeSets.getParamAlignment(ArgNo);
+  }
+
+  /// @brief Extract the number of dereferenceable bytes for a call or
+  /// parameter (0=unknown).
+  /// @param i AttributeList index, referring to a return value or argument.
+  uint64_t getDereferenceableBytes(unsigned i) const {
+    return AttributeSets.getDereferenceableBytes(i);
+  }
+
+  /// @brief Extract the number of dereferenceable bytes for a parameter.
+  /// @param ArgNo Index of an argument, with 0 being the first function arg.
+  uint64_t getParamDereferenceableBytes(unsigned ArgNo) const {
+    return AttributeSets.getParamDereferenceableBytes(ArgNo);
+  }
+
+  /// @brief Extract the number of dereferenceable_or_null bytes for a call or
+  /// parameter (0=unknown).
+  /// @param i AttributeList index, referring to a return value or argument.
+  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+    return AttributeSets.getDereferenceableOrNullBytes(i);
+  }
+
+  /// @brief Extract the number of dereferenceable_or_null bytes for a
+  /// parameter.
+  /// @param ArgNo AttributeList ArgNo, referring to an argument.
+  uint64_t getParamDereferenceableOrNullBytes(unsigned ArgNo) const {
+    return AttributeSets.getParamDereferenceableOrNullBytes(ArgNo);
+  }
+
+  /// @brief Determine if the function does not access memory.
+  bool doesNotAccessMemory() const {
+    return hasFnAttribute(Attribute::ReadNone);
+  }
+  void setDoesNotAccessMemory() {
+    addFnAttr(Attribute::ReadNone);
+  }
+
+  /// @brief Determine if the function does not access or only reads memory.
+  bool onlyReadsMemory() const {
+    return doesNotAccessMemory() || hasFnAttribute(Attribute::ReadOnly);
+  }
+  void setOnlyReadsMemory() {
+    addFnAttr(Attribute::ReadOnly);
+  }
+
+  /// @brief Determine if the function does not access or only writes memory.
+  bool doesNotReadMemory() const {
+    return doesNotAccessMemory() || hasFnAttribute(Attribute::WriteOnly);
+  }
+  void setDoesNotReadMemory() {
+    addFnAttr(Attribute::WriteOnly);
+  }
+
+  /// @brief Determine if the call can access memory only using pointers based
+  /// on its arguments.
+  bool onlyAccessesArgMemory() const {
+    return hasFnAttribute(Attribute::ArgMemOnly);
+  }
+  void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
+
+  /// @brief Determine if the function may only access memory that is
+  ///  inaccessible from the IR.
+  bool onlyAccessesInaccessibleMemory() const {
+    return hasFnAttribute(Attribute::InaccessibleMemOnly);
+  }
+  void setOnlyAccessesInaccessibleMemory() {
+    addFnAttr(Attribute::InaccessibleMemOnly);
+  }
+
+  /// @brief Determine if the function may only access memory that is
+  ///  either inaccessible from the IR or pointed to by its arguments.
+  bool onlyAccessesInaccessibleMemOrArgMem() const {
+    return hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly);
+  }
+  void setOnlyAccessesInaccessibleMemOrArgMem() {
+    addFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
+  }
+
+  /// @brief Determine if the function cannot return.
+  bool doesNotReturn() const {
+    return hasFnAttribute(Attribute::NoReturn);
+  }
+  void setDoesNotReturn() {
+    addFnAttr(Attribute::NoReturn);
+  }
+
+  /// Determine if the function should not perform indirect branch tracking.
+  bool doesNoCfCheck() const { return hasFnAttribute(Attribute::NoCfCheck); }
+
+  /// @brief Determine if the function cannot unwind.
+  bool doesNotThrow() const {
+    return hasFnAttribute(Attribute::NoUnwind);
+  }
+  void setDoesNotThrow() {
+    addFnAttr(Attribute::NoUnwind);
+  }
+
+  /// @brief Determine if the call cannot be duplicated.
+  bool cannotDuplicate() const {
+    return hasFnAttribute(Attribute::NoDuplicate);
+  }
+  void setCannotDuplicate() {
+    addFnAttr(Attribute::NoDuplicate);
+  }
+
+  /// @brief Determine if the call is convergent.
+  bool isConvergent() const {
+    return hasFnAttribute(Attribute::Convergent);
+  }
+  void setConvergent() {
+    addFnAttr(Attribute::Convergent);
+  }
+  void setNotConvergent() {
+    removeFnAttr(Attribute::Convergent);
+  }
+
+  /// @brief Determine if the function is speculatable (it has no side effects
+  /// and may safely be executed speculatively).
+  bool isSpeculatable() const {
+    return hasFnAttribute(Attribute::Speculatable);
+  }
+  void setSpeculatable() {
+    addFnAttr(Attribute::Speculatable);
+  }
+
+  /// Determine if the function is known not to recurse, directly or
+  /// indirectly.
+  bool doesNotRecurse() const {
+    return hasFnAttribute(Attribute::NoRecurse);
+  }
+  void setDoesNotRecurse() {
+    addFnAttr(Attribute::NoRecurse);
+  }
+
+  /// @brief True if the ABI mandates (or the user requested) that this
+  /// function be in an unwind table.
+  bool hasUWTable() const {
+    return hasFnAttribute(Attribute::UWTable);
+  }
+  void setHasUWTable() {
+    addFnAttr(Attribute::UWTable);
+  }
+
+  /// @brief True if this function needs an unwind table.
+  bool needsUnwindTableEntry() const {
+    return hasUWTable() || !doesNotThrow();
+  }
+
+  /// @brief Determine if the function returns a structure through its first
+  /// or second pointer argument.
+  bool hasStructRetAttr() const {
+    return AttributeSets.hasParamAttribute(0, Attribute::StructRet) ||
+           AttributeSets.hasParamAttribute(1, Attribute::StructRet);
+  }
+
+  /// @brief Determine if the return value is marked with the NoAlias
+  /// attribute.
+  bool returnDoesNotAlias() const {
+    return AttributeSets.hasAttribute(AttributeList::ReturnIndex,
+                                      Attribute::NoAlias);
+  }
+  void setReturnDoesNotAlias() {
+    addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+  }
+
+  /// Optimize this function for minimum size (-Oz).
+  bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); }
+
+  /// Optimize this function for size (-Os) or minimum size (-Oz).
+  bool optForSize() const {
+    return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+  }
+
+  /// copyAttributesFrom - copy all additional attributes (those not needed to
+  /// create a Function) from the Function Src to this one.
+  void copyAttributesFrom(const Function *Src);
+
+  /// deleteBody - This method deletes the body of the function, and converts
+  /// the linkage to external.
+  ///
+  void deleteBody() {
+    dropAllReferences();
+    setLinkage(ExternalLinkage);
+  }
+
+  /// removeFromParent - This method unlinks 'this' from the containing module,
+  /// but does not delete it.
+  ///
+  void removeFromParent();
+
+  /// eraseFromParent - This method unlinks 'this' from the containing module
+  /// and deletes it.
+  ///
+  void eraseFromParent();
+
+  /// Steal arguments from another function.
+  ///
+  /// Drop this function's arguments and splice in the ones from \c Src.
+  /// Requires that this has no function body.
+  void stealArgumentListFrom(Function &Src);
+
+  /// Get the underlying elements of the Function... the basic block list is
+  /// empty for external functions.
+  ///
+  const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
+        BasicBlockListType &getBasicBlockList()       { return BasicBlocks; }
+
+  static BasicBlockListType Function::*getSublistAccess(BasicBlock*) {
+    return &Function::BasicBlocks;
+  }
+
+  const BasicBlock       &getEntryBlock() const   { return front(); }
+        BasicBlock       &getEntryBlock()         { return front(); }
+
+  //===--------------------------------------------------------------------===//
+  // Symbol Table Accessing functions...
+
+  /// getSymbolTable() - Return the symbol table if any, otherwise nullptr.
+  ///
+  inline ValueSymbolTable *getValueSymbolTable() { return SymTab.get(); }
+  inline const ValueSymbolTable *getValueSymbolTable() const {
+    return SymTab.get();
+  }
+
+  //===--------------------------------------------------------------------===//
+  // BasicBlock iterator forwarding functions
+  //
+  iterator                begin()       { return BasicBlocks.begin(); }
+  const_iterator          begin() const { return BasicBlocks.begin(); }
+  iterator                end  ()       { return BasicBlocks.end();   }
+  const_iterator          end  () const { return BasicBlocks.end();   }
+
+  size_t                   size() const { return BasicBlocks.size();  }
+  bool                    empty() const { return BasicBlocks.empty(); }
+  const BasicBlock       &front() const { return BasicBlocks.front(); }
+        BasicBlock       &front()       { return BasicBlocks.front(); }
+  const BasicBlock        &back() const { return BasicBlocks.back();  }
+        BasicBlock        &back()       { return BasicBlocks.back();  }
+
+/// @name Function Argument Iteration
+/// @{
+
+  arg_iterator arg_begin() {
+    CheckLazyArguments();
+    return Arguments;
+  }
+  const_arg_iterator arg_begin() const {
+    CheckLazyArguments();
+    return Arguments;
+  }
+
+  arg_iterator arg_end() {
+    CheckLazyArguments();
+    return Arguments + NumArgs;
+  }
+  const_arg_iterator arg_end() const {
+    CheckLazyArguments();
+    return Arguments + NumArgs;
+  }
+
+  iterator_range<arg_iterator> args() {
+    return make_range(arg_begin(), arg_end());
+  }
+  iterator_range<const_arg_iterator> args() const {
+    return make_range(arg_begin(), arg_end());
+  }
+
+/// @}
+
+  size_t arg_size() const { return NumArgs; }
+  bool arg_empty() const { return arg_size() == 0; }
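+
+  // An argument-iteration sketch on some Function &F:
+  //
+  // \code
+  //   unsigned NumPtrArgs = 0;
+  //   for (const Argument &A : F.args())
+  //     if (A.getType()->isPointerTy())
+  //       ++NumPtrArgs;
+  // \endcode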
+
+  /// \brief Check whether this function has a personality function.
+  bool hasPersonalityFn() const {
+    return getSubclassDataFromValue() & (1<<3);
+  }
+
+  /// \brief Get the personality function associated with this function.
+  Constant *getPersonalityFn() const;
+  void setPersonalityFn(Constant *Fn);
+
+  /// \brief Check whether this function has prefix data.
+  bool hasPrefixData() const {
+    return getSubclassDataFromValue() & (1<<1);
+  }
+
+  /// \brief Get the prefix data associated with this function.
+  Constant *getPrefixData() const;
+  void setPrefixData(Constant *PrefixData);
+
+  /// \brief Check whether this function has prologue data.
+  bool hasPrologueData() const {
+    return getSubclassDataFromValue() & (1<<2);
+  }
+
+  /// \brief Get the prologue data associated with this function.
+  Constant *getPrologueData() const;
+  void setPrologueData(Constant *PrologueData);
+
+  /// Print the function to an output stream with an optional
+  /// AssemblyAnnotationWriter.
+  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
+             bool ShouldPreserveUseListOrder = false,
+             bool IsForDebug = false) const;
+
+  /// viewCFG - This function is meant for use from the debugger.  You can just
+  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
+  /// program, displaying the CFG of the current function with the code for each
+  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
+  /// in your path.
+  ///
+  void viewCFG() const;
+
+  /// viewCFGOnly - This function is meant for use from the debugger.  It works
+  /// just like viewCFG, but it does not include the contents of basic blocks
+  /// into the nodes, just the label.  If you are only interested in the CFG
+  /// this can make the graph smaller.
+  ///
+  void viewCFGOnly() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::FunctionVal;
+  }
+
+  /// dropAllReferences() - This method causes all the subinstructions to "let
+  /// go" of all references that they are maintaining.  This allows one to
+  /// 'delete' a whole module at a time, even though there may be circular
+  /// references... first all references are dropped, and all use counts go to
+  /// zero.  Then everything is deleted for real.  Note that no operations are
+  /// valid on an object that has "dropped all references", except operator
+  /// delete.
+  ///
+  /// Since no other object in the module can have references into the body of a
+  /// function, dropping all references deletes the entire body of the function,
+  /// including any contained basic blocks.
+  ///
+  void dropAllReferences();
+
+  /// hasAddressTaken - returns true if there are any uses of this function
+  /// other than direct calls or invokes to it, or blockaddress expressions.
+  /// Optionally passes back an offending user for diagnostic purposes.
+  ///
+  bool hasAddressTaken(const User** = nullptr) const;
+
+  /// isDefTriviallyDead - Return true if it is trivially safe to remove
+  /// this function definition from the module (because it isn't externally
+  /// visible, does not have its address taken, and has no callers).  To make
+  /// this more accurate, call removeDeadConstantUsers first.
+  bool isDefTriviallyDead() const;
+
+  /// callsFunctionThatReturnsTwice - Return true if the function has a call to
+  /// setjmp or other function that gcc recognizes as "returning twice".
+  bool callsFunctionThatReturnsTwice() const;
+
+  /// \brief Set the attached subprogram.
+  ///
+  /// Calls \a setMetadata() with \a LLVMContext::MD_dbg.
+  void setSubprogram(DISubprogram *SP);
+
+  /// \brief Get the attached subprogram.
+  ///
+  /// Calls \a getMetadata() with \a LLVMContext::MD_dbg and casts the result
+  /// to \a DISubprogram.
+  DISubprogram *getSubprogram() const;
+
+  /// Returns true if we should emit debug info for profiling.
+  bool isDebugInfoForProfiling() const;
+
+private:
+  void allocHungoffUselist();
+  template<int Idx> void setHungoffOperand(Constant *C);
+
+  /// Shadow Value::setValueSubclassData with a private forwarding method so
+  /// that subclasses cannot accidentally use it.
+  void setValueSubclassData(unsigned short D) {
+    Value::setValueSubclassData(D);
+  }
+  void setValueSubclassDataBit(unsigned Bit, bool On);
+};
+
+template <>
+struct OperandTraits<Function> : public HungoffOperandTraits<3> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_FUNCTION_H
diff --git a/linux-x64/clang/include/llvm/IR/GVMaterializer.h b/linux-x64/clang/include/llvm/IR/GVMaterializer.h
new file mode 100644
index 0000000..675abeb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GVMaterializer.h
@@ -0,0 +1,52 @@
+//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an abstract interface for loading a module from some
+// place.  This interface allows incremental or random access loading of
+// functions from the file.  This is useful for applications like JIT compilers
+// or interprocedural optimizers that do not need the entire program in memory
+// at the same time.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GVMATERIALIZER_H
+#define LLVM_IR_GVMATERIALIZER_H
+
+#include <vector>
+
+namespace llvm {
+
+class Error;
+class GlobalValue;
+class StructType;
+
+class GVMaterializer {
+protected:
+  GVMaterializer() = default;
+
+public:
+  virtual ~GVMaterializer();
+
+  /// Make sure the given GlobalValue is fully read.
+  ///
+  virtual Error materialize(GlobalValue *GV) = 0;
+
+  /// Make sure the entire Module has been completely read.
+  ///
+  virtual Error materializeModule() = 0;
+
+  virtual Error materializeMetadata() = 0;
+  virtual void setStripDebugInfo() = 0;
+
+  virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GVMATERIALIZER_H
diff --git a/linux-x64/clang/include/llvm/IR/GetElementPtrTypeIterator.h b/linux-x64/clang/include/llvm/IR/GetElementPtrTypeIterator.h
new file mode 100644
index 0000000..3c143ea
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -0,0 +1,164 @@
+//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an iterator for walking through the types indexed by
+// getelementptr instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/User.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+  template<typename ItTy = User::const_op_iterator>
+  class generic_gep_type_iterator
+    : public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
+    using super = std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t>;
+
+    ItTy OpIt;
+    PointerUnion<StructType *, Type *> CurTy;
+    enum : uint64_t { Unbounded = -1ull };
+    uint64_t NumElements = Unbounded;
+
+    generic_gep_type_iterator() = default;
+
+  public:
+    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
+      generic_gep_type_iterator I;
+      I.CurTy = Ty;
+      I.OpIt = It;
+      return I;
+    }
+
+    static generic_gep_type_iterator end(ItTy It) {
+      generic_gep_type_iterator I;
+      I.OpIt = It;
+      return I;
+    }
+
+    bool operator==(const generic_gep_type_iterator& x) const {
+      return OpIt == x.OpIt;
+    }
+
+    bool operator!=(const generic_gep_type_iterator& x) const {
+      return !operator==(x);
+    }
+
+    // FIXME: Make this the iterator's operator*() after the 4.0 release.
+    // operator*() had a different meaning in earlier releases, so we're
+    // temporarily not giving this iterator an operator*() to avoid a subtle
+    // semantics break.
+    Type *getIndexedType() const {
+      if (auto *T = CurTy.dyn_cast<Type *>())
+        return T;
+      return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
+    }
+
+    Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
+
+    generic_gep_type_iterator& operator++() {   // Preincrement
+      Type *Ty = getIndexedType();
+      if (auto *STy = dyn_cast<SequentialType>(Ty)) {
+        CurTy = STy->getElementType();
+        NumElements = STy->getNumElements();
+      } else
+        CurTy = dyn_cast<StructType>(Ty);
+      ++OpIt;
+      return *this;
+    }
+
+    generic_gep_type_iterator operator++(int) { // Postincrement
+      generic_gep_type_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    // All of the below API is for querying properties of the "outer type", i.e.
+    // the type that contains the indexed type. Most of the time this is just
+    // the type that was visited immediately prior to the indexed type, but for
+    // the first element this is an unbounded array of the GEP's source element
+    // type, for which there is no clearly corresponding IR type (we've
+    // historically used a pointer type as the outer type in this case, but
+    // pointers will soon lose their element type).
+    //
+    // FIXME: Most current users of this class are just interested in byte
+    // offsets (a few need to know whether the outer type is a struct because
+    // they are trying to replace a constant with a variable, which is only
+    // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+    // we should provide a more minimal API here that exposes not much more than
+    // that.
+
+    bool isStruct() const { return CurTy.is<StructType *>(); }
+    bool isSequential() const { return CurTy.is<Type *>(); }
+
+    StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+    StructType *getStructTypeOrNull() const {
+      return CurTy.dyn_cast<StructType *>();
+    }
+
+    bool isBoundedSequential() const {
+      return isSequential() && NumElements != Unbounded;
+    }
+
+    uint64_t getSequentialNumElements() const {
+      assert(isBoundedSequential());
+      return NumElements;
+    }
+  };
+
+  using gep_type_iterator = generic_gep_type_iterator<>;
+
+  inline gep_type_iterator gep_type_begin(const User *GEP) {
+    auto *GEPOp = cast<GEPOperator>(GEP);
+    return gep_type_iterator::begin(
+        GEPOp->getSourceElementType(),
+        GEP->op_begin() + 1);
+  }
+
+  inline gep_type_iterator gep_type_end(const User *GEP) {
+    return gep_type_iterator::end(GEP->op_end());
+  }
+
+  inline gep_type_iterator gep_type_begin(const User &GEP) {
+    auto &GEPOp = cast<GEPOperator>(GEP);
+    return gep_type_iterator::begin(
+        GEPOp.getSourceElementType(),
+        GEP.op_begin() + 1);
+  }
+
+  inline gep_type_iterator gep_type_end(const User &GEP) {
+    return gep_type_iterator::end(GEP.op_end());
+  }
+
+  template<typename T>
+  inline generic_gep_type_iterator<const T *>
+  gep_type_begin(Type *Op0, ArrayRef<T> A) {
+    return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
+  }
+
+  template<typename T>
+  inline generic_gep_type_iterator<const T *>
+  gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
+    return generic_gep_type_iterator<const T *>::end(A.end());
+  }
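+
+  // A typical walk over the types a GEP indexes through, accumulating a byte
+  // offset. A sketch that assumes every index is a ConstantInt; GEP is a
+  // GetElementPtrInst *, DL the module's DataLayout, and Offset a uint64_t:
+  //
+  // \code
+  //   for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
+  //        GTI != E; ++GTI) {
+  //     uint64_t Idx = cast<ConstantInt>(GTI.getOperand())->getZExtValue();
+  //     if (StructType *STy = GTI.getStructTypeOrNull())
+  //       Offset += DL.getStructLayout(STy)->getElementOffset(Idx);
+  //     else
+  //       Offset += Idx * DL.getTypeAllocSize(GTI.getIndexedType());
+  //   }
+  // \endcode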
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalAlias.h b/linux-x64/clang/include/llvm/IR/GlobalAlias.h
new file mode 100644
index 0000000..450583b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalAlias.h
@@ -0,0 +1,98 @@
+//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the GlobalAlias class, which
+// represents a single function or variable alias in the IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALALIAS_H
+#define LLVM_IR_GLOBALALIAS_H
+
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+
+class Twine;
+class Module;
+template <typename ValueSubClass> class SymbolTableListTraits;
+
+class GlobalAlias : public GlobalIndirectSymbol,
+                    public ilist_node<GlobalAlias> {
+  friend class SymbolTableListTraits<GlobalAlias>;
+
+  GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
+              const Twine &Name, Constant *Aliasee, Module *Parent);
+
+public:
+  GlobalAlias(const GlobalAlias &) = delete;
+  GlobalAlias &operator=(const GlobalAlias &) = delete;
+
+  /// If a parent module is specified, the alias is automatically inserted into
+  /// the end of the specified module's alias list.
+  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+                             LinkageTypes Linkage, const Twine &Name,
+                             Constant *Aliasee, Module *Parent);
+
+  // Without the Aliasee.
+  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+                             LinkageTypes Linkage, const Twine &Name,
+                             Module *Parent);
+
+  // The module is taken from the Aliasee.
+  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+                             LinkageTypes Linkage, const Twine &Name,
+                             GlobalValue *Aliasee);
+
+  // Type, Parent and AddressSpace taken from the Aliasee.
+  static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
+                             GlobalValue *Aliasee);
+
+  // Linkage, Type, Parent and AddressSpace taken from the Aliasee.
+  static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);
+
+  void copyAttributesFrom(const GlobalValue *Src) {
+    GlobalValue::copyAttributesFrom(Src);
+  }
+
+  /// removeFromParent - This method unlinks 'this' from the containing module,
+  /// but does not delete it.
+  ///
+  void removeFromParent();
+
+  /// eraseFromParent - This method unlinks 'this' from the containing module
+  /// and deletes it.
+  ///
+  void eraseFromParent();
+
+  /// These methods retrieve and set the alias target.
+  void setAliasee(Constant *Aliasee);
+  const Constant *getAliasee() const {
+    return getIndirectSymbol();
+  }
+  Constant *getAliasee() {
+    return getIndirectSymbol();
+  }
+
+  static bool isValidLinkage(LinkageTypes L) {
+    return isExternalLinkage(L) || isLocalLinkage(L) ||
+      isWeakLinkage(L) || isLinkOnceLinkage(L);
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::GlobalAliasVal;
+  }
+};
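+
+// A creation sketch: make "bar" an alias of an existing function F, taking
+// linkage, type, parent and address space from the aliasee:
+//
+// \code
+//   GlobalAlias *GA = GlobalAlias::create("bar", F);
+// \endcode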
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALALIAS_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalIFunc.h b/linux-x64/clang/include/llvm/IR/GlobalIFunc.h
new file mode 100644
index 0000000..ef51315
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalIFunc.h
@@ -0,0 +1,80 @@
+//===-------- llvm/GlobalIFunc.h - GlobalIFunc class ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \brief
+/// This file contains the declaration of the GlobalIFunc class, which
+/// represents a single indirect function in the IR. An indirect function uses
+/// an ELF symbol type extension to mark that the address of a declaration
+/// should be resolved at runtime by calling a resolver function.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALIFUNC_H
+#define LLVM_IR_GLOBALIFUNC_H
+
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+
+class Twine;
+class Module;
+
+// Traits class for using GlobalIFunc in symbol table in Module.
+template <typename ValueSubClass> class SymbolTableListTraits;
+
+class GlobalIFunc final : public GlobalIndirectSymbol,
+                          public ilist_node<GlobalIFunc> {
+  friend class SymbolTableListTraits<GlobalIFunc>;
+
+  GlobalIFunc(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
+              const Twine &Name, Constant *Resolver, Module *Parent);
+
+public:
+  GlobalIFunc(const GlobalIFunc &) = delete;
+  GlobalIFunc &operator=(const GlobalIFunc &) = delete;
+
+  /// If a parent module is specified, the ifunc is automatically inserted into
+  /// the end of the specified module's ifunc list.
+  static GlobalIFunc *create(Type *Ty, unsigned AddressSpace,
+                             LinkageTypes Linkage, const Twine &Name,
+                             Constant *Resolver, Module *Parent);
+
+  void copyAttributesFrom(const GlobalIFunc *Src) {
+    GlobalValue::copyAttributesFrom(Src);
+  }
+
+  /// This method unlinks 'this' from the containing module, but does not
+  /// delete it.
+  void removeFromParent();
+
+  /// This method unlinks 'this' from the containing module and deletes it.
+  void eraseFromParent();
+
+  /// These methods retrieve and set the ifunc resolver function.
+  void setResolver(Constant *Resolver) {
+    setIndirectSymbol(Resolver);
+  }
+  const Constant *getResolver() const {
+    return getIndirectSymbol();
+  }
+  Constant *getResolver() {
+    return getIndirectSymbol();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::GlobalIFuncVal;
+  }
+};
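+
+// A creation sketch; FTy is the ifunc's function type, Resolver a Function *
+// that returns the implementation's address, and M the enclosing Module:
+//
+// \code
+//   GlobalIFunc *GI = GlobalIFunc::create(FTy, /*AddressSpace=*/0,
+//                                         GlobalValue::ExternalLinkage,
+//                                         "ifn", Resolver, &M);
+// \endcode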
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALIFUNC_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalIndirectSymbol.h b/linux-x64/clang/include/llvm/IR/GlobalIndirectSymbol.h
new file mode 100644
index 0000000..22c0068
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalIndirectSymbol.h
@@ -0,0 +1,93 @@
+//===- llvm/GlobalIndirectSymbol.h - GlobalIndirectSymbol class -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the GlobalIndirectSymbol class, which
+// is a base class for GlobalAlias and GlobalIFunc. It contains all common code
+// for aliases and ifuncs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALINDIRECTSYMBOL_H
+#define LLVM_IR_GLOBALINDIRECTSYMBOL_H
+
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstddef>
+
+namespace llvm {
+
+class GlobalIndirectSymbol : public GlobalValue {
+protected:
+  GlobalIndirectSymbol(Type *Ty, ValueTy VTy, unsigned AddressSpace,
+      LinkageTypes Linkage, const Twine &Name, Constant *Symbol);
+
+public:
+  GlobalIndirectSymbol(const GlobalIndirectSymbol &) = delete;
+  GlobalIndirectSymbol &operator=(const GlobalIndirectSymbol &) = delete;
+
+  // allocate space for exactly one operand
+  void *operator new(size_t s) {
+    return User::operator new(s, 1);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+  /// These methods set and retrieve the indirect symbol.
+  void setIndirectSymbol(Constant *Symbol) {
+    setOperand(0, Symbol);
+  }
+  const Constant *getIndirectSymbol() const {
+    return getOperand(0);
+  }
+  Constant *getIndirectSymbol() {
+    return const_cast<Constant *>(
+          static_cast<const GlobalIndirectSymbol *>(this)->getIndirectSymbol());
+  }
+
+  const GlobalObject *getBaseObject() const {
+    return dyn_cast<GlobalObject>(getIndirectSymbol()->stripInBoundsOffsets());
+  }
+  GlobalObject *getBaseObject() {
+    return const_cast<GlobalObject *>(
+              static_cast<const GlobalIndirectSymbol *>(this)->getBaseObject());
+  }
+
+  const GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) const {
+    return dyn_cast<GlobalObject>(
+        getIndirectSymbol()->stripAndAccumulateInBoundsConstantOffsets(DL,
+                                                                       Offset));
+  }
+  GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) {
+    return const_cast<GlobalObject *>(
+                                 static_cast<const GlobalIndirectSymbol *>(this)
+                                   ->getBaseObject(DL, Offset));
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::GlobalAliasVal ||
+           V->getValueID() == Value::GlobalIFuncVal;
+  }
+};
+
+template <>
+struct OperandTraits<GlobalIndirectSymbol> :
+  public FixedNumOperandTraits<GlobalIndirectSymbol, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalIndirectSymbol, Constant)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALINDIRECTSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalObject.h b/linux-x64/clang/include/llvm/IR/GlobalObject.h
new file mode 100644
index 0000000..278b193
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalObject.h
@@ -0,0 +1,184 @@
+//===-- llvm/GlobalObject.h - Class to represent global objects -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This represents an independent object. That is, a function or a global
+// variable, but not an alias.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALOBJECT_H
+#define LLVM_IR_GLOBALOBJECT_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class Comdat;
+class MDNode;
+class Metadata;
+
+class GlobalObject : public GlobalValue {
+protected:
+  GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+               LinkageTypes Linkage, const Twine &Name,
+               unsigned AddressSpace = 0)
+      : GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name, AddressSpace),
+        ObjComdat(nullptr) {
+    setGlobalValueSubClassData(0);
+  }
+
+  Comdat *ObjComdat;
+  enum {
+    LastAlignmentBit = 4,
+    HasMetadataHashEntryBit,
+    HasSectionHashEntryBit,
+
+    GlobalObjectBits,
+  };
+  static const unsigned GlobalObjectSubClassDataBits =
+      GlobalValueSubClassDataBits - GlobalObjectBits;
+
+private:
+  static const unsigned AlignmentBits = LastAlignmentBit + 1;
+  static const unsigned AlignmentMask = (1 << AlignmentBits) - 1;
+  static const unsigned GlobalObjectMask = (1 << GlobalObjectBits) - 1;
+
+public:
+  GlobalObject(const GlobalObject &) = delete;
+
+  unsigned getAlignment() const {
+    unsigned Data = getGlobalValueSubClassData();
+    unsigned AlignmentData = Data & AlignmentMask;
+    return (1u << AlignmentData) >> 1;
+  }
+  void setAlignment(unsigned Align);
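+
+  // Encoding note (a sketch inferred from getAlignment() above): the low
+  // AlignmentBits of the subclass data hold log2(alignment) + 1, so a stored
+  // value of 0 decodes to "no alignment" and, e.g., a stored 4 decodes to
+  // (1u << 4) >> 1 == 8 bytes.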
+
+  unsigned getGlobalObjectSubClassData() const {
+    unsigned ValueData = getGlobalValueSubClassData();
+    return ValueData >> GlobalObjectBits;
+  }
+
+  void setGlobalObjectSubClassData(unsigned Val) {
+    unsigned OldData = getGlobalValueSubClassData();
+    setGlobalValueSubClassData((OldData & GlobalObjectMask) |
+                               (Val << GlobalObjectBits));
+    assert(getGlobalObjectSubClassData() == Val && "representation error");
+  }
+
+  /// Check if this global has a custom object file section.
+  ///
+  /// This is more efficient than calling getSection() and checking for an empty
+  /// string.
+  bool hasSection() const {
+    return getGlobalValueSubClassData() & (1 << HasSectionHashEntryBit);
+  }
+
+  /// Get the custom section of this global if it has one.
+  ///
+  /// If this global does not have a custom section, this will be empty and the
+  /// default object file section (.text, .data, etc) will be used.
+  StringRef getSection() const {
+    return hasSection() ? getSectionImpl() : StringRef();
+  }
+
+  /// Change the section for this global.
+  ///
+  /// Setting the section to the empty string tells LLVM to choose an
+  /// appropriate default object file section.
+  void setSection(StringRef S);
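+
+  // Usage sketch (illustrative; `GO` is assumed to be a valid GlobalObject*):
+  //
+  //   GO->setSection(".mydata");
+  //   assert(GO->hasSection() && GO->getSection() == ".mydata");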
+
+  bool hasComdat() const { return getComdat() != nullptr; }
+  const Comdat *getComdat() const { return ObjComdat; }
+  Comdat *getComdat() { return ObjComdat; }
+  void setComdat(Comdat *C) { ObjComdat = C; }
+
+  /// Check if this has any metadata.
+  bool hasMetadata() const { return hasMetadataHashEntry(); }
+
+  /// Get the current metadata attachments for the given kind, if any.
+  ///
+  /// These functions require that the global have at most a single attachment
+  /// of the given kind, and return \c nullptr if such an attachment is missing.
+  /// @{
+  MDNode *getMetadata(unsigned KindID) const;
+  MDNode *getMetadata(StringRef Kind) const;
+  /// @}
+
+  /// Appends all attachments with the given ID to \c MDs in insertion order.
+  /// If the global has no attachments with the given ID, or if ID is invalid,
+  /// leaves MDs unchanged.
+  /// @{
+  void getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const;
+  void getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const;
+  /// @}
+
+  /// Set a particular kind of metadata attachment.
+  ///
+  /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
+  /// replacing it if it already exists.
+  /// @{
+  void setMetadata(unsigned KindID, MDNode *MD);
+  void setMetadata(StringRef Kind, MDNode *MD);
+  /// @}
+
+  /// Add a metadata attachment.
+  /// @{
+  void addMetadata(unsigned KindID, MDNode &MD);
+  void addMetadata(StringRef Kind, MDNode &MD);
+  /// @}
+
+  /// Appends all attachments for the global to \c MDs, sorting by attachment
+  /// ID. Attachments with the same ID appear in insertion order.
+  void
+  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;
+
+  /// Erase all metadata attachments with the given kind.
+  void eraseMetadata(unsigned KindID);
+
+  /// Copy metadata from Src, adjusting offsets by Offset.
+  void copyMetadata(const GlobalObject *Src, unsigned Offset);
+
+  void addTypeMetadata(unsigned Offset, Metadata *TypeID);
+
+protected:
+  void copyAttributesFrom(const GlobalObject *Src);
+
+public:
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::FunctionVal ||
+           V->getValueID() == Value::GlobalVariableVal;
+  }
+
+  void clearMetadata();
+
+private:
+  void setGlobalObjectFlag(unsigned Bit, bool Val) {
+    unsigned Mask = 1 << Bit;
+    setGlobalValueSubClassData((~Mask & getGlobalValueSubClassData()) |
+                               (Val ? Mask : 0u));
+  }
+
+  bool hasMetadataHashEntry() const {
+    return getGlobalValueSubClassData() & (1 << HasMetadataHashEntryBit);
+  }
+  void setHasMetadataHashEntry(bool HasEntry) {
+    setGlobalObjectFlag(HasMetadataHashEntryBit, HasEntry);
+  }
+
+  StringRef getSectionImpl() const;
+};
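+
+// Metadata attachment sketch (illustrative; `GO` and the node contents are
+// hypothetical):
+//
+//   LLVMContext &Ctx = GO->getContext();
+//   MDNode *N = MDNode::get(Ctx, MDString::get(Ctx, "note"));
+//   GO->setMetadata("my.kind", N);
+//   assert(GO->getMetadata("my.kind") == N);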
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALOBJECT_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalValue.h b/linux-x64/clang/include/llvm/IR/GlobalValue.h
new file mode 100644
index 0000000..35b0b69
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalValue.h
@@ -0,0 +1,586 @@
+//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the GlobalValue class, a common base class of all
+// globally definable objects.  As such, it is subclassed by GlobalVariable,
+// GlobalAlias and Function.  This is used because you can do certain things
+// with these global objects that you can't do to anything else.  For example,
+// their addresses can be used as constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALVALUE_H
+#define LLVM_IR_GLOBALVALUE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MD5.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class Comdat;
+class ConstantRange;
+class Error;
+class GlobalObject;
+class Module;
+
+namespace Intrinsic {
+  enum ID : unsigned;
+} // end namespace Intrinsic
+
+class GlobalValue : public Constant {
+public:
+  /// @brief An enumeration for the kinds of linkage for global values.
+  enum LinkageTypes {
+    ExternalLinkage = 0,///< Externally visible function
+    AvailableExternallyLinkage, ///< Available for inspection, not emission.
+    LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
+    LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
+    WeakAnyLinkage,     ///< Keep one copy of named function when linking (weak)
+    WeakODRLinkage,     ///< Same, but only replaced by something equivalent.
+    AppendingLinkage,   ///< Special purpose, only applies to global arrays
+    InternalLinkage,    ///< Rename collisions when linking (static functions).
+    PrivateLinkage,     ///< Like Internal, but omit from symbol table.
+    ExternalWeakLinkage,///< ExternalWeak linkage description.
+    CommonLinkage       ///< Tentative definitions.
+  };
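+
+  // For orientation, informal examples of the corresponding LLVM IR syntax:
+  //
+  //   @a = internal global i32 0        ; InternalLinkage
+  //   @b = linkonce_odr global i32 0    ; LinkOnceODRLinkage
+  //   @c = extern_weak global i32       ; ExternalWeakLinkage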
+
+  /// @brief An enumeration for the kinds of visibility of global values.
+  enum VisibilityTypes {
+    DefaultVisibility = 0,  ///< The GV is visible
+    HiddenVisibility,       ///< The GV is hidden
+    ProtectedVisibility     ///< The GV is protected
+  };
+
+  /// @brief Storage classes of global values for PE targets.
+  enum DLLStorageClassTypes {
+    DefaultStorageClass   = 0,
+    DLLImportStorageClass = 1, ///< Function to be imported from DLL
+    DLLExportStorageClass = 2  ///< Function to be accessible from DLL.
+  };
+
+protected:
+  GlobalValue(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+              LinkageTypes Linkage, const Twine &Name, unsigned AddressSpace)
+      : Constant(PointerType::get(Ty, AddressSpace), VTy, Ops, NumOps),
+        ValueType(Ty), Visibility(DefaultVisibility),
+        UnnamedAddrVal(unsigned(UnnamedAddr::None)),
+        DllStorageClass(DefaultStorageClass), ThreadLocal(NotThreadLocal),
+        HasLLVMReservedName(false), IsDSOLocal(false), IntID((Intrinsic::ID)0U),
+        Parent(nullptr) {
+    setLinkage(Linkage);
+    setName(Name);
+  }
+
+  Type *ValueType;
+
+  static const unsigned GlobalValueSubClassDataBits = 17;
+
+  // All bitfields use unsigned as the underlying type so that MSVC will pack
+  // them.
+  unsigned Linkage : 4;       // The linkage of this global
+  unsigned Visibility : 2;    // The visibility style of this global
+  unsigned UnnamedAddrVal : 2; // This value's address is not significant
+  unsigned DllStorageClass : 2; // DLL storage class
+
+  unsigned ThreadLocal : 3; // Is this symbol "Thread Local", if so, what is
+                            // the desired model?
+
+  /// True if the function's name starts with "llvm.".  This corresponds to the
+  /// value of Function::isIntrinsic(), which may be true even if
+  /// Function::intrinsicID() returns Intrinsic::not_intrinsic.
+  unsigned HasLLVMReservedName : 1;
+
+  /// If true then there is a definition within the same linkage unit and that
+  /// definition cannot be runtime preempted.
+  unsigned IsDSOLocal : 1;
+
+private:
+  friend class Constant;
+
+  void maybeSetDsoLocal() {
+    if (hasLocalLinkage() ||
+        (!hasDefaultVisibility() && !hasExternalWeakLinkage()))
+      setDSOLocal(true);
+  }
+
+  // Give subclasses access to what otherwise would be wasted padding.
+  // (17 + 4 + 2 + 2 + 2 + 3 + 1 + 1) == 32.
+  unsigned SubClassData : GlobalValueSubClassDataBits;
+
+  void destroyConstantImpl();
+  Value *handleOperandChangeImpl(Value *From, Value *To);
+
+  /// Returns true if the definition of this global may be replaced by a
+  /// differently optimized variant of the same source level function at link
+  /// time.
+  bool mayBeDerefined() const {
+    switch (getLinkage()) {
+    case WeakODRLinkage:
+    case LinkOnceODRLinkage:
+    case AvailableExternallyLinkage:
+      return true;
+
+    case WeakAnyLinkage:
+    case LinkOnceAnyLinkage:
+    case CommonLinkage:
+    case ExternalWeakLinkage:
+    case ExternalLinkage:
+    case AppendingLinkage:
+    case InternalLinkage:
+    case PrivateLinkage:
+      return isInterposable();
+    }
+
+    llvm_unreachable("Fully covered switch above!");
+  }
+
+protected:
+  /// \brief The intrinsic ID for this subclass (which must be a Function).
+  ///
+  /// This member is defined by this class, but not used for anything.
+  /// Subclasses can use it to store their intrinsic ID, if they have one.
+  ///
+  /// This is stored here to save space in Function on 64-bit hosts.
+  Intrinsic::ID IntID;
+
+  unsigned getGlobalValueSubClassData() const {
+    return SubClassData;
+  }
+  void setGlobalValueSubClassData(unsigned V) {
+    assert(V < (1 << GlobalValueSubClassDataBits) && "It will not fit");
+    SubClassData = V;
+  }
+
+  Module *Parent;             // The containing module.
+
+  // Used by SymbolTableListTraits.
+  void setParent(Module *parent) {
+    Parent = parent;
+  }
+
+  ~GlobalValue() {
+    removeDeadConstantUsers();   // remove any dead constants using this.
+  }
+
+public:
+  enum ThreadLocalMode {
+    NotThreadLocal = 0,
+    GeneralDynamicTLSModel,
+    LocalDynamicTLSModel,
+    InitialExecTLSModel,
+    LocalExecTLSModel
+  };
+
+  GlobalValue(const GlobalValue &) = delete;
+
+  unsigned getAlignment() const;
+
+  enum class UnnamedAddr {
+    None,
+    Local,
+    Global,
+  };
+
+  bool hasGlobalUnnamedAddr() const {
+    return getUnnamedAddr() == UnnamedAddr::Global;
+  }
+
+  /// Returns true if this value's address is not significant in this module.
+  /// This attribute is intended to be used only by the code generator and LTO
+  /// to allow the linker to decide whether the global needs to be in the symbol
+  /// table. It should probably not be used in optimizations, as the value may
+  /// have uses outside the module; use hasGlobalUnnamedAddr() instead.
+  bool hasAtLeastLocalUnnamedAddr() const {
+    return getUnnamedAddr() != UnnamedAddr::None;
+  }
+
+  UnnamedAddr getUnnamedAddr() const {
+    return UnnamedAddr(UnnamedAddrVal);
+  }
+  void setUnnamedAddr(UnnamedAddr Val) { UnnamedAddrVal = unsigned(Val); }
+
+  static UnnamedAddr getMinUnnamedAddr(UnnamedAddr A, UnnamedAddr B) {
+    if (A == UnnamedAddr::None || B == UnnamedAddr::None)
+      return UnnamedAddr::None;
+    if (A == UnnamedAddr::Local || B == UnnamedAddr::Local)
+      return UnnamedAddr::Local;
+    return UnnamedAddr::Global;
+  }
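+
+  // For example, getMinUnnamedAddr(UnnamedAddr::Global, UnnamedAddr::Local)
+  // returns UnnamedAddr::Local: combining two globals keeps only the weaker
+  // unnamed_addr guarantee.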
+
+  bool hasComdat() const { return getComdat() != nullptr; }
+  const Comdat *getComdat() const;
+  Comdat *getComdat() {
+    return const_cast<Comdat *>(
+                           static_cast<const GlobalValue *>(this)->getComdat());
+  }
+
+  VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
+  bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
+  bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
+  bool hasProtectedVisibility() const {
+    return Visibility == ProtectedVisibility;
+  }
+  void setVisibility(VisibilityTypes V) {
+    assert((!hasLocalLinkage() || V == DefaultVisibility) &&
+           "local linkage requires default visibility");
+    Visibility = V;
+    maybeSetDsoLocal();
+  }
+
+  /// If the value is "Thread Local", each thread has its own copy of the value.
+  bool isThreadLocal() const { return getThreadLocalMode() != NotThreadLocal; }
+  void setThreadLocal(bool Val) {
+    setThreadLocalMode(Val ? GeneralDynamicTLSModel : NotThreadLocal);
+  }
+  void setThreadLocalMode(ThreadLocalMode Val) {
+    assert(Val == NotThreadLocal || getValueID() != Value::FunctionVal);
+    ThreadLocal = Val;
+  }
+  ThreadLocalMode getThreadLocalMode() const {
+    return static_cast<ThreadLocalMode>(ThreadLocal);
+  }
+
+  DLLStorageClassTypes getDLLStorageClass() const {
+    return DLLStorageClassTypes(DllStorageClass);
+  }
+  bool hasDLLImportStorageClass() const {
+    return DllStorageClass == DLLImportStorageClass;
+  }
+  bool hasDLLExportStorageClass() const {
+    return DllStorageClass == DLLExportStorageClass;
+  }
+  void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }
+
+  bool hasSection() const { return !getSection().empty(); }
+  StringRef getSection() const;
+
+  /// Global values are always pointers.
+  PointerType *getType() const { return cast<PointerType>(User::getType()); }
+
+  Type *getValueType() const { return ValueType; }
+
+  void setDSOLocal(bool Local) { IsDSOLocal = Local; }
+
+  bool isDSOLocal() const {
+    return IsDSOLocal;
+  }
+
+  static LinkageTypes getLinkOnceLinkage(bool ODR) {
+    return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
+  }
+  static LinkageTypes getWeakLinkage(bool ODR) {
+    return ODR ? WeakODRLinkage : WeakAnyLinkage;
+  }
+
+  static bool isExternalLinkage(LinkageTypes Linkage) {
+    return Linkage == ExternalLinkage;
+  }
+  static bool isAvailableExternallyLinkage(LinkageTypes Linkage) {
+    return Linkage == AvailableExternallyLinkage;
+  }
+  static bool isLinkOnceODRLinkage(LinkageTypes Linkage) {
+    return Linkage == LinkOnceODRLinkage;
+  }
+  static bool isLinkOnceLinkage(LinkageTypes Linkage) {
+    return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
+  }
+  static bool isWeakAnyLinkage(LinkageTypes Linkage) {
+    return Linkage == WeakAnyLinkage;
+  }
+  static bool isWeakODRLinkage(LinkageTypes Linkage) {
+    return Linkage == WeakODRLinkage;
+  }
+  static bool isWeakLinkage(LinkageTypes Linkage) {
+    return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
+  }
+  static bool isAppendingLinkage(LinkageTypes Linkage) {
+    return Linkage == AppendingLinkage;
+  }
+  static bool isInternalLinkage(LinkageTypes Linkage) {
+    return Linkage == InternalLinkage;
+  }
+  static bool isPrivateLinkage(LinkageTypes Linkage) {
+    return Linkage == PrivateLinkage;
+  }
+  static bool isLocalLinkage(LinkageTypes Linkage) {
+    return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
+  }
+  static bool isExternalWeakLinkage(LinkageTypes Linkage) {
+    return Linkage == ExternalWeakLinkage;
+  }
+  static bool isCommonLinkage(LinkageTypes Linkage) {
+    return Linkage == CommonLinkage;
+  }
+  static bool isValidDeclarationLinkage(LinkageTypes Linkage) {
+    return isExternalWeakLinkage(Linkage) || isExternalLinkage(Linkage);
+  }
+
+  /// Whether the definition of this global may be replaced by something
+  /// non-equivalent at link time. For example, if a function has weak linkage
+  /// then the code defining it may be replaced by different code.
+  static bool isInterposableLinkage(LinkageTypes Linkage) {
+    switch (Linkage) {
+    case WeakAnyLinkage:
+    case LinkOnceAnyLinkage:
+    case CommonLinkage:
+    case ExternalWeakLinkage:
+      return true;
+
+    case AvailableExternallyLinkage:
+    case LinkOnceODRLinkage:
+    case WeakODRLinkage:
+    // The above three cannot be overridden but can be de-refined.
+
+    case ExternalLinkage:
+    case AppendingLinkage:
+    case InternalLinkage:
+    case PrivateLinkage:
+      return false;
+    }
+    llvm_unreachable("Fully covered switch above!");
+  }
+
+  /// Whether the definition of this global may be discarded if it is not used
+  /// in its compilation unit.
+  static bool isDiscardableIfUnused(LinkageTypes Linkage) {
+    return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) ||
+           isAvailableExternallyLinkage(Linkage);
+  }
+
+  /// Whether the definition of this global may be replaced at link time.  NB:
+  /// Using this method outside of the code generators is almost always a
+  /// mistake: when working at the IR level use isInterposable instead as it
+  /// knows about ODR semantics.
+  static bool isWeakForLinker(LinkageTypes Linkage)  {
+    return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage ||
+           Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage ||
+           Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
+  }
+
+  /// Return true if the currently visible definition of this global (if any) is
+  /// exactly the definition we will see at runtime.
+  ///
+  /// Non-exact linkage types inhibit most non-inlining IPO, since a
+  /// differently optimized variant of the same function can have different
+  /// observable or undefined behavior than the variant currently visible.
+  /// For instance, we could have started with
+  ///
+  ///   void foo(int *v) {
+  ///     int t = 5 / v[0];
+  ///     (void) t;
+  ///   }
+  ///
+  /// and "refined" it to
+  ///
+  ///   void foo(int *v) { }
+  ///
+  /// However, we cannot infer readnone for `foo`, since that would justify
+  /// DSE'ing a store to `v[0]` across a call to `foo`, which can cause
+  /// undefined behavior if the linker replaces the actual call destination with
+  /// the unoptimized `foo`.
+  ///
+  /// Inlining is okay across non-exact linkage types as long as they're not
+  /// interposable (see \c isInterposable), since in such cases the currently
+  /// visible variant is *a* correct implementation of the original source
+  /// function; it just isn't the *only* correct implementation.
+  bool isDefinitionExact() const {
+    return !mayBeDerefined();
+  }
+
+  /// Return true if this global has an exact definition.
+  bool hasExactDefinition() const {
+    // While this computes exactly the same thing as
+    // isStrongDefinitionForLinker, the intended uses are different.  This
+    // function is intended to help decide if specific inter-procedural
+    // transforms are correct, while isStrongDefinitionForLinker's intended use
+    // is in low level code generation.
+    return !isDeclaration() && isDefinitionExact();
+  }
+
+  /// Return true if this global's definition can be substituted with an
+  /// *arbitrary* definition at link time.  We cannot do any IPO or inlining
+  /// across interposable call edges, since the callee can be replaced with
+  /// something arbitrary at link time.
+  bool isInterposable() const { return isInterposableLinkage(getLinkage()); }
+
+  bool hasExternalLinkage() const { return isExternalLinkage(getLinkage()); }
+  bool hasAvailableExternallyLinkage() const {
+    return isAvailableExternallyLinkage(getLinkage());
+  }
+  bool hasLinkOnceLinkage() const { return isLinkOnceLinkage(getLinkage()); }
+  bool hasLinkOnceODRLinkage() const {
+    return isLinkOnceODRLinkage(getLinkage());
+  }
+  bool hasWeakLinkage() const { return isWeakLinkage(getLinkage()); }
+  bool hasWeakAnyLinkage() const { return isWeakAnyLinkage(getLinkage()); }
+  bool hasWeakODRLinkage() const { return isWeakODRLinkage(getLinkage()); }
+  bool hasAppendingLinkage() const { return isAppendingLinkage(getLinkage()); }
+  bool hasInternalLinkage() const { return isInternalLinkage(getLinkage()); }
+  bool hasPrivateLinkage() const { return isPrivateLinkage(getLinkage()); }
+  bool hasLocalLinkage() const { return isLocalLinkage(getLinkage()); }
+  bool hasExternalWeakLinkage() const {
+    return isExternalWeakLinkage(getLinkage());
+  }
+  bool hasCommonLinkage() const { return isCommonLinkage(getLinkage()); }
+  bool hasValidDeclarationLinkage() const {
+    return isValidDeclarationLinkage(getLinkage());
+  }
+
+  void setLinkage(LinkageTypes LT) {
+    if (isLocalLinkage(LT))
+      Visibility = DefaultVisibility;
+    Linkage = LT;
+    maybeSetDsoLocal();
+  }
+  LinkageTypes getLinkage() const { return LinkageTypes(Linkage); }
+
+  bool isDiscardableIfUnused() const {
+    return isDiscardableIfUnused(getLinkage());
+  }
+
+  bool isWeakForLinker() const { return isWeakForLinker(getLinkage()); }
+
+protected:
+  /// Copy all additional attributes (those not needed to create a GlobalValue)
+  /// from the GlobalValue Src to this one.
+  void copyAttributesFrom(const GlobalValue *Src);
+
+public:
+  /// If the given string begins with the GlobalValue name mangling escape
+  /// character '\1', drop it.
+  ///
+  /// This function applies a specific mangling that is used in PGO profiles,
+  /// among other things. If you're trying to get a symbol name for an
+  /// arbitrary GlobalValue, this is not the function you're looking for; see
+  /// Mangler.h.
+  static StringRef dropLLVMManglingEscape(StringRef Name) {
+    if (!Name.empty() && Name[0] == '\1')
+      return Name.substr(1);
+    return Name;
+  }
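+
+  // For example, dropLLVMManglingEscape("\1_foo") yields "_foo"; a name
+  // without the leading '\1' escape is returned unchanged.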
+
+  /// Return the modified name for a global value suitable to be
+  /// used as the key for a global lookup (e.g. profile or ThinLTO).
+  /// The value's original name is \c Name and has linkage of type
+  /// \c Linkage. The value is defined in module \c FileName.
+  static std::string getGlobalIdentifier(StringRef Name,
+                                         GlobalValue::LinkageTypes Linkage,
+                                         StringRef FileName);
+
+  /// Return the modified name for this global value suitable to be
+  /// used as the key for a global lookup (e.g. profile or ThinLTO).
+  std::string getGlobalIdentifier() const;
+
+  /// Declare a type to represent a global unique identifier for a global value.
+  /// This is a 64-bit hash that is used by PGO and ThinLTO as a compact and
+  /// unique way to identify a symbol.
+  using GUID = uint64_t;
+
+  /// Return a 64-bit global unique ID constructed from global value name
+  /// (i.e. returned by getGlobalIdentifier()).
+  static GUID getGUID(StringRef GlobalName) { return MD5Hash(GlobalName); }
+
+  /// Return a 64-bit global unique ID constructed from global value name
+  /// (i.e. returned by getGlobalIdentifier()).
+  GUID getGUID() const { return getGUID(getGlobalIdentifier()); }
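+
+  // A sketch of how these tie together (hypothetical `GV`):
+  //
+  //   GV->getGUID() == GlobalValue::getGUID(GV->getGlobalIdentifier())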
+
+  /// @name Materialization
+  /// Materialization is used to construct functions only as they're needed.
+  /// This is useful to reduce memory usage in LLVM or the parsing work done
+  /// by the BitcodeReader to load the Module.
+  /// @{
+
+  /// If this function's Module is being lazily streamed in functions from disk
+  /// or some other source, this method can be used to check to see if the
+  /// function has been read in yet or not.
+  bool isMaterializable() const;
+
+  /// Make sure this GlobalValue is fully read.
+  Error materialize();
+
+  /// @}
+
+  /// Return true if the primary definition of this global value is outside of
+  /// the current translation unit.
+  bool isDeclaration() const;
+
+  bool isDeclarationForLinker() const {
+    if (hasAvailableExternallyLinkage())
+      return true;
+
+    return isDeclaration();
+  }
+
+  /// Returns true if this global's definition will be the one chosen by the
+  /// linker.
+  ///
+  /// NB! Ideally this should not be used at the IR level at all.  If you're
+  /// interested in optimization constraints implied by the linker's ability to
+  /// choose an implementation, prefer using \c hasExactDefinition.
+  bool isStrongDefinitionForLinker() const {
+    return !(isDeclarationForLinker() || isWeakForLinker());
+  }
+
+  /// Returns true if the alignment of the value can be unilaterally
+  /// increased.
+  bool canIncreaseAlignment() const;
+
+  const GlobalObject *getBaseObject() const;
+  GlobalObject *getBaseObject() {
+    return const_cast<GlobalObject *>(
+                       static_cast<const GlobalValue *>(this)->getBaseObject());
+  }
+
+  /// Returns whether this is a reference to an absolute symbol.
+  bool isAbsoluteSymbolRef() const;
+
+  /// If this is an absolute symbol reference, returns the range of the symbol,
+  /// otherwise returns None.
+  Optional<ConstantRange> getAbsoluteSymbolRange() const;
+
+  /// This method unlinks 'this' from the containing module, but does not delete
+  /// it.
+  void removeFromParent();
+
+  /// This method unlinks 'this' from the containing module and deletes it.
+  void eraseFromParent();
+
+  /// Get the module that contains this global value.
+  Module *getParent() { return Parent; }
+  const Module *getParent() const { return Parent; }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::FunctionVal ||
+           V->getValueID() == Value::GlobalVariableVal ||
+           V->getValueID() == Value::GlobalAliasVal ||
+           V->getValueID() == Value::GlobalIFuncVal;
+  }
+
+  /// True if GV can be left out of the object symbol table. This is the case
+  /// for linkonce_odr values whose address is not significant. While legal, it
+  /// is not normally profitable to omit them from the .o symbol table. Using
+  /// this analysis makes sense when the information can be passed down to the
+  /// linker or we are in LTO.
+  bool canBeOmittedFromSymbolTable() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALVALUE_H
diff --git a/linux-x64/clang/include/llvm/IR/GlobalVariable.h b/linux-x64/clang/include/llvm/IR/GlobalVariable.h
new file mode 100644
index 0000000..03b9ec4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/GlobalVariable.h
@@ -0,0 +1,265 @@
+//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the GlobalVariable class, which
+// represents a single global variable (or constant) in the VM.
+//
+// Global variables are constant pointers that refer to hunks of space that are
+// allocated either by the VM or by the linker in a static compiler.  A global
+// variable may have an initial value, which is copied into the executable's
+// .data area.  Global constants are required to have initializers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALVARIABLE_H
+#define LLVM_IR_GLOBALVARIABLE_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Value.h"
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+class Constant;
+class Module;
+
+template <typename ValueSubClass> class SymbolTableListTraits;
+class DIGlobalVariable;
+class DIGlobalVariableExpression;
+
+class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
+  friend class SymbolTableListTraits<GlobalVariable>;
+
+  AttributeSet Attrs;
+  bool isConstantGlobal : 1;                   // Is this a global constant?
+  bool isExternallyInitializedConstant : 1;    // Is this a global whose value
+                                               // can change from its initial
+                                               // value before global
+                                               // initializers are run?
+
+public:
+  /// GlobalVariable ctor - If a parent module is specified, the global is
+  /// automatically inserted into the end of the specified module's global list.
+  GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
+                 Constant *Initializer = nullptr, const Twine &Name = "",
+                 ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
+                 bool isExternallyInitialized = false);
+  /// GlobalVariable ctor - This creates a global and inserts it before the
+  /// specified other global.
+  GlobalVariable(Module &M, Type *Ty, bool isConstant,
+                 LinkageTypes Linkage, Constant *Initializer,
+                 const Twine &Name = "", GlobalVariable *InsertBefore = nullptr,
+                 ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
+                 bool isExternallyInitialized = false);
+  GlobalVariable(const GlobalVariable &) = delete;
+  GlobalVariable &operator=(const GlobalVariable &) = delete;
+
+  ~GlobalVariable() {
+    dropAllReferences();
+  }
+
+  // allocate space for exactly one operand
+  void *operator new(size_t s) {
+    return User::operator new(s, 1);
+  }
+
+  // Delete space for exactly one operand, as allocated by the corresponding
+  // operator new above.
+  void operator delete(void *ptr) {
+    assert(ptr != nullptr && "must not be nullptr");
+    User *Obj = static_cast<User *>(ptr);
+    // The number of operands can be set to 0 after construction and
+    // initialization.  Make sure it is reset to 1 here, since
+    // User::operator delete relies on it.
+    Obj->setGlobalVariableNumOperands(1);
+    User::operator delete(Obj);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Definitions have initializers, declarations don't.
+  ///
+  inline bool hasInitializer() const { return !isDeclaration(); }
+
+  /// hasDefinitiveInitializer - Whether the global variable has an initializer,
+  /// and any other instances of the global (this can happen due to weak
+  /// linkage) are guaranteed to have the same initializer.
+  ///
+  /// Note that if you want to transform a global, you must use
+  /// hasUniqueInitializer() instead, because of the *_odr linkage type.
+  ///
+  /// Example:
+  ///
+  /// @a = global SomeType* null - Initializer is both definitive and unique.
+  ///
+  /// @b = weak global SomeType* null - Initializer is neither definitive nor
+  /// unique.
+  ///
+  /// @c = weak_odr global SomeType* null - Initializer is definitive, but not
+  /// unique.
+  inline bool hasDefinitiveInitializer() const {
+    return hasInitializer() &&
+      // The initializer of a global variable may change to something arbitrary
+      // at link time.
+      !isInterposable() &&
+      // The initializer of a global variable with the externally_initialized
+      // marker may change at runtime before C++ initializers are evaluated.
+      !isExternallyInitialized();
+  }
+
+  /// hasUniqueInitializer - Whether the global variable has an initializer, and
+  /// any changes made to the initializer will turn up in the final executable.
+  inline bool hasUniqueInitializer() const {
+    return
+        // We need to be sure this is the definition that will actually be used
+        isStrongDefinitionForLinker() &&
+        // It is not safe to modify initializers of global variables with the
+        // external_initializer marker since the value may be changed at runtime
+        // before C++ initializers are evaluated.
+        !isExternallyInitialized();
+  }
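+
+  // Worked example (informal): for
+  //
+  //   @g = weak_odr global i32 1
+  //
+  // hasDefinitiveInitializer() is true, since weak_odr is not interposable,
+  // but hasUniqueInitializer() is false, since weak_odr is weak for the
+  // linker and another translation unit's copy may be chosen instead.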
+
+  /// getInitializer - Return the initializer for this global variable.  It is
+  /// illegal to call this method if the global is external, because we cannot
+  /// tell what the value is initialized to!
+  ///
+  inline const Constant *getInitializer() const {
+    assert(hasInitializer() && "GV doesn't have initializer!");
+    return static_cast<Constant*>(Op<0>().get());
+  }
+  inline Constant *getInitializer() {
+    assert(hasInitializer() && "GV doesn't have initializer!");
+    return static_cast<Constant*>(Op<0>().get());
+  }
+  /// setInitializer - Sets the initializer for this global variable, removing
+  /// any existing initializer if InitVal==NULL.  If this GV has type T*, the
+  /// initializer must have type T.
+  void setInitializer(Constant *InitVal);
+
+  /// If the value is a global constant, its value is immutable throughout the
+  /// runtime execution of the program.  Assigning a value into the constant
+  /// leads to undefined behavior.
+  ///
+  bool isConstant() const { return isConstantGlobal; }
+  void setConstant(bool Val) { isConstantGlobal = Val; }
+
+  bool isExternallyInitialized() const {
+    return isExternallyInitializedConstant;
+  }
+  void setExternallyInitialized(bool Val) {
+    isExternallyInitializedConstant = Val;
+  }
+
+  /// copyAttributesFrom - copy all additional attributes (those not needed to
+  /// create a GlobalVariable) from the GlobalVariable Src to this one.
+  void copyAttributesFrom(const GlobalVariable *Src);
+
+  /// removeFromParent - This method unlinks 'this' from the containing module,
+  /// but does not delete it.
+  ///
+  void removeFromParent();
+
+  /// eraseFromParent - This method unlinks 'this' from the containing module
+  /// and deletes it.
+  ///
+  void eraseFromParent();
+
+  /// Drop all references in preparation to destroy the GlobalVariable. This
+  /// drops not only the reference to the initializer but also to any metadata.
+  void dropAllReferences();
+
+  /// Attach a DIGlobalVariableExpression.
+  void addDebugInfo(DIGlobalVariableExpression *GV);
+
+  /// Fill the vector with all debug info attachments.
+  void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;
+
+  /// Add attribute to this global.
+  void addAttribute(Attribute::AttrKind Kind) {
+    Attrs = Attrs.addAttribute(getContext(), Kind);
+  }
+
+  /// Add attribute to this global.
+  void addAttribute(StringRef Kind, StringRef Val = StringRef()) {
+    Attrs = Attrs.addAttribute(getContext(), Kind, Val);
+  }
+
+  /// Return true if the attribute exists.
+  bool hasAttribute(Attribute::AttrKind Kind) const {
+    return Attrs.hasAttribute(Kind);
+  }
+
+  /// Return true if the attribute exists.
+  bool hasAttribute(StringRef Kind) const {
+    return Attrs.hasAttribute(Kind);
+  }
+
+  /// Return true if any attributes exist.
+  bool hasAttributes() const {
+    return Attrs.hasAttributes();
+  }
+
+  /// Return the attribute object.
+  Attribute getAttribute(Attribute::AttrKind Kind) const {
+    return Attrs.getAttribute(Kind);
+  }
+
+  /// Return the attribute object.
+  Attribute getAttribute(StringRef Kind) const {
+    return Attrs.getAttribute(Kind);
+  }
+
+  /// Return the attribute set for this global
+  AttributeSet getAttributes() const {
+    return Attrs;
+  }
+
+  /// Return attribute set as list with index.
+  /// FIXME: This may not be required once ValueEnumerators
+  /// in bitcode-writer can enumerate attribute-set.
+  AttributeList getAttributesAsList(unsigned index) const {
+    if (!hasAttributes())
+      return AttributeList();
+    std::pair<unsigned, AttributeSet> AS[1] = {{index, Attrs}};
+    return AttributeList::get(getContext(), AS);
+  }
+
+  /// Set attribute list for this global
+  void setAttributes(AttributeSet A) {
+    Attrs = A;
+  }
+
+  /// Check if an implicit section name is present (i.e. one of the
+  /// bss-section, data-section, or rodata-section attributes is set).
+  bool hasImplicitSection() const {
+    return getAttributes().hasAttribute("bss-section") ||
+           getAttributes().hasAttribute("data-section") ||
+           getAttributes().hasAttribute("rodata-section");
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::GlobalVariableVal;
+  }
+};
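+
+// Construction sketch (illustrative; `M` and `Ctx` are a hypothetical Module
+// and LLVMContext):
+//
+//   auto *GV = new GlobalVariable(
+//       M, Type::getInt32Ty(Ctx), /*isConstant=*/true,
+//       GlobalValue::InternalLinkage,
+//       ConstantInt::get(Type::getInt32Ty(Ctx), 42), "answer");
+//   assert(GV->hasDefinitiveInitializer() && GV->hasUniqueInitializer());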
+
+template <>
+struct OperandTraits<GlobalVariable> :
+  public OptionalOperandTraits<GlobalVariable> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GLOBALVARIABLE_H
diff --git a/linux-x64/clang/include/llvm/IR/IRBuilder.h b/linux-x64/clang/include/llvm/IR/IRBuilder.h
new file mode 100644
index 0000000..e46544a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IRBuilder.h
@@ -0,0 +1,2115 @@
+//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IRBuilder class, which is used as a convenient way
+// to create LLVM instructions with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IRBUILDER_H
+#define LLVM_IR_IRBUILDER_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantFolder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <utility>
+
+namespace llvm {
+
+class APInt;
+class MDNode;
+class Use;
+
+/// \brief This provides the default implementation of the IRBuilder
+/// 'InsertHelper' method that is called whenever an instruction is created by
+/// IRBuilder and needs to be inserted.
+///
+/// By default, this inserts the instruction at the insertion point.
+class IRBuilderDefaultInserter {
+protected:
+  void InsertHelper(Instruction *I, const Twine &Name,
+                    BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+    if (BB) BB->getInstList().insert(InsertPt, I);
+    I->setName(Name);
+  }
+};
+
+/// Provides an 'InsertHelper' that calls a user-provided callback after
+/// performing the default insertion.
+class IRBuilderCallbackInserter : IRBuilderDefaultInserter {
+  std::function<void(Instruction *)> Callback;
+
+public:
+  IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
+      : Callback(std::move(Callback)) {}
+
+protected:
+  void InsertHelper(Instruction *I, const Twine &Name,
+                    BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
+    Callback(I);
+  }
+};
+
+/// \brief Common base class shared among various IRBuilders.
+class IRBuilderBase {
+  DebugLoc CurDbgLocation;
+
+protected:
+  BasicBlock *BB;
+  BasicBlock::iterator InsertPt;
+  LLVMContext &Context;
+
+  MDNode *DefaultFPMathTag;
+  FastMathFlags FMF;
+
+  ArrayRef<OperandBundleDef> DefaultOperandBundles;
+
+public:
+  IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
+                ArrayRef<OperandBundleDef> OpBundles = None)
+      : Context(context), DefaultFPMathTag(FPMathTag),
+        DefaultOperandBundles(OpBundles) {
+    ClearInsertionPoint();
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Builder configuration methods
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Clear the insertion point: created instructions will not be
+  /// inserted into a block.
+  void ClearInsertionPoint() {
+    BB = nullptr;
+    InsertPt = BasicBlock::iterator();
+  }
+
+  BasicBlock *GetInsertBlock() const { return BB; }
+  BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
+  LLVMContext &getContext() const { return Context; }
+
+  /// \brief This specifies that created instructions should be appended to the
+  /// end of the specified block.
+  void SetInsertPoint(BasicBlock *TheBB) {
+    BB = TheBB;
+    InsertPt = BB->end();
+  }
+
+  /// \brief This specifies that created instructions should be inserted before
+  /// the specified instruction.
+  void SetInsertPoint(Instruction *I) {
+    BB = I->getParent();
+    InsertPt = I->getIterator();
+    assert(InsertPt != BB->end() && "Can't read debug loc from end()");
+    SetCurrentDebugLocation(I->getDebugLoc());
+  }
+
+  /// \brief This specifies that created instructions should be inserted at the
+  /// specified point.
+  void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
+    BB = TheBB;
+    InsertPt = IP;
+    if (IP != TheBB->end())
+      SetCurrentDebugLocation(IP->getDebugLoc());
+  }
+
+  /// \brief Set location information used by debugging information.
+  void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); }
+
+  /// \brief Get location information used by debugging information.
+  const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
+
+  /// \brief If this builder has a current debug location, set it on the
+  /// specified instruction.
+  void SetInstDebugLocation(Instruction *I) const {
+    if (CurDbgLocation)
+      I->setDebugLoc(CurDbgLocation);
+  }
+
+  /// \brief Get the return type of the current function that we're emitting
+  /// into.
+  Type *getCurrentFunctionReturnType() const;
+
+  /// InsertPoint - A saved insertion point.
+  class InsertPoint {
+    BasicBlock *Block = nullptr;
+    BasicBlock::iterator Point;
+
+  public:
+    /// \brief Creates a new insertion point which doesn't point to anything.
+    InsertPoint() = default;
+
+    /// \brief Creates a new insertion point at the given location.
+    InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
+        : Block(InsertBlock), Point(InsertPoint) {}
+
+    /// \brief Returns true if this insert point is set.
+    bool isSet() const { return (Block != nullptr); }
+
+    BasicBlock *getBlock() const { return Block; }
+    BasicBlock::iterator getPoint() const { return Point; }
+  };
+
+  /// \brief Returns the current insert point.
+  InsertPoint saveIP() const {
+    return InsertPoint(GetInsertBlock(), GetInsertPoint());
+  }
+
+  /// \brief Returns the current insert point, clearing it in the process.
+  InsertPoint saveAndClearIP() {
+    InsertPoint IP(GetInsertBlock(), GetInsertPoint());
+    ClearInsertionPoint();
+    return IP;
+  }
+
+  /// \brief Sets the current insert point to a previously-saved location.
+  void restoreIP(InsertPoint IP) {
+    if (IP.isSet())
+      SetInsertPoint(IP.getBlock(), IP.getPoint());
+    else
+      ClearInsertionPoint();
+  }
+
+  /// \brief Get the floating point math metadata being used.
+  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
+
+  /// \brief Get the flags to be applied to created floating point ops
+  FastMathFlags getFastMathFlags() const { return FMF; }
+
+  /// \brief Clear the fast-math flags.
+  void clearFastMathFlags() { FMF.clear(); }
+
+  /// \brief Set the floating point math metadata to be used.
+  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
+
+  /// \brief Set the fast-math flags to be used with generated fp-math operators
+  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
+
+  //===--------------------------------------------------------------------===//
+  // RAII helpers.
+  //===--------------------------------------------------------------------===//
+
+  /// \brief RAII object that stores the current insertion point and restores
+  /// it when the object is destroyed. This includes the debug location.
+  class InsertPointGuard {
+    IRBuilderBase &Builder;
+    AssertingVH<BasicBlock> Block;
+    BasicBlock::iterator Point;
+    DebugLoc DbgLoc;
+
+  public:
+    InsertPointGuard(IRBuilderBase &B)
+        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
+          DbgLoc(B.getCurrentDebugLocation()) {}
+
+    InsertPointGuard(const InsertPointGuard &) = delete;
+    InsertPointGuard &operator=(const InsertPointGuard &) = delete;
+
+    ~InsertPointGuard() {
+      Builder.restoreIP(InsertPoint(Block, Point));
+      Builder.SetCurrentDebugLocation(DbgLoc);
+    }
+  };
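+
+  // Usage sketch (hypothetical `Builder` and `OtherBB`): the guard restores
+  // both the insertion point and the debug location on scope exit:
+  //
+  //   {
+  //     IRBuilderBase::InsertPointGuard Guard(Builder);
+  //     Builder.SetInsertPoint(OtherBB);
+  //     // ... emit instructions into OtherBB ...
+  //   } // insertion point and debug location restored here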
+
+  /// \brief RAII object that stores the current fast math settings and
+  /// restores them when the object is destroyed.
+  class FastMathFlagGuard {
+    IRBuilderBase &Builder;
+    FastMathFlags FMF;
+    MDNode *FPMathTag;
+
+  public:
+    FastMathFlagGuard(IRBuilderBase &B)
+        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
+
+    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
+    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
+
+    ~FastMathFlagGuard() {
+      Builder.FMF = FMF;
+      Builder.DefaultFPMathTag = FPMathTag;
+    }
+  };
+
+  //===--------------------------------------------------------------------===//
+  // Miscellaneous creation methods.
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Make a new global variable holding a string constant.
+  ///
+  /// Make a new global variable with an initializer of array-of-i8 type,
+  /// filled in with the specified null-terminated string value.  The new
+  /// global variable will be marked mergeable with any others of the same
+  /// contents.  If Name is specified, it is the name of the global variable
+  /// created.
+  GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
+                                     unsigned AddressSpace = 0);
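+
+  // For example (hypothetical builder `B`), B.CreateGlobalString("hi") is
+  // expected to produce a private unnamed_addr constant global of type
+  // [3 x i8] initialized to c"hi\00" and return that GlobalVariable.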
+
+  /// \brief Get a constant value representing either true or false.
+  ConstantInt *getInt1(bool V) {
+    return ConstantInt::get(getInt1Ty(), V);
+  }
+
+  /// \brief Get the constant value for i1 true.
+  ConstantInt *getTrue() {
+    return ConstantInt::getTrue(Context);
+  }
+
+  /// \brief Get the constant value for i1 false.
+  ConstantInt *getFalse() {
+    return ConstantInt::getFalse(Context);
+  }
+
+  /// \brief Get a constant 8-bit value.
+  ConstantInt *getInt8(uint8_t C) {
+    return ConstantInt::get(getInt8Ty(), C);
+  }
+
+  /// \brief Get a constant 16-bit value.
+  ConstantInt *getInt16(uint16_t C) {
+    return ConstantInt::get(getInt16Ty(), C);
+  }
+
+  /// \brief Get a constant 32-bit value.
+  ConstantInt *getInt32(uint32_t C) {
+    return ConstantInt::get(getInt32Ty(), C);
+  }
+
+  /// \brief Get a constant 64-bit value.
+  ConstantInt *getInt64(uint64_t C) {
+    return ConstantInt::get(getInt64Ty(), C);
+  }
+
+  /// \brief Get a constant N-bit value, zero extended or truncated from
+  /// a 64-bit value.
+  ConstantInt *getIntN(unsigned N, uint64_t C) {
+    return ConstantInt::get(getIntNTy(N), C);
+  }
+
+  /// \brief Get a constant integer value.
+  ConstantInt *getInt(const APInt &AI) {
+    return ConstantInt::get(Context, AI);
+  }
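+
+  // For example, getInt32(7) is shorthand for
+  // ConstantInt::get(Type::getInt32Ty(Context), 7), and getIntN(24, 7)
+  // produces an i24 constant, truncating or zero-extending from 64 bits.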
+
+  //===--------------------------------------------------------------------===//
+  // Type creation methods
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Fetch the type representing a single bit
+  IntegerType *getInt1Ty() {
+    return Type::getInt1Ty(Context);
+  }
+
+  /// \brief Fetch the type representing an 8-bit integer.
+  IntegerType *getInt8Ty() {
+    return Type::getInt8Ty(Context);
+  }
+
+  /// \brief Fetch the type representing a 16-bit integer.
+  IntegerType *getInt16Ty() {
+    return Type::getInt16Ty(Context);
+  }
+
+  /// \brief Fetch the type representing a 32-bit integer.
+  IntegerType *getInt32Ty() {
+    return Type::getInt32Ty(Context);
+  }
+
+  /// \brief Fetch the type representing a 64-bit integer.
+  IntegerType *getInt64Ty() {
+    return Type::getInt64Ty(Context);
+  }
+
+  /// \brief Fetch the type representing a 128-bit integer.
+  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
+
+  /// \brief Fetch the type representing an N-bit integer.
+  IntegerType *getIntNTy(unsigned N) {
+    return Type::getIntNTy(Context, N);
+  }
+
+  /// \brief Fetch the type representing a 16-bit floating point value.
+  Type *getHalfTy() {
+    return Type::getHalfTy(Context);
+  }
+
+  /// \brief Fetch the type representing a 32-bit floating point value.
+  Type *getFloatTy() {
+    return Type::getFloatTy(Context);
+  }
+
+  /// \brief Fetch the type representing a 64-bit floating point value.
+  Type *getDoubleTy() {
+    return Type::getDoubleTy(Context);
+  }
+
+  /// \brief Fetch the type representing void.
+  Type *getVoidTy() {
+    return Type::getVoidTy(Context);
+  }
+
+  /// \brief Fetch the type representing a pointer to an 8-bit integer value.
+  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
+    return Type::getInt8PtrTy(Context, AddrSpace);
+  }
+
+  /// \brief Fetch the type representing a pointer to an integer value.
+  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
+    return DL.getIntPtrType(Context, AddrSpace);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Intrinsic creation methods
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Create and insert a memset to the specified pointer and the
+  /// specified value.
+  ///
+  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
+                         bool isVolatile = false, MDNode *TBAATag = nullptr,
+                         MDNode *ScopeTag = nullptr,
+                         MDNode *NoAliasTag = nullptr) {
+    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
+                        TBAATag, ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
+                         bool isVolatile = false, MDNode *TBAATag = nullptr,
+                         MDNode *ScopeTag = nullptr,
+                         MDNode *NoAliasTag = nullptr);
+
+  /// \brief Create and insert a memcpy between the specified pointers.
+  ///
+  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
+                         unsigned SrcAlign, uint64_t Size,
+                         bool isVolatile = false, MDNode *TBAATag = nullptr,
+                         MDNode *TBAAStructTag = nullptr,
+                         MDNode *ScopeTag = nullptr,
+                         MDNode *NoAliasTag = nullptr) {
+    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
+                        isVolatile, TBAATag, TBAAStructTag, ScopeTag,
+                        NoAliasTag);
+  }
+
+  CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
+                         unsigned SrcAlign, Value *Size,
+                         bool isVolatile = false, MDNode *TBAATag = nullptr,
+                         MDNode *TBAAStructTag = nullptr,
+                         MDNode *ScopeTag = nullptr,
+                         MDNode *NoAliasTag = nullptr);
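+
+  // Emission sketch (hypothetical builder `B` with an insertion point set,
+  // and i8* values `Dst` and `Src`): copy 16 bytes, 4-byte aligned on both
+  // sides:
+  //
+  //   B.CreateMemCpy(Dst, /*DstAlign=*/4, Src, /*SrcAlign=*/4, /*Size=*/16);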
+
+  /// \brief Create and insert an element unordered-atomic memcpy between the
+  /// specified pointers.
+  ///
+  /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
+  ///
+  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateElementUnorderedAtomicMemCpy(
+      Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
+      uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
+      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
+      MDNode *NoAliasTag = nullptr) {
+    return CreateElementUnorderedAtomicMemCpy(
+        Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
+        TBAAStructTag, ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateElementUnorderedAtomicMemCpy(
+      Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
+      uint32_t ElementSize, MDNode *TBAATag = nullptr,
+      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
+      MDNode *NoAliasTag = nullptr);
+
+  /// \brief Create and insert a memmove between the specified
+  /// pointers.
+  ///
+  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateMemMove(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
+                          uint64_t Size, bool isVolatile = false,
+                          MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
+                          MDNode *NoAliasTag = nullptr) {
+    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), isVolatile,
+                         TBAATag, ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateMemMove(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
+                          Value *Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
+                          MDNode *ScopeTag = nullptr,
+                          MDNode *NoAliasTag = nullptr);
+
+  /// \brief Create a vector fadd reduction intrinsic of the source vector.
+  /// The first parameter is a scalar accumulator value for ordered reductions.
+  CallInst *CreateFAddReduce(Value *Acc, Value *Src);
+
+  /// \brief Create a vector fmul reduction intrinsic of the source vector.
+  /// The first parameter is a scalar accumulator value for ordered reductions.
+  CallInst *CreateFMulReduce(Value *Acc, Value *Src);
+
+  /// \brief Create a vector int add reduction intrinsic of the source vector.
+  CallInst *CreateAddReduce(Value *Src);
+
+  /// \brief Create a vector int mul reduction intrinsic of the source vector.
+  CallInst *CreateMulReduce(Value *Src);
+
+  /// \brief Create a vector int AND reduction intrinsic of the source vector.
+  CallInst *CreateAndReduce(Value *Src);
+
+  /// \brief Create a vector int OR reduction intrinsic of the source vector.
+  CallInst *CreateOrReduce(Value *Src);
+
+  /// \brief Create a vector int XOR reduction intrinsic of the source vector.
+  CallInst *CreateXorReduce(Value *Src);
+
+  /// \brief Create a vector integer max reduction intrinsic of the source
+  /// vector.
+  CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
+
+  /// \brief Create a vector integer min reduction intrinsic of the source
+  /// vector.
+  CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
+
+  /// \brief Create a vector float max reduction intrinsic of the source
+  /// vector.
+  CallInst *CreateFPMaxReduce(Value *Src, bool NoNaN = false);
+
+  /// \brief Create a vector float min reduction intrinsic of the source
+  /// vector.
+  CallInst *CreateFPMinReduce(Value *Src, bool NoNaN = false);
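+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<> and
+  // \c Vec is a value of vector type, e.g. <4 x i32>):
+  //   Value *Sum = Builder.CreateAddReduce(Vec);
+  //   Value *Max = Builder.CreateIntMaxReduce(Vec, /*IsSigned=*/true);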
+
+  /// \brief Create a lifetime.start intrinsic.
+  ///
+  /// If the pointer isn't i8* it will be converted.
+  CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
+
+  /// \brief Create a lifetime.end intrinsic.
+  ///
+  /// If the pointer isn't i8* it will be converted.
+  CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
+
+  /// Create a call to invariant.start intrinsic.
+  ///
+  /// If the pointer isn't i8* it will be converted.
+  CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
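+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<> and
+  // \c Slot is a pointer to a 16-byte stack allocation):
+  //   Builder.CreateLifetimeStart(Slot, Builder.getInt64(16));
+  //   ... use the slot ...
+  //   Builder.CreateLifetimeEnd(Slot, Builder.getInt64(16));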
+
+  /// \brief Create a call to the Masked Load intrinsic.
+  CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+                             Value *PassThru = nullptr, const Twine &Name = "");
+
+  /// \brief Create a call to the Masked Store intrinsic.
+  CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
+                              Value *Mask);
+
+  /// \brief Create a call to the Masked Gather intrinsic.
+  CallInst *CreateMaskedGather(Value *Ptrs, unsigned Align,
+                               Value *Mask = nullptr,
+                               Value *PassThru = nullptr,
+                               const Twine &Name = "");
+
+  /// \brief Create a call to the Masked Scatter intrinsic.
+  CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Align,
+                                Value *Mask = nullptr);
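+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<>,
+  // \c Ptr points to a <4 x float> in memory, \c Mask is a <4 x i1> value and
+  // \c PassThru is a <4 x float> value):
+  //   Value *V = Builder.CreateMaskedLoad(Ptr, /*Align=*/16, Mask, PassThru);
+  //   Builder.CreateMaskedStore(V, Ptr, /*Align=*/16, Mask);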
+
+  /// \brief Create an assume intrinsic call that allows the optimizer to
+  /// assume that the provided condition will be true.
+  CallInst *CreateAssumption(Value *Cond);
+
+  /// \brief Create a call to the experimental.gc.statepoint intrinsic to
+  /// start a new statepoint sequence.
+  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+                                   Value *ActualCallee,
+                                   ArrayRef<Value *> CallArgs,
+                                   ArrayRef<Value *> DeoptArgs,
+                                   ArrayRef<Value *> GCArgs,
+                                   const Twine &Name = "");
+
+  /// \brief Create a call to the experimental.gc.statepoint intrinsic to
+  /// start a new statepoint sequence.
+  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+                                   Value *ActualCallee, uint32_t Flags,
+                                   ArrayRef<Use> CallArgs,
+                                   ArrayRef<Use> TransitionArgs,
+                                   ArrayRef<Use> DeoptArgs,
+                                   ArrayRef<Value *> GCArgs,
+                                   const Twine &Name = "");
+
+  /// \brief Convenience function for the common case when CallArgs are filled
+  /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); each Use needs to be
+  /// .get()'ed to get the Value pointer.
+  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+                                   Value *ActualCallee, ArrayRef<Use> CallArgs,
+                                   ArrayRef<Value *> DeoptArgs,
+                                   ArrayRef<Value *> GCArgs,
+                                   const Twine &Name = "");
+
+  /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
+  /// start a new statepoint sequence.
+  InvokeInst *
+  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
+                           Value *ActualInvokee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
+                           ArrayRef<Value *> DeoptArgs,
+                           ArrayRef<Value *> GCArgs, const Twine &Name = "");
+
+  /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
+  /// start a new statepoint sequence.
+  InvokeInst *CreateGCStatepointInvoke(
+      uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
+      BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
+      ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
+      ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
+      const Twine &Name = "");
+
+  /// \brief Convenience function for the common case when InvokeArgs are
+  /// filled in using makeArrayRef(CS.arg_begin(), CS.arg_end()); each Use
+  /// needs to be .get()'ed to get the Value pointer.
+  InvokeInst *
+  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
+                           Value *ActualInvokee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
+                           ArrayRef<Value *> DeoptArgs,
+                           ArrayRef<Value *> GCArgs, const Twine &Name = "");
+
+  /// \brief Create a call to the experimental.gc.result intrinsic to extract
+  /// the result from a call wrapped in a statepoint.
+  CallInst *CreateGCResult(Instruction *Statepoint,
+                           Type *ResultType,
+                           const Twine &Name = "");
+
+  /// \brief Create a call to the experimental.gc.relocate intrinsic to
+  /// project the relocated value of one pointer from the statepoint.
+  CallInst *CreateGCRelocate(Instruction *Statepoint,
+                             int BaseOffset,
+                             int DerivedOffset,
+                             Type *ResultType,
+                             const Twine &Name = "");
+
+  /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
+  /// first type.
+  CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID,
+                                  Value *LHS, Value *RHS,
+                                  const Twine &Name = "");
+
+  /// Create a call to intrinsic \p ID with 1 or more operands assuming the
+  /// intrinsic and all operands have the same type. If \p FMFSource is
+  /// provided, copy fast-math-flags from that instruction to the intrinsic.
+  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Value *> Args,
+                            Instruction *FMFSource = nullptr,
+                            const Twine &Name = "");
+
+  /// Create call to the minnum intrinsic.
+  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, Name);
+  }
+
+  /// Create call to the maxnum intrinsic.
+  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, Name);
+  }
+
+private:
+  /// \brief Create a call to a masked intrinsic with given Id.
+  CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
+                                  ArrayRef<Type *> OverloadedTypes,
+                                  const Twine &Name = "");
+
+  Value *getCastedInt8PtrValue(Value *Ptr);
+};
+
+/// \brief This provides a uniform API for creating instructions and inserting
+/// them into a basic block: either at the end of a BasicBlock, or at a specific
+/// iterator location in a block.
+///
+/// Note that the builder does not expose the full generality of LLVM
+/// instructions.  For access to extra instruction properties, use the mutators
+/// (e.g. setVolatile) on the instructions after they have been
+/// created. Convenience state exists to specify fast-math flags and fp-math
+/// tags.
+///
+/// The first template argument specifies a class to use for creating constants.
+/// This defaults to creating minimally folded constants.  The second template
+/// argument allows clients to specify custom insertion hooks that are called
+/// on every newly inserted instruction.
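+///
+/// For example (a minimal usage sketch; assumes \c BB is an existing
+/// BasicBlock and \c V and \c W are i32 values available in scope):
+/// \code
+///   IRBuilder<> Builder(BB);
+///   Value *Sum = Builder.CreateAdd(V, W, "sum");
+///   Builder.CreateRet(Sum);
+/// \endcode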
+template <typename T = ConstantFolder,
+          typename Inserter = IRBuilderDefaultInserter>
+class IRBuilder : public IRBuilderBase, public Inserter {
+  T Folder;
+
+public:
+  IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
+            MDNode *FPMathTag = nullptr,
+            ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(C, FPMathTag, OpBundles), Inserter(std::move(I)),
+        Folder(F) {}
+
+  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
+                     ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(C, FPMathTag, OpBundles) {}
+
+  explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr,
+                     ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
+    SetInsertPoint(TheBB);
+  }
+
+  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
+                     ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
+    SetInsertPoint(TheBB);
+  }
+
+  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
+                     ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(IP->getContext(), FPMathTag, OpBundles) {
+    SetInsertPoint(IP);
+  }
+
+  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T &F,
+            MDNode *FPMathTag = nullptr,
+            ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
+    SetInsertPoint(TheBB, IP);
+  }
+
+  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
+            MDNode *FPMathTag = nullptr,
+            ArrayRef<OperandBundleDef> OpBundles = None)
+      : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
+    SetInsertPoint(TheBB, IP);
+  }
+
+  /// \brief Get the constant folder being used.
+  const T &getFolder() { return Folder; }
+
+  /// \brief Insert and return the specified instruction.
+  template<typename InstTy>
+  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
+    this->InsertHelper(I, Name, BB, InsertPt);
+    this->SetInstDebugLocation(I);
+    return I;
+  }
+
+  /// \brief No-op overload to handle constants.
+  Constant *Insert(Constant *C, const Twine& = "") const {
+    return C;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Terminators
+  //===--------------------------------------------------------------------===//
+
+private:
+  /// \brief Helper to add branch weight and unpredictable metadata onto an
+  /// instruction.
+  /// \returns The annotated instruction.
+  template <typename InstTy>
+  InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
+    if (Weights)
+      I->setMetadata(LLVMContext::MD_prof, Weights);
+    if (Unpredictable)
+      I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
+    return I;
+  }
+
+public:
+  /// \brief Create a 'ret void' instruction.
+  ReturnInst *CreateRetVoid() {
+    return Insert(ReturnInst::Create(Context));
+  }
+
+  /// \brief Create a 'ret <val>' instruction.
+  ReturnInst *CreateRet(Value *V) {
+    return Insert(ReturnInst::Create(Context, V));
+  }
+
+  /// \brief Create a sequence of N insertvalue instructions, each taking one
+  /// Value from the retVals array, that build an aggregate return value one
+  /// element at a time, and a ret instruction to return the resulting
+  /// aggregate value.
+  ///
+  /// This is a convenience function for code that uses aggregate return values
+  /// as a vehicle for having multiple return values.
+  ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
+    Value *V = UndefValue::get(getCurrentFunctionReturnType());
+    for (unsigned i = 0; i != N; ++i)
+      V = CreateInsertValue(V, retVals[i], i, "mrv");
+    return Insert(ReturnInst::Create(Context, V));
+  }
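+
+  // Example (an illustrative sketch; assumes the current function returns an
+  // aggregate such as {i32, i32} and \c A and \c B are i32 values):
+  //   Value *RetVals[] = {A, B};
+  //   Builder.CreateAggregateRet(RetVals, 2);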
+
+  /// \brief Create an unconditional 'br label X' instruction.
+  BranchInst *CreateBr(BasicBlock *Dest) {
+    return Insert(BranchInst::Create(Dest));
+  }
+
+  /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
+  /// instruction.
+  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
+                           MDNode *BranchWeights = nullptr,
+                           MDNode *Unpredictable = nullptr) {
+    return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
+                                    BranchWeights, Unpredictable));
+  }
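+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<>,
+  // \c Cond is an i1 value, \c ThenBB and \c ElseBB are existing blocks, and
+  // MDBuilder from llvm/IR/MDBuilder.h is used to build the weights):
+  //   MDNode *Weights =
+  //       MDBuilder(Builder.getContext()).createBranchWeights(90, 10);
+  //   Builder.CreateCondBr(Cond, ThenBB, ElseBB, Weights);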
+
+  /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
+  /// instruction. Branch metadata is copied from \p MDSrc if available.
+  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
+                           Instruction *MDSrc) {
+    BranchInst *Br = BranchInst::Create(True, False, Cond);
+    if (MDSrc) {
+      unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
+                        LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
+      Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
+    }
+    return Insert(Br);
+  }
+
+  /// \brief Create a switch instruction with the specified value, default dest,
+  /// and with a hint for the number of cases that will be added (for efficient
+  /// allocation).
+  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
+                           MDNode *BranchWeights = nullptr,
+                           MDNode *Unpredictable = nullptr) {
+    return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
+                                    BranchWeights, Unpredictable));
+  }
+
+  /// \brief Create an indirect branch instruction with the specified address
+  /// operand, with an optional hint for the number of destinations that will be
+  /// added (for efficient allocation).
+  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
+    return Insert(IndirectBrInst::Create(Addr, NumDests));
+  }
+
+  /// \brief Create an invoke instruction.
+  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest,
+                           ArrayRef<Value *> Args = None,
+                           const Twine &Name = "") {
+    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
+                  Name);
+  }
+  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
+                           ArrayRef<OperandBundleDef> OpBundles,
+                           const Twine &Name = "") {
+    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
+                                     OpBundles), Name);
+  }
+
+  ResumeInst *CreateResume(Value *Exn) {
+    return Insert(ResumeInst::Create(Exn));
+  }
+
+  CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
+                                      BasicBlock *UnwindBB = nullptr) {
+    return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
+  }
+
+  CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
+                                     unsigned NumHandlers,
+                                     const Twine &Name = "") {
+    return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
+                  Name);
+  }
+
+  CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
+                               const Twine &Name = "") {
+    return Insert(CatchPadInst::Create(ParentPad, Args), Name);
+  }
+
+  CleanupPadInst *CreateCleanupPad(Value *ParentPad,
+                                   ArrayRef<Value *> Args = None,
+                                   const Twine &Name = "") {
+    return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
+  }
+
+  CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
+    return Insert(CatchReturnInst::Create(CatchPad, BB));
+  }
+
+  UnreachableInst *CreateUnreachable() {
+    return Insert(new UnreachableInst(Context));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Binary Operators
+  //===--------------------------------------------------------------------===//
+private:
+  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
+                                          Value *LHS, Value *RHS,
+                                          const Twine &Name,
+                                          bool HasNUW, bool HasNSW) {
+    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
+                          FastMathFlags FMF) const {
+    if (!FPMD)
+      FPMD = DefaultFPMathTag;
+    if (FPMD)
+      I->setMetadata(LLVMContext::MD_fpmath, FPMD);
+    I->setFastMathFlags(FMF);
+    return I;
+  }
+
+  Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
+                      Value *R, const Twine &Name = "") const {
+    auto *LC = dyn_cast<Constant>(L);
+    auto *RC = dyn_cast<Constant>(R);
+    return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
+  }
+
+public:
+  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
+    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
+                                   HasNUW, HasNSW);
+  }
+
+  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateAdd(LHS, RHS, Name, false, true);
+  }
+
+  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateAdd(LHS, RHS, Name, true, false);
+  }
+
+  Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
+    return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
+                                   HasNUW, HasNSW);
+  }
+
+  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateSub(LHS, RHS, Name, false, true);
+  }
+
+  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateSub(LHS, RHS, Name, true, false);
+  }
+
+  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
+    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
+                                   HasNUW, HasNSW);
+  }
+
+  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateMul(LHS, RHS, Name, false, true);
+  }
+
+  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateMul(LHS, RHS, Name, true, false);
+  }
+
+  Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
+    if (!isExact)
+      return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
+    return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
+  }
+
+  Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateUDiv(LHS, RHS, Name, true);
+  }
+
+  Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
+    if (!isExact)
+      return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
+    return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
+  }
+
+  Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateSDiv(LHS, RHS, Name, true);
+  }
+
+  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
+    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
+  }
+
+  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
+    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
+  }
+
+  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
+    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
+                                   HasNUW, HasNSW);
+  }
+
+  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                     HasNUW, HasNSW);
+  }
+
+  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                     HasNUW, HasNSW);
+  }
+
+  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
+    if (!isExact)
+      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
+    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
+  }
+
+  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                      isExact);
+  }
+
+  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                      isExact);
+  }
+
+  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
+    if (!isExact)
+      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
+    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
+  }
+
+  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                      isExact);
+  }
+
+  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+                    bool isExact = false) {
+    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+                      isExact);
+  }
+
+  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
+    if (auto *RC = dyn_cast<Constant>(RHS)) {
+      if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
+        return LHS;  // LHS & -1 -> LHS
+      if (auto *LC = dyn_cast<Constant>(LHS))
+        return Insert(Folder.CreateAnd(LC, RC), Name);
+    }
+    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
+  }
+
+  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
+    if (auto *RC = dyn_cast<Constant>(RHS)) {
+      if (RC->isNullValue())
+        return LHS;  // LHS | 0 -> LHS
+      if (auto *LC = dyn_cast<Constant>(LHS))
+        return Insert(Folder.CreateOr(LC, RC), Name);
+    }
+    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
+  }
+
+  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
+    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
+  }
+
+  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+  }
+
+  Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
+                    MDNode *FPMD = nullptr) {
+    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
+    return Insert(I, Name);
+  }
+
+  /// Copy fast-math-flags from an instruction rather than using the builder's
+  /// default FMF.
+  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
+                       const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
+                                FMFSource->getFastMathFlags());
+    return Insert(I, Name);
+  }
+
+  Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
+                    MDNode *FPMD = nullptr) {
+    if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
+    return Insert(I, Name);
+  }
+
+  /// Copy fast-math-flags from an instruction rather than using the builder's
+  /// default FMF.
+  Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
+                       const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
+                                FMFSource->getFastMathFlags());
+    return Insert(I, Name);
+  }
+
+  Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
+                    MDNode *FPMD = nullptr) {
+    if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
+    return Insert(I, Name);
+  }
+
+  /// Copy fast-math-flags from an instruction rather than using the builder's
+  /// default FMF.
+  Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
+                       const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
+                                FMFSource->getFastMathFlags());
+    return Insert(I, Name);
+  }
+
+  Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
+                    MDNode *FPMD = nullptr) {
+    if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
+    return Insert(I, Name);
+  }
+
+  /// Copy fast-math-flags from an instruction rather than using the builder's
+  /// default FMF.
+  Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
+                       const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
+                                FMFSource->getFastMathFlags());
+    return Insert(I, Name);
+  }
+
+  Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
+                    MDNode *FPMD = nullptr) {
+    if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
+    return Insert(I, Name);
+  }
+
+  /// Copy fast-math-flags from an instruction rather than using the builder's
+  /// default FMF.
+  Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
+                       const Twine &Name = "") {
+    if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
+    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
+                                FMFSource->getFastMathFlags());
+    return Insert(I, Name);
+  }
+
+  Value *CreateBinOp(Instruction::BinaryOps Opc,
+                     Value *LHS, Value *RHS, const Twine &Name = "",
+                     MDNode *FPMathTag = nullptr) {
+    if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
+    Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
+    if (isa<FPMathOperator>(BinOp))
+      BinOp = setFPAttrs(BinOp, FPMathTag, FMF);
+    return Insert(BinOp, Name);
+  }
+
+  Value *CreateNeg(Value *V, const Twine &Name = "",
+                   bool HasNUW = false, bool HasNSW = false) {
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
+    BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
+    return CreateNeg(V, Name, false, true);
+  }
+
+  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
+    return CreateNeg(V, Name, true, false);
+  }
+
+  Value *CreateFNeg(Value *V, const Twine &Name = "",
+                    MDNode *FPMathTag = nullptr) {
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateFNeg(VC), Name);
+    return Insert(setFPAttrs(BinaryOperator::CreateFNeg(V), FPMathTag, FMF),
+                  Name);
+  }
+
+  Value *CreateNot(Value *V, const Twine &Name = "") {
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateNot(VC), Name);
+    return Insert(BinaryOperator::CreateNot(V), Name);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Memory Instructions
+  //===--------------------------------------------------------------------===//
+
+  AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
+                           Value *ArraySize = nullptr, const Twine &Name = "") {
+    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize), Name);
+  }
+
+  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
+                           const Twine &Name = "") {
+    const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+    return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
+  }
+
+  /// \brief Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
+  /// converting the string to 'bool' for the isVolatile parameter.
+  LoadInst *CreateLoad(Value *Ptr, const char *Name) {
+    return Insert(new LoadInst(Ptr), Name);
+  }
+
+  LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
+    return Insert(new LoadInst(Ptr), Name);
+  }
+
+  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
+    return Insert(new LoadInst(Ty, Ptr), Name);
+  }
+
+  LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
+    return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
+  }
+
+  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
+    return Insert(new StoreInst(Val, Ptr, isVolatile));
+  }
+
+  /// \brief Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
+  /// correctly, instead of converting the string to 'bool' for the isVolatile
+  /// parameter.
+  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+    LoadInst *LI = CreateLoad(Ptr, Name);
+    LI->setAlignment(Align);
+    return LI;
+  }
+  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+                              const Twine &Name = "") {
+    LoadInst *LI = CreateLoad(Ptr, Name);
+    LI->setAlignment(Align);
+    return LI;
+  }
+  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
+                              const Twine &Name = "") {
+    LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
+    LI->setAlignment(Align);
+    return LI;
+  }
+
+  StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
+                                bool isVolatile = false) {
+    StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
+    SI->setAlignment(Align);
+    return SI;
+  }
+
+  FenceInst *CreateFence(AtomicOrdering Ordering,
+                         SyncScope::ID SSID = SyncScope::System,
+                         const Twine &Name = "") {
+    return Insert(new FenceInst(Context, Ordering, SSID), Name);
+  }
+
+  AtomicCmpXchgInst *
+  CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
+                      AtomicOrdering SuccessOrdering,
+                      AtomicOrdering FailureOrdering,
+                      SyncScope::ID SSID = SyncScope::System) {
+    return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
+                                        FailureOrdering, SSID));
+  }
+
+  AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
+                                 AtomicOrdering Ordering,
+                                 SyncScope::ID SSID = SyncScope::System) {
+    return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
+  }
+
+  Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+                   const Twine &Name = "") {
+    return CreateGEP(nullptr, Ptr, IdxList, Name);
+  }
+
+  Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+                   const Twine &Name = "") {
+    if (auto *PC = dyn_cast<Constant>(Ptr)) {
+      // Every index must be constant.
+      size_t i, e;
+      for (i = 0, e = IdxList.size(); i != e; ++i)
+        if (!isa<Constant>(IdxList[i]))
+          break;
+      if (i == e)
+        return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
+    }
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
+  }
+
+  Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+                           const Twine &Name = "") {
+    return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
+  }
+
+  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+                           const Twine &Name = "") {
+    if (auto *PC = dyn_cast<Constant>(Ptr)) {
+      // Every index must be constant.
+      size_t i, e;
+      for (i = 0, e = IdxList.size(); i != e; ++i)
+        if (!isa<Constant>(IdxList[i]))
+          break;
+      if (i == e)
+        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
+                      Name);
+    }
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
+  }
+
+  Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
+    return CreateGEP(nullptr, Ptr, Idx, Name);
+  }
+
+  Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      if (auto *IC = dyn_cast<Constant>(Idx))
+        return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
+  }
+
+  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
+                           const Twine &Name = "") {
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      if (auto *IC = dyn_cast<Constant>(Idx))
+        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
+    return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
+  }
+
+  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
+                            const Twine &Name = "") {
+    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
+
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
+                                    const Twine &Name = "") {
+    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
+                            const Twine &Name = "") {
+    Value *Idxs[] = {
+      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
+      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
+    };
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
+
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
+  }
+
+  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
+                                    unsigned Idx1, const Twine &Name = "") {
+    Value *Idxs[] = {
+      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
+      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
+    };
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
+  }
+
+  Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
+
+    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
+                                    const Twine &Name = "") {
+    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+                    const Twine &Name = "") {
+    Value *Idxs[] = {
+      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
+      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
+    };
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
+
+    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
+  }
+
+  Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+                                    const Twine &Name = "") {
+    Value *Idxs[] = {
+      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
+      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
+    };
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
+                    Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
+  }
+
+  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
+                         const Twine &Name = "") {
+    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
+  }
+
+  /// \brief Same as CreateGlobalString, but return a pointer with "i8*" type
+  /// instead of a pointer to array of i8.
+  Value *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
+                               unsigned AddressSpace = 0) {
+    GlobalVariable *gv = CreateGlobalString(Str, Name, AddressSpace);
+    Value *zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
+    Value *Args[] = { zero, zero };
+    return CreateInBoundsGEP(gv->getValueType(), gv, Args, Name);
+  }
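+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<>
+  // whose insertion point is inside a function):
+  //   Value *Fmt = Builder.CreateGlobalStringPtr("hello %d\n");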
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Cast/Conversion Operators
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
+    return CreateCast(Instruction::Trunc, V, DestTy, Name);
+  }
+
+  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
+    return CreateCast(Instruction::ZExt, V, DestTy, Name);
+  }
+
+  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
+    return CreateCast(Instruction::SExt, V, DestTy, Name);
+  }
+
+  /// \brief Create a ZExt or Trunc from the integer value V to DestTy. Return
+  /// the value untouched if the type of V is already DestTy.
+  Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
+                           const Twine &Name = "") {
+    assert(V->getType()->isIntOrIntVectorTy() &&
+           DestTy->isIntOrIntVectorTy() &&
+           "Can only zero extend/truncate integers!");
+    Type *VTy = V->getType();
+    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
+      return CreateZExt(V, DestTy, Name);
+    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
+      return CreateTrunc(V, DestTy, Name);
+    return V;
+  }
+
+  /// \brief Create a SExt or Trunc from the integer value V to DestTy. Return
+  /// the value untouched if the type of V is already DestTy.
+  Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
+                           const Twine &Name = "") {
+    assert(V->getType()->isIntOrIntVectorTy() &&
+           DestTy->isIntOrIntVectorTy() &&
+           "Can only sign extend/truncate integers!");
+    Type *VTy = V->getType();
+    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
+      return CreateSExt(V, DestTy, Name);
+    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
+      return CreateTrunc(V, DestTy, Name);
+    return V;
+  }
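+
+  // Example (an illustrative sketch; assumes \c Builder is an IRBuilder<> and
+  // \c V is an integer value of unknown width being normalized to i32):
+  //   Value *V32 = Builder.CreateZExtOrTrunc(V, Builder.getInt32Ty());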
+
+  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
+    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
+  }
+
+  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
+    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
+  }
+
+  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
+    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
+  }
+
+  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
+    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
+  }
+
+  Value *CreateFPTrunc(Value *V, Type *DestTy,
+                       const Twine &Name = "") {
+    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
+  }
+
+  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
+    return CreateCast(Instruction::FPExt, V, DestTy, Name);
+  }
+
+  Value *CreatePtrToInt(Value *V, Type *DestTy,
+                        const Twine &Name = "") {
+    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
+  }
+
+  Value *CreateIntToPtr(Value *V, Type *DestTy,
+                        const Twine &Name = "") {
+    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
+  }
+
+  Value *CreateBitCast(Value *V, Type *DestTy,
+                       const Twine &Name = "") {
+    return CreateCast(Instruction::BitCast, V, DestTy, Name);
+  }
+
+  Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
+                             const Twine &Name = "") {
+    return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
+  }
+
+  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
+                             const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
+    return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
+  }
+
+  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
+                             const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
+    return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
+  }
+
+  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
+                              const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
+    return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
+  }
+
+  Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
+                    const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
+    return Insert(CastInst::Create(Op, V, DestTy), Name);
+  }
+
+  Value *CreatePointerCast(Value *V, Type *DestTy,
+                           const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
+    return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
+  }
+
+  Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
+                                             const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+
+    if (auto *VC = dyn_cast<Constant>(V)) {
+      return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
+                    Name);
+    }
+
+    return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
+                  Name);
+  }
+
+  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
+                       const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
+    return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
+  }
+
+  Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
+                                const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
+      return CreatePtrToInt(V, DestTy, Name);
+    if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
+      return CreateIntToPtr(V, DestTy, Name);
+
+    return CreateBitCast(V, DestTy, Name);
+  }
+
+  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
+    if (V->getType() == DestTy)
+      return V;
+    if (auto *VC = dyn_cast<Constant>(V))
+      return Insert(Folder.CreateFPCast(VC, DestTy), Name);
+    return Insert(CastInst::CreateFPCast(V, DestTy), Name);
+  }
+
+  // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
+  // compile time error, instead of converting the string to bool for the
+  // isSigned parameter.
+  Value *CreateIntCast(Value *, Type *, const char *) = delete;
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Compare Instructions
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
+  }
+
+  Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
+    return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
+  }
+
+  Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
+  }
+
+  Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
+                    const Twine &Name = "") {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateICmp(P, LC, RC), Name);
+    return Insert(new ICmpInst(P, LHS, RHS), Name);
+  }
+
+  Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
+                    const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    if (auto *LC = dyn_cast<Constant>(LHS))
+      if (auto *RC = dyn_cast<Constant>(RHS))
+        return Insert(Folder.CreateFCmp(P, LC, RC), Name);
+    return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Instruction creation methods: Other Instructions
+  //===--------------------------------------------------------------------===//
+
+  PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
+                     const Twine &Name = "") {
+    return Insert(PHINode::Create(Ty, NumReservedValues), Name);
+  }
+
+  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    auto *PTy = cast<PointerType>(Callee->getType());
+    auto *FTy = cast<FunctionType>(PTy->getElementType());
+    return CreateCall(FTy, Callee, Args, Name, FPMathTag);
+  }
+
+  CallInst *CreateCall(FunctionType *FTy, Value *Callee,
+                       ArrayRef<Value *> Args, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
+    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
+    if (isa<FPMathOperator>(CI))
+      CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
+    return Insert(CI, Name);
+  }
+
+  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+                       ArrayRef<OperandBundleDef> OpBundles,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    CallInst *CI = CallInst::Create(Callee, Args, OpBundles);
+    if (isa<FPMathOperator>(CI))
+      CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
+    return Insert(CI, Name);
+  }
+
+  CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    return CreateCall(Callee->getFunctionType(), Callee, Args, Name, FPMathTag);
+  }
+
+  Value *CreateSelect(Value *C, Value *True, Value *False,
+                      const Twine &Name = "", Instruction *MDFrom = nullptr) {
+    if (auto *CC = dyn_cast<Constant>(C))
+      if (auto *TC = dyn_cast<Constant>(True))
+        if (auto *FC = dyn_cast<Constant>(False))
+          return Insert(Folder.CreateSelect(CC, TC, FC), Name);
+
+    SelectInst *Sel = SelectInst::Create(C, True, False);
+    if (MDFrom) {
+      MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
+      MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
+      Sel = addBranchMetadata(Sel, Prof, Unpred);
+    }
+    return Insert(Sel, Name);
+  }
+
+  VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
+    return Insert(new VAArgInst(List, Ty), Name);
+  }
+
+  Value *CreateExtractElement(Value *Vec, Value *Idx,
+                              const Twine &Name = "") {
+    if (auto *VC = dyn_cast<Constant>(Vec))
+      if (auto *IC = dyn_cast<Constant>(Idx))
+        return Insert(Folder.CreateExtractElement(VC, IC), Name);
+    return Insert(ExtractElementInst::Create(Vec, Idx), Name);
+  }
+
+  Value *CreateExtractElement(Value *Vec, uint64_t Idx,
+                              const Twine &Name = "") {
+    return CreateExtractElement(Vec, getInt64(Idx), Name);
+  }
+
+  Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
+                             const Twine &Name = "") {
+    if (auto *VC = dyn_cast<Constant>(Vec))
+      if (auto *NC = dyn_cast<Constant>(NewElt))
+        if (auto *IC = dyn_cast<Constant>(Idx))
+          return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
+    return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
+  }
+
+  Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
+                             const Twine &Name = "") {
+    return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
+  }
+
+  Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
+                             const Twine &Name = "") {
+    if (auto *V1C = dyn_cast<Constant>(V1))
+      if (auto *V2C = dyn_cast<Constant>(V2))
+        if (auto *MC = dyn_cast<Constant>(Mask))
+          return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
+    return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
+  }
+
+  Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<uint32_t> IntMask,
+                             const Twine &Name = "") {
+    Value *Mask = ConstantDataVector::get(Context, IntMask);
+    return CreateShuffleVector(V1, V2, Mask, Name);
+  }
+
+  Value *CreateExtractValue(Value *Agg,
+                            ArrayRef<unsigned> Idxs,
+                            const Twine &Name = "") {
+    if (auto *AggC = dyn_cast<Constant>(Agg))
+      return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
+    return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
+  }
+
+  Value *CreateInsertValue(Value *Agg, Value *Val,
+                           ArrayRef<unsigned> Idxs,
+                           const Twine &Name = "") {
+    if (auto *AggC = dyn_cast<Constant>(Agg))
+      if (auto *ValC = dyn_cast<Constant>(Val))
+        return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
+    return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
+  }
+
+  LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
+                                   const Twine &Name = "") {
+    return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Utility creation methods
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return an i1 value testing if \p Arg is null.
+  Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
+    return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
+                        Name);
+  }
+
+  /// \brief Return an i1 value testing if \p Arg is not null.
+  Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
+    return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
+                        Name);
+  }
+
+  /// \brief Return the i64 difference between two pointer values, dividing out
+  /// the size of the pointed-to objects.
+  ///
+  /// This is intended to implement C-style pointer subtraction. As such, the
+  /// pointers must be appropriately aligned for their element types and
+  /// pointing into the same object.
+  Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") {
+    assert(LHS->getType() == RHS->getType() &&
+           "Pointer subtraction operand types must match!");
+    auto *ArgType = cast<PointerType>(LHS->getType());
+    Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
+    Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
+    Value *Difference = CreateSub(LHS_int, RHS_int);
+    return CreateExactSDiv(Difference,
+                           ConstantExpr::getSizeOf(ArgType->getElementType()),
+                           Name);
+  }
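+
+  // Illustrative only: for two i32* operands %P and %Q, CreatePtrDiff emits
+  // IR roughly like the following (value names are hypothetical; the divisor
+  // is a sizeof constant expression that folds to 4 for i32):
+  //
+  //   %lhs  = ptrtoint i32* %P to i64
+  //   %rhs  = ptrtoint i32* %Q to i64
+  //   %sub  = sub i64 %lhs, %rhs
+  //   %diff = sdiv exact i64 %sub, 4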
+
+  /// \brief Create an invariant.group.barrier intrinsic call that stops the
+  /// optimizer from propagating equality using invariant.group metadata.
+  /// If Ptr's type is not a pointer to i8, it is cast to a pointer to i8 in
+  /// the same address space before the call and cast back to its original
+  /// type afterwards.
+  Value *CreateInvariantGroupBarrier(Value *Ptr) {
+    assert(isa<PointerType>(Ptr->getType()) &&
+           "invariant.group.barrier only applies to pointers.");
+    auto *PtrType = Ptr->getType();
+    auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
+    if (PtrType != Int8PtrTy)
+      Ptr = CreateBitCast(Ptr, Int8PtrTy);
+    Module *M = BB->getParent()->getParent();
+    Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(
+        M, Intrinsic::invariant_group_barrier, {Int8PtrTy});
+
+    assert(FnInvariantGroupBarrier->getReturnType() == Int8PtrTy &&
+           FnInvariantGroupBarrier->getFunctionType()->getParamType(0) ==
+               Int8PtrTy &&
+           "InvariantGroupBarrier should take and return the same type");
+
+    CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});
+
+    if (PtrType != Int8PtrTy)
+      return CreateBitCast(Fn, PtrType);
+    return Fn;
+  }
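+
+  // Usage sketch (hypothetical caller): launder a pointer so the optimizer
+  // does not propagate equality across the barrier via invariant.group; any
+  // required bitcasts to and from i8* are handled inside the helper.
+  //
+  //   llvm::Value *launder(llvm::IRBuilder<> &B, llvm::Value *Ptr) {
+  //     return B.CreateInvariantGroupBarrier(Ptr);
+  //   }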
+
+  /// \brief Return a vector value that contains \arg V broadcast to \p
+  /// NumElts elements.
+  Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {
+    assert(NumElts > 0 && "Cannot splat to an empty vector!");
+
+    // First insert it into an undef vector so we can shuffle it.
+    Type *I32Ty = getInt32Ty();
+    Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts));
+    V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
+                            Name + ".splatinsert");
+
+    // Shuffle the value across the desired number of elements.
+    Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts));
+    return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
+  }
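+
+  // Illustrative IR for CreateVectorSplat(4, %v) with an i32 %v, following
+  // the insert-then-shuffle idiom above (suffixes match the Name arguments):
+  //
+  //   %x.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
+  //   %x.splat = shufflevector <4 x i32> %x.splatinsert, <4 x i32> undef,
+  //                            <4 x i32> zeroinitializer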
+
+  /// \brief Return a value that has been extracted from a larger integer type.
+  Value *CreateExtractInteger(const DataLayout &DL, Value *From,
+                              IntegerType *ExtractedTy, uint64_t Offset,
+                              const Twine &Name) {
+    auto *IntTy = cast<IntegerType>(From->getType());
+    assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
+               DL.getTypeStoreSize(IntTy) &&
+           "Element extends past full value");
+    uint64_t ShAmt = 8 * Offset;
+    Value *V = From;
+    if (DL.isBigEndian())
+      ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
+                   DL.getTypeStoreSize(ExtractedTy) - Offset);
+    if (ShAmt) {
+      V = CreateLShr(V, ShAmt, Name + ".shift");
+    }
+    assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
+           "Cannot extract to a larger integer!");
+    if (ExtractedTy != IntTy) {
+      V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
+    }
+    return V;
+  }
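+
+  // Worked example: extracting the i8 at byte offset 2 from an i32 on a
+  // little-endian target shifts right by 8 * 2 = 16 bits, then truncates:
+  //
+  //   %v.shift = lshr i32 %v, 16
+  //   %v.trunc = trunc i32 %v.shift to i8
+  //
+  // On a big-endian target the shift amount is 8 * (4 - 1 - 2) = 8 bits
+  // instead, mirroring the reversed byte layout.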
+
+private:
+  /// \brief Helper function that creates an assume intrinsic call that
+  /// represents an alignment assumption on the provided Ptr, Mask, Type
+  /// and Offset.
+  CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
+                                            Value *PtrValue, Value *Mask,
+                                            Type *IntPtrTy,
+                                            Value *OffsetValue) {
+    Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+    if (OffsetValue) {
+      bool IsOffsetZero = false;
+      if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
+        IsOffsetZero = CI->isZero();
+
+      if (!IsOffsetZero) {
+        if (OffsetValue->getType() != IntPtrTy)
+          OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
+                                      "offsetcast");
+        PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+      }
+    }
+
+    Value *Zero = ConstantInt::get(IntPtrTy, 0);
+    Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
+    Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+    return CreateAssumption(InvCond);
+  }
+
+public:
+  /// \brief Create an assume intrinsic call that represents an alignment
+  /// assumption on the provided pointer.
+  ///
+  /// An optional offset can be provided, and if it is provided, the offset
+  /// must be subtracted from the provided pointer to get the pointer with the
+  /// specified alignment.
+  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
+                                      unsigned Alignment,
+                                      Value *OffsetValue = nullptr) {
+    assert(isa<PointerType>(PtrValue->getType()) &&
+           "trying to create an alignment assumption on a non-pointer?");
+    auto *PtrTy = cast<PointerType>(PtrValue->getType());
+    Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+
+    Value *Mask = ConstantInt::get(IntPtrTy, Alignment > 0 ? Alignment - 1 : 0);
+    return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
+                                           OffsetValue);
+  }
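+
+  // Illustrative IR for a 16-byte alignment assumption on i8* %p with no
+  // offset (value names follow the helper above); the mask is Alignment - 1:
+  //
+  //   %ptrint = ptrtoint i8* %p to i64
+  //   %maskedptr = and i64 %ptrint, 15
+  //   %maskcond = icmp eq i64 %maskedptr, 0
+  //   call void @llvm.assume(i1 %maskcond)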
+
+  /// \brief Create an assume intrinsic call that represents an alignment
+  /// assumption on the provided pointer.
+  ///
+  /// An optional offset can be provided, and if it is provided, the offset
+  /// must be subtracted from the provided pointer to get the pointer with the
+  /// specified alignment.
+  ///
+  /// This overload handles the condition where the Alignment is dependent
+  /// on an existing value rather than a static value.
+  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
+                                      Value *Alignment,
+                                      Value *OffsetValue = nullptr) {
+    assert(isa<PointerType>(PtrValue->getType()) &&
+           "trying to create an alignment assumption on a non-pointer?");
+    auto *PtrTy = cast<PointerType>(PtrValue->getType());
+    Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+
+    if (Alignment->getType() != IntPtrTy)
+      Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ true,
+                                "alignmentcast");
+    Value *IsPositive =
+        CreateICmp(CmpInst::ICMP_SGT, Alignment,
+                   ConstantInt::get(Alignment->getType(), 0), "ispositive");
+    Value *PositiveMask =
+        CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "positivemask");
+    Value *Mask = CreateSelect(IsPositive, PositiveMask,
+                               ConstantInt::get(IntPtrTy, 0), "mask");
+
+    return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
+                                           OffsetValue);
+  }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_IRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/IR/IRPrintingPasses.h b/linux-x64/clang/include/llvm/IR/IRPrintingPasses.h
new file mode 100644
index 0000000..0825e06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IRPrintingPasses.h
@@ -0,0 +1,95 @@
+//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines passes to print out IR in various granularities. The
+/// PrintModulePass pass simply prints out the entire module when it is
+/// executed. The PrintFunctionPass class is designed to be pipelined with
+/// other FunctionPasses, and prints out the functions of the module as they
+/// are processed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IRPRINTINGPASSES_H
+#define LLVM_IR_IRPRINTINGPASSES_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class Module;
+class ModulePass;
+class PreservedAnalyses;
+class raw_ostream;
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
+
+/// \brief Create and return a pass that writes the module to the specified
+/// \c raw_ostream.
+ModulePass *createPrintModulePass(raw_ostream &OS,
+                                  const std::string &Banner = "",
+                                  bool ShouldPreserveUseListOrder = false);
+
+/// \brief Create and return a pass that prints functions to the specified
+/// \c raw_ostream as they are processed.
+FunctionPass *createPrintFunctionPass(raw_ostream &OS,
+                                      const std::string &Banner = "");
+
+/// \brief Create and return a pass that prints basic blocks to the specified
+/// \c raw_ostream as they are processed.
+BasicBlockPass *createPrintBasicBlockPass(raw_ostream &OS,
+                                          const std::string &Banner = "");
+
+/// Print out the name of an LLVM value without any prefixes.
+///
+/// The name is surrounded with double quotes and escaped if it has any
+/// special or non-printable characters in it.
+void printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name);
+
+/// \brief Pass for printing a Module as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintModulePass {
+  raw_ostream &OS;
+  std::string Banner;
+  bool ShouldPreserveUseListOrder;
+
+public:
+  PrintModulePass();
+  PrintModulePass(raw_ostream &OS, const std::string &Banner = "",
+                  bool ShouldPreserveUseListOrder = false);
+
+  PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
+
+  static StringRef name() { return "PrintModulePass"; }
+};
+
+/// \brief Pass for printing a Function as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintFunctionPass {
+  raw_ostream &OS;
+  std::string Banner;
+
+public:
+  PrintFunctionPass();
+  PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");
+
+  PreservedAnalyses run(Function &F, AnalysisManager<Function> &);
+
+  static StringRef name() { return "PrintFunctionPass"; }
+};
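+
+// Usage sketch for the new pass manager (assumes the usual PassBuilder and
+// analysis-manager setup elsewhere); the legacy pass manager should use the
+// create...Pass functions above instead:
+//
+//   llvm::ModulePassManager MPM;
+//   MPM.addPass(llvm::PrintModulePass(llvm::errs(), "; module dump"));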
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/InlineAsm.h b/linux-x64/clang/include/llvm/IR/InlineAsm.h
new file mode 100644
index 0000000..1519a45
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/InlineAsm.h
@@ -0,0 +1,366 @@
+//===- llvm/InlineAsm.h - Class to represent inline asm strings -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents the inline asm strings, which are Value*'s that are
+// used as the callee operand of call instructions.  InlineAsm's are uniqued
+// like constants, and created via InlineAsm::get(...).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INLINEASM_H
+#define LLVM_IR_INLINEASM_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Value.h"
+#include <cassert>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class FunctionType;
+class PointerType;
+template <class ConstantClass> class ConstantUniqueMap;
+
+class InlineAsm final : public Value {
+public:
+  enum AsmDialect {
+    AD_ATT,
+    AD_Intel
+  };
+
+private:
+  friend struct InlineAsmKeyType;
+  friend class ConstantUniqueMap<InlineAsm>;
+
+  std::string AsmString, Constraints;
+  FunctionType *FTy;
+  bool HasSideEffects;
+  bool IsAlignStack;
+  AsmDialect Dialect;
+
+  InlineAsm(FunctionType *Ty, const std::string &AsmString,
+            const std::string &Constraints, bool hasSideEffects,
+            bool isAlignStack, AsmDialect asmDialect);
+
+  /// When the ConstantUniqueMap merges two types and makes two InlineAsms
+  /// identical, it destroys one of them with this method.
+  void destroyConstant();
+
+public:
+  InlineAsm(const InlineAsm &) = delete;
+  InlineAsm &operator=(const InlineAsm &) = delete;
+
+  /// InlineAsm::get - Return the specified uniqued inline asm string.
+  ///
+  static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
+                        StringRef Constraints, bool hasSideEffects,
+                        bool isAlignStack = false,
+                        AsmDialect asmDialect = AD_ATT);
+
+  bool hasSideEffects() const { return HasSideEffects; }
+  bool isAlignStack() const { return IsAlignStack; }
+  AsmDialect getDialect() const { return Dialect; }
+
+  /// getType - InlineAsm's are always pointers.
+  ///
+  PointerType *getType() const {
+    return reinterpret_cast<PointerType*>(Value::getType());
+  }
+
+  /// getFunctionType - InlineAsm's are always pointers to functions.
+  ///
+  FunctionType *getFunctionType() const;
+
+  const std::string &getAsmString() const { return AsmString; }
+  const std::string &getConstraintString() const { return Constraints; }
+
+  /// Verify - This static method can be used by the parser to check to see if
+  /// the specified constraint string is legal for the type.  This returns true
+  /// if legal, false if not.
+  ///
+  static bool Verify(FunctionType *Ty, StringRef Constraints);
+
+  // Constraint String Parsing
+  enum ConstraintPrefix {
+    isInput,            // 'x'
+    isOutput,           // '=x'
+    isClobber           // '~x'
+  };
+
+  using ConstraintCodeVector = std::vector<std::string>;
+
+  struct SubConstraintInfo {
+    /// MatchingInput - If this is not -1, this is an output constraint where an
+    /// input constraint is required to match it (e.g. "0").  The value is the
+    /// constraint number that matches this one (for example, if this is
+    /// constraint #0 and constraint #4 has the value "0", this will be 4).
+    int MatchingInput = -1;
+
+    /// Code - The constraint code, either the register name (in braces) or the
+    /// constraint letter/number.
+    ConstraintCodeVector Codes;
+
+    /// Default constructor.
+    SubConstraintInfo() = default;
+  };
+
+  using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
+  struct ConstraintInfo;
+  using ConstraintInfoVector = std::vector<ConstraintInfo>;
+
+  struct ConstraintInfo {
+    /// Type - The basic type of the constraint: input/output/clobber
+    ///
+    ConstraintPrefix Type = isInput;
+
+    /// isEarlyClobber - "&": output operand writes result before inputs are all
+    /// read.  This is only ever set for an output operand.
+    bool isEarlyClobber = false;
+
+    /// MatchingInput - If this is not -1, this is an output constraint where an
+    /// input constraint is required to match it (e.g. "0").  The value is the
+    /// constraint number that matches this one (for example, if this is
+    /// constraint #0 and constraint #4 has the value "0", this will be 4).
+    int MatchingInput = -1;
+
+    /// hasMatchingInput - Return true if this is an output constraint that has
+    /// a matching input constraint.
+    bool hasMatchingInput() const { return MatchingInput != -1; }
+
+    /// isCommutative - This is set to true for a constraint that is commutative
+    /// with the next operand.
+    bool isCommutative = false;
+
+    /// isIndirect - True if this operand is an indirect operand.  This means
+    /// that the address of the source or destination is present in the call
+    /// instruction, instead of it being returned or passed in explicitly.  This
+    /// is represented with a '*' in the asm string.
+    bool isIndirect = false;
+
+    /// Code - The constraint code, either the register name (in braces) or the
+    /// constraint letter/number.
+    ConstraintCodeVector Codes;
+
+    /// isMultipleAlternative - '|': has multiple-alternative constraints.
+    bool isMultipleAlternative = false;
+
+    /// multipleAlternatives - If there are multiple alternative constraints,
+    /// this array will contain them.  Otherwise it will be empty.
+    SubConstraintInfoVector multipleAlternatives;
+
+    /// The currently selected alternative constraint index.
+    unsigned currentAlternativeIndex = 0;
+
+    /// Default constructor.
+    ConstraintInfo() = default;
+
+    /// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
+    /// fields in this structure.  If the constraint string is not understood,
+    /// return true; otherwise return false.
+    bool Parse(StringRef Str, ConstraintInfoVector &ConstraintsSoFar);
+
+    /// selectAlternative - Point this constraint to the alternative constraint
+    /// indicated by the index.
+    void selectAlternative(unsigned index);
+  };
+
+  /// ParseConstraints - Split up the constraint string into the specific
+  /// constraints and their prefixes.  If this returns an empty vector, and if
+  /// the constraint string itself isn't empty, there was an error parsing.
+  static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);
+
+  /// ParseConstraints - Parse the constraints of this InlineAsm object,
+  /// returning them the same way that ParseConstraints(str) does.
+  ConstraintInfoVector ParseConstraints() const {
+    return ParseConstraints(Constraints);
+  }
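+
+  // Example (illustrative): for the constraint string "=r,r,~{memory}",
+  // ParseConstraints returns three ConstraintInfos, roughly:
+  //   #0: Type == isOutput,  Codes == {"r"}
+  //   #1: Type == isInput,   Codes == {"r"}
+  //   #2: Type == isClobber, Codes == {"{memory}"}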
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() == Value::InlineAsmVal;
+  }
+
+  // These are helper methods for dealing with flags in the INLINEASM SDNode
+  // in the backend.
+  //
+  // The encoding of the flag word is currently:
+  //   Bits 2-0 - A Kind_* value indicating the kind of the operand.
+  //   Bits 15-3 - The number of SDNode operands associated with this inline
+  //               assembly operand.
+  //   If bit 31 is set:
+  //     Bit 30-16 - The operand number that this operand must match.
+  //                 When bits 2-0 are Kind_Mem, the Constraint_* value must be
+  //                 obtained from the flags for this operand number.
+  //   Else if bits 2-0 are Kind_Mem:
+  //     Bit 30-16 - A Constraint_* value indicating the original constraint
+  //                 code.
+  //   Else:
+  //     Bit 30-16 - The register class ID to use for the operand.
+
+  enum : uint32_t {
+    // Fixed operands on an INLINEASM SDNode.
+    Op_InputChain = 0,
+    Op_AsmString = 1,
+    Op_MDNode = 2,
+    Op_ExtraInfo = 3,    // HasSideEffects, IsAlignStack, AsmDialect.
+    Op_FirstOperand = 4,
+
+    // Fixed operands on an INLINEASM MachineInstr.
+    MIOp_AsmString = 0,
+    MIOp_ExtraInfo = 1,    // HasSideEffects, IsAlignStack, AsmDialect.
+    MIOp_FirstOperand = 2,
+
+    // Interpretation of the MIOp_ExtraInfo bit field.
+    Extra_HasSideEffects = 1,
+    Extra_IsAlignStack = 2,
+    Extra_AsmDialect = 4,
+    Extra_MayLoad = 8,
+    Extra_MayStore = 16,
+    Extra_IsConvergent = 32,
+
+    // Inline asm operands map to multiple SDNode / MachineInstr operands.
+    // The first operand is an immediate describing the asm operand; the low
+    // bits are the kind:
+    Kind_RegUse = 1,             // Input register, "r".
+    Kind_RegDef = 2,             // Output register, "=r".
+    Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r".
+    Kind_Clobber = 4,            // Clobbered register, "~r".
+    Kind_Imm = 5,                // Immediate.
+    Kind_Mem = 6,                // Memory operand, "m".
+
+    // Memory constraint codes.
+    // These could be tablegenerated but there's little need to do that since
+    // there's plenty of space in the encoding to support the union of all
+    // constraint codes for all targets.
+    Constraint_Unknown = 0,
+    Constraint_es,
+    Constraint_i,
+    Constraint_m,
+    Constraint_o,
+    Constraint_v,
+    Constraint_Q,
+    Constraint_R,
+    Constraint_S,
+    Constraint_T,
+    Constraint_Um,
+    Constraint_Un,
+    Constraint_Uq,
+    Constraint_Us,
+    Constraint_Ut,
+    Constraint_Uv,
+    Constraint_Uy,
+    Constraint_X,
+    Constraint_Z,
+    Constraint_ZC,
+    Constraint_Zy,
+    Constraints_Max = Constraint_Zy,
+    Constraints_ShiftAmount = 16,
+
+    Flag_MatchingOperand = 0x80000000
+  };
+
+  static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
+    assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
+    assert(Kind >= Kind_RegUse && Kind <= Kind_Mem && "Invalid Kind");
+    return Kind | (NumOps << 3);
+  }
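+
+  // Worked example: a register-use operand covering two SDNode operands
+  // encodes as getFlagWord(Kind_RegUse, 2) == 1 | (2 << 3) == 0x11.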
+
+  static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
+  static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
+  static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+  static bool isRegDefEarlyClobberKind(unsigned Flag) {
+    return getKind(Flag) == Kind_RegDefEarlyClobber;
+  }
+  static bool isClobberKind(unsigned Flag) {
+    return getKind(Flag) == Kind_Clobber;
+  }
+
+  /// getFlagWordForMatchingOp - Augment an existing flag word returned by
+  /// getFlagWord with information indicating that this input operand is tied
+  /// to a previous output operand.
+  static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
+                                           unsigned MatchedOperandNo) {
+    assert(MatchedOperandNo <= 0x7fff && "Too big matched operand");
+    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+    return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16);
+  }
+
+  /// getFlagWordForRegClass - Augment an existing flag word returned by
+  /// getFlagWord with the required register class for the following register
+  /// operands.
+  /// A tied use operand cannot have a register class; use the register class
+  /// from the def operand instead.
+  static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
+    // Store RC + 1, reserve the value 0 to mean 'no register class'.
+    ++RC;
+    assert(!isImmKind(InputFlag) && "Immediates cannot have a register class");
+    assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class");
+    assert(RC <= 0x7fff && "Too large register class ID");
+    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+    return InputFlag | (RC << 16);
+  }
+
+  /// Augment an existing flag word returned by getFlagWord with the constraint
+  /// code for a memory constraint.
+  static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
+    assert(isMemKind(InputFlag) && "InputFlag is not a memory constraint!");
+    assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
+    assert(Constraint <= Constraints_Max && "Unknown constraint ID");
+    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+    return InputFlag | (Constraint << Constraints_ShiftAmount);
+  }
+
+  static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag) {
+    assert(isMemKind(InputFlag));
+    return InputFlag & ~(0x7fff << Constraints_ShiftAmount);
+  }
+
+  static unsigned getKind(unsigned Flags) {
+    return Flags & 7;
+  }
+
+  static unsigned getMemoryConstraintID(unsigned Flag) {
+    assert(isMemKind(Flag));
+    return (Flag >> Constraints_ShiftAmount) & 0x7fff;
+  }
+
+  /// getNumOperandRegisters - Extract the number of registers field from the
+  /// inline asm operand flag.
+  static unsigned getNumOperandRegisters(unsigned Flag) {
+    return (Flag & 0xffff) >> 3;
+  }
+
+  /// isUseOperandTiedToDef - Return true if the flag of the inline asm
+  /// operand indicates it is a use operand that's matched to a def operand.
+  static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) {
+    if ((Flag & Flag_MatchingOperand) == 0)
+      return false;
+    Idx = (Flag & ~Flag_MatchingOperand) >> 16;
+    return true;
+  }
+
+  /// hasRegClassConstraint - Returns true if the flag contains a register
+  /// class constraint.  Sets RC to the register class ID.
+  static bool hasRegClassConstraint(unsigned Flag, unsigned &RC) {
+    if (Flag & Flag_MatchingOperand)
+      return false;
+    unsigned High = Flag >> 16;
+    // getFlagWordForRegClass() uses 0 to mean no register class, and otherwise
+    // stores RC + 1.
+    if (!High)
+      return false;
+    RC = High - 1;
+    return true;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INLINEASM_H
diff --git a/linux-x64/clang/include/llvm/IR/InstIterator.h b/linux-x64/clang/include/llvm/IR/InstIterator.h
new file mode 100644
index 0000000..2988fc9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/InstIterator.h
@@ -0,0 +1,163 @@
+//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of two iterators for iterating over the
+// instructions in a function.  This is effectively a wrapper around a two level
+// iterator that can probably be genericized later.
+//
+// Note that this iterator gets invalidated any time that basic blocks or
+// instructions are moved around.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTITERATOR_H
+#define LLVM_IR_INSTITERATOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include <iterator>
+
+namespace llvm {
+
+// This class implements inst_begin() & inst_end() for
+// inst_iterator and const_inst_iterator.
+//
+template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
+  using BBty = BB_t;
+  using BBIty = BB_i_t;
+  using BIty = BI_t;
+  using IIty = II_t;
+  BB_t *BBs; // BasicBlocksType
+  BB_i_t BB; // BasicBlocksType::iterator
+  BI_t BI;   // BasicBlock::iterator
+
+public:
+  using iterator_category = std::bidirectional_iterator_tag;
+  using value_type = IIty;
+  using difference_type = signed;
+  using pointer = IIty *;
+  using reference = IIty &;
+
+  // Default constructor
+  InstIterator() = default;
+
+  // Copy constructor...
+  template<typename A, typename B, typename C, typename D>
+  InstIterator(const InstIterator<A,B,C,D> &II)
+    : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+  template<typename A, typename B, typename C, typename D>
+  InstIterator(InstIterator<A,B,C,D> &II)
+    : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+  template<class M> InstIterator(M &m)
+    : BBs(&m.getBasicBlockList()), BB(BBs->begin()) {    // begin ctor
+    if (BB != BBs->end()) {
+      BI = BB->begin();
+      advanceToNextBB();
+    }
+  }
+
+  template<class M> InstIterator(M &m, bool)
+    : BBs(&m.getBasicBlockList()), BB(BBs->end()) {    // end ctor
+  }
+
+  // Accessors to get at the underlying iterators...
+  inline BBIty &getBasicBlockIterator()  { return BB; }
+  inline BIty  &getInstructionIterator() { return BI; }
+
+  inline reference operator*()  const { return *BI; }
+  inline pointer operator->() const { return &operator*(); }
+
+  inline bool operator==(const InstIterator &y) const {
+    return BB == y.BB && (BB == BBs->end() || BI == y.BI);
+  }
+  inline bool operator!=(const InstIterator& y) const {
+    return !operator==(y);
+  }
+
+  InstIterator& operator++() {
+    ++BI;
+    advanceToNextBB();
+    return *this;
+  }
+  inline InstIterator operator++(int) {
+    InstIterator tmp = *this; ++*this; return tmp;
+  }
+
+  InstIterator& operator--() {
+    while (BB == BBs->end() || BI == BB->begin()) {
+      --BB;
+      BI = BB->end();
+    }
+    --BI;
+    return *this;
+  }
+  inline InstIterator operator--(int) {
+    InstIterator tmp = *this; --*this; return tmp;
+  }
+
+  inline bool atEnd() const { return BB == BBs->end(); }
+
+private:
+  inline void advanceToNextBB() {
+    // The only way that the iterator could be broken is if it is now pointing
+    // to the end() of the current BasicBlock and there are successor BBs.
+    while (BI == BB->end()) {
+      ++BB;
+      if (BB == BBs->end()) break;
+      BI = BB->begin();
+    }
+  }
+};
+
+using inst_iterator =
+    InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
+                 BasicBlock::iterator, Instruction>;
+using const_inst_iterator =
+    InstIterator<const SymbolTableList<BasicBlock>,
+                 Function::const_iterator, BasicBlock::const_iterator,
+                 const Instruction>;
+using inst_range = iterator_range<inst_iterator>;
+using const_inst_range = iterator_range<const_inst_iterator>;
+
+inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
+inline inst_iterator inst_end(Function *F)   { return inst_iterator(*F, true); }
+inline inst_range instructions(Function *F) {
+  return inst_range(inst_begin(F), inst_end(F));
+}
+inline const_inst_iterator inst_begin(const Function *F) {
+  return const_inst_iterator(*F);
+}
+inline const_inst_iterator inst_end(const Function *F) {
+  return const_inst_iterator(*F, true);
+}
+inline const_inst_range instructions(const Function *F) {
+  return const_inst_range(inst_begin(F), inst_end(F));
+}
+inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); }
+inline inst_iterator inst_end(Function &F)   { return inst_iterator(F, true); }
+inline inst_range instructions(Function &F) {
+  return inst_range(inst_begin(F), inst_end(F));
+}
+inline const_inst_iterator inst_begin(const Function &F) {
+  return const_inst_iterator(F);
+}
+inline const_inst_iterator inst_end(const Function &F) {
+  return const_inst_iterator(F, true);
+}
+inline const_inst_range instructions(const Function &F) {
+  return const_inst_range(inst_begin(F), inst_end(F));
+}
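+
+// Usage sketch: counting the store instructions in a function, assuming F is
+// a valid llvm::Function and llvm/IR/Instructions.h is also included.
+//
+//   unsigned countStores(llvm::Function &F) {
+//     unsigned N = 0;
+//     for (llvm::Instruction &I : llvm::instructions(F))
+//       if (llvm::isa<llvm::StoreInst>(I))
+//         ++N;
+//     return N;
+//   }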
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INSTITERATOR_H
diff --git a/linux-x64/clang/include/llvm/IR/InstVisitor.h b/linux-x64/clang/include/llvm/IR/InstVisitor.h
new file mode 100644
index 0000000..5557981
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/InstVisitor.h
@@ -0,0 +1,298 @@
+//===- InstVisitor.h - Instruction visitor templates ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_IR_INSTVISITOR_H
+#define LLVM_IR_INSTVISITOR_H
+
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+// We operate on opaque instruction classes, so forward declare all instruction
+// types now...
+//
+#define HANDLE_INST(NUM, OPCODE, CLASS)   class CLASS;
+#include "llvm/IR/Instruction.def"
+
+#define DELEGATE(CLASS_TO_VISIT) \
+  return static_cast<SubClass*>(this)-> \
+               visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I))
+
+
+/// @brief Base class for instruction visitors
+///
+/// Instruction visitors are used when you want to perform different actions
+/// for different kinds of instructions without having to use lots of casts
+/// and a big switch statement (in your code, that is).
+///
+/// To define your own visitor, inherit from this class, specifying your
+/// new type for the 'SubClass' template parameter, and "override" visitXXX
+/// functions in your class. I say "override" because this class is defined
+/// in terms of statically resolved overloading, not virtual functions.
+///
+/// For example, here is a visitor that counts the number of alloca
+/// instructions processed:
+///
+///  /// Declare the class.  Note that we derive from InstVisitor instantiated
+///  /// with _our new subclasses_ type.
+///  ///
+///  struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
+///    unsigned Count;
+///    CountAllocaVisitor() : Count(0) {}
+///
+///    void visitAllocaInst(AllocaInst &AI) { ++Count; }
+///  };
+///
+///  And this class would be used like this:
+///    CountAllocaVisitor CAV;
+///    CAV.visit(function);
+///    NumAllocas = CAV.Count;
+///
+/// The class defines 'visit' methods for Instruction, and also for BasicBlock,
+/// Function, and Module, which recursively process all contained instructions.
+///
+/// Note that if you don't implement visitXXX for some instruction type,
+/// the visitXXX method for the instruction's superclass will be invoked. So
+/// if instructions are added in the future, they will be automatically
+/// supported, if you handle one of their superclasses.
+///
+/// The optional second template argument specifies the type that instruction
+/// visitation functions should return. If you specify this, you *MUST* provide
+/// an implementation of visitInstruction though!
+///
+/// Note that this class is specifically designed as a template to avoid
+/// virtual function call overhead.  Defining and using an InstVisitor is just
+/// as efficient as having your own switch statement over the instruction
+/// opcode.
+template<typename SubClass, typename RetTy=void>
+class InstVisitor {
+  //===--------------------------------------------------------------------===//
+  // Interface code - This is the public interface of the InstVisitor that you
+  // use to visit instructions...
+  //
+
+public:
+  // Generic visit method - Allow visitation to all instructions in a range
+  template<class Iterator>
+  void visit(Iterator Start, Iterator End) {
+    while (Start != End)
+      static_cast<SubClass*>(this)->visit(*Start++);
+  }
+
+  // Define visitors for functions and basic blocks...
+  //
+  void visit(Module &M) {
+    static_cast<SubClass*>(this)->visitModule(M);
+    visit(M.begin(), M.end());
+  }
+  void visit(Function &F) {
+    static_cast<SubClass*>(this)->visitFunction(F);
+    visit(F.begin(), F.end());
+  }
+  void visit(BasicBlock &BB) {
+    static_cast<SubClass*>(this)->visitBasicBlock(BB);
+    visit(BB.begin(), BB.end());
+  }
+
+  // Forwarding functions so that the user can visit with pointers AND refs.
+  void visit(Module       *M)  { visit(*M); }
+  void visit(Function     *F)  { visit(*F); }
+  void visit(BasicBlock   *BB) { visit(*BB); }
+  RetTy visit(Instruction *I)  { return visit(*I); }
+
+  // visit - Finally, code to visit an instruction...
+  //
+  RetTy visit(Instruction &I) {
+    static_assert(std::is_base_of<InstVisitor, SubClass>::value,
+                  "Must pass the derived type to this template!");
+
+    switch (I.getOpcode()) {
+    default: llvm_unreachable("Unknown instruction type encountered!");
+      // Build the switch statement using the Instruction.def file...
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+    case Instruction::OPCODE: return \
+           static_cast<SubClass*>(this)-> \
+                      visit##OPCODE(static_cast<CLASS&>(I));
+#include "llvm/IR/Instruction.def"
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Visitation functions... these functions provide default fallbacks in case
+  // the user does not specify what to do for a particular instruction type.
+  // The default behavior is to generalize the instruction type to its subtype
+  // and try visiting the subtype.  All of this should be inlined perfectly,
+  // because there are no virtual functions to get in the way.
+  //
+
+  // When visiting a module, function or basic block directly, these methods get
+  // called to indicate when transitioning into a new unit.
+  //
+  void visitModule    (Module &M) {}
+  void visitFunction  (Function &F) {}
+  void visitBasicBlock(BasicBlock &BB) {}
+
+  // Define instruction specific visitor functions that can be overridden to
+  // handle SPECIFIC instructions.  These functions automatically define
+  // visitMul to proxy to visitBinaryOperator for instance in case the user does
+  // not need this generality.
+  //
+  // These functions can also implement fan-out, when a single opcode and
+  // instruction have multiple more specific Instruction subclasses. The Call
+  // instruction currently supports this. We implement that by redirecting that
+  // instruction to a special delegation helper.
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+    RetTy visit##OPCODE(CLASS &I) { \
+      if (NUM == Instruction::Call) \
+        return delegateCallInst(I); \
+      else \
+        DELEGATE(CLASS); \
+    }
+#include "llvm/IR/Instruction.def"
+
+  // Specific Instruction type classes... note that all of the casts are
+  // necessary because we use the instruction classes as opaque types...
+  //
+  RetTy visitReturnInst(ReturnInst &I)            { DELEGATE(TerminatorInst);}
+  RetTy visitBranchInst(BranchInst &I)            { DELEGATE(TerminatorInst);}
+  RetTy visitSwitchInst(SwitchInst &I)            { DELEGATE(TerminatorInst);}
+  RetTy visitIndirectBrInst(IndirectBrInst &I)    { DELEGATE(TerminatorInst);}
+  RetTy visitResumeInst(ResumeInst &I)            { DELEGATE(TerminatorInst);}
+  RetTy visitUnreachableInst(UnreachableInst &I)  { DELEGATE(TerminatorInst);}
+  RetTy visitCleanupReturnInst(CleanupReturnInst &I) { DELEGATE(TerminatorInst);}
+  RetTy visitCatchReturnInst(CatchReturnInst &I)  { DELEGATE(TerminatorInst); }
+  RetTy visitCatchSwitchInst(CatchSwitchInst &I)  { DELEGATE(TerminatorInst);}
+  RetTy visitICmpInst(ICmpInst &I)                { DELEGATE(CmpInst);}
+  RetTy visitFCmpInst(FCmpInst &I)                { DELEGATE(CmpInst);}
+  RetTy visitAllocaInst(AllocaInst &I)            { DELEGATE(UnaryInstruction);}
+  RetTy visitLoadInst(LoadInst     &I)            { DELEGATE(UnaryInstruction);}
+  RetTy visitStoreInst(StoreInst   &I)            { DELEGATE(Instruction);}
+  RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
+  RetTy visitAtomicRMWInst(AtomicRMWInst &I)      { DELEGATE(Instruction);}
+  RetTy visitFenceInst(FenceInst   &I)            { DELEGATE(Instruction);}
+  RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
+  RetTy visitPHINode(PHINode       &I)            { DELEGATE(Instruction);}
+  RetTy visitTruncInst(TruncInst &I)              { DELEGATE(CastInst);}
+  RetTy visitZExtInst(ZExtInst &I)                { DELEGATE(CastInst);}
+  RetTy visitSExtInst(SExtInst &I)                { DELEGATE(CastInst);}
+  RetTy visitFPTruncInst(FPTruncInst &I)          { DELEGATE(CastInst);}
+  RetTy visitFPExtInst(FPExtInst &I)              { DELEGATE(CastInst);}
+  RetTy visitFPToUIInst(FPToUIInst &I)            { DELEGATE(CastInst);}
+  RetTy visitFPToSIInst(FPToSIInst &I)            { DELEGATE(CastInst);}
+  RetTy visitUIToFPInst(UIToFPInst &I)            { DELEGATE(CastInst);}
+  RetTy visitSIToFPInst(SIToFPInst &I)            { DELEGATE(CastInst);}
+  RetTy visitPtrToIntInst(PtrToIntInst &I)        { DELEGATE(CastInst);}
+  RetTy visitIntToPtrInst(IntToPtrInst &I)        { DELEGATE(CastInst);}
+  RetTy visitBitCastInst(BitCastInst &I)          { DELEGATE(CastInst);}
+  RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
+  RetTy visitSelectInst(SelectInst &I)            { DELEGATE(Instruction);}
+  RetTy visitVAArgInst(VAArgInst   &I)            { DELEGATE(UnaryInstruction);}
+  RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
+  RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
+  RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
+  RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
+  RetTy visitInsertValueInst(InsertValueInst &I)  { DELEGATE(Instruction); }
+  RetTy visitLandingPadInst(LandingPadInst &I)    { DELEGATE(Instruction); }
+  RetTy visitFuncletPadInst(FuncletPadInst &I) { DELEGATE(Instruction); }
+  RetTy visitCleanupPadInst(CleanupPadInst &I) { DELEGATE(FuncletPadInst); }
+  RetTy visitCatchPadInst(CatchPadInst &I)     { DELEGATE(FuncletPadInst); }
+
+  // Handle the special intrinsic instruction classes.
+  RetTy visitDbgDeclareInst(DbgDeclareInst &I)    { DELEGATE(DbgInfoIntrinsic);}
+  RetTy visitDbgValueInst(DbgValueInst &I)        { DELEGATE(DbgInfoIntrinsic);}
+  RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); }
+  RetTy visitMemSetInst(MemSetInst &I)            { DELEGATE(MemIntrinsic); }
+  RetTy visitMemCpyInst(MemCpyInst &I)            { DELEGATE(MemTransferInst); }
+  RetTy visitMemMoveInst(MemMoveInst &I)          { DELEGATE(MemTransferInst); }
+  RetTy visitMemTransferInst(MemTransferInst &I)  { DELEGATE(MemIntrinsic); }
+  RetTy visitMemIntrinsic(MemIntrinsic &I)        { DELEGATE(IntrinsicInst); }
+  RetTy visitVAStartInst(VAStartInst &I)          { DELEGATE(IntrinsicInst); }
+  RetTy visitVAEndInst(VAEndInst &I)              { DELEGATE(IntrinsicInst); }
+  RetTy visitVACopyInst(VACopyInst &I)            { DELEGATE(IntrinsicInst); }
+  RetTy visitIntrinsicInst(IntrinsicInst &I)      { DELEGATE(CallInst); }
+
+  // Call and Invoke are slightly different as they delegate first through
+  // a generic CallSite visitor.
+  RetTy visitCallInst(CallInst &I) {
+    return static_cast<SubClass*>(this)->visitCallSite(&I);
+  }
+  RetTy visitInvokeInst(InvokeInst &I) {
+    return static_cast<SubClass*>(this)->visitCallSite(&I);
+  }
+
+  // Next level propagators: If the user does not overload a specific
+  // instruction type, they can overload one of these to get the whole class
+  // of instructions...
+  //
+  RetTy visitCastInst(CastInst &I)                { DELEGATE(UnaryInstruction);}
+  RetTy visitBinaryOperator(BinaryOperator &I)    { DELEGATE(Instruction);}
+  RetTy visitCmpInst(CmpInst &I)                  { DELEGATE(Instruction);}
+  RetTy visitTerminatorInst(TerminatorInst &I)    { DELEGATE(Instruction);}
+  RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
+
+  // Provide a special visitor for a 'callsite' that visits both calls and
+  // invokes. When unimplemented, properly delegates to either the terminator or
+  // regular instruction visitor.
+  RetTy visitCallSite(CallSite CS) {
+    assert(CS);
+    Instruction &I = *CS.getInstruction();
+    if (CS.isCall())
+      DELEGATE(Instruction);
+
+    assert(CS.isInvoke());
+    DELEGATE(TerminatorInst);
+  }
+
+  // If the user wants a 'default' case, they can choose to override this
+  // function.  If this function is not overloaded in the user's subclass, then
+  // this instruction just gets ignored.
+  //
+  // Note that you MUST override this function if your return type is not void.
+  //
+  void visitInstruction(Instruction &I) {}  // Ignore unhandled instructions
+
+private:
+  // Special helper function to delegate to CallInst subclass visitors.
+  RetTy delegateCallInst(CallInst &I) {
+    if (const Function *F = I.getCalledFunction()) {
+      switch (F->getIntrinsicID()) {
+      default:                     DELEGATE(IntrinsicInst);
+      case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
+      case Intrinsic::dbg_value:   DELEGATE(DbgValueInst);
+      case Intrinsic::memcpy:      DELEGATE(MemCpyInst);
+      case Intrinsic::memmove:     DELEGATE(MemMoveInst);
+      case Intrinsic::memset:      DELEGATE(MemSetInst);
+      case Intrinsic::vastart:     DELEGATE(VAStartInst);
+      case Intrinsic::vaend:       DELEGATE(VAEndInst);
+      case Intrinsic::vacopy:      DELEGATE(VACopyInst);
+      case Intrinsic::not_intrinsic: break;
+      }
+    }
+    DELEGATE(CallInst);
+  }
+
+  // An overload that will never actually be called; it is used only from dead
+  // code in the dispatching from opcodes to instruction subclasses.
+  RetTy delegateCallInst(Instruction &I) {
+    llvm_unreachable("delegateCallInst called for non-CallInst");
+  }
+};
+
+#undef DELEGATE
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/InstrTypes.h b/linux-x64/clang/include/llvm/IR/InstrTypes.h
new file mode 100644
index 0000000..0243c4c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/InstrTypes.h
@@ -0,0 +1,1694 @@
+//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various meta classes of instructions that exist in the VM
+// representation.  Specific concrete subclasses of these may be found in the
+// i*.h files...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRTYPES_H
+#define LLVM_IR_INSTRTYPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//                            TerminatorInst Class
+//===----------------------------------------------------------------------===//
+
+/// Subclasses of this class are all able to terminate a basic
+/// block. Thus, these are all the flow-control operations.
+///
+class TerminatorInst : public Instruction {
+protected:
+  TerminatorInst(Type *Ty, Instruction::TermOps iType,
+                 Use *Ops, unsigned NumOps,
+                 Instruction *InsertBefore = nullptr)
+    : Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}
+
+  TerminatorInst(Type *Ty, Instruction::TermOps iType,
+                 Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
+    : Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}
+
+public:
+  /// Return the number of successors that this terminator has.
+  unsigned getNumSuccessors() const;
+
+  /// Return the specified successor.
+  BasicBlock *getSuccessor(unsigned idx) const;
+
+  /// Update the specified successor to point at the provided block.
+  void setSuccessor(unsigned idx, BasicBlock *B);
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->isTerminator();
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+  /// \brief Returns true if this terminator relates to exception handling.
+  bool isExceptional() const {
+    switch (getOpcode()) {
+    case Instruction::CatchSwitch:
+    case Instruction::CatchRet:
+    case Instruction::CleanupRet:
+    case Instruction::Invoke:
+    case Instruction::Resume:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // succ_iterator definition
+  //===--------------------------------------------------------------------===//
+
+  template <class Term, class BB> // Successor Iterator
+  class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
+                                            int, BB *, BB *> {
+    using super =
+        std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>;
+
+  public:
+    using pointer = typename super::pointer;
+    using reference = typename super::reference;
+
+  private:
+    Term TermInst;
+    unsigned idx;
+    using Self = SuccIterator<Term, BB>;
+
+    inline bool index_is_valid(unsigned idx) {
+      return idx < TermInst->getNumSuccessors();
+    }
+
+    /// \brief Proxy object to allow write access in operator[]
+    class SuccessorProxy {
+      Self it;
+
+    public:
+      explicit SuccessorProxy(const Self &it) : it(it) {}
+
+      SuccessorProxy(const SuccessorProxy &) = default;
+
+      SuccessorProxy &operator=(SuccessorProxy r) {
+        *this = reference(r);
+        return *this;
+      }
+
+      SuccessorProxy &operator=(reference r) {
+        it.TermInst->setSuccessor(it.idx, r);
+        return *this;
+      }
+
+      operator reference() const { return *it; }
+    };
+
+  public:
+    // begin iterator
+    explicit inline SuccIterator(Term T) : TermInst(T), idx(0) {}
+    // end iterator
+    inline SuccIterator(Term T, bool) : TermInst(T) {
+      if (TermInst)
+        idx = TermInst->getNumSuccessors();
+      else
+        // Term == NULL happens if a basic block is not fully constructed and
+        // consequently getTerminator() returns NULL. In this case we construct
+        // a SuccIterator which describes a basic block that has zero
+        // successors.
+        // Defining SuccIterator for incomplete and malformed CFGs is especially
+        // useful for debugging.
+        idx = 0;
+    }
+
+    /// Return the index of the successor this iterator refers to; useful for
+    /// code that wants to operate on terminator instructions directly.
+    unsigned getSuccessorIndex() const { return idx; }
+
+    inline bool operator==(const Self &x) const { return idx == x.idx; }
+    inline bool operator!=(const Self &x) const { return !operator==(x); }
+
+    inline reference operator*() const { return TermInst->getSuccessor(idx); }
+    inline pointer operator->() const { return operator*(); }
+
+    inline Self &operator++() {
+      ++idx;
+      return *this;
+    } // Preincrement
+
+    inline Self operator++(int) { // Postincrement
+      Self tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    inline Self &operator--() {
+      --idx;
+      return *this;
+    }                             // Predecrement
+    inline Self operator--(int) { // Postdecrement
+      Self tmp = *this;
+      --*this;
+      return tmp;
+    }
+
+    inline bool operator<(const Self &x) const {
+      assert(TermInst == x.TermInst &&
+             "Cannot compare iterators of different blocks!");
+      return idx < x.idx;
+    }
+
+    inline bool operator<=(const Self &x) const {
+      assert(TermInst == x.TermInst &&
+             "Cannot compare iterators of different blocks!");
+      return idx <= x.idx;
+    }
+    inline bool operator>=(const Self &x) const {
+      assert(TermInst == x.TermInst &&
+             "Cannot compare iterators of different blocks!");
+      return idx >= x.idx;
+    }
+
+    inline bool operator>(const Self &x) const {
+      assert(TermInst == x.TermInst &&
+             "Cannot compare iterators of different blocks!");
+      return idx > x.idx;
+    }
+
+    inline Self &operator+=(int Right) {
+      unsigned new_idx = idx + Right;
+      assert(index_is_valid(new_idx) && "Iterator index out of bound");
+      idx = new_idx;
+      return *this;
+    }
+
+    inline Self operator+(int Right) const {
+      Self tmp = *this;
+      tmp += Right;
+      return tmp;
+    }
+
+    inline Self &operator-=(int Right) { return operator+=(-Right); }
+
+    inline Self operator-(int Right) const { return operator+(-Right); }
+
+    inline int operator-(const Self &x) const {
+      assert(TermInst == x.TermInst &&
+             "Cannot work on iterators of different blocks!");
+      int distance = idx - x.idx;
+      return distance;
+    }
+
+    inline SuccessorProxy operator[](int offset) {
+      Self tmp = *this;
+      tmp += offset;
+      return SuccessorProxy(tmp);
+    }
+
+    /// Get the source BB of this iterator.
+    inline BB *getSource() {
+      assert(TermInst && "Source not available, if basic block was malformed");
+      return TermInst->getParent();
+    }
+  };
+
+  using succ_iterator = SuccIterator<TerminatorInst *, BasicBlock>;
+  using succ_const_iterator =
+      SuccIterator<const TerminatorInst *, const BasicBlock>;
+  using succ_range = iterator_range<succ_iterator>;
+  using succ_const_range = iterator_range<succ_const_iterator>;
+
+private:
+  inline succ_iterator succ_begin() { return succ_iterator(this); }
+  inline succ_const_iterator succ_begin() const {
+    return succ_const_iterator(this);
+  }
+  inline succ_iterator succ_end() { return succ_iterator(this, true); }
+  inline succ_const_iterator succ_end() const {
+    return succ_const_iterator(this, true);
+  }
+
+public:
+  inline succ_range successors() {
+    return succ_range(succ_begin(), succ_end());
+  }
+  inline succ_const_range successors() const {
+    return succ_const_range(succ_begin(), succ_end());
+  }
+};
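+
+// A minimal usage sketch for the public successor range above; `TI` is a
+// hypothetical, fully constructed TerminatorInst from surrounding code:
+//
+//   for (BasicBlock *Succ : TI->successors())
+//     Worklist.push_back(Succ); // Worklist is likewise hypothetical.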
+
+//===----------------------------------------------------------------------===//
+//                          UnaryInstruction Class
+//===----------------------------------------------------------------------===//
+
+class UnaryInstruction : public Instruction {
+protected:
+  UnaryInstruction(Type *Ty, unsigned iType, Value *V,
+                   Instruction *IB = nullptr)
+    : Instruction(Ty, iType, &Op<0>(), 1, IB) {
+    Op<0>() = V;
+  }
+  UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
+    : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
+    Op<0>() = V;
+  }
+
+public:
+  // allocate space for exactly one operand
+  void *operator new(size_t s) {
+    return User::operator new(s, 1);
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Alloca ||
+           I->getOpcode() == Instruction::Load ||
+           I->getOpcode() == Instruction::VAArg ||
+           I->getOpcode() == Instruction::ExtractValue ||
+           (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<UnaryInstruction> :
+  public FixedNumOperandTraits<UnaryInstruction, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
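+
+// Sketch: the classof overloads above let isa/dyn_cast treat every
+// single-operand opcode uniformly (`I` is a hypothetical Instruction *):
+//
+//   if (auto *UI = dyn_cast<UnaryInstruction>(I))
+//     Value *Src = UI->getOperand(0); // the unique operand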
+
+//===----------------------------------------------------------------------===//
+//                           BinaryOperator Class
+//===----------------------------------------------------------------------===//
+
+class BinaryOperator : public Instruction {
+  void AssertOK();
+
+protected:
+  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
+                 const Twine &Name, Instruction *InsertBefore);
+  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
+                 const Twine &Name, BasicBlock *InsertAtEnd);
+
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  BinaryOperator *cloneImpl() const;
+
+public:
+  // allocate space for exactly two operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 2);
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Construct a binary instruction, given the opcode and the two
+  /// operands.  Optionally (if InsertBefore is specified) insert the
+  /// instruction into a BasicBlock right before the specified instruction.
+  /// The specified
+  /// Instruction is allowed to be a dereferenced end iterator.
+  ///
+  static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
+                                const Twine &Name = Twine(),
+                                Instruction *InsertBefore = nullptr);
+
+  /// Construct a binary instruction, given the opcode and the two
+  /// operands.  Also automatically insert this instruction to the end of the
+  /// BasicBlock specified.
+  ///
+  static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
+                                const Twine &Name, BasicBlock *InsertAtEnd);
+
+  /// These methods just forward to Create, and are useful when you
+  /// statically know what type of instruction you're going to create.  These
+  /// helpers just save some typing.
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+                                     const Twine &Name = "") {\
+    return Create(Instruction::OPC, V1, V2, Name);\
+  }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+                                     const Twine &Name, BasicBlock *BB) {\
+    return Create(Instruction::OPC, V1, V2, Name, BB);\
+  }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+                                     const Twine &Name, Instruction *I) {\
+    return Create(Instruction::OPC, V1, V2, Name, I);\
+  }
+#include "llvm/IR/Instruction.def"
+
+  static BinaryOperator *CreateWithCopiedFlags(BinaryOps Opc,
+                                               Value *V1, Value *V2,
+                                               BinaryOperator *CopyBO,
+                                               const Twine &Name = "") {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name);
+    BO->copyIRFlags(CopyBO);
+    return BO;
+  }
+
+  static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
+                                       BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
+  }
+  static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
+                                       BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
+  }
+  static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
+                                       BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
+  }
+  static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
+                                       BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
+  }
+  static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
+                                       BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
+  }
+  static BinaryOperator *CreateFNegFMF(Value *Op, BinaryOperator *FMFSource,
+                                       const Twine &Name = "") {
+    Value *Zero = ConstantFP::getNegativeZero(Op->getType());
+    return CreateWithCopiedFlags(Instruction::FSub, Zero, Op, FMFSource,
+                                 Name);
+  }
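+
+  // Sketch of the flag-copying helpers above (hypothetical values): build a
+  // new fadd that inherits the fast-math flags of an existing instruction.
+  //
+  //   BinaryOperator *Sum =
+  //       BinaryOperator::CreateFAddFMF(X, Y, FMFSource, "sum");
+  //   // equivalent to Create(Instruction::FAdd, X, Y, "sum") followed by
+  //   // Sum->copyIRFlags(FMFSource)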
+
+  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name = "") {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name);
+    BO->setHasNoSignedWrap(true);
+    return BO;
+  }
+  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name, BasicBlock *BB) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+    BO->setHasNoSignedWrap(true);
+    return BO;
+  }
+  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name, Instruction *I) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+    BO->setHasNoSignedWrap(true);
+    return BO;
+  }
+
+  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name = "") {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name);
+    BO->setHasNoUnsignedWrap(true);
+    return BO;
+  }
+  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name, BasicBlock *BB) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+    BO->setHasNoUnsignedWrap(true);
+    return BO;
+  }
+  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+                                   const Twine &Name, Instruction *I) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+    BO->setHasNoUnsignedWrap(true);
+    return BO;
+  }
+
+  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+                                     const Twine &Name = "") {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name);
+    BO->setIsExact(true);
+    return BO;
+  }
+  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+                                     const Twine &Name, BasicBlock *BB) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+    BO->setIsExact(true);
+    return BO;
+  }
+  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+                                     const Twine &Name, Instruction *I) {
+    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+    BO->setIsExact(true);
+    return BO;
+  }
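+
+  // Sketch (hypothetical operands): each flag-setting creator above is
+  // shorthand for Create followed by the corresponding setter, e.g.
+  //
+  //   BinaryOperator *Add = BinaryOperator::CreateNSW(Instruction::Add, A, B);
+  //   // same as Create(Instruction::Add, A, B) + setHasNoSignedWrap(true)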
+
+#define DEFINE_HELPERS(OPC, NUWNSWEXACT)                                       \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2,        \
+                                                  const Twine &Name = "") {    \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name);                \
+  }                                                                            \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC(                             \
+      Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) {               \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB);            \
+  }                                                                            \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC(                             \
+      Value *V1, Value *V2, const Twine &Name, Instruction *I) {               \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I);             \
+  }
+
+  DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
+  DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
+  DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
+  DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
+  DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
+  DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
+  DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
+  DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
+
+  DEFINE_HELPERS(SDiv, Exact)  // CreateExactSDiv
+  DEFINE_HELPERS(UDiv, Exact)  // CreateExactUDiv
+  DEFINE_HELPERS(AShr, Exact)  // CreateExactAShr
+  DEFINE_HELPERS(LShr, Exact)  // CreateExactLShr
+
+#undef DEFINE_HELPERS
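+
+  // The DEFINE_HELPERS expansions above provide per-opcode shorthands such
+  // as the following (sketch, hypothetical operands):
+  //
+  //   BinaryOperator *P = BinaryOperator::CreateNUWMul(A, B, "prod");
+  //   BinaryOperator *Q = BinaryOperator::CreateExactSDiv(A, B, "quot");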
+
+  /// Helper functions to construct and inspect unary operations (NEG and NOT)
+  /// via binary operators SUB and XOR:
+  ///
+  /// Create the NEG and NOT instructions out of SUB and XOR instructions.
+  ///
+  static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
+                                   Instruction *InsertBefore = nullptr);
+  static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
+                                   BasicBlock *InsertAtEnd);
+  static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
+                                      Instruction *InsertBefore = nullptr);
+  static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
+                                      BasicBlock *InsertAtEnd);
+  static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
+                                      Instruction *InsertBefore = nullptr);
+  static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
+                                      BasicBlock *InsertAtEnd);
+  static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
+                                    Instruction *InsertBefore = nullptr);
+  static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
+                                    BasicBlock *InsertAtEnd);
+  static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
+                                   Instruction *InsertBefore = nullptr);
+  static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
+                                   BasicBlock *InsertAtEnd);
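+
+  // Sketch (hypothetical `V`): these helpers lower the unary operations to
+  // binary instructions, roughly `sub 0, V` for CreateNeg, `fsub -0.0, V`
+  // for CreateFNeg, and `xor V, -1` for CreateNot:
+  //
+  //   BinaryOperator *Neg = BinaryOperator::CreateNeg(V, "neg");
+  //   BinaryOperator *Not = BinaryOperator::CreateNot(V, "not");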
+
+  /// Check if the given Value is a NEG, FNeg, or NOT instruction.
+  ///
+  static bool isNeg(const Value *V);
+  static bool isFNeg(const Value *V, bool IgnoreZeroSign=false);
+  static bool isNot(const Value *V);
+
+  /// Helper functions to extract the unary argument of a NEG, FNEG or NOT
+  /// operation implemented via Sub, FSub, or Xor.
+  ///
+  static const Value *getNegArgument(const Value *BinOp);
+  static       Value *getNegArgument(      Value *BinOp);
+  static const Value *getFNegArgument(const Value *BinOp);
+  static       Value *getFNegArgument(      Value *BinOp);
+  static const Value *getNotArgument(const Value *BinOp);
+  static       Value *getNotArgument(      Value *BinOp);
+
+  BinaryOps getOpcode() const {
+    return static_cast<BinaryOps>(Instruction::getOpcode());
+  }
+
+  /// Exchange the two operands to this instruction.
+  /// This method is safe to use on any binary instruction and does not
+  /// modify the semantics of the instruction.  If the instruction cannot be
+  /// reversed (i.e., it is a Div), return true.
+  ///
+  bool swapOperands();
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->isBinaryOp();
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<BinaryOperator> :
+  public FixedNumOperandTraits<BinaryOperator, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
+
+//===----------------------------------------------------------------------===//
+//                               CastInst Class
+//===----------------------------------------------------------------------===//
+
+/// This is the base class for all instructions that perform data
+/// casts. It is simply provided so that instruction category testing
+/// can be performed with code like:
+///
+/// if (isa<CastInst>(Instr)) { ... }
+/// @brief Base class of casting instructions.
+class CastInst : public UnaryInstruction {
+protected:
+  /// @brief Constructor with insert-before-instruction semantics for subclasses
+  CastInst(Type *Ty, unsigned iType, Value *S,
+           const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
+    : UnaryInstruction(Ty, iType, S, InsertBefore) {
+    setName(NameStr);
+  }
+  /// @brief Constructor with insert-at-end-of-block semantics for subclasses
+  CastInst(Type *Ty, unsigned iType, Value *S,
+           const Twine &NameStr, BasicBlock *InsertAtEnd)
+    : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
+    setName(NameStr);
+  }
+
+public:
+  /// Provides a way to construct any of the CastInst subclasses using an
+  /// opcode instead of the subclass's constructor. The opcode must be in the
+  /// CastOps category (Instruction::isCast(opcode) returns true). This
+  /// constructor has insert-before-instruction semantics to automatically
+  /// insert the new CastInst before InsertBefore (if it is non-null).
+  /// @brief Construct any of the CastInst subclasses
+  static CastInst *Create(
+    Instruction::CastOps,    ///< The opcode of the cast instruction
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+  /// Provides a way to construct any of the CastInst subclasses using an
+  /// opcode instead of the subclass's constructor. The opcode must be in the
+  /// CastOps category. This constructor has insert-at-end-of-block semantics
+  /// to automatically insert the new CastInst at the end of InsertAtEnd (if
+  /// it is non-null).
+  /// @brief Construct any of the CastInst subclasses
+  static CastInst *Create(
+    Instruction::CastOps,    ///< The opcode for the cast instruction
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a ZExt or BitCast cast instruction
+  static CastInst *CreateZExtOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a ZExt or BitCast cast instruction
+  static CastInst *CreateZExtOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a SExt or BitCast cast instruction
+  static CastInst *CreateSExtOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a SExt or BitCast cast instruction
+  static CastInst *CreateSExtOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a BitCast, AddrSpaceCast, or PtrToInt cast instruction.
+  static CastInst *CreatePointerCast(
+    Value *S,                ///< The pointer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
+  static CastInst *CreatePointerCast(
+    Value *S,                ///< The pointer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a BitCast or an AddrSpaceCast cast instruction.
+  static CastInst *CreatePointerBitCastOrAddrSpaceCast(
+    Value *S,                ///< The pointer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a BitCast or an AddrSpaceCast cast instruction.
+  static CastInst *CreatePointerBitCastOrAddrSpaceCast(
+    Value *S,                ///< The pointer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
+  ///
+  /// If the value is a pointer type and the destination an integer type,
+  /// creates a PtrToInt cast. If the value is an integer type and the
+  /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
+  /// a bitcast.
+  static CastInst *CreateBitOrPointerCast(
+    Value *S,                ///< The pointer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
+  static CastInst *CreateIntegerCast(
+    Value *S,                ///< The integer value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    bool isSigned,           ///< Whether to regard S as signed or not
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
+  static CastInst *CreateIntegerCast(
+    Value *S,                ///< The integer value to be casted (operand 0)
+    Type *Ty,          ///< The integer type to which operand is casted
+    bool isSigned,           ///< Whether to regard S as signed or not
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
+  static CastInst *CreateFPCast(
+    Value *S,                ///< The floating point value to be casted
+    Type *Ty,          ///< The floating point type to cast to
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
+  static CastInst *CreateFPCast(
+    Value *S,                ///< The floating point value to be casted
+    Type *Ty,          ///< The floating point type to cast to
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Create a Trunc or BitCast cast instruction
+  static CastInst *CreateTruncOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which cast should be made
+    const Twine &Name = "", ///< Name for the instruction
+    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+  );
+
+  /// @brief Create a Trunc or BitCast cast instruction
+  static CastInst *CreateTruncOrBitCast(
+    Value *S,                ///< The value to be casted (operand 0)
+    Type *Ty,          ///< The type to which operand is casted
+    const Twine &Name, ///< The name for the instruction
+    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
+  );
+
+  /// @brief Check whether it is valid to call getCastOpcode for these types.
+  static bool isCastable(
+    Type *SrcTy, ///< The Type from which the value should be cast.
+    Type *DestTy ///< The Type to which the value should be cast.
+  );
+
+  /// @brief Check whether a bitcast between these types is valid
+  static bool isBitCastable(
+    Type *SrcTy, ///< The Type from which the value should be cast.
+    Type *DestTy ///< The Type to which the value should be cast.
+  );
+
+  /// @brief Check whether a bitcast, inttoptr, or ptrtoint cast between these
+  /// types is valid and a no-op.
+  ///
+  /// This ensures that any pointer<->integer cast has enough bits in the
+  /// integer and any other cast is a bitcast.
+  static bool isBitOrNoopPointerCastable(
+      Type *SrcTy,  ///< The Type from which the value should be cast.
+      Type *DestTy, ///< The Type to which the value should be cast.
+      const DataLayout &DL);
+
+  /// Returns the opcode necessary to cast Val into Ty using usual casting
+  /// rules.
+  /// @brief Infer the opcode for cast operand and type
+  static Instruction::CastOps getCastOpcode(
+    const Value *Val, ///< The value to cast
+    bool SrcIsSigned, ///< Whether to treat the source as signed
+    Type *Ty,   ///< The Type to which the value should be casted
+    bool DstIsSigned  ///< Whether to treat the destination as signed
+  );
+
+  /// There are several places where we need to know if a cast instruction
+  /// only deals with integer source and destination types. To simplify that
+  /// logic, this method is provided.
+  /// @returns true iff the cast's operand and destination types are integers.
+  /// @brief Determine if this is an integer-only cast.
+  bool isIntegerCast() const;
+
+  /// A lossless cast is one that does not alter the basic value. It implies
+  /// a no-op cast but is more stringent, preventing things like int->float,
+  /// long->double, or int->ptr.
+  /// @returns true iff the cast is lossless.
+  /// @brief Determine if this is a lossless cast.
+  bool isLosslessCast() const;
+
+  /// A no-op cast is one that can be effected without changing any bits.
+  /// It implies that the source and destination types are the same size. The
+  /// DataLayout argument is to determine the pointer size when examining casts
+  /// involving Integer and Pointer types. They are no-op casts if the integer
+  /// is the same size as the pointer. However, pointer size varies with
+  /// platform.
+  /// @brief Determine if the described cast is a no-op cast.
+  static bool isNoopCast(
+    Instruction::CastOps Opcode, ///< Opcode of cast
+    Type *SrcTy,         ///< SrcTy of cast
+    Type *DstTy,         ///< DstTy of cast
+    const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
+  );
+
+  /// @brief Determine if this cast is a no-op cast.
+  ///
+  /// \param DL is the DataLayout to determine pointer size.
+  bool isNoopCast(const DataLayout &DL) const;
+
+  /// Determine how a pair of casts can be eliminated, if they can be at all.
+  /// This is a helper function for both CastInst and ConstantExpr.
+  /// @returns 0 if the CastInst pair can't be eliminated, otherwise
+  /// returns Instruction::CastOps value for a cast that can replace
+  /// the pair, casting SrcTy to DstTy.
+  /// @brief Determine if a cast pair is eliminable
+  static unsigned isEliminableCastPair(
+    Instruction::CastOps firstOpcode,  ///< Opcode of first cast
+    Instruction::CastOps secondOpcode, ///< Opcode of second cast
+    Type *SrcTy, ///< SrcTy of 1st cast
+    Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
+    Type *DstTy, ///< DstTy of 2nd cast
+    Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
+    Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
+    Type *DstIntPtrTy  ///< Integer type corresponding to Ptr DstTy, or null
+  );
+
+  /// @brief Return the opcode of this CastInst
+  Instruction::CastOps getOpcode() const {
+    return Instruction::CastOps(Instruction::getOpcode());
+  }
+
+  /// @brief Return the source type, as a convenience
+  Type* getSrcTy() const { return getOperand(0)->getType(); }
+  /// @brief Return the destination type, as a convenience
+  Type* getDestTy() const { return getType(); }
+
+  /// This method can be used to determine if a cast from S to DstTy using
+  /// Opcode op is valid or not.
+  /// @returns true iff the proposed cast is valid.
+  /// @brief Determine if a cast is valid without creating one.
+  static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
+
+  /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->isCast();
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
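+
+// Sketch of the inference-plus-factory pattern above (hypothetical values):
+//
+//   Instruction::CastOps Opc = CastInst::getCastOpcode(
+//       Val, /*SrcIsSigned=*/true, DestTy, /*DstIsSigned=*/true);
+//   assert(CastInst::castIsValid(Opc, Val, DestTy) && "invalid cast");
+//   CastInst *Conv = CastInst::Create(Opc, Val, DestTy, "conv", InsertPt);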
+
+//===----------------------------------------------------------------------===//
+//                               CmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class is the base class for the comparison instructions.
+/// @brief Abstract base class of comparison instructions.
+class CmpInst : public Instruction {
+public:
+  /// This enumeration lists the possible predicates for CmpInst subclasses.
+  /// Values in the range 0-31 are reserved for FCmpInst, while values in the
+  /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
+  /// predicate values are not overlapping between the classes.
+  ///
+  /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
+  /// FCMP_* values. Changing the bit patterns may require corresponding
+  /// changes to those passes.
+  enum Predicate {
+    // Opcode              U L G E    Intuitive operation
+    FCMP_FALSE =  0,  ///< 0 0 0 0    Always false (always folded)
+    FCMP_OEQ   =  1,  ///< 0 0 0 1    True if ordered and equal
+    FCMP_OGT   =  2,  ///< 0 0 1 0    True if ordered and greater than
+    FCMP_OGE   =  3,  ///< 0 0 1 1    True if ordered and greater than or equal
+    FCMP_OLT   =  4,  ///< 0 1 0 0    True if ordered and less than
+    FCMP_OLE   =  5,  ///< 0 1 0 1    True if ordered and less than or equal
+    FCMP_ONE   =  6,  ///< 0 1 1 0    True if ordered and operands are unequal
+    FCMP_ORD   =  7,  ///< 0 1 1 1    True if ordered (no nans)
+    FCMP_UNO   =  8,  ///< 1 0 0 0    True if unordered: isnan(X) | isnan(Y)
+    FCMP_UEQ   =  9,  ///< 1 0 0 1    True if unordered or equal
+    FCMP_UGT   = 10,  ///< 1 0 1 0    True if unordered or greater than
+    FCMP_UGE   = 11,  ///< 1 0 1 1    True if unordered, greater than, or equal
+    FCMP_ULT   = 12,  ///< 1 1 0 0    True if unordered or less than
+    FCMP_ULE   = 13,  ///< 1 1 0 1    True if unordered, less than, or equal
+    FCMP_UNE   = 14,  ///< 1 1 1 0    True if unordered or not equal
+    FCMP_TRUE  = 15,  ///< 1 1 1 1    Always true (always folded)
+    FIRST_FCMP_PREDICATE = FCMP_FALSE,
+    LAST_FCMP_PREDICATE = FCMP_TRUE,
+    BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
+    ICMP_EQ    = 32,  ///< equal
+    ICMP_NE    = 33,  ///< not equal
+    ICMP_UGT   = 34,  ///< unsigned greater than
+    ICMP_UGE   = 35,  ///< unsigned greater or equal
+    ICMP_ULT   = 36,  ///< unsigned less than
+    ICMP_ULE   = 37,  ///< unsigned less or equal
+    ICMP_SGT   = 38,  ///< signed greater than
+    ICMP_SGE   = 39,  ///< signed greater or equal
+    ICMP_SLT   = 40,  ///< signed less than
+    ICMP_SLE   = 41,  ///< signed less or equal
+    FIRST_ICMP_PREDICATE = ICMP_EQ,
+    LAST_ICMP_PREDICATE = ICMP_SLE,
+    BAD_ICMP_PREDICATE = ICMP_SLE + 1
+  };
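+
+  // Sketch: for the FCMP_* values the four bits are U, L, G, E from high to
+  // low, so a predicate holds iff the bit for the actual relation between
+  // the operands is set. For example, given an FCMP_* value `Pred`:
+  //
+  //   bool MayBeUnordered = (Pred & 8) != 0; // U bit: set for FCMP_U* forms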
+
+protected:
+  CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
+          Value *LHS, Value *RHS, const Twine &Name = "",
+          Instruction *InsertBefore = nullptr);
+
+  CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
+          Value *LHS, Value *RHS, const Twine &Name,
+          BasicBlock *InsertAtEnd);
+
+public:
+  // allocate space for exactly two operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 2);
+  }
+
+  /// Construct a compare instruction, given the opcode, the predicate and
+  /// the two operands.  Optionally (if InsertBefore is specified) insert the
+  /// instruction into a BasicBlock right before the specified instruction.
+  /// The specified Instruction is allowed to be a dereferenced end iterator.
+  /// @brief Create a CmpInst
+  static CmpInst *Create(OtherOps Op,
+                         Predicate predicate, Value *S1,
+                         Value *S2, const Twine &Name = "",
+                         Instruction *InsertBefore = nullptr);
+
+  /// Construct a compare instruction, given the opcode, the predicate and the
+  /// two operands.  Also automatically insert this instruction to the end of
+  /// the BasicBlock specified.
+  /// @brief Create a CmpInst
+  static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
+                         Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
+
+  /// @brief Get the opcode casted to the right type
+  OtherOps getOpcode() const {
+    return static_cast<OtherOps>(Instruction::getOpcode());
+  }
+
+  /// @brief Return the predicate for this instruction.
+  Predicate getPredicate() const {
+    return Predicate(getSubclassDataFromInstruction());
+  }
+
+  /// @brief Set the predicate for this instruction to the specified value.
+  void setPredicate(Predicate P) { setInstructionSubclassData(P); }
+
+  static bool isFPPredicate(Predicate P) {
+    return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
+  }
+
+  static bool isIntPredicate(Predicate P) {
+    return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
+  }
+
+  static StringRef getPredicateName(Predicate P);
+
+  bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
+  bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
+
+  /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
+  ///              OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
+  /// @returns the inverse predicate for the instruction's current predicate.
+  /// @brief Return the inverse of the instruction's predicate.
+  Predicate getInversePredicate() const {
+    return getInversePredicate(getPredicate());
+  }
+
+  /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
+  ///              OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
+  /// @returns the inverse predicate for predicate provided in \p pred.
+  /// @brief Return the inverse of a given predicate
+  static Predicate getInversePredicate(Predicate pred);
+
+  /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
+  ///              OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
+  /// @returns the predicate that would be the result of exchanging the two
+  /// operands of the CmpInst instruction without changing the result
+  /// produced.
+  /// @brief Return the predicate as if the operands were swapped
+  Predicate getSwappedPredicate() const {
+    return getSwappedPredicate(getPredicate());
+  }
+
+  /// This is a static version that you can use without an instruction
+  /// available.
+  /// @brief Return the predicate as if the operands were swapped.
+  static Predicate getSwappedPredicate(Predicate pred);
+
+  /// For a predicate of kind "is X or equal to 0" returns the predicate
+  /// "is X". For a predicate of kind "is X" returns the predicate
+  /// "is X or equal to 0". Other kinds of predicates are not supported.
+  /// @returns the predicate with the "equal to zero" condition removed if
+  /// it was present, or added if it was not.
+  /// @brief Return the flipped strictness of predicate
+  Predicate getFlippedStrictnessPredicate() const {
+    return getFlippedStrictnessPredicate(getPredicate());
+  }
+
+  /// This is a static version that you can use without an instruction
+  /// available.
+  /// @brief Return the flipped strictness of predicate
+  static Predicate getFlippedStrictnessPredicate(Predicate pred);
+
+  /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
+  /// @brief Returns the non-strict version of strict comparisons.
+  Predicate getNonStrictPredicate() const {
+    return getNonStrictPredicate(getPredicate());
+  }
+
+  /// This is a static version that you can use without an instruction
+  /// available.
+  /// @returns the non-strict version of comparison provided in \p pred.
+  /// If \p pred is not a strict comparison predicate, returns \p pred.
+  /// @brief Returns the non-strict version of strict comparisons.
+  static Predicate getNonStrictPredicate(Predicate pred);
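+
+  // Sketch (assumed values) of how the predicate transforms above compose:
+  //
+  //   CmpInst::Predicate P = CmpInst::ICMP_SGT;
+  //   CmpInst::getInversePredicate(P);   // ICMP_SLE
+  //   CmpInst::getSwappedPredicate(P);   // ICMP_SLT
+  //   CmpInst::getNonStrictPredicate(P); // ICMP_SGE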
+
+  /// @brief Provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// This is just a convenience that dispatches to the subclasses.
+  /// @brief Swap the operands and adjust predicate accordingly to retain
+  /// the same comparison.
+  void swapOperands();
+
+  /// This is just a convenience that dispatches to the subclasses.
+  /// @brief Determine if this CmpInst is commutative.
+  bool isCommutative() const;
+
+  /// This is just a convenience that dispatches to the subclasses.
+  /// @brief Determine if this is an equals/not equals predicate.
+  bool isEquality() const;
+
+  /// @returns true if the comparison is signed, false otherwise.
+  /// @brief Determine if this instruction is using a signed comparison.
+  bool isSigned() const {
+    return isSigned(getPredicate());
+  }
+
+  /// @returns true if the comparison is unsigned, false otherwise.
+  /// @brief Determine if this instruction is using an unsigned comparison.
+  bool isUnsigned() const {
+    return isUnsigned(getPredicate());
+  }
+
+  /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE; a predicate that
+  /// is already signed (e.g. SLT) fails an assert.
+  /// @returns the signed version of the unsigned predicate pred.
+  /// @brief return the signed version of a predicate
+  static Predicate getSignedPredicate(Predicate pred);
+
+  /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE; a predicate that
+  /// is already signed (e.g. SLT) fails an assert.
+  /// @returns the signed version of the predicate for this instruction (which
+  /// has to be an unsigned predicate).
+  /// @brief return the signed version of a predicate
+  Predicate getSignedPredicate() {
+    return getSignedPredicate(getPredicate());
+  }
+
+  /// This is just a convenience.
+  /// @brief Determine if this is true when both operands are the same.
+  bool isTrueWhenEqual() const {
+    return isTrueWhenEqual(getPredicate());
+  }
+
+  /// This is just a convenience.
+  /// @brief Determine if this is false when both operands are the same.
+  bool isFalseWhenEqual() const {
+    return isFalseWhenEqual(getPredicate());
+  }
+
+  /// @returns true if the predicate is unsigned, false otherwise.
+  /// @brief Determine if the predicate is an unsigned operation.
+  static bool isUnsigned(Predicate predicate);
+
+  /// @returns true if the predicate is signed, false otherwise.
+  /// @brief Determine if the predicate is a signed operation.
+  static bool isSigned(Predicate predicate);
+
+  /// @brief Determine if the predicate is an ordered operation.
+  static bool isOrdered(Predicate predicate);
+
+  /// @brief Determine if the predicate is an unordered operation.
+  static bool isUnordered(Predicate predicate);
+
+  /// Determine if the predicate is true when comparing a value with itself.
+  static bool isTrueWhenEqual(Predicate predicate);
+
+  /// Determine if the predicate is false when comparing a value with itself.
+  static bool isFalseWhenEqual(Predicate predicate);
+
+  /// Determine if Pred1 implies Pred2 is true when two compares have matching
+  /// operands.
+  static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
+
+  /// Determine if Pred1 implies Pred2 is false when two compares have matching
+  /// operands.
+  static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
+
+  /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::ICmp ||
+           I->getOpcode() == Instruction::FCmp;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+  /// @brief Create a result type for fcmp/icmp
+  static Type* makeCmpResultType(Type* opnd_type) {
+    if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
+      return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
+                             vt->getNumElements());
+    }
+    return Type::getInt1Ty(opnd_type->getContext());
+  }
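+
+  // For example (sketch): makeCmpResultType(i32) yields i1, while
+  // makeCmpResultType(<4 x i32>) yields <4 x i1>, matching the lane-wise
+  // result of a vector icmp/fcmp.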
+
+private:
+  // Shadow Value::setValueSubclassData with a private forwarding method so that
+  // subclasses cannot accidentally use it.
+  void setValueSubclassData(unsigned short D) {
+    Value::setValueSubclassData(D);
+  }
+};
+
+// FIXME: these are redundant if CmpInst < BinaryOperator
+template <>
+struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                           FuncletPadInst Class
+//===----------------------------------------------------------------------===//
+class FuncletPadInst : public Instruction {
+private:
+  FuncletPadInst(const FuncletPadInst &CPI);
+
+  explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+                          ArrayRef<Value *> Args, unsigned Values,
+                          const Twine &NameStr, Instruction *InsertBefore);
+  explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+                          ArrayRef<Value *> Args, unsigned Values,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+  friend class CatchPadInst;
+  friend class CleanupPadInst;
+
+  FuncletPadInst *cloneImpl() const;
+
+public:
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// getNumArgOperands - Return the number of funcletpad arguments.
+  ///
+  unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+
+  /// Convenience accessors
+
+  /// \brief Return the outer EH-pad this funclet is nested within.
+  ///
+  /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
+  /// is a CatchPadInst.
+  Value *getParentPad() const { return Op<-1>(); }
+  void setParentPad(Value *ParentPad) {
+    assert(ParentPad);
+    Op<-1>() = ParentPad;
+  }
+
+  /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
+  ///
+  Value *getArgOperand(unsigned i) const { return getOperand(i); }
+  void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
+  /// arg_operands - iteration adapter for range-for loops.
+  op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
+
+  /// arg_operands - iteration adapter for range-for loops.
+  const_op_range arg_operands() const {
+    return const_op_range(op_begin(), op_end() - 1);
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) { return I->isFuncletPad(); }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<FuncletPadInst>
+    : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
+
+/// \brief A lightweight accessor for an operand bundle meant to be passed
+/// around by value.
+struct OperandBundleUse {
+  ArrayRef<Use> Inputs;
+
+  OperandBundleUse() = default;
+  explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
+      : Inputs(Inputs), Tag(Tag) {}
+
+  /// \brief Return true if the operand at index \p Idx in this operand bundle
+  /// has the attribute A.
+  bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
+    if (isDeoptOperandBundle())
+      if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
+        return Inputs[Idx]->getType()->isPointerTy();
+
+    // Conservative answer:  no operands have any attributes.
+    return false;
+  }
+
+  /// \brief Return the tag of this operand bundle as a string.
+  StringRef getTagName() const {
+    return Tag->getKey();
+  }
+
+  /// \brief Return the tag of this operand bundle as an integer.
+  ///
+  /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
+  /// and this function returns the unique integer that getOrInsertBundleTag
+  /// associated with the tag of this operand bundle.
+  uint32_t getTagID() const {
+    return Tag->getValue();
+  }
+
+  /// \brief Return true if this is a "deopt" operand bundle.
+  bool isDeoptOperandBundle() const {
+    return getTagID() == LLVMContext::OB_deopt;
+  }
+
+  /// \brief Return true if this is a "funclet" operand bundle.
+  bool isFuncletOperandBundle() const {
+    return getTagID() == LLVMContext::OB_funclet;
+  }
+
+private:
+  /// \brief Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
+  StringMapEntry<uint32_t> *Tag;
+};
+
+/// \brief A container for an operand bundle being viewed as a set of values
+/// rather than a set of uses.
+///
+/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
+/// so it is possible to create and pass around "self-contained" instances of
+/// OperandBundleDef and ConstOperandBundleDef.
+template <typename InputTy> class OperandBundleDefT {
+  std::string Tag;
+  std::vector<InputTy> Inputs;
+
+public:
+  explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
+      : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
+  explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
+      : Tag(std::move(Tag)), Inputs(Inputs) {}
+
+  explicit OperandBundleDefT(const OperandBundleUse &OBU) {
+    Tag = OBU.getTagName();
+    Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
+  }
+
+  ArrayRef<InputTy> inputs() const { return Inputs; }
+
+  using input_iterator = typename std::vector<InputTy>::const_iterator;
+
+  size_t input_size() const { return Inputs.size(); }
+  input_iterator input_begin() const { return Inputs.begin(); }
+  input_iterator input_end() const { return Inputs.end(); }
+
+  StringRef getTag() const { return Tag; }
+};
+
+using OperandBundleDef = OperandBundleDefT<Value *>;
+using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
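+
+// Sketch (hypothetical values): an OperandBundleDef can be built directly
+// or copied out of an existing OperandBundleUse:
+//
+//   OperandBundleDef Deopt("deopt", std::vector<Value *>{State});
+//   OperandBundleDef Copy(OBU); // OBU is an existing OperandBundleUse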
+
+/// \brief A mixin to add operand bundle functionality to llvm instruction
+/// classes.
+///
+/// OperandBundleUser uses the descriptor area co-allocated with the host User
+/// to store some meta information about which operands are "normal" operands,
+/// and which ones belong to some operand bundle.
+///
+/// The layout of an operand bundle user is
+///
+///          +-----------uint32_t End-------------------------------------+
+///          |                                                            |
+///          |  +--------uint32_t Begin--------------------+              |
+///          |  |                                          |              |
+///          ^  ^                                          v              v
+///  |------|------|----|----|----|----|----|---------|----|---------|----|-----
+///  | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
+///  |------|------|----|----|----|----|----|---------|----|---------|----|-----
+///   v  v                                  ^              ^
+///   |  |                                  |              |
+///   |  +--------uint32_t Begin------------+              |
+///   |                                                    |
+///   +-----------uint32_t End-----------------------------+
+///
+///
+/// BOI0, BOI1 ... are descriptions of operand bundles in this User's use list.
+/// These descriptions are installed and managed by this class, and they're all
+/// instances of OperandBundleUser<T>::BundleOpInfo.
+///
+/// DU is an additional descriptor installed by User's 'operator new' to keep
+/// track of the 'BOI0 ... BOIN' co-allocation.  OperandBundleUser does not
+/// access or modify DU in any way, it's an implementation detail private to
+/// User.
+///
+/// The regular Use& vector for the User starts at U0.  The operand bundle uses
+/// are part of the Use& vector, just like normal uses.  In the diagram above,
+/// the operand bundle uses start at BOI0_U0.  Each instance of BundleOpInfo has
+/// information about a contiguous set of uses constituting an operand bundle,
+/// and the total set of operand bundle uses themselves form a contiguous set of
+/// uses (i.e. there are no gaps between uses corresponding to individual
+/// operand bundles).
+///
+/// This class does not know the location of the set of operand bundle uses
+/// within the use list -- that is decided by the User using this class via the
+/// BeginIdx argument in populateBundleOperandInfos.
+///
+/// Currently operand bundle users with hung-off operands are not supported.
+template <typename InstrTy, typename OpIteratorTy> class OperandBundleUser {
+public:
+  /// \brief Return the number of operand bundles associated with this User.
+  unsigned getNumOperandBundles() const {
+    return std::distance(bundle_op_info_begin(), bundle_op_info_end());
+  }
+
+  /// \brief Return true if this User has any operand bundles.
+  bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
+
+  /// \brief Return the index of the first bundle operand in the Use array.
+  unsigned getBundleOperandsStartIndex() const {
+    assert(hasOperandBundles() && "Don't call otherwise!");
+    return bundle_op_info_begin()->Begin;
+  }
+
+  /// \brief Return the index of the last bundle operand in the Use array.
+  unsigned getBundleOperandsEndIndex() const {
+    assert(hasOperandBundles() && "Don't call otherwise!");
+    return bundle_op_info_end()[-1].End;
+  }
+
+  /// Return true if the operand at index \p Idx is a bundle operand.
+  bool isBundleOperand(unsigned Idx) const {
+    return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
+           Idx < getBundleOperandsEndIndex();
+  }
+
+  /// \brief Return the total number of operands (not operand bundles) used by
+  /// every operand bundle in this OperandBundleUser.
+  unsigned getNumTotalBundleOperands() const {
+    if (!hasOperandBundles())
+      return 0;
+
+    unsigned Begin = getBundleOperandsStartIndex();
+    unsigned End = getBundleOperandsEndIndex();
+
+    assert(Begin <= End && "Should be!");
+    return End - Begin;
+  }
+
+  /// \brief Return the operand bundle at a specific index.
+  OperandBundleUse getOperandBundleAt(unsigned Index) const {
+    assert(Index < getNumOperandBundles() && "Index out of bounds!");
+    return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
+  }
+
+  /// \brief Return the number of operand bundles with the tag Name attached to
+  /// this instruction.
+  unsigned countOperandBundlesOfType(StringRef Name) const {
+    unsigned Count = 0;
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+      if (getOperandBundleAt(i).getTagName() == Name)
+        Count++;
+
+    return Count;
+  }
+
+  /// \brief Return the number of operand bundles with the tag ID attached to
+  /// this instruction.
+  unsigned countOperandBundlesOfType(uint32_t ID) const {
+    unsigned Count = 0;
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+      if (getOperandBundleAt(i).getTagID() == ID)
+        Count++;
+
+    return Count;
+  }
+
+  /// \brief Return an operand bundle by name, if present.
+  ///
+  /// It is an error to call this for operand bundle types that may have
+  /// multiple instances of them on the same instruction.
+  Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
+    assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
+
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
+      OperandBundleUse U = getOperandBundleAt(i);
+      if (U.getTagName() == Name)
+        return U;
+    }
+
+    return None;
+  }
+
+  /// \brief Return an operand bundle by tag ID, if present.
+  ///
+  /// It is an error to call this for operand bundle types that may have
+  /// multiple instances of them on the same instruction.
+  Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
+    assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
+
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
+      OperandBundleUse U = getOperandBundleAt(i);
+      if (U.getTagID() == ID)
+        return U;
+    }
+
+    return None;
+  }
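+
+  // A querying sketch (hypothetical `Call` with at most one "deopt" bundle):
+  //
+  //   if (Optional<OperandBundleUse> OBU =
+  //           Call->getOperandBundle(LLVMContext::OB_deopt)) {
+  //     ArrayRef<Use> DeoptState = OBU->Inputs;
+  //     // ...
+  //   }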
+
+  /// \brief Return the list of operand bundles attached to this instruction as
+  /// a vector of OperandBundleDefs.
+  ///
+  /// This function copies the OperandBundleUse instances associated with this
+  /// OperandBundleUser to a vector of OperandBundleDefs.  Note:
+  /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
+  /// representations of operand bundles (see documentation above).
+  void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+      Defs.emplace_back(getOperandBundleAt(i));
+  }
+
+  /// \brief Return the operand bundle for the operand at index OpIdx.
+  ///
+  /// It is an error to call this with an OpIdx that does not correspond to a
+  /// bundle operand.
+  OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
+    return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
+  }
+
+  /// \brief Return true if this operand bundle user has operand bundles that
+  /// may read from the heap.
+  bool hasReadingOperandBundles() const {
+    // Implementation note: this is a conservative implementation of operand
+    // bundle semantics, where *any* operand bundle forces a callsite to be at
+    // least readonly.
+    return hasOperandBundles();
+  }
+
+  /// \brief Return true if this operand bundle user has operand bundles that
+  /// may write to the heap.
+  bool hasClobberingOperandBundles() const {
+    for (auto &BOI : bundle_op_infos()) {
+      if (BOI.Tag->second == LLVMContext::OB_deopt ||
+          BOI.Tag->second == LLVMContext::OB_funclet)
+        continue;
+
+      // This instruction has an operand bundle that is not known to us.
+      // Assume the worst.
+      return true;
+    }
+
+    return false;
+  }
+
+  /// \brief Return true if the bundle operand at index \p OpIdx has the
+  /// attribute \p A.
+  bool bundleOperandHasAttr(unsigned OpIdx,  Attribute::AttrKind A) const {
+    auto &BOI = getBundleOpInfoForOperand(OpIdx);
+    auto OBU = operandBundleFromBundleOpInfo(BOI);
+    return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
+  }
+
+  /// \brief Return true if \p Other has the same sequence of operand bundle
+  /// tags, with the same number of operands for each bundle, as this
+  /// OperandBundleUser.
+  bool hasIdenticalOperandBundleSchema(
+      const OperandBundleUser<InstrTy, OpIteratorTy> &Other) const {
+    if (getNumOperandBundles() != Other.getNumOperandBundles())
+      return false;
+
+    return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
+                      Other.bundle_op_info_begin());
+  }
+
+  /// \brief Return true if this operand bundle user contains operand bundles
+  /// with tags other than those specified in \p IDs.
+  bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
+    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
+      uint32_t ID = getOperandBundleAt(i).getTagID();
+      if (!is_contained(IDs, ID))
+        return true;
+    }
+    return false;
+  }
+
+protected:
+  /// \brief Is the function attribute S disallowed by some operand bundle on
+  /// this operand bundle user?
+  bool isFnAttrDisallowedByOpBundle(StringRef S) const {
+    // Operand bundles can only disallow the readnone, readonly and
+    // argmemonly attributes.  All string attributes are fine.
+    return false;
+  }
+
+  /// \brief Is the function attribute A disallowed by some operand bundle on
+  /// this operand bundle user?
+  bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
+    switch (A) {
+    default:
+      return false;
+
+    // Any operand bundle that may read memory rules out these
+    // memory-restricting attributes.
+    case Attribute::InaccessibleMemOrArgMemOnly:
+    case Attribute::InaccessibleMemOnly:
+    case Attribute::ArgMemOnly:
+    case Attribute::ReadNone:
+      return hasReadingOperandBundles();
+
+    case Attribute::ReadOnly:
+      return hasClobberingOperandBundles();
+    }
+
+    llvm_unreachable("switch has a default case!");
+  }
+
+  /// \brief Used to keep track of an operand bundle.  See the main comment on
+  /// OperandBundleUser above.
+  struct BundleOpInfo {
+    /// \brief The operand bundle tag, interned by
+    /// LLVMContextImpl::getOrInsertBundleTag.
+    StringMapEntry<uint32_t> *Tag;
+
+    /// \brief The index in the Use& vector where operands for this operand
+    /// bundle start.
+    uint32_t Begin;
+
+    /// \brief The index in the Use& vector where operands for this operand
+    /// bundle ends.
+    uint32_t End;
+
+    bool operator==(const BundleOpInfo &Other) const {
+      return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
+    }
+  };
+
+  /// \brief Simple helper function to map a BundleOpInfo to an
+  /// OperandBundleUse.
+  OperandBundleUse
+  operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
+    auto op_begin = static_cast<const InstrTy *>(this)->op_begin();
+    ArrayRef<Use> Inputs(op_begin + BOI.Begin, op_begin + BOI.End);
+    return OperandBundleUse(BOI.Tag, Inputs);
+  }
+
+  using bundle_op_iterator = BundleOpInfo *;
+  using const_bundle_op_iterator = const BundleOpInfo *;
+
+  /// \brief Return the start of the list of BundleOpInfo instances associated
+  /// with this OperandBundleUser.
+  bundle_op_iterator bundle_op_info_begin() {
+    if (!static_cast<InstrTy *>(this)->hasDescriptor())
+      return nullptr;
+
+    uint8_t *BytesBegin = static_cast<InstrTy *>(this)->getDescriptor().begin();
+    return reinterpret_cast<bundle_op_iterator>(BytesBegin);
+  }
+
+  /// \brief Return the start of the list of BundleOpInfo instances associated
+  /// with this OperandBundleUser.
+  const_bundle_op_iterator bundle_op_info_begin() const {
+    auto *NonConstThis =
+        const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+    return NonConstThis->bundle_op_info_begin();
+  }
+
+  /// \brief Return the end of the list of BundleOpInfo instances associated
+  /// with this OperandBundleUser.
+  bundle_op_iterator bundle_op_info_end() {
+    if (!static_cast<InstrTy *>(this)->hasDescriptor())
+      return nullptr;
+
+    uint8_t *BytesEnd = static_cast<InstrTy *>(this)->getDescriptor().end();
+    return reinterpret_cast<bundle_op_iterator>(BytesEnd);
+  }
+
+  /// \brief Return the end of the list of BundleOpInfo instances associated
+  /// with this OperandBundleUser.
+  const_bundle_op_iterator bundle_op_info_end() const {
+    auto *NonConstThis =
+        const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+    return NonConstThis->bundle_op_info_end();
+  }
+
+  /// \brief Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
+  iterator_range<bundle_op_iterator> bundle_op_infos() {
+    return make_range(bundle_op_info_begin(), bundle_op_info_end());
+  }
+
+  /// \brief Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
+  iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
+    return make_range(bundle_op_info_begin(), bundle_op_info_end());
+  }
+
+  /// \brief Populate the BundleOpInfo instances and the Use& vector from \p
+  /// Bundles.  Return the op_iterator pointing to the Use& one past the last
+  /// bundle operand use.
+  ///
+  /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo instance
+  /// allocated in this User's descriptor.
+  OpIteratorTy populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
+                                          const unsigned BeginIndex) {
+    auto It = static_cast<InstrTy *>(this)->op_begin() + BeginIndex;
+    for (auto &B : Bundles)
+      It = std::copy(B.input_begin(), B.input_end(), It);
+
+    auto *ContextImpl = static_cast<InstrTy *>(this)->getContext().pImpl;
+    auto BI = Bundles.begin();
+    unsigned CurrentIndex = BeginIndex;
+
+    for (auto &BOI : bundle_op_infos()) {
+      assert(BI != Bundles.end() && "Incorrect allocation?");
+
+      BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
+      BOI.Begin = CurrentIndex;
+      BOI.End = CurrentIndex + BI->input_size();
+      CurrentIndex = BOI.End;
+      BI++;
+    }
+
+    assert(BI == Bundles.end() && "Incorrect allocation?");
+
+    return It;
+  }
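+
+  // Walk-through sketch (illustrative): with BeginIndex == 1 and two bundles
+  // carrying 2 and 1 inputs, the loop above records the ranges [1, 3) and
+  // [3, 4), leaves CurrentIndex == 4, and returns the op_iterator at offset 4.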
+
+  /// \brief Return the BundleOpInfo for the operand at index OpIdx.
+  ///
+  /// It is an error to call this with an OpIdx that does not correspond to a
+  /// bundle operand.
+  const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
+    for (auto &BOI : bundle_op_infos())
+      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
+        return BOI;
+
+    llvm_unreachable("Did not find operand bundle for operand!");
+  }
+
+  /// \brief Return the total number of values used in \p Bundles.
+  static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
+    unsigned Total = 0;
+    for (auto &B : Bundles)
+      Total += B.input_size();
+    return Total;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INSTRTYPES_H
diff --git a/linux-x64/clang/include/llvm/IR/Instruction.def b/linux-x64/clang/include/llvm/IR/Instruction.def
new file mode 100644
index 0000000..8661729
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Instruction.def
@@ -0,0 +1,231 @@
+//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains descriptions of the various LLVM instructions.  This is
+// used as a central place for enumerating the different instructions and
+// should eventually be the place to put comments about the instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+//
+#ifndef FIRST_TERM_INST
+#define FIRST_TERM_INST(num)
+#endif
+#ifndef HANDLE_TERM_INST
+#ifndef HANDLE_INST
+#define HANDLE_TERM_INST(num, opcode, Class)
+#else
+#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_TERM_INST
+#define LAST_TERM_INST(num)
+#endif
+
+#ifndef FIRST_BINARY_INST
+#define FIRST_BINARY_INST(num)
+#endif
+#ifndef HANDLE_BINARY_INST
+#ifndef HANDLE_INST
+#define HANDLE_BINARY_INST(num, opcode, instclass)
+#else
+#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_BINARY_INST
+#define LAST_BINARY_INST(num)
+#endif
+
+#ifndef FIRST_MEMORY_INST
+#define FIRST_MEMORY_INST(num)
+#endif
+#ifndef HANDLE_MEMORY_INST
+#ifndef HANDLE_INST
+#define HANDLE_MEMORY_INST(num, opcode, Class)
+#else
+#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_MEMORY_INST
+#define LAST_MEMORY_INST(num)
+#endif
+
+#ifndef FIRST_CAST_INST
+#define FIRST_CAST_INST(num)
+#endif
+#ifndef HANDLE_CAST_INST
+#ifndef HANDLE_INST
+#define HANDLE_CAST_INST(num, opcode, Class)
+#else
+#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_CAST_INST
+#define LAST_CAST_INST(num)
+#endif
+
+#ifndef FIRST_FUNCLETPAD_INST
+#define FIRST_FUNCLETPAD_INST(num)
+#endif
+#ifndef HANDLE_FUNCLETPAD_INST
+#ifndef HANDLE_INST
+#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
+#else
+#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_FUNCLETPAD_INST
+#define LAST_FUNCLETPAD_INST(num)
+#endif
+
+#ifndef FIRST_OTHER_INST
+#define FIRST_OTHER_INST(num)
+#endif
+#ifndef HANDLE_OTHER_INST
+#ifndef HANDLE_INST
+#define HANDLE_OTHER_INST(num, opcode, Class)
+#else
+#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_OTHER_INST
+#define LAST_OTHER_INST(num)
+#endif
+
+#ifndef HANDLE_USER_INST
+#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
+#endif
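+
+// Usage sketch (illustrative, not part of the original file): a client defines
+// one of the HANDLE_* macros before including this file to generate code over
+// every opcode, e.g. an opcode-to-name mapping:
+//
+//   #define HANDLE_INST(N, OPC, CLASS) case N: return #OPC;
+//   #include "llvm/IR/Instruction.def"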
+
+// Terminator Instructions - These instructions are used to terminate a basic
+// block of the program.   Every basic block must end with one of these
+// instructions for it to be a well formed basic block.
+//
+ FIRST_TERM_INST  ( 1)
+HANDLE_TERM_INST  ( 1, Ret           , ReturnInst)
+HANDLE_TERM_INST  ( 2, Br            , BranchInst)
+HANDLE_TERM_INST  ( 3, Switch        , SwitchInst)
+HANDLE_TERM_INST  ( 4, IndirectBr    , IndirectBrInst)
+HANDLE_TERM_INST  ( 5, Invoke        , InvokeInst)
+HANDLE_TERM_INST  ( 6, Resume        , ResumeInst)
+HANDLE_TERM_INST  ( 7, Unreachable   , UnreachableInst)
+HANDLE_TERM_INST  ( 8, CleanupRet    , CleanupReturnInst)
+HANDLE_TERM_INST  ( 9, CatchRet      , CatchReturnInst)
+HANDLE_TERM_INST  (10, CatchSwitch   , CatchSwitchInst)
+  LAST_TERM_INST  (10)
+
+// Standard binary operators...
+ FIRST_BINARY_INST(11)
+HANDLE_BINARY_INST(11, Add  , BinaryOperator)
+HANDLE_BINARY_INST(12, FAdd , BinaryOperator)
+HANDLE_BINARY_INST(13, Sub  , BinaryOperator)
+HANDLE_BINARY_INST(14, FSub , BinaryOperator)
+HANDLE_BINARY_INST(15, Mul  , BinaryOperator)
+HANDLE_BINARY_INST(16, FMul , BinaryOperator)
+HANDLE_BINARY_INST(17, UDiv , BinaryOperator)
+HANDLE_BINARY_INST(18, SDiv , BinaryOperator)
+HANDLE_BINARY_INST(19, FDiv , BinaryOperator)
+HANDLE_BINARY_INST(20, URem , BinaryOperator)
+HANDLE_BINARY_INST(21, SRem , BinaryOperator)
+HANDLE_BINARY_INST(22, FRem , BinaryOperator)
+
+// Logical operators (integer operands)
+HANDLE_BINARY_INST(23, Shl  , BinaryOperator) // Shift left  (logical)
+HANDLE_BINARY_INST(24, LShr , BinaryOperator) // Shift right (logical)
+HANDLE_BINARY_INST(25, AShr , BinaryOperator) // Shift right (arithmetic)
+HANDLE_BINARY_INST(26, And  , BinaryOperator)
+HANDLE_BINARY_INST(27, Or   , BinaryOperator)
+HANDLE_BINARY_INST(28, Xor  , BinaryOperator)
+  LAST_BINARY_INST(28)
+
+// Memory operators...
+ FIRST_MEMORY_INST(29)
+HANDLE_MEMORY_INST(29, Alloca, AllocaInst)  // Stack management
+HANDLE_MEMORY_INST(30, Load  , LoadInst  )  // Memory manipulation instrs
+HANDLE_MEMORY_INST(31, Store , StoreInst )
+HANDLE_MEMORY_INST(32, GetElementPtr, GetElementPtrInst)
+HANDLE_MEMORY_INST(33, Fence , FenceInst )
+HANDLE_MEMORY_INST(34, AtomicCmpXchg , AtomicCmpXchgInst )
+HANDLE_MEMORY_INST(35, AtomicRMW , AtomicRMWInst )
+  LAST_MEMORY_INST(35)
+
+// Cast operators ...
+// NOTE: The order matters here because CastInst::isEliminableCastPair
+// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
+ FIRST_CAST_INST(36)
+HANDLE_CAST_INST(36, Trunc   , TruncInst   )  // Truncate integers
+HANDLE_CAST_INST(37, ZExt    , ZExtInst    )  // Zero extend integers
+HANDLE_CAST_INST(38, SExt    , SExtInst    )  // Sign extend integers
+HANDLE_CAST_INST(39, FPToUI  , FPToUIInst  )  // floating point -> UInt
+HANDLE_CAST_INST(40, FPToSI  , FPToSIInst  )  // floating point -> SInt
+HANDLE_CAST_INST(41, UIToFP  , UIToFPInst  )  // UInt -> floating point
+HANDLE_CAST_INST(42, SIToFP  , SIToFPInst  )  // SInt -> floating point
+HANDLE_CAST_INST(43, FPTrunc , FPTruncInst )  // Truncate floating point
+HANDLE_CAST_INST(44, FPExt   , FPExtInst   )  // Extend floating point
+HANDLE_CAST_INST(45, PtrToInt, PtrToIntInst)  // Pointer -> Integer
+HANDLE_CAST_INST(46, IntToPtr, IntToPtrInst)  // Integer -> Pointer
+HANDLE_CAST_INST(47, BitCast , BitCastInst )  // Type cast
+HANDLE_CAST_INST(48, AddrSpaceCast, AddrSpaceCastInst)  // addrspace cast
+  LAST_CAST_INST(48)
+
+ FIRST_FUNCLETPAD_INST(49)
+HANDLE_FUNCLETPAD_INST(49, CleanupPad, CleanupPadInst)
+HANDLE_FUNCLETPAD_INST(50, CatchPad  , CatchPadInst)
+  LAST_FUNCLETPAD_INST(50)
+
+// Other operators...
+ FIRST_OTHER_INST(51)
+HANDLE_OTHER_INST(51, ICmp   , ICmpInst   )  // Integer comparison instruction
+HANDLE_OTHER_INST(52, FCmp   , FCmpInst   )  // Floating point comparison instr.
+HANDLE_OTHER_INST(53, PHI    , PHINode    )  // PHI node instruction
+HANDLE_OTHER_INST(54, Call   , CallInst   )  // Call a function
+HANDLE_OTHER_INST(55, Select , SelectInst )  // select instruction
+HANDLE_USER_INST (56, UserOp1, Instruction)  // May be used internally in a pass
+HANDLE_USER_INST (57, UserOp2, Instruction)  // Internal to passes only
+HANDLE_OTHER_INST(58, VAArg  , VAArgInst  )  // vaarg instruction
+HANDLE_OTHER_INST(59, ExtractElement, ExtractElementInst)// extract from vector
+HANDLE_OTHER_INST(60, InsertElement, InsertElementInst)  // insert into vector
+HANDLE_OTHER_INST(61, ShuffleVector, ShuffleVectorInst)  // shuffle two vectors.
+HANDLE_OTHER_INST(62, ExtractValue, ExtractValueInst)// extract from aggregate
+HANDLE_OTHER_INST(63, InsertValue, InsertValueInst)  // insert into aggregate
+HANDLE_OTHER_INST(64, LandingPad, LandingPadInst)  // Landing pad instruction.
+  LAST_OTHER_INST(64)
+
+#undef  FIRST_TERM_INST
+#undef HANDLE_TERM_INST
+#undef   LAST_TERM_INST
+
+#undef  FIRST_BINARY_INST
+#undef HANDLE_BINARY_INST
+#undef   LAST_BINARY_INST
+
+#undef  FIRST_MEMORY_INST
+#undef HANDLE_MEMORY_INST
+#undef   LAST_MEMORY_INST
+
+#undef  FIRST_CAST_INST
+#undef HANDLE_CAST_INST
+#undef   LAST_CAST_INST
+
+#undef  FIRST_FUNCLETPAD_INST
+#undef HANDLE_FUNCLETPAD_INST
+#undef   LAST_FUNCLETPAD_INST
+
+#undef  FIRST_OTHER_INST
+#undef HANDLE_OTHER_INST
+#undef   LAST_OTHER_INST
+
+#undef HANDLE_USER_INST
+
+#ifdef HANDLE_INST
+#undef HANDLE_INST
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Instruction.h b/linux-x64/clang/include/llvm/IR/Instruction.h
new file mode 100644
index 0000000..76bc401
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Instruction.h
@@ -0,0 +1,702 @@
+//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Instruction class, which is the
+// base class for all of the LLVM instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRUCTION_H
+#define LLVM_IR_INSTRUCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class BasicBlock;
+class FastMathFlags;
+class MDNode;
+class Module;
+struct AAMDNodes;
+
+template <> struct ilist_alloc_traits<Instruction> {
+  static inline void deleteNode(Instruction *V);
+};
+
+class Instruction : public User,
+                    public ilist_node_with_parent<Instruction, BasicBlock> {
+  BasicBlock *Parent;
+  DebugLoc DbgLoc;                         // 'dbg' Metadata cache.
+
+  enum {
+    /// This is a bit stored in the SubClassData field which indicates whether
+    /// this instruction has metadata attached to it or not.
+    HasMetadataBit = 1 << 15
+  };
+
+protected:
+  ~Instruction(); // Use deleteValue() to delete a generic Instruction.
+
+public:
+  Instruction(const Instruction &) = delete;
+  Instruction &operator=(const Instruction &) = delete;
+
+  /// Specialize the methods defined in Value, as we know that an instruction
+  /// can only be used by other instructions.
+  Instruction       *user_back()       { return cast<Instruction>(*user_begin());}
+  const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
+
+  inline const BasicBlock *getParent() const { return Parent; }
+  inline       BasicBlock *getParent()       { return Parent; }
+
+  /// Return the module owning the function this instruction belongs to
+  /// or nullptr if the function does not have a module.
+  ///
+  /// Note: this is undefined behavior if the instruction does not have a
+  /// parent, or the parent basic block does not have a parent function.
+  const Module *getModule() const;
+  Module *getModule() {
+    return const_cast<Module *>(
+                           static_cast<const Instruction *>(this)->getModule());
+  }
+
+  /// Return the function this instruction belongs to.
+  ///
+  /// Note: it is undefined behavior to call this on an instruction not
+  /// currently inserted into a function.
+  const Function *getFunction() const;
+  Function *getFunction() {
+    return const_cast<Function *>(
+                         static_cast<const Instruction *>(this)->getFunction());
+  }
+
+  /// This method unlinks 'this' from the containing basic block, but does not
+  /// delete it.
+  void removeFromParent();
+
+  /// This method unlinks 'this' from the containing basic block and deletes it.
+  ///
+  /// \returns an iterator pointing to the element after the erased one
+  SymbolTableList<Instruction>::iterator eraseFromParent();
+
+  /// Insert an unlinked instruction into a basic block immediately before
+  /// the specified instruction.
+  void insertBefore(Instruction *InsertPos);
+
+  /// Insert an unlinked instruction into a basic block immediately after the
+  /// specified instruction.
+  void insertAfter(Instruction *InsertPos);
+
+  /// Unlink this instruction from its current basic block and insert it into
+  /// the basic block that MovePos lives in, right before MovePos.
+  void moveBefore(Instruction *MovePos);
+
+  /// Unlink this instruction and insert into BB before I.
+  ///
+  /// \pre I is a valid iterator into BB.
+  void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);
+
+  /// Unlink this instruction from its current basic block and insert it into
+  /// the basic block that MovePos lives in, right after MovePos.
+  void moveAfter(Instruction *MovePos);
+
+  //===--------------------------------------------------------------------===//
+  // Subclass classification.
+  //===--------------------------------------------------------------------===//
+
+  /// Returns a member of one of the enums like Instruction::Add.
+  unsigned getOpcode() const { return getValueID() - InstructionVal; }
+
+  const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
+  bool isTerminator() const { return isTerminator(getOpcode()); }
+  bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
+  bool isShift() const { return isShift(getOpcode()); }
+  bool isCast() const { return isCast(getOpcode()); }
+  bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
+
+  static const char* getOpcodeName(unsigned OpCode);
+
+  static inline bool isTerminator(unsigned OpCode) {
+    return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
+  }
+
+  static inline bool isBinaryOp(unsigned Opcode) {
+    return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
+  }
+
+  /// Determine if the Opcode is one of the shift instructions.
+  static inline bool isShift(unsigned Opcode) {
+    return Opcode >= Shl && Opcode <= AShr;
+  }
+
+  /// Return true if this is a logical shift left or a logical shift right.
+  inline bool isLogicalShift() const {
+    return getOpcode() == Shl || getOpcode() == LShr;
+  }
+
+  /// Return true if this is an arithmetic shift right.
+  inline bool isArithmeticShift() const {
+    return getOpcode() == AShr;
+  }
+
+  /// Determine if the Opcode is and/or/xor.
+  static inline bool isBitwiseLogicOp(unsigned Opcode) {
+    return Opcode == And || Opcode == Or || Opcode == Xor;
+  }
+
+  /// Return true if this is and/or/xor.
+  inline bool isBitwiseLogicOp() const {
+    return isBitwiseLogicOp(getOpcode());
+  }
+
+  /// Determine if the OpCode is one of the CastInst instructions.
+  static inline bool isCast(unsigned OpCode) {
+    return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
+  }
+
+  /// Determine if the OpCode is one of the FuncletPadInst instructions.
+  static inline bool isFuncletPad(unsigned OpCode) {
+    return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Metadata manipulation.
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if this instruction has any metadata attached to it.
+  bool hasMetadata() const { return DbgLoc || hasMetadataHashEntry(); }
+
+  /// Return true if this instruction has metadata attached to it other than a
+  /// debug location.
+  bool hasMetadataOtherThanDebugLoc() const {
+    return hasMetadataHashEntry();
+  }
+
+  /// Get the metadata of given kind attached to this Instruction.
+  /// If the metadata is not found then return null.
+  MDNode *getMetadata(unsigned KindID) const {
+    if (!hasMetadata()) return nullptr;
+    return getMetadataImpl(KindID);
+  }
+
+  /// Get the metadata of given kind attached to this Instruction.
+  /// If the metadata is not found then return null.
+  MDNode *getMetadata(StringRef Kind) const {
+    if (!hasMetadata()) return nullptr;
+    return getMetadataImpl(Kind);
+  }
+
+  /// Get all metadata attached to this Instruction. The first element of each
+  /// pair returned is the KindID, the second element is the metadata value.
+  /// This list is returned sorted by the KindID.
+  void
+  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+    if (hasMetadata())
+      getAllMetadataImpl(MDs);
+  }
+
+  /// This does the same thing as getAllMetadata, except that it filters out the
+  /// debug location.
+  void getAllMetadataOtherThanDebugLoc(
+      SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+    if (hasMetadataOtherThanDebugLoc())
+      getAllMetadataOtherThanDebugLocImpl(MDs);
+  }
+
+  /// Fills the AAMDNodes structure with AA metadata from this instruction.
+  /// When Merge is true, the existing AA metadata is merged with that from this
+  /// instruction providing the most-general result.
+  void getAAMetadata(AAMDNodes &N, bool Merge = false) const;
+
+  /// Set the metadata of the specified kind to the specified node. This updates
+  /// or replaces metadata if already present, or removes it if Node is null.
+  void setMetadata(unsigned KindID, MDNode *Node);
+  void setMetadata(StringRef Kind, MDNode *Node);
+
+  /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
+  /// specifies the list of meta data that needs to be copied. If \p WL is
+  /// empty, all meta data will be copied.
+  void copyMetadata(const Instruction &SrcInst,
+                    ArrayRef<unsigned> WL = ArrayRef<unsigned>());
+
+  /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
+  /// has three operands (including name string), swap the order of the
+  /// metadata.
+  void swapProfMetadata();
+
+  /// Drop all unknown metadata except for debug locations.
+  /// @{
+  /// Passes are required to drop metadata they don't understand. This is a
+  /// convenience method for passes to do so.
+  void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
+  void dropUnknownNonDebugMetadata() {
+    return dropUnknownNonDebugMetadata(None);
+  }
+  void dropUnknownNonDebugMetadata(unsigned ID1) {
+    return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
+  }
+  void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
+    unsigned IDs[] = {ID1, ID2};
+    return dropUnknownNonDebugMetadata(IDs);
+  }
+  /// @}
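+
+  // Usage sketch (illustrative): a pass that understands only TBAA metadata
+  // could keep it and drop all other non-debug metadata with
+  //   I->dropUnknownNonDebugMetadata(LLVMContext::MD_tbaa);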
+
+  /// Sets the metadata on this instruction from the AAMDNodes structure.
+  void setAAMetadata(const AAMDNodes &N);
+
+  /// Retrieve the raw weight values of a conditional branch or select.
+  /// Returns true on success with profile weights filled in.
+  /// Returns false if no metadata or invalid metadata was found.
+  bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
+
+  /// Retrieve total raw weight values of a branch.
+  /// Returns true on success with profile total weights filled in.
+  /// Returns false if no metadata was found.
+  bool extractProfTotalWeight(uint64_t &TotalVal) const;
+
+  /// Updates branch_weights metadata by scaling it by \p S / \p T.
+  void updateProfWeight(uint64_t S, uint64_t T);
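+  // For example (illustrative): updateProfWeight(1, 2) rescales existing
+  // branch_weights {80, 20} by 1/2, yielding {40, 10}.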
+
+  /// Sets the branch_weights metadata to \p W for CallInst.
+  void setProfWeight(uint64_t W);
+
+  /// Set the debug location information for this instruction.
+  void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
+
+  /// Return the debug location for this node as a DebugLoc.
+  const DebugLoc &getDebugLoc() const { return DbgLoc; }
+
+  /// Set or clear the nsw flag on this instruction, which must be an operator
+  /// which supports this flag. See LangRef.html for the meaning of this flag.
+  void setHasNoUnsignedWrap(bool b = true);
+
+  /// Set or clear the nsw flag on this instruction, which must be an operator
+  /// which supports this flag. See LangRef.html for the meaning of this flag.
+  void setHasNoSignedWrap(bool b = true);
+
+  /// Set or clear the exact flag on this instruction, which must be an operator
+  /// which supports this flag. See LangRef.html for the meaning of this flag.
+  void setIsExact(bool b = true);
+
+  /// Determine whether the no unsigned wrap flag is set.
+  bool hasNoUnsignedWrap() const;
+
+  /// Determine whether the no signed wrap flag is set.
+  bool hasNoSignedWrap() const;
+
+  /// Drops flags that may cause this instruction to evaluate to poison despite
+  /// having non-poison inputs.
+  void dropPoisonGeneratingFlags();
+
+  /// Determine whether the exact flag is set.
+  bool isExact() const;
+
+  /// Set or clear all fast-math-flags on this instruction, which must be an
+  /// operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setFast(bool B);
+
+  /// Set or clear the reassociation flag on this instruction, which must be
+  /// an operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setHasAllowReassoc(bool B);
+
+  /// Set or clear the no-nans flag on this instruction, which must be an
+  /// operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setHasNoNaNs(bool B);
+
+  /// Set or clear the no-infs flag on this instruction, which must be an
+  /// operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setHasNoInfs(bool B);
+
+  /// Set or clear the no-signed-zeros flag on this instruction, which must be
+  /// an operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setHasNoSignedZeros(bool B);
+
+  /// Set or clear the allow-reciprocal flag on this instruction, which must be
+  /// an operator which supports this flag. See LangRef.html for the meaning of
+  /// this flag.
+  void setHasAllowReciprocal(bool B);
+
+  /// Set or clear the approximate-math-functions flag on this instruction,
+  /// which must be an operator which supports this flag. See LangRef.html for
+  /// the meaning of this flag.
+  void setHasApproxFunc(bool B);
+
+  /// Convenience function for setting multiple fast-math flags on this
+  /// instruction, which must be an operator which supports these flags. See
+  /// LangRef.html for the meaning of these flags.
+  void setFastMathFlags(FastMathFlags FMF);
+
+  /// Convenience function for transferring all fast-math flag values to this
+  /// instruction, which must be an operator which supports these flags. See
+  /// LangRef.html for the meaning of these flags.
+  void copyFastMathFlags(FastMathFlags FMF);
+
+  /// Determine whether all fast-math-flags are set.
+  bool isFast() const;
+
+  /// Determine whether the allow-reassociation flag is set.
+  bool hasAllowReassoc() const;
+
+  /// Determine whether the no-NaNs flag is set.
+  bool hasNoNaNs() const;
+
+  /// Determine whether the no-infs flag is set.
+  bool hasNoInfs() const;
+
+  /// Determine whether the no-signed-zeros flag is set.
+  bool hasNoSignedZeros() const;
+
+  /// Determine whether the allow-reciprocal flag is set.
+  bool hasAllowReciprocal() const;
+
+  /// Determine whether the allow-contract flag is set.
+  bool hasAllowContract() const;
+
+  /// Determine whether the approximate-math-functions flag is set.
+  bool hasApproxFunc() const;
+
+  /// Convenience function for getting all the fast-math flags, which must be an
+  /// operator which supports these flags. See LangRef.html for the meaning of
+  /// these flags.
+  FastMathFlags getFastMathFlags() const;
+
+  /// Copy I's fast-math flags
+  void copyFastMathFlags(const Instruction *I);
+
+  /// Convenience method to copy supported exact, fast-math, and (optionally)
+  /// wrapping flags from V to this instruction.
+  void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);
+
+  /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
+  /// V and this instruction.
+  void andIRFlags(const Value *V);
+
+  /// Merge 2 debug locations and apply the result to the Instruction. If the
+  /// instruction is a CallInst, we need to traverse the inline chain to find
+  /// the common scope. This is not efficient for N-way merging as each time
+  /// you merge 2 locations, you need to rebuild the hashmap to find the
+  /// common scope. However, we still choose this API because:
+  ///  1) Simplicity: it takes 2 locations instead of a list of locations.
+  ///  2) In the worst case, it increases the complexity from O(N*I) to
+  ///     O(2*N*I), where N is the number of Instructions to merge, and I is
+  ///     the maximum level of the inline stack. So it is still linear.
+  ///  3) Merging of call instructions should be extremely rare in real
+  ///     applications, thus the N-way merging should stay off the hot path.
+  /// The DebugLoc attached to this instruction will be overwritten by the
+  /// merged DebugLoc.
+  void applyMergedLocation(const DILocation *LocA, const DILocation *LocB);
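+  // Usage sketch (illustrative): when replacing two identical instructions
+  // with one, a pass might keep Repl and do
+  //   Repl->applyMergedLocation(Repl->getDebugLoc(), I->getDebugLoc());
+  // so the surviving instruction carries the merged location.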
+
+private:
+  /// Return true if we have an entry in the on-the-side metadata hash.
+  bool hasMetadataHashEntry() const {
+    return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+  }
+
+  // These are all implemented in Metadata.cpp.
+  MDNode *getMetadataImpl(unsigned KindID) const;
+  MDNode *getMetadataImpl(StringRef Kind) const;
+  void
+  getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
+  void getAllMetadataOtherThanDebugLocImpl(
+      SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
+  /// Clear all hashtable-based metadata from this instruction.
+  void clearMetadataHashEntries();
+
+public:
+  //===--------------------------------------------------------------------===//
+  // Predicates and helper methods.
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if the instruction is associative:
+  ///
+  ///   Associative operators satisfy:  x op (y op z) === (x op y) op z
+  ///
+  /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
+  ///
+  bool isAssociative() const LLVM_READONLY;
+  static bool isAssociative(unsigned Opcode) {
+    return Opcode == And || Opcode == Or || Opcode == Xor ||
+           Opcode == Add || Opcode == Mul;
+  }
+
+  /// Return true if the instruction is commutative:
+  ///
+  ///   Commutative operators satisfy: (x op y) === (y op x)
+  ///
+  /// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
+  /// applied to any type.
+  ///
+  bool isCommutative() const { return isCommutative(getOpcode()); }
+  static bool isCommutative(unsigned Opcode) {
+    switch (Opcode) {
+    case Add: case FAdd:
+    case Mul: case FMul:
+    case And: case Or: case Xor:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Return true if the instruction is idempotent:
+  ///
+  ///   Idempotent operators satisfy:  x op x === x
+  ///
+  /// In LLVM, the And and Or operators are idempotent.
+  ///
+  bool isIdempotent() const { return isIdempotent(getOpcode()); }
+  static bool isIdempotent(unsigned Opcode) {
+    return Opcode == And || Opcode == Or;
+  }
+
+  /// Return true if the instruction is nilpotent:
+  ///
+  ///   Nilpotent operators satisfy:  x op x === Id,
+  ///
+  ///   where Id is the identity for the operator, i.e. a constant such that
+  ///     x op Id === x and Id op x === x for all x.
+  ///
+  /// In LLVM, the Xor operator is nilpotent.
+  ///
+  bool isNilpotent() const { return isNilpotent(getOpcode()); }
+  static bool isNilpotent(unsigned Opcode) {
+    return Opcode == Xor;
+  }
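+
+  // Usage sketch (illustrative): an instruction simplifier can exploit these
+  // algebraic predicates when both operands are the same value X:
+  //   if (isIdempotent(Opc)) return X;                          // X op X == X
+  //   if (isNilpotent(Opc)) return Constant::getNullValue(Ty);  // X op X == 0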
+
+  /// Return true if this instruction may modify memory.
+  bool mayWriteToMemory() const;
+
+  /// Return true if this instruction may read memory.
+  bool mayReadFromMemory() const;
+
+  /// Return true if this instruction may read or write memory.
+  bool mayReadOrWriteMemory() const {
+    return mayReadFromMemory() || mayWriteToMemory();
+  }
+
+  /// Return true if this instruction has an AtomicOrdering of unordered or
+  /// higher.
+  bool isAtomic() const;
+
+  /// Return true if this atomic instruction loads from memory.
+  bool hasAtomicLoad() const;
+
+  /// Return true if this atomic instruction stores to memory.
+  bool hasAtomicStore() const;
+
+  /// Return true if this instruction may throw an exception.
+  bool mayThrow() const;
+
+  /// Return true if this instruction behaves like a memory fence: it can load
+  /// or store to memory without being given a specific memory location.
+  bool isFenceLike() const {
+    switch (getOpcode()) {
+    default:
+      return false;
+    // This list should be kept in sync with the list in mayWriteToMemory for
+    // all opcodes which don't have a memory location.
+    case Instruction::Fence:
+    case Instruction::CatchPad:
+    case Instruction::CatchRet:
+    case Instruction::Call:
+    case Instruction::Invoke:
+      return true;
+    }
+  }
+
+  /// Return true if the instruction may have side effects.
+  ///
+  /// Note that this does not consider malloc and alloca to have side
+  /// effects because the newly allocated memory is completely invisible to
+  /// instructions which don't use the returned value.  For cases where this
+  /// matters, isSafeToSpeculativelyExecute may be more appropriate.
+  bool mayHaveSideEffects() const { return mayWriteToMemory() || mayThrow(); }
+
+  /// Return true if the instruction can be removed if the result is unused.
+  ///
+  /// When constant folding, some instructions cannot be removed even if their
+  /// results are unused. Specifically, terminator instructions and calls that
+  /// may have side effects cannot be removed without semantically changing the
+  /// generated program.
+  bool isSafeToRemove() const;
+
+  /// Return true if the instruction is a variety of EH-block.
+  bool isEHPad() const {
+    switch (getOpcode()) {
+    case Instruction::CatchSwitch:
+    case Instruction::CatchPad:
+    case Instruction::CleanupPad:
+    case Instruction::LandingPad:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Create a copy of 'this' instruction that is identical in all ways except
+  /// the following:
+  ///   * The instruction has no parent
+  ///   * The instruction has no name
+  ///
+  Instruction *clone() const;
+
+  /// Return true if the specified instruction is exactly identical to the
+  /// current one. This means that all operands match and any extra information
+  /// (e.g. load is volatile) agree.
+  bool isIdenticalTo(const Instruction *I) const;
+
+  /// This is like isIdenticalTo, except that it ignores the
+  /// SubclassOptionalData flags, which may specify conditions under which the
+  /// instruction's result is undefined.
+  bool isIdenticalToWhenDefined(const Instruction *I) const;
+
+  /// When checking for operation equivalence (using isSameOperationAs) it is
+  /// sometimes useful to ignore certain attributes.
+  enum OperationEquivalenceFlags {
+    /// Check for equivalence ignoring load/store alignment.
+    CompareIgnoringAlignment = 1<<0,
+    /// Check for equivalence treating a type and a vector of that type
+    /// as equivalent.
+    CompareUsingScalarTypes = 1<<1
+  };
+
+  /// This function determines if the specified instruction executes the same
+  /// operation as the current one. This means that the opcodes, type, operand
+  /// types and any other factors affecting the operation must be the same. This
+  /// is similar to isIdenticalTo except the operands themselves don't have to
+  /// be identical.
+  /// @returns true if the specified instruction is the same operation as
+  /// the current one.
+  /// @brief Determine if one instruction is the same operation as another.
+  bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
+
+  /// Return true if there are any uses of this instruction in blocks other than
+  /// the specified block. Note that PHI nodes are considered to evaluate their
+  /// operands in the corresponding predecessor block.
+  bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return V->getValueID() >= Value::InstructionVal;
+  }
+
+  //----------------------------------------------------------------------
+  // Exported enumerations.
+  //
+  enum TermOps {       // These terminate basic blocks
+#define  FIRST_TERM_INST(N)             TermOpsBegin = N,
+#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_TERM_INST(N)             TermOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
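+
+  // Expansion sketch (illustrative): after preprocessing, the enum above reads
+  //   enum TermOps { TermOpsBegin = 1, Ret = 1, Br = 2, /* ... */
+  //                  CatchSwitch = 10, TermOpsEnd = 11 };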
+
+  enum BinaryOps {
+#define  FIRST_BINARY_INST(N)             BinaryOpsBegin = N,
+#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_BINARY_INST(N)             BinaryOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
+
+  enum MemoryOps {
+#define  FIRST_MEMORY_INST(N)             MemoryOpsBegin = N,
+#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_MEMORY_INST(N)             MemoryOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
+
+  enum CastOps {
+#define  FIRST_CAST_INST(N)             CastOpsBegin = N,
+#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_CAST_INST(N)             CastOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
+
+  enum FuncletPadOps {
+#define  FIRST_FUNCLETPAD_INST(N)             FuncletPadOpsBegin = N,
+#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_FUNCLETPAD_INST(N)             FuncletPadOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
+
+  enum OtherOps {
+#define  FIRST_OTHER_INST(N)             OtherOpsBegin = N,
+#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
+#define   LAST_OTHER_INST(N)             OtherOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+  };
+
+private:
+  friend class SymbolTableListTraits<Instruction>;
+
+  // Shadow Value::setValueSubclassData with a private forwarding method so that
+  // subclasses cannot accidentally use it.
+  void setValueSubclassData(unsigned short D) {
+    Value::setValueSubclassData(D);
+  }
+
+  unsigned short getSubclassDataFromValue() const {
+    return Value::getSubclassDataFromValue();
+  }
+
+  void setHasMetadataHashEntry(bool V) {
+    setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) |
+                         (V ? HasMetadataBit : 0));
+  }
+
+  void setParent(BasicBlock *P);
+
+protected:
+  // Instruction subclasses can stick up to 15 bits of stuff into the
+  // SubclassData field of the instruction with these members.
+
+  // Verify that only the low 15 bits are used.
+  void setInstructionSubclassData(unsigned short D) {
+    assert((D & HasMetadataBit) == 0 && "Out of range value put into field");
+    setValueSubclassData((getSubclassDataFromValue() & HasMetadataBit) | D);
+  }
+
+  unsigned getSubclassDataFromInstruction() const {
+    return getSubclassDataFromValue() & ~HasMetadataBit;
+  }
+
+  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+              Instruction *InsertBefore = nullptr);
+  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+              BasicBlock *InsertAtEnd);
+
+private:
+  /// Create a copy of this instruction.
+  Instruction *cloneImpl() const;
+};
+
+inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
+  V->deleteValue();
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INSTRUCTION_H
diff --git a/linux-x64/clang/include/llvm/IR/Instructions.h b/linux-x64/clang/include/llvm/IR/Instructions.h
new file mode 100644
index 0000000..6c15d5d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Instructions.h
@@ -0,0 +1,5078 @@
+//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes the class definitions of all of the subclasses of the
+// Instruction class.  This is meant to be an easy way to get access to all
+// instruction subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRUCTIONS_H
+#define LLVM_IR_INSTRUCTIONS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+class APInt;
+class ConstantInt;
+class DataLayout;
+class LLVMContext;
+
+//===----------------------------------------------------------------------===//
+//                                AllocaInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction to allocate memory on the stack.
+class AllocaInst : public UnaryInstruction {
+  Type *AllocatedType;
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  AllocaInst *cloneImpl() const;
+
+public:
+  explicit AllocaInst(Type *Ty, unsigned AddrSpace,
+                      Value *ArraySize = nullptr,
+                      const Twine &Name = "",
+                      Instruction *InsertBefore = nullptr);
+  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(Type *Ty, unsigned AddrSpace,
+             const Twine &Name, Instruction *InsertBefore = nullptr);
+  AllocaInst(Type *Ty, unsigned AddrSpace,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
+             const Twine &Name = "", Instruction *InsertBefore = nullptr);
+  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
+  /// Return true if there is an allocation size parameter to the allocation
+  /// instruction that is not 1.
+  bool isArrayAllocation() const;
+
+  /// Get the number of elements allocated. For a simple allocation of a single
+  /// element, this will return a constant 1 value.
+  const Value *getArraySize() const { return getOperand(0); }
+  Value *getArraySize() { return getOperand(0); }
+
+  /// Overload to return most specific pointer type.
+  PointerType *getType() const {
+    return cast<PointerType>(Instruction::getType());
+  }
+
+  /// Return the type that is being allocated by the instruction.
+  Type *getAllocatedType() const { return AllocatedType; }
+  /// For use only in special circumstances that need to generically
+  /// transform a whole instruction (e.g., IR linking and vectorization).
+  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
+
+  /// Return the alignment of the memory that is being allocated by the
+  /// instruction.
+  unsigned getAlignment() const {
+    return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
+  }
+  void setAlignment(unsigned Align);
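+
+  // Encoding note (illustrative): the low 5 bits of the subclass data hold
+  // Log2(Align) + 1, so a stored 4 decodes to (1u << 4) >> 1 == 8 bytes,
+  // while a stored 0 decodes to 0, meaning no alignment was specified.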
+
+  /// Return true if this alloca is in the entry block of the function and is a
+  /// constant size. If so, the code generator will fold it into the
+  /// prolog/epilog code, so it is basically free.
+  bool isStaticAlloca() const;
+
+  /// Return true if this alloca is used as an inalloca argument to a call. Such
+  /// allocas are never considered static even if they are in the entry block.
+  bool isUsedWithInAlloca() const {
+    return getSubclassDataFromInstruction() & 32;
+  }
+
+  /// Specify whether this alloca is used to represent the arguments to a call.
+  void setUsedWithInAlloca(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
+                               (V ? 32 : 0));
+  }
+
+  /// Return true if this alloca is used as a swifterror argument to a call.
+  bool isSwiftError() const {
+    return getSubclassDataFromInstruction() & 64;
+  }
+
+  /// Specify whether this alloca is used to represent a swifterror.
+  void setSwiftError(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
+                               (V ? 64 : 0));
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::Alloca);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                LoadInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction for reading from memory. This uses the SubclassData field in
+/// Value to store whether or not the load is volatile.
+class LoadInst : public UnaryInstruction {
+  void AssertOK();
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  LoadInst *cloneImpl() const;
+
+public:
+  LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
+  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+           Instruction *InsertBefore = nullptr);
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+                 NameStr, isVolatile, InsertBefore) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+           BasicBlock *InsertAtEnd);
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+                 NameStr, isVolatile, Align, InsertBefore) {}
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+           unsigned Align, Instruction *InsertBefore = nullptr);
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+           unsigned Align, BasicBlock *InsertAtEnd);
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+                 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+           unsigned Align, AtomicOrdering Order,
+           SyncScope::ID SSID = SyncScope::System,
+           Instruction *InsertBefore = nullptr);
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+           unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
+           BasicBlock *InsertAtEnd);
+  LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
+  LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
+  LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
+           bool isVolatile = false, Instruction *InsertBefore = nullptr);
+  explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
+                    bool isVolatile = false,
+                    Instruction *InsertBefore = nullptr)
+      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+                 NameStr, isVolatile, InsertBefore) {}
+  LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
+           BasicBlock *InsertAtEnd);
+
+  /// Return true if this is a load from a volatile memory location.
+  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+
+  /// Specify whether this is a volatile load or not.
+  void setVolatile(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+                               (V ? 1 : 0));
+  }
+
+  /// Return the alignment of the access that is being performed.
+  unsigned getAlignment() const {
+    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+  }
+
+  void setAlignment(unsigned Align);
+
+  /// Returns the ordering constraint of this load instruction.
+  AtomicOrdering getOrdering() const {
+    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+  }
+
+  /// Sets the ordering constraint of this load instruction.  May not be Release
+  /// or AcquireRelease.
+  void setOrdering(AtomicOrdering Ordering) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
+                               ((unsigned)Ordering << 7));
+  }
+
+  /// Returns the synchronization scope ID of this load instruction.
+  SyncScope::ID getSyncScopeID() const {
+    return SSID;
+  }
+
+  /// Sets the synchronization scope ID of this load instruction.
+  void setSyncScopeID(SyncScope::ID SSID) {
+    this->SSID = SSID;
+  }
+
+  /// Sets the ordering constraint and the synchronization scope ID of this load
+  /// instruction.
+  void setAtomic(AtomicOrdering Ordering,
+                 SyncScope::ID SSID = SyncScope::System) {
+    setOrdering(Ordering);
+    setSyncScopeID(SSID);
+  }
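+
+  // Usage sketch (illustrative): creating an acquire load of an i32 slot,
+  // assuming Ptr has type i32* and InsertPt is the insertion point:
+  //   LoadInst *LI = new LoadInst(Ptr, "val", /*isVolatile=*/false,
+  //                               /*Align=*/4, AtomicOrdering::Acquire,
+  //                               SyncScope::System, InsertPt);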
+
+  bool isSimple() const { return !isAtomic() && !isVolatile(); }
+
+  bool isUnordered() const {
+    return (getOrdering() == AtomicOrdering::NotAtomic ||
+            getOrdering() == AtomicOrdering::Unordered) &&
+           !isVolatile();
+  }
+
+  Value *getPointerOperand() { return getOperand(0); }
+  const Value *getPointerOperand() const { return getOperand(0); }
+  static unsigned getPointerOperandIndex() { return 0U; }
+  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperandType()->getPointerAddressSpace();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Load;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+
+  /// The synchronization scope ID of this load instruction.  Not quite enough
+  /// room in SubClassData for everything, so synchronization scope ID gets its
+  /// own field.
+  SyncScope::ID SSID;
+};
+
+//===----------------------------------------------------------------------===//
+//                                StoreInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction for storing to memory.
+class StoreInst : public Instruction {
+  void AssertOK();
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  StoreInst *cloneImpl() const;
+
+public:
+  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
+  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
+            Instruction *InsertBefore = nullptr);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+            unsigned Align, Instruction *InsertBefore = nullptr);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+            unsigned Align, BasicBlock *InsertAtEnd);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+            unsigned Align, AtomicOrdering Order,
+            SyncScope::ID SSID = SyncScope::System,
+            Instruction *InsertBefore = nullptr);
+  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+            unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
+            BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly two operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 2);
+  }
+
+  /// Return true if this is a store to a volatile memory location.
+  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+
+  /// Specify whether this is a volatile store or not.
+  void setVolatile(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+                               (V ? 1 : 0));
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Return the alignment of the access that is being performed
+  unsigned getAlignment() const {
+    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+  }
+
+  void setAlignment(unsigned Align);
+
+  /// Returns the ordering constraint of this store instruction.
+  AtomicOrdering getOrdering() const {
+    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+  }
+
+  /// Sets the ordering constraint of this store instruction.  May not be
+  /// Acquire or AcquireRelease.
+  void setOrdering(AtomicOrdering Ordering) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
+                               ((unsigned)Ordering << 7));
+  }
+
+  /// Returns the synchronization scope ID of this store instruction.
+  SyncScope::ID getSyncScopeID() const {
+    return SSID;
+  }
+
+  /// Sets the synchronization scope ID of this store instruction.
+  void setSyncScopeID(SyncScope::ID SSID) {
+    this->SSID = SSID;
+  }
+
+  /// Sets the ordering constraint and the synchronization scope ID of this
+  /// store instruction.
+  void setAtomic(AtomicOrdering Ordering,
+                 SyncScope::ID SSID = SyncScope::System) {
+    setOrdering(Ordering);
+    setSyncScopeID(SSID);
+  }
+
+  bool isSimple() const { return !isAtomic() && !isVolatile(); }
+
+  bool isUnordered() const {
+    return (getOrdering() == AtomicOrdering::NotAtomic ||
+            getOrdering() == AtomicOrdering::Unordered) &&
+           !isVolatile();
+  }
+
+  Value *getValueOperand() { return getOperand(0); }
+  const Value *getValueOperand() const { return getOperand(0); }
+
+  Value *getPointerOperand() { return getOperand(1); }
+  const Value *getPointerOperand() const { return getOperand(1); }
+  static unsigned getPointerOperandIndex() { return 1U; }
+  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperandType()->getPointerAddressSpace();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Store;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+
+  /// The synchronization scope ID of this store instruction.  Not quite enough
+  /// room in SubClassData for everything, so synchronization scope ID gets its
+  /// own field.
+  SyncScope::ID SSID;
+};
+
+template <>
+struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                                FenceInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction for ordering other memory operations.
+class FenceInst : public Instruction {
+  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  FenceInst *cloneImpl() const;
+
+public:
+  // Ordering may only be Acquire, Release, AcquireRelease, or
+  // SequentiallyConsistent.
+  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+            SyncScope::ID SSID = SyncScope::System,
+            Instruction *InsertBefore = nullptr);
+  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
+            BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 0);
+  }
+
+  /// Returns the ordering constraint of this fence instruction.
+  AtomicOrdering getOrdering() const {
+    return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
+  }
+
+  /// Sets the ordering constraint of this fence instruction.  May only be
+  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
+  void setOrdering(AtomicOrdering Ordering) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+                               ((unsigned)Ordering << 1));
+  }
+
+  /// Returns the synchronization scope ID of this fence instruction.
+  SyncScope::ID getSyncScopeID() const {
+    return SSID;
+  }
+
+  /// Sets the synchronization scope ID of this fence instruction.
+  void setSyncScopeID(SyncScope::ID SSID) {
+    this->SSID = SSID;
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Fence;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+
+  /// The synchronization scope ID of this fence instruction.  Not quite enough
+  /// room in SubClassData for everything, so synchronization scope ID gets its
+  /// own field.
+  SyncScope::ID SSID;
+};
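+
+// Illustrative sketch (not part of the upstream header): a seq_cst fence,
+// the strongest of the four orderings FenceInst accepts; the helper name is
+// hypothetical.
+inline FenceInst *exampleBuildSeqCstFence(LLVMContext &C,
+                                          Instruction *InsertBefore) {
+  return new FenceInst(C, AtomicOrdering::SequentiallyConsistent,
+                       SyncScope::System, InsertBefore);
+}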
+
+//===----------------------------------------------------------------------===//
+//                                AtomicCmpXchgInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction that atomically checks whether a
+/// specified value is in a memory location, and, if it is, stores a new value
+/// there.  Returns the value that was loaded.
+///
+class AtomicCmpXchgInst : public Instruction {
+  void Init(Value *Ptr, Value *Cmp, Value *NewVal,
+            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
+            SyncScope::ID SSID);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  AtomicCmpXchgInst *cloneImpl() const;
+
+public:
+  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+                    AtomicOrdering SuccessOrdering,
+                    AtomicOrdering FailureOrdering,
+                    SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
+  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+                    AtomicOrdering SuccessOrdering,
+                    AtomicOrdering FailureOrdering,
+                    SyncScope::ID SSID, BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly three operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 3);
+  }
+
+  /// Return true if this is a cmpxchg from a volatile memory
+  /// location.
+  ///
+  bool isVolatile() const {
+    return getSubclassDataFromInstruction() & 1;
+  }
+
+  /// Specify whether this is a volatile cmpxchg.
+  ///
+  void setVolatile(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+                               (unsigned)V);
+  }
+
+  /// Return true if this cmpxchg may spuriously fail.
+  bool isWeak() const {
+    return getSubclassDataFromInstruction() & 0x100;
+  }
+
+  void setWeak(bool IsWeak) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
+                               (IsWeak << 8));
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Returns the success ordering constraint of this cmpxchg instruction.
+  AtomicOrdering getSuccessOrdering() const {
+    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+  }
+
+  /// Sets the success ordering constraint of this cmpxchg instruction.
+  void setSuccessOrdering(AtomicOrdering Ordering) {
+    assert(Ordering != AtomicOrdering::NotAtomic &&
+           "CmpXchg instructions can only be atomic.");
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
+                               ((unsigned)Ordering << 2));
+  }
+
+  /// Returns the failure ordering constraint of this cmpxchg instruction.
+  AtomicOrdering getFailureOrdering() const {
+    return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+  }
+
+  /// Sets the failure ordering constraint of this cmpxchg instruction.
+  void setFailureOrdering(AtomicOrdering Ordering) {
+    assert(Ordering != AtomicOrdering::NotAtomic &&
+           "CmpXchg instructions can only be atomic.");
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
+                               ((unsigned)Ordering << 5));
+  }
+
+  /// Returns the synchronization scope ID of this cmpxchg instruction.
+  SyncScope::ID getSyncScopeID() const {
+    return SSID;
+  }
+
+  /// Sets the synchronization scope ID of this cmpxchg instruction.
+  void setSyncScopeID(SyncScope::ID SSID) {
+    this->SSID = SSID;
+  }
+
+  Value *getPointerOperand() { return getOperand(0); }
+  const Value *getPointerOperand() const { return getOperand(0); }
+  static unsigned getPointerOperandIndex() { return 0U; }
+
+  Value *getCompareOperand() { return getOperand(1); }
+  const Value *getCompareOperand() const { return getOperand(1); }
+
+  Value *getNewValOperand() { return getOperand(2); }
+  const Value *getNewValOperand() const { return getOperand(2); }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperand()->getType()->getPointerAddressSpace();
+  }
+
+  /// Returns the strongest permitted ordering on failure, given the
+  /// desired ordering on success.
+  ///
+  /// If the comparison in a cmpxchg operation fails, there is no atomic store
+  /// so release semantics cannot be provided. So this function drops explicit
+  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
+  /// operation would remain SequentiallyConsistent.
+  static AtomicOrdering
+  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
+    switch (SuccessOrdering) {
+    default:
+      llvm_unreachable("invalid cmpxchg success ordering");
+    case AtomicOrdering::Release:
+    case AtomicOrdering::Monotonic:
+      return AtomicOrdering::Monotonic;
+    case AtomicOrdering::AcquireRelease:
+    case AtomicOrdering::Acquire:
+      return AtomicOrdering::Acquire;
+    case AtomicOrdering::SequentiallyConsistent:
+      return AtomicOrdering::SequentiallyConsistent;
+    }
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::AtomicCmpXchg;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+
+  /// The synchronization scope ID of this cmpxchg instruction.  Not quite
+  /// enough room in SubClassData for everything, so synchronization scope ID
+  /// gets its own field.
+  SyncScope::ID SSID;
+};
+
+template <>
+struct OperandTraits<AtomicCmpXchgInst> :
+    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
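+
+// Illustrative sketch (not part of the upstream header): building a strong
+// seq_cst cmpxchg and deriving its failure ordering with the static helper
+// above; the function name is hypothetical.
+inline AtomicCmpXchgInst *exampleBuildCmpXchg(Value *Ptr, Value *Expected,
+                                              Value *Desired,
+                                              Instruction *InsertBefore) {
+  AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
+  // No store happens on failure, so Release is dropped from the ordering.
+  AtomicOrdering Failure =
+      AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+  return new AtomicCmpXchgInst(Ptr, Expected, Desired, Success, Failure,
+                               SyncScope::System, InsertBefore);
+}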
+
+//===----------------------------------------------------------------------===//
+//                                AtomicRMWInst Class
+//===----------------------------------------------------------------------===//
+
+/// An instruction that atomically reads a memory location,
+/// combines it with another value, and then stores the result back.  Returns
+/// the old value.
+///
+class AtomicRMWInst : public Instruction {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  AtomicRMWInst *cloneImpl() const;
+
+public:
+  /// This enumeration lists the possible modifications atomicrmw can make.  In
+  /// the descriptions, 'p' is the pointer to the instruction's memory location,
+  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
+  /// instruction.  These instructions always return 'old'.
+  enum BinOp {
+    /// *p = v
+    Xchg,
+    /// *p = old + v
+    Add,
+    /// *p = old - v
+    Sub,
+    /// *p = old & v
+    And,
+    /// *p = ~(old & v)
+    Nand,
+    /// *p = old | v
+    Or,
+    /// *p = old ^ v
+    Xor,
+    /// *p = old >signed v ? old : v
+    Max,
+    /// *p = old <signed v ? old : v
+    Min,
+    /// *p = old >unsigned v ? old : v
+    UMax,
+    /// *p = old <unsigned v ? old : v
+    UMin,
+
+    FIRST_BINOP = Xchg,
+    LAST_BINOP = UMin,
+    BAD_BINOP
+  };
+
+  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+                AtomicOrdering Ordering, SyncScope::ID SSID,
+                Instruction *InsertBefore = nullptr);
+  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+                AtomicOrdering Ordering, SyncScope::ID SSID,
+                BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly two operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 2);
+  }
+
+  BinOp getOperation() const {
+    return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
+  }
+
+  void setOperation(BinOp Operation) {
+    unsigned short SubclassData = getSubclassDataFromInstruction();
+    setInstructionSubclassData((SubclassData & 31) |
+                               (Operation << 5));
+  }
+
+  /// Return true if this is a RMW on a volatile memory location.
+  ///
+  bool isVolatile() const {
+    return getSubclassDataFromInstruction() & 1;
+  }
+
+  /// Specify whether this is a volatile RMW or not.
+  ///
+  void setVolatile(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+                               (unsigned)V);
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Returns the ordering constraint of this rmw instruction.
+  AtomicOrdering getOrdering() const {
+    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+  }
+
+  /// Sets the ordering constraint of this rmw instruction.
+  void setOrdering(AtomicOrdering Ordering) {
+    assert(Ordering != AtomicOrdering::NotAtomic &&
+           "atomicrmw instructions can only be atomic.");
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
+                               ((unsigned)Ordering << 2));
+  }
+
+  /// Returns the synchronization scope ID of this rmw instruction.
+  SyncScope::ID getSyncScopeID() const {
+    return SSID;
+  }
+
+  /// Sets the synchronization scope ID of this rmw instruction.
+  void setSyncScopeID(SyncScope::ID SSID) {
+    this->SSID = SSID;
+  }
+
+  Value *getPointerOperand() { return getOperand(0); }
+  const Value *getPointerOperand() const { return getOperand(0); }
+  static unsigned getPointerOperandIndex() { return 0U; }
+
+  Value *getValOperand() { return getOperand(1); }
+  const Value *getValOperand() const { return getOperand(1); }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperand()->getType()->getPointerAddressSpace();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::AtomicRMW;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  void Init(BinOp Operation, Value *Ptr, Value *Val,
+            AtomicOrdering Ordering, SyncScope::ID SSID);
+
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+
+  /// The synchronization scope ID of this rmw instruction.  Not quite enough
+  /// room in SubClassData for everything, so synchronization scope ID gets its
+  /// own field.
+  SyncScope::ID SSID;
+};
+
+template <>
+struct OperandTraits<AtomicRMWInst>
+    : public FixedNumOperandTraits<AtomicRMWInst,2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
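+
+// Illustrative sketch (not part of the upstream header): an atomic
+// fetch-and-add; the instruction yields the value *Ptr held before the add.
+// The helper name is hypothetical.
+inline AtomicRMWInst *exampleBuildAtomicAdd(Value *Ptr, Value *Delta,
+                                            Instruction *InsertBefore) {
+  return new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Delta,
+                           AtomicOrdering::SequentiallyConsistent,
+                           SyncScope::System, InsertBefore);
+}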
+
+//===----------------------------------------------------------------------===//
+//                             GetElementPtrInst Class
+//===----------------------------------------------------------------------===//
+
+// checkGEPType - Simple wrapper function to give a better assertion failure
+// message on bad indexes for a gep instruction.
+//
+inline Type *checkGEPType(Type *Ty) {
+  assert(Ty && "Invalid GetElementPtrInst indices for type!");
+  return Ty;
+}
+
+/// An instruction for type-safe pointer arithmetic to
+/// access elements of arrays and structs.
+///
+class GetElementPtrInst : public Instruction {
+  Type *SourceElementType;
+  Type *ResultElementType;
+
+  GetElementPtrInst(const GetElementPtrInst &GEPI);
+
+  /// Constructors - Create a getelementptr instruction with a base pointer
+  /// and a list of indices. The first ctor can optionally insert before an
+  /// existing instruction, the second appends the new instruction to the
+  /// specified BasicBlock.
+  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
+                           ArrayRef<Value *> IdxList, unsigned Values,
+                           const Twine &NameStr, Instruction *InsertBefore);
+  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
+                           ArrayRef<Value *> IdxList, unsigned Values,
+                           const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  GetElementPtrInst *cloneImpl() const;
+
+public:
+  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
+                                   ArrayRef<Value *> IdxList,
+                                   const Twine &NameStr = "",
+                                   Instruction *InsertBefore = nullptr) {
+    unsigned Values = 1 + unsigned(IdxList.size());
+    if (!PointeeType)
+      PointeeType =
+          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
+    else
+      assert(
+          PointeeType ==
+          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
+    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
+                                          NameStr, InsertBefore);
+  }
+
+  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
+                                   ArrayRef<Value *> IdxList,
+                                   const Twine &NameStr,
+                                   BasicBlock *InsertAtEnd) {
+    unsigned Values = 1 + unsigned(IdxList.size());
+    if (!PointeeType)
+      PointeeType =
+          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
+    else
+      assert(
+          PointeeType ==
+          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
+    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
+                                          NameStr, InsertAtEnd);
+  }
+
+  /// Create an "inbounds" getelementptr. See the documentation for the
+  /// "inbounds" flag in LangRef.html for details.
+  static GetElementPtrInst *CreateInBounds(Value *Ptr,
+                                           ArrayRef<Value *> IdxList,
+                                           const Twine &NameStr = "",
+                                           Instruction *InsertBefore = nullptr){
+    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
+  }
+
+  static GetElementPtrInst *
+  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
+                 const Twine &NameStr = "",
+                 Instruction *InsertBefore = nullptr) {
+    GetElementPtrInst *GEP =
+        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
+    GEP->setIsInBounds(true);
+    return GEP;
+  }
+
+  static GetElementPtrInst *CreateInBounds(Value *Ptr,
+                                           ArrayRef<Value *> IdxList,
+                                           const Twine &NameStr,
+                                           BasicBlock *InsertAtEnd) {
+    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
+  }
+
+  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
+                                           ArrayRef<Value *> IdxList,
+                                           const Twine &NameStr,
+                                           BasicBlock *InsertAtEnd) {
+    GetElementPtrInst *GEP =
+        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
+    GEP->setIsInBounds(true);
+    return GEP;
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  Type *getSourceElementType() const { return SourceElementType; }
+
+  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
+  void setResultElementType(Type *Ty) { ResultElementType = Ty; }
+
+  Type *getResultElementType() const {
+    assert(ResultElementType ==
+           cast<PointerType>(getType()->getScalarType())->getElementType());
+    return ResultElementType;
+  }
+
+  /// Returns the address space of this instruction's pointer type.
+  unsigned getAddressSpace() const {
+    // Note that this is always the same as the pointer operand's address space
+    // and that is cheaper to compute, so cheat here.
+    return getPointerAddressSpace();
+  }
+
+  /// Returns the type of the element that would be loaded with
+  /// a load instruction with the specified parameters.
+  ///
+  /// Null is returned if the indices are invalid for the specified
+  /// pointer type.
+  ///
+  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
+  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
+  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
+
+  inline op_iterator       idx_begin()       { return op_begin()+1; }
+  inline const_op_iterator idx_begin() const { return op_begin()+1; }
+  inline op_iterator       idx_end()         { return op_end(); }
+  inline const_op_iterator idx_end()   const { return op_end(); }
+
+  inline iterator_range<op_iterator> indices() {
+    return make_range(idx_begin(), idx_end());
+  }
+
+  inline iterator_range<const_op_iterator> indices() const {
+    return make_range(idx_begin(), idx_end());
+  }
+
+  Value *getPointerOperand() {
+    return getOperand(0);
+  }
+  const Value *getPointerOperand() const {
+    return getOperand(0);
+  }
+  static unsigned getPointerOperandIndex() {
+    return 0U;    // get index for modifying correct operand.
+  }
+
+  /// Method to return the pointer operand as a
+  /// PointerType.
+  Type *getPointerOperandType() const {
+    return getPointerOperand()->getType();
+  }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperandType()->getPointerAddressSpace();
+  }
+
+  /// Returns the pointer type returned by the GEP
+  /// instruction, which may be a vector of pointers.
+  static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
+    return getGEPReturnType(
+      cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
+      Ptr, IdxList);
+  }
+  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
+                                ArrayRef<Value *> IdxList) {
+    Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
+                                   Ptr->getType()->getPointerAddressSpace());
+    // Vector GEP
+    if (Ptr->getType()->isVectorTy()) {
+      unsigned NumElem = Ptr->getType()->getVectorNumElements();
+      return VectorType::get(PtrTy, NumElem);
+    }
+    for (Value *Index : IdxList)
+      if (Index->getType()->isVectorTy()) {
+        unsigned NumElem = Index->getType()->getVectorNumElements();
+        return VectorType::get(PtrTy, NumElem);
+      }
+    // Scalar GEP
+    return PtrTy;
+  }
+
+  unsigned getNumIndices() const {  // Note: always non-negative
+    return getNumOperands() - 1;
+  }
+
+  bool hasIndices() const {
+    return getNumOperands() > 1;
+  }
+
+  /// Return true if all of the indices of this GEP are
+  /// zeros.  If so, the result pointer and the first operand have the same
+  /// value, just potentially different types.
+  bool hasAllZeroIndices() const;
+
+  /// Return true if all of the indices of this GEP are
+  /// constant integers.  If so, the result pointer and the first operand have
+  /// a constant offset between them.
+  bool hasAllConstantIndices() const;
+
+  /// Set or clear the inbounds flag on this GEP instruction.
+  /// See LangRef.html for the meaning of inbounds on a getelementptr.
+  void setIsInBounds(bool b = true);
+
+  /// Determine whether the GEP has the inbounds flag.
+  bool isInBounds() const;
+
+  /// Accumulate the constant address offset of this GEP if possible.
+  ///
+  /// This routine accepts an APInt into which it will accumulate the constant
+  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
+  /// all-constant, it returns false and the value of the offset APInt is
+  /// undefined (it is *not* preserved!). The APInt passed into this routine
+  /// must be at least as wide as the IntPtr type for the address space of
+  /// the base GEP pointer.
+  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::GetElementPtr);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<GetElementPtrInst> :
+  public VariadicOperandTraits<GetElementPtrInst, 1> {
+};
+
+GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
+                                     ArrayRef<Value *> IdxList, unsigned Values,
+                                     const Twine &NameStr,
+                                     Instruction *InsertBefore)
+    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
+                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
+                  Values, InsertBefore),
+      SourceElementType(PointeeType),
+      ResultElementType(getIndexedType(PointeeType, IdxList)) {
+  assert(ResultElementType ==
+         cast<PointerType>(getType()->getScalarType())->getElementType());
+  init(Ptr, IdxList, NameStr);
+}
+
+GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
+                                     ArrayRef<Value *> IdxList, unsigned Values,
+                                     const Twine &NameStr,
+                                     BasicBlock *InsertAtEnd)
+    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
+                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
+                  Values, InsertAtEnd),
+      SourceElementType(PointeeType),
+      ResultElementType(getIndexedType(PointeeType, IdxList)) {
+  assert(ResultElementType ==
+         cast<PointerType>(getType()->getScalarType())->getElementType());
+  init(Ptr, IdxList, NameStr);
+}
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
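+
+// Illustrative sketch (not part of the upstream header): an inbounds GEP
+// over an aggregate; the indices select first the pointed-to object, then a
+// member within it. The helper name is hypothetical.
+inline GetElementPtrInst *exampleBuildFieldAddr(Type *SourceTy, Value *Ptr,
+                                                ArrayRef<Value *> Idxs,
+                                                Instruction *InsertBefore) {
+  GetElementPtrInst *GEP =
+      GetElementPtrInst::CreateInBounds(SourceTy, Ptr, Idxs, "field.addr",
+                                        InsertBefore);
+  assert(GEP->isInBounds() && GEP->getNumIndices() == Idxs.size());
+  return GEP;
+}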
+
+//===----------------------------------------------------------------------===//
+//                               ICmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction compares its operands according to the predicate given
+/// to the constructor. It only operates on integers or pointers. The operands
+/// must be identical types.
+/// Represent an integer comparison operator.
+class ICmpInst: public CmpInst {
+  void AssertOK() {
+    assert(isIntPredicate() &&
+           "Invalid ICmp predicate value");
+    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+          "Both operands to ICmp instruction are not of the same type!");
+    // Check that the operands are the right type
+    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
+            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
+           "Invalid operand types for ICmp instruction");
+  }
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical ICmpInst
+  ICmpInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics.
+  ICmpInst(
+    Instruction *InsertBefore,  ///< Where to insert
+    Predicate pred,  ///< The predicate to use for the comparison
+    Value *LHS,      ///< The left-hand-side of the expression
+    Value *RHS,      ///< The right-hand-side of the expression
+    const Twine &NameStr = ""  ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::ICmp, pred, LHS, RHS, NameStr,
+              InsertBefore) {
+#ifndef NDEBUG
+  AssertOK();
+#endif
+  }
+
+  /// Constructor with insert-at-end semantics.
+  ICmpInst(
+    BasicBlock &InsertAtEnd, ///< Block to insert into.
+    Predicate pred,  ///< The predicate to use for the comparison
+    Value *LHS,      ///< The left-hand-side of the expression
+    Value *RHS,      ///< The right-hand-side of the expression
+    const Twine &NameStr = ""  ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::ICmp, pred, LHS, RHS, NameStr,
+              &InsertAtEnd) {
+#ifndef NDEBUG
+  AssertOK();
+#endif
+  }
+
+  /// Constructor with no-insertion semantics
+  ICmpInst(
+    Predicate pred, ///< The predicate to use for the comparison
+    Value *LHS,     ///< The left-hand-side of the expression
+    Value *RHS,     ///< The right-hand-side of the expression
+    const Twine &NameStr = "" ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::ICmp, pred, LHS, RHS, NameStr) {
+#ifndef NDEBUG
+  AssertOK();
+#endif
+  }
+
+  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
+  /// @returns the predicate that would be the result if the operand were
+  /// regarded as signed.
+  /// Return the signed version of the predicate
+  Predicate getSignedPredicate() const {
+    return getSignedPredicate(getPredicate());
+  }
+
+  /// This is a static version that you can use without an instruction.
+  /// Return the signed version of the predicate.
+  static Predicate getSignedPredicate(Predicate pred);
+
+  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
+  /// @returns the predicate that would be the result if the operand were
+  /// regarded as unsigned.
+  /// Return the unsigned version of the predicate
+  Predicate getUnsignedPredicate() const {
+    return getUnsignedPredicate(getPredicate());
+  }
+
+  /// This is a static version that you can use without an instruction.
+  /// Return the unsigned version of the predicate.
+  static Predicate getUnsignedPredicate(Predicate pred);
+
+  /// Return true if this predicate is either EQ or NE.  This also
+  /// tests for commutativity.
+  static bool isEquality(Predicate P) {
+    return P == ICMP_EQ || P == ICMP_NE;
+  }
+
+  /// Return true if this predicate is either EQ or NE.  This also
+  /// tests for commutativity.
+  bool isEquality() const {
+    return isEquality(getPredicate());
+  }
+
+  /// @returns true if the predicate of this ICmpInst is commutative
+  /// Determine if this relation is commutative.
+  bool isCommutative() const { return isEquality(); }
+
+  /// Return true if the predicate is relational (not EQ or NE).
+  ///
+  bool isRelational() const {
+    return !isEquality();
+  }
+
+  /// Return true if the predicate is relational (not EQ or NE).
+  ///
+  static bool isRelational(Predicate P) {
+    return !isEquality(P);
+  }
+
+  /// Exchange the two operands to this instruction in such a way that it does
+  /// not modify the semantics of the instruction. The predicate value may be
+  /// changed to retain the same result if the predicate is order dependent
+  /// (e.g. ult).
+  /// Swap operands and adjust predicate.
+  void swapOperands() {
+    setPredicate(getSwappedPredicate());
+    Op<0>().swap(Op<1>());
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::ICmp;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
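+
+// Illustrative sketch (not part of the upstream header): a free-standing
+// signed compare, then a semantics-preserving operand swap; SLT becomes SGT
+// so the result is unchanged. The helper name is hypothetical.
+inline ICmpInst *exampleBuildSignedLess(Value *LHS, Value *RHS) {
+  ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS, "is.less");
+  Cmp->swapOperands(); // predicate is now ICMP_SGT, operands exchanged
+  return Cmp;
+}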
+
+//===----------------------------------------------------------------------===//
+//                               FCmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction compares its operands according to the predicate given
+/// to the constructor. It only operates on floating point values or packed
+/// vectors of floating point values. The operands must be identical types.
+/// Represents a floating point comparison operator.
+class FCmpInst: public CmpInst {
+  void AssertOK() {
+    assert(isFPPredicate() && "Invalid FCmp predicate value");
+    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+           "Both operands to FCmp instruction are not of the same type!");
+    // Check that the operands are the right type
+    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
+           "Invalid operand types for FCmp instruction");
+  }
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical FCmpInst
+  FCmpInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics.
+  FCmpInst(
+    Instruction *InsertBefore, ///< Where to insert
+    Predicate pred,  ///< The predicate to use for the comparison
+    Value *LHS,      ///< The left-hand-side of the expression
+    Value *RHS,      ///< The right-hand-side of the expression
+    const Twine &NameStr = ""  ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::FCmp, pred, LHS, RHS, NameStr,
+              InsertBefore) {
+    AssertOK();
+  }
+
+  /// Constructor with insert-at-end semantics.
+  FCmpInst(
+    BasicBlock &InsertAtEnd, ///< Block to insert into.
+    Predicate pred,  ///< The predicate to use for the comparison
+    Value *LHS,      ///< The left-hand-side of the expression
+    Value *RHS,      ///< The right-hand-side of the expression
+    const Twine &NameStr = ""  ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::FCmp, pred, LHS, RHS, NameStr,
+              &InsertAtEnd) {
+    AssertOK();
+  }
+
+  /// Constructor with no-insertion semantics
+  FCmpInst(
+    Predicate pred, ///< The predicate to use for the comparison
+    Value *LHS,     ///< The left-hand-side of the expression
+    Value *RHS,     ///< The right-hand-side of the expression
+    const Twine &NameStr = "" ///< Name of the instruction
+  ) : CmpInst(makeCmpResultType(LHS->getType()),
+              Instruction::FCmp, pred, LHS, RHS, NameStr) {
+    AssertOK();
+  }
+
+  /// @returns true if the predicate of this instruction is EQ or NE.
+  /// Determine if this is an equality predicate.
+  static bool isEquality(Predicate Pred) {
+    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
+           Pred == FCMP_UNE;
+  }
+
+  /// @returns true if the predicate of this instruction is EQ or NE.
+  /// Determine if this is an equality predicate.
+  bool isEquality() const { return isEquality(getPredicate()); }
+
+  /// @returns true if the predicate of this instruction is commutative.
+  /// Determine if this is a commutative predicate.
+  bool isCommutative() const {
+    return isEquality() ||
+           getPredicate() == FCMP_FALSE ||
+           getPredicate() == FCMP_TRUE ||
+           getPredicate() == FCMP_ORD ||
+           getPredicate() == FCMP_UNO;
+  }
+
+  /// @returns true if the predicate is relational (not EQ or NE).
+  /// Determine if this is a relational predicate.
+  bool isRelational() const { return !isEquality(); }
+
+  /// Exchange the two operands to this instruction in such a way that it does
+  /// not modify the semantics of the instruction. The predicate value may be
+  /// changed to retain the same result if the predicate is order dependent
+  /// (e.g. ult).
+  /// Swap operands and adjust predicate.
+  void swapOperands() {
+    setPredicate(getSwappedPredicate());
+    Op<0>().swap(Op<1>());
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::FCmp;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
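+
+// Illustrative sketch (not part of the upstream header): an ordered
+// equality compare; OEQ is an equality predicate, hence commutative per
+// isCommutative() above. The helper name is hypothetical.
+inline FCmpInst *exampleBuildOrderedEq(Value *LHS, Value *RHS) {
+  FCmpInst *Cmp = new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS, "feq");
+  assert(Cmp->isCommutative() && !Cmp->isRelational());
+  return Cmp;
+}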
+
+class CallInst;
+class InvokeInst;
+
+template <class T> struct CallBaseParent { using type = Instruction; };
+
+template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
+
+//===----------------------------------------------------------------------===//
+/// Base class for all callable instructions (InvokeInst and CallInst).
+/// Holds everything related to calling a function, abstracting from the base
+/// type @p BaseInstTy and the concrete instruction @p InstTy.
+///
+template <class InstTy>
+class CallBase : public CallBaseParent<InstTy>::type,
+                 public OperandBundleUser<InstTy, User::op_iterator> {
+protected:
+  AttributeList Attrs; ///< parameter attributes for callable
+  FunctionType *FTy;
+  using BaseInstTy = typename CallBaseParent<InstTy>::type;
+
+  template <class... ArgsTy>
+  CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
+      : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
+  bool hasDescriptor() const { return Value::HasDescriptor; }
+
+  using BaseInstTy::BaseInstTy;
+
+  using OperandBundleUser<InstTy,
+                          User::op_iterator>::isFnAttrDisallowedByOpBundle;
+  using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
+  using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
+  using Instruction::getSubclassDataFromInstruction;
+  using Instruction::setInstructionSubclassData;
+
+public:
+  using Instruction::getContext;
+  using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
+  using OperandBundleUser<InstTy,
+                          User::op_iterator>::getBundleOperandsStartIndex;
+
+  static bool classof(const Instruction *I) {
+    llvm_unreachable(
+        "CallBase is not meant to be used as part of the classof hierarchy");
+  }
+
+public:
+  /// Return the parameter attributes for this call.
+  ///
+  AttributeList getAttributes() const { return Attrs; }
+
+  /// Set the parameter attributes for this call.
+  ///
+  void setAttributes(AttributeList A) { Attrs = A; }
+
+  FunctionType *getFunctionType() const { return FTy; }
+
+  void mutateFunctionType(FunctionType *FTy) {
+    Value::mutateType(FTy->getReturnType());
+    this->FTy = FTy;
+  }
+
+  /// Return the number of call arguments.
+  ///
+  unsigned getNumArgOperands() const {
+    return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
+  }
+
+  /// getArgOperand/setArgOperand - Return/set the i-th call argument.
+  ///
+  Value *getArgOperand(unsigned i) const {
+    assert(i < getNumArgOperands() && "Out of bounds!");
+    return getOperand(i);
+  }
+  void setArgOperand(unsigned i, Value *v) {
+    assert(i < getNumArgOperands() && "Out of bounds!");
+    setOperand(i, v);
+  }
+
+  /// Return the iterator pointing to the beginning of the argument list.
+  User::op_iterator arg_begin() { return op_begin(); }
+
+  /// Return the iterator pointing to the end of the argument list.
+  User::op_iterator arg_end() {
+    // [ call args ], [ operand bundles ], callee
+    return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
+  }
+
+  /// Iteration adapter for range-for loops.
+  iterator_range<User::op_iterator> arg_operands() {
+    return make_range(arg_begin(), arg_end());
+  }
+
+  /// Return the iterator pointing to the beginning of the argument list.
+  User::const_op_iterator arg_begin() const { return op_begin(); }
+
+  /// Return the iterator pointing to the end of the argument list.
+  User::const_op_iterator arg_end() const {
+    // [ call args ], [ operand bundles ], callee
+    return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
+  }
+
+  /// Iteration adapter for range-for loops.
+  iterator_range<User::const_op_iterator> arg_operands() const {
+    return make_range(arg_begin(), arg_end());
+  }
+
+  /// Wrappers for getting the \c Use of a call argument.
+  const Use &getArgOperandUse(unsigned i) const {
+    assert(i < getNumArgOperands() && "Out of bounds!");
+    return User::getOperandUse(i);
+  }
+  Use &getArgOperandUse(unsigned i) {
+    assert(i < getNumArgOperands() && "Out of bounds!");
+    return User::getOperandUse(i);
+  }
+
+  /// If one of the arguments has the 'returned' attribute, return its
+  /// operand value. Otherwise, return nullptr.
+  Value *getReturnedArgOperand() const {
+    unsigned Index;
+
+    if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
+      return getArgOperand(Index - AttributeList::FirstArgIndex);
+    if (const Function *F = getCalledFunction())
+      if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
+          Index)
+        return getArgOperand(Index - AttributeList::FirstArgIndex);
+
+    return nullptr;
+  }
+
+  User::op_iterator op_begin() {
+    return OperandTraits<CallBase>::op_begin(this);
+  }
+
+  User::const_op_iterator op_begin() const {
+    return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
+  }
+
+  User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
+
+  User::const_op_iterator op_end() const {
+    return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
+  }
+
+  Value *getOperand(unsigned i_nocapture) const {
+    assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
+           "getOperand() out of range!");
+    return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
+                                   const_cast<CallBase *>(this))[i_nocapture]
+                                   .get());
+  }
+
+  void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
+    assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
+           "setOperand() out of range!");
+    OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
+  }
+
+  unsigned getNumOperands() const {
+    return OperandTraits<CallBase>::operands(this);
+  }
+  template <int Idx_nocapture> Use &Op() {
+    return User::OpFrom<Idx_nocapture>(this);
+  }
+  template <int Idx_nocapture> const Use &Op() const {
+    return User::OpFrom<Idx_nocapture>(this);
+  }
+
+  /// Return the function called, or null if this is an
+  /// indirect function invocation.
+  ///
+  Function *getCalledFunction() const {
+    return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
+  }
+
+  /// Determine whether this call has the given attribute.
+  bool hasFnAttr(Attribute::AttrKind Kind) const {
+    assert(Kind != Attribute::NoBuiltin &&
+           "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
+    return hasFnAttrImpl(Kind);
+  }
+
+  /// Determine whether this call has the given attribute.
+  bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
+
+  /// getCallingConv/setCallingConv - Get or set the calling convention of this
+  /// function call.
+  CallingConv::ID getCallingConv() const {
+    return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
+  }
+  void setCallingConv(CallingConv::ID CC) {
+    auto ID = static_cast<unsigned>(CC);
+    assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+    setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
+                               (ID << 2));
+  }
+
+  /// adds the attribute to the list of attributes.
+  void addAttribute(unsigned i, Attribute::AttrKind Kind) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addAttribute(getContext(), i, Kind);
+    setAttributes(PAL);
+  }
+
+  /// adds the attribute to the list of attributes.
+  void addAttribute(unsigned i, Attribute Attr) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addAttribute(getContext(), i, Attr);
+    setAttributes(PAL);
+  }
+
+  /// Adds the attribute to the indicated argument
+  void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
+    setAttributes(PAL);
+  }
+
+  /// Adds the attribute to the indicated argument
+  void addParamAttr(unsigned ArgNo, Attribute Attr) {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
+    setAttributes(PAL);
+  }
+
+  /// removes the attribute from the list of attributes.
+  void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.removeAttribute(getContext(), i, Kind);
+    setAttributes(PAL);
+  }
+
+  /// removes the attribute from the list of attributes.
+  void removeAttribute(unsigned i, StringRef Kind) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.removeAttribute(getContext(), i, Kind);
+    setAttributes(PAL);
+  }
+
+  /// Removes the attribute from the given argument
+  void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    AttributeList PAL = getAttributes();
+    PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+    setAttributes(PAL);
+  }
+
+  /// Removes the attribute from the given argument
+  void removeParamAttr(unsigned ArgNo, StringRef Kind) {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    AttributeList PAL = getAttributes();
+    PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+    setAttributes(PAL);
+  }
+
+  /// adds the dereferenceable attribute to the list of attributes.
+  void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
+    setAttributes(PAL);
+  }
+
+  /// adds the dereferenceable_or_null attribute to the list of
+  /// attributes.
+  void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
+    AttributeList PAL = getAttributes();
+    PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
+    setAttributes(PAL);
+  }
+
+  /// Determine whether the return value has the given attribute.
+  bool hasRetAttr(Attribute::AttrKind Kind) const {
+    if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
+      return true;
+
+    // Look at the callee, if available.
+    if (const Function *F = getCalledFunction())
+      return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
+    return false;
+  }
+
+  /// Determine whether the argument or parameter has the given attribute.
+  bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
+
+    if (Attrs.hasParamAttribute(ArgNo, Kind))
+      return true;
+    if (const Function *F = getCalledFunction())
+      return F->getAttributes().hasParamAttribute(ArgNo, Kind);
+    return false;
+  }
+
+  /// Get the attribute of a given kind at a position.
+  Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+    return getAttributes().getAttribute(i, Kind);
+  }
+
+  /// Get the attribute of a given kind at a position.
+  Attribute getAttribute(unsigned i, StringRef Kind) const {
+    return getAttributes().getAttribute(i, Kind);
+  }
+
+  /// Get the attribute of a given kind from a given arg
+  Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    return getAttributes().getParamAttr(ArgNo, Kind);
+  }
+
+  /// Get the attribute of a given kind from a given arg
+  Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
+    assert(ArgNo < getNumArgOperands() && "Out of bounds");
+    return getAttributes().getParamAttr(ArgNo, Kind);
+  }
+
+  /// Return true if the data operand at index \p i has the attribute \p
+  /// A.
+  ///
+  /// Data operands include call arguments and values used in operand bundles,
+  /// but does not include the callee operand.  This routine dispatches to the
+  /// underlying AttributeList or the OperandBundleUser as appropriate.
+  ///
+  /// The index \p i is interpreted as
+  ///
+  ///  \p i == Attribute::ReturnIndex  -> the return value
+  ///  \p i in [1, arg_size + 1)  -> argument number (\p i - 1)
+  ///  \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
+  ///     (\p i - 1) in the operand list.
+  bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
+    // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
+    // The last operand is the callee.
+    assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&
+           "Data operand index out of bounds!");
+
+    // The attribute A can either be directly specified, if the operand in
+    // question is a call argument; or be indirectly implied by the kind of its
+    // containing operand bundle, if the operand is a bundle operand.
+
+    if (i == AttributeList::ReturnIndex)
+      return hasRetAttr(Kind);
+
+    // FIXME: Avoid these i - 1 calculations and update the API to use
+    // zero-based indices.
+    if (i < (getNumArgOperands() + 1))
+      return paramHasAttr(i - 1, Kind);
+
+    assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
+           "Must be either a call argument or an operand bundle!");
+    return bundleOperandHasAttr(i - 1, Kind);
+  }
+
+  /// Extract the alignment of the return value.
+  unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
+
+  /// Extract the alignment for a call or parameter (0=unknown).
+  unsigned getParamAlignment(unsigned ArgNo) const {
+    return Attrs.getParamAlignment(ArgNo);
+  }
+
+  /// Extract the number of dereferenceable bytes for a call or
+  /// parameter (0=unknown).
+  uint64_t getDereferenceableBytes(unsigned i) const {
+    return Attrs.getDereferenceableBytes(i);
+  }
+
+  /// Extract the number of dereferenceable_or_null bytes for a call or
+  /// parameter (0=unknown).
+  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+    return Attrs.getDereferenceableOrNullBytes(i);
+  }
+
+  /// @brief Determine if the return value is marked with the NoAlias
+  /// attribute.
+  bool returnDoesNotAlias() const {
+    return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+  }
+
+  /// Return true if the call should not be treated as a call to a
+  /// builtin.
+  bool isNoBuiltin() const {
+    return hasFnAttrImpl(Attribute::NoBuiltin) &&
+      !hasFnAttrImpl(Attribute::Builtin);
+  }
+
+  /// Determine if the call requires strict floating point semantics.
+  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
+  /// Return true if the call should not be inlined.
+  bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
+  void setIsNoInline() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
+  }
+
+  /// Determine if the call does not access memory.
+  bool doesNotAccessMemory() const {
+    return hasFnAttr(Attribute::ReadNone);
+  }
+  void setDoesNotAccessMemory() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
+  }
+
+  /// Determine if the call does not access or only reads memory.
+  bool onlyReadsMemory() const {
+    return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+  }
+  void setOnlyReadsMemory() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
+  }
+
+  /// Determine if the call does not access or only writes memory.
+  bool doesNotReadMemory() const {
+    return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
+  }
+  void setDoesNotReadMemory() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
+  }
+
+  /// @brief Determine if the call can access memory only using pointers based
+  /// on its arguments.
+  bool onlyAccessesArgMemory() const {
+    return hasFnAttr(Attribute::ArgMemOnly);
+  }
+  void setOnlyAccessesArgMemory() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
+  }
+
+  /// @brief Determine if the function may only access memory that is
+  /// inaccessible from the IR.
+  bool onlyAccessesInaccessibleMemory() const {
+    return hasFnAttr(Attribute::InaccessibleMemOnly);
+  }
+  void setOnlyAccessesInaccessibleMemory() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
+  }
+
+  /// @brief Determine if the function may only access memory that is
+  /// either inaccessible from the IR or pointed to by its arguments.
+  bool onlyAccessesInaccessibleMemOrArgMem() const {
+    return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
+  }
+  void setOnlyAccessesInaccessibleMemOrArgMem() {
+    addAttribute(AttributeList::FunctionIndex,
+                 Attribute::InaccessibleMemOrArgMemOnly);
+  }
+
+  /// Determine if the call cannot return.
+  bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
+  void setDoesNotReturn() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
+  }
+
+  /// Determine if the call should not perform indirect branch tracking.
+  bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
+
+  /// Determine if the call cannot unwind.
+  bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
+  void setDoesNotThrow() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
+  }
+
+  /// Determine if the invoke cannot be duplicated.
+  bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
+  void setCannotDuplicate() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
+  }
+
+  /// Determine if the invoke is convergent
+  bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
+  void setConvergent() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
+  }
+  void setNotConvergent() {
+    removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
+  }
+
+  /// Determine if the call returns a structure through first
+  /// pointer argument.
+  bool hasStructRetAttr() const {
+    if (getNumArgOperands() == 0)
+      return false;
+
+    // Be friendly and also check the callee.
+    return paramHasAttr(0, Attribute::StructRet);
+  }
+
+  /// Determine if any call argument is an aggregate passed by value.
+  bool hasByValArgument() const {
+    return Attrs.hasAttrSomewhere(Attribute::ByVal);
+  }
+
+  /// Get a pointer to the function that is invoked by this
+  /// instruction.
+  const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
+  Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
+
+  /// Set the function called.
+  void setCalledFunction(Value* Fn) {
+    setCalledFunction(
+        cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
+        Fn);
+  }
+  void setCalledFunction(FunctionType *FTy, Value *Fn) {
+    this->FTy = FTy;
+    assert(FTy == cast<FunctionType>(
+                      cast<PointerType>(Fn->getType())->getElementType()));
+    Op<-InstTy::ArgOffset>() = Fn;
+  }
+
+protected:
+  template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
+    if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
+      return true;
+
+    // Operand bundles override attributes on the called function, but don't
+    // override attributes directly present on the call instruction.
+    if (isFnAttrDisallowedByOpBundle(Kind))
+      return false;
+
+    if (const Function *F = getCalledFunction())
+      return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
+                                             Kind);
+    return false;
+  }
+};
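+
+// Illustrative sketch (not part of the upstream header): combining two of
+// the memory-attribute queries above; a call satisfying both reads, at
+// most, memory reachable from its arguments. The helper name is
+// hypothetical.
+template <class InstTy>
+inline bool exampleOnlyReadsArgMemory(const CallBase<InstTy> &CB) {
+  return CB.onlyReadsMemory() && CB.onlyAccessesArgMemory();
+}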
+
+//===----------------------------------------------------------------------===//
+/// This class represents a function call, abstracting a target
+/// machine's calling convention.  This class uses the low bit of the
+/// SubClassData field to indicate whether or not this is a tail call.  The
+/// rest of the bits hold the calling convention of the call.
+///
+class CallInst : public CallBase<CallInst> {
+  friend class OperandBundleUser<CallInst, User::op_iterator>;
+
+  CallInst(const CallInst &CI);
+
+  /// Construct a CallInst given a range of arguments.
+  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+                  Instruction *InsertBefore);
+
+  inline CallInst(Value *Func, ArrayRef<Value *> Args,
+                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+                  Instruction *InsertBefore)
+      : CallInst(cast<FunctionType>(
+                     cast<PointerType>(Func->getType())->getElementType()),
+                 Func, Args, Bundles, NameStr, InsertBefore) {}
+
+  inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
+                  Instruction *InsertBefore)
+      : CallInst(Func, Args, None, NameStr, InsertBefore) {}
+
+  /// Construct a CallInst given a range of arguments.
+  inline CallInst(Value *Func, ArrayRef<Value *> Args,
+                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+                  BasicBlock *InsertAtEnd);
+
+  explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
+
+  CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  void init(Value *Func, ArrayRef<Value *> Args,
+            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
+    init(cast<FunctionType>(
+             cast<PointerType>(Func->getType())->getElementType()),
+         Func, Args, Bundles, NameStr);
+  }
+  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
+            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+  void init(Value *Func, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  CallInst *cloneImpl() const;
+
+public:
+  static constexpr int ArgOffset = 1;
+
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          ArrayRef<OperandBundleDef> Bundles = None,
+                          const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, Bundles, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr,
+                          Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, None, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr,
+                          Instruction *InsertBefore = nullptr) {
+    return new (unsigned(Args.size() + 1))
+        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                          ArrayRef<OperandBundleDef> Bundles = None,
+                          const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    const unsigned TotalOps =
+        unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+    return new (TotalOps, DescriptorBytes)
+        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
+  }
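+
+  // A usage sketch for the Create overloads above (illustrative only;
+  // Callee, X, Y and InsertPt are assumed to exist in the caller):
+  //
+  //   Value *Args[] = {X, Y};
+  //   CallInst *Call = CallInst::Create(Callee, Args, "sum", InsertPt);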
+
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          ArrayRef<OperandBundleDef> Bundles,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    const unsigned TotalOps =
+        unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+    return new (TotalOps, DescriptorBytes)
+        CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
+  }
+
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return new (unsigned(Args.size() + 1))
+        CallInst(Func, Args, None, NameStr, InsertAtEnd);
+  }
+
+  static CallInst *Create(Value *F, const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return new (1) CallInst(F, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(Value *F, const Twine &NameStr,
+                          BasicBlock *InsertAtEnd) {
+    return new (1) CallInst(F, NameStr, InsertAtEnd);
+  }
+
+  /// Create a clone of \p CI with a different set of operand bundles and
+  /// insert it before \p InsertPt.
+  ///
+  /// The returned call instruction is identical to \p CI in every way except
+  /// the operand bundles for the new instruction are set to the operand bundles
+  /// in \p Bundles.
+  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
+                          Instruction *InsertPt = nullptr);
+
+  /// Generate the IR for a call to malloc:
+  /// 1. Compute the malloc call's argument as the specified type's size,
+  ///    possibly multiplied by the array size if the array size is not
+  ///    constant 1.
+  /// 2. Call malloc with that argument.
+  /// 3. Bitcast the result of the malloc call to the specified type.
+  /// A usage sketch follows the overload declarations below.
+  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
+                                   Type *AllocTy, Value *AllocSize,
+                                   Value *ArraySize = nullptr,
+                                   Function *MallocF = nullptr,
+                                   const Twine &Name = "");
+  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
+                                   Type *AllocTy, Value *AllocSize,
+                                   Value *ArraySize = nullptr,
+                                   Function *MallocF = nullptr,
+                                   const Twine &Name = "");
+  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
+                                   Type *AllocTy, Value *AllocSize,
+                                   Value *ArraySize = nullptr,
+                                   ArrayRef<OperandBundleDef> Bundles = None,
+                                   Function *MallocF = nullptr,
+                                   const Twine &Name = "");
+  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
+                                   Type *AllocTy, Value *AllocSize,
+                                   Value *ArraySize = nullptr,
+                                   ArrayRef<OperandBundleDef> Bundles = None,
+                                   Function *MallocF = nullptr,
+                                   const Twine &Name = "");
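+
+  // A sketch of the steps above (assumed context: an LLVMContext Ctx, a
+  // struct type FooTy, and an insertion point InsertPt; getSizeOf yields
+  // the type size as a target-independent constant expression):
+  //
+  //   Value *AllocSize = ConstantExpr::getSizeOf(FooTy);
+  //   Instruction *Foo = CallInst::CreateMalloc(
+  //       InsertPt, Type::getInt64Ty(Ctx), FooTy, AllocSize,
+  //       /*ArraySize=*/nullptr, /*MallocF=*/nullptr, "foo");
+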
+  /// Generate the IR for a call to the builtin free function.
+  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
+  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
+  static Instruction *CreateFree(Value *Source,
+                                 ArrayRef<OperandBundleDef> Bundles,
+                                 Instruction *InsertBefore);
+  static Instruction *CreateFree(Value *Source,
+                                 ArrayRef<OperandBundleDef> Bundles,
+                                 BasicBlock *InsertAtEnd);
+
+  // Note that 'musttail' implies 'tail'.
+  enum TailCallKind {
+    TCK_None = 0,
+    TCK_Tail = 1,
+    TCK_MustTail = 2,
+    TCK_NoTail = 3
+  };
+  TailCallKind getTailCallKind() const {
+    return TailCallKind(getSubclassDataFromInstruction() & 3);
+  }
+
+  bool isTailCall() const {
+    unsigned Kind = getSubclassDataFromInstruction() & 3;
+    return Kind == TCK_Tail || Kind == TCK_MustTail;
+  }
+
+  bool isMustTailCall() const {
+    return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
+  }
+
+  bool isNoTailCall() const {
+    return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
+  }
+
+  void setTailCall(bool isTC = true) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+                               unsigned(isTC ? TCK_Tail : TCK_None));
+  }
+
+  void setTailCallKind(TailCallKind TCK) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+                               unsigned(TCK));
+  }
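+
+  // For example (sketch; CI is an existing CallInst*):
+  //
+  //   CI->setTailCallKind(CallInst::TCK_MustTail); // 'musttail' implies 'tail'
+  //   assert(CI->isTailCall() && CI->isMustTailCall());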
+
+  /// Return true if the call can return twice
+  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
+  void setCanReturnTwice() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
+  }
+
+  /// Check if this call is an inline asm statement.
+  bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Call;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+};
+
+template <>
+struct OperandTraits<CallBase<CallInst>>
+    : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
+
+CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
+                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+                   BasicBlock *InsertAtEnd)
+    : CallBase<CallInst>(
+          cast<FunctionType>(
+              cast<PointerType>(Func->getType())->getElementType())
+              ->getReturnType(),
+          Instruction::Call,
+          OperandTraits<CallBase<CallInst>>::op_end(this) -
+              (Args.size() + CountBundleInputs(Bundles) + 1),
+          unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
+  init(Func, Args, Bundles, NameStr);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+                   Instruction *InsertBefore)
+    : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
+                         OperandTraits<CallBase<CallInst>>::op_end(this) -
+                             (Args.size() + CountBundleInputs(Bundles) + 1),
+                         unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
+                         InsertBefore) {
+  init(Ty, Func, Args, Bundles, NameStr);
+}
+
+//===----------------------------------------------------------------------===//
+//                               SelectInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents the LLVM 'select' instruction.
+///
+class SelectInst : public Instruction {
+  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
+             Instruction *InsertBefore)
+    : Instruction(S1->getType(), Instruction::Select,
+                  &Op<0>(), 3, InsertBefore) {
+    init(C, S1, S2);
+    setName(NameStr);
+  }
+
+  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
+             BasicBlock *InsertAtEnd)
+    : Instruction(S1->getType(), Instruction::Select,
+                  &Op<0>(), 3, InsertAtEnd) {
+    init(C, S1, S2);
+    setName(NameStr);
+  }
+
+  void init(Value *C, Value *S1, Value *S2) {
+    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
+    Op<0>() = C;
+    Op<1>() = S1;
+    Op<2>() = S2;
+  }
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  SelectInst *cloneImpl() const;
+
+public:
+  static SelectInst *Create(Value *C, Value *S1, Value *S2,
+                            const Twine &NameStr = "",
+                            Instruction *InsertBefore = nullptr,
+                            Instruction *MDFrom = nullptr) {
+    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
+    if (MDFrom)
+      Sel->copyMetadata(*MDFrom);
+    return Sel;
+  }
+
+  static SelectInst *Create(Value *C, Value *S1, Value *S2,
+                            const Twine &NameStr,
+                            BasicBlock *InsertAtEnd) {
+    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
+  }
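+
+  // A usage sketch (illustrative; Cond, A, B and InsertPt are assumed),
+  // building "%r = select i1 %cond, i32 %a, i32 %b":
+  //
+  //   SelectInst *Sel = SelectInst::Create(Cond, A, B, "r", InsertPt);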
+
+  const Value *getCondition() const { return Op<0>(); }
+  const Value *getTrueValue() const { return Op<1>(); }
+  const Value *getFalseValue() const { return Op<2>(); }
+  Value *getCondition() { return Op<0>(); }
+  Value *getTrueValue() { return Op<1>(); }
+  Value *getFalseValue() { return Op<2>(); }
+
+  void setCondition(Value *V) { Op<0>() = V; }
+  void setTrueValue(Value *V) { Op<1>() = V; }
+  void setFalseValue(Value *V) { Op<2>() = V; }
+
+  /// Return a string if the specified operands are invalid
+  /// for a select operation, otherwise return null.
+  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  OtherOps getOpcode() const {
+    return static_cast<OtherOps>(Instruction::getOpcode());
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Select;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                                VAArgInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents the va_arg LLVM instruction, which returns
+/// an argument of the specified type given a va_list and increments that
+/// list.
+///
+class VAArgInst : public UnaryInstruction {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  VAArgInst *cloneImpl() const;
+
+public:
+  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
+             Instruction *InsertBefore = nullptr)
+    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
+    setName(NameStr);
+  }
+
+  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
+            BasicBlock *InsertAtEnd)
+    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
+    setName(NameStr);
+  }
+
+  Value *getPointerOperand() { return getOperand(0); }
+  const Value *getPointerOperand() const { return getOperand(0); }
+  static unsigned getPointerOperandIndex() { return 0U; }
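+
+  // A usage sketch (illustrative; VAList, Ctx and InsertPt are assumed),
+  // reading the next i32 argument from a va_list:
+  //
+  //   VAArgInst *V =
+  //       new VAArgInst(VAList, Type::getInt32Ty(Ctx), "v", InsertPt);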
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == VAArg;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                ExtractElementInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction extracts a single (scalar)
+/// element from a VectorType value
+///
+class ExtractElementInst : public Instruction {
+  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
+                     Instruction *InsertBefore = nullptr);
+  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
+                     BasicBlock *InsertAtEnd);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  ExtractElementInst *cloneImpl() const;
+
+public:
+  static ExtractElementInst *Create(Value *Vec, Value *Idx,
+                                   const Twine &NameStr = "",
+                                   Instruction *InsertBefore = nullptr) {
+    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
+  }
+
+  static ExtractElementInst *Create(Value *Vec, Value *Idx,
+                                   const Twine &NameStr,
+                                   BasicBlock *InsertAtEnd) {
+    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
+  }
+
+  /// Return true if an extractelement instruction can be
+  /// formed with the specified operands.
+  static bool isValidOperands(const Value *Vec, const Value *Idx);
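+
+  // A usage sketch (illustrative; Vec, Idx and InsertPt are assumed):
+  //
+  //   assert(ExtractElementInst::isValidOperands(Vec, Idx));
+  //   Value *Elt = ExtractElementInst::Create(Vec, Idx, "elt", InsertPt);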
+
+  Value *getVectorOperand() { return Op<0>(); }
+  Value *getIndexOperand() { return Op<1>(); }
+  const Value *getVectorOperand() const { return Op<0>(); }
+  const Value *getIndexOperand() const { return Op<1>(); }
+
+  VectorType *getVectorOperandType() const {
+    return cast<VectorType>(getVectorOperand()->getType());
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::ExtractElement;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<ExtractElementInst> :
+  public FixedNumOperandTraits<ExtractElementInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                                InsertElementInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction inserts a single (scalar)
+/// element into a VectorType value
+///
+class InsertElementInst : public Instruction {
+  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
+                    const Twine &NameStr = "",
+                    Instruction *InsertBefore = nullptr);
+  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
+                    BasicBlock *InsertAtEnd);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  InsertElementInst *cloneImpl() const;
+
+public:
+  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
+                                   const Twine &NameStr = "",
+                                   Instruction *InsertBefore = nullptr) {
+    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
+  }
+
+  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
+                                   const Twine &NameStr,
+                                   BasicBlock *InsertAtEnd) {
+    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
+  }
+
+  /// Return true if an insertelement instruction can be
+  /// formed with the specified operands.
+  static bool isValidOperands(const Value *Vec, const Value *NewElt,
+                              const Value *Idx);
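+
+  // A usage sketch (illustrative; Vec, Elt, Ctx and InsertPt are assumed),
+  // building "%v2 = insertelement <N x T> %vec, T %elt, i32 0":
+  //
+  //   Value *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
+  //   Value *V2 = InsertElementInst::Create(Vec, Elt, Zero, "v2", InsertPt);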
+
+  /// Overload to return most specific vector type.
+  ///
+  VectorType *getType() const {
+    return cast<VectorType>(Instruction::getType());
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::InsertElement;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<InsertElementInst> :
+  public FixedNumOperandTraits<InsertElementInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                           ShuffleVectorInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction constructs a fixed permutation of two
+/// input vectors.
+///
+class ShuffleVectorInst : public Instruction {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  ShuffleVectorInst *cloneImpl() const;
+
+public:
+  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+                    const Twine &NameStr = "",
+                    Instruction *InsertBefore = nullptr);
+  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+                    const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly three operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 3);
+  }
+
+  /// Return true if a shufflevector instruction can be
+  /// formed with the specified operands.
+  static bool isValidOperands(const Value *V1, const Value *V2,
+                              const Value *Mask);
+
+  /// Overload to return most specific vector type.
+  ///
+  VectorType *getType() const {
+    return cast<VectorType>(Instruction::getType());
+  }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  Constant *getMask() const {
+    return cast<Constant>(getOperand(2));
+  }
+
+  /// Return the shuffle mask value for the specified element of the mask.
+  /// Return -1 if the element is undef.
+  static int getMaskValue(Constant *Mask, unsigned Elt);
+
+  /// Return the shuffle mask value of this instruction for the given element
+  /// index. Return -1 if the element is undef.
+  int getMaskValue(unsigned Elt) const {
+    return getMaskValue(getMask(), Elt);
+  }
+
+  /// Convert the input shuffle mask operand to a vector of integers. Undefined
+  /// elements of the mask are returned as -1.
+  static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
+
+  /// Return the mask for this instruction as a vector of integers. Undefined
+  /// elements of the mask are returned as -1.
+  void getShuffleMask(SmallVectorImpl<int> &Result) const {
+    return getShuffleMask(getMask(), Result);
+  }
+
+  SmallVector<int, 16> getShuffleMask() const {
+    SmallVector<int, 16> Mask;
+    getShuffleMask(Mask);
+    return Mask;
+  }
+
+  /// Change values in a shuffle permute mask assuming the two vector operands
+  /// of length InVecNumElts have swapped position.
+  static void commuteShuffleMask(MutableArrayRef<int> Mask,
+                                 unsigned InVecNumElts) {
+    for (int &Idx : Mask) {
+      if (Idx == -1)
+        continue;
+      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
+      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
+             "shufflevector mask index out of range");
+    }
+  }
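+
+  // For example (sketch), commuting a mask over two <4 x i32> inputs:
+  //
+  //   SmallVector<int, 8> Mask = {0, 5, -1, 7};
+  //   ShuffleVectorInst::commuteShuffleMask(Mask, 4);
+  //   // Mask is now {4, 1, -1, 3}; undef lanes stay -1.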
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::ShuffleVector;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<ShuffleVectorInst> :
+  public FixedNumOperandTraits<ShuffleVectorInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                                ExtractValueInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction extracts a struct member or array
+/// element value from an aggregate value.
+///
+class ExtractValueInst : public UnaryInstruction {
+  SmallVector<unsigned, 4> Indices;
+
+  ExtractValueInst(const ExtractValueInst &EVI);
+
+  /// Constructors - Create an extractvalue instruction with a base aggregate
+  /// value and a list of indices.  The first ctor can optionally insert before
+  /// an existing instruction, the second appends the new instruction to the
+  /// specified BasicBlock.
+  inline ExtractValueInst(Value *Agg,
+                          ArrayRef<unsigned> Idxs,
+                          const Twine &NameStr,
+                          Instruction *InsertBefore);
+  inline ExtractValueInst(Value *Agg,
+                          ArrayRef<unsigned> Idxs,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  ExtractValueInst *cloneImpl() const;
+
+public:
+  static ExtractValueInst *Create(Value *Agg,
+                                  ArrayRef<unsigned> Idxs,
+                                  const Twine &NameStr = "",
+                                  Instruction *InsertBefore = nullptr) {
+    return new ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
+  }
+
+  static ExtractValueInst *Create(Value *Agg,
+                                  ArrayRef<unsigned> Idxs,
+                                  const Twine &NameStr,
+                                  BasicBlock *InsertAtEnd) {
+    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
+  }
+
+  /// Returns the type of the element that would be extracted
+  /// with an extractvalue instruction with the specified parameters.
+  ///
+  /// Null is returned if the indices are invalid for the specified type.
+  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
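+
+  // A usage sketch (illustrative; Agg has type {i32, {float, i8}} and
+  // InsertPt is assumed), extracting the nested float field:
+  //
+  //   unsigned Idxs[] = {1, 0};
+  //   Value *X = ExtractValueInst::Create(Agg, Idxs, "x", InsertPt);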
+
+  using idx_iterator = const unsigned*;
+
+  inline idx_iterator idx_begin() const { return Indices.begin(); }
+  inline idx_iterator idx_end()   const { return Indices.end(); }
+  inline iterator_range<idx_iterator> indices() const {
+    return make_range(idx_begin(), idx_end());
+  }
+
+  Value *getAggregateOperand() {
+    return getOperand(0);
+  }
+  const Value *getAggregateOperand() const {
+    return getOperand(0);
+  }
+  static unsigned getAggregateOperandIndex() {
+    return 0U;                      // get index for modifying correct operand
+  }
+
+  ArrayRef<unsigned> getIndices() const {
+    return Indices;
+  }
+
+  unsigned getNumIndices() const {
+    return (unsigned)Indices.size();
+  }
+
+  bool hasIndices() const {
+    return true;
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::ExtractValue;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+ExtractValueInst::ExtractValueInst(Value *Agg,
+                                   ArrayRef<unsigned> Idxs,
+                                   const Twine &NameStr,
+                                   Instruction *InsertBefore)
+  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
+                     ExtractValue, Agg, InsertBefore) {
+  init(Idxs, NameStr);
+}
+
+ExtractValueInst::ExtractValueInst(Value *Agg,
+                                   ArrayRef<unsigned> Idxs,
+                                   const Twine &NameStr,
+                                   BasicBlock *InsertAtEnd)
+  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
+                     ExtractValue, Agg, InsertAtEnd) {
+  init(Idxs, NameStr);
+}
+
+//===----------------------------------------------------------------------===//
+//                                InsertValueInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction inserts a struct field or array element
+/// value into an aggregate value.
+///
+class InsertValueInst : public Instruction {
+  SmallVector<unsigned, 4> Indices;
+
+  InsertValueInst(const InsertValueInst &IVI);
+
+  /// Constructors - Create an insertvalue instruction with a base aggregate
+  /// value, a value to insert, and a list of indices.  The first ctor can
+  /// optionally insert before an existing instruction, the second appends
+  /// the new instruction to the specified BasicBlock.
+  inline InsertValueInst(Value *Agg, Value *Val,
+                         ArrayRef<unsigned> Idxs,
+                         const Twine &NameStr,
+                         Instruction *InsertBefore);
+  inline InsertValueInst(Value *Agg, Value *Val,
+                         ArrayRef<unsigned> Idxs,
+                         const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  /// Constructors - These two constructors are convenience methods because one
+  /// and two index insertvalue instructions are so common.
+  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
+                  const Twine &NameStr = "",
+                  Instruction *InsertBefore = nullptr);
+  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
+                  BasicBlock *InsertAtEnd);
+
+  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+            const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  InsertValueInst *cloneImpl() const;
+
+public:
+  // allocate space for exactly two operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 2);
+  }
+
+  static InsertValueInst *Create(Value *Agg, Value *Val,
+                                 ArrayRef<unsigned> Idxs,
+                                 const Twine &NameStr = "",
+                                 Instruction *InsertBefore = nullptr) {
+    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
+  }
+
+  static InsertValueInst *Create(Value *Agg, Value *Val,
+                                 ArrayRef<unsigned> Idxs,
+                                 const Twine &NameStr,
+                                 BasicBlock *InsertAtEnd) {
+    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
+  }
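+
+  // A usage sketch (illustrative; Agg has type {i32, float}, F is a float
+  // value, and InsertPt is assumed), building
+  // "%agg2 = insertvalue {i32, float} %agg, float %f, 1":
+  //
+  //   Value *Agg2 = InsertValueInst::Create(Agg, F, {1}, "agg2", InsertPt);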
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  using idx_iterator = const unsigned*;
+
+  inline idx_iterator idx_begin() const { return Indices.begin(); }
+  inline idx_iterator idx_end()   const { return Indices.end(); }
+  inline iterator_range<idx_iterator> indices() const {
+    return make_range(idx_begin(), idx_end());
+  }
+
+  Value *getAggregateOperand() {
+    return getOperand(0);
+  }
+  const Value *getAggregateOperand() const {
+    return getOperand(0);
+  }
+  static unsigned getAggregateOperandIndex() {
+    return 0U;                      // get index for modifying correct operand
+  }
+
+  Value *getInsertedValueOperand() {
+    return getOperand(1);
+  }
+  const Value *getInsertedValueOperand() const {
+    return getOperand(1);
+  }
+  static unsigned getInsertedValueOperandIndex() {
+    return 1U;                      // get index for modifying correct operand
+  }
+
+  ArrayRef<unsigned> getIndices() const {
+    return Indices;
+  }
+
+  unsigned getNumIndices() const {
+    return (unsigned)Indices.size();
+  }
+
+  bool hasIndices() const {
+    return true;
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::InsertValue;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<InsertValueInst> :
+  public FixedNumOperandTraits<InsertValueInst, 2> {
+};
+
+InsertValueInst::InsertValueInst(Value *Agg,
+                                 Value *Val,
+                                 ArrayRef<unsigned> Idxs,
+                                 const Twine &NameStr,
+                                 Instruction *InsertBefore)
+  : Instruction(Agg->getType(), InsertValue,
+                OperandTraits<InsertValueInst>::op_begin(this),
+                2, InsertBefore) {
+  init(Agg, Val, Idxs, NameStr);
+}
+
+InsertValueInst::InsertValueInst(Value *Agg,
+                                 Value *Val,
+                                 ArrayRef<unsigned> Idxs,
+                                 const Twine &NameStr,
+                                 BasicBlock *InsertAtEnd)
+  : Instruction(Agg->getType(), InsertValue,
+                OperandTraits<InsertValueInst>::op_begin(this),
+                2, InsertAtEnd) {
+  init(Agg, Val, Idxs, NameStr);
+}
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                               PHINode Class
+//===----------------------------------------------------------------------===//
+
+// PHINode - The PHINode class is used to represent the magical mystical PHI
+// node, which cannot exist in nature, but can be synthesized in a computer
+// scientist's overactive imagination.
+//
+class PHINode : public Instruction {
+  /// The number of operands actually allocated.  NumOperands is
+  /// the number actually in use.
+  unsigned ReservedSpace;
+
+  PHINode(const PHINode &PN);
+
+  explicit PHINode(Type *Ty, unsigned NumReservedValues,
+                   const Twine &NameStr = "",
+                   Instruction *InsertBefore = nullptr)
+    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
+      ReservedSpace(NumReservedValues) {
+    setName(NameStr);
+    allocHungoffUses(ReservedSpace);
+  }
+
+  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
+          BasicBlock *InsertAtEnd)
+    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
+      ReservedSpace(NumReservedValues) {
+    setName(NameStr);
+    allocHungoffUses(ReservedSpace);
+  }
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  PHINode *cloneImpl() const;
+
+  // allocHungoffUses - this is more complicated than the generic
+  // User::allocHungoffUses, because we have to allocate Uses for the incoming
+  // values and pointers to the incoming blocks, all in one allocation.
+  void allocHungoffUses(unsigned N) {
+    User::allocHungoffUses(N, /* IsPhi */ true);
+  }
+
+public:
+  /// Constructors - NumReservedValues is a hint for the number of incoming
+  /// edges that this phi node will have (use 0 if you really have no idea).
+  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
+                         const Twine &NameStr = "",
+                         Instruction *InsertBefore = nullptr) {
+    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
+  }
+
+  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
+                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Block iterator interface. This provides access to the list of incoming
+  // basic blocks, which parallels the list of incoming values.
+
+  using block_iterator = BasicBlock **;
+  using const_block_iterator = BasicBlock * const *;
+
+  block_iterator block_begin() {
+    Use::UserRef *ref =
+      reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
+    return reinterpret_cast<block_iterator>(ref + 1);
+  }
+
+  const_block_iterator block_begin() const {
+    const Use::UserRef *ref =
+      reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
+    return reinterpret_cast<const_block_iterator>(ref + 1);
+  }
+
+  block_iterator block_end() {
+    return block_begin() + getNumOperands();
+  }
+
+  const_block_iterator block_end() const {
+    return block_begin() + getNumOperands();
+  }
+
+  iterator_range<block_iterator> blocks() {
+    return make_range(block_begin(), block_end());
+  }
+
+  iterator_range<const_block_iterator> blocks() const {
+    return make_range(block_begin(), block_end());
+  }
+
+  op_range incoming_values() { return operands(); }
+
+  const_op_range incoming_values() const { return operands(); }
+
+  /// Return the number of incoming edges
+  ///
+  unsigned getNumIncomingValues() const { return getNumOperands(); }
+
+  /// Return incoming value number x
+  ///
+  Value *getIncomingValue(unsigned i) const {
+    return getOperand(i);
+  }
+  void setIncomingValue(unsigned i, Value *V) {
+    assert(V && "PHI node got a null value!");
+    assert(getType() == V->getType() &&
+           "All operands to PHI node must be the same type as the PHI node!");
+    setOperand(i, V);
+  }
+
+  static unsigned getOperandNumForIncomingValue(unsigned i) {
+    return i;
+  }
+
+  static unsigned getIncomingValueNumForOperand(unsigned i) {
+    return i;
+  }
+
+  /// Return incoming basic block number @p i.
+  ///
+  BasicBlock *getIncomingBlock(unsigned i) const {
+    return block_begin()[i];
+  }
+
+  /// Return incoming basic block corresponding
+  /// to an operand of the PHI.
+  ///
+  BasicBlock *getIncomingBlock(const Use &U) const {
+    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
+    return getIncomingBlock(unsigned(&U - op_begin()));
+  }
+
+  /// Return incoming basic block corresponding
+  /// to value use iterator.
+  ///
+  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
+    return getIncomingBlock(I.getUse());
+  }
+
+  void setIncomingBlock(unsigned i, BasicBlock *BB) {
+    assert(BB && "PHI node got a null basic block!");
+    block_begin()[i] = BB;
+  }
+
+  /// Add an incoming value to the end of the PHI list
+  ///
+  void addIncoming(Value *V, BasicBlock *BB) {
+    if (getNumOperands() == ReservedSpace)
+      growOperands();  // Get more space!
+    // Initialize some new operands.
+    setNumHungOffUseOperands(getNumOperands() + 1);
+    setIncomingValue(getNumOperands() - 1, V);
+    setIncomingBlock(getNumOperands() - 1, BB);
+  }
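+
+  // For example (sketch; Ty, ValA/BlockA, ValB/BlockB and InsertPt are
+  // assumed), merging two incoming values:
+  //
+  //   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "phi",
+  //                                 InsertPt);
+  //   PN->addIncoming(ValA, BlockA);
+  //   PN->addIncoming(ValB, BlockB);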
+
+  /// Remove an incoming value.  This is useful if a
+  /// predecessor basic block is deleted.  The value removed is returned.
+  ///
+  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
+  /// is true), the PHI node is destroyed and any uses of it are replaced with
+  /// dummy values.  The only time there should be zero incoming values to a PHI
+  /// node is when the block is dead, so this strategy is sound.
+  ///
+  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
+
+  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
+    int Idx = getBasicBlockIndex(BB);
+    assert(Idx >= 0 && "Invalid basic block argument to remove!");
+    return removeIncomingValue(Idx, DeletePHIIfEmpty);
+  }
+
+  /// Return the first index of the specified basic
+  /// block in the value list for this PHI.  Returns -1 if no instance.
+  ///
+  int getBasicBlockIndex(const BasicBlock *BB) const {
+    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+      if (block_begin()[i] == BB)
+        return i;
+    return -1;
+  }
+
+  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
+    int Idx = getBasicBlockIndex(BB);
+    assert(Idx >= 0 && "Invalid basic block argument!");
+    return getIncomingValue(Idx);
+  }
+
+  /// If the specified PHI node always merges together the
+  /// same value, return the value, otherwise return null.
+  Value *hasConstantValue() const;
+
+  /// Whether the specified PHI node always merges
+  /// together the same value, assuming undefs are equal to a unique
+  /// non-undef value.
+  bool hasConstantOrUndefValue() const;
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::PHI;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  void growOperands();
+};
+
+template <>
+struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
+
+//===----------------------------------------------------------------------===//
+//                           LandingPadInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// The landingpad instruction holds all of the information
+/// necessary to generate correct exception handling. The landingpad instruction
+/// cannot be moved from the top of a landing pad block, which itself is
+/// accessible only from the 'unwind' edge of an invoke. This uses the
+/// SubclassData field in Value to store whether or not the landingpad is a
+/// cleanup.
+///
+class LandingPadInst : public Instruction {
+  /// The number of operands actually allocated.  NumOperands is
+  /// the number actually in use.
+  unsigned ReservedSpace;
+
+  LandingPadInst(const LandingPadInst &LP);
+
+public:
+  enum ClauseType { Catch, Filter };
+
+private:
+  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+                          const Twine &NameStr, Instruction *InsertBefore);
+  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  // Allocate space for exactly zero operands.
+  void *operator new(size_t s) {
+    return User::operator new(s);
+  }
+
+  void growOperands(unsigned Size);
+  void init(unsigned NumReservedValues, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  LandingPadInst *cloneImpl() const;
+
+public:
+  /// Constructors - NumReservedClauses is a hint for the number of clauses
+  /// that this landingpad will have (use 0 if you really have no idea).
+  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
+                                const Twine &NameStr = "",
+                                Instruction *InsertBefore = nullptr);
+  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
+                                const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Return 'true' if this landingpad instruction is a
+  /// cleanup. I.e., it should be run when unwinding even if its landing pad
+  /// doesn't catch the exception.
+  bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
+
+  /// Indicate that this landingpad instruction is a cleanup.
+  void setCleanup(bool V) {
+    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+                               (V ? 1 : 0));
+  }
+
+  /// Add a catch or filter clause to the landing pad.
+  void addClause(Constant *ClauseVal);
+
+  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
+  /// determine what type of clause this is.
+  Constant *getClause(unsigned Idx) const {
+    return cast<Constant>(getOperandList()[Idx]);
+  }
+
+  /// Return 'true' if the clause at index Idx is a catch clause.
+  bool isCatch(unsigned Idx) const {
+    return !isa<ArrayType>(getOperandList()[Idx]->getType());
+  }
+
+  /// Return 'true' if the clause at index Idx is a filter clause.
+  bool isFilter(unsigned Idx) const {
+    return isa<ArrayType>(getOperandList()[Idx]->getType());
+  }
+
+  /// Get the number of clauses for this landing pad.
+  unsigned getNumClauses() const { return getNumOperands(); }
+
+  /// Grow the size of the operand list to accommodate the new
+  /// number of clauses.
+  void reserveClauses(unsigned Size) { growOperands(Size); }
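+
+  // A usage sketch (illustrative; ExnTy, TypeInfo (a Constant*) and InsertPt
+  // are assumed), building a cleanup landingpad that also catches one type:
+  //
+  //   LandingPadInst *LP = LandingPadInst::Create(
+  //       ExnTy, /*NumReservedClauses=*/1, "lp", InsertPt);
+  //   LP->setCleanup(true);
+  //   LP->addClause(TypeInfo); // non-array type => catch clause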
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::LandingPad;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                               ReturnInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// Return a value (possibly void) from a function.  Execution
+/// does not continue in this function any longer.
+///
+class ReturnInst : public TerminatorInst {
+  ReturnInst(const ReturnInst &RI);
+
+private:
+  // ReturnInst constructors:
+  // ReturnInst()                  - 'ret void' instruction
+  // ReturnInst(    null)          - 'ret void' instruction
+  // ReturnInst(Value* X)          - 'ret X'    instruction
+  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
+  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
+  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
+  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
+  //
+  // NOTE: If the Value* passed is of type void then the constructor behaves as
+  // if it was passed NULL.
+  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
+                      Instruction *InsertBefore = nullptr);
+  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
+  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  ReturnInst *cloneImpl() const;
+
+public:
+  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
+                            Instruction *InsertBefore = nullptr) {
+    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
+  }
+
+  static ReturnInst* Create(LLVMContext &C, Value *retVal,
+                            BasicBlock *InsertAtEnd) {
+    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
+  }
+
+  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
+    return new(0) ReturnInst(C, InsertAtEnd);
+  }
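+
+  // For example (sketch; Ctx, RetVal and BB are assumed):
+  //
+  //   ReturnInst::Create(Ctx, RetVal, BB); // 'ret <ty> %RetVal' in BB
+  //   // or, for a function returning void:
+  //   ReturnInst::Create(Ctx, BB);         // 'ret void'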
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Convenience accessor. Returns null if there is no return value.
+  Value *getReturnValue() const {
+    return getNumOperands() != 0 ? getOperand(0) : nullptr;
+  }
+
+  unsigned getNumSuccessors() const { return 0; }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::Ret);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  friend TerminatorInst;
+
+  BasicBlock *getSuccessor(unsigned idx) const {
+    llvm_unreachable("ReturnInst has no successors!");
+  }
+
+  void setSuccessor(unsigned idx, BasicBlock *B) {
+    llvm_unreachable("ReturnInst has no successors!");
+  }
+};
+
+template <>
+struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                               BranchInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// Conditional or Unconditional Branch instruction.
+///
+class BranchInst : public TerminatorInst {
+  /// Ops list - Branches are strange.  The operands are ordered:
+  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
+  /// they don't have to check for cond/uncond branchness. These are mostly
+  /// accessed relative to op_end().
+  BranchInst(const BranchInst &BI);
+  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
+  // BranchInst(BB *B)                           - 'br B'
+  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
+  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
+  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
+  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
+  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
+  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
+  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+             Instruction *InsertBefore = nullptr);
+  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
+  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+             BasicBlock *InsertAtEnd);
+
+  void AssertOK();
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  BranchInst *cloneImpl() const;
+
+public:
+  static BranchInst *Create(BasicBlock *IfTrue,
+                            Instruction *InsertBefore = nullptr) {
+    return new(1) BranchInst(IfTrue, InsertBefore);
+  }
+
+  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
+                            Value *Cond, Instruction *InsertBefore = nullptr) {
+    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
+  }
+
+  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
+    return new(1) BranchInst(IfTrue, InsertAtEnd);
+  }
+
+  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
+                            Value *Cond, BasicBlock *InsertAtEnd) {
+    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
+  }
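+
+  // For example (sketch; the blocks and Cond are assumed to exist):
+  //
+  //   BranchInst::Create(ThenBB, ElseBB, Cond, CurBB); // br i1 %cond, ...
+  //   BranchInst::Create(MergeBB, ThenBB);             // br label %merge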
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  bool isUnconditional() const { return getNumOperands() == 1; }
+  bool isConditional()   const { return getNumOperands() == 3; }
+
+  Value *getCondition() const {
+    assert(isConditional() && "Cannot get condition of an uncond branch!");
+    return Op<-3>();
+  }
+
+  void setCondition(Value *V) {
+    assert(isConditional() && "Cannot set condition of unconditional branch!");
+    Op<-3>() = V;
+  }
+
+  unsigned getNumSuccessors() const { return 1+isConditional(); }
+
+  BasicBlock *getSuccessor(unsigned i) const {
+    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
+    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
+  }
+
+  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
+    *(&Op<-1>() - idx) = NewSucc;
+  }
+
+  /// Swap the successors of this branch instruction.
+  ///
+  /// Swaps the successors of the branch instruction. This also swaps any
+  /// branch weight metadata associated with the instruction so that it
+  /// continues to map correctly to each operand.
+  void swapSuccessors();
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::Br);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                               SwitchInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// Multiway switch
+///
+class SwitchInst : public TerminatorInst {
+  unsigned ReservedSpace;
+
+  // Operand[0]    = Value to switch on
+  // Operand[1]    = Default basic block destination
+  // Operand[2n  ] = Value to match
+  // Operand[2n+1] = BasicBlock to go to on match
+  SwitchInst(const SwitchInst &SI);
+
+  /// Create a new switch instruction, specifying a value to switch on and a
+  /// default destination. The number of additional cases can be specified here
+  /// to make memory allocation more efficient. This constructor can also
+  /// auto-insert before another instruction.
+  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+             Instruction *InsertBefore);
+
+  /// Create a new switch instruction, specifying a value to switch on and a
+  /// default destination. The number of additional cases can be specified here
+  /// to make memory allocation more efficient. This constructor also
+  /// auto-inserts at the end of the specified BasicBlock.
+  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+             BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) {
+    return User::operator new(s);
+  }
+
+  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
+  void growOperands();
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  SwitchInst *cloneImpl() const;
+
+public:
+  // DefaultPseudoIndex identifies the default case; its value is -2 when
+  // viewed as a signed integer.
+  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
+
+  template <typename CaseHandleT> class CaseIteratorImpl;
+
+  /// A handle to a particular switch case. It exposes a convenient interface
+  /// to both the case value and the successor block.
+  ///
+  /// We define this as a template and instantiate it to form both a const and
+  /// non-const handle.
+  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
+  class CaseHandleImpl {
+    // Directly befriend both const and non-const iterators.
+    friend class SwitchInst::CaseIteratorImpl<
+        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
+
+  protected:
+    // Expose the switch type we're parameterized with to the iterator.
+    using SwitchInstType = SwitchInstT;
+
+    SwitchInstT *SI;
+    ptrdiff_t Index;
+
+    CaseHandleImpl() = default;
+    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
+
+  public:
+    /// Resolves case value for current case.
+    ConstantIntT *getCaseValue() const {
+      assert((unsigned)Index < SI->getNumCases() &&
+             "Index out the number of cases.");
+      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
+    }
+
+    /// Resolves successor for current case.
+    BasicBlockT *getCaseSuccessor() const {
+      assert(((unsigned)Index < SI->getNumCases() ||
+              (unsigned)Index == DefaultPseudoIndex) &&
+             "Index out the number of cases.");
+      return SI->getSuccessor(getSuccessorIndex());
+    }
+
+    /// Returns the index of the current case.
+    unsigned getCaseIndex() const { return Index; }
+
+    /// Returns TerminatorInst's successor index for current case successor.
+    unsigned getSuccessorIndex() const {
+      assert(((unsigned)Index == DefaultPseudoIndex ||
+              (unsigned)Index < SI->getNumCases()) &&
+             "Index out the number of cases.");
+      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
+    }
+
+    bool operator==(const CaseHandleImpl &RHS) const {
+      assert(SI == RHS.SI && "Incompatible operators.");
+      return Index == RHS.Index;
+    }
+  };
+
+  using ConstCaseHandle =
+      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
+
+  class CaseHandle
+      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
+    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
+
+  public:
+    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
+
+    /// Sets the new value for current case.
+    void setValue(ConstantInt *V) {
+      assert((unsigned)Index < SI->getNumCases() &&
+             "Index out the number of cases.");
+      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
+    }
+
+    /// Sets the new successor for current case.
+    void setSuccessor(BasicBlock *S) {
+      SI->setSuccessor(getSuccessorIndex(), S);
+    }
+  };
+
+  template <typename CaseHandleT>
+  class CaseIteratorImpl
+      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
+                                    std::random_access_iterator_tag,
+                                    CaseHandleT> {
+    using SwitchInstT = typename CaseHandleT::SwitchInstType;
+
+    CaseHandleT Case;
+
+  public:
+    /// Default constructed iterator is in an invalid state until assigned to
+    /// a case for a particular switch.
+    CaseIteratorImpl() = default;
+
+    /// Initializes case iterator for given SwitchInst and for given
+    /// case number.
+    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
+
+    /// Initializes case iterator for given SwitchInst and for given
+    /// TerminatorInst's successor index.
+    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
+                                               unsigned SuccessorIndex) {
+      assert(SuccessorIndex < SI->getNumSuccessors() &&
+             "Successor index # out of range!");
+      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
+                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
+    }
+
+    /// Support converting to the const variant. This will be a no-op for the
+    /// const variant.
+    operator CaseIteratorImpl<ConstCaseHandle>() const {
+      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
+    }
+
+    CaseIteratorImpl &operator+=(ptrdiff_t N) {
+      // Check index correctness after addition.
+      // Note: Index == getNumCases() means end().
+      assert(Case.Index + N >= 0 &&
+             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
+             "Case.Index out the number of cases.");
+      Case.Index += N;
+      return *this;
+    }
+    CaseIteratorImpl &operator-=(ptrdiff_t N) {
+      // Check index correctness after subtraction.
+      // Note: Case.Index == getNumCases() means end().
+      assert(Case.Index - N >= 0 &&
+             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
+             "Case.Index exceeds the number of cases.");
+      Case.Index -= N;
+      return *this;
+    }
+    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
+      assert(Case.SI == RHS.Case.SI && "Incompatible iterators.");
+      return Case.Index - RHS.Case.Index;
+    }
+    bool operator==(const CaseIteratorImpl &RHS) const {
+      return Case == RHS.Case;
+    }
+    bool operator<(const CaseIteratorImpl &RHS) const {
+      assert(Case.SI == RHS.Case.SI && "Incompatible iterators.");
+      return Case.Index < RHS.Case.Index;
+    }
+    CaseHandleT &operator*() { return Case; }
+    const CaseHandleT &operator*() const { return Case; }
+  };
+
+  using CaseIt = CaseIteratorImpl<CaseHandle>;
+  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
+
+  static SwitchInst *Create(Value *Value, BasicBlock *Default,
+                            unsigned NumCases,
+                            Instruction *InsertBefore = nullptr) {
+    return new SwitchInst(Value, Default, NumCases, InsertBefore);
+  }
+
+  static SwitchInst *Create(Value *Value, BasicBlock *Default,
+                            unsigned NumCases, BasicBlock *InsertAtEnd) {
+    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Accessor Methods for Switch stmt
+  Value *getCondition() const { return getOperand(0); }
+  void setCondition(Value *V) { setOperand(0, V); }
+
+  BasicBlock *getDefaultDest() const {
+    return cast<BasicBlock>(getOperand(1));
+  }
+
+  void setDefaultDest(BasicBlock *DefaultCase) {
+    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
+  }
+
+  /// Return the number of 'cases' in this switch instruction, excluding the
+  /// default case.
+  unsigned getNumCases() const {
+    return getNumOperands()/2 - 1;
+  }
+
+  /// Returns a read/write iterator that points to the first case in the
+  /// SwitchInst.
+  CaseIt case_begin() {
+    return CaseIt(this, 0);
+  }
+
+  /// Returns a read-only iterator that points to the first case in the
+  /// SwitchInst.
+  ConstCaseIt case_begin() const {
+    return ConstCaseIt(this, 0);
+  }
+
+  /// Returns a read/write iterator that points one past the last case in the
+  /// SwitchInst.
+  CaseIt case_end() {
+    return CaseIt(this, getNumCases());
+  }
+
+  /// Returns a read-only iterator that points one past the last case in the
+  /// SwitchInst.
+  ConstCaseIt case_end() const {
+    return ConstCaseIt(this, getNumCases());
+  }
+
+  /// Iteration adapter for range-for loops.
+  iterator_range<CaseIt> cases() {
+    return make_range(case_begin(), case_end());
+  }
+
+  /// Constant iteration adapter for range-for loops.
+  iterator_range<ConstCaseIt> cases() const {
+    return make_range(case_begin(), case_end());
+  }
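+
+  // A minimal usage sketch (illustrative, not part of the original header):
+  // walk the cases of an existing `SwitchInst *SI` with the range adapter
+  // above. `visitCase` is an assumed caller-provided callback.
+  //
+  //   for (auto Case : SI->cases()) {
+  //     ConstantInt *Val = Case.getCaseValue();      // case label
+  //     BasicBlock *Succ = Case.getCaseSuccessor();  // case target
+  //     visitCase(Val, Succ);
+  //   }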
+
+  /// Returns an iterator that points to the default case.
+  /// Note: this iterator can only resolve the successor; attempting to
+  /// resolve the case value triggers an assertion.
+  /// Also note that incrementing or decrementing this iterator triggers an
+  /// assertion and leaves the iterator invalid.
+  CaseIt case_default() {
+    return CaseIt(this, DefaultPseudoIndex);
+  }
+  ConstCaseIt case_default() const {
+    return ConstCaseIt(this, DefaultPseudoIndex);
+  }
+
+  /// Search all of the case values for the specified constant. If it is
+  /// explicitly handled, return its case iterator; otherwise return the
+  /// default-case iterator to indicate that the value is handled by the
+  /// default handler.
+  CaseIt findCaseValue(const ConstantInt *C) {
+    CaseIt I = llvm::find_if(
+        cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
+    if (I != case_end())
+      return I;
+
+    return case_default();
+  }
+  ConstCaseIt findCaseValue(const ConstantInt *C) const {
+    ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
+      return Case.getCaseValue() == C;
+    });
+    if (I != case_end())
+      return I;
+
+    return case_default();
+  }
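+
+  // Illustrative sketch: resolve a constant to its target block, falling
+  // back to the default destination (`C` and `SI` are assumed values).
+  //
+  //   auto It = SI->findCaseValue(C);
+  //   BasicBlock *Dest = It != SI->case_default()
+  //                          ? It->getCaseSuccessor()
+  //                          : SI->getDefaultDest();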
+
+  /// Finds the unique case value for a given successor. Returns null if the
+  /// successor is not found, not unique, or is the default case.
+  ConstantInt *findCaseDest(BasicBlock *BB) {
+    if (BB == getDefaultDest())
+      return nullptr;
+
+    ConstantInt *CI = nullptr;
+    for (auto Case : cases()) {
+      if (Case.getCaseSuccessor() != BB)
+        continue;
+
+      if (CI)
+        return nullptr; // Multiple cases lead to BB.
+
+      CI = Case.getCaseValue();
+    }
+
+    return CI;
+  }
+
+  /// Add an entry to the switch instruction.
+  /// Note:
+  /// This action invalidates case_end(). The old case_end() iterator will
+  /// point to the added case.
+  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
+
+  /// This method removes the specified case and its successor from the switch
+  /// instruction. Note that this operation may reorder the remaining cases at
+  /// the removed case's index and above.
+  /// Note:
+  /// This action invalidates iterators for all cases following the one
+  /// removed, including the case_end() iterator. It returns an iterator for
+  /// the next case.
+  CaseIt removeCase(CaseIt I);
+
+  unsigned getNumSuccessors() const { return getNumOperands()/2; }
+  BasicBlock *getSuccessor(unsigned idx) const {
+    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
+    return cast<BasicBlock>(getOperand(idx*2+1));
+  }
+  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
+    setOperand(idx * 2 + 1, NewSucc);
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Switch;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
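+
+// A hedged construction sketch (illustrative only; `Cond`, `DefaultBB`,
+// `CaseBB`, `BB`, and `Ctx` are assumed to exist in the caller): build a
+// switch over an i32 condition with one explicit case.
+//
+//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);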
+
+//===----------------------------------------------------------------------===//
+//                             IndirectBrInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// Indirect Branch Instruction.
+///
+class IndirectBrInst : public TerminatorInst {
+  unsigned ReservedSpace;
+
+  // Operand[0]   = Address to jump to
+  // Operand[n+1] = n-th destination
+  IndirectBrInst(const IndirectBrInst &IBI);
+
+  /// Create a new indirectbr instruction, specifying an
+  /// Address to jump to.  The number of expected destinations can be specified
+  /// here to make memory allocation more efficient.  This constructor can also
+  /// autoinsert before another instruction.
+  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
+
+  /// Create a new indirectbr instruction, specifying an
+  /// Address to jump to.  The number of expected destinations can be specified
+  /// here to make memory allocation more efficient.  This constructor also
+  /// autoinserts at the end of the specified BasicBlock.
+  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) {
+    return User::operator new(s);
+  }
+
+  void init(Value *Address, unsigned NumDests);
+  void growOperands();
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  IndirectBrInst *cloneImpl() const;
+
+public:
+  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
+                                Instruction *InsertBefore = nullptr) {
+    return new IndirectBrInst(Address, NumDests, InsertBefore);
+  }
+
+  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
+                                BasicBlock *InsertAtEnd) {
+    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Accessor Methods for IndirectBrInst instruction.
+  Value *getAddress() { return getOperand(0); }
+  const Value *getAddress() const { return getOperand(0); }
+  void setAddress(Value *V) { setOperand(0, V); }
+
+  /// Return the number of possible destinations in this
+  /// indirectbr instruction.
+  unsigned getNumDestinations() const { return getNumOperands()-1; }
+
+  /// Return the specified destination.
+  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
+  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
+
+  /// Add a destination.
+  ///
+  void addDestination(BasicBlock *Dest);
+
+  /// This method removes the specified successor from the
+  /// indirectbr instruction.
+  void removeDestination(unsigned i);
+
+  unsigned getNumSuccessors() const { return getNumOperands()-1; }
+  BasicBlock *getSuccessor(unsigned i) const {
+    return cast<BasicBlock>(getOperand(i+1));
+  }
+  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
+    setOperand(i + 1, NewSucc);
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::IndirectBr;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
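+
+// Illustrative sketch (names assumed): a computed goto lowered through
+// indirectbr; `BlockAddress::get` yields the address of a known block.
+//
+//   IndirectBrInst *IBI =
+//       IndirectBrInst::Create(BlockAddress::get(TargetBB), /*NumDests=*/1, BB);
+//   IBI->addDestination(TargetBB);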
+
+//===----------------------------------------------------------------------===//
+//                               InvokeInst Class
+//===----------------------------------------------------------------------===//
+
+/// Invoke instruction.  The SubclassData field is used to hold the
+/// calling convention of the call.
+///
+class InvokeInst : public CallBase<InvokeInst> {
+  friend class OperandBundleUser<InvokeInst, User::op_iterator>;
+
+  InvokeInst(const InvokeInst &BI);
+
+  /// Construct an InvokeInst given a range of arguments.
+  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+                    ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+                    unsigned Values, const Twine &NameStr,
+                    Instruction *InsertBefore)
+      : InvokeInst(cast<FunctionType>(
+                       cast<PointerType>(Func->getType())->getElementType()),
+                   Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
+                   InsertBefore) {}
+
+  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                    BasicBlock *IfException, ArrayRef<Value *> Args,
+                    ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+                    const Twine &NameStr, Instruction *InsertBefore);
+  /// Construct an InvokeInst given a range of arguments.
+  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+                    ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+                    unsigned Values, const Twine &NameStr,
+                    BasicBlock *InsertAtEnd);
+
+  void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+            ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+            const Twine &NameStr) {
+    init(cast<FunctionType>(
+             cast<PointerType>(Func->getType())->getElementType()),
+         Func, IfNormal, IfException, Args, Bundles, NameStr);
+  }
+
+  void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
+            BasicBlock *IfException, ArrayRef<Value *> Args,
+            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  InvokeInst *cloneImpl() const;
+
+public:
+  static constexpr int ArgOffset = 3;
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr,
+                            Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, None, NameStr,
+                  InsertBefore);
+  }
+
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles = None,
+                            const Twine &NameStr = "",
+                            Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, Bundles, NameStr,
+                  InsertBefore);
+  }
+
+  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr,
+                            Instruction *InsertBefore = nullptr) {
+    unsigned Values = unsigned(Args.size()) + 3;
+    return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
+                                   Values, NameStr, InsertBefore);
+  }
+
+  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles = None,
+                            const Twine &NameStr = "",
+                            Instruction *InsertBefore = nullptr) {
+    unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
+    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+    return new (Values, DescriptorBytes)
+        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
+                   NameStr, InsertBefore);
+  }
+
+  static InvokeInst *Create(Value *Func,
+                            BasicBlock *IfNormal, BasicBlock *IfException,
+                            ArrayRef<Value *> Args, const Twine &NameStr,
+                            BasicBlock *InsertAtEnd) {
+    unsigned Values = unsigned(Args.size()) + 3;
+    return new (Values) InvokeInst(Func, IfNormal, IfException, Args, None,
+                                   Values, NameStr, InsertAtEnd);
+  }
+
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
+    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+    return new (Values, DescriptorBytes)
+        InvokeInst(Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
+                   InsertAtEnd);
+  }
+
+  /// Create a clone of \p II with a different set of operand bundles and
+  /// insert it before \p InsertPt.
+  ///
+  /// The returned invoke instruction is identical to \p II in every way except
+  /// that the operand bundles for the new instruction are set to the operand
+  /// bundles in \p Bundles.
+  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
+                            Instruction *InsertPt = nullptr);
+
+  /// Determine if the call should not perform indirect branch tracking.
+  bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
+
+  /// Determine if the call cannot unwind.
+  bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
+  void setDoesNotThrow() {
+    addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
+  }
+
+  /// Return the function called, or null if this is an
+  /// indirect function invocation.
+  ///
+  Function *getCalledFunction() const {
+    return dyn_cast<Function>(Op<-3>());
+  }
+
+  /// Get a pointer to the function that is invoked by this
+  /// instruction.
+  const Value *getCalledValue() const { return Op<-3>(); }
+        Value *getCalledValue()       { return Op<-3>(); }
+
+  /// Set the function called.
+  void setCalledFunction(Value* Fn) {
+    setCalledFunction(
+        cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
+        Fn);
+  }
+  void setCalledFunction(FunctionType *FTy, Value *Fn) {
+    this->FTy = FTy;
+    assert(FTy == cast<FunctionType>(
+                      cast<PointerType>(Fn->getType())->getElementType()));
+    Op<-3>() = Fn;
+  }
+
+  // get*Dest - Return the destination basic blocks...
+  BasicBlock *getNormalDest() const {
+    return cast<BasicBlock>(Op<-2>());
+  }
+  BasicBlock *getUnwindDest() const {
+    return cast<BasicBlock>(Op<-1>());
+  }
+  void setNormalDest(BasicBlock *B) {
+    Op<-2>() = reinterpret_cast<Value*>(B);
+  }
+  void setUnwindDest(BasicBlock *B) {
+    Op<-1>() = reinterpret_cast<Value*>(B);
+  }
+
+  /// Get the landingpad instruction from the landing pad
+  /// block (the unwind destination).
+  LandingPadInst *getLandingPadInst() const;
+
+  BasicBlock *getSuccessor(unsigned i) const {
+    assert(i < 2 && "Successor # out of range for invoke!");
+    return i == 0 ? getNormalDest() : getUnwindDest();
+  }
+
+  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+    assert(idx < 2 && "Successor # out of range for invoke!");
+    *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
+  }
+
+  unsigned getNumSuccessors() const { return 2; }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::Invoke);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+};
+
+template <>
+struct OperandTraits<CallBase<InvokeInst>>
+    : public VariadicOperandTraits<CallBase<InvokeInst>, 3> {};
+
+InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                       BasicBlock *IfException, ArrayRef<Value *> Args,
+                       ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+                       const Twine &NameStr, Instruction *InsertBefore)
+    : CallBase<InvokeInst>(Ty->getReturnType(), Instruction::Invoke,
+                           OperandTraits<CallBase<InvokeInst>>::op_end(this) -
+                               Values,
+                           Values, InsertBefore) {
+  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
+}
+
+InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
+                       BasicBlock *IfException, ArrayRef<Value *> Args,
+                       ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+                       const Twine &NameStr, BasicBlock *InsertAtEnd)
+    : CallBase<InvokeInst>(
+          cast<FunctionType>(
+              cast<PointerType>(Func->getType())->getElementType())
+              ->getReturnType(),
+          Instruction::Invoke,
+          OperandTraits<CallBase<InvokeInst>>::op_end(this) - Values, Values,
+          InsertAtEnd) {
+  init(Func, IfNormal, IfException, Args, Bundles, NameStr);
+}
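+
+// A hedged usage sketch (illustrative; `Callee`, `Arg`, `ContBB`, `UnwindBB`,
+// and `BB` are assumed values): invoke a callee with one argument, continuing
+// at `ContBB` on a normal return and unwinding to `UnwindBB` otherwise.
+//
+//   InvokeInst *II = InvokeInst::Create(Callee, ContBB, UnwindBB,
+//                                       {Arg}, "call", BB);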
+
+//===----------------------------------------------------------------------===//
+//                              ResumeInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// Resume the propagation of an exception.
+///
+class ResumeInst : public TerminatorInst {
+  ResumeInst(const ResumeInst &RI);
+
+  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
+  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  ResumeInst *cloneImpl() const;
+
+public:
+  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
+    return new(1) ResumeInst(Exn, InsertBefore);
+  }
+
+  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
+    return new(1) ResumeInst(Exn, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Convenience accessor.
+  Value *getValue() const { return Op<0>(); }
+
+  unsigned getNumSuccessors() const { return 0; }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Resume;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  friend TerminatorInst;
+
+  BasicBlock *getSuccessor(unsigned idx) const {
+    llvm_unreachable("ResumeInst has no successors!");
+  }
+
+  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+    llvm_unreachable("ResumeInst has no successors!");
+  }
+};
+
+template <>
+struct OperandTraits<ResumeInst> :
+    public FixedNumOperandTraits<ResumeInst, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                         CatchSwitchInst Class
+//===----------------------------------------------------------------------===//
+class CatchSwitchInst : public TerminatorInst {
+  /// The number of operands actually allocated.  NumOperands is
+  /// the number actually in use.
+  unsigned ReservedSpace;
+
+  // Operand[0] = Outer scope
+  // Operand[1] = Unwind block destination
+  // Operand[n] = BasicBlock to go to on match
+  CatchSwitchInst(const CatchSwitchInst &CSI);
+
+  /// Create a new catchswitch instruction, specifying an
+  /// unwind destination.  The number of additional handlers can be specified
+  /// here to make memory allocation more efficient.
+  /// This constructor can also autoinsert before another instruction.
+  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+                  unsigned NumHandlers, const Twine &NameStr,
+                  Instruction *InsertBefore);
+
+  /// Create a new catchswitch instruction, specifying an
+  /// unwind destination.  The number of additional handlers can be specified
+  /// here to make memory allocation more efficient.
+  /// This constructor also autoinserts at the end of the specified BasicBlock.
+  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+                  unsigned NumHandlers, const Twine &NameStr,
+                  BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) { return User::operator new(s); }
+
+  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
+  void growOperands(unsigned Size);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  CatchSwitchInst *cloneImpl() const;
+
+public:
+  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
+                                 unsigned NumHandlers,
+                                 const Twine &NameStr = "",
+                                 Instruction *InsertBefore = nullptr) {
+    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
+                               InsertBefore);
+  }
+
+  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
+                                 unsigned NumHandlers, const Twine &NameStr,
+                                 BasicBlock *InsertAtEnd) {
+    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
+                               InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  // Accessor Methods for CatchSwitch stmt
+  Value *getParentPad() const { return getOperand(0); }
+  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
+
+  bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+  bool unwindsToCaller() const { return !hasUnwindDest(); }
+  BasicBlock *getUnwindDest() const {
+    if (hasUnwindDest())
+      return cast<BasicBlock>(getOperand(1));
+    return nullptr;
+  }
+  void setUnwindDest(BasicBlock *UnwindDest) {
+    assert(UnwindDest);
+    assert(hasUnwindDest());
+    setOperand(1, UnwindDest);
+  }
+
+  /// Return the number of 'handlers' in this catchswitch
+  /// instruction, excluding the default handler.
+  unsigned getNumHandlers() const {
+    if (hasUnwindDest())
+      return getNumOperands() - 2;
+    return getNumOperands() - 1;
+  }
+
+private:
+  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
+  static const BasicBlock *handler_helper(const Value *V) {
+    return cast<BasicBlock>(V);
+  }
+
+public:
+  using DerefFnTy = BasicBlock *(*)(Value *);
+  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
+  using handler_range = iterator_range<handler_iterator>;
+  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
+  using const_handler_iterator =
+      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
+  using const_handler_range = iterator_range<const_handler_iterator>;
+
+  /// Returns an iterator that points to the first handler in the
+  /// CatchSwitchInst.
+  handler_iterator handler_begin() {
+    op_iterator It = op_begin() + 1;
+    if (hasUnwindDest())
+      ++It;
+    return handler_iterator(It, DerefFnTy(handler_helper));
+  }
+
+  /// Returns an iterator that points to the first handler in the
+  /// CatchSwitchInst.
+  const_handler_iterator handler_begin() const {
+    const_op_iterator It = op_begin() + 1;
+    if (hasUnwindDest())
+      ++It;
+    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
+  }
+
+  /// Returns an iterator that points one past the last
+  /// handler in the CatchSwitchInst.
+  handler_iterator handler_end() {
+    return handler_iterator(op_end(), DerefFnTy(handler_helper));
+  }
+
+  /// Returns an iterator that points one past the last handler in the
+  /// CatchSwitchInst.
+  const_handler_iterator handler_end() const {
+    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
+  }
+
+  /// Iteration adapter for range-for loops.
+  handler_range handlers() {
+    return make_range(handler_begin(), handler_end());
+  }
+
+  /// Constant iteration adapter for range-for loops.
+  const_handler_range handlers() const {
+    return make_range(handler_begin(), handler_end());
+  }
+
+  /// Add an entry to the catchswitch instruction.
+  /// Note:
+  /// This action invalidates handler_end(). The old handler_end() iterator
+  /// will point to the added handler.
+  void addHandler(BasicBlock *Dest);
+
+  void removeHandler(handler_iterator HI);
+
+  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
+  BasicBlock *getSuccessor(unsigned Idx) const {
+    assert(Idx < getNumSuccessors() &&
+           "Successor # out of range for catchswitch!");
+    return cast<BasicBlock>(getOperand(Idx + 1));
+  }
+  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
+    assert(Idx < getNumSuccessors() &&
+           "Successor # out of range for catchswitch!");
+    setOperand(Idx + 1, NewSucc);
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::CatchSwitch;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+template <>
+struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
+
+//===----------------------------------------------------------------------===//
+//                               CleanupPadInst Class
+//===----------------------------------------------------------------------===//
+class CleanupPadInst : public FuncletPadInst {
+private:
+  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
+                          unsigned Values, const Twine &NameStr,
+                          Instruction *InsertBefore)
+      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
+                       NameStr, InsertBefore) {}
+  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
+                          unsigned Values, const Twine &NameStr,
+                          BasicBlock *InsertAtEnd)
+      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
+                       NameStr, InsertAtEnd) {}
+
+public:
+  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
+                                const Twine &NameStr = "",
+                                Instruction *InsertBefore = nullptr) {
+    unsigned Values = 1 + Args.size();
+    return new (Values)
+        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
+  }
+
+  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
+                                const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    unsigned Values = 1 + Args.size();
+    return new (Values)
+        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::CleanupPad;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                               CatchPadInst Class
+//===----------------------------------------------------------------------===//
+class CatchPadInst : public FuncletPadInst {
+private:
+  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
+                        unsigned Values, const Twine &NameStr,
+                        Instruction *InsertBefore)
+      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
+                       NameStr, InsertBefore) {}
+  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
+                        unsigned Values, const Twine &NameStr,
+                        BasicBlock *InsertAtEnd)
+      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
+                       NameStr, InsertAtEnd) {}
+
+public:
+  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
+                              const Twine &NameStr = "",
+                              Instruction *InsertBefore = nullptr) {
+    unsigned Values = 1 + Args.size();
+    return new (Values)
+        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
+  }
+
+  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
+                              const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    unsigned Values = 1 + Args.size();
+    return new (Values)
+        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
+  }
+
+  /// Convenience accessors
+  CatchSwitchInst *getCatchSwitch() const {
+    return cast<CatchSwitchInst>(Op<-1>());
+  }
+  void setCatchSwitch(Value *CatchSwitch) {
+    assert(CatchSwitch);
+    Op<-1>() = CatchSwitch;
+  }
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::CatchPad;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
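+
+// A hedged sketch of funclet-style EH wiring (illustrative; `Ctx`,
+// `DispatchBB`, and `HandlerBB` are assumed): a catchswitch that unwinds to
+// the caller, with one catchpad handler.
+//
+//   auto *CS = CatchSwitchInst::Create(ConstantTokenNone::get(Ctx),
+//                                      /*UnwindDest=*/nullptr,
+//                                      /*NumHandlers=*/1, "cs", DispatchBB);
+//   CS->addHandler(HandlerBB);
+//   auto *CP = CatchPadInst::Create(CS, /*Args=*/None, "cp", HandlerBB);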
+
+//===----------------------------------------------------------------------===//
+//                               CatchReturnInst Class
+//===----------------------------------------------------------------------===//
+
+class CatchReturnInst : public TerminatorInst {
+  CatchReturnInst(const CatchReturnInst &RI);
+  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
+  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
+
+  void init(Value *CatchPad, BasicBlock *BB);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  CatchReturnInst *cloneImpl() const;
+
+public:
+  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
+                                 Instruction *InsertBefore = nullptr) {
+    assert(CatchPad);
+    assert(BB);
+    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
+  }
+
+  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
+                                 BasicBlock *InsertAtEnd) {
+    assert(CatchPad);
+    assert(BB);
+    return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Convenience accessors.
+  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
+  void setCatchPad(CatchPadInst *CatchPad) {
+    assert(CatchPad);
+    Op<0>() = CatchPad;
+  }
+
+  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
+  void setSuccessor(BasicBlock *NewSucc) {
+    assert(NewSucc);
+    Op<1>() = NewSucc;
+  }
+  unsigned getNumSuccessors() const { return 1; }
+
+  /// Get the parentPad of this catchret's catchpad's catchswitch.
+  /// The successor block is implicitly a member of this funclet.
+  Value *getCatchSwitchParentPad() const {
+    return getCatchPad()->getCatchSwitch()->getParentPad();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::CatchRet);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  friend TerminatorInst;
+
+  BasicBlock *getSuccessor(unsigned Idx) const {
+    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
+    return getSuccessor();
+  }
+
+  void setSuccessor(unsigned Idx, BasicBlock *B) {
+    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
+    setSuccessor(B);
+  }
+};
+
+template <>
+struct OperandTraits<CatchReturnInst>
+    : public FixedNumOperandTraits<CatchReturnInst, 2> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
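+
+// Illustrative sketch (assumed names): return from the catchpad `CP` from the
+// sketch above, resuming normal control flow at `ContinueBB`.
+//
+//   CatchReturnInst::Create(CP, ContinueBB, HandlerBB);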
+
+//===----------------------------------------------------------------------===//
+//                               CleanupReturnInst Class
+//===----------------------------------------------------------------------===//
+
+class CleanupReturnInst : public TerminatorInst {
+private:
+  CleanupReturnInst(const CleanupReturnInst &RI);
+  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
+                    Instruction *InsertBefore = nullptr);
+  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
+                    BasicBlock *InsertAtEnd);
+
+  void init(Value *CleanupPad, BasicBlock *UnwindBB);
+
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  CleanupReturnInst *cloneImpl() const;
+
+public:
+  static CleanupReturnInst *Create(Value *CleanupPad,
+                                   BasicBlock *UnwindBB = nullptr,
+                                   Instruction *InsertBefore = nullptr) {
+    assert(CleanupPad);
+    unsigned Values = 1;
+    if (UnwindBB)
+      ++Values;
+    return new (Values)
+        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
+  }
+
+  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
+                                   BasicBlock *InsertAtEnd) {
+    assert(CleanupPad);
+    unsigned Values = 1;
+    if (UnwindBB)
+      ++Values;
+    return new (Values)
+        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+  bool unwindsToCaller() const { return !hasUnwindDest(); }
+
+  /// Convenience accessor.
+  CleanupPadInst *getCleanupPad() const {
+    return cast<CleanupPadInst>(Op<0>());
+  }
+  void setCleanupPad(CleanupPadInst *CleanupPad) {
+    assert(CleanupPad);
+    Op<0>() = CleanupPad;
+  }
+
+  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
+
+  BasicBlock *getUnwindDest() const {
+    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
+  }
+  void setUnwindDest(BasicBlock *NewDest) {
+    assert(NewDest);
+    assert(hasUnwindDest());
+    Op<1>() = NewDest;
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return (I->getOpcode() == Instruction::CleanupRet);
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  friend TerminatorInst;
+
+  BasicBlock *getSuccessor(unsigned Idx) const {
+    assert(Idx == 0);
+    return getUnwindDest();
+  }
+
+  void setSuccessor(unsigned Idx, BasicBlock *B) {
+    assert(Idx == 0);
+    setUnwindDest(B);
+  }
+
+  // Shadow Instruction::setInstructionSubclassData with a private forwarding
+  // method so that subclasses cannot accidentally use it.
+  void setInstructionSubclassData(unsigned short D) {
+    Instruction::setInstructionSubclassData(D);
+  }
+};
+
+template <>
+struct OperandTraits<CleanupReturnInst>
+    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
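+
+// A hedged sketch (illustrative; `Ctx` and `PadBB` are assumed): a cleanup
+// funclet that runs its code and then unwinds to the caller.
+//
+//   auto *CLP = CleanupPadInst::Create(ConstantTokenNone::get(Ctx),
+//                                      /*Args=*/None, "cleanup", PadBB);
+//   CleanupReturnInst::Create(CLP, /*UnwindBB=*/nullptr, PadBB);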
+
+//===----------------------------------------------------------------------===//
+//                           UnreachableInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// This instruction has undefined behavior.  In particular, the
+/// presence of this instruction indicates some higher level knowledge that the
+/// end of the block cannot be reached.
+///
+class UnreachableInst : public TerminatorInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  UnreachableInst *cloneImpl() const;
+
+public:
+  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
+  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+  // allocate space for exactly zero operands
+  void *operator new(size_t s) {
+    return User::operator new(s, 0);
+  }
+
+  unsigned getNumSuccessors() const { return 0; }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Unreachable;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+private:
+  friend TerminatorInst;
+
+  BasicBlock *getSuccessor(unsigned idx) const {
+    llvm_unreachable("UnreachableInst has no successors!");
+  }
+
+  void setSuccessor(unsigned idx, BasicBlock *B) {
+    llvm_unreachable("UnreachableInst has no successors!");
+  }
+};
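+
+// Illustrative one-liner (assumed `Ctx` and `BB`): mark the end of `BB` as
+// unreachable, e.g. after a call that never returns.
+//
+//   new UnreachableInst(Ctx, BB);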
+
+//===----------------------------------------------------------------------===//
+//                                 TruncInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a truncation of integer types.
+class TruncInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical TruncInst
+  TruncInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  TruncInst(
+    Value *S,                           ///< The value to be truncated
+    Type *Ty,                           ///< The (smaller) type to truncate to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  TruncInst(
+    Value *S,                     ///< The value to be truncated
+    Type *Ty,                     ///< The (smaller) type to truncate to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Trunc;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
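+
+// A hedged usage sketch (illustrative; `Wide`, `Ctx`, and `InsertPt` are
+// assumed): truncate an i32 value to i8. The sibling cast classes below
+// follow the same constructor pattern.
+//
+//   Value *Lo = new TruncInst(Wide, Type::getInt8Ty(Ctx), "lo", InsertPt);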
+
+//===----------------------------------------------------------------------===//
+//                                 ZExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents zero extension of integer types.
+class ZExtInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical ZExtInst
+  ZExtInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  ZExtInst(
+    Value *S,                           ///< The value to be zero extended
+    Type *Ty,                           ///< The type to zero extend to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  ZExtInst(
+    Value *S,                     ///< The value to be zero extended
+    Type *Ty,                     ///< The type to zero extend to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == ZExt;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 SExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a sign extension of integer types.
+class SExtInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical SExtInst
+  SExtInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  SExtInst(
+    Value *S,                           ///< The value to be sign extended
+    Type *Ty,                           ///< The type to sign extend to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  SExtInst(
+    Value *S,                     ///< The value to be sign extended
+    Type *Ty,                     ///< The type to sign extend to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == SExt;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 FPTruncInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a truncation of floating point types.
+class FPTruncInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical FPTruncInst
+  FPTruncInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  FPTruncInst(
+    Value *S,                           ///< The value to be truncated
+    Type *Ty,                           ///< The type to truncate to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  FPTruncInst(
+    Value *S,                     ///< The value to be truncated
+    Type *Ty,                     ///< The type to truncate to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == FPTrunc;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 FPExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents an extension of floating point types.
+class FPExtInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical FPExtInst
+  FPExtInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  FPExtInst(
+    Value *S,                           ///< The value to be extended
+    Type *Ty,                           ///< The type to extend to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  FPExtInst(
+    Value *S,                     ///< The value to be extended
+    Type *Ty,                     ///< The type to extend to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == FPExt;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 UIToFPInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from unsigned integer to floating point.
+class UIToFPInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical UIToFPInst
+  UIToFPInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  UIToFPInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  UIToFPInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == UIToFP;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 SIToFPInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from signed integer to floating point.
+class SIToFPInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical SIToFPInst
+  SIToFPInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  SIToFPInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  SIToFPInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == SIToFP;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 FPToUIInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from floating point to unsigned integer.
+class FPToUIInst  : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical FPToUIInst
+  FPToUIInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  FPToUIInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  FPToUIInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == FPToUI;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 FPToSIInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from floating point to signed integer.
+class FPToSIInst  : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical FPToSIInst
+  FPToSIInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  FPToSIInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  FPToSIInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == FPToSI;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 IntToPtrInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from an integer to a pointer.
+class IntToPtrInst : public CastInst {
+public:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Constructor with insert-before-instruction semantics
+  IntToPtrInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  IntToPtrInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Clone an identical IntToPtrInst.
+  IntToPtrInst *cloneImpl() const;
+
+  /// Returns the address space of this instruction's pointer type.
+  unsigned getAddressSpace() const {
+    return getType()->getPointerAddressSpace();
+  }
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == IntToPtr;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                                 PtrToIntInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a cast from a pointer to an integer.
+class PtrToIntInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical PtrToIntInst.
+  PtrToIntInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  PtrToIntInst(
+    Value *S,                           ///< The value to be converted
+    Type *Ty,                           ///< The type to convert to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  PtrToIntInst(
+    Value *S,                     ///< The value to be converted
+    Type *Ty,                     ///< The type to convert to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  /// Gets the pointer operand.
+  Value *getPointerOperand() { return getOperand(0); }
+  /// Gets the pointer operand.
+  const Value *getPointerOperand() const { return getOperand(0); }
+  /// Gets the operand index of the pointer operand.
+  static unsigned getPointerOperandIndex() { return 0U; }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperand()->getType()->getPointerAddressSpace();
+  }
+
+  // Methods to support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == PtrToInt;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
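+
+// Example (a sketch): matching a ptrtoint and reading the address space of
+// its pointer operand (`V` is an assumed Value*):
+//
+//   if (auto *P2I = dyn_cast<PtrToIntInst>(V)) {
+//     unsigned AS = P2I->getPointerAddressSpace();
+//     // e.g. reject pointers outside address space 0.
+//   }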
+
+//===----------------------------------------------------------------------===//
+//                             BitCastInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a no-op cast from one type to another.
+class BitCastInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical BitCastInst.
+  BitCastInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  BitCastInst(
+    Value *S,                           ///< The value to be cast
+    Type *Ty,                           ///< The type to cast to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  BitCastInst(
+    Value *S,                     ///< The value to be cast
+    Type *Ty,                     ///< The type to cast to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  // Methods to support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == BitCast;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                          AddrSpaceCastInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class represents a conversion between pointers from one address space
+/// to another.
+class AddrSpaceCastInst : public CastInst {
+protected:
+  // Note: Instruction needs to be a friend here to call cloneImpl.
+  friend class Instruction;
+
+  /// Clone an identical AddrSpaceCastInst.
+  AddrSpaceCastInst *cloneImpl() const;
+
+public:
+  /// Constructor with insert-before-instruction semantics
+  AddrSpaceCastInst(
+    Value *S,                           ///< The value to be cast
+    Type *Ty,                           ///< The type to cast to
+    const Twine &NameStr = "",          ///< A name for the new instruction
+    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+  );
+
+  /// Constructor with insert-at-end-of-block semantics
+  AddrSpaceCastInst(
+    Value *S,                     ///< The value to be cast
+    Type *Ty,                     ///< The type to cast to
+    const Twine &NameStr,         ///< A name for the new instruction
+    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
+  );
+
+  // Methods to support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == AddrSpaceCast;
+  }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+
+  /// Gets the pointer operand.
+  Value *getPointerOperand() {
+    return getOperand(0);
+  }
+
+  /// Gets the pointer operand.
+  const Value *getPointerOperand() const {
+    return getOperand(0);
+  }
+
+  /// Gets the operand index of the pointer operand.
+  static unsigned getPointerOperandIndex() {
+    return 0U;
+  }
+
+  /// Returns the address space of the pointer operand.
+  unsigned getSrcAddressSpace() const {
+    return getPointerOperand()->getType()->getPointerAddressSpace();
+  }
+
+  /// Returns the address space of the result.
+  unsigned getDestAddressSpace() const {
+    return getType()->getPointerAddressSpace();
+  }
+};
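+
+// Example (a sketch): an addrspacecast always changes the address space, and
+// the accessors above expose both sides (`V` is an assumed Value*):
+//
+//   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(V)) {
+//     unsigned SrcAS = ASC->getSrcAddressSpace();
+//     unsigned DstAS = ASC->getDestAddressSpace();
+//     // e.g. ask the target whether this pair of spaces can alias.
+//   }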
+
+/// A helper function that returns the pointer operand of a load or store
+/// instruction. Returns nullptr if V is neither a load nor a store.
+inline Value *getLoadStorePointerOperand(Value *V) {
+  if (auto *Load = dyn_cast<LoadInst>(V))
+    return Load->getPointerOperand();
+  if (auto *Store = dyn_cast<StoreInst>(V))
+    return Store->getPointerOperand();
+  return nullptr;
+}
+
+/// A helper function that returns the pointer operand of a load, store
+/// or GEP instruction. Returns nullptr if V is not a load, store, or GEP.
+inline Value *getPointerOperand(Value *V) {
+  if (auto *Ptr = getLoadStorePointerOperand(V))
+    return Ptr;
+  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
+    return Gep->getPointerOperand();
+  return nullptr;
+}
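+
+// Example (a sketch): using the helpers above to collect every pointer a
+// basic block touches through loads, stores, and GEPs (`BB` and `Pointers`,
+// a SmallPtrSet, are assumed names):
+//
+//   for (Instruction &I : BB)
+//     if (Value *Ptr = getPointerOperand(&I))
+//       Pointers.insert(Ptr);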
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INSTRUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicInst.h b/linux-x64/clang/include/llvm/IR/IntrinsicInst.h
new file mode 100644
index 0000000..80d428c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicInst.h
@@ -0,0 +1,815 @@
+//===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes that make it easy to deal with intrinsic
+// functions with the isa/dyn_cast family of functions.  In particular, this
+// allows you to do things like:
+//
+//     if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
+//        ... MCI->getDest() ... MCI->getSource() ...
+//
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class.  Note that none of these classes
+// has state or virtual methods, which is what lets this gross/neat hack work.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INTRINSICINST_H
+#define LLVM_IR_INTRINSICINST_H
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+  /// A wrapper class for inspecting calls to intrinsic functions.
+  /// This allows the standard isa/dyn_cast/cast functionality to work with
+  /// calls to intrinsic functions.
+  class IntrinsicInst : public CallInst {
+  public:
+    IntrinsicInst() = delete;
+    IntrinsicInst(const IntrinsicInst &) = delete;
+    IntrinsicInst &operator=(const IntrinsicInst &) = delete;
+
+    /// Return the intrinsic ID of this intrinsic.
+    Intrinsic::ID getIntrinsicID() const {
+      return getCalledFunction()->getIntrinsicID();
+    }
+
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const CallInst *I) {
+      if (const Function *CF = I->getCalledFunction())
+        return CF->isIntrinsic();
+      return false;
+    }
+    static bool classof(const Value *V) {
+      return isa<CallInst>(V) && classof(cast<CallInst>(V));
+    }
+  };
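+
+  // Example (a sketch): once a call is classified as an IntrinsicInst, code
+  // typically switches on its ID (`I` is an assumed Instruction&):
+  //
+  //   if (auto *II = dyn_cast<IntrinsicInst>(&I))
+  //     switch (II->getIntrinsicID()) {
+  //     case Intrinsic::lifetime_start:
+  //     case Intrinsic::lifetime_end:
+  //       break; // markers only; no code is generated for these
+  //     default:
+  //       break;
+  //     }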
+
+  /// This is the common base class for debug info intrinsics.
+  class DbgInfoIntrinsic : public IntrinsicInst {
+  public:
+    /// Get the location corresponding to the variable referenced by the debug
+    /// info intrinsic.  Depending on the intrinsic, this could be the
+    /// variable's value or its address.
+    Value *getVariableLocation(bool AllowNullOp = true) const;
+
+    /// Does this describe the address of a local variable? True for dbg.addr
+    /// and dbg.declare, but not for dbg.value, which describes its value.
+    bool isAddressOfVariable() const {
+      return getIntrinsicID() != Intrinsic::dbg_value;
+    }
+
+    DILocalVariable *getVariable() const {
+      return cast<DILocalVariable>(getRawVariable());
+    }
+
+    DIExpression *getExpression() const {
+      return cast<DIExpression>(getRawExpression());
+    }
+
+    Metadata *getRawVariable() const {
+      return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
+    }
+
+    Metadata *getRawExpression() const {
+      return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
+    }
+
+    /// \name Casting methods
+    /// @{
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::dbg_declare:
+      case Intrinsic::dbg_value:
+      case Intrinsic::dbg_addr:
+        return true;
+      default: return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+    /// @}
+  };
+
+  /// This represents the llvm.dbg.declare instruction.
+  class DbgDeclareInst : public DbgInfoIntrinsic {
+  public:
+    Value *getAddress() const { return getVariableLocation(); }
+
+    /// \name Casting methods
+    /// @{
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::dbg_declare;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+    /// @}
+  };
+
+  /// This represents the llvm.dbg.addr instruction.
+  class DbgAddrIntrinsic : public DbgInfoIntrinsic {
+  public:
+    Value *getAddress() const { return getVariableLocation(); }
+
+    /// \name Casting methods
+    /// @{
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::dbg_addr;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This represents the llvm.dbg.value instruction.
+  class DbgValueInst : public DbgInfoIntrinsic {
+  public:
+    Value *getValue() const {
+      return getVariableLocation(/* AllowNullOp = */ false);
+    }
+
+    /// \name Casting methods
+    /// @{
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::dbg_value;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+    /// @}
+  };
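+
+  // Example (a sketch): recovering the variable, expression, and SSA value
+  // described by a llvm.dbg.value call (`I` is an assumed Instruction&):
+  //
+  //   if (auto *DVI = dyn_cast<DbgValueInst>(&I)) {
+  //     DILocalVariable *Var = DVI->getVariable();
+  //     DIExpression *Expr = DVI->getExpression();
+  //     Value *Val = DVI->getValue();
+  //     // ... map Var/Expr/Val into the pass's side tables ...
+  //   }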
+
+  /// This is the common base class for constrained floating point intrinsics.
+  class ConstrainedFPIntrinsic : public IntrinsicInst {
+  public:
+    enum RoundingMode {
+      rmInvalid,
+      rmDynamic,
+      rmToNearest,
+      rmDownward,
+      rmUpward,
+      rmTowardZero
+    };
+
+    enum ExceptionBehavior {
+      ebInvalid,
+      ebIgnore,
+      ebMayTrap,
+      ebStrict
+    };
+
+    bool isUnaryOp() const;
+    bool isTernaryOp() const;
+    RoundingMode getRoundingMode() const;
+    ExceptionBehavior getExceptionBehavior() const;
+
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::experimental_constrained_fadd:
+      case Intrinsic::experimental_constrained_fsub:
+      case Intrinsic::experimental_constrained_fmul:
+      case Intrinsic::experimental_constrained_fdiv:
+      case Intrinsic::experimental_constrained_frem:
+      case Intrinsic::experimental_constrained_fma:
+      case Intrinsic::experimental_constrained_sqrt:
+      case Intrinsic::experimental_constrained_pow:
+      case Intrinsic::experimental_constrained_powi:
+      case Intrinsic::experimental_constrained_sin:
+      case Intrinsic::experimental_constrained_cos:
+      case Intrinsic::experimental_constrained_exp:
+      case Intrinsic::experimental_constrained_exp2:
+      case Intrinsic::experimental_constrained_log:
+      case Intrinsic::experimental_constrained_log10:
+      case Intrinsic::experimental_constrained_log2:
+      case Intrinsic::experimental_constrained_rint:
+      case Intrinsic::experimental_constrained_nearbyint:
+        return true;
+      default: return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
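+
+  // Example (a sketch): a transform might only touch constrained FP code when
+  // it runs in the default environment (`I` is an assumed Instruction&):
+  //
+  //   if (auto *CFP = dyn_cast<ConstrainedFPIntrinsic>(&I))
+  //     if (CFP->getRoundingMode() != ConstrainedFPIntrinsic::rmToNearest ||
+  //         CFP->getExceptionBehavior() != ConstrainedFPIntrinsic::ebIgnore)
+  //       return; // leave non-default FP environments alone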
+
+  /// Common base class for all memory intrinsics. Simply provides
+  /// common methods.
+  /// Written as CRTP so that the plain, atomic, and "any" hierarchies below
+  /// can share these accessors without sharing a concrete base class.
+  template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
+  private:
+    enum { ARG_DEST = 0, ARG_LENGTH = 2 };
+
+  public:
+    Value *getRawDest() const {
+      return const_cast<Value *>(getArgOperand(ARG_DEST));
+    }
+    const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+    Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+    Value *getLength() const {
+      return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+    }
+    const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+    Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+    /// This is just like getRawDest, but it strips off any cast
+    /// instructions (including addrspacecast) that feed it, giving the
+    /// original input.  The returned value is guaranteed to be a pointer.
+    Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+    unsigned getDestAddressSpace() const {
+      return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+    }
+
+    unsigned getDestAlignment() const { return getParamAlignment(ARG_DEST); }
+
+    /// Set the specified arguments of the instruction.
+    void setDest(Value *Ptr) {
+      assert(getRawDest()->getType() == Ptr->getType() &&
+             "setDest called with pointer of wrong type!");
+      setArgOperand(ARG_DEST, Ptr);
+    }
+
+    void setDestAlignment(unsigned Align) {
+      removeParamAttr(ARG_DEST, Attribute::Alignment);
+      if (Align > 0)
+        addParamAttr(ARG_DEST,
+                     Attribute::getWithAlignment(getContext(), Align));
+    }
+
+    void setLength(Value *L) {
+      assert(getLength()->getType() == L->getType() &&
+             "setLength called with value of wrong type!");
+      setArgOperand(ARG_LENGTH, L);
+    }
+  };
+
+  /// The common base class for the atomic memset/memmove/memcpy intrinsics,
+  /// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove.
+  class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
+  private:
+    enum { ARG_ELEMENTSIZE = 3 };
+
+  public:
+    Value *getRawElementSizeInBytes() const {
+      return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+    }
+
+    ConstantInt *getElementSizeInBytesCst() const {
+      return cast<ConstantInt>(getRawElementSizeInBytes());
+    }
+
+    uint32_t getElementSizeInBytes() const {
+      return getElementSizeInBytesCst()->getZExtValue();
+    }
+
+    void setElementSizeInBytes(Constant *V) {
+      assert(V->getType() == Type::getInt8Ty(getContext()) &&
+             "setElementSizeInBytes called with value of wrong type!");
+      setArgOperand(ARG_ELEMENTSIZE, V);
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy_element_unordered_atomic:
+      case Intrinsic::memmove_element_unordered_atomic:
+      case Intrinsic::memset_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
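+
+  // Example (a sketch): the element size of the unordered-atomic intrinsics
+  // is a constant operand, so it can be read directly (`I` is an assumed
+  // Instruction&):
+  //
+  //   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(&I)) {
+  //     uint32_t ElemBytes = AMI->getElementSizeInBytes();
+  //     // the length operand must be a multiple of ElemBytes
+  //   }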
+
+  /// This class represents the atomic memset intrinsic,
+  /// i.e. llvm.element.unordered.atomic.memset.
+  class AtomicMemSetInst : public AtomicMemIntrinsic {
+  private:
+    enum { ARG_VALUE = 1 };
+
+  public:
+    Value *getValue() const {
+      return const_cast<Value *>(getArgOperand(ARG_VALUE));
+    }
+    const Use &getValueUse() const { return getArgOperandUse(ARG_VALUE); }
+    Use &getValueUse() { return getArgOperandUse(ARG_VALUE); }
+
+    void setValue(Value *Val) {
+      assert(getValue()->getType() == Val->getType() &&
+             "setValue called with value of wrong type!");
+      setArgOperand(ARG_VALUE, Val);
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps the atomic memcpy/memmove intrinsics,
+  /// i.e. llvm.element.unordered.atomic.memcpy/memmove.
+  class AtomicMemTransferInst : public AtomicMemIntrinsic {
+  private:
+    enum { ARG_SOURCE = 1 };
+
+  public:
+    /// Return the raw source pointer argument of the instruction.
+    Value *getRawSource() const {
+      return const_cast<Value *>(getArgOperand(ARG_SOURCE));
+    }
+    const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+    Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+    /// This is just like getRawSource, but it strips off any cast
+    /// instructions that feed it, giving the original input.  The returned
+    /// value is guaranteed to be a pointer.
+    Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+    unsigned getSourceAddressSpace() const {
+      return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+    }
+
+    unsigned getSourceAlignment() const {
+      return getParamAlignment(ARG_SOURCE);
+    }
+
+    void setSource(Value *Ptr) {
+      assert(getRawSource()->getType() == Ptr->getType() &&
+             "setSource called with pointer of wrong type!");
+      setArgOperand(ARG_SOURCE, Ptr);
+    }
+
+    void setSourceAlignment(unsigned Align) {
+      removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+      if (Align > 0)
+        addParamAttr(ARG_SOURCE,
+                     Attribute::getWithAlignment(getContext(), Align));
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy_element_unordered_atomic:
+      case Intrinsic::memmove_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class represents the atomic memcpy intrinsic
+  /// i.e. llvm.element.unordered.atomic.memcpy
+  class AtomicMemCpyInst : public AtomicMemTransferInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class represents the atomic memmove intrinsic
+  /// i.e. llvm.element.unordered.atomic.memmove
+  class AtomicMemMoveInst : public AtomicMemTransferInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This is the common base class for memset/memcpy/memmove.
+  class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
+  private:
+    enum { ARG_VOLATILE = 3 };
+
+  public:
+    ConstantInt *getVolatileCst() const {
+      return cast<ConstantInt>(
+          const_cast<Value *>(getArgOperand(ARG_VOLATILE)));
+    }
+
+    bool isVolatile() const {
+      return !getVolatileCst()->isZero();
+    }
+
+    void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy:
+      case Intrinsic::memmove:
+      case Intrinsic::memset:
+        return true;
+      default: return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps the llvm.memset intrinsic.
+  class MemSetInst : public MemIntrinsic {
+  public:
+    /// Return the byte value being written by the memset.
+    Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+    const Use &getValueUse() const { return getArgOperandUse(1); }
+    Use &getValueUse() { return getArgOperandUse(1); }
+
+    void setValue(Value *Val) {
+      assert(getValue()->getType() == Val->getType() &&
+             "setValue called with value of wrong type!");
+      setArgOperand(1, Val);
+    }
+
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memset;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps the llvm.memcpy/memmove intrinsics.
+  class MemTransferInst : public MemIntrinsic {
+  private:
+    enum { ARG_SOURCE = 1 };
+
+  public:
+    /// Return the raw source pointer argument of the instruction.
+    Value *getRawSource() const { return const_cast<Value*>(getArgOperand(ARG_SOURCE)); }
+    const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+    Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+    /// This is just like getRawSource, but it strips off any cast
+    /// instructions that feed it, giving the original input.  The returned
+    /// value is guaranteed to be a pointer.
+    Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+    unsigned getSourceAddressSpace() const {
+      return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+    }
+
+    unsigned getSourceAlignment() const {
+      return getParamAlignment(ARG_SOURCE);
+    }
+
+    void setSource(Value *Ptr) {
+      assert(getRawSource()->getType() == Ptr->getType() &&
+             "setSource called with pointer of wrong type!");
+      setArgOperand(ARG_SOURCE, Ptr);
+    }
+
+    void setSourceAlignment(unsigned Align) {
+      removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+      if (Align > 0)
+        addParamAttr(ARG_SOURCE,
+                     Attribute::getWithAlignment(getContext(), Align));
+    }
+
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memcpy ||
+             I->getIntrinsicID() == Intrinsic::memmove;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps the llvm.memcpy intrinsic.
+  class MemCpyInst : public MemTransferInst {
+  public:
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memcpy;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps the llvm.memmove intrinsic.
+  class MemMoveInst : public MemTransferInst {
+  public:
+    // Methods to support type inquiry through isa, cast, and dyn_cast:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::memmove;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
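+
+  // Example (a sketch): reading the operands of a plain llvm.memcpy before
+  // rewriting it (`I` is an assumed Instruction&):
+  //
+  //   if (auto *MC = dyn_cast<MemCpyInst>(&I)) {
+  //     if (MC->isVolatile())
+  //       return; // don't touch volatile transfers
+  //     Value *Dst = MC->getDest();   // stripped of pointer casts
+  //     Value *Src = MC->getSource(); // likewise
+  //     Value *Len = MC->getLength();
+  //     // ... fold or widen the copy ...
+  //   }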
+
+  /// The common base class for any memset/memmove/memcpy intrinsic,
+  /// whether atomic or non-atomic,
+  /// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
+  ///  and llvm.memset/memcpy/memmove.
+  class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
+  public:
+    bool isVolatile() const {
+      // Only the non-atomic intrinsics can be volatile
+      if (auto *MI = dyn_cast<MemIntrinsic>(this))
+        return MI->isVolatile();
+      return false;
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy:
+      case Intrinsic::memmove:
+      case Intrinsic::memset:
+      case Intrinsic::memcpy_element_unordered_atomic:
+      case Intrinsic::memmove_element_unordered_atomic:
+      case Intrinsic::memset_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class represents any memset intrinsic,
+  /// i.e. llvm.element.unordered.atomic.memset
+  /// and  llvm.memset.
+  class AnyMemSetInst : public AnyMemIntrinsic {
+  private:
+    enum { ARG_VALUE = 1 };
+
+  public:
+    Value *getValue() const {
+      return const_cast<Value *>(getArgOperand(ARG_VALUE));
+    }
+    const Use &getValueUse() const { return getArgOperandUse(ARG_VALUE); }
+    Use &getValueUse() { return getArgOperandUse(ARG_VALUE); }
+
+    void setValue(Value *Val) {
+      assert(getValue()->getType() == Val->getType() &&
+             "setValue called with value of wrong type!");
+      setArgOperand(ARG_VALUE, Val);
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memset:
+      case Intrinsic::memset_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class wraps any memcpy/memmove intrinsic,
+  /// i.e. llvm.element.unordered.atomic.memcpy/memmove
+  /// and  llvm.memcpy/memmove.
+  class AnyMemTransferInst : public AnyMemIntrinsic {
+  private:
+    enum { ARG_SOURCE = 1 };
+
+  public:
+    /// Return the raw source pointer argument of the instruction.
+    Value *getRawSource() const {
+      return const_cast<Value *>(getArgOperand(ARG_SOURCE));
+    }
+    const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+    Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+    /// This is just like getRawSource, but it strips off any cast
+    /// instructions that feed it, giving the original input.  The returned
+    /// value is guaranteed to be a pointer.
+    Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+    unsigned getSourceAddressSpace() const {
+      return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+    }
+
+    unsigned getSourceAlignment() const {
+      return getParamAlignment(ARG_SOURCE);
+    }
+
+    void setSource(Value *Ptr) {
+      assert(getRawSource()->getType() == Ptr->getType() &&
+             "setSource called with pointer of wrong type!");
+      setArgOperand(ARG_SOURCE, Ptr);
+    }
+
+    void setSourceAlignment(unsigned Align) {
+      removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+      if (Align > 0)
+        addParamAttr(ARG_SOURCE,
+                     Attribute::getWithAlignment(getContext(), Align));
+    }
+
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy:
+      case Intrinsic::memmove:
+      case Intrinsic::memcpy_element_unordered_atomic:
+      case Intrinsic::memmove_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
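+
+  // Example (a sketch): AnyMemTransferInst matches the plain and the
+  // element-wise atomic forms alike, so one check covers all four transfer
+  // intrinsics (`I` is an assumed Instruction&):
+  //
+  //   if (auto *AMT = dyn_cast<AnyMemTransferInst>(&I)) {
+  //     bool CrossAS =
+  //         AMT->getSourceAddressSpace() != AMT->getDestAddressSpace();
+  //     // a cross-address-space copy may need target-specific lowering
+  //   }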
+
+  /// This class represents any memcpy intrinsic
+  /// i.e. llvm.element.unordered.atomic.memcpy
+  ///  and llvm.memcpy
+  class AnyMemCpyInst : public AnyMemTransferInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memcpy:
+      case Intrinsic::memcpy_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This class represents any memmove intrinsic
+  /// i.e. llvm.element.unordered.atomic.memmove
+  ///  and llvm.memmove
+  class AnyMemMoveInst : public AnyMemTransferInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::memmove:
+      case Intrinsic::memmove_element_unordered_atomic:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This represents the llvm.va_start intrinsic.
+  class VAStartInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::vastart;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+  };
+
+  /// This represents the llvm.va_end intrinsic.
+  class VAEndInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::vaend;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+  };
+
+  /// This represents the llvm.va_copy intrinsic.
+  class VACopyInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::vacopy;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
+    Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
+  };
+
+  /// This represents the llvm.instrprof_increment intrinsic.
+  class InstrProfIncrementInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::instrprof_increment;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    GlobalVariable *getName() const {
+      return cast<GlobalVariable>(
+          const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+    }
+
+    ConstantInt *getHash() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+    }
+
+    ConstantInt *getNumCounters() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+    }
+
+    ConstantInt *getIndex() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+    }
+
+    Value *getStep() const;
+  };
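+
+  // Example (a sketch): lowering reads the counter coordinates directly off
+  // the call (`I` is an assumed Instruction&):
+  //
+  //   if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
+  //     GlobalVariable *FuncName = Inc->getName();
+  //     uint64_t Index = Inc->getIndex()->getZExtValue();
+  //     // bump counter `Index` of the function identified by FuncName
+  //   }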
+
+  class InstrProfIncrementInstStep : public InstrProfIncrementInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+  };
+
+  /// This represents the llvm.instrprof_value_profile intrinsic.
+  class InstrProfValueProfileInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    GlobalVariable *getName() const {
+      return cast<GlobalVariable>(
+          const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+    }
+
+    ConstantInt *getHash() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+    }
+
+    Value *getTargetValue() const {
+      return cast<Value>(const_cast<Value *>(getArgOperand(2)));
+    }
+
+    ConstantInt *getValueKind() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+    }
+
+    // Returns the value site index.
+    ConstantInt *getIndex() const {
+      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
+    }
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_IR_INTRINSICINST_H
diff --git a/linux-x64/clang/include/llvm/IR/Intrinsics.gen b/linux-x64/clang/include/llvm/IR/Intrinsics.gen
new file mode 100644
index 0000000..7919f07
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Intrinsics.gen
@@ -0,0 +1,36169 @@
+/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
+|*                                                                            *|
+|* Intrinsic Function Source Fragment                                         *|
+|*                                                                            *|
+|* Automatically generated file, do not edit!                                 *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+                         !defined(setjmp_undefined_for_msvc)
+#  pragma push_macro("setjmp")
+#  undef setjmp
+#  define setjmp_undefined_for_msvc
+#endif
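+
+// How this fragment is consumed (a sketch; the exact include site lives in
+// llvm/IR/Intrinsics.h): each GET_* guard below is enabled by the includer
+// before textually including this file, e.g. to build the Intrinsic::ID enum:
+//
+//   #define GET_INTRINSIC_ENUM_VALUES
+//   #include "llvm/IR/Intrinsics.gen"
+//   #undef GET_INTRINSIC_ENUM_VALUES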
+
+// Enum values for Intrinsics.h
+#ifdef GET_INTRINSIC_ENUM_VALUES
+    addressofreturnaddress,                    // llvm.addressofreturnaddress
+    adjust_trampoline,                         // llvm.adjust.trampoline
+    annotation,                                // llvm.annotation
+    assume,                                    // llvm.assume
+    bitreverse,                                // llvm.bitreverse
+    bswap,                                     // llvm.bswap
+    canonicalize,                              // llvm.canonicalize
+    ceil,                                      // llvm.ceil
+    clear_cache,                               // llvm.clear_cache
+    codeview_annotation,                       // llvm.codeview.annotation
+    convert_from_fp16,                         // llvm.convert.from.fp16
+    convert_to_fp16,                           // llvm.convert.to.fp16
+    copysign,                                  // llvm.copysign
+    coro_alloc,                                // llvm.coro.alloc
+    coro_begin,                                // llvm.coro.begin
+    coro_destroy,                              // llvm.coro.destroy
+    coro_done,                                 // llvm.coro.done
+    coro_end,                                  // llvm.coro.end
+    coro_frame,                                // llvm.coro.frame
+    coro_free,                                 // llvm.coro.free
+    coro_id,                                   // llvm.coro.id
+    coro_param,                                // llvm.coro.param
+    coro_promise,                              // llvm.coro.promise
+    coro_resume,                               // llvm.coro.resume
+    coro_save,                                 // llvm.coro.save
+    coro_size,                                 // llvm.coro.size
+    coro_subfn_addr,                           // llvm.coro.subfn.addr
+    coro_suspend,                              // llvm.coro.suspend
+    cos,                                       // llvm.cos
+    ctlz,                                      // llvm.ctlz
+    ctpop,                                     // llvm.ctpop
+    cttz,                                      // llvm.cttz
+    dbg_addr,                                  // llvm.dbg.addr
+    dbg_declare,                               // llvm.dbg.declare
+    dbg_value,                                 // llvm.dbg.value
+    debugtrap,                                 // llvm.debugtrap
+    donothing,                                 // llvm.donothing
+    eh_dwarf_cfa,                              // llvm.eh.dwarf.cfa
+    eh_exceptioncode,                          // llvm.eh.exceptioncode
+    eh_exceptionpointer,                       // llvm.eh.exceptionpointer
+    eh_return_i32,                             // llvm.eh.return.i32
+    eh_return_i64,                             // llvm.eh.return.i64
+    eh_sjlj_callsite,                          // llvm.eh.sjlj.callsite
+    eh_sjlj_functioncontext,                   // llvm.eh.sjlj.functioncontext
+    eh_sjlj_longjmp,                           // llvm.eh.sjlj.longjmp
+    eh_sjlj_lsda,                              // llvm.eh.sjlj.lsda
+    eh_sjlj_setjmp,                            // llvm.eh.sjlj.setjmp
+    eh_sjlj_setup_dispatch,                    // llvm.eh.sjlj.setup.dispatch
+    eh_typeid_for,                             // llvm.eh.typeid.for
+    eh_unwind_init,                            // llvm.eh.unwind.init
+    exp,                                       // llvm.exp
+    exp2,                                      // llvm.exp2
+    expect,                                    // llvm.expect
+    experimental_constrained_cos,              // llvm.experimental.constrained.cos
+    experimental_constrained_exp,              // llvm.experimental.constrained.exp
+    experimental_constrained_exp2,             // llvm.experimental.constrained.exp2
+    experimental_constrained_fadd,             // llvm.experimental.constrained.fadd
+    experimental_constrained_fdiv,             // llvm.experimental.constrained.fdiv
+    experimental_constrained_fma,              // llvm.experimental.constrained.fma
+    experimental_constrained_fmul,             // llvm.experimental.constrained.fmul
+    experimental_constrained_frem,             // llvm.experimental.constrained.frem
+    experimental_constrained_fsub,             // llvm.experimental.constrained.fsub
+    experimental_constrained_log,              // llvm.experimental.constrained.log
+    experimental_constrained_log10,            // llvm.experimental.constrained.log10
+    experimental_constrained_log2,             // llvm.experimental.constrained.log2
+    experimental_constrained_nearbyint,        // llvm.experimental.constrained.nearbyint
+    experimental_constrained_pow,              // llvm.experimental.constrained.pow
+    experimental_constrained_powi,             // llvm.experimental.constrained.powi
+    experimental_constrained_rint,             // llvm.experimental.constrained.rint
+    experimental_constrained_sin,              // llvm.experimental.constrained.sin
+    experimental_constrained_sqrt,             // llvm.experimental.constrained.sqrt
+    experimental_deoptimize,                   // llvm.experimental.deoptimize
+    experimental_gc_relocate,                  // llvm.experimental.gc.relocate
+    experimental_gc_result,                    // llvm.experimental.gc.result
+    experimental_gc_statepoint,                // llvm.experimental.gc.statepoint
+    experimental_guard,                        // llvm.experimental.guard
+    experimental_patchpoint_i64,               // llvm.experimental.patchpoint.i64
+    experimental_patchpoint_void,              // llvm.experimental.patchpoint.void
+    experimental_stackmap,                     // llvm.experimental.stackmap
+    experimental_vector_reduce_add,            // llvm.experimental.vector.reduce.add
+    experimental_vector_reduce_and,            // llvm.experimental.vector.reduce.and
+    experimental_vector_reduce_fadd,           // llvm.experimental.vector.reduce.fadd
+    experimental_vector_reduce_fmax,           // llvm.experimental.vector.reduce.fmax
+    experimental_vector_reduce_fmin,           // llvm.experimental.vector.reduce.fmin
+    experimental_vector_reduce_fmul,           // llvm.experimental.vector.reduce.fmul
+    experimental_vector_reduce_mul,            // llvm.experimental.vector.reduce.mul
+    experimental_vector_reduce_or,             // llvm.experimental.vector.reduce.or
+    experimental_vector_reduce_smax,           // llvm.experimental.vector.reduce.smax
+    experimental_vector_reduce_smin,           // llvm.experimental.vector.reduce.smin
+    experimental_vector_reduce_umax,           // llvm.experimental.vector.reduce.umax
+    experimental_vector_reduce_umin,           // llvm.experimental.vector.reduce.umin
+    experimental_vector_reduce_xor,            // llvm.experimental.vector.reduce.xor
+    fabs,                                      // llvm.fabs
+    floor,                                     // llvm.floor
+    flt_rounds,                                // llvm.flt.rounds
+    fma,                                       // llvm.fma
+    fmuladd,                                   // llvm.fmuladd
+    frameaddress,                              // llvm.frameaddress
+    gcread,                                    // llvm.gcread
+    gcroot,                                    // llvm.gcroot
+    gcwrite,                                   // llvm.gcwrite
+    get_dynamic_area_offset,                   // llvm.get.dynamic.area.offset
+    icall_branch_funnel,                       // llvm.icall.branch.funnel
+    init_trampoline,                           // llvm.init.trampoline
+    instrprof_increment,                       // llvm.instrprof.increment
+    instrprof_increment_step,                  // llvm.instrprof.increment.step
+    instrprof_value_profile,                   // llvm.instrprof.value.profile
+    invariant_end,                             // llvm.invariant.end
+    invariant_group_barrier,                   // llvm.invariant.group.barrier
+    invariant_start,                           // llvm.invariant.start
+    lifetime_end,                              // llvm.lifetime.end
+    lifetime_start,                            // llvm.lifetime.start
+    load_relative,                             // llvm.load.relative
+    localaddress,                              // llvm.localaddress
+    localescape,                               // llvm.localescape
+    localrecover,                              // llvm.localrecover
+    log,                                       // llvm.log
+    log10,                                     // llvm.log10
+    log2,                                      // llvm.log2
+    longjmp,                                   // llvm.longjmp
+    masked_compressstore,                      // llvm.masked.compressstore
+    masked_expandload,                         // llvm.masked.expandload
+    masked_gather,                             // llvm.masked.gather
+    masked_load,                               // llvm.masked.load
+    masked_scatter,                            // llvm.masked.scatter
+    masked_store,                              // llvm.masked.store
+    maxnum,                                    // llvm.maxnum
+    memcpy,                                    // llvm.memcpy
+    memcpy_element_unordered_atomic,           // llvm.memcpy.element.unordered.atomic
+    memmove,                                   // llvm.memmove
+    memmove_element_unordered_atomic,          // llvm.memmove.element.unordered.atomic
+    memset,                                    // llvm.memset
+    memset_element_unordered_atomic,           // llvm.memset.element.unordered.atomic
+    minnum,                                    // llvm.minnum
+    nearbyint,                                 // llvm.nearbyint
+    objectsize,                                // llvm.objectsize
+    pcmarker,                                  // llvm.pcmarker
+    pow,                                       // llvm.pow
+    powi,                                      // llvm.powi
+    prefetch,                                  // llvm.prefetch
+    ptr_annotation,                            // llvm.ptr.annotation
+    read_register,                             // llvm.read_register
+    readcyclecounter,                          // llvm.readcyclecounter
+    returnaddress,                             // llvm.returnaddress
+    rint,                                      // llvm.rint
+    round,                                     // llvm.round
+    sadd_with_overflow,                        // llvm.sadd.with.overflow
+    setjmp,                                    // llvm.setjmp
+    sideeffect,                                // llvm.sideeffect
+    siglongjmp,                                // llvm.siglongjmp
+    sigsetjmp,                                 // llvm.sigsetjmp
+    sin,                                       // llvm.sin
+    smul_with_overflow,                        // llvm.smul.with.overflow
+    sqrt,                                      // llvm.sqrt
+    ssa_copy,                                  // llvm.ssa.copy
+    ssub_with_overflow,                        // llvm.ssub.with.overflow
+    stackguard,                                // llvm.stackguard
+    stackprotector,                            // llvm.stackprotector
+    stackrestore,                              // llvm.stackrestore
+    stacksave,                                 // llvm.stacksave
+    thread_pointer,                            // llvm.thread.pointer
+    trap,                                      // llvm.trap
+    trunc,                                     // llvm.trunc
+    type_checked_load,                         // llvm.type.checked.load
+    type_test,                                 // llvm.type.test
+    uadd_with_overflow,                        // llvm.uadd.with.overflow
+    umul_with_overflow,                        // llvm.umul.with.overflow
+    usub_with_overflow,                        // llvm.usub.with.overflow
+    vacopy,                                    // llvm.va_copy
+    vaend,                                     // llvm.va_end
+    vastart,                                   // llvm.va_start
+    var_annotation,                            // llvm.var.annotation
+    write_register,                            // llvm.write_register
+    xray_customevent,                          // llvm.xray.customevent
+    aarch64_clrex,                             // llvm.aarch64.clrex
+    aarch64_crc32b,                            // llvm.aarch64.crc32b
+    aarch64_crc32cb,                           // llvm.aarch64.crc32cb
+    aarch64_crc32ch,                           // llvm.aarch64.crc32ch
+    aarch64_crc32cw,                           // llvm.aarch64.crc32cw
+    aarch64_crc32cx,                           // llvm.aarch64.crc32cx
+    aarch64_crc32h,                            // llvm.aarch64.crc32h
+    aarch64_crc32w,                            // llvm.aarch64.crc32w
+    aarch64_crc32x,                            // llvm.aarch64.crc32x
+    aarch64_crypto_aesd,                       // llvm.aarch64.crypto.aesd
+    aarch64_crypto_aese,                       // llvm.aarch64.crypto.aese
+    aarch64_crypto_aesimc,                     // llvm.aarch64.crypto.aesimc
+    aarch64_crypto_aesmc,                      // llvm.aarch64.crypto.aesmc
+    aarch64_crypto_sha1c,                      // llvm.aarch64.crypto.sha1c
+    aarch64_crypto_sha1h,                      // llvm.aarch64.crypto.sha1h
+    aarch64_crypto_sha1m,                      // llvm.aarch64.crypto.sha1m
+    aarch64_crypto_sha1p,                      // llvm.aarch64.crypto.sha1p
+    aarch64_crypto_sha1su0,                    // llvm.aarch64.crypto.sha1su0
+    aarch64_crypto_sha1su1,                    // llvm.aarch64.crypto.sha1su1
+    aarch64_crypto_sha256h,                    // llvm.aarch64.crypto.sha256h
+    aarch64_crypto_sha256h2,                   // llvm.aarch64.crypto.sha256h2
+    aarch64_crypto_sha256su0,                  // llvm.aarch64.crypto.sha256su0
+    aarch64_crypto_sha256su1,                  // llvm.aarch64.crypto.sha256su1
+    aarch64_dmb,                               // llvm.aarch64.dmb
+    aarch64_dsb,                               // llvm.aarch64.dsb
+    aarch64_hint,                              // llvm.aarch64.hint
+    aarch64_isb,                               // llvm.aarch64.isb
+    aarch64_ldaxp,                             // llvm.aarch64.ldaxp
+    aarch64_ldaxr,                             // llvm.aarch64.ldaxr
+    aarch64_ldxp,                              // llvm.aarch64.ldxp
+    aarch64_ldxr,                              // llvm.aarch64.ldxr
+    aarch64_neon_abs,                          // llvm.aarch64.neon.abs
+    aarch64_neon_addhn,                        // llvm.aarch64.neon.addhn
+    aarch64_neon_addp,                         // llvm.aarch64.neon.addp
+    aarch64_neon_cls,                          // llvm.aarch64.neon.cls
+    aarch64_neon_fabd,                         // llvm.aarch64.neon.fabd
+    aarch64_neon_facge,                        // llvm.aarch64.neon.facge
+    aarch64_neon_facgt,                        // llvm.aarch64.neon.facgt
+    aarch64_neon_faddv,                        // llvm.aarch64.neon.faddv
+    aarch64_neon_fcvtas,                       // llvm.aarch64.neon.fcvtas
+    aarch64_neon_fcvtau,                       // llvm.aarch64.neon.fcvtau
+    aarch64_neon_fcvtms,                       // llvm.aarch64.neon.fcvtms
+    aarch64_neon_fcvtmu,                       // llvm.aarch64.neon.fcvtmu
+    aarch64_neon_fcvtns,                       // llvm.aarch64.neon.fcvtns
+    aarch64_neon_fcvtnu,                       // llvm.aarch64.neon.fcvtnu
+    aarch64_neon_fcvtps,                       // llvm.aarch64.neon.fcvtps
+    aarch64_neon_fcvtpu,                       // llvm.aarch64.neon.fcvtpu
+    aarch64_neon_fcvtxn,                       // llvm.aarch64.neon.fcvtxn
+    aarch64_neon_fcvtzs,                       // llvm.aarch64.neon.fcvtzs
+    aarch64_neon_fcvtzu,                       // llvm.aarch64.neon.fcvtzu
+    aarch64_neon_fmax,                         // llvm.aarch64.neon.fmax
+    aarch64_neon_fmaxnm,                       // llvm.aarch64.neon.fmaxnm
+    aarch64_neon_fmaxnmp,                      // llvm.aarch64.neon.fmaxnmp
+    aarch64_neon_fmaxnmv,                      // llvm.aarch64.neon.fmaxnmv
+    aarch64_neon_fmaxp,                        // llvm.aarch64.neon.fmaxp
+    aarch64_neon_fmaxv,                        // llvm.aarch64.neon.fmaxv
+    aarch64_neon_fmin,                         // llvm.aarch64.neon.fmin
+    aarch64_neon_fminnm,                       // llvm.aarch64.neon.fminnm
+    aarch64_neon_fminnmp,                      // llvm.aarch64.neon.fminnmp
+    aarch64_neon_fminnmv,                      // llvm.aarch64.neon.fminnmv
+    aarch64_neon_fminp,                        // llvm.aarch64.neon.fminp
+    aarch64_neon_fminv,                        // llvm.aarch64.neon.fminv
+    aarch64_neon_fmulx,                        // llvm.aarch64.neon.fmulx
+    aarch64_neon_frecpe,                       // llvm.aarch64.neon.frecpe
+    aarch64_neon_frecps,                       // llvm.aarch64.neon.frecps
+    aarch64_neon_frecpx,                       // llvm.aarch64.neon.frecpx
+    aarch64_neon_frintn,                       // llvm.aarch64.neon.frintn
+    aarch64_neon_frsqrte,                      // llvm.aarch64.neon.frsqrte
+    aarch64_neon_frsqrts,                      // llvm.aarch64.neon.frsqrts
+    aarch64_neon_ld1x2,                        // llvm.aarch64.neon.ld1x2
+    aarch64_neon_ld1x3,                        // llvm.aarch64.neon.ld1x3
+    aarch64_neon_ld1x4,                        // llvm.aarch64.neon.ld1x4
+    aarch64_neon_ld2,                          // llvm.aarch64.neon.ld2
+    aarch64_neon_ld2lane,                      // llvm.aarch64.neon.ld2lane
+    aarch64_neon_ld2r,                         // llvm.aarch64.neon.ld2r
+    aarch64_neon_ld3,                          // llvm.aarch64.neon.ld3
+    aarch64_neon_ld3lane,                      // llvm.aarch64.neon.ld3lane
+    aarch64_neon_ld3r,                         // llvm.aarch64.neon.ld3r
+    aarch64_neon_ld4,                          // llvm.aarch64.neon.ld4
+    aarch64_neon_ld4lane,                      // llvm.aarch64.neon.ld4lane
+    aarch64_neon_ld4r,                         // llvm.aarch64.neon.ld4r
+    aarch64_neon_pmul,                         // llvm.aarch64.neon.pmul
+    aarch64_neon_pmull,                        // llvm.aarch64.neon.pmull
+    aarch64_neon_pmull64,                      // llvm.aarch64.neon.pmull64
+    aarch64_neon_raddhn,                       // llvm.aarch64.neon.raddhn
+    aarch64_neon_rbit,                         // llvm.aarch64.neon.rbit
+    aarch64_neon_rshrn,                        // llvm.aarch64.neon.rshrn
+    aarch64_neon_rsubhn,                       // llvm.aarch64.neon.rsubhn
+    aarch64_neon_sabd,                         // llvm.aarch64.neon.sabd
+    aarch64_neon_saddlp,                       // llvm.aarch64.neon.saddlp
+    aarch64_neon_saddlv,                       // llvm.aarch64.neon.saddlv
+    aarch64_neon_saddv,                        // llvm.aarch64.neon.saddv
+    aarch64_neon_scalar_sqxtn,                 // llvm.aarch64.neon.scalar.sqxtn
+    aarch64_neon_scalar_sqxtun,                // llvm.aarch64.neon.scalar.sqxtun
+    aarch64_neon_scalar_uqxtn,                 // llvm.aarch64.neon.scalar.uqxtn
+    aarch64_neon_shadd,                        // llvm.aarch64.neon.shadd
+    aarch64_neon_shll,                         // llvm.aarch64.neon.shll
+    aarch64_neon_shsub,                        // llvm.aarch64.neon.shsub
+    aarch64_neon_smax,                         // llvm.aarch64.neon.smax
+    aarch64_neon_smaxp,                        // llvm.aarch64.neon.smaxp
+    aarch64_neon_smaxv,                        // llvm.aarch64.neon.smaxv
+    aarch64_neon_smin,                         // llvm.aarch64.neon.smin
+    aarch64_neon_sminp,                        // llvm.aarch64.neon.sminp
+    aarch64_neon_sminv,                        // llvm.aarch64.neon.sminv
+    aarch64_neon_smull,                        // llvm.aarch64.neon.smull
+    aarch64_neon_sqabs,                        // llvm.aarch64.neon.sqabs
+    aarch64_neon_sqadd,                        // llvm.aarch64.neon.sqadd
+    aarch64_neon_sqdmulh,                      // llvm.aarch64.neon.sqdmulh
+    aarch64_neon_sqdmull,                      // llvm.aarch64.neon.sqdmull
+    aarch64_neon_sqdmulls_scalar,              // llvm.aarch64.neon.sqdmulls.scalar
+    aarch64_neon_sqneg,                        // llvm.aarch64.neon.sqneg
+    aarch64_neon_sqrdmulh,                     // llvm.aarch64.neon.sqrdmulh
+    aarch64_neon_sqrshl,                       // llvm.aarch64.neon.sqrshl
+    aarch64_neon_sqrshrn,                      // llvm.aarch64.neon.sqrshrn
+    aarch64_neon_sqrshrun,                     // llvm.aarch64.neon.sqrshrun
+    aarch64_neon_sqshl,                        // llvm.aarch64.neon.sqshl
+    aarch64_neon_sqshlu,                       // llvm.aarch64.neon.sqshlu
+    aarch64_neon_sqshrn,                       // llvm.aarch64.neon.sqshrn
+    aarch64_neon_sqshrun,                      // llvm.aarch64.neon.sqshrun
+    aarch64_neon_sqsub,                        // llvm.aarch64.neon.sqsub
+    aarch64_neon_sqxtn,                        // llvm.aarch64.neon.sqxtn
+    aarch64_neon_sqxtun,                       // llvm.aarch64.neon.sqxtun
+    aarch64_neon_srhadd,                       // llvm.aarch64.neon.srhadd
+    aarch64_neon_srshl,                        // llvm.aarch64.neon.srshl
+    aarch64_neon_sshl,                         // llvm.aarch64.neon.sshl
+    aarch64_neon_sshll,                        // llvm.aarch64.neon.sshll
+    aarch64_neon_st1x2,                        // llvm.aarch64.neon.st1x2
+    aarch64_neon_st1x3,                        // llvm.aarch64.neon.st1x3
+    aarch64_neon_st1x4,                        // llvm.aarch64.neon.st1x4
+    aarch64_neon_st2,                          // llvm.aarch64.neon.st2
+    aarch64_neon_st2lane,                      // llvm.aarch64.neon.st2lane
+    aarch64_neon_st3,                          // llvm.aarch64.neon.st3
+    aarch64_neon_st3lane,                      // llvm.aarch64.neon.st3lane
+    aarch64_neon_st4,                          // llvm.aarch64.neon.st4
+    aarch64_neon_st4lane,                      // llvm.aarch64.neon.st4lane
+    aarch64_neon_subhn,                        // llvm.aarch64.neon.subhn
+    aarch64_neon_suqadd,                       // llvm.aarch64.neon.suqadd
+    aarch64_neon_tbl1,                         // llvm.aarch64.neon.tbl1
+    aarch64_neon_tbl2,                         // llvm.aarch64.neon.tbl2
+    aarch64_neon_tbl3,                         // llvm.aarch64.neon.tbl3
+    aarch64_neon_tbl4,                         // llvm.aarch64.neon.tbl4
+    aarch64_neon_tbx1,                         // llvm.aarch64.neon.tbx1
+    aarch64_neon_tbx2,                         // llvm.aarch64.neon.tbx2
+    aarch64_neon_tbx3,                         // llvm.aarch64.neon.tbx3
+    aarch64_neon_tbx4,                         // llvm.aarch64.neon.tbx4
+    aarch64_neon_uabd,                         // llvm.aarch64.neon.uabd
+    aarch64_neon_uaddlp,                       // llvm.aarch64.neon.uaddlp
+    aarch64_neon_uaddlv,                       // llvm.aarch64.neon.uaddlv
+    aarch64_neon_uaddv,                        // llvm.aarch64.neon.uaddv
+    aarch64_neon_uhadd,                        // llvm.aarch64.neon.uhadd
+    aarch64_neon_uhsub,                        // llvm.aarch64.neon.uhsub
+    aarch64_neon_umax,                         // llvm.aarch64.neon.umax
+    aarch64_neon_umaxp,                        // llvm.aarch64.neon.umaxp
+    aarch64_neon_umaxv,                        // llvm.aarch64.neon.umaxv
+    aarch64_neon_umin,                         // llvm.aarch64.neon.umin
+    aarch64_neon_uminp,                        // llvm.aarch64.neon.uminp
+    aarch64_neon_uminv,                        // llvm.aarch64.neon.uminv
+    aarch64_neon_umull,                        // llvm.aarch64.neon.umull
+    aarch64_neon_uqadd,                        // llvm.aarch64.neon.uqadd
+    aarch64_neon_uqrshl,                       // llvm.aarch64.neon.uqrshl
+    aarch64_neon_uqrshrn,                      // llvm.aarch64.neon.uqrshrn
+    aarch64_neon_uqshl,                        // llvm.aarch64.neon.uqshl
+    aarch64_neon_uqshrn,                       // llvm.aarch64.neon.uqshrn
+    aarch64_neon_uqsub,                        // llvm.aarch64.neon.uqsub
+    aarch64_neon_uqxtn,                        // llvm.aarch64.neon.uqxtn
+    aarch64_neon_urecpe,                       // llvm.aarch64.neon.urecpe
+    aarch64_neon_urhadd,                       // llvm.aarch64.neon.urhadd
+    aarch64_neon_urshl,                        // llvm.aarch64.neon.urshl
+    aarch64_neon_ursqrte,                      // llvm.aarch64.neon.ursqrte
+    aarch64_neon_ushl,                         // llvm.aarch64.neon.ushl
+    aarch64_neon_ushll,                        // llvm.aarch64.neon.ushll
+    aarch64_neon_usqadd,                       // llvm.aarch64.neon.usqadd
+    aarch64_neon_vcopy_lane,                   // llvm.aarch64.neon.vcopy.lane
+    aarch64_neon_vcvtfp2fxs,                   // llvm.aarch64.neon.vcvtfp2fxs
+    aarch64_neon_vcvtfp2fxu,                   // llvm.aarch64.neon.vcvtfp2fxu
+    aarch64_neon_vcvtfp2hf,                    // llvm.aarch64.neon.vcvtfp2hf
+    aarch64_neon_vcvtfxs2fp,                   // llvm.aarch64.neon.vcvtfxs2fp
+    aarch64_neon_vcvtfxu2fp,                   // llvm.aarch64.neon.vcvtfxu2fp
+    aarch64_neon_vcvthf2fp,                    // llvm.aarch64.neon.vcvthf2fp
+    aarch64_neon_vsli,                         // llvm.aarch64.neon.vsli
+    aarch64_neon_vsri,                         // llvm.aarch64.neon.vsri
+    aarch64_sdiv,                              // llvm.aarch64.sdiv
+    aarch64_sisd_fabd,                         // llvm.aarch64.sisd.fabd
+    aarch64_sisd_fcvtxn,                       // llvm.aarch64.sisd.fcvtxn
+    aarch64_stlxp,                             // llvm.aarch64.stlxp
+    aarch64_stlxr,                             // llvm.aarch64.stlxr
+    aarch64_stxp,                              // llvm.aarch64.stxp
+    aarch64_stxr,                              // llvm.aarch64.stxr
+    aarch64_udiv,                              // llvm.aarch64.udiv
+    amdgcn_alignbit,                           // llvm.amdgcn.alignbit
+    amdgcn_alignbyte,                          // llvm.amdgcn.alignbyte
+    amdgcn_atomic_dec,                         // llvm.amdgcn.atomic.dec
+    amdgcn_atomic_inc,                         // llvm.amdgcn.atomic.inc
+    amdgcn_break,                              // llvm.amdgcn.break
+    amdgcn_buffer_atomic_add,                  // llvm.amdgcn.buffer.atomic.add
+    amdgcn_buffer_atomic_and,                  // llvm.amdgcn.buffer.atomic.and
+    amdgcn_buffer_atomic_cmpswap,              // llvm.amdgcn.buffer.atomic.cmpswap
+    amdgcn_buffer_atomic_or,                   // llvm.amdgcn.buffer.atomic.or
+    amdgcn_buffer_atomic_smax,                 // llvm.amdgcn.buffer.atomic.smax
+    amdgcn_buffer_atomic_smin,                 // llvm.amdgcn.buffer.atomic.smin
+    amdgcn_buffer_atomic_sub,                  // llvm.amdgcn.buffer.atomic.sub
+    amdgcn_buffer_atomic_swap,                 // llvm.amdgcn.buffer.atomic.swap
+    amdgcn_buffer_atomic_umax,                 // llvm.amdgcn.buffer.atomic.umax
+    amdgcn_buffer_atomic_umin,                 // llvm.amdgcn.buffer.atomic.umin
+    amdgcn_buffer_atomic_xor,                  // llvm.amdgcn.buffer.atomic.xor
+    amdgcn_buffer_load,                        // llvm.amdgcn.buffer.load
+    amdgcn_buffer_load_format,                 // llvm.amdgcn.buffer.load.format
+    amdgcn_buffer_store,                       // llvm.amdgcn.buffer.store
+    amdgcn_buffer_store_format,                // llvm.amdgcn.buffer.store.format
+    amdgcn_buffer_wbinvl1,                     // llvm.amdgcn.buffer.wbinvl1
+    amdgcn_buffer_wbinvl1_sc,                  // llvm.amdgcn.buffer.wbinvl1.sc
+    amdgcn_buffer_wbinvl1_vol,                 // llvm.amdgcn.buffer.wbinvl1.vol
+    amdgcn_class,                              // llvm.amdgcn.class
+    amdgcn_cos,                                // llvm.amdgcn.cos
+    amdgcn_cubeid,                             // llvm.amdgcn.cubeid
+    amdgcn_cubema,                             // llvm.amdgcn.cubema
+    amdgcn_cubesc,                             // llvm.amdgcn.cubesc
+    amdgcn_cubetc,                             // llvm.amdgcn.cubetc
+    amdgcn_cvt_pk_i16,                         // llvm.amdgcn.cvt.pk.i16
+    amdgcn_cvt_pk_u16,                         // llvm.amdgcn.cvt.pk.u16
+    amdgcn_cvt_pk_u8_f32,                      // llvm.amdgcn.cvt.pk.u8.f32
+    amdgcn_cvt_pknorm_i16,                     // llvm.amdgcn.cvt.pknorm.i16
+    amdgcn_cvt_pknorm_u16,                     // llvm.amdgcn.cvt.pknorm.u16
+    amdgcn_cvt_pkrtz,                          // llvm.amdgcn.cvt.pkrtz
+    amdgcn_dispatch_id,                        // llvm.amdgcn.dispatch.id
+    amdgcn_dispatch_ptr,                       // llvm.amdgcn.dispatch.ptr
+    amdgcn_div_fixup,                          // llvm.amdgcn.div.fixup
+    amdgcn_div_fmas,                           // llvm.amdgcn.div.fmas
+    amdgcn_div_scale,                          // llvm.amdgcn.div.scale
+    amdgcn_ds_bpermute,                        // llvm.amdgcn.ds.bpermute
+    amdgcn_ds_fadd,                            // llvm.amdgcn.ds.fadd
+    amdgcn_ds_fmax,                            // llvm.amdgcn.ds.fmax
+    amdgcn_ds_fmin,                            // llvm.amdgcn.ds.fmin
+    amdgcn_ds_permute,                         // llvm.amdgcn.ds.permute
+    amdgcn_ds_swizzle,                         // llvm.amdgcn.ds.swizzle
+    amdgcn_else,                               // llvm.amdgcn.else
+    amdgcn_else_break,                         // llvm.amdgcn.else.break
+    amdgcn_end_cf,                             // llvm.amdgcn.end.cf
+    amdgcn_exp,                                // llvm.amdgcn.exp
+    amdgcn_exp_compr,                          // llvm.amdgcn.exp.compr
+    amdgcn_fcmp,                               // llvm.amdgcn.fcmp
+    amdgcn_fdiv_fast,                          // llvm.amdgcn.fdiv.fast
+    amdgcn_fmed3,                              // llvm.amdgcn.fmed3
+    amdgcn_fmul_legacy,                        // llvm.amdgcn.fmul.legacy
+    amdgcn_fract,                              // llvm.amdgcn.fract
+    amdgcn_frexp_exp,                          // llvm.amdgcn.frexp.exp
+    amdgcn_frexp_mant,                         // llvm.amdgcn.frexp.mant
+    amdgcn_groupstaticsize,                    // llvm.amdgcn.groupstaticsize
+    amdgcn_icmp,                               // llvm.amdgcn.icmp
+    amdgcn_if,                                 // llvm.amdgcn.if
+    amdgcn_if_break,                           // llvm.amdgcn.if.break
+    amdgcn_image_atomic_add,                   // llvm.amdgcn.image.atomic.add
+    amdgcn_image_atomic_and,                   // llvm.amdgcn.image.atomic.and
+    amdgcn_image_atomic_cmpswap,               // llvm.amdgcn.image.atomic.cmpswap
+    amdgcn_image_atomic_dec,                   // llvm.amdgcn.image.atomic.dec
+    amdgcn_image_atomic_inc,                   // llvm.amdgcn.image.atomic.inc
+    amdgcn_image_atomic_or,                    // llvm.amdgcn.image.atomic.or
+    amdgcn_image_atomic_smax,                  // llvm.amdgcn.image.atomic.smax
+    amdgcn_image_atomic_smin,                  // llvm.amdgcn.image.atomic.smin
+    amdgcn_image_atomic_sub,                   // llvm.amdgcn.image.atomic.sub
+    amdgcn_image_atomic_swap,                  // llvm.amdgcn.image.atomic.swap
+    amdgcn_image_atomic_umax,                  // llvm.amdgcn.image.atomic.umax
+    amdgcn_image_atomic_umin,                  // llvm.amdgcn.image.atomic.umin
+    amdgcn_image_atomic_xor,                   // llvm.amdgcn.image.atomic.xor
+    amdgcn_image_gather4,                      // llvm.amdgcn.image.gather4
+    amdgcn_image_gather4_b,                    // llvm.amdgcn.image.gather4.b
+    amdgcn_image_gather4_b_cl,                 // llvm.amdgcn.image.gather4.b.cl
+    amdgcn_image_gather4_b_cl_o,               // llvm.amdgcn.image.gather4.b.cl.o
+    amdgcn_image_gather4_b_o,                  // llvm.amdgcn.image.gather4.b.o
+    amdgcn_image_gather4_c,                    // llvm.amdgcn.image.gather4.c
+    amdgcn_image_gather4_c_b,                  // llvm.amdgcn.image.gather4.c.b
+    amdgcn_image_gather4_c_b_cl,               // llvm.amdgcn.image.gather4.c.b.cl
+    amdgcn_image_gather4_c_b_cl_o,             // llvm.amdgcn.image.gather4.c.b.cl.o
+    amdgcn_image_gather4_c_b_o,                // llvm.amdgcn.image.gather4.c.b.o
+    amdgcn_image_gather4_c_cl,                 // llvm.amdgcn.image.gather4.c.cl
+    amdgcn_image_gather4_c_cl_o,               // llvm.amdgcn.image.gather4.c.cl.o
+    amdgcn_image_gather4_c_l,                  // llvm.amdgcn.image.gather4.c.l
+    amdgcn_image_gather4_c_l_o,                // llvm.amdgcn.image.gather4.c.l.o
+    amdgcn_image_gather4_c_lz,                 // llvm.amdgcn.image.gather4.c.lz
+    amdgcn_image_gather4_c_lz_o,               // llvm.amdgcn.image.gather4.c.lz.o
+    amdgcn_image_gather4_c_o,                  // llvm.amdgcn.image.gather4.c.o
+    amdgcn_image_gather4_cl,                   // llvm.amdgcn.image.gather4.cl
+    amdgcn_image_gather4_cl_o,                 // llvm.amdgcn.image.gather4.cl.o
+    amdgcn_image_gather4_l,                    // llvm.amdgcn.image.gather4.l
+    amdgcn_image_gather4_l_o,                  // llvm.amdgcn.image.gather4.l.o
+    amdgcn_image_gather4_lz,                   // llvm.amdgcn.image.gather4.lz
+    amdgcn_image_gather4_lz_o,                 // llvm.amdgcn.image.gather4.lz.o
+    amdgcn_image_gather4_o,                    // llvm.amdgcn.image.gather4.o
+    amdgcn_image_getlod,                       // llvm.amdgcn.image.getlod
+    amdgcn_image_getresinfo,                   // llvm.amdgcn.image.getresinfo
+    amdgcn_image_load,                         // llvm.amdgcn.image.load
+    amdgcn_image_load_mip,                     // llvm.amdgcn.image.load.mip
+    amdgcn_image_sample,                       // llvm.amdgcn.image.sample
+    amdgcn_image_sample_b,                     // llvm.amdgcn.image.sample.b
+    amdgcn_image_sample_b_cl,                  // llvm.amdgcn.image.sample.b.cl
+    amdgcn_image_sample_b_cl_o,                // llvm.amdgcn.image.sample.b.cl.o
+    amdgcn_image_sample_b_o,                   // llvm.amdgcn.image.sample.b.o
+    amdgcn_image_sample_c,                     // llvm.amdgcn.image.sample.c
+    amdgcn_image_sample_c_b,                   // llvm.amdgcn.image.sample.c.b
+    amdgcn_image_sample_c_b_cl,                // llvm.amdgcn.image.sample.c.b.cl
+    amdgcn_image_sample_c_b_cl_o,              // llvm.amdgcn.image.sample.c.b.cl.o
+    amdgcn_image_sample_c_b_o,                 // llvm.amdgcn.image.sample.c.b.o
+    amdgcn_image_sample_c_cd,                  // llvm.amdgcn.image.sample.c.cd
+    amdgcn_image_sample_c_cd_cl,               // llvm.amdgcn.image.sample.c.cd.cl
+    amdgcn_image_sample_c_cd_cl_o,             // llvm.amdgcn.image.sample.c.cd.cl.o
+    amdgcn_image_sample_c_cd_o,                // llvm.amdgcn.image.sample.c.cd.o
+    amdgcn_image_sample_c_cl,                  // llvm.amdgcn.image.sample.c.cl
+    amdgcn_image_sample_c_cl_o,                // llvm.amdgcn.image.sample.c.cl.o
+    amdgcn_image_sample_c_d,                   // llvm.amdgcn.image.sample.c.d
+    amdgcn_image_sample_c_d_cl,                // llvm.amdgcn.image.sample.c.d.cl
+    amdgcn_image_sample_c_d_cl_o,              // llvm.amdgcn.image.sample.c.d.cl.o
+    amdgcn_image_sample_c_d_o,                 // llvm.amdgcn.image.sample.c.d.o
+    amdgcn_image_sample_c_l,                   // llvm.amdgcn.image.sample.c.l
+    amdgcn_image_sample_c_l_o,                 // llvm.amdgcn.image.sample.c.l.o
+    amdgcn_image_sample_c_lz,                  // llvm.amdgcn.image.sample.c.lz
+    amdgcn_image_sample_c_lz_o,                // llvm.amdgcn.image.sample.c.lz.o
+    amdgcn_image_sample_c_o,                   // llvm.amdgcn.image.sample.c.o
+    amdgcn_image_sample_cd,                    // llvm.amdgcn.image.sample.cd
+    amdgcn_image_sample_cd_cl,                 // llvm.amdgcn.image.sample.cd.cl
+    amdgcn_image_sample_cd_cl_o,               // llvm.amdgcn.image.sample.cd.cl.o
+    amdgcn_image_sample_cd_o,                  // llvm.amdgcn.image.sample.cd.o
+    amdgcn_image_sample_cl,                    // llvm.amdgcn.image.sample.cl
+    amdgcn_image_sample_cl_o,                  // llvm.amdgcn.image.sample.cl.o
+    amdgcn_image_sample_d,                     // llvm.amdgcn.image.sample.d
+    amdgcn_image_sample_d_cl,                  // llvm.amdgcn.image.sample.d.cl
+    amdgcn_image_sample_d_cl_o,                // llvm.amdgcn.image.sample.d.cl.o
+    amdgcn_image_sample_d_o,                   // llvm.amdgcn.image.sample.d.o
+    amdgcn_image_sample_l,                     // llvm.amdgcn.image.sample.l
+    amdgcn_image_sample_l_o,                   // llvm.amdgcn.image.sample.l.o
+    amdgcn_image_sample_lz,                    // llvm.amdgcn.image.sample.lz
+    amdgcn_image_sample_lz_o,                  // llvm.amdgcn.image.sample.lz.o
+    amdgcn_image_sample_o,                     // llvm.amdgcn.image.sample.o
+    amdgcn_image_store,                        // llvm.amdgcn.image.store
+    amdgcn_image_store_mip,                    // llvm.amdgcn.image.store.mip
+    amdgcn_implicit_buffer_ptr,                // llvm.amdgcn.implicit.buffer.ptr
+    amdgcn_implicitarg_ptr,                    // llvm.amdgcn.implicitarg.ptr
+    amdgcn_init_exec,                          // llvm.amdgcn.init.exec
+    amdgcn_init_exec_from_input,               // llvm.amdgcn.init.exec.from.input
+    amdgcn_interp_mov,                         // llvm.amdgcn.interp.mov
+    amdgcn_interp_p1,                          // llvm.amdgcn.interp.p1
+    amdgcn_interp_p2,                          // llvm.amdgcn.interp.p2
+    amdgcn_kernarg_segment_ptr,                // llvm.amdgcn.kernarg.segment.ptr
+    amdgcn_kill,                               // llvm.amdgcn.kill
+    amdgcn_ldexp,                              // llvm.amdgcn.ldexp
+    amdgcn_lerp,                               // llvm.amdgcn.lerp
+    amdgcn_log_clamp,                          // llvm.amdgcn.log.clamp
+    amdgcn_loop,                               // llvm.amdgcn.loop
+    amdgcn_mbcnt_hi,                           // llvm.amdgcn.mbcnt.hi
+    amdgcn_mbcnt_lo,                           // llvm.amdgcn.mbcnt.lo
+    amdgcn_mov_dpp,                            // llvm.amdgcn.mov.dpp
+    amdgcn_mqsad_pk_u16_u8,                    // llvm.amdgcn.mqsad.pk.u16.u8
+    amdgcn_mqsad_u32_u8,                       // llvm.amdgcn.mqsad.u32.u8
+    amdgcn_msad_u8,                            // llvm.amdgcn.msad.u8
+    amdgcn_ps_live,                            // llvm.amdgcn.ps.live
+    amdgcn_qsad_pk_u16_u8,                     // llvm.amdgcn.qsad.pk.u16.u8
+    amdgcn_queue_ptr,                          // llvm.amdgcn.queue.ptr
+    amdgcn_rcp,                                // llvm.amdgcn.rcp
+    amdgcn_rcp_legacy,                         // llvm.amdgcn.rcp.legacy
+    amdgcn_readfirstlane,                      // llvm.amdgcn.readfirstlane
+    amdgcn_readlane,                           // llvm.amdgcn.readlane
+    amdgcn_rsq,                                // llvm.amdgcn.rsq
+    amdgcn_rsq_clamp,                          // llvm.amdgcn.rsq.clamp
+    amdgcn_rsq_legacy,                         // llvm.amdgcn.rsq.legacy
+    amdgcn_s_barrier,                          // llvm.amdgcn.s.barrier
+    amdgcn_s_dcache_inv,                       // llvm.amdgcn.s.dcache.inv
+    amdgcn_s_dcache_inv_vol,                   // llvm.amdgcn.s.dcache.inv.vol
+    amdgcn_s_dcache_wb,                        // llvm.amdgcn.s.dcache.wb
+    amdgcn_s_dcache_wb_vol,                    // llvm.amdgcn.s.dcache.wb.vol
+    amdgcn_s_decperflevel,                     // llvm.amdgcn.s.decperflevel
+    amdgcn_s_getpc,                            // llvm.amdgcn.s.getpc
+    amdgcn_s_getreg,                           // llvm.amdgcn.s.getreg
+    amdgcn_s_incperflevel,                     // llvm.amdgcn.s.incperflevel
+    amdgcn_s_memrealtime,                      // llvm.amdgcn.s.memrealtime
+    amdgcn_s_memtime,                          // llvm.amdgcn.s.memtime
+    amdgcn_s_sendmsg,                          // llvm.amdgcn.s.sendmsg
+    amdgcn_s_sendmsghalt,                      // llvm.amdgcn.s.sendmsghalt
+    amdgcn_s_sleep,                            // llvm.amdgcn.s.sleep
+    amdgcn_s_waitcnt,                          // llvm.amdgcn.s.waitcnt
+    amdgcn_sad_hi_u8,                          // llvm.amdgcn.sad.hi.u8
+    amdgcn_sad_u16,                            // llvm.amdgcn.sad.u16
+    amdgcn_sad_u8,                             // llvm.amdgcn.sad.u8
+    amdgcn_sbfe,                               // llvm.amdgcn.sbfe
+    amdgcn_set_inactive,                       // llvm.amdgcn.set.inactive
+    amdgcn_sffbh,                              // llvm.amdgcn.sffbh
+    amdgcn_sin,                                // llvm.amdgcn.sin
+    amdgcn_tbuffer_load,                       // llvm.amdgcn.tbuffer.load
+    amdgcn_tbuffer_store,                      // llvm.amdgcn.tbuffer.store
+    amdgcn_trig_preop,                         // llvm.amdgcn.trig.preop
+    amdgcn_ubfe,                               // llvm.amdgcn.ubfe
+    amdgcn_unreachable,                        // llvm.amdgcn.unreachable
+    amdgcn_update_dpp,                         // llvm.amdgcn.update.dpp
+    amdgcn_wave_barrier,                       // llvm.amdgcn.wave.barrier
+    amdgcn_workgroup_id_x,                     // llvm.amdgcn.workgroup.id.x
+    amdgcn_workgroup_id_y,                     // llvm.amdgcn.workgroup.id.y
+    amdgcn_workgroup_id_z,                     // llvm.amdgcn.workgroup.id.z
+    amdgcn_workitem_id_x,                      // llvm.amdgcn.workitem.id.x
+    amdgcn_workitem_id_y,                      // llvm.amdgcn.workitem.id.y
+    amdgcn_workitem_id_z,                      // llvm.amdgcn.workitem.id.z
+    amdgcn_wqm,                                // llvm.amdgcn.wqm
+    amdgcn_wqm_vote,                           // llvm.amdgcn.wqm.vote
+    amdgcn_writelane,                          // llvm.amdgcn.writelane
+    amdgcn_wwm,                                // llvm.amdgcn.wwm
+    arm_cdp,                                   // llvm.arm.cdp
+    arm_cdp2,                                  // llvm.arm.cdp2
+    arm_clrex,                                 // llvm.arm.clrex
+    arm_crc32b,                                // llvm.arm.crc32b
+    arm_crc32cb,                               // llvm.arm.crc32cb
+    arm_crc32ch,                               // llvm.arm.crc32ch
+    arm_crc32cw,                               // llvm.arm.crc32cw
+    arm_crc32h,                                // llvm.arm.crc32h
+    arm_crc32w,                                // llvm.arm.crc32w
+    arm_dbg,                                   // llvm.arm.dbg
+    arm_dmb,                                   // llvm.arm.dmb
+    arm_dsb,                                   // llvm.arm.dsb
+    arm_get_fpscr,                             // llvm.arm.get.fpscr
+    arm_hint,                                  // llvm.arm.hint
+    arm_isb,                                   // llvm.arm.isb
+    arm_ldaex,                                 // llvm.arm.ldaex
+    arm_ldaexd,                                // llvm.arm.ldaexd
+    arm_ldc,                                   // llvm.arm.ldc
+    arm_ldc2,                                  // llvm.arm.ldc2
+    arm_ldc2l,                                 // llvm.arm.ldc2l
+    arm_ldcl,                                  // llvm.arm.ldcl
+    arm_ldrex,                                 // llvm.arm.ldrex
+    arm_ldrexd,                                // llvm.arm.ldrexd
+    arm_mcr,                                   // llvm.arm.mcr
+    arm_mcr2,                                  // llvm.arm.mcr2
+    arm_mcrr,                                  // llvm.arm.mcrr
+    arm_mcrr2,                                 // llvm.arm.mcrr2
+    arm_mrc,                                   // llvm.arm.mrc
+    arm_mrc2,                                  // llvm.arm.mrc2
+    arm_mrrc,                                  // llvm.arm.mrrc
+    arm_mrrc2,                                 // llvm.arm.mrrc2
+    arm_neon_aesd,                             // llvm.arm.neon.aesd
+    arm_neon_aese,                             // llvm.arm.neon.aese
+    arm_neon_aesimc,                           // llvm.arm.neon.aesimc
+    arm_neon_aesmc,                            // llvm.arm.neon.aesmc
+    arm_neon_sha1c,                            // llvm.arm.neon.sha1c
+    arm_neon_sha1h,                            // llvm.arm.neon.sha1h
+    arm_neon_sha1m,                            // llvm.arm.neon.sha1m
+    arm_neon_sha1p,                            // llvm.arm.neon.sha1p
+    arm_neon_sha1su0,                          // llvm.arm.neon.sha1su0
+    arm_neon_sha1su1,                          // llvm.arm.neon.sha1su1
+    arm_neon_sha256h,                          // llvm.arm.neon.sha256h
+    arm_neon_sha256h2,                         // llvm.arm.neon.sha256h2
+    arm_neon_sha256su0,                        // llvm.arm.neon.sha256su0
+    arm_neon_sha256su1,                        // llvm.arm.neon.sha256su1
+    arm_neon_vabds,                            // llvm.arm.neon.vabds
+    arm_neon_vabdu,                            // llvm.arm.neon.vabdu
+    arm_neon_vabs,                             // llvm.arm.neon.vabs
+    arm_neon_vacge,                            // llvm.arm.neon.vacge
+    arm_neon_vacgt,                            // llvm.arm.neon.vacgt
+    arm_neon_vbsl,                             // llvm.arm.neon.vbsl
+    arm_neon_vcls,                             // llvm.arm.neon.vcls
+    arm_neon_vcvtas,                           // llvm.arm.neon.vcvtas
+    arm_neon_vcvtau,                           // llvm.arm.neon.vcvtau
+    arm_neon_vcvtfp2fxs,                       // llvm.arm.neon.vcvtfp2fxs
+    arm_neon_vcvtfp2fxu,                       // llvm.arm.neon.vcvtfp2fxu
+    arm_neon_vcvtfp2hf,                        // llvm.arm.neon.vcvtfp2hf
+    arm_neon_vcvtfxs2fp,                       // llvm.arm.neon.vcvtfxs2fp
+    arm_neon_vcvtfxu2fp,                       // llvm.arm.neon.vcvtfxu2fp
+    arm_neon_vcvthf2fp,                        // llvm.arm.neon.vcvthf2fp
+    arm_neon_vcvtms,                           // llvm.arm.neon.vcvtms
+    arm_neon_vcvtmu,                           // llvm.arm.neon.vcvtmu
+    arm_neon_vcvtns,                           // llvm.arm.neon.vcvtns
+    arm_neon_vcvtnu,                           // llvm.arm.neon.vcvtnu
+    arm_neon_vcvtps,                           // llvm.arm.neon.vcvtps
+    arm_neon_vcvtpu,                           // llvm.arm.neon.vcvtpu
+    arm_neon_vhadds,                           // llvm.arm.neon.vhadds
+    arm_neon_vhaddu,                           // llvm.arm.neon.vhaddu
+    arm_neon_vhsubs,                           // llvm.arm.neon.vhsubs
+    arm_neon_vhsubu,                           // llvm.arm.neon.vhsubu
+    arm_neon_vld1,                             // llvm.arm.neon.vld1
+    arm_neon_vld2,                             // llvm.arm.neon.vld2
+    arm_neon_vld2lane,                         // llvm.arm.neon.vld2lane
+    arm_neon_vld3,                             // llvm.arm.neon.vld3
+    arm_neon_vld3lane,                         // llvm.arm.neon.vld3lane
+    arm_neon_vld4,                             // llvm.arm.neon.vld4
+    arm_neon_vld4lane,                         // llvm.arm.neon.vld4lane
+    arm_neon_vmaxnm,                           // llvm.arm.neon.vmaxnm
+    arm_neon_vmaxs,                            // llvm.arm.neon.vmaxs
+    arm_neon_vmaxu,                            // llvm.arm.neon.vmaxu
+    arm_neon_vminnm,                           // llvm.arm.neon.vminnm
+    arm_neon_vmins,                            // llvm.arm.neon.vmins
+    arm_neon_vminu,                            // llvm.arm.neon.vminu
+    arm_neon_vmullp,                           // llvm.arm.neon.vmullp
+    arm_neon_vmulls,                           // llvm.arm.neon.vmulls
+    arm_neon_vmullu,                           // llvm.arm.neon.vmullu
+    arm_neon_vmulp,                            // llvm.arm.neon.vmulp
+    arm_neon_vpadals,                          // llvm.arm.neon.vpadals
+    arm_neon_vpadalu,                          // llvm.arm.neon.vpadalu
+    arm_neon_vpadd,                            // llvm.arm.neon.vpadd
+    arm_neon_vpaddls,                          // llvm.arm.neon.vpaddls
+    arm_neon_vpaddlu,                          // llvm.arm.neon.vpaddlu
+    arm_neon_vpmaxs,                           // llvm.arm.neon.vpmaxs
+    arm_neon_vpmaxu,                           // llvm.arm.neon.vpmaxu
+    arm_neon_vpmins,                           // llvm.arm.neon.vpmins
+    arm_neon_vpminu,                           // llvm.arm.neon.vpminu
+    arm_neon_vqabs,                            // llvm.arm.neon.vqabs
+    arm_neon_vqadds,                           // llvm.arm.neon.vqadds
+    arm_neon_vqaddu,                           // llvm.arm.neon.vqaddu
+    arm_neon_vqdmulh,                          // llvm.arm.neon.vqdmulh
+    arm_neon_vqdmull,                          // llvm.arm.neon.vqdmull
+    arm_neon_vqmovns,                          // llvm.arm.neon.vqmovns
+    arm_neon_vqmovnsu,                         // llvm.arm.neon.vqmovnsu
+    arm_neon_vqmovnu,                          // llvm.arm.neon.vqmovnu
+    arm_neon_vqneg,                            // llvm.arm.neon.vqneg
+    arm_neon_vqrdmulh,                         // llvm.arm.neon.vqrdmulh
+    arm_neon_vqrshiftns,                       // llvm.arm.neon.vqrshiftns
+    arm_neon_vqrshiftnsu,                      // llvm.arm.neon.vqrshiftnsu
+    arm_neon_vqrshiftnu,                       // llvm.arm.neon.vqrshiftnu
+    arm_neon_vqrshifts,                        // llvm.arm.neon.vqrshifts
+    arm_neon_vqrshiftu,                        // llvm.arm.neon.vqrshiftu
+    arm_neon_vqshiftns,                        // llvm.arm.neon.vqshiftns
+    arm_neon_vqshiftnsu,                       // llvm.arm.neon.vqshiftnsu
+    arm_neon_vqshiftnu,                        // llvm.arm.neon.vqshiftnu
+    arm_neon_vqshifts,                         // llvm.arm.neon.vqshifts
+    arm_neon_vqshiftsu,                        // llvm.arm.neon.vqshiftsu
+    arm_neon_vqshiftu,                         // llvm.arm.neon.vqshiftu
+    arm_neon_vqsubs,                           // llvm.arm.neon.vqsubs
+    arm_neon_vqsubu,                           // llvm.arm.neon.vqsubu
+    arm_neon_vraddhn,                          // llvm.arm.neon.vraddhn
+    arm_neon_vrecpe,                           // llvm.arm.neon.vrecpe
+    arm_neon_vrecps,                           // llvm.arm.neon.vrecps
+    arm_neon_vrhadds,                          // llvm.arm.neon.vrhadds
+    arm_neon_vrhaddu,                          // llvm.arm.neon.vrhaddu
+    arm_neon_vrinta,                           // llvm.arm.neon.vrinta
+    arm_neon_vrintm,                           // llvm.arm.neon.vrintm
+    arm_neon_vrintn,                           // llvm.arm.neon.vrintn
+    arm_neon_vrintp,                           // llvm.arm.neon.vrintp
+    arm_neon_vrintx,                           // llvm.arm.neon.vrintx
+    arm_neon_vrintz,                           // llvm.arm.neon.vrintz
+    arm_neon_vrshiftn,                         // llvm.arm.neon.vrshiftn
+    arm_neon_vrshifts,                         // llvm.arm.neon.vrshifts
+    arm_neon_vrshiftu,                         // llvm.arm.neon.vrshiftu
+    arm_neon_vrsqrte,                          // llvm.arm.neon.vrsqrte
+    arm_neon_vrsqrts,                          // llvm.arm.neon.vrsqrts
+    arm_neon_vrsubhn,                          // llvm.arm.neon.vrsubhn
+    arm_neon_vshiftins,                        // llvm.arm.neon.vshiftins
+    arm_neon_vshifts,                          // llvm.arm.neon.vshifts
+    arm_neon_vshiftu,                          // llvm.arm.neon.vshiftu
+    arm_neon_vst1,                             // llvm.arm.neon.vst1
+    arm_neon_vst2,                             // llvm.arm.neon.vst2
+    arm_neon_vst2lane,                         // llvm.arm.neon.vst2lane
+    arm_neon_vst3,                             // llvm.arm.neon.vst3
+    arm_neon_vst3lane,                         // llvm.arm.neon.vst3lane
+    arm_neon_vst4,                             // llvm.arm.neon.vst4
+    arm_neon_vst4lane,                         // llvm.arm.neon.vst4lane
+    arm_neon_vtbl1,                            // llvm.arm.neon.vtbl1
+    arm_neon_vtbl2,                            // llvm.arm.neon.vtbl2
+    arm_neon_vtbl3,                            // llvm.arm.neon.vtbl3
+    arm_neon_vtbl4,                            // llvm.arm.neon.vtbl4
+    arm_neon_vtbx1,                            // llvm.arm.neon.vtbx1
+    arm_neon_vtbx2,                            // llvm.arm.neon.vtbx2
+    arm_neon_vtbx3,                            // llvm.arm.neon.vtbx3
+    arm_neon_vtbx4,                            // llvm.arm.neon.vtbx4
+    arm_qadd,                                  // llvm.arm.qadd
+    arm_qadd16,                                // llvm.arm.qadd16
+    arm_qadd8,                                 // llvm.arm.qadd8
+    arm_qasx,                                  // llvm.arm.qasx
+    arm_qsax,                                  // llvm.arm.qsax
+    arm_qsub,                                  // llvm.arm.qsub
+    arm_qsub16,                                // llvm.arm.qsub16
+    arm_qsub8,                                 // llvm.arm.qsub8
+    arm_sadd16,                                // llvm.arm.sadd16
+    arm_sadd8,                                 // llvm.arm.sadd8
+    arm_sasx,                                  // llvm.arm.sasx
+    arm_sel,                                   // llvm.arm.sel
+    arm_set_fpscr,                             // llvm.arm.set.fpscr
+    arm_shadd16,                               // llvm.arm.shadd16
+    arm_shadd8,                                // llvm.arm.shadd8
+    arm_shasx,                                 // llvm.arm.shasx
+    arm_shsax,                                 // llvm.arm.shsax
+    arm_shsub16,                               // llvm.arm.shsub16
+    arm_shsub8,                                // llvm.arm.shsub8
+    arm_smlabb,                                // llvm.arm.smlabb
+    arm_smlabt,                                // llvm.arm.smlabt
+    arm_smlad,                                 // llvm.arm.smlad
+    arm_smladx,                                // llvm.arm.smladx
+    arm_smlald,                                // llvm.arm.smlald
+    arm_smlaldx,                               // llvm.arm.smlaldx
+    arm_smlatb,                                // llvm.arm.smlatb
+    arm_smlatt,                                // llvm.arm.smlatt
+    arm_smlawb,                                // llvm.arm.smlawb
+    arm_smlawt,                                // llvm.arm.smlawt
+    arm_smlsd,                                 // llvm.arm.smlsd
+    arm_smlsdx,                                // llvm.arm.smlsdx
+    arm_smlsld,                                // llvm.arm.smlsld
+    arm_smlsldx,                               // llvm.arm.smlsldx
+    arm_smuad,                                 // llvm.arm.smuad
+    arm_smuadx,                                // llvm.arm.smuadx
+    arm_smulbb,                                // llvm.arm.smulbb
+    arm_smulbt,                                // llvm.arm.smulbt
+    arm_smultb,                                // llvm.arm.smultb
+    arm_smultt,                                // llvm.arm.smultt
+    arm_smulwb,                                // llvm.arm.smulwb
+    arm_smulwt,                                // llvm.arm.smulwt
+    arm_smusd,                                 // llvm.arm.smusd
+    arm_smusdx,                                // llvm.arm.smusdx
+    arm_space,                                 // llvm.arm.space
+    arm_ssat,                                  // llvm.arm.ssat
+    arm_ssat16,                                // llvm.arm.ssat16
+    arm_ssax,                                  // llvm.arm.ssax
+    arm_ssub16,                                // llvm.arm.ssub16
+    arm_ssub8,                                 // llvm.arm.ssub8
+    arm_stc,                                   // llvm.arm.stc
+    arm_stc2,                                  // llvm.arm.stc2
+    arm_stc2l,                                 // llvm.arm.stc2l
+    arm_stcl,                                  // llvm.arm.stcl
+    arm_stlex,                                 // llvm.arm.stlex
+    arm_stlexd,                                // llvm.arm.stlexd
+    arm_strex,                                 // llvm.arm.strex
+    arm_strexd,                                // llvm.arm.strexd
+    arm_sxtab16,                               // llvm.arm.sxtab16
+    arm_sxtb16,                                // llvm.arm.sxtb16
+    arm_uadd16,                                // llvm.arm.uadd16
+    arm_uadd8,                                 // llvm.arm.uadd8
+    arm_uasx,                                  // llvm.arm.uasx
+    arm_uhadd16,                               // llvm.arm.uhadd16
+    arm_uhadd8,                                // llvm.arm.uhadd8
+    arm_uhasx,                                 // llvm.arm.uhasx
+    arm_uhsax,                                 // llvm.arm.uhsax
+    arm_uhsub16,                               // llvm.arm.uhsub16
+    arm_uhsub8,                                // llvm.arm.uhsub8
+    arm_undefined,                             // llvm.arm.undefined
+    arm_uqadd16,                               // llvm.arm.uqadd16
+    arm_uqadd8,                                // llvm.arm.uqadd8
+    arm_uqasx,                                 // llvm.arm.uqasx
+    arm_uqsax,                                 // llvm.arm.uqsax
+    arm_uqsub16,                               // llvm.arm.uqsub16
+    arm_uqsub8,                                // llvm.arm.uqsub8
+    arm_usad8,                                 // llvm.arm.usad8
+    arm_usada8,                                // llvm.arm.usada8
+    arm_usat,                                  // llvm.arm.usat
+    arm_usat16,                                // llvm.arm.usat16
+    arm_usax,                                  // llvm.arm.usax
+    arm_usub16,                                // llvm.arm.usub16
+    arm_usub8,                                 // llvm.arm.usub8
+    arm_uxtab16,                               // llvm.arm.uxtab16
+    arm_uxtb16,                                // llvm.arm.uxtb16
+    arm_vcvtr,                                 // llvm.arm.vcvtr
+    arm_vcvtru,                                // llvm.arm.vcvtru
+    bpf_load_byte,                             // llvm.bpf.load.byte
+    bpf_load_half,                             // llvm.bpf.load.half
+    bpf_load_word,                             // llvm.bpf.load.word
+    bpf_pseudo,                                // llvm.bpf.pseudo
+    hexagon_A2_abs,                            // llvm.hexagon.A2.abs
+    hexagon_A2_absp,                           // llvm.hexagon.A2.absp
+    hexagon_A2_abssat,                         // llvm.hexagon.A2.abssat
+    hexagon_A2_add,                            // llvm.hexagon.A2.add
+    hexagon_A2_addh_h16_hh,                    // llvm.hexagon.A2.addh.h16.hh
+    hexagon_A2_addh_h16_hl,                    // llvm.hexagon.A2.addh.h16.hl
+    hexagon_A2_addh_h16_lh,                    // llvm.hexagon.A2.addh.h16.lh
+    hexagon_A2_addh_h16_ll,                    // llvm.hexagon.A2.addh.h16.ll
+    hexagon_A2_addh_h16_sat_hh,                // llvm.hexagon.A2.addh.h16.sat.hh
+    hexagon_A2_addh_h16_sat_hl,                // llvm.hexagon.A2.addh.h16.sat.hl
+    hexagon_A2_addh_h16_sat_lh,                // llvm.hexagon.A2.addh.h16.sat.lh
+    hexagon_A2_addh_h16_sat_ll,                // llvm.hexagon.A2.addh.h16.sat.ll
+    hexagon_A2_addh_l16_hl,                    // llvm.hexagon.A2.addh.l16.hl
+    hexagon_A2_addh_l16_ll,                    // llvm.hexagon.A2.addh.l16.ll
+    hexagon_A2_addh_l16_sat_hl,                // llvm.hexagon.A2.addh.l16.sat.hl
+    hexagon_A2_addh_l16_sat_ll,                // llvm.hexagon.A2.addh.l16.sat.ll
+    hexagon_A2_addi,                           // llvm.hexagon.A2.addi
+    hexagon_A2_addp,                           // llvm.hexagon.A2.addp
+    hexagon_A2_addpsat,                        // llvm.hexagon.A2.addpsat
+    hexagon_A2_addsat,                         // llvm.hexagon.A2.addsat
+    hexagon_A2_addsp,                          // llvm.hexagon.A2.addsp
+    hexagon_A2_and,                            // llvm.hexagon.A2.and
+    hexagon_A2_andir,                          // llvm.hexagon.A2.andir
+    hexagon_A2_andp,                           // llvm.hexagon.A2.andp
+    hexagon_A2_aslh,                           // llvm.hexagon.A2.aslh
+    hexagon_A2_asrh,                           // llvm.hexagon.A2.asrh
+    hexagon_A2_combine_hh,                     // llvm.hexagon.A2.combine.hh
+    hexagon_A2_combine_hl,                     // llvm.hexagon.A2.combine.hl
+    hexagon_A2_combine_lh,                     // llvm.hexagon.A2.combine.lh
+    hexagon_A2_combine_ll,                     // llvm.hexagon.A2.combine.ll
+    hexagon_A2_combineii,                      // llvm.hexagon.A2.combineii
+    hexagon_A2_combinew,                       // llvm.hexagon.A2.combinew
+    hexagon_A2_max,                            // llvm.hexagon.A2.max
+    hexagon_A2_maxp,                           // llvm.hexagon.A2.maxp
+    hexagon_A2_maxu,                           // llvm.hexagon.A2.maxu
+    hexagon_A2_maxup,                          // llvm.hexagon.A2.maxup
+    hexagon_A2_min,                            // llvm.hexagon.A2.min
+    hexagon_A2_minp,                           // llvm.hexagon.A2.minp
+    hexagon_A2_minu,                           // llvm.hexagon.A2.minu
+    hexagon_A2_minup,                          // llvm.hexagon.A2.minup
+    hexagon_A2_neg,                            // llvm.hexagon.A2.neg
+    hexagon_A2_negp,                           // llvm.hexagon.A2.negp
+    hexagon_A2_negsat,                         // llvm.hexagon.A2.negsat
+    hexagon_A2_not,                            // llvm.hexagon.A2.not
+    hexagon_A2_notp,                           // llvm.hexagon.A2.notp
+    hexagon_A2_or,                             // llvm.hexagon.A2.or
+    hexagon_A2_orir,                           // llvm.hexagon.A2.orir
+    hexagon_A2_orp,                            // llvm.hexagon.A2.orp
+    hexagon_A2_roundsat,                       // llvm.hexagon.A2.roundsat
+    hexagon_A2_sat,                            // llvm.hexagon.A2.sat
+    hexagon_A2_satb,                           // llvm.hexagon.A2.satb
+    hexagon_A2_sath,                           // llvm.hexagon.A2.sath
+    hexagon_A2_satub,                          // llvm.hexagon.A2.satub
+    hexagon_A2_satuh,                          // llvm.hexagon.A2.satuh
+    hexagon_A2_sub,                            // llvm.hexagon.A2.sub
+    hexagon_A2_subh_h16_hh,                    // llvm.hexagon.A2.subh.h16.hh
+    hexagon_A2_subh_h16_hl,                    // llvm.hexagon.A2.subh.h16.hl
+    hexagon_A2_subh_h16_lh,                    // llvm.hexagon.A2.subh.h16.lh
+    hexagon_A2_subh_h16_ll,                    // llvm.hexagon.A2.subh.h16.ll
+    hexagon_A2_subh_h16_sat_hh,                // llvm.hexagon.A2.subh.h16.sat.hh
+    hexagon_A2_subh_h16_sat_hl,                // llvm.hexagon.A2.subh.h16.sat.hl
+    hexagon_A2_subh_h16_sat_lh,                // llvm.hexagon.A2.subh.h16.sat.lh
+    hexagon_A2_subh_h16_sat_ll,                // llvm.hexagon.A2.subh.h16.sat.ll
+    hexagon_A2_subh_l16_hl,                    // llvm.hexagon.A2.subh.l16.hl
+    hexagon_A2_subh_l16_ll,                    // llvm.hexagon.A2.subh.l16.ll
+    hexagon_A2_subh_l16_sat_hl,                // llvm.hexagon.A2.subh.l16.sat.hl
+    hexagon_A2_subh_l16_sat_ll,                // llvm.hexagon.A2.subh.l16.sat.ll
+    hexagon_A2_subp,                           // llvm.hexagon.A2.subp
+    hexagon_A2_subri,                          // llvm.hexagon.A2.subri
+    hexagon_A2_subsat,                         // llvm.hexagon.A2.subsat
+    hexagon_A2_svaddh,                         // llvm.hexagon.A2.svaddh
+    hexagon_A2_svaddhs,                        // llvm.hexagon.A2.svaddhs
+    hexagon_A2_svadduhs,                       // llvm.hexagon.A2.svadduhs
+    hexagon_A2_svavgh,                         // llvm.hexagon.A2.svavgh
+    hexagon_A2_svavghs,                        // llvm.hexagon.A2.svavghs
+    hexagon_A2_svnavgh,                        // llvm.hexagon.A2.svnavgh
+    hexagon_A2_svsubh,                         // llvm.hexagon.A2.svsubh
+    hexagon_A2_svsubhs,                        // llvm.hexagon.A2.svsubhs
+    hexagon_A2_svsubuhs,                       // llvm.hexagon.A2.svsubuhs
+    hexagon_A2_swiz,                           // llvm.hexagon.A2.swiz
+    hexagon_A2_sxtb,                           // llvm.hexagon.A2.sxtb
+    hexagon_A2_sxth,                           // llvm.hexagon.A2.sxth
+    hexagon_A2_sxtw,                           // llvm.hexagon.A2.sxtw
+    hexagon_A2_tfr,                            // llvm.hexagon.A2.tfr
+    hexagon_A2_tfrih,                          // llvm.hexagon.A2.tfrih
+    hexagon_A2_tfril,                          // llvm.hexagon.A2.tfril
+    hexagon_A2_tfrp,                           // llvm.hexagon.A2.tfrp
+    hexagon_A2_tfrpi,                          // llvm.hexagon.A2.tfrpi
+    hexagon_A2_tfrsi,                          // llvm.hexagon.A2.tfrsi
+    hexagon_A2_vabsh,                          // llvm.hexagon.A2.vabsh
+    hexagon_A2_vabshsat,                       // llvm.hexagon.A2.vabshsat
+    hexagon_A2_vabsw,                          // llvm.hexagon.A2.vabsw
+    hexagon_A2_vabswsat,                       // llvm.hexagon.A2.vabswsat
+    hexagon_A2_vaddb_map,                      // llvm.hexagon.A2.vaddb.map
+    hexagon_A2_vaddh,                          // llvm.hexagon.A2.vaddh
+    hexagon_A2_vaddhs,                         // llvm.hexagon.A2.vaddhs
+    hexagon_A2_vaddub,                         // llvm.hexagon.A2.vaddub
+    hexagon_A2_vaddubs,                        // llvm.hexagon.A2.vaddubs
+    hexagon_A2_vadduhs,                        // llvm.hexagon.A2.vadduhs
+    hexagon_A2_vaddw,                          // llvm.hexagon.A2.vaddw
+    hexagon_A2_vaddws,                         // llvm.hexagon.A2.vaddws
+    hexagon_A2_vavgh,                          // llvm.hexagon.A2.vavgh
+    hexagon_A2_vavghcr,                        // llvm.hexagon.A2.vavghcr
+    hexagon_A2_vavghr,                         // llvm.hexagon.A2.vavghr
+    hexagon_A2_vavgub,                         // llvm.hexagon.A2.vavgub
+    hexagon_A2_vavgubr,                        // llvm.hexagon.A2.vavgubr
+    hexagon_A2_vavguh,                         // llvm.hexagon.A2.vavguh
+    hexagon_A2_vavguhr,                        // llvm.hexagon.A2.vavguhr
+    hexagon_A2_vavguw,                         // llvm.hexagon.A2.vavguw
+    hexagon_A2_vavguwr,                        // llvm.hexagon.A2.vavguwr
+    hexagon_A2_vavgw,                          // llvm.hexagon.A2.vavgw
+    hexagon_A2_vavgwcr,                        // llvm.hexagon.A2.vavgwcr
+    hexagon_A2_vavgwr,                         // llvm.hexagon.A2.vavgwr
+    hexagon_A2_vcmpbeq,                        // llvm.hexagon.A2.vcmpbeq
+    hexagon_A2_vcmpbgtu,                       // llvm.hexagon.A2.vcmpbgtu
+    hexagon_A2_vcmpheq,                        // llvm.hexagon.A2.vcmpheq
+    hexagon_A2_vcmphgt,                        // llvm.hexagon.A2.vcmphgt
+    hexagon_A2_vcmphgtu,                       // llvm.hexagon.A2.vcmphgtu
+    hexagon_A2_vcmpweq,                        // llvm.hexagon.A2.vcmpweq
+    hexagon_A2_vcmpwgt,                        // llvm.hexagon.A2.vcmpwgt
+    hexagon_A2_vcmpwgtu,                       // llvm.hexagon.A2.vcmpwgtu
+    hexagon_A2_vconj,                          // llvm.hexagon.A2.vconj
+    hexagon_A2_vmaxb,                          // llvm.hexagon.A2.vmaxb
+    hexagon_A2_vmaxh,                          // llvm.hexagon.A2.vmaxh
+    hexagon_A2_vmaxub,                         // llvm.hexagon.A2.vmaxub
+    hexagon_A2_vmaxuh,                         // llvm.hexagon.A2.vmaxuh
+    hexagon_A2_vmaxuw,                         // llvm.hexagon.A2.vmaxuw
+    hexagon_A2_vmaxw,                          // llvm.hexagon.A2.vmaxw
+    hexagon_A2_vminb,                          // llvm.hexagon.A2.vminb
+    hexagon_A2_vminh,                          // llvm.hexagon.A2.vminh
+    hexagon_A2_vminub,                         // llvm.hexagon.A2.vminub
+    hexagon_A2_vminuh,                         // llvm.hexagon.A2.vminuh
+    hexagon_A2_vminuw,                         // llvm.hexagon.A2.vminuw
+    hexagon_A2_vminw,                          // llvm.hexagon.A2.vminw
+    hexagon_A2_vnavgh,                         // llvm.hexagon.A2.vnavgh
+    hexagon_A2_vnavghcr,                       // llvm.hexagon.A2.vnavghcr
+    hexagon_A2_vnavghr,                        // llvm.hexagon.A2.vnavghr
+    hexagon_A2_vnavgw,                         // llvm.hexagon.A2.vnavgw
+    hexagon_A2_vnavgwcr,                       // llvm.hexagon.A2.vnavgwcr
+    hexagon_A2_vnavgwr,                        // llvm.hexagon.A2.vnavgwr
+    hexagon_A2_vraddub,                        // llvm.hexagon.A2.vraddub
+    hexagon_A2_vraddub_acc,                    // llvm.hexagon.A2.vraddub.acc
+    hexagon_A2_vrsadub,                        // llvm.hexagon.A2.vrsadub
+    hexagon_A2_vrsadub_acc,                    // llvm.hexagon.A2.vrsadub.acc
+    hexagon_A2_vsubb_map,                      // llvm.hexagon.A2.vsubb.map
+    hexagon_A2_vsubh,                          // llvm.hexagon.A2.vsubh
+    hexagon_A2_vsubhs,                         // llvm.hexagon.A2.vsubhs
+    hexagon_A2_vsubub,                         // llvm.hexagon.A2.vsubub
+    hexagon_A2_vsububs,                        // llvm.hexagon.A2.vsububs
+    hexagon_A2_vsubuhs,                        // llvm.hexagon.A2.vsubuhs
+    hexagon_A2_vsubw,                          // llvm.hexagon.A2.vsubw
+    hexagon_A2_vsubws,                         // llvm.hexagon.A2.vsubws
+    hexagon_A2_xor,                            // llvm.hexagon.A2.xor
+    hexagon_A2_xorp,                           // llvm.hexagon.A2.xorp
+    hexagon_A2_zxtb,                           // llvm.hexagon.A2.zxtb
+    hexagon_A2_zxth,                           // llvm.hexagon.A2.zxth
+    hexagon_A4_andn,                           // llvm.hexagon.A4.andn
+    hexagon_A4_andnp,                          // llvm.hexagon.A4.andnp
+    hexagon_A4_bitsplit,                       // llvm.hexagon.A4.bitsplit
+    hexagon_A4_bitspliti,                      // llvm.hexagon.A4.bitspliti
+    hexagon_A4_boundscheck,                    // llvm.hexagon.A4.boundscheck
+    hexagon_A4_cmpbeq,                         // llvm.hexagon.A4.cmpbeq
+    hexagon_A4_cmpbeqi,                        // llvm.hexagon.A4.cmpbeqi
+    hexagon_A4_cmpbgt,                         // llvm.hexagon.A4.cmpbgt
+    hexagon_A4_cmpbgti,                        // llvm.hexagon.A4.cmpbgti
+    hexagon_A4_cmpbgtu,                        // llvm.hexagon.A4.cmpbgtu
+    hexagon_A4_cmpbgtui,                       // llvm.hexagon.A4.cmpbgtui
+    hexagon_A4_cmpheq,                         // llvm.hexagon.A4.cmpheq
+    hexagon_A4_cmpheqi,                        // llvm.hexagon.A4.cmpheqi
+    hexagon_A4_cmphgt,                         // llvm.hexagon.A4.cmphgt
+    hexagon_A4_cmphgti,                        // llvm.hexagon.A4.cmphgti
+    hexagon_A4_cmphgtu,                        // llvm.hexagon.A4.cmphgtu
+    hexagon_A4_cmphgtui,                       // llvm.hexagon.A4.cmphgtui
+    hexagon_A4_combineir,                      // llvm.hexagon.A4.combineir
+    hexagon_A4_combineri,                      // llvm.hexagon.A4.combineri
+    hexagon_A4_cround_ri,                      // llvm.hexagon.A4.cround.ri
+    hexagon_A4_cround_rr,                      // llvm.hexagon.A4.cround.rr
+    hexagon_A4_modwrapu,                       // llvm.hexagon.A4.modwrapu
+    hexagon_A4_orn,                            // llvm.hexagon.A4.orn
+    hexagon_A4_ornp,                           // llvm.hexagon.A4.ornp
+    hexagon_A4_rcmpeq,                         // llvm.hexagon.A4.rcmpeq
+    hexagon_A4_rcmpeqi,                        // llvm.hexagon.A4.rcmpeqi
+    hexagon_A4_rcmpneq,                        // llvm.hexagon.A4.rcmpneq
+    hexagon_A4_rcmpneqi,                       // llvm.hexagon.A4.rcmpneqi
+    hexagon_A4_round_ri,                       // llvm.hexagon.A4.round.ri
+    hexagon_A4_round_ri_sat,                   // llvm.hexagon.A4.round.ri.sat
+    hexagon_A4_round_rr,                       // llvm.hexagon.A4.round.rr
+    hexagon_A4_round_rr_sat,                   // llvm.hexagon.A4.round.rr.sat
+    hexagon_A4_tlbmatch,                       // llvm.hexagon.A4.tlbmatch
+    hexagon_A4_vcmpbeq_any,                    // llvm.hexagon.A4.vcmpbeq.any
+    hexagon_A4_vcmpbeqi,                       // llvm.hexagon.A4.vcmpbeqi
+    hexagon_A4_vcmpbgt,                        // llvm.hexagon.A4.vcmpbgt
+    hexagon_A4_vcmpbgti,                       // llvm.hexagon.A4.vcmpbgti
+    hexagon_A4_vcmpbgtui,                      // llvm.hexagon.A4.vcmpbgtui
+    hexagon_A4_vcmpheqi,                       // llvm.hexagon.A4.vcmpheqi
+    hexagon_A4_vcmphgti,                       // llvm.hexagon.A4.vcmphgti
+    hexagon_A4_vcmphgtui,                      // llvm.hexagon.A4.vcmphgtui
+    hexagon_A4_vcmpweqi,                       // llvm.hexagon.A4.vcmpweqi
+    hexagon_A4_vcmpwgti,                       // llvm.hexagon.A4.vcmpwgti
+    hexagon_A4_vcmpwgtui,                      // llvm.hexagon.A4.vcmpwgtui
+    hexagon_A4_vrmaxh,                         // llvm.hexagon.A4.vrmaxh
+    hexagon_A4_vrmaxuh,                        // llvm.hexagon.A4.vrmaxuh
+    hexagon_A4_vrmaxuw,                        // llvm.hexagon.A4.vrmaxuw
+    hexagon_A4_vrmaxw,                         // llvm.hexagon.A4.vrmaxw
+    hexagon_A4_vrminh,                         // llvm.hexagon.A4.vrminh
+    hexagon_A4_vrminuh,                        // llvm.hexagon.A4.vrminuh
+    hexagon_A4_vrminuw,                        // llvm.hexagon.A4.vrminuw
+    hexagon_A4_vrminw,                         // llvm.hexagon.A4.vrminw
+    hexagon_A5_vaddhubs,                       // llvm.hexagon.A5.vaddhubs
+    hexagon_A6_vcmpbeq_notany,                 // llvm.hexagon.A6.vcmpbeq.notany
+    hexagon_A6_vcmpbeq_notany_128B,            // llvm.hexagon.A6.vcmpbeq.notany.128B
+    hexagon_C2_all8,                           // llvm.hexagon.C2.all8
+    hexagon_C2_and,                            // llvm.hexagon.C2.and
+    hexagon_C2_andn,                           // llvm.hexagon.C2.andn
+    hexagon_C2_any8,                           // llvm.hexagon.C2.any8
+    hexagon_C2_bitsclr,                        // llvm.hexagon.C2.bitsclr
+    hexagon_C2_bitsclri,                       // llvm.hexagon.C2.bitsclri
+    hexagon_C2_bitsset,                        // llvm.hexagon.C2.bitsset
+    hexagon_C2_cmpeq,                          // llvm.hexagon.C2.cmpeq
+    hexagon_C2_cmpeqi,                         // llvm.hexagon.C2.cmpeqi
+    hexagon_C2_cmpeqp,                         // llvm.hexagon.C2.cmpeqp
+    hexagon_C2_cmpgei,                         // llvm.hexagon.C2.cmpgei
+    hexagon_C2_cmpgeui,                        // llvm.hexagon.C2.cmpgeui
+    hexagon_C2_cmpgt,                          // llvm.hexagon.C2.cmpgt
+    hexagon_C2_cmpgti,                         // llvm.hexagon.C2.cmpgti
+    hexagon_C2_cmpgtp,                         // llvm.hexagon.C2.cmpgtp
+    hexagon_C2_cmpgtu,                         // llvm.hexagon.C2.cmpgtu
+    hexagon_C2_cmpgtui,                        // llvm.hexagon.C2.cmpgtui
+    hexagon_C2_cmpgtup,                        // llvm.hexagon.C2.cmpgtup
+    hexagon_C2_cmplt,                          // llvm.hexagon.C2.cmplt
+    hexagon_C2_cmpltu,                         // llvm.hexagon.C2.cmpltu
+    hexagon_C2_mask,                           // llvm.hexagon.C2.mask
+    hexagon_C2_mux,                            // llvm.hexagon.C2.mux
+    hexagon_C2_muxii,                          // llvm.hexagon.C2.muxii
+    hexagon_C2_muxir,                          // llvm.hexagon.C2.muxir
+    hexagon_C2_muxri,                          // llvm.hexagon.C2.muxri
+    hexagon_C2_not,                            // llvm.hexagon.C2.not
+    hexagon_C2_or,                             // llvm.hexagon.C2.or
+    hexagon_C2_orn,                            // llvm.hexagon.C2.orn
+    hexagon_C2_pxfer_map,                      // llvm.hexagon.C2.pxfer.map
+    hexagon_C2_tfrpr,                          // llvm.hexagon.C2.tfrpr
+    hexagon_C2_tfrrp,                          // llvm.hexagon.C2.tfrrp
+    hexagon_C2_vitpack,                        // llvm.hexagon.C2.vitpack
+    hexagon_C2_vmux,                           // llvm.hexagon.C2.vmux
+    hexagon_C2_xor,                            // llvm.hexagon.C2.xor
+    hexagon_C4_and_and,                        // llvm.hexagon.C4.and.and
+    hexagon_C4_and_andn,                       // llvm.hexagon.C4.and.andn
+    hexagon_C4_and_or,                         // llvm.hexagon.C4.and.or
+    hexagon_C4_and_orn,                        // llvm.hexagon.C4.and.orn
+    hexagon_C4_cmplte,                         // llvm.hexagon.C4.cmplte
+    hexagon_C4_cmpltei,                        // llvm.hexagon.C4.cmpltei
+    hexagon_C4_cmplteu,                        // llvm.hexagon.C4.cmplteu
+    hexagon_C4_cmplteui,                       // llvm.hexagon.C4.cmplteui
+    hexagon_C4_cmpneq,                         // llvm.hexagon.C4.cmpneq
+    hexagon_C4_cmpneqi,                        // llvm.hexagon.C4.cmpneqi
+    hexagon_C4_fastcorner9,                    // llvm.hexagon.C4.fastcorner9
+    hexagon_C4_fastcorner9_not,                // llvm.hexagon.C4.fastcorner9.not
+    hexagon_C4_nbitsclr,                       // llvm.hexagon.C4.nbitsclr
+    hexagon_C4_nbitsclri,                      // llvm.hexagon.C4.nbitsclri
+    hexagon_C4_nbitsset,                       // llvm.hexagon.C4.nbitsset
+    hexagon_C4_or_and,                         // llvm.hexagon.C4.or.and
+    hexagon_C4_or_andn,                        // llvm.hexagon.C4.or.andn
+    hexagon_C4_or_or,                          // llvm.hexagon.C4.or.or
+    hexagon_C4_or_orn,                         // llvm.hexagon.C4.or.orn
+    hexagon_F2_conv_d2df,                      // llvm.hexagon.F2.conv.d2df
+    hexagon_F2_conv_d2sf,                      // llvm.hexagon.F2.conv.d2sf
+    hexagon_F2_conv_df2d,                      // llvm.hexagon.F2.conv.df2d
+    hexagon_F2_conv_df2d_chop,                 // llvm.hexagon.F2.conv.df2d.chop
+    hexagon_F2_conv_df2sf,                     // llvm.hexagon.F2.conv.df2sf
+    hexagon_F2_conv_df2ud,                     // llvm.hexagon.F2.conv.df2ud
+    hexagon_F2_conv_df2ud_chop,                // llvm.hexagon.F2.conv.df2ud.chop
+    hexagon_F2_conv_df2uw,                     // llvm.hexagon.F2.conv.df2uw
+    hexagon_F2_conv_df2uw_chop,                // llvm.hexagon.F2.conv.df2uw.chop
+    hexagon_F2_conv_df2w,                      // llvm.hexagon.F2.conv.df2w
+    hexagon_F2_conv_df2w_chop,                 // llvm.hexagon.F2.conv.df2w.chop
+    hexagon_F2_conv_sf2d,                      // llvm.hexagon.F2.conv.sf2d
+    hexagon_F2_conv_sf2d_chop,                 // llvm.hexagon.F2.conv.sf2d.chop
+    hexagon_F2_conv_sf2df,                     // llvm.hexagon.F2.conv.sf2df
+    hexagon_F2_conv_sf2ud,                     // llvm.hexagon.F2.conv.sf2ud
+    hexagon_F2_conv_sf2ud_chop,                // llvm.hexagon.F2.conv.sf2ud.chop
+    hexagon_F2_conv_sf2uw,                     // llvm.hexagon.F2.conv.sf2uw
+    hexagon_F2_conv_sf2uw_chop,                // llvm.hexagon.F2.conv.sf2uw.chop
+    hexagon_F2_conv_sf2w,                      // llvm.hexagon.F2.conv.sf2w
+    hexagon_F2_conv_sf2w_chop,                 // llvm.hexagon.F2.conv.sf2w.chop
+    hexagon_F2_conv_ud2df,                     // llvm.hexagon.F2.conv.ud2df
+    hexagon_F2_conv_ud2sf,                     // llvm.hexagon.F2.conv.ud2sf
+    hexagon_F2_conv_uw2df,                     // llvm.hexagon.F2.conv.uw2df
+    hexagon_F2_conv_uw2sf,                     // llvm.hexagon.F2.conv.uw2sf
+    hexagon_F2_conv_w2df,                      // llvm.hexagon.F2.conv.w2df
+    hexagon_F2_conv_w2sf,                      // llvm.hexagon.F2.conv.w2sf
+    hexagon_F2_dfclass,                        // llvm.hexagon.F2.dfclass
+    hexagon_F2_dfcmpeq,                        // llvm.hexagon.F2.dfcmpeq
+    hexagon_F2_dfcmpge,                        // llvm.hexagon.F2.dfcmpge
+    hexagon_F2_dfcmpgt,                        // llvm.hexagon.F2.dfcmpgt
+    hexagon_F2_dfcmpuo,                        // llvm.hexagon.F2.dfcmpuo
+    hexagon_F2_dfimm_n,                        // llvm.hexagon.F2.dfimm.n
+    hexagon_F2_dfimm_p,                        // llvm.hexagon.F2.dfimm.p
+    hexagon_F2_sfadd,                          // llvm.hexagon.F2.sfadd
+    hexagon_F2_sfclass,                        // llvm.hexagon.F2.sfclass
+    hexagon_F2_sfcmpeq,                        // llvm.hexagon.F2.sfcmpeq
+    hexagon_F2_sfcmpge,                        // llvm.hexagon.F2.sfcmpge
+    hexagon_F2_sfcmpgt,                        // llvm.hexagon.F2.sfcmpgt
+    hexagon_F2_sfcmpuo,                        // llvm.hexagon.F2.sfcmpuo
+    hexagon_F2_sffixupd,                       // llvm.hexagon.F2.sffixupd
+    hexagon_F2_sffixupn,                       // llvm.hexagon.F2.sffixupn
+    hexagon_F2_sffixupr,                       // llvm.hexagon.F2.sffixupr
+    hexagon_F2_sffma,                          // llvm.hexagon.F2.sffma
+    hexagon_F2_sffma_lib,                      // llvm.hexagon.F2.sffma.lib
+    hexagon_F2_sffma_sc,                       // llvm.hexagon.F2.sffma.sc
+    hexagon_F2_sffms,                          // llvm.hexagon.F2.sffms
+    hexagon_F2_sffms_lib,                      // llvm.hexagon.F2.sffms.lib
+    hexagon_F2_sfimm_n,                        // llvm.hexagon.F2.sfimm.n
+    hexagon_F2_sfimm_p,                        // llvm.hexagon.F2.sfimm.p
+    hexagon_F2_sfmax,                          // llvm.hexagon.F2.sfmax
+    hexagon_F2_sfmin,                          // llvm.hexagon.F2.sfmin
+    hexagon_F2_sfmpy,                          // llvm.hexagon.F2.sfmpy
+    hexagon_F2_sfsub,                          // llvm.hexagon.F2.sfsub
+    hexagon_L2_loadrb_pbr,                     // llvm.hexagon.L2.loadrb.pbr
+    hexagon_L2_loadrb_pci,                     // llvm.hexagon.L2.loadrb.pci
+    hexagon_L2_loadrb_pcr,                     // llvm.hexagon.L2.loadrb.pcr
+    hexagon_L2_loadrd_pbr,                     // llvm.hexagon.L2.loadrd.pbr
+    hexagon_L2_loadrd_pci,                     // llvm.hexagon.L2.loadrd.pci
+    hexagon_L2_loadrd_pcr,                     // llvm.hexagon.L2.loadrd.pcr
+    hexagon_L2_loadrh_pbr,                     // llvm.hexagon.L2.loadrh.pbr
+    hexagon_L2_loadrh_pci,                     // llvm.hexagon.L2.loadrh.pci
+    hexagon_L2_loadrh_pcr,                     // llvm.hexagon.L2.loadrh.pcr
+    hexagon_L2_loadri_pbr,                     // llvm.hexagon.L2.loadri.pbr
+    hexagon_L2_loadri_pci,                     // llvm.hexagon.L2.loadri.pci
+    hexagon_L2_loadri_pcr,                     // llvm.hexagon.L2.loadri.pcr
+    hexagon_L2_loadrub_pbr,                    // llvm.hexagon.L2.loadrub.pbr
+    hexagon_L2_loadrub_pci,                    // llvm.hexagon.L2.loadrub.pci
+    hexagon_L2_loadrub_pcr,                    // llvm.hexagon.L2.loadrub.pcr
+    hexagon_L2_loadruh_pbr,                    // llvm.hexagon.L2.loadruh.pbr
+    hexagon_L2_loadruh_pci,                    // llvm.hexagon.L2.loadruh.pci
+    hexagon_L2_loadruh_pcr,                    // llvm.hexagon.L2.loadruh.pcr
+    hexagon_L2_loadw_locked,                   // llvm.hexagon.L2.loadw.locked
+    hexagon_L4_loadd_locked,                   // llvm.hexagon.L4.loadd.locked
+    hexagon_M2_acci,                           // llvm.hexagon.M2.acci
+    hexagon_M2_accii,                          // llvm.hexagon.M2.accii
+    hexagon_M2_cmaci_s0,                       // llvm.hexagon.M2.cmaci.s0
+    hexagon_M2_cmacr_s0,                       // llvm.hexagon.M2.cmacr.s0
+    hexagon_M2_cmacs_s0,                       // llvm.hexagon.M2.cmacs.s0
+    hexagon_M2_cmacs_s1,                       // llvm.hexagon.M2.cmacs.s1
+    hexagon_M2_cmacsc_s0,                      // llvm.hexagon.M2.cmacsc.s0
+    hexagon_M2_cmacsc_s1,                      // llvm.hexagon.M2.cmacsc.s1
+    hexagon_M2_cmpyi_s0,                       // llvm.hexagon.M2.cmpyi.s0
+    hexagon_M2_cmpyr_s0,                       // llvm.hexagon.M2.cmpyr.s0
+    hexagon_M2_cmpyrs_s0,                      // llvm.hexagon.M2.cmpyrs.s0
+    hexagon_M2_cmpyrs_s1,                      // llvm.hexagon.M2.cmpyrs.s1
+    hexagon_M2_cmpyrsc_s0,                     // llvm.hexagon.M2.cmpyrsc.s0
+    hexagon_M2_cmpyrsc_s1,                     // llvm.hexagon.M2.cmpyrsc.s1
+    hexagon_M2_cmpys_s0,                       // llvm.hexagon.M2.cmpys.s0
+    hexagon_M2_cmpys_s1,                       // llvm.hexagon.M2.cmpys.s1
+    hexagon_M2_cmpysc_s0,                      // llvm.hexagon.M2.cmpysc.s0
+    hexagon_M2_cmpysc_s1,                      // llvm.hexagon.M2.cmpysc.s1
+    hexagon_M2_cnacs_s0,                       // llvm.hexagon.M2.cnacs.s0
+    hexagon_M2_cnacs_s1,                       // llvm.hexagon.M2.cnacs.s1
+    hexagon_M2_cnacsc_s0,                      // llvm.hexagon.M2.cnacsc.s0
+    hexagon_M2_cnacsc_s1,                      // llvm.hexagon.M2.cnacsc.s1
+    hexagon_M2_dpmpyss_acc_s0,                 // llvm.hexagon.M2.dpmpyss.acc.s0
+    hexagon_M2_dpmpyss_nac_s0,                 // llvm.hexagon.M2.dpmpyss.nac.s0
+    hexagon_M2_dpmpyss_rnd_s0,                 // llvm.hexagon.M2.dpmpyss.rnd.s0
+    hexagon_M2_dpmpyss_s0,                     // llvm.hexagon.M2.dpmpyss.s0
+    hexagon_M2_dpmpyuu_acc_s0,                 // llvm.hexagon.M2.dpmpyuu.acc.s0
+    hexagon_M2_dpmpyuu_nac_s0,                 // llvm.hexagon.M2.dpmpyuu.nac.s0
+    hexagon_M2_dpmpyuu_s0,                     // llvm.hexagon.M2.dpmpyuu.s0
+    hexagon_M2_hmmpyh_rs1,                     // llvm.hexagon.M2.hmmpyh.rs1
+    hexagon_M2_hmmpyh_s1,                      // llvm.hexagon.M2.hmmpyh.s1
+    hexagon_M2_hmmpyl_rs1,                     // llvm.hexagon.M2.hmmpyl.rs1
+    hexagon_M2_hmmpyl_s1,                      // llvm.hexagon.M2.hmmpyl.s1
+    hexagon_M2_maci,                           // llvm.hexagon.M2.maci
+    hexagon_M2_macsin,                         // llvm.hexagon.M2.macsin
+    hexagon_M2_macsip,                         // llvm.hexagon.M2.macsip
+    hexagon_M2_mmachs_rs0,                     // llvm.hexagon.M2.mmachs.rs0
+    hexagon_M2_mmachs_rs1,                     // llvm.hexagon.M2.mmachs.rs1
+    hexagon_M2_mmachs_s0,                      // llvm.hexagon.M2.mmachs.s0
+    hexagon_M2_mmachs_s1,                      // llvm.hexagon.M2.mmachs.s1
+    hexagon_M2_mmacls_rs0,                     // llvm.hexagon.M2.mmacls.rs0
+    hexagon_M2_mmacls_rs1,                     // llvm.hexagon.M2.mmacls.rs1
+    hexagon_M2_mmacls_s0,                      // llvm.hexagon.M2.mmacls.s0
+    hexagon_M2_mmacls_s1,                      // llvm.hexagon.M2.mmacls.s1
+    hexagon_M2_mmacuhs_rs0,                    // llvm.hexagon.M2.mmacuhs.rs0
+    hexagon_M2_mmacuhs_rs1,                    // llvm.hexagon.M2.mmacuhs.rs1
+    hexagon_M2_mmacuhs_s0,                     // llvm.hexagon.M2.mmacuhs.s0
+    hexagon_M2_mmacuhs_s1,                     // llvm.hexagon.M2.mmacuhs.s1
+    hexagon_M2_mmaculs_rs0,                    // llvm.hexagon.M2.mmaculs.rs0
+    hexagon_M2_mmaculs_rs1,                    // llvm.hexagon.M2.mmaculs.rs1
+    hexagon_M2_mmaculs_s0,                     // llvm.hexagon.M2.mmaculs.s0
+    hexagon_M2_mmaculs_s1,                     // llvm.hexagon.M2.mmaculs.s1
+    hexagon_M2_mmpyh_rs0,                      // llvm.hexagon.M2.mmpyh.rs0
+    hexagon_M2_mmpyh_rs1,                      // llvm.hexagon.M2.mmpyh.rs1
+    hexagon_M2_mmpyh_s0,                       // llvm.hexagon.M2.mmpyh.s0
+    hexagon_M2_mmpyh_s1,                       // llvm.hexagon.M2.mmpyh.s1
+    hexagon_M2_mmpyl_rs0,                      // llvm.hexagon.M2.mmpyl.rs0
+    hexagon_M2_mmpyl_rs1,                      // llvm.hexagon.M2.mmpyl.rs1
+    hexagon_M2_mmpyl_s0,                       // llvm.hexagon.M2.mmpyl.s0
+    hexagon_M2_mmpyl_s1,                       // llvm.hexagon.M2.mmpyl.s1
+    hexagon_M2_mmpyuh_rs0,                     // llvm.hexagon.M2.mmpyuh.rs0
+    hexagon_M2_mmpyuh_rs1,                     // llvm.hexagon.M2.mmpyuh.rs1
+    hexagon_M2_mmpyuh_s0,                      // llvm.hexagon.M2.mmpyuh.s0
+    hexagon_M2_mmpyuh_s1,                      // llvm.hexagon.M2.mmpyuh.s1
+    hexagon_M2_mmpyul_rs0,                     // llvm.hexagon.M2.mmpyul.rs0
+    hexagon_M2_mmpyul_rs1,                     // llvm.hexagon.M2.mmpyul.rs1
+    hexagon_M2_mmpyul_s0,                      // llvm.hexagon.M2.mmpyul.s0
+    hexagon_M2_mmpyul_s1,                      // llvm.hexagon.M2.mmpyul.s1
+    hexagon_M2_mpy_acc_hh_s0,                  // llvm.hexagon.M2.mpy.acc.hh.s0
+    hexagon_M2_mpy_acc_hh_s1,                  // llvm.hexagon.M2.mpy.acc.hh.s1
+    hexagon_M2_mpy_acc_hl_s0,                  // llvm.hexagon.M2.mpy.acc.hl.s0
+    hexagon_M2_mpy_acc_hl_s1,                  // llvm.hexagon.M2.mpy.acc.hl.s1
+    hexagon_M2_mpy_acc_lh_s0,                  // llvm.hexagon.M2.mpy.acc.lh.s0
+    hexagon_M2_mpy_acc_lh_s1,                  // llvm.hexagon.M2.mpy.acc.lh.s1
+    hexagon_M2_mpy_acc_ll_s0,                  // llvm.hexagon.M2.mpy.acc.ll.s0
+    hexagon_M2_mpy_acc_ll_s1,                  // llvm.hexagon.M2.mpy.acc.ll.s1
+    hexagon_M2_mpy_acc_sat_hh_s0,              // llvm.hexagon.M2.mpy.acc.sat.hh.s0
+    hexagon_M2_mpy_acc_sat_hh_s1,              // llvm.hexagon.M2.mpy.acc.sat.hh.s1
+    hexagon_M2_mpy_acc_sat_hl_s0,              // llvm.hexagon.M2.mpy.acc.sat.hl.s0
+    hexagon_M2_mpy_acc_sat_hl_s1,              // llvm.hexagon.M2.mpy.acc.sat.hl.s1
+    hexagon_M2_mpy_acc_sat_lh_s0,              // llvm.hexagon.M2.mpy.acc.sat.lh.s0
+    hexagon_M2_mpy_acc_sat_lh_s1,              // llvm.hexagon.M2.mpy.acc.sat.lh.s1
+    hexagon_M2_mpy_acc_sat_ll_s0,              // llvm.hexagon.M2.mpy.acc.sat.ll.s0
+    hexagon_M2_mpy_acc_sat_ll_s1,              // llvm.hexagon.M2.mpy.acc.sat.ll.s1
+    hexagon_M2_mpy_hh_s0,                      // llvm.hexagon.M2.mpy.hh.s0
+    hexagon_M2_mpy_hh_s1,                      // llvm.hexagon.M2.mpy.hh.s1
+    hexagon_M2_mpy_hl_s0,                      // llvm.hexagon.M2.mpy.hl.s0
+    hexagon_M2_mpy_hl_s1,                      // llvm.hexagon.M2.mpy.hl.s1
+    hexagon_M2_mpy_lh_s0,                      // llvm.hexagon.M2.mpy.lh.s0
+    hexagon_M2_mpy_lh_s1,                      // llvm.hexagon.M2.mpy.lh.s1
+    hexagon_M2_mpy_ll_s0,                      // llvm.hexagon.M2.mpy.ll.s0
+    hexagon_M2_mpy_ll_s1,                      // llvm.hexagon.M2.mpy.ll.s1
+    hexagon_M2_mpy_nac_hh_s0,                  // llvm.hexagon.M2.mpy.nac.hh.s0
+    hexagon_M2_mpy_nac_hh_s1,                  // llvm.hexagon.M2.mpy.nac.hh.s1
+    hexagon_M2_mpy_nac_hl_s0,                  // llvm.hexagon.M2.mpy.nac.hl.s0
+    hexagon_M2_mpy_nac_hl_s1,                  // llvm.hexagon.M2.mpy.nac.hl.s1
+    hexagon_M2_mpy_nac_lh_s0,                  // llvm.hexagon.M2.mpy.nac.lh.s0
+    hexagon_M2_mpy_nac_lh_s1,                  // llvm.hexagon.M2.mpy.nac.lh.s1
+    hexagon_M2_mpy_nac_ll_s0,                  // llvm.hexagon.M2.mpy.nac.ll.s0
+    hexagon_M2_mpy_nac_ll_s1,                  // llvm.hexagon.M2.mpy.nac.ll.s1
+    hexagon_M2_mpy_nac_sat_hh_s0,              // llvm.hexagon.M2.mpy.nac.sat.hh.s0
+    hexagon_M2_mpy_nac_sat_hh_s1,              // llvm.hexagon.M2.mpy.nac.sat.hh.s1
+    hexagon_M2_mpy_nac_sat_hl_s0,              // llvm.hexagon.M2.mpy.nac.sat.hl.s0
+    hexagon_M2_mpy_nac_sat_hl_s1,              // llvm.hexagon.M2.mpy.nac.sat.hl.s1
+    hexagon_M2_mpy_nac_sat_lh_s0,              // llvm.hexagon.M2.mpy.nac.sat.lh.s0
+    hexagon_M2_mpy_nac_sat_lh_s1,              // llvm.hexagon.M2.mpy.nac.sat.lh.s1
+    hexagon_M2_mpy_nac_sat_ll_s0,              // llvm.hexagon.M2.mpy.nac.sat.ll.s0
+    hexagon_M2_mpy_nac_sat_ll_s1,              // llvm.hexagon.M2.mpy.nac.sat.ll.s1
+    hexagon_M2_mpy_rnd_hh_s0,                  // llvm.hexagon.M2.mpy.rnd.hh.s0
+    hexagon_M2_mpy_rnd_hh_s1,                  // llvm.hexagon.M2.mpy.rnd.hh.s1
+    hexagon_M2_mpy_rnd_hl_s0,                  // llvm.hexagon.M2.mpy.rnd.hl.s0
+    hexagon_M2_mpy_rnd_hl_s1,                  // llvm.hexagon.M2.mpy.rnd.hl.s1
+    hexagon_M2_mpy_rnd_lh_s0,                  // llvm.hexagon.M2.mpy.rnd.lh.s0
+    hexagon_M2_mpy_rnd_lh_s1,                  // llvm.hexagon.M2.mpy.rnd.lh.s1
+    hexagon_M2_mpy_rnd_ll_s0,                  // llvm.hexagon.M2.mpy.rnd.ll.s0
+    hexagon_M2_mpy_rnd_ll_s1,                  // llvm.hexagon.M2.mpy.rnd.ll.s1
+    hexagon_M2_mpy_sat_hh_s0,                  // llvm.hexagon.M2.mpy.sat.hh.s0
+    hexagon_M2_mpy_sat_hh_s1,                  // llvm.hexagon.M2.mpy.sat.hh.s1
+    hexagon_M2_mpy_sat_hl_s0,                  // llvm.hexagon.M2.mpy.sat.hl.s0
+    hexagon_M2_mpy_sat_hl_s1,                  // llvm.hexagon.M2.mpy.sat.hl.s1
+    hexagon_M2_mpy_sat_lh_s0,                  // llvm.hexagon.M2.mpy.sat.lh.s0
+    hexagon_M2_mpy_sat_lh_s1,                  // llvm.hexagon.M2.mpy.sat.lh.s1
+    hexagon_M2_mpy_sat_ll_s0,                  // llvm.hexagon.M2.mpy.sat.ll.s0
+    hexagon_M2_mpy_sat_ll_s1,                  // llvm.hexagon.M2.mpy.sat.ll.s1
+    hexagon_M2_mpy_sat_rnd_hh_s0,              // llvm.hexagon.M2.mpy.sat.rnd.hh.s0
+    hexagon_M2_mpy_sat_rnd_hh_s1,              // llvm.hexagon.M2.mpy.sat.rnd.hh.s1
+    hexagon_M2_mpy_sat_rnd_hl_s0,              // llvm.hexagon.M2.mpy.sat.rnd.hl.s0
+    hexagon_M2_mpy_sat_rnd_hl_s1,              // llvm.hexagon.M2.mpy.sat.rnd.hl.s1
+    hexagon_M2_mpy_sat_rnd_lh_s0,              // llvm.hexagon.M2.mpy.sat.rnd.lh.s0
+    hexagon_M2_mpy_sat_rnd_lh_s1,              // llvm.hexagon.M2.mpy.sat.rnd.lh.s1
+    hexagon_M2_mpy_sat_rnd_ll_s0,              // llvm.hexagon.M2.mpy.sat.rnd.ll.s0
+    hexagon_M2_mpy_sat_rnd_ll_s1,              // llvm.hexagon.M2.mpy.sat.rnd.ll.s1
+    hexagon_M2_mpy_up,                         // llvm.hexagon.M2.mpy.up
+    hexagon_M2_mpy_up_s1,                      // llvm.hexagon.M2.mpy.up.s1
+    hexagon_M2_mpy_up_s1_sat,                  // llvm.hexagon.M2.mpy.up.s1.sat
+    hexagon_M2_mpyd_acc_hh_s0,                 // llvm.hexagon.M2.mpyd.acc.hh.s0
+    hexagon_M2_mpyd_acc_hh_s1,                 // llvm.hexagon.M2.mpyd.acc.hh.s1
+    hexagon_M2_mpyd_acc_hl_s0,                 // llvm.hexagon.M2.mpyd.acc.hl.s0
+    hexagon_M2_mpyd_acc_hl_s1,                 // llvm.hexagon.M2.mpyd.acc.hl.s1
+    hexagon_M2_mpyd_acc_lh_s0,                 // llvm.hexagon.M2.mpyd.acc.lh.s0
+    hexagon_M2_mpyd_acc_lh_s1,                 // llvm.hexagon.M2.mpyd.acc.lh.s1
+    hexagon_M2_mpyd_acc_ll_s0,                 // llvm.hexagon.M2.mpyd.acc.ll.s0
+    hexagon_M2_mpyd_acc_ll_s1,                 // llvm.hexagon.M2.mpyd.acc.ll.s1
+    hexagon_M2_mpyd_hh_s0,                     // llvm.hexagon.M2.mpyd.hh.s0
+    hexagon_M2_mpyd_hh_s1,                     // llvm.hexagon.M2.mpyd.hh.s1
+    hexagon_M2_mpyd_hl_s0,                     // llvm.hexagon.M2.mpyd.hl.s0
+    hexagon_M2_mpyd_hl_s1,                     // llvm.hexagon.M2.mpyd.hl.s1
+    hexagon_M2_mpyd_lh_s0,                     // llvm.hexagon.M2.mpyd.lh.s0
+    hexagon_M2_mpyd_lh_s1,                     // llvm.hexagon.M2.mpyd.lh.s1
+    hexagon_M2_mpyd_ll_s0,                     // llvm.hexagon.M2.mpyd.ll.s0
+    hexagon_M2_mpyd_ll_s1,                     // llvm.hexagon.M2.mpyd.ll.s1
+    hexagon_M2_mpyd_nac_hh_s0,                 // llvm.hexagon.M2.mpyd.nac.hh.s0
+    hexagon_M2_mpyd_nac_hh_s1,                 // llvm.hexagon.M2.mpyd.nac.hh.s1
+    hexagon_M2_mpyd_nac_hl_s0,                 // llvm.hexagon.M2.mpyd.nac.hl.s0
+    hexagon_M2_mpyd_nac_hl_s1,                 // llvm.hexagon.M2.mpyd.nac.hl.s1
+    hexagon_M2_mpyd_nac_lh_s0,                 // llvm.hexagon.M2.mpyd.nac.lh.s0
+    hexagon_M2_mpyd_nac_lh_s1,                 // llvm.hexagon.M2.mpyd.nac.lh.s1
+    hexagon_M2_mpyd_nac_ll_s0,                 // llvm.hexagon.M2.mpyd.nac.ll.s0
+    hexagon_M2_mpyd_nac_ll_s1,                 // llvm.hexagon.M2.mpyd.nac.ll.s1
+    hexagon_M2_mpyd_rnd_hh_s0,                 // llvm.hexagon.M2.mpyd.rnd.hh.s0
+    hexagon_M2_mpyd_rnd_hh_s1,                 // llvm.hexagon.M2.mpyd.rnd.hh.s1
+    hexagon_M2_mpyd_rnd_hl_s0,                 // llvm.hexagon.M2.mpyd.rnd.hl.s0
+    hexagon_M2_mpyd_rnd_hl_s1,                 // llvm.hexagon.M2.mpyd.rnd.hl.s1
+    hexagon_M2_mpyd_rnd_lh_s0,                 // llvm.hexagon.M2.mpyd.rnd.lh.s0
+    hexagon_M2_mpyd_rnd_lh_s1,                 // llvm.hexagon.M2.mpyd.rnd.lh.s1
+    hexagon_M2_mpyd_rnd_ll_s0,                 // llvm.hexagon.M2.mpyd.rnd.ll.s0
+    hexagon_M2_mpyd_rnd_ll_s1,                 // llvm.hexagon.M2.mpyd.rnd.ll.s1
+    hexagon_M2_mpyi,                           // llvm.hexagon.M2.mpyi
+    hexagon_M2_mpysmi,                         // llvm.hexagon.M2.mpysmi
+    hexagon_M2_mpysu_up,                       // llvm.hexagon.M2.mpysu.up
+    hexagon_M2_mpyu_acc_hh_s0,                 // llvm.hexagon.M2.mpyu.acc.hh.s0
+    hexagon_M2_mpyu_acc_hh_s1,                 // llvm.hexagon.M2.mpyu.acc.hh.s1
+    hexagon_M2_mpyu_acc_hl_s0,                 // llvm.hexagon.M2.mpyu.acc.hl.s0
+    hexagon_M2_mpyu_acc_hl_s1,                 // llvm.hexagon.M2.mpyu.acc.hl.s1
+    hexagon_M2_mpyu_acc_lh_s0,                 // llvm.hexagon.M2.mpyu.acc.lh.s0
+    hexagon_M2_mpyu_acc_lh_s1,                 // llvm.hexagon.M2.mpyu.acc.lh.s1
+    hexagon_M2_mpyu_acc_ll_s0,                 // llvm.hexagon.M2.mpyu.acc.ll.s0
+    hexagon_M2_mpyu_acc_ll_s1,                 // llvm.hexagon.M2.mpyu.acc.ll.s1
+    hexagon_M2_mpyu_hh_s0,                     // llvm.hexagon.M2.mpyu.hh.s0
+    hexagon_M2_mpyu_hh_s1,                     // llvm.hexagon.M2.mpyu.hh.s1
+    hexagon_M2_mpyu_hl_s0,                     // llvm.hexagon.M2.mpyu.hl.s0
+    hexagon_M2_mpyu_hl_s1,                     // llvm.hexagon.M2.mpyu.hl.s1
+    hexagon_M2_mpyu_lh_s0,                     // llvm.hexagon.M2.mpyu.lh.s0
+    hexagon_M2_mpyu_lh_s1,                     // llvm.hexagon.M2.mpyu.lh.s1
+    hexagon_M2_mpyu_ll_s0,                     // llvm.hexagon.M2.mpyu.ll.s0
+    hexagon_M2_mpyu_ll_s1,                     // llvm.hexagon.M2.mpyu.ll.s1
+    hexagon_M2_mpyu_nac_hh_s0,                 // llvm.hexagon.M2.mpyu.nac.hh.s0
+    hexagon_M2_mpyu_nac_hh_s1,                 // llvm.hexagon.M2.mpyu.nac.hh.s1
+    hexagon_M2_mpyu_nac_hl_s0,                 // llvm.hexagon.M2.mpyu.nac.hl.s0
+    hexagon_M2_mpyu_nac_hl_s1,                 // llvm.hexagon.M2.mpyu.nac.hl.s1
+    hexagon_M2_mpyu_nac_lh_s0,                 // llvm.hexagon.M2.mpyu.nac.lh.s0
+    hexagon_M2_mpyu_nac_lh_s1,                 // llvm.hexagon.M2.mpyu.nac.lh.s1
+    hexagon_M2_mpyu_nac_ll_s0,                 // llvm.hexagon.M2.mpyu.nac.ll.s0
+    hexagon_M2_mpyu_nac_ll_s1,                 // llvm.hexagon.M2.mpyu.nac.ll.s1
+    hexagon_M2_mpyu_up,                        // llvm.hexagon.M2.mpyu.up
+    hexagon_M2_mpyud_acc_hh_s0,                // llvm.hexagon.M2.mpyud.acc.hh.s0
+    hexagon_M2_mpyud_acc_hh_s1,                // llvm.hexagon.M2.mpyud.acc.hh.s1
+    hexagon_M2_mpyud_acc_hl_s0,                // llvm.hexagon.M2.mpyud.acc.hl.s0
+    hexagon_M2_mpyud_acc_hl_s1,                // llvm.hexagon.M2.mpyud.acc.hl.s1
+    hexagon_M2_mpyud_acc_lh_s0,                // llvm.hexagon.M2.mpyud.acc.lh.s0
+    hexagon_M2_mpyud_acc_lh_s1,                // llvm.hexagon.M2.mpyud.acc.lh.s1
+    hexagon_M2_mpyud_acc_ll_s0,                // llvm.hexagon.M2.mpyud.acc.ll.s0
+    hexagon_M2_mpyud_acc_ll_s1,                // llvm.hexagon.M2.mpyud.acc.ll.s1
+    hexagon_M2_mpyud_hh_s0,                    // llvm.hexagon.M2.mpyud.hh.s0
+    hexagon_M2_mpyud_hh_s1,                    // llvm.hexagon.M2.mpyud.hh.s1
+    hexagon_M2_mpyud_hl_s0,                    // llvm.hexagon.M2.mpyud.hl.s0
+    hexagon_M2_mpyud_hl_s1,                    // llvm.hexagon.M2.mpyud.hl.s1
+    hexagon_M2_mpyud_lh_s0,                    // llvm.hexagon.M2.mpyud.lh.s0
+    hexagon_M2_mpyud_lh_s1,                    // llvm.hexagon.M2.mpyud.lh.s1
+    hexagon_M2_mpyud_ll_s0,                    // llvm.hexagon.M2.mpyud.ll.s0
+    hexagon_M2_mpyud_ll_s1,                    // llvm.hexagon.M2.mpyud.ll.s1
+    hexagon_M2_mpyud_nac_hh_s0,                // llvm.hexagon.M2.mpyud.nac.hh.s0
+    hexagon_M2_mpyud_nac_hh_s1,                // llvm.hexagon.M2.mpyud.nac.hh.s1
+    hexagon_M2_mpyud_nac_hl_s0,                // llvm.hexagon.M2.mpyud.nac.hl.s0
+    hexagon_M2_mpyud_nac_hl_s1,                // llvm.hexagon.M2.mpyud.nac.hl.s1
+    hexagon_M2_mpyud_nac_lh_s0,                // llvm.hexagon.M2.mpyud.nac.lh.s0
+    hexagon_M2_mpyud_nac_lh_s1,                // llvm.hexagon.M2.mpyud.nac.lh.s1
+    hexagon_M2_mpyud_nac_ll_s0,                // llvm.hexagon.M2.mpyud.nac.ll.s0
+    hexagon_M2_mpyud_nac_ll_s1,                // llvm.hexagon.M2.mpyud.nac.ll.s1
+    hexagon_M2_mpyui,                          // llvm.hexagon.M2.mpyui
+    hexagon_M2_nacci,                          // llvm.hexagon.M2.nacci
+    hexagon_M2_naccii,                         // llvm.hexagon.M2.naccii
+    hexagon_M2_subacc,                         // llvm.hexagon.M2.subacc
+    hexagon_M2_vabsdiffh,                      // llvm.hexagon.M2.vabsdiffh
+    hexagon_M2_vabsdiffw,                      // llvm.hexagon.M2.vabsdiffw
+    hexagon_M2_vcmac_s0_sat_i,                 // llvm.hexagon.M2.vcmac.s0.sat.i
+    hexagon_M2_vcmac_s0_sat_r,                 // llvm.hexagon.M2.vcmac.s0.sat.r
+    hexagon_M2_vcmpy_s0_sat_i,                 // llvm.hexagon.M2.vcmpy.s0.sat.i
+    hexagon_M2_vcmpy_s0_sat_r,                 // llvm.hexagon.M2.vcmpy.s0.sat.r
+    hexagon_M2_vcmpy_s1_sat_i,                 // llvm.hexagon.M2.vcmpy.s1.sat.i
+    hexagon_M2_vcmpy_s1_sat_r,                 // llvm.hexagon.M2.vcmpy.s1.sat.r
+    hexagon_M2_vdmacs_s0,                      // llvm.hexagon.M2.vdmacs.s0
+    hexagon_M2_vdmacs_s1,                      // llvm.hexagon.M2.vdmacs.s1
+    hexagon_M2_vdmpyrs_s0,                     // llvm.hexagon.M2.vdmpyrs.s0
+    hexagon_M2_vdmpyrs_s1,                     // llvm.hexagon.M2.vdmpyrs.s1
+    hexagon_M2_vdmpys_s0,                      // llvm.hexagon.M2.vdmpys.s0
+    hexagon_M2_vdmpys_s1,                      // llvm.hexagon.M2.vdmpys.s1
+    hexagon_M2_vmac2,                          // llvm.hexagon.M2.vmac2
+    hexagon_M2_vmac2es,                        // llvm.hexagon.M2.vmac2es
+    hexagon_M2_vmac2es_s0,                     // llvm.hexagon.M2.vmac2es.s0
+    hexagon_M2_vmac2es_s1,                     // llvm.hexagon.M2.vmac2es.s1
+    hexagon_M2_vmac2s_s0,                      // llvm.hexagon.M2.vmac2s.s0
+    hexagon_M2_vmac2s_s1,                      // llvm.hexagon.M2.vmac2s.s1
+    hexagon_M2_vmac2su_s0,                     // llvm.hexagon.M2.vmac2su.s0
+    hexagon_M2_vmac2su_s1,                     // llvm.hexagon.M2.vmac2su.s1
+    hexagon_M2_vmpy2es_s0,                     // llvm.hexagon.M2.vmpy2es.s0
+    hexagon_M2_vmpy2es_s1,                     // llvm.hexagon.M2.vmpy2es.s1
+    hexagon_M2_vmpy2s_s0,                      // llvm.hexagon.M2.vmpy2s.s0
+    hexagon_M2_vmpy2s_s0pack,                  // llvm.hexagon.M2.vmpy2s.s0pack
+    hexagon_M2_vmpy2s_s1,                      // llvm.hexagon.M2.vmpy2s.s1
+    hexagon_M2_vmpy2s_s1pack,                  // llvm.hexagon.M2.vmpy2s.s1pack
+    hexagon_M2_vmpy2su_s0,                     // llvm.hexagon.M2.vmpy2su.s0
+    hexagon_M2_vmpy2su_s1,                     // llvm.hexagon.M2.vmpy2su.s1
+    hexagon_M2_vraddh,                         // llvm.hexagon.M2.vraddh
+    hexagon_M2_vradduh,                        // llvm.hexagon.M2.vradduh
+    hexagon_M2_vrcmaci_s0,                     // llvm.hexagon.M2.vrcmaci.s0
+    hexagon_M2_vrcmaci_s0c,                    // llvm.hexagon.M2.vrcmaci.s0c
+    hexagon_M2_vrcmacr_s0,                     // llvm.hexagon.M2.vrcmacr.s0
+    hexagon_M2_vrcmacr_s0c,                    // llvm.hexagon.M2.vrcmacr.s0c
+    hexagon_M2_vrcmpyi_s0,                     // llvm.hexagon.M2.vrcmpyi.s0
+    hexagon_M2_vrcmpyi_s0c,                    // llvm.hexagon.M2.vrcmpyi.s0c
+    hexagon_M2_vrcmpyr_s0,                     // llvm.hexagon.M2.vrcmpyr.s0
+    hexagon_M2_vrcmpyr_s0c,                    // llvm.hexagon.M2.vrcmpyr.s0c
+    hexagon_M2_vrcmpys_acc_s1,                 // llvm.hexagon.M2.vrcmpys.acc.s1
+    hexagon_M2_vrcmpys_s1,                     // llvm.hexagon.M2.vrcmpys.s1
+    hexagon_M2_vrcmpys_s1rp,                   // llvm.hexagon.M2.vrcmpys.s1rp
+    hexagon_M2_vrmac_s0,                       // llvm.hexagon.M2.vrmac.s0
+    hexagon_M2_vrmpy_s0,                       // llvm.hexagon.M2.vrmpy.s0
+    hexagon_M2_xor_xacc,                       // llvm.hexagon.M2.xor.xacc
+    hexagon_M4_and_and,                        // llvm.hexagon.M4.and.and
+    hexagon_M4_and_andn,                       // llvm.hexagon.M4.and.andn
+    hexagon_M4_and_or,                         // llvm.hexagon.M4.and.or
+    hexagon_M4_and_xor,                        // llvm.hexagon.M4.and.xor
+    hexagon_M4_cmpyi_wh,                       // llvm.hexagon.M4.cmpyi.wh
+    hexagon_M4_cmpyi_whc,                      // llvm.hexagon.M4.cmpyi.whc
+    hexagon_M4_cmpyr_wh,                       // llvm.hexagon.M4.cmpyr.wh
+    hexagon_M4_cmpyr_whc,                      // llvm.hexagon.M4.cmpyr.whc
+    hexagon_M4_mac_up_s1_sat,                  // llvm.hexagon.M4.mac.up.s1.sat
+    hexagon_M4_mpyri_addi,                     // llvm.hexagon.M4.mpyri.addi
+    hexagon_M4_mpyri_addr,                     // llvm.hexagon.M4.mpyri.addr
+    hexagon_M4_mpyri_addr_u2,                  // llvm.hexagon.M4.mpyri.addr.u2
+    hexagon_M4_mpyrr_addi,                     // llvm.hexagon.M4.mpyrr.addi
+    hexagon_M4_mpyrr_addr,                     // llvm.hexagon.M4.mpyrr.addr
+    hexagon_M4_nac_up_s1_sat,                  // llvm.hexagon.M4.nac.up.s1.sat
+    hexagon_M4_or_and,                         // llvm.hexagon.M4.or.and
+    hexagon_M4_or_andn,                        // llvm.hexagon.M4.or.andn
+    hexagon_M4_or_or,                          // llvm.hexagon.M4.or.or
+    hexagon_M4_or_xor,                         // llvm.hexagon.M4.or.xor
+    hexagon_M4_pmpyw,                          // llvm.hexagon.M4.pmpyw
+    hexagon_M4_pmpyw_acc,                      // llvm.hexagon.M4.pmpyw.acc
+    hexagon_M4_vpmpyh,                         // llvm.hexagon.M4.vpmpyh
+    hexagon_M4_vpmpyh_acc,                     // llvm.hexagon.M4.vpmpyh.acc
+    hexagon_M4_vrmpyeh_acc_s0,                 // llvm.hexagon.M4.vrmpyeh.acc.s0
+    hexagon_M4_vrmpyeh_acc_s1,                 // llvm.hexagon.M4.vrmpyeh.acc.s1
+    hexagon_M4_vrmpyeh_s0,                     // llvm.hexagon.M4.vrmpyeh.s0
+    hexagon_M4_vrmpyeh_s1,                     // llvm.hexagon.M4.vrmpyeh.s1
+    hexagon_M4_vrmpyoh_acc_s0,                 // llvm.hexagon.M4.vrmpyoh.acc.s0
+    hexagon_M4_vrmpyoh_acc_s1,                 // llvm.hexagon.M4.vrmpyoh.acc.s1
+    hexagon_M4_vrmpyoh_s0,                     // llvm.hexagon.M4.vrmpyoh.s0
+    hexagon_M4_vrmpyoh_s1,                     // llvm.hexagon.M4.vrmpyoh.s1
+    hexagon_M4_xor_and,                        // llvm.hexagon.M4.xor.and
+    hexagon_M4_xor_andn,                       // llvm.hexagon.M4.xor.andn
+    hexagon_M4_xor_or,                         // llvm.hexagon.M4.xor.or
+    hexagon_M4_xor_xacc,                       // llvm.hexagon.M4.xor.xacc
+    hexagon_M5_vdmacbsu,                       // llvm.hexagon.M5.vdmacbsu
+    hexagon_M5_vdmpybsu,                       // llvm.hexagon.M5.vdmpybsu
+    hexagon_M5_vmacbsu,                        // llvm.hexagon.M5.vmacbsu
+    hexagon_M5_vmacbuu,                        // llvm.hexagon.M5.vmacbuu
+    hexagon_M5_vmpybsu,                        // llvm.hexagon.M5.vmpybsu
+    hexagon_M5_vmpybuu,                        // llvm.hexagon.M5.vmpybuu
+    hexagon_M5_vrmacbsu,                       // llvm.hexagon.M5.vrmacbsu
+    hexagon_M5_vrmacbuu,                       // llvm.hexagon.M5.vrmacbuu
+    hexagon_M5_vrmpybsu,                       // llvm.hexagon.M5.vrmpybsu
+    hexagon_M5_vrmpybuu,                       // llvm.hexagon.M5.vrmpybuu
+    hexagon_M6_vabsdiffb,                      // llvm.hexagon.M6.vabsdiffb
+    hexagon_M6_vabsdiffub,                     // llvm.hexagon.M6.vabsdiffub
+    hexagon_S2_addasl_rrri,                    // llvm.hexagon.S2.addasl.rrri
+    hexagon_S2_asl_i_p,                        // llvm.hexagon.S2.asl.i.p
+    hexagon_S2_asl_i_p_acc,                    // llvm.hexagon.S2.asl.i.p.acc
+    hexagon_S2_asl_i_p_and,                    // llvm.hexagon.S2.asl.i.p.and
+    hexagon_S2_asl_i_p_nac,                    // llvm.hexagon.S2.asl.i.p.nac
+    hexagon_S2_asl_i_p_or,                     // llvm.hexagon.S2.asl.i.p.or
+    hexagon_S2_asl_i_p_xacc,                   // llvm.hexagon.S2.asl.i.p.xacc
+    hexagon_S2_asl_i_r,                        // llvm.hexagon.S2.asl.i.r
+    hexagon_S2_asl_i_r_acc,                    // llvm.hexagon.S2.asl.i.r.acc
+    hexagon_S2_asl_i_r_and,                    // llvm.hexagon.S2.asl.i.r.and
+    hexagon_S2_asl_i_r_nac,                    // llvm.hexagon.S2.asl.i.r.nac
+    hexagon_S2_asl_i_r_or,                     // llvm.hexagon.S2.asl.i.r.or
+    hexagon_S2_asl_i_r_sat,                    // llvm.hexagon.S2.asl.i.r.sat
+    hexagon_S2_asl_i_r_xacc,                   // llvm.hexagon.S2.asl.i.r.xacc
+    hexagon_S2_asl_i_vh,                       // llvm.hexagon.S2.asl.i.vh
+    hexagon_S2_asl_i_vw,                       // llvm.hexagon.S2.asl.i.vw
+    hexagon_S2_asl_r_p,                        // llvm.hexagon.S2.asl.r.p
+    hexagon_S2_asl_r_p_acc,                    // llvm.hexagon.S2.asl.r.p.acc
+    hexagon_S2_asl_r_p_and,                    // llvm.hexagon.S2.asl.r.p.and
+    hexagon_S2_asl_r_p_nac,                    // llvm.hexagon.S2.asl.r.p.nac
+    hexagon_S2_asl_r_p_or,                     // llvm.hexagon.S2.asl.r.p.or
+    hexagon_S2_asl_r_p_xor,                    // llvm.hexagon.S2.asl.r.p.xor
+    hexagon_S2_asl_r_r,                        // llvm.hexagon.S2.asl.r.r
+    hexagon_S2_asl_r_r_acc,                    // llvm.hexagon.S2.asl.r.r.acc
+    hexagon_S2_asl_r_r_and,                    // llvm.hexagon.S2.asl.r.r.and
+    hexagon_S2_asl_r_r_nac,                    // llvm.hexagon.S2.asl.r.r.nac
+    hexagon_S2_asl_r_r_or,                     // llvm.hexagon.S2.asl.r.r.or
+    hexagon_S2_asl_r_r_sat,                    // llvm.hexagon.S2.asl.r.r.sat
+    hexagon_S2_asl_r_vh,                       // llvm.hexagon.S2.asl.r.vh
+    hexagon_S2_asl_r_vw,                       // llvm.hexagon.S2.asl.r.vw
+    hexagon_S2_asr_i_p,                        // llvm.hexagon.S2.asr.i.p
+    hexagon_S2_asr_i_p_acc,                    // llvm.hexagon.S2.asr.i.p.acc
+    hexagon_S2_asr_i_p_and,                    // llvm.hexagon.S2.asr.i.p.and
+    hexagon_S2_asr_i_p_nac,                    // llvm.hexagon.S2.asr.i.p.nac
+    hexagon_S2_asr_i_p_or,                     // llvm.hexagon.S2.asr.i.p.or
+    hexagon_S2_asr_i_p_rnd,                    // llvm.hexagon.S2.asr.i.p.rnd
+    hexagon_S2_asr_i_p_rnd_goodsyntax,         // llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
+    hexagon_S2_asr_i_r,                        // llvm.hexagon.S2.asr.i.r
+    hexagon_S2_asr_i_r_acc,                    // llvm.hexagon.S2.asr.i.r.acc
+    hexagon_S2_asr_i_r_and,                    // llvm.hexagon.S2.asr.i.r.and
+    hexagon_S2_asr_i_r_nac,                    // llvm.hexagon.S2.asr.i.r.nac
+    hexagon_S2_asr_i_r_or,                     // llvm.hexagon.S2.asr.i.r.or
+    hexagon_S2_asr_i_r_rnd,                    // llvm.hexagon.S2.asr.i.r.rnd
+    hexagon_S2_asr_i_r_rnd_goodsyntax,         // llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
+    hexagon_S2_asr_i_svw_trun,                 // llvm.hexagon.S2.asr.i.svw.trun
+    hexagon_S2_asr_i_vh,                       // llvm.hexagon.S2.asr.i.vh
+    hexagon_S2_asr_i_vw,                       // llvm.hexagon.S2.asr.i.vw
+    hexagon_S2_asr_r_p,                        // llvm.hexagon.S2.asr.r.p
+    hexagon_S2_asr_r_p_acc,                    // llvm.hexagon.S2.asr.r.p.acc
+    hexagon_S2_asr_r_p_and,                    // llvm.hexagon.S2.asr.r.p.and
+    hexagon_S2_asr_r_p_nac,                    // llvm.hexagon.S2.asr.r.p.nac
+    hexagon_S2_asr_r_p_or,                     // llvm.hexagon.S2.asr.r.p.or
+    hexagon_S2_asr_r_p_xor,                    // llvm.hexagon.S2.asr.r.p.xor
+    hexagon_S2_asr_r_r,                        // llvm.hexagon.S2.asr.r.r
+    hexagon_S2_asr_r_r_acc,                    // llvm.hexagon.S2.asr.r.r.acc
+    hexagon_S2_asr_r_r_and,                    // llvm.hexagon.S2.asr.r.r.and
+    hexagon_S2_asr_r_r_nac,                    // llvm.hexagon.S2.asr.r.r.nac
+    hexagon_S2_asr_r_r_or,                     // llvm.hexagon.S2.asr.r.r.or
+    hexagon_S2_asr_r_r_sat,                    // llvm.hexagon.S2.asr.r.r.sat
+    hexagon_S2_asr_r_svw_trun,                 // llvm.hexagon.S2.asr.r.svw.trun
+    hexagon_S2_asr_r_vh,                       // llvm.hexagon.S2.asr.r.vh
+    hexagon_S2_asr_r_vw,                       // llvm.hexagon.S2.asr.r.vw
+    hexagon_S2_brev,                           // llvm.hexagon.S2.brev
+    hexagon_S2_brevp,                          // llvm.hexagon.S2.brevp
+    hexagon_S2_cabacencbin,                    // llvm.hexagon.S2.cabacencbin
+    hexagon_S2_cl0,                            // llvm.hexagon.S2.cl0
+    hexagon_S2_cl0p,                           // llvm.hexagon.S2.cl0p
+    hexagon_S2_cl1,                            // llvm.hexagon.S2.cl1
+    hexagon_S2_cl1p,                           // llvm.hexagon.S2.cl1p
+    hexagon_S2_clb,                            // llvm.hexagon.S2.clb
+    hexagon_S2_clbnorm,                        // llvm.hexagon.S2.clbnorm
+    hexagon_S2_clbp,                           // llvm.hexagon.S2.clbp
+    hexagon_S2_clrbit_i,                       // llvm.hexagon.S2.clrbit.i
+    hexagon_S2_clrbit_r,                       // llvm.hexagon.S2.clrbit.r
+    hexagon_S2_ct0,                            // llvm.hexagon.S2.ct0
+    hexagon_S2_ct0p,                           // llvm.hexagon.S2.ct0p
+    hexagon_S2_ct1,                            // llvm.hexagon.S2.ct1
+    hexagon_S2_ct1p,                           // llvm.hexagon.S2.ct1p
+    hexagon_S2_deinterleave,                   // llvm.hexagon.S2.deinterleave
+    hexagon_S2_extractu,                       // llvm.hexagon.S2.extractu
+    hexagon_S2_extractu_rp,                    // llvm.hexagon.S2.extractu.rp
+    hexagon_S2_extractup,                      // llvm.hexagon.S2.extractup
+    hexagon_S2_extractup_rp,                   // llvm.hexagon.S2.extractup.rp
+    hexagon_S2_insert,                         // llvm.hexagon.S2.insert
+    hexagon_S2_insert_rp,                      // llvm.hexagon.S2.insert.rp
+    hexagon_S2_insertp,                        // llvm.hexagon.S2.insertp
+    hexagon_S2_insertp_rp,                     // llvm.hexagon.S2.insertp.rp
+    hexagon_S2_interleave,                     // llvm.hexagon.S2.interleave
+    hexagon_S2_lfsp,                           // llvm.hexagon.S2.lfsp
+    hexagon_S2_lsl_r_p,                        // llvm.hexagon.S2.lsl.r.p
+    hexagon_S2_lsl_r_p_acc,                    // llvm.hexagon.S2.lsl.r.p.acc
+    hexagon_S2_lsl_r_p_and,                    // llvm.hexagon.S2.lsl.r.p.and
+    hexagon_S2_lsl_r_p_nac,                    // llvm.hexagon.S2.lsl.r.p.nac
+    hexagon_S2_lsl_r_p_or,                     // llvm.hexagon.S2.lsl.r.p.or
+    hexagon_S2_lsl_r_p_xor,                    // llvm.hexagon.S2.lsl.r.p.xor
+    hexagon_S2_lsl_r_r,                        // llvm.hexagon.S2.lsl.r.r
+    hexagon_S2_lsl_r_r_acc,                    // llvm.hexagon.S2.lsl.r.r.acc
+    hexagon_S2_lsl_r_r_and,                    // llvm.hexagon.S2.lsl.r.r.and
+    hexagon_S2_lsl_r_r_nac,                    // llvm.hexagon.S2.lsl.r.r.nac
+    hexagon_S2_lsl_r_r_or,                     // llvm.hexagon.S2.lsl.r.r.or
+    hexagon_S2_lsl_r_vh,                       // llvm.hexagon.S2.lsl.r.vh
+    hexagon_S2_lsl_r_vw,                       // llvm.hexagon.S2.lsl.r.vw
+    hexagon_S2_lsr_i_p,                        // llvm.hexagon.S2.lsr.i.p
+    hexagon_S2_lsr_i_p_acc,                    // llvm.hexagon.S2.lsr.i.p.acc
+    hexagon_S2_lsr_i_p_and,                    // llvm.hexagon.S2.lsr.i.p.and
+    hexagon_S2_lsr_i_p_nac,                    // llvm.hexagon.S2.lsr.i.p.nac
+    hexagon_S2_lsr_i_p_or,                     // llvm.hexagon.S2.lsr.i.p.or
+    hexagon_S2_lsr_i_p_xacc,                   // llvm.hexagon.S2.lsr.i.p.xacc
+    hexagon_S2_lsr_i_r,                        // llvm.hexagon.S2.lsr.i.r
+    hexagon_S2_lsr_i_r_acc,                    // llvm.hexagon.S2.lsr.i.r.acc
+    hexagon_S2_lsr_i_r_and,                    // llvm.hexagon.S2.lsr.i.r.and
+    hexagon_S2_lsr_i_r_nac,                    // llvm.hexagon.S2.lsr.i.r.nac
+    hexagon_S2_lsr_i_r_or,                     // llvm.hexagon.S2.lsr.i.r.or
+    hexagon_S2_lsr_i_r_xacc,                   // llvm.hexagon.S2.lsr.i.r.xacc
+    hexagon_S2_lsr_i_vh,                       // llvm.hexagon.S2.lsr.i.vh
+    hexagon_S2_lsr_i_vw,                       // llvm.hexagon.S2.lsr.i.vw
+    hexagon_S2_lsr_r_p,                        // llvm.hexagon.S2.lsr.r.p
+    hexagon_S2_lsr_r_p_acc,                    // llvm.hexagon.S2.lsr.r.p.acc
+    hexagon_S2_lsr_r_p_and,                    // llvm.hexagon.S2.lsr.r.p.and
+    hexagon_S2_lsr_r_p_nac,                    // llvm.hexagon.S2.lsr.r.p.nac
+    hexagon_S2_lsr_r_p_or,                     // llvm.hexagon.S2.lsr.r.p.or
+    hexagon_S2_lsr_r_p_xor,                    // llvm.hexagon.S2.lsr.r.p.xor
+    hexagon_S2_lsr_r_r,                        // llvm.hexagon.S2.lsr.r.r
+    hexagon_S2_lsr_r_r_acc,                    // llvm.hexagon.S2.lsr.r.r.acc
+    hexagon_S2_lsr_r_r_and,                    // llvm.hexagon.S2.lsr.r.r.and
+    hexagon_S2_lsr_r_r_nac,                    // llvm.hexagon.S2.lsr.r.r.nac
+    hexagon_S2_lsr_r_r_or,                     // llvm.hexagon.S2.lsr.r.r.or
+    hexagon_S2_lsr_r_vh,                       // llvm.hexagon.S2.lsr.r.vh
+    hexagon_S2_lsr_r_vw,                       // llvm.hexagon.S2.lsr.r.vw
+    hexagon_S2_packhl,                         // llvm.hexagon.S2.packhl
+    hexagon_S2_parityp,                        // llvm.hexagon.S2.parityp
+    hexagon_S2_setbit_i,                       // llvm.hexagon.S2.setbit.i
+    hexagon_S2_setbit_r,                       // llvm.hexagon.S2.setbit.r
+    hexagon_S2_shuffeb,                        // llvm.hexagon.S2.shuffeb
+    hexagon_S2_shuffeh,                        // llvm.hexagon.S2.shuffeh
+    hexagon_S2_shuffob,                        // llvm.hexagon.S2.shuffob
+    hexagon_S2_shuffoh,                        // llvm.hexagon.S2.shuffoh
+    hexagon_S2_storerb_pbr,                    // llvm.hexagon.S2.storerb.pbr
+    hexagon_S2_storerb_pci,                    // llvm.hexagon.S2.storerb.pci
+    hexagon_S2_storerb_pcr,                    // llvm.hexagon.S2.storerb.pcr
+    hexagon_S2_storerd_pbr,                    // llvm.hexagon.S2.storerd.pbr
+    hexagon_S2_storerd_pci,                    // llvm.hexagon.S2.storerd.pci
+    hexagon_S2_storerd_pcr,                    // llvm.hexagon.S2.storerd.pcr
+    hexagon_S2_storerf_pbr,                    // llvm.hexagon.S2.storerf.pbr
+    hexagon_S2_storerf_pci,                    // llvm.hexagon.S2.storerf.pci
+    hexagon_S2_storerf_pcr,                    // llvm.hexagon.S2.storerf.pcr
+    hexagon_S2_storerh_pbr,                    // llvm.hexagon.S2.storerh.pbr
+    hexagon_S2_storerh_pci,                    // llvm.hexagon.S2.storerh.pci
+    hexagon_S2_storerh_pcr,                    // llvm.hexagon.S2.storerh.pcr
+    hexagon_S2_storeri_pbr,                    // llvm.hexagon.S2.storeri.pbr
+    hexagon_S2_storeri_pci,                    // llvm.hexagon.S2.storeri.pci
+    hexagon_S2_storeri_pcr,                    // llvm.hexagon.S2.storeri.pcr
+    hexagon_S2_storew_locked,                  // llvm.hexagon.S2.storew.locked
+    hexagon_S2_svsathb,                        // llvm.hexagon.S2.svsathb
+    hexagon_S2_svsathub,                       // llvm.hexagon.S2.svsathub
+    hexagon_S2_tableidxb_goodsyntax,           // llvm.hexagon.S2.tableidxb.goodsyntax
+    hexagon_S2_tableidxd_goodsyntax,           // llvm.hexagon.S2.tableidxd.goodsyntax
+    hexagon_S2_tableidxh_goodsyntax,           // llvm.hexagon.S2.tableidxh.goodsyntax
+    hexagon_S2_tableidxw_goodsyntax,           // llvm.hexagon.S2.tableidxw.goodsyntax
+    hexagon_S2_togglebit_i,                    // llvm.hexagon.S2.togglebit.i
+    hexagon_S2_togglebit_r,                    // llvm.hexagon.S2.togglebit.r
+    hexagon_S2_tstbit_i,                       // llvm.hexagon.S2.tstbit.i
+    hexagon_S2_tstbit_r,                       // llvm.hexagon.S2.tstbit.r
+    hexagon_S2_valignib,                       // llvm.hexagon.S2.valignib
+    hexagon_S2_valignrb,                       // llvm.hexagon.S2.valignrb
+    hexagon_S2_vcnegh,                         // llvm.hexagon.S2.vcnegh
+    hexagon_S2_vcrotate,                       // llvm.hexagon.S2.vcrotate
+    hexagon_S2_vrcnegh,                        // llvm.hexagon.S2.vrcnegh
+    hexagon_S2_vrndpackwh,                     // llvm.hexagon.S2.vrndpackwh
+    hexagon_S2_vrndpackwhs,                    // llvm.hexagon.S2.vrndpackwhs
+    hexagon_S2_vsathb,                         // llvm.hexagon.S2.vsathb
+    hexagon_S2_vsathb_nopack,                  // llvm.hexagon.S2.vsathb.nopack
+    hexagon_S2_vsathub,                        // llvm.hexagon.S2.vsathub
+    hexagon_S2_vsathub_nopack,                 // llvm.hexagon.S2.vsathub.nopack
+    hexagon_S2_vsatwh,                         // llvm.hexagon.S2.vsatwh
+    hexagon_S2_vsatwh_nopack,                  // llvm.hexagon.S2.vsatwh.nopack
+    hexagon_S2_vsatwuh,                        // llvm.hexagon.S2.vsatwuh
+    hexagon_S2_vsatwuh_nopack,                 // llvm.hexagon.S2.vsatwuh.nopack
+    hexagon_S2_vsplatrb,                       // llvm.hexagon.S2.vsplatrb
+    hexagon_S2_vsplatrh,                       // llvm.hexagon.S2.vsplatrh
+    hexagon_S2_vspliceib,                      // llvm.hexagon.S2.vspliceib
+    hexagon_S2_vsplicerb,                      // llvm.hexagon.S2.vsplicerb
+    hexagon_S2_vsxtbh,                         // llvm.hexagon.S2.vsxtbh
+    hexagon_S2_vsxthw,                         // llvm.hexagon.S2.vsxthw
+    hexagon_S2_vtrunehb,                       // llvm.hexagon.S2.vtrunehb
+    hexagon_S2_vtrunewh,                       // llvm.hexagon.S2.vtrunewh
+    hexagon_S2_vtrunohb,                       // llvm.hexagon.S2.vtrunohb
+    hexagon_S2_vtrunowh,                       // llvm.hexagon.S2.vtrunowh
+    hexagon_S2_vzxtbh,                         // llvm.hexagon.S2.vzxtbh
+    hexagon_S2_vzxthw,                         // llvm.hexagon.S2.vzxthw
+    hexagon_S4_addaddi,                        // llvm.hexagon.S4.addaddi
+    hexagon_S4_addi_asl_ri,                    // llvm.hexagon.S4.addi.asl.ri
+    hexagon_S4_addi_lsr_ri,                    // llvm.hexagon.S4.addi.lsr.ri
+    hexagon_S4_andi_asl_ri,                    // llvm.hexagon.S4.andi.asl.ri
+    hexagon_S4_andi_lsr_ri,                    // llvm.hexagon.S4.andi.lsr.ri
+    hexagon_S4_clbaddi,                        // llvm.hexagon.S4.clbaddi
+    hexagon_S4_clbpaddi,                       // llvm.hexagon.S4.clbpaddi
+    hexagon_S4_clbpnorm,                       // llvm.hexagon.S4.clbpnorm
+    hexagon_S4_extract,                        // llvm.hexagon.S4.extract
+    hexagon_S4_extract_rp,                     // llvm.hexagon.S4.extract.rp
+    hexagon_S4_extractp,                       // llvm.hexagon.S4.extractp
+    hexagon_S4_extractp_rp,                    // llvm.hexagon.S4.extractp.rp
+    hexagon_S4_lsli,                           // llvm.hexagon.S4.lsli
+    hexagon_S4_ntstbit_i,                      // llvm.hexagon.S4.ntstbit.i
+    hexagon_S4_ntstbit_r,                      // llvm.hexagon.S4.ntstbit.r
+    hexagon_S4_or_andi,                        // llvm.hexagon.S4.or.andi
+    hexagon_S4_or_andix,                       // llvm.hexagon.S4.or.andix
+    hexagon_S4_or_ori,                         // llvm.hexagon.S4.or.ori
+    hexagon_S4_ori_asl_ri,                     // llvm.hexagon.S4.ori.asl.ri
+    hexagon_S4_ori_lsr_ri,                     // llvm.hexagon.S4.ori.lsr.ri
+    hexagon_S4_parity,                         // llvm.hexagon.S4.parity
+    hexagon_S4_stored_locked,                  // llvm.hexagon.S4.stored.locked
+    hexagon_S4_subaddi,                        // llvm.hexagon.S4.subaddi
+    hexagon_S4_subi_asl_ri,                    // llvm.hexagon.S4.subi.asl.ri
+    hexagon_S4_subi_lsr_ri,                    // llvm.hexagon.S4.subi.lsr.ri
+    hexagon_S4_vrcrotate,                      // llvm.hexagon.S4.vrcrotate
+    hexagon_S4_vrcrotate_acc,                  // llvm.hexagon.S4.vrcrotate.acc
+    hexagon_S4_vxaddsubh,                      // llvm.hexagon.S4.vxaddsubh
+    hexagon_S4_vxaddsubhr,                     // llvm.hexagon.S4.vxaddsubhr
+    hexagon_S4_vxaddsubw,                      // llvm.hexagon.S4.vxaddsubw
+    hexagon_S4_vxsubaddh,                      // llvm.hexagon.S4.vxsubaddh
+    hexagon_S4_vxsubaddhr,                     // llvm.hexagon.S4.vxsubaddhr
+    hexagon_S4_vxsubaddw,                      // llvm.hexagon.S4.vxsubaddw
+    hexagon_S5_asrhub_rnd_sat_goodsyntax,      // llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
+    hexagon_S5_asrhub_sat,                     // llvm.hexagon.S5.asrhub.sat
+    hexagon_S5_popcountp,                      // llvm.hexagon.S5.popcountp
+    hexagon_S5_vasrhrnd_goodsyntax,            // llvm.hexagon.S5.vasrhrnd.goodsyntax
+    hexagon_S6_rol_i_p,                        // llvm.hexagon.S6.rol.i.p
+    hexagon_S6_rol_i_p_acc,                    // llvm.hexagon.S6.rol.i.p.acc
+    hexagon_S6_rol_i_p_and,                    // llvm.hexagon.S6.rol.i.p.and
+    hexagon_S6_rol_i_p_nac,                    // llvm.hexagon.S6.rol.i.p.nac
+    hexagon_S6_rol_i_p_or,                     // llvm.hexagon.S6.rol.i.p.or
+    hexagon_S6_rol_i_p_xacc,                   // llvm.hexagon.S6.rol.i.p.xacc
+    hexagon_S6_rol_i_r,                        // llvm.hexagon.S6.rol.i.r
+    hexagon_S6_rol_i_r_acc,                    // llvm.hexagon.S6.rol.i.r.acc
+    hexagon_S6_rol_i_r_and,                    // llvm.hexagon.S6.rol.i.r.and
+    hexagon_S6_rol_i_r_nac,                    // llvm.hexagon.S6.rol.i.r.nac
+    hexagon_S6_rol_i_r_or,                     // llvm.hexagon.S6.rol.i.r.or
+    hexagon_S6_rol_i_r_xacc,                   // llvm.hexagon.S6.rol.i.r.xacc
+    hexagon_S6_vsplatrbp,                      // llvm.hexagon.S6.vsplatrbp
+    hexagon_S6_vtrunehb_ppp,                   // llvm.hexagon.S6.vtrunehb.ppp
+    hexagon_S6_vtrunohb_ppp,                   // llvm.hexagon.S6.vtrunohb.ppp
+    hexagon_V6_extractw,                       // llvm.hexagon.V6.extractw
+    hexagon_V6_extractw_128B,                  // llvm.hexagon.V6.extractw.128B
+    hexagon_V6_hi,                             // llvm.hexagon.V6.hi
+    hexagon_V6_hi_128B,                        // llvm.hexagon.V6.hi.128B
+    hexagon_V6_lo,                             // llvm.hexagon.V6.lo
+    hexagon_V6_lo_128B,                        // llvm.hexagon.V6.lo.128B
+    hexagon_V6_lvsplatb,                       // llvm.hexagon.V6.lvsplatb
+    hexagon_V6_lvsplatb_128B,                  // llvm.hexagon.V6.lvsplatb.128B
+    hexagon_V6_lvsplath,                       // llvm.hexagon.V6.lvsplath
+    hexagon_V6_lvsplath_128B,                  // llvm.hexagon.V6.lvsplath.128B
+    hexagon_V6_lvsplatw,                       // llvm.hexagon.V6.lvsplatw
+    hexagon_V6_lvsplatw_128B,                  // llvm.hexagon.V6.lvsplatw.128B
+    hexagon_V6_pred_and,                       // llvm.hexagon.V6.pred.and
+    hexagon_V6_pred_and_128B,                  // llvm.hexagon.V6.pred.and.128B
+    hexagon_V6_pred_and_n,                     // llvm.hexagon.V6.pred.and.n
+    hexagon_V6_pred_and_n_128B,                // llvm.hexagon.V6.pred.and.n.128B
+    hexagon_V6_pred_not,                       // llvm.hexagon.V6.pred.not
+    hexagon_V6_pred_not_128B,                  // llvm.hexagon.V6.pred.not.128B
+    hexagon_V6_pred_or,                        // llvm.hexagon.V6.pred.or
+    hexagon_V6_pred_or_128B,                   // llvm.hexagon.V6.pred.or.128B
+    hexagon_V6_pred_or_n,                      // llvm.hexagon.V6.pred.or.n
+    hexagon_V6_pred_or_n_128B,                 // llvm.hexagon.V6.pred.or.n.128B
+    hexagon_V6_pred_scalar2,                   // llvm.hexagon.V6.pred.scalar2
+    hexagon_V6_pred_scalar2_128B,              // llvm.hexagon.V6.pred.scalar2.128B
+    hexagon_V6_pred_scalar2v2,                 // llvm.hexagon.V6.pred.scalar2v2
+    hexagon_V6_pred_scalar2v2_128B,            // llvm.hexagon.V6.pred.scalar2v2.128B
+    hexagon_V6_pred_xor,                       // llvm.hexagon.V6.pred.xor
+    hexagon_V6_pred_xor_128B,                  // llvm.hexagon.V6.pred.xor.128B
+    hexagon_V6_shuffeqh,                       // llvm.hexagon.V6.shuffeqh
+    hexagon_V6_shuffeqh_128B,                  // llvm.hexagon.V6.shuffeqh.128B
+    hexagon_V6_shuffeqw,                       // llvm.hexagon.V6.shuffeqw
+    hexagon_V6_shuffeqw_128B,                  // llvm.hexagon.V6.shuffeqw.128B
+    hexagon_V6_vS32b_nqpred_ai,                // llvm.hexagon.V6.vS32b.nqpred.ai
+    hexagon_V6_vS32b_nqpred_ai_128B,           // llvm.hexagon.V6.vS32b.nqpred.ai.128B
+    hexagon_V6_vS32b_nt_nqpred_ai,             // llvm.hexagon.V6.vS32b.nt.nqpred.ai
+    hexagon_V6_vS32b_nt_nqpred_ai_128B,        // llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B
+    hexagon_V6_vS32b_nt_qpred_ai,              // llvm.hexagon.V6.vS32b.nt.qpred.ai
+    hexagon_V6_vS32b_nt_qpred_ai_128B,         // llvm.hexagon.V6.vS32b.nt.qpred.ai.128B
+    hexagon_V6_vS32b_qpred_ai,                 // llvm.hexagon.V6.vS32b.qpred.ai
+    hexagon_V6_vS32b_qpred_ai_128B,            // llvm.hexagon.V6.vS32b.qpred.ai.128B
+    hexagon_V6_vabsb,                          // llvm.hexagon.V6.vabsb
+    hexagon_V6_vabsb_128B,                     // llvm.hexagon.V6.vabsb.128B
+    hexagon_V6_vabsb_sat,                      // llvm.hexagon.V6.vabsb.sat
+    hexagon_V6_vabsb_sat_128B,                 // llvm.hexagon.V6.vabsb.sat.128B
+    hexagon_V6_vabsdiffh,                      // llvm.hexagon.V6.vabsdiffh
+    hexagon_V6_vabsdiffh_128B,                 // llvm.hexagon.V6.vabsdiffh.128B
+    hexagon_V6_vabsdiffub,                     // llvm.hexagon.V6.vabsdiffub
+    hexagon_V6_vabsdiffub_128B,                // llvm.hexagon.V6.vabsdiffub.128B
+    hexagon_V6_vabsdiffuh,                     // llvm.hexagon.V6.vabsdiffuh
+    hexagon_V6_vabsdiffuh_128B,                // llvm.hexagon.V6.vabsdiffuh.128B
+    hexagon_V6_vabsdiffw,                      // llvm.hexagon.V6.vabsdiffw
+    hexagon_V6_vabsdiffw_128B,                 // llvm.hexagon.V6.vabsdiffw.128B
+    hexagon_V6_vabsh,                          // llvm.hexagon.V6.vabsh
+    hexagon_V6_vabsh_128B,                     // llvm.hexagon.V6.vabsh.128B
+    hexagon_V6_vabsh_sat,                      // llvm.hexagon.V6.vabsh.sat
+    hexagon_V6_vabsh_sat_128B,                 // llvm.hexagon.V6.vabsh.sat.128B
+    hexagon_V6_vabsw,                          // llvm.hexagon.V6.vabsw
+    hexagon_V6_vabsw_128B,                     // llvm.hexagon.V6.vabsw.128B
+    hexagon_V6_vabsw_sat,                      // llvm.hexagon.V6.vabsw.sat
+    hexagon_V6_vabsw_sat_128B,                 // llvm.hexagon.V6.vabsw.sat.128B
+    hexagon_V6_vaddb,                          // llvm.hexagon.V6.vaddb
+    hexagon_V6_vaddb_128B,                     // llvm.hexagon.V6.vaddb.128B
+    hexagon_V6_vaddb_dv,                       // llvm.hexagon.V6.vaddb.dv
+    hexagon_V6_vaddb_dv_128B,                  // llvm.hexagon.V6.vaddb.dv.128B
+    hexagon_V6_vaddbnq,                        // llvm.hexagon.V6.vaddbnq
+    hexagon_V6_vaddbnq_128B,                   // llvm.hexagon.V6.vaddbnq.128B
+    hexagon_V6_vaddbq,                         // llvm.hexagon.V6.vaddbq
+    hexagon_V6_vaddbq_128B,                    // llvm.hexagon.V6.vaddbq.128B
+    hexagon_V6_vaddbsat,                       // llvm.hexagon.V6.vaddbsat
+    hexagon_V6_vaddbsat_128B,                  // llvm.hexagon.V6.vaddbsat.128B
+    hexagon_V6_vaddbsat_dv,                    // llvm.hexagon.V6.vaddbsat.dv
+    hexagon_V6_vaddbsat_dv_128B,               // llvm.hexagon.V6.vaddbsat.dv.128B
+    hexagon_V6_vaddcarry,                      // llvm.hexagon.V6.vaddcarry
+    hexagon_V6_vaddcarry_128B,                 // llvm.hexagon.V6.vaddcarry.128B
+    hexagon_V6_vaddclbh,                       // llvm.hexagon.V6.vaddclbh
+    hexagon_V6_vaddclbh_128B,                  // llvm.hexagon.V6.vaddclbh.128B
+    hexagon_V6_vaddclbw,                       // llvm.hexagon.V6.vaddclbw
+    hexagon_V6_vaddclbw_128B,                  // llvm.hexagon.V6.vaddclbw.128B
+    hexagon_V6_vaddh,                          // llvm.hexagon.V6.vaddh
+    hexagon_V6_vaddh_128B,                     // llvm.hexagon.V6.vaddh.128B
+    hexagon_V6_vaddh_dv,                       // llvm.hexagon.V6.vaddh.dv
+    hexagon_V6_vaddh_dv_128B,                  // llvm.hexagon.V6.vaddh.dv.128B
+    hexagon_V6_vaddhnq,                        // llvm.hexagon.V6.vaddhnq
+    hexagon_V6_vaddhnq_128B,                   // llvm.hexagon.V6.vaddhnq.128B
+    hexagon_V6_vaddhq,                         // llvm.hexagon.V6.vaddhq
+    hexagon_V6_vaddhq_128B,                    // llvm.hexagon.V6.vaddhq.128B
+    hexagon_V6_vaddhsat,                       // llvm.hexagon.V6.vaddhsat
+    hexagon_V6_vaddhsat_128B,                  // llvm.hexagon.V6.vaddhsat.128B
+    hexagon_V6_vaddhsat_dv,                    // llvm.hexagon.V6.vaddhsat.dv
+    hexagon_V6_vaddhsat_dv_128B,               // llvm.hexagon.V6.vaddhsat.dv.128B
+    hexagon_V6_vaddhw,                         // llvm.hexagon.V6.vaddhw
+    hexagon_V6_vaddhw_128B,                    // llvm.hexagon.V6.vaddhw.128B
+    hexagon_V6_vaddhw_acc,                     // llvm.hexagon.V6.vaddhw.acc
+    hexagon_V6_vaddhw_acc_128B,                // llvm.hexagon.V6.vaddhw.acc.128B
+    hexagon_V6_vaddubh,                        // llvm.hexagon.V6.vaddubh
+    hexagon_V6_vaddubh_128B,                   // llvm.hexagon.V6.vaddubh.128B
+    hexagon_V6_vaddubh_acc,                    // llvm.hexagon.V6.vaddubh.acc
+    hexagon_V6_vaddubh_acc_128B,               // llvm.hexagon.V6.vaddubh.acc.128B
+    hexagon_V6_vaddubsat,                      // llvm.hexagon.V6.vaddubsat
+    hexagon_V6_vaddubsat_128B,                 // llvm.hexagon.V6.vaddubsat.128B
+    hexagon_V6_vaddubsat_dv,                   // llvm.hexagon.V6.vaddubsat.dv
+    hexagon_V6_vaddubsat_dv_128B,              // llvm.hexagon.V6.vaddubsat.dv.128B
+    hexagon_V6_vaddububb_sat,                  // llvm.hexagon.V6.vaddububb.sat
+    hexagon_V6_vaddububb_sat_128B,             // llvm.hexagon.V6.vaddububb.sat.128B
+    hexagon_V6_vadduhsat,                      // llvm.hexagon.V6.vadduhsat
+    hexagon_V6_vadduhsat_128B,                 // llvm.hexagon.V6.vadduhsat.128B
+    hexagon_V6_vadduhsat_dv,                   // llvm.hexagon.V6.vadduhsat.dv
+    hexagon_V6_vadduhsat_dv_128B,              // llvm.hexagon.V6.vadduhsat.dv.128B
+    hexagon_V6_vadduhw,                        // llvm.hexagon.V6.vadduhw
+    hexagon_V6_vadduhw_128B,                   // llvm.hexagon.V6.vadduhw.128B
+    hexagon_V6_vadduhw_acc,                    // llvm.hexagon.V6.vadduhw.acc
+    hexagon_V6_vadduhw_acc_128B,               // llvm.hexagon.V6.vadduhw.acc.128B
+    hexagon_V6_vadduwsat,                      // llvm.hexagon.V6.vadduwsat
+    hexagon_V6_vadduwsat_128B,                 // llvm.hexagon.V6.vadduwsat.128B
+    hexagon_V6_vadduwsat_dv,                   // llvm.hexagon.V6.vadduwsat.dv
+    hexagon_V6_vadduwsat_dv_128B,              // llvm.hexagon.V6.vadduwsat.dv.128B
+    hexagon_V6_vaddw,                          // llvm.hexagon.V6.vaddw
+    hexagon_V6_vaddw_128B,                     // llvm.hexagon.V6.vaddw.128B
+    hexagon_V6_vaddw_dv,                       // llvm.hexagon.V6.vaddw.dv
+    hexagon_V6_vaddw_dv_128B,                  // llvm.hexagon.V6.vaddw.dv.128B
+    hexagon_V6_vaddwnq,                        // llvm.hexagon.V6.vaddwnq
+    hexagon_V6_vaddwnq_128B,                   // llvm.hexagon.V6.vaddwnq.128B
+    hexagon_V6_vaddwq,                         // llvm.hexagon.V6.vaddwq
+    hexagon_V6_vaddwq_128B,                    // llvm.hexagon.V6.vaddwq.128B
+    hexagon_V6_vaddwsat,                       // llvm.hexagon.V6.vaddwsat
+    hexagon_V6_vaddwsat_128B,                  // llvm.hexagon.V6.vaddwsat.128B
+    hexagon_V6_vaddwsat_dv,                    // llvm.hexagon.V6.vaddwsat.dv
+    hexagon_V6_vaddwsat_dv_128B,               // llvm.hexagon.V6.vaddwsat.dv.128B
+    hexagon_V6_valignb,                        // llvm.hexagon.V6.valignb
+    hexagon_V6_valignb_128B,                   // llvm.hexagon.V6.valignb.128B
+    hexagon_V6_valignbi,                       // llvm.hexagon.V6.valignbi
+    hexagon_V6_valignbi_128B,                  // llvm.hexagon.V6.valignbi.128B
+    hexagon_V6_vand,                           // llvm.hexagon.V6.vand
+    hexagon_V6_vand_128B,                      // llvm.hexagon.V6.vand.128B
+    hexagon_V6_vandnqrt,                       // llvm.hexagon.V6.vandnqrt
+    hexagon_V6_vandnqrt_128B,                  // llvm.hexagon.V6.vandnqrt.128B
+    hexagon_V6_vandnqrt_acc,                   // llvm.hexagon.V6.vandnqrt.acc
+    hexagon_V6_vandnqrt_acc_128B,              // llvm.hexagon.V6.vandnqrt.acc.128B
+    hexagon_V6_vandqrt,                        // llvm.hexagon.V6.vandqrt
+    hexagon_V6_vandqrt_128B,                   // llvm.hexagon.V6.vandqrt.128B
+    hexagon_V6_vandqrt_acc,                    // llvm.hexagon.V6.vandqrt.acc
+    hexagon_V6_vandqrt_acc_128B,               // llvm.hexagon.V6.vandqrt.acc.128B
+    hexagon_V6_vandvnqv,                       // llvm.hexagon.V6.vandvnqv
+    hexagon_V6_vandvnqv_128B,                  // llvm.hexagon.V6.vandvnqv.128B
+    hexagon_V6_vandvqv,                        // llvm.hexagon.V6.vandvqv
+    hexagon_V6_vandvqv_128B,                   // llvm.hexagon.V6.vandvqv.128B
+    hexagon_V6_vandvrt,                        // llvm.hexagon.V6.vandvrt
+    hexagon_V6_vandvrt_128B,                   // llvm.hexagon.V6.vandvrt.128B
+    hexagon_V6_vandvrt_acc,                    // llvm.hexagon.V6.vandvrt.acc
+    hexagon_V6_vandvrt_acc_128B,               // llvm.hexagon.V6.vandvrt.acc.128B
+    hexagon_V6_vaslh,                          // llvm.hexagon.V6.vaslh
+    hexagon_V6_vaslh_128B,                     // llvm.hexagon.V6.vaslh.128B
+    hexagon_V6_vaslh_acc,                      // llvm.hexagon.V6.vaslh.acc
+    hexagon_V6_vaslh_acc_128B,                 // llvm.hexagon.V6.vaslh.acc.128B
+    hexagon_V6_vaslhv,                         // llvm.hexagon.V6.vaslhv
+    hexagon_V6_vaslhv_128B,                    // llvm.hexagon.V6.vaslhv.128B
+    hexagon_V6_vaslw,                          // llvm.hexagon.V6.vaslw
+    hexagon_V6_vaslw_128B,                     // llvm.hexagon.V6.vaslw.128B
+    hexagon_V6_vaslw_acc,                      // llvm.hexagon.V6.vaslw.acc
+    hexagon_V6_vaslw_acc_128B,                 // llvm.hexagon.V6.vaslw.acc.128B
+    hexagon_V6_vaslwv,                         // llvm.hexagon.V6.vaslwv
+    hexagon_V6_vaslwv_128B,                    // llvm.hexagon.V6.vaslwv.128B
+    hexagon_V6_vasrh,                          // llvm.hexagon.V6.vasrh
+    hexagon_V6_vasrh_128B,                     // llvm.hexagon.V6.vasrh.128B
+    hexagon_V6_vasrh_acc,                      // llvm.hexagon.V6.vasrh.acc
+    hexagon_V6_vasrh_acc_128B,                 // llvm.hexagon.V6.vasrh.acc.128B
+    hexagon_V6_vasrhbrndsat,                   // llvm.hexagon.V6.vasrhbrndsat
+    hexagon_V6_vasrhbrndsat_128B,              // llvm.hexagon.V6.vasrhbrndsat.128B
+    hexagon_V6_vasrhbsat,                      // llvm.hexagon.V6.vasrhbsat
+    hexagon_V6_vasrhbsat_128B,                 // llvm.hexagon.V6.vasrhbsat.128B
+    hexagon_V6_vasrhubrndsat,                  // llvm.hexagon.V6.vasrhubrndsat
+    hexagon_V6_vasrhubrndsat_128B,             // llvm.hexagon.V6.vasrhubrndsat.128B
+    hexagon_V6_vasrhubsat,                     // llvm.hexagon.V6.vasrhubsat
+    hexagon_V6_vasrhubsat_128B,                // llvm.hexagon.V6.vasrhubsat.128B
+    hexagon_V6_vasrhv,                         // llvm.hexagon.V6.vasrhv
+    hexagon_V6_vasrhv_128B,                    // llvm.hexagon.V6.vasrhv.128B
+    hexagon_V6_vasruhubrndsat,                 // llvm.hexagon.V6.vasruhubrndsat
+    hexagon_V6_vasruhubrndsat_128B,            // llvm.hexagon.V6.vasruhubrndsat.128B
+    hexagon_V6_vasruhubsat,                    // llvm.hexagon.V6.vasruhubsat
+    hexagon_V6_vasruhubsat_128B,               // llvm.hexagon.V6.vasruhubsat.128B
+    hexagon_V6_vasruwuhrndsat,                 // llvm.hexagon.V6.vasruwuhrndsat
+    hexagon_V6_vasruwuhrndsat_128B,            // llvm.hexagon.V6.vasruwuhrndsat.128B
+    hexagon_V6_vasruwuhsat,                    // llvm.hexagon.V6.vasruwuhsat
+    hexagon_V6_vasruwuhsat_128B,               // llvm.hexagon.V6.vasruwuhsat.128B
+    hexagon_V6_vasrw,                          // llvm.hexagon.V6.vasrw
+    hexagon_V6_vasrw_128B,                     // llvm.hexagon.V6.vasrw.128B
+    hexagon_V6_vasrw_acc,                      // llvm.hexagon.V6.vasrw.acc
+    hexagon_V6_vasrw_acc_128B,                 // llvm.hexagon.V6.vasrw.acc.128B
+    hexagon_V6_vasrwh,                         // llvm.hexagon.V6.vasrwh
+    hexagon_V6_vasrwh_128B,                    // llvm.hexagon.V6.vasrwh.128B
+    hexagon_V6_vasrwhrndsat,                   // llvm.hexagon.V6.vasrwhrndsat
+    hexagon_V6_vasrwhrndsat_128B,              // llvm.hexagon.V6.vasrwhrndsat.128B
+    hexagon_V6_vasrwhsat,                      // llvm.hexagon.V6.vasrwhsat
+    hexagon_V6_vasrwhsat_128B,                 // llvm.hexagon.V6.vasrwhsat.128B
+    hexagon_V6_vasrwuhrndsat,                  // llvm.hexagon.V6.vasrwuhrndsat
+    hexagon_V6_vasrwuhrndsat_128B,             // llvm.hexagon.V6.vasrwuhrndsat.128B
+    hexagon_V6_vasrwuhsat,                     // llvm.hexagon.V6.vasrwuhsat
+    hexagon_V6_vasrwuhsat_128B,                // llvm.hexagon.V6.vasrwuhsat.128B
+    hexagon_V6_vasrwv,                         // llvm.hexagon.V6.vasrwv
+    hexagon_V6_vasrwv_128B,                    // llvm.hexagon.V6.vasrwv.128B
+    hexagon_V6_vassign,                        // llvm.hexagon.V6.vassign
+    hexagon_V6_vassign_128B,                   // llvm.hexagon.V6.vassign.128B
+    hexagon_V6_vassignp,                       // llvm.hexagon.V6.vassignp
+    hexagon_V6_vassignp_128B,                  // llvm.hexagon.V6.vassignp.128B
+    hexagon_V6_vavgb,                          // llvm.hexagon.V6.vavgb
+    hexagon_V6_vavgb_128B,                     // llvm.hexagon.V6.vavgb.128B
+    hexagon_V6_vavgbrnd,                       // llvm.hexagon.V6.vavgbrnd
+    hexagon_V6_vavgbrnd_128B,                  // llvm.hexagon.V6.vavgbrnd.128B
+    hexagon_V6_vavgh,                          // llvm.hexagon.V6.vavgh
+    hexagon_V6_vavgh_128B,                     // llvm.hexagon.V6.vavgh.128B
+    hexagon_V6_vavghrnd,                       // llvm.hexagon.V6.vavghrnd
+    hexagon_V6_vavghrnd_128B,                  // llvm.hexagon.V6.vavghrnd.128B
+    hexagon_V6_vavgub,                         // llvm.hexagon.V6.vavgub
+    hexagon_V6_vavgub_128B,                    // llvm.hexagon.V6.vavgub.128B
+    hexagon_V6_vavgubrnd,                      // llvm.hexagon.V6.vavgubrnd
+    hexagon_V6_vavgubrnd_128B,                 // llvm.hexagon.V6.vavgubrnd.128B
+    hexagon_V6_vavguh,                         // llvm.hexagon.V6.vavguh
+    hexagon_V6_vavguh_128B,                    // llvm.hexagon.V6.vavguh.128B
+    hexagon_V6_vavguhrnd,                      // llvm.hexagon.V6.vavguhrnd
+    hexagon_V6_vavguhrnd_128B,                 // llvm.hexagon.V6.vavguhrnd.128B
+    hexagon_V6_vavguw,                         // llvm.hexagon.V6.vavguw
+    hexagon_V6_vavguw_128B,                    // llvm.hexagon.V6.vavguw.128B
+    hexagon_V6_vavguwrnd,                      // llvm.hexagon.V6.vavguwrnd
+    hexagon_V6_vavguwrnd_128B,                 // llvm.hexagon.V6.vavguwrnd.128B
+    hexagon_V6_vavgw,                          // llvm.hexagon.V6.vavgw
+    hexagon_V6_vavgw_128B,                     // llvm.hexagon.V6.vavgw.128B
+    hexagon_V6_vavgwrnd,                       // llvm.hexagon.V6.vavgwrnd
+    hexagon_V6_vavgwrnd_128B,                  // llvm.hexagon.V6.vavgwrnd.128B
+    hexagon_V6_vcl0h,                          // llvm.hexagon.V6.vcl0h
+    hexagon_V6_vcl0h_128B,                     // llvm.hexagon.V6.vcl0h.128B
+    hexagon_V6_vcl0w,                          // llvm.hexagon.V6.vcl0w
+    hexagon_V6_vcl0w_128B,                     // llvm.hexagon.V6.vcl0w.128B
+    hexagon_V6_vcombine,                       // llvm.hexagon.V6.vcombine
+    hexagon_V6_vcombine_128B,                  // llvm.hexagon.V6.vcombine.128B
+    hexagon_V6_vd0,                            // llvm.hexagon.V6.vd0
+    hexagon_V6_vd0_128B,                       // llvm.hexagon.V6.vd0.128B
+    hexagon_V6_vdd0,                           // llvm.hexagon.V6.vdd0
+    hexagon_V6_vdd0_128B,                      // llvm.hexagon.V6.vdd0.128B
+    hexagon_V6_vdealb,                         // llvm.hexagon.V6.vdealb
+    hexagon_V6_vdealb_128B,                    // llvm.hexagon.V6.vdealb.128B
+    hexagon_V6_vdealb4w,                       // llvm.hexagon.V6.vdealb4w
+    hexagon_V6_vdealb4w_128B,                  // llvm.hexagon.V6.vdealb4w.128B
+    hexagon_V6_vdealh,                         // llvm.hexagon.V6.vdealh
+    hexagon_V6_vdealh_128B,                    // llvm.hexagon.V6.vdealh.128B
+    hexagon_V6_vdealvdd,                       // llvm.hexagon.V6.vdealvdd
+    hexagon_V6_vdealvdd_128B,                  // llvm.hexagon.V6.vdealvdd.128B
+    hexagon_V6_vdelta,                         // llvm.hexagon.V6.vdelta
+    hexagon_V6_vdelta_128B,                    // llvm.hexagon.V6.vdelta.128B
+    hexagon_V6_vdmpybus,                       // llvm.hexagon.V6.vdmpybus
+    hexagon_V6_vdmpybus_128B,                  // llvm.hexagon.V6.vdmpybus.128B
+    hexagon_V6_vdmpybus_acc,                   // llvm.hexagon.V6.vdmpybus.acc
+    hexagon_V6_vdmpybus_acc_128B,              // llvm.hexagon.V6.vdmpybus.acc.128B
+    hexagon_V6_vdmpybus_dv,                    // llvm.hexagon.V6.vdmpybus.dv
+    hexagon_V6_vdmpybus_dv_128B,               // llvm.hexagon.V6.vdmpybus.dv.128B
+    hexagon_V6_vdmpybus_dv_acc,                // llvm.hexagon.V6.vdmpybus.dv.acc
+    hexagon_V6_vdmpybus_dv_acc_128B,           // llvm.hexagon.V6.vdmpybus.dv.acc.128B
+    hexagon_V6_vdmpyhb,                        // llvm.hexagon.V6.vdmpyhb
+    hexagon_V6_vdmpyhb_128B,                   // llvm.hexagon.V6.vdmpyhb.128B
+    hexagon_V6_vdmpyhb_acc,                    // llvm.hexagon.V6.vdmpyhb.acc
+    hexagon_V6_vdmpyhb_acc_128B,               // llvm.hexagon.V6.vdmpyhb.acc.128B
+    hexagon_V6_vdmpyhb_dv,                     // llvm.hexagon.V6.vdmpyhb.dv
+    hexagon_V6_vdmpyhb_dv_128B,                // llvm.hexagon.V6.vdmpyhb.dv.128B
+    hexagon_V6_vdmpyhb_dv_acc,                 // llvm.hexagon.V6.vdmpyhb.dv.acc
+    hexagon_V6_vdmpyhb_dv_acc_128B,            // llvm.hexagon.V6.vdmpyhb.dv.acc.128B
+    hexagon_V6_vdmpyhisat,                     // llvm.hexagon.V6.vdmpyhisat
+    hexagon_V6_vdmpyhisat_128B,                // llvm.hexagon.V6.vdmpyhisat.128B
+    hexagon_V6_vdmpyhisat_acc,                 // llvm.hexagon.V6.vdmpyhisat.acc
+    hexagon_V6_vdmpyhisat_acc_128B,            // llvm.hexagon.V6.vdmpyhisat.acc.128B
+    hexagon_V6_vdmpyhsat,                      // llvm.hexagon.V6.vdmpyhsat
+    hexagon_V6_vdmpyhsat_128B,                 // llvm.hexagon.V6.vdmpyhsat.128B
+    hexagon_V6_vdmpyhsat_acc,                  // llvm.hexagon.V6.vdmpyhsat.acc
+    hexagon_V6_vdmpyhsat_acc_128B,             // llvm.hexagon.V6.vdmpyhsat.acc.128B
+    hexagon_V6_vdmpyhsuisat,                   // llvm.hexagon.V6.vdmpyhsuisat
+    hexagon_V6_vdmpyhsuisat_128B,              // llvm.hexagon.V6.vdmpyhsuisat.128B
+    hexagon_V6_vdmpyhsuisat_acc,               // llvm.hexagon.V6.vdmpyhsuisat.acc
+    hexagon_V6_vdmpyhsuisat_acc_128B,          // llvm.hexagon.V6.vdmpyhsuisat.acc.128B
+    hexagon_V6_vdmpyhsusat,                    // llvm.hexagon.V6.vdmpyhsusat
+    hexagon_V6_vdmpyhsusat_128B,               // llvm.hexagon.V6.vdmpyhsusat.128B
+    hexagon_V6_vdmpyhsusat_acc,                // llvm.hexagon.V6.vdmpyhsusat.acc
+    hexagon_V6_vdmpyhsusat_acc_128B,           // llvm.hexagon.V6.vdmpyhsusat.acc.128B
+    hexagon_V6_vdmpyhvsat,                     // llvm.hexagon.V6.vdmpyhvsat
+    hexagon_V6_vdmpyhvsat_128B,                // llvm.hexagon.V6.vdmpyhvsat.128B
+    hexagon_V6_vdmpyhvsat_acc,                 // llvm.hexagon.V6.vdmpyhvsat.acc
+    hexagon_V6_vdmpyhvsat_acc_128B,            // llvm.hexagon.V6.vdmpyhvsat.acc.128B
+    hexagon_V6_vdsaduh,                        // llvm.hexagon.V6.vdsaduh
+    hexagon_V6_vdsaduh_128B,                   // llvm.hexagon.V6.vdsaduh.128B
+    hexagon_V6_vdsaduh_acc,                    // llvm.hexagon.V6.vdsaduh.acc
+    hexagon_V6_vdsaduh_acc_128B,               // llvm.hexagon.V6.vdsaduh.acc.128B
+    hexagon_V6_veqb,                           // llvm.hexagon.V6.veqb
+    hexagon_V6_veqb_128B,                      // llvm.hexagon.V6.veqb.128B
+    hexagon_V6_veqb_and,                       // llvm.hexagon.V6.veqb.and
+    hexagon_V6_veqb_and_128B,                  // llvm.hexagon.V6.veqb.and.128B
+    hexagon_V6_veqb_or,                        // llvm.hexagon.V6.veqb.or
+    hexagon_V6_veqb_or_128B,                   // llvm.hexagon.V6.veqb.or.128B
+    hexagon_V6_veqb_xor,                       // llvm.hexagon.V6.veqb.xor
+    hexagon_V6_veqb_xor_128B,                  // llvm.hexagon.V6.veqb.xor.128B
+    hexagon_V6_veqh,                           // llvm.hexagon.V6.veqh
+    hexagon_V6_veqh_128B,                      // llvm.hexagon.V6.veqh.128B
+    hexagon_V6_veqh_and,                       // llvm.hexagon.V6.veqh.and
+    hexagon_V6_veqh_and_128B,                  // llvm.hexagon.V6.veqh.and.128B
+    hexagon_V6_veqh_or,                        // llvm.hexagon.V6.veqh.or
+    hexagon_V6_veqh_or_128B,                   // llvm.hexagon.V6.veqh.or.128B
+    hexagon_V6_veqh_xor,                       // llvm.hexagon.V6.veqh.xor
+    hexagon_V6_veqh_xor_128B,                  // llvm.hexagon.V6.veqh.xor.128B
+    hexagon_V6_veqw,                           // llvm.hexagon.V6.veqw
+    hexagon_V6_veqw_128B,                      // llvm.hexagon.V6.veqw.128B
+    hexagon_V6_veqw_and,                       // llvm.hexagon.V6.veqw.and
+    hexagon_V6_veqw_and_128B,                  // llvm.hexagon.V6.veqw.and.128B
+    hexagon_V6_veqw_or,                        // llvm.hexagon.V6.veqw.or
+    hexagon_V6_veqw_or_128B,                   // llvm.hexagon.V6.veqw.or.128B
+    hexagon_V6_veqw_xor,                       // llvm.hexagon.V6.veqw.xor
+    hexagon_V6_veqw_xor_128B,                  // llvm.hexagon.V6.veqw.xor.128B
+    hexagon_V6_vgathermh,                      // llvm.hexagon.V6.vgathermh
+    hexagon_V6_vgathermh_128B,                 // llvm.hexagon.V6.vgathermh.128B
+    hexagon_V6_vgathermhq,                     // llvm.hexagon.V6.vgathermhq
+    hexagon_V6_vgathermhq_128B,                // llvm.hexagon.V6.vgathermhq.128B
+    hexagon_V6_vgathermhw,                     // llvm.hexagon.V6.vgathermhw
+    hexagon_V6_vgathermhw_128B,                // llvm.hexagon.V6.vgathermhw.128B
+    hexagon_V6_vgathermhwq,                    // llvm.hexagon.V6.vgathermhwq
+    hexagon_V6_vgathermhwq_128B,               // llvm.hexagon.V6.vgathermhwq.128B
+    hexagon_V6_vgathermw,                      // llvm.hexagon.V6.vgathermw
+    hexagon_V6_vgathermw_128B,                 // llvm.hexagon.V6.vgathermw.128B
+    hexagon_V6_vgathermwq,                     // llvm.hexagon.V6.vgathermwq
+    hexagon_V6_vgathermwq_128B,                // llvm.hexagon.V6.vgathermwq.128B
+    hexagon_V6_vgtb,                           // llvm.hexagon.V6.vgtb
+    hexagon_V6_vgtb_128B,                      // llvm.hexagon.V6.vgtb.128B
+    hexagon_V6_vgtb_and,                       // llvm.hexagon.V6.vgtb.and
+    hexagon_V6_vgtb_and_128B,                  // llvm.hexagon.V6.vgtb.and.128B
+    hexagon_V6_vgtb_or,                        // llvm.hexagon.V6.vgtb.or
+    hexagon_V6_vgtb_or_128B,                   // llvm.hexagon.V6.vgtb.or.128B
+    hexagon_V6_vgtb_xor,                       // llvm.hexagon.V6.vgtb.xor
+    hexagon_V6_vgtb_xor_128B,                  // llvm.hexagon.V6.vgtb.xor.128B
+    hexagon_V6_vgth,                           // llvm.hexagon.V6.vgth
+    hexagon_V6_vgth_128B,                      // llvm.hexagon.V6.vgth.128B
+    hexagon_V6_vgth_and,                       // llvm.hexagon.V6.vgth.and
+    hexagon_V6_vgth_and_128B,                  // llvm.hexagon.V6.vgth.and.128B
+    hexagon_V6_vgth_or,                        // llvm.hexagon.V6.vgth.or
+    hexagon_V6_vgth_or_128B,                   // llvm.hexagon.V6.vgth.or.128B
+    hexagon_V6_vgth_xor,                       // llvm.hexagon.V6.vgth.xor
+    hexagon_V6_vgth_xor_128B,                  // llvm.hexagon.V6.vgth.xor.128B
+    hexagon_V6_vgtub,                          // llvm.hexagon.V6.vgtub
+    hexagon_V6_vgtub_128B,                     // llvm.hexagon.V6.vgtub.128B
+    hexagon_V6_vgtub_and,                      // llvm.hexagon.V6.vgtub.and
+    hexagon_V6_vgtub_and_128B,                 // llvm.hexagon.V6.vgtub.and.128B
+    hexagon_V6_vgtub_or,                       // llvm.hexagon.V6.vgtub.or
+    hexagon_V6_vgtub_or_128B,                  // llvm.hexagon.V6.vgtub.or.128B
+    hexagon_V6_vgtub_xor,                      // llvm.hexagon.V6.vgtub.xor
+    hexagon_V6_vgtub_xor_128B,                 // llvm.hexagon.V6.vgtub.xor.128B
+    hexagon_V6_vgtuh,                          // llvm.hexagon.V6.vgtuh
+    hexagon_V6_vgtuh_128B,                     // llvm.hexagon.V6.vgtuh.128B
+    hexagon_V6_vgtuh_and,                      // llvm.hexagon.V6.vgtuh.and
+    hexagon_V6_vgtuh_and_128B,                 // llvm.hexagon.V6.vgtuh.and.128B
+    hexagon_V6_vgtuh_or,                       // llvm.hexagon.V6.vgtuh.or
+    hexagon_V6_vgtuh_or_128B,                  // llvm.hexagon.V6.vgtuh.or.128B
+    hexagon_V6_vgtuh_xor,                      // llvm.hexagon.V6.vgtuh.xor
+    hexagon_V6_vgtuh_xor_128B,                 // llvm.hexagon.V6.vgtuh.xor.128B
+    hexagon_V6_vgtuw,                          // llvm.hexagon.V6.vgtuw
+    hexagon_V6_vgtuw_128B,                     // llvm.hexagon.V6.vgtuw.128B
+    hexagon_V6_vgtuw_and,                      // llvm.hexagon.V6.vgtuw.and
+    hexagon_V6_vgtuw_and_128B,                 // llvm.hexagon.V6.vgtuw.and.128B
+    hexagon_V6_vgtuw_or,                       // llvm.hexagon.V6.vgtuw.or
+    hexagon_V6_vgtuw_or_128B,                  // llvm.hexagon.V6.vgtuw.or.128B
+    hexagon_V6_vgtuw_xor,                      // llvm.hexagon.V6.vgtuw.xor
+    hexagon_V6_vgtuw_xor_128B,                 // llvm.hexagon.V6.vgtuw.xor.128B
+    hexagon_V6_vgtw,                           // llvm.hexagon.V6.vgtw
+    hexagon_V6_vgtw_128B,                      // llvm.hexagon.V6.vgtw.128B
+    hexagon_V6_vgtw_and,                       // llvm.hexagon.V6.vgtw.and
+    hexagon_V6_vgtw_and_128B,                  // llvm.hexagon.V6.vgtw.and.128B
+    hexagon_V6_vgtw_or,                        // llvm.hexagon.V6.vgtw.or
+    hexagon_V6_vgtw_or_128B,                   // llvm.hexagon.V6.vgtw.or.128B
+    hexagon_V6_vgtw_xor,                       // llvm.hexagon.V6.vgtw.xor
+    hexagon_V6_vgtw_xor_128B,                  // llvm.hexagon.V6.vgtw.xor.128B
+    hexagon_V6_vinsertwr,                      // llvm.hexagon.V6.vinsertwr
+    hexagon_V6_vinsertwr_128B,                 // llvm.hexagon.V6.vinsertwr.128B
+    hexagon_V6_vlalignb,                       // llvm.hexagon.V6.vlalignb
+    hexagon_V6_vlalignb_128B,                  // llvm.hexagon.V6.vlalignb.128B
+    hexagon_V6_vlalignbi,                      // llvm.hexagon.V6.vlalignbi
+    hexagon_V6_vlalignbi_128B,                 // llvm.hexagon.V6.vlalignbi.128B
+    hexagon_V6_vlsrb,                          // llvm.hexagon.V6.vlsrb
+    hexagon_V6_vlsrb_128B,                     // llvm.hexagon.V6.vlsrb.128B
+    hexagon_V6_vlsrh,                          // llvm.hexagon.V6.vlsrh
+    hexagon_V6_vlsrh_128B,                     // llvm.hexagon.V6.vlsrh.128B
+    hexagon_V6_vlsrhv,                         // llvm.hexagon.V6.vlsrhv
+    hexagon_V6_vlsrhv_128B,                    // llvm.hexagon.V6.vlsrhv.128B
+    hexagon_V6_vlsrw,                          // llvm.hexagon.V6.vlsrw
+    hexagon_V6_vlsrw_128B,                     // llvm.hexagon.V6.vlsrw.128B
+    hexagon_V6_vlsrwv,                         // llvm.hexagon.V6.vlsrwv
+    hexagon_V6_vlsrwv_128B,                    // llvm.hexagon.V6.vlsrwv.128B
+    hexagon_V6_vlut4,                          // llvm.hexagon.V6.vlut4
+    hexagon_V6_vlut4_128B,                     // llvm.hexagon.V6.vlut4.128B
+    hexagon_V6_vlutvvb,                        // llvm.hexagon.V6.vlutvvb
+    hexagon_V6_vlutvvb_128B,                   // llvm.hexagon.V6.vlutvvb.128B
+    hexagon_V6_vlutvvb_nm,                     // llvm.hexagon.V6.vlutvvb.nm
+    hexagon_V6_vlutvvb_nm_128B,                // llvm.hexagon.V6.vlutvvb.nm.128B
+    hexagon_V6_vlutvvb_oracc,                  // llvm.hexagon.V6.vlutvvb.oracc
+    hexagon_V6_vlutvvb_oracc_128B,             // llvm.hexagon.V6.vlutvvb.oracc.128B
+    hexagon_V6_vlutvvb_oracci,                 // llvm.hexagon.V6.vlutvvb.oracci
+    hexagon_V6_vlutvvb_oracci_128B,            // llvm.hexagon.V6.vlutvvb.oracci.128B
+    hexagon_V6_vlutvvbi,                       // llvm.hexagon.V6.vlutvvbi
+    hexagon_V6_vlutvvbi_128B,                  // llvm.hexagon.V6.vlutvvbi.128B
+    hexagon_V6_vlutvwh,                        // llvm.hexagon.V6.vlutvwh
+    hexagon_V6_vlutvwh_128B,                   // llvm.hexagon.V6.vlutvwh.128B
+    hexagon_V6_vlutvwh_nm,                     // llvm.hexagon.V6.vlutvwh.nm
+    hexagon_V6_vlutvwh_nm_128B,                // llvm.hexagon.V6.vlutvwh.nm.128B
+    hexagon_V6_vlutvwh_oracc,                  // llvm.hexagon.V6.vlutvwh.oracc
+    hexagon_V6_vlutvwh_oracc_128B,             // llvm.hexagon.V6.vlutvwh.oracc.128B
+    hexagon_V6_vlutvwh_oracci,                 // llvm.hexagon.V6.vlutvwh.oracci
+    hexagon_V6_vlutvwh_oracci_128B,            // llvm.hexagon.V6.vlutvwh.oracci.128B
+    hexagon_V6_vlutvwhi,                       // llvm.hexagon.V6.vlutvwhi
+    hexagon_V6_vlutvwhi_128B,                  // llvm.hexagon.V6.vlutvwhi.128B
+    hexagon_V6_vmaskedstorenq,                 // llvm.hexagon.V6.vmaskedstorenq
+    hexagon_V6_vmaskedstorenq_128B,            // llvm.hexagon.V6.vmaskedstorenq.128B
+    hexagon_V6_vmaskedstorentnq,               // llvm.hexagon.V6.vmaskedstorentnq
+    hexagon_V6_vmaskedstorentnq_128B,          // llvm.hexagon.V6.vmaskedstorentnq.128B
+    hexagon_V6_vmaskedstorentq,                // llvm.hexagon.V6.vmaskedstorentq
+    hexagon_V6_vmaskedstorentq_128B,           // llvm.hexagon.V6.vmaskedstorentq.128B
+    hexagon_V6_vmaskedstoreq,                  // llvm.hexagon.V6.vmaskedstoreq
+    hexagon_V6_vmaskedstoreq_128B,             // llvm.hexagon.V6.vmaskedstoreq.128B
+    hexagon_V6_vmaxb,                          // llvm.hexagon.V6.vmaxb
+    hexagon_V6_vmaxb_128B,                     // llvm.hexagon.V6.vmaxb.128B
+    hexagon_V6_vmaxh,                          // llvm.hexagon.V6.vmaxh
+    hexagon_V6_vmaxh_128B,                     // llvm.hexagon.V6.vmaxh.128B
+    hexagon_V6_vmaxub,                         // llvm.hexagon.V6.vmaxub
+    hexagon_V6_vmaxub_128B,                    // llvm.hexagon.V6.vmaxub.128B
+    hexagon_V6_vmaxuh,                         // llvm.hexagon.V6.vmaxuh
+    hexagon_V6_vmaxuh_128B,                    // llvm.hexagon.V6.vmaxuh.128B
+    hexagon_V6_vmaxw,                          // llvm.hexagon.V6.vmaxw
+    hexagon_V6_vmaxw_128B,                     // llvm.hexagon.V6.vmaxw.128B
+    hexagon_V6_vminb,                          // llvm.hexagon.V6.vminb
+    hexagon_V6_vminb_128B,                     // llvm.hexagon.V6.vminb.128B
+    hexagon_V6_vminh,                          // llvm.hexagon.V6.vminh
+    hexagon_V6_vminh_128B,                     // llvm.hexagon.V6.vminh.128B
+    hexagon_V6_vminub,                         // llvm.hexagon.V6.vminub
+    hexagon_V6_vminub_128B,                    // llvm.hexagon.V6.vminub.128B
+    hexagon_V6_vminuh,                         // llvm.hexagon.V6.vminuh
+    hexagon_V6_vminuh_128B,                    // llvm.hexagon.V6.vminuh.128B
+    hexagon_V6_vminw,                          // llvm.hexagon.V6.vminw
+    hexagon_V6_vminw_128B,                     // llvm.hexagon.V6.vminw.128B
+    hexagon_V6_vmpabus,                        // llvm.hexagon.V6.vmpabus
+    hexagon_V6_vmpabus_128B,                   // llvm.hexagon.V6.vmpabus.128B
+    hexagon_V6_vmpabus_acc,                    // llvm.hexagon.V6.vmpabus.acc
+    hexagon_V6_vmpabus_acc_128B,               // llvm.hexagon.V6.vmpabus.acc.128B
+    hexagon_V6_vmpabusv,                       // llvm.hexagon.V6.vmpabusv
+    hexagon_V6_vmpabusv_128B,                  // llvm.hexagon.V6.vmpabusv.128B
+    hexagon_V6_vmpabuu,                        // llvm.hexagon.V6.vmpabuu
+    hexagon_V6_vmpabuu_128B,                   // llvm.hexagon.V6.vmpabuu.128B
+    hexagon_V6_vmpabuu_acc,                    // llvm.hexagon.V6.vmpabuu.acc
+    hexagon_V6_vmpabuu_acc_128B,               // llvm.hexagon.V6.vmpabuu.acc.128B
+    hexagon_V6_vmpabuuv,                       // llvm.hexagon.V6.vmpabuuv
+    hexagon_V6_vmpabuuv_128B,                  // llvm.hexagon.V6.vmpabuuv.128B
+    hexagon_V6_vmpahb,                         // llvm.hexagon.V6.vmpahb
+    hexagon_V6_vmpahb_128B,                    // llvm.hexagon.V6.vmpahb.128B
+    hexagon_V6_vmpahb_acc,                     // llvm.hexagon.V6.vmpahb.acc
+    hexagon_V6_vmpahb_acc_128B,                // llvm.hexagon.V6.vmpahb.acc.128B
+    hexagon_V6_vmpahhsat,                      // llvm.hexagon.V6.vmpahhsat
+    hexagon_V6_vmpahhsat_128B,                 // llvm.hexagon.V6.vmpahhsat.128B
+    hexagon_V6_vmpauhb,                        // llvm.hexagon.V6.vmpauhb
+    hexagon_V6_vmpauhb_128B,                   // llvm.hexagon.V6.vmpauhb.128B
+    hexagon_V6_vmpauhb_acc,                    // llvm.hexagon.V6.vmpauhb.acc
+    hexagon_V6_vmpauhb_acc_128B,               // llvm.hexagon.V6.vmpauhb.acc.128B
+    hexagon_V6_vmpauhuhsat,                    // llvm.hexagon.V6.vmpauhuhsat
+    hexagon_V6_vmpauhuhsat_128B,               // llvm.hexagon.V6.vmpauhuhsat.128B
+    hexagon_V6_vmpsuhuhsat,                    // llvm.hexagon.V6.vmpsuhuhsat
+    hexagon_V6_vmpsuhuhsat_128B,               // llvm.hexagon.V6.vmpsuhuhsat.128B
+    hexagon_V6_vmpybus,                        // llvm.hexagon.V6.vmpybus
+    hexagon_V6_vmpybus_128B,                   // llvm.hexagon.V6.vmpybus.128B
+    hexagon_V6_vmpybus_acc,                    // llvm.hexagon.V6.vmpybus.acc
+    hexagon_V6_vmpybus_acc_128B,               // llvm.hexagon.V6.vmpybus.acc.128B
+    hexagon_V6_vmpybusv,                       // llvm.hexagon.V6.vmpybusv
+    hexagon_V6_vmpybusv_128B,                  // llvm.hexagon.V6.vmpybusv.128B
+    hexagon_V6_vmpybusv_acc,                   // llvm.hexagon.V6.vmpybusv.acc
+    hexagon_V6_vmpybusv_acc_128B,              // llvm.hexagon.V6.vmpybusv.acc.128B
+    hexagon_V6_vmpybv,                         // llvm.hexagon.V6.vmpybv
+    hexagon_V6_vmpybv_128B,                    // llvm.hexagon.V6.vmpybv.128B
+    hexagon_V6_vmpybv_acc,                     // llvm.hexagon.V6.vmpybv.acc
+    hexagon_V6_vmpybv_acc_128B,                // llvm.hexagon.V6.vmpybv.acc.128B
+    hexagon_V6_vmpyewuh,                       // llvm.hexagon.V6.vmpyewuh
+    hexagon_V6_vmpyewuh_128B,                  // llvm.hexagon.V6.vmpyewuh.128B
+    hexagon_V6_vmpyewuh_64,                    // llvm.hexagon.V6.vmpyewuh.64
+    hexagon_V6_vmpyewuh_64_128B,               // llvm.hexagon.V6.vmpyewuh.64.128B
+    hexagon_V6_vmpyh,                          // llvm.hexagon.V6.vmpyh
+    hexagon_V6_vmpyh_128B,                     // llvm.hexagon.V6.vmpyh.128B
+    hexagon_V6_vmpyh_acc,                      // llvm.hexagon.V6.vmpyh.acc
+    hexagon_V6_vmpyh_acc_128B,                 // llvm.hexagon.V6.vmpyh.acc.128B
+    hexagon_V6_vmpyhsat_acc,                   // llvm.hexagon.V6.vmpyhsat.acc
+    hexagon_V6_vmpyhsat_acc_128B,              // llvm.hexagon.V6.vmpyhsat.acc.128B
+    hexagon_V6_vmpyhsrs,                       // llvm.hexagon.V6.vmpyhsrs
+    hexagon_V6_vmpyhsrs_128B,                  // llvm.hexagon.V6.vmpyhsrs.128B
+    hexagon_V6_vmpyhss,                        // llvm.hexagon.V6.vmpyhss
+    hexagon_V6_vmpyhss_128B,                   // llvm.hexagon.V6.vmpyhss.128B
+    hexagon_V6_vmpyhus,                        // llvm.hexagon.V6.vmpyhus
+    hexagon_V6_vmpyhus_128B,                   // llvm.hexagon.V6.vmpyhus.128B
+    hexagon_V6_vmpyhus_acc,                    // llvm.hexagon.V6.vmpyhus.acc
+    hexagon_V6_vmpyhus_acc_128B,               // llvm.hexagon.V6.vmpyhus.acc.128B
+    hexagon_V6_vmpyhv,                         // llvm.hexagon.V6.vmpyhv
+    hexagon_V6_vmpyhv_128B,                    // llvm.hexagon.V6.vmpyhv.128B
+    hexagon_V6_vmpyhv_acc,                     // llvm.hexagon.V6.vmpyhv.acc
+    hexagon_V6_vmpyhv_acc_128B,                // llvm.hexagon.V6.vmpyhv.acc.128B
+    hexagon_V6_vmpyhvsrs,                      // llvm.hexagon.V6.vmpyhvsrs
+    hexagon_V6_vmpyhvsrs_128B,                 // llvm.hexagon.V6.vmpyhvsrs.128B
+    hexagon_V6_vmpyieoh,                       // llvm.hexagon.V6.vmpyieoh
+    hexagon_V6_vmpyieoh_128B,                  // llvm.hexagon.V6.vmpyieoh.128B
+    hexagon_V6_vmpyiewh_acc,                   // llvm.hexagon.V6.vmpyiewh.acc
+    hexagon_V6_vmpyiewh_acc_128B,              // llvm.hexagon.V6.vmpyiewh.acc.128B
+    hexagon_V6_vmpyiewuh,                      // llvm.hexagon.V6.vmpyiewuh
+    hexagon_V6_vmpyiewuh_128B,                 // llvm.hexagon.V6.vmpyiewuh.128B
+    hexagon_V6_vmpyiewuh_acc,                  // llvm.hexagon.V6.vmpyiewuh.acc
+    hexagon_V6_vmpyiewuh_acc_128B,             // llvm.hexagon.V6.vmpyiewuh.acc.128B
+    hexagon_V6_vmpyih,                         // llvm.hexagon.V6.vmpyih
+    hexagon_V6_vmpyih_128B,                    // llvm.hexagon.V6.vmpyih.128B
+    hexagon_V6_vmpyih_acc,                     // llvm.hexagon.V6.vmpyih.acc
+    hexagon_V6_vmpyih_acc_128B,                // llvm.hexagon.V6.vmpyih.acc.128B
+    hexagon_V6_vmpyihb,                        // llvm.hexagon.V6.vmpyihb
+    hexagon_V6_vmpyihb_128B,                   // llvm.hexagon.V6.vmpyihb.128B
+    hexagon_V6_vmpyihb_acc,                    // llvm.hexagon.V6.vmpyihb.acc
+    hexagon_V6_vmpyihb_acc_128B,               // llvm.hexagon.V6.vmpyihb.acc.128B
+    hexagon_V6_vmpyiowh,                       // llvm.hexagon.V6.vmpyiowh
+    hexagon_V6_vmpyiowh_128B,                  // llvm.hexagon.V6.vmpyiowh.128B
+    hexagon_V6_vmpyiwb,                        // llvm.hexagon.V6.vmpyiwb
+    hexagon_V6_vmpyiwb_128B,                   // llvm.hexagon.V6.vmpyiwb.128B
+    hexagon_V6_vmpyiwb_acc,                    // llvm.hexagon.V6.vmpyiwb.acc
+    hexagon_V6_vmpyiwb_acc_128B,               // llvm.hexagon.V6.vmpyiwb.acc.128B
+    hexagon_V6_vmpyiwh,                        // llvm.hexagon.V6.vmpyiwh
+    hexagon_V6_vmpyiwh_128B,                   // llvm.hexagon.V6.vmpyiwh.128B
+    hexagon_V6_vmpyiwh_acc,                    // llvm.hexagon.V6.vmpyiwh.acc
+    hexagon_V6_vmpyiwh_acc_128B,               // llvm.hexagon.V6.vmpyiwh.acc.128B
+    hexagon_V6_vmpyiwub,                       // llvm.hexagon.V6.vmpyiwub
+    hexagon_V6_vmpyiwub_128B,                  // llvm.hexagon.V6.vmpyiwub.128B
+    hexagon_V6_vmpyiwub_acc,                   // llvm.hexagon.V6.vmpyiwub.acc
+    hexagon_V6_vmpyiwub_acc_128B,              // llvm.hexagon.V6.vmpyiwub.acc.128B
+    hexagon_V6_vmpyowh,                        // llvm.hexagon.V6.vmpyowh
+    hexagon_V6_vmpyowh_128B,                   // llvm.hexagon.V6.vmpyowh.128B
+    hexagon_V6_vmpyowh_64_acc,                 // llvm.hexagon.V6.vmpyowh.64.acc
+    hexagon_V6_vmpyowh_64_acc_128B,            // llvm.hexagon.V6.vmpyowh.64.acc.128B
+    hexagon_V6_vmpyowh_rnd,                    // llvm.hexagon.V6.vmpyowh.rnd
+    hexagon_V6_vmpyowh_rnd_128B,               // llvm.hexagon.V6.vmpyowh.rnd.128B
+    hexagon_V6_vmpyowh_rnd_sacc,               // llvm.hexagon.V6.vmpyowh.rnd.sacc
+    hexagon_V6_vmpyowh_rnd_sacc_128B,          // llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
+    hexagon_V6_vmpyowh_sacc,                   // llvm.hexagon.V6.vmpyowh.sacc
+    hexagon_V6_vmpyowh_sacc_128B,              // llvm.hexagon.V6.vmpyowh.sacc.128B
+    hexagon_V6_vmpyub,                         // llvm.hexagon.V6.vmpyub
+    hexagon_V6_vmpyub_128B,                    // llvm.hexagon.V6.vmpyub.128B
+    hexagon_V6_vmpyub_acc,                     // llvm.hexagon.V6.vmpyub.acc
+    hexagon_V6_vmpyub_acc_128B,                // llvm.hexagon.V6.vmpyub.acc.128B
+    hexagon_V6_vmpyubv,                        // llvm.hexagon.V6.vmpyubv
+    hexagon_V6_vmpyubv_128B,                   // llvm.hexagon.V6.vmpyubv.128B
+    hexagon_V6_vmpyubv_acc,                    // llvm.hexagon.V6.vmpyubv.acc
+    hexagon_V6_vmpyubv_acc_128B,               // llvm.hexagon.V6.vmpyubv.acc.128B
+    hexagon_V6_vmpyuh,                         // llvm.hexagon.V6.vmpyuh
+    hexagon_V6_vmpyuh_128B,                    // llvm.hexagon.V6.vmpyuh.128B
+    hexagon_V6_vmpyuh_acc,                     // llvm.hexagon.V6.vmpyuh.acc
+    hexagon_V6_vmpyuh_acc_128B,                // llvm.hexagon.V6.vmpyuh.acc.128B
+    hexagon_V6_vmpyuhe,                        // llvm.hexagon.V6.vmpyuhe
+    hexagon_V6_vmpyuhe_128B,                   // llvm.hexagon.V6.vmpyuhe.128B
+    hexagon_V6_vmpyuhe_acc,                    // llvm.hexagon.V6.vmpyuhe.acc
+    hexagon_V6_vmpyuhe_acc_128B,               // llvm.hexagon.V6.vmpyuhe.acc.128B
+    hexagon_V6_vmpyuhv,                        // llvm.hexagon.V6.vmpyuhv
+    hexagon_V6_vmpyuhv_128B,                   // llvm.hexagon.V6.vmpyuhv.128B
+    hexagon_V6_vmpyuhv_acc,                    // llvm.hexagon.V6.vmpyuhv.acc
+    hexagon_V6_vmpyuhv_acc_128B,               // llvm.hexagon.V6.vmpyuhv.acc.128B
+    hexagon_V6_vmux,                           // llvm.hexagon.V6.vmux
+    hexagon_V6_vmux_128B,                      // llvm.hexagon.V6.vmux.128B
+    hexagon_V6_vnavgb,                         // llvm.hexagon.V6.vnavgb
+    hexagon_V6_vnavgb_128B,                    // llvm.hexagon.V6.vnavgb.128B
+    hexagon_V6_vnavgh,                         // llvm.hexagon.V6.vnavgh
+    hexagon_V6_vnavgh_128B,                    // llvm.hexagon.V6.vnavgh.128B
+    hexagon_V6_vnavgub,                        // llvm.hexagon.V6.vnavgub
+    hexagon_V6_vnavgub_128B,                   // llvm.hexagon.V6.vnavgub.128B
+    hexagon_V6_vnavgw,                         // llvm.hexagon.V6.vnavgw
+    hexagon_V6_vnavgw_128B,                    // llvm.hexagon.V6.vnavgw.128B
+    hexagon_V6_vnormamth,                      // llvm.hexagon.V6.vnormamth
+    hexagon_V6_vnormamth_128B,                 // llvm.hexagon.V6.vnormamth.128B
+    hexagon_V6_vnormamtw,                      // llvm.hexagon.V6.vnormamtw
+    hexagon_V6_vnormamtw_128B,                 // llvm.hexagon.V6.vnormamtw.128B
+    hexagon_V6_vnot,                           // llvm.hexagon.V6.vnot
+    hexagon_V6_vnot_128B,                      // llvm.hexagon.V6.vnot.128B
+    hexagon_V6_vor,                            // llvm.hexagon.V6.vor
+    hexagon_V6_vor_128B,                       // llvm.hexagon.V6.vor.128B
+    hexagon_V6_vpackeb,                        // llvm.hexagon.V6.vpackeb
+    hexagon_V6_vpackeb_128B,                   // llvm.hexagon.V6.vpackeb.128B
+    hexagon_V6_vpackeh,                        // llvm.hexagon.V6.vpackeh
+    hexagon_V6_vpackeh_128B,                   // llvm.hexagon.V6.vpackeh.128B
+    hexagon_V6_vpackhb_sat,                    // llvm.hexagon.V6.vpackhb.sat
+    hexagon_V6_vpackhb_sat_128B,               // llvm.hexagon.V6.vpackhb.sat.128B
+    hexagon_V6_vpackhub_sat,                   // llvm.hexagon.V6.vpackhub.sat
+    hexagon_V6_vpackhub_sat_128B,              // llvm.hexagon.V6.vpackhub.sat.128B
+    hexagon_V6_vpackob,                        // llvm.hexagon.V6.vpackob
+    hexagon_V6_vpackob_128B,                   // llvm.hexagon.V6.vpackob.128B
+    hexagon_V6_vpackoh,                        // llvm.hexagon.V6.vpackoh
+    hexagon_V6_vpackoh_128B,                   // llvm.hexagon.V6.vpackoh.128B
+    hexagon_V6_vpackwh_sat,                    // llvm.hexagon.V6.vpackwh.sat
+    hexagon_V6_vpackwh_sat_128B,               // llvm.hexagon.V6.vpackwh.sat.128B
+    hexagon_V6_vpackwuh_sat,                   // llvm.hexagon.V6.vpackwuh.sat
+    hexagon_V6_vpackwuh_sat_128B,              // llvm.hexagon.V6.vpackwuh.sat.128B
+    hexagon_V6_vpopcounth,                     // llvm.hexagon.V6.vpopcounth
+    hexagon_V6_vpopcounth_128B,                // llvm.hexagon.V6.vpopcounth.128B
+    hexagon_V6_vprefixqb,                      // llvm.hexagon.V6.vprefixqb
+    hexagon_V6_vprefixqb_128B,                 // llvm.hexagon.V6.vprefixqb.128B
+    hexagon_V6_vprefixqh,                      // llvm.hexagon.V6.vprefixqh
+    hexagon_V6_vprefixqh_128B,                 // llvm.hexagon.V6.vprefixqh.128B
+    hexagon_V6_vprefixqw,                      // llvm.hexagon.V6.vprefixqw
+    hexagon_V6_vprefixqw_128B,                 // llvm.hexagon.V6.vprefixqw.128B
+    hexagon_V6_vrdelta,                        // llvm.hexagon.V6.vrdelta
+    hexagon_V6_vrdelta_128B,                   // llvm.hexagon.V6.vrdelta.128B
+    hexagon_V6_vrmpybub_rtt,                   // llvm.hexagon.V6.vrmpybub.rtt
+    hexagon_V6_vrmpybub_rtt_128B,              // llvm.hexagon.V6.vrmpybub.rtt.128B
+    hexagon_V6_vrmpybub_rtt_acc,               // llvm.hexagon.V6.vrmpybub.rtt.acc
+    hexagon_V6_vrmpybub_rtt_acc_128B,          // llvm.hexagon.V6.vrmpybub.rtt.acc.128B
+    hexagon_V6_vrmpybus,                       // llvm.hexagon.V6.vrmpybus
+    hexagon_V6_vrmpybus_128B,                  // llvm.hexagon.V6.vrmpybus.128B
+    hexagon_V6_vrmpybus_acc,                   // llvm.hexagon.V6.vrmpybus.acc
+    hexagon_V6_vrmpybus_acc_128B,              // llvm.hexagon.V6.vrmpybus.acc.128B
+    hexagon_V6_vrmpybusi,                      // llvm.hexagon.V6.vrmpybusi
+    hexagon_V6_vrmpybusi_128B,                 // llvm.hexagon.V6.vrmpybusi.128B
+    hexagon_V6_vrmpybusi_acc,                  // llvm.hexagon.V6.vrmpybusi.acc
+    hexagon_V6_vrmpybusi_acc_128B,             // llvm.hexagon.V6.vrmpybusi.acc.128B
+    hexagon_V6_vrmpybusv,                      // llvm.hexagon.V6.vrmpybusv
+    hexagon_V6_vrmpybusv_128B,                 // llvm.hexagon.V6.vrmpybusv.128B
+    hexagon_V6_vrmpybusv_acc,                  // llvm.hexagon.V6.vrmpybusv.acc
+    hexagon_V6_vrmpybusv_acc_128B,             // llvm.hexagon.V6.vrmpybusv.acc.128B
+    hexagon_V6_vrmpybv,                        // llvm.hexagon.V6.vrmpybv
+    hexagon_V6_vrmpybv_128B,                   // llvm.hexagon.V6.vrmpybv.128B
+    hexagon_V6_vrmpybv_acc,                    // llvm.hexagon.V6.vrmpybv.acc
+    hexagon_V6_vrmpybv_acc_128B,               // llvm.hexagon.V6.vrmpybv.acc.128B
+    hexagon_V6_vrmpyub,                        // llvm.hexagon.V6.vrmpyub
+    hexagon_V6_vrmpyub_128B,                   // llvm.hexagon.V6.vrmpyub.128B
+    hexagon_V6_vrmpyub_acc,                    // llvm.hexagon.V6.vrmpyub.acc
+    hexagon_V6_vrmpyub_acc_128B,               // llvm.hexagon.V6.vrmpyub.acc.128B
+    hexagon_V6_vrmpyub_rtt,                    // llvm.hexagon.V6.vrmpyub.rtt
+    hexagon_V6_vrmpyub_rtt_128B,               // llvm.hexagon.V6.vrmpyub.rtt.128B
+    hexagon_V6_vrmpyub_rtt_acc,                // llvm.hexagon.V6.vrmpyub.rtt.acc
+    hexagon_V6_vrmpyub_rtt_acc_128B,           // llvm.hexagon.V6.vrmpyub.rtt.acc.128B
+    hexagon_V6_vrmpyubi,                       // llvm.hexagon.V6.vrmpyubi
+    hexagon_V6_vrmpyubi_128B,                  // llvm.hexagon.V6.vrmpyubi.128B
+    hexagon_V6_vrmpyubi_acc,                   // llvm.hexagon.V6.vrmpyubi.acc
+    hexagon_V6_vrmpyubi_acc_128B,              // llvm.hexagon.V6.vrmpyubi.acc.128B
+    hexagon_V6_vrmpyubv,                       // llvm.hexagon.V6.vrmpyubv
+    hexagon_V6_vrmpyubv_128B,                  // llvm.hexagon.V6.vrmpyubv.128B
+    hexagon_V6_vrmpyubv_acc,                   // llvm.hexagon.V6.vrmpyubv.acc
+    hexagon_V6_vrmpyubv_acc_128B,              // llvm.hexagon.V6.vrmpyubv.acc.128B
+    hexagon_V6_vror,                           // llvm.hexagon.V6.vror
+    hexagon_V6_vror_128B,                      // llvm.hexagon.V6.vror.128B
+    hexagon_V6_vroundhb,                       // llvm.hexagon.V6.vroundhb
+    hexagon_V6_vroundhb_128B,                  // llvm.hexagon.V6.vroundhb.128B
+    hexagon_V6_vroundhub,                      // llvm.hexagon.V6.vroundhub
+    hexagon_V6_vroundhub_128B,                 // llvm.hexagon.V6.vroundhub.128B
+    hexagon_V6_vrounduhub,                     // llvm.hexagon.V6.vrounduhub
+    hexagon_V6_vrounduhub_128B,                // llvm.hexagon.V6.vrounduhub.128B
+    hexagon_V6_vrounduwuh,                     // llvm.hexagon.V6.vrounduwuh
+    hexagon_V6_vrounduwuh_128B,                // llvm.hexagon.V6.vrounduwuh.128B
+    hexagon_V6_vroundwh,                       // llvm.hexagon.V6.vroundwh
+    hexagon_V6_vroundwh_128B,                  // llvm.hexagon.V6.vroundwh.128B
+    hexagon_V6_vroundwuh,                      // llvm.hexagon.V6.vroundwuh
+    hexagon_V6_vroundwuh_128B,                 // llvm.hexagon.V6.vroundwuh.128B
+    hexagon_V6_vrsadubi,                       // llvm.hexagon.V6.vrsadubi
+    hexagon_V6_vrsadubi_128B,                  // llvm.hexagon.V6.vrsadubi.128B
+    hexagon_V6_vrsadubi_acc,                   // llvm.hexagon.V6.vrsadubi.acc
+    hexagon_V6_vrsadubi_acc_128B,              // llvm.hexagon.V6.vrsadubi.acc.128B
+    hexagon_V6_vsathub,                        // llvm.hexagon.V6.vsathub
+    hexagon_V6_vsathub_128B,                   // llvm.hexagon.V6.vsathub.128B
+    hexagon_V6_vsatuwuh,                       // llvm.hexagon.V6.vsatuwuh
+    hexagon_V6_vsatuwuh_128B,                  // llvm.hexagon.V6.vsatuwuh.128B
+    hexagon_V6_vsatwh,                         // llvm.hexagon.V6.vsatwh
+    hexagon_V6_vsatwh_128B,                    // llvm.hexagon.V6.vsatwh.128B
+    hexagon_V6_vsb,                            // llvm.hexagon.V6.vsb
+    hexagon_V6_vsb_128B,                       // llvm.hexagon.V6.vsb.128B
+    hexagon_V6_vscattermh,                     // llvm.hexagon.V6.vscattermh
+    hexagon_V6_vscattermh_128B,                // llvm.hexagon.V6.vscattermh.128B
+    hexagon_V6_vscattermh_add,                 // llvm.hexagon.V6.vscattermh.add
+    hexagon_V6_vscattermh_add_128B,            // llvm.hexagon.V6.vscattermh.add.128B
+    hexagon_V6_vscattermhq,                    // llvm.hexagon.V6.vscattermhq
+    hexagon_V6_vscattermhq_128B,               // llvm.hexagon.V6.vscattermhq.128B
+    hexagon_V6_vscattermhw,                    // llvm.hexagon.V6.vscattermhw
+    hexagon_V6_vscattermhw_128B,               // llvm.hexagon.V6.vscattermhw.128B
+    hexagon_V6_vscattermhw_add,                // llvm.hexagon.V6.vscattermhw.add
+    hexagon_V6_vscattermhw_add_128B,           // llvm.hexagon.V6.vscattermhw.add.128B
+    hexagon_V6_vscattermhwq,                   // llvm.hexagon.V6.vscattermhwq
+    hexagon_V6_vscattermhwq_128B,              // llvm.hexagon.V6.vscattermhwq.128B
+    hexagon_V6_vscattermw,                     // llvm.hexagon.V6.vscattermw
+    hexagon_V6_vscattermw_128B,                // llvm.hexagon.V6.vscattermw.128B
+    hexagon_V6_vscattermw_add,                 // llvm.hexagon.V6.vscattermw.add
+    hexagon_V6_vscattermw_add_128B,            // llvm.hexagon.V6.vscattermw.add.128B
+    hexagon_V6_vscattermwq,                    // llvm.hexagon.V6.vscattermwq
+    hexagon_V6_vscattermwq_128B,               // llvm.hexagon.V6.vscattermwq.128B
+    hexagon_V6_vsh,                            // llvm.hexagon.V6.vsh
+    hexagon_V6_vsh_128B,                       // llvm.hexagon.V6.vsh.128B
+    hexagon_V6_vshufeh,                        // llvm.hexagon.V6.vshufeh
+    hexagon_V6_vshufeh_128B,                   // llvm.hexagon.V6.vshufeh.128B
+    hexagon_V6_vshuffb,                        // llvm.hexagon.V6.vshuffb
+    hexagon_V6_vshuffb_128B,                   // llvm.hexagon.V6.vshuffb.128B
+    hexagon_V6_vshuffeb,                       // llvm.hexagon.V6.vshuffeb
+    hexagon_V6_vshuffeb_128B,                  // llvm.hexagon.V6.vshuffeb.128B
+    hexagon_V6_vshuffh,                        // llvm.hexagon.V6.vshuffh
+    hexagon_V6_vshuffh_128B,                   // llvm.hexagon.V6.vshuffh.128B
+    hexagon_V6_vshuffob,                       // llvm.hexagon.V6.vshuffob
+    hexagon_V6_vshuffob_128B,                  // llvm.hexagon.V6.vshuffob.128B
+    hexagon_V6_vshuffvdd,                      // llvm.hexagon.V6.vshuffvdd
+    hexagon_V6_vshuffvdd_128B,                 // llvm.hexagon.V6.vshuffvdd.128B
+    hexagon_V6_vshufoeb,                       // llvm.hexagon.V6.vshufoeb
+    hexagon_V6_vshufoeb_128B,                  // llvm.hexagon.V6.vshufoeb.128B
+    hexagon_V6_vshufoeh,                       // llvm.hexagon.V6.vshufoeh
+    hexagon_V6_vshufoeh_128B,                  // llvm.hexagon.V6.vshufoeh.128B
+    hexagon_V6_vshufoh,                        // llvm.hexagon.V6.vshufoh
+    hexagon_V6_vshufoh_128B,                   // llvm.hexagon.V6.vshufoh.128B
+    hexagon_V6_vsubb,                          // llvm.hexagon.V6.vsubb
+    hexagon_V6_vsubb_128B,                     // llvm.hexagon.V6.vsubb.128B
+    hexagon_V6_vsubb_dv,                       // llvm.hexagon.V6.vsubb.dv
+    hexagon_V6_vsubb_dv_128B,                  // llvm.hexagon.V6.vsubb.dv.128B
+    hexagon_V6_vsubbnq,                        // llvm.hexagon.V6.vsubbnq
+    hexagon_V6_vsubbnq_128B,                   // llvm.hexagon.V6.vsubbnq.128B
+    hexagon_V6_vsubbq,                         // llvm.hexagon.V6.vsubbq
+    hexagon_V6_vsubbq_128B,                    // llvm.hexagon.V6.vsubbq.128B
+    hexagon_V6_vsubbsat,                       // llvm.hexagon.V6.vsubbsat
+    hexagon_V6_vsubbsat_128B,                  // llvm.hexagon.V6.vsubbsat.128B
+    hexagon_V6_vsubbsat_dv,                    // llvm.hexagon.V6.vsubbsat.dv
+    hexagon_V6_vsubbsat_dv_128B,               // llvm.hexagon.V6.vsubbsat.dv.128B
+    hexagon_V6_vsubcarry,                      // llvm.hexagon.V6.vsubcarry
+    hexagon_V6_vsubcarry_128B,                 // llvm.hexagon.V6.vsubcarry.128B
+    hexagon_V6_vsubh,                          // llvm.hexagon.V6.vsubh
+    hexagon_V6_vsubh_128B,                     // llvm.hexagon.V6.vsubh.128B
+    hexagon_V6_vsubh_dv,                       // llvm.hexagon.V6.vsubh.dv
+    hexagon_V6_vsubh_dv_128B,                  // llvm.hexagon.V6.vsubh.dv.128B
+    hexagon_V6_vsubhnq,                        // llvm.hexagon.V6.vsubhnq
+    hexagon_V6_vsubhnq_128B,                   // llvm.hexagon.V6.vsubhnq.128B
+    hexagon_V6_vsubhq,                         // llvm.hexagon.V6.vsubhq
+    hexagon_V6_vsubhq_128B,                    // llvm.hexagon.V6.vsubhq.128B
+    hexagon_V6_vsubhsat,                       // llvm.hexagon.V6.vsubhsat
+    hexagon_V6_vsubhsat_128B,                  // llvm.hexagon.V6.vsubhsat.128B
+    hexagon_V6_vsubhsat_dv,                    // llvm.hexagon.V6.vsubhsat.dv
+    hexagon_V6_vsubhsat_dv_128B,               // llvm.hexagon.V6.vsubhsat.dv.128B
+    hexagon_V6_vsubhw,                         // llvm.hexagon.V6.vsubhw
+    hexagon_V6_vsubhw_128B,                    // llvm.hexagon.V6.vsubhw.128B
+    hexagon_V6_vsububh,                        // llvm.hexagon.V6.vsububh
+    hexagon_V6_vsububh_128B,                   // llvm.hexagon.V6.vsububh.128B
+    hexagon_V6_vsububsat,                      // llvm.hexagon.V6.vsububsat
+    hexagon_V6_vsububsat_128B,                 // llvm.hexagon.V6.vsububsat.128B
+    hexagon_V6_vsububsat_dv,                   // llvm.hexagon.V6.vsububsat.dv
+    hexagon_V6_vsububsat_dv_128B,              // llvm.hexagon.V6.vsububsat.dv.128B
+    hexagon_V6_vsubububb_sat,                  // llvm.hexagon.V6.vsubububb.sat
+    hexagon_V6_vsubububb_sat_128B,             // llvm.hexagon.V6.vsubububb.sat.128B
+    hexagon_V6_vsubuhsat,                      // llvm.hexagon.V6.vsubuhsat
+    hexagon_V6_vsubuhsat_128B,                 // llvm.hexagon.V6.vsubuhsat.128B
+    hexagon_V6_vsubuhsat_dv,                   // llvm.hexagon.V6.vsubuhsat.dv
+    hexagon_V6_vsubuhsat_dv_128B,              // llvm.hexagon.V6.vsubuhsat.dv.128B
+    hexagon_V6_vsubuhw,                        // llvm.hexagon.V6.vsubuhw
+    hexagon_V6_vsubuhw_128B,                   // llvm.hexagon.V6.vsubuhw.128B
+    hexagon_V6_vsubuwsat,                      // llvm.hexagon.V6.vsubuwsat
+    hexagon_V6_vsubuwsat_128B,                 // llvm.hexagon.V6.vsubuwsat.128B
+    hexagon_V6_vsubuwsat_dv,                   // llvm.hexagon.V6.vsubuwsat.dv
+    hexagon_V6_vsubuwsat_dv_128B,              // llvm.hexagon.V6.vsubuwsat.dv.128B
+    hexagon_V6_vsubw,                          // llvm.hexagon.V6.vsubw
+    hexagon_V6_vsubw_128B,                     // llvm.hexagon.V6.vsubw.128B
+    hexagon_V6_vsubw_dv,                       // llvm.hexagon.V6.vsubw.dv
+    hexagon_V6_vsubw_dv_128B,                  // llvm.hexagon.V6.vsubw.dv.128B
+    hexagon_V6_vsubwnq,                        // llvm.hexagon.V6.vsubwnq
+    hexagon_V6_vsubwnq_128B,                   // llvm.hexagon.V6.vsubwnq.128B
+    hexagon_V6_vsubwq,                         // llvm.hexagon.V6.vsubwq
+    hexagon_V6_vsubwq_128B,                    // llvm.hexagon.V6.vsubwq.128B
+    hexagon_V6_vsubwsat,                       // llvm.hexagon.V6.vsubwsat
+    hexagon_V6_vsubwsat_128B,                  // llvm.hexagon.V6.vsubwsat.128B
+    hexagon_V6_vsubwsat_dv,                    // llvm.hexagon.V6.vsubwsat.dv
+    hexagon_V6_vsubwsat_dv_128B,               // llvm.hexagon.V6.vsubwsat.dv.128B
+    hexagon_V6_vswap,                          // llvm.hexagon.V6.vswap
+    hexagon_V6_vswap_128B,                     // llvm.hexagon.V6.vswap.128B
+    hexagon_V6_vtmpyb,                         // llvm.hexagon.V6.vtmpyb
+    hexagon_V6_vtmpyb_128B,                    // llvm.hexagon.V6.vtmpyb.128B
+    hexagon_V6_vtmpyb_acc,                     // llvm.hexagon.V6.vtmpyb.acc
+    hexagon_V6_vtmpyb_acc_128B,                // llvm.hexagon.V6.vtmpyb.acc.128B
+    hexagon_V6_vtmpybus,                       // llvm.hexagon.V6.vtmpybus
+    hexagon_V6_vtmpybus_128B,                  // llvm.hexagon.V6.vtmpybus.128B
+    hexagon_V6_vtmpybus_acc,                   // llvm.hexagon.V6.vtmpybus.acc
+    hexagon_V6_vtmpybus_acc_128B,              // llvm.hexagon.V6.vtmpybus.acc.128B
+    hexagon_V6_vtmpyhb,                        // llvm.hexagon.V6.vtmpyhb
+    hexagon_V6_vtmpyhb_128B,                   // llvm.hexagon.V6.vtmpyhb.128B
+    hexagon_V6_vtmpyhb_acc,                    // llvm.hexagon.V6.vtmpyhb.acc
+    hexagon_V6_vtmpyhb_acc_128B,               // llvm.hexagon.V6.vtmpyhb.acc.128B
+    hexagon_V6_vunpackb,                       // llvm.hexagon.V6.vunpackb
+    hexagon_V6_vunpackb_128B,                  // llvm.hexagon.V6.vunpackb.128B
+    hexagon_V6_vunpackh,                       // llvm.hexagon.V6.vunpackh
+    hexagon_V6_vunpackh_128B,                  // llvm.hexagon.V6.vunpackh.128B
+    hexagon_V6_vunpackob,                      // llvm.hexagon.V6.vunpackob
+    hexagon_V6_vunpackob_128B,                 // llvm.hexagon.V6.vunpackob.128B
+    hexagon_V6_vunpackoh,                      // llvm.hexagon.V6.vunpackoh
+    hexagon_V6_vunpackoh_128B,                 // llvm.hexagon.V6.vunpackoh.128B
+    hexagon_V6_vunpackub,                      // llvm.hexagon.V6.vunpackub
+    hexagon_V6_vunpackub_128B,                 // llvm.hexagon.V6.vunpackub.128B
+    hexagon_V6_vunpackuh,                      // llvm.hexagon.V6.vunpackuh
+    hexagon_V6_vunpackuh_128B,                 // llvm.hexagon.V6.vunpackuh.128B
+    hexagon_V6_vxor,                           // llvm.hexagon.V6.vxor
+    hexagon_V6_vxor_128B,                      // llvm.hexagon.V6.vxor.128B
+    hexagon_V6_vzb,                            // llvm.hexagon.V6.vzb
+    hexagon_V6_vzb_128B,                       // llvm.hexagon.V6.vzb.128B
+    hexagon_V6_vzh,                            // llvm.hexagon.V6.vzh
+    hexagon_V6_vzh_128B,                       // llvm.hexagon.V6.vzh.128B
+    hexagon_Y2_dccleana,                       // llvm.hexagon.Y2.dccleana
+    hexagon_Y2_dccleaninva,                    // llvm.hexagon.Y2.dccleaninva
+    hexagon_Y2_dcinva,                         // llvm.hexagon.Y2.dcinva
+    hexagon_Y2_dczeroa,                        // llvm.hexagon.Y2.dczeroa
+    hexagon_Y4_l2fetch,                        // llvm.hexagon.Y4.l2fetch
+    hexagon_Y5_l2fetch,                        // llvm.hexagon.Y5.l2fetch
+    hexagon_circ_ldb,                          // llvm.hexagon.circ.ldb
+    hexagon_circ_ldd,                          // llvm.hexagon.circ.ldd
+    hexagon_circ_ldh,                          // llvm.hexagon.circ.ldh
+    hexagon_circ_ldub,                         // llvm.hexagon.circ.ldub
+    hexagon_circ_lduh,                         // llvm.hexagon.circ.lduh
+    hexagon_circ_ldw,                          // llvm.hexagon.circ.ldw
+    hexagon_circ_stb,                          // llvm.hexagon.circ.stb
+    hexagon_circ_std,                          // llvm.hexagon.circ.std
+    hexagon_circ_sth,                          // llvm.hexagon.circ.sth
+    hexagon_circ_sthhi,                        // llvm.hexagon.circ.sthhi
+    hexagon_circ_stw,                          // llvm.hexagon.circ.stw
+    hexagon_mm256i_vaddw,                      // llvm.hexagon.mm256i.vaddw
+    hexagon_prefetch,                          // llvm.hexagon.prefetch
+    mips_absq_s_ph,                            // llvm.mips.absq.s.ph
+    mips_absq_s_qb,                            // llvm.mips.absq.s.qb
+    mips_absq_s_w,                             // llvm.mips.absq.s.w
+    mips_add_a_b,                              // llvm.mips.add.a.b
+    mips_add_a_d,                              // llvm.mips.add.a.d
+    mips_add_a_h,                              // llvm.mips.add.a.h
+    mips_add_a_w,                              // llvm.mips.add.a.w
+    mips_addq_ph,                              // llvm.mips.addq.ph
+    mips_addq_s_ph,                            // llvm.mips.addq.s.ph
+    mips_addq_s_w,                             // llvm.mips.addq.s.w
+    mips_addqh_ph,                             // llvm.mips.addqh.ph
+    mips_addqh_r_ph,                           // llvm.mips.addqh.r.ph
+    mips_addqh_r_w,                            // llvm.mips.addqh.r.w
+    mips_addqh_w,                              // llvm.mips.addqh.w
+    mips_adds_a_b,                             // llvm.mips.adds.a.b
+    mips_adds_a_d,                             // llvm.mips.adds.a.d
+    mips_adds_a_h,                             // llvm.mips.adds.a.h
+    mips_adds_a_w,                             // llvm.mips.adds.a.w
+    mips_adds_s_b,                             // llvm.mips.adds.s.b
+    mips_adds_s_d,                             // llvm.mips.adds.s.d
+    mips_adds_s_h,                             // llvm.mips.adds.s.h
+    mips_adds_s_w,                             // llvm.mips.adds.s.w
+    mips_adds_u_b,                             // llvm.mips.adds.u.b
+    mips_adds_u_d,                             // llvm.mips.adds.u.d
+    mips_adds_u_h,                             // llvm.mips.adds.u.h
+    mips_adds_u_w,                             // llvm.mips.adds.u.w
+    mips_addsc,                                // llvm.mips.addsc
+    mips_addu_ph,                              // llvm.mips.addu.ph
+    mips_addu_qb,                              // llvm.mips.addu.qb
+    mips_addu_s_ph,                            // llvm.mips.addu.s.ph
+    mips_addu_s_qb,                            // llvm.mips.addu.s.qb
+    mips_adduh_qb,                             // llvm.mips.adduh.qb
+    mips_adduh_r_qb,                           // llvm.mips.adduh.r.qb
+    mips_addv_b,                               // llvm.mips.addv.b
+    mips_addv_d,                               // llvm.mips.addv.d
+    mips_addv_h,                               // llvm.mips.addv.h
+    mips_addv_w,                               // llvm.mips.addv.w
+    mips_addvi_b,                              // llvm.mips.addvi.b
+    mips_addvi_d,                              // llvm.mips.addvi.d
+    mips_addvi_h,                              // llvm.mips.addvi.h
+    mips_addvi_w,                              // llvm.mips.addvi.w
+    mips_addwc,                                // llvm.mips.addwc
+    mips_and_v,                                // llvm.mips.and.v
+    mips_andi_b,                               // llvm.mips.andi.b
+    mips_append,                               // llvm.mips.append
+    mips_asub_s_b,                             // llvm.mips.asub.s.b
+    mips_asub_s_d,                             // llvm.mips.asub.s.d
+    mips_asub_s_h,                             // llvm.mips.asub.s.h
+    mips_asub_s_w,                             // llvm.mips.asub.s.w
+    mips_asub_u_b,                             // llvm.mips.asub.u.b
+    mips_asub_u_d,                             // llvm.mips.asub.u.d
+    mips_asub_u_h,                             // llvm.mips.asub.u.h
+    mips_asub_u_w,                             // llvm.mips.asub.u.w
+    mips_ave_s_b,                              // llvm.mips.ave.s.b
+    mips_ave_s_d,                              // llvm.mips.ave.s.d
+    mips_ave_s_h,                              // llvm.mips.ave.s.h
+    mips_ave_s_w,                              // llvm.mips.ave.s.w
+    mips_ave_u_b,                              // llvm.mips.ave.u.b
+    mips_ave_u_d,                              // llvm.mips.ave.u.d
+    mips_ave_u_h,                              // llvm.mips.ave.u.h
+    mips_ave_u_w,                              // llvm.mips.ave.u.w
+    mips_aver_s_b,                             // llvm.mips.aver.s.b
+    mips_aver_s_d,                             // llvm.mips.aver.s.d
+    mips_aver_s_h,                             // llvm.mips.aver.s.h
+    mips_aver_s_w,                             // llvm.mips.aver.s.w
+    mips_aver_u_b,                             // llvm.mips.aver.u.b
+    mips_aver_u_d,                             // llvm.mips.aver.u.d
+    mips_aver_u_h,                             // llvm.mips.aver.u.h
+    mips_aver_u_w,                             // llvm.mips.aver.u.w
+    mips_balign,                               // llvm.mips.balign
+    mips_bclr_b,                               // llvm.mips.bclr.b
+    mips_bclr_d,                               // llvm.mips.bclr.d
+    mips_bclr_h,                               // llvm.mips.bclr.h
+    mips_bclr_w,                               // llvm.mips.bclr.w
+    mips_bclri_b,                              // llvm.mips.bclri.b
+    mips_bclri_d,                              // llvm.mips.bclri.d
+    mips_bclri_h,                              // llvm.mips.bclri.h
+    mips_bclri_w,                              // llvm.mips.bclri.w
+    mips_binsl_b,                              // llvm.mips.binsl.b
+    mips_binsl_d,                              // llvm.mips.binsl.d
+    mips_binsl_h,                              // llvm.mips.binsl.h
+    mips_binsl_w,                              // llvm.mips.binsl.w
+    mips_binsli_b,                             // llvm.mips.binsli.b
+    mips_binsli_d,                             // llvm.mips.binsli.d
+    mips_binsli_h,                             // llvm.mips.binsli.h
+    mips_binsli_w,                             // llvm.mips.binsli.w
+    mips_binsr_b,                              // llvm.mips.binsr.b
+    mips_binsr_d,                              // llvm.mips.binsr.d
+    mips_binsr_h,                              // llvm.mips.binsr.h
+    mips_binsr_w,                              // llvm.mips.binsr.w
+    mips_binsri_b,                             // llvm.mips.binsri.b
+    mips_binsri_d,                             // llvm.mips.binsri.d
+    mips_binsri_h,                             // llvm.mips.binsri.h
+    mips_binsri_w,                             // llvm.mips.binsri.w
+    mips_bitrev,                               // llvm.mips.bitrev
+    mips_bmnz_v,                               // llvm.mips.bmnz.v
+    mips_bmnzi_b,                              // llvm.mips.bmnzi.b
+    mips_bmz_v,                                // llvm.mips.bmz.v
+    mips_bmzi_b,                               // llvm.mips.bmzi.b
+    mips_bneg_b,                               // llvm.mips.bneg.b
+    mips_bneg_d,                               // llvm.mips.bneg.d
+    mips_bneg_h,                               // llvm.mips.bneg.h
+    mips_bneg_w,                               // llvm.mips.bneg.w
+    mips_bnegi_b,                              // llvm.mips.bnegi.b
+    mips_bnegi_d,                              // llvm.mips.bnegi.d
+    mips_bnegi_h,                              // llvm.mips.bnegi.h
+    mips_bnegi_w,                              // llvm.mips.bnegi.w
+    mips_bnz_b,                                // llvm.mips.bnz.b
+    mips_bnz_d,                                // llvm.mips.bnz.d
+    mips_bnz_h,                                // llvm.mips.bnz.h
+    mips_bnz_v,                                // llvm.mips.bnz.v
+    mips_bnz_w,                                // llvm.mips.bnz.w
+    mips_bposge32,                             // llvm.mips.bposge32
+    mips_bsel_v,                               // llvm.mips.bsel.v
+    mips_bseli_b,                              // llvm.mips.bseli.b
+    mips_bset_b,                               // llvm.mips.bset.b
+    mips_bset_d,                               // llvm.mips.bset.d
+    mips_bset_h,                               // llvm.mips.bset.h
+    mips_bset_w,                               // llvm.mips.bset.w
+    mips_bseti_b,                              // llvm.mips.bseti.b
+    mips_bseti_d,                              // llvm.mips.bseti.d
+    mips_bseti_h,                              // llvm.mips.bseti.h
+    mips_bseti_w,                              // llvm.mips.bseti.w
+    mips_bz_b,                                 // llvm.mips.bz.b
+    mips_bz_d,                                 // llvm.mips.bz.d
+    mips_bz_h,                                 // llvm.mips.bz.h
+    mips_bz_v,                                 // llvm.mips.bz.v
+    mips_bz_w,                                 // llvm.mips.bz.w
+    mips_ceq_b,                                // llvm.mips.ceq.b
+    mips_ceq_d,                                // llvm.mips.ceq.d
+    mips_ceq_h,                                // llvm.mips.ceq.h
+    mips_ceq_w,                                // llvm.mips.ceq.w
+    mips_ceqi_b,                               // llvm.mips.ceqi.b
+    mips_ceqi_d,                               // llvm.mips.ceqi.d
+    mips_ceqi_h,                               // llvm.mips.ceqi.h
+    mips_ceqi_w,                               // llvm.mips.ceqi.w
+    mips_cfcmsa,                               // llvm.mips.cfcmsa
+    mips_cle_s_b,                              // llvm.mips.cle.s.b
+    mips_cle_s_d,                              // llvm.mips.cle.s.d
+    mips_cle_s_h,                              // llvm.mips.cle.s.h
+    mips_cle_s_w,                              // llvm.mips.cle.s.w
+    mips_cle_u_b,                              // llvm.mips.cle.u.b
+    mips_cle_u_d,                              // llvm.mips.cle.u.d
+    mips_cle_u_h,                              // llvm.mips.cle.u.h
+    mips_cle_u_w,                              // llvm.mips.cle.u.w
+    mips_clei_s_b,                             // llvm.mips.clei.s.b
+    mips_clei_s_d,                             // llvm.mips.clei.s.d
+    mips_clei_s_h,                             // llvm.mips.clei.s.h
+    mips_clei_s_w,                             // llvm.mips.clei.s.w
+    mips_clei_u_b,                             // llvm.mips.clei.u.b
+    mips_clei_u_d,                             // llvm.mips.clei.u.d
+    mips_clei_u_h,                             // llvm.mips.clei.u.h
+    mips_clei_u_w,                             // llvm.mips.clei.u.w
+    mips_clt_s_b,                              // llvm.mips.clt.s.b
+    mips_clt_s_d,                              // llvm.mips.clt.s.d
+    mips_clt_s_h,                              // llvm.mips.clt.s.h
+    mips_clt_s_w,                              // llvm.mips.clt.s.w
+    mips_clt_u_b,                              // llvm.mips.clt.u.b
+    mips_clt_u_d,                              // llvm.mips.clt.u.d
+    mips_clt_u_h,                              // llvm.mips.clt.u.h
+    mips_clt_u_w,                              // llvm.mips.clt.u.w
+    mips_clti_s_b,                             // llvm.mips.clti.s.b
+    mips_clti_s_d,                             // llvm.mips.clti.s.d
+    mips_clti_s_h,                             // llvm.mips.clti.s.h
+    mips_clti_s_w,                             // llvm.mips.clti.s.w
+    mips_clti_u_b,                             // llvm.mips.clti.u.b
+    mips_clti_u_d,                             // llvm.mips.clti.u.d
+    mips_clti_u_h,                             // llvm.mips.clti.u.h
+    mips_clti_u_w,                             // llvm.mips.clti.u.w
+    mips_cmp_eq_ph,                            // llvm.mips.cmp.eq.ph
+    mips_cmp_le_ph,                            // llvm.mips.cmp.le.ph
+    mips_cmp_lt_ph,                            // llvm.mips.cmp.lt.ph
+    mips_cmpgdu_eq_qb,                         // llvm.mips.cmpgdu.eq.qb
+    mips_cmpgdu_le_qb,                         // llvm.mips.cmpgdu.le.qb
+    mips_cmpgdu_lt_qb,                         // llvm.mips.cmpgdu.lt.qb
+    mips_cmpgu_eq_qb,                          // llvm.mips.cmpgu.eq.qb
+    mips_cmpgu_le_qb,                          // llvm.mips.cmpgu.le.qb
+    mips_cmpgu_lt_qb,                          // llvm.mips.cmpgu.lt.qb
+    mips_cmpu_eq_qb,                           // llvm.mips.cmpu.eq.qb
+    mips_cmpu_le_qb,                           // llvm.mips.cmpu.le.qb
+    mips_cmpu_lt_qb,                           // llvm.mips.cmpu.lt.qb
+    mips_copy_s_b,                             // llvm.mips.copy.s.b
+    mips_copy_s_d,                             // llvm.mips.copy.s.d
+    mips_copy_s_h,                             // llvm.mips.copy.s.h
+    mips_copy_s_w,                             // llvm.mips.copy.s.w
+    mips_copy_u_b,                             // llvm.mips.copy.u.b
+    mips_copy_u_d,                             // llvm.mips.copy.u.d
+    mips_copy_u_h,                             // llvm.mips.copy.u.h
+    mips_copy_u_w,                             // llvm.mips.copy.u.w
+    mips_ctcmsa,                               // llvm.mips.ctcmsa
+    mips_div_s_b,                              // llvm.mips.div.s.b
+    mips_div_s_d,                              // llvm.mips.div.s.d
+    mips_div_s_h,                              // llvm.mips.div.s.h
+    mips_div_s_w,                              // llvm.mips.div.s.w
+    mips_div_u_b,                              // llvm.mips.div.u.b
+    mips_div_u_d,                              // llvm.mips.div.u.d
+    mips_div_u_h,                              // llvm.mips.div.u.h
+    mips_div_u_w,                              // llvm.mips.div.u.w
+    mips_dlsa,                                 // llvm.mips.dlsa
+    mips_dotp_s_d,                             // llvm.mips.dotp.s.d
+    mips_dotp_s_h,                             // llvm.mips.dotp.s.h
+    mips_dotp_s_w,                             // llvm.mips.dotp.s.w
+    mips_dotp_u_d,                             // llvm.mips.dotp.u.d
+    mips_dotp_u_h,                             // llvm.mips.dotp.u.h
+    mips_dotp_u_w,                             // llvm.mips.dotp.u.w
+    mips_dpa_w_ph,                             // llvm.mips.dpa.w.ph
+    mips_dpadd_s_d,                            // llvm.mips.dpadd.s.d
+    mips_dpadd_s_h,                            // llvm.mips.dpadd.s.h
+    mips_dpadd_s_w,                            // llvm.mips.dpadd.s.w
+    mips_dpadd_u_d,                            // llvm.mips.dpadd.u.d
+    mips_dpadd_u_h,                            // llvm.mips.dpadd.u.h
+    mips_dpadd_u_w,                            // llvm.mips.dpadd.u.w
+    mips_dpaq_s_w_ph,                          // llvm.mips.dpaq.s.w.ph
+    mips_dpaq_sa_l_w,                          // llvm.mips.dpaq.sa.l.w
+    mips_dpaqx_s_w_ph,                         // llvm.mips.dpaqx.s.w.ph
+    mips_dpaqx_sa_w_ph,                        // llvm.mips.dpaqx.sa.w.ph
+    mips_dpau_h_qbl,                           // llvm.mips.dpau.h.qbl
+    mips_dpau_h_qbr,                           // llvm.mips.dpau.h.qbr
+    mips_dpax_w_ph,                            // llvm.mips.dpax.w.ph
+    mips_dps_w_ph,                             // llvm.mips.dps.w.ph
+    mips_dpsq_s_w_ph,                          // llvm.mips.dpsq.s.w.ph
+    mips_dpsq_sa_l_w,                          // llvm.mips.dpsq.sa.l.w
+    mips_dpsqx_s_w_ph,                         // llvm.mips.dpsqx.s.w.ph
+    mips_dpsqx_sa_w_ph,                        // llvm.mips.dpsqx.sa.w.ph
+    mips_dpsu_h_qbl,                           // llvm.mips.dpsu.h.qbl
+    mips_dpsu_h_qbr,                           // llvm.mips.dpsu.h.qbr
+    mips_dpsub_s_d,                            // llvm.mips.dpsub.s.d
+    mips_dpsub_s_h,                            // llvm.mips.dpsub.s.h
+    mips_dpsub_s_w,                            // llvm.mips.dpsub.s.w
+    mips_dpsub_u_d,                            // llvm.mips.dpsub.u.d
+    mips_dpsub_u_h,                            // llvm.mips.dpsub.u.h
+    mips_dpsub_u_w,                            // llvm.mips.dpsub.u.w
+    mips_dpsx_w_ph,                            // llvm.mips.dpsx.w.ph
+    mips_extp,                                 // llvm.mips.extp
+    mips_extpdp,                               // llvm.mips.extpdp
+    mips_extr_r_w,                             // llvm.mips.extr.r.w
+    mips_extr_rs_w,                            // llvm.mips.extr.rs.w
+    mips_extr_s_h,                             // llvm.mips.extr.s.h
+    mips_extr_w,                               // llvm.mips.extr.w
+    mips_fadd_d,                               // llvm.mips.fadd.d
+    mips_fadd_w,                               // llvm.mips.fadd.w
+    mips_fcaf_d,                               // llvm.mips.fcaf.d
+    mips_fcaf_w,                               // llvm.mips.fcaf.w
+    mips_fceq_d,                               // llvm.mips.fceq.d
+    mips_fceq_w,                               // llvm.mips.fceq.w
+    mips_fclass_d,                             // llvm.mips.fclass.d
+    mips_fclass_w,                             // llvm.mips.fclass.w
+    mips_fcle_d,                               // llvm.mips.fcle.d
+    mips_fcle_w,                               // llvm.mips.fcle.w
+    mips_fclt_d,                               // llvm.mips.fclt.d
+    mips_fclt_w,                               // llvm.mips.fclt.w
+    mips_fcne_d,                               // llvm.mips.fcne.d
+    mips_fcne_w,                               // llvm.mips.fcne.w
+    mips_fcor_d,                               // llvm.mips.fcor.d
+    mips_fcor_w,                               // llvm.mips.fcor.w
+    mips_fcueq_d,                              // llvm.mips.fcueq.d
+    mips_fcueq_w,                              // llvm.mips.fcueq.w
+    mips_fcule_d,                              // llvm.mips.fcule.d
+    mips_fcule_w,                              // llvm.mips.fcule.w
+    mips_fcult_d,                              // llvm.mips.fcult.d
+    mips_fcult_w,                              // llvm.mips.fcult.w
+    mips_fcun_d,                               // llvm.mips.fcun.d
+    mips_fcun_w,                               // llvm.mips.fcun.w
+    mips_fcune_d,                              // llvm.mips.fcune.d
+    mips_fcune_w,                              // llvm.mips.fcune.w
+    mips_fdiv_d,                               // llvm.mips.fdiv.d
+    mips_fdiv_w,                               // llvm.mips.fdiv.w
+    mips_fexdo_h,                              // llvm.mips.fexdo.h
+    mips_fexdo_w,                              // llvm.mips.fexdo.w
+    mips_fexp2_d,                              // llvm.mips.fexp2.d
+    mips_fexp2_w,                              // llvm.mips.fexp2.w
+    mips_fexupl_d,                             // llvm.mips.fexupl.d
+    mips_fexupl_w,                             // llvm.mips.fexupl.w
+    mips_fexupr_d,                             // llvm.mips.fexupr.d
+    mips_fexupr_w,                             // llvm.mips.fexupr.w
+    mips_ffint_s_d,                            // llvm.mips.ffint.s.d
+    mips_ffint_s_w,                            // llvm.mips.ffint.s.w
+    mips_ffint_u_d,                            // llvm.mips.ffint.u.d
+    mips_ffint_u_w,                            // llvm.mips.ffint.u.w
+    mips_ffql_d,                               // llvm.mips.ffql.d
+    mips_ffql_w,                               // llvm.mips.ffql.w
+    mips_ffqr_d,                               // llvm.mips.ffqr.d
+    mips_ffqr_w,                               // llvm.mips.ffqr.w
+    mips_fill_b,                               // llvm.mips.fill.b
+    mips_fill_d,                               // llvm.mips.fill.d
+    mips_fill_h,                               // llvm.mips.fill.h
+    mips_fill_w,                               // llvm.mips.fill.w
+    mips_flog2_d,                              // llvm.mips.flog2.d
+    mips_flog2_w,                              // llvm.mips.flog2.w
+    mips_fmadd_d,                              // llvm.mips.fmadd.d
+    mips_fmadd_w,                              // llvm.mips.fmadd.w
+    mips_fmax_a_d,                             // llvm.mips.fmax.a.d
+    mips_fmax_a_w,                             // llvm.mips.fmax.a.w
+    mips_fmax_d,                               // llvm.mips.fmax.d
+    mips_fmax_w,                               // llvm.mips.fmax.w
+    mips_fmin_a_d,                             // llvm.mips.fmin.a.d
+    mips_fmin_a_w,                             // llvm.mips.fmin.a.w
+    mips_fmin_d,                               // llvm.mips.fmin.d
+    mips_fmin_w,                               // llvm.mips.fmin.w
+    mips_fmsub_d,                              // llvm.mips.fmsub.d
+    mips_fmsub_w,                              // llvm.mips.fmsub.w
+    mips_fmul_d,                               // llvm.mips.fmul.d
+    mips_fmul_w,                               // llvm.mips.fmul.w
+    mips_frcp_d,                               // llvm.mips.frcp.d
+    mips_frcp_w,                               // llvm.mips.frcp.w
+    mips_frint_d,                              // llvm.mips.frint.d
+    mips_frint_w,                              // llvm.mips.frint.w
+    mips_frsqrt_d,                             // llvm.mips.frsqrt.d
+    mips_frsqrt_w,                             // llvm.mips.frsqrt.w
+    mips_fsaf_d,                               // llvm.mips.fsaf.d
+    mips_fsaf_w,                               // llvm.mips.fsaf.w
+    mips_fseq_d,                               // llvm.mips.fseq.d
+    mips_fseq_w,                               // llvm.mips.fseq.w
+    mips_fsle_d,                               // llvm.mips.fsle.d
+    mips_fsle_w,                               // llvm.mips.fsle.w
+    mips_fslt_d,                               // llvm.mips.fslt.d
+    mips_fslt_w,                               // llvm.mips.fslt.w
+    mips_fsne_d,                               // llvm.mips.fsne.d
+    mips_fsne_w,                               // llvm.mips.fsne.w
+    mips_fsor_d,                               // llvm.mips.fsor.d
+    mips_fsor_w,                               // llvm.mips.fsor.w
+    mips_fsqrt_d,                              // llvm.mips.fsqrt.d
+    mips_fsqrt_w,                              // llvm.mips.fsqrt.w
+    mips_fsub_d,                               // llvm.mips.fsub.d
+    mips_fsub_w,                               // llvm.mips.fsub.w
+    mips_fsueq_d,                              // llvm.mips.fsueq.d
+    mips_fsueq_w,                              // llvm.mips.fsueq.w
+    mips_fsule_d,                              // llvm.mips.fsule.d
+    mips_fsule_w,                              // llvm.mips.fsule.w
+    mips_fsult_d,                              // llvm.mips.fsult.d
+    mips_fsult_w,                              // llvm.mips.fsult.w
+    mips_fsun_d,                               // llvm.mips.fsun.d
+    mips_fsun_w,                               // llvm.mips.fsun.w
+    mips_fsune_d,                              // llvm.mips.fsune.d
+    mips_fsune_w,                              // llvm.mips.fsune.w
+    mips_ftint_s_d,                            // llvm.mips.ftint.s.d
+    mips_ftint_s_w,                            // llvm.mips.ftint.s.w
+    mips_ftint_u_d,                            // llvm.mips.ftint.u.d
+    mips_ftint_u_w,                            // llvm.mips.ftint.u.w
+    mips_ftq_h,                                // llvm.mips.ftq.h
+    mips_ftq_w,                                // llvm.mips.ftq.w
+    mips_ftrunc_s_d,                           // llvm.mips.ftrunc.s.d
+    mips_ftrunc_s_w,                           // llvm.mips.ftrunc.s.w
+    mips_ftrunc_u_d,                           // llvm.mips.ftrunc.u.d
+    mips_ftrunc_u_w,                           // llvm.mips.ftrunc.u.w
+    mips_hadd_s_d,                             // llvm.mips.hadd.s.d
+    mips_hadd_s_h,                             // llvm.mips.hadd.s.h
+    mips_hadd_s_w,                             // llvm.mips.hadd.s.w
+    mips_hadd_u_d,                             // llvm.mips.hadd.u.d
+    mips_hadd_u_h,                             // llvm.mips.hadd.u.h
+    mips_hadd_u_w,                             // llvm.mips.hadd.u.w
+    mips_hsub_s_d,                             // llvm.mips.hsub.s.d
+    mips_hsub_s_h,                             // llvm.mips.hsub.s.h
+    mips_hsub_s_w,                             // llvm.mips.hsub.s.w
+    mips_hsub_u_d,                             // llvm.mips.hsub.u.d
+    mips_hsub_u_h,                             // llvm.mips.hsub.u.h
+    mips_hsub_u_w,                             // llvm.mips.hsub.u.w
+    mips_ilvev_b,                              // llvm.mips.ilvev.b
+    mips_ilvev_d,                              // llvm.mips.ilvev.d
+    mips_ilvev_h,                              // llvm.mips.ilvev.h
+    mips_ilvev_w,                              // llvm.mips.ilvev.w
+    mips_ilvl_b,                               // llvm.mips.ilvl.b
+    mips_ilvl_d,                               // llvm.mips.ilvl.d
+    mips_ilvl_h,                               // llvm.mips.ilvl.h
+    mips_ilvl_w,                               // llvm.mips.ilvl.w
+    mips_ilvod_b,                              // llvm.mips.ilvod.b
+    mips_ilvod_d,                              // llvm.mips.ilvod.d
+    mips_ilvod_h,                              // llvm.mips.ilvod.h
+    mips_ilvod_w,                              // llvm.mips.ilvod.w
+    mips_ilvr_b,                               // llvm.mips.ilvr.b
+    mips_ilvr_d,                               // llvm.mips.ilvr.d
+    mips_ilvr_h,                               // llvm.mips.ilvr.h
+    mips_ilvr_w,                               // llvm.mips.ilvr.w
+    mips_insert_b,                             // llvm.mips.insert.b
+    mips_insert_d,                             // llvm.mips.insert.d
+    mips_insert_h,                             // llvm.mips.insert.h
+    mips_insert_w,                             // llvm.mips.insert.w
+    mips_insv,                                 // llvm.mips.insv
+    mips_insve_b,                              // llvm.mips.insve.b
+    mips_insve_d,                              // llvm.mips.insve.d
+    mips_insve_h,                              // llvm.mips.insve.h
+    mips_insve_w,                              // llvm.mips.insve.w
+    mips_lbux,                                 // llvm.mips.lbux
+    mips_ld_b,                                 // llvm.mips.ld.b
+    mips_ld_d,                                 // llvm.mips.ld.d
+    mips_ld_h,                                 // llvm.mips.ld.h
+    mips_ld_w,                                 // llvm.mips.ld.w
+    mips_ldi_b,                                // llvm.mips.ldi.b
+    mips_ldi_d,                                // llvm.mips.ldi.d
+    mips_ldi_h,                                // llvm.mips.ldi.h
+    mips_ldi_w,                                // llvm.mips.ldi.w
+    mips_lhx,                                  // llvm.mips.lhx
+    mips_lsa,                                  // llvm.mips.lsa
+    mips_lwx,                                  // llvm.mips.lwx
+    mips_madd,                                 // llvm.mips.madd
+    mips_madd_q_h,                             // llvm.mips.madd.q.h
+    mips_madd_q_w,                             // llvm.mips.madd.q.w
+    mips_maddr_q_h,                            // llvm.mips.maddr.q.h
+    mips_maddr_q_w,                            // llvm.mips.maddr.q.w
+    mips_maddu,                                // llvm.mips.maddu
+    mips_maddv_b,                              // llvm.mips.maddv.b
+    mips_maddv_d,                              // llvm.mips.maddv.d
+    mips_maddv_h,                              // llvm.mips.maddv.h
+    mips_maddv_w,                              // llvm.mips.maddv.w
+    mips_maq_s_w_phl,                          // llvm.mips.maq.s.w.phl
+    mips_maq_s_w_phr,                          // llvm.mips.maq.s.w.phr
+    mips_maq_sa_w_phl,                         // llvm.mips.maq.sa.w.phl
+    mips_maq_sa_w_phr,                         // llvm.mips.maq.sa.w.phr
+    mips_max_a_b,                              // llvm.mips.max.a.b
+    mips_max_a_d,                              // llvm.mips.max.a.d
+    mips_max_a_h,                              // llvm.mips.max.a.h
+    mips_max_a_w,                              // llvm.mips.max.a.w
+    mips_max_s_b,                              // llvm.mips.max.s.b
+    mips_max_s_d,                              // llvm.mips.max.s.d
+    mips_max_s_h,                              // llvm.mips.max.s.h
+    mips_max_s_w,                              // llvm.mips.max.s.w
+    mips_max_u_b,                              // llvm.mips.max.u.b
+    mips_max_u_d,                              // llvm.mips.max.u.d
+    mips_max_u_h,                              // llvm.mips.max.u.h
+    mips_max_u_w,                              // llvm.mips.max.u.w
+    mips_maxi_s_b,                             // llvm.mips.maxi.s.b
+    mips_maxi_s_d,                             // llvm.mips.maxi.s.d
+    mips_maxi_s_h,                             // llvm.mips.maxi.s.h
+    mips_maxi_s_w,                             // llvm.mips.maxi.s.w
+    mips_maxi_u_b,                             // llvm.mips.maxi.u.b
+    mips_maxi_u_d,                             // llvm.mips.maxi.u.d
+    mips_maxi_u_h,                             // llvm.mips.maxi.u.h
+    mips_maxi_u_w,                             // llvm.mips.maxi.u.w
+    mips_min_a_b,                              // llvm.mips.min.a.b
+    mips_min_a_d,                              // llvm.mips.min.a.d
+    mips_min_a_h,                              // llvm.mips.min.a.h
+    mips_min_a_w,                              // llvm.mips.min.a.w
+    mips_min_s_b,                              // llvm.mips.min.s.b
+    mips_min_s_d,                              // llvm.mips.min.s.d
+    mips_min_s_h,                              // llvm.mips.min.s.h
+    mips_min_s_w,                              // llvm.mips.min.s.w
+    mips_min_u_b,                              // llvm.mips.min.u.b
+    mips_min_u_d,                              // llvm.mips.min.u.d
+    mips_min_u_h,                              // llvm.mips.min.u.h
+    mips_min_u_w,                              // llvm.mips.min.u.w
+    mips_mini_s_b,                             // llvm.mips.mini.s.b
+    mips_mini_s_d,                             // llvm.mips.mini.s.d
+    mips_mini_s_h,                             // llvm.mips.mini.s.h
+    mips_mini_s_w,                             // llvm.mips.mini.s.w
+    mips_mini_u_b,                             // llvm.mips.mini.u.b
+    mips_mini_u_d,                             // llvm.mips.mini.u.d
+    mips_mini_u_h,                             // llvm.mips.mini.u.h
+    mips_mini_u_w,                             // llvm.mips.mini.u.w
+    mips_mod_s_b,                              // llvm.mips.mod.s.b
+    mips_mod_s_d,                              // llvm.mips.mod.s.d
+    mips_mod_s_h,                              // llvm.mips.mod.s.h
+    mips_mod_s_w,                              // llvm.mips.mod.s.w
+    mips_mod_u_b,                              // llvm.mips.mod.u.b
+    mips_mod_u_d,                              // llvm.mips.mod.u.d
+    mips_mod_u_h,                              // llvm.mips.mod.u.h
+    mips_mod_u_w,                              // llvm.mips.mod.u.w
+    mips_modsub,                               // llvm.mips.modsub
+    mips_move_v,                               // llvm.mips.move.v
+    mips_msub,                                 // llvm.mips.msub
+    mips_msub_q_h,                             // llvm.mips.msub.q.h
+    mips_msub_q_w,                             // llvm.mips.msub.q.w
+    mips_msubr_q_h,                            // llvm.mips.msubr.q.h
+    mips_msubr_q_w,                            // llvm.mips.msubr.q.w
+    mips_msubu,                                // llvm.mips.msubu
+    mips_msubv_b,                              // llvm.mips.msubv.b
+    mips_msubv_d,                              // llvm.mips.msubv.d
+    mips_msubv_h,                              // llvm.mips.msubv.h
+    mips_msubv_w,                              // llvm.mips.msubv.w
+    mips_mthlip,                               // llvm.mips.mthlip
+    mips_mul_ph,                               // llvm.mips.mul.ph
+    mips_mul_q_h,                              // llvm.mips.mul.q.h
+    mips_mul_q_w,                              // llvm.mips.mul.q.w
+    mips_mul_s_ph,                             // llvm.mips.mul.s.ph
+    mips_muleq_s_w_phl,                        // llvm.mips.muleq.s.w.phl
+    mips_muleq_s_w_phr,                        // llvm.mips.muleq.s.w.phr
+    mips_muleu_s_ph_qbl,                       // llvm.mips.muleu.s.ph.qbl
+    mips_muleu_s_ph_qbr,                       // llvm.mips.muleu.s.ph.qbr
+    mips_mulq_rs_ph,                           // llvm.mips.mulq.rs.ph
+    mips_mulq_rs_w,                            // llvm.mips.mulq.rs.w
+    mips_mulq_s_ph,                            // llvm.mips.mulq.s.ph
+    mips_mulq_s_w,                             // llvm.mips.mulq.s.w
+    mips_mulr_q_h,                             // llvm.mips.mulr.q.h
+    mips_mulr_q_w,                             // llvm.mips.mulr.q.w
+    mips_mulsa_w_ph,                           // llvm.mips.mulsa.w.ph
+    mips_mulsaq_s_w_ph,                        // llvm.mips.mulsaq.s.w.ph
+    mips_mult,                                 // llvm.mips.mult
+    mips_multu,                                // llvm.mips.multu
+    mips_mulv_b,                               // llvm.mips.mulv.b
+    mips_mulv_d,                               // llvm.mips.mulv.d
+    mips_mulv_h,                               // llvm.mips.mulv.h
+    mips_mulv_w,                               // llvm.mips.mulv.w
+    mips_nloc_b,                               // llvm.mips.nloc.b
+    mips_nloc_d,                               // llvm.mips.nloc.d
+    mips_nloc_h,                               // llvm.mips.nloc.h
+    mips_nloc_w,                               // llvm.mips.nloc.w
+    mips_nlzc_b,                               // llvm.mips.nlzc.b
+    mips_nlzc_d,                               // llvm.mips.nlzc.d
+    mips_nlzc_h,                               // llvm.mips.nlzc.h
+    mips_nlzc_w,                               // llvm.mips.nlzc.w
+    mips_nor_v,                                // llvm.mips.nor.v
+    mips_nori_b,                               // llvm.mips.nori.b
+    mips_or_v,                                 // llvm.mips.or.v
+    mips_ori_b,                                // llvm.mips.ori.b
+    mips_packrl_ph,                            // llvm.mips.packrl.ph
+    mips_pckev_b,                              // llvm.mips.pckev.b
+    mips_pckev_d,                              // llvm.mips.pckev.d
+    mips_pckev_h,                              // llvm.mips.pckev.h
+    mips_pckev_w,                              // llvm.mips.pckev.w
+    mips_pckod_b,                              // llvm.mips.pckod.b
+    mips_pckod_d,                              // llvm.mips.pckod.d
+    mips_pckod_h,                              // llvm.mips.pckod.h
+    mips_pckod_w,                              // llvm.mips.pckod.w
+    mips_pcnt_b,                               // llvm.mips.pcnt.b
+    mips_pcnt_d,                               // llvm.mips.pcnt.d
+    mips_pcnt_h,                               // llvm.mips.pcnt.h
+    mips_pcnt_w,                               // llvm.mips.pcnt.w
+    mips_pick_ph,                              // llvm.mips.pick.ph
+    mips_pick_qb,                              // llvm.mips.pick.qb
+    mips_preceq_w_phl,                         // llvm.mips.preceq.w.phl
+    mips_preceq_w_phr,                         // llvm.mips.preceq.w.phr
+    mips_precequ_ph_qbl,                       // llvm.mips.precequ.ph.qbl
+    mips_precequ_ph_qbla,                      // llvm.mips.precequ.ph.qbla
+    mips_precequ_ph_qbr,                       // llvm.mips.precequ.ph.qbr
+    mips_precequ_ph_qbra,                      // llvm.mips.precequ.ph.qbra
+    mips_preceu_ph_qbl,                        // llvm.mips.preceu.ph.qbl
+    mips_preceu_ph_qbla,                       // llvm.mips.preceu.ph.qbla
+    mips_preceu_ph_qbr,                        // llvm.mips.preceu.ph.qbr
+    mips_preceu_ph_qbra,                       // llvm.mips.preceu.ph.qbra
+    mips_precr_qb_ph,                          // llvm.mips.precr.qb.ph
+    mips_precr_sra_ph_w,                       // llvm.mips.precr.sra.ph.w
+    mips_precr_sra_r_ph_w,                     // llvm.mips.precr.sra.r.ph.w
+    mips_precrq_ph_w,                          // llvm.mips.precrq.ph.w
+    mips_precrq_qb_ph,                         // llvm.mips.precrq.qb.ph
+    mips_precrq_rs_ph_w,                       // llvm.mips.precrq.rs.ph.w
+    mips_precrqu_s_qb_ph,                      // llvm.mips.precrqu.s.qb.ph
+    mips_prepend,                              // llvm.mips.prepend
+    mips_raddu_w_qb,                           // llvm.mips.raddu.w.qb
+    mips_rddsp,                                // llvm.mips.rddsp
+    mips_repl_ph,                              // llvm.mips.repl.ph
+    mips_repl_qb,                              // llvm.mips.repl.qb
+    mips_sat_s_b,                              // llvm.mips.sat.s.b
+    mips_sat_s_d,                              // llvm.mips.sat.s.d
+    mips_sat_s_h,                              // llvm.mips.sat.s.h
+    mips_sat_s_w,                              // llvm.mips.sat.s.w
+    mips_sat_u_b,                              // llvm.mips.sat.u.b
+    mips_sat_u_d,                              // llvm.mips.sat.u.d
+    mips_sat_u_h,                              // llvm.mips.sat.u.h
+    mips_sat_u_w,                              // llvm.mips.sat.u.w
+    mips_shf_b,                                // llvm.mips.shf.b
+    mips_shf_h,                                // llvm.mips.shf.h
+    mips_shf_w,                                // llvm.mips.shf.w
+    mips_shilo,                                // llvm.mips.shilo
+    mips_shll_ph,                              // llvm.mips.shll.ph
+    mips_shll_qb,                              // llvm.mips.shll.qb
+    mips_shll_s_ph,                            // llvm.mips.shll.s.ph
+    mips_shll_s_w,                             // llvm.mips.shll.s.w
+    mips_shra_ph,                              // llvm.mips.shra.ph
+    mips_shra_qb,                              // llvm.mips.shra.qb
+    mips_shra_r_ph,                            // llvm.mips.shra.r.ph
+    mips_shra_r_qb,                            // llvm.mips.shra.r.qb
+    mips_shra_r_w,                             // llvm.mips.shra.r.w
+    mips_shrl_ph,                              // llvm.mips.shrl.ph
+    mips_shrl_qb,                              // llvm.mips.shrl.qb
+    mips_sld_b,                                // llvm.mips.sld.b
+    mips_sld_d,                                // llvm.mips.sld.d
+    mips_sld_h,                                // llvm.mips.sld.h
+    mips_sld_w,                                // llvm.mips.sld.w
+    mips_sldi_b,                               // llvm.mips.sldi.b
+    mips_sldi_d,                               // llvm.mips.sldi.d
+    mips_sldi_h,                               // llvm.mips.sldi.h
+    mips_sldi_w,                               // llvm.mips.sldi.w
+    mips_sll_b,                                // llvm.mips.sll.b
+    mips_sll_d,                                // llvm.mips.sll.d
+    mips_sll_h,                                // llvm.mips.sll.h
+    mips_sll_w,                                // llvm.mips.sll.w
+    mips_slli_b,                               // llvm.mips.slli.b
+    mips_slli_d,                               // llvm.mips.slli.d
+    mips_slli_h,                               // llvm.mips.slli.h
+    mips_slli_w,                               // llvm.mips.slli.w
+    mips_splat_b,                              // llvm.mips.splat.b
+    mips_splat_d,                              // llvm.mips.splat.d
+    mips_splat_h,                              // llvm.mips.splat.h
+    mips_splat_w,                              // llvm.mips.splat.w
+    mips_splati_b,                             // llvm.mips.splati.b
+    mips_splati_d,                             // llvm.mips.splati.d
+    mips_splati_h,                             // llvm.mips.splati.h
+    mips_splati_w,                             // llvm.mips.splati.w
+    mips_sra_b,                                // llvm.mips.sra.b
+    mips_sra_d,                                // llvm.mips.sra.d
+    mips_sra_h,                                // llvm.mips.sra.h
+    mips_sra_w,                                // llvm.mips.sra.w
+    mips_srai_b,                               // llvm.mips.srai.b
+    mips_srai_d,                               // llvm.mips.srai.d
+    mips_srai_h,                               // llvm.mips.srai.h
+    mips_srai_w,                               // llvm.mips.srai.w
+    mips_srar_b,                               // llvm.mips.srar.b
+    mips_srar_d,                               // llvm.mips.srar.d
+    mips_srar_h,                               // llvm.mips.srar.h
+    mips_srar_w,                               // llvm.mips.srar.w
+    mips_srari_b,                              // llvm.mips.srari.b
+    mips_srari_d,                              // llvm.mips.srari.d
+    mips_srari_h,                              // llvm.mips.srari.h
+    mips_srari_w,                              // llvm.mips.srari.w
+    mips_srl_b,                                // llvm.mips.srl.b
+    mips_srl_d,                                // llvm.mips.srl.d
+    mips_srl_h,                                // llvm.mips.srl.h
+    mips_srl_w,                                // llvm.mips.srl.w
+    mips_srli_b,                               // llvm.mips.srli.b
+    mips_srli_d,                               // llvm.mips.srli.d
+    mips_srli_h,                               // llvm.mips.srli.h
+    mips_srli_w,                               // llvm.mips.srli.w
+    mips_srlr_b,                               // llvm.mips.srlr.b
+    mips_srlr_d,                               // llvm.mips.srlr.d
+    mips_srlr_h,                               // llvm.mips.srlr.h
+    mips_srlr_w,                               // llvm.mips.srlr.w
+    mips_srlri_b,                              // llvm.mips.srlri.b
+    mips_srlri_d,                              // llvm.mips.srlri.d
+    mips_srlri_h,                              // llvm.mips.srlri.h
+    mips_srlri_w,                              // llvm.mips.srlri.w
+    mips_st_b,                                 // llvm.mips.st.b
+    mips_st_d,                                 // llvm.mips.st.d
+    mips_st_h,                                 // llvm.mips.st.h
+    mips_st_w,                                 // llvm.mips.st.w
+    mips_subq_ph,                              // llvm.mips.subq.ph
+    mips_subq_s_ph,                            // llvm.mips.subq.s.ph
+    mips_subq_s_w,                             // llvm.mips.subq.s.w
+    mips_subqh_ph,                             // llvm.mips.subqh.ph
+    mips_subqh_r_ph,                           // llvm.mips.subqh.r.ph
+    mips_subqh_r_w,                            // llvm.mips.subqh.r.w
+    mips_subqh_w,                              // llvm.mips.subqh.w
+    mips_subs_s_b,                             // llvm.mips.subs.s.b
+    mips_subs_s_d,                             // llvm.mips.subs.s.d
+    mips_subs_s_h,                             // llvm.mips.subs.s.h
+    mips_subs_s_w,                             // llvm.mips.subs.s.w
+    mips_subs_u_b,                             // llvm.mips.subs.u.b
+    mips_subs_u_d,                             // llvm.mips.subs.u.d
+    mips_subs_u_h,                             // llvm.mips.subs.u.h
+    mips_subs_u_w,                             // llvm.mips.subs.u.w
+    mips_subsus_u_b,                           // llvm.mips.subsus.u.b
+    mips_subsus_u_d,                           // llvm.mips.subsus.u.d
+    mips_subsus_u_h,                           // llvm.mips.subsus.u.h
+    mips_subsus_u_w,                           // llvm.mips.subsus.u.w
+    mips_subsuu_s_b,                           // llvm.mips.subsuu.s.b
+    mips_subsuu_s_d,                           // llvm.mips.subsuu.s.d
+    mips_subsuu_s_h,                           // llvm.mips.subsuu.s.h
+    mips_subsuu_s_w,                           // llvm.mips.subsuu.s.w
+    mips_subu_ph,                              // llvm.mips.subu.ph
+    mips_subu_qb,                              // llvm.mips.subu.qb
+    mips_subu_s_ph,                            // llvm.mips.subu.s.ph
+    mips_subu_s_qb,                            // llvm.mips.subu.s.qb
+    mips_subuh_qb,                             // llvm.mips.subuh.qb
+    mips_subuh_r_qb,                           // llvm.mips.subuh.r.qb
+    mips_subv_b,                               // llvm.mips.subv.b
+    mips_subv_d,                               // llvm.mips.subv.d
+    mips_subv_h,                               // llvm.mips.subv.h
+    mips_subv_w,                               // llvm.mips.subv.w
+    mips_subvi_b,                              // llvm.mips.subvi.b
+    mips_subvi_d,                              // llvm.mips.subvi.d
+    mips_subvi_h,                              // llvm.mips.subvi.h
+    mips_subvi_w,                              // llvm.mips.subvi.w
+    mips_vshf_b,                               // llvm.mips.vshf.b
+    mips_vshf_d,                               // llvm.mips.vshf.d
+    mips_vshf_h,                               // llvm.mips.vshf.h
+    mips_vshf_w,                               // llvm.mips.vshf.w
+    mips_wrdsp,                                // llvm.mips.wrdsp
+    mips_xor_v,                                // llvm.mips.xor.v
+    mips_xori_b,                               // llvm.mips.xori.b
+    nvvm_add_rm_d,                             // llvm.nvvm.add.rm.d
+    nvvm_add_rm_f,                             // llvm.nvvm.add.rm.f
+    nvvm_add_rm_ftz_f,                         // llvm.nvvm.add.rm.ftz.f
+    nvvm_add_rn_d,                             // llvm.nvvm.add.rn.d
+    nvvm_add_rn_f,                             // llvm.nvvm.add.rn.f
+    nvvm_add_rn_ftz_f,                         // llvm.nvvm.add.rn.ftz.f
+    nvvm_add_rp_d,                             // llvm.nvvm.add.rp.d
+    nvvm_add_rp_f,                             // llvm.nvvm.add.rp.f
+    nvvm_add_rp_ftz_f,                         // llvm.nvvm.add.rp.ftz.f
+    nvvm_add_rz_d,                             // llvm.nvvm.add.rz.d
+    nvvm_add_rz_f,                             // llvm.nvvm.add.rz.f
+    nvvm_add_rz_ftz_f,                         // llvm.nvvm.add.rz.ftz.f
+    nvvm_atomic_add_gen_f_cta,                 // llvm.nvvm.atomic.add.gen.f.cta
+    nvvm_atomic_add_gen_f_sys,                 // llvm.nvvm.atomic.add.gen.f.sys
+    nvvm_atomic_add_gen_i_cta,                 // llvm.nvvm.atomic.add.gen.i.cta
+    nvvm_atomic_add_gen_i_sys,                 // llvm.nvvm.atomic.add.gen.i.sys
+    nvvm_atomic_and_gen_i_cta,                 // llvm.nvvm.atomic.and.gen.i.cta
+    nvvm_atomic_and_gen_i_sys,                 // llvm.nvvm.atomic.and.gen.i.sys
+    nvvm_atomic_cas_gen_i_cta,                 // llvm.nvvm.atomic.cas.gen.i.cta
+    nvvm_atomic_cas_gen_i_sys,                 // llvm.nvvm.atomic.cas.gen.i.sys
+    nvvm_atomic_dec_gen_i_cta,                 // llvm.nvvm.atomic.dec.gen.i.cta
+    nvvm_atomic_dec_gen_i_sys,                 // llvm.nvvm.atomic.dec.gen.i.sys
+    nvvm_atomic_exch_gen_i_cta,                // llvm.nvvm.atomic.exch.gen.i.cta
+    nvvm_atomic_exch_gen_i_sys,                // llvm.nvvm.atomic.exch.gen.i.sys
+    nvvm_atomic_inc_gen_i_cta,                 // llvm.nvvm.atomic.inc.gen.i.cta
+    nvvm_atomic_inc_gen_i_sys,                 // llvm.nvvm.atomic.inc.gen.i.sys
+    nvvm_atomic_load_add_f32,                  // llvm.nvvm.atomic.load.add.f32
+    nvvm_atomic_load_add_f64,                  // llvm.nvvm.atomic.load.add.f64
+    nvvm_atomic_load_dec_32,                   // llvm.nvvm.atomic.load.dec.32
+    nvvm_atomic_load_inc_32,                   // llvm.nvvm.atomic.load.inc.32
+    nvvm_atomic_max_gen_i_cta,                 // llvm.nvvm.atomic.max.gen.i.cta
+    nvvm_atomic_max_gen_i_sys,                 // llvm.nvvm.atomic.max.gen.i.sys
+    nvvm_atomic_min_gen_i_cta,                 // llvm.nvvm.atomic.min.gen.i.cta
+    nvvm_atomic_min_gen_i_sys,                 // llvm.nvvm.atomic.min.gen.i.sys
+    nvvm_atomic_or_gen_i_cta,                  // llvm.nvvm.atomic.or.gen.i.cta
+    nvvm_atomic_or_gen_i_sys,                  // llvm.nvvm.atomic.or.gen.i.sys
+    nvvm_atomic_xor_gen_i_cta,                 // llvm.nvvm.atomic.xor.gen.i.cta
+    nvvm_atomic_xor_gen_i_sys,                 // llvm.nvvm.atomic.xor.gen.i.sys
+    nvvm_bar_sync,                             // llvm.nvvm.bar.sync
+    nvvm_bar_warp_sync,                        // llvm.nvvm.bar.warp.sync
+    nvvm_barrier,                              // llvm.nvvm.barrier
+    nvvm_barrier_n,                            // llvm.nvvm.barrier.n
+    nvvm_barrier_sync,                         // llvm.nvvm.barrier.sync
+    nvvm_barrier_sync_cnt,                     // llvm.nvvm.barrier.sync.cnt
+    nvvm_barrier0,                             // llvm.nvvm.barrier0
+    nvvm_barrier0_and,                         // llvm.nvvm.barrier0.and
+    nvvm_barrier0_or,                          // llvm.nvvm.barrier0.or
+    nvvm_barrier0_popc,                        // llvm.nvvm.barrier0.popc
+    nvvm_bitcast_d2ll,                         // llvm.nvvm.bitcast.d2ll
+    nvvm_bitcast_f2i,                          // llvm.nvvm.bitcast.f2i
+    nvvm_bitcast_i2f,                          // llvm.nvvm.bitcast.i2f
+    nvvm_bitcast_ll2d,                         // llvm.nvvm.bitcast.ll2d
+    nvvm_ceil_d,                               // llvm.nvvm.ceil.d
+    nvvm_ceil_f,                               // llvm.nvvm.ceil.f
+    nvvm_ceil_ftz_f,                           // llvm.nvvm.ceil.ftz.f
+    nvvm_compiler_error,                       // llvm.nvvm.compiler.error
+    nvvm_compiler_warn,                        // llvm.nvvm.compiler.warn
+    nvvm_cos_approx_f,                         // llvm.nvvm.cos.approx.f
+    nvvm_cos_approx_ftz_f,                     // llvm.nvvm.cos.approx.ftz.f
+    nvvm_d2f_rm,                               // llvm.nvvm.d2f.rm
+    nvvm_d2f_rm_ftz,                           // llvm.nvvm.d2f.rm.ftz
+    nvvm_d2f_rn,                               // llvm.nvvm.d2f.rn
+    nvvm_d2f_rn_ftz,                           // llvm.nvvm.d2f.rn.ftz
+    nvvm_d2f_rp,                               // llvm.nvvm.d2f.rp
+    nvvm_d2f_rp_ftz,                           // llvm.nvvm.d2f.rp.ftz
+    nvvm_d2f_rz,                               // llvm.nvvm.d2f.rz
+    nvvm_d2f_rz_ftz,                           // llvm.nvvm.d2f.rz.ftz
+    nvvm_d2i_hi,                               // llvm.nvvm.d2i.hi
+    nvvm_d2i_lo,                               // llvm.nvvm.d2i.lo
+    nvvm_d2i_rm,                               // llvm.nvvm.d2i.rm
+    nvvm_d2i_rn,                               // llvm.nvvm.d2i.rn
+    nvvm_d2i_rp,                               // llvm.nvvm.d2i.rp
+    nvvm_d2i_rz,                               // llvm.nvvm.d2i.rz
+    nvvm_d2ll_rm,                              // llvm.nvvm.d2ll.rm
+    nvvm_d2ll_rn,                              // llvm.nvvm.d2ll.rn
+    nvvm_d2ll_rp,                              // llvm.nvvm.d2ll.rp
+    nvvm_d2ll_rz,                              // llvm.nvvm.d2ll.rz
+    nvvm_d2ui_rm,                              // llvm.nvvm.d2ui.rm
+    nvvm_d2ui_rn,                              // llvm.nvvm.d2ui.rn
+    nvvm_d2ui_rp,                              // llvm.nvvm.d2ui.rp
+    nvvm_d2ui_rz,                              // llvm.nvvm.d2ui.rz
+    nvvm_d2ull_rm,                             // llvm.nvvm.d2ull.rm
+    nvvm_d2ull_rn,                             // llvm.nvvm.d2ull.rn
+    nvvm_d2ull_rp,                             // llvm.nvvm.d2ull.rp
+    nvvm_d2ull_rz,                             // llvm.nvvm.d2ull.rz
+    nvvm_div_approx_f,                         // llvm.nvvm.div.approx.f
+    nvvm_div_approx_ftz_f,                     // llvm.nvvm.div.approx.ftz.f
+    nvvm_div_rm_d,                             // llvm.nvvm.div.rm.d
+    nvvm_div_rm_f,                             // llvm.nvvm.div.rm.f
+    nvvm_div_rm_ftz_f,                         // llvm.nvvm.div.rm.ftz.f
+    nvvm_div_rn_d,                             // llvm.nvvm.div.rn.d
+    nvvm_div_rn_f,                             // llvm.nvvm.div.rn.f
+    nvvm_div_rn_ftz_f,                         // llvm.nvvm.div.rn.ftz.f
+    nvvm_div_rp_d,                             // llvm.nvvm.div.rp.d
+    nvvm_div_rp_f,                             // llvm.nvvm.div.rp.f
+    nvvm_div_rp_ftz_f,                         // llvm.nvvm.div.rp.ftz.f
+    nvvm_div_rz_d,                             // llvm.nvvm.div.rz.d
+    nvvm_div_rz_f,                             // llvm.nvvm.div.rz.f
+    nvvm_div_rz_ftz_f,                         // llvm.nvvm.div.rz.ftz.f
+    nvvm_ex2_approx_d,                         // llvm.nvvm.ex2.approx.d
+    nvvm_ex2_approx_f,                         // llvm.nvvm.ex2.approx.f
+    nvvm_ex2_approx_ftz_f,                     // llvm.nvvm.ex2.approx.ftz.f
+    nvvm_f2h_rn,                               // llvm.nvvm.f2h.rn
+    nvvm_f2h_rn_ftz,                           // llvm.nvvm.f2h.rn.ftz
+    nvvm_f2i_rm,                               // llvm.nvvm.f2i.rm
+    nvvm_f2i_rm_ftz,                           // llvm.nvvm.f2i.rm.ftz
+    nvvm_f2i_rn,                               // llvm.nvvm.f2i.rn
+    nvvm_f2i_rn_ftz,                           // llvm.nvvm.f2i.rn.ftz
+    nvvm_f2i_rp,                               // llvm.nvvm.f2i.rp
+    nvvm_f2i_rp_ftz,                           // llvm.nvvm.f2i.rp.ftz
+    nvvm_f2i_rz,                               // llvm.nvvm.f2i.rz
+    nvvm_f2i_rz_ftz,                           // llvm.nvvm.f2i.rz.ftz
+    nvvm_f2ll_rm,                              // llvm.nvvm.f2ll.rm
+    nvvm_f2ll_rm_ftz,                          // llvm.nvvm.f2ll.rm.ftz
+    nvvm_f2ll_rn,                              // llvm.nvvm.f2ll.rn
+    nvvm_f2ll_rn_ftz,                          // llvm.nvvm.f2ll.rn.ftz
+    nvvm_f2ll_rp,                              // llvm.nvvm.f2ll.rp
+    nvvm_f2ll_rp_ftz,                          // llvm.nvvm.f2ll.rp.ftz
+    nvvm_f2ll_rz,                              // llvm.nvvm.f2ll.rz
+    nvvm_f2ll_rz_ftz,                          // llvm.nvvm.f2ll.rz.ftz
+    nvvm_f2ui_rm,                              // llvm.nvvm.f2ui.rm
+    nvvm_f2ui_rm_ftz,                          // llvm.nvvm.f2ui.rm.ftz
+    nvvm_f2ui_rn,                              // llvm.nvvm.f2ui.rn
+    nvvm_f2ui_rn_ftz,                          // llvm.nvvm.f2ui.rn.ftz
+    nvvm_f2ui_rp,                              // llvm.nvvm.f2ui.rp
+    nvvm_f2ui_rp_ftz,                          // llvm.nvvm.f2ui.rp.ftz
+    nvvm_f2ui_rz,                              // llvm.nvvm.f2ui.rz
+    nvvm_f2ui_rz_ftz,                          // llvm.nvvm.f2ui.rz.ftz
+    nvvm_f2ull_rm,                             // llvm.nvvm.f2ull.rm
+    nvvm_f2ull_rm_ftz,                         // llvm.nvvm.f2ull.rm.ftz
+    nvvm_f2ull_rn,                             // llvm.nvvm.f2ull.rn
+    nvvm_f2ull_rn_ftz,                         // llvm.nvvm.f2ull.rn.ftz
+    nvvm_f2ull_rp,                             // llvm.nvvm.f2ull.rp
+    nvvm_f2ull_rp_ftz,                         // llvm.nvvm.f2ull.rp.ftz
+    nvvm_f2ull_rz,                             // llvm.nvvm.f2ull.rz
+    nvvm_f2ull_rz_ftz,                         // llvm.nvvm.f2ull.rz.ftz
+    nvvm_fabs_d,                               // llvm.nvvm.fabs.d
+    nvvm_fabs_f,                               // llvm.nvvm.fabs.f
+    nvvm_fabs_ftz_f,                           // llvm.nvvm.fabs.ftz.f
+    nvvm_floor_d,                              // llvm.nvvm.floor.d
+    nvvm_floor_f,                              // llvm.nvvm.floor.f
+    nvvm_floor_ftz_f,                          // llvm.nvvm.floor.ftz.f
+    nvvm_fma_rm_d,                             // llvm.nvvm.fma.rm.d
+    nvvm_fma_rm_f,                             // llvm.nvvm.fma.rm.f
+    nvvm_fma_rm_ftz_f,                         // llvm.nvvm.fma.rm.ftz.f
+    nvvm_fma_rn_d,                             // llvm.nvvm.fma.rn.d
+    nvvm_fma_rn_f,                             // llvm.nvvm.fma.rn.f
+    nvvm_fma_rn_ftz_f,                         // llvm.nvvm.fma.rn.ftz.f
+    nvvm_fma_rp_d,                             // llvm.nvvm.fma.rp.d
+    nvvm_fma_rp_f,                             // llvm.nvvm.fma.rp.f
+    nvvm_fma_rp_ftz_f,                         // llvm.nvvm.fma.rp.ftz.f
+    nvvm_fma_rz_d,                             // llvm.nvvm.fma.rz.d
+    nvvm_fma_rz_f,                             // llvm.nvvm.fma.rz.f
+    nvvm_fma_rz_ftz_f,                         // llvm.nvvm.fma.rz.ftz.f
+    nvvm_fmax_d,                               // llvm.nvvm.fmax.d
+    nvvm_fmax_f,                               // llvm.nvvm.fmax.f
+    nvvm_fmax_ftz_f,                           // llvm.nvvm.fmax.ftz.f
+    nvvm_fmin_d,                               // llvm.nvvm.fmin.d
+    nvvm_fmin_f,                               // llvm.nvvm.fmin.f
+    nvvm_fmin_ftz_f,                           // llvm.nvvm.fmin.ftz.f
+    nvvm_fns,                                  // llvm.nvvm.fns
+    nvvm_i2d_rm,                               // llvm.nvvm.i2d.rm
+    nvvm_i2d_rn,                               // llvm.nvvm.i2d.rn
+    nvvm_i2d_rp,                               // llvm.nvvm.i2d.rp
+    nvvm_i2d_rz,                               // llvm.nvvm.i2d.rz
+    nvvm_i2f_rm,                               // llvm.nvvm.i2f.rm
+    nvvm_i2f_rn,                               // llvm.nvvm.i2f.rn
+    nvvm_i2f_rp,                               // llvm.nvvm.i2f.rp
+    nvvm_i2f_rz,                               // llvm.nvvm.i2f.rz
+    nvvm_isspacep_const,                       // llvm.nvvm.isspacep.const
+    nvvm_isspacep_global,                      // llvm.nvvm.isspacep.global
+    nvvm_isspacep_local,                       // llvm.nvvm.isspacep.local
+    nvvm_isspacep_shared,                      // llvm.nvvm.isspacep.shared
+    nvvm_istypep_sampler,                      // llvm.nvvm.istypep.sampler
+    nvvm_istypep_surface,                      // llvm.nvvm.istypep.surface
+    nvvm_istypep_texture,                      // llvm.nvvm.istypep.texture
+    nvvm_ldg_global_f,                         // llvm.nvvm.ldg.global.f
+    nvvm_ldg_global_i,                         // llvm.nvvm.ldg.global.i
+    nvvm_ldg_global_p,                         // llvm.nvvm.ldg.global.p
+    nvvm_ldu_global_f,                         // llvm.nvvm.ldu.global.f
+    nvvm_ldu_global_i,                         // llvm.nvvm.ldu.global.i
+    nvvm_ldu_global_p,                         // llvm.nvvm.ldu.global.p
+    nvvm_lg2_approx_d,                         // llvm.nvvm.lg2.approx.d
+    nvvm_lg2_approx_f,                         // llvm.nvvm.lg2.approx.f
+    nvvm_lg2_approx_ftz_f,                     // llvm.nvvm.lg2.approx.ftz.f
+    nvvm_ll2d_rm,                              // llvm.nvvm.ll2d.rm
+    nvvm_ll2d_rn,                              // llvm.nvvm.ll2d.rn
+    nvvm_ll2d_rp,                              // llvm.nvvm.ll2d.rp
+    nvvm_ll2d_rz,                              // llvm.nvvm.ll2d.rz
+    nvvm_ll2f_rm,                              // llvm.nvvm.ll2f.rm
+    nvvm_ll2f_rn,                              // llvm.nvvm.ll2f.rn
+    nvvm_ll2f_rp,                              // llvm.nvvm.ll2f.rp
+    nvvm_ll2f_rz,                              // llvm.nvvm.ll2f.rz
+    nvvm_lohi_i2d,                             // llvm.nvvm.lohi.i2d
+    nvvm_match_all_sync_i32p,                  // llvm.nvvm.match.all.sync.i32p
+    nvvm_match_all_sync_i64p,                  // llvm.nvvm.match.all.sync.i64p
+    nvvm_match_any_sync_i32,                   // llvm.nvvm.match.any.sync.i32
+    nvvm_match_any_sync_i64,                   // llvm.nvvm.match.any.sync.i64
+    nvvm_membar_cta,                           // llvm.nvvm.membar.cta
+    nvvm_membar_gl,                            // llvm.nvvm.membar.gl
+    nvvm_membar_sys,                           // llvm.nvvm.membar.sys
+    nvvm_move_double,                          // llvm.nvvm.move.double
+    nvvm_move_float,                           // llvm.nvvm.move.float
+    nvvm_move_i16,                             // llvm.nvvm.move.i16
+    nvvm_move_i32,                             // llvm.nvvm.move.i32
+    nvvm_move_i64,                             // llvm.nvvm.move.i64
+    nvvm_move_ptr,                             // llvm.nvvm.move.ptr
+    nvvm_mul_rm_d,                             // llvm.nvvm.mul.rm.d
+    nvvm_mul_rm_f,                             // llvm.nvvm.mul.rm.f
+    nvvm_mul_rm_ftz_f,                         // llvm.nvvm.mul.rm.ftz.f
+    nvvm_mul_rn_d,                             // llvm.nvvm.mul.rn.d
+    nvvm_mul_rn_f,                             // llvm.nvvm.mul.rn.f
+    nvvm_mul_rn_ftz_f,                         // llvm.nvvm.mul.rn.ftz.f
+    nvvm_mul_rp_d,                             // llvm.nvvm.mul.rp.d
+    nvvm_mul_rp_f,                             // llvm.nvvm.mul.rp.f
+    nvvm_mul_rp_ftz_f,                         // llvm.nvvm.mul.rp.ftz.f
+    nvvm_mul_rz_d,                             // llvm.nvvm.mul.rz.d
+    nvvm_mul_rz_f,                             // llvm.nvvm.mul.rz.f
+    nvvm_mul_rz_ftz_f,                         // llvm.nvvm.mul.rz.ftz.f
+    nvvm_mul24_i,                              // llvm.nvvm.mul24.i
+    nvvm_mul24_ui,                             // llvm.nvvm.mul24.ui
+    nvvm_mulhi_i,                              // llvm.nvvm.mulhi.i
+    nvvm_mulhi_ll,                             // llvm.nvvm.mulhi.ll
+    nvvm_mulhi_ui,                             // llvm.nvvm.mulhi.ui
+    nvvm_mulhi_ull,                            // llvm.nvvm.mulhi.ull
+    nvvm_prmt,                                 // llvm.nvvm.prmt
+    nvvm_ptr_constant_to_gen,                  // llvm.nvvm.ptr.constant.to.gen
+    nvvm_ptr_gen_to_constant,                  // llvm.nvvm.ptr.gen.to.constant
+    nvvm_ptr_gen_to_global,                    // llvm.nvvm.ptr.gen.to.global
+    nvvm_ptr_gen_to_local,                     // llvm.nvvm.ptr.gen.to.local
+    nvvm_ptr_gen_to_param,                     // llvm.nvvm.ptr.gen.to.param
+    nvvm_ptr_gen_to_shared,                    // llvm.nvvm.ptr.gen.to.shared
+    nvvm_ptr_global_to_gen,                    // llvm.nvvm.ptr.global.to.gen
+    nvvm_ptr_local_to_gen,                     // llvm.nvvm.ptr.local.to.gen
+    nvvm_ptr_shared_to_gen,                    // llvm.nvvm.ptr.shared.to.gen
+    nvvm_rcp_approx_ftz_d,                     // llvm.nvvm.rcp.approx.ftz.d
+    nvvm_rcp_rm_d,                             // llvm.nvvm.rcp.rm.d
+    nvvm_rcp_rm_f,                             // llvm.nvvm.rcp.rm.f
+    nvvm_rcp_rm_ftz_f,                         // llvm.nvvm.rcp.rm.ftz.f
+    nvvm_rcp_rn_d,                             // llvm.nvvm.rcp.rn.d
+    nvvm_rcp_rn_f,                             // llvm.nvvm.rcp.rn.f
+    nvvm_rcp_rn_ftz_f,                         // llvm.nvvm.rcp.rn.ftz.f
+    nvvm_rcp_rp_d,                             // llvm.nvvm.rcp.rp.d
+    nvvm_rcp_rp_f,                             // llvm.nvvm.rcp.rp.f
+    nvvm_rcp_rp_ftz_f,                         // llvm.nvvm.rcp.rp.ftz.f
+    nvvm_rcp_rz_d,                             // llvm.nvvm.rcp.rz.d
+    nvvm_rcp_rz_f,                             // llvm.nvvm.rcp.rz.f
+    nvvm_rcp_rz_ftz_f,                         // llvm.nvvm.rcp.rz.ftz.f
+    nvvm_read_ptx_sreg_clock,                  // llvm.nvvm.read.ptx.sreg.clock
+    nvvm_read_ptx_sreg_clock64,                // llvm.nvvm.read.ptx.sreg.clock64
+    nvvm_read_ptx_sreg_ctaid_w,                // llvm.nvvm.read.ptx.sreg.ctaid.w
+    nvvm_read_ptx_sreg_ctaid_x,                // llvm.nvvm.read.ptx.sreg.ctaid.x
+    nvvm_read_ptx_sreg_ctaid_y,                // llvm.nvvm.read.ptx.sreg.ctaid.y
+    nvvm_read_ptx_sreg_ctaid_z,                // llvm.nvvm.read.ptx.sreg.ctaid.z
+    nvvm_read_ptx_sreg_envreg0,                // llvm.nvvm.read.ptx.sreg.envreg0
+    nvvm_read_ptx_sreg_envreg1,                // llvm.nvvm.read.ptx.sreg.envreg1
+    nvvm_read_ptx_sreg_envreg10,               // llvm.nvvm.read.ptx.sreg.envreg10
+    nvvm_read_ptx_sreg_envreg11,               // llvm.nvvm.read.ptx.sreg.envreg11
+    nvvm_read_ptx_sreg_envreg12,               // llvm.nvvm.read.ptx.sreg.envreg12
+    nvvm_read_ptx_sreg_envreg13,               // llvm.nvvm.read.ptx.sreg.envreg13
+    nvvm_read_ptx_sreg_envreg14,               // llvm.nvvm.read.ptx.sreg.envreg14
+    nvvm_read_ptx_sreg_envreg15,               // llvm.nvvm.read.ptx.sreg.envreg15
+    nvvm_read_ptx_sreg_envreg16,               // llvm.nvvm.read.ptx.sreg.envreg16
+    nvvm_read_ptx_sreg_envreg17,               // llvm.nvvm.read.ptx.sreg.envreg17
+    nvvm_read_ptx_sreg_envreg18,               // llvm.nvvm.read.ptx.sreg.envreg18
+    nvvm_read_ptx_sreg_envreg19,               // llvm.nvvm.read.ptx.sreg.envreg19
+    nvvm_read_ptx_sreg_envreg2,                // llvm.nvvm.read.ptx.sreg.envreg2
+    nvvm_read_ptx_sreg_envreg20,               // llvm.nvvm.read.ptx.sreg.envreg20
+    nvvm_read_ptx_sreg_envreg21,               // llvm.nvvm.read.ptx.sreg.envreg21
+    nvvm_read_ptx_sreg_envreg22,               // llvm.nvvm.read.ptx.sreg.envreg22
+    nvvm_read_ptx_sreg_envreg23,               // llvm.nvvm.read.ptx.sreg.envreg23
+    nvvm_read_ptx_sreg_envreg24,               // llvm.nvvm.read.ptx.sreg.envreg24
+    nvvm_read_ptx_sreg_envreg25,               // llvm.nvvm.read.ptx.sreg.envreg25
+    nvvm_read_ptx_sreg_envreg26,               // llvm.nvvm.read.ptx.sreg.envreg26
+    nvvm_read_ptx_sreg_envreg27,               // llvm.nvvm.read.ptx.sreg.envreg27
+    nvvm_read_ptx_sreg_envreg28,               // llvm.nvvm.read.ptx.sreg.envreg28
+    nvvm_read_ptx_sreg_envreg29,               // llvm.nvvm.read.ptx.sreg.envreg29
+    nvvm_read_ptx_sreg_envreg3,                // llvm.nvvm.read.ptx.sreg.envreg3
+    nvvm_read_ptx_sreg_envreg30,               // llvm.nvvm.read.ptx.sreg.envreg30
+    nvvm_read_ptx_sreg_envreg31,               // llvm.nvvm.read.ptx.sreg.envreg31
+    nvvm_read_ptx_sreg_envreg4,                // llvm.nvvm.read.ptx.sreg.envreg4
+    nvvm_read_ptx_sreg_envreg5,                // llvm.nvvm.read.ptx.sreg.envreg5
+    nvvm_read_ptx_sreg_envreg6,                // llvm.nvvm.read.ptx.sreg.envreg6
+    nvvm_read_ptx_sreg_envreg7,                // llvm.nvvm.read.ptx.sreg.envreg7
+    nvvm_read_ptx_sreg_envreg8,                // llvm.nvvm.read.ptx.sreg.envreg8
+    nvvm_read_ptx_sreg_envreg9,                // llvm.nvvm.read.ptx.sreg.envreg9
+    nvvm_read_ptx_sreg_gridid,                 // llvm.nvvm.read.ptx.sreg.gridid
+    nvvm_read_ptx_sreg_laneid,                 // llvm.nvvm.read.ptx.sreg.laneid
+    nvvm_read_ptx_sreg_lanemask_eq,            // llvm.nvvm.read.ptx.sreg.lanemask.eq
+    nvvm_read_ptx_sreg_lanemask_ge,            // llvm.nvvm.read.ptx.sreg.lanemask.ge
+    nvvm_read_ptx_sreg_lanemask_gt,            // llvm.nvvm.read.ptx.sreg.lanemask.gt
+    nvvm_read_ptx_sreg_lanemask_le,            // llvm.nvvm.read.ptx.sreg.lanemask.le
+    nvvm_read_ptx_sreg_lanemask_lt,            // llvm.nvvm.read.ptx.sreg.lanemask.lt
+    nvvm_read_ptx_sreg_nctaid_w,               // llvm.nvvm.read.ptx.sreg.nctaid.w
+    nvvm_read_ptx_sreg_nctaid_x,               // llvm.nvvm.read.ptx.sreg.nctaid.x
+    nvvm_read_ptx_sreg_nctaid_y,               // llvm.nvvm.read.ptx.sreg.nctaid.y
+    nvvm_read_ptx_sreg_nctaid_z,               // llvm.nvvm.read.ptx.sreg.nctaid.z
+    nvvm_read_ptx_sreg_nsmid,                  // llvm.nvvm.read.ptx.sreg.nsmid
+    nvvm_read_ptx_sreg_ntid_w,                 // llvm.nvvm.read.ptx.sreg.ntid.w
+    nvvm_read_ptx_sreg_ntid_x,                 // llvm.nvvm.read.ptx.sreg.ntid.x
+    nvvm_read_ptx_sreg_ntid_y,                 // llvm.nvvm.read.ptx.sreg.ntid.y
+    nvvm_read_ptx_sreg_ntid_z,                 // llvm.nvvm.read.ptx.sreg.ntid.z
+    nvvm_read_ptx_sreg_nwarpid,                // llvm.nvvm.read.ptx.sreg.nwarpid
+    nvvm_read_ptx_sreg_pm0,                    // llvm.nvvm.read.ptx.sreg.pm0
+    nvvm_read_ptx_sreg_pm1,                    // llvm.nvvm.read.ptx.sreg.pm1
+    nvvm_read_ptx_sreg_pm2,                    // llvm.nvvm.read.ptx.sreg.pm2
+    nvvm_read_ptx_sreg_pm3,                    // llvm.nvvm.read.ptx.sreg.pm3
+    nvvm_read_ptx_sreg_smid,                   // llvm.nvvm.read.ptx.sreg.smid
+    nvvm_read_ptx_sreg_tid_w,                  // llvm.nvvm.read.ptx.sreg.tid.w
+    nvvm_read_ptx_sreg_tid_x,                  // llvm.nvvm.read.ptx.sreg.tid.x
+    nvvm_read_ptx_sreg_tid_y,                  // llvm.nvvm.read.ptx.sreg.tid.y
+    nvvm_read_ptx_sreg_tid_z,                  // llvm.nvvm.read.ptx.sreg.tid.z
+    nvvm_read_ptx_sreg_warpid,                 // llvm.nvvm.read.ptx.sreg.warpid
+    nvvm_read_ptx_sreg_warpsize,               // llvm.nvvm.read.ptx.sreg.warpsize
+    nvvm_reflect,                              // llvm.nvvm.reflect
+    nvvm_rotate_b32,                           // llvm.nvvm.rotate.b32
+    nvvm_rotate_b64,                           // llvm.nvvm.rotate.b64
+    nvvm_rotate_right_b64,                     // llvm.nvvm.rotate.right.b64
+    nvvm_round_d,                              // llvm.nvvm.round.d
+    nvvm_round_f,                              // llvm.nvvm.round.f
+    nvvm_round_ftz_f,                          // llvm.nvvm.round.ftz.f
+    nvvm_rsqrt_approx_d,                       // llvm.nvvm.rsqrt.approx.d
+    nvvm_rsqrt_approx_f,                       // llvm.nvvm.rsqrt.approx.f
+    nvvm_rsqrt_approx_ftz_f,                   // llvm.nvvm.rsqrt.approx.ftz.f
+    nvvm_sad_i,                                // llvm.nvvm.sad.i
+    nvvm_sad_ui,                               // llvm.nvvm.sad.ui
+    nvvm_saturate_d,                           // llvm.nvvm.saturate.d
+    nvvm_saturate_f,                           // llvm.nvvm.saturate.f
+    nvvm_saturate_ftz_f,                       // llvm.nvvm.saturate.ftz.f
+    nvvm_shfl_bfly_f32,                        // llvm.nvvm.shfl.bfly.f32
+    nvvm_shfl_bfly_i32,                        // llvm.nvvm.shfl.bfly.i32
+    nvvm_shfl_down_f32,                        // llvm.nvvm.shfl.down.f32
+    nvvm_shfl_down_i32,                        // llvm.nvvm.shfl.down.i32
+    nvvm_shfl_idx_f32,                         // llvm.nvvm.shfl.idx.f32
+    nvvm_shfl_idx_i32,                         // llvm.nvvm.shfl.idx.i32
+    nvvm_shfl_sync_bfly_f32,                   // llvm.nvvm.shfl.sync.bfly.f32
+    nvvm_shfl_sync_bfly_i32,                   // llvm.nvvm.shfl.sync.bfly.i32
+    nvvm_shfl_sync_down_f32,                   // llvm.nvvm.shfl.sync.down.f32
+    nvvm_shfl_sync_down_i32,                   // llvm.nvvm.shfl.sync.down.i32
+    nvvm_shfl_sync_idx_f32,                    // llvm.nvvm.shfl.sync.idx.f32
+    nvvm_shfl_sync_idx_i32,                    // llvm.nvvm.shfl.sync.idx.i32
+    nvvm_shfl_sync_up_f32,                     // llvm.nvvm.shfl.sync.up.f32
+    nvvm_shfl_sync_up_i32,                     // llvm.nvvm.shfl.sync.up.i32
+    nvvm_shfl_up_f32,                          // llvm.nvvm.shfl.up.f32
+    nvvm_shfl_up_i32,                          // llvm.nvvm.shfl.up.i32
+    nvvm_sin_approx_f,                         // llvm.nvvm.sin.approx.f
+    nvvm_sin_approx_ftz_f,                     // llvm.nvvm.sin.approx.ftz.f
+    nvvm_sqrt_approx_f,                        // llvm.nvvm.sqrt.approx.f
+    nvvm_sqrt_approx_ftz_f,                    // llvm.nvvm.sqrt.approx.ftz.f
+    nvvm_sqrt_f,                               // llvm.nvvm.sqrt.f
+    nvvm_sqrt_rm_d,                            // llvm.nvvm.sqrt.rm.d
+    nvvm_sqrt_rm_f,                            // llvm.nvvm.sqrt.rm.f
+    nvvm_sqrt_rm_ftz_f,                        // llvm.nvvm.sqrt.rm.ftz.f
+    nvvm_sqrt_rn_d,                            // llvm.nvvm.sqrt.rn.d
+    nvvm_sqrt_rn_f,                            // llvm.nvvm.sqrt.rn.f
+    nvvm_sqrt_rn_ftz_f,                        // llvm.nvvm.sqrt.rn.ftz.f
+    nvvm_sqrt_rp_d,                            // llvm.nvvm.sqrt.rp.d
+    nvvm_sqrt_rp_f,                            // llvm.nvvm.sqrt.rp.f
+    nvvm_sqrt_rp_ftz_f,                        // llvm.nvvm.sqrt.rp.ftz.f
+    nvvm_sqrt_rz_d,                            // llvm.nvvm.sqrt.rz.d
+    nvvm_sqrt_rz_f,                            // llvm.nvvm.sqrt.rz.f
+    nvvm_sqrt_rz_ftz_f,                        // llvm.nvvm.sqrt.rz.ftz.f
+    nvvm_suld_1d_array_i16_clamp,              // llvm.nvvm.suld.1d.array.i16.clamp
+    nvvm_suld_1d_array_i16_trap,               // llvm.nvvm.suld.1d.array.i16.trap
+    nvvm_suld_1d_array_i16_zero,               // llvm.nvvm.suld.1d.array.i16.zero
+    nvvm_suld_1d_array_i32_clamp,              // llvm.nvvm.suld.1d.array.i32.clamp
+    nvvm_suld_1d_array_i32_trap,               // llvm.nvvm.suld.1d.array.i32.trap
+    nvvm_suld_1d_array_i32_zero,               // llvm.nvvm.suld.1d.array.i32.zero
+    nvvm_suld_1d_array_i64_clamp,              // llvm.nvvm.suld.1d.array.i64.clamp
+    nvvm_suld_1d_array_i64_trap,               // llvm.nvvm.suld.1d.array.i64.trap
+    nvvm_suld_1d_array_i64_zero,               // llvm.nvvm.suld.1d.array.i64.zero
+    nvvm_suld_1d_array_i8_clamp,               // llvm.nvvm.suld.1d.array.i8.clamp
+    nvvm_suld_1d_array_i8_trap,                // llvm.nvvm.suld.1d.array.i8.trap
+    nvvm_suld_1d_array_i8_zero,                // llvm.nvvm.suld.1d.array.i8.zero
+    nvvm_suld_1d_array_v2i16_clamp,            // llvm.nvvm.suld.1d.array.v2i16.clamp
+    nvvm_suld_1d_array_v2i16_trap,             // llvm.nvvm.suld.1d.array.v2i16.trap
+    nvvm_suld_1d_array_v2i16_zero,             // llvm.nvvm.suld.1d.array.v2i16.zero
+    nvvm_suld_1d_array_v2i32_clamp,            // llvm.nvvm.suld.1d.array.v2i32.clamp
+    nvvm_suld_1d_array_v2i32_trap,             // llvm.nvvm.suld.1d.array.v2i32.trap
+    nvvm_suld_1d_array_v2i32_zero,             // llvm.nvvm.suld.1d.array.v2i32.zero
+    nvvm_suld_1d_array_v2i64_clamp,            // llvm.nvvm.suld.1d.array.v2i64.clamp
+    nvvm_suld_1d_array_v2i64_trap,             // llvm.nvvm.suld.1d.array.v2i64.trap
+    nvvm_suld_1d_array_v2i64_zero,             // llvm.nvvm.suld.1d.array.v2i64.zero
+    nvvm_suld_1d_array_v2i8_clamp,             // llvm.nvvm.suld.1d.array.v2i8.clamp
+    nvvm_suld_1d_array_v2i8_trap,              // llvm.nvvm.suld.1d.array.v2i8.trap
+    nvvm_suld_1d_array_v2i8_zero,              // llvm.nvvm.suld.1d.array.v2i8.zero
+    nvvm_suld_1d_array_v4i16_clamp,            // llvm.nvvm.suld.1d.array.v4i16.clamp
+    nvvm_suld_1d_array_v4i16_trap,             // llvm.nvvm.suld.1d.array.v4i16.trap
+    nvvm_suld_1d_array_v4i16_zero,             // llvm.nvvm.suld.1d.array.v4i16.zero
+    nvvm_suld_1d_array_v4i32_clamp,            // llvm.nvvm.suld.1d.array.v4i32.clamp
+    nvvm_suld_1d_array_v4i32_trap,             // llvm.nvvm.suld.1d.array.v4i32.trap
+    nvvm_suld_1d_array_v4i32_zero,             // llvm.nvvm.suld.1d.array.v4i32.zero
+    nvvm_suld_1d_array_v4i8_clamp,             // llvm.nvvm.suld.1d.array.v4i8.clamp
+    nvvm_suld_1d_array_v4i8_trap,              // llvm.nvvm.suld.1d.array.v4i8.trap
+    nvvm_suld_1d_array_v4i8_zero,              // llvm.nvvm.suld.1d.array.v4i8.zero
+    nvvm_suld_1d_i16_clamp,                    // llvm.nvvm.suld.1d.i16.clamp
+    nvvm_suld_1d_i16_trap,                     // llvm.nvvm.suld.1d.i16.trap
+    nvvm_suld_1d_i16_zero,                     // llvm.nvvm.suld.1d.i16.zero
+    nvvm_suld_1d_i32_clamp,                    // llvm.nvvm.suld.1d.i32.clamp
+    nvvm_suld_1d_i32_trap,                     // llvm.nvvm.suld.1d.i32.trap
+    nvvm_suld_1d_i32_zero,                     // llvm.nvvm.suld.1d.i32.zero
+    nvvm_suld_1d_i64_clamp,                    // llvm.nvvm.suld.1d.i64.clamp
+    nvvm_suld_1d_i64_trap,                     // llvm.nvvm.suld.1d.i64.trap
+    nvvm_suld_1d_i64_zero,                     // llvm.nvvm.suld.1d.i64.zero
+    nvvm_suld_1d_i8_clamp,                     // llvm.nvvm.suld.1d.i8.clamp
+    nvvm_suld_1d_i8_trap,                      // llvm.nvvm.suld.1d.i8.trap
+    nvvm_suld_1d_i8_zero,                      // llvm.nvvm.suld.1d.i8.zero
+    nvvm_suld_1d_v2i16_clamp,                  // llvm.nvvm.suld.1d.v2i16.clamp
+    nvvm_suld_1d_v2i16_trap,                   // llvm.nvvm.suld.1d.v2i16.trap
+    nvvm_suld_1d_v2i16_zero,                   // llvm.nvvm.suld.1d.v2i16.zero
+    nvvm_suld_1d_v2i32_clamp,                  // llvm.nvvm.suld.1d.v2i32.clamp
+    nvvm_suld_1d_v2i32_trap,                   // llvm.nvvm.suld.1d.v2i32.trap
+    nvvm_suld_1d_v2i32_zero,                   // llvm.nvvm.suld.1d.v2i32.zero
+    nvvm_suld_1d_v2i64_clamp,                  // llvm.nvvm.suld.1d.v2i64.clamp
+    nvvm_suld_1d_v2i64_trap,                   // llvm.nvvm.suld.1d.v2i64.trap
+    nvvm_suld_1d_v2i64_zero,                   // llvm.nvvm.suld.1d.v2i64.zero
+    nvvm_suld_1d_v2i8_clamp,                   // llvm.nvvm.suld.1d.v2i8.clamp
+    nvvm_suld_1d_v2i8_trap,                    // llvm.nvvm.suld.1d.v2i8.trap
+    nvvm_suld_1d_v2i8_zero,                    // llvm.nvvm.suld.1d.v2i8.zero
+    nvvm_suld_1d_v4i16_clamp,                  // llvm.nvvm.suld.1d.v4i16.clamp
+    nvvm_suld_1d_v4i16_trap,                   // llvm.nvvm.suld.1d.v4i16.trap
+    nvvm_suld_1d_v4i16_zero,                   // llvm.nvvm.suld.1d.v4i16.zero
+    nvvm_suld_1d_v4i32_clamp,                  // llvm.nvvm.suld.1d.v4i32.clamp
+    nvvm_suld_1d_v4i32_trap,                   // llvm.nvvm.suld.1d.v4i32.trap
+    nvvm_suld_1d_v4i32_zero,                   // llvm.nvvm.suld.1d.v4i32.zero
+    nvvm_suld_1d_v4i8_clamp,                   // llvm.nvvm.suld.1d.v4i8.clamp
+    nvvm_suld_1d_v4i8_trap,                    // llvm.nvvm.suld.1d.v4i8.trap
+    nvvm_suld_1d_v4i8_zero,                    // llvm.nvvm.suld.1d.v4i8.zero
+    nvvm_suld_2d_array_i16_clamp,              // llvm.nvvm.suld.2d.array.i16.clamp
+    nvvm_suld_2d_array_i16_trap,               // llvm.nvvm.suld.2d.array.i16.trap
+    nvvm_suld_2d_array_i16_zero,               // llvm.nvvm.suld.2d.array.i16.zero
+    nvvm_suld_2d_array_i32_clamp,              // llvm.nvvm.suld.2d.array.i32.clamp
+    nvvm_suld_2d_array_i32_trap,               // llvm.nvvm.suld.2d.array.i32.trap
+    nvvm_suld_2d_array_i32_zero,               // llvm.nvvm.suld.2d.array.i32.zero
+    nvvm_suld_2d_array_i64_clamp,              // llvm.nvvm.suld.2d.array.i64.clamp
+    nvvm_suld_2d_array_i64_trap,               // llvm.nvvm.suld.2d.array.i64.trap
+    nvvm_suld_2d_array_i64_zero,               // llvm.nvvm.suld.2d.array.i64.zero
+    nvvm_suld_2d_array_i8_clamp,               // llvm.nvvm.suld.2d.array.i8.clamp
+    nvvm_suld_2d_array_i8_trap,                // llvm.nvvm.suld.2d.array.i8.trap
+    nvvm_suld_2d_array_i8_zero,                // llvm.nvvm.suld.2d.array.i8.zero
+    nvvm_suld_2d_array_v2i16_clamp,            // llvm.nvvm.suld.2d.array.v2i16.clamp
+    nvvm_suld_2d_array_v2i16_trap,             // llvm.nvvm.suld.2d.array.v2i16.trap
+    nvvm_suld_2d_array_v2i16_zero,             // llvm.nvvm.suld.2d.array.v2i16.zero
+    nvvm_suld_2d_array_v2i32_clamp,            // llvm.nvvm.suld.2d.array.v2i32.clamp
+    nvvm_suld_2d_array_v2i32_trap,             // llvm.nvvm.suld.2d.array.v2i32.trap
+    nvvm_suld_2d_array_v2i32_zero,             // llvm.nvvm.suld.2d.array.v2i32.zero
+    nvvm_suld_2d_array_v2i64_clamp,            // llvm.nvvm.suld.2d.array.v2i64.clamp
+    nvvm_suld_2d_array_v2i64_trap,             // llvm.nvvm.suld.2d.array.v2i64.trap
+    nvvm_suld_2d_array_v2i64_zero,             // llvm.nvvm.suld.2d.array.v2i64.zero
+    nvvm_suld_2d_array_v2i8_clamp,             // llvm.nvvm.suld.2d.array.v2i8.clamp
+    nvvm_suld_2d_array_v2i8_trap,              // llvm.nvvm.suld.2d.array.v2i8.trap
+    nvvm_suld_2d_array_v2i8_zero,              // llvm.nvvm.suld.2d.array.v2i8.zero
+    nvvm_suld_2d_array_v4i16_clamp,            // llvm.nvvm.suld.2d.array.v4i16.clamp
+    nvvm_suld_2d_array_v4i16_trap,             // llvm.nvvm.suld.2d.array.v4i16.trap
+    nvvm_suld_2d_array_v4i16_zero,             // llvm.nvvm.suld.2d.array.v4i16.zero
+    nvvm_suld_2d_array_v4i32_clamp,            // llvm.nvvm.suld.2d.array.v4i32.clamp
+    nvvm_suld_2d_array_v4i32_trap,             // llvm.nvvm.suld.2d.array.v4i32.trap
+    nvvm_suld_2d_array_v4i32_zero,             // llvm.nvvm.suld.2d.array.v4i32.zero
+    nvvm_suld_2d_array_v4i8_clamp,             // llvm.nvvm.suld.2d.array.v4i8.clamp
+    nvvm_suld_2d_array_v4i8_trap,              // llvm.nvvm.suld.2d.array.v4i8.trap
+    nvvm_suld_2d_array_v4i8_zero,              // llvm.nvvm.suld.2d.array.v4i8.zero
+    nvvm_suld_2d_i16_clamp,                    // llvm.nvvm.suld.2d.i16.clamp
+    nvvm_suld_2d_i16_trap,                     // llvm.nvvm.suld.2d.i16.trap
+    nvvm_suld_2d_i16_zero,                     // llvm.nvvm.suld.2d.i16.zero
+    nvvm_suld_2d_i32_clamp,                    // llvm.nvvm.suld.2d.i32.clamp
+    nvvm_suld_2d_i32_trap,                     // llvm.nvvm.suld.2d.i32.trap
+    nvvm_suld_2d_i32_zero,                     // llvm.nvvm.suld.2d.i32.zero
+    nvvm_suld_2d_i64_clamp,                    // llvm.nvvm.suld.2d.i64.clamp
+    nvvm_suld_2d_i64_trap,                     // llvm.nvvm.suld.2d.i64.trap
+    nvvm_suld_2d_i64_zero,                     // llvm.nvvm.suld.2d.i64.zero
+    nvvm_suld_2d_i8_clamp,                     // llvm.nvvm.suld.2d.i8.clamp
+    nvvm_suld_2d_i8_trap,                      // llvm.nvvm.suld.2d.i8.trap
+    nvvm_suld_2d_i8_zero,                      // llvm.nvvm.suld.2d.i8.zero
+    nvvm_suld_2d_v2i16_clamp,                  // llvm.nvvm.suld.2d.v2i16.clamp
+    nvvm_suld_2d_v2i16_trap,                   // llvm.nvvm.suld.2d.v2i16.trap
+    nvvm_suld_2d_v2i16_zero,                   // llvm.nvvm.suld.2d.v2i16.zero
+    nvvm_suld_2d_v2i32_clamp,                  // llvm.nvvm.suld.2d.v2i32.clamp
+    nvvm_suld_2d_v2i32_trap,                   // llvm.nvvm.suld.2d.v2i32.trap
+    nvvm_suld_2d_v2i32_zero,                   // llvm.nvvm.suld.2d.v2i32.zero
+    nvvm_suld_2d_v2i64_clamp,                  // llvm.nvvm.suld.2d.v2i64.clamp
+    nvvm_suld_2d_v2i64_trap,                   // llvm.nvvm.suld.2d.v2i64.trap
+    nvvm_suld_2d_v2i64_zero,                   // llvm.nvvm.suld.2d.v2i64.zero
+    nvvm_suld_2d_v2i8_clamp,                   // llvm.nvvm.suld.2d.v2i8.clamp
+    nvvm_suld_2d_v2i8_trap,                    // llvm.nvvm.suld.2d.v2i8.trap
+    nvvm_suld_2d_v2i8_zero,                    // llvm.nvvm.suld.2d.v2i8.zero
+    nvvm_suld_2d_v4i16_clamp,                  // llvm.nvvm.suld.2d.v4i16.clamp
+    nvvm_suld_2d_v4i16_trap,                   // llvm.nvvm.suld.2d.v4i16.trap
+    nvvm_suld_2d_v4i16_zero,                   // llvm.nvvm.suld.2d.v4i16.zero
+    nvvm_suld_2d_v4i32_clamp,                  // llvm.nvvm.suld.2d.v4i32.clamp
+    nvvm_suld_2d_v4i32_trap,                   // llvm.nvvm.suld.2d.v4i32.trap
+    nvvm_suld_2d_v4i32_zero,                   // llvm.nvvm.suld.2d.v4i32.zero
+    nvvm_suld_2d_v4i8_clamp,                   // llvm.nvvm.suld.2d.v4i8.clamp
+    nvvm_suld_2d_v4i8_trap,                    // llvm.nvvm.suld.2d.v4i8.trap
+    nvvm_suld_2d_v4i8_zero,                    // llvm.nvvm.suld.2d.v4i8.zero
+    nvvm_suld_3d_i16_clamp,                    // llvm.nvvm.suld.3d.i16.clamp
+    nvvm_suld_3d_i16_trap,                     // llvm.nvvm.suld.3d.i16.trap
+    nvvm_suld_3d_i16_zero,                     // llvm.nvvm.suld.3d.i16.zero
+    nvvm_suld_3d_i32_clamp,                    // llvm.nvvm.suld.3d.i32.clamp
+    nvvm_suld_3d_i32_trap,                     // llvm.nvvm.suld.3d.i32.trap
+    nvvm_suld_3d_i32_zero,                     // llvm.nvvm.suld.3d.i32.zero
+    nvvm_suld_3d_i64_clamp,                    // llvm.nvvm.suld.3d.i64.clamp
+    nvvm_suld_3d_i64_trap,                     // llvm.nvvm.suld.3d.i64.trap
+    nvvm_suld_3d_i64_zero,                     // llvm.nvvm.suld.3d.i64.zero
+    nvvm_suld_3d_i8_clamp,                     // llvm.nvvm.suld.3d.i8.clamp
+    nvvm_suld_3d_i8_trap,                      // llvm.nvvm.suld.3d.i8.trap
+    nvvm_suld_3d_i8_zero,                      // llvm.nvvm.suld.3d.i8.zero
+    nvvm_suld_3d_v2i16_clamp,                  // llvm.nvvm.suld.3d.v2i16.clamp
+    nvvm_suld_3d_v2i16_trap,                   // llvm.nvvm.suld.3d.v2i16.trap
+    nvvm_suld_3d_v2i16_zero,                   // llvm.nvvm.suld.3d.v2i16.zero
+    nvvm_suld_3d_v2i32_clamp,                  // llvm.nvvm.suld.3d.v2i32.clamp
+    nvvm_suld_3d_v2i32_trap,                   // llvm.nvvm.suld.3d.v2i32.trap
+    nvvm_suld_3d_v2i32_zero,                   // llvm.nvvm.suld.3d.v2i32.zero
+    nvvm_suld_3d_v2i64_clamp,                  // llvm.nvvm.suld.3d.v2i64.clamp
+    nvvm_suld_3d_v2i64_trap,                   // llvm.nvvm.suld.3d.v2i64.trap
+    nvvm_suld_3d_v2i64_zero,                   // llvm.nvvm.suld.3d.v2i64.zero
+    nvvm_suld_3d_v2i8_clamp,                   // llvm.nvvm.suld.3d.v2i8.clamp
+    nvvm_suld_3d_v2i8_trap,                    // llvm.nvvm.suld.3d.v2i8.trap
+    nvvm_suld_3d_v2i8_zero,                    // llvm.nvvm.suld.3d.v2i8.zero
+    nvvm_suld_3d_v4i16_clamp,                  // llvm.nvvm.suld.3d.v4i16.clamp
+    nvvm_suld_3d_v4i16_trap,                   // llvm.nvvm.suld.3d.v4i16.trap
+    nvvm_suld_3d_v4i16_zero,                   // llvm.nvvm.suld.3d.v4i16.zero
+    nvvm_suld_3d_v4i32_clamp,                  // llvm.nvvm.suld.3d.v4i32.clamp
+    nvvm_suld_3d_v4i32_trap,                   // llvm.nvvm.suld.3d.v4i32.trap
+    nvvm_suld_3d_v4i32_zero,                   // llvm.nvvm.suld.3d.v4i32.zero
+    nvvm_suld_3d_v4i8_clamp,                   // llvm.nvvm.suld.3d.v4i8.clamp
+    nvvm_suld_3d_v4i8_trap,                    // llvm.nvvm.suld.3d.v4i8.trap
+    nvvm_suld_3d_v4i8_zero,                    // llvm.nvvm.suld.3d.v4i8.zero
+    nvvm_suq_array_size,                       // llvm.nvvm.suq.array.size
+    nvvm_suq_channel_data_type,                // llvm.nvvm.suq.channel.data.type
+    nvvm_suq_channel_order,                    // llvm.nvvm.suq.channel.order
+    nvvm_suq_depth,                            // llvm.nvvm.suq.depth
+    nvvm_suq_height,                           // llvm.nvvm.suq.height
+    nvvm_suq_width,                            // llvm.nvvm.suq.width
+    nvvm_sust_b_1d_array_i16_clamp,            // llvm.nvvm.sust.b.1d.array.i16.clamp
+    nvvm_sust_b_1d_array_i16_trap,             // llvm.nvvm.sust.b.1d.array.i16.trap
+    nvvm_sust_b_1d_array_i16_zero,             // llvm.nvvm.sust.b.1d.array.i16.zero
+    nvvm_sust_b_1d_array_i32_clamp,            // llvm.nvvm.sust.b.1d.array.i32.clamp
+    nvvm_sust_b_1d_array_i32_trap,             // llvm.nvvm.sust.b.1d.array.i32.trap
+    nvvm_sust_b_1d_array_i32_zero,             // llvm.nvvm.sust.b.1d.array.i32.zero
+    nvvm_sust_b_1d_array_i64_clamp,            // llvm.nvvm.sust.b.1d.array.i64.clamp
+    nvvm_sust_b_1d_array_i64_trap,             // llvm.nvvm.sust.b.1d.array.i64.trap
+    nvvm_sust_b_1d_array_i64_zero,             // llvm.nvvm.sust.b.1d.array.i64.zero
+    nvvm_sust_b_1d_array_i8_clamp,             // llvm.nvvm.sust.b.1d.array.i8.clamp
+    nvvm_sust_b_1d_array_i8_trap,              // llvm.nvvm.sust.b.1d.array.i8.trap
+    nvvm_sust_b_1d_array_i8_zero,              // llvm.nvvm.sust.b.1d.array.i8.zero
+    nvvm_sust_b_1d_array_v2i16_clamp,          // llvm.nvvm.sust.b.1d.array.v2i16.clamp
+    nvvm_sust_b_1d_array_v2i16_trap,           // llvm.nvvm.sust.b.1d.array.v2i16.trap
+    nvvm_sust_b_1d_array_v2i16_zero,           // llvm.nvvm.sust.b.1d.array.v2i16.zero
+    nvvm_sust_b_1d_array_v2i32_clamp,          // llvm.nvvm.sust.b.1d.array.v2i32.clamp
+    nvvm_sust_b_1d_array_v2i32_trap,           // llvm.nvvm.sust.b.1d.array.v2i32.trap
+    nvvm_sust_b_1d_array_v2i32_zero,           // llvm.nvvm.sust.b.1d.array.v2i32.zero
+    nvvm_sust_b_1d_array_v2i64_clamp,          // llvm.nvvm.sust.b.1d.array.v2i64.clamp
+    nvvm_sust_b_1d_array_v2i64_trap,           // llvm.nvvm.sust.b.1d.array.v2i64.trap
+    nvvm_sust_b_1d_array_v2i64_zero,           // llvm.nvvm.sust.b.1d.array.v2i64.zero
+    nvvm_sust_b_1d_array_v2i8_clamp,           // llvm.nvvm.sust.b.1d.array.v2i8.clamp
+    nvvm_sust_b_1d_array_v2i8_trap,            // llvm.nvvm.sust.b.1d.array.v2i8.trap
+    nvvm_sust_b_1d_array_v2i8_zero,            // llvm.nvvm.sust.b.1d.array.v2i8.zero
+    nvvm_sust_b_1d_array_v4i16_clamp,          // llvm.nvvm.sust.b.1d.array.v4i16.clamp
+    nvvm_sust_b_1d_array_v4i16_trap,           // llvm.nvvm.sust.b.1d.array.v4i16.trap
+    nvvm_sust_b_1d_array_v4i16_zero,           // llvm.nvvm.sust.b.1d.array.v4i16.zero
+    nvvm_sust_b_1d_array_v4i32_clamp,          // llvm.nvvm.sust.b.1d.array.v4i32.clamp
+    nvvm_sust_b_1d_array_v4i32_trap,           // llvm.nvvm.sust.b.1d.array.v4i32.trap
+    nvvm_sust_b_1d_array_v4i32_zero,           // llvm.nvvm.sust.b.1d.array.v4i32.zero
+    nvvm_sust_b_1d_array_v4i8_clamp,           // llvm.nvvm.sust.b.1d.array.v4i8.clamp
+    nvvm_sust_b_1d_array_v4i8_trap,            // llvm.nvvm.sust.b.1d.array.v4i8.trap
+    nvvm_sust_b_1d_array_v4i8_zero,            // llvm.nvvm.sust.b.1d.array.v4i8.zero
+    nvvm_sust_b_1d_i16_clamp,                  // llvm.nvvm.sust.b.1d.i16.clamp
+    nvvm_sust_b_1d_i16_trap,                   // llvm.nvvm.sust.b.1d.i16.trap
+    nvvm_sust_b_1d_i16_zero,                   // llvm.nvvm.sust.b.1d.i16.zero
+    nvvm_sust_b_1d_i32_clamp,                  // llvm.nvvm.sust.b.1d.i32.clamp
+    nvvm_sust_b_1d_i32_trap,                   // llvm.nvvm.sust.b.1d.i32.trap
+    nvvm_sust_b_1d_i32_zero,                   // llvm.nvvm.sust.b.1d.i32.zero
+    nvvm_sust_b_1d_i64_clamp,                  // llvm.nvvm.sust.b.1d.i64.clamp
+    nvvm_sust_b_1d_i64_trap,                   // llvm.nvvm.sust.b.1d.i64.trap
+    nvvm_sust_b_1d_i64_zero,                   // llvm.nvvm.sust.b.1d.i64.zero
+    nvvm_sust_b_1d_i8_clamp,                   // llvm.nvvm.sust.b.1d.i8.clamp
+    nvvm_sust_b_1d_i8_trap,                    // llvm.nvvm.sust.b.1d.i8.trap
+    nvvm_sust_b_1d_i8_zero,                    // llvm.nvvm.sust.b.1d.i8.zero
+    nvvm_sust_b_1d_v2i16_clamp,                // llvm.nvvm.sust.b.1d.v2i16.clamp
+    nvvm_sust_b_1d_v2i16_trap,                 // llvm.nvvm.sust.b.1d.v2i16.trap
+    nvvm_sust_b_1d_v2i16_zero,                 // llvm.nvvm.sust.b.1d.v2i16.zero
+    nvvm_sust_b_1d_v2i32_clamp,                // llvm.nvvm.sust.b.1d.v2i32.clamp
+    nvvm_sust_b_1d_v2i32_trap,                 // llvm.nvvm.sust.b.1d.v2i32.trap
+    nvvm_sust_b_1d_v2i32_zero,                 // llvm.nvvm.sust.b.1d.v2i32.zero
+    nvvm_sust_b_1d_v2i64_clamp,                // llvm.nvvm.sust.b.1d.v2i64.clamp
+    nvvm_sust_b_1d_v2i64_trap,                 // llvm.nvvm.sust.b.1d.v2i64.trap
+    nvvm_sust_b_1d_v2i64_zero,                 // llvm.nvvm.sust.b.1d.v2i64.zero
+    nvvm_sust_b_1d_v2i8_clamp,                 // llvm.nvvm.sust.b.1d.v2i8.clamp
+    nvvm_sust_b_1d_v2i8_trap,                  // llvm.nvvm.sust.b.1d.v2i8.trap
+    nvvm_sust_b_1d_v2i8_zero,                  // llvm.nvvm.sust.b.1d.v2i8.zero
+    nvvm_sust_b_1d_v4i16_clamp,                // llvm.nvvm.sust.b.1d.v4i16.clamp
+    nvvm_sust_b_1d_v4i16_trap,                 // llvm.nvvm.sust.b.1d.v4i16.trap
+    nvvm_sust_b_1d_v4i16_zero,                 // llvm.nvvm.sust.b.1d.v4i16.zero
+    nvvm_sust_b_1d_v4i32_clamp,                // llvm.nvvm.sust.b.1d.v4i32.clamp
+    nvvm_sust_b_1d_v4i32_trap,                 // llvm.nvvm.sust.b.1d.v4i32.trap
+    nvvm_sust_b_1d_v4i32_zero,                 // llvm.nvvm.sust.b.1d.v4i32.zero
+    nvvm_sust_b_1d_v4i8_clamp,                 // llvm.nvvm.sust.b.1d.v4i8.clamp
+    nvvm_sust_b_1d_v4i8_trap,                  // llvm.nvvm.sust.b.1d.v4i8.trap
+    nvvm_sust_b_1d_v4i8_zero,                  // llvm.nvvm.sust.b.1d.v4i8.zero
+    nvvm_sust_b_2d_array_i16_clamp,            // llvm.nvvm.sust.b.2d.array.i16.clamp
+    nvvm_sust_b_2d_array_i16_trap,             // llvm.nvvm.sust.b.2d.array.i16.trap
+    nvvm_sust_b_2d_array_i16_zero,             // llvm.nvvm.sust.b.2d.array.i16.zero
+    nvvm_sust_b_2d_array_i32_clamp,            // llvm.nvvm.sust.b.2d.array.i32.clamp
+    nvvm_sust_b_2d_array_i32_trap,             // llvm.nvvm.sust.b.2d.array.i32.trap
+    nvvm_sust_b_2d_array_i32_zero,             // llvm.nvvm.sust.b.2d.array.i32.zero
+    nvvm_sust_b_2d_array_i64_clamp,            // llvm.nvvm.sust.b.2d.array.i64.clamp
+    nvvm_sust_b_2d_array_i64_trap,             // llvm.nvvm.sust.b.2d.array.i64.trap
+    nvvm_sust_b_2d_array_i64_zero,             // llvm.nvvm.sust.b.2d.array.i64.zero
+    nvvm_sust_b_2d_array_i8_clamp,             // llvm.nvvm.sust.b.2d.array.i8.clamp
+    nvvm_sust_b_2d_array_i8_trap,              // llvm.nvvm.sust.b.2d.array.i8.trap
+    nvvm_sust_b_2d_array_i8_zero,              // llvm.nvvm.sust.b.2d.array.i8.zero
+    nvvm_sust_b_2d_array_v2i16_clamp,          // llvm.nvvm.sust.b.2d.array.v2i16.clamp
+    nvvm_sust_b_2d_array_v2i16_trap,           // llvm.nvvm.sust.b.2d.array.v2i16.trap
+    nvvm_sust_b_2d_array_v2i16_zero,           // llvm.nvvm.sust.b.2d.array.v2i16.zero
+    nvvm_sust_b_2d_array_v2i32_clamp,          // llvm.nvvm.sust.b.2d.array.v2i32.clamp
+    nvvm_sust_b_2d_array_v2i32_trap,           // llvm.nvvm.sust.b.2d.array.v2i32.trap
+    nvvm_sust_b_2d_array_v2i32_zero,           // llvm.nvvm.sust.b.2d.array.v2i32.zero
+    nvvm_sust_b_2d_array_v2i64_clamp,          // llvm.nvvm.sust.b.2d.array.v2i64.clamp
+    nvvm_sust_b_2d_array_v2i64_trap,           // llvm.nvvm.sust.b.2d.array.v2i64.trap
+    nvvm_sust_b_2d_array_v2i64_zero,           // llvm.nvvm.sust.b.2d.array.v2i64.zero
+    nvvm_sust_b_2d_array_v2i8_clamp,           // llvm.nvvm.sust.b.2d.array.v2i8.clamp
+    nvvm_sust_b_2d_array_v2i8_trap,            // llvm.nvvm.sust.b.2d.array.v2i8.trap
+    nvvm_sust_b_2d_array_v2i8_zero,            // llvm.nvvm.sust.b.2d.array.v2i8.zero
+    nvvm_sust_b_2d_array_v4i16_clamp,          // llvm.nvvm.sust.b.2d.array.v4i16.clamp
+    nvvm_sust_b_2d_array_v4i16_trap,           // llvm.nvvm.sust.b.2d.array.v4i16.trap
+    nvvm_sust_b_2d_array_v4i16_zero,           // llvm.nvvm.sust.b.2d.array.v4i16.zero
+    nvvm_sust_b_2d_array_v4i32_clamp,          // llvm.nvvm.sust.b.2d.array.v4i32.clamp
+    nvvm_sust_b_2d_array_v4i32_trap,           // llvm.nvvm.sust.b.2d.array.v4i32.trap
+    nvvm_sust_b_2d_array_v4i32_zero,           // llvm.nvvm.sust.b.2d.array.v4i32.zero
+    nvvm_sust_b_2d_array_v4i8_clamp,           // llvm.nvvm.sust.b.2d.array.v4i8.clamp
+    nvvm_sust_b_2d_array_v4i8_trap,            // llvm.nvvm.sust.b.2d.array.v4i8.trap
+    nvvm_sust_b_2d_array_v4i8_zero,            // llvm.nvvm.sust.b.2d.array.v4i8.zero
+    nvvm_sust_b_2d_i16_clamp,                  // llvm.nvvm.sust.b.2d.i16.clamp
+    nvvm_sust_b_2d_i16_trap,                   // llvm.nvvm.sust.b.2d.i16.trap
+    nvvm_sust_b_2d_i16_zero,                   // llvm.nvvm.sust.b.2d.i16.zero
+    nvvm_sust_b_2d_i32_clamp,                  // llvm.nvvm.sust.b.2d.i32.clamp
+    nvvm_sust_b_2d_i32_trap,                   // llvm.nvvm.sust.b.2d.i32.trap
+    nvvm_sust_b_2d_i32_zero,                   // llvm.nvvm.sust.b.2d.i32.zero
+    nvvm_sust_b_2d_i64_clamp,                  // llvm.nvvm.sust.b.2d.i64.clamp
+    nvvm_sust_b_2d_i64_trap,                   // llvm.nvvm.sust.b.2d.i64.trap
+    nvvm_sust_b_2d_i64_zero,                   // llvm.nvvm.sust.b.2d.i64.zero
+    nvvm_sust_b_2d_i8_clamp,                   // llvm.nvvm.sust.b.2d.i8.clamp
+    nvvm_sust_b_2d_i8_trap,                    // llvm.nvvm.sust.b.2d.i8.trap
+    nvvm_sust_b_2d_i8_zero,                    // llvm.nvvm.sust.b.2d.i8.zero
+    nvvm_sust_b_2d_v2i16_clamp,                // llvm.nvvm.sust.b.2d.v2i16.clamp
+    nvvm_sust_b_2d_v2i16_trap,                 // llvm.nvvm.sust.b.2d.v2i16.trap
+    nvvm_sust_b_2d_v2i16_zero,                 // llvm.nvvm.sust.b.2d.v2i16.zero
+    nvvm_sust_b_2d_v2i32_clamp,                // llvm.nvvm.sust.b.2d.v2i32.clamp
+    nvvm_sust_b_2d_v2i32_trap,                 // llvm.nvvm.sust.b.2d.v2i32.trap
+    nvvm_sust_b_2d_v2i32_zero,                 // llvm.nvvm.sust.b.2d.v2i32.zero
+    nvvm_sust_b_2d_v2i64_clamp,                // llvm.nvvm.sust.b.2d.v2i64.clamp
+    nvvm_sust_b_2d_v2i64_trap,                 // llvm.nvvm.sust.b.2d.v2i64.trap
+    nvvm_sust_b_2d_v2i64_zero,                 // llvm.nvvm.sust.b.2d.v2i64.zero
+    nvvm_sust_b_2d_v2i8_clamp,                 // llvm.nvvm.sust.b.2d.v2i8.clamp
+    nvvm_sust_b_2d_v2i8_trap,                  // llvm.nvvm.sust.b.2d.v2i8.trap
+    nvvm_sust_b_2d_v2i8_zero,                  // llvm.nvvm.sust.b.2d.v2i8.zero
+    nvvm_sust_b_2d_v4i16_clamp,                // llvm.nvvm.sust.b.2d.v4i16.clamp
+    nvvm_sust_b_2d_v4i16_trap,                 // llvm.nvvm.sust.b.2d.v4i16.trap
+    nvvm_sust_b_2d_v4i16_zero,                 // llvm.nvvm.sust.b.2d.v4i16.zero
+    nvvm_sust_b_2d_v4i32_clamp,                // llvm.nvvm.sust.b.2d.v4i32.clamp
+    nvvm_sust_b_2d_v4i32_trap,                 // llvm.nvvm.sust.b.2d.v4i32.trap
+    nvvm_sust_b_2d_v4i32_zero,                 // llvm.nvvm.sust.b.2d.v4i32.zero
+    nvvm_sust_b_2d_v4i8_clamp,                 // llvm.nvvm.sust.b.2d.v4i8.clamp
+    nvvm_sust_b_2d_v4i8_trap,                  // llvm.nvvm.sust.b.2d.v4i8.trap
+    nvvm_sust_b_2d_v4i8_zero,                  // llvm.nvvm.sust.b.2d.v4i8.zero
+    nvvm_sust_b_3d_i16_clamp,                  // llvm.nvvm.sust.b.3d.i16.clamp
+    nvvm_sust_b_3d_i16_trap,                   // llvm.nvvm.sust.b.3d.i16.trap
+    nvvm_sust_b_3d_i16_zero,                   // llvm.nvvm.sust.b.3d.i16.zero
+    nvvm_sust_b_3d_i32_clamp,                  // llvm.nvvm.sust.b.3d.i32.clamp
+    nvvm_sust_b_3d_i32_trap,                   // llvm.nvvm.sust.b.3d.i32.trap
+    nvvm_sust_b_3d_i32_zero,                   // llvm.nvvm.sust.b.3d.i32.zero
+    nvvm_sust_b_3d_i64_clamp,                  // llvm.nvvm.sust.b.3d.i64.clamp
+    nvvm_sust_b_3d_i64_trap,                   // llvm.nvvm.sust.b.3d.i64.trap
+    nvvm_sust_b_3d_i64_zero,                   // llvm.nvvm.sust.b.3d.i64.zero
+    nvvm_sust_b_3d_i8_clamp,                   // llvm.nvvm.sust.b.3d.i8.clamp
+    nvvm_sust_b_3d_i8_trap,                    // llvm.nvvm.sust.b.3d.i8.trap
+    nvvm_sust_b_3d_i8_zero,                    // llvm.nvvm.sust.b.3d.i8.zero
+    nvvm_sust_b_3d_v2i16_clamp,                // llvm.nvvm.sust.b.3d.v2i16.clamp
+    nvvm_sust_b_3d_v2i16_trap,                 // llvm.nvvm.sust.b.3d.v2i16.trap
+    nvvm_sust_b_3d_v2i16_zero,                 // llvm.nvvm.sust.b.3d.v2i16.zero
+    nvvm_sust_b_3d_v2i32_clamp,                // llvm.nvvm.sust.b.3d.v2i32.clamp
+    nvvm_sust_b_3d_v2i32_trap,                 // llvm.nvvm.sust.b.3d.v2i32.trap
+    nvvm_sust_b_3d_v2i32_zero,                 // llvm.nvvm.sust.b.3d.v2i32.zero
+    nvvm_sust_b_3d_v2i64_clamp,                // llvm.nvvm.sust.b.3d.v2i64.clamp
+    nvvm_sust_b_3d_v2i64_trap,                 // llvm.nvvm.sust.b.3d.v2i64.trap
+    nvvm_sust_b_3d_v2i64_zero,                 // llvm.nvvm.sust.b.3d.v2i64.zero
+    nvvm_sust_b_3d_v2i8_clamp,                 // llvm.nvvm.sust.b.3d.v2i8.clamp
+    nvvm_sust_b_3d_v2i8_trap,                  // llvm.nvvm.sust.b.3d.v2i8.trap
+    nvvm_sust_b_3d_v2i8_zero,                  // llvm.nvvm.sust.b.3d.v2i8.zero
+    nvvm_sust_b_3d_v4i16_clamp,                // llvm.nvvm.sust.b.3d.v4i16.clamp
+    nvvm_sust_b_3d_v4i16_trap,                 // llvm.nvvm.sust.b.3d.v4i16.trap
+    nvvm_sust_b_3d_v4i16_zero,                 // llvm.nvvm.sust.b.3d.v4i16.zero
+    nvvm_sust_b_3d_v4i32_clamp,                // llvm.nvvm.sust.b.3d.v4i32.clamp
+    nvvm_sust_b_3d_v4i32_trap,                 // llvm.nvvm.sust.b.3d.v4i32.trap
+    nvvm_sust_b_3d_v4i32_zero,                 // llvm.nvvm.sust.b.3d.v4i32.zero
+    nvvm_sust_b_3d_v4i8_clamp,                 // llvm.nvvm.sust.b.3d.v4i8.clamp
+    nvvm_sust_b_3d_v4i8_trap,                  // llvm.nvvm.sust.b.3d.v4i8.trap
+    nvvm_sust_b_3d_v4i8_zero,                  // llvm.nvvm.sust.b.3d.v4i8.zero
+    nvvm_sust_p_1d_array_i16_trap,             // llvm.nvvm.sust.p.1d.array.i16.trap
+    nvvm_sust_p_1d_array_i32_trap,             // llvm.nvvm.sust.p.1d.array.i32.trap
+    nvvm_sust_p_1d_array_i8_trap,              // llvm.nvvm.sust.p.1d.array.i8.trap
+    nvvm_sust_p_1d_array_v2i16_trap,           // llvm.nvvm.sust.p.1d.array.v2i16.trap
+    nvvm_sust_p_1d_array_v2i32_trap,           // llvm.nvvm.sust.p.1d.array.v2i32.trap
+    nvvm_sust_p_1d_array_v2i8_trap,            // llvm.nvvm.sust.p.1d.array.v2i8.trap
+    nvvm_sust_p_1d_array_v4i16_trap,           // llvm.nvvm.sust.p.1d.array.v4i16.trap
+    nvvm_sust_p_1d_array_v4i32_trap,           // llvm.nvvm.sust.p.1d.array.v4i32.trap
+    nvvm_sust_p_1d_array_v4i8_trap,            // llvm.nvvm.sust.p.1d.array.v4i8.trap
+    nvvm_sust_p_1d_i16_trap,                   // llvm.nvvm.sust.p.1d.i16.trap
+    nvvm_sust_p_1d_i32_trap,                   // llvm.nvvm.sust.p.1d.i32.trap
+    nvvm_sust_p_1d_i8_trap,                    // llvm.nvvm.sust.p.1d.i8.trap
+    nvvm_sust_p_1d_v2i16_trap,                 // llvm.nvvm.sust.p.1d.v2i16.trap
+    nvvm_sust_p_1d_v2i32_trap,                 // llvm.nvvm.sust.p.1d.v2i32.trap
+    nvvm_sust_p_1d_v2i8_trap,                  // llvm.nvvm.sust.p.1d.v2i8.trap
+    nvvm_sust_p_1d_v4i16_trap,                 // llvm.nvvm.sust.p.1d.v4i16.trap
+    nvvm_sust_p_1d_v4i32_trap,                 // llvm.nvvm.sust.p.1d.v4i32.trap
+    nvvm_sust_p_1d_v4i8_trap,                  // llvm.nvvm.sust.p.1d.v4i8.trap
+    nvvm_sust_p_2d_array_i16_trap,             // llvm.nvvm.sust.p.2d.array.i16.trap
+    nvvm_sust_p_2d_array_i32_trap,             // llvm.nvvm.sust.p.2d.array.i32.trap
+    nvvm_sust_p_2d_array_i8_trap,              // llvm.nvvm.sust.p.2d.array.i8.trap
+    nvvm_sust_p_2d_array_v2i16_trap,           // llvm.nvvm.sust.p.2d.array.v2i16.trap
+    nvvm_sust_p_2d_array_v2i32_trap,           // llvm.nvvm.sust.p.2d.array.v2i32.trap
+    nvvm_sust_p_2d_array_v2i8_trap,            // llvm.nvvm.sust.p.2d.array.v2i8.trap
+    nvvm_sust_p_2d_array_v4i16_trap,           // llvm.nvvm.sust.p.2d.array.v4i16.trap
+    nvvm_sust_p_2d_array_v4i32_trap,           // llvm.nvvm.sust.p.2d.array.v4i32.trap
+    nvvm_sust_p_2d_array_v4i8_trap,            // llvm.nvvm.sust.p.2d.array.v4i8.trap
+    nvvm_sust_p_2d_i16_trap,                   // llvm.nvvm.sust.p.2d.i16.trap
+    nvvm_sust_p_2d_i32_trap,                   // llvm.nvvm.sust.p.2d.i32.trap
+    nvvm_sust_p_2d_i8_trap,                    // llvm.nvvm.sust.p.2d.i8.trap
+    nvvm_sust_p_2d_v2i16_trap,                 // llvm.nvvm.sust.p.2d.v2i16.trap
+    nvvm_sust_p_2d_v2i32_trap,                 // llvm.nvvm.sust.p.2d.v2i32.trap
+    nvvm_sust_p_2d_v2i8_trap,                  // llvm.nvvm.sust.p.2d.v2i8.trap
+    nvvm_sust_p_2d_v4i16_trap,                 // llvm.nvvm.sust.p.2d.v4i16.trap
+    nvvm_sust_p_2d_v4i32_trap,                 // llvm.nvvm.sust.p.2d.v4i32.trap
+    nvvm_sust_p_2d_v4i8_trap,                  // llvm.nvvm.sust.p.2d.v4i8.trap
+    nvvm_sust_p_3d_i16_trap,                   // llvm.nvvm.sust.p.3d.i16.trap
+    nvvm_sust_p_3d_i32_trap,                   // llvm.nvvm.sust.p.3d.i32.trap
+    nvvm_sust_p_3d_i8_trap,                    // llvm.nvvm.sust.p.3d.i8.trap
+    nvvm_sust_p_3d_v2i16_trap,                 // llvm.nvvm.sust.p.3d.v2i16.trap
+    nvvm_sust_p_3d_v2i32_trap,                 // llvm.nvvm.sust.p.3d.v2i32.trap
+    nvvm_sust_p_3d_v2i8_trap,                  // llvm.nvvm.sust.p.3d.v2i8.trap
+    nvvm_sust_p_3d_v4i16_trap,                 // llvm.nvvm.sust.p.3d.v4i16.trap
+    nvvm_sust_p_3d_v4i32_trap,                 // llvm.nvvm.sust.p.3d.v4i32.trap
+    nvvm_sust_p_3d_v4i8_trap,                  // llvm.nvvm.sust.p.3d.v4i8.trap
+    nvvm_swap_lo_hi_b64,                       // llvm.nvvm.swap.lo.hi.b64
+    nvvm_tex_1d_array_grad_v4f32_f32,          // llvm.nvvm.tex.1d.array.grad.v4f32.f32
+    nvvm_tex_1d_array_grad_v4s32_f32,          // llvm.nvvm.tex.1d.array.grad.v4s32.f32
+    nvvm_tex_1d_array_grad_v4u32_f32,          // llvm.nvvm.tex.1d.array.grad.v4u32.f32
+    nvvm_tex_1d_array_level_v4f32_f32,         // llvm.nvvm.tex.1d.array.level.v4f32.f32
+    nvvm_tex_1d_array_level_v4s32_f32,         // llvm.nvvm.tex.1d.array.level.v4s32.f32
+    nvvm_tex_1d_array_level_v4u32_f32,         // llvm.nvvm.tex.1d.array.level.v4u32.f32
+    nvvm_tex_1d_array_v4f32_f32,               // llvm.nvvm.tex.1d.array.v4f32.f32
+    nvvm_tex_1d_array_v4f32_s32,               // llvm.nvvm.tex.1d.array.v4f32.s32
+    nvvm_tex_1d_array_v4s32_f32,               // llvm.nvvm.tex.1d.array.v4s32.f32
+    nvvm_tex_1d_array_v4s32_s32,               // llvm.nvvm.tex.1d.array.v4s32.s32
+    nvvm_tex_1d_array_v4u32_f32,               // llvm.nvvm.tex.1d.array.v4u32.f32
+    nvvm_tex_1d_array_v4u32_s32,               // llvm.nvvm.tex.1d.array.v4u32.s32
+    nvvm_tex_1d_grad_v4f32_f32,                // llvm.nvvm.tex.1d.grad.v4f32.f32
+    nvvm_tex_1d_grad_v4s32_f32,                // llvm.nvvm.tex.1d.grad.v4s32.f32
+    nvvm_tex_1d_grad_v4u32_f32,                // llvm.nvvm.tex.1d.grad.v4u32.f32
+    nvvm_tex_1d_level_v4f32_f32,               // llvm.nvvm.tex.1d.level.v4f32.f32
+    nvvm_tex_1d_level_v4s32_f32,               // llvm.nvvm.tex.1d.level.v4s32.f32
+    nvvm_tex_1d_level_v4u32_f32,               // llvm.nvvm.tex.1d.level.v4u32.f32
+    nvvm_tex_1d_v4f32_f32,                     // llvm.nvvm.tex.1d.v4f32.f32
+    nvvm_tex_1d_v4f32_s32,                     // llvm.nvvm.tex.1d.v4f32.s32
+    nvvm_tex_1d_v4s32_f32,                     // llvm.nvvm.tex.1d.v4s32.f32
+    nvvm_tex_1d_v4s32_s32,                     // llvm.nvvm.tex.1d.v4s32.s32
+    nvvm_tex_1d_v4u32_f32,                     // llvm.nvvm.tex.1d.v4u32.f32
+    nvvm_tex_1d_v4u32_s32,                     // llvm.nvvm.tex.1d.v4u32.s32
+    nvvm_tex_2d_array_grad_v4f32_f32,          // llvm.nvvm.tex.2d.array.grad.v4f32.f32
+    nvvm_tex_2d_array_grad_v4s32_f32,          // llvm.nvvm.tex.2d.array.grad.v4s32.f32
+    nvvm_tex_2d_array_grad_v4u32_f32,          // llvm.nvvm.tex.2d.array.grad.v4u32.f32
+    nvvm_tex_2d_array_level_v4f32_f32,         // llvm.nvvm.tex.2d.array.level.v4f32.f32
+    nvvm_tex_2d_array_level_v4s32_f32,         // llvm.nvvm.tex.2d.array.level.v4s32.f32
+    nvvm_tex_2d_array_level_v4u32_f32,         // llvm.nvvm.tex.2d.array.level.v4u32.f32
+    nvvm_tex_2d_array_v4f32_f32,               // llvm.nvvm.tex.2d.array.v4f32.f32
+    nvvm_tex_2d_array_v4f32_s32,               // llvm.nvvm.tex.2d.array.v4f32.s32
+    nvvm_tex_2d_array_v4s32_f32,               // llvm.nvvm.tex.2d.array.v4s32.f32
+    nvvm_tex_2d_array_v4s32_s32,               // llvm.nvvm.tex.2d.array.v4s32.s32
+    nvvm_tex_2d_array_v4u32_f32,               // llvm.nvvm.tex.2d.array.v4u32.f32
+    nvvm_tex_2d_array_v4u32_s32,               // llvm.nvvm.tex.2d.array.v4u32.s32
+    nvvm_tex_2d_grad_v4f32_f32,                // llvm.nvvm.tex.2d.grad.v4f32.f32
+    nvvm_tex_2d_grad_v4s32_f32,                // llvm.nvvm.tex.2d.grad.v4s32.f32
+    nvvm_tex_2d_grad_v4u32_f32,                // llvm.nvvm.tex.2d.grad.v4u32.f32
+    nvvm_tex_2d_level_v4f32_f32,               // llvm.nvvm.tex.2d.level.v4f32.f32
+    nvvm_tex_2d_level_v4s32_f32,               // llvm.nvvm.tex.2d.level.v4s32.f32
+    nvvm_tex_2d_level_v4u32_f32,               // llvm.nvvm.tex.2d.level.v4u32.f32
+    nvvm_tex_2d_v4f32_f32,                     // llvm.nvvm.tex.2d.v4f32.f32
+    nvvm_tex_2d_v4f32_s32,                     // llvm.nvvm.tex.2d.v4f32.s32
+    nvvm_tex_2d_v4s32_f32,                     // llvm.nvvm.tex.2d.v4s32.f32
+    nvvm_tex_2d_v4s32_s32,                     // llvm.nvvm.tex.2d.v4s32.s32
+    nvvm_tex_2d_v4u32_f32,                     // llvm.nvvm.tex.2d.v4u32.f32
+    nvvm_tex_2d_v4u32_s32,                     // llvm.nvvm.tex.2d.v4u32.s32
+    nvvm_tex_3d_grad_v4f32_f32,                // llvm.nvvm.tex.3d.grad.v4f32.f32
+    nvvm_tex_3d_grad_v4s32_f32,                // llvm.nvvm.tex.3d.grad.v4s32.f32
+    nvvm_tex_3d_grad_v4u32_f32,                // llvm.nvvm.tex.3d.grad.v4u32.f32
+    nvvm_tex_3d_level_v4f32_f32,               // llvm.nvvm.tex.3d.level.v4f32.f32
+    nvvm_tex_3d_level_v4s32_f32,               // llvm.nvvm.tex.3d.level.v4s32.f32
+    nvvm_tex_3d_level_v4u32_f32,               // llvm.nvvm.tex.3d.level.v4u32.f32
+    nvvm_tex_3d_v4f32_f32,                     // llvm.nvvm.tex.3d.v4f32.f32
+    nvvm_tex_3d_v4f32_s32,                     // llvm.nvvm.tex.3d.v4f32.s32
+    nvvm_tex_3d_v4s32_f32,                     // llvm.nvvm.tex.3d.v4s32.f32
+    nvvm_tex_3d_v4s32_s32,                     // llvm.nvvm.tex.3d.v4s32.s32
+    nvvm_tex_3d_v4u32_f32,                     // llvm.nvvm.tex.3d.v4u32.f32
+    nvvm_tex_3d_v4u32_s32,                     // llvm.nvvm.tex.3d.v4u32.s32
+    nvvm_tex_cube_array_level_v4f32_f32,       // llvm.nvvm.tex.cube.array.level.v4f32.f32
+    nvvm_tex_cube_array_level_v4s32_f32,       // llvm.nvvm.tex.cube.array.level.v4s32.f32
+    nvvm_tex_cube_array_level_v4u32_f32,       // llvm.nvvm.tex.cube.array.level.v4u32.f32
+    nvvm_tex_cube_array_v4f32_f32,             // llvm.nvvm.tex.cube.array.v4f32.f32
+    nvvm_tex_cube_array_v4s32_f32,             // llvm.nvvm.tex.cube.array.v4s32.f32
+    nvvm_tex_cube_array_v4u32_f32,             // llvm.nvvm.tex.cube.array.v4u32.f32
+    nvvm_tex_cube_level_v4f32_f32,             // llvm.nvvm.tex.cube.level.v4f32.f32
+    nvvm_tex_cube_level_v4s32_f32,             // llvm.nvvm.tex.cube.level.v4s32.f32
+    nvvm_tex_cube_level_v4u32_f32,             // llvm.nvvm.tex.cube.level.v4u32.f32
+    nvvm_tex_cube_v4f32_f32,                   // llvm.nvvm.tex.cube.v4f32.f32
+    nvvm_tex_cube_v4s32_f32,                   // llvm.nvvm.tex.cube.v4s32.f32
+    nvvm_tex_cube_v4u32_f32,                   // llvm.nvvm.tex.cube.v4u32.f32
+    nvvm_tex_unified_1d_array_grad_v4f32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32
+    nvvm_tex_unified_1d_array_grad_v4s32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32
+    nvvm_tex_unified_1d_array_grad_v4u32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32
+    nvvm_tex_unified_1d_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4f32.f32
+    nvvm_tex_unified_1d_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4s32.f32
+    nvvm_tex_unified_1d_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4u32.f32
+    nvvm_tex_unified_1d_array_v4f32_f32,       // llvm.nvvm.tex.unified.1d.array.v4f32.f32
+    nvvm_tex_unified_1d_array_v4f32_s32,       // llvm.nvvm.tex.unified.1d.array.v4f32.s32
+    nvvm_tex_unified_1d_array_v4s32_f32,       // llvm.nvvm.tex.unified.1d.array.v4s32.f32
+    nvvm_tex_unified_1d_array_v4s32_s32,       // llvm.nvvm.tex.unified.1d.array.v4s32.s32
+    nvvm_tex_unified_1d_array_v4u32_f32,       // llvm.nvvm.tex.unified.1d.array.v4u32.f32
+    nvvm_tex_unified_1d_array_v4u32_s32,       // llvm.nvvm.tex.unified.1d.array.v4u32.s32
+    nvvm_tex_unified_1d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4f32.f32
+    nvvm_tex_unified_1d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4s32.f32
+    nvvm_tex_unified_1d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4u32.f32
+    nvvm_tex_unified_1d_level_v4f32_f32,       // llvm.nvvm.tex.unified.1d.level.v4f32.f32
+    nvvm_tex_unified_1d_level_v4s32_f32,       // llvm.nvvm.tex.unified.1d.level.v4s32.f32
+    nvvm_tex_unified_1d_level_v4u32_f32,       // llvm.nvvm.tex.unified.1d.level.v4u32.f32
+    nvvm_tex_unified_1d_v4f32_f32,             // llvm.nvvm.tex.unified.1d.v4f32.f32
+    nvvm_tex_unified_1d_v4f32_s32,             // llvm.nvvm.tex.unified.1d.v4f32.s32
+    nvvm_tex_unified_1d_v4s32_f32,             // llvm.nvvm.tex.unified.1d.v4s32.f32
+    nvvm_tex_unified_1d_v4s32_s32,             // llvm.nvvm.tex.unified.1d.v4s32.s32
+    nvvm_tex_unified_1d_v4u32_f32,             // llvm.nvvm.tex.unified.1d.v4u32.f32
+    nvvm_tex_unified_1d_v4u32_s32,             // llvm.nvvm.tex.unified.1d.v4u32.s32
+    nvvm_tex_unified_2d_array_grad_v4f32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32
+    nvvm_tex_unified_2d_array_grad_v4s32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32
+    nvvm_tex_unified_2d_array_grad_v4u32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32
+    nvvm_tex_unified_2d_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4f32.f32
+    nvvm_tex_unified_2d_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4s32.f32
+    nvvm_tex_unified_2d_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4u32.f32
+    nvvm_tex_unified_2d_array_v4f32_f32,       // llvm.nvvm.tex.unified.2d.array.v4f32.f32
+    nvvm_tex_unified_2d_array_v4f32_s32,       // llvm.nvvm.tex.unified.2d.array.v4f32.s32
+    nvvm_tex_unified_2d_array_v4s32_f32,       // llvm.nvvm.tex.unified.2d.array.v4s32.f32
+    nvvm_tex_unified_2d_array_v4s32_s32,       // llvm.nvvm.tex.unified.2d.array.v4s32.s32
+    nvvm_tex_unified_2d_array_v4u32_f32,       // llvm.nvvm.tex.unified.2d.array.v4u32.f32
+    nvvm_tex_unified_2d_array_v4u32_s32,       // llvm.nvvm.tex.unified.2d.array.v4u32.s32
+    nvvm_tex_unified_2d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4f32.f32
+    nvvm_tex_unified_2d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4s32.f32
+    nvvm_tex_unified_2d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4u32.f32
+    nvvm_tex_unified_2d_level_v4f32_f32,       // llvm.nvvm.tex.unified.2d.level.v4f32.f32
+    nvvm_tex_unified_2d_level_v4s32_f32,       // llvm.nvvm.tex.unified.2d.level.v4s32.f32
+    nvvm_tex_unified_2d_level_v4u32_f32,       // llvm.nvvm.tex.unified.2d.level.v4u32.f32
+    nvvm_tex_unified_2d_v4f32_f32,             // llvm.nvvm.tex.unified.2d.v4f32.f32
+    nvvm_tex_unified_2d_v4f32_s32,             // llvm.nvvm.tex.unified.2d.v4f32.s32
+    nvvm_tex_unified_2d_v4s32_f32,             // llvm.nvvm.tex.unified.2d.v4s32.f32
+    nvvm_tex_unified_2d_v4s32_s32,             // llvm.nvvm.tex.unified.2d.v4s32.s32
+    nvvm_tex_unified_2d_v4u32_f32,             // llvm.nvvm.tex.unified.2d.v4u32.f32
+    nvvm_tex_unified_2d_v4u32_s32,             // llvm.nvvm.tex.unified.2d.v4u32.s32
+    nvvm_tex_unified_3d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4f32.f32
+    nvvm_tex_unified_3d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4s32.f32
+    nvvm_tex_unified_3d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4u32.f32
+    nvvm_tex_unified_3d_level_v4f32_f32,       // llvm.nvvm.tex.unified.3d.level.v4f32.f32
+    nvvm_tex_unified_3d_level_v4s32_f32,       // llvm.nvvm.tex.unified.3d.level.v4s32.f32
+    nvvm_tex_unified_3d_level_v4u32_f32,       // llvm.nvvm.tex.unified.3d.level.v4u32.f32
+    nvvm_tex_unified_3d_v4f32_f32,             // llvm.nvvm.tex.unified.3d.v4f32.f32
+    nvvm_tex_unified_3d_v4f32_s32,             // llvm.nvvm.tex.unified.3d.v4f32.s32
+    nvvm_tex_unified_3d_v4s32_f32,             // llvm.nvvm.tex.unified.3d.v4s32.f32
+    nvvm_tex_unified_3d_v4s32_s32,             // llvm.nvvm.tex.unified.3d.v4s32.s32
+    nvvm_tex_unified_3d_v4u32_f32,             // llvm.nvvm.tex.unified.3d.v4u32.f32
+    nvvm_tex_unified_3d_v4u32_s32,             // llvm.nvvm.tex.unified.3d.v4u32.s32
+    nvvm_tex_unified_cube_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4f32.f32
+    nvvm_tex_unified_cube_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4s32.f32
+    nvvm_tex_unified_cube_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4u32.f32
+    nvvm_tex_unified_cube_array_v4f32_f32,     // llvm.nvvm.tex.unified.cube.array.v4f32.f32
+    nvvm_tex_unified_cube_array_v4s32_f32,     // llvm.nvvm.tex.unified.cube.array.v4s32.f32
+    nvvm_tex_unified_cube_array_v4u32_f32,     // llvm.nvvm.tex.unified.cube.array.v4u32.f32
+    nvvm_tex_unified_cube_level_v4f32_f32,     // llvm.nvvm.tex.unified.cube.level.v4f32.f32
+    nvvm_tex_unified_cube_level_v4s32_f32,     // llvm.nvvm.tex.unified.cube.level.v4s32.f32
+    nvvm_tex_unified_cube_level_v4u32_f32,     // llvm.nvvm.tex.unified.cube.level.v4u32.f32
+    nvvm_tex_unified_cube_v4f32_f32,           // llvm.nvvm.tex.unified.cube.v4f32.f32
+    nvvm_tex_unified_cube_v4s32_f32,           // llvm.nvvm.tex.unified.cube.v4s32.f32
+    nvvm_tex_unified_cube_v4u32_f32,           // llvm.nvvm.tex.unified.cube.v4u32.f32
+    nvvm_texsurf_handle,                       // llvm.nvvm.texsurf.handle
+    nvvm_texsurf_handle_internal,              // llvm.nvvm.texsurf.handle.internal
+    nvvm_tld4_a_2d_v4f32_f32,                  // llvm.nvvm.tld4.a.2d.v4f32.f32
+    nvvm_tld4_a_2d_v4s32_f32,                  // llvm.nvvm.tld4.a.2d.v4s32.f32
+    nvvm_tld4_a_2d_v4u32_f32,                  // llvm.nvvm.tld4.a.2d.v4u32.f32
+    nvvm_tld4_b_2d_v4f32_f32,                  // llvm.nvvm.tld4.b.2d.v4f32.f32
+    nvvm_tld4_b_2d_v4s32_f32,                  // llvm.nvvm.tld4.b.2d.v4s32.f32
+    nvvm_tld4_b_2d_v4u32_f32,                  // llvm.nvvm.tld4.b.2d.v4u32.f32
+    nvvm_tld4_g_2d_v4f32_f32,                  // llvm.nvvm.tld4.g.2d.v4f32.f32
+    nvvm_tld4_g_2d_v4s32_f32,                  // llvm.nvvm.tld4.g.2d.v4s32.f32
+    nvvm_tld4_g_2d_v4u32_f32,                  // llvm.nvvm.tld4.g.2d.v4u32.f32
+    nvvm_tld4_r_2d_v4f32_f32,                  // llvm.nvvm.tld4.r.2d.v4f32.f32
+    nvvm_tld4_r_2d_v4s32_f32,                  // llvm.nvvm.tld4.r.2d.v4s32.f32
+    nvvm_tld4_r_2d_v4u32_f32,                  // llvm.nvvm.tld4.r.2d.v4u32.f32
+    nvvm_tld4_unified_a_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4f32.f32
+    nvvm_tld4_unified_a_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4s32.f32
+    nvvm_tld4_unified_a_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4u32.f32
+    nvvm_tld4_unified_b_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4f32.f32
+    nvvm_tld4_unified_b_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4s32.f32
+    nvvm_tld4_unified_b_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4u32.f32
+    nvvm_tld4_unified_g_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4f32.f32
+    nvvm_tld4_unified_g_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4s32.f32
+    nvvm_tld4_unified_g_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4u32.f32
+    nvvm_tld4_unified_r_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4f32.f32
+    nvvm_tld4_unified_r_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4s32.f32
+    nvvm_tld4_unified_r_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4u32.f32
+    nvvm_trunc_d,                              // llvm.nvvm.trunc.d
+    nvvm_trunc_f,                              // llvm.nvvm.trunc.f
+    nvvm_trunc_ftz_f,                          // llvm.nvvm.trunc.ftz.f
+    nvvm_txq_array_size,                       // llvm.nvvm.txq.array.size
+    nvvm_txq_channel_data_type,                // llvm.nvvm.txq.channel.data.type
+    nvvm_txq_channel_order,                    // llvm.nvvm.txq.channel.order
+    nvvm_txq_depth,                            // llvm.nvvm.txq.depth
+    nvvm_txq_height,                           // llvm.nvvm.txq.height
+    nvvm_txq_num_mipmap_levels,                // llvm.nvvm.txq.num.mipmap.levels
+    nvvm_txq_num_samples,                      // llvm.nvvm.txq.num.samples
+    nvvm_txq_width,                            // llvm.nvvm.txq.width
+    nvvm_ui2d_rm,                              // llvm.nvvm.ui2d.rm
+    nvvm_ui2d_rn,                              // llvm.nvvm.ui2d.rn
+    nvvm_ui2d_rp,                              // llvm.nvvm.ui2d.rp
+    nvvm_ui2d_rz,                              // llvm.nvvm.ui2d.rz
+    nvvm_ui2f_rm,                              // llvm.nvvm.ui2f.rm
+    nvvm_ui2f_rn,                              // llvm.nvvm.ui2f.rn
+    nvvm_ui2f_rp,                              // llvm.nvvm.ui2f.rp
+    nvvm_ui2f_rz,                              // llvm.nvvm.ui2f.rz
+    nvvm_ull2d_rm,                             // llvm.nvvm.ull2d.rm
+    nvvm_ull2d_rn,                             // llvm.nvvm.ull2d.rn
+    nvvm_ull2d_rp,                             // llvm.nvvm.ull2d.rp
+    nvvm_ull2d_rz,                             // llvm.nvvm.ull2d.rz
+    nvvm_ull2f_rm,                             // llvm.nvvm.ull2f.rm
+    nvvm_ull2f_rn,                             // llvm.nvvm.ull2f.rn
+    nvvm_ull2f_rp,                             // llvm.nvvm.ull2f.rp
+    nvvm_ull2f_rz,                             // llvm.nvvm.ull2f.rz
+    nvvm_vote_all,                             // llvm.nvvm.vote.all
+    nvvm_vote_all_sync,                        // llvm.nvvm.vote.all.sync
+    nvvm_vote_any,                             // llvm.nvvm.vote.any
+    nvvm_vote_any_sync,                        // llvm.nvvm.vote.any.sync
+    nvvm_vote_ballot,                          // llvm.nvvm.vote.ballot
+    nvvm_vote_ballot_sync,                     // llvm.nvvm.vote.ballot.sync
+    nvvm_vote_uni,                             // llvm.nvvm.vote.uni
+    nvvm_vote_uni_sync,                        // llvm.nvvm.vote.uni.sync
+    nvvm_wmma_m16n16k16_load_a_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.a.col.f16
+    nvvm_wmma_m16n16k16_load_a_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
+    nvvm_wmma_m16n16k16_load_a_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.a.row.f16
+    nvvm_wmma_m16n16k16_load_a_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
+    nvvm_wmma_m16n16k16_load_b_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.b.col.f16
+    nvvm_wmma_m16n16k16_load_b_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
+    nvvm_wmma_m16n16k16_load_b_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.b.row.f16
+    nvvm_wmma_m16n16k16_load_b_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
+    nvvm_wmma_m16n16k16_load_c_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.c.col.f16
+    nvvm_wmma_m16n16k16_load_c_f32_col,        // llvm.nvvm.wmma.m16n16k16.load.c.col.f32
+    nvvm_wmma_m16n16k16_load_c_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
+    nvvm_wmma_m16n16k16_load_c_f32_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
+    nvvm_wmma_m16n16k16_load_c_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.c.row.f16
+    nvvm_wmma_m16n16k16_load_c_f32_row,        // llvm.nvvm.wmma.m16n16k16.load.c.row.f32
+    nvvm_wmma_m16n16k16_load_c_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
+    nvvm_wmma_m16n16k16_load_c_f32_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
+    nvvm_wmma_m16n16k16_mma_col_col_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
+    nvvm_wmma_m16n16k16_mma_col_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_col_col_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
+    nvvm_wmma_m16n16k16_mma_col_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_col_col_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
+    nvvm_wmma_m16n16k16_mma_col_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_col_col_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
+    nvvm_wmma_m16n16k16_mma_col_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_col_row_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
+    nvvm_wmma_m16n16k16_mma_col_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_col_row_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
+    nvvm_wmma_m16n16k16_mma_col_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_col_row_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
+    nvvm_wmma_m16n16k16_mma_col_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_col_row_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
+    nvvm_wmma_m16n16k16_mma_col_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_row_col_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
+    nvvm_wmma_m16n16k16_mma_row_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_row_col_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
+    nvvm_wmma_m16n16k16_mma_row_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_row_col_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
+    nvvm_wmma_m16n16k16_mma_row_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_row_col_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
+    nvvm_wmma_m16n16k16_mma_row_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_row_row_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
+    nvvm_wmma_m16n16k16_mma_row_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_row_row_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
+    nvvm_wmma_m16n16k16_mma_row_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
+    nvvm_wmma_m16n16k16_mma_row_row_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
+    nvvm_wmma_m16n16k16_mma_row_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
+    nvvm_wmma_m16n16k16_mma_row_row_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
+    nvvm_wmma_m16n16k16_mma_row_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
+    nvvm_wmma_m16n16k16_store_d_f16_col,       // llvm.nvvm.wmma.m16n16k16.store.d.col.f16
+    nvvm_wmma_m16n16k16_store_d_f32_col,       // llvm.nvvm.wmma.m16n16k16.store.d.col.f32
+    nvvm_wmma_m16n16k16_store_d_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
+    nvvm_wmma_m16n16k16_store_d_f32_col_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
+    nvvm_wmma_m16n16k16_store_d_f16_row,       // llvm.nvvm.wmma.m16n16k16.store.d.row.f16
+    nvvm_wmma_m16n16k16_store_d_f32_row,       // llvm.nvvm.wmma.m16n16k16.store.d.row.f32
+    nvvm_wmma_m16n16k16_store_d_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
+    nvvm_wmma_m16n16k16_store_d_f32_row_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
+    ppc_altivec_crypto_vcipher,                // llvm.ppc.altivec.crypto.vcipher
+    ppc_altivec_crypto_vcipherlast,            // llvm.ppc.altivec.crypto.vcipherlast
+    ppc_altivec_crypto_vncipher,               // llvm.ppc.altivec.crypto.vncipher
+    ppc_altivec_crypto_vncipherlast,           // llvm.ppc.altivec.crypto.vncipherlast
+    ppc_altivec_crypto_vpermxor,               // llvm.ppc.altivec.crypto.vpermxor
+    ppc_altivec_crypto_vpmsumb,                // llvm.ppc.altivec.crypto.vpmsumb
+    ppc_altivec_crypto_vpmsumd,                // llvm.ppc.altivec.crypto.vpmsumd
+    ppc_altivec_crypto_vpmsumh,                // llvm.ppc.altivec.crypto.vpmsumh
+    ppc_altivec_crypto_vpmsumw,                // llvm.ppc.altivec.crypto.vpmsumw
+    ppc_altivec_crypto_vsbox,                  // llvm.ppc.altivec.crypto.vsbox
+    ppc_altivec_crypto_vshasigmad,             // llvm.ppc.altivec.crypto.vshasigmad
+    ppc_altivec_crypto_vshasigmaw,             // llvm.ppc.altivec.crypto.vshasigmaw
+    ppc_altivec_dss,                           // llvm.ppc.altivec.dss
+    ppc_altivec_dssall,                        // llvm.ppc.altivec.dssall
+    ppc_altivec_dst,                           // llvm.ppc.altivec.dst
+    ppc_altivec_dstst,                         // llvm.ppc.altivec.dstst
+    ppc_altivec_dststt,                        // llvm.ppc.altivec.dststt
+    ppc_altivec_dstt,                          // llvm.ppc.altivec.dstt
+    ppc_altivec_lvebx,                         // llvm.ppc.altivec.lvebx
+    ppc_altivec_lvehx,                         // llvm.ppc.altivec.lvehx
+    ppc_altivec_lvewx,                         // llvm.ppc.altivec.lvewx
+    ppc_altivec_lvsl,                          // llvm.ppc.altivec.lvsl
+    ppc_altivec_lvsr,                          // llvm.ppc.altivec.lvsr
+    ppc_altivec_lvx,                           // llvm.ppc.altivec.lvx
+    ppc_altivec_lvxl,                          // llvm.ppc.altivec.lvxl
+    ppc_altivec_mfvscr,                        // llvm.ppc.altivec.mfvscr
+    ppc_altivec_mtvscr,                        // llvm.ppc.altivec.mtvscr
+    ppc_altivec_stvebx,                        // llvm.ppc.altivec.stvebx
+    ppc_altivec_stvehx,                        // llvm.ppc.altivec.stvehx
+    ppc_altivec_stvewx,                        // llvm.ppc.altivec.stvewx
+    ppc_altivec_stvx,                          // llvm.ppc.altivec.stvx
+    ppc_altivec_stvxl,                         // llvm.ppc.altivec.stvxl
+    ppc_altivec_vabsdub,                       // llvm.ppc.altivec.vabsdub
+    ppc_altivec_vabsduh,                       // llvm.ppc.altivec.vabsduh
+    ppc_altivec_vabsduw,                       // llvm.ppc.altivec.vabsduw
+    ppc_altivec_vaddcuq,                       // llvm.ppc.altivec.vaddcuq
+    ppc_altivec_vaddcuw,                       // llvm.ppc.altivec.vaddcuw
+    ppc_altivec_vaddecuq,                      // llvm.ppc.altivec.vaddecuq
+    ppc_altivec_vaddeuqm,                      // llvm.ppc.altivec.vaddeuqm
+    ppc_altivec_vaddsbs,                       // llvm.ppc.altivec.vaddsbs
+    ppc_altivec_vaddshs,                       // llvm.ppc.altivec.vaddshs
+    ppc_altivec_vaddsws,                       // llvm.ppc.altivec.vaddsws
+    ppc_altivec_vaddubs,                       // llvm.ppc.altivec.vaddubs
+    ppc_altivec_vadduhs,                       // llvm.ppc.altivec.vadduhs
+    ppc_altivec_vadduws,                       // llvm.ppc.altivec.vadduws
+    ppc_altivec_vavgsb,                        // llvm.ppc.altivec.vavgsb
+    ppc_altivec_vavgsh,                        // llvm.ppc.altivec.vavgsh
+    ppc_altivec_vavgsw,                        // llvm.ppc.altivec.vavgsw
+    ppc_altivec_vavgub,                        // llvm.ppc.altivec.vavgub
+    ppc_altivec_vavguh,                        // llvm.ppc.altivec.vavguh
+    ppc_altivec_vavguw,                        // llvm.ppc.altivec.vavguw
+    ppc_altivec_vbpermq,                       // llvm.ppc.altivec.vbpermq
+    ppc_altivec_vcfsx,                         // llvm.ppc.altivec.vcfsx
+    ppc_altivec_vcfux,                         // llvm.ppc.altivec.vcfux
+    ppc_altivec_vclzlsbb,                      // llvm.ppc.altivec.vclzlsbb
+    ppc_altivec_vcmpbfp,                       // llvm.ppc.altivec.vcmpbfp
+    ppc_altivec_vcmpbfp_p,                     // llvm.ppc.altivec.vcmpbfp.p
+    ppc_altivec_vcmpeqfp,                      // llvm.ppc.altivec.vcmpeqfp
+    ppc_altivec_vcmpeqfp_p,                    // llvm.ppc.altivec.vcmpeqfp.p
+    ppc_altivec_vcmpequb,                      // llvm.ppc.altivec.vcmpequb
+    ppc_altivec_vcmpequb_p,                    // llvm.ppc.altivec.vcmpequb.p
+    ppc_altivec_vcmpequd,                      // llvm.ppc.altivec.vcmpequd
+    ppc_altivec_vcmpequd_p,                    // llvm.ppc.altivec.vcmpequd.p
+    ppc_altivec_vcmpequh,                      // llvm.ppc.altivec.vcmpequh
+    ppc_altivec_vcmpequh_p,                    // llvm.ppc.altivec.vcmpequh.p
+    ppc_altivec_vcmpequw,                      // llvm.ppc.altivec.vcmpequw
+    ppc_altivec_vcmpequw_p,                    // llvm.ppc.altivec.vcmpequw.p
+    ppc_altivec_vcmpgefp,                      // llvm.ppc.altivec.vcmpgefp
+    ppc_altivec_vcmpgefp_p,                    // llvm.ppc.altivec.vcmpgefp.p
+    ppc_altivec_vcmpgtfp,                      // llvm.ppc.altivec.vcmpgtfp
+    ppc_altivec_vcmpgtfp_p,                    // llvm.ppc.altivec.vcmpgtfp.p
+    ppc_altivec_vcmpgtsb,                      // llvm.ppc.altivec.vcmpgtsb
+    ppc_altivec_vcmpgtsb_p,                    // llvm.ppc.altivec.vcmpgtsb.p
+    ppc_altivec_vcmpgtsd,                      // llvm.ppc.altivec.vcmpgtsd
+    ppc_altivec_vcmpgtsd_p,                    // llvm.ppc.altivec.vcmpgtsd.p
+    ppc_altivec_vcmpgtsh,                      // llvm.ppc.altivec.vcmpgtsh
+    ppc_altivec_vcmpgtsh_p,                    // llvm.ppc.altivec.vcmpgtsh.p
+    ppc_altivec_vcmpgtsw,                      // llvm.ppc.altivec.vcmpgtsw
+    ppc_altivec_vcmpgtsw_p,                    // llvm.ppc.altivec.vcmpgtsw.p
+    ppc_altivec_vcmpgtub,                      // llvm.ppc.altivec.vcmpgtub
+    ppc_altivec_vcmpgtub_p,                    // llvm.ppc.altivec.vcmpgtub.p
+    ppc_altivec_vcmpgtud,                      // llvm.ppc.altivec.vcmpgtud
+    ppc_altivec_vcmpgtud_p,                    // llvm.ppc.altivec.vcmpgtud.p
+    ppc_altivec_vcmpgtuh,                      // llvm.ppc.altivec.vcmpgtuh
+    ppc_altivec_vcmpgtuh_p,                    // llvm.ppc.altivec.vcmpgtuh.p
+    ppc_altivec_vcmpgtuw,                      // llvm.ppc.altivec.vcmpgtuw
+    ppc_altivec_vcmpgtuw_p,                    // llvm.ppc.altivec.vcmpgtuw.p
+    ppc_altivec_vcmpneb,                       // llvm.ppc.altivec.vcmpneb
+    ppc_altivec_vcmpneb_p,                     // llvm.ppc.altivec.vcmpneb.p
+    ppc_altivec_vcmpneh,                       // llvm.ppc.altivec.vcmpneh
+    ppc_altivec_vcmpneh_p,                     // llvm.ppc.altivec.vcmpneh.p
+    ppc_altivec_vcmpnew,                       // llvm.ppc.altivec.vcmpnew
+    ppc_altivec_vcmpnew_p,                     // llvm.ppc.altivec.vcmpnew.p
+    ppc_altivec_vcmpnezb,                      // llvm.ppc.altivec.vcmpnezb
+    ppc_altivec_vcmpnezb_p,                    // llvm.ppc.altivec.vcmpnezb.p
+    ppc_altivec_vcmpnezh,                      // llvm.ppc.altivec.vcmpnezh
+    ppc_altivec_vcmpnezh_p,                    // llvm.ppc.altivec.vcmpnezh.p
+    ppc_altivec_vcmpnezw,                      // llvm.ppc.altivec.vcmpnezw
+    ppc_altivec_vcmpnezw_p,                    // llvm.ppc.altivec.vcmpnezw.p
+    ppc_altivec_vctsxs,                        // llvm.ppc.altivec.vctsxs
+    ppc_altivec_vctuxs,                        // llvm.ppc.altivec.vctuxs
+    ppc_altivec_vctzlsbb,                      // llvm.ppc.altivec.vctzlsbb
+    ppc_altivec_vexptefp,                      // llvm.ppc.altivec.vexptefp
+    ppc_altivec_vgbbd,                         // llvm.ppc.altivec.vgbbd
+    ppc_altivec_vlogefp,                       // llvm.ppc.altivec.vlogefp
+    ppc_altivec_vmaddfp,                       // llvm.ppc.altivec.vmaddfp
+    ppc_altivec_vmaxfp,                        // llvm.ppc.altivec.vmaxfp
+    ppc_altivec_vmaxsb,                        // llvm.ppc.altivec.vmaxsb
+    ppc_altivec_vmaxsd,                        // llvm.ppc.altivec.vmaxsd
+    ppc_altivec_vmaxsh,                        // llvm.ppc.altivec.vmaxsh
+    ppc_altivec_vmaxsw,                        // llvm.ppc.altivec.vmaxsw
+    ppc_altivec_vmaxub,                        // llvm.ppc.altivec.vmaxub
+    ppc_altivec_vmaxud,                        // llvm.ppc.altivec.vmaxud
+    ppc_altivec_vmaxuh,                        // llvm.ppc.altivec.vmaxuh
+    ppc_altivec_vmaxuw,                        // llvm.ppc.altivec.vmaxuw
+    ppc_altivec_vmhaddshs,                     // llvm.ppc.altivec.vmhaddshs
+    ppc_altivec_vmhraddshs,                    // llvm.ppc.altivec.vmhraddshs
+    ppc_altivec_vminfp,                        // llvm.ppc.altivec.vminfp
+    ppc_altivec_vminsb,                        // llvm.ppc.altivec.vminsb
+    ppc_altivec_vminsd,                        // llvm.ppc.altivec.vminsd
+    ppc_altivec_vminsh,                        // llvm.ppc.altivec.vminsh
+    ppc_altivec_vminsw,                        // llvm.ppc.altivec.vminsw
+    ppc_altivec_vminub,                        // llvm.ppc.altivec.vminub
+    ppc_altivec_vminud,                        // llvm.ppc.altivec.vminud
+    ppc_altivec_vminuh,                        // llvm.ppc.altivec.vminuh
+    ppc_altivec_vminuw,                        // llvm.ppc.altivec.vminuw
+    ppc_altivec_vmladduhm,                     // llvm.ppc.altivec.vmladduhm
+    ppc_altivec_vmsummbm,                      // llvm.ppc.altivec.vmsummbm
+    ppc_altivec_vmsumshm,                      // llvm.ppc.altivec.vmsumshm
+    ppc_altivec_vmsumshs,                      // llvm.ppc.altivec.vmsumshs
+    ppc_altivec_vmsumubm,                      // llvm.ppc.altivec.vmsumubm
+    ppc_altivec_vmsumuhm,                      // llvm.ppc.altivec.vmsumuhm
+    ppc_altivec_vmsumuhs,                      // llvm.ppc.altivec.vmsumuhs
+    ppc_altivec_vmulesb,                       // llvm.ppc.altivec.vmulesb
+    ppc_altivec_vmulesh,                       // llvm.ppc.altivec.vmulesh
+    ppc_altivec_vmulesw,                       // llvm.ppc.altivec.vmulesw
+    ppc_altivec_vmuleub,                       // llvm.ppc.altivec.vmuleub
+    ppc_altivec_vmuleuh,                       // llvm.ppc.altivec.vmuleuh
+    ppc_altivec_vmuleuw,                       // llvm.ppc.altivec.vmuleuw
+    ppc_altivec_vmulosb,                       // llvm.ppc.altivec.vmulosb
+    ppc_altivec_vmulosh,                       // llvm.ppc.altivec.vmulosh
+    ppc_altivec_vmulosw,                       // llvm.ppc.altivec.vmulosw
+    ppc_altivec_vmuloub,                       // llvm.ppc.altivec.vmuloub
+    ppc_altivec_vmulouh,                       // llvm.ppc.altivec.vmulouh
+    ppc_altivec_vmulouw,                       // llvm.ppc.altivec.vmulouw
+    ppc_altivec_vnmsubfp,                      // llvm.ppc.altivec.vnmsubfp
+    ppc_altivec_vperm,                         // llvm.ppc.altivec.vperm
+    ppc_altivec_vpkpx,                         // llvm.ppc.altivec.vpkpx
+    ppc_altivec_vpksdss,                       // llvm.ppc.altivec.vpksdss
+    ppc_altivec_vpksdus,                       // llvm.ppc.altivec.vpksdus
+    ppc_altivec_vpkshss,                       // llvm.ppc.altivec.vpkshss
+    ppc_altivec_vpkshus,                       // llvm.ppc.altivec.vpkshus
+    ppc_altivec_vpkswss,                       // llvm.ppc.altivec.vpkswss
+    ppc_altivec_vpkswus,                       // llvm.ppc.altivec.vpkswus
+    ppc_altivec_vpkudus,                       // llvm.ppc.altivec.vpkudus
+    ppc_altivec_vpkuhus,                       // llvm.ppc.altivec.vpkuhus
+    ppc_altivec_vpkuwus,                       // llvm.ppc.altivec.vpkuwus
+    ppc_altivec_vprtybd,                       // llvm.ppc.altivec.vprtybd
+    ppc_altivec_vprtybq,                       // llvm.ppc.altivec.vprtybq
+    ppc_altivec_vprtybw,                       // llvm.ppc.altivec.vprtybw
+    ppc_altivec_vrefp,                         // llvm.ppc.altivec.vrefp
+    ppc_altivec_vrfim,                         // llvm.ppc.altivec.vrfim
+    ppc_altivec_vrfin,                         // llvm.ppc.altivec.vrfin
+    ppc_altivec_vrfip,                         // llvm.ppc.altivec.vrfip
+    ppc_altivec_vrfiz,                         // llvm.ppc.altivec.vrfiz
+    ppc_altivec_vrlb,                          // llvm.ppc.altivec.vrlb
+    ppc_altivec_vrld,                          // llvm.ppc.altivec.vrld
+    ppc_altivec_vrldmi,                        // llvm.ppc.altivec.vrldmi
+    ppc_altivec_vrldnm,                        // llvm.ppc.altivec.vrldnm
+    ppc_altivec_vrlh,                          // llvm.ppc.altivec.vrlh
+    ppc_altivec_vrlw,                          // llvm.ppc.altivec.vrlw
+    ppc_altivec_vrlwmi,                        // llvm.ppc.altivec.vrlwmi
+    ppc_altivec_vrlwnm,                        // llvm.ppc.altivec.vrlwnm
+    ppc_altivec_vrsqrtefp,                     // llvm.ppc.altivec.vrsqrtefp
+    ppc_altivec_vsel,                          // llvm.ppc.altivec.vsel
+    ppc_altivec_vsl,                           // llvm.ppc.altivec.vsl
+    ppc_altivec_vslb,                          // llvm.ppc.altivec.vslb
+    ppc_altivec_vslh,                          // llvm.ppc.altivec.vslh
+    ppc_altivec_vslo,                          // llvm.ppc.altivec.vslo
+    ppc_altivec_vslv,                          // llvm.ppc.altivec.vslv
+    ppc_altivec_vslw,                          // llvm.ppc.altivec.vslw
+    ppc_altivec_vsr,                           // llvm.ppc.altivec.vsr
+    ppc_altivec_vsrab,                         // llvm.ppc.altivec.vsrab
+    ppc_altivec_vsrah,                         // llvm.ppc.altivec.vsrah
+    ppc_altivec_vsraw,                         // llvm.ppc.altivec.vsraw
+    ppc_altivec_vsrb,                          // llvm.ppc.altivec.vsrb
+    ppc_altivec_vsrh,                          // llvm.ppc.altivec.vsrh
+    ppc_altivec_vsro,                          // llvm.ppc.altivec.vsro
+    ppc_altivec_vsrv,                          // llvm.ppc.altivec.vsrv
+    ppc_altivec_vsrw,                          // llvm.ppc.altivec.vsrw
+    ppc_altivec_vsubcuq,                       // llvm.ppc.altivec.vsubcuq
+    ppc_altivec_vsubcuw,                       // llvm.ppc.altivec.vsubcuw
+    ppc_altivec_vsubecuq,                      // llvm.ppc.altivec.vsubecuq
+    ppc_altivec_vsubeuqm,                      // llvm.ppc.altivec.vsubeuqm
+    ppc_altivec_vsubsbs,                       // llvm.ppc.altivec.vsubsbs
+    ppc_altivec_vsubshs,                       // llvm.ppc.altivec.vsubshs
+    ppc_altivec_vsubsws,                       // llvm.ppc.altivec.vsubsws
+    ppc_altivec_vsububs,                       // llvm.ppc.altivec.vsububs
+    ppc_altivec_vsubuhs,                       // llvm.ppc.altivec.vsubuhs
+    ppc_altivec_vsubuws,                       // llvm.ppc.altivec.vsubuws
+    ppc_altivec_vsum2sws,                      // llvm.ppc.altivec.vsum2sws
+    ppc_altivec_vsum4sbs,                      // llvm.ppc.altivec.vsum4sbs
+    ppc_altivec_vsum4shs,                      // llvm.ppc.altivec.vsum4shs
+    ppc_altivec_vsum4ubs,                      // llvm.ppc.altivec.vsum4ubs
+    ppc_altivec_vsumsws,                       // llvm.ppc.altivec.vsumsws
+    ppc_altivec_vupkhpx,                       // llvm.ppc.altivec.vupkhpx
+    ppc_altivec_vupkhsb,                       // llvm.ppc.altivec.vupkhsb
+    ppc_altivec_vupkhsh,                       // llvm.ppc.altivec.vupkhsh
+    ppc_altivec_vupkhsw,                       // llvm.ppc.altivec.vupkhsw
+    ppc_altivec_vupklpx,                       // llvm.ppc.altivec.vupklpx
+    ppc_altivec_vupklsb,                       // llvm.ppc.altivec.vupklsb
+    ppc_altivec_vupklsh,                       // llvm.ppc.altivec.vupklsh
+    ppc_altivec_vupklsw,                       // llvm.ppc.altivec.vupklsw
+    ppc_bpermd,                                // llvm.ppc.bpermd
+    ppc_cfence,                                // llvm.ppc.cfence
+    ppc_dcba,                                  // llvm.ppc.dcba
+    ppc_dcbf,                                  // llvm.ppc.dcbf
+    ppc_dcbi,                                  // llvm.ppc.dcbi
+    ppc_dcbst,                                 // llvm.ppc.dcbst
+    ppc_dcbt,                                  // llvm.ppc.dcbt
+    ppc_dcbtst,                                // llvm.ppc.dcbtst
+    ppc_dcbz,                                  // llvm.ppc.dcbz
+    ppc_dcbzl,                                 // llvm.ppc.dcbzl
+    ppc_divde,                                 // llvm.ppc.divde
+    ppc_divdeu,                                // llvm.ppc.divdeu
+    ppc_divwe,                                 // llvm.ppc.divwe
+    ppc_divweu,                                // llvm.ppc.divweu
+    ppc_get_texasr,                            // llvm.ppc.get.texasr
+    ppc_get_texasru,                           // llvm.ppc.get.texasru
+    ppc_get_tfhar,                             // llvm.ppc.get.tfhar
+    ppc_get_tfiar,                             // llvm.ppc.get.tfiar
+    ppc_is_decremented_ctr_nonzero,            // llvm.ppc.is.decremented.ctr.nonzero
+    ppc_lwsync,                                // llvm.ppc.lwsync
+    ppc_mtctr,                                 // llvm.ppc.mtctr
+    ppc_qpx_qvfabs,                            // llvm.ppc.qpx.qvfabs
+    ppc_qpx_qvfadd,                            // llvm.ppc.qpx.qvfadd
+    ppc_qpx_qvfadds,                           // llvm.ppc.qpx.qvfadds
+    ppc_qpx_qvfcfid,                           // llvm.ppc.qpx.qvfcfid
+    ppc_qpx_qvfcfids,                          // llvm.ppc.qpx.qvfcfids
+    ppc_qpx_qvfcfidu,                          // llvm.ppc.qpx.qvfcfidu
+    ppc_qpx_qvfcfidus,                         // llvm.ppc.qpx.qvfcfidus
+    ppc_qpx_qvfcmpeq,                          // llvm.ppc.qpx.qvfcmpeq
+    ppc_qpx_qvfcmpgt,                          // llvm.ppc.qpx.qvfcmpgt
+    ppc_qpx_qvfcmplt,                          // llvm.ppc.qpx.qvfcmplt
+    ppc_qpx_qvfcpsgn,                          // llvm.ppc.qpx.qvfcpsgn
+    ppc_qpx_qvfctid,                           // llvm.ppc.qpx.qvfctid
+    ppc_qpx_qvfctidu,                          // llvm.ppc.qpx.qvfctidu
+    ppc_qpx_qvfctiduz,                         // llvm.ppc.qpx.qvfctiduz
+    ppc_qpx_qvfctidz,                          // llvm.ppc.qpx.qvfctidz
+    ppc_qpx_qvfctiw,                           // llvm.ppc.qpx.qvfctiw
+    ppc_qpx_qvfctiwu,                          // llvm.ppc.qpx.qvfctiwu
+    ppc_qpx_qvfctiwuz,                         // llvm.ppc.qpx.qvfctiwuz
+    ppc_qpx_qvfctiwz,                          // llvm.ppc.qpx.qvfctiwz
+    ppc_qpx_qvflogical,                        // llvm.ppc.qpx.qvflogical
+    ppc_qpx_qvfmadd,                           // llvm.ppc.qpx.qvfmadd
+    ppc_qpx_qvfmadds,                          // llvm.ppc.qpx.qvfmadds
+    ppc_qpx_qvfmsub,                           // llvm.ppc.qpx.qvfmsub
+    ppc_qpx_qvfmsubs,                          // llvm.ppc.qpx.qvfmsubs
+    ppc_qpx_qvfmul,                            // llvm.ppc.qpx.qvfmul
+    ppc_qpx_qvfmuls,                           // llvm.ppc.qpx.qvfmuls
+    ppc_qpx_qvfnabs,                           // llvm.ppc.qpx.qvfnabs
+    ppc_qpx_qvfneg,                            // llvm.ppc.qpx.qvfneg
+    ppc_qpx_qvfnmadd,                          // llvm.ppc.qpx.qvfnmadd
+    ppc_qpx_qvfnmadds,                         // llvm.ppc.qpx.qvfnmadds
+    ppc_qpx_qvfnmsub,                          // llvm.ppc.qpx.qvfnmsub
+    ppc_qpx_qvfnmsubs,                         // llvm.ppc.qpx.qvfnmsubs
+    ppc_qpx_qvfperm,                           // llvm.ppc.qpx.qvfperm
+    ppc_qpx_qvfre,                             // llvm.ppc.qpx.qvfre
+    ppc_qpx_qvfres,                            // llvm.ppc.qpx.qvfres
+    ppc_qpx_qvfrim,                            // llvm.ppc.qpx.qvfrim
+    ppc_qpx_qvfrin,                            // llvm.ppc.qpx.qvfrin
+    ppc_qpx_qvfrip,                            // llvm.ppc.qpx.qvfrip
+    ppc_qpx_qvfriz,                            // llvm.ppc.qpx.qvfriz
+    ppc_qpx_qvfrsp,                            // llvm.ppc.qpx.qvfrsp
+    ppc_qpx_qvfrsqrte,                         // llvm.ppc.qpx.qvfrsqrte
+    ppc_qpx_qvfrsqrtes,                        // llvm.ppc.qpx.qvfrsqrtes
+    ppc_qpx_qvfsel,                            // llvm.ppc.qpx.qvfsel
+    ppc_qpx_qvfsub,                            // llvm.ppc.qpx.qvfsub
+    ppc_qpx_qvfsubs,                           // llvm.ppc.qpx.qvfsubs
+    ppc_qpx_qvftstnan,                         // llvm.ppc.qpx.qvftstnan
+    ppc_qpx_qvfxmadd,                          // llvm.ppc.qpx.qvfxmadd
+    ppc_qpx_qvfxmadds,                         // llvm.ppc.qpx.qvfxmadds
+    ppc_qpx_qvfxmul,                           // llvm.ppc.qpx.qvfxmul
+    ppc_qpx_qvfxmuls,                          // llvm.ppc.qpx.qvfxmuls
+    ppc_qpx_qvfxxcpnmadd,                      // llvm.ppc.qpx.qvfxxcpnmadd
+    ppc_qpx_qvfxxcpnmadds,                     // llvm.ppc.qpx.qvfxxcpnmadds
+    ppc_qpx_qvfxxmadd,                         // llvm.ppc.qpx.qvfxxmadd
+    ppc_qpx_qvfxxmadds,                        // llvm.ppc.qpx.qvfxxmadds
+    ppc_qpx_qvfxxnpmadd,                       // llvm.ppc.qpx.qvfxxnpmadd
+    ppc_qpx_qvfxxnpmadds,                      // llvm.ppc.qpx.qvfxxnpmadds
+    ppc_qpx_qvgpci,                            // llvm.ppc.qpx.qvgpci
+    ppc_qpx_qvlfcd,                            // llvm.ppc.qpx.qvlfcd
+    ppc_qpx_qvlfcda,                           // llvm.ppc.qpx.qvlfcda
+    ppc_qpx_qvlfcs,                            // llvm.ppc.qpx.qvlfcs
+    ppc_qpx_qvlfcsa,                           // llvm.ppc.qpx.qvlfcsa
+    ppc_qpx_qvlfd,                             // llvm.ppc.qpx.qvlfd
+    ppc_qpx_qvlfda,                            // llvm.ppc.qpx.qvlfda
+    ppc_qpx_qvlfiwa,                           // llvm.ppc.qpx.qvlfiwa
+    ppc_qpx_qvlfiwaa,                          // llvm.ppc.qpx.qvlfiwaa
+    ppc_qpx_qvlfiwz,                           // llvm.ppc.qpx.qvlfiwz
+    ppc_qpx_qvlfiwza,                          // llvm.ppc.qpx.qvlfiwza
+    ppc_qpx_qvlfs,                             // llvm.ppc.qpx.qvlfs
+    ppc_qpx_qvlfsa,                            // llvm.ppc.qpx.qvlfsa
+    ppc_qpx_qvlpcld,                           // llvm.ppc.qpx.qvlpcld
+    ppc_qpx_qvlpcls,                           // llvm.ppc.qpx.qvlpcls
+    ppc_qpx_qvlpcrd,                           // llvm.ppc.qpx.qvlpcrd
+    ppc_qpx_qvlpcrs,                           // llvm.ppc.qpx.qvlpcrs
+    ppc_qpx_qvstfcd,                           // llvm.ppc.qpx.qvstfcd
+    ppc_qpx_qvstfcda,                          // llvm.ppc.qpx.qvstfcda
+    ppc_qpx_qvstfcs,                           // llvm.ppc.qpx.qvstfcs
+    ppc_qpx_qvstfcsa,                          // llvm.ppc.qpx.qvstfcsa
+    ppc_qpx_qvstfd,                            // llvm.ppc.qpx.qvstfd
+    ppc_qpx_qvstfda,                           // llvm.ppc.qpx.qvstfda
+    ppc_qpx_qvstfiw,                           // llvm.ppc.qpx.qvstfiw
+    ppc_qpx_qvstfiwa,                          // llvm.ppc.qpx.qvstfiwa
+    ppc_qpx_qvstfs,                            // llvm.ppc.qpx.qvstfs
+    ppc_qpx_qvstfsa,                           // llvm.ppc.qpx.qvstfsa
+    ppc_set_texasr,                            // llvm.ppc.set.texasr
+    ppc_set_texasru,                           // llvm.ppc.set.texasru
+    ppc_set_tfhar,                             // llvm.ppc.set.tfhar
+    ppc_set_tfiar,                             // llvm.ppc.set.tfiar
+    ppc_sync,                                  // llvm.ppc.sync
+    ppc_tabort,                                // llvm.ppc.tabort
+    ppc_tabortdc,                              // llvm.ppc.tabortdc
+    ppc_tabortdci,                             // llvm.ppc.tabortdci
+    ppc_tabortwc,                              // llvm.ppc.tabortwc
+    ppc_tabortwci,                             // llvm.ppc.tabortwci
+    ppc_tbegin,                                // llvm.ppc.tbegin
+    ppc_tcheck,                                // llvm.ppc.tcheck
+    ppc_tend,                                  // llvm.ppc.tend
+    ppc_tendall,                               // llvm.ppc.tendall
+    ppc_trechkpt,                              // llvm.ppc.trechkpt
+    ppc_treclaim,                              // llvm.ppc.treclaim
+    ppc_tresume,                               // llvm.ppc.tresume
+    ppc_tsr,                                   // llvm.ppc.tsr
+    ppc_tsuspend,                              // llvm.ppc.tsuspend
+    ppc_ttest,                                 // llvm.ppc.ttest
+    ppc_vsx_lxvd2x,                            // llvm.ppc.vsx.lxvd2x
+    ppc_vsx_lxvd2x_be,                         // llvm.ppc.vsx.lxvd2x.be
+    ppc_vsx_lxvl,                              // llvm.ppc.vsx.lxvl
+    ppc_vsx_lxvll,                             // llvm.ppc.vsx.lxvll
+    ppc_vsx_lxvw4x,                            // llvm.ppc.vsx.lxvw4x
+    ppc_vsx_lxvw4x_be,                         // llvm.ppc.vsx.lxvw4x.be
+    ppc_vsx_stxvd2x,                           // llvm.ppc.vsx.stxvd2x
+    ppc_vsx_stxvd2x_be,                        // llvm.ppc.vsx.stxvd2x.be
+    ppc_vsx_stxvl,                             // llvm.ppc.vsx.stxvl
+    ppc_vsx_stxvll,                            // llvm.ppc.vsx.stxvll
+    ppc_vsx_stxvw4x,                           // llvm.ppc.vsx.stxvw4x
+    ppc_vsx_stxvw4x_be,                        // llvm.ppc.vsx.stxvw4x.be
+    ppc_vsx_xsmaxdp,                           // llvm.ppc.vsx.xsmaxdp
+    ppc_vsx_xsmindp,                           // llvm.ppc.vsx.xsmindp
+    ppc_vsx_xvcmpeqdp,                         // llvm.ppc.vsx.xvcmpeqdp
+    ppc_vsx_xvcmpeqdp_p,                       // llvm.ppc.vsx.xvcmpeqdp.p
+    ppc_vsx_xvcmpeqsp,                         // llvm.ppc.vsx.xvcmpeqsp
+    ppc_vsx_xvcmpeqsp_p,                       // llvm.ppc.vsx.xvcmpeqsp.p
+    ppc_vsx_xvcmpgedp,                         // llvm.ppc.vsx.xvcmpgedp
+    ppc_vsx_xvcmpgedp_p,                       // llvm.ppc.vsx.xvcmpgedp.p
+    ppc_vsx_xvcmpgesp,                         // llvm.ppc.vsx.xvcmpgesp
+    ppc_vsx_xvcmpgesp_p,                       // llvm.ppc.vsx.xvcmpgesp.p
+    ppc_vsx_xvcmpgtdp,                         // llvm.ppc.vsx.xvcmpgtdp
+    ppc_vsx_xvcmpgtdp_p,                       // llvm.ppc.vsx.xvcmpgtdp.p
+    ppc_vsx_xvcmpgtsp,                         // llvm.ppc.vsx.xvcmpgtsp
+    ppc_vsx_xvcmpgtsp_p,                       // llvm.ppc.vsx.xvcmpgtsp.p
+    ppc_vsx_xvcvdpsp,                          // llvm.ppc.vsx.xvcvdpsp
+    ppc_vsx_xvcvdpsxws,                        // llvm.ppc.vsx.xvcvdpsxws
+    ppc_vsx_xvcvdpuxws,                        // llvm.ppc.vsx.xvcvdpuxws
+    ppc_vsx_xvcvhpsp,                          // llvm.ppc.vsx.xvcvhpsp
+    ppc_vsx_xvcvspdp,                          // llvm.ppc.vsx.xvcvspdp
+    ppc_vsx_xvcvsphp,                          // llvm.ppc.vsx.xvcvsphp
+    ppc_vsx_xvcvsxdsp,                         // llvm.ppc.vsx.xvcvsxdsp
+    ppc_vsx_xvcvsxwdp,                         // llvm.ppc.vsx.xvcvsxwdp
+    ppc_vsx_xvcvuxdsp,                         // llvm.ppc.vsx.xvcvuxdsp
+    ppc_vsx_xvcvuxwdp,                         // llvm.ppc.vsx.xvcvuxwdp
+    ppc_vsx_xvdivdp,                           // llvm.ppc.vsx.xvdivdp
+    ppc_vsx_xvdivsp,                           // llvm.ppc.vsx.xvdivsp
+    ppc_vsx_xviexpdp,                          // llvm.ppc.vsx.xviexpdp
+    ppc_vsx_xviexpsp,                          // llvm.ppc.vsx.xviexpsp
+    ppc_vsx_xvmaxdp,                           // llvm.ppc.vsx.xvmaxdp
+    ppc_vsx_xvmaxsp,                           // llvm.ppc.vsx.xvmaxsp
+    ppc_vsx_xvmindp,                           // llvm.ppc.vsx.xvmindp
+    ppc_vsx_xvminsp,                           // llvm.ppc.vsx.xvminsp
+    ppc_vsx_xvrdpip,                           // llvm.ppc.vsx.xvrdpip
+    ppc_vsx_xvredp,                            // llvm.ppc.vsx.xvredp
+    ppc_vsx_xvresp,                            // llvm.ppc.vsx.xvresp
+    ppc_vsx_xvrspip,                           // llvm.ppc.vsx.xvrspip
+    ppc_vsx_xvrsqrtedp,                        // llvm.ppc.vsx.xvrsqrtedp
+    ppc_vsx_xvrsqrtesp,                        // llvm.ppc.vsx.xvrsqrtesp
+    ppc_vsx_xvtstdcdp,                         // llvm.ppc.vsx.xvtstdcdp
+    ppc_vsx_xvtstdcsp,                         // llvm.ppc.vsx.xvtstdcsp
+    ppc_vsx_xvxexpdp,                          // llvm.ppc.vsx.xvxexpdp
+    ppc_vsx_xvxexpsp,                          // llvm.ppc.vsx.xvxexpsp
+    ppc_vsx_xvxsigdp,                          // llvm.ppc.vsx.xvxsigdp
+    ppc_vsx_xvxsigsp,                          // llvm.ppc.vsx.xvxsigsp
+    ppc_vsx_xxextractuw,                       // llvm.ppc.vsx.xxextractuw
+    ppc_vsx_xxinsertw,                         // llvm.ppc.vsx.xxinsertw
+    ppc_vsx_xxleqv,                            // llvm.ppc.vsx.xxleqv
+    r600_cube,                                 // llvm.r600.cube
+    r600_group_barrier,                        // llvm.r600.group.barrier
+    r600_implicitarg_ptr,                      // llvm.r600.implicitarg.ptr
+    r600_rat_store_typed,                      // llvm.r600.rat.store.typed
+    r600_read_global_size_x,                   // llvm.r600.read.global.size.x
+    r600_read_global_size_y,                   // llvm.r600.read.global.size.y
+    r600_read_global_size_z,                   // llvm.r600.read.global.size.z
+    r600_read_local_size_x,                    // llvm.r600.read.local.size.x
+    r600_read_local_size_y,                    // llvm.r600.read.local.size.y
+    r600_read_local_size_z,                    // llvm.r600.read.local.size.z
+    r600_read_ngroups_x,                       // llvm.r600.read.ngroups.x
+    r600_read_ngroups_y,                       // llvm.r600.read.ngroups.y
+    r600_read_ngroups_z,                       // llvm.r600.read.ngroups.z
+    r600_read_tgid_x,                          // llvm.r600.read.tgid.x
+    r600_read_tgid_y,                          // llvm.r600.read.tgid.y
+    r600_read_tgid_z,                          // llvm.r600.read.tgid.z
+    r600_read_tidig_x,                         // llvm.r600.read.tidig.x
+    r600_read_tidig_y,                         // llvm.r600.read.tidig.y
+    r600_read_tidig_z,                         // llvm.r600.read.tidig.z
+    r600_recipsqrt_clamped,                    // llvm.r600.recipsqrt.clamped
+    r600_recipsqrt_ieee,                       // llvm.r600.recipsqrt.ieee
+    s390_efpc,                                 // llvm.s390.efpc
+    s390_etnd,                                 // llvm.s390.etnd
+    s390_lcbb,                                 // llvm.s390.lcbb
+    s390_ntstg,                                // llvm.s390.ntstg
+    s390_ppa_txassist,                         // llvm.s390.ppa.txassist
+    s390_sfpc,                                 // llvm.s390.sfpc
+    s390_tabort,                               // llvm.s390.tabort
+    s390_tbegin,                               // llvm.s390.tbegin
+    s390_tbegin_nofloat,                       // llvm.s390.tbegin.nofloat
+    s390_tbeginc,                              // llvm.s390.tbeginc
+    s390_tdc,                                  // llvm.s390.tdc
+    s390_tend,                                 // llvm.s390.tend
+    s390_vaccb,                                // llvm.s390.vaccb
+    s390_vacccq,                               // llvm.s390.vacccq
+    s390_vaccf,                                // llvm.s390.vaccf
+    s390_vaccg,                                // llvm.s390.vaccg
+    s390_vacch,                                // llvm.s390.vacch
+    s390_vaccq,                                // llvm.s390.vaccq
+    s390_vacq,                                 // llvm.s390.vacq
+    s390_vaq,                                  // llvm.s390.vaq
+    s390_vavgb,                                // llvm.s390.vavgb
+    s390_vavgf,                                // llvm.s390.vavgf
+    s390_vavgg,                                // llvm.s390.vavgg
+    s390_vavgh,                                // llvm.s390.vavgh
+    s390_vavglb,                               // llvm.s390.vavglb
+    s390_vavglf,                               // llvm.s390.vavglf
+    s390_vavglg,                               // llvm.s390.vavglg
+    s390_vavglh,                               // llvm.s390.vavglh
+    s390_vbperm,                               // llvm.s390.vbperm
+    s390_vceqbs,                               // llvm.s390.vceqbs
+    s390_vceqfs,                               // llvm.s390.vceqfs
+    s390_vceqgs,                               // llvm.s390.vceqgs
+    s390_vceqhs,                               // llvm.s390.vceqhs
+    s390_vchbs,                                // llvm.s390.vchbs
+    s390_vchfs,                                // llvm.s390.vchfs
+    s390_vchgs,                                // llvm.s390.vchgs
+    s390_vchhs,                                // llvm.s390.vchhs
+    s390_vchlbs,                               // llvm.s390.vchlbs
+    s390_vchlfs,                               // llvm.s390.vchlfs
+    s390_vchlgs,                               // llvm.s390.vchlgs
+    s390_vchlhs,                               // llvm.s390.vchlhs
+    s390_vcksm,                                // llvm.s390.vcksm
+    s390_verimb,                               // llvm.s390.verimb
+    s390_verimf,                               // llvm.s390.verimf
+    s390_verimg,                               // llvm.s390.verimg
+    s390_verimh,                               // llvm.s390.verimh
+    s390_verllb,                               // llvm.s390.verllb
+    s390_verllf,                               // llvm.s390.verllf
+    s390_verllg,                               // llvm.s390.verllg
+    s390_verllh,                               // llvm.s390.verllh
+    s390_verllvb,                              // llvm.s390.verllvb
+    s390_verllvf,                              // llvm.s390.verllvf
+    s390_verllvg,                              // llvm.s390.verllvg
+    s390_verllvh,                              // llvm.s390.verllvh
+    s390_vfaeb,                                // llvm.s390.vfaeb
+    s390_vfaebs,                               // llvm.s390.vfaebs
+    s390_vfaef,                                // llvm.s390.vfaef
+    s390_vfaefs,                               // llvm.s390.vfaefs
+    s390_vfaeh,                                // llvm.s390.vfaeh
+    s390_vfaehs,                               // llvm.s390.vfaehs
+    s390_vfaezb,                               // llvm.s390.vfaezb
+    s390_vfaezbs,                              // llvm.s390.vfaezbs
+    s390_vfaezf,                               // llvm.s390.vfaezf
+    s390_vfaezfs,                              // llvm.s390.vfaezfs
+    s390_vfaezh,                               // llvm.s390.vfaezh
+    s390_vfaezhs,                              // llvm.s390.vfaezhs
+    s390_vfcedbs,                              // llvm.s390.vfcedbs
+    s390_vfcesbs,                              // llvm.s390.vfcesbs
+    s390_vfchdbs,                              // llvm.s390.vfchdbs
+    s390_vfchedbs,                             // llvm.s390.vfchedbs
+    s390_vfchesbs,                             // llvm.s390.vfchesbs
+    s390_vfchsbs,                              // llvm.s390.vfchsbs
+    s390_vfeeb,                                // llvm.s390.vfeeb
+    s390_vfeebs,                               // llvm.s390.vfeebs
+    s390_vfeef,                                // llvm.s390.vfeef
+    s390_vfeefs,                               // llvm.s390.vfeefs
+    s390_vfeeh,                                // llvm.s390.vfeeh
+    s390_vfeehs,                               // llvm.s390.vfeehs
+    s390_vfeezb,                               // llvm.s390.vfeezb
+    s390_vfeezbs,                              // llvm.s390.vfeezbs
+    s390_vfeezf,                               // llvm.s390.vfeezf
+    s390_vfeezfs,                              // llvm.s390.vfeezfs
+    s390_vfeezh,                               // llvm.s390.vfeezh
+    s390_vfeezhs,                              // llvm.s390.vfeezhs
+    s390_vfeneb,                               // llvm.s390.vfeneb
+    s390_vfenebs,                              // llvm.s390.vfenebs
+    s390_vfenef,                               // llvm.s390.vfenef
+    s390_vfenefs,                              // llvm.s390.vfenefs
+    s390_vfeneh,                               // llvm.s390.vfeneh
+    s390_vfenehs,                              // llvm.s390.vfenehs
+    s390_vfenezb,                              // llvm.s390.vfenezb
+    s390_vfenezbs,                             // llvm.s390.vfenezbs
+    s390_vfenezf,                              // llvm.s390.vfenezf
+    s390_vfenezfs,                             // llvm.s390.vfenezfs
+    s390_vfenezh,                              // llvm.s390.vfenezh
+    s390_vfenezhs,                             // llvm.s390.vfenezhs
+    s390_vfidb,                                // llvm.s390.vfidb
+    s390_vfisb,                                // llvm.s390.vfisb
+    s390_vfmaxdb,                              // llvm.s390.vfmaxdb
+    s390_vfmaxsb,                              // llvm.s390.vfmaxsb
+    s390_vfmindb,                              // llvm.s390.vfmindb
+    s390_vfminsb,                              // llvm.s390.vfminsb
+    s390_vftcidb,                              // llvm.s390.vftcidb
+    s390_vftcisb,                              // llvm.s390.vftcisb
+    s390_vgfmab,                               // llvm.s390.vgfmab
+    s390_vgfmaf,                               // llvm.s390.vgfmaf
+    s390_vgfmag,                               // llvm.s390.vgfmag
+    s390_vgfmah,                               // llvm.s390.vgfmah
+    s390_vgfmb,                                // llvm.s390.vgfmb
+    s390_vgfmf,                                // llvm.s390.vgfmf
+    s390_vgfmg,                                // llvm.s390.vgfmg
+    s390_vgfmh,                                // llvm.s390.vgfmh
+    s390_vistrb,                               // llvm.s390.vistrb
+    s390_vistrbs,                              // llvm.s390.vistrbs
+    s390_vistrf,                               // llvm.s390.vistrf
+    s390_vistrfs,                              // llvm.s390.vistrfs
+    s390_vistrh,                               // llvm.s390.vistrh
+    s390_vistrhs,                              // llvm.s390.vistrhs
+    s390_vlbb,                                 // llvm.s390.vlbb
+    s390_vll,                                  // llvm.s390.vll
+    s390_vlrl,                                 // llvm.s390.vlrl
+    s390_vmaeb,                                // llvm.s390.vmaeb
+    s390_vmaef,                                // llvm.s390.vmaef
+    s390_vmaeh,                                // llvm.s390.vmaeh
+    s390_vmahb,                                // llvm.s390.vmahb
+    s390_vmahf,                                // llvm.s390.vmahf
+    s390_vmahh,                                // llvm.s390.vmahh
+    s390_vmaleb,                               // llvm.s390.vmaleb
+    s390_vmalef,                               // llvm.s390.vmalef
+    s390_vmaleh,                               // llvm.s390.vmaleh
+    s390_vmalhb,                               // llvm.s390.vmalhb
+    s390_vmalhf,                               // llvm.s390.vmalhf
+    s390_vmalhh,                               // llvm.s390.vmalhh
+    s390_vmalob,                               // llvm.s390.vmalob
+    s390_vmalof,                               // llvm.s390.vmalof
+    s390_vmaloh,                               // llvm.s390.vmaloh
+    s390_vmaob,                                // llvm.s390.vmaob
+    s390_vmaof,                                // llvm.s390.vmaof
+    s390_vmaoh,                                // llvm.s390.vmaoh
+    s390_vmeb,                                 // llvm.s390.vmeb
+    s390_vmef,                                 // llvm.s390.vmef
+    s390_vmeh,                                 // llvm.s390.vmeh
+    s390_vmhb,                                 // llvm.s390.vmhb
+    s390_vmhf,                                 // llvm.s390.vmhf
+    s390_vmhh,                                 // llvm.s390.vmhh
+    s390_vmleb,                                // llvm.s390.vmleb
+    s390_vmlef,                                // llvm.s390.vmlef
+    s390_vmleh,                                // llvm.s390.vmleh
+    s390_vmlhb,                                // llvm.s390.vmlhb
+    s390_vmlhf,                                // llvm.s390.vmlhf
+    s390_vmlhh,                                // llvm.s390.vmlhh
+    s390_vmlob,                                // llvm.s390.vmlob
+    s390_vmlof,                                // llvm.s390.vmlof
+    s390_vmloh,                                // llvm.s390.vmloh
+    s390_vmob,                                 // llvm.s390.vmob
+    s390_vmof,                                 // llvm.s390.vmof
+    s390_vmoh,                                 // llvm.s390.vmoh
+    s390_vmslg,                                // llvm.s390.vmslg
+    s390_vpdi,                                 // llvm.s390.vpdi
+    s390_vperm,                                // llvm.s390.vperm
+    s390_vpklsf,                               // llvm.s390.vpklsf
+    s390_vpklsfs,                              // llvm.s390.vpklsfs
+    s390_vpklsg,                               // llvm.s390.vpklsg
+    s390_vpklsgs,                              // llvm.s390.vpklsgs
+    s390_vpklsh,                               // llvm.s390.vpklsh
+    s390_vpklshs,                              // llvm.s390.vpklshs
+    s390_vpksf,                                // llvm.s390.vpksf
+    s390_vpksfs,                               // llvm.s390.vpksfs
+    s390_vpksg,                                // llvm.s390.vpksg
+    s390_vpksgs,                               // llvm.s390.vpksgs
+    s390_vpksh,                                // llvm.s390.vpksh
+    s390_vpkshs,                               // llvm.s390.vpkshs
+    s390_vsbcbiq,                              // llvm.s390.vsbcbiq
+    s390_vsbiq,                                // llvm.s390.vsbiq
+    s390_vscbib,                               // llvm.s390.vscbib
+    s390_vscbif,                               // llvm.s390.vscbif
+    s390_vscbig,                               // llvm.s390.vscbig
+    s390_vscbih,                               // llvm.s390.vscbih
+    s390_vscbiq,                               // llvm.s390.vscbiq
+    s390_vsl,                                  // llvm.s390.vsl
+    s390_vslb,                                 // llvm.s390.vslb
+    s390_vsldb,                                // llvm.s390.vsldb
+    s390_vsq,                                  // llvm.s390.vsq
+    s390_vsra,                                 // llvm.s390.vsra
+    s390_vsrab,                                // llvm.s390.vsrab
+    s390_vsrl,                                 // llvm.s390.vsrl
+    s390_vsrlb,                                // llvm.s390.vsrlb
+    s390_vstl,                                 // llvm.s390.vstl
+    s390_vstrcb,                               // llvm.s390.vstrcb
+    s390_vstrcbs,                              // llvm.s390.vstrcbs
+    s390_vstrcf,                               // llvm.s390.vstrcf
+    s390_vstrcfs,                              // llvm.s390.vstrcfs
+    s390_vstrch,                               // llvm.s390.vstrch
+    s390_vstrchs,                              // llvm.s390.vstrchs
+    s390_vstrczb,                              // llvm.s390.vstrczb
+    s390_vstrczbs,                             // llvm.s390.vstrczbs
+    s390_vstrczf,                              // llvm.s390.vstrczf
+    s390_vstrczfs,                             // llvm.s390.vstrczfs
+    s390_vstrczh,                              // llvm.s390.vstrczh
+    s390_vstrczhs,                             // llvm.s390.vstrczhs
+    s390_vstrl,                                // llvm.s390.vstrl
+    s390_vsumb,                                // llvm.s390.vsumb
+    s390_vsumgf,                               // llvm.s390.vsumgf
+    s390_vsumgh,                               // llvm.s390.vsumgh
+    s390_vsumh,                                // llvm.s390.vsumh
+    s390_vsumqf,                               // llvm.s390.vsumqf
+    s390_vsumqg,                               // llvm.s390.vsumqg
+    s390_vtm,                                  // llvm.s390.vtm
+    s390_vuphb,                                // llvm.s390.vuphb
+    s390_vuphf,                                // llvm.s390.vuphf
+    s390_vuphh,                                // llvm.s390.vuphh
+    s390_vuplb,                                // llvm.s390.vuplb
+    s390_vuplf,                                // llvm.s390.vuplf
+    s390_vuplhb,                               // llvm.s390.vuplhb
+    s390_vuplhf,                               // llvm.s390.vuplhf
+    s390_vuplhh,                               // llvm.s390.vuplhh
+    s390_vuplhw,                               // llvm.s390.vuplhw
+    s390_vupllb,                               // llvm.s390.vupllb
+    s390_vupllf,                               // llvm.s390.vupllf
+    s390_vupllh,                               // llvm.s390.vupllh
+    wasm_current_memory,                       // llvm.wasm.current.memory
+    wasm_get_ehselector,                       // llvm.wasm.get.ehselector
+    wasm_get_exception,                        // llvm.wasm.get.exception
+    wasm_grow_memory,                          // llvm.wasm.grow.memory
+    wasm_mem_grow,                             // llvm.wasm.mem.grow
+    wasm_mem_size,                             // llvm.wasm.mem.size
+    wasm_rethrow,                              // llvm.wasm.rethrow
+    wasm_throw,                                // llvm.wasm.throw
+    x86_3dnow_pavgusb,                         // llvm.x86.3dnow.pavgusb
+    x86_3dnow_pf2id,                           // llvm.x86.3dnow.pf2id
+    x86_3dnow_pfacc,                           // llvm.x86.3dnow.pfacc
+    x86_3dnow_pfadd,                           // llvm.x86.3dnow.pfadd
+    x86_3dnow_pfcmpeq,                         // llvm.x86.3dnow.pfcmpeq
+    x86_3dnow_pfcmpge,                         // llvm.x86.3dnow.pfcmpge
+    x86_3dnow_pfcmpgt,                         // llvm.x86.3dnow.pfcmpgt
+    x86_3dnow_pfmax,                           // llvm.x86.3dnow.pfmax
+    x86_3dnow_pfmin,                           // llvm.x86.3dnow.pfmin
+    x86_3dnow_pfmul,                           // llvm.x86.3dnow.pfmul
+    x86_3dnow_pfrcp,                           // llvm.x86.3dnow.pfrcp
+    x86_3dnow_pfrcpit1,                        // llvm.x86.3dnow.pfrcpit1
+    x86_3dnow_pfrcpit2,                        // llvm.x86.3dnow.pfrcpit2
+    x86_3dnow_pfrsqit1,                        // llvm.x86.3dnow.pfrsqit1
+    x86_3dnow_pfrsqrt,                         // llvm.x86.3dnow.pfrsqrt
+    x86_3dnow_pfsub,                           // llvm.x86.3dnow.pfsub
+    x86_3dnow_pfsubr,                          // llvm.x86.3dnow.pfsubr
+    x86_3dnow_pi2fd,                           // llvm.x86.3dnow.pi2fd
+    x86_3dnow_pmulhrw,                         // llvm.x86.3dnow.pmulhrw
+    x86_3dnowa_pf2iw,                          // llvm.x86.3dnowa.pf2iw
+    x86_3dnowa_pfnacc,                         // llvm.x86.3dnowa.pfnacc
+    x86_3dnowa_pfpnacc,                        // llvm.x86.3dnowa.pfpnacc
+    x86_3dnowa_pi2fw,                          // llvm.x86.3dnowa.pi2fw
+    x86_3dnowa_pswapd,                         // llvm.x86.3dnowa.pswapd
+    x86_addcarry_u32,                          // llvm.x86.addcarry.u32
+    x86_addcarry_u64,                          // llvm.x86.addcarry.u64
+    x86_addcarryx_u32,                         // llvm.x86.addcarryx.u32
+    x86_addcarryx_u64,                         // llvm.x86.addcarryx.u64
+    x86_aesni_aesdec,                          // llvm.x86.aesni.aesdec
+    x86_aesni_aesdec_256,                      // llvm.x86.aesni.aesdec.256
+    x86_aesni_aesdec_512,                      // llvm.x86.aesni.aesdec.512
+    x86_aesni_aesdeclast,                      // llvm.x86.aesni.aesdeclast
+    x86_aesni_aesdeclast_256,                  // llvm.x86.aesni.aesdeclast.256
+    x86_aesni_aesdeclast_512,                  // llvm.x86.aesni.aesdeclast.512
+    x86_aesni_aesenc,                          // llvm.x86.aesni.aesenc
+    x86_aesni_aesenc_256,                      // llvm.x86.aesni.aesenc.256
+    x86_aesni_aesenc_512,                      // llvm.x86.aesni.aesenc.512
+    x86_aesni_aesenclast,                      // llvm.x86.aesni.aesenclast
+    x86_aesni_aesenclast_256,                  // llvm.x86.aesni.aesenclast.256
+    x86_aesni_aesenclast_512,                  // llvm.x86.aesni.aesenclast.512
+    x86_aesni_aesimc,                          // llvm.x86.aesni.aesimc
+    x86_aesni_aeskeygenassist,                 // llvm.x86.aesni.aeskeygenassist
+    x86_avx_addsub_pd_256,                     // llvm.x86.avx.addsub.pd.256
+    x86_avx_addsub_ps_256,                     // llvm.x86.avx.addsub.ps.256
+    x86_avx_blendv_pd_256,                     // llvm.x86.avx.blendv.pd.256
+    x86_avx_blendv_ps_256,                     // llvm.x86.avx.blendv.ps.256
+    x86_avx_cmp_pd_256,                        // llvm.x86.avx.cmp.pd.256
+    x86_avx_cmp_ps_256,                        // llvm.x86.avx.cmp.ps.256
+    x86_avx_cvt_pd2_ps_256,                    // llvm.x86.avx.cvt.pd2.ps.256
+    x86_avx_cvt_pd2dq_256,                     // llvm.x86.avx.cvt.pd2dq.256
+    x86_avx_cvt_ps2dq_256,                     // llvm.x86.avx.cvt.ps2dq.256
+    x86_avx_cvtdq2_ps_256,                     // llvm.x86.avx.cvtdq2.ps.256
+    x86_avx_cvtt_pd2dq_256,                    // llvm.x86.avx.cvtt.pd2dq.256
+    x86_avx_cvtt_ps2dq_256,                    // llvm.x86.avx.cvtt.ps2dq.256
+    x86_avx_dp_ps_256,                         // llvm.x86.avx.dp.ps.256
+    x86_avx_hadd_pd_256,                       // llvm.x86.avx.hadd.pd.256
+    x86_avx_hadd_ps_256,                       // llvm.x86.avx.hadd.ps.256
+    x86_avx_hsub_pd_256,                       // llvm.x86.avx.hsub.pd.256
+    x86_avx_hsub_ps_256,                       // llvm.x86.avx.hsub.ps.256
+    x86_avx_ldu_dq_256,                        // llvm.x86.avx.ldu.dq.256
+    x86_avx_maskload_pd,                       // llvm.x86.avx.maskload.pd
+    x86_avx_maskload_pd_256,                   // llvm.x86.avx.maskload.pd.256
+    x86_avx_maskload_ps,                       // llvm.x86.avx.maskload.ps
+    x86_avx_maskload_ps_256,                   // llvm.x86.avx.maskload.ps.256
+    x86_avx_maskstore_pd,                      // llvm.x86.avx.maskstore.pd
+    x86_avx_maskstore_pd_256,                  // llvm.x86.avx.maskstore.pd.256
+    x86_avx_maskstore_ps,                      // llvm.x86.avx.maskstore.ps
+    x86_avx_maskstore_ps_256,                  // llvm.x86.avx.maskstore.ps.256
+    x86_avx_max_pd_256,                        // llvm.x86.avx.max.pd.256
+    x86_avx_max_ps_256,                        // llvm.x86.avx.max.ps.256
+    x86_avx_min_pd_256,                        // llvm.x86.avx.min.pd.256
+    x86_avx_min_ps_256,                        // llvm.x86.avx.min.ps.256
+    x86_avx_movmsk_pd_256,                     // llvm.x86.avx.movmsk.pd.256
+    x86_avx_movmsk_ps_256,                     // llvm.x86.avx.movmsk.ps.256
+    x86_avx_ptestc_256,                        // llvm.x86.avx.ptestc.256
+    x86_avx_ptestnzc_256,                      // llvm.x86.avx.ptestnzc.256
+    x86_avx_ptestz_256,                        // llvm.x86.avx.ptestz.256
+    x86_avx_rcp_ps_256,                        // llvm.x86.avx.rcp.ps.256
+    x86_avx_round_pd_256,                      // llvm.x86.avx.round.pd.256
+    x86_avx_round_ps_256,                      // llvm.x86.avx.round.ps.256
+    x86_avx_rsqrt_ps_256,                      // llvm.x86.avx.rsqrt.ps.256
+    x86_avx_sqrt_pd_256,                       // llvm.x86.avx.sqrt.pd.256
+    x86_avx_sqrt_ps_256,                       // llvm.x86.avx.sqrt.ps.256
+    x86_avx_vpermilvar_pd,                     // llvm.x86.avx.vpermilvar.pd
+    x86_avx_vpermilvar_pd_256,                 // llvm.x86.avx.vpermilvar.pd.256
+    x86_avx_vpermilvar_ps,                     // llvm.x86.avx.vpermilvar.ps
+    x86_avx_vpermilvar_ps_256,                 // llvm.x86.avx.vpermilvar.ps.256
+    x86_avx_vtestc_pd,                         // llvm.x86.avx.vtestc.pd
+    x86_avx_vtestc_pd_256,                     // llvm.x86.avx.vtestc.pd.256
+    x86_avx_vtestc_ps,                         // llvm.x86.avx.vtestc.ps
+    x86_avx_vtestc_ps_256,                     // llvm.x86.avx.vtestc.ps.256
+    x86_avx_vtestnzc_pd,                       // llvm.x86.avx.vtestnzc.pd
+    x86_avx_vtestnzc_pd_256,                   // llvm.x86.avx.vtestnzc.pd.256
+    x86_avx_vtestnzc_ps,                       // llvm.x86.avx.vtestnzc.ps
+    x86_avx_vtestnzc_ps_256,                   // llvm.x86.avx.vtestnzc.ps.256
+    x86_avx_vtestz_pd,                         // llvm.x86.avx.vtestz.pd
+    x86_avx_vtestz_pd_256,                     // llvm.x86.avx.vtestz.pd.256
+    x86_avx_vtestz_ps,                         // llvm.x86.avx.vtestz.ps
+    x86_avx_vtestz_ps_256,                     // llvm.x86.avx.vtestz.ps.256
+    x86_avx_vzeroall,                          // llvm.x86.avx.vzeroall
+    x86_avx_vzeroupper,                        // llvm.x86.avx.vzeroupper
+    x86_avx2_gather_d_d,                       // llvm.x86.avx2.gather.d.d
+    x86_avx2_gather_d_d_256,                   // llvm.x86.avx2.gather.d.d.256
+    x86_avx2_gather_d_pd,                      // llvm.x86.avx2.gather.d.pd
+    x86_avx2_gather_d_pd_256,                  // llvm.x86.avx2.gather.d.pd.256
+    x86_avx2_gather_d_ps,                      // llvm.x86.avx2.gather.d.ps
+    x86_avx2_gather_d_ps_256,                  // llvm.x86.avx2.gather.d.ps.256
+    x86_avx2_gather_d_q,                       // llvm.x86.avx2.gather.d.q
+    x86_avx2_gather_d_q_256,                   // llvm.x86.avx2.gather.d.q.256
+    x86_avx2_gather_q_d,                       // llvm.x86.avx2.gather.q.d
+    x86_avx2_gather_q_d_256,                   // llvm.x86.avx2.gather.q.d.256
+    x86_avx2_gather_q_pd,                      // llvm.x86.avx2.gather.q.pd
+    x86_avx2_gather_q_pd_256,                  // llvm.x86.avx2.gather.q.pd.256
+    x86_avx2_gather_q_ps,                      // llvm.x86.avx2.gather.q.ps
+    x86_avx2_gather_q_ps_256,                  // llvm.x86.avx2.gather.q.ps.256
+    x86_avx2_gather_q_q,                       // llvm.x86.avx2.gather.q.q
+    x86_avx2_gather_q_q_256,                   // llvm.x86.avx2.gather.q.q.256
+    x86_avx2_maskload_d,                       // llvm.x86.avx2.maskload.d
+    x86_avx2_maskload_d_256,                   // llvm.x86.avx2.maskload.d.256
+    x86_avx2_maskload_q,                       // llvm.x86.avx2.maskload.q
+    x86_avx2_maskload_q_256,                   // llvm.x86.avx2.maskload.q.256
+    x86_avx2_maskstore_d,                      // llvm.x86.avx2.maskstore.d
+    x86_avx2_maskstore_d_256,                  // llvm.x86.avx2.maskstore.d.256
+    x86_avx2_maskstore_q,                      // llvm.x86.avx2.maskstore.q
+    x86_avx2_maskstore_q_256,                  // llvm.x86.avx2.maskstore.q.256
+    x86_avx2_mpsadbw,                          // llvm.x86.avx2.mpsadbw
+    x86_avx2_packssdw,                         // llvm.x86.avx2.packssdw
+    x86_avx2_packsswb,                         // llvm.x86.avx2.packsswb
+    x86_avx2_packusdw,                         // llvm.x86.avx2.packusdw
+    x86_avx2_packuswb,                         // llvm.x86.avx2.packuswb
+    x86_avx2_padds_b,                          // llvm.x86.avx2.padds.b
+    x86_avx2_padds_w,                          // llvm.x86.avx2.padds.w
+    x86_avx2_paddus_b,                         // llvm.x86.avx2.paddus.b
+    x86_avx2_paddus_w,                         // llvm.x86.avx2.paddus.w
+    x86_avx2_pblendvb,                         // llvm.x86.avx2.pblendvb
+    x86_avx2_permd,                            // llvm.x86.avx2.permd
+    x86_avx2_permps,                           // llvm.x86.avx2.permps
+    x86_avx2_phadd_d,                          // llvm.x86.avx2.phadd.d
+    x86_avx2_phadd_sw,                         // llvm.x86.avx2.phadd.sw
+    x86_avx2_phadd_w,                          // llvm.x86.avx2.phadd.w
+    x86_avx2_phsub_d,                          // llvm.x86.avx2.phsub.d
+    x86_avx2_phsub_sw,                         // llvm.x86.avx2.phsub.sw
+    x86_avx2_phsub_w,                          // llvm.x86.avx2.phsub.w
+    x86_avx2_pmadd_ub_sw,                      // llvm.x86.avx2.pmadd.ub.sw
+    x86_avx2_pmadd_wd,                         // llvm.x86.avx2.pmadd.wd
+    x86_avx2_pmovmskb,                         // llvm.x86.avx2.pmovmskb
+    x86_avx2_pmul_dq,                          // llvm.x86.avx2.pmul.dq
+    x86_avx2_pmul_hr_sw,                       // llvm.x86.avx2.pmul.hr.sw
+    x86_avx2_pmulh_w,                          // llvm.x86.avx2.pmulh.w
+    x86_avx2_pmulhu_w,                         // llvm.x86.avx2.pmulhu.w
+    x86_avx2_pmulu_dq,                         // llvm.x86.avx2.pmulu.dq
+    x86_avx2_psad_bw,                          // llvm.x86.avx2.psad.bw
+    x86_avx2_pshuf_b,                          // llvm.x86.avx2.pshuf.b
+    x86_avx2_psign_b,                          // llvm.x86.avx2.psign.b
+    x86_avx2_psign_d,                          // llvm.x86.avx2.psign.d
+    x86_avx2_psign_w,                          // llvm.x86.avx2.psign.w
+    x86_avx2_psll_d,                           // llvm.x86.avx2.psll.d
+    x86_avx2_psll_q,                           // llvm.x86.avx2.psll.q
+    x86_avx2_psll_w,                           // llvm.x86.avx2.psll.w
+    x86_avx2_pslli_d,                          // llvm.x86.avx2.pslli.d
+    x86_avx2_pslli_q,                          // llvm.x86.avx2.pslli.q
+    x86_avx2_pslli_w,                          // llvm.x86.avx2.pslli.w
+    x86_avx2_psllv_d,                          // llvm.x86.avx2.psllv.d
+    x86_avx2_psllv_d_256,                      // llvm.x86.avx2.psllv.d.256
+    x86_avx2_psllv_q,                          // llvm.x86.avx2.psllv.q
+    x86_avx2_psllv_q_256,                      // llvm.x86.avx2.psllv.q.256
+    x86_avx2_psra_d,                           // llvm.x86.avx2.psra.d
+    x86_avx2_psra_w,                           // llvm.x86.avx2.psra.w
+    x86_avx2_psrai_d,                          // llvm.x86.avx2.psrai.d
+    x86_avx2_psrai_w,                          // llvm.x86.avx2.psrai.w
+    x86_avx2_psrav_d,                          // llvm.x86.avx2.psrav.d
+    x86_avx2_psrav_d_256,                      // llvm.x86.avx2.psrav.d.256
+    x86_avx2_psrl_d,                           // llvm.x86.avx2.psrl.d
+    x86_avx2_psrl_q,                           // llvm.x86.avx2.psrl.q
+    x86_avx2_psrl_w,                           // llvm.x86.avx2.psrl.w
+    x86_avx2_psrli_d,                          // llvm.x86.avx2.psrli.d
+    x86_avx2_psrli_q,                          // llvm.x86.avx2.psrli.q
+    x86_avx2_psrli_w,                          // llvm.x86.avx2.psrli.w
+    x86_avx2_psrlv_d,                          // llvm.x86.avx2.psrlv.d
+    x86_avx2_psrlv_d_256,                      // llvm.x86.avx2.psrlv.d.256
+    x86_avx2_psrlv_q,                          // llvm.x86.avx2.psrlv.q
+    x86_avx2_psrlv_q_256,                      // llvm.x86.avx2.psrlv.q.256
+    x86_avx2_psubs_b,                          // llvm.x86.avx2.psubs.b
+    x86_avx2_psubs_w,                          // llvm.x86.avx2.psubs.w
+    x86_avx2_psubus_b,                         // llvm.x86.avx2.psubus.b
+    x86_avx2_psubus_w,                         // llvm.x86.avx2.psubus.w
+    x86_avx512_broadcastmb_128,                // llvm.x86.avx512.broadcastmb.128
+    x86_avx512_broadcastmb_256,                // llvm.x86.avx512.broadcastmb.256
+    x86_avx512_broadcastmb_512,                // llvm.x86.avx512.broadcastmb.512
+    x86_avx512_broadcastmw_128,                // llvm.x86.avx512.broadcastmw.128
+    x86_avx512_broadcastmw_256,                // llvm.x86.avx512.broadcastmw.256
+    x86_avx512_broadcastmw_512,                // llvm.x86.avx512.broadcastmw.512
+    x86_avx512_cvtsi2sd64,                     // llvm.x86.avx512.cvtsi2sd64
+    x86_avx512_cvtsi2ss32,                     // llvm.x86.avx512.cvtsi2ss32
+    x86_avx512_cvtsi2ss64,                     // llvm.x86.avx512.cvtsi2ss64
+    x86_avx512_cvttsd2si,                      // llvm.x86.avx512.cvttsd2si
+    x86_avx512_cvttsd2si64,                    // llvm.x86.avx512.cvttsd2si64
+    x86_avx512_cvttsd2usi,                     // llvm.x86.avx512.cvttsd2usi
+    x86_avx512_cvttsd2usi64,                   // llvm.x86.avx512.cvttsd2usi64
+    x86_avx512_cvttss2si,                      // llvm.x86.avx512.cvttss2si
+    x86_avx512_cvttss2si64,                    // llvm.x86.avx512.cvttss2si64
+    x86_avx512_cvttss2usi,                     // llvm.x86.avx512.cvttss2usi
+    x86_avx512_cvttss2usi64,                   // llvm.x86.avx512.cvttss2usi64
+    x86_avx512_cvtusi2sd,                      // llvm.x86.avx512.cvtusi2sd
+    x86_avx512_cvtusi2ss,                      // llvm.x86.avx512.cvtusi2ss
+    x86_avx512_cvtusi642sd,                    // llvm.x86.avx512.cvtusi642sd
+    x86_avx512_cvtusi642ss,                    // llvm.x86.avx512.cvtusi642ss
+    x86_avx512_exp2_pd,                        // llvm.x86.avx512.exp2.pd
+    x86_avx512_exp2_ps,                        // llvm.x86.avx512.exp2.ps
+    x86_avx512_gather_dpd_512,                 // llvm.x86.avx512.gather.dpd.512
+    x86_avx512_gather_dpi_512,                 // llvm.x86.avx512.gather.dpi.512
+    x86_avx512_gather_dpq_512,                 // llvm.x86.avx512.gather.dpq.512
+    x86_avx512_gather_dps_512,                 // llvm.x86.avx512.gather.dps.512
+    x86_avx512_gather_qpd_512,                 // llvm.x86.avx512.gather.qpd.512
+    x86_avx512_gather_qpi_512,                 // llvm.x86.avx512.gather.qpi.512
+    x86_avx512_gather_qpq_512,                 // llvm.x86.avx512.gather.qpq.512
+    x86_avx512_gather_qps_512,                 // llvm.x86.avx512.gather.qps.512
+    x86_avx512_gather3div2_df,                 // llvm.x86.avx512.gather3div2.df
+    x86_avx512_gather3div2_di,                 // llvm.x86.avx512.gather3div2.di
+    x86_avx512_gather3div4_df,                 // llvm.x86.avx512.gather3div4.df
+    x86_avx512_gather3div4_di,                 // llvm.x86.avx512.gather3div4.di
+    x86_avx512_gather3div4_sf,                 // llvm.x86.avx512.gather3div4.sf
+    x86_avx512_gather3div4_si,                 // llvm.x86.avx512.gather3div4.si
+    x86_avx512_gather3div8_sf,                 // llvm.x86.avx512.gather3div8.sf
+    x86_avx512_gather3div8_si,                 // llvm.x86.avx512.gather3div8.si
+    x86_avx512_gather3siv2_df,                 // llvm.x86.avx512.gather3siv2.df
+    x86_avx512_gather3siv2_di,                 // llvm.x86.avx512.gather3siv2.di
+    x86_avx512_gather3siv4_df,                 // llvm.x86.avx512.gather3siv4.df
+    x86_avx512_gather3siv4_di,                 // llvm.x86.avx512.gather3siv4.di
+    x86_avx512_gather3siv4_sf,                 // llvm.x86.avx512.gather3siv4.sf
+    x86_avx512_gather3siv4_si,                 // llvm.x86.avx512.gather3siv4.si
+    x86_avx512_gather3siv8_sf,                 // llvm.x86.avx512.gather3siv8.sf
+    x86_avx512_gather3siv8_si,                 // llvm.x86.avx512.gather3siv8.si
+    x86_avx512_gatherpf_dpd_512,               // llvm.x86.avx512.gatherpf.dpd.512
+    x86_avx512_gatherpf_dps_512,               // llvm.x86.avx512.gatherpf.dps.512
+    x86_avx512_gatherpf_qpd_512,               // llvm.x86.avx512.gatherpf.qpd.512
+    x86_avx512_gatherpf_qps_512,               // llvm.x86.avx512.gatherpf.qps.512
+    x86_avx512_mask_add_pd_512,                // llvm.x86.avx512.mask.add.pd.512
+    x86_avx512_mask_add_ps_512,                // llvm.x86.avx512.mask.add.ps.512
+    x86_avx512_mask_add_sd_round,              // llvm.x86.avx512.mask.add.sd.round
+    x86_avx512_mask_add_ss_round,              // llvm.x86.avx512.mask.add.ss.round
+    x86_avx512_mask_cmp_pd_128,                // llvm.x86.avx512.mask.cmp.pd.128
+    x86_avx512_mask_cmp_pd_256,                // llvm.x86.avx512.mask.cmp.pd.256
+    x86_avx512_mask_cmp_pd_512,                // llvm.x86.avx512.mask.cmp.pd.512
+    x86_avx512_mask_cmp_ps_128,                // llvm.x86.avx512.mask.cmp.ps.128
+    x86_avx512_mask_cmp_ps_256,                // llvm.x86.avx512.mask.cmp.ps.256
+    x86_avx512_mask_cmp_ps_512,                // llvm.x86.avx512.mask.cmp.ps.512
+    x86_avx512_mask_cmp_sd,                    // llvm.x86.avx512.mask.cmp.sd
+    x86_avx512_mask_cmp_ss,                    // llvm.x86.avx512.mask.cmp.ss
+    x86_avx512_mask_compress_b_128,            // llvm.x86.avx512.mask.compress.b.128
+    x86_avx512_mask_compress_b_256,            // llvm.x86.avx512.mask.compress.b.256
+    x86_avx512_mask_compress_b_512,            // llvm.x86.avx512.mask.compress.b.512
+    x86_avx512_mask_compress_d_128,            // llvm.x86.avx512.mask.compress.d.128
+    x86_avx512_mask_compress_d_256,            // llvm.x86.avx512.mask.compress.d.256
+    x86_avx512_mask_compress_d_512,            // llvm.x86.avx512.mask.compress.d.512
+    x86_avx512_mask_compress_pd_128,           // llvm.x86.avx512.mask.compress.pd.128
+    x86_avx512_mask_compress_pd_256,           // llvm.x86.avx512.mask.compress.pd.256
+    x86_avx512_mask_compress_pd_512,           // llvm.x86.avx512.mask.compress.pd.512
+    x86_avx512_mask_compress_ps_128,           // llvm.x86.avx512.mask.compress.ps.128
+    x86_avx512_mask_compress_ps_256,           // llvm.x86.avx512.mask.compress.ps.256
+    x86_avx512_mask_compress_ps_512,           // llvm.x86.avx512.mask.compress.ps.512
+    x86_avx512_mask_compress_q_128,            // llvm.x86.avx512.mask.compress.q.128
+    x86_avx512_mask_compress_q_256,            // llvm.x86.avx512.mask.compress.q.256
+    x86_avx512_mask_compress_q_512,            // llvm.x86.avx512.mask.compress.q.512
+    x86_avx512_mask_compress_store_b_128,      // llvm.x86.avx512.mask.compress.store.b.128
+    x86_avx512_mask_compress_store_b_256,      // llvm.x86.avx512.mask.compress.store.b.256
+    x86_avx512_mask_compress_store_b_512,      // llvm.x86.avx512.mask.compress.store.b.512
+    x86_avx512_mask_compress_store_d_128,      // llvm.x86.avx512.mask.compress.store.d.128
+    x86_avx512_mask_compress_store_d_256,      // llvm.x86.avx512.mask.compress.store.d.256
+    x86_avx512_mask_compress_store_d_512,      // llvm.x86.avx512.mask.compress.store.d.512
+    x86_avx512_mask_compress_store_pd_128,     // llvm.x86.avx512.mask.compress.store.pd.128
+    x86_avx512_mask_compress_store_pd_256,     // llvm.x86.avx512.mask.compress.store.pd.256
+    x86_avx512_mask_compress_store_pd_512,     // llvm.x86.avx512.mask.compress.store.pd.512
+    x86_avx512_mask_compress_store_ps_128,     // llvm.x86.avx512.mask.compress.store.ps.128
+    x86_avx512_mask_compress_store_ps_256,     // llvm.x86.avx512.mask.compress.store.ps.256
+    x86_avx512_mask_compress_store_ps_512,     // llvm.x86.avx512.mask.compress.store.ps.512
+    x86_avx512_mask_compress_store_q_128,      // llvm.x86.avx512.mask.compress.store.q.128
+    x86_avx512_mask_compress_store_q_256,      // llvm.x86.avx512.mask.compress.store.q.256
+    x86_avx512_mask_compress_store_q_512,      // llvm.x86.avx512.mask.compress.store.q.512
+    x86_avx512_mask_compress_store_w_128,      // llvm.x86.avx512.mask.compress.store.w.128
+    x86_avx512_mask_compress_store_w_256,      // llvm.x86.avx512.mask.compress.store.w.256
+    x86_avx512_mask_compress_store_w_512,      // llvm.x86.avx512.mask.compress.store.w.512
+    x86_avx512_mask_compress_w_128,            // llvm.x86.avx512.mask.compress.w.128
+    x86_avx512_mask_compress_w_256,            // llvm.x86.avx512.mask.compress.w.256
+    x86_avx512_mask_compress_w_512,            // llvm.x86.avx512.mask.compress.w.512
+    x86_avx512_mask_conflict_d_128,            // llvm.x86.avx512.mask.conflict.d.128
+    x86_avx512_mask_conflict_d_256,            // llvm.x86.avx512.mask.conflict.d.256
+    x86_avx512_mask_conflict_d_512,            // llvm.x86.avx512.mask.conflict.d.512
+    x86_avx512_mask_conflict_q_128,            // llvm.x86.avx512.mask.conflict.q.128
+    x86_avx512_mask_conflict_q_256,            // llvm.x86.avx512.mask.conflict.q.256
+    x86_avx512_mask_conflict_q_512,            // llvm.x86.avx512.mask.conflict.q.512
+    x86_avx512_mask_cvtdq2ps_128,              // llvm.x86.avx512.mask.cvtdq2ps.128
+    x86_avx512_mask_cvtdq2ps_256,              // llvm.x86.avx512.mask.cvtdq2ps.256
+    x86_avx512_mask_cvtdq2ps_512,              // llvm.x86.avx512.mask.cvtdq2ps.512
+    x86_avx512_mask_cvtpd2dq_128,              // llvm.x86.avx512.mask.cvtpd2dq.128
+    x86_avx512_mask_cvtpd2dq_256,              // llvm.x86.avx512.mask.cvtpd2dq.256
+    x86_avx512_mask_cvtpd2dq_512,              // llvm.x86.avx512.mask.cvtpd2dq.512
+    x86_avx512_mask_cvtpd2ps,                  // llvm.x86.avx512.mask.cvtpd2ps
+    x86_avx512_mask_cvtpd2ps_256,              // llvm.x86.avx512.mask.cvtpd2ps.256
+    x86_avx512_mask_cvtpd2ps_512,              // llvm.x86.avx512.mask.cvtpd2ps.512
+    x86_avx512_mask_cvtpd2qq_128,              // llvm.x86.avx512.mask.cvtpd2qq.128
+    x86_avx512_mask_cvtpd2qq_256,              // llvm.x86.avx512.mask.cvtpd2qq.256
+    x86_avx512_mask_cvtpd2qq_512,              // llvm.x86.avx512.mask.cvtpd2qq.512
+    x86_avx512_mask_cvtpd2udq_128,             // llvm.x86.avx512.mask.cvtpd2udq.128
+    x86_avx512_mask_cvtpd2udq_256,             // llvm.x86.avx512.mask.cvtpd2udq.256
+    x86_avx512_mask_cvtpd2udq_512,             // llvm.x86.avx512.mask.cvtpd2udq.512
+    x86_avx512_mask_cvtpd2uqq_128,             // llvm.x86.avx512.mask.cvtpd2uqq.128
+    x86_avx512_mask_cvtpd2uqq_256,             // llvm.x86.avx512.mask.cvtpd2uqq.256
+    x86_avx512_mask_cvtpd2uqq_512,             // llvm.x86.avx512.mask.cvtpd2uqq.512
+    x86_avx512_mask_cvtps2dq_128,              // llvm.x86.avx512.mask.cvtps2dq.128
+    x86_avx512_mask_cvtps2dq_256,              // llvm.x86.avx512.mask.cvtps2dq.256
+    x86_avx512_mask_cvtps2dq_512,              // llvm.x86.avx512.mask.cvtps2dq.512
+    x86_avx512_mask_cvtps2pd_128,              // llvm.x86.avx512.mask.cvtps2pd.128
+    x86_avx512_mask_cvtps2pd_256,              // llvm.x86.avx512.mask.cvtps2pd.256
+    x86_avx512_mask_cvtps2pd_512,              // llvm.x86.avx512.mask.cvtps2pd.512
+    x86_avx512_mask_cvtps2qq_128,              // llvm.x86.avx512.mask.cvtps2qq.128
+    x86_avx512_mask_cvtps2qq_256,              // llvm.x86.avx512.mask.cvtps2qq.256
+    x86_avx512_mask_cvtps2qq_512,              // llvm.x86.avx512.mask.cvtps2qq.512
+    x86_avx512_mask_cvtps2udq_128,             // llvm.x86.avx512.mask.cvtps2udq.128
+    x86_avx512_mask_cvtps2udq_256,             // llvm.x86.avx512.mask.cvtps2udq.256
+    x86_avx512_mask_cvtps2udq_512,             // llvm.x86.avx512.mask.cvtps2udq.512
+    x86_avx512_mask_cvtps2uqq_128,             // llvm.x86.avx512.mask.cvtps2uqq.128
+    x86_avx512_mask_cvtps2uqq_256,             // llvm.x86.avx512.mask.cvtps2uqq.256
+    x86_avx512_mask_cvtps2uqq_512,             // llvm.x86.avx512.mask.cvtps2uqq.512
+    x86_avx512_mask_cvtqq2pd_128,              // llvm.x86.avx512.mask.cvtqq2pd.128
+    x86_avx512_mask_cvtqq2pd_256,              // llvm.x86.avx512.mask.cvtqq2pd.256
+    x86_avx512_mask_cvtqq2pd_512,              // llvm.x86.avx512.mask.cvtqq2pd.512
+    x86_avx512_mask_cvtqq2ps_128,              // llvm.x86.avx512.mask.cvtqq2ps.128
+    x86_avx512_mask_cvtqq2ps_256,              // llvm.x86.avx512.mask.cvtqq2ps.256
+    x86_avx512_mask_cvtqq2ps_512,              // llvm.x86.avx512.mask.cvtqq2ps.512
+    x86_avx512_mask_cvtsd2ss_round,            // llvm.x86.avx512.mask.cvtsd2ss.round
+    x86_avx512_mask_cvtss2sd_round,            // llvm.x86.avx512.mask.cvtss2sd.round
+    x86_avx512_mask_cvttpd2dq_128,             // llvm.x86.avx512.mask.cvttpd2dq.128
+    x86_avx512_mask_cvttpd2dq_256,             // llvm.x86.avx512.mask.cvttpd2dq.256
+    x86_avx512_mask_cvttpd2dq_512,             // llvm.x86.avx512.mask.cvttpd2dq.512
+    x86_avx512_mask_cvttpd2qq_128,             // llvm.x86.avx512.mask.cvttpd2qq.128
+    x86_avx512_mask_cvttpd2qq_256,             // llvm.x86.avx512.mask.cvttpd2qq.256
+    x86_avx512_mask_cvttpd2qq_512,             // llvm.x86.avx512.mask.cvttpd2qq.512
+    x86_avx512_mask_cvttpd2udq_128,            // llvm.x86.avx512.mask.cvttpd2udq.128
+    x86_avx512_mask_cvttpd2udq_256,            // llvm.x86.avx512.mask.cvttpd2udq.256
+    x86_avx512_mask_cvttpd2udq_512,            // llvm.x86.avx512.mask.cvttpd2udq.512
+    x86_avx512_mask_cvttpd2uqq_128,            // llvm.x86.avx512.mask.cvttpd2uqq.128
+    x86_avx512_mask_cvttpd2uqq_256,            // llvm.x86.avx512.mask.cvttpd2uqq.256
+    x86_avx512_mask_cvttpd2uqq_512,            // llvm.x86.avx512.mask.cvttpd2uqq.512
+    x86_avx512_mask_cvttps2dq_128,             // llvm.x86.avx512.mask.cvttps2dq.128
+    x86_avx512_mask_cvttps2dq_256,             // llvm.x86.avx512.mask.cvttps2dq.256
+    x86_avx512_mask_cvttps2dq_512,             // llvm.x86.avx512.mask.cvttps2dq.512
+    x86_avx512_mask_cvttps2qq_128,             // llvm.x86.avx512.mask.cvttps2qq.128
+    x86_avx512_mask_cvttps2qq_256,             // llvm.x86.avx512.mask.cvttps2qq.256
+    x86_avx512_mask_cvttps2qq_512,             // llvm.x86.avx512.mask.cvttps2qq.512
+    x86_avx512_mask_cvttps2udq_128,            // llvm.x86.avx512.mask.cvttps2udq.128
+    x86_avx512_mask_cvttps2udq_256,            // llvm.x86.avx512.mask.cvttps2udq.256
+    x86_avx512_mask_cvttps2udq_512,            // llvm.x86.avx512.mask.cvttps2udq.512
+    x86_avx512_mask_cvttps2uqq_128,            // llvm.x86.avx512.mask.cvttps2uqq.128
+    x86_avx512_mask_cvttps2uqq_256,            // llvm.x86.avx512.mask.cvttps2uqq.256
+    x86_avx512_mask_cvttps2uqq_512,            // llvm.x86.avx512.mask.cvttps2uqq.512
+    x86_avx512_mask_cvtudq2ps_128,             // llvm.x86.avx512.mask.cvtudq2ps.128
+    x86_avx512_mask_cvtudq2ps_256,             // llvm.x86.avx512.mask.cvtudq2ps.256
+    x86_avx512_mask_cvtudq2ps_512,             // llvm.x86.avx512.mask.cvtudq2ps.512
+    x86_avx512_mask_cvtuqq2pd_128,             // llvm.x86.avx512.mask.cvtuqq2pd.128
+    x86_avx512_mask_cvtuqq2pd_256,             // llvm.x86.avx512.mask.cvtuqq2pd.256
+    x86_avx512_mask_cvtuqq2pd_512,             // llvm.x86.avx512.mask.cvtuqq2pd.512
+    x86_avx512_mask_cvtuqq2ps_128,             // llvm.x86.avx512.mask.cvtuqq2ps.128
+    x86_avx512_mask_cvtuqq2ps_256,             // llvm.x86.avx512.mask.cvtuqq2ps.256
+    x86_avx512_mask_cvtuqq2ps_512,             // llvm.x86.avx512.mask.cvtuqq2ps.512
+    x86_avx512_mask_dbpsadbw_128,              // llvm.x86.avx512.mask.dbpsadbw.128
+    x86_avx512_mask_dbpsadbw_256,              // llvm.x86.avx512.mask.dbpsadbw.256
+    x86_avx512_mask_dbpsadbw_512,              // llvm.x86.avx512.mask.dbpsadbw.512
+    x86_avx512_mask_div_pd_512,                // llvm.x86.avx512.mask.div.pd.512
+    x86_avx512_mask_div_ps_512,                // llvm.x86.avx512.mask.div.ps.512
+    x86_avx512_mask_div_sd_round,              // llvm.x86.avx512.mask.div.sd.round
+    x86_avx512_mask_div_ss_round,              // llvm.x86.avx512.mask.div.ss.round
+    x86_avx512_mask_expand_b_128,              // llvm.x86.avx512.mask.expand.b.128
+    x86_avx512_mask_expand_b_256,              // llvm.x86.avx512.mask.expand.b.256
+    x86_avx512_mask_expand_b_512,              // llvm.x86.avx512.mask.expand.b.512
+    x86_avx512_mask_expand_d_128,              // llvm.x86.avx512.mask.expand.d.128
+    x86_avx512_mask_expand_d_256,              // llvm.x86.avx512.mask.expand.d.256
+    x86_avx512_mask_expand_d_512,              // llvm.x86.avx512.mask.expand.d.512
+    x86_avx512_mask_expand_load_b_128,         // llvm.x86.avx512.mask.expand.load.b.128
+    x86_avx512_mask_expand_load_b_256,         // llvm.x86.avx512.mask.expand.load.b.256
+    x86_avx512_mask_expand_load_b_512,         // llvm.x86.avx512.mask.expand.load.b.512
+    x86_avx512_mask_expand_load_d_128,         // llvm.x86.avx512.mask.expand.load.d.128
+    x86_avx512_mask_expand_load_d_256,         // llvm.x86.avx512.mask.expand.load.d.256
+    x86_avx512_mask_expand_load_d_512,         // llvm.x86.avx512.mask.expand.load.d.512
+    x86_avx512_mask_expand_load_pd_128,        // llvm.x86.avx512.mask.expand.load.pd.128
+    x86_avx512_mask_expand_load_pd_256,        // llvm.x86.avx512.mask.expand.load.pd.256
+    x86_avx512_mask_expand_load_pd_512,        // llvm.x86.avx512.mask.expand.load.pd.512
+    x86_avx512_mask_expand_load_ps_128,        // llvm.x86.avx512.mask.expand.load.ps.128
+    x86_avx512_mask_expand_load_ps_256,        // llvm.x86.avx512.mask.expand.load.ps.256
+    x86_avx512_mask_expand_load_ps_512,        // llvm.x86.avx512.mask.expand.load.ps.512
+    x86_avx512_mask_expand_load_q_128,         // llvm.x86.avx512.mask.expand.load.q.128
+    x86_avx512_mask_expand_load_q_256,         // llvm.x86.avx512.mask.expand.load.q.256
+    x86_avx512_mask_expand_load_q_512,         // llvm.x86.avx512.mask.expand.load.q.512
+    x86_avx512_mask_expand_load_w_128,         // llvm.x86.avx512.mask.expand.load.w.128
+    x86_avx512_mask_expand_load_w_256,         // llvm.x86.avx512.mask.expand.load.w.256
+    x86_avx512_mask_expand_load_w_512,         // llvm.x86.avx512.mask.expand.load.w.512
+    x86_avx512_mask_expand_pd_128,             // llvm.x86.avx512.mask.expand.pd.128
+    x86_avx512_mask_expand_pd_256,             // llvm.x86.avx512.mask.expand.pd.256
+    x86_avx512_mask_expand_pd_512,             // llvm.x86.avx512.mask.expand.pd.512
+    x86_avx512_mask_expand_ps_128,             // llvm.x86.avx512.mask.expand.ps.128
+    x86_avx512_mask_expand_ps_256,             // llvm.x86.avx512.mask.expand.ps.256
+    x86_avx512_mask_expand_ps_512,             // llvm.x86.avx512.mask.expand.ps.512
+    x86_avx512_mask_expand_q_128,              // llvm.x86.avx512.mask.expand.q.128
+    x86_avx512_mask_expand_q_256,              // llvm.x86.avx512.mask.expand.q.256
+    x86_avx512_mask_expand_q_512,              // llvm.x86.avx512.mask.expand.q.512
+    x86_avx512_mask_expand_w_128,              // llvm.x86.avx512.mask.expand.w.128
+    x86_avx512_mask_expand_w_256,              // llvm.x86.avx512.mask.expand.w.256
+    x86_avx512_mask_expand_w_512,              // llvm.x86.avx512.mask.expand.w.512
+    x86_avx512_mask_fixupimm_pd_128,           // llvm.x86.avx512.mask.fixupimm.pd.128
+    x86_avx512_mask_fixupimm_pd_256,           // llvm.x86.avx512.mask.fixupimm.pd.256
+    x86_avx512_mask_fixupimm_pd_512,           // llvm.x86.avx512.mask.fixupimm.pd.512
+    x86_avx512_mask_fixupimm_ps_128,           // llvm.x86.avx512.mask.fixupimm.ps.128
+    x86_avx512_mask_fixupimm_ps_256,           // llvm.x86.avx512.mask.fixupimm.ps.256
+    x86_avx512_mask_fixupimm_ps_512,           // llvm.x86.avx512.mask.fixupimm.ps.512
+    x86_avx512_mask_fixupimm_sd,               // llvm.x86.avx512.mask.fixupimm.sd
+    x86_avx512_mask_fixupimm_ss,               // llvm.x86.avx512.mask.fixupimm.ss
+    x86_avx512_mask_fpclass_pd_128,            // llvm.x86.avx512.mask.fpclass.pd.128
+    x86_avx512_mask_fpclass_pd_256,            // llvm.x86.avx512.mask.fpclass.pd.256
+    x86_avx512_mask_fpclass_pd_512,            // llvm.x86.avx512.mask.fpclass.pd.512
+    x86_avx512_mask_fpclass_ps_128,            // llvm.x86.avx512.mask.fpclass.ps.128
+    x86_avx512_mask_fpclass_ps_256,            // llvm.x86.avx512.mask.fpclass.ps.256
+    x86_avx512_mask_fpclass_ps_512,            // llvm.x86.avx512.mask.fpclass.ps.512
+    x86_avx512_mask_fpclass_sd,                // llvm.x86.avx512.mask.fpclass.sd
+    x86_avx512_mask_fpclass_ss,                // llvm.x86.avx512.mask.fpclass.ss
+    x86_avx512_mask_getexp_pd_128,             // llvm.x86.avx512.mask.getexp.pd.128
+    x86_avx512_mask_getexp_pd_256,             // llvm.x86.avx512.mask.getexp.pd.256
+    x86_avx512_mask_getexp_pd_512,             // llvm.x86.avx512.mask.getexp.pd.512
+    x86_avx512_mask_getexp_ps_128,             // llvm.x86.avx512.mask.getexp.ps.128
+    x86_avx512_mask_getexp_ps_256,             // llvm.x86.avx512.mask.getexp.ps.256
+    x86_avx512_mask_getexp_ps_512,             // llvm.x86.avx512.mask.getexp.ps.512
+    x86_avx512_mask_getexp_sd,                 // llvm.x86.avx512.mask.getexp.sd
+    x86_avx512_mask_getexp_ss,                 // llvm.x86.avx512.mask.getexp.ss
+    x86_avx512_mask_getmant_pd_128,            // llvm.x86.avx512.mask.getmant.pd.128
+    x86_avx512_mask_getmant_pd_256,            // llvm.x86.avx512.mask.getmant.pd.256
+    x86_avx512_mask_getmant_pd_512,            // llvm.x86.avx512.mask.getmant.pd.512
+    x86_avx512_mask_getmant_ps_128,            // llvm.x86.avx512.mask.getmant.ps.128
+    x86_avx512_mask_getmant_ps_256,            // llvm.x86.avx512.mask.getmant.ps.256
+    x86_avx512_mask_getmant_ps_512,            // llvm.x86.avx512.mask.getmant.ps.512
+    x86_avx512_mask_getmant_sd,                // llvm.x86.avx512.mask.getmant.sd
+    x86_avx512_mask_getmant_ss,                // llvm.x86.avx512.mask.getmant.ss
+    x86_avx512_mask_max_pd_512,                // llvm.x86.avx512.mask.max.pd.512
+    x86_avx512_mask_max_ps_512,                // llvm.x86.avx512.mask.max.ps.512
+    x86_avx512_mask_max_sd_round,              // llvm.x86.avx512.mask.max.sd.round
+    x86_avx512_mask_max_ss_round,              // llvm.x86.avx512.mask.max.ss.round
+    x86_avx512_mask_min_pd_512,                // llvm.x86.avx512.mask.min.pd.512
+    x86_avx512_mask_min_ps_512,                // llvm.x86.avx512.mask.min.ps.512
+    x86_avx512_mask_min_sd_round,              // llvm.x86.avx512.mask.min.sd.round
+    x86_avx512_mask_min_ss_round,              // llvm.x86.avx512.mask.min.ss.round
+    x86_avx512_mask_mul_pd_512,                // llvm.x86.avx512.mask.mul.pd.512
+    x86_avx512_mask_mul_ps_512,                // llvm.x86.avx512.mask.mul.ps.512
+    x86_avx512_mask_mul_sd_round,              // llvm.x86.avx512.mask.mul.sd.round
+    x86_avx512_mask_mul_ss_round,              // llvm.x86.avx512.mask.mul.ss.round
+    x86_avx512_mask_padds_b_128,               // llvm.x86.avx512.mask.padds.b.128
+    x86_avx512_mask_padds_b_256,               // llvm.x86.avx512.mask.padds.b.256
+    x86_avx512_mask_padds_b_512,               // llvm.x86.avx512.mask.padds.b.512
+    x86_avx512_mask_padds_w_128,               // llvm.x86.avx512.mask.padds.w.128
+    x86_avx512_mask_padds_w_256,               // llvm.x86.avx512.mask.padds.w.256
+    x86_avx512_mask_padds_w_512,               // llvm.x86.avx512.mask.padds.w.512
+    x86_avx512_mask_paddus_b_128,              // llvm.x86.avx512.mask.paddus.b.128
+    x86_avx512_mask_paddus_b_256,              // llvm.x86.avx512.mask.paddus.b.256
+    x86_avx512_mask_paddus_b_512,              // llvm.x86.avx512.mask.paddus.b.512
+    x86_avx512_mask_paddus_w_128,              // llvm.x86.avx512.mask.paddus.w.128
+    x86_avx512_mask_paddus_w_256,              // llvm.x86.avx512.mask.paddus.w.256
+    x86_avx512_mask_paddus_w_512,              // llvm.x86.avx512.mask.paddus.w.512
+    x86_avx512_mask_permvar_df_256,            // llvm.x86.avx512.mask.permvar.df.256
+    x86_avx512_mask_permvar_df_512,            // llvm.x86.avx512.mask.permvar.df.512
+    x86_avx512_mask_permvar_di_256,            // llvm.x86.avx512.mask.permvar.di.256
+    x86_avx512_mask_permvar_di_512,            // llvm.x86.avx512.mask.permvar.di.512
+    x86_avx512_mask_permvar_hi_128,            // llvm.x86.avx512.mask.permvar.hi.128
+    x86_avx512_mask_permvar_hi_256,            // llvm.x86.avx512.mask.permvar.hi.256
+    x86_avx512_mask_permvar_hi_512,            // llvm.x86.avx512.mask.permvar.hi.512
+    x86_avx512_mask_permvar_qi_128,            // llvm.x86.avx512.mask.permvar.qi.128
+    x86_avx512_mask_permvar_qi_256,            // llvm.x86.avx512.mask.permvar.qi.256
+    x86_avx512_mask_permvar_qi_512,            // llvm.x86.avx512.mask.permvar.qi.512
+    x86_avx512_mask_permvar_sf_256,            // llvm.x86.avx512.mask.permvar.sf.256
+    x86_avx512_mask_permvar_sf_512,            // llvm.x86.avx512.mask.permvar.sf.512
+    x86_avx512_mask_permvar_si_256,            // llvm.x86.avx512.mask.permvar.si.256
+    x86_avx512_mask_permvar_si_512,            // llvm.x86.avx512.mask.permvar.si.512
+    x86_avx512_mask_pmaddubs_w_128,            // llvm.x86.avx512.mask.pmaddubs.w.128
+    x86_avx512_mask_pmaddubs_w_256,            // llvm.x86.avx512.mask.pmaddubs.w.256
+    x86_avx512_mask_pmaddubs_w_512,            // llvm.x86.avx512.mask.pmaddubs.w.512
+    x86_avx512_mask_pmaddw_d_128,              // llvm.x86.avx512.mask.pmaddw.d.128
+    x86_avx512_mask_pmaddw_d_256,              // llvm.x86.avx512.mask.pmaddw.d.256
+    x86_avx512_mask_pmaddw_d_512,              // llvm.x86.avx512.mask.pmaddw.d.512
+    x86_avx512_mask_pmov_db_128,               // llvm.x86.avx512.mask.pmov.db.128
+    x86_avx512_mask_pmov_db_256,               // llvm.x86.avx512.mask.pmov.db.256
+    x86_avx512_mask_pmov_db_512,               // llvm.x86.avx512.mask.pmov.db.512
+    x86_avx512_mask_pmov_db_mem_128,           // llvm.x86.avx512.mask.pmov.db.mem.128
+    x86_avx512_mask_pmov_db_mem_256,           // llvm.x86.avx512.mask.pmov.db.mem.256
+    x86_avx512_mask_pmov_db_mem_512,           // llvm.x86.avx512.mask.pmov.db.mem.512
+    x86_avx512_mask_pmov_dw_128,               // llvm.x86.avx512.mask.pmov.dw.128
+    x86_avx512_mask_pmov_dw_256,               // llvm.x86.avx512.mask.pmov.dw.256
+    x86_avx512_mask_pmov_dw_512,               // llvm.x86.avx512.mask.pmov.dw.512
+    x86_avx512_mask_pmov_dw_mem_128,           // llvm.x86.avx512.mask.pmov.dw.mem.128
+    x86_avx512_mask_pmov_dw_mem_256,           // llvm.x86.avx512.mask.pmov.dw.mem.256
+    x86_avx512_mask_pmov_dw_mem_512,           // llvm.x86.avx512.mask.pmov.dw.mem.512
+    x86_avx512_mask_pmov_qb_128,               // llvm.x86.avx512.mask.pmov.qb.128
+    x86_avx512_mask_pmov_qb_256,               // llvm.x86.avx512.mask.pmov.qb.256
+    x86_avx512_mask_pmov_qb_512,               // llvm.x86.avx512.mask.pmov.qb.512
+    x86_avx512_mask_pmov_qb_mem_128,           // llvm.x86.avx512.mask.pmov.qb.mem.128
+    x86_avx512_mask_pmov_qb_mem_256,           // llvm.x86.avx512.mask.pmov.qb.mem.256
+    x86_avx512_mask_pmov_qb_mem_512,           // llvm.x86.avx512.mask.pmov.qb.mem.512
+    x86_avx512_mask_pmov_qd_128,               // llvm.x86.avx512.mask.pmov.qd.128
+    x86_avx512_mask_pmov_qd_256,               // llvm.x86.avx512.mask.pmov.qd.256
+    x86_avx512_mask_pmov_qd_512,               // llvm.x86.avx512.mask.pmov.qd.512
+    x86_avx512_mask_pmov_qd_mem_128,           // llvm.x86.avx512.mask.pmov.qd.mem.128
+    x86_avx512_mask_pmov_qd_mem_256,           // llvm.x86.avx512.mask.pmov.qd.mem.256
+    x86_avx512_mask_pmov_qd_mem_512,           // llvm.x86.avx512.mask.pmov.qd.mem.512
+    x86_avx512_mask_pmov_qw_128,               // llvm.x86.avx512.mask.pmov.qw.128
+    x86_avx512_mask_pmov_qw_256,               // llvm.x86.avx512.mask.pmov.qw.256
+    x86_avx512_mask_pmov_qw_512,               // llvm.x86.avx512.mask.pmov.qw.512
+    x86_avx512_mask_pmov_qw_mem_128,           // llvm.x86.avx512.mask.pmov.qw.mem.128
+    x86_avx512_mask_pmov_qw_mem_256,           // llvm.x86.avx512.mask.pmov.qw.mem.256
+    x86_avx512_mask_pmov_qw_mem_512,           // llvm.x86.avx512.mask.pmov.qw.mem.512
+    x86_avx512_mask_pmov_wb_128,               // llvm.x86.avx512.mask.pmov.wb.128
+    x86_avx512_mask_pmov_wb_256,               // llvm.x86.avx512.mask.pmov.wb.256
+    x86_avx512_mask_pmov_wb_512,               // llvm.x86.avx512.mask.pmov.wb.512
+    x86_avx512_mask_pmov_wb_mem_128,           // llvm.x86.avx512.mask.pmov.wb.mem.128
+    x86_avx512_mask_pmov_wb_mem_256,           // llvm.x86.avx512.mask.pmov.wb.mem.256
+    x86_avx512_mask_pmov_wb_mem_512,           // llvm.x86.avx512.mask.pmov.wb.mem.512
+    x86_avx512_mask_pmovs_db_128,              // llvm.x86.avx512.mask.pmovs.db.128
+    x86_avx512_mask_pmovs_db_256,              // llvm.x86.avx512.mask.pmovs.db.256
+    x86_avx512_mask_pmovs_db_512,              // llvm.x86.avx512.mask.pmovs.db.512
+    x86_avx512_mask_pmovs_db_mem_128,          // llvm.x86.avx512.mask.pmovs.db.mem.128
+    x86_avx512_mask_pmovs_db_mem_256,          // llvm.x86.avx512.mask.pmovs.db.mem.256
+    x86_avx512_mask_pmovs_db_mem_512,          // llvm.x86.avx512.mask.pmovs.db.mem.512
+    x86_avx512_mask_pmovs_dw_128,              // llvm.x86.avx512.mask.pmovs.dw.128
+    x86_avx512_mask_pmovs_dw_256,              // llvm.x86.avx512.mask.pmovs.dw.256
+    x86_avx512_mask_pmovs_dw_512,              // llvm.x86.avx512.mask.pmovs.dw.512
+    x86_avx512_mask_pmovs_dw_mem_128,          // llvm.x86.avx512.mask.pmovs.dw.mem.128
+    x86_avx512_mask_pmovs_dw_mem_256,          // llvm.x86.avx512.mask.pmovs.dw.mem.256
+    x86_avx512_mask_pmovs_dw_mem_512,          // llvm.x86.avx512.mask.pmovs.dw.mem.512
+    x86_avx512_mask_pmovs_qb_128,              // llvm.x86.avx512.mask.pmovs.qb.128
+    x86_avx512_mask_pmovs_qb_256,              // llvm.x86.avx512.mask.pmovs.qb.256
+    x86_avx512_mask_pmovs_qb_512,              // llvm.x86.avx512.mask.pmovs.qb.512
+    x86_avx512_mask_pmovs_qb_mem_128,          // llvm.x86.avx512.mask.pmovs.qb.mem.128
+    x86_avx512_mask_pmovs_qb_mem_256,          // llvm.x86.avx512.mask.pmovs.qb.mem.256
+    x86_avx512_mask_pmovs_qb_mem_512,          // llvm.x86.avx512.mask.pmovs.qb.mem.512
+    x86_avx512_mask_pmovs_qd_128,              // llvm.x86.avx512.mask.pmovs.qd.128
+    x86_avx512_mask_pmovs_qd_256,              // llvm.x86.avx512.mask.pmovs.qd.256
+    x86_avx512_mask_pmovs_qd_512,              // llvm.x86.avx512.mask.pmovs.qd.512
+    x86_avx512_mask_pmovs_qd_mem_128,          // llvm.x86.avx512.mask.pmovs.qd.mem.128
+    x86_avx512_mask_pmovs_qd_mem_256,          // llvm.x86.avx512.mask.pmovs.qd.mem.256
+    x86_avx512_mask_pmovs_qd_mem_512,          // llvm.x86.avx512.mask.pmovs.qd.mem.512
+    x86_avx512_mask_pmovs_qw_128,              // llvm.x86.avx512.mask.pmovs.qw.128
+    x86_avx512_mask_pmovs_qw_256,              // llvm.x86.avx512.mask.pmovs.qw.256
+    x86_avx512_mask_pmovs_qw_512,              // llvm.x86.avx512.mask.pmovs.qw.512
+    x86_avx512_mask_pmovs_qw_mem_128,          // llvm.x86.avx512.mask.pmovs.qw.mem.128
+    x86_avx512_mask_pmovs_qw_mem_256,          // llvm.x86.avx512.mask.pmovs.qw.mem.256
+    x86_avx512_mask_pmovs_qw_mem_512,          // llvm.x86.avx512.mask.pmovs.qw.mem.512
+    x86_avx512_mask_pmovs_wb_128,              // llvm.x86.avx512.mask.pmovs.wb.128
+    x86_avx512_mask_pmovs_wb_256,              // llvm.x86.avx512.mask.pmovs.wb.256
+    x86_avx512_mask_pmovs_wb_512,              // llvm.x86.avx512.mask.pmovs.wb.512
+    x86_avx512_mask_pmovs_wb_mem_128,          // llvm.x86.avx512.mask.pmovs.wb.mem.128
+    x86_avx512_mask_pmovs_wb_mem_256,          // llvm.x86.avx512.mask.pmovs.wb.mem.256
+    x86_avx512_mask_pmovs_wb_mem_512,          // llvm.x86.avx512.mask.pmovs.wb.mem.512
+    x86_avx512_mask_pmovus_db_128,             // llvm.x86.avx512.mask.pmovus.db.128
+    x86_avx512_mask_pmovus_db_256,             // llvm.x86.avx512.mask.pmovus.db.256
+    x86_avx512_mask_pmovus_db_512,             // llvm.x86.avx512.mask.pmovus.db.512
+    x86_avx512_mask_pmovus_db_mem_128,         // llvm.x86.avx512.mask.pmovus.db.mem.128
+    x86_avx512_mask_pmovus_db_mem_256,         // llvm.x86.avx512.mask.pmovus.db.mem.256
+    x86_avx512_mask_pmovus_db_mem_512,         // llvm.x86.avx512.mask.pmovus.db.mem.512
+    x86_avx512_mask_pmovus_dw_128,             // llvm.x86.avx512.mask.pmovus.dw.128
+    x86_avx512_mask_pmovus_dw_256,             // llvm.x86.avx512.mask.pmovus.dw.256
+    x86_avx512_mask_pmovus_dw_512,             // llvm.x86.avx512.mask.pmovus.dw.512
+    x86_avx512_mask_pmovus_dw_mem_128,         // llvm.x86.avx512.mask.pmovus.dw.mem.128
+    x86_avx512_mask_pmovus_dw_mem_256,         // llvm.x86.avx512.mask.pmovus.dw.mem.256
+    x86_avx512_mask_pmovus_dw_mem_512,         // llvm.x86.avx512.mask.pmovus.dw.mem.512
+    x86_avx512_mask_pmovus_qb_128,             // llvm.x86.avx512.mask.pmovus.qb.128
+    x86_avx512_mask_pmovus_qb_256,             // llvm.x86.avx512.mask.pmovus.qb.256
+    x86_avx512_mask_pmovus_qb_512,             // llvm.x86.avx512.mask.pmovus.qb.512
+    x86_avx512_mask_pmovus_qb_mem_128,         // llvm.x86.avx512.mask.pmovus.qb.mem.128
+    x86_avx512_mask_pmovus_qb_mem_256,         // llvm.x86.avx512.mask.pmovus.qb.mem.256
+    x86_avx512_mask_pmovus_qb_mem_512,         // llvm.x86.avx512.mask.pmovus.qb.mem.512
+    x86_avx512_mask_pmovus_qd_128,             // llvm.x86.avx512.mask.pmovus.qd.128
+    x86_avx512_mask_pmovus_qd_256,             // llvm.x86.avx512.mask.pmovus.qd.256
+    x86_avx512_mask_pmovus_qd_512,             // llvm.x86.avx512.mask.pmovus.qd.512
+    x86_avx512_mask_pmovus_qd_mem_128,         // llvm.x86.avx512.mask.pmovus.qd.mem.128
+    x86_avx512_mask_pmovus_qd_mem_256,         // llvm.x86.avx512.mask.pmovus.qd.mem.256
+    x86_avx512_mask_pmovus_qd_mem_512,         // llvm.x86.avx512.mask.pmovus.qd.mem.512
+    x86_avx512_mask_pmovus_qw_128,             // llvm.x86.avx512.mask.pmovus.qw.128
+    x86_avx512_mask_pmovus_qw_256,             // llvm.x86.avx512.mask.pmovus.qw.256
+    x86_avx512_mask_pmovus_qw_512,             // llvm.x86.avx512.mask.pmovus.qw.512
+    x86_avx512_mask_pmovus_qw_mem_128,         // llvm.x86.avx512.mask.pmovus.qw.mem.128
+    x86_avx512_mask_pmovus_qw_mem_256,         // llvm.x86.avx512.mask.pmovus.qw.mem.256
+    x86_avx512_mask_pmovus_qw_mem_512,         // llvm.x86.avx512.mask.pmovus.qw.mem.512
+    x86_avx512_mask_pmovus_wb_128,             // llvm.x86.avx512.mask.pmovus.wb.128
+    x86_avx512_mask_pmovus_wb_256,             // llvm.x86.avx512.mask.pmovus.wb.256
+    x86_avx512_mask_pmovus_wb_512,             // llvm.x86.avx512.mask.pmovus.wb.512
+    x86_avx512_mask_pmovus_wb_mem_128,         // llvm.x86.avx512.mask.pmovus.wb.mem.128
+    x86_avx512_mask_pmovus_wb_mem_256,         // llvm.x86.avx512.mask.pmovus.wb.mem.256
+    x86_avx512_mask_pmovus_wb_mem_512,         // llvm.x86.avx512.mask.pmovus.wb.mem.512
+    x86_avx512_mask_pmultishift_qb_128,        // llvm.x86.avx512.mask.pmultishift.qb.128
+    x86_avx512_mask_pmultishift_qb_256,        // llvm.x86.avx512.mask.pmultishift.qb.256
+    x86_avx512_mask_pmultishift_qb_512,        // llvm.x86.avx512.mask.pmultishift.qb.512
+    x86_avx512_mask_prol_d_128,                // llvm.x86.avx512.mask.prol.d.128
+    x86_avx512_mask_prol_d_256,                // llvm.x86.avx512.mask.prol.d.256
+    x86_avx512_mask_prol_d_512,                // llvm.x86.avx512.mask.prol.d.512
+    x86_avx512_mask_prol_q_128,                // llvm.x86.avx512.mask.prol.q.128
+    x86_avx512_mask_prol_q_256,                // llvm.x86.avx512.mask.prol.q.256
+    x86_avx512_mask_prol_q_512,                // llvm.x86.avx512.mask.prol.q.512
+    x86_avx512_mask_prolv_d_128,               // llvm.x86.avx512.mask.prolv.d.128
+    x86_avx512_mask_prolv_d_256,               // llvm.x86.avx512.mask.prolv.d.256
+    x86_avx512_mask_prolv_d_512,               // llvm.x86.avx512.mask.prolv.d.512
+    x86_avx512_mask_prolv_q_128,               // llvm.x86.avx512.mask.prolv.q.128
+    x86_avx512_mask_prolv_q_256,               // llvm.x86.avx512.mask.prolv.q.256
+    x86_avx512_mask_prolv_q_512,               // llvm.x86.avx512.mask.prolv.q.512
+    x86_avx512_mask_pror_d_128,                // llvm.x86.avx512.mask.pror.d.128
+    x86_avx512_mask_pror_d_256,                // llvm.x86.avx512.mask.pror.d.256
+    x86_avx512_mask_pror_d_512,                // llvm.x86.avx512.mask.pror.d.512
+    x86_avx512_mask_pror_q_128,                // llvm.x86.avx512.mask.pror.q.128
+    x86_avx512_mask_pror_q_256,                // llvm.x86.avx512.mask.pror.q.256
+    x86_avx512_mask_pror_q_512,                // llvm.x86.avx512.mask.pror.q.512
+    x86_avx512_mask_prorv_d_128,               // llvm.x86.avx512.mask.prorv.d.128
+    x86_avx512_mask_prorv_d_256,               // llvm.x86.avx512.mask.prorv.d.256
+    x86_avx512_mask_prorv_d_512,               // llvm.x86.avx512.mask.prorv.d.512
+    x86_avx512_mask_prorv_q_128,               // llvm.x86.avx512.mask.prorv.q.128
+    x86_avx512_mask_prorv_q_256,               // llvm.x86.avx512.mask.prorv.q.256
+    x86_avx512_mask_prorv_q_512,               // llvm.x86.avx512.mask.prorv.q.512
+    x86_avx512_mask_psubs_b_128,               // llvm.x86.avx512.mask.psubs.b.128
+    x86_avx512_mask_psubs_b_256,               // llvm.x86.avx512.mask.psubs.b.256
+    x86_avx512_mask_psubs_b_512,               // llvm.x86.avx512.mask.psubs.b.512
+    x86_avx512_mask_psubs_w_128,               // llvm.x86.avx512.mask.psubs.w.128
+    x86_avx512_mask_psubs_w_256,               // llvm.x86.avx512.mask.psubs.w.256
+    x86_avx512_mask_psubs_w_512,               // llvm.x86.avx512.mask.psubs.w.512
+    x86_avx512_mask_psubus_b_128,              // llvm.x86.avx512.mask.psubus.b.128
+    x86_avx512_mask_psubus_b_256,              // llvm.x86.avx512.mask.psubus.b.256
+    x86_avx512_mask_psubus_b_512,              // llvm.x86.avx512.mask.psubus.b.512
+    x86_avx512_mask_psubus_w_128,              // llvm.x86.avx512.mask.psubus.w.128
+    x86_avx512_mask_psubus_w_256,              // llvm.x86.avx512.mask.psubus.w.256
+    x86_avx512_mask_psubus_w_512,              // llvm.x86.avx512.mask.psubus.w.512
+    x86_avx512_mask_pternlog_d_128,            // llvm.x86.avx512.mask.pternlog.d.128
+    x86_avx512_mask_pternlog_d_256,            // llvm.x86.avx512.mask.pternlog.d.256
+    x86_avx512_mask_pternlog_d_512,            // llvm.x86.avx512.mask.pternlog.d.512
+    x86_avx512_mask_pternlog_q_128,            // llvm.x86.avx512.mask.pternlog.q.128
+    x86_avx512_mask_pternlog_q_256,            // llvm.x86.avx512.mask.pternlog.q.256
+    x86_avx512_mask_pternlog_q_512,            // llvm.x86.avx512.mask.pternlog.q.512
+    x86_avx512_mask_range_pd_128,              // llvm.x86.avx512.mask.range.pd.128
+    x86_avx512_mask_range_pd_256,              // llvm.x86.avx512.mask.range.pd.256
+    x86_avx512_mask_range_pd_512,              // llvm.x86.avx512.mask.range.pd.512
+    x86_avx512_mask_range_ps_128,              // llvm.x86.avx512.mask.range.ps.128
+    x86_avx512_mask_range_ps_256,              // llvm.x86.avx512.mask.range.ps.256
+    x86_avx512_mask_range_ps_512,              // llvm.x86.avx512.mask.range.ps.512
+    x86_avx512_mask_range_sd,                  // llvm.x86.avx512.mask.range.sd
+    x86_avx512_mask_range_ss,                  // llvm.x86.avx512.mask.range.ss
+    x86_avx512_mask_reduce_pd_128,             // llvm.x86.avx512.mask.reduce.pd.128
+    x86_avx512_mask_reduce_pd_256,             // llvm.x86.avx512.mask.reduce.pd.256
+    x86_avx512_mask_reduce_pd_512,             // llvm.x86.avx512.mask.reduce.pd.512
+    x86_avx512_mask_reduce_ps_128,             // llvm.x86.avx512.mask.reduce.ps.128
+    x86_avx512_mask_reduce_ps_256,             // llvm.x86.avx512.mask.reduce.ps.256
+    x86_avx512_mask_reduce_ps_512,             // llvm.x86.avx512.mask.reduce.ps.512
+    x86_avx512_mask_reduce_sd,                 // llvm.x86.avx512.mask.reduce.sd
+    x86_avx512_mask_reduce_ss,                 // llvm.x86.avx512.mask.reduce.ss
+    x86_avx512_mask_rndscale_pd_128,           // llvm.x86.avx512.mask.rndscale.pd.128
+    x86_avx512_mask_rndscale_pd_256,           // llvm.x86.avx512.mask.rndscale.pd.256
+    x86_avx512_mask_rndscale_pd_512,           // llvm.x86.avx512.mask.rndscale.pd.512
+    x86_avx512_mask_rndscale_ps_128,           // llvm.x86.avx512.mask.rndscale.ps.128
+    x86_avx512_mask_rndscale_ps_256,           // llvm.x86.avx512.mask.rndscale.ps.256
+    x86_avx512_mask_rndscale_ps_512,           // llvm.x86.avx512.mask.rndscale.ps.512
+    x86_avx512_mask_rndscale_sd,               // llvm.x86.avx512.mask.rndscale.sd
+    x86_avx512_mask_rndscale_ss,               // llvm.x86.avx512.mask.rndscale.ss
+    x86_avx512_mask_scalef_pd_128,             // llvm.x86.avx512.mask.scalef.pd.128
+    x86_avx512_mask_scalef_pd_256,             // llvm.x86.avx512.mask.scalef.pd.256
+    x86_avx512_mask_scalef_pd_512,             // llvm.x86.avx512.mask.scalef.pd.512
+    x86_avx512_mask_scalef_ps_128,             // llvm.x86.avx512.mask.scalef.ps.128
+    x86_avx512_mask_scalef_ps_256,             // llvm.x86.avx512.mask.scalef.ps.256
+    x86_avx512_mask_scalef_ps_512,             // llvm.x86.avx512.mask.scalef.ps.512
+    x86_avx512_mask_scalef_sd,                 // llvm.x86.avx512.mask.scalef.sd
+    x86_avx512_mask_scalef_ss,                 // llvm.x86.avx512.mask.scalef.ss
+    x86_avx512_mask_sqrt_pd_128,               // llvm.x86.avx512.mask.sqrt.pd.128
+    x86_avx512_mask_sqrt_pd_256,               // llvm.x86.avx512.mask.sqrt.pd.256
+    x86_avx512_mask_sqrt_pd_512,               // llvm.x86.avx512.mask.sqrt.pd.512
+    x86_avx512_mask_sqrt_ps_128,               // llvm.x86.avx512.mask.sqrt.ps.128
+    x86_avx512_mask_sqrt_ps_256,               // llvm.x86.avx512.mask.sqrt.ps.256
+    x86_avx512_mask_sqrt_ps_512,               // llvm.x86.avx512.mask.sqrt.ps.512
+    x86_avx512_mask_sqrt_sd,                   // llvm.x86.avx512.mask.sqrt.sd
+    x86_avx512_mask_sqrt_ss,                   // llvm.x86.avx512.mask.sqrt.ss
+    x86_avx512_mask_store_ss,                  // llvm.x86.avx512.mask.store.ss
+    x86_avx512_mask_sub_pd_512,                // llvm.x86.avx512.mask.sub.pd.512
+    x86_avx512_mask_sub_ps_512,                // llvm.x86.avx512.mask.sub.ps.512
+    x86_avx512_mask_sub_sd_round,              // llvm.x86.avx512.mask.sub.sd.round
+    x86_avx512_mask_sub_ss_round,              // llvm.x86.avx512.mask.sub.ss.round
+    x86_avx512_mask_vcvtph2ps_128,             // llvm.x86.avx512.mask.vcvtph2ps.128
+    x86_avx512_mask_vcvtph2ps_256,             // llvm.x86.avx512.mask.vcvtph2ps.256
+    x86_avx512_mask_vcvtph2ps_512,             // llvm.x86.avx512.mask.vcvtph2ps.512
+    x86_avx512_mask_vcvtps2ph_128,             // llvm.x86.avx512.mask.vcvtps2ph.128
+    x86_avx512_mask_vcvtps2ph_256,             // llvm.x86.avx512.mask.vcvtps2ph.256
+    x86_avx512_mask_vcvtps2ph_512,             // llvm.x86.avx512.mask.vcvtps2ph.512
+    x86_avx512_mask_vfmadd_pd_128,             // llvm.x86.avx512.mask.vfmadd.pd.128
+    x86_avx512_mask_vfmadd_pd_256,             // llvm.x86.avx512.mask.vfmadd.pd.256
+    x86_avx512_mask_vfmadd_pd_512,             // llvm.x86.avx512.mask.vfmadd.pd.512
+    x86_avx512_mask_vfmadd_ps_128,             // llvm.x86.avx512.mask.vfmadd.ps.128
+    x86_avx512_mask_vfmadd_ps_256,             // llvm.x86.avx512.mask.vfmadd.ps.256
+    x86_avx512_mask_vfmadd_ps_512,             // llvm.x86.avx512.mask.vfmadd.ps.512
+    x86_avx512_mask_vfmadd_sd,                 // llvm.x86.avx512.mask.vfmadd.sd
+    x86_avx512_mask_vfmadd_ss,                 // llvm.x86.avx512.mask.vfmadd.ss
+    x86_avx512_mask_vfmaddsub_pd_128,          // llvm.x86.avx512.mask.vfmaddsub.pd.128
+    x86_avx512_mask_vfmaddsub_pd_256,          // llvm.x86.avx512.mask.vfmaddsub.pd.256
+    x86_avx512_mask_vfmaddsub_pd_512,          // llvm.x86.avx512.mask.vfmaddsub.pd.512
+    x86_avx512_mask_vfmaddsub_ps_128,          // llvm.x86.avx512.mask.vfmaddsub.ps.128
+    x86_avx512_mask_vfmaddsub_ps_256,          // llvm.x86.avx512.mask.vfmaddsub.ps.256
+    x86_avx512_mask_vfmaddsub_ps_512,          // llvm.x86.avx512.mask.vfmaddsub.ps.512
+    x86_avx512_mask_vfnmadd_pd_128,            // llvm.x86.avx512.mask.vfnmadd.pd.128
+    x86_avx512_mask_vfnmadd_pd_256,            // llvm.x86.avx512.mask.vfnmadd.pd.256
+    x86_avx512_mask_vfnmadd_pd_512,            // llvm.x86.avx512.mask.vfnmadd.pd.512
+    x86_avx512_mask_vfnmadd_ps_128,            // llvm.x86.avx512.mask.vfnmadd.ps.128
+    x86_avx512_mask_vfnmadd_ps_256,            // llvm.x86.avx512.mask.vfnmadd.ps.256
+    x86_avx512_mask_vfnmadd_ps_512,            // llvm.x86.avx512.mask.vfnmadd.ps.512
+    x86_avx512_mask_vfnmsub_pd_128,            // llvm.x86.avx512.mask.vfnmsub.pd.128
+    x86_avx512_mask_vfnmsub_pd_256,            // llvm.x86.avx512.mask.vfnmsub.pd.256
+    x86_avx512_mask_vfnmsub_pd_512,            // llvm.x86.avx512.mask.vfnmsub.pd.512
+    x86_avx512_mask_vfnmsub_ps_128,            // llvm.x86.avx512.mask.vfnmsub.ps.128
+    x86_avx512_mask_vfnmsub_ps_256,            // llvm.x86.avx512.mask.vfnmsub.ps.256
+    x86_avx512_mask_vfnmsub_ps_512,            // llvm.x86.avx512.mask.vfnmsub.ps.512
+    x86_avx512_mask_vpdpbusd_128,              // llvm.x86.avx512.mask.vpdpbusd.128
+    x86_avx512_mask_vpdpbusd_256,              // llvm.x86.avx512.mask.vpdpbusd.256
+    x86_avx512_mask_vpdpbusd_512,              // llvm.x86.avx512.mask.vpdpbusd.512
+    x86_avx512_mask_vpdpbusds_128,             // llvm.x86.avx512.mask.vpdpbusds.128
+    x86_avx512_mask_vpdpbusds_256,             // llvm.x86.avx512.mask.vpdpbusds.256
+    x86_avx512_mask_vpdpbusds_512,             // llvm.x86.avx512.mask.vpdpbusds.512
+    x86_avx512_mask_vpdpwssd_128,              // llvm.x86.avx512.mask.vpdpwssd.128
+    x86_avx512_mask_vpdpwssd_256,              // llvm.x86.avx512.mask.vpdpwssd.256
+    x86_avx512_mask_vpdpwssd_512,              // llvm.x86.avx512.mask.vpdpwssd.512
+    x86_avx512_mask_vpdpwssds_128,             // llvm.x86.avx512.mask.vpdpwssds.128
+    x86_avx512_mask_vpdpwssds_256,             // llvm.x86.avx512.mask.vpdpwssds.256
+    x86_avx512_mask_vpdpwssds_512,             // llvm.x86.avx512.mask.vpdpwssds.512
+    x86_avx512_mask_vpermi2var_d_128,          // llvm.x86.avx512.mask.vpermi2var.d.128
+    x86_avx512_mask_vpermi2var_d_256,          // llvm.x86.avx512.mask.vpermi2var.d.256
+    x86_avx512_mask_vpermi2var_d_512,          // llvm.x86.avx512.mask.vpermi2var.d.512
+    x86_avx512_mask_vpermi2var_hi_128,         // llvm.x86.avx512.mask.vpermi2var.hi.128
+    x86_avx512_mask_vpermi2var_hi_256,         // llvm.x86.avx512.mask.vpermi2var.hi.256
+    x86_avx512_mask_vpermi2var_hi_512,         // llvm.x86.avx512.mask.vpermi2var.hi.512
+    x86_avx512_mask_vpermi2var_pd_128,         // llvm.x86.avx512.mask.vpermi2var.pd.128
+    x86_avx512_mask_vpermi2var_pd_256,         // llvm.x86.avx512.mask.vpermi2var.pd.256
+    x86_avx512_mask_vpermi2var_pd_512,         // llvm.x86.avx512.mask.vpermi2var.pd.512
+    x86_avx512_mask_vpermi2var_ps_128,         // llvm.x86.avx512.mask.vpermi2var.ps.128
+    x86_avx512_mask_vpermi2var_ps_256,         // llvm.x86.avx512.mask.vpermi2var.ps.256
+    x86_avx512_mask_vpermi2var_ps_512,         // llvm.x86.avx512.mask.vpermi2var.ps.512
+    x86_avx512_mask_vpermi2var_q_128,          // llvm.x86.avx512.mask.vpermi2var.q.128
+    x86_avx512_mask_vpermi2var_q_256,          // llvm.x86.avx512.mask.vpermi2var.q.256
+    x86_avx512_mask_vpermi2var_q_512,          // llvm.x86.avx512.mask.vpermi2var.q.512
+    x86_avx512_mask_vpermi2var_qi_128,         // llvm.x86.avx512.mask.vpermi2var.qi.128
+    x86_avx512_mask_vpermi2var_qi_256,         // llvm.x86.avx512.mask.vpermi2var.qi.256
+    x86_avx512_mask_vpermi2var_qi_512,         // llvm.x86.avx512.mask.vpermi2var.qi.512
+    x86_avx512_mask_vpermt2var_d_128,          // llvm.x86.avx512.mask.vpermt2var.d.128
+    x86_avx512_mask_vpermt2var_d_256,          // llvm.x86.avx512.mask.vpermt2var.d.256
+    x86_avx512_mask_vpermt2var_d_512,          // llvm.x86.avx512.mask.vpermt2var.d.512
+    x86_avx512_mask_vpermt2var_hi_128,         // llvm.x86.avx512.mask.vpermt2var.hi.128
+    x86_avx512_mask_vpermt2var_hi_256,         // llvm.x86.avx512.mask.vpermt2var.hi.256
+    x86_avx512_mask_vpermt2var_hi_512,         // llvm.x86.avx512.mask.vpermt2var.hi.512
+    x86_avx512_mask_vpermt2var_pd_128,         // llvm.x86.avx512.mask.vpermt2var.pd.128
+    x86_avx512_mask_vpermt2var_pd_256,         // llvm.x86.avx512.mask.vpermt2var.pd.256
+    x86_avx512_mask_vpermt2var_pd_512,         // llvm.x86.avx512.mask.vpermt2var.pd.512
+    x86_avx512_mask_vpermt2var_ps_128,         // llvm.x86.avx512.mask.vpermt2var.ps.128
+    x86_avx512_mask_vpermt2var_ps_256,         // llvm.x86.avx512.mask.vpermt2var.ps.256
+    x86_avx512_mask_vpermt2var_ps_512,         // llvm.x86.avx512.mask.vpermt2var.ps.512
+    x86_avx512_mask_vpermt2var_q_128,          // llvm.x86.avx512.mask.vpermt2var.q.128
+    x86_avx512_mask_vpermt2var_q_256,          // llvm.x86.avx512.mask.vpermt2var.q.256
+    x86_avx512_mask_vpermt2var_q_512,          // llvm.x86.avx512.mask.vpermt2var.q.512
+    x86_avx512_mask_vpermt2var_qi_128,         // llvm.x86.avx512.mask.vpermt2var.qi.128
+    x86_avx512_mask_vpermt2var_qi_256,         // llvm.x86.avx512.mask.vpermt2var.qi.256
+    x86_avx512_mask_vpermt2var_qi_512,         // llvm.x86.avx512.mask.vpermt2var.qi.512
+    x86_avx512_mask_vpmadd52h_uq_128,          // llvm.x86.avx512.mask.vpmadd52h.uq.128
+    x86_avx512_mask_vpmadd52h_uq_256,          // llvm.x86.avx512.mask.vpmadd52h.uq.256
+    x86_avx512_mask_vpmadd52h_uq_512,          // llvm.x86.avx512.mask.vpmadd52h.uq.512
+    x86_avx512_mask_vpmadd52l_uq_128,          // llvm.x86.avx512.mask.vpmadd52l.uq.128
+    x86_avx512_mask_vpmadd52l_uq_256,          // llvm.x86.avx512.mask.vpmadd52l.uq.256
+    x86_avx512_mask_vpmadd52l_uq_512,          // llvm.x86.avx512.mask.vpmadd52l.uq.512
+    x86_avx512_mask_vpshld_d_128,              // llvm.x86.avx512.mask.vpshld.d.128
+    x86_avx512_mask_vpshld_d_256,              // llvm.x86.avx512.mask.vpshld.d.256
+    x86_avx512_mask_vpshld_d_512,              // llvm.x86.avx512.mask.vpshld.d.512
+    x86_avx512_mask_vpshld_q_128,              // llvm.x86.avx512.mask.vpshld.q.128
+    x86_avx512_mask_vpshld_q_256,              // llvm.x86.avx512.mask.vpshld.q.256
+    x86_avx512_mask_vpshld_q_512,              // llvm.x86.avx512.mask.vpshld.q.512
+    x86_avx512_mask_vpshld_w_128,              // llvm.x86.avx512.mask.vpshld.w.128
+    x86_avx512_mask_vpshld_w_256,              // llvm.x86.avx512.mask.vpshld.w.256
+    x86_avx512_mask_vpshld_w_512,              // llvm.x86.avx512.mask.vpshld.w.512
+    x86_avx512_mask_vpshldv_d_128,             // llvm.x86.avx512.mask.vpshldv.d.128
+    x86_avx512_mask_vpshldv_d_256,             // llvm.x86.avx512.mask.vpshldv.d.256
+    x86_avx512_mask_vpshldv_d_512,             // llvm.x86.avx512.mask.vpshldv.d.512
+    x86_avx512_mask_vpshldv_q_128,             // llvm.x86.avx512.mask.vpshldv.q.128
+    x86_avx512_mask_vpshldv_q_256,             // llvm.x86.avx512.mask.vpshldv.q.256
+    x86_avx512_mask_vpshldv_q_512,             // llvm.x86.avx512.mask.vpshldv.q.512
+    x86_avx512_mask_vpshldv_w_128,             // llvm.x86.avx512.mask.vpshldv.w.128
+    x86_avx512_mask_vpshldv_w_256,             // llvm.x86.avx512.mask.vpshldv.w.256
+    x86_avx512_mask_vpshldv_w_512,             // llvm.x86.avx512.mask.vpshldv.w.512
+    x86_avx512_mask_vpshrd_d_128,              // llvm.x86.avx512.mask.vpshrd.d.128
+    x86_avx512_mask_vpshrd_d_256,              // llvm.x86.avx512.mask.vpshrd.d.256
+    x86_avx512_mask_vpshrd_d_512,              // llvm.x86.avx512.mask.vpshrd.d.512
+    x86_avx512_mask_vpshrd_q_128,              // llvm.x86.avx512.mask.vpshrd.q.128
+    x86_avx512_mask_vpshrd_q_256,              // llvm.x86.avx512.mask.vpshrd.q.256
+    x86_avx512_mask_vpshrd_q_512,              // llvm.x86.avx512.mask.vpshrd.q.512
+    x86_avx512_mask_vpshrd_w_128,              // llvm.x86.avx512.mask.vpshrd.w.128
+    x86_avx512_mask_vpshrd_w_256,              // llvm.x86.avx512.mask.vpshrd.w.256
+    x86_avx512_mask_vpshrd_w_512,              // llvm.x86.avx512.mask.vpshrd.w.512
+    x86_avx512_mask_vpshrdv_d_128,             // llvm.x86.avx512.mask.vpshrdv.d.128
+    x86_avx512_mask_vpshrdv_d_256,             // llvm.x86.avx512.mask.vpshrdv.d.256
+    x86_avx512_mask_vpshrdv_d_512,             // llvm.x86.avx512.mask.vpshrdv.d.512
+    x86_avx512_mask_vpshrdv_q_128,             // llvm.x86.avx512.mask.vpshrdv.q.128
+    x86_avx512_mask_vpshrdv_q_256,             // llvm.x86.avx512.mask.vpshrdv.q.256
+    x86_avx512_mask_vpshrdv_q_512,             // llvm.x86.avx512.mask.vpshrdv.q.512
+    x86_avx512_mask_vpshrdv_w_128,             // llvm.x86.avx512.mask.vpshrdv.w.128
+    x86_avx512_mask_vpshrdv_w_256,             // llvm.x86.avx512.mask.vpshrdv.w.256
+    x86_avx512_mask_vpshrdv_w_512,             // llvm.x86.avx512.mask.vpshrdv.w.512
+    x86_avx512_mask_vpshufbitqmb_128,          // llvm.x86.avx512.mask.vpshufbitqmb.128
+    x86_avx512_mask_vpshufbitqmb_256,          // llvm.x86.avx512.mask.vpshufbitqmb.256
+    x86_avx512_mask_vpshufbitqmb_512,          // llvm.x86.avx512.mask.vpshufbitqmb.512
+    x86_avx512_mask3_vfmadd_pd_128,            // llvm.x86.avx512.mask3.vfmadd.pd.128
+    x86_avx512_mask3_vfmadd_pd_256,            // llvm.x86.avx512.mask3.vfmadd.pd.256
+    x86_avx512_mask3_vfmadd_pd_512,            // llvm.x86.avx512.mask3.vfmadd.pd.512
+    x86_avx512_mask3_vfmadd_ps_128,            // llvm.x86.avx512.mask3.vfmadd.ps.128
+    x86_avx512_mask3_vfmadd_ps_256,            // llvm.x86.avx512.mask3.vfmadd.ps.256
+    x86_avx512_mask3_vfmadd_ps_512,            // llvm.x86.avx512.mask3.vfmadd.ps.512
+    x86_avx512_mask3_vfmadd_sd,                // llvm.x86.avx512.mask3.vfmadd.sd
+    x86_avx512_mask3_vfmadd_ss,                // llvm.x86.avx512.mask3.vfmadd.ss
+    x86_avx512_mask3_vfmaddsub_pd_128,         // llvm.x86.avx512.mask3.vfmaddsub.pd.128
+    x86_avx512_mask3_vfmaddsub_pd_256,         // llvm.x86.avx512.mask3.vfmaddsub.pd.256
+    x86_avx512_mask3_vfmaddsub_pd_512,         // llvm.x86.avx512.mask3.vfmaddsub.pd.512
+    x86_avx512_mask3_vfmaddsub_ps_128,         // llvm.x86.avx512.mask3.vfmaddsub.ps.128
+    x86_avx512_mask3_vfmaddsub_ps_256,         // llvm.x86.avx512.mask3.vfmaddsub.ps.256
+    x86_avx512_mask3_vfmaddsub_ps_512,         // llvm.x86.avx512.mask3.vfmaddsub.ps.512
+    x86_avx512_mask3_vfmsub_pd_128,            // llvm.x86.avx512.mask3.vfmsub.pd.128
+    x86_avx512_mask3_vfmsub_pd_256,            // llvm.x86.avx512.mask3.vfmsub.pd.256
+    x86_avx512_mask3_vfmsub_pd_512,            // llvm.x86.avx512.mask3.vfmsub.pd.512
+    x86_avx512_mask3_vfmsub_ps_128,            // llvm.x86.avx512.mask3.vfmsub.ps.128
+    x86_avx512_mask3_vfmsub_ps_256,            // llvm.x86.avx512.mask3.vfmsub.ps.256
+    x86_avx512_mask3_vfmsub_ps_512,            // llvm.x86.avx512.mask3.vfmsub.ps.512
+    x86_avx512_mask3_vfmsub_sd,                // llvm.x86.avx512.mask3.vfmsub.sd
+    x86_avx512_mask3_vfmsub_ss,                // llvm.x86.avx512.mask3.vfmsub.ss
+    x86_avx512_mask3_vfmsubadd_pd_128,         // llvm.x86.avx512.mask3.vfmsubadd.pd.128
+    x86_avx512_mask3_vfmsubadd_pd_256,         // llvm.x86.avx512.mask3.vfmsubadd.pd.256
+    x86_avx512_mask3_vfmsubadd_pd_512,         // llvm.x86.avx512.mask3.vfmsubadd.pd.512
+    x86_avx512_mask3_vfmsubadd_ps_128,         // llvm.x86.avx512.mask3.vfmsubadd.ps.128
+    x86_avx512_mask3_vfmsubadd_ps_256,         // llvm.x86.avx512.mask3.vfmsubadd.ps.256
+    x86_avx512_mask3_vfmsubadd_ps_512,         // llvm.x86.avx512.mask3.vfmsubadd.ps.512
+    x86_avx512_mask3_vfnmsub_pd_128,           // llvm.x86.avx512.mask3.vfnmsub.pd.128
+    x86_avx512_mask3_vfnmsub_pd_256,           // llvm.x86.avx512.mask3.vfnmsub.pd.256
+    x86_avx512_mask3_vfnmsub_pd_512,           // llvm.x86.avx512.mask3.vfnmsub.pd.512
+    x86_avx512_mask3_vfnmsub_ps_128,           // llvm.x86.avx512.mask3.vfnmsub.ps.128
+    x86_avx512_mask3_vfnmsub_ps_256,           // llvm.x86.avx512.mask3.vfnmsub.ps.256
+    x86_avx512_mask3_vfnmsub_ps_512,           // llvm.x86.avx512.mask3.vfnmsub.ps.512
+    x86_avx512_mask3_vfnmsub_sd,               // llvm.x86.avx512.mask3.vfnmsub.sd
+    x86_avx512_mask3_vfnmsub_ss,               // llvm.x86.avx512.mask3.vfnmsub.ss
+    x86_avx512_maskz_fixupimm_pd_128,          // llvm.x86.avx512.maskz.fixupimm.pd.128
+    x86_avx512_maskz_fixupimm_pd_256,          // llvm.x86.avx512.maskz.fixupimm.pd.256
+    x86_avx512_maskz_fixupimm_pd_512,          // llvm.x86.avx512.maskz.fixupimm.pd.512
+    x86_avx512_maskz_fixupimm_ps_128,          // llvm.x86.avx512.maskz.fixupimm.ps.128
+    x86_avx512_maskz_fixupimm_ps_256,          // llvm.x86.avx512.maskz.fixupimm.ps.256
+    x86_avx512_maskz_fixupimm_ps_512,          // llvm.x86.avx512.maskz.fixupimm.ps.512
+    x86_avx512_maskz_fixupimm_sd,              // llvm.x86.avx512.maskz.fixupimm.sd
+    x86_avx512_maskz_fixupimm_ss,              // llvm.x86.avx512.maskz.fixupimm.ss
+    x86_avx512_maskz_pternlog_d_128,           // llvm.x86.avx512.maskz.pternlog.d.128
+    x86_avx512_maskz_pternlog_d_256,           // llvm.x86.avx512.maskz.pternlog.d.256
+    x86_avx512_maskz_pternlog_d_512,           // llvm.x86.avx512.maskz.pternlog.d.512
+    x86_avx512_maskz_pternlog_q_128,           // llvm.x86.avx512.maskz.pternlog.q.128
+    x86_avx512_maskz_pternlog_q_256,           // llvm.x86.avx512.maskz.pternlog.q.256
+    x86_avx512_maskz_pternlog_q_512,           // llvm.x86.avx512.maskz.pternlog.q.512
+    x86_avx512_maskz_vfmadd_pd_128,            // llvm.x86.avx512.maskz.vfmadd.pd.128
+    x86_avx512_maskz_vfmadd_pd_256,            // llvm.x86.avx512.maskz.vfmadd.pd.256
+    x86_avx512_maskz_vfmadd_pd_512,            // llvm.x86.avx512.maskz.vfmadd.pd.512
+    x86_avx512_maskz_vfmadd_ps_128,            // llvm.x86.avx512.maskz.vfmadd.ps.128
+    x86_avx512_maskz_vfmadd_ps_256,            // llvm.x86.avx512.maskz.vfmadd.ps.256
+    x86_avx512_maskz_vfmadd_ps_512,            // llvm.x86.avx512.maskz.vfmadd.ps.512
+    x86_avx512_maskz_vfmadd_sd,                // llvm.x86.avx512.maskz.vfmadd.sd
+    x86_avx512_maskz_vfmadd_ss,                // llvm.x86.avx512.maskz.vfmadd.ss
+    x86_avx512_maskz_vfmaddsub_pd_128,         // llvm.x86.avx512.maskz.vfmaddsub.pd.128
+    x86_avx512_maskz_vfmaddsub_pd_256,         // llvm.x86.avx512.maskz.vfmaddsub.pd.256
+    x86_avx512_maskz_vfmaddsub_pd_512,         // llvm.x86.avx512.maskz.vfmaddsub.pd.512
+    x86_avx512_maskz_vfmaddsub_ps_128,         // llvm.x86.avx512.maskz.vfmaddsub.ps.128
+    x86_avx512_maskz_vfmaddsub_ps_256,         // llvm.x86.avx512.maskz.vfmaddsub.ps.256
+    x86_avx512_maskz_vfmaddsub_ps_512,         // llvm.x86.avx512.maskz.vfmaddsub.ps.512
+    x86_avx512_maskz_vpdpbusd_128,             // llvm.x86.avx512.maskz.vpdpbusd.128
+    x86_avx512_maskz_vpdpbusd_256,             // llvm.x86.avx512.maskz.vpdpbusd.256
+    x86_avx512_maskz_vpdpbusd_512,             // llvm.x86.avx512.maskz.vpdpbusd.512
+    x86_avx512_maskz_vpdpbusds_128,            // llvm.x86.avx512.maskz.vpdpbusds.128
+    x86_avx512_maskz_vpdpbusds_256,            // llvm.x86.avx512.maskz.vpdpbusds.256
+    x86_avx512_maskz_vpdpbusds_512,            // llvm.x86.avx512.maskz.vpdpbusds.512
+    x86_avx512_maskz_vpdpwssd_128,             // llvm.x86.avx512.maskz.vpdpwssd.128
+    x86_avx512_maskz_vpdpwssd_256,             // llvm.x86.avx512.maskz.vpdpwssd.256
+    x86_avx512_maskz_vpdpwssd_512,             // llvm.x86.avx512.maskz.vpdpwssd.512
+    x86_avx512_maskz_vpdpwssds_128,            // llvm.x86.avx512.maskz.vpdpwssds.128
+    x86_avx512_maskz_vpdpwssds_256,            // llvm.x86.avx512.maskz.vpdpwssds.256
+    x86_avx512_maskz_vpdpwssds_512,            // llvm.x86.avx512.maskz.vpdpwssds.512
+    x86_avx512_maskz_vpermt2var_d_128,         // llvm.x86.avx512.maskz.vpermt2var.d.128
+    x86_avx512_maskz_vpermt2var_d_256,         // llvm.x86.avx512.maskz.vpermt2var.d.256
+    x86_avx512_maskz_vpermt2var_d_512,         // llvm.x86.avx512.maskz.vpermt2var.d.512
+    x86_avx512_maskz_vpermt2var_hi_128,        // llvm.x86.avx512.maskz.vpermt2var.hi.128
+    x86_avx512_maskz_vpermt2var_hi_256,        // llvm.x86.avx512.maskz.vpermt2var.hi.256
+    x86_avx512_maskz_vpermt2var_hi_512,        // llvm.x86.avx512.maskz.vpermt2var.hi.512
+    x86_avx512_maskz_vpermt2var_pd_128,        // llvm.x86.avx512.maskz.vpermt2var.pd.128
+    x86_avx512_maskz_vpermt2var_pd_256,        // llvm.x86.avx512.maskz.vpermt2var.pd.256
+    x86_avx512_maskz_vpermt2var_pd_512,        // llvm.x86.avx512.maskz.vpermt2var.pd.512
+    x86_avx512_maskz_vpermt2var_ps_128,        // llvm.x86.avx512.maskz.vpermt2var.ps.128
+    x86_avx512_maskz_vpermt2var_ps_256,        // llvm.x86.avx512.maskz.vpermt2var.ps.256
+    x86_avx512_maskz_vpermt2var_ps_512,        // llvm.x86.avx512.maskz.vpermt2var.ps.512
+    x86_avx512_maskz_vpermt2var_q_128,         // llvm.x86.avx512.maskz.vpermt2var.q.128
+    x86_avx512_maskz_vpermt2var_q_256,         // llvm.x86.avx512.maskz.vpermt2var.q.256
+    x86_avx512_maskz_vpermt2var_q_512,         // llvm.x86.avx512.maskz.vpermt2var.q.512
+    x86_avx512_maskz_vpermt2var_qi_128,        // llvm.x86.avx512.maskz.vpermt2var.qi.128
+    x86_avx512_maskz_vpermt2var_qi_256,        // llvm.x86.avx512.maskz.vpermt2var.qi.256
+    x86_avx512_maskz_vpermt2var_qi_512,        // llvm.x86.avx512.maskz.vpermt2var.qi.512
+    x86_avx512_maskz_vpmadd52h_uq_128,         // llvm.x86.avx512.maskz.vpmadd52h.uq.128
+    x86_avx512_maskz_vpmadd52h_uq_256,         // llvm.x86.avx512.maskz.vpmadd52h.uq.256
+    x86_avx512_maskz_vpmadd52h_uq_512,         // llvm.x86.avx512.maskz.vpmadd52h.uq.512
+    x86_avx512_maskz_vpmadd52l_uq_128,         // llvm.x86.avx512.maskz.vpmadd52l.uq.128
+    x86_avx512_maskz_vpmadd52l_uq_256,         // llvm.x86.avx512.maskz.vpmadd52l.uq.256
+    x86_avx512_maskz_vpmadd52l_uq_512,         // llvm.x86.avx512.maskz.vpmadd52l.uq.512
+    x86_avx512_maskz_vpshldv_d_128,            // llvm.x86.avx512.maskz.vpshldv.d.128
+    x86_avx512_maskz_vpshldv_d_256,            // llvm.x86.avx512.maskz.vpshldv.d.256
+    x86_avx512_maskz_vpshldv_d_512,            // llvm.x86.avx512.maskz.vpshldv.d.512
+    x86_avx512_maskz_vpshldv_q_128,            // llvm.x86.avx512.maskz.vpshldv.q.128
+    x86_avx512_maskz_vpshldv_q_256,            // llvm.x86.avx512.maskz.vpshldv.q.256
+    x86_avx512_maskz_vpshldv_q_512,            // llvm.x86.avx512.maskz.vpshldv.q.512
+    x86_avx512_maskz_vpshldv_w_128,            // llvm.x86.avx512.maskz.vpshldv.w.128
+    x86_avx512_maskz_vpshldv_w_256,            // llvm.x86.avx512.maskz.vpshldv.w.256
+    x86_avx512_maskz_vpshldv_w_512,            // llvm.x86.avx512.maskz.vpshldv.w.512
+    x86_avx512_maskz_vpshrdv_d_128,            // llvm.x86.avx512.maskz.vpshrdv.d.128
+    x86_avx512_maskz_vpshrdv_d_256,            // llvm.x86.avx512.maskz.vpshrdv.d.256
+    x86_avx512_maskz_vpshrdv_d_512,            // llvm.x86.avx512.maskz.vpshrdv.d.512
+    x86_avx512_maskz_vpshrdv_q_128,            // llvm.x86.avx512.maskz.vpshrdv.q.128
+    x86_avx512_maskz_vpshrdv_q_256,            // llvm.x86.avx512.maskz.vpshrdv.q.256
+    x86_avx512_maskz_vpshrdv_q_512,            // llvm.x86.avx512.maskz.vpshrdv.q.512
+    x86_avx512_maskz_vpshrdv_w_128,            // llvm.x86.avx512.maskz.vpshrdv.w.128
+    x86_avx512_maskz_vpshrdv_w_256,            // llvm.x86.avx512.maskz.vpshrdv.w.256
+    x86_avx512_maskz_vpshrdv_w_512,            // llvm.x86.avx512.maskz.vpshrdv.w.512
+    x86_avx512_packssdw_512,                   // llvm.x86.avx512.packssdw.512
+    x86_avx512_packsswb_512,                   // llvm.x86.avx512.packsswb.512
+    x86_avx512_packusdw_512,                   // llvm.x86.avx512.packusdw.512
+    x86_avx512_packuswb_512,                   // llvm.x86.avx512.packuswb.512
+    x86_avx512_pmul_dq_512,                    // llvm.x86.avx512.pmul.dq.512
+    x86_avx512_pmul_hr_sw_512,                 // llvm.x86.avx512.pmul.hr.sw.512
+    x86_avx512_pmulh_w_512,                    // llvm.x86.avx512.pmulh.w.512
+    x86_avx512_pmulhu_w_512,                   // llvm.x86.avx512.pmulhu.w.512
+    x86_avx512_pmulu_dq_512,                   // llvm.x86.avx512.pmulu.dq.512
+    x86_avx512_psad_bw_512,                    // llvm.x86.avx512.psad.bw.512
+    x86_avx512_pshuf_b_512,                    // llvm.x86.avx512.pshuf.b.512
+    x86_avx512_psll_d_512,                     // llvm.x86.avx512.psll.d.512
+    x86_avx512_psll_q_512,                     // llvm.x86.avx512.psll.q.512
+    x86_avx512_psll_w_512,                     // llvm.x86.avx512.psll.w.512
+    x86_avx512_pslli_d_512,                    // llvm.x86.avx512.pslli.d.512
+    x86_avx512_pslli_q_512,                    // llvm.x86.avx512.pslli.q.512
+    x86_avx512_pslli_w_512,                    // llvm.x86.avx512.pslli.w.512
+    x86_avx512_psllv_d_512,                    // llvm.x86.avx512.psllv.d.512
+    x86_avx512_psllv_q_512,                    // llvm.x86.avx512.psllv.q.512
+    x86_avx512_psllv_w_128,                    // llvm.x86.avx512.psllv.w.128
+    x86_avx512_psllv_w_256,                    // llvm.x86.avx512.psllv.w.256
+    x86_avx512_psllv_w_512,                    // llvm.x86.avx512.psllv.w.512
+    x86_avx512_psra_d_512,                     // llvm.x86.avx512.psra.d.512
+    x86_avx512_psra_q_128,                     // llvm.x86.avx512.psra.q.128
+    x86_avx512_psra_q_256,                     // llvm.x86.avx512.psra.q.256
+    x86_avx512_psra_q_512,                     // llvm.x86.avx512.psra.q.512
+    x86_avx512_psra_w_512,                     // llvm.x86.avx512.psra.w.512
+    x86_avx512_psrai_d_512,                    // llvm.x86.avx512.psrai.d.512
+    x86_avx512_psrai_q_128,                    // llvm.x86.avx512.psrai.q.128
+    x86_avx512_psrai_q_256,                    // llvm.x86.avx512.psrai.q.256
+    x86_avx512_psrai_q_512,                    // llvm.x86.avx512.psrai.q.512
+    x86_avx512_psrai_w_512,                    // llvm.x86.avx512.psrai.w.512
+    x86_avx512_psrav_d_512,                    // llvm.x86.avx512.psrav.d.512
+    x86_avx512_psrav_q_128,                    // llvm.x86.avx512.psrav.q.128
+    x86_avx512_psrav_q_256,                    // llvm.x86.avx512.psrav.q.256
+    x86_avx512_psrav_q_512,                    // llvm.x86.avx512.psrav.q.512
+    x86_avx512_psrav_w_128,                    // llvm.x86.avx512.psrav.w.128
+    x86_avx512_psrav_w_256,                    // llvm.x86.avx512.psrav.w.256
+    x86_avx512_psrav_w_512,                    // llvm.x86.avx512.psrav.w.512
+    x86_avx512_psrl_d_512,                     // llvm.x86.avx512.psrl.d.512
+    x86_avx512_psrl_q_512,                     // llvm.x86.avx512.psrl.q.512
+    x86_avx512_psrl_w_512,                     // llvm.x86.avx512.psrl.w.512
+    x86_avx512_psrli_d_512,                    // llvm.x86.avx512.psrli.d.512
+    x86_avx512_psrli_q_512,                    // llvm.x86.avx512.psrli.q.512
+    x86_avx512_psrli_w_512,                    // llvm.x86.avx512.psrli.w.512
+    x86_avx512_psrlv_d_512,                    // llvm.x86.avx512.psrlv.d.512
+    x86_avx512_psrlv_q_512,                    // llvm.x86.avx512.psrlv.q.512
+    x86_avx512_psrlv_w_128,                    // llvm.x86.avx512.psrlv.w.128
+    x86_avx512_psrlv_w_256,                    // llvm.x86.avx512.psrlv.w.256
+    x86_avx512_psrlv_w_512,                    // llvm.x86.avx512.psrlv.w.512
+    x86_avx512_rcp14_pd_128,                   // llvm.x86.avx512.rcp14.pd.128
+    x86_avx512_rcp14_pd_256,                   // llvm.x86.avx512.rcp14.pd.256
+    x86_avx512_rcp14_pd_512,                   // llvm.x86.avx512.rcp14.pd.512
+    x86_avx512_rcp14_ps_128,                   // llvm.x86.avx512.rcp14.ps.128
+    x86_avx512_rcp14_ps_256,                   // llvm.x86.avx512.rcp14.ps.256
+    x86_avx512_rcp14_ps_512,                   // llvm.x86.avx512.rcp14.ps.512
+    x86_avx512_rcp14_sd,                       // llvm.x86.avx512.rcp14.sd
+    x86_avx512_rcp14_ss,                       // llvm.x86.avx512.rcp14.ss
+    x86_avx512_rcp28_pd,                       // llvm.x86.avx512.rcp28.pd
+    x86_avx512_rcp28_ps,                       // llvm.x86.avx512.rcp28.ps
+    x86_avx512_rcp28_sd,                       // llvm.x86.avx512.rcp28.sd
+    x86_avx512_rcp28_ss,                       // llvm.x86.avx512.rcp28.ss
+    x86_avx512_rsqrt14_pd_128,                 // llvm.x86.avx512.rsqrt14.pd.128
+    x86_avx512_rsqrt14_pd_256,                 // llvm.x86.avx512.rsqrt14.pd.256
+    x86_avx512_rsqrt14_pd_512,                 // llvm.x86.avx512.rsqrt14.pd.512
+    x86_avx512_rsqrt14_ps_128,                 // llvm.x86.avx512.rsqrt14.ps.128
+    x86_avx512_rsqrt14_ps_256,                 // llvm.x86.avx512.rsqrt14.ps.256
+    x86_avx512_rsqrt14_ps_512,                 // llvm.x86.avx512.rsqrt14.ps.512
+    x86_avx512_rsqrt14_sd,                     // llvm.x86.avx512.rsqrt14.sd
+    x86_avx512_rsqrt14_ss,                     // llvm.x86.avx512.rsqrt14.ss
+    x86_avx512_rsqrt28_pd,                     // llvm.x86.avx512.rsqrt28.pd
+    x86_avx512_rsqrt28_ps,                     // llvm.x86.avx512.rsqrt28.ps
+    x86_avx512_rsqrt28_sd,                     // llvm.x86.avx512.rsqrt28.sd
+    x86_avx512_rsqrt28_ss,                     // llvm.x86.avx512.rsqrt28.ss
+    x86_avx512_scatter_dpd_512,                // llvm.x86.avx512.scatter.dpd.512
+    x86_avx512_scatter_dpi_512,                // llvm.x86.avx512.scatter.dpi.512
+    x86_avx512_scatter_dpq_512,                // llvm.x86.avx512.scatter.dpq.512
+    x86_avx512_scatter_dps_512,                // llvm.x86.avx512.scatter.dps.512
+    x86_avx512_scatter_qpd_512,                // llvm.x86.avx512.scatter.qpd.512
+    x86_avx512_scatter_qpi_512,                // llvm.x86.avx512.scatter.qpi.512
+    x86_avx512_scatter_qpq_512,                // llvm.x86.avx512.scatter.qpq.512
+    x86_avx512_scatter_qps_512,                // llvm.x86.avx512.scatter.qps.512
+    x86_avx512_scatterdiv2_df,                 // llvm.x86.avx512.scatterdiv2.df
+    x86_avx512_scatterdiv2_di,                 // llvm.x86.avx512.scatterdiv2.di
+    x86_avx512_scatterdiv4_df,                 // llvm.x86.avx512.scatterdiv4.df
+    x86_avx512_scatterdiv4_di,                 // llvm.x86.avx512.scatterdiv4.di
+    x86_avx512_scatterdiv4_sf,                 // llvm.x86.avx512.scatterdiv4.sf
+    x86_avx512_scatterdiv4_si,                 // llvm.x86.avx512.scatterdiv4.si
+    x86_avx512_scatterdiv8_sf,                 // llvm.x86.avx512.scatterdiv8.sf
+    x86_avx512_scatterdiv8_si,                 // llvm.x86.avx512.scatterdiv8.si
+    x86_avx512_scatterpf_dpd_512,              // llvm.x86.avx512.scatterpf.dpd.512
+    x86_avx512_scatterpf_dps_512,              // llvm.x86.avx512.scatterpf.dps.512
+    x86_avx512_scatterpf_qpd_512,              // llvm.x86.avx512.scatterpf.qpd.512
+    x86_avx512_scatterpf_qps_512,              // llvm.x86.avx512.scatterpf.qps.512
+    x86_avx512_scattersiv2_df,                 // llvm.x86.avx512.scattersiv2.df
+    x86_avx512_scattersiv2_di,                 // llvm.x86.avx512.scattersiv2.di
+    x86_avx512_scattersiv4_df,                 // llvm.x86.avx512.scattersiv4.df
+    x86_avx512_scattersiv4_di,                 // llvm.x86.avx512.scattersiv4.di
+    x86_avx512_scattersiv4_sf,                 // llvm.x86.avx512.scattersiv4.sf
+    x86_avx512_scattersiv4_si,                 // llvm.x86.avx512.scattersiv4.si
+    x86_avx512_scattersiv8_sf,                 // llvm.x86.avx512.scattersiv8.sf
+    x86_avx512_scattersiv8_si,                 // llvm.x86.avx512.scattersiv8.si
+    x86_avx512_vbroadcast_sd_512,              // llvm.x86.avx512.vbroadcast.sd.512
+    x86_avx512_vbroadcast_ss_512,              // llvm.x86.avx512.vbroadcast.ss.512
+    x86_avx512_vcomi_sd,                       // llvm.x86.avx512.vcomi.sd
+    x86_avx512_vcomi_ss,                       // llvm.x86.avx512.vcomi.ss
+    x86_avx512_vcvtsd2si32,                    // llvm.x86.avx512.vcvtsd2si32
+    x86_avx512_vcvtsd2si64,                    // llvm.x86.avx512.vcvtsd2si64
+    x86_avx512_vcvtsd2usi32,                   // llvm.x86.avx512.vcvtsd2usi32
+    x86_avx512_vcvtsd2usi64,                   // llvm.x86.avx512.vcvtsd2usi64
+    x86_avx512_vcvtss2si32,                    // llvm.x86.avx512.vcvtss2si32
+    x86_avx512_vcvtss2si64,                    // llvm.x86.avx512.vcvtss2si64
+    x86_avx512_vcvtss2usi32,                   // llvm.x86.avx512.vcvtss2usi32
+    x86_avx512_vcvtss2usi64,                   // llvm.x86.avx512.vcvtss2usi64
+    x86_avx512_vpermilvar_pd_512,              // llvm.x86.avx512.vpermilvar.pd.512
+    x86_avx512_vpermilvar_ps_512,              // llvm.x86.avx512.vpermilvar.ps.512
+    x86_bmi_bextr_32,                          // llvm.x86.bmi.bextr.32
+    x86_bmi_bextr_64,                          // llvm.x86.bmi.bextr.64
+    x86_bmi_bzhi_32,                           // llvm.x86.bmi.bzhi.32
+    x86_bmi_bzhi_64,                           // llvm.x86.bmi.bzhi.64
+    x86_bmi_pdep_32,                           // llvm.x86.bmi.pdep.32
+    x86_bmi_pdep_64,                           // llvm.x86.bmi.pdep.64
+    x86_bmi_pext_32,                           // llvm.x86.bmi.pext.32
+    x86_bmi_pext_64,                           // llvm.x86.bmi.pext.64
+    x86_clflushopt,                            // llvm.x86.clflushopt
+    x86_clrssbsy,                              // llvm.x86.clrssbsy
+    x86_clwb,                                  // llvm.x86.clwb
+    x86_clzero,                                // llvm.x86.clzero
+    x86_flags_read_u32,                        // llvm.x86.flags.read.u32
+    x86_flags_read_u64,                        // llvm.x86.flags.read.u64
+    x86_flags_write_u32,                       // llvm.x86.flags.write.u32
+    x86_flags_write_u64,                       // llvm.x86.flags.write.u64
+    x86_fma_vfmadd_pd,                         // llvm.x86.fma.vfmadd.pd
+    x86_fma_vfmadd_pd_256,                     // llvm.x86.fma.vfmadd.pd.256
+    x86_fma_vfmadd_ps,                         // llvm.x86.fma.vfmadd.ps
+    x86_fma_vfmadd_ps_256,                     // llvm.x86.fma.vfmadd.ps.256
+    x86_fma_vfmadd_sd,                         // llvm.x86.fma.vfmadd.sd
+    x86_fma_vfmadd_ss,                         // llvm.x86.fma.vfmadd.ss
+    x86_fma_vfmaddsub_pd,                      // llvm.x86.fma.vfmaddsub.pd
+    x86_fma_vfmaddsub_pd_256,                  // llvm.x86.fma.vfmaddsub.pd.256
+    x86_fma_vfmaddsub_ps,                      // llvm.x86.fma.vfmaddsub.ps
+    x86_fma_vfmaddsub_ps_256,                  // llvm.x86.fma.vfmaddsub.ps.256
+    x86_fma_vfmsub_pd,                         // llvm.x86.fma.vfmsub.pd
+    x86_fma_vfmsub_pd_256,                     // llvm.x86.fma.vfmsub.pd.256
+    x86_fma_vfmsub_ps,                         // llvm.x86.fma.vfmsub.ps
+    x86_fma_vfmsub_ps_256,                     // llvm.x86.fma.vfmsub.ps.256
+    x86_fma_vfmsub_sd,                         // llvm.x86.fma.vfmsub.sd
+    x86_fma_vfmsub_ss,                         // llvm.x86.fma.vfmsub.ss
+    x86_fma_vfmsubadd_pd,                      // llvm.x86.fma.vfmsubadd.pd
+    x86_fma_vfmsubadd_pd_256,                  // llvm.x86.fma.vfmsubadd.pd.256
+    x86_fma_vfmsubadd_ps,                      // llvm.x86.fma.vfmsubadd.ps
+    x86_fma_vfmsubadd_ps_256,                  // llvm.x86.fma.vfmsubadd.ps.256
+    x86_fma_vfnmadd_pd,                        // llvm.x86.fma.vfnmadd.pd
+    x86_fma_vfnmadd_pd_256,                    // llvm.x86.fma.vfnmadd.pd.256
+    x86_fma_vfnmadd_ps,                        // llvm.x86.fma.vfnmadd.ps
+    x86_fma_vfnmadd_ps_256,                    // llvm.x86.fma.vfnmadd.ps.256
+    x86_fma_vfnmadd_sd,                        // llvm.x86.fma.vfnmadd.sd
+    x86_fma_vfnmadd_ss,                        // llvm.x86.fma.vfnmadd.ss
+    x86_fma_vfnmsub_pd,                        // llvm.x86.fma.vfnmsub.pd
+    x86_fma_vfnmsub_pd_256,                    // llvm.x86.fma.vfnmsub.pd.256
+    x86_fma_vfnmsub_ps,                        // llvm.x86.fma.vfnmsub.ps
+    x86_fma_vfnmsub_ps_256,                    // llvm.x86.fma.vfnmsub.ps.256
+    x86_fma_vfnmsub_sd,                        // llvm.x86.fma.vfnmsub.sd
+    x86_fma_vfnmsub_ss,                        // llvm.x86.fma.vfnmsub.ss
+    x86_fma4_vfmadd_sd,                        // llvm.x86.fma4.vfmadd.sd
+    x86_fma4_vfmadd_ss,                        // llvm.x86.fma4.vfmadd.ss
+    x86_fxrstor,                               // llvm.x86.fxrstor
+    x86_fxrstor64,                             // llvm.x86.fxrstor64
+    x86_fxsave,                                // llvm.x86.fxsave
+    x86_fxsave64,                              // llvm.x86.fxsave64
+    x86_incsspd,                               // llvm.x86.incsspd
+    x86_incsspq,                               // llvm.x86.incsspq
+    x86_int,                                   // llvm.x86.int
+    x86_llwpcb,                                // llvm.x86.llwpcb
+    x86_lwpins32,                              // llvm.x86.lwpins32
+    x86_lwpins64,                              // llvm.x86.lwpins64
+    x86_lwpval32,                              // llvm.x86.lwpval32
+    x86_lwpval64,                              // llvm.x86.lwpval64
+    x86_mmx_emms,                              // llvm.x86.mmx.emms
+    x86_mmx_femms,                             // llvm.x86.mmx.femms
+    x86_mmx_maskmovq,                          // llvm.x86.mmx.maskmovq
+    x86_mmx_movnt_dq,                          // llvm.x86.mmx.movnt.dq
+    x86_mmx_packssdw,                          // llvm.x86.mmx.packssdw
+    x86_mmx_packsswb,                          // llvm.x86.mmx.packsswb
+    x86_mmx_packuswb,                          // llvm.x86.mmx.packuswb
+    x86_mmx_padd_b,                            // llvm.x86.mmx.padd.b
+    x86_mmx_padd_d,                            // llvm.x86.mmx.padd.d
+    x86_mmx_padd_q,                            // llvm.x86.mmx.padd.q
+    x86_mmx_padd_w,                            // llvm.x86.mmx.padd.w
+    x86_mmx_padds_b,                           // llvm.x86.mmx.padds.b
+    x86_mmx_padds_w,                           // llvm.x86.mmx.padds.w
+    x86_mmx_paddus_b,                          // llvm.x86.mmx.paddus.b
+    x86_mmx_paddus_w,                          // llvm.x86.mmx.paddus.w
+    x86_mmx_palignr_b,                         // llvm.x86.mmx.palignr.b
+    x86_mmx_pand,                              // llvm.x86.mmx.pand
+    x86_mmx_pandn,                             // llvm.x86.mmx.pandn
+    x86_mmx_pavg_b,                            // llvm.x86.mmx.pavg.b
+    x86_mmx_pavg_w,                            // llvm.x86.mmx.pavg.w
+    x86_mmx_pcmpeq_b,                          // llvm.x86.mmx.pcmpeq.b
+    x86_mmx_pcmpeq_d,                          // llvm.x86.mmx.pcmpeq.d
+    x86_mmx_pcmpeq_w,                          // llvm.x86.mmx.pcmpeq.w
+    x86_mmx_pcmpgt_b,                          // llvm.x86.mmx.pcmpgt.b
+    x86_mmx_pcmpgt_d,                          // llvm.x86.mmx.pcmpgt.d
+    x86_mmx_pcmpgt_w,                          // llvm.x86.mmx.pcmpgt.w
+    x86_mmx_pextr_w,                           // llvm.x86.mmx.pextr.w
+    x86_mmx_pinsr_w,                           // llvm.x86.mmx.pinsr.w
+    x86_mmx_pmadd_wd,                          // llvm.x86.mmx.pmadd.wd
+    x86_mmx_pmaxs_w,                           // llvm.x86.mmx.pmaxs.w
+    x86_mmx_pmaxu_b,                           // llvm.x86.mmx.pmaxu.b
+    x86_mmx_pmins_w,                           // llvm.x86.mmx.pmins.w
+    x86_mmx_pminu_b,                           // llvm.x86.mmx.pminu.b
+    x86_mmx_pmovmskb,                          // llvm.x86.mmx.pmovmskb
+    x86_mmx_pmulh_w,                           // llvm.x86.mmx.pmulh.w
+    x86_mmx_pmulhu_w,                          // llvm.x86.mmx.pmulhu.w
+    x86_mmx_pmull_w,                           // llvm.x86.mmx.pmull.w
+    x86_mmx_pmulu_dq,                          // llvm.x86.mmx.pmulu.dq
+    x86_mmx_por,                               // llvm.x86.mmx.por
+    x86_mmx_psad_bw,                           // llvm.x86.mmx.psad.bw
+    x86_mmx_psll_d,                            // llvm.x86.mmx.psll.d
+    x86_mmx_psll_q,                            // llvm.x86.mmx.psll.q
+    x86_mmx_psll_w,                            // llvm.x86.mmx.psll.w
+    x86_mmx_pslli_d,                           // llvm.x86.mmx.pslli.d
+    x86_mmx_pslli_q,                           // llvm.x86.mmx.pslli.q
+    x86_mmx_pslli_w,                           // llvm.x86.mmx.pslli.w
+    x86_mmx_psra_d,                            // llvm.x86.mmx.psra.d
+    x86_mmx_psra_w,                            // llvm.x86.mmx.psra.w
+    x86_mmx_psrai_d,                           // llvm.x86.mmx.psrai.d
+    x86_mmx_psrai_w,                           // llvm.x86.mmx.psrai.w
+    x86_mmx_psrl_d,                            // llvm.x86.mmx.psrl.d
+    x86_mmx_psrl_q,                            // llvm.x86.mmx.psrl.q
+    x86_mmx_psrl_w,                            // llvm.x86.mmx.psrl.w
+    x86_mmx_psrli_d,                           // llvm.x86.mmx.psrli.d
+    x86_mmx_psrli_q,                           // llvm.x86.mmx.psrli.q
+    x86_mmx_psrli_w,                           // llvm.x86.mmx.psrli.w
+    x86_mmx_psub_b,                            // llvm.x86.mmx.psub.b
+    x86_mmx_psub_d,                            // llvm.x86.mmx.psub.d
+    x86_mmx_psub_q,                            // llvm.x86.mmx.psub.q
+    x86_mmx_psub_w,                            // llvm.x86.mmx.psub.w
+    x86_mmx_psubs_b,                           // llvm.x86.mmx.psubs.b
+    x86_mmx_psubs_w,                           // llvm.x86.mmx.psubs.w
+    x86_mmx_psubus_b,                          // llvm.x86.mmx.psubus.b
+    x86_mmx_psubus_w,                          // llvm.x86.mmx.psubus.w
+    x86_mmx_punpckhbw,                         // llvm.x86.mmx.punpckhbw
+    x86_mmx_punpckhdq,                         // llvm.x86.mmx.punpckhdq
+    x86_mmx_punpckhwd,                         // llvm.x86.mmx.punpckhwd
+    x86_mmx_punpcklbw,                         // llvm.x86.mmx.punpcklbw
+    x86_mmx_punpckldq,                         // llvm.x86.mmx.punpckldq
+    x86_mmx_punpcklwd,                         // llvm.x86.mmx.punpcklwd
+    x86_mmx_pxor,                              // llvm.x86.mmx.pxor
+    x86_monitorx,                              // llvm.x86.monitorx
+    x86_mwaitx,                                // llvm.x86.mwaitx
+    x86_pclmulqdq,                             // llvm.x86.pclmulqdq
+    x86_pclmulqdq_256,                         // llvm.x86.pclmulqdq.256
+    x86_pclmulqdq_512,                         // llvm.x86.pclmulqdq.512
+    x86_rdfsbase_32,                           // llvm.x86.rdfsbase.32
+    x86_rdfsbase_64,                           // llvm.x86.rdfsbase.64
+    x86_rdgsbase_32,                           // llvm.x86.rdgsbase.32
+    x86_rdgsbase_64,                           // llvm.x86.rdgsbase.64
+    x86_rdpid,                                 // llvm.x86.rdpid
+    x86_rdpkru,                                // llvm.x86.rdpkru
+    x86_rdpmc,                                 // llvm.x86.rdpmc
+    x86_rdrand_16,                             // llvm.x86.rdrand.16
+    x86_rdrand_32,                             // llvm.x86.rdrand.32
+    x86_rdrand_64,                             // llvm.x86.rdrand.64
+    x86_rdseed_16,                             // llvm.x86.rdseed.16
+    x86_rdseed_32,                             // llvm.x86.rdseed.32
+    x86_rdseed_64,                             // llvm.x86.rdseed.64
+    x86_rdsspd,                                // llvm.x86.rdsspd
+    x86_rdsspq,                                // llvm.x86.rdsspq
+    x86_rdtsc,                                 // llvm.x86.rdtsc
+    x86_rdtscp,                                // llvm.x86.rdtscp
+    x86_rstorssp,                              // llvm.x86.rstorssp
+    x86_saveprevssp,                           // llvm.x86.saveprevssp
+    x86_seh_ehguard,                           // llvm.x86.seh.ehguard
+    x86_seh_ehregnode,                         // llvm.x86.seh.ehregnode
+    x86_seh_lsda,                              // llvm.x86.seh.lsda
+    x86_seh_recoverfp,                         // llvm.x86.seh.recoverfp
+    x86_setssbsy,                              // llvm.x86.setssbsy
+    x86_sha1msg1,                              // llvm.x86.sha1msg1
+    x86_sha1msg2,                              // llvm.x86.sha1msg2
+    x86_sha1nexte,                             // llvm.x86.sha1nexte
+    x86_sha1rnds4,                             // llvm.x86.sha1rnds4
+    x86_sha256msg1,                            // llvm.x86.sha256msg1
+    x86_sha256msg2,                            // llvm.x86.sha256msg2
+    x86_sha256rnds2,                           // llvm.x86.sha256rnds2
+    x86_slwpcb,                                // llvm.x86.slwpcb
+    x86_sse_cmp_ps,                            // llvm.x86.sse.cmp.ps
+    x86_sse_cmp_ss,                            // llvm.x86.sse.cmp.ss
+    x86_sse_comieq_ss,                         // llvm.x86.sse.comieq.ss
+    x86_sse_comige_ss,                         // llvm.x86.sse.comige.ss
+    x86_sse_comigt_ss,                         // llvm.x86.sse.comigt.ss
+    x86_sse_comile_ss,                         // llvm.x86.sse.comile.ss
+    x86_sse_comilt_ss,                         // llvm.x86.sse.comilt.ss
+    x86_sse_comineq_ss,                        // llvm.x86.sse.comineq.ss
+    x86_sse_cvtpd2pi,                          // llvm.x86.sse.cvtpd2pi
+    x86_sse_cvtpi2pd,                          // llvm.x86.sse.cvtpi2pd
+    x86_sse_cvtpi2ps,                          // llvm.x86.sse.cvtpi2ps
+    x86_sse_cvtps2pi,                          // llvm.x86.sse.cvtps2pi
+    x86_sse_cvtsi2ss,                          // llvm.x86.sse.cvtsi2ss
+    x86_sse_cvtsi642ss,                        // llvm.x86.sse.cvtsi642ss
+    x86_sse_cvtss2si,                          // llvm.x86.sse.cvtss2si
+    x86_sse_cvtss2si64,                        // llvm.x86.sse.cvtss2si64
+    x86_sse_cvttpd2pi,                         // llvm.x86.sse.cvttpd2pi
+    x86_sse_cvttps2pi,                         // llvm.x86.sse.cvttps2pi
+    x86_sse_cvttss2si,                         // llvm.x86.sse.cvttss2si
+    x86_sse_cvttss2si64,                       // llvm.x86.sse.cvttss2si64
+    x86_sse_ldmxcsr,                           // llvm.x86.sse.ldmxcsr
+    x86_sse_max_ps,                            // llvm.x86.sse.max.ps
+    x86_sse_max_ss,                            // llvm.x86.sse.max.ss
+    x86_sse_min_ps,                            // llvm.x86.sse.min.ps
+    x86_sse_min_ss,                            // llvm.x86.sse.min.ss
+    x86_sse_movmsk_ps,                         // llvm.x86.sse.movmsk.ps
+    x86_sse_pshuf_w,                           // llvm.x86.sse.pshuf.w
+    x86_sse_rcp_ps,                            // llvm.x86.sse.rcp.ps
+    x86_sse_rcp_ss,                            // llvm.x86.sse.rcp.ss
+    x86_sse_rsqrt_ps,                          // llvm.x86.sse.rsqrt.ps
+    x86_sse_rsqrt_ss,                          // llvm.x86.sse.rsqrt.ss
+    x86_sse_sfence,                            // llvm.x86.sse.sfence
+    x86_sse_sqrt_ps,                           // llvm.x86.sse.sqrt.ps
+    x86_sse_sqrt_ss,                           // llvm.x86.sse.sqrt.ss
+    x86_sse_stmxcsr,                           // llvm.x86.sse.stmxcsr
+    x86_sse_ucomieq_ss,                        // llvm.x86.sse.ucomieq.ss
+    x86_sse_ucomige_ss,                        // llvm.x86.sse.ucomige.ss
+    x86_sse_ucomigt_ss,                        // llvm.x86.sse.ucomigt.ss
+    x86_sse_ucomile_ss,                        // llvm.x86.sse.ucomile.ss
+    x86_sse_ucomilt_ss,                        // llvm.x86.sse.ucomilt.ss
+    x86_sse_ucomineq_ss,                       // llvm.x86.sse.ucomineq.ss
+    x86_sse2_clflush,                          // llvm.x86.sse2.clflush
+    x86_sse2_cmp_pd,                           // llvm.x86.sse2.cmp.pd
+    x86_sse2_cmp_sd,                           // llvm.x86.sse2.cmp.sd
+    x86_sse2_comieq_sd,                        // llvm.x86.sse2.comieq.sd
+    x86_sse2_comige_sd,                        // llvm.x86.sse2.comige.sd
+    x86_sse2_comigt_sd,                        // llvm.x86.sse2.comigt.sd
+    x86_sse2_comile_sd,                        // llvm.x86.sse2.comile.sd
+    x86_sse2_comilt_sd,                        // llvm.x86.sse2.comilt.sd
+    x86_sse2_comineq_sd,                       // llvm.x86.sse2.comineq.sd
+    x86_sse2_cvtdq2ps,                         // llvm.x86.sse2.cvtdq2ps
+    x86_sse2_cvtpd2dq,                         // llvm.x86.sse2.cvtpd2dq
+    x86_sse2_cvtpd2ps,                         // llvm.x86.sse2.cvtpd2ps
+    x86_sse2_cvtps2dq,                         // llvm.x86.sse2.cvtps2dq
+    x86_sse2_cvtsd2si,                         // llvm.x86.sse2.cvtsd2si
+    x86_sse2_cvtsd2si64,                       // llvm.x86.sse2.cvtsd2si64
+    x86_sse2_cvtsd2ss,                         // llvm.x86.sse2.cvtsd2ss
+    x86_sse2_cvtsi2sd,                         // llvm.x86.sse2.cvtsi2sd
+    x86_sse2_cvtsi642sd,                       // llvm.x86.sse2.cvtsi642sd
+    x86_sse2_cvtss2sd,                         // llvm.x86.sse2.cvtss2sd
+    x86_sse2_cvttpd2dq,                        // llvm.x86.sse2.cvttpd2dq
+    x86_sse2_cvttps2dq,                        // llvm.x86.sse2.cvttps2dq
+    x86_sse2_cvttsd2si,                        // llvm.x86.sse2.cvttsd2si
+    x86_sse2_cvttsd2si64,                      // llvm.x86.sse2.cvttsd2si64
+    x86_sse2_lfence,                           // llvm.x86.sse2.lfence
+    x86_sse2_maskmov_dqu,                      // llvm.x86.sse2.maskmov.dqu
+    x86_sse2_max_pd,                           // llvm.x86.sse2.max.pd
+    x86_sse2_max_sd,                           // llvm.x86.sse2.max.sd
+    x86_sse2_mfence,                           // llvm.x86.sse2.mfence
+    x86_sse2_min_pd,                           // llvm.x86.sse2.min.pd
+    x86_sse2_min_sd,                           // llvm.x86.sse2.min.sd
+    x86_sse2_movmsk_pd,                        // llvm.x86.sse2.movmsk.pd
+    x86_sse2_packssdw_128,                     // llvm.x86.sse2.packssdw.128
+    x86_sse2_packsswb_128,                     // llvm.x86.sse2.packsswb.128
+    x86_sse2_packuswb_128,                     // llvm.x86.sse2.packuswb.128
+    x86_sse2_padds_b,                          // llvm.x86.sse2.padds.b
+    x86_sse2_padds_w,                          // llvm.x86.sse2.padds.w
+    x86_sse2_paddus_b,                         // llvm.x86.sse2.paddus.b
+    x86_sse2_paddus_w,                         // llvm.x86.sse2.paddus.w
+    x86_sse2_pause,                            // llvm.x86.sse2.pause
+    x86_sse2_pmadd_wd,                         // llvm.x86.sse2.pmadd.wd
+    x86_sse2_pmovmskb_128,                     // llvm.x86.sse2.pmovmskb.128
+    x86_sse2_pmulh_w,                          // llvm.x86.sse2.pmulh.w
+    x86_sse2_pmulhu_w,                         // llvm.x86.sse2.pmulhu.w
+    x86_sse2_pmulu_dq,                         // llvm.x86.sse2.pmulu.dq
+    x86_sse2_psad_bw,                          // llvm.x86.sse2.psad.bw
+    x86_sse2_psll_d,                           // llvm.x86.sse2.psll.d
+    x86_sse2_psll_q,                           // llvm.x86.sse2.psll.q
+    x86_sse2_psll_w,                           // llvm.x86.sse2.psll.w
+    x86_sse2_pslli_d,                          // llvm.x86.sse2.pslli.d
+    x86_sse2_pslli_q,                          // llvm.x86.sse2.pslli.q
+    x86_sse2_pslli_w,                          // llvm.x86.sse2.pslli.w
+    x86_sse2_psra_d,                           // llvm.x86.sse2.psra.d
+    x86_sse2_psra_w,                           // llvm.x86.sse2.psra.w
+    x86_sse2_psrai_d,                          // llvm.x86.sse2.psrai.d
+    x86_sse2_psrai_w,                          // llvm.x86.sse2.psrai.w
+    x86_sse2_psrl_d,                           // llvm.x86.sse2.psrl.d
+    x86_sse2_psrl_q,                           // llvm.x86.sse2.psrl.q
+    x86_sse2_psrl_w,                           // llvm.x86.sse2.psrl.w
+    x86_sse2_psrli_d,                          // llvm.x86.sse2.psrli.d
+    x86_sse2_psrli_q,                          // llvm.x86.sse2.psrli.q
+    x86_sse2_psrli_w,                          // llvm.x86.sse2.psrli.w
+    x86_sse2_psubs_b,                          // llvm.x86.sse2.psubs.b
+    x86_sse2_psubs_w,                          // llvm.x86.sse2.psubs.w
+    x86_sse2_psubus_b,                         // llvm.x86.sse2.psubus.b
+    x86_sse2_psubus_w,                         // llvm.x86.sse2.psubus.w
+    x86_sse2_sqrt_pd,                          // llvm.x86.sse2.sqrt.pd
+    x86_sse2_sqrt_sd,                          // llvm.x86.sse2.sqrt.sd
+    x86_sse2_ucomieq_sd,                       // llvm.x86.sse2.ucomieq.sd
+    x86_sse2_ucomige_sd,                       // llvm.x86.sse2.ucomige.sd
+    x86_sse2_ucomigt_sd,                       // llvm.x86.sse2.ucomigt.sd
+    x86_sse2_ucomile_sd,                       // llvm.x86.sse2.ucomile.sd
+    x86_sse2_ucomilt_sd,                       // llvm.x86.sse2.ucomilt.sd
+    x86_sse2_ucomineq_sd,                      // llvm.x86.sse2.ucomineq.sd
+    x86_sse3_addsub_pd,                        // llvm.x86.sse3.addsub.pd
+    x86_sse3_addsub_ps,                        // llvm.x86.sse3.addsub.ps
+    x86_sse3_hadd_pd,                          // llvm.x86.sse3.hadd.pd
+    x86_sse3_hadd_ps,                          // llvm.x86.sse3.hadd.ps
+    x86_sse3_hsub_pd,                          // llvm.x86.sse3.hsub.pd
+    x86_sse3_hsub_ps,                          // llvm.x86.sse3.hsub.ps
+    x86_sse3_ldu_dq,                           // llvm.x86.sse3.ldu.dq
+    x86_sse3_monitor,                          // llvm.x86.sse3.monitor
+    x86_sse3_mwait,                            // llvm.x86.sse3.mwait
+    x86_sse41_blendvpd,                        // llvm.x86.sse41.blendvpd
+    x86_sse41_blendvps,                        // llvm.x86.sse41.blendvps
+    x86_sse41_dppd,                            // llvm.x86.sse41.dppd
+    x86_sse41_dpps,                            // llvm.x86.sse41.dpps
+    x86_sse41_insertps,                        // llvm.x86.sse41.insertps
+    x86_sse41_mpsadbw,                         // llvm.x86.sse41.mpsadbw
+    x86_sse41_packusdw,                        // llvm.x86.sse41.packusdw
+    x86_sse41_pblendvb,                        // llvm.x86.sse41.pblendvb
+    x86_sse41_phminposuw,                      // llvm.x86.sse41.phminposuw
+    x86_sse41_pmuldq,                          // llvm.x86.sse41.pmuldq
+    x86_sse41_ptestc,                          // llvm.x86.sse41.ptestc
+    x86_sse41_ptestnzc,                        // llvm.x86.sse41.ptestnzc
+    x86_sse41_ptestz,                          // llvm.x86.sse41.ptestz
+    x86_sse41_round_pd,                        // llvm.x86.sse41.round.pd
+    x86_sse41_round_ps,                        // llvm.x86.sse41.round.ps
+    x86_sse41_round_sd,                        // llvm.x86.sse41.round.sd
+    x86_sse41_round_ss,                        // llvm.x86.sse41.round.ss
+    x86_sse42_crc32_32_16,                     // llvm.x86.sse42.crc32.32.16
+    x86_sse42_crc32_32_32,                     // llvm.x86.sse42.crc32.32.32
+    x86_sse42_crc32_32_8,                      // llvm.x86.sse42.crc32.32.8
+    x86_sse42_crc32_64_64,                     // llvm.x86.sse42.crc32.64.64
+    x86_sse42_pcmpestri128,                    // llvm.x86.sse42.pcmpestri128
+    x86_sse42_pcmpestria128,                   // llvm.x86.sse42.pcmpestria128
+    x86_sse42_pcmpestric128,                   // llvm.x86.sse42.pcmpestric128
+    x86_sse42_pcmpestrio128,                   // llvm.x86.sse42.pcmpestrio128
+    x86_sse42_pcmpestris128,                   // llvm.x86.sse42.pcmpestris128
+    x86_sse42_pcmpestriz128,                   // llvm.x86.sse42.pcmpestriz128
+    x86_sse42_pcmpestrm128,                    // llvm.x86.sse42.pcmpestrm128
+    x86_sse42_pcmpistri128,                    // llvm.x86.sse42.pcmpistri128
+    x86_sse42_pcmpistria128,                   // llvm.x86.sse42.pcmpistria128
+    x86_sse42_pcmpistric128,                   // llvm.x86.sse42.pcmpistric128
+    x86_sse42_pcmpistrio128,                   // llvm.x86.sse42.pcmpistrio128
+    x86_sse42_pcmpistris128,                   // llvm.x86.sse42.pcmpistris128
+    x86_sse42_pcmpistriz128,                   // llvm.x86.sse42.pcmpistriz128
+    x86_sse42_pcmpistrm128,                    // llvm.x86.sse42.pcmpistrm128
+    x86_sse4a_extrq,                           // llvm.x86.sse4a.extrq
+    x86_sse4a_extrqi,                          // llvm.x86.sse4a.extrqi
+    x86_sse4a_insertq,                         // llvm.x86.sse4a.insertq
+    x86_sse4a_insertqi,                        // llvm.x86.sse4a.insertqi
+    x86_ssse3_pabs_b,                          // llvm.x86.ssse3.pabs.b
+    x86_ssse3_pabs_d,                          // llvm.x86.ssse3.pabs.d
+    x86_ssse3_pabs_w,                          // llvm.x86.ssse3.pabs.w
+    x86_ssse3_phadd_d,                         // llvm.x86.ssse3.phadd.d
+    x86_ssse3_phadd_d_128,                     // llvm.x86.ssse3.phadd.d.128
+    x86_ssse3_phadd_sw,                        // llvm.x86.ssse3.phadd.sw
+    x86_ssse3_phadd_sw_128,                    // llvm.x86.ssse3.phadd.sw.128
+    x86_ssse3_phadd_w,                         // llvm.x86.ssse3.phadd.w
+    x86_ssse3_phadd_w_128,                     // llvm.x86.ssse3.phadd.w.128
+    x86_ssse3_phsub_d,                         // llvm.x86.ssse3.phsub.d
+    x86_ssse3_phsub_d_128,                     // llvm.x86.ssse3.phsub.d.128
+    x86_ssse3_phsub_sw,                        // llvm.x86.ssse3.phsub.sw
+    x86_ssse3_phsub_sw_128,                    // llvm.x86.ssse3.phsub.sw.128
+    x86_ssse3_phsub_w,                         // llvm.x86.ssse3.phsub.w
+    x86_ssse3_phsub_w_128,                     // llvm.x86.ssse3.phsub.w.128
+    x86_ssse3_pmadd_ub_sw,                     // llvm.x86.ssse3.pmadd.ub.sw
+    x86_ssse3_pmadd_ub_sw_128,                 // llvm.x86.ssse3.pmadd.ub.sw.128
+    x86_ssse3_pmul_hr_sw,                      // llvm.x86.ssse3.pmul.hr.sw
+    x86_ssse3_pmul_hr_sw_128,                  // llvm.x86.ssse3.pmul.hr.sw.128
+    x86_ssse3_pshuf_b,                         // llvm.x86.ssse3.pshuf.b
+    x86_ssse3_pshuf_b_128,                     // llvm.x86.ssse3.pshuf.b.128
+    x86_ssse3_psign_b,                         // llvm.x86.ssse3.psign.b
+    x86_ssse3_psign_b_128,                     // llvm.x86.ssse3.psign.b.128
+    x86_ssse3_psign_d,                         // llvm.x86.ssse3.psign.d
+    x86_ssse3_psign_d_128,                     // llvm.x86.ssse3.psign.d.128
+    x86_ssse3_psign_w,                         // llvm.x86.ssse3.psign.w
+    x86_ssse3_psign_w_128,                     // llvm.x86.ssse3.psign.w.128
+    x86_subborrow_u32,                         // llvm.x86.subborrow.u32
+    x86_subborrow_u64,                         // llvm.x86.subborrow.u64
+    x86_tbm_bextri_u32,                        // llvm.x86.tbm.bextri.u32
+    x86_tbm_bextri_u64,                        // llvm.x86.tbm.bextri.u64
+    x86_vcvtph2ps_128,                         // llvm.x86.vcvtph2ps.128
+    x86_vcvtph2ps_256,                         // llvm.x86.vcvtph2ps.256
+    x86_vcvtps2ph_128,                         // llvm.x86.vcvtps2ph.128
+    x86_vcvtps2ph_256,                         // llvm.x86.vcvtps2ph.256
+    x86_vgf2p8affineinvqb_128,                 // llvm.x86.vgf2p8affineinvqb.128
+    x86_vgf2p8affineinvqb_256,                 // llvm.x86.vgf2p8affineinvqb.256
+    x86_vgf2p8affineinvqb_512,                 // llvm.x86.vgf2p8affineinvqb.512
+    x86_vgf2p8affineqb_128,                    // llvm.x86.vgf2p8affineqb.128
+    x86_vgf2p8affineqb_256,                    // llvm.x86.vgf2p8affineqb.256
+    x86_vgf2p8affineqb_512,                    // llvm.x86.vgf2p8affineqb.512
+    x86_vgf2p8mulb_128,                        // llvm.x86.vgf2p8mulb.128
+    x86_vgf2p8mulb_256,                        // llvm.x86.vgf2p8mulb.256
+    x86_vgf2p8mulb_512,                        // llvm.x86.vgf2p8mulb.512
+    x86_wrfsbase_32,                           // llvm.x86.wrfsbase.32
+    x86_wrfsbase_64,                           // llvm.x86.wrfsbase.64
+    x86_wrgsbase_32,                           // llvm.x86.wrgsbase.32
+    x86_wrgsbase_64,                           // llvm.x86.wrgsbase.64
+    x86_wrpkru,                                // llvm.x86.wrpkru
+    x86_wrssd,                                 // llvm.x86.wrssd
+    x86_wrssq,                                 // llvm.x86.wrssq
+    x86_wrussd,                                // llvm.x86.wrussd
+    x86_wrussq,                                // llvm.x86.wrussq
+    x86_xabort,                                // llvm.x86.xabort
+    x86_xbegin,                                // llvm.x86.xbegin
+    x86_xend,                                  // llvm.x86.xend
+    x86_xgetbv,                                // llvm.x86.xgetbv
+    x86_xop_vfrcz_pd,                          // llvm.x86.xop.vfrcz.pd
+    x86_xop_vfrcz_pd_256,                      // llvm.x86.xop.vfrcz.pd.256
+    x86_xop_vfrcz_ps,                          // llvm.x86.xop.vfrcz.ps
+    x86_xop_vfrcz_ps_256,                      // llvm.x86.xop.vfrcz.ps.256
+    x86_xop_vfrcz_sd,                          // llvm.x86.xop.vfrcz.sd
+    x86_xop_vfrcz_ss,                          // llvm.x86.xop.vfrcz.ss
+    x86_xop_vpcomb,                            // llvm.x86.xop.vpcomb
+    x86_xop_vpcomd,                            // llvm.x86.xop.vpcomd
+    x86_xop_vpcomq,                            // llvm.x86.xop.vpcomq
+    x86_xop_vpcomub,                           // llvm.x86.xop.vpcomub
+    x86_xop_vpcomud,                           // llvm.x86.xop.vpcomud
+    x86_xop_vpcomuq,                           // llvm.x86.xop.vpcomuq
+    x86_xop_vpcomuw,                           // llvm.x86.xop.vpcomuw
+    x86_xop_vpcomw,                            // llvm.x86.xop.vpcomw
+    x86_xop_vpermil2pd,                        // llvm.x86.xop.vpermil2pd
+    x86_xop_vpermil2pd_256,                    // llvm.x86.xop.vpermil2pd.256
+    x86_xop_vpermil2ps,                        // llvm.x86.xop.vpermil2ps
+    x86_xop_vpermil2ps_256,                    // llvm.x86.xop.vpermil2ps.256
+    x86_xop_vphaddbd,                          // llvm.x86.xop.vphaddbd
+    x86_xop_vphaddbq,                          // llvm.x86.xop.vphaddbq
+    x86_xop_vphaddbw,                          // llvm.x86.xop.vphaddbw
+    x86_xop_vphadddq,                          // llvm.x86.xop.vphadddq
+    x86_xop_vphaddubd,                         // llvm.x86.xop.vphaddubd
+    x86_xop_vphaddubq,                         // llvm.x86.xop.vphaddubq
+    x86_xop_vphaddubw,                         // llvm.x86.xop.vphaddubw
+    x86_xop_vphaddudq,                         // llvm.x86.xop.vphaddudq
+    x86_xop_vphadduwd,                         // llvm.x86.xop.vphadduwd
+    x86_xop_vphadduwq,                         // llvm.x86.xop.vphadduwq
+    x86_xop_vphaddwd,                          // llvm.x86.xop.vphaddwd
+    x86_xop_vphaddwq,                          // llvm.x86.xop.vphaddwq
+    x86_xop_vphsubbw,                          // llvm.x86.xop.vphsubbw
+    x86_xop_vphsubdq,                          // llvm.x86.xop.vphsubdq
+    x86_xop_vphsubwd,                          // llvm.x86.xop.vphsubwd
+    x86_xop_vpmacsdd,                          // llvm.x86.xop.vpmacsdd
+    x86_xop_vpmacsdqh,                         // llvm.x86.xop.vpmacsdqh
+    x86_xop_vpmacsdql,                         // llvm.x86.xop.vpmacsdql
+    x86_xop_vpmacssdd,                         // llvm.x86.xop.vpmacssdd
+    x86_xop_vpmacssdqh,                        // llvm.x86.xop.vpmacssdqh
+    x86_xop_vpmacssdql,                        // llvm.x86.xop.vpmacssdql
+    x86_xop_vpmacsswd,                         // llvm.x86.xop.vpmacsswd
+    x86_xop_vpmacssww,                         // llvm.x86.xop.vpmacssww
+    x86_xop_vpmacswd,                          // llvm.x86.xop.vpmacswd
+    x86_xop_vpmacsww,                          // llvm.x86.xop.vpmacsww
+    x86_xop_vpmadcsswd,                        // llvm.x86.xop.vpmadcsswd
+    x86_xop_vpmadcswd,                         // llvm.x86.xop.vpmadcswd
+    x86_xop_vpperm,                            // llvm.x86.xop.vpperm
+    x86_xop_vprotb,                            // llvm.x86.xop.vprotb
+    x86_xop_vprotbi,                           // llvm.x86.xop.vprotbi
+    x86_xop_vprotd,                            // llvm.x86.xop.vprotd
+    x86_xop_vprotdi,                           // llvm.x86.xop.vprotdi
+    x86_xop_vprotq,                            // llvm.x86.xop.vprotq
+    x86_xop_vprotqi,                           // llvm.x86.xop.vprotqi
+    x86_xop_vprotw,                            // llvm.x86.xop.vprotw
+    x86_xop_vprotwi,                           // llvm.x86.xop.vprotwi
+    x86_xop_vpshab,                            // llvm.x86.xop.vpshab
+    x86_xop_vpshad,                            // llvm.x86.xop.vpshad
+    x86_xop_vpshaq,                            // llvm.x86.xop.vpshaq
+    x86_xop_vpshaw,                            // llvm.x86.xop.vpshaw
+    x86_xop_vpshlb,                            // llvm.x86.xop.vpshlb
+    x86_xop_vpshld,                            // llvm.x86.xop.vpshld
+    x86_xop_vpshlq,                            // llvm.x86.xop.vpshlq
+    x86_xop_vpshlw,                            // llvm.x86.xop.vpshlw
+    x86_xrstor,                                // llvm.x86.xrstor
+    x86_xrstor64,                              // llvm.x86.xrstor64
+    x86_xrstors,                               // llvm.x86.xrstors
+    x86_xrstors64,                             // llvm.x86.xrstors64
+    x86_xsave,                                 // llvm.x86.xsave
+    x86_xsave64,                               // llvm.x86.xsave64
+    x86_xsavec,                                // llvm.x86.xsavec
+    x86_xsavec64,                              // llvm.x86.xsavec64
+    x86_xsaveopt,                              // llvm.x86.xsaveopt
+    x86_xsaveopt64,                            // llvm.x86.xsaveopt64
+    x86_xsaves,                                // llvm.x86.xsaves
+    x86_xsaves64,                              // llvm.x86.xsaves64
+    x86_xsetbv,                                // llvm.x86.xsetbv
+    x86_xtest,                                 // llvm.x86.xtest
+    xcore_bitrev,                              // llvm.xcore.bitrev
+    xcore_checkevent,                          // llvm.xcore.checkevent
+    xcore_chkct,                               // llvm.xcore.chkct
+    xcore_clre,                                // llvm.xcore.clre
+    xcore_clrpt,                               // llvm.xcore.clrpt
+    xcore_clrsr,                               // llvm.xcore.clrsr
+    xcore_crc32,                               // llvm.xcore.crc32
+    xcore_crc8,                                // llvm.xcore.crc8
+    xcore_edu,                                 // llvm.xcore.edu
+    xcore_eeu,                                 // llvm.xcore.eeu
+    xcore_endin,                               // llvm.xcore.endin
+    xcore_freer,                               // llvm.xcore.freer
+    xcore_geted,                               // llvm.xcore.geted
+    xcore_getet,                               // llvm.xcore.getet
+    xcore_getid,                               // llvm.xcore.getid
+    xcore_getps,                               // llvm.xcore.getps
+    xcore_getr,                                // llvm.xcore.getr
+    xcore_getst,                               // llvm.xcore.getst
+    xcore_getts,                               // llvm.xcore.getts
+    xcore_in,                                  // llvm.xcore.in
+    xcore_inct,                                // llvm.xcore.inct
+    xcore_initcp,                              // llvm.xcore.initcp
+    xcore_initdp,                              // llvm.xcore.initdp
+    xcore_initlr,                              // llvm.xcore.initlr
+    xcore_initpc,                              // llvm.xcore.initpc
+    xcore_initsp,                              // llvm.xcore.initsp
+    xcore_inshr,                               // llvm.xcore.inshr
+    xcore_int,                                 // llvm.xcore.int
+    xcore_mjoin,                               // llvm.xcore.mjoin
+    xcore_msync,                               // llvm.xcore.msync
+    xcore_out,                                 // llvm.xcore.out
+    xcore_outct,                               // llvm.xcore.outct
+    xcore_outshr,                              // llvm.xcore.outshr
+    xcore_outt,                                // llvm.xcore.outt
+    xcore_peek,                                // llvm.xcore.peek
+    xcore_setc,                                // llvm.xcore.setc
+    xcore_setclk,                              // llvm.xcore.setclk
+    xcore_setd,                                // llvm.xcore.setd
+    xcore_setev,                               // llvm.xcore.setev
+    xcore_setps,                               // llvm.xcore.setps
+    xcore_setpsc,                              // llvm.xcore.setpsc
+    xcore_setpt,                               // llvm.xcore.setpt
+    xcore_setrdy,                              // llvm.xcore.setrdy
+    xcore_setsr,                               // llvm.xcore.setsr
+    xcore_settw,                               // llvm.xcore.settw
+    xcore_setv,                                // llvm.xcore.setv
+    xcore_sext,                                // llvm.xcore.sext
+    xcore_ssync,                               // llvm.xcore.ssync
+    xcore_syncr,                               // llvm.xcore.syncr
+    xcore_testct,                              // llvm.xcore.testct
+    xcore_testwct,                             // llvm.xcore.testwct
+    xcore_waitevent,                           // llvm.xcore.waitevent
+    xcore_zext                                 // llvm.xcore.zext
+#endif
+
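+// Illustrative note (not TableGen output): the enum values above give each
+// intrinsic a stable Intrinsic::ID, so front-end code can test for a specific
+// intrinsic directly. A minimal sketch, assuming the usual IR headers:
+//
+//   #include "llvm/IR/Function.h"
+//   #include "llvm/IR/Intrinsics.h"
+//
+//   // Returns true iff F is the SSE2 packed-double sqrt intrinsic.
+//   static bool isSSE2SqrtPD(const llvm::Function *F) {
+//     return F->getIntrinsicID() == llvm::Intrinsic::x86_sse2_sqrt_pd;
+//   }
+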
+// Target mapping
+#ifdef GET_INTRINSIC_TARGET_DATA
+struct IntrinsicTargetInfo {
+  llvm::StringLiteral Name;
+  size_t Offset;
+  size_t Count;
+};
+static constexpr IntrinsicTargetInfo TargetInfos[] = {
+  {llvm::StringLiteral(""), 0, 174},
+  {llvm::StringLiteral("aarch64"), 174, 189},
+  {llvm::StringLiteral("amdgcn"), 363, 213},
+  {llvm::StringLiteral("arm"), 576, 240},
+  {llvm::StringLiteral("bpf"), 816, 4},
+  {llvm::StringLiteral("hexagon"), 820, 1708},
+  {llvm::StringLiteral("mips"), 2528, 667},
+  {llvm::StringLiteral("nvvm"), 3195, 1005},
+  {llvm::StringLiteral("ppc"), 4200, 396},
+  {llvm::StringLiteral("r600"), 4596, 21},
+  {llvm::StringLiteral("s390"), 4617, 220},
+  {llvm::StringLiteral("wasm"), 4837, 8},
+  {llvm::StringLiteral("x86"), 4845, 1506},
+  {llvm::StringLiteral("xcore"), 6351, 53},
+};
+#endif
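+// Illustrative note (not TableGen output): each TargetInfos entry describes a
+// contiguous slice [Offset, Offset + Count) of the intrinsic name table that
+// follows; the slices tile the table exactly (each Offset equals the previous
+// entry's Offset + Count), so e.g. "x86" at offset 4845 with count 1506 owns
+// indices 4845..6350, and the empty prefix at entry 0 holds the 174
+// target-independent intrinsics. A minimal sketch of a slice lookup, assuming
+// the TargetInfos array above is in scope:
+//
+//   #include <utility>
+//   #include "llvm/ADT/StringRef.h"
+//
+//   // Returns the (offset, count) slice for a target prefix, or (0, 0) if
+//   // the prefix is unknown (count 0 cannot collide with a real entry).
+//   static std::pair<size_t, size_t> findTargetSlice(llvm::StringRef Prefix) {
+//     for (const IntrinsicTargetInfo &TI : TargetInfos)
+//       if (TI.Name == Prefix)
+//         return {TI.Offset, TI.Count};
+//     return {0, 0};
+//   }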
+
+// Intrinsic ID to name table
+#ifdef GET_INTRINSIC_NAME_TABLE
+  // Note that entry #0 is the invalid intrinsic!
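+  // Illustrative note (not TableGen output): this fragment deliberately omits
+  // that invalid entry, so the includer is expected to prepend it; enum IDs
+  // (which start at 1) then index the combined array directly. A minimal
+  // sketch of a hypothetical includer built that way:
+  //
+  //   static const char *const IntrinsicNameTable[] = {
+  //     "not_intrinsic",          // entry #0: the invalid intrinsic
+  //   #define GET_INTRINSIC_NAME_TABLE
+  //   #include "llvm/IR/Intrinsics.gen"
+  //   #undef GET_INTRINSIC_NAME_TABLE
+  //   };
+  //   // Then IntrinsicNameTable[ID] is the name for intrinsic ID.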
+  "llvm.addressofreturnaddress",
+  "llvm.adjust.trampoline",
+  "llvm.annotation",
+  "llvm.assume",
+  "llvm.bitreverse",
+  "llvm.bswap",
+  "llvm.canonicalize",
+  "llvm.ceil",
+  "llvm.clear_cache",
+  "llvm.codeview.annotation",
+  "llvm.convert.from.fp16",
+  "llvm.convert.to.fp16",
+  "llvm.copysign",
+  "llvm.coro.alloc",
+  "llvm.coro.begin",
+  "llvm.coro.destroy",
+  "llvm.coro.done",
+  "llvm.coro.end",
+  "llvm.coro.frame",
+  "llvm.coro.free",
+  "llvm.coro.id",
+  "llvm.coro.param",
+  "llvm.coro.promise",
+  "llvm.coro.resume",
+  "llvm.coro.save",
+  "llvm.coro.size",
+  "llvm.coro.subfn.addr",
+  "llvm.coro.suspend",
+  "llvm.cos",
+  "llvm.ctlz",
+  "llvm.ctpop",
+  "llvm.cttz",
+  "llvm.dbg.addr",
+  "llvm.dbg.declare",
+  "llvm.dbg.value",
+  "llvm.debugtrap",
+  "llvm.donothing",
+  "llvm.eh.dwarf.cfa",
+  "llvm.eh.exceptioncode",
+  "llvm.eh.exceptionpointer",
+  "llvm.eh.return.i32",
+  "llvm.eh.return.i64",
+  "llvm.eh.sjlj.callsite",
+  "llvm.eh.sjlj.functioncontext",
+  "llvm.eh.sjlj.longjmp",
+  "llvm.eh.sjlj.lsda",
+  "llvm.eh.sjlj.setjmp",
+  "llvm.eh.sjlj.setup.dispatch",
+  "llvm.eh.typeid.for",
+  "llvm.eh.unwind.init",
+  "llvm.exp",
+  "llvm.exp2",
+  "llvm.expect",
+  "llvm.experimental.constrained.cos",
+  "llvm.experimental.constrained.exp",
+  "llvm.experimental.constrained.exp2",
+  "llvm.experimental.constrained.fadd",
+  "llvm.experimental.constrained.fdiv",
+  "llvm.experimental.constrained.fma",
+  "llvm.experimental.constrained.fmul",
+  "llvm.experimental.constrained.frem",
+  "llvm.experimental.constrained.fsub",
+  "llvm.experimental.constrained.log",
+  "llvm.experimental.constrained.log10",
+  "llvm.experimental.constrained.log2",
+  "llvm.experimental.constrained.nearbyint",
+  "llvm.experimental.constrained.pow",
+  "llvm.experimental.constrained.powi",
+  "llvm.experimental.constrained.rint",
+  "llvm.experimental.constrained.sin",
+  "llvm.experimental.constrained.sqrt",
+  "llvm.experimental.deoptimize",
+  "llvm.experimental.gc.relocate",
+  "llvm.experimental.gc.result",
+  "llvm.experimental.gc.statepoint",
+  "llvm.experimental.guard",
+  "llvm.experimental.patchpoint.i64",
+  "llvm.experimental.patchpoint.void",
+  "llvm.experimental.stackmap",
+  "llvm.experimental.vector.reduce.add",
+  "llvm.experimental.vector.reduce.and",
+  "llvm.experimental.vector.reduce.fadd",
+  "llvm.experimental.vector.reduce.fmax",
+  "llvm.experimental.vector.reduce.fmin",
+  "llvm.experimental.vector.reduce.fmul",
+  "llvm.experimental.vector.reduce.mul",
+  "llvm.experimental.vector.reduce.or",
+  "llvm.experimental.vector.reduce.smax",
+  "llvm.experimental.vector.reduce.smin",
+  "llvm.experimental.vector.reduce.umax",
+  "llvm.experimental.vector.reduce.umin",
+  "llvm.experimental.vector.reduce.xor",
+  "llvm.fabs",
+  "llvm.floor",
+  "llvm.flt.rounds",
+  "llvm.fma",
+  "llvm.fmuladd",
+  "llvm.frameaddress",
+  "llvm.gcread",
+  "llvm.gcroot",
+  "llvm.gcwrite",
+  "llvm.get.dynamic.area.offset",
+  "llvm.icall.branch.funnel",
+  "llvm.init.trampoline",
+  "llvm.instrprof.increment",
+  "llvm.instrprof.increment.step",
+  "llvm.instrprof.value.profile",
+  "llvm.invariant.end",
+  "llvm.invariant.group.barrier",
+  "llvm.invariant.start",
+  "llvm.lifetime.end",
+  "llvm.lifetime.start",
+  "llvm.load.relative",
+  "llvm.localaddress",
+  "llvm.localescape",
+  "llvm.localrecover",
+  "llvm.log",
+  "llvm.log10",
+  "llvm.log2",
+  "llvm.longjmp",
+  "llvm.masked.compressstore",
+  "llvm.masked.expandload",
+  "llvm.masked.gather",
+  "llvm.masked.load",
+  "llvm.masked.scatter",
+  "llvm.masked.store",
+  "llvm.maxnum",
+  "llvm.memcpy",
+  "llvm.memcpy.element.unordered.atomic",
+  "llvm.memmove",
+  "llvm.memmove.element.unordered.atomic",
+  "llvm.memset",
+  "llvm.memset.element.unordered.atomic",
+  "llvm.minnum",
+  "llvm.nearbyint",
+  "llvm.objectsize",
+  "llvm.pcmarker",
+  "llvm.pow",
+  "llvm.powi",
+  "llvm.prefetch",
+  "llvm.ptr.annotation",
+  "llvm.read_register",
+  "llvm.readcyclecounter",
+  "llvm.returnaddress",
+  "llvm.rint",
+  "llvm.round",
+  "llvm.sadd.with.overflow",
+  "llvm.setjmp",
+  "llvm.sideeffect",
+  "llvm.siglongjmp",
+  "llvm.sigsetjmp",
+  "llvm.sin",
+  "llvm.smul.with.overflow",
+  "llvm.sqrt",
+  "llvm.ssa.copy",
+  "llvm.ssub.with.overflow",
+  "llvm.stackguard",
+  "llvm.stackprotector",
+  "llvm.stackrestore",
+  "llvm.stacksave",
+  "llvm.thread.pointer",
+  "llvm.trap",
+  "llvm.trunc",
+  "llvm.type.checked.load",
+  "llvm.type.test",
+  "llvm.uadd.with.overflow",
+  "llvm.umul.with.overflow",
+  "llvm.usub.with.overflow",
+  "llvm.va_copy",
+  "llvm.va_end",
+  "llvm.va_start",
+  "llvm.var.annotation",
+  "llvm.write_register",
+  "llvm.xray.customevent",
+  "llvm.aarch64.clrex",
+  "llvm.aarch64.crc32b",
+  "llvm.aarch64.crc32cb",
+  "llvm.aarch64.crc32ch",
+  "llvm.aarch64.crc32cw",
+  "llvm.aarch64.crc32cx",
+  "llvm.aarch64.crc32h",
+  "llvm.aarch64.crc32w",
+  "llvm.aarch64.crc32x",
+  "llvm.aarch64.crypto.aesd",
+  "llvm.aarch64.crypto.aese",
+  "llvm.aarch64.crypto.aesimc",
+  "llvm.aarch64.crypto.aesmc",
+  "llvm.aarch64.crypto.sha1c",
+  "llvm.aarch64.crypto.sha1h",
+  "llvm.aarch64.crypto.sha1m",
+  "llvm.aarch64.crypto.sha1p",
+  "llvm.aarch64.crypto.sha1su0",
+  "llvm.aarch64.crypto.sha1su1",
+  "llvm.aarch64.crypto.sha256h",
+  "llvm.aarch64.crypto.sha256h2",
+  "llvm.aarch64.crypto.sha256su0",
+  "llvm.aarch64.crypto.sha256su1",
+  "llvm.aarch64.dmb",
+  "llvm.aarch64.dsb",
+  "llvm.aarch64.hint",
+  "llvm.aarch64.isb",
+  "llvm.aarch64.ldaxp",
+  "llvm.aarch64.ldaxr",
+  "llvm.aarch64.ldxp",
+  "llvm.aarch64.ldxr",
+  "llvm.aarch64.neon.abs",
+  "llvm.aarch64.neon.addhn",
+  "llvm.aarch64.neon.addp",
+  "llvm.aarch64.neon.cls",
+  "llvm.aarch64.neon.fabd",
+  "llvm.aarch64.neon.facge",
+  "llvm.aarch64.neon.facgt",
+  "llvm.aarch64.neon.faddv",
+  "llvm.aarch64.neon.fcvtas",
+  "llvm.aarch64.neon.fcvtau",
+  "llvm.aarch64.neon.fcvtms",
+  "llvm.aarch64.neon.fcvtmu",
+  "llvm.aarch64.neon.fcvtns",
+  "llvm.aarch64.neon.fcvtnu",
+  "llvm.aarch64.neon.fcvtps",
+  "llvm.aarch64.neon.fcvtpu",
+  "llvm.aarch64.neon.fcvtxn",
+  "llvm.aarch64.neon.fcvtzs",
+  "llvm.aarch64.neon.fcvtzu",
+  "llvm.aarch64.neon.fmax",
+  "llvm.aarch64.neon.fmaxnm",
+  "llvm.aarch64.neon.fmaxnmp",
+  "llvm.aarch64.neon.fmaxnmv",
+  "llvm.aarch64.neon.fmaxp",
+  "llvm.aarch64.neon.fmaxv",
+  "llvm.aarch64.neon.fmin",
+  "llvm.aarch64.neon.fminnm",
+  "llvm.aarch64.neon.fminnmp",
+  "llvm.aarch64.neon.fminnmv",
+  "llvm.aarch64.neon.fminp",
+  "llvm.aarch64.neon.fminv",
+  "llvm.aarch64.neon.fmulx",
+  "llvm.aarch64.neon.frecpe",
+  "llvm.aarch64.neon.frecps",
+  "llvm.aarch64.neon.frecpx",
+  "llvm.aarch64.neon.frintn",
+  "llvm.aarch64.neon.frsqrte",
+  "llvm.aarch64.neon.frsqrts",
+  "llvm.aarch64.neon.ld1x2",
+  "llvm.aarch64.neon.ld1x3",
+  "llvm.aarch64.neon.ld1x4",
+  "llvm.aarch64.neon.ld2",
+  "llvm.aarch64.neon.ld2lane",
+  "llvm.aarch64.neon.ld2r",
+  "llvm.aarch64.neon.ld3",
+  "llvm.aarch64.neon.ld3lane",
+  "llvm.aarch64.neon.ld3r",
+  "llvm.aarch64.neon.ld4",
+  "llvm.aarch64.neon.ld4lane",
+  "llvm.aarch64.neon.ld4r",
+  "llvm.aarch64.neon.pmul",
+  "llvm.aarch64.neon.pmull",
+  "llvm.aarch64.neon.pmull64",
+  "llvm.aarch64.neon.raddhn",
+  "llvm.aarch64.neon.rbit",
+  "llvm.aarch64.neon.rshrn",
+  "llvm.aarch64.neon.rsubhn",
+  "llvm.aarch64.neon.sabd",
+  "llvm.aarch64.neon.saddlp",
+  "llvm.aarch64.neon.saddlv",
+  "llvm.aarch64.neon.saddv",
+  "llvm.aarch64.neon.scalar.sqxtn",
+  "llvm.aarch64.neon.scalar.sqxtun",
+  "llvm.aarch64.neon.scalar.uqxtn",
+  "llvm.aarch64.neon.shadd",
+  "llvm.aarch64.neon.shll",
+  "llvm.aarch64.neon.shsub",
+  "llvm.aarch64.neon.smax",
+  "llvm.aarch64.neon.smaxp",
+  "llvm.aarch64.neon.smaxv",
+  "llvm.aarch64.neon.smin",
+  "llvm.aarch64.neon.sminp",
+  "llvm.aarch64.neon.sminv",
+  "llvm.aarch64.neon.smull",
+  "llvm.aarch64.neon.sqabs",
+  "llvm.aarch64.neon.sqadd",
+  "llvm.aarch64.neon.sqdmulh",
+  "llvm.aarch64.neon.sqdmull",
+  "llvm.aarch64.neon.sqdmulls.scalar",
+  "llvm.aarch64.neon.sqneg",
+  "llvm.aarch64.neon.sqrdmulh",
+  "llvm.aarch64.neon.sqrshl",
+  "llvm.aarch64.neon.sqrshrn",
+  "llvm.aarch64.neon.sqrshrun",
+  "llvm.aarch64.neon.sqshl",
+  "llvm.aarch64.neon.sqshlu",
+  "llvm.aarch64.neon.sqshrn",
+  "llvm.aarch64.neon.sqshrun",
+  "llvm.aarch64.neon.sqsub",
+  "llvm.aarch64.neon.sqxtn",
+  "llvm.aarch64.neon.sqxtun",
+  "llvm.aarch64.neon.srhadd",
+  "llvm.aarch64.neon.srshl",
+  "llvm.aarch64.neon.sshl",
+  "llvm.aarch64.neon.sshll",
+  "llvm.aarch64.neon.st1x2",
+  "llvm.aarch64.neon.st1x3",
+  "llvm.aarch64.neon.st1x4",
+  "llvm.aarch64.neon.st2",
+  "llvm.aarch64.neon.st2lane",
+  "llvm.aarch64.neon.st3",
+  "llvm.aarch64.neon.st3lane",
+  "llvm.aarch64.neon.st4",
+  "llvm.aarch64.neon.st4lane",
+  "llvm.aarch64.neon.subhn",
+  "llvm.aarch64.neon.suqadd",
+  "llvm.aarch64.neon.tbl1",
+  "llvm.aarch64.neon.tbl2",
+  "llvm.aarch64.neon.tbl3",
+  "llvm.aarch64.neon.tbl4",
+  "llvm.aarch64.neon.tbx1",
+  "llvm.aarch64.neon.tbx2",
+  "llvm.aarch64.neon.tbx3",
+  "llvm.aarch64.neon.tbx4",
+  "llvm.aarch64.neon.uabd",
+  "llvm.aarch64.neon.uaddlp",
+  "llvm.aarch64.neon.uaddlv",
+  "llvm.aarch64.neon.uaddv",
+  "llvm.aarch64.neon.uhadd",
+  "llvm.aarch64.neon.uhsub",
+  "llvm.aarch64.neon.umax",
+  "llvm.aarch64.neon.umaxp",
+  "llvm.aarch64.neon.umaxv",
+  "llvm.aarch64.neon.umin",
+  "llvm.aarch64.neon.uminp",
+  "llvm.aarch64.neon.uminv",
+  "llvm.aarch64.neon.umull",
+  "llvm.aarch64.neon.uqadd",
+  "llvm.aarch64.neon.uqrshl",
+  "llvm.aarch64.neon.uqrshrn",
+  "llvm.aarch64.neon.uqshl",
+  "llvm.aarch64.neon.uqshrn",
+  "llvm.aarch64.neon.uqsub",
+  "llvm.aarch64.neon.uqxtn",
+  "llvm.aarch64.neon.urecpe",
+  "llvm.aarch64.neon.urhadd",
+  "llvm.aarch64.neon.urshl",
+  "llvm.aarch64.neon.ursqrte",
+  "llvm.aarch64.neon.ushl",
+  "llvm.aarch64.neon.ushll",
+  "llvm.aarch64.neon.usqadd",
+  "llvm.aarch64.neon.vcopy.lane",
+  "llvm.aarch64.neon.vcvtfp2fxs",
+  "llvm.aarch64.neon.vcvtfp2fxu",
+  "llvm.aarch64.neon.vcvtfp2hf",
+  "llvm.aarch64.neon.vcvtfxs2fp",
+  "llvm.aarch64.neon.vcvtfxu2fp",
+  "llvm.aarch64.neon.vcvthf2fp",
+  "llvm.aarch64.neon.vsli",
+  "llvm.aarch64.neon.vsri",
+  "llvm.aarch64.sdiv",
+  "llvm.aarch64.sisd.fabd",
+  "llvm.aarch64.sisd.fcvtxn",
+  "llvm.aarch64.stlxp",
+  "llvm.aarch64.stlxr",
+  "llvm.aarch64.stxp",
+  "llvm.aarch64.stxr",
+  "llvm.aarch64.udiv",
+  "llvm.amdgcn.alignbit",
+  "llvm.amdgcn.alignbyte",
+  "llvm.amdgcn.atomic.dec",
+  "llvm.amdgcn.atomic.inc",
+  "llvm.amdgcn.break",
+  "llvm.amdgcn.buffer.atomic.add",
+  "llvm.amdgcn.buffer.atomic.and",
+  "llvm.amdgcn.buffer.atomic.cmpswap",
+  "llvm.amdgcn.buffer.atomic.or",
+  "llvm.amdgcn.buffer.atomic.smax",
+  "llvm.amdgcn.buffer.atomic.smin",
+  "llvm.amdgcn.buffer.atomic.sub",
+  "llvm.amdgcn.buffer.atomic.swap",
+  "llvm.amdgcn.buffer.atomic.umax",
+  "llvm.amdgcn.buffer.atomic.umin",
+  "llvm.amdgcn.buffer.atomic.xor",
+  "llvm.amdgcn.buffer.load",
+  "llvm.amdgcn.buffer.load.format",
+  "llvm.amdgcn.buffer.store",
+  "llvm.amdgcn.buffer.store.format",
+  "llvm.amdgcn.buffer.wbinvl1",
+  "llvm.amdgcn.buffer.wbinvl1.sc",
+  "llvm.amdgcn.buffer.wbinvl1.vol",
+  "llvm.amdgcn.class",
+  "llvm.amdgcn.cos",
+  "llvm.amdgcn.cubeid",
+  "llvm.amdgcn.cubema",
+  "llvm.amdgcn.cubesc",
+  "llvm.amdgcn.cubetc",
+  "llvm.amdgcn.cvt.pk.i16",
+  "llvm.amdgcn.cvt.pk.u16",
+  "llvm.amdgcn.cvt.pk.u8.f32",
+  "llvm.amdgcn.cvt.pknorm.i16",
+  "llvm.amdgcn.cvt.pknorm.u16",
+  "llvm.amdgcn.cvt.pkrtz",
+  "llvm.amdgcn.dispatch.id",
+  "llvm.amdgcn.dispatch.ptr",
+  "llvm.amdgcn.div.fixup",
+  "llvm.amdgcn.div.fmas",
+  "llvm.amdgcn.div.scale",
+  "llvm.amdgcn.ds.bpermute",
+  "llvm.amdgcn.ds.fadd",
+  "llvm.amdgcn.ds.fmax",
+  "llvm.amdgcn.ds.fmin",
+  "llvm.amdgcn.ds.permute",
+  "llvm.amdgcn.ds.swizzle",
+  "llvm.amdgcn.else",
+  "llvm.amdgcn.else.break",
+  "llvm.amdgcn.end.cf",
+  "llvm.amdgcn.exp",
+  "llvm.amdgcn.exp.compr",
+  "llvm.amdgcn.fcmp",
+  "llvm.amdgcn.fdiv.fast",
+  "llvm.amdgcn.fmed3",
+  "llvm.amdgcn.fmul.legacy",
+  "llvm.amdgcn.fract",
+  "llvm.amdgcn.frexp.exp",
+  "llvm.amdgcn.frexp.mant",
+  "llvm.amdgcn.groupstaticsize",
+  "llvm.amdgcn.icmp",
+  "llvm.amdgcn.if",
+  "llvm.amdgcn.if.break",
+  "llvm.amdgcn.image.atomic.add",
+  "llvm.amdgcn.image.atomic.and",
+  "llvm.amdgcn.image.atomic.cmpswap",
+  "llvm.amdgcn.image.atomic.dec",
+  "llvm.amdgcn.image.atomic.inc",
+  "llvm.amdgcn.image.atomic.or",
+  "llvm.amdgcn.image.atomic.smax",
+  "llvm.amdgcn.image.atomic.smin",
+  "llvm.amdgcn.image.atomic.sub",
+  "llvm.amdgcn.image.atomic.swap",
+  "llvm.amdgcn.image.atomic.umax",
+  "llvm.amdgcn.image.atomic.umin",
+  "llvm.amdgcn.image.atomic.xor",
+  "llvm.amdgcn.image.gather4",
+  "llvm.amdgcn.image.gather4.b",
+  "llvm.amdgcn.image.gather4.b.cl",
+  "llvm.amdgcn.image.gather4.b.cl.o",
+  "llvm.amdgcn.image.gather4.b.o",
+  "llvm.amdgcn.image.gather4.c",
+  "llvm.amdgcn.image.gather4.c.b",
+  "llvm.amdgcn.image.gather4.c.b.cl",
+  "llvm.amdgcn.image.gather4.c.b.cl.o",
+  "llvm.amdgcn.image.gather4.c.b.o",
+  "llvm.amdgcn.image.gather4.c.cl",
+  "llvm.amdgcn.image.gather4.c.cl.o",
+  "llvm.amdgcn.image.gather4.c.l",
+  "llvm.amdgcn.image.gather4.c.l.o",
+  "llvm.amdgcn.image.gather4.c.lz",
+  "llvm.amdgcn.image.gather4.c.lz.o",
+  "llvm.amdgcn.image.gather4.c.o",
+  "llvm.amdgcn.image.gather4.cl",
+  "llvm.amdgcn.image.gather4.cl.o",
+  "llvm.amdgcn.image.gather4.l",
+  "llvm.amdgcn.image.gather4.l.o",
+  "llvm.amdgcn.image.gather4.lz",
+  "llvm.amdgcn.image.gather4.lz.o",
+  "llvm.amdgcn.image.gather4.o",
+  "llvm.amdgcn.image.getlod",
+  "llvm.amdgcn.image.getresinfo",
+  "llvm.amdgcn.image.load",
+  "llvm.amdgcn.image.load.mip",
+  "llvm.amdgcn.image.sample",
+  "llvm.amdgcn.image.sample.b",
+  "llvm.amdgcn.image.sample.b.cl",
+  "llvm.amdgcn.image.sample.b.cl.o",
+  "llvm.amdgcn.image.sample.b.o",
+  "llvm.amdgcn.image.sample.c",
+  "llvm.amdgcn.image.sample.c.b",
+  "llvm.amdgcn.image.sample.c.b.cl",
+  "llvm.amdgcn.image.sample.c.b.cl.o",
+  "llvm.amdgcn.image.sample.c.b.o",
+  "llvm.amdgcn.image.sample.c.cd",
+  "llvm.amdgcn.image.sample.c.cd.cl",
+  "llvm.amdgcn.image.sample.c.cd.cl.o",
+  "llvm.amdgcn.image.sample.c.cd.o",
+  "llvm.amdgcn.image.sample.c.cl",
+  "llvm.amdgcn.image.sample.c.cl.o",
+  "llvm.amdgcn.image.sample.c.d",
+  "llvm.amdgcn.image.sample.c.d.cl",
+  "llvm.amdgcn.image.sample.c.d.cl.o",
+  "llvm.amdgcn.image.sample.c.d.o",
+  "llvm.amdgcn.image.sample.c.l",
+  "llvm.amdgcn.image.sample.c.l.o",
+  "llvm.amdgcn.image.sample.c.lz",
+  "llvm.amdgcn.image.sample.c.lz.o",
+  "llvm.amdgcn.image.sample.c.o",
+  "llvm.amdgcn.image.sample.cd",
+  "llvm.amdgcn.image.sample.cd.cl",
+  "llvm.amdgcn.image.sample.cd.cl.o",
+  "llvm.amdgcn.image.sample.cd.o",
+  "llvm.amdgcn.image.sample.cl",
+  "llvm.amdgcn.image.sample.cl.o",
+  "llvm.amdgcn.image.sample.d",
+  "llvm.amdgcn.image.sample.d.cl",
+  "llvm.amdgcn.image.sample.d.cl.o",
+  "llvm.amdgcn.image.sample.d.o",
+  "llvm.amdgcn.image.sample.l",
+  "llvm.amdgcn.image.sample.l.o",
+  "llvm.amdgcn.image.sample.lz",
+  "llvm.amdgcn.image.sample.lz.o",
+  "llvm.amdgcn.image.sample.o",
+  "llvm.amdgcn.image.store",
+  "llvm.amdgcn.image.store.mip",
+  "llvm.amdgcn.implicit.buffer.ptr",
+  "llvm.amdgcn.implicitarg.ptr",
+  "llvm.amdgcn.init.exec",
+  "llvm.amdgcn.init.exec.from.input",
+  "llvm.amdgcn.interp.mov",
+  "llvm.amdgcn.interp.p1",
+  "llvm.amdgcn.interp.p2",
+  "llvm.amdgcn.kernarg.segment.ptr",
+  "llvm.amdgcn.kill",
+  "llvm.amdgcn.ldexp",
+  "llvm.amdgcn.lerp",
+  "llvm.amdgcn.log.clamp",
+  "llvm.amdgcn.loop",
+  "llvm.amdgcn.mbcnt.hi",
+  "llvm.amdgcn.mbcnt.lo",
+  "llvm.amdgcn.mov.dpp",
+  "llvm.amdgcn.mqsad.pk.u16.u8",
+  "llvm.amdgcn.mqsad.u32.u8",
+  "llvm.amdgcn.msad.u8",
+  "llvm.amdgcn.ps.live",
+  "llvm.amdgcn.qsad.pk.u16.u8",
+  "llvm.amdgcn.queue.ptr",
+  "llvm.amdgcn.rcp",
+  "llvm.amdgcn.rcp.legacy",
+  "llvm.amdgcn.readfirstlane",
+  "llvm.amdgcn.readlane",
+  "llvm.amdgcn.rsq",
+  "llvm.amdgcn.rsq.clamp",
+  "llvm.amdgcn.rsq.legacy",
+  "llvm.amdgcn.s.barrier",
+  "llvm.amdgcn.s.dcache.inv",
+  "llvm.amdgcn.s.dcache.inv.vol",
+  "llvm.amdgcn.s.dcache.wb",
+  "llvm.amdgcn.s.dcache.wb.vol",
+  "llvm.amdgcn.s.decperflevel",
+  "llvm.amdgcn.s.getpc",
+  "llvm.amdgcn.s.getreg",
+  "llvm.amdgcn.s.incperflevel",
+  "llvm.amdgcn.s.memrealtime",
+  "llvm.amdgcn.s.memtime",
+  "llvm.amdgcn.s.sendmsg",
+  "llvm.amdgcn.s.sendmsghalt",
+  "llvm.amdgcn.s.sleep",
+  "llvm.amdgcn.s.waitcnt",
+  "llvm.amdgcn.sad.hi.u8",
+  "llvm.amdgcn.sad.u16",
+  "llvm.amdgcn.sad.u8",
+  "llvm.amdgcn.sbfe",
+  "llvm.amdgcn.set.inactive",
+  "llvm.amdgcn.sffbh",
+  "llvm.amdgcn.sin",
+  "llvm.amdgcn.tbuffer.load",
+  "llvm.amdgcn.tbuffer.store",
+  "llvm.amdgcn.trig.preop",
+  "llvm.amdgcn.ubfe",
+  "llvm.amdgcn.unreachable",
+  "llvm.amdgcn.update.dpp",
+  "llvm.amdgcn.wave.barrier",
+  "llvm.amdgcn.workgroup.id.x",
+  "llvm.amdgcn.workgroup.id.y",
+  "llvm.amdgcn.workgroup.id.z",
+  "llvm.amdgcn.workitem.id.x",
+  "llvm.amdgcn.workitem.id.y",
+  "llvm.amdgcn.workitem.id.z",
+  "llvm.amdgcn.wqm",
+  "llvm.amdgcn.wqm.vote",
+  "llvm.amdgcn.writelane",
+  "llvm.amdgcn.wwm",
+  "llvm.arm.cdp",
+  "llvm.arm.cdp2",
+  "llvm.arm.clrex",
+  "llvm.arm.crc32b",
+  "llvm.arm.crc32cb",
+  "llvm.arm.crc32ch",
+  "llvm.arm.crc32cw",
+  "llvm.arm.crc32h",
+  "llvm.arm.crc32w",
+  "llvm.arm.dbg",
+  "llvm.arm.dmb",
+  "llvm.arm.dsb",
+  "llvm.arm.get.fpscr",
+  "llvm.arm.hint",
+  "llvm.arm.isb",
+  "llvm.arm.ldaex",
+  "llvm.arm.ldaexd",
+  "llvm.arm.ldc",
+  "llvm.arm.ldc2",
+  "llvm.arm.ldc2l",
+  "llvm.arm.ldcl",
+  "llvm.arm.ldrex",
+  "llvm.arm.ldrexd",
+  "llvm.arm.mcr",
+  "llvm.arm.mcr2",
+  "llvm.arm.mcrr",
+  "llvm.arm.mcrr2",
+  "llvm.arm.mrc",
+  "llvm.arm.mrc2",
+  "llvm.arm.mrrc",
+  "llvm.arm.mrrc2",
+  "llvm.arm.neon.aesd",
+  "llvm.arm.neon.aese",
+  "llvm.arm.neon.aesimc",
+  "llvm.arm.neon.aesmc",
+  "llvm.arm.neon.sha1c",
+  "llvm.arm.neon.sha1h",
+  "llvm.arm.neon.sha1m",
+  "llvm.arm.neon.sha1p",
+  "llvm.arm.neon.sha1su0",
+  "llvm.arm.neon.sha1su1",
+  "llvm.arm.neon.sha256h",
+  "llvm.arm.neon.sha256h2",
+  "llvm.arm.neon.sha256su0",
+  "llvm.arm.neon.sha256su1",
+  "llvm.arm.neon.vabds",
+  "llvm.arm.neon.vabdu",
+  "llvm.arm.neon.vabs",
+  "llvm.arm.neon.vacge",
+  "llvm.arm.neon.vacgt",
+  "llvm.arm.neon.vbsl",
+  "llvm.arm.neon.vcls",
+  "llvm.arm.neon.vcvtas",
+  "llvm.arm.neon.vcvtau",
+  "llvm.arm.neon.vcvtfp2fxs",
+  "llvm.arm.neon.vcvtfp2fxu",
+  "llvm.arm.neon.vcvtfp2hf",
+  "llvm.arm.neon.vcvtfxs2fp",
+  "llvm.arm.neon.vcvtfxu2fp",
+  "llvm.arm.neon.vcvthf2fp",
+  "llvm.arm.neon.vcvtms",
+  "llvm.arm.neon.vcvtmu",
+  "llvm.arm.neon.vcvtns",
+  "llvm.arm.neon.vcvtnu",
+  "llvm.arm.neon.vcvtps",
+  "llvm.arm.neon.vcvtpu",
+  "llvm.arm.neon.vhadds",
+  "llvm.arm.neon.vhaddu",
+  "llvm.arm.neon.vhsubs",
+  "llvm.arm.neon.vhsubu",
+  "llvm.arm.neon.vld1",
+  "llvm.arm.neon.vld2",
+  "llvm.arm.neon.vld2lane",
+  "llvm.arm.neon.vld3",
+  "llvm.arm.neon.vld3lane",
+  "llvm.arm.neon.vld4",
+  "llvm.arm.neon.vld4lane",
+  "llvm.arm.neon.vmaxnm",
+  "llvm.arm.neon.vmaxs",
+  "llvm.arm.neon.vmaxu",
+  "llvm.arm.neon.vminnm",
+  "llvm.arm.neon.vmins",
+  "llvm.arm.neon.vminu",
+  "llvm.arm.neon.vmullp",
+  "llvm.arm.neon.vmulls",
+  "llvm.arm.neon.vmullu",
+  "llvm.arm.neon.vmulp",
+  "llvm.arm.neon.vpadals",
+  "llvm.arm.neon.vpadalu",
+  "llvm.arm.neon.vpadd",
+  "llvm.arm.neon.vpaddls",
+  "llvm.arm.neon.vpaddlu",
+  "llvm.arm.neon.vpmaxs",
+  "llvm.arm.neon.vpmaxu",
+  "llvm.arm.neon.vpmins",
+  "llvm.arm.neon.vpminu",
+  "llvm.arm.neon.vqabs",
+  "llvm.arm.neon.vqadds",
+  "llvm.arm.neon.vqaddu",
+  "llvm.arm.neon.vqdmulh",
+  "llvm.arm.neon.vqdmull",
+  "llvm.arm.neon.vqmovns",
+  "llvm.arm.neon.vqmovnsu",
+  "llvm.arm.neon.vqmovnu",
+  "llvm.arm.neon.vqneg",
+  "llvm.arm.neon.vqrdmulh",
+  "llvm.arm.neon.vqrshiftns",
+  "llvm.arm.neon.vqrshiftnsu",
+  "llvm.arm.neon.vqrshiftnu",
+  "llvm.arm.neon.vqrshifts",
+  "llvm.arm.neon.vqrshiftu",
+  "llvm.arm.neon.vqshiftns",
+  "llvm.arm.neon.vqshiftnsu",
+  "llvm.arm.neon.vqshiftnu",
+  "llvm.arm.neon.vqshifts",
+  "llvm.arm.neon.vqshiftsu",
+  "llvm.arm.neon.vqshiftu",
+  "llvm.arm.neon.vqsubs",
+  "llvm.arm.neon.vqsubu",
+  "llvm.arm.neon.vraddhn",
+  "llvm.arm.neon.vrecpe",
+  "llvm.arm.neon.vrecps",
+  "llvm.arm.neon.vrhadds",
+  "llvm.arm.neon.vrhaddu",
+  "llvm.arm.neon.vrinta",
+  "llvm.arm.neon.vrintm",
+  "llvm.arm.neon.vrintn",
+  "llvm.arm.neon.vrintp",
+  "llvm.arm.neon.vrintx",
+  "llvm.arm.neon.vrintz",
+  "llvm.arm.neon.vrshiftn",
+  "llvm.arm.neon.vrshifts",
+  "llvm.arm.neon.vrshiftu",
+  "llvm.arm.neon.vrsqrte",
+  "llvm.arm.neon.vrsqrts",
+  "llvm.arm.neon.vrsubhn",
+  "llvm.arm.neon.vshiftins",
+  "llvm.arm.neon.vshifts",
+  "llvm.arm.neon.vshiftu",
+  "llvm.arm.neon.vst1",
+  "llvm.arm.neon.vst2",
+  "llvm.arm.neon.vst2lane",
+  "llvm.arm.neon.vst3",
+  "llvm.arm.neon.vst3lane",
+  "llvm.arm.neon.vst4",
+  "llvm.arm.neon.vst4lane",
+  "llvm.arm.neon.vtbl1",
+  "llvm.arm.neon.vtbl2",
+  "llvm.arm.neon.vtbl3",
+  "llvm.arm.neon.vtbl4",
+  "llvm.arm.neon.vtbx1",
+  "llvm.arm.neon.vtbx2",
+  "llvm.arm.neon.vtbx3",
+  "llvm.arm.neon.vtbx4",
+  "llvm.arm.qadd",
+  "llvm.arm.qadd16",
+  "llvm.arm.qadd8",
+  "llvm.arm.qasx",
+  "llvm.arm.qsax",
+  "llvm.arm.qsub",
+  "llvm.arm.qsub16",
+  "llvm.arm.qsub8",
+  "llvm.arm.sadd16",
+  "llvm.arm.sadd8",
+  "llvm.arm.sasx",
+  "llvm.arm.sel",
+  "llvm.arm.set.fpscr",
+  "llvm.arm.shadd16",
+  "llvm.arm.shadd8",
+  "llvm.arm.shasx",
+  "llvm.arm.shsax",
+  "llvm.arm.shsub16",
+  "llvm.arm.shsub8",
+  "llvm.arm.smlabb",
+  "llvm.arm.smlabt",
+  "llvm.arm.smlad",
+  "llvm.arm.smladx",
+  "llvm.arm.smlald",
+  "llvm.arm.smlaldx",
+  "llvm.arm.smlatb",
+  "llvm.arm.smlatt",
+  "llvm.arm.smlawb",
+  "llvm.arm.smlawt",
+  "llvm.arm.smlsd",
+  "llvm.arm.smlsdx",
+  "llvm.arm.smlsld",
+  "llvm.arm.smlsldx",
+  "llvm.arm.smuad",
+  "llvm.arm.smuadx",
+  "llvm.arm.smulbb",
+  "llvm.arm.smulbt",
+  "llvm.arm.smultb",
+  "llvm.arm.smultt",
+  "llvm.arm.smulwb",
+  "llvm.arm.smulwt",
+  "llvm.arm.smusd",
+  "llvm.arm.smusdx",
+  "llvm.arm.space",
+  "llvm.arm.ssat",
+  "llvm.arm.ssat16",
+  "llvm.arm.ssax",
+  "llvm.arm.ssub16",
+  "llvm.arm.ssub8",
+  "llvm.arm.stc",
+  "llvm.arm.stc2",
+  "llvm.arm.stc2l",
+  "llvm.arm.stcl",
+  "llvm.arm.stlex",
+  "llvm.arm.stlexd",
+  "llvm.arm.strex",
+  "llvm.arm.strexd",
+  "llvm.arm.sxtab16",
+  "llvm.arm.sxtb16",
+  "llvm.arm.uadd16",
+  "llvm.arm.uadd8",
+  "llvm.arm.uasx",
+  "llvm.arm.uhadd16",
+  "llvm.arm.uhadd8",
+  "llvm.arm.uhasx",
+  "llvm.arm.uhsax",
+  "llvm.arm.uhsub16",
+  "llvm.arm.uhsub8",
+  "llvm.arm.undefined",
+  "llvm.arm.uqadd16",
+  "llvm.arm.uqadd8",
+  "llvm.arm.uqasx",
+  "llvm.arm.uqsax",
+  "llvm.arm.uqsub16",
+  "llvm.arm.uqsub8",
+  "llvm.arm.usad8",
+  "llvm.arm.usada8",
+  "llvm.arm.usat",
+  "llvm.arm.usat16",
+  "llvm.arm.usax",
+  "llvm.arm.usub16",
+  "llvm.arm.usub8",
+  "llvm.arm.uxtab16",
+  "llvm.arm.uxtb16",
+  "llvm.arm.vcvtr",
+  "llvm.arm.vcvtru",
+  "llvm.bpf.load.byte",
+  "llvm.bpf.load.half",
+  "llvm.bpf.load.word",
+  "llvm.bpf.pseudo",
+  "llvm.hexagon.A2.abs",
+  "llvm.hexagon.A2.absp",
+  "llvm.hexagon.A2.abssat",
+  "llvm.hexagon.A2.add",
+  "llvm.hexagon.A2.addh.h16.hh",
+  "llvm.hexagon.A2.addh.h16.hl",
+  "llvm.hexagon.A2.addh.h16.lh",
+  "llvm.hexagon.A2.addh.h16.ll",
+  "llvm.hexagon.A2.addh.h16.sat.hh",
+  "llvm.hexagon.A2.addh.h16.sat.hl",
+  "llvm.hexagon.A2.addh.h16.sat.lh",
+  "llvm.hexagon.A2.addh.h16.sat.ll",
+  "llvm.hexagon.A2.addh.l16.hl",
+  "llvm.hexagon.A2.addh.l16.ll",
+  "llvm.hexagon.A2.addh.l16.sat.hl",
+  "llvm.hexagon.A2.addh.l16.sat.ll",
+  "llvm.hexagon.A2.addi",
+  "llvm.hexagon.A2.addp",
+  "llvm.hexagon.A2.addpsat",
+  "llvm.hexagon.A2.addsat",
+  "llvm.hexagon.A2.addsp",
+  "llvm.hexagon.A2.and",
+  "llvm.hexagon.A2.andir",
+  "llvm.hexagon.A2.andp",
+  "llvm.hexagon.A2.aslh",
+  "llvm.hexagon.A2.asrh",
+  "llvm.hexagon.A2.combine.hh",
+  "llvm.hexagon.A2.combine.hl",
+  "llvm.hexagon.A2.combine.lh",
+  "llvm.hexagon.A2.combine.ll",
+  "llvm.hexagon.A2.combineii",
+  "llvm.hexagon.A2.combinew",
+  "llvm.hexagon.A2.max",
+  "llvm.hexagon.A2.maxp",
+  "llvm.hexagon.A2.maxu",
+  "llvm.hexagon.A2.maxup",
+  "llvm.hexagon.A2.min",
+  "llvm.hexagon.A2.minp",
+  "llvm.hexagon.A2.minu",
+  "llvm.hexagon.A2.minup",
+  "llvm.hexagon.A2.neg",
+  "llvm.hexagon.A2.negp",
+  "llvm.hexagon.A2.negsat",
+  "llvm.hexagon.A2.not",
+  "llvm.hexagon.A2.notp",
+  "llvm.hexagon.A2.or",
+  "llvm.hexagon.A2.orir",
+  "llvm.hexagon.A2.orp",
+  "llvm.hexagon.A2.roundsat",
+  "llvm.hexagon.A2.sat",
+  "llvm.hexagon.A2.satb",
+  "llvm.hexagon.A2.sath",
+  "llvm.hexagon.A2.satub",
+  "llvm.hexagon.A2.satuh",
+  "llvm.hexagon.A2.sub",
+  "llvm.hexagon.A2.subh.h16.hh",
+  "llvm.hexagon.A2.subh.h16.hl",
+  "llvm.hexagon.A2.subh.h16.lh",
+  "llvm.hexagon.A2.subh.h16.ll",
+  "llvm.hexagon.A2.subh.h16.sat.hh",
+  "llvm.hexagon.A2.subh.h16.sat.hl",
+  "llvm.hexagon.A2.subh.h16.sat.lh",
+  "llvm.hexagon.A2.subh.h16.sat.ll",
+  "llvm.hexagon.A2.subh.l16.hl",
+  "llvm.hexagon.A2.subh.l16.ll",
+  "llvm.hexagon.A2.subh.l16.sat.hl",
+  "llvm.hexagon.A2.subh.l16.sat.ll",
+  "llvm.hexagon.A2.subp",
+  "llvm.hexagon.A2.subri",
+  "llvm.hexagon.A2.subsat",
+  "llvm.hexagon.A2.svaddh",
+  "llvm.hexagon.A2.svaddhs",
+  "llvm.hexagon.A2.svadduhs",
+  "llvm.hexagon.A2.svavgh",
+  "llvm.hexagon.A2.svavghs",
+  "llvm.hexagon.A2.svnavgh",
+  "llvm.hexagon.A2.svsubh",
+  "llvm.hexagon.A2.svsubhs",
+  "llvm.hexagon.A2.svsubuhs",
+  "llvm.hexagon.A2.swiz",
+  "llvm.hexagon.A2.sxtb",
+  "llvm.hexagon.A2.sxth",
+  "llvm.hexagon.A2.sxtw",
+  "llvm.hexagon.A2.tfr",
+  "llvm.hexagon.A2.tfrih",
+  "llvm.hexagon.A2.tfril",
+  "llvm.hexagon.A2.tfrp",
+  "llvm.hexagon.A2.tfrpi",
+  "llvm.hexagon.A2.tfrsi",
+  "llvm.hexagon.A2.vabsh",
+  "llvm.hexagon.A2.vabshsat",
+  "llvm.hexagon.A2.vabsw",
+  "llvm.hexagon.A2.vabswsat",
+  "llvm.hexagon.A2.vaddb.map",
+  "llvm.hexagon.A2.vaddh",
+  "llvm.hexagon.A2.vaddhs",
+  "llvm.hexagon.A2.vaddub",
+  "llvm.hexagon.A2.vaddubs",
+  "llvm.hexagon.A2.vadduhs",
+  "llvm.hexagon.A2.vaddw",
+  "llvm.hexagon.A2.vaddws",
+  "llvm.hexagon.A2.vavgh",
+  "llvm.hexagon.A2.vavghcr",
+  "llvm.hexagon.A2.vavghr",
+  "llvm.hexagon.A2.vavgub",
+  "llvm.hexagon.A2.vavgubr",
+  "llvm.hexagon.A2.vavguh",
+  "llvm.hexagon.A2.vavguhr",
+  "llvm.hexagon.A2.vavguw",
+  "llvm.hexagon.A2.vavguwr",
+  "llvm.hexagon.A2.vavgw",
+  "llvm.hexagon.A2.vavgwcr",
+  "llvm.hexagon.A2.vavgwr",
+  "llvm.hexagon.A2.vcmpbeq",
+  "llvm.hexagon.A2.vcmpbgtu",
+  "llvm.hexagon.A2.vcmpheq",
+  "llvm.hexagon.A2.vcmphgt",
+  "llvm.hexagon.A2.vcmphgtu",
+  "llvm.hexagon.A2.vcmpweq",
+  "llvm.hexagon.A2.vcmpwgt",
+  "llvm.hexagon.A2.vcmpwgtu",
+  "llvm.hexagon.A2.vconj",
+  "llvm.hexagon.A2.vmaxb",
+  "llvm.hexagon.A2.vmaxh",
+  "llvm.hexagon.A2.vmaxub",
+  "llvm.hexagon.A2.vmaxuh",
+  "llvm.hexagon.A2.vmaxuw",
+  "llvm.hexagon.A2.vmaxw",
+  "llvm.hexagon.A2.vminb",
+  "llvm.hexagon.A2.vminh",
+  "llvm.hexagon.A2.vminub",
+  "llvm.hexagon.A2.vminuh",
+  "llvm.hexagon.A2.vminuw",
+  "llvm.hexagon.A2.vminw",
+  "llvm.hexagon.A2.vnavgh",
+  "llvm.hexagon.A2.vnavghcr",
+  "llvm.hexagon.A2.vnavghr",
+  "llvm.hexagon.A2.vnavgw",
+  "llvm.hexagon.A2.vnavgwcr",
+  "llvm.hexagon.A2.vnavgwr",
+  "llvm.hexagon.A2.vraddub",
+  "llvm.hexagon.A2.vraddub.acc",
+  "llvm.hexagon.A2.vrsadub",
+  "llvm.hexagon.A2.vrsadub.acc",
+  "llvm.hexagon.A2.vsubb.map",
+  "llvm.hexagon.A2.vsubh",
+  "llvm.hexagon.A2.vsubhs",
+  "llvm.hexagon.A2.vsubub",
+  "llvm.hexagon.A2.vsububs",
+  "llvm.hexagon.A2.vsubuhs",
+  "llvm.hexagon.A2.vsubw",
+  "llvm.hexagon.A2.vsubws",
+  "llvm.hexagon.A2.xor",
+  "llvm.hexagon.A2.xorp",
+  "llvm.hexagon.A2.zxtb",
+  "llvm.hexagon.A2.zxth",
+  "llvm.hexagon.A4.andn",
+  "llvm.hexagon.A4.andnp",
+  "llvm.hexagon.A4.bitsplit",
+  "llvm.hexagon.A4.bitspliti",
+  "llvm.hexagon.A4.boundscheck",
+  "llvm.hexagon.A4.cmpbeq",
+  "llvm.hexagon.A4.cmpbeqi",
+  "llvm.hexagon.A4.cmpbgt",
+  "llvm.hexagon.A4.cmpbgti",
+  "llvm.hexagon.A4.cmpbgtu",
+  "llvm.hexagon.A4.cmpbgtui",
+  "llvm.hexagon.A4.cmpheq",
+  "llvm.hexagon.A4.cmpheqi",
+  "llvm.hexagon.A4.cmphgt",
+  "llvm.hexagon.A4.cmphgti",
+  "llvm.hexagon.A4.cmphgtu",
+  "llvm.hexagon.A4.cmphgtui",
+  "llvm.hexagon.A4.combineir",
+  "llvm.hexagon.A4.combineri",
+  "llvm.hexagon.A4.cround.ri",
+  "llvm.hexagon.A4.cround.rr",
+  "llvm.hexagon.A4.modwrapu",
+  "llvm.hexagon.A4.orn",
+  "llvm.hexagon.A4.ornp",
+  "llvm.hexagon.A4.rcmpeq",
+  "llvm.hexagon.A4.rcmpeqi",
+  "llvm.hexagon.A4.rcmpneq",
+  "llvm.hexagon.A4.rcmpneqi",
+  "llvm.hexagon.A4.round.ri",
+  "llvm.hexagon.A4.round.ri.sat",
+  "llvm.hexagon.A4.round.rr",
+  "llvm.hexagon.A4.round.rr.sat",
+  "llvm.hexagon.A4.tlbmatch",
+  "llvm.hexagon.A4.vcmpbeq.any",
+  "llvm.hexagon.A4.vcmpbeqi",
+  "llvm.hexagon.A4.vcmpbgt",
+  "llvm.hexagon.A4.vcmpbgti",
+  "llvm.hexagon.A4.vcmpbgtui",
+  "llvm.hexagon.A4.vcmpheqi",
+  "llvm.hexagon.A4.vcmphgti",
+  "llvm.hexagon.A4.vcmphgtui",
+  "llvm.hexagon.A4.vcmpweqi",
+  "llvm.hexagon.A4.vcmpwgti",
+  "llvm.hexagon.A4.vcmpwgtui",
+  "llvm.hexagon.A4.vrmaxh",
+  "llvm.hexagon.A4.vrmaxuh",
+  "llvm.hexagon.A4.vrmaxuw",
+  "llvm.hexagon.A4.vrmaxw",
+  "llvm.hexagon.A4.vrminh",
+  "llvm.hexagon.A4.vrminuh",
+  "llvm.hexagon.A4.vrminuw",
+  "llvm.hexagon.A4.vrminw",
+  "llvm.hexagon.A5.vaddhubs",
+  "llvm.hexagon.A6.vcmpbeq.notany",
+  "llvm.hexagon.A6.vcmpbeq.notany.128B",
+  "llvm.hexagon.C2.all8",
+  "llvm.hexagon.C2.and",
+  "llvm.hexagon.C2.andn",
+  "llvm.hexagon.C2.any8",
+  "llvm.hexagon.C2.bitsclr",
+  "llvm.hexagon.C2.bitsclri",
+  "llvm.hexagon.C2.bitsset",
+  "llvm.hexagon.C2.cmpeq",
+  "llvm.hexagon.C2.cmpeqi",
+  "llvm.hexagon.C2.cmpeqp",
+  "llvm.hexagon.C2.cmpgei",
+  "llvm.hexagon.C2.cmpgeui",
+  "llvm.hexagon.C2.cmpgt",
+  "llvm.hexagon.C2.cmpgti",
+  "llvm.hexagon.C2.cmpgtp",
+  "llvm.hexagon.C2.cmpgtu",
+  "llvm.hexagon.C2.cmpgtui",
+  "llvm.hexagon.C2.cmpgtup",
+  "llvm.hexagon.C2.cmplt",
+  "llvm.hexagon.C2.cmpltu",
+  "llvm.hexagon.C2.mask",
+  "llvm.hexagon.C2.mux",
+  "llvm.hexagon.C2.muxii",
+  "llvm.hexagon.C2.muxir",
+  "llvm.hexagon.C2.muxri",
+  "llvm.hexagon.C2.not",
+  "llvm.hexagon.C2.or",
+  "llvm.hexagon.C2.orn",
+  "llvm.hexagon.C2.pxfer.map",
+  "llvm.hexagon.C2.tfrpr",
+  "llvm.hexagon.C2.tfrrp",
+  "llvm.hexagon.C2.vitpack",
+  "llvm.hexagon.C2.vmux",
+  "llvm.hexagon.C2.xor",
+  "llvm.hexagon.C4.and.and",
+  "llvm.hexagon.C4.and.andn",
+  "llvm.hexagon.C4.and.or",
+  "llvm.hexagon.C4.and.orn",
+  "llvm.hexagon.C4.cmplte",
+  "llvm.hexagon.C4.cmpltei",
+  "llvm.hexagon.C4.cmplteu",
+  "llvm.hexagon.C4.cmplteui",
+  "llvm.hexagon.C4.cmpneq",
+  "llvm.hexagon.C4.cmpneqi",
+  "llvm.hexagon.C4.fastcorner9",
+  "llvm.hexagon.C4.fastcorner9.not",
+  "llvm.hexagon.C4.nbitsclr",
+  "llvm.hexagon.C4.nbitsclri",
+  "llvm.hexagon.C4.nbitsset",
+  "llvm.hexagon.C4.or.and",
+  "llvm.hexagon.C4.or.andn",
+  "llvm.hexagon.C4.or.or",
+  "llvm.hexagon.C4.or.orn",
+  "llvm.hexagon.F2.conv.d2df",
+  "llvm.hexagon.F2.conv.d2sf",
+  "llvm.hexagon.F2.conv.df2d",
+  "llvm.hexagon.F2.conv.df2d.chop",
+  "llvm.hexagon.F2.conv.df2sf",
+  "llvm.hexagon.F2.conv.df2ud",
+  "llvm.hexagon.F2.conv.df2ud.chop",
+  "llvm.hexagon.F2.conv.df2uw",
+  "llvm.hexagon.F2.conv.df2uw.chop",
+  "llvm.hexagon.F2.conv.df2w",
+  "llvm.hexagon.F2.conv.df2w.chop",
+  "llvm.hexagon.F2.conv.sf2d",
+  "llvm.hexagon.F2.conv.sf2d.chop",
+  "llvm.hexagon.F2.conv.sf2df",
+  "llvm.hexagon.F2.conv.sf2ud",
+  "llvm.hexagon.F2.conv.sf2ud.chop",
+  "llvm.hexagon.F2.conv.sf2uw",
+  "llvm.hexagon.F2.conv.sf2uw.chop",
+  "llvm.hexagon.F2.conv.sf2w",
+  "llvm.hexagon.F2.conv.sf2w.chop",
+  "llvm.hexagon.F2.conv.ud2df",
+  "llvm.hexagon.F2.conv.ud2sf",
+  "llvm.hexagon.F2.conv.uw2df",
+  "llvm.hexagon.F2.conv.uw2sf",
+  "llvm.hexagon.F2.conv.w2df",
+  "llvm.hexagon.F2.conv.w2sf",
+  "llvm.hexagon.F2.dfclass",
+  "llvm.hexagon.F2.dfcmpeq",
+  "llvm.hexagon.F2.dfcmpge",
+  "llvm.hexagon.F2.dfcmpgt",
+  "llvm.hexagon.F2.dfcmpuo",
+  "llvm.hexagon.F2.dfimm.n",
+  "llvm.hexagon.F2.dfimm.p",
+  "llvm.hexagon.F2.sfadd",
+  "llvm.hexagon.F2.sfclass",
+  "llvm.hexagon.F2.sfcmpeq",
+  "llvm.hexagon.F2.sfcmpge",
+  "llvm.hexagon.F2.sfcmpgt",
+  "llvm.hexagon.F2.sfcmpuo",
+  "llvm.hexagon.F2.sffixupd",
+  "llvm.hexagon.F2.sffixupn",
+  "llvm.hexagon.F2.sffixupr",
+  "llvm.hexagon.F2.sffma",
+  "llvm.hexagon.F2.sffma.lib",
+  "llvm.hexagon.F2.sffma.sc",
+  "llvm.hexagon.F2.sffms",
+  "llvm.hexagon.F2.sffms.lib",
+  "llvm.hexagon.F2.sfimm.n",
+  "llvm.hexagon.F2.sfimm.p",
+  "llvm.hexagon.F2.sfmax",
+  "llvm.hexagon.F2.sfmin",
+  "llvm.hexagon.F2.sfmpy",
+  "llvm.hexagon.F2.sfsub",
+  "llvm.hexagon.L2.loadrb.pbr",
+  "llvm.hexagon.L2.loadrb.pci",
+  "llvm.hexagon.L2.loadrb.pcr",
+  "llvm.hexagon.L2.loadrd.pbr",
+  "llvm.hexagon.L2.loadrd.pci",
+  "llvm.hexagon.L2.loadrd.pcr",
+  "llvm.hexagon.L2.loadrh.pbr",
+  "llvm.hexagon.L2.loadrh.pci",
+  "llvm.hexagon.L2.loadrh.pcr",
+  "llvm.hexagon.L2.loadri.pbr",
+  "llvm.hexagon.L2.loadri.pci",
+  "llvm.hexagon.L2.loadri.pcr",
+  "llvm.hexagon.L2.loadrub.pbr",
+  "llvm.hexagon.L2.loadrub.pci",
+  "llvm.hexagon.L2.loadrub.pcr",
+  "llvm.hexagon.L2.loadruh.pbr",
+  "llvm.hexagon.L2.loadruh.pci",
+  "llvm.hexagon.L2.loadruh.pcr",
+  "llvm.hexagon.L2.loadw.locked",
+  "llvm.hexagon.L4.loadd.locked",
+  "llvm.hexagon.M2.acci",
+  "llvm.hexagon.M2.accii",
+  "llvm.hexagon.M2.cmaci.s0",
+  "llvm.hexagon.M2.cmacr.s0",
+  "llvm.hexagon.M2.cmacs.s0",
+  "llvm.hexagon.M2.cmacs.s1",
+  "llvm.hexagon.M2.cmacsc.s0",
+  "llvm.hexagon.M2.cmacsc.s1",
+  "llvm.hexagon.M2.cmpyi.s0",
+  "llvm.hexagon.M2.cmpyr.s0",
+  "llvm.hexagon.M2.cmpyrs.s0",
+  "llvm.hexagon.M2.cmpyrs.s1",
+  "llvm.hexagon.M2.cmpyrsc.s0",
+  "llvm.hexagon.M2.cmpyrsc.s1",
+  "llvm.hexagon.M2.cmpys.s0",
+  "llvm.hexagon.M2.cmpys.s1",
+  "llvm.hexagon.M2.cmpysc.s0",
+  "llvm.hexagon.M2.cmpysc.s1",
+  "llvm.hexagon.M2.cnacs.s0",
+  "llvm.hexagon.M2.cnacs.s1",
+  "llvm.hexagon.M2.cnacsc.s0",
+  "llvm.hexagon.M2.cnacsc.s1",
+  "llvm.hexagon.M2.dpmpyss.acc.s0",
+  "llvm.hexagon.M2.dpmpyss.nac.s0",
+  "llvm.hexagon.M2.dpmpyss.rnd.s0",
+  "llvm.hexagon.M2.dpmpyss.s0",
+  "llvm.hexagon.M2.dpmpyuu.acc.s0",
+  "llvm.hexagon.M2.dpmpyuu.nac.s0",
+  "llvm.hexagon.M2.dpmpyuu.s0",
+  "llvm.hexagon.M2.hmmpyh.rs1",
+  "llvm.hexagon.M2.hmmpyh.s1",
+  "llvm.hexagon.M2.hmmpyl.rs1",
+  "llvm.hexagon.M2.hmmpyl.s1",
+  "llvm.hexagon.M2.maci",
+  "llvm.hexagon.M2.macsin",
+  "llvm.hexagon.M2.macsip",
+  "llvm.hexagon.M2.mmachs.rs0",
+  "llvm.hexagon.M2.mmachs.rs1",
+  "llvm.hexagon.M2.mmachs.s0",
+  "llvm.hexagon.M2.mmachs.s1",
+  "llvm.hexagon.M2.mmacls.rs0",
+  "llvm.hexagon.M2.mmacls.rs1",
+  "llvm.hexagon.M2.mmacls.s0",
+  "llvm.hexagon.M2.mmacls.s1",
+  "llvm.hexagon.M2.mmacuhs.rs0",
+  "llvm.hexagon.M2.mmacuhs.rs1",
+  "llvm.hexagon.M2.mmacuhs.s0",
+  "llvm.hexagon.M2.mmacuhs.s1",
+  "llvm.hexagon.M2.mmaculs.rs0",
+  "llvm.hexagon.M2.mmaculs.rs1",
+  "llvm.hexagon.M2.mmaculs.s0",
+  "llvm.hexagon.M2.mmaculs.s1",
+  "llvm.hexagon.M2.mmpyh.rs0",
+  "llvm.hexagon.M2.mmpyh.rs1",
+  "llvm.hexagon.M2.mmpyh.s0",
+  "llvm.hexagon.M2.mmpyh.s1",
+  "llvm.hexagon.M2.mmpyl.rs0",
+  "llvm.hexagon.M2.mmpyl.rs1",
+  "llvm.hexagon.M2.mmpyl.s0",
+  "llvm.hexagon.M2.mmpyl.s1",
+  "llvm.hexagon.M2.mmpyuh.rs0",
+  "llvm.hexagon.M2.mmpyuh.rs1",
+  "llvm.hexagon.M2.mmpyuh.s0",
+  "llvm.hexagon.M2.mmpyuh.s1",
+  "llvm.hexagon.M2.mmpyul.rs0",
+  "llvm.hexagon.M2.mmpyul.rs1",
+  "llvm.hexagon.M2.mmpyul.s0",
+  "llvm.hexagon.M2.mmpyul.s1",
+  "llvm.hexagon.M2.mpy.acc.hh.s0",
+  "llvm.hexagon.M2.mpy.acc.hh.s1",
+  "llvm.hexagon.M2.mpy.acc.hl.s0",
+  "llvm.hexagon.M2.mpy.acc.hl.s1",
+  "llvm.hexagon.M2.mpy.acc.lh.s0",
+  "llvm.hexagon.M2.mpy.acc.lh.s1",
+  "llvm.hexagon.M2.mpy.acc.ll.s0",
+  "llvm.hexagon.M2.mpy.acc.ll.s1",
+  "llvm.hexagon.M2.mpy.acc.sat.hh.s0",
+  "llvm.hexagon.M2.mpy.acc.sat.hh.s1",
+  "llvm.hexagon.M2.mpy.acc.sat.hl.s0",
+  "llvm.hexagon.M2.mpy.acc.sat.hl.s1",
+  "llvm.hexagon.M2.mpy.acc.sat.lh.s0",
+  "llvm.hexagon.M2.mpy.acc.sat.lh.s1",
+  "llvm.hexagon.M2.mpy.acc.sat.ll.s0",
+  "llvm.hexagon.M2.mpy.acc.sat.ll.s1",
+  "llvm.hexagon.M2.mpy.hh.s0",
+  "llvm.hexagon.M2.mpy.hh.s1",
+  "llvm.hexagon.M2.mpy.hl.s0",
+  "llvm.hexagon.M2.mpy.hl.s1",
+  "llvm.hexagon.M2.mpy.lh.s0",
+  "llvm.hexagon.M2.mpy.lh.s1",
+  "llvm.hexagon.M2.mpy.ll.s0",
+  "llvm.hexagon.M2.mpy.ll.s1",
+  "llvm.hexagon.M2.mpy.nac.hh.s0",
+  "llvm.hexagon.M2.mpy.nac.hh.s1",
+  "llvm.hexagon.M2.mpy.nac.hl.s0",
+  "llvm.hexagon.M2.mpy.nac.hl.s1",
+  "llvm.hexagon.M2.mpy.nac.lh.s0",
+  "llvm.hexagon.M2.mpy.nac.lh.s1",
+  "llvm.hexagon.M2.mpy.nac.ll.s0",
+  "llvm.hexagon.M2.mpy.nac.ll.s1",
+  "llvm.hexagon.M2.mpy.nac.sat.hh.s0",
+  "llvm.hexagon.M2.mpy.nac.sat.hh.s1",
+  "llvm.hexagon.M2.mpy.nac.sat.hl.s0",
+  "llvm.hexagon.M2.mpy.nac.sat.hl.s1",
+  "llvm.hexagon.M2.mpy.nac.sat.lh.s0",
+  "llvm.hexagon.M2.mpy.nac.sat.lh.s1",
+  "llvm.hexagon.M2.mpy.nac.sat.ll.s0",
+  "llvm.hexagon.M2.mpy.nac.sat.ll.s1",
+  "llvm.hexagon.M2.mpy.rnd.hh.s0",
+  "llvm.hexagon.M2.mpy.rnd.hh.s1",
+  "llvm.hexagon.M2.mpy.rnd.hl.s0",
+  "llvm.hexagon.M2.mpy.rnd.hl.s1",
+  "llvm.hexagon.M2.mpy.rnd.lh.s0",
+  "llvm.hexagon.M2.mpy.rnd.lh.s1",
+  "llvm.hexagon.M2.mpy.rnd.ll.s0",
+  "llvm.hexagon.M2.mpy.rnd.ll.s1",
+  "llvm.hexagon.M2.mpy.sat.hh.s0",
+  "llvm.hexagon.M2.mpy.sat.hh.s1",
+  "llvm.hexagon.M2.mpy.sat.hl.s0",
+  "llvm.hexagon.M2.mpy.sat.hl.s1",
+  "llvm.hexagon.M2.mpy.sat.lh.s0",
+  "llvm.hexagon.M2.mpy.sat.lh.s1",
+  "llvm.hexagon.M2.mpy.sat.ll.s0",
+  "llvm.hexagon.M2.mpy.sat.ll.s1",
+  "llvm.hexagon.M2.mpy.sat.rnd.hh.s0",
+  "llvm.hexagon.M2.mpy.sat.rnd.hh.s1",
+  "llvm.hexagon.M2.mpy.sat.rnd.hl.s0",
+  "llvm.hexagon.M2.mpy.sat.rnd.hl.s1",
+  "llvm.hexagon.M2.mpy.sat.rnd.lh.s0",
+  "llvm.hexagon.M2.mpy.sat.rnd.lh.s1",
+  "llvm.hexagon.M2.mpy.sat.rnd.ll.s0",
+  "llvm.hexagon.M2.mpy.sat.rnd.ll.s1",
+  "llvm.hexagon.M2.mpy.up",
+  "llvm.hexagon.M2.mpy.up.s1",
+  "llvm.hexagon.M2.mpy.up.s1.sat",
+  "llvm.hexagon.M2.mpyd.acc.hh.s0",
+  "llvm.hexagon.M2.mpyd.acc.hh.s1",
+  "llvm.hexagon.M2.mpyd.acc.hl.s0",
+  "llvm.hexagon.M2.mpyd.acc.hl.s1",
+  "llvm.hexagon.M2.mpyd.acc.lh.s0",
+  "llvm.hexagon.M2.mpyd.acc.lh.s1",
+  "llvm.hexagon.M2.mpyd.acc.ll.s0",
+  "llvm.hexagon.M2.mpyd.acc.ll.s1",
+  "llvm.hexagon.M2.mpyd.hh.s0",
+  "llvm.hexagon.M2.mpyd.hh.s1",
+  "llvm.hexagon.M2.mpyd.hl.s0",
+  "llvm.hexagon.M2.mpyd.hl.s1",
+  "llvm.hexagon.M2.mpyd.lh.s0",
+  "llvm.hexagon.M2.mpyd.lh.s1",
+  "llvm.hexagon.M2.mpyd.ll.s0",
+  "llvm.hexagon.M2.mpyd.ll.s1",
+  "llvm.hexagon.M2.mpyd.nac.hh.s0",
+  "llvm.hexagon.M2.mpyd.nac.hh.s1",
+  "llvm.hexagon.M2.mpyd.nac.hl.s0",
+  "llvm.hexagon.M2.mpyd.nac.hl.s1",
+  "llvm.hexagon.M2.mpyd.nac.lh.s0",
+  "llvm.hexagon.M2.mpyd.nac.lh.s1",
+  "llvm.hexagon.M2.mpyd.nac.ll.s0",
+  "llvm.hexagon.M2.mpyd.nac.ll.s1",
+  "llvm.hexagon.M2.mpyd.rnd.hh.s0",
+  "llvm.hexagon.M2.mpyd.rnd.hh.s1",
+  "llvm.hexagon.M2.mpyd.rnd.hl.s0",
+  "llvm.hexagon.M2.mpyd.rnd.hl.s1",
+  "llvm.hexagon.M2.mpyd.rnd.lh.s0",
+  "llvm.hexagon.M2.mpyd.rnd.lh.s1",
+  "llvm.hexagon.M2.mpyd.rnd.ll.s0",
+  "llvm.hexagon.M2.mpyd.rnd.ll.s1",
+  "llvm.hexagon.M2.mpyi",
+  "llvm.hexagon.M2.mpysmi",
+  "llvm.hexagon.M2.mpysu.up",
+  "llvm.hexagon.M2.mpyu.acc.hh.s0",
+  "llvm.hexagon.M2.mpyu.acc.hh.s1",
+  "llvm.hexagon.M2.mpyu.acc.hl.s0",
+  "llvm.hexagon.M2.mpyu.acc.hl.s1",
+  "llvm.hexagon.M2.mpyu.acc.lh.s0",
+  "llvm.hexagon.M2.mpyu.acc.lh.s1",
+  "llvm.hexagon.M2.mpyu.acc.ll.s0",
+  "llvm.hexagon.M2.mpyu.acc.ll.s1",
+  "llvm.hexagon.M2.mpyu.hh.s0",
+  "llvm.hexagon.M2.mpyu.hh.s1",
+  "llvm.hexagon.M2.mpyu.hl.s0",
+  "llvm.hexagon.M2.mpyu.hl.s1",
+  "llvm.hexagon.M2.mpyu.lh.s0",
+  "llvm.hexagon.M2.mpyu.lh.s1",
+  "llvm.hexagon.M2.mpyu.ll.s0",
+  "llvm.hexagon.M2.mpyu.ll.s1",
+  "llvm.hexagon.M2.mpyu.nac.hh.s0",
+  "llvm.hexagon.M2.mpyu.nac.hh.s1",
+  "llvm.hexagon.M2.mpyu.nac.hl.s0",
+  "llvm.hexagon.M2.mpyu.nac.hl.s1",
+  "llvm.hexagon.M2.mpyu.nac.lh.s0",
+  "llvm.hexagon.M2.mpyu.nac.lh.s1",
+  "llvm.hexagon.M2.mpyu.nac.ll.s0",
+  "llvm.hexagon.M2.mpyu.nac.ll.s1",
+  "llvm.hexagon.M2.mpyu.up",
+  "llvm.hexagon.M2.mpyud.acc.hh.s0",
+  "llvm.hexagon.M2.mpyud.acc.hh.s1",
+  "llvm.hexagon.M2.mpyud.acc.hl.s0",
+  "llvm.hexagon.M2.mpyud.acc.hl.s1",
+  "llvm.hexagon.M2.mpyud.acc.lh.s0",
+  "llvm.hexagon.M2.mpyud.acc.lh.s1",
+  "llvm.hexagon.M2.mpyud.acc.ll.s0",
+  "llvm.hexagon.M2.mpyud.acc.ll.s1",
+  "llvm.hexagon.M2.mpyud.hh.s0",
+  "llvm.hexagon.M2.mpyud.hh.s1",
+  "llvm.hexagon.M2.mpyud.hl.s0",
+  "llvm.hexagon.M2.mpyud.hl.s1",
+  "llvm.hexagon.M2.mpyud.lh.s0",
+  "llvm.hexagon.M2.mpyud.lh.s1",
+  "llvm.hexagon.M2.mpyud.ll.s0",
+  "llvm.hexagon.M2.mpyud.ll.s1",
+  "llvm.hexagon.M2.mpyud.nac.hh.s0",
+  "llvm.hexagon.M2.mpyud.nac.hh.s1",
+  "llvm.hexagon.M2.mpyud.nac.hl.s0",
+  "llvm.hexagon.M2.mpyud.nac.hl.s1",
+  "llvm.hexagon.M2.mpyud.nac.lh.s0",
+  "llvm.hexagon.M2.mpyud.nac.lh.s1",
+  "llvm.hexagon.M2.mpyud.nac.ll.s0",
+  "llvm.hexagon.M2.mpyud.nac.ll.s1",
+  "llvm.hexagon.M2.mpyui",
+  "llvm.hexagon.M2.nacci",
+  "llvm.hexagon.M2.naccii",
+  "llvm.hexagon.M2.subacc",
+  "llvm.hexagon.M2.vabsdiffh",
+  "llvm.hexagon.M2.vabsdiffw",
+  "llvm.hexagon.M2.vcmac.s0.sat.i",
+  "llvm.hexagon.M2.vcmac.s0.sat.r",
+  "llvm.hexagon.M2.vcmpy.s0.sat.i",
+  "llvm.hexagon.M2.vcmpy.s0.sat.r",
+  "llvm.hexagon.M2.vcmpy.s1.sat.i",
+  "llvm.hexagon.M2.vcmpy.s1.sat.r",
+  "llvm.hexagon.M2.vdmacs.s0",
+  "llvm.hexagon.M2.vdmacs.s1",
+  "llvm.hexagon.M2.vdmpyrs.s0",
+  "llvm.hexagon.M2.vdmpyrs.s1",
+  "llvm.hexagon.M2.vdmpys.s0",
+  "llvm.hexagon.M2.vdmpys.s1",
+  "llvm.hexagon.M2.vmac2",
+  "llvm.hexagon.M2.vmac2es",
+  "llvm.hexagon.M2.vmac2es.s0",
+  "llvm.hexagon.M2.vmac2es.s1",
+  "llvm.hexagon.M2.vmac2s.s0",
+  "llvm.hexagon.M2.vmac2s.s1",
+  "llvm.hexagon.M2.vmac2su.s0",
+  "llvm.hexagon.M2.vmac2su.s1",
+  "llvm.hexagon.M2.vmpy2es.s0",
+  "llvm.hexagon.M2.vmpy2es.s1",
+  "llvm.hexagon.M2.vmpy2s.s0",
+  "llvm.hexagon.M2.vmpy2s.s0pack",
+  "llvm.hexagon.M2.vmpy2s.s1",
+  "llvm.hexagon.M2.vmpy2s.s1pack",
+  "llvm.hexagon.M2.vmpy2su.s0",
+  "llvm.hexagon.M2.vmpy2su.s1",
+  "llvm.hexagon.M2.vraddh",
+  "llvm.hexagon.M2.vradduh",
+  "llvm.hexagon.M2.vrcmaci.s0",
+  "llvm.hexagon.M2.vrcmaci.s0c",
+  "llvm.hexagon.M2.vrcmacr.s0",
+  "llvm.hexagon.M2.vrcmacr.s0c",
+  "llvm.hexagon.M2.vrcmpyi.s0",
+  "llvm.hexagon.M2.vrcmpyi.s0c",
+  "llvm.hexagon.M2.vrcmpyr.s0",
+  "llvm.hexagon.M2.vrcmpyr.s0c",
+  "llvm.hexagon.M2.vrcmpys.acc.s1",
+  "llvm.hexagon.M2.vrcmpys.s1",
+  "llvm.hexagon.M2.vrcmpys.s1rp",
+  "llvm.hexagon.M2.vrmac.s0",
+  "llvm.hexagon.M2.vrmpy.s0",
+  "llvm.hexagon.M2.xor.xacc",
+  "llvm.hexagon.M4.and.and",
+  "llvm.hexagon.M4.and.andn",
+  "llvm.hexagon.M4.and.or",
+  "llvm.hexagon.M4.and.xor",
+  "llvm.hexagon.M4.cmpyi.wh",
+  "llvm.hexagon.M4.cmpyi.whc",
+  "llvm.hexagon.M4.cmpyr.wh",
+  "llvm.hexagon.M4.cmpyr.whc",
+  "llvm.hexagon.M4.mac.up.s1.sat",
+  "llvm.hexagon.M4.mpyri.addi",
+  "llvm.hexagon.M4.mpyri.addr",
+  "llvm.hexagon.M4.mpyri.addr.u2",
+  "llvm.hexagon.M4.mpyrr.addi",
+  "llvm.hexagon.M4.mpyrr.addr",
+  "llvm.hexagon.M4.nac.up.s1.sat",
+  "llvm.hexagon.M4.or.and",
+  "llvm.hexagon.M4.or.andn",
+  "llvm.hexagon.M4.or.or",
+  "llvm.hexagon.M4.or.xor",
+  "llvm.hexagon.M4.pmpyw",
+  "llvm.hexagon.M4.pmpyw.acc",
+  "llvm.hexagon.M4.vpmpyh",
+  "llvm.hexagon.M4.vpmpyh.acc",
+  "llvm.hexagon.M4.vrmpyeh.acc.s0",
+  "llvm.hexagon.M4.vrmpyeh.acc.s1",
+  "llvm.hexagon.M4.vrmpyeh.s0",
+  "llvm.hexagon.M4.vrmpyeh.s1",
+  "llvm.hexagon.M4.vrmpyoh.acc.s0",
+  "llvm.hexagon.M4.vrmpyoh.acc.s1",
+  "llvm.hexagon.M4.vrmpyoh.s0",
+  "llvm.hexagon.M4.vrmpyoh.s1",
+  "llvm.hexagon.M4.xor.and",
+  "llvm.hexagon.M4.xor.andn",
+  "llvm.hexagon.M4.xor.or",
+  "llvm.hexagon.M4.xor.xacc",
+  "llvm.hexagon.M5.vdmacbsu",
+  "llvm.hexagon.M5.vdmpybsu",
+  "llvm.hexagon.M5.vmacbsu",
+  "llvm.hexagon.M5.vmacbuu",
+  "llvm.hexagon.M5.vmpybsu",
+  "llvm.hexagon.M5.vmpybuu",
+  "llvm.hexagon.M5.vrmacbsu",
+  "llvm.hexagon.M5.vrmacbuu",
+  "llvm.hexagon.M5.vrmpybsu",
+  "llvm.hexagon.M5.vrmpybuu",
+  "llvm.hexagon.M6.vabsdiffb",
+  "llvm.hexagon.M6.vabsdiffub",
+  "llvm.hexagon.S2.addasl.rrri",
+  "llvm.hexagon.S2.asl.i.p",
+  "llvm.hexagon.S2.asl.i.p.acc",
+  "llvm.hexagon.S2.asl.i.p.and",
+  "llvm.hexagon.S2.asl.i.p.nac",
+  "llvm.hexagon.S2.asl.i.p.or",
+  "llvm.hexagon.S2.asl.i.p.xacc",
+  "llvm.hexagon.S2.asl.i.r",
+  "llvm.hexagon.S2.asl.i.r.acc",
+  "llvm.hexagon.S2.asl.i.r.and",
+  "llvm.hexagon.S2.asl.i.r.nac",
+  "llvm.hexagon.S2.asl.i.r.or",
+  "llvm.hexagon.S2.asl.i.r.sat",
+  "llvm.hexagon.S2.asl.i.r.xacc",
+  "llvm.hexagon.S2.asl.i.vh",
+  "llvm.hexagon.S2.asl.i.vw",
+  "llvm.hexagon.S2.asl.r.p",
+  "llvm.hexagon.S2.asl.r.p.acc",
+  "llvm.hexagon.S2.asl.r.p.and",
+  "llvm.hexagon.S2.asl.r.p.nac",
+  "llvm.hexagon.S2.asl.r.p.or",
+  "llvm.hexagon.S2.asl.r.p.xor",
+  "llvm.hexagon.S2.asl.r.r",
+  "llvm.hexagon.S2.asl.r.r.acc",
+  "llvm.hexagon.S2.asl.r.r.and",
+  "llvm.hexagon.S2.asl.r.r.nac",
+  "llvm.hexagon.S2.asl.r.r.or",
+  "llvm.hexagon.S2.asl.r.r.sat",
+  "llvm.hexagon.S2.asl.r.vh",
+  "llvm.hexagon.S2.asl.r.vw",
+  "llvm.hexagon.S2.asr.i.p",
+  "llvm.hexagon.S2.asr.i.p.acc",
+  "llvm.hexagon.S2.asr.i.p.and",
+  "llvm.hexagon.S2.asr.i.p.nac",
+  "llvm.hexagon.S2.asr.i.p.or",
+  "llvm.hexagon.S2.asr.i.p.rnd",
+  "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax",
+  "llvm.hexagon.S2.asr.i.r",
+  "llvm.hexagon.S2.asr.i.r.acc",
+  "llvm.hexagon.S2.asr.i.r.and",
+  "llvm.hexagon.S2.asr.i.r.nac",
+  "llvm.hexagon.S2.asr.i.r.or",
+  "llvm.hexagon.S2.asr.i.r.rnd",
+  "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax",
+  "llvm.hexagon.S2.asr.i.svw.trun",
+  "llvm.hexagon.S2.asr.i.vh",
+  "llvm.hexagon.S2.asr.i.vw",
+  "llvm.hexagon.S2.asr.r.p",
+  "llvm.hexagon.S2.asr.r.p.acc",
+  "llvm.hexagon.S2.asr.r.p.and",
+  "llvm.hexagon.S2.asr.r.p.nac",
+  "llvm.hexagon.S2.asr.r.p.or",
+  "llvm.hexagon.S2.asr.r.p.xor",
+  "llvm.hexagon.S2.asr.r.r",
+  "llvm.hexagon.S2.asr.r.r.acc",
+  "llvm.hexagon.S2.asr.r.r.and",
+  "llvm.hexagon.S2.asr.r.r.nac",
+  "llvm.hexagon.S2.asr.r.r.or",
+  "llvm.hexagon.S2.asr.r.r.sat",
+  "llvm.hexagon.S2.asr.r.svw.trun",
+  "llvm.hexagon.S2.asr.r.vh",
+  "llvm.hexagon.S2.asr.r.vw",
+  "llvm.hexagon.S2.brev",
+  "llvm.hexagon.S2.brevp",
+  "llvm.hexagon.S2.cabacencbin",
+  "llvm.hexagon.S2.cl0",
+  "llvm.hexagon.S2.cl0p",
+  "llvm.hexagon.S2.cl1",
+  "llvm.hexagon.S2.cl1p",
+  "llvm.hexagon.S2.clb",
+  "llvm.hexagon.S2.clbnorm",
+  "llvm.hexagon.S2.clbp",
+  "llvm.hexagon.S2.clrbit.i",
+  "llvm.hexagon.S2.clrbit.r",
+  "llvm.hexagon.S2.ct0",
+  "llvm.hexagon.S2.ct0p",
+  "llvm.hexagon.S2.ct1",
+  "llvm.hexagon.S2.ct1p",
+  "llvm.hexagon.S2.deinterleave",
+  "llvm.hexagon.S2.extractu",
+  "llvm.hexagon.S2.extractu.rp",
+  "llvm.hexagon.S2.extractup",
+  "llvm.hexagon.S2.extractup.rp",
+  "llvm.hexagon.S2.insert",
+  "llvm.hexagon.S2.insert.rp",
+  "llvm.hexagon.S2.insertp",
+  "llvm.hexagon.S2.insertp.rp",
+  "llvm.hexagon.S2.interleave",
+  "llvm.hexagon.S2.lfsp",
+  "llvm.hexagon.S2.lsl.r.p",
+  "llvm.hexagon.S2.lsl.r.p.acc",
+  "llvm.hexagon.S2.lsl.r.p.and",
+  "llvm.hexagon.S2.lsl.r.p.nac",
+  "llvm.hexagon.S2.lsl.r.p.or",
+  "llvm.hexagon.S2.lsl.r.p.xor",
+  "llvm.hexagon.S2.lsl.r.r",
+  "llvm.hexagon.S2.lsl.r.r.acc",
+  "llvm.hexagon.S2.lsl.r.r.and",
+  "llvm.hexagon.S2.lsl.r.r.nac",
+  "llvm.hexagon.S2.lsl.r.r.or",
+  "llvm.hexagon.S2.lsl.r.vh",
+  "llvm.hexagon.S2.lsl.r.vw",
+  "llvm.hexagon.S2.lsr.i.p",
+  "llvm.hexagon.S2.lsr.i.p.acc",
+  "llvm.hexagon.S2.lsr.i.p.and",
+  "llvm.hexagon.S2.lsr.i.p.nac",
+  "llvm.hexagon.S2.lsr.i.p.or",
+  "llvm.hexagon.S2.lsr.i.p.xacc",
+  "llvm.hexagon.S2.lsr.i.r",
+  "llvm.hexagon.S2.lsr.i.r.acc",
+  "llvm.hexagon.S2.lsr.i.r.and",
+  "llvm.hexagon.S2.lsr.i.r.nac",
+  "llvm.hexagon.S2.lsr.i.r.or",
+  "llvm.hexagon.S2.lsr.i.r.xacc",
+  "llvm.hexagon.S2.lsr.i.vh",
+  "llvm.hexagon.S2.lsr.i.vw",
+  "llvm.hexagon.S2.lsr.r.p",
+  "llvm.hexagon.S2.lsr.r.p.acc",
+  "llvm.hexagon.S2.lsr.r.p.and",
+  "llvm.hexagon.S2.lsr.r.p.nac",
+  "llvm.hexagon.S2.lsr.r.p.or",
+  "llvm.hexagon.S2.lsr.r.p.xor",
+  "llvm.hexagon.S2.lsr.r.r",
+  "llvm.hexagon.S2.lsr.r.r.acc",
+  "llvm.hexagon.S2.lsr.r.r.and",
+  "llvm.hexagon.S2.lsr.r.r.nac",
+  "llvm.hexagon.S2.lsr.r.r.or",
+  "llvm.hexagon.S2.lsr.r.vh",
+  "llvm.hexagon.S2.lsr.r.vw",
+  "llvm.hexagon.S2.packhl",
+  "llvm.hexagon.S2.parityp",
+  "llvm.hexagon.S2.setbit.i",
+  "llvm.hexagon.S2.setbit.r",
+  "llvm.hexagon.S2.shuffeb",
+  "llvm.hexagon.S2.shuffeh",
+  "llvm.hexagon.S2.shuffob",
+  "llvm.hexagon.S2.shuffoh",
+  "llvm.hexagon.S2.storerb.pbr",
+  "llvm.hexagon.S2.storerb.pci",
+  "llvm.hexagon.S2.storerb.pcr",
+  "llvm.hexagon.S2.storerd.pbr",
+  "llvm.hexagon.S2.storerd.pci",
+  "llvm.hexagon.S2.storerd.pcr",
+  "llvm.hexagon.S2.storerf.pbr",
+  "llvm.hexagon.S2.storerf.pci",
+  "llvm.hexagon.S2.storerf.pcr",
+  "llvm.hexagon.S2.storerh.pbr",
+  "llvm.hexagon.S2.storerh.pci",
+  "llvm.hexagon.S2.storerh.pcr",
+  "llvm.hexagon.S2.storeri.pbr",
+  "llvm.hexagon.S2.storeri.pci",
+  "llvm.hexagon.S2.storeri.pcr",
+  "llvm.hexagon.S2.storew.locked",
+  "llvm.hexagon.S2.svsathb",
+  "llvm.hexagon.S2.svsathub",
+  "llvm.hexagon.S2.tableidxb.goodsyntax",
+  "llvm.hexagon.S2.tableidxd.goodsyntax",
+  "llvm.hexagon.S2.tableidxh.goodsyntax",
+  "llvm.hexagon.S2.tableidxw.goodsyntax",
+  "llvm.hexagon.S2.togglebit.i",
+  "llvm.hexagon.S2.togglebit.r",
+  "llvm.hexagon.S2.tstbit.i",
+  "llvm.hexagon.S2.tstbit.r",
+  "llvm.hexagon.S2.valignib",
+  "llvm.hexagon.S2.valignrb",
+  "llvm.hexagon.S2.vcnegh",
+  "llvm.hexagon.S2.vcrotate",
+  "llvm.hexagon.S2.vrcnegh",
+  "llvm.hexagon.S2.vrndpackwh",
+  "llvm.hexagon.S2.vrndpackwhs",
+  "llvm.hexagon.S2.vsathb",
+  "llvm.hexagon.S2.vsathb.nopack",
+  "llvm.hexagon.S2.vsathub",
+  "llvm.hexagon.S2.vsathub.nopack",
+  "llvm.hexagon.S2.vsatwh",
+  "llvm.hexagon.S2.vsatwh.nopack",
+  "llvm.hexagon.S2.vsatwuh",
+  "llvm.hexagon.S2.vsatwuh.nopack",
+  "llvm.hexagon.S2.vsplatrb",
+  "llvm.hexagon.S2.vsplatrh",
+  "llvm.hexagon.S2.vspliceib",
+  "llvm.hexagon.S2.vsplicerb",
+  "llvm.hexagon.S2.vsxtbh",
+  "llvm.hexagon.S2.vsxthw",
+  "llvm.hexagon.S2.vtrunehb",
+  "llvm.hexagon.S2.vtrunewh",
+  "llvm.hexagon.S2.vtrunohb",
+  "llvm.hexagon.S2.vtrunowh",
+  "llvm.hexagon.S2.vzxtbh",
+  "llvm.hexagon.S2.vzxthw",
+  "llvm.hexagon.S4.addaddi",
+  "llvm.hexagon.S4.addi.asl.ri",
+  "llvm.hexagon.S4.addi.lsr.ri",
+  "llvm.hexagon.S4.andi.asl.ri",
+  "llvm.hexagon.S4.andi.lsr.ri",
+  "llvm.hexagon.S4.clbaddi",
+  "llvm.hexagon.S4.clbpaddi",
+  "llvm.hexagon.S4.clbpnorm",
+  "llvm.hexagon.S4.extract",
+  "llvm.hexagon.S4.extract.rp",
+  "llvm.hexagon.S4.extractp",
+  "llvm.hexagon.S4.extractp.rp",
+  "llvm.hexagon.S4.lsli",
+  "llvm.hexagon.S4.ntstbit.i",
+  "llvm.hexagon.S4.ntstbit.r",
+  "llvm.hexagon.S4.or.andi",
+  "llvm.hexagon.S4.or.andix",
+  "llvm.hexagon.S4.or.ori",
+  "llvm.hexagon.S4.ori.asl.ri",
+  "llvm.hexagon.S4.ori.lsr.ri",
+  "llvm.hexagon.S4.parity",
+  "llvm.hexagon.S4.stored.locked",
+  "llvm.hexagon.S4.subaddi",
+  "llvm.hexagon.S4.subi.asl.ri",
+  "llvm.hexagon.S4.subi.lsr.ri",
+  "llvm.hexagon.S4.vrcrotate",
+  "llvm.hexagon.S4.vrcrotate.acc",
+  "llvm.hexagon.S4.vxaddsubh",
+  "llvm.hexagon.S4.vxaddsubhr",
+  "llvm.hexagon.S4.vxaddsubw",
+  "llvm.hexagon.S4.vxsubaddh",
+  "llvm.hexagon.S4.vxsubaddhr",
+  "llvm.hexagon.S4.vxsubaddw",
+  "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax",
+  "llvm.hexagon.S5.asrhub.sat",
+  "llvm.hexagon.S5.popcountp",
+  "llvm.hexagon.S5.vasrhrnd.goodsyntax",
+  "llvm.hexagon.S6.rol.i.p",
+  "llvm.hexagon.S6.rol.i.p.acc",
+  "llvm.hexagon.S6.rol.i.p.and",
+  "llvm.hexagon.S6.rol.i.p.nac",
+  "llvm.hexagon.S6.rol.i.p.or",
+  "llvm.hexagon.S6.rol.i.p.xacc",
+  "llvm.hexagon.S6.rol.i.r",
+  "llvm.hexagon.S6.rol.i.r.acc",
+  "llvm.hexagon.S6.rol.i.r.and",
+  "llvm.hexagon.S6.rol.i.r.nac",
+  "llvm.hexagon.S6.rol.i.r.or",
+  "llvm.hexagon.S6.rol.i.r.xacc",
+  "llvm.hexagon.S6.vsplatrbp",
+  "llvm.hexagon.S6.vtrunehb.ppp",
+  "llvm.hexagon.S6.vtrunohb.ppp",
+  "llvm.hexagon.V6.extractw",
+  "llvm.hexagon.V6.extractw.128B",
+  "llvm.hexagon.V6.hi",
+  "llvm.hexagon.V6.hi.128B",
+  "llvm.hexagon.V6.lo",
+  "llvm.hexagon.V6.lo.128B",
+  "llvm.hexagon.V6.lvsplatb",
+  "llvm.hexagon.V6.lvsplatb.128B",
+  "llvm.hexagon.V6.lvsplath",
+  "llvm.hexagon.V6.lvsplath.128B",
+  "llvm.hexagon.V6.lvsplatw",
+  "llvm.hexagon.V6.lvsplatw.128B",
+  "llvm.hexagon.V6.pred.and",
+  "llvm.hexagon.V6.pred.and.128B",
+  "llvm.hexagon.V6.pred.and.n",
+  "llvm.hexagon.V6.pred.and.n.128B",
+  "llvm.hexagon.V6.pred.not",
+  "llvm.hexagon.V6.pred.not.128B",
+  "llvm.hexagon.V6.pred.or",
+  "llvm.hexagon.V6.pred.or.128B",
+  "llvm.hexagon.V6.pred.or.n",
+  "llvm.hexagon.V6.pred.or.n.128B",
+  "llvm.hexagon.V6.pred.scalar2",
+  "llvm.hexagon.V6.pred.scalar2.128B",
+  "llvm.hexagon.V6.pred.scalar2v2",
+  "llvm.hexagon.V6.pred.scalar2v2.128B",
+  "llvm.hexagon.V6.pred.xor",
+  "llvm.hexagon.V6.pred.xor.128B",
+  "llvm.hexagon.V6.shuffeqh",
+  "llvm.hexagon.V6.shuffeqh.128B",
+  "llvm.hexagon.V6.shuffeqw",
+  "llvm.hexagon.V6.shuffeqw.128B",
+  "llvm.hexagon.V6.vS32b.nqpred.ai",
+  "llvm.hexagon.V6.vS32b.nqpred.ai.128B",
+  "llvm.hexagon.V6.vS32b.nt.nqpred.ai",
+  "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B",
+  "llvm.hexagon.V6.vS32b.nt.qpred.ai",
+  "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B",
+  "llvm.hexagon.V6.vS32b.qpred.ai",
+  "llvm.hexagon.V6.vS32b.qpred.ai.128B",
+  "llvm.hexagon.V6.vabsb",
+  "llvm.hexagon.V6.vabsb.128B",
+  "llvm.hexagon.V6.vabsb.sat",
+  "llvm.hexagon.V6.vabsb.sat.128B",
+  "llvm.hexagon.V6.vabsdiffh",
+  "llvm.hexagon.V6.vabsdiffh.128B",
+  "llvm.hexagon.V6.vabsdiffub",
+  "llvm.hexagon.V6.vabsdiffub.128B",
+  "llvm.hexagon.V6.vabsdiffuh",
+  "llvm.hexagon.V6.vabsdiffuh.128B",
+  "llvm.hexagon.V6.vabsdiffw",
+  "llvm.hexagon.V6.vabsdiffw.128B",
+  "llvm.hexagon.V6.vabsh",
+  "llvm.hexagon.V6.vabsh.128B",
+  "llvm.hexagon.V6.vabsh.sat",
+  "llvm.hexagon.V6.vabsh.sat.128B",
+  "llvm.hexagon.V6.vabsw",
+  "llvm.hexagon.V6.vabsw.128B",
+  "llvm.hexagon.V6.vabsw.sat",
+  "llvm.hexagon.V6.vabsw.sat.128B",
+  "llvm.hexagon.V6.vaddb",
+  "llvm.hexagon.V6.vaddb.128B",
+  "llvm.hexagon.V6.vaddb.dv",
+  "llvm.hexagon.V6.vaddb.dv.128B",
+  "llvm.hexagon.V6.vaddbnq",
+  "llvm.hexagon.V6.vaddbnq.128B",
+  "llvm.hexagon.V6.vaddbq",
+  "llvm.hexagon.V6.vaddbq.128B",
+  "llvm.hexagon.V6.vaddbsat",
+  "llvm.hexagon.V6.vaddbsat.128B",
+  "llvm.hexagon.V6.vaddbsat.dv",
+  "llvm.hexagon.V6.vaddbsat.dv.128B",
+  "llvm.hexagon.V6.vaddcarry",
+  "llvm.hexagon.V6.vaddcarry.128B",
+  "llvm.hexagon.V6.vaddclbh",
+  "llvm.hexagon.V6.vaddclbh.128B",
+  "llvm.hexagon.V6.vaddclbw",
+  "llvm.hexagon.V6.vaddclbw.128B",
+  "llvm.hexagon.V6.vaddh",
+  "llvm.hexagon.V6.vaddh.128B",
+  "llvm.hexagon.V6.vaddh.dv",
+  "llvm.hexagon.V6.vaddh.dv.128B",
+  "llvm.hexagon.V6.vaddhnq",
+  "llvm.hexagon.V6.vaddhnq.128B",
+  "llvm.hexagon.V6.vaddhq",
+  "llvm.hexagon.V6.vaddhq.128B",
+  "llvm.hexagon.V6.vaddhsat",
+  "llvm.hexagon.V6.vaddhsat.128B",
+  "llvm.hexagon.V6.vaddhsat.dv",
+  "llvm.hexagon.V6.vaddhsat.dv.128B",
+  "llvm.hexagon.V6.vaddhw",
+  "llvm.hexagon.V6.vaddhw.128B",
+  "llvm.hexagon.V6.vaddhw.acc",
+  "llvm.hexagon.V6.vaddhw.acc.128B",
+  "llvm.hexagon.V6.vaddubh",
+  "llvm.hexagon.V6.vaddubh.128B",
+  "llvm.hexagon.V6.vaddubh.acc",
+  "llvm.hexagon.V6.vaddubh.acc.128B",
+  "llvm.hexagon.V6.vaddubsat",
+  "llvm.hexagon.V6.vaddubsat.128B",
+  "llvm.hexagon.V6.vaddubsat.dv",
+  "llvm.hexagon.V6.vaddubsat.dv.128B",
+  "llvm.hexagon.V6.vaddububb.sat",
+  "llvm.hexagon.V6.vaddububb.sat.128B",
+  "llvm.hexagon.V6.vadduhsat",
+  "llvm.hexagon.V6.vadduhsat.128B",
+  "llvm.hexagon.V6.vadduhsat.dv",
+  "llvm.hexagon.V6.vadduhsat.dv.128B",
+  "llvm.hexagon.V6.vadduhw",
+  "llvm.hexagon.V6.vadduhw.128B",
+  "llvm.hexagon.V6.vadduhw.acc",
+  "llvm.hexagon.V6.vadduhw.acc.128B",
+  "llvm.hexagon.V6.vadduwsat",
+  "llvm.hexagon.V6.vadduwsat.128B",
+  "llvm.hexagon.V6.vadduwsat.dv",
+  "llvm.hexagon.V6.vadduwsat.dv.128B",
+  "llvm.hexagon.V6.vaddw",
+  "llvm.hexagon.V6.vaddw.128B",
+  "llvm.hexagon.V6.vaddw.dv",
+  "llvm.hexagon.V6.vaddw.dv.128B",
+  "llvm.hexagon.V6.vaddwnq",
+  "llvm.hexagon.V6.vaddwnq.128B",
+  "llvm.hexagon.V6.vaddwq",
+  "llvm.hexagon.V6.vaddwq.128B",
+  "llvm.hexagon.V6.vaddwsat",
+  "llvm.hexagon.V6.vaddwsat.128B",
+  "llvm.hexagon.V6.vaddwsat.dv",
+  "llvm.hexagon.V6.vaddwsat.dv.128B",
+  "llvm.hexagon.V6.valignb",
+  "llvm.hexagon.V6.valignb.128B",
+  "llvm.hexagon.V6.valignbi",
+  "llvm.hexagon.V6.valignbi.128B",
+  "llvm.hexagon.V6.vand",
+  "llvm.hexagon.V6.vand.128B",
+  "llvm.hexagon.V6.vandnqrt",
+  "llvm.hexagon.V6.vandnqrt.128B",
+  "llvm.hexagon.V6.vandnqrt.acc",
+  "llvm.hexagon.V6.vandnqrt.acc.128B",
+  "llvm.hexagon.V6.vandqrt",
+  "llvm.hexagon.V6.vandqrt.128B",
+  "llvm.hexagon.V6.vandqrt.acc",
+  "llvm.hexagon.V6.vandqrt.acc.128B",
+  "llvm.hexagon.V6.vandvnqv",
+  "llvm.hexagon.V6.vandvnqv.128B",
+  "llvm.hexagon.V6.vandvqv",
+  "llvm.hexagon.V6.vandvqv.128B",
+  "llvm.hexagon.V6.vandvrt",
+  "llvm.hexagon.V6.vandvrt.128B",
+  "llvm.hexagon.V6.vandvrt.acc",
+  "llvm.hexagon.V6.vandvrt.acc.128B",
+  "llvm.hexagon.V6.vaslh",
+  "llvm.hexagon.V6.vaslh.128B",
+  "llvm.hexagon.V6.vaslh.acc",
+  "llvm.hexagon.V6.vaslh.acc.128B",
+  "llvm.hexagon.V6.vaslhv",
+  "llvm.hexagon.V6.vaslhv.128B",
+  "llvm.hexagon.V6.vaslw",
+  "llvm.hexagon.V6.vaslw.128B",
+  "llvm.hexagon.V6.vaslw.acc",
+  "llvm.hexagon.V6.vaslw.acc.128B",
+  "llvm.hexagon.V6.vaslwv",
+  "llvm.hexagon.V6.vaslwv.128B",
+  "llvm.hexagon.V6.vasrh",
+  "llvm.hexagon.V6.vasrh.128B",
+  "llvm.hexagon.V6.vasrh.acc",
+  "llvm.hexagon.V6.vasrh.acc.128B",
+  "llvm.hexagon.V6.vasrhbrndsat",
+  "llvm.hexagon.V6.vasrhbrndsat.128B",
+  "llvm.hexagon.V6.vasrhbsat",
+  "llvm.hexagon.V6.vasrhbsat.128B",
+  "llvm.hexagon.V6.vasrhubrndsat",
+  "llvm.hexagon.V6.vasrhubrndsat.128B",
+  "llvm.hexagon.V6.vasrhubsat",
+  "llvm.hexagon.V6.vasrhubsat.128B",
+  "llvm.hexagon.V6.vasrhv",
+  "llvm.hexagon.V6.vasrhv.128B",
+  "llvm.hexagon.V6.vasruhubrndsat",
+  "llvm.hexagon.V6.vasruhubrndsat.128B",
+  "llvm.hexagon.V6.vasruhubsat",
+  "llvm.hexagon.V6.vasruhubsat.128B",
+  "llvm.hexagon.V6.vasruwuhrndsat",
+  "llvm.hexagon.V6.vasruwuhrndsat.128B",
+  "llvm.hexagon.V6.vasruwuhsat",
+  "llvm.hexagon.V6.vasruwuhsat.128B",
+  "llvm.hexagon.V6.vasrw",
+  "llvm.hexagon.V6.vasrw.128B",
+  "llvm.hexagon.V6.vasrw.acc",
+  "llvm.hexagon.V6.vasrw.acc.128B",
+  "llvm.hexagon.V6.vasrwh",
+  "llvm.hexagon.V6.vasrwh.128B",
+  "llvm.hexagon.V6.vasrwhrndsat",
+  "llvm.hexagon.V6.vasrwhrndsat.128B",
+  "llvm.hexagon.V6.vasrwhsat",
+  "llvm.hexagon.V6.vasrwhsat.128B",
+  "llvm.hexagon.V6.vasrwuhrndsat",
+  "llvm.hexagon.V6.vasrwuhrndsat.128B",
+  "llvm.hexagon.V6.vasrwuhsat",
+  "llvm.hexagon.V6.vasrwuhsat.128B",
+  "llvm.hexagon.V6.vasrwv",
+  "llvm.hexagon.V6.vasrwv.128B",
+  "llvm.hexagon.V6.vassign",
+  "llvm.hexagon.V6.vassign.128B",
+  "llvm.hexagon.V6.vassignp",
+  "llvm.hexagon.V6.vassignp.128B",
+  "llvm.hexagon.V6.vavgb",
+  "llvm.hexagon.V6.vavgb.128B",
+  "llvm.hexagon.V6.vavgbrnd",
+  "llvm.hexagon.V6.vavgbrnd.128B",
+  "llvm.hexagon.V6.vavgh",
+  "llvm.hexagon.V6.vavgh.128B",
+  "llvm.hexagon.V6.vavghrnd",
+  "llvm.hexagon.V6.vavghrnd.128B",
+  "llvm.hexagon.V6.vavgub",
+  "llvm.hexagon.V6.vavgub.128B",
+  "llvm.hexagon.V6.vavgubrnd",
+  "llvm.hexagon.V6.vavgubrnd.128B",
+  "llvm.hexagon.V6.vavguh",
+  "llvm.hexagon.V6.vavguh.128B",
+  "llvm.hexagon.V6.vavguhrnd",
+  "llvm.hexagon.V6.vavguhrnd.128B",
+  "llvm.hexagon.V6.vavguw",
+  "llvm.hexagon.V6.vavguw.128B",
+  "llvm.hexagon.V6.vavguwrnd",
+  "llvm.hexagon.V6.vavguwrnd.128B",
+  "llvm.hexagon.V6.vavgw",
+  "llvm.hexagon.V6.vavgw.128B",
+  "llvm.hexagon.V6.vavgwrnd",
+  "llvm.hexagon.V6.vavgwrnd.128B",
+  "llvm.hexagon.V6.vcl0h",
+  "llvm.hexagon.V6.vcl0h.128B",
+  "llvm.hexagon.V6.vcl0w",
+  "llvm.hexagon.V6.vcl0w.128B",
+  "llvm.hexagon.V6.vcombine",
+  "llvm.hexagon.V6.vcombine.128B",
+  "llvm.hexagon.V6.vd0",
+  "llvm.hexagon.V6.vd0.128B",
+  "llvm.hexagon.V6.vdd0",
+  "llvm.hexagon.V6.vdd0.128B",
+  "llvm.hexagon.V6.vdealb",
+  "llvm.hexagon.V6.vdealb.128B",
+  "llvm.hexagon.V6.vdealb4w",
+  "llvm.hexagon.V6.vdealb4w.128B",
+  "llvm.hexagon.V6.vdealh",
+  "llvm.hexagon.V6.vdealh.128B",
+  "llvm.hexagon.V6.vdealvdd",
+  "llvm.hexagon.V6.vdealvdd.128B",
+  "llvm.hexagon.V6.vdelta",
+  "llvm.hexagon.V6.vdelta.128B",
+  "llvm.hexagon.V6.vdmpybus",
+  "llvm.hexagon.V6.vdmpybus.128B",
+  "llvm.hexagon.V6.vdmpybus.acc",
+  "llvm.hexagon.V6.vdmpybus.acc.128B",
+  "llvm.hexagon.V6.vdmpybus.dv",
+  "llvm.hexagon.V6.vdmpybus.dv.128B",
+  "llvm.hexagon.V6.vdmpybus.dv.acc",
+  "llvm.hexagon.V6.vdmpybus.dv.acc.128B",
+  "llvm.hexagon.V6.vdmpyhb",
+  "llvm.hexagon.V6.vdmpyhb.128B",
+  "llvm.hexagon.V6.vdmpyhb.acc",
+  "llvm.hexagon.V6.vdmpyhb.acc.128B",
+  "llvm.hexagon.V6.vdmpyhb.dv",
+  "llvm.hexagon.V6.vdmpyhb.dv.128B",
+  "llvm.hexagon.V6.vdmpyhb.dv.acc",
+  "llvm.hexagon.V6.vdmpyhb.dv.acc.128B",
+  "llvm.hexagon.V6.vdmpyhisat",
+  "llvm.hexagon.V6.vdmpyhisat.128B",
+  "llvm.hexagon.V6.vdmpyhisat.acc",
+  "llvm.hexagon.V6.vdmpyhisat.acc.128B",
+  "llvm.hexagon.V6.vdmpyhsat",
+  "llvm.hexagon.V6.vdmpyhsat.128B",
+  "llvm.hexagon.V6.vdmpyhsat.acc",
+  "llvm.hexagon.V6.vdmpyhsat.acc.128B",
+  "llvm.hexagon.V6.vdmpyhsuisat",
+  "llvm.hexagon.V6.vdmpyhsuisat.128B",
+  "llvm.hexagon.V6.vdmpyhsuisat.acc",
+  "llvm.hexagon.V6.vdmpyhsuisat.acc.128B",
+  "llvm.hexagon.V6.vdmpyhsusat",
+  "llvm.hexagon.V6.vdmpyhsusat.128B",
+  "llvm.hexagon.V6.vdmpyhsusat.acc",
+  "llvm.hexagon.V6.vdmpyhsusat.acc.128B",
+  "llvm.hexagon.V6.vdmpyhvsat",
+  "llvm.hexagon.V6.vdmpyhvsat.128B",
+  "llvm.hexagon.V6.vdmpyhvsat.acc",
+  "llvm.hexagon.V6.vdmpyhvsat.acc.128B",
+  "llvm.hexagon.V6.vdsaduh",
+  "llvm.hexagon.V6.vdsaduh.128B",
+  "llvm.hexagon.V6.vdsaduh.acc",
+  "llvm.hexagon.V6.vdsaduh.acc.128B",
+  "llvm.hexagon.V6.veqb",
+  "llvm.hexagon.V6.veqb.128B",
+  "llvm.hexagon.V6.veqb.and",
+  "llvm.hexagon.V6.veqb.and.128B",
+  "llvm.hexagon.V6.veqb.or",
+  "llvm.hexagon.V6.veqb.or.128B",
+  "llvm.hexagon.V6.veqb.xor",
+  "llvm.hexagon.V6.veqb.xor.128B",
+  "llvm.hexagon.V6.veqh",
+  "llvm.hexagon.V6.veqh.128B",
+  "llvm.hexagon.V6.veqh.and",
+  "llvm.hexagon.V6.veqh.and.128B",
+  "llvm.hexagon.V6.veqh.or",
+  "llvm.hexagon.V6.veqh.or.128B",
+  "llvm.hexagon.V6.veqh.xor",
+  "llvm.hexagon.V6.veqh.xor.128B",
+  "llvm.hexagon.V6.veqw",
+  "llvm.hexagon.V6.veqw.128B",
+  "llvm.hexagon.V6.veqw.and",
+  "llvm.hexagon.V6.veqw.and.128B",
+  "llvm.hexagon.V6.veqw.or",
+  "llvm.hexagon.V6.veqw.or.128B",
+  "llvm.hexagon.V6.veqw.xor",
+  "llvm.hexagon.V6.veqw.xor.128B",
+  "llvm.hexagon.V6.vgathermh",
+  "llvm.hexagon.V6.vgathermh.128B",
+  "llvm.hexagon.V6.vgathermhq",
+  "llvm.hexagon.V6.vgathermhq.128B",
+  "llvm.hexagon.V6.vgathermhw",
+  "llvm.hexagon.V6.vgathermhw.128B",
+  "llvm.hexagon.V6.vgathermhwq",
+  "llvm.hexagon.V6.vgathermhwq.128B",
+  "llvm.hexagon.V6.vgathermw",
+  "llvm.hexagon.V6.vgathermw.128B",
+  "llvm.hexagon.V6.vgathermwq",
+  "llvm.hexagon.V6.vgathermwq.128B",
+  "llvm.hexagon.V6.vgtb",
+  "llvm.hexagon.V6.vgtb.128B",
+  "llvm.hexagon.V6.vgtb.and",
+  "llvm.hexagon.V6.vgtb.and.128B",
+  "llvm.hexagon.V6.vgtb.or",
+  "llvm.hexagon.V6.vgtb.or.128B",
+  "llvm.hexagon.V6.vgtb.xor",
+  "llvm.hexagon.V6.vgtb.xor.128B",
+  "llvm.hexagon.V6.vgth",
+  "llvm.hexagon.V6.vgth.128B",
+  "llvm.hexagon.V6.vgth.and",
+  "llvm.hexagon.V6.vgth.and.128B",
+  "llvm.hexagon.V6.vgth.or",
+  "llvm.hexagon.V6.vgth.or.128B",
+  "llvm.hexagon.V6.vgth.xor",
+  "llvm.hexagon.V6.vgth.xor.128B",
+  "llvm.hexagon.V6.vgtub",
+  "llvm.hexagon.V6.vgtub.128B",
+  "llvm.hexagon.V6.vgtub.and",
+  "llvm.hexagon.V6.vgtub.and.128B",
+  "llvm.hexagon.V6.vgtub.or",
+  "llvm.hexagon.V6.vgtub.or.128B",
+  "llvm.hexagon.V6.vgtub.xor",
+  "llvm.hexagon.V6.vgtub.xor.128B",
+  "llvm.hexagon.V6.vgtuh",
+  "llvm.hexagon.V6.vgtuh.128B",
+  "llvm.hexagon.V6.vgtuh.and",
+  "llvm.hexagon.V6.vgtuh.and.128B",
+  "llvm.hexagon.V6.vgtuh.or",
+  "llvm.hexagon.V6.vgtuh.or.128B",
+  "llvm.hexagon.V6.vgtuh.xor",
+  "llvm.hexagon.V6.vgtuh.xor.128B",
+  "llvm.hexagon.V6.vgtuw",
+  "llvm.hexagon.V6.vgtuw.128B",
+  "llvm.hexagon.V6.vgtuw.and",
+  "llvm.hexagon.V6.vgtuw.and.128B",
+  "llvm.hexagon.V6.vgtuw.or",
+  "llvm.hexagon.V6.vgtuw.or.128B",
+  "llvm.hexagon.V6.vgtuw.xor",
+  "llvm.hexagon.V6.vgtuw.xor.128B",
+  "llvm.hexagon.V6.vgtw",
+  "llvm.hexagon.V6.vgtw.128B",
+  "llvm.hexagon.V6.vgtw.and",
+  "llvm.hexagon.V6.vgtw.and.128B",
+  "llvm.hexagon.V6.vgtw.or",
+  "llvm.hexagon.V6.vgtw.or.128B",
+  "llvm.hexagon.V6.vgtw.xor",
+  "llvm.hexagon.V6.vgtw.xor.128B",
+  "llvm.hexagon.V6.vinsertwr",
+  "llvm.hexagon.V6.vinsertwr.128B",
+  "llvm.hexagon.V6.vlalignb",
+  "llvm.hexagon.V6.vlalignb.128B",
+  "llvm.hexagon.V6.vlalignbi",
+  "llvm.hexagon.V6.vlalignbi.128B",
+  "llvm.hexagon.V6.vlsrb",
+  "llvm.hexagon.V6.vlsrb.128B",
+  "llvm.hexagon.V6.vlsrh",
+  "llvm.hexagon.V6.vlsrh.128B",
+  "llvm.hexagon.V6.vlsrhv",
+  "llvm.hexagon.V6.vlsrhv.128B",
+  "llvm.hexagon.V6.vlsrw",
+  "llvm.hexagon.V6.vlsrw.128B",
+  "llvm.hexagon.V6.vlsrwv",
+  "llvm.hexagon.V6.vlsrwv.128B",
+  "llvm.hexagon.V6.vlut4",
+  "llvm.hexagon.V6.vlut4.128B",
+  "llvm.hexagon.V6.vlutvvb",
+  "llvm.hexagon.V6.vlutvvb.128B",
+  "llvm.hexagon.V6.vlutvvb.nm",
+  "llvm.hexagon.V6.vlutvvb.nm.128B",
+  "llvm.hexagon.V6.vlutvvb.oracc",
+  "llvm.hexagon.V6.vlutvvb.oracc.128B",
+  "llvm.hexagon.V6.vlutvvb.oracci",
+  "llvm.hexagon.V6.vlutvvb.oracci.128B",
+  "llvm.hexagon.V6.vlutvvbi",
+  "llvm.hexagon.V6.vlutvvbi.128B",
+  "llvm.hexagon.V6.vlutvwh",
+  "llvm.hexagon.V6.vlutvwh.128B",
+  "llvm.hexagon.V6.vlutvwh.nm",
+  "llvm.hexagon.V6.vlutvwh.nm.128B",
+  "llvm.hexagon.V6.vlutvwh.oracc",
+  "llvm.hexagon.V6.vlutvwh.oracc.128B",
+  "llvm.hexagon.V6.vlutvwh.oracci",
+  "llvm.hexagon.V6.vlutvwh.oracci.128B",
+  "llvm.hexagon.V6.vlutvwhi",
+  "llvm.hexagon.V6.vlutvwhi.128B",
+  "llvm.hexagon.V6.vmaskedstorenq",
+  "llvm.hexagon.V6.vmaskedstorenq.128B",
+  "llvm.hexagon.V6.vmaskedstorentnq",
+  "llvm.hexagon.V6.vmaskedstorentnq.128B",
+  "llvm.hexagon.V6.vmaskedstorentq",
+  "llvm.hexagon.V6.vmaskedstorentq.128B",
+  "llvm.hexagon.V6.vmaskedstoreq",
+  "llvm.hexagon.V6.vmaskedstoreq.128B",
+  "llvm.hexagon.V6.vmaxb",
+  "llvm.hexagon.V6.vmaxb.128B",
+  "llvm.hexagon.V6.vmaxh",
+  "llvm.hexagon.V6.vmaxh.128B",
+  "llvm.hexagon.V6.vmaxub",
+  "llvm.hexagon.V6.vmaxub.128B",
+  "llvm.hexagon.V6.vmaxuh",
+  "llvm.hexagon.V6.vmaxuh.128B",
+  "llvm.hexagon.V6.vmaxw",
+  "llvm.hexagon.V6.vmaxw.128B",
+  "llvm.hexagon.V6.vminb",
+  "llvm.hexagon.V6.vminb.128B",
+  "llvm.hexagon.V6.vminh",
+  "llvm.hexagon.V6.vminh.128B",
+  "llvm.hexagon.V6.vminub",
+  "llvm.hexagon.V6.vminub.128B",
+  "llvm.hexagon.V6.vminuh",
+  "llvm.hexagon.V6.vminuh.128B",
+  "llvm.hexagon.V6.vminw",
+  "llvm.hexagon.V6.vminw.128B",
+  "llvm.hexagon.V6.vmpabus",
+  "llvm.hexagon.V6.vmpabus.128B",
+  "llvm.hexagon.V6.vmpabus.acc",
+  "llvm.hexagon.V6.vmpabus.acc.128B",
+  "llvm.hexagon.V6.vmpabusv",
+  "llvm.hexagon.V6.vmpabusv.128B",
+  "llvm.hexagon.V6.vmpabuu",
+  "llvm.hexagon.V6.vmpabuu.128B",
+  "llvm.hexagon.V6.vmpabuu.acc",
+  "llvm.hexagon.V6.vmpabuu.acc.128B",
+  "llvm.hexagon.V6.vmpabuuv",
+  "llvm.hexagon.V6.vmpabuuv.128B",
+  "llvm.hexagon.V6.vmpahb",
+  "llvm.hexagon.V6.vmpahb.128B",
+  "llvm.hexagon.V6.vmpahb.acc",
+  "llvm.hexagon.V6.vmpahb.acc.128B",
+  "llvm.hexagon.V6.vmpahhsat",
+  "llvm.hexagon.V6.vmpahhsat.128B",
+  "llvm.hexagon.V6.vmpauhb",
+  "llvm.hexagon.V6.vmpauhb.128B",
+  "llvm.hexagon.V6.vmpauhb.acc",
+  "llvm.hexagon.V6.vmpauhb.acc.128B",
+  "llvm.hexagon.V6.vmpauhuhsat",
+  "llvm.hexagon.V6.vmpauhuhsat.128B",
+  "llvm.hexagon.V6.vmpsuhuhsat",
+  "llvm.hexagon.V6.vmpsuhuhsat.128B",
+  "llvm.hexagon.V6.vmpybus",
+  "llvm.hexagon.V6.vmpybus.128B",
+  "llvm.hexagon.V6.vmpybus.acc",
+  "llvm.hexagon.V6.vmpybus.acc.128B",
+  "llvm.hexagon.V6.vmpybusv",
+  "llvm.hexagon.V6.vmpybusv.128B",
+  "llvm.hexagon.V6.vmpybusv.acc",
+  "llvm.hexagon.V6.vmpybusv.acc.128B",
+  "llvm.hexagon.V6.vmpybv",
+  "llvm.hexagon.V6.vmpybv.128B",
+  "llvm.hexagon.V6.vmpybv.acc",
+  "llvm.hexagon.V6.vmpybv.acc.128B",
+  "llvm.hexagon.V6.vmpyewuh",
+  "llvm.hexagon.V6.vmpyewuh.128B",
+  "llvm.hexagon.V6.vmpyewuh.64",
+  "llvm.hexagon.V6.vmpyewuh.64.128B",
+  "llvm.hexagon.V6.vmpyh",
+  "llvm.hexagon.V6.vmpyh.128B",
+  "llvm.hexagon.V6.vmpyh.acc",
+  "llvm.hexagon.V6.vmpyh.acc.128B",
+  "llvm.hexagon.V6.vmpyhsat.acc",
+  "llvm.hexagon.V6.vmpyhsat.acc.128B",
+  "llvm.hexagon.V6.vmpyhsrs",
+  "llvm.hexagon.V6.vmpyhsrs.128B",
+  "llvm.hexagon.V6.vmpyhss",
+  "llvm.hexagon.V6.vmpyhss.128B",
+  "llvm.hexagon.V6.vmpyhus",
+  "llvm.hexagon.V6.vmpyhus.128B",
+  "llvm.hexagon.V6.vmpyhus.acc",
+  "llvm.hexagon.V6.vmpyhus.acc.128B",
+  "llvm.hexagon.V6.vmpyhv",
+  "llvm.hexagon.V6.vmpyhv.128B",
+  "llvm.hexagon.V6.vmpyhv.acc",
+  "llvm.hexagon.V6.vmpyhv.acc.128B",
+  "llvm.hexagon.V6.vmpyhvsrs",
+  "llvm.hexagon.V6.vmpyhvsrs.128B",
+  "llvm.hexagon.V6.vmpyieoh",
+  "llvm.hexagon.V6.vmpyieoh.128B",
+  "llvm.hexagon.V6.vmpyiewh.acc",
+  "llvm.hexagon.V6.vmpyiewh.acc.128B",
+  "llvm.hexagon.V6.vmpyiewuh",
+  "llvm.hexagon.V6.vmpyiewuh.128B",
+  "llvm.hexagon.V6.vmpyiewuh.acc",
+  "llvm.hexagon.V6.vmpyiewuh.acc.128B",
+  "llvm.hexagon.V6.vmpyih",
+  "llvm.hexagon.V6.vmpyih.128B",
+  "llvm.hexagon.V6.vmpyih.acc",
+  "llvm.hexagon.V6.vmpyih.acc.128B",
+  "llvm.hexagon.V6.vmpyihb",
+  "llvm.hexagon.V6.vmpyihb.128B",
+  "llvm.hexagon.V6.vmpyihb.acc",
+  "llvm.hexagon.V6.vmpyihb.acc.128B",
+  "llvm.hexagon.V6.vmpyiowh",
+  "llvm.hexagon.V6.vmpyiowh.128B",
+  "llvm.hexagon.V6.vmpyiwb",
+  "llvm.hexagon.V6.vmpyiwb.128B",
+  "llvm.hexagon.V6.vmpyiwb.acc",
+  "llvm.hexagon.V6.vmpyiwb.acc.128B",
+  "llvm.hexagon.V6.vmpyiwh",
+  "llvm.hexagon.V6.vmpyiwh.128B",
+  "llvm.hexagon.V6.vmpyiwh.acc",
+  "llvm.hexagon.V6.vmpyiwh.acc.128B",
+  "llvm.hexagon.V6.vmpyiwub",
+  "llvm.hexagon.V6.vmpyiwub.128B",
+  "llvm.hexagon.V6.vmpyiwub.acc",
+  "llvm.hexagon.V6.vmpyiwub.acc.128B",
+  "llvm.hexagon.V6.vmpyowh",
+  "llvm.hexagon.V6.vmpyowh.128B",
+  "llvm.hexagon.V6.vmpyowh.64.acc",
+  "llvm.hexagon.V6.vmpyowh.64.acc.128B",
+  "llvm.hexagon.V6.vmpyowh.rnd",
+  "llvm.hexagon.V6.vmpyowh.rnd.128B",
+  "llvm.hexagon.V6.vmpyowh.rnd.sacc",
+  "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B",
+  "llvm.hexagon.V6.vmpyowh.sacc",
+  "llvm.hexagon.V6.vmpyowh.sacc.128B",
+  "llvm.hexagon.V6.vmpyub",
+  "llvm.hexagon.V6.vmpyub.128B",
+  "llvm.hexagon.V6.vmpyub.acc",
+  "llvm.hexagon.V6.vmpyub.acc.128B",
+  "llvm.hexagon.V6.vmpyubv",
+  "llvm.hexagon.V6.vmpyubv.128B",
+  "llvm.hexagon.V6.vmpyubv.acc",
+  "llvm.hexagon.V6.vmpyubv.acc.128B",
+  "llvm.hexagon.V6.vmpyuh",
+  "llvm.hexagon.V6.vmpyuh.128B",
+  "llvm.hexagon.V6.vmpyuh.acc",
+  "llvm.hexagon.V6.vmpyuh.acc.128B",
+  "llvm.hexagon.V6.vmpyuhe",
+  "llvm.hexagon.V6.vmpyuhe.128B",
+  "llvm.hexagon.V6.vmpyuhe.acc",
+  "llvm.hexagon.V6.vmpyuhe.acc.128B",
+  "llvm.hexagon.V6.vmpyuhv",
+  "llvm.hexagon.V6.vmpyuhv.128B",
+  "llvm.hexagon.V6.vmpyuhv.acc",
+  "llvm.hexagon.V6.vmpyuhv.acc.128B",
+  "llvm.hexagon.V6.vmux",
+  "llvm.hexagon.V6.vmux.128B",
+  "llvm.hexagon.V6.vnavgb",
+  "llvm.hexagon.V6.vnavgb.128B",
+  "llvm.hexagon.V6.vnavgh",
+  "llvm.hexagon.V6.vnavgh.128B",
+  "llvm.hexagon.V6.vnavgub",
+  "llvm.hexagon.V6.vnavgub.128B",
+  "llvm.hexagon.V6.vnavgw",
+  "llvm.hexagon.V6.vnavgw.128B",
+  "llvm.hexagon.V6.vnormamth",
+  "llvm.hexagon.V6.vnormamth.128B",
+  "llvm.hexagon.V6.vnormamtw",
+  "llvm.hexagon.V6.vnormamtw.128B",
+  "llvm.hexagon.V6.vnot",
+  "llvm.hexagon.V6.vnot.128B",
+  "llvm.hexagon.V6.vor",
+  "llvm.hexagon.V6.vor.128B",
+  "llvm.hexagon.V6.vpackeb",
+  "llvm.hexagon.V6.vpackeb.128B",
+  "llvm.hexagon.V6.vpackeh",
+  "llvm.hexagon.V6.vpackeh.128B",
+  "llvm.hexagon.V6.vpackhb.sat",
+  "llvm.hexagon.V6.vpackhb.sat.128B",
+  "llvm.hexagon.V6.vpackhub.sat",
+  "llvm.hexagon.V6.vpackhub.sat.128B",
+  "llvm.hexagon.V6.vpackob",
+  "llvm.hexagon.V6.vpackob.128B",
+  "llvm.hexagon.V6.vpackoh",
+  "llvm.hexagon.V6.vpackoh.128B",
+  "llvm.hexagon.V6.vpackwh.sat",
+  "llvm.hexagon.V6.vpackwh.sat.128B",
+  "llvm.hexagon.V6.vpackwuh.sat",
+  "llvm.hexagon.V6.vpackwuh.sat.128B",
+  "llvm.hexagon.V6.vpopcounth",
+  "llvm.hexagon.V6.vpopcounth.128B",
+  "llvm.hexagon.V6.vprefixqb",
+  "llvm.hexagon.V6.vprefixqb.128B",
+  "llvm.hexagon.V6.vprefixqh",
+  "llvm.hexagon.V6.vprefixqh.128B",
+  "llvm.hexagon.V6.vprefixqw",
+  "llvm.hexagon.V6.vprefixqw.128B",
+  "llvm.hexagon.V6.vrdelta",
+  "llvm.hexagon.V6.vrdelta.128B",
+  "llvm.hexagon.V6.vrmpybub.rtt",
+  "llvm.hexagon.V6.vrmpybub.rtt.128B",
+  "llvm.hexagon.V6.vrmpybub.rtt.acc",
+  "llvm.hexagon.V6.vrmpybub.rtt.acc.128B",
+  "llvm.hexagon.V6.vrmpybus",
+  "llvm.hexagon.V6.vrmpybus.128B",
+  "llvm.hexagon.V6.vrmpybus.acc",
+  "llvm.hexagon.V6.vrmpybus.acc.128B",
+  "llvm.hexagon.V6.vrmpybusi",
+  "llvm.hexagon.V6.vrmpybusi.128B",
+  "llvm.hexagon.V6.vrmpybusi.acc",
+  "llvm.hexagon.V6.vrmpybusi.acc.128B",
+  "llvm.hexagon.V6.vrmpybusv",
+  "llvm.hexagon.V6.vrmpybusv.128B",
+  "llvm.hexagon.V6.vrmpybusv.acc",
+  "llvm.hexagon.V6.vrmpybusv.acc.128B",
+  "llvm.hexagon.V6.vrmpybv",
+  "llvm.hexagon.V6.vrmpybv.128B",
+  "llvm.hexagon.V6.vrmpybv.acc",
+  "llvm.hexagon.V6.vrmpybv.acc.128B",
+  "llvm.hexagon.V6.vrmpyub",
+  "llvm.hexagon.V6.vrmpyub.128B",
+  "llvm.hexagon.V6.vrmpyub.acc",
+  "llvm.hexagon.V6.vrmpyub.acc.128B",
+  "llvm.hexagon.V6.vrmpyub.rtt",
+  "llvm.hexagon.V6.vrmpyub.rtt.128B",
+  "llvm.hexagon.V6.vrmpyub.rtt.acc",
+  "llvm.hexagon.V6.vrmpyub.rtt.acc.128B",
+  "llvm.hexagon.V6.vrmpyubi",
+  "llvm.hexagon.V6.vrmpyubi.128B",
+  "llvm.hexagon.V6.vrmpyubi.acc",
+  "llvm.hexagon.V6.vrmpyubi.acc.128B",
+  "llvm.hexagon.V6.vrmpyubv",
+  "llvm.hexagon.V6.vrmpyubv.128B",
+  "llvm.hexagon.V6.vrmpyubv.acc",
+  "llvm.hexagon.V6.vrmpyubv.acc.128B",
+  "llvm.hexagon.V6.vror",
+  "llvm.hexagon.V6.vror.128B",
+  "llvm.hexagon.V6.vroundhb",
+  "llvm.hexagon.V6.vroundhb.128B",
+  "llvm.hexagon.V6.vroundhub",
+  "llvm.hexagon.V6.vroundhub.128B",
+  "llvm.hexagon.V6.vrounduhub",
+  "llvm.hexagon.V6.vrounduhub.128B",
+  "llvm.hexagon.V6.vrounduwuh",
+  "llvm.hexagon.V6.vrounduwuh.128B",
+  "llvm.hexagon.V6.vroundwh",
+  "llvm.hexagon.V6.vroundwh.128B",
+  "llvm.hexagon.V6.vroundwuh",
+  "llvm.hexagon.V6.vroundwuh.128B",
+  "llvm.hexagon.V6.vrsadubi",
+  "llvm.hexagon.V6.vrsadubi.128B",
+  "llvm.hexagon.V6.vrsadubi.acc",
+  "llvm.hexagon.V6.vrsadubi.acc.128B",
+  "llvm.hexagon.V6.vsathub",
+  "llvm.hexagon.V6.vsathub.128B",
+  "llvm.hexagon.V6.vsatuwuh",
+  "llvm.hexagon.V6.vsatuwuh.128B",
+  "llvm.hexagon.V6.vsatwh",
+  "llvm.hexagon.V6.vsatwh.128B",
+  "llvm.hexagon.V6.vsb",
+  "llvm.hexagon.V6.vsb.128B",
+  "llvm.hexagon.V6.vscattermh",
+  "llvm.hexagon.V6.vscattermh.128B",
+  "llvm.hexagon.V6.vscattermh.add",
+  "llvm.hexagon.V6.vscattermh.add.128B",
+  "llvm.hexagon.V6.vscattermhq",
+  "llvm.hexagon.V6.vscattermhq.128B",
+  "llvm.hexagon.V6.vscattermhw",
+  "llvm.hexagon.V6.vscattermhw.128B",
+  "llvm.hexagon.V6.vscattermhw.add",
+  "llvm.hexagon.V6.vscattermhw.add.128B",
+  "llvm.hexagon.V6.vscattermhwq",
+  "llvm.hexagon.V6.vscattermhwq.128B",
+  "llvm.hexagon.V6.vscattermw",
+  "llvm.hexagon.V6.vscattermw.128B",
+  "llvm.hexagon.V6.vscattermw.add",
+  "llvm.hexagon.V6.vscattermw.add.128B",
+  "llvm.hexagon.V6.vscattermwq",
+  "llvm.hexagon.V6.vscattermwq.128B",
+  "llvm.hexagon.V6.vsh",
+  "llvm.hexagon.V6.vsh.128B",
+  "llvm.hexagon.V6.vshufeh",
+  "llvm.hexagon.V6.vshufeh.128B",
+  "llvm.hexagon.V6.vshuffb",
+  "llvm.hexagon.V6.vshuffb.128B",
+  "llvm.hexagon.V6.vshuffeb",
+  "llvm.hexagon.V6.vshuffeb.128B",
+  "llvm.hexagon.V6.vshuffh",
+  "llvm.hexagon.V6.vshuffh.128B",
+  "llvm.hexagon.V6.vshuffob",
+  "llvm.hexagon.V6.vshuffob.128B",
+  "llvm.hexagon.V6.vshuffvdd",
+  "llvm.hexagon.V6.vshuffvdd.128B",
+  "llvm.hexagon.V6.vshufoeb",
+  "llvm.hexagon.V6.vshufoeb.128B",
+  "llvm.hexagon.V6.vshufoeh",
+  "llvm.hexagon.V6.vshufoeh.128B",
+  "llvm.hexagon.V6.vshufoh",
+  "llvm.hexagon.V6.vshufoh.128B",
+  "llvm.hexagon.V6.vsubb",
+  "llvm.hexagon.V6.vsubb.128B",
+  "llvm.hexagon.V6.vsubb.dv",
+  "llvm.hexagon.V6.vsubb.dv.128B",
+  "llvm.hexagon.V6.vsubbnq",
+  "llvm.hexagon.V6.vsubbnq.128B",
+  "llvm.hexagon.V6.vsubbq",
+  "llvm.hexagon.V6.vsubbq.128B",
+  "llvm.hexagon.V6.vsubbsat",
+  "llvm.hexagon.V6.vsubbsat.128B",
+  "llvm.hexagon.V6.vsubbsat.dv",
+  "llvm.hexagon.V6.vsubbsat.dv.128B",
+  "llvm.hexagon.V6.vsubcarry",
+  "llvm.hexagon.V6.vsubcarry.128B",
+  "llvm.hexagon.V6.vsubh",
+  "llvm.hexagon.V6.vsubh.128B",
+  "llvm.hexagon.V6.vsubh.dv",
+  "llvm.hexagon.V6.vsubh.dv.128B",
+  "llvm.hexagon.V6.vsubhnq",
+  "llvm.hexagon.V6.vsubhnq.128B",
+  "llvm.hexagon.V6.vsubhq",
+  "llvm.hexagon.V6.vsubhq.128B",
+  "llvm.hexagon.V6.vsubhsat",
+  "llvm.hexagon.V6.vsubhsat.128B",
+  "llvm.hexagon.V6.vsubhsat.dv",
+  "llvm.hexagon.V6.vsubhsat.dv.128B",
+  "llvm.hexagon.V6.vsubhw",
+  "llvm.hexagon.V6.vsubhw.128B",
+  "llvm.hexagon.V6.vsububh",
+  "llvm.hexagon.V6.vsububh.128B",
+  "llvm.hexagon.V6.vsububsat",
+  "llvm.hexagon.V6.vsububsat.128B",
+  "llvm.hexagon.V6.vsububsat.dv",
+  "llvm.hexagon.V6.vsububsat.dv.128B",
+  "llvm.hexagon.V6.vsubububb.sat",
+  "llvm.hexagon.V6.vsubububb.sat.128B",
+  "llvm.hexagon.V6.vsubuhsat",
+  "llvm.hexagon.V6.vsubuhsat.128B",
+  "llvm.hexagon.V6.vsubuhsat.dv",
+  "llvm.hexagon.V6.vsubuhsat.dv.128B",
+  "llvm.hexagon.V6.vsubuhw",
+  "llvm.hexagon.V6.vsubuhw.128B",
+  "llvm.hexagon.V6.vsubuwsat",
+  "llvm.hexagon.V6.vsubuwsat.128B",
+  "llvm.hexagon.V6.vsubuwsat.dv",
+  "llvm.hexagon.V6.vsubuwsat.dv.128B",
+  "llvm.hexagon.V6.vsubw",
+  "llvm.hexagon.V6.vsubw.128B",
+  "llvm.hexagon.V6.vsubw.dv",
+  "llvm.hexagon.V6.vsubw.dv.128B",
+  "llvm.hexagon.V6.vsubwnq",
+  "llvm.hexagon.V6.vsubwnq.128B",
+  "llvm.hexagon.V6.vsubwq",
+  "llvm.hexagon.V6.vsubwq.128B",
+  "llvm.hexagon.V6.vsubwsat",
+  "llvm.hexagon.V6.vsubwsat.128B",
+  "llvm.hexagon.V6.vsubwsat.dv",
+  "llvm.hexagon.V6.vsubwsat.dv.128B",
+  "llvm.hexagon.V6.vswap",
+  "llvm.hexagon.V6.vswap.128B",
+  "llvm.hexagon.V6.vtmpyb",
+  "llvm.hexagon.V6.vtmpyb.128B",
+  "llvm.hexagon.V6.vtmpyb.acc",
+  "llvm.hexagon.V6.vtmpyb.acc.128B",
+  "llvm.hexagon.V6.vtmpybus",
+  "llvm.hexagon.V6.vtmpybus.128B",
+  "llvm.hexagon.V6.vtmpybus.acc",
+  "llvm.hexagon.V6.vtmpybus.acc.128B",
+  "llvm.hexagon.V6.vtmpyhb",
+  "llvm.hexagon.V6.vtmpyhb.128B",
+  "llvm.hexagon.V6.vtmpyhb.acc",
+  "llvm.hexagon.V6.vtmpyhb.acc.128B",
+  "llvm.hexagon.V6.vunpackb",
+  "llvm.hexagon.V6.vunpackb.128B",
+  "llvm.hexagon.V6.vunpackh",
+  "llvm.hexagon.V6.vunpackh.128B",
+  "llvm.hexagon.V6.vunpackob",
+  "llvm.hexagon.V6.vunpackob.128B",
+  "llvm.hexagon.V6.vunpackoh",
+  "llvm.hexagon.V6.vunpackoh.128B",
+  "llvm.hexagon.V6.vunpackub",
+  "llvm.hexagon.V6.vunpackub.128B",
+  "llvm.hexagon.V6.vunpackuh",
+  "llvm.hexagon.V6.vunpackuh.128B",
+  "llvm.hexagon.V6.vxor",
+  "llvm.hexagon.V6.vxor.128B",
+  "llvm.hexagon.V6.vzb",
+  "llvm.hexagon.V6.vzb.128B",
+  "llvm.hexagon.V6.vzh",
+  "llvm.hexagon.V6.vzh.128B",
+  "llvm.hexagon.Y2.dccleana",
+  "llvm.hexagon.Y2.dccleaninva",
+  "llvm.hexagon.Y2.dcinva",
+  "llvm.hexagon.Y2.dczeroa",
+  "llvm.hexagon.Y4.l2fetch",
+  "llvm.hexagon.Y5.l2fetch",
+  "llvm.hexagon.circ.ldb",
+  "llvm.hexagon.circ.ldd",
+  "llvm.hexagon.circ.ldh",
+  "llvm.hexagon.circ.ldub",
+  "llvm.hexagon.circ.lduh",
+  "llvm.hexagon.circ.ldw",
+  "llvm.hexagon.circ.stb",
+  "llvm.hexagon.circ.std",
+  "llvm.hexagon.circ.sth",
+  "llvm.hexagon.circ.sthhi",
+  "llvm.hexagon.circ.stw",
+  "llvm.hexagon.mm256i.vaddw",
+  "llvm.hexagon.prefetch",
+  "llvm.mips.absq.s.ph",
+  "llvm.mips.absq.s.qb",
+  "llvm.mips.absq.s.w",
+  "llvm.mips.add.a.b",
+  "llvm.mips.add.a.d",
+  "llvm.mips.add.a.h",
+  "llvm.mips.add.a.w",
+  "llvm.mips.addq.ph",
+  "llvm.mips.addq.s.ph",
+  "llvm.mips.addq.s.w",
+  "llvm.mips.addqh.ph",
+  "llvm.mips.addqh.r.ph",
+  "llvm.mips.addqh.r.w",
+  "llvm.mips.addqh.w",
+  "llvm.mips.adds.a.b",
+  "llvm.mips.adds.a.d",
+  "llvm.mips.adds.a.h",
+  "llvm.mips.adds.a.w",
+  "llvm.mips.adds.s.b",
+  "llvm.mips.adds.s.d",
+  "llvm.mips.adds.s.h",
+  "llvm.mips.adds.s.w",
+  "llvm.mips.adds.u.b",
+  "llvm.mips.adds.u.d",
+  "llvm.mips.adds.u.h",
+  "llvm.mips.adds.u.w",
+  "llvm.mips.addsc",
+  "llvm.mips.addu.ph",
+  "llvm.mips.addu.qb",
+  "llvm.mips.addu.s.ph",
+  "llvm.mips.addu.s.qb",
+  "llvm.mips.adduh.qb",
+  "llvm.mips.adduh.r.qb",
+  "llvm.mips.addv.b",
+  "llvm.mips.addv.d",
+  "llvm.mips.addv.h",
+  "llvm.mips.addv.w",
+  "llvm.mips.addvi.b",
+  "llvm.mips.addvi.d",
+  "llvm.mips.addvi.h",
+  "llvm.mips.addvi.w",
+  "llvm.mips.addwc",
+  "llvm.mips.and.v",
+  "llvm.mips.andi.b",
+  "llvm.mips.append",
+  "llvm.mips.asub.s.b",
+  "llvm.mips.asub.s.d",
+  "llvm.mips.asub.s.h",
+  "llvm.mips.asub.s.w",
+  "llvm.mips.asub.u.b",
+  "llvm.mips.asub.u.d",
+  "llvm.mips.asub.u.h",
+  "llvm.mips.asub.u.w",
+  "llvm.mips.ave.s.b",
+  "llvm.mips.ave.s.d",
+  "llvm.mips.ave.s.h",
+  "llvm.mips.ave.s.w",
+  "llvm.mips.ave.u.b",
+  "llvm.mips.ave.u.d",
+  "llvm.mips.ave.u.h",
+  "llvm.mips.ave.u.w",
+  "llvm.mips.aver.s.b",
+  "llvm.mips.aver.s.d",
+  "llvm.mips.aver.s.h",
+  "llvm.mips.aver.s.w",
+  "llvm.mips.aver.u.b",
+  "llvm.mips.aver.u.d",
+  "llvm.mips.aver.u.h",
+  "llvm.mips.aver.u.w",
+  "llvm.mips.balign",
+  "llvm.mips.bclr.b",
+  "llvm.mips.bclr.d",
+  "llvm.mips.bclr.h",
+  "llvm.mips.bclr.w",
+  "llvm.mips.bclri.b",
+  "llvm.mips.bclri.d",
+  "llvm.mips.bclri.h",
+  "llvm.mips.bclri.w",
+  "llvm.mips.binsl.b",
+  "llvm.mips.binsl.d",
+  "llvm.mips.binsl.h",
+  "llvm.mips.binsl.w",
+  "llvm.mips.binsli.b",
+  "llvm.mips.binsli.d",
+  "llvm.mips.binsli.h",
+  "llvm.mips.binsli.w",
+  "llvm.mips.binsr.b",
+  "llvm.mips.binsr.d",
+  "llvm.mips.binsr.h",
+  "llvm.mips.binsr.w",
+  "llvm.mips.binsri.b",
+  "llvm.mips.binsri.d",
+  "llvm.mips.binsri.h",
+  "llvm.mips.binsri.w",
+  "llvm.mips.bitrev",
+  "llvm.mips.bmnz.v",
+  "llvm.mips.bmnzi.b",
+  "llvm.mips.bmz.v",
+  "llvm.mips.bmzi.b",
+  "llvm.mips.bneg.b",
+  "llvm.mips.bneg.d",
+  "llvm.mips.bneg.h",
+  "llvm.mips.bneg.w",
+  "llvm.mips.bnegi.b",
+  "llvm.mips.bnegi.d",
+  "llvm.mips.bnegi.h",
+  "llvm.mips.bnegi.w",
+  "llvm.mips.bnz.b",
+  "llvm.mips.bnz.d",
+  "llvm.mips.bnz.h",
+  "llvm.mips.bnz.v",
+  "llvm.mips.bnz.w",
+  "llvm.mips.bposge32",
+  "llvm.mips.bsel.v",
+  "llvm.mips.bseli.b",
+  "llvm.mips.bset.b",
+  "llvm.mips.bset.d",
+  "llvm.mips.bset.h",
+  "llvm.mips.bset.w",
+  "llvm.mips.bseti.b",
+  "llvm.mips.bseti.d",
+  "llvm.mips.bseti.h",
+  "llvm.mips.bseti.w",
+  "llvm.mips.bz.b",
+  "llvm.mips.bz.d",
+  "llvm.mips.bz.h",
+  "llvm.mips.bz.v",
+  "llvm.mips.bz.w",
+  "llvm.mips.ceq.b",
+  "llvm.mips.ceq.d",
+  "llvm.mips.ceq.h",
+  "llvm.mips.ceq.w",
+  "llvm.mips.ceqi.b",
+  "llvm.mips.ceqi.d",
+  "llvm.mips.ceqi.h",
+  "llvm.mips.ceqi.w",
+  "llvm.mips.cfcmsa",
+  "llvm.mips.cle.s.b",
+  "llvm.mips.cle.s.d",
+  "llvm.mips.cle.s.h",
+  "llvm.mips.cle.s.w",
+  "llvm.mips.cle.u.b",
+  "llvm.mips.cle.u.d",
+  "llvm.mips.cle.u.h",
+  "llvm.mips.cle.u.w",
+  "llvm.mips.clei.s.b",
+  "llvm.mips.clei.s.d",
+  "llvm.mips.clei.s.h",
+  "llvm.mips.clei.s.w",
+  "llvm.mips.clei.u.b",
+  "llvm.mips.clei.u.d",
+  "llvm.mips.clei.u.h",
+  "llvm.mips.clei.u.w",
+  "llvm.mips.clt.s.b",
+  "llvm.mips.clt.s.d",
+  "llvm.mips.clt.s.h",
+  "llvm.mips.clt.s.w",
+  "llvm.mips.clt.u.b",
+  "llvm.mips.clt.u.d",
+  "llvm.mips.clt.u.h",
+  "llvm.mips.clt.u.w",
+  "llvm.mips.clti.s.b",
+  "llvm.mips.clti.s.d",
+  "llvm.mips.clti.s.h",
+  "llvm.mips.clti.s.w",
+  "llvm.mips.clti.u.b",
+  "llvm.mips.clti.u.d",
+  "llvm.mips.clti.u.h",
+  "llvm.mips.clti.u.w",
+  "llvm.mips.cmp.eq.ph",
+  "llvm.mips.cmp.le.ph",
+  "llvm.mips.cmp.lt.ph",
+  "llvm.mips.cmpgdu.eq.qb",
+  "llvm.mips.cmpgdu.le.qb",
+  "llvm.mips.cmpgdu.lt.qb",
+  "llvm.mips.cmpgu.eq.qb",
+  "llvm.mips.cmpgu.le.qb",
+  "llvm.mips.cmpgu.lt.qb",
+  "llvm.mips.cmpu.eq.qb",
+  "llvm.mips.cmpu.le.qb",
+  "llvm.mips.cmpu.lt.qb",
+  "llvm.mips.copy.s.b",
+  "llvm.mips.copy.s.d",
+  "llvm.mips.copy.s.h",
+  "llvm.mips.copy.s.w",
+  "llvm.mips.copy.u.b",
+  "llvm.mips.copy.u.d",
+  "llvm.mips.copy.u.h",
+  "llvm.mips.copy.u.w",
+  "llvm.mips.ctcmsa",
+  "llvm.mips.div.s.b",
+  "llvm.mips.div.s.d",
+  "llvm.mips.div.s.h",
+  "llvm.mips.div.s.w",
+  "llvm.mips.div.u.b",
+  "llvm.mips.div.u.d",
+  "llvm.mips.div.u.h",
+  "llvm.mips.div.u.w",
+  "llvm.mips.dlsa",
+  "llvm.mips.dotp.s.d",
+  "llvm.mips.dotp.s.h",
+  "llvm.mips.dotp.s.w",
+  "llvm.mips.dotp.u.d",
+  "llvm.mips.dotp.u.h",
+  "llvm.mips.dotp.u.w",
+  "llvm.mips.dpa.w.ph",
+  "llvm.mips.dpadd.s.d",
+  "llvm.mips.dpadd.s.h",
+  "llvm.mips.dpadd.s.w",
+  "llvm.mips.dpadd.u.d",
+  "llvm.mips.dpadd.u.h",
+  "llvm.mips.dpadd.u.w",
+  "llvm.mips.dpaq.s.w.ph",
+  "llvm.mips.dpaq.sa.l.w",
+  "llvm.mips.dpaqx.s.w.ph",
+  "llvm.mips.dpaqx.sa.w.ph",
+  "llvm.mips.dpau.h.qbl",
+  "llvm.mips.dpau.h.qbr",
+  "llvm.mips.dpax.w.ph",
+  "llvm.mips.dps.w.ph",
+  "llvm.mips.dpsq.s.w.ph",
+  "llvm.mips.dpsq.sa.l.w",
+  "llvm.mips.dpsqx.s.w.ph",
+  "llvm.mips.dpsqx.sa.w.ph",
+  "llvm.mips.dpsu.h.qbl",
+  "llvm.mips.dpsu.h.qbr",
+  "llvm.mips.dpsub.s.d",
+  "llvm.mips.dpsub.s.h",
+  "llvm.mips.dpsub.s.w",
+  "llvm.mips.dpsub.u.d",
+  "llvm.mips.dpsub.u.h",
+  "llvm.mips.dpsub.u.w",
+  "llvm.mips.dpsx.w.ph",
+  "llvm.mips.extp",
+  "llvm.mips.extpdp",
+  "llvm.mips.extr.r.w",
+  "llvm.mips.extr.rs.w",
+  "llvm.mips.extr.s.h",
+  "llvm.mips.extr.w",
+  "llvm.mips.fadd.d",
+  "llvm.mips.fadd.w",
+  "llvm.mips.fcaf.d",
+  "llvm.mips.fcaf.w",
+  "llvm.mips.fceq.d",
+  "llvm.mips.fceq.w",
+  "llvm.mips.fclass.d",
+  "llvm.mips.fclass.w",
+  "llvm.mips.fcle.d",
+  "llvm.mips.fcle.w",
+  "llvm.mips.fclt.d",
+  "llvm.mips.fclt.w",
+  "llvm.mips.fcne.d",
+  "llvm.mips.fcne.w",
+  "llvm.mips.fcor.d",
+  "llvm.mips.fcor.w",
+  "llvm.mips.fcueq.d",
+  "llvm.mips.fcueq.w",
+  "llvm.mips.fcule.d",
+  "llvm.mips.fcule.w",
+  "llvm.mips.fcult.d",
+  "llvm.mips.fcult.w",
+  "llvm.mips.fcun.d",
+  "llvm.mips.fcun.w",
+  "llvm.mips.fcune.d",
+  "llvm.mips.fcune.w",
+  "llvm.mips.fdiv.d",
+  "llvm.mips.fdiv.w",
+  "llvm.mips.fexdo.h",
+  "llvm.mips.fexdo.w",
+  "llvm.mips.fexp2.d",
+  "llvm.mips.fexp2.w",
+  "llvm.mips.fexupl.d",
+  "llvm.mips.fexupl.w",
+  "llvm.mips.fexupr.d",
+  "llvm.mips.fexupr.w",
+  "llvm.mips.ffint.s.d",
+  "llvm.mips.ffint.s.w",
+  "llvm.mips.ffint.u.d",
+  "llvm.mips.ffint.u.w",
+  "llvm.mips.ffql.d",
+  "llvm.mips.ffql.w",
+  "llvm.mips.ffqr.d",
+  "llvm.mips.ffqr.w",
+  "llvm.mips.fill.b",
+  "llvm.mips.fill.d",
+  "llvm.mips.fill.h",
+  "llvm.mips.fill.w",
+  "llvm.mips.flog2.d",
+  "llvm.mips.flog2.w",
+  "llvm.mips.fmadd.d",
+  "llvm.mips.fmadd.w",
+  "llvm.mips.fmax.a.d",
+  "llvm.mips.fmax.a.w",
+  "llvm.mips.fmax.d",
+  "llvm.mips.fmax.w",
+  "llvm.mips.fmin.a.d",
+  "llvm.mips.fmin.a.w",
+  "llvm.mips.fmin.d",
+  "llvm.mips.fmin.w",
+  "llvm.mips.fmsub.d",
+  "llvm.mips.fmsub.w",
+  "llvm.mips.fmul.d",
+  "llvm.mips.fmul.w",
+  "llvm.mips.frcp.d",
+  "llvm.mips.frcp.w",
+  "llvm.mips.frint.d",
+  "llvm.mips.frint.w",
+  "llvm.mips.frsqrt.d",
+  "llvm.mips.frsqrt.w",
+  "llvm.mips.fsaf.d",
+  "llvm.mips.fsaf.w",
+  "llvm.mips.fseq.d",
+  "llvm.mips.fseq.w",
+  "llvm.mips.fsle.d",
+  "llvm.mips.fsle.w",
+  "llvm.mips.fslt.d",
+  "llvm.mips.fslt.w",
+  "llvm.mips.fsne.d",
+  "llvm.mips.fsne.w",
+  "llvm.mips.fsor.d",
+  "llvm.mips.fsor.w",
+  "llvm.mips.fsqrt.d",
+  "llvm.mips.fsqrt.w",
+  "llvm.mips.fsub.d",
+  "llvm.mips.fsub.w",
+  "llvm.mips.fsueq.d",
+  "llvm.mips.fsueq.w",
+  "llvm.mips.fsule.d",
+  "llvm.mips.fsule.w",
+  "llvm.mips.fsult.d",
+  "llvm.mips.fsult.w",
+  "llvm.mips.fsun.d",
+  "llvm.mips.fsun.w",
+  "llvm.mips.fsune.d",
+  "llvm.mips.fsune.w",
+  "llvm.mips.ftint.s.d",
+  "llvm.mips.ftint.s.w",
+  "llvm.mips.ftint.u.d",
+  "llvm.mips.ftint.u.w",
+  "llvm.mips.ftq.h",
+  "llvm.mips.ftq.w",
+  "llvm.mips.ftrunc.s.d",
+  "llvm.mips.ftrunc.s.w",
+  "llvm.mips.ftrunc.u.d",
+  "llvm.mips.ftrunc.u.w",
+  "llvm.mips.hadd.s.d",
+  "llvm.mips.hadd.s.h",
+  "llvm.mips.hadd.s.w",
+  "llvm.mips.hadd.u.d",
+  "llvm.mips.hadd.u.h",
+  "llvm.mips.hadd.u.w",
+  "llvm.mips.hsub.s.d",
+  "llvm.mips.hsub.s.h",
+  "llvm.mips.hsub.s.w",
+  "llvm.mips.hsub.u.d",
+  "llvm.mips.hsub.u.h",
+  "llvm.mips.hsub.u.w",
+  "llvm.mips.ilvev.b",
+  "llvm.mips.ilvev.d",
+  "llvm.mips.ilvev.h",
+  "llvm.mips.ilvev.w",
+  "llvm.mips.ilvl.b",
+  "llvm.mips.ilvl.d",
+  "llvm.mips.ilvl.h",
+  "llvm.mips.ilvl.w",
+  "llvm.mips.ilvod.b",
+  "llvm.mips.ilvod.d",
+  "llvm.mips.ilvod.h",
+  "llvm.mips.ilvod.w",
+  "llvm.mips.ilvr.b",
+  "llvm.mips.ilvr.d",
+  "llvm.mips.ilvr.h",
+  "llvm.mips.ilvr.w",
+  "llvm.mips.insert.b",
+  "llvm.mips.insert.d",
+  "llvm.mips.insert.h",
+  "llvm.mips.insert.w",
+  "llvm.mips.insv",
+  "llvm.mips.insve.b",
+  "llvm.mips.insve.d",
+  "llvm.mips.insve.h",
+  "llvm.mips.insve.w",
+  "llvm.mips.lbux",
+  "llvm.mips.ld.b",
+  "llvm.mips.ld.d",
+  "llvm.mips.ld.h",
+  "llvm.mips.ld.w",
+  "llvm.mips.ldi.b",
+  "llvm.mips.ldi.d",
+  "llvm.mips.ldi.h",
+  "llvm.mips.ldi.w",
+  "llvm.mips.lhx",
+  "llvm.mips.lsa",
+  "llvm.mips.lwx",
+  "llvm.mips.madd",
+  "llvm.mips.madd.q.h",
+  "llvm.mips.madd.q.w",
+  "llvm.mips.maddr.q.h",
+  "llvm.mips.maddr.q.w",
+  "llvm.mips.maddu",
+  "llvm.mips.maddv.b",
+  "llvm.mips.maddv.d",
+  "llvm.mips.maddv.h",
+  "llvm.mips.maddv.w",
+  "llvm.mips.maq.s.w.phl",
+  "llvm.mips.maq.s.w.phr",
+  "llvm.mips.maq.sa.w.phl",
+  "llvm.mips.maq.sa.w.phr",
+  "llvm.mips.max.a.b",
+  "llvm.mips.max.a.d",
+  "llvm.mips.max.a.h",
+  "llvm.mips.max.a.w",
+  "llvm.mips.max.s.b",
+  "llvm.mips.max.s.d",
+  "llvm.mips.max.s.h",
+  "llvm.mips.max.s.w",
+  "llvm.mips.max.u.b",
+  "llvm.mips.max.u.d",
+  "llvm.mips.max.u.h",
+  "llvm.mips.max.u.w",
+  "llvm.mips.maxi.s.b",
+  "llvm.mips.maxi.s.d",
+  "llvm.mips.maxi.s.h",
+  "llvm.mips.maxi.s.w",
+  "llvm.mips.maxi.u.b",
+  "llvm.mips.maxi.u.d",
+  "llvm.mips.maxi.u.h",
+  "llvm.mips.maxi.u.w",
+  "llvm.mips.min.a.b",
+  "llvm.mips.min.a.d",
+  "llvm.mips.min.a.h",
+  "llvm.mips.min.a.w",
+  "llvm.mips.min.s.b",
+  "llvm.mips.min.s.d",
+  "llvm.mips.min.s.h",
+  "llvm.mips.min.s.w",
+  "llvm.mips.min.u.b",
+  "llvm.mips.min.u.d",
+  "llvm.mips.min.u.h",
+  "llvm.mips.min.u.w",
+  "llvm.mips.mini.s.b",
+  "llvm.mips.mini.s.d",
+  "llvm.mips.mini.s.h",
+  "llvm.mips.mini.s.w",
+  "llvm.mips.mini.u.b",
+  "llvm.mips.mini.u.d",
+  "llvm.mips.mini.u.h",
+  "llvm.mips.mini.u.w",
+  "llvm.mips.mod.s.b",
+  "llvm.mips.mod.s.d",
+  "llvm.mips.mod.s.h",
+  "llvm.mips.mod.s.w",
+  "llvm.mips.mod.u.b",
+  "llvm.mips.mod.u.d",
+  "llvm.mips.mod.u.h",
+  "llvm.mips.mod.u.w",
+  "llvm.mips.modsub",
+  "llvm.mips.move.v",
+  "llvm.mips.msub",
+  "llvm.mips.msub.q.h",
+  "llvm.mips.msub.q.w",
+  "llvm.mips.msubr.q.h",
+  "llvm.mips.msubr.q.w",
+  "llvm.mips.msubu",
+  "llvm.mips.msubv.b",
+  "llvm.mips.msubv.d",
+  "llvm.mips.msubv.h",
+  "llvm.mips.msubv.w",
+  "llvm.mips.mthlip",
+  "llvm.mips.mul.ph",
+  "llvm.mips.mul.q.h",
+  "llvm.mips.mul.q.w",
+  "llvm.mips.mul.s.ph",
+  "llvm.mips.muleq.s.w.phl",
+  "llvm.mips.muleq.s.w.phr",
+  "llvm.mips.muleu.s.ph.qbl",
+  "llvm.mips.muleu.s.ph.qbr",
+  "llvm.mips.mulq.rs.ph",
+  "llvm.mips.mulq.rs.w",
+  "llvm.mips.mulq.s.ph",
+  "llvm.mips.mulq.s.w",
+  "llvm.mips.mulr.q.h",
+  "llvm.mips.mulr.q.w",
+  "llvm.mips.mulsa.w.ph",
+  "llvm.mips.mulsaq.s.w.ph",
+  "llvm.mips.mult",
+  "llvm.mips.multu",
+  "llvm.mips.mulv.b",
+  "llvm.mips.mulv.d",
+  "llvm.mips.mulv.h",
+  "llvm.mips.mulv.w",
+  "llvm.mips.nloc.b",
+  "llvm.mips.nloc.d",
+  "llvm.mips.nloc.h",
+  "llvm.mips.nloc.w",
+  "llvm.mips.nlzc.b",
+  "llvm.mips.nlzc.d",
+  "llvm.mips.nlzc.h",
+  "llvm.mips.nlzc.w",
+  "llvm.mips.nor.v",
+  "llvm.mips.nori.b",
+  "llvm.mips.or.v",
+  "llvm.mips.ori.b",
+  "llvm.mips.packrl.ph",
+  "llvm.mips.pckev.b",
+  "llvm.mips.pckev.d",
+  "llvm.mips.pckev.h",
+  "llvm.mips.pckev.w",
+  "llvm.mips.pckod.b",
+  "llvm.mips.pckod.d",
+  "llvm.mips.pckod.h",
+  "llvm.mips.pckod.w",
+  "llvm.mips.pcnt.b",
+  "llvm.mips.pcnt.d",
+  "llvm.mips.pcnt.h",
+  "llvm.mips.pcnt.w",
+  "llvm.mips.pick.ph",
+  "llvm.mips.pick.qb",
+  "llvm.mips.preceq.w.phl",
+  "llvm.mips.preceq.w.phr",
+  "llvm.mips.precequ.ph.qbl",
+  "llvm.mips.precequ.ph.qbla",
+  "llvm.mips.precequ.ph.qbr",
+  "llvm.mips.precequ.ph.qbra",
+  "llvm.mips.preceu.ph.qbl",
+  "llvm.mips.preceu.ph.qbla",
+  "llvm.mips.preceu.ph.qbr",
+  "llvm.mips.preceu.ph.qbra",
+  "llvm.mips.precr.qb.ph",
+  "llvm.mips.precr.sra.ph.w",
+  "llvm.mips.precr.sra.r.ph.w",
+  "llvm.mips.precrq.ph.w",
+  "llvm.mips.precrq.qb.ph",
+  "llvm.mips.precrq.rs.ph.w",
+  "llvm.mips.precrqu.s.qb.ph",
+  "llvm.mips.prepend",
+  "llvm.mips.raddu.w.qb",
+  "llvm.mips.rddsp",
+  "llvm.mips.repl.ph",
+  "llvm.mips.repl.qb",
+  "llvm.mips.sat.s.b",
+  "llvm.mips.sat.s.d",
+  "llvm.mips.sat.s.h",
+  "llvm.mips.sat.s.w",
+  "llvm.mips.sat.u.b",
+  "llvm.mips.sat.u.d",
+  "llvm.mips.sat.u.h",
+  "llvm.mips.sat.u.w",
+  "llvm.mips.shf.b",
+  "llvm.mips.shf.h",
+  "llvm.mips.shf.w",
+  "llvm.mips.shilo",
+  "llvm.mips.shll.ph",
+  "llvm.mips.shll.qb",
+  "llvm.mips.shll.s.ph",
+  "llvm.mips.shll.s.w",
+  "llvm.mips.shra.ph",
+  "llvm.mips.shra.qb",
+  "llvm.mips.shra.r.ph",
+  "llvm.mips.shra.r.qb",
+  "llvm.mips.shra.r.w",
+  "llvm.mips.shrl.ph",
+  "llvm.mips.shrl.qb",
+  "llvm.mips.sld.b",
+  "llvm.mips.sld.d",
+  "llvm.mips.sld.h",
+  "llvm.mips.sld.w",
+  "llvm.mips.sldi.b",
+  "llvm.mips.sldi.d",
+  "llvm.mips.sldi.h",
+  "llvm.mips.sldi.w",
+  "llvm.mips.sll.b",
+  "llvm.mips.sll.d",
+  "llvm.mips.sll.h",
+  "llvm.mips.sll.w",
+  "llvm.mips.slli.b",
+  "llvm.mips.slli.d",
+  "llvm.mips.slli.h",
+  "llvm.mips.slli.w",
+  "llvm.mips.splat.b",
+  "llvm.mips.splat.d",
+  "llvm.mips.splat.h",
+  "llvm.mips.splat.w",
+  "llvm.mips.splati.b",
+  "llvm.mips.splati.d",
+  "llvm.mips.splati.h",
+  "llvm.mips.splati.w",
+  "llvm.mips.sra.b",
+  "llvm.mips.sra.d",
+  "llvm.mips.sra.h",
+  "llvm.mips.sra.w",
+  "llvm.mips.srai.b",
+  "llvm.mips.srai.d",
+  "llvm.mips.srai.h",
+  "llvm.mips.srai.w",
+  "llvm.mips.srar.b",
+  "llvm.mips.srar.d",
+  "llvm.mips.srar.h",
+  "llvm.mips.srar.w",
+  "llvm.mips.srari.b",
+  "llvm.mips.srari.d",
+  "llvm.mips.srari.h",
+  "llvm.mips.srari.w",
+  "llvm.mips.srl.b",
+  "llvm.mips.srl.d",
+  "llvm.mips.srl.h",
+  "llvm.mips.srl.w",
+  "llvm.mips.srli.b",
+  "llvm.mips.srli.d",
+  "llvm.mips.srli.h",
+  "llvm.mips.srli.w",
+  "llvm.mips.srlr.b",
+  "llvm.mips.srlr.d",
+  "llvm.mips.srlr.h",
+  "llvm.mips.srlr.w",
+  "llvm.mips.srlri.b",
+  "llvm.mips.srlri.d",
+  "llvm.mips.srlri.h",
+  "llvm.mips.srlri.w",
+  "llvm.mips.st.b",
+  "llvm.mips.st.d",
+  "llvm.mips.st.h",
+  "llvm.mips.st.w",
+  "llvm.mips.subq.ph",
+  "llvm.mips.subq.s.ph",
+  "llvm.mips.subq.s.w",
+  "llvm.mips.subqh.ph",
+  "llvm.mips.subqh.r.ph",
+  "llvm.mips.subqh.r.w",
+  "llvm.mips.subqh.w",
+  "llvm.mips.subs.s.b",
+  "llvm.mips.subs.s.d",
+  "llvm.mips.subs.s.h",
+  "llvm.mips.subs.s.w",
+  "llvm.mips.subs.u.b",
+  "llvm.mips.subs.u.d",
+  "llvm.mips.subs.u.h",
+  "llvm.mips.subs.u.w",
+  "llvm.mips.subsus.u.b",
+  "llvm.mips.subsus.u.d",
+  "llvm.mips.subsus.u.h",
+  "llvm.mips.subsus.u.w",
+  "llvm.mips.subsuu.s.b",
+  "llvm.mips.subsuu.s.d",
+  "llvm.mips.subsuu.s.h",
+  "llvm.mips.subsuu.s.w",
+  "llvm.mips.subu.ph",
+  "llvm.mips.subu.qb",
+  "llvm.mips.subu.s.ph",
+  "llvm.mips.subu.s.qb",
+  "llvm.mips.subuh.qb",
+  "llvm.mips.subuh.r.qb",
+  "llvm.mips.subv.b",
+  "llvm.mips.subv.d",
+  "llvm.mips.subv.h",
+  "llvm.mips.subv.w",
+  "llvm.mips.subvi.b",
+  "llvm.mips.subvi.d",
+  "llvm.mips.subvi.h",
+  "llvm.mips.subvi.w",
+  "llvm.mips.vshf.b",
+  "llvm.mips.vshf.d",
+  "llvm.mips.vshf.h",
+  "llvm.mips.vshf.w",
+  "llvm.mips.wrdsp",
+  "llvm.mips.xor.v",
+  "llvm.mips.xori.b",
+  "llvm.nvvm.add.rm.d",
+  "llvm.nvvm.add.rm.f",
+  "llvm.nvvm.add.rm.ftz.f",
+  "llvm.nvvm.add.rn.d",
+  "llvm.nvvm.add.rn.f",
+  "llvm.nvvm.add.rn.ftz.f",
+  "llvm.nvvm.add.rp.d",
+  "llvm.nvvm.add.rp.f",
+  "llvm.nvvm.add.rp.ftz.f",
+  "llvm.nvvm.add.rz.d",
+  "llvm.nvvm.add.rz.f",
+  "llvm.nvvm.add.rz.ftz.f",
+  "llvm.nvvm.atomic.add.gen.f.cta",
+  "llvm.nvvm.atomic.add.gen.f.sys",
+  "llvm.nvvm.atomic.add.gen.i.cta",
+  "llvm.nvvm.atomic.add.gen.i.sys",
+  "llvm.nvvm.atomic.and.gen.i.cta",
+  "llvm.nvvm.atomic.and.gen.i.sys",
+  "llvm.nvvm.atomic.cas.gen.i.cta",
+  "llvm.nvvm.atomic.cas.gen.i.sys",
+  "llvm.nvvm.atomic.dec.gen.i.cta",
+  "llvm.nvvm.atomic.dec.gen.i.sys",
+  "llvm.nvvm.atomic.exch.gen.i.cta",
+  "llvm.nvvm.atomic.exch.gen.i.sys",
+  "llvm.nvvm.atomic.inc.gen.i.cta",
+  "llvm.nvvm.atomic.inc.gen.i.sys",
+  "llvm.nvvm.atomic.load.add.f32",
+  "llvm.nvvm.atomic.load.add.f64",
+  "llvm.nvvm.atomic.load.dec.32",
+  "llvm.nvvm.atomic.load.inc.32",
+  "llvm.nvvm.atomic.max.gen.i.cta",
+  "llvm.nvvm.atomic.max.gen.i.sys",
+  "llvm.nvvm.atomic.min.gen.i.cta",
+  "llvm.nvvm.atomic.min.gen.i.sys",
+  "llvm.nvvm.atomic.or.gen.i.cta",
+  "llvm.nvvm.atomic.or.gen.i.sys",
+  "llvm.nvvm.atomic.xor.gen.i.cta",
+  "llvm.nvvm.atomic.xor.gen.i.sys",
+  "llvm.nvvm.bar.sync",
+  "llvm.nvvm.bar.warp.sync",
+  "llvm.nvvm.barrier",
+  "llvm.nvvm.barrier.n",
+  "llvm.nvvm.barrier.sync",
+  "llvm.nvvm.barrier.sync.cnt",
+  "llvm.nvvm.barrier0",
+  "llvm.nvvm.barrier0.and",
+  "llvm.nvvm.barrier0.or",
+  "llvm.nvvm.barrier0.popc",
+  "llvm.nvvm.bitcast.d2ll",
+  "llvm.nvvm.bitcast.f2i",
+  "llvm.nvvm.bitcast.i2f",
+  "llvm.nvvm.bitcast.ll2d",
+  "llvm.nvvm.ceil.d",
+  "llvm.nvvm.ceil.f",
+  "llvm.nvvm.ceil.ftz.f",
+  "llvm.nvvm.compiler.error",
+  "llvm.nvvm.compiler.warn",
+  "llvm.nvvm.cos.approx.f",
+  "llvm.nvvm.cos.approx.ftz.f",
+  "llvm.nvvm.d2f.rm",
+  "llvm.nvvm.d2f.rm.ftz",
+  "llvm.nvvm.d2f.rn",
+  "llvm.nvvm.d2f.rn.ftz",
+  "llvm.nvvm.d2f.rp",
+  "llvm.nvvm.d2f.rp.ftz",
+  "llvm.nvvm.d2f.rz",
+  "llvm.nvvm.d2f.rz.ftz",
+  "llvm.nvvm.d2i.hi",
+  "llvm.nvvm.d2i.lo",
+  "llvm.nvvm.d2i.rm",
+  "llvm.nvvm.d2i.rn",
+  "llvm.nvvm.d2i.rp",
+  "llvm.nvvm.d2i.rz",
+  "llvm.nvvm.d2ll.rm",
+  "llvm.nvvm.d2ll.rn",
+  "llvm.nvvm.d2ll.rp",
+  "llvm.nvvm.d2ll.rz",
+  "llvm.nvvm.d2ui.rm",
+  "llvm.nvvm.d2ui.rn",
+  "llvm.nvvm.d2ui.rp",
+  "llvm.nvvm.d2ui.rz",
+  "llvm.nvvm.d2ull.rm",
+  "llvm.nvvm.d2ull.rn",
+  "llvm.nvvm.d2ull.rp",
+  "llvm.nvvm.d2ull.rz",
+  "llvm.nvvm.div.approx.f",
+  "llvm.nvvm.div.approx.ftz.f",
+  "llvm.nvvm.div.rm.d",
+  "llvm.nvvm.div.rm.f",
+  "llvm.nvvm.div.rm.ftz.f",
+  "llvm.nvvm.div.rn.d",
+  "llvm.nvvm.div.rn.f",
+  "llvm.nvvm.div.rn.ftz.f",
+  "llvm.nvvm.div.rp.d",
+  "llvm.nvvm.div.rp.f",
+  "llvm.nvvm.div.rp.ftz.f",
+  "llvm.nvvm.div.rz.d",
+  "llvm.nvvm.div.rz.f",
+  "llvm.nvvm.div.rz.ftz.f",
+  "llvm.nvvm.ex2.approx.d",
+  "llvm.nvvm.ex2.approx.f",
+  "llvm.nvvm.ex2.approx.ftz.f",
+  "llvm.nvvm.f2h.rn",
+  "llvm.nvvm.f2h.rn.ftz",
+  "llvm.nvvm.f2i.rm",
+  "llvm.nvvm.f2i.rm.ftz",
+  "llvm.nvvm.f2i.rn",
+  "llvm.nvvm.f2i.rn.ftz",
+  "llvm.nvvm.f2i.rp",
+  "llvm.nvvm.f2i.rp.ftz",
+  "llvm.nvvm.f2i.rz",
+  "llvm.nvvm.f2i.rz.ftz",
+  "llvm.nvvm.f2ll.rm",
+  "llvm.nvvm.f2ll.rm.ftz",
+  "llvm.nvvm.f2ll.rn",
+  "llvm.nvvm.f2ll.rn.ftz",
+  "llvm.nvvm.f2ll.rp",
+  "llvm.nvvm.f2ll.rp.ftz",
+  "llvm.nvvm.f2ll.rz",
+  "llvm.nvvm.f2ll.rz.ftz",
+  "llvm.nvvm.f2ui.rm",
+  "llvm.nvvm.f2ui.rm.ftz",
+  "llvm.nvvm.f2ui.rn",
+  "llvm.nvvm.f2ui.rn.ftz",
+  "llvm.nvvm.f2ui.rp",
+  "llvm.nvvm.f2ui.rp.ftz",
+  "llvm.nvvm.f2ui.rz",
+  "llvm.nvvm.f2ui.rz.ftz",
+  "llvm.nvvm.f2ull.rm",
+  "llvm.nvvm.f2ull.rm.ftz",
+  "llvm.nvvm.f2ull.rn",
+  "llvm.nvvm.f2ull.rn.ftz",
+  "llvm.nvvm.f2ull.rp",
+  "llvm.nvvm.f2ull.rp.ftz",
+  "llvm.nvvm.f2ull.rz",
+  "llvm.nvvm.f2ull.rz.ftz",
+  "llvm.nvvm.fabs.d",
+  "llvm.nvvm.fabs.f",
+  "llvm.nvvm.fabs.ftz.f",
+  "llvm.nvvm.floor.d",
+  "llvm.nvvm.floor.f",
+  "llvm.nvvm.floor.ftz.f",
+  "llvm.nvvm.fma.rm.d",
+  "llvm.nvvm.fma.rm.f",
+  "llvm.nvvm.fma.rm.ftz.f",
+  "llvm.nvvm.fma.rn.d",
+  "llvm.nvvm.fma.rn.f",
+  "llvm.nvvm.fma.rn.ftz.f",
+  "llvm.nvvm.fma.rp.d",
+  "llvm.nvvm.fma.rp.f",
+  "llvm.nvvm.fma.rp.ftz.f",
+  "llvm.nvvm.fma.rz.d",
+  "llvm.nvvm.fma.rz.f",
+  "llvm.nvvm.fma.rz.ftz.f",
+  "llvm.nvvm.fmax.d",
+  "llvm.nvvm.fmax.f",
+  "llvm.nvvm.fmax.ftz.f",
+  "llvm.nvvm.fmin.d",
+  "llvm.nvvm.fmin.f",
+  "llvm.nvvm.fmin.ftz.f",
+  "llvm.nvvm.fns",
+  "llvm.nvvm.i2d.rm",
+  "llvm.nvvm.i2d.rn",
+  "llvm.nvvm.i2d.rp",
+  "llvm.nvvm.i2d.rz",
+  "llvm.nvvm.i2f.rm",
+  "llvm.nvvm.i2f.rn",
+  "llvm.nvvm.i2f.rp",
+  "llvm.nvvm.i2f.rz",
+  "llvm.nvvm.isspacep.const",
+  "llvm.nvvm.isspacep.global",
+  "llvm.nvvm.isspacep.local",
+  "llvm.nvvm.isspacep.shared",
+  "llvm.nvvm.istypep.sampler",
+  "llvm.nvvm.istypep.surface",
+  "llvm.nvvm.istypep.texture",
+  "llvm.nvvm.ldg.global.f",
+  "llvm.nvvm.ldg.global.i",
+  "llvm.nvvm.ldg.global.p",
+  "llvm.nvvm.ldu.global.f",
+  "llvm.nvvm.ldu.global.i",
+  "llvm.nvvm.ldu.global.p",
+  "llvm.nvvm.lg2.approx.d",
+  "llvm.nvvm.lg2.approx.f",
+  "llvm.nvvm.lg2.approx.ftz.f",
+  "llvm.nvvm.ll2d.rm",
+  "llvm.nvvm.ll2d.rn",
+  "llvm.nvvm.ll2d.rp",
+  "llvm.nvvm.ll2d.rz",
+  "llvm.nvvm.ll2f.rm",
+  "llvm.nvvm.ll2f.rn",
+  "llvm.nvvm.ll2f.rp",
+  "llvm.nvvm.ll2f.rz",
+  "llvm.nvvm.lohi.i2d",
+  "llvm.nvvm.match.all.sync.i32p",
+  "llvm.nvvm.match.all.sync.i64p",
+  "llvm.nvvm.match.any.sync.i32",
+  "llvm.nvvm.match.any.sync.i64",
+  "llvm.nvvm.membar.cta",
+  "llvm.nvvm.membar.gl",
+  "llvm.nvvm.membar.sys",
+  "llvm.nvvm.move.double",
+  "llvm.nvvm.move.float",
+  "llvm.nvvm.move.i16",
+  "llvm.nvvm.move.i32",
+  "llvm.nvvm.move.i64",
+  "llvm.nvvm.move.ptr",
+  "llvm.nvvm.mul.rm.d",
+  "llvm.nvvm.mul.rm.f",
+  "llvm.nvvm.mul.rm.ftz.f",
+  "llvm.nvvm.mul.rn.d",
+  "llvm.nvvm.mul.rn.f",
+  "llvm.nvvm.mul.rn.ftz.f",
+  "llvm.nvvm.mul.rp.d",
+  "llvm.nvvm.mul.rp.f",
+  "llvm.nvvm.mul.rp.ftz.f",
+  "llvm.nvvm.mul.rz.d",
+  "llvm.nvvm.mul.rz.f",
+  "llvm.nvvm.mul.rz.ftz.f",
+  "llvm.nvvm.mul24.i",
+  "llvm.nvvm.mul24.ui",
+  "llvm.nvvm.mulhi.i",
+  "llvm.nvvm.mulhi.ll",
+  "llvm.nvvm.mulhi.ui",
+  "llvm.nvvm.mulhi.ull",
+  "llvm.nvvm.prmt",
+  "llvm.nvvm.ptr.constant.to.gen",
+  "llvm.nvvm.ptr.gen.to.constant",
+  "llvm.nvvm.ptr.gen.to.global",
+  "llvm.nvvm.ptr.gen.to.local",
+  "llvm.nvvm.ptr.gen.to.param",
+  "llvm.nvvm.ptr.gen.to.shared",
+  "llvm.nvvm.ptr.global.to.gen",
+  "llvm.nvvm.ptr.local.to.gen",
+  "llvm.nvvm.ptr.shared.to.gen",
+  "llvm.nvvm.rcp.approx.ftz.d",
+  "llvm.nvvm.rcp.rm.d",
+  "llvm.nvvm.rcp.rm.f",
+  "llvm.nvvm.rcp.rm.ftz.f",
+  "llvm.nvvm.rcp.rn.d",
+  "llvm.nvvm.rcp.rn.f",
+  "llvm.nvvm.rcp.rn.ftz.f",
+  "llvm.nvvm.rcp.rp.d",
+  "llvm.nvvm.rcp.rp.f",
+  "llvm.nvvm.rcp.rp.ftz.f",
+  "llvm.nvvm.rcp.rz.d",
+  "llvm.nvvm.rcp.rz.f",
+  "llvm.nvvm.rcp.rz.ftz.f",
+  "llvm.nvvm.read.ptx.sreg.clock",
+  "llvm.nvvm.read.ptx.sreg.clock64",
+  "llvm.nvvm.read.ptx.sreg.ctaid.w",
+  "llvm.nvvm.read.ptx.sreg.ctaid.x",
+  "llvm.nvvm.read.ptx.sreg.ctaid.y",
+  "llvm.nvvm.read.ptx.sreg.ctaid.z",
+  "llvm.nvvm.read.ptx.sreg.envreg0",
+  "llvm.nvvm.read.ptx.sreg.envreg1",
+  "llvm.nvvm.read.ptx.sreg.envreg10",
+  "llvm.nvvm.read.ptx.sreg.envreg11",
+  "llvm.nvvm.read.ptx.sreg.envreg12",
+  "llvm.nvvm.read.ptx.sreg.envreg13",
+  "llvm.nvvm.read.ptx.sreg.envreg14",
+  "llvm.nvvm.read.ptx.sreg.envreg15",
+  "llvm.nvvm.read.ptx.sreg.envreg16",
+  "llvm.nvvm.read.ptx.sreg.envreg17",
+  "llvm.nvvm.read.ptx.sreg.envreg18",
+  "llvm.nvvm.read.ptx.sreg.envreg19",
+  "llvm.nvvm.read.ptx.sreg.envreg2",
+  "llvm.nvvm.read.ptx.sreg.envreg20",
+  "llvm.nvvm.read.ptx.sreg.envreg21",
+  "llvm.nvvm.read.ptx.sreg.envreg22",
+  "llvm.nvvm.read.ptx.sreg.envreg23",
+  "llvm.nvvm.read.ptx.sreg.envreg24",
+  "llvm.nvvm.read.ptx.sreg.envreg25",
+  "llvm.nvvm.read.ptx.sreg.envreg26",
+  "llvm.nvvm.read.ptx.sreg.envreg27",
+  "llvm.nvvm.read.ptx.sreg.envreg28",
+  "llvm.nvvm.read.ptx.sreg.envreg29",
+  "llvm.nvvm.read.ptx.sreg.envreg3",
+  "llvm.nvvm.read.ptx.sreg.envreg30",
+  "llvm.nvvm.read.ptx.sreg.envreg31",
+  "llvm.nvvm.read.ptx.sreg.envreg4",
+  "llvm.nvvm.read.ptx.sreg.envreg5",
+  "llvm.nvvm.read.ptx.sreg.envreg6",
+  "llvm.nvvm.read.ptx.sreg.envreg7",
+  "llvm.nvvm.read.ptx.sreg.envreg8",
+  "llvm.nvvm.read.ptx.sreg.envreg9",
+  "llvm.nvvm.read.ptx.sreg.gridid",
+  "llvm.nvvm.read.ptx.sreg.laneid",
+  "llvm.nvvm.read.ptx.sreg.lanemask.eq",
+  "llvm.nvvm.read.ptx.sreg.lanemask.ge",
+  "llvm.nvvm.read.ptx.sreg.lanemask.gt",
+  "llvm.nvvm.read.ptx.sreg.lanemask.le",
+  "llvm.nvvm.read.ptx.sreg.lanemask.lt",
+  "llvm.nvvm.read.ptx.sreg.nctaid.w",
+  "llvm.nvvm.read.ptx.sreg.nctaid.x",
+  "llvm.nvvm.read.ptx.sreg.nctaid.y",
+  "llvm.nvvm.read.ptx.sreg.nctaid.z",
+  "llvm.nvvm.read.ptx.sreg.nsmid",
+  "llvm.nvvm.read.ptx.sreg.ntid.w",
+  "llvm.nvvm.read.ptx.sreg.ntid.x",
+  "llvm.nvvm.read.ptx.sreg.ntid.y",
+  "llvm.nvvm.read.ptx.sreg.ntid.z",
+  "llvm.nvvm.read.ptx.sreg.nwarpid",
+  "llvm.nvvm.read.ptx.sreg.pm0",
+  "llvm.nvvm.read.ptx.sreg.pm1",
+  "llvm.nvvm.read.ptx.sreg.pm2",
+  "llvm.nvvm.read.ptx.sreg.pm3",
+  "llvm.nvvm.read.ptx.sreg.smid",
+  "llvm.nvvm.read.ptx.sreg.tid.w",
+  "llvm.nvvm.read.ptx.sreg.tid.x",
+  "llvm.nvvm.read.ptx.sreg.tid.y",
+  "llvm.nvvm.read.ptx.sreg.tid.z",
+  "llvm.nvvm.read.ptx.sreg.warpid",
+  "llvm.nvvm.read.ptx.sreg.warpsize",
+  "llvm.nvvm.reflect",
+  "llvm.nvvm.rotate.b32",
+  "llvm.nvvm.rotate.b64",
+  "llvm.nvvm.rotate.right.b64",
+  "llvm.nvvm.round.d",
+  "llvm.nvvm.round.f",
+  "llvm.nvvm.round.ftz.f",
+  "llvm.nvvm.rsqrt.approx.d",
+  "llvm.nvvm.rsqrt.approx.f",
+  "llvm.nvvm.rsqrt.approx.ftz.f",
+  "llvm.nvvm.sad.i",
+  "llvm.nvvm.sad.ui",
+  "llvm.nvvm.saturate.d",
+  "llvm.nvvm.saturate.f",
+  "llvm.nvvm.saturate.ftz.f",
+  "llvm.nvvm.shfl.bfly.f32",
+  "llvm.nvvm.shfl.bfly.i32",
+  "llvm.nvvm.shfl.down.f32",
+  "llvm.nvvm.shfl.down.i32",
+  "llvm.nvvm.shfl.idx.f32",
+  "llvm.nvvm.shfl.idx.i32",
+  "llvm.nvvm.shfl.sync.bfly.f32",
+  "llvm.nvvm.shfl.sync.bfly.i32",
+  "llvm.nvvm.shfl.sync.down.f32",
+  "llvm.nvvm.shfl.sync.down.i32",
+  "llvm.nvvm.shfl.sync.idx.f32",
+  "llvm.nvvm.shfl.sync.idx.i32",
+  "llvm.nvvm.shfl.sync.up.f32",
+  "llvm.nvvm.shfl.sync.up.i32",
+  "llvm.nvvm.shfl.up.f32",
+  "llvm.nvvm.shfl.up.i32",
+  "llvm.nvvm.sin.approx.f",
+  "llvm.nvvm.sin.approx.ftz.f",
+  "llvm.nvvm.sqrt.approx.f",
+  "llvm.nvvm.sqrt.approx.ftz.f",
+  "llvm.nvvm.sqrt.f",
+  "llvm.nvvm.sqrt.rm.d",
+  "llvm.nvvm.sqrt.rm.f",
+  "llvm.nvvm.sqrt.rm.ftz.f",
+  "llvm.nvvm.sqrt.rn.d",
+  "llvm.nvvm.sqrt.rn.f",
+  "llvm.nvvm.sqrt.rn.ftz.f",
+  "llvm.nvvm.sqrt.rp.d",
+  "llvm.nvvm.sqrt.rp.f",
+  "llvm.nvvm.sqrt.rp.ftz.f",
+  "llvm.nvvm.sqrt.rz.d",
+  "llvm.nvvm.sqrt.rz.f",
+  "llvm.nvvm.sqrt.rz.ftz.f",
+  "llvm.nvvm.suld.1d.array.i16.clamp",
+  "llvm.nvvm.suld.1d.array.i16.trap",
+  "llvm.nvvm.suld.1d.array.i16.zero",
+  "llvm.nvvm.suld.1d.array.i32.clamp",
+  "llvm.nvvm.suld.1d.array.i32.trap",
+  "llvm.nvvm.suld.1d.array.i32.zero",
+  "llvm.nvvm.suld.1d.array.i64.clamp",
+  "llvm.nvvm.suld.1d.array.i64.trap",
+  "llvm.nvvm.suld.1d.array.i64.zero",
+  "llvm.nvvm.suld.1d.array.i8.clamp",
+  "llvm.nvvm.suld.1d.array.i8.trap",
+  "llvm.nvvm.suld.1d.array.i8.zero",
+  "llvm.nvvm.suld.1d.array.v2i16.clamp",
+  "llvm.nvvm.suld.1d.array.v2i16.trap",
+  "llvm.nvvm.suld.1d.array.v2i16.zero",
+  "llvm.nvvm.suld.1d.array.v2i32.clamp",
+  "llvm.nvvm.suld.1d.array.v2i32.trap",
+  "llvm.nvvm.suld.1d.array.v2i32.zero",
+  "llvm.nvvm.suld.1d.array.v2i64.clamp",
+  "llvm.nvvm.suld.1d.array.v2i64.trap",
+  "llvm.nvvm.suld.1d.array.v2i64.zero",
+  "llvm.nvvm.suld.1d.array.v2i8.clamp",
+  "llvm.nvvm.suld.1d.array.v2i8.trap",
+  "llvm.nvvm.suld.1d.array.v2i8.zero",
+  "llvm.nvvm.suld.1d.array.v4i16.clamp",
+  "llvm.nvvm.suld.1d.array.v4i16.trap",
+  "llvm.nvvm.suld.1d.array.v4i16.zero",
+  "llvm.nvvm.suld.1d.array.v4i32.clamp",
+  "llvm.nvvm.suld.1d.array.v4i32.trap",
+  "llvm.nvvm.suld.1d.array.v4i32.zero",
+  "llvm.nvvm.suld.1d.array.v4i8.clamp",
+  "llvm.nvvm.suld.1d.array.v4i8.trap",
+  "llvm.nvvm.suld.1d.array.v4i8.zero",
+  "llvm.nvvm.suld.1d.i16.clamp",
+  "llvm.nvvm.suld.1d.i16.trap",
+  "llvm.nvvm.suld.1d.i16.zero",
+  "llvm.nvvm.suld.1d.i32.clamp",
+  "llvm.nvvm.suld.1d.i32.trap",
+  "llvm.nvvm.suld.1d.i32.zero",
+  "llvm.nvvm.suld.1d.i64.clamp",
+  "llvm.nvvm.suld.1d.i64.trap",
+  "llvm.nvvm.suld.1d.i64.zero",
+  "llvm.nvvm.suld.1d.i8.clamp",
+  "llvm.nvvm.suld.1d.i8.trap",
+  "llvm.nvvm.suld.1d.i8.zero",
+  "llvm.nvvm.suld.1d.v2i16.clamp",
+  "llvm.nvvm.suld.1d.v2i16.trap",
+  "llvm.nvvm.suld.1d.v2i16.zero",
+  "llvm.nvvm.suld.1d.v2i32.clamp",
+  "llvm.nvvm.suld.1d.v2i32.trap",
+  "llvm.nvvm.suld.1d.v2i32.zero",
+  "llvm.nvvm.suld.1d.v2i64.clamp",
+  "llvm.nvvm.suld.1d.v2i64.trap",
+  "llvm.nvvm.suld.1d.v2i64.zero",
+  "llvm.nvvm.suld.1d.v2i8.clamp",
+  "llvm.nvvm.suld.1d.v2i8.trap",
+  "llvm.nvvm.suld.1d.v2i8.zero",
+  "llvm.nvvm.suld.1d.v4i16.clamp",
+  "llvm.nvvm.suld.1d.v4i16.trap",
+  "llvm.nvvm.suld.1d.v4i16.zero",
+  "llvm.nvvm.suld.1d.v4i32.clamp",
+  "llvm.nvvm.suld.1d.v4i32.trap",
+  "llvm.nvvm.suld.1d.v4i32.zero",
+  "llvm.nvvm.suld.1d.v4i8.clamp",
+  "llvm.nvvm.suld.1d.v4i8.trap",
+  "llvm.nvvm.suld.1d.v4i8.zero",
+  "llvm.nvvm.suld.2d.array.i16.clamp",
+  "llvm.nvvm.suld.2d.array.i16.trap",
+  "llvm.nvvm.suld.2d.array.i16.zero",
+  "llvm.nvvm.suld.2d.array.i32.clamp",
+  "llvm.nvvm.suld.2d.array.i32.trap",
+  "llvm.nvvm.suld.2d.array.i32.zero",
+  "llvm.nvvm.suld.2d.array.i64.clamp",
+  "llvm.nvvm.suld.2d.array.i64.trap",
+  "llvm.nvvm.suld.2d.array.i64.zero",
+  "llvm.nvvm.suld.2d.array.i8.clamp",
+  "llvm.nvvm.suld.2d.array.i8.trap",
+  "llvm.nvvm.suld.2d.array.i8.zero",
+  "llvm.nvvm.suld.2d.array.v2i16.clamp",
+  "llvm.nvvm.suld.2d.array.v2i16.trap",
+  "llvm.nvvm.suld.2d.array.v2i16.zero",
+  "llvm.nvvm.suld.2d.array.v2i32.clamp",
+  "llvm.nvvm.suld.2d.array.v2i32.trap",
+  "llvm.nvvm.suld.2d.array.v2i32.zero",
+  "llvm.nvvm.suld.2d.array.v2i64.clamp",
+  "llvm.nvvm.suld.2d.array.v2i64.trap",
+  "llvm.nvvm.suld.2d.array.v2i64.zero",
+  "llvm.nvvm.suld.2d.array.v2i8.clamp",
+  "llvm.nvvm.suld.2d.array.v2i8.trap",
+  "llvm.nvvm.suld.2d.array.v2i8.zero",
+  "llvm.nvvm.suld.2d.array.v4i16.clamp",
+  "llvm.nvvm.suld.2d.array.v4i16.trap",
+  "llvm.nvvm.suld.2d.array.v4i16.zero",
+  "llvm.nvvm.suld.2d.array.v4i32.clamp",
+  "llvm.nvvm.suld.2d.array.v4i32.trap",
+  "llvm.nvvm.suld.2d.array.v4i32.zero",
+  "llvm.nvvm.suld.2d.array.v4i8.clamp",
+  "llvm.nvvm.suld.2d.array.v4i8.trap",
+  "llvm.nvvm.suld.2d.array.v4i8.zero",
+  "llvm.nvvm.suld.2d.i16.clamp",
+  "llvm.nvvm.suld.2d.i16.trap",
+  "llvm.nvvm.suld.2d.i16.zero",
+  "llvm.nvvm.suld.2d.i32.clamp",
+  "llvm.nvvm.suld.2d.i32.trap",
+  "llvm.nvvm.suld.2d.i32.zero",
+  "llvm.nvvm.suld.2d.i64.clamp",
+  "llvm.nvvm.suld.2d.i64.trap",
+  "llvm.nvvm.suld.2d.i64.zero",
+  "llvm.nvvm.suld.2d.i8.clamp",
+  "llvm.nvvm.suld.2d.i8.trap",
+  "llvm.nvvm.suld.2d.i8.zero",
+  "llvm.nvvm.suld.2d.v2i16.clamp",
+  "llvm.nvvm.suld.2d.v2i16.trap",
+  "llvm.nvvm.suld.2d.v2i16.zero",
+  "llvm.nvvm.suld.2d.v2i32.clamp",
+  "llvm.nvvm.suld.2d.v2i32.trap",
+  "llvm.nvvm.suld.2d.v2i32.zero",
+  "llvm.nvvm.suld.2d.v2i64.clamp",
+  "llvm.nvvm.suld.2d.v2i64.trap",
+  "llvm.nvvm.suld.2d.v2i64.zero",
+  "llvm.nvvm.suld.2d.v2i8.clamp",
+  "llvm.nvvm.suld.2d.v2i8.trap",
+  "llvm.nvvm.suld.2d.v2i8.zero",
+  "llvm.nvvm.suld.2d.v4i16.clamp",
+  "llvm.nvvm.suld.2d.v4i16.trap",
+  "llvm.nvvm.suld.2d.v4i16.zero",
+  "llvm.nvvm.suld.2d.v4i32.clamp",
+  "llvm.nvvm.suld.2d.v4i32.trap",
+  "llvm.nvvm.suld.2d.v4i32.zero",
+  "llvm.nvvm.suld.2d.v4i8.clamp",
+  "llvm.nvvm.suld.2d.v4i8.trap",
+  "llvm.nvvm.suld.2d.v4i8.zero",
+  "llvm.nvvm.suld.3d.i16.clamp",
+  "llvm.nvvm.suld.3d.i16.trap",
+  "llvm.nvvm.suld.3d.i16.zero",
+  "llvm.nvvm.suld.3d.i32.clamp",
+  "llvm.nvvm.suld.3d.i32.trap",
+  "llvm.nvvm.suld.3d.i32.zero",
+  "llvm.nvvm.suld.3d.i64.clamp",
+  "llvm.nvvm.suld.3d.i64.trap",
+  "llvm.nvvm.suld.3d.i64.zero",
+  "llvm.nvvm.suld.3d.i8.clamp",
+  "llvm.nvvm.suld.3d.i8.trap",
+  "llvm.nvvm.suld.3d.i8.zero",
+  "llvm.nvvm.suld.3d.v2i16.clamp",
+  "llvm.nvvm.suld.3d.v2i16.trap",
+  "llvm.nvvm.suld.3d.v2i16.zero",
+  "llvm.nvvm.suld.3d.v2i32.clamp",
+  "llvm.nvvm.suld.3d.v2i32.trap",
+  "llvm.nvvm.suld.3d.v2i32.zero",
+  "llvm.nvvm.suld.3d.v2i64.clamp",
+  "llvm.nvvm.suld.3d.v2i64.trap",
+  "llvm.nvvm.suld.3d.v2i64.zero",
+  "llvm.nvvm.suld.3d.v2i8.clamp",
+  "llvm.nvvm.suld.3d.v2i8.trap",
+  "llvm.nvvm.suld.3d.v2i8.zero",
+  "llvm.nvvm.suld.3d.v4i16.clamp",
+  "llvm.nvvm.suld.3d.v4i16.trap",
+  "llvm.nvvm.suld.3d.v4i16.zero",
+  "llvm.nvvm.suld.3d.v4i32.clamp",
+  "llvm.nvvm.suld.3d.v4i32.trap",
+  "llvm.nvvm.suld.3d.v4i32.zero",
+  "llvm.nvvm.suld.3d.v4i8.clamp",
+  "llvm.nvvm.suld.3d.v4i8.trap",
+  "llvm.nvvm.suld.3d.v4i8.zero",
+  "llvm.nvvm.suq.array.size",
+  "llvm.nvvm.suq.channel.data.type",
+  "llvm.nvvm.suq.channel.order",
+  "llvm.nvvm.suq.depth",
+  "llvm.nvvm.suq.height",
+  "llvm.nvvm.suq.width",
+  "llvm.nvvm.sust.b.1d.array.i16.clamp",
+  "llvm.nvvm.sust.b.1d.array.i16.trap",
+  "llvm.nvvm.sust.b.1d.array.i16.zero",
+  "llvm.nvvm.sust.b.1d.array.i32.clamp",
+  "llvm.nvvm.sust.b.1d.array.i32.trap",
+  "llvm.nvvm.sust.b.1d.array.i32.zero",
+  "llvm.nvvm.sust.b.1d.array.i64.clamp",
+  "llvm.nvvm.sust.b.1d.array.i64.trap",
+  "llvm.nvvm.sust.b.1d.array.i64.zero",
+  "llvm.nvvm.sust.b.1d.array.i8.clamp",
+  "llvm.nvvm.sust.b.1d.array.i8.trap",
+  "llvm.nvvm.sust.b.1d.array.i8.zero",
+  "llvm.nvvm.sust.b.1d.array.v2i16.clamp",
+  "llvm.nvvm.sust.b.1d.array.v2i16.trap",
+  "llvm.nvvm.sust.b.1d.array.v2i16.zero",
+  "llvm.nvvm.sust.b.1d.array.v2i32.clamp",
+  "llvm.nvvm.sust.b.1d.array.v2i32.trap",
+  "llvm.nvvm.sust.b.1d.array.v2i32.zero",
+  "llvm.nvvm.sust.b.1d.array.v2i64.clamp",
+  "llvm.nvvm.sust.b.1d.array.v2i64.trap",
+  "llvm.nvvm.sust.b.1d.array.v2i64.zero",
+  "llvm.nvvm.sust.b.1d.array.v2i8.clamp",
+  "llvm.nvvm.sust.b.1d.array.v2i8.trap",
+  "llvm.nvvm.sust.b.1d.array.v2i8.zero",
+  "llvm.nvvm.sust.b.1d.array.v4i16.clamp",
+  "llvm.nvvm.sust.b.1d.array.v4i16.trap",
+  "llvm.nvvm.sust.b.1d.array.v4i16.zero",
+  "llvm.nvvm.sust.b.1d.array.v4i32.clamp",
+  "llvm.nvvm.sust.b.1d.array.v4i32.trap",
+  "llvm.nvvm.sust.b.1d.array.v4i32.zero",
+  "llvm.nvvm.sust.b.1d.array.v4i8.clamp",
+  "llvm.nvvm.sust.b.1d.array.v4i8.trap",
+  "llvm.nvvm.sust.b.1d.array.v4i8.zero",
+  "llvm.nvvm.sust.b.1d.i16.clamp",
+  "llvm.nvvm.sust.b.1d.i16.trap",
+  "llvm.nvvm.sust.b.1d.i16.zero",
+  "llvm.nvvm.sust.b.1d.i32.clamp",
+  "llvm.nvvm.sust.b.1d.i32.trap",
+  "llvm.nvvm.sust.b.1d.i32.zero",
+  "llvm.nvvm.sust.b.1d.i64.clamp",
+  "llvm.nvvm.sust.b.1d.i64.trap",
+  "llvm.nvvm.sust.b.1d.i64.zero",
+  "llvm.nvvm.sust.b.1d.i8.clamp",
+  "llvm.nvvm.sust.b.1d.i8.trap",
+  "llvm.nvvm.sust.b.1d.i8.zero",
+  "llvm.nvvm.sust.b.1d.v2i16.clamp",
+  "llvm.nvvm.sust.b.1d.v2i16.trap",
+  "llvm.nvvm.sust.b.1d.v2i16.zero",
+  "llvm.nvvm.sust.b.1d.v2i32.clamp",
+  "llvm.nvvm.sust.b.1d.v2i32.trap",
+  "llvm.nvvm.sust.b.1d.v2i32.zero",
+  "llvm.nvvm.sust.b.1d.v2i64.clamp",
+  "llvm.nvvm.sust.b.1d.v2i64.trap",
+  "llvm.nvvm.sust.b.1d.v2i64.zero",
+  "llvm.nvvm.sust.b.1d.v2i8.clamp",
+  "llvm.nvvm.sust.b.1d.v2i8.trap",
+  "llvm.nvvm.sust.b.1d.v2i8.zero",
+  "llvm.nvvm.sust.b.1d.v4i16.clamp",
+  "llvm.nvvm.sust.b.1d.v4i16.trap",
+  "llvm.nvvm.sust.b.1d.v4i16.zero",
+  "llvm.nvvm.sust.b.1d.v4i32.clamp",
+  "llvm.nvvm.sust.b.1d.v4i32.trap",
+  "llvm.nvvm.sust.b.1d.v4i32.zero",
+  "llvm.nvvm.sust.b.1d.v4i8.clamp",
+  "llvm.nvvm.sust.b.1d.v4i8.trap",
+  "llvm.nvvm.sust.b.1d.v4i8.zero",
+  "llvm.nvvm.sust.b.2d.array.i16.clamp",
+  "llvm.nvvm.sust.b.2d.array.i16.trap",
+  "llvm.nvvm.sust.b.2d.array.i16.zero",
+  "llvm.nvvm.sust.b.2d.array.i32.clamp",
+  "llvm.nvvm.sust.b.2d.array.i32.trap",
+  "llvm.nvvm.sust.b.2d.array.i32.zero",
+  "llvm.nvvm.sust.b.2d.array.i64.clamp",
+  "llvm.nvvm.sust.b.2d.array.i64.trap",
+  "llvm.nvvm.sust.b.2d.array.i64.zero",
+  "llvm.nvvm.sust.b.2d.array.i8.clamp",
+  "llvm.nvvm.sust.b.2d.array.i8.trap",
+  "llvm.nvvm.sust.b.2d.array.i8.zero",
+  "llvm.nvvm.sust.b.2d.array.v2i16.clamp",
+  "llvm.nvvm.sust.b.2d.array.v2i16.trap",
+  "llvm.nvvm.sust.b.2d.array.v2i16.zero",
+  "llvm.nvvm.sust.b.2d.array.v2i32.clamp",
+  "llvm.nvvm.sust.b.2d.array.v2i32.trap",
+  "llvm.nvvm.sust.b.2d.array.v2i32.zero",
+  "llvm.nvvm.sust.b.2d.array.v2i64.clamp",
+  "llvm.nvvm.sust.b.2d.array.v2i64.trap",
+  "llvm.nvvm.sust.b.2d.array.v2i64.zero",
+  "llvm.nvvm.sust.b.2d.array.v2i8.clamp",
+  "llvm.nvvm.sust.b.2d.array.v2i8.trap",
+  "llvm.nvvm.sust.b.2d.array.v2i8.zero",
+  "llvm.nvvm.sust.b.2d.array.v4i16.clamp",
+  "llvm.nvvm.sust.b.2d.array.v4i16.trap",
+  "llvm.nvvm.sust.b.2d.array.v4i16.zero",
+  "llvm.nvvm.sust.b.2d.array.v4i32.clamp",
+  "llvm.nvvm.sust.b.2d.array.v4i32.trap",
+  "llvm.nvvm.sust.b.2d.array.v4i32.zero",
+  "llvm.nvvm.sust.b.2d.array.v4i8.clamp",
+  "llvm.nvvm.sust.b.2d.array.v4i8.trap",
+  "llvm.nvvm.sust.b.2d.array.v4i8.zero",
+  "llvm.nvvm.sust.b.2d.i16.clamp",
+  "llvm.nvvm.sust.b.2d.i16.trap",
+  "llvm.nvvm.sust.b.2d.i16.zero",
+  "llvm.nvvm.sust.b.2d.i32.clamp",
+  "llvm.nvvm.sust.b.2d.i32.trap",
+  "llvm.nvvm.sust.b.2d.i32.zero",
+  "llvm.nvvm.sust.b.2d.i64.clamp",
+  "llvm.nvvm.sust.b.2d.i64.trap",
+  "llvm.nvvm.sust.b.2d.i64.zero",
+  "llvm.nvvm.sust.b.2d.i8.clamp",
+  "llvm.nvvm.sust.b.2d.i8.trap",
+  "llvm.nvvm.sust.b.2d.i8.zero",
+  "llvm.nvvm.sust.b.2d.v2i16.clamp",
+  "llvm.nvvm.sust.b.2d.v2i16.trap",
+  "llvm.nvvm.sust.b.2d.v2i16.zero",
+  "llvm.nvvm.sust.b.2d.v2i32.clamp",
+  "llvm.nvvm.sust.b.2d.v2i32.trap",
+  "llvm.nvvm.sust.b.2d.v2i32.zero",
+  "llvm.nvvm.sust.b.2d.v2i64.clamp",
+  "llvm.nvvm.sust.b.2d.v2i64.trap",
+  "llvm.nvvm.sust.b.2d.v2i64.zero",
+  "llvm.nvvm.sust.b.2d.v2i8.clamp",
+  "llvm.nvvm.sust.b.2d.v2i8.trap",
+  "llvm.nvvm.sust.b.2d.v2i8.zero",
+  "llvm.nvvm.sust.b.2d.v4i16.clamp",
+  "llvm.nvvm.sust.b.2d.v4i16.trap",
+  "llvm.nvvm.sust.b.2d.v4i16.zero",
+  "llvm.nvvm.sust.b.2d.v4i32.clamp",
+  "llvm.nvvm.sust.b.2d.v4i32.trap",
+  "llvm.nvvm.sust.b.2d.v4i32.zero",
+  "llvm.nvvm.sust.b.2d.v4i8.clamp",
+  "llvm.nvvm.sust.b.2d.v4i8.trap",
+  "llvm.nvvm.sust.b.2d.v4i8.zero",
+  "llvm.nvvm.sust.b.3d.i16.clamp",
+  "llvm.nvvm.sust.b.3d.i16.trap",
+  "llvm.nvvm.sust.b.3d.i16.zero",
+  "llvm.nvvm.sust.b.3d.i32.clamp",
+  "llvm.nvvm.sust.b.3d.i32.trap",
+  "llvm.nvvm.sust.b.3d.i32.zero",
+  "llvm.nvvm.sust.b.3d.i64.clamp",
+  "llvm.nvvm.sust.b.3d.i64.trap",
+  "llvm.nvvm.sust.b.3d.i64.zero",
+  "llvm.nvvm.sust.b.3d.i8.clamp",
+  "llvm.nvvm.sust.b.3d.i8.trap",
+  "llvm.nvvm.sust.b.3d.i8.zero",
+  "llvm.nvvm.sust.b.3d.v2i16.clamp",
+  "llvm.nvvm.sust.b.3d.v2i16.trap",
+  "llvm.nvvm.sust.b.3d.v2i16.zero",
+  "llvm.nvvm.sust.b.3d.v2i32.clamp",
+  "llvm.nvvm.sust.b.3d.v2i32.trap",
+  "llvm.nvvm.sust.b.3d.v2i32.zero",
+  "llvm.nvvm.sust.b.3d.v2i64.clamp",
+  "llvm.nvvm.sust.b.3d.v2i64.trap",
+  "llvm.nvvm.sust.b.3d.v2i64.zero",
+  "llvm.nvvm.sust.b.3d.v2i8.clamp",
+  "llvm.nvvm.sust.b.3d.v2i8.trap",
+  "llvm.nvvm.sust.b.3d.v2i8.zero",
+  "llvm.nvvm.sust.b.3d.v4i16.clamp",
+  "llvm.nvvm.sust.b.3d.v4i16.trap",
+  "llvm.nvvm.sust.b.3d.v4i16.zero",
+  "llvm.nvvm.sust.b.3d.v4i32.clamp",
+  "llvm.nvvm.sust.b.3d.v4i32.trap",
+  "llvm.nvvm.sust.b.3d.v4i32.zero",
+  "llvm.nvvm.sust.b.3d.v4i8.clamp",
+  "llvm.nvvm.sust.b.3d.v4i8.trap",
+  "llvm.nvvm.sust.b.3d.v4i8.zero",
+  "llvm.nvvm.sust.p.1d.array.i16.trap",
+  "llvm.nvvm.sust.p.1d.array.i32.trap",
+  "llvm.nvvm.sust.p.1d.array.i8.trap",
+  "llvm.nvvm.sust.p.1d.array.v2i16.trap",
+  "llvm.nvvm.sust.p.1d.array.v2i32.trap",
+  "llvm.nvvm.sust.p.1d.array.v2i8.trap",
+  "llvm.nvvm.sust.p.1d.array.v4i16.trap",
+  "llvm.nvvm.sust.p.1d.array.v4i32.trap",
+  "llvm.nvvm.sust.p.1d.array.v4i8.trap",
+  "llvm.nvvm.sust.p.1d.i16.trap",
+  "llvm.nvvm.sust.p.1d.i32.trap",
+  "llvm.nvvm.sust.p.1d.i8.trap",
+  "llvm.nvvm.sust.p.1d.v2i16.trap",
+  "llvm.nvvm.sust.p.1d.v2i32.trap",
+  "llvm.nvvm.sust.p.1d.v2i8.trap",
+  "llvm.nvvm.sust.p.1d.v4i16.trap",
+  "llvm.nvvm.sust.p.1d.v4i32.trap",
+  "llvm.nvvm.sust.p.1d.v4i8.trap",
+  "llvm.nvvm.sust.p.2d.array.i16.trap",
+  "llvm.nvvm.sust.p.2d.array.i32.trap",
+  "llvm.nvvm.sust.p.2d.array.i8.trap",
+  "llvm.nvvm.sust.p.2d.array.v2i16.trap",
+  "llvm.nvvm.sust.p.2d.array.v2i32.trap",
+  "llvm.nvvm.sust.p.2d.array.v2i8.trap",
+  "llvm.nvvm.sust.p.2d.array.v4i16.trap",
+  "llvm.nvvm.sust.p.2d.array.v4i32.trap",
+  "llvm.nvvm.sust.p.2d.array.v4i8.trap",
+  "llvm.nvvm.sust.p.2d.i16.trap",
+  "llvm.nvvm.sust.p.2d.i32.trap",
+  "llvm.nvvm.sust.p.2d.i8.trap",
+  "llvm.nvvm.sust.p.2d.v2i16.trap",
+  "llvm.nvvm.sust.p.2d.v2i32.trap",
+  "llvm.nvvm.sust.p.2d.v2i8.trap",
+  "llvm.nvvm.sust.p.2d.v4i16.trap",
+  "llvm.nvvm.sust.p.2d.v4i32.trap",
+  "llvm.nvvm.sust.p.2d.v4i8.trap",
+  "llvm.nvvm.sust.p.3d.i16.trap",
+  "llvm.nvvm.sust.p.3d.i32.trap",
+  "llvm.nvvm.sust.p.3d.i8.trap",
+  "llvm.nvvm.sust.p.3d.v2i16.trap",
+  "llvm.nvvm.sust.p.3d.v2i32.trap",
+  "llvm.nvvm.sust.p.3d.v2i8.trap",
+  "llvm.nvvm.sust.p.3d.v4i16.trap",
+  "llvm.nvvm.sust.p.3d.v4i32.trap",
+  "llvm.nvvm.sust.p.3d.v4i8.trap",
+  "llvm.nvvm.swap.lo.hi.b64",
+  "llvm.nvvm.tex.1d.array.grad.v4f32.f32",
+  "llvm.nvvm.tex.1d.array.grad.v4s32.f32",
+  "llvm.nvvm.tex.1d.array.grad.v4u32.f32",
+  "llvm.nvvm.tex.1d.array.level.v4f32.f32",
+  "llvm.nvvm.tex.1d.array.level.v4s32.f32",
+  "llvm.nvvm.tex.1d.array.level.v4u32.f32",
+  "llvm.nvvm.tex.1d.array.v4f32.f32",
+  "llvm.nvvm.tex.1d.array.v4f32.s32",
+  "llvm.nvvm.tex.1d.array.v4s32.f32",
+  "llvm.nvvm.tex.1d.array.v4s32.s32",
+  "llvm.nvvm.tex.1d.array.v4u32.f32",
+  "llvm.nvvm.tex.1d.array.v4u32.s32",
+  "llvm.nvvm.tex.1d.grad.v4f32.f32",
+  "llvm.nvvm.tex.1d.grad.v4s32.f32",
+  "llvm.nvvm.tex.1d.grad.v4u32.f32",
+  "llvm.nvvm.tex.1d.level.v4f32.f32",
+  "llvm.nvvm.tex.1d.level.v4s32.f32",
+  "llvm.nvvm.tex.1d.level.v4u32.f32",
+  "llvm.nvvm.tex.1d.v4f32.f32",
+  "llvm.nvvm.tex.1d.v4f32.s32",
+  "llvm.nvvm.tex.1d.v4s32.f32",
+  "llvm.nvvm.tex.1d.v4s32.s32",
+  "llvm.nvvm.tex.1d.v4u32.f32",
+  "llvm.nvvm.tex.1d.v4u32.s32",
+  "llvm.nvvm.tex.2d.array.grad.v4f32.f32",
+  "llvm.nvvm.tex.2d.array.grad.v4s32.f32",
+  "llvm.nvvm.tex.2d.array.grad.v4u32.f32",
+  "llvm.nvvm.tex.2d.array.level.v4f32.f32",
+  "llvm.nvvm.tex.2d.array.level.v4s32.f32",
+  "llvm.nvvm.tex.2d.array.level.v4u32.f32",
+  "llvm.nvvm.tex.2d.array.v4f32.f32",
+  "llvm.nvvm.tex.2d.array.v4f32.s32",
+  "llvm.nvvm.tex.2d.array.v4s32.f32",
+  "llvm.nvvm.tex.2d.array.v4s32.s32",
+  "llvm.nvvm.tex.2d.array.v4u32.f32",
+  "llvm.nvvm.tex.2d.array.v4u32.s32",
+  "llvm.nvvm.tex.2d.grad.v4f32.f32",
+  "llvm.nvvm.tex.2d.grad.v4s32.f32",
+  "llvm.nvvm.tex.2d.grad.v4u32.f32",
+  "llvm.nvvm.tex.2d.level.v4f32.f32",
+  "llvm.nvvm.tex.2d.level.v4s32.f32",
+  "llvm.nvvm.tex.2d.level.v4u32.f32",
+  "llvm.nvvm.tex.2d.v4f32.f32",
+  "llvm.nvvm.tex.2d.v4f32.s32",
+  "llvm.nvvm.tex.2d.v4s32.f32",
+  "llvm.nvvm.tex.2d.v4s32.s32",
+  "llvm.nvvm.tex.2d.v4u32.f32",
+  "llvm.nvvm.tex.2d.v4u32.s32",
+  "llvm.nvvm.tex.3d.grad.v4f32.f32",
+  "llvm.nvvm.tex.3d.grad.v4s32.f32",
+  "llvm.nvvm.tex.3d.grad.v4u32.f32",
+  "llvm.nvvm.tex.3d.level.v4f32.f32",
+  "llvm.nvvm.tex.3d.level.v4s32.f32",
+  "llvm.nvvm.tex.3d.level.v4u32.f32",
+  "llvm.nvvm.tex.3d.v4f32.f32",
+  "llvm.nvvm.tex.3d.v4f32.s32",
+  "llvm.nvvm.tex.3d.v4s32.f32",
+  "llvm.nvvm.tex.3d.v4s32.s32",
+  "llvm.nvvm.tex.3d.v4u32.f32",
+  "llvm.nvvm.tex.3d.v4u32.s32",
+  "llvm.nvvm.tex.cube.array.level.v4f32.f32",
+  "llvm.nvvm.tex.cube.array.level.v4s32.f32",
+  "llvm.nvvm.tex.cube.array.level.v4u32.f32",
+  "llvm.nvvm.tex.cube.array.v4f32.f32",
+  "llvm.nvvm.tex.cube.array.v4s32.f32",
+  "llvm.nvvm.tex.cube.array.v4u32.f32",
+  "llvm.nvvm.tex.cube.level.v4f32.f32",
+  "llvm.nvvm.tex.cube.level.v4s32.f32",
+  "llvm.nvvm.tex.cube.level.v4u32.f32",
+  "llvm.nvvm.tex.cube.v4f32.f32",
+  "llvm.nvvm.tex.cube.v4s32.f32",
+  "llvm.nvvm.tex.cube.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.array.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.array.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.array.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.array.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.array.v4f32.s32",
+  "llvm.nvvm.tex.unified.1d.array.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.array.v4s32.s32",
+  "llvm.nvvm.tex.unified.1d.array.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.array.v4u32.s32",
+  "llvm.nvvm.tex.unified.1d.grad.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.grad.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.grad.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.v4f32.f32",
+  "llvm.nvvm.tex.unified.1d.v4f32.s32",
+  "llvm.nvvm.tex.unified.1d.v4s32.f32",
+  "llvm.nvvm.tex.unified.1d.v4s32.s32",
+  "llvm.nvvm.tex.unified.1d.v4u32.f32",
+  "llvm.nvvm.tex.unified.1d.v4u32.s32",
+  "llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.array.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.array.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.array.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.array.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.array.v4f32.s32",
+  "llvm.nvvm.tex.unified.2d.array.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.array.v4s32.s32",
+  "llvm.nvvm.tex.unified.2d.array.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.array.v4u32.s32",
+  "llvm.nvvm.tex.unified.2d.grad.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.grad.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.grad.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.v4f32.f32",
+  "llvm.nvvm.tex.unified.2d.v4f32.s32",
+  "llvm.nvvm.tex.unified.2d.v4s32.f32",
+  "llvm.nvvm.tex.unified.2d.v4s32.s32",
+  "llvm.nvvm.tex.unified.2d.v4u32.f32",
+  "llvm.nvvm.tex.unified.2d.v4u32.s32",
+  "llvm.nvvm.tex.unified.3d.grad.v4f32.f32",
+  "llvm.nvvm.tex.unified.3d.grad.v4s32.f32",
+  "llvm.nvvm.tex.unified.3d.grad.v4u32.f32",
+  "llvm.nvvm.tex.unified.3d.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.3d.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.3d.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.3d.v4f32.f32",
+  "llvm.nvvm.tex.unified.3d.v4f32.s32",
+  "llvm.nvvm.tex.unified.3d.v4s32.f32",
+  "llvm.nvvm.tex.unified.3d.v4s32.s32",
+  "llvm.nvvm.tex.unified.3d.v4u32.f32",
+  "llvm.nvvm.tex.unified.3d.v4u32.s32",
+  "llvm.nvvm.tex.unified.cube.array.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.cube.array.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.cube.array.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.cube.array.v4f32.f32",
+  "llvm.nvvm.tex.unified.cube.array.v4s32.f32",
+  "llvm.nvvm.tex.unified.cube.array.v4u32.f32",
+  "llvm.nvvm.tex.unified.cube.level.v4f32.f32",
+  "llvm.nvvm.tex.unified.cube.level.v4s32.f32",
+  "llvm.nvvm.tex.unified.cube.level.v4u32.f32",
+  "llvm.nvvm.tex.unified.cube.v4f32.f32",
+  "llvm.nvvm.tex.unified.cube.v4s32.f32",
+  "llvm.nvvm.tex.unified.cube.v4u32.f32",
+  "llvm.nvvm.texsurf.handle",
+  "llvm.nvvm.texsurf.handle.internal",
+  "llvm.nvvm.tld4.a.2d.v4f32.f32",
+  "llvm.nvvm.tld4.a.2d.v4s32.f32",
+  "llvm.nvvm.tld4.a.2d.v4u32.f32",
+  "llvm.nvvm.tld4.b.2d.v4f32.f32",
+  "llvm.nvvm.tld4.b.2d.v4s32.f32",
+  "llvm.nvvm.tld4.b.2d.v4u32.f32",
+  "llvm.nvvm.tld4.g.2d.v4f32.f32",
+  "llvm.nvvm.tld4.g.2d.v4s32.f32",
+  "llvm.nvvm.tld4.g.2d.v4u32.f32",
+  "llvm.nvvm.tld4.r.2d.v4f32.f32",
+  "llvm.nvvm.tld4.r.2d.v4s32.f32",
+  "llvm.nvvm.tld4.r.2d.v4u32.f32",
+  "llvm.nvvm.tld4.unified.a.2d.v4f32.f32",
+  "llvm.nvvm.tld4.unified.a.2d.v4s32.f32",
+  "llvm.nvvm.tld4.unified.a.2d.v4u32.f32",
+  "llvm.nvvm.tld4.unified.b.2d.v4f32.f32",
+  "llvm.nvvm.tld4.unified.b.2d.v4s32.f32",
+  "llvm.nvvm.tld4.unified.b.2d.v4u32.f32",
+  "llvm.nvvm.tld4.unified.g.2d.v4f32.f32",
+  "llvm.nvvm.tld4.unified.g.2d.v4s32.f32",
+  "llvm.nvvm.tld4.unified.g.2d.v4u32.f32",
+  "llvm.nvvm.tld4.unified.r.2d.v4f32.f32",
+  "llvm.nvvm.tld4.unified.r.2d.v4s32.f32",
+  "llvm.nvvm.tld4.unified.r.2d.v4u32.f32",
+  "llvm.nvvm.trunc.d",
+  "llvm.nvvm.trunc.f",
+  "llvm.nvvm.trunc.ftz.f",
+  "llvm.nvvm.txq.array.size",
+  "llvm.nvvm.txq.channel.data.type",
+  "llvm.nvvm.txq.channel.order",
+  "llvm.nvvm.txq.depth",
+  "llvm.nvvm.txq.height",
+  "llvm.nvvm.txq.num.mipmap.levels",
+  "llvm.nvvm.txq.num.samples",
+  "llvm.nvvm.txq.width",
+  "llvm.nvvm.ui2d.rm",
+  "llvm.nvvm.ui2d.rn",
+  "llvm.nvvm.ui2d.rp",
+  "llvm.nvvm.ui2d.rz",
+  "llvm.nvvm.ui2f.rm",
+  "llvm.nvvm.ui2f.rn",
+  "llvm.nvvm.ui2f.rp",
+  "llvm.nvvm.ui2f.rz",
+  "llvm.nvvm.ull2d.rm",
+  "llvm.nvvm.ull2d.rn",
+  "llvm.nvvm.ull2d.rp",
+  "llvm.nvvm.ull2d.rz",
+  "llvm.nvvm.ull2f.rm",
+  "llvm.nvvm.ull2f.rn",
+  "llvm.nvvm.ull2f.rp",
+  "llvm.nvvm.ull2f.rz",
+  "llvm.nvvm.vote.all",
+  "llvm.nvvm.vote.all.sync",
+  "llvm.nvvm.vote.any",
+  "llvm.nvvm.vote.any.sync",
+  "llvm.nvvm.vote.ballot",
+  "llvm.nvvm.vote.ballot.sync",
+  "llvm.nvvm.vote.uni",
+  "llvm.nvvm.vote.uni.sync",
+  "llvm.nvvm.wmma.m16n16k16.load.a.col.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.a.row.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.b.col.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.b.row.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.c.col.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.c.col.f32",
+  "llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32",
+  "llvm.nvvm.wmma.m16n16k16.load.c.row.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.c.row.f32",
+  "llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32",
+  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite",
+  "llvm.nvvm.wmma.m16n16k16.store.d.col.f16",
+  "llvm.nvvm.wmma.m16n16k16.store.d.col.f32",
+  "llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32",
+  "llvm.nvvm.wmma.m16n16k16.store.d.row.f16",
+  "llvm.nvvm.wmma.m16n16k16.store.d.row.f32",
+  "llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16",
+  "llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32",
+  "llvm.ppc.altivec.crypto.vcipher",
+  "llvm.ppc.altivec.crypto.vcipherlast",
+  "llvm.ppc.altivec.crypto.vncipher",
+  "llvm.ppc.altivec.crypto.vncipherlast",
+  "llvm.ppc.altivec.crypto.vpermxor",
+  "llvm.ppc.altivec.crypto.vpmsumb",
+  "llvm.ppc.altivec.crypto.vpmsumd",
+  "llvm.ppc.altivec.crypto.vpmsumh",
+  "llvm.ppc.altivec.crypto.vpmsumw",
+  "llvm.ppc.altivec.crypto.vsbox",
+  "llvm.ppc.altivec.crypto.vshasigmad",
+  "llvm.ppc.altivec.crypto.vshasigmaw",
+  "llvm.ppc.altivec.dss",
+  "llvm.ppc.altivec.dssall",
+  "llvm.ppc.altivec.dst",
+  "llvm.ppc.altivec.dstst",
+  "llvm.ppc.altivec.dststt",
+  "llvm.ppc.altivec.dstt",
+  "llvm.ppc.altivec.lvebx",
+  "llvm.ppc.altivec.lvehx",
+  "llvm.ppc.altivec.lvewx",
+  "llvm.ppc.altivec.lvsl",
+  "llvm.ppc.altivec.lvsr",
+  "llvm.ppc.altivec.lvx",
+  "llvm.ppc.altivec.lvxl",
+  "llvm.ppc.altivec.mfvscr",
+  "llvm.ppc.altivec.mtvscr",
+  "llvm.ppc.altivec.stvebx",
+  "llvm.ppc.altivec.stvehx",
+  "llvm.ppc.altivec.stvewx",
+  "llvm.ppc.altivec.stvx",
+  "llvm.ppc.altivec.stvxl",
+  "llvm.ppc.altivec.vabsdub",
+  "llvm.ppc.altivec.vabsduh",
+  "llvm.ppc.altivec.vabsduw",
+  "llvm.ppc.altivec.vaddcuq",
+  "llvm.ppc.altivec.vaddcuw",
+  "llvm.ppc.altivec.vaddecuq",
+  "llvm.ppc.altivec.vaddeuqm",
+  "llvm.ppc.altivec.vaddsbs",
+  "llvm.ppc.altivec.vaddshs",
+  "llvm.ppc.altivec.vaddsws",
+  "llvm.ppc.altivec.vaddubs",
+  "llvm.ppc.altivec.vadduhs",
+  "llvm.ppc.altivec.vadduws",
+  "llvm.ppc.altivec.vavgsb",
+  "llvm.ppc.altivec.vavgsh",
+  "llvm.ppc.altivec.vavgsw",
+  "llvm.ppc.altivec.vavgub",
+  "llvm.ppc.altivec.vavguh",
+  "llvm.ppc.altivec.vavguw",
+  "llvm.ppc.altivec.vbpermq",
+  "llvm.ppc.altivec.vcfsx",
+  "llvm.ppc.altivec.vcfux",
+  "llvm.ppc.altivec.vclzlsbb",
+  "llvm.ppc.altivec.vcmpbfp",
+  "llvm.ppc.altivec.vcmpbfp.p",
+  "llvm.ppc.altivec.vcmpeqfp",
+  "llvm.ppc.altivec.vcmpeqfp.p",
+  "llvm.ppc.altivec.vcmpequb",
+  "llvm.ppc.altivec.vcmpequb.p",
+  "llvm.ppc.altivec.vcmpequd",
+  "llvm.ppc.altivec.vcmpequd.p",
+  "llvm.ppc.altivec.vcmpequh",
+  "llvm.ppc.altivec.vcmpequh.p",
+  "llvm.ppc.altivec.vcmpequw",
+  "llvm.ppc.altivec.vcmpequw.p",
+  "llvm.ppc.altivec.vcmpgefp",
+  "llvm.ppc.altivec.vcmpgefp.p",
+  "llvm.ppc.altivec.vcmpgtfp",
+  "llvm.ppc.altivec.vcmpgtfp.p",
+  "llvm.ppc.altivec.vcmpgtsb",
+  "llvm.ppc.altivec.vcmpgtsb.p",
+  "llvm.ppc.altivec.vcmpgtsd",
+  "llvm.ppc.altivec.vcmpgtsd.p",
+  "llvm.ppc.altivec.vcmpgtsh",
+  "llvm.ppc.altivec.vcmpgtsh.p",
+  "llvm.ppc.altivec.vcmpgtsw",
+  "llvm.ppc.altivec.vcmpgtsw.p",
+  "llvm.ppc.altivec.vcmpgtub",
+  "llvm.ppc.altivec.vcmpgtub.p",
+  "llvm.ppc.altivec.vcmpgtud",
+  "llvm.ppc.altivec.vcmpgtud.p",
+  "llvm.ppc.altivec.vcmpgtuh",
+  "llvm.ppc.altivec.vcmpgtuh.p",
+  "llvm.ppc.altivec.vcmpgtuw",
+  "llvm.ppc.altivec.vcmpgtuw.p",
+  "llvm.ppc.altivec.vcmpneb",
+  "llvm.ppc.altivec.vcmpneb.p",
+  "llvm.ppc.altivec.vcmpneh",
+  "llvm.ppc.altivec.vcmpneh.p",
+  "llvm.ppc.altivec.vcmpnew",
+  "llvm.ppc.altivec.vcmpnew.p",
+  "llvm.ppc.altivec.vcmpnezb",
+  "llvm.ppc.altivec.vcmpnezb.p",
+  "llvm.ppc.altivec.vcmpnezh",
+  "llvm.ppc.altivec.vcmpnezh.p",
+  "llvm.ppc.altivec.vcmpnezw",
+  "llvm.ppc.altivec.vcmpnezw.p",
+  "llvm.ppc.altivec.vctsxs",
+  "llvm.ppc.altivec.vctuxs",
+  "llvm.ppc.altivec.vctzlsbb",
+  "llvm.ppc.altivec.vexptefp",
+  "llvm.ppc.altivec.vgbbd",
+  "llvm.ppc.altivec.vlogefp",
+  "llvm.ppc.altivec.vmaddfp",
+  "llvm.ppc.altivec.vmaxfp",
+  "llvm.ppc.altivec.vmaxsb",
+  "llvm.ppc.altivec.vmaxsd",
+  "llvm.ppc.altivec.vmaxsh",
+  "llvm.ppc.altivec.vmaxsw",
+  "llvm.ppc.altivec.vmaxub",
+  "llvm.ppc.altivec.vmaxud",
+  "llvm.ppc.altivec.vmaxuh",
+  "llvm.ppc.altivec.vmaxuw",
+  "llvm.ppc.altivec.vmhaddshs",
+  "llvm.ppc.altivec.vmhraddshs",
+  "llvm.ppc.altivec.vminfp",
+  "llvm.ppc.altivec.vminsb",
+  "llvm.ppc.altivec.vminsd",
+  "llvm.ppc.altivec.vminsh",
+  "llvm.ppc.altivec.vminsw",
+  "llvm.ppc.altivec.vminub",
+  "llvm.ppc.altivec.vminud",
+  "llvm.ppc.altivec.vminuh",
+  "llvm.ppc.altivec.vminuw",
+  "llvm.ppc.altivec.vmladduhm",
+  "llvm.ppc.altivec.vmsummbm",
+  "llvm.ppc.altivec.vmsumshm",
+  "llvm.ppc.altivec.vmsumshs",
+  "llvm.ppc.altivec.vmsumubm",
+  "llvm.ppc.altivec.vmsumuhm",
+  "llvm.ppc.altivec.vmsumuhs",
+  "llvm.ppc.altivec.vmulesb",
+  "llvm.ppc.altivec.vmulesh",
+  "llvm.ppc.altivec.vmulesw",
+  "llvm.ppc.altivec.vmuleub",
+  "llvm.ppc.altivec.vmuleuh",
+  "llvm.ppc.altivec.vmuleuw",
+  "llvm.ppc.altivec.vmulosb",
+  "llvm.ppc.altivec.vmulosh",
+  "llvm.ppc.altivec.vmulosw",
+  "llvm.ppc.altivec.vmuloub",
+  "llvm.ppc.altivec.vmulouh",
+  "llvm.ppc.altivec.vmulouw",
+  "llvm.ppc.altivec.vnmsubfp",
+  "llvm.ppc.altivec.vperm",
+  "llvm.ppc.altivec.vpkpx",
+  "llvm.ppc.altivec.vpksdss",
+  "llvm.ppc.altivec.vpksdus",
+  "llvm.ppc.altivec.vpkshss",
+  "llvm.ppc.altivec.vpkshus",
+  "llvm.ppc.altivec.vpkswss",
+  "llvm.ppc.altivec.vpkswus",
+  "llvm.ppc.altivec.vpkudus",
+  "llvm.ppc.altivec.vpkuhus",
+  "llvm.ppc.altivec.vpkuwus",
+  "llvm.ppc.altivec.vprtybd",
+  "llvm.ppc.altivec.vprtybq",
+  "llvm.ppc.altivec.vprtybw",
+  "llvm.ppc.altivec.vrefp",
+  "llvm.ppc.altivec.vrfim",
+  "llvm.ppc.altivec.vrfin",
+  "llvm.ppc.altivec.vrfip",
+  "llvm.ppc.altivec.vrfiz",
+  "llvm.ppc.altivec.vrlb",
+  "llvm.ppc.altivec.vrld",
+  "llvm.ppc.altivec.vrldmi",
+  "llvm.ppc.altivec.vrldnm",
+  "llvm.ppc.altivec.vrlh",
+  "llvm.ppc.altivec.vrlw",
+  "llvm.ppc.altivec.vrlwmi",
+  "llvm.ppc.altivec.vrlwnm",
+  "llvm.ppc.altivec.vrsqrtefp",
+  "llvm.ppc.altivec.vsel",
+  "llvm.ppc.altivec.vsl",
+  "llvm.ppc.altivec.vslb",
+  "llvm.ppc.altivec.vslh",
+  "llvm.ppc.altivec.vslo",
+  "llvm.ppc.altivec.vslv",
+  "llvm.ppc.altivec.vslw",
+  "llvm.ppc.altivec.vsr",
+  "llvm.ppc.altivec.vsrab",
+  "llvm.ppc.altivec.vsrah",
+  "llvm.ppc.altivec.vsraw",
+  "llvm.ppc.altivec.vsrb",
+  "llvm.ppc.altivec.vsrh",
+  "llvm.ppc.altivec.vsro",
+  "llvm.ppc.altivec.vsrv",
+  "llvm.ppc.altivec.vsrw",
+  "llvm.ppc.altivec.vsubcuq",
+  "llvm.ppc.altivec.vsubcuw",
+  "llvm.ppc.altivec.vsubecuq",
+  "llvm.ppc.altivec.vsubeuqm",
+  "llvm.ppc.altivec.vsubsbs",
+  "llvm.ppc.altivec.vsubshs",
+  "llvm.ppc.altivec.vsubsws",
+  "llvm.ppc.altivec.vsububs",
+  "llvm.ppc.altivec.vsubuhs",
+  "llvm.ppc.altivec.vsubuws",
+  "llvm.ppc.altivec.vsum2sws",
+  "llvm.ppc.altivec.vsum4sbs",
+  "llvm.ppc.altivec.vsum4shs",
+  "llvm.ppc.altivec.vsum4ubs",
+  "llvm.ppc.altivec.vsumsws",
+  "llvm.ppc.altivec.vupkhpx",
+  "llvm.ppc.altivec.vupkhsb",
+  "llvm.ppc.altivec.vupkhsh",
+  "llvm.ppc.altivec.vupkhsw",
+  "llvm.ppc.altivec.vupklpx",
+  "llvm.ppc.altivec.vupklsb",
+  "llvm.ppc.altivec.vupklsh",
+  "llvm.ppc.altivec.vupklsw",
+  "llvm.ppc.bpermd",
+  "llvm.ppc.cfence",
+  "llvm.ppc.dcba",
+  "llvm.ppc.dcbf",
+  "llvm.ppc.dcbi",
+  "llvm.ppc.dcbst",
+  "llvm.ppc.dcbt",
+  "llvm.ppc.dcbtst",
+  "llvm.ppc.dcbz",
+  "llvm.ppc.dcbzl",
+  "llvm.ppc.divde",
+  "llvm.ppc.divdeu",
+  "llvm.ppc.divwe",
+  "llvm.ppc.divweu",
+  "llvm.ppc.get.texasr",
+  "llvm.ppc.get.texasru",
+  "llvm.ppc.get.tfhar",
+  "llvm.ppc.get.tfiar",
+  "llvm.ppc.is.decremented.ctr.nonzero",
+  "llvm.ppc.lwsync",
+  "llvm.ppc.mtctr",
+  "llvm.ppc.qpx.qvfabs",
+  "llvm.ppc.qpx.qvfadd",
+  "llvm.ppc.qpx.qvfadds",
+  "llvm.ppc.qpx.qvfcfid",
+  "llvm.ppc.qpx.qvfcfids",
+  "llvm.ppc.qpx.qvfcfidu",
+  "llvm.ppc.qpx.qvfcfidus",
+  "llvm.ppc.qpx.qvfcmpeq",
+  "llvm.ppc.qpx.qvfcmpgt",
+  "llvm.ppc.qpx.qvfcmplt",
+  "llvm.ppc.qpx.qvfcpsgn",
+  "llvm.ppc.qpx.qvfctid",
+  "llvm.ppc.qpx.qvfctidu",
+  "llvm.ppc.qpx.qvfctiduz",
+  "llvm.ppc.qpx.qvfctidz",
+  "llvm.ppc.qpx.qvfctiw",
+  "llvm.ppc.qpx.qvfctiwu",
+  "llvm.ppc.qpx.qvfctiwuz",
+  "llvm.ppc.qpx.qvfctiwz",
+  "llvm.ppc.qpx.qvflogical",
+  "llvm.ppc.qpx.qvfmadd",
+  "llvm.ppc.qpx.qvfmadds",
+  "llvm.ppc.qpx.qvfmsub",
+  "llvm.ppc.qpx.qvfmsubs",
+  "llvm.ppc.qpx.qvfmul",
+  "llvm.ppc.qpx.qvfmuls",
+  "llvm.ppc.qpx.qvfnabs",
+  "llvm.ppc.qpx.qvfneg",
+  "llvm.ppc.qpx.qvfnmadd",
+  "llvm.ppc.qpx.qvfnmadds",
+  "llvm.ppc.qpx.qvfnmsub",
+  "llvm.ppc.qpx.qvfnmsubs",
+  "llvm.ppc.qpx.qvfperm",
+  "llvm.ppc.qpx.qvfre",
+  "llvm.ppc.qpx.qvfres",
+  "llvm.ppc.qpx.qvfrim",
+  "llvm.ppc.qpx.qvfrin",
+  "llvm.ppc.qpx.qvfrip",
+  "llvm.ppc.qpx.qvfriz",
+  "llvm.ppc.qpx.qvfrsp",
+  "llvm.ppc.qpx.qvfrsqrte",
+  "llvm.ppc.qpx.qvfrsqrtes",
+  "llvm.ppc.qpx.qvfsel",
+  "llvm.ppc.qpx.qvfsub",
+  "llvm.ppc.qpx.qvfsubs",
+  "llvm.ppc.qpx.qvftstnan",
+  "llvm.ppc.qpx.qvfxmadd",
+  "llvm.ppc.qpx.qvfxmadds",
+  "llvm.ppc.qpx.qvfxmul",
+  "llvm.ppc.qpx.qvfxmuls",
+  "llvm.ppc.qpx.qvfxxcpnmadd",
+  "llvm.ppc.qpx.qvfxxcpnmadds",
+  "llvm.ppc.qpx.qvfxxmadd",
+  "llvm.ppc.qpx.qvfxxmadds",
+  "llvm.ppc.qpx.qvfxxnpmadd",
+  "llvm.ppc.qpx.qvfxxnpmadds",
+  "llvm.ppc.qpx.qvgpci",
+  "llvm.ppc.qpx.qvlfcd",
+  "llvm.ppc.qpx.qvlfcda",
+  "llvm.ppc.qpx.qvlfcs",
+  "llvm.ppc.qpx.qvlfcsa",
+  "llvm.ppc.qpx.qvlfd",
+  "llvm.ppc.qpx.qvlfda",
+  "llvm.ppc.qpx.qvlfiwa",
+  "llvm.ppc.qpx.qvlfiwaa",
+  "llvm.ppc.qpx.qvlfiwz",
+  "llvm.ppc.qpx.qvlfiwza",
+  "llvm.ppc.qpx.qvlfs",
+  "llvm.ppc.qpx.qvlfsa",
+  "llvm.ppc.qpx.qvlpcld",
+  "llvm.ppc.qpx.qvlpcls",
+  "llvm.ppc.qpx.qvlpcrd",
+  "llvm.ppc.qpx.qvlpcrs",
+  "llvm.ppc.qpx.qvstfcd",
+  "llvm.ppc.qpx.qvstfcda",
+  "llvm.ppc.qpx.qvstfcs",
+  "llvm.ppc.qpx.qvstfcsa",
+  "llvm.ppc.qpx.qvstfd",
+  "llvm.ppc.qpx.qvstfda",
+  "llvm.ppc.qpx.qvstfiw",
+  "llvm.ppc.qpx.qvstfiwa",
+  "llvm.ppc.qpx.qvstfs",
+  "llvm.ppc.qpx.qvstfsa",
+  "llvm.ppc.set.texasr",
+  "llvm.ppc.set.texasru",
+  "llvm.ppc.set.tfhar",
+  "llvm.ppc.set.tfiar",
+  "llvm.ppc.sync",
+  "llvm.ppc.tabort",
+  "llvm.ppc.tabortdc",
+  "llvm.ppc.tabortdci",
+  "llvm.ppc.tabortwc",
+  "llvm.ppc.tabortwci",
+  "llvm.ppc.tbegin",
+  "llvm.ppc.tcheck",
+  "llvm.ppc.tend",
+  "llvm.ppc.tendall",
+  "llvm.ppc.trechkpt",
+  "llvm.ppc.treclaim",
+  "llvm.ppc.tresume",
+  "llvm.ppc.tsr",
+  "llvm.ppc.tsuspend",
+  "llvm.ppc.ttest",
+  "llvm.ppc.vsx.lxvd2x",
+  "llvm.ppc.vsx.lxvd2x.be",
+  "llvm.ppc.vsx.lxvl",
+  "llvm.ppc.vsx.lxvll",
+  "llvm.ppc.vsx.lxvw4x",
+  "llvm.ppc.vsx.lxvw4x.be",
+  "llvm.ppc.vsx.stxvd2x",
+  "llvm.ppc.vsx.stxvd2x.be",
+  "llvm.ppc.vsx.stxvl",
+  "llvm.ppc.vsx.stxvll",
+  "llvm.ppc.vsx.stxvw4x",
+  "llvm.ppc.vsx.stxvw4x.be",
+  "llvm.ppc.vsx.xsmaxdp",
+  "llvm.ppc.vsx.xsmindp",
+  "llvm.ppc.vsx.xvcmpeqdp",
+  "llvm.ppc.vsx.xvcmpeqdp.p",
+  "llvm.ppc.vsx.xvcmpeqsp",
+  "llvm.ppc.vsx.xvcmpeqsp.p",
+  "llvm.ppc.vsx.xvcmpgedp",
+  "llvm.ppc.vsx.xvcmpgedp.p",
+  "llvm.ppc.vsx.xvcmpgesp",
+  "llvm.ppc.vsx.xvcmpgesp.p",
+  "llvm.ppc.vsx.xvcmpgtdp",
+  "llvm.ppc.vsx.xvcmpgtdp.p",
+  "llvm.ppc.vsx.xvcmpgtsp",
+  "llvm.ppc.vsx.xvcmpgtsp.p",
+  "llvm.ppc.vsx.xvcvdpsp",
+  "llvm.ppc.vsx.xvcvdpsxws",
+  "llvm.ppc.vsx.xvcvdpuxws",
+  "llvm.ppc.vsx.xvcvhpsp",
+  "llvm.ppc.vsx.xvcvspdp",
+  "llvm.ppc.vsx.xvcvsphp",
+  "llvm.ppc.vsx.xvcvsxdsp",
+  "llvm.ppc.vsx.xvcvsxwdp",
+  "llvm.ppc.vsx.xvcvuxdsp",
+  "llvm.ppc.vsx.xvcvuxwdp",
+  "llvm.ppc.vsx.xvdivdp",
+  "llvm.ppc.vsx.xvdivsp",
+  "llvm.ppc.vsx.xviexpdp",
+  "llvm.ppc.vsx.xviexpsp",
+  "llvm.ppc.vsx.xvmaxdp",
+  "llvm.ppc.vsx.xvmaxsp",
+  "llvm.ppc.vsx.xvmindp",
+  "llvm.ppc.vsx.xvminsp",
+  "llvm.ppc.vsx.xvrdpip",
+  "llvm.ppc.vsx.xvredp",
+  "llvm.ppc.vsx.xvresp",
+  "llvm.ppc.vsx.xvrspip",
+  "llvm.ppc.vsx.xvrsqrtedp",
+  "llvm.ppc.vsx.xvrsqrtesp",
+  "llvm.ppc.vsx.xvtstdcdp",
+  "llvm.ppc.vsx.xvtstdcsp",
+  "llvm.ppc.vsx.xvxexpdp",
+  "llvm.ppc.vsx.xvxexpsp",
+  "llvm.ppc.vsx.xvxsigdp",
+  "llvm.ppc.vsx.xvxsigsp",
+  "llvm.ppc.vsx.xxextractuw",
+  "llvm.ppc.vsx.xxinsertw",
+  "llvm.ppc.vsx.xxleqv",
+  "llvm.r600.cube",
+  "llvm.r600.group.barrier",
+  "llvm.r600.implicitarg.ptr",
+  "llvm.r600.rat.store.typed",
+  "llvm.r600.read.global.size.x",
+  "llvm.r600.read.global.size.y",
+  "llvm.r600.read.global.size.z",
+  "llvm.r600.read.local.size.x",
+  "llvm.r600.read.local.size.y",
+  "llvm.r600.read.local.size.z",
+  "llvm.r600.read.ngroups.x",
+  "llvm.r600.read.ngroups.y",
+  "llvm.r600.read.ngroups.z",
+  "llvm.r600.read.tgid.x",
+  "llvm.r600.read.tgid.y",
+  "llvm.r600.read.tgid.z",
+  "llvm.r600.read.tidig.x",
+  "llvm.r600.read.tidig.y",
+  "llvm.r600.read.tidig.z",
+  "llvm.r600.recipsqrt.clamped",
+  "llvm.r600.recipsqrt.ieee",
+  "llvm.s390.efpc",
+  "llvm.s390.etnd",
+  "llvm.s390.lcbb",
+  "llvm.s390.ntstg",
+  "llvm.s390.ppa.txassist",
+  "llvm.s390.sfpc",
+  "llvm.s390.tabort",
+  "llvm.s390.tbegin",
+  "llvm.s390.tbegin.nofloat",
+  "llvm.s390.tbeginc",
+  "llvm.s390.tdc",
+  "llvm.s390.tend",
+  "llvm.s390.vaccb",
+  "llvm.s390.vacccq",
+  "llvm.s390.vaccf",
+  "llvm.s390.vaccg",
+  "llvm.s390.vacch",
+  "llvm.s390.vaccq",
+  "llvm.s390.vacq",
+  "llvm.s390.vaq",
+  "llvm.s390.vavgb",
+  "llvm.s390.vavgf",
+  "llvm.s390.vavgg",
+  "llvm.s390.vavgh",
+  "llvm.s390.vavglb",
+  "llvm.s390.vavglf",
+  "llvm.s390.vavglg",
+  "llvm.s390.vavglh",
+  "llvm.s390.vbperm",
+  "llvm.s390.vceqbs",
+  "llvm.s390.vceqfs",
+  "llvm.s390.vceqgs",
+  "llvm.s390.vceqhs",
+  "llvm.s390.vchbs",
+  "llvm.s390.vchfs",
+  "llvm.s390.vchgs",
+  "llvm.s390.vchhs",
+  "llvm.s390.vchlbs",
+  "llvm.s390.vchlfs",
+  "llvm.s390.vchlgs",
+  "llvm.s390.vchlhs",
+  "llvm.s390.vcksm",
+  "llvm.s390.verimb",
+  "llvm.s390.verimf",
+  "llvm.s390.verimg",
+  "llvm.s390.verimh",
+  "llvm.s390.verllb",
+  "llvm.s390.verllf",
+  "llvm.s390.verllg",
+  "llvm.s390.verllh",
+  "llvm.s390.verllvb",
+  "llvm.s390.verllvf",
+  "llvm.s390.verllvg",
+  "llvm.s390.verllvh",
+  "llvm.s390.vfaeb",
+  "llvm.s390.vfaebs",
+  "llvm.s390.vfaef",
+  "llvm.s390.vfaefs",
+  "llvm.s390.vfaeh",
+  "llvm.s390.vfaehs",
+  "llvm.s390.vfaezb",
+  "llvm.s390.vfaezbs",
+  "llvm.s390.vfaezf",
+  "llvm.s390.vfaezfs",
+  "llvm.s390.vfaezh",
+  "llvm.s390.vfaezhs",
+  "llvm.s390.vfcedbs",
+  "llvm.s390.vfcesbs",
+  "llvm.s390.vfchdbs",
+  "llvm.s390.vfchedbs",
+  "llvm.s390.vfchesbs",
+  "llvm.s390.vfchsbs",
+  "llvm.s390.vfeeb",
+  "llvm.s390.vfeebs",
+  "llvm.s390.vfeef",
+  "llvm.s390.vfeefs",
+  "llvm.s390.vfeeh",
+  "llvm.s390.vfeehs",
+  "llvm.s390.vfeezb",
+  "llvm.s390.vfeezbs",
+  "llvm.s390.vfeezf",
+  "llvm.s390.vfeezfs",
+  "llvm.s390.vfeezh",
+  "llvm.s390.vfeezhs",
+  "llvm.s390.vfeneb",
+  "llvm.s390.vfenebs",
+  "llvm.s390.vfenef",
+  "llvm.s390.vfenefs",
+  "llvm.s390.vfeneh",
+  "llvm.s390.vfenehs",
+  "llvm.s390.vfenezb",
+  "llvm.s390.vfenezbs",
+  "llvm.s390.vfenezf",
+  "llvm.s390.vfenezfs",
+  "llvm.s390.vfenezh",
+  "llvm.s390.vfenezhs",
+  "llvm.s390.vfidb",
+  "llvm.s390.vfisb",
+  "llvm.s390.vfmaxdb",
+  "llvm.s390.vfmaxsb",
+  "llvm.s390.vfmindb",
+  "llvm.s390.vfminsb",
+  "llvm.s390.vftcidb",
+  "llvm.s390.vftcisb",
+  "llvm.s390.vgfmab",
+  "llvm.s390.vgfmaf",
+  "llvm.s390.vgfmag",
+  "llvm.s390.vgfmah",
+  "llvm.s390.vgfmb",
+  "llvm.s390.vgfmf",
+  "llvm.s390.vgfmg",
+  "llvm.s390.vgfmh",
+  "llvm.s390.vistrb",
+  "llvm.s390.vistrbs",
+  "llvm.s390.vistrf",
+  "llvm.s390.vistrfs",
+  "llvm.s390.vistrh",
+  "llvm.s390.vistrhs",
+  "llvm.s390.vlbb",
+  "llvm.s390.vll",
+  "llvm.s390.vlrl",
+  "llvm.s390.vmaeb",
+  "llvm.s390.vmaef",
+  "llvm.s390.vmaeh",
+  "llvm.s390.vmahb",
+  "llvm.s390.vmahf",
+  "llvm.s390.vmahh",
+  "llvm.s390.vmaleb",
+  "llvm.s390.vmalef",
+  "llvm.s390.vmaleh",
+  "llvm.s390.vmalhb",
+  "llvm.s390.vmalhf",
+  "llvm.s390.vmalhh",
+  "llvm.s390.vmalob",
+  "llvm.s390.vmalof",
+  "llvm.s390.vmaloh",
+  "llvm.s390.vmaob",
+  "llvm.s390.vmaof",
+  "llvm.s390.vmaoh",
+  "llvm.s390.vmeb",
+  "llvm.s390.vmef",
+  "llvm.s390.vmeh",
+  "llvm.s390.vmhb",
+  "llvm.s390.vmhf",
+  "llvm.s390.vmhh",
+  "llvm.s390.vmleb",
+  "llvm.s390.vmlef",
+  "llvm.s390.vmleh",
+  "llvm.s390.vmlhb",
+  "llvm.s390.vmlhf",
+  "llvm.s390.vmlhh",
+  "llvm.s390.vmlob",
+  "llvm.s390.vmlof",
+  "llvm.s390.vmloh",
+  "llvm.s390.vmob",
+  "llvm.s390.vmof",
+  "llvm.s390.vmoh",
+  "llvm.s390.vmslg",
+  "llvm.s390.vpdi",
+  "llvm.s390.vperm",
+  "llvm.s390.vpklsf",
+  "llvm.s390.vpklsfs",
+  "llvm.s390.vpklsg",
+  "llvm.s390.vpklsgs",
+  "llvm.s390.vpklsh",
+  "llvm.s390.vpklshs",
+  "llvm.s390.vpksf",
+  "llvm.s390.vpksfs",
+  "llvm.s390.vpksg",
+  "llvm.s390.vpksgs",
+  "llvm.s390.vpksh",
+  "llvm.s390.vpkshs",
+  "llvm.s390.vsbcbiq",
+  "llvm.s390.vsbiq",
+  "llvm.s390.vscbib",
+  "llvm.s390.vscbif",
+  "llvm.s390.vscbig",
+  "llvm.s390.vscbih",
+  "llvm.s390.vscbiq",
+  "llvm.s390.vsl",
+  "llvm.s390.vslb",
+  "llvm.s390.vsldb",
+  "llvm.s390.vsq",
+  "llvm.s390.vsra",
+  "llvm.s390.vsrab",
+  "llvm.s390.vsrl",
+  "llvm.s390.vsrlb",
+  "llvm.s390.vstl",
+  "llvm.s390.vstrcb",
+  "llvm.s390.vstrcbs",
+  "llvm.s390.vstrcf",
+  "llvm.s390.vstrcfs",
+  "llvm.s390.vstrch",
+  "llvm.s390.vstrchs",
+  "llvm.s390.vstrczb",
+  "llvm.s390.vstrczbs",
+  "llvm.s390.vstrczf",
+  "llvm.s390.vstrczfs",
+  "llvm.s390.vstrczh",
+  "llvm.s390.vstrczhs",
+  "llvm.s390.vstrl",
+  "llvm.s390.vsumb",
+  "llvm.s390.vsumgf",
+  "llvm.s390.vsumgh",
+  "llvm.s390.vsumh",
+  "llvm.s390.vsumqf",
+  "llvm.s390.vsumqg",
+  "llvm.s390.vtm",
+  "llvm.s390.vuphb",
+  "llvm.s390.vuphf",
+  "llvm.s390.vuphh",
+  "llvm.s390.vuplb",
+  "llvm.s390.vuplf",
+  "llvm.s390.vuplhb",
+  "llvm.s390.vuplhf",
+  "llvm.s390.vuplhh",
+  "llvm.s390.vuplhw",
+  "llvm.s390.vupllb",
+  "llvm.s390.vupllf",
+  "llvm.s390.vupllh",
+  "llvm.wasm.current.memory",
+  "llvm.wasm.get.ehselector",
+  "llvm.wasm.get.exception",
+  "llvm.wasm.grow.memory",
+  "llvm.wasm.mem.grow",
+  "llvm.wasm.mem.size",
+  "llvm.wasm.rethrow",
+  "llvm.wasm.throw",
+  "llvm.x86.3dnow.pavgusb",
+  "llvm.x86.3dnow.pf2id",
+  "llvm.x86.3dnow.pfacc",
+  "llvm.x86.3dnow.pfadd",
+  "llvm.x86.3dnow.pfcmpeq",
+  "llvm.x86.3dnow.pfcmpge",
+  "llvm.x86.3dnow.pfcmpgt",
+  "llvm.x86.3dnow.pfmax",
+  "llvm.x86.3dnow.pfmin",
+  "llvm.x86.3dnow.pfmul",
+  "llvm.x86.3dnow.pfrcp",
+  "llvm.x86.3dnow.pfrcpit1",
+  "llvm.x86.3dnow.pfrcpit2",
+  "llvm.x86.3dnow.pfrsqit1",
+  "llvm.x86.3dnow.pfrsqrt",
+  "llvm.x86.3dnow.pfsub",
+  "llvm.x86.3dnow.pfsubr",
+  "llvm.x86.3dnow.pi2fd",
+  "llvm.x86.3dnow.pmulhrw",
+  "llvm.x86.3dnowa.pf2iw",
+  "llvm.x86.3dnowa.pfnacc",
+  "llvm.x86.3dnowa.pfpnacc",
+  "llvm.x86.3dnowa.pi2fw",
+  "llvm.x86.3dnowa.pswapd",
+  "llvm.x86.addcarry.u32",
+  "llvm.x86.addcarry.u64",
+  "llvm.x86.addcarryx.u32",
+  "llvm.x86.addcarryx.u64",
+  "llvm.x86.aesni.aesdec",
+  "llvm.x86.aesni.aesdec.256",
+  "llvm.x86.aesni.aesdec.512",
+  "llvm.x86.aesni.aesdeclast",
+  "llvm.x86.aesni.aesdeclast.256",
+  "llvm.x86.aesni.aesdeclast.512",
+  "llvm.x86.aesni.aesenc",
+  "llvm.x86.aesni.aesenc.256",
+  "llvm.x86.aesni.aesenc.512",
+  "llvm.x86.aesni.aesenclast",
+  "llvm.x86.aesni.aesenclast.256",
+  "llvm.x86.aesni.aesenclast.512",
+  "llvm.x86.aesni.aesimc",
+  "llvm.x86.aesni.aeskeygenassist",
+  "llvm.x86.avx.addsub.pd.256",
+  "llvm.x86.avx.addsub.ps.256",
+  "llvm.x86.avx.blendv.pd.256",
+  "llvm.x86.avx.blendv.ps.256",
+  "llvm.x86.avx.cmp.pd.256",
+  "llvm.x86.avx.cmp.ps.256",
+  "llvm.x86.avx.cvt.pd2.ps.256",
+  "llvm.x86.avx.cvt.pd2dq.256",
+  "llvm.x86.avx.cvt.ps2dq.256",
+  "llvm.x86.avx.cvtdq2.ps.256",
+  "llvm.x86.avx.cvtt.pd2dq.256",
+  "llvm.x86.avx.cvtt.ps2dq.256",
+  "llvm.x86.avx.dp.ps.256",
+  "llvm.x86.avx.hadd.pd.256",
+  "llvm.x86.avx.hadd.ps.256",
+  "llvm.x86.avx.hsub.pd.256",
+  "llvm.x86.avx.hsub.ps.256",
+  "llvm.x86.avx.ldu.dq.256",
+  "llvm.x86.avx.maskload.pd",
+  "llvm.x86.avx.maskload.pd.256",
+  "llvm.x86.avx.maskload.ps",
+  "llvm.x86.avx.maskload.ps.256",
+  "llvm.x86.avx.maskstore.pd",
+  "llvm.x86.avx.maskstore.pd.256",
+  "llvm.x86.avx.maskstore.ps",
+  "llvm.x86.avx.maskstore.ps.256",
+  "llvm.x86.avx.max.pd.256",
+  "llvm.x86.avx.max.ps.256",
+  "llvm.x86.avx.min.pd.256",
+  "llvm.x86.avx.min.ps.256",
+  "llvm.x86.avx.movmsk.pd.256",
+  "llvm.x86.avx.movmsk.ps.256",
+  "llvm.x86.avx.ptestc.256",
+  "llvm.x86.avx.ptestnzc.256",
+  "llvm.x86.avx.ptestz.256",
+  "llvm.x86.avx.rcp.ps.256",
+  "llvm.x86.avx.round.pd.256",
+  "llvm.x86.avx.round.ps.256",
+  "llvm.x86.avx.rsqrt.ps.256",
+  "llvm.x86.avx.sqrt.pd.256",
+  "llvm.x86.avx.sqrt.ps.256",
+  "llvm.x86.avx.vpermilvar.pd",
+  "llvm.x86.avx.vpermilvar.pd.256",
+  "llvm.x86.avx.vpermilvar.ps",
+  "llvm.x86.avx.vpermilvar.ps.256",
+  "llvm.x86.avx.vtestc.pd",
+  "llvm.x86.avx.vtestc.pd.256",
+  "llvm.x86.avx.vtestc.ps",
+  "llvm.x86.avx.vtestc.ps.256",
+  "llvm.x86.avx.vtestnzc.pd",
+  "llvm.x86.avx.vtestnzc.pd.256",
+  "llvm.x86.avx.vtestnzc.ps",
+  "llvm.x86.avx.vtestnzc.ps.256",
+  "llvm.x86.avx.vtestz.pd",
+  "llvm.x86.avx.vtestz.pd.256",
+  "llvm.x86.avx.vtestz.ps",
+  "llvm.x86.avx.vtestz.ps.256",
+  "llvm.x86.avx.vzeroall",
+  "llvm.x86.avx.vzeroupper",
+  "llvm.x86.avx2.gather.d.d",
+  "llvm.x86.avx2.gather.d.d.256",
+  "llvm.x86.avx2.gather.d.pd",
+  "llvm.x86.avx2.gather.d.pd.256",
+  "llvm.x86.avx2.gather.d.ps",
+  "llvm.x86.avx2.gather.d.ps.256",
+  "llvm.x86.avx2.gather.d.q",
+  "llvm.x86.avx2.gather.d.q.256",
+  "llvm.x86.avx2.gather.q.d",
+  "llvm.x86.avx2.gather.q.d.256",
+  "llvm.x86.avx2.gather.q.pd",
+  "llvm.x86.avx2.gather.q.pd.256",
+  "llvm.x86.avx2.gather.q.ps",
+  "llvm.x86.avx2.gather.q.ps.256",
+  "llvm.x86.avx2.gather.q.q",
+  "llvm.x86.avx2.gather.q.q.256",
+  "llvm.x86.avx2.maskload.d",
+  "llvm.x86.avx2.maskload.d.256",
+  "llvm.x86.avx2.maskload.q",
+  "llvm.x86.avx2.maskload.q.256",
+  "llvm.x86.avx2.maskstore.d",
+  "llvm.x86.avx2.maskstore.d.256",
+  "llvm.x86.avx2.maskstore.q",
+  "llvm.x86.avx2.maskstore.q.256",
+  "llvm.x86.avx2.mpsadbw",
+  "llvm.x86.avx2.packssdw",
+  "llvm.x86.avx2.packsswb",
+  "llvm.x86.avx2.packusdw",
+  "llvm.x86.avx2.packuswb",
+  "llvm.x86.avx2.padds.b",
+  "llvm.x86.avx2.padds.w",
+  "llvm.x86.avx2.paddus.b",
+  "llvm.x86.avx2.paddus.w",
+  "llvm.x86.avx2.pblendvb",
+  "llvm.x86.avx2.permd",
+  "llvm.x86.avx2.permps",
+  "llvm.x86.avx2.phadd.d",
+  "llvm.x86.avx2.phadd.sw",
+  "llvm.x86.avx2.phadd.w",
+  "llvm.x86.avx2.phsub.d",
+  "llvm.x86.avx2.phsub.sw",
+  "llvm.x86.avx2.phsub.w",
+  "llvm.x86.avx2.pmadd.ub.sw",
+  "llvm.x86.avx2.pmadd.wd",
+  "llvm.x86.avx2.pmovmskb",
+  "llvm.x86.avx2.pmul.dq",
+  "llvm.x86.avx2.pmul.hr.sw",
+  "llvm.x86.avx2.pmulh.w",
+  "llvm.x86.avx2.pmulhu.w",
+  "llvm.x86.avx2.pmulu.dq",
+  "llvm.x86.avx2.psad.bw",
+  "llvm.x86.avx2.pshuf.b",
+  "llvm.x86.avx2.psign.b",
+  "llvm.x86.avx2.psign.d",
+  "llvm.x86.avx2.psign.w",
+  "llvm.x86.avx2.psll.d",
+  "llvm.x86.avx2.psll.q",
+  "llvm.x86.avx2.psll.w",
+  "llvm.x86.avx2.pslli.d",
+  "llvm.x86.avx2.pslli.q",
+  "llvm.x86.avx2.pslli.w",
+  "llvm.x86.avx2.psllv.d",
+  "llvm.x86.avx2.psllv.d.256",
+  "llvm.x86.avx2.psllv.q",
+  "llvm.x86.avx2.psllv.q.256",
+  "llvm.x86.avx2.psra.d",
+  "llvm.x86.avx2.psra.w",
+  "llvm.x86.avx2.psrai.d",
+  "llvm.x86.avx2.psrai.w",
+  "llvm.x86.avx2.psrav.d",
+  "llvm.x86.avx2.psrav.d.256",
+  "llvm.x86.avx2.psrl.d",
+  "llvm.x86.avx2.psrl.q",
+  "llvm.x86.avx2.psrl.w",
+  "llvm.x86.avx2.psrli.d",
+  "llvm.x86.avx2.psrli.q",
+  "llvm.x86.avx2.psrli.w",
+  "llvm.x86.avx2.psrlv.d",
+  "llvm.x86.avx2.psrlv.d.256",
+  "llvm.x86.avx2.psrlv.q",
+  "llvm.x86.avx2.psrlv.q.256",
+  "llvm.x86.avx2.psubs.b",
+  "llvm.x86.avx2.psubs.w",
+  "llvm.x86.avx2.psubus.b",
+  "llvm.x86.avx2.psubus.w",
+  "llvm.x86.avx512.broadcastmb.128",
+  "llvm.x86.avx512.broadcastmb.256",
+  "llvm.x86.avx512.broadcastmb.512",
+  "llvm.x86.avx512.broadcastmw.128",
+  "llvm.x86.avx512.broadcastmw.256",
+  "llvm.x86.avx512.broadcastmw.512",
+  "llvm.x86.avx512.cvtsi2sd64",
+  "llvm.x86.avx512.cvtsi2ss32",
+  "llvm.x86.avx512.cvtsi2ss64",
+  "llvm.x86.avx512.cvttsd2si",
+  "llvm.x86.avx512.cvttsd2si64",
+  "llvm.x86.avx512.cvttsd2usi",
+  "llvm.x86.avx512.cvttsd2usi64",
+  "llvm.x86.avx512.cvttss2si",
+  "llvm.x86.avx512.cvttss2si64",
+  "llvm.x86.avx512.cvttss2usi",
+  "llvm.x86.avx512.cvttss2usi64",
+  "llvm.x86.avx512.cvtusi2sd",
+  "llvm.x86.avx512.cvtusi2ss",
+  "llvm.x86.avx512.cvtusi642sd",
+  "llvm.x86.avx512.cvtusi642ss",
+  "llvm.x86.avx512.exp2.pd",
+  "llvm.x86.avx512.exp2.ps",
+  "llvm.x86.avx512.gather.dpd.512",
+  "llvm.x86.avx512.gather.dpi.512",
+  "llvm.x86.avx512.gather.dpq.512",
+  "llvm.x86.avx512.gather.dps.512",
+  "llvm.x86.avx512.gather.qpd.512",
+  "llvm.x86.avx512.gather.qpi.512",
+  "llvm.x86.avx512.gather.qpq.512",
+  "llvm.x86.avx512.gather.qps.512",
+  "llvm.x86.avx512.gather3div2.df",
+  "llvm.x86.avx512.gather3div2.di",
+  "llvm.x86.avx512.gather3div4.df",
+  "llvm.x86.avx512.gather3div4.di",
+  "llvm.x86.avx512.gather3div4.sf",
+  "llvm.x86.avx512.gather3div4.si",
+  "llvm.x86.avx512.gather3div8.sf",
+  "llvm.x86.avx512.gather3div8.si",
+  "llvm.x86.avx512.gather3siv2.df",
+  "llvm.x86.avx512.gather3siv2.di",
+  "llvm.x86.avx512.gather3siv4.df",
+  "llvm.x86.avx512.gather3siv4.di",
+  "llvm.x86.avx512.gather3siv4.sf",
+  "llvm.x86.avx512.gather3siv4.si",
+  "llvm.x86.avx512.gather3siv8.sf",
+  "llvm.x86.avx512.gather3siv8.si",
+  "llvm.x86.avx512.gatherpf.dpd.512",
+  "llvm.x86.avx512.gatherpf.dps.512",
+  "llvm.x86.avx512.gatherpf.qpd.512",
+  "llvm.x86.avx512.gatherpf.qps.512",
+  "llvm.x86.avx512.mask.add.pd.512",
+  "llvm.x86.avx512.mask.add.ps.512",
+  "llvm.x86.avx512.mask.add.sd.round",
+  "llvm.x86.avx512.mask.add.ss.round",
+  "llvm.x86.avx512.mask.cmp.pd.128",
+  "llvm.x86.avx512.mask.cmp.pd.256",
+  "llvm.x86.avx512.mask.cmp.pd.512",
+  "llvm.x86.avx512.mask.cmp.ps.128",
+  "llvm.x86.avx512.mask.cmp.ps.256",
+  "llvm.x86.avx512.mask.cmp.ps.512",
+  "llvm.x86.avx512.mask.cmp.sd",
+  "llvm.x86.avx512.mask.cmp.ss",
+  "llvm.x86.avx512.mask.compress.b.128",
+  "llvm.x86.avx512.mask.compress.b.256",
+  "llvm.x86.avx512.mask.compress.b.512",
+  "llvm.x86.avx512.mask.compress.d.128",
+  "llvm.x86.avx512.mask.compress.d.256",
+  "llvm.x86.avx512.mask.compress.d.512",
+  "llvm.x86.avx512.mask.compress.pd.128",
+  "llvm.x86.avx512.mask.compress.pd.256",
+  "llvm.x86.avx512.mask.compress.pd.512",
+  "llvm.x86.avx512.mask.compress.ps.128",
+  "llvm.x86.avx512.mask.compress.ps.256",
+  "llvm.x86.avx512.mask.compress.ps.512",
+  "llvm.x86.avx512.mask.compress.q.128",
+  "llvm.x86.avx512.mask.compress.q.256",
+  "llvm.x86.avx512.mask.compress.q.512",
+  "llvm.x86.avx512.mask.compress.store.b.128",
+  "llvm.x86.avx512.mask.compress.store.b.256",
+  "llvm.x86.avx512.mask.compress.store.b.512",
+  "llvm.x86.avx512.mask.compress.store.d.128",
+  "llvm.x86.avx512.mask.compress.store.d.256",
+  "llvm.x86.avx512.mask.compress.store.d.512",
+  "llvm.x86.avx512.mask.compress.store.pd.128",
+  "llvm.x86.avx512.mask.compress.store.pd.256",
+  "llvm.x86.avx512.mask.compress.store.pd.512",
+  "llvm.x86.avx512.mask.compress.store.ps.128",
+  "llvm.x86.avx512.mask.compress.store.ps.256",
+  "llvm.x86.avx512.mask.compress.store.ps.512",
+  "llvm.x86.avx512.mask.compress.store.q.128",
+  "llvm.x86.avx512.mask.compress.store.q.256",
+  "llvm.x86.avx512.mask.compress.store.q.512",
+  "llvm.x86.avx512.mask.compress.store.w.128",
+  "llvm.x86.avx512.mask.compress.store.w.256",
+  "llvm.x86.avx512.mask.compress.store.w.512",
+  "llvm.x86.avx512.mask.compress.w.128",
+  "llvm.x86.avx512.mask.compress.w.256",
+  "llvm.x86.avx512.mask.compress.w.512",
+  "llvm.x86.avx512.mask.conflict.d.128",
+  "llvm.x86.avx512.mask.conflict.d.256",
+  "llvm.x86.avx512.mask.conflict.d.512",
+  "llvm.x86.avx512.mask.conflict.q.128",
+  "llvm.x86.avx512.mask.conflict.q.256",
+  "llvm.x86.avx512.mask.conflict.q.512",
+  "llvm.x86.avx512.mask.cvtdq2ps.128",
+  "llvm.x86.avx512.mask.cvtdq2ps.256",
+  "llvm.x86.avx512.mask.cvtdq2ps.512",
+  "llvm.x86.avx512.mask.cvtpd2dq.128",
+  "llvm.x86.avx512.mask.cvtpd2dq.256",
+  "llvm.x86.avx512.mask.cvtpd2dq.512",
+  "llvm.x86.avx512.mask.cvtpd2ps",
+  "llvm.x86.avx512.mask.cvtpd2ps.256",
+  "llvm.x86.avx512.mask.cvtpd2ps.512",
+  "llvm.x86.avx512.mask.cvtpd2qq.128",
+  "llvm.x86.avx512.mask.cvtpd2qq.256",
+  "llvm.x86.avx512.mask.cvtpd2qq.512",
+  "llvm.x86.avx512.mask.cvtpd2udq.128",
+  "llvm.x86.avx512.mask.cvtpd2udq.256",
+  "llvm.x86.avx512.mask.cvtpd2udq.512",
+  "llvm.x86.avx512.mask.cvtpd2uqq.128",
+  "llvm.x86.avx512.mask.cvtpd2uqq.256",
+  "llvm.x86.avx512.mask.cvtpd2uqq.512",
+  "llvm.x86.avx512.mask.cvtps2dq.128",
+  "llvm.x86.avx512.mask.cvtps2dq.256",
+  "llvm.x86.avx512.mask.cvtps2dq.512",
+  "llvm.x86.avx512.mask.cvtps2pd.128",
+  "llvm.x86.avx512.mask.cvtps2pd.256",
+  "llvm.x86.avx512.mask.cvtps2pd.512",
+  "llvm.x86.avx512.mask.cvtps2qq.128",
+  "llvm.x86.avx512.mask.cvtps2qq.256",
+  "llvm.x86.avx512.mask.cvtps2qq.512",
+  "llvm.x86.avx512.mask.cvtps2udq.128",
+  "llvm.x86.avx512.mask.cvtps2udq.256",
+  "llvm.x86.avx512.mask.cvtps2udq.512",
+  "llvm.x86.avx512.mask.cvtps2uqq.128",
+  "llvm.x86.avx512.mask.cvtps2uqq.256",
+  "llvm.x86.avx512.mask.cvtps2uqq.512",
+  "llvm.x86.avx512.mask.cvtqq2pd.128",
+  "llvm.x86.avx512.mask.cvtqq2pd.256",
+  "llvm.x86.avx512.mask.cvtqq2pd.512",
+  "llvm.x86.avx512.mask.cvtqq2ps.128",
+  "llvm.x86.avx512.mask.cvtqq2ps.256",
+  "llvm.x86.avx512.mask.cvtqq2ps.512",
+  "llvm.x86.avx512.mask.cvtsd2ss.round",
+  "llvm.x86.avx512.mask.cvtss2sd.round",
+  "llvm.x86.avx512.mask.cvttpd2dq.128",
+  "llvm.x86.avx512.mask.cvttpd2dq.256",
+  "llvm.x86.avx512.mask.cvttpd2dq.512",
+  "llvm.x86.avx512.mask.cvttpd2qq.128",
+  "llvm.x86.avx512.mask.cvttpd2qq.256",
+  "llvm.x86.avx512.mask.cvttpd2qq.512",
+  "llvm.x86.avx512.mask.cvttpd2udq.128",
+  "llvm.x86.avx512.mask.cvttpd2udq.256",
+  "llvm.x86.avx512.mask.cvttpd2udq.512",
+  "llvm.x86.avx512.mask.cvttpd2uqq.128",
+  "llvm.x86.avx512.mask.cvttpd2uqq.256",
+  "llvm.x86.avx512.mask.cvttpd2uqq.512",
+  "llvm.x86.avx512.mask.cvttps2dq.128",
+  "llvm.x86.avx512.mask.cvttps2dq.256",
+  "llvm.x86.avx512.mask.cvttps2dq.512",
+  "llvm.x86.avx512.mask.cvttps2qq.128",
+  "llvm.x86.avx512.mask.cvttps2qq.256",
+  "llvm.x86.avx512.mask.cvttps2qq.512",
+  "llvm.x86.avx512.mask.cvttps2udq.128",
+  "llvm.x86.avx512.mask.cvttps2udq.256",
+  "llvm.x86.avx512.mask.cvttps2udq.512",
+  "llvm.x86.avx512.mask.cvttps2uqq.128",
+  "llvm.x86.avx512.mask.cvttps2uqq.256",
+  "llvm.x86.avx512.mask.cvttps2uqq.512",
+  "llvm.x86.avx512.mask.cvtudq2ps.128",
+  "llvm.x86.avx512.mask.cvtudq2ps.256",
+  "llvm.x86.avx512.mask.cvtudq2ps.512",
+  "llvm.x86.avx512.mask.cvtuqq2pd.128",
+  "llvm.x86.avx512.mask.cvtuqq2pd.256",
+  "llvm.x86.avx512.mask.cvtuqq2pd.512",
+  "llvm.x86.avx512.mask.cvtuqq2ps.128",
+  "llvm.x86.avx512.mask.cvtuqq2ps.256",
+  "llvm.x86.avx512.mask.cvtuqq2ps.512",
+  "llvm.x86.avx512.mask.dbpsadbw.128",
+  "llvm.x86.avx512.mask.dbpsadbw.256",
+  "llvm.x86.avx512.mask.dbpsadbw.512",
+  "llvm.x86.avx512.mask.div.pd.512",
+  "llvm.x86.avx512.mask.div.ps.512",
+  "llvm.x86.avx512.mask.div.sd.round",
+  "llvm.x86.avx512.mask.div.ss.round",
+  "llvm.x86.avx512.mask.expand.b.128",
+  "llvm.x86.avx512.mask.expand.b.256",
+  "llvm.x86.avx512.mask.expand.b.512",
+  "llvm.x86.avx512.mask.expand.d.128",
+  "llvm.x86.avx512.mask.expand.d.256",
+  "llvm.x86.avx512.mask.expand.d.512",
+  "llvm.x86.avx512.mask.expand.load.b.128",
+  "llvm.x86.avx512.mask.expand.load.b.256",
+  "llvm.x86.avx512.mask.expand.load.b.512",
+  "llvm.x86.avx512.mask.expand.load.d.128",
+  "llvm.x86.avx512.mask.expand.load.d.256",
+  "llvm.x86.avx512.mask.expand.load.d.512",
+  "llvm.x86.avx512.mask.expand.load.pd.128",
+  "llvm.x86.avx512.mask.expand.load.pd.256",
+  "llvm.x86.avx512.mask.expand.load.pd.512",
+  "llvm.x86.avx512.mask.expand.load.ps.128",
+  "llvm.x86.avx512.mask.expand.load.ps.256",
+  "llvm.x86.avx512.mask.expand.load.ps.512",
+  "llvm.x86.avx512.mask.expand.load.q.128",
+  "llvm.x86.avx512.mask.expand.load.q.256",
+  "llvm.x86.avx512.mask.expand.load.q.512",
+  "llvm.x86.avx512.mask.expand.load.w.128",
+  "llvm.x86.avx512.mask.expand.load.w.256",
+  "llvm.x86.avx512.mask.expand.load.w.512",
+  "llvm.x86.avx512.mask.expand.pd.128",
+  "llvm.x86.avx512.mask.expand.pd.256",
+  "llvm.x86.avx512.mask.expand.pd.512",
+  "llvm.x86.avx512.mask.expand.ps.128",
+  "llvm.x86.avx512.mask.expand.ps.256",
+  "llvm.x86.avx512.mask.expand.ps.512",
+  "llvm.x86.avx512.mask.expand.q.128",
+  "llvm.x86.avx512.mask.expand.q.256",
+  "llvm.x86.avx512.mask.expand.q.512",
+  "llvm.x86.avx512.mask.expand.w.128",
+  "llvm.x86.avx512.mask.expand.w.256",
+  "llvm.x86.avx512.mask.expand.w.512",
+  "llvm.x86.avx512.mask.fixupimm.pd.128",
+  "llvm.x86.avx512.mask.fixupimm.pd.256",
+  "llvm.x86.avx512.mask.fixupimm.pd.512",
+  "llvm.x86.avx512.mask.fixupimm.ps.128",
+  "llvm.x86.avx512.mask.fixupimm.ps.256",
+  "llvm.x86.avx512.mask.fixupimm.ps.512",
+  "llvm.x86.avx512.mask.fixupimm.sd",
+  "llvm.x86.avx512.mask.fixupimm.ss",
+  "llvm.x86.avx512.mask.fpclass.pd.128",
+  "llvm.x86.avx512.mask.fpclass.pd.256",
+  "llvm.x86.avx512.mask.fpclass.pd.512",
+  "llvm.x86.avx512.mask.fpclass.ps.128",
+  "llvm.x86.avx512.mask.fpclass.ps.256",
+  "llvm.x86.avx512.mask.fpclass.ps.512",
+  "llvm.x86.avx512.mask.fpclass.sd",
+  "llvm.x86.avx512.mask.fpclass.ss",
+  "llvm.x86.avx512.mask.getexp.pd.128",
+  "llvm.x86.avx512.mask.getexp.pd.256",
+  "llvm.x86.avx512.mask.getexp.pd.512",
+  "llvm.x86.avx512.mask.getexp.ps.128",
+  "llvm.x86.avx512.mask.getexp.ps.256",
+  "llvm.x86.avx512.mask.getexp.ps.512",
+  "llvm.x86.avx512.mask.getexp.sd",
+  "llvm.x86.avx512.mask.getexp.ss",
+  "llvm.x86.avx512.mask.getmant.pd.128",
+  "llvm.x86.avx512.mask.getmant.pd.256",
+  "llvm.x86.avx512.mask.getmant.pd.512",
+  "llvm.x86.avx512.mask.getmant.ps.128",
+  "llvm.x86.avx512.mask.getmant.ps.256",
+  "llvm.x86.avx512.mask.getmant.ps.512",
+  "llvm.x86.avx512.mask.getmant.sd",
+  "llvm.x86.avx512.mask.getmant.ss",
+  "llvm.x86.avx512.mask.max.pd.512",
+  "llvm.x86.avx512.mask.max.ps.512",
+  "llvm.x86.avx512.mask.max.sd.round",
+  "llvm.x86.avx512.mask.max.ss.round",
+  "llvm.x86.avx512.mask.min.pd.512",
+  "llvm.x86.avx512.mask.min.ps.512",
+  "llvm.x86.avx512.mask.min.sd.round",
+  "llvm.x86.avx512.mask.min.ss.round",
+  "llvm.x86.avx512.mask.mul.pd.512",
+  "llvm.x86.avx512.mask.mul.ps.512",
+  "llvm.x86.avx512.mask.mul.sd.round",
+  "llvm.x86.avx512.mask.mul.ss.round",
+  "llvm.x86.avx512.mask.padds.b.128",
+  "llvm.x86.avx512.mask.padds.b.256",
+  "llvm.x86.avx512.mask.padds.b.512",
+  "llvm.x86.avx512.mask.padds.w.128",
+  "llvm.x86.avx512.mask.padds.w.256",
+  "llvm.x86.avx512.mask.padds.w.512",
+  "llvm.x86.avx512.mask.paddus.b.128",
+  "llvm.x86.avx512.mask.paddus.b.256",
+  "llvm.x86.avx512.mask.paddus.b.512",
+  "llvm.x86.avx512.mask.paddus.w.128",
+  "llvm.x86.avx512.mask.paddus.w.256",
+  "llvm.x86.avx512.mask.paddus.w.512",
+  "llvm.x86.avx512.mask.permvar.df.256",
+  "llvm.x86.avx512.mask.permvar.df.512",
+  "llvm.x86.avx512.mask.permvar.di.256",
+  "llvm.x86.avx512.mask.permvar.di.512",
+  "llvm.x86.avx512.mask.permvar.hi.128",
+  "llvm.x86.avx512.mask.permvar.hi.256",
+  "llvm.x86.avx512.mask.permvar.hi.512",
+  "llvm.x86.avx512.mask.permvar.qi.128",
+  "llvm.x86.avx512.mask.permvar.qi.256",
+  "llvm.x86.avx512.mask.permvar.qi.512",
+  "llvm.x86.avx512.mask.permvar.sf.256",
+  "llvm.x86.avx512.mask.permvar.sf.512",
+  "llvm.x86.avx512.mask.permvar.si.256",
+  "llvm.x86.avx512.mask.permvar.si.512",
+  "llvm.x86.avx512.mask.pmaddubs.w.128",
+  "llvm.x86.avx512.mask.pmaddubs.w.256",
+  "llvm.x86.avx512.mask.pmaddubs.w.512",
+  "llvm.x86.avx512.mask.pmaddw.d.128",
+  "llvm.x86.avx512.mask.pmaddw.d.256",
+  "llvm.x86.avx512.mask.pmaddw.d.512",
+  "llvm.x86.avx512.mask.pmov.db.128",
+  "llvm.x86.avx512.mask.pmov.db.256",
+  "llvm.x86.avx512.mask.pmov.db.512",
+  "llvm.x86.avx512.mask.pmov.db.mem.128",
+  "llvm.x86.avx512.mask.pmov.db.mem.256",
+  "llvm.x86.avx512.mask.pmov.db.mem.512",
+  "llvm.x86.avx512.mask.pmov.dw.128",
+  "llvm.x86.avx512.mask.pmov.dw.256",
+  "llvm.x86.avx512.mask.pmov.dw.512",
+  "llvm.x86.avx512.mask.pmov.dw.mem.128",
+  "llvm.x86.avx512.mask.pmov.dw.mem.256",
+  "llvm.x86.avx512.mask.pmov.dw.mem.512",
+  "llvm.x86.avx512.mask.pmov.qb.128",
+  "llvm.x86.avx512.mask.pmov.qb.256",
+  "llvm.x86.avx512.mask.pmov.qb.512",
+  "llvm.x86.avx512.mask.pmov.qb.mem.128",
+  "llvm.x86.avx512.mask.pmov.qb.mem.256",
+  "llvm.x86.avx512.mask.pmov.qb.mem.512",
+  "llvm.x86.avx512.mask.pmov.qd.128",
+  "llvm.x86.avx512.mask.pmov.qd.256",
+  "llvm.x86.avx512.mask.pmov.qd.512",
+  "llvm.x86.avx512.mask.pmov.qd.mem.128",
+  "llvm.x86.avx512.mask.pmov.qd.mem.256",
+  "llvm.x86.avx512.mask.pmov.qd.mem.512",
+  "llvm.x86.avx512.mask.pmov.qw.128",
+  "llvm.x86.avx512.mask.pmov.qw.256",
+  "llvm.x86.avx512.mask.pmov.qw.512",
+  "llvm.x86.avx512.mask.pmov.qw.mem.128",
+  "llvm.x86.avx512.mask.pmov.qw.mem.256",
+  "llvm.x86.avx512.mask.pmov.qw.mem.512",
+  "llvm.x86.avx512.mask.pmov.wb.128",
+  "llvm.x86.avx512.mask.pmov.wb.256",
+  "llvm.x86.avx512.mask.pmov.wb.512",
+  "llvm.x86.avx512.mask.pmov.wb.mem.128",
+  "llvm.x86.avx512.mask.pmov.wb.mem.256",
+  "llvm.x86.avx512.mask.pmov.wb.mem.512",
+  "llvm.x86.avx512.mask.pmovs.db.128",
+  "llvm.x86.avx512.mask.pmovs.db.256",
+  "llvm.x86.avx512.mask.pmovs.db.512",
+  "llvm.x86.avx512.mask.pmovs.db.mem.128",
+  "llvm.x86.avx512.mask.pmovs.db.mem.256",
+  "llvm.x86.avx512.mask.pmovs.db.mem.512",
+  "llvm.x86.avx512.mask.pmovs.dw.128",
+  "llvm.x86.avx512.mask.pmovs.dw.256",
+  "llvm.x86.avx512.mask.pmovs.dw.512",
+  "llvm.x86.avx512.mask.pmovs.dw.mem.128",
+  "llvm.x86.avx512.mask.pmovs.dw.mem.256",
+  "llvm.x86.avx512.mask.pmovs.dw.mem.512",
+  "llvm.x86.avx512.mask.pmovs.qb.128",
+  "llvm.x86.avx512.mask.pmovs.qb.256",
+  "llvm.x86.avx512.mask.pmovs.qb.512",
+  "llvm.x86.avx512.mask.pmovs.qb.mem.128",
+  "llvm.x86.avx512.mask.pmovs.qb.mem.256",
+  "llvm.x86.avx512.mask.pmovs.qb.mem.512",
+  "llvm.x86.avx512.mask.pmovs.qd.128",
+  "llvm.x86.avx512.mask.pmovs.qd.256",
+  "llvm.x86.avx512.mask.pmovs.qd.512",
+  "llvm.x86.avx512.mask.pmovs.qd.mem.128",
+  "llvm.x86.avx512.mask.pmovs.qd.mem.256",
+  "llvm.x86.avx512.mask.pmovs.qd.mem.512",
+  "llvm.x86.avx512.mask.pmovs.qw.128",
+  "llvm.x86.avx512.mask.pmovs.qw.256",
+  "llvm.x86.avx512.mask.pmovs.qw.512",
+  "llvm.x86.avx512.mask.pmovs.qw.mem.128",
+  "llvm.x86.avx512.mask.pmovs.qw.mem.256",
+  "llvm.x86.avx512.mask.pmovs.qw.mem.512",
+  "llvm.x86.avx512.mask.pmovs.wb.128",
+  "llvm.x86.avx512.mask.pmovs.wb.256",
+  "llvm.x86.avx512.mask.pmovs.wb.512",
+  "llvm.x86.avx512.mask.pmovs.wb.mem.128",
+  "llvm.x86.avx512.mask.pmovs.wb.mem.256",
+  "llvm.x86.avx512.mask.pmovs.wb.mem.512",
+  "llvm.x86.avx512.mask.pmovus.db.128",
+  "llvm.x86.avx512.mask.pmovus.db.256",
+  "llvm.x86.avx512.mask.pmovus.db.512",
+  "llvm.x86.avx512.mask.pmovus.db.mem.128",
+  "llvm.x86.avx512.mask.pmovus.db.mem.256",
+  "llvm.x86.avx512.mask.pmovus.db.mem.512",
+  "llvm.x86.avx512.mask.pmovus.dw.128",
+  "llvm.x86.avx512.mask.pmovus.dw.256",
+  "llvm.x86.avx512.mask.pmovus.dw.512",
+  "llvm.x86.avx512.mask.pmovus.dw.mem.128",
+  "llvm.x86.avx512.mask.pmovus.dw.mem.256",
+  "llvm.x86.avx512.mask.pmovus.dw.mem.512",
+  "llvm.x86.avx512.mask.pmovus.qb.128",
+  "llvm.x86.avx512.mask.pmovus.qb.256",
+  "llvm.x86.avx512.mask.pmovus.qb.512",
+  "llvm.x86.avx512.mask.pmovus.qb.mem.128",
+  "llvm.x86.avx512.mask.pmovus.qb.mem.256",
+  "llvm.x86.avx512.mask.pmovus.qb.mem.512",
+  "llvm.x86.avx512.mask.pmovus.qd.128",
+  "llvm.x86.avx512.mask.pmovus.qd.256",
+  "llvm.x86.avx512.mask.pmovus.qd.512",
+  "llvm.x86.avx512.mask.pmovus.qd.mem.128",
+  "llvm.x86.avx512.mask.pmovus.qd.mem.256",
+  "llvm.x86.avx512.mask.pmovus.qd.mem.512",
+  "llvm.x86.avx512.mask.pmovus.qw.128",
+  "llvm.x86.avx512.mask.pmovus.qw.256",
+  "llvm.x86.avx512.mask.pmovus.qw.512",
+  "llvm.x86.avx512.mask.pmovus.qw.mem.128",
+  "llvm.x86.avx512.mask.pmovus.qw.mem.256",
+  "llvm.x86.avx512.mask.pmovus.qw.mem.512",
+  "llvm.x86.avx512.mask.pmovus.wb.128",
+  "llvm.x86.avx512.mask.pmovus.wb.256",
+  "llvm.x86.avx512.mask.pmovus.wb.512",
+  "llvm.x86.avx512.mask.pmovus.wb.mem.128",
+  "llvm.x86.avx512.mask.pmovus.wb.mem.256",
+  "llvm.x86.avx512.mask.pmovus.wb.mem.512",
+  "llvm.x86.avx512.mask.pmultishift.qb.128",
+  "llvm.x86.avx512.mask.pmultishift.qb.256",
+  "llvm.x86.avx512.mask.pmultishift.qb.512",
+  "llvm.x86.avx512.mask.prol.d.128",
+  "llvm.x86.avx512.mask.prol.d.256",
+  "llvm.x86.avx512.mask.prol.d.512",
+  "llvm.x86.avx512.mask.prol.q.128",
+  "llvm.x86.avx512.mask.prol.q.256",
+  "llvm.x86.avx512.mask.prol.q.512",
+  "llvm.x86.avx512.mask.prolv.d.128",
+  "llvm.x86.avx512.mask.prolv.d.256",
+  "llvm.x86.avx512.mask.prolv.d.512",
+  "llvm.x86.avx512.mask.prolv.q.128",
+  "llvm.x86.avx512.mask.prolv.q.256",
+  "llvm.x86.avx512.mask.prolv.q.512",
+  "llvm.x86.avx512.mask.pror.d.128",
+  "llvm.x86.avx512.mask.pror.d.256",
+  "llvm.x86.avx512.mask.pror.d.512",
+  "llvm.x86.avx512.mask.pror.q.128",
+  "llvm.x86.avx512.mask.pror.q.256",
+  "llvm.x86.avx512.mask.pror.q.512",
+  "llvm.x86.avx512.mask.prorv.d.128",
+  "llvm.x86.avx512.mask.prorv.d.256",
+  "llvm.x86.avx512.mask.prorv.d.512",
+  "llvm.x86.avx512.mask.prorv.q.128",
+  "llvm.x86.avx512.mask.prorv.q.256",
+  "llvm.x86.avx512.mask.prorv.q.512",
+  "llvm.x86.avx512.mask.psubs.b.128",
+  "llvm.x86.avx512.mask.psubs.b.256",
+  "llvm.x86.avx512.mask.psubs.b.512",
+  "llvm.x86.avx512.mask.psubs.w.128",
+  "llvm.x86.avx512.mask.psubs.w.256",
+  "llvm.x86.avx512.mask.psubs.w.512",
+  "llvm.x86.avx512.mask.psubus.b.128",
+  "llvm.x86.avx512.mask.psubus.b.256",
+  "llvm.x86.avx512.mask.psubus.b.512",
+  "llvm.x86.avx512.mask.psubus.w.128",
+  "llvm.x86.avx512.mask.psubus.w.256",
+  "llvm.x86.avx512.mask.psubus.w.512",
+  "llvm.x86.avx512.mask.pternlog.d.128",
+  "llvm.x86.avx512.mask.pternlog.d.256",
+  "llvm.x86.avx512.mask.pternlog.d.512",
+  "llvm.x86.avx512.mask.pternlog.q.128",
+  "llvm.x86.avx512.mask.pternlog.q.256",
+  "llvm.x86.avx512.mask.pternlog.q.512",
+  "llvm.x86.avx512.mask.range.pd.128",
+  "llvm.x86.avx512.mask.range.pd.256",
+  "llvm.x86.avx512.mask.range.pd.512",
+  "llvm.x86.avx512.mask.range.ps.128",
+  "llvm.x86.avx512.mask.range.ps.256",
+  "llvm.x86.avx512.mask.range.ps.512",
+  "llvm.x86.avx512.mask.range.sd",
+  "llvm.x86.avx512.mask.range.ss",
+  "llvm.x86.avx512.mask.reduce.pd.128",
+  "llvm.x86.avx512.mask.reduce.pd.256",
+  "llvm.x86.avx512.mask.reduce.pd.512",
+  "llvm.x86.avx512.mask.reduce.ps.128",
+  "llvm.x86.avx512.mask.reduce.ps.256",
+  "llvm.x86.avx512.mask.reduce.ps.512",
+  "llvm.x86.avx512.mask.reduce.sd",
+  "llvm.x86.avx512.mask.reduce.ss",
+  "llvm.x86.avx512.mask.rndscale.pd.128",
+  "llvm.x86.avx512.mask.rndscale.pd.256",
+  "llvm.x86.avx512.mask.rndscale.pd.512",
+  "llvm.x86.avx512.mask.rndscale.ps.128",
+  "llvm.x86.avx512.mask.rndscale.ps.256",
+  "llvm.x86.avx512.mask.rndscale.ps.512",
+  "llvm.x86.avx512.mask.rndscale.sd",
+  "llvm.x86.avx512.mask.rndscale.ss",
+  "llvm.x86.avx512.mask.scalef.pd.128",
+  "llvm.x86.avx512.mask.scalef.pd.256",
+  "llvm.x86.avx512.mask.scalef.pd.512",
+  "llvm.x86.avx512.mask.scalef.ps.128",
+  "llvm.x86.avx512.mask.scalef.ps.256",
+  "llvm.x86.avx512.mask.scalef.ps.512",
+  "llvm.x86.avx512.mask.scalef.sd",
+  "llvm.x86.avx512.mask.scalef.ss",
+  "llvm.x86.avx512.mask.sqrt.pd.128",
+  "llvm.x86.avx512.mask.sqrt.pd.256",
+  "llvm.x86.avx512.mask.sqrt.pd.512",
+  "llvm.x86.avx512.mask.sqrt.ps.128",
+  "llvm.x86.avx512.mask.sqrt.ps.256",
+  "llvm.x86.avx512.mask.sqrt.ps.512",
+  "llvm.x86.avx512.mask.sqrt.sd",
+  "llvm.x86.avx512.mask.sqrt.ss",
+  "llvm.x86.avx512.mask.store.ss",
+  "llvm.x86.avx512.mask.sub.pd.512",
+  "llvm.x86.avx512.mask.sub.ps.512",
+  "llvm.x86.avx512.mask.sub.sd.round",
+  "llvm.x86.avx512.mask.sub.ss.round",
+  "llvm.x86.avx512.mask.vcvtph2ps.128",
+  "llvm.x86.avx512.mask.vcvtph2ps.256",
+  "llvm.x86.avx512.mask.vcvtph2ps.512",
+  "llvm.x86.avx512.mask.vcvtps2ph.128",
+  "llvm.x86.avx512.mask.vcvtps2ph.256",
+  "llvm.x86.avx512.mask.vcvtps2ph.512",
+  "llvm.x86.avx512.mask.vfmadd.pd.128",
+  "llvm.x86.avx512.mask.vfmadd.pd.256",
+  "llvm.x86.avx512.mask.vfmadd.pd.512",
+  "llvm.x86.avx512.mask.vfmadd.ps.128",
+  "llvm.x86.avx512.mask.vfmadd.ps.256",
+  "llvm.x86.avx512.mask.vfmadd.ps.512",
+  "llvm.x86.avx512.mask.vfmadd.sd",
+  "llvm.x86.avx512.mask.vfmadd.ss",
+  "llvm.x86.avx512.mask.vfmaddsub.pd.128",
+  "llvm.x86.avx512.mask.vfmaddsub.pd.256",
+  "llvm.x86.avx512.mask.vfmaddsub.pd.512",
+  "llvm.x86.avx512.mask.vfmaddsub.ps.128",
+  "llvm.x86.avx512.mask.vfmaddsub.ps.256",
+  "llvm.x86.avx512.mask.vfmaddsub.ps.512",
+  "llvm.x86.avx512.mask.vfnmadd.pd.128",
+  "llvm.x86.avx512.mask.vfnmadd.pd.256",
+  "llvm.x86.avx512.mask.vfnmadd.pd.512",
+  "llvm.x86.avx512.mask.vfnmadd.ps.128",
+  "llvm.x86.avx512.mask.vfnmadd.ps.256",
+  "llvm.x86.avx512.mask.vfnmadd.ps.512",
+  "llvm.x86.avx512.mask.vfnmsub.pd.128",
+  "llvm.x86.avx512.mask.vfnmsub.pd.256",
+  "llvm.x86.avx512.mask.vfnmsub.pd.512",
+  "llvm.x86.avx512.mask.vfnmsub.ps.128",
+  "llvm.x86.avx512.mask.vfnmsub.ps.256",
+  "llvm.x86.avx512.mask.vfnmsub.ps.512",
+  "llvm.x86.avx512.mask.vpdpbusd.128",
+  "llvm.x86.avx512.mask.vpdpbusd.256",
+  "llvm.x86.avx512.mask.vpdpbusd.512",
+  "llvm.x86.avx512.mask.vpdpbusds.128",
+  "llvm.x86.avx512.mask.vpdpbusds.256",
+  "llvm.x86.avx512.mask.vpdpbusds.512",
+  "llvm.x86.avx512.mask.vpdpwssd.128",
+  "llvm.x86.avx512.mask.vpdpwssd.256",
+  "llvm.x86.avx512.mask.vpdpwssd.512",
+  "llvm.x86.avx512.mask.vpdpwssds.128",
+  "llvm.x86.avx512.mask.vpdpwssds.256",
+  "llvm.x86.avx512.mask.vpdpwssds.512",
+  "llvm.x86.avx512.mask.vpermi2var.d.128",
+  "llvm.x86.avx512.mask.vpermi2var.d.256",
+  "llvm.x86.avx512.mask.vpermi2var.d.512",
+  "llvm.x86.avx512.mask.vpermi2var.hi.128",
+  "llvm.x86.avx512.mask.vpermi2var.hi.256",
+  "llvm.x86.avx512.mask.vpermi2var.hi.512",
+  "llvm.x86.avx512.mask.vpermi2var.pd.128",
+  "llvm.x86.avx512.mask.vpermi2var.pd.256",
+  "llvm.x86.avx512.mask.vpermi2var.pd.512",
+  "llvm.x86.avx512.mask.vpermi2var.ps.128",
+  "llvm.x86.avx512.mask.vpermi2var.ps.256",
+  "llvm.x86.avx512.mask.vpermi2var.ps.512",
+  "llvm.x86.avx512.mask.vpermi2var.q.128",
+  "llvm.x86.avx512.mask.vpermi2var.q.256",
+  "llvm.x86.avx512.mask.vpermi2var.q.512",
+  "llvm.x86.avx512.mask.vpermi2var.qi.128",
+  "llvm.x86.avx512.mask.vpermi2var.qi.256",
+  "llvm.x86.avx512.mask.vpermi2var.qi.512",
+  "llvm.x86.avx512.mask.vpermt2var.d.128",
+  "llvm.x86.avx512.mask.vpermt2var.d.256",
+  "llvm.x86.avx512.mask.vpermt2var.d.512",
+  "llvm.x86.avx512.mask.vpermt2var.hi.128",
+  "llvm.x86.avx512.mask.vpermt2var.hi.256",
+  "llvm.x86.avx512.mask.vpermt2var.hi.512",
+  "llvm.x86.avx512.mask.vpermt2var.pd.128",
+  "llvm.x86.avx512.mask.vpermt2var.pd.256",
+  "llvm.x86.avx512.mask.vpermt2var.pd.512",
+  "llvm.x86.avx512.mask.vpermt2var.ps.128",
+  "llvm.x86.avx512.mask.vpermt2var.ps.256",
+  "llvm.x86.avx512.mask.vpermt2var.ps.512",
+  "llvm.x86.avx512.mask.vpermt2var.q.128",
+  "llvm.x86.avx512.mask.vpermt2var.q.256",
+  "llvm.x86.avx512.mask.vpermt2var.q.512",
+  "llvm.x86.avx512.mask.vpermt2var.qi.128",
+  "llvm.x86.avx512.mask.vpermt2var.qi.256",
+  "llvm.x86.avx512.mask.vpermt2var.qi.512",
+  "llvm.x86.avx512.mask.vpmadd52h.uq.128",
+  "llvm.x86.avx512.mask.vpmadd52h.uq.256",
+  "llvm.x86.avx512.mask.vpmadd52h.uq.512",
+  "llvm.x86.avx512.mask.vpmadd52l.uq.128",
+  "llvm.x86.avx512.mask.vpmadd52l.uq.256",
+  "llvm.x86.avx512.mask.vpmadd52l.uq.512",
+  "llvm.x86.avx512.mask.vpshld.d.128",
+  "llvm.x86.avx512.mask.vpshld.d.256",
+  "llvm.x86.avx512.mask.vpshld.d.512",
+  "llvm.x86.avx512.mask.vpshld.q.128",
+  "llvm.x86.avx512.mask.vpshld.q.256",
+  "llvm.x86.avx512.mask.vpshld.q.512",
+  "llvm.x86.avx512.mask.vpshld.w.128",
+  "llvm.x86.avx512.mask.vpshld.w.256",
+  "llvm.x86.avx512.mask.vpshld.w.512",
+  "llvm.x86.avx512.mask.vpshldv.d.128",
+  "llvm.x86.avx512.mask.vpshldv.d.256",
+  "llvm.x86.avx512.mask.vpshldv.d.512",
+  "llvm.x86.avx512.mask.vpshldv.q.128",
+  "llvm.x86.avx512.mask.vpshldv.q.256",
+  "llvm.x86.avx512.mask.vpshldv.q.512",
+  "llvm.x86.avx512.mask.vpshldv.w.128",
+  "llvm.x86.avx512.mask.vpshldv.w.256",
+  "llvm.x86.avx512.mask.vpshldv.w.512",
+  "llvm.x86.avx512.mask.vpshrd.d.128",
+  "llvm.x86.avx512.mask.vpshrd.d.256",
+  "llvm.x86.avx512.mask.vpshrd.d.512",
+  "llvm.x86.avx512.mask.vpshrd.q.128",
+  "llvm.x86.avx512.mask.vpshrd.q.256",
+  "llvm.x86.avx512.mask.vpshrd.q.512",
+  "llvm.x86.avx512.mask.vpshrd.w.128",
+  "llvm.x86.avx512.mask.vpshrd.w.256",
+  "llvm.x86.avx512.mask.vpshrd.w.512",
+  "llvm.x86.avx512.mask.vpshrdv.d.128",
+  "llvm.x86.avx512.mask.vpshrdv.d.256",
+  "llvm.x86.avx512.mask.vpshrdv.d.512",
+  "llvm.x86.avx512.mask.vpshrdv.q.128",
+  "llvm.x86.avx512.mask.vpshrdv.q.256",
+  "llvm.x86.avx512.mask.vpshrdv.q.512",
+  "llvm.x86.avx512.mask.vpshrdv.w.128",
+  "llvm.x86.avx512.mask.vpshrdv.w.256",
+  "llvm.x86.avx512.mask.vpshrdv.w.512",
+  "llvm.x86.avx512.mask.vpshufbitqmb.128",
+  "llvm.x86.avx512.mask.vpshufbitqmb.256",
+  "llvm.x86.avx512.mask.vpshufbitqmb.512",
+  "llvm.x86.avx512.mask3.vfmadd.pd.128",
+  "llvm.x86.avx512.mask3.vfmadd.pd.256",
+  "llvm.x86.avx512.mask3.vfmadd.pd.512",
+  "llvm.x86.avx512.mask3.vfmadd.ps.128",
+  "llvm.x86.avx512.mask3.vfmadd.ps.256",
+  "llvm.x86.avx512.mask3.vfmadd.ps.512",
+  "llvm.x86.avx512.mask3.vfmadd.sd",
+  "llvm.x86.avx512.mask3.vfmadd.ss",
+  "llvm.x86.avx512.mask3.vfmaddsub.pd.128",
+  "llvm.x86.avx512.mask3.vfmaddsub.pd.256",
+  "llvm.x86.avx512.mask3.vfmaddsub.pd.512",
+  "llvm.x86.avx512.mask3.vfmaddsub.ps.128",
+  "llvm.x86.avx512.mask3.vfmaddsub.ps.256",
+  "llvm.x86.avx512.mask3.vfmaddsub.ps.512",
+  "llvm.x86.avx512.mask3.vfmsub.pd.128",
+  "llvm.x86.avx512.mask3.vfmsub.pd.256",
+  "llvm.x86.avx512.mask3.vfmsub.pd.512",
+  "llvm.x86.avx512.mask3.vfmsub.ps.128",
+  "llvm.x86.avx512.mask3.vfmsub.ps.256",
+  "llvm.x86.avx512.mask3.vfmsub.ps.512",
+  "llvm.x86.avx512.mask3.vfmsub.sd",
+  "llvm.x86.avx512.mask3.vfmsub.ss",
+  "llvm.x86.avx512.mask3.vfmsubadd.pd.128",
+  "llvm.x86.avx512.mask3.vfmsubadd.pd.256",
+  "llvm.x86.avx512.mask3.vfmsubadd.pd.512",
+  "llvm.x86.avx512.mask3.vfmsubadd.ps.128",
+  "llvm.x86.avx512.mask3.vfmsubadd.ps.256",
+  "llvm.x86.avx512.mask3.vfmsubadd.ps.512",
+  "llvm.x86.avx512.mask3.vfnmsub.pd.128",
+  "llvm.x86.avx512.mask3.vfnmsub.pd.256",
+  "llvm.x86.avx512.mask3.vfnmsub.pd.512",
+  "llvm.x86.avx512.mask3.vfnmsub.ps.128",
+  "llvm.x86.avx512.mask3.vfnmsub.ps.256",
+  "llvm.x86.avx512.mask3.vfnmsub.ps.512",
+  "llvm.x86.avx512.mask3.vfnmsub.sd",
+  "llvm.x86.avx512.mask3.vfnmsub.ss",
+  "llvm.x86.avx512.maskz.fixupimm.pd.128",
+  "llvm.x86.avx512.maskz.fixupimm.pd.256",
+  "llvm.x86.avx512.maskz.fixupimm.pd.512",
+  "llvm.x86.avx512.maskz.fixupimm.ps.128",
+  "llvm.x86.avx512.maskz.fixupimm.ps.256",
+  "llvm.x86.avx512.maskz.fixupimm.ps.512",
+  "llvm.x86.avx512.maskz.fixupimm.sd",
+  "llvm.x86.avx512.maskz.fixupimm.ss",
+  "llvm.x86.avx512.maskz.pternlog.d.128",
+  "llvm.x86.avx512.maskz.pternlog.d.256",
+  "llvm.x86.avx512.maskz.pternlog.d.512",
+  "llvm.x86.avx512.maskz.pternlog.q.128",
+  "llvm.x86.avx512.maskz.pternlog.q.256",
+  "llvm.x86.avx512.maskz.pternlog.q.512",
+  "llvm.x86.avx512.maskz.vfmadd.pd.128",
+  "llvm.x86.avx512.maskz.vfmadd.pd.256",
+  "llvm.x86.avx512.maskz.vfmadd.pd.512",
+  "llvm.x86.avx512.maskz.vfmadd.ps.128",
+  "llvm.x86.avx512.maskz.vfmadd.ps.256",
+  "llvm.x86.avx512.maskz.vfmadd.ps.512",
+  "llvm.x86.avx512.maskz.vfmadd.sd",
+  "llvm.x86.avx512.maskz.vfmadd.ss",
+  "llvm.x86.avx512.maskz.vfmaddsub.pd.128",
+  "llvm.x86.avx512.maskz.vfmaddsub.pd.256",
+  "llvm.x86.avx512.maskz.vfmaddsub.pd.512",
+  "llvm.x86.avx512.maskz.vfmaddsub.ps.128",
+  "llvm.x86.avx512.maskz.vfmaddsub.ps.256",
+  "llvm.x86.avx512.maskz.vfmaddsub.ps.512",
+  "llvm.x86.avx512.maskz.vpdpbusd.128",
+  "llvm.x86.avx512.maskz.vpdpbusd.256",
+  "llvm.x86.avx512.maskz.vpdpbusd.512",
+  "llvm.x86.avx512.maskz.vpdpbusds.128",
+  "llvm.x86.avx512.maskz.vpdpbusds.256",
+  "llvm.x86.avx512.maskz.vpdpbusds.512",
+  "llvm.x86.avx512.maskz.vpdpwssd.128",
+  "llvm.x86.avx512.maskz.vpdpwssd.256",
+  "llvm.x86.avx512.maskz.vpdpwssd.512",
+  "llvm.x86.avx512.maskz.vpdpwssds.128",
+  "llvm.x86.avx512.maskz.vpdpwssds.256",
+  "llvm.x86.avx512.maskz.vpdpwssds.512",
+  "llvm.x86.avx512.maskz.vpermt2var.d.128",
+  "llvm.x86.avx512.maskz.vpermt2var.d.256",
+  "llvm.x86.avx512.maskz.vpermt2var.d.512",
+  "llvm.x86.avx512.maskz.vpermt2var.hi.128",
+  "llvm.x86.avx512.maskz.vpermt2var.hi.256",
+  "llvm.x86.avx512.maskz.vpermt2var.hi.512",
+  "llvm.x86.avx512.maskz.vpermt2var.pd.128",
+  "llvm.x86.avx512.maskz.vpermt2var.pd.256",
+  "llvm.x86.avx512.maskz.vpermt2var.pd.512",
+  "llvm.x86.avx512.maskz.vpermt2var.ps.128",
+  "llvm.x86.avx512.maskz.vpermt2var.ps.256",
+  "llvm.x86.avx512.maskz.vpermt2var.ps.512",
+  "llvm.x86.avx512.maskz.vpermt2var.q.128",
+  "llvm.x86.avx512.maskz.vpermt2var.q.256",
+  "llvm.x86.avx512.maskz.vpermt2var.q.512",
+  "llvm.x86.avx512.maskz.vpermt2var.qi.128",
+  "llvm.x86.avx512.maskz.vpermt2var.qi.256",
+  "llvm.x86.avx512.maskz.vpermt2var.qi.512",
+  "llvm.x86.avx512.maskz.vpmadd52h.uq.128",
+  "llvm.x86.avx512.maskz.vpmadd52h.uq.256",
+  "llvm.x86.avx512.maskz.vpmadd52h.uq.512",
+  "llvm.x86.avx512.maskz.vpmadd52l.uq.128",
+  "llvm.x86.avx512.maskz.vpmadd52l.uq.256",
+  "llvm.x86.avx512.maskz.vpmadd52l.uq.512",
+  "llvm.x86.avx512.maskz.vpshldv.d.128",
+  "llvm.x86.avx512.maskz.vpshldv.d.256",
+  "llvm.x86.avx512.maskz.vpshldv.d.512",
+  "llvm.x86.avx512.maskz.vpshldv.q.128",
+  "llvm.x86.avx512.maskz.vpshldv.q.256",
+  "llvm.x86.avx512.maskz.vpshldv.q.512",
+  "llvm.x86.avx512.maskz.vpshldv.w.128",
+  "llvm.x86.avx512.maskz.vpshldv.w.256",
+  "llvm.x86.avx512.maskz.vpshldv.w.512",
+  "llvm.x86.avx512.maskz.vpshrdv.d.128",
+  "llvm.x86.avx512.maskz.vpshrdv.d.256",
+  "llvm.x86.avx512.maskz.vpshrdv.d.512",
+  "llvm.x86.avx512.maskz.vpshrdv.q.128",
+  "llvm.x86.avx512.maskz.vpshrdv.q.256",
+  "llvm.x86.avx512.maskz.vpshrdv.q.512",
+  "llvm.x86.avx512.maskz.vpshrdv.w.128",
+  "llvm.x86.avx512.maskz.vpshrdv.w.256",
+  "llvm.x86.avx512.maskz.vpshrdv.w.512",
+  "llvm.x86.avx512.packssdw.512",
+  "llvm.x86.avx512.packsswb.512",
+  "llvm.x86.avx512.packusdw.512",
+  "llvm.x86.avx512.packuswb.512",
+  "llvm.x86.avx512.pmul.dq.512",
+  "llvm.x86.avx512.pmul.hr.sw.512",
+  "llvm.x86.avx512.pmulh.w.512",
+  "llvm.x86.avx512.pmulhu.w.512",
+  "llvm.x86.avx512.pmulu.dq.512",
+  "llvm.x86.avx512.psad.bw.512",
+  "llvm.x86.avx512.pshuf.b.512",
+  "llvm.x86.avx512.psll.d.512",
+  "llvm.x86.avx512.psll.q.512",
+  "llvm.x86.avx512.psll.w.512",
+  "llvm.x86.avx512.pslli.d.512",
+  "llvm.x86.avx512.pslli.q.512",
+  "llvm.x86.avx512.pslli.w.512",
+  "llvm.x86.avx512.psllv.d.512",
+  "llvm.x86.avx512.psllv.q.512",
+  "llvm.x86.avx512.psllv.w.128",
+  "llvm.x86.avx512.psllv.w.256",
+  "llvm.x86.avx512.psllv.w.512",
+  "llvm.x86.avx512.psra.d.512",
+  "llvm.x86.avx512.psra.q.128",
+  "llvm.x86.avx512.psra.q.256",
+  "llvm.x86.avx512.psra.q.512",
+  "llvm.x86.avx512.psra.w.512",
+  "llvm.x86.avx512.psrai.d.512",
+  "llvm.x86.avx512.psrai.q.128",
+  "llvm.x86.avx512.psrai.q.256",
+  "llvm.x86.avx512.psrai.q.512",
+  "llvm.x86.avx512.psrai.w.512",
+  "llvm.x86.avx512.psrav.d.512",
+  "llvm.x86.avx512.psrav.q.128",
+  "llvm.x86.avx512.psrav.q.256",
+  "llvm.x86.avx512.psrav.q.512",
+  "llvm.x86.avx512.psrav.w.128",
+  "llvm.x86.avx512.psrav.w.256",
+  "llvm.x86.avx512.psrav.w.512",
+  "llvm.x86.avx512.psrl.d.512",
+  "llvm.x86.avx512.psrl.q.512",
+  "llvm.x86.avx512.psrl.w.512",
+  "llvm.x86.avx512.psrli.d.512",
+  "llvm.x86.avx512.psrli.q.512",
+  "llvm.x86.avx512.psrli.w.512",
+  "llvm.x86.avx512.psrlv.d.512",
+  "llvm.x86.avx512.psrlv.q.512",
+  "llvm.x86.avx512.psrlv.w.128",
+  "llvm.x86.avx512.psrlv.w.256",
+  "llvm.x86.avx512.psrlv.w.512",
+  "llvm.x86.avx512.rcp14.pd.128",
+  "llvm.x86.avx512.rcp14.pd.256",
+  "llvm.x86.avx512.rcp14.pd.512",
+  "llvm.x86.avx512.rcp14.ps.128",
+  "llvm.x86.avx512.rcp14.ps.256",
+  "llvm.x86.avx512.rcp14.ps.512",
+  "llvm.x86.avx512.rcp14.sd",
+  "llvm.x86.avx512.rcp14.ss",
+  "llvm.x86.avx512.rcp28.pd",
+  "llvm.x86.avx512.rcp28.ps",
+  "llvm.x86.avx512.rcp28.sd",
+  "llvm.x86.avx512.rcp28.ss",
+  "llvm.x86.avx512.rsqrt14.pd.128",
+  "llvm.x86.avx512.rsqrt14.pd.256",
+  "llvm.x86.avx512.rsqrt14.pd.512",
+  "llvm.x86.avx512.rsqrt14.ps.128",
+  "llvm.x86.avx512.rsqrt14.ps.256",
+  "llvm.x86.avx512.rsqrt14.ps.512",
+  "llvm.x86.avx512.rsqrt14.sd",
+  "llvm.x86.avx512.rsqrt14.ss",
+  "llvm.x86.avx512.rsqrt28.pd",
+  "llvm.x86.avx512.rsqrt28.ps",
+  "llvm.x86.avx512.rsqrt28.sd",
+  "llvm.x86.avx512.rsqrt28.ss",
+  "llvm.x86.avx512.scatter.dpd.512",
+  "llvm.x86.avx512.scatter.dpi.512",
+  "llvm.x86.avx512.scatter.dpq.512",
+  "llvm.x86.avx512.scatter.dps.512",
+  "llvm.x86.avx512.scatter.qpd.512",
+  "llvm.x86.avx512.scatter.qpi.512",
+  "llvm.x86.avx512.scatter.qpq.512",
+  "llvm.x86.avx512.scatter.qps.512",
+  "llvm.x86.avx512.scatterdiv2.df",
+  "llvm.x86.avx512.scatterdiv2.di",
+  "llvm.x86.avx512.scatterdiv4.df",
+  "llvm.x86.avx512.scatterdiv4.di",
+  "llvm.x86.avx512.scatterdiv4.sf",
+  "llvm.x86.avx512.scatterdiv4.si",
+  "llvm.x86.avx512.scatterdiv8.sf",
+  "llvm.x86.avx512.scatterdiv8.si",
+  "llvm.x86.avx512.scatterpf.dpd.512",
+  "llvm.x86.avx512.scatterpf.dps.512",
+  "llvm.x86.avx512.scatterpf.qpd.512",
+  "llvm.x86.avx512.scatterpf.qps.512",
+  "llvm.x86.avx512.scattersiv2.df",
+  "llvm.x86.avx512.scattersiv2.di",
+  "llvm.x86.avx512.scattersiv4.df",
+  "llvm.x86.avx512.scattersiv4.di",
+  "llvm.x86.avx512.scattersiv4.sf",
+  "llvm.x86.avx512.scattersiv4.si",
+  "llvm.x86.avx512.scattersiv8.sf",
+  "llvm.x86.avx512.scattersiv8.si",
+  "llvm.x86.avx512.vbroadcast.sd.512",
+  "llvm.x86.avx512.vbroadcast.ss.512",
+  "llvm.x86.avx512.vcomi.sd",
+  "llvm.x86.avx512.vcomi.ss",
+  "llvm.x86.avx512.vcvtsd2si32",
+  "llvm.x86.avx512.vcvtsd2si64",
+  "llvm.x86.avx512.vcvtsd2usi32",
+  "llvm.x86.avx512.vcvtsd2usi64",
+  "llvm.x86.avx512.vcvtss2si32",
+  "llvm.x86.avx512.vcvtss2si64",
+  "llvm.x86.avx512.vcvtss2usi32",
+  "llvm.x86.avx512.vcvtss2usi64",
+  "llvm.x86.avx512.vpermilvar.pd.512",
+  "llvm.x86.avx512.vpermilvar.ps.512",
+  "llvm.x86.bmi.bextr.32",
+  "llvm.x86.bmi.bextr.64",
+  "llvm.x86.bmi.bzhi.32",
+  "llvm.x86.bmi.bzhi.64",
+  "llvm.x86.bmi.pdep.32",
+  "llvm.x86.bmi.pdep.64",
+  "llvm.x86.bmi.pext.32",
+  "llvm.x86.bmi.pext.64",
+  "llvm.x86.clflushopt",
+  "llvm.x86.clrssbsy",
+  "llvm.x86.clwb",
+  "llvm.x86.clzero",
+  "llvm.x86.flags.read.u32",
+  "llvm.x86.flags.read.u64",
+  "llvm.x86.flags.write.u32",
+  "llvm.x86.flags.write.u64",
+  "llvm.x86.fma.vfmadd.pd",
+  "llvm.x86.fma.vfmadd.pd.256",
+  "llvm.x86.fma.vfmadd.ps",
+  "llvm.x86.fma.vfmadd.ps.256",
+  "llvm.x86.fma.vfmadd.sd",
+  "llvm.x86.fma.vfmadd.ss",
+  "llvm.x86.fma.vfmaddsub.pd",
+  "llvm.x86.fma.vfmaddsub.pd.256",
+  "llvm.x86.fma.vfmaddsub.ps",
+  "llvm.x86.fma.vfmaddsub.ps.256",
+  "llvm.x86.fma.vfmsub.pd",
+  "llvm.x86.fma.vfmsub.pd.256",
+  "llvm.x86.fma.vfmsub.ps",
+  "llvm.x86.fma.vfmsub.ps.256",
+  "llvm.x86.fma.vfmsub.sd",
+  "llvm.x86.fma.vfmsub.ss",
+  "llvm.x86.fma.vfmsubadd.pd",
+  "llvm.x86.fma.vfmsubadd.pd.256",
+  "llvm.x86.fma.vfmsubadd.ps",
+  "llvm.x86.fma.vfmsubadd.ps.256",
+  "llvm.x86.fma.vfnmadd.pd",
+  "llvm.x86.fma.vfnmadd.pd.256",
+  "llvm.x86.fma.vfnmadd.ps",
+  "llvm.x86.fma.vfnmadd.ps.256",
+  "llvm.x86.fma.vfnmadd.sd",
+  "llvm.x86.fma.vfnmadd.ss",
+  "llvm.x86.fma.vfnmsub.pd",
+  "llvm.x86.fma.vfnmsub.pd.256",
+  "llvm.x86.fma.vfnmsub.ps",
+  "llvm.x86.fma.vfnmsub.ps.256",
+  "llvm.x86.fma.vfnmsub.sd",
+  "llvm.x86.fma.vfnmsub.ss",
+  "llvm.x86.fma4.vfmadd.sd",
+  "llvm.x86.fma4.vfmadd.ss",
+  "llvm.x86.fxrstor",
+  "llvm.x86.fxrstor64",
+  "llvm.x86.fxsave",
+  "llvm.x86.fxsave64",
+  "llvm.x86.incsspd",
+  "llvm.x86.incsspq",
+  "llvm.x86.int",
+  "llvm.x86.llwpcb",
+  "llvm.x86.lwpins32",
+  "llvm.x86.lwpins64",
+  "llvm.x86.lwpval32",
+  "llvm.x86.lwpval64",
+  "llvm.x86.mmx.emms",
+  "llvm.x86.mmx.femms",
+  "llvm.x86.mmx.maskmovq",
+  "llvm.x86.mmx.movnt.dq",
+  "llvm.x86.mmx.packssdw",
+  "llvm.x86.mmx.packsswb",
+  "llvm.x86.mmx.packuswb",
+  "llvm.x86.mmx.padd.b",
+  "llvm.x86.mmx.padd.d",
+  "llvm.x86.mmx.padd.q",
+  "llvm.x86.mmx.padd.w",
+  "llvm.x86.mmx.padds.b",
+  "llvm.x86.mmx.padds.w",
+  "llvm.x86.mmx.paddus.b",
+  "llvm.x86.mmx.paddus.w",
+  "llvm.x86.mmx.palignr.b",
+  "llvm.x86.mmx.pand",
+  "llvm.x86.mmx.pandn",
+  "llvm.x86.mmx.pavg.b",
+  "llvm.x86.mmx.pavg.w",
+  "llvm.x86.mmx.pcmpeq.b",
+  "llvm.x86.mmx.pcmpeq.d",
+  "llvm.x86.mmx.pcmpeq.w",
+  "llvm.x86.mmx.pcmpgt.b",
+  "llvm.x86.mmx.pcmpgt.d",
+  "llvm.x86.mmx.pcmpgt.w",
+  "llvm.x86.mmx.pextr.w",
+  "llvm.x86.mmx.pinsr.w",
+  "llvm.x86.mmx.pmadd.wd",
+  "llvm.x86.mmx.pmaxs.w",
+  "llvm.x86.mmx.pmaxu.b",
+  "llvm.x86.mmx.pmins.w",
+  "llvm.x86.mmx.pminu.b",
+  "llvm.x86.mmx.pmovmskb",
+  "llvm.x86.mmx.pmulh.w",
+  "llvm.x86.mmx.pmulhu.w",
+  "llvm.x86.mmx.pmull.w",
+  "llvm.x86.mmx.pmulu.dq",
+  "llvm.x86.mmx.por",
+  "llvm.x86.mmx.psad.bw",
+  "llvm.x86.mmx.psll.d",
+  "llvm.x86.mmx.psll.q",
+  "llvm.x86.mmx.psll.w",
+  "llvm.x86.mmx.pslli.d",
+  "llvm.x86.mmx.pslli.q",
+  "llvm.x86.mmx.pslli.w",
+  "llvm.x86.mmx.psra.d",
+  "llvm.x86.mmx.psra.w",
+  "llvm.x86.mmx.psrai.d",
+  "llvm.x86.mmx.psrai.w",
+  "llvm.x86.mmx.psrl.d",
+  "llvm.x86.mmx.psrl.q",
+  "llvm.x86.mmx.psrl.w",
+  "llvm.x86.mmx.psrli.d",
+  "llvm.x86.mmx.psrli.q",
+  "llvm.x86.mmx.psrli.w",
+  "llvm.x86.mmx.psub.b",
+  "llvm.x86.mmx.psub.d",
+  "llvm.x86.mmx.psub.q",
+  "llvm.x86.mmx.psub.w",
+  "llvm.x86.mmx.psubs.b",
+  "llvm.x86.mmx.psubs.w",
+  "llvm.x86.mmx.psubus.b",
+  "llvm.x86.mmx.psubus.w",
+  "llvm.x86.mmx.punpckhbw",
+  "llvm.x86.mmx.punpckhdq",
+  "llvm.x86.mmx.punpckhwd",
+  "llvm.x86.mmx.punpcklbw",
+  "llvm.x86.mmx.punpckldq",
+  "llvm.x86.mmx.punpcklwd",
+  "llvm.x86.mmx.pxor",
+  "llvm.x86.monitorx",
+  "llvm.x86.mwaitx",
+  "llvm.x86.pclmulqdq",
+  "llvm.x86.pclmulqdq.256",
+  "llvm.x86.pclmulqdq.512",
+  "llvm.x86.rdfsbase.32",
+  "llvm.x86.rdfsbase.64",
+  "llvm.x86.rdgsbase.32",
+  "llvm.x86.rdgsbase.64",
+  "llvm.x86.rdpid",
+  "llvm.x86.rdpkru",
+  "llvm.x86.rdpmc",
+  "llvm.x86.rdrand.16",
+  "llvm.x86.rdrand.32",
+  "llvm.x86.rdrand.64",
+  "llvm.x86.rdseed.16",
+  "llvm.x86.rdseed.32",
+  "llvm.x86.rdseed.64",
+  "llvm.x86.rdsspd",
+  "llvm.x86.rdsspq",
+  "llvm.x86.rdtsc",
+  "llvm.x86.rdtscp",
+  "llvm.x86.rstorssp",
+  "llvm.x86.saveprevssp",
+  "llvm.x86.seh.ehguard",
+  "llvm.x86.seh.ehregnode",
+  "llvm.x86.seh.lsda",
+  "llvm.x86.seh.recoverfp",
+  "llvm.x86.setssbsy",
+  "llvm.x86.sha1msg1",
+  "llvm.x86.sha1msg2",
+  "llvm.x86.sha1nexte",
+  "llvm.x86.sha1rnds4",
+  "llvm.x86.sha256msg1",
+  "llvm.x86.sha256msg2",
+  "llvm.x86.sha256rnds2",
+  "llvm.x86.slwpcb",
+  "llvm.x86.sse.cmp.ps",
+  "llvm.x86.sse.cmp.ss",
+  "llvm.x86.sse.comieq.ss",
+  "llvm.x86.sse.comige.ss",
+  "llvm.x86.sse.comigt.ss",
+  "llvm.x86.sse.comile.ss",
+  "llvm.x86.sse.comilt.ss",
+  "llvm.x86.sse.comineq.ss",
+  "llvm.x86.sse.cvtpd2pi",
+  "llvm.x86.sse.cvtpi2pd",
+  "llvm.x86.sse.cvtpi2ps",
+  "llvm.x86.sse.cvtps2pi",
+  "llvm.x86.sse.cvtsi2ss",
+  "llvm.x86.sse.cvtsi642ss",
+  "llvm.x86.sse.cvtss2si",
+  "llvm.x86.sse.cvtss2si64",
+  "llvm.x86.sse.cvttpd2pi",
+  "llvm.x86.sse.cvttps2pi",
+  "llvm.x86.sse.cvttss2si",
+  "llvm.x86.sse.cvttss2si64",
+  "llvm.x86.sse.ldmxcsr",
+  "llvm.x86.sse.max.ps",
+  "llvm.x86.sse.max.ss",
+  "llvm.x86.sse.min.ps",
+  "llvm.x86.sse.min.ss",
+  "llvm.x86.sse.movmsk.ps",
+  "llvm.x86.sse.pshuf.w",
+  "llvm.x86.sse.rcp.ps",
+  "llvm.x86.sse.rcp.ss",
+  "llvm.x86.sse.rsqrt.ps",
+  "llvm.x86.sse.rsqrt.ss",
+  "llvm.x86.sse.sfence",
+  "llvm.x86.sse.sqrt.ps",
+  "llvm.x86.sse.sqrt.ss",
+  "llvm.x86.sse.stmxcsr",
+  "llvm.x86.sse.ucomieq.ss",
+  "llvm.x86.sse.ucomige.ss",
+  "llvm.x86.sse.ucomigt.ss",
+  "llvm.x86.sse.ucomile.ss",
+  "llvm.x86.sse.ucomilt.ss",
+  "llvm.x86.sse.ucomineq.ss",
+  "llvm.x86.sse2.clflush",
+  "llvm.x86.sse2.cmp.pd",
+  "llvm.x86.sse2.cmp.sd",
+  "llvm.x86.sse2.comieq.sd",
+  "llvm.x86.sse2.comige.sd",
+  "llvm.x86.sse2.comigt.sd",
+  "llvm.x86.sse2.comile.sd",
+  "llvm.x86.sse2.comilt.sd",
+  "llvm.x86.sse2.comineq.sd",
+  "llvm.x86.sse2.cvtdq2ps",
+  "llvm.x86.sse2.cvtpd2dq",
+  "llvm.x86.sse2.cvtpd2ps",
+  "llvm.x86.sse2.cvtps2dq",
+  "llvm.x86.sse2.cvtsd2si",
+  "llvm.x86.sse2.cvtsd2si64",
+  "llvm.x86.sse2.cvtsd2ss",
+  "llvm.x86.sse2.cvtsi2sd",
+  "llvm.x86.sse2.cvtsi642sd",
+  "llvm.x86.sse2.cvtss2sd",
+  "llvm.x86.sse2.cvttpd2dq",
+  "llvm.x86.sse2.cvttps2dq",
+  "llvm.x86.sse2.cvttsd2si",
+  "llvm.x86.sse2.cvttsd2si64",
+  "llvm.x86.sse2.lfence",
+  "llvm.x86.sse2.maskmov.dqu",
+  "llvm.x86.sse2.max.pd",
+  "llvm.x86.sse2.max.sd",
+  "llvm.x86.sse2.mfence",
+  "llvm.x86.sse2.min.pd",
+  "llvm.x86.sse2.min.sd",
+  "llvm.x86.sse2.movmsk.pd",
+  "llvm.x86.sse2.packssdw.128",
+  "llvm.x86.sse2.packsswb.128",
+  "llvm.x86.sse2.packuswb.128",
+  "llvm.x86.sse2.padds.b",
+  "llvm.x86.sse2.padds.w",
+  "llvm.x86.sse2.paddus.b",
+  "llvm.x86.sse2.paddus.w",
+  "llvm.x86.sse2.pause",
+  "llvm.x86.sse2.pmadd.wd",
+  "llvm.x86.sse2.pmovmskb.128",
+  "llvm.x86.sse2.pmulh.w",
+  "llvm.x86.sse2.pmulhu.w",
+  "llvm.x86.sse2.pmulu.dq",
+  "llvm.x86.sse2.psad.bw",
+  "llvm.x86.sse2.psll.d",
+  "llvm.x86.sse2.psll.q",
+  "llvm.x86.sse2.psll.w",
+  "llvm.x86.sse2.pslli.d",
+  "llvm.x86.sse2.pslli.q",
+  "llvm.x86.sse2.pslli.w",
+  "llvm.x86.sse2.psra.d",
+  "llvm.x86.sse2.psra.w",
+  "llvm.x86.sse2.psrai.d",
+  "llvm.x86.sse2.psrai.w",
+  "llvm.x86.sse2.psrl.d",
+  "llvm.x86.sse2.psrl.q",
+  "llvm.x86.sse2.psrl.w",
+  "llvm.x86.sse2.psrli.d",
+  "llvm.x86.sse2.psrli.q",
+  "llvm.x86.sse2.psrli.w",
+  "llvm.x86.sse2.psubs.b",
+  "llvm.x86.sse2.psubs.w",
+  "llvm.x86.sse2.psubus.b",
+  "llvm.x86.sse2.psubus.w",
+  "llvm.x86.sse2.sqrt.pd",
+  "llvm.x86.sse2.sqrt.sd",
+  "llvm.x86.sse2.ucomieq.sd",
+  "llvm.x86.sse2.ucomige.sd",
+  "llvm.x86.sse2.ucomigt.sd",
+  "llvm.x86.sse2.ucomile.sd",
+  "llvm.x86.sse2.ucomilt.sd",
+  "llvm.x86.sse2.ucomineq.sd",
+  "llvm.x86.sse3.addsub.pd",
+  "llvm.x86.sse3.addsub.ps",
+  "llvm.x86.sse3.hadd.pd",
+  "llvm.x86.sse3.hadd.ps",
+  "llvm.x86.sse3.hsub.pd",
+  "llvm.x86.sse3.hsub.ps",
+  "llvm.x86.sse3.ldu.dq",
+  "llvm.x86.sse3.monitor",
+  "llvm.x86.sse3.mwait",
+  "llvm.x86.sse41.blendvpd",
+  "llvm.x86.sse41.blendvps",
+  "llvm.x86.sse41.dppd",
+  "llvm.x86.sse41.dpps",
+  "llvm.x86.sse41.insertps",
+  "llvm.x86.sse41.mpsadbw",
+  "llvm.x86.sse41.packusdw",
+  "llvm.x86.sse41.pblendvb",
+  "llvm.x86.sse41.phminposuw",
+  "llvm.x86.sse41.pmuldq",
+  "llvm.x86.sse41.ptestc",
+  "llvm.x86.sse41.ptestnzc",
+  "llvm.x86.sse41.ptestz",
+  "llvm.x86.sse41.round.pd",
+  "llvm.x86.sse41.round.ps",
+  "llvm.x86.sse41.round.sd",
+  "llvm.x86.sse41.round.ss",
+  "llvm.x86.sse42.crc32.32.16",
+  "llvm.x86.sse42.crc32.32.32",
+  "llvm.x86.sse42.crc32.32.8",
+  "llvm.x86.sse42.crc32.64.64",
+  "llvm.x86.sse42.pcmpestri128",
+  "llvm.x86.sse42.pcmpestria128",
+  "llvm.x86.sse42.pcmpestric128",
+  "llvm.x86.sse42.pcmpestrio128",
+  "llvm.x86.sse42.pcmpestris128",
+  "llvm.x86.sse42.pcmpestriz128",
+  "llvm.x86.sse42.pcmpestrm128",
+  "llvm.x86.sse42.pcmpistri128",
+  "llvm.x86.sse42.pcmpistria128",
+  "llvm.x86.sse42.pcmpistric128",
+  "llvm.x86.sse42.pcmpistrio128",
+  "llvm.x86.sse42.pcmpistris128",
+  "llvm.x86.sse42.pcmpistriz128",
+  "llvm.x86.sse42.pcmpistrm128",
+  "llvm.x86.sse4a.extrq",
+  "llvm.x86.sse4a.extrqi",
+  "llvm.x86.sse4a.insertq",
+  "llvm.x86.sse4a.insertqi",
+  "llvm.x86.ssse3.pabs.b",
+  "llvm.x86.ssse3.pabs.d",
+  "llvm.x86.ssse3.pabs.w",
+  "llvm.x86.ssse3.phadd.d",
+  "llvm.x86.ssse3.phadd.d.128",
+  "llvm.x86.ssse3.phadd.sw",
+  "llvm.x86.ssse3.phadd.sw.128",
+  "llvm.x86.ssse3.phadd.w",
+  "llvm.x86.ssse3.phadd.w.128",
+  "llvm.x86.ssse3.phsub.d",
+  "llvm.x86.ssse3.phsub.d.128",
+  "llvm.x86.ssse3.phsub.sw",
+  "llvm.x86.ssse3.phsub.sw.128",
+  "llvm.x86.ssse3.phsub.w",
+  "llvm.x86.ssse3.phsub.w.128",
+  "llvm.x86.ssse3.pmadd.ub.sw",
+  "llvm.x86.ssse3.pmadd.ub.sw.128",
+  "llvm.x86.ssse3.pmul.hr.sw",
+  "llvm.x86.ssse3.pmul.hr.sw.128",
+  "llvm.x86.ssse3.pshuf.b",
+  "llvm.x86.ssse3.pshuf.b.128",
+  "llvm.x86.ssse3.psign.b",
+  "llvm.x86.ssse3.psign.b.128",
+  "llvm.x86.ssse3.psign.d",
+  "llvm.x86.ssse3.psign.d.128",
+  "llvm.x86.ssse3.psign.w",
+  "llvm.x86.ssse3.psign.w.128",
+  "llvm.x86.subborrow.u32",
+  "llvm.x86.subborrow.u64",
+  "llvm.x86.tbm.bextri.u32",
+  "llvm.x86.tbm.bextri.u64",
+  "llvm.x86.vcvtph2ps.128",
+  "llvm.x86.vcvtph2ps.256",
+  "llvm.x86.vcvtps2ph.128",
+  "llvm.x86.vcvtps2ph.256",
+  "llvm.x86.vgf2p8affineinvqb.128",
+  "llvm.x86.vgf2p8affineinvqb.256",
+  "llvm.x86.vgf2p8affineinvqb.512",
+  "llvm.x86.vgf2p8affineqb.128",
+  "llvm.x86.vgf2p8affineqb.256",
+  "llvm.x86.vgf2p8affineqb.512",
+  "llvm.x86.vgf2p8mulb.128",
+  "llvm.x86.vgf2p8mulb.256",
+  "llvm.x86.vgf2p8mulb.512",
+  "llvm.x86.wrfsbase.32",
+  "llvm.x86.wrfsbase.64",
+  "llvm.x86.wrgsbase.32",
+  "llvm.x86.wrgsbase.64",
+  "llvm.x86.wrpkru",
+  "llvm.x86.wrssd",
+  "llvm.x86.wrssq",
+  "llvm.x86.wrussd",
+  "llvm.x86.wrussq",
+  "llvm.x86.xabort",
+  "llvm.x86.xbegin",
+  "llvm.x86.xend",
+  "llvm.x86.xgetbv",
+  "llvm.x86.xop.vfrcz.pd",
+  "llvm.x86.xop.vfrcz.pd.256",
+  "llvm.x86.xop.vfrcz.ps",
+  "llvm.x86.xop.vfrcz.ps.256",
+  "llvm.x86.xop.vfrcz.sd",
+  "llvm.x86.xop.vfrcz.ss",
+  "llvm.x86.xop.vpcomb",
+  "llvm.x86.xop.vpcomd",
+  "llvm.x86.xop.vpcomq",
+  "llvm.x86.xop.vpcomub",
+  "llvm.x86.xop.vpcomud",
+  "llvm.x86.xop.vpcomuq",
+  "llvm.x86.xop.vpcomuw",
+  "llvm.x86.xop.vpcomw",
+  "llvm.x86.xop.vpermil2pd",
+  "llvm.x86.xop.vpermil2pd.256",
+  "llvm.x86.xop.vpermil2ps",
+  "llvm.x86.xop.vpermil2ps.256",
+  "llvm.x86.xop.vphaddbd",
+  "llvm.x86.xop.vphaddbq",
+  "llvm.x86.xop.vphaddbw",
+  "llvm.x86.xop.vphadddq",
+  "llvm.x86.xop.vphaddubd",
+  "llvm.x86.xop.vphaddubq",
+  "llvm.x86.xop.vphaddubw",
+  "llvm.x86.xop.vphaddudq",
+  "llvm.x86.xop.vphadduwd",
+  "llvm.x86.xop.vphadduwq",
+  "llvm.x86.xop.vphaddwd",
+  "llvm.x86.xop.vphaddwq",
+  "llvm.x86.xop.vphsubbw",
+  "llvm.x86.xop.vphsubdq",
+  "llvm.x86.xop.vphsubwd",
+  "llvm.x86.xop.vpmacsdd",
+  "llvm.x86.xop.vpmacsdqh",
+  "llvm.x86.xop.vpmacsdql",
+  "llvm.x86.xop.vpmacssdd",
+  "llvm.x86.xop.vpmacssdqh",
+  "llvm.x86.xop.vpmacssdql",
+  "llvm.x86.xop.vpmacsswd",
+  "llvm.x86.xop.vpmacssww",
+  "llvm.x86.xop.vpmacswd",
+  "llvm.x86.xop.vpmacsww",
+  "llvm.x86.xop.vpmadcsswd",
+  "llvm.x86.xop.vpmadcswd",
+  "llvm.x86.xop.vpperm",
+  "llvm.x86.xop.vprotb",
+  "llvm.x86.xop.vprotbi",
+  "llvm.x86.xop.vprotd",
+  "llvm.x86.xop.vprotdi",
+  "llvm.x86.xop.vprotq",
+  "llvm.x86.xop.vprotqi",
+  "llvm.x86.xop.vprotw",
+  "llvm.x86.xop.vprotwi",
+  "llvm.x86.xop.vpshab",
+  "llvm.x86.xop.vpshad",
+  "llvm.x86.xop.vpshaq",
+  "llvm.x86.xop.vpshaw",
+  "llvm.x86.xop.vpshlb",
+  "llvm.x86.xop.vpshld",
+  "llvm.x86.xop.vpshlq",
+  "llvm.x86.xop.vpshlw",
+  "llvm.x86.xrstor",
+  "llvm.x86.xrstor64",
+  "llvm.x86.xrstors",
+  "llvm.x86.xrstors64",
+  "llvm.x86.xsave",
+  "llvm.x86.xsave64",
+  "llvm.x86.xsavec",
+  "llvm.x86.xsavec64",
+  "llvm.x86.xsaveopt",
+  "llvm.x86.xsaveopt64",
+  "llvm.x86.xsaves",
+  "llvm.x86.xsaves64",
+  "llvm.x86.xsetbv",
+  "llvm.x86.xtest",
+  "llvm.xcore.bitrev",
+  "llvm.xcore.checkevent",
+  "llvm.xcore.chkct",
+  "llvm.xcore.clre",
+  "llvm.xcore.clrpt",
+  "llvm.xcore.clrsr",
+  "llvm.xcore.crc32",
+  "llvm.xcore.crc8",
+  "llvm.xcore.edu",
+  "llvm.xcore.eeu",
+  "llvm.xcore.endin",
+  "llvm.xcore.freer",
+  "llvm.xcore.geted",
+  "llvm.xcore.getet",
+  "llvm.xcore.getid",
+  "llvm.xcore.getps",
+  "llvm.xcore.getr",
+  "llvm.xcore.getst",
+  "llvm.xcore.getts",
+  "llvm.xcore.in",
+  "llvm.xcore.inct",
+  "llvm.xcore.initcp",
+  "llvm.xcore.initdp",
+  "llvm.xcore.initlr",
+  "llvm.xcore.initpc",
+  "llvm.xcore.initsp",
+  "llvm.xcore.inshr",
+  "llvm.xcore.int",
+  "llvm.xcore.mjoin",
+  "llvm.xcore.msync",
+  "llvm.xcore.out",
+  "llvm.xcore.outct",
+  "llvm.xcore.outshr",
+  "llvm.xcore.outt",
+  "llvm.xcore.peek",
+  "llvm.xcore.setc",
+  "llvm.xcore.setclk",
+  "llvm.xcore.setd",
+  "llvm.xcore.setev",
+  "llvm.xcore.setps",
+  "llvm.xcore.setpsc",
+  "llvm.xcore.setpt",
+  "llvm.xcore.setrdy",
+  "llvm.xcore.setsr",
+  "llvm.xcore.settw",
+  "llvm.xcore.setv",
+  "llvm.xcore.sext",
+  "llvm.xcore.ssync",
+  "llvm.xcore.syncr",
+  "llvm.xcore.testct",
+  "llvm.xcore.testwct",
+  "llvm.xcore.waitevent",
+  "llvm.xcore.zext",
+#endif
+
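+// Editorial sketch (not generated output): the name table above is meant to
+// be materialized by a consumer via the usual #define/#include idiom.  The
+// guard macro GET_INTRINSIC_NAME_TABLE, the array name IntrinsicNameTable,
+// and the include path below are assumptions; none of them is visible in
+// this excerpt.
+//
+//   static const char *const IntrinsicNameTable[] = {
+//   #define GET_INTRINSIC_NAME_TABLE
+//   #include "llvm/IR/Intrinsics.gen"
+//   #undef GET_INTRINSIC_NAME_TABLE
+//   };
+//
+// The strings are emitted in sorted order, so a consumer can map a name back
+// to an intrinsic ID with a binary search (e.g. std::lower_bound) rather
+// than a linear scan.
+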
+// Intrinsic ID to overload bitset.  One bit per intrinsic ID, packed eight
+// IDs per byte of OTable.
+#ifdef GET_INTRINSIC_OVERLOAD_TABLE
+static const uint8_t OTable[] = {
+  0 | (1<<3) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<3) | (1<<4) | (1<<5),
+  0,
+  0 | (1<<2) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0),
+  0 | (1<<0),
+  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
+  0 | (1<<0) | (1<<1) | (1<<6),
+  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<2) | (1<<3) | (1<<5) | (1<<6),
+  0 | (1<<1) | (1<<2) | (1<<3),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
+  0 | (1<<3) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<5),
+  0,
+  0,
+  0,
+  0 | (1<<3) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
+  0 | (1<<0) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
+  0 | (1<<0) | (1<<2) | (1<<3) | (1<<6) | (1<<7),
+  0,
+  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<3) | (1<<4),
+  0,
+  0 | (1<<1) | (1<<2) | (1<<3),
+  0 | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<1) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
+  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
+  0 | (1<<6),
+  0 | (1<<0) | (1<<4),
+  0 | (1<<3) | (1<<7),
+  0 | (1<<0),
+  0,
+  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<5),
+  0 | (1<<5),
+  0 | (1<<0),
+  0,
+  0 | (1<<0) | (1<<6),
+  0,
+  0,
+  0 | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<2) | (1<<3) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<0) | (1<<2),
+  0,
+  0,
+  0 | (1<<7),
+  0 | (1<<0),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1),
+  0,
+  0 | (1<<3) | (1<<4),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1),
+  0,
+  0,
+  0 | (1<<2),
+  0,
+  0 | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<6),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<4) | (1<<5),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0),
+  0,
+  0,
+  0,
+  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<7),
+  0,
+  0,
+  0 | (1<<2),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<0) | (1<<1),
+  0 | (1<<4),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<6),
+  0 | (1<<1) | (1<<2) | (1<<3),
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0 | (1<<2) | (1<<4),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
+  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<5),
+  0 | (1<<0) | (1<<1) | (1<<2)
+};
+
+return (OTable[id/8] & (1 << (id%8))) != 0;
+#endif
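+
+// The OTable block above is a bitset with one bit per intrinsic ID; the bare
+// `return` statement parses only because the #ifdef'd fragment is textually
+// #include'd into a function body. In upstream LLVM of this era the consumer
+// (in lib/IR/Function.cpp) is essentially:
+//
+//   bool Intrinsic::isOverloaded(ID id) {
+//   #define GET_INTRINSIC_OVERLOAD_TABLE
+//   #include "llvm/IR/Intrinsics.gen"
+//   #undef GET_INTRINSIC_OVERLOAD_TABLE
+//   }
+//
+// so isOverloaded(id) reports whether bit (id % 8) of byte OTable[id / 8] is
+// set, i.e. whether the intrinsic is overloaded on its operand types.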
+
+// Global intrinsic function declaration type table.
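+// Each 32-bit entry encodes one intrinsic's type signature as a packed
+// sequence of 4-bit codes, consumed from the least significant nibble up
+// (decoding stops once the remaining value is zero). Entries of the form
+// (1U<<31) | Offset are escapes for signatures too long to fit inline: the
+// low 31 bits give an offset into the separate IIT_LongEncodingTable emitted
+// alongside this table, as decoded in upstream LLVM by
+// Intrinsic::getIntrinsicInfoTableEntries().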
+#ifdef GET_INTRINSIC_GENERATOR_GLOBAL
+static const unsigned IIT_Table[] = {
+  0x2e, 0x2e2e, (1U<<31) | 1790, 0x10, 0x1f1f, 0x1f1f, 0x2f2f, 
+  0x2f2f, 0x2e2e0, (1U<<31) | 4170, 0x32f, 0x2f3, 0x2f2f2f, (1U<<31) | 4163, (1U<<31) | 1070, 
+  0x2e0, 0x2e1, 0x12e1, 0x2e, (1U<<31) | 1070, (1U<<31) | 981, 0x2e2e1, 0x142e2e, 
+  0x2e0, (1U<<31) | 1072, 0x1f, 0x22e2e, (1U<<31) | 195, 0x2f2f, 0x11f1f, 0x1f1f, 
+  0x11f1f, (1U<<31) | 4217, (1U<<31) | 4217, (1U<<31) | 4217, 0x0, 0x0, 0x42e, (1U<<31) | 4167, 
+  (1U<<31) | 4166, 0x2e40, 0x2e50, 0x40, 0x2e0, 0x2e0, 0x2e, 0x2e4, 
+  0x0, 0x2e4, 0x0, 0x2f2f, 0x2f2f, 0x1f1f1f, (1U<<31) | 4202, (1U<<31) | 4202, 
+  (1U<<31) | 4202, (1U<<31) | 4200, (1U<<31) | 4200, (1U<<31) | 4198, (1U<<31) | 4200, (1U<<31) | 4200, (1U<<31) | 4200, (1U<<31) | 4202, 
+  (1U<<31) | 4202, (1U<<31) | 4202, (1U<<31) | 4202, (1U<<31) | 4200, (1U<<31) | 4209, (1U<<31) | 4202, (1U<<31) | 4202, (1U<<31) | 4202, 
+  (1U<<31) | 4222, (1U<<31) | 2481, (1U<<31) | 4159, (1U<<31) | 4246, (1U<<31) | 4226, (1U<<31) | 4238, (1U<<31) | 4230, (1U<<31) | 4255, 
+  0xbf1f, 0xbf1f, (1U<<31) | 4191, 0xbf2f, 0xbf2f, (1U<<31) | 4191, 0xbf1f, 0xbf1f, 
+  0xbf1f, 0xbf1f, 0xbf1f, 0xbf1f, 0xbf1f, 0x2f2f, 0x2f2f, 0x4, 
+  0x2f2f2f2f, 0x2f2f2f2f, 0x42e, 0x2ee2e2e, 0x2e2ee0, 0x2ee2e2e0, 0x1f, (1U<<31) | 4223, 
+  0x2e2e2e0, 0x4452e0, 0x54452e0, 0x44552e0, (1U<<31) | 3047, 0x4f4f, (1U<<31) | 3048, 0x4f50, 
+  0x4f50, 0x1f2e2e, 0x2e, (1U<<31) | 4223, 0x42e2e2e, 0x2f2f, 0x2f2f, 0x2f2f, 
+  0x42e0, (1U<<31) | 107, (1U<<31) | 1251, (1U<<31) | 1261, (1U<<31) | 1273, (1U<<31) | 116, (1U<<31) | 127, 0x2f2f2f, 
+  (1U<<31) | 186, (1U<<31) | 3137, (1U<<31) | 186, (1U<<31) | 3137, 0x19f24f0, 0x49f24f0, 0x2f2f2f, 0x2f2f, 
+  0x11cf1f, 0x40, 0x2f2f2f, 0x42f2f, 0x4442e0, (1U<<31) | 1800, (1U<<31) | 4173, 0x5, 
+  0x42e, 0x2f2f, 0x2f2f, (1U<<31) | 172, 0x2e4, 0x0, 0x42e0, 0x42e4, 
+  0x2f2f, (1U<<31) | 172, 0x2f2f, 0xf0f, (1U<<31) | 172, 0x2e, 0x2ee2e0, 0x2e0, 
+  0x2e, 0x2e, 0x0, 0x2f2f, (1U<<31) | 4182, (1U<<31) | 4177, (1U<<31) | 172, (1U<<31) | 172, 
+  (1U<<31) | 172, 0x2e2e0, 0x2e0, 0x2e0, 0x42e2e2e0, (1U<<31) | 181, 0x42e0, 0x0, 
+  0x444, 0x444, 0x444, 0x444, 0x544, 0x444, 0x444, 0x544, 
+  0x2c2c2c, 0x2c2c2c, 0x2c2c, 0x2c2c, 0x4a44a4a, 0x44, 0x4a44a4a, 0x4a44a4a, 
+  0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x40, 0x40, 
+  0x40, 0x40, (1U<<31) | 1058, 0x4f5, (1U<<31) | 1058, 0x4f5, 0xf0f, (1U<<31) | 1328, 
+  0x3f3f3f, 0x3f3f, 0x3f3f3f, 0xafaf1f, 0xafaf1f, 0xbf2f, 0xaf1f, 0xaf1f, 
+  0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xbf3f, 0xaf1f, 
+  0xaf1f, 0x2f2f2f, 0x2f2f2f, 0x3f3f3f, 0xbf2f, 0x3f3f3f, 0xbf2f, 0x2f2f2f, 
+  0x2f2f2f, 0x3f3f3f, 0xbf2f, 0x3f3f3f, 0xbf2f, 0x2f2f2f, 0x2f2f, 0x2f2f2f, 
+  0x2f2f, 0x2f2f, 0x2f2f, 0x2f2f2f, (1U<<31) | 4045, (1U<<31) | 4035, (1U<<31) | 4023, (1U<<31) | 4045, 
+  (1U<<31) | 4124, (1U<<31) | 4045, (1U<<31) | 4035, (1U<<31) | 4107, (1U<<31) | 4035, (1U<<31) | 4023, (1U<<31) | 4086, (1U<<31) | 4023, 
+  0x3f3f3f, (1U<<31) | 1340, 0x552c, (1U<<31) | 1328, 0x3f3f, (1U<<31) | 1347, (1U<<31) | 1328, 0x3f3f3f, 
+  0xbf3f, 0xbf1f, 0xbf1f, 0x9f1f, 0x9f1f, 0x9f1f, 0x3f3f3f, (1U<<31) | 1335, 
+  0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0xbf1f, 0x3f3f3f, 0x3f3f3f, 0xbf1f, (1U<<31) | 1340, 
+  0x1f1f, 0x1f1f1f, 0x1f1f1f, (1U<<31) | 1340, 0x445, 0x1f1f, 0x1f1f1f, 0x1f1f1f, 
+  (1U<<31) | 1347, (1U<<31) | 1347, 0x1f1f1f, 0x1f1f1f, (1U<<31) | 1347, (1U<<31) | 1347, 0x1f1f1f, (1U<<31) | 199, 
+  (1U<<31) | 199, 0x3f3f3f, 0x1f1f1f, 0x1f1f1f, (1U<<31) | 1961, 0xcf3f3f0, (1U<<31) | 4001, (1U<<31) | 4011, 
+  0xcf3f3f0, (1U<<31) | 4053, (1U<<31) | 4001, (1U<<31) | 4062, (1U<<31) | 4011, (1U<<31) | 4073, (1U<<31) | 1328, 0x1f1f1f, 
+  0x3f2c3f, 0x3f2c2c3f, (1U<<31) | 1301, (1U<<31) | 1286, 0x3f2c3f3f, (1U<<31) | 1312, (1U<<31) | 1299, (1U<<31) | 1284, 
+  0x3f3f3f, 0xbf3f, 0xbf1f, 0xbf1f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 
+  0xbf1f, 0x3f3f3f, 0x3f3f3f, 0xbf1f, (1U<<31) | 1340, 0x1f1f1f, 0x1f1f1f, (1U<<31) | 1347, 
+  0x1f1f1f, (1U<<31) | 1347, 0x1f1f1f, (1U<<31) | 199, 0x3f3f, 0x3f3f3f, 0x1f1f1f, 0x3f3f, 
+  0x1f1f1f, (1U<<31) | 1961, 0x1f1f1f, 0x53f5bf3f, 0x4af1f, 0x4af1f, 0x7a3a, 0x49f2f, 
+  0x49f2f, 0x3a7a, 0x43f3f3f, 0x43f3f3f, 0x1f1f1f, 0x2f2f2f, 0x87, 0x2e554, 
+  0x4f54, 0x2e554, 0x4f54, 0x1f1f1f, 0x4444, 0x4444, (1U<<31) | 137, (1U<<31) | 137, 
+  0x55, 0x1444a44, 0x1444a44, 0x1444a444, 0x1444a44, 0x1444a44, 0x1444a44, 0x1444a44, 
+  0x1444a44, 0x1444a44, 0x1444a44, 0x1444a44, 0x11444a2f, 0x11444a2f, (1U<<31) | 77, (1U<<31) | 77, 
+  0x0, 0x0, 0x0, 0x42f1, 0x2f2f, 0x7777, 0x7777, 0x7777, 
+  0x7777, 0x4439, 0x4439, 0x4474, 0x7739, 0x7739, 0x7769, 0x5, 
+  (1U<<31) | 531, 0x2f2f2f2f, (1U<<31) | 97, (1U<<31) | 87, 0x444, (1U<<31) | 158, (1U<<31) | 158, (1U<<31) | 158, 
+  0x444, 0x444, (1U<<31) | 3196, 0x555, 0x50, (1U<<31) | 0, (1U<<31) | 53, 0x42f2f5, 
+  0x777, 0x2f2f2f2f, 0x777, 0x2f2f, 0xaf1f, 0x2f2f, 0x4, 0x41f1f5, 
+  (1U<<31) | 167, 0x515, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 42, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, 
+  (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 43, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 30, (1U<<31) | 30, (1U<<31) | 30, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, 
+  (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 14, (1U<<31) | 29, (1U<<31) | 29, (1U<<31) | 531, (1U<<31) | 531, 0x50, 
+  0x440, 0x44447, 0x44477, 0x444777, (1U<<31) | 531, 0x10, 0x42f2f, 0x4444, 
+  0x2f2f, 0x51, 0x444, 0x444, 0x14441f1f, 0x5455, 0x4a454a, 0x4444, 
+  0x1, 0x5455, (1U<<31) | 531, 0x2f2f, 0x77, 0x44, 0x444, 0x2f2f, 
+  0x2f2f, 0x77, 0x0, 0x0, 0x0, 0x0, 0x0, 0x40, 
+  0x5, 0x44, 0x40, 0x5, 0x5, 0x440, 0x440, 0x40, 
+  0x40, 0x4444, 0x4444, 0x4444, 0x441f1f, 0x1f1f1f, 0x1f1f, 0x2f2f, 
+  (1U<<31) | 64, (1U<<31) | 63, 0x42f2f, 0x441f1f, 0x0, (1U<<31) | 147, 0x0, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0xf0f, 0x11, 0x4444, 
+  0xf0f, 0x4444440, 0x4444440, 0x0, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x40, 0x40, 0x40, 0x4, 0x40, 0x40, 
+  0x4f4, (1U<<31) | 1042, 0x2e440, 0x2e440, 0x2e440, 0x2e440, 0x4f4, (1U<<31) | 1042, 
+  0x4444440, 0x4444440, 0x444440, 0x444440, 0x444444, 0x444444, (1U<<31) | 2061, (1U<<31) | 2061, 
+  0x2c2c2c, 0x2c2c2c, 0x2c2c, 0x2c2c, 0x4a44a4a, 0x44, 0x4a44a4a, 0x4a44a4a, 
+  0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x3f3f3f, 0x3f3f3f, 
+  0x3f3f, 0xbfbf3f, 0xbfbf3f, 0x3f3f3f3f, 0x3f3f, 0xbf3f, 0xbf3f, 0x4af1f, 
+  0x4af1f, 0x7a3a, 0x49f2f, 0x49f2f, 0x3a7a, 0xbf3f, 0xbf3f, 0xbf3f, 
+  0xbf3f, 0xbf3f, 0xbf3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x4cf3f, 
+  (1U<<31) | 2932, (1U<<31) | 2037, (1U<<31) | 2921, (1U<<31) | 2019, (1U<<31) | 2908, (1U<<31) | 1997, 0x3f3f3f, 0x3f3f3f, 
+  0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, (1U<<31) | 1340, (1U<<31) | 1340, (1U<<31) | 1340, 0x3f3f3f, 
+  0xbf3f3f, 0xbf3f3f, 0x3f3f3f, 0xbf3f, 0xbf3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 
+  0x3f3f3f, 0x3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, (1U<<31) | 1340, (1U<<31) | 1323, (1U<<31) | 1323, 
+  (1U<<31) | 1323, 0x3f3f, 0x3f3f3f, (1U<<31) | 1328, (1U<<31) | 1328, (1U<<31) | 1328, 0x3f3f3f, 0x3f3f3f, 
+  (1U<<31) | 1328, (1U<<31) | 1328, (1U<<31) | 1328, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 
+  (1U<<31) | 1328, 0x3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x3f3f, 0x3f3f, 0x3f3f, 
+  0x3f3f, 0x3f3f, 0x3f3f, (1U<<31) | 1328, 0x3f3f3f, 0x3f3f3f, 0x3f3f, 0x3f3f3f, 
+  (1U<<31) | 1328, 0x3f3f3f3f, 0x3f3f3f, 0x3f3f3f, 0x4bf4f0, 0x4bfbf4f0, (1U<<31) | 2303, (1U<<31) | 2812, 
+  (1U<<31) | 2313, (1U<<31) | 2823, (1U<<31) | 2325, 0x2b2b2b, 0x2b2b2b2b, (1U<<31) | 954, (1U<<31) | 952, 0x2b2b2b2b, 
+  (1U<<31) | 954, (1U<<31) | 952, (1U<<31) | 950, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x40, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x5445, 0x5445, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x5445, 0x5445, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x2e440, 0x2e440, 0x2e440, 0x2e440, 
+  0x4f44, 0x2e444, 0x4f44, 0x2e444, 0x444, 0x44, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x40, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x4444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x44, 0x2f7, 
+  0x2f7, 0x52e5, 0x52e5, 0x52e5, 0x555, 0x44, 0x55, 0x44, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x555, 0x555, 
+  0x444, 0x545, 0x444, 0x444, 0x555, 0x44, 0x44, 0x444, 
+  0x444, 0x444, 0x444, 0x445, 0x445, 0x444, 0x555, 0x444, 
+  0x555, 0x444, 0x555, 0x444, 0x555, 0x44, 0x55, 0x44, 
+  0x44, 0x55, 0x444, 0x444, 0x555, 0x54, 0x54, 0x44, 
+  0x44, 0x44, 0x44, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x555, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x44, 0x44, 0x44, 0x45, 
+  0x44, 0x444, 0x444, 0x55, 0x45, 0x44, 0x55, 0x55, 
+  0x55, 0x55, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x554, 0x554, 
+  0x554, 0x554, 0x554, 0x554, 0x554, 0x554, 0x55, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x5555, 0x555, 0x5555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x444, 0x555, 0x44, 
+  0x44, 0x444, 0x555, 0x445, 0x445, 0x544, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x445, 0x445, 0x444, 0x444, 0x444, 0x444, 
+  0x555, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x454, 0x554, 0x454, 0x554, 0x454, 0x454, 0x454, 
+  0x454, 0x454, 0x454, 0x454, 0x454, 0x4555, 0x4555, 0x4555, 
+  0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x554, 0x554, 0x554, 
+  0x44, 0x444, 0x444, 0x44, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x554, 0x444, 0x444, 0x444, 0x444, 0x554, 0x444, 
+  0x444, 0x554, 0x444, 0x444, 0x45, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x44, 0x444, 0x444, 0x44, 0x44, 0x44, 0x444, 
+  0x5545, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x58, 0x57, 0x85, 
+  0x85, 0x87, 0x85, 0x85, 0x84, 0x84, 0x84, 0x84, 
+  0x75, 0x75, 0x78, 0x75, 0x75, 0x74, 0x74, 0x74, 
+  0x74, 0x58, 0x57, 0x48, 0x47, 0x48, 0x47, 0x484, 
+  0x884, 0x884, 0x884, 0x884, 0x48, 0x48, 0x777, 0x474, 
+  0x774, 0x774, 0x774, 0x774, 0x777, 0x777, 0x77, 0x7777, 
+  0x7777, 0x47777, 0x7777, 0x7777, 0x47, 0x47, 0x777, 0x777, 
+  0x777, 0x777, (1U<<31) | 1810, (1U<<31) | 1010, (1U<<31) | 990, (1U<<31) | 1818, (1U<<31) | 1021, (1U<<31) | 1000, 
+  (1U<<31) | 1810, (1U<<31) | 1010, (1U<<31) | 990, (1U<<31) | 1810, (1U<<31) | 1010, (1U<<31) | 990, (1U<<31) | 1810, (1U<<31) | 1010, 
+  (1U<<31) | 990, (1U<<31) | 1810, (1U<<31) | 1010, (1U<<31) | 990, 0x4e4, 0x5e5, 0x4444, 0x4444, 
+  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 
+  0x444, 0x444, 0x444, 0x444, 0x445, 0x445, 0x445, 0x445, 
+  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x444, 0x445, 
+  0x4455, 0x4455, 0x445, 0x444, 0x444, 0x444, 0x444, 0x4444, 
+  0x4444, 0x4444, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 
+  0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 
+  0x5555, 0x5555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x555, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x4455, 0x4455, 0x4455, 
+  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 0x445, 
+  0x445, 0x445, 0x445, 0x445, 0x445, 0x4455, 0x4455, 0x4455, 
+  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 0x445, 
+  0x445, 0x445, 0x445, 0x445, 0x445, 0x444, 0x444, 0x444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x444, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 
+  0x4455, 0x445, 0x445, 0x445, 0x445, 0x445, 0x445, 0x445, 
+  0x445, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 
+  0x4455, 0x444, 0x4444, 0x4444, 0x4444, 0x555, 0x555, 0x5555, 
+  0x5555, 0x555, 0x555, 0x555, 0x555, 0x5555, 0x5555, 0x554, 
+  0x554, 0x555, 0x555, 0x4455, 0x5555, 0x5555, 0x5555, 0x4455, 
+  0x4455, 0x4455, 0x4455, 0x555, 0x555, 0x445, 0x444, 0x445, 
+  0x444, 0x445, 0x445, 0x554, 0x554, 0x5555, 0x5555, 0x5555, 
+  0x5555, 0x555, 0x555, 0x555, 0x555, 0x4555, 0x455, 0x454, 
+  0x5555, 0x555, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x454, 
+  0x454, 0x454, 0x454, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x445, 0x4455, 
+  0x445, 0x4455, 0x5555, 0x5555, 0x555, 0x555, 0x5555, 0x5555, 
+  0x555, 0x555, 0x4444, 0x4444, 0x4444, 0x5555, 0x5555, 0x555, 
+  0x4455, 0x4455, 0x445, 0x445, 0x5555, 0x5555, 0x555, 0x555, 
+  0x555, 0x555, 0x4444, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 
+  0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x4444, 
+  0x455, 0x455, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 
+  0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x455, 0x455, 
+  0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x455, 0x455, 0x444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x444, 0x454, 0x455, 
+  0x455, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x454, 0x455, 0x455, 
+  0x44, 0x55, 0x4555, 0x44, 0x54, 0x44, 0x54, 0x44, 
+  0x44, 0x54, 0x444, 0x444, 0x44, 0x54, 0x44, 0x54, 
+  0x55, 0x4444, 0x544, 0x4455, 0x555, 0x44444, 0x5444, 0x44555, 
+  0x5555, 0x55, 0x555, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 
+  0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x455, 0x455, 
+  0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x444, 0x4444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x455, 0x455, 0x455, 0x4555, 
+  0x4555, 0x4555, 0x4555, 0x4555, 0x444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x455, 0x455, 0x445, 0x554, 0x444, 0x444, 0x555, 
+  0x555, 0x555, 0x555, 0x442e2e, (1U<<31) | 1032, 0x2e442e2e, 0x452e2e, (1U<<31) | 1048, 
+  0x2e542e2e, 0x442e2e, (1U<<31) | 1032, 0x2e442e2e, 0x442e2e, (1U<<31) | 1032, 0x2e442e2e, 0x442e2e, 
+  (1U<<31) | 1032, 0x2e442e2e, 0x44e4, 0x44, 0x44, 0x44444, 0x44444, 0x44444, 
+  0x44444, 0x444, 0x444, 0x444, 0x444, 0x4555, 0x4555, 0x455, 
+  0x455, 0x4555, 0x54, 0x54, 0x54, 0x55, 0x54, 0x55, 
+  0x54, 0x55, 0x54, 0x55, 0x44, 0x45, 0x4555, 0x4555, 
+  0x45, 0x45, 0x54, 0x555, 0x54, 0x555, 0x45, 0x45, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x454, 0x54, 
+  0x4444, 0x544, 0x4455, 0x555, 0x444, 0x444, 0x444, 0x4444, 
+  0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x55e4, 0x4444, 0x4444, 
+  0x4444, 0x4455, 0x44555, 0x555, 0x555, 0x555, 0x555, 0x555, 
+  0x555, 0x454, 0x454, 0x54, 0x455, 0x455, 0x4555, 0x4555, 
+  0x4555, 0x4555, 0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x4444, 0x45, 0x555, 0x555, 0x44c4, 0x44d4, 0x4d4c, (1U<<31) | 3121, 
+  0x4d4c, (1U<<31) | 3121, 0x44c, 0x44d, 0x44c, 0x44d, 0x44c, 0x44d, 
+  (1U<<31) | 204, (1U<<31) | 223, (1U<<31) | 204, (1U<<31) | 223, (1U<<31) | 206, (1U<<31) | 225, (1U<<31) | 204, (1U<<31) | 223, 
+  (1U<<31) | 204, (1U<<31) | 223, (1U<<31) | 1357, (1U<<31) | 1365, (1U<<31) | 1357, (1U<<31) | 1365, (1U<<31) | 204, (1U<<31) | 223, 
+  (1U<<31) | 204, (1U<<31) | 223, (1U<<31) | 204, (1U<<31) | 223, (1U<<31) | 2843, (1U<<31) | 2948, (1U<<31) | 2843, (1U<<31) | 2948, 
+  (1U<<31) | 2843, (1U<<31) | 2948, (1U<<31) | 2843, (1U<<31) | 2948, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2870, (1U<<31) | 2985, (1U<<31) | 2870, (1U<<31) | 2985, 
+  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 211, (1U<<31) | 230, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2870, (1U<<31) | 2985, 
+  (1U<<31) | 2870, (1U<<31) | 2985, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 0x4c4c4d, (1U<<31) | 3015, 
+  0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4c, 0x4d4d4d, 
+  0x4d4d4d, (1U<<31) | 3126, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 
+  0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 
+  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2870, (1U<<31) | 2985, (1U<<31) | 2870, (1U<<31) | 2985, 
+  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x4c4c4c, 0x4d4d4d, (1U<<31) | 1355, (1U<<31) | 1363, (1U<<31) | 1353, (1U<<31) | 1361, (1U<<31) | 1355, (1U<<31) | 1363, 
+  (1U<<31) | 1353, (1U<<31) | 1361, (1U<<31) | 2836, (1U<<31) | 2941, (1U<<31) | 2836, (1U<<31) | 2941, (1U<<31) | 2341, (1U<<31) | 2379, 
+  (1U<<31) | 2339, (1U<<31) | 2377, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c, 0x44d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4d4d, (1U<<31) | 3128, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4d, (1U<<31) | 3015, 0x4c, 0x4d, 
+  0x4d, (1U<<31) | 3110, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 
+  0x44c4c4d, (1U<<31) | 2397, 0x4c4c4c, 0x4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x44d4c, (1U<<31) | 2462, 0x44d4c4c, (1U<<31) | 2460, 
+  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44d4c, (1U<<31) | 2462, 0x44d4c4c, (1U<<31) | 2460, 
+  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 0x4c442e0, 0x4d442e0, (1U<<31) | 2851, (1U<<31) | 2966, 
+  0x4d442e0, (1U<<31) | 3113, (1U<<31) | 2956, (1U<<31) | 3103, 0x4c442e0, 0x4d442e0, (1U<<31) | 2851, (1U<<31) | 2966, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  (1U<<31) | 2863, (1U<<31) | 2978, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, (1U<<31) | 2861, (1U<<31) | 2976, 
+  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 
+  0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x54c4c, 0x54d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, (1U<<31) | 2357, (1U<<31) | 2385, 
+  (1U<<31) | 2357, (1U<<31) | 2385, 0x44c4c4c, 0x44d4d4d, 0x44c4c4d, (1U<<31) | 2397, 0x44c4c4d, (1U<<31) | 2397, 
+  (1U<<31) | 2367, (1U<<31) | 2395, (1U<<31) | 2367, (1U<<31) | 2395, 0x44c4c4d, (1U<<31) | 2397, (1U<<31) | 2843, (1U<<31) | 2948, 
+  (1U<<31) | 2843, (1U<<31) | 2948, (1U<<31) | 2843, (1U<<31) | 2948, (1U<<31) | 2843, (1U<<31) | 2948, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x4d4d4d, (1U<<31) | 3126, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x4d4d4d, (1U<<31) | 3126, 0x44d4d, (1U<<31) | 2470, 
+  0x44d4d4d, (1U<<31) | 2468, 0x54c4c4c, 0x54d4d4d, 0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 
+  0x54c4c4c, 0x54d4d4d, 0x54c4c4c, 0x54d4d4d, 0x44c4d, (1U<<31) | 2407, 0x44c4d4d, (1U<<31) | 2405, 
+  0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4d, (1U<<31) | 3015, 0x44c4d, (1U<<31) | 2407, 0x44c4d4d, (1U<<31) | 2405, 
+  0x44c4d4d, (1U<<31) | 2405, 0x44c4c, 0x44d4d, 0x44c4c, 0x44d4d, 0x4c4c4d, (1U<<31) | 3015, 
+  0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 
+  0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4d4d, (1U<<31) | 3013, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
+  0x44c4d, (1U<<31) | 2407, 0x44c4d4d, (1U<<31) | 2405, 0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, 
+  0x44c4d, (1U<<31) | 2407, 0x44c4d4d, (1U<<31) | 2405, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x4c4c4d, (1U<<31) | 3015, 0x4c4c4d4d, (1U<<31) | 3013, (1U<<31) | 2870, (1U<<31) | 2985, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 
+  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 
+  (1U<<31) | 218, (1U<<31) | 237, (1U<<31) | 218, (1U<<31) | 237, (1U<<31) | 218, (1U<<31) | 237, 0x4c4c4c, 0x4d4d4d, 
+  0x54c4d, (1U<<31) | 3190, 0x54c4d4d, (1U<<31) | 3188, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x444d4d, (1U<<31) | 2146, 0x444d4d4d, (1U<<31) | 2144, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
+  0x54c4d, (1U<<31) | 3190, 0x54c4d4d, (1U<<31) | 3188, 0x444d4d, (1U<<31) | 2146, 0x444d4d4d, (1U<<31) | 2144, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x444d4d, (1U<<31) | 2146, 0x444d4d4d, (1U<<31) | 2144, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4d, (1U<<31) | 3025, 0x4c4c440, 0x4d4d440, 
+  0x4c4c440, 0x4d4d440, (1U<<31) | 2888, (1U<<31) | 3003, 0x4c4d440, (1U<<31) | 3022, 0x4c4d440, (1U<<31) | 3022, 
+  (1U<<31) | 2898, (1U<<31) | 3030, 0x4c4c440, 0x4d4d440, 0x4c4c440, 0x4d4d440, (1U<<31) | 2888, (1U<<31) | 3003, 
+  0x4c4d, (1U<<31) | 3025, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c4d, (1U<<31) | 2397, 0x4c4c4d, (1U<<31) | 3015, 
+  0x4c4c4d, (1U<<31) | 3015, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 
+  (1U<<31) | 2870, (1U<<31) | 2985, (1U<<31) | 2870, (1U<<31) | 2985, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 
+  (1U<<31) | 211, (1U<<31) | 230, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2870, (1U<<31) | 2985, 
+  (1U<<31) | 2870, (1U<<31) | 2985, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 0x4c4c4d, (1U<<31) | 3015, 
+  0x4c4c4d, (1U<<31) | 3015, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 0x4c4c4c, 0x4d4d4d, 
+  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, 0x4c4c4d, (1U<<31) | 3015, 0x4c4c4c, 0x4d4d4d, 
+  0x4d4d4d, (1U<<31) | 3126, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2870, (1U<<31) | 2985, 
+  (1U<<31) | 2870, (1U<<31) | 2985, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 3126, (1U<<31) | 2879, (1U<<31) | 2994, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 
+  0x44d4d, (1U<<31) | 2470, 0x44d4d4d, (1U<<31) | 2468, 0x4c4d, (1U<<31) | 3025, 0x4c4d, (1U<<31) | 3025, 
+  0x4c4d4d, (1U<<31) | 3040, 0x4c4d4d, (1U<<31) | 3040, 0x4c4d, (1U<<31) | 3025, 0x4c4d, (1U<<31) | 3025, 
+  0x4c4c4c, 0x4d4d4d, 0x4c4d, (1U<<31) | 3025, 0x4c4d, (1U<<31) | 3025, 0x2e0, 0x2e0, 
+  0x2e0, 0x2e0, 0x42e0, 0x52e0, 0x442e2e2e, 0x442e2e2e, 0x442e2e2e, 0x442e2e2e, 
+  0x442e2e2e, 0x442e2e2e, 0x4442e2e, 0x4452e2e, 0x4442e2e, 0x4442e2e, 0x4442e2e, 0x4b4b4b, 
+  0x2e0, 0x3939, 0x2a2a, 0x44, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x393939, 0x393939, 0x444, 0x393939, 0x393939, 0x444, 0x444, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x444, 0x393939, 0x2a2a2a, 0x393939, 0x2a2a2a, 
+  0x2a2a2a, 0x2a2a2a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 
+  0x43b3b, 0x44a4a, 0x444, 0x2c2c2c, 0x42c2c, 0x4444, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x4444, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c2c, 
+  0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x42c2c2c, 0x4595959, 0x43b3b3b, 0x44a4a4a, 0x2c2c2c2c, 
+  0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x42c2c2c, 0x4595959, 0x43b3b3b, 0x44a4a4a, 0x44, 
+  0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c4, 0x594, 0x3b4, 0x2c4, 
+  0x4a4, 0x4, 0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c4, 0x594, 0x3b4, 0x2c4, 
+  0x4a4, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
+  0x44a4a, 0x44, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 
+  0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 
+  0x43b3b, 0x44a4a, 0x39390, 0x39390, 0x39390, 0x2a2a4, 0x2a2a4, 0x2a2a4, 
+  0x2a2a4, 0x2a2a4, 0x2a2a4, 0x2a2a0, 0x2a2a0, 0x2a2a0, 0x42c4, 0x4595, 
+  0x43b4, 0x44a4, 0x42c4, 0x4595, 0x43b4, 0x44a4, 0x440, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x4555, 
+  0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x393955, 0x4a4a5959, 
+  0x2c2c3b3b, 0x3b3b4a4a, 0x4a4a5959, 0x2c2c3b3b, 0x3b3b4a4a, 0x393955, 0x4455, 0x393955, 
+  0x393955, 0x2a2a55, 0x2a2a55, 0x393955, 0x393955, 0x393955, 0x4455, 0x393955, 
+  0x393955, 0x2a2a55, 0x2a2a55, 0x4a4a5959, 0x2c2c3b3b, 0x3b3b4a4a, 0x4a4a5959, 0x2c2c3b3b, 
+  0x3b3b4a4a, 0x393955, 0x454, 0x454, 0x454, 0x454, 0x454, 0x454, 
+  0x898989, 0x7a7a7a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x8959, 0x7a4a, 
+  0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 
+  0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 
+  0x898959, 0x7a7a4a, 0x898989, 0x7a7a7a, 0x7a7a6b, 0x89897a, 0x598989, 0x4a7a7a, 
+  0x7a89, 0x6b7a, 0x7a89, 0x6b7a, 0x5989, 0x4a7a, 0x5989, 0x4a7a, 
+  0x4a89, 0x3b7a, 0x4a89, 0x3b7a, 0x42c, 0x559, 0x43b, 0x44a, 
+  0x8989, 0x7a7a, (1U<<31) | 3979, 0x7a7a7a7a, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, 
+  0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, (1U<<31) | 3979, 0x7a7a7a7a, 0x898989, 0x7a7a7a, 
+  0x8989, 0x7a7a, 0x8989, 0x7a7a, 0x8989, 0x7a7a, 0x898959, 0x7a7a4a, 
+  0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 
+  0x898959, 0x7a7a4a, 0x8989, 0x7a7a, 0x898989, 0x7a7a7a, 0x898959, 0x7a7a4a, 
+  0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 
+  0x8959, 0x7a4a, 0x8959, 0x7a4a, 0x7a7a3b, 0x89894a, 0x8959, 0x7a4a, 
+  0x8959, 0x7a4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 
+  0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x442c2c, 0x545959, 
+  0x443b3b, 0x444a4a, 0x444, 0x2c42c2c, 0x5945959, 0x3b43b3b, 0x4a44a4a, 0x42e4, 
+  0x42e2c, 0x42e59, 0x42e3b, 0x42e4a, 0x42c, 0x459, 0x43b, 0x44a, 
+  0x42e4, 0x4444, 0x42e4, 0x4455, 0x3b3b3b3b, 0x4a4a4a4a, 0x3b3b3b3b, 0x4a4a4a4a, 
+  0x4455, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x393955, 0x393955, 0x393955, 
+  0x393955, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
+  0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 
+  0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x444, 0x2c2c, 0x4455, 0x3b3b3b3b, 0x4a4a4a4a, 0x3b3b3b3b, 0x4a4a4a4a, 
+  0x4455, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x455, 0x393939, 0x3b3b3b, 
+  0x4a4a4a, 0x393939, 0x39394, 0x39394, 0x392a39, 0x392a39, 0x393939, 0x444, 
+  0x393939, 0x444, 0x3b3b3b, 0x4a4a4a, 0x393955, 0x393955, 0x445, 0x445, 
+  0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c, 0x5959, 0x3b3b, 0x4a4a, 
+  0x2c2c, 0x5959, 0x3b3b, 0x4a4a, 0x2c2c2c, 0x42c2c, 0x2c2c2c, 0x42c2c, 
+  0x393939, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x2c2c, 0x5959, 0x3b3b, 0x4a4a, 0x393939, 0x2a2a2a, 0x394, 
+  0x394, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 
+  0x2a39, 0x39392a, 0x44439, 0x44439, 0x4439, 0x39392a, 0x4439, 0x39392a, 
+  0x4444, 0x2a4, 0x44, 0x439, 0x42a, 0x42c2c, 0x45959, 0x43b3b, 
+  0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x43b3b, 0x44a4a, 
+  0x455, 0x43939, 0x42a2a, 0x43939, 0x444, 0x43939, 0x42a2a, 0x43939, 
+  0x42a2a, 0x444, 0x43939, 0x42a2a, 0x42c2c2c, 0x4595959, 0x43b3b3b, 0x44a4a4a, 
+  0x42c2c2c, 0x4595959, 0x43b3b3b, 0x44a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42e2c0, 0x42e590, 0x42e3b0, 0x42e4a0, 
+  0x393939, 0x393939, 0x444, 0x393939, 0x393939, 0x444, 0x444, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x393939, 
+  0x2a2a2a, 0x393939, 0x2a2a2a, 0x2a2a2a, 0x2a2a2a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
+  0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 
+  0x4a4a4a4a, 0x440, 0x2c2c2c, 0x42c2c, 0x888, 0x777, 0x777, 0x888, 
+  0x777, 0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 
+  0x2fcf2f, 0x2fcf2f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1f1fcf1f, 0x1f1fcf1f, 
+  0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x74f7, 0x84f8, 
+  0x44f4, 0x44f4, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 0x1fcf1f, 
+  0x1fcf1f, 0x1fcf1f, 0x40, 0x40, 0x440, 0x40, 0x40, 0x440, 
+  0x0, 0x44, 0x44, 0x44, 0x85, 0x74, 0x47, 0x58, 
+  0x88, 0x77, 0x77, 0x4f0, 0x4f0, 0x77, 0x77, 0x87, 
+  0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x84, 
+  0x84, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 
+  0x85, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 
+  0x85, 0x777, 0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 
+  0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 0x88, 
+  0x77, 0x77, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 
+  0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 
+  0x75, 0x75, 0x75, 0x75, 0x74, 0x74, 0x74, 0x74, 
+  0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 
+  0x75, 0x75, 0x75, 0x75, 0x88, 0x77, 0x77, 0x88, 
+  0x77, 0x77, 0x8888, 0x7777, 0x7777, 0x8888, 0x7777, 0x7777, 
+  0x8888, 0x7777, 0x7777, 0x8888, 0x7777, 0x7777, 0x888, 0x777, 
+  0x777, 0x888, 0x777, 0x777, 0x4444, 0x48, 0x48, 0x48, 
+  0x48, 0x47, 0x47, 0x47, 0x47, 0x2e1, 0x2e1, 0x2e1, 
+  0x2e1, 0x51, 0x51, 0x51, 0x4cf2f, 0x4cf1f, 0x4cf4f, 0x4cf2f, 
+  0x4cf1f, 0x4cf4f, 0x88, 0x77, 0x77, 0x58, 0x58, 0x58, 
+  0x58, 0x57, 0x57, 0x57, 0x57, 0x448, (1U<<31) | 1967, (1U<<31) | 3182, 
+  0x444, 0x545, 0x0, 0x0, 0x0, 0x88, 0x77, 0x33, 
+  0x44, 0x55, 0xcf4f, 0x888, 0x777, 0x777, 0x888, 0x777, 
+  0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 0x444, 
+  0x444, 0x444, 0x555, 0x444, 0x555, 0x4444, 0xcf4f, 0xcf4f, 
+  0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0x88, 
+  0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 0x88, 0x77, 
+  0x77, 0x88, 0x77, 0x77, 0x4, 0x5, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4f4, 0x444, 
+  0x455, 0x455, 0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 
+  0x4444, 0x4444, 0x88, 0x77, 0x77, 0x4477, 0x4444, 0x4477, 
+  0x4444, 0x4477, 0x4444, 0x44747, 0x44444, 0x44747, 0x44444, 0x44747, 
+  0x44444, 0x44747, 0x44444, 0x4477, 0x4444, 0x77, 0x77, 0x77, 
+  0x77, 0x77, 0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 
+  0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 0x4453, 0x4453, 
+  0x4453, 0x4454, 0x4454, 0x4454, 0x4455, 0x4455, 0x4455, 0x4453, 
+  0x4453, 0x4453, (1U<<31) | 2162, (1U<<31) | 2162, (1U<<31) | 2162, (1U<<31) | 2178, (1U<<31) | 2178, (1U<<31) | 2178, 
+  (1U<<31) | 2195, (1U<<31) | 2195, (1U<<31) | 2195, (1U<<31) | 2162, (1U<<31) | 2162, (1U<<31) | 2162, (1U<<31) | 2153, (1U<<31) | 2153, 
+  (1U<<31) | 2153, (1U<<31) | 2169, (1U<<31) | 2169, (1U<<31) | 2169, (1U<<31) | 2153, (1U<<31) | 2153, (1U<<31) | 2153, 0x453, 
+  0x453, 0x453, 0x454, 0x454, 0x454, 0x455, 0x455, 0x455, 
+  0x453, 0x453, 0x453, (1U<<31) | 2499, (1U<<31) | 2499, (1U<<31) | 2499, (1U<<31) | 2513, (1U<<31) | 2513, 
+  (1U<<31) | 2513, (1U<<31) | 2528, (1U<<31) | 2528, (1U<<31) | 2528, (1U<<31) | 2499, (1U<<31) | 2499, (1U<<31) | 2499, (1U<<31) | 2491, 
+  (1U<<31) | 2491, (1U<<31) | 2491, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2491, (1U<<31) | 2491, (1U<<31) | 2491, 
+  0x44453, 0x44453, 0x44453, 0x44454, 0x44454, 0x44454, 0x44455, 0x44455, 
+  0x44455, 0x44453, 0x44453, 0x44453, (1U<<31) | 2078, (1U<<31) | 2078, (1U<<31) | 2078, (1U<<31) | 2096, 
+  (1U<<31) | 2096, (1U<<31) | 2096, (1U<<31) | 2115, (1U<<31) | 2115, (1U<<31) | 2115, (1U<<31) | 2078, (1U<<31) | 2078, (1U<<31) | 2078, 
+  (1U<<31) | 2068, (1U<<31) | 2068, (1U<<31) | 2068, (1U<<31) | 2086, (1U<<31) | 2086, (1U<<31) | 2086, (1U<<31) | 2068, (1U<<31) | 2068, 
+  (1U<<31) | 2068, 0x4453, 0x4453, 0x4453, 0x4454, 0x4454, 0x4454, 0x4455, 
+  0x4455, 0x4455, 0x4453, 0x4453, 0x4453, (1U<<31) | 2162, (1U<<31) | 2162, (1U<<31) | 2162, 
+  (1U<<31) | 2178, (1U<<31) | 2178, (1U<<31) | 2178, (1U<<31) | 2195, (1U<<31) | 2195, (1U<<31) | 2195, (1U<<31) | 2162, (1U<<31) | 2162, 
+  (1U<<31) | 2162, (1U<<31) | 2153, (1U<<31) | 2153, (1U<<31) | 2153, (1U<<31) | 2169, (1U<<31) | 2169, (1U<<31) | 2169, (1U<<31) | 2153, 
+  (1U<<31) | 2153, (1U<<31) | 2153, 0x44453, 0x44453, 0x44453, 0x44454, 0x44454, 0x44454, 
+  0x44455, 0x44455, 0x44455, 0x44453, 0x44453, 0x44453, (1U<<31) | 2078, (1U<<31) | 2078, 
+  (1U<<31) | 2078, (1U<<31) | 2096, (1U<<31) | 2096, (1U<<31) | 2096, (1U<<31) | 2115, (1U<<31) | 2115, (1U<<31) | 2115, (1U<<31) | 2078, 
+  (1U<<31) | 2078, (1U<<31) | 2078, (1U<<31) | 2068, (1U<<31) | 2068, (1U<<31) | 2068, (1U<<31) | 2086, (1U<<31) | 2086, (1U<<31) | 2086, 
+  (1U<<31) | 2068, (1U<<31) | 2068, (1U<<31) | 2068, 0x54, 0x54, 0x54, 0x54, 0x54, 
+  0x54, 0x34450, 0x34450, 0x34450, 0x44450, 0x44450, 0x44450, 0x54450, 
+  0x54450, 0x54450, 0x34450, 0x34450, 0x34450, 0x334450, 0x334450, 0x334450, 
+  0x444450, 0x444450, 0x444450, 0x554450, 0x554450, 0x554450, 0x334450, 0x334450, 
+  0x334450, 0x33334450, 0x33334450, 0x33334450, 0x44444450, 0x44444450, 0x44444450, 0x33334450, 
+  0x33334450, 0x33334450, 0x3450, 0x3450, 0x3450, 0x4450, 0x4450, 0x4450, 
+  0x5450, 0x5450, 0x5450, 0x3450, 0x3450, 0x3450, 0x33450, 0x33450, 
+  0x33450, 0x44450, 0x44450, 0x44450, 0x55450, 0x55450, 0x55450, 0x33450, 
+  0x33450, 0x33450, 0x3333450, 0x3333450, 0x3333450, 0x4444450, 0x4444450, 0x4444450, 
+  0x3333450, 0x3333450, 0x3333450, 0x344450, 0x344450, 0x344450, 0x444450, 0x444450, 
+  0x444450, 0x544450, 0x544450, 0x544450, 0x344450, 0x344450, 0x344450, 0x3344450, 
+  0x3344450, 0x3344450, 0x4444450, 0x4444450, 0x4444450, 0x5544450, 0x5544450, 0x5544450, 
+  0x3344450, 0x3344450, 0x3344450, (1U<<31) | 1105, (1U<<31) | 1105, (1U<<31) | 1105, (1U<<31) | 2051, (1U<<31) | 2051, 
+  (1U<<31) | 2051, (1U<<31) | 1105, (1U<<31) | 1105, (1U<<31) | 1105, 0x34450, 0x34450, 0x34450, 0x44450, 
+  0x44450, 0x44450, 0x54450, 0x54450, 0x54450, 0x34450, 0x34450, 0x34450, 
+  0x334450, 0x334450, 0x334450, 0x444450, 0x444450, 0x444450, 0x554450, 0x554450, 
+  0x554450, 0x334450, 0x334450, 0x334450, 0x33334450, 0x33334450, 0x33334450, 0x44444450, 
+  0x44444450, 0x44444450, 0x33334450, 0x33334450, 0x33334450, 0x344450, 0x344450, 0x344450, 
+  0x444450, 0x444450, 0x444450, 0x544450, 0x544450, 0x544450, 0x344450, 0x344450, 
+  0x344450, 0x3344450, 0x3344450, 0x3344450, 0x4444450, 0x4444450, 0x4444450, 0x5544450, 
+  0x5544450, 0x5544450, 0x3344450, 0x3344450, 0x3344450, (1U<<31) | 1105, (1U<<31) | 1105, (1U<<31) | 1105, 
+  (1U<<31) | 2051, (1U<<31) | 2051, (1U<<31) | 2051, (1U<<31) | 1105, (1U<<31) | 1105, (1U<<31) | 1105, 0x34450, 0x44450, 
+  0x34450, 0x334450, 0x444450, 0x334450, 0x33334450, 0x44444450, 0x33334450, 0x3450, 
+  0x4450, 0x3450, 0x33450, 0x44450, 0x33450, 0x3333450, 0x4444450, 0x3333450, 
+  0x344450, 0x444450, 0x344450, 0x3344450, 0x4444450, 0x3344450, (1U<<31) | 1105, (1U<<31) | 2051, 
+  (1U<<31) | 1105, 0x34450, 0x44450, 0x34450, 0x334450, 0x444450, 0x334450, 0x33334450, 
+  0x44444450, 0x33334450, 0x344450, 0x444450, 0x344450, 0x3344450, 0x4444450, 0x3344450, 
+  (1U<<31) | 1105, (1U<<31) | 2051, (1U<<31) | 1105, 0x55, (1U<<31) | 3506, (1U<<31) | 3494, (1U<<31) | 3494, (1U<<31) | 3424, 
+  (1U<<31) | 3413, (1U<<31) | 3413, (1U<<31) | 3350, (1U<<31) | 2202, (1U<<31) | 3340, (1U<<31) | 2185, (1U<<31) | 3340, (1U<<31) | 2185, 
+  (1U<<31) | 3550, (1U<<31) | 3539, (1U<<31) | 3539, (1U<<31) | 3464, (1U<<31) | 3454, (1U<<31) | 3454, (1U<<31) | 3386, (1U<<31) | 2534, 
+  (1U<<31) | 3377, (1U<<31) | 2519, (1U<<31) | 3377, (1U<<31) | 2519, (1U<<31) | 3696, (1U<<31) | 3681, (1U<<31) | 3681, (1U<<31) | 3506, 
+  (1U<<31) | 3494, (1U<<31) | 3494, (1U<<31) | 3424, (1U<<31) | 2123, (1U<<31) | 3413, (1U<<31) | 2104, (1U<<31) | 3413, (1U<<31) | 2104, 
+  (1U<<31) | 3752, (1U<<31) | 3738, (1U<<31) | 3738, (1U<<31) | 3550, (1U<<31) | 3539, (1U<<31) | 3539, (1U<<31) | 3464, (1U<<31) | 2202, 
+  (1U<<31) | 3454, (1U<<31) | 2185, (1U<<31) | 3454, (1U<<31) | 2185, (1U<<31) | 3924, (1U<<31) | 3907, (1U<<31) | 3907, (1U<<31) | 3644, 
+  (1U<<31) | 3632, (1U<<31) | 3632, (1U<<31) | 3550, (1U<<31) | 2123, (1U<<31) | 3539, (1U<<31) | 2104, (1U<<31) | 3539, (1U<<31) | 2104, 
+  (1U<<31) | 3596, (1U<<31) | 3583, (1U<<31) | 3583, (1U<<31) | 3506, (1U<<31) | 3494, (1U<<31) | 3494, (1U<<31) | 3644, (1U<<31) | 3632, 
+  (1U<<31) | 3632, (1U<<31) | 3550, (1U<<31) | 3539, (1U<<31) | 3539, (1U<<31) | 3518, (1U<<31) | 3483, (1U<<31) | 3483, (1U<<31) | 3435, 
+  (1U<<31) | 3403, (1U<<31) | 3403, (1U<<31) | 3360, (1U<<31) | 2212, (1U<<31) | 3331, (1U<<31) | 2169, (1U<<31) | 3331, (1U<<31) | 2169, 
+  (1U<<31) | 3561, (1U<<31) | 3529, (1U<<31) | 3529, (1U<<31) | 3474, (1U<<31) | 3445, (1U<<31) | 3445, (1U<<31) | 3395, (1U<<31) | 2543, 
+  (1U<<31) | 3369, (1U<<31) | 2505, (1U<<31) | 3369, (1U<<31) | 2505, (1U<<31) | 3711, (1U<<31) | 3667, (1U<<31) | 3667, (1U<<31) | 3518, 
+  (1U<<31) | 3483, (1U<<31) | 3483, (1U<<31) | 3435, (1U<<31) | 2134, (1U<<31) | 3403, (1U<<31) | 2086, (1U<<31) | 3403, (1U<<31) | 2086, 
+  (1U<<31) | 3766, (1U<<31) | 3725, (1U<<31) | 3725, (1U<<31) | 3561, (1U<<31) | 3529, (1U<<31) | 3529, (1U<<31) | 3474, (1U<<31) | 2212, 
+  (1U<<31) | 3445, (1U<<31) | 2169, (1U<<31) | 3445, (1U<<31) | 2169, (1U<<31) | 3941, (1U<<31) | 3891, (1U<<31) | 3891, (1U<<31) | 3656, 
+  (1U<<31) | 3621, (1U<<31) | 3621, (1U<<31) | 3561, (1U<<31) | 2134, (1U<<31) | 3529, (1U<<31) | 2086, (1U<<31) | 3529, (1U<<31) | 2086, 
+  (1U<<31) | 3609, (1U<<31) | 3571, (1U<<31) | 3571, (1U<<31) | 3518, (1U<<31) | 3483, (1U<<31) | 3483, (1U<<31) | 3656, (1U<<31) | 3621, 
+  (1U<<31) | 3621, (1U<<31) | 3561, (1U<<31) | 3529, (1U<<31) | 3529, (1U<<31) | 3098, 0x4f5, (1U<<31) | 3464, (1U<<31) | 3454, 
+  (1U<<31) | 3454, (1U<<31) | 3464, (1U<<31) | 3454, (1U<<31) | 3454, (1U<<31) | 3464, (1U<<31) | 3454, (1U<<31) | 3454, (1U<<31) | 3464, 
+  (1U<<31) | 3454, (1U<<31) | 3454, (1U<<31) | 3474, (1U<<31) | 3445, (1U<<31) | 3445, (1U<<31) | 3474, (1U<<31) | 3445, (1U<<31) | 3445, 
+  (1U<<31) | 3474, (1U<<31) | 3445, (1U<<31) | 3445, (1U<<31) | 3474, (1U<<31) | 3445, (1U<<31) | 3445, 0x88, 0x77, 
+  0x77, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 
+  0x54, 0x48, 0x48, 0x48, 0x48, 0x47, 0x47, 0x47, 
+  0x47, 0x58, 0x58, 0x58, 0x58, 0x57, 0x57, 0x57, 
+  0x57, 0x11, 0x141, 0x11, 0x141, 0x14, 0x144, 0x11, 
+  0x141, (1U<<31) | 3054, (1U<<31) | 2413, (1U<<31) | 3054, (1U<<31) | 2413, (1U<<31) | 3054, (1U<<31) | 2413, (1U<<31) | 3054, 
+  (1U<<31) | 2413, (1U<<31) | 3074, (1U<<31) | 3086, (1U<<31) | 2434, (1U<<31) | 2447, (1U<<31) | 3074, (1U<<31) | 3086, (1U<<31) | 2434, 
+  (1U<<31) | 2447, (1U<<31) | 3231, (1U<<31) | 3231, (1U<<31) | 3791, (1U<<31) | 3791, (1U<<31) | 3281, (1U<<31) | 3281, (1U<<31) | 3841, 
+  (1U<<31) | 3841, (1U<<31) | 3231, (1U<<31) | 3231, (1U<<31) | 3791, (1U<<31) | 3791, (1U<<31) | 3281, (1U<<31) | 3281, (1U<<31) | 3841, 
+  (1U<<31) | 3841, (1U<<31) | 3231, (1U<<31) | 3231, (1U<<31) | 3791, (1U<<31) | 3791, (1U<<31) | 3281, (1U<<31) | 3281, (1U<<31) | 3841, 
+  (1U<<31) | 3841, (1U<<31) | 3231, (1U<<31) | 3231, (1U<<31) | 3791, (1U<<31) | 3791, (1U<<31) | 3281, (1U<<31) | 3281, (1U<<31) | 3841, 
+  (1U<<31) | 3841, (1U<<31) | 3219, (1U<<31) | 3779, (1U<<31) | 2625, (1U<<31) | 2638, (1U<<31) | 3219, (1U<<31) | 3779, (1U<<31) | 2625, 
+  (1U<<31) | 2638, 0x595959, 0x595959, 0x595959, 0x595959, 0x2c2c2c2c, 0x2c2c2c, 0x595959, 
+  0x3b3b3b, 0x4a4a4a, 0x5959, 0x445959, 0x444a4a, 0x40, 0x0, 0x442e0, 
+  0x442e0, 0x442e0, 0x442e0, 0x2e2c, 0x2e3b, 0x2e4a, 0x2e2c, 0x2e2c, 
+  0x2e4a, 0x2e4a, 0x3b, 0x4a0, 0x2e2c0, 0x2e3b0, 0x2e4a0, 0x2e4a0, 
+  0x2e4a0, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, (1U<<31) | 4262, 0x4a4a4a, (1U<<31) | 4260, (1U<<31) | 4260, 
+  0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 
+  0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c59, 0x44a7a, 0x44a7a, 0x2c4, 
+  0x7a7a4a, 0x7a7a44, 0x7a7a4a, 0x7a7a44, 0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 
+  0x3b3b3b, 0x3b3b44, 0x4a4a4a, 0x4a4a44, 0x7a7a4a, 0x7a7a44, 0x7a7a4a, 0x7a7a44, 
+  0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 0x3b3b3b, 0x3b3b44, 0x4a4a4a, 0x4a4a44, 
+  0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 0x3b3b3b, 0x3b3b44, 0x4a4a4a, 0x4a4a44, 
+  0x2c2c2c, 0x2c2c44, 0x3b3b3b, 0x3b3b44, 0x4a4a4a, 0x4a4a44, 0x2c2c2c, 0x2c2c44, 
+  0x3b3b3b, 0x3b3b44, 0x4a4a4a, 0x4a4a44, 0x47a4a, 0x47a4a, 0x2c4, 0x7a7a, 
+  0x2c2c, 0x7a7a, 0x7a7a7a7a, 0x7a7a7a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
+  0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x3b3b3b3b, 0x3b3b3b3b, 0x7a7a7a, 0x2c2c2c, 
+  0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x3b3b3b3b, 
+  0x4a2c2c4a, 0x4a3b3b4a, 0x4a3b3b4a, 0x4a2c2c4a, 0x4a3b3b4a, 0x4a3b3b4a, 0x2c2c3b, 0x3b3b4a, 
+  0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 
+  0x3b3b4a, 0x4a4a59, 0x7a7a7a7a, 0x2c4a4a4a, 0x4a4a3b, 0x59594a, 0x59594a, 0x3b3b2c, 
+  0x3b3b2c, 0x4a4a3b, 0x4a4a3b, 0x59594a, 0x3b3b2c, 0x4a4a3b, 0x5959, (1U<<31) | 4264, 
+  0x4a4a, 0x7a7a, 0x7a7a, 0x7a7a, 0x7a7a, 0x7a7a, 0x2c2c2c, 0x595959, 
+  0x59595959, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x7a7a, 0x4a4a4a4a, 
+  0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x4a4a4a, 0x4a4a4a, 0x2c2c2c, 
+  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x4a4a4a, (1U<<31) | 4262, 
+  0x4a4a4a, (1U<<31) | 4260, (1U<<31) | 4260, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 
+  0x4a4a4a, 0x4a4a4a, 0x4a2c4a, 0x4a3b4a, 0x4a2c4a, 0x4a4a4a, 0x3b4a, 0x2c3b, 
+  0x3b4a, 0x4a59, 0x3b4a, 0x2c3b, 0x3b4a, 0x4a59, 0x555, 0x1f0, 
+  0x2e0, 0x2e0, 0x2e0, 0x2e0, 0x2e0, 0x2e0, 0x2e0, 0x2e0, 
+  0x555, 0x555, 0x444, 0x444, 0x5, 0x5, 0x5, 0x5, 
+  0x1, 0x0, 0x1f0, 0x8a8a, 0x8a8a8a, 0x8a8a8a, 0x8a8a, 0x8a8a, 
+  0x8a8a, 0x8a8a, 0x8a8a8a, 0x8a8a8a, 0x8a8a8a, 0x8a8a8a, 0x8a8a, 0x8a8a, 
+  0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, 0x48a8a8a, (1U<<31) | 3992, 
+  (1U<<31) | 3992, (1U<<31) | 3992, (1U<<31) | 3992, 0x8a8a8a, 0x8a8a8a, 0x8a8a, 0x8a8a, (1U<<31) | 3992, 
+  (1U<<31) | 3992, (1U<<31) | 3992, (1U<<31) | 3992, (1U<<31) | 3992, 0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, 
+  0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, 0x8a8a, (1U<<31) | 3992, 0x8a8a8a, 0x8a8a8a, 
+  0x8a8a8a, (1U<<31) | 3992, (1U<<31) | 3992, 0x8a8a8a, 0x8a8a8a, (1U<<31) | 3992, (1U<<31) | 3992, (1U<<31) | 3992, 
+  (1U<<31) | 3992, (1U<<31) | 3992, (1U<<31) | 3992, 0x48a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 
+  0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 
+  0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a, 0x2e8a0, 0x2e8a0, 0x2e8a0, 0x2e8a0, 
+  0x2e8a0, 0x2e8a0, 0x2e8a0, 0x2e8a0, 0x2e8a0, 0x2e8a0, 0x50, 0x50, 
+  0x50, 0x50, 0x0, 0x44, 0x4444, 0x4444, 0x4444, 0x4444, 
+  0x44, 0x4, 0x44, 0x4, 0x4, 0x44, 0x4, 0x44, 
+  0x4, 0x5, 0x2e89, 0x2e89, 0x52e4a, 0x52e4a, 0x2e4a, 0x2e4a, 
+  0x2e890, 0x2e890, 0x52e4a0, 0x52e4a0, 0x2e4a0, 0x2e4a0, 0x888, 0x888, 
+  0x898959, 0x898944, 0x7a7a4a, 0x7a7a44, 0x898959, 0x898944, 0x7a7a4a, 0x7a7a44, 
+  0x898959, 0x898944, 0x7a7a4a, 0x7a7a44, 0x897a, 0x894a, 0x894a, 0x3b7a, 
+  0x7a89, 0x7a7a, 0x597a, 0x4a89, 0x597a, 0x4a89, 0x898989, 0x7a7a7a, 
+  0x595989, 0x4a4a7a, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, 0x8989, 0x8989, 
+  0x7a7a, 0x7a7a, 0x8989, 0x7a7a, 0x48959, 0x47a4a, 0x8959, 0x7a4a, 
+  0x8959, 0x7a4a, 0x45959, 0x4594a4a, 0x4a4a4a, 0x7a7a, 0x0, (1U<<31) | 796, 
+  0x44a4a0, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
+  0x2f2f, 0x2f2f, 0x4, 0x4, 0x42e4, 0x5e50, 0x40, 0x40, 
+  0x50, 0x42e4, 0x42e4, 0x42e0, 0x52f4, 0x4, 0x2c2c2c, 0x2c2c2c2c, 
+  0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c2c, 0x2c2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x4a4a4a, 
+  0x595959, 0x3b3b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c59, (1U<<31) | 965, 
+  (1U<<31) | 2803, (1U<<31) | 3210, (1U<<31) | 1228, (1U<<31) | 965, (1U<<31) | 2803, (1U<<31) | 3210, (1U<<31) | 1228, (1U<<31) | 965, 
+  (1U<<31) | 2803, (1U<<31) | 3210, (1U<<31) | 1228, 0x4a4a4a, (1U<<31) | 1738, (1U<<31) | 2253, (1U<<31) | 2571, (1U<<31) | 1883, 
+  0x42c2c, 0x44a4a, 0x45959, 0x43b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 
+  0x42c2c2c, (1U<<31) | 1760, 0x44a4a4a, (1U<<31) | 2231, 0x43b3b3b, (1U<<31) | 1905, 0x42c2c2c, (1U<<31) | 1760, 
+  0x44a4a4a, (1U<<31) | 2231, 0x43b3b3b, (1U<<31) | 1905, (1U<<31) | 3970, (1U<<31) | 3957, (1U<<31) | 3970, (1U<<31) | 3970, 
+  (1U<<31) | 3957, (1U<<31) | 3957, 0x2c2c2c, (1U<<31) | 965, 0x4a4a4a, (1U<<31) | 2803, 0x3b3b3b, (1U<<31) | 1228, 
+  0x2c2c2c, (1U<<31) | 965, 0x4a4a4a, (1U<<31) | 2803, 0x3b3b3b, (1U<<31) | 1228, 0x2c2c2c, (1U<<31) | 965, 
+  0x4a4a4a, (1U<<31) | 2803, 0x3b3b3b, (1U<<31) | 1228, 0x2c2c2c, (1U<<31) | 965, 0x4a4a4a, (1U<<31) | 2803, 
+  0x3b3b3b, (1U<<31) | 1228, 0x448989, 0x447a7a, 0x4898989, 0x47a7a7a, 0x4898989, 0x47a7a7a, 
+  (1U<<31) | 2719, (1U<<31) | 2651, 0x3b2c2c3b, 0x594a4a59, 0x2c59592c, 0x4a3b3b4a, 0x2c2c3b, 0x4a4a59, 
+  0x59592c, 0x3b3b4a, 0x2c2c, (1U<<31) | 974, 0x4a4a, (1U<<31) | 2787, 0x3b3b, (1U<<31) | 1237, 
+  0x42e2c, 0x2e42c, 0x2e42c, 0x3b2c2c3b, 0x594a4a59, 0x4a3b3b4a, 0x2c2c2c2c, 0x4a4a4a4a, 
+  0x3b3b3b3b, 0x3b2c2c3b, 0x594a4a59, 0x4a3b3b4a, 0x2c2c2c2c, 0x4a4a4a4a, 0x3b3b3b3b, 0x3b2c2c3b, 
+  0x594a4a59, 0x4a3b3b4a, 0x3b2c2c3b, 0x594a4a59, 0x4a3b3b4a, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, 
+  0x2c2c2c, 0x4a4a4a, 0x3b3b3b, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, 0x2c2c2c, 0x4a4a4a, 
+  0x3b3b3b, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, (1U<<31) | 1770, 
+  0x4595959, 0x2c2c2c2c, 0x4a4a3b, (1U<<31) | 2794, 0x59594a, (1U<<31) | 3201, 0x3b3b2c, (1U<<31) | 1219, 
+  0x4a4a3b, (1U<<31) | 2794, 0x59594a, (1U<<31) | 3201, 0x3b3b2c, (1U<<31) | 1219, 0x2c2c2c2c, 0x2c2c2c2c, 
+  0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x42c2c2c, 
+  0x2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x2e42c0, (1U<<31) | 1738, (1U<<31) | 1748, 
+  (1U<<31) | 2253, (1U<<31) | 2241, (1U<<31) | 1883, (1U<<31) | 1893, (1U<<31) | 1738, (1U<<31) | 1748, (1U<<31) | 2253, (1U<<31) | 2241, 
+  (1U<<31) | 1883, (1U<<31) | 1893, 0x2e42c0, 0x2c2c4a, 0x4a4a59, 0x3b3b59, 0x3b3b4a, 0x4a4a2c, 
+  0x59592c, 0x2c2c4, 0x2c3b, 0x4a59, 0x3b4a, 0x2c3b, 0x4a59, 0x2c3b, 
+  0x4a59, 0x3b4a, 0x3b4a, 0x2c3b, 0x4a59, 0x3b4a, 0x1f, 0x4, 
+  0x2e, 0x1f1f, 0x1f41f, 0x41f, 0x0, 0x2e40, (1U<<31) | 4155, (1U<<31) | 4152, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 4152, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4152, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4152, 
+  (1U<<31) | 4155, (1U<<31) | 4152, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4152, (1U<<31) | 4152, 0x2e4422, 0x2e5522, 
+  0x2e4422, 0x2e5522, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x595959, 0x5a5a5a, 0x5b5b5b, 
+  0x595959, 0x5a5a5a, 0x5b5b5b, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x5959, 0x25959, 
+  0x8a8a8a, 0x7b7b7b, (1U<<31) | 3992, 0x7b7b7b7b, 0x28a8a8a, 0x27b7b7b, 0x8a7a, 0x8a4a, 
+  0x7b4b, 0x4b7b, 0x8a4a, 0x7b4b, 0x27b7b7b, 0x8a8a8a, 0x7b7b7b, 0x8a8a8a, 
+  0x7b7b7b, 0x2e2d, 0x592e89, 0x5a2e8a, 0x4a2e7a, 0x4b2e7b, 0x89592e0, 0x8a5a2e0, 
+  0x7a4a2e0, 0x7b4b2e0, 0x8a8a8a, 0x7b7b7b, 0x8a8a8a, 0x7b7b7b, 0x8a4, 0x7b4, 
+  0x5a5a4, 0x5a5a4, 0x5a5a4, 0x7b7b, 0x48a8a, 0x47b7b, 0x7b7b, 0x8a8a, 
+  0x7b7b, 0x598989, 0x5a8a8a, 0x4a7a7a, 0x4b7b7b, 0x89894, 0x8a8a4, 0x7a7a4, 
+  0x7b7b4, 0x89894, 0x8a8a4, 0x7a7a4, 0x7b7b4, 0x89894, 0x8a8a4, 0x7a7a4, 
+  0x7b7b4, 0x0, 0x0, (1U<<31) | 422, (1U<<31) | 499, (1U<<31) | 811, (1U<<31) | 876, (1U<<31) | 677, 
+  (1U<<31) | 754, (1U<<31) | 546, (1U<<31) | 601, (1U<<31) | 444, (1U<<31) | 456, (1U<<31) | 823, (1U<<31) | 888, (1U<<31) | 699, 
+  (1U<<31) | 711, (1U<<31) | 558, (1U<<31) | 613, 0x4a2e4a, 0x4b2e4b, 0x592e59, 0x5a2e5a, 0x4a4a2e0, 
+  0x4b4b2e0, 0x59592e0, 0x5a5a2e0, 0x22d2d3c, 0x4b4b3c, 0x3c3c2d, 0x4b4b3c, 0x3c3c2d, 
+  0x2d2d2d, 0x3c3c3c, 0x2d2d2d, 0x3c3c3c, 0x2d2d2d2d, 0x4b4b4b, 0x4b7b7b, 0x4b4b4b, 
+  0x3c3c3c, 0x3c3c3c, 0x4b4b4b, 0x3c3c3c, 0x3c3c3c, 0x2d2d3c, 0x3c3c4b, 0x2d4, 
+  0x4b4b5a, 0x3c3c3c, 0x3c3c3c, 0x3c3c3c, 0x4b4b5a, 0x2d2d5a, 0x2d2d2d, 0x2d2d2d, 
+  0x4b4b4b, 0x3c3c3c, 0x4a4b4b, 0x595a5a, 0x3b3c3c, 0x44b4b, 0x45a5a, 0x43c3c, 
+  0x4a4a4a, 0x4b4b4b, 0x595959, 0x5a5a5a, 0x4a4b4b, 0x3b3c3c, 0x44b4b, 0x43c3c, 
+  0x4a4a4a, 0x4b4b4b, 0x4a4b4b, 0x595a5a, 0x3b3c3c, 0x44b4b, 0x45a5a, 0x43c3c, 
+  0x4a4a4a, 0x4b4b4b, 0x595959, 0x5a5a5a, 0x2d2d2d, 0x3c3c3c, 0x2d2d2d, 0x3c3c3c, 
+  0x259, 0x25a, 0x25b, 0x34a, 0x34b, 0x34c, 0x458989, 0x447a7a, 
+  0x457a7a, 0x4894, 0x4895, 0x4894, 0x4895, 0x47a4, 0x47a5, 0x47a4, 
+  0x47a5, 0x48989, 0x447a7a, 0x458989, 0x457a7a, 0x428b8b8b, 0x437c7c7c, (1U<<31) | 1504, 
+  (1U<<31) | 1838, (1U<<31) | 1482, (1U<<31) | 1849, (1U<<31) | 1636, (1U<<31) | 1603, (1U<<31) | 1614, (1U<<31) | 1625, (1U<<31) | 1548, 
+  (1U<<31) | 1526, (1U<<31) | 1592, (1U<<31) | 1570, (1U<<31) | 1537, (1U<<31) | 1515, (1U<<31) | 1581, (1U<<31) | 1559, (1U<<31) | 1449, 
+  (1U<<31) | 1416, (1U<<31) | 1460, (1U<<31) | 1427, (1U<<31) | 1438, (1U<<31) | 1405, (1U<<31) | 1493, (1U<<31) | 1471, 0x442e4b20, 
+  0x442e4c30, 0x442e5b20, 0x442e5b20, (1U<<31) | 1727, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, 0x4898919, 
+  0x48a8a1a, 0x448b8b1b, 0x47a7a1a, 0x47b7b1b, 0x447c7c1c, 0x42489892, 0x4247a7a2, 0x32c2c2c, 
+  0x42d2d2d, (1U<<31) | 3167, 0x24a4a4a, 0x24b4b4b, 0x34c4c4c, 0x2898989, 0x28a8a8a, 0x28b8b8b, 
+  0x27a7a7a, 0x27b7b7b, 0x37c7c7c, 0x2595959, 0x25a5a5a, 0x25b5b5b, 0x32c2e0, 0x42d2e0, 
+  (1U<<31) | 3150, 0x24a2e0, 0x24b2e0, 0x34c2e0, 0x2892e0, 0x28a2e0, 0x28b2e0, 0x27a2e0, 
+  0x27b2e0, 0x37c2e0, 0x2592e0, 0x25a2e0, 0x25b2e0, 0x23b2e0, 0x33c2e0, 0x43d2e0, 
+  0x23b3b3b, 0x33c3c3c, 0x43d3d3d, 0x24a4a4a, 0x24b4b4b, 0x34c4c4c, 0x2595959, 0x25a5a5a, 
+  0x25b5b5b, 0x27a4a7a, 0x27b4b7b, 0x437c4c7c, 0x24a894a, 0x24a8a4a, 0x424b8b4b, 0x27a897a, 
+  0x27a8a7a, 0x427b8b7b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 0x24a894a, 0x24a8a4a, 0x424b8b4b, 
+  0x2598959, 0x25a8a5a, 0x425b8b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x2897a89, 0x28a7a8a, 
+  0x428b7b8b, 0x2597a59, 0x25a7a5a, 0x425b7b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x2597a59, 
+  0x25a7a5a, 0x425b7b5b, 0x2895989, 0x28a5a8a, 0x428b5b8b, 0x27a597a, 0x27a5a7a, 0x427b5b7b, 
+  (1U<<31) | 1670, (1U<<31) | 1693, 0x24a894a, 0x24a8a4a, 0x424b8b4b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 
+  0x24a894a, 0x24a8a4a, 0x424b8b4b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 0x24a7a4a, 0x24b7b4b, 
+  0x434c7c4c, 0x2597a59, 0x25a7a5a, 0x425b7b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x2597a59, 
+  0x25a7a5a, 0x425b7b5b, 0x27a4a7a, 0x27b4b7b, 0x437c4c7c, 0x2895989, 0x28a5a8a, 0x428b5b8b, 
+  0x27a597a, 0x27a5a7a, 0x427b5b7b, (1U<<31) | 270, (1U<<31) | 1135, (1U<<31) | 1935, (1U<<31) | 1727, (1U<<31) | 1872, 
+  (1U<<31) | 1704, (1U<<31) | 1659, 0x32c2c2c, 0x42d2d2d, (1U<<31) | 3167, 0x24a4a4a, 0x24b4b4b, 0x34c4c4c, 
+  0x32c2e2c, 0x42d2e2d, (1U<<31) | 3157, 0x24a2e4a, 0x24b2e4b, 0x34c2e4c, 0x2892e89, 0x28a2e8a, 
+  0x28b2e8b, 0x27a2e7a, 0x27b2e7b, 0x37c2e7c, 0x2592e59, 0x25a2e5a, 0x25b2e5b, 0x23b2e3b, 
+  0x33c2e3c, 0x43d2e3d, 0x2898989, 0x28a8a8a, 0x28b8b8b, 0x27a7a7a, 0x27b7b7b, 0x37c7c7c, 
+  0x2595959, 0x25a5a5a, 0x25b5b5b, 0x23b3b3b, 0x33c3c3c, 0x43d3d3d, (1U<<31) | 357, (1U<<31) | 379, 
+  (1U<<31) | 1393, (1U<<31) | 313, (1U<<31) | 335, (1U<<31) | 1826, (1U<<31) | 1381, (1U<<31) | 1369, 0x24892, 0x248a2, 
+  0x248b2, 0x247a2, 0x247b2, 0x347c3, 0x24892, 0x247a2, 0x2898989, 0x28a8a8a, 
+  0x428b8b8b, 0x27a7a7a, 0x27b7b7b, 0x437c7c7c, (1U<<31) | 1704, (1U<<31) | 1659, 0x28948989, 0x28a48a8a, 
+  (1U<<31) | 1717, 0x27a47a7a, 0x27b47b7b, (1U<<31) | 1862, (1U<<31) | 1681, (1U<<31) | 1647, (1U<<31) | 1727, (1U<<31) | 1872, 
+  (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 1727, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 1727, (1U<<31) | 1872, 
+  (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, 
+  (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 900, (1U<<31) | 930, 
+  (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, 
+  (1U<<31) | 766, (1U<<31) | 1199, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 250, (1U<<31) | 1115, (1U<<31) | 1915, (1U<<31) | 401, 
+  (1U<<31) | 478, (1U<<31) | 1168, 0x22c4a2c, 0x22c4b2c, 0x32c4c2c, 0x24a2e0, 0x24b2e0, 0x34c2e0, 
+  0x23b4a3b, 0x23b4b3b, 0x33c4c3c, 0x24a2e0, 0x24b2e0, 0x34c2e0, 0x22c592c, 0x22c5a2c, 
+  0x22c5b2c, 0x2592e0, 0x25a2e0, 0x25b2e0, 0x24a594a, 0x24a5a4a, 0x24b5b4b, 0x2592e0, 
+  0x25a2e0, 0x25b2e0, 0x23b593b, 0x23b5a3b, 0x23b5b3b, 0x2592e0, 0x25a2e0, 0x25b2e0, 
+  0x22c3b2c, 0x32c3c2c, 0x42d3d2d, 0x23b2e0, 0x33c2e0, 0x43d2e0, 0x22c4a2c, 0x22c4b2c, 
+  0x32c4c2c, 0x24a2e0, 0x24b2e0, 0x34c2e0, 0x23b4a3b, 0x23b4b3b, 0x33c4c3c, 0x24a2e0, 
+  0x24b2e0, 0x34c2e0, 0x22c592c, 0x22c5a2c, 0x22c5b2c, 0x2592e0, 0x25a2e0, 0x25b2e0, 
+  0x24a594a, 0x24a5a4a, 0x24b5b4b, 0x2592e0, 0x25a2e0, 0x25b2e0, 0x23b593b, 0x23b5a3b, 
+  0x23b5b3b, 0x2592e0, 0x25a2e0, 0x25b2e0, 0x22c3b2c, 0x32c3c2c, 0x42d3d2d, 0x23b2e0, 
+  0x33c2e0, 0x43d2e0, 0x22c4a2c, 0x22c4b2c, 0x32c4c2c, 0x24a2e0, 0x24b2e0, 0x34c2e0, 
+  0x23b4a3b, 0x23b4b3b, 0x33c4c3c, 0x24a2e0, 0x24b2e0, 0x34c2e0, 0x22c592c, 0x22c5a2c, 
+  0x22c5b2c, 0x2592e0, 0x25a2e0, 0x25b2e0, 0x24a594a, 0x24a5a4a, 0x24b5b4b, 0x2592e0, 
+  0x25a2e0, 0x25b2e0, 0x23b593b, 0x23b5a3b, 0x23b5b3b, 0x2592e0, 0x25a2e0, 0x25b2e0, 
+  0x22c3b2c, 0x32c3c2c, 0x42d3d2d, 0x23b2e0, 0x33c2e0, 0x43d2e0, (1U<<31) | 1095, (1U<<31) | 1780, 
+  (1U<<31) | 3165, 0x24a44a4a, 0x24b44b4b, 0x34c44c4c, 0x25945959, 0x25a45a5a, 0x25b45b5b, (1U<<31) | 434, 
+  (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, 0x24a44a4a, 0x24b44b4b, 0x34c44c4c, 
+  0x25945959, 0x25a45a5a, 0x25b45b5b, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, 
+  (1U<<31) | 656, (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 1095, 
+  (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 302, (1U<<31) | 324, (1U<<31) | 1157, 
+  (1U<<31) | 346, (1U<<31) | 368, (1U<<31) | 390, (1U<<31) | 800, (1U<<31) | 865, (1U<<31) | 1715, (1U<<31) | 666, (1U<<31) | 743, 
+  (1U<<31) | 1860, (1U<<31) | 1985, (1U<<31) | 1973, 0x28948989, 0x28a48a8a, (1U<<31) | 1717, 0x27a47a7a, 0x27b47b7b, 
+  (1U<<31) | 1862, (1U<<31) | 1985, (1U<<31) | 1973, 0x28948989, 0x28a48a8a, (1U<<31) | 1717, 0x27a47a7a, 0x27b47b7b, 
+  (1U<<31) | 1862, (1U<<31) | 1985, (1U<<31) | 1973, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, 
+  (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, 0x2898989, 0x28a8a8a, 0x428b8b8b, 0x27a7a7a, 0x27b7b7b, 
+  0x437c7c7c, (1U<<31) | 1704, (1U<<31) | 1659, 0x27a2e0, (1U<<31) | 1727, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, 
+  0x27a3b7a, 0x27b3b7b, 0x437c3c7c, 0x23b47a3b, 0x23b47b3b, 0x33c47c3c, (1U<<31) | 855, (1U<<31) | 920, 
+  (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 855, (1U<<31) | 920, 
+  (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, 
+  (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, 
+  (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, 
+  (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 260, 
+  (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 835, (1U<<31) | 900, (1U<<31) | 930, (1U<<31) | 689, (1U<<31) | 766, (1U<<31) | 1199, 
+  (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 434, (1U<<31) | 511, 
+  (1U<<31) | 1189, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 845, (1U<<31) | 910, (1U<<31) | 940, (1U<<31) | 723, 
+  (1U<<31) | 776, (1U<<31) | 1209, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, 
+  (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 411, (1U<<31) | 488, 
+  (1U<<31) | 1178, (1U<<31) | 535, (1U<<31) | 590, (1U<<31) | 645, (1U<<31) | 281, (1U<<31) | 1146, (1U<<31) | 1946, (1U<<31) | 434, 
+  (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, 
+  (1U<<31) | 411, (1U<<31) | 488, (1U<<31) | 1178, (1U<<31) | 535, (1U<<31) | 590, (1U<<31) | 645, (1U<<31) | 281, (1U<<31) | 1146, 
+  (1U<<31) | 1946, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 260, 
+  (1U<<31) | 1125, (1U<<31) | 1925, 0x32c2c3, 0x42d2d4, (1U<<31) | 3175, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, 
+  (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, 
+  (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, 
+  (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, 
+  (1U<<31) | 1872, (1U<<31) | 855, (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 1704, 
+  (1U<<31) | 1659, (1U<<31) | 357, (1U<<31) | 379, (1U<<31) | 1393, (1U<<31) | 313, (1U<<31) | 335, (1U<<31) | 1826, (1U<<31) | 1381, 
+  (1U<<31) | 1369, (1U<<31) | 302, (1U<<31) | 324, (1U<<31) | 1157, (1U<<31) | 346, (1U<<31) | 368, (1U<<31) | 390, (1U<<31) | 855, 
+  (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 855, 
+  (1U<<31) | 920, (1U<<31) | 1727, (1U<<31) | 733, (1U<<31) | 786, (1U<<31) | 1872, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, 
+  (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, 
+  (1U<<31) | 1189, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 845, 
+  (1U<<31) | 910, (1U<<31) | 940, (1U<<31) | 723, (1U<<31) | 776, (1U<<31) | 1209, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, 
+  (1U<<31) | 1095, (1U<<31) | 1780, (1U<<31) | 3165, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 570, (1U<<31) | 625, 
+  (1U<<31) | 656, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, (1U<<31) | 260, 
+  (1U<<31) | 1125, (1U<<31) | 1925, (1U<<31) | 434, (1U<<31) | 511, (1U<<31) | 1189, (1U<<31) | 570, (1U<<31) | 625, (1U<<31) | 656, 
+  (1U<<31) | 260, (1U<<31) | 1125, (1U<<31) | 1925, 0x4c4c3d, (1U<<31) | 1244, 0x4c4c3d, (1U<<31) | 1244, 0x4c4c5b, 
+  0x3d3d3d, 0x3d3d3d, 0x3d3d3d, 0x4c4c5b, (1U<<31) | 1083, (1U<<31) | 1076, 0x4a4c4c, 0x595b5b, 
+  0x3b3d3d, 0x44c4c, 0x45b5b, 0x43d3d, 0x4c4c4c, 0x5b5b5b, 0x3b3b3b, 0x3c3c3c, 
+  0x3d3d3d, 0x4a4c4c, 0x595959, 0x595a5a, 0x595b5b, 0x3b3d3d, 0x44c4c, 0x45959, 
+  0x45a5a, 0x45b5b, 0x43d3d, 0x4c4c4c, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x3b3b3b, 
+  0x3c3c3c, 0x3d3d3d, 0x4a4c4c, 0x595b5b, 0x3b3d3d, 0x44c4c, 0x45b5b, 0x43d3d, 
+  0x4c4c4c, 0x5b5b5b, 0x3b3b3b, 0x3c3c3c, 0x3d3d3d, 0x2898989, 0x28a8a8a, 0x28b8b8b, 
+  0x27a7a7a, 0x27b7b7b, 0x37c7c7c, (1U<<31) | 855, (1U<<31) | 733, 0x428b8b8b, 0x437c7c7c, (1U<<31) | 1704, 
+  (1U<<31) | 1659, 0x2898989, 0x28a8a8a, 0x28b8b8b, 0x27a7a7a, 0x27b7b7b, 0x37c7c7c, (1U<<31) | 855, 
+  (1U<<31) | 733, 0x428b8b8b, 0x437c7c7c, (1U<<31) | 1704, (1U<<31) | 1659, (1U<<31) | 2767, (1U<<31) | 2347, (1U<<31) | 2601, 
+  (1U<<31) | 2709, (1U<<31) | 2777, (1U<<31) | 2293, (1U<<31) | 2611, (1U<<31) | 2699, (1U<<31) | 2737, (1U<<31) | 2561, (1U<<31) | 2757, 
+  (1U<<31) | 2591, (1U<<31) | 2669, (1U<<31) | 2263, (1U<<31) | 2679, (1U<<31) | 2273, 0x442e4b20, 0x442e4c30, 0x442e5b20, 
+  0x442e5b20, (1U<<31) | 2727, (1U<<31) | 2551, (1U<<31) | 2747, (1U<<31) | 2581, (1U<<31) | 2659, (1U<<31) | 2221, (1U<<31) | 2689, 
+  (1U<<31) | 2283, 0x2e8b, 0x2e7c, 0x4489894, 0x447a7a4, 0x4894, 0x4895, 0x4894, 
+  0x4895, 0x47a4, 0x47a5, 0x47a4, 0x47a5, 0x5b8b8b, 0x4c7c7c, 0x444, 
+  0x555, 0x444, 0x555, 0x444, 0x555, 0x444, 0x555, 0x2e0, 
+  0x2e0, 0x2e0, 0x2e0, 0x4, 0x5, 0x40, 0x50, (1U<<31) | 3979, 
+  (1U<<31) | 3992, 0x7a7a7a7a, 0x7b7b7b7b, (1U<<31) | 3979, 0x7a7a7a7a, (1U<<31) | 3979, (1U<<31) | 3992, 0x7a7a7a7a, 
+  0x7b7b7b7b, (1U<<31) | 3979, (1U<<31) | 3992, 0x7a7a7a7a, 0x7b7b7b7b, (1U<<31) | 3979, 0x7a7a7a7a, (1U<<31) | 3979, 
+  (1U<<31) | 3992, 0x7a7a7a7a, 0x7b7b7b7b, (1U<<31) | 3979, (1U<<31) | 3992, 0x7a7a7a7a, 0x7b7b7b7b, (1U<<31) | 3979, 
+  0x7a7a7a7a, (1U<<31) | 3979, (1U<<31) | 3992, 0x7a7a7a7a, 0x7b7b7b7b, (1U<<31) | 3979, 0x7a7a7a7a, (1U<<31) | 3979, 
+  0x7a7a7a7a, 0x2e0, 0x2e0, 0x2e0, 0x2e0, 0x40, 0x50, 0x20, 
+  0x2e0, 0x4442, 0x4452, 0x4440, 0x4450, 0x0, 0x0, (1U<<31) | 1064, 
+  (1U<<31) | 4150, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 1090, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 3133, 
+  (1U<<31) | 2476, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4137, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 3146, (1U<<31) | 3146, (1U<<31) | 3146, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 3146, (1U<<31) | 3146, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 3146, (1U<<31) | 3146, (1U<<31) | 3146, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 
+  (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, (1U<<31) | 4155, 0x442e0, 0x4440, 0x2595959, 0x25a5a5a, 
+  0x25b5b5b, 0x4, 0x5, 0x4, 0x5, 0x4, 0x4, 0x45, 
+  (1U<<31) | 1957, (1U<<31) | 2487, (1U<<31) | 2621, (1U<<31) | 1957, (1U<<31) | 2487, (1U<<31) | 2621, 0x44, 0x55, 
+  0x5, 0x2e5, 0x2e0, 0x0, 0x2e0, 0x2e0, 0x2e2e, 0x2e2e2e, 
+  0x0, 0x4a4a4a, 0x4a4a4a, 0x4a4a4a, 0x24a4a4a, 0x4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 
+  0x2e, 0x27a7a7a, 0x27a7a7a, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 
+  0x7a7a4, (1U<<31) | 3988, (1U<<31) | 4146, (1U<<31) | 4140, (1U<<31) | 3966, 0x47a7a, 0x57a7a, 0x7a4, 
+  0x7a5, (1U<<31) | 3988, (1U<<31) | 3966, 0x7a4, 0x7a5, 0x2e0, 0x7a7a7a, 0x7a7a7a, 
+  0x7a7a7a, 0x7a7a7a, 0x7a4, (1U<<31) | 1091, 0x7a7a, 0x7a7a, 0x7a7a, 0x7a7a, 
+  0x0, 0x7a7a, 0x7a7a, 0x2e0, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 
+  0x7a7a4, 0x7a7a4, 0x2e0, 0x2898989, 0x2898989, 0x89894, 0x89894, 0x89894, 
+  0x89894, 0x89894, 0x89894, 0x4a7a, 0x894a, 0x897a, 0x7a4a, 0x894, 
+  0x895, 0x897a7a, 0x48989, 0x58989, 0x7a8989, 0x894a, 0x7a4a, 0x894, 
+  0x895, 0x0, 0x2e2c2c0, 0x898989, 0x898989, 0x0, 0x898989, 0x898989, 
+  0x894, 0x4a4a3b, 0x3b3b2c, 0x3b3b2c, 0x2c2c2c, 0x3b3b3b, 0x2c2c2c, 0x3b3b3b, 
+  0x0, 0x3b3b4a, 0x2c4, 0x3b3b3b, 0x3b3b3b, 0x4a4a59, 0x2c2c59, 0x4a4a4a, 
+  0x595959, 0x3b3b3b, 0x44a4a, 0x45959, 0x43b3b, 0x4a4a4a, 0x3b3b3b, 0x44a4a, 
+  0x43b3b, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x44a4a, 0x45959, 0x43b3b, 0x2c2c2c, 
+  0x3b3b3b, 0x2c2c2c, 0x3b3b3b, 0x8989, 0x8989, 0x89894, 0x89894, 0x89894, 
+  0x89894, 0x89894, 0x89894, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, 0x898989, 
+  0x7a7a7a, 0x2e2c, 0x442e0, 0x440, (1U<<31) | 3979, 0x7a7a7a7a, 0x2898989, 0x27a7a7a, 
+  0x27a7a7a, 0x22c2c3b, 0x4a4a3b, 0x2c2c2c2c, 0x3b3b, 0x4a4a59, 0x59594, 0x59594, 
+  0x59594, 0x48989, 0x47a7a, 0x4898989, 0x47a7a7a, 0x344, 0x444, 0x244, 
+  0x555, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, (1U<<31) | 292, 
+  0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c2c, 0x2c5959, 
+  0x225959, 0x595959, 0x22595959, (1U<<31) | 4152, (1U<<31) | 4152, (1U<<31) | 4152, (1U<<31) | 4155, 0x4a4a4a, 
+  (1U<<31) | 4155, 0x3b3b3b, (1U<<31) | 4155, 0x3b3b3b, (1U<<31) | 4155, 0x4a4a4a, (1U<<31) | 4155, 0x3b3b3b, 
+  (1U<<31) | 4155, 0x3b3b3b, (1U<<31) | 4155, 0x2c2c3b, (1U<<31) | 4155, 0x3b3b3b, (1U<<31) | 4155, 0x2c2c2c, 
+  (1U<<31) | 4155, 0x2c2c2c, (1U<<31) | 4155, 0x4a4a4a, (1U<<31) | 4155, 0x3b3b3b, 0x2e4422, 0x2e5522, 
+  0x444, 0x555, 0x3b7a, 0x3b7b, 0x47a3b, 0x47b3b, 0x22c2c2c, 0x22d2d2d, 
+  (1U<<31) | 242, 0x22c2c2c, 0x22d2d2d, (1U<<31) | 242, 0x2c2c2c, 0x2d2d2d, (1U<<31) | 1076, 0x40, 
+  0x50, 0x40, 0x50, 0x40, 0x2e40, 0x2e50, 0x2e40, 0x2e50, 
+  0x20, 0x4, 0x0, 0x45, 0x8989, 0x8a8a, 0x7a7a, 0x7b7b, 
+  0x8989, 0x7a7a, 0x22c2c2c, 0x24a4a4a, 0x2595959, 0x22c2c2c, 0x24a4a4a, 0x2595959, 
+  0x23b3b3b, 0x23b3b3b, (1U<<31) | 580, (1U<<31) | 635, (1U<<31) | 468, (1U<<31) | 521, 0x2c4a, 0x2c59, 
+  0x2c3b, 0x4a59, 0x2c4a, 0x2c59, 0x2c3b, 0x4a59, 0x3b4a, 0x3b59, 
+  0x3b4a, 0x3b59, 0x2c3b, 0x4a59, 0x3b4a, 0x4a4a4a4a, 0x594a4a59, 0x594a4a59, 
+  0x4a4a4a4a, 0x594a4a59, 0x594a4a59, 0x4a3b3b4a, 0x3b3b3b3b, 0x4a3b3b4a, 0x3b3b3b3b, 0x4a3b3b4a, 
+  0x4a3b3b4a, 0x2c2c2c2c, 0x2c2c2c, 0x22c2c, 0x4a4a4a, 0x24a4a, 0x595959, 0x25959, 
+  0x3b3b3b, 0x23b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c2c, 0x4a4a4a, 
+  0x595959, 0x3b3b3b, 0x442e0, 0x442e0, 0x442e0, 0x442e0, 0x442e0, 0x442e0, 
+  0x442e0, 0x442e0, 0x442e0, 0x442e0, 0x442e0, 0x442e0, 0x4440, 0x4, 
+  0x44, 0x2e2e, 0x44f0, 0x0, 0x4f0, 0x40, 0x4444, (1U<<31) | 2061, 
+  0x4f0, 0x4f0, 0x4f4, 0x4f0, 0x4, 0x4, 0x4, 0x44, 
+  0x44f, 0xcf4f, 0x4f4, 0x4f4, 0x4f4, 0x2e4f0, 0x2e4f0, 0x2e4f0, 
+  0x2e4f0, 0x2e4f0, 0x44f4, 0x4f4, 0x4f0, 0x4f0, 0x44f0, 0x44f0, 
+  0x44f4, 0x44f0, 0x4f4, 0x44f0, 0xcf4f0, 0x44f0, 0x2e4f0, 0x440, 
+  0x44f0, 0x44f0, 0xcf4f0, 0x40, 0x44f0, 0x2e4f0, 0x444, 0x0, 
+  0x4f0, 0x4f4, 0x4f4, 0x2e, 0x444, 0
+};
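+
+// In the table above, values written as (1U<<31) | N use the high bit as a
+// "long encoding" flag: N matches one of the /* N */ offsets in
+// IIT_LongEncodingTable below (for example, (1U<<31) | 3970 selects the row
+// at offset 3970). Plain hex values instead pack the entire type signature
+// directly, as up to eight 4-bit codes, least-significant nibble first.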
+
+static const unsigned char IIT_LongEncodingTable[] = {
+  /* 0 */ 0, 4, 4, 15, 0, 15, 0, 15, 0, 15, 0, 1, 1, 0,
+  /* 14 */ 15, 2, 15, 10, 15, 17, 10, 4, 4, 1, 1, 1, 1, 1, 0,
+  /* 29 */ 0, 15, 2, 15, 9, 15, 17, 4, 1, 1, 1, 1, 0,
+  /* 42 */ 4, 4, 4, 15, 1, 11, 4, 1, 1, 1, 0,
+  /* 53 */ 0, 4, 4, 15, 3, 15, 3, 1, 1, 0,
+  /* 63 */ 0, 15, 0, 10, 4, 4, 4, 4, 4, 4, 4, 1, 1, 0,
+  /* 77 */ 0, 15, 2, 10, 4, 4, 4, 1, 1, 0,
+  /* 87 */ 21, 15, 2, 1, 15, 2, 15, 2, 1, 0,
+  /* 97 */ 15, 2, 15, 2, 15, 2, 15, 2, 1, 0,
+  /* 107 */ 0, 15, 3, 33, 3, 31, 3, 1, 0,
+  /* 116 */ 0, 15, 3, 34, 1, 0, 4, 31, 3, 1, 0,
+  /* 127 */ 0, 15, 3, 15, 12, 4, 31, 3, 1, 0,
+  /* 137 */ 15, 1, 15, 12, 15, 1, 4, 4, 1, 0,
+  /* 147 */ 15, 1, 15, 1, 15, 1, 4, 4, 4, 1, 0,
+  /* 158 */ 7, 27, 3, 7, 7, 4, 4, 1, 0,
+  /* 167 */ 21, 1, 5, 1, 0,
+  /* 172 */ 21, 15, 1, 1, 15, 1, 15, 1, 0,
+  /* 181 */ 0, 19, 15, 1, 0,
+  /* 186 */ 0, 15, 4, 15, 12, 15, 17, 1, 0,
+  /* 195 */ 2, 18, 1, 0,
+  /* 199 */ 15, 1, 25, 1, 0,
+  /* 204 */ 36, 1, 36, 1, 36, 1, 0,
+  /* 211 */ 21, 12, 4, 36, 1, 12, 4, 12, 4, 36, 1, 0,
+  /* 223 */ 37, 1, 37, 1, 37, 1, 0,
+  /* 230 */ 21, 13, 4, 37, 1, 13, 4, 13, 4, 37, 1, 0,
+  /* 242 */ 16, 2, 16, 2, 16, 2, 2, 0,
+  /* 250 */ 11, 3, 12, 2, 12, 2, 11, 3, 2, 0,
+  /* 260 */ 11, 3, 11, 3, 11, 3, 11, 3, 2, 0,
+  /* 270 */ 11, 3, 12, 2, 12, 2, 4, 11, 3, 2, 0,
+  /* 281 */ 11, 3, 11, 3, 11, 3, 4, 11, 3, 2, 0,
+  /* 292 */ 12, 2, 12, 2, 4, 12, 2, 4, 2, 0,
+  /* 302 */ 10, 4, 10, 4, 10, 4, 10, 4, 4, 2, 0,
+  /* 313 */ 10, 7, 10, 7, 10, 7, 10, 4, 4, 2, 0,
+  /* 324 */ 11, 4, 11, 4, 11, 4, 11, 4, 4, 2, 0,
+  /* 335 */ 11, 7, 11, 7, 11, 7, 11, 4, 4, 2, 0,
+  /* 346 */ 9, 5, 9, 5, 9, 5, 9, 5, 4, 2, 0,
+  /* 357 */ 9, 8, 9, 8, 9, 8, 9, 5, 4, 2, 0,
+  /* 368 */ 10, 5, 10, 5, 10, 5, 10, 5, 4, 2, 0,
+  /* 379 */ 10, 8, 10, 8, 10, 8, 10, 5, 4, 2, 0,
+  /* 390 */ 11, 5, 11, 5, 11, 5, 11, 5, 4, 2, 0,
+  /* 401 */ 10, 4, 11, 3, 11, 3, 10, 4, 2, 0,
+  /* 411 */ 10, 4, 10, 4, 10, 4, 4, 10, 4, 2, 0,
+  /* 422 */ 10, 4, 10, 4, 14, 2, 10, 4, 10, 4, 2, 0,
+  /* 434 */ 10, 4, 10, 4, 10, 4, 10, 4, 2, 0,
+  /* 444 */ 10, 4, 10, 4, 14, 2, 9, 5, 10, 4, 2, 0,
+  /* 456 */ 10, 4, 10, 4, 14, 2, 10, 5, 10, 4, 2, 0,
+  /* 468 */ 10, 7, 10, 7, 10, 7, 10, 4, 2, 0,
+  /* 478 */ 11, 4, 12, 3, 12, 3, 11, 4, 2, 0,
+  /* 488 */ 11, 4, 11, 4, 11, 4, 4, 11, 4, 2, 0,
+  /* 499 */ 11, 4, 11, 4, 14, 2, 11, 4, 11, 4, 2, 0,
+  /* 511 */ 11, 4, 11, 4, 11, 4, 11, 4, 2, 0,
+  /* 521 */ 11, 7, 11, 7, 11, 7, 11, 4, 2, 0,
+  /* 531 */ 27, 4, 2, 0,
+  /* 535 */ 9, 5, 9, 5, 9, 5, 4, 9, 5, 2, 0,
+  /* 546 */ 9, 5, 9, 5, 14, 2, 10, 4, 9, 5, 2, 0,
+  /* 558 */ 9, 5, 9, 5, 14, 2, 9, 5, 9, 5, 2, 0,
+  /* 570 */ 9, 5, 9, 5, 9, 5, 9, 5, 2, 0,
+  /* 580 */ 9, 8, 9, 8, 9, 8, 9, 5, 2, 0,
+  /* 590 */ 10, 5, 10, 5, 10, 5, 4, 10, 5, 2, 0,
+  /* 601 */ 10, 5, 10, 5, 14, 2, 10, 4, 10, 5, 2, 0,
+  /* 613 */ 10, 5, 10, 5, 14, 2, 10, 5, 10, 5, 2, 0,
+  /* 625 */ 10, 5, 10, 5, 10, 5, 10, 5, 2, 0,
+  /* 635 */ 10, 8, 10, 8, 10, 8, 10, 5, 2, 0,
+  /* 645 */ 11, 5, 11, 5, 11, 5, 4, 11, 5, 2, 0,
+  /* 656 */ 11, 5, 11, 5, 11, 5, 11, 5, 2, 0,
+  /* 666 */ 10, 7, 10, 7, 10, 7, 4, 10, 7, 2, 0,
+  /* 677 */ 10, 7, 10, 7, 14, 2, 10, 4, 10, 7, 2, 0,
+  /* 689 */ 10, 7, 10, 7, 10, 4, 10, 7, 2, 0,
+  /* 699 */ 10, 7, 10, 7, 14, 2, 9, 5, 10, 7, 2, 0,
+  /* 711 */ 10, 7, 10, 7, 14, 2, 10, 5, 10, 7, 2, 0,
+  /* 723 */ 10, 7, 10, 4, 10, 7, 10, 7, 2, 0,
+  /* 733 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 0,
+  /* 743 */ 11, 7, 11, 7, 11, 7, 4, 11, 7, 2, 0,
+  /* 754 */ 11, 7, 11, 7, 14, 2, 11, 4, 11, 7, 2, 0,
+  /* 766 */ 11, 7, 11, 7, 11, 4, 11, 7, 2, 0,
+  /* 776 */ 11, 7, 11, 4, 11, 7, 11, 7, 2, 0,
+  /* 786 */ 11, 7, 11, 7, 11, 7, 11, 7, 2, 0,
+  /* 796 */ 27, 7, 2, 0,
+  /* 800 */ 9, 8, 9, 8, 9, 8, 4, 9, 8, 2, 0,
+  /* 811 */ 9, 8, 9, 8, 14, 2, 10, 4, 9, 8, 2, 0,
+  /* 823 */ 9, 8, 9, 8, 14, 2, 9, 5, 9, 8, 2, 0,
+  /* 835 */ 9, 8, 9, 8, 9, 5, 9, 8, 2, 0,
+  /* 845 */ 9, 8, 9, 5, 9, 8, 9, 8, 2, 0,
+  /* 855 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 0,
+  /* 865 */ 10, 8, 10, 8, 10, 8, 4, 10, 8, 2, 0,
+  /* 876 */ 10, 8, 10, 8, 14, 2, 10, 4, 10, 8, 2, 0,
+  /* 888 */ 10, 8, 10, 8, 14, 2, 10, 5, 10, 8, 2, 0,
+  /* 900 */ 10, 8, 10, 8, 10, 5, 10, 8, 2, 0,
+  /* 910 */ 10, 8, 10, 5, 10, 8, 10, 8, 2, 0,
+  /* 920 */ 10, 8, 10, 8, 10, 8, 10, 8, 2, 0,
+  /* 930 */ 11, 8, 11, 8, 11, 5, 11, 8, 2, 0,
+  /* 940 */ 11, 8, 11, 5, 11, 8, 11, 8, 2, 0,
+  /* 950 */ 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 0,
+  /* 965 */ 21, 12, 2, 4, 12, 2, 12, 2, 0,
+  /* 974 */ 21, 12, 2, 4, 12, 2, 0,
+  /* 981 */ 18, 4, 14, 2, 14, 2, 14, 2, 0,
+  /* 990 */ 21, 4, 14, 2, 14, 2, 4, 14, 2, 0,
+  /* 1000 */ 21, 5, 14, 2, 14, 2, 4, 14, 2, 0,
+  /* 1010 */ 21, 4, 14, 2, 14, 2, 4, 4, 14, 2, 0,
+  /* 1021 */ 21, 5, 14, 2, 14, 2, 4, 4, 14, 2, 0,
+  /* 1032 */ 14, 2, 14, 2, 4, 4, 4, 14, 2, 0,
+  /* 1042 */ 21, 4, 4, 14, 2, 0,
+  /* 1048 */ 14, 2, 14, 2, 4, 4, 5, 14, 2, 0,
+  /* 1058 */ 21, 5, 5, 14, 2, 0,
+  /* 1064 */ 0, 17, 17, 14, 2, 0,
+  /* 1070 */ 14, 2, 18, 14, 2, 0,
+  /* 1076 */ 16, 2, 16, 2, 16, 2, 0,
+  /* 1083 */ 11, 5, 16, 2, 16, 2, 0,
+  /* 1090 */ 17, 17, 17, 2, 0,
+  /* 1095 */ 12, 2, 12, 2, 12, 2, 12, 2, 3, 0,
+  /* 1105 */ 0, 5, 4, 4, 4, 3, 3, 3, 3, 0,
+  /* 1115 */ 12, 3, 13, 2, 13, 2, 12, 3, 3, 0,
+  /* 1125 */ 12, 3, 12, 3, 12, 3, 12, 3, 3, 0,
+  /* 1135 */ 12, 3, 13, 2, 13, 2, 4, 12, 3, 3, 0,
+  /* 1146 */ 12, 3, 12, 3, 12, 3, 4, 12, 3, 3, 0,
+  /* 1157 */ 12, 4, 12, 4, 12, 4, 12, 4, 4, 3, 0,
+  /* 1168 */ 12, 4, 13, 3, 13, 3, 12, 4, 3, 0,
+  /* 1178 */ 12, 4, 12, 4, 12, 4, 4, 12, 4, 3, 0,
+  /* 1189 */ 12, 4, 12, 4, 12, 4, 12, 4, 3, 0,
+  /* 1199 */ 12, 7, 12, 7, 12, 4, 12, 7, 3, 0,
+  /* 1209 */ 12, 7, 12, 4, 12, 7, 12, 7, 3, 0,
+  /* 1219 */ 21, 12, 2, 4, 11, 3, 11, 3, 0,
+  /* 1228 */ 21, 11, 3, 4, 11, 3, 11, 3, 0,
+  /* 1237 */ 21, 11, 3, 4, 11, 3, 0,
+  /* 1244 */ 16, 2, 13, 3, 13, 3, 0,
+  /* 1251 */ 15, 3, 33, 3, 31, 3, 1, 15, 3, 0,
+  /* 1261 */ 15, 3, 34, 1, 0, 4, 31, 3, 1, 15, 3, 0,
+  /* 1273 */ 15, 3, 15, 12, 4, 31, 3, 1, 15, 3, 0,
+  /* 1284 */ 15, 3, 15, 3, 12, 2, 12, 2, 12, 2, 12, 2, 15, 3, 0,
+  /* 1299 */ 15, 3, 15, 3, 12, 2, 12, 2, 12, 2, 15, 3, 0,
+  /* 1312 */ 15, 3, 15, 3, 12, 2, 12, 2, 15, 3, 0,
+  /* 1323 */ 15, 3, 25, 3, 0,
+  /* 1328 */ 15, 3, 25, 3, 25, 3, 0,
+  /* 1335 */ 15, 3, 26, 3, 0,
+  /* 1340 */ 15, 3, 26, 3, 26, 3, 0,
+  /* 1347 */ 15, 1, 25, 1, 4, 0,
+  /* 1353 */ 12, 4, 12, 4, 36, 1, 4, 0,
+  /* 1361 */ 13, 4, 13, 4, 37, 1, 4, 0,
+  /* 1369 */ 10, 7, 10, 7, 10, 7, 10, 4, 4, 2, 4, 0,
+  /* 1381 */ 9, 8, 9, 8, 9, 8, 9, 5, 4, 2, 4, 0,
+  /* 1393 */ 11, 8, 11, 8, 11, 8, 11, 5, 4, 2, 4, 0,
+  /* 1405 */ 10, 4, 10, 4, 14, 2, 10, 4, 2, 4, 0,
+  /* 1416 */ 9, 5, 9, 5, 14, 2, 10, 4, 2, 4, 0,
+  /* 1427 */ 10, 5, 10, 5, 14, 2, 10, 4, 2, 4, 0,
+  /* 1438 */ 10, 7, 10, 7, 14, 2, 10, 4, 2, 4, 0,
+  /* 1449 */ 9, 8, 9, 8, 14, 2, 10, 4, 2, 4, 0,
+  /* 1460 */ 10, 8, 10, 8, 14, 2, 10, 4, 2, 4, 0,
+  /* 1471 */ 11, 4, 11, 4, 14, 2, 11, 4, 2, 4, 0,
+  /* 1482 */ 11, 5, 11, 5, 14, 2, 11, 4, 2, 4, 0,
+  /* 1493 */ 11, 7, 11, 7, 14, 2, 11, 4, 2, 4, 0,
+  /* 1504 */ 11, 8, 11, 8, 14, 2, 11, 4, 2, 4, 0,
+  /* 1515 */ 10, 4, 10, 4, 14, 2, 9, 5, 2, 4, 0,
+  /* 1526 */ 9, 5, 9, 5, 14, 2, 9, 5, 2, 4, 0,
+  /* 1537 */ 10, 7, 10, 7, 14, 2, 9, 5, 2, 4, 0,
+  /* 1548 */ 9, 8, 9, 8, 14, 2, 9, 5, 2, 4, 0,
+  /* 1559 */ 10, 4, 10, 4, 14, 2, 10, 5, 2, 4, 0,
+  /* 1570 */ 10, 5, 10, 5, 14, 2, 10, 5, 2, 4, 0,
+  /* 1581 */ 10, 7, 10, 7, 14, 2, 10, 5, 2, 4, 0,
+  /* 1592 */ 10, 8, 10, 8, 14, 2, 10, 5, 2, 4, 0,
+  /* 1603 */ 11, 4, 11, 4, 14, 2, 11, 5, 2, 4, 0,
+  /* 1614 */ 11, 5, 11, 5, 14, 2, 11, 5, 2, 4, 0,
+  /* 1625 */ 11, 7, 11, 7, 14, 2, 11, 5, 2, 4, 0,
+  /* 1636 */ 11, 8, 11, 8, 14, 2, 11, 5, 2, 4, 0,
+  /* 1647 */ 10, 7, 10, 7, 10, 7, 4, 10, 7, 2, 4, 0,
+  /* 1659 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 4, 0,
+  /* 1670 */ 10, 7, 10, 7, 9, 8, 10, 7, 2, 4, 0,
+  /* 1681 */ 9, 8, 9, 8, 9, 8, 4, 9, 8, 2, 4, 0,
+  /* 1693 */ 9, 8, 9, 8, 10, 7, 9, 8, 2, 4, 0,
+  /* 1704 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 4, 0,
+  /* 1715 */ 11, 8, 11, 8, 11, 8, 4, 11, 8, 2, 4, 0,
+  /* 1727 */ 11, 8, 11, 8, 11, 8, 11, 8, 2, 4, 0,
+  /* 1738 */ 12, 2, 12, 2, 12, 2, 12, 2, 4, 0,
+  /* 1748 */ 21, 12, 2, 4, 12, 2, 12, 2, 12, 2, 4, 0,
+  /* 1760 */ 21, 12, 2, 4, 12, 2, 12, 2, 4, 0,
+  /* 1770 */ 12, 2, 9, 5, 9, 5, 12, 2, 4, 0,
+  /* 1780 */ 13, 2, 13, 2, 13, 2, 13, 2, 4, 0,
+  /* 1790 */ 15, 1, 15, 1, 14, 2, 14, 2, 4, 0,
+  /* 1800 */ 15, 4, 15, 4, 14, 2, 14, 2, 4, 0,
+  /* 1810 */ 21, 4, 14, 2, 14, 2, 4, 0,
+  /* 1818 */ 21, 5, 14, 2, 14, 2, 4, 0,
+  /* 1826 */ 12, 7, 12, 7, 12, 7, 12, 4, 4, 3, 4, 0,
+  /* 1838 */ 12, 4, 12, 4, 14, 2, 12, 4, 3, 4, 0,
+  /* 1849 */ 12, 7, 12, 7, 14, 2, 12, 4, 3, 4, 0,
+  /* 1860 */ 12, 7, 12, 7, 12, 7, 4, 12, 7, 3, 4, 0,
+  /* 1872 */ 12, 7, 12, 7, 12, 7, 12, 7, 3, 4, 0,
+  /* 1883 */ 11, 3, 11, 3, 11, 3, 11, 3, 4, 0,
+  /* 1893 */ 21, 11, 3, 4, 11, 3, 11, 3, 11, 3, 4, 0,
+  /* 1905 */ 21, 11, 3, 4, 11, 3, 11, 3, 4, 0,
+  /* 1915 */ 13, 3, 16, 2, 16, 2, 13, 3, 4, 0,
+  /* 1925 */ 13, 3, 13, 3, 13, 3, 13, 3, 4, 0,
+  /* 1935 */ 13, 3, 16, 2, 16, 2, 4, 13, 3, 4, 0,
+  /* 1946 */ 13, 3, 13, 3, 13, 3, 4, 13, 3, 4, 0,
+  /* 1957 */ 21, 3, 4, 0,
+  /* 1961 */ 15, 3, 26, 3, 4, 0,
+  /* 1967 */ 21, 4, 1, 4, 4, 0,
+  /* 1973 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 4, 4, 0,
+  /* 1985 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 4, 4, 0,
+  /* 1997 */ 23, 15, 3, 15, 3, 15, 3, 15, 3, 15, 12, 15, 3, 15, 3, 15, 3, 15, 3, 4, 4, 0,
+  /* 2019 */ 22, 15, 3, 15, 3, 15, 3, 15, 12, 15, 3, 15, 3, 15, 3, 4, 4, 0,
+  /* 2037 */ 21, 15, 3, 15, 3, 15, 12, 15, 3, 15, 3, 4, 4, 0,
+  /* 2051 */ 0, 5, 4, 4, 4, 4, 4, 4, 4, 0,
+  /* 2061 */ 21, 4, 4, 4, 4, 4, 0,
+  /* 2068 */ 23, 3, 3, 3, 3, 5, 4, 4, 4, 0,
+  /* 2078 */ 21, 3, 3, 5, 4, 4, 4, 0,
+  /* 2086 */ 23, 4, 4, 4, 4, 5, 4, 4, 4, 0,
+  /* 2096 */ 21, 4, 4, 5, 4, 4, 4, 0,
+  /* 2104 */ 23, 4, 4, 4, 4, 5, 5, 4, 4, 4, 0,
+  /* 2115 */ 21, 5, 5, 5, 4, 4, 4, 0,
+  /* 2123 */ 23, 7, 7, 7, 7, 5, 5, 4, 4, 4, 0,
+  /* 2134 */ 23, 7, 7, 7, 7, 5, 4, 4, 4, 0,
+  /* 2144 */ 16, 4, 16, 4, 16, 4, 4, 4, 0,
+  /* 2153 */ 23, 3, 3, 3, 3, 5, 4, 4, 0,
+  /* 2162 */ 21, 3, 3, 5, 4, 4, 0,
+  /* 2169 */ 23, 4, 4, 4, 4, 5, 4, 4, 0,
+  /* 2178 */ 21, 4, 4, 5, 4, 4, 0,
+  /* 2185 */ 23, 4, 4, 4, 4, 5, 5, 4, 4, 0,
+  /* 2195 */ 21, 5, 5, 5, 4, 4, 0,
+  /* 2202 */ 23, 7, 7, 7, 7, 5, 5, 4, 4, 0,
+  /* 2212 */ 23, 7, 7, 7, 7, 5, 4, 4, 0,
+  /* 2221 */ 0, 14, 2, 2, 10, 4, 10, 4, 4, 0,
+  /* 2231 */ 21, 10, 4, 4, 10, 4, 10, 4, 4, 0,
+  /* 2241 */ 21, 10, 4, 4, 10, 4, 10, 4, 10, 4, 4, 0,
+  /* 2253 */ 10, 4, 10, 4, 10, 4, 10, 4, 4, 0,
+  /* 2263 */ 0, 14, 2, 2, 9, 5, 10, 4, 4, 0,
+  /* 2273 */ 0, 14, 2, 2, 10, 5, 10, 4, 4, 0,
+  /* 2283 */ 0, 14, 2, 2, 11, 4, 11, 4, 4, 0,
+  /* 2293 */ 0, 14, 2, 2, 11, 5, 11, 4, 4, 0,
+  /* 2303 */ 0, 15, 4, 15, 11, 15, 11, 4, 4, 0,
+  /* 2313 */ 0, 15, 4, 15, 11, 15, 11, 15, 11, 4, 4, 0,
+  /* 2325 */ 0, 15, 4, 15, 11, 15, 11, 15, 11, 15, 11, 4, 4, 0,
+  /* 2339 */ 36, 1, 36, 1, 12, 4, 4, 0,
+  /* 2347 */ 0, 14, 2, 3, 12, 4, 12, 4, 4, 0,
+  /* 2357 */ 12, 4, 12, 4, 12, 4, 12, 4, 4, 0,
+  /* 2367 */ 13, 4, 13, 4, 12, 4, 12, 4, 4, 0,
+  /* 2377 */ 37, 1, 37, 1, 13, 4, 4, 0,
+  /* 2385 */ 13, 4, 13, 4, 13, 4, 13, 4, 4, 0,
+  /* 2395 */ 16, 4, 16, 4, 13, 4, 13, 4, 4, 0,
+  /* 2405 */ 16, 4, 16, 4, 13, 4, 4, 0,
+  /* 2413 */ 40, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 4, 0,
+  /* 2434 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 4, 0,
+  /* 2447 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 15, 4, 4, 0,
+  /* 2460 */ 13, 4, 13, 4, 16, 4, 4, 0,
+  /* 2468 */ 16, 4, 16, 4, 16, 4, 4, 0,
+  /* 2476 */ 17, 17, 4, 4, 0,
+  /* 2481 */ 15, 0, 18, 4, 4, 0,
+  /* 2487 */ 21, 4, 4, 0,
+  /* 2491 */ 23, 3, 3, 3, 3, 5, 4, 0,
+  /* 2499 */ 21, 3, 3, 5, 4, 0,
+  /* 2505 */ 23, 4, 4, 4, 4, 5, 4, 0,
+  /* 2513 */ 21, 4, 4, 5, 4, 0,
+  /* 2519 */ 23, 4, 4, 4, 4, 5, 5, 4, 0,
+  /* 2528 */ 21, 5, 5, 5, 4, 0,
+  /* 2534 */ 23, 7, 7, 7, 7, 5, 5, 4, 0,
+  /* 2543 */ 23, 7, 7, 7, 7, 5, 4, 0,
+  /* 2551 */ 0, 14, 2, 2, 10, 4, 9, 5, 4, 0,
+  /* 2561 */ 0, 14, 2, 2, 9, 5, 9, 5, 4, 0,
+  /* 2571 */ 9, 5, 9, 5, 9, 5, 9, 5, 4, 0,
+  /* 2581 */ 0, 14, 2, 2, 10, 4, 10, 5, 4, 0,
+  /* 2591 */ 0, 14, 2, 2, 10, 5, 10, 5, 4, 0,
+  /* 2601 */ 0, 14, 2, 2, 11, 4, 11, 5, 4, 0,
+  /* 2611 */ 0, 14, 2, 2, 11, 5, 11, 5, 4, 0,
+  /* 2621 */ 21, 5, 4, 0,
+  /* 2625 */ 0, 15, 4, 9, 6, 9, 6, 9, 6, 9, 6, 4, 0,
+  /* 2638 */ 0, 15, 4, 7, 7, 7, 7, 7, 7, 7, 7, 4, 0,
+  /* 2651 */ 21, 10, 4, 4, 10, 7, 4, 0,
+  /* 2659 */ 0, 14, 2, 2, 10, 4, 10, 7, 4, 0,
+  /* 2669 */ 0, 14, 2, 2, 9, 5, 10, 7, 4, 0,
+  /* 2679 */ 0, 14, 2, 2, 10, 5, 10, 7, 4, 0,
+  /* 2689 */ 0, 14, 2, 2, 11, 4, 11, 7, 4, 0,
+  /* 2699 */ 0, 14, 2, 2, 11, 5, 11, 7, 4, 0,
+  /* 2709 */ 0, 14, 2, 3, 12, 4, 12, 7, 4, 0,
+  /* 2719 */ 21, 9, 5, 4, 9, 8, 4, 0,
+  /* 2727 */ 0, 14, 2, 2, 10, 4, 9, 8, 4, 0,
+  /* 2737 */ 0, 14, 2, 2, 9, 5, 9, 8, 4, 0,
+  /* 2747 */ 0, 14, 2, 2, 10, 4, 10, 8, 4, 0,
+  /* 2757 */ 0, 14, 2, 2, 10, 5, 10, 8, 4, 0,
+  /* 2767 */ 0, 14, 2, 2, 11, 4, 11, 8, 4, 0,
+  /* 2777 */ 0, 14, 2, 2, 11, 5, 11, 8, 4, 0,
+  /* 2787 */ 21, 10, 4, 4, 10, 4, 0,
+  /* 2794 */ 21, 11, 3, 4, 10, 4, 10, 4, 0,
+  /* 2803 */ 21, 10, 4, 4, 10, 4, 10, 4, 0,
+  /* 2812 */ 0, 15, 4, 15, 11, 15, 11, 15, 11, 4, 0,
+  /* 2823 */ 0, 15, 4, 15, 11, 15, 11, 15, 11, 15, 11, 4, 0,
+  /* 2836 */ 12, 4, 36, 1, 12, 4, 0,
+  /* 2843 */ 0, 36, 1, 14, 2, 12, 4, 0,
+  /* 2851 */ 0, 14, 2, 36, 1, 4, 4, 12, 4, 0,
+  /* 2861 */ 36, 1, 36, 1, 12, 4, 12, 4, 0,
+  /* 2870 */ 12, 4, 36, 1, 12, 4, 12, 4, 0,
+  /* 2879 */ 13, 4, 36, 1, 12, 4, 12, 4, 0,
+  /* 2888 */ 0, 36, 1, 4, 4, 12, 4, 12, 4, 0,
+  /* 2898 */ 0, 36, 1, 4, 4, 13, 4, 12, 4, 0,
+  /* 2908 */ 23, 15, 3, 15, 3, 15, 3, 15, 3, 15, 12, 4, 0,
+  /* 2921 */ 22, 15, 3, 15, 3, 15, 3, 15, 12, 4, 0,
+  /* 2932 */ 21, 15, 3, 15, 3, 15, 12, 4, 0,
+  /* 2941 */ 13, 4, 37, 1, 13, 4, 0,
+  /* 2948 */ 0, 37, 1, 14, 2, 13, 4, 0,
+  /* 2956 */ 0, 14, 2, 36, 1, 4, 4, 13, 4, 0,
+  /* 2966 */ 0, 14, 2, 37, 1, 4, 4, 13, 4, 0,
+  /* 2976 */ 37, 1, 37, 1, 13, 4, 13, 4, 0,
+  /* 2985 */ 13, 4, 37, 1, 13, 4, 13, 4, 0,
+  /* 2994 */ 16, 4, 37, 1, 13, 4, 13, 4, 0,
+  /* 3003 */ 0, 37, 1, 4, 4, 13, 4, 13, 4, 0,
+  /* 3013 */ 16, 4, 16, 4, 13, 4, 13, 4, 0,
+  /* 3022 */ 0, 4, 4, 16, 4, 13, 4, 0,
+  /* 3030 */ 0, 37, 1, 4, 4, 16, 4, 13, 4, 0,
+  /* 3040 */ 16, 4, 16, 4, 13, 4, 0,
+  /* 3047 */ 0, 14, 20, 5, 15, 4, 0,
+  /* 3054 */ 40, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 0,
+  /* 3074 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 0,
+  /* 3086 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 15, 4, 0,
+  /* 3098 */ 5, 19, 15, 4, 0,
+  /* 3103 */ 0, 14, 2, 37, 1, 4, 4, 16, 4, 0,
+  /* 3113 */ 0, 14, 2, 4, 4, 16, 4, 0,
+  /* 3121 */ 13, 4, 16, 4, 0,
+  /* 3126 */ 16, 4, 16, 4, 16, 4, 0,
+  /* 3133 */ 4, 17, 4, 0,
+  /* 3137 */ 0, 15, 4, 15, 12, 15, 17, 4, 0,
+  /* 3146 */ 17, 17, 4, 0,
+  /* 3150 */ 0, 14, 2, 16, 2, 5, 0,
+  /* 3157 */ 16, 2, 14, 2, 16, 2, 5, 0,
+  /* 3165 */ 16, 2, 16, 2, 16, 2, 16, 2, 5, 0,
+  /* 3175 */ 5, 16, 2, 16, 2, 5, 0,
+  /* 3182 */ 21, 5, 1, 4, 5, 0,
+  /* 3188 */ 16, 4, 16, 4, 13, 4, 5, 0,
+  /* 3196 */ 21, 1, 5, 5, 0,
+  /* 3201 */ 21, 10, 4, 4, 9, 5, 9, 5, 0,
+  /* 3210 */ 21, 9, 5, 4, 9, 5, 9, 5, 0,
+  /* 3219 */ 0, 15, 4, 9, 6, 9, 6, 9, 6, 9, 6, 0,
+  /* 3231 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
+  /* 3281 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
+  /* 3331 */ 23, 4, 4, 4, 4, 5, 4, 7, 0,
+  /* 3340 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 0,
+  /* 3350 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 0,
+  /* 3360 */ 23, 7, 7, 7, 7, 5, 4, 7, 0,
+  /* 3369 */ 23, 4, 4, 4, 4, 5, 7, 0,
+  /* 3377 */ 23, 4, 4, 4, 4, 5, 5, 7, 0,
+  /* 3386 */ 23, 7, 7, 7, 7, 5, 5, 7, 0,
+  /* 3395 */ 23, 7, 7, 7, 7, 5, 7, 0,
+  /* 3403 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 0,
+  /* 3413 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 0,
+  /* 3424 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 0,
+  /* 3435 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 0,
+  /* 3445 */ 23, 4, 4, 4, 4, 5, 7, 7, 0,
+  /* 3454 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 0,
+  /* 3464 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 0,
+  /* 3474 */ 23, 7, 7, 7, 7, 5, 7, 7, 0,
+  /* 3483 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 0,
+  /* 3494 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 0,
+  /* 3506 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 0,
+  /* 3518 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 0,
+  /* 3529 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 0,
+  /* 3539 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 0,
+  /* 3550 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 0,
+  /* 3561 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 0,
+  /* 3571 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 7, 0,
+  /* 3583 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 7, 0,
+  /* 3596 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 7, 0,
+  /* 3609 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 7, 0,
+  /* 3621 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 0,
+  /* 3632 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 0,
+  /* 3644 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 0,
+  /* 3656 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 0,
+  /* 3667 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 7, 7, 7, 0,
+  /* 3681 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 7, 7, 7, 0,
+  /* 3696 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 7, 7, 7, 0,
+  /* 3711 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 7, 7, 7, 0,
+  /* 3725 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 0,
+  /* 3738 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 7, 7, 0,
+  /* 3752 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 7, 7, 0,
+  /* 3766 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 7, 7, 0,
+  /* 3779 */ 0, 15, 4, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3791 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3841 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3891 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3907 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3924 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3941 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
+  /* 3957 */ 21, 10, 4, 4, 10, 7, 10, 7, 0,
+  /* 3966 */ 17, 10, 7, 0,
+  /* 3970 */ 21, 9, 5, 4, 9, 8, 9, 8, 0,
+  /* 3979 */ 9, 8, 9, 8, 9, 8, 9, 8, 0,
+  /* 3988 */ 17, 9, 8, 0,
+  /* 3992 */ 10, 8, 10, 8, 10, 8, 10, 8, 0,
+  /* 4001 */ 0, 15, 3, 15, 3, 15, 3, 15, 12, 0,
+  /* 4011 */ 0, 15, 3, 15, 3, 15, 3, 15, 3, 15, 12, 0,
+  /* 4023 */ 23, 15, 3, 15, 3, 15, 3, 15, 3, 15, 12, 0,
+  /* 4035 */ 22, 15, 3, 15, 3, 15, 3, 15, 12, 0,
+  /* 4045 */ 21, 15, 3, 15, 3, 15, 12, 0,
+  /* 4053 */ 0, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4062 */ 0, 15, 3, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4073 */ 0, 15, 3, 15, 3, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4086 */ 23, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4107 */ 22, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4124 */ 21, 15, 3, 15, 3, 15, 3, 15, 3, 5, 15, 12, 0,
+  /* 4137 */ 4, 17, 0,
+  /* 4140 */ 10, 7, 10, 7, 17, 0,
+  /* 4146 */ 9, 8, 17, 0,
+  /* 4150 */ 0, 14, 17, 17, 0,
+  /* 4155 */ 17, 17, 17, 0,
+  /* 4159 */ 15, 0, 18, 0,
+  /* 4163 */ 1, 18, 0,
+  /* 4166 */ 15, 4, 18, 0,
+  /* 4170 */ 0, 19, 0,
+  /* 4173 */ 15, 1, 19, 0,
+  /* 4177 */ 1, 14, 2, 19, 0,
+  /* 4182 */ 21, 14, 2, 1, 14, 2, 4, 19, 0,
+  /* 4191 */ 15, 2, 15, 10, 15, 19, 0,
+  /* 4198 */ 15, 2, 15, 2, 15, 2, 15, 2, 19, 19, 0,
+  /* 4209 */ 15, 2, 15, 2, 4, 19, 19, 0,
+  /* 4217 */ 0, 19, 19, 19, 0,
+  /* 4222 */ 15, 0, 29, 0,
+  /* 4226 */ 0, 1, 29, 0,
+  /* 4230 */ 0, 5, 4, 14, 2, 4, 29, 0,
+  /* 4238 */ 5, 5, 4, 14, 2, 4, 29, 0,
+  /* 4246 */ 18, 5, 4, 15, 4, 4, 4, 29, 0,
+  /* 4255 */ 0, 5, 4, 29, 0,
+  /* 4260 */ 28, 35, 28, 35, 28, 35, 28, 35, 0,
+  255
+};
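+
+// A minimal sketch, assuming the usual IntrinsicImpl.inc conventions, of how
+// one packed value from the fixed-size table above expands into the byte
+// stream the descriptor decoder consumes. decodeIITValue is illustrative
+// only and not part of the generated interface:
+//
+//   static unsigned decodeIITValue(unsigned TableVal,
+//                                  SmallVectorImpl<unsigned char> &IITValues) {
+//     if (TableVal >> 31)
+//       // Long form: return the offset of the entry in
+//       // IIT_LongEncodingTable; the decoder reads from there, consuming
+//       // as many bytes as the descriptor codes themselves dictate (the
+//       // rows can contain interior zeros, so there is no simple sentinel).
+//       return TableVal & ~(1U << 31);
+//     // Short form: unpack up to eight 4-bit codes, low nibble first.
+//     do {
+//       IITValues.push_back(TableVal & 0xF);
+//       TableVal >>= 4;
+//     } while (TableVal);
+//     return 0; // decode from the start of IITValues instead
+//   }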
+
+#endif
+
+// Add parameter attributes that are not common to all intrinsics.
+#ifdef GET_INTRINSIC_ATTRIBUTES
+AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id) {
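+  // Each entry below maps one intrinsic ID to a small attribute-group index;
+  // by the usual IntrinsicImpl.inc convention the array is indexed with
+  // id - 1 (ID 0 being not_intrinsic), and the resulting index selects one
+  // of the shared attribute groups built later in this function.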
+  static const uint8_t IntrinsicsToAttributesMap[] = {
+    1, // llvm.addressofreturnaddress
+    2, // llvm.adjust.trampoline
+    3, // llvm.annotation
+    3, // llvm.assume
+    4, // llvm.bitreverse
+    4, // llvm.bswap
+    4, // llvm.canonicalize
+    4, // llvm.ceil
+    3, // llvm.clear_cache
+    5, // llvm.codeview.annotation
+    1, // llvm.convert.from.fp16
+    1, // llvm.convert.to.fp16
+    4, // llvm.copysign
+    3, // llvm.coro.alloc
+    6, // llvm.coro.begin
+    7, // llvm.coro.destroy
+    8, // llvm.coro.done
+    3, // llvm.coro.end
+    1, // llvm.coro.frame
+    9, // llvm.coro.free
+    10, // llvm.coro.id
+    11, // llvm.coro.param
+    12, // llvm.coro.promise
+    7, // llvm.coro.resume
+    3, // llvm.coro.save
+    1, // llvm.coro.size
+    13, // llvm.coro.subfn.addr
+    3, // llvm.coro.suspend
+    4, // llvm.cos
+    4, // llvm.ctlz
+    4, // llvm.ctpop
+    4, // llvm.cttz
+    4, // llvm.dbg.addr
+    4, // llvm.dbg.declare
+    4, // llvm.dbg.value
+    3, // llvm.debugtrap
+    1, // llvm.donothing
+    3, // llvm.eh.dwarf.cfa
+    1, // llvm.eh.exceptioncode
+    1, // llvm.eh.exceptionpointer
+    3, // llvm.eh.return.i32
+    3, // llvm.eh.return.i64
+    1, // llvm.eh.sjlj.callsite
+    3, // llvm.eh.sjlj.functioncontext
+    14, // llvm.eh.sjlj.longjmp
+    1, // llvm.eh.sjlj.lsda
+    3, // llvm.eh.sjlj.setjmp
+    3, // llvm.eh.sjlj.setup.dispatch
+    1, // llvm.eh.typeid.for
+    3, // llvm.eh.unwind.init
+    4, // llvm.exp
+    4, // llvm.exp2
+    1, // llvm.expect
+    15, // llvm.experimental.constrained.cos
+    15, // llvm.experimental.constrained.exp
+    15, // llvm.experimental.constrained.exp2
+    15, // llvm.experimental.constrained.fadd
+    15, // llvm.experimental.constrained.fdiv
+    15, // llvm.experimental.constrained.fma
+    15, // llvm.experimental.constrained.fmul
+    15, // llvm.experimental.constrained.frem
+    15, // llvm.experimental.constrained.fsub
+    15, // llvm.experimental.constrained.log
+    15, // llvm.experimental.constrained.log10
+    15, // llvm.experimental.constrained.log2
+    15, // llvm.experimental.constrained.nearbyint
+    15, // llvm.experimental.constrained.pow
+    15, // llvm.experimental.constrained.powi
+    15, // llvm.experimental.constrained.rint
+    15, // llvm.experimental.constrained.sin
+    15, // llvm.experimental.constrained.sqrt
+    7, // llvm.experimental.deoptimize
+    16, // llvm.experimental.gc.relocate
+    16, // llvm.experimental.gc.result
+    7, // llvm.experimental.gc.statepoint
+    7, // llvm.experimental.guard
+    7, // llvm.experimental.patchpoint.i64
+    7, // llvm.experimental.patchpoint.void
+    7, // llvm.experimental.stackmap
+    1, // llvm.experimental.vector.reduce.add
+    1, // llvm.experimental.vector.reduce.and
+    1, // llvm.experimental.vector.reduce.fadd
+    1, // llvm.experimental.vector.reduce.fmax
+    1, // llvm.experimental.vector.reduce.fmin
+    1, // llvm.experimental.vector.reduce.fmul
+    1, // llvm.experimental.vector.reduce.mul
+    1, // llvm.experimental.vector.reduce.or
+    1, // llvm.experimental.vector.reduce.smax
+    1, // llvm.experimental.vector.reduce.smin
+    1, // llvm.experimental.vector.reduce.umax
+    1, // llvm.experimental.vector.reduce.umin
+    1, // llvm.experimental.vector.reduce.xor
+    4, // llvm.fabs
+    4, // llvm.floor
+    3, // llvm.flt.rounds
+    4, // llvm.fma
+    4, // llvm.fmuladd
+    1, // llvm.frameaddress
+    2, // llvm.gcread
+    3, // llvm.gcroot
+    17, // llvm.gcwrite
+    3, // llvm.get.dynamic.area.offset
+    3, // llvm.icall.branch.funnel
+    18, // llvm.init.trampoline
+    3, // llvm.instrprof.increment
+    3, // llvm.instrprof.increment.step
+    3, // llvm.instrprof.value.profile
+    19, // llvm.invariant.end
+    2, // llvm.invariant.group.barrier
+    20, // llvm.invariant.start
+    20, // llvm.lifetime.end
+    20, // llvm.lifetime.start
+    2, // llvm.load.relative
+    1, // llvm.localaddress
+    3, // llvm.localescape
+    1, // llvm.localrecover
+    4, // llvm.log
+    4, // llvm.log10
+    4, // llvm.log2
+    14, // llvm.longjmp
+    21, // llvm.masked.compressstore
+    16, // llvm.masked.expandload
+    16, // llvm.masked.gather
+    2, // llvm.masked.load
+    3, // llvm.masked.scatter
+    21, // llvm.masked.store
+    4, // llvm.maxnum
+    22, // llvm.memcpy
+    22, // llvm.memcpy.element.unordered.atomic
+    23, // llvm.memmove
+    22, // llvm.memmove.element.unordered.atomic
+    24, // llvm.memset
+    24, // llvm.memset.element.unordered.atomic
+    4, // llvm.minnum
+    4, // llvm.nearbyint
+    4, // llvm.objectsize
+    3, // llvm.pcmarker
+    4, // llvm.pow
+    4, // llvm.powi
+    25, // llvm.prefetch
+    3, // llvm.ptr.annotation
+    16, // llvm.read_register
+    3, // llvm.readcyclecounter
+    1, // llvm.returnaddress
+    4, // llvm.rint
+    4, // llvm.round
+    4, // llvm.sadd.with.overflow
+    3, // llvm.setjmp
+    15, // llvm.sideeffect
+    14, // llvm.siglongjmp
+    3, // llvm.sigsetjmp
+    4, // llvm.sin
+    4, // llvm.smul.with.overflow
+    4, // llvm.sqrt
+    26, // llvm.ssa.copy
+    4, // llvm.ssub.with.overflow
+    3, // llvm.stackguard
+    3, // llvm.stackprotector
+    3, // llvm.stackrestore
+    3, // llvm.stacksave
+    1, // llvm.thread.pointer
+    14, // llvm.trap
+    4, // llvm.trunc
+    1, // llvm.type.checked.load
+    1, // llvm.type.test
+    4, // llvm.uadd.with.overflow
+    4, // llvm.umul.with.overflow
+    4, // llvm.usub.with.overflow
+    3, // llvm.va_copy
+    3, // llvm.va_end
+    3, // llvm.va_start
+    3, // llvm.var.annotation
+    3, // llvm.write_register
+    27, // llvm.xray.customevent
+    3, // llvm.aarch64.clrex
+    1, // llvm.aarch64.crc32b
+    1, // llvm.aarch64.crc32cb
+    1, // llvm.aarch64.crc32ch
+    1, // llvm.aarch64.crc32cw
+    1, // llvm.aarch64.crc32cx
+    1, // llvm.aarch64.crc32h
+    1, // llvm.aarch64.crc32w
+    1, // llvm.aarch64.crc32x
+    1, // llvm.aarch64.crypto.aesd
+    1, // llvm.aarch64.crypto.aese
+    1, // llvm.aarch64.crypto.aesimc
+    1, // llvm.aarch64.crypto.aesmc
+    1, // llvm.aarch64.crypto.sha1c
+    1, // llvm.aarch64.crypto.sha1h
+    1, // llvm.aarch64.crypto.sha1m
+    1, // llvm.aarch64.crypto.sha1p
+    1, // llvm.aarch64.crypto.sha1su0
+    1, // llvm.aarch64.crypto.sha1su1
+    1, // llvm.aarch64.crypto.sha256h
+    1, // llvm.aarch64.crypto.sha256h2
+    1, // llvm.aarch64.crypto.sha256su0
+    1, // llvm.aarch64.crypto.sha256su1
+    3, // llvm.aarch64.dmb
+    3, // llvm.aarch64.dsb
+    3, // llvm.aarch64.hint
+    3, // llvm.aarch64.isb
+    3, // llvm.aarch64.ldaxp
+    3, // llvm.aarch64.ldaxr
+    3, // llvm.aarch64.ldxp
+    3, // llvm.aarch64.ldxr
+    1, // llvm.aarch64.neon.abs
+    1, // llvm.aarch64.neon.addhn
+    1, // llvm.aarch64.neon.addp
+    1, // llvm.aarch64.neon.cls
+    1, // llvm.aarch64.neon.fabd
+    1, // llvm.aarch64.neon.facge
+    1, // llvm.aarch64.neon.facgt
+    1, // llvm.aarch64.neon.faddv
+    1, // llvm.aarch64.neon.fcvtas
+    1, // llvm.aarch64.neon.fcvtau
+    1, // llvm.aarch64.neon.fcvtms
+    1, // llvm.aarch64.neon.fcvtmu
+    1, // llvm.aarch64.neon.fcvtns
+    1, // llvm.aarch64.neon.fcvtnu
+    1, // llvm.aarch64.neon.fcvtps
+    1, // llvm.aarch64.neon.fcvtpu
+    1, // llvm.aarch64.neon.fcvtxn
+    1, // llvm.aarch64.neon.fcvtzs
+    1, // llvm.aarch64.neon.fcvtzu
+    1, // llvm.aarch64.neon.fmax
+    1, // llvm.aarch64.neon.fmaxnm
+    1, // llvm.aarch64.neon.fmaxnmp
+    1, // llvm.aarch64.neon.fmaxnmv
+    1, // llvm.aarch64.neon.fmaxp
+    1, // llvm.aarch64.neon.fmaxv
+    1, // llvm.aarch64.neon.fmin
+    1, // llvm.aarch64.neon.fminnm
+    1, // llvm.aarch64.neon.fminnmp
+    1, // llvm.aarch64.neon.fminnmv
+    1, // llvm.aarch64.neon.fminp
+    1, // llvm.aarch64.neon.fminv
+    1, // llvm.aarch64.neon.fmulx
+    1, // llvm.aarch64.neon.frecpe
+    1, // llvm.aarch64.neon.frecps
+    1, // llvm.aarch64.neon.frecpx
+    1, // llvm.aarch64.neon.frintn
+    1, // llvm.aarch64.neon.frsqrte
+    1, // llvm.aarch64.neon.frsqrts
+    2, // llvm.aarch64.neon.ld1x2
+    2, // llvm.aarch64.neon.ld1x3
+    2, // llvm.aarch64.neon.ld1x4
+    2, // llvm.aarch64.neon.ld2
+    2, // llvm.aarch64.neon.ld2lane
+    2, // llvm.aarch64.neon.ld2r
+    2, // llvm.aarch64.neon.ld3
+    2, // llvm.aarch64.neon.ld3lane
+    2, // llvm.aarch64.neon.ld3r
+    2, // llvm.aarch64.neon.ld4
+    2, // llvm.aarch64.neon.ld4lane
+    2, // llvm.aarch64.neon.ld4r
+    1, // llvm.aarch64.neon.pmul
+    1, // llvm.aarch64.neon.pmull
+    1, // llvm.aarch64.neon.pmull64
+    1, // llvm.aarch64.neon.raddhn
+    1, // llvm.aarch64.neon.rbit
+    1, // llvm.aarch64.neon.rshrn
+    1, // llvm.aarch64.neon.rsubhn
+    1, // llvm.aarch64.neon.sabd
+    1, // llvm.aarch64.neon.saddlp
+    1, // llvm.aarch64.neon.saddlv
+    1, // llvm.aarch64.neon.saddv
+    1, // llvm.aarch64.neon.scalar.sqxtn
+    1, // llvm.aarch64.neon.scalar.sqxtun
+    1, // llvm.aarch64.neon.scalar.uqxtn
+    1, // llvm.aarch64.neon.shadd
+    1, // llvm.aarch64.neon.shll
+    1, // llvm.aarch64.neon.shsub
+    1, // llvm.aarch64.neon.smax
+    1, // llvm.aarch64.neon.smaxp
+    1, // llvm.aarch64.neon.smaxv
+    1, // llvm.aarch64.neon.smin
+    1, // llvm.aarch64.neon.sminp
+    1, // llvm.aarch64.neon.sminv
+    1, // llvm.aarch64.neon.smull
+    1, // llvm.aarch64.neon.sqabs
+    1, // llvm.aarch64.neon.sqadd
+    1, // llvm.aarch64.neon.sqdmulh
+    1, // llvm.aarch64.neon.sqdmull
+    1, // llvm.aarch64.neon.sqdmulls.scalar
+    1, // llvm.aarch64.neon.sqneg
+    1, // llvm.aarch64.neon.sqrdmulh
+    1, // llvm.aarch64.neon.sqrshl
+    1, // llvm.aarch64.neon.sqrshrn
+    1, // llvm.aarch64.neon.sqrshrun
+    1, // llvm.aarch64.neon.sqshl
+    1, // llvm.aarch64.neon.sqshlu
+    1, // llvm.aarch64.neon.sqshrn
+    1, // llvm.aarch64.neon.sqshrun
+    1, // llvm.aarch64.neon.sqsub
+    1, // llvm.aarch64.neon.sqxtn
+    1, // llvm.aarch64.neon.sqxtun
+    1, // llvm.aarch64.neon.srhadd
+    1, // llvm.aarch64.neon.srshl
+    1, // llvm.aarch64.neon.sshl
+    1, // llvm.aarch64.neon.sshll
+    19, // llvm.aarch64.neon.st1x2
+    28, // llvm.aarch64.neon.st1x3
+    29, // llvm.aarch64.neon.st1x4
+    19, // llvm.aarch64.neon.st2
+    28, // llvm.aarch64.neon.st2lane
+    28, // llvm.aarch64.neon.st3
+    29, // llvm.aarch64.neon.st3lane
+    29, // llvm.aarch64.neon.st4
+    30, // llvm.aarch64.neon.st4lane
+    1, // llvm.aarch64.neon.subhn
+    1, // llvm.aarch64.neon.suqadd
+    1, // llvm.aarch64.neon.tbl1
+    1, // llvm.aarch64.neon.tbl2
+    1, // llvm.aarch64.neon.tbl3
+    1, // llvm.aarch64.neon.tbl4
+    1, // llvm.aarch64.neon.tbx1
+    1, // llvm.aarch64.neon.tbx2
+    1, // llvm.aarch64.neon.tbx3
+    1, // llvm.aarch64.neon.tbx4
+    1, // llvm.aarch64.neon.uabd
+    1, // llvm.aarch64.neon.uaddlp
+    1, // llvm.aarch64.neon.uaddlv
+    1, // llvm.aarch64.neon.uaddv
+    1, // llvm.aarch64.neon.uhadd
+    1, // llvm.aarch64.neon.uhsub
+    1, // llvm.aarch64.neon.umax
+    1, // llvm.aarch64.neon.umaxp
+    1, // llvm.aarch64.neon.umaxv
+    1, // llvm.aarch64.neon.umin
+    1, // llvm.aarch64.neon.uminp
+    1, // llvm.aarch64.neon.uminv
+    1, // llvm.aarch64.neon.umull
+    1, // llvm.aarch64.neon.uqadd
+    1, // llvm.aarch64.neon.uqrshl
+    1, // llvm.aarch64.neon.uqrshrn
+    1, // llvm.aarch64.neon.uqshl
+    1, // llvm.aarch64.neon.uqshrn
+    1, // llvm.aarch64.neon.uqsub
+    1, // llvm.aarch64.neon.uqxtn
+    1, // llvm.aarch64.neon.urecpe
+    1, // llvm.aarch64.neon.urhadd
+    1, // llvm.aarch64.neon.urshl
+    1, // llvm.aarch64.neon.ursqrte
+    1, // llvm.aarch64.neon.ushl
+    1, // llvm.aarch64.neon.ushll
+    1, // llvm.aarch64.neon.usqadd
+    1, // llvm.aarch64.neon.vcopy.lane
+    1, // llvm.aarch64.neon.vcvtfp2fxs
+    1, // llvm.aarch64.neon.vcvtfp2fxu
+    1, // llvm.aarch64.neon.vcvtfp2hf
+    1, // llvm.aarch64.neon.vcvtfxs2fp
+    1, // llvm.aarch64.neon.vcvtfxu2fp
+    1, // llvm.aarch64.neon.vcvthf2fp
+    1, // llvm.aarch64.neon.vsli
+    1, // llvm.aarch64.neon.vsri
+    1, // llvm.aarch64.sdiv
+    1, // llvm.aarch64.sisd.fabd
+    1, // llvm.aarch64.sisd.fcvtxn
+    3, // llvm.aarch64.stlxp
+    3, // llvm.aarch64.stlxr
+    3, // llvm.aarch64.stxp
+    3, // llvm.aarch64.stxr
+    1, // llvm.aarch64.udiv
+    4, // llvm.amdgcn.alignbit
+    4, // llvm.amdgcn.alignbyte
+    18, // llvm.amdgcn.atomic.dec
+    18, // llvm.amdgcn.atomic.inc
+    31, // llvm.amdgcn.break
+    3, // llvm.amdgcn.buffer.atomic.add
+    3, // llvm.amdgcn.buffer.atomic.and
+    3, // llvm.amdgcn.buffer.atomic.cmpswap
+    3, // llvm.amdgcn.buffer.atomic.or
+    3, // llvm.amdgcn.buffer.atomic.smax
+    3, // llvm.amdgcn.buffer.atomic.smin
+    3, // llvm.amdgcn.buffer.atomic.sub
+    3, // llvm.amdgcn.buffer.atomic.swap
+    3, // llvm.amdgcn.buffer.atomic.umax
+    3, // llvm.amdgcn.buffer.atomic.umin
+    3, // llvm.amdgcn.buffer.atomic.xor
+    16, // llvm.amdgcn.buffer.load
+    16, // llvm.amdgcn.buffer.load.format
+    32, // llvm.amdgcn.buffer.store
+    32, // llvm.amdgcn.buffer.store.format
+    3, // llvm.amdgcn.buffer.wbinvl1
+    3, // llvm.amdgcn.buffer.wbinvl1.sc
+    3, // llvm.amdgcn.buffer.wbinvl1.vol
+    4, // llvm.amdgcn.class
+    4, // llvm.amdgcn.cos
+    4, // llvm.amdgcn.cubeid
+    4, // llvm.amdgcn.cubema
+    4, // llvm.amdgcn.cubesc
+    4, // llvm.amdgcn.cubetc
+    4, // llvm.amdgcn.cvt.pk.i16
+    4, // llvm.amdgcn.cvt.pk.u16
+    4, // llvm.amdgcn.cvt.pk.u8.f32
+    4, // llvm.amdgcn.cvt.pknorm.i16
+    4, // llvm.amdgcn.cvt.pknorm.u16
+    4, // llvm.amdgcn.cvt.pkrtz
+    4, // llvm.amdgcn.dispatch.id
+    4, // llvm.amdgcn.dispatch.ptr
+    4, // llvm.amdgcn.div.fixup
+    4, // llvm.amdgcn.div.fmas
+    4, // llvm.amdgcn.div.scale
+    31, // llvm.amdgcn.ds.bpermute
+    18, // llvm.amdgcn.ds.fadd
+    18, // llvm.amdgcn.ds.fmax
+    18, // llvm.amdgcn.ds.fmin
+    31, // llvm.amdgcn.ds.permute
+    31, // llvm.amdgcn.ds.swizzle
+    33, // llvm.amdgcn.else
+    31, // llvm.amdgcn.else.break
+    33, // llvm.amdgcn.end.cf
+    3, // llvm.amdgcn.exp
+    3, // llvm.amdgcn.exp.compr
+    31, // llvm.amdgcn.fcmp
+    4, // llvm.amdgcn.fdiv.fast
+    4, // llvm.amdgcn.fmed3
+    4, // llvm.amdgcn.fmul.legacy
+    4, // llvm.amdgcn.fract
+    4, // llvm.amdgcn.frexp.exp
+    4, // llvm.amdgcn.frexp.mant
+    4, // llvm.amdgcn.groupstaticsize
+    31, // llvm.amdgcn.icmp
+    33, // llvm.amdgcn.if
+    31, // llvm.amdgcn.if.break
+    3, // llvm.amdgcn.image.atomic.add
+    3, // llvm.amdgcn.image.atomic.and
+    3, // llvm.amdgcn.image.atomic.cmpswap
+    3, // llvm.amdgcn.image.atomic.dec
+    3, // llvm.amdgcn.image.atomic.inc
+    3, // llvm.amdgcn.image.atomic.or
+    3, // llvm.amdgcn.image.atomic.smax
+    3, // llvm.amdgcn.image.atomic.smin
+    3, // llvm.amdgcn.image.atomic.sub
+    3, // llvm.amdgcn.image.atomic.swap
+    3, // llvm.amdgcn.image.atomic.umax
+    3, // llvm.amdgcn.image.atomic.umin
+    3, // llvm.amdgcn.image.atomic.xor
+    16, // llvm.amdgcn.image.gather4
+    16, // llvm.amdgcn.image.gather4.b
+    16, // llvm.amdgcn.image.gather4.b.cl
+    16, // llvm.amdgcn.image.gather4.b.cl.o
+    16, // llvm.amdgcn.image.gather4.b.o
+    16, // llvm.amdgcn.image.gather4.c
+    16, // llvm.amdgcn.image.gather4.c.b
+    16, // llvm.amdgcn.image.gather4.c.b.cl
+    16, // llvm.amdgcn.image.gather4.c.b.cl.o
+    16, // llvm.amdgcn.image.gather4.c.b.o
+    16, // llvm.amdgcn.image.gather4.c.cl
+    16, // llvm.amdgcn.image.gather4.c.cl.o
+    16, // llvm.amdgcn.image.gather4.c.l
+    16, // llvm.amdgcn.image.gather4.c.l.o
+    16, // llvm.amdgcn.image.gather4.c.lz
+    16, // llvm.amdgcn.image.gather4.c.lz.o
+    16, // llvm.amdgcn.image.gather4.c.o
+    16, // llvm.amdgcn.image.gather4.cl
+    16, // llvm.amdgcn.image.gather4.cl.o
+    16, // llvm.amdgcn.image.gather4.l
+    16, // llvm.amdgcn.image.gather4.l.o
+    16, // llvm.amdgcn.image.gather4.lz
+    16, // llvm.amdgcn.image.gather4.lz.o
+    16, // llvm.amdgcn.image.gather4.o
+    1, // llvm.amdgcn.image.getlod
+    1, // llvm.amdgcn.image.getresinfo
+    16, // llvm.amdgcn.image.load
+    16, // llvm.amdgcn.image.load.mip
+    16, // llvm.amdgcn.image.sample
+    16, // llvm.amdgcn.image.sample.b
+    16, // llvm.amdgcn.image.sample.b.cl
+    16, // llvm.amdgcn.image.sample.b.cl.o
+    16, // llvm.amdgcn.image.sample.b.o
+    16, // llvm.amdgcn.image.sample.c
+    16, // llvm.amdgcn.image.sample.c.b
+    16, // llvm.amdgcn.image.sample.c.b.cl
+    16, // llvm.amdgcn.image.sample.c.b.cl.o
+    16, // llvm.amdgcn.image.sample.c.b.o
+    16, // llvm.amdgcn.image.sample.c.cd
+    16, // llvm.amdgcn.image.sample.c.cd.cl
+    16, // llvm.amdgcn.image.sample.c.cd.cl.o
+    16, // llvm.amdgcn.image.sample.c.cd.o
+    16, // llvm.amdgcn.image.sample.c.cl
+    16, // llvm.amdgcn.image.sample.c.cl.o
+    16, // llvm.amdgcn.image.sample.c.d
+    16, // llvm.amdgcn.image.sample.c.d.cl
+    16, // llvm.amdgcn.image.sample.c.d.cl.o
+    16, // llvm.amdgcn.image.sample.c.d.o
+    16, // llvm.amdgcn.image.sample.c.l
+    16, // llvm.amdgcn.image.sample.c.l.o
+    16, // llvm.amdgcn.image.sample.c.lz
+    16, // llvm.amdgcn.image.sample.c.lz.o
+    16, // llvm.amdgcn.image.sample.c.o
+    16, // llvm.amdgcn.image.sample.cd
+    16, // llvm.amdgcn.image.sample.cd.cl
+    16, // llvm.amdgcn.image.sample.cd.cl.o
+    16, // llvm.amdgcn.image.sample.cd.o
+    16, // llvm.amdgcn.image.sample.cl
+    16, // llvm.amdgcn.image.sample.cl.o
+    16, // llvm.amdgcn.image.sample.d
+    16, // llvm.amdgcn.image.sample.d.cl
+    16, // llvm.amdgcn.image.sample.d.cl.o
+    16, // llvm.amdgcn.image.sample.d.o
+    16, // llvm.amdgcn.image.sample.l
+    16, // llvm.amdgcn.image.sample.l.o
+    16, // llvm.amdgcn.image.sample.lz
+    16, // llvm.amdgcn.image.sample.lz.o
+    16, // llvm.amdgcn.image.sample.o
+    32, // llvm.amdgcn.image.store
+    32, // llvm.amdgcn.image.store.mip
+    4, // llvm.amdgcn.implicit.buffer.ptr
+    4, // llvm.amdgcn.implicitarg.ptr
+    33, // llvm.amdgcn.init.exec
+    33, // llvm.amdgcn.init.exec.from.input
+    4, // llvm.amdgcn.interp.mov
+    4, // llvm.amdgcn.interp.p1
+    4, // llvm.amdgcn.interp.p2
+    4, // llvm.amdgcn.kernarg.segment.ptr
+    3, // llvm.amdgcn.kill
+    4, // llvm.amdgcn.ldexp
+    4, // llvm.amdgcn.lerp
+    4, // llvm.amdgcn.log.clamp
+    33, // llvm.amdgcn.loop
+    1, // llvm.amdgcn.mbcnt.hi
+    1, // llvm.amdgcn.mbcnt.lo
+    31, // llvm.amdgcn.mov.dpp
+    4, // llvm.amdgcn.mqsad.pk.u16.u8
+    4, // llvm.amdgcn.mqsad.u32.u8
+    4, // llvm.amdgcn.msad.u8
+    1, // llvm.amdgcn.ps.live
+    4, // llvm.amdgcn.qsad.pk.u16.u8
+    4, // llvm.amdgcn.queue.ptr
+    4, // llvm.amdgcn.rcp
+    4, // llvm.amdgcn.rcp.legacy
+    31, // llvm.amdgcn.readfirstlane
+    31, // llvm.amdgcn.readlane
+    4, // llvm.amdgcn.rsq
+    4, // llvm.amdgcn.rsq.clamp
+    4, // llvm.amdgcn.rsq.legacy
+    33, // llvm.amdgcn.s.barrier
+    3, // llvm.amdgcn.s.dcache.inv
+    3, // llvm.amdgcn.s.dcache.inv.vol
+    3, // llvm.amdgcn.s.dcache.wb
+    3, // llvm.amdgcn.s.dcache.wb.vol
+    3, // llvm.amdgcn.s.decperflevel
+    4, // llvm.amdgcn.s.getpc
+    34, // llvm.amdgcn.s.getreg
+    3, // llvm.amdgcn.s.incperflevel
+    16, // llvm.amdgcn.s.memrealtime
+    16, // llvm.amdgcn.s.memtime
+    3, // llvm.amdgcn.s.sendmsg
+    3, // llvm.amdgcn.s.sendmsghalt
+    3, // llvm.amdgcn.s.sleep
+    3, // llvm.amdgcn.s.waitcnt
+    4, // llvm.amdgcn.sad.hi.u8
+    4, // llvm.amdgcn.sad.u16
+    4, // llvm.amdgcn.sad.u8
+    4, // llvm.amdgcn.sbfe
+    31, // llvm.amdgcn.set.inactive
+    4, // llvm.amdgcn.sffbh
+    4, // llvm.amdgcn.sin
+    16, // llvm.amdgcn.tbuffer.load
+    32, // llvm.amdgcn.tbuffer.store
+    4, // llvm.amdgcn.trig.preop
+    4, // llvm.amdgcn.ubfe
+    33, // llvm.amdgcn.unreachable
+    31, // llvm.amdgcn.update.dpp
+    33, // llvm.amdgcn.wave.barrier
+    4, // llvm.amdgcn.workgroup.id.x
+    4, // llvm.amdgcn.workgroup.id.y
+    4, // llvm.amdgcn.workgroup.id.z
+    4, // llvm.amdgcn.workitem.id.x
+    4, // llvm.amdgcn.workitem.id.y
+    4, // llvm.amdgcn.workitem.id.z
+    4, // llvm.amdgcn.wqm
+    31, // llvm.amdgcn.wqm.vote
+    31, // llvm.amdgcn.writelane
+    4, // llvm.amdgcn.wwm
+    3, // llvm.arm.cdp
+    3, // llvm.arm.cdp2
+    3, // llvm.arm.clrex
+    1, // llvm.arm.crc32b
+    1, // llvm.arm.crc32cb
+    1, // llvm.arm.crc32ch
+    1, // llvm.arm.crc32cw
+    1, // llvm.arm.crc32h
+    1, // llvm.arm.crc32w
+    3, // llvm.arm.dbg
+    3, // llvm.arm.dmb
+    3, // llvm.arm.dsb
+    3, // llvm.arm.get.fpscr
+    3, // llvm.arm.hint
+    3, // llvm.arm.isb
+    3, // llvm.arm.ldaex
+    3, // llvm.arm.ldaexd
+    3, // llvm.arm.ldc
+    3, // llvm.arm.ldc2
+    3, // llvm.arm.ldc2l
+    3, // llvm.arm.ldcl
+    3, // llvm.arm.ldrex
+    3, // llvm.arm.ldrexd
+    3, // llvm.arm.mcr
+    3, // llvm.arm.mcr2
+    3, // llvm.arm.mcrr
+    3, // llvm.arm.mcrr2
+    3, // llvm.arm.mrc
+    3, // llvm.arm.mrc2
+    3, // llvm.arm.mrrc
+    3, // llvm.arm.mrrc2
+    1, // llvm.arm.neon.aesd
+    1, // llvm.arm.neon.aese
+    1, // llvm.arm.neon.aesimc
+    1, // llvm.arm.neon.aesmc
+    1, // llvm.arm.neon.sha1c
+    1, // llvm.arm.neon.sha1h
+    1, // llvm.arm.neon.sha1m
+    1, // llvm.arm.neon.sha1p
+    1, // llvm.arm.neon.sha1su0
+    1, // llvm.arm.neon.sha1su1
+    1, // llvm.arm.neon.sha256h
+    1, // llvm.arm.neon.sha256h2
+    1, // llvm.arm.neon.sha256su0
+    1, // llvm.arm.neon.sha256su1
+    1, // llvm.arm.neon.vabds
+    1, // llvm.arm.neon.vabdu
+    1, // llvm.arm.neon.vabs
+    1, // llvm.arm.neon.vacge
+    1, // llvm.arm.neon.vacgt
+    1, // llvm.arm.neon.vbsl
+    1, // llvm.arm.neon.vcls
+    1, // llvm.arm.neon.vcvtas
+    1, // llvm.arm.neon.vcvtau
+    1, // llvm.arm.neon.vcvtfp2fxs
+    1, // llvm.arm.neon.vcvtfp2fxu
+    1, // llvm.arm.neon.vcvtfp2hf
+    1, // llvm.arm.neon.vcvtfxs2fp
+    1, // llvm.arm.neon.vcvtfxu2fp
+    1, // llvm.arm.neon.vcvthf2fp
+    1, // llvm.arm.neon.vcvtms
+    1, // llvm.arm.neon.vcvtmu
+    1, // llvm.arm.neon.vcvtns
+    1, // llvm.arm.neon.vcvtnu
+    1, // llvm.arm.neon.vcvtps
+    1, // llvm.arm.neon.vcvtpu
+    1, // llvm.arm.neon.vhadds
+    1, // llvm.arm.neon.vhaddu
+    1, // llvm.arm.neon.vhsubs
+    1, // llvm.arm.neon.vhsubu
+    2, // llvm.arm.neon.vld1
+    2, // llvm.arm.neon.vld2
+    2, // llvm.arm.neon.vld2lane
+    2, // llvm.arm.neon.vld3
+    2, // llvm.arm.neon.vld3lane
+    2, // llvm.arm.neon.vld4
+    2, // llvm.arm.neon.vld4lane
+    1, // llvm.arm.neon.vmaxnm
+    1, // llvm.arm.neon.vmaxs
+    1, // llvm.arm.neon.vmaxu
+    1, // llvm.arm.neon.vminnm
+    1, // llvm.arm.neon.vmins
+    1, // llvm.arm.neon.vminu
+    1, // llvm.arm.neon.vmullp
+    1, // llvm.arm.neon.vmulls
+    1, // llvm.arm.neon.vmullu
+    1, // llvm.arm.neon.vmulp
+    1, // llvm.arm.neon.vpadals
+    1, // llvm.arm.neon.vpadalu
+    1, // llvm.arm.neon.vpadd
+    1, // llvm.arm.neon.vpaddls
+    1, // llvm.arm.neon.vpaddlu
+    1, // llvm.arm.neon.vpmaxs
+    1, // llvm.arm.neon.vpmaxu
+    1, // llvm.arm.neon.vpmins
+    1, // llvm.arm.neon.vpminu
+    1, // llvm.arm.neon.vqabs
+    1, // llvm.arm.neon.vqadds
+    1, // llvm.arm.neon.vqaddu
+    1, // llvm.arm.neon.vqdmulh
+    1, // llvm.arm.neon.vqdmull
+    1, // llvm.arm.neon.vqmovns
+    1, // llvm.arm.neon.vqmovnsu
+    1, // llvm.arm.neon.vqmovnu
+    1, // llvm.arm.neon.vqneg
+    1, // llvm.arm.neon.vqrdmulh
+    1, // llvm.arm.neon.vqrshiftns
+    1, // llvm.arm.neon.vqrshiftnsu
+    1, // llvm.arm.neon.vqrshiftnu
+    1, // llvm.arm.neon.vqrshifts
+    1, // llvm.arm.neon.vqrshiftu
+    1, // llvm.arm.neon.vqshiftns
+    1, // llvm.arm.neon.vqshiftnsu
+    1, // llvm.arm.neon.vqshiftnu
+    1, // llvm.arm.neon.vqshifts
+    1, // llvm.arm.neon.vqshiftsu
+    1, // llvm.arm.neon.vqshiftu
+    1, // llvm.arm.neon.vqsubs
+    1, // llvm.arm.neon.vqsubu
+    1, // llvm.arm.neon.vraddhn
+    1, // llvm.arm.neon.vrecpe
+    1, // llvm.arm.neon.vrecps
+    1, // llvm.arm.neon.vrhadds
+    1, // llvm.arm.neon.vrhaddu
+    1, // llvm.arm.neon.vrinta
+    1, // llvm.arm.neon.vrintm
+    1, // llvm.arm.neon.vrintn
+    1, // llvm.arm.neon.vrintp
+    1, // llvm.arm.neon.vrintx
+    1, // llvm.arm.neon.vrintz
+    1, // llvm.arm.neon.vrshiftn
+    1, // llvm.arm.neon.vrshifts
+    1, // llvm.arm.neon.vrshiftu
+    1, // llvm.arm.neon.vrsqrte
+    1, // llvm.arm.neon.vrsqrts
+    1, // llvm.arm.neon.vrsubhn
+    1, // llvm.arm.neon.vshiftins
+    1, // llvm.arm.neon.vshifts
+    1, // llvm.arm.neon.vshiftu
+    21, // llvm.arm.neon.vst1
+    21, // llvm.arm.neon.vst2
+    21, // llvm.arm.neon.vst2lane
+    21, // llvm.arm.neon.vst3
+    21, // llvm.arm.neon.vst3lane
+    21, // llvm.arm.neon.vst4
+    21, // llvm.arm.neon.vst4lane
+    1, // llvm.arm.neon.vtbl1
+    1, // llvm.arm.neon.vtbl2
+    1, // llvm.arm.neon.vtbl3
+    1, // llvm.arm.neon.vtbl4
+    1, // llvm.arm.neon.vtbx1
+    1, // llvm.arm.neon.vtbx2
+    1, // llvm.arm.neon.vtbx3
+    1, // llvm.arm.neon.vtbx4
+    1, // llvm.arm.qadd
+    1, // llvm.arm.qadd16
+    1, // llvm.arm.qadd8
+    1, // llvm.arm.qasx
+    1, // llvm.arm.qsax
+    1, // llvm.arm.qsub
+    1, // llvm.arm.qsub16
+    1, // llvm.arm.qsub8
+    3, // llvm.arm.sadd16
+    3, // llvm.arm.sadd8
+    3, // llvm.arm.sasx
+    16, // llvm.arm.sel
+    3, // llvm.arm.set.fpscr
+    1, // llvm.arm.shadd16
+    1, // llvm.arm.shadd8
+    1, // llvm.arm.shasx
+    1, // llvm.arm.shsax
+    1, // llvm.arm.shsub16
+    1, // llvm.arm.shsub8
+    1, // llvm.arm.smlabb
+    1, // llvm.arm.smlabt
+    1, // llvm.arm.smlad
+    1, // llvm.arm.smladx
+    1, // llvm.arm.smlald
+    1, // llvm.arm.smlaldx
+    1, // llvm.arm.smlatb
+    1, // llvm.arm.smlatt
+    1, // llvm.arm.smlawb
+    1, // llvm.arm.smlawt
+    1, // llvm.arm.smlsd
+    1, // llvm.arm.smlsdx
+    1, // llvm.arm.smlsld
+    1, // llvm.arm.smlsldx
+    1, // llvm.arm.smuad
+    1, // llvm.arm.smuadx
+    1, // llvm.arm.smulbb
+    1, // llvm.arm.smulbt
+    1, // llvm.arm.smultb
+    1, // llvm.arm.smultt
+    1, // llvm.arm.smulwb
+    1, // llvm.arm.smulwt
+    1, // llvm.arm.smusd
+    1, // llvm.arm.smusdx
+    3, // llvm.arm.space
+    1, // llvm.arm.ssat
+    1, // llvm.arm.ssat16
+    3, // llvm.arm.ssax
+    3, // llvm.arm.ssub16
+    3, // llvm.arm.ssub8
+    3, // llvm.arm.stc
+    3, // llvm.arm.stc2
+    3, // llvm.arm.stc2l
+    3, // llvm.arm.stcl
+    3, // llvm.arm.stlex
+    3, // llvm.arm.stlexd
+    3, // llvm.arm.strex
+    3, // llvm.arm.strexd
+    1, // llvm.arm.sxtab16
+    1, // llvm.arm.sxtb16
+    3, // llvm.arm.uadd16
+    3, // llvm.arm.uadd8
+    3, // llvm.arm.uasx
+    1, // llvm.arm.uhadd16
+    1, // llvm.arm.uhadd8
+    1, // llvm.arm.uhasx
+    1, // llvm.arm.uhsax
+    1, // llvm.arm.uhsub16
+    1, // llvm.arm.uhsub8
+    3, // llvm.arm.undefined
+    1, // llvm.arm.uqadd16
+    1, // llvm.arm.uqadd8
+    1, // llvm.arm.uqasx
+    1, // llvm.arm.uqsax
+    1, // llvm.arm.uqsub16
+    1, // llvm.arm.uqsub8
+    1, // llvm.arm.usad8
+    1, // llvm.arm.usada8
+    1, // llvm.arm.usat
+    1, // llvm.arm.usat16
+    3, // llvm.arm.usax
+    3, // llvm.arm.usub16
+    3, // llvm.arm.usub8
+    1, // llvm.arm.uxtab16
+    1, // llvm.arm.uxtb16
+    1, // llvm.arm.vcvtr
+    1, // llvm.arm.vcvtru
+    16, // llvm.bpf.load.byte
+    16, // llvm.bpf.load.half
+    16, // llvm.bpf.load.word
+    3, // llvm.bpf.pseudo
+    1, // llvm.hexagon.A2.abs
+    1, // llvm.hexagon.A2.absp
+    1, // llvm.hexagon.A2.abssat
+    1, // llvm.hexagon.A2.add
+    1, // llvm.hexagon.A2.addh.h16.hh
+    1, // llvm.hexagon.A2.addh.h16.hl
+    1, // llvm.hexagon.A2.addh.h16.lh
+    1, // llvm.hexagon.A2.addh.h16.ll
+    1, // llvm.hexagon.A2.addh.h16.sat.hh
+    1, // llvm.hexagon.A2.addh.h16.sat.hl
+    1, // llvm.hexagon.A2.addh.h16.sat.lh
+    1, // llvm.hexagon.A2.addh.h16.sat.ll
+    1, // llvm.hexagon.A2.addh.l16.hl
+    1, // llvm.hexagon.A2.addh.l16.ll
+    1, // llvm.hexagon.A2.addh.l16.sat.hl
+    1, // llvm.hexagon.A2.addh.l16.sat.ll
+    1, // llvm.hexagon.A2.addi
+    1, // llvm.hexagon.A2.addp
+    1, // llvm.hexagon.A2.addpsat
+    1, // llvm.hexagon.A2.addsat
+    1, // llvm.hexagon.A2.addsp
+    1, // llvm.hexagon.A2.and
+    1, // llvm.hexagon.A2.andir
+    1, // llvm.hexagon.A2.andp
+    1, // llvm.hexagon.A2.aslh
+    1, // llvm.hexagon.A2.asrh
+    1, // llvm.hexagon.A2.combine.hh
+    1, // llvm.hexagon.A2.combine.hl
+    1, // llvm.hexagon.A2.combine.lh
+    1, // llvm.hexagon.A2.combine.ll
+    1, // llvm.hexagon.A2.combineii
+    1, // llvm.hexagon.A2.combinew
+    1, // llvm.hexagon.A2.max
+    1, // llvm.hexagon.A2.maxp
+    1, // llvm.hexagon.A2.maxu
+    1, // llvm.hexagon.A2.maxup
+    1, // llvm.hexagon.A2.min
+    1, // llvm.hexagon.A2.minp
+    1, // llvm.hexagon.A2.minu
+    1, // llvm.hexagon.A2.minup
+    1, // llvm.hexagon.A2.neg
+    1, // llvm.hexagon.A2.negp
+    1, // llvm.hexagon.A2.negsat
+    1, // llvm.hexagon.A2.not
+    1, // llvm.hexagon.A2.notp
+    1, // llvm.hexagon.A2.or
+    1, // llvm.hexagon.A2.orir
+    1, // llvm.hexagon.A2.orp
+    1, // llvm.hexagon.A2.roundsat
+    1, // llvm.hexagon.A2.sat
+    1, // llvm.hexagon.A2.satb
+    1, // llvm.hexagon.A2.sath
+    1, // llvm.hexagon.A2.satub
+    1, // llvm.hexagon.A2.satuh
+    1, // llvm.hexagon.A2.sub
+    1, // llvm.hexagon.A2.subh.h16.hh
+    1, // llvm.hexagon.A2.subh.h16.hl
+    1, // llvm.hexagon.A2.subh.h16.lh
+    1, // llvm.hexagon.A2.subh.h16.ll
+    1, // llvm.hexagon.A2.subh.h16.sat.hh
+    1, // llvm.hexagon.A2.subh.h16.sat.hl
+    1, // llvm.hexagon.A2.subh.h16.sat.lh
+    1, // llvm.hexagon.A2.subh.h16.sat.ll
+    1, // llvm.hexagon.A2.subh.l16.hl
+    1, // llvm.hexagon.A2.subh.l16.ll
+    1, // llvm.hexagon.A2.subh.l16.sat.hl
+    1, // llvm.hexagon.A2.subh.l16.sat.ll
+    1, // llvm.hexagon.A2.subp
+    1, // llvm.hexagon.A2.subri
+    1, // llvm.hexagon.A2.subsat
+    1, // llvm.hexagon.A2.svaddh
+    1, // llvm.hexagon.A2.svaddhs
+    1, // llvm.hexagon.A2.svadduhs
+    1, // llvm.hexagon.A2.svavgh
+    1, // llvm.hexagon.A2.svavghs
+    1, // llvm.hexagon.A2.svnavgh
+    1, // llvm.hexagon.A2.svsubh
+    1, // llvm.hexagon.A2.svsubhs
+    1, // llvm.hexagon.A2.svsubuhs
+    1, // llvm.hexagon.A2.swiz
+    1, // llvm.hexagon.A2.sxtb
+    1, // llvm.hexagon.A2.sxth
+    1, // llvm.hexagon.A2.sxtw
+    1, // llvm.hexagon.A2.tfr
+    1, // llvm.hexagon.A2.tfrih
+    1, // llvm.hexagon.A2.tfril
+    1, // llvm.hexagon.A2.tfrp
+    1, // llvm.hexagon.A2.tfrpi
+    1, // llvm.hexagon.A2.tfrsi
+    1, // llvm.hexagon.A2.vabsh
+    1, // llvm.hexagon.A2.vabshsat
+    1, // llvm.hexagon.A2.vabsw
+    1, // llvm.hexagon.A2.vabswsat
+    1, // llvm.hexagon.A2.vaddb.map
+    1, // llvm.hexagon.A2.vaddh
+    1, // llvm.hexagon.A2.vaddhs
+    1, // llvm.hexagon.A2.vaddub
+    1, // llvm.hexagon.A2.vaddubs
+    1, // llvm.hexagon.A2.vadduhs
+    1, // llvm.hexagon.A2.vaddw
+    1, // llvm.hexagon.A2.vaddws
+    1, // llvm.hexagon.A2.vavgh
+    1, // llvm.hexagon.A2.vavghcr
+    1, // llvm.hexagon.A2.vavghr
+    1, // llvm.hexagon.A2.vavgub
+    1, // llvm.hexagon.A2.vavgubr
+    1, // llvm.hexagon.A2.vavguh
+    1, // llvm.hexagon.A2.vavguhr
+    1, // llvm.hexagon.A2.vavguw
+    1, // llvm.hexagon.A2.vavguwr
+    1, // llvm.hexagon.A2.vavgw
+    1, // llvm.hexagon.A2.vavgwcr
+    1, // llvm.hexagon.A2.vavgwr
+    1, // llvm.hexagon.A2.vcmpbeq
+    1, // llvm.hexagon.A2.vcmpbgtu
+    1, // llvm.hexagon.A2.vcmpheq
+    1, // llvm.hexagon.A2.vcmphgt
+    1, // llvm.hexagon.A2.vcmphgtu
+    1, // llvm.hexagon.A2.vcmpweq
+    1, // llvm.hexagon.A2.vcmpwgt
+    1, // llvm.hexagon.A2.vcmpwgtu
+    1, // llvm.hexagon.A2.vconj
+    1, // llvm.hexagon.A2.vmaxb
+    1, // llvm.hexagon.A2.vmaxh
+    1, // llvm.hexagon.A2.vmaxub
+    1, // llvm.hexagon.A2.vmaxuh
+    1, // llvm.hexagon.A2.vmaxuw
+    1, // llvm.hexagon.A2.vmaxw
+    1, // llvm.hexagon.A2.vminb
+    1, // llvm.hexagon.A2.vminh
+    1, // llvm.hexagon.A2.vminub
+    1, // llvm.hexagon.A2.vminuh
+    1, // llvm.hexagon.A2.vminuw
+    1, // llvm.hexagon.A2.vminw
+    1, // llvm.hexagon.A2.vnavgh
+    1, // llvm.hexagon.A2.vnavghcr
+    1, // llvm.hexagon.A2.vnavghr
+    1, // llvm.hexagon.A2.vnavgw
+    1, // llvm.hexagon.A2.vnavgwcr
+    1, // llvm.hexagon.A2.vnavgwr
+    1, // llvm.hexagon.A2.vraddub
+    1, // llvm.hexagon.A2.vraddub.acc
+    1, // llvm.hexagon.A2.vrsadub
+    1, // llvm.hexagon.A2.vrsadub.acc
+    1, // llvm.hexagon.A2.vsubb.map
+    1, // llvm.hexagon.A2.vsubh
+    1, // llvm.hexagon.A2.vsubhs
+    1, // llvm.hexagon.A2.vsubub
+    1, // llvm.hexagon.A2.vsububs
+    1, // llvm.hexagon.A2.vsubuhs
+    1, // llvm.hexagon.A2.vsubw
+    1, // llvm.hexagon.A2.vsubws
+    1, // llvm.hexagon.A2.xor
+    1, // llvm.hexagon.A2.xorp
+    1, // llvm.hexagon.A2.zxtb
+    1, // llvm.hexagon.A2.zxth
+    1, // llvm.hexagon.A4.andn
+    1, // llvm.hexagon.A4.andnp
+    1, // llvm.hexagon.A4.bitsplit
+    1, // llvm.hexagon.A4.bitspliti
+    1, // llvm.hexagon.A4.boundscheck
+    1, // llvm.hexagon.A4.cmpbeq
+    1, // llvm.hexagon.A4.cmpbeqi
+    1, // llvm.hexagon.A4.cmpbgt
+    1, // llvm.hexagon.A4.cmpbgti
+    1, // llvm.hexagon.A4.cmpbgtu
+    1, // llvm.hexagon.A4.cmpbgtui
+    1, // llvm.hexagon.A4.cmpheq
+    1, // llvm.hexagon.A4.cmpheqi
+    1, // llvm.hexagon.A4.cmphgt
+    1, // llvm.hexagon.A4.cmphgti
+    1, // llvm.hexagon.A4.cmphgtu
+    1, // llvm.hexagon.A4.cmphgtui
+    1, // llvm.hexagon.A4.combineir
+    1, // llvm.hexagon.A4.combineri
+    1, // llvm.hexagon.A4.cround.ri
+    1, // llvm.hexagon.A4.cround.rr
+    1, // llvm.hexagon.A4.modwrapu
+    1, // llvm.hexagon.A4.orn
+    1, // llvm.hexagon.A4.ornp
+    1, // llvm.hexagon.A4.rcmpeq
+    1, // llvm.hexagon.A4.rcmpeqi
+    1, // llvm.hexagon.A4.rcmpneq
+    1, // llvm.hexagon.A4.rcmpneqi
+    1, // llvm.hexagon.A4.round.ri
+    1, // llvm.hexagon.A4.round.ri.sat
+    1, // llvm.hexagon.A4.round.rr
+    1, // llvm.hexagon.A4.round.rr.sat
+    1, // llvm.hexagon.A4.tlbmatch
+    1, // llvm.hexagon.A4.vcmpbeq.any
+    1, // llvm.hexagon.A4.vcmpbeqi
+    1, // llvm.hexagon.A4.vcmpbgt
+    1, // llvm.hexagon.A4.vcmpbgti
+    1, // llvm.hexagon.A4.vcmpbgtui
+    1, // llvm.hexagon.A4.vcmpheqi
+    1, // llvm.hexagon.A4.vcmphgti
+    1, // llvm.hexagon.A4.vcmphgtui
+    1, // llvm.hexagon.A4.vcmpweqi
+    1, // llvm.hexagon.A4.vcmpwgti
+    1, // llvm.hexagon.A4.vcmpwgtui
+    1, // llvm.hexagon.A4.vrmaxh
+    1, // llvm.hexagon.A4.vrmaxuh
+    1, // llvm.hexagon.A4.vrmaxuw
+    1, // llvm.hexagon.A4.vrmaxw
+    1, // llvm.hexagon.A4.vrminh
+    1, // llvm.hexagon.A4.vrminuh
+    1, // llvm.hexagon.A4.vrminuw
+    1, // llvm.hexagon.A4.vrminw
+    1, // llvm.hexagon.A5.vaddhubs
+    1, // llvm.hexagon.A6.vcmpbeq.notany
+    1, // llvm.hexagon.A6.vcmpbeq.notany.128B
+    1, // llvm.hexagon.C2.all8
+    1, // llvm.hexagon.C2.and
+    1, // llvm.hexagon.C2.andn
+    1, // llvm.hexagon.C2.any8
+    1, // llvm.hexagon.C2.bitsclr
+    1, // llvm.hexagon.C2.bitsclri
+    1, // llvm.hexagon.C2.bitsset
+    1, // llvm.hexagon.C2.cmpeq
+    1, // llvm.hexagon.C2.cmpeqi
+    1, // llvm.hexagon.C2.cmpeqp
+    1, // llvm.hexagon.C2.cmpgei
+    1, // llvm.hexagon.C2.cmpgeui
+    1, // llvm.hexagon.C2.cmpgt
+    1, // llvm.hexagon.C2.cmpgti
+    1, // llvm.hexagon.C2.cmpgtp
+    1, // llvm.hexagon.C2.cmpgtu
+    1, // llvm.hexagon.C2.cmpgtui
+    1, // llvm.hexagon.C2.cmpgtup
+    1, // llvm.hexagon.C2.cmplt
+    1, // llvm.hexagon.C2.cmpltu
+    1, // llvm.hexagon.C2.mask
+    1, // llvm.hexagon.C2.mux
+    1, // llvm.hexagon.C2.muxii
+    1, // llvm.hexagon.C2.muxir
+    1, // llvm.hexagon.C2.muxri
+    1, // llvm.hexagon.C2.not
+    1, // llvm.hexagon.C2.or
+    1, // llvm.hexagon.C2.orn
+    1, // llvm.hexagon.C2.pxfer.map
+    1, // llvm.hexagon.C2.tfrpr
+    1, // llvm.hexagon.C2.tfrrp
+    1, // llvm.hexagon.C2.vitpack
+    1, // llvm.hexagon.C2.vmux
+    1, // llvm.hexagon.C2.xor
+    1, // llvm.hexagon.C4.and.and
+    1, // llvm.hexagon.C4.and.andn
+    1, // llvm.hexagon.C4.and.or
+    1, // llvm.hexagon.C4.and.orn
+    1, // llvm.hexagon.C4.cmplte
+    1, // llvm.hexagon.C4.cmpltei
+    1, // llvm.hexagon.C4.cmplteu
+    1, // llvm.hexagon.C4.cmplteui
+    1, // llvm.hexagon.C4.cmpneq
+    1, // llvm.hexagon.C4.cmpneqi
+    1, // llvm.hexagon.C4.fastcorner9
+    1, // llvm.hexagon.C4.fastcorner9.not
+    1, // llvm.hexagon.C4.nbitsclr
+    1, // llvm.hexagon.C4.nbitsclri
+    1, // llvm.hexagon.C4.nbitsset
+    1, // llvm.hexagon.C4.or.and
+    1, // llvm.hexagon.C4.or.andn
+    1, // llvm.hexagon.C4.or.or
+    1, // llvm.hexagon.C4.or.orn
+    1, // llvm.hexagon.F2.conv.d2df
+    1, // llvm.hexagon.F2.conv.d2sf
+    1, // llvm.hexagon.F2.conv.df2d
+    1, // llvm.hexagon.F2.conv.df2d.chop
+    1, // llvm.hexagon.F2.conv.df2sf
+    1, // llvm.hexagon.F2.conv.df2ud
+    1, // llvm.hexagon.F2.conv.df2ud.chop
+    1, // llvm.hexagon.F2.conv.df2uw
+    1, // llvm.hexagon.F2.conv.df2uw.chop
+    1, // llvm.hexagon.F2.conv.df2w
+    1, // llvm.hexagon.F2.conv.df2w.chop
+    1, // llvm.hexagon.F2.conv.sf2d
+    1, // llvm.hexagon.F2.conv.sf2d.chop
+    1, // llvm.hexagon.F2.conv.sf2df
+    1, // llvm.hexagon.F2.conv.sf2ud
+    1, // llvm.hexagon.F2.conv.sf2ud.chop
+    1, // llvm.hexagon.F2.conv.sf2uw
+    1, // llvm.hexagon.F2.conv.sf2uw.chop
+    1, // llvm.hexagon.F2.conv.sf2w
+    1, // llvm.hexagon.F2.conv.sf2w.chop
+    1, // llvm.hexagon.F2.conv.ud2df
+    1, // llvm.hexagon.F2.conv.ud2sf
+    35, // llvm.hexagon.F2.conv.uw2df
+    35, // llvm.hexagon.F2.conv.uw2sf
+    35, // llvm.hexagon.F2.conv.w2df
+    35, // llvm.hexagon.F2.conv.w2sf
+    35, // llvm.hexagon.F2.dfclass
+    35, // llvm.hexagon.F2.dfcmpeq
+    35, // llvm.hexagon.F2.dfcmpge
+    35, // llvm.hexagon.F2.dfcmpgt
+    35, // llvm.hexagon.F2.dfcmpuo
+    35, // llvm.hexagon.F2.dfimm.n
+    35, // llvm.hexagon.F2.dfimm.p
+    35, // llvm.hexagon.F2.sfadd
+    35, // llvm.hexagon.F2.sfclass
+    35, // llvm.hexagon.F2.sfcmpeq
+    35, // llvm.hexagon.F2.sfcmpge
+    35, // llvm.hexagon.F2.sfcmpgt
+    35, // llvm.hexagon.F2.sfcmpuo
+    35, // llvm.hexagon.F2.sffixupd
+    35, // llvm.hexagon.F2.sffixupn
+    1, // llvm.hexagon.F2.sffixupr
+    35, // llvm.hexagon.F2.sffma
+    35, // llvm.hexagon.F2.sffma.lib
+    35, // llvm.hexagon.F2.sffma.sc
+    35, // llvm.hexagon.F2.sffms
+    35, // llvm.hexagon.F2.sffms.lib
+    35, // llvm.hexagon.F2.sfimm.n
+    35, // llvm.hexagon.F2.sfimm.p
+    35, // llvm.hexagon.F2.sfmax
+    35, // llvm.hexagon.F2.sfmin
+    35, // llvm.hexagon.F2.sfmpy
+    35, // llvm.hexagon.F2.sfsub
+    16, // llvm.hexagon.L2.loadrb.pbr
+    28, // llvm.hexagon.L2.loadrb.pci
+    19, // llvm.hexagon.L2.loadrb.pcr
+    16, // llvm.hexagon.L2.loadrd.pbr
+    28, // llvm.hexagon.L2.loadrd.pci
+    19, // llvm.hexagon.L2.loadrd.pcr
+    16, // llvm.hexagon.L2.loadrh.pbr
+    28, // llvm.hexagon.L2.loadrh.pci
+    19, // llvm.hexagon.L2.loadrh.pcr
+    16, // llvm.hexagon.L2.loadri.pbr
+    28, // llvm.hexagon.L2.loadri.pci
+    19, // llvm.hexagon.L2.loadri.pcr
+    16, // llvm.hexagon.L2.loadrub.pbr
+    28, // llvm.hexagon.L2.loadrub.pci
+    19, // llvm.hexagon.L2.loadrub.pcr
+    16, // llvm.hexagon.L2.loadruh.pbr
+    28, // llvm.hexagon.L2.loadruh.pci
+    19, // llvm.hexagon.L2.loadruh.pcr
+    18, // llvm.hexagon.L2.loadw.locked
+    18, // llvm.hexagon.L4.loadd.locked
+    1, // llvm.hexagon.M2.acci
+    1, // llvm.hexagon.M2.accii
+    1, // llvm.hexagon.M2.cmaci.s0
+    1, // llvm.hexagon.M2.cmacr.s0
+    1, // llvm.hexagon.M2.cmacs.s0
+    1, // llvm.hexagon.M2.cmacs.s1
+    1, // llvm.hexagon.M2.cmacsc.s0
+    1, // llvm.hexagon.M2.cmacsc.s1
+    1, // llvm.hexagon.M2.cmpyi.s0
+    1, // llvm.hexagon.M2.cmpyr.s0
+    1, // llvm.hexagon.M2.cmpyrs.s0
+    1, // llvm.hexagon.M2.cmpyrs.s1
+    1, // llvm.hexagon.M2.cmpyrsc.s0
+    1, // llvm.hexagon.M2.cmpyrsc.s1
+    1, // llvm.hexagon.M2.cmpys.s0
+    1, // llvm.hexagon.M2.cmpys.s1
+    1, // llvm.hexagon.M2.cmpysc.s0
+    1, // llvm.hexagon.M2.cmpysc.s1
+    1, // llvm.hexagon.M2.cnacs.s0
+    1, // llvm.hexagon.M2.cnacs.s1
+    1, // llvm.hexagon.M2.cnacsc.s0
+    1, // llvm.hexagon.M2.cnacsc.s1
+    1, // llvm.hexagon.M2.dpmpyss.acc.s0
+    1, // llvm.hexagon.M2.dpmpyss.nac.s0
+    1, // llvm.hexagon.M2.dpmpyss.rnd.s0
+    1, // llvm.hexagon.M2.dpmpyss.s0
+    1, // llvm.hexagon.M2.dpmpyuu.acc.s0
+    1, // llvm.hexagon.M2.dpmpyuu.nac.s0
+    1, // llvm.hexagon.M2.dpmpyuu.s0
+    1, // llvm.hexagon.M2.hmmpyh.rs1
+    1, // llvm.hexagon.M2.hmmpyh.s1
+    1, // llvm.hexagon.M2.hmmpyl.rs1
+    1, // llvm.hexagon.M2.hmmpyl.s1
+    1, // llvm.hexagon.M2.maci
+    1, // llvm.hexagon.M2.macsin
+    1, // llvm.hexagon.M2.macsip
+    1, // llvm.hexagon.M2.mmachs.rs0
+    1, // llvm.hexagon.M2.mmachs.rs1
+    1, // llvm.hexagon.M2.mmachs.s0
+    1, // llvm.hexagon.M2.mmachs.s1
+    1, // llvm.hexagon.M2.mmacls.rs0
+    1, // llvm.hexagon.M2.mmacls.rs1
+    1, // llvm.hexagon.M2.mmacls.s0
+    1, // llvm.hexagon.M2.mmacls.s1
+    1, // llvm.hexagon.M2.mmacuhs.rs0
+    1, // llvm.hexagon.M2.mmacuhs.rs1
+    1, // llvm.hexagon.M2.mmacuhs.s0
+    1, // llvm.hexagon.M2.mmacuhs.s1
+    1, // llvm.hexagon.M2.mmaculs.rs0
+    1, // llvm.hexagon.M2.mmaculs.rs1
+    1, // llvm.hexagon.M2.mmaculs.s0
+    1, // llvm.hexagon.M2.mmaculs.s1
+    1, // llvm.hexagon.M2.mmpyh.rs0
+    1, // llvm.hexagon.M2.mmpyh.rs1
+    1, // llvm.hexagon.M2.mmpyh.s0
+    1, // llvm.hexagon.M2.mmpyh.s1
+    1, // llvm.hexagon.M2.mmpyl.rs0
+    1, // llvm.hexagon.M2.mmpyl.rs1
+    1, // llvm.hexagon.M2.mmpyl.s0
+    1, // llvm.hexagon.M2.mmpyl.s1
+    1, // llvm.hexagon.M2.mmpyuh.rs0
+    1, // llvm.hexagon.M2.mmpyuh.rs1
+    1, // llvm.hexagon.M2.mmpyuh.s0
+    1, // llvm.hexagon.M2.mmpyuh.s1
+    1, // llvm.hexagon.M2.mmpyul.rs0
+    1, // llvm.hexagon.M2.mmpyul.rs1
+    1, // llvm.hexagon.M2.mmpyul.s0
+    1, // llvm.hexagon.M2.mmpyul.s1
+    1, // llvm.hexagon.M2.mpy.acc.hh.s0
+    1, // llvm.hexagon.M2.mpy.acc.hh.s1
+    1, // llvm.hexagon.M2.mpy.acc.hl.s0
+    1, // llvm.hexagon.M2.mpy.acc.hl.s1
+    1, // llvm.hexagon.M2.mpy.acc.lh.s0
+    1, // llvm.hexagon.M2.mpy.acc.lh.s1
+    1, // llvm.hexagon.M2.mpy.acc.ll.s0
+    1, // llvm.hexagon.M2.mpy.acc.ll.s1
+    1, // llvm.hexagon.M2.mpy.acc.sat.hh.s0
+    1, // llvm.hexagon.M2.mpy.acc.sat.hh.s1
+    1, // llvm.hexagon.M2.mpy.acc.sat.hl.s0
+    1, // llvm.hexagon.M2.mpy.acc.sat.hl.s1
+    1, // llvm.hexagon.M2.mpy.acc.sat.lh.s0
+    1, // llvm.hexagon.M2.mpy.acc.sat.lh.s1
+    1, // llvm.hexagon.M2.mpy.acc.sat.ll.s0
+    1, // llvm.hexagon.M2.mpy.acc.sat.ll.s1
+    1, // llvm.hexagon.M2.mpy.hh.s0
+    1, // llvm.hexagon.M2.mpy.hh.s1
+    1, // llvm.hexagon.M2.mpy.hl.s0
+    1, // llvm.hexagon.M2.mpy.hl.s1
+    1, // llvm.hexagon.M2.mpy.lh.s0
+    1, // llvm.hexagon.M2.mpy.lh.s1
+    1, // llvm.hexagon.M2.mpy.ll.s0
+    1, // llvm.hexagon.M2.mpy.ll.s1
+    1, // llvm.hexagon.M2.mpy.nac.hh.s0
+    1, // llvm.hexagon.M2.mpy.nac.hh.s1
+    1, // llvm.hexagon.M2.mpy.nac.hl.s0
+    1, // llvm.hexagon.M2.mpy.nac.hl.s1
+    1, // llvm.hexagon.M2.mpy.nac.lh.s0
+    1, // llvm.hexagon.M2.mpy.nac.lh.s1
+    1, // llvm.hexagon.M2.mpy.nac.ll.s0
+    1, // llvm.hexagon.M2.mpy.nac.ll.s1
+    1, // llvm.hexagon.M2.mpy.nac.sat.hh.s0
+    1, // llvm.hexagon.M2.mpy.nac.sat.hh.s1
+    1, // llvm.hexagon.M2.mpy.nac.sat.hl.s0
+    1, // llvm.hexagon.M2.mpy.nac.sat.hl.s1
+    1, // llvm.hexagon.M2.mpy.nac.sat.lh.s0
+    1, // llvm.hexagon.M2.mpy.nac.sat.lh.s1
+    1, // llvm.hexagon.M2.mpy.nac.sat.ll.s0
+    1, // llvm.hexagon.M2.mpy.nac.sat.ll.s1
+    1, // llvm.hexagon.M2.mpy.rnd.hh.s0
+    1, // llvm.hexagon.M2.mpy.rnd.hh.s1
+    1, // llvm.hexagon.M2.mpy.rnd.hl.s0
+    1, // llvm.hexagon.M2.mpy.rnd.hl.s1
+    1, // llvm.hexagon.M2.mpy.rnd.lh.s0
+    1, // llvm.hexagon.M2.mpy.rnd.lh.s1
+    1, // llvm.hexagon.M2.mpy.rnd.ll.s0
+    1, // llvm.hexagon.M2.mpy.rnd.ll.s1
+    1, // llvm.hexagon.M2.mpy.sat.hh.s0
+    1, // llvm.hexagon.M2.mpy.sat.hh.s1
+    1, // llvm.hexagon.M2.mpy.sat.hl.s0
+    1, // llvm.hexagon.M2.mpy.sat.hl.s1
+    1, // llvm.hexagon.M2.mpy.sat.lh.s0
+    1, // llvm.hexagon.M2.mpy.sat.lh.s1
+    1, // llvm.hexagon.M2.mpy.sat.ll.s0
+    1, // llvm.hexagon.M2.mpy.sat.ll.s1
+    1, // llvm.hexagon.M2.mpy.sat.rnd.hh.s0
+    1, // llvm.hexagon.M2.mpy.sat.rnd.hh.s1
+    1, // llvm.hexagon.M2.mpy.sat.rnd.hl.s0
+    1, // llvm.hexagon.M2.mpy.sat.rnd.hl.s1
+    1, // llvm.hexagon.M2.mpy.sat.rnd.lh.s0
+    1, // llvm.hexagon.M2.mpy.sat.rnd.lh.s1
+    1, // llvm.hexagon.M2.mpy.sat.rnd.ll.s0
+    1, // llvm.hexagon.M2.mpy.sat.rnd.ll.s1
+    1, // llvm.hexagon.M2.mpy.up
+    1, // llvm.hexagon.M2.mpy.up.s1
+    1, // llvm.hexagon.M2.mpy.up.s1.sat
+    1, // llvm.hexagon.M2.mpyd.acc.hh.s0
+    1, // llvm.hexagon.M2.mpyd.acc.hh.s1
+    1, // llvm.hexagon.M2.mpyd.acc.hl.s0
+    1, // llvm.hexagon.M2.mpyd.acc.hl.s1
+    1, // llvm.hexagon.M2.mpyd.acc.lh.s0
+    1, // llvm.hexagon.M2.mpyd.acc.lh.s1
+    1, // llvm.hexagon.M2.mpyd.acc.ll.s0
+    1, // llvm.hexagon.M2.mpyd.acc.ll.s1
+    1, // llvm.hexagon.M2.mpyd.hh.s0
+    1, // llvm.hexagon.M2.mpyd.hh.s1
+    1, // llvm.hexagon.M2.mpyd.hl.s0
+    1, // llvm.hexagon.M2.mpyd.hl.s1
+    1, // llvm.hexagon.M2.mpyd.lh.s0
+    1, // llvm.hexagon.M2.mpyd.lh.s1
+    1, // llvm.hexagon.M2.mpyd.ll.s0
+    1, // llvm.hexagon.M2.mpyd.ll.s1
+    1, // llvm.hexagon.M2.mpyd.nac.hh.s0
+    1, // llvm.hexagon.M2.mpyd.nac.hh.s1
+    1, // llvm.hexagon.M2.mpyd.nac.hl.s0
+    1, // llvm.hexagon.M2.mpyd.nac.hl.s1
+    1, // llvm.hexagon.M2.mpyd.nac.lh.s0
+    1, // llvm.hexagon.M2.mpyd.nac.lh.s1
+    1, // llvm.hexagon.M2.mpyd.nac.ll.s0
+    1, // llvm.hexagon.M2.mpyd.nac.ll.s1
+    1, // llvm.hexagon.M2.mpyd.rnd.hh.s0
+    1, // llvm.hexagon.M2.mpyd.rnd.hh.s1
+    1, // llvm.hexagon.M2.mpyd.rnd.hl.s0
+    1, // llvm.hexagon.M2.mpyd.rnd.hl.s1
+    1, // llvm.hexagon.M2.mpyd.rnd.lh.s0
+    1, // llvm.hexagon.M2.mpyd.rnd.lh.s1
+    1, // llvm.hexagon.M2.mpyd.rnd.ll.s0
+    1, // llvm.hexagon.M2.mpyd.rnd.ll.s1
+    1, // llvm.hexagon.M2.mpyi
+    1, // llvm.hexagon.M2.mpysmi
+    1, // llvm.hexagon.M2.mpysu.up
+    1, // llvm.hexagon.M2.mpyu.acc.hh.s0
+    1, // llvm.hexagon.M2.mpyu.acc.hh.s1
+    1, // llvm.hexagon.M2.mpyu.acc.hl.s0
+    1, // llvm.hexagon.M2.mpyu.acc.hl.s1
+    1, // llvm.hexagon.M2.mpyu.acc.lh.s0
+    1, // llvm.hexagon.M2.mpyu.acc.lh.s1
+    1, // llvm.hexagon.M2.mpyu.acc.ll.s0
+    1, // llvm.hexagon.M2.mpyu.acc.ll.s1
+    1, // llvm.hexagon.M2.mpyu.hh.s0
+    1, // llvm.hexagon.M2.mpyu.hh.s1
+    1, // llvm.hexagon.M2.mpyu.hl.s0
+    1, // llvm.hexagon.M2.mpyu.hl.s1
+    1, // llvm.hexagon.M2.mpyu.lh.s0
+    1, // llvm.hexagon.M2.mpyu.lh.s1
+    1, // llvm.hexagon.M2.mpyu.ll.s0
+    1, // llvm.hexagon.M2.mpyu.ll.s1
+    1, // llvm.hexagon.M2.mpyu.nac.hh.s0
+    1, // llvm.hexagon.M2.mpyu.nac.hh.s1
+    1, // llvm.hexagon.M2.mpyu.nac.hl.s0
+    1, // llvm.hexagon.M2.mpyu.nac.hl.s1
+    1, // llvm.hexagon.M2.mpyu.nac.lh.s0
+    1, // llvm.hexagon.M2.mpyu.nac.lh.s1
+    1, // llvm.hexagon.M2.mpyu.nac.ll.s0
+    1, // llvm.hexagon.M2.mpyu.nac.ll.s1
+    1, // llvm.hexagon.M2.mpyu.up
+    1, // llvm.hexagon.M2.mpyud.acc.hh.s0
+    1, // llvm.hexagon.M2.mpyud.acc.hh.s1
+    1, // llvm.hexagon.M2.mpyud.acc.hl.s0
+    1, // llvm.hexagon.M2.mpyud.acc.hl.s1
+    1, // llvm.hexagon.M2.mpyud.acc.lh.s0
+    1, // llvm.hexagon.M2.mpyud.acc.lh.s1
+    1, // llvm.hexagon.M2.mpyud.acc.ll.s0
+    1, // llvm.hexagon.M2.mpyud.acc.ll.s1
+    1, // llvm.hexagon.M2.mpyud.hh.s0
+    1, // llvm.hexagon.M2.mpyud.hh.s1
+    1, // llvm.hexagon.M2.mpyud.hl.s0
+    1, // llvm.hexagon.M2.mpyud.hl.s1
+    1, // llvm.hexagon.M2.mpyud.lh.s0
+    1, // llvm.hexagon.M2.mpyud.lh.s1
+    1, // llvm.hexagon.M2.mpyud.ll.s0
+    1, // llvm.hexagon.M2.mpyud.ll.s1
+    1, // llvm.hexagon.M2.mpyud.nac.hh.s0
+    1, // llvm.hexagon.M2.mpyud.nac.hh.s1
+    1, // llvm.hexagon.M2.mpyud.nac.hl.s0
+    1, // llvm.hexagon.M2.mpyud.nac.hl.s1
+    1, // llvm.hexagon.M2.mpyud.nac.lh.s0
+    1, // llvm.hexagon.M2.mpyud.nac.lh.s1
+    1, // llvm.hexagon.M2.mpyud.nac.ll.s0
+    1, // llvm.hexagon.M2.mpyud.nac.ll.s1
+    1, // llvm.hexagon.M2.mpyui
+    1, // llvm.hexagon.M2.nacci
+    1, // llvm.hexagon.M2.naccii
+    1, // llvm.hexagon.M2.subacc
+    1, // llvm.hexagon.M2.vabsdiffh
+    1, // llvm.hexagon.M2.vabsdiffw
+    1, // llvm.hexagon.M2.vcmac.s0.sat.i
+    1, // llvm.hexagon.M2.vcmac.s0.sat.r
+    1, // llvm.hexagon.M2.vcmpy.s0.sat.i
+    1, // llvm.hexagon.M2.vcmpy.s0.sat.r
+    1, // llvm.hexagon.M2.vcmpy.s1.sat.i
+    1, // llvm.hexagon.M2.vcmpy.s1.sat.r
+    1, // llvm.hexagon.M2.vdmacs.s0
+    1, // llvm.hexagon.M2.vdmacs.s1
+    1, // llvm.hexagon.M2.vdmpyrs.s0
+    1, // llvm.hexagon.M2.vdmpyrs.s1
+    1, // llvm.hexagon.M2.vdmpys.s0
+    1, // llvm.hexagon.M2.vdmpys.s1
+    1, // llvm.hexagon.M2.vmac2
+    1, // llvm.hexagon.M2.vmac2es
+    1, // llvm.hexagon.M2.vmac2es.s0
+    1, // llvm.hexagon.M2.vmac2es.s1
+    1, // llvm.hexagon.M2.vmac2s.s0
+    1, // llvm.hexagon.M2.vmac2s.s1
+    1, // llvm.hexagon.M2.vmac2su.s0
+    1, // llvm.hexagon.M2.vmac2su.s1
+    1, // llvm.hexagon.M2.vmpy2es.s0
+    1, // llvm.hexagon.M2.vmpy2es.s1
+    1, // llvm.hexagon.M2.vmpy2s.s0
+    1, // llvm.hexagon.M2.vmpy2s.s0pack
+    1, // llvm.hexagon.M2.vmpy2s.s1
+    1, // llvm.hexagon.M2.vmpy2s.s1pack
+    1, // llvm.hexagon.M2.vmpy2su.s0
+    1, // llvm.hexagon.M2.vmpy2su.s1
+    1, // llvm.hexagon.M2.vraddh
+    1, // llvm.hexagon.M2.vradduh
+    1, // llvm.hexagon.M2.vrcmaci.s0
+    1, // llvm.hexagon.M2.vrcmaci.s0c
+    1, // llvm.hexagon.M2.vrcmacr.s0
+    1, // llvm.hexagon.M2.vrcmacr.s0c
+    1, // llvm.hexagon.M2.vrcmpyi.s0
+    1, // llvm.hexagon.M2.vrcmpyi.s0c
+    1, // llvm.hexagon.M2.vrcmpyr.s0
+    1, // llvm.hexagon.M2.vrcmpyr.s0c
+    1, // llvm.hexagon.M2.vrcmpys.acc.s1
+    1, // llvm.hexagon.M2.vrcmpys.s1
+    1, // llvm.hexagon.M2.vrcmpys.s1rp
+    1, // llvm.hexagon.M2.vrmac.s0
+    1, // llvm.hexagon.M2.vrmpy.s0
+    1, // llvm.hexagon.M2.xor.xacc
+    1, // llvm.hexagon.M4.and.and
+    1, // llvm.hexagon.M4.and.andn
+    1, // llvm.hexagon.M4.and.or
+    1, // llvm.hexagon.M4.and.xor
+    1, // llvm.hexagon.M4.cmpyi.wh
+    1, // llvm.hexagon.M4.cmpyi.whc
+    1, // llvm.hexagon.M4.cmpyr.wh
+    1, // llvm.hexagon.M4.cmpyr.whc
+    1, // llvm.hexagon.M4.mac.up.s1.sat
+    1, // llvm.hexagon.M4.mpyri.addi
+    1, // llvm.hexagon.M4.mpyri.addr
+    1, // llvm.hexagon.M4.mpyri.addr.u2
+    1, // llvm.hexagon.M4.mpyrr.addi
+    1, // llvm.hexagon.M4.mpyrr.addr
+    1, // llvm.hexagon.M4.nac.up.s1.sat
+    1, // llvm.hexagon.M4.or.and
+    1, // llvm.hexagon.M4.or.andn
+    1, // llvm.hexagon.M4.or.or
+    1, // llvm.hexagon.M4.or.xor
+    1, // llvm.hexagon.M4.pmpyw
+    1, // llvm.hexagon.M4.pmpyw.acc
+    1, // llvm.hexagon.M4.vpmpyh
+    1, // llvm.hexagon.M4.vpmpyh.acc
+    1, // llvm.hexagon.M4.vrmpyeh.acc.s0
+    1, // llvm.hexagon.M4.vrmpyeh.acc.s1
+    1, // llvm.hexagon.M4.vrmpyeh.s0
+    1, // llvm.hexagon.M4.vrmpyeh.s1
+    1, // llvm.hexagon.M4.vrmpyoh.acc.s0
+    1, // llvm.hexagon.M4.vrmpyoh.acc.s1
+    1, // llvm.hexagon.M4.vrmpyoh.s0
+    1, // llvm.hexagon.M4.vrmpyoh.s1
+    1, // llvm.hexagon.M4.xor.and
+    1, // llvm.hexagon.M4.xor.andn
+    1, // llvm.hexagon.M4.xor.or
+    1, // llvm.hexagon.M4.xor.xacc
+    1, // llvm.hexagon.M5.vdmacbsu
+    1, // llvm.hexagon.M5.vdmpybsu
+    1, // llvm.hexagon.M5.vmacbsu
+    1, // llvm.hexagon.M5.vmacbuu
+    1, // llvm.hexagon.M5.vmpybsu
+    1, // llvm.hexagon.M5.vmpybuu
+    1, // llvm.hexagon.M5.vrmacbsu
+    1, // llvm.hexagon.M5.vrmacbuu
+    1, // llvm.hexagon.M5.vrmpybsu
+    1, // llvm.hexagon.M5.vrmpybuu
+    1, // llvm.hexagon.M6.vabsdiffb
+    1, // llvm.hexagon.M6.vabsdiffub
+    1, // llvm.hexagon.S2.addasl.rrri
+    1, // llvm.hexagon.S2.asl.i.p
+    1, // llvm.hexagon.S2.asl.i.p.acc
+    1, // llvm.hexagon.S2.asl.i.p.and
+    1, // llvm.hexagon.S2.asl.i.p.nac
+    1, // llvm.hexagon.S2.asl.i.p.or
+    1, // llvm.hexagon.S2.asl.i.p.xacc
+    1, // llvm.hexagon.S2.asl.i.r
+    1, // llvm.hexagon.S2.asl.i.r.acc
+    1, // llvm.hexagon.S2.asl.i.r.and
+    1, // llvm.hexagon.S2.asl.i.r.nac
+    1, // llvm.hexagon.S2.asl.i.r.or
+    1, // llvm.hexagon.S2.asl.i.r.sat
+    1, // llvm.hexagon.S2.asl.i.r.xacc
+    1, // llvm.hexagon.S2.asl.i.vh
+    1, // llvm.hexagon.S2.asl.i.vw
+    1, // llvm.hexagon.S2.asl.r.p
+    1, // llvm.hexagon.S2.asl.r.p.acc
+    1, // llvm.hexagon.S2.asl.r.p.and
+    1, // llvm.hexagon.S2.asl.r.p.nac
+    1, // llvm.hexagon.S2.asl.r.p.or
+    1, // llvm.hexagon.S2.asl.r.p.xor
+    1, // llvm.hexagon.S2.asl.r.r
+    1, // llvm.hexagon.S2.asl.r.r.acc
+    1, // llvm.hexagon.S2.asl.r.r.and
+    1, // llvm.hexagon.S2.asl.r.r.nac
+    1, // llvm.hexagon.S2.asl.r.r.or
+    1, // llvm.hexagon.S2.asl.r.r.sat
+    1, // llvm.hexagon.S2.asl.r.vh
+    1, // llvm.hexagon.S2.asl.r.vw
+    1, // llvm.hexagon.S2.asr.i.p
+    1, // llvm.hexagon.S2.asr.i.p.acc
+    1, // llvm.hexagon.S2.asr.i.p.and
+    1, // llvm.hexagon.S2.asr.i.p.nac
+    1, // llvm.hexagon.S2.asr.i.p.or
+    1, // llvm.hexagon.S2.asr.i.p.rnd
+    1, // llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
+    1, // llvm.hexagon.S2.asr.i.r
+    1, // llvm.hexagon.S2.asr.i.r.acc
+    1, // llvm.hexagon.S2.asr.i.r.and
+    1, // llvm.hexagon.S2.asr.i.r.nac
+    1, // llvm.hexagon.S2.asr.i.r.or
+    1, // llvm.hexagon.S2.asr.i.r.rnd
+    1, // llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
+    1, // llvm.hexagon.S2.asr.i.svw.trun
+    1, // llvm.hexagon.S2.asr.i.vh
+    1, // llvm.hexagon.S2.asr.i.vw
+    1, // llvm.hexagon.S2.asr.r.p
+    1, // llvm.hexagon.S2.asr.r.p.acc
+    1, // llvm.hexagon.S2.asr.r.p.and
+    1, // llvm.hexagon.S2.asr.r.p.nac
+    1, // llvm.hexagon.S2.asr.r.p.or
+    1, // llvm.hexagon.S2.asr.r.p.xor
+    1, // llvm.hexagon.S2.asr.r.r
+    1, // llvm.hexagon.S2.asr.r.r.acc
+    1, // llvm.hexagon.S2.asr.r.r.and
+    1, // llvm.hexagon.S2.asr.r.r.nac
+    1, // llvm.hexagon.S2.asr.r.r.or
+    1, // llvm.hexagon.S2.asr.r.r.sat
+    1, // llvm.hexagon.S2.asr.r.svw.trun
+    1, // llvm.hexagon.S2.asr.r.vh
+    1, // llvm.hexagon.S2.asr.r.vw
+    1, // llvm.hexagon.S2.brev
+    1, // llvm.hexagon.S2.brevp
+    1, // llvm.hexagon.S2.cabacencbin
+    1, // llvm.hexagon.S2.cl0
+    1, // llvm.hexagon.S2.cl0p
+    1, // llvm.hexagon.S2.cl1
+    1, // llvm.hexagon.S2.cl1p
+    1, // llvm.hexagon.S2.clb
+    1, // llvm.hexagon.S2.clbnorm
+    1, // llvm.hexagon.S2.clbp
+    1, // llvm.hexagon.S2.clrbit.i
+    1, // llvm.hexagon.S2.clrbit.r
+    1, // llvm.hexagon.S2.ct0
+    1, // llvm.hexagon.S2.ct0p
+    1, // llvm.hexagon.S2.ct1
+    1, // llvm.hexagon.S2.ct1p
+    1, // llvm.hexagon.S2.deinterleave
+    1, // llvm.hexagon.S2.extractu
+    1, // llvm.hexagon.S2.extractu.rp
+    1, // llvm.hexagon.S2.extractup
+    1, // llvm.hexagon.S2.extractup.rp
+    1, // llvm.hexagon.S2.insert
+    1, // llvm.hexagon.S2.insert.rp
+    1, // llvm.hexagon.S2.insertp
+    1, // llvm.hexagon.S2.insertp.rp
+    1, // llvm.hexagon.S2.interleave
+    1, // llvm.hexagon.S2.lfsp
+    1, // llvm.hexagon.S2.lsl.r.p
+    1, // llvm.hexagon.S2.lsl.r.p.acc
+    1, // llvm.hexagon.S2.lsl.r.p.and
+    1, // llvm.hexagon.S2.lsl.r.p.nac
+    1, // llvm.hexagon.S2.lsl.r.p.or
+    1, // llvm.hexagon.S2.lsl.r.p.xor
+    1, // llvm.hexagon.S2.lsl.r.r
+    1, // llvm.hexagon.S2.lsl.r.r.acc
+    1, // llvm.hexagon.S2.lsl.r.r.and
+    1, // llvm.hexagon.S2.lsl.r.r.nac
+    1, // llvm.hexagon.S2.lsl.r.r.or
+    1, // llvm.hexagon.S2.lsl.r.vh
+    1, // llvm.hexagon.S2.lsl.r.vw
+    1, // llvm.hexagon.S2.lsr.i.p
+    1, // llvm.hexagon.S2.lsr.i.p.acc
+    1, // llvm.hexagon.S2.lsr.i.p.and
+    1, // llvm.hexagon.S2.lsr.i.p.nac
+    1, // llvm.hexagon.S2.lsr.i.p.or
+    1, // llvm.hexagon.S2.lsr.i.p.xacc
+    1, // llvm.hexagon.S2.lsr.i.r
+    1, // llvm.hexagon.S2.lsr.i.r.acc
+    1, // llvm.hexagon.S2.lsr.i.r.and
+    1, // llvm.hexagon.S2.lsr.i.r.nac
+    1, // llvm.hexagon.S2.lsr.i.r.or
+    1, // llvm.hexagon.S2.lsr.i.r.xacc
+    1, // llvm.hexagon.S2.lsr.i.vh
+    1, // llvm.hexagon.S2.lsr.i.vw
+    1, // llvm.hexagon.S2.lsr.r.p
+    1, // llvm.hexagon.S2.lsr.r.p.acc
+    1, // llvm.hexagon.S2.lsr.r.p.and
+    1, // llvm.hexagon.S2.lsr.r.p.nac
+    1, // llvm.hexagon.S2.lsr.r.p.or
+    1, // llvm.hexagon.S2.lsr.r.p.xor
+    1, // llvm.hexagon.S2.lsr.r.r
+    1, // llvm.hexagon.S2.lsr.r.r.acc
+    1, // llvm.hexagon.S2.lsr.r.r.and
+    1, // llvm.hexagon.S2.lsr.r.r.nac
+    1, // llvm.hexagon.S2.lsr.r.r.or
+    1, // llvm.hexagon.S2.lsr.r.vh
+    1, // llvm.hexagon.S2.lsr.r.vw
+    1, // llvm.hexagon.S2.packhl
+    1, // llvm.hexagon.S2.parityp
+    1, // llvm.hexagon.S2.setbit.i
+    1, // llvm.hexagon.S2.setbit.r
+    1, // llvm.hexagon.S2.shuffeb
+    1, // llvm.hexagon.S2.shuffeh
+    1, // llvm.hexagon.S2.shuffob
+    1, // llvm.hexagon.S2.shuffoh
+    32, // llvm.hexagon.S2.storerb.pbr
+    29, // llvm.hexagon.S2.storerb.pci
+    28, // llvm.hexagon.S2.storerb.pcr
+    32, // llvm.hexagon.S2.storerd.pbr
+    29, // llvm.hexagon.S2.storerd.pci
+    28, // llvm.hexagon.S2.storerd.pcr
+    32, // llvm.hexagon.S2.storerf.pbr
+    29, // llvm.hexagon.S2.storerf.pci
+    28, // llvm.hexagon.S2.storerf.pcr
+    32, // llvm.hexagon.S2.storerh.pbr
+    29, // llvm.hexagon.S2.storerh.pci
+    28, // llvm.hexagon.S2.storerh.pcr
+    32, // llvm.hexagon.S2.storeri.pbr
+    29, // llvm.hexagon.S2.storeri.pci
+    28, // llvm.hexagon.S2.storeri.pcr
+    18, // llvm.hexagon.S2.storew.locked
+    1, // llvm.hexagon.S2.svsathb
+    1, // llvm.hexagon.S2.svsathub
+    1, // llvm.hexagon.S2.tableidxb.goodsyntax
+    1, // llvm.hexagon.S2.tableidxd.goodsyntax
+    1, // llvm.hexagon.S2.tableidxh.goodsyntax
+    1, // llvm.hexagon.S2.tableidxw.goodsyntax
+    1, // llvm.hexagon.S2.togglebit.i
+    1, // llvm.hexagon.S2.togglebit.r
+    1, // llvm.hexagon.S2.tstbit.i
+    1, // llvm.hexagon.S2.tstbit.r
+    1, // llvm.hexagon.S2.valignib
+    1, // llvm.hexagon.S2.valignrb
+    1, // llvm.hexagon.S2.vcnegh
+    1, // llvm.hexagon.S2.vcrotate
+    1, // llvm.hexagon.S2.vrcnegh
+    1, // llvm.hexagon.S2.vrndpackwh
+    1, // llvm.hexagon.S2.vrndpackwhs
+    1, // llvm.hexagon.S2.vsathb
+    1, // llvm.hexagon.S2.vsathb.nopack
+    1, // llvm.hexagon.S2.vsathub
+    1, // llvm.hexagon.S2.vsathub.nopack
+    1, // llvm.hexagon.S2.vsatwh
+    1, // llvm.hexagon.S2.vsatwh.nopack
+    1, // llvm.hexagon.S2.vsatwuh
+    1, // llvm.hexagon.S2.vsatwuh.nopack
+    1, // llvm.hexagon.S2.vsplatrb
+    1, // llvm.hexagon.S2.vsplatrh
+    1, // llvm.hexagon.S2.vspliceib
+    1, // llvm.hexagon.S2.vsplicerb
+    1, // llvm.hexagon.S2.vsxtbh
+    1, // llvm.hexagon.S2.vsxthw
+    1, // llvm.hexagon.S2.vtrunehb
+    1, // llvm.hexagon.S2.vtrunewh
+    1, // llvm.hexagon.S2.vtrunohb
+    1, // llvm.hexagon.S2.vtrunowh
+    1, // llvm.hexagon.S2.vzxtbh
+    1, // llvm.hexagon.S2.vzxthw
+    1, // llvm.hexagon.S4.addaddi
+    1, // llvm.hexagon.S4.addi.asl.ri
+    1, // llvm.hexagon.S4.addi.lsr.ri
+    1, // llvm.hexagon.S4.andi.asl.ri
+    1, // llvm.hexagon.S4.andi.lsr.ri
+    1, // llvm.hexagon.S4.clbaddi
+    1, // llvm.hexagon.S4.clbpaddi
+    1, // llvm.hexagon.S4.clbpnorm
+    1, // llvm.hexagon.S4.extract
+    1, // llvm.hexagon.S4.extract.rp
+    1, // llvm.hexagon.S4.extractp
+    1, // llvm.hexagon.S4.extractp.rp
+    1, // llvm.hexagon.S4.lsli
+    1, // llvm.hexagon.S4.ntstbit.i
+    1, // llvm.hexagon.S4.ntstbit.r
+    1, // llvm.hexagon.S4.or.andi
+    1, // llvm.hexagon.S4.or.andix
+    1, // llvm.hexagon.S4.or.ori
+    1, // llvm.hexagon.S4.ori.asl.ri
+    1, // llvm.hexagon.S4.ori.lsr.ri
+    1, // llvm.hexagon.S4.parity
+    18, // llvm.hexagon.S4.stored.locked
+    1, // llvm.hexagon.S4.subaddi
+    1, // llvm.hexagon.S4.subi.asl.ri
+    1, // llvm.hexagon.S4.subi.lsr.ri
+    1, // llvm.hexagon.S4.vrcrotate
+    1, // llvm.hexagon.S4.vrcrotate.acc
+    1, // llvm.hexagon.S4.vxaddsubh
+    1, // llvm.hexagon.S4.vxaddsubhr
+    1, // llvm.hexagon.S4.vxaddsubw
+    1, // llvm.hexagon.S4.vxsubaddh
+    1, // llvm.hexagon.S4.vxsubaddhr
+    1, // llvm.hexagon.S4.vxsubaddw
+    1, // llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
+    1, // llvm.hexagon.S5.asrhub.sat
+    1, // llvm.hexagon.S5.popcountp
+    1, // llvm.hexagon.S5.vasrhrnd.goodsyntax
+    1, // llvm.hexagon.S6.rol.i.p
+    1, // llvm.hexagon.S6.rol.i.p.acc
+    1, // llvm.hexagon.S6.rol.i.p.and
+    1, // llvm.hexagon.S6.rol.i.p.nac
+    1, // llvm.hexagon.S6.rol.i.p.or
+    1, // llvm.hexagon.S6.rol.i.p.xacc
+    1, // llvm.hexagon.S6.rol.i.r
+    1, // llvm.hexagon.S6.rol.i.r.acc
+    1, // llvm.hexagon.S6.rol.i.r.and
+    1, // llvm.hexagon.S6.rol.i.r.nac
+    1, // llvm.hexagon.S6.rol.i.r.or
+    1, // llvm.hexagon.S6.rol.i.r.xacc
+    1, // llvm.hexagon.S6.vsplatrbp
+    1, // llvm.hexagon.S6.vtrunehb.ppp
+    1, // llvm.hexagon.S6.vtrunohb.ppp
+    1, // llvm.hexagon.V6.extractw
+    1, // llvm.hexagon.V6.extractw.128B
+    1, // llvm.hexagon.V6.hi
+    1, // llvm.hexagon.V6.hi.128B
+    1, // llvm.hexagon.V6.lo
+    1, // llvm.hexagon.V6.lo.128B
+    1, // llvm.hexagon.V6.lvsplatb
+    1, // llvm.hexagon.V6.lvsplatb.128B
+    1, // llvm.hexagon.V6.lvsplath
+    1, // llvm.hexagon.V6.lvsplath.128B
+    1, // llvm.hexagon.V6.lvsplatw
+    1, // llvm.hexagon.V6.lvsplatw.128B
+    1, // llvm.hexagon.V6.pred.and
+    1, // llvm.hexagon.V6.pred.and.128B
+    1, // llvm.hexagon.V6.pred.and.n
+    1, // llvm.hexagon.V6.pred.and.n.128B
+    1, // llvm.hexagon.V6.pred.not
+    1, // llvm.hexagon.V6.pred.not.128B
+    1, // llvm.hexagon.V6.pred.or
+    1, // llvm.hexagon.V6.pred.or.128B
+    1, // llvm.hexagon.V6.pred.or.n
+    1, // llvm.hexagon.V6.pred.or.n.128B
+    1, // llvm.hexagon.V6.pred.scalar2
+    1, // llvm.hexagon.V6.pred.scalar2.128B
+    1, // llvm.hexagon.V6.pred.scalar2v2
+    1, // llvm.hexagon.V6.pred.scalar2v2.128B
+    1, // llvm.hexagon.V6.pred.xor
+    1, // llvm.hexagon.V6.pred.xor.128B
+    1, // llvm.hexagon.V6.shuffeqh
+    1, // llvm.hexagon.V6.shuffeqh.128B
+    1, // llvm.hexagon.V6.shuffeqw
+    1, // llvm.hexagon.V6.shuffeqw.128B
+    21, // llvm.hexagon.V6.vS32b.nqpred.ai
+    21, // llvm.hexagon.V6.vS32b.nqpred.ai.128B
+    21, // llvm.hexagon.V6.vS32b.nt.nqpred.ai
+    21, // llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B
+    21, // llvm.hexagon.V6.vS32b.nt.qpred.ai
+    21, // llvm.hexagon.V6.vS32b.nt.qpred.ai.128B
+    21, // llvm.hexagon.V6.vS32b.qpred.ai
+    21, // llvm.hexagon.V6.vS32b.qpred.ai.128B
+    1, // llvm.hexagon.V6.vabsb
+    1, // llvm.hexagon.V6.vabsb.128B
+    1, // llvm.hexagon.V6.vabsb.sat
+    1, // llvm.hexagon.V6.vabsb.sat.128B
+    1, // llvm.hexagon.V6.vabsdiffh
+    1, // llvm.hexagon.V6.vabsdiffh.128B
+    1, // llvm.hexagon.V6.vabsdiffub
+    1, // llvm.hexagon.V6.vabsdiffub.128B
+    1, // llvm.hexagon.V6.vabsdiffuh
+    1, // llvm.hexagon.V6.vabsdiffuh.128B
+    1, // llvm.hexagon.V6.vabsdiffw
+    1, // llvm.hexagon.V6.vabsdiffw.128B
+    1, // llvm.hexagon.V6.vabsh
+    1, // llvm.hexagon.V6.vabsh.128B
+    1, // llvm.hexagon.V6.vabsh.sat
+    1, // llvm.hexagon.V6.vabsh.sat.128B
+    1, // llvm.hexagon.V6.vabsw
+    1, // llvm.hexagon.V6.vabsw.128B
+    1, // llvm.hexagon.V6.vabsw.sat
+    1, // llvm.hexagon.V6.vabsw.sat.128B
+    1, // llvm.hexagon.V6.vaddb
+    1, // llvm.hexagon.V6.vaddb.128B
+    1, // llvm.hexagon.V6.vaddb.dv
+    1, // llvm.hexagon.V6.vaddb.dv.128B
+    1, // llvm.hexagon.V6.vaddbnq
+    1, // llvm.hexagon.V6.vaddbnq.128B
+    1, // llvm.hexagon.V6.vaddbq
+    1, // llvm.hexagon.V6.vaddbq.128B
+    1, // llvm.hexagon.V6.vaddbsat
+    1, // llvm.hexagon.V6.vaddbsat.128B
+    1, // llvm.hexagon.V6.vaddbsat.dv
+    1, // llvm.hexagon.V6.vaddbsat.dv.128B
+    1, // llvm.hexagon.V6.vaddcarry
+    1, // llvm.hexagon.V6.vaddcarry.128B
+    1, // llvm.hexagon.V6.vaddclbh
+    1, // llvm.hexagon.V6.vaddclbh.128B
+    1, // llvm.hexagon.V6.vaddclbw
+    1, // llvm.hexagon.V6.vaddclbw.128B
+    1, // llvm.hexagon.V6.vaddh
+    1, // llvm.hexagon.V6.vaddh.128B
+    1, // llvm.hexagon.V6.vaddh.dv
+    1, // llvm.hexagon.V6.vaddh.dv.128B
+    1, // llvm.hexagon.V6.vaddhnq
+    1, // llvm.hexagon.V6.vaddhnq.128B
+    1, // llvm.hexagon.V6.vaddhq
+    1, // llvm.hexagon.V6.vaddhq.128B
+    1, // llvm.hexagon.V6.vaddhsat
+    1, // llvm.hexagon.V6.vaddhsat.128B
+    1, // llvm.hexagon.V6.vaddhsat.dv
+    1, // llvm.hexagon.V6.vaddhsat.dv.128B
+    1, // llvm.hexagon.V6.vaddhw
+    1, // llvm.hexagon.V6.vaddhw.128B
+    1, // llvm.hexagon.V6.vaddhw.acc
+    1, // llvm.hexagon.V6.vaddhw.acc.128B
+    1, // llvm.hexagon.V6.vaddubh
+    1, // llvm.hexagon.V6.vaddubh.128B
+    1, // llvm.hexagon.V6.vaddubh.acc
+    1, // llvm.hexagon.V6.vaddubh.acc.128B
+    1, // llvm.hexagon.V6.vaddubsat
+    1, // llvm.hexagon.V6.vaddubsat.128B
+    1, // llvm.hexagon.V6.vaddubsat.dv
+    1, // llvm.hexagon.V6.vaddubsat.dv.128B
+    1, // llvm.hexagon.V6.vaddububb.sat
+    1, // llvm.hexagon.V6.vaddububb.sat.128B
+    1, // llvm.hexagon.V6.vadduhsat
+    1, // llvm.hexagon.V6.vadduhsat.128B
+    1, // llvm.hexagon.V6.vadduhsat.dv
+    1, // llvm.hexagon.V6.vadduhsat.dv.128B
+    1, // llvm.hexagon.V6.vadduhw
+    1, // llvm.hexagon.V6.vadduhw.128B
+    1, // llvm.hexagon.V6.vadduhw.acc
+    1, // llvm.hexagon.V6.vadduhw.acc.128B
+    1, // llvm.hexagon.V6.vadduwsat
+    1, // llvm.hexagon.V6.vadduwsat.128B
+    1, // llvm.hexagon.V6.vadduwsat.dv
+    1, // llvm.hexagon.V6.vadduwsat.dv.128B
+    1, // llvm.hexagon.V6.vaddw
+    1, // llvm.hexagon.V6.vaddw.128B
+    1, // llvm.hexagon.V6.vaddw.dv
+    1, // llvm.hexagon.V6.vaddw.dv.128B
+    1, // llvm.hexagon.V6.vaddwnq
+    1, // llvm.hexagon.V6.vaddwnq.128B
+    1, // llvm.hexagon.V6.vaddwq
+    1, // llvm.hexagon.V6.vaddwq.128B
+    1, // llvm.hexagon.V6.vaddwsat
+    1, // llvm.hexagon.V6.vaddwsat.128B
+    1, // llvm.hexagon.V6.vaddwsat.dv
+    1, // llvm.hexagon.V6.vaddwsat.dv.128B
+    1, // llvm.hexagon.V6.valignb
+    1, // llvm.hexagon.V6.valignb.128B
+    1, // llvm.hexagon.V6.valignbi
+    1, // llvm.hexagon.V6.valignbi.128B
+    1, // llvm.hexagon.V6.vand
+    1, // llvm.hexagon.V6.vand.128B
+    1, // llvm.hexagon.V6.vandnqrt
+    1, // llvm.hexagon.V6.vandnqrt.128B
+    1, // llvm.hexagon.V6.vandnqrt.acc
+    1, // llvm.hexagon.V6.vandnqrt.acc.128B
+    1, // llvm.hexagon.V6.vandqrt
+    1, // llvm.hexagon.V6.vandqrt.128B
+    1, // llvm.hexagon.V6.vandqrt.acc
+    1, // llvm.hexagon.V6.vandqrt.acc.128B
+    1, // llvm.hexagon.V6.vandvnqv
+    1, // llvm.hexagon.V6.vandvnqv.128B
+    1, // llvm.hexagon.V6.vandvqv
+    1, // llvm.hexagon.V6.vandvqv.128B
+    1, // llvm.hexagon.V6.vandvrt
+    1, // llvm.hexagon.V6.vandvrt.128B
+    1, // llvm.hexagon.V6.vandvrt.acc
+    1, // llvm.hexagon.V6.vandvrt.acc.128B
+    1, // llvm.hexagon.V6.vaslh
+    1, // llvm.hexagon.V6.vaslh.128B
+    1, // llvm.hexagon.V6.vaslh.acc
+    1, // llvm.hexagon.V6.vaslh.acc.128B
+    1, // llvm.hexagon.V6.vaslhv
+    1, // llvm.hexagon.V6.vaslhv.128B
+    1, // llvm.hexagon.V6.vaslw
+    1, // llvm.hexagon.V6.vaslw.128B
+    1, // llvm.hexagon.V6.vaslw.acc
+    1, // llvm.hexagon.V6.vaslw.acc.128B
+    1, // llvm.hexagon.V6.vaslwv
+    1, // llvm.hexagon.V6.vaslwv.128B
+    1, // llvm.hexagon.V6.vasrh
+    1, // llvm.hexagon.V6.vasrh.128B
+    1, // llvm.hexagon.V6.vasrh.acc
+    1, // llvm.hexagon.V6.vasrh.acc.128B
+    1, // llvm.hexagon.V6.vasrhbrndsat
+    1, // llvm.hexagon.V6.vasrhbrndsat.128B
+    1, // llvm.hexagon.V6.vasrhbsat
+    1, // llvm.hexagon.V6.vasrhbsat.128B
+    1, // llvm.hexagon.V6.vasrhubrndsat
+    1, // llvm.hexagon.V6.vasrhubrndsat.128B
+    1, // llvm.hexagon.V6.vasrhubsat
+    1, // llvm.hexagon.V6.vasrhubsat.128B
+    1, // llvm.hexagon.V6.vasrhv
+    1, // llvm.hexagon.V6.vasrhv.128B
+    1, // llvm.hexagon.V6.vasruhubrndsat
+    1, // llvm.hexagon.V6.vasruhubrndsat.128B
+    1, // llvm.hexagon.V6.vasruhubsat
+    1, // llvm.hexagon.V6.vasruhubsat.128B
+    1, // llvm.hexagon.V6.vasruwuhrndsat
+    1, // llvm.hexagon.V6.vasruwuhrndsat.128B
+    1, // llvm.hexagon.V6.vasruwuhsat
+    1, // llvm.hexagon.V6.vasruwuhsat.128B
+    1, // llvm.hexagon.V6.vasrw
+    1, // llvm.hexagon.V6.vasrw.128B
+    1, // llvm.hexagon.V6.vasrw.acc
+    1, // llvm.hexagon.V6.vasrw.acc.128B
+    1, // llvm.hexagon.V6.vasrwh
+    1, // llvm.hexagon.V6.vasrwh.128B
+    1, // llvm.hexagon.V6.vasrwhrndsat
+    1, // llvm.hexagon.V6.vasrwhrndsat.128B
+    1, // llvm.hexagon.V6.vasrwhsat
+    1, // llvm.hexagon.V6.vasrwhsat.128B
+    1, // llvm.hexagon.V6.vasrwuhrndsat
+    1, // llvm.hexagon.V6.vasrwuhrndsat.128B
+    1, // llvm.hexagon.V6.vasrwuhsat
+    1, // llvm.hexagon.V6.vasrwuhsat.128B
+    1, // llvm.hexagon.V6.vasrwv
+    1, // llvm.hexagon.V6.vasrwv.128B
+    1, // llvm.hexagon.V6.vassign
+    1, // llvm.hexagon.V6.vassign.128B
+    1, // llvm.hexagon.V6.vassignp
+    1, // llvm.hexagon.V6.vassignp.128B
+    1, // llvm.hexagon.V6.vavgb
+    1, // llvm.hexagon.V6.vavgb.128B
+    1, // llvm.hexagon.V6.vavgbrnd
+    1, // llvm.hexagon.V6.vavgbrnd.128B
+    1, // llvm.hexagon.V6.vavgh
+    1, // llvm.hexagon.V6.vavgh.128B
+    1, // llvm.hexagon.V6.vavghrnd
+    1, // llvm.hexagon.V6.vavghrnd.128B
+    1, // llvm.hexagon.V6.vavgub
+    1, // llvm.hexagon.V6.vavgub.128B
+    1, // llvm.hexagon.V6.vavgubrnd
+    1, // llvm.hexagon.V6.vavgubrnd.128B
+    1, // llvm.hexagon.V6.vavguh
+    1, // llvm.hexagon.V6.vavguh.128B
+    1, // llvm.hexagon.V6.vavguhrnd
+    1, // llvm.hexagon.V6.vavguhrnd.128B
+    1, // llvm.hexagon.V6.vavguw
+    1, // llvm.hexagon.V6.vavguw.128B
+    1, // llvm.hexagon.V6.vavguwrnd
+    1, // llvm.hexagon.V6.vavguwrnd.128B
+    1, // llvm.hexagon.V6.vavgw
+    1, // llvm.hexagon.V6.vavgw.128B
+    1, // llvm.hexagon.V6.vavgwrnd
+    1, // llvm.hexagon.V6.vavgwrnd.128B
+    1, // llvm.hexagon.V6.vcl0h
+    1, // llvm.hexagon.V6.vcl0h.128B
+    1, // llvm.hexagon.V6.vcl0w
+    1, // llvm.hexagon.V6.vcl0w.128B
+    1, // llvm.hexagon.V6.vcombine
+    1, // llvm.hexagon.V6.vcombine.128B
+    1, // llvm.hexagon.V6.vd0
+    1, // llvm.hexagon.V6.vd0.128B
+    1, // llvm.hexagon.V6.vdd0
+    1, // llvm.hexagon.V6.vdd0.128B
+    1, // llvm.hexagon.V6.vdealb
+    1, // llvm.hexagon.V6.vdealb.128B
+    1, // llvm.hexagon.V6.vdealb4w
+    1, // llvm.hexagon.V6.vdealb4w.128B
+    1, // llvm.hexagon.V6.vdealh
+    1, // llvm.hexagon.V6.vdealh.128B
+    1, // llvm.hexagon.V6.vdealvdd
+    1, // llvm.hexagon.V6.vdealvdd.128B
+    1, // llvm.hexagon.V6.vdelta
+    1, // llvm.hexagon.V6.vdelta.128B
+    1, // llvm.hexagon.V6.vdmpybus
+    1, // llvm.hexagon.V6.vdmpybus.128B
+    1, // llvm.hexagon.V6.vdmpybus.acc
+    1, // llvm.hexagon.V6.vdmpybus.acc.128B
+    1, // llvm.hexagon.V6.vdmpybus.dv
+    1, // llvm.hexagon.V6.vdmpybus.dv.128B
+    1, // llvm.hexagon.V6.vdmpybus.dv.acc
+    1, // llvm.hexagon.V6.vdmpybus.dv.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhb
+    1, // llvm.hexagon.V6.vdmpyhb.128B
+    1, // llvm.hexagon.V6.vdmpyhb.acc
+    1, // llvm.hexagon.V6.vdmpyhb.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhb.dv
+    1, // llvm.hexagon.V6.vdmpyhb.dv.128B
+    1, // llvm.hexagon.V6.vdmpyhb.dv.acc
+    1, // llvm.hexagon.V6.vdmpyhb.dv.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhisat
+    1, // llvm.hexagon.V6.vdmpyhisat.128B
+    1, // llvm.hexagon.V6.vdmpyhisat.acc
+    1, // llvm.hexagon.V6.vdmpyhisat.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhsat
+    1, // llvm.hexagon.V6.vdmpyhsat.128B
+    1, // llvm.hexagon.V6.vdmpyhsat.acc
+    1, // llvm.hexagon.V6.vdmpyhsat.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhsuisat
+    1, // llvm.hexagon.V6.vdmpyhsuisat.128B
+    1, // llvm.hexagon.V6.vdmpyhsuisat.acc
+    1, // llvm.hexagon.V6.vdmpyhsuisat.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhsusat
+    1, // llvm.hexagon.V6.vdmpyhsusat.128B
+    1, // llvm.hexagon.V6.vdmpyhsusat.acc
+    1, // llvm.hexagon.V6.vdmpyhsusat.acc.128B
+    1, // llvm.hexagon.V6.vdmpyhvsat
+    1, // llvm.hexagon.V6.vdmpyhvsat.128B
+    1, // llvm.hexagon.V6.vdmpyhvsat.acc
+    1, // llvm.hexagon.V6.vdmpyhvsat.acc.128B
+    1, // llvm.hexagon.V6.vdsaduh
+    1, // llvm.hexagon.V6.vdsaduh.128B
+    1, // llvm.hexagon.V6.vdsaduh.acc
+    1, // llvm.hexagon.V6.vdsaduh.acc.128B
+    1, // llvm.hexagon.V6.veqb
+    1, // llvm.hexagon.V6.veqb.128B
+    1, // llvm.hexagon.V6.veqb.and
+    1, // llvm.hexagon.V6.veqb.and.128B
+    1, // llvm.hexagon.V6.veqb.or
+    1, // llvm.hexagon.V6.veqb.or.128B
+    1, // llvm.hexagon.V6.veqb.xor
+    1, // llvm.hexagon.V6.veqb.xor.128B
+    1, // llvm.hexagon.V6.veqh
+    1, // llvm.hexagon.V6.veqh.128B
+    1, // llvm.hexagon.V6.veqh.and
+    1, // llvm.hexagon.V6.veqh.and.128B
+    1, // llvm.hexagon.V6.veqh.or
+    1, // llvm.hexagon.V6.veqh.or.128B
+    1, // llvm.hexagon.V6.veqh.xor
+    1, // llvm.hexagon.V6.veqh.xor.128B
+    1, // llvm.hexagon.V6.veqw
+    1, // llvm.hexagon.V6.veqw.128B
+    1, // llvm.hexagon.V6.veqw.and
+    1, // llvm.hexagon.V6.veqw.and.128B
+    1, // llvm.hexagon.V6.veqw.or
+    1, // llvm.hexagon.V6.veqw.or.128B
+    1, // llvm.hexagon.V6.veqw.xor
+    1, // llvm.hexagon.V6.veqw.xor.128B
+    21, // llvm.hexagon.V6.vgathermh
+    21, // llvm.hexagon.V6.vgathermh.128B
+    21, // llvm.hexagon.V6.vgathermhq
+    21, // llvm.hexagon.V6.vgathermhq.128B
+    21, // llvm.hexagon.V6.vgathermhw
+    21, // llvm.hexagon.V6.vgathermhw.128B
+    21, // llvm.hexagon.V6.vgathermhwq
+    21, // llvm.hexagon.V6.vgathermhwq.128B
+    21, // llvm.hexagon.V6.vgathermw
+    21, // llvm.hexagon.V6.vgathermw.128B
+    21, // llvm.hexagon.V6.vgathermwq
+    21, // llvm.hexagon.V6.vgathermwq.128B
+    1, // llvm.hexagon.V6.vgtb
+    1, // llvm.hexagon.V6.vgtb.128B
+    1, // llvm.hexagon.V6.vgtb.and
+    1, // llvm.hexagon.V6.vgtb.and.128B
+    1, // llvm.hexagon.V6.vgtb.or
+    1, // llvm.hexagon.V6.vgtb.or.128B
+    1, // llvm.hexagon.V6.vgtb.xor
+    1, // llvm.hexagon.V6.vgtb.xor.128B
+    1, // llvm.hexagon.V6.vgth
+    1, // llvm.hexagon.V6.vgth.128B
+    1, // llvm.hexagon.V6.vgth.and
+    1, // llvm.hexagon.V6.vgth.and.128B
+    1, // llvm.hexagon.V6.vgth.or
+    1, // llvm.hexagon.V6.vgth.or.128B
+    1, // llvm.hexagon.V6.vgth.xor
+    1, // llvm.hexagon.V6.vgth.xor.128B
+    1, // llvm.hexagon.V6.vgtub
+    1, // llvm.hexagon.V6.vgtub.128B
+    1, // llvm.hexagon.V6.vgtub.and
+    1, // llvm.hexagon.V6.vgtub.and.128B
+    1, // llvm.hexagon.V6.vgtub.or
+    1, // llvm.hexagon.V6.vgtub.or.128B
+    1, // llvm.hexagon.V6.vgtub.xor
+    1, // llvm.hexagon.V6.vgtub.xor.128B
+    1, // llvm.hexagon.V6.vgtuh
+    1, // llvm.hexagon.V6.vgtuh.128B
+    1, // llvm.hexagon.V6.vgtuh.and
+    1, // llvm.hexagon.V6.vgtuh.and.128B
+    1, // llvm.hexagon.V6.vgtuh.or
+    1, // llvm.hexagon.V6.vgtuh.or.128B
+    1, // llvm.hexagon.V6.vgtuh.xor
+    1, // llvm.hexagon.V6.vgtuh.xor.128B
+    1, // llvm.hexagon.V6.vgtuw
+    1, // llvm.hexagon.V6.vgtuw.128B
+    1, // llvm.hexagon.V6.vgtuw.and
+    1, // llvm.hexagon.V6.vgtuw.and.128B
+    1, // llvm.hexagon.V6.vgtuw.or
+    1, // llvm.hexagon.V6.vgtuw.or.128B
+    1, // llvm.hexagon.V6.vgtuw.xor
+    1, // llvm.hexagon.V6.vgtuw.xor.128B
+    1, // llvm.hexagon.V6.vgtw
+    1, // llvm.hexagon.V6.vgtw.128B
+    1, // llvm.hexagon.V6.vgtw.and
+    1, // llvm.hexagon.V6.vgtw.and.128B
+    1, // llvm.hexagon.V6.vgtw.or
+    1, // llvm.hexagon.V6.vgtw.or.128B
+    1, // llvm.hexagon.V6.vgtw.xor
+    1, // llvm.hexagon.V6.vgtw.xor.128B
+    1, // llvm.hexagon.V6.vinsertwr
+    1, // llvm.hexagon.V6.vinsertwr.128B
+    1, // llvm.hexagon.V6.vlalignb
+    1, // llvm.hexagon.V6.vlalignb.128B
+    1, // llvm.hexagon.V6.vlalignbi
+    1, // llvm.hexagon.V6.vlalignbi.128B
+    1, // llvm.hexagon.V6.vlsrb
+    1, // llvm.hexagon.V6.vlsrb.128B
+    1, // llvm.hexagon.V6.vlsrh
+    1, // llvm.hexagon.V6.vlsrh.128B
+    1, // llvm.hexagon.V6.vlsrhv
+    1, // llvm.hexagon.V6.vlsrhv.128B
+    1, // llvm.hexagon.V6.vlsrw
+    1, // llvm.hexagon.V6.vlsrw.128B
+    1, // llvm.hexagon.V6.vlsrwv
+    1, // llvm.hexagon.V6.vlsrwv.128B
+    1, // llvm.hexagon.V6.vlut4
+    1, // llvm.hexagon.V6.vlut4.128B
+    1, // llvm.hexagon.V6.vlutvvb
+    1, // llvm.hexagon.V6.vlutvvb.128B
+    1, // llvm.hexagon.V6.vlutvvb.nm
+    1, // llvm.hexagon.V6.vlutvvb.nm.128B
+    1, // llvm.hexagon.V6.vlutvvb.oracc
+    1, // llvm.hexagon.V6.vlutvvb.oracc.128B
+    1, // llvm.hexagon.V6.vlutvvb.oracci
+    1, // llvm.hexagon.V6.vlutvvb.oracci.128B
+    1, // llvm.hexagon.V6.vlutvvbi
+    1, // llvm.hexagon.V6.vlutvvbi.128B
+    1, // llvm.hexagon.V6.vlutvwh
+    1, // llvm.hexagon.V6.vlutvwh.128B
+    1, // llvm.hexagon.V6.vlutvwh.nm
+    1, // llvm.hexagon.V6.vlutvwh.nm.128B
+    1, // llvm.hexagon.V6.vlutvwh.oracc
+    1, // llvm.hexagon.V6.vlutvwh.oracc.128B
+    1, // llvm.hexagon.V6.vlutvwh.oracci
+    1, // llvm.hexagon.V6.vlutvwh.oracci.128B
+    1, // llvm.hexagon.V6.vlutvwhi
+    1, // llvm.hexagon.V6.vlutvwhi.128B
+    21, // llvm.hexagon.V6.vmaskedstorenq
+    21, // llvm.hexagon.V6.vmaskedstorenq.128B
+    21, // llvm.hexagon.V6.vmaskedstorentnq
+    21, // llvm.hexagon.V6.vmaskedstorentnq.128B
+    21, // llvm.hexagon.V6.vmaskedstorentq
+    21, // llvm.hexagon.V6.vmaskedstorentq.128B
+    21, // llvm.hexagon.V6.vmaskedstoreq
+    21, // llvm.hexagon.V6.vmaskedstoreq.128B
+    1, // llvm.hexagon.V6.vmaxb
+    1, // llvm.hexagon.V6.vmaxb.128B
+    1, // llvm.hexagon.V6.vmaxh
+    1, // llvm.hexagon.V6.vmaxh.128B
+    1, // llvm.hexagon.V6.vmaxub
+    1, // llvm.hexagon.V6.vmaxub.128B
+    1, // llvm.hexagon.V6.vmaxuh
+    1, // llvm.hexagon.V6.vmaxuh.128B
+    1, // llvm.hexagon.V6.vmaxw
+    1, // llvm.hexagon.V6.vmaxw.128B
+    1, // llvm.hexagon.V6.vminb
+    1, // llvm.hexagon.V6.vminb.128B
+    1, // llvm.hexagon.V6.vminh
+    1, // llvm.hexagon.V6.vminh.128B
+    1, // llvm.hexagon.V6.vminub
+    1, // llvm.hexagon.V6.vminub.128B
+    1, // llvm.hexagon.V6.vminuh
+    1, // llvm.hexagon.V6.vminuh.128B
+    1, // llvm.hexagon.V6.vminw
+    1, // llvm.hexagon.V6.vminw.128B
+    1, // llvm.hexagon.V6.vmpabus
+    1, // llvm.hexagon.V6.vmpabus.128B
+    1, // llvm.hexagon.V6.vmpabus.acc
+    1, // llvm.hexagon.V6.vmpabus.acc.128B
+    1, // llvm.hexagon.V6.vmpabusv
+    1, // llvm.hexagon.V6.vmpabusv.128B
+    1, // llvm.hexagon.V6.vmpabuu
+    1, // llvm.hexagon.V6.vmpabuu.128B
+    1, // llvm.hexagon.V6.vmpabuu.acc
+    1, // llvm.hexagon.V6.vmpabuu.acc.128B
+    1, // llvm.hexagon.V6.vmpabuuv
+    1, // llvm.hexagon.V6.vmpabuuv.128B
+    1, // llvm.hexagon.V6.vmpahb
+    1, // llvm.hexagon.V6.vmpahb.128B
+    1, // llvm.hexagon.V6.vmpahb.acc
+    1, // llvm.hexagon.V6.vmpahb.acc.128B
+    1, // llvm.hexagon.V6.vmpahhsat
+    1, // llvm.hexagon.V6.vmpahhsat.128B
+    1, // llvm.hexagon.V6.vmpauhb
+    1, // llvm.hexagon.V6.vmpauhb.128B
+    1, // llvm.hexagon.V6.vmpauhb.acc
+    1, // llvm.hexagon.V6.vmpauhb.acc.128B
+    1, // llvm.hexagon.V6.vmpauhuhsat
+    1, // llvm.hexagon.V6.vmpauhuhsat.128B
+    1, // llvm.hexagon.V6.vmpsuhuhsat
+    1, // llvm.hexagon.V6.vmpsuhuhsat.128B
+    1, // llvm.hexagon.V6.vmpybus
+    1, // llvm.hexagon.V6.vmpybus.128B
+    1, // llvm.hexagon.V6.vmpybus.acc
+    1, // llvm.hexagon.V6.vmpybus.acc.128B
+    1, // llvm.hexagon.V6.vmpybusv
+    1, // llvm.hexagon.V6.vmpybusv.128B
+    1, // llvm.hexagon.V6.vmpybusv.acc
+    1, // llvm.hexagon.V6.vmpybusv.acc.128B
+    1, // llvm.hexagon.V6.vmpybv
+    1, // llvm.hexagon.V6.vmpybv.128B
+    1, // llvm.hexagon.V6.vmpybv.acc
+    1, // llvm.hexagon.V6.vmpybv.acc.128B
+    1, // llvm.hexagon.V6.vmpyewuh
+    1, // llvm.hexagon.V6.vmpyewuh.128B
+    1, // llvm.hexagon.V6.vmpyewuh.64
+    1, // llvm.hexagon.V6.vmpyewuh.64.128B
+    1, // llvm.hexagon.V6.vmpyh
+    1, // llvm.hexagon.V6.vmpyh.128B
+    1, // llvm.hexagon.V6.vmpyh.acc
+    1, // llvm.hexagon.V6.vmpyh.acc.128B
+    1, // llvm.hexagon.V6.vmpyhsat.acc
+    1, // llvm.hexagon.V6.vmpyhsat.acc.128B
+    1, // llvm.hexagon.V6.vmpyhsrs
+    1, // llvm.hexagon.V6.vmpyhsrs.128B
+    1, // llvm.hexagon.V6.vmpyhss
+    1, // llvm.hexagon.V6.vmpyhss.128B
+    1, // llvm.hexagon.V6.vmpyhus
+    1, // llvm.hexagon.V6.vmpyhus.128B
+    1, // llvm.hexagon.V6.vmpyhus.acc
+    1, // llvm.hexagon.V6.vmpyhus.acc.128B
+    1, // llvm.hexagon.V6.vmpyhv
+    1, // llvm.hexagon.V6.vmpyhv.128B
+    1, // llvm.hexagon.V6.vmpyhv.acc
+    1, // llvm.hexagon.V6.vmpyhv.acc.128B
+    1, // llvm.hexagon.V6.vmpyhvsrs
+    1, // llvm.hexagon.V6.vmpyhvsrs.128B
+    1, // llvm.hexagon.V6.vmpyieoh
+    1, // llvm.hexagon.V6.vmpyieoh.128B
+    1, // llvm.hexagon.V6.vmpyiewh.acc
+    1, // llvm.hexagon.V6.vmpyiewh.acc.128B
+    1, // llvm.hexagon.V6.vmpyiewuh
+    1, // llvm.hexagon.V6.vmpyiewuh.128B
+    1, // llvm.hexagon.V6.vmpyiewuh.acc
+    1, // llvm.hexagon.V6.vmpyiewuh.acc.128B
+    1, // llvm.hexagon.V6.vmpyih
+    1, // llvm.hexagon.V6.vmpyih.128B
+    1, // llvm.hexagon.V6.vmpyih.acc
+    1, // llvm.hexagon.V6.vmpyih.acc.128B
+    1, // llvm.hexagon.V6.vmpyihb
+    1, // llvm.hexagon.V6.vmpyihb.128B
+    1, // llvm.hexagon.V6.vmpyihb.acc
+    1, // llvm.hexagon.V6.vmpyihb.acc.128B
+    1, // llvm.hexagon.V6.vmpyiowh
+    1, // llvm.hexagon.V6.vmpyiowh.128B
+    1, // llvm.hexagon.V6.vmpyiwb
+    1, // llvm.hexagon.V6.vmpyiwb.128B
+    1, // llvm.hexagon.V6.vmpyiwb.acc
+    1, // llvm.hexagon.V6.vmpyiwb.acc.128B
+    1, // llvm.hexagon.V6.vmpyiwh
+    1, // llvm.hexagon.V6.vmpyiwh.128B
+    1, // llvm.hexagon.V6.vmpyiwh.acc
+    1, // llvm.hexagon.V6.vmpyiwh.acc.128B
+    1, // llvm.hexagon.V6.vmpyiwub
+    1, // llvm.hexagon.V6.vmpyiwub.128B
+    1, // llvm.hexagon.V6.vmpyiwub.acc
+    1, // llvm.hexagon.V6.vmpyiwub.acc.128B
+    1, // llvm.hexagon.V6.vmpyowh
+    1, // llvm.hexagon.V6.vmpyowh.128B
+    1, // llvm.hexagon.V6.vmpyowh.64.acc
+    1, // llvm.hexagon.V6.vmpyowh.64.acc.128B
+    1, // llvm.hexagon.V6.vmpyowh.rnd
+    1, // llvm.hexagon.V6.vmpyowh.rnd.128B
+    1, // llvm.hexagon.V6.vmpyowh.rnd.sacc
+    1, // llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
+    1, // llvm.hexagon.V6.vmpyowh.sacc
+    1, // llvm.hexagon.V6.vmpyowh.sacc.128B
+    1, // llvm.hexagon.V6.vmpyub
+    1, // llvm.hexagon.V6.vmpyub.128B
+    1, // llvm.hexagon.V6.vmpyub.acc
+    1, // llvm.hexagon.V6.vmpyub.acc.128B
+    1, // llvm.hexagon.V6.vmpyubv
+    1, // llvm.hexagon.V6.vmpyubv.128B
+    1, // llvm.hexagon.V6.vmpyubv.acc
+    1, // llvm.hexagon.V6.vmpyubv.acc.128B
+    1, // llvm.hexagon.V6.vmpyuh
+    1, // llvm.hexagon.V6.vmpyuh.128B
+    1, // llvm.hexagon.V6.vmpyuh.acc
+    1, // llvm.hexagon.V6.vmpyuh.acc.128B
+    1, // llvm.hexagon.V6.vmpyuhe
+    1, // llvm.hexagon.V6.vmpyuhe.128B
+    1, // llvm.hexagon.V6.vmpyuhe.acc
+    1, // llvm.hexagon.V6.vmpyuhe.acc.128B
+    1, // llvm.hexagon.V6.vmpyuhv
+    1, // llvm.hexagon.V6.vmpyuhv.128B
+    1, // llvm.hexagon.V6.vmpyuhv.acc
+    1, // llvm.hexagon.V6.vmpyuhv.acc.128B
+    1, // llvm.hexagon.V6.vmux
+    1, // llvm.hexagon.V6.vmux.128B
+    1, // llvm.hexagon.V6.vnavgb
+    1, // llvm.hexagon.V6.vnavgb.128B
+    1, // llvm.hexagon.V6.vnavgh
+    1, // llvm.hexagon.V6.vnavgh.128B
+    1, // llvm.hexagon.V6.vnavgub
+    1, // llvm.hexagon.V6.vnavgub.128B
+    1, // llvm.hexagon.V6.vnavgw
+    1, // llvm.hexagon.V6.vnavgw.128B
+    1, // llvm.hexagon.V6.vnormamth
+    1, // llvm.hexagon.V6.vnormamth.128B
+    1, // llvm.hexagon.V6.vnormamtw
+    1, // llvm.hexagon.V6.vnormamtw.128B
+    1, // llvm.hexagon.V6.vnot
+    1, // llvm.hexagon.V6.vnot.128B
+    1, // llvm.hexagon.V6.vor
+    1, // llvm.hexagon.V6.vor.128B
+    1, // llvm.hexagon.V6.vpackeb
+    1, // llvm.hexagon.V6.vpackeb.128B
+    1, // llvm.hexagon.V6.vpackeh
+    1, // llvm.hexagon.V6.vpackeh.128B
+    1, // llvm.hexagon.V6.vpackhb.sat
+    1, // llvm.hexagon.V6.vpackhb.sat.128B
+    1, // llvm.hexagon.V6.vpackhub.sat
+    1, // llvm.hexagon.V6.vpackhub.sat.128B
+    1, // llvm.hexagon.V6.vpackob
+    1, // llvm.hexagon.V6.vpackob.128B
+    1, // llvm.hexagon.V6.vpackoh
+    1, // llvm.hexagon.V6.vpackoh.128B
+    1, // llvm.hexagon.V6.vpackwh.sat
+    1, // llvm.hexagon.V6.vpackwh.sat.128B
+    1, // llvm.hexagon.V6.vpackwuh.sat
+    1, // llvm.hexagon.V6.vpackwuh.sat.128B
+    1, // llvm.hexagon.V6.vpopcounth
+    1, // llvm.hexagon.V6.vpopcounth.128B
+    1, // llvm.hexagon.V6.vprefixqb
+    1, // llvm.hexagon.V6.vprefixqb.128B
+    1, // llvm.hexagon.V6.vprefixqh
+    1, // llvm.hexagon.V6.vprefixqh.128B
+    1, // llvm.hexagon.V6.vprefixqw
+    1, // llvm.hexagon.V6.vprefixqw.128B
+    1, // llvm.hexagon.V6.vrdelta
+    1, // llvm.hexagon.V6.vrdelta.128B
+    1, // llvm.hexagon.V6.vrmpybub.rtt
+    1, // llvm.hexagon.V6.vrmpybub.rtt.128B
+    1, // llvm.hexagon.V6.vrmpybub.rtt.acc
+    1, // llvm.hexagon.V6.vrmpybub.rtt.acc.128B
+    1, // llvm.hexagon.V6.vrmpybus
+    1, // llvm.hexagon.V6.vrmpybus.128B
+    1, // llvm.hexagon.V6.vrmpybus.acc
+    1, // llvm.hexagon.V6.vrmpybus.acc.128B
+    1, // llvm.hexagon.V6.vrmpybusi
+    1, // llvm.hexagon.V6.vrmpybusi.128B
+    1, // llvm.hexagon.V6.vrmpybusi.acc
+    1, // llvm.hexagon.V6.vrmpybusi.acc.128B
+    1, // llvm.hexagon.V6.vrmpybusv
+    1, // llvm.hexagon.V6.vrmpybusv.128B
+    1, // llvm.hexagon.V6.vrmpybusv.acc
+    1, // llvm.hexagon.V6.vrmpybusv.acc.128B
+    1, // llvm.hexagon.V6.vrmpybv
+    1, // llvm.hexagon.V6.vrmpybv.128B
+    1, // llvm.hexagon.V6.vrmpybv.acc
+    1, // llvm.hexagon.V6.vrmpybv.acc.128B
+    1, // llvm.hexagon.V6.vrmpyub
+    1, // llvm.hexagon.V6.vrmpyub.128B
+    1, // llvm.hexagon.V6.vrmpyub.acc
+    1, // llvm.hexagon.V6.vrmpyub.acc.128B
+    1, // llvm.hexagon.V6.vrmpyub.rtt
+    1, // llvm.hexagon.V6.vrmpyub.rtt.128B
+    1, // llvm.hexagon.V6.vrmpyub.rtt.acc
+    1, // llvm.hexagon.V6.vrmpyub.rtt.acc.128B
+    1, // llvm.hexagon.V6.vrmpyubi
+    1, // llvm.hexagon.V6.vrmpyubi.128B
+    1, // llvm.hexagon.V6.vrmpyubi.acc
+    1, // llvm.hexagon.V6.vrmpyubi.acc.128B
+    1, // llvm.hexagon.V6.vrmpyubv
+    1, // llvm.hexagon.V6.vrmpyubv.128B
+    1, // llvm.hexagon.V6.vrmpyubv.acc
+    1, // llvm.hexagon.V6.vrmpyubv.acc.128B
+    1, // llvm.hexagon.V6.vror
+    1, // llvm.hexagon.V6.vror.128B
+    1, // llvm.hexagon.V6.vroundhb
+    1, // llvm.hexagon.V6.vroundhb.128B
+    1, // llvm.hexagon.V6.vroundhub
+    1, // llvm.hexagon.V6.vroundhub.128B
+    1, // llvm.hexagon.V6.vrounduhub
+    1, // llvm.hexagon.V6.vrounduhub.128B
+    1, // llvm.hexagon.V6.vrounduwuh
+    1, // llvm.hexagon.V6.vrounduwuh.128B
+    1, // llvm.hexagon.V6.vroundwh
+    1, // llvm.hexagon.V6.vroundwh.128B
+    1, // llvm.hexagon.V6.vroundwuh
+    1, // llvm.hexagon.V6.vroundwuh.128B
+    1, // llvm.hexagon.V6.vrsadubi
+    1, // llvm.hexagon.V6.vrsadubi.128B
+    1, // llvm.hexagon.V6.vrsadubi.acc
+    1, // llvm.hexagon.V6.vrsadubi.acc.128B
+    1, // llvm.hexagon.V6.vsathub
+    1, // llvm.hexagon.V6.vsathub.128B
+    1, // llvm.hexagon.V6.vsatuwuh
+    1, // llvm.hexagon.V6.vsatuwuh.128B
+    1, // llvm.hexagon.V6.vsatwh
+    1, // llvm.hexagon.V6.vsatwh.128B
+    1, // llvm.hexagon.V6.vsb
+    1, // llvm.hexagon.V6.vsb.128B
+    32, // llvm.hexagon.V6.vscattermh
+    32, // llvm.hexagon.V6.vscattermh.128B
+    32, // llvm.hexagon.V6.vscattermh.add
+    32, // llvm.hexagon.V6.vscattermh.add.128B
+    32, // llvm.hexagon.V6.vscattermhq
+    32, // llvm.hexagon.V6.vscattermhq.128B
+    32, // llvm.hexagon.V6.vscattermhw
+    32, // llvm.hexagon.V6.vscattermhw.128B
+    32, // llvm.hexagon.V6.vscattermhw.add
+    32, // llvm.hexagon.V6.vscattermhw.add.128B
+    32, // llvm.hexagon.V6.vscattermhwq
+    32, // llvm.hexagon.V6.vscattermhwq.128B
+    32, // llvm.hexagon.V6.vscattermw
+    32, // llvm.hexagon.V6.vscattermw.128B
+    32, // llvm.hexagon.V6.vscattermw.add
+    32, // llvm.hexagon.V6.vscattermw.add.128B
+    32, // llvm.hexagon.V6.vscattermwq
+    32, // llvm.hexagon.V6.vscattermwq.128B
+    1, // llvm.hexagon.V6.vsh
+    1, // llvm.hexagon.V6.vsh.128B
+    1, // llvm.hexagon.V6.vshufeh
+    1, // llvm.hexagon.V6.vshufeh.128B
+    1, // llvm.hexagon.V6.vshuffb
+    1, // llvm.hexagon.V6.vshuffb.128B
+    1, // llvm.hexagon.V6.vshuffeb
+    1, // llvm.hexagon.V6.vshuffeb.128B
+    1, // llvm.hexagon.V6.vshuffh
+    1, // llvm.hexagon.V6.vshuffh.128B
+    1, // llvm.hexagon.V6.vshuffob
+    1, // llvm.hexagon.V6.vshuffob.128B
+    1, // llvm.hexagon.V6.vshuffvdd
+    1, // llvm.hexagon.V6.vshuffvdd.128B
+    1, // llvm.hexagon.V6.vshufoeb
+    1, // llvm.hexagon.V6.vshufoeb.128B
+    1, // llvm.hexagon.V6.vshufoeh
+    1, // llvm.hexagon.V6.vshufoeh.128B
+    1, // llvm.hexagon.V6.vshufoh
+    1, // llvm.hexagon.V6.vshufoh.128B
+    1, // llvm.hexagon.V6.vsubb
+    1, // llvm.hexagon.V6.vsubb.128B
+    1, // llvm.hexagon.V6.vsubb.dv
+    1, // llvm.hexagon.V6.vsubb.dv.128B
+    1, // llvm.hexagon.V6.vsubbnq
+    1, // llvm.hexagon.V6.vsubbnq.128B
+    1, // llvm.hexagon.V6.vsubbq
+    1, // llvm.hexagon.V6.vsubbq.128B
+    1, // llvm.hexagon.V6.vsubbsat
+    1, // llvm.hexagon.V6.vsubbsat.128B
+    1, // llvm.hexagon.V6.vsubbsat.dv
+    1, // llvm.hexagon.V6.vsubbsat.dv.128B
+    1, // llvm.hexagon.V6.vsubcarry
+    1, // llvm.hexagon.V6.vsubcarry.128B
+    1, // llvm.hexagon.V6.vsubh
+    1, // llvm.hexagon.V6.vsubh.128B
+    1, // llvm.hexagon.V6.vsubh.dv
+    1, // llvm.hexagon.V6.vsubh.dv.128B
+    1, // llvm.hexagon.V6.vsubhnq
+    1, // llvm.hexagon.V6.vsubhnq.128B
+    1, // llvm.hexagon.V6.vsubhq
+    1, // llvm.hexagon.V6.vsubhq.128B
+    1, // llvm.hexagon.V6.vsubhsat
+    1, // llvm.hexagon.V6.vsubhsat.128B
+    1, // llvm.hexagon.V6.vsubhsat.dv
+    1, // llvm.hexagon.V6.vsubhsat.dv.128B
+    1, // llvm.hexagon.V6.vsubhw
+    1, // llvm.hexagon.V6.vsubhw.128B
+    1, // llvm.hexagon.V6.vsububh
+    1, // llvm.hexagon.V6.vsububh.128B
+    1, // llvm.hexagon.V6.vsububsat
+    1, // llvm.hexagon.V6.vsububsat.128B
+    1, // llvm.hexagon.V6.vsububsat.dv
+    1, // llvm.hexagon.V6.vsububsat.dv.128B
+    1, // llvm.hexagon.V6.vsubububb.sat
+    1, // llvm.hexagon.V6.vsubububb.sat.128B
+    1, // llvm.hexagon.V6.vsubuhsat
+    1, // llvm.hexagon.V6.vsubuhsat.128B
+    1, // llvm.hexagon.V6.vsubuhsat.dv
+    1, // llvm.hexagon.V6.vsubuhsat.dv.128B
+    1, // llvm.hexagon.V6.vsubuhw
+    1, // llvm.hexagon.V6.vsubuhw.128B
+    1, // llvm.hexagon.V6.vsubuwsat
+    1, // llvm.hexagon.V6.vsubuwsat.128B
+    1, // llvm.hexagon.V6.vsubuwsat.dv
+    1, // llvm.hexagon.V6.vsubuwsat.dv.128B
+    1, // llvm.hexagon.V6.vsubw
+    1, // llvm.hexagon.V6.vsubw.128B
+    1, // llvm.hexagon.V6.vsubw.dv
+    1, // llvm.hexagon.V6.vsubw.dv.128B
+    1, // llvm.hexagon.V6.vsubwnq
+    1, // llvm.hexagon.V6.vsubwnq.128B
+    1, // llvm.hexagon.V6.vsubwq
+    1, // llvm.hexagon.V6.vsubwq.128B
+    1, // llvm.hexagon.V6.vsubwsat
+    1, // llvm.hexagon.V6.vsubwsat.128B
+    1, // llvm.hexagon.V6.vsubwsat.dv
+    1, // llvm.hexagon.V6.vsubwsat.dv.128B
+    1, // llvm.hexagon.V6.vswap
+    1, // llvm.hexagon.V6.vswap.128B
+    1, // llvm.hexagon.V6.vtmpyb
+    1, // llvm.hexagon.V6.vtmpyb.128B
+    1, // llvm.hexagon.V6.vtmpyb.acc
+    1, // llvm.hexagon.V6.vtmpyb.acc.128B
+    1, // llvm.hexagon.V6.vtmpybus
+    1, // llvm.hexagon.V6.vtmpybus.128B
+    1, // llvm.hexagon.V6.vtmpybus.acc
+    1, // llvm.hexagon.V6.vtmpybus.acc.128B
+    1, // llvm.hexagon.V6.vtmpyhb
+    1, // llvm.hexagon.V6.vtmpyhb.128B
+    1, // llvm.hexagon.V6.vtmpyhb.acc
+    1, // llvm.hexagon.V6.vtmpyhb.acc.128B
+    1, // llvm.hexagon.V6.vunpackb
+    1, // llvm.hexagon.V6.vunpackb.128B
+    1, // llvm.hexagon.V6.vunpackh
+    1, // llvm.hexagon.V6.vunpackh.128B
+    1, // llvm.hexagon.V6.vunpackob
+    1, // llvm.hexagon.V6.vunpackob.128B
+    1, // llvm.hexagon.V6.vunpackoh
+    1, // llvm.hexagon.V6.vunpackoh.128B
+    1, // llvm.hexagon.V6.vunpackub
+    1, // llvm.hexagon.V6.vunpackub.128B
+    1, // llvm.hexagon.V6.vunpackuh
+    1, // llvm.hexagon.V6.vunpackuh.128B
+    1, // llvm.hexagon.V6.vxor
+    1, // llvm.hexagon.V6.vxor.128B
+    1, // llvm.hexagon.V6.vzb
+    1, // llvm.hexagon.V6.vzb.128B
+    1, // llvm.hexagon.V6.vzh
+    1, // llvm.hexagon.V6.vzh.128B
+    3, // llvm.hexagon.Y2.dccleana
+    3, // llvm.hexagon.Y2.dccleaninva
+    3, // llvm.hexagon.Y2.dcinva
+    36, // llvm.hexagon.Y2.dczeroa
+    3, // llvm.hexagon.Y4.l2fetch
+    3, // llvm.hexagon.Y5.l2fetch
+    21, // llvm.hexagon.circ.ldb
+    21, // llvm.hexagon.circ.ldd
+    21, // llvm.hexagon.circ.ldh
+    21, // llvm.hexagon.circ.ldub
+    21, // llvm.hexagon.circ.lduh
+    21, // llvm.hexagon.circ.ldw
+    32, // llvm.hexagon.circ.stb
+    32, // llvm.hexagon.circ.std
+    32, // llvm.hexagon.circ.sth
+    32, // llvm.hexagon.circ.sthhi
+    32, // llvm.hexagon.circ.stw
+    21, // llvm.hexagon.mm256i.vaddw
+    3, // llvm.hexagon.prefetch
+    3, // llvm.mips.absq.s.ph
+    3, // llvm.mips.absq.s.qb
+    3, // llvm.mips.absq.s.w
+    1, // llvm.mips.add.a.b
+    1, // llvm.mips.add.a.d
+    1, // llvm.mips.add.a.h
+    1, // llvm.mips.add.a.w
+    1, // llvm.mips.addq.ph
+    1, // llvm.mips.addq.s.ph
+    3, // llvm.mips.addq.s.w
+    1, // llvm.mips.addqh.ph
+    1, // llvm.mips.addqh.r.ph
+    1, // llvm.mips.addqh.r.w
+    1, // llvm.mips.addqh.w
+    1, // llvm.mips.adds.a.b
+    1, // llvm.mips.adds.a.d
+    1, // llvm.mips.adds.a.h
+    1, // llvm.mips.adds.a.w
+    1, // llvm.mips.adds.s.b
+    1, // llvm.mips.adds.s.d
+    1, // llvm.mips.adds.s.h
+    1, // llvm.mips.adds.s.w
+    1, // llvm.mips.adds.u.b
+    1, // llvm.mips.adds.u.d
+    1, // llvm.mips.adds.u.h
+    1, // llvm.mips.adds.u.w
+    3, // llvm.mips.addsc
+    3, // llvm.mips.addu.ph
+    1, // llvm.mips.addu.qb
+    3, // llvm.mips.addu.s.ph
+    1, // llvm.mips.addu.s.qb
+    1, // llvm.mips.adduh.qb
+    1, // llvm.mips.adduh.r.qb
+    1, // llvm.mips.addv.b
+    1, // llvm.mips.addv.d
+    1, // llvm.mips.addv.h
+    1, // llvm.mips.addv.w
+    1, // llvm.mips.addvi.b
+    1, // llvm.mips.addvi.d
+    1, // llvm.mips.addvi.h
+    1, // llvm.mips.addvi.w
+    3, // llvm.mips.addwc
+    1, // llvm.mips.and.v
+    1, // llvm.mips.andi.b
+    1, // llvm.mips.append
+    1, // llvm.mips.asub.s.b
+    1, // llvm.mips.asub.s.d
+    1, // llvm.mips.asub.s.h
+    1, // llvm.mips.asub.s.w
+    1, // llvm.mips.asub.u.b
+    1, // llvm.mips.asub.u.d
+    1, // llvm.mips.asub.u.h
+    1, // llvm.mips.asub.u.w
+    1, // llvm.mips.ave.s.b
+    1, // llvm.mips.ave.s.d
+    1, // llvm.mips.ave.s.h
+    1, // llvm.mips.ave.s.w
+    1, // llvm.mips.ave.u.b
+    1, // llvm.mips.ave.u.d
+    1, // llvm.mips.ave.u.h
+    1, // llvm.mips.ave.u.w
+    1, // llvm.mips.aver.s.b
+    1, // llvm.mips.aver.s.d
+    1, // llvm.mips.aver.s.h
+    1, // llvm.mips.aver.s.w
+    1, // llvm.mips.aver.u.b
+    1, // llvm.mips.aver.u.d
+    1, // llvm.mips.aver.u.h
+    1, // llvm.mips.aver.u.w
+    1, // llvm.mips.balign
+    1, // llvm.mips.bclr.b
+    1, // llvm.mips.bclr.d
+    1, // llvm.mips.bclr.h
+    1, // llvm.mips.bclr.w
+    1, // llvm.mips.bclri.b
+    1, // llvm.mips.bclri.d
+    1, // llvm.mips.bclri.h
+    1, // llvm.mips.bclri.w
+    1, // llvm.mips.binsl.b
+    1, // llvm.mips.binsl.d
+    1, // llvm.mips.binsl.h
+    1, // llvm.mips.binsl.w
+    1, // llvm.mips.binsli.b
+    1, // llvm.mips.binsli.d
+    1, // llvm.mips.binsli.h
+    1, // llvm.mips.binsli.w
+    1, // llvm.mips.binsr.b
+    1, // llvm.mips.binsr.d
+    1, // llvm.mips.binsr.h
+    1, // llvm.mips.binsr.w
+    1, // llvm.mips.binsri.b
+    1, // llvm.mips.binsri.d
+    1, // llvm.mips.binsri.h
+    1, // llvm.mips.binsri.w
+    1, // llvm.mips.bitrev
+    1, // llvm.mips.bmnz.v
+    1, // llvm.mips.bmnzi.b
+    1, // llvm.mips.bmz.v
+    1, // llvm.mips.bmzi.b
+    1, // llvm.mips.bneg.b
+    1, // llvm.mips.bneg.d
+    1, // llvm.mips.bneg.h
+    1, // llvm.mips.bneg.w
+    1, // llvm.mips.bnegi.b
+    1, // llvm.mips.bnegi.d
+    1, // llvm.mips.bnegi.h
+    1, // llvm.mips.bnegi.w
+    1, // llvm.mips.bnz.b
+    1, // llvm.mips.bnz.d
+    1, // llvm.mips.bnz.h
+    1, // llvm.mips.bnz.v
+    1, // llvm.mips.bnz.w
+    16, // llvm.mips.bposge32
+    1, // llvm.mips.bsel.v
+    1, // llvm.mips.bseli.b
+    1, // llvm.mips.bset.b
+    1, // llvm.mips.bset.d
+    1, // llvm.mips.bset.h
+    1, // llvm.mips.bset.w
+    1, // llvm.mips.bseti.b
+    1, // llvm.mips.bseti.d
+    1, // llvm.mips.bseti.h
+    1, // llvm.mips.bseti.w
+    1, // llvm.mips.bz.b
+    1, // llvm.mips.bz.d
+    1, // llvm.mips.bz.h
+    1, // llvm.mips.bz.v
+    1, // llvm.mips.bz.w
+    1, // llvm.mips.ceq.b
+    1, // llvm.mips.ceq.d
+    1, // llvm.mips.ceq.h
+    1, // llvm.mips.ceq.w
+    1, // llvm.mips.ceqi.b
+    1, // llvm.mips.ceqi.d
+    1, // llvm.mips.ceqi.h
+    1, // llvm.mips.ceqi.w
+    3, // llvm.mips.cfcmsa
+    1, // llvm.mips.cle.s.b
+    1, // llvm.mips.cle.s.d
+    1, // llvm.mips.cle.s.h
+    1, // llvm.mips.cle.s.w
+    1, // llvm.mips.cle.u.b
+    1, // llvm.mips.cle.u.d
+    1, // llvm.mips.cle.u.h
+    1, // llvm.mips.cle.u.w
+    1, // llvm.mips.clei.s.b
+    1, // llvm.mips.clei.s.d
+    1, // llvm.mips.clei.s.h
+    1, // llvm.mips.clei.s.w
+    1, // llvm.mips.clei.u.b
+    1, // llvm.mips.clei.u.d
+    1, // llvm.mips.clei.u.h
+    1, // llvm.mips.clei.u.w
+    1, // llvm.mips.clt.s.b
+    1, // llvm.mips.clt.s.d
+    1, // llvm.mips.clt.s.h
+    1, // llvm.mips.clt.s.w
+    1, // llvm.mips.clt.u.b
+    1, // llvm.mips.clt.u.d
+    1, // llvm.mips.clt.u.h
+    1, // llvm.mips.clt.u.w
+    1, // llvm.mips.clti.s.b
+    1, // llvm.mips.clti.s.d
+    1, // llvm.mips.clti.s.h
+    1, // llvm.mips.clti.s.w
+    1, // llvm.mips.clti.u.b
+    1, // llvm.mips.clti.u.d
+    1, // llvm.mips.clti.u.h
+    1, // llvm.mips.clti.u.w
+    3, // llvm.mips.cmp.eq.ph
+    3, // llvm.mips.cmp.le.ph
+    3, // llvm.mips.cmp.lt.ph
+    3, // llvm.mips.cmpgdu.eq.qb
+    3, // llvm.mips.cmpgdu.le.qb
+    3, // llvm.mips.cmpgdu.lt.qb
+    3, // llvm.mips.cmpgu.eq.qb
+    3, // llvm.mips.cmpgu.le.qb
+    3, // llvm.mips.cmpgu.lt.qb
+    3, // llvm.mips.cmpu.eq.qb
+    3, // llvm.mips.cmpu.le.qb
+    3, // llvm.mips.cmpu.lt.qb
+    1, // llvm.mips.copy.s.b
+    1, // llvm.mips.copy.s.d
+    1, // llvm.mips.copy.s.h
+    1, // llvm.mips.copy.s.w
+    1, // llvm.mips.copy.u.b
+    1, // llvm.mips.copy.u.d
+    1, // llvm.mips.copy.u.h
+    1, // llvm.mips.copy.u.w
+    3, // llvm.mips.ctcmsa
+    1, // llvm.mips.div.s.b
+    1, // llvm.mips.div.s.d
+    1, // llvm.mips.div.s.h
+    1, // llvm.mips.div.s.w
+    1, // llvm.mips.div.u.b
+    1, // llvm.mips.div.u.d
+    1, // llvm.mips.div.u.h
+    1, // llvm.mips.div.u.w
+    1, // llvm.mips.dlsa
+    1, // llvm.mips.dotp.s.d
+    1, // llvm.mips.dotp.s.h
+    1, // llvm.mips.dotp.s.w
+    1, // llvm.mips.dotp.u.d
+    1, // llvm.mips.dotp.u.h
+    1, // llvm.mips.dotp.u.w
+    1, // llvm.mips.dpa.w.ph
+    1, // llvm.mips.dpadd.s.d
+    1, // llvm.mips.dpadd.s.h
+    1, // llvm.mips.dpadd.s.w
+    1, // llvm.mips.dpadd.u.d
+    1, // llvm.mips.dpadd.u.h
+    1, // llvm.mips.dpadd.u.w
+    3, // llvm.mips.dpaq.s.w.ph
+    3, // llvm.mips.dpaq.sa.l.w
+    3, // llvm.mips.dpaqx.s.w.ph
+    3, // llvm.mips.dpaqx.sa.w.ph
+    1, // llvm.mips.dpau.h.qbl
+    1, // llvm.mips.dpau.h.qbr
+    1, // llvm.mips.dpax.w.ph
+    1, // llvm.mips.dps.w.ph
+    3, // llvm.mips.dpsq.s.w.ph
+    3, // llvm.mips.dpsq.sa.l.w
+    3, // llvm.mips.dpsqx.s.w.ph
+    3, // llvm.mips.dpsqx.sa.w.ph
+    1, // llvm.mips.dpsu.h.qbl
+    1, // llvm.mips.dpsu.h.qbr
+    1, // llvm.mips.dpsub.s.d
+    1, // llvm.mips.dpsub.s.h
+    1, // llvm.mips.dpsub.s.w
+    1, // llvm.mips.dpsub.u.d
+    1, // llvm.mips.dpsub.u.h
+    1, // llvm.mips.dpsub.u.w
+    1, // llvm.mips.dpsx.w.ph
+    3, // llvm.mips.extp
+    3, // llvm.mips.extpdp
+    3, // llvm.mips.extr.r.w
+    3, // llvm.mips.extr.rs.w
+    3, // llvm.mips.extr.s.h
+    3, // llvm.mips.extr.w
+    1, // llvm.mips.fadd.d
+    1, // llvm.mips.fadd.w
+    1, // llvm.mips.fcaf.d
+    1, // llvm.mips.fcaf.w
+    1, // llvm.mips.fceq.d
+    1, // llvm.mips.fceq.w
+    1, // llvm.mips.fclass.d
+    1, // llvm.mips.fclass.w
+    1, // llvm.mips.fcle.d
+    1, // llvm.mips.fcle.w
+    1, // llvm.mips.fclt.d
+    1, // llvm.mips.fclt.w
+    1, // llvm.mips.fcne.d
+    1, // llvm.mips.fcne.w
+    1, // llvm.mips.fcor.d
+    1, // llvm.mips.fcor.w
+    1, // llvm.mips.fcueq.d
+    1, // llvm.mips.fcueq.w
+    1, // llvm.mips.fcule.d
+    1, // llvm.mips.fcule.w
+    1, // llvm.mips.fcult.d
+    1, // llvm.mips.fcult.w
+    1, // llvm.mips.fcun.d
+    1, // llvm.mips.fcun.w
+    1, // llvm.mips.fcune.d
+    1, // llvm.mips.fcune.w
+    1, // llvm.mips.fdiv.d
+    1, // llvm.mips.fdiv.w
+    1, // llvm.mips.fexdo.h
+    1, // llvm.mips.fexdo.w
+    1, // llvm.mips.fexp2.d
+    1, // llvm.mips.fexp2.w
+    1, // llvm.mips.fexupl.d
+    1, // llvm.mips.fexupl.w
+    1, // llvm.mips.fexupr.d
+    1, // llvm.mips.fexupr.w
+    1, // llvm.mips.ffint.s.d
+    1, // llvm.mips.ffint.s.w
+    1, // llvm.mips.ffint.u.d
+    1, // llvm.mips.ffint.u.w
+    1, // llvm.mips.ffql.d
+    1, // llvm.mips.ffql.w
+    1, // llvm.mips.ffqr.d
+    1, // llvm.mips.ffqr.w
+    1, // llvm.mips.fill.b
+    1, // llvm.mips.fill.d
+    1, // llvm.mips.fill.h
+    1, // llvm.mips.fill.w
+    1, // llvm.mips.flog2.d
+    1, // llvm.mips.flog2.w
+    1, // llvm.mips.fmadd.d
+    1, // llvm.mips.fmadd.w
+    1, // llvm.mips.fmax.a.d
+    1, // llvm.mips.fmax.a.w
+    1, // llvm.mips.fmax.d
+    1, // llvm.mips.fmax.w
+    1, // llvm.mips.fmin.a.d
+    1, // llvm.mips.fmin.a.w
+    1, // llvm.mips.fmin.d
+    1, // llvm.mips.fmin.w
+    1, // llvm.mips.fmsub.d
+    1, // llvm.mips.fmsub.w
+    1, // llvm.mips.fmul.d
+    1, // llvm.mips.fmul.w
+    1, // llvm.mips.frcp.d
+    1, // llvm.mips.frcp.w
+    1, // llvm.mips.frint.d
+    1, // llvm.mips.frint.w
+    1, // llvm.mips.frsqrt.d
+    1, // llvm.mips.frsqrt.w
+    1, // llvm.mips.fsaf.d
+    1, // llvm.mips.fsaf.w
+    1, // llvm.mips.fseq.d
+    1, // llvm.mips.fseq.w
+    1, // llvm.mips.fsle.d
+    1, // llvm.mips.fsle.w
+    1, // llvm.mips.fslt.d
+    1, // llvm.mips.fslt.w
+    1, // llvm.mips.fsne.d
+    1, // llvm.mips.fsne.w
+    1, // llvm.mips.fsor.d
+    1, // llvm.mips.fsor.w
+    1, // llvm.mips.fsqrt.d
+    1, // llvm.mips.fsqrt.w
+    1, // llvm.mips.fsub.d
+    1, // llvm.mips.fsub.w
+    1, // llvm.mips.fsueq.d
+    1, // llvm.mips.fsueq.w
+    1, // llvm.mips.fsule.d
+    1, // llvm.mips.fsule.w
+    1, // llvm.mips.fsult.d
+    1, // llvm.mips.fsult.w
+    1, // llvm.mips.fsun.d
+    1, // llvm.mips.fsun.w
+    1, // llvm.mips.fsune.d
+    1, // llvm.mips.fsune.w
+    1, // llvm.mips.ftint.s.d
+    1, // llvm.mips.ftint.s.w
+    1, // llvm.mips.ftint.u.d
+    1, // llvm.mips.ftint.u.w
+    1, // llvm.mips.ftq.h
+    1, // llvm.mips.ftq.w
+    1, // llvm.mips.ftrunc.s.d
+    1, // llvm.mips.ftrunc.s.w
+    1, // llvm.mips.ftrunc.u.d
+    1, // llvm.mips.ftrunc.u.w
+    1, // llvm.mips.hadd.s.d
+    1, // llvm.mips.hadd.s.h
+    1, // llvm.mips.hadd.s.w
+    1, // llvm.mips.hadd.u.d
+    1, // llvm.mips.hadd.u.h
+    1, // llvm.mips.hadd.u.w
+    1, // llvm.mips.hsub.s.d
+    1, // llvm.mips.hsub.s.h
+    1, // llvm.mips.hsub.s.w
+    1, // llvm.mips.hsub.u.d
+    1, // llvm.mips.hsub.u.h
+    1, // llvm.mips.hsub.u.w
+    1, // llvm.mips.ilvev.b
+    1, // llvm.mips.ilvev.d
+    1, // llvm.mips.ilvev.h
+    1, // llvm.mips.ilvev.w
+    1, // llvm.mips.ilvl.b
+    1, // llvm.mips.ilvl.d
+    1, // llvm.mips.ilvl.h
+    1, // llvm.mips.ilvl.w
+    1, // llvm.mips.ilvod.b
+    1, // llvm.mips.ilvod.d
+    1, // llvm.mips.ilvod.h
+    1, // llvm.mips.ilvod.w
+    1, // llvm.mips.ilvr.b
+    1, // llvm.mips.ilvr.d
+    1, // llvm.mips.ilvr.h
+    1, // llvm.mips.ilvr.w
+    1, // llvm.mips.insert.b
+    1, // llvm.mips.insert.d
+    1, // llvm.mips.insert.h
+    1, // llvm.mips.insert.w
+    16, // llvm.mips.insv
+    1, // llvm.mips.insve.b
+    1, // llvm.mips.insve.d
+    1, // llvm.mips.insve.h
+    1, // llvm.mips.insve.w
+    2, // llvm.mips.lbux
+    2, // llvm.mips.ld.b
+    2, // llvm.mips.ld.d
+    2, // llvm.mips.ld.h
+    2, // llvm.mips.ld.w
+    1, // llvm.mips.ldi.b
+    1, // llvm.mips.ldi.d
+    1, // llvm.mips.ldi.h
+    1, // llvm.mips.ldi.w
+    2, // llvm.mips.lhx
+    1, // llvm.mips.lsa
+    2, // llvm.mips.lwx
+    1, // llvm.mips.madd
+    1, // llvm.mips.madd.q.h
+    1, // llvm.mips.madd.q.w
+    1, // llvm.mips.maddr.q.h
+    1, // llvm.mips.maddr.q.w
+    1, // llvm.mips.maddu
+    1, // llvm.mips.maddv.b
+    1, // llvm.mips.maddv.d
+    1, // llvm.mips.maddv.h
+    1, // llvm.mips.maddv.w
+    3, // llvm.mips.maq.s.w.phl
+    3, // llvm.mips.maq.s.w.phr
+    3, // llvm.mips.maq.sa.w.phl
+    3, // llvm.mips.maq.sa.w.phr
+    1, // llvm.mips.max.a.b
+    1, // llvm.mips.max.a.d
+    1, // llvm.mips.max.a.h
+    1, // llvm.mips.max.a.w
+    1, // llvm.mips.max.s.b
+    1, // llvm.mips.max.s.d
+    1, // llvm.mips.max.s.h
+    1, // llvm.mips.max.s.w
+    1, // llvm.mips.max.u.b
+    1, // llvm.mips.max.u.d
+    1, // llvm.mips.max.u.h
+    1, // llvm.mips.max.u.w
+    1, // llvm.mips.maxi.s.b
+    1, // llvm.mips.maxi.s.d
+    1, // llvm.mips.maxi.s.h
+    1, // llvm.mips.maxi.s.w
+    1, // llvm.mips.maxi.u.b
+    1, // llvm.mips.maxi.u.d
+    1, // llvm.mips.maxi.u.h
+    1, // llvm.mips.maxi.u.w
+    1, // llvm.mips.min.a.b
+    1, // llvm.mips.min.a.d
+    1, // llvm.mips.min.a.h
+    1, // llvm.mips.min.a.w
+    1, // llvm.mips.min.s.b
+    1, // llvm.mips.min.s.d
+    1, // llvm.mips.min.s.h
+    1, // llvm.mips.min.s.w
+    1, // llvm.mips.min.u.b
+    1, // llvm.mips.min.u.d
+    1, // llvm.mips.min.u.h
+    1, // llvm.mips.min.u.w
+    1, // llvm.mips.mini.s.b
+    1, // llvm.mips.mini.s.d
+    1, // llvm.mips.mini.s.h
+    1, // llvm.mips.mini.s.w
+    1, // llvm.mips.mini.u.b
+    1, // llvm.mips.mini.u.d
+    1, // llvm.mips.mini.u.h
+    1, // llvm.mips.mini.u.w
+    1, // llvm.mips.mod.s.b
+    1, // llvm.mips.mod.s.d
+    1, // llvm.mips.mod.s.h
+    1, // llvm.mips.mod.s.w
+    1, // llvm.mips.mod.u.b
+    1, // llvm.mips.mod.u.d
+    1, // llvm.mips.mod.u.h
+    1, // llvm.mips.mod.u.w
+    1, // llvm.mips.modsub
+    1, // llvm.mips.move.v
+    1, // llvm.mips.msub
+    1, // llvm.mips.msub.q.h
+    1, // llvm.mips.msub.q.w
+    1, // llvm.mips.msubr.q.h
+    1, // llvm.mips.msubr.q.w
+    1, // llvm.mips.msubu
+    1, // llvm.mips.msubv.b
+    1, // llvm.mips.msubv.d
+    1, // llvm.mips.msubv.h
+    1, // llvm.mips.msubv.w
+    3, // llvm.mips.mthlip
+    3, // llvm.mips.mul.ph
+    1, // llvm.mips.mul.q.h
+    1, // llvm.mips.mul.q.w
+    3, // llvm.mips.mul.s.ph
+    3, // llvm.mips.muleq.s.w.phl
+    3, // llvm.mips.muleq.s.w.phr
+    3, // llvm.mips.muleu.s.ph.qbl
+    3, // llvm.mips.muleu.s.ph.qbr
+    3, // llvm.mips.mulq.rs.ph
+    3, // llvm.mips.mulq.rs.w
+    3, // llvm.mips.mulq.s.ph
+    3, // llvm.mips.mulq.s.w
+    1, // llvm.mips.mulr.q.h
+    1, // llvm.mips.mulr.q.w
+    1, // llvm.mips.mulsa.w.ph
+    3, // llvm.mips.mulsaq.s.w.ph
+    1, // llvm.mips.mult
+    1, // llvm.mips.multu
+    1, // llvm.mips.mulv.b
+    1, // llvm.mips.mulv.d
+    1, // llvm.mips.mulv.h
+    1, // llvm.mips.mulv.w
+    1, // llvm.mips.nloc.b
+    1, // llvm.mips.nloc.d
+    1, // llvm.mips.nloc.h
+    1, // llvm.mips.nloc.w
+    1, // llvm.mips.nlzc.b
+    1, // llvm.mips.nlzc.d
+    1, // llvm.mips.nlzc.h
+    1, // llvm.mips.nlzc.w
+    1, // llvm.mips.nor.v
+    1, // llvm.mips.nori.b
+    1, // llvm.mips.or.v
+    1, // llvm.mips.ori.b
+    1, // llvm.mips.packrl.ph
+    1, // llvm.mips.pckev.b
+    1, // llvm.mips.pckev.d
+    1, // llvm.mips.pckev.h
+    1, // llvm.mips.pckev.w
+    1, // llvm.mips.pckod.b
+    1, // llvm.mips.pckod.d
+    1, // llvm.mips.pckod.h
+    1, // llvm.mips.pckod.w
+    1, // llvm.mips.pcnt.b
+    1, // llvm.mips.pcnt.d
+    1, // llvm.mips.pcnt.h
+    1, // llvm.mips.pcnt.w
+    16, // llvm.mips.pick.ph
+    16, // llvm.mips.pick.qb
+    1, // llvm.mips.preceq.w.phl
+    1, // llvm.mips.preceq.w.phr
+    1, // llvm.mips.precequ.ph.qbl
+    1, // llvm.mips.precequ.ph.qbla
+    1, // llvm.mips.precequ.ph.qbr
+    1, // llvm.mips.precequ.ph.qbra
+    1, // llvm.mips.preceu.ph.qbl
+    1, // llvm.mips.preceu.ph.qbla
+    1, // llvm.mips.preceu.ph.qbr
+    1, // llvm.mips.preceu.ph.qbra
+    3, // llvm.mips.precr.qb.ph
+    1, // llvm.mips.precr.sra.ph.w
+    1, // llvm.mips.precr.sra.r.ph.w
+    1, // llvm.mips.precrq.ph.w
+    1, // llvm.mips.precrq.qb.ph
+    3, // llvm.mips.precrq.rs.ph.w
+    3, // llvm.mips.precrqu.s.qb.ph
+    1, // llvm.mips.prepend
+    1, // llvm.mips.raddu.w.qb
+    16, // llvm.mips.rddsp
+    1, // llvm.mips.repl.ph
+    1, // llvm.mips.repl.qb
+    1, // llvm.mips.sat.s.b
+    1, // llvm.mips.sat.s.d
+    1, // llvm.mips.sat.s.h
+    1, // llvm.mips.sat.s.w
+    1, // llvm.mips.sat.u.b
+    1, // llvm.mips.sat.u.d
+    1, // llvm.mips.sat.u.h
+    1, // llvm.mips.sat.u.w
+    1, // llvm.mips.shf.b
+    1, // llvm.mips.shf.h
+    1, // llvm.mips.shf.w
+    1, // llvm.mips.shilo
+    3, // llvm.mips.shll.ph
+    3, // llvm.mips.shll.qb
+    3, // llvm.mips.shll.s.ph
+    3, // llvm.mips.shll.s.w
+    1, // llvm.mips.shra.ph
+    1, // llvm.mips.shra.qb
+    1, // llvm.mips.shra.r.ph
+    1, // llvm.mips.shra.r.qb
+    1, // llvm.mips.shra.r.w
+    1, // llvm.mips.shrl.ph
+    1, // llvm.mips.shrl.qb
+    1, // llvm.mips.sld.b
+    1, // llvm.mips.sld.d
+    1, // llvm.mips.sld.h
+    1, // llvm.mips.sld.w
+    1, // llvm.mips.sldi.b
+    1, // llvm.mips.sldi.d
+    1, // llvm.mips.sldi.h
+    1, // llvm.mips.sldi.w
+    1, // llvm.mips.sll.b
+    1, // llvm.mips.sll.d
+    1, // llvm.mips.sll.h
+    1, // llvm.mips.sll.w
+    1, // llvm.mips.slli.b
+    1, // llvm.mips.slli.d
+    1, // llvm.mips.slli.h
+    1, // llvm.mips.slli.w
+    1, // llvm.mips.splat.b
+    1, // llvm.mips.splat.d
+    1, // llvm.mips.splat.h
+    1, // llvm.mips.splat.w
+    1, // llvm.mips.splati.b
+    1, // llvm.mips.splati.d
+    1, // llvm.mips.splati.h
+    1, // llvm.mips.splati.w
+    1, // llvm.mips.sra.b
+    1, // llvm.mips.sra.d
+    1, // llvm.mips.sra.h
+    1, // llvm.mips.sra.w
+    1, // llvm.mips.srai.b
+    1, // llvm.mips.srai.d
+    1, // llvm.mips.srai.h
+    1, // llvm.mips.srai.w
+    1, // llvm.mips.srar.b
+    1, // llvm.mips.srar.d
+    1, // llvm.mips.srar.h
+    1, // llvm.mips.srar.w
+    1, // llvm.mips.srari.b
+    1, // llvm.mips.srari.d
+    1, // llvm.mips.srari.h
+    1, // llvm.mips.srari.w
+    1, // llvm.mips.srl.b
+    1, // llvm.mips.srl.d
+    1, // llvm.mips.srl.h
+    1, // llvm.mips.srl.w
+    1, // llvm.mips.srli.b
+    1, // llvm.mips.srli.d
+    1, // llvm.mips.srli.h
+    1, // llvm.mips.srli.w
+    1, // llvm.mips.srlr.b
+    1, // llvm.mips.srlr.d
+    1, // llvm.mips.srlr.h
+    1, // llvm.mips.srlr.w
+    1, // llvm.mips.srlri.b
+    1, // llvm.mips.srlri.d
+    1, // llvm.mips.srlri.h
+    1, // llvm.mips.srlri.w
+    21, // llvm.mips.st.b
+    21, // llvm.mips.st.d
+    21, // llvm.mips.st.h
+    21, // llvm.mips.st.w
+    1, // llvm.mips.subq.ph
+    1, // llvm.mips.subq.s.ph
+    3, // llvm.mips.subq.s.w
+    1, // llvm.mips.subqh.ph
+    1, // llvm.mips.subqh.r.ph
+    1, // llvm.mips.subqh.r.w
+    1, // llvm.mips.subqh.w
+    1, // llvm.mips.subs.s.b
+    1, // llvm.mips.subs.s.d
+    1, // llvm.mips.subs.s.h
+    1, // llvm.mips.subs.s.w
+    1, // llvm.mips.subs.u.b
+    1, // llvm.mips.subs.u.d
+    1, // llvm.mips.subs.u.h
+    1, // llvm.mips.subs.u.w
+    1, // llvm.mips.subsus.u.b
+    1, // llvm.mips.subsus.u.d
+    1, // llvm.mips.subsus.u.h
+    1, // llvm.mips.subsus.u.w
+    1, // llvm.mips.subsuu.s.b
+    1, // llvm.mips.subsuu.s.d
+    1, // llvm.mips.subsuu.s.h
+    1, // llvm.mips.subsuu.s.w
+    3, // llvm.mips.subu.ph
+    1, // llvm.mips.subu.qb
+    3, // llvm.mips.subu.s.ph
+    1, // llvm.mips.subu.s.qb
+    1, // llvm.mips.subuh.qb
+    1, // llvm.mips.subuh.r.qb
+    1, // llvm.mips.subv.b
+    1, // llvm.mips.subv.d
+    1, // llvm.mips.subv.h
+    1, // llvm.mips.subv.w
+    1, // llvm.mips.subvi.b
+    1, // llvm.mips.subvi.d
+    1, // llvm.mips.subvi.h
+    1, // llvm.mips.subvi.w
+    1, // llvm.mips.vshf.b
+    1, // llvm.mips.vshf.d
+    1, // llvm.mips.vshf.h
+    1, // llvm.mips.vshf.w
+    3, // llvm.mips.wrdsp
+    1, // llvm.mips.xor.v
+    1, // llvm.mips.xori.b
+    1, // llvm.nvvm.add.rm.d
+    1, // llvm.nvvm.add.rm.f
+    1, // llvm.nvvm.add.rm.ftz.f
+    1, // llvm.nvvm.add.rn.d
+    1, // llvm.nvvm.add.rn.f
+    1, // llvm.nvvm.add.rn.ftz.f
+    1, // llvm.nvvm.add.rp.d
+    1, // llvm.nvvm.add.rp.f
+    1, // llvm.nvvm.add.rp.ftz.f
+    1, // llvm.nvvm.add.rz.d
+    1, // llvm.nvvm.add.rz.f
+    1, // llvm.nvvm.add.rz.ftz.f
+    18, // llvm.nvvm.atomic.add.gen.f.cta
+    18, // llvm.nvvm.atomic.add.gen.f.sys
+    18, // llvm.nvvm.atomic.add.gen.i.cta
+    18, // llvm.nvvm.atomic.add.gen.i.sys
+    18, // llvm.nvvm.atomic.and.gen.i.cta
+    18, // llvm.nvvm.atomic.and.gen.i.sys
+    18, // llvm.nvvm.atomic.cas.gen.i.cta
+    18, // llvm.nvvm.atomic.cas.gen.i.sys
+    18, // llvm.nvvm.atomic.dec.gen.i.cta
+    18, // llvm.nvvm.atomic.dec.gen.i.sys
+    18, // llvm.nvvm.atomic.exch.gen.i.cta
+    18, // llvm.nvvm.atomic.exch.gen.i.sys
+    18, // llvm.nvvm.atomic.inc.gen.i.cta
+    18, // llvm.nvvm.atomic.inc.gen.i.sys
+    18, // llvm.nvvm.atomic.load.add.f32
+    18, // llvm.nvvm.atomic.load.add.f64
+    18, // llvm.nvvm.atomic.load.dec.32
+    18, // llvm.nvvm.atomic.load.inc.32
+    18, // llvm.nvvm.atomic.max.gen.i.cta
+    18, // llvm.nvvm.atomic.max.gen.i.sys
+    18, // llvm.nvvm.atomic.min.gen.i.cta
+    18, // llvm.nvvm.atomic.min.gen.i.sys
+    18, // llvm.nvvm.atomic.or.gen.i.cta
+    18, // llvm.nvvm.atomic.or.gen.i.sys
+    18, // llvm.nvvm.atomic.xor.gen.i.cta
+    18, // llvm.nvvm.atomic.xor.gen.i.sys
+    33, // llvm.nvvm.bar.sync
+    33, // llvm.nvvm.bar.warp.sync
+    33, // llvm.nvvm.barrier
+    33, // llvm.nvvm.barrier.n
+    33, // llvm.nvvm.barrier.sync
+    33, // llvm.nvvm.barrier.sync.cnt
+    33, // llvm.nvvm.barrier0
+    33, // llvm.nvvm.barrier0.and
+    33, // llvm.nvvm.barrier0.or
+    33, // llvm.nvvm.barrier0.popc
+    1, // llvm.nvvm.bitcast.d2ll
+    1, // llvm.nvvm.bitcast.f2i
+    1, // llvm.nvvm.bitcast.i2f
+    1, // llvm.nvvm.bitcast.ll2d
+    1, // llvm.nvvm.ceil.d
+    1, // llvm.nvvm.ceil.f
+    1, // llvm.nvvm.ceil.ftz.f
+    3, // llvm.nvvm.compiler.error
+    3, // llvm.nvvm.compiler.warn
+    1, // llvm.nvvm.cos.approx.f
+    1, // llvm.nvvm.cos.approx.ftz.f
+    1, // llvm.nvvm.d2f.rm
+    1, // llvm.nvvm.d2f.rm.ftz
+    1, // llvm.nvvm.d2f.rn
+    1, // llvm.nvvm.d2f.rn.ftz
+    1, // llvm.nvvm.d2f.rp
+    1, // llvm.nvvm.d2f.rp.ftz
+    1, // llvm.nvvm.d2f.rz
+    1, // llvm.nvvm.d2f.rz.ftz
+    1, // llvm.nvvm.d2i.hi
+    1, // llvm.nvvm.d2i.lo
+    1, // llvm.nvvm.d2i.rm
+    1, // llvm.nvvm.d2i.rn
+    1, // llvm.nvvm.d2i.rp
+    1, // llvm.nvvm.d2i.rz
+    1, // llvm.nvvm.d2ll.rm
+    1, // llvm.nvvm.d2ll.rn
+    1, // llvm.nvvm.d2ll.rp
+    1, // llvm.nvvm.d2ll.rz
+    1, // llvm.nvvm.d2ui.rm
+    1, // llvm.nvvm.d2ui.rn
+    1, // llvm.nvvm.d2ui.rp
+    1, // llvm.nvvm.d2ui.rz
+    1, // llvm.nvvm.d2ull.rm
+    1, // llvm.nvvm.d2ull.rn
+    1, // llvm.nvvm.d2ull.rp
+    1, // llvm.nvvm.d2ull.rz
+    1, // llvm.nvvm.div.approx.f
+    1, // llvm.nvvm.div.approx.ftz.f
+    1, // llvm.nvvm.div.rm.d
+    1, // llvm.nvvm.div.rm.f
+    1, // llvm.nvvm.div.rm.ftz.f
+    1, // llvm.nvvm.div.rn.d
+    1, // llvm.nvvm.div.rn.f
+    1, // llvm.nvvm.div.rn.ftz.f
+    1, // llvm.nvvm.div.rp.d
+    1, // llvm.nvvm.div.rp.f
+    1, // llvm.nvvm.div.rp.ftz.f
+    1, // llvm.nvvm.div.rz.d
+    1, // llvm.nvvm.div.rz.f
+    1, // llvm.nvvm.div.rz.ftz.f
+    1, // llvm.nvvm.ex2.approx.d
+    1, // llvm.nvvm.ex2.approx.f
+    1, // llvm.nvvm.ex2.approx.ftz.f
+    1, // llvm.nvvm.f2h.rn
+    1, // llvm.nvvm.f2h.rn.ftz
+    1, // llvm.nvvm.f2i.rm
+    1, // llvm.nvvm.f2i.rm.ftz
+    1, // llvm.nvvm.f2i.rn
+    1, // llvm.nvvm.f2i.rn.ftz
+    1, // llvm.nvvm.f2i.rp
+    1, // llvm.nvvm.f2i.rp.ftz
+    1, // llvm.nvvm.f2i.rz
+    1, // llvm.nvvm.f2i.rz.ftz
+    1, // llvm.nvvm.f2ll.rm
+    1, // llvm.nvvm.f2ll.rm.ftz
+    1, // llvm.nvvm.f2ll.rn
+    1, // llvm.nvvm.f2ll.rn.ftz
+    1, // llvm.nvvm.f2ll.rp
+    1, // llvm.nvvm.f2ll.rp.ftz
+    1, // llvm.nvvm.f2ll.rz
+    1, // llvm.nvvm.f2ll.rz.ftz
+    1, // llvm.nvvm.f2ui.rm
+    1, // llvm.nvvm.f2ui.rm.ftz
+    1, // llvm.nvvm.f2ui.rn
+    1, // llvm.nvvm.f2ui.rn.ftz
+    1, // llvm.nvvm.f2ui.rp
+    1, // llvm.nvvm.f2ui.rp.ftz
+    1, // llvm.nvvm.f2ui.rz
+    1, // llvm.nvvm.f2ui.rz.ftz
+    1, // llvm.nvvm.f2ull.rm
+    1, // llvm.nvvm.f2ull.rm.ftz
+    1, // llvm.nvvm.f2ull.rn
+    1, // llvm.nvvm.f2ull.rn.ftz
+    1, // llvm.nvvm.f2ull.rp
+    1, // llvm.nvvm.f2ull.rp.ftz
+    1, // llvm.nvvm.f2ull.rz
+    1, // llvm.nvvm.f2ull.rz.ftz
+    1, // llvm.nvvm.fabs.d
+    1, // llvm.nvvm.fabs.f
+    1, // llvm.nvvm.fabs.ftz.f
+    1, // llvm.nvvm.floor.d
+    1, // llvm.nvvm.floor.f
+    1, // llvm.nvvm.floor.ftz.f
+    1, // llvm.nvvm.fma.rm.d
+    1, // llvm.nvvm.fma.rm.f
+    1, // llvm.nvvm.fma.rm.ftz.f
+    1, // llvm.nvvm.fma.rn.d
+    1, // llvm.nvvm.fma.rn.f
+    1, // llvm.nvvm.fma.rn.ftz.f
+    1, // llvm.nvvm.fma.rp.d
+    1, // llvm.nvvm.fma.rp.f
+    1, // llvm.nvvm.fma.rp.ftz.f
+    1, // llvm.nvvm.fma.rz.d
+    1, // llvm.nvvm.fma.rz.f
+    1, // llvm.nvvm.fma.rz.ftz.f
+    1, // llvm.nvvm.fmax.d
+    1, // llvm.nvvm.fmax.f
+    1, // llvm.nvvm.fmax.ftz.f
+    1, // llvm.nvvm.fmin.d
+    1, // llvm.nvvm.fmin.f
+    1, // llvm.nvvm.fmin.ftz.f
+    1, // llvm.nvvm.fns
+    1, // llvm.nvvm.i2d.rm
+    1, // llvm.nvvm.i2d.rn
+    1, // llvm.nvvm.i2d.rp
+    1, // llvm.nvvm.i2d.rz
+    1, // llvm.nvvm.i2f.rm
+    1, // llvm.nvvm.i2f.rn
+    1, // llvm.nvvm.i2f.rp
+    1, // llvm.nvvm.i2f.rz
+    1, // llvm.nvvm.isspacep.const
+    1, // llvm.nvvm.isspacep.global
+    1, // llvm.nvvm.isspacep.local
+    1, // llvm.nvvm.isspacep.shared
+    1, // llvm.nvvm.istypep.sampler
+    1, // llvm.nvvm.istypep.surface
+    1, // llvm.nvvm.istypep.texture
+    37, // llvm.nvvm.ldg.global.f
+    37, // llvm.nvvm.ldg.global.i
+    37, // llvm.nvvm.ldg.global.p
+    37, // llvm.nvvm.ldu.global.f
+    37, // llvm.nvvm.ldu.global.i
+    37, // llvm.nvvm.ldu.global.p
+    1, // llvm.nvvm.lg2.approx.d
+    1, // llvm.nvvm.lg2.approx.f
+    1, // llvm.nvvm.lg2.approx.ftz.f
+    1, // llvm.nvvm.ll2d.rm
+    1, // llvm.nvvm.ll2d.rn
+    1, // llvm.nvvm.ll2d.rp
+    1, // llvm.nvvm.ll2d.rz
+    1, // llvm.nvvm.ll2f.rm
+    1, // llvm.nvvm.ll2f.rn
+    1, // llvm.nvvm.ll2f.rp
+    1, // llvm.nvvm.ll2f.rz
+    1, // llvm.nvvm.lohi.i2d
+    38, // llvm.nvvm.match.all.sync.i32p
+    38, // llvm.nvvm.match.all.sync.i64p
+    38, // llvm.nvvm.match.any.sync.i32
+    38, // llvm.nvvm.match.any.sync.i64
+    3, // llvm.nvvm.membar.cta
+    3, // llvm.nvvm.membar.gl
+    3, // llvm.nvvm.membar.sys
+    1, // llvm.nvvm.move.double
+    1, // llvm.nvvm.move.float
+    1, // llvm.nvvm.move.i16
+    1, // llvm.nvvm.move.i32
+    1, // llvm.nvvm.move.i64
+    12, // llvm.nvvm.move.ptr
+    1, // llvm.nvvm.mul.rm.d
+    1, // llvm.nvvm.mul.rm.f
+    1, // llvm.nvvm.mul.rm.ftz.f
+    1, // llvm.nvvm.mul.rn.d
+    1, // llvm.nvvm.mul.rn.f
+    1, // llvm.nvvm.mul.rn.ftz.f
+    1, // llvm.nvvm.mul.rp.d
+    1, // llvm.nvvm.mul.rp.f
+    1, // llvm.nvvm.mul.rp.ftz.f
+    1, // llvm.nvvm.mul.rz.d
+    1, // llvm.nvvm.mul.rz.f
+    1, // llvm.nvvm.mul.rz.ftz.f
+    1, // llvm.nvvm.mul24.i
+    1, // llvm.nvvm.mul24.ui
+    1, // llvm.nvvm.mulhi.i
+    1, // llvm.nvvm.mulhi.ll
+    1, // llvm.nvvm.mulhi.ui
+    1, // llvm.nvvm.mulhi.ull
+    1, // llvm.nvvm.prmt
+    1, // llvm.nvvm.ptr.constant.to.gen
+    1, // llvm.nvvm.ptr.gen.to.constant
+    1, // llvm.nvvm.ptr.gen.to.global
+    1, // llvm.nvvm.ptr.gen.to.local
+    1, // llvm.nvvm.ptr.gen.to.param
+    1, // llvm.nvvm.ptr.gen.to.shared
+    1, // llvm.nvvm.ptr.global.to.gen
+    1, // llvm.nvvm.ptr.local.to.gen
+    1, // llvm.nvvm.ptr.shared.to.gen
+    1, // llvm.nvvm.rcp.approx.ftz.d
+    1, // llvm.nvvm.rcp.rm.d
+    1, // llvm.nvvm.rcp.rm.f
+    1, // llvm.nvvm.rcp.rm.ftz.f
+    1, // llvm.nvvm.rcp.rn.d
+    1, // llvm.nvvm.rcp.rn.f
+    1, // llvm.nvvm.rcp.rn.ftz.f
+    1, // llvm.nvvm.rcp.rp.d
+    1, // llvm.nvvm.rcp.rp.f
+    1, // llvm.nvvm.rcp.rp.ftz.f
+    1, // llvm.nvvm.rcp.rz.d
+    1, // llvm.nvvm.rcp.rz.f
+    1, // llvm.nvvm.rcp.rz.ftz.f
+    1, // llvm.nvvm.read.ptx.sreg.clock
+    1, // llvm.nvvm.read.ptx.sreg.clock64
+    1, // llvm.nvvm.read.ptx.sreg.ctaid.w
+    1, // llvm.nvvm.read.ptx.sreg.ctaid.x
+    1, // llvm.nvvm.read.ptx.sreg.ctaid.y
+    1, // llvm.nvvm.read.ptx.sreg.ctaid.z
+    1, // llvm.nvvm.read.ptx.sreg.envreg0
+    1, // llvm.nvvm.read.ptx.sreg.envreg1
+    1, // llvm.nvvm.read.ptx.sreg.envreg10
+    1, // llvm.nvvm.read.ptx.sreg.envreg11
+    1, // llvm.nvvm.read.ptx.sreg.envreg12
+    1, // llvm.nvvm.read.ptx.sreg.envreg13
+    1, // llvm.nvvm.read.ptx.sreg.envreg14
+    1, // llvm.nvvm.read.ptx.sreg.envreg15
+    1, // llvm.nvvm.read.ptx.sreg.envreg16
+    1, // llvm.nvvm.read.ptx.sreg.envreg17
+    1, // llvm.nvvm.read.ptx.sreg.envreg18
+    1, // llvm.nvvm.read.ptx.sreg.envreg19
+    1, // llvm.nvvm.read.ptx.sreg.envreg2
+    1, // llvm.nvvm.read.ptx.sreg.envreg20
+    1, // llvm.nvvm.read.ptx.sreg.envreg21
+    1, // llvm.nvvm.read.ptx.sreg.envreg22
+    1, // llvm.nvvm.read.ptx.sreg.envreg23
+    1, // llvm.nvvm.read.ptx.sreg.envreg24
+    1, // llvm.nvvm.read.ptx.sreg.envreg25
+    1, // llvm.nvvm.read.ptx.sreg.envreg26
+    1, // llvm.nvvm.read.ptx.sreg.envreg27
+    1, // llvm.nvvm.read.ptx.sreg.envreg28
+    1, // llvm.nvvm.read.ptx.sreg.envreg29
+    1, // llvm.nvvm.read.ptx.sreg.envreg3
+    1, // llvm.nvvm.read.ptx.sreg.envreg30
+    1, // llvm.nvvm.read.ptx.sreg.envreg31
+    1, // llvm.nvvm.read.ptx.sreg.envreg4
+    1, // llvm.nvvm.read.ptx.sreg.envreg5
+    1, // llvm.nvvm.read.ptx.sreg.envreg6
+    1, // llvm.nvvm.read.ptx.sreg.envreg7
+    1, // llvm.nvvm.read.ptx.sreg.envreg8
+    1, // llvm.nvvm.read.ptx.sreg.envreg9
+    1, // llvm.nvvm.read.ptx.sreg.gridid
+    1, // llvm.nvvm.read.ptx.sreg.laneid
+    1, // llvm.nvvm.read.ptx.sreg.lanemask.eq
+    1, // llvm.nvvm.read.ptx.sreg.lanemask.ge
+    1, // llvm.nvvm.read.ptx.sreg.lanemask.gt
+    1, // llvm.nvvm.read.ptx.sreg.lanemask.le
+    1, // llvm.nvvm.read.ptx.sreg.lanemask.lt
+    1, // llvm.nvvm.read.ptx.sreg.nctaid.w
+    1, // llvm.nvvm.read.ptx.sreg.nctaid.x
+    1, // llvm.nvvm.read.ptx.sreg.nctaid.y
+    1, // llvm.nvvm.read.ptx.sreg.nctaid.z
+    1, // llvm.nvvm.read.ptx.sreg.nsmid
+    1, // llvm.nvvm.read.ptx.sreg.ntid.w
+    1, // llvm.nvvm.read.ptx.sreg.ntid.x
+    1, // llvm.nvvm.read.ptx.sreg.ntid.y
+    1, // llvm.nvvm.read.ptx.sreg.ntid.z
+    1, // llvm.nvvm.read.ptx.sreg.nwarpid
+    1, // llvm.nvvm.read.ptx.sreg.pm0
+    1, // llvm.nvvm.read.ptx.sreg.pm1
+    1, // llvm.nvvm.read.ptx.sreg.pm2
+    1, // llvm.nvvm.read.ptx.sreg.pm3
+    1, // llvm.nvvm.read.ptx.sreg.smid
+    1, // llvm.nvvm.read.ptx.sreg.tid.w
+    1, // llvm.nvvm.read.ptx.sreg.tid.x
+    1, // llvm.nvvm.read.ptx.sreg.tid.y
+    1, // llvm.nvvm.read.ptx.sreg.tid.z
+    1, // llvm.nvvm.read.ptx.sreg.warpid
+    1, // llvm.nvvm.read.ptx.sreg.warpsize
+    1, // llvm.nvvm.reflect
+    1, // llvm.nvvm.rotate.b32
+    1, // llvm.nvvm.rotate.b64
+    1, // llvm.nvvm.rotate.right.b64
+    1, // llvm.nvvm.round.d
+    1, // llvm.nvvm.round.f
+    1, // llvm.nvvm.round.ftz.f
+    1, // llvm.nvvm.rsqrt.approx.d
+    1, // llvm.nvvm.rsqrt.approx.f
+    1, // llvm.nvvm.rsqrt.approx.ftz.f
+    1, // llvm.nvvm.sad.i
+    1, // llvm.nvvm.sad.ui
+    1, // llvm.nvvm.saturate.d
+    1, // llvm.nvvm.saturate.f
+    1, // llvm.nvvm.saturate.ftz.f
+    38, // llvm.nvvm.shfl.bfly.f32
+    38, // llvm.nvvm.shfl.bfly.i32
+    38, // llvm.nvvm.shfl.down.f32
+    38, // llvm.nvvm.shfl.down.i32
+    38, // llvm.nvvm.shfl.idx.f32
+    38, // llvm.nvvm.shfl.idx.i32
+    38, // llvm.nvvm.shfl.sync.bfly.f32
+    38, // llvm.nvvm.shfl.sync.bfly.i32
+    38, // llvm.nvvm.shfl.sync.down.f32
+    38, // llvm.nvvm.shfl.sync.down.i32
+    38, // llvm.nvvm.shfl.sync.idx.f32
+    38, // llvm.nvvm.shfl.sync.idx.i32
+    38, // llvm.nvvm.shfl.sync.up.f32
+    38, // llvm.nvvm.shfl.sync.up.i32
+    38, // llvm.nvvm.shfl.up.f32
+    38, // llvm.nvvm.shfl.up.i32
+    1, // llvm.nvvm.sin.approx.f
+    1, // llvm.nvvm.sin.approx.ftz.f
+    1, // llvm.nvvm.sqrt.approx.f
+    1, // llvm.nvvm.sqrt.approx.ftz.f
+    1, // llvm.nvvm.sqrt.f
+    1, // llvm.nvvm.sqrt.rm.d
+    1, // llvm.nvvm.sqrt.rm.f
+    1, // llvm.nvvm.sqrt.rm.ftz.f
+    1, // llvm.nvvm.sqrt.rn.d
+    1, // llvm.nvvm.sqrt.rn.f
+    1, // llvm.nvvm.sqrt.rn.ftz.f
+    1, // llvm.nvvm.sqrt.rp.d
+    1, // llvm.nvvm.sqrt.rp.f
+    1, // llvm.nvvm.sqrt.rp.ftz.f
+    1, // llvm.nvvm.sqrt.rz.d
+    1, // llvm.nvvm.sqrt.rz.f
+    1, // llvm.nvvm.sqrt.rz.ftz.f
+    3, // llvm.nvvm.suld.1d.array.i16.clamp
+    3, // llvm.nvvm.suld.1d.array.i16.trap
+    3, // llvm.nvvm.suld.1d.array.i16.zero
+    3, // llvm.nvvm.suld.1d.array.i32.clamp
+    3, // llvm.nvvm.suld.1d.array.i32.trap
+    3, // llvm.nvvm.suld.1d.array.i32.zero
+    3, // llvm.nvvm.suld.1d.array.i64.clamp
+    3, // llvm.nvvm.suld.1d.array.i64.trap
+    3, // llvm.nvvm.suld.1d.array.i64.zero
+    3, // llvm.nvvm.suld.1d.array.i8.clamp
+    3, // llvm.nvvm.suld.1d.array.i8.trap
+    3, // llvm.nvvm.suld.1d.array.i8.zero
+    3, // llvm.nvvm.suld.1d.array.v2i16.clamp
+    3, // llvm.nvvm.suld.1d.array.v2i16.trap
+    3, // llvm.nvvm.suld.1d.array.v2i16.zero
+    3, // llvm.nvvm.suld.1d.array.v2i32.clamp
+    3, // llvm.nvvm.suld.1d.array.v2i32.trap
+    3, // llvm.nvvm.suld.1d.array.v2i32.zero
+    3, // llvm.nvvm.suld.1d.array.v2i64.clamp
+    3, // llvm.nvvm.suld.1d.array.v2i64.trap
+    3, // llvm.nvvm.suld.1d.array.v2i64.zero
+    3, // llvm.nvvm.suld.1d.array.v2i8.clamp
+    3, // llvm.nvvm.suld.1d.array.v2i8.trap
+    3, // llvm.nvvm.suld.1d.array.v2i8.zero
+    3, // llvm.nvvm.suld.1d.array.v4i16.clamp
+    3, // llvm.nvvm.suld.1d.array.v4i16.trap
+    3, // llvm.nvvm.suld.1d.array.v4i16.zero
+    3, // llvm.nvvm.suld.1d.array.v4i32.clamp
+    3, // llvm.nvvm.suld.1d.array.v4i32.trap
+    3, // llvm.nvvm.suld.1d.array.v4i32.zero
+    3, // llvm.nvvm.suld.1d.array.v4i8.clamp
+    3, // llvm.nvvm.suld.1d.array.v4i8.trap
+    3, // llvm.nvvm.suld.1d.array.v4i8.zero
+    3, // llvm.nvvm.suld.1d.i16.clamp
+    3, // llvm.nvvm.suld.1d.i16.trap
+    3, // llvm.nvvm.suld.1d.i16.zero
+    3, // llvm.nvvm.suld.1d.i32.clamp
+    3, // llvm.nvvm.suld.1d.i32.trap
+    3, // llvm.nvvm.suld.1d.i32.zero
+    3, // llvm.nvvm.suld.1d.i64.clamp
+    3, // llvm.nvvm.suld.1d.i64.trap
+    3, // llvm.nvvm.suld.1d.i64.zero
+    3, // llvm.nvvm.suld.1d.i8.clamp
+    3, // llvm.nvvm.suld.1d.i8.trap
+    3, // llvm.nvvm.suld.1d.i8.zero
+    3, // llvm.nvvm.suld.1d.v2i16.clamp
+    3, // llvm.nvvm.suld.1d.v2i16.trap
+    3, // llvm.nvvm.suld.1d.v2i16.zero
+    3, // llvm.nvvm.suld.1d.v2i32.clamp
+    3, // llvm.nvvm.suld.1d.v2i32.trap
+    3, // llvm.nvvm.suld.1d.v2i32.zero
+    3, // llvm.nvvm.suld.1d.v2i64.clamp
+    3, // llvm.nvvm.suld.1d.v2i64.trap
+    3, // llvm.nvvm.suld.1d.v2i64.zero
+    3, // llvm.nvvm.suld.1d.v2i8.clamp
+    3, // llvm.nvvm.suld.1d.v2i8.trap
+    3, // llvm.nvvm.suld.1d.v2i8.zero
+    3, // llvm.nvvm.suld.1d.v4i16.clamp
+    3, // llvm.nvvm.suld.1d.v4i16.trap
+    3, // llvm.nvvm.suld.1d.v4i16.zero
+    3, // llvm.nvvm.suld.1d.v4i32.clamp
+    3, // llvm.nvvm.suld.1d.v4i32.trap
+    3, // llvm.nvvm.suld.1d.v4i32.zero
+    3, // llvm.nvvm.suld.1d.v4i8.clamp
+    3, // llvm.nvvm.suld.1d.v4i8.trap
+    3, // llvm.nvvm.suld.1d.v4i8.zero
+    3, // llvm.nvvm.suld.2d.array.i16.clamp
+    3, // llvm.nvvm.suld.2d.array.i16.trap
+    3, // llvm.nvvm.suld.2d.array.i16.zero
+    3, // llvm.nvvm.suld.2d.array.i32.clamp
+    3, // llvm.nvvm.suld.2d.array.i32.trap
+    3, // llvm.nvvm.suld.2d.array.i32.zero
+    3, // llvm.nvvm.suld.2d.array.i64.clamp
+    3, // llvm.nvvm.suld.2d.array.i64.trap
+    3, // llvm.nvvm.suld.2d.array.i64.zero
+    3, // llvm.nvvm.suld.2d.array.i8.clamp
+    3, // llvm.nvvm.suld.2d.array.i8.trap
+    3, // llvm.nvvm.suld.2d.array.i8.zero
+    3, // llvm.nvvm.suld.2d.array.v2i16.clamp
+    3, // llvm.nvvm.suld.2d.array.v2i16.trap
+    3, // llvm.nvvm.suld.2d.array.v2i16.zero
+    3, // llvm.nvvm.suld.2d.array.v2i32.clamp
+    3, // llvm.nvvm.suld.2d.array.v2i32.trap
+    3, // llvm.nvvm.suld.2d.array.v2i32.zero
+    3, // llvm.nvvm.suld.2d.array.v2i64.clamp
+    3, // llvm.nvvm.suld.2d.array.v2i64.trap
+    3, // llvm.nvvm.suld.2d.array.v2i64.zero
+    3, // llvm.nvvm.suld.2d.array.v2i8.clamp
+    3, // llvm.nvvm.suld.2d.array.v2i8.trap
+    3, // llvm.nvvm.suld.2d.array.v2i8.zero
+    3, // llvm.nvvm.suld.2d.array.v4i16.clamp
+    3, // llvm.nvvm.suld.2d.array.v4i16.trap
+    3, // llvm.nvvm.suld.2d.array.v4i16.zero
+    3, // llvm.nvvm.suld.2d.array.v4i32.clamp
+    3, // llvm.nvvm.suld.2d.array.v4i32.trap
+    3, // llvm.nvvm.suld.2d.array.v4i32.zero
+    3, // llvm.nvvm.suld.2d.array.v4i8.clamp
+    3, // llvm.nvvm.suld.2d.array.v4i8.trap
+    3, // llvm.nvvm.suld.2d.array.v4i8.zero
+    3, // llvm.nvvm.suld.2d.i16.clamp
+    3, // llvm.nvvm.suld.2d.i16.trap
+    3, // llvm.nvvm.suld.2d.i16.zero
+    3, // llvm.nvvm.suld.2d.i32.clamp
+    3, // llvm.nvvm.suld.2d.i32.trap
+    3, // llvm.nvvm.suld.2d.i32.zero
+    3, // llvm.nvvm.suld.2d.i64.clamp
+    3, // llvm.nvvm.suld.2d.i64.trap
+    3, // llvm.nvvm.suld.2d.i64.zero
+    3, // llvm.nvvm.suld.2d.i8.clamp
+    3, // llvm.nvvm.suld.2d.i8.trap
+    3, // llvm.nvvm.suld.2d.i8.zero
+    3, // llvm.nvvm.suld.2d.v2i16.clamp
+    3, // llvm.nvvm.suld.2d.v2i16.trap
+    3, // llvm.nvvm.suld.2d.v2i16.zero
+    3, // llvm.nvvm.suld.2d.v2i32.clamp
+    3, // llvm.nvvm.suld.2d.v2i32.trap
+    3, // llvm.nvvm.suld.2d.v2i32.zero
+    3, // llvm.nvvm.suld.2d.v2i64.clamp
+    3, // llvm.nvvm.suld.2d.v2i64.trap
+    3, // llvm.nvvm.suld.2d.v2i64.zero
+    3, // llvm.nvvm.suld.2d.v2i8.clamp
+    3, // llvm.nvvm.suld.2d.v2i8.trap
+    3, // llvm.nvvm.suld.2d.v2i8.zero
+    3, // llvm.nvvm.suld.2d.v4i16.clamp
+    3, // llvm.nvvm.suld.2d.v4i16.trap
+    3, // llvm.nvvm.suld.2d.v4i16.zero
+    3, // llvm.nvvm.suld.2d.v4i32.clamp
+    3, // llvm.nvvm.suld.2d.v4i32.trap
+    3, // llvm.nvvm.suld.2d.v4i32.zero
+    3, // llvm.nvvm.suld.2d.v4i8.clamp
+    3, // llvm.nvvm.suld.2d.v4i8.trap
+    3, // llvm.nvvm.suld.2d.v4i8.zero
+    3, // llvm.nvvm.suld.3d.i16.clamp
+    3, // llvm.nvvm.suld.3d.i16.trap
+    3, // llvm.nvvm.suld.3d.i16.zero
+    3, // llvm.nvvm.suld.3d.i32.clamp
+    3, // llvm.nvvm.suld.3d.i32.trap
+    3, // llvm.nvvm.suld.3d.i32.zero
+    3, // llvm.nvvm.suld.3d.i64.clamp
+    3, // llvm.nvvm.suld.3d.i64.trap
+    3, // llvm.nvvm.suld.3d.i64.zero
+    3, // llvm.nvvm.suld.3d.i8.clamp
+    3, // llvm.nvvm.suld.3d.i8.trap
+    3, // llvm.nvvm.suld.3d.i8.zero
+    3, // llvm.nvvm.suld.3d.v2i16.clamp
+    3, // llvm.nvvm.suld.3d.v2i16.trap
+    3, // llvm.nvvm.suld.3d.v2i16.zero
+    3, // llvm.nvvm.suld.3d.v2i32.clamp
+    3, // llvm.nvvm.suld.3d.v2i32.trap
+    3, // llvm.nvvm.suld.3d.v2i32.zero
+    3, // llvm.nvvm.suld.3d.v2i64.clamp
+    3, // llvm.nvvm.suld.3d.v2i64.trap
+    3, // llvm.nvvm.suld.3d.v2i64.zero
+    3, // llvm.nvvm.suld.3d.v2i8.clamp
+    3, // llvm.nvvm.suld.3d.v2i8.trap
+    3, // llvm.nvvm.suld.3d.v2i8.zero
+    3, // llvm.nvvm.suld.3d.v4i16.clamp
+    3, // llvm.nvvm.suld.3d.v4i16.trap
+    3, // llvm.nvvm.suld.3d.v4i16.zero
+    3, // llvm.nvvm.suld.3d.v4i32.clamp
+    3, // llvm.nvvm.suld.3d.v4i32.trap
+    3, // llvm.nvvm.suld.3d.v4i32.zero
+    3, // llvm.nvvm.suld.3d.v4i8.clamp
+    3, // llvm.nvvm.suld.3d.v4i8.trap
+    3, // llvm.nvvm.suld.3d.v4i8.zero
+    1, // llvm.nvvm.suq.array.size
+    1, // llvm.nvvm.suq.channel.data.type
+    1, // llvm.nvvm.suq.channel.order
+    1, // llvm.nvvm.suq.depth
+    1, // llvm.nvvm.suq.height
+    1, // llvm.nvvm.suq.width
+    3, // llvm.nvvm.sust.b.1d.array.i16.clamp
+    3, // llvm.nvvm.sust.b.1d.array.i16.trap
+    3, // llvm.nvvm.sust.b.1d.array.i16.zero
+    3, // llvm.nvvm.sust.b.1d.array.i32.clamp
+    3, // llvm.nvvm.sust.b.1d.array.i32.trap
+    3, // llvm.nvvm.sust.b.1d.array.i32.zero
+    3, // llvm.nvvm.sust.b.1d.array.i64.clamp
+    3, // llvm.nvvm.sust.b.1d.array.i64.trap
+    3, // llvm.nvvm.sust.b.1d.array.i64.zero
+    3, // llvm.nvvm.sust.b.1d.array.i8.clamp
+    3, // llvm.nvvm.sust.b.1d.array.i8.trap
+    3, // llvm.nvvm.sust.b.1d.array.i8.zero
+    3, // llvm.nvvm.sust.b.1d.array.v2i16.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v2i16.trap
+    3, // llvm.nvvm.sust.b.1d.array.v2i16.zero
+    3, // llvm.nvvm.sust.b.1d.array.v2i32.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v2i32.trap
+    3, // llvm.nvvm.sust.b.1d.array.v2i32.zero
+    3, // llvm.nvvm.sust.b.1d.array.v2i64.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v2i64.trap
+    3, // llvm.nvvm.sust.b.1d.array.v2i64.zero
+    3, // llvm.nvvm.sust.b.1d.array.v2i8.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v2i8.trap
+    3, // llvm.nvvm.sust.b.1d.array.v2i8.zero
+    3, // llvm.nvvm.sust.b.1d.array.v4i16.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v4i16.trap
+    3, // llvm.nvvm.sust.b.1d.array.v4i16.zero
+    3, // llvm.nvvm.sust.b.1d.array.v4i32.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v4i32.trap
+    3, // llvm.nvvm.sust.b.1d.array.v4i32.zero
+    3, // llvm.nvvm.sust.b.1d.array.v4i8.clamp
+    3, // llvm.nvvm.sust.b.1d.array.v4i8.trap
+    3, // llvm.nvvm.sust.b.1d.array.v4i8.zero
+    3, // llvm.nvvm.sust.b.1d.i16.clamp
+    3, // llvm.nvvm.sust.b.1d.i16.trap
+    3, // llvm.nvvm.sust.b.1d.i16.zero
+    3, // llvm.nvvm.sust.b.1d.i32.clamp
+    3, // llvm.nvvm.sust.b.1d.i32.trap
+    3, // llvm.nvvm.sust.b.1d.i32.zero
+    3, // llvm.nvvm.sust.b.1d.i64.clamp
+    3, // llvm.nvvm.sust.b.1d.i64.trap
+    3, // llvm.nvvm.sust.b.1d.i64.zero
+    3, // llvm.nvvm.sust.b.1d.i8.clamp
+    3, // llvm.nvvm.sust.b.1d.i8.trap
+    3, // llvm.nvvm.sust.b.1d.i8.zero
+    3, // llvm.nvvm.sust.b.1d.v2i16.clamp
+    3, // llvm.nvvm.sust.b.1d.v2i16.trap
+    3, // llvm.nvvm.sust.b.1d.v2i16.zero
+    3, // llvm.nvvm.sust.b.1d.v2i32.clamp
+    3, // llvm.nvvm.sust.b.1d.v2i32.trap
+    3, // llvm.nvvm.sust.b.1d.v2i32.zero
+    3, // llvm.nvvm.sust.b.1d.v2i64.clamp
+    3, // llvm.nvvm.sust.b.1d.v2i64.trap
+    3, // llvm.nvvm.sust.b.1d.v2i64.zero
+    3, // llvm.nvvm.sust.b.1d.v2i8.clamp
+    3, // llvm.nvvm.sust.b.1d.v2i8.trap
+    3, // llvm.nvvm.sust.b.1d.v2i8.zero
+    3, // llvm.nvvm.sust.b.1d.v4i16.clamp
+    3, // llvm.nvvm.sust.b.1d.v4i16.trap
+    3, // llvm.nvvm.sust.b.1d.v4i16.zero
+    3, // llvm.nvvm.sust.b.1d.v4i32.clamp
+    3, // llvm.nvvm.sust.b.1d.v4i32.trap
+    3, // llvm.nvvm.sust.b.1d.v4i32.zero
+    3, // llvm.nvvm.sust.b.1d.v4i8.clamp
+    3, // llvm.nvvm.sust.b.1d.v4i8.trap
+    3, // llvm.nvvm.sust.b.1d.v4i8.zero
+    3, // llvm.nvvm.sust.b.2d.array.i16.clamp
+    3, // llvm.nvvm.sust.b.2d.array.i16.trap
+    3, // llvm.nvvm.sust.b.2d.array.i16.zero
+    3, // llvm.nvvm.sust.b.2d.array.i32.clamp
+    3, // llvm.nvvm.sust.b.2d.array.i32.trap
+    3, // llvm.nvvm.sust.b.2d.array.i32.zero
+    3, // llvm.nvvm.sust.b.2d.array.i64.clamp
+    3, // llvm.nvvm.sust.b.2d.array.i64.trap
+    3, // llvm.nvvm.sust.b.2d.array.i64.zero
+    3, // llvm.nvvm.sust.b.2d.array.i8.clamp
+    3, // llvm.nvvm.sust.b.2d.array.i8.trap
+    3, // llvm.nvvm.sust.b.2d.array.i8.zero
+    3, // llvm.nvvm.sust.b.2d.array.v2i16.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v2i16.trap
+    3, // llvm.nvvm.sust.b.2d.array.v2i16.zero
+    3, // llvm.nvvm.sust.b.2d.array.v2i32.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v2i32.trap
+    3, // llvm.nvvm.sust.b.2d.array.v2i32.zero
+    3, // llvm.nvvm.sust.b.2d.array.v2i64.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v2i64.trap
+    3, // llvm.nvvm.sust.b.2d.array.v2i64.zero
+    3, // llvm.nvvm.sust.b.2d.array.v2i8.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v2i8.trap
+    3, // llvm.nvvm.sust.b.2d.array.v2i8.zero
+    3, // llvm.nvvm.sust.b.2d.array.v4i16.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v4i16.trap
+    3, // llvm.nvvm.sust.b.2d.array.v4i16.zero
+    3, // llvm.nvvm.sust.b.2d.array.v4i32.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v4i32.trap
+    3, // llvm.nvvm.sust.b.2d.array.v4i32.zero
+    3, // llvm.nvvm.sust.b.2d.array.v4i8.clamp
+    3, // llvm.nvvm.sust.b.2d.array.v4i8.trap
+    3, // llvm.nvvm.sust.b.2d.array.v4i8.zero
+    3, // llvm.nvvm.sust.b.2d.i16.clamp
+    3, // llvm.nvvm.sust.b.2d.i16.trap
+    3, // llvm.nvvm.sust.b.2d.i16.zero
+    3, // llvm.nvvm.sust.b.2d.i32.clamp
+    3, // llvm.nvvm.sust.b.2d.i32.trap
+    3, // llvm.nvvm.sust.b.2d.i32.zero
+    3, // llvm.nvvm.sust.b.2d.i64.clamp
+    3, // llvm.nvvm.sust.b.2d.i64.trap
+    3, // llvm.nvvm.sust.b.2d.i64.zero
+    3, // llvm.nvvm.sust.b.2d.i8.clamp
+    3, // llvm.nvvm.sust.b.2d.i8.trap
+    3, // llvm.nvvm.sust.b.2d.i8.zero
+    3, // llvm.nvvm.sust.b.2d.v2i16.clamp
+    3, // llvm.nvvm.sust.b.2d.v2i16.trap
+    3, // llvm.nvvm.sust.b.2d.v2i16.zero
+    3, // llvm.nvvm.sust.b.2d.v2i32.clamp
+    3, // llvm.nvvm.sust.b.2d.v2i32.trap
+    3, // llvm.nvvm.sust.b.2d.v2i32.zero
+    3, // llvm.nvvm.sust.b.2d.v2i64.clamp
+    3, // llvm.nvvm.sust.b.2d.v2i64.trap
+    3, // llvm.nvvm.sust.b.2d.v2i64.zero
+    3, // llvm.nvvm.sust.b.2d.v2i8.clamp
+    3, // llvm.nvvm.sust.b.2d.v2i8.trap
+    3, // llvm.nvvm.sust.b.2d.v2i8.zero
+    3, // llvm.nvvm.sust.b.2d.v4i16.clamp
+    3, // llvm.nvvm.sust.b.2d.v4i16.trap
+    3, // llvm.nvvm.sust.b.2d.v4i16.zero
+    3, // llvm.nvvm.sust.b.2d.v4i32.clamp
+    3, // llvm.nvvm.sust.b.2d.v4i32.trap
+    3, // llvm.nvvm.sust.b.2d.v4i32.zero
+    3, // llvm.nvvm.sust.b.2d.v4i8.clamp
+    3, // llvm.nvvm.sust.b.2d.v4i8.trap
+    3, // llvm.nvvm.sust.b.2d.v4i8.zero
+    3, // llvm.nvvm.sust.b.3d.i16.clamp
+    3, // llvm.nvvm.sust.b.3d.i16.trap
+    3, // llvm.nvvm.sust.b.3d.i16.zero
+    3, // llvm.nvvm.sust.b.3d.i32.clamp
+    3, // llvm.nvvm.sust.b.3d.i32.trap
+    3, // llvm.nvvm.sust.b.3d.i32.zero
+    3, // llvm.nvvm.sust.b.3d.i64.clamp
+    3, // llvm.nvvm.sust.b.3d.i64.trap
+    3, // llvm.nvvm.sust.b.3d.i64.zero
+    3, // llvm.nvvm.sust.b.3d.i8.clamp
+    3, // llvm.nvvm.sust.b.3d.i8.trap
+    3, // llvm.nvvm.sust.b.3d.i8.zero
+    3, // llvm.nvvm.sust.b.3d.v2i16.clamp
+    3, // llvm.nvvm.sust.b.3d.v2i16.trap
+    3, // llvm.nvvm.sust.b.3d.v2i16.zero
+    3, // llvm.nvvm.sust.b.3d.v2i32.clamp
+    3, // llvm.nvvm.sust.b.3d.v2i32.trap
+    3, // llvm.nvvm.sust.b.3d.v2i32.zero
+    3, // llvm.nvvm.sust.b.3d.v2i64.clamp
+    3, // llvm.nvvm.sust.b.3d.v2i64.trap
+    3, // llvm.nvvm.sust.b.3d.v2i64.zero
+    3, // llvm.nvvm.sust.b.3d.v2i8.clamp
+    3, // llvm.nvvm.sust.b.3d.v2i8.trap
+    3, // llvm.nvvm.sust.b.3d.v2i8.zero
+    3, // llvm.nvvm.sust.b.3d.v4i16.clamp
+    3, // llvm.nvvm.sust.b.3d.v4i16.trap
+    3, // llvm.nvvm.sust.b.3d.v4i16.zero
+    3, // llvm.nvvm.sust.b.3d.v4i32.clamp
+    3, // llvm.nvvm.sust.b.3d.v4i32.trap
+    3, // llvm.nvvm.sust.b.3d.v4i32.zero
+    3, // llvm.nvvm.sust.b.3d.v4i8.clamp
+    3, // llvm.nvvm.sust.b.3d.v4i8.trap
+    3, // llvm.nvvm.sust.b.3d.v4i8.zero
+    3, // llvm.nvvm.sust.p.1d.array.i16.trap
+    3, // llvm.nvvm.sust.p.1d.array.i32.trap
+    3, // llvm.nvvm.sust.p.1d.array.i8.trap
+    3, // llvm.nvvm.sust.p.1d.array.v2i16.trap
+    3, // llvm.nvvm.sust.p.1d.array.v2i32.trap
+    3, // llvm.nvvm.sust.p.1d.array.v2i8.trap
+    3, // llvm.nvvm.sust.p.1d.array.v4i16.trap
+    3, // llvm.nvvm.sust.p.1d.array.v4i32.trap
+    3, // llvm.nvvm.sust.p.1d.array.v4i8.trap
+    3, // llvm.nvvm.sust.p.1d.i16.trap
+    3, // llvm.nvvm.sust.p.1d.i32.trap
+    3, // llvm.nvvm.sust.p.1d.i8.trap
+    3, // llvm.nvvm.sust.p.1d.v2i16.trap
+    3, // llvm.nvvm.sust.p.1d.v2i32.trap
+    3, // llvm.nvvm.sust.p.1d.v2i8.trap
+    3, // llvm.nvvm.sust.p.1d.v4i16.trap
+    3, // llvm.nvvm.sust.p.1d.v4i32.trap
+    3, // llvm.nvvm.sust.p.1d.v4i8.trap
+    3, // llvm.nvvm.sust.p.2d.array.i16.trap
+    3, // llvm.nvvm.sust.p.2d.array.i32.trap
+    3, // llvm.nvvm.sust.p.2d.array.i8.trap
+    3, // llvm.nvvm.sust.p.2d.array.v2i16.trap
+    3, // llvm.nvvm.sust.p.2d.array.v2i32.trap
+    3, // llvm.nvvm.sust.p.2d.array.v2i8.trap
+    3, // llvm.nvvm.sust.p.2d.array.v4i16.trap
+    3, // llvm.nvvm.sust.p.2d.array.v4i32.trap
+    3, // llvm.nvvm.sust.p.2d.array.v4i8.trap
+    3, // llvm.nvvm.sust.p.2d.i16.trap
+    3, // llvm.nvvm.sust.p.2d.i32.trap
+    3, // llvm.nvvm.sust.p.2d.i8.trap
+    3, // llvm.nvvm.sust.p.2d.v2i16.trap
+    3, // llvm.nvvm.sust.p.2d.v2i32.trap
+    3, // llvm.nvvm.sust.p.2d.v2i8.trap
+    3, // llvm.nvvm.sust.p.2d.v4i16.trap
+    3, // llvm.nvvm.sust.p.2d.v4i32.trap
+    3, // llvm.nvvm.sust.p.2d.v4i8.trap
+    3, // llvm.nvvm.sust.p.3d.i16.trap
+    3, // llvm.nvvm.sust.p.3d.i32.trap
+    3, // llvm.nvvm.sust.p.3d.i8.trap
+    3, // llvm.nvvm.sust.p.3d.v2i16.trap
+    3, // llvm.nvvm.sust.p.3d.v2i32.trap
+    3, // llvm.nvvm.sust.p.3d.v2i8.trap
+    3, // llvm.nvvm.sust.p.3d.v4i16.trap
+    3, // llvm.nvvm.sust.p.3d.v4i32.trap
+    3, // llvm.nvvm.sust.p.3d.v4i8.trap
+    1, // llvm.nvvm.swap.lo.hi.b64
+    3, // llvm.nvvm.tex.1d.array.grad.v4f32.f32
+    3, // llvm.nvvm.tex.1d.array.grad.v4s32.f32
+    3, // llvm.nvvm.tex.1d.array.grad.v4u32.f32
+    3, // llvm.nvvm.tex.1d.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.1d.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.1d.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.1d.array.v4f32.f32
+    3, // llvm.nvvm.tex.1d.array.v4f32.s32
+    3, // llvm.nvvm.tex.1d.array.v4s32.f32
+    3, // llvm.nvvm.tex.1d.array.v4s32.s32
+    3, // llvm.nvvm.tex.1d.array.v4u32.f32
+    3, // llvm.nvvm.tex.1d.array.v4u32.s32
+    3, // llvm.nvvm.tex.1d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.1d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.1d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.1d.level.v4f32.f32
+    3, // llvm.nvvm.tex.1d.level.v4s32.f32
+    3, // llvm.nvvm.tex.1d.level.v4u32.f32
+    3, // llvm.nvvm.tex.1d.v4f32.f32
+    3, // llvm.nvvm.tex.1d.v4f32.s32
+    3, // llvm.nvvm.tex.1d.v4s32.f32
+    3, // llvm.nvvm.tex.1d.v4s32.s32
+    3, // llvm.nvvm.tex.1d.v4u32.f32
+    3, // llvm.nvvm.tex.1d.v4u32.s32
+    3, // llvm.nvvm.tex.2d.array.grad.v4f32.f32
+    3, // llvm.nvvm.tex.2d.array.grad.v4s32.f32
+    3, // llvm.nvvm.tex.2d.array.grad.v4u32.f32
+    3, // llvm.nvvm.tex.2d.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.2d.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.2d.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.2d.array.v4f32.f32
+    3, // llvm.nvvm.tex.2d.array.v4f32.s32
+    3, // llvm.nvvm.tex.2d.array.v4s32.f32
+    3, // llvm.nvvm.tex.2d.array.v4s32.s32
+    3, // llvm.nvvm.tex.2d.array.v4u32.f32
+    3, // llvm.nvvm.tex.2d.array.v4u32.s32
+    3, // llvm.nvvm.tex.2d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.2d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.2d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.2d.level.v4f32.f32
+    3, // llvm.nvvm.tex.2d.level.v4s32.f32
+    3, // llvm.nvvm.tex.2d.level.v4u32.f32
+    3, // llvm.nvvm.tex.2d.v4f32.f32
+    3, // llvm.nvvm.tex.2d.v4f32.s32
+    3, // llvm.nvvm.tex.2d.v4s32.f32
+    3, // llvm.nvvm.tex.2d.v4s32.s32
+    3, // llvm.nvvm.tex.2d.v4u32.f32
+    3, // llvm.nvvm.tex.2d.v4u32.s32
+    3, // llvm.nvvm.tex.3d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.3d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.3d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.3d.level.v4f32.f32
+    3, // llvm.nvvm.tex.3d.level.v4s32.f32
+    3, // llvm.nvvm.tex.3d.level.v4u32.f32
+    3, // llvm.nvvm.tex.3d.v4f32.f32
+    3, // llvm.nvvm.tex.3d.v4f32.s32
+    3, // llvm.nvvm.tex.3d.v4s32.f32
+    3, // llvm.nvvm.tex.3d.v4s32.s32
+    3, // llvm.nvvm.tex.3d.v4u32.f32
+    3, // llvm.nvvm.tex.3d.v4u32.s32
+    3, // llvm.nvvm.tex.cube.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.cube.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.cube.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.cube.array.v4f32.f32
+    3, // llvm.nvvm.tex.cube.array.v4s32.f32
+    3, // llvm.nvvm.tex.cube.array.v4u32.f32
+    3, // llvm.nvvm.tex.cube.level.v4f32.f32
+    3, // llvm.nvvm.tex.cube.level.v4s32.f32
+    3, // llvm.nvvm.tex.cube.level.v4u32.f32
+    3, // llvm.nvvm.tex.cube.v4f32.f32
+    3, // llvm.nvvm.tex.cube.v4s32.f32
+    3, // llvm.nvvm.tex.cube.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.v4f32.s32
+    3, // llvm.nvvm.tex.unified.1d.array.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.v4s32.s32
+    3, // llvm.nvvm.tex.unified.1d.array.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.array.v4u32.s32
+    3, // llvm.nvvm.tex.unified.1d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.v4f32.f32
+    3, // llvm.nvvm.tex.unified.1d.v4f32.s32
+    3, // llvm.nvvm.tex.unified.1d.v4s32.f32
+    3, // llvm.nvvm.tex.unified.1d.v4s32.s32
+    3, // llvm.nvvm.tex.unified.1d.v4u32.f32
+    3, // llvm.nvvm.tex.unified.1d.v4u32.s32
+    3, // llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.v4f32.s32
+    3, // llvm.nvvm.tex.unified.2d.array.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.v4s32.s32
+    3, // llvm.nvvm.tex.unified.2d.array.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.array.v4u32.s32
+    3, // llvm.nvvm.tex.unified.2d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.v4f32.f32
+    3, // llvm.nvvm.tex.unified.2d.v4f32.s32
+    3, // llvm.nvvm.tex.unified.2d.v4s32.f32
+    3, // llvm.nvvm.tex.unified.2d.v4s32.s32
+    3, // llvm.nvvm.tex.unified.2d.v4u32.f32
+    3, // llvm.nvvm.tex.unified.2d.v4u32.s32
+    3, // llvm.nvvm.tex.unified.3d.grad.v4f32.f32
+    3, // llvm.nvvm.tex.unified.3d.grad.v4s32.f32
+    3, // llvm.nvvm.tex.unified.3d.grad.v4u32.f32
+    3, // llvm.nvvm.tex.unified.3d.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.3d.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.3d.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.3d.v4f32.f32
+    3, // llvm.nvvm.tex.unified.3d.v4f32.s32
+    3, // llvm.nvvm.tex.unified.3d.v4s32.f32
+    3, // llvm.nvvm.tex.unified.3d.v4s32.s32
+    3, // llvm.nvvm.tex.unified.3d.v4u32.f32
+    3, // llvm.nvvm.tex.unified.3d.v4u32.s32
+    3, // llvm.nvvm.tex.unified.cube.array.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.cube.array.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.cube.array.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.cube.array.v4f32.f32
+    3, // llvm.nvvm.tex.unified.cube.array.v4s32.f32
+    3, // llvm.nvvm.tex.unified.cube.array.v4u32.f32
+    3, // llvm.nvvm.tex.unified.cube.level.v4f32.f32
+    3, // llvm.nvvm.tex.unified.cube.level.v4s32.f32
+    3, // llvm.nvvm.tex.unified.cube.level.v4u32.f32
+    3, // llvm.nvvm.tex.unified.cube.v4f32.f32
+    3, // llvm.nvvm.tex.unified.cube.v4s32.f32
+    3, // llvm.nvvm.tex.unified.cube.v4u32.f32
+    1, // llvm.nvvm.texsurf.handle
+    1, // llvm.nvvm.texsurf.handle.internal
+    3, // llvm.nvvm.tld4.a.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.a.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.a.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.b.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.b.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.b.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.g.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.g.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.g.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.r.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.r.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.r.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.unified.a.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.unified.a.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.unified.a.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.unified.b.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.unified.b.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.unified.b.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.unified.g.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.unified.g.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.unified.g.2d.v4u32.f32
+    3, // llvm.nvvm.tld4.unified.r.2d.v4f32.f32
+    3, // llvm.nvvm.tld4.unified.r.2d.v4s32.f32
+    3, // llvm.nvvm.tld4.unified.r.2d.v4u32.f32
+    1, // llvm.nvvm.trunc.d
+    1, // llvm.nvvm.trunc.f
+    1, // llvm.nvvm.trunc.ftz.f
+    1, // llvm.nvvm.txq.array.size
+    1, // llvm.nvvm.txq.channel.data.type
+    1, // llvm.nvvm.txq.channel.order
+    1, // llvm.nvvm.txq.depth
+    1, // llvm.nvvm.txq.height
+    1, // llvm.nvvm.txq.num.mipmap.levels
+    1, // llvm.nvvm.txq.num.samples
+    1, // llvm.nvvm.txq.width
+    1, // llvm.nvvm.ui2d.rm
+    1, // llvm.nvvm.ui2d.rn
+    1, // llvm.nvvm.ui2d.rp
+    1, // llvm.nvvm.ui2d.rz
+    1, // llvm.nvvm.ui2f.rm
+    1, // llvm.nvvm.ui2f.rn
+    1, // llvm.nvvm.ui2f.rp
+    1, // llvm.nvvm.ui2f.rz
+    1, // llvm.nvvm.ull2d.rm
+    1, // llvm.nvvm.ull2d.rn
+    1, // llvm.nvvm.ull2d.rp
+    1, // llvm.nvvm.ull2d.rz
+    1, // llvm.nvvm.ull2f.rm
+    1, // llvm.nvvm.ull2f.rn
+    1, // llvm.nvvm.ull2f.rp
+    1, // llvm.nvvm.ull2f.rz
+    38, // llvm.nvvm.vote.all
+    38, // llvm.nvvm.vote.all.sync
+    38, // llvm.nvvm.vote.any
+    38, // llvm.nvvm.vote.any.sync
+    38, // llvm.nvvm.vote.ballot
+    38, // llvm.nvvm.vote.ballot.sync
+    38, // llvm.nvvm.vote.uni
+    38, // llvm.nvvm.vote.uni.sync
+    13, // llvm.nvvm.wmma.m16n16k16.load.a.col.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.a.row.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.b.col.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.b.row.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.col.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.col.f32
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.row.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.row.f32
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
+    13, // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
+    1, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.col.f16
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.col.f32
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.row.f16
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.row.f32
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
+    39, // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
+    1, // llvm.ppc.altivec.crypto.vcipher
+    1, // llvm.ppc.altivec.crypto.vcipherlast
+    1, // llvm.ppc.altivec.crypto.vncipher
+    1, // llvm.ppc.altivec.crypto.vncipherlast
+    1, // llvm.ppc.altivec.crypto.vpermxor
+    1, // llvm.ppc.altivec.crypto.vpmsumb
+    1, // llvm.ppc.altivec.crypto.vpmsumd
+    1, // llvm.ppc.altivec.crypto.vpmsumh
+    1, // llvm.ppc.altivec.crypto.vpmsumw
+    1, // llvm.ppc.altivec.crypto.vsbox
+    1, // llvm.ppc.altivec.crypto.vshasigmad
+    1, // llvm.ppc.altivec.crypto.vshasigmaw
+    3, // llvm.ppc.altivec.dss
+    3, // llvm.ppc.altivec.dssall
+    3, // llvm.ppc.altivec.dst
+    3, // llvm.ppc.altivec.dstst
+    3, // llvm.ppc.altivec.dststt
+    3, // llvm.ppc.altivec.dstt
+    2, // llvm.ppc.altivec.lvebx
+    2, // llvm.ppc.altivec.lvehx
+    2, // llvm.ppc.altivec.lvewx
+    1, // llvm.ppc.altivec.lvsl
+    1, // llvm.ppc.altivec.lvsr
+    2, // llvm.ppc.altivec.lvx
+    2, // llvm.ppc.altivec.lvxl
+    16, // llvm.ppc.altivec.mfvscr
+    3, // llvm.ppc.altivec.mtvscr
+    40, // llvm.ppc.altivec.stvebx
+    40, // llvm.ppc.altivec.stvehx
+    40, // llvm.ppc.altivec.stvewx
+    40, // llvm.ppc.altivec.stvx
+    40, // llvm.ppc.altivec.stvxl
+    1, // llvm.ppc.altivec.vabsdub
+    1, // llvm.ppc.altivec.vabsduh
+    1, // llvm.ppc.altivec.vabsduw
+    1, // llvm.ppc.altivec.vaddcuq
+    1, // llvm.ppc.altivec.vaddcuw
+    1, // llvm.ppc.altivec.vaddecuq
+    1, // llvm.ppc.altivec.vaddeuqm
+    1, // llvm.ppc.altivec.vaddsbs
+    1, // llvm.ppc.altivec.vaddshs
+    1, // llvm.ppc.altivec.vaddsws
+    1, // llvm.ppc.altivec.vaddubs
+    1, // llvm.ppc.altivec.vadduhs
+    1, // llvm.ppc.altivec.vadduws
+    1, // llvm.ppc.altivec.vavgsb
+    1, // llvm.ppc.altivec.vavgsh
+    1, // llvm.ppc.altivec.vavgsw
+    1, // llvm.ppc.altivec.vavgub
+    1, // llvm.ppc.altivec.vavguh
+    1, // llvm.ppc.altivec.vavguw
+    1, // llvm.ppc.altivec.vbpermq
+    1, // llvm.ppc.altivec.vcfsx
+    1, // llvm.ppc.altivec.vcfux
+    1, // llvm.ppc.altivec.vclzlsbb
+    1, // llvm.ppc.altivec.vcmpbfp
+    1, // llvm.ppc.altivec.vcmpbfp.p
+    1, // llvm.ppc.altivec.vcmpeqfp
+    1, // llvm.ppc.altivec.vcmpeqfp.p
+    1, // llvm.ppc.altivec.vcmpequb
+    1, // llvm.ppc.altivec.vcmpequb.p
+    1, // llvm.ppc.altivec.vcmpequd
+    1, // llvm.ppc.altivec.vcmpequd.p
+    1, // llvm.ppc.altivec.vcmpequh
+    1, // llvm.ppc.altivec.vcmpequh.p
+    1, // llvm.ppc.altivec.vcmpequw
+    1, // llvm.ppc.altivec.vcmpequw.p
+    1, // llvm.ppc.altivec.vcmpgefp
+    1, // llvm.ppc.altivec.vcmpgefp.p
+    1, // llvm.ppc.altivec.vcmpgtfp
+    1, // llvm.ppc.altivec.vcmpgtfp.p
+    1, // llvm.ppc.altivec.vcmpgtsb
+    1, // llvm.ppc.altivec.vcmpgtsb.p
+    1, // llvm.ppc.altivec.vcmpgtsd
+    1, // llvm.ppc.altivec.vcmpgtsd.p
+    1, // llvm.ppc.altivec.vcmpgtsh
+    1, // llvm.ppc.altivec.vcmpgtsh.p
+    1, // llvm.ppc.altivec.vcmpgtsw
+    1, // llvm.ppc.altivec.vcmpgtsw.p
+    1, // llvm.ppc.altivec.vcmpgtub
+    1, // llvm.ppc.altivec.vcmpgtub.p
+    1, // llvm.ppc.altivec.vcmpgtud
+    1, // llvm.ppc.altivec.vcmpgtud.p
+    1, // llvm.ppc.altivec.vcmpgtuh
+    1, // llvm.ppc.altivec.vcmpgtuh.p
+    1, // llvm.ppc.altivec.vcmpgtuw
+    1, // llvm.ppc.altivec.vcmpgtuw.p
+    1, // llvm.ppc.altivec.vcmpneb
+    1, // llvm.ppc.altivec.vcmpneb.p
+    1, // llvm.ppc.altivec.vcmpneh
+    1, // llvm.ppc.altivec.vcmpneh.p
+    1, // llvm.ppc.altivec.vcmpnew
+    1, // llvm.ppc.altivec.vcmpnew.p
+    1, // llvm.ppc.altivec.vcmpnezb
+    1, // llvm.ppc.altivec.vcmpnezb.p
+    1, // llvm.ppc.altivec.vcmpnezh
+    1, // llvm.ppc.altivec.vcmpnezh.p
+    1, // llvm.ppc.altivec.vcmpnezw
+    1, // llvm.ppc.altivec.vcmpnezw.p
+    1, // llvm.ppc.altivec.vctsxs
+    1, // llvm.ppc.altivec.vctuxs
+    1, // llvm.ppc.altivec.vctzlsbb
+    1, // llvm.ppc.altivec.vexptefp
+    1, // llvm.ppc.altivec.vgbbd
+    1, // llvm.ppc.altivec.vlogefp
+    1, // llvm.ppc.altivec.vmaddfp
+    1, // llvm.ppc.altivec.vmaxfp
+    1, // llvm.ppc.altivec.vmaxsb
+    1, // llvm.ppc.altivec.vmaxsd
+    1, // llvm.ppc.altivec.vmaxsh
+    1, // llvm.ppc.altivec.vmaxsw
+    1, // llvm.ppc.altivec.vmaxub
+    1, // llvm.ppc.altivec.vmaxud
+    1, // llvm.ppc.altivec.vmaxuh
+    1, // llvm.ppc.altivec.vmaxuw
+    1, // llvm.ppc.altivec.vmhaddshs
+    1, // llvm.ppc.altivec.vmhraddshs
+    1, // llvm.ppc.altivec.vminfp
+    1, // llvm.ppc.altivec.vminsb
+    1, // llvm.ppc.altivec.vminsd
+    1, // llvm.ppc.altivec.vminsh
+    1, // llvm.ppc.altivec.vminsw
+    1, // llvm.ppc.altivec.vminub
+    1, // llvm.ppc.altivec.vminud
+    1, // llvm.ppc.altivec.vminuh
+    1, // llvm.ppc.altivec.vminuw
+    1, // llvm.ppc.altivec.vmladduhm
+    1, // llvm.ppc.altivec.vmsummbm
+    1, // llvm.ppc.altivec.vmsumshm
+    1, // llvm.ppc.altivec.vmsumshs
+    1, // llvm.ppc.altivec.vmsumubm
+    1, // llvm.ppc.altivec.vmsumuhm
+    1, // llvm.ppc.altivec.vmsumuhs
+    1, // llvm.ppc.altivec.vmulesb
+    1, // llvm.ppc.altivec.vmulesh
+    1, // llvm.ppc.altivec.vmulesw
+    1, // llvm.ppc.altivec.vmuleub
+    1, // llvm.ppc.altivec.vmuleuh
+    1, // llvm.ppc.altivec.vmuleuw
+    1, // llvm.ppc.altivec.vmulosb
+    1, // llvm.ppc.altivec.vmulosh
+    1, // llvm.ppc.altivec.vmulosw
+    1, // llvm.ppc.altivec.vmuloub
+    1, // llvm.ppc.altivec.vmulouh
+    1, // llvm.ppc.altivec.vmulouw
+    1, // llvm.ppc.altivec.vnmsubfp
+    1, // llvm.ppc.altivec.vperm
+    1, // llvm.ppc.altivec.vpkpx
+    1, // llvm.ppc.altivec.vpksdss
+    1, // llvm.ppc.altivec.vpksdus
+    1, // llvm.ppc.altivec.vpkshss
+    1, // llvm.ppc.altivec.vpkshus
+    1, // llvm.ppc.altivec.vpkswss
+    1, // llvm.ppc.altivec.vpkswus
+    1, // llvm.ppc.altivec.vpkudus
+    1, // llvm.ppc.altivec.vpkuhus
+    1, // llvm.ppc.altivec.vpkuwus
+    1, // llvm.ppc.altivec.vprtybd
+    1, // llvm.ppc.altivec.vprtybq
+    1, // llvm.ppc.altivec.vprtybw
+    1, // llvm.ppc.altivec.vrefp
+    1, // llvm.ppc.altivec.vrfim
+    1, // llvm.ppc.altivec.vrfin
+    1, // llvm.ppc.altivec.vrfip
+    1, // llvm.ppc.altivec.vrfiz
+    1, // llvm.ppc.altivec.vrlb
+    1, // llvm.ppc.altivec.vrld
+    1, // llvm.ppc.altivec.vrldmi
+    1, // llvm.ppc.altivec.vrldnm
+    1, // llvm.ppc.altivec.vrlh
+    1, // llvm.ppc.altivec.vrlw
+    1, // llvm.ppc.altivec.vrlwmi
+    1, // llvm.ppc.altivec.vrlwnm
+    1, // llvm.ppc.altivec.vrsqrtefp
+    1, // llvm.ppc.altivec.vsel
+    1, // llvm.ppc.altivec.vsl
+    1, // llvm.ppc.altivec.vslb
+    1, // llvm.ppc.altivec.vslh
+    1, // llvm.ppc.altivec.vslo
+    1, // llvm.ppc.altivec.vslv
+    1, // llvm.ppc.altivec.vslw
+    1, // llvm.ppc.altivec.vsr
+    1, // llvm.ppc.altivec.vsrab
+    1, // llvm.ppc.altivec.vsrah
+    1, // llvm.ppc.altivec.vsraw
+    1, // llvm.ppc.altivec.vsrb
+    1, // llvm.ppc.altivec.vsrh
+    1, // llvm.ppc.altivec.vsro
+    1, // llvm.ppc.altivec.vsrv
+    1, // llvm.ppc.altivec.vsrw
+    1, // llvm.ppc.altivec.vsubcuq
+    1, // llvm.ppc.altivec.vsubcuw
+    1, // llvm.ppc.altivec.vsubecuq
+    1, // llvm.ppc.altivec.vsubeuqm
+    1, // llvm.ppc.altivec.vsubsbs
+    1, // llvm.ppc.altivec.vsubshs
+    1, // llvm.ppc.altivec.vsubsws
+    1, // llvm.ppc.altivec.vsububs
+    1, // llvm.ppc.altivec.vsubuhs
+    1, // llvm.ppc.altivec.vsubuws
+    1, // llvm.ppc.altivec.vsum2sws
+    1, // llvm.ppc.altivec.vsum4sbs
+    1, // llvm.ppc.altivec.vsum4shs
+    1, // llvm.ppc.altivec.vsum4ubs
+    1, // llvm.ppc.altivec.vsumsws
+    1, // llvm.ppc.altivec.vupkhpx
+    1, // llvm.ppc.altivec.vupkhsb
+    1, // llvm.ppc.altivec.vupkhsh
+    1, // llvm.ppc.altivec.vupkhsw
+    1, // llvm.ppc.altivec.vupklpx
+    1, // llvm.ppc.altivec.vupklsb
+    1, // llvm.ppc.altivec.vupklsh
+    1, // llvm.ppc.altivec.vupklsw
+    1, // llvm.ppc.bpermd
+    3, // llvm.ppc.cfence
+    3, // llvm.ppc.dcba
+    3, // llvm.ppc.dcbf
+    3, // llvm.ppc.dcbi
+    3, // llvm.ppc.dcbst
+    18, // llvm.ppc.dcbt
+    18, // llvm.ppc.dcbtst
+    3, // llvm.ppc.dcbz
+    3, // llvm.ppc.dcbzl
+    1, // llvm.ppc.divde
+    1, // llvm.ppc.divdeu
+    1, // llvm.ppc.divwe
+    1, // llvm.ppc.divweu
+    3, // llvm.ppc.get.texasr
+    3, // llvm.ppc.get.texasru
+    3, // llvm.ppc.get.tfhar
+    3, // llvm.ppc.get.tfiar
+    3, // llvm.ppc.is.decremented.ctr.nonzero
+    3, // llvm.ppc.lwsync
+    3, // llvm.ppc.mtctr
+    1, // llvm.ppc.qpx.qvfabs
+    1, // llvm.ppc.qpx.qvfadd
+    1, // llvm.ppc.qpx.qvfadds
+    1, // llvm.ppc.qpx.qvfcfid
+    1, // llvm.ppc.qpx.qvfcfids
+    1, // llvm.ppc.qpx.qvfcfidu
+    1, // llvm.ppc.qpx.qvfcfidus
+    1, // llvm.ppc.qpx.qvfcmpeq
+    1, // llvm.ppc.qpx.qvfcmpgt
+    1, // llvm.ppc.qpx.qvfcmplt
+    1, // llvm.ppc.qpx.qvfcpsgn
+    1, // llvm.ppc.qpx.qvfctid
+    1, // llvm.ppc.qpx.qvfctidu
+    1, // llvm.ppc.qpx.qvfctiduz
+    1, // llvm.ppc.qpx.qvfctidz
+    1, // llvm.ppc.qpx.qvfctiw
+    1, // llvm.ppc.qpx.qvfctiwu
+    1, // llvm.ppc.qpx.qvfctiwuz
+    1, // llvm.ppc.qpx.qvfctiwz
+    1, // llvm.ppc.qpx.qvflogical
+    1, // llvm.ppc.qpx.qvfmadd
+    1, // llvm.ppc.qpx.qvfmadds
+    1, // llvm.ppc.qpx.qvfmsub
+    1, // llvm.ppc.qpx.qvfmsubs
+    1, // llvm.ppc.qpx.qvfmul
+    1, // llvm.ppc.qpx.qvfmuls
+    1, // llvm.ppc.qpx.qvfnabs
+    1, // llvm.ppc.qpx.qvfneg
+    1, // llvm.ppc.qpx.qvfnmadd
+    1, // llvm.ppc.qpx.qvfnmadds
+    1, // llvm.ppc.qpx.qvfnmsub
+    1, // llvm.ppc.qpx.qvfnmsubs
+    1, // llvm.ppc.qpx.qvfperm
+    1, // llvm.ppc.qpx.qvfre
+    1, // llvm.ppc.qpx.qvfres
+    1, // llvm.ppc.qpx.qvfrim
+    1, // llvm.ppc.qpx.qvfrin
+    1, // llvm.ppc.qpx.qvfrip
+    1, // llvm.ppc.qpx.qvfriz
+    1, // llvm.ppc.qpx.qvfrsp
+    1, // llvm.ppc.qpx.qvfrsqrte
+    1, // llvm.ppc.qpx.qvfrsqrtes
+    1, // llvm.ppc.qpx.qvfsel
+    1, // llvm.ppc.qpx.qvfsub
+    1, // llvm.ppc.qpx.qvfsubs
+    1, // llvm.ppc.qpx.qvftstnan
+    1, // llvm.ppc.qpx.qvfxmadd
+    1, // llvm.ppc.qpx.qvfxmadds
+    1, // llvm.ppc.qpx.qvfxmul
+    1, // llvm.ppc.qpx.qvfxmuls
+    1, // llvm.ppc.qpx.qvfxxcpnmadd
+    1, // llvm.ppc.qpx.qvfxxcpnmadds
+    1, // llvm.ppc.qpx.qvfxxmadd
+    1, // llvm.ppc.qpx.qvfxxmadds
+    1, // llvm.ppc.qpx.qvfxxnpmadd
+    1, // llvm.ppc.qpx.qvfxxnpmadds
+    1, // llvm.ppc.qpx.qvgpci
+    2, // llvm.ppc.qpx.qvlfcd
+    2, // llvm.ppc.qpx.qvlfcda
+    2, // llvm.ppc.qpx.qvlfcs
+    2, // llvm.ppc.qpx.qvlfcsa
+    2, // llvm.ppc.qpx.qvlfd
+    2, // llvm.ppc.qpx.qvlfda
+    2, // llvm.ppc.qpx.qvlfiwa
+    2, // llvm.ppc.qpx.qvlfiwaa
+    2, // llvm.ppc.qpx.qvlfiwz
+    2, // llvm.ppc.qpx.qvlfiwza
+    2, // llvm.ppc.qpx.qvlfs
+    2, // llvm.ppc.qpx.qvlfsa
+    1, // llvm.ppc.qpx.qvlpcld
+    1, // llvm.ppc.qpx.qvlpcls
+    1, // llvm.ppc.qpx.qvlpcrd
+    1, // llvm.ppc.qpx.qvlpcrs
+    40, // llvm.ppc.qpx.qvstfcd
+    40, // llvm.ppc.qpx.qvstfcda
+    40, // llvm.ppc.qpx.qvstfcs
+    40, // llvm.ppc.qpx.qvstfcsa
+    40, // llvm.ppc.qpx.qvstfd
+    40, // llvm.ppc.qpx.qvstfda
+    40, // llvm.ppc.qpx.qvstfiw
+    40, // llvm.ppc.qpx.qvstfiwa
+    40, // llvm.ppc.qpx.qvstfs
+    40, // llvm.ppc.qpx.qvstfsa
+    3, // llvm.ppc.set.texasr
+    3, // llvm.ppc.set.texasru
+    3, // llvm.ppc.set.tfhar
+    3, // llvm.ppc.set.tfiar
+    3, // llvm.ppc.sync
+    3, // llvm.ppc.tabort
+    3, // llvm.ppc.tabortdc
+    3, // llvm.ppc.tabortdci
+    3, // llvm.ppc.tabortwc
+    3, // llvm.ppc.tabortwci
+    3, // llvm.ppc.tbegin
+    3, // llvm.ppc.tcheck
+    3, // llvm.ppc.tend
+    3, // llvm.ppc.tendall
+    3, // llvm.ppc.trechkpt
+    3, // llvm.ppc.treclaim
+    3, // llvm.ppc.tresume
+    3, // llvm.ppc.tsr
+    3, // llvm.ppc.tsuspend
+    3, // llvm.ppc.ttest
+    2, // llvm.ppc.vsx.lxvd2x
+    2, // llvm.ppc.vsx.lxvd2x.be
+    2, // llvm.ppc.vsx.lxvl
+    2, // llvm.ppc.vsx.lxvll
+    2, // llvm.ppc.vsx.lxvw4x
+    2, // llvm.ppc.vsx.lxvw4x.be
+    40, // llvm.ppc.vsx.stxvd2x
+    40, // llvm.ppc.vsx.stxvd2x.be
+    40, // llvm.ppc.vsx.stxvl
+    40, // llvm.ppc.vsx.stxvll
+    40, // llvm.ppc.vsx.stxvw4x
+    40, // llvm.ppc.vsx.stxvw4x.be
+    1, // llvm.ppc.vsx.xsmaxdp
+    1, // llvm.ppc.vsx.xsmindp
+    1, // llvm.ppc.vsx.xvcmpeqdp
+    1, // llvm.ppc.vsx.xvcmpeqdp.p
+    1, // llvm.ppc.vsx.xvcmpeqsp
+    1, // llvm.ppc.vsx.xvcmpeqsp.p
+    1, // llvm.ppc.vsx.xvcmpgedp
+    1, // llvm.ppc.vsx.xvcmpgedp.p
+    1, // llvm.ppc.vsx.xvcmpgesp
+    1, // llvm.ppc.vsx.xvcmpgesp.p
+    1, // llvm.ppc.vsx.xvcmpgtdp
+    1, // llvm.ppc.vsx.xvcmpgtdp.p
+    1, // llvm.ppc.vsx.xvcmpgtsp
+    1, // llvm.ppc.vsx.xvcmpgtsp.p
+    1, // llvm.ppc.vsx.xvcvdpsp
+    1, // llvm.ppc.vsx.xvcvdpsxws
+    1, // llvm.ppc.vsx.xvcvdpuxws
+    1, // llvm.ppc.vsx.xvcvhpsp
+    1, // llvm.ppc.vsx.xvcvspdp
+    1, // llvm.ppc.vsx.xvcvsphp
+    1, // llvm.ppc.vsx.xvcvsxdsp
+    1, // llvm.ppc.vsx.xvcvsxwdp
+    1, // llvm.ppc.vsx.xvcvuxdsp
+    1, // llvm.ppc.vsx.xvcvuxwdp
+    1, // llvm.ppc.vsx.xvdivdp
+    1, // llvm.ppc.vsx.xvdivsp
+    1, // llvm.ppc.vsx.xviexpdp
+    1, // llvm.ppc.vsx.xviexpsp
+    1, // llvm.ppc.vsx.xvmaxdp
+    1, // llvm.ppc.vsx.xvmaxsp
+    1, // llvm.ppc.vsx.xvmindp
+    1, // llvm.ppc.vsx.xvminsp
+    1, // llvm.ppc.vsx.xvrdpip
+    1, // llvm.ppc.vsx.xvredp
+    1, // llvm.ppc.vsx.xvresp
+    1, // llvm.ppc.vsx.xvrspip
+    1, // llvm.ppc.vsx.xvrsqrtedp
+    1, // llvm.ppc.vsx.xvrsqrtesp
+    1, // llvm.ppc.vsx.xvtstdcdp
+    1, // llvm.ppc.vsx.xvtstdcsp
+    1, // llvm.ppc.vsx.xvxexpdp
+    1, // llvm.ppc.vsx.xvxexpsp
+    1, // llvm.ppc.vsx.xvxsigdp
+    1, // llvm.ppc.vsx.xvxsigsp
+    1, // llvm.ppc.vsx.xxextractuw
+    1, // llvm.ppc.vsx.xxinsertw
+    1, // llvm.ppc.vsx.xxleqv
+    4, // llvm.r600.cube
+    33, // llvm.r600.group.barrier
+    4, // llvm.r600.implicitarg.ptr
+    3, // llvm.r600.rat.store.typed
+    4, // llvm.r600.read.global.size.x
+    4, // llvm.r600.read.global.size.y
+    4, // llvm.r600.read.global.size.z
+    4, // llvm.r600.read.local.size.x
+    4, // llvm.r600.read.local.size.y
+    4, // llvm.r600.read.local.size.z
+    4, // llvm.r600.read.ngroups.x
+    4, // llvm.r600.read.ngroups.y
+    4, // llvm.r600.read.ngroups.z
+    4, // llvm.r600.read.tgid.x
+    4, // llvm.r600.read.tgid.y
+    4, // llvm.r600.read.tgid.z
+    4, // llvm.r600.read.tidig.x
+    4, // llvm.r600.read.tidig.y
+    4, // llvm.r600.read.tidig.z
+    4, // llvm.r600.recipsqrt.clamped
+    4, // llvm.r600.recipsqrt.ieee
+    3, // llvm.s390.efpc
+    1, // llvm.s390.etnd
+    1, // llvm.s390.lcbb
+    40, // llvm.s390.ntstg
+    3, // llvm.s390.ppa.txassist
+    3, // llvm.s390.sfpc
+    41, // llvm.s390.tabort
+    42, // llvm.s390.tbegin
+    42, // llvm.s390.tbegin.nofloat
+    42, // llvm.s390.tbeginc
+    1, // llvm.s390.tdc
+    3, // llvm.s390.tend
+    1, // llvm.s390.vaccb
+    1, // llvm.s390.vacccq
+    1, // llvm.s390.vaccf
+    1, // llvm.s390.vaccg
+    1, // llvm.s390.vacch
+    1, // llvm.s390.vaccq
+    1, // llvm.s390.vacq
+    1, // llvm.s390.vaq
+    1, // llvm.s390.vavgb
+    1, // llvm.s390.vavgf
+    1, // llvm.s390.vavgg
+    1, // llvm.s390.vavgh
+    1, // llvm.s390.vavglb
+    1, // llvm.s390.vavglf
+    1, // llvm.s390.vavglg
+    1, // llvm.s390.vavglh
+    1, // llvm.s390.vbperm
+    1, // llvm.s390.vceqbs
+    1, // llvm.s390.vceqfs
+    1, // llvm.s390.vceqgs
+    1, // llvm.s390.vceqhs
+    1, // llvm.s390.vchbs
+    1, // llvm.s390.vchfs
+    1, // llvm.s390.vchgs
+    1, // llvm.s390.vchhs
+    1, // llvm.s390.vchlbs
+    1, // llvm.s390.vchlfs
+    1, // llvm.s390.vchlgs
+    1, // llvm.s390.vchlhs
+    1, // llvm.s390.vcksm
+    1, // llvm.s390.verimb
+    1, // llvm.s390.verimf
+    1, // llvm.s390.verimg
+    1, // llvm.s390.verimh
+    1, // llvm.s390.verllb
+    1, // llvm.s390.verllf
+    1, // llvm.s390.verllg
+    1, // llvm.s390.verllh
+    1, // llvm.s390.verllvb
+    1, // llvm.s390.verllvf
+    1, // llvm.s390.verllvg
+    1, // llvm.s390.verllvh
+    1, // llvm.s390.vfaeb
+    1, // llvm.s390.vfaebs
+    1, // llvm.s390.vfaef
+    1, // llvm.s390.vfaefs
+    1, // llvm.s390.vfaeh
+    1, // llvm.s390.vfaehs
+    1, // llvm.s390.vfaezb
+    1, // llvm.s390.vfaezbs
+    1, // llvm.s390.vfaezf
+    1, // llvm.s390.vfaezfs
+    1, // llvm.s390.vfaezh
+    1, // llvm.s390.vfaezhs
+    1, // llvm.s390.vfcedbs
+    1, // llvm.s390.vfcesbs
+    1, // llvm.s390.vfchdbs
+    1, // llvm.s390.vfchedbs
+    1, // llvm.s390.vfchesbs
+    1, // llvm.s390.vfchsbs
+    1, // llvm.s390.vfeeb
+    1, // llvm.s390.vfeebs
+    1, // llvm.s390.vfeef
+    1, // llvm.s390.vfeefs
+    1, // llvm.s390.vfeeh
+    1, // llvm.s390.vfeehs
+    1, // llvm.s390.vfeezb
+    1, // llvm.s390.vfeezbs
+    1, // llvm.s390.vfeezf
+    1, // llvm.s390.vfeezfs
+    1, // llvm.s390.vfeezh
+    1, // llvm.s390.vfeezhs
+    1, // llvm.s390.vfeneb
+    1, // llvm.s390.vfenebs
+    1, // llvm.s390.vfenef
+    1, // llvm.s390.vfenefs
+    1, // llvm.s390.vfeneh
+    1, // llvm.s390.vfenehs
+    1, // llvm.s390.vfenezb
+    1, // llvm.s390.vfenezbs
+    1, // llvm.s390.vfenezf
+    1, // llvm.s390.vfenezfs
+    1, // llvm.s390.vfenezh
+    1, // llvm.s390.vfenezhs
+    1, // llvm.s390.vfidb
+    1, // llvm.s390.vfisb
+    1, // llvm.s390.vfmaxdb
+    1, // llvm.s390.vfmaxsb
+    1, // llvm.s390.vfmindb
+    1, // llvm.s390.vfminsb
+    1, // llvm.s390.vftcidb
+    1, // llvm.s390.vftcisb
+    1, // llvm.s390.vgfmab
+    1, // llvm.s390.vgfmaf
+    1, // llvm.s390.vgfmag
+    1, // llvm.s390.vgfmah
+    1, // llvm.s390.vgfmb
+    1, // llvm.s390.vgfmf
+    1, // llvm.s390.vgfmg
+    1, // llvm.s390.vgfmh
+    1, // llvm.s390.vistrb
+    1, // llvm.s390.vistrbs
+    1, // llvm.s390.vistrf
+    1, // llvm.s390.vistrfs
+    1, // llvm.s390.vistrh
+    1, // llvm.s390.vistrhs
+    2, // llvm.s390.vlbb
+    2, // llvm.s390.vll
+    2, // llvm.s390.vlrl
+    1, // llvm.s390.vmaeb
+    1, // llvm.s390.vmaef
+    1, // llvm.s390.vmaeh
+    1, // llvm.s390.vmahb
+    1, // llvm.s390.vmahf
+    1, // llvm.s390.vmahh
+    1, // llvm.s390.vmaleb
+    1, // llvm.s390.vmalef
+    1, // llvm.s390.vmaleh
+    1, // llvm.s390.vmalhb
+    1, // llvm.s390.vmalhf
+    1, // llvm.s390.vmalhh
+    1, // llvm.s390.vmalob
+    1, // llvm.s390.vmalof
+    1, // llvm.s390.vmaloh
+    1, // llvm.s390.vmaob
+    1, // llvm.s390.vmaof
+    1, // llvm.s390.vmaoh
+    1, // llvm.s390.vmeb
+    1, // llvm.s390.vmef
+    1, // llvm.s390.vmeh
+    1, // llvm.s390.vmhb
+    1, // llvm.s390.vmhf
+    1, // llvm.s390.vmhh
+    1, // llvm.s390.vmleb
+    1, // llvm.s390.vmlef
+    1, // llvm.s390.vmleh
+    1, // llvm.s390.vmlhb
+    1, // llvm.s390.vmlhf
+    1, // llvm.s390.vmlhh
+    1, // llvm.s390.vmlob
+    1, // llvm.s390.vmlof
+    1, // llvm.s390.vmloh
+    1, // llvm.s390.vmob
+    1, // llvm.s390.vmof
+    1, // llvm.s390.vmoh
+    1, // llvm.s390.vmslg
+    1, // llvm.s390.vpdi
+    1, // llvm.s390.vperm
+    1, // llvm.s390.vpklsf
+    1, // llvm.s390.vpklsfs
+    1, // llvm.s390.vpklsg
+    1, // llvm.s390.vpklsgs
+    1, // llvm.s390.vpklsh
+    1, // llvm.s390.vpklshs
+    1, // llvm.s390.vpksf
+    1, // llvm.s390.vpksfs
+    1, // llvm.s390.vpksg
+    1, // llvm.s390.vpksgs
+    1, // llvm.s390.vpksh
+    1, // llvm.s390.vpkshs
+    1, // llvm.s390.vsbcbiq
+    1, // llvm.s390.vsbiq
+    1, // llvm.s390.vscbib
+    1, // llvm.s390.vscbif
+    1, // llvm.s390.vscbig
+    1, // llvm.s390.vscbih
+    1, // llvm.s390.vscbiq
+    1, // llvm.s390.vsl
+    1, // llvm.s390.vslb
+    1, // llvm.s390.vsldb
+    1, // llvm.s390.vsq
+    1, // llvm.s390.vsra
+    1, // llvm.s390.vsrab
+    1, // llvm.s390.vsrl
+    1, // llvm.s390.vsrlb
+    40, // llvm.s390.vstl
+    1, // llvm.s390.vstrcb
+    1, // llvm.s390.vstrcbs
+    1, // llvm.s390.vstrcf
+    1, // llvm.s390.vstrcfs
+    1, // llvm.s390.vstrch
+    1, // llvm.s390.vstrchs
+    1, // llvm.s390.vstrczb
+    1, // llvm.s390.vstrczbs
+    1, // llvm.s390.vstrczf
+    1, // llvm.s390.vstrczfs
+    1, // llvm.s390.vstrczh
+    1, // llvm.s390.vstrczhs
+    40, // llvm.s390.vstrl
+    1, // llvm.s390.vsumb
+    1, // llvm.s390.vsumgf
+    1, // llvm.s390.vsumgh
+    1, // llvm.s390.vsumh
+    1, // llvm.s390.vsumqf
+    1, // llvm.s390.vsumqg
+    1, // llvm.s390.vtm
+    1, // llvm.s390.vuphb
+    1, // llvm.s390.vuphf
+    1, // llvm.s390.vuphh
+    1, // llvm.s390.vuplb
+    1, // llvm.s390.vuplf
+    1, // llvm.s390.vuplhb
+    1, // llvm.s390.vuplhf
+    1, // llvm.s390.vuplhh
+    1, // llvm.s390.vuplhw
+    1, // llvm.s390.vupllb
+    1, // llvm.s390.vupllf
+    1, // llvm.s390.vupllh
+    16, // llvm.wasm.current.memory
+    43, // llvm.wasm.get.ehselector
+    43, // llvm.wasm.get.exception
+    3, // llvm.wasm.grow.memory
+    3, // llvm.wasm.mem.grow
+    16, // llvm.wasm.mem.size
+    44, // llvm.wasm.rethrow
+    44, // llvm.wasm.throw
+    1, // llvm.x86.3dnow.pavgusb
+    1, // llvm.x86.3dnow.pf2id
+    1, // llvm.x86.3dnow.pfacc
+    1, // llvm.x86.3dnow.pfadd
+    1, // llvm.x86.3dnow.pfcmpeq
+    1, // llvm.x86.3dnow.pfcmpge
+    1, // llvm.x86.3dnow.pfcmpgt
+    1, // llvm.x86.3dnow.pfmax
+    1, // llvm.x86.3dnow.pfmin
+    1, // llvm.x86.3dnow.pfmul
+    1, // llvm.x86.3dnow.pfrcp
+    1, // llvm.x86.3dnow.pfrcpit1
+    1, // llvm.x86.3dnow.pfrcpit2
+    1, // llvm.x86.3dnow.pfrsqit1
+    1, // llvm.x86.3dnow.pfrsqrt
+    1, // llvm.x86.3dnow.pfsub
+    1, // llvm.x86.3dnow.pfsubr
+    1, // llvm.x86.3dnow.pi2fd
+    1, // llvm.x86.3dnow.pmulhrw
+    1, // llvm.x86.3dnowa.pf2iw
+    1, // llvm.x86.3dnowa.pfnacc
+    1, // llvm.x86.3dnowa.pfpnacc
+    1, // llvm.x86.3dnowa.pi2fw
+    1, // llvm.x86.3dnowa.pswapd
+    21, // llvm.x86.addcarry.u32
+    21, // llvm.x86.addcarry.u64
+    21, // llvm.x86.addcarryx.u32
+    21, // llvm.x86.addcarryx.u64
+    1, // llvm.x86.aesni.aesdec
+    1, // llvm.x86.aesni.aesdec.256
+    1, // llvm.x86.aesni.aesdec.512
+    1, // llvm.x86.aesni.aesdeclast
+    1, // llvm.x86.aesni.aesdeclast.256
+    1, // llvm.x86.aesni.aesdeclast.512
+    1, // llvm.x86.aesni.aesenc
+    1, // llvm.x86.aesni.aesenc.256
+    1, // llvm.x86.aesni.aesenc.512
+    1, // llvm.x86.aesni.aesenclast
+    1, // llvm.x86.aesni.aesenclast.256
+    1, // llvm.x86.aesni.aesenclast.512
+    1, // llvm.x86.aesni.aesimc
+    1, // llvm.x86.aesni.aeskeygenassist
+    1, // llvm.x86.avx.addsub.pd.256
+    1, // llvm.x86.avx.addsub.ps.256
+    1, // llvm.x86.avx.blendv.pd.256
+    1, // llvm.x86.avx.blendv.ps.256
+    1, // llvm.x86.avx.cmp.pd.256
+    1, // llvm.x86.avx.cmp.ps.256
+    1, // llvm.x86.avx.cvt.pd2.ps.256
+    1, // llvm.x86.avx.cvt.pd2dq.256
+    1, // llvm.x86.avx.cvt.ps2dq.256
+    1, // llvm.x86.avx.cvtdq2.ps.256
+    1, // llvm.x86.avx.cvtt.pd2dq.256
+    1, // llvm.x86.avx.cvtt.ps2dq.256
+    1, // llvm.x86.avx.dp.ps.256
+    1, // llvm.x86.avx.hadd.pd.256
+    1, // llvm.x86.avx.hadd.ps.256
+    1, // llvm.x86.avx.hsub.pd.256
+    1, // llvm.x86.avx.hsub.ps.256
+    16, // llvm.x86.avx.ldu.dq.256
+    2, // llvm.x86.avx.maskload.pd
+    2, // llvm.x86.avx.maskload.pd.256
+    2, // llvm.x86.avx.maskload.ps
+    2, // llvm.x86.avx.maskload.ps.256
+    21, // llvm.x86.avx.maskstore.pd
+    21, // llvm.x86.avx.maskstore.pd.256
+    21, // llvm.x86.avx.maskstore.ps
+    21, // llvm.x86.avx.maskstore.ps.256
+    1, // llvm.x86.avx.max.pd.256
+    1, // llvm.x86.avx.max.ps.256
+    1, // llvm.x86.avx.min.pd.256
+    1, // llvm.x86.avx.min.ps.256
+    1, // llvm.x86.avx.movmsk.pd.256
+    1, // llvm.x86.avx.movmsk.ps.256
+    1, // llvm.x86.avx.ptestc.256
+    1, // llvm.x86.avx.ptestnzc.256
+    1, // llvm.x86.avx.ptestz.256
+    1, // llvm.x86.avx.rcp.ps.256
+    1, // llvm.x86.avx.round.pd.256
+    1, // llvm.x86.avx.round.ps.256
+    1, // llvm.x86.avx.rsqrt.ps.256
+    1, // llvm.x86.avx.sqrt.pd.256
+    1, // llvm.x86.avx.sqrt.ps.256
+    1, // llvm.x86.avx.vpermilvar.pd
+    1, // llvm.x86.avx.vpermilvar.pd.256
+    1, // llvm.x86.avx.vpermilvar.ps
+    1, // llvm.x86.avx.vpermilvar.ps.256
+    1, // llvm.x86.avx.vtestc.pd
+    1, // llvm.x86.avx.vtestc.pd.256
+    1, // llvm.x86.avx.vtestc.ps
+    1, // llvm.x86.avx.vtestc.ps.256
+    1, // llvm.x86.avx.vtestnzc.pd
+    1, // llvm.x86.avx.vtestnzc.pd.256
+    1, // llvm.x86.avx.vtestnzc.ps
+    1, // llvm.x86.avx.vtestnzc.ps.256
+    1, // llvm.x86.avx.vtestz.pd
+    1, // llvm.x86.avx.vtestz.pd.256
+    1, // llvm.x86.avx.vtestz.ps
+    1, // llvm.x86.avx.vtestz.ps.256
+    3, // llvm.x86.avx.vzeroall
+    3, // llvm.x86.avx.vzeroupper
+    2, // llvm.x86.avx2.gather.d.d
+    2, // llvm.x86.avx2.gather.d.d.256
+    2, // llvm.x86.avx2.gather.d.pd
+    2, // llvm.x86.avx2.gather.d.pd.256
+    2, // llvm.x86.avx2.gather.d.ps
+    2, // llvm.x86.avx2.gather.d.ps.256
+    2, // llvm.x86.avx2.gather.d.q
+    2, // llvm.x86.avx2.gather.d.q.256
+    2, // llvm.x86.avx2.gather.q.d
+    2, // llvm.x86.avx2.gather.q.d.256
+    2, // llvm.x86.avx2.gather.q.pd
+    2, // llvm.x86.avx2.gather.q.pd.256
+    2, // llvm.x86.avx2.gather.q.ps
+    2, // llvm.x86.avx2.gather.q.ps.256
+    2, // llvm.x86.avx2.gather.q.q
+    2, // llvm.x86.avx2.gather.q.q.256
+    2, // llvm.x86.avx2.maskload.d
+    2, // llvm.x86.avx2.maskload.d.256
+    2, // llvm.x86.avx2.maskload.q
+    2, // llvm.x86.avx2.maskload.q.256
+    21, // llvm.x86.avx2.maskstore.d
+    21, // llvm.x86.avx2.maskstore.d.256
+    21, // llvm.x86.avx2.maskstore.q
+    21, // llvm.x86.avx2.maskstore.q.256
+    1, // llvm.x86.avx2.mpsadbw
+    1, // llvm.x86.avx2.packssdw
+    1, // llvm.x86.avx2.packsswb
+    1, // llvm.x86.avx2.packusdw
+    1, // llvm.x86.avx2.packuswb
+    1, // llvm.x86.avx2.padds.b
+    1, // llvm.x86.avx2.padds.w
+    1, // llvm.x86.avx2.paddus.b
+    1, // llvm.x86.avx2.paddus.w
+    1, // llvm.x86.avx2.pblendvb
+    1, // llvm.x86.avx2.permd
+    1, // llvm.x86.avx2.permps
+    1, // llvm.x86.avx2.phadd.d
+    1, // llvm.x86.avx2.phadd.sw
+    1, // llvm.x86.avx2.phadd.w
+    1, // llvm.x86.avx2.phsub.d
+    1, // llvm.x86.avx2.phsub.sw
+    1, // llvm.x86.avx2.phsub.w
+    1, // llvm.x86.avx2.pmadd.ub.sw
+    1, // llvm.x86.avx2.pmadd.wd
+    1, // llvm.x86.avx2.pmovmskb
+    1, // llvm.x86.avx2.pmul.dq
+    1, // llvm.x86.avx2.pmul.hr.sw
+    1, // llvm.x86.avx2.pmulh.w
+    1, // llvm.x86.avx2.pmulhu.w
+    1, // llvm.x86.avx2.pmulu.dq
+    1, // llvm.x86.avx2.psad.bw
+    1, // llvm.x86.avx2.pshuf.b
+    1, // llvm.x86.avx2.psign.b
+    1, // llvm.x86.avx2.psign.d
+    1, // llvm.x86.avx2.psign.w
+    1, // llvm.x86.avx2.psll.d
+    1, // llvm.x86.avx2.psll.q
+    1, // llvm.x86.avx2.psll.w
+    1, // llvm.x86.avx2.pslli.d
+    1, // llvm.x86.avx2.pslli.q
+    1, // llvm.x86.avx2.pslli.w
+    1, // llvm.x86.avx2.psllv.d
+    1, // llvm.x86.avx2.psllv.d.256
+    1, // llvm.x86.avx2.psllv.q
+    1, // llvm.x86.avx2.psllv.q.256
+    1, // llvm.x86.avx2.psra.d
+    1, // llvm.x86.avx2.psra.w
+    1, // llvm.x86.avx2.psrai.d
+    1, // llvm.x86.avx2.psrai.w
+    1, // llvm.x86.avx2.psrav.d
+    1, // llvm.x86.avx2.psrav.d.256
+    1, // llvm.x86.avx2.psrl.d
+    1, // llvm.x86.avx2.psrl.q
+    1, // llvm.x86.avx2.psrl.w
+    1, // llvm.x86.avx2.psrli.d
+    1, // llvm.x86.avx2.psrli.q
+    1, // llvm.x86.avx2.psrli.w
+    1, // llvm.x86.avx2.psrlv.d
+    1, // llvm.x86.avx2.psrlv.d.256
+    1, // llvm.x86.avx2.psrlv.q
+    1, // llvm.x86.avx2.psrlv.q.256
+    1, // llvm.x86.avx2.psubs.b
+    1, // llvm.x86.avx2.psubs.w
+    1, // llvm.x86.avx2.psubus.b
+    1, // llvm.x86.avx2.psubus.w
+    1, // llvm.x86.avx512.broadcastmb.128
+    1, // llvm.x86.avx512.broadcastmb.256
+    1, // llvm.x86.avx512.broadcastmb.512
+    1, // llvm.x86.avx512.broadcastmw.128
+    1, // llvm.x86.avx512.broadcastmw.256
+    1, // llvm.x86.avx512.broadcastmw.512
+    1, // llvm.x86.avx512.cvtsi2sd64
+    1, // llvm.x86.avx512.cvtsi2ss32
+    1, // llvm.x86.avx512.cvtsi2ss64
+    1, // llvm.x86.avx512.cvttsd2si
+    1, // llvm.x86.avx512.cvttsd2si64
+    1, // llvm.x86.avx512.cvttsd2usi
+    1, // llvm.x86.avx512.cvttsd2usi64
+    1, // llvm.x86.avx512.cvttss2si
+    1, // llvm.x86.avx512.cvttss2si64
+    1, // llvm.x86.avx512.cvttss2usi
+    1, // llvm.x86.avx512.cvttss2usi64
+    1, // llvm.x86.avx512.cvtusi2sd
+    1, // llvm.x86.avx512.cvtusi2ss
+    1, // llvm.x86.avx512.cvtusi642sd
+    1, // llvm.x86.avx512.cvtusi642ss
+    1, // llvm.x86.avx512.exp2.pd
+    1, // llvm.x86.avx512.exp2.ps
+    2, // llvm.x86.avx512.gather.dpd.512
+    2, // llvm.x86.avx512.gather.dpi.512
+    2, // llvm.x86.avx512.gather.dpq.512
+    2, // llvm.x86.avx512.gather.dps.512
+    2, // llvm.x86.avx512.gather.qpd.512
+    2, // llvm.x86.avx512.gather.qpi.512
+    2, // llvm.x86.avx512.gather.qpq.512
+    2, // llvm.x86.avx512.gather.qps.512
+    2, // llvm.x86.avx512.gather3div2.df
+    2, // llvm.x86.avx512.gather3div2.di
+    2, // llvm.x86.avx512.gather3div4.df
+    2, // llvm.x86.avx512.gather3div4.di
+    2, // llvm.x86.avx512.gather3div4.sf
+    2, // llvm.x86.avx512.gather3div4.si
+    2, // llvm.x86.avx512.gather3div8.sf
+    2, // llvm.x86.avx512.gather3div8.si
+    2, // llvm.x86.avx512.gather3siv2.df
+    2, // llvm.x86.avx512.gather3siv2.di
+    2, // llvm.x86.avx512.gather3siv4.df
+    2, // llvm.x86.avx512.gather3siv4.di
+    2, // llvm.x86.avx512.gather3siv4.sf
+    2, // llvm.x86.avx512.gather3siv4.si
+    2, // llvm.x86.avx512.gather3siv8.sf
+    2, // llvm.x86.avx512.gather3siv8.si
+    21, // llvm.x86.avx512.gatherpf.dpd.512
+    21, // llvm.x86.avx512.gatherpf.dps.512
+    21, // llvm.x86.avx512.gatherpf.qpd.512
+    21, // llvm.x86.avx512.gatherpf.qps.512
+    1, // llvm.x86.avx512.mask.add.pd.512
+    1, // llvm.x86.avx512.mask.add.ps.512
+    1, // llvm.x86.avx512.mask.add.sd.round
+    1, // llvm.x86.avx512.mask.add.ss.round
+    1, // llvm.x86.avx512.mask.cmp.pd.128
+    1, // llvm.x86.avx512.mask.cmp.pd.256
+    1, // llvm.x86.avx512.mask.cmp.pd.512
+    1, // llvm.x86.avx512.mask.cmp.ps.128
+    1, // llvm.x86.avx512.mask.cmp.ps.256
+    1, // llvm.x86.avx512.mask.cmp.ps.512
+    1, // llvm.x86.avx512.mask.cmp.sd
+    1, // llvm.x86.avx512.mask.cmp.ss
+    1, // llvm.x86.avx512.mask.compress.b.128
+    1, // llvm.x86.avx512.mask.compress.b.256
+    1, // llvm.x86.avx512.mask.compress.b.512
+    1, // llvm.x86.avx512.mask.compress.d.128
+    1, // llvm.x86.avx512.mask.compress.d.256
+    1, // llvm.x86.avx512.mask.compress.d.512
+    1, // llvm.x86.avx512.mask.compress.pd.128
+    1, // llvm.x86.avx512.mask.compress.pd.256
+    1, // llvm.x86.avx512.mask.compress.pd.512
+    1, // llvm.x86.avx512.mask.compress.ps.128
+    1, // llvm.x86.avx512.mask.compress.ps.256
+    1, // llvm.x86.avx512.mask.compress.ps.512
+    1, // llvm.x86.avx512.mask.compress.q.128
+    1, // llvm.x86.avx512.mask.compress.q.256
+    1, // llvm.x86.avx512.mask.compress.q.512
+    21, // llvm.x86.avx512.mask.compress.store.b.128
+    21, // llvm.x86.avx512.mask.compress.store.b.256
+    21, // llvm.x86.avx512.mask.compress.store.b.512
+    21, // llvm.x86.avx512.mask.compress.store.d.128
+    21, // llvm.x86.avx512.mask.compress.store.d.256
+    21, // llvm.x86.avx512.mask.compress.store.d.512
+    21, // llvm.x86.avx512.mask.compress.store.pd.128
+    21, // llvm.x86.avx512.mask.compress.store.pd.256
+    21, // llvm.x86.avx512.mask.compress.store.pd.512
+    21, // llvm.x86.avx512.mask.compress.store.ps.128
+    21, // llvm.x86.avx512.mask.compress.store.ps.256
+    21, // llvm.x86.avx512.mask.compress.store.ps.512
+    21, // llvm.x86.avx512.mask.compress.store.q.128
+    21, // llvm.x86.avx512.mask.compress.store.q.256
+    21, // llvm.x86.avx512.mask.compress.store.q.512
+    21, // llvm.x86.avx512.mask.compress.store.w.128
+    21, // llvm.x86.avx512.mask.compress.store.w.256
+    21, // llvm.x86.avx512.mask.compress.store.w.512
+    1, // llvm.x86.avx512.mask.compress.w.128
+    1, // llvm.x86.avx512.mask.compress.w.256
+    1, // llvm.x86.avx512.mask.compress.w.512
+    1, // llvm.x86.avx512.mask.conflict.d.128
+    1, // llvm.x86.avx512.mask.conflict.d.256
+    1, // llvm.x86.avx512.mask.conflict.d.512
+    1, // llvm.x86.avx512.mask.conflict.q.128
+    1, // llvm.x86.avx512.mask.conflict.q.256
+    1, // llvm.x86.avx512.mask.conflict.q.512
+    1, // llvm.x86.avx512.mask.cvtdq2ps.128
+    1, // llvm.x86.avx512.mask.cvtdq2ps.256
+    1, // llvm.x86.avx512.mask.cvtdq2ps.512
+    1, // llvm.x86.avx512.mask.cvtpd2dq.128
+    1, // llvm.x86.avx512.mask.cvtpd2dq.256
+    1, // llvm.x86.avx512.mask.cvtpd2dq.512
+    1, // llvm.x86.avx512.mask.cvtpd2ps
+    1, // llvm.x86.avx512.mask.cvtpd2ps.256
+    1, // llvm.x86.avx512.mask.cvtpd2ps.512
+    1, // llvm.x86.avx512.mask.cvtpd2qq.128
+    1, // llvm.x86.avx512.mask.cvtpd2qq.256
+    1, // llvm.x86.avx512.mask.cvtpd2qq.512
+    1, // llvm.x86.avx512.mask.cvtpd2udq.128
+    1, // llvm.x86.avx512.mask.cvtpd2udq.256
+    1, // llvm.x86.avx512.mask.cvtpd2udq.512
+    1, // llvm.x86.avx512.mask.cvtpd2uqq.128
+    1, // llvm.x86.avx512.mask.cvtpd2uqq.256
+    1, // llvm.x86.avx512.mask.cvtpd2uqq.512
+    1, // llvm.x86.avx512.mask.cvtps2dq.128
+    1, // llvm.x86.avx512.mask.cvtps2dq.256
+    1, // llvm.x86.avx512.mask.cvtps2dq.512
+    1, // llvm.x86.avx512.mask.cvtps2pd.128
+    1, // llvm.x86.avx512.mask.cvtps2pd.256
+    1, // llvm.x86.avx512.mask.cvtps2pd.512
+    1, // llvm.x86.avx512.mask.cvtps2qq.128
+    1, // llvm.x86.avx512.mask.cvtps2qq.256
+    1, // llvm.x86.avx512.mask.cvtps2qq.512
+    1, // llvm.x86.avx512.mask.cvtps2udq.128
+    1, // llvm.x86.avx512.mask.cvtps2udq.256
+    1, // llvm.x86.avx512.mask.cvtps2udq.512
+    1, // llvm.x86.avx512.mask.cvtps2uqq.128
+    1, // llvm.x86.avx512.mask.cvtps2uqq.256
+    1, // llvm.x86.avx512.mask.cvtps2uqq.512
+    1, // llvm.x86.avx512.mask.cvtqq2pd.128
+    1, // llvm.x86.avx512.mask.cvtqq2pd.256
+    1, // llvm.x86.avx512.mask.cvtqq2pd.512
+    1, // llvm.x86.avx512.mask.cvtqq2ps.128
+    1, // llvm.x86.avx512.mask.cvtqq2ps.256
+    1, // llvm.x86.avx512.mask.cvtqq2ps.512
+    1, // llvm.x86.avx512.mask.cvtsd2ss.round
+    1, // llvm.x86.avx512.mask.cvtss2sd.round
+    1, // llvm.x86.avx512.mask.cvttpd2dq.128
+    1, // llvm.x86.avx512.mask.cvttpd2dq.256
+    1, // llvm.x86.avx512.mask.cvttpd2dq.512
+    1, // llvm.x86.avx512.mask.cvttpd2qq.128
+    1, // llvm.x86.avx512.mask.cvttpd2qq.256
+    1, // llvm.x86.avx512.mask.cvttpd2qq.512
+    1, // llvm.x86.avx512.mask.cvttpd2udq.128
+    1, // llvm.x86.avx512.mask.cvttpd2udq.256
+    1, // llvm.x86.avx512.mask.cvttpd2udq.512
+    1, // llvm.x86.avx512.mask.cvttpd2uqq.128
+    1, // llvm.x86.avx512.mask.cvttpd2uqq.256
+    1, // llvm.x86.avx512.mask.cvttpd2uqq.512
+    1, // llvm.x86.avx512.mask.cvttps2dq.128
+    1, // llvm.x86.avx512.mask.cvttps2dq.256
+    1, // llvm.x86.avx512.mask.cvttps2dq.512
+    1, // llvm.x86.avx512.mask.cvttps2qq.128
+    1, // llvm.x86.avx512.mask.cvttps2qq.256
+    1, // llvm.x86.avx512.mask.cvttps2qq.512
+    1, // llvm.x86.avx512.mask.cvttps2udq.128
+    1, // llvm.x86.avx512.mask.cvttps2udq.256
+    1, // llvm.x86.avx512.mask.cvttps2udq.512
+    1, // llvm.x86.avx512.mask.cvttps2uqq.128
+    1, // llvm.x86.avx512.mask.cvttps2uqq.256
+    1, // llvm.x86.avx512.mask.cvttps2uqq.512
+    1, // llvm.x86.avx512.mask.cvtudq2ps.128
+    1, // llvm.x86.avx512.mask.cvtudq2ps.256
+    1, // llvm.x86.avx512.mask.cvtudq2ps.512
+    1, // llvm.x86.avx512.mask.cvtuqq2pd.128
+    1, // llvm.x86.avx512.mask.cvtuqq2pd.256
+    1, // llvm.x86.avx512.mask.cvtuqq2pd.512
+    1, // llvm.x86.avx512.mask.cvtuqq2ps.128
+    1, // llvm.x86.avx512.mask.cvtuqq2ps.256
+    1, // llvm.x86.avx512.mask.cvtuqq2ps.512
+    1, // llvm.x86.avx512.mask.dbpsadbw.128
+    1, // llvm.x86.avx512.mask.dbpsadbw.256
+    1, // llvm.x86.avx512.mask.dbpsadbw.512
+    1, // llvm.x86.avx512.mask.div.pd.512
+    1, // llvm.x86.avx512.mask.div.ps.512
+    1, // llvm.x86.avx512.mask.div.sd.round
+    1, // llvm.x86.avx512.mask.div.ss.round
+    1, // llvm.x86.avx512.mask.expand.b.128
+    1, // llvm.x86.avx512.mask.expand.b.256
+    1, // llvm.x86.avx512.mask.expand.b.512
+    1, // llvm.x86.avx512.mask.expand.d.128
+    1, // llvm.x86.avx512.mask.expand.d.256
+    1, // llvm.x86.avx512.mask.expand.d.512
+    2, // llvm.x86.avx512.mask.expand.load.b.128
+    2, // llvm.x86.avx512.mask.expand.load.b.256
+    2, // llvm.x86.avx512.mask.expand.load.b.512
+    2, // llvm.x86.avx512.mask.expand.load.d.128
+    2, // llvm.x86.avx512.mask.expand.load.d.256
+    2, // llvm.x86.avx512.mask.expand.load.d.512
+    2, // llvm.x86.avx512.mask.expand.load.pd.128
+    2, // llvm.x86.avx512.mask.expand.load.pd.256
+    2, // llvm.x86.avx512.mask.expand.load.pd.512
+    2, // llvm.x86.avx512.mask.expand.load.ps.128
+    2, // llvm.x86.avx512.mask.expand.load.ps.256
+    2, // llvm.x86.avx512.mask.expand.load.ps.512
+    2, // llvm.x86.avx512.mask.expand.load.q.128
+    2, // llvm.x86.avx512.mask.expand.load.q.256
+    2, // llvm.x86.avx512.mask.expand.load.q.512
+    2, // llvm.x86.avx512.mask.expand.load.w.128
+    2, // llvm.x86.avx512.mask.expand.load.w.256
+    2, // llvm.x86.avx512.mask.expand.load.w.512
+    1, // llvm.x86.avx512.mask.expand.pd.128
+    1, // llvm.x86.avx512.mask.expand.pd.256
+    1, // llvm.x86.avx512.mask.expand.pd.512
+    1, // llvm.x86.avx512.mask.expand.ps.128
+    1, // llvm.x86.avx512.mask.expand.ps.256
+    1, // llvm.x86.avx512.mask.expand.ps.512
+    1, // llvm.x86.avx512.mask.expand.q.128
+    1, // llvm.x86.avx512.mask.expand.q.256
+    1, // llvm.x86.avx512.mask.expand.q.512
+    1, // llvm.x86.avx512.mask.expand.w.128
+    1, // llvm.x86.avx512.mask.expand.w.256
+    1, // llvm.x86.avx512.mask.expand.w.512
+    1, // llvm.x86.avx512.mask.fixupimm.pd.128
+    1, // llvm.x86.avx512.mask.fixupimm.pd.256
+    1, // llvm.x86.avx512.mask.fixupimm.pd.512
+    1, // llvm.x86.avx512.mask.fixupimm.ps.128
+    1, // llvm.x86.avx512.mask.fixupimm.ps.256
+    1, // llvm.x86.avx512.mask.fixupimm.ps.512
+    1, // llvm.x86.avx512.mask.fixupimm.sd
+    1, // llvm.x86.avx512.mask.fixupimm.ss
+    1, // llvm.x86.avx512.mask.fpclass.pd.128
+    1, // llvm.x86.avx512.mask.fpclass.pd.256
+    1, // llvm.x86.avx512.mask.fpclass.pd.512
+    1, // llvm.x86.avx512.mask.fpclass.ps.128
+    1, // llvm.x86.avx512.mask.fpclass.ps.256
+    1, // llvm.x86.avx512.mask.fpclass.ps.512
+    1, // llvm.x86.avx512.mask.fpclass.sd
+    1, // llvm.x86.avx512.mask.fpclass.ss
+    1, // llvm.x86.avx512.mask.getexp.pd.128
+    1, // llvm.x86.avx512.mask.getexp.pd.256
+    1, // llvm.x86.avx512.mask.getexp.pd.512
+    1, // llvm.x86.avx512.mask.getexp.ps.128
+    1, // llvm.x86.avx512.mask.getexp.ps.256
+    1, // llvm.x86.avx512.mask.getexp.ps.512
+    1, // llvm.x86.avx512.mask.getexp.sd
+    1, // llvm.x86.avx512.mask.getexp.ss
+    1, // llvm.x86.avx512.mask.getmant.pd.128
+    1, // llvm.x86.avx512.mask.getmant.pd.256
+    1, // llvm.x86.avx512.mask.getmant.pd.512
+    1, // llvm.x86.avx512.mask.getmant.ps.128
+    1, // llvm.x86.avx512.mask.getmant.ps.256
+    1, // llvm.x86.avx512.mask.getmant.ps.512
+    1, // llvm.x86.avx512.mask.getmant.sd
+    1, // llvm.x86.avx512.mask.getmant.ss
+    1, // llvm.x86.avx512.mask.max.pd.512
+    1, // llvm.x86.avx512.mask.max.ps.512
+    1, // llvm.x86.avx512.mask.max.sd.round
+    1, // llvm.x86.avx512.mask.max.ss.round
+    1, // llvm.x86.avx512.mask.min.pd.512
+    1, // llvm.x86.avx512.mask.min.ps.512
+    1, // llvm.x86.avx512.mask.min.sd.round
+    1, // llvm.x86.avx512.mask.min.ss.round
+    1, // llvm.x86.avx512.mask.mul.pd.512
+    1, // llvm.x86.avx512.mask.mul.ps.512
+    1, // llvm.x86.avx512.mask.mul.sd.round
+    1, // llvm.x86.avx512.mask.mul.ss.round
+    1, // llvm.x86.avx512.mask.padds.b.128
+    1, // llvm.x86.avx512.mask.padds.b.256
+    1, // llvm.x86.avx512.mask.padds.b.512
+    1, // llvm.x86.avx512.mask.padds.w.128
+    1, // llvm.x86.avx512.mask.padds.w.256
+    1, // llvm.x86.avx512.mask.padds.w.512
+    1, // llvm.x86.avx512.mask.paddus.b.128
+    1, // llvm.x86.avx512.mask.paddus.b.256
+    1, // llvm.x86.avx512.mask.paddus.b.512
+    1, // llvm.x86.avx512.mask.paddus.w.128
+    1, // llvm.x86.avx512.mask.paddus.w.256
+    1, // llvm.x86.avx512.mask.paddus.w.512
+    1, // llvm.x86.avx512.mask.permvar.df.256
+    1, // llvm.x86.avx512.mask.permvar.df.512
+    1, // llvm.x86.avx512.mask.permvar.di.256
+    1, // llvm.x86.avx512.mask.permvar.di.512
+    1, // llvm.x86.avx512.mask.permvar.hi.128
+    1, // llvm.x86.avx512.mask.permvar.hi.256
+    1, // llvm.x86.avx512.mask.permvar.hi.512
+    1, // llvm.x86.avx512.mask.permvar.qi.128
+    1, // llvm.x86.avx512.mask.permvar.qi.256
+    1, // llvm.x86.avx512.mask.permvar.qi.512
+    1, // llvm.x86.avx512.mask.permvar.sf.256
+    1, // llvm.x86.avx512.mask.permvar.sf.512
+    1, // llvm.x86.avx512.mask.permvar.si.256
+    1, // llvm.x86.avx512.mask.permvar.si.512
+    1, // llvm.x86.avx512.mask.pmaddubs.w.128
+    1, // llvm.x86.avx512.mask.pmaddubs.w.256
+    1, // llvm.x86.avx512.mask.pmaddubs.w.512
+    1, // llvm.x86.avx512.mask.pmaddw.d.128
+    1, // llvm.x86.avx512.mask.pmaddw.d.256
+    1, // llvm.x86.avx512.mask.pmaddw.d.512
+    1, // llvm.x86.avx512.mask.pmov.db.128
+    1, // llvm.x86.avx512.mask.pmov.db.256
+    1, // llvm.x86.avx512.mask.pmov.db.512
+    21, // llvm.x86.avx512.mask.pmov.db.mem.128
+    21, // llvm.x86.avx512.mask.pmov.db.mem.256
+    21, // llvm.x86.avx512.mask.pmov.db.mem.512
+    1, // llvm.x86.avx512.mask.pmov.dw.128
+    1, // llvm.x86.avx512.mask.pmov.dw.256
+    1, // llvm.x86.avx512.mask.pmov.dw.512
+    21, // llvm.x86.avx512.mask.pmov.dw.mem.128
+    21, // llvm.x86.avx512.mask.pmov.dw.mem.256
+    21, // llvm.x86.avx512.mask.pmov.dw.mem.512
+    1, // llvm.x86.avx512.mask.pmov.qb.128
+    1, // llvm.x86.avx512.mask.pmov.qb.256
+    1, // llvm.x86.avx512.mask.pmov.qb.512
+    21, // llvm.x86.avx512.mask.pmov.qb.mem.128
+    21, // llvm.x86.avx512.mask.pmov.qb.mem.256
+    21, // llvm.x86.avx512.mask.pmov.qb.mem.512
+    1, // llvm.x86.avx512.mask.pmov.qd.128
+    1, // llvm.x86.avx512.mask.pmov.qd.256
+    1, // llvm.x86.avx512.mask.pmov.qd.512
+    21, // llvm.x86.avx512.mask.pmov.qd.mem.128
+    21, // llvm.x86.avx512.mask.pmov.qd.mem.256
+    21, // llvm.x86.avx512.mask.pmov.qd.mem.512
+    1, // llvm.x86.avx512.mask.pmov.qw.128
+    1, // llvm.x86.avx512.mask.pmov.qw.256
+    1, // llvm.x86.avx512.mask.pmov.qw.512
+    21, // llvm.x86.avx512.mask.pmov.qw.mem.128
+    21, // llvm.x86.avx512.mask.pmov.qw.mem.256
+    21, // llvm.x86.avx512.mask.pmov.qw.mem.512
+    1, // llvm.x86.avx512.mask.pmov.wb.128
+    1, // llvm.x86.avx512.mask.pmov.wb.256
+    1, // llvm.x86.avx512.mask.pmov.wb.512
+    21, // llvm.x86.avx512.mask.pmov.wb.mem.128
+    21, // llvm.x86.avx512.mask.pmov.wb.mem.256
+    21, // llvm.x86.avx512.mask.pmov.wb.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.db.128
+    1, // llvm.x86.avx512.mask.pmovs.db.256
+    1, // llvm.x86.avx512.mask.pmovs.db.512
+    21, // llvm.x86.avx512.mask.pmovs.db.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.db.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.db.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.dw.128
+    1, // llvm.x86.avx512.mask.pmovs.dw.256
+    1, // llvm.x86.avx512.mask.pmovs.dw.512
+    21, // llvm.x86.avx512.mask.pmovs.dw.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.dw.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.dw.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.qb.128
+    1, // llvm.x86.avx512.mask.pmovs.qb.256
+    1, // llvm.x86.avx512.mask.pmovs.qb.512
+    21, // llvm.x86.avx512.mask.pmovs.qb.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.qb.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.qb.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.qd.128
+    1, // llvm.x86.avx512.mask.pmovs.qd.256
+    1, // llvm.x86.avx512.mask.pmovs.qd.512
+    21, // llvm.x86.avx512.mask.pmovs.qd.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.qd.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.qd.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.qw.128
+    1, // llvm.x86.avx512.mask.pmovs.qw.256
+    1, // llvm.x86.avx512.mask.pmovs.qw.512
+    21, // llvm.x86.avx512.mask.pmovs.qw.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.qw.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.qw.mem.512
+    1, // llvm.x86.avx512.mask.pmovs.wb.128
+    1, // llvm.x86.avx512.mask.pmovs.wb.256
+    1, // llvm.x86.avx512.mask.pmovs.wb.512
+    21, // llvm.x86.avx512.mask.pmovs.wb.mem.128
+    21, // llvm.x86.avx512.mask.pmovs.wb.mem.256
+    21, // llvm.x86.avx512.mask.pmovs.wb.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.db.128
+    1, // llvm.x86.avx512.mask.pmovus.db.256
+    1, // llvm.x86.avx512.mask.pmovus.db.512
+    21, // llvm.x86.avx512.mask.pmovus.db.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.db.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.db.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.dw.128
+    1, // llvm.x86.avx512.mask.pmovus.dw.256
+    1, // llvm.x86.avx512.mask.pmovus.dw.512
+    21, // llvm.x86.avx512.mask.pmovus.dw.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.dw.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.dw.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.qb.128
+    1, // llvm.x86.avx512.mask.pmovus.qb.256
+    1, // llvm.x86.avx512.mask.pmovus.qb.512
+    21, // llvm.x86.avx512.mask.pmovus.qb.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.qb.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.qb.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.qd.128
+    1, // llvm.x86.avx512.mask.pmovus.qd.256
+    1, // llvm.x86.avx512.mask.pmovus.qd.512
+    21, // llvm.x86.avx512.mask.pmovus.qd.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.qd.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.qd.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.qw.128
+    1, // llvm.x86.avx512.mask.pmovus.qw.256
+    1, // llvm.x86.avx512.mask.pmovus.qw.512
+    21, // llvm.x86.avx512.mask.pmovus.qw.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.qw.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.qw.mem.512
+    1, // llvm.x86.avx512.mask.pmovus.wb.128
+    1, // llvm.x86.avx512.mask.pmovus.wb.256
+    1, // llvm.x86.avx512.mask.pmovus.wb.512
+    21, // llvm.x86.avx512.mask.pmovus.wb.mem.128
+    21, // llvm.x86.avx512.mask.pmovus.wb.mem.256
+    21, // llvm.x86.avx512.mask.pmovus.wb.mem.512
+    1, // llvm.x86.avx512.mask.pmultishift.qb.128
+    1, // llvm.x86.avx512.mask.pmultishift.qb.256
+    1, // llvm.x86.avx512.mask.pmultishift.qb.512
+    1, // llvm.x86.avx512.mask.prol.d.128
+    1, // llvm.x86.avx512.mask.prol.d.256
+    1, // llvm.x86.avx512.mask.prol.d.512
+    1, // llvm.x86.avx512.mask.prol.q.128
+    1, // llvm.x86.avx512.mask.prol.q.256
+    1, // llvm.x86.avx512.mask.prol.q.512
+    1, // llvm.x86.avx512.mask.prolv.d.128
+    1, // llvm.x86.avx512.mask.prolv.d.256
+    1, // llvm.x86.avx512.mask.prolv.d.512
+    1, // llvm.x86.avx512.mask.prolv.q.128
+    1, // llvm.x86.avx512.mask.prolv.q.256
+    1, // llvm.x86.avx512.mask.prolv.q.512
+    1, // llvm.x86.avx512.mask.pror.d.128
+    1, // llvm.x86.avx512.mask.pror.d.256
+    1, // llvm.x86.avx512.mask.pror.d.512
+    1, // llvm.x86.avx512.mask.pror.q.128
+    1, // llvm.x86.avx512.mask.pror.q.256
+    1, // llvm.x86.avx512.mask.pror.q.512
+    1, // llvm.x86.avx512.mask.prorv.d.128
+    1, // llvm.x86.avx512.mask.prorv.d.256
+    1, // llvm.x86.avx512.mask.prorv.d.512
+    1, // llvm.x86.avx512.mask.prorv.q.128
+    1, // llvm.x86.avx512.mask.prorv.q.256
+    1, // llvm.x86.avx512.mask.prorv.q.512
+    1, // llvm.x86.avx512.mask.psubs.b.128
+    1, // llvm.x86.avx512.mask.psubs.b.256
+    1, // llvm.x86.avx512.mask.psubs.b.512
+    1, // llvm.x86.avx512.mask.psubs.w.128
+    1, // llvm.x86.avx512.mask.psubs.w.256
+    1, // llvm.x86.avx512.mask.psubs.w.512
+    1, // llvm.x86.avx512.mask.psubus.b.128
+    1, // llvm.x86.avx512.mask.psubus.b.256
+    1, // llvm.x86.avx512.mask.psubus.b.512
+    1, // llvm.x86.avx512.mask.psubus.w.128
+    1, // llvm.x86.avx512.mask.psubus.w.256
+    1, // llvm.x86.avx512.mask.psubus.w.512
+    1, // llvm.x86.avx512.mask.pternlog.d.128
+    1, // llvm.x86.avx512.mask.pternlog.d.256
+    1, // llvm.x86.avx512.mask.pternlog.d.512
+    1, // llvm.x86.avx512.mask.pternlog.q.128
+    1, // llvm.x86.avx512.mask.pternlog.q.256
+    1, // llvm.x86.avx512.mask.pternlog.q.512
+    1, // llvm.x86.avx512.mask.range.pd.128
+    1, // llvm.x86.avx512.mask.range.pd.256
+    1, // llvm.x86.avx512.mask.range.pd.512
+    1, // llvm.x86.avx512.mask.range.ps.128
+    1, // llvm.x86.avx512.mask.range.ps.256
+    1, // llvm.x86.avx512.mask.range.ps.512
+    1, // llvm.x86.avx512.mask.range.sd
+    1, // llvm.x86.avx512.mask.range.ss
+    1, // llvm.x86.avx512.mask.reduce.pd.128
+    1, // llvm.x86.avx512.mask.reduce.pd.256
+    1, // llvm.x86.avx512.mask.reduce.pd.512
+    1, // llvm.x86.avx512.mask.reduce.ps.128
+    1, // llvm.x86.avx512.mask.reduce.ps.256
+    1, // llvm.x86.avx512.mask.reduce.ps.512
+    1, // llvm.x86.avx512.mask.reduce.sd
+    1, // llvm.x86.avx512.mask.reduce.ss
+    1, // llvm.x86.avx512.mask.rndscale.pd.128
+    1, // llvm.x86.avx512.mask.rndscale.pd.256
+    1, // llvm.x86.avx512.mask.rndscale.pd.512
+    1, // llvm.x86.avx512.mask.rndscale.ps.128
+    1, // llvm.x86.avx512.mask.rndscale.ps.256
+    1, // llvm.x86.avx512.mask.rndscale.ps.512
+    1, // llvm.x86.avx512.mask.rndscale.sd
+    1, // llvm.x86.avx512.mask.rndscale.ss
+    1, // llvm.x86.avx512.mask.scalef.pd.128
+    1, // llvm.x86.avx512.mask.scalef.pd.256
+    1, // llvm.x86.avx512.mask.scalef.pd.512
+    1, // llvm.x86.avx512.mask.scalef.ps.128
+    1, // llvm.x86.avx512.mask.scalef.ps.256
+    1, // llvm.x86.avx512.mask.scalef.ps.512
+    1, // llvm.x86.avx512.mask.scalef.sd
+    1, // llvm.x86.avx512.mask.scalef.ss
+    1, // llvm.x86.avx512.mask.sqrt.pd.128
+    1, // llvm.x86.avx512.mask.sqrt.pd.256
+    1, // llvm.x86.avx512.mask.sqrt.pd.512
+    1, // llvm.x86.avx512.mask.sqrt.ps.128
+    1, // llvm.x86.avx512.mask.sqrt.ps.256
+    1, // llvm.x86.avx512.mask.sqrt.ps.512
+    1, // llvm.x86.avx512.mask.sqrt.sd
+    1, // llvm.x86.avx512.mask.sqrt.ss
+    21, // llvm.x86.avx512.mask.store.ss
+    1, // llvm.x86.avx512.mask.sub.pd.512
+    1, // llvm.x86.avx512.mask.sub.ps.512
+    1, // llvm.x86.avx512.mask.sub.sd.round
+    1, // llvm.x86.avx512.mask.sub.ss.round
+    1, // llvm.x86.avx512.mask.vcvtph2ps.128
+    1, // llvm.x86.avx512.mask.vcvtph2ps.256
+    1, // llvm.x86.avx512.mask.vcvtph2ps.512
+    1, // llvm.x86.avx512.mask.vcvtps2ph.128
+    1, // llvm.x86.avx512.mask.vcvtps2ph.256
+    1, // llvm.x86.avx512.mask.vcvtps2ph.512
+    1, // llvm.x86.avx512.mask.vfmadd.pd.128
+    1, // llvm.x86.avx512.mask.vfmadd.pd.256
+    1, // llvm.x86.avx512.mask.vfmadd.pd.512
+    1, // llvm.x86.avx512.mask.vfmadd.ps.128
+    1, // llvm.x86.avx512.mask.vfmadd.ps.256
+    1, // llvm.x86.avx512.mask.vfmadd.ps.512
+    1, // llvm.x86.avx512.mask.vfmadd.sd
+    1, // llvm.x86.avx512.mask.vfmadd.ss
+    1, // llvm.x86.avx512.mask.vfmaddsub.pd.128
+    1, // llvm.x86.avx512.mask.vfmaddsub.pd.256
+    1, // llvm.x86.avx512.mask.vfmaddsub.pd.512
+    1, // llvm.x86.avx512.mask.vfmaddsub.ps.128
+    1, // llvm.x86.avx512.mask.vfmaddsub.ps.256
+    1, // llvm.x86.avx512.mask.vfmaddsub.ps.512
+    1, // llvm.x86.avx512.mask.vfnmadd.pd.128
+    1, // llvm.x86.avx512.mask.vfnmadd.pd.256
+    1, // llvm.x86.avx512.mask.vfnmadd.pd.512
+    1, // llvm.x86.avx512.mask.vfnmadd.ps.128
+    1, // llvm.x86.avx512.mask.vfnmadd.ps.256
+    1, // llvm.x86.avx512.mask.vfnmadd.ps.512
+    1, // llvm.x86.avx512.mask.vfnmsub.pd.128
+    1, // llvm.x86.avx512.mask.vfnmsub.pd.256
+    1, // llvm.x86.avx512.mask.vfnmsub.pd.512
+    1, // llvm.x86.avx512.mask.vfnmsub.ps.128
+    1, // llvm.x86.avx512.mask.vfnmsub.ps.256
+    1, // llvm.x86.avx512.mask.vfnmsub.ps.512
+    1, // llvm.x86.avx512.mask.vpdpbusd.128
+    1, // llvm.x86.avx512.mask.vpdpbusd.256
+    1, // llvm.x86.avx512.mask.vpdpbusd.512
+    1, // llvm.x86.avx512.mask.vpdpbusds.128
+    1, // llvm.x86.avx512.mask.vpdpbusds.256
+    1, // llvm.x86.avx512.mask.vpdpbusds.512
+    1, // llvm.x86.avx512.mask.vpdpwssd.128
+    1, // llvm.x86.avx512.mask.vpdpwssd.256
+    1, // llvm.x86.avx512.mask.vpdpwssd.512
+    1, // llvm.x86.avx512.mask.vpdpwssds.128
+    1, // llvm.x86.avx512.mask.vpdpwssds.256
+    1, // llvm.x86.avx512.mask.vpdpwssds.512
+    1, // llvm.x86.avx512.mask.vpermi2var.d.128
+    1, // llvm.x86.avx512.mask.vpermi2var.d.256
+    1, // llvm.x86.avx512.mask.vpermi2var.d.512
+    1, // llvm.x86.avx512.mask.vpermi2var.hi.128
+    1, // llvm.x86.avx512.mask.vpermi2var.hi.256
+    1, // llvm.x86.avx512.mask.vpermi2var.hi.512
+    1, // llvm.x86.avx512.mask.vpermi2var.pd.128
+    1, // llvm.x86.avx512.mask.vpermi2var.pd.256
+    1, // llvm.x86.avx512.mask.vpermi2var.pd.512
+    1, // llvm.x86.avx512.mask.vpermi2var.ps.128
+    1, // llvm.x86.avx512.mask.vpermi2var.ps.256
+    1, // llvm.x86.avx512.mask.vpermi2var.ps.512
+    1, // llvm.x86.avx512.mask.vpermi2var.q.128
+    1, // llvm.x86.avx512.mask.vpermi2var.q.256
+    1, // llvm.x86.avx512.mask.vpermi2var.q.512
+    1, // llvm.x86.avx512.mask.vpermi2var.qi.128
+    1, // llvm.x86.avx512.mask.vpermi2var.qi.256
+    1, // llvm.x86.avx512.mask.vpermi2var.qi.512
+    1, // llvm.x86.avx512.mask.vpermt2var.d.128
+    1, // llvm.x86.avx512.mask.vpermt2var.d.256
+    1, // llvm.x86.avx512.mask.vpermt2var.d.512
+    1, // llvm.x86.avx512.mask.vpermt2var.hi.128
+    1, // llvm.x86.avx512.mask.vpermt2var.hi.256
+    1, // llvm.x86.avx512.mask.vpermt2var.hi.512
+    1, // llvm.x86.avx512.mask.vpermt2var.pd.128
+    1, // llvm.x86.avx512.mask.vpermt2var.pd.256
+    1, // llvm.x86.avx512.mask.vpermt2var.pd.512
+    1, // llvm.x86.avx512.mask.vpermt2var.ps.128
+    1, // llvm.x86.avx512.mask.vpermt2var.ps.256
+    1, // llvm.x86.avx512.mask.vpermt2var.ps.512
+    1, // llvm.x86.avx512.mask.vpermt2var.q.128
+    1, // llvm.x86.avx512.mask.vpermt2var.q.256
+    1, // llvm.x86.avx512.mask.vpermt2var.q.512
+    1, // llvm.x86.avx512.mask.vpermt2var.qi.128
+    1, // llvm.x86.avx512.mask.vpermt2var.qi.256
+    1, // llvm.x86.avx512.mask.vpermt2var.qi.512
+    1, // llvm.x86.avx512.mask.vpmadd52h.uq.128
+    1, // llvm.x86.avx512.mask.vpmadd52h.uq.256
+    1, // llvm.x86.avx512.mask.vpmadd52h.uq.512
+    1, // llvm.x86.avx512.mask.vpmadd52l.uq.128
+    1, // llvm.x86.avx512.mask.vpmadd52l.uq.256
+    1, // llvm.x86.avx512.mask.vpmadd52l.uq.512
+    1, // llvm.x86.avx512.mask.vpshld.d.128
+    1, // llvm.x86.avx512.mask.vpshld.d.256
+    1, // llvm.x86.avx512.mask.vpshld.d.512
+    1, // llvm.x86.avx512.mask.vpshld.q.128
+    1, // llvm.x86.avx512.mask.vpshld.q.256
+    1, // llvm.x86.avx512.mask.vpshld.q.512
+    1, // llvm.x86.avx512.mask.vpshld.w.128
+    1, // llvm.x86.avx512.mask.vpshld.w.256
+    1, // llvm.x86.avx512.mask.vpshld.w.512
+    1, // llvm.x86.avx512.mask.vpshldv.d.128
+    1, // llvm.x86.avx512.mask.vpshldv.d.256
+    1, // llvm.x86.avx512.mask.vpshldv.d.512
+    1, // llvm.x86.avx512.mask.vpshldv.q.128
+    1, // llvm.x86.avx512.mask.vpshldv.q.256
+    1, // llvm.x86.avx512.mask.vpshldv.q.512
+    1, // llvm.x86.avx512.mask.vpshldv.w.128
+    1, // llvm.x86.avx512.mask.vpshldv.w.256
+    1, // llvm.x86.avx512.mask.vpshldv.w.512
+    1, // llvm.x86.avx512.mask.vpshrd.d.128
+    1, // llvm.x86.avx512.mask.vpshrd.d.256
+    1, // llvm.x86.avx512.mask.vpshrd.d.512
+    1, // llvm.x86.avx512.mask.vpshrd.q.128
+    1, // llvm.x86.avx512.mask.vpshrd.q.256
+    1, // llvm.x86.avx512.mask.vpshrd.q.512
+    1, // llvm.x86.avx512.mask.vpshrd.w.128
+    1, // llvm.x86.avx512.mask.vpshrd.w.256
+    1, // llvm.x86.avx512.mask.vpshrd.w.512
+    1, // llvm.x86.avx512.mask.vpshrdv.d.128
+    1, // llvm.x86.avx512.mask.vpshrdv.d.256
+    1, // llvm.x86.avx512.mask.vpshrdv.d.512
+    1, // llvm.x86.avx512.mask.vpshrdv.q.128
+    1, // llvm.x86.avx512.mask.vpshrdv.q.256
+    1, // llvm.x86.avx512.mask.vpshrdv.q.512
+    1, // llvm.x86.avx512.mask.vpshrdv.w.128
+    1, // llvm.x86.avx512.mask.vpshrdv.w.256
+    1, // llvm.x86.avx512.mask.vpshrdv.w.512
+    1, // llvm.x86.avx512.mask.vpshufbitqmb.128
+    1, // llvm.x86.avx512.mask.vpshufbitqmb.256
+    1, // llvm.x86.avx512.mask.vpshufbitqmb.512
+    1, // llvm.x86.avx512.mask3.vfmadd.pd.128
+    1, // llvm.x86.avx512.mask3.vfmadd.pd.256
+    1, // llvm.x86.avx512.mask3.vfmadd.pd.512
+    1, // llvm.x86.avx512.mask3.vfmadd.ps.128
+    1, // llvm.x86.avx512.mask3.vfmadd.ps.256
+    1, // llvm.x86.avx512.mask3.vfmadd.ps.512
+    1, // llvm.x86.avx512.mask3.vfmadd.sd
+    1, // llvm.x86.avx512.mask3.vfmadd.ss
+    1, // llvm.x86.avx512.mask3.vfmaddsub.pd.128
+    1, // llvm.x86.avx512.mask3.vfmaddsub.pd.256
+    1, // llvm.x86.avx512.mask3.vfmaddsub.pd.512
+    1, // llvm.x86.avx512.mask3.vfmaddsub.ps.128
+    1, // llvm.x86.avx512.mask3.vfmaddsub.ps.256
+    1, // llvm.x86.avx512.mask3.vfmaddsub.ps.512
+    1, // llvm.x86.avx512.mask3.vfmsub.pd.128
+    1, // llvm.x86.avx512.mask3.vfmsub.pd.256
+    1, // llvm.x86.avx512.mask3.vfmsub.pd.512
+    1, // llvm.x86.avx512.mask3.vfmsub.ps.128
+    1, // llvm.x86.avx512.mask3.vfmsub.ps.256
+    1, // llvm.x86.avx512.mask3.vfmsub.ps.512
+    1, // llvm.x86.avx512.mask3.vfmsub.sd
+    1, // llvm.x86.avx512.mask3.vfmsub.ss
+    1, // llvm.x86.avx512.mask3.vfmsubadd.pd.128
+    1, // llvm.x86.avx512.mask3.vfmsubadd.pd.256
+    1, // llvm.x86.avx512.mask3.vfmsubadd.pd.512
+    1, // llvm.x86.avx512.mask3.vfmsubadd.ps.128
+    1, // llvm.x86.avx512.mask3.vfmsubadd.ps.256
+    1, // llvm.x86.avx512.mask3.vfmsubadd.ps.512
+    1, // llvm.x86.avx512.mask3.vfnmsub.pd.128
+    1, // llvm.x86.avx512.mask3.vfnmsub.pd.256
+    1, // llvm.x86.avx512.mask3.vfnmsub.pd.512
+    1, // llvm.x86.avx512.mask3.vfnmsub.ps.128
+    1, // llvm.x86.avx512.mask3.vfnmsub.ps.256
+    1, // llvm.x86.avx512.mask3.vfnmsub.ps.512
+    1, // llvm.x86.avx512.mask3.vfnmsub.sd
+    1, // llvm.x86.avx512.mask3.vfnmsub.ss
+    1, // llvm.x86.avx512.maskz.fixupimm.pd.128
+    1, // llvm.x86.avx512.maskz.fixupimm.pd.256
+    1, // llvm.x86.avx512.maskz.fixupimm.pd.512
+    1, // llvm.x86.avx512.maskz.fixupimm.ps.128
+    1, // llvm.x86.avx512.maskz.fixupimm.ps.256
+    1, // llvm.x86.avx512.maskz.fixupimm.ps.512
+    1, // llvm.x86.avx512.maskz.fixupimm.sd
+    1, // llvm.x86.avx512.maskz.fixupimm.ss
+    1, // llvm.x86.avx512.maskz.pternlog.d.128
+    1, // llvm.x86.avx512.maskz.pternlog.d.256
+    1, // llvm.x86.avx512.maskz.pternlog.d.512
+    1, // llvm.x86.avx512.maskz.pternlog.q.128
+    1, // llvm.x86.avx512.maskz.pternlog.q.256
+    1, // llvm.x86.avx512.maskz.pternlog.q.512
+    1, // llvm.x86.avx512.maskz.vfmadd.pd.128
+    1, // llvm.x86.avx512.maskz.vfmadd.pd.256
+    1, // llvm.x86.avx512.maskz.vfmadd.pd.512
+    1, // llvm.x86.avx512.maskz.vfmadd.ps.128
+    1, // llvm.x86.avx512.maskz.vfmadd.ps.256
+    1, // llvm.x86.avx512.maskz.vfmadd.ps.512
+    1, // llvm.x86.avx512.maskz.vfmadd.sd
+    1, // llvm.x86.avx512.maskz.vfmadd.ss
+    1, // llvm.x86.avx512.maskz.vfmaddsub.pd.128
+    1, // llvm.x86.avx512.maskz.vfmaddsub.pd.256
+    1, // llvm.x86.avx512.maskz.vfmaddsub.pd.512
+    1, // llvm.x86.avx512.maskz.vfmaddsub.ps.128
+    1, // llvm.x86.avx512.maskz.vfmaddsub.ps.256
+    1, // llvm.x86.avx512.maskz.vfmaddsub.ps.512
+    1, // llvm.x86.avx512.maskz.vpdpbusd.128
+    1, // llvm.x86.avx512.maskz.vpdpbusd.256
+    1, // llvm.x86.avx512.maskz.vpdpbusd.512
+    1, // llvm.x86.avx512.maskz.vpdpbusds.128
+    1, // llvm.x86.avx512.maskz.vpdpbusds.256
+    1, // llvm.x86.avx512.maskz.vpdpbusds.512
+    1, // llvm.x86.avx512.maskz.vpdpwssd.128
+    1, // llvm.x86.avx512.maskz.vpdpwssd.256
+    1, // llvm.x86.avx512.maskz.vpdpwssd.512
+    1, // llvm.x86.avx512.maskz.vpdpwssds.128
+    1, // llvm.x86.avx512.maskz.vpdpwssds.256
+    1, // llvm.x86.avx512.maskz.vpdpwssds.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.d.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.d.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.d.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.hi.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.hi.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.hi.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.pd.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.pd.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.pd.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.ps.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.ps.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.ps.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.q.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.q.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.q.512
+    1, // llvm.x86.avx512.maskz.vpermt2var.qi.128
+    1, // llvm.x86.avx512.maskz.vpermt2var.qi.256
+    1, // llvm.x86.avx512.maskz.vpermt2var.qi.512
+    1, // llvm.x86.avx512.maskz.vpmadd52h.uq.128
+    1, // llvm.x86.avx512.maskz.vpmadd52h.uq.256
+    1, // llvm.x86.avx512.maskz.vpmadd52h.uq.512
+    1, // llvm.x86.avx512.maskz.vpmadd52l.uq.128
+    1, // llvm.x86.avx512.maskz.vpmadd52l.uq.256
+    1, // llvm.x86.avx512.maskz.vpmadd52l.uq.512
+    1, // llvm.x86.avx512.maskz.vpshldv.d.128
+    1, // llvm.x86.avx512.maskz.vpshldv.d.256
+    1, // llvm.x86.avx512.maskz.vpshldv.d.512
+    1, // llvm.x86.avx512.maskz.vpshldv.q.128
+    1, // llvm.x86.avx512.maskz.vpshldv.q.256
+    1, // llvm.x86.avx512.maskz.vpshldv.q.512
+    1, // llvm.x86.avx512.maskz.vpshldv.w.128
+    1, // llvm.x86.avx512.maskz.vpshldv.w.256
+    1, // llvm.x86.avx512.maskz.vpshldv.w.512
+    1, // llvm.x86.avx512.maskz.vpshrdv.d.128
+    1, // llvm.x86.avx512.maskz.vpshrdv.d.256
+    1, // llvm.x86.avx512.maskz.vpshrdv.d.512
+    1, // llvm.x86.avx512.maskz.vpshrdv.q.128
+    1, // llvm.x86.avx512.maskz.vpshrdv.q.256
+    1, // llvm.x86.avx512.maskz.vpshrdv.q.512
+    1, // llvm.x86.avx512.maskz.vpshrdv.w.128
+    1, // llvm.x86.avx512.maskz.vpshrdv.w.256
+    1, // llvm.x86.avx512.maskz.vpshrdv.w.512
+    1, // llvm.x86.avx512.packssdw.512
+    1, // llvm.x86.avx512.packsswb.512
+    1, // llvm.x86.avx512.packusdw.512
+    1, // llvm.x86.avx512.packuswb.512
+    1, // llvm.x86.avx512.pmul.dq.512
+    1, // llvm.x86.avx512.pmul.hr.sw.512
+    1, // llvm.x86.avx512.pmulh.w.512
+    1, // llvm.x86.avx512.pmulhu.w.512
+    1, // llvm.x86.avx512.pmulu.dq.512
+    1, // llvm.x86.avx512.psad.bw.512
+    1, // llvm.x86.avx512.pshuf.b.512
+    1, // llvm.x86.avx512.psll.d.512
+    1, // llvm.x86.avx512.psll.q.512
+    1, // llvm.x86.avx512.psll.w.512
+    1, // llvm.x86.avx512.pslli.d.512
+    1, // llvm.x86.avx512.pslli.q.512
+    1, // llvm.x86.avx512.pslli.w.512
+    1, // llvm.x86.avx512.psllv.d.512
+    1, // llvm.x86.avx512.psllv.q.512
+    1, // llvm.x86.avx512.psllv.w.128
+    1, // llvm.x86.avx512.psllv.w.256
+    1, // llvm.x86.avx512.psllv.w.512
+    1, // llvm.x86.avx512.psra.d.512
+    1, // llvm.x86.avx512.psra.q.128
+    1, // llvm.x86.avx512.psra.q.256
+    1, // llvm.x86.avx512.psra.q.512
+    1, // llvm.x86.avx512.psra.w.512
+    1, // llvm.x86.avx512.psrai.d.512
+    1, // llvm.x86.avx512.psrai.q.128
+    1, // llvm.x86.avx512.psrai.q.256
+    1, // llvm.x86.avx512.psrai.q.512
+    1, // llvm.x86.avx512.psrai.w.512
+    1, // llvm.x86.avx512.psrav.d.512
+    1, // llvm.x86.avx512.psrav.q.128
+    1, // llvm.x86.avx512.psrav.q.256
+    1, // llvm.x86.avx512.psrav.q.512
+    1, // llvm.x86.avx512.psrav.w.128
+    1, // llvm.x86.avx512.psrav.w.256
+    1, // llvm.x86.avx512.psrav.w.512
+    1, // llvm.x86.avx512.psrl.d.512
+    1, // llvm.x86.avx512.psrl.q.512
+    1, // llvm.x86.avx512.psrl.w.512
+    1, // llvm.x86.avx512.psrli.d.512
+    1, // llvm.x86.avx512.psrli.q.512
+    1, // llvm.x86.avx512.psrli.w.512
+    1, // llvm.x86.avx512.psrlv.d.512
+    1, // llvm.x86.avx512.psrlv.q.512
+    1, // llvm.x86.avx512.psrlv.w.128
+    1, // llvm.x86.avx512.psrlv.w.256
+    1, // llvm.x86.avx512.psrlv.w.512
+    1, // llvm.x86.avx512.rcp14.pd.128
+    1, // llvm.x86.avx512.rcp14.pd.256
+    1, // llvm.x86.avx512.rcp14.pd.512
+    1, // llvm.x86.avx512.rcp14.ps.128
+    1, // llvm.x86.avx512.rcp14.ps.256
+    1, // llvm.x86.avx512.rcp14.ps.512
+    1, // llvm.x86.avx512.rcp14.sd
+    1, // llvm.x86.avx512.rcp14.ss
+    1, // llvm.x86.avx512.rcp28.pd
+    1, // llvm.x86.avx512.rcp28.ps
+    1, // llvm.x86.avx512.rcp28.sd
+    1, // llvm.x86.avx512.rcp28.ss
+    1, // llvm.x86.avx512.rsqrt14.pd.128
+    1, // llvm.x86.avx512.rsqrt14.pd.256
+    1, // llvm.x86.avx512.rsqrt14.pd.512
+    1, // llvm.x86.avx512.rsqrt14.ps.128
+    1, // llvm.x86.avx512.rsqrt14.ps.256
+    1, // llvm.x86.avx512.rsqrt14.ps.512
+    1, // llvm.x86.avx512.rsqrt14.sd
+    1, // llvm.x86.avx512.rsqrt14.ss
+    1, // llvm.x86.avx512.rsqrt28.pd
+    1, // llvm.x86.avx512.rsqrt28.ps
+    1, // llvm.x86.avx512.rsqrt28.sd
+    1, // llvm.x86.avx512.rsqrt28.ss
+    21, // llvm.x86.avx512.scatter.dpd.512
+    21, // llvm.x86.avx512.scatter.dpi.512
+    21, // llvm.x86.avx512.scatter.dpq.512
+    21, // llvm.x86.avx512.scatter.dps.512
+    21, // llvm.x86.avx512.scatter.qpd.512
+    21, // llvm.x86.avx512.scatter.qpi.512
+    21, // llvm.x86.avx512.scatter.qpq.512
+    21, // llvm.x86.avx512.scatter.qps.512
+    21, // llvm.x86.avx512.scatterdiv2.df
+    21, // llvm.x86.avx512.scatterdiv2.di
+    21, // llvm.x86.avx512.scatterdiv4.df
+    21, // llvm.x86.avx512.scatterdiv4.di
+    21, // llvm.x86.avx512.scatterdiv4.sf
+    21, // llvm.x86.avx512.scatterdiv4.si
+    21, // llvm.x86.avx512.scatterdiv8.sf
+    21, // llvm.x86.avx512.scatterdiv8.si
+    21, // llvm.x86.avx512.scatterpf.dpd.512
+    21, // llvm.x86.avx512.scatterpf.dps.512
+    21, // llvm.x86.avx512.scatterpf.qpd.512
+    21, // llvm.x86.avx512.scatterpf.qps.512
+    21, // llvm.x86.avx512.scattersiv2.df
+    21, // llvm.x86.avx512.scattersiv2.di
+    21, // llvm.x86.avx512.scattersiv4.df
+    21, // llvm.x86.avx512.scattersiv4.di
+    21, // llvm.x86.avx512.scattersiv4.sf
+    21, // llvm.x86.avx512.scattersiv4.si
+    21, // llvm.x86.avx512.scattersiv8.sf
+    21, // llvm.x86.avx512.scattersiv8.si
+    2, // llvm.x86.avx512.vbroadcast.sd.512
+    2, // llvm.x86.avx512.vbroadcast.ss.512
+    1, // llvm.x86.avx512.vcomi.sd
+    1, // llvm.x86.avx512.vcomi.ss
+    1, // llvm.x86.avx512.vcvtsd2si32
+    1, // llvm.x86.avx512.vcvtsd2si64
+    1, // llvm.x86.avx512.vcvtsd2usi32
+    1, // llvm.x86.avx512.vcvtsd2usi64
+    1, // llvm.x86.avx512.vcvtss2si32
+    1, // llvm.x86.avx512.vcvtss2si64
+    1, // llvm.x86.avx512.vcvtss2usi32
+    1, // llvm.x86.avx512.vcvtss2usi64
+    1, // llvm.x86.avx512.vpermilvar.pd.512
+    1, // llvm.x86.avx512.vpermilvar.ps.512
+    1, // llvm.x86.bmi.bextr.32
+    1, // llvm.x86.bmi.bextr.64
+    1, // llvm.x86.bmi.bzhi.32
+    1, // llvm.x86.bmi.bzhi.64
+    1, // llvm.x86.bmi.pdep.32
+    1, // llvm.x86.bmi.pdep.64
+    1, // llvm.x86.bmi.pext.32
+    1, // llvm.x86.bmi.pext.64
+    3, // llvm.x86.clflushopt
+    3, // llvm.x86.clrssbsy
+    3, // llvm.x86.clwb
+    3, // llvm.x86.clzero
+    3, // llvm.x86.flags.read.u32
+    3, // llvm.x86.flags.read.u64
+    3, // llvm.x86.flags.write.u32
+    3, // llvm.x86.flags.write.u64
+    1, // llvm.x86.fma.vfmadd.pd
+    1, // llvm.x86.fma.vfmadd.pd.256
+    1, // llvm.x86.fma.vfmadd.ps
+    1, // llvm.x86.fma.vfmadd.ps.256
+    1, // llvm.x86.fma.vfmadd.sd
+    1, // llvm.x86.fma.vfmadd.ss
+    1, // llvm.x86.fma.vfmaddsub.pd
+    1, // llvm.x86.fma.vfmaddsub.pd.256
+    1, // llvm.x86.fma.vfmaddsub.ps
+    1, // llvm.x86.fma.vfmaddsub.ps.256
+    1, // llvm.x86.fma.vfmsub.pd
+    1, // llvm.x86.fma.vfmsub.pd.256
+    1, // llvm.x86.fma.vfmsub.ps
+    1, // llvm.x86.fma.vfmsub.ps.256
+    1, // llvm.x86.fma.vfmsub.sd
+    1, // llvm.x86.fma.vfmsub.ss
+    1, // llvm.x86.fma.vfmsubadd.pd
+    1, // llvm.x86.fma.vfmsubadd.pd.256
+    1, // llvm.x86.fma.vfmsubadd.ps
+    1, // llvm.x86.fma.vfmsubadd.ps.256
+    1, // llvm.x86.fma.vfnmadd.pd
+    1, // llvm.x86.fma.vfnmadd.pd.256
+    1, // llvm.x86.fma.vfnmadd.ps
+    1, // llvm.x86.fma.vfnmadd.ps.256
+    1, // llvm.x86.fma.vfnmadd.sd
+    1, // llvm.x86.fma.vfnmadd.ss
+    1, // llvm.x86.fma.vfnmsub.pd
+    1, // llvm.x86.fma.vfnmsub.pd.256
+    1, // llvm.x86.fma.vfnmsub.ps
+    1, // llvm.x86.fma.vfnmsub.ps.256
+    1, // llvm.x86.fma.vfnmsub.sd
+    1, // llvm.x86.fma.vfnmsub.ss
+    1, // llvm.x86.fma4.vfmadd.sd
+    1, // llvm.x86.fma4.vfmadd.ss
+    3, // llvm.x86.fxrstor
+    3, // llvm.x86.fxrstor64
+    3, // llvm.x86.fxsave
+    3, // llvm.x86.fxsave64
+    3, // llvm.x86.incsspd
+    3, // llvm.x86.incsspq
+    3, // llvm.x86.int
+    3, // llvm.x86.llwpcb
+    3, // llvm.x86.lwpins32
+    3, // llvm.x86.lwpins64
+    3, // llvm.x86.lwpval32
+    3, // llvm.x86.lwpval64
+    3, // llvm.x86.mmx.emms
+    3, // llvm.x86.mmx.femms
+    3, // llvm.x86.mmx.maskmovq
+    3, // llvm.x86.mmx.movnt.dq
+    1, // llvm.x86.mmx.packssdw
+    1, // llvm.x86.mmx.packsswb
+    1, // llvm.x86.mmx.packuswb
+    1, // llvm.x86.mmx.padd.b
+    1, // llvm.x86.mmx.padd.d
+    1, // llvm.x86.mmx.padd.q
+    1, // llvm.x86.mmx.padd.w
+    1, // llvm.x86.mmx.padds.b
+    1, // llvm.x86.mmx.padds.w
+    1, // llvm.x86.mmx.paddus.b
+    1, // llvm.x86.mmx.paddus.w
+    1, // llvm.x86.mmx.palignr.b
+    1, // llvm.x86.mmx.pand
+    1, // llvm.x86.mmx.pandn
+    1, // llvm.x86.mmx.pavg.b
+    1, // llvm.x86.mmx.pavg.w
+    1, // llvm.x86.mmx.pcmpeq.b
+    1, // llvm.x86.mmx.pcmpeq.d
+    1, // llvm.x86.mmx.pcmpeq.w
+    1, // llvm.x86.mmx.pcmpgt.b
+    1, // llvm.x86.mmx.pcmpgt.d
+    1, // llvm.x86.mmx.pcmpgt.w
+    1, // llvm.x86.mmx.pextr.w
+    1, // llvm.x86.mmx.pinsr.w
+    1, // llvm.x86.mmx.pmadd.wd
+    1, // llvm.x86.mmx.pmaxs.w
+    1, // llvm.x86.mmx.pmaxu.b
+    1, // llvm.x86.mmx.pmins.w
+    1, // llvm.x86.mmx.pminu.b
+    1, // llvm.x86.mmx.pmovmskb
+    1, // llvm.x86.mmx.pmulh.w
+    1, // llvm.x86.mmx.pmulhu.w
+    1, // llvm.x86.mmx.pmull.w
+    1, // llvm.x86.mmx.pmulu.dq
+    1, // llvm.x86.mmx.por
+    1, // llvm.x86.mmx.psad.bw
+    1, // llvm.x86.mmx.psll.d
+    1, // llvm.x86.mmx.psll.q
+    1, // llvm.x86.mmx.psll.w
+    1, // llvm.x86.mmx.pslli.d
+    1, // llvm.x86.mmx.pslli.q
+    1, // llvm.x86.mmx.pslli.w
+    1, // llvm.x86.mmx.psra.d
+    1, // llvm.x86.mmx.psra.w
+    1, // llvm.x86.mmx.psrai.d
+    1, // llvm.x86.mmx.psrai.w
+    1, // llvm.x86.mmx.psrl.d
+    1, // llvm.x86.mmx.psrl.q
+    1, // llvm.x86.mmx.psrl.w
+    1, // llvm.x86.mmx.psrli.d
+    1, // llvm.x86.mmx.psrli.q
+    1, // llvm.x86.mmx.psrli.w
+    1, // llvm.x86.mmx.psub.b
+    1, // llvm.x86.mmx.psub.d
+    1, // llvm.x86.mmx.psub.q
+    1, // llvm.x86.mmx.psub.w
+    1, // llvm.x86.mmx.psubs.b
+    1, // llvm.x86.mmx.psubs.w
+    1, // llvm.x86.mmx.psubus.b
+    1, // llvm.x86.mmx.psubus.w
+    1, // llvm.x86.mmx.punpckhbw
+    1, // llvm.x86.mmx.punpckhdq
+    1, // llvm.x86.mmx.punpckhwd
+    1, // llvm.x86.mmx.punpcklbw
+    1, // llvm.x86.mmx.punpckldq
+    1, // llvm.x86.mmx.punpcklwd
+    1, // llvm.x86.mmx.pxor
+    3, // llvm.x86.monitorx
+    3, // llvm.x86.mwaitx
+    1, // llvm.x86.pclmulqdq
+    1, // llvm.x86.pclmulqdq.256
+    1, // llvm.x86.pclmulqdq.512
+    3, // llvm.x86.rdfsbase.32
+    3, // llvm.x86.rdfsbase.64
+    3, // llvm.x86.rdgsbase.32
+    3, // llvm.x86.rdgsbase.64
+    3, // llvm.x86.rdpid
+    3, // llvm.x86.rdpkru
+    3, // llvm.x86.rdpmc
+    3, // llvm.x86.rdrand.16
+    3, // llvm.x86.rdrand.32
+    3, // llvm.x86.rdrand.64
+    3, // llvm.x86.rdseed.16
+    3, // llvm.x86.rdseed.32
+    3, // llvm.x86.rdseed.64
+    3, // llvm.x86.rdsspd
+    3, // llvm.x86.rdsspq
+    3, // llvm.x86.rdtsc
+    21, // llvm.x86.rdtscp
+    3, // llvm.x86.rstorssp
+    3, // llvm.x86.saveprevssp
+    3, // llvm.x86.seh.ehguard
+    3, // llvm.x86.seh.ehregnode
+    1, // llvm.x86.seh.lsda
+    1, // llvm.x86.seh.recoverfp
+    3, // llvm.x86.setssbsy
+    1, // llvm.x86.sha1msg1
+    1, // llvm.x86.sha1msg2
+    1, // llvm.x86.sha1nexte
+    1, // llvm.x86.sha1rnds4
+    1, // llvm.x86.sha256msg1
+    1, // llvm.x86.sha256msg2
+    1, // llvm.x86.sha256rnds2
+    3, // llvm.x86.slwpcb
+    1, // llvm.x86.sse.cmp.ps
+    1, // llvm.x86.sse.cmp.ss
+    1, // llvm.x86.sse.comieq.ss
+    1, // llvm.x86.sse.comige.ss
+    1, // llvm.x86.sse.comigt.ss
+    1, // llvm.x86.sse.comile.ss
+    1, // llvm.x86.sse.comilt.ss
+    1, // llvm.x86.sse.comineq.ss
+    1, // llvm.x86.sse.cvtpd2pi
+    1, // llvm.x86.sse.cvtpi2pd
+    1, // llvm.x86.sse.cvtpi2ps
+    1, // llvm.x86.sse.cvtps2pi
+    1, // llvm.x86.sse.cvtsi2ss
+    1, // llvm.x86.sse.cvtsi642ss
+    1, // llvm.x86.sse.cvtss2si
+    1, // llvm.x86.sse.cvtss2si64
+    1, // llvm.x86.sse.cvttpd2pi
+    1, // llvm.x86.sse.cvttps2pi
+    1, // llvm.x86.sse.cvttss2si
+    1, // llvm.x86.sse.cvttss2si64
+    3, // llvm.x86.sse.ldmxcsr
+    1, // llvm.x86.sse.max.ps
+    1, // llvm.x86.sse.max.ss
+    1, // llvm.x86.sse.min.ps
+    1, // llvm.x86.sse.min.ss
+    1, // llvm.x86.sse.movmsk.ps
+    1, // llvm.x86.sse.pshuf.w
+    1, // llvm.x86.sse.rcp.ps
+    1, // llvm.x86.sse.rcp.ss
+    1, // llvm.x86.sse.rsqrt.ps
+    1, // llvm.x86.sse.rsqrt.ss
+    3, // llvm.x86.sse.sfence
+    1, // llvm.x86.sse.sqrt.ps
+    1, // llvm.x86.sse.sqrt.ss
+    3, // llvm.x86.sse.stmxcsr
+    1, // llvm.x86.sse.ucomieq.ss
+    1, // llvm.x86.sse.ucomige.ss
+    1, // llvm.x86.sse.ucomigt.ss
+    1, // llvm.x86.sse.ucomile.ss
+    1, // llvm.x86.sse.ucomilt.ss
+    1, // llvm.x86.sse.ucomineq.ss
+    3, // llvm.x86.sse2.clflush
+    1, // llvm.x86.sse2.cmp.pd
+    1, // llvm.x86.sse2.cmp.sd
+    1, // llvm.x86.sse2.comieq.sd
+    1, // llvm.x86.sse2.comige.sd
+    1, // llvm.x86.sse2.comigt.sd
+    1, // llvm.x86.sse2.comile.sd
+    1, // llvm.x86.sse2.comilt.sd
+    1, // llvm.x86.sse2.comineq.sd
+    1, // llvm.x86.sse2.cvtdq2ps
+    1, // llvm.x86.sse2.cvtpd2dq
+    1, // llvm.x86.sse2.cvtpd2ps
+    1, // llvm.x86.sse2.cvtps2dq
+    1, // llvm.x86.sse2.cvtsd2si
+    1, // llvm.x86.sse2.cvtsd2si64
+    1, // llvm.x86.sse2.cvtsd2ss
+    1, // llvm.x86.sse2.cvtsi2sd
+    1, // llvm.x86.sse2.cvtsi642sd
+    1, // llvm.x86.sse2.cvtss2sd
+    1, // llvm.x86.sse2.cvttpd2dq
+    1, // llvm.x86.sse2.cvttps2dq
+    1, // llvm.x86.sse2.cvttsd2si
+    1, // llvm.x86.sse2.cvttsd2si64
+    3, // llvm.x86.sse2.lfence
+    3, // llvm.x86.sse2.maskmov.dqu
+    1, // llvm.x86.sse2.max.pd
+    1, // llvm.x86.sse2.max.sd
+    3, // llvm.x86.sse2.mfence
+    1, // llvm.x86.sse2.min.pd
+    1, // llvm.x86.sse2.min.sd
+    1, // llvm.x86.sse2.movmsk.pd
+    1, // llvm.x86.sse2.packssdw.128
+    1, // llvm.x86.sse2.packsswb.128
+    1, // llvm.x86.sse2.packuswb.128
+    1, // llvm.x86.sse2.padds.b
+    1, // llvm.x86.sse2.padds.w
+    1, // llvm.x86.sse2.paddus.b
+    1, // llvm.x86.sse2.paddus.w
+    3, // llvm.x86.sse2.pause
+    1, // llvm.x86.sse2.pmadd.wd
+    1, // llvm.x86.sse2.pmovmskb.128
+    1, // llvm.x86.sse2.pmulh.w
+    1, // llvm.x86.sse2.pmulhu.w
+    1, // llvm.x86.sse2.pmulu.dq
+    1, // llvm.x86.sse2.psad.bw
+    1, // llvm.x86.sse2.psll.d
+    1, // llvm.x86.sse2.psll.q
+    1, // llvm.x86.sse2.psll.w
+    1, // llvm.x86.sse2.pslli.d
+    1, // llvm.x86.sse2.pslli.q
+    1, // llvm.x86.sse2.pslli.w
+    1, // llvm.x86.sse2.psra.d
+    1, // llvm.x86.sse2.psra.w
+    1, // llvm.x86.sse2.psrai.d
+    1, // llvm.x86.sse2.psrai.w
+    1, // llvm.x86.sse2.psrl.d
+    1, // llvm.x86.sse2.psrl.q
+    1, // llvm.x86.sse2.psrl.w
+    1, // llvm.x86.sse2.psrli.d
+    1, // llvm.x86.sse2.psrli.q
+    1, // llvm.x86.sse2.psrli.w
+    1, // llvm.x86.sse2.psubs.b
+    1, // llvm.x86.sse2.psubs.w
+    1, // llvm.x86.sse2.psubus.b
+    1, // llvm.x86.sse2.psubus.w
+    1, // llvm.x86.sse2.sqrt.pd
+    1, // llvm.x86.sse2.sqrt.sd
+    1, // llvm.x86.sse2.ucomieq.sd
+    1, // llvm.x86.sse2.ucomige.sd
+    1, // llvm.x86.sse2.ucomigt.sd
+    1, // llvm.x86.sse2.ucomile.sd
+    1, // llvm.x86.sse2.ucomilt.sd
+    1, // llvm.x86.sse2.ucomineq.sd
+    1, // llvm.x86.sse3.addsub.pd
+    1, // llvm.x86.sse3.addsub.ps
+    1, // llvm.x86.sse3.hadd.pd
+    1, // llvm.x86.sse3.hadd.ps
+    1, // llvm.x86.sse3.hsub.pd
+    1, // llvm.x86.sse3.hsub.ps
+    16, // llvm.x86.sse3.ldu.dq
+    3, // llvm.x86.sse3.monitor
+    3, // llvm.x86.sse3.mwait
+    1, // llvm.x86.sse41.blendvpd
+    1, // llvm.x86.sse41.blendvps
+    1, // llvm.x86.sse41.dppd
+    1, // llvm.x86.sse41.dpps
+    1, // llvm.x86.sse41.insertps
+    1, // llvm.x86.sse41.mpsadbw
+    1, // llvm.x86.sse41.packusdw
+    1, // llvm.x86.sse41.pblendvb
+    1, // llvm.x86.sse41.phminposuw
+    1, // llvm.x86.sse41.pmuldq
+    1, // llvm.x86.sse41.ptestc
+    1, // llvm.x86.sse41.ptestnzc
+    1, // llvm.x86.sse41.ptestz
+    1, // llvm.x86.sse41.round.pd
+    1, // llvm.x86.sse41.round.ps
+    1, // llvm.x86.sse41.round.sd
+    1, // llvm.x86.sse41.round.ss
+    1, // llvm.x86.sse42.crc32.32.16
+    1, // llvm.x86.sse42.crc32.32.32
+    1, // llvm.x86.sse42.crc32.32.8
+    1, // llvm.x86.sse42.crc32.64.64
+    1, // llvm.x86.sse42.pcmpestri128
+    1, // llvm.x86.sse42.pcmpestria128
+    1, // llvm.x86.sse42.pcmpestric128
+    1, // llvm.x86.sse42.pcmpestrio128
+    1, // llvm.x86.sse42.pcmpestris128
+    1, // llvm.x86.sse42.pcmpestriz128
+    1, // llvm.x86.sse42.pcmpestrm128
+    1, // llvm.x86.sse42.pcmpistri128
+    1, // llvm.x86.sse42.pcmpistria128
+    1, // llvm.x86.sse42.pcmpistric128
+    1, // llvm.x86.sse42.pcmpistrio128
+    1, // llvm.x86.sse42.pcmpistris128
+    1, // llvm.x86.sse42.pcmpistriz128
+    1, // llvm.x86.sse42.pcmpistrm128
+    1, // llvm.x86.sse4a.extrq
+    1, // llvm.x86.sse4a.extrqi
+    1, // llvm.x86.sse4a.insertq
+    1, // llvm.x86.sse4a.insertqi
+    1, // llvm.x86.ssse3.pabs.b
+    1, // llvm.x86.ssse3.pabs.d
+    1, // llvm.x86.ssse3.pabs.w
+    1, // llvm.x86.ssse3.phadd.d
+    1, // llvm.x86.ssse3.phadd.d.128
+    1, // llvm.x86.ssse3.phadd.sw
+    1, // llvm.x86.ssse3.phadd.sw.128
+    1, // llvm.x86.ssse3.phadd.w
+    1, // llvm.x86.ssse3.phadd.w.128
+    1, // llvm.x86.ssse3.phsub.d
+    1, // llvm.x86.ssse3.phsub.d.128
+    1, // llvm.x86.ssse3.phsub.sw
+    1, // llvm.x86.ssse3.phsub.sw.128
+    1, // llvm.x86.ssse3.phsub.w
+    1, // llvm.x86.ssse3.phsub.w.128
+    1, // llvm.x86.ssse3.pmadd.ub.sw
+    1, // llvm.x86.ssse3.pmadd.ub.sw.128
+    1, // llvm.x86.ssse3.pmul.hr.sw
+    1, // llvm.x86.ssse3.pmul.hr.sw.128
+    1, // llvm.x86.ssse3.pshuf.b
+    1, // llvm.x86.ssse3.pshuf.b.128
+    1, // llvm.x86.ssse3.psign.b
+    1, // llvm.x86.ssse3.psign.b.128
+    1, // llvm.x86.ssse3.psign.d
+    1, // llvm.x86.ssse3.psign.d.128
+    1, // llvm.x86.ssse3.psign.w
+    1, // llvm.x86.ssse3.psign.w.128
+    21, // llvm.x86.subborrow.u32
+    21, // llvm.x86.subborrow.u64
+    1, // llvm.x86.tbm.bextri.u32
+    1, // llvm.x86.tbm.bextri.u64
+    1, // llvm.x86.vcvtph2ps.128
+    1, // llvm.x86.vcvtph2ps.256
+    1, // llvm.x86.vcvtps2ph.128
+    1, // llvm.x86.vcvtps2ph.256
+    1, // llvm.x86.vgf2p8affineinvqb.128
+    1, // llvm.x86.vgf2p8affineinvqb.256
+    1, // llvm.x86.vgf2p8affineinvqb.512
+    1, // llvm.x86.vgf2p8affineqb.128
+    1, // llvm.x86.vgf2p8affineqb.256
+    1, // llvm.x86.vgf2p8affineqb.512
+    1, // llvm.x86.vgf2p8mulb.128
+    1, // llvm.x86.vgf2p8mulb.256
+    1, // llvm.x86.vgf2p8mulb.512
+    3, // llvm.x86.wrfsbase.32
+    3, // llvm.x86.wrfsbase.64
+    3, // llvm.x86.wrgsbase.32
+    3, // llvm.x86.wrgsbase.64
+    3, // llvm.x86.wrpkru
+    3, // llvm.x86.wrssd
+    3, // llvm.x86.wrssq
+    3, // llvm.x86.wrussd
+    3, // llvm.x86.wrussq
+    3, // llvm.x86.xabort
+    3, // llvm.x86.xbegin
+    3, // llvm.x86.xend
+    3, // llvm.x86.xgetbv
+    1, // llvm.x86.xop.vfrcz.pd
+    1, // llvm.x86.xop.vfrcz.pd.256
+    1, // llvm.x86.xop.vfrcz.ps
+    1, // llvm.x86.xop.vfrcz.ps.256
+    1, // llvm.x86.xop.vfrcz.sd
+    1, // llvm.x86.xop.vfrcz.ss
+    1, // llvm.x86.xop.vpcomb
+    1, // llvm.x86.xop.vpcomd
+    1, // llvm.x86.xop.vpcomq
+    1, // llvm.x86.xop.vpcomub
+    1, // llvm.x86.xop.vpcomud
+    1, // llvm.x86.xop.vpcomuq
+    1, // llvm.x86.xop.vpcomuw
+    1, // llvm.x86.xop.vpcomw
+    1, // llvm.x86.xop.vpermil2pd
+    1, // llvm.x86.xop.vpermil2pd.256
+    1, // llvm.x86.xop.vpermil2ps
+    1, // llvm.x86.xop.vpermil2ps.256
+    1, // llvm.x86.xop.vphaddbd
+    1, // llvm.x86.xop.vphaddbq
+    1, // llvm.x86.xop.vphaddbw
+    1, // llvm.x86.xop.vphadddq
+    1, // llvm.x86.xop.vphaddubd
+    1, // llvm.x86.xop.vphaddubq
+    1, // llvm.x86.xop.vphaddubw
+    1, // llvm.x86.xop.vphaddudq
+    1, // llvm.x86.xop.vphadduwd
+    1, // llvm.x86.xop.vphadduwq
+    1, // llvm.x86.xop.vphaddwd
+    1, // llvm.x86.xop.vphaddwq
+    1, // llvm.x86.xop.vphsubbw
+    1, // llvm.x86.xop.vphsubdq
+    1, // llvm.x86.xop.vphsubwd
+    1, // llvm.x86.xop.vpmacsdd
+    1, // llvm.x86.xop.vpmacsdqh
+    1, // llvm.x86.xop.vpmacsdql
+    1, // llvm.x86.xop.vpmacssdd
+    1, // llvm.x86.xop.vpmacssdqh
+    1, // llvm.x86.xop.vpmacssdql
+    1, // llvm.x86.xop.vpmacsswd
+    1, // llvm.x86.xop.vpmacssww
+    1, // llvm.x86.xop.vpmacswd
+    1, // llvm.x86.xop.vpmacsww
+    1, // llvm.x86.xop.vpmadcsswd
+    1, // llvm.x86.xop.vpmadcswd
+    1, // llvm.x86.xop.vpperm
+    1, // llvm.x86.xop.vprotb
+    1, // llvm.x86.xop.vprotbi
+    1, // llvm.x86.xop.vprotd
+    1, // llvm.x86.xop.vprotdi
+    1, // llvm.x86.xop.vprotq
+    1, // llvm.x86.xop.vprotqi
+    1, // llvm.x86.xop.vprotw
+    1, // llvm.x86.xop.vprotwi
+    1, // llvm.x86.xop.vpshab
+    1, // llvm.x86.xop.vpshad
+    1, // llvm.x86.xop.vpshaq
+    1, // llvm.x86.xop.vpshaw
+    1, // llvm.x86.xop.vpshlb
+    1, // llvm.x86.xop.vpshld
+    1, // llvm.x86.xop.vpshlq
+    1, // llvm.x86.xop.vpshlw
+    3, // llvm.x86.xrstor
+    3, // llvm.x86.xrstor64
+    3, // llvm.x86.xrstors
+    3, // llvm.x86.xrstors64
+    3, // llvm.x86.xsave
+    3, // llvm.x86.xsave64
+    3, // llvm.x86.xsavec
+    3, // llvm.x86.xsavec64
+    3, // llvm.x86.xsaveopt
+    3, // llvm.x86.xsaveopt64
+    3, // llvm.x86.xsaves
+    3, // llvm.x86.xsaves64
+    3, // llvm.x86.xsetbv
+    3, // llvm.x86.xtest
+    1, // llvm.xcore.bitrev
+    3, // llvm.xcore.checkevent
+    45, // llvm.xcore.chkct
+    3, // llvm.xcore.clre
+    45, // llvm.xcore.clrpt
+    3, // llvm.xcore.clrsr
+    1, // llvm.xcore.crc32
+    1, // llvm.xcore.crc8
+    45, // llvm.xcore.edu
+    45, // llvm.xcore.eeu
+    45, // llvm.xcore.endin
+    45, // llvm.xcore.freer
+    3, // llvm.xcore.geted
+    3, // llvm.xcore.getet
+    1, // llvm.xcore.getid
+    3, // llvm.xcore.getps
+    3, // llvm.xcore.getr
+    45, // llvm.xcore.getst
+    45, // llvm.xcore.getts
+    45, // llvm.xcore.in
+    45, // llvm.xcore.inct
+    45, // llvm.xcore.initcp
+    45, // llvm.xcore.initdp
+    45, // llvm.xcore.initlr
+    45, // llvm.xcore.initpc
+    45, // llvm.xcore.initsp
+    45, // llvm.xcore.inshr
+    45, // llvm.xcore.int
+    45, // llvm.xcore.mjoin
+    45, // llvm.xcore.msync
+    45, // llvm.xcore.out
+    45, // llvm.xcore.outct
+    45, // llvm.xcore.outshr
+    45, // llvm.xcore.outt
+    45, // llvm.xcore.peek
+    45, // llvm.xcore.setc
+    46, // llvm.xcore.setclk
+    45, // llvm.xcore.setd
+    45, // llvm.xcore.setev
+    3, // llvm.xcore.setps
+    45, // llvm.xcore.setpsc
+    45, // llvm.xcore.setpt
+    46, // llvm.xcore.setrdy
+    3, // llvm.xcore.setsr
+    45, // llvm.xcore.settw
+    45, // llvm.xcore.setv
+    1, // llvm.xcore.sext
+    3, // llvm.xcore.ssync
+    45, // llvm.xcore.syncr
+    45, // llvm.xcore.testct
+    45, // llvm.xcore.testwct
+    16, // llvm.xcore.waitevent
+    1, // llvm.xcore.zext
+  };
+
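+  // Scratch space for the attribute sets assembled below; AS is sized for the
+  // largest recipe, and NumAttrs records how many of its slots get filled.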
+  AttributeList AS[5];
+  unsigned NumAttrs = 0;
+  if (id != 0) {
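+    // id is the 1-based intrinsic ID, so id - 1 indexes IntrinsicsToAttributesMap
+    // above; the stored value selects the attribute recipe expanded below.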
+    switch(IntrinsicsToAttributesMap[id - 1]) {
+    default: llvm_unreachable("Invalid attribute number");
+    case 3: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
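+    // Note: parameter attribute indices passed to AttributeList::get are
+    // 1-based (index 0 is the return value; AttributeList::FunctionIndex
+    // addresses the function itself).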
+    case 45: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 46: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture};
+      AS[1] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 6: {
+      const Attribute::AttrKind AttrParam2[]= {Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 25: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::InaccessibleMemOrArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
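+    // Recipe 21 is used by intrinsics that only access memory through their
+    // pointer arguments, e.g. the AVX-512 scatters and masked truncating
+    // stores in the table above.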
+    case 21: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 18: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 8: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 24: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 22: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[1] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 23: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[1] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 20: {
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 17: {
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind AttrParam3[]= {Attribute::NoCapture};
+      AS[1] = AttributeList::get(C, 3, AttrParam3);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 19: {
+      const Attribute::AttrKind AttrParam3[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 3, AttrParam3);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 28: {
+      const Attribute::AttrKind AttrParam4[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 4, AttrParam4);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 29: {
+      const Attribute::AttrKind AttrParam5[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 5, AttrParam5);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 30: {
+      const Attribute::AttrKind AttrParam6[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 6, AttrParam6);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 15: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::InaccessibleMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 32: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 27: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::WriteOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 40: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::WriteOnly,Attribute::ArgMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 39: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::WriteOnly,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 16: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 2: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly,Attribute::ArgMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 37: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 13: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 9: {
+      const Attribute::AttrKind AttrParam2[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly,Attribute::ArgMemOnly};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 10: {
+      const Attribute::AttrKind AttrParam2[]= {Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind AttrParam3[]= {Attribute::NoCapture,Attribute::ReadOnly};
+      AS[1] = AttributeList::get(C, 3, AttrParam3);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadOnly,Attribute::ArgMemOnly};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 1: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 12: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::NoCapture};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadNone};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 26: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::Returned};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadNone};
+      AS[1] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 2;
+      break;
+      }
+    case 11: {
+      const Attribute::AttrKind AttrParam1[]= {Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, 1, AttrParam1);
+      const Attribute::AttrKind AttrParam2[]= {Attribute::ReadNone};
+      AS[1] = AttributeList::get(C, 2, AttrParam2);
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::ReadNone};
+      AS[2] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 3;
+      break;
+      }
+    case 43: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 36: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::WriteOnly,Attribute::ArgMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 34: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::Speculatable,Attribute::ReadOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 4: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::Speculatable,Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 33: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::Convergent};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 38: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::Convergent,Attribute::InaccessibleMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 31: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::Convergent,Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 14: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::NoReturn};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 5: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::NoDuplicate,Attribute::InaccessibleMemOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 42: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoUnwind,Attribute::NoDuplicate,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 7: {
+      return AttributeList();
+      }
+    case 35: {
+      const Attribute::AttrKind Atts[] = {Attribute::ReadNone};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 44: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoReturn};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    case 41: {
+      const Attribute::AttrKind Atts[] = {Attribute::NoReturn,Attribute::WriteOnly};
+      AS[0] = AttributeList::get(C, AttributeList::FunctionIndex, Atts);
+      NumAttrs = 1;
+      break;
+      }
+    }
+  }
+  return AttributeList::get(C, makeArrayRef(AS, NumAttrs));
+}
+#endif // GET_INTRINSIC_ATTRIBUTES
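+
+// Illustrative sketch (not part of the generated output): assuming the block
+// above implements the standard Intrinsic::getAttributes(LLVMContext &, ID)
+// helper from IntrinsicImpl, a caller can stamp the computed attribute list
+// onto an intrinsic declaration like so:
+//
+//   Function *F = ...;  // some intrinsic declaration
+//   F->setAttributes(
+//       Intrinsic::getAttributes(F->getContext(), F->getIntrinsicID()));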
+
+// Get the LLVM intrinsic that corresponds to a GCC-style builtin.
+// This is used by the C front-end.  The builtin name is passed in
+// as BuiltinNameStr, and a target prefix (e.g. 'ppc') is passed in
+// as TargetPrefixStr.  The matching Intrinsic::ID is returned, or
+// Intrinsic::not_intrinsic if the builtin has no corresponding
+// intrinsic.
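+//
+// Illustrative sketch (not part of the generated output): using a target
+// prefix and builtin name that both appear in the BuiltinNames table below,
+// a front-end lookup might read:
+//
+//   Intrinsic::ID IntrinsicID =
+//       Intrinsic::getIntrinsicForGCCBuiltin("arm", "__builtin_arm_qadd");
+//   if (IntrinsicID != Intrinsic::not_intrinsic)
+//     /* emit a call to the intrinsic instead of a library call */;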
+#ifdef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefixStr, StringRef BuiltinNameStr) {
+  static const char BuiltinNames[] = {
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'd', 'j', 'u', 's',
+  't', '_', 't', 'r', 'a', 'm', 'p', 'o', 'l', 'i', 'n', 'e', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'e', 'b', 'u', 'g', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'u', 'n',
+  'w', 'i', 'n', 'd', '_', 'i', 'n', 'i', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'f', 'l', 't', '_', 'r', 'o', 'u', 'n', 'd', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'n', 'i', 't',
+  '_', 't', 'r', 'a', 'm', 'p', 'o', 'l', 'i', 'n', 'e', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 's',
+  'i', 'z', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  't', 'a', 'c', 'k', '_', 'r', 'e', 's', 't', 'o', 'r', 'e', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 't', 'a', 'c', 'k', '_', 's',
+  'a', 'v', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't',
+  'h', 'r', 'e', 'a', 'd', '_', 'p', 'o', 'i', 'n', 't', 'e', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'r', 'a', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'd', 'm',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 'd', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 'i', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'b', 'u', 'f', 'f', 'e',
+  'r', '_', 'w', 'b', 'i', 'n', 'v', 'l', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'b', 'u', 'f',
+  'f', 'e', 'r', '_', 'w', 'b', 'i', 'n', 'v', 'l', '1', '_', 's', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'w', 'b', 'i', 'n', 'v', 'l',
+  '1', '_', 'v', 'o', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'u', 'b', 'e', 'i', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 'c', 'u', 'b', 'e', 'm', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'u', 'b', 'e',
+  's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
+  'd', 'g', 'c', 'n', '_', 'c', 'u', 'b', 'e', 't', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c',
+  'v', 't', '_', 'p', 'k', '_', 'u', '8', '_', 'f', '3', '2', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
+  'd', 'i', 's', 'p', 'a', 't', 'c', 'h', '_', 'i', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd',
+  'i', 's', 'p', 'a', 't', 'c', 'h', '_', 'p', 't', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd',
+  's', '_', 'b', 'p', 'e', 'r', 'm', 'u', 't', 'e', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's',
+  '_', 'f', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_', 'f', 'm', 'a', 'x',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
+  'c', 'n', '_', 'd', 's', '_', 'f', 'm', 'i', 'n', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's',
+  '_', 'p', 'e', 'r', 'm', 'u', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_', 's',
+  'w', 'i', 'z', 'z', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'f', 'm', 'e', 'd', '3', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 'f', 'm', 'u', 'l', '_', 'l', 'e', 'g', 'a', 'c', 'y', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
+  '_', 'g', 'r', 'o', 'u', 'p', 's', 't', 'a', 't', 'i', 'c', 's', 'i', 'z',
+  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
+  'g', 'c', 'n', '_', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', '_', 'b', 'u',
+  'f', 'f', 'e', 'r', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'm', 'p', 'l',
+  'i', 'c', 'i', 't', 'a', 'r', 'g', '_', 'p', 't', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i',
+  'n', 't', 'e', 'r', 'p', '_', 'm', 'o', 'v', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't',
+  'e', 'r', 'p', '_', 'p', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't', 'e', 'r', 'p',
+  '_', 'p', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'm', 'd', 'g', 'c', 'n', '_', 'k', 'e', 'r', 'n', 'a', 'r', 'g', '_', 's',
+  'e', 'g', 'm', 'e', 'n', 't', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'l', 'e',
+  'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
+  'd', 'g', 'c', 'n', '_', 'm', 'b', 'c', 'n', 't', '_', 'h', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
+  '_', 'm', 'b', 'c', 'n', 't', '_', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'q', 's',
+  'a', 'd', '_', 'p', 'k', '_', 'u', '1', '6', '_', 'u', '8', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
+  'm', 'q', 's', 'a', 'd', '_', 'u', '3', '2', '_', 'u', '8', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
+  'm', 's', 'a', 'd', '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'q', 's', 'a', 'd', '_',
+  'p', 'k', '_', 'u', '1', '6', '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'q', 'u', 'e',
+  'u', 'e', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'r', 'c', 'p', '_', 'l', 'e',
+  'g', 'a', 'c', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'm', 'd', 'g', 'c', 'n', '_', 'r', 'e', 'a', 'd', 'f', 'i', 'r', 's',
+  't', 'l', 'a', 'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'r', 'e', 'a', 'd', 'l', 'a', 'n',
+  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
+  'g', 'c', 'n', '_', 'r', 's', 'q', '_', 'l', 'e', 'g', 'a', 'c', 'y', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 's', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's',
+  '_', 'd', 'c', 'a', 'c', 'h', 'e', '_', 'i', 'n', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's',
+  '_', 'd', 'c', 'a', 'c', 'h', 'e', '_', 'i', 'n', 'v', '_', 'v', 'o', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
+  'c', 'n', '_', 's', '_', 'd', 'c', 'a', 'c', 'h', 'e', '_', 'w', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 's', '_', 'd', 'c', 'a', 'c', 'h', 'e', '_', 'w', 'b', '_', 'v',
+  'o', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
+  'd', 'g', 'c', 'n', '_', 's', '_', 'd', 'e', 'c', 'p', 'e', 'r', 'f', 'l',
+  'e', 'v', 'e', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'g', 'e', 't', 'p', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
+  'n', '_', 's', '_', 'g', 'e', 't', 'r', 'e', 'g', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_',
+  'i', 'n', 'c', 'p', 'e', 'r', 'f', 'l', 'e', 'v', 'e', 'l', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
+  's', '_', 'm', 'e', 'm', 'r', 'e', 'a', 'l', 't', 'i', 'm', 'e', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
+  '_', 's', '_', 'm', 'e', 'm', 't', 'i', 'm', 'e', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_',
+  's', 'e', 'n', 'd', 'm', 's', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 's', 'e', 'n',
+  'd', 'm', 's', 'g', 'h', 'a', 'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 's', 'l',
+  'e', 'e', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'w', 'a', 'i', 't', 'c', 'n', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
+  'c', 'n', '_', 's', 'a', 'd', '_', 'h', 'i', '_', 'u', '8', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
+  's', 'a', 'd', '_', 'u', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'a', 'd', '_', 'u',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
+  'g', 'c', 'n', '_', 'w', 'a', 'v', 'e', '_', 'b', 'a', 'r', 'r', 'i', 'e',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
+  'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g', 'r', 'o', 'u', 'p', '_', 'i',
+  'd', '_', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'm', 'd', 'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g', 'r', 'o', 'u', 'p',
+  '_', 'i', 'd', '_', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g', 'r', 'o',
+  'u', 'p', '_', 'i', 'd', '_', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'w', 'r', 'i', 't', 'e',
+  'l', 'a', 'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 'c', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'r', 'm', '_', 'c', 'd', 'p', '2', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'g', 'e', 't', '_',
+  'f', 'p', 's', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'l', 'd', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'l', 'd', 'c', '2', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'l', 'd', 'c',
+  '2', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 'l', 'd', 'c', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 'm', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'm', 'c', 'r', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'm', 'r',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 'm', 'r', 'c', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'q', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 'a', 'd', 'd', '1', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  'q', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'q', 'a', 's', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 's', 'a', 'x', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 's',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 'q', 's', 'u', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 's', 'u', 'b', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'a',
+  'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 's', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'a', 's', 'x', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'e',
+  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 's', 'e', 't', '_', 'f', 'p', 's', 'c', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h', 'a', 'd', 'd',
+  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 's', 'h', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h', 'a', 's', 'x', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h',
+  's', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 'h', 's', 'u', 'b', '1', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h', 's', 'u', 'b',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 's', 'm', 'l', 'a', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'b', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm',
+  'l', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 'm', 'l', 'a', 'd', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'l', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  's', 'm', 'l', 'a', 'l', 'd', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 't', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm',
+  'l', 'a', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'w', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'w',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 's', 'm', 'l', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 's', 'd', 'x', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l',
+  's', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 'm', 'l', 's', 'l', 'd', 'x', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'a', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  's', 'm', 'u', 'a', 'd', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 'b', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u',
+  'l', 'b', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 'm', 'u', 'l', 't', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 't', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  's', 'm', 'u', 'l', 'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 'w', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u',
+  's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 's', 'm', 'u', 's', 'd', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'a', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'a',
+  't', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 's', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'u', 'b', '1', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's',
+  'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'r', 'm', '_', 's', 't', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 's', 't', 'c', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 't', 'c', '2', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  's', 't', 'c', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 's', 'x', 't', 'a', 'b', '1', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'x', 't', 'b',
+  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 'u', 'a', 'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'a', 'd', 'd', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'a',
+  's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 'u', 'h', 'a', 'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'h', 'a', 'd', 'd', '8',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  'u', 'h', 'a', 's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'u', 'h', 's', 'a', 'x', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'h', 's', 'u', 'b',
+  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
+  'm', '_', 'u', 'h', 's', 'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 'a', 'd', 'd', '1', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  'u', 'q', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 'a', 's', 'x', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 's', 'a',
+  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
+  '_', 'u', 'q', 's', 'u', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 's', 'u', 'b', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u',
+  's', 'a', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'r', 'm', '_', 'u', 's', 'a', 'd', 'a', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u',
+  's', 'a', 't', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'u', 's', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 's', 'u', 'b', '1', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
+  'u', 's', 'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'r', 'm', '_', 'u', 'x', 't', 'a', 'b', '1', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'x', 't',
+  'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b',
+  'p', 'f', '_', 'l', 'o', 'a', 'd', '_', 'b', 'y', 't', 'e', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_', 'l', 'o', 'a',
+  'd', '_', 'h', 'a', 'l', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'b', 'p', 'f', '_', 'l', 'o', 'a', 'd', '_', 'w', 'o', 'r', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_',
+  'p', 's', 'e', 'u', 'd', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'b',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'b', 's', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'a', 'b', 's', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd',
+  'h', '_', 'h', '1', '6', '_', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 'h', 'l', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 'l', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_',
+  'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h',
+  '1', '6', '_', 's', 'a', 't', '_', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'h',
+  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1',
+  '6', '_', 's', 'a', 't', '_', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'l', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'l', '1', '6',
+  '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_',
+  'l', '1', '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd',
+  'd', 'h', '_', 'l', '1', '6', '_', 's', 'a', 't', '_', 'h', 'l', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'l', '1', '6', '_', 's',
+  'a', 't', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'a', 'd', 'd', 'p', 's', 'a', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '2', '_', 'a', 'd', 'd', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'a', 'd', 'd', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'n',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'n', 'd', 'i', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'a', 'n', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'a', 's', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 's', 'r',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm', 'b', 'i', 'n', 'e',
+  '_', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm', 'b', 'i',
+  'n', 'e', '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm',
+  'b', 'i', 'n', 'e', '_', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c',
+  'o', 'm', 'b', 'i', 'n', 'e', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'c', 'o', 'm', 'b', 'i', 'n', 'e', 'i', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'c', 'o', 'm', 'b', 'i', 'n', 'e', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'm', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'a', 'x',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'a', 'x', 'u', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'm', 'a', 'x', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'm', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'i', 'n', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'i', 'n', 'u', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '2', '_', 'm', 'i', 'n', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'n', 'e', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'n', 'e', 'g', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 'n', 'e', 'g', 's', 'a', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'n',
+  'o', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'o', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'o', 'r', 'i', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'o', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'r', 'o', 'u', 'n', 'd',
+  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 's', 'a', 't', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 's', 'a', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't', 'u', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 'h', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 'h', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6',
+  '_', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_',
+  'h', '1', '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u',
+  'b', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'h', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 's',
+  'a', 't', '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b',
+  'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'l', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 's', 'a',
+  't', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h',
+  '_', 'l', '1', '6', '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's',
+  'u', 'b', 'h', '_', 'l', '1', '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 's', 'u', 'b', 'h', '_', 'l', '1', '6', '_', 's', 'a', 't', '_',
+  'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'l',
+  '1', '6', '_', 's', 'a', 't', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 's', 'u', 'b', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b',
+  'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 's', 'a', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v', 'a', 'd', 'd', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 's', 'v', 'a', 'd', 'd', 'h', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 's', 'v', 'a', 'd', 'd', 'u', 'h', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 's', 'v', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 's', 'v', 'a', 'v', 'g', 'h', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 's', 'v', 'n', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  's', 'v', 's', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v',
+  's', 'u', 'b', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v', 's',
+  'u', 'b', 'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'w', 'i',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'x', 't', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 's', 'x', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  's', 'x', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', 'i', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '2', '_', 't', 'f', 'r', 'i', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  't', 'f', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', 'p',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', 's', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'a', 'b', 's', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'a', 'b', 's', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'a', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
+  'a', 'b', 's', 'w', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
+  'a', 'd', 'd', 'b', '_', 'm', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'v', 'a', 'd', 'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd',
+  'd', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '2', '_', 'v', 'a', 'd', 'd', 'w', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'v', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a',
+  'v', 'g', 'h', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v',
+  'g', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'b', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'a', 'v', 'g', 'u', 'w', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'v', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a',
+  'v', 'g', 'w', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v',
+  'g', 'w', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'b',
+  'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'b', 'g',
+  't', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'e',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'g', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'g', 't', 'u',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'e', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', 'u', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'c', 'o', 'n', 'j', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'm', 'a', 'x', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
+  'm', 'a', 'x', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'u', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'u', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'm', 'i', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
+  'm', 'i', 'n', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'u', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'u', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'v', 'n', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'v', 'n', 'a', 'v', 'g', 'h', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'v', 'n', 'a', 'v', 'g', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
+  'n', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n', 'a',
+  'v', 'g', 'w', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n', 'a',
+  'v', 'g', 'w', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 'a', 'd',
+  'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 'a', 'd', 'd',
+  'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r',
+  's', 'a', 'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 's',
+  'a', 'd', 'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
+  'v', 's', 'u', 'b', 'b', '_', 'm', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
+  '_', 'v', 's', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's',
+  'u', 'b', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'u', 'b',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'u', 'h', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'w', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '2', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'x', 'o', 'r',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'z', 'x', 't', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '2', '_', 'z', 'x', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'a', 'n', 'd', 'n',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'b', 'i', 't', 's', 'p', 'l', 'i',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'b', 'i', 't', 's', 'p', 'l', 'i',
+  't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'b', 'o', 'u', 'n', 'd', 's',
+  'c', 'h', 'e', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p',
+  'b', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'e',
+  'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'u', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'u', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'h', 'e', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'A', '4', '_', 'c', 'm', 'p', 'h', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '4', '_', 'c', 'm', 'p', 'h', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'c', 'm', 'p', 'h', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c',
+  'm', 'p', 'h', 'g', 't', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm',
+  'p', 'h', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'o',
+  'm', 'b', 'i', 'n', 'e', 'i', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c',
+  'o', 'm', 'b', 'i', 'n', 'e', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'c', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4',
+  '_', 'c', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '4', '_', 'm', 'o', 'd', 'w', 'r', 'a', 'p', 'u', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '4', '_', 'o', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'o', 'r', 'n',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'e', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'e', 'q', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'n', 'e', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'n', 'e', 'q', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'i', '_', 's', 'a',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r',
+  'r', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 't', 'l', 'b',
+  'm', 'a', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c', 'm',
+  'p', 'b', 'e', 'q', '_', 'a', 'n', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'c', 'm', 'p', 'b', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'c', 'm', 'p', 'b', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
+  'c', 'm', 'p', 'b', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
+  'c', 'm', 'p', 'b', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'c', 'm', 'p', 'h', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'c', 'm', 'p', 'h', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'c', 'm', 'p', 'h', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4',
+  '_', 'v', 'c', 'm', 'p', 'w', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4',
+  '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4',
+  '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
+  '4', '_', 'v', 'r', 'm', 'a', 'x', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
+  'v', 'r', 'm', 'a', 'x', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
+  'r', 'm', 'a', 'x', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r',
+  'm', 'a', 'x', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i',
+  'n', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'u',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'u', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '5', '_', 'v', 'a', 'd', 'd', 'h', 'u', 'b', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'A', '6', '_', 'v', 'c', 'm', 'p', 'b', 'e', 'q', '_', 'n', 'o',
+  't', 'a', 'n', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '6', '_', 'v', 'c', 'm', 'p',
+  'b', 'e', 'q', '_', 'n', 'o', 't', 'a', 'n', 'y', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'C', '2', '_', 'a', 'l', 'l', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '2', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'a', 'n',
+  'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'a', 'n', 'y', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'C', '2', '_', 'b', 'i', 't', 's', 'c', 'l', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'C', '2', '_', 'b', 'i', 't', 's', 'c', 'l', 'r', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'C', '2', '_', 'b', 'i', 't', 's', 's', 'e', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '2', '_', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_',
+  'c', 'm', 'p', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm',
+  'p', 'e', 'q', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g',
+  'e', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 'e', 'u',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 't', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '2', '_', 'c', 'm', 'p', 'g', 't', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2',
+  '_', 'c', 'm', 'p', 'g', 't', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c',
+  'm', 'p', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm',
+  'p', 'g', 't', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p',
+  'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'l', 't', 'u',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '2', '_', 'm', 'u', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'u',
+  'x', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'u', 'x', 'i', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'u', 'x', 'r', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'C', '2', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'o',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'o', 'r', 'n', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '2', '_', 'p', 'x', 'f', 'e', 'r', '_', 'm', 'a', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'C', '2', '_', 't', 'f', 'r', 'p', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2',
+  '_', 't', 'f', 'r', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'v', 'i',
+  't', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'v', 'm', 'u',
+  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'x', 'o', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '4', '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
+  '4', '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
+  '4', '_', 'a', 'n', 'd', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_',
+  'a', 'n', 'd', '_', 'o', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c',
+  'm', 'p', 'l', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p',
+  'l', 't', 'e', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'l',
+  't', 'e', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'l', 't',
+  'e', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'n', 'e',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'n', 'e', 'q', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'C', '4', '_', 'f', 'a', 's', 't', 'c', 'o', 'r', 'n',
+  'e', 'r', '9', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'f', 'a', 's', 't', 'c',
+  'o', 'r', 'n', 'e', 'r', '9', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
+  '4', '_', 'n', 'b', 'i', 't', 's', 'c', 'l', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
+  '4', '_', 'n', 'b', 'i', 't', 's', 'c', 'l', 'r', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '4', '_', 'n', 'b', 'i', 't', 's', 's', 'e', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'C', '4', '_', 'o', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4',
+  '_', 'o', 'r', '_', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_',
+  'o', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'o', 'r', '_',
+  'o', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_',
+  'd', '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v',
+  '_', 'd', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n',
+  'v', '_', 'd', 'f', '2', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o',
+  'n', 'v', '_', 'd', 'f', '2', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 's', 'f', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 'u',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f',
+  '2', 'u', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 'u', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F',
+  '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 'u', 'w', '_', 'c', 'h',
+  'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd',
+  'f', '2', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_',
+  'd', 'f', '2', 'w', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
+  '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F',
+  '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'd', '_', 'c', 'h', 'o',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f',
+  '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_',
+  's', 'f', '2', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n',
+  'v', '_', 's', 'f', '2', 'u', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'u', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'u',
+  'w', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o',
+  'n', 'v', '_', 's', 'f', '2', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c',
+  'o', 'n', 'v', '_', 's', 'f', '2', 'w', '_', 'c', 'h', 'o', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u', 'd', '2', 'd', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u', 'd', '2',
+  's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u',
+  'w', '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v',
+  '_', 'u', 'w', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o',
+  'n', 'v', '_', 'w', '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c',
+  'o', 'n', 'v', '_', 'w', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  'd', 'f', 'c', 'l', 'a', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd',
+  'f', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f',
+  'c', 'm', 'p', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c',
+  'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'm',
+  'p', 'u', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'i', 'm', 'm',
+  '_', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'i', 'm', 'm', '_',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'a', 'd', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'F', '2', '_', 's', 'f', 'c', 'l', 'a', 's', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'F', '2', '_', 's', 'f', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'F', '2', '_', 's', 'f', 'c', 'm', 'p', 'g', 'e', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F',
+  '2', '_', 's', 'f', 'c', 'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
+  '_', 's', 'f', 'c', 'm', 'p', 'u', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  's', 'f', 'f', 'i', 'x', 'u', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  's', 'f', 'f', 'i', 'x', 'u', 'p', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  's', 'f', 'f', 'i', 'x', 'u', 'p', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  's', 'f', 'f', 'm', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f',
+  'm', 'a', '_', 'l', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f',
+  'f', 'm', 'a', '_', 's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f',
+  'f', 'm', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f', 'm', 's',
+  '_', 'l', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'i', 'm',
+  'm', '_', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'i', 'm', 'm',
+  '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'm', 'a', 'x', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'F', '2', '_', 's', 'f', 'm', 'i', 'n', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'F', '2', '_', 's', 'f', 'm', 'p', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
+  's', 'f', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'L', '2', '_', 'l', 'o', 'a',
+  'd', 'w', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'L', '4',
+  '_', 'l', 'o', 'a', 'd', 'd', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'a', 'c', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'a', 'c', 'c', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
+  'a', 'c', 'i', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
+  'a', 'c', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
+  'a', 'c', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
+  'a', 'c', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
+  'a', 'c', 's', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c',
+  'm', 'a', 'c', 's', 'c', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'c', 'm', 'p', 'y', 'i', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'c', 'm', 'p', 'y', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'c', 'm', 'p', 'y', 'r', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'c', 'm', 'p', 'y', 'r', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'c', 'm', 'p', 'y', 'r', 's', 'c', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 'r', 's', 'c', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', '_', 's', '0', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', 'c', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', 'c', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', 'c', '_',
+  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', 'c',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y',
+  's', 's', '_', 'a', 'c', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'd', 'p', 'm', 'p', 'y', 's', 's', '_', 'n', 'a', 'c', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y', 's', 's', '_',
+  'r', 'n', 'd', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p',
+  'm', 'p', 'y', 's', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'd', 'p', 'm', 'p', 'y', 'u', 'u', '_', 'a', 'c', 'c', '_', 's', '0', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y', 'u', 'u', '_', 'n',
+  'a', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm',
+  'p', 'y', 'u', 'u', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'h',
+  'm', 'm', 'p', 'y', 'h', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'h', 'm', 'm', 'p', 'y', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'h', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'h', 'm', 'm', 'p', 'y', 'l', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'a', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'a', 'c', 's', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'a', 'c', 's', 'i', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
+  'c', 'h', 's', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'm', 'a', 'c', 'h', 's', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'm', 'a', 'c', 'h', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'm', 'a', 'c', 'h', 's', '_', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 'r', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 'r', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'h',
+  's', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
+  'c', 'u', 'h', 's', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'm', 'a', 'c', 'u', 'h', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'm', 'a', 'c', 'u', 'h', 's', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'l', 's', '_', 'r', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'l', 's', '_',
+  'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u',
+  'l', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
+  'c', 'u', 'l', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'm', 'p', 'y', 'h', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'm', 'p', 'y', 'h', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'm', 'p', 'y', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'm', 'p', 'y', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 's', '0', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 'r', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 'r', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'l',
+  '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y',
+  'u', 'l', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm',
+  'p', 'y', 'u', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'm', 'p', 'y', 'u', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'h', 'h',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
+  'c', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'l', 'h',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
+  'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'l', 'l',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
+  'c', 'c', '_', 's', 'a', 't', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't',
+  '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_',
+  's', 'a', 't', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'l', 'h',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
+  'c', 'c', '_', 's', 'a', 't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't',
+  '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'h', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'h', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'l',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h',
+  'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_',
+  'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'h', 'h',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
+  'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'l', 'h',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
+  'a', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'l', 'l',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
+  'a', 'c', '_', 's', 'a', 't', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't',
+  '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_',
+  's', 'a', 't', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'l', 'h',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
+  'a', 'c', '_', 's', 'a', 't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't',
+  '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_',
+  'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 'r', 'n', 'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_',
+  'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_',
+  'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 'r', 'n', 'd', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'h', 'h', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_',
+  'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_',
+  'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 's', 'a', 't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_',
+  'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'h', 'h', '_', 's', '0', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r',
+  'n', 'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'h', 'l', '_',
+  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a',
+  't', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_',
+  'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r',
+  'n', 'd', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'l', 'l', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p', '_',
+  's', '1', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'd', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'h', 'h',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_',
+  'a', 'c', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c',
+  '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'd', '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'l', 'l',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_',
+  'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'p', 'y', 'd', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'h', 'h', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'h', 'l', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'h', 'l', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'l',
+  'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
+  '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'd', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', 'd', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a',
+  'c', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h',
+  'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
+  '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a',
+  'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'h',
+  'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
+  '_', 'r', 'n', 'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n',
+  'd', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'l',
+  'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
+  '_', 'r', 'n', 'd', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'l', 'l', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 's', 'm', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 's', 'u', '_', 'u', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c',
+  'c', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h',
+  'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
+  '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c',
+  'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'h', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'h', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h',
+  'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
+  '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'u', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', 'u', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'u', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'l', 'l', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_',
+  'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  'u', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'h', 'l', '_',
+  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n',
+  'a', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '0', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_',
+  'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  'u', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'l', 'l', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'u',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a',
+  'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c',
+  'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
+  'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c',
+  '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_',
+  'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  'u', 'd', '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'h', 'h', '_', 's', '0',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'h', 'h',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
+  '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'u', 'd', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'h', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'l',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
+  '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
+  'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_',
+  'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
+  'u', 'd', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'h',
+  'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
+  'd', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'l', 'h',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
+  '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'l', 'l', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'n', 'a', 'c', 'c', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'n', 'a', 'c', 'c', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 's', 'u', 'b', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v',
+  'a', 'b', 's', 'd', 'i', 'f', 'f', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'v', 'c', 'm', 'a', 'c', '_', 's', '0', '_', 's', 'a', 't', '_', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c', 'm', 'a', 'c', '_', 's', '0',
+  '_', 's', 'a', 't', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c',
+  'm', 'p', 'y', '_', 's', '0', '_', 's', 'a', 't', '_', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'v', 'c', 'm', 'p', 'y', '_', 's', '0', '_', 's', 'a',
+  't', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c', 'm', 'p', 'y',
+  '_', 's', '1', '_', 's', 'a', 't', '_', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'v', 'c', 'm', 'p', 'y', '_', 's', '1', '_', 's', 'a', 't', '_', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'a', 'c', 's', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'a', 'c', 's', '_',
+  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p', 'y', 'r',
+  's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p',
+  'y', 'r', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd',
+  'm', 'p', 'y', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v',
+  'd', 'm', 'p', 'y', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'v', 'm', 'a', 'c', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a',
+  'c', '2', 'e', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a', 'c',
+  '2', 'e', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm',
+  'a', 'c', '2', 'e', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'v', 'm', 'a', 'c', '2', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'v', 'm', 'a', 'c', '2', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'v', 'm', 'a', 'c', '2', 's', 'u', '_', 's', '0', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'v', 'm', 'a', 'c', '2', 's', 'u', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 'e', 's', '_', 's',
+  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 'e', 's',
+  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2',
+  's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y',
+  '2', 's', '_', 's', '0', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
+  '_', 'v', 'm', 'p', 'y', '2', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '2', '_', 'v', 'm', 'p', 'y', '2', 's', '_', 's', '1', 'p', 'a', 'c', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's', 'u', '_',
+  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's',
+  'u', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'a', 'd',
+  'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'a', 'd', 'd', 'u',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'a', 'c', 'i',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'a',
+  'c', 'i', '_', 's', '0', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r',
+  'c', 'm', 'a', 'c', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
+  'v', 'r', 'c', 'm', 'a', 'c', 'r', '_', 's', '0', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'i', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'i', '_', 's', '0',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'r',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p',
+  'y', 'r', '_', 's', '0', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r',
+  'c', 'm', 'p', 'y', 's', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 's', '_', 's', '1', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 's', '_', 's',
+  '1', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'm', 'a', 'c',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'm', 'p', 'y',
+  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'x', 'o', 'r', '_', 'x',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'a',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'a', 'n',
+  'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'o', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'x', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'i', '_', 'w', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'i', '_', 'w', 'h', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'r', '_', 'w', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'r', '_', 'w', 'h',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'a', 'c', '_', 'u', 'p', '_',
+  's', '1', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'p',
+  'y', 'r', 'i', '_', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_',
+  'm', 'p', 'y', 'r', 'i', '_', 'a', 'd', 'd', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '4', '_', 'm', 'p', 'y', 'r', 'i', '_', 'a', 'd', 'd', 'r', '_', 'u', '2',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'p', 'y', 'r', 'r', '_', 'a', 'd',
+  'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'p', 'y', 'r', 'r', '_',
+  'a', 'd', 'd', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'n', 'a', 'c', '_',
+  'u', 'p', '_', 's', '1', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4',
+  '_', 'o', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o',
+  'r', '_', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r',
+  '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r', '_', 'x', 'o',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'p', 'm', 'p', 'y', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '4', '_', 'p', 'm', 'p', 'y', 'w', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '4', '_', 'v', 'p', 'm', 'p', 'y', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '4', '_', 'v', 'p', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 'a',
+  'c', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm',
+  'p', 'y', 'e', 'h', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 's', '0', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 's', '1',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_',
+  'a', 'c', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'v', 'r',
+  'm', 'p', 'y', 'o', 'h', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_', 's', '0', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_', 's',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'a', 'n', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'a', 'n', 'd', 'n',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'o', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'x', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '5', '_', 'v', 'd', 'm', 'a', 'c', 'b', 's', 'u', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '5', '_', 'v', 'd', 'm', 'p', 'y', 'b', 's', 'u', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'M', '5', '_', 'v', 'm', 'a', 'c', 'b', 's', 'u', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'M', '5', '_', 'v', 'm', 'a', 'c', 'b', 'u', 'u', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'M', '5', '_', 'v', 'm', 'p', 'y', 'b', 's', 'u', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '5', '_', 'v', 'm', 'p', 'y', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5',
+  '_', 'v', 'r', 'm', 'a', 'c', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5',
+  '_', 'v', 'r', 'm', 'a', 'c', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5',
+  '_', 'v', 'r', 'm', 'p', 'y', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5',
+  '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '6',
+  '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
+  '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 'd', 'd', 'a', 's', 'l', '_', 'r', 'r', 'r', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'p', '_', 'a',
+  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_',
+  'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
+  '_', 'i', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  'a', 's', 'l', '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'a', 's', 'l', '_', 'i', '_', 'p', '_', 'x', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r', '_', 'a', 'c',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r',
+  '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_',
+  'i', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
+  's', 'l', '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'a', 's', 'l', '_', 'i', '_', 'r', '_', 's', 'a', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r', '_', 'x', 'a', 'c',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'v',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'v',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r',
+  '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
+  'l', '_', 'r', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_', 'x', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'r', '_', 'a', 'c',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'r',
+  '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_',
+  'r', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
+  's', 'l', '_', 'r', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'a', 's', 'l', '_', 'r', '_', 'r', '_', 's', 'a', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'v', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'v', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'a',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_',
+  'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r',
+  '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
+  's', 'r', '_', 'i', '_', 'p', '_', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'r', 'n', 'd', '_', 'g',
+  'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'a', 's', 'r', '_', 'i', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  'a', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'n', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '_', 'n',
+  'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_',
+  'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_',
+  'i', '_', 'r', '_', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
+  's', 'r', '_', 'i', '_', 'r', '_', 'r', 'n', 'd', '_', 'g', 'o', 'o', 'd',
+  's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
+  'r', '_', 'i', '_', 's', 'v', 'w', '_', 't', 'r', 'u', 'n', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'v', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'v', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p', '_', 'a',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_',
+  'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r',
+  '_', 'r', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
+  's', 'r', '_', 'r', '_', 'p', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'a', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'n', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r', '_',
+  'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r',
+  '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r',
+  '_', 'r', '_', 'r', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  'a', 's', 'r', '_', 'r', '_', 's', 'v', 'w', '_', 't', 'r', 'u', 'n', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'v', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'v', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'b', 'r', 'e', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'b', 'r', 'e', 'v', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c',
+  'a', 'b', 'a', 'c', 'e', 'n', 'c', 'b', 'i', 'n', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'c', 'l', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', '0',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', '1', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'c', 'l', '1', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c',
+  'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', 'b', 'n', 'o', 'r',
+  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', 'b', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'c', 'l', 'r', 'b', 'i', 't', '_', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'c', 'l', 'r', 'b', 'i', 't', '_', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'c', 't', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c',
+  't', '0', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 't', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'c', 't', '1', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'd', 'e', 'i', 'n', 't', 'e', 'r', 'l', 'e', 'a', 'v', 'e', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'u', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'u', '_', 'r',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c', 't',
+  'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c',
+  't', 'u', 'p', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n',
+  's', 'e', 'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n', 's', 'e',
+  'r', 't', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n', 's',
+  'e', 'r', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n', 's', 'e',
+  'r', 't', 'p', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n',
+  't', 'e', 'r', 'l', 'e', 'a', 'v', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  'l', 'f', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_',
+  'r', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r',
+  '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's',
+  'l', '_', 'r', '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_', 'x',
+  'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'r',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_',
+  'r', '_', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l',
+  's', 'l', '_', 'r', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'l', '_', 'r', '_', 'r', '_', 'o', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'v', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'v', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '_', 'a',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_',
+  'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r',
+  '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l',
+  's', 'r', '_', 'i', '_', 'p', '_', 'x', 'a', 'c', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'n',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r',
+  '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_',
+  'i', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's',
+  'r', '_', 'i', '_', 'r', '_', 'x', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'r', '_', 'i', '_', 'v', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'r', '_', 'i', '_', 'v', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'l', 's', 'r', '_', 'r', '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '_', 'a', 'n', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '_',
+  'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r',
+  '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r',
+  '_', 'r', '_', 'p', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  'l', 's', 'r', '_', 'r', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l',
+  's', 'r', '_', 'r', '_', 'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 'l', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'n', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'r', '_', 'n', 'a',
+  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'r',
+  '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r',
+  '_', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r',
+  '_', 'v', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'p', 'a', 'c', 'k', 'h',
+  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'p', 'a', 'r', 'i', 't', 'y', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 's', 'e', 't', 'b', 'i', 't', '_', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 's', 'e', 't', 'b', 'i', 't', '_', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'e', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'e', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'o', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'o', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v',
+  '_', 's', 't', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'b', 'r', 'e', 'v', '_', 's', 't', 'h', 'h', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_',
+  's', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 's', 't', 'o', 'r', 'e',
+  'w', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
+  's', 'v', 's', 'a', 't', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 's',
+  'v', 's', 'a', 't', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't',
+  'a', 'b', 'l', 'e', 'i', 'd', 'x', 'b', '_', 'g', 'o', 'o', 'd', 's', 'y',
+  'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l',
+  'e', 'i', 'd', 'x', 'd', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a',
+  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l', 'e', 'i', 'd',
+  'x', 'h', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l', 'e', 'i', 'd', 'x', 'w', '_',
+  'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '2', '_', 't', 'o', 'g', 'g', 'l', 'e', 'b', 'i', 't', '_', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 't', 'o', 'g', 'g', 'l', 'e', 'b', 'i', 't', '_',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 's', 't', 'b', 'i', 't', '_',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 's', 't', 'b', 'i', 't', '_',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'i',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'r',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'c', 'n', 'e', 'g', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'v', 'c', 'r', 'o', 't', 'a', 't', 'e', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'v', 'r', 'c', 'n', 'e', 'g', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'v', 'r', 'n', 'd', 'p', 'a', 'c', 'k', 'w', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'r', 'n', 'd', 'p', 'a', 'c', 'k',
+  'w', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h', 'b', '_',
+  'n', 'o', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's',
+  'a', 't', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a',
+  't', 'h', 'u', 'b', '_', 'n', 'o', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '2', '_', 'v', 's', 'a', 't', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
+  '_', 'v', 's', 'a', 't', 'w', 'h', '_', 'n', 'o', 'p', 'a', 'c', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'w', 'u', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'w', 'u', 'h', '_', 'n', 'o',
+  'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l',
+  'a', 't', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l',
+  'a', 't', 'r', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l',
+  'i', 'c', 'e', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p',
+  'l', 'i', 'c', 'e', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's',
+  'x', 't', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'x', 't',
+  'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'e',
+  'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'e',
+  'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'o',
+  'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'o',
+  'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'z', 'x', 't', 'b', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '2', '_', 'v', 'z', 'x', 't', 'h', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '4', '_', 'a', 'd', 'd', 'a', 'd', 'd', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '4', '_', 'a', 'd', 'd', 'i', '_', 'a', 's', 'l', '_', 'r', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '4', '_', 'a', 'd', 'd', 'i', '_', 'l', 's', 'r',
+  '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'a', 'n', 'd', 'i', '_',
+  'a', 's', 'l', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'a', 'n',
+  'd', 'i', '_', 'l', 's', 'r', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'c', 'l', 'b', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
+  'c', 'l', 'b', 'p', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
+  'c', 'l', 'b', 'p', 'n', 'o', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
+  'e', 'x', 't', 'r', 'a', 'c', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'e',
+  'x', 't', 'r', 'a', 'c', 't', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'p', '_', 'r', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '4', '_', 'l', 's', 'l', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
+  'n', 't', 's', 't', 'b', 'i', 't', '_', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'n', 't', 's', 't', 'b', 'i', 't', '_', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '4', '_', 'o', 'r', '_', 'a', 'n', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'o', 'r', '_', 'a', 'n', 'd', 'i', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'o', 'r', '_', 'o', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o',
+  'r', 'i', '_', 'a', 's', 'l', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 'o', 'r', 'i', '_', 'l', 's', 'r', '_', 'r', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '4', '_', 'p', 'a', 'r', 'i', 't', 'y', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
+  '_', 's', 't', 'o', 'r', 'e', 'd', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '4', '_', 's', 'u', 'b', 'a', 'd', 'd', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '4', '_', 's', 'u', 'b', 'i', '_', 'a', 's', 'l', '_', 'r',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'S', '4', '_', 's', 'u', 'b', 'i', '_', 'l', 's',
+  'r', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'r', 'c', 'r',
+  'o', 't', 'a', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'r', 'c',
+  'r', 'o', 't', 'a', 't', 'e', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
+  '4', '_', 'v', 'x', 'a', 'd', 'd', 's', 'u', 'b', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '4', '_', 'v', 'x', 'a', 'd', 'd', 's', 'u', 'b', 'h', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '4', '_', 'v', 'x', 'a', 'd', 'd', 's', 'u', 'b', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a', 'd', 'd', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a', 'd', 'd',
+  'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a',
+  'd', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'a', 's', 'r', 'h', 'u',
+  'b', '_', 'r', 'n', 'd', '_', 's', 'a', 't', '_', 'g', 'o', 'o', 'd', 's',
+  'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'a', 's', 'r',
+  'h', 'u', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'p',
+  'o', 'p', 'c', 'o', 'u', 'n', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_',
+  'v', 'a', 's', 'r', 'h', 'r', 'n', 'd', '_', 'g', 'o', 'o', 'd', 's', 'y',
+  'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_',
+  'i', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i',
+  '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o',
+  'l', '_', 'i', '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6',
+  '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_', 'x',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i',
+  '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_',
+  'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l',
+  '_', 'i', '_', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_',
+  'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'x', 'a',
+  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'v', 's', 'p', 'l', 'a', 't',
+  'r', 'b', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'v', 't', 'r', 'u', 'n',
+  'e', 'h', 'b', '_', 'p', 'p', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'v',
+  't', 'r', 'u', 'n', 'o', 'h', 'b', '_', 'p', 'p', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'w', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'h', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'h', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'o', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a',
+  't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a',
+  't', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l',
+  'v', 's', 'p', 'l', 'a', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l',
+  'v', 's', 'p', 'l', 'a', 't', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'w', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'a',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'a',
+  'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p',
+  'r', 'e', 'd', '_', 'a', 'n', 'd', '_', 'n', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'p', 'r', 'e', 'd', '_', 'a', 'n', 'd', '_', 'n', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'n', 'o',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'n', 'o',
+  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r',
+  'e', 'd', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e',
+  'd', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'p', 'r', 'e', 'd', '_', 'o', 'r', '_', 'n', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'p', 'r', 'e', 'd', '_', 'o', 'r', '_', 'n', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's', 'c',
+  'a', 'l', 'a', 'r', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e',
+  'd', '_', 's', 'c', 'a', 'l', 'a', 'r', '2', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's', 'c', 'a', 'l',
+  'a', 'r', '2', 'v', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e',
+  'd', '_', 's', 'c', 'a', 'l', 'a', 'r', '2', 'v', '2', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'x', 'o',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'x', 'o',
+  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 's', 'h',
+  'u', 'f', 'f', 'e', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 's', 'h',
+  'u', 'f', 'f', 'e', 'q', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 's', 'h', 'u', 'f', 'f', 'e', 'q', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 's', 'h', 'u', 'f', 'f', 'e', 'q', 'w', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n',
+  'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'S', '3', '2', 'b', '_', 'n', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3',
+  '2', 'b', '_', 'n', 't', '_', 'n', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n', 't',
+  '_', 'n', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n', 't',
+  '_', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'S', '3', '2', 'b', '_', 'n', 't', '_', 'q', 'p', 'r', 'e', 'd',
+  '_', 'a', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'S', '3', '2', 'b', '_', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'q', 'p', 'r',
+  'e', 'd', '_', 'a', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'b', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 'b', 's', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'b', 's', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'b', 's', 'b', '_', 's', 'a', 't', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f',
+  'f', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i',
+  'f', 'f', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i',
+  'f', 'f', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's',
+  'd', 'i', 'f', 'f', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'w', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'h', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'h', '_',
+  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'h',
+  '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'b', 's', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'b', 's', 'w', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'b', 's', 'w', '_', 's', 'a', 't', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', 'd', 'v', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', 'd', 'v', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'b', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b',
+  'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 'd', 'd', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
+  'd', 'b', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', 's', 'a', 't', '_', 'd',
+  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', 's', 'a',
+  't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'v', '6',
+  '_', 'v', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'v',
+  '6', '_', 'v', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'c', 'l', 'b',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'c', 'l', 'b',
+  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'c', 'l', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'c', 'l', 'b', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'd', 'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'd', 'd', 'h', '_', 'd', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'd', 'd', 'h', '_', 'd', 'v', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 'n', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 'n', 'q', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 'q', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'd', 'd', 'h', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 's', 'a', 't', '_', 'd', 'v',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
+  'd', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h',
+  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'h', 'w', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'h', 'w', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 'h', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u',
+  'b', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'u', 'b', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', 'a', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', 'a',
+  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'u', 'b', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', 'a', 't', '_', 'd', 'v', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'd', 'd', 'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
+  'd', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', 'a', 't', '_', 'd', 'v',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', 'a',
+  't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'd', 'd', 'u', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'u', 'h', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 'w', '_', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 'w', '_',
+  'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'd', 'd', 'u', 'w', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'd', 'd', 'u', 'w', 's', 'a', 't', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'w', 's', 'a',
+  't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
+  'u', 'w', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'd', 'd', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '_', 'd', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '_', 'd', 'v', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 'n',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 'n', 'q',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
+  'd', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w',
+  'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'w', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'd', 'd', 'w', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 's', 'a', 't', '_', 'd', 'v', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 's', 'a', 't', '_',
+  'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 'l', 'i', 'g', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'l', 'i', 'g', 'n', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', 'i', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q', 'r', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q', 'r', 't', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q',
+  'r', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'n', 'd', 'n', 'q', 'r', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'q', 'r', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'q', 'r', 't', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'q',
+  'r', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'n', 'd', 'q', 'r', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'n', 'q', 'v', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'n', 'q', 'v', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd',
+  'v', 'q', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v',
+  'q', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 'n', 'd', 'v', 'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'n', 'd', 'v', 'r', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'n', 'd', 'v', 'r', 't', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'r', 't', '_', 'a', 'c',
+  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  's', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's',
+  'l', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  's', 'l', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'h', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 's', 'l', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 's', 'l', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', '_', 'a', 'c', 'c', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l',
+  'w', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', 'v',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's',
+  'r', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
+  'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's',
+  'r', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'b', 'r', 'n', 'd', 's', 'a', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'b', 'r', 'n',
+  'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 's', 'r', 'h', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 's', 'r', 'h', 'b', 's', 'a', 't', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'u', 'b',
+  'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  's', 'r', 'h', 'u', 'b', 'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'u', 'b',
+  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h',
+  'u', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 's', 'r', 'h', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 's', 'r', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 'r', 'n', 'd', 's',
+  'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h',
+  'u', 'b', 'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 's', 'a',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u',
+  'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h',
+  'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 's', 'r', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
+  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  's', 'r', 'w', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 's', 'r', 'w', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 'r', 'n', 'd',
+  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w',
+  'h', 'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 's', 'a', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 's', 'a', 't', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
+  'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 's', 'r', 'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
+  'w', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  's', 'r', 'w', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'v', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g',
+  'n', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g',
+  'n', 'p', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'v', 'g', 'b', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'v', 'g', 'b', 'r', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'v', 'g', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'v', 'g', 'h', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'v', 'g', 'h', 'r', 'n', 'd', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'b', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'b', 'r',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'b',
+  'r', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'v', 'g', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'v', 'g', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r', 'n', 'd', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'w', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'w',
+  'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u',
+  'w', 'r', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
+  'v', 'g', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'v', 'g', 'w', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'a', 'v', 'g', 'w', 'r', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'c', 'l', '0', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'c', 'l', '0', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'c', 'l', '0', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'c', 'l', '0', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'c', 'o', 'm', 'b', 'i', 'n', 'e', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'c', 'o', 'm', 'b', 'i', 'n', 'e', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', '0', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'd', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'd', 'd', '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'd', 'e', 'a', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
+  'e', 'a', 'l', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'd', 'e', 'a', 'l', 'b', '4', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'd', 'e', 'a', 'l', 'b', '4', 'w', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'h', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'v', 'd', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'v', 'd', 'd',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e',
+  'l', 't', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'l', 't',
+  'a', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
+  'm', 'p', 'y', 'b', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
+  'm', 'p', 'y', 'b', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's',
+  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
+  'p', 'y', 'b', 'u', 's', '_', 'd', 'v', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v',
+  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_',
+  'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h',
+  'b', '_', 'd', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '_', 'a', 'c', 'c', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
+  'y', 'h', 'i', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
+  'm', 'p', 'y', 'h', 'i', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'i', 's', 'a', 't',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
+  'y', 'h', 'i', 's', 'a', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'a',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'd', 'm', 'p', 'y', 'h', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'a', 't', '_', 'a',
+  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'd', 'm', 'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
+  'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 'i', 's',
+  'a', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'd', 'm', 'p', 'y', 'h', 's', 'u', 's', 'a', 't', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's',
+  'a', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
+  'p', 'y', 'h', 'v', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't', '_', 'a',
+  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'd', 's', 'a', 'd', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
+  's', 'a', 'd', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'd', 's', 'a', 'd', 'u', 'h', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'd', 's', 'a', 'd', 'u', 'h', '_', 'a', 'c',
+  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
+  'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_',
+  'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_',
+  'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'e', 'q', 'b', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'e', 'q', 'b', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'e', 'q', 'b', '_', 'x', 'o', 'r', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'e', 'q', 'b', '_', 'x', 'o', 'r', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'a', 'n', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'a', 'n', 'd', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_',
+  'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'o',
+  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
+  'q', 'h', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
+  'q', 'h', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'e', 'q', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'e', 'q', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'e', 'q', 'w', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'e', 'q', 'w', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'o', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'o', 'r', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'x', 'o',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'x', 'o',
+  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  'a', 't', 'h', 'e', 'r', 'm', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'q',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm',
+  'h', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'w', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r',
+  'm', 'h', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't',
+  'h', 'e', 'r', 'm', 'h', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'w', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't',
+  'h', 'e', 'r', 'm', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  'a', 't', 'h', 'e', 'r', 'm', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'g', 't', 'b', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'g', 't', 'b', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'o', 'r', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_',
+  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_',
+  'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'g', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
+  'h', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
+  'h', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'g', 't', 'h', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'h', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', '_', 'x', 'o', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', '_', 'x', 'o', 'r', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_',
+  'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b',
+  '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'u', 'b', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'u', 'b', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_', 'x', 'o', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_', 'x', 'o',
+  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  't', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
+  'u', 'h', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  't', 'u', 'h', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '_', 'o', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '_', 'o', 'r', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '_',
+  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h',
+  '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  't', 'u', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'g', 't', 'u', 'w', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'u', 'w', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'o', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'o', 'r',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
+  'u', 'w', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
+  't', 'u', 'w', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'g', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'g', 't', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'w', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'g', 't', 'w', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'o', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'o', 'r', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'x',
+  'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'x',
+  'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'i', 'n', 's', 'e', 'r', 't', 'w', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'i', 'n', 's', 'e', 'r', 't', 'w', 'r', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'a', 'l', 'i', 'g', 'n', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'a', 'l', 'i', 'g', 'n', 'b', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'a', 'l',
+  'i', 'g', 'n', 'b', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'a',
+  'l', 'i', 'g', 'n', 'b', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'l', 's', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'l', 's', 'r', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'l', 's', 'r', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'l', 's', 'r', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'l', 's', 'r', 'h', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'l', 's', 'r', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'l', 's', 'r', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'l', 's', 'r', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'l', 's', 'r', 'w', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'l', 's', 'r', 'w', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'l', 'u', 't', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'l', 'u', 't', '4', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'l', 'u', 't', 'v', 'v', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'n', 'm', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'n',
+  'm', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l',
+  'u', 't', 'v', 'v', 'b', '_', 'o', 'r', 'a', 'c', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'o', 'r', 'a', 'c',
+  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l',
+  'u', 't', 'v', 'v', 'b', '_', 'o', 'r', 'a', 'c', 'c', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'o', 'r', 'a',
+  'c', 'c', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'l', 'u', 't', 'v', 'v', 'b', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'l', 'u', 't', 'v', 'v', 'b', 'i', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h',
+  '_', 'n', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v',
+  'w', 'h', '_', 'n', 'm', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '_', 'o', 'r', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '_',
+  'o', 'r', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '_', 'o', 'r', 'a', 'c', 'c',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h',
+  '_', 'o', 'r', 'a', 'c', 'c', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', 'i', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 's', 'k', 'e',
+  'd', 's', 't', 'o', 'r', 'e', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'a', 's', 'k', 'e', 'd', 's', 't', 'o', 'r', 'e', 'n', 'q', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 's',
+  'k', 'e', 'd', 's', 't', 'o', 'r', 'e', 'n', 't', 'n', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'a', 's', 'k', 'e', 'd', 's', 't', 'o', 'r',
+  'e', 'n', 't', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'a', 's', 'k', 'e', 'd', 's', 't', 'o', 'r', 'e', 'n',
+  't', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 's', 'k', 'e',
+  'd', 's', 't', 'o', 'r', 'e', 'n', 't', 'q', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 's', 'k', 'e', 'd', 's', 't',
+  'o', 'r', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 's',
+  'k', 'e', 'd', 's', 't', 'o', 'r', 'e', 'q', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'a', 'x', 'b', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'a', 'x', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'a', 'x', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'h', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'a', 'x', 'w', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'i', 'n', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'i', 'n', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'i', 'n', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'i', 'n', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'i', 'n', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'i', 'n', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'i', 'n', 'u', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'i', 'n', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'i', 'n', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'i', 'n', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b',
+  'u', 's', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', 'v', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a',
+  'b', 'u', 'u', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'a', 'b', 'u', 'u', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', 'v',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', 'v',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'a', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'a', 'h', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'p', 'a', 'h', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h', 'h', 's', 'a',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h', 'h', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'a', 'u', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'a', 'u', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'a', 'u', 'h', 'b', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'u', 'h', 'b', '_', 'a', 'c',
+  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'a', 'u', 'h', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'a', 'u', 'h', 'u', 'h', 's', 'a', 't', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 's', 'u', 'h',
+  'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  's', 'u', 'h', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'b', 'u', 's', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', 'v', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u',
+  's', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'y', 'b', 'u', 's', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'v', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'v', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'v', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b',
+  'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'e', 'w', 'u', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'e', 'w', 'u', 'h', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'e', 'w', 'u', 'h',
+  '_', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'e',
+  'w', 'u', 'h', '_', '6', '4', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'y', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'p', 'y', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 's',
+  'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'y', 'h', 's', 'a', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 's', 'r', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 's', 'r', 's',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'h', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'h', 's', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'p', 'y', 'h', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'h', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'u', 's', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'u', 's', '_', 'a',
+  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'h', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'p', 'y', 'h', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'h', 'v', '_', 'a', 'c', 'c', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'v',
+  's', 'r', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h',
+  'v', 's', 'r', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'i', 'e', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'i', 'e', 'o', 'h', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'h', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i',
+  'e', 'w', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u', 'h', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'i', 'e', 'w', 'u', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u', 'h', '_', 'a', 'c', 'c', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'i', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'i', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'i', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h', 'b', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h',
+  'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'i', 'h', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'o', 'w', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'o', 'w', 'h', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i',
+  'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'y', 'i', 'w', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'i', 'w', 'b', '_', 'a', 'c', 'c', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'i', 'w', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'm', 'p', 'y', 'i', 'w', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'u',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'u',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'y', 'i', 'w', 'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'u', 'b', '_', 'a', 'c', 'c', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'o', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o',
+  'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'o', 'w', 'h', '_', '6', '4', '_', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', '6', '4',
+  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 'r', 'n', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 'r', 'n', 'd',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
+  'y', 'o', 'w', 'h', '_', 'r', 'n', 'd', '_', 's', 'a', 'c', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 'r', 'n',
+  'd', '_', 's', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 's', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_',
+  's', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'm', 'p', 'y', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c', 'c', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'u', 'b', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u',
+  'b', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'u', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
+  'p', 'y', 'u', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'e', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'e', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'e',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
+  'u', 'h', 'e', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u',
+  'h', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'm', 'u', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'm', 'u', 'x', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'n', 'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n',
+  'a', 'v', 'g', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'n', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'n', 'a', 'v', 'g', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'n', 'a', 'v', 'g', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'n', 'a', 'v', 'g', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'w', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'o', 'r', 'm', 'a', 'm', 't', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'o', 'r', 'm', 'a', 'm', 't',
+  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n',
+  'o', 'r', 'm', 'a', 'm', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'n', 'o', 'r', 'm', 'a', 'm', 't', 'w', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'n', 'o', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'o', 'r',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a',
+  'c', 'k', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c',
+  'k', 'e', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'p', 'a', 'c', 'k', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'a', 'c', 'k', 'e', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'h', 'b', '_', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'h', 'b', '_', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'a', 'c', 'k', 'h', 'u', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'h', 'u', 'b', '_', 's', 'a', 't',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a',
+  'c', 'k', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c',
+  'k', 'o', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'p', 'a', 'c', 'k', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'a', 'c', 'k', 'o', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'w', 'h', '_', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'w', 'h', '_', 's',
+  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'a', 'c', 'k', 'w', 'u', 'h', '_', 's', 'a', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'w', 'u', 'h', '_', 's', 'a', 't',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'o',
+  'p', 'c', 'o', 'u', 'n', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'o', 'p', 'c', 'o', 'u', 'n', 't', 'h', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p',
+  'r', 'e', 'f', 'i', 'x', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'p', 'r', 'e', 'f', 'i', 'x', 'q', 'h', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q', 'w',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'd',
+  'e', 'l', 't', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'd', 'e',
+  'l', 't', 'a', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'r', 'm', 'p', 'y', 'b', 'u', 'b', '_', 'r', 't', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 'b', '_', 'r', 't',
+  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'm', 'p', 'y', 'b', 'u', 'b', '_', 'r', 't', 't', '_', 'a', 'c', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 'b', '_',
+  'r', 't', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b',
+  'u', 's', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'm', 'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u',
+  's', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'r', 'm', 'p', 'y', 'b', 'u', 's', 'i', '_', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'i', '_', 'a',
+  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'r', 'm', 'p', 'y', 'b', 'u', 's', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'v', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'v',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
+  'y', 'b', 'u', 's', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'v', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'v', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b',
+  'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm',
+  'p', 'y', 'b', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
+  'y', 'u', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_', 'r', 't', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_',
+  'r', 't', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'r', 'm', 'p', 'y', 'u', 'b', '_', 'r', 't', 't', '_', 'a', 'c', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_',
+  'r', 't', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'i', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u',
+  'b', 'i', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'm', 'p', 'y', 'u', 'b', 'i', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'v',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'v',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm',
+  'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'r', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'h', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'h', 'b',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o',
+  'u', 'n', 'd', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'o', 'u', 'n', 'd', 'h', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'u', 'h', 'u', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'u', 'h', 'u',
+  'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'o', 'u', 'n', 'd', 'u', 'w', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'r', 'o', 'u', 'n', 'd', 'u', 'w', 'u', 'h', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'w', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'w', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o',
+  'u', 'n', 'd', 'w', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  'o', 'u', 'n', 'd', 'w', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd', 'u', 'b', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd', 'u', 'b', 'i', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd', 'u',
+  'b', 'i', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
+  's', 'a', 'd', 'u', 'b', 'i', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'h', 'u', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'h', 'u', 'b', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'u',
+  'w', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'u',
+  'w', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 's', 'a', 't', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'a', 't', 'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'b', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a',
+  't', 't', 'e', 'r', 'm', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'c', 'a', 't', 't', 'e', 'r', 'm', 'h', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h',
+  '_', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a',
+  't', 't', 'e', 'r', 'm', 'h', '_', 'a', 'd', 'd', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r',
+  'm', 'h', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't',
+  't', 'e', 'r', 'm', 'h', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm',
+  'h', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', '_', 'a', 'd', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm',
+  'h', 'w', '_', 'a', 'd', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', 'q',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r',
+  'm', 'h', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't',
+  't', 'e', 'r', 'm', 'w', '_', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', '_', 'a', 'd', 'd',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c',
+  'a', 't', 't', 'e', 'r', 'm', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', 'q', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'e', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'h', 'u', 'f', 'e', 'h', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'b', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f',
+  'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f',
+  'e', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'h', 'u', 'f', 'f', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'h', 'u', 'f', 'f', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'o', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'o', 'b', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'v', 'd',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'v',
+  'd', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'h', 'u', 'f', 'o', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'h', 'u', 'f', 'o', 'e', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o', 'e', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o', 'e', 'h', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
+  'b', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
+  'b', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'u', 'b', 'b', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 's', 'u', 'b', 'b', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 'q', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'u', 'b', 'b', 'q', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 's', 'a', 't', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 's', 'a', 't', '_', '1',
+  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b',
+  's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'u', 'b', 'b', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'v', '6', '_', 'v', 's', 'u', 'b', 'c', 'a', 'r', 'r', 'y',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'v', '6', '_', 'v', 's', 'u', 'b', 'c', 'a', 'r', 'r',
+  'y', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'h', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'h', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'u', 'b', 'h', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'u', 'b', 'h', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'u', 'b', 'h', 'q', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 's', 'a', 't', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
+  'h', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'u', 'b', 'h', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 'w', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'h', '_',
+  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
+  'u', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'u', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 's', 'a', 't', '_', 'd', 'v',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 's', 'a',
+  't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'u', 'b', 'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'u', 'b', 'b',
+  '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'u', 'b', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 's',
+  'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'u', 'h', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 'w', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'w',
+  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u',
+  'w', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
+  '_', 'v', 's', 'u', 'b', 'u', 'w', 's', 'a', 't', '_', 'd', 'v', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'w', 's', 'a', 't', '_',
+  'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  's', 'u', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
+  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'u', 'b', 'w', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
+  'u', 'b', 'w', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
+  'V', '6', '_', 'v', 's', 'u', 'b', 'w', 'n', 'q', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 's', 'u', 'b', 'w', 'n', 'q', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', 'q', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', 's', 'a', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', 's', 'a', 't',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
+  'b', 'w', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 's', 'u', 'b', 'w', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'w', 'a', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 's', 'w', 'a', 'p', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '_',
+  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
+  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y',
+  'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 't', 'm', 'p', 'y', 'b', 'u', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
+  '6', '_', 'v', 't', 'm', 'p', 'y', 'b', 'u', 's', '_', '1', '2', '8', 'B',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', 'u', 's',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p',
+  'y', 'b', 'u', 's', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'h', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'h', 'b', '_', '1', '2', '8',
+  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'h', 'b',
+  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p',
+  'y', 'h', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'b', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c',
+  'k', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c',
+  'k', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'u', 'n', 'p', 'a', 'c', 'k', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
+  'v', 'u', 'n', 'p', 'a', 'c', 'k', 'o', 'b', '_', '1', '2', '8', 'B', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'o', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
+  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'o',
+  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u',
+  'n', 'p', 'a', 'c', 'k', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'u', 'n', 'p', 'a', 'c', 'k', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
+  'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'u', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
+  'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'u', 'h',
+  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'x', 'o',
+  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
+  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'x', 'o', 'r', '_', '1', '2',
+  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'z', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'z', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
+  '_', 'V', '6', '_', 'v', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
+  'z', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd',
+  'c', 'c', 'l', 'e', 'a', 'n', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd',
+  'c', 'c', 'l', 'e', 'a', 'n', 'i', 'n', 'v', 'a', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y',
+  '2', '_', 'd', 'c', 'i', 'n', 'v', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_',
+  'd', 'c', 'z', 'e', 'r', 'o', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '4', '_', 'l',
+  '2', 'f', 'e', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '5', '_', 'l', '2',
+  'f', 'e', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l',
+  'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i',
+  'r', 'c', '_', 'l', 'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'u', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r',
+  'c', '_', 's', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'c', 'i', 'r', 'c', '_', 's', 't', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 's', 't', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 's',
+  't', 'h', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'c', 'i', 'r', 'c', '_', 's', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', '_', 'm', 'm', '2', '5', '6', 'i', '_', 'v', 'a', 'd',
+  'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
+  'X', 'A', 'G', 'O', 'N', '_', 'p', 'r', 'e', 'f', 'e', 't', 'c', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'a', 'b', 's', 'q', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'b', 's', 'q', '_',
+  's', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'a', 'b', 's', 'q', '_', 's', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd',
+  'd', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'a', 'd', 'd', '_', 'a', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
+  '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'a', 'd', 'd', '_', 'a', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd',
+  'q', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', '_', 's', '_', 'p', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'a', 'd', 'd', 'q', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_',
+  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_', 'r', '_', 'p', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
+  'd', 'd', 'q', 'h', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'a', 'd', 'd', 's', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'a',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'a', 'd', 'd', 's', '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_',
+  'a', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'a', 'd', 'd', 's', '_', 's', '_', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's',
+  '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 's', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
+  's', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'u', '_', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd',
+  'd', 's', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'u', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a',
+  'd', 'd', 's', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 's', 'c', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
+  'd', 'd', 'u', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', '_', 'q', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'a', 'd', 'd', 'u', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', '_',
+  's', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', 'h', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
+  'd', 'd', 'u', 'h', '_', 'r', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'a', 'd', 'd', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
+  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'a', 'd', 'd', 'v', 'i', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', 'i',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'a', 'd', 'd', 'v', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', 'i', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'a', 'd', 'd', 'w', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'n', 'd', '_', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'n', 'd',
+  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  'i', 'p', 's', '_', 'a', 'p', 'p', 'e', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_',
+  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'a', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b',
+  '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u',
+  'b', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 'u', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's',
+  'u', 'b', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 'u', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a',
+  'v', 'e', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_', 's', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v',
+  'e', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_', 's', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e',
+  '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'a', 'v', 'e', '_', 'u', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_',
+  'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'a', 'v', 'e', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_',
+  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'a', 'v', 'e', 'r', '_', 's', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r',
+  '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 's', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e',
+  'r', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 'u', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v',
+  'e', 'r', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 'u', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'b', 'a', 'l', 'i', 'g', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l',
+  'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'b', 'c', 'l', 'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', 'i', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
+  'c', 'l', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', 'i', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i',
+  'n', 's', 'l', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
+  's', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's',
+  'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', 'i', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
+  's', 'l', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', 'i', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i',
+  'n', 's', 'r', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
+  's', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's',
+  'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', 'i', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
+  's', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', 'i', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'b',
+  'i', 't', 'r', 'e', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'm', 'n', 'z', '_', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'm', 'n', 'z',
+  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'b', 'm', 'z', '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'm', 'z', 'i', '_', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
+  'n', 'e', 'g', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'b', 'n', 'e', 'g', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', 'i', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'b', 'n', 'e', 'g', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', 'i', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
+  'n', 'e', 'g', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'b', 'n', 'z', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'b', 'p', 'o', 's', 'g', 'e', '3', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 'l', '_',
+  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'b', 's', 'e', 'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', '_', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
+  's', 'e', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'b', 's', 'e', 't', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', 'i', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'b', 's', 'e', 't', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', 'i', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'b', 'z', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'b', 'z', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'z', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'z', '_',
+  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'b', 'z', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'e', 'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', 'i', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'e', 'q', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', 'i', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e',
+  'q', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'c', 'f', 'c', 'm', 's', 'a', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 's',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'c', 'l', 'e', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 's', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'l', 'e', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 'u', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'l', 'e', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 'u', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
+  'l', 'e', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 's', '_', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
+  'l', 'e', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 's', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'l', 'e', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 'u', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'l', 'e', 'i', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 'u',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'c', 'l', 'e', 'i', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 's',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'c', 'l', 't', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 's', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'l', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 'u', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'l', 't', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 'u', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
+  'l', 't', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 's', '_', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
+  'l', 't', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 's', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'l', 't', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 'u', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'l', 't', 'i', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 'u',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'c', 'l', 't', 'i', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', '_',
+  'e', 'q', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', '_', 'l', 'e', '_', 'p', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'c', 'm', 'p', '_', 'l', 't', '_', 'p', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g',
+  'd', 'u', '_', 'e', 'q', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'd', 'u',
+  '_', 'l', 'e', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'd', 'u', '_', 'l',
+  't', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'u', '_', 'e', 'q', '_', 'q',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'c', 'm', 'p', 'g', 'u', '_', 'l', 'e', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c',
+  'm', 'p', 'g', 'u', '_', 'l', 't', '_', 'q', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'u',
+  '_', 'e', 'q', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'u', '_', 'l', 'e', '_',
+  'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'c', 'm', 'p', 'u', '_', 'l', 't', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o',
+  'p', 'y', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 's', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
+  'o', 'p', 'y', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 's', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'c', 'o', 'p', 'y', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 'u', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'c', 'o', 'p', 'y', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 'u',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'c', 't', 'c', 'm', 's', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 's', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'd', 'i', 'v', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 's', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd',
+  'i', 'v', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 'u', '_', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i',
+  'v', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 'u', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v',
+  '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'd', 'l', 's', 'a', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_', 's',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'd', 'o', 't', 'p', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_',
+  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'd', 'o', 't', 'p', '_', 'u', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p',
+  '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_', 'u', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p',
+  'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'd', 'p', 'a', 'd', 'd', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_',
+  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 'u', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a',
+  'd', 'd', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 'u', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'd', 'p', 'a', 'q', '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p',
+  'a', 'q', '_', 's', 'a', '_', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'q', 'x',
+  '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'q', 'x', '_', 's',
+  'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'u', '_', 'h', '_', 'q',
+  'b', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'd', 'p', 'a', 'u', '_', 'h', '_', 'q', 'b', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd',
+  'p', 'a', 'x', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', '_', 'w', '_',
+  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'd', 'p', 's', 'q', '_', 's', '_', 'w', '_', 'p', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'd', 'p', 's', 'q', '_', 's', 'a', '_', 'l', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's',
+  'q', 'x', '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'q', 'x',
+  '_', 's', 'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'u', '_', 'h',
+  '_', 'q', 'b', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'u', '_', 'h', '_', 'q', 'b', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'd', 'p', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_',
+  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's',
+  'u', 'b', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_', 'u', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'd', 'p', 's', 'u', 'b', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'x', '_',
+  'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'p', 'd',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'e', 'x', 't', 'r', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'r',
+  '_', 'r', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'r', '_', 's', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'e', 'x', 't', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'a', 'd', 'd', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'a', 'd',
+  'd', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 'c', 'a', 'f', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'a', 'f', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'c', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'e', 'q', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'l',
+  'a', 's', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 'c', 'l', 'a', 's', 's', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
+  'l', 'e', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 'c', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'l', 't', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'f', 'c', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'n', 'e', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
+  'n', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 'c', 'o', 'r', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'o', 'r', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'f', 'c', 'u', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'e', 'q', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'c', 'u', 'l', 'e', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'l', 'e', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  'c', 'u', 'l', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'l', 't', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
+  'u', 'n', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 'c', 'u', 'n', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'n', 'e',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 'c', 'u', 'n', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'd', 'i', 'v', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'd', 'i', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'd', 'o', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e',
+  'x', 'd', 'o', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'p', '2', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x',
+  'p', '2', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 'e', 'x', 'u', 'p', 'l', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x',
+  'u', 'p', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'u', 'p', 'r', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e',
+  'x', 'u', 'p', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'f', 'i', 'n', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_',
+  'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_', 'u', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'q',
+  'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 'f', 'q', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'q', 'r', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'f', 'q', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l',
+  'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 'l', 'o', 'g', '2', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'l', 'o', 'g', '2', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  'm', 'a', 'd', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'd', 'd', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm',
+  'a', 'x', '_', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'x', '_', 'a', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  'm', 'a', 'x', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'x', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'i', 'n',
+  '_', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 'm', 'i', 'n', '_', 'a', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'i',
+  'n', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 'm', 'i', 'n', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 's', 'u', 'b', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'f', 'm', 's', 'u', 'b', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'u', 'l', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  'm', 'u', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 'r', 'c', 'p', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 'c', 'p',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 'r', 'i', 'n', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 'i', 'n', 't', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'f', 'r', 's', 'q', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 's', 'q', 'r', 't',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 's', 'a', 'f', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'a', 'f', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  's', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 's', 'e', 'q', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'l', 'e',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 's', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'l', 't', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  's', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'f', 's', 'n', 'e', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'n', 'e',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 's', 'o', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'o', 'r', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  's', 'q', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 's', 'q', 'r', 't', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's',
+  'u', 'b', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 's', 'u', 'b', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'e', 'q',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'f', 's', 'u', 'e', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'l', 'e', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'f', 's', 'u', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'l', 't', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 's', 'u', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'n', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's',
+  'u', 'n', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 's', 'u', 'n', 'e', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'n',
+  'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'f', 't', 'i', 'n', 't', '_', 's', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'i',
+  'n', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'f', 't', 'i', 'n', 't', '_', 'u', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'f', 't', 'i', 'n', 't', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'q', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  't', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c', '_', 's', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
+  't', 'r', 'u', 'n', 'c', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c',
+  '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c', '_', 'u', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h',
+  'a', 'd', 'd', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 's', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'h', 'a', 'd', 'd', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 'u', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'h', 'a', 'd', 'd', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 'u',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'h', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u', 'b', '_',
+  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'h', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u', 'b',
+  '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'h', 's', 'u', 'b', '_', 'u', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u',
+  'b', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'e', 'v', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v',
+  'e', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'i', 'l', 'v', 'e', 'v', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'e',
+  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'i', 'l', 'v', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v',
+  'o', 'd', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'i', 'l', 'v', 'o', 'd', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'o',
+  'd', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'i', 'l', 'v', 'o', 'd', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'r', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'i', 'l', 'v', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'r', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l',
+  'v', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'i', 'n', 's', 'e', 'r', 't', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's',
+  'e', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'e', 'r', 't', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n',
+  's', 'e', 'r', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'i', 'n', 's', 'v', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'v',
+  'e', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'i', 'n', 's', 'v', 'e', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'v', 'e',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'i', 'n', 's', 'v', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'l', 'b', 'u', 'x', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l',
+  'd', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'l', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'l', 'd', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'l', 'd', 'i', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', 'i', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'l', 'd', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 'l', 'h', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'l', 's', 'a', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'l',
+  'w', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'm', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd', '_', 'q', '_', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'a', 'd', 'd', '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd', 'r', '_', 'q',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 'a', 'd', 'd', 'r', '_', 'q', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'd',
+  'd', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 'a', 'd', 'd', 'v', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd', 'v', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'a', 'd', 'd', 'v', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd', 'v', '_', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'm', 'a', 'q', '_', 's', '_', 'w', '_', 'p', 'h', 'l', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a',
+  'q', '_', 's', '_', 'w', '_', 'p', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'q', '_', 's',
+  'a', '_', 'w', '_', 'p', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'q', '_', 's', 'a', '_',
+  'w', '_', 'p', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'a', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x',
+  '_', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'a', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_',
+  'a', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'm', 'a', 'x', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 's',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 'a', 'x', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 's', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'a', 'x', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'u', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'a', 'x', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'u', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
+  'a', 'x', 'i', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'a', 'x', 'i', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 's', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'a', 'x', 'i', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 'u',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 'a', 'x', 'i', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_',
+  'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'm', 'i', 'n', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'a',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 'i', 'n', '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'a', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'i', 'n', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'i', 'n', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 's', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
+  'i', 'n', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'u', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i',
+  'n', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'u', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n',
+  'i', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 's', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i',
+  'n', 'i', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 's', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
+  'i', 'n', 'i', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 'u', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'i', 'n', 'i', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 'u', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'o', 'd', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'o', 'd', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 's', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
+  'o', 'd', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 'u', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o',
+  'd', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 'u', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'o',
+  'd', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'm', 'o', 'v', 'e', '_', 'v', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 's', 'u', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 's', 'u', 'b', '_', 'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', '_', 'q', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 's', 'u', 'b', 'r', '_', 'q', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'r',
+  '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'm', 's', 'u', 'b', 'u', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'v',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'm', 's', 'u', 'b', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'v', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 's', 'u', 'b', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 't', 'h', 'l', 'i', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'm', 'u', 'l', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', '_', 'q', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
+  'u', 'l', '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', '_', 's', '_', 'p', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'm', 'u', 'l', 'e', 'q', '_', 's', '_', 'w', '_', 'p', 'h', 'l', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'm', 'u', 'l', 'e', 'q', '_', 's', '_', 'w', '_', 'p', 'h', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm',
+  'u', 'l', 'e', 'u', '_', 's', '_', 'p', 'h', '_', 'q', 'b', 'l', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm',
+  'u', 'l', 'e', 'u', '_', 's', '_', 'p', 'h', '_', 'q', 'b', 'r', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm',
+  'u', 'l', 'q', '_', 'r', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'q', '_',
+  'r', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'q', '_', 's', '_', 'p', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'm', 'u', 'l', 'q', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', 'r', '_', 'q', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'm', 'u', 'l', 'r', '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 's', 'a',
+  '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 's', 'a', 'q', '_', 's', '_',
+  'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 't', 'u',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'm', 'u', 'l', 'v', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', 'v', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l',
+  'v', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'm', 'u', 'l', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'o', 'c', '_', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'n', 'l', 'o', 'c', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'o', 'c', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'o',
+  'c', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'n', 'l', 'z', 'c', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'z', 'c', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  'n', 'l', 'z', 'c', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'z', 'c', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'o', 'r',
+  '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'n', 'o', 'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'o', 'r', '_', 'v', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'o', 'r', 'i',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'p', 'a', 'c', 'k', 'r', 'l', '_', 'p', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k',
+  'e', 'v', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 'p', 'c', 'k', 'e', 'v', '_', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'e',
+  'v', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 'p', 'c', 'k', 'e', 'v', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'o', 'd',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'p', 'c', 'k', 'o', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'o', 'd', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'p', 'c', 'k', 'o', 'd', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'n', 't', '_', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p',
+  'c', 'n', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 'p', 'c', 'n', 't', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'n', 't',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'p', 'i', 'c', 'k', '_', 'p', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'i', 'c', 'k',
+  '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'e', 'q', '_', 'w', '_', 'p', 'h',
+  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'p', 'r', 'e', 'c', 'e', 'q', '_', 'w', '_', 'p', 'h', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'p', 'r', 'e', 'c', 'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'p', 'r', 'e', 'c', 'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', 'a',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'p', 'r', 'e', 'c', 'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'p', 'r', 'e', 'c', 'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r',
+  'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 'p', 'r', 'e', 'c', 'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'p', 'r', 'e', 'c', 'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', 'a',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 'p', 'r', 'e', 'c', 'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'p', 'r', 'e', 'c', 'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r', 'a', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  'p', 'r', 'e', 'c', 'r', '_', 'q', 'b', '_', 'p', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e',
+  'c', 'r', '_', 's', 'r', 'a', '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e',
+  'c', 'r', '_', 's', 'r', 'a', '_', 'r', '_', 'p', 'h', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p',
+  'r', 'e', 'c', 'r', 'q', '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c',
+  'r', 'q', '_', 'q', 'b', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q',
+  '_', 'r', 's', '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q',
+  'u', '_', 's', '_', 'q', 'b', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'p', 'e',
+  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 'r', 'a', 'd', 'd', 'u', '_', 'w', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'r',
+  'd', 'd', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 'r', 'e', 'p', 'l', '_', 'p', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'r', 'e',
+  'p', 'l', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 's', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't',
+  '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'a', 't', '_', 's', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't', '_',
+  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'a', 't', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 'u',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'a', 't', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 'u', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'h', 'f', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 's', 'h', 'f', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'h', 'f', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 's', 'h', 'i', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'l', 'l', '_', 'p', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 's', 'h', 'l', 'l', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'l', 'l', '_', 's',
+  '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  'i', 'p', 's', '_', 's', 'h', 'l', 'l', '_', 's', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h',
+  'r', 'a', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's',
+  'h', 'r', 'a', '_', 'r', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'r',
+  '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'r', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h',
+  'r', 'l', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'l', '_', 'q', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l',
+  'd', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'l', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l',
+  'd', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'l', 'd', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', 'i', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  's', 'l', 'd', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', 'i', '_', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'l', 'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l', '_', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'l', 'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l', 'i', '_', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
+  'l', 'l', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 's', 'l', 'l', 'i', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a',
+  't', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'p', 'l', 'a', 't', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'p', 'l', 'a', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't', 'i',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'p', 'l', 'a', 't', 'i', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't',
+  'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'p', 'l', 'a', 't', 'i', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'r', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'r', 'a', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'i', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r',
+  'a', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'r', 'a', 'i', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', '_',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'r', 'a', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r',
+  'a', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'r', 'a', 'r', 'i', '_', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r',
+  'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'r', 'a', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', 'i',
+  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'r', 'l', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l',
+  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'r', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'i', '_', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r',
+  'l', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'r', 'l', 'i', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'i', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'r', 'l', 'r', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r', '_', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r',
+  'l', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'r', 'l', 'r', '_', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r', 'i',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'r', 'l', 'r', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r', 'i', '_',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'r', 'l', 'r', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 't', '_', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 't', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 't', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 's', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'q', '_',
+  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
+  'p', 's', '_', 's', 'u', 'b', 'q', '_', 's', '_', 'p', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u',
+  'b', 'q', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'q', 'h', '_', 'p', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
+  '_', 's', 'u', 'b', 'q', 'h', '_', 'r', '_', 'p', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b',
+  'q', 'h', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'q', 'h', '_', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
+  'u', 'b', 's', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 's', '_', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
+  's', 'u', 'b', 's', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 's', '_',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'u', 'b', 's', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 'u',
+  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 's', 'u', 'b', 's', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_',
+  'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_', 'u', '_', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
+  'b', 's', 'u', 's', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_',
+  'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_', 'u', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
+  'b', 's', 'u', 'u', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', 'u', 'u', '_',
+  's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
+  's', 'a', '_', 's', 'u', 'b', 's', 'u', 'u', '_', 's', '_', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
+  'b', 's', 'u', 'u', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', '_', 'p',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
+  's', '_', 's', 'u', 'b', 'u', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', '_',
+  's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', '_', 's', '_', 'q', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
+  's', 'u', 'b', 'u', 'h', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', 'h', '_',
+  'r', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'u', 'b', 'v', '_', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', '_',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 's', 'u', 'b', 'v', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', '_', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
+  'b', 'v', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', 'i', '_', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b',
+  'v', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'm', 's', 'a', '_', 's', 'u', 'b', 'v', 'i', '_', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'v', 's', 'h', 'f',
+  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
+  'a', '_', 'v', 's', 'h', 'f', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'v', 's', 'h', 'f', '_', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'v',
+  's', 'h', 'f', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'm', 'i', 'p', 's', '_', 'w', 'r', 'd', 's', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'x', 'o', 'r', '_',
+  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
+  '_', 'x', 'o', 'r', 'i', '_', 'b', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'a', 'd', 'd', '_', 'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'a', 'd', 'd', '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'a', 'd', 'd', '_', 'r', 'm', '_', 'f', 't', 'z', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'n', '_', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'n', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'n',
+  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a',
+  'd', 'd', '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'a', 'd', 'd', '_', 'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'a', 'd', 'd', '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'z', '_', 'd', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'z', '_', 'f',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'z', '_',
+  'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a',
+  'r', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b',
+  'a', 'r', '_', 'w', 'a', 'r', 'p', '_', 's', 'y', 'n', 'c', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'b', 'a', 'r', '_', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b',
+  'a', 'r', 'r', 'i', 'e', 'r', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 's', 'y', 'n',
+  'c', '_', 'c', 'n', 't', '\000', '_', '_', 's', 'y', 'n', 'c', 't', 'h', 'r',
+  'e', 'a', 'd', 's', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r',
+  '0', '_', 'a', 'n', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a',
+  'r', '0', '_', 'o', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a',
+  'r', '0', '_', 'p', 'o', 'p', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'b', 'i', 't', 'c', 'a', 's', 't', '_', 'd', '2', 'l', 'l', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'b', 'i', 't', 'c', 'a', 's', 't', '_', 'f', '2',
+  'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'i', 't', 'c', 'a', 's',
+  't', '_', 'i', '2', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'i',
+  't', 'c', 'a', 's', 't', '_', 'l', 'l', '2', 'd', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'c', 'e', 'i', 'l', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'c', 'e', 'i', 'l', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'c', 'e', 'i', 'l', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'c', 'o', 's', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'c', 'o', 's', '_', 'a', 'p',
+  'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'd', '2', 'f', '_', 'r', 'm', '_', 'f', 't', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'n', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'n', '_', 'f', 't', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'p', '_', 'f', 't',
+  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'z',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r', 'z', '_',
+  'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_',
+  'h', 'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'l',
+  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'r', 'm',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'r', 'n', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'r', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'r', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'l', 'l', '_', 'r', 'm', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'l', 'l', '_', 'r', 'n', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'l', 'l', '_', 'r', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'l', 'l', '_', 'r', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'i', '_', 'r', 'm', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'i', '_', 'r', 'n', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'i', '_', 'r', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'i', '_', 'r', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'l', 'l', '_', 'r', 'm', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'l', 'l', '_', 'r', 'n', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'l', 'l', '_', 'r', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'u', 'l', 'l', '_', 'r',
+  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'a', 'p',
+  'p', 'r', 'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd',
+  'i', 'v', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'm', '_',
+  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'm',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r',
+  'm', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'd', 'i', 'v', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'd', 'i', 'v', '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'd', 'i', 'v', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'p', '_', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'p', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'p',
+  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd',
+  'i', 'v', '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'd', 'i', 'v', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'd', 'i', 'v', '_', 'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'e', 'x', '2', '_', 'a', 'p', 'p', 'r', 'o',
+  'x', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'e', 'x', '2', '_',
+  'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'e', 'x', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'h', '_', 'r',
+  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'h', '_', 'r', 'n',
+  '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i',
+  '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_',
+  'r', 'm', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  '2', 'i', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
+  'i', '_', 'r', 'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'f', '2', 'i', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'f', '2', 'i', '_', 'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'f', '2', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', '2', 'i', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'm', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'm', '_', 'f', 't',
+  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r',
+  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r',
+  'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
+  'l', 'l', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
+  'l', 'l', '_', 'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'm', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'm', '_', 'f',
+  't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_',
+  'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_',
+  'r', 'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  '2', 'u', 'i', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  '2', 'u', 'i', '_', 'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'z', '_', 'f', 't', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'm',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r',
+  'm', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
+  'u', 'l', 'l', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  '2', 'u', 'l', 'l', '_', 'r', 'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'p', '_', 'f',
+  't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l',
+  '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l',
+  'l', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'f', 'a', 'b', 's', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'f', 'a', 'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  'a', 'b', 's', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', 'l', 'o', 'o', 'r', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', 'l', 'o', 'o', 'r', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', 'l', 'o', 'o', 'r', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'm', '_', 'd', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'm', '_', 'f',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'm', '_',
+  'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
+  'a', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  'm', 'a', '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'f', 'm', 'a', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'p', '_', 'd', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'p', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'p', '_', 'f',
+  't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
+  '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
+  'a', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
+  'm', 'a', '_', 'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'd', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'd', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'f', 'n', 's', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'i', '2', 'd', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'i', '2', 'd', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'i', '2', 'd', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i',
+  '2', 'd', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2',
+  'f', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f',
+  '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_',
+  'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_', 'r',
+  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 's', 'p', 'a', 'c',
+  'e', 'p', '_', 'c', 'o', 'n', 's', 't', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'i', 's', 's', 'p', 'a', 'c', 'e', 'p', '_', 'g', 'l', 'o', 'b', 'a',
+  'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 's', 'p', 'a', 'c',
+  'e', 'p', '_', 'l', 'o', 'c', 'a', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'i', 's', 's', 'p', 'a', 'c', 'e', 'p', '_', 's', 'h', 'a', 'r', 'e',
+  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 't', 'y', 'p', 'e',
+  'p', '_', 's', 'a', 'm', 'p', 'l', 'e', 'r', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'i', 's', 't', 'y', 'p', 'e', 'p', '_', 's', 'u', 'r', 'f', 'a',
+  'c', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 't', 'y', 'p',
+  'e', 'p', '_', 't', 'e', 'x', 't', 'u', 'r', 'e', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'l', 'g', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'g', '2', '_', 'a', 'p', 'p',
+  'r', 'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'g',
+  '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'm', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'n', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'm', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'n', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'o', 'h', 'i', '_', 'i', '2', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'a', 't', 'c', 'h', '_', 'a',
+  'n', 'y', '_', 's', 'y', 'n', 'c', '_', 'i', '3', '2', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'm', 'a', 't', 'c', 'h', '_', 'a', 'n', 'y', '_', 's',
+  'y', 'n', 'c', '_', 'i', '6', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'm', 'e', 'm', 'b', 'a', 'r', '_', 'c', 't', 'a', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'm', 'e', 'm', 'b', 'a', 'r', '_', 'g', 'l', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'm', 'e', 'm', 'b', 'a', 'r', '_', 's', 'y', 's',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'm', '_',
+  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'm',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r',
+  'm', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'm', 'u', 'l', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'm', 'u', 'l', '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'm', 'u', 'l', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p', '_', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p',
+  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm',
+  'u', 'l', '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'm', 'u', 'l', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'm', 'u', 'l', '_', 'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '2', '4', '_', 'i', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '2', '4', '_', 'u', 'i', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'i', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'l', 'l',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'u',
+  'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_',
+  'u', 'l', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'p', 'r', 'm', 't',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'a', 'p', 'p',
+  'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'r', 'c', 'p', '_', 'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'm', '_', 'f', 't', 'z', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'n',
+  '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r',
+  'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_',
+  'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'r', 'c', 'p', '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'r', 'c', 'p', '_', 'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'z', '_',
+  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'z',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r',
+  'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c',
+  'l', 'o', 'c', 'k', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
+  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c', 'l', 'o', 'c',
+  'k', '6', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
+  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c', 't', 'a', 'i', 'd',
+  '_', 'w', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_',
+  'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c', 't', 'a', 'i', 'd', '_',
+  'x', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p',
+  't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c', 't', 'a', 'i', 'd', '_', 'y',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'c', 't', 'a', 'i', 'd', '_', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x',
+  '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '0', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
+  's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '0', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '1', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '2', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '3', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '4', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '5', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '6', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '7', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '8', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '1', '9', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '0', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '1', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '2', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '3', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '4', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '5', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '6', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '7', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '8', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '2', '9', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
+  'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '3', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
+  'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '3', '0', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
+  'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '3', '1', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
+  'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '4', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
+  '_', 'e', 'n', 'v', 'r', 'e', 'g', '5', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_',
+  'e', 'n', 'v', 'r', 'e', 'g', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e',
+  'n', 'v', 'r', 'e', 'g', '7', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
+  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
+  'v', 'r', 'e', 'g', '8', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
+  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
+  'r', 'e', 'g', '9', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
+  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'g', 'r', 'i', 'd',
+  'i', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_',
+  'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'i', 'd',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k',
+  '_', 'e', 'q', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
+  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm',
+  'a', 's', 'k', '_', 'g', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
+  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a',
+  'n', 'e', 'm', 'a', 's', 'k', '_', 'g', 't', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
+  '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k', '_', 'l', 'e', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k', '_', 'l', 't',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'w',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'x',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'y',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'z',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 's', 'm', 'i', 'd', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'n', 't', 'i', 'd', '_', 'w', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
+  'g', '_', 'n', 't', 'i', 'd', '_', 'x', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_',
+  'n', 't', 'i', 'd', '_', 'y', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
+  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'n', 't',
+  'i', 'd', '_', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
+  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'n', 'w', 'a', 'r',
+  'p', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
+  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'p', 'm', '0', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
+  's', 'r', 'e', 'g', '_', 'p', 'm', '1', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_',
+  'p', 'm', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
+  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'p', 'm', '3', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
+  's', 'r', 'e', 'g', '_', 's', 'm', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
+  '_', 't', 'i', 'd', '_', 'w', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
+  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 't', 'i',
+  'd', '_', 'x', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
+  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 't', 'i', 'd', '_', 'y',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
+  'x', '_', 's', 'r', 'e', 'g', '_', 't', 'i', 'd', '_', 'z', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
+  'r', 'e', 'g', '_', 'w', 'a', 'r', 'p', 'i', 'd', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
+  'g', '_', 'w', 'a', 'r', 'p', 's', 'i', 'z', 'e', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'o', 't', 'a', 't', 'e', '_', 'b', '3', '2', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 't', 'a', 't', 'e', '_', 'b', '6',
+  '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 't', 'a', 't', 'e',
+  '_', 'r', 'i', 'g', 'h', 't', '_', 'b', '6', '4', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_', 'd', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_', 'f', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_', 'f', 't', 'z', '_', 'f', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 's', 'q', 'r', 't', '_', 'a', 'p',
+  'p', 'r', 'o', 'x', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
+  's', 'q', 'r', 't', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'r', 's', 'q', 'r', 't', '_', 'a', 'p', 'p',
+  'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'a', 'd', '_', 'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'a', 'd', '_', 'u', 'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'a', 't', 'u', 'r', 'a', 't', 'e', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'a', 't', 'u', 'r', 'a', 't', 'e', '_', 'f', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'a', 't', 'u', 'r', 'a', 't', 'e', '_', 'f',
+  't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f',
+  'l', '_', 'b', 'f', 'l', 'y', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'b', 'f', 'l', 'y', '_', 'i', '3',
+  '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'd',
+  'o', 'w', 'n', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'h', 'f', 'l', '_', 'd', 'o', 'w', 'n', '_', 'i', '3', '2', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'i', 'd', 'x', '_',
+  'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l',
+  '_', 'i', 'd', 'x', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'b', 'f', 'l', 'y',
+  '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f',
+  'l', '_', 's', 'y', 'n', 'c', '_', 'b', 'f', 'l', 'y', '_', 'i', '3', '2',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y',
+  'n', 'c', '_', 'd', 'o', 'w', 'n', '_', 'f', '3', '2', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'd',
+  'o', 'w', 'n', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'i', 'd', 'x', '_', 'f',
+  '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_',
+  's', 'y', 'n', 'c', '_', 'i', 'd', 'x', '_', 'i', '3', '2', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_',
+  'u', 'p', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'u', 'p', '_', 'i', '3', '2',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'u', 'p',
+  '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f',
+  'l', '_', 'u', 'p', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'i', 'n', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'i', 'n', '_', 'a', 'p', 'p', 'r', 'o',
+  'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'q', 'r', 't', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'a', 'p', 'p', 'r',
+  'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'q', 'r', 't', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'q', 'r', 't', '_', 'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'q', 'r', 't', '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'm', '_', 'f', 't', 'z',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_',
+  'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r',
+  't', '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'q', 'r', 't', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'p', '_', 'd', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'p', '_',
+  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r',
+  'p', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'q', 'r', 't', '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'q', 'r', 't', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'z', '_', 'f', 't', 'z',
+  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 'q', '_', 'a',
+  'r', 'r', 'a', 'y', '_', 's', 'i', 'z', 'e', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 'q', '_', 'c', 'h', 'a', 'n', 'n', 'e', 'l', '_', 'd',
+  'a', 't', 'a', '_', 't', 'y', 'p', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 'q', '_', 'c', 'h', 'a', 'n', 'n', 'e', 'l', '_', 'o', 'r',
+  'd', 'e', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 'q', '_',
+  'd', 'e', 'p', 't', 'h', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  'q', '_', 'h', 'e', 'i', 'g', 'h', 't', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 'q', '_', 'w', 'i', 'd', 't', 'h', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
+  '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 'z', 'e',
+  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_',
+  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i',
+  '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
+  '_', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4', '_', 'z', 'e', 'r',
+  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 'c', 'l',
+  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8',
+  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '2', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6', '_', 'z',
+  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i',
+  '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a',
+  'y', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 'z', 'e', 'r',
+  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4',
+  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'v', '2', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 'c', 'l',
+  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i',
+  '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'v', '2', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 't',
+  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
+  '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
+  '_', 'v', '4', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2',
+  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '4', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 'z', 'e', 'r', 'o',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '1', 'd', '_', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '1', '6', '_',
+  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '1', 'd', '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '1', 'd', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '6', '4', '_',
+  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '6', '4', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '1', 'd', '_', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '8', '_', 't',
+  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
+  '_', 'v', '2', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2',
+  'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '3',
+  '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '3', '2',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '3', '2', '_', 'z',
+  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a',
+  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '1', 'd', '_', 'v', '2', 'i', '6', '4', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '1', 'd', '_', 'v', '2', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
+  '_', 'v', '2', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v',
+  '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '8',
+  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'c',
+  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '1', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '1', 'd', '_', 'v', '4', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
+  'd', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
+  'v', '4', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4',
+  'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '8',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '8', '_', 'z', 'e',
+  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_',
+  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i',
+  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
+  '_', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 'z', 'e', 'r',
+  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4', '_', 'c',
+  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '6',
+  '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
+  'y', '_', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a',
+  'r', 'r', 'a', 'y', '_', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6', '_', 'c', 'l', 'a',
+  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1',
+  '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'v', '2', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2',
+  'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
+  'y', '_', 'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
+  '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6',
+  '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'v', '2', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
+  '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 'z', 'e', 'r',
+  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6',
+  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
+  'v', '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
+  'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
+  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2', '_', 'c',
+  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4',
+  'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
+  'y', '_', 'v', '4', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 'c', 'l', 'a', 'm',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4',
+  'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '1', '6', '_', 'c',
+  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i',
+  '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '3', '2', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'i', '3', '2', '_', 'z', 'e', 'r', 'o',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '6', '4', '_',
+  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '8',
+  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '1', '6', '_', 'c',
+  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '2', 'd', '_', 'v', '2', 'i', '1', '6', '_', 'z', 'e', 'r', 'o',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'v', '2', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
+  'd', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'v', '2', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2',
+  'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i',
+  '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '6', '4',
+  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '8', '_', 'c', 'l',
+  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'v', '2', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'v', '4', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v',
+  '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i',
+  '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '3', '2',
+  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '3', '2', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '3', '2', '_', 'z', 'e',
+  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '2', 'd', '_', 'v', '4', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
+  '2', 'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
+  'v', '4', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '1', '6',
+  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '1', '6', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd',
+  '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '3',
+  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '3', '2', '_', 'z', 'e',
+  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3',
+  'd', '_', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '6',
+  '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '8', '_', 'c', 'l', 'a',
+  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
+  'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '1', '6',
+  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '1', '6', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '1', '6', '_', 'z', 'e',
+  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'v', '2', 'i', '3', '2', '_', 'c', 'l', 'a', 'm',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
+  '_', '3', 'd', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3',
+  'd', '_', 'v', '2', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
+  'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v',
+  '2', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i',
+  '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '8', '_',
+  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '8', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'v', '2', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3',
+  'd', '_', 'v', '4', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd',
+  '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v',
+  '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i',
+  '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '3',
+  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '3', '2', '_',
+  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '8', '_', 'c', 'l', 'a',
+  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'b', '_', '3', 'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3',
+  'd', '_', 'v', '4', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a',
+  'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd',
+  '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
+  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'i', '1', '6', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'p', '_', '1', 'd', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
+  '1', 'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '2',
+  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '2', 'i', '3',
+  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '2', 'i', '8', '_', 't',
+  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'p', '_', '1', 'd', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '1', 'd', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1',
+  'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a',
+  'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd',
+  '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
+  '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r',
+  'a', 'y', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_',
+  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2',
+  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
+  's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
+  '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'i', '1', '6', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'p', '_', '2', 'd', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
+  '2', 'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '2',
+  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '2', 'i', '3',
+  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '2', 'i', '8', '_', 't',
+  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'p', '_', '2', 'd', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a',
+  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
+  '_', '2', 'd', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2',
+  'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'i',
+  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
+  's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'i', '3', '2', '_', 't',
+  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
+  '_', 'p', '_', '3', 'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd',
+  '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'v',
+  '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
+  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'v', '2', 'i',
+  '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
+  'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'v', '4', 'i', '1', '6', '_',
+  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
+  't', '_', 'p', '_', '3', 'd', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r',
+  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
+  'p', '_', '3', 'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'w', 'a', 'p', '_', 'l', 'o', '_',
+  'h', 'i', '_', 'b', '6', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't',
+  'r', 'u', 'n', 'c', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't',
+  'r', 'u', 'n', 'c', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't',
+  'r', 'u', 'n', 'c', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 't', 'x', 'q', '_', 'a', 'r', 'r', 'a', 'y', '_', 's', 'i',
+  'z', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'c',
+  'h', 'a', 'n', 'n', 'e', 'l', '_', 'd', 'a', 't', 'a', '_', 't', 'y', 'p',
+  'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'c', 'h',
+  'a', 'n', 'n', 'e', 'l', '_', 'o', 'r', 'd', 'e', 'r', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'd', 'e', 'p', 't', 'h', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'h', 'e', 'i', 'g', 'h',
+  't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'n', 'u',
+  'm', '_', 'm', 'i', 'p', 'm', 'a', 'p', '_', 'l', 'e', 'v', 'e', 'l', 's',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'n', 'u', 'm',
+  '_', 's', 'a', 'm', 'p', 'l', 'e', 's', '\000', '_', '_', 'n', 'v', 'v', 'm',
+  '_', 't', 'x', 'q', '_', 'w', 'i', 'd', 't', 'h', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r', 'm', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r', 'n', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r', 'm', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r', 'n', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r', 'p', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'u', 'l', 'l', '2', 'd', '_', 'r', 'm', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'd', '_', 'r', 'n', '\000', '_', '_',
+  'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'd', '_', 'r', 'p', '\000', '_',
+  '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'd', '_', 'r', 'z', '\000',
+  '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'f', '_', 'r', 'm',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'f', '_', 'r',
+  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'f', '_',
+  'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'f',
+  '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e',
+  '_', 'a', 'l', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't',
+  'e', '_', 'a', 'l', 'l', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v',
+  'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'a', 'n', 'y', '\000', '_', '_', 'n',
+  'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'a', 'n', 'y', '_', 's', 'y',
+  'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_',
+  'b', 'a', 'l', 'l', 'o', 't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v',
+  'o', 't', 'e', '_', 'b', 'a', 'l', 'l', 'o', 't', '_', 's', 'y', 'n', 'c',
+  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'u', 'n',
+  'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'u',
+  'n', 'i', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p',
+  't', 'o', '_', 'v', 'c', 'i', 'p', 'h', 'e', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c',
+  'r', 'y', 'p', 't', 'o', '_', 'v', 'c', 'i', 'p', 'h', 'e', 'r', 'l', 'a',
+  's', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'n',
+  'c', 'i', 'p', 'h', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't',
+  'o', '_', 'v', 'n', 'c', 'i', 'p', 'h', 'e', 'r', 'l', 'a', 's', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'e', 'r', 'm',
+  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v',
+  'p', 'm', 's', 'u', 'm', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't',
+  'o', '_', 'v', 'p', 'm', 's', 'u', 'm', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r',
+  'y', 'p', 't', 'o', '_', 'v', 'p', 'm', 's', 'u', 'm', 'h', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'm', 's', 'u', 'm', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 's', 'b', 'o',
+  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 's', 'h',
+  'a', 's', 'i', 'g', 'm', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p',
+  't', 'o', '_', 'v', 's', 'h', 'a', 's', 'i', 'g', 'm', 'a', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'd', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'd', 's', 's', 'a', 'l', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'd', 's', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'd', 's', 't', 's',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'd', 's', 't', 's', 't', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'd', 's', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'm', 'f', 'v', 's', 'c', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'm', 't', 'v', 's', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a',
+  'b', 's', 'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'b', 's', 'd', 'u',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'a', 'b', 's', 'd', 'u', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'a', 'd', 'd', 'c', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd',
+  'd', 'c', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'e', 'c', 'u',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'e', 'u', 'q', 'm', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'a', 'd', 'd', 's', 'b', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a',
+  'd', 'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 's', 'w',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd',
+  'd', 'u', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v', 'g', 's', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'a', 'v', 'g', 's', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a',
+  'v', 'g', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v', 'g', 'u', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'a', 'v', 'g', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a',
+  'v', 'g', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'b', 'p', 'e', 'r', 'm', 'q',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'c', 'f', 's', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
+  'f', 'u', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'l', 'z', 'l', 's', 'b', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'b', 'f', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'c', 'm', 'p', 'b', 'f', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
+  'm', 'p', 'e', 'q', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e',
+  'q', 'f', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'b', '_',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'd', '_', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
+  'm', 'p', 'e', 'q', 'u', 'h', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
+  'p', 'e', 'q', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q',
+  'u', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 'e', 'f',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 'e', 'f', 'p', '_', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'f', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'c', 'm', 'p', 'g', 't', 'f', 'p', '_', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'c', 'm', 'p', 'g', 't', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
+  'p', 'g', 't', 's', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
+  'g', 't', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's',
+  'd', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'h', '_', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'c', 'm', 'p', 'g', 't', 's', 'w', '_', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'c', 'm', 'p', 'g', 't', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
+  'g', 't', 'u', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g',
+  't', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'd',
+  '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'h', '_', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'c', 'm', 'p', 'g', 't', 'u', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
+  'm', 'p', 'n', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e',
+  'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'h', '_', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'c', 'm', 'p', 'n', 'e', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
+  'p', 'n', 'e', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n',
+  'e', 'z', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'b',
+  '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'h', '_', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'c', 'm', 'p', 'n', 'e', 'z', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
+  't', 's', 'x', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 't', 'u', 'x', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'c', 't', 'z', 'l', 's', 'b', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'e', 'x', 'p', 't', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'g', 'b',
+  'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'l', 'o', 'g', 'e', 'f', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'm', 'a', 'd', 'd', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
+  'a', 'x', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 's', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'm', 'a', 'x', 's', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
+  'a', 'x', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 's', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'm', 'a', 'x', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
+  'a', 'x', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 'u', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'm', 'a', 'x', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
+  'h', 'a', 'd', 'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'h', 'r',
+  'a', 'd', 'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'f',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'm', 'i', 'n', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'm', 'i', 'n', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'u',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'u', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'm', 'i', 'n', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'l', 'a', 'd',
+  'd', 'u', 'h', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'm', 'b',
+  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 's', 'h', 'm', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'm', 's', 'u', 'm', 's', 'h', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'm', 's', 'u', 'm', 'u', 'b', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u',
+  'm', 'u', 'h', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'u', 'h',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 's', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'm', 'u', 'l', 'e', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u',
+  'l', 'e', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 'u', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 'u', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'm', 'u', 'l', 'e', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l',
+  'o', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 's', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 's', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'm', 'u', 'l', 'o', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o',
+  'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 'u', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'n', 'm', 's', 'u', 'b', 'f', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'p', 'e', 'r', 'm', '_', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k',
+  'p', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 's', 'd', 's', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'p', 'k', 's', 'd', 'u', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p',
+  'k', 's', 'h', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 's', 'h', 'u',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 's', 'w', 's', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'p', 'k', 's', 'w', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k',
+  'u', 'd', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 'u', 'h', 'u', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'p', 'k', 'u', 'w', 'u', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'p', 'r', 't', 'y', 'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'r', 't',
+  'y', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'r', 't', 'y', 'b', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'r', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'f',
+  'i', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'f', 'i', 'n', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'r', 'f', 'i', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'f', 'i', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 'r', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'r', 'l', 'd', 'm', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
+  'r', 'l', 'd', 'n', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'r', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'w', 'm',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'w', 'n', 'm', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 'r', 's', 'q', 'r', 't', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's',
+  'e', 'l', '_', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 's', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 's', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', 'v',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 's', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 's', 'r', 'a', 'b', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's',
+  'r', 'a', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'a', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 's', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 's', 'r', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'v', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 's', 'r', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b',
+  'c', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'c', 'u', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 's', 'u', 'b', 'e', 'c', 'u', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 's', 'u', 'b', 'e', 'u', 'q', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u',
+  'b', 's', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 's', 'h', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 's', 'w', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 's', 'u', 'b', 'u', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b',
+  'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
+  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'u', 'w', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
+  'e', 'c', '_', 'v', 's', 'u', 'm', '2', 's', 'w', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
+  'v', 's', 'u', 'm', '4', 's', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u',
+  'm', '4', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'm', '4', 'u',
+  'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
+  't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'm', 's', 'w', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
+  'c', '_', 'v', 'u', 'p', 'k', 'h', 'p', 'x', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u',
+  'p', 'k', 'h', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'h', 's',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
+  'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'h', 's', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
+  '_', 'v', 'u', 'p', 'k', 'l', 'p', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p',
+  'k', 'l', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'l', 's', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
+  'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'l', 's', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'e', 'r', 'm', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v', 'd', 'e', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v', 'd', 'e',
+  'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v',
+  'w', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i',
+  'v', 'w', 'e', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'g', 'e', 't', '_', 't', 'e', 'x', 'a', 's', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'g', 'e', 't', '_', 't', 'e', 'x', 'a', 's',
+  'r', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'g', 'e',
+  't', '_', 't', 'f', 'h', 'a', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'g', 'e', 't', '_', 't', 'f', 'i', 'a', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f',
+  'a', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q',
+  'p', 'x', '_', 'q', 'v', 'f', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'a', 'd', 'd',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'c', 'f', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 'f', 'i', 'd',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'c', 'f', 'i', 'd', 'u', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 'f', 'i',
+  'd', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q',
+  'p', 'x', '_', 'q', 'v', 'f', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c',
+  'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 'm', 'p', 'l', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f',
+  'c', 'p', 's', 'g', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 't', 'i', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f',
+  'c', 't', 'i', 'd', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 't', 'i', 'd', 'u', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q',
+  'v', 'f', 'c', 't', 'i', 'd', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 't', 'i', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q',
+  'v', 'f', 'c', 't', 'i', 'w', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'c', 't', 'i', 'w', 'u',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'c', 't', 'i', 'w', 'z', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'l', 'o', 'g',
+  'i', 'c', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'm', 'a', 'd', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'm',
+  'a', 'd', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'm', 's', 'u', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'm',
+  's', 'u', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'm', 'u', 'l', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'm', 'u',
+  'l', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 'f', 'n', 'a', 'b', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'n', 'e', 'g',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_',
+  'q', 'v', 'f', 'n', 'm', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'n', 'm', 'a', 'd',
+  'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 'f', 'n', 'm', 's', 'u', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'n', 'm',
+  's', 'u', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'p', 'e', 'r', 'm', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'r',
+  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'r', 'e', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'r', 'i', 'm', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v',
+  'f', 'r', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'f', 'r', 'i', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'r', 'i',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'r', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'r', 's', 'q', 'r', 't',
+  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'f', 'r', 's', 'q', 'r', 't', 'e', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 's',
+  'e', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 'f', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 's', 'u', 'b', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_',
+  'q', 'v', 'f', 't', 's', 't', 'n', 'a', 'n', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x', 'm', 'a',
+  'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 'f', 'x', 'm', 'a', 'd', 'd', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x',
+  'm', 'u', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q',
+  'p', 'x', '_', 'q', 'v', 'f', 'x', 'm', 'u', 'l', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x',
+  'x', 'c', 'p', 'n', 'm', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x', 'x', 'c', 'p',
+  'n', 'm', 'a', 'd', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x', 'x', 'm', 'a', 'd', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_',
+  'q', 'v', 'f', 'x', 'x', 'm', 'a', 'd', 'd', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x', 'x',
+  'n', 'p', 'm', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'f', 'x', 'x', 'n', 'p', 'm', 'a',
+  'd', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q',
+  'p', 'x', '_', 'q', 'v', 'g', 'p', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'c', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_',
+  'q', 'v', 'l', 'f', 'c', 'd', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'c', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v',
+  'l', 'f', 'c', 's', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'd',
+  'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'l', 'f', 'i', 'w', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'i', 'w', 'a',
+  'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'l', 'f', 'i', 'w', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 'i', 'w', 'z',
+  'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 'l', 'f', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'f', 's', 'a', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l',
+  'p', 'c', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'q', 'p', 'x', '_', 'q', 'v', 'l', 'p', 'c', 'l', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 'l', 'p',
+  'c', 'r', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q',
+  'p', 'x', '_', 'q', 'v', 'l', 'p', 'c', 'r', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 's', 't', 'f',
+  'c', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 's', 't', 'f', 'c', 'd', 'a', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 's', 't', 'f',
+  'c', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p',
+  'x', '_', 'q', 'v', 's', 't', 'f', 'c', 's', 'a', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 's', 't', 'f',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x',
+  '_', 'q', 'v', 's', 't', 'f', 'd', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 's', 't', 'f', 'i', 'w',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_',
+  'q', 'v', 's', 't', 'f', 'i', 'w', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q', 'v', 's', 't', 'f', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'q', 'p', 'x', '_', 'q',
+  'v', 's', 't', 'f', 's', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', 'e', 't', '_', 't', 'e', 'x', 'a', 's', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'e', 't', '_', 't', 'e', 'x',
+  'a', 's', 'r', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', 'e', 't', '_', 't', 'f', 'h', 'a', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 's', 'e', 't', '_', 't', 'f', 'i', 'a', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o', 'r',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b',
+  'o', 'r', 't', 'd', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 't', 'a', 'b', 'o', 'r', 't', 'd', 'c', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o', 'r', 't', 'w', 'c', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o', 'r',
+  't', 'w', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  't', 'b', 'e', 'g', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 't', 'c', 'h', 'e', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 't', 'e', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 't', 'e', 'n', 'd', 'a', 'l', 'l', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'r', 'e', 'c', 'h', 'k', 'p', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'r', 'e', 'c',
+  'l', 'a', 'i', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  't', 'r', 'e', 's', 'u', 'm', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 't', 's', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 't', 's', 'u', 's', 'p', 'e', 'n', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 't', 't', 'e', 's', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 's', 'm', 'a',
+  'x', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
+  's', 'x', '_', 'x', 's', 'm', 'i', 'n', 'd', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p',
+  'e', 'q', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'e', 'q', 'd', 'p', '_', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
+  'x', 'v', 'c', 'm', 'p', 'e', 'q', 's', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'e',
+  'q', 's', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 'e', 'd', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
+  'v', 'c', 'm', 'p', 'g', 'e', 'd', 'p', '_', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p',
+  'g', 'e', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 'e', 's', 'p', '_', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
+  'x', 'v', 'c', 'm', 'p', 'g', 't', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g',
+  't', 'd', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 't', 's', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
+  'v', 'c', 'm', 'p', 'g', 't', 's', 'p', '_', 'p', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'd',
+  'p', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
+  's', 'x', '_', 'x', 'v', 'c', 'v', 'd', 'p', 's', 'x', 'w', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
+  'c', 'v', 'd', 'p', 'u', 'x', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'h', 'p', 's',
+  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x',
+  '_', 'x', 'v', 'c', 'v', 's', 'p', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 's', 'p',
+  'h', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's',
+  'x', '_', 'x', 'v', 'c', 'v', 's', 'x', 'd', 's', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v',
+  's', 'x', 'w', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'u', 'x', 'd', 's', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
+  'v', 'c', 'v', 'u', 'x', 'w', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'd', 'i', 'v', 'd', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
+  'x', 'v', 'd', 'i', 'v', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'i', 'e', 'x', 'p', 'd', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
+  'x', 'v', 'i', 'e', 'x', 'p', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'm', 'a', 'x', 'd', 'p',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
+  'x', 'v', 'm', 'a', 'x', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'm', 'i', 'n', 'd', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
+  'v', 'm', 'i', 'n', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'r', 'e', 'd', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'r',
+  'e', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
+  's', 'x', '_', 'x', 'v', 'r', 's', 'q', 'r', 't', 'e', 'd', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
+  'r', 's', 'q', 'r', 't', 'e', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 't', 's', 't', 'd', 'c',
+  'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's',
+  'x', '_', 'x', 'v', 't', 's', 't', 'd', 'c', 's', 'p', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'x', 'e',
+  'x', 'p', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'v', 's', 'x', '_', 'x', 'v', 'x', 'e', 'x', 'p', 's', 'p', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'x',
+  's', 'i', 'g', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'v', 's', 'x', '_', 'x', 'v', 'x', 's', 'i', 'g', 's', 'p', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x',
+  'e', 'x', 't', 'r', 'a', 'c', 't', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'i', 'n', 's', 'e',
+  'r', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
+  's', 'x', '_', 'x', 'x', 'l', 'e', 'q', 'v', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'g', 'r', 'o', 'u', 'p',
+  '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'i', 'm', 'p', 'l', 'i', 'c',
+  'i', 't', 'a', 'r', 'g', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'a', 't', '_', 's',
+  't', 'o', 'r', 'e', '_', 't', 'y', 'p', 'e', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd',
+  '_', 'g', 'l', 'o', 'b', 'a', 'l', '_', 's', 'i', 'z', 'e', '_', 'x', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_',
+  'r', 'e', 'a', 'd', '_', 'g', 'l', 'o', 'b', 'a', 'l', '_', 's', 'i', 'z',
+  'e', '_', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r',
+  '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'g', 'l', 'o', 'b', 'a', 'l',
+  '_', 's', 'i', 'z', 'e', '_', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'n', 'g',
+  'r', 'o', 'u', 'p', 's', '_', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'n', 'g',
+  'r', 'o', 'u', 'p', 's', '_', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'n', 'g',
+  'r', 'o', 'u', 'p', 's', '_', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 't', 'g',
+  'i', 'd', '_', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 't', 'g', 'i', 'd', '_',
+  'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0',
+  '0', '_', 'r', 'e', 'a', 'd', '_', 't', 'g', 'i', 'd', '_', 'z', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'e',
+  'f', 'p', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't',
+  'x', '_', 'n', 'e', 's', 't', 'i', 'n', 'g', '_', 'd', 'e', 'p', 't', 'h',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'l', 'c', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 't', 'x', '_', 'a', 's', 's', 'i', 's', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 's', 'f', 'p', 'c',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'a', 'c', 'c', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'c', 'q', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'a', 'c', 'c', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'g', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'c',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'a', 'c', 'c', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'a', 'v', 'g', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g',
+  'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 'a', 'v', 'g', 'l', 'f', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'l',
+  'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'a', 'v', 'g', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'b', 'p', 'e', 'r', 'm',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'c', 'k', 's', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'i', 'm', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'e', 'r', 'i', 'm', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'i', 'm', 'g', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e',
+  'r', 'i', 'm', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r',
+  'l', 'l', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'g', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l',
+  'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'v', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l',
+  'l', 'v', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'v', 'g', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r',
+  'l', 'l', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'f', 'a', 'e', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e',
+  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'f', 'a', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e', 'z', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'f', 'a', 'e', 'z', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e', 'z', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'f', 'e', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'f', 'e', 'e', 'f', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'e',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'f', 'e', 'e', 'z', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'e', 'z', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'f', 'e', 'e', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'f', 'e', 'n', 'e', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'f', 'e', 'n', 'e', 'z', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'z', 'f', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'f', 'e', 'n', 'e', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'a', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'g', 'f', 'm', 'a', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'a', 'g', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'g', 'f', 'm', 'a', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f',
+  'm', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 'g', 'f', 'm', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'h', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'i', 's', 't', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'i', 's', 't', 'r', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'i', 's', 't', 'r', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 'l', 'b', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'l', 'l', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'l', 'r', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'm', 'a', 'e', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'e',
+  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'a', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'h', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'm', 'a', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'm', 'a', 'h', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l',
+  'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 'm', 'a', 'l', 'e', 'f', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'e',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'a', 'l', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'h', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'm', 'a', 'l', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'o', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'm', 'a', 'l', 'o', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'o', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'm', 'a', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'm', 'a', 'o', 'f', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'o',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'e', 'f', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'e',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'h', 'f', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'h',
+  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'l', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'e', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'm', 'l', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'm', 'l', 'h', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'h',
+  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'm', 'l', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'o', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'm', 'l', 'o', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'm', 'l', 'o', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'o', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'm', 'o', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 'm', 'o', 'h', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 's', 'l',
+  'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'p', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'e', 'r', 'm', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p',
+  'k', 'l', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'p', 'k', 'l', 's', 'g', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'k',
+  'l', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 'p', 'k', 's', 'f', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'k', 's', 'g',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'p', 'k', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'b', 'c', 'b', 'i', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 's', 'b', 'i', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 's', 'c', 'b', 'i', 'b', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's',
+  'c', 'b', 'i', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 's', 'c', 'b', 'i', 'g', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'c',
+  'b', 'i', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 's', 'c', 'b', 'i', 'q', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'l', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 's', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 's', 'l', 'd', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 's', 'r', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 's', 'r', 'a', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'r', 'l',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 's', 'r', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't', 'l', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't',
+  'r', 'c', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 's', 't', 'r', 'c', 'f', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't', 'r',
+  'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 's', 't', 'r', 'c', 'z', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't', 'r',
+  'c', 'z', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
+  '3', '9', '0', '_', 'v', 's', 't', 'r', 'c', 'z', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't',
+  'r', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
+  '9', '0', '_', 'v', 's', 'u', 'm', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'g', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 's', 'u', 'm', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'h', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  's', 'u', 'm', 'q', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'q', 'g', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 't',
+  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'u', 'p', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'h', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'u', 'p', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l',
+  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
+  '0', '_', 'v', 'u', 'p', 'l', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'h', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
+  '_', 'v', 'u', 'p', 'l', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'h', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
+  'v', 'u', 'p', 'l', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'l', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
+  'u', 'p', 'l', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'u', 's', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'f', '2', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'f', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'a', 'd',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'f', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'c', 'm', 'p',
+  'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'f', 'c', 'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'm', 'a',
+  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'f', 'm', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'm', 'u', 'l', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'f', 'r', 'c', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'f', 'r', 'c', 'p', 'i', 't', '1', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'f', 'r', 'c', 'p', 'i', 't', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'r', 's', 'q', 'i', 't',
+  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'f', 'r', 's', 'q', 'r', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 's', 'u', 'b',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'f', 's', 'u', 'b', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'i', '2', 'f', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'm', 'u', 'l', 'h', 'r', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', '2', 'i', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f',
+  'n', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'f', 'p', 'n', 'a', 'c', 'c', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'i',
+  '2', 'f', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', '_', 'u', '3',
+  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', '_', 'u', '6', '4', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', 'x', '_', 'u', '3', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a',
+  'd', 'd', 'c', 'a', 'r', 'r', 'y', 'x', '_', 'u', '6', '4', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e',
+  's', 'd', 'e', 'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'd', 'e', 'c', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'a', 'e', 's', 'd', 'e', 'c', '5', '1', '2', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e',
+  's', 'd', 'e', 'c', 'l', 'a', 's', 't', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's',
+  'd', 'e', 'c', 'l', 'a', 's', 't', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'd',
+  'e', 'c', 'l', 'a', 's', 't', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n',
+  'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'a', 'e', 's', 'e', 'n', 'c', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n',
+  'c', 'l', 'a', 's', 't', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c',
+  'l', 'a', 's', 't', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c', 'l',
+  'a', 's', 't', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'i', 'm', 'c', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'a', 'e', 's', 'k', 'e', 'y', 'g', 'e', 'n', 'a', 's', 's', 'i',
+  's', 't', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'b', 'l', 'e', 'n', 'd', 'v', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'l', 'e',
+  'n', 'd', 'v', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2',
+  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'v', 't', 'd', 'q', '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
+  't', 'p', 'd', '2', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p',
+  's', '2', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'p', 's', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'h', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h', 'a', 'd', 'd',
+  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'h', 's', 'u', 'b', 'p', 'd', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'h', 's', 'u', 'b', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'd', 'd', 'q',
+  'u', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 'd',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 'd', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 's', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k',
+  'l', 'o', 'a', 'd', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's',
+  't', 'o', 'r', 'e', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r',
+  'e', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r',
+  'e', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'p', 's',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'a', 'x', 'p', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a',
+  'x', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 'd', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'm', 'i', 'n', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's',
+  'k', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's', 'k', 'p', 's',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 't', 'e', 's', 't', 'c', '2', '5', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  't', 'e', 's', 't', 'n', 'z', 'c', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 's',
+  't', 'z', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p', 's', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'r', 'o', 'u', 'n', 'd', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n',
+  'd', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', 'p', 's', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'q', 'r', 't', 'p', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q',
+  'r', 't', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l',
+  'v', 'a', 'r', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a',
+  'r', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v',
+  'a', 'r', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a', 'r',
+  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'c', 'p', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 't', 'e', 's', 't', 'c', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e',
+  's', 't', 'c', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'c', 'p', 's', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 't', 'e', 's', 't', 'n', 'z', 'c', 'p', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  't', 'e', 's', 't', 'n', 'z', 'c', 'p', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't',
+  'e', 's', 't', 'n', 'z', 'c', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'n',
+  'z', 'c', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'z', 'p',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 't', 'e', 's', 't', 'z', 'p', 'd', '2', '5', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  't', 'e', 's', 't', 'z', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'z', 'p',
+  's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'z', 'e', 'r', 'o', 'a', 'l', 'l', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'z', 'e', 'r', 'o', 'u', 'p', 'p', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e',
+  'r', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'd', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'p', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
+  'a', 't', 'h', 'e', 'r', 'd', '_', 'p', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a',
+  't', 'h', 'e', 'r', 'd', '_', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
+  'd', '_', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd',
+  '_', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'q', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h',
+  'e', 'r', 'q', '_', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
+  'q', '_', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 'd',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 's', '2', '5', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
+  'a', 't', 'h', 'e', 'r', 'q', '_', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
+  'q', '_', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'd', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l',
+  'o', 'a', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o',
+  'r', 'e', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'd', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'q', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
+  'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'q', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'p',
+  's', 'a', 'd', 'b', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's',
+  'd', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'w', 'b', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's', 'd', 'w', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'a', 'c', 'k', 'u', 's', 'w', 'b', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd',
+  'd', 's', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 's', 'w', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'a', 'd', 'd', 'u', 's', 'b', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
+  'd', 'd', 'u', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'b', 'l', 'e', 'n', 'd',
+  'v', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'i',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'f', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'h', 'a', 'd', 'd', 'd', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a',
+  'd', 'd', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'w', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'h', 's', 'u', 'b', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h',
+  's', 'u', 'b', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 'w',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's', 'w', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '2', '5', '6', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'm', 's', 'k', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'd',
+  'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'r', 's', 'w', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'm', 'u', 'l', 'h', 'w', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u',
+  'l', 'h', 'u', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'u', 'd', 'q',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'a', 'd', 'b', 'w', '2', '5', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'h', 'u', 'f', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'b',
+  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'd', '2', '5', '6', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'i', 'g', 'n', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'l', 'l', 'q', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
+  'l', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', 'i', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 's', 'l', 'l', 'q', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l',
+  'w', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '4', 's', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'l', 'l', 'v', '8', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '2',
+  'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'l', 'l', 'v', '4', 'd', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
+  'a', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'a', 'd', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w',
+  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '4', 's', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'r', 'a', 'v', '8', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'd', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'r', 'l', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l',
+  'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'd', 'i', '2', '5', '6', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'l', 'q', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w',
+  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '4', 's', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'r', 'l', 'v', '8', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '2', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'r', 'l', 'v', '4', 'd', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b',
+  's', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's', 'w', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 's', 'u', 'b', 'u', 's', 'b', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u',
+  'b', 'u', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a',
+  's', 't', 'm', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 'i', '2', 's',
+  'd', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 's', 'i', '2', 's', 's', '3', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'v', 't', 's', 'i', '2', 's', 's', '6', '4', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
+  't', 's', 'd', '2', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's',
+  'd', '2', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 'd', '2',
+  'u', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 'd', '2', 'u',
+  's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i',
+  '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i', '6', '4',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'c', 'v', 't', 't', 's', 's', '2', 'u', 's', 'i', '3', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'c', 'v', 't', 't', 's', 's', '2', 'u', 's', 'i', '6', '4', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'u', 's', 'i', '2', 's', 'd', '3', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u',
+  's', 'i', '2', 's', 's', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 's', 'i', '2',
+  's', 'd', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 's', 'i', '2', 's', 's', '6',
+  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'e', 'x', 'p', '2', 'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e',
+  'x', 'p', '2', 'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h',
+  'e', 'r', 's', 'i', 'v', '8', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
+  's', 'i', 'v', '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 's',
+  'i', 'v', '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 's', 'i', 'v',
+  '1', '6', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', 'i', 'v', '8',
+  'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', 'i', 'v', '1', '6', 's',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', 'i', 'v', '8', 'd', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'g', 'a', 't', 'h', 'e', 'r', 'd', 'i', 'v', '1', '6', 's', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
+  'a', 't', 'h', 'e', 'r', '3', 'd', 'i', 'v', '2', 'd', 'f', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a',
+  't', 'h', 'e', 'r', '3', 'd', 'i', 'v', '2', 'd', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't',
+  'h', 'e', 'r', '3', 'd', 'i', 'v', '4', 'd', 'f', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h',
+  'e', 'r', '3', 'd', 'i', 'v', '4', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e',
+  'r', '3', 'd', 'i', 'v', '4', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
+  '3', 'd', 'i', 'v', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3',
+  'd', 'i', 'v', '8', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 'd',
+  'i', 'v', '8', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i',
+  'v', '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v',
+  '2', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '4',
+  'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '4', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '4', 's', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '4', 's', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'g', 'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '8', 's', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
+  'a', 't', 'h', 'e', 'r', '3', 's', 'i', 'v', '8', 's', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a',
+  't', 'h', 'e', 'r', 'p', 'f', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e',
+  'r', 'p', 'f', 'd', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'p', 'f',
+  'q', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'p', 'f', 'q', 'p', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'a', 'd', 'd', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a',
+  'd', 'd', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'a', 'd', 'd', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'm', 'p', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'm', 'p', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r',
+  'e', 's', 's', 'q', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'o', 'm', 'p', 'r', 'e', 's', 's', 'q', 'i', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 'q', 'i', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's',
+  's', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p',
+  'r', 'e', 's', 's', 's', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 'd', 'f', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's',
+  's', 'd', 'f', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm',
+  'p', 'r', 'e', 's', 's', 'd', 'f', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 'f', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 'f',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e',
+  's', 's', 's', 'f', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
+  'm', 'p', 'r', 'e', 's', 's', 'd', 'i', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 'd', 'i', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 'd',
+  'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r',
+  'e', 's', 's', 's', 't', 'o', 'r', 'e', 'q', 'i', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o',
+  'r', 'e', 'q', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
+  'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 'q', 'i', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's',
+  's', 't', 'o', 'r', 'e', 's', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 's',
+  'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r',
+  'e', 's', 's', 's', 't', 'o', 'r', 'e', 's', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o',
+  'r', 'e', 'd', 'f', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
+  'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 'd', 'f', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's',
+  's', 't', 'o', 'r', 'e', 'd', 'f', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 's',
+  'f', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r',
+  'e', 's', 's', 's', 't', 'o', 'r', 'e', 's', 'f', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o',
+  'r', 'e', 's', 'f', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
+  'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 'd', 'i', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's',
+  's', 't', 'o', 'r', 'e', 'd', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 'd',
+  'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r',
+  'e', 's', 's', 's', 't', 'o', 'r', 'e', 'h', 'i', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o',
+  'r', 'e', 'h', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
+  'm', 'p', 'r', 'e', 's', 's', 's', 't', 'o', 'r', 'e', 'h', 'i', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p', 'r', 'e', 's', 's',
+  'h', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'p',
+  'r', 'e', 's', 's', 'h', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'o', 'm', 'p', 'r', 'e', 's', 's', 'h', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 's',
+  'i', '_', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o',
+  'n', 'f', 'l', 'i', 'c', 't', 's', 'i', '_', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 's', 'i',
+  '_', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'n',
+  'f', 'l', 'i', 'c', 't', 'd', 'i', '_', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 'd', 'i', '_',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'n', 'f',
+  'l', 'i', 'c', 't', 'd', 'i', '_', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 'd', 'q', '2', 'p', 's', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 's',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 's', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'q',
+  'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd',
+  '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
+  'p', 'd', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'p', 'd', '2', 'u', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'd', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'd', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'u',
+  'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p',
+  'd', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'p', 'd', '2', 'u', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's',
+  '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
+  'p', 's', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'p', 's', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'd', 'q', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'd', 'q', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'd',
+  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's',
+  '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
+  't', 'p', 's', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'q', 'q', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 'q', 'q', '2', 'p', 'd', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'q', 'q', '2', 'p', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'q', 'q', '2', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'q', 'q',
+  '2', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
+  'q', 'q', '2', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'q', 'q', '2', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 's', 'd', '2', 's', 's', '_', 'r', 'o', 'u', 'n', 'd',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 's', '2', 's', 'd', '_',
+  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't',
+  'p', 'd', '2', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 't', 'p', 'd', '2', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'q', 'q', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'q', 'q', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2',
+  'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't',
+  'p', 'd', '2', 'u', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'd', 'q', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'd', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2',
+  'u', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
+  't', 'p', 'd', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'q', 'q', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'd', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2',
+  'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't',
+  'p', 's', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 't', 'p', 's', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u', 'd', 'q', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u', 'd',
+  'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p',
+  's', '2', 'u', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 't', 'p', 's', '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u', 'q', 'q', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u',
+  'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u',
+  'd', 'q', '2', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'u', 'd', 'q', '2', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 'u', 'd', 'q', '2', 'p', 's', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 'q', 'q', '2', 'p', 'd', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 'q', 'q', '2',
+  'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u',
+  'q', 'q', '2', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 'u', 'q', 'q', '2', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 'u', 'q', 'q', '2', 'p', 's', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 'q', 'q', '2', 'p', 's', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'b', 'p', 's', 'a', 'd', 'b',
+  'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'b', 'p', 's', 'a',
+  'd', 'b', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'b', 'p',
+  's', 'a', 'd', 'b', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd',
+  'i', 'v', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'i',
+  'v', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'i', 'v',
+  's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd',
+  'i', 'v', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'e', 'x', 'p', 'a', 'n', 'd', 'q', 'i', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'q', 'i', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'q', 'i', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 's', 'i',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd',
+  's', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a',
+  'n', 'd', 's', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x',
+  'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'q', 'i', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'q',
+  'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n',
+  'd', 'l', 'o', 'a', 'd', 'q', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 's', 'i', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o',
+  'a', 'd', 's', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x',
+  'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 's', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'd',
+  'f', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n',
+  'd', 'l', 'o', 'a', 'd', 'd', 'f', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'd', 'f', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o',
+  'a', 'd', 's', 'f', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x',
+  'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 's', 'f', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 's',
+  'f', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n',
+  'd', 'l', 'o', 'a', 'd', 'd', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'd', 'i', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o',
+  'a', 'd', 'd', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x',
+  'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'h', 'i', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'l', 'o', 'a', 'd', 'h',
+  'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n',
+  'd', 'l', 'o', 'a', 'd', 'h', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'e', 'x', 'p', 'a', 'n', 'd', 'd', 'f', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'd', 'f', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'd', 'f', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 's', 'f',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd',
+  's', 'f', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a',
+  'n', 'd', 's', 'f', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x',
+  'p', 'a', 'n', 'd', 'd', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'e', 'x', 'p', 'a', 'n', 'd', 'd', 'i', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'd', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'h', 'i', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'h', 'i', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', 'a', 'n', 'd', 'h',
+  'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p',
+  'i', 'm', 'm', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f',
+  'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm',
+  'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u',
+  'p', 'i', 'm', 'm', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 's', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 's', 'd', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 's', 's',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 'p', 'd',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's',
+  's', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c',
+  'l', 'a', 's', 's', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'f', 'p', 'c', 'l', 'a', 's', 's', 'p', 's', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 'p', 's', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 'p', 's',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's',
+  's', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's',
+  's', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p',
+  'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e',
+  'x', 'p', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e',
+  't', 'e', 'x', 'p', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'g', 'e', 't', 'e', 'x', 'p', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 'p', 's', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 'p', 's', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 's', 'd', '1',
+  '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
+  'e', 't', 'e', 'x', 'p', 's', 's', '1', '2', '8', '_', 'r', 'o', 'u', 'n',
+  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p',
+  'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a',
+  'n', 't', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e',
+  't', 'm', 'a', 'n', 't', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 's', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 's', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p',
+  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a',
+  'n', 't', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 's', 's', '_', 'r', 'o', 'u', 'n',
+  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 'p', 'd', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 'p', 's', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 'd', '_', 'r', 'o', 'u', 'n',
+  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 's', '_', 'r', 'o',
+  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 'd', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 's', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 's', 'd', '_', 'r', 'o',
+  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 's', 's', '_',
+  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 'p', 's',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 's', 'd', '_',
+  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 's',
+  's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
+  'd', 'd', 's', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
+  'd', 'd', 's', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
+  'd', 'd', 'u', 's', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'a', 'd', 'd', 'u', 's', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'e', 'r', 'm', 'v', 'a', 'r', 'd', 'f', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'd', 'f', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'd', 'i',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a',
+  'r', 'd', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r',
+  'm', 'v', 'a', 'r', 'h', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'e', 'r', 'm', 'v', 'a', 'r', 'h', 'i', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'h', 'i', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'q', 'i',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a',
+  'r', 'q', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r',
+  'm', 'v', 'a', 'r', 'q', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'f', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'i', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's', 'w',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'w',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
+  'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
+  'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
+  'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
+  'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'd', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'd', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'w', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'w', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'w', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'w', '1', '2', '8', 'm', 'e',
+  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'w', '2', '5',
+  '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
+  'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'q', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'q', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'q', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'q', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'q', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '5', '1', '2', 'm', 'e',
+  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'd', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'd', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'd', '1', '2',
+  '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q',
+  'd', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'q', 'd', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'q', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'q', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'q', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'q', 'w', '1', '2', '8', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'w', '2', '5', '6', 'm', 'e',
+  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'w', '5', '1',
+  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w',
+  'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w',
+  'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w',
+  'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w',
+  'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'w', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'w', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'd', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'd', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'd', 'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'd', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'q', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'q', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'q', 'd', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'q', 'd', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'q', 'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'q', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '1',
+  '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  's', 'w', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 's', 'w', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd',
+  'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
+  's', 'd', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '2', '5', '6', 'm', 'e', 'm', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '5', '1',
+  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
+  's', 'd', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
+  'v', 'u', 's', 'd', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'm', 'o', 'v', 'u', 's', 'd', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '1', '2', '8', 'm', 'e', 'm',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '2',
+  '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  'u', 's', 'd', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b',
+  '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
+  'v', 'u', 's', 'q', 'b', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b', '5', '1', '2', 'm', 'e',
+  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'd',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's',
+  'q', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
+  'u', 's', 'q', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'u', 's', 'q', 'd', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'd', '2', '5', '6', 'm',
+  'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q',
+  'd', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'o', 'v', 'u', 's', 'q', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '1', '2', '8',
+  'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's',
+  'q', 'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'm', 'o', 'v', 'u', 's', 'q', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'w',
+  'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
+  's', 'w', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '2', '5', '6', 'm', 'e', 'm', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '5', '1',
+  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'u', 'l',
+  't', 'i', 's', 'h', 'i', 'f', 't', 'q', 'b', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'm', 'u', 'l', 't', 'i', 's', 'h', 'i', 'f', 't',
+  'q', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'u',
+  'l', 't', 'i', 's', 'h', 'i', 'f', 't', 'q', 'b', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'r', 'o', 'l', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'r', 'o', 'l', 'd', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'r', 'o', 'l', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'r', 'o', 'l', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'r', 'o', 'l', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'r', 'o', 'l', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'l', 'v', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r',
+  'o', 'r', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o',
+  'r', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'q',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'q', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'q', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'd', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'd', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'q', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'q', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'r', 'o', 'r', 'v', 'q', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's', 'w', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'u', 's', 'b', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'u', 's', 'w',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l',
+  'o', 'g', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e',
+  'r', 'n', 'l', 'o', 'g', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'd', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l',
+  'o', 'g', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'a', 'n',
+  'g', 'e', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'a',
+  'n', 'g', 'e', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
+  'a', 'n', 'g', 'e', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'r', 'a', 'n', 'g', 'e', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'r', 'a', 'n', 'g', 'e', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 's', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 's', 'd', '1', '2', '8', '_', 'r',
+  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'a', 'n', 'g', 'e',
+  's', 's', '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'd', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'd', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'd', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c',
+  'e', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd',
+  'u', 'c', 'e', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
+  'e', 'd', 'u', 'c', 'e', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e',
+  'd', 'u', 'c', 'e', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd',
+  's', 'c', 'a', 'l', 'e', 'p', 'd', '_', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 'd', '_', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e',
+  'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l',
+  'e', 'p', 's', '_', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n',
+  'd', 's', 'c', 'a', 'l', 'e', 'p', 's', '_', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 's', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 's', 'd', '_',
+  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's',
+  'c', 'a', 'l', 'e', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'd', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'd', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'd',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f',
+  'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l',
+  'e', 'f', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c',
+  'a', 'l', 'e', 'f', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  's', 'c', 'a', 'l', 'e', 'f', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 's', 's', '_', 'r',
+  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 'p',
+  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 's',
+  'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q',
+  'r', 't', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 's', 't', 'o', 'r', 'e', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
+  'u', 'b', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u',
+  'b', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u', 'b',
+  's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
+  'u', 'b', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p',
+  'h', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p',
+  'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p',
+  's', '2', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'm', 'a', 'd', 'd', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p',
+  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 's', 'd', '3', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 's', 's', '3', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 's', 'u', 'b', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p',
+  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 'a',
+  'd', 'd', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'n', 'm', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', 'p', 's', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', 'p',
+  's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 'a',
+  'd', 'd', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'n', 'm', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p', 'd', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's',
+  'u', 'b', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'n', 'm', 's', 'u', 'b', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b',
+  'u', 's', 'd', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'd', 'p', 'b', 'u', 's', 'd', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w',
+  's', 's', 'd', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'd', 'p', 'w', 's', 's', 'd', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'd', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a',
+  'r', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r',
+  'm', 'i', '2', 'v', 'a', 'r', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'h', 'i', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
+  'a', 'r', 'h', 'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'h', 'i', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p',
+  'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  'i', '2', 'v', 'a', 'r', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
+  'a', 'r', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p', 's', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p',
+  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  'i', '2', 'v', 'a', 'r', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r',
+  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  'i', '2', 'v', 'a', 'r', 'q', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'q', 'i', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
+  'a', 'r', 'q', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2',
+  'v', 'a', 'r', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'h', 'i', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'h',
+  'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  't', '2', 'v', 'a', 'r', 'h', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 'd', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v',
+  'a', 'r', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 'd', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p',
+  's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  't', '2', 'v', 'a', 'r', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 's', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v',
+  'a', 'r', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e',
+  'r', 'm', 't', '2', 'v', 'a', 'r', 'q', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'q', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v',
+  'a', 'r', 'q', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'q', 'i', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'q',
+  'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd',
+  'd', '5', '2', 'h', 'u', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'h', 'u',
+  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd',
+  'd', '5', '2', 'l', 'u', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'l', 'u',
+  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l',
+  'd', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h',
+  'l', 'd', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'l', 'd', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  's', 'h', 'l', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 's', 'h', 'l', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'l', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 's', 'h', 'l', 'd', 'w', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'w', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'w', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'd', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l',
+  'd', 'v', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'l', 'd', 'v', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 's', 'h', 'l', 'd', 'v', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'w', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'w', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'w', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'd', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'd',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r',
+  'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h',
+  'r', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'r', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  's', 'h', 'r', 'd', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 's', 'h', 'r', 'd', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'r', 'd', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'd', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'd', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'q',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd',
+  'v', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h',
+  'r', 'd', 'v', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  's', 'h', 'r', 'd', 'v', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'r', 'd', 'v', 'w', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'w', '5', '1', '2', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 's', 'h', 'u', 'f', 'b', 'i', 't', 'q', 'm',
+  'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'u',
+  'f', 'b', 'i', 't', 'q', 'm', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 's', 'h', 'u', 'f', 'b', 'i', 't', 'q', 'm', 'b', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'm', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 's', 'd', '3', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a',
+  'd', 'd', 's', 's', '3', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm',
+  'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd',
+  's', 'u', 'b', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b',
+  'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm',
+  'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'p', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b',
+  'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm',
+  's', 'u', 'b', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '3', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 's', 'u', 'b', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'p', 's', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 's', 'd', '3',
+  '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 's', 's',
+  '3', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'a',
+  'd', 'd', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '3', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'f', 'm', 's', 'u', 'b', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'a', 'd', 'd', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's',
+  'u', 'b', 'a', 'd', 'd', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'a', 'd', 'd', 'p', 's', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 's', 'u', 'b', 'a',
+  'd', 'd', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'f', 'n', 'm', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p', 'd', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p',
+  'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm',
+  's', 'u', 'b', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '3', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'n', 'm', 's', 'u', 'b', 'p', 's', '2', '5', '6', '_', 'm', 'a',
+  's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b', 'p', 's', '5', '1',
+  '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's', 'u', 'b',
+  's', 'd', '3', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'n', 'm', 's',
+  'u', 'b', 's', 's', '3', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x',
+  'u', 'p', 'i', 'm', 'm', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm',
+  'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x',
+  'u', 'p', 'i', 'm', 'm', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 's', '2', '5', '6',
+  '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm',
+  'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x',
+  'u', 'p', 'i', 'm', 'm', 's', 'd', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f',
+  'i', 'x', 'u', 'p', 'i', 'm', 'm', 's', 's', '_', 'm', 'a', 's', 'k', 'z',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'd', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'd', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l',
+  'o', 'g', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't',
+  'e', 'r', 'n', 'l', 'o', 'g', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'm', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd',
+  'd', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'm', 'a', 'd', 'd', 's', 'd', '3', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'f', 'm', 'a', 'd', 'd', 's', 's', '3', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b',
+  'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm',
+  'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd',
+  's', 'u', 'b', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u',
+  's', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd',
+  'p', 'b', 'u', 's', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', 's', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', 's', '2', '5',
+  '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's',
+  'd', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd',
+  'p', 'w', 's', 's', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'd', 'p', 'w', 's', 's', 'd', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', 's',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w',
+  's', 's', 'd', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 'd', 'p', 'w', 's', 's', 'd', 's', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'd', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't',
+  '2', 'v', 'a', 'r', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'd', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a',
+  'r', 'h', 'i', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'h', 'i', '2', '5', '6', '_', 'm',
+  'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r',
+  'h', 'i', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e',
+  'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 'd', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p',
+  'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r',
+  'm', 't', '2', 'v', 'a', 'r', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 's',
+  '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
+  't', '2', 'v', 'a', 'r', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k',
+  'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'p', 's', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't',
+  '2', 'v', 'a', 'r', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a',
+  'r', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e',
+  'r', 'm', 't', '2', 'v', 'a', 'r', 'q', 'i', '1', '2', '8', '_', 'm', 'a',
+  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 't', '2', 'v', 'a', 'r', 'q',
+  'i', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r',
+  'm', 't', '2', 'v', 'a', 'r', 'q', 'i', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd',
+  '5', '2', 'h', 'u', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'l',
+  'u', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
+  'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd',
+  'v', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'l', 'd', 'v', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'l', 'd', 'v', 'd', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'q', '1', '2', '8', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'q', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd',
+  'v', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'l', 'd', 'v', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'l', 'd', 'v', 'w', '2', '5', '6', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'd', 'v', 'w', '5', '1', '2', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'd', '1',
+  '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd',
+  'v', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'r', 'd', 'v', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'r', 'd', 'v', 'q', '1', '2', '8', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'q', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd', 'v', 'q', '5',
+  '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'r', 'd',
+  'v', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's',
+  'h', 'r', 'd', 'v', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'r', 'd', 'v', 'w', '5', '1', '2', '_', 'm', 'a', 's',
+  'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'd', 'w', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'a', 'c', 'k', 's', 's', 'w', 'b', '5', '1', '2', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c',
+  'k', 'u', 's', 'd', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's',
+  'w', 'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'd', 'q', '5', '1', '2',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'u', 'l', 'h', 'r', 's', 'w', '5', '1', '2', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'u', 'l', 'h', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'u', 'w',
+  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'u', 'd', 'q', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'a', 'd', 'b', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'h', 'u', 'f',
+  'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', '5', '1', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'l', 'l', 'q', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', '5', '1',
+  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'l', 'l', 'd', 'i', '5', '1', '2', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
+  'l', 'q', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', 'i', '5', '1',
+  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'l', 'l', 'v', '1', '6', 's', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
+  'l', 'v', '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '8', 'h', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'l', 'l', 'v', '1', '6', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v',
+  '3', '2', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', '5', '1', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'r', 'a', 'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'r', 'a', 'q', '5', '1', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a',
+  'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', 'i', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'a', 'q', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q',
+  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q', 'i', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'a', 'w', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v',
+  '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', 'q', '1', '2', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'a', 'v', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v',
+  '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '8', 'h', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
+  'r', 'a', 'v', '1', '6', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '3', '2',
+  'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'r', 'l', 'd', '5', '1', '2', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
+  'l', 'q', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'l', 'd', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q',
+  'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', 'i', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'l', 'v', '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v',
+  '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '8', 'h', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
+  'r', 'l', 'v', '1', '6', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '3', '2',
+  'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '1', '2', '8', '_', 'm',
+  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '2', '5', '6', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '5', '1', '2',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's', '1', '2',
+  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's', '2',
+  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's',
+  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 's',
+  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 's', 's', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'r', 'c', 'p', '2', '8', 'p', 'd', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'c', 'p', '2', '8', 'p', 's', '_', 'm', 'a', 's', 'k',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'r', 'c', 'p', '2', '8', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_',
+  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'r', 'c', 'p', '2', '8', 's', 's', '_', 'r', 'o',
+  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1',
+  '4', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '1', '4', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'r', 's', 'q', 'r', 't', '1', '4', 'p', 'd', '5', '1', '2', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p', 's', '1', '2', '8',
+  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p', 's',
+  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1',
+  '4', 'p', 's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '1', '4', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '1', '4', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '2', '8', 'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '2', '8', 'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q',
+  'r', 't', '2', '8', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
+  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 's', 'q', 'r', 't', '2', '8', 's', 's', '_', 'r', 'o',
+  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e',
+  'r', 's', 'i', 'v', '8', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r',
+  's', 'i', 'v', '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r',
+  's', 'i', 'v', '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 's',
+  'i', 'v', '1', '6', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd',
+  'i', 'v', '8', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i',
+  'v', '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i',
+  'v', '8', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v',
+  '1', '6', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v',
+  '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '2',
+  'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '4', 'd',
+  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '4', 'd', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '4', 's', 'f', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  's', 'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '4', 's', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
+  'c', 'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '8', 's', 'f', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c',
+  'a', 't', 't', 'e', 'r', 'd', 'i', 'v', '8', 's', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a',
+  't', 't', 'e', 'r', 'p', 'f', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't',
+  'e', 'r', 'p', 'f', 'd', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r',
+  'p', 'f', 'q', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'p', 'f',
+  'q', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '2',
+  'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '2', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '4', 'd', 'f',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 's', 'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '4', 'd', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  's', 'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '4', 's', 'f', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
+  'c', 'a', 't', 't', 'e', 'r', 's', 'i', 'v', '4', 's', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c',
+  'a', 't', 't', 'e', 'r', 's', 'i', 'v', '8', 's', 'f', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a',
+  't', 't', 'e', 'r', 's', 'i', 'v', '8', 's', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'o', 'm',
+  'i', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'c', 'o', 'm', 'i', 's', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
+  't', 's', 'd', '2', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'd',
+  '2', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'd', '2', 'u', 's',
+  'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'd', '2', 'u', 's', 'i', '6',
+  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'c', 'v', 't', 's', 's', '2', 's', 'i', '3', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'c', 'v', 't', 's', 's', '2', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
+  's', 's', '2', 'u', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 's',
+  '2', 'u', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v',
+  'a', 'r', 'p', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l',
+  'v', 'a', 'r', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r', '_',
+  'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r', '_', 'u', '6', '4', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b',
+  'z', 'h', 'i', '_', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'b', 'z', 'h', 'i', '_', 'd', 'i', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'd', 'e', 'p', '_', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'd', 'e', 'p', '_', 'd', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'e', 'x', 't', '_', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'x', 't', '_', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'l', 'f', 'l', 'u', 's', 'h', 'o', 'p', 't', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l',
+  'r', 's', 's', 'b', 's', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l', 'w', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l', 'z',
+  'e', 'r', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'r', 'e', 'a', 'd', 'e', 'f', 'l', 'a', 'g', 's', '_',
+  'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'r', 'e', 'a', 'd', 'e', 'f', 'l', 'a', 'g', 's', '_',
+  'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'w', 'r', 'i', 't', 'e', 'e', 'f', 'l', 'a', 'g', 's',
+  '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'w', 'r', 'i', 't', 'e', 'e', 'f', 'l', 'a', 'g',
+  's', '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'f', 'm', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm',
+  'a', 'd', 'd', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'p', 's', '2',
+  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'd', '3', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'm', 'a', 'd', 'd', 's', 's', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's',
+  'u', 'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p',
+  'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a',
+  'd', 'd', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'f', 'x', 'r', 's', 't', 'o', 'r', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'x',
+  'r', 's', 't', 'o', 'r', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'x', 's', 'a', 'v', 'e', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'f', 'x', 's', 'a', 'v', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'i', 'n', 'c', 's', 's', 'p',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'i', 'n', 'c', 's', 's', 'p', 'q', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'l', 'w', 'p', 'c',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'l', 'w', 'p', 'i', 'n', 's', '3', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'w', 'p', 'i',
+  'n', 's', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'l', 'w', 'p', 'v', 'a', 'l', '3', '2', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l',
+  'w', 'p', 'v', 'a', 'l', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'm', 'm', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'e',
+  'm', 'm', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'a', 's', 'k', 'm', 'o', 'v', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o',
+  'v', 'n', 't', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'd', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'a', 'c', 'k', 's', 's', 'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's', 'w',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'a', 'd', 'd', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'a', 'd', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd',
+  's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'a', 'd', 'd', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'u',
+  's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'a', 'd', 'd', 'u', 's', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'l', 'i',
+  'g', 'n', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'n', 'd', 'n', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm',
+  'p', 'e', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 'q', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c',
+  'm', 'p', 'e', 'q', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'g', 't', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'c', 'm', 'p', 'g', 't', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'g', 't', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'e', 'c', '_', 'e', 'x', 't', '_', 'v', '4', 'h', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'e',
+  'c', '_', 's', 'e', 't', '_', 'v', '4', 'h', 'i', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd',
+  'd', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'm', 'a', 'x', 's', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'x',
+  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'i', 'n', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'i', 'n', 'u',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'm', 'o', 'v', 'm', 's', 'k', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l',
+  'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'u', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l',
+  'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'u', 'l', 'u', 'd', 'q', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'o', 'r', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'a', 'd', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
+  'l', 'l', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'l', 'l', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 's', 'r', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
+  'r', 'a', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
+  'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'r', 'l', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'r', 'l', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'r', 'l', 'w', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'b', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u',
+  'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'u', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'u', 'b', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's', 'w', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'u', 'b', 'u', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'u', 's', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'u', 'n', 'p', 'c', 'k', 'h', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c',
+  'k', 'h', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c', 'k', 'h', 'w', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'u', 'n', 'p', 'c', 'k', 'l', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c',
+  'k', 'l', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c', 'k', 'l', 'w', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'm', 'o', 'n', 'i', 't', 'o', 'r', 'x', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
+  'w', 'a', 'i', 't', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'l', 'm', 'u', 'l', 'q', 'd', 'q',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'c', 'l', 'm', 'u', 'l', 'q', 'd', 'q', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'c', 'l', 'm', 'u', 'l', 'q', 'd', 'q', '5', '1', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'r', 'd', 'f', 's', 'b', 'a', 's', 'e', '3', '2', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'f', 's',
+  'b', 'a', 's', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'g', 's', 'b', 'a', 's', 'e',
+  '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'd', 'g', 's', 'b', 'a', 's', 'e', '6', '4', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
+  'd', 'p', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'r', 'd', 'p', 'k', 'r', 'u', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'p',
+  'm', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'r', 'd', 's', 's', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 's', 's', 'p',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'r', 'd', 't', 's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 't', 's', 'c', 'p', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'r', 's', 't', 'o', 'r', 's', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'a', 'v', 'e', 'p', 'r',
+  'e', 'v', 's', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 's', 'e', 't', 's', 's', 'b', 's', 'y', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  's', 'h', 'a', '1', 'm', 's', 'g', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h', 'a', '1', 'm', 's',
+  'g', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'h', 'a', '1', 'n', 'e', 'x', 't', 'e', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h',
+  'a', '1', 'r', 'n', 'd', 's', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h', 'a', '2', '5', '6', 'm',
+  's', 'g', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 's', 'h', 'a', '2', '5', '6', 'm', 's', 'g', '2', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  's', 'h', 'a', '2', '5', '6', 'r', 'n', 'd', 's', '2', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'l', 'w',
+  'p', 'c', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'm', 'p', 's', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'e',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'o', 'm', 'i', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'g', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'o', 'm', 'i', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'l', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'o', 'm', 'i', 'n', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'c', 'v', 't', 'p', 'i', '2', 'p', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p',
+  'i', '2', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'p', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 's', 's', '2', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 's', '2', 's',
+  'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'p', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
+  'v', 't', 't', 'p', 's', '2', 'p', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 's', 's',
+  '2', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i', '6', '4',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'm', 'a', 'x', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i',
+  'n', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'i', 'n', 's', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's',
+  'k', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'h', 'u', 'f', 'w', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p',
+  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'r', 'c', 'p', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', 'p', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'r', 's', 'q', 'r', 't', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'f', 'e', 'n', 'c', 'e',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 's', 'q', 'r', 't', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't', 's', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'u', 'c', 'o', 'm', 'i', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 'g', 'e',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'u', 'c', 'o', 'm', 'i', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 'l',
+  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'u', 'c', 'o', 'm', 'i', 'l', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i',
+  'n', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'l', 'f', 'l', 'u', 's', 'h', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p',
+  's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'e', 'q', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm',
+  'i', 's', 'd', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'g', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'o', 'm', 'i', 's', 'd', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd',
+  'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'n', 'e', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
+  't', 'd', 'q', '2', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's',
+  '2', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'c', 'v', 't', 's', 'd', '2', 's', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
+  't', 's', 'd', '2', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 'd', '2',
+  's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'd', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
+  't', 't', 'p', 's', '2', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 's', 'd', '2',
+  's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'c', 'v', 't', 't', 's', 'd', '2', 's', 'i', '6', '4', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'l', 'f', 'e', 'n', 'c', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'm', 'o', 'v', 'd',
+  'q', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'm', 'a', 'x', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'm', 'f', 'e', 'n', 'c', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i',
+  'n', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's', 'k', 'p', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
+  'c', 'k', 's', 's', 'd', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's',
+  's', 'w', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's', 'w', 'b',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'a', 'd', 'd', 's', 'b', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'a', 'd', 'd', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'u', 's',
+  'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'u', 's', 'w', '1', '2', '8',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'a', 'u', 's', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '1',
+  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'o', 'v', 'm', 's', 'k', 'b', '1', '2', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'm', 'u', 'l', 'h', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h',
+  'u', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'u', 'd', 'q', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'a', 'd', 'b', 'w', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
+  'l', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', '1', '2', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'l', 'l', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', 'i',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', 'i', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'l', 'l', 'w', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', '1',
+  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 's', 'r', 'a', 'w', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
+  'a', 'd', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', 'i', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'r', 'l', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l',
+  'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'r', 'l', 'd', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q', 'i',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', 'i', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  's', 'u', 'b', 's', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's', 'w',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'u', 'b', 'u', 's', 'b', '1', '2', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'u', 'b', 'u', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'q', 'r', 't',
+  'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 's', 'q', 'r', 't', 's', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i',
+  's', 'd', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's', 'd', 'g', 'e', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'u', 'c', 'o', 'm', 'i', 's', 'd', 'g', 't', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i',
+  's', 'd', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's', 'd', 'l', 't', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'u', 'c', 'o', 'm', 'i', 's', 'd', 'n', 'e', 'q', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's',
+  'u', 'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h',
+  'a', 'd', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'h', 'a', 'd', 'd', 'p', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h', 's',
+  'u', 'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'h', 's', 'u', 'b', 'p', 's', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'd', 'd',
+  'q', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'm', 'o', 'n', 'i', 't', 'o', 'r', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'w', 'a', 'i',
+  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'b', 'l', 'e', 'n', 'd', 'v', 'p', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'l', 'e', 'n',
+  'd', 'v', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'd', 'p', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'p', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'i', 'n', 's', 'e', 'r', 't', 'p', 's', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'p', 's',
+  'a', 'd', 'b', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's', 'd',
+  'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'b', 'l', 'e', 'n', 'd', 'v', 'b', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'h', 'm', 'i', 'n', 'p', 'o', 's', 'u', 'w', '1', '2', '8',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'm', 'u', 'l', 'd', 'q', '1', '2', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 's',
+  't', 'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 's', 't', 'n', 'z', 'c', '1',
+  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 't', 'e', 's', 't', 'z', '1', '2', '8', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o',
+  'u', 'n', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n', 'd', 'p', 's', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
+  'o', 'u', 'n', 'd', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n', 'd', 's', 's', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'c', 'r', 'c', '3', '2', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'r', 'c', '3', '2', 's', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'c', 'r', 'c', '3', '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'r', 'c', '3', '2', 'd',
+  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't', 'r', 'i', '1', '2', '8', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'c', 'm', 'p', 'e', 's', 't', 'r', 'i', 'a', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'c', 'm', 'p', 'e', 's', 't', 'r', 'i', 'c', '1', '2', '8', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c',
+  'm', 'p', 'e', 's', 't', 'r', 'i', 'o', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm',
+  'p', 'e', 's', 't', 'r', 'i', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p',
+  'e', 's', 't', 'r', 'i', 'z', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e',
+  's', 't', 'r', 'm', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't',
+  'r', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i',
+  'a', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 'c',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 'o', '1',
+  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 's', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 'z', '1', '2', '8',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'm', '1', '2', '8', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e',
+  'x', 't', 'r', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'e', 'x', 't', 'r', 'q', 'i', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'i', 'n', 's',
+  'e', 'r', 't', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'i', 'n', 's', 'e', 'r', 't', 'q', 'i', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
+  'a', 'b', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'a', 'b', 's', 'd', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'b', 's',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'h', 'a', 'd', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'd',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 's', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a',
+  'd', 'd', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 'h', 'a', 'd', 'd', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 'h', 's', 'u', 'b', 'd', '1', '2', '8', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's',
+  'u', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 's', 'w', '1', '2', '8',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'p', 'h', 's', 'u', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 'w', '1',
+  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's', 'w', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
+  'a', 'd', 'd', 'u', 'b', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l',
+  'h', 'r', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'r', 's', 'w', '1', '2',
+  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'p', 's', 'h', 'u', 'f', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'h', 'u', 'f', 'b',
+  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'b', '\000', '_', '_', 'b', 'u',
+  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g',
+  'n', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
+  'i', 'g', 'n', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'p', 's', 'i', 'g', 'n', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u', 'b', 'b', 'o',
+  'r', 'r', 'o', 'w', '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u', 'b', 'b', 'o', 'r',
+  'r', 'o', 'w', '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r', 'i', '_',
+  'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r', 'i', '_', 'u', '6', '4', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p',
+  'h', '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2',
+  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h', '2', '5', '6',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n', 'e', 'i', 'n',
+  'v', 'q', 'b', '_', 'v', '1', '6', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p',
+  '8', 'a', 'f', 'f', 'i', 'n', 'e', 'i', 'n', 'v', 'q', 'b', '_', 'v', '3',
+  '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n',
+  'e', 'i', 'n', 'v', 'q', 'b', '_', 'v', '6', '4', 'q', 'i', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g',
+  'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n', 'e', 'q', 'b', '_', 'v', '1',
+  '6', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n',
+  'e', 'q', 'b', '_', 'v', '3', '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p',
+  '8', 'a', 'f', 'f', 'i', 'n', 'e', 'q', 'b', '_', 'v', '6', '4', 'q', 'i',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'g', 'f', '2', 'p', '8', 'm', 'u', 'l', 'b', '_', 'v', '1', '6',
+  'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'm', 'u', 'l', 'b', '_', 'v',
+  '3', '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
+  'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'm', 'u', 'l', 'b',
+  '_', 'v', '6', '4', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'f', 's', 'b', 'a', 's', 'e',
+  '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'w', 'r', 'f', 's', 'b', 'a', 's', 'e', '6', '4', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w',
+  'r', 'g', 's', 'b', 'a', 's', 'e', '3', '2', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'g', 's', 'b',
+  'a', 's', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'p', 'k', 'r', 'u', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r',
+  's', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'w', 'r', 's', 's', 'q', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'u', 's', 's',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'w', 'r', 'u', 's', 's', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'x', 'a', 'b', 'o', 'r', 't',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'x', 'b', 'e', 'g', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'x', 'e', 'n', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
+  'r', 'c', 'z', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c', 'z', 'p', 'd', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'f', 'r', 'c', 'z', 'p', 's', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c', 'z',
+  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c', 'z', 's', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'f', 'r', 'c', 'z', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'm', 'b', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 'c', 'o', 'm', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'm', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'c', 'o', 'm', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'm', 'u', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 'c', 'o', 'm', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'm', 'u', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'c', 'o', 'm', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', '2',
+  'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', '2', 'p', 'd', '2', '5',
+  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', '2', 'p', 's', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'e', 'r', 'm', 'i', 'l', '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h',
+  'a', 'd', 'd', 'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'b', 'q', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'h', 'a', 'd', 'd', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd',
+  'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u', 'b', 'd', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'h', 'a', 'd', 'd', 'u', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u',
+  'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u', 'd', 'q', '\000', '_', '_',
+  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
+  'h', 'a', 'd', 'd', 'u', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u',
+  'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'w', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h',
+  'a', 'd', 'd', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 's', 'u', 'b', 'b', 'w', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 'h', 's', 'u', 'b', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 's', 'u', 'b',
+  'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'd', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
+  'a', 'c', 's', 'd', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'd', 'q',
+  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 's', 'd', 'd', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
+  'a', 'c', 's', 's', 'd', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 's',
+  'd', 'q', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
+  'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 's', 'w', 'd', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 'm', 'a', 'c', 's', 's', 'w', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's',
+  'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'w', 'w', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
+  'a', 'd', 'c', 's', 's', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'c', 's',
+  'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
+  '3', '2', '_', 'v', 'p', 'p', 'e', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'r', 'o', 't',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'r', 'o', 't', 'b', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'r', 'o', 't',
+  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'r', 'o', 't', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'r', 'o', 't',
+  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'r', 'o', 't', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'r', 'o', 't',
+  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 'r', 'o', 't', 'w', 'i', '\000', '_', '_', 'b', 'u', 'i',
+  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'a',
+  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
+  '2', '_', 'v', 'p', 's', 'h', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
+  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'a', 'q',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
+  '_', 'v', 'p', 's', 'h', 'a', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
+  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'b', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
+  'v', 'p', 's', 'h', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
+  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'q', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
+  'p', 's', 'h', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
+  '_', 'i', 'a', '3', '2', '_', 'x', 't', 'e', 's', 't', '\000', '_', '_', 'b',
+  'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'i', 't', 'r', 'e', 'v', '\000', '_',
+  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'g', 'e', 't', 'i', 'd', '\000',
+  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'g', 'e', 't', 'p', 's',
+  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'e', 't', 'p',
+  's', '\000',
+  };
+
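+  // BuiltinNames above is a single string table: every builtin name is laid
+  // out back-to-back and terminated by '\000', and the entries below refer
+  // to a name by its byte offset into the table rather than by pointer.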
+  struct BuiltinEntry {
+    Intrinsic::ID IntrinID;
+    unsigned StrTabOffset;
+    const char *getName() const {
+      return &BuiltinNames[StrTabOffset];
+    }
+    bool operator<(StringRef RHS) const {
+      return strncmp(getName(), RHS.data(), RHS.size()) < 0;
+    }
+  };
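+  // A BuiltinEntry pairs an intrinsic ID with the byte offset of its name in
+  // BuiltinNames; getName() recovers the NUL-terminated name in place, and
+  // operator< orders an entry against a query StringRef so std::lower_bound
+  // can binary-search the sorted tables below.  A minimal sketch, using the
+  // offset the target-independent table records for __builtin_trap:
+  //
+  //   BuiltinEntry E = {Intrinsic::trap, 209};
+  //   E.getName();                    // yields "__builtin_trap"
+  //   E < StringRef("__builtin_x");   // strncmp over the query's length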
+  StringRef TargetPrefix(TargetPrefixStr);
+
+  /* Target Independent Builtins */ {
+    static const BuiltinEntry Names[] = {
+      {Intrinsic::adjust_trampoline, 0}, // __builtin_adjust_trampoline
+      {Intrinsic::debugtrap, 28}, // __builtin_debugtrap
+      {Intrinsic::flt_rounds, 70}, // __builtin_flt_rounds
+      {Intrinsic::init_trampoline, 91}, // __builtin_init_trampoline
+      {Intrinsic::objectsize, 117}, // __builtin_object_size
+      {Intrinsic::stackrestore, 139}, // __builtin_stack_restore
+      {Intrinsic::stacksave, 163}, // __builtin_stack_save
+      {Intrinsic::thread_pointer, 184}, // __builtin_thread_pointer
+      {Intrinsic::trap, 209}, // __builtin_trap
+      {Intrinsic::eh_unwind_init, 48}, // __builtin_unwind_init
+    };
+    auto I = std::lower_bound(std::begin(Names),
+                              std::end(Names),
+                              BuiltinNameStr);
+    if (I != std::end(Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
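+  // Lookup pattern, repeated for each per-target table below: the entries
+  // are pre-sorted by name, std::lower_bound locates the first entry not
+  // less than BuiltinNameStr, and the trailing equality check both confirms
+  // the hit and rejects prefix-only matches (operator< compares only the
+  // first RHS.size() characters).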
+  if (TargetPrefix == "aarch64") {
+    static const BuiltinEntry aarch64Names[] = {
+      {Intrinsic::aarch64_dmb, 224}, // __builtin_arm_dmb
+      {Intrinsic::aarch64_dsb, 242}, // __builtin_arm_dsb
+      {Intrinsic::aarch64_isb, 260}, // __builtin_arm_isb
+    };
+    auto I = std::lower_bound(std::begin(aarch64Names),
+                              std::end(aarch64Names),
+                              BuiltinNameStr);
+    if (I != std::end(aarch64Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
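+  // Offsets are shared across tables when names coincide: the aarch64
+  // entries above and the arm entries below both use offsets 224, 242 and
+  // 260 for __builtin_arm_dmb/dsb/isb, so each name is stored only once in
+  // the string table.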
+  if (TargetPrefix == "amdgcn") {
+    static const BuiltinEntry amdgcnNames[] = {
+      {Intrinsic::amdgcn_buffer_wbinvl1, 278}, // __builtin_amdgcn_buffer_wbinvl1
+      {Intrinsic::amdgcn_buffer_wbinvl1_sc, 310}, // __builtin_amdgcn_buffer_wbinvl1_sc
+      {Intrinsic::amdgcn_buffer_wbinvl1_vol, 345}, // __builtin_amdgcn_buffer_wbinvl1_vol
+      {Intrinsic::amdgcn_cubeid, 381}, // __builtin_amdgcn_cubeid
+      {Intrinsic::amdgcn_cubema, 405}, // __builtin_amdgcn_cubema
+      {Intrinsic::amdgcn_cubesc, 429}, // __builtin_amdgcn_cubesc
+      {Intrinsic::amdgcn_cubetc, 453}, // __builtin_amdgcn_cubetc
+      {Intrinsic::amdgcn_cvt_pk_u8_f32, 477}, // __builtin_amdgcn_cvt_pk_u8_f32
+      {Intrinsic::amdgcn_dispatch_id, 508}, // __builtin_amdgcn_dispatch_id
+      {Intrinsic::amdgcn_dispatch_ptr, 537}, // __builtin_amdgcn_dispatch_ptr
+      {Intrinsic::amdgcn_ds_bpermute, 567}, // __builtin_amdgcn_ds_bpermute
+      {Intrinsic::amdgcn_ds_fadd, 596}, // __builtin_amdgcn_ds_fadd
+      {Intrinsic::amdgcn_ds_fmax, 621}, // __builtin_amdgcn_ds_fmax
+      {Intrinsic::amdgcn_ds_fmin, 646}, // __builtin_amdgcn_ds_fmin
+      {Intrinsic::amdgcn_ds_permute, 671}, // __builtin_amdgcn_ds_permute
+      {Intrinsic::amdgcn_ds_swizzle, 699}, // __builtin_amdgcn_ds_swizzle
+      {Intrinsic::amdgcn_fmed3, 727}, // __builtin_amdgcn_fmed3
+      {Intrinsic::amdgcn_fmul_legacy, 750}, // __builtin_amdgcn_fmul_legacy
+      {Intrinsic::amdgcn_groupstaticsize, 779}, // __builtin_amdgcn_groupstaticsize
+      {Intrinsic::amdgcn_implicit_buffer_ptr, 812}, // __builtin_amdgcn_implicit_buffer_ptr
+      {Intrinsic::amdgcn_implicitarg_ptr, 849}, // __builtin_amdgcn_implicitarg_ptr
+      {Intrinsic::amdgcn_interp_mov, 882}, // __builtin_amdgcn_interp_mov
+      {Intrinsic::amdgcn_interp_p1, 910}, // __builtin_amdgcn_interp_p1
+      {Intrinsic::amdgcn_interp_p2, 937}, // __builtin_amdgcn_interp_p2
+      {Intrinsic::amdgcn_kernarg_segment_ptr, 964}, // __builtin_amdgcn_kernarg_segment_ptr
+      {Intrinsic::amdgcn_lerp, 1001}, // __builtin_amdgcn_lerp
+      {Intrinsic::amdgcn_mbcnt_hi, 1023}, // __builtin_amdgcn_mbcnt_hi
+      {Intrinsic::amdgcn_mbcnt_lo, 1049}, // __builtin_amdgcn_mbcnt_lo
+      {Intrinsic::amdgcn_mqsad_pk_u16_u8, 1075}, // __builtin_amdgcn_mqsad_pk_u16_u8
+      {Intrinsic::amdgcn_mqsad_u32_u8, 1108}, // __builtin_amdgcn_mqsad_u32_u8
+      {Intrinsic::amdgcn_msad_u8, 1138}, // __builtin_amdgcn_msad_u8
+      {Intrinsic::amdgcn_qsad_pk_u16_u8, 1163}, // __builtin_amdgcn_qsad_pk_u16_u8
+      {Intrinsic::amdgcn_queue_ptr, 1195}, // __builtin_amdgcn_queue_ptr
+      {Intrinsic::amdgcn_rcp_legacy, 1222}, // __builtin_amdgcn_rcp_legacy
+      {Intrinsic::amdgcn_readfirstlane, 1250}, // __builtin_amdgcn_readfirstlane
+      {Intrinsic::amdgcn_readlane, 1281}, // __builtin_amdgcn_readlane
+      {Intrinsic::amdgcn_rsq_legacy, 1307}, // __builtin_amdgcn_rsq_legacy
+      {Intrinsic::amdgcn_s_barrier, 1335}, // __builtin_amdgcn_s_barrier
+      {Intrinsic::amdgcn_s_dcache_inv, 1362}, // __builtin_amdgcn_s_dcache_inv
+      {Intrinsic::amdgcn_s_dcache_inv_vol, 1392}, // __builtin_amdgcn_s_dcache_inv_vol
+      {Intrinsic::amdgcn_s_dcache_wb, 1426}, // __builtin_amdgcn_s_dcache_wb
+      {Intrinsic::amdgcn_s_dcache_wb_vol, 1455}, // __builtin_amdgcn_s_dcache_wb_vol
+      {Intrinsic::amdgcn_s_decperflevel, 1488}, // __builtin_amdgcn_s_decperflevel
+      {Intrinsic::amdgcn_s_getpc, 1520}, // __builtin_amdgcn_s_getpc
+      {Intrinsic::amdgcn_s_getreg, 1545}, // __builtin_amdgcn_s_getreg
+      {Intrinsic::amdgcn_s_incperflevel, 1571}, // __builtin_amdgcn_s_incperflevel
+      {Intrinsic::amdgcn_s_memrealtime, 1603}, // __builtin_amdgcn_s_memrealtime
+      {Intrinsic::amdgcn_s_memtime, 1634}, // __builtin_amdgcn_s_memtime
+      {Intrinsic::amdgcn_s_sendmsg, 1661}, // __builtin_amdgcn_s_sendmsg
+      {Intrinsic::amdgcn_s_sendmsghalt, 1688}, // __builtin_amdgcn_s_sendmsghalt
+      {Intrinsic::amdgcn_s_sleep, 1719}, // __builtin_amdgcn_s_sleep
+      {Intrinsic::amdgcn_s_waitcnt, 1744}, // __builtin_amdgcn_s_waitcnt
+      {Intrinsic::amdgcn_sad_hi_u8, 1771}, // __builtin_amdgcn_sad_hi_u8
+      {Intrinsic::amdgcn_sad_u16, 1798}, // __builtin_amdgcn_sad_u16
+      {Intrinsic::amdgcn_sad_u8, 1823}, // __builtin_amdgcn_sad_u8
+      {Intrinsic::amdgcn_wave_barrier, 1847}, // __builtin_amdgcn_wave_barrier
+      {Intrinsic::amdgcn_workgroup_id_x, 1877}, // __builtin_amdgcn_workgroup_id_x
+      {Intrinsic::amdgcn_workgroup_id_y, 1909}, // __builtin_amdgcn_workgroup_id_y
+      {Intrinsic::amdgcn_workgroup_id_z, 1941}, // __builtin_amdgcn_workgroup_id_z
+      {Intrinsic::amdgcn_writelane, 1973}, // __builtin_amdgcn_writelane
+    };
+    auto I = std::lower_bound(std::begin(amdgcnNames),
+                              std::end(amdgcnNames),
+                              BuiltinNameStr);
+    if (I != std::end(amdgcnNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "arm") {
+    static const BuiltinEntry armNames[] = {
+      {Intrinsic::arm_cdp, 2000}, // __builtin_arm_cdp
+      {Intrinsic::arm_cdp2, 2018}, // __builtin_arm_cdp2
+      {Intrinsic::arm_dmb, 224}, // __builtin_arm_dmb
+      {Intrinsic::arm_dsb, 242}, // __builtin_arm_dsb
+      {Intrinsic::arm_get_fpscr, 2037}, // __builtin_arm_get_fpscr
+      {Intrinsic::arm_isb, 260}, // __builtin_arm_isb
+      {Intrinsic::arm_ldc, 2061}, // __builtin_arm_ldc
+      {Intrinsic::arm_ldc2, 2079}, // __builtin_arm_ldc2
+      {Intrinsic::arm_ldc2l, 2098}, // __builtin_arm_ldc2l
+      {Intrinsic::arm_ldcl, 2118}, // __builtin_arm_ldcl
+      {Intrinsic::arm_mcr, 2137}, // __builtin_arm_mcr
+      {Intrinsic::arm_mcr2, 2155}, // __builtin_arm_mcr2
+      {Intrinsic::arm_mrc, 2174}, // __builtin_arm_mrc
+      {Intrinsic::arm_mrc2, 2192}, // __builtin_arm_mrc2
+      {Intrinsic::arm_qadd, 2211}, // __builtin_arm_qadd
+      {Intrinsic::arm_qadd16, 2230}, // __builtin_arm_qadd16
+      {Intrinsic::arm_qadd8, 2251}, // __builtin_arm_qadd8
+      {Intrinsic::arm_qasx, 2271}, // __builtin_arm_qasx
+      {Intrinsic::arm_qsax, 2290}, // __builtin_arm_qsax
+      {Intrinsic::arm_qsub, 2309}, // __builtin_arm_qsub
+      {Intrinsic::arm_qsub16, 2328}, // __builtin_arm_qsub16
+      {Intrinsic::arm_qsub8, 2349}, // __builtin_arm_qsub8
+      {Intrinsic::arm_sadd16, 2369}, // __builtin_arm_sadd16
+      {Intrinsic::arm_sadd8, 2390}, // __builtin_arm_sadd8
+      {Intrinsic::arm_sasx, 2410}, // __builtin_arm_sasx
+      {Intrinsic::arm_sel, 2429}, // __builtin_arm_sel
+      {Intrinsic::arm_set_fpscr, 2447}, // __builtin_arm_set_fpscr
+      {Intrinsic::arm_shadd16, 2471}, // __builtin_arm_shadd16
+      {Intrinsic::arm_shadd8, 2493}, // __builtin_arm_shadd8
+      {Intrinsic::arm_shasx, 2514}, // __builtin_arm_shasx
+      {Intrinsic::arm_shsax, 2534}, // __builtin_arm_shsax
+      {Intrinsic::arm_shsub16, 2554}, // __builtin_arm_shsub16
+      {Intrinsic::arm_shsub8, 2576}, // __builtin_arm_shsub8
+      {Intrinsic::arm_smlabb, 2597}, // __builtin_arm_smlabb
+      {Intrinsic::arm_smlabt, 2618}, // __builtin_arm_smlabt
+      {Intrinsic::arm_smlad, 2639}, // __builtin_arm_smlad
+      {Intrinsic::arm_smladx, 2659}, // __builtin_arm_smladx
+      {Intrinsic::arm_smlald, 2680}, // __builtin_arm_smlald
+      {Intrinsic::arm_smlaldx, 2701}, // __builtin_arm_smlaldx
+      {Intrinsic::arm_smlatb, 2723}, // __builtin_arm_smlatb
+      {Intrinsic::arm_smlatt, 2744}, // __builtin_arm_smlatt
+      {Intrinsic::arm_smlawb, 2765}, // __builtin_arm_smlawb
+      {Intrinsic::arm_smlawt, 2786}, // __builtin_arm_smlawt
+      {Intrinsic::arm_smlsd, 2807}, // __builtin_arm_smlsd
+      {Intrinsic::arm_smlsdx, 2827}, // __builtin_arm_smlsdx
+      {Intrinsic::arm_smlsld, 2848}, // __builtin_arm_smlsld
+      {Intrinsic::arm_smlsldx, 2869}, // __builtin_arm_smlsldx
+      {Intrinsic::arm_smuad, 2891}, // __builtin_arm_smuad
+      {Intrinsic::arm_smuadx, 2911}, // __builtin_arm_smuadx
+      {Intrinsic::arm_smulbb, 2932}, // __builtin_arm_smulbb
+      {Intrinsic::arm_smulbt, 2953}, // __builtin_arm_smulbt
+      {Intrinsic::arm_smultb, 2974}, // __builtin_arm_smultb
+      {Intrinsic::arm_smultt, 2995}, // __builtin_arm_smultt
+      {Intrinsic::arm_smulwb, 3016}, // __builtin_arm_smulwb
+      {Intrinsic::arm_smulwt, 3037}, // __builtin_arm_smulwt
+      {Intrinsic::arm_smusd, 3058}, // __builtin_arm_smusd
+      {Intrinsic::arm_smusdx, 3078}, // __builtin_arm_smusdx
+      {Intrinsic::arm_ssat, 3099}, // __builtin_arm_ssat
+      {Intrinsic::arm_ssat16, 3118}, // __builtin_arm_ssat16
+      {Intrinsic::arm_ssax, 3139}, // __builtin_arm_ssax
+      {Intrinsic::arm_ssub16, 3158}, // __builtin_arm_ssub16
+      {Intrinsic::arm_ssub8, 3179}, // __builtin_arm_ssub8
+      {Intrinsic::arm_stc, 3199}, // __builtin_arm_stc
+      {Intrinsic::arm_stc2, 3217}, // __builtin_arm_stc2
+      {Intrinsic::arm_stc2l, 3236}, // __builtin_arm_stc2l
+      {Intrinsic::arm_stcl, 3256}, // __builtin_arm_stcl
+      {Intrinsic::arm_sxtab16, 3275}, // __builtin_arm_sxtab16
+      {Intrinsic::arm_sxtb16, 3297}, // __builtin_arm_sxtb16
+      {Intrinsic::arm_uadd16, 3318}, // __builtin_arm_uadd16
+      {Intrinsic::arm_uadd8, 3339}, // __builtin_arm_uadd8
+      {Intrinsic::arm_uasx, 3359}, // __builtin_arm_uasx
+      {Intrinsic::arm_uhadd16, 3378}, // __builtin_arm_uhadd16
+      {Intrinsic::arm_uhadd8, 3400}, // __builtin_arm_uhadd8
+      {Intrinsic::arm_uhasx, 3421}, // __builtin_arm_uhasx
+      {Intrinsic::arm_uhsax, 3441}, // __builtin_arm_uhsax
+      {Intrinsic::arm_uhsub16, 3461}, // __builtin_arm_uhsub16
+      {Intrinsic::arm_uhsub8, 3483}, // __builtin_arm_uhsub8
+      {Intrinsic::arm_uqadd16, 3504}, // __builtin_arm_uqadd16
+      {Intrinsic::arm_uqadd8, 3526}, // __builtin_arm_uqadd8
+      {Intrinsic::arm_uqasx, 3547}, // __builtin_arm_uqasx
+      {Intrinsic::arm_uqsax, 3567}, // __builtin_arm_uqsax
+      {Intrinsic::arm_uqsub16, 3587}, // __builtin_arm_uqsub16
+      {Intrinsic::arm_uqsub8, 3609}, // __builtin_arm_uqsub8
+      {Intrinsic::arm_usad8, 3630}, // __builtin_arm_usad8
+      {Intrinsic::arm_usada8, 3650}, // __builtin_arm_usada8
+      {Intrinsic::arm_usat, 3671}, // __builtin_arm_usat
+      {Intrinsic::arm_usat16, 3690}, // __builtin_arm_usat16
+      {Intrinsic::arm_usax, 3711}, // __builtin_arm_usax
+      {Intrinsic::arm_usub16, 3730}, // __builtin_arm_usub16
+      {Intrinsic::arm_usub8, 3751}, // __builtin_arm_usub8
+      {Intrinsic::arm_uxtab16, 3771}, // __builtin_arm_uxtab16
+      {Intrinsic::arm_uxtb16, 3793}, // __builtin_arm_uxtb16
+    };
+    auto I = std::lower_bound(std::begin(armNames),
+                              std::end(armNames),
+                              BuiltinNameStr);
+    if (I != std::end(armNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "bpf") {
+    static const BuiltinEntry bpfNames[] = {
+      {Intrinsic::bpf_load_byte, 3814}, // __builtin_bpf_load_byte
+      {Intrinsic::bpf_load_half, 3838}, // __builtin_bpf_load_half
+      {Intrinsic::bpf_load_word, 3862}, // __builtin_bpf_load_word
+      {Intrinsic::bpf_pseudo, 3886}, // __builtin_bpf_pseudo
+    };
+    auto I = std::lower_bound(std::begin(bpfNames),
+                              std::end(bpfNames),
+                              BuiltinNameStr);
+    if (I != std::end(bpfNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "hexagon") {
+    static const BuiltinEntry hexagonNames[] = {
+      {Intrinsic::hexagon_A2_abs, 3907}, // __builtin_HEXAGON_A2_abs
+      {Intrinsic::hexagon_A2_absp, 3932}, // __builtin_HEXAGON_A2_absp
+      {Intrinsic::hexagon_A2_abssat, 3958}, // __builtin_HEXAGON_A2_abssat
+      {Intrinsic::hexagon_A2_add, 3986}, // __builtin_HEXAGON_A2_add
+      {Intrinsic::hexagon_A2_addh_h16_hh, 4011}, // __builtin_HEXAGON_A2_addh_h16_hh
+      {Intrinsic::hexagon_A2_addh_h16_hl, 4044}, // __builtin_HEXAGON_A2_addh_h16_hl
+      {Intrinsic::hexagon_A2_addh_h16_lh, 4077}, // __builtin_HEXAGON_A2_addh_h16_lh
+      {Intrinsic::hexagon_A2_addh_h16_ll, 4110}, // __builtin_HEXAGON_A2_addh_h16_ll
+      {Intrinsic::hexagon_A2_addh_h16_sat_hh, 4143}, // __builtin_HEXAGON_A2_addh_h16_sat_hh
+      {Intrinsic::hexagon_A2_addh_h16_sat_hl, 4180}, // __builtin_HEXAGON_A2_addh_h16_sat_hl
+      {Intrinsic::hexagon_A2_addh_h16_sat_lh, 4217}, // __builtin_HEXAGON_A2_addh_h16_sat_lh
+      {Intrinsic::hexagon_A2_addh_h16_sat_ll, 4254}, // __builtin_HEXAGON_A2_addh_h16_sat_ll
+      {Intrinsic::hexagon_A2_addh_l16_hl, 4291}, // __builtin_HEXAGON_A2_addh_l16_hl
+      {Intrinsic::hexagon_A2_addh_l16_ll, 4324}, // __builtin_HEXAGON_A2_addh_l16_ll
+      {Intrinsic::hexagon_A2_addh_l16_sat_hl, 4357}, // __builtin_HEXAGON_A2_addh_l16_sat_hl
+      {Intrinsic::hexagon_A2_addh_l16_sat_ll, 4394}, // __builtin_HEXAGON_A2_addh_l16_sat_ll
+      {Intrinsic::hexagon_A2_addi, 4431}, // __builtin_HEXAGON_A2_addi
+      {Intrinsic::hexagon_A2_addp, 4457}, // __builtin_HEXAGON_A2_addp
+      {Intrinsic::hexagon_A2_addpsat, 4483}, // __builtin_HEXAGON_A2_addpsat
+      {Intrinsic::hexagon_A2_addsat, 4512}, // __builtin_HEXAGON_A2_addsat
+      {Intrinsic::hexagon_A2_addsp, 4540}, // __builtin_HEXAGON_A2_addsp
+      {Intrinsic::hexagon_A2_and, 4567}, // __builtin_HEXAGON_A2_and
+      {Intrinsic::hexagon_A2_andir, 4592}, // __builtin_HEXAGON_A2_andir
+      {Intrinsic::hexagon_A2_andp, 4619}, // __builtin_HEXAGON_A2_andp
+      {Intrinsic::hexagon_A2_aslh, 4645}, // __builtin_HEXAGON_A2_aslh
+      {Intrinsic::hexagon_A2_asrh, 4671}, // __builtin_HEXAGON_A2_asrh
+      {Intrinsic::hexagon_A2_combine_hh, 4697}, // __builtin_HEXAGON_A2_combine_hh
+      {Intrinsic::hexagon_A2_combine_hl, 4729}, // __builtin_HEXAGON_A2_combine_hl
+      {Intrinsic::hexagon_A2_combine_lh, 4761}, // __builtin_HEXAGON_A2_combine_lh
+      {Intrinsic::hexagon_A2_combine_ll, 4793}, // __builtin_HEXAGON_A2_combine_ll
+      {Intrinsic::hexagon_A2_combineii, 4825}, // __builtin_HEXAGON_A2_combineii
+      {Intrinsic::hexagon_A2_combinew, 4856}, // __builtin_HEXAGON_A2_combinew
+      {Intrinsic::hexagon_A2_max, 4886}, // __builtin_HEXAGON_A2_max
+      {Intrinsic::hexagon_A2_maxp, 4911}, // __builtin_HEXAGON_A2_maxp
+      {Intrinsic::hexagon_A2_maxu, 4937}, // __builtin_HEXAGON_A2_maxu
+      {Intrinsic::hexagon_A2_maxup, 4963}, // __builtin_HEXAGON_A2_maxup
+      {Intrinsic::hexagon_A2_min, 4990}, // __builtin_HEXAGON_A2_min
+      {Intrinsic::hexagon_A2_minp, 5015}, // __builtin_HEXAGON_A2_minp
+      {Intrinsic::hexagon_A2_minu, 5041}, // __builtin_HEXAGON_A2_minu
+      {Intrinsic::hexagon_A2_minup, 5067}, // __builtin_HEXAGON_A2_minup
+      {Intrinsic::hexagon_A2_neg, 5094}, // __builtin_HEXAGON_A2_neg
+      {Intrinsic::hexagon_A2_negp, 5119}, // __builtin_HEXAGON_A2_negp
+      {Intrinsic::hexagon_A2_negsat, 5145}, // __builtin_HEXAGON_A2_negsat
+      {Intrinsic::hexagon_A2_not, 5173}, // __builtin_HEXAGON_A2_not
+      {Intrinsic::hexagon_A2_notp, 5198}, // __builtin_HEXAGON_A2_notp
+      {Intrinsic::hexagon_A2_or, 5224}, // __builtin_HEXAGON_A2_or
+      {Intrinsic::hexagon_A2_orir, 5248}, // __builtin_HEXAGON_A2_orir
+      {Intrinsic::hexagon_A2_orp, 5274}, // __builtin_HEXAGON_A2_orp
+      {Intrinsic::hexagon_A2_roundsat, 5299}, // __builtin_HEXAGON_A2_roundsat
+      {Intrinsic::hexagon_A2_sat, 5329}, // __builtin_HEXAGON_A2_sat
+      {Intrinsic::hexagon_A2_satb, 5354}, // __builtin_HEXAGON_A2_satb
+      {Intrinsic::hexagon_A2_sath, 5380}, // __builtin_HEXAGON_A2_sath
+      {Intrinsic::hexagon_A2_satub, 5406}, // __builtin_HEXAGON_A2_satub
+      {Intrinsic::hexagon_A2_satuh, 5433}, // __builtin_HEXAGON_A2_satuh
+      {Intrinsic::hexagon_A2_sub, 5460}, // __builtin_HEXAGON_A2_sub
+      {Intrinsic::hexagon_A2_subh_h16_hh, 5485}, // __builtin_HEXAGON_A2_subh_h16_hh
+      {Intrinsic::hexagon_A2_subh_h16_hl, 5518}, // __builtin_HEXAGON_A2_subh_h16_hl
+      {Intrinsic::hexagon_A2_subh_h16_lh, 5551}, // __builtin_HEXAGON_A2_subh_h16_lh
+      {Intrinsic::hexagon_A2_subh_h16_ll, 5584}, // __builtin_HEXAGON_A2_subh_h16_ll
+      {Intrinsic::hexagon_A2_subh_h16_sat_hh, 5617}, // __builtin_HEXAGON_A2_subh_h16_sat_hh
+      {Intrinsic::hexagon_A2_subh_h16_sat_hl, 5654}, // __builtin_HEXAGON_A2_subh_h16_sat_hl
+      {Intrinsic::hexagon_A2_subh_h16_sat_lh, 5691}, // __builtin_HEXAGON_A2_subh_h16_sat_lh
+      {Intrinsic::hexagon_A2_subh_h16_sat_ll, 5728}, // __builtin_HEXAGON_A2_subh_h16_sat_ll
+      {Intrinsic::hexagon_A2_subh_l16_hl, 5765}, // __builtin_HEXAGON_A2_subh_l16_hl
+      {Intrinsic::hexagon_A2_subh_l16_ll, 5798}, // __builtin_HEXAGON_A2_subh_l16_ll
+      {Intrinsic::hexagon_A2_subh_l16_sat_hl, 5831}, // __builtin_HEXAGON_A2_subh_l16_sat_hl
+      {Intrinsic::hexagon_A2_subh_l16_sat_ll, 5868}, // __builtin_HEXAGON_A2_subh_l16_sat_ll
+      {Intrinsic::hexagon_A2_subp, 5905}, // __builtin_HEXAGON_A2_subp
+      {Intrinsic::hexagon_A2_subri, 5931}, // __builtin_HEXAGON_A2_subri
+      {Intrinsic::hexagon_A2_subsat, 5958}, // __builtin_HEXAGON_A2_subsat
+      {Intrinsic::hexagon_A2_svaddh, 5986}, // __builtin_HEXAGON_A2_svaddh
+      {Intrinsic::hexagon_A2_svaddhs, 6014}, // __builtin_HEXAGON_A2_svaddhs
+      {Intrinsic::hexagon_A2_svadduhs, 6043}, // __builtin_HEXAGON_A2_svadduhs
+      {Intrinsic::hexagon_A2_svavgh, 6073}, // __builtin_HEXAGON_A2_svavgh
+      {Intrinsic::hexagon_A2_svavghs, 6101}, // __builtin_HEXAGON_A2_svavghs
+      {Intrinsic::hexagon_A2_svnavgh, 6130}, // __builtin_HEXAGON_A2_svnavgh
+      {Intrinsic::hexagon_A2_svsubh, 6159}, // __builtin_HEXAGON_A2_svsubh
+      {Intrinsic::hexagon_A2_svsubhs, 6187}, // __builtin_HEXAGON_A2_svsubhs
+      {Intrinsic::hexagon_A2_svsubuhs, 6216}, // __builtin_HEXAGON_A2_svsubuhs
+      {Intrinsic::hexagon_A2_swiz, 6246}, // __builtin_HEXAGON_A2_swiz
+      {Intrinsic::hexagon_A2_sxtb, 6272}, // __builtin_HEXAGON_A2_sxtb
+      {Intrinsic::hexagon_A2_sxth, 6298}, // __builtin_HEXAGON_A2_sxth
+      {Intrinsic::hexagon_A2_sxtw, 6324}, // __builtin_HEXAGON_A2_sxtw
+      {Intrinsic::hexagon_A2_tfr, 6350}, // __builtin_HEXAGON_A2_tfr
+      {Intrinsic::hexagon_A2_tfrih, 6375}, // __builtin_HEXAGON_A2_tfrih
+      {Intrinsic::hexagon_A2_tfril, 6402}, // __builtin_HEXAGON_A2_tfril
+      {Intrinsic::hexagon_A2_tfrp, 6429}, // __builtin_HEXAGON_A2_tfrp
+      {Intrinsic::hexagon_A2_tfrpi, 6455}, // __builtin_HEXAGON_A2_tfrpi
+      {Intrinsic::hexagon_A2_tfrsi, 6482}, // __builtin_HEXAGON_A2_tfrsi
+      {Intrinsic::hexagon_A2_vabsh, 6509}, // __builtin_HEXAGON_A2_vabsh
+      {Intrinsic::hexagon_A2_vabshsat, 6536}, // __builtin_HEXAGON_A2_vabshsat
+      {Intrinsic::hexagon_A2_vabsw, 6566}, // __builtin_HEXAGON_A2_vabsw
+      {Intrinsic::hexagon_A2_vabswsat, 6593}, // __builtin_HEXAGON_A2_vabswsat
+      {Intrinsic::hexagon_A2_vaddb_map, 6623}, // __builtin_HEXAGON_A2_vaddb_map
+      {Intrinsic::hexagon_A2_vaddh, 6654}, // __builtin_HEXAGON_A2_vaddh
+      {Intrinsic::hexagon_A2_vaddhs, 6681}, // __builtin_HEXAGON_A2_vaddhs
+      {Intrinsic::hexagon_A2_vaddub, 6709}, // __builtin_HEXAGON_A2_vaddub
+      {Intrinsic::hexagon_A2_vaddubs, 6737}, // __builtin_HEXAGON_A2_vaddubs
+      {Intrinsic::hexagon_A2_vadduhs, 6766}, // __builtin_HEXAGON_A2_vadduhs
+      {Intrinsic::hexagon_A2_vaddw, 6795}, // __builtin_HEXAGON_A2_vaddw
+      {Intrinsic::hexagon_A2_vaddws, 6822}, // __builtin_HEXAGON_A2_vaddws
+      {Intrinsic::hexagon_A2_vavgh, 6850}, // __builtin_HEXAGON_A2_vavgh
+      {Intrinsic::hexagon_A2_vavghcr, 6877}, // __builtin_HEXAGON_A2_vavghcr
+      {Intrinsic::hexagon_A2_vavghr, 6906}, // __builtin_HEXAGON_A2_vavghr
+      {Intrinsic::hexagon_A2_vavgub, 6934}, // __builtin_HEXAGON_A2_vavgub
+      {Intrinsic::hexagon_A2_vavgubr, 6962}, // __builtin_HEXAGON_A2_vavgubr
+      {Intrinsic::hexagon_A2_vavguh, 6991}, // __builtin_HEXAGON_A2_vavguh
+      {Intrinsic::hexagon_A2_vavguhr, 7019}, // __builtin_HEXAGON_A2_vavguhr
+      {Intrinsic::hexagon_A2_vavguw, 7048}, // __builtin_HEXAGON_A2_vavguw
+      {Intrinsic::hexagon_A2_vavguwr, 7076}, // __builtin_HEXAGON_A2_vavguwr
+      {Intrinsic::hexagon_A2_vavgw, 7105}, // __builtin_HEXAGON_A2_vavgw
+      {Intrinsic::hexagon_A2_vavgwcr, 7132}, // __builtin_HEXAGON_A2_vavgwcr
+      {Intrinsic::hexagon_A2_vavgwr, 7161}, // __builtin_HEXAGON_A2_vavgwr
+      {Intrinsic::hexagon_A2_vcmpbeq, 7189}, // __builtin_HEXAGON_A2_vcmpbeq
+      {Intrinsic::hexagon_A2_vcmpbgtu, 7218}, // __builtin_HEXAGON_A2_vcmpbgtu
+      {Intrinsic::hexagon_A2_vcmpheq, 7248}, // __builtin_HEXAGON_A2_vcmpheq
+      {Intrinsic::hexagon_A2_vcmphgt, 7277}, // __builtin_HEXAGON_A2_vcmphgt
+      {Intrinsic::hexagon_A2_vcmphgtu, 7306}, // __builtin_HEXAGON_A2_vcmphgtu
+      {Intrinsic::hexagon_A2_vcmpweq, 7336}, // __builtin_HEXAGON_A2_vcmpweq
+      {Intrinsic::hexagon_A2_vcmpwgt, 7365}, // __builtin_HEXAGON_A2_vcmpwgt
+      {Intrinsic::hexagon_A2_vcmpwgtu, 7394}, // __builtin_HEXAGON_A2_vcmpwgtu
+      {Intrinsic::hexagon_A2_vconj, 7424}, // __builtin_HEXAGON_A2_vconj
+      {Intrinsic::hexagon_A2_vmaxb, 7451}, // __builtin_HEXAGON_A2_vmaxb
+      {Intrinsic::hexagon_A2_vmaxh, 7478}, // __builtin_HEXAGON_A2_vmaxh
+      {Intrinsic::hexagon_A2_vmaxub, 7505}, // __builtin_HEXAGON_A2_vmaxub
+      {Intrinsic::hexagon_A2_vmaxuh, 7533}, // __builtin_HEXAGON_A2_vmaxuh
+      {Intrinsic::hexagon_A2_vmaxuw, 7561}, // __builtin_HEXAGON_A2_vmaxuw
+      {Intrinsic::hexagon_A2_vmaxw, 7589}, // __builtin_HEXAGON_A2_vmaxw
+      {Intrinsic::hexagon_A2_vminb, 7616}, // __builtin_HEXAGON_A2_vminb
+      {Intrinsic::hexagon_A2_vminh, 7643}, // __builtin_HEXAGON_A2_vminh
+      {Intrinsic::hexagon_A2_vminub, 7670}, // __builtin_HEXAGON_A2_vminub
+      {Intrinsic::hexagon_A2_vminuh, 7698}, // __builtin_HEXAGON_A2_vminuh
+      {Intrinsic::hexagon_A2_vminuw, 7726}, // __builtin_HEXAGON_A2_vminuw
+      {Intrinsic::hexagon_A2_vminw, 7754}, // __builtin_HEXAGON_A2_vminw
+      {Intrinsic::hexagon_A2_vnavgh, 7781}, // __builtin_HEXAGON_A2_vnavgh
+      {Intrinsic::hexagon_A2_vnavghcr, 7809}, // __builtin_HEXAGON_A2_vnavghcr
+      {Intrinsic::hexagon_A2_vnavghr, 7839}, // __builtin_HEXAGON_A2_vnavghr
+      {Intrinsic::hexagon_A2_vnavgw, 7868}, // __builtin_HEXAGON_A2_vnavgw
+      {Intrinsic::hexagon_A2_vnavgwcr, 7896}, // __builtin_HEXAGON_A2_vnavgwcr
+      {Intrinsic::hexagon_A2_vnavgwr, 7926}, // __builtin_HEXAGON_A2_vnavgwr
+      {Intrinsic::hexagon_A2_vraddub, 7955}, // __builtin_HEXAGON_A2_vraddub
+      {Intrinsic::hexagon_A2_vraddub_acc, 7984}, // __builtin_HEXAGON_A2_vraddub_acc
+      {Intrinsic::hexagon_A2_vrsadub, 8017}, // __builtin_HEXAGON_A2_vrsadub
+      {Intrinsic::hexagon_A2_vrsadub_acc, 8046}, // __builtin_HEXAGON_A2_vrsadub_acc
+      {Intrinsic::hexagon_A2_vsubb_map, 8079}, // __builtin_HEXAGON_A2_vsubb_map
+      {Intrinsic::hexagon_A2_vsubh, 8110}, // __builtin_HEXAGON_A2_vsubh
+      {Intrinsic::hexagon_A2_vsubhs, 8137}, // __builtin_HEXAGON_A2_vsubhs
+      {Intrinsic::hexagon_A2_vsubub, 8165}, // __builtin_HEXAGON_A2_vsubub
+      {Intrinsic::hexagon_A2_vsububs, 8193}, // __builtin_HEXAGON_A2_vsububs
+      {Intrinsic::hexagon_A2_vsubuhs, 8222}, // __builtin_HEXAGON_A2_vsubuhs
+      {Intrinsic::hexagon_A2_vsubw, 8251}, // __builtin_HEXAGON_A2_vsubw
+      {Intrinsic::hexagon_A2_vsubws, 8278}, // __builtin_HEXAGON_A2_vsubws
+      {Intrinsic::hexagon_A2_xor, 8306}, // __builtin_HEXAGON_A2_xor
+      {Intrinsic::hexagon_A2_xorp, 8331}, // __builtin_HEXAGON_A2_xorp
+      {Intrinsic::hexagon_A2_zxtb, 8357}, // __builtin_HEXAGON_A2_zxtb
+      {Intrinsic::hexagon_A2_zxth, 8383}, // __builtin_HEXAGON_A2_zxth
+      {Intrinsic::hexagon_A4_andn, 8409}, // __builtin_HEXAGON_A4_andn
+      {Intrinsic::hexagon_A4_andnp, 8435}, // __builtin_HEXAGON_A4_andnp
+      {Intrinsic::hexagon_A4_bitsplit, 8462}, // __builtin_HEXAGON_A4_bitsplit
+      {Intrinsic::hexagon_A4_bitspliti, 8492}, // __builtin_HEXAGON_A4_bitspliti
+      {Intrinsic::hexagon_A4_boundscheck, 8523}, // __builtin_HEXAGON_A4_boundscheck
+      {Intrinsic::hexagon_A4_cmpbeq, 8556}, // __builtin_HEXAGON_A4_cmpbeq
+      {Intrinsic::hexagon_A4_cmpbeqi, 8584}, // __builtin_HEXAGON_A4_cmpbeqi
+      {Intrinsic::hexagon_A4_cmpbgt, 8613}, // __builtin_HEXAGON_A4_cmpbgt
+      {Intrinsic::hexagon_A4_cmpbgti, 8641}, // __builtin_HEXAGON_A4_cmpbgti
+      {Intrinsic::hexagon_A4_cmpbgtu, 8670}, // __builtin_HEXAGON_A4_cmpbgtu
+      {Intrinsic::hexagon_A4_cmpbgtui, 8699}, // __builtin_HEXAGON_A4_cmpbgtui
+      {Intrinsic::hexagon_A4_cmpheq, 8729}, // __builtin_HEXAGON_A4_cmpheq
+      {Intrinsic::hexagon_A4_cmpheqi, 8757}, // __builtin_HEXAGON_A4_cmpheqi
+      {Intrinsic::hexagon_A4_cmphgt, 8786}, // __builtin_HEXAGON_A4_cmphgt
+      {Intrinsic::hexagon_A4_cmphgti, 8814}, // __builtin_HEXAGON_A4_cmphgti
+      {Intrinsic::hexagon_A4_cmphgtu, 8843}, // __builtin_HEXAGON_A4_cmphgtu
+      {Intrinsic::hexagon_A4_cmphgtui, 8872}, // __builtin_HEXAGON_A4_cmphgtui
+      {Intrinsic::hexagon_A4_combineir, 8902}, // __builtin_HEXAGON_A4_combineir
+      {Intrinsic::hexagon_A4_combineri, 8933}, // __builtin_HEXAGON_A4_combineri
+      {Intrinsic::hexagon_A4_cround_ri, 8964}, // __builtin_HEXAGON_A4_cround_ri
+      {Intrinsic::hexagon_A4_cround_rr, 8995}, // __builtin_HEXAGON_A4_cround_rr
+      {Intrinsic::hexagon_A4_modwrapu, 9026}, // __builtin_HEXAGON_A4_modwrapu
+      {Intrinsic::hexagon_A4_orn, 9056}, // __builtin_HEXAGON_A4_orn
+      {Intrinsic::hexagon_A4_ornp, 9081}, // __builtin_HEXAGON_A4_ornp
+      {Intrinsic::hexagon_A4_rcmpeq, 9107}, // __builtin_HEXAGON_A4_rcmpeq
+      {Intrinsic::hexagon_A4_rcmpeqi, 9135}, // __builtin_HEXAGON_A4_rcmpeqi
+      {Intrinsic::hexagon_A4_rcmpneq, 9164}, // __builtin_HEXAGON_A4_rcmpneq
+      {Intrinsic::hexagon_A4_rcmpneqi, 9193}, // __builtin_HEXAGON_A4_rcmpneqi
+      {Intrinsic::hexagon_A4_round_ri, 9223}, // __builtin_HEXAGON_A4_round_ri
+      {Intrinsic::hexagon_A4_round_ri_sat, 9253}, // __builtin_HEXAGON_A4_round_ri_sat
+      {Intrinsic::hexagon_A4_round_rr, 9287}, // __builtin_HEXAGON_A4_round_rr
+      {Intrinsic::hexagon_A4_round_rr_sat, 9317}, // __builtin_HEXAGON_A4_round_rr_sat
+      {Intrinsic::hexagon_A4_tlbmatch, 9351}, // __builtin_HEXAGON_A4_tlbmatch
+      {Intrinsic::hexagon_A4_vcmpbeq_any, 9381}, // __builtin_HEXAGON_A4_vcmpbeq_any
+      {Intrinsic::hexagon_A4_vcmpbeqi, 9414}, // __builtin_HEXAGON_A4_vcmpbeqi
+      {Intrinsic::hexagon_A4_vcmpbgt, 9444}, // __builtin_HEXAGON_A4_vcmpbgt
+      {Intrinsic::hexagon_A4_vcmpbgti, 9473}, // __builtin_HEXAGON_A4_vcmpbgti
+      {Intrinsic::hexagon_A4_vcmpbgtui, 9503}, // __builtin_HEXAGON_A4_vcmpbgtui
+      {Intrinsic::hexagon_A4_vcmpheqi, 9534}, // __builtin_HEXAGON_A4_vcmpheqi
+      {Intrinsic::hexagon_A4_vcmphgti, 9564}, // __builtin_HEXAGON_A4_vcmphgti
+      {Intrinsic::hexagon_A4_vcmphgtui, 9594}, // __builtin_HEXAGON_A4_vcmphgtui
+      {Intrinsic::hexagon_A4_vcmpweqi, 9625}, // __builtin_HEXAGON_A4_vcmpweqi
+      {Intrinsic::hexagon_A4_vcmpwgti, 9655}, // __builtin_HEXAGON_A4_vcmpwgti
+      {Intrinsic::hexagon_A4_vcmpwgtui, 9685}, // __builtin_HEXAGON_A4_vcmpwgtui
+      {Intrinsic::hexagon_A4_vrmaxh, 9716}, // __builtin_HEXAGON_A4_vrmaxh
+      {Intrinsic::hexagon_A4_vrmaxuh, 9744}, // __builtin_HEXAGON_A4_vrmaxuh
+      {Intrinsic::hexagon_A4_vrmaxuw, 9773}, // __builtin_HEXAGON_A4_vrmaxuw
+      {Intrinsic::hexagon_A4_vrmaxw, 9802}, // __builtin_HEXAGON_A4_vrmaxw
+      {Intrinsic::hexagon_A4_vrminh, 9830}, // __builtin_HEXAGON_A4_vrminh
+      {Intrinsic::hexagon_A4_vrminuh, 9858}, // __builtin_HEXAGON_A4_vrminuh
+      {Intrinsic::hexagon_A4_vrminuw, 9887}, // __builtin_HEXAGON_A4_vrminuw
+      {Intrinsic::hexagon_A4_vrminw, 9916}, // __builtin_HEXAGON_A4_vrminw
+      {Intrinsic::hexagon_A5_vaddhubs, 9944}, // __builtin_HEXAGON_A5_vaddhubs
+      {Intrinsic::hexagon_A6_vcmpbeq_notany, 9974}, // __builtin_HEXAGON_A6_vcmpbeq_notany
+      {Intrinsic::hexagon_A6_vcmpbeq_notany_128B, 10010}, // __builtin_HEXAGON_A6_vcmpbeq_notany_128B
+      {Intrinsic::hexagon_C2_all8, 10051}, // __builtin_HEXAGON_C2_all8
+      {Intrinsic::hexagon_C2_and, 10077}, // __builtin_HEXAGON_C2_and
+      {Intrinsic::hexagon_C2_andn, 10102}, // __builtin_HEXAGON_C2_andn
+      {Intrinsic::hexagon_C2_any8, 10128}, // __builtin_HEXAGON_C2_any8
+      {Intrinsic::hexagon_C2_bitsclr, 10154}, // __builtin_HEXAGON_C2_bitsclr
+      {Intrinsic::hexagon_C2_bitsclri, 10183}, // __builtin_HEXAGON_C2_bitsclri
+      {Intrinsic::hexagon_C2_bitsset, 10213}, // __builtin_HEXAGON_C2_bitsset
+      {Intrinsic::hexagon_C2_cmpeq, 10242}, // __builtin_HEXAGON_C2_cmpeq
+      {Intrinsic::hexagon_C2_cmpeqi, 10269}, // __builtin_HEXAGON_C2_cmpeqi
+      {Intrinsic::hexagon_C2_cmpeqp, 10297}, // __builtin_HEXAGON_C2_cmpeqp
+      {Intrinsic::hexagon_C2_cmpgei, 10325}, // __builtin_HEXAGON_C2_cmpgei
+      {Intrinsic::hexagon_C2_cmpgeui, 10353}, // __builtin_HEXAGON_C2_cmpgeui
+      {Intrinsic::hexagon_C2_cmpgt, 10382}, // __builtin_HEXAGON_C2_cmpgt
+      {Intrinsic::hexagon_C2_cmpgti, 10409}, // __builtin_HEXAGON_C2_cmpgti
+      {Intrinsic::hexagon_C2_cmpgtp, 10437}, // __builtin_HEXAGON_C2_cmpgtp
+      {Intrinsic::hexagon_C2_cmpgtu, 10465}, // __builtin_HEXAGON_C2_cmpgtu
+      {Intrinsic::hexagon_C2_cmpgtui, 10493}, // __builtin_HEXAGON_C2_cmpgtui
+      {Intrinsic::hexagon_C2_cmpgtup, 10522}, // __builtin_HEXAGON_C2_cmpgtup
+      {Intrinsic::hexagon_C2_cmplt, 10551}, // __builtin_HEXAGON_C2_cmplt
+      {Intrinsic::hexagon_C2_cmpltu, 10578}, // __builtin_HEXAGON_C2_cmpltu
+      {Intrinsic::hexagon_C2_mask, 10606}, // __builtin_HEXAGON_C2_mask
+      {Intrinsic::hexagon_C2_mux, 10632}, // __builtin_HEXAGON_C2_mux
+      {Intrinsic::hexagon_C2_muxii, 10657}, // __builtin_HEXAGON_C2_muxii
+      {Intrinsic::hexagon_C2_muxir, 10684}, // __builtin_HEXAGON_C2_muxir
+      {Intrinsic::hexagon_C2_muxri, 10711}, // __builtin_HEXAGON_C2_muxri
+      {Intrinsic::hexagon_C2_not, 10738}, // __builtin_HEXAGON_C2_not
+      {Intrinsic::hexagon_C2_or, 10763}, // __builtin_HEXAGON_C2_or
+      {Intrinsic::hexagon_C2_orn, 10787}, // __builtin_HEXAGON_C2_orn
+      {Intrinsic::hexagon_C2_pxfer_map, 10812}, // __builtin_HEXAGON_C2_pxfer_map
+      {Intrinsic::hexagon_C2_tfrpr, 10843}, // __builtin_HEXAGON_C2_tfrpr
+      {Intrinsic::hexagon_C2_tfrrp, 10870}, // __builtin_HEXAGON_C2_tfrrp
+      {Intrinsic::hexagon_C2_vitpack, 10897}, // __builtin_HEXAGON_C2_vitpack
+      {Intrinsic::hexagon_C2_vmux, 10926}, // __builtin_HEXAGON_C2_vmux
+      {Intrinsic::hexagon_C2_xor, 10952}, // __builtin_HEXAGON_C2_xor
+      {Intrinsic::hexagon_C4_and_and, 10977}, // __builtin_HEXAGON_C4_and_and
+      {Intrinsic::hexagon_C4_and_andn, 11006}, // __builtin_HEXAGON_C4_and_andn
+      {Intrinsic::hexagon_C4_and_or, 11036}, // __builtin_HEXAGON_C4_and_or
+      {Intrinsic::hexagon_C4_and_orn, 11064}, // __builtin_HEXAGON_C4_and_orn
+      {Intrinsic::hexagon_C4_cmplte, 11093}, // __builtin_HEXAGON_C4_cmplte
+      {Intrinsic::hexagon_C4_cmpltei, 11121}, // __builtin_HEXAGON_C4_cmpltei
+      {Intrinsic::hexagon_C4_cmplteu, 11150}, // __builtin_HEXAGON_C4_cmplteu
+      {Intrinsic::hexagon_C4_cmplteui, 11179}, // __builtin_HEXAGON_C4_cmplteui
+      {Intrinsic::hexagon_C4_cmpneq, 11209}, // __builtin_HEXAGON_C4_cmpneq
+      {Intrinsic::hexagon_C4_cmpneqi, 11237}, // __builtin_HEXAGON_C4_cmpneqi
+      {Intrinsic::hexagon_C4_fastcorner9, 11266}, // __builtin_HEXAGON_C4_fastcorner9
+      {Intrinsic::hexagon_C4_fastcorner9_not, 11299}, // __builtin_HEXAGON_C4_fastcorner9_not
+      {Intrinsic::hexagon_C4_nbitsclr, 11336}, // __builtin_HEXAGON_C4_nbitsclr
+      {Intrinsic::hexagon_C4_nbitsclri, 11366}, // __builtin_HEXAGON_C4_nbitsclri
+      {Intrinsic::hexagon_C4_nbitsset, 11397}, // __builtin_HEXAGON_C4_nbitsset
+      {Intrinsic::hexagon_C4_or_and, 11427}, // __builtin_HEXAGON_C4_or_and
+      {Intrinsic::hexagon_C4_or_andn, 11455}, // __builtin_HEXAGON_C4_or_andn
+      {Intrinsic::hexagon_C4_or_or, 11484}, // __builtin_HEXAGON_C4_or_or
+      {Intrinsic::hexagon_C4_or_orn, 11511}, // __builtin_HEXAGON_C4_or_orn
+      {Intrinsic::hexagon_F2_conv_d2df, 11539}, // __builtin_HEXAGON_F2_conv_d2df
+      {Intrinsic::hexagon_F2_conv_d2sf, 11570}, // __builtin_HEXAGON_F2_conv_d2sf
+      {Intrinsic::hexagon_F2_conv_df2d, 11601}, // __builtin_HEXAGON_F2_conv_df2d
+      {Intrinsic::hexagon_F2_conv_df2d_chop, 11632}, // __builtin_HEXAGON_F2_conv_df2d_chop
+      {Intrinsic::hexagon_F2_conv_df2sf, 11668}, // __builtin_HEXAGON_F2_conv_df2sf
+      {Intrinsic::hexagon_F2_conv_df2ud, 11700}, // __builtin_HEXAGON_F2_conv_df2ud
+      {Intrinsic::hexagon_F2_conv_df2ud_chop, 11732}, // __builtin_HEXAGON_F2_conv_df2ud_chop
+      {Intrinsic::hexagon_F2_conv_df2uw, 11769}, // __builtin_HEXAGON_F2_conv_df2uw
+      {Intrinsic::hexagon_F2_conv_df2uw_chop, 11801}, // __builtin_HEXAGON_F2_conv_df2uw_chop
+      {Intrinsic::hexagon_F2_conv_df2w, 11838}, // __builtin_HEXAGON_F2_conv_df2w
+      {Intrinsic::hexagon_F2_conv_df2w_chop, 11869}, // __builtin_HEXAGON_F2_conv_df2w_chop
+      {Intrinsic::hexagon_F2_conv_sf2d, 11905}, // __builtin_HEXAGON_F2_conv_sf2d
+      {Intrinsic::hexagon_F2_conv_sf2d_chop, 11936}, // __builtin_HEXAGON_F2_conv_sf2d_chop
+      {Intrinsic::hexagon_F2_conv_sf2df, 11972}, // __builtin_HEXAGON_F2_conv_sf2df
+      {Intrinsic::hexagon_F2_conv_sf2ud, 12004}, // __builtin_HEXAGON_F2_conv_sf2ud
+      {Intrinsic::hexagon_F2_conv_sf2ud_chop, 12036}, // __builtin_HEXAGON_F2_conv_sf2ud_chop
+      {Intrinsic::hexagon_F2_conv_sf2uw, 12073}, // __builtin_HEXAGON_F2_conv_sf2uw
+      {Intrinsic::hexagon_F2_conv_sf2uw_chop, 12105}, // __builtin_HEXAGON_F2_conv_sf2uw_chop
+      {Intrinsic::hexagon_F2_conv_sf2w, 12142}, // __builtin_HEXAGON_F2_conv_sf2w
+      {Intrinsic::hexagon_F2_conv_sf2w_chop, 12173}, // __builtin_HEXAGON_F2_conv_sf2w_chop
+      {Intrinsic::hexagon_F2_conv_ud2df, 12209}, // __builtin_HEXAGON_F2_conv_ud2df
+      {Intrinsic::hexagon_F2_conv_ud2sf, 12241}, // __builtin_HEXAGON_F2_conv_ud2sf
+      {Intrinsic::hexagon_F2_conv_uw2df, 12273}, // __builtin_HEXAGON_F2_conv_uw2df
+      {Intrinsic::hexagon_F2_conv_uw2sf, 12305}, // __builtin_HEXAGON_F2_conv_uw2sf
+      {Intrinsic::hexagon_F2_conv_w2df, 12337}, // __builtin_HEXAGON_F2_conv_w2df
+      {Intrinsic::hexagon_F2_conv_w2sf, 12368}, // __builtin_HEXAGON_F2_conv_w2sf
+      {Intrinsic::hexagon_F2_dfclass, 12399}, // __builtin_HEXAGON_F2_dfclass
+      {Intrinsic::hexagon_F2_dfcmpeq, 12428}, // __builtin_HEXAGON_F2_dfcmpeq
+      {Intrinsic::hexagon_F2_dfcmpge, 12457}, // __builtin_HEXAGON_F2_dfcmpge
+      {Intrinsic::hexagon_F2_dfcmpgt, 12486}, // __builtin_HEXAGON_F2_dfcmpgt
+      {Intrinsic::hexagon_F2_dfcmpuo, 12515}, // __builtin_HEXAGON_F2_dfcmpuo
+      {Intrinsic::hexagon_F2_dfimm_n, 12544}, // __builtin_HEXAGON_F2_dfimm_n
+      {Intrinsic::hexagon_F2_dfimm_p, 12573}, // __builtin_HEXAGON_F2_dfimm_p
+      {Intrinsic::hexagon_F2_sfadd, 12602}, // __builtin_HEXAGON_F2_sfadd
+      {Intrinsic::hexagon_F2_sfclass, 12629}, // __builtin_HEXAGON_F2_sfclass
+      {Intrinsic::hexagon_F2_sfcmpeq, 12658}, // __builtin_HEXAGON_F2_sfcmpeq
+      {Intrinsic::hexagon_F2_sfcmpge, 12687}, // __builtin_HEXAGON_F2_sfcmpge
+      {Intrinsic::hexagon_F2_sfcmpgt, 12716}, // __builtin_HEXAGON_F2_sfcmpgt
+      {Intrinsic::hexagon_F2_sfcmpuo, 12745}, // __builtin_HEXAGON_F2_sfcmpuo
+      {Intrinsic::hexagon_F2_sffixupd, 12774}, // __builtin_HEXAGON_F2_sffixupd
+      {Intrinsic::hexagon_F2_sffixupn, 12804}, // __builtin_HEXAGON_F2_sffixupn
+      {Intrinsic::hexagon_F2_sffixupr, 12834}, // __builtin_HEXAGON_F2_sffixupr
+      {Intrinsic::hexagon_F2_sffma, 12864}, // __builtin_HEXAGON_F2_sffma
+      {Intrinsic::hexagon_F2_sffma_lib, 12891}, // __builtin_HEXAGON_F2_sffma_lib
+      {Intrinsic::hexagon_F2_sffma_sc, 12922}, // __builtin_HEXAGON_F2_sffma_sc
+      {Intrinsic::hexagon_F2_sffms, 12952}, // __builtin_HEXAGON_F2_sffms
+      {Intrinsic::hexagon_F2_sffms_lib, 12979}, // __builtin_HEXAGON_F2_sffms_lib
+      {Intrinsic::hexagon_F2_sfimm_n, 13010}, // __builtin_HEXAGON_F2_sfimm_n
+      {Intrinsic::hexagon_F2_sfimm_p, 13039}, // __builtin_HEXAGON_F2_sfimm_p
+      {Intrinsic::hexagon_F2_sfmax, 13068}, // __builtin_HEXAGON_F2_sfmax
+      {Intrinsic::hexagon_F2_sfmin, 13095}, // __builtin_HEXAGON_F2_sfmin
+      {Intrinsic::hexagon_F2_sfmpy, 13122}, // __builtin_HEXAGON_F2_sfmpy
+      {Intrinsic::hexagon_F2_sfsub, 13149}, // __builtin_HEXAGON_F2_sfsub
+      {Intrinsic::hexagon_L2_loadw_locked, 13176}, // __builtin_HEXAGON_L2_loadw_locked
+      {Intrinsic::hexagon_L4_loadd_locked, 13210}, // __builtin_HEXAGON_L4_loadd_locked
+      {Intrinsic::hexagon_M2_acci, 13244}, // __builtin_HEXAGON_M2_acci
+      {Intrinsic::hexagon_M2_accii, 13270}, // __builtin_HEXAGON_M2_accii
+      {Intrinsic::hexagon_M2_cmaci_s0, 13297}, // __builtin_HEXAGON_M2_cmaci_s0
+      {Intrinsic::hexagon_M2_cmacr_s0, 13327}, // __builtin_HEXAGON_M2_cmacr_s0
+      {Intrinsic::hexagon_M2_cmacs_s0, 13357}, // __builtin_HEXAGON_M2_cmacs_s0
+      {Intrinsic::hexagon_M2_cmacs_s1, 13387}, // __builtin_HEXAGON_M2_cmacs_s1
+      {Intrinsic::hexagon_M2_cmacsc_s0, 13417}, // __builtin_HEXAGON_M2_cmacsc_s0
+      {Intrinsic::hexagon_M2_cmacsc_s1, 13448}, // __builtin_HEXAGON_M2_cmacsc_s1
+      {Intrinsic::hexagon_M2_cmpyi_s0, 13479}, // __builtin_HEXAGON_M2_cmpyi_s0
+      {Intrinsic::hexagon_M2_cmpyr_s0, 13509}, // __builtin_HEXAGON_M2_cmpyr_s0
+      {Intrinsic::hexagon_M2_cmpyrs_s0, 13539}, // __builtin_HEXAGON_M2_cmpyrs_s0
+      {Intrinsic::hexagon_M2_cmpyrs_s1, 13570}, // __builtin_HEXAGON_M2_cmpyrs_s1
+      {Intrinsic::hexagon_M2_cmpyrsc_s0, 13601}, // __builtin_HEXAGON_M2_cmpyrsc_s0
+      {Intrinsic::hexagon_M2_cmpyrsc_s1, 13633}, // __builtin_HEXAGON_M2_cmpyrsc_s1
+      {Intrinsic::hexagon_M2_cmpys_s0, 13665}, // __builtin_HEXAGON_M2_cmpys_s0
+      {Intrinsic::hexagon_M2_cmpys_s1, 13695}, // __builtin_HEXAGON_M2_cmpys_s1
+      {Intrinsic::hexagon_M2_cmpysc_s0, 13725}, // __builtin_HEXAGON_M2_cmpysc_s0
+      {Intrinsic::hexagon_M2_cmpysc_s1, 13756}, // __builtin_HEXAGON_M2_cmpysc_s1
+      {Intrinsic::hexagon_M2_cnacs_s0, 13787}, // __builtin_HEXAGON_M2_cnacs_s0
+      {Intrinsic::hexagon_M2_cnacs_s1, 13817}, // __builtin_HEXAGON_M2_cnacs_s1
+      {Intrinsic::hexagon_M2_cnacsc_s0, 13847}, // __builtin_HEXAGON_M2_cnacsc_s0
+      {Intrinsic::hexagon_M2_cnacsc_s1, 13878}, // __builtin_HEXAGON_M2_cnacsc_s1
+      {Intrinsic::hexagon_M2_dpmpyss_acc_s0, 13909}, // __builtin_HEXAGON_M2_dpmpyss_acc_s0
+      {Intrinsic::hexagon_M2_dpmpyss_nac_s0, 13945}, // __builtin_HEXAGON_M2_dpmpyss_nac_s0
+      {Intrinsic::hexagon_M2_dpmpyss_rnd_s0, 13981}, // __builtin_HEXAGON_M2_dpmpyss_rnd_s0
+      {Intrinsic::hexagon_M2_dpmpyss_s0, 14017}, // __builtin_HEXAGON_M2_dpmpyss_s0
+      {Intrinsic::hexagon_M2_dpmpyuu_acc_s0, 14049}, // __builtin_HEXAGON_M2_dpmpyuu_acc_s0
+      {Intrinsic::hexagon_M2_dpmpyuu_nac_s0, 14085}, // __builtin_HEXAGON_M2_dpmpyuu_nac_s0
+      {Intrinsic::hexagon_M2_dpmpyuu_s0, 14121}, // __builtin_HEXAGON_M2_dpmpyuu_s0
+      {Intrinsic::hexagon_M2_hmmpyh_rs1, 14153}, // __builtin_HEXAGON_M2_hmmpyh_rs1
+      {Intrinsic::hexagon_M2_hmmpyh_s1, 14185}, // __builtin_HEXAGON_M2_hmmpyh_s1
+      {Intrinsic::hexagon_M2_hmmpyl_rs1, 14216}, // __builtin_HEXAGON_M2_hmmpyl_rs1
+      {Intrinsic::hexagon_M2_hmmpyl_s1, 14248}, // __builtin_HEXAGON_M2_hmmpyl_s1
+      {Intrinsic::hexagon_M2_maci, 14279}, // __builtin_HEXAGON_M2_maci
+      {Intrinsic::hexagon_M2_macsin, 14305}, // __builtin_HEXAGON_M2_macsin
+      {Intrinsic::hexagon_M2_macsip, 14333}, // __builtin_HEXAGON_M2_macsip
+      {Intrinsic::hexagon_M2_mmachs_rs0, 14361}, // __builtin_HEXAGON_M2_mmachs_rs0
+      {Intrinsic::hexagon_M2_mmachs_rs1, 14393}, // __builtin_HEXAGON_M2_mmachs_rs1
+      {Intrinsic::hexagon_M2_mmachs_s0, 14425}, // __builtin_HEXAGON_M2_mmachs_s0
+      {Intrinsic::hexagon_M2_mmachs_s1, 14456}, // __builtin_HEXAGON_M2_mmachs_s1
+      {Intrinsic::hexagon_M2_mmacls_rs0, 14487}, // __builtin_HEXAGON_M2_mmacls_rs0
+      {Intrinsic::hexagon_M2_mmacls_rs1, 14519}, // __builtin_HEXAGON_M2_mmacls_rs1
+      {Intrinsic::hexagon_M2_mmacls_s0, 14551}, // __builtin_HEXAGON_M2_mmacls_s0
+      {Intrinsic::hexagon_M2_mmacls_s1, 14582}, // __builtin_HEXAGON_M2_mmacls_s1
+      {Intrinsic::hexagon_M2_mmacuhs_rs0, 14613}, // __builtin_HEXAGON_M2_mmacuhs_rs0
+      {Intrinsic::hexagon_M2_mmacuhs_rs1, 14646}, // __builtin_HEXAGON_M2_mmacuhs_rs1
+      {Intrinsic::hexagon_M2_mmacuhs_s0, 14679}, // __builtin_HEXAGON_M2_mmacuhs_s0
+      {Intrinsic::hexagon_M2_mmacuhs_s1, 14711}, // __builtin_HEXAGON_M2_mmacuhs_s1
+      {Intrinsic::hexagon_M2_mmaculs_rs0, 14743}, // __builtin_HEXAGON_M2_mmaculs_rs0
+      {Intrinsic::hexagon_M2_mmaculs_rs1, 14776}, // __builtin_HEXAGON_M2_mmaculs_rs1
+      {Intrinsic::hexagon_M2_mmaculs_s0, 14809}, // __builtin_HEXAGON_M2_mmaculs_s0
+      {Intrinsic::hexagon_M2_mmaculs_s1, 14841}, // __builtin_HEXAGON_M2_mmaculs_s1
+      {Intrinsic::hexagon_M2_mmpyh_rs0, 14873}, // __builtin_HEXAGON_M2_mmpyh_rs0
+      {Intrinsic::hexagon_M2_mmpyh_rs1, 14904}, // __builtin_HEXAGON_M2_mmpyh_rs1
+      {Intrinsic::hexagon_M2_mmpyh_s0, 14935}, // __builtin_HEXAGON_M2_mmpyh_s0
+      {Intrinsic::hexagon_M2_mmpyh_s1, 14965}, // __builtin_HEXAGON_M2_mmpyh_s1
+      {Intrinsic::hexagon_M2_mmpyl_rs0, 14995}, // __builtin_HEXAGON_M2_mmpyl_rs0
+      {Intrinsic::hexagon_M2_mmpyl_rs1, 15026}, // __builtin_HEXAGON_M2_mmpyl_rs1
+      {Intrinsic::hexagon_M2_mmpyl_s0, 15057}, // __builtin_HEXAGON_M2_mmpyl_s0
+      {Intrinsic::hexagon_M2_mmpyl_s1, 15087}, // __builtin_HEXAGON_M2_mmpyl_s1
+      {Intrinsic::hexagon_M2_mmpyuh_rs0, 15117}, // __builtin_HEXAGON_M2_mmpyuh_rs0
+      {Intrinsic::hexagon_M2_mmpyuh_rs1, 15149}, // __builtin_HEXAGON_M2_mmpyuh_rs1
+      {Intrinsic::hexagon_M2_mmpyuh_s0, 15181}, // __builtin_HEXAGON_M2_mmpyuh_s0
+      {Intrinsic::hexagon_M2_mmpyuh_s1, 15212}, // __builtin_HEXAGON_M2_mmpyuh_s1
+      {Intrinsic::hexagon_M2_mmpyul_rs0, 15243}, // __builtin_HEXAGON_M2_mmpyul_rs0
+      {Intrinsic::hexagon_M2_mmpyul_rs1, 15275}, // __builtin_HEXAGON_M2_mmpyul_rs1
+      {Intrinsic::hexagon_M2_mmpyul_s0, 15307}, // __builtin_HEXAGON_M2_mmpyul_s0
+      {Intrinsic::hexagon_M2_mmpyul_s1, 15338}, // __builtin_HEXAGON_M2_mmpyul_s1
+      {Intrinsic::hexagon_M2_mpy_acc_hh_s0, 15369}, // __builtin_HEXAGON_M2_mpy_acc_hh_s0
+      {Intrinsic::hexagon_M2_mpy_acc_hh_s1, 15404}, // __builtin_HEXAGON_M2_mpy_acc_hh_s1
+      {Intrinsic::hexagon_M2_mpy_acc_hl_s0, 15439}, // __builtin_HEXAGON_M2_mpy_acc_hl_s0
+      {Intrinsic::hexagon_M2_mpy_acc_hl_s1, 15474}, // __builtin_HEXAGON_M2_mpy_acc_hl_s1
+      {Intrinsic::hexagon_M2_mpy_acc_lh_s0, 15509}, // __builtin_HEXAGON_M2_mpy_acc_lh_s0
+      {Intrinsic::hexagon_M2_mpy_acc_lh_s1, 15544}, // __builtin_HEXAGON_M2_mpy_acc_lh_s1
+      {Intrinsic::hexagon_M2_mpy_acc_ll_s0, 15579}, // __builtin_HEXAGON_M2_mpy_acc_ll_s0
+      {Intrinsic::hexagon_M2_mpy_acc_ll_s1, 15614}, // __builtin_HEXAGON_M2_mpy_acc_ll_s1
+      {Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0, 15649}, // __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0
+      {Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1, 15688}, // __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1
+      {Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0, 15727}, // __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0
+      {Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1, 15766}, // __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1
+      {Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0, 15805}, // __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0
+      {Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1, 15844}, // __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1
+      {Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0, 15883}, // __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0
+      {Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1, 15922}, // __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1
+      {Intrinsic::hexagon_M2_mpy_hh_s0, 15961}, // __builtin_HEXAGON_M2_mpy_hh_s0
+      {Intrinsic::hexagon_M2_mpy_hh_s1, 15992}, // __builtin_HEXAGON_M2_mpy_hh_s1
+      {Intrinsic::hexagon_M2_mpy_hl_s0, 16023}, // __builtin_HEXAGON_M2_mpy_hl_s0
+      {Intrinsic::hexagon_M2_mpy_hl_s1, 16054}, // __builtin_HEXAGON_M2_mpy_hl_s1
+      {Intrinsic::hexagon_M2_mpy_lh_s0, 16085}, // __builtin_HEXAGON_M2_mpy_lh_s0
+      {Intrinsic::hexagon_M2_mpy_lh_s1, 16116}, // __builtin_HEXAGON_M2_mpy_lh_s1
+      {Intrinsic::hexagon_M2_mpy_ll_s0, 16147}, // __builtin_HEXAGON_M2_mpy_ll_s0
+      {Intrinsic::hexagon_M2_mpy_ll_s1, 16178}, // __builtin_HEXAGON_M2_mpy_ll_s1
+      {Intrinsic::hexagon_M2_mpy_nac_hh_s0, 16209}, // __builtin_HEXAGON_M2_mpy_nac_hh_s0
+      {Intrinsic::hexagon_M2_mpy_nac_hh_s1, 16244}, // __builtin_HEXAGON_M2_mpy_nac_hh_s1
+      {Intrinsic::hexagon_M2_mpy_nac_hl_s0, 16279}, // __builtin_HEXAGON_M2_mpy_nac_hl_s0
+      {Intrinsic::hexagon_M2_mpy_nac_hl_s1, 16314}, // __builtin_HEXAGON_M2_mpy_nac_hl_s1
+      {Intrinsic::hexagon_M2_mpy_nac_lh_s0, 16349}, // __builtin_HEXAGON_M2_mpy_nac_lh_s0
+      {Intrinsic::hexagon_M2_mpy_nac_lh_s1, 16384}, // __builtin_HEXAGON_M2_mpy_nac_lh_s1
+      {Intrinsic::hexagon_M2_mpy_nac_ll_s0, 16419}, // __builtin_HEXAGON_M2_mpy_nac_ll_s0
+      {Intrinsic::hexagon_M2_mpy_nac_ll_s1, 16454}, // __builtin_HEXAGON_M2_mpy_nac_ll_s1
+      {Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0, 16489}, // __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0
+      {Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1, 16528}, // __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1
+      {Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0, 16567}, // __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0
+      {Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1, 16606}, // __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1
+      {Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0, 16645}, // __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0
+      {Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1, 16684}, // __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1
+      {Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0, 16723}, // __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0
+      {Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1, 16762}, // __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1
+      {Intrinsic::hexagon_M2_mpy_rnd_hh_s0, 16801}, // __builtin_HEXAGON_M2_mpy_rnd_hh_s0
+      {Intrinsic::hexagon_M2_mpy_rnd_hh_s1, 16836}, // __builtin_HEXAGON_M2_mpy_rnd_hh_s1
+      {Intrinsic::hexagon_M2_mpy_rnd_hl_s0, 16871}, // __builtin_HEXAGON_M2_mpy_rnd_hl_s0
+      {Intrinsic::hexagon_M2_mpy_rnd_hl_s1, 16906}, // __builtin_HEXAGON_M2_mpy_rnd_hl_s1
+      {Intrinsic::hexagon_M2_mpy_rnd_lh_s0, 16941}, // __builtin_HEXAGON_M2_mpy_rnd_lh_s0
+      {Intrinsic::hexagon_M2_mpy_rnd_lh_s1, 16976}, // __builtin_HEXAGON_M2_mpy_rnd_lh_s1
+      {Intrinsic::hexagon_M2_mpy_rnd_ll_s0, 17011}, // __builtin_HEXAGON_M2_mpy_rnd_ll_s0
+      {Intrinsic::hexagon_M2_mpy_rnd_ll_s1, 17046}, // __builtin_HEXAGON_M2_mpy_rnd_ll_s1
+      {Intrinsic::hexagon_M2_mpy_sat_hh_s0, 17081}, // __builtin_HEXAGON_M2_mpy_sat_hh_s0
+      {Intrinsic::hexagon_M2_mpy_sat_hh_s1, 17116}, // __builtin_HEXAGON_M2_mpy_sat_hh_s1
+      {Intrinsic::hexagon_M2_mpy_sat_hl_s0, 17151}, // __builtin_HEXAGON_M2_mpy_sat_hl_s0
+      {Intrinsic::hexagon_M2_mpy_sat_hl_s1, 17186}, // __builtin_HEXAGON_M2_mpy_sat_hl_s1
+      {Intrinsic::hexagon_M2_mpy_sat_lh_s0, 17221}, // __builtin_HEXAGON_M2_mpy_sat_lh_s0
+      {Intrinsic::hexagon_M2_mpy_sat_lh_s1, 17256}, // __builtin_HEXAGON_M2_mpy_sat_lh_s1
+      {Intrinsic::hexagon_M2_mpy_sat_ll_s0, 17291}, // __builtin_HEXAGON_M2_mpy_sat_ll_s0
+      {Intrinsic::hexagon_M2_mpy_sat_ll_s1, 17326}, // __builtin_HEXAGON_M2_mpy_sat_ll_s1
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0, 17361}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1, 17400}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0, 17439}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1, 17478}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0, 17517}, // __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1, 17556}, // __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0, 17595}, // __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0
+      {Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1, 17634}, // __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1
+      {Intrinsic::hexagon_M2_mpy_up, 17673}, // __builtin_HEXAGON_M2_mpy_up
+      {Intrinsic::hexagon_M2_mpy_up_s1, 17701}, // __builtin_HEXAGON_M2_mpy_up_s1
+      {Intrinsic::hexagon_M2_mpy_up_s1_sat, 17732}, // __builtin_HEXAGON_M2_mpy_up_s1_sat
+      {Intrinsic::hexagon_M2_mpyd_acc_hh_s0, 17767}, // __builtin_HEXAGON_M2_mpyd_acc_hh_s0
+      {Intrinsic::hexagon_M2_mpyd_acc_hh_s1, 17803}, // __builtin_HEXAGON_M2_mpyd_acc_hh_s1
+      {Intrinsic::hexagon_M2_mpyd_acc_hl_s0, 17839}, // __builtin_HEXAGON_M2_mpyd_acc_hl_s0
+      {Intrinsic::hexagon_M2_mpyd_acc_hl_s1, 17875}, // __builtin_HEXAGON_M2_mpyd_acc_hl_s1
+      {Intrinsic::hexagon_M2_mpyd_acc_lh_s0, 17911}, // __builtin_HEXAGON_M2_mpyd_acc_lh_s0
+      {Intrinsic::hexagon_M2_mpyd_acc_lh_s1, 17947}, // __builtin_HEXAGON_M2_mpyd_acc_lh_s1
+      {Intrinsic::hexagon_M2_mpyd_acc_ll_s0, 17983}, // __builtin_HEXAGON_M2_mpyd_acc_ll_s0
+      {Intrinsic::hexagon_M2_mpyd_acc_ll_s1, 18019}, // __builtin_HEXAGON_M2_mpyd_acc_ll_s1
+      {Intrinsic::hexagon_M2_mpyd_hh_s0, 18055}, // __builtin_HEXAGON_M2_mpyd_hh_s0
+      {Intrinsic::hexagon_M2_mpyd_hh_s1, 18087}, // __builtin_HEXAGON_M2_mpyd_hh_s1
+      {Intrinsic::hexagon_M2_mpyd_hl_s0, 18119}, // __builtin_HEXAGON_M2_mpyd_hl_s0
+      {Intrinsic::hexagon_M2_mpyd_hl_s1, 18151}, // __builtin_HEXAGON_M2_mpyd_hl_s1
+      {Intrinsic::hexagon_M2_mpyd_lh_s0, 18183}, // __builtin_HEXAGON_M2_mpyd_lh_s0
+      {Intrinsic::hexagon_M2_mpyd_lh_s1, 18215}, // __builtin_HEXAGON_M2_mpyd_lh_s1
+      {Intrinsic::hexagon_M2_mpyd_ll_s0, 18247}, // __builtin_HEXAGON_M2_mpyd_ll_s0
+      {Intrinsic::hexagon_M2_mpyd_ll_s1, 18279}, // __builtin_HEXAGON_M2_mpyd_ll_s1
+      {Intrinsic::hexagon_M2_mpyd_nac_hh_s0, 18311}, // __builtin_HEXAGON_M2_mpyd_nac_hh_s0
+      {Intrinsic::hexagon_M2_mpyd_nac_hh_s1, 18347}, // __builtin_HEXAGON_M2_mpyd_nac_hh_s1
+      {Intrinsic::hexagon_M2_mpyd_nac_hl_s0, 18383}, // __builtin_HEXAGON_M2_mpyd_nac_hl_s0
+      {Intrinsic::hexagon_M2_mpyd_nac_hl_s1, 18419}, // __builtin_HEXAGON_M2_mpyd_nac_hl_s1
+      {Intrinsic::hexagon_M2_mpyd_nac_lh_s0, 18455}, // __builtin_HEXAGON_M2_mpyd_nac_lh_s0
+      {Intrinsic::hexagon_M2_mpyd_nac_lh_s1, 18491}, // __builtin_HEXAGON_M2_mpyd_nac_lh_s1
+      {Intrinsic::hexagon_M2_mpyd_nac_ll_s0, 18527}, // __builtin_HEXAGON_M2_mpyd_nac_ll_s0
+      {Intrinsic::hexagon_M2_mpyd_nac_ll_s1, 18563}, // __builtin_HEXAGON_M2_mpyd_nac_ll_s1
+      {Intrinsic::hexagon_M2_mpyd_rnd_hh_s0, 18599}, // __builtin_HEXAGON_M2_mpyd_rnd_hh_s0
+      {Intrinsic::hexagon_M2_mpyd_rnd_hh_s1, 18635}, // __builtin_HEXAGON_M2_mpyd_rnd_hh_s1
+      {Intrinsic::hexagon_M2_mpyd_rnd_hl_s0, 18671}, // __builtin_HEXAGON_M2_mpyd_rnd_hl_s0
+      {Intrinsic::hexagon_M2_mpyd_rnd_hl_s1, 18707}, // __builtin_HEXAGON_M2_mpyd_rnd_hl_s1
+      {Intrinsic::hexagon_M2_mpyd_rnd_lh_s0, 18743}, // __builtin_HEXAGON_M2_mpyd_rnd_lh_s0
+      {Intrinsic::hexagon_M2_mpyd_rnd_lh_s1, 18779}, // __builtin_HEXAGON_M2_mpyd_rnd_lh_s1
+      {Intrinsic::hexagon_M2_mpyd_rnd_ll_s0, 18815}, // __builtin_HEXAGON_M2_mpyd_rnd_ll_s0
+      {Intrinsic::hexagon_M2_mpyd_rnd_ll_s1, 18851}, // __builtin_HEXAGON_M2_mpyd_rnd_ll_s1
+      {Intrinsic::hexagon_M2_mpyi, 18887}, // __builtin_HEXAGON_M2_mpyi
+      {Intrinsic::hexagon_M2_mpysmi, 18913}, // __builtin_HEXAGON_M2_mpysmi
+      {Intrinsic::hexagon_M2_mpysu_up, 18941}, // __builtin_HEXAGON_M2_mpysu_up
+      {Intrinsic::hexagon_M2_mpyu_acc_hh_s0, 18971}, // __builtin_HEXAGON_M2_mpyu_acc_hh_s0
+      {Intrinsic::hexagon_M2_mpyu_acc_hh_s1, 19007}, // __builtin_HEXAGON_M2_mpyu_acc_hh_s1
+      {Intrinsic::hexagon_M2_mpyu_acc_hl_s0, 19043}, // __builtin_HEXAGON_M2_mpyu_acc_hl_s0
+      {Intrinsic::hexagon_M2_mpyu_acc_hl_s1, 19079}, // __builtin_HEXAGON_M2_mpyu_acc_hl_s1
+      {Intrinsic::hexagon_M2_mpyu_acc_lh_s0, 19115}, // __builtin_HEXAGON_M2_mpyu_acc_lh_s0
+      {Intrinsic::hexagon_M2_mpyu_acc_lh_s1, 19151}, // __builtin_HEXAGON_M2_mpyu_acc_lh_s1
+      {Intrinsic::hexagon_M2_mpyu_acc_ll_s0, 19187}, // __builtin_HEXAGON_M2_mpyu_acc_ll_s0
+      {Intrinsic::hexagon_M2_mpyu_acc_ll_s1, 19223}, // __builtin_HEXAGON_M2_mpyu_acc_ll_s1
+      {Intrinsic::hexagon_M2_mpyu_hh_s0, 19259}, // __builtin_HEXAGON_M2_mpyu_hh_s0
+      {Intrinsic::hexagon_M2_mpyu_hh_s1, 19291}, // __builtin_HEXAGON_M2_mpyu_hh_s1
+      {Intrinsic::hexagon_M2_mpyu_hl_s0, 19323}, // __builtin_HEXAGON_M2_mpyu_hl_s0
+      {Intrinsic::hexagon_M2_mpyu_hl_s1, 19355}, // __builtin_HEXAGON_M2_mpyu_hl_s1
+      {Intrinsic::hexagon_M2_mpyu_lh_s0, 19387}, // __builtin_HEXAGON_M2_mpyu_lh_s0
+      {Intrinsic::hexagon_M2_mpyu_lh_s1, 19419}, // __builtin_HEXAGON_M2_mpyu_lh_s1
+      {Intrinsic::hexagon_M2_mpyu_ll_s0, 19451}, // __builtin_HEXAGON_M2_mpyu_ll_s0
+      {Intrinsic::hexagon_M2_mpyu_ll_s1, 19483}, // __builtin_HEXAGON_M2_mpyu_ll_s1
+      {Intrinsic::hexagon_M2_mpyu_nac_hh_s0, 19515}, // __builtin_HEXAGON_M2_mpyu_nac_hh_s0
+      {Intrinsic::hexagon_M2_mpyu_nac_hh_s1, 19551}, // __builtin_HEXAGON_M2_mpyu_nac_hh_s1
+      {Intrinsic::hexagon_M2_mpyu_nac_hl_s0, 19587}, // __builtin_HEXAGON_M2_mpyu_nac_hl_s0
+      {Intrinsic::hexagon_M2_mpyu_nac_hl_s1, 19623}, // __builtin_HEXAGON_M2_mpyu_nac_hl_s1
+      {Intrinsic::hexagon_M2_mpyu_nac_lh_s0, 19659}, // __builtin_HEXAGON_M2_mpyu_nac_lh_s0
+      {Intrinsic::hexagon_M2_mpyu_nac_lh_s1, 19695}, // __builtin_HEXAGON_M2_mpyu_nac_lh_s1
+      {Intrinsic::hexagon_M2_mpyu_nac_ll_s0, 19731}, // __builtin_HEXAGON_M2_mpyu_nac_ll_s0
+      {Intrinsic::hexagon_M2_mpyu_nac_ll_s1, 19767}, // __builtin_HEXAGON_M2_mpyu_nac_ll_s1
+      {Intrinsic::hexagon_M2_mpyu_up, 19803}, // __builtin_HEXAGON_M2_mpyu_up
+      {Intrinsic::hexagon_M2_mpyud_acc_hh_s0, 19832}, // __builtin_HEXAGON_M2_mpyud_acc_hh_s0
+      {Intrinsic::hexagon_M2_mpyud_acc_hh_s1, 19869}, // __builtin_HEXAGON_M2_mpyud_acc_hh_s1
+      {Intrinsic::hexagon_M2_mpyud_acc_hl_s0, 19906}, // __builtin_HEXAGON_M2_mpyud_acc_hl_s0
+      {Intrinsic::hexagon_M2_mpyud_acc_hl_s1, 19943}, // __builtin_HEXAGON_M2_mpyud_acc_hl_s1
+      {Intrinsic::hexagon_M2_mpyud_acc_lh_s0, 19980}, // __builtin_HEXAGON_M2_mpyud_acc_lh_s0
+      {Intrinsic::hexagon_M2_mpyud_acc_lh_s1, 20017}, // __builtin_HEXAGON_M2_mpyud_acc_lh_s1
+      {Intrinsic::hexagon_M2_mpyud_acc_ll_s0, 20054}, // __builtin_HEXAGON_M2_mpyud_acc_ll_s0
+      {Intrinsic::hexagon_M2_mpyud_acc_ll_s1, 20091}, // __builtin_HEXAGON_M2_mpyud_acc_ll_s1
+      {Intrinsic::hexagon_M2_mpyud_hh_s0, 20128}, // __builtin_HEXAGON_M2_mpyud_hh_s0
+      {Intrinsic::hexagon_M2_mpyud_hh_s1, 20161}, // __builtin_HEXAGON_M2_mpyud_hh_s1
+      {Intrinsic::hexagon_M2_mpyud_hl_s0, 20194}, // __builtin_HEXAGON_M2_mpyud_hl_s0
+      {Intrinsic::hexagon_M2_mpyud_hl_s1, 20227}, // __builtin_HEXAGON_M2_mpyud_hl_s1
+      {Intrinsic::hexagon_M2_mpyud_lh_s0, 20260}, // __builtin_HEXAGON_M2_mpyud_lh_s0
+      {Intrinsic::hexagon_M2_mpyud_lh_s1, 20293}, // __builtin_HEXAGON_M2_mpyud_lh_s1
+      {Intrinsic::hexagon_M2_mpyud_ll_s0, 20326}, // __builtin_HEXAGON_M2_mpyud_ll_s0
+      {Intrinsic::hexagon_M2_mpyud_ll_s1, 20359}, // __builtin_HEXAGON_M2_mpyud_ll_s1
+      {Intrinsic::hexagon_M2_mpyud_nac_hh_s0, 20392}, // __builtin_HEXAGON_M2_mpyud_nac_hh_s0
+      {Intrinsic::hexagon_M2_mpyud_nac_hh_s1, 20429}, // __builtin_HEXAGON_M2_mpyud_nac_hh_s1
+      {Intrinsic::hexagon_M2_mpyud_nac_hl_s0, 20466}, // __builtin_HEXAGON_M2_mpyud_nac_hl_s0
+      {Intrinsic::hexagon_M2_mpyud_nac_hl_s1, 20503}, // __builtin_HEXAGON_M2_mpyud_nac_hl_s1
+      {Intrinsic::hexagon_M2_mpyud_nac_lh_s0, 20540}, // __builtin_HEXAGON_M2_mpyud_nac_lh_s0
+      {Intrinsic::hexagon_M2_mpyud_nac_lh_s1, 20577}, // __builtin_HEXAGON_M2_mpyud_nac_lh_s1
+      {Intrinsic::hexagon_M2_mpyud_nac_ll_s0, 20614}, // __builtin_HEXAGON_M2_mpyud_nac_ll_s0
+      {Intrinsic::hexagon_M2_mpyud_nac_ll_s1, 20651}, // __builtin_HEXAGON_M2_mpyud_nac_ll_s1
+      {Intrinsic::hexagon_M2_mpyui, 20688}, // __builtin_HEXAGON_M2_mpyui
+      {Intrinsic::hexagon_M2_nacci, 20715}, // __builtin_HEXAGON_M2_nacci
+      {Intrinsic::hexagon_M2_naccii, 20742}, // __builtin_HEXAGON_M2_naccii
+      {Intrinsic::hexagon_M2_subacc, 20770}, // __builtin_HEXAGON_M2_subacc
+      {Intrinsic::hexagon_M2_vabsdiffh, 20798}, // __builtin_HEXAGON_M2_vabsdiffh
+      {Intrinsic::hexagon_M2_vabsdiffw, 20829}, // __builtin_HEXAGON_M2_vabsdiffw
+      {Intrinsic::hexagon_M2_vcmac_s0_sat_i, 20860}, // __builtin_HEXAGON_M2_vcmac_s0_sat_i
+      {Intrinsic::hexagon_M2_vcmac_s0_sat_r, 20896}, // __builtin_HEXAGON_M2_vcmac_s0_sat_r
+      {Intrinsic::hexagon_M2_vcmpy_s0_sat_i, 20932}, // __builtin_HEXAGON_M2_vcmpy_s0_sat_i
+      {Intrinsic::hexagon_M2_vcmpy_s0_sat_r, 20968}, // __builtin_HEXAGON_M2_vcmpy_s0_sat_r
+      {Intrinsic::hexagon_M2_vcmpy_s1_sat_i, 21004}, // __builtin_HEXAGON_M2_vcmpy_s1_sat_i
+      {Intrinsic::hexagon_M2_vcmpy_s1_sat_r, 21040}, // __builtin_HEXAGON_M2_vcmpy_s1_sat_r
+      {Intrinsic::hexagon_M2_vdmacs_s0, 21076}, // __builtin_HEXAGON_M2_vdmacs_s0
+      {Intrinsic::hexagon_M2_vdmacs_s1, 21107}, // __builtin_HEXAGON_M2_vdmacs_s1
+      {Intrinsic::hexagon_M2_vdmpyrs_s0, 21138}, // __builtin_HEXAGON_M2_vdmpyrs_s0
+      {Intrinsic::hexagon_M2_vdmpyrs_s1, 21170}, // __builtin_HEXAGON_M2_vdmpyrs_s1
+      {Intrinsic::hexagon_M2_vdmpys_s0, 21202}, // __builtin_HEXAGON_M2_vdmpys_s0
+      {Intrinsic::hexagon_M2_vdmpys_s1, 21233}, // __builtin_HEXAGON_M2_vdmpys_s1
+      {Intrinsic::hexagon_M2_vmac2, 21264}, // __builtin_HEXAGON_M2_vmac2
+      {Intrinsic::hexagon_M2_vmac2es, 21291}, // __builtin_HEXAGON_M2_vmac2es
+      {Intrinsic::hexagon_M2_vmac2es_s0, 21320}, // __builtin_HEXAGON_M2_vmac2es_s0
+      {Intrinsic::hexagon_M2_vmac2es_s1, 21352}, // __builtin_HEXAGON_M2_vmac2es_s1
+      {Intrinsic::hexagon_M2_vmac2s_s0, 21384}, // __builtin_HEXAGON_M2_vmac2s_s0
+      {Intrinsic::hexagon_M2_vmac2s_s1, 21415}, // __builtin_HEXAGON_M2_vmac2s_s1
+      {Intrinsic::hexagon_M2_vmac2su_s0, 21446}, // __builtin_HEXAGON_M2_vmac2su_s0
+      {Intrinsic::hexagon_M2_vmac2su_s1, 21478}, // __builtin_HEXAGON_M2_vmac2su_s1
+      {Intrinsic::hexagon_M2_vmpy2es_s0, 21510}, // __builtin_HEXAGON_M2_vmpy2es_s0
+      {Intrinsic::hexagon_M2_vmpy2es_s1, 21542}, // __builtin_HEXAGON_M2_vmpy2es_s1
+      {Intrinsic::hexagon_M2_vmpy2s_s0, 21574}, // __builtin_HEXAGON_M2_vmpy2s_s0
+      {Intrinsic::hexagon_M2_vmpy2s_s0pack, 21605}, // __builtin_HEXAGON_M2_vmpy2s_s0pack
+      {Intrinsic::hexagon_M2_vmpy2s_s1, 21640}, // __builtin_HEXAGON_M2_vmpy2s_s1
+      {Intrinsic::hexagon_M2_vmpy2s_s1pack, 21671}, // __builtin_HEXAGON_M2_vmpy2s_s1pack
+      {Intrinsic::hexagon_M2_vmpy2su_s0, 21706}, // __builtin_HEXAGON_M2_vmpy2su_s0
+      {Intrinsic::hexagon_M2_vmpy2su_s1, 21738}, // __builtin_HEXAGON_M2_vmpy2su_s1
+      {Intrinsic::hexagon_M2_vraddh, 21770}, // __builtin_HEXAGON_M2_vraddh
+      {Intrinsic::hexagon_M2_vradduh, 21798}, // __builtin_HEXAGON_M2_vradduh
+      {Intrinsic::hexagon_M2_vrcmaci_s0, 21827}, // __builtin_HEXAGON_M2_vrcmaci_s0
+      {Intrinsic::hexagon_M2_vrcmaci_s0c, 21859}, // __builtin_HEXAGON_M2_vrcmaci_s0c
+      {Intrinsic::hexagon_M2_vrcmacr_s0, 21892}, // __builtin_HEXAGON_M2_vrcmacr_s0
+      {Intrinsic::hexagon_M2_vrcmacr_s0c, 21924}, // __builtin_HEXAGON_M2_vrcmacr_s0c
+      {Intrinsic::hexagon_M2_vrcmpyi_s0, 21957}, // __builtin_HEXAGON_M2_vrcmpyi_s0
+      {Intrinsic::hexagon_M2_vrcmpyi_s0c, 21989}, // __builtin_HEXAGON_M2_vrcmpyi_s0c
+      {Intrinsic::hexagon_M2_vrcmpyr_s0, 22022}, // __builtin_HEXAGON_M2_vrcmpyr_s0
+      {Intrinsic::hexagon_M2_vrcmpyr_s0c, 22054}, // __builtin_HEXAGON_M2_vrcmpyr_s0c
+      {Intrinsic::hexagon_M2_vrcmpys_acc_s1, 22087}, // __builtin_HEXAGON_M2_vrcmpys_acc_s1
+      {Intrinsic::hexagon_M2_vrcmpys_s1, 22123}, // __builtin_HEXAGON_M2_vrcmpys_s1
+      {Intrinsic::hexagon_M2_vrcmpys_s1rp, 22155}, // __builtin_HEXAGON_M2_vrcmpys_s1rp
+      {Intrinsic::hexagon_M2_vrmac_s0, 22189}, // __builtin_HEXAGON_M2_vrmac_s0
+      {Intrinsic::hexagon_M2_vrmpy_s0, 22219}, // __builtin_HEXAGON_M2_vrmpy_s0
+      {Intrinsic::hexagon_M2_xor_xacc, 22249}, // __builtin_HEXAGON_M2_xor_xacc
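+      // M4 multiply-class intrinsics follow; as above, the integer in each
+      // entry appears to be the offset of the builtin name in the generated
+      // builtin-name string table.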
+      {Intrinsic::hexagon_M4_and_and, 22279}, // __builtin_HEXAGON_M4_and_and
+      {Intrinsic::hexagon_M4_and_andn, 22308}, // __builtin_HEXAGON_M4_and_andn
+      {Intrinsic::hexagon_M4_and_or, 22338}, // __builtin_HEXAGON_M4_and_or
+      {Intrinsic::hexagon_M4_and_xor, 22366}, // __builtin_HEXAGON_M4_and_xor
+      {Intrinsic::hexagon_M4_cmpyi_wh, 22395}, // __builtin_HEXAGON_M4_cmpyi_wh
+      {Intrinsic::hexagon_M4_cmpyi_whc, 22425}, // __builtin_HEXAGON_M4_cmpyi_whc
+      {Intrinsic::hexagon_M4_cmpyr_wh, 22456}, // __builtin_HEXAGON_M4_cmpyr_wh
+      {Intrinsic::hexagon_M4_cmpyr_whc, 22486}, // __builtin_HEXAGON_M4_cmpyr_whc
+      {Intrinsic::hexagon_M4_mac_up_s1_sat, 22517}, // __builtin_HEXAGON_M4_mac_up_s1_sat
+      {Intrinsic::hexagon_M4_mpyri_addi, 22552}, // __builtin_HEXAGON_M4_mpyri_addi
+      {Intrinsic::hexagon_M4_mpyri_addr, 22584}, // __builtin_HEXAGON_M4_mpyri_addr
+      {Intrinsic::hexagon_M4_mpyri_addr_u2, 22616}, // __builtin_HEXAGON_M4_mpyri_addr_u2
+      {Intrinsic::hexagon_M4_mpyrr_addi, 22651}, // __builtin_HEXAGON_M4_mpyrr_addi
+      {Intrinsic::hexagon_M4_mpyrr_addr, 22683}, // __builtin_HEXAGON_M4_mpyrr_addr
+      {Intrinsic::hexagon_M4_nac_up_s1_sat, 22715}, // __builtin_HEXAGON_M4_nac_up_s1_sat
+      {Intrinsic::hexagon_M4_or_and, 22750}, // __builtin_HEXAGON_M4_or_and
+      {Intrinsic::hexagon_M4_or_andn, 22778}, // __builtin_HEXAGON_M4_or_andn
+      {Intrinsic::hexagon_M4_or_or, 22807}, // __builtin_HEXAGON_M4_or_or
+      {Intrinsic::hexagon_M4_or_xor, 22834}, // __builtin_HEXAGON_M4_or_xor
+      {Intrinsic::hexagon_M4_pmpyw, 22862}, // __builtin_HEXAGON_M4_pmpyw
+      {Intrinsic::hexagon_M4_pmpyw_acc, 22889}, // __builtin_HEXAGON_M4_pmpyw_acc
+      {Intrinsic::hexagon_M4_vpmpyh, 22920}, // __builtin_HEXAGON_M4_vpmpyh
+      {Intrinsic::hexagon_M4_vpmpyh_acc, 22948}, // __builtin_HEXAGON_M4_vpmpyh_acc
+      {Intrinsic::hexagon_M4_vrmpyeh_acc_s0, 22980}, // __builtin_HEXAGON_M4_vrmpyeh_acc_s0
+      {Intrinsic::hexagon_M4_vrmpyeh_acc_s1, 23016}, // __builtin_HEXAGON_M4_vrmpyeh_acc_s1
+      {Intrinsic::hexagon_M4_vrmpyeh_s0, 23052}, // __builtin_HEXAGON_M4_vrmpyeh_s0
+      {Intrinsic::hexagon_M4_vrmpyeh_s1, 23084}, // __builtin_HEXAGON_M4_vrmpyeh_s1
+      {Intrinsic::hexagon_M4_vrmpyoh_acc_s0, 23116}, // __builtin_HEXAGON_M4_vrmpyoh_acc_s0
+      {Intrinsic::hexagon_M4_vrmpyoh_acc_s1, 23152}, // __builtin_HEXAGON_M4_vrmpyoh_acc_s1
+      {Intrinsic::hexagon_M4_vrmpyoh_s0, 23188}, // __builtin_HEXAGON_M4_vrmpyoh_s0
+      {Intrinsic::hexagon_M4_vrmpyoh_s1, 23220}, // __builtin_HEXAGON_M4_vrmpyoh_s1
+      {Intrinsic::hexagon_M4_xor_and, 23252}, // __builtin_HEXAGON_M4_xor_and
+      {Intrinsic::hexagon_M4_xor_andn, 23281}, // __builtin_HEXAGON_M4_xor_andn
+      {Intrinsic::hexagon_M4_xor_or, 23311}, // __builtin_HEXAGON_M4_xor_or
+      {Intrinsic::hexagon_M4_xor_xacc, 23339}, // __builtin_HEXAGON_M4_xor_xacc
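+      // M5 multiply-class intrinsics.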
+      {Intrinsic::hexagon_M5_vdmacbsu, 23369}, // __builtin_HEXAGON_M5_vdmacbsu
+      {Intrinsic::hexagon_M5_vdmpybsu, 23399}, // __builtin_HEXAGON_M5_vdmpybsu
+      {Intrinsic::hexagon_M5_vmacbsu, 23429}, // __builtin_HEXAGON_M5_vmacbsu
+      {Intrinsic::hexagon_M5_vmacbuu, 23458}, // __builtin_HEXAGON_M5_vmacbuu
+      {Intrinsic::hexagon_M5_vmpybsu, 23487}, // __builtin_HEXAGON_M5_vmpybsu
+      {Intrinsic::hexagon_M5_vmpybuu, 23516}, // __builtin_HEXAGON_M5_vmpybuu
+      {Intrinsic::hexagon_M5_vrmacbsu, 23545}, // __builtin_HEXAGON_M5_vrmacbsu
+      {Intrinsic::hexagon_M5_vrmacbuu, 23575}, // __builtin_HEXAGON_M5_vrmacbuu
+      {Intrinsic::hexagon_M5_vrmpybsu, 23605}, // __builtin_HEXAGON_M5_vrmpybsu
+      {Intrinsic::hexagon_M5_vrmpybuu, 23635}, // __builtin_HEXAGON_M5_vrmpybuu
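+      // M6 multiply-class intrinsics.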
+      {Intrinsic::hexagon_M6_vabsdiffb, 23665}, // __builtin_HEXAGON_M6_vabsdiffb
+      {Intrinsic::hexagon_M6_vabsdiffub, 23696}, // __builtin_HEXAGON_M6_vabsdiffub
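+      // S2 shift/bit-manipulation intrinsics.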
+      {Intrinsic::hexagon_S2_addasl_rrri, 23728}, // __builtin_HEXAGON_S2_addasl_rrri
+      {Intrinsic::hexagon_S2_asl_i_p, 23761}, // __builtin_HEXAGON_S2_asl_i_p
+      {Intrinsic::hexagon_S2_asl_i_p_acc, 23790}, // __builtin_HEXAGON_S2_asl_i_p_acc
+      {Intrinsic::hexagon_S2_asl_i_p_and, 23823}, // __builtin_HEXAGON_S2_asl_i_p_and
+      {Intrinsic::hexagon_S2_asl_i_p_nac, 23856}, // __builtin_HEXAGON_S2_asl_i_p_nac
+      {Intrinsic::hexagon_S2_asl_i_p_or, 23889}, // __builtin_HEXAGON_S2_asl_i_p_or
+      {Intrinsic::hexagon_S2_asl_i_p_xacc, 23921}, // __builtin_HEXAGON_S2_asl_i_p_xacc
+      {Intrinsic::hexagon_S2_asl_i_r, 23955}, // __builtin_HEXAGON_S2_asl_i_r
+      {Intrinsic::hexagon_S2_asl_i_r_acc, 23984}, // __builtin_HEXAGON_S2_asl_i_r_acc
+      {Intrinsic::hexagon_S2_asl_i_r_and, 24017}, // __builtin_HEXAGON_S2_asl_i_r_and
+      {Intrinsic::hexagon_S2_asl_i_r_nac, 24050}, // __builtin_HEXAGON_S2_asl_i_r_nac
+      {Intrinsic::hexagon_S2_asl_i_r_or, 24083}, // __builtin_HEXAGON_S2_asl_i_r_or
+      {Intrinsic::hexagon_S2_asl_i_r_sat, 24115}, // __builtin_HEXAGON_S2_asl_i_r_sat
+      {Intrinsic::hexagon_S2_asl_i_r_xacc, 24148}, // __builtin_HEXAGON_S2_asl_i_r_xacc
+      {Intrinsic::hexagon_S2_asl_i_vh, 24182}, // __builtin_HEXAGON_S2_asl_i_vh
+      {Intrinsic::hexagon_S2_asl_i_vw, 24212}, // __builtin_HEXAGON_S2_asl_i_vw
+      {Intrinsic::hexagon_S2_asl_r_p, 24242}, // __builtin_HEXAGON_S2_asl_r_p
+      {Intrinsic::hexagon_S2_asl_r_p_acc, 24271}, // __builtin_HEXAGON_S2_asl_r_p_acc
+      {Intrinsic::hexagon_S2_asl_r_p_and, 24304}, // __builtin_HEXAGON_S2_asl_r_p_and
+      {Intrinsic::hexagon_S2_asl_r_p_nac, 24337}, // __builtin_HEXAGON_S2_asl_r_p_nac
+      {Intrinsic::hexagon_S2_asl_r_p_or, 24370}, // __builtin_HEXAGON_S2_asl_r_p_or
+      {Intrinsic::hexagon_S2_asl_r_p_xor, 24402}, // __builtin_HEXAGON_S2_asl_r_p_xor
+      {Intrinsic::hexagon_S2_asl_r_r, 24435}, // __builtin_HEXAGON_S2_asl_r_r
+      {Intrinsic::hexagon_S2_asl_r_r_acc, 24464}, // __builtin_HEXAGON_S2_asl_r_r_acc
+      {Intrinsic::hexagon_S2_asl_r_r_and, 24497}, // __builtin_HEXAGON_S2_asl_r_r_and
+      {Intrinsic::hexagon_S2_asl_r_r_nac, 24530}, // __builtin_HEXAGON_S2_asl_r_r_nac
+      {Intrinsic::hexagon_S2_asl_r_r_or, 24563}, // __builtin_HEXAGON_S2_asl_r_r_or
+      {Intrinsic::hexagon_S2_asl_r_r_sat, 24595}, // __builtin_HEXAGON_S2_asl_r_r_sat
+      {Intrinsic::hexagon_S2_asl_r_vh, 24628}, // __builtin_HEXAGON_S2_asl_r_vh
+      {Intrinsic::hexagon_S2_asl_r_vw, 24658}, // __builtin_HEXAGON_S2_asl_r_vw
+      {Intrinsic::hexagon_S2_asr_i_p, 24688}, // __builtin_HEXAGON_S2_asr_i_p
+      {Intrinsic::hexagon_S2_asr_i_p_acc, 24717}, // __builtin_HEXAGON_S2_asr_i_p_acc
+      {Intrinsic::hexagon_S2_asr_i_p_and, 24750}, // __builtin_HEXAGON_S2_asr_i_p_and
+      {Intrinsic::hexagon_S2_asr_i_p_nac, 24783}, // __builtin_HEXAGON_S2_asr_i_p_nac
+      {Intrinsic::hexagon_S2_asr_i_p_or, 24816}, // __builtin_HEXAGON_S2_asr_i_p_or
+      {Intrinsic::hexagon_S2_asr_i_p_rnd, 24848}, // __builtin_HEXAGON_S2_asr_i_p_rnd
+      {Intrinsic::hexagon_S2_asr_i_p_rnd_goodsyntax, 24881}, // __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax
+      {Intrinsic::hexagon_S2_asr_i_r, 24925}, // __builtin_HEXAGON_S2_asr_i_r
+      {Intrinsic::hexagon_S2_asr_i_r_acc, 24954}, // __builtin_HEXAGON_S2_asr_i_r_acc
+      {Intrinsic::hexagon_S2_asr_i_r_and, 24987}, // __builtin_HEXAGON_S2_asr_i_r_and
+      {Intrinsic::hexagon_S2_asr_i_r_nac, 25020}, // __builtin_HEXAGON_S2_asr_i_r_nac
+      {Intrinsic::hexagon_S2_asr_i_r_or, 25053}, // __builtin_HEXAGON_S2_asr_i_r_or
+      {Intrinsic::hexagon_S2_asr_i_r_rnd, 25085}, // __builtin_HEXAGON_S2_asr_i_r_rnd
+      {Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax, 25118}, // __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax
+      {Intrinsic::hexagon_S2_asr_i_svw_trun, 25162}, // __builtin_HEXAGON_S2_asr_i_svw_trun
+      {Intrinsic::hexagon_S2_asr_i_vh, 25198}, // __builtin_HEXAGON_S2_asr_i_vh
+      {Intrinsic::hexagon_S2_asr_i_vw, 25228}, // __builtin_HEXAGON_S2_asr_i_vw
+      {Intrinsic::hexagon_S2_asr_r_p, 25258}, // __builtin_HEXAGON_S2_asr_r_p
+      {Intrinsic::hexagon_S2_asr_r_p_acc, 25287}, // __builtin_HEXAGON_S2_asr_r_p_acc
+      {Intrinsic::hexagon_S2_asr_r_p_and, 25320}, // __builtin_HEXAGON_S2_asr_r_p_and
+      {Intrinsic::hexagon_S2_asr_r_p_nac, 25353}, // __builtin_HEXAGON_S2_asr_r_p_nac
+      {Intrinsic::hexagon_S2_asr_r_p_or, 25386}, // __builtin_HEXAGON_S2_asr_r_p_or
+      {Intrinsic::hexagon_S2_asr_r_p_xor, 25418}, // __builtin_HEXAGON_S2_asr_r_p_xor
+      {Intrinsic::hexagon_S2_asr_r_r, 25451}, // __builtin_HEXAGON_S2_asr_r_r
+      {Intrinsic::hexagon_S2_asr_r_r_acc, 25480}, // __builtin_HEXAGON_S2_asr_r_r_acc
+      {Intrinsic::hexagon_S2_asr_r_r_and, 25513}, // __builtin_HEXAGON_S2_asr_r_r_and
+      {Intrinsic::hexagon_S2_asr_r_r_nac, 25546}, // __builtin_HEXAGON_S2_asr_r_r_nac
+      {Intrinsic::hexagon_S2_asr_r_r_or, 25579}, // __builtin_HEXAGON_S2_asr_r_r_or
+      {Intrinsic::hexagon_S2_asr_r_r_sat, 25611}, // __builtin_HEXAGON_S2_asr_r_r_sat
+      {Intrinsic::hexagon_S2_asr_r_svw_trun, 25644}, // __builtin_HEXAGON_S2_asr_r_svw_trun
+      {Intrinsic::hexagon_S2_asr_r_vh, 25680}, // __builtin_HEXAGON_S2_asr_r_vh
+      {Intrinsic::hexagon_S2_asr_r_vw, 25710}, // __builtin_HEXAGON_S2_asr_r_vw
+      {Intrinsic::hexagon_S2_brev, 25740}, // __builtin_HEXAGON_S2_brev
+      {Intrinsic::hexagon_S2_brevp, 25766}, // __builtin_HEXAGON_S2_brevp
+      {Intrinsic::hexagon_S2_cabacencbin, 25793}, // __builtin_HEXAGON_S2_cabacencbin
+      {Intrinsic::hexagon_S2_cl0, 25826}, // __builtin_HEXAGON_S2_cl0
+      {Intrinsic::hexagon_S2_cl0p, 25851}, // __builtin_HEXAGON_S2_cl0p
+      {Intrinsic::hexagon_S2_cl1, 25877}, // __builtin_HEXAGON_S2_cl1
+      {Intrinsic::hexagon_S2_cl1p, 25902}, // __builtin_HEXAGON_S2_cl1p
+      {Intrinsic::hexagon_S2_clb, 25928}, // __builtin_HEXAGON_S2_clb
+      {Intrinsic::hexagon_S2_clbnorm, 25953}, // __builtin_HEXAGON_S2_clbnorm
+      {Intrinsic::hexagon_S2_clbp, 25982}, // __builtin_HEXAGON_S2_clbp
+      {Intrinsic::hexagon_S2_clrbit_i, 26008}, // __builtin_HEXAGON_S2_clrbit_i
+      {Intrinsic::hexagon_S2_clrbit_r, 26038}, // __builtin_HEXAGON_S2_clrbit_r
+      {Intrinsic::hexagon_S2_ct0, 26068}, // __builtin_HEXAGON_S2_ct0
+      {Intrinsic::hexagon_S2_ct0p, 26093}, // __builtin_HEXAGON_S2_ct0p
+      {Intrinsic::hexagon_S2_ct1, 26119}, // __builtin_HEXAGON_S2_ct1
+      {Intrinsic::hexagon_S2_ct1p, 26144}, // __builtin_HEXAGON_S2_ct1p
+      {Intrinsic::hexagon_S2_deinterleave, 26170}, // __builtin_HEXAGON_S2_deinterleave
+      {Intrinsic::hexagon_S2_extractu, 26204}, // __builtin_HEXAGON_S2_extractu
+      {Intrinsic::hexagon_S2_extractu_rp, 26234}, // __builtin_HEXAGON_S2_extractu_rp
+      {Intrinsic::hexagon_S2_extractup, 26267}, // __builtin_HEXAGON_S2_extractup
+      {Intrinsic::hexagon_S2_extractup_rp, 26298}, // __builtin_HEXAGON_S2_extractup_rp
+      {Intrinsic::hexagon_S2_insert, 26332}, // __builtin_HEXAGON_S2_insert
+      {Intrinsic::hexagon_S2_insert_rp, 26360}, // __builtin_HEXAGON_S2_insert_rp
+      {Intrinsic::hexagon_S2_insertp, 26391}, // __builtin_HEXAGON_S2_insertp
+      {Intrinsic::hexagon_S2_insertp_rp, 26420}, // __builtin_HEXAGON_S2_insertp_rp
+      {Intrinsic::hexagon_S2_interleave, 26452}, // __builtin_HEXAGON_S2_interleave
+      {Intrinsic::hexagon_S2_lfsp, 26484}, // __builtin_HEXAGON_S2_lfsp
+      {Intrinsic::hexagon_S2_lsl_r_p, 26510}, // __builtin_HEXAGON_S2_lsl_r_p
+      {Intrinsic::hexagon_S2_lsl_r_p_acc, 26539}, // __builtin_HEXAGON_S2_lsl_r_p_acc
+      {Intrinsic::hexagon_S2_lsl_r_p_and, 26572}, // __builtin_HEXAGON_S2_lsl_r_p_and
+      {Intrinsic::hexagon_S2_lsl_r_p_nac, 26605}, // __builtin_HEXAGON_S2_lsl_r_p_nac
+      {Intrinsic::hexagon_S2_lsl_r_p_or, 26638}, // __builtin_HEXAGON_S2_lsl_r_p_or
+      {Intrinsic::hexagon_S2_lsl_r_p_xor, 26670}, // __builtin_HEXAGON_S2_lsl_r_p_xor
+      {Intrinsic::hexagon_S2_lsl_r_r, 26703}, // __builtin_HEXAGON_S2_lsl_r_r
+      {Intrinsic::hexagon_S2_lsl_r_r_acc, 26732}, // __builtin_HEXAGON_S2_lsl_r_r_acc
+      {Intrinsic::hexagon_S2_lsl_r_r_and, 26765}, // __builtin_HEXAGON_S2_lsl_r_r_and
+      {Intrinsic::hexagon_S2_lsl_r_r_nac, 26798}, // __builtin_HEXAGON_S2_lsl_r_r_nac
+      {Intrinsic::hexagon_S2_lsl_r_r_or, 26831}, // __builtin_HEXAGON_S2_lsl_r_r_or
+      {Intrinsic::hexagon_S2_lsl_r_vh, 26863}, // __builtin_HEXAGON_S2_lsl_r_vh
+      {Intrinsic::hexagon_S2_lsl_r_vw, 26893}, // __builtin_HEXAGON_S2_lsl_r_vw
+      {Intrinsic::hexagon_S2_lsr_i_p, 26923}, // __builtin_HEXAGON_S2_lsr_i_p
+      {Intrinsic::hexagon_S2_lsr_i_p_acc, 26952}, // __builtin_HEXAGON_S2_lsr_i_p_acc
+      {Intrinsic::hexagon_S2_lsr_i_p_and, 26985}, // __builtin_HEXAGON_S2_lsr_i_p_and
+      {Intrinsic::hexagon_S2_lsr_i_p_nac, 27018}, // __builtin_HEXAGON_S2_lsr_i_p_nac
+      {Intrinsic::hexagon_S2_lsr_i_p_or, 27051}, // __builtin_HEXAGON_S2_lsr_i_p_or
+      {Intrinsic::hexagon_S2_lsr_i_p_xacc, 27083}, // __builtin_HEXAGON_S2_lsr_i_p_xacc
+      {Intrinsic::hexagon_S2_lsr_i_r, 27117}, // __builtin_HEXAGON_S2_lsr_i_r
+      {Intrinsic::hexagon_S2_lsr_i_r_acc, 27146}, // __builtin_HEXAGON_S2_lsr_i_r_acc
+      {Intrinsic::hexagon_S2_lsr_i_r_and, 27179}, // __builtin_HEXAGON_S2_lsr_i_r_and
+      {Intrinsic::hexagon_S2_lsr_i_r_nac, 27212}, // __builtin_HEXAGON_S2_lsr_i_r_nac
+      {Intrinsic::hexagon_S2_lsr_i_r_or, 27245}, // __builtin_HEXAGON_S2_lsr_i_r_or
+      {Intrinsic::hexagon_S2_lsr_i_r_xacc, 27277}, // __builtin_HEXAGON_S2_lsr_i_r_xacc
+      {Intrinsic::hexagon_S2_lsr_i_vh, 27311}, // __builtin_HEXAGON_S2_lsr_i_vh
+      {Intrinsic::hexagon_S2_lsr_i_vw, 27341}, // __builtin_HEXAGON_S2_lsr_i_vw
+      {Intrinsic::hexagon_S2_lsr_r_p, 27371}, // __builtin_HEXAGON_S2_lsr_r_p
+      {Intrinsic::hexagon_S2_lsr_r_p_acc, 27400}, // __builtin_HEXAGON_S2_lsr_r_p_acc
+      {Intrinsic::hexagon_S2_lsr_r_p_and, 27433}, // __builtin_HEXAGON_S2_lsr_r_p_and
+      {Intrinsic::hexagon_S2_lsr_r_p_nac, 27466}, // __builtin_HEXAGON_S2_lsr_r_p_nac
+      {Intrinsic::hexagon_S2_lsr_r_p_or, 27499}, // __builtin_HEXAGON_S2_lsr_r_p_or
+      {Intrinsic::hexagon_S2_lsr_r_p_xor, 27531}, // __builtin_HEXAGON_S2_lsr_r_p_xor
+      {Intrinsic::hexagon_S2_lsr_r_r, 27564}, // __builtin_HEXAGON_S2_lsr_r_r
+      {Intrinsic::hexagon_S2_lsr_r_r_acc, 27593}, // __builtin_HEXAGON_S2_lsr_r_r_acc
+      {Intrinsic::hexagon_S2_lsr_r_r_and, 27626}, // __builtin_HEXAGON_S2_lsr_r_r_and
+      {Intrinsic::hexagon_S2_lsr_r_r_nac, 27659}, // __builtin_HEXAGON_S2_lsr_r_r_nac
+      {Intrinsic::hexagon_S2_lsr_r_r_or, 27692}, // __builtin_HEXAGON_S2_lsr_r_r_or
+      {Intrinsic::hexagon_S2_lsr_r_vh, 27724}, // __builtin_HEXAGON_S2_lsr_r_vh
+      {Intrinsic::hexagon_S2_lsr_r_vw, 27754}, // __builtin_HEXAGON_S2_lsr_r_vw
+      {Intrinsic::hexagon_S2_packhl, 27784}, // __builtin_HEXAGON_S2_packhl
+      {Intrinsic::hexagon_S2_parityp, 27812}, // __builtin_HEXAGON_S2_parityp
+      {Intrinsic::hexagon_S2_setbit_i, 27841}, // __builtin_HEXAGON_S2_setbit_i
+      {Intrinsic::hexagon_S2_setbit_r, 27871}, // __builtin_HEXAGON_S2_setbit_r
+      {Intrinsic::hexagon_S2_shuffeb, 27901}, // __builtin_HEXAGON_S2_shuffeb
+      {Intrinsic::hexagon_S2_shuffeh, 27930}, // __builtin_HEXAGON_S2_shuffeh
+      {Intrinsic::hexagon_S2_shuffob, 27959}, // __builtin_HEXAGON_S2_shuffob
+      {Intrinsic::hexagon_S2_shuffoh, 27988}, // __builtin_HEXAGON_S2_shuffoh
+      {Intrinsic::hexagon_S2_storew_locked, 28114}, // __builtin_HEXAGON_S2_storew_locked
+      {Intrinsic::hexagon_S2_svsathb, 28149}, // __builtin_HEXAGON_S2_svsathb
+      {Intrinsic::hexagon_S2_svsathub, 28178}, // __builtin_HEXAGON_S2_svsathub
+      {Intrinsic::hexagon_S2_tableidxb_goodsyntax, 28208}, // __builtin_HEXAGON_S2_tableidxb_goodsyntax
+      {Intrinsic::hexagon_S2_tableidxd_goodsyntax, 28250}, // __builtin_HEXAGON_S2_tableidxd_goodsyntax
+      {Intrinsic::hexagon_S2_tableidxh_goodsyntax, 28292}, // __builtin_HEXAGON_S2_tableidxh_goodsyntax
+      {Intrinsic::hexagon_S2_tableidxw_goodsyntax, 28334}, // __builtin_HEXAGON_S2_tableidxw_goodsyntax
+      {Intrinsic::hexagon_S2_togglebit_i, 28376}, // __builtin_HEXAGON_S2_togglebit_i
+      {Intrinsic::hexagon_S2_togglebit_r, 28409}, // __builtin_HEXAGON_S2_togglebit_r
+      {Intrinsic::hexagon_S2_tstbit_i, 28442}, // __builtin_HEXAGON_S2_tstbit_i
+      {Intrinsic::hexagon_S2_tstbit_r, 28472}, // __builtin_HEXAGON_S2_tstbit_r
+      {Intrinsic::hexagon_S2_valignib, 28502}, // __builtin_HEXAGON_S2_valignib
+      {Intrinsic::hexagon_S2_valignrb, 28532}, // __builtin_HEXAGON_S2_valignrb
+      {Intrinsic::hexagon_S2_vcnegh, 28562}, // __builtin_HEXAGON_S2_vcnegh
+      {Intrinsic::hexagon_S2_vcrotate, 28590}, // __builtin_HEXAGON_S2_vcrotate
+      {Intrinsic::hexagon_S2_vrcnegh, 28620}, // __builtin_HEXAGON_S2_vrcnegh
+      {Intrinsic::hexagon_S2_vrndpackwh, 28649}, // __builtin_HEXAGON_S2_vrndpackwh
+      {Intrinsic::hexagon_S2_vrndpackwhs, 28681}, // __builtin_HEXAGON_S2_vrndpackwhs
+      {Intrinsic::hexagon_S2_vsathb, 28714}, // __builtin_HEXAGON_S2_vsathb
+      {Intrinsic::hexagon_S2_vsathb_nopack, 28742}, // __builtin_HEXAGON_S2_vsathb_nopack
+      {Intrinsic::hexagon_S2_vsathub, 28777}, // __builtin_HEXAGON_S2_vsathub
+      {Intrinsic::hexagon_S2_vsathub_nopack, 28806}, // __builtin_HEXAGON_S2_vsathub_nopack
+      {Intrinsic::hexagon_S2_vsatwh, 28842}, // __builtin_HEXAGON_S2_vsatwh
+      {Intrinsic::hexagon_S2_vsatwh_nopack, 28870}, // __builtin_HEXAGON_S2_vsatwh_nopack
+      {Intrinsic::hexagon_S2_vsatwuh, 28905}, // __builtin_HEXAGON_S2_vsatwuh
+      {Intrinsic::hexagon_S2_vsatwuh_nopack, 28934}, // __builtin_HEXAGON_S2_vsatwuh_nopack
+      {Intrinsic::hexagon_S2_vsplatrb, 28970}, // __builtin_HEXAGON_S2_vsplatrb
+      {Intrinsic::hexagon_S2_vsplatrh, 29000}, // __builtin_HEXAGON_S2_vsplatrh
+      {Intrinsic::hexagon_S2_vspliceib, 29030}, // __builtin_HEXAGON_S2_vspliceib
+      {Intrinsic::hexagon_S2_vsplicerb, 29061}, // __builtin_HEXAGON_S2_vsplicerb
+      {Intrinsic::hexagon_S2_vsxtbh, 29092}, // __builtin_HEXAGON_S2_vsxtbh
+      {Intrinsic::hexagon_S2_vsxthw, 29120}, // __builtin_HEXAGON_S2_vsxthw
+      {Intrinsic::hexagon_S2_vtrunehb, 29148}, // __builtin_HEXAGON_S2_vtrunehb
+      {Intrinsic::hexagon_S2_vtrunewh, 29178}, // __builtin_HEXAGON_S2_vtrunewh
+      {Intrinsic::hexagon_S2_vtrunohb, 29208}, // __builtin_HEXAGON_S2_vtrunohb
+      {Intrinsic::hexagon_S2_vtrunowh, 29238}, // __builtin_HEXAGON_S2_vtrunowh
+      {Intrinsic::hexagon_S2_vzxtbh, 29268}, // __builtin_HEXAGON_S2_vzxtbh
+      {Intrinsic::hexagon_S2_vzxthw, 29296}, // __builtin_HEXAGON_S2_vzxthw
+      {Intrinsic::hexagon_S4_addaddi, 29324}, // __builtin_HEXAGON_S4_addaddi
+      {Intrinsic::hexagon_S4_addi_asl_ri, 29353}, // __builtin_HEXAGON_S4_addi_asl_ri
+      {Intrinsic::hexagon_S4_addi_lsr_ri, 29386}, // __builtin_HEXAGON_S4_addi_lsr_ri
+      {Intrinsic::hexagon_S4_andi_asl_ri, 29419}, // __builtin_HEXAGON_S4_andi_asl_ri
+      {Intrinsic::hexagon_S4_andi_lsr_ri, 29452}, // __builtin_HEXAGON_S4_andi_lsr_ri
+      {Intrinsic::hexagon_S4_clbaddi, 29485}, // __builtin_HEXAGON_S4_clbaddi
+      {Intrinsic::hexagon_S4_clbpaddi, 29514}, // __builtin_HEXAGON_S4_clbpaddi
+      {Intrinsic::hexagon_S4_clbpnorm, 29544}, // __builtin_HEXAGON_S4_clbpnorm
+      {Intrinsic::hexagon_S4_extract, 29574}, // __builtin_HEXAGON_S4_extract
+      {Intrinsic::hexagon_S4_extract_rp, 29603}, // __builtin_HEXAGON_S4_extract_rp
+      {Intrinsic::hexagon_S4_extractp, 29635}, // __builtin_HEXAGON_S4_extractp
+      {Intrinsic::hexagon_S4_extractp_rp, 29665}, // __builtin_HEXAGON_S4_extractp_rp
+      {Intrinsic::hexagon_S4_lsli, 29698}, // __builtin_HEXAGON_S4_lsli
+      {Intrinsic::hexagon_S4_ntstbit_i, 29724}, // __builtin_HEXAGON_S4_ntstbit_i
+      {Intrinsic::hexagon_S4_ntstbit_r, 29755}, // __builtin_HEXAGON_S4_ntstbit_r
+      {Intrinsic::hexagon_S4_or_andi, 29786}, // __builtin_HEXAGON_S4_or_andi
+      {Intrinsic::hexagon_S4_or_andix, 29815}, // __builtin_HEXAGON_S4_or_andix
+      {Intrinsic::hexagon_S4_or_ori, 29845}, // __builtin_HEXAGON_S4_or_ori
+      {Intrinsic::hexagon_S4_ori_asl_ri, 29873}, // __builtin_HEXAGON_S4_ori_asl_ri
+      {Intrinsic::hexagon_S4_ori_lsr_ri, 29905}, // __builtin_HEXAGON_S4_ori_lsr_ri
+      {Intrinsic::hexagon_S4_parity, 29937}, // __builtin_HEXAGON_S4_parity
+      {Intrinsic::hexagon_S4_stored_locked, 29965}, // __builtin_HEXAGON_S4_stored_locked
+      {Intrinsic::hexagon_S4_subaddi, 30000}, // __builtin_HEXAGON_S4_subaddi
+      {Intrinsic::hexagon_S4_subi_asl_ri, 30029}, // __builtin_HEXAGON_S4_subi_asl_ri
+      {Intrinsic::hexagon_S4_subi_lsr_ri, 30062}, // __builtin_HEXAGON_S4_subi_lsr_ri
+      {Intrinsic::hexagon_S4_vrcrotate, 30095}, // __builtin_HEXAGON_S4_vrcrotate
+      {Intrinsic::hexagon_S4_vrcrotate_acc, 30126}, // __builtin_HEXAGON_S4_vrcrotate_acc
+      {Intrinsic::hexagon_S4_vxaddsubh, 30161}, // __builtin_HEXAGON_S4_vxaddsubh
+      {Intrinsic::hexagon_S4_vxaddsubhr, 30192}, // __builtin_HEXAGON_S4_vxaddsubhr
+      {Intrinsic::hexagon_S4_vxaddsubw, 30224}, // __builtin_HEXAGON_S4_vxaddsubw
+      {Intrinsic::hexagon_S4_vxsubaddh, 30255}, // __builtin_HEXAGON_S4_vxsubaddh
+      {Intrinsic::hexagon_S4_vxsubaddhr, 30286}, // __builtin_HEXAGON_S4_vxsubaddhr
+      {Intrinsic::hexagon_S4_vxsubaddw, 30318}, // __builtin_HEXAGON_S4_vxsubaddw
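+      // S5 shift/bit-manipulation intrinsics.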
+      {Intrinsic::hexagon_S5_asrhub_rnd_sat_goodsyntax, 30349}, // __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax
+      {Intrinsic::hexagon_S5_asrhub_sat, 30396}, // __builtin_HEXAGON_S5_asrhub_sat
+      {Intrinsic::hexagon_S5_popcountp, 30428}, // __builtin_HEXAGON_S5_popcountp
+      {Intrinsic::hexagon_S5_vasrhrnd_goodsyntax, 30459}, // __builtin_HEXAGON_S5_vasrhrnd_goodsyntax
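+      // S6 shift/bit-manipulation intrinsics.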
+      {Intrinsic::hexagon_S6_rol_i_p, 30500}, // __builtin_HEXAGON_S6_rol_i_p
+      {Intrinsic::hexagon_S6_rol_i_p_acc, 30529}, // __builtin_HEXAGON_S6_rol_i_p_acc
+      {Intrinsic::hexagon_S6_rol_i_p_and, 30562}, // __builtin_HEXAGON_S6_rol_i_p_and
+      {Intrinsic::hexagon_S6_rol_i_p_nac, 30595}, // __builtin_HEXAGON_S6_rol_i_p_nac
+      {Intrinsic::hexagon_S6_rol_i_p_or, 30628}, // __builtin_HEXAGON_S6_rol_i_p_or
+      {Intrinsic::hexagon_S6_rol_i_p_xacc, 30660}, // __builtin_HEXAGON_S6_rol_i_p_xacc
+      {Intrinsic::hexagon_S6_rol_i_r, 30694}, // __builtin_HEXAGON_S6_rol_i_r
+      {Intrinsic::hexagon_S6_rol_i_r_acc, 30723}, // __builtin_HEXAGON_S6_rol_i_r_acc
+      {Intrinsic::hexagon_S6_rol_i_r_and, 30756}, // __builtin_HEXAGON_S6_rol_i_r_and
+      {Intrinsic::hexagon_S6_rol_i_r_nac, 30789}, // __builtin_HEXAGON_S6_rol_i_r_nac
+      {Intrinsic::hexagon_S6_rol_i_r_or, 30822}, // __builtin_HEXAGON_S6_rol_i_r_or
+      {Intrinsic::hexagon_S6_rol_i_r_xacc, 30854}, // __builtin_HEXAGON_S6_rol_i_r_xacc
+      {Intrinsic::hexagon_S6_vsplatrbp, 30888}, // __builtin_HEXAGON_S6_vsplatrbp
+      {Intrinsic::hexagon_S6_vtrunehb_ppp, 30919}, // __builtin_HEXAGON_S6_vtrunehb_ppp
+      {Intrinsic::hexagon_S6_vtrunohb_ppp, 30953}, // __builtin_HEXAGON_S6_vtrunohb_ppp
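+      // V6 (HVX) intrinsics; the "_128B" entries are the 128-byte
+      // vector-length variants of the corresponding 64-byte operations.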
+      {Intrinsic::hexagon_V6_extractw, 30987}, // __builtin_HEXAGON_V6_extractw
+      {Intrinsic::hexagon_V6_extractw_128B, 31017}, // __builtin_HEXAGON_V6_extractw_128B
+      {Intrinsic::hexagon_V6_hi, 31052}, // __builtin_HEXAGON_V6_hi
+      {Intrinsic::hexagon_V6_hi_128B, 31076}, // __builtin_HEXAGON_V6_hi_128B
+      {Intrinsic::hexagon_V6_lo, 31105}, // __builtin_HEXAGON_V6_lo
+      {Intrinsic::hexagon_V6_lo_128B, 31129}, // __builtin_HEXAGON_V6_lo_128B
+      {Intrinsic::hexagon_V6_lvsplatb, 31158}, // __builtin_HEXAGON_V6_lvsplatb
+      {Intrinsic::hexagon_V6_lvsplatb_128B, 31188}, // __builtin_HEXAGON_V6_lvsplatb_128B
+      {Intrinsic::hexagon_V6_lvsplath, 31223}, // __builtin_HEXAGON_V6_lvsplath
+      {Intrinsic::hexagon_V6_lvsplath_128B, 31253}, // __builtin_HEXAGON_V6_lvsplath_128B
+      {Intrinsic::hexagon_V6_lvsplatw, 31288}, // __builtin_HEXAGON_V6_lvsplatw
+      {Intrinsic::hexagon_V6_lvsplatw_128B, 31318}, // __builtin_HEXAGON_V6_lvsplatw_128B
+      {Intrinsic::hexagon_V6_pred_and, 31353}, // __builtin_HEXAGON_V6_pred_and
+      {Intrinsic::hexagon_V6_pred_and_128B, 31383}, // __builtin_HEXAGON_V6_pred_and_128B
+      {Intrinsic::hexagon_V6_pred_and_n, 31418}, // __builtin_HEXAGON_V6_pred_and_n
+      {Intrinsic::hexagon_V6_pred_and_n_128B, 31450}, // __builtin_HEXAGON_V6_pred_and_n_128B
+      {Intrinsic::hexagon_V6_pred_not, 31487}, // __builtin_HEXAGON_V6_pred_not
+      {Intrinsic::hexagon_V6_pred_not_128B, 31517}, // __builtin_HEXAGON_V6_pred_not_128B
+      {Intrinsic::hexagon_V6_pred_or, 31552}, // __builtin_HEXAGON_V6_pred_or
+      {Intrinsic::hexagon_V6_pred_or_128B, 31581}, // __builtin_HEXAGON_V6_pred_or_128B
+      {Intrinsic::hexagon_V6_pred_or_n, 31615}, // __builtin_HEXAGON_V6_pred_or_n
+      {Intrinsic::hexagon_V6_pred_or_n_128B, 31646}, // __builtin_HEXAGON_V6_pred_or_n_128B
+      {Intrinsic::hexagon_V6_pred_scalar2, 31682}, // __builtin_HEXAGON_V6_pred_scalar2
+      {Intrinsic::hexagon_V6_pred_scalar2_128B, 31716}, // __builtin_HEXAGON_V6_pred_scalar2_128B
+      {Intrinsic::hexagon_V6_pred_scalar2v2, 31755}, // __builtin_HEXAGON_V6_pred_scalar2v2
+      {Intrinsic::hexagon_V6_pred_scalar2v2_128B, 31791}, // __builtin_HEXAGON_V6_pred_scalar2v2_128B
+      {Intrinsic::hexagon_V6_pred_xor, 31832}, // __builtin_HEXAGON_V6_pred_xor
+      {Intrinsic::hexagon_V6_pred_xor_128B, 31862}, // __builtin_HEXAGON_V6_pred_xor_128B
+      {Intrinsic::hexagon_V6_shuffeqh, 31897}, // __builtin_HEXAGON_V6_shuffeqh
+      {Intrinsic::hexagon_V6_shuffeqh_128B, 31927}, // __builtin_HEXAGON_V6_shuffeqh_128B
+      {Intrinsic::hexagon_V6_shuffeqw, 31962}, // __builtin_HEXAGON_V6_shuffeqw
+      {Intrinsic::hexagon_V6_shuffeqw_128B, 31992}, // __builtin_HEXAGON_V6_shuffeqw_128B
+      {Intrinsic::hexagon_V6_vS32b_nqpred_ai, 32027}, // __builtin_HEXAGON_V6_vS32b_nqpred_ai
+      {Intrinsic::hexagon_V6_vS32b_nqpred_ai_128B, 32064}, // __builtin_HEXAGON_V6_vS32b_nqpred_ai_128B
+      {Intrinsic::hexagon_V6_vS32b_nt_nqpred_ai, 32106}, // __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai
+      {Intrinsic::hexagon_V6_vS32b_nt_nqpred_ai_128B, 32146}, // __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B
+      {Intrinsic::hexagon_V6_vS32b_nt_qpred_ai, 32191}, // __builtin_HEXAGON_V6_vS32b_nt_qpred_ai
+      {Intrinsic::hexagon_V6_vS32b_nt_qpred_ai_128B, 32230}, // __builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B
+      {Intrinsic::hexagon_V6_vS32b_qpred_ai, 32274}, // __builtin_HEXAGON_V6_vS32b_qpred_ai
+      {Intrinsic::hexagon_V6_vS32b_qpred_ai_128B, 32310}, // __builtin_HEXAGON_V6_vS32b_qpred_ai_128B
+      {Intrinsic::hexagon_V6_vabsb, 32351}, // __builtin_HEXAGON_V6_vabsb
+      {Intrinsic::hexagon_V6_vabsb_128B, 32378}, // __builtin_HEXAGON_V6_vabsb_128B
+      {Intrinsic::hexagon_V6_vabsb_sat, 32410}, // __builtin_HEXAGON_V6_vabsb_sat
+      {Intrinsic::hexagon_V6_vabsb_sat_128B, 32441}, // __builtin_HEXAGON_V6_vabsb_sat_128B
+      {Intrinsic::hexagon_V6_vabsdiffh, 32477}, // __builtin_HEXAGON_V6_vabsdiffh
+      {Intrinsic::hexagon_V6_vabsdiffh_128B, 32508}, // __builtin_HEXAGON_V6_vabsdiffh_128B
+      {Intrinsic::hexagon_V6_vabsdiffub, 32544}, // __builtin_HEXAGON_V6_vabsdiffub
+      {Intrinsic::hexagon_V6_vabsdiffub_128B, 32576}, // __builtin_HEXAGON_V6_vabsdiffub_128B
+      {Intrinsic::hexagon_V6_vabsdiffuh, 32613}, // __builtin_HEXAGON_V6_vabsdiffuh
+      {Intrinsic::hexagon_V6_vabsdiffuh_128B, 32645}, // __builtin_HEXAGON_V6_vabsdiffuh_128B
+      {Intrinsic::hexagon_V6_vabsdiffw, 32682}, // __builtin_HEXAGON_V6_vabsdiffw
+      {Intrinsic::hexagon_V6_vabsdiffw_128B, 32713}, // __builtin_HEXAGON_V6_vabsdiffw_128B
+      {Intrinsic::hexagon_V6_vabsh, 32749}, // __builtin_HEXAGON_V6_vabsh
+      {Intrinsic::hexagon_V6_vabsh_128B, 32776}, // __builtin_HEXAGON_V6_vabsh_128B
+      {Intrinsic::hexagon_V6_vabsh_sat, 32808}, // __builtin_HEXAGON_V6_vabsh_sat
+      {Intrinsic::hexagon_V6_vabsh_sat_128B, 32839}, // __builtin_HEXAGON_V6_vabsh_sat_128B
+      {Intrinsic::hexagon_V6_vabsw, 32875}, // __builtin_HEXAGON_V6_vabsw
+      {Intrinsic::hexagon_V6_vabsw_128B, 32902}, // __builtin_HEXAGON_V6_vabsw_128B
+      {Intrinsic::hexagon_V6_vabsw_sat, 32934}, // __builtin_HEXAGON_V6_vabsw_sat
+      {Intrinsic::hexagon_V6_vabsw_sat_128B, 32965}, // __builtin_HEXAGON_V6_vabsw_sat_128B
+      {Intrinsic::hexagon_V6_vaddb, 33001}, // __builtin_HEXAGON_V6_vaddb
+      {Intrinsic::hexagon_V6_vaddb_128B, 33028}, // __builtin_HEXAGON_V6_vaddb_128B
+      {Intrinsic::hexagon_V6_vaddb_dv, 33060}, // __builtin_HEXAGON_V6_vaddb_dv
+      {Intrinsic::hexagon_V6_vaddb_dv_128B, 33090}, // __builtin_HEXAGON_V6_vaddb_dv_128B
+      {Intrinsic::hexagon_V6_vaddbnq, 33125}, // __builtin_HEXAGON_V6_vaddbnq
+      {Intrinsic::hexagon_V6_vaddbnq_128B, 33154}, // __builtin_HEXAGON_V6_vaddbnq_128B
+      {Intrinsic::hexagon_V6_vaddbq, 33188}, // __builtin_HEXAGON_V6_vaddbq
+      {Intrinsic::hexagon_V6_vaddbq_128B, 33216}, // __builtin_HEXAGON_V6_vaddbq_128B
+      {Intrinsic::hexagon_V6_vaddbsat, 33249}, // __builtin_HEXAGON_V6_vaddbsat
+      {Intrinsic::hexagon_V6_vaddbsat_128B, 33279}, // __builtin_HEXAGON_V6_vaddbsat_128B
+      {Intrinsic::hexagon_V6_vaddbsat_dv, 33314}, // __builtin_HEXAGON_V6_vaddbsat_dv
+      {Intrinsic::hexagon_V6_vaddbsat_dv_128B, 33347}, // __builtin_HEXAGON_V6_vaddbsat_dv_128B
+      {Intrinsic::hexagon_V6_vaddclbh, 33452}, // __builtin_HEXAGON_V6_vaddclbh
+      {Intrinsic::hexagon_V6_vaddclbh_128B, 33482}, // __builtin_HEXAGON_V6_vaddclbh_128B
+      {Intrinsic::hexagon_V6_vaddclbw, 33517}, // __builtin_HEXAGON_V6_vaddclbw
+      {Intrinsic::hexagon_V6_vaddclbw_128B, 33547}, // __builtin_HEXAGON_V6_vaddclbw_128B
+      {Intrinsic::hexagon_V6_vaddh, 33582}, // __builtin_HEXAGON_V6_vaddh
+      {Intrinsic::hexagon_V6_vaddh_128B, 33609}, // __builtin_HEXAGON_V6_vaddh_128B
+      {Intrinsic::hexagon_V6_vaddh_dv, 33641}, // __builtin_HEXAGON_V6_vaddh_dv
+      {Intrinsic::hexagon_V6_vaddh_dv_128B, 33671}, // __builtin_HEXAGON_V6_vaddh_dv_128B
+      {Intrinsic::hexagon_V6_vaddhnq, 33706}, // __builtin_HEXAGON_V6_vaddhnq
+      {Intrinsic::hexagon_V6_vaddhnq_128B, 33735}, // __builtin_HEXAGON_V6_vaddhnq_128B
+      {Intrinsic::hexagon_V6_vaddhq, 33769}, // __builtin_HEXAGON_V6_vaddhq
+      {Intrinsic::hexagon_V6_vaddhq_128B, 33797}, // __builtin_HEXAGON_V6_vaddhq_128B
+      {Intrinsic::hexagon_V6_vaddhsat, 33830}, // __builtin_HEXAGON_V6_vaddhsat
+      {Intrinsic::hexagon_V6_vaddhsat_128B, 33860}, // __builtin_HEXAGON_V6_vaddhsat_128B
+      {Intrinsic::hexagon_V6_vaddhsat_dv, 33895}, // __builtin_HEXAGON_V6_vaddhsat_dv
+      {Intrinsic::hexagon_V6_vaddhsat_dv_128B, 33928}, // __builtin_HEXAGON_V6_vaddhsat_dv_128B
+      {Intrinsic::hexagon_V6_vaddhw, 33966}, // __builtin_HEXAGON_V6_vaddhw
+      {Intrinsic::hexagon_V6_vaddhw_128B, 33994}, // __builtin_HEXAGON_V6_vaddhw_128B
+      {Intrinsic::hexagon_V6_vaddhw_acc, 34027}, // __builtin_HEXAGON_V6_vaddhw_acc
+      {Intrinsic::hexagon_V6_vaddhw_acc_128B, 34059}, // __builtin_HEXAGON_V6_vaddhw_acc_128B
+      {Intrinsic::hexagon_V6_vaddubh, 34096}, // __builtin_HEXAGON_V6_vaddubh
+      {Intrinsic::hexagon_V6_vaddubh_128B, 34125}, // __builtin_HEXAGON_V6_vaddubh_128B
+      {Intrinsic::hexagon_V6_vaddubh_acc, 34159}, // __builtin_HEXAGON_V6_vaddubh_acc
+      {Intrinsic::hexagon_V6_vaddubh_acc_128B, 34192}, // __builtin_HEXAGON_V6_vaddubh_acc_128B
+      {Intrinsic::hexagon_V6_vaddubsat, 34230}, // __builtin_HEXAGON_V6_vaddubsat
+      {Intrinsic::hexagon_V6_vaddubsat_128B, 34261}, // __builtin_HEXAGON_V6_vaddubsat_128B
+      {Intrinsic::hexagon_V6_vaddubsat_dv, 34297}, // __builtin_HEXAGON_V6_vaddubsat_dv
+      {Intrinsic::hexagon_V6_vaddubsat_dv_128B, 34331}, // __builtin_HEXAGON_V6_vaddubsat_dv_128B
+      {Intrinsic::hexagon_V6_vaddububb_sat, 34370}, // __builtin_HEXAGON_V6_vaddububb_sat
+      {Intrinsic::hexagon_V6_vaddububb_sat_128B, 34405}, // __builtin_HEXAGON_V6_vaddububb_sat_128B
+      {Intrinsic::hexagon_V6_vadduhsat, 34445}, // __builtin_HEXAGON_V6_vadduhsat
+      {Intrinsic::hexagon_V6_vadduhsat_128B, 34476}, // __builtin_HEXAGON_V6_vadduhsat_128B
+      {Intrinsic::hexagon_V6_vadduhsat_dv, 34512}, // __builtin_HEXAGON_V6_vadduhsat_dv
+      {Intrinsic::hexagon_V6_vadduhsat_dv_128B, 34546}, // __builtin_HEXAGON_V6_vadduhsat_dv_128B
+      {Intrinsic::hexagon_V6_vadduhw, 34585}, // __builtin_HEXAGON_V6_vadduhw
+      {Intrinsic::hexagon_V6_vadduhw_128B, 34614}, // __builtin_HEXAGON_V6_vadduhw_128B
+      {Intrinsic::hexagon_V6_vadduhw_acc, 34648}, // __builtin_HEXAGON_V6_vadduhw_acc
+      {Intrinsic::hexagon_V6_vadduhw_acc_128B, 34681}, // __builtin_HEXAGON_V6_vadduhw_acc_128B
+      {Intrinsic::hexagon_V6_vadduwsat, 34719}, // __builtin_HEXAGON_V6_vadduwsat
+      {Intrinsic::hexagon_V6_vadduwsat_128B, 34750}, // __builtin_HEXAGON_V6_vadduwsat_128B
+      {Intrinsic::hexagon_V6_vadduwsat_dv, 34786}, // __builtin_HEXAGON_V6_vadduwsat_dv
+      {Intrinsic::hexagon_V6_vadduwsat_dv_128B, 34820}, // __builtin_HEXAGON_V6_vadduwsat_dv_128B
+      {Intrinsic::hexagon_V6_vaddw, 34859}, // __builtin_HEXAGON_V6_vaddw
+      {Intrinsic::hexagon_V6_vaddw_128B, 34886}, // __builtin_HEXAGON_V6_vaddw_128B
+      {Intrinsic::hexagon_V6_vaddw_dv, 34918}, // __builtin_HEXAGON_V6_vaddw_dv
+      {Intrinsic::hexagon_V6_vaddw_dv_128B, 34948}, // __builtin_HEXAGON_V6_vaddw_dv_128B
+      {Intrinsic::hexagon_V6_vaddwnq, 34983}, // __builtin_HEXAGON_V6_vaddwnq
+      {Intrinsic::hexagon_V6_vaddwnq_128B, 35012}, // __builtin_HEXAGON_V6_vaddwnq_128B
+      {Intrinsic::hexagon_V6_vaddwq, 35046}, // __builtin_HEXAGON_V6_vaddwq
+      {Intrinsic::hexagon_V6_vaddwq_128B, 35074}, // __builtin_HEXAGON_V6_vaddwq_128B
+      {Intrinsic::hexagon_V6_vaddwsat, 35107}, // __builtin_HEXAGON_V6_vaddwsat
+      {Intrinsic::hexagon_V6_vaddwsat_128B, 35137}, // __builtin_HEXAGON_V6_vaddwsat_128B
+      {Intrinsic::hexagon_V6_vaddwsat_dv, 35172}, // __builtin_HEXAGON_V6_vaddwsat_dv
+      {Intrinsic::hexagon_V6_vaddwsat_dv_128B, 35205}, // __builtin_HEXAGON_V6_vaddwsat_dv_128B
+      {Intrinsic::hexagon_V6_valignb, 35243}, // __builtin_HEXAGON_V6_valignb
+      {Intrinsic::hexagon_V6_valignb_128B, 35272}, // __builtin_HEXAGON_V6_valignb_128B
+      {Intrinsic::hexagon_V6_valignbi, 35306}, // __builtin_HEXAGON_V6_valignbi
+      {Intrinsic::hexagon_V6_valignbi_128B, 35336}, // __builtin_HEXAGON_V6_valignbi_128B
+      {Intrinsic::hexagon_V6_vand, 35371}, // __builtin_HEXAGON_V6_vand
+      {Intrinsic::hexagon_V6_vand_128B, 35397}, // __builtin_HEXAGON_V6_vand_128B
+      {Intrinsic::hexagon_V6_vandnqrt, 35428}, // __builtin_HEXAGON_V6_vandnqrt
+      {Intrinsic::hexagon_V6_vandnqrt_128B, 35458}, // __builtin_HEXAGON_V6_vandnqrt_128B
+      {Intrinsic::hexagon_V6_vandnqrt_acc, 35493}, // __builtin_HEXAGON_V6_vandnqrt_acc
+      {Intrinsic::hexagon_V6_vandnqrt_acc_128B, 35527}, // __builtin_HEXAGON_V6_vandnqrt_acc_128B
+      {Intrinsic::hexagon_V6_vandqrt, 35566}, // __builtin_HEXAGON_V6_vandqrt
+      {Intrinsic::hexagon_V6_vandqrt_128B, 35595}, // __builtin_HEXAGON_V6_vandqrt_128B
+      {Intrinsic::hexagon_V6_vandqrt_acc, 35629}, // __builtin_HEXAGON_V6_vandqrt_acc
+      {Intrinsic::hexagon_V6_vandqrt_acc_128B, 35662}, // __builtin_HEXAGON_V6_vandqrt_acc_128B
+      {Intrinsic::hexagon_V6_vandvnqv, 35700}, // __builtin_HEXAGON_V6_vandvnqv
+      {Intrinsic::hexagon_V6_vandvnqv_128B, 35730}, // __builtin_HEXAGON_V6_vandvnqv_128B
+      {Intrinsic::hexagon_V6_vandvqv, 35765}, // __builtin_HEXAGON_V6_vandvqv
+      {Intrinsic::hexagon_V6_vandvqv_128B, 35794}, // __builtin_HEXAGON_V6_vandvqv_128B
+      {Intrinsic::hexagon_V6_vandvrt, 35828}, // __builtin_HEXAGON_V6_vandvrt
+      {Intrinsic::hexagon_V6_vandvrt_128B, 35857}, // __builtin_HEXAGON_V6_vandvrt_128B
+      {Intrinsic::hexagon_V6_vandvrt_acc, 35891}, // __builtin_HEXAGON_V6_vandvrt_acc
+      {Intrinsic::hexagon_V6_vandvrt_acc_128B, 35924}, // __builtin_HEXAGON_V6_vandvrt_acc_128B
+      {Intrinsic::hexagon_V6_vaslh, 35962}, // __builtin_HEXAGON_V6_vaslh
+      {Intrinsic::hexagon_V6_vaslh_128B, 35989}, // __builtin_HEXAGON_V6_vaslh_128B
+      {Intrinsic::hexagon_V6_vaslh_acc, 36021}, // __builtin_HEXAGON_V6_vaslh_acc
+      {Intrinsic::hexagon_V6_vaslh_acc_128B, 36052}, // __builtin_HEXAGON_V6_vaslh_acc_128B
+      {Intrinsic::hexagon_V6_vaslhv, 36088}, // __builtin_HEXAGON_V6_vaslhv
+      {Intrinsic::hexagon_V6_vaslhv_128B, 36116}, // __builtin_HEXAGON_V6_vaslhv_128B
+      {Intrinsic::hexagon_V6_vaslw, 36149}, // __builtin_HEXAGON_V6_vaslw
+      {Intrinsic::hexagon_V6_vaslw_128B, 36176}, // __builtin_HEXAGON_V6_vaslw_128B
+      {Intrinsic::hexagon_V6_vaslw_acc, 36208}, // __builtin_HEXAGON_V6_vaslw_acc
+      {Intrinsic::hexagon_V6_vaslw_acc_128B, 36239}, // __builtin_HEXAGON_V6_vaslw_acc_128B
+      {Intrinsic::hexagon_V6_vaslwv, 36275}, // __builtin_HEXAGON_V6_vaslwv
+      {Intrinsic::hexagon_V6_vaslwv_128B, 36303}, // __builtin_HEXAGON_V6_vaslwv_128B
+      {Intrinsic::hexagon_V6_vasrh, 36336}, // __builtin_HEXAGON_V6_vasrh
+      {Intrinsic::hexagon_V6_vasrh_128B, 36363}, // __builtin_HEXAGON_V6_vasrh_128B
+      {Intrinsic::hexagon_V6_vasrh_acc, 36395}, // __builtin_HEXAGON_V6_vasrh_acc
+      {Intrinsic::hexagon_V6_vasrh_acc_128B, 36426}, // __builtin_HEXAGON_V6_vasrh_acc_128B
+      {Intrinsic::hexagon_V6_vasrhbrndsat, 36462}, // __builtin_HEXAGON_V6_vasrhbrndsat
+      {Intrinsic::hexagon_V6_vasrhbrndsat_128B, 36496}, // __builtin_HEXAGON_V6_vasrhbrndsat_128B
+      {Intrinsic::hexagon_V6_vasrhbsat, 36535}, // __builtin_HEXAGON_V6_vasrhbsat
+      {Intrinsic::hexagon_V6_vasrhbsat_128B, 36566}, // __builtin_HEXAGON_V6_vasrhbsat_128B
+      {Intrinsic::hexagon_V6_vasrhubrndsat, 36602}, // __builtin_HEXAGON_V6_vasrhubrndsat
+      {Intrinsic::hexagon_V6_vasrhubrndsat_128B, 36637}, // __builtin_HEXAGON_V6_vasrhubrndsat_128B
+      {Intrinsic::hexagon_V6_vasrhubsat, 36677}, // __builtin_HEXAGON_V6_vasrhubsat
+      {Intrinsic::hexagon_V6_vasrhubsat_128B, 36709}, // __builtin_HEXAGON_V6_vasrhubsat_128B
+      {Intrinsic::hexagon_V6_vasrhv, 36746}, // __builtin_HEXAGON_V6_vasrhv
+      {Intrinsic::hexagon_V6_vasrhv_128B, 36774}, // __builtin_HEXAGON_V6_vasrhv_128B
+      {Intrinsic::hexagon_V6_vasruhubrndsat, 36807}, // __builtin_HEXAGON_V6_vasruhubrndsat
+      {Intrinsic::hexagon_V6_vasruhubrndsat_128B, 36843}, // __builtin_HEXAGON_V6_vasruhubrndsat_128B
+      {Intrinsic::hexagon_V6_vasruhubsat, 36884}, // __builtin_HEXAGON_V6_vasruhubsat
+      {Intrinsic::hexagon_V6_vasruhubsat_128B, 36917}, // __builtin_HEXAGON_V6_vasruhubsat_128B
+      {Intrinsic::hexagon_V6_vasruwuhrndsat, 36955}, // __builtin_HEXAGON_V6_vasruwuhrndsat
+      {Intrinsic::hexagon_V6_vasruwuhrndsat_128B, 36991}, // __builtin_HEXAGON_V6_vasruwuhrndsat_128B
+      {Intrinsic::hexagon_V6_vasruwuhsat, 37032}, // __builtin_HEXAGON_V6_vasruwuhsat
+      {Intrinsic::hexagon_V6_vasruwuhsat_128B, 37065}, // __builtin_HEXAGON_V6_vasruwuhsat_128B
+      {Intrinsic::hexagon_V6_vasrw, 37103}, // __builtin_HEXAGON_V6_vasrw
+      {Intrinsic::hexagon_V6_vasrw_128B, 37130}, // __builtin_HEXAGON_V6_vasrw_128B
+      {Intrinsic::hexagon_V6_vasrw_acc, 37162}, // __builtin_HEXAGON_V6_vasrw_acc
+      {Intrinsic::hexagon_V6_vasrw_acc_128B, 37193}, // __builtin_HEXAGON_V6_vasrw_acc_128B
+      {Intrinsic::hexagon_V6_vasrwh, 37229}, // __builtin_HEXAGON_V6_vasrwh
+      {Intrinsic::hexagon_V6_vasrwh_128B, 37257}, // __builtin_HEXAGON_V6_vasrwh_128B
+      {Intrinsic::hexagon_V6_vasrwhrndsat, 37290}, // __builtin_HEXAGON_V6_vasrwhrndsat
+      {Intrinsic::hexagon_V6_vasrwhrndsat_128B, 37324}, // __builtin_HEXAGON_V6_vasrwhrndsat_128B
+      {Intrinsic::hexagon_V6_vasrwhsat, 37363}, // __builtin_HEXAGON_V6_vasrwhsat
+      {Intrinsic::hexagon_V6_vasrwhsat_128B, 37394}, // __builtin_HEXAGON_V6_vasrwhsat_128B
+      {Intrinsic::hexagon_V6_vasrwuhrndsat, 37430}, // __builtin_HEXAGON_V6_vasrwuhrndsat
+      {Intrinsic::hexagon_V6_vasrwuhrndsat_128B, 37465}, // __builtin_HEXAGON_V6_vasrwuhrndsat_128B
+      {Intrinsic::hexagon_V6_vasrwuhsat, 37505}, // __builtin_HEXAGON_V6_vasrwuhsat
+      {Intrinsic::hexagon_V6_vasrwuhsat_128B, 37537}, // __builtin_HEXAGON_V6_vasrwuhsat_128B
+      {Intrinsic::hexagon_V6_vasrwv, 37574}, // __builtin_HEXAGON_V6_vasrwv
+      {Intrinsic::hexagon_V6_vasrwv_128B, 37602}, // __builtin_HEXAGON_V6_vasrwv_128B
+      {Intrinsic::hexagon_V6_vassign, 37635}, // __builtin_HEXAGON_V6_vassign
+      {Intrinsic::hexagon_V6_vassign_128B, 37664}, // __builtin_HEXAGON_V6_vassign_128B
+      {Intrinsic::hexagon_V6_vassignp, 37698}, // __builtin_HEXAGON_V6_vassignp
+      {Intrinsic::hexagon_V6_vassignp_128B, 37728}, // __builtin_HEXAGON_V6_vassignp_128B
+      {Intrinsic::hexagon_V6_vavgb, 37763}, // __builtin_HEXAGON_V6_vavgb
+      {Intrinsic::hexagon_V6_vavgb_128B, 37790}, // __builtin_HEXAGON_V6_vavgb_128B
+      {Intrinsic::hexagon_V6_vavgbrnd, 37822}, // __builtin_HEXAGON_V6_vavgbrnd
+      {Intrinsic::hexagon_V6_vavgbrnd_128B, 37852}, // __builtin_HEXAGON_V6_vavgbrnd_128B
+      {Intrinsic::hexagon_V6_vavgh, 37887}, // __builtin_HEXAGON_V6_vavgh
+      {Intrinsic::hexagon_V6_vavgh_128B, 37914}, // __builtin_HEXAGON_V6_vavgh_128B
+      {Intrinsic::hexagon_V6_vavghrnd, 37946}, // __builtin_HEXAGON_V6_vavghrnd
+      {Intrinsic::hexagon_V6_vavghrnd_128B, 37976}, // __builtin_HEXAGON_V6_vavghrnd_128B
+      {Intrinsic::hexagon_V6_vavgub, 38011}, // __builtin_HEXAGON_V6_vavgub
+      {Intrinsic::hexagon_V6_vavgub_128B, 38039}, // __builtin_HEXAGON_V6_vavgub_128B
+      {Intrinsic::hexagon_V6_vavgubrnd, 38072}, // __builtin_HEXAGON_V6_vavgubrnd
+      {Intrinsic::hexagon_V6_vavgubrnd_128B, 38103}, // __builtin_HEXAGON_V6_vavgubrnd_128B
+      {Intrinsic::hexagon_V6_vavguh, 38139}, // __builtin_HEXAGON_V6_vavguh
+      {Intrinsic::hexagon_V6_vavguh_128B, 38167}, // __builtin_HEXAGON_V6_vavguh_128B
+      {Intrinsic::hexagon_V6_vavguhrnd, 38200}, // __builtin_HEXAGON_V6_vavguhrnd
+      {Intrinsic::hexagon_V6_vavguhrnd_128B, 38231}, // __builtin_HEXAGON_V6_vavguhrnd_128B
+      {Intrinsic::hexagon_V6_vavguw, 38267}, // __builtin_HEXAGON_V6_vavguw
+      {Intrinsic::hexagon_V6_vavguw_128B, 38295}, // __builtin_HEXAGON_V6_vavguw_128B
+      {Intrinsic::hexagon_V6_vavguwrnd, 38328}, // __builtin_HEXAGON_V6_vavguwrnd
+      {Intrinsic::hexagon_V6_vavguwrnd_128B, 38359}, // __builtin_HEXAGON_V6_vavguwrnd_128B
+      {Intrinsic::hexagon_V6_vavgw, 38395}, // __builtin_HEXAGON_V6_vavgw
+      {Intrinsic::hexagon_V6_vavgw_128B, 38422}, // __builtin_HEXAGON_V6_vavgw_128B
+      {Intrinsic::hexagon_V6_vavgwrnd, 38454}, // __builtin_HEXAGON_V6_vavgwrnd
+      {Intrinsic::hexagon_V6_vavgwrnd_128B, 38484}, // __builtin_HEXAGON_V6_vavgwrnd_128B
+      {Intrinsic::hexagon_V6_vcl0h, 38519}, // __builtin_HEXAGON_V6_vcl0h
+      {Intrinsic::hexagon_V6_vcl0h_128B, 38546}, // __builtin_HEXAGON_V6_vcl0h_128B
+      {Intrinsic::hexagon_V6_vcl0w, 38578}, // __builtin_HEXAGON_V6_vcl0w
+      {Intrinsic::hexagon_V6_vcl0w_128B, 38605}, // __builtin_HEXAGON_V6_vcl0w_128B
+      {Intrinsic::hexagon_V6_vcombine, 38637}, // __builtin_HEXAGON_V6_vcombine
+      {Intrinsic::hexagon_V6_vcombine_128B, 38667}, // __builtin_HEXAGON_V6_vcombine_128B
+      {Intrinsic::hexagon_V6_vd0, 38702}, // __builtin_HEXAGON_V6_vd0
+      {Intrinsic::hexagon_V6_vd0_128B, 38727}, // __builtin_HEXAGON_V6_vd0_128B
+      {Intrinsic::hexagon_V6_vdd0, 38757}, // __builtin_HEXAGON_V6_vdd0
+      {Intrinsic::hexagon_V6_vdd0_128B, 38783}, // __builtin_HEXAGON_V6_vdd0_128B
+      {Intrinsic::hexagon_V6_vdealb, 38814}, // __builtin_HEXAGON_V6_vdealb
+      {Intrinsic::hexagon_V6_vdealb4w, 38875}, // __builtin_HEXAGON_V6_vdealb4w
+      {Intrinsic::hexagon_V6_vdealb4w_128B, 38905}, // __builtin_HEXAGON_V6_vdealb4w_128B
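+      // Note: the table is kept sorted by builtin name, and ASCII ordering
+      // ('4' < '_') places "vdealb4w*" before "vdealb_128B", so the
+      // non-monotonic string-table offsets here are expected.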
+      {Intrinsic::hexagon_V6_vdealb_128B, 38842}, // __builtin_HEXAGON_V6_vdealb_128B
+      {Intrinsic::hexagon_V6_vdealh, 38940}, // __builtin_HEXAGON_V6_vdealh
+      {Intrinsic::hexagon_V6_vdealh_128B, 38968}, // __builtin_HEXAGON_V6_vdealh_128B
+      {Intrinsic::hexagon_V6_vdealvdd, 39001}, // __builtin_HEXAGON_V6_vdealvdd
+      {Intrinsic::hexagon_V6_vdealvdd_128B, 39031}, // __builtin_HEXAGON_V6_vdealvdd_128B
+      {Intrinsic::hexagon_V6_vdelta, 39066}, // __builtin_HEXAGON_V6_vdelta
+      {Intrinsic::hexagon_V6_vdelta_128B, 39094}, // __builtin_HEXAGON_V6_vdelta_128B
+      {Intrinsic::hexagon_V6_vdmpybus, 39127}, // __builtin_HEXAGON_V6_vdmpybus
+      {Intrinsic::hexagon_V6_vdmpybus_128B, 39157}, // __builtin_HEXAGON_V6_vdmpybus_128B
+      {Intrinsic::hexagon_V6_vdmpybus_acc, 39192}, // __builtin_HEXAGON_V6_vdmpybus_acc
+      {Intrinsic::hexagon_V6_vdmpybus_acc_128B, 39226}, // __builtin_HEXAGON_V6_vdmpybus_acc_128B
+      {Intrinsic::hexagon_V6_vdmpybus_dv, 39265}, // __builtin_HEXAGON_V6_vdmpybus_dv
+      {Intrinsic::hexagon_V6_vdmpybus_dv_128B, 39298}, // __builtin_HEXAGON_V6_vdmpybus_dv_128B
+      {Intrinsic::hexagon_V6_vdmpybus_dv_acc, 39336}, // __builtin_HEXAGON_V6_vdmpybus_dv_acc
+      {Intrinsic::hexagon_V6_vdmpybus_dv_acc_128B, 39373}, // __builtin_HEXAGON_V6_vdmpybus_dv_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhb, 39415}, // __builtin_HEXAGON_V6_vdmpyhb
+      {Intrinsic::hexagon_V6_vdmpyhb_128B, 39444}, // __builtin_HEXAGON_V6_vdmpyhb_128B
+      {Intrinsic::hexagon_V6_vdmpyhb_acc, 39478}, // __builtin_HEXAGON_V6_vdmpyhb_acc
+      {Intrinsic::hexagon_V6_vdmpyhb_acc_128B, 39511}, // __builtin_HEXAGON_V6_vdmpyhb_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhb_dv, 39549}, // __builtin_HEXAGON_V6_vdmpyhb_dv
+      {Intrinsic::hexagon_V6_vdmpyhb_dv_128B, 39581}, // __builtin_HEXAGON_V6_vdmpyhb_dv_128B
+      {Intrinsic::hexagon_V6_vdmpyhb_dv_acc, 39618}, // __builtin_HEXAGON_V6_vdmpyhb_dv_acc
+      {Intrinsic::hexagon_V6_vdmpyhb_dv_acc_128B, 39654}, // __builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhisat, 39695}, // __builtin_HEXAGON_V6_vdmpyhisat
+      {Intrinsic::hexagon_V6_vdmpyhisat_128B, 39727}, // __builtin_HEXAGON_V6_vdmpyhisat_128B
+      {Intrinsic::hexagon_V6_vdmpyhisat_acc, 39764}, // __builtin_HEXAGON_V6_vdmpyhisat_acc
+      {Intrinsic::hexagon_V6_vdmpyhisat_acc_128B, 39800}, // __builtin_HEXAGON_V6_vdmpyhisat_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhsat, 39841}, // __builtin_HEXAGON_V6_vdmpyhsat
+      {Intrinsic::hexagon_V6_vdmpyhsat_128B, 39872}, // __builtin_HEXAGON_V6_vdmpyhsat_128B
+      {Intrinsic::hexagon_V6_vdmpyhsat_acc, 39908}, // __builtin_HEXAGON_V6_vdmpyhsat_acc
+      {Intrinsic::hexagon_V6_vdmpyhsat_acc_128B, 39943}, // __builtin_HEXAGON_V6_vdmpyhsat_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhsuisat, 39983}, // __builtin_HEXAGON_V6_vdmpyhsuisat
+      {Intrinsic::hexagon_V6_vdmpyhsuisat_128B, 40017}, // __builtin_HEXAGON_V6_vdmpyhsuisat_128B
+      {Intrinsic::hexagon_V6_vdmpyhsuisat_acc, 40056}, // __builtin_HEXAGON_V6_vdmpyhsuisat_acc
+      {Intrinsic::hexagon_V6_vdmpyhsuisat_acc_128B, 40094}, // __builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhsusat, 40137}, // __builtin_HEXAGON_V6_vdmpyhsusat
+      {Intrinsic::hexagon_V6_vdmpyhsusat_128B, 40170}, // __builtin_HEXAGON_V6_vdmpyhsusat_128B
+      {Intrinsic::hexagon_V6_vdmpyhsusat_acc, 40208}, // __builtin_HEXAGON_V6_vdmpyhsusat_acc
+      {Intrinsic::hexagon_V6_vdmpyhsusat_acc_128B, 40245}, // __builtin_HEXAGON_V6_vdmpyhsusat_acc_128B
+      {Intrinsic::hexagon_V6_vdmpyhvsat, 40287}, // __builtin_HEXAGON_V6_vdmpyhvsat
+      {Intrinsic::hexagon_V6_vdmpyhvsat_128B, 40319}, // __builtin_HEXAGON_V6_vdmpyhvsat_128B
+      {Intrinsic::hexagon_V6_vdmpyhvsat_acc, 40356}, // __builtin_HEXAGON_V6_vdmpyhvsat_acc
+      {Intrinsic::hexagon_V6_vdmpyhvsat_acc_128B, 40392}, // __builtin_HEXAGON_V6_vdmpyhvsat_acc_128B
+      {Intrinsic::hexagon_V6_vdsaduh, 40433}, // __builtin_HEXAGON_V6_vdsaduh
+      {Intrinsic::hexagon_V6_vdsaduh_128B, 40462}, // __builtin_HEXAGON_V6_vdsaduh_128B
+      {Intrinsic::hexagon_V6_vdsaduh_acc, 40496}, // __builtin_HEXAGON_V6_vdsaduh_acc
+      {Intrinsic::hexagon_V6_vdsaduh_acc_128B, 40529}, // __builtin_HEXAGON_V6_vdsaduh_acc_128B
+      {Intrinsic::hexagon_V6_veqb, 40567}, // __builtin_HEXAGON_V6_veqb
+      {Intrinsic::hexagon_V6_veqb_128B, 40593}, // __builtin_HEXAGON_V6_veqb_128B
+      {Intrinsic::hexagon_V6_veqb_and, 40624}, // __builtin_HEXAGON_V6_veqb_and
+      {Intrinsic::hexagon_V6_veqb_and_128B, 40654}, // __builtin_HEXAGON_V6_veqb_and_128B
+      {Intrinsic::hexagon_V6_veqb_or, 40689}, // __builtin_HEXAGON_V6_veqb_or
+      {Intrinsic::hexagon_V6_veqb_or_128B, 40718}, // __builtin_HEXAGON_V6_veqb_or_128B
+      {Intrinsic::hexagon_V6_veqb_xor, 40752}, // __builtin_HEXAGON_V6_veqb_xor
+      {Intrinsic::hexagon_V6_veqb_xor_128B, 40782}, // __builtin_HEXAGON_V6_veqb_xor_128B
+      {Intrinsic::hexagon_V6_veqh, 40817}, // __builtin_HEXAGON_V6_veqh
+      {Intrinsic::hexagon_V6_veqh_128B, 40843}, // __builtin_HEXAGON_V6_veqh_128B
+      {Intrinsic::hexagon_V6_veqh_and, 40874}, // __builtin_HEXAGON_V6_veqh_and
+      {Intrinsic::hexagon_V6_veqh_and_128B, 40904}, // __builtin_HEXAGON_V6_veqh_and_128B
+      {Intrinsic::hexagon_V6_veqh_or, 40939}, // __builtin_HEXAGON_V6_veqh_or
+      {Intrinsic::hexagon_V6_veqh_or_128B, 40968}, // __builtin_HEXAGON_V6_veqh_or_128B
+      {Intrinsic::hexagon_V6_veqh_xor, 41002}, // __builtin_HEXAGON_V6_veqh_xor
+      {Intrinsic::hexagon_V6_veqh_xor_128B, 41032}, // __builtin_HEXAGON_V6_veqh_xor_128B
+      {Intrinsic::hexagon_V6_veqw, 41067}, // __builtin_HEXAGON_V6_veqw
+      {Intrinsic::hexagon_V6_veqw_128B, 41093}, // __builtin_HEXAGON_V6_veqw_128B
+      {Intrinsic::hexagon_V6_veqw_and, 41124}, // __builtin_HEXAGON_V6_veqw_and
+      {Intrinsic::hexagon_V6_veqw_and_128B, 41154}, // __builtin_HEXAGON_V6_veqw_and_128B
+      {Intrinsic::hexagon_V6_veqw_or, 41189}, // __builtin_HEXAGON_V6_veqw_or
+      {Intrinsic::hexagon_V6_veqw_or_128B, 41218}, // __builtin_HEXAGON_V6_veqw_or_128B
+      {Intrinsic::hexagon_V6_veqw_xor, 41252}, // __builtin_HEXAGON_V6_veqw_xor
+      {Intrinsic::hexagon_V6_veqw_xor_128B, 41282}, // __builtin_HEXAGON_V6_veqw_xor_128B
+      {Intrinsic::hexagon_V6_vgathermh, 41317}, // __builtin_HEXAGON_V6_vgathermh
+      {Intrinsic::hexagon_V6_vgathermh_128B, 41348}, // __builtin_HEXAGON_V6_vgathermh_128B
+      {Intrinsic::hexagon_V6_vgathermhq, 41384}, // __builtin_HEXAGON_V6_vgathermhq
+      {Intrinsic::hexagon_V6_vgathermhq_128B, 41416}, // __builtin_HEXAGON_V6_vgathermhq_128B
+      {Intrinsic::hexagon_V6_vgathermhw, 41453}, // __builtin_HEXAGON_V6_vgathermhw
+      {Intrinsic::hexagon_V6_vgathermhw_128B, 41485}, // __builtin_HEXAGON_V6_vgathermhw_128B
+      {Intrinsic::hexagon_V6_vgathermhwq, 41522}, // __builtin_HEXAGON_V6_vgathermhwq
+      {Intrinsic::hexagon_V6_vgathermhwq_128B, 41555}, // __builtin_HEXAGON_V6_vgathermhwq_128B
+      {Intrinsic::hexagon_V6_vgathermw, 41593}, // __builtin_HEXAGON_V6_vgathermw
+      {Intrinsic::hexagon_V6_vgathermw_128B, 41624}, // __builtin_HEXAGON_V6_vgathermw_128B
+      {Intrinsic::hexagon_V6_vgathermwq, 41660}, // __builtin_HEXAGON_V6_vgathermwq
+      {Intrinsic::hexagon_V6_vgathermwq_128B, 41692}, // __builtin_HEXAGON_V6_vgathermwq_128B
+      {Intrinsic::hexagon_V6_vgtb, 41729}, // __builtin_HEXAGON_V6_vgtb
+      {Intrinsic::hexagon_V6_vgtb_128B, 41755}, // __builtin_HEXAGON_V6_vgtb_128B
+      {Intrinsic::hexagon_V6_vgtb_and, 41786}, // __builtin_HEXAGON_V6_vgtb_and
+      {Intrinsic::hexagon_V6_vgtb_and_128B, 41816}, // __builtin_HEXAGON_V6_vgtb_and_128B
+      {Intrinsic::hexagon_V6_vgtb_or, 41851}, // __builtin_HEXAGON_V6_vgtb_or
+      {Intrinsic::hexagon_V6_vgtb_or_128B, 41880}, // __builtin_HEXAGON_V6_vgtb_or_128B
+      {Intrinsic::hexagon_V6_vgtb_xor, 41914}, // __builtin_HEXAGON_V6_vgtb_xor
+      {Intrinsic::hexagon_V6_vgtb_xor_128B, 41944}, // __builtin_HEXAGON_V6_vgtb_xor_128B
+      {Intrinsic::hexagon_V6_vgth, 41979}, // __builtin_HEXAGON_V6_vgth
+      {Intrinsic::hexagon_V6_vgth_128B, 42005}, // __builtin_HEXAGON_V6_vgth_128B
+      {Intrinsic::hexagon_V6_vgth_and, 42036}, // __builtin_HEXAGON_V6_vgth_and
+      {Intrinsic::hexagon_V6_vgth_and_128B, 42066}, // __builtin_HEXAGON_V6_vgth_and_128B
+      {Intrinsic::hexagon_V6_vgth_or, 42101}, // __builtin_HEXAGON_V6_vgth_or
+      {Intrinsic::hexagon_V6_vgth_or_128B, 42130}, // __builtin_HEXAGON_V6_vgth_or_128B
+      {Intrinsic::hexagon_V6_vgth_xor, 42164}, // __builtin_HEXAGON_V6_vgth_xor
+      {Intrinsic::hexagon_V6_vgth_xor_128B, 42194}, // __builtin_HEXAGON_V6_vgth_xor_128B
+      {Intrinsic::hexagon_V6_vgtub, 42229}, // __builtin_HEXAGON_V6_vgtub
+      {Intrinsic::hexagon_V6_vgtub_128B, 42256}, // __builtin_HEXAGON_V6_vgtub_128B
+      {Intrinsic::hexagon_V6_vgtub_and, 42288}, // __builtin_HEXAGON_V6_vgtub_and
+      {Intrinsic::hexagon_V6_vgtub_and_128B, 42319}, // __builtin_HEXAGON_V6_vgtub_and_128B
+      {Intrinsic::hexagon_V6_vgtub_or, 42355}, // __builtin_HEXAGON_V6_vgtub_or
+      {Intrinsic::hexagon_V6_vgtub_or_128B, 42385}, // __builtin_HEXAGON_V6_vgtub_or_128B
+      {Intrinsic::hexagon_V6_vgtub_xor, 42420}, // __builtin_HEXAGON_V6_vgtub_xor
+      {Intrinsic::hexagon_V6_vgtub_xor_128B, 42451}, // __builtin_HEXAGON_V6_vgtub_xor_128B
+      {Intrinsic::hexagon_V6_vgtuh, 42487}, // __builtin_HEXAGON_V6_vgtuh
+      {Intrinsic::hexagon_V6_vgtuh_128B, 42514}, // __builtin_HEXAGON_V6_vgtuh_128B
+      {Intrinsic::hexagon_V6_vgtuh_and, 42546}, // __builtin_HEXAGON_V6_vgtuh_and
+      {Intrinsic::hexagon_V6_vgtuh_and_128B, 42577}, // __builtin_HEXAGON_V6_vgtuh_and_128B
+      {Intrinsic::hexagon_V6_vgtuh_or, 42613}, // __builtin_HEXAGON_V6_vgtuh_or
+      {Intrinsic::hexagon_V6_vgtuh_or_128B, 42643}, // __builtin_HEXAGON_V6_vgtuh_or_128B
+      {Intrinsic::hexagon_V6_vgtuh_xor, 42678}, // __builtin_HEXAGON_V6_vgtuh_xor
+      {Intrinsic::hexagon_V6_vgtuh_xor_128B, 42709}, // __builtin_HEXAGON_V6_vgtuh_xor_128B
+      {Intrinsic::hexagon_V6_vgtuw, 42745}, // __builtin_HEXAGON_V6_vgtuw
+      {Intrinsic::hexagon_V6_vgtuw_128B, 42772}, // __builtin_HEXAGON_V6_vgtuw_128B
+      {Intrinsic::hexagon_V6_vgtuw_and, 42804}, // __builtin_HEXAGON_V6_vgtuw_and
+      {Intrinsic::hexagon_V6_vgtuw_and_128B, 42835}, // __builtin_HEXAGON_V6_vgtuw_and_128B
+      {Intrinsic::hexagon_V6_vgtuw_or, 42871}, // __builtin_HEXAGON_V6_vgtuw_or
+      {Intrinsic::hexagon_V6_vgtuw_or_128B, 42901}, // __builtin_HEXAGON_V6_vgtuw_or_128B
+      {Intrinsic::hexagon_V6_vgtuw_xor, 42936}, // __builtin_HEXAGON_V6_vgtuw_xor
+      {Intrinsic::hexagon_V6_vgtuw_xor_128B, 42967}, // __builtin_HEXAGON_V6_vgtuw_xor_128B
+      {Intrinsic::hexagon_V6_vgtw, 43003}, // __builtin_HEXAGON_V6_vgtw
+      {Intrinsic::hexagon_V6_vgtw_128B, 43029}, // __builtin_HEXAGON_V6_vgtw_128B
+      {Intrinsic::hexagon_V6_vgtw_and, 43060}, // __builtin_HEXAGON_V6_vgtw_and
+      {Intrinsic::hexagon_V6_vgtw_and_128B, 43090}, // __builtin_HEXAGON_V6_vgtw_and_128B
+      {Intrinsic::hexagon_V6_vgtw_or, 43125}, // __builtin_HEXAGON_V6_vgtw_or
+      {Intrinsic::hexagon_V6_vgtw_or_128B, 43154}, // __builtin_HEXAGON_V6_vgtw_or_128B
+      {Intrinsic::hexagon_V6_vgtw_xor, 43188}, // __builtin_HEXAGON_V6_vgtw_xor
+      {Intrinsic::hexagon_V6_vgtw_xor_128B, 43218}, // __builtin_HEXAGON_V6_vgtw_xor_128B
+      {Intrinsic::hexagon_V6_vinsertwr, 43253}, // __builtin_HEXAGON_V6_vinsertwr
+      {Intrinsic::hexagon_V6_vinsertwr_128B, 43284}, // __builtin_HEXAGON_V6_vinsertwr_128B
+      {Intrinsic::hexagon_V6_vlalignb, 43320}, // __builtin_HEXAGON_V6_vlalignb
+      {Intrinsic::hexagon_V6_vlalignb_128B, 43350}, // __builtin_HEXAGON_V6_vlalignb_128B
+      {Intrinsic::hexagon_V6_vlalignbi, 43385}, // __builtin_HEXAGON_V6_vlalignbi
+      {Intrinsic::hexagon_V6_vlalignbi_128B, 43416}, // __builtin_HEXAGON_V6_vlalignbi_128B
+      {Intrinsic::hexagon_V6_vlsrb, 43452}, // __builtin_HEXAGON_V6_vlsrb
+      {Intrinsic::hexagon_V6_vlsrb_128B, 43479}, // __builtin_HEXAGON_V6_vlsrb_128B
+      {Intrinsic::hexagon_V6_vlsrh, 43511}, // __builtin_HEXAGON_V6_vlsrh
+      {Intrinsic::hexagon_V6_vlsrh_128B, 43538}, // __builtin_HEXAGON_V6_vlsrh_128B
+      {Intrinsic::hexagon_V6_vlsrhv, 43570}, // __builtin_HEXAGON_V6_vlsrhv
+      {Intrinsic::hexagon_V6_vlsrhv_128B, 43598}, // __builtin_HEXAGON_V6_vlsrhv_128B
+      {Intrinsic::hexagon_V6_vlsrw, 43631}, // __builtin_HEXAGON_V6_vlsrw
+      {Intrinsic::hexagon_V6_vlsrw_128B, 43658}, // __builtin_HEXAGON_V6_vlsrw_128B
+      {Intrinsic::hexagon_V6_vlsrwv, 43690}, // __builtin_HEXAGON_V6_vlsrwv
+      {Intrinsic::hexagon_V6_vlsrwv_128B, 43718}, // __builtin_HEXAGON_V6_vlsrwv_128B
+      {Intrinsic::hexagon_V6_vlut4, 43751}, // __builtin_HEXAGON_V6_vlut4
+      {Intrinsic::hexagon_V6_vlut4_128B, 43778}, // __builtin_HEXAGON_V6_vlut4_128B
+      {Intrinsic::hexagon_V6_vlutvvb, 43810}, // __builtin_HEXAGON_V6_vlutvvb
+      {Intrinsic::hexagon_V6_vlutvvb_128B, 43839}, // __builtin_HEXAGON_V6_vlutvvb_128B
+      {Intrinsic::hexagon_V6_vlutvvb_nm, 43873}, // __builtin_HEXAGON_V6_vlutvvb_nm
+      {Intrinsic::hexagon_V6_vlutvvb_nm_128B, 43905}, // __builtin_HEXAGON_V6_vlutvvb_nm_128B
+      {Intrinsic::hexagon_V6_vlutvvb_oracc, 43942}, // __builtin_HEXAGON_V6_vlutvvb_oracc
+      {Intrinsic::hexagon_V6_vlutvvb_oracc_128B, 43977}, // __builtin_HEXAGON_V6_vlutvvb_oracc_128B
+      {Intrinsic::hexagon_V6_vlutvvb_oracci, 44017}, // __builtin_HEXAGON_V6_vlutvvb_oracci
+      {Intrinsic::hexagon_V6_vlutvvb_oracci_128B, 44053}, // __builtin_HEXAGON_V6_vlutvvb_oracci_128B
+      {Intrinsic::hexagon_V6_vlutvvbi, 44094}, // __builtin_HEXAGON_V6_vlutvvbi
+      {Intrinsic::hexagon_V6_vlutvvbi_128B, 44124}, // __builtin_HEXAGON_V6_vlutvvbi_128B
+      {Intrinsic::hexagon_V6_vlutvwh, 44159}, // __builtin_HEXAGON_V6_vlutvwh
+      {Intrinsic::hexagon_V6_vlutvwh_128B, 44188}, // __builtin_HEXAGON_V6_vlutvwh_128B
+      {Intrinsic::hexagon_V6_vlutvwh_nm, 44222}, // __builtin_HEXAGON_V6_vlutvwh_nm
+      {Intrinsic::hexagon_V6_vlutvwh_nm_128B, 44254}, // __builtin_HEXAGON_V6_vlutvwh_nm_128B
+      {Intrinsic::hexagon_V6_vlutvwh_oracc, 44291}, // __builtin_HEXAGON_V6_vlutvwh_oracc
+      {Intrinsic::hexagon_V6_vlutvwh_oracc_128B, 44326}, // __builtin_HEXAGON_V6_vlutvwh_oracc_128B
+      {Intrinsic::hexagon_V6_vlutvwh_oracci, 44366}, // __builtin_HEXAGON_V6_vlutvwh_oracci
+      {Intrinsic::hexagon_V6_vlutvwh_oracci_128B, 44402}, // __builtin_HEXAGON_V6_vlutvwh_oracci_128B
+      {Intrinsic::hexagon_V6_vlutvwhi, 44443}, // __builtin_HEXAGON_V6_vlutvwhi
+      {Intrinsic::hexagon_V6_vlutvwhi_128B, 44473}, // __builtin_HEXAGON_V6_vlutvwhi_128B
+      {Intrinsic::hexagon_V6_vmaskedstorenq, 44508}, // __builtin_HEXAGON_V6_vmaskedstorenq
+      {Intrinsic::hexagon_V6_vmaskedstorenq_128B, 44544}, // __builtin_HEXAGON_V6_vmaskedstorenq_128B
+      {Intrinsic::hexagon_V6_vmaskedstorentnq, 44585}, // __builtin_HEXAGON_V6_vmaskedstorentnq
+      {Intrinsic::hexagon_V6_vmaskedstorentnq_128B, 44623}, // __builtin_HEXAGON_V6_vmaskedstorentnq_128B
+      {Intrinsic::hexagon_V6_vmaskedstorentq, 44666}, // __builtin_HEXAGON_V6_vmaskedstorentq
+      {Intrinsic::hexagon_V6_vmaskedstorentq_128B, 44703}, // __builtin_HEXAGON_V6_vmaskedstorentq_128B
+      {Intrinsic::hexagon_V6_vmaskedstoreq, 44745}, // __builtin_HEXAGON_V6_vmaskedstoreq
+      {Intrinsic::hexagon_V6_vmaskedstoreq_128B, 44780}, // __builtin_HEXAGON_V6_vmaskedstoreq_128B
+      {Intrinsic::hexagon_V6_vmaxb, 44820}, // __builtin_HEXAGON_V6_vmaxb
+      {Intrinsic::hexagon_V6_vmaxb_128B, 44847}, // __builtin_HEXAGON_V6_vmaxb_128B
+      {Intrinsic::hexagon_V6_vmaxh, 44879}, // __builtin_HEXAGON_V6_vmaxh
+      {Intrinsic::hexagon_V6_vmaxh_128B, 44906}, // __builtin_HEXAGON_V6_vmaxh_128B
+      {Intrinsic::hexagon_V6_vmaxub, 44938}, // __builtin_HEXAGON_V6_vmaxub
+      {Intrinsic::hexagon_V6_vmaxub_128B, 44966}, // __builtin_HEXAGON_V6_vmaxub_128B
+      {Intrinsic::hexagon_V6_vmaxuh, 44999}, // __builtin_HEXAGON_V6_vmaxuh
+      {Intrinsic::hexagon_V6_vmaxuh_128B, 45027}, // __builtin_HEXAGON_V6_vmaxuh_128B
+      {Intrinsic::hexagon_V6_vmaxw, 45060}, // __builtin_HEXAGON_V6_vmaxw
+      {Intrinsic::hexagon_V6_vmaxw_128B, 45087}, // __builtin_HEXAGON_V6_vmaxw_128B
+      {Intrinsic::hexagon_V6_vminb, 45119}, // __builtin_HEXAGON_V6_vminb
+      {Intrinsic::hexagon_V6_vminb_128B, 45146}, // __builtin_HEXAGON_V6_vminb_128B
+      {Intrinsic::hexagon_V6_vminh, 45178}, // __builtin_HEXAGON_V6_vminh
+      {Intrinsic::hexagon_V6_vminh_128B, 45205}, // __builtin_HEXAGON_V6_vminh_128B
+      {Intrinsic::hexagon_V6_vminub, 45237}, // __builtin_HEXAGON_V6_vminub
+      {Intrinsic::hexagon_V6_vminub_128B, 45265}, // __builtin_HEXAGON_V6_vminub_128B
+      {Intrinsic::hexagon_V6_vminuh, 45298}, // __builtin_HEXAGON_V6_vminuh
+      {Intrinsic::hexagon_V6_vminuh_128B, 45326}, // __builtin_HEXAGON_V6_vminuh_128B
+      {Intrinsic::hexagon_V6_vminw, 45359}, // __builtin_HEXAGON_V6_vminw
+      {Intrinsic::hexagon_V6_vminw_128B, 45386}, // __builtin_HEXAGON_V6_vminw_128B
+      {Intrinsic::hexagon_V6_vmpabus, 45418}, // __builtin_HEXAGON_V6_vmpabus
+      {Intrinsic::hexagon_V6_vmpabus_128B, 45447}, // __builtin_HEXAGON_V6_vmpabus_128B
+      {Intrinsic::hexagon_V6_vmpabus_acc, 45481}, // __builtin_HEXAGON_V6_vmpabus_acc
+      {Intrinsic::hexagon_V6_vmpabus_acc_128B, 45514}, // __builtin_HEXAGON_V6_vmpabus_acc_128B
+      {Intrinsic::hexagon_V6_vmpabusv, 45552}, // __builtin_HEXAGON_V6_vmpabusv
+      {Intrinsic::hexagon_V6_vmpabusv_128B, 45582}, // __builtin_HEXAGON_V6_vmpabusv_128B
+      {Intrinsic::hexagon_V6_vmpabuu, 45617}, // __builtin_HEXAGON_V6_vmpabuu
+      {Intrinsic::hexagon_V6_vmpabuu_128B, 45646}, // __builtin_HEXAGON_V6_vmpabuu_128B
+      {Intrinsic::hexagon_V6_vmpabuu_acc, 45680}, // __builtin_HEXAGON_V6_vmpabuu_acc
+      {Intrinsic::hexagon_V6_vmpabuu_acc_128B, 45713}, // __builtin_HEXAGON_V6_vmpabuu_acc_128B
+      {Intrinsic::hexagon_V6_vmpabuuv, 45751}, // __builtin_HEXAGON_V6_vmpabuuv
+      {Intrinsic::hexagon_V6_vmpabuuv_128B, 45781}, // __builtin_HEXAGON_V6_vmpabuuv_128B
+      {Intrinsic::hexagon_V6_vmpahb, 45816}, // __builtin_HEXAGON_V6_vmpahb
+      {Intrinsic::hexagon_V6_vmpahb_128B, 45844}, // __builtin_HEXAGON_V6_vmpahb_128B
+      {Intrinsic::hexagon_V6_vmpahb_acc, 45877}, // __builtin_HEXAGON_V6_vmpahb_acc
+      {Intrinsic::hexagon_V6_vmpahb_acc_128B, 45909}, // __builtin_HEXAGON_V6_vmpahb_acc_128B
+      {Intrinsic::hexagon_V6_vmpahhsat, 45946}, // __builtin_HEXAGON_V6_vmpahhsat
+      {Intrinsic::hexagon_V6_vmpahhsat_128B, 45977}, // __builtin_HEXAGON_V6_vmpahhsat_128B
+      {Intrinsic::hexagon_V6_vmpauhb, 46013}, // __builtin_HEXAGON_V6_vmpauhb
+      {Intrinsic::hexagon_V6_vmpauhb_128B, 46042}, // __builtin_HEXAGON_V6_vmpauhb_128B
+      {Intrinsic::hexagon_V6_vmpauhb_acc, 46076}, // __builtin_HEXAGON_V6_vmpauhb_acc
+      {Intrinsic::hexagon_V6_vmpauhb_acc_128B, 46109}, // __builtin_HEXAGON_V6_vmpauhb_acc_128B
+      {Intrinsic::hexagon_V6_vmpauhuhsat, 46147}, // __builtin_HEXAGON_V6_vmpauhuhsat
+      {Intrinsic::hexagon_V6_vmpauhuhsat_128B, 46180}, // __builtin_HEXAGON_V6_vmpauhuhsat_128B
+      {Intrinsic::hexagon_V6_vmpsuhuhsat, 46218}, // __builtin_HEXAGON_V6_vmpsuhuhsat
+      {Intrinsic::hexagon_V6_vmpsuhuhsat_128B, 46251}, // __builtin_HEXAGON_V6_vmpsuhuhsat_128B
+      {Intrinsic::hexagon_V6_vmpybus, 46289}, // __builtin_HEXAGON_V6_vmpybus
+      {Intrinsic::hexagon_V6_vmpybus_128B, 46318}, // __builtin_HEXAGON_V6_vmpybus_128B
+      {Intrinsic::hexagon_V6_vmpybus_acc, 46352}, // __builtin_HEXAGON_V6_vmpybus_acc
+      {Intrinsic::hexagon_V6_vmpybus_acc_128B, 46385}, // __builtin_HEXAGON_V6_vmpybus_acc_128B
+      {Intrinsic::hexagon_V6_vmpybusv, 46423}, // __builtin_HEXAGON_V6_vmpybusv
+      {Intrinsic::hexagon_V6_vmpybusv_128B, 46453}, // __builtin_HEXAGON_V6_vmpybusv_128B
+      {Intrinsic::hexagon_V6_vmpybusv_acc, 46488}, // __builtin_HEXAGON_V6_vmpybusv_acc
+      {Intrinsic::hexagon_V6_vmpybusv_acc_128B, 46522}, // __builtin_HEXAGON_V6_vmpybusv_acc_128B
+      {Intrinsic::hexagon_V6_vmpybv, 46561}, // __builtin_HEXAGON_V6_vmpybv
+      {Intrinsic::hexagon_V6_vmpybv_128B, 46589}, // __builtin_HEXAGON_V6_vmpybv_128B
+      {Intrinsic::hexagon_V6_vmpybv_acc, 46622}, // __builtin_HEXAGON_V6_vmpybv_acc
+      {Intrinsic::hexagon_V6_vmpybv_acc_128B, 46654}, // __builtin_HEXAGON_V6_vmpybv_acc_128B
+      {Intrinsic::hexagon_V6_vmpyewuh, 46691}, // __builtin_HEXAGON_V6_vmpyewuh
+      {Intrinsic::hexagon_V6_vmpyewuh_128B, 46721}, // __builtin_HEXAGON_V6_vmpyewuh_128B
+      {Intrinsic::hexagon_V6_vmpyewuh_64, 46756}, // __builtin_HEXAGON_V6_vmpyewuh_64
+      {Intrinsic::hexagon_V6_vmpyewuh_64_128B, 46789}, // __builtin_HEXAGON_V6_vmpyewuh_64_128B
+      {Intrinsic::hexagon_V6_vmpyh, 46827}, // __builtin_HEXAGON_V6_vmpyh
+      {Intrinsic::hexagon_V6_vmpyh_128B, 46854}, // __builtin_HEXAGON_V6_vmpyh_128B
+      {Intrinsic::hexagon_V6_vmpyh_acc, 46886}, // __builtin_HEXAGON_V6_vmpyh_acc
+      {Intrinsic::hexagon_V6_vmpyh_acc_128B, 46917}, // __builtin_HEXAGON_V6_vmpyh_acc_128B
+      {Intrinsic::hexagon_V6_vmpyhsat_acc, 46953}, // __builtin_HEXAGON_V6_vmpyhsat_acc
+      {Intrinsic::hexagon_V6_vmpyhsat_acc_128B, 46987}, // __builtin_HEXAGON_V6_vmpyhsat_acc_128B
+      {Intrinsic::hexagon_V6_vmpyhsrs, 47026}, // __builtin_HEXAGON_V6_vmpyhsrs
+      {Intrinsic::hexagon_V6_vmpyhsrs_128B, 47056}, // __builtin_HEXAGON_V6_vmpyhsrs_128B
+      {Intrinsic::hexagon_V6_vmpyhss, 47091}, // __builtin_HEXAGON_V6_vmpyhss
+      {Intrinsic::hexagon_V6_vmpyhss_128B, 47120}, // __builtin_HEXAGON_V6_vmpyhss_128B
+      {Intrinsic::hexagon_V6_vmpyhus, 47154}, // __builtin_HEXAGON_V6_vmpyhus
+      {Intrinsic::hexagon_V6_vmpyhus_128B, 47183}, // __builtin_HEXAGON_V6_vmpyhus_128B
+      {Intrinsic::hexagon_V6_vmpyhus_acc, 47217}, // __builtin_HEXAGON_V6_vmpyhus_acc
+      {Intrinsic::hexagon_V6_vmpyhus_acc_128B, 47250}, // __builtin_HEXAGON_V6_vmpyhus_acc_128B
+      {Intrinsic::hexagon_V6_vmpyhv, 47288}, // __builtin_HEXAGON_V6_vmpyhv
+      {Intrinsic::hexagon_V6_vmpyhv_128B, 47316}, // __builtin_HEXAGON_V6_vmpyhv_128B
+      {Intrinsic::hexagon_V6_vmpyhv_acc, 47349}, // __builtin_HEXAGON_V6_vmpyhv_acc
+      {Intrinsic::hexagon_V6_vmpyhv_acc_128B, 47381}, // __builtin_HEXAGON_V6_vmpyhv_acc_128B
+      {Intrinsic::hexagon_V6_vmpyhvsrs, 47418}, // __builtin_HEXAGON_V6_vmpyhvsrs
+      {Intrinsic::hexagon_V6_vmpyhvsrs_128B, 47449}, // __builtin_HEXAGON_V6_vmpyhvsrs_128B
+      {Intrinsic::hexagon_V6_vmpyieoh, 47485}, // __builtin_HEXAGON_V6_vmpyieoh
+      {Intrinsic::hexagon_V6_vmpyieoh_128B, 47515}, // __builtin_HEXAGON_V6_vmpyieoh_128B
+      {Intrinsic::hexagon_V6_vmpyiewh_acc, 47550}, // __builtin_HEXAGON_V6_vmpyiewh_acc
+      {Intrinsic::hexagon_V6_vmpyiewh_acc_128B, 47584}, // __builtin_HEXAGON_V6_vmpyiewh_acc_128B
+      {Intrinsic::hexagon_V6_vmpyiewuh, 47623}, // __builtin_HEXAGON_V6_vmpyiewuh
+      {Intrinsic::hexagon_V6_vmpyiewuh_128B, 47654}, // __builtin_HEXAGON_V6_vmpyiewuh_128B
+      {Intrinsic::hexagon_V6_vmpyiewuh_acc, 47690}, // __builtin_HEXAGON_V6_vmpyiewuh_acc
+      {Intrinsic::hexagon_V6_vmpyiewuh_acc_128B, 47725}, // __builtin_HEXAGON_V6_vmpyiewuh_acc_128B
+      {Intrinsic::hexagon_V6_vmpyih, 47765}, // __builtin_HEXAGON_V6_vmpyih
+      {Intrinsic::hexagon_V6_vmpyih_128B, 47793}, // __builtin_HEXAGON_V6_vmpyih_128B
+      {Intrinsic::hexagon_V6_vmpyih_acc, 47826}, // __builtin_HEXAGON_V6_vmpyih_acc
+      {Intrinsic::hexagon_V6_vmpyih_acc_128B, 47858}, // __builtin_HEXAGON_V6_vmpyih_acc_128B
+      {Intrinsic::hexagon_V6_vmpyihb, 47895}, // __builtin_HEXAGON_V6_vmpyihb
+      {Intrinsic::hexagon_V6_vmpyihb_128B, 47924}, // __builtin_HEXAGON_V6_vmpyihb_128B
+      {Intrinsic::hexagon_V6_vmpyihb_acc, 47958}, // __builtin_HEXAGON_V6_vmpyihb_acc
+      {Intrinsic::hexagon_V6_vmpyihb_acc_128B, 47991}, // __builtin_HEXAGON_V6_vmpyihb_acc_128B
+      {Intrinsic::hexagon_V6_vmpyiowh, 48029}, // __builtin_HEXAGON_V6_vmpyiowh
+      {Intrinsic::hexagon_V6_vmpyiowh_128B, 48059}, // __builtin_HEXAGON_V6_vmpyiowh_128B
+      {Intrinsic::hexagon_V6_vmpyiwb, 48094}, // __builtin_HEXAGON_V6_vmpyiwb
+      {Intrinsic::hexagon_V6_vmpyiwb_128B, 48123}, // __builtin_HEXAGON_V6_vmpyiwb_128B
+      {Intrinsic::hexagon_V6_vmpyiwb_acc, 48157}, // __builtin_HEXAGON_V6_vmpyiwb_acc
+      {Intrinsic::hexagon_V6_vmpyiwb_acc_128B, 48190}, // __builtin_HEXAGON_V6_vmpyiwb_acc_128B
+      {Intrinsic::hexagon_V6_vmpyiwh, 48228}, // __builtin_HEXAGON_V6_vmpyiwh
+      {Intrinsic::hexagon_V6_vmpyiwh_128B, 48257}, // __builtin_HEXAGON_V6_vmpyiwh_128B
+      {Intrinsic::hexagon_V6_vmpyiwh_acc, 48291}, // __builtin_HEXAGON_V6_vmpyiwh_acc
+      {Intrinsic::hexagon_V6_vmpyiwh_acc_128B, 48324}, // __builtin_HEXAGON_V6_vmpyiwh_acc_128B
+      {Intrinsic::hexagon_V6_vmpyiwub, 48362}, // __builtin_HEXAGON_V6_vmpyiwub
+      {Intrinsic::hexagon_V6_vmpyiwub_128B, 48392}, // __builtin_HEXAGON_V6_vmpyiwub_128B
+      {Intrinsic::hexagon_V6_vmpyiwub_acc, 48427}, // __builtin_HEXAGON_V6_vmpyiwub_acc
+      {Intrinsic::hexagon_V6_vmpyiwub_acc_128B, 48461}, // __builtin_HEXAGON_V6_vmpyiwub_acc_128B
+      {Intrinsic::hexagon_V6_vmpyowh, 48500}, // __builtin_HEXAGON_V6_vmpyowh
+      {Intrinsic::hexagon_V6_vmpyowh_128B, 48529}, // __builtin_HEXAGON_V6_vmpyowh_128B
+      {Intrinsic::hexagon_V6_vmpyowh_64_acc, 48563}, // __builtin_HEXAGON_V6_vmpyowh_64_acc
+      {Intrinsic::hexagon_V6_vmpyowh_64_acc_128B, 48599}, // __builtin_HEXAGON_V6_vmpyowh_64_acc_128B
+      {Intrinsic::hexagon_V6_vmpyowh_rnd, 48640}, // __builtin_HEXAGON_V6_vmpyowh_rnd
+      {Intrinsic::hexagon_V6_vmpyowh_rnd_128B, 48673}, // __builtin_HEXAGON_V6_vmpyowh_rnd_128B
+      {Intrinsic::hexagon_V6_vmpyowh_rnd_sacc, 48711}, // __builtin_HEXAGON_V6_vmpyowh_rnd_sacc
+      {Intrinsic::hexagon_V6_vmpyowh_rnd_sacc_128B, 48749}, // __builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B
+      {Intrinsic::hexagon_V6_vmpyowh_sacc, 48792}, // __builtin_HEXAGON_V6_vmpyowh_sacc
+      {Intrinsic::hexagon_V6_vmpyowh_sacc_128B, 48826}, // __builtin_HEXAGON_V6_vmpyowh_sacc_128B
+      {Intrinsic::hexagon_V6_vmpyub, 48865}, // __builtin_HEXAGON_V6_vmpyub
+      {Intrinsic::hexagon_V6_vmpyub_128B, 48893}, // __builtin_HEXAGON_V6_vmpyub_128B
+      {Intrinsic::hexagon_V6_vmpyub_acc, 48926}, // __builtin_HEXAGON_V6_vmpyub_acc
+      {Intrinsic::hexagon_V6_vmpyub_acc_128B, 48958}, // __builtin_HEXAGON_V6_vmpyub_acc_128B
+      {Intrinsic::hexagon_V6_vmpyubv, 48995}, // __builtin_HEXAGON_V6_vmpyubv
+      {Intrinsic::hexagon_V6_vmpyubv_128B, 49024}, // __builtin_HEXAGON_V6_vmpyubv_128B
+      {Intrinsic::hexagon_V6_vmpyubv_acc, 49058}, // __builtin_HEXAGON_V6_vmpyubv_acc
+      {Intrinsic::hexagon_V6_vmpyubv_acc_128B, 49091}, // __builtin_HEXAGON_V6_vmpyubv_acc_128B
+      {Intrinsic::hexagon_V6_vmpyuh, 49129}, // __builtin_HEXAGON_V6_vmpyuh
+      {Intrinsic::hexagon_V6_vmpyuh_128B, 49157}, // __builtin_HEXAGON_V6_vmpyuh_128B
+      {Intrinsic::hexagon_V6_vmpyuh_acc, 49190}, // __builtin_HEXAGON_V6_vmpyuh_acc
+      {Intrinsic::hexagon_V6_vmpyuh_acc_128B, 49222}, // __builtin_HEXAGON_V6_vmpyuh_acc_128B
+      {Intrinsic::hexagon_V6_vmpyuhe, 49259}, // __builtin_HEXAGON_V6_vmpyuhe
+      {Intrinsic::hexagon_V6_vmpyuhe_128B, 49288}, // __builtin_HEXAGON_V6_vmpyuhe_128B
+      {Intrinsic::hexagon_V6_vmpyuhe_acc, 49322}, // __builtin_HEXAGON_V6_vmpyuhe_acc
+      {Intrinsic::hexagon_V6_vmpyuhe_acc_128B, 49355}, // __builtin_HEXAGON_V6_vmpyuhe_acc_128B
+      {Intrinsic::hexagon_V6_vmpyuhv, 49393}, // __builtin_HEXAGON_V6_vmpyuhv
+      {Intrinsic::hexagon_V6_vmpyuhv_128B, 49422}, // __builtin_HEXAGON_V6_vmpyuhv_128B
+      {Intrinsic::hexagon_V6_vmpyuhv_acc, 49456}, // __builtin_HEXAGON_V6_vmpyuhv_acc
+      {Intrinsic::hexagon_V6_vmpyuhv_acc_128B, 49489}, // __builtin_HEXAGON_V6_vmpyuhv_acc_128B
+      {Intrinsic::hexagon_V6_vmux, 49527}, // __builtin_HEXAGON_V6_vmux
+      {Intrinsic::hexagon_V6_vmux_128B, 49553}, // __builtin_HEXAGON_V6_vmux_128B
+      {Intrinsic::hexagon_V6_vnavgb, 49584}, // __builtin_HEXAGON_V6_vnavgb
+      {Intrinsic::hexagon_V6_vnavgb_128B, 49612}, // __builtin_HEXAGON_V6_vnavgb_128B
+      {Intrinsic::hexagon_V6_vnavgh, 49645}, // __builtin_HEXAGON_V6_vnavgh
+      {Intrinsic::hexagon_V6_vnavgh_128B, 49673}, // __builtin_HEXAGON_V6_vnavgh_128B
+      {Intrinsic::hexagon_V6_vnavgub, 49706}, // __builtin_HEXAGON_V6_vnavgub
+      {Intrinsic::hexagon_V6_vnavgub_128B, 49735}, // __builtin_HEXAGON_V6_vnavgub_128B
+      {Intrinsic::hexagon_V6_vnavgw, 49769}, // __builtin_HEXAGON_V6_vnavgw
+      {Intrinsic::hexagon_V6_vnavgw_128B, 49797}, // __builtin_HEXAGON_V6_vnavgw_128B
+      {Intrinsic::hexagon_V6_vnormamth, 49830}, // __builtin_HEXAGON_V6_vnormamth
+      {Intrinsic::hexagon_V6_vnormamth_128B, 49861}, // __builtin_HEXAGON_V6_vnormamth_128B
+      {Intrinsic::hexagon_V6_vnormamtw, 49897}, // __builtin_HEXAGON_V6_vnormamtw
+      {Intrinsic::hexagon_V6_vnormamtw_128B, 49928}, // __builtin_HEXAGON_V6_vnormamtw_128B
+      {Intrinsic::hexagon_V6_vnot, 49964}, // __builtin_HEXAGON_V6_vnot
+      {Intrinsic::hexagon_V6_vnot_128B, 49990}, // __builtin_HEXAGON_V6_vnot_128B
+      {Intrinsic::hexagon_V6_vor, 50021}, // __builtin_HEXAGON_V6_vor
+      {Intrinsic::hexagon_V6_vor_128B, 50046}, // __builtin_HEXAGON_V6_vor_128B
+      {Intrinsic::hexagon_V6_vpackeb, 50076}, // __builtin_HEXAGON_V6_vpackeb
+      {Intrinsic::hexagon_V6_vpackeb_128B, 50105}, // __builtin_HEXAGON_V6_vpackeb_128B
+      {Intrinsic::hexagon_V6_vpackeh, 50139}, // __builtin_HEXAGON_V6_vpackeh
+      {Intrinsic::hexagon_V6_vpackeh_128B, 50168}, // __builtin_HEXAGON_V6_vpackeh_128B
+      {Intrinsic::hexagon_V6_vpackhb_sat, 50202}, // __builtin_HEXAGON_V6_vpackhb_sat
+      {Intrinsic::hexagon_V6_vpackhb_sat_128B, 50235}, // __builtin_HEXAGON_V6_vpackhb_sat_128B
+      {Intrinsic::hexagon_V6_vpackhub_sat, 50273}, // __builtin_HEXAGON_V6_vpackhub_sat
+      {Intrinsic::hexagon_V6_vpackhub_sat_128B, 50307}, // __builtin_HEXAGON_V6_vpackhub_sat_128B
+      {Intrinsic::hexagon_V6_vpackob, 50346}, // __builtin_HEXAGON_V6_vpackob
+      {Intrinsic::hexagon_V6_vpackob_128B, 50375}, // __builtin_HEXAGON_V6_vpackob_128B
+      {Intrinsic::hexagon_V6_vpackoh, 50409}, // __builtin_HEXAGON_V6_vpackoh
+      {Intrinsic::hexagon_V6_vpackoh_128B, 50438}, // __builtin_HEXAGON_V6_vpackoh_128B
+      {Intrinsic::hexagon_V6_vpackwh_sat, 50472}, // __builtin_HEXAGON_V6_vpackwh_sat
+      {Intrinsic::hexagon_V6_vpackwh_sat_128B, 50505}, // __builtin_HEXAGON_V6_vpackwh_sat_128B
+      {Intrinsic::hexagon_V6_vpackwuh_sat, 50543}, // __builtin_HEXAGON_V6_vpackwuh_sat
+      {Intrinsic::hexagon_V6_vpackwuh_sat_128B, 50577}, // __builtin_HEXAGON_V6_vpackwuh_sat_128B
+      {Intrinsic::hexagon_V6_vpopcounth, 50616}, // __builtin_HEXAGON_V6_vpopcounth
+      {Intrinsic::hexagon_V6_vpopcounth_128B, 50648}, // __builtin_HEXAGON_V6_vpopcounth_128B
+      {Intrinsic::hexagon_V6_vprefixqb, 50685}, // __builtin_HEXAGON_V6_vprefixqb
+      {Intrinsic::hexagon_V6_vprefixqb_128B, 50716}, // __builtin_HEXAGON_V6_vprefixqb_128B
+      {Intrinsic::hexagon_V6_vprefixqh, 50752}, // __builtin_HEXAGON_V6_vprefixqh
+      {Intrinsic::hexagon_V6_vprefixqh_128B, 50783}, // __builtin_HEXAGON_V6_vprefixqh_128B
+      {Intrinsic::hexagon_V6_vprefixqw, 50819}, // __builtin_HEXAGON_V6_vprefixqw
+      {Intrinsic::hexagon_V6_vprefixqw_128B, 50850}, // __builtin_HEXAGON_V6_vprefixqw_128B
+      {Intrinsic::hexagon_V6_vrdelta, 50886}, // __builtin_HEXAGON_V6_vrdelta
+      {Intrinsic::hexagon_V6_vrdelta_128B, 50915}, // __builtin_HEXAGON_V6_vrdelta_128B
+      {Intrinsic::hexagon_V6_vrmpybub_rtt, 50949}, // __builtin_HEXAGON_V6_vrmpybub_rtt
+      {Intrinsic::hexagon_V6_vrmpybub_rtt_128B, 50983}, // __builtin_HEXAGON_V6_vrmpybub_rtt_128B
+      {Intrinsic::hexagon_V6_vrmpybub_rtt_acc, 51022}, // __builtin_HEXAGON_V6_vrmpybub_rtt_acc
+      {Intrinsic::hexagon_V6_vrmpybub_rtt_acc_128B, 51060}, // __builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B
+      {Intrinsic::hexagon_V6_vrmpybus, 51103}, // __builtin_HEXAGON_V6_vrmpybus
+      {Intrinsic::hexagon_V6_vrmpybus_128B, 51133}, // __builtin_HEXAGON_V6_vrmpybus_128B
+      {Intrinsic::hexagon_V6_vrmpybus_acc, 51168}, // __builtin_HEXAGON_V6_vrmpybus_acc
+      {Intrinsic::hexagon_V6_vrmpybus_acc_128B, 51202}, // __builtin_HEXAGON_V6_vrmpybus_acc_128B
+      {Intrinsic::hexagon_V6_vrmpybusi, 51241}, // __builtin_HEXAGON_V6_vrmpybusi
+      {Intrinsic::hexagon_V6_vrmpybusi_128B, 51272}, // __builtin_HEXAGON_V6_vrmpybusi_128B
+      {Intrinsic::hexagon_V6_vrmpybusi_acc, 51308}, // __builtin_HEXAGON_V6_vrmpybusi_acc
+      {Intrinsic::hexagon_V6_vrmpybusi_acc_128B, 51343}, // __builtin_HEXAGON_V6_vrmpybusi_acc_128B
+      {Intrinsic::hexagon_V6_vrmpybusv, 51383}, // __builtin_HEXAGON_V6_vrmpybusv
+      {Intrinsic::hexagon_V6_vrmpybusv_128B, 51414}, // __builtin_HEXAGON_V6_vrmpybusv_128B
+      {Intrinsic::hexagon_V6_vrmpybusv_acc, 51450}, // __builtin_HEXAGON_V6_vrmpybusv_acc
+      {Intrinsic::hexagon_V6_vrmpybusv_acc_128B, 51485}, // __builtin_HEXAGON_V6_vrmpybusv_acc_128B
+      {Intrinsic::hexagon_V6_vrmpybv, 51525}, // __builtin_HEXAGON_V6_vrmpybv
+      {Intrinsic::hexagon_V6_vrmpybv_128B, 51554}, // __builtin_HEXAGON_V6_vrmpybv_128B
+      {Intrinsic::hexagon_V6_vrmpybv_acc, 51588}, // __builtin_HEXAGON_V6_vrmpybv_acc
+      {Intrinsic::hexagon_V6_vrmpybv_acc_128B, 51621}, // __builtin_HEXAGON_V6_vrmpybv_acc_128B
+      {Intrinsic::hexagon_V6_vrmpyub, 51659}, // __builtin_HEXAGON_V6_vrmpyub
+      {Intrinsic::hexagon_V6_vrmpyub_128B, 51688}, // __builtin_HEXAGON_V6_vrmpyub_128B
+      {Intrinsic::hexagon_V6_vrmpyub_acc, 51722}, // __builtin_HEXAGON_V6_vrmpyub_acc
+      {Intrinsic::hexagon_V6_vrmpyub_acc_128B, 51755}, // __builtin_HEXAGON_V6_vrmpyub_acc_128B
+      {Intrinsic::hexagon_V6_vrmpyub_rtt, 51793}, // __builtin_HEXAGON_V6_vrmpyub_rtt
+      {Intrinsic::hexagon_V6_vrmpyub_rtt_128B, 51826}, // __builtin_HEXAGON_V6_vrmpyub_rtt_128B
+      {Intrinsic::hexagon_V6_vrmpyub_rtt_acc, 51864}, // __builtin_HEXAGON_V6_vrmpyub_rtt_acc
+      {Intrinsic::hexagon_V6_vrmpyub_rtt_acc_128B, 51901}, // __builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B
+      {Intrinsic::hexagon_V6_vrmpyubi, 51943}, // __builtin_HEXAGON_V6_vrmpyubi
+      {Intrinsic::hexagon_V6_vrmpyubi_128B, 51973}, // __builtin_HEXAGON_V6_vrmpyubi_128B
+      {Intrinsic::hexagon_V6_vrmpyubi_acc, 52008}, // __builtin_HEXAGON_V6_vrmpyubi_acc
+      {Intrinsic::hexagon_V6_vrmpyubi_acc_128B, 52042}, // __builtin_HEXAGON_V6_vrmpyubi_acc_128B
+      {Intrinsic::hexagon_V6_vrmpyubv, 52081}, // __builtin_HEXAGON_V6_vrmpyubv
+      {Intrinsic::hexagon_V6_vrmpyubv_128B, 52111}, // __builtin_HEXAGON_V6_vrmpyubv_128B
+      {Intrinsic::hexagon_V6_vrmpyubv_acc, 52146}, // __builtin_HEXAGON_V6_vrmpyubv_acc
+      {Intrinsic::hexagon_V6_vrmpyubv_acc_128B, 52180}, // __builtin_HEXAGON_V6_vrmpyubv_acc_128B
+      {Intrinsic::hexagon_V6_vror, 52219}, // __builtin_HEXAGON_V6_vror
+      {Intrinsic::hexagon_V6_vror_128B, 52245}, // __builtin_HEXAGON_V6_vror_128B
+      {Intrinsic::hexagon_V6_vroundhb, 52276}, // __builtin_HEXAGON_V6_vroundhb
+      {Intrinsic::hexagon_V6_vroundhb_128B, 52306}, // __builtin_HEXAGON_V6_vroundhb_128B
+      {Intrinsic::hexagon_V6_vroundhub, 52341}, // __builtin_HEXAGON_V6_vroundhub
+      {Intrinsic::hexagon_V6_vroundhub_128B, 52372}, // __builtin_HEXAGON_V6_vroundhub_128B
+      {Intrinsic::hexagon_V6_vrounduhub, 52408}, // __builtin_HEXAGON_V6_vrounduhub
+      {Intrinsic::hexagon_V6_vrounduhub_128B, 52440}, // __builtin_HEXAGON_V6_vrounduhub_128B
+      {Intrinsic::hexagon_V6_vrounduwuh, 52477}, // __builtin_HEXAGON_V6_vrounduwuh
+      {Intrinsic::hexagon_V6_vrounduwuh_128B, 52509}, // __builtin_HEXAGON_V6_vrounduwuh_128B
+      {Intrinsic::hexagon_V6_vroundwh, 52546}, // __builtin_HEXAGON_V6_vroundwh
+      {Intrinsic::hexagon_V6_vroundwh_128B, 52576}, // __builtin_HEXAGON_V6_vroundwh_128B
+      {Intrinsic::hexagon_V6_vroundwuh, 52611}, // __builtin_HEXAGON_V6_vroundwuh
+      {Intrinsic::hexagon_V6_vroundwuh_128B, 52642}, // __builtin_HEXAGON_V6_vroundwuh_128B
+      {Intrinsic::hexagon_V6_vrsadubi, 52678}, // __builtin_HEXAGON_V6_vrsadubi
+      {Intrinsic::hexagon_V6_vrsadubi_128B, 52708}, // __builtin_HEXAGON_V6_vrsadubi_128B
+      {Intrinsic::hexagon_V6_vrsadubi_acc, 52743}, // __builtin_HEXAGON_V6_vrsadubi_acc
+      {Intrinsic::hexagon_V6_vrsadubi_acc_128B, 52777}, // __builtin_HEXAGON_V6_vrsadubi_acc_128B
+      {Intrinsic::hexagon_V6_vsathub, 52816}, // __builtin_HEXAGON_V6_vsathub
+      {Intrinsic::hexagon_V6_vsathub_128B, 52845}, // __builtin_HEXAGON_V6_vsathub_128B
+      {Intrinsic::hexagon_V6_vsatuwuh, 52879}, // __builtin_HEXAGON_V6_vsatuwuh
+      {Intrinsic::hexagon_V6_vsatuwuh_128B, 52909}, // __builtin_HEXAGON_V6_vsatuwuh_128B
+      {Intrinsic::hexagon_V6_vsatwh, 52944}, // __builtin_HEXAGON_V6_vsatwh
+      {Intrinsic::hexagon_V6_vsatwh_128B, 52972}, // __builtin_HEXAGON_V6_vsatwh_128B
+      {Intrinsic::hexagon_V6_vsb, 53005}, // __builtin_HEXAGON_V6_vsb
+      {Intrinsic::hexagon_V6_vsb_128B, 53030}, // __builtin_HEXAGON_V6_vsb_128B
+      {Intrinsic::hexagon_V6_vscattermh, 53060}, // __builtin_HEXAGON_V6_vscattermh
+      {Intrinsic::hexagon_V6_vscattermh_128B, 53092}, // __builtin_HEXAGON_V6_vscattermh_128B
+      {Intrinsic::hexagon_V6_vscattermh_add, 53129}, // __builtin_HEXAGON_V6_vscattermh_add
+      {Intrinsic::hexagon_V6_vscattermh_add_128B, 53165}, // __builtin_HEXAGON_V6_vscattermh_add_128B
+      {Intrinsic::hexagon_V6_vscattermhq, 53206}, // __builtin_HEXAGON_V6_vscattermhq
+      {Intrinsic::hexagon_V6_vscattermhq_128B, 53239}, // __builtin_HEXAGON_V6_vscattermhq_128B
+      {Intrinsic::hexagon_V6_vscattermhw, 53277}, // __builtin_HEXAGON_V6_vscattermhw
+      {Intrinsic::hexagon_V6_vscattermhw_128B, 53310}, // __builtin_HEXAGON_V6_vscattermhw_128B
+      {Intrinsic::hexagon_V6_vscattermhw_add, 53348}, // __builtin_HEXAGON_V6_vscattermhw_add
+      {Intrinsic::hexagon_V6_vscattermhw_add_128B, 53385}, // __builtin_HEXAGON_V6_vscattermhw_add_128B
+      {Intrinsic::hexagon_V6_vscattermhwq, 53427}, // __builtin_HEXAGON_V6_vscattermhwq
+      {Intrinsic::hexagon_V6_vscattermhwq_128B, 53461}, // __builtin_HEXAGON_V6_vscattermhwq_128B
+      {Intrinsic::hexagon_V6_vscattermw, 53500}, // __builtin_HEXAGON_V6_vscattermw
+      {Intrinsic::hexagon_V6_vscattermw_128B, 53532}, // __builtin_HEXAGON_V6_vscattermw_128B
+      {Intrinsic::hexagon_V6_vscattermw_add, 53569}, // __builtin_HEXAGON_V6_vscattermw_add
+      {Intrinsic::hexagon_V6_vscattermw_add_128B, 53605}, // __builtin_HEXAGON_V6_vscattermw_add_128B
+      {Intrinsic::hexagon_V6_vscattermwq, 53646}, // __builtin_HEXAGON_V6_vscattermwq
+      {Intrinsic::hexagon_V6_vscattermwq_128B, 53679}, // __builtin_HEXAGON_V6_vscattermwq_128B
+      {Intrinsic::hexagon_V6_vsh, 53717}, // __builtin_HEXAGON_V6_vsh
+      {Intrinsic::hexagon_V6_vsh_128B, 53742}, // __builtin_HEXAGON_V6_vsh_128B
+      {Intrinsic::hexagon_V6_vshufeh, 53772}, // __builtin_HEXAGON_V6_vshufeh
+      {Intrinsic::hexagon_V6_vshufeh_128B, 53801}, // __builtin_HEXAGON_V6_vshufeh_128B
+      {Intrinsic::hexagon_V6_vshuffb, 53835}, // __builtin_HEXAGON_V6_vshuffb
+      {Intrinsic::hexagon_V6_vshuffb_128B, 53864}, // __builtin_HEXAGON_V6_vshuffb_128B
+      {Intrinsic::hexagon_V6_vshuffeb, 53898}, // __builtin_HEXAGON_V6_vshuffeb
+      {Intrinsic::hexagon_V6_vshuffeb_128B, 53928}, // __builtin_HEXAGON_V6_vshuffeb_128B
+      {Intrinsic::hexagon_V6_vshuffh, 53963}, // __builtin_HEXAGON_V6_vshuffh
+      {Intrinsic::hexagon_V6_vshuffh_128B, 53992}, // __builtin_HEXAGON_V6_vshuffh_128B
+      {Intrinsic::hexagon_V6_vshuffob, 54026}, // __builtin_HEXAGON_V6_vshuffob
+      {Intrinsic::hexagon_V6_vshuffob_128B, 54056}, // __builtin_HEXAGON_V6_vshuffob_128B
+      {Intrinsic::hexagon_V6_vshuffvdd, 54091}, // __builtin_HEXAGON_V6_vshuffvdd
+      {Intrinsic::hexagon_V6_vshuffvdd_128B, 54122}, // __builtin_HEXAGON_V6_vshuffvdd_128B
+      {Intrinsic::hexagon_V6_vshufoeb, 54158}, // __builtin_HEXAGON_V6_vshufoeb
+      {Intrinsic::hexagon_V6_vshufoeb_128B, 54188}, // __builtin_HEXAGON_V6_vshufoeb_128B
+      {Intrinsic::hexagon_V6_vshufoeh, 54223}, // __builtin_HEXAGON_V6_vshufoeh
+      {Intrinsic::hexagon_V6_vshufoeh_128B, 54253}, // __builtin_HEXAGON_V6_vshufoeh_128B
+      {Intrinsic::hexagon_V6_vshufoh, 54288}, // __builtin_HEXAGON_V6_vshufoh
+      {Intrinsic::hexagon_V6_vshufoh_128B, 54317}, // __builtin_HEXAGON_V6_vshufoh_128B
+      {Intrinsic::hexagon_V6_vsubb, 54351}, // __builtin_HEXAGON_V6_vsubb
+      {Intrinsic::hexagon_V6_vsubb_128B, 54378}, // __builtin_HEXAGON_V6_vsubb_128B
+      {Intrinsic::hexagon_V6_vsubb_dv, 54410}, // __builtin_HEXAGON_V6_vsubb_dv
+      {Intrinsic::hexagon_V6_vsubb_dv_128B, 54440}, // __builtin_HEXAGON_V6_vsubb_dv_128B
+      {Intrinsic::hexagon_V6_vsubbnq, 54475}, // __builtin_HEXAGON_V6_vsubbnq
+      {Intrinsic::hexagon_V6_vsubbnq_128B, 54504}, // __builtin_HEXAGON_V6_vsubbnq_128B
+      {Intrinsic::hexagon_V6_vsubbq, 54538}, // __builtin_HEXAGON_V6_vsubbq
+      {Intrinsic::hexagon_V6_vsubbq_128B, 54566}, // __builtin_HEXAGON_V6_vsubbq_128B
+      {Intrinsic::hexagon_V6_vsubbsat, 54599}, // __builtin_HEXAGON_V6_vsubbsat
+      {Intrinsic::hexagon_V6_vsubbsat_128B, 54629}, // __builtin_HEXAGON_V6_vsubbsat_128B
+      {Intrinsic::hexagon_V6_vsubbsat_dv, 54664}, // __builtin_HEXAGON_V6_vsubbsat_dv
+      {Intrinsic::hexagon_V6_vsubbsat_dv_128B, 54697}, // __builtin_HEXAGON_V6_vsubbsat_dv_128B
+      {Intrinsic::hexagon_V6_vsubh, 54802}, // __builtin_HEXAGON_V6_vsubh
+      {Intrinsic::hexagon_V6_vsubh_128B, 54829}, // __builtin_HEXAGON_V6_vsubh_128B
+      {Intrinsic::hexagon_V6_vsubh_dv, 54861}, // __builtin_HEXAGON_V6_vsubh_dv
+      {Intrinsic::hexagon_V6_vsubh_dv_128B, 54891}, // __builtin_HEXAGON_V6_vsubh_dv_128B
+      {Intrinsic::hexagon_V6_vsubhnq, 54926}, // __builtin_HEXAGON_V6_vsubhnq
+      {Intrinsic::hexagon_V6_vsubhnq_128B, 54955}, // __builtin_HEXAGON_V6_vsubhnq_128B
+      {Intrinsic::hexagon_V6_vsubhq, 54989}, // __builtin_HEXAGON_V6_vsubhq
+      {Intrinsic::hexagon_V6_vsubhq_128B, 55017}, // __builtin_HEXAGON_V6_vsubhq_128B
+      {Intrinsic::hexagon_V6_vsubhsat, 55050}, // __builtin_HEXAGON_V6_vsubhsat
+      {Intrinsic::hexagon_V6_vsubhsat_128B, 55080}, // __builtin_HEXAGON_V6_vsubhsat_128B
+      {Intrinsic::hexagon_V6_vsubhsat_dv, 55115}, // __builtin_HEXAGON_V6_vsubhsat_dv
+      {Intrinsic::hexagon_V6_vsubhsat_dv_128B, 55148}, // __builtin_HEXAGON_V6_vsubhsat_dv_128B
+      {Intrinsic::hexagon_V6_vsubhw, 55186}, // __builtin_HEXAGON_V6_vsubhw
+      {Intrinsic::hexagon_V6_vsubhw_128B, 55214}, // __builtin_HEXAGON_V6_vsubhw_128B
+      {Intrinsic::hexagon_V6_vsububh, 55247}, // __builtin_HEXAGON_V6_vsububh
+      {Intrinsic::hexagon_V6_vsububh_128B, 55276}, // __builtin_HEXAGON_V6_vsububh_128B
+      {Intrinsic::hexagon_V6_vsububsat, 55310}, // __builtin_HEXAGON_V6_vsububsat
+      {Intrinsic::hexagon_V6_vsububsat_128B, 55341}, // __builtin_HEXAGON_V6_vsububsat_128B
+      {Intrinsic::hexagon_V6_vsububsat_dv, 55377}, // __builtin_HEXAGON_V6_vsububsat_dv
+      {Intrinsic::hexagon_V6_vsububsat_dv_128B, 55411}, // __builtin_HEXAGON_V6_vsububsat_dv_128B
+      {Intrinsic::hexagon_V6_vsubububb_sat, 55450}, // __builtin_HEXAGON_V6_vsubububb_sat
+      {Intrinsic::hexagon_V6_vsubububb_sat_128B, 55485}, // __builtin_HEXAGON_V6_vsubububb_sat_128B
+      {Intrinsic::hexagon_V6_vsubuhsat, 55525}, // __builtin_HEXAGON_V6_vsubuhsat
+      {Intrinsic::hexagon_V6_vsubuhsat_128B, 55556}, // __builtin_HEXAGON_V6_vsubuhsat_128B
+      {Intrinsic::hexagon_V6_vsubuhsat_dv, 55592}, // __builtin_HEXAGON_V6_vsubuhsat_dv
+      {Intrinsic::hexagon_V6_vsubuhsat_dv_128B, 55626}, // __builtin_HEXAGON_V6_vsubuhsat_dv_128B
+      {Intrinsic::hexagon_V6_vsubuhw, 55665}, // __builtin_HEXAGON_V6_vsubuhw
+      {Intrinsic::hexagon_V6_vsubuhw_128B, 55694}, // __builtin_HEXAGON_V6_vsubuhw_128B
+      {Intrinsic::hexagon_V6_vsubuwsat, 55728}, // __builtin_HEXAGON_V6_vsubuwsat
+      {Intrinsic::hexagon_V6_vsubuwsat_128B, 55759}, // __builtin_HEXAGON_V6_vsubuwsat_128B
+      {Intrinsic::hexagon_V6_vsubuwsat_dv, 55795}, // __builtin_HEXAGON_V6_vsubuwsat_dv
+      {Intrinsic::hexagon_V6_vsubuwsat_dv_128B, 55829}, // __builtin_HEXAGON_V6_vsubuwsat_dv_128B
+      {Intrinsic::hexagon_V6_vsubw, 55868}, // __builtin_HEXAGON_V6_vsubw
+      {Intrinsic::hexagon_V6_vsubw_128B, 55895}, // __builtin_HEXAGON_V6_vsubw_128B
+      {Intrinsic::hexagon_V6_vsubw_dv, 55927}, // __builtin_HEXAGON_V6_vsubw_dv
+      {Intrinsic::hexagon_V6_vsubw_dv_128B, 55957}, // __builtin_HEXAGON_V6_vsubw_dv_128B
+      {Intrinsic::hexagon_V6_vsubwnq, 55992}, // __builtin_HEXAGON_V6_vsubwnq
+      {Intrinsic::hexagon_V6_vsubwnq_128B, 56021}, // __builtin_HEXAGON_V6_vsubwnq_128B
+      {Intrinsic::hexagon_V6_vsubwq, 56055}, // __builtin_HEXAGON_V6_vsubwq
+      {Intrinsic::hexagon_V6_vsubwq_128B, 56083}, // __builtin_HEXAGON_V6_vsubwq_128B
+      {Intrinsic::hexagon_V6_vsubwsat, 56116}, // __builtin_HEXAGON_V6_vsubwsat
+      {Intrinsic::hexagon_V6_vsubwsat_128B, 56146}, // __builtin_HEXAGON_V6_vsubwsat_128B
+      {Intrinsic::hexagon_V6_vsubwsat_dv, 56181}, // __builtin_HEXAGON_V6_vsubwsat_dv
+      {Intrinsic::hexagon_V6_vsubwsat_dv_128B, 56214}, // __builtin_HEXAGON_V6_vsubwsat_dv_128B
+      {Intrinsic::hexagon_V6_vswap, 56252}, // __builtin_HEXAGON_V6_vswap
+      {Intrinsic::hexagon_V6_vswap_128B, 56279}, // __builtin_HEXAGON_V6_vswap_128B
+      {Intrinsic::hexagon_V6_vtmpyb, 56311}, // __builtin_HEXAGON_V6_vtmpyb
+      {Intrinsic::hexagon_V6_vtmpyb_128B, 56339}, // __builtin_HEXAGON_V6_vtmpyb_128B
+      {Intrinsic::hexagon_V6_vtmpyb_acc, 56372}, // __builtin_HEXAGON_V6_vtmpyb_acc
+      {Intrinsic::hexagon_V6_vtmpyb_acc_128B, 56404}, // __builtin_HEXAGON_V6_vtmpyb_acc_128B
+      {Intrinsic::hexagon_V6_vtmpybus, 56441}, // __builtin_HEXAGON_V6_vtmpybus
+      {Intrinsic::hexagon_V6_vtmpybus_128B, 56471}, // __builtin_HEXAGON_V6_vtmpybus_128B
+      {Intrinsic::hexagon_V6_vtmpybus_acc, 56506}, // __builtin_HEXAGON_V6_vtmpybus_acc
+      {Intrinsic::hexagon_V6_vtmpybus_acc_128B, 56540}, // __builtin_HEXAGON_V6_vtmpybus_acc_128B
+      {Intrinsic::hexagon_V6_vtmpyhb, 56579}, // __builtin_HEXAGON_V6_vtmpyhb
+      {Intrinsic::hexagon_V6_vtmpyhb_128B, 56608}, // __builtin_HEXAGON_V6_vtmpyhb_128B
+      {Intrinsic::hexagon_V6_vtmpyhb_acc, 56642}, // __builtin_HEXAGON_V6_vtmpyhb_acc
+      {Intrinsic::hexagon_V6_vtmpyhb_acc_128B, 56675}, // __builtin_HEXAGON_V6_vtmpyhb_acc_128B
+      {Intrinsic::hexagon_V6_vunpackb, 56713}, // __builtin_HEXAGON_V6_vunpackb
+      {Intrinsic::hexagon_V6_vunpackb_128B, 56743}, // __builtin_HEXAGON_V6_vunpackb_128B
+      {Intrinsic::hexagon_V6_vunpackh, 56778}, // __builtin_HEXAGON_V6_vunpackh
+      {Intrinsic::hexagon_V6_vunpackh_128B, 56808}, // __builtin_HEXAGON_V6_vunpackh_128B
+      {Intrinsic::hexagon_V6_vunpackob, 56843}, // __builtin_HEXAGON_V6_vunpackob
+      {Intrinsic::hexagon_V6_vunpackob_128B, 56874}, // __builtin_HEXAGON_V6_vunpackob_128B
+      {Intrinsic::hexagon_V6_vunpackoh, 56910}, // __builtin_HEXAGON_V6_vunpackoh
+      {Intrinsic::hexagon_V6_vunpackoh_128B, 56941}, // __builtin_HEXAGON_V6_vunpackoh_128B
+      {Intrinsic::hexagon_V6_vunpackub, 56977}, // __builtin_HEXAGON_V6_vunpackub
+      {Intrinsic::hexagon_V6_vunpackub_128B, 57008}, // __builtin_HEXAGON_V6_vunpackub_128B
+      {Intrinsic::hexagon_V6_vunpackuh, 57044}, // __builtin_HEXAGON_V6_vunpackuh
+      {Intrinsic::hexagon_V6_vunpackuh_128B, 57075}, // __builtin_HEXAGON_V6_vunpackuh_128B
+      {Intrinsic::hexagon_V6_vxor, 57111}, // __builtin_HEXAGON_V6_vxor
+      {Intrinsic::hexagon_V6_vxor_128B, 57137}, // __builtin_HEXAGON_V6_vxor_128B
+      {Intrinsic::hexagon_V6_vzb, 57168}, // __builtin_HEXAGON_V6_vzb
+      {Intrinsic::hexagon_V6_vzb_128B, 57193}, // __builtin_HEXAGON_V6_vzb_128B
+      {Intrinsic::hexagon_V6_vzh, 57223}, // __builtin_HEXAGON_V6_vzh
+      {Intrinsic::hexagon_V6_vzh_128B, 57248}, // __builtin_HEXAGON_V6_vzh_128B
+      {Intrinsic::hexagon_Y2_dccleana, 57278}, // __builtin_HEXAGON_Y2_dccleana
+      {Intrinsic::hexagon_Y2_dccleaninva, 57308}, // __builtin_HEXAGON_Y2_dccleaninva
+      {Intrinsic::hexagon_Y2_dcinva, 57341}, // __builtin_HEXAGON_Y2_dcinva
+      {Intrinsic::hexagon_Y2_dczeroa, 57369}, // __builtin_HEXAGON_Y2_dczeroa
+      {Intrinsic::hexagon_Y4_l2fetch, 57398}, // __builtin_HEXAGON_Y4_l2fetch
+      {Intrinsic::hexagon_Y5_l2fetch, 57427}, // __builtin_HEXAGON_Y5_l2fetch
+      {Intrinsic::hexagon_prefetch, 57693}, // __builtin_HEXAGON_prefetch
+      {Intrinsic::hexagon_V6_vaddcarry, 33385}, // __builtin_HEXAGON_v6_vaddcarry
+      {Intrinsic::hexagon_V6_vaddcarry_128B, 33416}, // __builtin_HEXAGON_v6_vaddcarry_128B
+      {Intrinsic::hexagon_V6_vsubcarry, 54735}, // __builtin_HEXAGON_v6_vsubcarry
+      {Intrinsic::hexagon_V6_vsubcarry_128B, 54766}, // __builtin_HEXAGON_v6_vsubcarry_128B
+      {Intrinsic::hexagon_mm256i_vaddw, 57669}, // __builtin__mm256i_vaddw
+      {Intrinsic::hexagon_S2_storerb_pbr, 28017}, // __builtin_brev_stb
+      {Intrinsic::hexagon_S2_storerd_pbr, 28036}, // __builtin_brev_std
+      {Intrinsic::hexagon_S2_storerh_pbr, 28076}, // __builtin_brev_sth
+      {Intrinsic::hexagon_S2_storerf_pbr, 28055}, // __builtin_brev_sthhi
+      {Intrinsic::hexagon_S2_storeri_pbr, 28095}, // __builtin_brev_stw
+      {Intrinsic::hexagon_circ_ldb, 57456}, // __builtin_circ_ldb
+      {Intrinsic::hexagon_circ_ldd, 57475}, // __builtin_circ_ldd
+      {Intrinsic::hexagon_circ_ldh, 57494}, // __builtin_circ_ldh
+      {Intrinsic::hexagon_circ_ldub, 57513}, // __builtin_circ_ldub
+      {Intrinsic::hexagon_circ_lduh, 57533}, // __builtin_circ_lduh
+      {Intrinsic::hexagon_circ_ldw, 57553}, // __builtin_circ_ldw
+      {Intrinsic::hexagon_circ_stb, 57572}, // __builtin_circ_stb
+      {Intrinsic::hexagon_circ_std, 57591}, // __builtin_circ_std
+      {Intrinsic::hexagon_circ_sth, 57610}, // __builtin_circ_sth
+      {Intrinsic::hexagon_circ_sthhi, 57629}, // __builtin_circ_sthhi
+      {Intrinsic::hexagon_circ_stw, 57650}, // __builtin_circ_stw
+    };
+    // Entries are sorted by builtin name, so an exact match can be found
+    // with a binary search over the table.
+    auto I = std::lower_bound(std::begin(hexagonNames),
+                              std::end(hexagonNames),
+                              BuiltinNameStr);
+    if (I != std::end(hexagonNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
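+  // Note: each {ID, offset} pair in these per-target tables points into the
+  // BuiltinNames string blob emitted earlier in this generated file; keeping
+  // the entries sorted by builtin name is what lets std::lower_bound resolve
+  // a name in O(log n) comparisons. A minimal sketch of the entry layout and
+  // comparator this lookup assumes (the exact definition lives earlier in
+  // the file, not in this excerpt):
+  //
+  //   struct BuiltinEntry {
+  //     Intrinsic::ID IntrinID;   // intrinsic returned on an exact match
+  //     unsigned StrTabOffset;    // offset of the name within BuiltinNames
+  //     const char *getName() const { return &BuiltinNames[StrTabOffset]; }
+  //     bool operator<(StringRef RHS) const {
+  //       return strncmp(getName(), RHS.data(), RHS.size()) < 0;
+  //     }
+  //   };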
+  if (TargetPrefix == "mips") {
+    static const BuiltinEntry mipsNames[] = {
+      {Intrinsic::mips_absq_s_ph, 57720}, // __builtin_mips_absq_s_ph
+      {Intrinsic::mips_absq_s_qb, 57745}, // __builtin_mips_absq_s_qb
+      {Intrinsic::mips_absq_s_w, 57770}, // __builtin_mips_absq_s_w
+      {Intrinsic::mips_addq_ph, 57882}, // __builtin_mips_addq_ph
+      {Intrinsic::mips_addq_s_ph, 57905}, // __builtin_mips_addq_s_ph
+      {Intrinsic::mips_addq_s_w, 57930}, // __builtin_mips_addq_s_w
+      {Intrinsic::mips_addqh_ph, 57954}, // __builtin_mips_addqh_ph
+      {Intrinsic::mips_addqh_r_ph, 57978}, // __builtin_mips_addqh_r_ph
+      {Intrinsic::mips_addqh_r_w, 58004}, // __builtin_mips_addqh_r_w
+      {Intrinsic::mips_addqh_w, 58029}, // __builtin_mips_addqh_w
+      {Intrinsic::mips_addsc, 58328}, // __builtin_mips_addsc
+      {Intrinsic::mips_addu_ph, 58349}, // __builtin_mips_addu_ph
+      {Intrinsic::mips_addu_qb, 58372}, // __builtin_mips_addu_qb
+      {Intrinsic::mips_addu_s_ph, 58395}, // __builtin_mips_addu_s_ph
+      {Intrinsic::mips_addu_s_qb, 58420}, // __builtin_mips_addu_s_qb
+      {Intrinsic::mips_adduh_qb, 58445}, // __builtin_mips_adduh_qb
+      {Intrinsic::mips_adduh_r_qb, 58469}, // __builtin_mips_adduh_r_qb
+      {Intrinsic::mips_addwc, 58667}, // __builtin_mips_addwc
+      {Intrinsic::mips_append, 58729}, // __builtin_mips_append
+      {Intrinsic::mips_balign, 59295}, // __builtin_mips_balign
+      {Intrinsic::mips_bitrev, 59849}, // __builtin_mips_bitrev
+      {Intrinsic::mips_bposge32, 60227}, // __builtin_mips_bposge32
+      {Intrinsic::mips_cmp_eq_ph, 61466}, // __builtin_mips_cmp_eq_ph
+      {Intrinsic::mips_cmp_le_ph, 61491}, // __builtin_mips_cmp_le_ph
+      {Intrinsic::mips_cmp_lt_ph, 61516}, // __builtin_mips_cmp_lt_ph
+      {Intrinsic::mips_cmpgdu_eq_qb, 61541}, // __builtin_mips_cmpgdu_eq_qb
+      {Intrinsic::mips_cmpgdu_le_qb, 61569}, // __builtin_mips_cmpgdu_le_qb
+      {Intrinsic::mips_cmpgdu_lt_qb, 61597}, // __builtin_mips_cmpgdu_lt_qb
+      {Intrinsic::mips_cmpgu_eq_qb, 61625}, // __builtin_mips_cmpgu_eq_qb
+      {Intrinsic::mips_cmpgu_le_qb, 61652}, // __builtin_mips_cmpgu_le_qb
+      {Intrinsic::mips_cmpgu_lt_qb, 61679}, // __builtin_mips_cmpgu_lt_qb
+      {Intrinsic::mips_cmpu_eq_qb, 61706}, // __builtin_mips_cmpu_eq_qb
+      {Intrinsic::mips_cmpu_le_qb, 61732}, // __builtin_mips_cmpu_le_qb
+      {Intrinsic::mips_cmpu_lt_qb, 61758}, // __builtin_mips_cmpu_lt_qb
+      {Intrinsic::mips_dlsa, 62165}, // __builtin_mips_dlsa
+      {Intrinsic::mips_dpa_w_ph, 62323}, // __builtin_mips_dpa_w_ph
+      {Intrinsic::mips_dpaq_s_w_ph, 62491}, // __builtin_mips_dpaq_s_w_ph
+      {Intrinsic::mips_dpaq_sa_l_w, 62518}, // __builtin_mips_dpaq_sa_l_w
+      {Intrinsic::mips_dpaqx_s_w_ph, 62545}, // __builtin_mips_dpaqx_s_w_ph
+      {Intrinsic::mips_dpaqx_sa_w_ph, 62573}, // __builtin_mips_dpaqx_sa_w_ph
+      {Intrinsic::mips_dpau_h_qbl, 62602}, // __builtin_mips_dpau_h_qbl
+      {Intrinsic::mips_dpau_h_qbr, 62628}, // __builtin_mips_dpau_h_qbr
+      {Intrinsic::mips_dpax_w_ph, 62654}, // __builtin_mips_dpax_w_ph
+      {Intrinsic::mips_dps_w_ph, 62679}, // __builtin_mips_dps_w_ph
+      {Intrinsic::mips_dpsq_s_w_ph, 62703}, // __builtin_mips_dpsq_s_w_ph
+      {Intrinsic::mips_dpsq_sa_l_w, 62730}, // __builtin_mips_dpsq_sa_l_w
+      {Intrinsic::mips_dpsqx_s_w_ph, 62757}, // __builtin_mips_dpsqx_s_w_ph
+      {Intrinsic::mips_dpsqx_sa_w_ph, 62785}, // __builtin_mips_dpsqx_sa_w_ph
+      {Intrinsic::mips_dpsu_h_qbl, 62814}, // __builtin_mips_dpsu_h_qbl
+      {Intrinsic::mips_dpsu_h_qbr, 62840}, // __builtin_mips_dpsu_h_qbr
+      {Intrinsic::mips_dpsx_w_ph, 63010}, // __builtin_mips_dpsx_w_ph
+      {Intrinsic::mips_extp, 63035}, // __builtin_mips_extp
+      {Intrinsic::mips_extpdp, 63055}, // __builtin_mips_extpdp
+      {Intrinsic::mips_extr_r_w, 63077}, // __builtin_mips_extr_r_w
+      {Intrinsic::mips_extr_rs_w, 63101}, // __builtin_mips_extr_rs_w
+      {Intrinsic::mips_extr_s_h, 63126}, // __builtin_mips_extr_s_h
+      {Intrinsic::mips_extr_w, 63150}, // __builtin_mips_extr_w
+      {Intrinsic::mips_insv, 66202}, // __builtin_mips_insv
+      {Intrinsic::mips_lbux, 66310}, // __builtin_mips_lbux
+      {Intrinsic::mips_lhx, 66486}, // __builtin_mips_lhx
+      {Intrinsic::mips_lsa, 66505}, // __builtin_mips_lsa
+      {Intrinsic::mips_lwx, 66524}, // __builtin_mips_lwx
+      {Intrinsic::mips_madd, 66543}, // __builtin_mips_madd
+      {Intrinsic::mips_maddu, 66657}, // __builtin_mips_maddu
+      {Intrinsic::mips_maq_s_w_phl, 66766}, // __builtin_mips_maq_s_w_phl
+      {Intrinsic::mips_maq_s_w_phr, 66793}, // __builtin_mips_maq_s_w_phr
+      {Intrinsic::mips_maq_sa_w_phl, 66820}, // __builtin_mips_maq_sa_w_phl
+      {Intrinsic::mips_maq_sa_w_phr, 66848}, // __builtin_mips_maq_sa_w_phr
+      {Intrinsic::mips_modsub, 67948}, // __builtin_mips_modsub
+      {Intrinsic::mips_msub, 67991}, // __builtin_mips_msub
+      {Intrinsic::mips_msubu, 68105}, // __builtin_mips_msubu
+      {Intrinsic::mips_mthlip, 68214}, // __builtin_mips_mthlip
+      {Intrinsic::mips_mul_ph, 68236}, // __builtin_mips_mul_ph
+      {Intrinsic::mips_mul_s_ph, 68302}, // __builtin_mips_mul_s_ph
+      {Intrinsic::mips_muleq_s_w_phl, 68326}, // __builtin_mips_muleq_s_w_phl
+      {Intrinsic::mips_muleq_s_w_phr, 68355}, // __builtin_mips_muleq_s_w_phr
+      {Intrinsic::mips_muleu_s_ph_qbl, 68384}, // __builtin_mips_muleu_s_ph_qbl
+      {Intrinsic::mips_muleu_s_ph_qbr, 68414}, // __builtin_mips_muleu_s_ph_qbr
+      {Intrinsic::mips_mulq_rs_ph, 68444}, // __builtin_mips_mulq_rs_ph
+      {Intrinsic::mips_mulq_rs_w, 68470}, // __builtin_mips_mulq_rs_w
+      {Intrinsic::mips_mulq_s_ph, 68495}, // __builtin_mips_mulq_s_ph
+      {Intrinsic::mips_mulq_s_w, 68520}, // __builtin_mips_mulq_s_w
+      {Intrinsic::mips_mulsa_w_ph, 68590}, // __builtin_mips_mulsa_w_ph
+      {Intrinsic::mips_mulsaq_s_w_ph, 68616}, // __builtin_mips_mulsaq_s_w_ph
+      {Intrinsic::mips_mult, 68645}, // __builtin_mips_mult
+      {Intrinsic::mips_multu, 68665}, // __builtin_mips_multu
+      {Intrinsic::mips_packrl_ph, 69018}, // __builtin_mips_packrl_ph
+      {Intrinsic::mips_pick_ph, 69303}, // __builtin_mips_pick_ph
+      {Intrinsic::mips_pick_qb, 69326}, // __builtin_mips_pick_qb
+      {Intrinsic::mips_preceq_w_phl, 69349}, // __builtin_mips_preceq_w_phl
+      {Intrinsic::mips_preceq_w_phr, 69377}, // __builtin_mips_preceq_w_phr
+      {Intrinsic::mips_precequ_ph_qbl, 69405}, // __builtin_mips_precequ_ph_qbl
+      {Intrinsic::mips_precequ_ph_qbla, 69435}, // __builtin_mips_precequ_ph_qbla
+      {Intrinsic::mips_precequ_ph_qbr, 69466}, // __builtin_mips_precequ_ph_qbr
+      {Intrinsic::mips_precequ_ph_qbra, 69496}, // __builtin_mips_precequ_ph_qbra
+      {Intrinsic::mips_preceu_ph_qbl, 69527}, // __builtin_mips_preceu_ph_qbl
+      {Intrinsic::mips_preceu_ph_qbla, 69556}, // __builtin_mips_preceu_ph_qbla
+      {Intrinsic::mips_preceu_ph_qbr, 69586}, // __builtin_mips_preceu_ph_qbr
+      {Intrinsic::mips_preceu_ph_qbra, 69615}, // __builtin_mips_preceu_ph_qbra
+      {Intrinsic::mips_precr_qb_ph, 69645}, // __builtin_mips_precr_qb_ph
+      {Intrinsic::mips_precr_sra_ph_w, 69672}, // __builtin_mips_precr_sra_ph_w
+      {Intrinsic::mips_precr_sra_r_ph_w, 69702}, // __builtin_mips_precr_sra_r_ph_w
+      {Intrinsic::mips_precrq_ph_w, 69734}, // __builtin_mips_precrq_ph_w
+      {Intrinsic::mips_precrq_qb_ph, 69761}, // __builtin_mips_precrq_qb_ph
+      {Intrinsic::mips_precrq_rs_ph_w, 69789}, // __builtin_mips_precrq_rs_ph_w
+      {Intrinsic::mips_precrqu_s_qb_ph, 69819}, // __builtin_mips_precrqu_s_qb_ph
+      {Intrinsic::mips_prepend, 69850}, // __builtin_mips_prepend
+      {Intrinsic::mips_raddu_w_qb, 69873}, // __builtin_mips_raddu_w_qb
+      {Intrinsic::mips_rddsp, 69899}, // __builtin_mips_rddsp
+      {Intrinsic::mips_repl_ph, 69920}, // __builtin_mips_repl_ph
+      {Intrinsic::mips_repl_qb, 69943}, // __builtin_mips_repl_qb
+      {Intrinsic::mips_shilo, 70202}, // __builtin_mips_shilo
+      {Intrinsic::mips_shll_ph, 70223}, // __builtin_mips_shll_ph
+      {Intrinsic::mips_shll_qb, 70246}, // __builtin_mips_shll_qb
+      {Intrinsic::mips_shll_s_ph, 70269}, // __builtin_mips_shll_s_ph
+      {Intrinsic::mips_shll_s_w, 70294}, // __builtin_mips_shll_s_w
+      {Intrinsic::mips_shra_ph, 70318}, // __builtin_mips_shra_ph
+      {Intrinsic::mips_shra_qb, 70341}, // __builtin_mips_shra_qb
+      {Intrinsic::mips_shra_r_ph, 70364}, // __builtin_mips_shra_r_ph
+      {Intrinsic::mips_shra_r_qb, 70389}, // __builtin_mips_shra_r_qb
+      {Intrinsic::mips_shra_r_w, 70414}, // __builtin_mips_shra_r_w
+      {Intrinsic::mips_shrl_ph, 70438}, // __builtin_mips_shrl_ph
+      {Intrinsic::mips_shrl_qb, 70461}, // __builtin_mips_shrl_qb
+      {Intrinsic::mips_subq_ph, 71740}, // __builtin_mips_subq_ph
+      {Intrinsic::mips_subq_s_ph, 71763}, // __builtin_mips_subq_s_ph
+      {Intrinsic::mips_subq_s_w, 71788}, // __builtin_mips_subq_s_w
+      {Intrinsic::mips_subqh_ph, 71812}, // __builtin_mips_subqh_ph
+      {Intrinsic::mips_subqh_r_ph, 71836}, // __builtin_mips_subqh_r_ph
+      {Intrinsic::mips_subqh_r_w, 71862}, // __builtin_mips_subqh_r_w
+      {Intrinsic::mips_subqh_w, 71887}, // __builtin_mips_subqh_w
+      {Intrinsic::mips_subu_ph, 72294}, // __builtin_mips_subu_ph
+      {Intrinsic::mips_subu_qb, 72317}, // __builtin_mips_subu_qb
+      {Intrinsic::mips_subu_s_ph, 72340}, // __builtin_mips_subu_s_ph
+      {Intrinsic::mips_subu_s_qb, 72365}, // __builtin_mips_subu_s_qb
+      {Intrinsic::mips_subuh_qb, 72390}, // __builtin_mips_subuh_qb
+      {Intrinsic::mips_subuh_r_qb, 72414}, // __builtin_mips_subuh_r_qb
+      {Intrinsic::mips_wrdsp, 72696}, // __builtin_mips_wrdsp
+      {Intrinsic::mips_add_a_b, 57794}, // __builtin_msa_add_a_b
+      {Intrinsic::mips_add_a_d, 57816}, // __builtin_msa_add_a_d
+      {Intrinsic::mips_add_a_h, 57838}, // __builtin_msa_add_a_h
+      {Intrinsic::mips_add_a_w, 57860}, // __builtin_msa_add_a_w
+      {Intrinsic::mips_adds_a_b, 58052}, // __builtin_msa_adds_a_b
+      {Intrinsic::mips_adds_a_d, 58075}, // __builtin_msa_adds_a_d
+      {Intrinsic::mips_adds_a_h, 58098}, // __builtin_msa_adds_a_h
+      {Intrinsic::mips_adds_a_w, 58121}, // __builtin_msa_adds_a_w
+      {Intrinsic::mips_adds_s_b, 58144}, // __builtin_msa_adds_s_b
+      {Intrinsic::mips_adds_s_d, 58167}, // __builtin_msa_adds_s_d
+      {Intrinsic::mips_adds_s_h, 58190}, // __builtin_msa_adds_s_h
+      {Intrinsic::mips_adds_s_w, 58213}, // __builtin_msa_adds_s_w
+      {Intrinsic::mips_adds_u_b, 58236}, // __builtin_msa_adds_u_b
+      {Intrinsic::mips_adds_u_d, 58259}, // __builtin_msa_adds_u_d
+      {Intrinsic::mips_adds_u_h, 58282}, // __builtin_msa_adds_u_h
+      {Intrinsic::mips_adds_u_w, 58305}, // __builtin_msa_adds_u_w
+      {Intrinsic::mips_addv_b, 58495}, // __builtin_msa_addv_b
+      {Intrinsic::mips_addv_d, 58516}, // __builtin_msa_addv_d
+      {Intrinsic::mips_addv_h, 58537}, // __builtin_msa_addv_h
+      {Intrinsic::mips_addv_w, 58558}, // __builtin_msa_addv_w
+      {Intrinsic::mips_addvi_b, 58579}, // __builtin_msa_addvi_b
+      {Intrinsic::mips_addvi_d, 58601}, // __builtin_msa_addvi_d
+      {Intrinsic::mips_addvi_h, 58623}, // __builtin_msa_addvi_h
+      {Intrinsic::mips_addvi_w, 58645}, // __builtin_msa_addvi_w
+      {Intrinsic::mips_and_v, 58688}, // __builtin_msa_and_v
+      {Intrinsic::mips_andi_b, 58708}, // __builtin_msa_andi_b
+      {Intrinsic::mips_asub_s_b, 58751}, // __builtin_msa_asub_s_b
+      {Intrinsic::mips_asub_s_d, 58774}, // __builtin_msa_asub_s_d
+      {Intrinsic::mips_asub_s_h, 58797}, // __builtin_msa_asub_s_h
+      {Intrinsic::mips_asub_s_w, 58820}, // __builtin_msa_asub_s_w
+      {Intrinsic::mips_asub_u_b, 58843}, // __builtin_msa_asub_u_b
+      {Intrinsic::mips_asub_u_d, 58866}, // __builtin_msa_asub_u_d
+      {Intrinsic::mips_asub_u_h, 58889}, // __builtin_msa_asub_u_h
+      {Intrinsic::mips_asub_u_w, 58912}, // __builtin_msa_asub_u_w
+      {Intrinsic::mips_ave_s_b, 58935}, // __builtin_msa_ave_s_b
+      {Intrinsic::mips_ave_s_d, 58957}, // __builtin_msa_ave_s_d
+      {Intrinsic::mips_ave_s_h, 58979}, // __builtin_msa_ave_s_h
+      {Intrinsic::mips_ave_s_w, 59001}, // __builtin_msa_ave_s_w
+      {Intrinsic::mips_ave_u_b, 59023}, // __builtin_msa_ave_u_b
+      {Intrinsic::mips_ave_u_d, 59045}, // __builtin_msa_ave_u_d
+      {Intrinsic::mips_ave_u_h, 59067}, // __builtin_msa_ave_u_h
+      {Intrinsic::mips_ave_u_w, 59089}, // __builtin_msa_ave_u_w
+      {Intrinsic::mips_aver_s_b, 59111}, // __builtin_msa_aver_s_b
+      {Intrinsic::mips_aver_s_d, 59134}, // __builtin_msa_aver_s_d
+      {Intrinsic::mips_aver_s_h, 59157}, // __builtin_msa_aver_s_h
+      {Intrinsic::mips_aver_s_w, 59180}, // __builtin_msa_aver_s_w
+      {Intrinsic::mips_aver_u_b, 59203}, // __builtin_msa_aver_u_b
+      {Intrinsic::mips_aver_u_d, 59226}, // __builtin_msa_aver_u_d
+      {Intrinsic::mips_aver_u_h, 59249}, // __builtin_msa_aver_u_h
+      {Intrinsic::mips_aver_u_w, 59272}, // __builtin_msa_aver_u_w
+      {Intrinsic::mips_bclr_b, 59317}, // __builtin_msa_bclr_b
+      {Intrinsic::mips_bclr_d, 59338}, // __builtin_msa_bclr_d
+      {Intrinsic::mips_bclr_h, 59359}, // __builtin_msa_bclr_h
+      {Intrinsic::mips_bclr_w, 59380}, // __builtin_msa_bclr_w
+      {Intrinsic::mips_bclri_b, 59401}, // __builtin_msa_bclri_b
+      {Intrinsic::mips_bclri_d, 59423}, // __builtin_msa_bclri_d
+      {Intrinsic::mips_bclri_h, 59445}, // __builtin_msa_bclri_h
+      {Intrinsic::mips_bclri_w, 59467}, // __builtin_msa_bclri_w
+      {Intrinsic::mips_binsl_b, 59489}, // __builtin_msa_binsl_b
+      {Intrinsic::mips_binsl_d, 59511}, // __builtin_msa_binsl_d
+      {Intrinsic::mips_binsl_h, 59533}, // __builtin_msa_binsl_h
+      {Intrinsic::mips_binsl_w, 59555}, // __builtin_msa_binsl_w
+      {Intrinsic::mips_binsli_b, 59577}, // __builtin_msa_binsli_b
+      {Intrinsic::mips_binsli_d, 59600}, // __builtin_msa_binsli_d
+      {Intrinsic::mips_binsli_h, 59623}, // __builtin_msa_binsli_h
+      {Intrinsic::mips_binsli_w, 59646}, // __builtin_msa_binsli_w
+      {Intrinsic::mips_binsr_b, 59669}, // __builtin_msa_binsr_b
+      {Intrinsic::mips_binsr_d, 59691}, // __builtin_msa_binsr_d
+      {Intrinsic::mips_binsr_h, 59713}, // __builtin_msa_binsr_h
+      {Intrinsic::mips_binsr_w, 59735}, // __builtin_msa_binsr_w
+      {Intrinsic::mips_binsri_b, 59757}, // __builtin_msa_binsri_b
+      {Intrinsic::mips_binsri_d, 59780}, // __builtin_msa_binsri_d
+      {Intrinsic::mips_binsri_h, 59803}, // __builtin_msa_binsri_h
+      {Intrinsic::mips_binsri_w, 59826}, // __builtin_msa_binsri_w
+      {Intrinsic::mips_bmnz_v, 59871}, // __builtin_msa_bmnz_v
+      {Intrinsic::mips_bmnzi_b, 59892}, // __builtin_msa_bmnzi_b
+      {Intrinsic::mips_bmz_v, 59914}, // __builtin_msa_bmz_v
+      {Intrinsic::mips_bmzi_b, 59934}, // __builtin_msa_bmzi_b
+      {Intrinsic::mips_bneg_b, 59955}, // __builtin_msa_bneg_b
+      {Intrinsic::mips_bneg_d, 59976}, // __builtin_msa_bneg_d
+      {Intrinsic::mips_bneg_h, 59997}, // __builtin_msa_bneg_h
+      {Intrinsic::mips_bneg_w, 60018}, // __builtin_msa_bneg_w
+      {Intrinsic::mips_bnegi_b, 60039}, // __builtin_msa_bnegi_b
+      {Intrinsic::mips_bnegi_d, 60061}, // __builtin_msa_bnegi_d
+      {Intrinsic::mips_bnegi_h, 60083}, // __builtin_msa_bnegi_h
+      {Intrinsic::mips_bnegi_w, 60105}, // __builtin_msa_bnegi_w
+      {Intrinsic::mips_bnz_b, 60127}, // __builtin_msa_bnz_b
+      {Intrinsic::mips_bnz_d, 60147}, // __builtin_msa_bnz_d
+      {Intrinsic::mips_bnz_h, 60167}, // __builtin_msa_bnz_h
+      {Intrinsic::mips_bnz_v, 60187}, // __builtin_msa_bnz_v
+      {Intrinsic::mips_bnz_w, 60207}, // __builtin_msa_bnz_w
+      {Intrinsic::mips_bsel_v, 60251}, // __builtin_msa_bsel_v
+      {Intrinsic::mips_bseli_b, 60272}, // __builtin_msa_bseli_b
+      {Intrinsic::mips_bset_b, 60294}, // __builtin_msa_bset_b
+      {Intrinsic::mips_bset_d, 60315}, // __builtin_msa_bset_d
+      {Intrinsic::mips_bset_h, 60336}, // __builtin_msa_bset_h
+      {Intrinsic::mips_bset_w, 60357}, // __builtin_msa_bset_w
+      {Intrinsic::mips_bseti_b, 60378}, // __builtin_msa_bseti_b
+      {Intrinsic::mips_bseti_d, 60400}, // __builtin_msa_bseti_d
+      {Intrinsic::mips_bseti_h, 60422}, // __builtin_msa_bseti_h
+      {Intrinsic::mips_bseti_w, 60444}, // __builtin_msa_bseti_w
+      {Intrinsic::mips_bz_b, 60466}, // __builtin_msa_bz_b
+      {Intrinsic::mips_bz_d, 60485}, // __builtin_msa_bz_d
+      {Intrinsic::mips_bz_h, 60504}, // __builtin_msa_bz_h
+      {Intrinsic::mips_bz_v, 60523}, // __builtin_msa_bz_v
+      {Intrinsic::mips_bz_w, 60542}, // __builtin_msa_bz_w
+      {Intrinsic::mips_ceq_b, 60561}, // __builtin_msa_ceq_b
+      {Intrinsic::mips_ceq_d, 60581}, // __builtin_msa_ceq_d
+      {Intrinsic::mips_ceq_h, 60601}, // __builtin_msa_ceq_h
+      {Intrinsic::mips_ceq_w, 60621}, // __builtin_msa_ceq_w
+      {Intrinsic::mips_ceqi_b, 60641}, // __builtin_msa_ceqi_b
+      {Intrinsic::mips_ceqi_d, 60662}, // __builtin_msa_ceqi_d
+      {Intrinsic::mips_ceqi_h, 60683}, // __builtin_msa_ceqi_h
+      {Intrinsic::mips_ceqi_w, 60704}, // __builtin_msa_ceqi_w
+      {Intrinsic::mips_cfcmsa, 60725}, // __builtin_msa_cfcmsa
+      {Intrinsic::mips_cle_s_b, 60746}, // __builtin_msa_cle_s_b
+      {Intrinsic::mips_cle_s_d, 60768}, // __builtin_msa_cle_s_d
+      {Intrinsic::mips_cle_s_h, 60790}, // __builtin_msa_cle_s_h
+      {Intrinsic::mips_cle_s_w, 60812}, // __builtin_msa_cle_s_w
+      {Intrinsic::mips_cle_u_b, 60834}, // __builtin_msa_cle_u_b
+      {Intrinsic::mips_cle_u_d, 60856}, // __builtin_msa_cle_u_d
+      {Intrinsic::mips_cle_u_h, 60878}, // __builtin_msa_cle_u_h
+      {Intrinsic::mips_cle_u_w, 60900}, // __builtin_msa_cle_u_w
+      {Intrinsic::mips_clei_s_b, 60922}, // __builtin_msa_clei_s_b
+      {Intrinsic::mips_clei_s_d, 60945}, // __builtin_msa_clei_s_d
+      {Intrinsic::mips_clei_s_h, 60968}, // __builtin_msa_clei_s_h
+      {Intrinsic::mips_clei_s_w, 60991}, // __builtin_msa_clei_s_w
+      {Intrinsic::mips_clei_u_b, 61014}, // __builtin_msa_clei_u_b
+      {Intrinsic::mips_clei_u_d, 61037}, // __builtin_msa_clei_u_d
+      {Intrinsic::mips_clei_u_h, 61060}, // __builtin_msa_clei_u_h
+      {Intrinsic::mips_clei_u_w, 61083}, // __builtin_msa_clei_u_w
+      {Intrinsic::mips_clt_s_b, 61106}, // __builtin_msa_clt_s_b
+      {Intrinsic::mips_clt_s_d, 61128}, // __builtin_msa_clt_s_d
+      {Intrinsic::mips_clt_s_h, 61150}, // __builtin_msa_clt_s_h
+      {Intrinsic::mips_clt_s_w, 61172}, // __builtin_msa_clt_s_w
+      {Intrinsic::mips_clt_u_b, 61194}, // __builtin_msa_clt_u_b
+      {Intrinsic::mips_clt_u_d, 61216}, // __builtin_msa_clt_u_d
+      {Intrinsic::mips_clt_u_h, 61238}, // __builtin_msa_clt_u_h
+      {Intrinsic::mips_clt_u_w, 61260}, // __builtin_msa_clt_u_w
+      {Intrinsic::mips_clti_s_b, 61282}, // __builtin_msa_clti_s_b
+      {Intrinsic::mips_clti_s_d, 61305}, // __builtin_msa_clti_s_d
+      {Intrinsic::mips_clti_s_h, 61328}, // __builtin_msa_clti_s_h
+      {Intrinsic::mips_clti_s_w, 61351}, // __builtin_msa_clti_s_w
+      {Intrinsic::mips_clti_u_b, 61374}, // __builtin_msa_clti_u_b
+      {Intrinsic::mips_clti_u_d, 61397}, // __builtin_msa_clti_u_d
+      {Intrinsic::mips_clti_u_h, 61420}, // __builtin_msa_clti_u_h
+      {Intrinsic::mips_clti_u_w, 61443}, // __builtin_msa_clti_u_w
+      {Intrinsic::mips_copy_s_b, 61784}, // __builtin_msa_copy_s_b
+      {Intrinsic::mips_copy_s_d, 61807}, // __builtin_msa_copy_s_d
+      {Intrinsic::mips_copy_s_h, 61830}, // __builtin_msa_copy_s_h
+      {Intrinsic::mips_copy_s_w, 61853}, // __builtin_msa_copy_s_w
+      {Intrinsic::mips_copy_u_b, 61876}, // __builtin_msa_copy_u_b
+      {Intrinsic::mips_copy_u_d, 61899}, // __builtin_msa_copy_u_d
+      {Intrinsic::mips_copy_u_h, 61922}, // __builtin_msa_copy_u_h
+      {Intrinsic::mips_copy_u_w, 61945}, // __builtin_msa_copy_u_w
+      {Intrinsic::mips_ctcmsa, 61968}, // __builtin_msa_ctcmsa
+      {Intrinsic::mips_div_s_b, 61989}, // __builtin_msa_div_s_b
+      {Intrinsic::mips_div_s_d, 62011}, // __builtin_msa_div_s_d
+      {Intrinsic::mips_div_s_h, 62033}, // __builtin_msa_div_s_h
+      {Intrinsic::mips_div_s_w, 62055}, // __builtin_msa_div_s_w
+      {Intrinsic::mips_div_u_b, 62077}, // __builtin_msa_div_u_b
+      {Intrinsic::mips_div_u_d, 62099}, // __builtin_msa_div_u_d
+      {Intrinsic::mips_div_u_h, 62121}, // __builtin_msa_div_u_h
+      {Intrinsic::mips_div_u_w, 62143}, // __builtin_msa_div_u_w
+      {Intrinsic::mips_dotp_s_d, 62185}, // __builtin_msa_dotp_s_d
+      {Intrinsic::mips_dotp_s_h, 62208}, // __builtin_msa_dotp_s_h
+      {Intrinsic::mips_dotp_s_w, 62231}, // __builtin_msa_dotp_s_w
+      {Intrinsic::mips_dotp_u_d, 62254}, // __builtin_msa_dotp_u_d
+      {Intrinsic::mips_dotp_u_h, 62277}, // __builtin_msa_dotp_u_h
+      {Intrinsic::mips_dotp_u_w, 62300}, // __builtin_msa_dotp_u_w
+      {Intrinsic::mips_dpadd_s_d, 62347}, // __builtin_msa_dpadd_s_d
+      {Intrinsic::mips_dpadd_s_h, 62371}, // __builtin_msa_dpadd_s_h
+      {Intrinsic::mips_dpadd_s_w, 62395}, // __builtin_msa_dpadd_s_w
+      {Intrinsic::mips_dpadd_u_d, 62419}, // __builtin_msa_dpadd_u_d
+      {Intrinsic::mips_dpadd_u_h, 62443}, // __builtin_msa_dpadd_u_h
+      {Intrinsic::mips_dpadd_u_w, 62467}, // __builtin_msa_dpadd_u_w
+      {Intrinsic::mips_dpsub_s_d, 62866}, // __builtin_msa_dpsub_s_d
+      {Intrinsic::mips_dpsub_s_h, 62890}, // __builtin_msa_dpsub_s_h
+      {Intrinsic::mips_dpsub_s_w, 62914}, // __builtin_msa_dpsub_s_w
+      {Intrinsic::mips_dpsub_u_d, 62938}, // __builtin_msa_dpsub_u_d
+      {Intrinsic::mips_dpsub_u_h, 62962}, // __builtin_msa_dpsub_u_h
+      {Intrinsic::mips_dpsub_u_w, 62986}, // __builtin_msa_dpsub_u_w
+      {Intrinsic::mips_fadd_d, 63172}, // __builtin_msa_fadd_d
+      {Intrinsic::mips_fadd_w, 63193}, // __builtin_msa_fadd_w
+      {Intrinsic::mips_fcaf_d, 63214}, // __builtin_msa_fcaf_d
+      {Intrinsic::mips_fcaf_w, 63235}, // __builtin_msa_fcaf_w
+      {Intrinsic::mips_fceq_d, 63256}, // __builtin_msa_fceq_d
+      {Intrinsic::mips_fceq_w, 63277}, // __builtin_msa_fceq_w
+      {Intrinsic::mips_fclass_d, 63298}, // __builtin_msa_fclass_d
+      {Intrinsic::mips_fclass_w, 63321}, // __builtin_msa_fclass_w
+      {Intrinsic::mips_fcle_d, 63344}, // __builtin_msa_fcle_d
+      {Intrinsic::mips_fcle_w, 63365}, // __builtin_msa_fcle_w
+      {Intrinsic::mips_fclt_d, 63386}, // __builtin_msa_fclt_d
+      {Intrinsic::mips_fclt_w, 63407}, // __builtin_msa_fclt_w
+      {Intrinsic::mips_fcne_d, 63428}, // __builtin_msa_fcne_d
+      {Intrinsic::mips_fcne_w, 63449}, // __builtin_msa_fcne_w
+      {Intrinsic::mips_fcor_d, 63470}, // __builtin_msa_fcor_d
+      {Intrinsic::mips_fcor_w, 63491}, // __builtin_msa_fcor_w
+      {Intrinsic::mips_fcueq_d, 63512}, // __builtin_msa_fcueq_d
+      {Intrinsic::mips_fcueq_w, 63534}, // __builtin_msa_fcueq_w
+      {Intrinsic::mips_fcule_d, 63556}, // __builtin_msa_fcule_d
+      {Intrinsic::mips_fcule_w, 63578}, // __builtin_msa_fcule_w
+      {Intrinsic::mips_fcult_d, 63600}, // __builtin_msa_fcult_d
+      {Intrinsic::mips_fcult_w, 63622}, // __builtin_msa_fcult_w
+      {Intrinsic::mips_fcun_d, 63644}, // __builtin_msa_fcun_d
+      {Intrinsic::mips_fcun_w, 63665}, // __builtin_msa_fcun_w
+      {Intrinsic::mips_fcune_d, 63686}, // __builtin_msa_fcune_d
+      {Intrinsic::mips_fcune_w, 63708}, // __builtin_msa_fcune_w
+      {Intrinsic::mips_fdiv_d, 63730}, // __builtin_msa_fdiv_d
+      {Intrinsic::mips_fdiv_w, 63751}, // __builtin_msa_fdiv_w
+      {Intrinsic::mips_fexdo_h, 63772}, // __builtin_msa_fexdo_h
+      {Intrinsic::mips_fexdo_w, 63794}, // __builtin_msa_fexdo_w
+      {Intrinsic::mips_fexp2_d, 63816}, // __builtin_msa_fexp2_d
+      {Intrinsic::mips_fexp2_w, 63838}, // __builtin_msa_fexp2_w
+      {Intrinsic::mips_fexupl_d, 63860}, // __builtin_msa_fexupl_d
+      {Intrinsic::mips_fexupl_w, 63883}, // __builtin_msa_fexupl_w
+      {Intrinsic::mips_fexupr_d, 63906}, // __builtin_msa_fexupr_d
+      {Intrinsic::mips_fexupr_w, 63929}, // __builtin_msa_fexupr_w
+      {Intrinsic::mips_ffint_s_d, 63952}, // __builtin_msa_ffint_s_d
+      {Intrinsic::mips_ffint_s_w, 63976}, // __builtin_msa_ffint_s_w
+      {Intrinsic::mips_ffint_u_d, 64000}, // __builtin_msa_ffint_u_d
+      {Intrinsic::mips_ffint_u_w, 64024}, // __builtin_msa_ffint_u_w
+      {Intrinsic::mips_ffql_d, 64048}, // __builtin_msa_ffql_d
+      {Intrinsic::mips_ffql_w, 64069}, // __builtin_msa_ffql_w
+      {Intrinsic::mips_ffqr_d, 64090}, // __builtin_msa_ffqr_d
+      {Intrinsic::mips_ffqr_w, 64111}, // __builtin_msa_ffqr_w
+      {Intrinsic::mips_fill_b, 64132}, // __builtin_msa_fill_b
+      {Intrinsic::mips_fill_d, 64153}, // __builtin_msa_fill_d
+      {Intrinsic::mips_fill_h, 64174}, // __builtin_msa_fill_h
+      {Intrinsic::mips_fill_w, 64195}, // __builtin_msa_fill_w
+      {Intrinsic::mips_flog2_d, 64216}, // __builtin_msa_flog2_d
+      {Intrinsic::mips_flog2_w, 64238}, // __builtin_msa_flog2_w
+      {Intrinsic::mips_fmadd_d, 64260}, // __builtin_msa_fmadd_d
+      {Intrinsic::mips_fmadd_w, 64282}, // __builtin_msa_fmadd_w
+      {Intrinsic::mips_fmax_a_d, 64304}, // __builtin_msa_fmax_a_d
+      {Intrinsic::mips_fmax_a_w, 64327}, // __builtin_msa_fmax_a_w
+      {Intrinsic::mips_fmax_d, 64350}, // __builtin_msa_fmax_d
+      {Intrinsic::mips_fmax_w, 64371}, // __builtin_msa_fmax_w
+      {Intrinsic::mips_fmin_a_d, 64392}, // __builtin_msa_fmin_a_d
+      {Intrinsic::mips_fmin_a_w, 64415}, // __builtin_msa_fmin_a_w
+      {Intrinsic::mips_fmin_d, 64438}, // __builtin_msa_fmin_d
+      {Intrinsic::mips_fmin_w, 64459}, // __builtin_msa_fmin_w
+      {Intrinsic::mips_fmsub_d, 64480}, // __builtin_msa_fmsub_d
+      {Intrinsic::mips_fmsub_w, 64502}, // __builtin_msa_fmsub_w
+      {Intrinsic::mips_fmul_d, 64524}, // __builtin_msa_fmul_d
+      {Intrinsic::mips_fmul_w, 64545}, // __builtin_msa_fmul_w
+      {Intrinsic::mips_frcp_d, 64566}, // __builtin_msa_frcp_d
+      {Intrinsic::mips_frcp_w, 64587}, // __builtin_msa_frcp_w
+      {Intrinsic::mips_frint_d, 64608}, // __builtin_msa_frint_d
+      {Intrinsic::mips_frint_w, 64630}, // __builtin_msa_frint_w
+      {Intrinsic::mips_frsqrt_d, 64652}, // __builtin_msa_frsqrt_d
+      {Intrinsic::mips_frsqrt_w, 64675}, // __builtin_msa_frsqrt_w
+      {Intrinsic::mips_fsaf_d, 64698}, // __builtin_msa_fsaf_d
+      {Intrinsic::mips_fsaf_w, 64719}, // __builtin_msa_fsaf_w
+      {Intrinsic::mips_fseq_d, 64740}, // __builtin_msa_fseq_d
+      {Intrinsic::mips_fseq_w, 64761}, // __builtin_msa_fseq_w
+      {Intrinsic::mips_fsle_d, 64782}, // __builtin_msa_fsle_d
+      {Intrinsic::mips_fsle_w, 64803}, // __builtin_msa_fsle_w
+      {Intrinsic::mips_fslt_d, 64824}, // __builtin_msa_fslt_d
+      {Intrinsic::mips_fslt_w, 64845}, // __builtin_msa_fslt_w
+      {Intrinsic::mips_fsne_d, 64866}, // __builtin_msa_fsne_d
+      {Intrinsic::mips_fsne_w, 64887}, // __builtin_msa_fsne_w
+      {Intrinsic::mips_fsor_d, 64908}, // __builtin_msa_fsor_d
+      {Intrinsic::mips_fsor_w, 64929}, // __builtin_msa_fsor_w
+      {Intrinsic::mips_fsqrt_d, 64950}, // __builtin_msa_fsqrt_d
+      {Intrinsic::mips_fsqrt_w, 64972}, // __builtin_msa_fsqrt_w
+      {Intrinsic::mips_fsub_d, 64994}, // __builtin_msa_fsub_d
+      {Intrinsic::mips_fsub_w, 65015}, // __builtin_msa_fsub_w
+      {Intrinsic::mips_fsueq_d, 65036}, // __builtin_msa_fsueq_d
+      {Intrinsic::mips_fsueq_w, 65058}, // __builtin_msa_fsueq_w
+      {Intrinsic::mips_fsule_d, 65080}, // __builtin_msa_fsule_d
+      {Intrinsic::mips_fsule_w, 65102}, // __builtin_msa_fsule_w
+      {Intrinsic::mips_fsult_d, 65124}, // __builtin_msa_fsult_d
+      {Intrinsic::mips_fsult_w, 65146}, // __builtin_msa_fsult_w
+      {Intrinsic::mips_fsun_d, 65168}, // __builtin_msa_fsun_d
+      {Intrinsic::mips_fsun_w, 65189}, // __builtin_msa_fsun_w
+      {Intrinsic::mips_fsune_d, 65210}, // __builtin_msa_fsune_d
+      {Intrinsic::mips_fsune_w, 65232}, // __builtin_msa_fsune_w
+      {Intrinsic::mips_ftint_s_d, 65254}, // __builtin_msa_ftint_s_d
+      {Intrinsic::mips_ftint_s_w, 65278}, // __builtin_msa_ftint_s_w
+      {Intrinsic::mips_ftint_u_d, 65302}, // __builtin_msa_ftint_u_d
+      {Intrinsic::mips_ftint_u_w, 65326}, // __builtin_msa_ftint_u_w
+      {Intrinsic::mips_ftq_h, 65350}, // __builtin_msa_ftq_h
+      {Intrinsic::mips_ftq_w, 65370}, // __builtin_msa_ftq_w
+      {Intrinsic::mips_ftrunc_s_d, 65390}, // __builtin_msa_ftrunc_s_d
+      {Intrinsic::mips_ftrunc_s_w, 65415}, // __builtin_msa_ftrunc_s_w
+      {Intrinsic::mips_ftrunc_u_d, 65440}, // __builtin_msa_ftrunc_u_d
+      {Intrinsic::mips_ftrunc_u_w, 65465}, // __builtin_msa_ftrunc_u_w
+      {Intrinsic::mips_hadd_s_d, 65490}, // __builtin_msa_hadd_s_d
+      {Intrinsic::mips_hadd_s_h, 65513}, // __builtin_msa_hadd_s_h
+      {Intrinsic::mips_hadd_s_w, 65536}, // __builtin_msa_hadd_s_w
+      {Intrinsic::mips_hadd_u_d, 65559}, // __builtin_msa_hadd_u_d
+      {Intrinsic::mips_hadd_u_h, 65582}, // __builtin_msa_hadd_u_h
+      {Intrinsic::mips_hadd_u_w, 65605}, // __builtin_msa_hadd_u_w
+      {Intrinsic::mips_hsub_s_d, 65628}, // __builtin_msa_hsub_s_d
+      {Intrinsic::mips_hsub_s_h, 65651}, // __builtin_msa_hsub_s_h
+      {Intrinsic::mips_hsub_s_w, 65674}, // __builtin_msa_hsub_s_w
+      {Intrinsic::mips_hsub_u_d, 65697}, // __builtin_msa_hsub_u_d
+      {Intrinsic::mips_hsub_u_h, 65720}, // __builtin_msa_hsub_u_h
+      {Intrinsic::mips_hsub_u_w, 65743}, // __builtin_msa_hsub_u_w
+      {Intrinsic::mips_ilvev_b, 65766}, // __builtin_msa_ilvev_b
+      {Intrinsic::mips_ilvev_d, 65788}, // __builtin_msa_ilvev_d
+      {Intrinsic::mips_ilvev_h, 65810}, // __builtin_msa_ilvev_h
+      {Intrinsic::mips_ilvev_w, 65832}, // __builtin_msa_ilvev_w
+      {Intrinsic::mips_ilvl_b, 65854}, // __builtin_msa_ilvl_b
+      {Intrinsic::mips_ilvl_d, 65875}, // __builtin_msa_ilvl_d
+      {Intrinsic::mips_ilvl_h, 65896}, // __builtin_msa_ilvl_h
+      {Intrinsic::mips_ilvl_w, 65917}, // __builtin_msa_ilvl_w
+      {Intrinsic::mips_ilvod_b, 65938}, // __builtin_msa_ilvod_b
+      {Intrinsic::mips_ilvod_d, 65960}, // __builtin_msa_ilvod_d
+      {Intrinsic::mips_ilvod_h, 65982}, // __builtin_msa_ilvod_h
+      {Intrinsic::mips_ilvod_w, 66004}, // __builtin_msa_ilvod_w
+      {Intrinsic::mips_ilvr_b, 66026}, // __builtin_msa_ilvr_b
+      {Intrinsic::mips_ilvr_d, 66047}, // __builtin_msa_ilvr_d
+      {Intrinsic::mips_ilvr_h, 66068}, // __builtin_msa_ilvr_h
+      {Intrinsic::mips_ilvr_w, 66089}, // __builtin_msa_ilvr_w
+      {Intrinsic::mips_insert_b, 66110}, // __builtin_msa_insert_b
+      {Intrinsic::mips_insert_d, 66133}, // __builtin_msa_insert_d
+      {Intrinsic::mips_insert_h, 66156}, // __builtin_msa_insert_h
+      {Intrinsic::mips_insert_w, 66179}, // __builtin_msa_insert_w
+      {Intrinsic::mips_insve_b, 66222}, // __builtin_msa_insve_b
+      {Intrinsic::mips_insve_d, 66244}, // __builtin_msa_insve_d
+      {Intrinsic::mips_insve_h, 66266}, // __builtin_msa_insve_h
+      {Intrinsic::mips_insve_w, 66288}, // __builtin_msa_insve_w
+      {Intrinsic::mips_ld_b, 66330}, // __builtin_msa_ld_b
+      {Intrinsic::mips_ld_d, 66349}, // __builtin_msa_ld_d
+      {Intrinsic::mips_ld_h, 66368}, // __builtin_msa_ld_h
+      {Intrinsic::mips_ld_w, 66387}, // __builtin_msa_ld_w
+      {Intrinsic::mips_ldi_b, 66406}, // __builtin_msa_ldi_b
+      {Intrinsic::mips_ldi_d, 66426}, // __builtin_msa_ldi_d
+      {Intrinsic::mips_ldi_h, 66446}, // __builtin_msa_ldi_h
+      {Intrinsic::mips_ldi_w, 66466}, // __builtin_msa_ldi_w
+      {Intrinsic::mips_madd_q_h, 66563}, // __builtin_msa_madd_q_h
+      {Intrinsic::mips_madd_q_w, 66586}, // __builtin_msa_madd_q_w
+      {Intrinsic::mips_maddr_q_h, 66609}, // __builtin_msa_maddr_q_h
+      {Intrinsic::mips_maddr_q_w, 66633}, // __builtin_msa_maddr_q_w
+      {Intrinsic::mips_maddv_b, 66678}, // __builtin_msa_maddv_b
+      {Intrinsic::mips_maddv_d, 66700}, // __builtin_msa_maddv_d
+      {Intrinsic::mips_maddv_h, 66722}, // __builtin_msa_maddv_h
+      {Intrinsic::mips_maddv_w, 66744}, // __builtin_msa_maddv_w
+      {Intrinsic::mips_max_a_b, 66876}, // __builtin_msa_max_a_b
+      {Intrinsic::mips_max_a_d, 66898}, // __builtin_msa_max_a_d
+      {Intrinsic::mips_max_a_h, 66920}, // __builtin_msa_max_a_h
+      {Intrinsic::mips_max_a_w, 66942}, // __builtin_msa_max_a_w
+      {Intrinsic::mips_max_s_b, 66964}, // __builtin_msa_max_s_b
+      {Intrinsic::mips_max_s_d, 66986}, // __builtin_msa_max_s_d
+      {Intrinsic::mips_max_s_h, 67008}, // __builtin_msa_max_s_h
+      {Intrinsic::mips_max_s_w, 67030}, // __builtin_msa_max_s_w
+      {Intrinsic::mips_max_u_b, 67052}, // __builtin_msa_max_u_b
+      {Intrinsic::mips_max_u_d, 67074}, // __builtin_msa_max_u_d
+      {Intrinsic::mips_max_u_h, 67096}, // __builtin_msa_max_u_h
+      {Intrinsic::mips_max_u_w, 67118}, // __builtin_msa_max_u_w
+      {Intrinsic::mips_maxi_s_b, 67140}, // __builtin_msa_maxi_s_b
+      {Intrinsic::mips_maxi_s_d, 67163}, // __builtin_msa_maxi_s_d
+      {Intrinsic::mips_maxi_s_h, 67186}, // __builtin_msa_maxi_s_h
+      {Intrinsic::mips_maxi_s_w, 67209}, // __builtin_msa_maxi_s_w
+      {Intrinsic::mips_maxi_u_b, 67232}, // __builtin_msa_maxi_u_b
+      {Intrinsic::mips_maxi_u_d, 67255}, // __builtin_msa_maxi_u_d
+      {Intrinsic::mips_maxi_u_h, 67278}, // __builtin_msa_maxi_u_h
+      {Intrinsic::mips_maxi_u_w, 67301}, // __builtin_msa_maxi_u_w
+      {Intrinsic::mips_min_a_b, 67324}, // __builtin_msa_min_a_b
+      {Intrinsic::mips_min_a_d, 67346}, // __builtin_msa_min_a_d
+      {Intrinsic::mips_min_a_h, 67368}, // __builtin_msa_min_a_h
+      {Intrinsic::mips_min_a_w, 67390}, // __builtin_msa_min_a_w
+      {Intrinsic::mips_min_s_b, 67412}, // __builtin_msa_min_s_b
+      {Intrinsic::mips_min_s_d, 67434}, // __builtin_msa_min_s_d
+      {Intrinsic::mips_min_s_h, 67456}, // __builtin_msa_min_s_h
+      {Intrinsic::mips_min_s_w, 67478}, // __builtin_msa_min_s_w
+      {Intrinsic::mips_min_u_b, 67500}, // __builtin_msa_min_u_b
+      {Intrinsic::mips_min_u_d, 67522}, // __builtin_msa_min_u_d
+      {Intrinsic::mips_min_u_h, 67544}, // __builtin_msa_min_u_h
+      {Intrinsic::mips_min_u_w, 67566}, // __builtin_msa_min_u_w
+      {Intrinsic::mips_mini_s_b, 67588}, // __builtin_msa_mini_s_b
+      {Intrinsic::mips_mini_s_d, 67611}, // __builtin_msa_mini_s_d
+      {Intrinsic::mips_mini_s_h, 67634}, // __builtin_msa_mini_s_h
+      {Intrinsic::mips_mini_s_w, 67657}, // __builtin_msa_mini_s_w
+      {Intrinsic::mips_mini_u_b, 67680}, // __builtin_msa_mini_u_b
+      {Intrinsic::mips_mini_u_d, 67703}, // __builtin_msa_mini_u_d
+      {Intrinsic::mips_mini_u_h, 67726}, // __builtin_msa_mini_u_h
+      {Intrinsic::mips_mini_u_w, 67749}, // __builtin_msa_mini_u_w
+      {Intrinsic::mips_mod_s_b, 67772}, // __builtin_msa_mod_s_b
+      {Intrinsic::mips_mod_s_d, 67794}, // __builtin_msa_mod_s_d
+      {Intrinsic::mips_mod_s_h, 67816}, // __builtin_msa_mod_s_h
+      {Intrinsic::mips_mod_s_w, 67838}, // __builtin_msa_mod_s_w
+      {Intrinsic::mips_mod_u_b, 67860}, // __builtin_msa_mod_u_b
+      {Intrinsic::mips_mod_u_d, 67882}, // __builtin_msa_mod_u_d
+      {Intrinsic::mips_mod_u_h, 67904}, // __builtin_msa_mod_u_h
+      {Intrinsic::mips_mod_u_w, 67926}, // __builtin_msa_mod_u_w
+      {Intrinsic::mips_move_v, 67970}, // __builtin_msa_move_v
+      {Intrinsic::mips_msub_q_h, 68011}, // __builtin_msa_msub_q_h
+      {Intrinsic::mips_msub_q_w, 68034}, // __builtin_msa_msub_q_w
+      {Intrinsic::mips_msubr_q_h, 68057}, // __builtin_msa_msubr_q_h
+      {Intrinsic::mips_msubr_q_w, 68081}, // __builtin_msa_msubr_q_w
+      {Intrinsic::mips_msubv_b, 68126}, // __builtin_msa_msubv_b
+      {Intrinsic::mips_msubv_d, 68148}, // __builtin_msa_msubv_d
+      {Intrinsic::mips_msubv_h, 68170}, // __builtin_msa_msubv_h
+      {Intrinsic::mips_msubv_w, 68192}, // __builtin_msa_msubv_w
+      {Intrinsic::mips_mul_q_h, 68258}, // __builtin_msa_mul_q_h
+      {Intrinsic::mips_mul_q_w, 68280}, // __builtin_msa_mul_q_w
+      {Intrinsic::mips_mulr_q_h, 68544}, // __builtin_msa_mulr_q_h
+      {Intrinsic::mips_mulr_q_w, 68567}, // __builtin_msa_mulr_q_w
+      {Intrinsic::mips_mulv_b, 68686}, // __builtin_msa_mulv_b
+      {Intrinsic::mips_mulv_d, 68707}, // __builtin_msa_mulv_d
+      {Intrinsic::mips_mulv_h, 68728}, // __builtin_msa_mulv_h
+      {Intrinsic::mips_mulv_w, 68749}, // __builtin_msa_mulv_w
+      {Intrinsic::mips_nloc_b, 68770}, // __builtin_msa_nloc_b
+      {Intrinsic::mips_nloc_d, 68791}, // __builtin_msa_nloc_d
+      {Intrinsic::mips_nloc_h, 68812}, // __builtin_msa_nloc_h
+      {Intrinsic::mips_nloc_w, 68833}, // __builtin_msa_nloc_w
+      {Intrinsic::mips_nlzc_b, 68854}, // __builtin_msa_nlzc_b
+      {Intrinsic::mips_nlzc_d, 68875}, // __builtin_msa_nlzc_d
+      {Intrinsic::mips_nlzc_h, 68896}, // __builtin_msa_nlzc_h
+      {Intrinsic::mips_nlzc_w, 68917}, // __builtin_msa_nlzc_w
+      {Intrinsic::mips_nor_v, 68938}, // __builtin_msa_nor_v
+      {Intrinsic::mips_nori_b, 68958}, // __builtin_msa_nori_b
+      {Intrinsic::mips_or_v, 68979}, // __builtin_msa_or_v
+      {Intrinsic::mips_ori_b, 68998}, // __builtin_msa_ori_b
+      {Intrinsic::mips_pckev_b, 69043}, // __builtin_msa_pckev_b
+      {Intrinsic::mips_pckev_d, 69065}, // __builtin_msa_pckev_d
+      {Intrinsic::mips_pckev_h, 69087}, // __builtin_msa_pckev_h
+      {Intrinsic::mips_pckev_w, 69109}, // __builtin_msa_pckev_w
+      {Intrinsic::mips_pckod_b, 69131}, // __builtin_msa_pckod_b
+      {Intrinsic::mips_pckod_d, 69153}, // __builtin_msa_pckod_d
+      {Intrinsic::mips_pckod_h, 69175}, // __builtin_msa_pckod_h
+      {Intrinsic::mips_pckod_w, 69197}, // __builtin_msa_pckod_w
+      {Intrinsic::mips_pcnt_b, 69219}, // __builtin_msa_pcnt_b
+      {Intrinsic::mips_pcnt_d, 69240}, // __builtin_msa_pcnt_d
+      {Intrinsic::mips_pcnt_h, 69261}, // __builtin_msa_pcnt_h
+      {Intrinsic::mips_pcnt_w, 69282}, // __builtin_msa_pcnt_w
+      {Intrinsic::mips_sat_s_b, 69966}, // __builtin_msa_sat_s_b
+      {Intrinsic::mips_sat_s_d, 69988}, // __builtin_msa_sat_s_d
+      {Intrinsic::mips_sat_s_h, 70010}, // __builtin_msa_sat_s_h
+      {Intrinsic::mips_sat_s_w, 70032}, // __builtin_msa_sat_s_w
+      {Intrinsic::mips_sat_u_b, 70054}, // __builtin_msa_sat_u_b
+      {Intrinsic::mips_sat_u_d, 70076}, // __builtin_msa_sat_u_d
+      {Intrinsic::mips_sat_u_h, 70098}, // __builtin_msa_sat_u_h
+      {Intrinsic::mips_sat_u_w, 70120}, // __builtin_msa_sat_u_w
+      {Intrinsic::mips_shf_b, 70142}, // __builtin_msa_shf_b
+      {Intrinsic::mips_shf_h, 70162}, // __builtin_msa_shf_h
+      {Intrinsic::mips_shf_w, 70182}, // __builtin_msa_shf_w
+      {Intrinsic::mips_sld_b, 70484}, // __builtin_msa_sld_b
+      {Intrinsic::mips_sld_d, 70504}, // __builtin_msa_sld_d
+      {Intrinsic::mips_sld_h, 70524}, // __builtin_msa_sld_h
+      {Intrinsic::mips_sld_w, 70544}, // __builtin_msa_sld_w
+      {Intrinsic::mips_sldi_b, 70564}, // __builtin_msa_sldi_b
+      {Intrinsic::mips_sldi_d, 70585}, // __builtin_msa_sldi_d
+      {Intrinsic::mips_sldi_h, 70606}, // __builtin_msa_sldi_h
+      {Intrinsic::mips_sldi_w, 70627}, // __builtin_msa_sldi_w
+      {Intrinsic::mips_sll_b, 70648}, // __builtin_msa_sll_b
+      {Intrinsic::mips_sll_d, 70668}, // __builtin_msa_sll_d
+      {Intrinsic::mips_sll_h, 70688}, // __builtin_msa_sll_h
+      {Intrinsic::mips_sll_w, 70708}, // __builtin_msa_sll_w
+      {Intrinsic::mips_slli_b, 70728}, // __builtin_msa_slli_b
+      {Intrinsic::mips_slli_d, 70749}, // __builtin_msa_slli_d
+      {Intrinsic::mips_slli_h, 70770}, // __builtin_msa_slli_h
+      {Intrinsic::mips_slli_w, 70791}, // __builtin_msa_slli_w
+      {Intrinsic::mips_splat_b, 70812}, // __builtin_msa_splat_b
+      {Intrinsic::mips_splat_d, 70834}, // __builtin_msa_splat_d
+      {Intrinsic::mips_splat_h, 70856}, // __builtin_msa_splat_h
+      {Intrinsic::mips_splat_w, 70878}, // __builtin_msa_splat_w
+      {Intrinsic::mips_splati_b, 70900}, // __builtin_msa_splati_b
+      {Intrinsic::mips_splati_d, 70923}, // __builtin_msa_splati_d
+      {Intrinsic::mips_splati_h, 70946}, // __builtin_msa_splati_h
+      {Intrinsic::mips_splati_w, 70969}, // __builtin_msa_splati_w
+      {Intrinsic::mips_sra_b, 70992}, // __builtin_msa_sra_b
+      {Intrinsic::mips_sra_d, 71012}, // __builtin_msa_sra_d
+      {Intrinsic::mips_sra_h, 71032}, // __builtin_msa_sra_h
+      {Intrinsic::mips_sra_w, 71052}, // __builtin_msa_sra_w
+      {Intrinsic::mips_srai_b, 71072}, // __builtin_msa_srai_b
+      {Intrinsic::mips_srai_d, 71093}, // __builtin_msa_srai_d
+      {Intrinsic::mips_srai_h, 71114}, // __builtin_msa_srai_h
+      {Intrinsic::mips_srai_w, 71135}, // __builtin_msa_srai_w
+      {Intrinsic::mips_srar_b, 71156}, // __builtin_msa_srar_b
+      {Intrinsic::mips_srar_d, 71177}, // __builtin_msa_srar_d
+      {Intrinsic::mips_srar_h, 71198}, // __builtin_msa_srar_h
+      {Intrinsic::mips_srar_w, 71219}, // __builtin_msa_srar_w
+      {Intrinsic::mips_srari_b, 71240}, // __builtin_msa_srari_b
+      {Intrinsic::mips_srari_d, 71262}, // __builtin_msa_srari_d
+      {Intrinsic::mips_srari_h, 71284}, // __builtin_msa_srari_h
+      {Intrinsic::mips_srari_w, 71306}, // __builtin_msa_srari_w
+      {Intrinsic::mips_srl_b, 71328}, // __builtin_msa_srl_b
+      {Intrinsic::mips_srl_d, 71348}, // __builtin_msa_srl_d
+      {Intrinsic::mips_srl_h, 71368}, // __builtin_msa_srl_h
+      {Intrinsic::mips_srl_w, 71388}, // __builtin_msa_srl_w
+      {Intrinsic::mips_srli_b, 71408}, // __builtin_msa_srli_b
+      {Intrinsic::mips_srli_d, 71429}, // __builtin_msa_srli_d
+      {Intrinsic::mips_srli_h, 71450}, // __builtin_msa_srli_h
+      {Intrinsic::mips_srli_w, 71471}, // __builtin_msa_srli_w
+      {Intrinsic::mips_srlr_b, 71492}, // __builtin_msa_srlr_b
+      {Intrinsic::mips_srlr_d, 71513}, // __builtin_msa_srlr_d
+      {Intrinsic::mips_srlr_h, 71534}, // __builtin_msa_srlr_h
+      {Intrinsic::mips_srlr_w, 71555}, // __builtin_msa_srlr_w
+      {Intrinsic::mips_srlri_b, 71576}, // __builtin_msa_srlri_b
+      {Intrinsic::mips_srlri_d, 71598}, // __builtin_msa_srlri_d
+      {Intrinsic::mips_srlri_h, 71620}, // __builtin_msa_srlri_h
+      {Intrinsic::mips_srlri_w, 71642}, // __builtin_msa_srlri_w
+      {Intrinsic::mips_st_b, 71664}, // __builtin_msa_st_b
+      {Intrinsic::mips_st_d, 71683}, // __builtin_msa_st_d
+      {Intrinsic::mips_st_h, 71702}, // __builtin_msa_st_h
+      {Intrinsic::mips_st_w, 71721}, // __builtin_msa_st_w
+      {Intrinsic::mips_subs_s_b, 71910}, // __builtin_msa_subs_s_b
+      {Intrinsic::mips_subs_s_d, 71933}, // __builtin_msa_subs_s_d
+      {Intrinsic::mips_subs_s_h, 71956}, // __builtin_msa_subs_s_h
+      {Intrinsic::mips_subs_s_w, 71979}, // __builtin_msa_subs_s_w
+      {Intrinsic::mips_subs_u_b, 72002}, // __builtin_msa_subs_u_b
+      {Intrinsic::mips_subs_u_d, 72025}, // __builtin_msa_subs_u_d
+      {Intrinsic::mips_subs_u_h, 72048}, // __builtin_msa_subs_u_h
+      {Intrinsic::mips_subs_u_w, 72071}, // __builtin_msa_subs_u_w
+      {Intrinsic::mips_subsus_u_b, 72094}, // __builtin_msa_subsus_u_b
+      {Intrinsic::mips_subsus_u_d, 72119}, // __builtin_msa_subsus_u_d
+      {Intrinsic::mips_subsus_u_h, 72144}, // __builtin_msa_subsus_u_h
+      {Intrinsic::mips_subsus_u_w, 72169}, // __builtin_msa_subsus_u_w
+      {Intrinsic::mips_subsuu_s_b, 72194}, // __builtin_msa_subsuu_s_b
+      {Intrinsic::mips_subsuu_s_d, 72219}, // __builtin_msa_subsuu_s_d
+      {Intrinsic::mips_subsuu_s_h, 72244}, // __builtin_msa_subsuu_s_h
+      {Intrinsic::mips_subsuu_s_w, 72269}, // __builtin_msa_subsuu_s_w
+      {Intrinsic::mips_subv_b, 72440}, // __builtin_msa_subv_b
+      {Intrinsic::mips_subv_d, 72461}, // __builtin_msa_subv_d
+      {Intrinsic::mips_subv_h, 72482}, // __builtin_msa_subv_h
+      {Intrinsic::mips_subv_w, 72503}, // __builtin_msa_subv_w
+      {Intrinsic::mips_subvi_b, 72524}, // __builtin_msa_subvi_b
+      {Intrinsic::mips_subvi_d, 72546}, // __builtin_msa_subvi_d
+      {Intrinsic::mips_subvi_h, 72568}, // __builtin_msa_subvi_h
+      {Intrinsic::mips_subvi_w, 72590}, // __builtin_msa_subvi_w
+      {Intrinsic::mips_vshf_b, 72612}, // __builtin_msa_vshf_b
+      {Intrinsic::mips_vshf_d, 72633}, // __builtin_msa_vshf_d
+      {Intrinsic::mips_vshf_h, 72654}, // __builtin_msa_vshf_h
+      {Intrinsic::mips_vshf_w, 72675}, // __builtin_msa_vshf_w
+      {Intrinsic::mips_xor_v, 72717}, // __builtin_msa_xor_v
+      {Intrinsic::mips_xori_b, 72737}, // __builtin_msa_xori_b
+    };
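+    // Editorial note: the entries above are sorted by builtin name (which is
+    // why the numeric second field is not monotonically increasing, e.g. at
+    // the __builtin_mips_* -> __builtin_msa_* boundary); the sorted order is
+    // what lets the lookup below use std::lower_bound. The second field is
+    // presumably an offset into the shared builtin-name string table that
+    // BuiltinEntry::getName() reads from, both defined earlier in this
+    // generated file.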
+    auto I = std::lower_bound(std::begin(mipsNames),
+                              std::end(mipsNames),
+                              BuiltinNameStr);
+    if (I != std::end(mipsNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
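+  // The "nvvm" table below follows the same layout: name-sorted BuiltinEntry
+  // records mapping each __nvvm_* builtin to its Intrinsic::ID.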
+  if (TargetPrefix == "nvvm") {
+    static const BuiltinEntry nvvmNames[] = {
+      {Intrinsic::nvvm_add_rm_d, 72758}, // __nvvm_add_rm_d
+      {Intrinsic::nvvm_add_rm_f, 72774}, // __nvvm_add_rm_f
+      {Intrinsic::nvvm_add_rm_ftz_f, 72790}, // __nvvm_add_rm_ftz_f
+      {Intrinsic::nvvm_add_rn_d, 72810}, // __nvvm_add_rn_d
+      {Intrinsic::nvvm_add_rn_f, 72826}, // __nvvm_add_rn_f
+      {Intrinsic::nvvm_add_rn_ftz_f, 72842}, // __nvvm_add_rn_ftz_f
+      {Intrinsic::nvvm_add_rp_d, 72862}, // __nvvm_add_rp_d
+      {Intrinsic::nvvm_add_rp_f, 72878}, // __nvvm_add_rp_f
+      {Intrinsic::nvvm_add_rp_ftz_f, 72894}, // __nvvm_add_rp_ftz_f
+      {Intrinsic::nvvm_add_rz_d, 72914}, // __nvvm_add_rz_d
+      {Intrinsic::nvvm_add_rz_f, 72930}, // __nvvm_add_rz_f
+      {Intrinsic::nvvm_add_rz_ftz_f, 72946}, // __nvvm_add_rz_ftz_f
+      {Intrinsic::nvvm_barrier, 73003}, // __nvvm_bar
+      {Intrinsic::nvvm_barrier0_and, 73085}, // __nvvm_bar0_and
+      {Intrinsic::nvvm_barrier0_or, 73101}, // __nvvm_bar0_or
+      {Intrinsic::nvvm_barrier0_popc, 73116}, // __nvvm_bar0_popc
+      {Intrinsic::nvvm_barrier_n, 73014}, // __nvvm_bar_n
+      {Intrinsic::nvvm_bar_sync, 72966}, // __nvvm_bar_sync
+      {Intrinsic::nvvm_bar_warp_sync, 72982}, // __nvvm_bar_warp_sync
+      {Intrinsic::nvvm_barrier_sync, 73027}, // __nvvm_barrier_sync
+      {Intrinsic::nvvm_barrier_sync_cnt, 73047}, // __nvvm_barrier_sync_cnt
+      {Intrinsic::nvvm_bitcast_d2ll, 73133}, // __nvvm_bitcast_d2ll
+      {Intrinsic::nvvm_bitcast_f2i, 73153}, // __nvvm_bitcast_f2i
+      {Intrinsic::nvvm_bitcast_i2f, 73172}, // __nvvm_bitcast_i2f
+      {Intrinsic::nvvm_bitcast_ll2d, 73191}, // __nvvm_bitcast_ll2d
+      {Intrinsic::nvvm_ceil_d, 73211}, // __nvvm_ceil_d
+      {Intrinsic::nvvm_ceil_f, 73225}, // __nvvm_ceil_f
+      {Intrinsic::nvvm_ceil_ftz_f, 73239}, // __nvvm_ceil_ftz_f
+      {Intrinsic::nvvm_cos_approx_f, 73257}, // __nvvm_cos_approx_f
+      {Intrinsic::nvvm_cos_approx_ftz_f, 73277}, // __nvvm_cos_approx_ftz_f
+      {Intrinsic::nvvm_d2f_rm, 73301}, // __nvvm_d2f_rm
+      {Intrinsic::nvvm_d2f_rm_ftz, 73315}, // __nvvm_d2f_rm_ftz
+      {Intrinsic::nvvm_d2f_rn, 73333}, // __nvvm_d2f_rn
+      {Intrinsic::nvvm_d2f_rn_ftz, 73347}, // __nvvm_d2f_rn_ftz
+      {Intrinsic::nvvm_d2f_rp, 73365}, // __nvvm_d2f_rp
+      {Intrinsic::nvvm_d2f_rp_ftz, 73379}, // __nvvm_d2f_rp_ftz
+      {Intrinsic::nvvm_d2f_rz, 73397}, // __nvvm_d2f_rz
+      {Intrinsic::nvvm_d2f_rz_ftz, 73411}, // __nvvm_d2f_rz_ftz
+      {Intrinsic::nvvm_d2i_hi, 73429}, // __nvvm_d2i_hi
+      {Intrinsic::nvvm_d2i_lo, 73443}, // __nvvm_d2i_lo
+      {Intrinsic::nvvm_d2i_rm, 73457}, // __nvvm_d2i_rm
+      {Intrinsic::nvvm_d2i_rn, 73471}, // __nvvm_d2i_rn
+      {Intrinsic::nvvm_d2i_rp, 73485}, // __nvvm_d2i_rp
+      {Intrinsic::nvvm_d2i_rz, 73499}, // __nvvm_d2i_rz
+      {Intrinsic::nvvm_d2ll_rm, 73513}, // __nvvm_d2ll_rm
+      {Intrinsic::nvvm_d2ll_rn, 73528}, // __nvvm_d2ll_rn
+      {Intrinsic::nvvm_d2ll_rp, 73543}, // __nvvm_d2ll_rp
+      {Intrinsic::nvvm_d2ll_rz, 73558}, // __nvvm_d2ll_rz
+      {Intrinsic::nvvm_d2ui_rm, 73573}, // __nvvm_d2ui_rm
+      {Intrinsic::nvvm_d2ui_rn, 73588}, // __nvvm_d2ui_rn
+      {Intrinsic::nvvm_d2ui_rp, 73603}, // __nvvm_d2ui_rp
+      {Intrinsic::nvvm_d2ui_rz, 73618}, // __nvvm_d2ui_rz
+      {Intrinsic::nvvm_d2ull_rm, 73633}, // __nvvm_d2ull_rm
+      {Intrinsic::nvvm_d2ull_rn, 73649}, // __nvvm_d2ull_rn
+      {Intrinsic::nvvm_d2ull_rp, 73665}, // __nvvm_d2ull_rp
+      {Intrinsic::nvvm_d2ull_rz, 73681}, // __nvvm_d2ull_rz
+      {Intrinsic::nvvm_div_approx_f, 73697}, // __nvvm_div_approx_f
+      {Intrinsic::nvvm_div_approx_ftz_f, 73717}, // __nvvm_div_approx_ftz_f
+      {Intrinsic::nvvm_div_rm_d, 73741}, // __nvvm_div_rm_d
+      {Intrinsic::nvvm_div_rm_f, 73757}, // __nvvm_div_rm_f
+      {Intrinsic::nvvm_div_rm_ftz_f, 73773}, // __nvvm_div_rm_ftz_f
+      {Intrinsic::nvvm_div_rn_d, 73793}, // __nvvm_div_rn_d
+      {Intrinsic::nvvm_div_rn_f, 73809}, // __nvvm_div_rn_f
+      {Intrinsic::nvvm_div_rn_ftz_f, 73825}, // __nvvm_div_rn_ftz_f
+      {Intrinsic::nvvm_div_rp_d, 73845}, // __nvvm_div_rp_d
+      {Intrinsic::nvvm_div_rp_f, 73861}, // __nvvm_div_rp_f
+      {Intrinsic::nvvm_div_rp_ftz_f, 73877}, // __nvvm_div_rp_ftz_f
+      {Intrinsic::nvvm_div_rz_d, 73897}, // __nvvm_div_rz_d
+      {Intrinsic::nvvm_div_rz_f, 73913}, // __nvvm_div_rz_f
+      {Intrinsic::nvvm_div_rz_ftz_f, 73929}, // __nvvm_div_rz_ftz_f
+      {Intrinsic::nvvm_ex2_approx_d, 73949}, // __nvvm_ex2_approx_d
+      {Intrinsic::nvvm_ex2_approx_f, 73969}, // __nvvm_ex2_approx_f
+      {Intrinsic::nvvm_ex2_approx_ftz_f, 73989}, // __nvvm_ex2_approx_ftz_f
+      {Intrinsic::nvvm_f2h_rn, 74013}, // __nvvm_f2h_rn
+      {Intrinsic::nvvm_f2h_rn_ftz, 74027}, // __nvvm_f2h_rn_ftz
+      {Intrinsic::nvvm_f2i_rm, 74045}, // __nvvm_f2i_rm
+      {Intrinsic::nvvm_f2i_rm_ftz, 74059}, // __nvvm_f2i_rm_ftz
+      {Intrinsic::nvvm_f2i_rn, 74077}, // __nvvm_f2i_rn
+      {Intrinsic::nvvm_f2i_rn_ftz, 74091}, // __nvvm_f2i_rn_ftz
+      {Intrinsic::nvvm_f2i_rp, 74109}, // __nvvm_f2i_rp
+      {Intrinsic::nvvm_f2i_rp_ftz, 74123}, // __nvvm_f2i_rp_ftz
+      {Intrinsic::nvvm_f2i_rz, 74141}, // __nvvm_f2i_rz
+      {Intrinsic::nvvm_f2i_rz_ftz, 74155}, // __nvvm_f2i_rz_ftz
+      {Intrinsic::nvvm_f2ll_rm, 74173}, // __nvvm_f2ll_rm
+      {Intrinsic::nvvm_f2ll_rm_ftz, 74188}, // __nvvm_f2ll_rm_ftz
+      {Intrinsic::nvvm_f2ll_rn, 74207}, // __nvvm_f2ll_rn
+      {Intrinsic::nvvm_f2ll_rn_ftz, 74222}, // __nvvm_f2ll_rn_ftz
+      {Intrinsic::nvvm_f2ll_rp, 74241}, // __nvvm_f2ll_rp
+      {Intrinsic::nvvm_f2ll_rp_ftz, 74256}, // __nvvm_f2ll_rp_ftz
+      {Intrinsic::nvvm_f2ll_rz, 74275}, // __nvvm_f2ll_rz
+      {Intrinsic::nvvm_f2ll_rz_ftz, 74290}, // __nvvm_f2ll_rz_ftz
+      {Intrinsic::nvvm_f2ui_rm, 74309}, // __nvvm_f2ui_rm
+      {Intrinsic::nvvm_f2ui_rm_ftz, 74324}, // __nvvm_f2ui_rm_ftz
+      {Intrinsic::nvvm_f2ui_rn, 74343}, // __nvvm_f2ui_rn
+      {Intrinsic::nvvm_f2ui_rn_ftz, 74358}, // __nvvm_f2ui_rn_ftz
+      {Intrinsic::nvvm_f2ui_rp, 74377}, // __nvvm_f2ui_rp
+      {Intrinsic::nvvm_f2ui_rp_ftz, 74392}, // __nvvm_f2ui_rp_ftz
+      {Intrinsic::nvvm_f2ui_rz, 74411}, // __nvvm_f2ui_rz
+      {Intrinsic::nvvm_f2ui_rz_ftz, 74426}, // __nvvm_f2ui_rz_ftz
+      {Intrinsic::nvvm_f2ull_rm, 74445}, // __nvvm_f2ull_rm
+      {Intrinsic::nvvm_f2ull_rm_ftz, 74461}, // __nvvm_f2ull_rm_ftz
+      {Intrinsic::nvvm_f2ull_rn, 74481}, // __nvvm_f2ull_rn
+      {Intrinsic::nvvm_f2ull_rn_ftz, 74497}, // __nvvm_f2ull_rn_ftz
+      {Intrinsic::nvvm_f2ull_rp, 74517}, // __nvvm_f2ull_rp
+      {Intrinsic::nvvm_f2ull_rp_ftz, 74533}, // __nvvm_f2ull_rp_ftz
+      {Intrinsic::nvvm_f2ull_rz, 74553}, // __nvvm_f2ull_rz
+      {Intrinsic::nvvm_f2ull_rz_ftz, 74569}, // __nvvm_f2ull_rz_ftz
+      {Intrinsic::nvvm_fabs_d, 74589}, // __nvvm_fabs_d
+      {Intrinsic::nvvm_fabs_f, 74603}, // __nvvm_fabs_f
+      {Intrinsic::nvvm_fabs_ftz_f, 74617}, // __nvvm_fabs_ftz_f
+      {Intrinsic::nvvm_floor_d, 74635}, // __nvvm_floor_d
+      {Intrinsic::nvvm_floor_f, 74650}, // __nvvm_floor_f
+      {Intrinsic::nvvm_floor_ftz_f, 74665}, // __nvvm_floor_ftz_f
+      {Intrinsic::nvvm_fma_rm_d, 74684}, // __nvvm_fma_rm_d
+      {Intrinsic::nvvm_fma_rm_f, 74700}, // __nvvm_fma_rm_f
+      {Intrinsic::nvvm_fma_rm_ftz_f, 74716}, // __nvvm_fma_rm_ftz_f
+      {Intrinsic::nvvm_fma_rn_d, 74736}, // __nvvm_fma_rn_d
+      {Intrinsic::nvvm_fma_rn_f, 74752}, // __nvvm_fma_rn_f
+      {Intrinsic::nvvm_fma_rn_ftz_f, 74768}, // __nvvm_fma_rn_ftz_f
+      {Intrinsic::nvvm_fma_rp_d, 74788}, // __nvvm_fma_rp_d
+      {Intrinsic::nvvm_fma_rp_f, 74804}, // __nvvm_fma_rp_f
+      {Intrinsic::nvvm_fma_rp_ftz_f, 74820}, // __nvvm_fma_rp_ftz_f
+      {Intrinsic::nvvm_fma_rz_d, 74840}, // __nvvm_fma_rz_d
+      {Intrinsic::nvvm_fma_rz_f, 74856}, // __nvvm_fma_rz_f
+      {Intrinsic::nvvm_fma_rz_ftz_f, 74872}, // __nvvm_fma_rz_ftz_f
+      {Intrinsic::nvvm_fmax_d, 74892}, // __nvvm_fmax_d
+      {Intrinsic::nvvm_fmax_f, 74906}, // __nvvm_fmax_f
+      {Intrinsic::nvvm_fmax_ftz_f, 74920}, // __nvvm_fmax_ftz_f
+      {Intrinsic::nvvm_fmin_d, 74938}, // __nvvm_fmin_d
+      {Intrinsic::nvvm_fmin_f, 74952}, // __nvvm_fmin_f
+      {Intrinsic::nvvm_fmin_ftz_f, 74966}, // __nvvm_fmin_ftz_f
+      {Intrinsic::nvvm_fns, 74984}, // __nvvm_fns
+      {Intrinsic::nvvm_i2d_rm, 74995}, // __nvvm_i2d_rm
+      {Intrinsic::nvvm_i2d_rn, 75009}, // __nvvm_i2d_rn
+      {Intrinsic::nvvm_i2d_rp, 75023}, // __nvvm_i2d_rp
+      {Intrinsic::nvvm_i2d_rz, 75037}, // __nvvm_i2d_rz
+      {Intrinsic::nvvm_i2f_rm, 75051}, // __nvvm_i2f_rm
+      {Intrinsic::nvvm_i2f_rn, 75065}, // __nvvm_i2f_rn
+      {Intrinsic::nvvm_i2f_rp, 75079}, // __nvvm_i2f_rp
+      {Intrinsic::nvvm_i2f_rz, 75093}, // __nvvm_i2f_rz
+      {Intrinsic::nvvm_isspacep_const, 75107}, // __nvvm_isspacep_const
+      {Intrinsic::nvvm_isspacep_global, 75129}, // __nvvm_isspacep_global
+      {Intrinsic::nvvm_isspacep_local, 75152}, // __nvvm_isspacep_local
+      {Intrinsic::nvvm_isspacep_shared, 75174}, // __nvvm_isspacep_shared
+      {Intrinsic::nvvm_istypep_sampler, 75197}, // __nvvm_istypep_sampler
+      {Intrinsic::nvvm_istypep_surface, 75220}, // __nvvm_istypep_surface
+      {Intrinsic::nvvm_istypep_texture, 75243}, // __nvvm_istypep_texture
+      {Intrinsic::nvvm_lg2_approx_d, 75266}, // __nvvm_lg2_approx_d
+      {Intrinsic::nvvm_lg2_approx_f, 75286}, // __nvvm_lg2_approx_f
+      {Intrinsic::nvvm_lg2_approx_ftz_f, 75306}, // __nvvm_lg2_approx_ftz_f
+      {Intrinsic::nvvm_ll2d_rm, 75330}, // __nvvm_ll2d_rm
+      {Intrinsic::nvvm_ll2d_rn, 75345}, // __nvvm_ll2d_rn
+      {Intrinsic::nvvm_ll2d_rp, 75360}, // __nvvm_ll2d_rp
+      {Intrinsic::nvvm_ll2d_rz, 75375}, // __nvvm_ll2d_rz
+      {Intrinsic::nvvm_ll2f_rm, 75390}, // __nvvm_ll2f_rm
+      {Intrinsic::nvvm_ll2f_rn, 75405}, // __nvvm_ll2f_rn
+      {Intrinsic::nvvm_ll2f_rp, 75420}, // __nvvm_ll2f_rp
+      {Intrinsic::nvvm_ll2f_rz, 75435}, // __nvvm_ll2f_rz
+      {Intrinsic::nvvm_lohi_i2d, 75450}, // __nvvm_lohi_i2d
+      {Intrinsic::nvvm_match_any_sync_i32, 75466}, // __nvvm_match_any_sync_i32
+      {Intrinsic::nvvm_match_any_sync_i64, 75492}, // __nvvm_match_any_sync_i64
+      {Intrinsic::nvvm_membar_cta, 75518}, // __nvvm_membar_cta
+      {Intrinsic::nvvm_membar_gl, 75536}, // __nvvm_membar_gl
+      {Intrinsic::nvvm_membar_sys, 75553}, // __nvvm_membar_sys
+      {Intrinsic::nvvm_mul24_i, 75779}, // __nvvm_mul24_i
+      {Intrinsic::nvvm_mul24_ui, 75794}, // __nvvm_mul24_ui
+      {Intrinsic::nvvm_mul_rm_d, 75571}, // __nvvm_mul_rm_d
+      {Intrinsic::nvvm_mul_rm_f, 75587}, // __nvvm_mul_rm_f
+      {Intrinsic::nvvm_mul_rm_ftz_f, 75603}, // __nvvm_mul_rm_ftz_f
+      {Intrinsic::nvvm_mul_rn_d, 75623}, // __nvvm_mul_rn_d
+      {Intrinsic::nvvm_mul_rn_f, 75639}, // __nvvm_mul_rn_f
+      {Intrinsic::nvvm_mul_rn_ftz_f, 75655}, // __nvvm_mul_rn_ftz_f
+      {Intrinsic::nvvm_mul_rp_d, 75675}, // __nvvm_mul_rp_d
+      {Intrinsic::nvvm_mul_rp_f, 75691}, // __nvvm_mul_rp_f
+      {Intrinsic::nvvm_mul_rp_ftz_f, 75707}, // __nvvm_mul_rp_ftz_f
+      {Intrinsic::nvvm_mul_rz_d, 75727}, // __nvvm_mul_rz_d
+      {Intrinsic::nvvm_mul_rz_f, 75743}, // __nvvm_mul_rz_f
+      {Intrinsic::nvvm_mul_rz_ftz_f, 75759}, // __nvvm_mul_rz_ftz_f
+      {Intrinsic::nvvm_mulhi_i, 75810}, // __nvvm_mulhi_i
+      {Intrinsic::nvvm_mulhi_ll, 75825}, // __nvvm_mulhi_ll
+      {Intrinsic::nvvm_mulhi_ui, 75841}, // __nvvm_mulhi_ui
+      {Intrinsic::nvvm_mulhi_ull, 75857}, // __nvvm_mulhi_ull
+      {Intrinsic::nvvm_prmt, 75874}, // __nvvm_prmt
+      {Intrinsic::nvvm_rcp_approx_ftz_d, 75886}, // __nvvm_rcp_approx_ftz_d
+      {Intrinsic::nvvm_rcp_rm_d, 75910}, // __nvvm_rcp_rm_d
+      {Intrinsic::nvvm_rcp_rm_f, 75926}, // __nvvm_rcp_rm_f
+      {Intrinsic::nvvm_rcp_rm_ftz_f, 75942}, // __nvvm_rcp_rm_ftz_f
+      {Intrinsic::nvvm_rcp_rn_d, 75962}, // __nvvm_rcp_rn_d
+      {Intrinsic::nvvm_rcp_rn_f, 75978}, // __nvvm_rcp_rn_f
+      {Intrinsic::nvvm_rcp_rn_ftz_f, 75994}, // __nvvm_rcp_rn_ftz_f
+      {Intrinsic::nvvm_rcp_rp_d, 76014}, // __nvvm_rcp_rp_d
+      {Intrinsic::nvvm_rcp_rp_f, 76030}, // __nvvm_rcp_rp_f
+      {Intrinsic::nvvm_rcp_rp_ftz_f, 76046}, // __nvvm_rcp_rp_ftz_f
+      {Intrinsic::nvvm_rcp_rz_d, 76066}, // __nvvm_rcp_rz_d
+      {Intrinsic::nvvm_rcp_rz_f, 76082}, // __nvvm_rcp_rz_f
+      {Intrinsic::nvvm_rcp_rz_ftz_f, 76098}, // __nvvm_rcp_rz_ftz_f
+      {Intrinsic::nvvm_read_ptx_sreg_clock, 76118}, // __nvvm_read_ptx_sreg_clock
+      {Intrinsic::nvvm_read_ptx_sreg_clock64, 76145}, // __nvvm_read_ptx_sreg_clock64
+      {Intrinsic::nvvm_read_ptx_sreg_ctaid_w, 76174}, // __nvvm_read_ptx_sreg_ctaid_w
+      {Intrinsic::nvvm_read_ptx_sreg_ctaid_x, 76203}, // __nvvm_read_ptx_sreg_ctaid_x
+      {Intrinsic::nvvm_read_ptx_sreg_ctaid_y, 76232}, // __nvvm_read_ptx_sreg_ctaid_y
+      {Intrinsic::nvvm_read_ptx_sreg_ctaid_z, 76261}, // __nvvm_read_ptx_sreg_ctaid_z
+      {Intrinsic::nvvm_read_ptx_sreg_envreg0, 76290}, // __nvvm_read_ptx_sreg_envreg0
+      {Intrinsic::nvvm_read_ptx_sreg_envreg1, 76319}, // __nvvm_read_ptx_sreg_envreg1
+      {Intrinsic::nvvm_read_ptx_sreg_envreg10, 76348}, // __nvvm_read_ptx_sreg_envreg10
+      {Intrinsic::nvvm_read_ptx_sreg_envreg11, 76378}, // __nvvm_read_ptx_sreg_envreg11
+      {Intrinsic::nvvm_read_ptx_sreg_envreg12, 76408}, // __nvvm_read_ptx_sreg_envreg12
+      {Intrinsic::nvvm_read_ptx_sreg_envreg13, 76438}, // __nvvm_read_ptx_sreg_envreg13
+      {Intrinsic::nvvm_read_ptx_sreg_envreg14, 76468}, // __nvvm_read_ptx_sreg_envreg14
+      {Intrinsic::nvvm_read_ptx_sreg_envreg15, 76498}, // __nvvm_read_ptx_sreg_envreg15
+      {Intrinsic::nvvm_read_ptx_sreg_envreg16, 76528}, // __nvvm_read_ptx_sreg_envreg16
+      {Intrinsic::nvvm_read_ptx_sreg_envreg17, 76558}, // __nvvm_read_ptx_sreg_envreg17
+      {Intrinsic::nvvm_read_ptx_sreg_envreg18, 76588}, // __nvvm_read_ptx_sreg_envreg18
+      {Intrinsic::nvvm_read_ptx_sreg_envreg19, 76618}, // __nvvm_read_ptx_sreg_envreg19
+      {Intrinsic::nvvm_read_ptx_sreg_envreg2, 76648}, // __nvvm_read_ptx_sreg_envreg2
+      {Intrinsic::nvvm_read_ptx_sreg_envreg20, 76677}, // __nvvm_read_ptx_sreg_envreg20
+      {Intrinsic::nvvm_read_ptx_sreg_envreg21, 76707}, // __nvvm_read_ptx_sreg_envreg21
+      {Intrinsic::nvvm_read_ptx_sreg_envreg22, 76737}, // __nvvm_read_ptx_sreg_envreg22
+      {Intrinsic::nvvm_read_ptx_sreg_envreg23, 76767}, // __nvvm_read_ptx_sreg_envreg23
+      {Intrinsic::nvvm_read_ptx_sreg_envreg24, 76797}, // __nvvm_read_ptx_sreg_envreg24
+      {Intrinsic::nvvm_read_ptx_sreg_envreg25, 76827}, // __nvvm_read_ptx_sreg_envreg25
+      {Intrinsic::nvvm_read_ptx_sreg_envreg26, 76857}, // __nvvm_read_ptx_sreg_envreg26
+      {Intrinsic::nvvm_read_ptx_sreg_envreg27, 76887}, // __nvvm_read_ptx_sreg_envreg27
+      {Intrinsic::nvvm_read_ptx_sreg_envreg28, 76917}, // __nvvm_read_ptx_sreg_envreg28
+      {Intrinsic::nvvm_read_ptx_sreg_envreg29, 76947}, // __nvvm_read_ptx_sreg_envreg29
+      {Intrinsic::nvvm_read_ptx_sreg_envreg3, 76977}, // __nvvm_read_ptx_sreg_envreg3
+      {Intrinsic::nvvm_read_ptx_sreg_envreg30, 77006}, // __nvvm_read_ptx_sreg_envreg30
+      {Intrinsic::nvvm_read_ptx_sreg_envreg31, 77036}, // __nvvm_read_ptx_sreg_envreg31
+      {Intrinsic::nvvm_read_ptx_sreg_envreg4, 77066}, // __nvvm_read_ptx_sreg_envreg4
+      {Intrinsic::nvvm_read_ptx_sreg_envreg5, 77095}, // __nvvm_read_ptx_sreg_envreg5
+      {Intrinsic::nvvm_read_ptx_sreg_envreg6, 77124}, // __nvvm_read_ptx_sreg_envreg6
+      {Intrinsic::nvvm_read_ptx_sreg_envreg7, 77153}, // __nvvm_read_ptx_sreg_envreg7
+      {Intrinsic::nvvm_read_ptx_sreg_envreg8, 77182}, // __nvvm_read_ptx_sreg_envreg8
+      {Intrinsic::nvvm_read_ptx_sreg_envreg9, 77211}, // __nvvm_read_ptx_sreg_envreg9
+      {Intrinsic::nvvm_read_ptx_sreg_gridid, 77240}, // __nvvm_read_ptx_sreg_gridid
+      {Intrinsic::nvvm_read_ptx_sreg_laneid, 77268}, // __nvvm_read_ptx_sreg_laneid
+      {Intrinsic::nvvm_read_ptx_sreg_lanemask_eq, 77296}, // __nvvm_read_ptx_sreg_lanemask_eq
+      {Intrinsic::nvvm_read_ptx_sreg_lanemask_ge, 77329}, // __nvvm_read_ptx_sreg_lanemask_ge
+      {Intrinsic::nvvm_read_ptx_sreg_lanemask_gt, 77362}, // __nvvm_read_ptx_sreg_lanemask_gt
+      {Intrinsic::nvvm_read_ptx_sreg_lanemask_le, 77395}, // __nvvm_read_ptx_sreg_lanemask_le
+      {Intrinsic::nvvm_read_ptx_sreg_lanemask_lt, 77428}, // __nvvm_read_ptx_sreg_lanemask_lt
+      {Intrinsic::nvvm_read_ptx_sreg_nctaid_w, 77461}, // __nvvm_read_ptx_sreg_nctaid_w
+      {Intrinsic::nvvm_read_ptx_sreg_nctaid_x, 77491}, // __nvvm_read_ptx_sreg_nctaid_x
+      {Intrinsic::nvvm_read_ptx_sreg_nctaid_y, 77521}, // __nvvm_read_ptx_sreg_nctaid_y
+      {Intrinsic::nvvm_read_ptx_sreg_nctaid_z, 77551}, // __nvvm_read_ptx_sreg_nctaid_z
+      {Intrinsic::nvvm_read_ptx_sreg_nsmid, 77581}, // __nvvm_read_ptx_sreg_nsmid
+      {Intrinsic::nvvm_read_ptx_sreg_ntid_w, 77608}, // __nvvm_read_ptx_sreg_ntid_w
+      {Intrinsic::nvvm_read_ptx_sreg_ntid_x, 77636}, // __nvvm_read_ptx_sreg_ntid_x
+      {Intrinsic::nvvm_read_ptx_sreg_ntid_y, 77664}, // __nvvm_read_ptx_sreg_ntid_y
+      {Intrinsic::nvvm_read_ptx_sreg_ntid_z, 77692}, // __nvvm_read_ptx_sreg_ntid_z
+      {Intrinsic::nvvm_read_ptx_sreg_nwarpid, 77720}, // __nvvm_read_ptx_sreg_nwarpid
+      {Intrinsic::nvvm_read_ptx_sreg_pm0, 77749}, // __nvvm_read_ptx_sreg_pm0
+      {Intrinsic::nvvm_read_ptx_sreg_pm1, 77774}, // __nvvm_read_ptx_sreg_pm1
+      {Intrinsic::nvvm_read_ptx_sreg_pm2, 77799}, // __nvvm_read_ptx_sreg_pm2
+      {Intrinsic::nvvm_read_ptx_sreg_pm3, 77824}, // __nvvm_read_ptx_sreg_pm3
+      {Intrinsic::nvvm_read_ptx_sreg_smid, 77849}, // __nvvm_read_ptx_sreg_smid
+      {Intrinsic::nvvm_read_ptx_sreg_tid_w, 77875}, // __nvvm_read_ptx_sreg_tid_w
+      {Intrinsic::nvvm_read_ptx_sreg_tid_x, 77902}, // __nvvm_read_ptx_sreg_tid_x
+      {Intrinsic::nvvm_read_ptx_sreg_tid_y, 77929}, // __nvvm_read_ptx_sreg_tid_y
+      {Intrinsic::nvvm_read_ptx_sreg_tid_z, 77956}, // __nvvm_read_ptx_sreg_tid_z
+      {Intrinsic::nvvm_read_ptx_sreg_warpid, 77983}, // __nvvm_read_ptx_sreg_warpid
+      {Intrinsic::nvvm_read_ptx_sreg_warpsize, 78011}, // __nvvm_read_ptx_sreg_warpsize
+      {Intrinsic::nvvm_rotate_b32, 78041}, // __nvvm_rotate_b32
+      {Intrinsic::nvvm_rotate_b64, 78059}, // __nvvm_rotate_b64
+      {Intrinsic::nvvm_rotate_right_b64, 78077}, // __nvvm_rotate_right_b64
+      {Intrinsic::nvvm_round_d, 78101}, // __nvvm_round_d
+      {Intrinsic::nvvm_round_f, 78116}, // __nvvm_round_f
+      {Intrinsic::nvvm_round_ftz_f, 78131}, // __nvvm_round_ftz_f
+      {Intrinsic::nvvm_rsqrt_approx_d, 78150}, // __nvvm_rsqrt_approx_d
+      {Intrinsic::nvvm_rsqrt_approx_f, 78172}, // __nvvm_rsqrt_approx_f
+      {Intrinsic::nvvm_rsqrt_approx_ftz_f, 78194}, // __nvvm_rsqrt_approx_ftz_f
+      {Intrinsic::nvvm_sad_i, 78220}, // __nvvm_sad_i
+      {Intrinsic::nvvm_sad_ui, 78233}, // __nvvm_sad_ui
+      {Intrinsic::nvvm_saturate_d, 78247}, // __nvvm_saturate_d
+      {Intrinsic::nvvm_saturate_f, 78265}, // __nvvm_saturate_f
+      {Intrinsic::nvvm_saturate_ftz_f, 78283}, // __nvvm_saturate_ftz_f
+      {Intrinsic::nvvm_shfl_bfly_f32, 78305}, // __nvvm_shfl_bfly_f32
+      {Intrinsic::nvvm_shfl_bfly_i32, 78326}, // __nvvm_shfl_bfly_i32
+      {Intrinsic::nvvm_shfl_down_f32, 78347}, // __nvvm_shfl_down_f32
+      {Intrinsic::nvvm_shfl_down_i32, 78368}, // __nvvm_shfl_down_i32
+      {Intrinsic::nvvm_shfl_idx_f32, 78389}, // __nvvm_shfl_idx_f32
+      {Intrinsic::nvvm_shfl_idx_i32, 78409}, // __nvvm_shfl_idx_i32
+      {Intrinsic::nvvm_shfl_sync_bfly_f32, 78429}, // __nvvm_shfl_sync_bfly_f32
+      {Intrinsic::nvvm_shfl_sync_bfly_i32, 78455}, // __nvvm_shfl_sync_bfly_i32
+      {Intrinsic::nvvm_shfl_sync_down_f32, 78481}, // __nvvm_shfl_sync_down_f32
+      {Intrinsic::nvvm_shfl_sync_down_i32, 78507}, // __nvvm_shfl_sync_down_i32
+      {Intrinsic::nvvm_shfl_sync_idx_f32, 78533}, // __nvvm_shfl_sync_idx_f32
+      {Intrinsic::nvvm_shfl_sync_idx_i32, 78558}, // __nvvm_shfl_sync_idx_i32
+      {Intrinsic::nvvm_shfl_sync_up_f32, 78583}, // __nvvm_shfl_sync_up_f32
+      {Intrinsic::nvvm_shfl_sync_up_i32, 78607}, // __nvvm_shfl_sync_up_i32
+      {Intrinsic::nvvm_shfl_up_f32, 78631}, // __nvvm_shfl_up_f32
+      {Intrinsic::nvvm_shfl_up_i32, 78650}, // __nvvm_shfl_up_i32
+      {Intrinsic::nvvm_sin_approx_f, 78669}, // __nvvm_sin_approx_f
+      {Intrinsic::nvvm_sin_approx_ftz_f, 78689}, // __nvvm_sin_approx_ftz_f
+      {Intrinsic::nvvm_sqrt_approx_f, 78713}, // __nvvm_sqrt_approx_f
+      {Intrinsic::nvvm_sqrt_approx_ftz_f, 78734}, // __nvvm_sqrt_approx_ftz_f
+      {Intrinsic::nvvm_sqrt_f, 78759}, // __nvvm_sqrt_f
+      {Intrinsic::nvvm_sqrt_rm_d, 78773}, // __nvvm_sqrt_rm_d
+      {Intrinsic::nvvm_sqrt_rm_f, 78790}, // __nvvm_sqrt_rm_f
+      {Intrinsic::nvvm_sqrt_rm_ftz_f, 78807}, // __nvvm_sqrt_rm_ftz_f
+      {Intrinsic::nvvm_sqrt_rn_d, 78828}, // __nvvm_sqrt_rn_d
+      {Intrinsic::nvvm_sqrt_rn_f, 78845}, // __nvvm_sqrt_rn_f
+      {Intrinsic::nvvm_sqrt_rn_ftz_f, 78862}, // __nvvm_sqrt_rn_ftz_f
+      {Intrinsic::nvvm_sqrt_rp_d, 78883}, // __nvvm_sqrt_rp_d
+      {Intrinsic::nvvm_sqrt_rp_f, 78900}, // __nvvm_sqrt_rp_f
+      {Intrinsic::nvvm_sqrt_rp_ftz_f, 78917}, // __nvvm_sqrt_rp_ftz_f
+      {Intrinsic::nvvm_sqrt_rz_d, 78938}, // __nvvm_sqrt_rz_d
+      {Intrinsic::nvvm_sqrt_rz_f, 78955}, // __nvvm_sqrt_rz_f
+      {Intrinsic::nvvm_sqrt_rz_ftz_f, 78972}, // __nvvm_sqrt_rz_ftz_f
+      {Intrinsic::nvvm_suq_array_size, 78993}, // __nvvm_suq_array_size
+      {Intrinsic::nvvm_suq_channel_data_type, 79015}, // __nvvm_suq_channel_data_type
+      {Intrinsic::nvvm_suq_channel_order, 79044}, // __nvvm_suq_channel_order
+      {Intrinsic::nvvm_suq_depth, 79069}, // __nvvm_suq_depth
+      {Intrinsic::nvvm_suq_height, 79086}, // __nvvm_suq_height
+      {Intrinsic::nvvm_suq_width, 79104}, // __nvvm_suq_width
+      {Intrinsic::nvvm_sust_b_1d_array_i16_clamp, 79121}, // __nvvm_sust_b_1d_array_i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_i16_trap, 79154}, // __nvvm_sust_b_1d_array_i16_trap
+      {Intrinsic::nvvm_sust_b_1d_array_i16_zero, 79186}, // __nvvm_sust_b_1d_array_i16_zero
+      {Intrinsic::nvvm_sust_b_1d_array_i32_clamp, 79218}, // __nvvm_sust_b_1d_array_i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_i32_trap, 79251}, // __nvvm_sust_b_1d_array_i32_trap
+      {Intrinsic::nvvm_sust_b_1d_array_i32_zero, 79283}, // __nvvm_sust_b_1d_array_i32_zero
+      {Intrinsic::nvvm_sust_b_1d_array_i64_clamp, 79315}, // __nvvm_sust_b_1d_array_i64_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_i64_trap, 79348}, // __nvvm_sust_b_1d_array_i64_trap
+      {Intrinsic::nvvm_sust_b_1d_array_i64_zero, 79380}, // __nvvm_sust_b_1d_array_i64_zero
+      {Intrinsic::nvvm_sust_b_1d_array_i8_clamp, 79412}, // __nvvm_sust_b_1d_array_i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_i8_trap, 79444}, // __nvvm_sust_b_1d_array_i8_trap
+      {Intrinsic::nvvm_sust_b_1d_array_i8_zero, 79475}, // __nvvm_sust_b_1d_array_i8_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v2i16_clamp, 79506}, // __nvvm_sust_b_1d_array_v2i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v2i16_trap, 79541}, // __nvvm_sust_b_1d_array_v2i16_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v2i16_zero, 79575}, // __nvvm_sust_b_1d_array_v2i16_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v2i32_clamp, 79609}, // __nvvm_sust_b_1d_array_v2i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v2i32_trap, 79644}, // __nvvm_sust_b_1d_array_v2i32_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v2i32_zero, 79678}, // __nvvm_sust_b_1d_array_v2i32_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v2i64_clamp, 79712}, // __nvvm_sust_b_1d_array_v2i64_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v2i64_trap, 79747}, // __nvvm_sust_b_1d_array_v2i64_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v2i64_zero, 79781}, // __nvvm_sust_b_1d_array_v2i64_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v2i8_clamp, 79815}, // __nvvm_sust_b_1d_array_v2i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v2i8_trap, 79849}, // __nvvm_sust_b_1d_array_v2i8_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v2i8_zero, 79882}, // __nvvm_sust_b_1d_array_v2i8_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v4i16_clamp, 79915}, // __nvvm_sust_b_1d_array_v4i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v4i16_trap, 79950}, // __nvvm_sust_b_1d_array_v4i16_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v4i16_zero, 79984}, // __nvvm_sust_b_1d_array_v4i16_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v4i32_clamp, 80018}, // __nvvm_sust_b_1d_array_v4i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v4i32_trap, 80053}, // __nvvm_sust_b_1d_array_v4i32_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v4i32_zero, 80087}, // __nvvm_sust_b_1d_array_v4i32_zero
+      {Intrinsic::nvvm_sust_b_1d_array_v4i8_clamp, 80121}, // __nvvm_sust_b_1d_array_v4i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_array_v4i8_trap, 80155}, // __nvvm_sust_b_1d_array_v4i8_trap
+      {Intrinsic::nvvm_sust_b_1d_array_v4i8_zero, 80188}, // __nvvm_sust_b_1d_array_v4i8_zero
+      {Intrinsic::nvvm_sust_b_1d_i16_clamp, 80221}, // __nvvm_sust_b_1d_i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_i16_trap, 80248}, // __nvvm_sust_b_1d_i16_trap
+      {Intrinsic::nvvm_sust_b_1d_i16_zero, 80274}, // __nvvm_sust_b_1d_i16_zero
+      {Intrinsic::nvvm_sust_b_1d_i32_clamp, 80300}, // __nvvm_sust_b_1d_i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_i32_trap, 80327}, // __nvvm_sust_b_1d_i32_trap
+      {Intrinsic::nvvm_sust_b_1d_i32_zero, 80353}, // __nvvm_sust_b_1d_i32_zero
+      {Intrinsic::nvvm_sust_b_1d_i64_clamp, 80379}, // __nvvm_sust_b_1d_i64_clamp
+      {Intrinsic::nvvm_sust_b_1d_i64_trap, 80406}, // __nvvm_sust_b_1d_i64_trap
+      {Intrinsic::nvvm_sust_b_1d_i64_zero, 80432}, // __nvvm_sust_b_1d_i64_zero
+      {Intrinsic::nvvm_sust_b_1d_i8_clamp, 80458}, // __nvvm_sust_b_1d_i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_i8_trap, 80484}, // __nvvm_sust_b_1d_i8_trap
+      {Intrinsic::nvvm_sust_b_1d_i8_zero, 80509}, // __nvvm_sust_b_1d_i8_zero
+      {Intrinsic::nvvm_sust_b_1d_v2i16_clamp, 80534}, // __nvvm_sust_b_1d_v2i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_v2i16_trap, 80563}, // __nvvm_sust_b_1d_v2i16_trap
+      {Intrinsic::nvvm_sust_b_1d_v2i16_zero, 80591}, // __nvvm_sust_b_1d_v2i16_zero
+      {Intrinsic::nvvm_sust_b_1d_v2i32_clamp, 80619}, // __nvvm_sust_b_1d_v2i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_v2i32_trap, 80648}, // __nvvm_sust_b_1d_v2i32_trap
+      {Intrinsic::nvvm_sust_b_1d_v2i32_zero, 80676}, // __nvvm_sust_b_1d_v2i32_zero
+      {Intrinsic::nvvm_sust_b_1d_v2i64_clamp, 80704}, // __nvvm_sust_b_1d_v2i64_clamp
+      {Intrinsic::nvvm_sust_b_1d_v2i64_trap, 80733}, // __nvvm_sust_b_1d_v2i64_trap
+      {Intrinsic::nvvm_sust_b_1d_v2i64_zero, 80761}, // __nvvm_sust_b_1d_v2i64_zero
+      {Intrinsic::nvvm_sust_b_1d_v2i8_clamp, 80789}, // __nvvm_sust_b_1d_v2i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_v2i8_trap, 80817}, // __nvvm_sust_b_1d_v2i8_trap
+      {Intrinsic::nvvm_sust_b_1d_v2i8_zero, 80844}, // __nvvm_sust_b_1d_v2i8_zero
+      {Intrinsic::nvvm_sust_b_1d_v4i16_clamp, 80871}, // __nvvm_sust_b_1d_v4i16_clamp
+      {Intrinsic::nvvm_sust_b_1d_v4i16_trap, 80900}, // __nvvm_sust_b_1d_v4i16_trap
+      {Intrinsic::nvvm_sust_b_1d_v4i16_zero, 80928}, // __nvvm_sust_b_1d_v4i16_zero
+      {Intrinsic::nvvm_sust_b_1d_v4i32_clamp, 80956}, // __nvvm_sust_b_1d_v4i32_clamp
+      {Intrinsic::nvvm_sust_b_1d_v4i32_trap, 80985}, // __nvvm_sust_b_1d_v4i32_trap
+      {Intrinsic::nvvm_sust_b_1d_v4i32_zero, 81013}, // __nvvm_sust_b_1d_v4i32_zero
+      {Intrinsic::nvvm_sust_b_1d_v4i8_clamp, 81041}, // __nvvm_sust_b_1d_v4i8_clamp
+      {Intrinsic::nvvm_sust_b_1d_v4i8_trap, 81069}, // __nvvm_sust_b_1d_v4i8_trap
+      {Intrinsic::nvvm_sust_b_1d_v4i8_zero, 81096}, // __nvvm_sust_b_1d_v4i8_zero
+      {Intrinsic::nvvm_sust_b_2d_array_i16_clamp, 81123}, // __nvvm_sust_b_2d_array_i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_i16_trap, 81156}, // __nvvm_sust_b_2d_array_i16_trap
+      {Intrinsic::nvvm_sust_b_2d_array_i16_zero, 81188}, // __nvvm_sust_b_2d_array_i16_zero
+      {Intrinsic::nvvm_sust_b_2d_array_i32_clamp, 81220}, // __nvvm_sust_b_2d_array_i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_i32_trap, 81253}, // __nvvm_sust_b_2d_array_i32_trap
+      {Intrinsic::nvvm_sust_b_2d_array_i32_zero, 81285}, // __nvvm_sust_b_2d_array_i32_zero
+      {Intrinsic::nvvm_sust_b_2d_array_i64_clamp, 81317}, // __nvvm_sust_b_2d_array_i64_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_i64_trap, 81350}, // __nvvm_sust_b_2d_array_i64_trap
+      {Intrinsic::nvvm_sust_b_2d_array_i64_zero, 81382}, // __nvvm_sust_b_2d_array_i64_zero
+      {Intrinsic::nvvm_sust_b_2d_array_i8_clamp, 81414}, // __nvvm_sust_b_2d_array_i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_i8_trap, 81446}, // __nvvm_sust_b_2d_array_i8_trap
+      {Intrinsic::nvvm_sust_b_2d_array_i8_zero, 81477}, // __nvvm_sust_b_2d_array_i8_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v2i16_clamp, 81508}, // __nvvm_sust_b_2d_array_v2i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v2i16_trap, 81543}, // __nvvm_sust_b_2d_array_v2i16_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v2i16_zero, 81577}, // __nvvm_sust_b_2d_array_v2i16_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v2i32_clamp, 81611}, // __nvvm_sust_b_2d_array_v2i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v2i32_trap, 81646}, // __nvvm_sust_b_2d_array_v2i32_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v2i32_zero, 81680}, // __nvvm_sust_b_2d_array_v2i32_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v2i64_clamp, 81714}, // __nvvm_sust_b_2d_array_v2i64_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v2i64_trap, 81749}, // __nvvm_sust_b_2d_array_v2i64_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v2i64_zero, 81783}, // __nvvm_sust_b_2d_array_v2i64_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v2i8_clamp, 81817}, // __nvvm_sust_b_2d_array_v2i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v2i8_trap, 81851}, // __nvvm_sust_b_2d_array_v2i8_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v2i8_zero, 81884}, // __nvvm_sust_b_2d_array_v2i8_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v4i16_clamp, 81917}, // __nvvm_sust_b_2d_array_v4i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v4i16_trap, 81952}, // __nvvm_sust_b_2d_array_v4i16_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v4i16_zero, 81986}, // __nvvm_sust_b_2d_array_v4i16_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v4i32_clamp, 82020}, // __nvvm_sust_b_2d_array_v4i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v4i32_trap, 82055}, // __nvvm_sust_b_2d_array_v4i32_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v4i32_zero, 82089}, // __nvvm_sust_b_2d_array_v4i32_zero
+      {Intrinsic::nvvm_sust_b_2d_array_v4i8_clamp, 82123}, // __nvvm_sust_b_2d_array_v4i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_array_v4i8_trap, 82157}, // __nvvm_sust_b_2d_array_v4i8_trap
+      {Intrinsic::nvvm_sust_b_2d_array_v4i8_zero, 82190}, // __nvvm_sust_b_2d_array_v4i8_zero
+      {Intrinsic::nvvm_sust_b_2d_i16_clamp, 82223}, // __nvvm_sust_b_2d_i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_i16_trap, 82250}, // __nvvm_sust_b_2d_i16_trap
+      {Intrinsic::nvvm_sust_b_2d_i16_zero, 82276}, // __nvvm_sust_b_2d_i16_zero
+      {Intrinsic::nvvm_sust_b_2d_i32_clamp, 82302}, // __nvvm_sust_b_2d_i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_i32_trap, 82329}, // __nvvm_sust_b_2d_i32_trap
+      {Intrinsic::nvvm_sust_b_2d_i32_zero, 82355}, // __nvvm_sust_b_2d_i32_zero
+      {Intrinsic::nvvm_sust_b_2d_i64_clamp, 82381}, // __nvvm_sust_b_2d_i64_clamp
+      {Intrinsic::nvvm_sust_b_2d_i64_trap, 82408}, // __nvvm_sust_b_2d_i64_trap
+      {Intrinsic::nvvm_sust_b_2d_i64_zero, 82434}, // __nvvm_sust_b_2d_i64_zero
+      {Intrinsic::nvvm_sust_b_2d_i8_clamp, 82460}, // __nvvm_sust_b_2d_i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_i8_trap, 82486}, // __nvvm_sust_b_2d_i8_trap
+      {Intrinsic::nvvm_sust_b_2d_i8_zero, 82511}, // __nvvm_sust_b_2d_i8_zero
+      {Intrinsic::nvvm_sust_b_2d_v2i16_clamp, 82536}, // __nvvm_sust_b_2d_v2i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_v2i16_trap, 82565}, // __nvvm_sust_b_2d_v2i16_trap
+      {Intrinsic::nvvm_sust_b_2d_v2i16_zero, 82593}, // __nvvm_sust_b_2d_v2i16_zero
+      {Intrinsic::nvvm_sust_b_2d_v2i32_clamp, 82621}, // __nvvm_sust_b_2d_v2i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_v2i32_trap, 82650}, // __nvvm_sust_b_2d_v2i32_trap
+      {Intrinsic::nvvm_sust_b_2d_v2i32_zero, 82678}, // __nvvm_sust_b_2d_v2i32_zero
+      {Intrinsic::nvvm_sust_b_2d_v2i64_clamp, 82706}, // __nvvm_sust_b_2d_v2i64_clamp
+      {Intrinsic::nvvm_sust_b_2d_v2i64_trap, 82735}, // __nvvm_sust_b_2d_v2i64_trap
+      {Intrinsic::nvvm_sust_b_2d_v2i64_zero, 82763}, // __nvvm_sust_b_2d_v2i64_zero
+      {Intrinsic::nvvm_sust_b_2d_v2i8_clamp, 82791}, // __nvvm_sust_b_2d_v2i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_v2i8_trap, 82819}, // __nvvm_sust_b_2d_v2i8_trap
+      {Intrinsic::nvvm_sust_b_2d_v2i8_zero, 82846}, // __nvvm_sust_b_2d_v2i8_zero
+      {Intrinsic::nvvm_sust_b_2d_v4i16_clamp, 82873}, // __nvvm_sust_b_2d_v4i16_clamp
+      {Intrinsic::nvvm_sust_b_2d_v4i16_trap, 82902}, // __nvvm_sust_b_2d_v4i16_trap
+      {Intrinsic::nvvm_sust_b_2d_v4i16_zero, 82930}, // __nvvm_sust_b_2d_v4i16_zero
+      {Intrinsic::nvvm_sust_b_2d_v4i32_clamp, 82958}, // __nvvm_sust_b_2d_v4i32_clamp
+      {Intrinsic::nvvm_sust_b_2d_v4i32_trap, 82987}, // __nvvm_sust_b_2d_v4i32_trap
+      {Intrinsic::nvvm_sust_b_2d_v4i32_zero, 83015}, // __nvvm_sust_b_2d_v4i32_zero
+      {Intrinsic::nvvm_sust_b_2d_v4i8_clamp, 83043}, // __nvvm_sust_b_2d_v4i8_clamp
+      {Intrinsic::nvvm_sust_b_2d_v4i8_trap, 83071}, // __nvvm_sust_b_2d_v4i8_trap
+      {Intrinsic::nvvm_sust_b_2d_v4i8_zero, 83098}, // __nvvm_sust_b_2d_v4i8_zero
+      {Intrinsic::nvvm_sust_b_3d_i16_clamp, 83125}, // __nvvm_sust_b_3d_i16_clamp
+      {Intrinsic::nvvm_sust_b_3d_i16_trap, 83152}, // __nvvm_sust_b_3d_i16_trap
+      {Intrinsic::nvvm_sust_b_3d_i16_zero, 83178}, // __nvvm_sust_b_3d_i16_zero
+      {Intrinsic::nvvm_sust_b_3d_i32_clamp, 83204}, // __nvvm_sust_b_3d_i32_clamp
+      {Intrinsic::nvvm_sust_b_3d_i32_trap, 83231}, // __nvvm_sust_b_3d_i32_trap
+      {Intrinsic::nvvm_sust_b_3d_i32_zero, 83257}, // __nvvm_sust_b_3d_i32_zero
+      {Intrinsic::nvvm_sust_b_3d_i64_clamp, 83283}, // __nvvm_sust_b_3d_i64_clamp
+      {Intrinsic::nvvm_sust_b_3d_i64_trap, 83310}, // __nvvm_sust_b_3d_i64_trap
+      {Intrinsic::nvvm_sust_b_3d_i64_zero, 83336}, // __nvvm_sust_b_3d_i64_zero
+      {Intrinsic::nvvm_sust_b_3d_i8_clamp, 83362}, // __nvvm_sust_b_3d_i8_clamp
+      {Intrinsic::nvvm_sust_b_3d_i8_trap, 83388}, // __nvvm_sust_b_3d_i8_trap
+      {Intrinsic::nvvm_sust_b_3d_i8_zero, 83413}, // __nvvm_sust_b_3d_i8_zero
+      {Intrinsic::nvvm_sust_b_3d_v2i16_clamp, 83438}, // __nvvm_sust_b_3d_v2i16_clamp
+      {Intrinsic::nvvm_sust_b_3d_v2i16_trap, 83467}, // __nvvm_sust_b_3d_v2i16_trap
+      {Intrinsic::nvvm_sust_b_3d_v2i16_zero, 83495}, // __nvvm_sust_b_3d_v2i16_zero
+      {Intrinsic::nvvm_sust_b_3d_v2i32_clamp, 83523}, // __nvvm_sust_b_3d_v2i32_clamp
+      {Intrinsic::nvvm_sust_b_3d_v2i32_trap, 83552}, // __nvvm_sust_b_3d_v2i32_trap
+      {Intrinsic::nvvm_sust_b_3d_v2i32_zero, 83580}, // __nvvm_sust_b_3d_v2i32_zero
+      {Intrinsic::nvvm_sust_b_3d_v2i64_clamp, 83608}, // __nvvm_sust_b_3d_v2i64_clamp
+      {Intrinsic::nvvm_sust_b_3d_v2i64_trap, 83637}, // __nvvm_sust_b_3d_v2i64_trap
+      {Intrinsic::nvvm_sust_b_3d_v2i64_zero, 83665}, // __nvvm_sust_b_3d_v2i64_zero
+      {Intrinsic::nvvm_sust_b_3d_v2i8_clamp, 83693}, // __nvvm_sust_b_3d_v2i8_clamp
+      {Intrinsic::nvvm_sust_b_3d_v2i8_trap, 83721}, // __nvvm_sust_b_3d_v2i8_trap
+      {Intrinsic::nvvm_sust_b_3d_v2i8_zero, 83748}, // __nvvm_sust_b_3d_v2i8_zero
+      {Intrinsic::nvvm_sust_b_3d_v4i16_clamp, 83775}, // __nvvm_sust_b_3d_v4i16_clamp
+      {Intrinsic::nvvm_sust_b_3d_v4i16_trap, 83804}, // __nvvm_sust_b_3d_v4i16_trap
+      {Intrinsic::nvvm_sust_b_3d_v4i16_zero, 83832}, // __nvvm_sust_b_3d_v4i16_zero
+      {Intrinsic::nvvm_sust_b_3d_v4i32_clamp, 83860}, // __nvvm_sust_b_3d_v4i32_clamp
+      {Intrinsic::nvvm_sust_b_3d_v4i32_trap, 83889}, // __nvvm_sust_b_3d_v4i32_trap
+      {Intrinsic::nvvm_sust_b_3d_v4i32_zero, 83917}, // __nvvm_sust_b_3d_v4i32_zero
+      {Intrinsic::nvvm_sust_b_3d_v4i8_clamp, 83945}, // __nvvm_sust_b_3d_v4i8_clamp
+      {Intrinsic::nvvm_sust_b_3d_v4i8_trap, 83973}, // __nvvm_sust_b_3d_v4i8_trap
+      {Intrinsic::nvvm_sust_b_3d_v4i8_zero, 84000}, // __nvvm_sust_b_3d_v4i8_zero
+      {Intrinsic::nvvm_sust_p_1d_array_i16_trap, 84027}, // __nvvm_sust_p_1d_array_i16_trap
+      {Intrinsic::nvvm_sust_p_1d_array_i32_trap, 84059}, // __nvvm_sust_p_1d_array_i32_trap
+      {Intrinsic::nvvm_sust_p_1d_array_i8_trap, 84091}, // __nvvm_sust_p_1d_array_i8_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v2i16_trap, 84122}, // __nvvm_sust_p_1d_array_v2i16_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v2i32_trap, 84156}, // __nvvm_sust_p_1d_array_v2i32_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v2i8_trap, 84190}, // __nvvm_sust_p_1d_array_v2i8_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v4i16_trap, 84223}, // __nvvm_sust_p_1d_array_v4i16_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v4i32_trap, 84257}, // __nvvm_sust_p_1d_array_v4i32_trap
+      {Intrinsic::nvvm_sust_p_1d_array_v4i8_trap, 84291}, // __nvvm_sust_p_1d_array_v4i8_trap
+      {Intrinsic::nvvm_sust_p_1d_i16_trap, 84324}, // __nvvm_sust_p_1d_i16_trap
+      {Intrinsic::nvvm_sust_p_1d_i32_trap, 84350}, // __nvvm_sust_p_1d_i32_trap
+      {Intrinsic::nvvm_sust_p_1d_i8_trap, 84376}, // __nvvm_sust_p_1d_i8_trap
+      {Intrinsic::nvvm_sust_p_1d_v2i16_trap, 84401}, // __nvvm_sust_p_1d_v2i16_trap
+      {Intrinsic::nvvm_sust_p_1d_v2i32_trap, 84429}, // __nvvm_sust_p_1d_v2i32_trap
+      {Intrinsic::nvvm_sust_p_1d_v2i8_trap, 84457}, // __nvvm_sust_p_1d_v2i8_trap
+      {Intrinsic::nvvm_sust_p_1d_v4i16_trap, 84484}, // __nvvm_sust_p_1d_v4i16_trap
+      {Intrinsic::nvvm_sust_p_1d_v4i32_trap, 84512}, // __nvvm_sust_p_1d_v4i32_trap
+      {Intrinsic::nvvm_sust_p_1d_v4i8_trap, 84540}, // __nvvm_sust_p_1d_v4i8_trap
+      {Intrinsic::nvvm_sust_p_2d_array_i16_trap, 84567}, // __nvvm_sust_p_2d_array_i16_trap
+      {Intrinsic::nvvm_sust_p_2d_array_i32_trap, 84599}, // __nvvm_sust_p_2d_array_i32_trap
+      {Intrinsic::nvvm_sust_p_2d_array_i8_trap, 84631}, // __nvvm_sust_p_2d_array_i8_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v2i16_trap, 84662}, // __nvvm_sust_p_2d_array_v2i16_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v2i32_trap, 84696}, // __nvvm_sust_p_2d_array_v2i32_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v2i8_trap, 84730}, // __nvvm_sust_p_2d_array_v2i8_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v4i16_trap, 84763}, // __nvvm_sust_p_2d_array_v4i16_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v4i32_trap, 84797}, // __nvvm_sust_p_2d_array_v4i32_trap
+      {Intrinsic::nvvm_sust_p_2d_array_v4i8_trap, 84831}, // __nvvm_sust_p_2d_array_v4i8_trap
+      {Intrinsic::nvvm_sust_p_2d_i16_trap, 84864}, // __nvvm_sust_p_2d_i16_trap
+      {Intrinsic::nvvm_sust_p_2d_i32_trap, 84890}, // __nvvm_sust_p_2d_i32_trap
+      {Intrinsic::nvvm_sust_p_2d_i8_trap, 84916}, // __nvvm_sust_p_2d_i8_trap
+      {Intrinsic::nvvm_sust_p_2d_v2i16_trap, 84941}, // __nvvm_sust_p_2d_v2i16_trap
+      {Intrinsic::nvvm_sust_p_2d_v2i32_trap, 84969}, // __nvvm_sust_p_2d_v2i32_trap
+      {Intrinsic::nvvm_sust_p_2d_v2i8_trap, 84997}, // __nvvm_sust_p_2d_v2i8_trap
+      {Intrinsic::nvvm_sust_p_2d_v4i16_trap, 85024}, // __nvvm_sust_p_2d_v4i16_trap
+      {Intrinsic::nvvm_sust_p_2d_v4i32_trap, 85052}, // __nvvm_sust_p_2d_v4i32_trap
+      {Intrinsic::nvvm_sust_p_2d_v4i8_trap, 85080}, // __nvvm_sust_p_2d_v4i8_trap
+      {Intrinsic::nvvm_sust_p_3d_i16_trap, 85107}, // __nvvm_sust_p_3d_i16_trap
+      {Intrinsic::nvvm_sust_p_3d_i32_trap, 85133}, // __nvvm_sust_p_3d_i32_trap
+      {Intrinsic::nvvm_sust_p_3d_i8_trap, 85159}, // __nvvm_sust_p_3d_i8_trap
+      {Intrinsic::nvvm_sust_p_3d_v2i16_trap, 85184}, // __nvvm_sust_p_3d_v2i16_trap
+      {Intrinsic::nvvm_sust_p_3d_v2i32_trap, 85212}, // __nvvm_sust_p_3d_v2i32_trap
+      {Intrinsic::nvvm_sust_p_3d_v2i8_trap, 85240}, // __nvvm_sust_p_3d_v2i8_trap
+      {Intrinsic::nvvm_sust_p_3d_v4i16_trap, 85267}, // __nvvm_sust_p_3d_v4i16_trap
+      {Intrinsic::nvvm_sust_p_3d_v4i32_trap, 85295}, // __nvvm_sust_p_3d_v4i32_trap
+      {Intrinsic::nvvm_sust_p_3d_v4i8_trap, 85323}, // __nvvm_sust_p_3d_v4i8_trap
+      {Intrinsic::nvvm_swap_lo_hi_b64, 85350}, // __nvvm_swap_lo_hi_b64
+      {Intrinsic::nvvm_trunc_d, 85372}, // __nvvm_trunc_d
+      {Intrinsic::nvvm_trunc_f, 85387}, // __nvvm_trunc_f
+      {Intrinsic::nvvm_trunc_ftz_f, 85402}, // __nvvm_trunc_ftz_f
+      {Intrinsic::nvvm_txq_array_size, 85421}, // __nvvm_txq_array_size
+      {Intrinsic::nvvm_txq_channel_data_type, 85443}, // __nvvm_txq_channel_data_type
+      {Intrinsic::nvvm_txq_channel_order, 85472}, // __nvvm_txq_channel_order
+      {Intrinsic::nvvm_txq_depth, 85497}, // __nvvm_txq_depth
+      {Intrinsic::nvvm_txq_height, 85514}, // __nvvm_txq_height
+      {Intrinsic::nvvm_txq_num_mipmap_levels, 85532}, // __nvvm_txq_num_mipmap_levels
+      {Intrinsic::nvvm_txq_num_samples, 85561}, // __nvvm_txq_num_samples
+      {Intrinsic::nvvm_txq_width, 85584}, // __nvvm_txq_width
+      {Intrinsic::nvvm_ui2d_rm, 85601}, // __nvvm_ui2d_rm
+      {Intrinsic::nvvm_ui2d_rn, 85616}, // __nvvm_ui2d_rn
+      {Intrinsic::nvvm_ui2d_rp, 85631}, // __nvvm_ui2d_rp
+      {Intrinsic::nvvm_ui2d_rz, 85646}, // __nvvm_ui2d_rz
+      {Intrinsic::nvvm_ui2f_rm, 85661}, // __nvvm_ui2f_rm
+      {Intrinsic::nvvm_ui2f_rn, 85676}, // __nvvm_ui2f_rn
+      {Intrinsic::nvvm_ui2f_rp, 85691}, // __nvvm_ui2f_rp
+      {Intrinsic::nvvm_ui2f_rz, 85706}, // __nvvm_ui2f_rz
+      {Intrinsic::nvvm_ull2d_rm, 85721}, // __nvvm_ull2d_rm
+      {Intrinsic::nvvm_ull2d_rn, 85737}, // __nvvm_ull2d_rn
+      {Intrinsic::nvvm_ull2d_rp, 85753}, // __nvvm_ull2d_rp
+      {Intrinsic::nvvm_ull2d_rz, 85769}, // __nvvm_ull2d_rz
+      {Intrinsic::nvvm_ull2f_rm, 85785}, // __nvvm_ull2f_rm
+      {Intrinsic::nvvm_ull2f_rn, 85801}, // __nvvm_ull2f_rn
+      {Intrinsic::nvvm_ull2f_rp, 85817}, // __nvvm_ull2f_rp
+      {Intrinsic::nvvm_ull2f_rz, 85833}, // __nvvm_ull2f_rz
+      {Intrinsic::nvvm_vote_all, 85849}, // __nvvm_vote_all
+      {Intrinsic::nvvm_vote_all_sync, 85865}, // __nvvm_vote_all_sync
+      {Intrinsic::nvvm_vote_any, 85886}, // __nvvm_vote_any
+      {Intrinsic::nvvm_vote_any_sync, 85902}, // __nvvm_vote_any_sync
+      {Intrinsic::nvvm_vote_ballot, 85923}, // __nvvm_vote_ballot
+      {Intrinsic::nvvm_vote_ballot_sync, 85942}, // __nvvm_vote_ballot_sync
+      {Intrinsic::nvvm_vote_uni, 85966}, // __nvvm_vote_uni
+      {Intrinsic::nvvm_vote_uni_sync, 85982}, // __nvvm_vote_uni_sync
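+      // Note: __syncthreads is the builtin name mapped to nvvm_barrier0; its
+      // offset (73071) is lower than its neighbors' presumably because the
+      // shared name blob is laid out in a different order than this table,
+      // which is sorted by builtin name, not by offset.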
+      {Intrinsic::nvvm_barrier0, 73071}, // __syncthreads
+    };
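+    // nvvmNames is sorted by builtin name (the integer is an offset into
+    // the shared builtin-name string table), which is the precondition
+    // that makes the binary search below valid.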
+    auto I = std::lower_bound(std::begin(nvvmNames),
+                              std::end(nvvmNames),
+                              BuiltinNameStr);
+    if (I != std::end(nvvmNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "ppc") {
+    static const BuiltinEntry ppcNames[] = {
+      {Intrinsic::ppc_altivec_crypto_vcipher, 86003}, // __builtin_altivec_crypto_vcipher
+      {Intrinsic::ppc_altivec_crypto_vcipherlast, 86036}, // __builtin_altivec_crypto_vcipherlast
+      {Intrinsic::ppc_altivec_crypto_vncipher, 86073}, // __builtin_altivec_crypto_vncipher
+      {Intrinsic::ppc_altivec_crypto_vncipherlast, 86107}, // __builtin_altivec_crypto_vncipherlast
+      {Intrinsic::ppc_altivec_crypto_vpermxor, 86145}, // __builtin_altivec_crypto_vpermxor
+      {Intrinsic::ppc_altivec_crypto_vpmsumb, 86179}, // __builtin_altivec_crypto_vpmsumb
+      {Intrinsic::ppc_altivec_crypto_vpmsumd, 86212}, // __builtin_altivec_crypto_vpmsumd
+      {Intrinsic::ppc_altivec_crypto_vpmsumh, 86245}, // __builtin_altivec_crypto_vpmsumh
+      {Intrinsic::ppc_altivec_crypto_vpmsumw, 86278}, // __builtin_altivec_crypto_vpmsumw
+      {Intrinsic::ppc_altivec_crypto_vsbox, 86311}, // __builtin_altivec_crypto_vsbox
+      {Intrinsic::ppc_altivec_crypto_vshasigmad, 86342}, // __builtin_altivec_crypto_vshasigmad
+      {Intrinsic::ppc_altivec_crypto_vshasigmaw, 86378}, // __builtin_altivec_crypto_vshasigmaw
+      {Intrinsic::ppc_altivec_dss, 86414}, // __builtin_altivec_dss
+      {Intrinsic::ppc_altivec_dssall, 86436}, // __builtin_altivec_dssall
+      {Intrinsic::ppc_altivec_dst, 86461}, // __builtin_altivec_dst
+      {Intrinsic::ppc_altivec_dstst, 86483}, // __builtin_altivec_dstst
+      {Intrinsic::ppc_altivec_dststt, 86507}, // __builtin_altivec_dststt
+      {Intrinsic::ppc_altivec_dstt, 86532}, // __builtin_altivec_dstt
+      {Intrinsic::ppc_altivec_mfvscr, 86555}, // __builtin_altivec_mfvscr
+      {Intrinsic::ppc_altivec_mtvscr, 86580}, // __builtin_altivec_mtvscr
+      {Intrinsic::ppc_altivec_vabsdub, 86605}, // __builtin_altivec_vabsdub
+      {Intrinsic::ppc_altivec_vabsduh, 86631}, // __builtin_altivec_vabsduh
+      {Intrinsic::ppc_altivec_vabsduw, 86657}, // __builtin_altivec_vabsduw
+      {Intrinsic::ppc_altivec_vaddcuq, 86683}, // __builtin_altivec_vaddcuq
+      {Intrinsic::ppc_altivec_vaddcuw, 86709}, // __builtin_altivec_vaddcuw
+      {Intrinsic::ppc_altivec_vaddecuq, 86735}, // __builtin_altivec_vaddecuq
+      {Intrinsic::ppc_altivec_vaddeuqm, 86762}, // __builtin_altivec_vaddeuqm
+      {Intrinsic::ppc_altivec_vaddsbs, 86789}, // __builtin_altivec_vaddsbs
+      {Intrinsic::ppc_altivec_vaddshs, 86815}, // __builtin_altivec_vaddshs
+      {Intrinsic::ppc_altivec_vaddsws, 86841}, // __builtin_altivec_vaddsws
+      {Intrinsic::ppc_altivec_vaddubs, 86867}, // __builtin_altivec_vaddubs
+      {Intrinsic::ppc_altivec_vadduhs, 86893}, // __builtin_altivec_vadduhs
+      {Intrinsic::ppc_altivec_vadduws, 86919}, // __builtin_altivec_vadduws
+      {Intrinsic::ppc_altivec_vavgsb, 86945}, // __builtin_altivec_vavgsb
+      {Intrinsic::ppc_altivec_vavgsh, 86970}, // __builtin_altivec_vavgsh
+      {Intrinsic::ppc_altivec_vavgsw, 86995}, // __builtin_altivec_vavgsw
+      {Intrinsic::ppc_altivec_vavgub, 87020}, // __builtin_altivec_vavgub
+      {Intrinsic::ppc_altivec_vavguh, 87045}, // __builtin_altivec_vavguh
+      {Intrinsic::ppc_altivec_vavguw, 87070}, // __builtin_altivec_vavguw
+      {Intrinsic::ppc_altivec_vbpermq, 87095}, // __builtin_altivec_vbpermq
+      {Intrinsic::ppc_altivec_vcfsx, 87121}, // __builtin_altivec_vcfsx
+      {Intrinsic::ppc_altivec_vcfux, 87145}, // __builtin_altivec_vcfux
+      {Intrinsic::ppc_altivec_vclzlsbb, 87169}, // __builtin_altivec_vclzlsbb
+      {Intrinsic::ppc_altivec_vcmpbfp, 87196}, // __builtin_altivec_vcmpbfp
+      {Intrinsic::ppc_altivec_vcmpbfp_p, 87222}, // __builtin_altivec_vcmpbfp_p
+      {Intrinsic::ppc_altivec_vcmpeqfp, 87250}, // __builtin_altivec_vcmpeqfp
+      {Intrinsic::ppc_altivec_vcmpeqfp_p, 87277}, // __builtin_altivec_vcmpeqfp_p
+      {Intrinsic::ppc_altivec_vcmpequb, 87306}, // __builtin_altivec_vcmpequb
+      {Intrinsic::ppc_altivec_vcmpequb_p, 87333}, // __builtin_altivec_vcmpequb_p
+      {Intrinsic::ppc_altivec_vcmpequd, 87362}, // __builtin_altivec_vcmpequd
+      {Intrinsic::ppc_altivec_vcmpequd_p, 87389}, // __builtin_altivec_vcmpequd_p
+      {Intrinsic::ppc_altivec_vcmpequh, 87418}, // __builtin_altivec_vcmpequh
+      {Intrinsic::ppc_altivec_vcmpequh_p, 87445}, // __builtin_altivec_vcmpequh_p
+      {Intrinsic::ppc_altivec_vcmpequw, 87474}, // __builtin_altivec_vcmpequw
+      {Intrinsic::ppc_altivec_vcmpequw_p, 87501}, // __builtin_altivec_vcmpequw_p
+      {Intrinsic::ppc_altivec_vcmpgefp, 87530}, // __builtin_altivec_vcmpgefp
+      {Intrinsic::ppc_altivec_vcmpgefp_p, 87557}, // __builtin_altivec_vcmpgefp_p
+      {Intrinsic::ppc_altivec_vcmpgtfp, 87586}, // __builtin_altivec_vcmpgtfp
+      {Intrinsic::ppc_altivec_vcmpgtfp_p, 87613}, // __builtin_altivec_vcmpgtfp_p
+      {Intrinsic::ppc_altivec_vcmpgtsb, 87642}, // __builtin_altivec_vcmpgtsb
+      {Intrinsic::ppc_altivec_vcmpgtsb_p, 87669}, // __builtin_altivec_vcmpgtsb_p
+      {Intrinsic::ppc_altivec_vcmpgtsd, 87698}, // __builtin_altivec_vcmpgtsd
+      {Intrinsic::ppc_altivec_vcmpgtsd_p, 87725}, // __builtin_altivec_vcmpgtsd_p
+      {Intrinsic::ppc_altivec_vcmpgtsh, 87754}, // __builtin_altivec_vcmpgtsh
+      {Intrinsic::ppc_altivec_vcmpgtsh_p, 87781}, // __builtin_altivec_vcmpgtsh_p
+      {Intrinsic::ppc_altivec_vcmpgtsw, 87810}, // __builtin_altivec_vcmpgtsw
+      {Intrinsic::ppc_altivec_vcmpgtsw_p, 87837}, // __builtin_altivec_vcmpgtsw_p
+      {Intrinsic::ppc_altivec_vcmpgtub, 87866}, // __builtin_altivec_vcmpgtub
+      {Intrinsic::ppc_altivec_vcmpgtub_p, 87893}, // __builtin_altivec_vcmpgtub_p
+      {Intrinsic::ppc_altivec_vcmpgtud, 87922}, // __builtin_altivec_vcmpgtud
+      {Intrinsic::ppc_altivec_vcmpgtud_p, 87949}, // __builtin_altivec_vcmpgtud_p
+      {Intrinsic::ppc_altivec_vcmpgtuh, 87978}, // __builtin_altivec_vcmpgtuh
+      {Intrinsic::ppc_altivec_vcmpgtuh_p, 88005}, // __builtin_altivec_vcmpgtuh_p
+      {Intrinsic::ppc_altivec_vcmpgtuw, 88034}, // __builtin_altivec_vcmpgtuw
+      {Intrinsic::ppc_altivec_vcmpgtuw_p, 88061}, // __builtin_altivec_vcmpgtuw_p
+      {Intrinsic::ppc_altivec_vcmpneb, 88090}, // __builtin_altivec_vcmpneb
+      {Intrinsic::ppc_altivec_vcmpneb_p, 88116}, // __builtin_altivec_vcmpneb_p
+      {Intrinsic::ppc_altivec_vcmpneh, 88144}, // __builtin_altivec_vcmpneh
+      {Intrinsic::ppc_altivec_vcmpneh_p, 88170}, // __builtin_altivec_vcmpneh_p
+      {Intrinsic::ppc_altivec_vcmpnew, 88198}, // __builtin_altivec_vcmpnew
+      {Intrinsic::ppc_altivec_vcmpnew_p, 88224}, // __builtin_altivec_vcmpnew_p
+      {Intrinsic::ppc_altivec_vcmpnezb, 88252}, // __builtin_altivec_vcmpnezb
+      {Intrinsic::ppc_altivec_vcmpnezb_p, 88279}, // __builtin_altivec_vcmpnezb_p
+      {Intrinsic::ppc_altivec_vcmpnezh, 88308}, // __builtin_altivec_vcmpnezh
+      {Intrinsic::ppc_altivec_vcmpnezh_p, 88335}, // __builtin_altivec_vcmpnezh_p
+      {Intrinsic::ppc_altivec_vcmpnezw, 88364}, // __builtin_altivec_vcmpnezw
+      {Intrinsic::ppc_altivec_vcmpnezw_p, 88391}, // __builtin_altivec_vcmpnezw_p
+      {Intrinsic::ppc_altivec_vctsxs, 88420}, // __builtin_altivec_vctsxs
+      {Intrinsic::ppc_altivec_vctuxs, 88445}, // __builtin_altivec_vctuxs
+      {Intrinsic::ppc_altivec_vctzlsbb, 88470}, // __builtin_altivec_vctzlsbb
+      {Intrinsic::ppc_altivec_vexptefp, 88497}, // __builtin_altivec_vexptefp
+      {Intrinsic::ppc_altivec_vgbbd, 88524}, // __builtin_altivec_vgbbd
+      {Intrinsic::ppc_altivec_vlogefp, 88548}, // __builtin_altivec_vlogefp
+      {Intrinsic::ppc_altivec_vmaddfp, 88574}, // __builtin_altivec_vmaddfp
+      {Intrinsic::ppc_altivec_vmaxfp, 88600}, // __builtin_altivec_vmaxfp
+      {Intrinsic::ppc_altivec_vmaxsb, 88625}, // __builtin_altivec_vmaxsb
+      {Intrinsic::ppc_altivec_vmaxsd, 88650}, // __builtin_altivec_vmaxsd
+      {Intrinsic::ppc_altivec_vmaxsh, 88675}, // __builtin_altivec_vmaxsh
+      {Intrinsic::ppc_altivec_vmaxsw, 88700}, // __builtin_altivec_vmaxsw
+      {Intrinsic::ppc_altivec_vmaxub, 88725}, // __builtin_altivec_vmaxub
+      {Intrinsic::ppc_altivec_vmaxud, 88750}, // __builtin_altivec_vmaxud
+      {Intrinsic::ppc_altivec_vmaxuh, 88775}, // __builtin_altivec_vmaxuh
+      {Intrinsic::ppc_altivec_vmaxuw, 88800}, // __builtin_altivec_vmaxuw
+      {Intrinsic::ppc_altivec_vmhaddshs, 88825}, // __builtin_altivec_vmhaddshs
+      {Intrinsic::ppc_altivec_vmhraddshs, 88853}, // __builtin_altivec_vmhraddshs
+      {Intrinsic::ppc_altivec_vminfp, 88882}, // __builtin_altivec_vminfp
+      {Intrinsic::ppc_altivec_vminsb, 88907}, // __builtin_altivec_vminsb
+      {Intrinsic::ppc_altivec_vminsd, 88932}, // __builtin_altivec_vminsd
+      {Intrinsic::ppc_altivec_vminsh, 88957}, // __builtin_altivec_vminsh
+      {Intrinsic::ppc_altivec_vminsw, 88982}, // __builtin_altivec_vminsw
+      {Intrinsic::ppc_altivec_vminub, 89007}, // __builtin_altivec_vminub
+      {Intrinsic::ppc_altivec_vminud, 89032}, // __builtin_altivec_vminud
+      {Intrinsic::ppc_altivec_vminuh, 89057}, // __builtin_altivec_vminuh
+      {Intrinsic::ppc_altivec_vminuw, 89082}, // __builtin_altivec_vminuw
+      {Intrinsic::ppc_altivec_vmladduhm, 89107}, // __builtin_altivec_vmladduhm
+      {Intrinsic::ppc_altivec_vmsummbm, 89135}, // __builtin_altivec_vmsummbm
+      {Intrinsic::ppc_altivec_vmsumshm, 89162}, // __builtin_altivec_vmsumshm
+      {Intrinsic::ppc_altivec_vmsumshs, 89189}, // __builtin_altivec_vmsumshs
+      {Intrinsic::ppc_altivec_vmsumubm, 89216}, // __builtin_altivec_vmsumubm
+      {Intrinsic::ppc_altivec_vmsumuhm, 89243}, // __builtin_altivec_vmsumuhm
+      {Intrinsic::ppc_altivec_vmsumuhs, 89270}, // __builtin_altivec_vmsumuhs
+      {Intrinsic::ppc_altivec_vmulesb, 89297}, // __builtin_altivec_vmulesb
+      {Intrinsic::ppc_altivec_vmulesh, 89323}, // __builtin_altivec_vmulesh
+      {Intrinsic::ppc_altivec_vmulesw, 89349}, // __builtin_altivec_vmulesw
+      {Intrinsic::ppc_altivec_vmuleub, 89375}, // __builtin_altivec_vmuleub
+      {Intrinsic::ppc_altivec_vmuleuh, 89401}, // __builtin_altivec_vmuleuh
+      {Intrinsic::ppc_altivec_vmuleuw, 89427}, // __builtin_altivec_vmuleuw
+      {Intrinsic::ppc_altivec_vmulosb, 89453}, // __builtin_altivec_vmulosb
+      {Intrinsic::ppc_altivec_vmulosh, 89479}, // __builtin_altivec_vmulosh
+      {Intrinsic::ppc_altivec_vmulosw, 89505}, // __builtin_altivec_vmulosw
+      {Intrinsic::ppc_altivec_vmuloub, 89531}, // __builtin_altivec_vmuloub
+      {Intrinsic::ppc_altivec_vmulouh, 89557}, // __builtin_altivec_vmulouh
+      {Intrinsic::ppc_altivec_vmulouw, 89583}, // __builtin_altivec_vmulouw
+      {Intrinsic::ppc_altivec_vnmsubfp, 89609}, // __builtin_altivec_vnmsubfp
+      {Intrinsic::ppc_altivec_vperm, 89636}, // __builtin_altivec_vperm_4si
+      {Intrinsic::ppc_altivec_vpkpx, 89664}, // __builtin_altivec_vpkpx
+      {Intrinsic::ppc_altivec_vpksdss, 89688}, // __builtin_altivec_vpksdss
+      {Intrinsic::ppc_altivec_vpksdus, 89714}, // __builtin_altivec_vpksdus
+      {Intrinsic::ppc_altivec_vpkshss, 89740}, // __builtin_altivec_vpkshss
+      {Intrinsic::ppc_altivec_vpkshus, 89766}, // __builtin_altivec_vpkshus
+      {Intrinsic::ppc_altivec_vpkswss, 89792}, // __builtin_altivec_vpkswss
+      {Intrinsic::ppc_altivec_vpkswus, 89818}, // __builtin_altivec_vpkswus
+      {Intrinsic::ppc_altivec_vpkudus, 89844}, // __builtin_altivec_vpkudus
+      {Intrinsic::ppc_altivec_vpkuhus, 89870}, // __builtin_altivec_vpkuhus
+      {Intrinsic::ppc_altivec_vpkuwus, 89896}, // __builtin_altivec_vpkuwus
+      {Intrinsic::ppc_altivec_vprtybd, 89922}, // __builtin_altivec_vprtybd
+      {Intrinsic::ppc_altivec_vprtybq, 89948}, // __builtin_altivec_vprtybq
+      {Intrinsic::ppc_altivec_vprtybw, 89974}, // __builtin_altivec_vprtybw
+      {Intrinsic::ppc_altivec_vrefp, 90000}, // __builtin_altivec_vrefp
+      {Intrinsic::ppc_altivec_vrfim, 90024}, // __builtin_altivec_vrfim
+      {Intrinsic::ppc_altivec_vrfin, 90048}, // __builtin_altivec_vrfin
+      {Intrinsic::ppc_altivec_vrfip, 90072}, // __builtin_altivec_vrfip
+      {Intrinsic::ppc_altivec_vrfiz, 90096}, // __builtin_altivec_vrfiz
+      {Intrinsic::ppc_altivec_vrlb, 90120}, // __builtin_altivec_vrlb
+      {Intrinsic::ppc_altivec_vrld, 90143}, // __builtin_altivec_vrld
+      {Intrinsic::ppc_altivec_vrldmi, 90166}, // __builtin_altivec_vrldmi
+      {Intrinsic::ppc_altivec_vrldnm, 90191}, // __builtin_altivec_vrldnm
+      {Intrinsic::ppc_altivec_vrlh, 90216}, // __builtin_altivec_vrlh
+      {Intrinsic::ppc_altivec_vrlw, 90239}, // __builtin_altivec_vrlw
+      {Intrinsic::ppc_altivec_vrlwmi, 90262}, // __builtin_altivec_vrlwmi
+      {Intrinsic::ppc_altivec_vrlwnm, 90287}, // __builtin_altivec_vrlwnm
+      {Intrinsic::ppc_altivec_vrsqrtefp, 90312}, // __builtin_altivec_vrsqrtefp
+      {Intrinsic::ppc_altivec_vsel, 90340}, // __builtin_altivec_vsel_4si
+      {Intrinsic::ppc_altivec_vsl, 90367}, // __builtin_altivec_vsl
+      {Intrinsic::ppc_altivec_vslb, 90389}, // __builtin_altivec_vslb
+      {Intrinsic::ppc_altivec_vslh, 90412}, // __builtin_altivec_vslh
+      {Intrinsic::ppc_altivec_vslo, 90435}, // __builtin_altivec_vslo
+      {Intrinsic::ppc_altivec_vslv, 90458}, // __builtin_altivec_vslv
+      {Intrinsic::ppc_altivec_vslw, 90481}, // __builtin_altivec_vslw
+      {Intrinsic::ppc_altivec_vsr, 90504}, // __builtin_altivec_vsr
+      {Intrinsic::ppc_altivec_vsrab, 90526}, // __builtin_altivec_vsrab
+      {Intrinsic::ppc_altivec_vsrah, 90550}, // __builtin_altivec_vsrah
+      {Intrinsic::ppc_altivec_vsraw, 90574}, // __builtin_altivec_vsraw
+      {Intrinsic::ppc_altivec_vsrb, 90598}, // __builtin_altivec_vsrb
+      {Intrinsic::ppc_altivec_vsrh, 90621}, // __builtin_altivec_vsrh
+      {Intrinsic::ppc_altivec_vsro, 90644}, // __builtin_altivec_vsro
+      {Intrinsic::ppc_altivec_vsrv, 90667}, // __builtin_altivec_vsrv
+      {Intrinsic::ppc_altivec_vsrw, 90690}, // __builtin_altivec_vsrw
+      {Intrinsic::ppc_altivec_vsubcuq, 90713}, // __builtin_altivec_vsubcuq
+      {Intrinsic::ppc_altivec_vsubcuw, 90739}, // __builtin_altivec_vsubcuw
+      {Intrinsic::ppc_altivec_vsubecuq, 90765}, // __builtin_altivec_vsubecuq
+      {Intrinsic::ppc_altivec_vsubeuqm, 90792}, // __builtin_altivec_vsubeuqm
+      {Intrinsic::ppc_altivec_vsubsbs, 90819}, // __builtin_altivec_vsubsbs
+      {Intrinsic::ppc_altivec_vsubshs, 90845}, // __builtin_altivec_vsubshs
+      {Intrinsic::ppc_altivec_vsubsws, 90871}, // __builtin_altivec_vsubsws
+      {Intrinsic::ppc_altivec_vsububs, 90897}, // __builtin_altivec_vsububs
+      {Intrinsic::ppc_altivec_vsubuhs, 90923}, // __builtin_altivec_vsubuhs
+      {Intrinsic::ppc_altivec_vsubuws, 90949}, // __builtin_altivec_vsubuws
+      {Intrinsic::ppc_altivec_vsum2sws, 90975}, // __builtin_altivec_vsum2sws
+      {Intrinsic::ppc_altivec_vsum4sbs, 91002}, // __builtin_altivec_vsum4sbs
+      {Intrinsic::ppc_altivec_vsum4shs, 91029}, // __builtin_altivec_vsum4shs
+      {Intrinsic::ppc_altivec_vsum4ubs, 91056}, // __builtin_altivec_vsum4ubs
+      {Intrinsic::ppc_altivec_vsumsws, 91083}, // __builtin_altivec_vsumsws
+      {Intrinsic::ppc_altivec_vupkhpx, 91109}, // __builtin_altivec_vupkhpx
+      {Intrinsic::ppc_altivec_vupkhsb, 91135}, // __builtin_altivec_vupkhsb
+      {Intrinsic::ppc_altivec_vupkhsh, 91161}, // __builtin_altivec_vupkhsh
+      {Intrinsic::ppc_altivec_vupkhsw, 91187}, // __builtin_altivec_vupkhsw
+      {Intrinsic::ppc_altivec_vupklpx, 91213}, // __builtin_altivec_vupklpx
+      {Intrinsic::ppc_altivec_vupklsb, 91239}, // __builtin_altivec_vupklsb
+      {Intrinsic::ppc_altivec_vupklsh, 91265}, // __builtin_altivec_vupklsh
+      {Intrinsic::ppc_altivec_vupklsw, 91291}, // __builtin_altivec_vupklsw
+      {Intrinsic::ppc_bpermd, 91317}, // __builtin_bpermd
+      {Intrinsic::ppc_divde, 91334}, // __builtin_divde
+      {Intrinsic::ppc_divdeu, 91350}, // __builtin_divdeu
+      {Intrinsic::ppc_divwe, 91367}, // __builtin_divwe
+      {Intrinsic::ppc_divweu, 91383}, // __builtin_divweu
+      {Intrinsic::ppc_get_texasr, 91400}, // __builtin_get_texasr
+      {Intrinsic::ppc_get_texasru, 91421}, // __builtin_get_texasru
+      {Intrinsic::ppc_get_tfhar, 91443}, // __builtin_get_tfhar
+      {Intrinsic::ppc_get_tfiar, 91463}, // __builtin_get_tfiar
+      {Intrinsic::ppc_qpx_qvfabs, 91483}, // __builtin_qpx_qvfabs
+      {Intrinsic::ppc_qpx_qvfadd, 91504}, // __builtin_qpx_qvfadd
+      {Intrinsic::ppc_qpx_qvfadds, 91525}, // __builtin_qpx_qvfadds
+      {Intrinsic::ppc_qpx_qvfcfid, 91547}, // __builtin_qpx_qvfcfid
+      {Intrinsic::ppc_qpx_qvfcfids, 91569}, // __builtin_qpx_qvfcfids
+      {Intrinsic::ppc_qpx_qvfcfidu, 91592}, // __builtin_qpx_qvfcfidu
+      {Intrinsic::ppc_qpx_qvfcfidus, 91615}, // __builtin_qpx_qvfcfidus
+      {Intrinsic::ppc_qpx_qvfcmpeq, 91639}, // __builtin_qpx_qvfcmpeq
+      {Intrinsic::ppc_qpx_qvfcmpgt, 91662}, // __builtin_qpx_qvfcmpgt
+      {Intrinsic::ppc_qpx_qvfcmplt, 91685}, // __builtin_qpx_qvfcmplt
+      {Intrinsic::ppc_qpx_qvfcpsgn, 91708}, // __builtin_qpx_qvfcpsgn
+      {Intrinsic::ppc_qpx_qvfctid, 91731}, // __builtin_qpx_qvfctid
+      {Intrinsic::ppc_qpx_qvfctidu, 91753}, // __builtin_qpx_qvfctidu
+      {Intrinsic::ppc_qpx_qvfctiduz, 91776}, // __builtin_qpx_qvfctiduz
+      {Intrinsic::ppc_qpx_qvfctidz, 91800}, // __builtin_qpx_qvfctidz
+      {Intrinsic::ppc_qpx_qvfctiw, 91823}, // __builtin_qpx_qvfctiw
+      {Intrinsic::ppc_qpx_qvfctiwu, 91845}, // __builtin_qpx_qvfctiwu
+      {Intrinsic::ppc_qpx_qvfctiwuz, 91868}, // __builtin_qpx_qvfctiwuz
+      {Intrinsic::ppc_qpx_qvfctiwz, 91892}, // __builtin_qpx_qvfctiwz
+      {Intrinsic::ppc_qpx_qvflogical, 91915}, // __builtin_qpx_qvflogical
+      {Intrinsic::ppc_qpx_qvfmadd, 91940}, // __builtin_qpx_qvfmadd
+      {Intrinsic::ppc_qpx_qvfmadds, 91962}, // __builtin_qpx_qvfmadds
+      {Intrinsic::ppc_qpx_qvfmsub, 91985}, // __builtin_qpx_qvfmsub
+      {Intrinsic::ppc_qpx_qvfmsubs, 92007}, // __builtin_qpx_qvfmsubs
+      {Intrinsic::ppc_qpx_qvfmul, 92030}, // __builtin_qpx_qvfmul
+      {Intrinsic::ppc_qpx_qvfmuls, 92051}, // __builtin_qpx_qvfmuls
+      {Intrinsic::ppc_qpx_qvfnabs, 92073}, // __builtin_qpx_qvfnabs
+      {Intrinsic::ppc_qpx_qvfneg, 92095}, // __builtin_qpx_qvfneg
+      {Intrinsic::ppc_qpx_qvfnmadd, 92116}, // __builtin_qpx_qvfnmadd
+      {Intrinsic::ppc_qpx_qvfnmadds, 92139}, // __builtin_qpx_qvfnmadds
+      {Intrinsic::ppc_qpx_qvfnmsub, 92163}, // __builtin_qpx_qvfnmsub
+      {Intrinsic::ppc_qpx_qvfnmsubs, 92186}, // __builtin_qpx_qvfnmsubs
+      {Intrinsic::ppc_qpx_qvfperm, 92210}, // __builtin_qpx_qvfperm
+      {Intrinsic::ppc_qpx_qvfre, 92232}, // __builtin_qpx_qvfre
+      {Intrinsic::ppc_qpx_qvfres, 92252}, // __builtin_qpx_qvfres
+      {Intrinsic::ppc_qpx_qvfrim, 92273}, // __builtin_qpx_qvfrim
+      {Intrinsic::ppc_qpx_qvfrin, 92294}, // __builtin_qpx_qvfrin
+      {Intrinsic::ppc_qpx_qvfrip, 92315}, // __builtin_qpx_qvfrip
+      {Intrinsic::ppc_qpx_qvfriz, 92336}, // __builtin_qpx_qvfriz
+      {Intrinsic::ppc_qpx_qvfrsp, 92357}, // __builtin_qpx_qvfrsp
+      {Intrinsic::ppc_qpx_qvfrsqrte, 92378}, // __builtin_qpx_qvfrsqrte
+      {Intrinsic::ppc_qpx_qvfrsqrtes, 92402}, // __builtin_qpx_qvfrsqrtes
+      {Intrinsic::ppc_qpx_qvfsel, 92427}, // __builtin_qpx_qvfsel
+      {Intrinsic::ppc_qpx_qvfsub, 92448}, // __builtin_qpx_qvfsub
+      {Intrinsic::ppc_qpx_qvfsubs, 92469}, // __builtin_qpx_qvfsubs
+      {Intrinsic::ppc_qpx_qvftstnan, 92491}, // __builtin_qpx_qvftstnan
+      {Intrinsic::ppc_qpx_qvfxmadd, 92515}, // __builtin_qpx_qvfxmadd
+      {Intrinsic::ppc_qpx_qvfxmadds, 92538}, // __builtin_qpx_qvfxmadds
+      {Intrinsic::ppc_qpx_qvfxmul, 92562}, // __builtin_qpx_qvfxmul
+      {Intrinsic::ppc_qpx_qvfxmuls, 92584}, // __builtin_qpx_qvfxmuls
+      {Intrinsic::ppc_qpx_qvfxxcpnmadd, 92607}, // __builtin_qpx_qvfxxcpnmadd
+      {Intrinsic::ppc_qpx_qvfxxcpnmadds, 92634}, // __builtin_qpx_qvfxxcpnmadds
+      {Intrinsic::ppc_qpx_qvfxxmadd, 92662}, // __builtin_qpx_qvfxxmadd
+      {Intrinsic::ppc_qpx_qvfxxmadds, 92686}, // __builtin_qpx_qvfxxmadds
+      {Intrinsic::ppc_qpx_qvfxxnpmadd, 92711}, // __builtin_qpx_qvfxxnpmadd
+      {Intrinsic::ppc_qpx_qvfxxnpmadds, 92737}, // __builtin_qpx_qvfxxnpmadds
+      {Intrinsic::ppc_qpx_qvgpci, 92764}, // __builtin_qpx_qvgpci
+      {Intrinsic::ppc_qpx_qvlfcd, 92785}, // __builtin_qpx_qvlfcd
+      {Intrinsic::ppc_qpx_qvlfcda, 92806}, // __builtin_qpx_qvlfcda
+      {Intrinsic::ppc_qpx_qvlfcs, 92828}, // __builtin_qpx_qvlfcs
+      {Intrinsic::ppc_qpx_qvlfcsa, 92849}, // __builtin_qpx_qvlfcsa
+      {Intrinsic::ppc_qpx_qvlfd, 92871}, // __builtin_qpx_qvlfd
+      {Intrinsic::ppc_qpx_qvlfda, 92891}, // __builtin_qpx_qvlfda
+      {Intrinsic::ppc_qpx_qvlfiwa, 92912}, // __builtin_qpx_qvlfiwa
+      {Intrinsic::ppc_qpx_qvlfiwaa, 92934}, // __builtin_qpx_qvlfiwaa
+      {Intrinsic::ppc_qpx_qvlfiwz, 92957}, // __builtin_qpx_qvlfiwz
+      {Intrinsic::ppc_qpx_qvlfiwza, 92979}, // __builtin_qpx_qvlfiwza
+      {Intrinsic::ppc_qpx_qvlfs, 93002}, // __builtin_qpx_qvlfs
+      {Intrinsic::ppc_qpx_qvlfsa, 93022}, // __builtin_qpx_qvlfsa
+      {Intrinsic::ppc_qpx_qvlpcld, 93043}, // __builtin_qpx_qvlpcld
+      {Intrinsic::ppc_qpx_qvlpcls, 93065}, // __builtin_qpx_qvlpcls
+      {Intrinsic::ppc_qpx_qvlpcrd, 93087}, // __builtin_qpx_qvlpcrd
+      {Intrinsic::ppc_qpx_qvlpcrs, 93109}, // __builtin_qpx_qvlpcrs
+      {Intrinsic::ppc_qpx_qvstfcd, 93131}, // __builtin_qpx_qvstfcd
+      {Intrinsic::ppc_qpx_qvstfcda, 93153}, // __builtin_qpx_qvstfcda
+      {Intrinsic::ppc_qpx_qvstfcs, 93176}, // __builtin_qpx_qvstfcs
+      {Intrinsic::ppc_qpx_qvstfcsa, 93198}, // __builtin_qpx_qvstfcsa
+      {Intrinsic::ppc_qpx_qvstfd, 93221}, // __builtin_qpx_qvstfd
+      {Intrinsic::ppc_qpx_qvstfda, 93242}, // __builtin_qpx_qvstfda
+      {Intrinsic::ppc_qpx_qvstfiw, 93264}, // __builtin_qpx_qvstfiw
+      {Intrinsic::ppc_qpx_qvstfiwa, 93286}, // __builtin_qpx_qvstfiwa
+      {Intrinsic::ppc_qpx_qvstfs, 93309}, // __builtin_qpx_qvstfs
+      {Intrinsic::ppc_qpx_qvstfsa, 93330}, // __builtin_qpx_qvstfsa
+      {Intrinsic::ppc_set_texasr, 93352}, // __builtin_set_texasr
+      {Intrinsic::ppc_set_texasru, 93373}, // __builtin_set_texasru
+      {Intrinsic::ppc_set_tfhar, 93395}, // __builtin_set_tfhar
+      {Intrinsic::ppc_set_tfiar, 93415}, // __builtin_set_tfiar
+      {Intrinsic::ppc_tabort, 93435}, // __builtin_tabort
+      {Intrinsic::ppc_tabortdc, 93452}, // __builtin_tabortdc
+      {Intrinsic::ppc_tabortdci, 93471}, // __builtin_tabortdci
+      {Intrinsic::ppc_tabortwc, 93491}, // __builtin_tabortwc
+      {Intrinsic::ppc_tabortwci, 93510}, // __builtin_tabortwci
+      {Intrinsic::ppc_tbegin, 93530}, // __builtin_tbegin
+      {Intrinsic::ppc_tcheck, 93547}, // __builtin_tcheck
+      {Intrinsic::ppc_tend, 93564}, // __builtin_tend
+      {Intrinsic::ppc_tendall, 93579}, // __builtin_tendall
+      {Intrinsic::ppc_trechkpt, 93597}, // __builtin_trechkpt
+      {Intrinsic::ppc_treclaim, 93616}, // __builtin_treclaim
+      {Intrinsic::ppc_tresume, 93635}, // __builtin_tresume
+      {Intrinsic::ppc_tsr, 93653}, // __builtin_tsr
+      {Intrinsic::ppc_tsuspend, 93667}, // __builtin_tsuspend
+      {Intrinsic::ppc_ttest, 93686}, // __builtin_ttest
+      {Intrinsic::ppc_vsx_xsmaxdp, 93702}, // __builtin_vsx_xsmaxdp
+      {Intrinsic::ppc_vsx_xsmindp, 93724}, // __builtin_vsx_xsmindp
+      {Intrinsic::ppc_vsx_xvcmpeqdp, 93746}, // __builtin_vsx_xvcmpeqdp
+      {Intrinsic::ppc_vsx_xvcmpeqdp_p, 93770}, // __builtin_vsx_xvcmpeqdp_p
+      {Intrinsic::ppc_vsx_xvcmpeqsp, 93796}, // __builtin_vsx_xvcmpeqsp
+      {Intrinsic::ppc_vsx_xvcmpeqsp_p, 93820}, // __builtin_vsx_xvcmpeqsp_p
+      {Intrinsic::ppc_vsx_xvcmpgedp, 93846}, // __builtin_vsx_xvcmpgedp
+      {Intrinsic::ppc_vsx_xvcmpgedp_p, 93870}, // __builtin_vsx_xvcmpgedp_p
+      {Intrinsic::ppc_vsx_xvcmpgesp, 93896}, // __builtin_vsx_xvcmpgesp
+      {Intrinsic::ppc_vsx_xvcmpgesp_p, 93920}, // __builtin_vsx_xvcmpgesp_p
+      {Intrinsic::ppc_vsx_xvcmpgtdp, 93946}, // __builtin_vsx_xvcmpgtdp
+      {Intrinsic::ppc_vsx_xvcmpgtdp_p, 93970}, // __builtin_vsx_xvcmpgtdp_p
+      {Intrinsic::ppc_vsx_xvcmpgtsp, 93996}, // __builtin_vsx_xvcmpgtsp
+      {Intrinsic::ppc_vsx_xvcmpgtsp_p, 94020}, // __builtin_vsx_xvcmpgtsp_p
+      {Intrinsic::ppc_vsx_xvcvdpsp, 94046}, // __builtin_vsx_xvcvdpsp
+      {Intrinsic::ppc_vsx_xvcvdpsxws, 94069}, // __builtin_vsx_xvcvdpsxws
+      {Intrinsic::ppc_vsx_xvcvdpuxws, 94094}, // __builtin_vsx_xvcvdpuxws
+      {Intrinsic::ppc_vsx_xvcvhpsp, 94119}, // __builtin_vsx_xvcvhpsp
+      {Intrinsic::ppc_vsx_xvcvspdp, 94142}, // __builtin_vsx_xvcvspdp
+      {Intrinsic::ppc_vsx_xvcvsphp, 94165}, // __builtin_vsx_xvcvsphp
+      {Intrinsic::ppc_vsx_xvcvsxdsp, 94188}, // __builtin_vsx_xvcvsxdsp
+      {Intrinsic::ppc_vsx_xvcvsxwdp, 94212}, // __builtin_vsx_xvcvsxwdp
+      {Intrinsic::ppc_vsx_xvcvuxdsp, 94236}, // __builtin_vsx_xvcvuxdsp
+      {Intrinsic::ppc_vsx_xvcvuxwdp, 94260}, // __builtin_vsx_xvcvuxwdp
+      {Intrinsic::ppc_vsx_xvdivdp, 94284}, // __builtin_vsx_xvdivdp
+      {Intrinsic::ppc_vsx_xvdivsp, 94306}, // __builtin_vsx_xvdivsp
+      {Intrinsic::ppc_vsx_xviexpdp, 94328}, // __builtin_vsx_xviexpdp
+      {Intrinsic::ppc_vsx_xviexpsp, 94351}, // __builtin_vsx_xviexpsp
+      {Intrinsic::ppc_vsx_xvmaxdp, 94374}, // __builtin_vsx_xvmaxdp
+      {Intrinsic::ppc_vsx_xvmaxsp, 94396}, // __builtin_vsx_xvmaxsp
+      {Intrinsic::ppc_vsx_xvmindp, 94418}, // __builtin_vsx_xvmindp
+      {Intrinsic::ppc_vsx_xvminsp, 94440}, // __builtin_vsx_xvminsp
+      {Intrinsic::ppc_vsx_xvredp, 94462}, // __builtin_vsx_xvredp
+      {Intrinsic::ppc_vsx_xvresp, 94483}, // __builtin_vsx_xvresp
+      {Intrinsic::ppc_vsx_xvrsqrtedp, 94504}, // __builtin_vsx_xvrsqrtedp
+      {Intrinsic::ppc_vsx_xvrsqrtesp, 94529}, // __builtin_vsx_xvrsqrtesp
+      {Intrinsic::ppc_vsx_xvtstdcdp, 94554}, // __builtin_vsx_xvtstdcdp
+      {Intrinsic::ppc_vsx_xvtstdcsp, 94578}, // __builtin_vsx_xvtstdcsp
+      {Intrinsic::ppc_vsx_xvxexpdp, 94602}, // __builtin_vsx_xvxexpdp
+      {Intrinsic::ppc_vsx_xvxexpsp, 94625}, // __builtin_vsx_xvxexpsp
+      {Intrinsic::ppc_vsx_xvxsigdp, 94648}, // __builtin_vsx_xvxsigdp
+      {Intrinsic::ppc_vsx_xvxsigsp, 94671}, // __builtin_vsx_xvxsigsp
+      {Intrinsic::ppc_vsx_xxextractuw, 94694}, // __builtin_vsx_xxextractuw
+      {Intrinsic::ppc_vsx_xxinsertw, 94720}, // __builtin_vsx_xxinsertw
+      {Intrinsic::ppc_vsx_xxleqv, 94744}, // __builtin_vsx_xxleqv
+    };
+    auto I = std::lower_bound(std::begin(ppcNames),
+                              std::end(ppcNames),
+                              BuiltinNameStr);
+    if (I != std::end(ppcNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "r600") {
+    static const BuiltinEntry r600Names[] = {
+      {Intrinsic::r600_group_barrier, 94765}, // __builtin_r600_group_barrier
+      {Intrinsic::r600_implicitarg_ptr, 94794}, // __builtin_r600_implicitarg_ptr
+      {Intrinsic::r600_rat_store_typed, 94825}, // __builtin_r600_rat_store_typed
+      {Intrinsic::r600_read_global_size_x, 94856}, // __builtin_r600_read_global_size_x
+      {Intrinsic::r600_read_global_size_y, 94890}, // __builtin_r600_read_global_size_y
+      {Intrinsic::r600_read_global_size_z, 94924}, // __builtin_r600_read_global_size_z
+      {Intrinsic::r600_read_ngroups_x, 94958}, // __builtin_r600_read_ngroups_x
+      {Intrinsic::r600_read_ngroups_y, 94988}, // __builtin_r600_read_ngroups_y
+      {Intrinsic::r600_read_ngroups_z, 95018}, // __builtin_r600_read_ngroups_z
+      {Intrinsic::r600_read_tgid_x, 95048}, // __builtin_r600_read_tgid_x
+      {Intrinsic::r600_read_tgid_y, 95075}, // __builtin_r600_read_tgid_y
+      {Intrinsic::r600_read_tgid_z, 95102}, // __builtin_r600_read_tgid_z
+    };
+    auto I = std::lower_bound(std::begin(r600Names),
+                              std::end(r600Names),
+                              BuiltinNameStr);
+    if (I != std::end(r600Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "s390") {
+    static const BuiltinEntry s390Names[] = {
+      {Intrinsic::s390_efpc, 95129}, // __builtin_s390_efpc
+      {Intrinsic::s390_lcbb, 95176}, // __builtin_s390_lcbb
+      {Intrinsic::s390_sfpc, 95216}, // __builtin_s390_sfpc
+      {Intrinsic::s390_vaccb, 95236}, // __builtin_s390_vaccb
+      {Intrinsic::s390_vacccq, 95257}, // __builtin_s390_vacccq
+      {Intrinsic::s390_vaccf, 95279}, // __builtin_s390_vaccf
+      {Intrinsic::s390_vaccg, 95300}, // __builtin_s390_vaccg
+      {Intrinsic::s390_vacch, 95321}, // __builtin_s390_vacch
+      {Intrinsic::s390_vaccq, 95342}, // __builtin_s390_vaccq
+      {Intrinsic::s390_vacq, 95363}, // __builtin_s390_vacq
+      {Intrinsic::s390_vaq, 95383}, // __builtin_s390_vaq
+      {Intrinsic::s390_vavgb, 95402}, // __builtin_s390_vavgb
+      {Intrinsic::s390_vavgf, 95423}, // __builtin_s390_vavgf
+      {Intrinsic::s390_vavgg, 95444}, // __builtin_s390_vavgg
+      {Intrinsic::s390_vavgh, 95465}, // __builtin_s390_vavgh
+      {Intrinsic::s390_vavglb, 95486}, // __builtin_s390_vavglb
+      {Intrinsic::s390_vavglf, 95508}, // __builtin_s390_vavglf
+      {Intrinsic::s390_vavglg, 95530}, // __builtin_s390_vavglg
+      {Intrinsic::s390_vavglh, 95552}, // __builtin_s390_vavglh
+      {Intrinsic::s390_vbperm, 95574}, // __builtin_s390_vbperm
+      {Intrinsic::s390_vcksm, 95596}, // __builtin_s390_vcksm
+      {Intrinsic::s390_verimb, 95617}, // __builtin_s390_verimb
+      {Intrinsic::s390_verimf, 95639}, // __builtin_s390_verimf
+      {Intrinsic::s390_verimg, 95661}, // __builtin_s390_verimg
+      {Intrinsic::s390_verimh, 95683}, // __builtin_s390_verimh
+      {Intrinsic::s390_verllb, 95705}, // __builtin_s390_verllb
+      {Intrinsic::s390_verllf, 95727}, // __builtin_s390_verllf
+      {Intrinsic::s390_verllg, 95749}, // __builtin_s390_verllg
+      {Intrinsic::s390_verllh, 95771}, // __builtin_s390_verllh
+      {Intrinsic::s390_verllvb, 95793}, // __builtin_s390_verllvb
+      {Intrinsic::s390_verllvf, 95816}, // __builtin_s390_verllvf
+      {Intrinsic::s390_verllvg, 95839}, // __builtin_s390_verllvg
+      {Intrinsic::s390_verllvh, 95862}, // __builtin_s390_verllvh
+      {Intrinsic::s390_vfaeb, 95885}, // __builtin_s390_vfaeb
+      {Intrinsic::s390_vfaef, 95906}, // __builtin_s390_vfaef
+      {Intrinsic::s390_vfaeh, 95927}, // __builtin_s390_vfaeh
+      {Intrinsic::s390_vfaezb, 95948}, // __builtin_s390_vfaezb
+      {Intrinsic::s390_vfaezf, 95970}, // __builtin_s390_vfaezf
+      {Intrinsic::s390_vfaezh, 95992}, // __builtin_s390_vfaezh
+      {Intrinsic::s390_vfeeb, 96014}, // __builtin_s390_vfeeb
+      {Intrinsic::s390_vfeef, 96035}, // __builtin_s390_vfeef
+      {Intrinsic::s390_vfeeh, 96056}, // __builtin_s390_vfeeh
+      {Intrinsic::s390_vfeezb, 96077}, // __builtin_s390_vfeezb
+      {Intrinsic::s390_vfeezf, 96099}, // __builtin_s390_vfeezf
+      {Intrinsic::s390_vfeezh, 96121}, // __builtin_s390_vfeezh
+      {Intrinsic::s390_vfeneb, 96143}, // __builtin_s390_vfeneb
+      {Intrinsic::s390_vfenef, 96165}, // __builtin_s390_vfenef
+      {Intrinsic::s390_vfeneh, 96187}, // __builtin_s390_vfeneh
+      {Intrinsic::s390_vfenezb, 96209}, // __builtin_s390_vfenezb
+      {Intrinsic::s390_vfenezf, 96232}, // __builtin_s390_vfenezf
+      {Intrinsic::s390_vfenezh, 96255}, // __builtin_s390_vfenezh
+      {Intrinsic::s390_vgfmab, 96278}, // __builtin_s390_vgfmab
+      {Intrinsic::s390_vgfmaf, 96300}, // __builtin_s390_vgfmaf
+      {Intrinsic::s390_vgfmag, 96322}, // __builtin_s390_vgfmag
+      {Intrinsic::s390_vgfmah, 96344}, // __builtin_s390_vgfmah
+      {Intrinsic::s390_vgfmb, 96366}, // __builtin_s390_vgfmb
+      {Intrinsic::s390_vgfmf, 96387}, // __builtin_s390_vgfmf
+      {Intrinsic::s390_vgfmg, 96408}, // __builtin_s390_vgfmg
+      {Intrinsic::s390_vgfmh, 96429}, // __builtin_s390_vgfmh
+      {Intrinsic::s390_vistrb, 96450}, // __builtin_s390_vistrb
+      {Intrinsic::s390_vistrf, 96472}, // __builtin_s390_vistrf
+      {Intrinsic::s390_vistrh, 96494}, // __builtin_s390_vistrh
+      {Intrinsic::s390_vlbb, 96516}, // __builtin_s390_vlbb
+      {Intrinsic::s390_vll, 96536}, // __builtin_s390_vll
+      {Intrinsic::s390_vlrl, 96555}, // __builtin_s390_vlrl
+      {Intrinsic::s390_vmaeb, 96575}, // __builtin_s390_vmaeb
+      {Intrinsic::s390_vmaef, 96596}, // __builtin_s390_vmaef
+      {Intrinsic::s390_vmaeh, 96617}, // __builtin_s390_vmaeh
+      {Intrinsic::s390_vmahb, 96638}, // __builtin_s390_vmahb
+      {Intrinsic::s390_vmahf, 96659}, // __builtin_s390_vmahf
+      {Intrinsic::s390_vmahh, 96680}, // __builtin_s390_vmahh
+      {Intrinsic::s390_vmaleb, 96701}, // __builtin_s390_vmaleb
+      {Intrinsic::s390_vmalef, 96723}, // __builtin_s390_vmalef
+      {Intrinsic::s390_vmaleh, 96745}, // __builtin_s390_vmaleh
+      {Intrinsic::s390_vmalhb, 96767}, // __builtin_s390_vmalhb
+      {Intrinsic::s390_vmalhf, 96789}, // __builtin_s390_vmalhf
+      {Intrinsic::s390_vmalhh, 96811}, // __builtin_s390_vmalhh
+      {Intrinsic::s390_vmalob, 96833}, // __builtin_s390_vmalob
+      {Intrinsic::s390_vmalof, 96855}, // __builtin_s390_vmalof
+      {Intrinsic::s390_vmaloh, 96877}, // __builtin_s390_vmaloh
+      {Intrinsic::s390_vmaob, 96899}, // __builtin_s390_vmaob
+      {Intrinsic::s390_vmaof, 96920}, // __builtin_s390_vmaof
+      {Intrinsic::s390_vmaoh, 96941}, // __builtin_s390_vmaoh
+      {Intrinsic::s390_vmeb, 96962}, // __builtin_s390_vmeb
+      {Intrinsic::s390_vmef, 96982}, // __builtin_s390_vmef
+      {Intrinsic::s390_vmeh, 97002}, // __builtin_s390_vmeh
+      {Intrinsic::s390_vmhb, 97022}, // __builtin_s390_vmhb
+      {Intrinsic::s390_vmhf, 97042}, // __builtin_s390_vmhf
+      {Intrinsic::s390_vmhh, 97062}, // __builtin_s390_vmhh
+      {Intrinsic::s390_vmleb, 97082}, // __builtin_s390_vmleb
+      {Intrinsic::s390_vmlef, 97103}, // __builtin_s390_vmlef
+      {Intrinsic::s390_vmleh, 97124}, // __builtin_s390_vmleh
+      {Intrinsic::s390_vmlhb, 97145}, // __builtin_s390_vmlhb
+      {Intrinsic::s390_vmlhf, 97166}, // __builtin_s390_vmlhf
+      {Intrinsic::s390_vmlhh, 97187}, // __builtin_s390_vmlhh
+      {Intrinsic::s390_vmlob, 97208}, // __builtin_s390_vmlob
+      {Intrinsic::s390_vmlof, 97229}, // __builtin_s390_vmlof
+      {Intrinsic::s390_vmloh, 97250}, // __builtin_s390_vmloh
+      {Intrinsic::s390_vmob, 97271}, // __builtin_s390_vmob
+      {Intrinsic::s390_vmof, 97291}, // __builtin_s390_vmof
+      {Intrinsic::s390_vmoh, 97311}, // __builtin_s390_vmoh
+      {Intrinsic::s390_vmslg, 97331}, // __builtin_s390_vmslg
+      {Intrinsic::s390_vpdi, 97352}, // __builtin_s390_vpdi
+      {Intrinsic::s390_vperm, 97372}, // __builtin_s390_vperm
+      {Intrinsic::s390_vpklsf, 97393}, // __builtin_s390_vpklsf
+      {Intrinsic::s390_vpklsg, 97415}, // __builtin_s390_vpklsg
+      {Intrinsic::s390_vpklsh, 97437}, // __builtin_s390_vpklsh
+      {Intrinsic::s390_vpksf, 97459}, // __builtin_s390_vpksf
+      {Intrinsic::s390_vpksg, 97480}, // __builtin_s390_vpksg
+      {Intrinsic::s390_vpksh, 97501}, // __builtin_s390_vpksh
+      {Intrinsic::s390_vsbcbiq, 97522}, // __builtin_s390_vsbcbiq
+      {Intrinsic::s390_vsbiq, 97545}, // __builtin_s390_vsbiq
+      {Intrinsic::s390_vscbib, 97566}, // __builtin_s390_vscbib
+      {Intrinsic::s390_vscbif, 97588}, // __builtin_s390_vscbif
+      {Intrinsic::s390_vscbig, 97610}, // __builtin_s390_vscbig
+      {Intrinsic::s390_vscbih, 97632}, // __builtin_s390_vscbih
+      {Intrinsic::s390_vscbiq, 97654}, // __builtin_s390_vscbiq
+      {Intrinsic::s390_vsl, 97676}, // __builtin_s390_vsl
+      {Intrinsic::s390_vslb, 97695}, // __builtin_s390_vslb
+      {Intrinsic::s390_vsldb, 97715}, // __builtin_s390_vsldb
+      {Intrinsic::s390_vsq, 97736}, // __builtin_s390_vsq
+      {Intrinsic::s390_vsra, 97755}, // __builtin_s390_vsra
+      {Intrinsic::s390_vsrab, 97775}, // __builtin_s390_vsrab
+      {Intrinsic::s390_vsrl, 97796}, // __builtin_s390_vsrl
+      {Intrinsic::s390_vsrlb, 97816}, // __builtin_s390_vsrlb
+      {Intrinsic::s390_vstl, 97837}, // __builtin_s390_vstl
+      {Intrinsic::s390_vstrcb, 97857}, // __builtin_s390_vstrcb
+      {Intrinsic::s390_vstrcf, 97879}, // __builtin_s390_vstrcf
+      {Intrinsic::s390_vstrch, 97901}, // __builtin_s390_vstrch
+      {Intrinsic::s390_vstrczb, 97923}, // __builtin_s390_vstrczb
+      {Intrinsic::s390_vstrczf, 97946}, // __builtin_s390_vstrczf
+      {Intrinsic::s390_vstrczh, 97969}, // __builtin_s390_vstrczh
+      {Intrinsic::s390_vstrl, 97992}, // __builtin_s390_vstrl
+      {Intrinsic::s390_vsumb, 98013}, // __builtin_s390_vsumb
+      {Intrinsic::s390_vsumgf, 98034}, // __builtin_s390_vsumgf
+      {Intrinsic::s390_vsumgh, 98056}, // __builtin_s390_vsumgh
+      {Intrinsic::s390_vsumh, 98078}, // __builtin_s390_vsumh
+      {Intrinsic::s390_vsumqf, 98099}, // __builtin_s390_vsumqf
+      {Intrinsic::s390_vsumqg, 98121}, // __builtin_s390_vsumqg
+      {Intrinsic::s390_vtm, 98143}, // __builtin_s390_vtm
+      {Intrinsic::s390_vuphb, 98162}, // __builtin_s390_vuphb
+      {Intrinsic::s390_vuphf, 98183}, // __builtin_s390_vuphf
+      {Intrinsic::s390_vuphh, 98204}, // __builtin_s390_vuphh
+      {Intrinsic::s390_vuplb, 98225}, // __builtin_s390_vuplb
+      {Intrinsic::s390_vuplf, 98246}, // __builtin_s390_vuplf
+      {Intrinsic::s390_vuplhb, 98267}, // __builtin_s390_vuplhb
+      {Intrinsic::s390_vuplhf, 98289}, // __builtin_s390_vuplhf
+      {Intrinsic::s390_vuplhh, 98311}, // __builtin_s390_vuplhh
+      {Intrinsic::s390_vuplhw, 98333}, // __builtin_s390_vuplhw
+      {Intrinsic::s390_vupllb, 98355}, // __builtin_s390_vupllb
+      {Intrinsic::s390_vupllf, 98377}, // __builtin_s390_vupllf
+      {Intrinsic::s390_vupllh, 98399}, // __builtin_s390_vupllh
+      {Intrinsic::s390_tend, 93564}, // __builtin_tend
+      {Intrinsic::s390_ppa_txassist, 95196}, // __builtin_tx_assist
+      {Intrinsic::s390_etnd, 95149}, // __builtin_tx_nesting_depth
+    };
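+    // The table above is kept sorted by builtin name, so a binary search
+    // via std::lower_bound (relying on the name/StringRef comparator
+    // presumably defined on BuiltinEntry earlier in this generated file),
+    // followed by an exact-match check, resolves the intrinsic ID.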
+    auto I = std::lower_bound(std::begin(s390Names),
+                              std::end(s390Names),
+                              BuiltinNameStr);
+    if (I != std::end(s390Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
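+  // The x86 table below follows the same layout: entries sorted by builtin
+  // name, with the second field an offset into the shared builtin-name
+  // string table (the name each offset resolves to is shown in the trailing
+  // comment on each row).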
+  if (TargetPrefix == "x86") {
+    static const BuiltinEntry x86Names[] = {
+      {Intrinsic::x86_addcarry_u32, 98929}, // __builtin_ia32_addcarry_u32
+      {Intrinsic::x86_addcarry_u64, 98957}, // __builtin_ia32_addcarry_u64
+      {Intrinsic::x86_addcarryx_u32, 98985}, // __builtin_ia32_addcarryx_u32
+      {Intrinsic::x86_addcarryx_u64, 99014}, // __builtin_ia32_addcarryx_u64
+      {Intrinsic::x86_avx512_mask_add_pd_512, 104566}, // __builtin_ia32_addpd512_mask
+      {Intrinsic::x86_avx512_mask_add_ps_512, 104595}, // __builtin_ia32_addps512_mask
+      {Intrinsic::x86_avx512_mask_add_sd_round, 104624}, // __builtin_ia32_addsd_round_mask
+      {Intrinsic::x86_avx512_mask_add_ss_round, 104656}, // __builtin_ia32_addss_round_mask
+      {Intrinsic::x86_sse3_addsub_pd, 135056}, // __builtin_ia32_addsubpd
+      {Intrinsic::x86_avx_addsub_pd_256, 99426}, // __builtin_ia32_addsubpd256
+      {Intrinsic::x86_sse3_addsub_ps, 135080}, // __builtin_ia32_addsubps
+      {Intrinsic::x86_avx_addsub_ps_256, 99453}, // __builtin_ia32_addsubps256
+      {Intrinsic::x86_aesni_aesdec, 99043}, // __builtin_ia32_aesdec128
+      {Intrinsic::x86_aesni_aesdec_256, 99068}, // __builtin_ia32_aesdec256
+      {Intrinsic::x86_aesni_aesdec_512, 99093}, // __builtin_ia32_aesdec512
+      {Intrinsic::x86_aesni_aesdeclast, 99118}, // __builtin_ia32_aesdeclast128
+      {Intrinsic::x86_aesni_aesdeclast_256, 99147}, // __builtin_ia32_aesdeclast256
+      {Intrinsic::x86_aesni_aesdeclast_512, 99176}, // __builtin_ia32_aesdeclast512
+      {Intrinsic::x86_aesni_aesenc, 99205}, // __builtin_ia32_aesenc128
+      {Intrinsic::x86_aesni_aesenc_256, 99230}, // __builtin_ia32_aesenc256
+      {Intrinsic::x86_aesni_aesenc_512, 99255}, // __builtin_ia32_aesenc512
+      {Intrinsic::x86_aesni_aesenclast, 99280}, // __builtin_ia32_aesenclast128
+      {Intrinsic::x86_aesni_aesenclast_256, 99309}, // __builtin_ia32_aesenclast256
+      {Intrinsic::x86_aesni_aesenclast_512, 99338}, // __builtin_ia32_aesenclast512
+      {Intrinsic::x86_aesni_aesimc, 99367}, // __builtin_ia32_aesimc128
+      {Intrinsic::x86_aesni_aeskeygenassist, 99392}, // __builtin_ia32_aeskeygenassist128
+      {Intrinsic::x86_bmi_bextr_32, 129324}, // __builtin_ia32_bextr_u32
+      {Intrinsic::x86_bmi_bextr_64, 129349}, // __builtin_ia32_bextr_u64
+      {Intrinsic::x86_tbm_bextri_u32, 136958}, // __builtin_ia32_bextri_u32
+      {Intrinsic::x86_tbm_bextri_u64, 136984}, // __builtin_ia32_bextri_u64
+      {Intrinsic::x86_sse41_blendvpd, 135257}, // __builtin_ia32_blendvpd
+      {Intrinsic::x86_avx_blendv_pd_256, 99480}, // __builtin_ia32_blendvpd256
+      {Intrinsic::x86_sse41_blendvps, 135281}, // __builtin_ia32_blendvps
+      {Intrinsic::x86_avx_blendv_ps_256, 99507}, // __builtin_ia32_blendvps256
+      {Intrinsic::x86_avx512_broadcastmb_128, 103118}, // __builtin_ia32_broadcastmb128
+      {Intrinsic::x86_avx512_broadcastmb_256, 103148}, // __builtin_ia32_broadcastmb256
+      {Intrinsic::x86_avx512_broadcastmb_512, 103178}, // __builtin_ia32_broadcastmb512
+      {Intrinsic::x86_avx512_broadcastmw_128, 103208}, // __builtin_ia32_broadcastmw128
+      {Intrinsic::x86_avx512_broadcastmw_256, 103238}, // __builtin_ia32_broadcastmw256
+      {Intrinsic::x86_avx512_broadcastmw_512, 103268}, // __builtin_ia32_broadcastmw512
+      {Intrinsic::x86_bmi_bzhi_64, 129397}, // __builtin_ia32_bzhi_di
+      {Intrinsic::x86_bmi_bzhi_32, 129374}, // __builtin_ia32_bzhi_si
+      {Intrinsic::x86_sse2_clflush, 133369}, // __builtin_ia32_clflush
+      {Intrinsic::x86_clflushopt, 129512}, // __builtin_ia32_clflushopt
+      {Intrinsic::x86_clrssbsy, 129538}, // __builtin_ia32_clrssbsy
+      {Intrinsic::x86_clwb, 129562}, // __builtin_ia32_clwb
+      {Intrinsic::x86_clzero, 129582}, // __builtin_ia32_clzero
+      {Intrinsic::x86_sse2_cmp_sd, 133392}, // __builtin_ia32_cmpsd
+      {Intrinsic::x86_avx512_mask_cmp_sd, 104688}, // __builtin_ia32_cmpsd_mask
+      {Intrinsic::x86_sse_cmp_ss, 132544}, // __builtin_ia32_cmpss
+      {Intrinsic::x86_avx512_mask_cmp_ss, 104714}, // __builtin_ia32_cmpss_mask
+      {Intrinsic::x86_sse_comieq_ss, 132565}, // __builtin_ia32_comieq
+      {Intrinsic::x86_sse_comige_ss, 132587}, // __builtin_ia32_comige
+      {Intrinsic::x86_sse_comigt_ss, 132609}, // __builtin_ia32_comigt
+      {Intrinsic::x86_sse_comile_ss, 132631}, // __builtin_ia32_comile
+      {Intrinsic::x86_sse_comilt_ss, 132653}, // __builtin_ia32_comilt
+      {Intrinsic::x86_sse_comineq_ss, 132675}, // __builtin_ia32_comineq
+      {Intrinsic::x86_sse2_comieq_sd, 133413}, // __builtin_ia32_comisdeq
+      {Intrinsic::x86_sse2_comige_sd, 133437}, // __builtin_ia32_comisdge
+      {Intrinsic::x86_sse2_comigt_sd, 133461}, // __builtin_ia32_comisdgt
+      {Intrinsic::x86_sse2_comile_sd, 133485}, // __builtin_ia32_comisdle
+      {Intrinsic::x86_sse2_comilt_sd, 133509}, // __builtin_ia32_comisdlt
+      {Intrinsic::x86_sse2_comineq_sd, 133533}, // __builtin_ia32_comisdneq
+      {Intrinsic::x86_avx512_mask_compress_pd_128, 104944}, // __builtin_ia32_compressdf128_mask
+      {Intrinsic::x86_avx512_mask_compress_pd_256, 104978}, // __builtin_ia32_compressdf256_mask
+      {Intrinsic::x86_avx512_mask_compress_pd_512, 105012}, // __builtin_ia32_compressdf512_mask
+      {Intrinsic::x86_avx512_mask_compress_q_128, 105148}, // __builtin_ia32_compressdi128_mask
+      {Intrinsic::x86_avx512_mask_compress_q_256, 105182}, // __builtin_ia32_compressdi256_mask
+      {Intrinsic::x86_avx512_mask_compress_q_512, 105216}, // __builtin_ia32_compressdi512_mask
+      {Intrinsic::x86_avx512_mask_compress_w_128, 105952}, // __builtin_ia32_compresshi128_mask
+      {Intrinsic::x86_avx512_mask_compress_w_256, 105986}, // __builtin_ia32_compresshi256_mask
+      {Intrinsic::x86_avx512_mask_compress_w_512, 106020}, // __builtin_ia32_compresshi512_mask
+      {Intrinsic::x86_avx512_mask_compress_b_128, 104740}, // __builtin_ia32_compressqi128_mask
+      {Intrinsic::x86_avx512_mask_compress_b_256, 104774}, // __builtin_ia32_compressqi256_mask
+      {Intrinsic::x86_avx512_mask_compress_b_512, 104808}, // __builtin_ia32_compressqi512_mask
+      {Intrinsic::x86_avx512_mask_compress_ps_128, 105046}, // __builtin_ia32_compresssf128_mask
+      {Intrinsic::x86_avx512_mask_compress_ps_256, 105080}, // __builtin_ia32_compresssf256_mask
+      {Intrinsic::x86_avx512_mask_compress_ps_512, 105114}, // __builtin_ia32_compresssf512_mask
+      {Intrinsic::x86_avx512_mask_compress_d_128, 104842}, // __builtin_ia32_compresssi128_mask
+      {Intrinsic::x86_avx512_mask_compress_d_256, 104876}, // __builtin_ia32_compresssi256_mask
+      {Intrinsic::x86_avx512_mask_compress_d_512, 104910}, // __builtin_ia32_compresssi512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_pd_128, 105484}, // __builtin_ia32_compressstoredf128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_pd_256, 105523}, // __builtin_ia32_compressstoredf256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_pd_512, 105562}, // __builtin_ia32_compressstoredf512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_q_128, 105718}, // __builtin_ia32_compressstoredi128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_q_256, 105757}, // __builtin_ia32_compressstoredi256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_q_512, 105796}, // __builtin_ia32_compressstoredi512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_w_128, 105835}, // __builtin_ia32_compressstorehi128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_w_256, 105874}, // __builtin_ia32_compressstorehi256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_w_512, 105913}, // __builtin_ia32_compressstorehi512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_b_128, 105250}, // __builtin_ia32_compressstoreqi128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_b_256, 105289}, // __builtin_ia32_compressstoreqi256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_b_512, 105328}, // __builtin_ia32_compressstoreqi512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_ps_128, 105601}, // __builtin_ia32_compressstoresf128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_ps_256, 105640}, // __builtin_ia32_compressstoresf256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_ps_512, 105679}, // __builtin_ia32_compressstoresf512_mask
+      {Intrinsic::x86_avx512_mask_compress_store_d_128, 105367}, // __builtin_ia32_compressstoresi128_mask
+      {Intrinsic::x86_avx512_mask_compress_store_d_256, 105406}, // __builtin_ia32_compressstoresi256_mask
+      {Intrinsic::x86_avx512_mask_compress_store_d_512, 105445}, // __builtin_ia32_compressstoresi512_mask
+      {Intrinsic::x86_sse42_crc32_64_64, 135744}, // __builtin_ia32_crc32di
+      {Intrinsic::x86_sse42_crc32_32_16, 135675}, // __builtin_ia32_crc32hi
+      {Intrinsic::x86_sse42_crc32_32_8, 135721}, // __builtin_ia32_crc32qi
+      {Intrinsic::x86_sse42_crc32_32_32, 135698}, // __builtin_ia32_crc32si
+      {Intrinsic::x86_sse2_cvtdq2ps, 133558}, // __builtin_ia32_cvtdq2ps
+      {Intrinsic::x86_avx_cvtdq2_ps_256, 99615}, // __builtin_ia32_cvtdq2ps256
+      {Intrinsic::x86_avx512_mask_cvtdq2ps_512, 106276}, // __builtin_ia32_cvtdq2ps512_mask
+      {Intrinsic::x86_sse2_cvtpd2dq, 133582}, // __builtin_ia32_cvtpd2dq
+      {Intrinsic::x86_avx512_mask_cvtpd2dq_128, 106308}, // __builtin_ia32_cvtpd2dq128_mask
+      {Intrinsic::x86_avx_cvt_pd2dq_256, 99561}, // __builtin_ia32_cvtpd2dq256
+      {Intrinsic::x86_avx512_mask_cvtpd2dq_512, 106340}, // __builtin_ia32_cvtpd2dq512_mask
+      {Intrinsic::x86_sse_cvtpd2pi, 132698}, // __builtin_ia32_cvtpd2pi
+      {Intrinsic::x86_sse2_cvtpd2ps, 133606}, // __builtin_ia32_cvtpd2ps
+      {Intrinsic::x86_avx_cvt_pd2_ps_256, 99534}, // __builtin_ia32_cvtpd2ps256
+      {Intrinsic::x86_avx512_mask_cvtpd2ps_512, 106401}, // __builtin_ia32_cvtpd2ps512_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2ps, 106372}, // __builtin_ia32_cvtpd2ps_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2qq_128, 106433}, // __builtin_ia32_cvtpd2qq128_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2qq_256, 106465}, // __builtin_ia32_cvtpd2qq256_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2qq_512, 106497}, // __builtin_ia32_cvtpd2qq512_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2udq_128, 106529}, // __builtin_ia32_cvtpd2udq128_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2udq_256, 106562}, // __builtin_ia32_cvtpd2udq256_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2udq_512, 106595}, // __builtin_ia32_cvtpd2udq512_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2uqq_128, 106628}, // __builtin_ia32_cvtpd2uqq128_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2uqq_256, 106661}, // __builtin_ia32_cvtpd2uqq256_mask
+      {Intrinsic::x86_avx512_mask_cvtpd2uqq_512, 106694}, // __builtin_ia32_cvtpd2uqq512_mask
+      {Intrinsic::x86_sse_cvtpi2pd, 132722}, // __builtin_ia32_cvtpi2pd
+      {Intrinsic::x86_sse_cvtpi2ps, 132746}, // __builtin_ia32_cvtpi2ps
+      {Intrinsic::x86_sse2_cvtps2dq, 133630}, // __builtin_ia32_cvtps2dq
+      {Intrinsic::x86_avx512_mask_cvtps2dq_128, 106727}, // __builtin_ia32_cvtps2dq128_mask
+      {Intrinsic::x86_avx_cvt_ps2dq_256, 99588}, // __builtin_ia32_cvtps2dq256
+      {Intrinsic::x86_avx512_mask_cvtps2dq_256, 106759}, // __builtin_ia32_cvtps2dq256_mask
+      {Intrinsic::x86_avx512_mask_cvtps2dq_512, 106791}, // __builtin_ia32_cvtps2dq512_mask
+      {Intrinsic::x86_avx512_mask_cvtps2pd_512, 106823}, // __builtin_ia32_cvtps2pd512_mask
+      {Intrinsic::x86_sse_cvtps2pi, 132770}, // __builtin_ia32_cvtps2pi
+      {Intrinsic::x86_avx512_mask_cvtps2qq_128, 106855}, // __builtin_ia32_cvtps2qq128_mask
+      {Intrinsic::x86_avx512_mask_cvtps2qq_256, 106887}, // __builtin_ia32_cvtps2qq256_mask
+      {Intrinsic::x86_avx512_mask_cvtps2qq_512, 106919}, // __builtin_ia32_cvtps2qq512_mask
+      {Intrinsic::x86_avx512_mask_cvtps2udq_128, 106951}, // __builtin_ia32_cvtps2udq128_mask
+      {Intrinsic::x86_avx512_mask_cvtps2udq_256, 106984}, // __builtin_ia32_cvtps2udq256_mask
+      {Intrinsic::x86_avx512_mask_cvtps2udq_512, 107017}, // __builtin_ia32_cvtps2udq512_mask
+      {Intrinsic::x86_avx512_mask_cvtps2uqq_128, 107050}, // __builtin_ia32_cvtps2uqq128_mask
+      {Intrinsic::x86_avx512_mask_cvtps2uqq_256, 107083}, // __builtin_ia32_cvtps2uqq256_mask
+      {Intrinsic::x86_avx512_mask_cvtps2uqq_512, 107116}, // __builtin_ia32_cvtps2uqq512_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2pd_128, 107149}, // __builtin_ia32_cvtqq2pd128_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2pd_256, 107181}, // __builtin_ia32_cvtqq2pd256_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2pd_512, 107213}, // __builtin_ia32_cvtqq2pd512_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2ps_128, 107245}, // __builtin_ia32_cvtqq2ps128_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2ps_256, 107277}, // __builtin_ia32_cvtqq2ps256_mask
+      {Intrinsic::x86_avx512_mask_cvtqq2ps_512, 107309}, // __builtin_ia32_cvtqq2ps512_mask
+      {Intrinsic::x86_sse2_cvtsd2si, 133654}, // __builtin_ia32_cvtsd2si
+      {Intrinsic::x86_sse2_cvtsd2si64, 133678}, // __builtin_ia32_cvtsd2si64
+      {Intrinsic::x86_sse2_cvtsd2ss, 133704}, // __builtin_ia32_cvtsd2ss
+      {Intrinsic::x86_avx512_mask_cvtsd2ss_round, 107341}, // __builtin_ia32_cvtsd2ss_round_mask
+      {Intrinsic::x86_avx512_cvtsi2sd64, 103298}, // __builtin_ia32_cvtsi2sd64
+      {Intrinsic::x86_avx512_cvtsi2ss32, 103324}, // __builtin_ia32_cvtsi2ss32
+      {Intrinsic::x86_avx512_cvtsi2ss64, 103350}, // __builtin_ia32_cvtsi2ss64
+      {Intrinsic::x86_avx512_mask_cvtss2sd_round, 107376}, // __builtin_ia32_cvtss2sd_round_mask
+      {Intrinsic::x86_sse_cvtss2si, 132794}, // __builtin_ia32_cvtss2si
+      {Intrinsic::x86_sse_cvtss2si64, 132818}, // __builtin_ia32_cvtss2si64
+      {Intrinsic::x86_sse2_cvttpd2dq, 133728}, // __builtin_ia32_cvttpd2dq
+      {Intrinsic::x86_avx512_mask_cvttpd2dq_128, 107411}, // __builtin_ia32_cvttpd2dq128_mask
+      {Intrinsic::x86_avx_cvtt_pd2dq_256, 99642}, // __builtin_ia32_cvttpd2dq256
+      {Intrinsic::x86_avx512_mask_cvttpd2dq_512, 107444}, // __builtin_ia32_cvttpd2dq512_mask
+      {Intrinsic::x86_sse_cvttpd2pi, 132844}, // __builtin_ia32_cvttpd2pi
+      {Intrinsic::x86_avx512_mask_cvttpd2qq_128, 107477}, // __builtin_ia32_cvttpd2qq128_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2qq_256, 107510}, // __builtin_ia32_cvttpd2qq256_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2qq_512, 107543}, // __builtin_ia32_cvttpd2qq512_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2udq_128, 107576}, // __builtin_ia32_cvttpd2udq128_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2udq_256, 107610}, // __builtin_ia32_cvttpd2udq256_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2udq_512, 107644}, // __builtin_ia32_cvttpd2udq512_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2uqq_128, 107678}, // __builtin_ia32_cvttpd2uqq128_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2uqq_256, 107712}, // __builtin_ia32_cvttpd2uqq256_mask
+      {Intrinsic::x86_avx512_mask_cvttpd2uqq_512, 107746}, // __builtin_ia32_cvttpd2uqq512_mask
+      {Intrinsic::x86_sse2_cvttps2dq, 133753}, // __builtin_ia32_cvttps2dq
+      {Intrinsic::x86_avx_cvtt_ps2dq_256, 99670}, // __builtin_ia32_cvttps2dq256
+      {Intrinsic::x86_avx512_mask_cvttps2dq_512, 107780}, // __builtin_ia32_cvttps2dq512_mask
+      {Intrinsic::x86_sse_cvttps2pi, 132869}, // __builtin_ia32_cvttps2pi
+      {Intrinsic::x86_avx512_mask_cvttps2qq_128, 107813}, // __builtin_ia32_cvttps2qq128_mask
+      {Intrinsic::x86_avx512_mask_cvttps2qq_256, 107846}, // __builtin_ia32_cvttps2qq256_mask
+      {Intrinsic::x86_avx512_mask_cvttps2qq_512, 107879}, // __builtin_ia32_cvttps2qq512_mask
+      {Intrinsic::x86_avx512_mask_cvttps2udq_128, 107912}, // __builtin_ia32_cvttps2udq128_mask
+      {Intrinsic::x86_avx512_mask_cvttps2udq_256, 107946}, // __builtin_ia32_cvttps2udq256_mask
+      {Intrinsic::x86_avx512_mask_cvttps2udq_512, 107980}, // __builtin_ia32_cvttps2udq512_mask
+      {Intrinsic::x86_avx512_mask_cvttps2uqq_128, 108014}, // __builtin_ia32_cvttps2uqq128_mask
+      {Intrinsic::x86_avx512_mask_cvttps2uqq_256, 108048}, // __builtin_ia32_cvttps2uqq256_mask
+      {Intrinsic::x86_avx512_mask_cvttps2uqq_512, 108082}, // __builtin_ia32_cvttps2uqq512_mask
+      {Intrinsic::x86_sse2_cvttsd2si, 133778}, // __builtin_ia32_cvttsd2si
+      {Intrinsic::x86_sse2_cvttsd2si64, 133803}, // __builtin_ia32_cvttsd2si64
+      {Intrinsic::x86_sse_cvttss2si, 132894}, // __builtin_ia32_cvttss2si
+      {Intrinsic::x86_sse_cvttss2si64, 132919}, // __builtin_ia32_cvttss2si64
+      {Intrinsic::x86_avx512_mask_cvtudq2ps_128, 108116}, // __builtin_ia32_cvtudq2ps128_mask
+      {Intrinsic::x86_avx512_mask_cvtudq2ps_256, 108149}, // __builtin_ia32_cvtudq2ps256_mask
+      {Intrinsic::x86_avx512_mask_cvtudq2ps_512, 108182}, // __builtin_ia32_cvtudq2ps512_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2pd_128, 108215}, // __builtin_ia32_cvtuqq2pd128_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2pd_256, 108248}, // __builtin_ia32_cvtuqq2pd256_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2pd_512, 108281}, // __builtin_ia32_cvtuqq2pd512_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2ps_128, 108314}, // __builtin_ia32_cvtuqq2ps128_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2ps_256, 108347}, // __builtin_ia32_cvtuqq2ps256_mask
+      {Intrinsic::x86_avx512_mask_cvtuqq2ps_512, 108380}, // __builtin_ia32_cvtuqq2ps512_mask
+      {Intrinsic::x86_avx512_cvtusi2sd, 103604}, // __builtin_ia32_cvtusi2sd32
+      {Intrinsic::x86_avx512_cvtusi642sd, 103658}, // __builtin_ia32_cvtusi2sd64
+      {Intrinsic::x86_avx512_cvtusi2ss, 103631}, // __builtin_ia32_cvtusi2ss32
+      {Intrinsic::x86_avx512_cvtusi642ss, 103685}, // __builtin_ia32_cvtusi2ss64
+      {Intrinsic::x86_avx512_mask_dbpsadbw_128, 108413}, // __builtin_ia32_dbpsadbw128_mask
+      {Intrinsic::x86_avx512_mask_dbpsadbw_256, 108445}, // __builtin_ia32_dbpsadbw256_mask
+      {Intrinsic::x86_avx512_mask_dbpsadbw_512, 108477}, // __builtin_ia32_dbpsadbw512_mask
+      {Intrinsic::x86_avx512_mask_div_pd_512, 108509}, // __builtin_ia32_divpd512_mask
+      {Intrinsic::x86_avx512_mask_div_ps_512, 108538}, // __builtin_ia32_divps512_mask
+      {Intrinsic::x86_avx512_mask_div_sd_round, 108567}, // __builtin_ia32_divsd_round_mask
+      {Intrinsic::x86_avx512_mask_div_ss_round, 108599}, // __builtin_ia32_divss_round_mask
+      {Intrinsic::x86_sse41_dppd, 135305}, // __builtin_ia32_dppd
+      {Intrinsic::x86_sse41_dpps, 135325}, // __builtin_ia32_dpps
+      {Intrinsic::x86_avx_dp_ps_256, 99698}, // __builtin_ia32_dpps256
+      {Intrinsic::x86_mmx_emms, 130298}, // __builtin_ia32_emms
+      {Intrinsic::x86_avx512_exp2_pd, 103712}, // __builtin_ia32_exp2pd_mask
+      {Intrinsic::x86_avx512_exp2_ps, 103739}, // __builtin_ia32_exp2ps_mask
+      {Intrinsic::x86_avx512_mask_expand_pd_128, 109471}, // __builtin_ia32_expanddf128_mask
+      {Intrinsic::x86_avx512_mask_expand_pd_256, 109503}, // __builtin_ia32_expanddf256_mask
+      {Intrinsic::x86_avx512_mask_expand_pd_512, 109535}, // __builtin_ia32_expanddf512_mask
+      {Intrinsic::x86_avx512_mask_expand_q_128, 109663}, // __builtin_ia32_expanddi128_mask
+      {Intrinsic::x86_avx512_mask_expand_q_256, 109695}, // __builtin_ia32_expanddi256_mask
+      {Intrinsic::x86_avx512_mask_expand_q_512, 109727}, // __builtin_ia32_expanddi512_mask
+      {Intrinsic::x86_avx512_mask_expand_w_128, 109759}, // __builtin_ia32_expandhi128_mask
+      {Intrinsic::x86_avx512_mask_expand_w_256, 109791}, // __builtin_ia32_expandhi256_mask
+      {Intrinsic::x86_avx512_mask_expand_w_512, 109823}, // __builtin_ia32_expandhi512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_pd_128, 109039}, // __builtin_ia32_expandloaddf128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_pd_256, 109075}, // __builtin_ia32_expandloaddf256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_pd_512, 109111}, // __builtin_ia32_expandloaddf512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_q_128, 109255}, // __builtin_ia32_expandloaddi128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_q_256, 109291}, // __builtin_ia32_expandloaddi256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_q_512, 109327}, // __builtin_ia32_expandloaddi512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_w_128, 109363}, // __builtin_ia32_expandloadhi128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_w_256, 109399}, // __builtin_ia32_expandloadhi256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_w_512, 109435}, // __builtin_ia32_expandloadhi512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_b_128, 108823}, // __builtin_ia32_expandloadqi128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_b_256, 108859}, // __builtin_ia32_expandloadqi256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_b_512, 108895}, // __builtin_ia32_expandloadqi512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_ps_128, 109147}, // __builtin_ia32_expandloadsf128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_ps_256, 109183}, // __builtin_ia32_expandloadsf256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_ps_512, 109219}, // __builtin_ia32_expandloadsf512_mask
+      {Intrinsic::x86_avx512_mask_expand_load_d_128, 108931}, // __builtin_ia32_expandloadsi128_mask
+      {Intrinsic::x86_avx512_mask_expand_load_d_256, 108967}, // __builtin_ia32_expandloadsi256_mask
+      {Intrinsic::x86_avx512_mask_expand_load_d_512, 109003}, // __builtin_ia32_expandloadsi512_mask
+      {Intrinsic::x86_avx512_mask_expand_b_128, 108631}, // __builtin_ia32_expandqi128_mask
+      {Intrinsic::x86_avx512_mask_expand_b_256, 108663}, // __builtin_ia32_expandqi256_mask
+      {Intrinsic::x86_avx512_mask_expand_b_512, 108695}, // __builtin_ia32_expandqi512_mask
+      {Intrinsic::x86_avx512_mask_expand_ps_128, 109567}, // __builtin_ia32_expandsf128_mask
+      {Intrinsic::x86_avx512_mask_expand_ps_256, 109599}, // __builtin_ia32_expandsf256_mask
+      {Intrinsic::x86_avx512_mask_expand_ps_512, 109631}, // __builtin_ia32_expandsf512_mask
+      {Intrinsic::x86_avx512_mask_expand_d_128, 108727}, // __builtin_ia32_expandsi128_mask
+      {Intrinsic::x86_avx512_mask_expand_d_256, 108759}, // __builtin_ia32_expandsi256_mask
+      {Intrinsic::x86_avx512_mask_expand_d_512, 108791}, // __builtin_ia32_expandsi512_mask
+      {Intrinsic::x86_sse4a_extrq, 136169}, // __builtin_ia32_extrq
+      {Intrinsic::x86_sse4a_extrqi, 136190}, // __builtin_ia32_extrqi
+      {Intrinsic::x86_mmx_femms, 130318}, // __builtin_ia32_femms
+      {Intrinsic::x86_avx512_mask_fixupimm_pd_128, 109855}, // __builtin_ia32_fixupimmpd128_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_pd_128, 123357}, // __builtin_ia32_fixupimmpd128_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_pd_256, 109889}, // __builtin_ia32_fixupimmpd256_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_pd_256, 123392}, // __builtin_ia32_fixupimmpd256_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_pd_512, 109923}, // __builtin_ia32_fixupimmpd512_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_pd_512, 123427}, // __builtin_ia32_fixupimmpd512_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_ps_128, 109957}, // __builtin_ia32_fixupimmps128_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_ps_128, 123462}, // __builtin_ia32_fixupimmps128_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_ps_256, 109991}, // __builtin_ia32_fixupimmps256_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_ps_256, 123497}, // __builtin_ia32_fixupimmps256_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_ps_512, 110025}, // __builtin_ia32_fixupimmps512_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_ps_512, 123532}, // __builtin_ia32_fixupimmps512_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_sd, 110059}, // __builtin_ia32_fixupimmsd_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_sd, 123567}, // __builtin_ia32_fixupimmsd_maskz
+      {Intrinsic::x86_avx512_mask_fixupimm_ss, 110090}, // __builtin_ia32_fixupimmss_mask
+      {Intrinsic::x86_avx512_maskz_fixupimm_ss, 123599}, // __builtin_ia32_fixupimmss_maskz
+      {Intrinsic::x86_avx512_mask_fpclass_pd_128, 110121}, // __builtin_ia32_fpclasspd128_mask
+      {Intrinsic::x86_avx512_mask_fpclass_pd_256, 110154}, // __builtin_ia32_fpclasspd256_mask
+      {Intrinsic::x86_avx512_mask_fpclass_pd_512, 110187}, // __builtin_ia32_fpclasspd512_mask
+      {Intrinsic::x86_avx512_mask_fpclass_ps_128, 110220}, // __builtin_ia32_fpclassps128_mask
+      {Intrinsic::x86_avx512_mask_fpclass_ps_256, 110253}, // __builtin_ia32_fpclassps256_mask
+      {Intrinsic::x86_avx512_mask_fpclass_ps_512, 110286}, // __builtin_ia32_fpclassps512_mask
+      {Intrinsic::x86_avx512_mask_fpclass_sd, 110319}, // __builtin_ia32_fpclasssd_mask
+      {Intrinsic::x86_avx512_mask_fpclass_ss, 110349}, // __builtin_ia32_fpclassss_mask
+      {Intrinsic::x86_fxrstor, 130040}, // __builtin_ia32_fxrstor
+      {Intrinsic::x86_fxrstor64, 130063}, // __builtin_ia32_fxrstor64
+      {Intrinsic::x86_fxsave, 130088}, // __builtin_ia32_fxsave
+      {Intrinsic::x86_fxsave64, 130110}, // __builtin_ia32_fxsave64
+      {Intrinsic::x86_avx512_gather3div2_df, 103994}, // __builtin_ia32_gather3div2df
+      {Intrinsic::x86_avx512_gather3div2_di, 104023}, // __builtin_ia32_gather3div2di
+      {Intrinsic::x86_avx512_gather3div4_df, 104052}, // __builtin_ia32_gather3div4df
+      {Intrinsic::x86_avx512_gather3div4_di, 104081}, // __builtin_ia32_gather3div4di
+      {Intrinsic::x86_avx512_gather3div4_sf, 104110}, // __builtin_ia32_gather3div4sf
+      {Intrinsic::x86_avx512_gather3div4_si, 104139}, // __builtin_ia32_gather3div4si
+      {Intrinsic::x86_avx512_gather3div8_sf, 104168}, // __builtin_ia32_gather3div8sf
+      {Intrinsic::x86_avx512_gather3div8_si, 104197}, // __builtin_ia32_gather3div8si
+      {Intrinsic::x86_avx512_gather3siv2_df, 104226}, // __builtin_ia32_gather3siv2df
+      {Intrinsic::x86_avx512_gather3siv2_di, 104255}, // __builtin_ia32_gather3siv2di
+      {Intrinsic::x86_avx512_gather3siv4_df, 104284}, // __builtin_ia32_gather3siv4df
+      {Intrinsic::x86_avx512_gather3siv4_di, 104313}, // __builtin_ia32_gather3siv4di
+      {Intrinsic::x86_avx512_gather3siv4_sf, 104342}, // __builtin_ia32_gather3siv4sf
+      {Intrinsic::x86_avx512_gather3siv4_si, 104371}, // __builtin_ia32_gather3siv4si
+      {Intrinsic::x86_avx512_gather3siv8_sf, 104400}, // __builtin_ia32_gather3siv8sf
+      {Intrinsic::x86_avx512_gather3siv8_si, 104429}, // __builtin_ia32_gather3siv8si
+      {Intrinsic::x86_avx2_gather_d_d, 100930}, // __builtin_ia32_gatherd_d
+      {Intrinsic::x86_avx2_gather_d_d_256, 100955}, // __builtin_ia32_gatherd_d256
+      {Intrinsic::x86_avx2_gather_d_pd, 100983}, // __builtin_ia32_gatherd_pd
+      {Intrinsic::x86_avx2_gather_d_pd_256, 101009}, // __builtin_ia32_gatherd_pd256
+      {Intrinsic::x86_avx2_gather_d_ps, 101038}, // __builtin_ia32_gatherd_ps
+      {Intrinsic::x86_avx2_gather_d_ps_256, 101064}, // __builtin_ia32_gatherd_ps256
+      {Intrinsic::x86_avx2_gather_d_q, 101093}, // __builtin_ia32_gatherd_q
+      {Intrinsic::x86_avx2_gather_d_q_256, 101118}, // __builtin_ia32_gatherd_q256
+      {Intrinsic::x86_avx512_gather_qps_512, 103965}, // __builtin_ia32_gatherdiv16sf
+      {Intrinsic::x86_avx512_gather_qpi_512, 103908}, // __builtin_ia32_gatherdiv16si
+      {Intrinsic::x86_avx512_gather_qpd_512, 103880}, // __builtin_ia32_gatherdiv8df
+      {Intrinsic::x86_avx512_gather_qpq_512, 103937}, // __builtin_ia32_gatherdiv8di
+      {Intrinsic::x86_avx512_gatherpf_dpd_512, 104458}, // __builtin_ia32_gatherpfdpd
+      {Intrinsic::x86_avx512_gatherpf_dps_512, 104485}, // __builtin_ia32_gatherpfdps
+      {Intrinsic::x86_avx512_gatherpf_qpd_512, 104512}, // __builtin_ia32_gatherpfqpd
+      {Intrinsic::x86_avx512_gatherpf_qps_512, 104539}, // __builtin_ia32_gatherpfqps
+      {Intrinsic::x86_avx2_gather_q_d, 101146}, // __builtin_ia32_gatherq_d
+      {Intrinsic::x86_avx2_gather_q_d_256, 101171}, // __builtin_ia32_gatherq_d256
+      {Intrinsic::x86_avx2_gather_q_pd, 101199}, // __builtin_ia32_gatherq_pd
+      {Intrinsic::x86_avx2_gather_q_pd_256, 101225}, // __builtin_ia32_gatherq_pd256
+      {Intrinsic::x86_avx2_gather_q_ps, 101254}, // __builtin_ia32_gatherq_ps
+      {Intrinsic::x86_avx2_gather_q_ps_256, 101280}, // __builtin_ia32_gatherq_ps256
+      {Intrinsic::x86_avx2_gather_q_q, 101309}, // __builtin_ia32_gatherq_q
+      {Intrinsic::x86_avx2_gather_q_q_256, 101334}, // __builtin_ia32_gatherq_q256
+      {Intrinsic::x86_avx512_gather_dps_512, 103851}, // __builtin_ia32_gathersiv16sf
+      {Intrinsic::x86_avx512_gather_dpi_512, 103794}, // __builtin_ia32_gathersiv16si
+      {Intrinsic::x86_avx512_gather_dpd_512, 103766}, // __builtin_ia32_gathersiv8df
+      {Intrinsic::x86_avx512_gather_dpq_512, 103823}, // __builtin_ia32_gathersiv8di
+      {Intrinsic::x86_avx512_mask_getexp_pd_128, 110379}, // __builtin_ia32_getexppd128_mask
+      {Intrinsic::x86_avx512_mask_getexp_pd_256, 110411}, // __builtin_ia32_getexppd256_mask
+      {Intrinsic::x86_avx512_mask_getexp_pd_512, 110443}, // __builtin_ia32_getexppd512_mask
+      {Intrinsic::x86_avx512_mask_getexp_ps_128, 110475}, // __builtin_ia32_getexpps128_mask
+      {Intrinsic::x86_avx512_mask_getexp_ps_256, 110507}, // __builtin_ia32_getexpps256_mask
+      {Intrinsic::x86_avx512_mask_getexp_ps_512, 110539}, // __builtin_ia32_getexpps512_mask
+      {Intrinsic::x86_avx512_mask_getexp_sd, 110571}, // __builtin_ia32_getexpsd128_round_mask
+      {Intrinsic::x86_avx512_mask_getexp_ss, 110609}, // __builtin_ia32_getexpss128_round_mask
+      {Intrinsic::x86_avx512_mask_getmant_pd_128, 110647}, // __builtin_ia32_getmantpd128_mask
+      {Intrinsic::x86_avx512_mask_getmant_pd_256, 110680}, // __builtin_ia32_getmantpd256_mask
+      {Intrinsic::x86_avx512_mask_getmant_pd_512, 110713}, // __builtin_ia32_getmantpd512_mask
+      {Intrinsic::x86_avx512_mask_getmant_ps_128, 110746}, // __builtin_ia32_getmantps128_mask
+      {Intrinsic::x86_avx512_mask_getmant_ps_256, 110779}, // __builtin_ia32_getmantps256_mask
+      {Intrinsic::x86_avx512_mask_getmant_ps_512, 110812}, // __builtin_ia32_getmantps512_mask
+      {Intrinsic::x86_avx512_mask_getmant_sd, 110845}, // __builtin_ia32_getmantsd_round_mask
+      {Intrinsic::x86_avx512_mask_getmant_ss, 110881}, // __builtin_ia32_getmantss_round_mask
+      {Intrinsic::x86_sse3_hadd_pd, 135104}, // __builtin_ia32_haddpd
+      {Intrinsic::x86_avx_hadd_pd_256, 99721}, // __builtin_ia32_haddpd256
+      {Intrinsic::x86_sse3_hadd_ps, 135126}, // __builtin_ia32_haddps
+      {Intrinsic::x86_avx_hadd_ps_256, 99746}, // __builtin_ia32_haddps256
+      {Intrinsic::x86_sse3_hsub_pd, 135148}, // __builtin_ia32_hsubpd
+      {Intrinsic::x86_avx_hsub_pd_256, 99771}, // __builtin_ia32_hsubpd256
+      {Intrinsic::x86_sse3_hsub_ps, 135170}, // __builtin_ia32_hsubps
+      {Intrinsic::x86_avx_hsub_ps_256, 99796}, // __builtin_ia32_hsubps256
+      {Intrinsic::x86_incsspd, 130134}, // __builtin_ia32_incsspd
+      {Intrinsic::x86_incsspq, 130157}, // __builtin_ia32_incsspq
+      {Intrinsic::x86_sse41_insertps, 135345}, // __builtin_ia32_insertps128
+      {Intrinsic::x86_sse4a_insertq, 136212}, // __builtin_ia32_insertq
+      {Intrinsic::x86_sse4a_insertqi, 136235}, // __builtin_ia32_insertqi
+      {Intrinsic::x86_sse3_ldu_dq, 135192}, // __builtin_ia32_lddqu
+      {Intrinsic::x86_avx_ldu_dq_256, 99821}, // __builtin_ia32_lddqu256
+      {Intrinsic::x86_sse2_lfence, 133830}, // __builtin_ia32_lfence
+      {Intrinsic::x86_llwpcb, 130180}, // __builtin_ia32_llwpcb
+      {Intrinsic::x86_lwpins32, 130202}, // __builtin_ia32_lwpins32
+      {Intrinsic::x86_lwpins64, 130226}, // __builtin_ia32_lwpins64
+      {Intrinsic::x86_lwpval32, 130250}, // __builtin_ia32_lwpval32
+      {Intrinsic::x86_lwpval64, 130274}, // __builtin_ia32_lwpval64
+      {Intrinsic::x86_avx2_maskload_d, 101362}, // __builtin_ia32_maskloadd
+      {Intrinsic::x86_avx2_maskload_d_256, 101387}, // __builtin_ia32_maskloadd256
+      {Intrinsic::x86_avx_maskload_pd, 99845}, // __builtin_ia32_maskloadpd
+      {Intrinsic::x86_avx_maskload_pd_256, 99871}, // __builtin_ia32_maskloadpd256
+      {Intrinsic::x86_avx_maskload_ps, 99900}, // __builtin_ia32_maskloadps
+      {Intrinsic::x86_avx_maskload_ps_256, 99926}, // __builtin_ia32_maskloadps256
+      {Intrinsic::x86_avx2_maskload_q, 101415}, // __builtin_ia32_maskloadq
+      {Intrinsic::x86_avx2_maskload_q_256, 101440}, // __builtin_ia32_maskloadq256
+      {Intrinsic::x86_sse2_maskmov_dqu, 133852}, // __builtin_ia32_maskmovdqu
+      {Intrinsic::x86_mmx_maskmovq, 130339}, // __builtin_ia32_maskmovq
+      {Intrinsic::x86_avx2_maskstore_d, 101468}, // __builtin_ia32_maskstored
+      {Intrinsic::x86_avx2_maskstore_d_256, 101494}, // __builtin_ia32_maskstored256
+      {Intrinsic::x86_avx_maskstore_pd, 99955}, // __builtin_ia32_maskstorepd
+      {Intrinsic::x86_avx_maskstore_pd_256, 99982}, // __builtin_ia32_maskstorepd256
+      {Intrinsic::x86_avx_maskstore_ps, 100012}, // __builtin_ia32_maskstoreps
+      {Intrinsic::x86_avx_maskstore_ps_256, 100039}, // __builtin_ia32_maskstoreps256
+      {Intrinsic::x86_avx2_maskstore_q, 101523}, // __builtin_ia32_maskstoreq
+      {Intrinsic::x86_avx2_maskstore_q_256, 101549}, // __builtin_ia32_maskstoreq256
+      {Intrinsic::x86_sse2_max_pd, 133878}, // __builtin_ia32_maxpd
+      {Intrinsic::x86_avx_max_pd_256, 100069}, // __builtin_ia32_maxpd256
+      {Intrinsic::x86_avx512_mask_max_pd_512, 110917}, // __builtin_ia32_maxpd512_mask
+      {Intrinsic::x86_sse_max_ps, 132946}, // __builtin_ia32_maxps
+      {Intrinsic::x86_avx_max_ps_256, 100093}, // __builtin_ia32_maxps256
+      {Intrinsic::x86_avx512_mask_max_ps_512, 110946}, // __builtin_ia32_maxps512_mask
+      {Intrinsic::x86_sse2_max_sd, 133899}, // __builtin_ia32_maxsd
+      {Intrinsic::x86_avx512_mask_max_sd_round, 110975}, // __builtin_ia32_maxsd_round_mask
+      {Intrinsic::x86_sse_max_ss, 132967}, // __builtin_ia32_maxss
+      {Intrinsic::x86_avx512_mask_max_ss_round, 111007}, // __builtin_ia32_maxss_round_mask
+      {Intrinsic::x86_sse2_mfence, 133920}, // __builtin_ia32_mfence
+      {Intrinsic::x86_sse2_min_pd, 133942}, // __builtin_ia32_minpd
+      {Intrinsic::x86_avx_min_pd_256, 100117}, // __builtin_ia32_minpd256
+      {Intrinsic::x86_avx512_mask_min_pd_512, 111039}, // __builtin_ia32_minpd512_mask
+      {Intrinsic::x86_sse_min_ps, 132988}, // __builtin_ia32_minps
+      {Intrinsic::x86_avx_min_ps_256, 100141}, // __builtin_ia32_minps256
+      {Intrinsic::x86_avx512_mask_min_ps_512, 111068}, // __builtin_ia32_minps512_mask
+      {Intrinsic::x86_sse2_min_sd, 133963}, // __builtin_ia32_minsd
+      {Intrinsic::x86_avx512_mask_min_sd_round, 111097}, // __builtin_ia32_minsd_round_mask
+      {Intrinsic::x86_sse_min_ss, 133009}, // __builtin_ia32_minss
+      {Intrinsic::x86_avx512_mask_min_ss_round, 111129}, // __builtin_ia32_minss_round_mask
+      {Intrinsic::x86_sse3_monitor, 135213}, // __builtin_ia32_monitor
+      {Intrinsic::x86_monitorx, 131885}, // __builtin_ia32_monitorx
+      {Intrinsic::x86_sse2_movmsk_pd, 133984}, // __builtin_ia32_movmskpd
+      {Intrinsic::x86_avx_movmsk_pd_256, 100165}, // __builtin_ia32_movmskpd256
+      {Intrinsic::x86_sse_movmsk_ps, 133030}, // __builtin_ia32_movmskps
+      {Intrinsic::x86_avx_movmsk_ps_256, 100192}, // __builtin_ia32_movmskps256
+      {Intrinsic::x86_mmx_movnt_dq, 130363}, // __builtin_ia32_movntq
+      {Intrinsic::x86_sse41_mpsadbw, 135372}, // __builtin_ia32_mpsadbw128
+      {Intrinsic::x86_avx2_mpsadbw, 101578}, // __builtin_ia32_mpsadbw256
+      {Intrinsic::x86_avx512_mask_mul_pd_512, 111161}, // __builtin_ia32_mulpd512_mask
+      {Intrinsic::x86_avx512_mask_mul_ps_512, 111190}, // __builtin_ia32_mulps512_mask
+      {Intrinsic::x86_avx512_mask_mul_sd_round, 111219}, // __builtin_ia32_mulsd_round_mask
+      {Intrinsic::x86_avx512_mask_mul_ss_round, 111251}, // __builtin_ia32_mulss_round_mask
+      {Intrinsic::x86_sse3_mwait, 135236}, // __builtin_ia32_mwait
+      {Intrinsic::x86_mwaitx, 131909}, // __builtin_ia32_mwaitx
+      {Intrinsic::x86_ssse3_pabs_b, 136259}, // __builtin_ia32_pabsb
+      {Intrinsic::x86_ssse3_pabs_d, 136280}, // __builtin_ia32_pabsd
+      {Intrinsic::x86_ssse3_pabs_w, 136301}, // __builtin_ia32_pabsw
+      {Intrinsic::x86_mmx_packssdw, 130385}, // __builtin_ia32_packssdw
+      {Intrinsic::x86_sse2_packssdw_128, 134008}, // __builtin_ia32_packssdw128
+      {Intrinsic::x86_avx2_packssdw, 101604}, // __builtin_ia32_packssdw256
+      {Intrinsic::x86_avx512_packssdw_512, 126183}, // __builtin_ia32_packssdw512
+      {Intrinsic::x86_mmx_packsswb, 130409}, // __builtin_ia32_packsswb
+      {Intrinsic::x86_sse2_packsswb_128, 134035}, // __builtin_ia32_packsswb128
+      {Intrinsic::x86_avx2_packsswb, 101631}, // __builtin_ia32_packsswb256
+      {Intrinsic::x86_avx512_packsswb_512, 126210}, // __builtin_ia32_packsswb512
+      {Intrinsic::x86_sse41_packusdw, 135398}, // __builtin_ia32_packusdw128
+      {Intrinsic::x86_avx2_packusdw, 101658}, // __builtin_ia32_packusdw256
+      {Intrinsic::x86_avx512_packusdw_512, 126237}, // __builtin_ia32_packusdw512
+      {Intrinsic::x86_mmx_packuswb, 130433}, // __builtin_ia32_packuswb
+      {Intrinsic::x86_sse2_packuswb_128, 134062}, // __builtin_ia32_packuswb128
+      {Intrinsic::x86_avx2_packuswb, 101685}, // __builtin_ia32_packuswb256
+      {Intrinsic::x86_avx512_packuswb_512, 126264}, // __builtin_ia32_packuswb512
+      {Intrinsic::x86_mmx_padd_b, 130457}, // __builtin_ia32_paddb
+      {Intrinsic::x86_mmx_padd_d, 130478}, // __builtin_ia32_paddd
+      {Intrinsic::x86_mmx_padd_q, 130499}, // __builtin_ia32_paddq
+      {Intrinsic::x86_mmx_padds_b, 130541}, // __builtin_ia32_paddsb
+      {Intrinsic::x86_sse2_padds_b, 134089}, // __builtin_ia32_paddsb128
+      {Intrinsic::x86_avx2_padds_b, 101712}, // __builtin_ia32_paddsb256
+      {Intrinsic::x86_avx512_mask_padds_b_512, 111283}, // __builtin_ia32_paddsb512_mask
+      {Intrinsic::x86_mmx_padds_w, 130563}, // __builtin_ia32_paddsw
+      {Intrinsic::x86_sse2_padds_w, 134114}, // __builtin_ia32_paddsw128
+      {Intrinsic::x86_avx2_padds_w, 101737}, // __builtin_ia32_paddsw256
+      {Intrinsic::x86_avx512_mask_padds_w_512, 111313}, // __builtin_ia32_paddsw512_mask
+      {Intrinsic::x86_mmx_paddus_b, 130585}, // __builtin_ia32_paddusb
+      {Intrinsic::x86_sse2_paddus_b, 134139}, // __builtin_ia32_paddusb128
+      {Intrinsic::x86_avx2_paddus_b, 101762}, // __builtin_ia32_paddusb256
+      {Intrinsic::x86_avx512_mask_paddus_b_512, 111343}, // __builtin_ia32_paddusb512_mask
+      {Intrinsic::x86_mmx_paddus_w, 130608}, // __builtin_ia32_paddusw
+      {Intrinsic::x86_sse2_paddus_w, 134165}, // __builtin_ia32_paddusw128
+      {Intrinsic::x86_avx2_paddus_w, 101788}, // __builtin_ia32_paddusw256
+      {Intrinsic::x86_avx512_mask_paddus_w_512, 111374}, // __builtin_ia32_paddusw512_mask
+      {Intrinsic::x86_mmx_padd_w, 130520}, // __builtin_ia32_paddw
+      {Intrinsic::x86_mmx_palignr_b, 130631}, // __builtin_ia32_palignr
+      {Intrinsic::x86_mmx_pand, 130654}, // __builtin_ia32_pand
+      {Intrinsic::x86_mmx_pandn, 130674}, // __builtin_ia32_pandn
+      {Intrinsic::x86_sse2_pause, 134191}, // __builtin_ia32_pause
+      {Intrinsic::x86_mmx_pavg_b, 130695}, // __builtin_ia32_pavgb
+      {Intrinsic::x86_3dnow_pavgusb, 98421}, // __builtin_ia32_pavgusb
+      {Intrinsic::x86_mmx_pavg_w, 130716}, // __builtin_ia32_pavgw
+      {Intrinsic::x86_sse41_pblendvb, 135425}, // __builtin_ia32_pblendvb128
+      {Intrinsic::x86_avx2_pblendvb, 101814}, // __builtin_ia32_pblendvb256
+      {Intrinsic::x86_pclmulqdq, 131931}, // __builtin_ia32_pclmulqdq128
+      {Intrinsic::x86_pclmulqdq_256, 131959}, // __builtin_ia32_pclmulqdq256
+      {Intrinsic::x86_pclmulqdq_512, 131987}, // __builtin_ia32_pclmulqdq512
+      {Intrinsic::x86_mmx_pcmpeq_b, 130737}, // __builtin_ia32_pcmpeqb
+      {Intrinsic::x86_mmx_pcmpeq_d, 130760}, // __builtin_ia32_pcmpeqd
+      {Intrinsic::x86_mmx_pcmpeq_w, 130783}, // __builtin_ia32_pcmpeqw
+      {Intrinsic::x86_sse42_pcmpestri128, 135767}, // __builtin_ia32_pcmpestri128
+      {Intrinsic::x86_sse42_pcmpestria128, 135795}, // __builtin_ia32_pcmpestria128
+      {Intrinsic::x86_sse42_pcmpestric128, 135824}, // __builtin_ia32_pcmpestric128
+      {Intrinsic::x86_sse42_pcmpestrio128, 135853}, // __builtin_ia32_pcmpestrio128
+      {Intrinsic::x86_sse42_pcmpestris128, 135882}, // __builtin_ia32_pcmpestris128
+      {Intrinsic::x86_sse42_pcmpestriz128, 135911}, // __builtin_ia32_pcmpestriz128
+      {Intrinsic::x86_sse42_pcmpestrm128, 135940}, // __builtin_ia32_pcmpestrm128
+      {Intrinsic::x86_mmx_pcmpgt_b, 130806}, // __builtin_ia32_pcmpgtb
+      {Intrinsic::x86_mmx_pcmpgt_d, 130829}, // __builtin_ia32_pcmpgtd
+      {Intrinsic::x86_mmx_pcmpgt_w, 130852}, // __builtin_ia32_pcmpgtw
+      {Intrinsic::x86_sse42_pcmpistri128, 135968}, // __builtin_ia32_pcmpistri128
+      {Intrinsic::x86_sse42_pcmpistria128, 135996}, // __builtin_ia32_pcmpistria128
+      {Intrinsic::x86_sse42_pcmpistric128, 136025}, // __builtin_ia32_pcmpistric128
+      {Intrinsic::x86_sse42_pcmpistrio128, 136054}, // __builtin_ia32_pcmpistrio128
+      {Intrinsic::x86_sse42_pcmpistris128, 136083}, // __builtin_ia32_pcmpistris128
+      {Intrinsic::x86_sse42_pcmpistriz128, 136112}, // __builtin_ia32_pcmpistriz128
+      {Intrinsic::x86_sse42_pcmpistrm128, 136141}, // __builtin_ia32_pcmpistrm128
+      {Intrinsic::x86_bmi_pdep_64, 129443}, // __builtin_ia32_pdep_di
+      {Intrinsic::x86_bmi_pdep_32, 129420}, // __builtin_ia32_pdep_si
+      {Intrinsic::x86_avx512_mask_permvar_df_256, 111405}, // __builtin_ia32_permvardf256_mask
+      {Intrinsic::x86_avx512_mask_permvar_df_512, 111438}, // __builtin_ia32_permvardf512_mask
+      {Intrinsic::x86_avx512_mask_permvar_di_256, 111471}, // __builtin_ia32_permvardi256_mask
+      {Intrinsic::x86_avx512_mask_permvar_di_512, 111504}, // __builtin_ia32_permvardi512_mask
+      {Intrinsic::x86_avx512_mask_permvar_hi_128, 111537}, // __builtin_ia32_permvarhi128_mask
+      {Intrinsic::x86_avx512_mask_permvar_hi_256, 111570}, // __builtin_ia32_permvarhi256_mask
+      {Intrinsic::x86_avx512_mask_permvar_hi_512, 111603}, // __builtin_ia32_permvarhi512_mask
+      {Intrinsic::x86_avx512_mask_permvar_qi_128, 111636}, // __builtin_ia32_permvarqi128_mask
+      {Intrinsic::x86_avx512_mask_permvar_qi_256, 111669}, // __builtin_ia32_permvarqi256_mask
+      {Intrinsic::x86_avx512_mask_permvar_qi_512, 111702}, // __builtin_ia32_permvarqi512_mask
+      {Intrinsic::x86_avx2_permps, 101869}, // __builtin_ia32_permvarsf256
+      {Intrinsic::x86_avx512_mask_permvar_sf_512, 111735}, // __builtin_ia32_permvarsf512_mask
+      {Intrinsic::x86_avx2_permd, 101841}, // __builtin_ia32_permvarsi256
+      {Intrinsic::x86_avx512_mask_permvar_si_512, 111768}, // __builtin_ia32_permvarsi512_mask
+      {Intrinsic::x86_bmi_pext_64, 129489}, // __builtin_ia32_pext_di
+      {Intrinsic::x86_bmi_pext_32, 129466}, // __builtin_ia32_pext_si
+      {Intrinsic::x86_3dnow_pf2id, 98444}, // __builtin_ia32_pf2id
+      {Intrinsic::x86_3dnowa_pf2iw, 98842}, // __builtin_ia32_pf2iw
+      {Intrinsic::x86_3dnow_pfacc, 98465}, // __builtin_ia32_pfacc
+      {Intrinsic::x86_3dnow_pfadd, 98486}, // __builtin_ia32_pfadd
+      {Intrinsic::x86_3dnow_pfcmpeq, 98507}, // __builtin_ia32_pfcmpeq
+      {Intrinsic::x86_3dnow_pfcmpge, 98530}, // __builtin_ia32_pfcmpge
+      {Intrinsic::x86_3dnow_pfcmpgt, 98553}, // __builtin_ia32_pfcmpgt
+      {Intrinsic::x86_3dnow_pfmax, 98576}, // __builtin_ia32_pfmax
+      {Intrinsic::x86_3dnow_pfmin, 98597}, // __builtin_ia32_pfmin
+      {Intrinsic::x86_3dnow_pfmul, 98618}, // __builtin_ia32_pfmul
+      {Intrinsic::x86_3dnowa_pfnacc, 98863}, // __builtin_ia32_pfnacc
+      {Intrinsic::x86_3dnowa_pfpnacc, 98885}, // __builtin_ia32_pfpnacc
+      {Intrinsic::x86_3dnow_pfrcp, 98639}, // __builtin_ia32_pfrcp
+      {Intrinsic::x86_3dnow_pfrcpit1, 98660}, // __builtin_ia32_pfrcpit1
+      {Intrinsic::x86_3dnow_pfrcpit2, 98684}, // __builtin_ia32_pfrcpit2
+      {Intrinsic::x86_3dnow_pfrsqit1, 98708}, // __builtin_ia32_pfrsqit1
+      {Intrinsic::x86_3dnow_pfrsqrt, 98732}, // __builtin_ia32_pfrsqrt
+      {Intrinsic::x86_3dnow_pfsub, 98755}, // __builtin_ia32_pfsub
+      {Intrinsic::x86_3dnow_pfsubr, 98776}, // __builtin_ia32_pfsubr
+      {Intrinsic::x86_ssse3_phadd_d, 136322}, // __builtin_ia32_phaddd
+      {Intrinsic::x86_ssse3_phadd_d_128, 136344}, // __builtin_ia32_phaddd128
+      {Intrinsic::x86_avx2_phadd_d, 101897}, // __builtin_ia32_phaddd256
+      {Intrinsic::x86_ssse3_phadd_sw, 136369}, // __builtin_ia32_phaddsw
+      {Intrinsic::x86_ssse3_phadd_sw_128, 136392}, // __builtin_ia32_phaddsw128
+      {Intrinsic::x86_avx2_phadd_sw, 101922}, // __builtin_ia32_phaddsw256
+      {Intrinsic::x86_ssse3_phadd_w, 136418}, // __builtin_ia32_phaddw
+      {Intrinsic::x86_ssse3_phadd_w_128, 136440}, // __builtin_ia32_phaddw128
+      {Intrinsic::x86_avx2_phadd_w, 101948}, // __builtin_ia32_phaddw256
+      {Intrinsic::x86_sse41_phminposuw, 135452}, // __builtin_ia32_phminposuw128
+      {Intrinsic::x86_ssse3_phsub_d, 136465}, // __builtin_ia32_phsubd
+      {Intrinsic::x86_ssse3_phsub_d_128, 136487}, // __builtin_ia32_phsubd128
+      {Intrinsic::x86_avx2_phsub_d, 101973}, // __builtin_ia32_phsubd256
+      {Intrinsic::x86_ssse3_phsub_sw, 136512}, // __builtin_ia32_phsubsw
+      {Intrinsic::x86_ssse3_phsub_sw_128, 136535}, // __builtin_ia32_phsubsw128
+      {Intrinsic::x86_avx2_phsub_sw, 101998}, // __builtin_ia32_phsubsw256
+      {Intrinsic::x86_ssse3_phsub_w, 136561}, // __builtin_ia32_phsubw
+      {Intrinsic::x86_ssse3_phsub_w_128, 136583}, // __builtin_ia32_phsubw128
+      {Intrinsic::x86_avx2_phsub_w, 102024}, // __builtin_ia32_phsubw256
+      {Intrinsic::x86_3dnow_pi2fd, 98798}, // __builtin_ia32_pi2fd
+      {Intrinsic::x86_3dnowa_pi2fw, 98908}, // __builtin_ia32_pi2fw
+      {Intrinsic::x86_ssse3_pmadd_ub_sw, 136608}, // __builtin_ia32_pmaddubsw
+      {Intrinsic::x86_ssse3_pmadd_ub_sw_128, 136633}, // __builtin_ia32_pmaddubsw128
+      {Intrinsic::x86_avx2_pmadd_ub_sw, 102049}, // __builtin_ia32_pmaddubsw256
+      {Intrinsic::x86_avx512_mask_pmaddubs_w_512, 111801}, // __builtin_ia32_pmaddubsw512_mask
+      {Intrinsic::x86_mmx_pmadd_wd, 130931}, // __builtin_ia32_pmaddwd
+      {Intrinsic::x86_sse2_pmadd_wd, 134212}, // __builtin_ia32_pmaddwd128
+      {Intrinsic::x86_avx2_pmadd_wd, 102077}, // __builtin_ia32_pmaddwd256
+      {Intrinsic::x86_avx512_mask_pmaddw_d_512, 111834}, // __builtin_ia32_pmaddwd512_mask
+      {Intrinsic::x86_mmx_pmaxs_w, 130954}, // __builtin_ia32_pmaxsw
+      {Intrinsic::x86_mmx_pmaxu_b, 130976}, // __builtin_ia32_pmaxub
+      {Intrinsic::x86_mmx_pmins_w, 130998}, // __builtin_ia32_pminsw
+      {Intrinsic::x86_mmx_pminu_b, 131020}, // __builtin_ia32_pminub
+      {Intrinsic::x86_avx512_mask_pmov_db_128, 111865}, // __builtin_ia32_pmovdb128_mask
+      {Intrinsic::x86_avx512_mask_pmov_db_mem_128, 111955}, // __builtin_ia32_pmovdb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_db_256, 111895}, // __builtin_ia32_pmovdb256_mask
+      {Intrinsic::x86_avx512_mask_pmov_db_mem_256, 111988}, // __builtin_ia32_pmovdb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_db_512, 111925}, // __builtin_ia32_pmovdb512_mask
+      {Intrinsic::x86_avx512_mask_pmov_db_mem_512, 112021}, // __builtin_ia32_pmovdb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_128, 112054}, // __builtin_ia32_pmovdw128_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_mem_128, 112144}, // __builtin_ia32_pmovdw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_256, 112084}, // __builtin_ia32_pmovdw256_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_mem_256, 112177}, // __builtin_ia32_pmovdw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_512, 112114}, // __builtin_ia32_pmovdw512_mask
+      {Intrinsic::x86_avx512_mask_pmov_dw_mem_512, 112210}, // __builtin_ia32_pmovdw512mem_mask
+      {Intrinsic::x86_mmx_pmovmskb, 131042}, // __builtin_ia32_pmovmskb
+      {Intrinsic::x86_sse2_pmovmskb_128, 134238}, // __builtin_ia32_pmovmskb128
+      {Intrinsic::x86_avx2_pmovmskb, 102103}, // __builtin_ia32_pmovmskb256
+      {Intrinsic::x86_avx512_mask_pmov_qb_128, 112243}, // __builtin_ia32_pmovqb128_mask
+      {Intrinsic::x86_avx512_mask_pmov_qb_mem_128, 112333}, // __builtin_ia32_pmovqb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qb_256, 112273}, // __builtin_ia32_pmovqb256_mask
+      {Intrinsic::x86_avx512_mask_pmov_qb_mem_256, 112366}, // __builtin_ia32_pmovqb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qb_512, 112303}, // __builtin_ia32_pmovqb512_mask
+      {Intrinsic::x86_avx512_mask_pmov_qb_mem_512, 112399}, // __builtin_ia32_pmovqb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_128, 112432}, // __builtin_ia32_pmovqd128_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_mem_128, 112522}, // __builtin_ia32_pmovqd128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_256, 112462}, // __builtin_ia32_pmovqd256_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_mem_256, 112555}, // __builtin_ia32_pmovqd256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_512, 112492}, // __builtin_ia32_pmovqd512_mask
+      {Intrinsic::x86_avx512_mask_pmov_qd_mem_512, 112588}, // __builtin_ia32_pmovqd512mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_128, 112621}, // __builtin_ia32_pmovqw128_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_mem_128, 112711}, // __builtin_ia32_pmovqw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_256, 112651}, // __builtin_ia32_pmovqw256_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_mem_256, 112744}, // __builtin_ia32_pmovqw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_512, 112681}, // __builtin_ia32_pmovqw512_mask
+      {Intrinsic::x86_avx512_mask_pmov_qw_mem_512, 112777}, // __builtin_ia32_pmovqw512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_128, 112999}, // __builtin_ia32_pmovsdb128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_mem_128, 113092}, // __builtin_ia32_pmovsdb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_256, 113030}, // __builtin_ia32_pmovsdb256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_mem_256, 113126}, // __builtin_ia32_pmovsdb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_512, 113061}, // __builtin_ia32_pmovsdb512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_db_mem_512, 113160}, // __builtin_ia32_pmovsdb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_128, 113194}, // __builtin_ia32_pmovsdw128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_128, 113287}, // __builtin_ia32_pmovsdw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_256, 113225}, // __builtin_ia32_pmovsdw256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_256, 113321}, // __builtin_ia32_pmovsdw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_512, 113256}, // __builtin_ia32_pmovsdw512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_512, 113355}, // __builtin_ia32_pmovsdw512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_128, 113389}, // __builtin_ia32_pmovsqb128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_128, 113482}, // __builtin_ia32_pmovsqb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_256, 113420}, // __builtin_ia32_pmovsqb256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_256, 113516}, // __builtin_ia32_pmovsqb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_512, 113451}, // __builtin_ia32_pmovsqb512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_512, 113550}, // __builtin_ia32_pmovsqb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_128, 113584}, // __builtin_ia32_pmovsqd128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_128, 113677}, // __builtin_ia32_pmovsqd128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_256, 113615}, // __builtin_ia32_pmovsqd256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_256, 113711}, // __builtin_ia32_pmovsqd256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_512, 113646}, // __builtin_ia32_pmovsqd512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_512, 113745}, // __builtin_ia32_pmovsqd512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_128, 113779}, // __builtin_ia32_pmovsqw128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_128, 113872}, // __builtin_ia32_pmovsqw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_256, 113810}, // __builtin_ia32_pmovsqw256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_256, 113906}, // __builtin_ia32_pmovsqw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_512, 113841}, // __builtin_ia32_pmovsqw512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_512, 113940}, // __builtin_ia32_pmovsqw512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_128, 113974}, // __builtin_ia32_pmovswb128_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_128, 114067}, // __builtin_ia32_pmovswb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_256, 114005}, // __builtin_ia32_pmovswb256_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_256, 114101}, // __builtin_ia32_pmovswb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_512, 114036}, // __builtin_ia32_pmovswb512_mask
+      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_512, 114135}, // __builtin_ia32_pmovswb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_128, 114169}, // __builtin_ia32_pmovusdb128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_mem_128, 114265}, // __builtin_ia32_pmovusdb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_256, 114201}, // __builtin_ia32_pmovusdb256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_mem_256, 114300}, // __builtin_ia32_pmovusdb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_512, 114233}, // __builtin_ia32_pmovusdb512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_db_mem_512, 114335}, // __builtin_ia32_pmovusdb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_128, 114370}, // __builtin_ia32_pmovusdw128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_128, 114466}, // __builtin_ia32_pmovusdw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_256, 114402}, // __builtin_ia32_pmovusdw256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_256, 114501}, // __builtin_ia32_pmovusdw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_512, 114434}, // __builtin_ia32_pmovusdw512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_512, 114536}, // __builtin_ia32_pmovusdw512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_128, 114571}, // __builtin_ia32_pmovusqb128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_128, 114667}, // __builtin_ia32_pmovusqb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_256, 114603}, // __builtin_ia32_pmovusqb256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_256, 114702}, // __builtin_ia32_pmovusqb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_512, 114635}, // __builtin_ia32_pmovusqb512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_512, 114737}, // __builtin_ia32_pmovusqb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_128, 114772}, // __builtin_ia32_pmovusqd128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_128, 114868}, // __builtin_ia32_pmovusqd128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_256, 114804}, // __builtin_ia32_pmovusqd256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_256, 114903}, // __builtin_ia32_pmovusqd256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_512, 114836}, // __builtin_ia32_pmovusqd512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_512, 114938}, // __builtin_ia32_pmovusqd512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_128, 114973}, // __builtin_ia32_pmovusqw128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_128, 115069}, // __builtin_ia32_pmovusqw128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_256, 115005}, // __builtin_ia32_pmovusqw256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_256, 115104}, // __builtin_ia32_pmovusqw256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_512, 115037}, // __builtin_ia32_pmovusqw512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_512, 115139}, // __builtin_ia32_pmovusqw512mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_128, 115174}, // __builtin_ia32_pmovuswb128_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_128, 115270}, // __builtin_ia32_pmovuswb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_256, 115206}, // __builtin_ia32_pmovuswb256_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_256, 115305}, // __builtin_ia32_pmovuswb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_512, 115238}, // __builtin_ia32_pmovuswb512_mask
+      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_512, 115340}, // __builtin_ia32_pmovuswb512mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_128, 112810}, // __builtin_ia32_pmovwb128_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_mem_128, 112900}, // __builtin_ia32_pmovwb128mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_256, 112840}, // __builtin_ia32_pmovwb256_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_mem_256, 112933}, // __builtin_ia32_pmovwb256mem_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_512, 112870}, // __builtin_ia32_pmovwb512_mask
+      {Intrinsic::x86_avx512_mask_pmov_wb_mem_512, 112966}, // __builtin_ia32_pmovwb512mem_mask
+      {Intrinsic::x86_sse41_pmuldq, 135481}, // __builtin_ia32_pmuldq128
+      {Intrinsic::x86_avx2_pmul_dq, 102130}, // __builtin_ia32_pmuldq256
+      {Intrinsic::x86_avx512_pmul_dq_512, 126291}, // __builtin_ia32_pmuldq512
+      {Intrinsic::x86_ssse3_pmul_hr_sw, 136661}, // __builtin_ia32_pmulhrsw
+      {Intrinsic::x86_ssse3_pmul_hr_sw_128, 136685}, // __builtin_ia32_pmulhrsw128
+      {Intrinsic::x86_avx2_pmul_hr_sw, 102155}, // __builtin_ia32_pmulhrsw256
+      {Intrinsic::x86_avx512_pmul_hr_sw_512, 126316}, // __builtin_ia32_pmulhrsw512
+      {Intrinsic::x86_3dnow_pmulhrw, 98819}, // __builtin_ia32_pmulhrw
+      {Intrinsic::x86_mmx_pmulhu_w, 131088}, // __builtin_ia32_pmulhuw
+      {Intrinsic::x86_sse2_pmulhu_w, 134290}, // __builtin_ia32_pmulhuw128
+      {Intrinsic::x86_avx2_pmulhu_w, 102207}, // __builtin_ia32_pmulhuw256
+      {Intrinsic::x86_avx512_pmulhu_w_512, 126368}, // __builtin_ia32_pmulhuw512
+      {Intrinsic::x86_mmx_pmulh_w, 131066}, // __builtin_ia32_pmulhw
+      {Intrinsic::x86_sse2_pmulh_w, 134265}, // __builtin_ia32_pmulhw128
+      {Intrinsic::x86_avx2_pmulh_w, 102182}, // __builtin_ia32_pmulhw256
+      {Intrinsic::x86_avx512_pmulh_w_512, 126343}, // __builtin_ia32_pmulhw512
+      {Intrinsic::x86_mmx_pmull_w, 131111}, // __builtin_ia32_pmullw
+      {Intrinsic::x86_mmx_pmulu_dq, 131133}, // __builtin_ia32_pmuludq
+      {Intrinsic::x86_sse2_pmulu_dq, 134316}, // __builtin_ia32_pmuludq128
+      {Intrinsic::x86_avx2_pmulu_dq, 102233}, // __builtin_ia32_pmuludq256
+      {Intrinsic::x86_avx512_pmulu_dq_512, 126394}, // __builtin_ia32_pmuludq512
+      {Intrinsic::x86_mmx_por, 131156}, // __builtin_ia32_por
+      {Intrinsic::x86_avx512_mask_prol_d_128, 115489}, // __builtin_ia32_prold128_mask
+      {Intrinsic::x86_avx512_mask_prol_d_256, 115518}, // __builtin_ia32_prold256_mask
+      {Intrinsic::x86_avx512_mask_prol_d_512, 115547}, // __builtin_ia32_prold512_mask
+      {Intrinsic::x86_avx512_mask_prol_q_128, 115576}, // __builtin_ia32_prolq128_mask
+      {Intrinsic::x86_avx512_mask_prol_q_256, 115605}, // __builtin_ia32_prolq256_mask
+      {Intrinsic::x86_avx512_mask_prol_q_512, 115634}, // __builtin_ia32_prolq512_mask
+      {Intrinsic::x86_avx512_mask_prolv_d_128, 115663}, // __builtin_ia32_prolvd128_mask
+      {Intrinsic::x86_avx512_mask_prolv_d_256, 115693}, // __builtin_ia32_prolvd256_mask
+      {Intrinsic::x86_avx512_mask_prolv_d_512, 115723}, // __builtin_ia32_prolvd512_mask
+      {Intrinsic::x86_avx512_mask_prolv_q_128, 115753}, // __builtin_ia32_prolvq128_mask
+      {Intrinsic::x86_avx512_mask_prolv_q_256, 115783}, // __builtin_ia32_prolvq256_mask
+      {Intrinsic::x86_avx512_mask_prolv_q_512, 115813}, // __builtin_ia32_prolvq512_mask
+      {Intrinsic::x86_avx512_mask_pror_d_128, 115843}, // __builtin_ia32_prord128_mask
+      {Intrinsic::x86_avx512_mask_pror_d_256, 115872}, // __builtin_ia32_prord256_mask
+      {Intrinsic::x86_avx512_mask_pror_d_512, 115901}, // __builtin_ia32_prord512_mask
+      {Intrinsic::x86_avx512_mask_pror_q_128, 115930}, // __builtin_ia32_prorq128_mask
+      {Intrinsic::x86_avx512_mask_pror_q_256, 115959}, // __builtin_ia32_prorq256_mask
+      {Intrinsic::x86_avx512_mask_pror_q_512, 115988}, // __builtin_ia32_prorq512_mask
+      {Intrinsic::x86_avx512_mask_prorv_d_128, 116017}, // __builtin_ia32_prorvd128_mask
+      {Intrinsic::x86_avx512_mask_prorv_d_256, 116047}, // __builtin_ia32_prorvd256_mask
+      {Intrinsic::x86_avx512_mask_prorv_d_512, 116077}, // __builtin_ia32_prorvd512_mask
+      {Intrinsic::x86_avx512_mask_prorv_q_128, 116107}, // __builtin_ia32_prorvq128_mask
+      {Intrinsic::x86_avx512_mask_prorv_q_256, 116137}, // __builtin_ia32_prorvq256_mask
+      {Intrinsic::x86_avx512_mask_prorv_q_512, 116167}, // __builtin_ia32_prorvq512_mask
+      {Intrinsic::x86_mmx_psad_bw, 131175}, // __builtin_ia32_psadbw
+      {Intrinsic::x86_sse2_psad_bw, 134342}, // __builtin_ia32_psadbw128
+      {Intrinsic::x86_avx2_psad_bw, 102259}, // __builtin_ia32_psadbw256
+      {Intrinsic::x86_avx512_psad_bw_512, 126420}, // __builtin_ia32_psadbw512
+      {Intrinsic::x86_ssse3_pshuf_b, 136712}, // __builtin_ia32_pshufb
+      {Intrinsic::x86_ssse3_pshuf_b_128, 136734}, // __builtin_ia32_pshufb128
+      {Intrinsic::x86_avx2_pshuf_b, 102284}, // __builtin_ia32_pshufb256
+      {Intrinsic::x86_avx512_pshuf_b_512, 126445}, // __builtin_ia32_pshufb512
+      {Intrinsic::x86_sse_pshuf_w, 133054}, // __builtin_ia32_pshufw
+      {Intrinsic::x86_ssse3_psign_b, 136759}, // __builtin_ia32_psignb
+      {Intrinsic::x86_ssse3_psign_b_128, 136781}, // __builtin_ia32_psignb128
+      {Intrinsic::x86_avx2_psign_b, 102309}, // __builtin_ia32_psignb256
+      {Intrinsic::x86_ssse3_psign_d, 136806}, // __builtin_ia32_psignd
+      {Intrinsic::x86_ssse3_psign_d_128, 136828}, // __builtin_ia32_psignd128
+      {Intrinsic::x86_avx2_psign_d, 102334}, // __builtin_ia32_psignd256
+      {Intrinsic::x86_ssse3_psign_w, 136853}, // __builtin_ia32_psignw
+      {Intrinsic::x86_ssse3_psign_w_128, 136875}, // __builtin_ia32_psignw128
+      {Intrinsic::x86_avx2_psign_w, 102359}, // __builtin_ia32_psignw256
+      {Intrinsic::x86_mmx_psll_d, 131197}, // __builtin_ia32_pslld
+      {Intrinsic::x86_sse2_psll_d, 134367}, // __builtin_ia32_pslld128
+      {Intrinsic::x86_avx2_psll_d, 102384}, // __builtin_ia32_pslld256
+      {Intrinsic::x86_avx512_psll_d_512, 126470}, // __builtin_ia32_pslld512
+      {Intrinsic::x86_mmx_pslli_d, 131260}, // __builtin_ia32_pslldi
+      {Intrinsic::x86_sse2_pslli_d, 134439}, // __builtin_ia32_pslldi128
+      {Intrinsic::x86_avx2_pslli_d, 102456}, // __builtin_ia32_pslldi256
+      {Intrinsic::x86_avx512_pslli_d_512, 126542}, // __builtin_ia32_pslldi512
+      {Intrinsic::x86_mmx_psll_q, 131218}, // __builtin_ia32_psllq
+      {Intrinsic::x86_sse2_psll_q, 134391}, // __builtin_ia32_psllq128
+      {Intrinsic::x86_avx2_psll_q, 102408}, // __builtin_ia32_psllq256
+      {Intrinsic::x86_avx512_psll_q_512, 126494}, // __builtin_ia32_psllq512
+      {Intrinsic::x86_mmx_pslli_q, 131282}, // __builtin_ia32_psllqi
+      {Intrinsic::x86_sse2_pslli_q, 134464}, // __builtin_ia32_psllqi128
+      {Intrinsic::x86_avx2_pslli_q, 102481}, // __builtin_ia32_psllqi256
+      {Intrinsic::x86_avx512_pslli_q_512, 126567}, // __builtin_ia32_psllqi512
+      {Intrinsic::x86_avx512_psllv_w_256, 126690}, // __builtin_ia32_psllv16hi
+      {Intrinsic::x86_avx512_psllv_d_512, 126617}, // __builtin_ia32_psllv16si
+      {Intrinsic::x86_avx2_psllv_q, 102579}, // __builtin_ia32_psllv2di
+      {Intrinsic::x86_avx512_psllv_w_512, 126715}, // __builtin_ia32_psllv32hi
+      {Intrinsic::x86_avx2_psllv_q_256, 102603}, // __builtin_ia32_psllv4di
+      {Intrinsic::x86_avx2_psllv_d, 102531}, // __builtin_ia32_psllv4si
+      {Intrinsic::x86_avx512_psllv_q_512, 126642}, // __builtin_ia32_psllv8di
+      {Intrinsic::x86_avx512_psllv_w_128, 126666}, // __builtin_ia32_psllv8hi
+      {Intrinsic::x86_avx2_psllv_d_256, 102555}, // __builtin_ia32_psllv8si
+      {Intrinsic::x86_mmx_psll_w, 131239}, // __builtin_ia32_psllw
+      {Intrinsic::x86_sse2_psll_w, 134415}, // __builtin_ia32_psllw128
+      {Intrinsic::x86_avx2_psll_w, 102432}, // __builtin_ia32_psllw256
+      {Intrinsic::x86_avx512_psll_w_512, 126518}, // __builtin_ia32_psllw512
+      {Intrinsic::x86_mmx_pslli_w, 131304}, // __builtin_ia32_psllwi
+      {Intrinsic::x86_sse2_pslli_w, 134489}, // __builtin_ia32_psllwi128
+      {Intrinsic::x86_avx2_pslli_w, 102506}, // __builtin_ia32_psllwi256
+      {Intrinsic::x86_avx512_pslli_w_512, 126592}, // __builtin_ia32_psllwi512
+      {Intrinsic::x86_mmx_psra_d, 131326}, // __builtin_ia32_psrad
+      {Intrinsic::x86_sse2_psra_d, 134514}, // __builtin_ia32_psrad128
+      {Intrinsic::x86_avx2_psra_d, 102627}, // __builtin_ia32_psrad256
+      {Intrinsic::x86_avx512_psra_d_512, 126740}, // __builtin_ia32_psrad512
+      {Intrinsic::x86_mmx_psrai_d, 131368}, // __builtin_ia32_psradi
+      {Intrinsic::x86_sse2_psrai_d, 134562}, // __builtin_ia32_psradi128
+      {Intrinsic::x86_avx2_psrai_d, 102675}, // __builtin_ia32_psradi256
+      {Intrinsic::x86_avx512_psrai_d_512, 126860}, // __builtin_ia32_psradi512
+      {Intrinsic::x86_avx512_psra_q_128, 126764}, // __builtin_ia32_psraq128
+      {Intrinsic::x86_avx512_psra_q_256, 126788}, // __builtin_ia32_psraq256
+      {Intrinsic::x86_avx512_psra_q_512, 126812}, // __builtin_ia32_psraq512
+      {Intrinsic::x86_avx512_psrai_q_128, 126885}, // __builtin_ia32_psraqi128
+      {Intrinsic::x86_avx512_psrai_q_256, 126910}, // __builtin_ia32_psraqi256
+      {Intrinsic::x86_avx512_psrai_q_512, 126935}, // __builtin_ia32_psraqi512
+      {Intrinsic::x86_avx512_psrav_w_256, 127108}, // __builtin_ia32_psrav16hi
+      {Intrinsic::x86_avx512_psrav_d_512, 126985}, // __builtin_ia32_psrav16si
+      {Intrinsic::x86_avx512_psrav_w_512, 127133}, // __builtin_ia32_psrav32hi
+      {Intrinsic::x86_avx2_psrav_d, 102725}, // __builtin_ia32_psrav4si
+      {Intrinsic::x86_avx512_psrav_q_512, 127060}, // __builtin_ia32_psrav8di
+      {Intrinsic::x86_avx512_psrav_w_128, 127084}, // __builtin_ia32_psrav8hi
+      {Intrinsic::x86_avx2_psrav_d_256, 102749}, // __builtin_ia32_psrav8si
+      {Intrinsic::x86_avx512_psrav_q_128, 127010}, // __builtin_ia32_psravq128
+      {Intrinsic::x86_avx512_psrav_q_256, 127035}, // __builtin_ia32_psravq256
+      {Intrinsic::x86_mmx_psra_w, 131347}, // __builtin_ia32_psraw
+      {Intrinsic::x86_sse2_psra_w, 134538}, // __builtin_ia32_psraw128
+      {Intrinsic::x86_avx2_psra_w, 102651}, // __builtin_ia32_psraw256
+      {Intrinsic::x86_avx512_psra_w_512, 126836}, // __builtin_ia32_psraw512
+      {Intrinsic::x86_mmx_psrai_w, 131390}, // __builtin_ia32_psrawi
+      {Intrinsic::x86_sse2_psrai_w, 134587}, // __builtin_ia32_psrawi128
+      {Intrinsic::x86_avx2_psrai_w, 102700}, // __builtin_ia32_psrawi256
+      {Intrinsic::x86_avx512_psrai_w_512, 126960}, // __builtin_ia32_psrawi512
+      {Intrinsic::x86_mmx_psrl_d, 131412}, // __builtin_ia32_psrld
+      {Intrinsic::x86_sse2_psrl_d, 134612}, // __builtin_ia32_psrld128
+      {Intrinsic::x86_avx2_psrl_d, 102773}, // __builtin_ia32_psrld256
+      {Intrinsic::x86_avx512_psrl_d_512, 127158}, // __builtin_ia32_psrld512
+      {Intrinsic::x86_mmx_psrli_d, 131475}, // __builtin_ia32_psrldi
+      {Intrinsic::x86_sse2_psrli_d, 134684}, // __builtin_ia32_psrldi128
+      {Intrinsic::x86_avx2_psrli_d, 102845}, // __builtin_ia32_psrldi256
+      {Intrinsic::x86_avx512_psrli_d_512, 127230}, // __builtin_ia32_psrldi512
+      {Intrinsic::x86_mmx_psrl_q, 131433}, // __builtin_ia32_psrlq
+      {Intrinsic::x86_sse2_psrl_q, 134636}, // __builtin_ia32_psrlq128
+      {Intrinsic::x86_avx2_psrl_q, 102797}, // __builtin_ia32_psrlq256
+      {Intrinsic::x86_avx512_psrl_q_512, 127182}, // __builtin_ia32_psrlq512
+      {Intrinsic::x86_mmx_psrli_q, 131497}, // __builtin_ia32_psrlqi
+      {Intrinsic::x86_sse2_psrli_q, 134709}, // __builtin_ia32_psrlqi128
+      {Intrinsic::x86_avx2_psrli_q, 102870}, // __builtin_ia32_psrlqi256
+      {Intrinsic::x86_avx512_psrli_q_512, 127255}, // __builtin_ia32_psrlqi512
+      {Intrinsic::x86_avx512_psrlv_w_256, 127378}, // __builtin_ia32_psrlv16hi
+      {Intrinsic::x86_avx512_psrlv_d_512, 127305}, // __builtin_ia32_psrlv16si
+      {Intrinsic::x86_avx2_psrlv_q, 102968}, // __builtin_ia32_psrlv2di
+      {Intrinsic::x86_avx512_psrlv_w_512, 127403}, // __builtin_ia32_psrlv32hi
+      {Intrinsic::x86_avx2_psrlv_q_256, 102992}, // __builtin_ia32_psrlv4di
+      {Intrinsic::x86_avx2_psrlv_d, 102920}, // __builtin_ia32_psrlv4si
+      {Intrinsic::x86_avx512_psrlv_q_512, 127330}, // __builtin_ia32_psrlv8di
+      {Intrinsic::x86_avx512_psrlv_w_128, 127354}, // __builtin_ia32_psrlv8hi
+      {Intrinsic::x86_avx2_psrlv_d_256, 102944}, // __builtin_ia32_psrlv8si
+      {Intrinsic::x86_mmx_psrl_w, 131454}, // __builtin_ia32_psrlw
+      {Intrinsic::x86_sse2_psrl_w, 134660}, // __builtin_ia32_psrlw128
+      {Intrinsic::x86_avx2_psrl_w, 102821}, // __builtin_ia32_psrlw256
+      {Intrinsic::x86_avx512_psrl_w_512, 127206}, // __builtin_ia32_psrlw512
+      {Intrinsic::x86_mmx_psrli_w, 131519}, // __builtin_ia32_psrlwi
+      {Intrinsic::x86_sse2_psrli_w, 134734}, // __builtin_ia32_psrlwi128
+      {Intrinsic::x86_avx2_psrli_w, 102895}, // __builtin_ia32_psrlwi256
+      {Intrinsic::x86_avx512_psrli_w_512, 127280}, // __builtin_ia32_psrlwi512
+      {Intrinsic::x86_mmx_psub_b, 131541}, // __builtin_ia32_psubb
+      {Intrinsic::x86_mmx_psub_d, 131562}, // __builtin_ia32_psubd
+      {Intrinsic::x86_mmx_psub_q, 131583}, // __builtin_ia32_psubq
+      {Intrinsic::x86_mmx_psubs_b, 131625}, // __builtin_ia32_psubsb
+      {Intrinsic::x86_sse2_psubs_b, 134759}, // __builtin_ia32_psubsb128
+      {Intrinsic::x86_avx2_psubs_b, 103016}, // __builtin_ia32_psubsb256
+      {Intrinsic::x86_avx512_mask_psubs_b_512, 116197}, // __builtin_ia32_psubsb512_mask
+      {Intrinsic::x86_mmx_psubs_w, 131647}, // __builtin_ia32_psubsw
+      {Intrinsic::x86_sse2_psubs_w, 134784}, // __builtin_ia32_psubsw128
+      {Intrinsic::x86_avx2_psubs_w, 103041}, // __builtin_ia32_psubsw256
+      {Intrinsic::x86_avx512_mask_psubs_w_512, 116227}, // __builtin_ia32_psubsw512_mask
+      {Intrinsic::x86_mmx_psubus_b, 131669}, // __builtin_ia32_psubusb
+      {Intrinsic::x86_sse2_psubus_b, 134809}, // __builtin_ia32_psubusb128
+      {Intrinsic::x86_avx2_psubus_b, 103066}, // __builtin_ia32_psubusb256
+      {Intrinsic::x86_avx512_mask_psubus_b_512, 116257}, // __builtin_ia32_psubusb512_mask
+      {Intrinsic::x86_mmx_psubus_w, 131692}, // __builtin_ia32_psubusw
+      {Intrinsic::x86_sse2_psubus_w, 134835}, // __builtin_ia32_psubusw128
+      {Intrinsic::x86_avx2_psubus_w, 103092}, // __builtin_ia32_psubusw256
+      {Intrinsic::x86_avx512_mask_psubus_w_512, 116288}, // __builtin_ia32_psubusw512_mask
+      {Intrinsic::x86_mmx_psub_w, 131604}, // __builtin_ia32_psubw
+      {Intrinsic::x86_avx512_mask_pternlog_d_128, 116319}, // __builtin_ia32_pternlogd128_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_d_128, 123631}, // __builtin_ia32_pternlogd128_maskz
+      {Intrinsic::x86_avx512_mask_pternlog_d_256, 116352}, // __builtin_ia32_pternlogd256_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_d_256, 123665}, // __builtin_ia32_pternlogd256_maskz
+      {Intrinsic::x86_avx512_mask_pternlog_d_512, 116385}, // __builtin_ia32_pternlogd512_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_d_512, 123699}, // __builtin_ia32_pternlogd512_maskz
+      {Intrinsic::x86_avx512_mask_pternlog_q_128, 116418}, // __builtin_ia32_pternlogq128_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_q_128, 123733}, // __builtin_ia32_pternlogq128_maskz
+      {Intrinsic::x86_avx512_mask_pternlog_q_256, 116451}, // __builtin_ia32_pternlogq256_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_q_256, 123767}, // __builtin_ia32_pternlogq256_maskz
+      {Intrinsic::x86_avx512_mask_pternlog_q_512, 116484}, // __builtin_ia32_pternlogq512_mask
+      {Intrinsic::x86_avx512_maskz_pternlog_q_512, 123801}, // __builtin_ia32_pternlogq512_maskz
+      {Intrinsic::x86_sse41_ptestc, 135506}, // __builtin_ia32_ptestc128
+      {Intrinsic::x86_avx_ptestc_256, 100219}, // __builtin_ia32_ptestc256
+      {Intrinsic::x86_sse41_ptestnzc, 135531}, // __builtin_ia32_ptestnzc128
+      {Intrinsic::x86_avx_ptestnzc_256, 100244}, // __builtin_ia32_ptestnzc256
+      {Intrinsic::x86_sse41_ptestz, 135558}, // __builtin_ia32_ptestz128
+      {Intrinsic::x86_avx_ptestz_256, 100271}, // __builtin_ia32_ptestz256
+      {Intrinsic::x86_mmx_punpckhbw, 131715}, // __builtin_ia32_punpckhbw
+      {Intrinsic::x86_mmx_punpckhdq, 131740}, // __builtin_ia32_punpckhdq
+      {Intrinsic::x86_mmx_punpckhwd, 131765}, // __builtin_ia32_punpckhwd
+      {Intrinsic::x86_mmx_punpcklbw, 131790}, // __builtin_ia32_punpcklbw
+      {Intrinsic::x86_mmx_punpckldq, 131815}, // __builtin_ia32_punpckldq
+      {Intrinsic::x86_mmx_punpcklwd, 131840}, // __builtin_ia32_punpcklwd
+      {Intrinsic::x86_mmx_pxor, 131865}, // __builtin_ia32_pxor
+      {Intrinsic::x86_avx512_mask_range_pd_128, 116517}, // __builtin_ia32_rangepd128_mask
+      {Intrinsic::x86_avx512_mask_range_pd_256, 116548}, // __builtin_ia32_rangepd256_mask
+      {Intrinsic::x86_avx512_mask_range_pd_512, 116579}, // __builtin_ia32_rangepd512_mask
+      {Intrinsic::x86_avx512_mask_range_ps_128, 116610}, // __builtin_ia32_rangeps128_mask
+      {Intrinsic::x86_avx512_mask_range_ps_256, 116641}, // __builtin_ia32_rangeps256_mask
+      {Intrinsic::x86_avx512_mask_range_ps_512, 116672}, // __builtin_ia32_rangeps512_mask
+      {Intrinsic::x86_avx512_mask_range_sd, 116703}, // __builtin_ia32_rangesd128_round_mask
+      {Intrinsic::x86_avx512_mask_range_ss, 116740}, // __builtin_ia32_rangess128_round_mask
+      {Intrinsic::x86_avx512_rcp14_pd_128, 127428}, // __builtin_ia32_rcp14pd128_mask
+      {Intrinsic::x86_avx512_rcp14_pd_256, 127459}, // __builtin_ia32_rcp14pd256_mask
+      {Intrinsic::x86_avx512_rcp14_pd_512, 127490}, // __builtin_ia32_rcp14pd512_mask
+      {Intrinsic::x86_avx512_rcp14_ps_128, 127521}, // __builtin_ia32_rcp14ps128_mask
+      {Intrinsic::x86_avx512_rcp14_ps_256, 127552}, // __builtin_ia32_rcp14ps256_mask
+      {Intrinsic::x86_avx512_rcp14_ps_512, 127583}, // __builtin_ia32_rcp14ps512_mask
+      {Intrinsic::x86_avx512_rcp14_sd, 127614}, // __builtin_ia32_rcp14sd_mask
+      {Intrinsic::x86_avx512_rcp14_ss, 127642}, // __builtin_ia32_rcp14ss_mask
+      {Intrinsic::x86_avx512_rcp28_pd, 127670}, // __builtin_ia32_rcp28pd_mask
+      {Intrinsic::x86_avx512_rcp28_ps, 127698}, // __builtin_ia32_rcp28ps_mask
+      {Intrinsic::x86_avx512_rcp28_sd, 127726}, // __builtin_ia32_rcp28sd_round_mask
+      {Intrinsic::x86_avx512_rcp28_ss, 127760}, // __builtin_ia32_rcp28ss_round_mask
+      {Intrinsic::x86_sse_rcp_ps, 133076}, // __builtin_ia32_rcpps
+      {Intrinsic::x86_avx_rcp_ps_256, 100296}, // __builtin_ia32_rcpps256
+      {Intrinsic::x86_sse_rcp_ss, 133097}, // __builtin_ia32_rcpss
+      {Intrinsic::x86_rdfsbase_32, 132015}, // __builtin_ia32_rdfsbase32
+      {Intrinsic::x86_rdfsbase_64, 132041}, // __builtin_ia32_rdfsbase64
+      {Intrinsic::x86_rdgsbase_32, 132067}, // __builtin_ia32_rdgsbase32
+      {Intrinsic::x86_rdgsbase_64, 132093}, // __builtin_ia32_rdgsbase64
+      {Intrinsic::x86_rdpid, 132119}, // __builtin_ia32_rdpid
+      {Intrinsic::x86_rdpkru, 132140}, // __builtin_ia32_rdpkru
+      {Intrinsic::x86_rdpmc, 132162}, // __builtin_ia32_rdpmc
+      {Intrinsic::x86_rdsspd, 132183}, // __builtin_ia32_rdsspd
+      {Intrinsic::x86_rdsspq, 132205}, // __builtin_ia32_rdsspq
+      {Intrinsic::x86_rdtsc, 132227}, // __builtin_ia32_rdtsc
+      {Intrinsic::x86_rdtscp, 132248}, // __builtin_ia32_rdtscp
+      {Intrinsic::x86_flags_read_u32, 129604}, // __builtin_ia32_readeflags_u32
+      {Intrinsic::x86_flags_read_u64, 129634}, // __builtin_ia32_readeflags_u64
+      {Intrinsic::x86_avx512_mask_reduce_pd_128, 116777}, // __builtin_ia32_reducepd128_mask
+      {Intrinsic::x86_avx512_mask_reduce_pd_256, 116809}, // __builtin_ia32_reducepd256_mask
+      {Intrinsic::x86_avx512_mask_reduce_pd_512, 116841}, // __builtin_ia32_reducepd512_mask
+      {Intrinsic::x86_avx512_mask_reduce_ps_128, 116873}, // __builtin_ia32_reduceps128_mask
+      {Intrinsic::x86_avx512_mask_reduce_ps_256, 116905}, // __builtin_ia32_reduceps256_mask
+      {Intrinsic::x86_avx512_mask_reduce_ps_512, 116937}, // __builtin_ia32_reduceps512_mask
+      {Intrinsic::x86_avx512_mask_reduce_sd, 116969}, // __builtin_ia32_reducesd_mask
+      {Intrinsic::x86_avx512_mask_reduce_ss, 116998}, // __builtin_ia32_reducess_mask
+      {Intrinsic::x86_avx512_mask_rndscale_pd_128, 117027}, // __builtin_ia32_rndscalepd_128_mask
+      {Intrinsic::x86_avx512_mask_rndscale_pd_256, 117062}, // __builtin_ia32_rndscalepd_256_mask
+      {Intrinsic::x86_avx512_mask_rndscale_pd_512, 117097}, // __builtin_ia32_rndscalepd_mask
+      {Intrinsic::x86_avx512_mask_rndscale_ps_128, 117128}, // __builtin_ia32_rndscaleps_128_mask
+      {Intrinsic::x86_avx512_mask_rndscale_ps_256, 117163}, // __builtin_ia32_rndscaleps_256_mask
+      {Intrinsic::x86_avx512_mask_rndscale_ps_512, 117198}, // __builtin_ia32_rndscaleps_mask
+      {Intrinsic::x86_avx512_mask_rndscale_sd, 117229}, // __builtin_ia32_rndscalesd_round_mask
+      {Intrinsic::x86_avx512_mask_rndscale_ss, 117266}, // __builtin_ia32_rndscaless_round_mask
+      {Intrinsic::x86_sse41_round_pd, 135583}, // __builtin_ia32_roundpd
+      {Intrinsic::x86_avx_round_pd_256, 100320}, // __builtin_ia32_roundpd256
+      {Intrinsic::x86_sse41_round_ps, 135606}, // __builtin_ia32_roundps
+      {Intrinsic::x86_avx_round_ps_256, 100346}, // __builtin_ia32_roundps256
+      {Intrinsic::x86_sse41_round_sd, 135629}, // __builtin_ia32_roundsd
+      {Intrinsic::x86_sse41_round_ss, 135652}, // __builtin_ia32_roundss
+      {Intrinsic::x86_avx512_rsqrt14_pd_128, 127794}, // __builtin_ia32_rsqrt14pd128_mask
+      {Intrinsic::x86_avx512_rsqrt14_pd_256, 127827}, // __builtin_ia32_rsqrt14pd256_mask
+      {Intrinsic::x86_avx512_rsqrt14_pd_512, 127860}, // __builtin_ia32_rsqrt14pd512_mask
+      {Intrinsic::x86_avx512_rsqrt14_ps_128, 127893}, // __builtin_ia32_rsqrt14ps128_mask
+      {Intrinsic::x86_avx512_rsqrt14_ps_256, 127926}, // __builtin_ia32_rsqrt14ps256_mask
+      {Intrinsic::x86_avx512_rsqrt14_ps_512, 127959}, // __builtin_ia32_rsqrt14ps512_mask
+      {Intrinsic::x86_avx512_rsqrt14_sd, 127992}, // __builtin_ia32_rsqrt14sd_mask
+      {Intrinsic::x86_avx512_rsqrt14_ss, 128022}, // __builtin_ia32_rsqrt14ss_mask
+      {Intrinsic::x86_avx512_rsqrt28_pd, 128052}, // __builtin_ia32_rsqrt28pd_mask
+      {Intrinsic::x86_avx512_rsqrt28_ps, 128082}, // __builtin_ia32_rsqrt28ps_mask
+      {Intrinsic::x86_avx512_rsqrt28_sd, 128112}, // __builtin_ia32_rsqrt28sd_round_mask
+      {Intrinsic::x86_avx512_rsqrt28_ss, 128148}, // __builtin_ia32_rsqrt28ss_round_mask
+      {Intrinsic::x86_sse_rsqrt_ps, 133118}, // __builtin_ia32_rsqrtps
+      {Intrinsic::x86_avx_rsqrt_ps_256, 100372}, // __builtin_ia32_rsqrtps256
+      {Intrinsic::x86_sse_rsqrt_ss, 133141}, // __builtin_ia32_rsqrtss
+      {Intrinsic::x86_rstorssp, 132270}, // __builtin_ia32_rstorssp
+      {Intrinsic::x86_saveprevssp, 132294}, // __builtin_ia32_saveprevssp
+      {Intrinsic::x86_avx512_mask_scalef_pd_128, 117303}, // __builtin_ia32_scalefpd128_mask
+      {Intrinsic::x86_avx512_mask_scalef_pd_256, 117335}, // __builtin_ia32_scalefpd256_mask
+      {Intrinsic::x86_avx512_mask_scalef_pd_512, 117367}, // __builtin_ia32_scalefpd512_mask
+      {Intrinsic::x86_avx512_mask_scalef_ps_128, 117399}, // __builtin_ia32_scalefps128_mask
+      {Intrinsic::x86_avx512_mask_scalef_ps_256, 117431}, // __builtin_ia32_scalefps256_mask
+      {Intrinsic::x86_avx512_mask_scalef_ps_512, 117463}, // __builtin_ia32_scalefps512_mask
+      {Intrinsic::x86_avx512_mask_scalef_sd, 117495}, // __builtin_ia32_scalefsd_round_mask
+      {Intrinsic::x86_avx512_mask_scalef_ss, 117530}, // __builtin_ia32_scalefss_round_mask
+      {Intrinsic::x86_avx512_scatter_qps_512, 128390}, // __builtin_ia32_scatterdiv16sf
+      {Intrinsic::x86_avx512_scatter_qpi_512, 128331}, // __builtin_ia32_scatterdiv16si
+      {Intrinsic::x86_avx512_scatterdiv2_df, 128420}, // __builtin_ia32_scatterdiv2df
+      {Intrinsic::x86_avx512_scatterdiv2_di, 128449}, // __builtin_ia32_scatterdiv2di
+      {Intrinsic::x86_avx512_scatterdiv4_df, 128478}, // __builtin_ia32_scatterdiv4df
+      {Intrinsic::x86_avx512_scatterdiv4_di, 128507}, // __builtin_ia32_scatterdiv4di
+      {Intrinsic::x86_avx512_scatterdiv4_sf, 128536}, // __builtin_ia32_scatterdiv4sf
+      {Intrinsic::x86_avx512_scatterdiv4_si, 128565}, // __builtin_ia32_scatterdiv4si
+      {Intrinsic::x86_avx512_scatter_qpd_512, 128302}, // __builtin_ia32_scatterdiv8df
+      {Intrinsic::x86_avx512_scatter_qpq_512, 128361}, // __builtin_ia32_scatterdiv8di
+      {Intrinsic::x86_avx512_scatterdiv8_sf, 128594}, // __builtin_ia32_scatterdiv8sf
+      {Intrinsic::x86_avx512_scatterdiv8_si, 128623}, // __builtin_ia32_scatterdiv8si
+      {Intrinsic::x86_avx512_scatterpf_dpd_512, 128652}, // __builtin_ia32_scatterpfdpd
+      {Intrinsic::x86_avx512_scatterpf_dps_512, 128680}, // __builtin_ia32_scatterpfdps
+      {Intrinsic::x86_avx512_scatterpf_qpd_512, 128708}, // __builtin_ia32_scatterpfqpd
+      {Intrinsic::x86_avx512_scatterpf_qps_512, 128736}, // __builtin_ia32_scatterpfqps
+      {Intrinsic::x86_avx512_scatter_dps_512, 128272}, // __builtin_ia32_scattersiv16sf
+      {Intrinsic::x86_avx512_scatter_dpi_512, 128213}, // __builtin_ia32_scattersiv16si
+      {Intrinsic::x86_avx512_scattersiv2_df, 128764}, // __builtin_ia32_scattersiv2df
+      {Intrinsic::x86_avx512_scattersiv2_di, 128793}, // __builtin_ia32_scattersiv2di
+      {Intrinsic::x86_avx512_scattersiv4_df, 128822}, // __builtin_ia32_scattersiv4df
+      {Intrinsic::x86_avx512_scattersiv4_di, 128851}, // __builtin_ia32_scattersiv4di
+      {Intrinsic::x86_avx512_scattersiv4_sf, 128880}, // __builtin_ia32_scattersiv4sf
+      {Intrinsic::x86_avx512_scattersiv4_si, 128909}, // __builtin_ia32_scattersiv4si
+      {Intrinsic::x86_avx512_scatter_dpd_512, 128184}, // __builtin_ia32_scattersiv8df
+      {Intrinsic::x86_avx512_scatter_dpq_512, 128243}, // __builtin_ia32_scattersiv8di
+      {Intrinsic::x86_avx512_scattersiv8_sf, 128938}, // __builtin_ia32_scattersiv8sf
+      {Intrinsic::x86_avx512_scattersiv8_si, 128967}, // __builtin_ia32_scattersiv8si
+      {Intrinsic::x86_setssbsy, 132321}, // __builtin_ia32_setssbsy
+      {Intrinsic::x86_sse_sfence, 133164}, // __builtin_ia32_sfence
+      {Intrinsic::x86_sha1msg1, 132345}, // __builtin_ia32_sha1msg1
+      {Intrinsic::x86_sha1msg2, 132369}, // __builtin_ia32_sha1msg2
+      {Intrinsic::x86_sha1nexte, 132393}, // __builtin_ia32_sha1nexte
+      {Intrinsic::x86_sha1rnds4, 132418}, // __builtin_ia32_sha1rnds4
+      {Intrinsic::x86_sha256msg1, 132443}, // __builtin_ia32_sha256msg1
+      {Intrinsic::x86_sha256msg2, 132469}, // __builtin_ia32_sha256msg2
+      {Intrinsic::x86_sha256rnds2, 132495}, // __builtin_ia32_sha256rnds2
+      {Intrinsic::x86_slwpcb, 132522}, // __builtin_ia32_slwpcb
+      {Intrinsic::x86_sse2_sqrt_pd, 134861}, // __builtin_ia32_sqrtpd
+      {Intrinsic::x86_avx512_mask_sqrt_pd_128, 117565}, // __builtin_ia32_sqrtpd128_mask
+      {Intrinsic::x86_avx_sqrt_pd_256, 100398}, // __builtin_ia32_sqrtpd256
+      {Intrinsic::x86_avx512_mask_sqrt_pd_256, 117595}, // __builtin_ia32_sqrtpd256_mask
+      {Intrinsic::x86_avx512_mask_sqrt_pd_512, 117625}, // __builtin_ia32_sqrtpd512_mask
+      {Intrinsic::x86_sse_sqrt_ps, 133186}, // __builtin_ia32_sqrtps
+      {Intrinsic::x86_avx512_mask_sqrt_ps_128, 117655}, // __builtin_ia32_sqrtps128_mask
+      {Intrinsic::x86_avx_sqrt_ps_256, 100423}, // __builtin_ia32_sqrtps256
+      {Intrinsic::x86_avx512_mask_sqrt_ps_256, 117685}, // __builtin_ia32_sqrtps256_mask
+      {Intrinsic::x86_avx512_mask_sqrt_ps_512, 117715}, // __builtin_ia32_sqrtps512_mask
+      {Intrinsic::x86_sse2_sqrt_sd, 134883}, // __builtin_ia32_sqrtsd
+      {Intrinsic::x86_avx512_mask_sqrt_sd, 117745}, // __builtin_ia32_sqrtsd_round_mask
+      {Intrinsic::x86_sse_sqrt_ss, 133208}, // __builtin_ia32_sqrtss
+      {Intrinsic::x86_avx512_mask_sqrt_ss, 117778}, // __builtin_ia32_sqrtss_round_mask
+      {Intrinsic::x86_avx512_mask_store_ss, 117811}, // __builtin_ia32_storess_mask
+      {Intrinsic::x86_subborrow_u32, 136900}, // __builtin_ia32_subborrow_u32
+      {Intrinsic::x86_subborrow_u64, 136929}, // __builtin_ia32_subborrow_u64
+      {Intrinsic::x86_avx512_mask_sub_pd_512, 117839}, // __builtin_ia32_subpd512_mask
+      {Intrinsic::x86_avx512_mask_sub_ps_512, 117868}, // __builtin_ia32_subps512_mask
+      {Intrinsic::x86_avx512_mask_sub_sd_round, 117897}, // __builtin_ia32_subsd_round_mask
+      {Intrinsic::x86_avx512_mask_sub_ss_round, 117929}, // __builtin_ia32_subss_round_mask
+      {Intrinsic::x86_sse_ucomieq_ss, 133230}, // __builtin_ia32_ucomieq
+      {Intrinsic::x86_sse_ucomige_ss, 133253}, // __builtin_ia32_ucomige
+      {Intrinsic::x86_sse_ucomigt_ss, 133276}, // __builtin_ia32_ucomigt
+      {Intrinsic::x86_sse_ucomile_ss, 133299}, // __builtin_ia32_ucomile
+      {Intrinsic::x86_sse_ucomilt_ss, 133322}, // __builtin_ia32_ucomilt
+      {Intrinsic::x86_sse_ucomineq_ss, 133345}, // __builtin_ia32_ucomineq
+      {Intrinsic::x86_sse2_ucomieq_sd, 134905}, // __builtin_ia32_ucomisdeq
+      {Intrinsic::x86_sse2_ucomige_sd, 134930}, // __builtin_ia32_ucomisdge
+      {Intrinsic::x86_sse2_ucomigt_sd, 134955}, // __builtin_ia32_ucomisdgt
+      {Intrinsic::x86_sse2_ucomile_sd, 134980}, // __builtin_ia32_ucomisdle
+      {Intrinsic::x86_sse2_ucomilt_sd, 135005}, // __builtin_ia32_ucomisdlt
+      {Intrinsic::x86_sse2_ucomineq_sd, 135030}, // __builtin_ia32_ucomisdneq
+      {Intrinsic::x86_avx512_vcomi_sd, 128996}, // __builtin_ia32_vcomisd
+      {Intrinsic::x86_avx512_vcomi_ss, 129019}, // __builtin_ia32_vcomiss
+      {Intrinsic::x86_vcvtph2ps_128, 137010}, // __builtin_ia32_vcvtph2ps
+      {Intrinsic::x86_vcvtph2ps_256, 137035}, // __builtin_ia32_vcvtph2ps256
+      {Intrinsic::x86_avx512_mask_vcvtph2ps_256, 117991}, // __builtin_ia32_vcvtph2ps256_mask
+      {Intrinsic::x86_avx512_mask_vcvtph2ps_512, 118024}, // __builtin_ia32_vcvtph2ps512_mask
+      {Intrinsic::x86_avx512_mask_vcvtph2ps_128, 117961}, // __builtin_ia32_vcvtph2ps_mask
+      {Intrinsic::x86_vcvtps2ph_128, 137063}, // __builtin_ia32_vcvtps2ph
+      {Intrinsic::x86_vcvtps2ph_256, 137088}, // __builtin_ia32_vcvtps2ph256
+      {Intrinsic::x86_avx512_mask_vcvtps2ph_256, 118087}, // __builtin_ia32_vcvtps2ph256_mask
+      {Intrinsic::x86_avx512_mask_vcvtps2ph_512, 118120}, // __builtin_ia32_vcvtps2ph512_mask
+      {Intrinsic::x86_avx512_mask_vcvtps2ph_128, 118057}, // __builtin_ia32_vcvtps2ph_mask
+      {Intrinsic::x86_avx512_vcvtsd2si32, 129042}, // __builtin_ia32_vcvtsd2si32
+      {Intrinsic::x86_avx512_vcvtsd2si64, 129069}, // __builtin_ia32_vcvtsd2si64
+      {Intrinsic::x86_avx512_vcvtsd2usi32, 129096}, // __builtin_ia32_vcvtsd2usi32
+      {Intrinsic::x86_avx512_vcvtsd2usi64, 129124}, // __builtin_ia32_vcvtsd2usi64
+      {Intrinsic::x86_avx512_vcvtss2si32, 129152}, // __builtin_ia32_vcvtss2si32
+      {Intrinsic::x86_avx512_vcvtss2si64, 129179}, // __builtin_ia32_vcvtss2si64
+      {Intrinsic::x86_avx512_vcvtss2usi32, 129206}, // __builtin_ia32_vcvtss2usi32
+      {Intrinsic::x86_avx512_vcvtss2usi64, 129234}, // __builtin_ia32_vcvtss2usi64
+      {Intrinsic::x86_avx512_cvttsd2si, 103376}, // __builtin_ia32_vcvttsd2si32
+      {Intrinsic::x86_avx512_cvttsd2si64, 103404}, // __builtin_ia32_vcvttsd2si64
+      {Intrinsic::x86_avx512_cvttsd2usi, 103432}, // __builtin_ia32_vcvttsd2usi32
+      {Intrinsic::x86_avx512_cvttsd2usi64, 103461}, // __builtin_ia32_vcvttsd2usi64
+      {Intrinsic::x86_avx512_cvttss2si, 103490}, // __builtin_ia32_vcvttss2si32
+      {Intrinsic::x86_avx512_cvttss2si64, 103518}, // __builtin_ia32_vcvttss2si64
+      {Intrinsic::x86_avx512_cvttss2usi, 103546}, // __builtin_ia32_vcvttss2usi32
+      {Intrinsic::x86_avx512_cvttss2usi64, 103575}, // __builtin_ia32_vcvttss2usi64
+      {Intrinsic::x86_mmx_pextr_w, 130875}, // __builtin_ia32_vec_ext_v4hi
+      {Intrinsic::x86_mmx_pinsr_w, 130903}, // __builtin_ia32_vec_set_v4hi
+      {Intrinsic::x86_fma_vfmadd_pd, 129726}, // __builtin_ia32_vfmaddpd
+      {Intrinsic::x86_avx512_mask_vfmadd_pd_128, 118153}, // __builtin_ia32_vfmaddpd128_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_pd_128, 122137}, // __builtin_ia32_vfmaddpd128_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_pd_128, 123835}, // __builtin_ia32_vfmaddpd128_maskz
+      {Intrinsic::x86_fma_vfmadd_pd_256, 129750}, // __builtin_ia32_vfmaddpd256
+      {Intrinsic::x86_avx512_mask_vfmadd_pd_256, 118185}, // __builtin_ia32_vfmaddpd256_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_pd_256, 122170}, // __builtin_ia32_vfmaddpd256_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_pd_256, 123868}, // __builtin_ia32_vfmaddpd256_maskz
+      {Intrinsic::x86_avx512_mask_vfmadd_pd_512, 118217}, // __builtin_ia32_vfmaddpd512_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_pd_512, 122203}, // __builtin_ia32_vfmaddpd512_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_pd_512, 123901}, // __builtin_ia32_vfmaddpd512_maskz
+      {Intrinsic::x86_fma_vfmadd_ps, 129777}, // __builtin_ia32_vfmaddps
+      {Intrinsic::x86_avx512_mask_vfmadd_ps_128, 118249}, // __builtin_ia32_vfmaddps128_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_ps_128, 122236}, // __builtin_ia32_vfmaddps128_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_ps_128, 123934}, // __builtin_ia32_vfmaddps128_maskz
+      {Intrinsic::x86_fma_vfmadd_ps_256, 129801}, // __builtin_ia32_vfmaddps256
+      {Intrinsic::x86_avx512_mask_vfmadd_ps_256, 118281}, // __builtin_ia32_vfmaddps256_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_ps_256, 122269}, // __builtin_ia32_vfmaddps256_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_ps_256, 123967}, // __builtin_ia32_vfmaddps256_maskz
+      {Intrinsic::x86_avx512_mask_vfmadd_ps_512, 118313}, // __builtin_ia32_vfmaddps512_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_ps_512, 122302}, // __builtin_ia32_vfmaddps512_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_ps_512, 124000}, // __builtin_ia32_vfmaddps512_maskz
+      {Intrinsic::x86_fma4_vfmadd_sd, 129992}, // __builtin_ia32_vfmaddsd
+      {Intrinsic::x86_fma_vfmadd_sd, 129828}, // __builtin_ia32_vfmaddsd3
+      {Intrinsic::x86_avx512_mask_vfmadd_sd, 118345}, // __builtin_ia32_vfmaddsd3_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_sd, 122335}, // __builtin_ia32_vfmaddsd3_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_sd, 124033}, // __builtin_ia32_vfmaddsd3_maskz
+      {Intrinsic::x86_fma4_vfmadd_ss, 130016}, // __builtin_ia32_vfmaddss
+      {Intrinsic::x86_fma_vfmadd_ss, 129853}, // __builtin_ia32_vfmaddss3
+      {Intrinsic::x86_avx512_mask_vfmadd_ss, 118375}, // __builtin_ia32_vfmaddss3_mask
+      {Intrinsic::x86_avx512_mask3_vfmadd_ss, 122366}, // __builtin_ia32_vfmaddss3_mask3
+      {Intrinsic::x86_avx512_maskz_vfmadd_ss, 124064}, // __builtin_ia32_vfmaddss3_maskz
+      {Intrinsic::x86_fma_vfmaddsub_pd, 129878}, // __builtin_ia32_vfmaddsubpd
+      {Intrinsic::x86_avx512_mask_vfmaddsub_pd_128, 118405}, // __builtin_ia32_vfmaddsubpd128_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_pd_128, 122397}, // __builtin_ia32_vfmaddsubpd128_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_pd_128, 124095}, // __builtin_ia32_vfmaddsubpd128_maskz
+      {Intrinsic::x86_fma_vfmaddsub_pd_256, 129905}, // __builtin_ia32_vfmaddsubpd256
+      {Intrinsic::x86_avx512_mask_vfmaddsub_pd_256, 118440}, // __builtin_ia32_vfmaddsubpd256_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_pd_256, 122433}, // __builtin_ia32_vfmaddsubpd256_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_pd_256, 124131}, // __builtin_ia32_vfmaddsubpd256_maskz
+      {Intrinsic::x86_avx512_mask_vfmaddsub_pd_512, 118475}, // __builtin_ia32_vfmaddsubpd512_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_pd_512, 122469}, // __builtin_ia32_vfmaddsubpd512_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_pd_512, 124167}, // __builtin_ia32_vfmaddsubpd512_maskz
+      {Intrinsic::x86_fma_vfmaddsub_ps, 129935}, // __builtin_ia32_vfmaddsubps
+      {Intrinsic::x86_avx512_mask_vfmaddsub_ps_128, 118510}, // __builtin_ia32_vfmaddsubps128_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_ps_128, 122505}, // __builtin_ia32_vfmaddsubps128_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_ps_128, 124203}, // __builtin_ia32_vfmaddsubps128_maskz
+      {Intrinsic::x86_fma_vfmaddsub_ps_256, 129962}, // __builtin_ia32_vfmaddsubps256
+      {Intrinsic::x86_avx512_mask_vfmaddsub_ps_256, 118545}, // __builtin_ia32_vfmaddsubps256_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_ps_256, 122541}, // __builtin_ia32_vfmaddsubps256_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_ps_256, 124239}, // __builtin_ia32_vfmaddsubps256_maskz
+      {Intrinsic::x86_avx512_mask_vfmaddsub_ps_512, 118580}, // __builtin_ia32_vfmaddsubps512_mask
+      {Intrinsic::x86_avx512_mask3_vfmaddsub_ps_512, 122577}, // __builtin_ia32_vfmaddsubps512_mask3
+      {Intrinsic::x86_avx512_maskz_vfmaddsub_ps_512, 124275}, // __builtin_ia32_vfmaddsubps512_maskz
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_pd_128, 122873}, // __builtin_ia32_vfmsubaddpd128_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_pd_256, 122909}, // __builtin_ia32_vfmsubaddpd256_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_pd_512, 122945}, // __builtin_ia32_vfmsubaddpd512_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_ps_128, 122981}, // __builtin_ia32_vfmsubaddps128_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_ps_256, 123017}, // __builtin_ia32_vfmsubaddps256_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsubadd_ps_512, 123053}, // __builtin_ia32_vfmsubaddps512_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_pd_128, 122613}, // __builtin_ia32_vfmsubpd128_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_pd_256, 122646}, // __builtin_ia32_vfmsubpd256_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_pd_512, 122679}, // __builtin_ia32_vfmsubpd512_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_ps_128, 122712}, // __builtin_ia32_vfmsubps128_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_ps_256, 122745}, // __builtin_ia32_vfmsubps256_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_ps_512, 122778}, // __builtin_ia32_vfmsubps512_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_sd, 122811}, // __builtin_ia32_vfmsubsd3_mask3
+      {Intrinsic::x86_avx512_mask3_vfmsub_ss, 122842}, // __builtin_ia32_vfmsubss3_mask3
+      {Intrinsic::x86_avx512_mask_vfnmadd_pd_128, 118615}, // __builtin_ia32_vfnmaddpd128_mask
+      {Intrinsic::x86_avx512_mask_vfnmadd_pd_256, 118648}, // __builtin_ia32_vfnmaddpd256_mask
+      {Intrinsic::x86_avx512_mask_vfnmadd_pd_512, 118681}, // __builtin_ia32_vfnmaddpd512_mask
+      {Intrinsic::x86_avx512_mask_vfnmadd_ps_128, 118714}, // __builtin_ia32_vfnmaddps128_mask
+      {Intrinsic::x86_avx512_mask_vfnmadd_ps_256, 118747}, // __builtin_ia32_vfnmaddps256_mask
+      {Intrinsic::x86_avx512_mask_vfnmadd_ps_512, 118780}, // __builtin_ia32_vfnmaddps512_mask
+      {Intrinsic::x86_avx512_mask_vfnmsub_pd_128, 118813}, // __builtin_ia32_vfnmsubpd128_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_pd_128, 123089}, // __builtin_ia32_vfnmsubpd128_mask3
+      {Intrinsic::x86_avx512_mask_vfnmsub_pd_256, 118846}, // __builtin_ia32_vfnmsubpd256_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_pd_256, 123123}, // __builtin_ia32_vfnmsubpd256_mask3
+      {Intrinsic::x86_avx512_mask_vfnmsub_pd_512, 118879}, // __builtin_ia32_vfnmsubpd512_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_pd_512, 123157}, // __builtin_ia32_vfnmsubpd512_mask3
+      {Intrinsic::x86_avx512_mask_vfnmsub_ps_128, 118912}, // __builtin_ia32_vfnmsubps128_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_ps_128, 123191}, // __builtin_ia32_vfnmsubps128_mask3
+      {Intrinsic::x86_avx512_mask_vfnmsub_ps_256, 118945}, // __builtin_ia32_vfnmsubps256_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_ps_256, 123225}, // __builtin_ia32_vfnmsubps256_mask3
+      {Intrinsic::x86_avx512_mask_vfnmsub_ps_512, 118978}, // __builtin_ia32_vfnmsubps512_mask
+      {Intrinsic::x86_avx512_mask3_vfnmsub_ps_512, 123259}, // __builtin_ia32_vfnmsubps512_mask3
+      {Intrinsic::x86_avx512_mask3_vfnmsub_sd, 123293}, // __builtin_ia32_vfnmsubsd3_mask3
+      {Intrinsic::x86_avx512_mask3_vfnmsub_ss, 123325}, // __builtin_ia32_vfnmsubss3_mask3
+      {Intrinsic::x86_xop_vfrcz_pd, 137713}, // __builtin_ia32_vfrczpd
+      {Intrinsic::x86_xop_vfrcz_pd_256, 137736}, // __builtin_ia32_vfrczpd256
+      {Intrinsic::x86_xop_vfrcz_ps, 137762}, // __builtin_ia32_vfrczps
+      {Intrinsic::x86_xop_vfrcz_ps_256, 137785}, // __builtin_ia32_vfrczps256
+      {Intrinsic::x86_xop_vfrcz_sd, 137811}, // __builtin_ia32_vfrczsd
+      {Intrinsic::x86_xop_vfrcz_ss, 137834}, // __builtin_ia32_vfrczss
+      {Intrinsic::x86_vgf2p8affineinvqb_128, 137116}, // __builtin_ia32_vgf2p8affineinvqb_v16qi
+      {Intrinsic::x86_vgf2p8affineinvqb_256, 137155}, // __builtin_ia32_vgf2p8affineinvqb_v32qi
+      {Intrinsic::x86_vgf2p8affineinvqb_512, 137194}, // __builtin_ia32_vgf2p8affineinvqb_v64qi
+      {Intrinsic::x86_vgf2p8affineqb_128, 137233}, // __builtin_ia32_vgf2p8affineqb_v16qi
+      {Intrinsic::x86_vgf2p8affineqb_256, 137269}, // __builtin_ia32_vgf2p8affineqb_v32qi
+      {Intrinsic::x86_vgf2p8affineqb_512, 137305}, // __builtin_ia32_vgf2p8affineqb_v64qi
+      {Intrinsic::x86_vgf2p8mulb_128, 137341}, // __builtin_ia32_vgf2p8mulb_v16qi
+      {Intrinsic::x86_vgf2p8mulb_256, 137373}, // __builtin_ia32_vgf2p8mulb_v32qi
+      {Intrinsic::x86_vgf2p8mulb_512, 137405}, // __builtin_ia32_vgf2p8mulb_v64qi
+      {Intrinsic::x86_xop_vpcomb, 137857}, // __builtin_ia32_vpcomb
+      {Intrinsic::x86_xop_vpcomd, 137879}, // __builtin_ia32_vpcomd
+      {Intrinsic::x86_xop_vpcomq, 137901}, // __builtin_ia32_vpcomq
+      {Intrinsic::x86_xop_vpcomub, 137923}, // __builtin_ia32_vpcomub
+      {Intrinsic::x86_xop_vpcomud, 137946}, // __builtin_ia32_vpcomud
+      {Intrinsic::x86_xop_vpcomuq, 137969}, // __builtin_ia32_vpcomuq
+      {Intrinsic::x86_xop_vpcomuw, 137992}, // __builtin_ia32_vpcomuw
+      {Intrinsic::x86_xop_vpcomw, 138015}, // __builtin_ia32_vpcomw
+      {Intrinsic::x86_avx512_mask_conflict_q_128, 106165}, // __builtin_ia32_vpconflictdi_128_mask
+      {Intrinsic::x86_avx512_mask_conflict_q_256, 106202}, // __builtin_ia32_vpconflictdi_256_mask
+      {Intrinsic::x86_avx512_mask_conflict_q_512, 106239}, // __builtin_ia32_vpconflictdi_512_mask
+      {Intrinsic::x86_avx512_mask_conflict_d_128, 106054}, // __builtin_ia32_vpconflictsi_128_mask
+      {Intrinsic::x86_avx512_mask_conflict_d_256, 106091}, // __builtin_ia32_vpconflictsi_256_mask
+      {Intrinsic::x86_avx512_mask_conflict_d_512, 106128}, // __builtin_ia32_vpconflictsi_512_mask
+      {Intrinsic::x86_avx512_mask_vpdpbusd_128, 119011}, // __builtin_ia32_vpdpbusd128_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusd_128, 124311}, // __builtin_ia32_vpdpbusd128_maskz
+      {Intrinsic::x86_avx512_mask_vpdpbusd_256, 119043}, // __builtin_ia32_vpdpbusd256_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusd_256, 124344}, // __builtin_ia32_vpdpbusd256_maskz
+      {Intrinsic::x86_avx512_mask_vpdpbusd_512, 119075}, // __builtin_ia32_vpdpbusd512_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusd_512, 124377}, // __builtin_ia32_vpdpbusd512_maskz
+      {Intrinsic::x86_avx512_mask_vpdpbusds_128, 119107}, // __builtin_ia32_vpdpbusds128_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusds_128, 124410}, // __builtin_ia32_vpdpbusds128_maskz
+      {Intrinsic::x86_avx512_mask_vpdpbusds_256, 119140}, // __builtin_ia32_vpdpbusds256_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusds_256, 124444}, // __builtin_ia32_vpdpbusds256_maskz
+      {Intrinsic::x86_avx512_mask_vpdpbusds_512, 119173}, // __builtin_ia32_vpdpbusds512_mask
+      {Intrinsic::x86_avx512_maskz_vpdpbusds_512, 124478}, // __builtin_ia32_vpdpbusds512_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssd_128, 119206}, // __builtin_ia32_vpdpwssd128_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssd_128, 124512}, // __builtin_ia32_vpdpwssd128_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssd_256, 119238}, // __builtin_ia32_vpdpwssd256_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssd_256, 124545}, // __builtin_ia32_vpdpwssd256_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssd_512, 119270}, // __builtin_ia32_vpdpwssd512_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssd_512, 124578}, // __builtin_ia32_vpdpwssd512_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssds_128, 119302}, // __builtin_ia32_vpdpwssds128_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssds_128, 124611}, // __builtin_ia32_vpdpwssds128_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssds_256, 119335}, // __builtin_ia32_vpdpwssds256_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssds_256, 124645}, // __builtin_ia32_vpdpwssds256_maskz
+      {Intrinsic::x86_avx512_mask_vpdpwssds_512, 119368}, // __builtin_ia32_vpdpwssds512_mask
+      {Intrinsic::x86_avx512_maskz_vpdpwssds_512, 124679}, // __builtin_ia32_vpdpwssds512_maskz
+      {Intrinsic::x86_avx512_mask_vpermi2var_d_128, 119401}, // __builtin_ia32_vpermi2vard128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_d_256, 119436}, // __builtin_ia32_vpermi2vard256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_d_512, 119471}, // __builtin_ia32_vpermi2vard512_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_hi_128, 119506}, // __builtin_ia32_vpermi2varhi128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_hi_256, 119542}, // __builtin_ia32_vpermi2varhi256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_hi_512, 119578}, // __builtin_ia32_vpermi2varhi512_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_pd_128, 119614}, // __builtin_ia32_vpermi2varpd128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_pd_256, 119650}, // __builtin_ia32_vpermi2varpd256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_pd_512, 119686}, // __builtin_ia32_vpermi2varpd512_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_ps_128, 119722}, // __builtin_ia32_vpermi2varps128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_ps_256, 119758}, // __builtin_ia32_vpermi2varps256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_ps_512, 119794}, // __builtin_ia32_vpermi2varps512_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_q_128, 119830}, // __builtin_ia32_vpermi2varq128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_q_256, 119865}, // __builtin_ia32_vpermi2varq256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_q_512, 119900}, // __builtin_ia32_vpermi2varq512_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_qi_128, 119935}, // __builtin_ia32_vpermi2varqi128_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_qi_256, 119971}, // __builtin_ia32_vpermi2varqi256_mask
+      {Intrinsic::x86_avx512_mask_vpermi2var_qi_512, 120007}, // __builtin_ia32_vpermi2varqi512_mask
+      {Intrinsic::x86_xop_vpermil2pd, 138037}, // __builtin_ia32_vpermil2pd
+      {Intrinsic::x86_xop_vpermil2pd_256, 138063}, // __builtin_ia32_vpermil2pd256
+      {Intrinsic::x86_xop_vpermil2ps, 138092}, // __builtin_ia32_vpermil2ps
+      {Intrinsic::x86_xop_vpermil2ps_256, 138118}, // __builtin_ia32_vpermil2ps256
+      {Intrinsic::x86_avx_vpermilvar_pd, 100448}, // __builtin_ia32_vpermilvarpd
+      {Intrinsic::x86_avx_vpermilvar_pd_256, 100476}, // __builtin_ia32_vpermilvarpd256
+      {Intrinsic::x86_avx512_vpermilvar_pd_512, 129262}, // __builtin_ia32_vpermilvarpd512
+      {Intrinsic::x86_avx_vpermilvar_ps, 100507}, // __builtin_ia32_vpermilvarps
+      {Intrinsic::x86_avx_vpermilvar_ps_256, 100535}, // __builtin_ia32_vpermilvarps256
+      {Intrinsic::x86_avx512_vpermilvar_ps_512, 129293}, // __builtin_ia32_vpermilvarps512
+      {Intrinsic::x86_avx512_mask_vpermt2var_d_128, 120043}, // __builtin_ia32_vpermt2vard128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_d_128, 124713}, // __builtin_ia32_vpermt2vard128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_d_256, 120078}, // __builtin_ia32_vpermt2vard256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_d_256, 124749}, // __builtin_ia32_vpermt2vard256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_d_512, 120113}, // __builtin_ia32_vpermt2vard512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_d_512, 124785}, // __builtin_ia32_vpermt2vard512_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_hi_128, 120148}, // __builtin_ia32_vpermt2varhi128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_hi_128, 124821}, // __builtin_ia32_vpermt2varhi128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_hi_256, 120184}, // __builtin_ia32_vpermt2varhi256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_hi_256, 124858}, // __builtin_ia32_vpermt2varhi256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_hi_512, 120220}, // __builtin_ia32_vpermt2varhi512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_hi_512, 124895}, // __builtin_ia32_vpermt2varhi512_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_pd_128, 120256}, // __builtin_ia32_vpermt2varpd128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_pd_128, 124932}, // __builtin_ia32_vpermt2varpd128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_pd_256, 120292}, // __builtin_ia32_vpermt2varpd256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_pd_256, 124969}, // __builtin_ia32_vpermt2varpd256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_pd_512, 120328}, // __builtin_ia32_vpermt2varpd512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_pd_512, 125006}, // __builtin_ia32_vpermt2varpd512_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_ps_128, 120364}, // __builtin_ia32_vpermt2varps128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_ps_128, 125043}, // __builtin_ia32_vpermt2varps128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_ps_256, 120400}, // __builtin_ia32_vpermt2varps256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_ps_256, 125080}, // __builtin_ia32_vpermt2varps256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_ps_512, 120436}, // __builtin_ia32_vpermt2varps512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_ps_512, 125117}, // __builtin_ia32_vpermt2varps512_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_q_128, 120472}, // __builtin_ia32_vpermt2varq128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_q_128, 125154}, // __builtin_ia32_vpermt2varq128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_q_256, 120507}, // __builtin_ia32_vpermt2varq256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_q_256, 125190}, // __builtin_ia32_vpermt2varq256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_q_512, 120542}, // __builtin_ia32_vpermt2varq512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_q_512, 125226}, // __builtin_ia32_vpermt2varq512_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_qi_128, 120577}, // __builtin_ia32_vpermt2varqi128_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_qi_128, 125262}, // __builtin_ia32_vpermt2varqi128_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_qi_256, 120613}, // __builtin_ia32_vpermt2varqi256_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_qi_256, 125299}, // __builtin_ia32_vpermt2varqi256_maskz
+      {Intrinsic::x86_avx512_mask_vpermt2var_qi_512, 120649}, // __builtin_ia32_vpermt2varqi512_mask
+      {Intrinsic::x86_avx512_maskz_vpermt2var_qi_512, 125336}, // __builtin_ia32_vpermt2varqi512_maskz
+      {Intrinsic::x86_xop_vphaddbd, 138147}, // __builtin_ia32_vphaddbd
+      {Intrinsic::x86_xop_vphaddbq, 138171}, // __builtin_ia32_vphaddbq
+      {Intrinsic::x86_xop_vphaddbw, 138195}, // __builtin_ia32_vphaddbw
+      {Intrinsic::x86_xop_vphadddq, 138219}, // __builtin_ia32_vphadddq
+      {Intrinsic::x86_xop_vphaddubd, 138243}, // __builtin_ia32_vphaddubd
+      {Intrinsic::x86_xop_vphaddubq, 138268}, // __builtin_ia32_vphaddubq
+      {Intrinsic::x86_xop_vphaddubw, 138293}, // __builtin_ia32_vphaddubw
+      {Intrinsic::x86_xop_vphaddudq, 138318}, // __builtin_ia32_vphaddudq
+      {Intrinsic::x86_xop_vphadduwd, 138343}, // __builtin_ia32_vphadduwd
+      {Intrinsic::x86_xop_vphadduwq, 138368}, // __builtin_ia32_vphadduwq
+      {Intrinsic::x86_xop_vphaddwd, 138393}, // __builtin_ia32_vphaddwd
+      {Intrinsic::x86_xop_vphaddwq, 138417}, // __builtin_ia32_vphaddwq
+      {Intrinsic::x86_xop_vphsubbw, 138441}, // __builtin_ia32_vphsubbw
+      {Intrinsic::x86_xop_vphsubdq, 138465}, // __builtin_ia32_vphsubdq
+      {Intrinsic::x86_xop_vphsubwd, 138489}, // __builtin_ia32_vphsubwd
+      {Intrinsic::x86_xop_vpmacsdd, 138513}, // __builtin_ia32_vpmacsdd
+      {Intrinsic::x86_xop_vpmacsdqh, 138537}, // __builtin_ia32_vpmacsdqh
+      {Intrinsic::x86_xop_vpmacsdql, 138562}, // __builtin_ia32_vpmacsdql
+      {Intrinsic::x86_xop_vpmacssdd, 138587}, // __builtin_ia32_vpmacssdd
+      {Intrinsic::x86_xop_vpmacssdqh, 138612}, // __builtin_ia32_vpmacssdqh
+      {Intrinsic::x86_xop_vpmacssdql, 138638}, // __builtin_ia32_vpmacssdql
+      {Intrinsic::x86_xop_vpmacsswd, 138664}, // __builtin_ia32_vpmacsswd
+      {Intrinsic::x86_xop_vpmacssww, 138689}, // __builtin_ia32_vpmacssww
+      {Intrinsic::x86_xop_vpmacswd, 138714}, // __builtin_ia32_vpmacswd
+      {Intrinsic::x86_xop_vpmacsww, 138738}, // __builtin_ia32_vpmacsww
+      {Intrinsic::x86_xop_vpmadcsswd, 138762}, // __builtin_ia32_vpmadcsswd
+      {Intrinsic::x86_xop_vpmadcswd, 138788}, // __builtin_ia32_vpmadcswd
+      {Intrinsic::x86_avx512_mask_vpmadd52h_uq_128, 120685}, // __builtin_ia32_vpmadd52huq128_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52h_uq_128, 125373}, // __builtin_ia32_vpmadd52huq128_maskz
+      {Intrinsic::x86_avx512_mask_vpmadd52h_uq_256, 120720}, // __builtin_ia32_vpmadd52huq256_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52h_uq_256, 125409}, // __builtin_ia32_vpmadd52huq256_maskz
+      {Intrinsic::x86_avx512_mask_vpmadd52h_uq_512, 120755}, // __builtin_ia32_vpmadd52huq512_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52h_uq_512, 125445}, // __builtin_ia32_vpmadd52huq512_maskz
+      {Intrinsic::x86_avx512_mask_vpmadd52l_uq_128, 120790}, // __builtin_ia32_vpmadd52luq128_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52l_uq_128, 125481}, // __builtin_ia32_vpmadd52luq128_maskz
+      {Intrinsic::x86_avx512_mask_vpmadd52l_uq_256, 120825}, // __builtin_ia32_vpmadd52luq256_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52l_uq_256, 125517}, // __builtin_ia32_vpmadd52luq256_maskz
+      {Intrinsic::x86_avx512_mask_vpmadd52l_uq_512, 120860}, // __builtin_ia32_vpmadd52luq512_mask
+      {Intrinsic::x86_avx512_maskz_vpmadd52l_uq_512, 125553}, // __builtin_ia32_vpmadd52luq512_maskz
+      {Intrinsic::x86_avx512_mask_pmultishift_qb_128, 115375}, // __builtin_ia32_vpmultishiftqb128_mask
+      {Intrinsic::x86_avx512_mask_pmultishift_qb_256, 115413}, // __builtin_ia32_vpmultishiftqb256_mask
+      {Intrinsic::x86_avx512_mask_pmultishift_qb_512, 115451}, // __builtin_ia32_vpmultishiftqb512_mask
+      {Intrinsic::x86_xop_vpperm, 138813}, // __builtin_ia32_vpperm
+      {Intrinsic::x86_xop_vprotb, 138835}, // __builtin_ia32_vprotb
+      {Intrinsic::x86_xop_vprotbi, 138857}, // __builtin_ia32_vprotbi
+      {Intrinsic::x86_xop_vprotd, 138880}, // __builtin_ia32_vprotd
+      {Intrinsic::x86_xop_vprotdi, 138902}, // __builtin_ia32_vprotdi
+      {Intrinsic::x86_xop_vprotq, 138925}, // __builtin_ia32_vprotq
+      {Intrinsic::x86_xop_vprotqi, 138947}, // __builtin_ia32_vprotqi
+      {Intrinsic::x86_xop_vprotw, 138970}, // __builtin_ia32_vprotw
+      {Intrinsic::x86_xop_vprotwi, 138992}, // __builtin_ia32_vprotwi
+      {Intrinsic::x86_xop_vpshab, 139015}, // __builtin_ia32_vpshab
+      {Intrinsic::x86_xop_vpshad, 139037}, // __builtin_ia32_vpshad
+      {Intrinsic::x86_xop_vpshaq, 139059}, // __builtin_ia32_vpshaq
+      {Intrinsic::x86_xop_vpshaw, 139081}, // __builtin_ia32_vpshaw
+      {Intrinsic::x86_xop_vpshlb, 139103}, // __builtin_ia32_vpshlb
+      {Intrinsic::x86_xop_vpshld, 139125}, // __builtin_ia32_vpshld
+      {Intrinsic::x86_avx512_mask_vpshld_d_128, 120895}, // __builtin_ia32_vpshldd128_mask
+      {Intrinsic::x86_avx512_mask_vpshld_d_256, 120926}, // __builtin_ia32_vpshldd256_mask
+      {Intrinsic::x86_avx512_mask_vpshld_d_512, 120957}, // __builtin_ia32_vpshldd512_mask
+      {Intrinsic::x86_avx512_mask_vpshld_q_128, 120988}, // __builtin_ia32_vpshldq128_mask
+      {Intrinsic::x86_avx512_mask_vpshld_q_256, 121019}, // __builtin_ia32_vpshldq256_mask
+      {Intrinsic::x86_avx512_mask_vpshld_q_512, 121050}, // __builtin_ia32_vpshldq512_mask
+      {Intrinsic::x86_avx512_mask_vpshldv_d_128, 121174}, // __builtin_ia32_vpshldvd128_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_d_128, 125589}, // __builtin_ia32_vpshldvd128_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_d_256, 121206}, // __builtin_ia32_vpshldvd256_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_d_256, 125622}, // __builtin_ia32_vpshldvd256_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_d_512, 121238}, // __builtin_ia32_vpshldvd512_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_d_512, 125655}, // __builtin_ia32_vpshldvd512_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_q_128, 121270}, // __builtin_ia32_vpshldvq128_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_q_128, 125688}, // __builtin_ia32_vpshldvq128_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_q_256, 121302}, // __builtin_ia32_vpshldvq256_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_q_256, 125721}, // __builtin_ia32_vpshldvq256_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_q_512, 121334}, // __builtin_ia32_vpshldvq512_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_q_512, 125754}, // __builtin_ia32_vpshldvq512_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_w_128, 121366}, // __builtin_ia32_vpshldvw128_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_w_128, 125787}, // __builtin_ia32_vpshldvw128_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_w_256, 121398}, // __builtin_ia32_vpshldvw256_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_w_256, 125820}, // __builtin_ia32_vpshldvw256_maskz
+      {Intrinsic::x86_avx512_mask_vpshldv_w_512, 121430}, // __builtin_ia32_vpshldvw512_mask
+      {Intrinsic::x86_avx512_maskz_vpshldv_w_512, 125853}, // __builtin_ia32_vpshldvw512_maskz
+      {Intrinsic::x86_avx512_mask_vpshld_w_128, 121081}, // __builtin_ia32_vpshldw128_mask
+      {Intrinsic::x86_avx512_mask_vpshld_w_256, 121112}, // __builtin_ia32_vpshldw256_mask
+      {Intrinsic::x86_avx512_mask_vpshld_w_512, 121143}, // __builtin_ia32_vpshldw512_mask
+      {Intrinsic::x86_xop_vpshlq, 139147}, // __builtin_ia32_vpshlq
+      {Intrinsic::x86_xop_vpshlw, 139169}, // __builtin_ia32_vpshlw
+      {Intrinsic::x86_avx512_mask_vpshrd_d_128, 121462}, // __builtin_ia32_vpshrdd128_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_d_256, 121493}, // __builtin_ia32_vpshrdd256_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_d_512, 121524}, // __builtin_ia32_vpshrdd512_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_q_128, 121555}, // __builtin_ia32_vpshrdq128_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_q_256, 121586}, // __builtin_ia32_vpshrdq256_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_q_512, 121617}, // __builtin_ia32_vpshrdq512_mask
+      {Intrinsic::x86_avx512_mask_vpshrdv_d_128, 121741}, // __builtin_ia32_vpshrdvd128_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_d_128, 125886}, // __builtin_ia32_vpshrdvd128_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_d_256, 121773}, // __builtin_ia32_vpshrdvd256_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_d_256, 125919}, // __builtin_ia32_vpshrdvd256_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_d_512, 121805}, // __builtin_ia32_vpshrdvd512_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_d_512, 125952}, // __builtin_ia32_vpshrdvd512_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_q_128, 121837}, // __builtin_ia32_vpshrdvq128_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_q_128, 125985}, // __builtin_ia32_vpshrdvq128_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_q_256, 121869}, // __builtin_ia32_vpshrdvq256_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_q_256, 126018}, // __builtin_ia32_vpshrdvq256_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_q_512, 121901}, // __builtin_ia32_vpshrdvq512_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_q_512, 126051}, // __builtin_ia32_vpshrdvq512_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_w_128, 121933}, // __builtin_ia32_vpshrdvw128_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_w_128, 126084}, // __builtin_ia32_vpshrdvw128_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_w_256, 121965}, // __builtin_ia32_vpshrdvw256_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_w_256, 126117}, // __builtin_ia32_vpshrdvw256_maskz
+      {Intrinsic::x86_avx512_mask_vpshrdv_w_512, 121997}, // __builtin_ia32_vpshrdvw512_mask
+      {Intrinsic::x86_avx512_maskz_vpshrdv_w_512, 126150}, // __builtin_ia32_vpshrdvw512_maskz
+      {Intrinsic::x86_avx512_mask_vpshrd_w_128, 121648}, // __builtin_ia32_vpshrdw128_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_w_256, 121679}, // __builtin_ia32_vpshrdw256_mask
+      {Intrinsic::x86_avx512_mask_vpshrd_w_512, 121710}, // __builtin_ia32_vpshrdw512_mask
+      {Intrinsic::x86_avx512_mask_vpshufbitqmb_128, 122029}, // __builtin_ia32_vpshufbitqmb128_mask
+      {Intrinsic::x86_avx512_mask_vpshufbitqmb_256, 122065}, // __builtin_ia32_vpshufbitqmb256_mask
+      {Intrinsic::x86_avx512_mask_vpshufbitqmb_512, 122101}, // __builtin_ia32_vpshufbitqmb512_mask
+      {Intrinsic::x86_avx_vtestc_pd, 100566}, // __builtin_ia32_vtestcpd
+      {Intrinsic::x86_avx_vtestc_pd_256, 100590}, // __builtin_ia32_vtestcpd256
+      {Intrinsic::x86_avx_vtestc_ps, 100617}, // __builtin_ia32_vtestcps
+      {Intrinsic::x86_avx_vtestc_ps_256, 100641}, // __builtin_ia32_vtestcps256
+      {Intrinsic::x86_avx_vtestnzc_pd, 100668}, // __builtin_ia32_vtestnzcpd
+      {Intrinsic::x86_avx_vtestnzc_pd_256, 100694}, // __builtin_ia32_vtestnzcpd256
+      {Intrinsic::x86_avx_vtestnzc_ps, 100723}, // __builtin_ia32_vtestnzcps
+      {Intrinsic::x86_avx_vtestnzc_ps_256, 100749}, // __builtin_ia32_vtestnzcps256
+      {Intrinsic::x86_avx_vtestz_pd, 100778}, // __builtin_ia32_vtestzpd
+      {Intrinsic::x86_avx_vtestz_pd_256, 100802}, // __builtin_ia32_vtestzpd256
+      {Intrinsic::x86_avx_vtestz_ps, 100829}, // __builtin_ia32_vtestzps
+      {Intrinsic::x86_avx_vtestz_ps_256, 100853}, // __builtin_ia32_vtestzps256
+      {Intrinsic::x86_avx_vzeroall, 100880}, // __builtin_ia32_vzeroall
+      {Intrinsic::x86_avx_vzeroupper, 100904}, // __builtin_ia32_vzeroupper
+      {Intrinsic::x86_wrfsbase_32, 137437}, // __builtin_ia32_wrfsbase32
+      {Intrinsic::x86_wrfsbase_64, 137463}, // __builtin_ia32_wrfsbase64
+      {Intrinsic::x86_wrgsbase_32, 137489}, // __builtin_ia32_wrgsbase32
+      {Intrinsic::x86_wrgsbase_64, 137515}, // __builtin_ia32_wrgsbase64
+      {Intrinsic::x86_flags_write_u32, 129664}, // __builtin_ia32_writeeflags_u32
+      {Intrinsic::x86_flags_write_u64, 129695}, // __builtin_ia32_writeeflags_u64
+      {Intrinsic::x86_wrpkru, 137541}, // __builtin_ia32_wrpkru
+      {Intrinsic::x86_wrssd, 137563}, // __builtin_ia32_wrssd
+      {Intrinsic::x86_wrssq, 137584}, // __builtin_ia32_wrssq
+      {Intrinsic::x86_wrussd, 137605}, // __builtin_ia32_wrussd
+      {Intrinsic::x86_wrussq, 137627}, // __builtin_ia32_wrussq
+      {Intrinsic::x86_xabort, 137649}, // __builtin_ia32_xabort
+      {Intrinsic::x86_xbegin, 137671}, // __builtin_ia32_xbegin
+      {Intrinsic::x86_xend, 137693}, // __builtin_ia32_xend
+      {Intrinsic::x86_xtest, 139191}, // __builtin_ia32_xtest
+    };
+    auto I = std::lower_bound(std::begin(x86Names),
+                              std::end(x86Names),
+                              BuiltinNameStr);
+    if (I != std::end(x86Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "xcore") {
+    static const BuiltinEntry xcoreNames[] = {
+      {Intrinsic::xcore_bitrev, 139212}, // __builtin_bitrev
+      {Intrinsic::xcore_getid, 139229}, // __builtin_getid
+      {Intrinsic::xcore_getps, 139245}, // __builtin_getps
+      {Intrinsic::xcore_setps, 139261}, // __builtin_setps
+    };
+    auto I = std::lower_bound(std::begin(xcoreNames),
+                              std::end(xcoreNames),
+                              BuiltinNameStr);
+    if (I != std::end(xcoreNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  return Intrinsic::not_intrinsic;
+}
+#endif
+
+// Get the LLVM intrinsic that corresponds to a builtin.
+// This is used by the C front-end.  The builtin name is passed
+// in as BuiltinNameStr, and a target prefix (e.g. 'ppc') is passed
+// in as TargetPrefixStr.  The corresponding intrinsic ID is returned,
+// or Intrinsic::not_intrinsic if there is no match.
+#ifdef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+Intrinsic::ID Intrinsic::getIntrinsicForMSBuiltin(const char *TargetPrefixStr, StringRef BuiltinNameStr) {
+  static const char BuiltinNames[] = {
+  '_', '_', 'd', 'm', 'b', '\000', '_', '_', 'd', 's', 'b', '\000', '_', '_', 'i',
+  's', 'b', '\000', '_', 'M', 'o', 'v', 'e', 'F', 'r', 'o', 'm', 'C', 'o', 'p',
+  'r', 'o', 'c', 'e', 's', 's', 'o', 'r', '\000', '_', 'M', 'o', 'v', 'e', 'F',
+  'r', 'o', 'm', 'C', 'o', 'p', 'r', 'o', 'c', 'e', 's', 's', 'o', 'r', '2',
+  '\000',
+  };
+
+  struct BuiltinEntry {
+    Intrinsic::ID IntrinID;
+    unsigned StrTabOffset;
+    const char *getName() const {
+      return &BuiltinNames[StrTabOffset];
+    }
+    bool operator<(StringRef RHS) const {
+      return strncmp(getName(), RHS.data(), RHS.size()) < 0;
+    }
+  };
+  StringRef TargetPrefix(TargetPrefixStr);
+
+  if (TargetPrefix == "aarch64") {
+    static const BuiltinEntry aarch64Names[] = {
+      {Intrinsic::aarch64_dmb, 0}, // __dmb
+      {Intrinsic::aarch64_dsb, 6}, // __dsb
+      {Intrinsic::aarch64_isb, 12}, // __isb
+    };
+    auto I = std::lower_bound(std::begin(aarch64Names),
+                              std::end(aarch64Names),
+                              BuiltinNameStr);
+    if (I != std::end(aarch64Names) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  if (TargetPrefix == "arm") {
+    static const BuiltinEntry armNames[] = {
+      {Intrinsic::arm_mrc, 18}, // _MoveFromCoprocessor
+      {Intrinsic::arm_mrc2, 39}, // _MoveFromCoprocessor2
+      {Intrinsic::arm_dmb, 0}, // __dmb
+      {Intrinsic::arm_dsb, 6}, // __dsb
+      {Intrinsic::arm_isb, 12}, // __isb
+    };
+    auto I = std::lower_bound(std::begin(armNames),
+                              std::end(armNames),
+                              BuiltinNameStr);
+    if (I != std::end(armNames) &&
+        I->getName() == BuiltinNameStr)
+      return I->IntrinID;
+  }
+  return Intrinsic::not_intrinsic;
+}
+#endif
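+
+// Illustrative usage sketch (a hypothetical call site, not part of this
+// file), using the arm table above:
+//
+//   Intrinsic::ID ID = Intrinsic::getIntrinsicForMSBuiltin("arm", "__dmb");
+//   // ID == Intrinsic::arm_dmb; an unknown name yields
+//   // Intrinsic::not_intrinsic.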
+
+#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
+// let's return it to _setjmp state
+#  pragma pop_macro("setjmp")
+#  undef setjmp_undefined_for_msvc
+#endif
+
diff --git a/linux-x64/clang/include/llvm/IR/Intrinsics.h b/linux-x64/clang/include/llvm/IR/Intrinsics.h
new file mode 100644
index 0000000..fc79da7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Intrinsics.h
@@ -0,0 +1,188 @@
+//===-- llvm/Intrinsics.h - LLVM Intrinsic Function Handling ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of enums which allow processing of intrinsic
+// functions.  Values of these enum types are returned by
+// Function::getIntrinsicID.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INTRINSICS_H
+#define LLVM_IR_INTRINSICS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include <string>
+
+namespace llvm {
+
+class Type;
+class FunctionType;
+class Function;
+class LLVMContext;
+class Module;
+class AttributeList;
+
+/// This namespace contains an enum with a value for every intrinsic/builtin
+/// function known by LLVM. The enum values are returned by
+/// Function::getIntrinsicID().
+namespace Intrinsic {
+  enum ID : unsigned {
+    not_intrinsic = 0,   // Must be zero
+
+    // Get the intrinsic enums generated from Intrinsics.td
+#define GET_INTRINSIC_ENUM_VALUES
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_ENUM_VALUES
+    , num_intrinsics
+  };
+
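+  // A function's intrinsic ID can be compared against these enum values; a
+  // minimal sketch (assuming a Function *F is in scope):
+  //
+  //   if (F->getIntrinsicID() == Intrinsic::memcpy) { /* ... */ }
+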
+  /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
+  /// Note, this version is for intrinsics with no overloads.  Use the other
+  /// version of getName if overloads are required.
+  StringRef getName(ID id);
+
+  /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
+  /// Note, this version of getName supports overloads, but is less efficient
+  /// than the StringRef version of this function.  If no overloads are
+  /// required, it is safe to use this version, but better to use the StringRef
+  /// version.
+  std::string getName(ID id, ArrayRef<Type*> Tys);
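+
+  // A minimal sketch of the overloaded form (assuming an LLVMContext &Ctx):
+  //
+  //   Type *I32 = Type::getInt32Ty(Ctx);
+  //   std::string N = Intrinsic::getName(Intrinsic::ctlz, {I32});
+  //   // N == "llvm.ctlz.i32"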
+
+  /// Return the function type for an intrinsic.
+  FunctionType *getType(LLVMContext &Context, ID id,
+                        ArrayRef<Type*> Tys = None);
+
+  /// Returns true if the intrinsic can be overloaded.
+  bool isOverloaded(ID id);
+
+  /// Returns true if the intrinsic is a leaf, i.e. it does not make any calls
+  /// itself.  Most intrinsics are leaves; the exceptions are the patchpoint
+  /// and statepoint intrinsics, which call (or invoke) their "target" argument.
+  bool isLeaf(ID id);
+
+  /// Return the attributes for an intrinsic.
+  AttributeList getAttributes(LLVMContext &C, ID id);
+
+  /// Create or insert an LLVM Function declaration for an intrinsic, and return
+  /// it.
+  ///
+  /// The Tys parameter is for intrinsics with overloaded types (e.g., those
+  /// using iAny, fAny, vAny, or iPTRAny).  For a declaration of an overloaded
+  /// intrinsic, Tys must provide exactly one type for each overloaded type in
+  /// the intrinsic.
+  Function *getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys = None);
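+
+  // A minimal usage sketch (assuming a Module *M and LLVMContext &Ctx are in
+  // scope): declaring llvm.memcpy overloaded at i8* destination/source
+  // pointers and an i64 length:
+  //
+  //   Type *I8Ptr = Type::getInt8PtrTy(Ctx);
+  //   Type *I64   = Type::getInt64Ty(Ctx);
+  //   Function *Memcpy =
+  //       Intrinsic::getDeclaration(M, Intrinsic::memcpy, {I8Ptr, I8Ptr, I64});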
+
+  /// Looks up Name in NameTable via binary search. NameTable must be sorted
+  /// and all entries must start with "llvm.".  If NameTable contains an exact
+  /// match for Name or a prefix of Name followed by a dot, its index in
+  /// NameTable is returned. Otherwise, -1 is returned.
+  int lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
+                                StringRef Name);
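+
+  // Illustrative sketch (with a hypothetical table): looking up an overloaded
+  // name finds the entry that is a prefix of the name followed by a dot:
+  //
+  //   static const char *const Tbl[] = {"llvm.foo"};
+  //   int Idx = Intrinsic::lookupLLVMIntrinsicByName(Tbl, "llvm.foo.i32");
+  //   // Idx == 0; a name with no matching entry yields -1.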
+
+  /// Map a GCC builtin name to an intrinsic ID.
+  ID getIntrinsicForGCCBuiltin(const char *Prefix, StringRef BuiltinName);
+
+  /// Map a MS builtin name to an intrinsic ID.
+  ID getIntrinsicForMSBuiltin(const char *Prefix, StringRef BuiltinName);
+
+  /// This is a type descriptor which explains the type requirements of an
+  /// intrinsic. This is returned by getIntrinsicInfoTableEntries.
+  struct IITDescriptor {
+    enum IITDescriptorKind {
+      Void, VarArg, MMX, Token, Metadata, Half, Float, Double,
+      Integer, Vector, Pointer, Struct,
+      Argument, ExtendArgument, TruncArgument, HalfVecArgument,
+      SameVecWidthArgument, PtrToArgument, PtrToElt, VecOfAnyPtrsToElt
+    } Kind;
+
+    union {
+      unsigned Integer_Width;
+      unsigned Float_Width;
+      unsigned Vector_Width;
+      unsigned Pointer_AddressSpace;
+      unsigned Struct_NumElements;
+      unsigned Argument_Info;
+    };
+
+    enum ArgKind {
+      AK_Any,
+      AK_AnyInteger,
+      AK_AnyFloat,
+      AK_AnyVector,
+      AK_AnyPointer
+    };
+
+    unsigned getArgumentNumber() const {
+      assert(Kind == Argument || Kind == ExtendArgument ||
+             Kind == TruncArgument || Kind == HalfVecArgument ||
+             Kind == SameVecWidthArgument || Kind == PtrToArgument ||
+             Kind == PtrToElt);
+      return Argument_Info >> 3;
+    }
+    ArgKind getArgumentKind() const {
+      assert(Kind == Argument || Kind == ExtendArgument ||
+             Kind == TruncArgument || Kind == HalfVecArgument ||
+             Kind == SameVecWidthArgument || Kind == PtrToArgument);
+      return (ArgKind)(Argument_Info & 7);
+    }
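+
+    // For the argument kinds above, Argument_Info packs the argument number
+    // and the ArgKind into a single word: e.g. "argument 2, any vector" is
+    // stored as (2 << 3) | AK_AnyVector.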
+
+    // VecOfAnyPtrsToElt uses both an overloaded argument (for address space)
+    // and a reference argument (for matching vector width and element types)
+    unsigned getOverloadArgNumber() const {
+      assert(Kind == VecOfAnyPtrsToElt);
+      return Argument_Info >> 16;
+    }
+    unsigned getRefArgNumber() const {
+      assert(Kind == VecOfAnyPtrsToElt);
+      return Argument_Info & 0xFFFF;
+    }
+
+    static IITDescriptor get(IITDescriptorKind K, unsigned Field) {
+      IITDescriptor Result = { K, { Field } };
+      return Result;
+    }
+
+    static IITDescriptor get(IITDescriptorKind K, unsigned short Hi,
+                             unsigned short Lo) {
+      unsigned Field = Hi << 16 | Lo;
+      IITDescriptor Result = {K, {Field}};
+      return Result;
+    }
+  };
+
+  /// Decode the IIT table entries for the specified intrinsic into an array
+  /// of IITDescriptors.
+  void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T);
+
+  /// Match the specified type (which comes from an intrinsic argument or return
+  /// value) with the type constraints specified by the .td file. If the given
+  /// type is an overloaded type it is pushed to the ArgTys vector.
+  ///
+  /// Returns false if the given type matches the constraints, true otherwise.
+  bool matchIntrinsicType(Type *Ty, ArrayRef<IITDescriptor> &Infos,
+                          SmallVectorImpl<Type*> &ArgTys);
+
+  /// Verify if the intrinsic has variable arguments. This method is intended to
+  /// be called after all the fixed arguments have been matched first.
+  ///
+  /// This method returns true on error.
+  bool matchIntrinsicVarArg(bool isVarArg, ArrayRef<IITDescriptor> &Infos);
+
+  // Checks whether the intrinsic name matches its signature; if not, returns
+  // the declaration with the same signature and a remangled name.
+  llvm::Optional<Function*> remangleIntrinsicFunction(Function *F);
+
+} // End Intrinsic namespace
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Intrinsics.td b/linux-x64/clang/include/llvm/IR/Intrinsics.td
new file mode 100644
index 0000000..fbb1c5c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Intrinsics.td
@@ -0,0 +1,985 @@
+//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of all LLVM intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/CodeGen/ValueTypes.td"
+include "llvm/CodeGen/SDNodeProperties.td"
+
+//===----------------------------------------------------------------------===//
+//  Properties we keep track of for intrinsics.
+//===----------------------------------------------------------------------===//
+
+class IntrinsicProperty;
+
+// Intr*Mem - Memory properties.  If no property is set, the worst case
+// is assumed (it may read and write any memory it can get access to and it may
+// have other side effects).
+
+// IntrNoMem - The intrinsic does not access memory or have any other side
+// effects.  It may be CSE'd, deleted if dead, etc.
+def IntrNoMem : IntrinsicProperty;
+
+// IntrReadMem - This intrinsic only reads from memory. It does not write to
+// memory and has no other side effects. Therefore, it cannot be moved across
+// potentially aliasing stores. However, it can be reordered otherwise and can
+// be deleted if dead.
+def IntrReadMem : IntrinsicProperty;
+
+// IntrWriteMem - This intrinsic only writes to memory, but does not read from
+// memory, and has no other side effects. This means dead stores before calls
+// to this intrinsic may be removed.
+def IntrWriteMem : IntrinsicProperty;
+
+// IntrArgMemOnly - This intrinsic only accesses memory that its pointer-typed
+// argument(s) points to, but may access an unspecified amount. Other than
+// reads from and (possibly volatile) writes to memory, it has no side effects.
+def IntrArgMemOnly : IntrinsicProperty;
+
+// IntrInaccessibleMemOnly -- This intrinsic only accesses memory that is not
+// accessible by the module being compiled. This is a weaker form of IntrNoMem.
+def IntrInaccessibleMemOnly : IntrinsicProperty;
+
+// IntrInaccessibleMemOrArgMemOnly -- This intrinsic only accesses memory that
+// its pointer-typed arguments point to or memory that is not accessible
+// by the module being compiled. This is a weaker form of IntrArgMemOnly.
+def IntrInaccessibleMemOrArgMemOnly : IntrinsicProperty;
+
+// Commutative - This intrinsic is commutative: X op Y == Y op X.
+def Commutative : IntrinsicProperty;
+
+// Throws - This intrinsic can throw.
+def Throws : IntrinsicProperty;
+
+// NoCapture - The specified argument pointer is not captured by the intrinsic.
+class NoCapture<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// Returned - The specified argument is always the return value of the
+// intrinsic.
+class Returned<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// ReadOnly - The specified argument pointer is not written to through the
+// pointer by the intrinsic.
+class ReadOnly<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// WriteOnly - The intrinsic does not read memory through the specified
+// argument pointer.
+class WriteOnly<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// ReadNone - The specified argument pointer is not dereferenced by the
+// intrinsic.
+class ReadNone<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+def IntrNoReturn : IntrinsicProperty;
+
+// IntrNoDuplicate - Calls to this intrinsic cannot be duplicated.
+// Parallels the noduplicate attribute on LLVM IR functions.
+def IntrNoDuplicate : IntrinsicProperty;
+
+// IntrConvergent - Calls to this intrinsic are convergent and may not be made
+// control-dependent on any additional values.
+// Parallels the convergent attribute on LLVM IR functions.
+def IntrConvergent : IntrinsicProperty;
+
+// This property indicates that the intrinsic is safe to speculate.
+def IntrSpeculatable : IntrinsicProperty;
+
+// This property can be used to override the 'has no other side effects'
+// language of the IntrNoMem, IntrReadMem, IntrWriteMem, and IntrArgMemOnly
+// intrinsic properties.  By default, intrinsics are assumed to have side
+// effects, so this property is only necessary if you have defined one of
+// the memory properties listed above.
+// For this property, 'side effects' has the same meaning as 'side effects'
+// defined by the hasSideEffects property of the TableGen Instruction class.
+def IntrHasSideEffects : IntrinsicProperty;
+
+//===----------------------------------------------------------------------===//
+// Types used by intrinsics.
+//===----------------------------------------------------------------------===//
+
+class LLVMType<ValueType vt> {
+  ValueType VT = vt;
+}
+
+class LLVMQualPointerType<LLVMType elty, int addrspace>
+  : LLVMType<iPTR> {
+  LLVMType ElTy = elty;
+  int AddrSpace = addrspace;
+}
+
+class LLVMPointerType<LLVMType elty>
+  : LLVMQualPointerType<elty, 0>;
+
+class LLVMAnyPointerType<LLVMType elty>
+  : LLVMType<iPTRAny> {
+  LLVMType ElTy = elty;
+}
+
+// Match the type of another intrinsic parameter.  Number is an index into the
+// list of overloaded types for the intrinsic, excluding all the fixed types.
+// The Number value must refer to a previously listed type.  For example:
+//   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]>
+// has two overloaded types, the 2nd and 3rd arguments.  LLVMMatchType<0>
+// refers to the first overloaded type, which is the 2nd argument.
+class LLVMMatchType<int num>
+  : LLVMType<OtherVT> {
+  int Number = num;
+}
+
+// Match the type of another intrinsic parameter that is expected to be based on
+// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
+// be twice as wide or half as wide as the other type.  This is only useful when
+// the intrinsic is overloaded, so the matched type should be declared as iAny.
+class LLVMExtendedType<int num> : LLVMMatchType<num>;
+class LLVMTruncatedType<int num> : LLVMMatchType<num>;
+class LLVMVectorSameWidth<int num, LLVMType elty>
+  : LLVMMatchType<num> {
+  ValueType ElTy = elty.VT;
+}
+class LLVMPointerTo<int num> : LLVMMatchType<num>;
+class LLVMPointerToElt<int num> : LLVMMatchType<num>;
+class LLVMVectorOfAnyPointersToElt<int num> : LLVMMatchType<num>;
+
+// Match the type of another intrinsic parameter that is expected to be a
+// vector type, but change the element count to be half as many.
+class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;
+
+def llvm_void_ty       : LLVMType<isVoid>;
+def llvm_any_ty        : LLVMType<Any>;
+def llvm_anyint_ty     : LLVMType<iAny>;
+def llvm_anyfloat_ty   : LLVMType<fAny>;
+def llvm_anyvector_ty  : LLVMType<vAny>;
+def llvm_i1_ty         : LLVMType<i1>;
+def llvm_i8_ty         : LLVMType<i8>;
+def llvm_i16_ty        : LLVMType<i16>;
+def llvm_i32_ty        : LLVMType<i32>;
+def llvm_i64_ty        : LLVMType<i64>;
+def llvm_half_ty       : LLVMType<f16>;
+def llvm_float_ty      : LLVMType<f32>;
+def llvm_double_ty     : LLVMType<f64>;
+def llvm_f80_ty        : LLVMType<f80>;
+def llvm_f128_ty       : LLVMType<f128>;
+def llvm_ppcf128_ty    : LLVMType<ppcf128>;
+def llvm_ptr_ty        : LLVMPointerType<llvm_i8_ty>;             // i8*
+def llvm_ptrptr_ty     : LLVMPointerType<llvm_ptr_ty>;            // i8**
+def llvm_anyptr_ty     : LLVMAnyPointerType<llvm_i8_ty>;          // (space)i8*
+def llvm_empty_ty      : LLVMType<OtherVT>;                       // { }
+def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>;          // { }*
+def llvm_metadata_ty   : LLVMType<MetadataVT>;                    // !{...}
+def llvm_token_ty      : LLVMType<token>;                         // token
+
+def llvm_x86mmx_ty     : LLVMType<x86mmx>;
+def llvm_ptrx86mmx_ty  : LLVMPointerType<llvm_x86mmx_ty>;         // <1 x i64>*
+
+def llvm_v2i1_ty       : LLVMType<v2i1>;     //   2 x i1
+def llvm_v4i1_ty       : LLVMType<v4i1>;     //   4 x i1
+def llvm_v8i1_ty       : LLVMType<v8i1>;     //   8 x i1
+def llvm_v16i1_ty      : LLVMType<v16i1>;    //  16 x i1
+def llvm_v32i1_ty      : LLVMType<v32i1>;    //  32 x i1
+def llvm_v64i1_ty      : LLVMType<v64i1>;    //  64 x i1
+def llvm_v512i1_ty     : LLVMType<v512i1>;   // 512 x i1
+def llvm_v1024i1_ty    : LLVMType<v1024i1>;  //1024 x i1
+
+def llvm_v1i8_ty       : LLVMType<v1i8>;     //  1 x i8
+def llvm_v2i8_ty       : LLVMType<v2i8>;     //  2 x i8
+def llvm_v4i8_ty       : LLVMType<v4i8>;     //  4 x i8
+def llvm_v8i8_ty       : LLVMType<v8i8>;     //  8 x i8
+def llvm_v16i8_ty      : LLVMType<v16i8>;    // 16 x i8
+def llvm_v32i8_ty      : LLVMType<v32i8>;    // 32 x i8
+def llvm_v64i8_ty      : LLVMType<v64i8>;    // 64 x i8
+def llvm_v128i8_ty     : LLVMType<v128i8>;   //128 x i8
+def llvm_v256i8_ty     : LLVMType<v256i8>;   //256 x i8
+
+def llvm_v1i16_ty      : LLVMType<v1i16>;    //  1 x i16
+def llvm_v2i16_ty      : LLVMType<v2i16>;    //  2 x i16
+def llvm_v4i16_ty      : LLVMType<v4i16>;    //  4 x i16
+def llvm_v8i16_ty      : LLVMType<v8i16>;    //  8 x i16
+def llvm_v16i16_ty     : LLVMType<v16i16>;   // 16 x i16
+def llvm_v32i16_ty     : LLVMType<v32i16>;   // 32 x i16
+def llvm_v64i16_ty     : LLVMType<v64i16>;   // 64 x i16
+def llvm_v128i16_ty    : LLVMType<v128i16>;  //128 x i16
+
+def llvm_v1i32_ty      : LLVMType<v1i32>;    //  1 x i32
+def llvm_v2i32_ty      : LLVMType<v2i32>;    //  2 x i32
+def llvm_v4i32_ty      : LLVMType<v4i32>;    //  4 x i32
+def llvm_v8i32_ty      : LLVMType<v8i32>;    //  8 x i32
+def llvm_v16i32_ty     : LLVMType<v16i32>;   // 16 x i32
+def llvm_v32i32_ty     : LLVMType<v32i32>;   // 32 x i32
+def llvm_v64i32_ty     : LLVMType<v64i32>;   // 64 x i32
+
+def llvm_v1i64_ty      : LLVMType<v1i64>;    //  1 x i64
+def llvm_v2i64_ty      : LLVMType<v2i64>;    //  2 x i64
+def llvm_v4i64_ty      : LLVMType<v4i64>;    //  4 x i64
+def llvm_v8i64_ty      : LLVMType<v8i64>;    //  8 x i64
+def llvm_v16i64_ty     : LLVMType<v16i64>;   // 16 x i64
+def llvm_v32i64_ty     : LLVMType<v32i64>;   // 32 x i64
+
+def llvm_v1i128_ty     : LLVMType<v1i128>;   //  1 x i128
+
+def llvm_v2f16_ty      : LLVMType<v2f16>;    //  2 x half (__fp16)
+def llvm_v4f16_ty      : LLVMType<v4f16>;    //  4 x half (__fp16)
+def llvm_v8f16_ty      : LLVMType<v8f16>;    //  8 x half (__fp16)
+def llvm_v1f32_ty      : LLVMType<v1f32>;    //  1 x float
+def llvm_v2f32_ty      : LLVMType<v2f32>;    //  2 x float
+def llvm_v4f32_ty      : LLVMType<v4f32>;    //  4 x float
+def llvm_v8f32_ty      : LLVMType<v8f32>;    //  8 x float
+def llvm_v16f32_ty     : LLVMType<v16f32>;   // 16 x float
+def llvm_v1f64_ty      : LLVMType<v1f64>;    //  1 x double
+def llvm_v2f64_ty      : LLVMType<v2f64>;    //  2 x double
+def llvm_v4f64_ty      : LLVMType<v4f64>;    //  4 x double
+def llvm_v8f64_ty      : LLVMType<v8f64>;    //  8 x double
+
+def llvm_vararg_ty     : LLVMType<isVoid>;   // this means vararg here
+
+
+//===----------------------------------------------------------------------===//
+// Intrinsic Definitions.
+//===----------------------------------------------------------------------===//
+
+// Intrinsic class - This is used to define one LLVM intrinsic.  The name of the
+// intrinsic definition should start with "int_", then match the LLVM intrinsic
+// name with the "llvm." prefix removed, and all "."s turned into "_"s.  For
+// example, llvm.bswap.i16 -> int_bswap_i16.
+//
+//  * RetTypes is a list containing the return types expected for the
+//    intrinsic.
+//  * ParamTypes is a list containing the parameter types expected for the
+//    intrinsic.
+//  * Properties can be set to describe the behavior of the intrinsic.
+//
+class Intrinsic<list<LLVMType> ret_types,
+                list<LLVMType> param_types = [],
+                list<IntrinsicProperty> intr_properties = [],
+                string name = "",
+                list<SDNodeProperty> sd_properties = []> : SDPatternOperator {
+  string LLVMName = name;
+  string TargetPrefix = "";   // Set to a prefix for target-specific intrinsics.
+  list<LLVMType> RetTypes = ret_types;
+  list<LLVMType> ParamTypes = param_types;
+  list<IntrinsicProperty> IntrProperties = intr_properties;
+  let Properties = sd_properties;
+
+  bit isTarget = 0;
+}
+
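+// For example, a hypothetical intrinsic llvm.example.op (illustrative only,
+// not defined by LLVM) taking two i32 values, returning an i32, and having
+// no side effects would be declared as:
+//
+//   def int_example_op : Intrinsic<[llvm_i32_ty],
+//                                  [llvm_i32_ty, llvm_i32_ty],
+//                                  [IntrNoMem]>;
+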
+/// GCCBuiltin - If this intrinsic exactly corresponds to a GCC builtin, this
+/// specifies the name of the builtin.  This provides automatic CBE and CFE
+/// support.
+class GCCBuiltin<string name> {
+  string GCCBuiltinName = name;
+}
+
+class MSBuiltin<string name> {
+  string MSBuiltinName = name;
+}
+
+
+//===--------------- Variable Argument Handling Intrinsics ----------------===//
+//
+
+def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
+def int_vacopy  : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
+                            "llvm.va_copy">;
+def int_vaend   : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
+
+//===------------------- Garbage Collection Intrinsics --------------------===//
+//
+def int_gcroot  : Intrinsic<[],
+                            [llvm_ptrptr_ty, llvm_ptr_ty]>;
+def int_gcread  : Intrinsic<[llvm_ptr_ty],
+                            [llvm_ptr_ty, llvm_ptrptr_ty],
+                            [IntrReadMem, IntrArgMemOnly]>;
+def int_gcwrite : Intrinsic<[],
+                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+                            [IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>;
+
+//===--------------------- Code Generator Intrinsics ----------------------===//
+//
+def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_frameaddress  : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_read_register  : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
+                                   [IntrReadMem], "llvm.read_register">;
+def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
+                                   [], "llvm.write_register">;
+
+// Gets the address of the local variable area. This is typically a copy of the
+// stack, frame, or base pointer depending on the type of prologue.
+def int_localaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+// Escapes local variables to allow access from other functions.
+def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;
+
+// Given a function and the localaddress of a parent frame, returns a pointer
+// to an escaped allocation indicated by the index.
+def int_localrecover : Intrinsic<[llvm_ptr_ty],
+                                 [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
+// model their dependencies on allocas.
+def int_stacksave     : Intrinsic<[llvm_ptr_ty]>,
+                        GCCBuiltin<"__builtin_stack_save">;
+def int_stackrestore  : Intrinsic<[], [llvm_ptr_ty]>,
+                        GCCBuiltin<"__builtin_stack_restore">;
+
+def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;
+
+def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
+                         GCCBuiltin<"__builtin_thread_pointer">;
+
+// IntrInaccessibleMemOrArgMemOnly is a little more pessimistic than strictly
+// necessary for prefetch; however, it conveniently prevents the prefetch from
+// being reordered too freely with respect to nearby accesses to the same
+// memory, while not impeding optimization.
+def int_prefetch
+    : Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
+                [ IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0> ]>;
+def int_pcmarker      : Intrinsic<[], [llvm_i32_ty]>;
+
+def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
+
+// The assume intrinsic is marked as arbitrarily writing so that proper
+// control dependencies will be maintained.
+def int_assume        : Intrinsic<[], [llvm_i1_ty], []>;
+
+// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
+// guard to the correct place on the stack frame.
+def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackguard : Intrinsic<[llvm_ptr_ty], [], []>;
+
+// A counter increment for instrumentation based profiling.
+def int_instrprof_increment : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_i64_ty,
+                                         llvm_i32_ty, llvm_i32_ty],
+                                        []>;
+
+// A counter increment with step for instrumentation based profiling.
+def int_instrprof_increment_step : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_i64_ty,
+                                         llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+                                        []>;
+
+// A call to profile runtime for value profiling of target expressions
+// through instrumentation based profiling.
+def int_instrprof_value_profile : Intrinsic<[],
+                                            [llvm_ptr_ty, llvm_i64_ty,
+                                             llvm_i64_ty, llvm_i32_ty,
+                                             llvm_i32_ty],
+                                            []>;
+
+//===------------------- Standard C Library Intrinsics --------------------===//
+//
+
+def int_memcpy  : Intrinsic<[],
+                             [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+                              llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+                             WriteOnly<0>, ReadOnly<1>]>;
+def int_memmove : Intrinsic<[],
+                            [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+                             llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+                             ReadOnly<1>]>;
+def int_memset  : Intrinsic<[],
+                            [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
+                             llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
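+
+// E.g. (illustrative IR) a 16-byte, non-volatile llvm.memcpy between i8*
+// buffers:
+//
+//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)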
+
+// FIXME: Add version of these floating point intrinsics which allow non-default
+// rounding modes and FP exception handling.
+
+let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+  def int_fma  : Intrinsic<[llvm_anyfloat_ty],
+                           [LLVMMatchType<0>, LLVMMatchType<0>,
+                            LLVMMatchType<0>]>;
+  def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>,
+                               LLVMMatchType<0>]>;
+
+  // These functions do not read memory, but are sensitive to the
+  // rounding mode. LLVM purposely does not model changes to the FP
+  // environment so they can be treated as readnone.
+  def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>;
+  def int_sin  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_cos  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_pow  : Intrinsic<[llvm_anyfloat_ty],
+                           [LLVMMatchType<0>, LLVMMatchType<0>]>;
+  def int_log  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_log10: Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_exp  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_fabs : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_copysign : Intrinsic<[llvm_anyfloat_ty],
+                               [LLVMMatchType<0>, LLVMMatchType<0>]>;
+  def int_floor : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_ceil  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_trunc : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_rint  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
+                                   [IntrNoMem]>;
+}
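+
+// E.g. (illustrative IR) the overloaded square root instantiated at f64:
+//
+//   %r = call double @llvm.sqrt.f64(double %x)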
+
+def int_minnum : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>],
+  [IntrNoMem, IntrSpeculatable, Commutative]
+>;
+def int_maxnum : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>],
+  [IntrNoMem, IntrSpeculatable, Commutative]
+>;
+
+// NOTE: these are internal interfaces.
+def int_setjmp     : Intrinsic<[llvm_i32_ty],  [llvm_ptr_ty]>;
+def int_longjmp    : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+def int_sigsetjmp  : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+
+// Internal interface for object size checking
+def int_objectsize : Intrinsic<[llvm_anyint_ty],
+                               [llvm_anyptr_ty, llvm_i1_ty, llvm_i1_ty],
+                               [IntrNoMem, IntrSpeculatable]>,
+                               GCCBuiltin<"__builtin_object_size">;
+
+//===--------------- Constrained Floating Point Intrinsics ----------------===//
+//
+
+let IntrProperties = [IntrInaccessibleMemOnly] in {
+  def int_experimental_constrained_fadd : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_fsub : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_fmul : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_fdiv : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_frem : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+
+  def int_experimental_constrained_fma : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+
+  // These intrinsics are sensitive to the rounding mode, so we need
+  // constrained versions of each of them.  When strict rounding and exception
+  // control are not required, the non-constrained versions of these
+  // intrinsics should be used.
+  def int_experimental_constrained_sqrt : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_powi : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_i32_ty,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_sin  : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_cos  : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_pow  : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_log  : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_log10: Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_log2 : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_exp  : Intrinsic<[ llvm_anyfloat_ty ], 
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_exp2 : Intrinsic<[ llvm_anyfloat_ty ],
+                                                    [ LLVMMatchType<0>,
+                                                      llvm_metadata_ty,
+                                                      llvm_metadata_ty ]>;
+  def int_experimental_constrained_rint  : Intrinsic<[ llvm_anyfloat_ty ],
+                                                     [ LLVMMatchType<0>,
+                                                       llvm_metadata_ty,
+                                                       llvm_metadata_ty ]>;
+  def int_experimental_constrained_nearbyint : Intrinsic<[ llvm_anyfloat_ty ],
+                                                         [ LLVMMatchType<0>,
+                                                           llvm_metadata_ty,
+                                                           llvm_metadata_ty ]>;
+}
+// FIXME: Add intrinsics for fcmp, fptrunc, fpext, fptoui and fptosi.
+// FIXME: Add intrinsics for fabs, copysign, floor, ceil, trunc and round?
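+
+// E.g. (illustrative IR) a strictly-rounded addition with dynamic rounding
+// mode and strict exception semantics:
+//
+//   %r = call double @llvm.experimental.constrained.fadd.f64(
+//            double %a, double %b,
+//            metadata !"round.dynamic", metadata !"fpexcept.strict")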
+
+
+//===------------------------- Expect Intrinsics --------------------------===//
+//
+def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+                                              LLVMMatchType<0>], [IntrNoMem]>;
+
+//===-------------------- Bit Manipulation Intrinsics ---------------------===//
+//
+
+// None of these intrinsics accesses memory at all.
+let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+  def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+  def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+  def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+  def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+  def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+}
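+
+// E.g. (illustrative IR) population count instantiated at i32:
+//
+//   %n = call i32 @llvm.ctpop.i32(i32 %x)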
+
+//===------------------------ Debugger Intrinsics -------------------------===//
+//
+
+// None of these intrinsics accesses memory at all, but that doesn't mean the
+// optimizers can change them aggressively.  Special handling is needed in a
+// few places.  These synthetic intrinsics have no side effects and just mark
+// information about their operands.
+let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+  def int_dbg_declare      : Intrinsic<[],
+                                       [llvm_metadata_ty,
+                                        llvm_metadata_ty,
+                                        llvm_metadata_ty]>;
+  def int_dbg_value        : Intrinsic<[],
+                                       [llvm_metadata_ty,
+                                        llvm_metadata_ty,
+                                        llvm_metadata_ty]>;
+  def int_dbg_addr         : Intrinsic<[],
+                                       [llvm_metadata_ty,
+                                        llvm_metadata_ty,
+                                        llvm_metadata_ty]>;
+}
+
+//===------------------ Exception Handling Intrinsics----------------------===//
+//
+
+// The result of eh.typeid.for depends on the enclosing function, but inside a
+// given function it is 'const' and may be CSE'd etc.
+def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
+def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;
+
+// eh.exceptionpointer returns the pointer to the exception caught by
+// the given `catchpad`.
+def int_eh_exceptionpointer : Intrinsic<[llvm_anyptr_ty], [llvm_token_ty],
+                                        [IntrNoMem]>;
+
+// Gets the exception code from a catchpad token. Only used on some platforms.
+def int_eh_exceptioncode : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrNoMem]>;
+
+// __builtin_unwind_init is an undocumented GCC intrinsic that causes all
+// callee-saved registers to be saved and restored (regardless of whether they
+// are used) in the calling function. It is used by libgcc_eh.
+def int_eh_unwind_init: Intrinsic<[]>,
+                        GCCBuiltin<"__builtin_unwind_init">;
+
+def int_eh_dwarf_cfa  : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;
+
+let IntrProperties = [IntrNoMem] in {
+  def int_eh_sjlj_lsda             : Intrinsic<[llvm_ptr_ty]>;
+  def int_eh_sjlj_callsite         : Intrinsic<[], [llvm_i32_ty]>;
+}
+def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
+def int_eh_sjlj_setjmp          : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp         : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
+def int_eh_sjlj_setup_dispatch  : Intrinsic<[], []>;
+
+//===---------------- Generic Variable Attribute Intrinsics----------------===//
+//
+def int_var_annotation : Intrinsic<[],
+                                   [llvm_ptr_ty, llvm_ptr_ty,
+                                    llvm_ptr_ty, llvm_i32_ty],
+                                   [], "llvm.var.annotation">;
+def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>],
+                                   [LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty,
+                                    llvm_i32_ty],
+                                   [], "llvm.ptr.annotation">;
+def int_annotation : Intrinsic<[llvm_anyint_ty],
+                               [LLVMMatchType<0>, llvm_ptr_ty,
+                                llvm_ptr_ty, llvm_i32_ty],
+                               [], "llvm.annotation">;
+
+// Annotates the current program point with metadata strings which are emitted
+// as CodeView debug info records. This is expensive, as it disables inlining
+// and is modelled as having side effects.
+def int_codeview_annotation : Intrinsic<[], [llvm_metadata_ty],
+                                        [IntrInaccessibleMemOnly, IntrNoDuplicate],
+                                        "llvm.codeview.annotation">;
+
+//===------------------------ Trampoline Intrinsics -----------------------===//
+//
+def int_init_trampoline : Intrinsic<[],
+                                    [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+                                    [IntrArgMemOnly, NoCapture<0>]>,
+                                   GCCBuiltin<"__builtin_init_trampoline">;
+
+def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
+                                      [IntrReadMem, IntrArgMemOnly]>,
+                                     GCCBuiltin<"__builtin_adjust_trampoline">;
+
+//===------------------------ Overflow Intrinsics -------------------------===//
+//
+
+// Expose the carry/overflow flag from arithmetic on two integer operands
+// (see the IR sketch after these definitions).
+def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
+def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
+
+def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
+def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
+
+def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
+def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem, IntrSpeculatable]>;
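+
+// In IR, each of these returns a {result, overflow-bit} pair; a minimal
+// sketch at i32 (illustrative):
+//
+//   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+//   %sum  = extractvalue {i32, i1} %pair, 0
+//   %ovf  = extractvalue {i32, i1} %pair, 1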
+
+//===------------------------- Memory Use Markers -------------------------===//
+//
+def int_lifetime_start  : Intrinsic<[],
+                                    [llvm_i64_ty, llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_lifetime_end    : Intrinsic<[],
+                                    [llvm_i64_ty, llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
+                                    [llvm_i64_ty, llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_invariant_end   : Intrinsic<[],
+                                    [llvm_descriptor_ty, llvm_i64_ty,
+                                     llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<2>]>;
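+
+// Illustrative IR usage of the lifetime markers (a sketch; p0i8 is the usual
+// overload for an i8* in address space 0):
+//   %buf = alloca [16 x i8]
+//   %p   = bitcast [16 x i8]* %buf to i8*
+//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
+//   ; ... %buf is live only between the two markers ...
+//   call void @llvm.lifetime.end.p0i8(i64 16, i8* %p)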
+
+// invariant.group.barrier can't be marked with 'readnone' (IntrNoMem),
+// because that would allow CSE of two barriers with the same argument.
+// Readonly and argmemonly say that the barrier only reads its argument,
+// so two calls can be CSE'd only if memory didn't change between them,
+// which is valid.
+// The argument also can't be marked with the 'returned' attribute, because
+// that would allow the barrier itself to be optimized away.
+def int_invariant_group_barrier : Intrinsic<[llvm_anyptr_ty],
+                                            [LLVMMatchType<0>],
+                                            [IntrReadMem, IntrArgMemOnly]>;
+
+//===------------------------ Stackmap Intrinsics -------------------------===//
+//
+def int_experimental_stackmap : Intrinsic<[],
+                                  [llvm_i64_ty, llvm_i32_ty, llvm_vararg_ty],
+                                  [Throws]>;
+def int_experimental_patchpoint_void : Intrinsic<[],
+                                                 [llvm_i64_ty, llvm_i32_ty,
+                                                  llvm_ptr_ty, llvm_i32_ty,
+                                                  llvm_vararg_ty],
+                                                  [Throws]>;
+def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
+                                                [llvm_i64_ty, llvm_i32_ty,
+                                                 llvm_ptr_ty, llvm_i32_ty,
+                                                 llvm_vararg_ty],
+                                                 [Throws]>;
+
+
+//===------------------------ Garbage Collection Intrinsics ---------------===//
+// These are documented in docs/Statepoint.rst
+
+def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
+                               [llvm_i64_ty, llvm_i32_ty,
+                                llvm_anyptr_ty, llvm_i32_ty,
+                                llvm_i32_ty, llvm_vararg_ty],
+                                [Throws]>;
+
+def int_experimental_gc_result   : Intrinsic<[llvm_any_ty], [llvm_token_ty],
+                                             [IntrReadMem]>;
+def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
+                                [llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
+                                [IntrReadMem]>;
+
+//===------------------------ Coroutine Intrinsics ------------------------===//
+// These are documented in docs/Coroutines.rst
+
+// Coroutine Structure Intrinsics.
+
+def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty,
+                             llvm_ptr_ty, llvm_ptr_ty],
+                            [IntrArgMemOnly, IntrReadMem,
+                             ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
+def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
+def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
+                               [WriteOnly<1>]>;
+
+def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
+                              [IntrReadMem, IntrArgMemOnly, ReadOnly<1>,
+                               NoCapture<1>]>;
+def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty], []>;
+
+def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
+
+def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
+def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
+
+def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
+                               [IntrNoMem, ReadNone<0>, ReadNone<1>]>;
+
+// Coroutine Manipulation Intrinsics.
+
+def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+                              [IntrArgMemOnly, ReadOnly<0>, NoCapture<0>]>;
+def int_coro_promise : Intrinsic<[llvm_ptr_ty],
+                                 [llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
+                                 [IntrNoMem, NoCapture<0>]>;
+
+// Coroutine Lowering Intrinsics. Used internally by coroutine passes.
+
+def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
+                                    [IntrReadMem, IntrArgMemOnly, ReadOnly<0>,
+                                     NoCapture<0>]>;
+
+//===-------------------------- Other Intrinsics --------------------------===//
+//
+def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
+                     GCCBuiltin<"__builtin_flt_rounds">;
+def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
+               GCCBuiltin<"__builtin_trap">;
+def int_debugtrap : Intrinsic<[]>,
+                    GCCBuiltin<"__builtin_debugtrap">;
+
+// Support for dynamic deoptimization (or de-specialization)
+def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
+                                            [Throws]>;
+
+// Support for speculative runtime guards
+def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
+                                       [Throws]>;
+
+// NOP: calls/invokes to this intrinsic are removed by codegen
+def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
+
+// This intrinsic has no actual effect, though the optimizer treats it as
+// having opaque side effects. It may be inserted into loops to ensure that
+// they are not removed even if they turn out to be empty, for languages
+// which specify that infinite loops must be preserved.
+def int_sideeffect : Intrinsic<[], [], [IntrInaccessibleMemOnly]>;
+
+// Intrinsics to support the half-precision floating-point format
+let IntrProperties = [IntrNoMem] in {
+def int_convert_to_fp16   : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
+def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
+}
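+
+// Illustrative IR usage (a sketch; f32 is one instantiation of the overload):
+//   %h = call i16 @llvm.convert.to.fp16.f32(float %x)
+//   %f = call float @llvm.convert.from.fp16.f32(i16 %h)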
+
+// Clear cache intrinsic; the default lowering is to ignore it (i.e. emit
+// nothing). Maps to void __clear_cache() on supporting platforms.
+def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
+                                [], "llvm.clear_cache">;
+
+//===-------------------------- Masked Intrinsics -------------------------===//
+//
+def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
+                                      LLVMAnyPointerType<LLVMMatchType<0>>,
+                                      llvm_i32_ty,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                 [IntrArgMemOnly]>;
+
+def int_masked_load  : Intrinsic<[llvm_anyvector_ty],
+                                 [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
+                                 [IntrReadMem, IntrArgMemOnly]>;
+
+def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
+                                 [LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>,
+                                  LLVMMatchType<0>],
+                                 [IntrReadMem]>;
+
+def int_masked_scatter: Intrinsic<[],
+                                  [llvm_anyvector_ty,
+                                   LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
+                                   LLVMVectorSameWidth<0, llvm_i1_ty>]>;
+
+def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMPointerToElt<0>,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>,
+                                      LLVMMatchType<0>],
+                                     [IntrReadMem]>;
+
+def int_masked_compressstore: Intrinsic<[],
+                                     [llvm_anyvector_ty,
+                                      LLVMPointerToElt<0>,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                     [IntrArgMemOnly]>;
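+
+// Illustrative IR usage of a masked load (a sketch; the overload suffixes in
+// the name can differ between LLVM versions). Lanes whose mask bit is false
+// do not touch memory and take their value from the passthru operand:
+//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr,
+//            i32 4, <4 x i1> %mask, <4 x i32> %passthru)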
+
+// Test whether a pointer is associated with a type metadata identifier.
+def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
+                              [IntrNoMem]>;
+
+// Safely loads a function pointer from a virtual table pointer using type metadata.
+def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
+                                      [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
+                                      [IntrNoMem]>;
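+
+// Illustrative IR usage (a sketch; !"_ZTS1A" stands in for a hypothetical
+// type identifier): load the vtable slot at byte offset 0 and test it
+// against the type metadata in a single operation.
+//   %pair = call { i8*, i1 } @llvm.type.checked.load(i8* %vtable, i32 0,
+//                                                    metadata !"_ZTS1A")
+//   %fptr = extractvalue { i8*, i1 } %pair, 0
+//   %ok   = extractvalue { i8*, i1 } %pair, 1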
+
+// Create a branch funnel that implements an indirect call to a limited set of
+// callees. This needs to be a musttail call.
+def int_icall_branch_funnel : Intrinsic<[], [llvm_vararg_ty], []>;
+
+def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
+                                 [IntrReadMem, IntrArgMemOnly]>;
+
+//===---------------------------- XRay Intrinsics -------------------------===//
+//
+// Custom event logging for XRay.
+// Takes a pointer to a string and the length of the string.
+def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+                                     [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
+//===----------------------------------------------------------------------===//
+
+//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
+//
+
+// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
+def int_memcpy_element_unordered_atomic
+    : Intrinsic<[],
+                [
+                  llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
+                ],
+                [
+                  IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+                  ReadOnly<1>
+                ]>;
+
+// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
+def int_memmove_element_unordered_atomic
+    : Intrinsic<[],
+                [
+                  llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
+                ],
+                [
+                  IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+                  ReadOnly<1>
+                ]>;
+
+// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
+def int_memset_element_unordered_atomic
+    : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
+                [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0> ]>;
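+
+// Illustrative IR usage (a sketch with a 4-byte element size; the pointer
+// alignments must be at least the element size):
+//   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(
+//            i8* align 4 %dst, i8* align 4 %src, i32 %len, i32 4)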
+
+//===------------------------ Reduction Intrinsics ------------------------===//
+//
+def int_experimental_vector_reduce_fadd : Intrinsic<[llvm_anyfloat_ty],
+                                                    [llvm_anyfloat_ty,
+                                                     llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_fmul : Intrinsic<[llvm_anyfloat_ty],
+                                                    [llvm_anyfloat_ty,
+                                                     llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_add : Intrinsic<[llvm_anyint_ty],
+                                                   [llvm_anyvector_ty],
+                                                   [IntrNoMem]>;
+def int_experimental_vector_reduce_mul : Intrinsic<[llvm_anyint_ty],
+                                                   [llvm_anyvector_ty],
+                                                   [IntrNoMem]>;
+def int_experimental_vector_reduce_and : Intrinsic<[llvm_anyint_ty],
+                                                   [llvm_anyvector_ty],
+                                                   [IntrNoMem]>;
+def int_experimental_vector_reduce_or : Intrinsic<[llvm_anyint_ty],
+                                                  [llvm_anyvector_ty],
+                                                  [IntrNoMem]>;
+def int_experimental_vector_reduce_xor : Intrinsic<[llvm_anyint_ty],
+                                                   [llvm_anyvector_ty],
+                                                   [IntrNoMem]>;
+def int_experimental_vector_reduce_smax : Intrinsic<[llvm_anyint_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_smin : Intrinsic<[llvm_anyint_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_umax : Intrinsic<[llvm_anyint_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_umin : Intrinsic<[llvm_anyint_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_fmax : Intrinsic<[llvm_anyfloat_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
+def int_experimental_vector_reduce_fmin : Intrinsic<[llvm_anyfloat_ty],
+                                                    [llvm_anyvector_ty],
+                                                    [IntrNoMem]>;
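+
+// Illustrative IR usage (a sketch; the overload name mangling has changed
+// between LLVM versions). Note that fadd/fmul take an explicit scalar
+// accumulator operand in addition to the vector:
+//   %s = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %v)
+//   %f = call float @llvm.experimental.vector.reduce.fadd.f32.v4f32(
+//            float %acc, <4 x float> %w)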
+
+//===----- Intrinsics that are used to provide predicate information -----===//
+
+def int_ssa_copy : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
+                             [IntrNoMem, Returned<0>]>;
+//===----------------------------------------------------------------------===//
+// Target-specific intrinsics
+//===----------------------------------------------------------------------===//
+
+include "llvm/IR/IntrinsicsPowerPC.td"
+include "llvm/IR/IntrinsicsX86.td"
+include "llvm/IR/IntrinsicsARM.td"
+include "llvm/IR/IntrinsicsAArch64.td"
+include "llvm/IR/IntrinsicsXCore.td"
+include "llvm/IR/IntrinsicsHexagon.td"
+include "llvm/IR/IntrinsicsNVVM.td"
+include "llvm/IR/IntrinsicsMips.td"
+include "llvm/IR/IntrinsicsAMDGPU.td"
+include "llvm/IR/IntrinsicsBPF.td"
+include "llvm/IR/IntrinsicsSystemZ.td"
+include "llvm/IR/IntrinsicsWebAssembly.td"
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsAArch64.td b/linux-x64/clang/include/llvm/IR/IntrinsicsAArch64.td
new file mode 100644
index 0000000..5034133
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsAArch64.td
@@ -0,0 +1,652 @@
+//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the AARCH64-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+
+def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
+                               [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
+                                [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+
+def int_aarch64_clrex : Intrinsic<[]>;
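+
+// Illustrative IR for a load-exclusive/store-exclusive retry loop (a sketch;
+// p0i32 is one instantiation of the pointer overload, and stxr returns
+// nonzero when the exclusive monitor was lost):
+//   loop:
+//     %old  = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+//     ; ... compute %new from %old ...
+//     %fail = call i32 @llvm.aarch64.stxr.p0i32(i64 %new, i32* %addr)
+//     %again = icmp ne i32 %fail, 0
+//     br i1 %again, label %loop, label %done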
+
+def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+                                LLVMMatchType<0>], [IntrNoMem]>;
+def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+                                LLVMMatchType<0>], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// HINT
+
+def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// Data Barrier Instructions
+
+def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">, Intrinsic<[], [llvm_i32_ty]>;
+def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">, Intrinsic<[], [llvm_i32_ty]>;
+def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">, Intrinsic<[], [llvm_i32_ty]>;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON)
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_2Scalar_Float_Intrinsic
+    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem]>;
+
+  class AdvSIMD_FPToIntRounding_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+  class AdvSIMD_1IntArg_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+  class AdvSIMD_1FloatArg_Intrinsic
+    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Expand_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Long_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
+  class AdvSIMD_1IntArg_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
+    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+  class AdvSIMD_2IntArg_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2FloatArg_Intrinsic
+    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Compare_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+                [IntrNoMem]>;
+  class AdvSIMD_2Arg_FloatCompare_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Long_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Wide_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>, LLVMTruncatedType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMExtendedType<0>, LLVMExtendedType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMExtendedType<0>, llvm_i32_ty],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_anyvector_ty],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMTruncatedType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMTruncatedType<0>, llvm_i32_ty],
+                [IntrNoMem]>;
+  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
+                [IntrNoMem]>;
+
+  class AdvSIMD_3VectorArg_Intrinsic
+      : Intrinsic<[llvm_anyvector_ty],
+               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+               [IntrNoMem]>;
+  class AdvSIMD_3VectorArg_Scalar_Intrinsic
+      : Intrinsic<[llvm_anyvector_ty],
+               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+               [IntrNoMem]>;
+  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
+      : Intrinsic<[llvm_anyvector_ty],
+               [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
+                LLVMMatchType<1>], [IntrNoMem]>;
+  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  class AdvSIMD_CvtFxToFP_Intrinsic
+    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  class AdvSIMD_CvtFPToFx_Intrinsic
+    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+
+  class AdvSIMD_1Arg_Intrinsic
+    : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+}
+
+// Arithmetic ops
+
+let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
+  // Vector Add Across Lanes
+  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+  // Vector Long Add Across Lanes
+  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+
+  // Vector Halving Add
+  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Rounding Halving Add
+  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Saturating Add
+  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Add High-Half
+  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+  // header is no longer supported.
+  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+  // Vector Rounding Add High-Half
+  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+  // Vector Saturating Doubling Multiply High
+  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Saturating Rounding Doubling Multiply High
+  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Polynomial Multiply
+  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Long Multiply
+  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+
+  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
+  // it with a v16i8.
+  def int_aarch64_neon_pmull64 :
+        Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+  // Vector Extending Multiply
+  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
+    let IntrProperties = [IntrNoMem, Commutative];
+  }
+
+  // Vector Saturating Doubling Long Multiply
+  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+  def int_aarch64_neon_sqdmulls_scalar
+    : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  // Vector Halving Subtract
+  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Saturating Subtract
+  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Subtract High-Half
+  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+  // header is no longer supported.
+  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+  // Vector Rounding Subtract High-Half
+  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+  // Vector Compare Absolute Greater-than-or-equal
+  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+  // Vector Compare Absolute Greater-than
+  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+  // Vector Absolute Difference
+  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Scalar Absolute Difference
+  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;
+
+  // Vector Max
+  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
+  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Max Across Lanes
+  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+  // Vector Min
+  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
+  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Vector Min/Max Number
+  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
+  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;
+
+  // Vector Min Across Lanes
+  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+  // Pairwise Add
+  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Long Pairwise Add
+  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
+  // uaddlp, but tblgen's type inference currently can't handle the
+  // pattern fragments this ends up generating.
+  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+  // Folding Maximum
+  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Folding Minimum
+  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
+  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;
+
+  // Reciprocal Estimate/Step
+  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
+  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;
+
+  // Reciprocal Exponent
+  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;
+
+  // Vector Saturating Shift Left
+  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Rounding Shift Left
+  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Saturating Rounding Shift Left
+  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Signed->Unsigned Shift Left by Constant
+  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
+  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
+  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+  // Vector Narrowing Shift Right by Constant
+  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+  // Vector Rounding Narrowing Shift Right by Constant
+  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+  // Vector Rounding Narrowing Saturating Shift Right by Constant
+  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+  // Vector Shift Left
+  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
+  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Widening Shift Left by Constant
+  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
+  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+
+  // Vector Shift Right by Constant and Insert
+  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+  // Vector Shift Left by Constant and Insert
+  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+  // Vector Saturating Narrow
+  def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
+  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
+  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+  // Vector Saturating Extract and Unsigned Narrow
+  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
+  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+  // Vector Absolute Value
+  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;
+
+  // Vector Saturating Absolute Value
+  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;
+
+  // Vector Saturating Negation
+  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;
+
+  // Vector Count Leading Sign Bits
+  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;
+
+  // Vector Reciprocal Estimate
+  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
+  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;
+
+  // Vector Square Root Estimate
+  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
+  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;
+
+  // Vector Bitwise Reverse
+  def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;
+
+  // Vector Conversions Between Half-Precision and Single-Precision.
+  def int_aarch64_neon_vcvtfp2hf
+    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_aarch64_neon_vcvthf2fp
+    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
+  // Vector Conversions Between Floating-point and Fixed-point.
+  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
+  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
+  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+
+  // Vector FP->Int Conversions
+  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
+  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
+
+  // Vector FP Rounding: of the FP rounding operations, only round-to-nearest
+  // with ties to even lacks a generic LLVM intrinsic, so it is exposed here.
+  def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;
+
+  // Vector FP Inexact Narrowing
+  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+  // Scalar FP Inexact Narrowing
+  def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
+                                        [IntrNoMem]>;
+}
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_2Vector2Index_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
+                [IntrNoMem]>;
+}
+
+// Vector element to element moves
+def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_1Vec_Load_Intrinsic
+      : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_1Vec_Store_Lane_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
+                [IntrArgMemOnly, NoCapture<2>]>;
+
+  class AdvSIMD_2Vec_Load_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_2Vec_Load_Lane_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                [LLVMMatchType<0>, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_2Vec_Store_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+                     LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrArgMemOnly, NoCapture<2>]>;
+  class AdvSIMD_2Vec_Store_Lane_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrArgMemOnly, NoCapture<3>]>;
+
+  class AdvSIMD_3Vec_Load_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_3Vec_Load_Lane_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_3Vec_Store_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrArgMemOnly, NoCapture<3>]>;
+  class AdvSIMD_3Vec_Store_Lane_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty,
+                 LLVMMatchType<0>, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrArgMemOnly, NoCapture<4>]>;
+
+  class AdvSIMD_4Vec_Load_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                 LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_4Vec_Load_Lane_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                 LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMMatchType<0>, LLVMMatchType<0>,
+                 LLVMMatchType<0>, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrReadMem, IntrArgMemOnly]>;
+  class AdvSIMD_4Vec_Store_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+                 LLVMMatchType<0>, LLVMMatchType<0>,
+                 LLVMAnyPointerType<LLVMMatchType<0>>],
+                [IntrArgMemOnly, NoCapture<4>]>;
+  class AdvSIMD_4Vec_Store_Lane_Intrinsic
+    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+                 LLVMMatchType<0>, LLVMMatchType<0>,
+                 llvm_i64_ty, llvm_anyptr_ty],
+                [IntrArgMemOnly, NoCapture<5>]>;
+}
+
+// Memory ops
+
+def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;
+
+def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_st2  : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st3  : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st4  : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_aarch64_neon_st2lane  : AdvSIMD_2Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st3lane  : AdvSIMD_3Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st4lane  : AdvSIMD_4Vec_Store_Lane_Intrinsic;
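+
+// Illustrative IR usage (a sketch; v4i32/p0v4i32 is one instantiation of the
+// overloads). ld2 returns the two deinterleaved registers as a struct:
+//   %pair = call { <4 x i32>, <4 x i32> }
+//           @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* %p)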
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_Tbl1_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_Tbl2_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+  class AdvSIMD_Tbl3_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_Tbl4_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+
+  class AdvSIMD_Tbx1_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_Tbx2_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_Tbx3_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+                 llvm_v16i8_ty, LLVMMatchType<0>],
+                [IntrNoMem]>;
+  class AdvSIMD_Tbx4_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+                [IntrNoMem]>;
+}
+def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
+def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
+def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
+def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;
+
+def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
+def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
+def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
+def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
+
+let TargetPrefix = "aarch64" in {
+  class Crypto_AES_DataKey_Intrinsic
+    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+  class Crypto_AES_Data_Intrinsic
+    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
+  // (v4i32).
+  class Crypto_SHA_5Hash4Schedule_Intrinsic
+    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+                [IntrNoMem]>;
+
+  // SHA intrinsic taking a single 32-bit word of the hash (i32).
+  class Crypto_SHA_1Hash_Intrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  // SHA intrinsic taking 8 words of the schedule
+  class Crypto_SHA_8Schedule_Intrinsic
+    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+  // SHA intrinsic taking 12 words of the schedule
+  class Crypto_SHA_12Schedule_Intrinsic
+    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                [IntrNoMem]>;
+
+  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
+  class Crypto_SHA_8Hash4Schedule_Intrinsic
+    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                [IntrNoMem]>;
+}
+
+// AES
+def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
+def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
+
+// SHA1
+def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;
+
+def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
+def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
+
+// SHA256
+def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
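+
+// Illustrative IR for one SHA-256 hash-update step (a sketch; %wk stands in
+// for a hypothetical schedule-plus-round-constant vector built by the caller):
+//   %abcd.new = call <4 x i32> @llvm.aarch64.crypto.sha256h(
+//                   <4 x i32> %abcd, <4 x i32> %efgh, <4 x i32> %wk)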
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32x  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+    [IntrNoMem]>;
+def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+    [IntrNoMem]>;
+}
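+
+// Illustrative IR usage (a sketch): fold one byte, then one 64-bit chunk,
+// into a running CRC value.
+//   %c1 = call i32 @llvm.aarch64.crc32b(i32 %crc, i32 %byte)
+//   %c2 = call i32 @llvm.aarch64.crc32cx(i32 %c1, i64 %chunk)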
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsAMDGPU.td b/linux-x64/clang/include/llvm/IR/IntrinsicsAMDGPU.td
new file mode 100644
index 0000000..408ab02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -0,0 +1,922 @@
+//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the AMDGPU-specific intrinsics (R600 and GCN).
+//
+//===----------------------------------------------------------------------===//
+
+class AMDGPUReadPreloadRegisterIntrinsic
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
+
+class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;
+
+let TargetPrefix = "r600" in {
+
+multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
+  def _x : AMDGPUReadPreloadRegisterIntrinsic;
+  def _y : AMDGPUReadPreloadRegisterIntrinsic;
+  def _z : AMDGPUReadPreloadRegisterIntrinsic;
+}
+
+multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
+  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
+  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
+  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
+}
+
+defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
+                                 <"__builtin_r600_read_global_size">;
+defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
+                             <"__builtin_r600_read_ngroups">;
+defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
+                          <"__builtin_r600_read_tgid">;
+
+defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
+defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;
+
+def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
+  Intrinsic<[], [], [IntrConvergent]>;
+
+// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
+def int_r600_implicitarg_ptr :
+  GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+def int_r600_rat_store_typed :
+  // 1st parameter: Data
+  // 2nd parameter: Index
+  // 3rd parameter: Constant RAT ID
+  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
+  GCCBuiltin<"__builtin_r600_rat_store_typed">;
+
+def int_r600_recipsqrt_ieee :  Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_r600_recipsqrt_clamped : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_r600_cube : Intrinsic<
+  [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
+>;
+
+} // End TargetPrefix = "r600"
+
+let TargetPrefix = "amdgcn" in {
+
+//===----------------------------------------------------------------------===//
+// ABI Special Intrinsics
+//===----------------------------------------------------------------------===//
+
+defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
+defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
+                               <"__builtin_amdgcn_workgroup_id">;
+
+def int_amdgcn_dispatch_ptr :
+  GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_queue_ptr :
+  GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_kernarg_segment_ptr :
+  GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_implicitarg_ptr :
+  GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_groupstaticsize :
+  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
+  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_dispatch_id :
+  GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
+  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_implicit_buffer_ptr :
+  GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
+  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  [IntrNoMem, IntrSpeculatable]>;
+
+// Set EXEC to the 64-bit value given.
+// This is always moved to the beginning of the basic block.
+def int_amdgcn_init_exec : Intrinsic<[],
+  [llvm_i64_ty],      // 64-bit literal constant
+  [IntrConvergent]>;
+
+// Set EXEC according to a thread count packed in an SGPR input:
+//    thread_count = (input >> bitoffset) & 0x7f;
+// This is always moved to the beginning of the basic block.
+def int_amdgcn_init_exec_from_input : Intrinsic<[],
+  [llvm_i32_ty,       // 32-bit SGPR input
+   llvm_i32_ty],      // bit offset of the thread count
+  [IntrConvergent]>;
+
+
+//===----------------------------------------------------------------------===//
+// Instruction Intrinsics
+//===----------------------------------------------------------------------===//
+
+// The first parameter is s_sendmsg immediate (i16),
+// the second one is copied to m0
+def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
+  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
+  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
+  Intrinsic<[], [], [IntrConvergent]>;
+
+def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
+  Intrinsic<[], [], [IntrConvergent]>;
+
+def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">,
+  Intrinsic<[], [llvm_i32_ty], []>;
+
+def int_amdgcn_div_scale : Intrinsic<
+  // 1st parameter: Numerator
+  // 2nd parameter: Denominator
+  // 3rd parameter: Constant selecting between the first and second
+  //                operand (0 = first, 1 = second).
+  [llvm_anyfloat_ty, llvm_i1_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_trig_preop : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_sin : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cos : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_log_clamp : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
+  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_rcp : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
+  Intrinsic<[llvm_float_ty], [llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_rsq :  Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_rsq_legacy :  GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
+  Intrinsic<
+  [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_rsq_clamp : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_ldexp : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_frexp_mant : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_frexp_exp : Intrinsic<
+  [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
+>;
+
+// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0, and
+// always uses round-toward-zero (RTZ), so it is not suitable for implementing
+// the OpenCL fract function. It should be OK on VI.
+def int_amdgcn_fract : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pkrtz : Intrinsic<
+  [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pknorm_i16 : Intrinsic<
+  [llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pknorm_u16 : Intrinsic<
+  [llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pk_i16 : Intrinsic<
+  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pk_u16 : Intrinsic<
+  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_class : Intrinsic<
+  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
+  Intrinsic<[llvm_anyfloat_ty],
+    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+    [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
+  Intrinsic<[llvm_float_ty],
+    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+    [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
+  Intrinsic<[llvm_float_ty],
+  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
+  Intrinsic<[llvm_float_ty],
+    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+    [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
+  Intrinsic<[llvm_float_ty],
+    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+    [IntrNoMem, IntrSpeculatable]
+>;
+
+// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
+// should be used.
+def int_amdgcn_sffbh :
+  Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+
+// Fields should mirror atomicrmw
+class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
+  [llvm_anyptr_ty,
+  LLVMMatchType<0>,
+  llvm_i32_ty, // ordering
+  llvm_i32_ty, // scope
+  llvm_i1_ty], // isVolatile
+  [IntrArgMemOnly, NoCapture<0>], "",
+  [SDNPMemOperand]
+>;
+
+def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
+def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;
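+
+// Illustrative IR usage (a sketch; the p3i32 suffix and the zero values for
+// the ordering/scope operands are assumptions for illustration only):
+//   %old = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(
+//              i32 addrspace(3)* %ptr, i32 %val, i32 0, i32 0, i1 false)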
+
+class AMDGPULDSF32Intrin<string clang_builtin> :
+  GCCBuiltin<clang_builtin>,
+  Intrinsic<[llvm_float_ty],
+    [LLVMQualPointerType<llvm_float_ty, 3>,
+    llvm_float_ty,
+    llvm_i32_ty, // ordering
+    llvm_i32_ty, // scope
+    llvm_i1_ty], // isVolatile
+    [IntrArgMemOnly, NoCapture<0>]
+>;
+
+def int_amdgcn_ds_fadd : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fadd">;
+def int_amdgcn_ds_fmin : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fmin">;
+def int_amdgcn_ds_fmax : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fmax">;
+
+class AMDGPUImageLoad<bit NoMem = 0> : Intrinsic <
+  [llvm_anyfloat_ty], // vdata(VGPR)
+  [llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_anyint_ty,    // rsrc(SGPR)
+   llvm_i32_ty,       // dmask(imm)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty,        // slc(imm)
+   llvm_i1_ty,        // lwe(imm)
+   llvm_i1_ty],       // da(imm)
+  !if(NoMem, [IntrNoMem], [IntrReadMem]), "",
+  !if(NoMem, [], [SDNPMemOperand])>;
+
+def int_amdgcn_image_load : AMDGPUImageLoad;
+def int_amdgcn_image_load_mip : AMDGPUImageLoad;
+def int_amdgcn_image_getresinfo : AMDGPUImageLoad<1>;
+
+class AMDGPUImageStore : Intrinsic <
+  [],
+  [llvm_anyfloat_ty,  // vdata(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_anyint_ty,    // rsrc(SGPR)
+   llvm_i32_ty,       // dmask(imm)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty,        // slc(imm)
+   llvm_i1_ty,        // lwe(imm)
+   llvm_i1_ty],       // da(imm)
+  [IntrWriteMem], "", [SDNPMemOperand]>;
+
+def int_amdgcn_image_store : AMDGPUImageStore;
+def int_amdgcn_image_store_mip : AMDGPUImageStore;
+
+class AMDGPUImageSample<bit NoMem = 0> : Intrinsic <
+    [llvm_anyfloat_ty], // vdata(VGPR)
+    [llvm_anyfloat_ty,  // vaddr(VGPR)
+     llvm_anyint_ty,    // rsrc(SGPR)
+     llvm_v4i32_ty,     // sampler(SGPR)
+     llvm_i32_ty,       // dmask(imm)
+     llvm_i1_ty,        // unorm(imm)
+     llvm_i1_ty,        // glc(imm)
+     llvm_i1_ty,        // slc(imm)
+     llvm_i1_ty,        // lwe(imm)
+     llvm_i1_ty],       // da(imm)
+     !if(NoMem, [IntrNoMem], [IntrReadMem]), "",
+     !if(NoMem, [], [SDNPMemOperand])>;
+
+// Basic sample
+def int_amdgcn_image_sample : AMDGPUImageSample;
+def int_amdgcn_image_sample_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_d : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_l : AMDGPUImageSample;
+def int_amdgcn_image_sample_b : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_lz : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_cl : AMDGPUImageSample;
+
+// Sample with comparison
+def int_amdgcn_image_sample_c : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_l : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_lz : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_cl : AMDGPUImageSample;
+
+// Sample with offsets
+def int_amdgcn_image_sample_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_l_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_lz_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_cl_o : AMDGPUImageSample;
+
+// Sample with comparison and offsets
+def int_amdgcn_image_sample_c_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_l_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_lz_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_cl_o : AMDGPUImageSample;
+
+// Basic gather4
+def int_amdgcn_image_gather4 : AMDGPUImageSample;
+def int_amdgcn_image_gather4_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_l : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_lz : AMDGPUImageSample;
+
+// Gather4 with comparison
+def int_amdgcn_image_gather4_c : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_l : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_lz : AMDGPUImageSample;
+
+// Gather4 with offsets
+def int_amdgcn_image_gather4_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_l_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_lz_o : AMDGPUImageSample;
+
+// Gather4 with comparison and offsets
+def int_amdgcn_image_gather4_c_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_l_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_lz_o : AMDGPUImageSample;
+
+def int_amdgcn_image_getlod : AMDGPUImageSample<1>;
+
+class AMDGPUImageAtomic : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // vdata(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  [], "", [SDNPMemOperand]>;
+
+def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_sub : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_and : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_or : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_xor : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_inc : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_dec : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_cmpswap : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // src(VGPR)
+   llvm_i32_ty,       // cmp(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  [], "", [SDNPMemOperand]>;
+
+class AMDGPUBufferLoad : Intrinsic <
+  [llvm_anyfloat_ty],
+  [llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  [IntrReadMem], "", [SDNPMemOperand]>;
+def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
+def int_amdgcn_buffer_load : AMDGPUBufferLoad;
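+// For illustration, a four-component load with glc/slc clear (overload
+// suffix illustrative):
+//   %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(
+//             <4 x i32> %rsrc, i32 %vindex, i32 %offset, i1 false, i1 false)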
+
+class AMDGPUBufferStore : Intrinsic <
+  [],
+  [llvm_anyfloat_ty,  // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
+   llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  [IntrWriteMem], "", [SDNPMemOperand]>;
+def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
+def int_amdgcn_buffer_store : AMDGPUBufferStore;
+
+def int_amdgcn_tbuffer_load : Intrinsic <
+    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+    [llvm_v4i32_ty,   // rsrc(SGPR)
+     llvm_i32_ty,     // vindex(VGPR)
+     llvm_i32_ty,     // voffset(VGPR)
+     llvm_i32_ty,     // soffset(SGPR)
+     llvm_i32_ty,     // offset(imm)
+     llvm_i32_ty,     // dfmt(imm)
+     llvm_i32_ty,     // nfmt(imm)
+     llvm_i1_ty,     // glc(imm)
+     llvm_i1_ty],    // slc(imm)
+    [IntrReadMem], "", [SDNPMemOperand]>;
+
+def int_amdgcn_tbuffer_store : Intrinsic <
+    [],
+    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+     llvm_v4i32_ty,  // rsrc(SGPR)
+     llvm_i32_ty,    // vindex(VGPR)
+     llvm_i32_ty,    // voffset(VGPR)
+     llvm_i32_ty,    // soffset(SGPR)
+     llvm_i32_ty,    // offset(imm)
+     llvm_i32_ty,    // dfmt(imm)
+     llvm_i32_ty,    // nfmt(imm)
+     llvm_i1_ty,     // glc(imm)
+     llvm_i1_ty],    // slc(imm)
+    [IntrWriteMem], "", [SDNPMemOperand]>;
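+// For illustration (the offset/dfmt/nfmt immediates below are placeholders,
+// not real format encodings):
+//   call void @llvm.amdgcn.tbuffer.store.v4f32(<4 x float> %vdata,
+//          <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset,
+//          i32 0, i32 0, i32 0, i1 false, i1 false)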
+
+class AMDGPUBufferAtomic : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // vdata(VGPR)
+   llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
+   llvm_i1_ty],       // slc(imm)
+  [], "", [SDNPMemOperand]>;
+def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
+def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // src(VGPR)
+   llvm_i32_ty,       // cmp(VGPR)
+   llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
+   llvm_i1_ty],       // slc(imm)
+  [], "", [SDNPMemOperand]>;
+
+// Uses that do not set the done bit should set IntrWriteMem on the
+// call site.
+def int_amdgcn_exp : Intrinsic <[], [
+  llvm_i32_ty,       // tgt,
+  llvm_i32_ty,       // en
+  llvm_any_ty,       // src0 (f32 or i32)
+  LLVMMatchType<0>,  // src1
+  LLVMMatchType<0>,  // src2
+  LLVMMatchType<0>,  // src3
+  llvm_i1_ty,        // done
+  llvm_i1_ty         // vm
+  ],
+  []
+>;
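+// For illustration, a final export of four floats to target 0 with all
+// components enabled (en = 0xf, done = true):
+//   call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r, float %g,
+//          float %b, float %a, i1 true, i1 false)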
+
+// exp with compr bit set.
+def int_amdgcn_exp_compr : Intrinsic <[], [
+  llvm_i32_ty,       // tgt,
+  llvm_i32_ty,       // en
+  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
+  LLVMMatchType<0>,  // src1
+  llvm_i1_ty,        // done
+  llvm_i1_ty],       // vm
+  []
+>;
+
+def int_amdgcn_buffer_wbinvl1_sc :
+  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_buffer_wbinvl1 :
+  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_s_dcache_inv :
+  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_s_memtime :
+  GCCBuiltin<"__builtin_amdgcn_s_memtime">,
+  Intrinsic<[llvm_i64_ty], [], [IntrReadMem]>;
+
+def int_amdgcn_s_sleep :
+  GCCBuiltin<"__builtin_amdgcn_s_sleep">,
+  Intrinsic<[], [llvm_i32_ty], []>;
+
+def int_amdgcn_s_incperflevel :
+  GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
+  Intrinsic<[], [llvm_i32_ty], []>;
+
+def int_amdgcn_s_decperflevel :
+  GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
+  Intrinsic<[], [llvm_i32_ty], []>;
+
+def int_amdgcn_s_getreg :
+  GCCBuiltin<"__builtin_amdgcn_s_getreg">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+  [IntrReadMem, IntrSpeculatable]
+>;
+
+// int_amdgcn_s_getpc is provided to allow a specific style of
+// position-independent code to determine the high part of its address when
+// it is known (through convention) that the code and any data of interest do
+// not cross a 4 GiB address boundary. Using it for any other purpose may not
+// produce the desired result, since optimizations may move code around,
+// especially as we explicitly mark it IntrNoMem to enable such optimizations.
+def int_amdgcn_s_getpc :
+  GCCBuiltin<"__builtin_amdgcn_s_getpc">,
+  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
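+// For illustration, the typical use reads the PC once and keeps only the
+// high half:
+//   %pc = call i64 @llvm.amdgcn.s.getpc()
+//   %hi = and i64 %pc, -4294967296    ; 0xFFFFFFFF00000000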
+
+// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
+// param values: 0 = P10, 1 = P20, 2 = P0
+def int_amdgcn_interp_mov :
+  GCCBuiltin<"__builtin_amdgcn_interp_mov">,
+  Intrinsic<[llvm_float_ty],
+            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+
+// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
+// This intrinsic reads from lds, but the memory values are constant,
+// so it behaves like IntrNoMem.
+def int_amdgcn_interp_p1 :
+  GCCBuiltin<"__builtin_amdgcn_interp_p1">,
+  Intrinsic<[llvm_float_ty],
+            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+
+// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
+// See int_amdgcn_interp_p1 for why this is IntrNoMem.
+def int_amdgcn_interp_p2 :
+  GCCBuiltin<"__builtin_amdgcn_interp_p2">,
+  Intrinsic<[llvm_float_ty],
+            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
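+// For illustration, interpolating one attribute channel takes a p1/p2 pair,
+// where %i and %j are the barycentric coordinates and the attr_chan/attr
+// immediates below are placeholders:
+//   %p1 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 %m0)
+//   %v = call float @llvm.amdgcn.interp.p2(float %p1, float %j, i32 0,
+//          i32 0, i32 %m0)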
+
+// Pixel shaders only: whether the current pixel is live (i.e. not a helper
+// invocation for derivative computation).
+def int_amdgcn_ps_live : Intrinsic <
+  [llvm_i1_ty],
+  [],
+  [IntrNoMem]>;
+
+def int_amdgcn_mbcnt_lo :
+  GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_mbcnt_hi :
+  GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// llvm.amdgcn.ds.swizzle src offset
+def int_amdgcn_ds_swizzle :
+  GCCBuiltin<"__builtin_amdgcn_ds_swizzle">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
+  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
+  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_lerp :
+  GCCBuiltin<"__builtin_amdgcn_lerp">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_sad_u8 :
+  GCCBuiltin<"__builtin_amdgcn_sad_u8">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_msad_u8 :
+  GCCBuiltin<"__builtin_amdgcn_msad_u8">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_sad_hi_u8 :
+  GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_sad_u16 :
+  GCCBuiltin<"__builtin_amdgcn_sad_u16">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_qsad_pk_u16_u8 :
+  GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_mqsad_pk_u16_u8 :
+  GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_mqsad_u32_u8 :
+  GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_cvt_pk_u8_f32 :
+  GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
+  Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_icmp :
+  Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty],
+            [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_fcmp :
+  Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty],
+            [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_readfirstlane :
+  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+// The lane argument must be uniform across the currently active threads of the
+// current wave. Otherwise, the result is undefined.
+def int_amdgcn_readlane :
+  GCCBuiltin<"__builtin_amdgcn_readlane">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+// The value to write and lane select arguments must be uniform across the
+// currently active threads of the current wave. Otherwise, the result is
+// undefined.
+def int_amdgcn_writelane :
+  GCCBuiltin<"__builtin_amdgcn_writelane">,
+  Intrinsic<[llvm_i32_ty], [
+    llvm_i32_ty,    // uniform value to write: returned by the selected lane
+    llvm_i32_ty,    // uniform lane select
+    llvm_i32_ty     // returned by all lanes other than the selected one
+  ],
+  [IntrNoMem, IntrConvergent]
+>;
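+// For illustration: lane 5 of %r receives %val, while every other active
+// lane keeps %src:
+//   %r = call i32 @llvm.amdgcn.writelane(i32 %val, i32 5, i32 %src)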
+
+def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty],
+  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+def int_amdgcn_alignbyte : Intrinsic<[llvm_i32_ty],
+  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
+
+// Copies the source value to the destination value, with the guarantee that
+// the source value is computed as if the entire program were executed in WQM.
+def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
+  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+// Return true if at least one thread within the pixel quad passes true into
+// the function.
+def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
+  [llvm_i1_ty], [IntrNoMem, IntrConvergent]
+>;
+
+// If false, set EXEC=0 for the current thread until the end of program.
+def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;
+
+// Copies the active channels of the source value to the destination value,
+// with the guarantee that the source value is computed as if the entire
+// program were executed in Whole Wavefront Mode, i.e. with all channels
+// enabled, with one exception: phi nodes which require WWM return an
+// undefined value.
+def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
+  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
+// Given a value, copies it while setting all the inactive lanes to a given
+// value. Note that OpenGL helper lanes are considered active, so if the
+// program ever uses WQM, then the instruction and the first source will be
+// computed in WQM.
+def int_amdgcn_set_inactive :
+  Intrinsic<[llvm_anyint_ty],
+            [LLVMMatchType<0>, // value to be copied
+             LLVMMatchType<0>], // value for the inactive lanes to take
+            [IntrNoMem, IntrConvergent]>;
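+// For illustration, forcing the inactive lanes to read back zero (overload
+// suffix illustrative):
+//   %r = call i32 @llvm.amdgcn.set.inactive.i32(i32 %x, i32 0)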
+
+//===----------------------------------------------------------------------===//
+// CI+ Intrinsics
+//===----------------------------------------------------------------------===//
+
+def int_amdgcn_s_dcache_inv_vol :
+  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_buffer_wbinvl1_vol :
+  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
+  Intrinsic<[], [], []>;
+
+//===----------------------------------------------------------------------===//
+// VI Intrinsics
+//===----------------------------------------------------------------------===//
+
+// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
+def int_amdgcn_mov_dpp :
+  Intrinsic<[llvm_anyint_ty],
+            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+             llvm_i1_ty], [IntrNoMem, IntrConvergent]>;
+
+// llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
+// Should be equivalent to:
+// v_mov_b32 <dest> <old>
+// v_mov_b32 <dest> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
+def int_amdgcn_update_dpp :
+  Intrinsic<[llvm_anyint_ty],
+            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty,
+             llvm_i32_ty, llvm_i1_ty], [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_s_dcache_wb :
+  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_s_dcache_wb_vol :
+  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
+  Intrinsic<[], [], []>;
+
+def int_amdgcn_s_memrealtime :
+  GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
+  Intrinsic<[llvm_i64_ty], [], [IntrReadMem]>;
+
+// llvm.amdgcn.ds.permute <index> <src>
+def int_amdgcn_ds_permute :
+  GCCBuiltin<"__builtin_amdgcn_ds_permute">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+// llvm.amdgcn.ds.bpermute <index> <src>
+def int_amdgcn_ds_bpermute :
+  GCCBuiltin<"__builtin_amdgcn_ds_bpermute">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+
+//===----------------------------------------------------------------------===//
+// Special Intrinsics for backend internal use only. No frontend
+// should emit calls to these.
+//===----------------------------------------------------------------------===//
+def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
+  [llvm_i1_ty], [IntrConvergent]
+>;
+
+def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
+  [llvm_i64_ty], [IntrConvergent]
+>;
+
+def int_amdgcn_break : Intrinsic<[llvm_i64_ty],
+  [llvm_i64_ty], [IntrNoMem, IntrConvergent]
+>;
+
+def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty],
+  [llvm_i1_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
+>;
+
+def int_amdgcn_else_break : Intrinsic<[llvm_i64_ty],
+  [llvm_i64_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
+>;
+
+def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
+  [llvm_i64_ty], [IntrConvergent]
+>;
+
+def int_amdgcn_end_cf : Intrinsic<[], [llvm_i64_ty], [IntrConvergent]>;
+
+// Represent unreachable in a divergent region.
+def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
+
+// Emit a 2.5 ulp, no-denormal division. Should only be inserted by a pass
+// based on !fpmath metadata.
+def int_amdgcn_fdiv_fast : Intrinsic<
+  [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsARM.td b/linux-x64/clang/include/llvm/IR/IntrinsicsARM.td
new file mode 100644
index 0000000..fe38613
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsARM.td
@@ -0,0 +1,716 @@
+//===- IntrinsicsARM.td - Defines ARM intrinsics -----------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the ARM-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// TLS
+
+let TargetPrefix = "arm" in {  // All intrinsics start with "llvm.arm.".
+
+// A space-consuming intrinsic primarily for testing ARMConstantIslands. The
+// first argument is the number of bytes this "instruction" takes up; the
+// second argument and the return value are essentially chains, used to force
+// ordering during ISel.
+def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// 16-bit multiplications
+def int_arm_smulbb : GCCBuiltin<"__builtin_arm_smulbb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulbt : GCCBuiltin<"__builtin_arm_smulbt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultb : GCCBuiltin<"__builtin_arm_smultb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultt : GCCBuiltin<"__builtin_arm_smultt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwb : GCCBuiltin<"__builtin_arm_smulwb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwt : GCCBuiltin<"__builtin_arm_smulwt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Saturating Arithmetic
+
+def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [Commutative, IntrNoMem]>;
+def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Accumulating multiplications
+def int_arm_smlabb : GCCBuiltin<"__builtin_arm_smlabb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_smlabt : GCCBuiltin<"__builtin_arm_smlabt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_smlatb : GCCBuiltin<"__builtin_arm_smlatb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_smlatt : GCCBuiltin<"__builtin_arm_smlatt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_smlawb : GCCBuiltin<"__builtin_arm_smlawb">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_smlawt : GCCBuiltin<"__builtin_arm_smlawt">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+
+// Parallel 16-bit saturation
+def int_arm_ssat16 : GCCBuiltin<"__builtin_arm_ssat16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat16 : GCCBuiltin<"__builtin_arm_usat16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Packing and unpacking
+def int_arm_sxtab16 : GCCBuiltin<"__builtin_arm_sxtab16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_sxtb16 : GCCBuiltin<"__builtin_arm_sxtb16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtab16 : GCCBuiltin<"__builtin_arm_uxtab16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtb16 : GCCBuiltin<"__builtin_arm_uxtb16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// Parallel selection, reads the GE flags.
+def int_arm_sel : GCCBuiltin<"__builtin_arm_sel">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+
+// Parallel 8-bit addition and subtraction
+def int_arm_qadd8  : GCCBuiltin<"__builtin_arm_qadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub8  : GCCBuiltin<"__builtin_arm_qsub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd8  : GCCBuiltin<"__builtin_arm_sadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd8  : GCCBuiltin<"__builtin_arm_shadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub8  : GCCBuiltin<"__builtin_arm_shsub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssub8  : GCCBuiltin<"__builtin_arm_ssub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd8  : GCCBuiltin<"__builtin_arm_uadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd8  : GCCBuiltin<"__builtin_arm_uhadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub8  : GCCBuiltin<"__builtin_arm_uhsub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd8  : GCCBuiltin<"__builtin_arm_uqadd8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub8  : GCCBuiltin<"__builtin_arm_uqsub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usub8  : GCCBuiltin<"__builtin_arm_usub8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Sum of 8-bit absolute differences
+def int_arm_usad8  : GCCBuiltin<"__builtin_arm_usad8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usada8  : GCCBuiltin<"__builtin_arm_usada8">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+
+// Parallel 16-bit addition and subtraction
+def int_arm_qadd16  : GCCBuiltin<"__builtin_arm_qadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qasx  : GCCBuiltin<"__builtin_arm_qasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsax  : GCCBuiltin<"__builtin_arm_qsax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub16  : GCCBuiltin<"__builtin_arm_qsub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd16  : GCCBuiltin<"__builtin_arm_sadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_sasx  : GCCBuiltin<"__builtin_arm_sasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd16  : GCCBuiltin<"__builtin_arm_shadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shasx  : GCCBuiltin<"__builtin_arm_shasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsax  : GCCBuiltin<"__builtin_arm_shsax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub16  : GCCBuiltin<"__builtin_arm_shsub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssax  : GCCBuiltin<"__builtin_arm_ssax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_ssub16  : GCCBuiltin<"__builtin_arm_ssub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd16  : GCCBuiltin<"__builtin_arm_uadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uasx  : GCCBuiltin<"__builtin_arm_uasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd16  : GCCBuiltin<"__builtin_arm_uhadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhasx  : GCCBuiltin<"__builtin_arm_uhasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsax  : GCCBuiltin<"__builtin_arm_uhsax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub16  : GCCBuiltin<"__builtin_arm_uhsub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd16  : GCCBuiltin<"__builtin_arm_uqadd16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqasx  : GCCBuiltin<"__builtin_arm_uqasx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsax  : GCCBuiltin<"__builtin_arm_uqsax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub16  : GCCBuiltin<"__builtin_arm_uqsub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usax  : GCCBuiltin<"__builtin_arm_usax">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_usub16  : GCCBuiltin<"__builtin_arm_usub16">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Parallel 16-bit multiplication
+def int_arm_smlad : GCCBuiltin<"__builtin_arm_smlad">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+def int_arm_smladx : GCCBuiltin<"__builtin_arm_smladx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+def int_arm_smlald : GCCBuiltin<"__builtin_arm_smlald">,
+    Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+              [IntrNoMem]>;
+def int_arm_smlaldx : GCCBuiltin<"__builtin_arm_smlaldx">,
+    Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+              [IntrNoMem]>;
+def int_arm_smlsd : GCCBuiltin<"__builtin_arm_smlsd">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+def int_arm_smlsdx : GCCBuiltin<"__builtin_arm_smlsdx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+def int_arm_smlsld : GCCBuiltin<"__builtin_arm_smlsld">,
+    Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+              [IntrNoMem]>;
+def int_arm_smlsldx : GCCBuiltin<"__builtin_arm_smlsldx">,
+    Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+              [IntrNoMem]>;
+def int_arm_smuad : GCCBuiltin<"__builtin_arm_smuad">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smuadx : GCCBuiltin<"__builtin_arm_smuadx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusd : GCCBuiltin<"__builtin_arm_smusd">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusdx : GCCBuiltin<"__builtin_arm_smusdx">,
+    Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+
+//===----------------------------------------------------------------------===//
+// Load, Store and Clear exclusive
+
+def int_arm_ldrex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
+def int_arm_strex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
+def int_arm_ldaex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
+def int_arm_stlex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
+def int_arm_clrex : Intrinsic<[]>;
+
+def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+    llvm_ptr_ty]>;
+def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
+
+def int_arm_stlexd : Intrinsic<[llvm_i32_ty],
+                               [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty]>;
+def int_arm_ldaexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
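+// For illustration, an exclusive read-modify-write loop on an i32, where
+// strex returns 0 on success (overload suffixes illustrative):
+// loop:
+//   %old = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+//   %new = add i32 %old, 1
+//   %fail = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
+//   %retry = icmp ne i32 %fail, 0
+//   br i1 %retry, label %loop, label %done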
+
+//===----------------------------------------------------------------------===//
+// Data barrier instructions
+def int_arm_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
+                  Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
+                  Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
+                  Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// VFP
+
+def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
+                       Intrinsic<[llvm_i32_ty], [], []>;
+def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
+                       Intrinsic<[], [llvm_i32_ty], []>;
+def int_arm_vcvtr     : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+                                  [IntrNoMem]>;
+def int_arm_vcvtru    : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+                                  [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Coprocessor
+
+def int_arm_ldc : GCCBuiltin<"__builtin_arm_ldc">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_ldcl : GCCBuiltin<"__builtin_arm_ldcl">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_ldc2 : GCCBuiltin<"__builtin_arm_ldc2">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_ldc2l : GCCBuiltin<"__builtin_arm_ldc2l">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+
+def int_arm_stc : GCCBuiltin<"__builtin_arm_stc">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_stcl : GCCBuiltin<"__builtin_arm_stcl">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+def int_arm_stc2l : GCCBuiltin<"__builtin_arm_stc2l">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
+
+// Move to coprocessor
+def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from coprocessor
+def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
+                  MSBuiltin<"_MoveFromCoprocessor">,
+   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                             llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
+                   MSBuiltin<"_MoveFromCoprocessor2">,
+   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                             llvm_i32_ty, llvm_i32_ty], []>;
+
+// Coprocessor data processing
+def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
+   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from two registers to coprocessor
+def int_arm_mcrr : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                                  llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcrr2 : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                                   llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_arm_mrrc : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
+                              llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mrrc2 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
+                               llvm_i32_ty, llvm_i32_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+def int_arm_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+def int_arm_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+    [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// HINT
+
+def int_arm_hint : Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_dbg : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// UND (reserved undefined sequence)
+
+def int_arm_undefined : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON)
+
+// The following classes do not correspond directly to GCC builtins.
+class Neon_1Arg_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+class Neon_1Arg_Narrow_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+class Neon_2Arg_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+              [IntrNoMem]>;
+class Neon_2Arg_Narrow_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>, LLVMExtendedType<0>],
+              [IntrNoMem]>;
+class Neon_2Arg_Long_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+              [IntrNoMem]>;
+class Neon_3Arg_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+              [IntrNoMem]>;
+class Neon_3Arg_Long_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>, LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+              [IntrNoMem]>;
+class Neon_CvtFxToFP_Intrinsic
+  : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPToFx_Intrinsic
+  : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPtoInt_1Arg_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+class Neon_Compare_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+              [IntrNoMem]>;
+
+// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
+// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
+// Overall, the classes take between 2 and 6 v8i8 arguments.
+class Neon_Tbl2Arg_Intrinsic
+  : Intrinsic<[llvm_v8i8_ty],
+              [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl3Arg_Intrinsic
+  : Intrinsic<[llvm_v8i8_ty],
+              [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl4Arg_Intrinsic
+  : Intrinsic<[llvm_v8i8_ty],
+              [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
+              [IntrNoMem]>;
+class Neon_Tbl5Arg_Intrinsic
+  : Intrinsic<[llvm_v8i8_ty],
+              [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+               llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl6Arg_Intrinsic
+  : Intrinsic<[llvm_v8i8_ty],
+              [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+               llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+
+// Arithmetic ops
+
+let IntrProperties = [IntrNoMem, Commutative] in {
+
+  // Vector Add.
+  def int_arm_neon_vhadds : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vhaddu : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vrhadds : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vrhaddu : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vqadds : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;
+
+  // Vector Multiply.
+  def int_arm_neon_vmulp : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vqdmulh : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vqrdmulh : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vmulls : Neon_2Arg_Long_Intrinsic;
+  def int_arm_neon_vmullu : Neon_2Arg_Long_Intrinsic;
+  def int_arm_neon_vmullp : Neon_2Arg_Long_Intrinsic;
+  def int_arm_neon_vqdmull : Neon_2Arg_Long_Intrinsic;
+
+  // Vector Maximum.
+  def int_arm_neon_vmaxs : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vmaxu : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vmaxnm : Neon_2Arg_Intrinsic;
+
+  // Vector Minimum.
+  def int_arm_neon_vmins : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vminu : Neon_2Arg_Intrinsic;
+  def int_arm_neon_vminnm : Neon_2Arg_Intrinsic;
+
+  // Vector Reciprocal Step.
+  def int_arm_neon_vrecps : Neon_2Arg_Intrinsic;
+
+  // Vector Reciprocal Square Root Step.
+  def int_arm_neon_vrsqrts : Neon_2Arg_Intrinsic;
+}
+
+// Vector Subtract.
+def int_arm_neon_vhsubs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vhsubu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Absolute Compare.
+def int_arm_neon_vacge : Neon_Compare_Intrinsic;
+def int_arm_neon_vacgt : Neon_Compare_Intrinsic;
+
+// Vector Absolute Differences.
+def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
+def int_arm_neon_vabdu : Neon_2Arg_Intrinsic;
+
+// Vector Pairwise Add.
+def int_arm_neon_vpadd : Neon_2Arg_Intrinsic;
+
+// Vector Pairwise Add Long.
+// Note: This is different from the other "long" NEON intrinsics because
+// the result vector has half as many elements as the source vector.
+// The source and destination vector types must be specified separately.
+def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+                                     [IntrNoMem]>;
+def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+                                     [IntrNoMem]>;
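+// For illustration, pairwise-adding eight i8 lanes into four i16 sums
+// (overload suffixes illustrative):
+//   %r = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %a)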
+
+// Vector Pairwise Add and Accumulate Long.
+// Note: This is similar to vpaddl but the destination vector also appears
+// as the first argument.
+def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMMatchType<0>, llvm_anyvector_ty],
+                                     [IntrNoMem]>;
+def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMMatchType<0>, llvm_anyvector_ty],
+                                     [IntrNoMem]>;
+
+// Vector Pairwise Maximum and Minimum.
+def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpmaxu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpmins : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpminu : Neon_2Arg_Intrinsic;
+
+// Vector Shifts:
+//
+// The various saturating and rounding vector shift operations need to be
+// represented by intrinsics in LLVM, and even the basic VSHL variable shift
+// operation cannot be safely translated to LLVM's shift operators.  VSHL can
+// be used for both left and right shifts, or even combinations of the two,
+// depending on the signs of the shift amounts.  It also has well-defined
+// behavior for shift amounts that LLVM leaves undefined.  Only basic shifts
+// by constants can be represented with LLVM's shift operators.
+//
+// The shift counts for these intrinsics are always vectors, even for constant
+// shifts, where the constant is replicated.  For consistency with VSHL (and
+// other variable shift instructions), left shifts have positive shift counts
+// and right shifts have negative shift counts.  This convention is also used
+// for constant right shift intrinsics, and to help preserve sanity, the
+// intrinsic names use "shift" instead of either "shl" or "shr".  Where
+// applicable, signed and unsigned versions of the intrinsics are
+// distinguished with "s" and "u" suffixes.  A few NEON shift instructions,
+// such as VQSHLU, take signed operands but produce unsigned results; these
+// use a "su" suffix.
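+//
+// For illustration, a signed right shift of each i16 lane by 3 is written
+// as a shift by -3, with the count splatted across the vector:
+//   %r = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %a,
+//          <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>)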
+
+// Vector Shift.
+def int_arm_neon_vshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vshiftu : Neon_2Arg_Intrinsic;
+
+// Vector Rounding Shift.
+def int_arm_neon_vrshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrshiftn : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Saturating Shift.
+def int_arm_neon_vqshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftsu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftns : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqshiftnu : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqshiftnsu : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Saturating Rounding Shift.
+def int_arm_neon_vqrshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqrshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqrshiftns : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqrshiftnu : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqrshiftnsu : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Shift and Insert.
+def int_arm_neon_vshiftins : Neon_3Arg_Intrinsic;
+
+// Vector Absolute Value and Saturating Absolute Value.
+def int_arm_neon_vabs : Neon_1Arg_Intrinsic;
+def int_arm_neon_vqabs : Neon_1Arg_Intrinsic;
+
+// Vector Saturating Negate.
+def int_arm_neon_vqneg : Neon_1Arg_Intrinsic;
+
+// Vector Count Leading Sign/Zero Bits.
+def int_arm_neon_vcls : Neon_1Arg_Intrinsic;
+
+// Vector Reciprocal Estimate.
+def int_arm_neon_vrecpe : Neon_1Arg_Intrinsic;
+
+// Vector Reciprocal Square Root Estimate.
+def int_arm_neon_vrsqrte : Neon_1Arg_Intrinsic;
+
+// Vector Conversions Between Floating-point and Integer
+def int_arm_neon_vcvtau : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtas : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtnu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtns : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtpu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtps : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtmu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtms : Neon_CvtFPtoInt_1Arg_Intrinsic;
+
+// Vector Conversions Between Floating-point and Fixed-point.
+def int_arm_neon_vcvtfp2fxs : Neon_CvtFPToFx_Intrinsic;
+def int_arm_neon_vcvtfp2fxu : Neon_CvtFPToFx_Intrinsic;
+def int_arm_neon_vcvtfxs2fp : Neon_CvtFxToFP_Intrinsic;
+def int_arm_neon_vcvtfxu2fp : Neon_CvtFxToFP_Intrinsic;
+
+// Vector Conversions Between Half-Precision and Single-Precision.
+def int_arm_neon_vcvtfp2hf
+    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_arm_neon_vcvthf2fp
+    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
+// Narrowing Saturating Vector Moves.
+def int_arm_neon_vqmovns : Neon_1Arg_Narrow_Intrinsic;
+def int_arm_neon_vqmovnu : Neon_1Arg_Narrow_Intrinsic;
+def int_arm_neon_vqmovnsu : Neon_1Arg_Narrow_Intrinsic;
+
+// Vector Table Lookup.
+// The first 1-4 arguments are the table.
+def int_arm_neon_vtbl1 : Neon_Tbl2Arg_Intrinsic;
+def int_arm_neon_vtbl2 : Neon_Tbl3Arg_Intrinsic;
+def int_arm_neon_vtbl3 : Neon_Tbl4Arg_Intrinsic;
+def int_arm_neon_vtbl4 : Neon_Tbl5Arg_Intrinsic;
+
+// Vector Table Extension.
+// Some elements of the destination vector may not be updated, so the original
+// value of that vector is passed as the first argument.  The next 1-4
+// arguments after that are the table.
+def int_arm_neon_vtbx1 : Neon_Tbl3Arg_Intrinsic;
+def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic;
+def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic;
+def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
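+// For illustration, a one-table lookup where each byte of %idx selects a
+// byte of %table (out-of-range indices produce 0 for VTBL):
+//   %r = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %table, <8 x i8> %idx)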
+
+// Vector Rounding
+def int_arm_neon_vrintn : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintx : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrinta : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintz : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintm : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintp : Neon_1Arg_Intrinsic;
+
+// De-interleaving vector loads from N-element structures.
+// Source operands are the address and alignment.
+def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
+                                  [llvm_anyptr_ty, llvm_i32_ty],
+                                  [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                                  [llvm_anyptr_ty, llvm_i32_ty],
+                                  [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                   LLVMMatchType<0>],
+                                  [llvm_anyptr_ty, llvm_i32_ty],
+                                  [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                   LLVMMatchType<0>, LLVMMatchType<0>],
+                                  [llvm_anyptr_ty, llvm_i32_ty],
+                                  [IntrReadMem, IntrArgMemOnly]>;
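+// For illustration, de-interleaving two <8 x i8> vectors from memory with
+// 8-byte alignment (overload suffixes illustrative):
+//   %pair = call { <8 x i8>, <8 x i8> }
+//             @llvm.arm.neon.vld2.v8i8.p0i8(i8* %ptr, i32 8)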
+
+// Vector load N-element structure to one lane.
+// Source operands are: the address, the N input vectors (since only one
+// lane is assigned), the lane number, and the alignment.
+def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                                      [llvm_anyptr_ty, LLVMMatchType<0>,
+                                       LLVMMatchType<0>, llvm_i32_ty,
+                                       llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                       LLVMMatchType<0>],
+                                      [llvm_anyptr_ty, LLVMMatchType<0>,
+                                       LLVMMatchType<0>, LLVMMatchType<0>,
+                                       llvm_i32_ty, llvm_i32_ty],
+                                      [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                       LLVMMatchType<0>, LLVMMatchType<0>],
+                                      [llvm_anyptr_ty, LLVMMatchType<0>,
+                                       LLVMMatchType<0>, LLVMMatchType<0>,
+                                       LLVMMatchType<0>, llvm_i32_ty,
+                                       llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+// Interleaving vector stores from N-element structures.
+// Source operands are: the address, the N vectors, and the alignment.
+def int_arm_neon_vst1 : Intrinsic<[],
+                                  [llvm_anyptr_ty, llvm_anyvector_ty,
+                                   llvm_i32_ty], [IntrArgMemOnly]>;
+def int_arm_neon_vst2 : Intrinsic<[],
+                                  [llvm_anyptr_ty, llvm_anyvector_ty,
+                                   LLVMMatchType<1>, llvm_i32_ty],
+                                  [IntrArgMemOnly]>;
+def int_arm_neon_vst3 : Intrinsic<[],
+                                  [llvm_anyptr_ty, llvm_anyvector_ty,
+                                   LLVMMatchType<1>, LLVMMatchType<1>,
+                                   llvm_i32_ty], [IntrArgMemOnly]>;
+def int_arm_neon_vst4 : Intrinsic<[],
+                                  [llvm_anyptr_ty, llvm_anyvector_ty,
+                                   LLVMMatchType<1>, LLVMMatchType<1>,
+                                   LLVMMatchType<1>, llvm_i32_ty],
+                                  [IntrArgMemOnly]>;
+
+// Vector store N-element structure from one lane.
+// Source operands are: the address, the N vectors, the lane number, and
+// the alignment.
+def int_arm_neon_vst2lane : Intrinsic<[],
+                                      [llvm_anyptr_ty, llvm_anyvector_ty,
+                                       LLVMMatchType<1>, llvm_i32_ty,
+                                       llvm_i32_ty], [IntrArgMemOnly]>;
+def int_arm_neon_vst3lane : Intrinsic<[],
+                                      [llvm_anyptr_ty, llvm_anyvector_ty,
+                                       LLVMMatchType<1>, LLVMMatchType<1>,
+                                       llvm_i32_ty, llvm_i32_ty],
+                                      [IntrArgMemOnly]>;
+def int_arm_neon_vst4lane : Intrinsic<[],
+                                      [llvm_anyptr_ty, llvm_anyvector_ty,
+                                       LLVMMatchType<1>, LLVMMatchType<1>,
+                                       LLVMMatchType<1>, llvm_i32_ty,
+                                       llvm_i32_ty], [IntrArgMemOnly]>;
+
+// Vector bitwise select.
+def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty],
+                        [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+                        [IntrNoMem]>;
+
+
+// Crypto instructions
+class AES_1Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+                                     [llvm_v16i8_ty], [IntrNoMem]>;
+class AES_2Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+                                     [llvm_v16i8_ty, llvm_v16i8_ty],
+                                     [IntrNoMem]>;
+
+class SHA_1Arg_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+                                     [IntrNoMem]>;
+class SHA_2Arg_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+                                     [llvm_v4i32_ty, llvm_v4i32_ty],
+                                     [IntrNoMem]>;
+class SHA_3Arg_i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+                                   [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+                                   [IntrNoMem]>;
+class SHA_3Arg_v4i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+                                   [llvm_v4i32_ty, llvm_v4i32_ty,llvm_v4i32_ty],
+                                   [IntrNoMem]>;
+
+def int_arm_neon_aesd : AES_2Arg_Intrinsic;
+def int_arm_neon_aese : AES_2Arg_Intrinsic;
+def int_arm_neon_aesimc : AES_1Arg_Intrinsic;
+def int_arm_neon_aesmc : AES_1Arg_Intrinsic;
+def int_arm_neon_sha1h : SHA_1Arg_Intrinsic;
+def int_arm_neon_sha1su1 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha256su0 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha1c : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1m : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1p : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1su0 : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h2 : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256su1 : SHA_3Arg_v4i32_Intrinsic;
+
+} // end TargetPrefix
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsBPF.td b/linux-x64/clang/include/llvm/IR/IntrinsicsBPF.td
new file mode 100644
index 0000000..94eca8e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsBPF.td
@@ -0,0 +1,24 @@
+//===- IntrinsicsBPF.td - Defines BPF intrinsics -----------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the BPF-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+// Specialized loads from packet
+let TargetPrefix = "bpf" in {  // All intrinsics start with "llvm.bpf."
+  def int_bpf_load_byte : GCCBuiltin<"__builtin_bpf_load_byte">,
+              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+  def int_bpf_load_half : GCCBuiltin<"__builtin_bpf_load_half">,
+              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+  def int_bpf_load_word : GCCBuiltin<"__builtin_bpf_load_word">,
+              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+  def int_bpf_pseudo : GCCBuiltin<"__builtin_bpf_pseudo">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsHexagon.td b/linux-x64/clang/include/llvm/IR/IntrinsicsHexagon.td
new file mode 100644
index 0000000..25f4215
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsHexagon.td
@@ -0,0 +1,10975 @@
+//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the Hexagon-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all Hexagon intrinsics.
+//
+// All Hexagon intrinsics start with "llvm.hexagon.".
+let TargetPrefix = "hexagon" in {
+  /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+  class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+
+  /// Hexagon_NonGCC_Intrinsic - Base class for bitcode-convertible Hexagon
+  /// intrinsics.
+  class Hexagon_NonGCC_Intrinsic<list<LLVMType> ret_types,
+                                 list<LLVMType> param_types,
+                                 list<IntrinsicProperty> properties>
+    : Intrinsic<ret_types, param_types, properties>;
+}
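+
+// For example (illustrative only), a def of the form
+//   def int_hexagon_A2_add : Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
+// produces the intrinsic llvm.hexagon.A2.add and maps it to the builtin
+// __builtin_HEXAGON_A2_add via the GCCBuiltin strconcat above.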
+
+//===----------------------------------------------------------------------===//
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
+// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_ptr_ty],
+                          [IntrNoMem]>;
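+//
+// Note on the abbreviations used in the classes below (an editorial
+// summary): qi, hi, si, di, sf and df stand for the GCC machine modes bool,
+// i16, i32, i64, float and double; predicate (qi) operands and results are
+// usually modeled as i32 at the IR level, as the class bodies show.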
+//
+// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
+// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i16_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
+// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
+// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
+// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
+// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
+// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
+// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
+// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
+// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SIDI,BT_BOOL,BT_INT,BT_LONGLONG) ->
+// Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DISI,BT_BOOL,BT_LONGLONG,BT_INT) ->
+// Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i1_ty, llvm_i1_ty, llvm_i1_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
+// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
+// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG,BT_INT) ->
+// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG) ->
+// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
+//                     BT_LONGLONG) ->
+// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+//                     BT_BOOL) ->
+// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+
+class Hexagon_mem_memmemsi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+                           llvm_i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_mem_memsisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_mem_memdisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_mem_memsisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_v256_v256v256_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+                          [IntrArgMemOnly]>;
+
+//
+// Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_double_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_float_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_float_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_float_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_float_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_double_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_float_ty, llvm_float_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_float_ty, llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_float_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+                                            llvm_float_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+                                            llvm_float_ty, llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_double_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_double_ty],
+                          [IntrNoMem]>;
+//
+// Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_double_ty, llvm_double_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_double_ty, llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+                                             llvm_double_ty],
+                          [IntrNoMem, Throws]>;
+//
+// Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+                                             llvm_double_ty, llvm_i32_ty],
+                          [IntrNoMem, Throws]>;
+
+
+// This one below will not be auto-generated,
+// so make sure you don't overwrite it.
+//
+// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldd :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldd">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldw,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldw :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldw">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldh,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldh :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldh">;
+//
+// BUILTIN_INFO_NONCONST(circ_lduh,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_lduh :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_lduh">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldb,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldb :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldb">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldub,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldub :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldub">;
+
+//
+// BUILTIN_INFO_NONCONST(circ_std,PTR_ftype_PTRDISISI,4)
+//
+def int_hexagon_circ_std :
+Hexagon_mem_memdisisi_Intrinsic<"circ_std">;
+//
+// BUILTIN_INFO_NONCONST(circ_stw,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_stw :
+Hexagon_mem_memsisisi_Intrinsic<"circ_stw">;
+//
+// BUILTIN_INFO_NONCONST(circ_sth,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_sth :
+Hexagon_mem_memsisisi_Intrinsic<"circ_sth">;
+//
+// BUILTIN_INFO_NONCONST(circ_sthhi,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_sthhi :
+Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
+//
+// BUILTIN_INFO_NONCONST(circ_stb,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_stb :
+Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
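+
+// Illustrative C-level use of the circular-store builtins above (an
+// editorial sketch; argument names are placeholders, types follow the
+// memsisisi class above):
+//   void *next = __builtin_circ_stw(addr, value, modifier, offset);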
+
+
+def int_hexagon_mm256i_vaddw :
+Hexagon_v256_v256v256_Intrinsic<"_mm256i_vaddw">;
+
+
+// This one above will not be auto-generated,
+// so make sure you don't overwrite it.
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpeqp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpeqp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtup :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsset :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsset :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgti">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgei :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgei">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgeui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgeui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmplt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmplt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpltu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpltu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpltei :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpltei">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteui">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplte :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplte">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_and">;
+//
+// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_or">;
+//
+// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_xor">;
+//
+// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
+//
+def int_hexagon_C2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON_C2_not">;
+//
+// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_orn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_orn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
+//
+def int_hexagon_C2_pxfer_map :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_pxfer_map">;
+//
+// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_any8 :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_any8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_all8 :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_all8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
+//
+def int_hexagon_C2_vitpack :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C2_vitpack">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_mux :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_mux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxii :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxii">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxir :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxir">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxri :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
+//
+def int_hexagon_C2_vmux :
+Hexagon_di_qididi_Intrinsic<"HEXAGON_C2_vmux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
+//
+def int_hexagon_C2_mask :
+Hexagon_di_qi_Intrinsic<"HEXAGON_C2_mask">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbeq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbeqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeq_any,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbeq_any :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgti">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpheq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpheqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpheqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpweq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpweq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpweqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpweqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpweqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_boundscheck,QI_ftype_SIDI,2)
+//
+def int_hexagon_A4_boundscheck :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_A4_boundscheck">;
+//
+// BUILTIN_INFO(HEXAGON.A4_tlbmatch,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_tlbmatch :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_tlbmatch">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
+//
+def int_hexagon_C2_tfrpr :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_tfrpr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
+//
+def int_hexagon_C2_tfrrp :
+Hexagon_si_si_Intrinsic<"HEXAGON_C2_tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9 :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysmi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysmi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsip :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsip">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsin :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsin">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysu_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysu_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysu_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M4_nac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_nac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyui">;
+//
+// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_maci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_maci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_acci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_acci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_accii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_accii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_nacci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_nacci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_naccii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_naccii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_subacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr_u2,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr_u2 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr_u2">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addi">;
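+//
+// The "vmpy2"/"vmac2" family below multiplies packed 16-bit lanes: each
+// 32-bit source is treated as two halfwords and the widened products
+// fill a 64-bit (DI) destination; "su" mixes signed and unsigned
+// operands, and the "s0pack"/"s1pack" forms round and repack the
+// results into 32 bits. (Hedged summary from the intrinsic names.)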
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrmac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybuu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybuu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbuu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbuu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybuu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybuu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybsu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybsu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbuu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbuu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbsu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbsu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vdmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vdmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vdmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vdmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
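+//
+// The "cmpy" defs below are complex multiplies over packed 16-bit
+// real/imaginary pairs: a trailing "c" conjugates the second operand,
+// "rs" denotes a rounded, saturated 32-bit result, and "cmac"/"cnac"
+// accumulate into or subtract from a 64-bit accumulator. (Hedged
+// summary based on the Hexagon mnemonic conventions.)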
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_di_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
+//
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmaci_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcrotate :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate_acc,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S4_vrcrotate_acc :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S4_vrcrotate_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_vrcrotate :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_vrcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcnegh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcnegh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcnegh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrcnegh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vrcnegh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vrcnegh">;
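+//
+// M4_pmpyw / M4_vpmpyh below appear to be carry-less (polynomial,
+// GF(2)) multiplies of word and packed-halfword operands, with the
+// "_acc" forms XOR-accumulating the product into the 64-bit
+// destination. (Inferred from the mnemonics, not from this file.)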
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_pmpyw :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_pmpyw">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_vpmpyh :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_vpmpyh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_pmpyw_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_vpmpyh_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
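+//
+// The A2/A4 definitions below are the scalar ALU intrinsics. Each def
+// exposes a GCC-style builtin whose signature matches the BUILTIN_INFO
+// string; for example, the next entry corresponds (roughly) to
+//   int __builtin_HEXAGON_A2_add(int a, int b);   // returns a + b
+// where SI_ftype_SISI records the same int(int, int) shape.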
+//
+// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_add :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_sub :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_sub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_aslh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_aslh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_asrh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_asrh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addpsat :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addpsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
+//
+def int_hexagon_A2_addsp :
+Hexagon_di_sidi_Intrinsic<"HEXAGON_A2_addsp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_subp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_subp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
+//
+def int_hexagon_A2_neg :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_neg">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_negsat :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_negsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abs :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abssat :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abssat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vconj :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vconj">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_negp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_negp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_absp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_absp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_max :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_max">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_maxu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_maxu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_min :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_min">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_minu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_minu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxup :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minup :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfr :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrsi :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfrsi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_tfrp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_tfrp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrpi :
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_tfrpi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxth :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxth :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combinew :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineri :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineir :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combineii :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combineii">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfril :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfril">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfrih :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfrih">;
+//
+// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_and">;
+//
+// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_or">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_xor">;
+//
+// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
+//
+def int_hexagon_A2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_xor_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_xor_xacc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_andn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_orn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_andnp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_ornp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subaddi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andix :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andix">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_ori :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_ori">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_andn">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subri">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_andir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_andir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_orir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_orir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_andp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_andp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_orp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_orp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_xorp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_xorp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_notp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_notp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtw :
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_sxtw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_sat :
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_roundsat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_roundsat :
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_roundsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sath :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sath">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satuh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satub :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satb">;
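+//
+// The "vadd"/"vsub"/"vavg"/"vmin"/"vmax" defs below operate lane-wise
+// on 64-bit register pairs: "ub"/"b" are byte lanes, "h"/"uh"
+// halfwords, "w"/"uw" words; "vnavg" averages against the negated
+// second operand, a trailing "s" saturates, "r" rounds, and "cr"
+// appears to denote convergent rounding. (Hedged summary from the
+// naming scheme.)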
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddb_map">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddubs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vadduhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A5_vaddhubs,SI_ftype_DIDI,2)
+//
+def int_hexagon_A5_vaddhubs :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A5_vaddhubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddws :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavghs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavghs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svnavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svadduhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubuhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vraddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vraddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vraddub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vraddub_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vraddh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vraddh :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vraddh">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vradduh :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vradduh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubb_map">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsububs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsububs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubuhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubws :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsh :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabshsat :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabshsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsw :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabswsat :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabswsat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffw">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vrsadub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vrsadub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgubr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgubr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_modwrapu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_modwrapu">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfadd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfadd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfadd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfsub,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfsub :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfsub">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmpy,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmpy :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmpy">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_sc,SF_ftype_SFSFSFQI,4)
+//
+def int_hexagon_F2_sffma_sc :
+Hexagon_sf_sfsfsfqi_Intrinsic<"HEXAGON_F2_sffma_sc">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpeq,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpeq :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpgt,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpgt :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpge,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpge :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpuo,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpuo :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmax,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmax :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmax">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmin,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmin :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmin">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfclass,QI_ftype_SFSI,2)
+//
+def int_hexagon_F2_sfclass :
+Hexagon_si_sfsi_Intrinsic<"HEXAGON_F2_sfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_p,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_p :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_n,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_n :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupn,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupn :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupn">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupr,SF_ftype_SF,1)
+//
+def int_hexagon_F2_sffixupr :
+Hexagon_sf_sf_Intrinsic<"HEXAGON_F2_sffixupr">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpeq,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpeq :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpgt,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpgt :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpge,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpge :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpuo,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpuo :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfclass,QI_ftype_DFSI,2)
+//
+def int_hexagon_F2_dfclass :
+Hexagon_si_dfsi_Intrinsic<"HEXAGON_F2_dfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_p,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_p :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_n,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_n :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2df,DF_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2df :
+Hexagon_df_sf_Intrinsic<"HEXAGON_F2_conv_sf2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2sf,SF_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2sf :
+Hexagon_sf_df_Intrinsic<"HEXAGON_F2_conv_df2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_uw2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_w2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_w2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_ud2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_d2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_d2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsl_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S4_lsli,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_lsli :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_lsli">;
+//
+// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_addasl_rrri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_addasl_rrri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_valignib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_valignib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_valignrb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_valignrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vspliceib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vspliceib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_vsplicerb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_vsplicerb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsplatrh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_vsplatrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_insert :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_insert">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitspliti,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitspliti :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitspliti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitsplit,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitsplit :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitsplit">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_extract :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_extract">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_extractu :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_extractu">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S2_insertp :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S2_insertp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_extractp :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_extractp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
+//
+def int_hexagon_S2_extractup :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S2_extractup">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
+//
+def int_hexagon_S2_insert_rp :
+Hexagon_si_sisidi_Intrinsic<"HEXAGON_S2_insert_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S4_extract_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S4_extract_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S2_extractu_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S2_extractu_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_S2_insertp_rp :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_S2_insertp_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_extractp_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_extractp_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_extractup_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_extractup_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_rnd_sat_goodsyntax,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_sat,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_sat :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S5_vasrhrnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S5_vasrhrnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_i_svw_trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathub :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunohb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunohb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunewh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunewh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunowh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunowh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunehb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunehb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwuh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
+//
+def int_hexagon_S2_packhl :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_S2_packhl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
+//
+def int_hexagon_A2_swiz :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_swiz">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffob :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffob">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffoh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffoh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_popcountp,SI_ftype_DI,1)
+//
+def int_hexagon_S5_popcountp :
+Hexagon_si_di_Intrinsic<"HEXAGON_S5_popcountp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_parity,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_parity :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_parity">;
+//
+// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
+//
+def int_hexagon_S2_parityp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_S2_parityp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_lfsp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_lfsp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clbnorm :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clbnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbaddi,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_clbaddi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_clbaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpnorm,SI_ftype_DI,1)
+//
+def int_hexagon_S4_clbpnorm :
+Hexagon_si_di_Intrinsic<"HEXAGON_S4_clbpnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpaddi,SI_ftype_DISI,2)
+//
+def int_hexagon_S4_clbpaddi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S4_clbpaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl0 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl1 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
+//
+def int_hexagon_S2_clbp :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_clbp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl0p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl1p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
+//
+def int_hexagon_S2_brev :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_brev">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brevp,DI_ftype_DI,1)
+//
+def int_hexagon_S2_brevp :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_brevp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct0 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct1 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_ct0p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_ct1p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_interleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_interleave">;
+//
+// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_deinterleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_deinterleave">;
+
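+//
+// The records below keep the default (side-effecting) intrinsic attributes
+// rather than IntrNoMem: they model prefetch and data-cache maintenance
+// operations that take a pointer operand.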
+//
+// BUILTIN_INFO(HEXAGON.dcfetch_A,v_ftype_DI*,1)
+// (The generated tag above names dcfetch_A; the def below is the
+// corresponding data-cache prefetch, exposed as __builtin_HEXAGON_prefetch.)
+//
+def int_hexagon_prefetch :
+Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
+def int_hexagon_Y2_dccleana :
+Hexagon_Intrinsic<"HEXAGON_Y2_dccleana", [], [llvm_ptr_ty], []>;
+def int_hexagon_Y2_dccleaninva :
+Hexagon_Intrinsic<"HEXAGON_Y2_dccleaninva", [], [llvm_ptr_ty], []>;
+def int_hexagon_Y2_dcinva :
+Hexagon_Intrinsic<"HEXAGON_Y2_dcinva", [], [llvm_ptr_ty], []>;
+def int_hexagon_Y2_dczeroa :
+Hexagon_Intrinsic<"HEXAGON_Y2_dczeroa", [], [llvm_ptr_ty],
+      [IntrWriteMem, IntrArgMemOnly, IntrHasSideEffects]>;
+def int_hexagon_Y4_l2fetch :
+Hexagon_Intrinsic<"HEXAGON_Y4_l2fetch", [], [llvm_ptr_ty, llvm_i32_ty], []>;
+def int_hexagon_Y5_l2fetch :
+Hexagon_Intrinsic<"HEXAGON_Y5_l2fetch", [], [llvm_ptr_ty, llvm_i64_ty], []>;
+
+def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
+def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
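+// (Despite the 32/64 in their names, these are pointers *to* i32 and i64
+// elements; they give the locked load/store intrinsics below typed pointer
+// operands.)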
+
+// Mark locked loads as read/write to prevent any accidental reordering.
+def int_hexagon_L2_loadw_locked :
+Hexagon_Intrinsic<"HEXAGON_L2_loadw_locked", [llvm_i32_ty], [llvm_ptr32_ty],
+      [IntrArgMemOnly, NoCapture<0>]>;
+def int_hexagon_L4_loadd_locked :
+Hexagon_Intrinsic<"HEXAGON_L4_loadd_locked", [llvm_i64_ty], [llvm_ptr64_ty],
+      [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_hexagon_S2_storew_locked :
+Hexagon_Intrinsic<"HEXAGON_S2_storew_locked", [llvm_i32_ty],
+      [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<0>]>;
+def int_hexagon_S4_stored_locked :
+Hexagon_Intrinsic<"HEXAGON_S4_stored_locked", [llvm_i32_ty],
+      [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<0>]>;
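+
+// A hedged C sketch of the usual load-locked/store-conditional retry loop
+// built on the two word-sized intrinsics above (this assumes the builtin's
+// non-zero return indicates the conditional store succeeded):
+//
+//   void atomic_inc(int *p) {
+//     int v;
+//     do {
+//       v = __builtin_HEXAGON_L2_loadw_locked(p) + 1;
+//     } while (!__builtin_HEXAGON_S2_storew_locked(p, v));
+//   }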
+
+// V60
+
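+// In the HVX class names that follow, v512/v1024/v2048 encode vector widths
+// in bits (lowered as 16/32/64 x i32), while v64i/v128i appear to name the
+// 512-/1024-bit predicate types (llvm_v512i1_ty / llvm_v1024i1_ty) by their
+// 64-/128-byte vector lengths; plain i and LLi denote i32 and i64 scalars.
+//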
+class Hexagon_v2048v2048_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty],
+                          [IntrNoMem]>;
+
+// tag : V6_hi_W
+// tag : V6_lo_W
+class Hexagon_v512v1024_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+// tag : V6_hi_W_128B
+// tag : V6_lo_W_128B
+class Hexagon_v1024v2048_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v64i32_ty],
+                          [IntrNoMem]>;
+
+class Hexagon_v1024v1024_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
+// tag : V6_hi
+def int_hexagon_V6_hi :
+Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_hi">;
+
+// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
+// tag : V6_lo
+def int_hexagon_V6_lo :
+Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_lo">;
+
+// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
+// tag : V6_hi_128B
+def int_hexagon_V6_hi_128B :
+Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_hi_128B">;
+
+// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
+// tag : V6_lo_128B
+def int_hexagon_V6_lo_128B :
+Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_lo_128B">;
+
+// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
+// tag : V6_vassignp
+def int_hexagon_V6_vassignp :
+Hexagon_v1024v1024_Intrinsic_T<"HEXAGON_V6_vassignp">;
+
+// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
+// tag : V6_vassignp_128B
+def int_hexagon_V6_vassignp_128B :
+Hexagon_v2048v2048_Intrinsic_T<"HEXAGON_V6_vassignp_128B">;
+
+
+//
+// Hexagon_iii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_r
+class Hexagon_iii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_p
+class Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_iiii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_r_acc
+class Hexagon_iiii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_p_acc
+class Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_valignb
+class Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_valignb_128B
+class Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vror
+class Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vror_128B
+class Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackub
+class Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackub_128B
+class Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackob
+class Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackob_128B
+class Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vpackeb
+class Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vpackeb_128B
+class Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpybus_dv_128B
+class Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpybus_dv_acc_128B
+class Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhvsat_acc
+class Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhvsat_acc_128B
+class Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat
+class Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_128B
+class Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_acc_128B
+class Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi
+class Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_128B
+class Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_acc
+class Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_acc_128B
+class Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddb_dv_128B
+class Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddubh
+class Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddubh_128B
+class Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vd0
+class Hexagon_v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vd0_128B
+class Hexagon_v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddbq
+class Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddbq_128B
+class Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsh
+class Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsh_128B
+class Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpybv_acc
+class Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpybv_acc_128B
+class Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub
+class Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_128B
+class Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_acc
+class Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_acc_128B
+class Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt
+class Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_128B
+class Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_acc
+class Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_acc_128B
+class Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt
+class Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_128B
+class Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_acc
+class Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_acc_128B
+class Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw
+class Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_128B
+class Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_and
+class Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_and_128B
+class Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_or
+class Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_or_128B
+class Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_not
+class Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v512i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_not_128B
+class Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v1024i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2
+class Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2_128B
+class Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vswap
+class Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vswap_128B
+class Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vshuffvdd
+class Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vshuffvdd_128B
+class Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_extractw
+class Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_extractw_128B
+class Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplatw
+class Hexagon_v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplatw_128B
+class Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracc
+class Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracc_128B
+class Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracc
+class Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracc_128B
+class Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
+// tag: V6_vS32b_qpred_ai
+class Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v512i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
+                          [IntrArgMemOnly]>;
+
+//
+// Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
+// tag: V6_vS32b_qpred_ai_128B
+class Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v1024i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
+                          [IntrArgMemOnly]>;
+
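+//
+// Naming convention for the wrapper classes above: the token after
+// "Hexagon_" encodes the return type and the remaining tokens encode the
+// operand types, left to right.  From the class bodies in this file:
+//   v512  = <16 x i32>   (one 512-bit HVX vector register, 64-byte mode)
+//   v1024 = <32 x i32>   (a 64B-mode register pair, or one 128B-mode register)
+//   v2048 = <64 x i32>   (a 128B-mode register pair)
+//   v64i  = <512 x i1>   (64B-mode predicate register)
+//   v128i = <1024 x i1>  (128B-mode predicate register)
+//   i = i32, LLi = i64 (defined earlier in this file), vmem = pointer,
+//   and a lone leading "v" = void return.
+//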
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r,SI_ftype_SISI,2)
+// tag : S6_rol_i_r
+def int_hexagon_S6_rol_i_r :
+Hexagon_iii_Intrinsic<"HEXAGON_S6_rol_i_r">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p,DI_ftype_DISI,2)
+// tag : S6_rol_i_p
+def int_hexagon_S6_rol_i_p :
+Hexagon_LLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_acc,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_acc
+def int_hexagon_S6_rol_i_r_acc :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_acc,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_acc
+def int_hexagon_S6_rol_i_p_acc :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_nac,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_nac
+def int_hexagon_S6_rol_i_r_nac :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_nac">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_nac,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_nac
+def int_hexagon_S6_rol_i_p_nac :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_nac">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_xacc,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_xacc
+def int_hexagon_S6_rol_i_r_xacc :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_xacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_xacc,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_xacc
+def int_hexagon_S6_rol_i_p_xacc :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_xacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_and,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_and
+def int_hexagon_S6_rol_i_r_and :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_or,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_or
+def int_hexagon_S6_rol_i_r_or :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_and,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_and
+def int_hexagon_S6_rol_i_p_and :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_or,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_or
+def int_hexagon_S6_rol_i_p_or :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_or">;
+
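+//
+// The S6_rol_i_* defs above are rotate-left-by-immediate on a 32-bit (r) or
+// 64-bit (p) register, with accumulate (acc/nac/xacc) and bitwise (and/or)
+// merging forms.  Illustrative C usage, assuming the usual __builtin_
+// prefixing that Hexagon_Intrinsic applies to the GCCIntSuffix string:
+//   unsigned r = __builtin_HEXAGON_S6_rol_i_r(x, 5);  // rotate x left by 5
+//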
+//
+// BUILTIN_INFO(HEXAGON.S2_cabacencbin,DI_ftype_DIDIQI,3)
+// tag : S2_cabacencbin
+def int_hexagon_S2_cabacencbin :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S2_cabacencbin">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignb,VI_ftype_VIVISI,3)
+// tag : V6_valignb
+def int_hexagon_V6_valignb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignb_128B,VI_ftype_VIVISI,3)
+// tag : V6_valignb_128B
+def int_hexagon_V6_valignb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignb,VI_ftype_VIVISI,3)
+// tag : V6_vlalignb
+def int_hexagon_V6_vlalignb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignb_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlalignb_128B
+def int_hexagon_V6_vlalignb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignbi,VI_ftype_VIVISI,3)
+// tag : V6_valignbi
+def int_hexagon_V6_valignbi :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignbi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignbi_128B,VI_ftype_VIVISI,3)
+// tag : V6_valignbi_128B
+def int_hexagon_V6_valignbi_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignbi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignbi,VI_ftype_VIVISI,3)
+// tag : V6_vlalignbi
+def int_hexagon_V6_vlalignbi :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignbi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignbi_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlalignbi_128B
+def int_hexagon_V6_vlalignbi_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignbi_128B">;
+
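+//
+// From here on, every HVX def comes as a pair: the base name for 64-byte
+// vector mode and a _128B twin for 128-byte mode with all vector widths
+// doubled.  The valign/vlalign defs above concatenate two vectors and
+// extract at a byte offset (register or immediate form); illustrative IR:
+//   %v = call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %a,
+//                                                 <16 x i32> %b, i32 %off)
+//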
+//
+// BUILTIN_INFO(HEXAGON.V6_vror,VI_ftype_VISI,2)
+// tag : V6_vror
+def int_hexagon_V6_vror :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vror">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vror_128B,VI_ftype_VISI,2)
+// tag : V6_vror_128B
+def int_hexagon_V6_vror_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vror_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackub,VD_ftype_VI,1)
+// tag : V6_vunpackub
+def int_hexagon_V6_vunpackub :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackub_128B,VD_ftype_VI,1)
+// tag : V6_vunpackub_128B
+def int_hexagon_V6_vunpackub_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackb,VD_ftype_VI,1)
+// tag : V6_vunpackb
+def int_hexagon_V6_vunpackb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackb_128B,VD_ftype_VI,1)
+// tag : V6_vunpackb_128B
+def int_hexagon_V6_vunpackb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackuh,VD_ftype_VI,1)
+// tag : V6_vunpackuh
+def int_hexagon_V6_vunpackuh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackuh_128B,VD_ftype_VI,1)
+// tag : V6_vunpackuh_128B
+def int_hexagon_V6_vunpackuh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackh,VD_ftype_VI,1)
+// tag : V6_vunpackh
+def int_hexagon_V6_vunpackh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackh_128B,VD_ftype_VI,1)
+// tag : V6_vunpackh_128B
+def int_hexagon_V6_vunpackh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackob,VD_ftype_VDVI,2)
+// tag : V6_vunpackob
+def int_hexagon_V6_vunpackob :
+Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackob_128B,VD_ftype_VDVI,2)
+// tag : V6_vunpackob_128B
+def int_hexagon_V6_vunpackob_128B :
+Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackoh,VD_ftype_VDVI,2)
+// tag : V6_vunpackoh
+def int_hexagon_V6_vunpackoh :
+Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackoh_128B,VD_ftype_VDVI,2)
+// tag : V6_vunpackoh_128B
+def int_hexagon_V6_vunpackoh_128B :
+Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
+
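+//
+// The vunpack* defs above widen: each byte/halfword of one vector is zero-
+// (u) or sign-extended into a register pair, and the _o forms combine only
+// the odd elements with an existing pair.  Illustrative IR:
+//   %dd = call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v)
+//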
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeb,VI_ftype_VIVI,2)
+// tag : V6_vpackeb
+def int_hexagon_V6_vpackeb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeb_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackeb_128B
+def int_hexagon_V6_vpackeb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeh,VI_ftype_VIVI,2)
+// tag : V6_vpackeh
+def int_hexagon_V6_vpackeh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeh_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackeh_128B
+def int_hexagon_V6_vpackeh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackob,VI_ftype_VIVI,2)
+// tag : V6_vpackob
+def int_hexagon_V6_vpackob :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackob_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackob_128B
+def int_hexagon_V6_vpackob_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackoh,VI_ftype_VIVI,2)
+// tag : V6_vpackoh
+def int_hexagon_V6_vpackoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackoh_128B
+def int_hexagon_V6_vpackoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackhub_sat
+def int_hexagon_V6_vpackhub_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackhub_sat_128B
+def int_hexagon_V6_vpackhub_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackhb_sat
+def int_hexagon_V6_vpackhb_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackhb_sat_128B
+def int_hexagon_V6_vpackhb_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackwuh_sat
+def int_hexagon_V6_vpackwuh_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackwuh_sat_128B
+def int_hexagon_V6_vpackwuh_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackwh_sat
+def int_hexagon_V6_vpackwh_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackwh_sat_128B
+def int_hexagon_V6_vpackwh_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
+
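+//
+// The vpack* defs are the narrowing counterpart: two input vectors are
+// packed into one, either by selecting even (e) or odd (o) elements or by
+// saturating each wider element down (_sat).  Illustrative IR:
+//   %v = call <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32> %a,
+//                                                 <16 x i32> %b)
+//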
+//
+// BUILTIN_INFO(HEXAGON.V6_vzb,VD_ftype_VI,1)
+// tag : V6_vzb
+def int_hexagon_V6_vzb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzb_128B,VD_ftype_VI,1)
+// tag : V6_vzb_128B
+def int_hexagon_V6_vzb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsb,VD_ftype_VI,1)
+// tag : V6_vsb
+def int_hexagon_V6_vsb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsb_128B,VD_ftype_VI,1)
+// tag : V6_vsb_128B
+def int_hexagon_V6_vsb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzh,VD_ftype_VI,1)
+// tag : V6_vzh
+def int_hexagon_V6_vzh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzh_128B,VD_ftype_VI,1)
+// tag : V6_vzh_128B
+def int_hexagon_V6_vzh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsh,VD_ftype_VI,1)
+// tag : V6_vsh
+def int_hexagon_V6_vsh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsh_128B,VD_ftype_VI,1)
+// tag : V6_vsh_128B
+def int_hexagon_V6_vsh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsh_128B">;
+
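+//
+// vzb/vzh zero-extend and vsb/vsh sign-extend each byte/halfword of one
+// vector into a register pair of the next wider element type, e.g.:
+//   %p = call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v)
+//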
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus,VI_ftype_VISI,2)
+// tag : V6_vdmpybus
+def int_hexagon_V6_vdmpybus :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpybus_128B
+def int_hexagon_V6_vdmpybus_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpybus_acc
+def int_hexagon_V6_vdmpybus_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpybus_acc_128B
+def int_hexagon_V6_vdmpybus_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv,VD_ftype_VDSI,2)
+// tag : V6_vdmpybus_dv
+def int_hexagon_V6_vdmpybus_dv :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_128B,VD_ftype_VDSI,2)
+// tag : V6_vdmpybus_dv_128B
+def int_hexagon_V6_vdmpybus_dv_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpybus_dv_acc
+def int_hexagon_V6_vdmpybus_dv_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpybus_dv_acc_128B
+def int_hexagon_V6_vdmpybus_dv_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb,VI_ftype_VISI,2)
+// tag : V6_vdmpyhb
+def int_hexagon_V6_vdmpyhb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhb_128B
+def int_hexagon_V6_vdmpyhb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhb_acc
+def int_hexagon_V6_vdmpyhb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhb_acc_128B
+def int_hexagon_V6_vdmpyhb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv,VD_ftype_VDSI,2)
+// tag : V6_vdmpyhb_dv
+def int_hexagon_V6_vdmpyhb_dv :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_128B,VD_ftype_VDSI,2)
+// tag : V6_vdmpyhb_dv_128B
+def int_hexagon_V6_vdmpyhb_dv_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpyhb_dv_acc
+def int_hexagon_V6_vdmpyhb_dv_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpyhb_dv_acc_128B
+def int_hexagon_V6_vdmpyhb_dv_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat,VI_ftype_VIVI,2)
+// tag : V6_vdmpyhvsat
+def int_hexagon_V6_vdmpyhvsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vdmpyhvsat_128B
+def int_hexagon_V6_vdmpyhvsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vdmpyhvsat_acc
+def int_hexagon_V6_vdmpyhvsat_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vdmpyhvsat_acc_128B
+def int_hexagon_V6_vdmpyhvsat_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsat
+def int_hexagon_V6_vdmpyhsat :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsat_128B
+def int_hexagon_V6_vdmpyhsat_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsat_acc
+def int_hexagon_V6_vdmpyhsat_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsat_acc_128B
+def int_hexagon_V6_vdmpyhsat_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhisat
+def int_hexagon_V6_vdmpyhisat :
+Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_128B,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhisat_128B
+def int_hexagon_V6_vdmpyhisat_128B :
+Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhisat_acc
+def int_hexagon_V6_vdmpyhisat_acc :
+Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc_128B,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhisat_acc_128B
+def int_hexagon_V6_vdmpyhisat_acc_128B :
+Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsusat
+def int_hexagon_V6_vdmpyhsusat :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsusat_128B
+def int_hexagon_V6_vdmpyhsusat_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsusat_acc
+def int_hexagon_V6_vdmpyhsusat_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsusat_acc_128B
+def int_hexagon_V6_vdmpyhsusat_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhsuisat
+def int_hexagon_V6_vdmpyhsuisat :
+Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_128B,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhsuisat_128B
+def int_hexagon_V6_vdmpyhsuisat_128B :
+Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhsuisat_acc
+def int_hexagon_V6_vdmpyhsuisat_acc :
+Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc_128B,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhsuisat_acc_128B
+def int_hexagon_V6_vdmpyhsuisat_acc_128B :
+Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
+
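+//
+// The vdmpy* block above follows a suffix scheme used throughout this file:
+// _acc accumulates into the first operand, _dv operates on register pairs,
+// sat/isat variants saturate, and the trailing i32 operand is a replicated
+// scalar multiplier.  Illustrative IR:
+//   %r = call <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32> %v, i32 %s)
+//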
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb,VD_ftype_VDSI,2)
+// tag : V6_vtmpyb
+def int_hexagon_V6_vtmpyb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpyb_128B
+def int_hexagon_V6_vtmpyb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyb_acc
+def int_hexagon_V6_vtmpyb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyb_acc_128B
+def int_hexagon_V6_vtmpyb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus,VD_ftype_VDSI,2)
+// tag : V6_vtmpybus
+def int_hexagon_V6_vtmpybus :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpybus_128B
+def int_hexagon_V6_vtmpybus_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpybus_acc
+def int_hexagon_V6_vtmpybus_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpybus_acc_128B
+def int_hexagon_V6_vtmpybus_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb,VD_ftype_VDSI,2)
+// tag : V6_vtmpyhb
+def int_hexagon_V6_vtmpyhb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpyhb_128B
+def int_hexagon_V6_vtmpyhb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyhb_acc
+def int_hexagon_V6_vtmpyhb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyhb_acc_128B
+def int_hexagon_V6_vtmpyhb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
+
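+//
+// The vtmpy* defs take a register pair plus a replicated scalar and always
+// produce a pair; the same _acc and _128B suffix rules apply.
+//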
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub,VI_ftype_VISI,2)
+// tag : V6_vrmpyub
+def int_hexagon_V6_vrmpyub :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_128B,VI_ftype_VISI,2)
+// tag : V6_vrmpyub_128B
+def int_hexagon_V6_vrmpyub_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc,VI_ftype_VIVISI,3)
+// tag : V6_vrmpyub_acc
+def int_hexagon_V6_vrmpyub_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vrmpyub_acc_128B
+def int_hexagon_V6_vrmpyub_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv,VI_ftype_VIVI,2)
+// tag : V6_vrmpyubv
+def int_hexagon_V6_vrmpyubv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpyubv_128B
+def int_hexagon_V6_vrmpyubv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpyubv_acc
+def int_hexagon_V6_vrmpyubv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpyubv_acc_128B
+def int_hexagon_V6_vrmpyubv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv,VI_ftype_VIVI,2)
+// tag : V6_vrmpybv
+def int_hexagon_V6_vrmpybv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpybv_128B
+def int_hexagon_V6_vrmpybv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybv_acc
+def int_hexagon_V6_vrmpybv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybv_acc_128B
+def int_hexagon_V6_vrmpybv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi,VD_ftype_VDSISI,3)
+// tag : V6_vrmpyubi
+def int_hexagon_V6_vrmpyubi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrmpyubi_128B
+def int_hexagon_V6_vrmpyubi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpyubi_acc
+def int_hexagon_V6_vrmpyubi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpyubi_acc_128B
+def int_hexagon_V6_vrmpyubi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus,VI_ftype_VISI,2)
+// tag : V6_vrmpybus
+def int_hexagon_V6_vrmpybus :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_128B,VI_ftype_VISI,2)
+// tag : V6_vrmpybus_128B
+def int_hexagon_V6_vrmpybus_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc,VI_ftype_VIVISI,3)
+// tag : V6_vrmpybus_acc
+def int_hexagon_V6_vrmpybus_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vrmpybus_acc_128B
+def int_hexagon_V6_vrmpybus_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi,VD_ftype_VDSISI,3)
+// tag : V6_vrmpybusi
+def int_hexagon_V6_vrmpybusi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrmpybusi_128B
+def int_hexagon_V6_vrmpybusi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpybusi_acc
+def int_hexagon_V6_vrmpybusi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpybusi_acc_128B
+def int_hexagon_V6_vrmpybusi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv,VI_ftype_VIVI,2)
+// tag : V6_vrmpybusv
+def int_hexagon_V6_vrmpybusv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpybusv_128B
+def int_hexagon_V6_vrmpybusv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybusv_acc
+def int_hexagon_V6_vrmpybusv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybusv_acc_128B
+def int_hexagon_V6_vrmpybusv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
+
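+//
+// vrmpy* are the reduce-multiply defs: the v-suffixed forms multiply against
+// a second vector, the plain forms against a replicated scalar, and the i
+// forms take an extra immediate; the _acc variants accumulate per 32-bit
+// lane.  Illustrative IR:
+//   %r = call <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32> %v, i32 %s)
+//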
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh,VD_ftype_VDSI,2)
+// tag : V6_vdsaduh
+def int_hexagon_V6_vdsaduh :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_128B,VD_ftype_VDSI,2)
+// tag : V6_vdsaduh_128B
+def int_hexagon_V6_vdsaduh_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdsaduh_acc
+def int_hexagon_V6_vdsaduh_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdsaduh_acc_128B
+def int_hexagon_V6_vdsaduh_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi,VD_ftype_VDSISI,3)
+// tag : V6_vrsadubi
+def int_hexagon_V6_vrsadubi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrsadubi_128B
+def int_hexagon_V6_vrsadubi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrsadubi_acc
+def int_hexagon_V6_vrsadubi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrsadubi_acc_128B
+def int_hexagon_V6_vrsadubi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B">;
+
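+//
+// vdsaduh and vrsadubi are sum-of-absolute-differences defs on register
+// pairs, following the same _acc/_128B pattern as the multiplies above.
+//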
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw,VI_ftype_VISI,2)
+// tag : V6_vasrw
+def int_hexagon_V6_vasrw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_128B,VI_ftype_VISI,2)
+// tag : V6_vasrw_128B
+def int_hexagon_V6_vasrw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw,VI_ftype_VISI,2)
+// tag : V6_vaslw
+def int_hexagon_V6_vaslw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_128B,VI_ftype_VISI,2)
+// tag : V6_vaslw_128B
+def int_hexagon_V6_vaslw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrw,VI_ftype_VISI,2)
+// tag : V6_vlsrw
+def int_hexagon_V6_vlsrw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrw_128B,VI_ftype_VISI,2)
+// tag : V6_vlsrw_128B
+def int_hexagon_V6_vlsrw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwv,VI_ftype_VIVI,2)
+// tag : V6_vasrwv
+def int_hexagon_V6_vasrwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vasrwv_128B
+def int_hexagon_V6_vasrwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslwv,VI_ftype_VIVI,2)
+// tag : V6_vaslwv
+def int_hexagon_V6_vaslwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vaslwv_128B
+def int_hexagon_V6_vaslwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrwv,VI_ftype_VIVI,2)
+// tag : V6_vlsrwv
+def int_hexagon_V6_vlsrwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vlsrwv_128B
+def int_hexagon_V6_vlsrwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh,VI_ftype_VISI,2)
+// tag : V6_vasrh
+def int_hexagon_V6_vasrh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh_128B,VI_ftype_VISI,2)
+// tag : V6_vasrh_128B
+def int_hexagon_V6_vasrh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh,VI_ftype_VISI,2)
+// tag : V6_vaslh
+def int_hexagon_V6_vaslh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh_128B,VI_ftype_VISI,2)
+// tag : V6_vaslh_128B
+def int_hexagon_V6_vaslh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrh,VI_ftype_VISI,2)
+// tag : V6_vlsrh
+def int_hexagon_V6_vlsrh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrh_128B,VI_ftype_VISI,2)
+// tag : V6_vlsrh_128B
+def int_hexagon_V6_vlsrh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhv,VI_ftype_VIVI,2)
+// tag : V6_vasrhv
+def int_hexagon_V6_vasrhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vasrhv_128B
+def int_hexagon_V6_vasrhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslhv,VI_ftype_VIVI,2)
+// tag : V6_vaslhv
+def int_hexagon_V6_vaslhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vaslhv_128B
+def int_hexagon_V6_vaslhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrhv,VI_ftype_VIVI,2)
+// tag : V6_vlsrhv
+def int_hexagon_V6_vlsrhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vlsrhv_128B
+def int_hexagon_V6_vlsrhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
+
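+//
+// The shift defs come in scalar-count and per-element forms: vasr/vasl/vlsr
+// shift every word or halfword by one i32 count, while the v-suffixed
+// variants take the counts from a second vector.  Illustrative IR:
+//   %r = call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v, i32 %n)
+//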
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwh,VI_ftype_VIVISI,3)
+// tag : V6_vasrwh
+def int_hexagon_V6_vasrwh :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwh_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwh_128B
+def int_hexagon_V6_vasrwh_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhsat
+def int_hexagon_V6_vasrwhsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhsat_128B
+def int_hexagon_V6_vasrwhsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhrndsat
+def int_hexagon_V6_vasrwhrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhrndsat_128B
+def int_hexagon_V6_vasrwhrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhsat
+def int_hexagon_V6_vasrwuhsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhsat_128B
+def int_hexagon_V6_vasrwuhsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwh,VI_ftype_VIVI,2)
+// tag : V6_vroundwh
+def int_hexagon_V6_vroundwh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwh_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundwh_128B
+def int_hexagon_V6_vroundwh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwuh,VI_ftype_VIVI,2)
+// tag : V6_vroundwuh
+def int_hexagon_V6_vroundwuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundwuh_128B
+def int_hexagon_V6_vroundwuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubsat
+def int_hexagon_V6_vasrhubsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubsat_128B
+def int_hexagon_V6_vasrhubsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubrndsat
+def int_hexagon_V6_vasrhubrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubrndsat_128B
+def int_hexagon_V6_vasrhubrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbrndsat
+def int_hexagon_V6_vasrhbrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbrndsat_128B
+def int_hexagon_V6_vasrhbrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhb,VI_ftype_VIVI,2)
+// tag : V6_vroundhb
+def int_hexagon_V6_vroundhb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhb_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundhb_128B
+def int_hexagon_V6_vroundhb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhub,VI_ftype_VIVI,2)
+// tag : V6_vroundhub
+def int_hexagon_V6_vroundhub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhub_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundhub_128B
+def int_hexagon_V6_vroundhub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
+
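+//
+// The vasrwh/vasrhub-style defs above shift wider elements right and pack
+// the results into the next narrower type, with rnd (round) and sat
+// (saturate) options; the vround* defs are the shift-free round-and-narrow
+// forms.
+//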
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_acc,VI_ftype_VIVISI,3)
+// tag : V6_vaslw_acc
+def int_hexagon_V6_vaslw_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vaslw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vaslw_acc_128B
+def int_hexagon_V6_vaslw_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_acc,VI_ftype_VIVISI,3)
+// tag : V6_vasrw_acc
+def int_hexagon_V6_vasrw_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrw_acc_128B
+def int_hexagon_V6_vasrw_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb,VI_ftype_VIVI,2)
+// tag : V6_vaddb
+def int_hexagon_V6_vaddb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddb_128B
+def int_hexagon_V6_vaddb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb,VI_ftype_VIVI,2)
+// tag : V6_vsubb
+def int_hexagon_V6_vsubb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubb_128B
+def int_hexagon_V6_vsubb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddb_dv
+def int_hexagon_V6_vaddb_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddb_dv_128B
+def int_hexagon_V6_vaddb_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubb_dv
+def int_hexagon_V6_vsubb_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubb_dv_128B
+def int_hexagon_V6_vsubb_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh,VI_ftype_VIVI,2)
+// tag : V6_vaddh
+def int_hexagon_V6_vaddh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddh_128B
+def int_hexagon_V6_vaddh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh,VI_ftype_VIVI,2)
+// tag : V6_vsubh
+def int_hexagon_V6_vsubh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubh_128B
+def int_hexagon_V6_vsubh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddh_dv
+def int_hexagon_V6_vaddh_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddh_dv_128B
+def int_hexagon_V6_vaddh_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubh_dv
+def int_hexagon_V6_vsubh_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubh_dv_128B
+def int_hexagon_V6_vsubh_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw,VI_ftype_VIVI,2)
+// tag : V6_vaddw
+def int_hexagon_V6_vaddw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddw_128B
+def int_hexagon_V6_vaddw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw,VI_ftype_VIVI,2)
+// tag : V6_vsubw
+def int_hexagon_V6_vsubw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubw_128B
+def int_hexagon_V6_vsubw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddw_dv
+def int_hexagon_V6_vaddw_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddw_dv_128B
+def int_hexagon_V6_vaddw_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubw_dv
+def int_hexagon_V6_vsubw_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubw_dv_128B
+def int_hexagon_V6_vsubw_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat,VI_ftype_VIVI,2)
+// tag : V6_vaddubsat
+def int_hexagon_V6_vaddubsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddubsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddubsat_128B
+def int_hexagon_V6_vaddubsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddubsat_dv
+def int_hexagon_V6_vaddubsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddubsat_dv_128B
+def int_hexagon_V6_vaddubsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat,VI_ftype_VIVI,2)
+// tag : V6_vsububsat
+def int_hexagon_V6_vsububsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsububsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsububsat_128B
+def int_hexagon_V6_vsububsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsububsat_dv
+def int_hexagon_V6_vsububsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsububsat_dv_128B
+def int_hexagon_V6_vsububsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat,VI_ftype_VIVI,2)
+// tag : V6_vadduhsat
+def int_hexagon_V6_vadduhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vadduhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vadduhsat_128B
+def int_hexagon_V6_vadduhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vadduhsat_dv
+def int_hexagon_V6_vadduhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vadduhsat_dv_128B
+def int_hexagon_V6_vadduhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat,VI_ftype_VIVI,2)
+// tag : V6_vsubuhsat
+def int_hexagon_V6_vsubuhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubuhsat_128B
+def int_hexagon_V6_vsubuhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubuhsat_dv
+def int_hexagon_V6_vsubuhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubuhsat_dv_128B
+def int_hexagon_V6_vsubuhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat,VI_ftype_VIVI,2)
+// tag : V6_vaddhsat
+def int_hexagon_V6_vaddhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddhsat_128B
+def int_hexagon_V6_vaddhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddhsat_dv
+def int_hexagon_V6_vaddhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddhsat_dv_128B
+def int_hexagon_V6_vaddhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat,VI_ftype_VIVI,2)
+// tag : V6_vsubhsat
+def int_hexagon_V6_vsubhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubhsat_128B
+def int_hexagon_V6_vsubhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubhsat_dv
+def int_hexagon_V6_vsubhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubhsat_dv_128B
+def int_hexagon_V6_vsubhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat,VI_ftype_VIVI,2)
+// tag : V6_vaddwsat
+def int_hexagon_V6_vaddwsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddwsat_128B
+def int_hexagon_V6_vaddwsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddwsat_dv
+def int_hexagon_V6_vaddwsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddwsat_dv_128B
+def int_hexagon_V6_vaddwsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat,VI_ftype_VIVI,2)
+// tag : V6_vsubwsat
+def int_hexagon_V6_vsubwsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubwsat_128B
+def int_hexagon_V6_vsubwsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubwsat_dv
+def int_hexagon_V6_vsubwsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubwsat_dv_128B
+def int_hexagon_V6_vsubwsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
+
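+//
+// The add/sub block above is systematic: element type b/h/w, modular by
+// default, sat for saturating (ub/uh for unsigned saturation), and _dv for
+// register-pair operands.  Illustrative IR:
+//   %r = call <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32> %a,
+//                                                  <16 x i32> %b)
+//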
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgub,VI_ftype_VIVI,2)
+// tag : V6_vavgub
+def int_hexagon_V6_vavgub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgub_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgub_128B
+def int_hexagon_V6_vavgub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgubrnd,VI_ftype_VIVI,2)
+// tag : V6_vavgubrnd
+def int_hexagon_V6_vavgubrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgubrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgubrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgubrnd_128B
+def int_hexagon_V6_vavgubrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguh,VI_ftype_VIVI,2)
+// tag : V6_vavguh
+def int_hexagon_V6_vavguh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguh_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguh_128B
+def int_hexagon_V6_vavguh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguhrnd,VI_ftype_VIVI,2)
+// tag : V6_vavguhrnd
+def int_hexagon_V6_vavguhrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguhrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguhrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguhrnd_128B
+def int_hexagon_V6_vavguhrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgh,VI_ftype_VIVI,2)
+// tag : V6_vavgh
+def int_hexagon_V6_vavgh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgh_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgh_128B
+def int_hexagon_V6_vavgh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavghrnd,VI_ftype_VIVI,2)
+// tag : V6_vavghrnd
+def int_hexagon_V6_vavghrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavghrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavghrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavghrnd_128B
+def int_hexagon_V6_vavghrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgh,VI_ftype_VIVI,2)
+// tag : V6_vnavgh
+def int_hexagon_V6_vnavgh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgh_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgh_128B
+def int_hexagon_V6_vnavgh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgw,VI_ftype_VIVI,2)
+// tag : V6_vavgw
+def int_hexagon_V6_vavgw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgw_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgw_128B
+def int_hexagon_V6_vavgw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgwrnd,VI_ftype_VIVI,2)
+// tag : V6_vavgwrnd
+def int_hexagon_V6_vavgwrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgwrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgwrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgwrnd_128B
+def int_hexagon_V6_vavgwrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgw,VI_ftype_VIVI,2)
+// tag : V6_vnavgw
+def int_hexagon_V6_vnavgw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgw_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgw_128B
+def int_hexagon_V6_vnavgw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffub,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffub
+def int_hexagon_V6_vabsdiffub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffub_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffub_128B
+def int_hexagon_V6_vabsdiffub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffuh
+def int_hexagon_V6_vabsdiffuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffuh_128B
+def int_hexagon_V6_vabsdiffuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffh,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffh
+def int_hexagon_V6_vabsdiffh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffh_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffh_128B
+def int_hexagon_V6_vabsdiffh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffw,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffw
+def int_hexagon_V6_vabsdiffw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffw_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffw_128B
+def int_hexagon_V6_vabsdiffw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgub,VI_ftype_VIVI,2)
+// tag : V6_vnavgub
+def int_hexagon_V6_vnavgub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgub_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgub_128B
+def int_hexagon_V6_vnavgub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh,VD_ftype_VIVI,2)
+// tag : V6_vaddubh
+def int_hexagon_V6_vaddubh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh_128B,VD_ftype_VIVI,2)
+// tag : V6_vaddubh_128B
+def int_hexagon_V6_vaddubh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububh,VD_ftype_VIVI,2)
+// tag : V6_vsububh
+def int_hexagon_V6_vsububh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsububh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububh_128B,VD_ftype_VIVI,2)
+// tag : V6_vsububh_128B
+def int_hexagon_V6_vsububh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsububh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw,VD_ftype_VIVI,2)
+// tag : V6_vaddhw
+def int_hexagon_V6_vaddhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vaddhw_128B
+def int_hexagon_V6_vaddhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhw,VD_ftype_VIVI,2)
+// tag : V6_vsubhw
+def int_hexagon_V6_vsubhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vsubhw_128B
+def int_hexagon_V6_vsubhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw,VD_ftype_VIVI,2)
+// tag : V6_vadduhw
+def int_hexagon_V6_vadduhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vadduhw_128B
+def int_hexagon_V6_vadduhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhw,VD_ftype_VIVI,2)
+// tag : V6_vsubuhw
+def int_hexagon_V6_vsubuhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubuhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vsubuhw_128B
+def int_hexagon_V6_vsubuhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vd0,VI_ftype_,0)
+// tag : V6_vd0
+def int_hexagon_V6_vd0 :
+Hexagon_v512_Intrinsic<"HEXAGON_V6_vd0">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vd0_128B,VI_ftype_,0)
+// tag : V6_vd0_128B
+def int_hexagon_V6_vd0_128B :
+Hexagon_v1024_Intrinsic<"HEXAGON_V6_vd0_128B">;
+
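+// The q/nq intrinsics below take a predicate (QV) operand in addition to two
+// vectors. Judging by the BUILTIN_INFO signatures and the usual HVX
+// predicated-update pattern, the operation is applied only in lanes where the
+// predicate is true (q) or false (nq), with the remaining lanes keeping the
+// first vector operand.
+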
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbq
+def int_hexagon_V6_vaddbq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbq_128B
+def int_hexagon_V6_vaddbq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbq
+def int_hexagon_V6_vsubbq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbq_128B
+def int_hexagon_V6_vsubbq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbnq
+def int_hexagon_V6_vaddbnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbnq_128B
+def int_hexagon_V6_vaddbnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbnq
+def int_hexagon_V6_vsubbnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbnq_128B
+def int_hexagon_V6_vsubbnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhq
+def int_hexagon_V6_vaddhq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhq_128B
+def int_hexagon_V6_vaddhq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhq
+def int_hexagon_V6_vsubhq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhq_128B
+def int_hexagon_V6_vsubhq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhnq
+def int_hexagon_V6_vaddhnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhnq_128B
+def int_hexagon_V6_vaddhnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhnq
+def int_hexagon_V6_vsubhnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhnq_128B
+def int_hexagon_V6_vsubhnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwq
+def int_hexagon_V6_vaddwq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwq_128B
+def int_hexagon_V6_vaddwq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwq
+def int_hexagon_V6_vsubwq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwq_128B
+def int_hexagon_V6_vsubwq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwnq
+def int_hexagon_V6_vaddwnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwnq_128B
+def int_hexagon_V6_vaddwnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwnq
+def int_hexagon_V6_vsubwnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwnq_128B
+def int_hexagon_V6_vsubwnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh,VI_ftype_VI,1)
+// tag : V6_vabsh
+def int_hexagon_V6_vabsh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_128B,VI_ftype_VI,1)
+// tag : V6_vabsh_128B
+def int_hexagon_V6_vabsh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_sat,VI_ftype_VI,1)
+// tag : V6_vabsh_sat
+def int_hexagon_V6_vabsh_sat :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_sat_128B,VI_ftype_VI,1)
+// tag : V6_vabsh_sat_128B
+def int_hexagon_V6_vabsh_sat_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw,VI_ftype_VI,1)
+// tag : V6_vabsw
+def int_hexagon_V6_vabsw :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_128B,VI_ftype_VI,1)
+// tag : V6_vabsw_128B
+def int_hexagon_V6_vabsw_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_sat,VI_ftype_VI,1)
+// tag : V6_vabsw_sat
+def int_hexagon_V6_vabsw_sat :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_sat_128B,VI_ftype_VI,1)
+// tag : V6_vabsw_sat_128B
+def int_hexagon_V6_vabsw_sat_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
+
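+// Multiply intrinsics: the widening vmpy*/vmpa* forms return a double-width
+// (VD) value computed from narrower sources, and the _acc forms take an
+// accumulator of the result type as their first operand, adding the new
+// products into it (visible in the VD_ftype_VDVIVI-style signatures).
+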
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv,VD_ftype_VIVI,2)
+// tag : V6_vmpybv
+def int_hexagon_V6_vmpybv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpybv_128B
+def int_hexagon_V6_vmpybv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybv_acc
+def int_hexagon_V6_vmpybv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybv_acc_128B
+def int_hexagon_V6_vmpybv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv,VD_ftype_VIVI,2)
+// tag : V6_vmpyubv
+def int_hexagon_V6_vmpyubv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyubv_128B
+def int_hexagon_V6_vmpyubv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyubv_acc
+def int_hexagon_V6_vmpyubv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyubv_acc_128B
+def int_hexagon_V6_vmpyubv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv,VD_ftype_VIVI,2)
+// tag : V6_vmpybusv
+def int_hexagon_V6_vmpybusv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpybusv_128B
+def int_hexagon_V6_vmpybusv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybusv_acc
+def int_hexagon_V6_vmpybusv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybusv_acc_128B
+def int_hexagon_V6_vmpybusv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabusv,VD_ftype_VDVD,2)
+// tag : V6_vmpabusv
+def int_hexagon_V6_vmpabusv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabusv_128B,VD_ftype_VDVD,2)
+// tag : V6_vmpabusv_128B
+def int_hexagon_V6_vmpabusv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuuv,VD_ftype_VDVD,2)
+// tag : V6_vmpabuuv
+def int_hexagon_V6_vmpabuuv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabuuv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuuv_128B,VD_ftype_VDVD,2)
+// tag : V6_vmpabuuv_128B
+def int_hexagon_V6_vmpabuuv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv,VD_ftype_VIVI,2)
+// tag : V6_vmpyhv
+def int_hexagon_V6_vmpyhv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyhv_128B
+def int_hexagon_V6_vmpyhv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhv_acc
+def int_hexagon_V6_vmpyhv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhv_acc_128B
+def int_hexagon_V6_vmpyhv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv,VD_ftype_VIVI,2)
+// tag : V6_vmpyuhv
+def int_hexagon_V6_vmpyuhv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyuhv_128B
+def int_hexagon_V6_vmpyuhv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyuhv_acc
+def int_hexagon_V6_vmpyuhv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyuhv_acc_128B
+def int_hexagon_V6_vmpyuhv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs,VI_ftype_VIVI,2)
+// tag : V6_vmpyhvsrs
+def int_hexagon_V6_vmpyhvsrs :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyhvsrs_128B
+def int_hexagon_V6_vmpyhvsrs_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus,VD_ftype_VIVI,2)
+// tag : V6_vmpyhus
+def int_hexagon_V6_vmpyhus :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyhus_128B
+def int_hexagon_V6_vmpyhus_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhus_acc
+def int_hexagon_V6_vmpyhus_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhus_acc_128B
+def int_hexagon_V6_vmpyhus_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih,VI_ftype_VIVI,2)
+// tag : V6_vmpyih
+def int_hexagon_V6_vmpyih :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyih_128B
+def int_hexagon_V6_vmpyih_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyih_acc
+def int_hexagon_V6_vmpyih_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyih_acc_128B
+def int_hexagon_V6_vmpyih_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh,VI_ftype_VIVI,2)
+// tag : V6_vmpyewuh
+def int_hexagon_V6_vmpyewuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyewuh_128B
+def int_hexagon_V6_vmpyewuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh
+def int_hexagon_V6_vmpyowh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_128B
+def int_hexagon_V6_vmpyowh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_rnd
+def int_hexagon_V6_vmpyowh_rnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_rnd_128B
+def int_hexagon_V6_vmpyowh_rnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_sacc
+def int_hexagon_V6_vmpyowh_sacc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_sacc_128B
+def int_hexagon_V6_vmpyowh_sacc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_rnd_sacc
+def int_hexagon_V6_vmpyowh_rnd_sacc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_rnd_sacc_128B
+def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyieoh,VI_ftype_VIVI,2)
+// tag : V6_vmpyieoh
+def int_hexagon_V6_vmpyieoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyieoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyieoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyieoh_128B
+def int_hexagon_V6_vmpyieoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh,VI_ftype_VIVI,2)
+// tag : V6_vmpyiewuh
+def int_hexagon_V6_vmpyiewuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyiewuh_128B
+def int_hexagon_V6_vmpyiewuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiowh,VI_ftype_VIVI,2)
+// tag : V6_vmpyiowh
+def int_hexagon_V6_vmpyiowh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiowh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiowh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyiowh_128B
+def int_hexagon_V6_vmpyiowh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewh_acc
+def int_hexagon_V6_vmpyiewh_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewh_acc_128B
+def int_hexagon_V6_vmpyiewh_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewuh_acc
+def int_hexagon_V6_vmpyiewuh_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewuh_acc_128B
+def int_hexagon_V6_vmpyiewuh_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub,VD_ftype_VISI,2)
+// tag : V6_vmpyub
+def int_hexagon_V6_vmpyub :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyub_128B
+def int_hexagon_V6_vmpyub_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyub_acc
+def int_hexagon_V6_vmpyub_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyub_acc_128B
+def int_hexagon_V6_vmpyub_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus,VD_ftype_VISI,2)
+// tag : V6_vmpybus
+def int_hexagon_V6_vmpybus :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_128B,VD_ftype_VISI,2)
+// tag : V6_vmpybus_128B
+def int_hexagon_V6_vmpybus_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpybus_acc
+def int_hexagon_V6_vmpybus_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpybus_acc_128B
+def int_hexagon_V6_vmpybus_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus,VD_ftype_VDSI,2)
+// tag : V6_vmpabus
+def int_hexagon_V6_vmpabus :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpabus_128B
+def int_hexagon_V6_vmpabus_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabus_acc
+def int_hexagon_V6_vmpabus_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabus_acc_128B
+def int_hexagon_V6_vmpabus_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb,VD_ftype_VDSI,2)
+// tag : V6_vmpahb
+def int_hexagon_V6_vmpahb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpahb_128B
+def int_hexagon_V6_vmpahb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpahb_acc
+def int_hexagon_V6_vmpahb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpahb_acc_128B
+def int_hexagon_V6_vmpahb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh,VD_ftype_VISI,2)
+// tag : V6_vmpyh
+def int_hexagon_V6_vmpyh :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyh_128B
+def int_hexagon_V6_vmpyh_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyhsat_acc
+def int_hexagon_V6_vmpyhsat_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyhsat_acc_128B
+def int_hexagon_V6_vmpyhsat_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhss,VI_ftype_VISI,2)
+// tag : V6_vmpyhss
+def int_hexagon_V6_vmpyhss :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhss">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhss_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyhss_128B
+def int_hexagon_V6_vmpyhss_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs,VI_ftype_VISI,2)
+// tag : V6_vmpyhsrs
+def int_hexagon_V6_vmpyhsrs :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyhsrs_128B
+def int_hexagon_V6_vmpyhsrs_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh,VD_ftype_VISI,2)
+// tag : V6_vmpyuh
+def int_hexagon_V6_vmpyuh :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyuh_128B
+def int_hexagon_V6_vmpyuh_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyuh_acc
+def int_hexagon_V6_vmpyuh_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyuh_acc_128B
+def int_hexagon_V6_vmpyuh_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb,VI_ftype_VISI,2)
+// tag : V6_vmpyihb
+def int_hexagon_V6_vmpyihb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyihb_128B
+def int_hexagon_V6_vmpyihb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyihb_acc
+def int_hexagon_V6_vmpyihb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyihb_acc_128B
+def int_hexagon_V6_vmpyihb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb,VI_ftype_VISI,2)
+// tag : V6_vmpyiwb
+def int_hexagon_V6_vmpyiwb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyiwb_128B
+def int_hexagon_V6_vmpyiwb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwb_acc
+def int_hexagon_V6_vmpyiwb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwb_acc_128B
+def int_hexagon_V6_vmpyiwb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh,VI_ftype_VISI,2)
+// tag : V6_vmpyiwh
+def int_hexagon_V6_vmpyiwh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyiwh_128B
+def int_hexagon_V6_vmpyiwh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwh_acc
+def int_hexagon_V6_vmpyiwh_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwh_acc_128B
+def int_hexagon_V6_vmpyiwh_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vand,VI_ftype_VIVI,2)
+// tag : V6_vand
+def int_hexagon_V6_vand :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vand">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vand_128B,VI_ftype_VIVI,2)
+// tag : V6_vand_128B
+def int_hexagon_V6_vand_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vand_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vor,VI_ftype_VIVI,2)
+// tag : V6_vor
+def int_hexagon_V6_vor :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vor_128B,VI_ftype_VIVI,2)
+// tag : V6_vor_128B
+def int_hexagon_V6_vor_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vxor,VI_ftype_VIVI,2)
+// tag : V6_vxor
+def int_hexagon_V6_vxor :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vxor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vxor_128B,VI_ftype_VIVI,2)
+// tag : V6_vxor_128B
+def int_hexagon_V6_vxor_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vxor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnot,VI_ftype_VI,1)
+// tag : V6_vnot
+def int_hexagon_V6_vnot :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnot">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnot_128B,VI_ftype_VI,1)
+// tag : V6_vnot_128B
+def int_hexagon_V6_vnot_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnot_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt,VI_ftype_QVSI,2)
+// tag : V6_vandqrt
+def int_hexagon_V6_vandqrt :
+Hexagon_v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_128B,VI_ftype_QVSI,2)
+// tag : V6_vandqrt_128B
+def int_hexagon_V6_vandqrt_128B :
+Hexagon_v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc,VI_ftype_VIQVSI,3)
+// tag : V6_vandqrt_acc
+def int_hexagon_V6_vandqrt_acc :
+Hexagon_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc_128B,VI_ftype_VIQVSI,3)
+// tag : V6_vandqrt_acc_128B
+def int_hexagon_V6_vandqrt_acc_128B :
+Hexagon_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt,QV_ftype_VISI,2)
+// tag : V6_vandvrt
+def int_hexagon_V6_vandvrt :
+Hexagon_v64iv512i_Intrinsic<"HEXAGON_V6_vandvrt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_128B,QV_ftype_VISI,2)
+// tag : V6_vandvrt_128B
+def int_hexagon_V6_vandvrt_128B :
+Hexagon_v128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc,QV_ftype_QVVISI,3)
+// tag : V6_vandvrt_acc
+def int_hexagon_V6_vandvrt_acc :
+Hexagon_v64iv64iv512i_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc_128B,QV_ftype_QVVISI,3)
+// tag : V6_vandvrt_acc_128B
+def int_hexagon_V6_vandvrt_acc_128B :
+Hexagon_v128iv128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
+
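+// Vector compares: vgt*/veq* return a predicate (QV) value. The _and/_or/
+// _xor variants additionally take an existing predicate operand and combine
+// it with the fresh compare result using the named boolean operation, as the
+// QV_ftype_QVVIVI signatures indicate.
+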
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw,QV_ftype_VIVI,2)
+// tag : V6_vgtw
+def int_hexagon_V6_vgtw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtw_128B
+def int_hexagon_V6_vgtw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_and
+def int_hexagon_V6_vgtw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_and_128B
+def int_hexagon_V6_vgtw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_or
+def int_hexagon_V6_vgtw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_or_128B
+def int_hexagon_V6_vgtw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_xor
+def int_hexagon_V6_vgtw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_xor_128B
+def int_hexagon_V6_vgtw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw,QV_ftype_VIVI,2)
+// tag : V6_veqw
+def int_hexagon_V6_veqw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_128B,QV_ftype_VIVI,2)
+// tag : V6_veqw_128B
+def int_hexagon_V6_veqw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_and
+def int_hexagon_V6_veqw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_and_128B
+def int_hexagon_V6_veqw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_or
+def int_hexagon_V6_veqw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_or_128B
+def int_hexagon_V6_veqw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_xor
+def int_hexagon_V6_veqw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_xor_128B
+def int_hexagon_V6_veqw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth,QV_ftype_VIVI,2)
+// tag : V6_vgth
+def int_hexagon_V6_vgth :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_128B,QV_ftype_VIVI,2)
+// tag : V6_vgth_128B
+def int_hexagon_V6_vgth_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_and
+def int_hexagon_V6_vgth_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_and_128B
+def int_hexagon_V6_vgth_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_or
+def int_hexagon_V6_vgth_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_or_128B
+def int_hexagon_V6_vgth_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_xor
+def int_hexagon_V6_vgth_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_xor_128B
+def int_hexagon_V6_vgth_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh,QV_ftype_VIVI,2)
+// tag : V6_veqh
+def int_hexagon_V6_veqh :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_128B,QV_ftype_VIVI,2)
+// tag : V6_veqh_128B
+def int_hexagon_V6_veqh_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_and
+def int_hexagon_V6_veqh_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_and_128B
+def int_hexagon_V6_veqh_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_or
+def int_hexagon_V6_veqh_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_or_128B
+def int_hexagon_V6_veqh_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_xor
+def int_hexagon_V6_veqh_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_xor_128B
+def int_hexagon_V6_veqh_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb,QV_ftype_VIVI,2)
+// tag : V6_vgtb
+def int_hexagon_V6_vgtb :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtb_128B
+def int_hexagon_V6_vgtb_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_and
+def int_hexagon_V6_vgtb_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_and_128B
+def int_hexagon_V6_vgtb_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_or
+def int_hexagon_V6_vgtb_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_or_128B
+def int_hexagon_V6_vgtb_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_xor
+def int_hexagon_V6_vgtb_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_xor_128B
+def int_hexagon_V6_vgtb_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb,QV_ftype_VIVI,2)
+// tag : V6_veqb
+def int_hexagon_V6_veqb :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_128B,QV_ftype_VIVI,2)
+// tag : V6_veqb_128B
+def int_hexagon_V6_veqb_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_and
+def int_hexagon_V6_veqb_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_and_128B
+def int_hexagon_V6_veqb_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_or
+def int_hexagon_V6_veqb_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_or_128B
+def int_hexagon_V6_veqb_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_xor
+def int_hexagon_V6_veqb_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_xor_128B
+def int_hexagon_V6_veqb_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw,QV_ftype_VIVI,2)
+// tag : V6_vgtuw
+def int_hexagon_V6_vgtuw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtuw_128B
+def int_hexagon_V6_vgtuw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_and
+def int_hexagon_V6_vgtuw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_and_128B
+def int_hexagon_V6_vgtuw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_or
+def int_hexagon_V6_vgtuw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_or_128B
+def int_hexagon_V6_vgtuw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_xor
+def int_hexagon_V6_vgtuw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_xor_128B
+def int_hexagon_V6_vgtuw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh,QV_ftype_VIVI,2)
+// tag : V6_vgtuh
+def int_hexagon_V6_vgtuh :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtuh_128B
+def int_hexagon_V6_vgtuh_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_and
+def int_hexagon_V6_vgtuh_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_and_128B
+def int_hexagon_V6_vgtuh_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_or
+def int_hexagon_V6_vgtuh_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_or_128B
+def int_hexagon_V6_vgtuh_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_xor
+def int_hexagon_V6_vgtuh_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_xor_128B
+def int_hexagon_V6_vgtuh_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub,QV_ftype_VIVI,2)
+// tag : V6_vgtub
+def int_hexagon_V6_vgtub :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtub_128B
+def int_hexagon_V6_vgtub_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_and
+def int_hexagon_V6_vgtub_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_and_128B
+def int_hexagon_V6_vgtub_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_or
+def int_hexagon_V6_vgtub_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_or_128B
+def int_hexagon_V6_vgtub_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_xor
+def int_hexagon_V6_vgtub_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_xor_128B
+def int_hexagon_V6_vgtub_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or,QV_ftype_QVQV,2)
+// tag : V6_pred_or
+def int_hexagon_V6_pred_or :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_or_128B
+def int_hexagon_V6_pred_or_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and,QV_ftype_QVQV,2)
+// tag : V6_pred_and
+def int_hexagon_V6_pred_and :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_and_128B
+def int_hexagon_V6_pred_and_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_not,QV_ftype_QV,1)
+// tag : V6_pred_not
+def int_hexagon_V6_pred_not :
+Hexagon_v64iv64i_Intrinsic<"HEXAGON_V6_pred_not">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_not_128B,QV_ftype_QV,1)
+// tag : V6_pred_not_128B
+def int_hexagon_V6_pred_not_128B :
+Hexagon_v128iv128i_Intrinsic<"HEXAGON_V6_pred_not_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_xor,QV_ftype_QVQV,2)
+// tag : V6_pred_xor
+def int_hexagon_V6_pred_xor :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_xor_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_xor_128B
+def int_hexagon_V6_pred_xor_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_n,QV_ftype_QVQV,2)
+// tag : V6_pred_and_n
+def int_hexagon_V6_pred_and_n :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and_n">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_n_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_and_n_128B
+def int_hexagon_V6_pred_and_n_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_n,QV_ftype_QVQV,2)
+// tag : V6_pred_or_n
+def int_hexagon_V6_pred_or_n :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or_n">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_n_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_or_n_128B
+def int_hexagon_V6_pred_or_n_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2,QV_ftype_SI,1)
+// tag : V6_pred_scalar2
+def int_hexagon_V6_pred_scalar2 :
+Hexagon_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2_128B,QV_ftype_SI,1)
+// tag : V6_pred_scalar2_128B
+def int_hexagon_V6_pred_scalar2_128B :
+Hexagon_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmux,VI_ftype_QVVIVI,3)
+// tag : V6_vmux
+def int_hexagon_V6_vmux :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vmux">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmux_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vmux_128B
+def int_hexagon_V6_vmux_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vmux_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vswap,VD_ftype_QVVIVI,3)
+// tag : V6_vswap
+def int_hexagon_V6_vswap :
+Hexagon_v1024v64iv512v512_Intrinsic<"HEXAGON_V6_vswap">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vswap_128B,VD_ftype_QVVIVI,3)
+// tag : V6_vswap_128B
+def int_hexagon_V6_vswap_128B :
+Hexagon_v2048v128iv1024v1024_Intrinsic<"HEXAGON_V6_vswap_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxub,VI_ftype_VIVI,2)
+// tag : V6_vmaxub
+def int_hexagon_V6_vmaxub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxub_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxub_128B
+def int_hexagon_V6_vmaxub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminub,VI_ftype_VIVI,2)
+// tag : V6_vminub
+def int_hexagon_V6_vminub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminub_128B,VI_ftype_VIVI,2)
+// tag : V6_vminub_128B
+def int_hexagon_V6_vminub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxuh,VI_ftype_VIVI,2)
+// tag : V6_vmaxuh
+def int_hexagon_V6_vmaxuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxuh_128B
+def int_hexagon_V6_vmaxuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminuh,VI_ftype_VIVI,2)
+// tag : V6_vminuh
+def int_hexagon_V6_vminuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vminuh_128B
+def int_hexagon_V6_vminuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxh,VI_ftype_VIVI,2)
+// tag : V6_vmaxh
+def int_hexagon_V6_vmaxh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxh_128B
+def int_hexagon_V6_vmaxh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminh,VI_ftype_VIVI,2)
+// tag : V6_vminh
+def int_hexagon_V6_vminh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminh_128B,VI_ftype_VIVI,2)
+// tag : V6_vminh_128B
+def int_hexagon_V6_vminh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxw,VI_ftype_VIVI,2)
+// tag : V6_vmaxw
+def int_hexagon_V6_vmaxw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxw_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxw_128B
+def int_hexagon_V6_vmaxw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminw,VI_ftype_VIVI,2)
+// tag : V6_vminw
+def int_hexagon_V6_vminw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminw_128B,VI_ftype_VIVI,2)
+// tag : V6_vminw_128B
+def int_hexagon_V6_vminw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsathub,VI_ftype_VIVI,2)
+// tag : V6_vsathub
+def int_hexagon_V6_vsathub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsathub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsathub_128B,VI_ftype_VIVI,2)
+// tag : V6_vsathub_128B
+def int_hexagon_V6_vsathub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsathub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatwh,VI_ftype_VIVI,2)
+// tag : V6_vsatwh
+def int_hexagon_V6_vsatwh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsatwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatwh_128B,VI_ftype_VIVI,2)
+// tag : V6_vsatwh_128B
+def int_hexagon_V6_vsatwh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffeb,VI_ftype_VIVI,2)
+// tag : V6_vshuffeb
+def int_hexagon_V6_vshuffeb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffeb_128B,VI_ftype_VIVI,2)
+// tag : V6_vshuffeb_128B
+def int_hexagon_V6_vshuffeb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffob,VI_ftype_VIVI,2)
+// tag : V6_vshuffob
+def int_hexagon_V6_vshuffob :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffob_128B,VI_ftype_VIVI,2)
+// tag : V6_vshuffob_128B
+def int_hexagon_V6_vshuffob_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufeh,VI_ftype_VIVI,2)
+// tag : V6_vshufeh
+def int_hexagon_V6_vshufeh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufeh_128B,VI_ftype_VIVI,2)
+// tag : V6_vshufeh_128B
+def int_hexagon_V6_vshufeh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoh,VI_ftype_VIVI,2)
+// tag : V6_vshufoh
+def int_hexagon_V6_vshufoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vshufoh_128B
+def int_hexagon_V6_vshufoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffvdd,VD_ftype_VIVISI,3)
+// tag : V6_vshuffvdd
+def int_hexagon_V6_vshuffvdd :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vshuffvdd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffvdd_128B,VD_ftype_VIVISI,3)
+// tag : V6_vshuffvdd_128B
+def int_hexagon_V6_vshuffvdd_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealvdd,VD_ftype_VIVISI,3)
+// tag : V6_vdealvdd
+def int_hexagon_V6_vdealvdd :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vdealvdd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealvdd_128B,VD_ftype_VIVISI,3)
+// tag : V6_vdealvdd_128B
+def int_hexagon_V6_vdealvdd_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeh,VD_ftype_VIVI,2)
+// tag : V6_vshufoeh
+def int_hexagon_V6_vshufoeh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeh_128B,VD_ftype_VIVI,2)
+// tag : V6_vshufoeh_128B
+def int_hexagon_V6_vshufoeh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeb,VD_ftype_VIVI,2)
+// tag : V6_vshufoeb
+def int_hexagon_V6_vshufoeb :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeb_128B,VD_ftype_VIVI,2)
+// tag : V6_vshufoeb_128B
+def int_hexagon_V6_vshufoeb_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealh,VI_ftype_VI,1)
+// tag : V6_vdealh
+def int_hexagon_V6_vdealh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealh_128B,VI_ftype_VI,1)
+// tag : V6_vdealh_128B
+def int_hexagon_V6_vdealh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb,VI_ftype_VI,1)
+// tag : V6_vdealb
+def int_hexagon_V6_vdealb :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb_128B,VI_ftype_VI,1)
+// tag : V6_vdealb_128B
+def int_hexagon_V6_vdealb_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb4w,VI_ftype_VIVI,2)
+// tag : V6_vdealb4w
+def int_hexagon_V6_vdealb4w :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdealb4w">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb4w_128B,VI_ftype_VIVI,2)
+// tag : V6_vdealb4w_128B
+def int_hexagon_V6_vdealb4w_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffh,VI_ftype_VI,1)
+// tag : V6_vshuffh
+def int_hexagon_V6_vshuffh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffh_128B,VI_ftype_VI,1)
+// tag : V6_vshuffh_128B
+def int_hexagon_V6_vshuffh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffb,VI_ftype_VI,1)
+// tag : V6_vshuffb
+def int_hexagon_V6_vshuffb :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffb_128B,VI_ftype_VI,1)
+// tag : V6_vshuffb_128B
+def int_hexagon_V6_vshuffb_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_extractw,SI_ftype_VISI,2)
+// tag : V6_extractw
+def int_hexagon_V6_extractw :
+Hexagon_iv512i_Intrinsic<"HEXAGON_V6_extractw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_extractw_128B,SI_ftype_VISI,2)
+// tag : V6_extractw_128B
+def int_hexagon_V6_extractw_128B :
+Hexagon_iv1024i_Intrinsic<"HEXAGON_V6_extractw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vinsertwr,VI_ftype_VISI,2)
+// tag : V6_vinsertwr
+def int_hexagon_V6_vinsertwr :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vinsertwr">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vinsertwr_128B,VI_ftype_VISI,2)
+// tag : V6_vinsertwr_128B
+def int_hexagon_V6_vinsertwr_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatw,VI_ftype_SI,1)
+// tag : V6_lvsplatw
+def int_hexagon_V6_lvsplatw :
+Hexagon_v512i_Intrinsic<"HEXAGON_V6_lvsplatw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatw_128B,VI_ftype_SI,1)
+// tag : V6_lvsplatw_128B
+def int_hexagon_V6_lvsplatw_128B :
+Hexagon_v1024i_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vassign,VI_ftype_VI,1)
+// tag : V6_vassign
+def int_hexagon_V6_vassign :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vassign">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vassign_128B,VI_ftype_VI,1)
+// tag : V6_vassign_128B
+def int_hexagon_V6_vassign_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vassign_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcombine,VD_ftype_VIVI,2)
+// tag : V6_vcombine
+def int_hexagon_V6_vcombine :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vcombine">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcombine_128B,VD_ftype_VIVI,2)
+// tag : V6_vcombine_128B
+def int_hexagon_V6_vcombine_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vcombine_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdelta,VI_ftype_VIVI,2)
+// tag : V6_vdelta
+def int_hexagon_V6_vdelta :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdelta">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdelta_128B,VI_ftype_VIVI,2)
+// tag : V6_vdelta_128B
+def int_hexagon_V6_vdelta_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdelta_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrdelta,VI_ftype_VIVI,2)
+// tag : V6_vrdelta
+def int_hexagon_V6_vrdelta :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrdelta">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrdelta_128B,VI_ftype_VIVI,2)
+// tag : V6_vrdelta_128B
+def int_hexagon_V6_vrdelta_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0w,VI_ftype_VI,1)
+// tag : V6_vcl0w
+def int_hexagon_V6_vcl0w :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0w">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0w_128B,VI_ftype_VI,1)
+// tag : V6_vcl0w_128B
+def int_hexagon_V6_vcl0w_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0h,VI_ftype_VI,1)
+// tag : V6_vcl0h
+def int_hexagon_V6_vcl0h :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0h">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0h_128B,VI_ftype_VI,1)
+// tag : V6_vcl0h_128B
+def int_hexagon_V6_vcl0h_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamtw,VI_ftype_VI,1)
+// tag : V6_vnormamtw
+def int_hexagon_V6_vnormamtw :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamtw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamtw_128B,VI_ftype_VI,1)
+// tag : V6_vnormamtw_128B
+def int_hexagon_V6_vnormamtw_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamth,VI_ftype_VI,1)
+// tag : V6_vnormamth
+def int_hexagon_V6_vnormamth :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamth_128B,VI_ftype_VI,1)
+// tag : V6_vnormamth_128B
+def int_hexagon_V6_vnormamth_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpopcounth,VI_ftype_VI,1)
+// tag : V6_vpopcounth
+def int_hexagon_V6_vpopcounth :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vpopcounth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpopcounth_128B,VI_ftype_VI,1)
+// tag : V6_vpopcounth_128B
+def int_hexagon_V6_vpopcounth_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb
+def int_hexagon_V6_vlutvvb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb_128B
+def int_hexagon_V6_vlutvvb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracc
+def int_hexagon_V6_vlutvvb_oracc :
+Hexagon_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc_128B,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracc_128B
+def int_hexagon_V6_vlutvvb_oracc_128B :
+Hexagon_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh
+def int_hexagon_V6_vlutvwh :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_128B,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh_128B
+def int_hexagon_V6_vlutvwh_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracc
+def int_hexagon_V6_vlutvwh_oracc :
+Hexagon_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc_128B,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracc_128B
+def int_hexagon_V6_vlutvwh_oracc_128B :
+Hexagon_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
+
+//
+// Masked vector stores
+//
+def int_hexagon_V6_vS32b_qpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai">;
+
+def int_hexagon_V6_vS32b_nqpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai">;
+
+def int_hexagon_V6_vS32b_nt_qpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai">;
+
+def int_hexagon_V6_vS32b_nt_nqpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai">;
+
+def int_hexagon_V6_vS32b_qpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B">;
+
+def int_hexagon_V6_vS32b_nqpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B">;
+
+def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B">;
+
+def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B">;
+
+def int_hexagon_V6_vmaskedstoreq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstoreq">;
+
+def int_hexagon_V6_vmaskedstorenq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorenq">;
+
+def int_hexagon_V6_vmaskedstorentq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentq">;
+
+def int_hexagon_V6_vmaskedstorentnq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentnq">;
+
+def int_hexagon_V6_vmaskedstoreq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstoreq_128B">;
+
+def int_hexagon_V6_vmaskedstorenq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorenq_128B">;
+
+def int_hexagon_V6_vmaskedstorentq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentq_128B">;
+
+def int_hexagon_V6_vmaskedstorentnq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentnq_128B">;
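+
+// Naming scheme for the masked stores above, as read from the names and
+// type lists: the "qpred"/"q" forms store under the predicate, the
+// "nqpred"/"nq" forms under its complement, "nt" marks the non-temporal
+// variants, and "_128B" selects the 128-byte HVX mode as elsewhere in
+// this file.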
+
+multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
+  def NAME#_pci : Hexagon_NonGCC_Intrinsic<
+    [ElTy, llvm_ptr_ty],
+    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<3>]>;
+  def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
+    [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<2>]>;
+}
+
+defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrb : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadruh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadri : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrd : Hexagon_custom_circ_ld_Intrinsic<llvm_i64_ty>;
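+
+// For illustration (not part of the generated records): with
+// ElTy = llvm_i32_ty, the defm for int_hexagon_L2_loadri above expands to
+// the following two records, so each circular load returns both the loaded
+// value and the post-updated pointer:
+//
+//   def int_hexagon_L2_loadri_pci : Hexagon_NonGCC_Intrinsic<
+//     [llvm_i32_ty, llvm_ptr_ty],
+//     [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
+//     [IntrArgMemOnly, NoCapture<3>]>;
+//   def int_hexagon_L2_loadri_pcr : Hexagon_NonGCC_Intrinsic<
+//     [llvm_i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
+//     [IntrArgMemOnly, NoCapture<2>]>;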
+
+multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
+  def NAME#_pci : Hexagon_NonGCC_Intrinsic<
+    [llvm_ptr_ty],
+    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<4>]>;
+  def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
+    [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<3>]>;
+}
+
+defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerh : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;
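+
+// Likewise for the stores: with ElTy = llvm_i32_ty, int_hexagon_S2_storeri
+// expands to records that take the value to store as an extra operand and
+// return only the updated pointer (illustrative expansion):
+//
+//   def int_hexagon_S2_storeri_pci : Hexagon_NonGCC_Intrinsic<
+//     [llvm_ptr_ty],
+//     [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
+//     [IntrArgMemOnly, NoCapture<4>]>;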
+
+// The front-end emits the intrinsic call with only two arguments. The
+// builtin's third argument is handled by the front-end itself, which writes
+// the loaded value to that address by generating an explicit store.
+class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
+ : Hexagon_NonGCC_Intrinsic<
+    [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
+    [IntrReadMem]>;
+
+def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
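+
+// For example, with ElTy = llvm_i64_ty the loadrd_pbr def above is
+// equivalent to (illustrative expansion, not part of the file):
+//
+//   def int_hexagon_L2_loadrd_pbr : Hexagon_NonGCC_Intrinsic<
+//     [llvm_i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
+//     [IntrReadMem]>;
+//
+// The two results are the loaded value and, presumably, the post-updated
+// bit-reversed address.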
+
+def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
+def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
+def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
+def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
+def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
+
+
+///
+/// HexagonV62 intrinsics
+///
+
+//
+// Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
+// tag : M6_vabsdiffb
+class Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_LLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_vsplatrbp
+class Hexagon_LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlsrb
+class Hexagon_V62_v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlsrb_128B
+class Hexagon_V62_v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vasrwuhrndsat
+class Hexagon_V62_v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vasrwuhrndsat_128B
+class Hexagon_V62_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrounduwuh
+class Hexagon_V62_v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrounduwuh_128B
+class Hexagon_V62_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+// tag : V6_vadduwsat_dv_128B
+class Hexagon_V62_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddhw_acc
+class Hexagon_V62_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddhw_acc_128B
+class Hexagon_V62_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyewuh_64
+class Hexagon_V62_v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyewuh_64_128B
+class Hexagon_V62_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpauhb_128B
+class Hexagon_V62_v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpauhb_acc_128B
+class Hexagon_V62_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandnqrt
+class Hexagon_V62_v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandnqrt_128B
+class Hexagon_V62_v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandnqrt_acc
+class Hexagon_V62_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandnqrt_acc_128B
+class Hexagon_V62_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v64iv512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvqv
+class Hexagon_V62_v512v64iv512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v128iv1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvqv_128B
+class Hexagon_V62_v1024v128iv1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2v2
+class Hexagon_V62_v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2v2_128B
+class Hexagon_V62_v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_shuffeqw
+class Hexagon_V62_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_shuffeqw_128B
+class Hexagon_V62_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplath
+class Hexagon_V62_v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplath_128B
+class Hexagon_V62_v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracci
+class Hexagon_V62_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracci_128B
+class Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwhi
+class Hexagon_V62_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwhi_128B
+class Hexagon_V62_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracci
+class Hexagon_V62_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracci_128B
+class Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v512v64iv512v512v64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddcarry
+class Hexagon_v512v64iv512v512v64i_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty, llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_v1024v128iv1024v1024v128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddcarry_128B
+class Hexagon_v1024v128iv1024v1024v128i_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty, llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
+                          [IntrNoMem]>;
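+
+// Note: unlike the single-result classes above, these two carry classes
+// return a two-element result list ({vector, predicate}), so the carry-out
+// predicate comes back as a second return value rather than through memory.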
+
+
+//
+// BUILTIN_INFO(HEXAGON.M6_vabsdiffb,DI_ftype_DIDI,2)
+// tag : M6_vabsdiffb
+def int_hexagon_M6_vabsdiffb :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffb">;
+
+//
+// BUILTIN_INFO(HEXAGON.M6_vabsdiffub,DI_ftype_DIDI,2)
+// tag : M6_vabsdiffub
+def int_hexagon_M6_vabsdiffub :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffub">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vtrunehb_ppp,DI_ftype_DIDI,2)
+// tag : S6_vtrunehb_ppp
+def int_hexagon_S6_vtrunehb_ppp :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vtrunohb_ppp,DI_ftype_DIDI,2)
+// tag : S6_vtrunohb_ppp
+def int_hexagon_S6_vtrunohb_ppp :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vsplatrbp,DI_ftype_SI,1)
+// tag : S6_vsplatrbp
+def int_hexagon_S6_vsplatrbp :
+Hexagon_LLii_Intrinsic<"HEXAGON_S6_vsplatrbp">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrb,VI_ftype_VISI,2)
+// tag : V6_vlsrb
+def int_hexagon_V6_vlsrb :
+Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vlsrb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrb_128B,VI_ftype_VISI,2)
+// tag : V6_vlsrb_128B
+def int_hexagon_V6_vlsrb_128B :
+Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhrndsat
+def int_hexagon_V6_vasrwuhrndsat :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhrndsat_128B
+def int_hexagon_V6_vasrwuhrndsat_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasruwuhrndsat
+def int_hexagon_V6_vasruwuhrndsat :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasruwuhrndsat_128B
+def int_hexagon_V6_vasruwuhrndsat_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbsat
+def int_hexagon_V6_vasrhbsat :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbsat_128B
+def int_hexagon_V6_vasrhbsat_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrounduwuh,VI_ftype_VIVI,2)
+// tag : V6_vrounduwuh
+def int_hexagon_V6_vrounduwuh :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduwuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrounduwuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vrounduwuh_128B
+def int_hexagon_V6_vrounduwuh_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrounduhub,VI_ftype_VIVI,2)
+// tag : V6_vrounduhub
+def int_hexagon_V6_vrounduhub :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduhub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrounduhub_128B,VI_ftype_VIVI,2)
+// tag : V6_vrounduhub_128B
+def int_hexagon_V6_vrounduhub_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduwsat,VI_ftype_VIVI,2)
+// tag : V6_vadduwsat
+def int_hexagon_V6_vadduwsat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vadduwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vadduwsat_128B
+def int_hexagon_V6_vadduwsat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vadduwsat_dv
+def int_hexagon_V6_vadduwsat_dv :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vadduwsat_dv_128B
+def int_hexagon_V6_vadduwsat_dv_128B :
+Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuwsat,VI_ftype_VIVI,2)
+// tag : V6_vsubuwsat
+def int_hexagon_V6_vsubuwsat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubuwsat_128B
+def int_hexagon_V6_vsubuwsat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubuwsat_dv
+def int_hexagon_V6_vsubuwsat_dv :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubuwsat_dv_128B
+def int_hexagon_V6_vsubuwsat_dv_128B :
+Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbsat,VI_ftype_VIVI,2)
+// tag : V6_vaddbsat
+def int_hexagon_V6_vaddbsat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddbsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddbsat_128B
+def int_hexagon_V6_vaddbsat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddbsat_dv
+def int_hexagon_V6_vaddbsat_dv :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddbsat_dv_128B
+def int_hexagon_V6_vaddbsat_dv_128B :
+Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbsat,VI_ftype_VIVI,2)
+// tag : V6_vsubbsat
+def int_hexagon_V6_vsubbsat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubbsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubbsat_128B
+def int_hexagon_V6_vsubbsat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubbsat_dv
+def int_hexagon_V6_vsubbsat_dv :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubbsat_dv_128B
+def int_hexagon_V6_vsubbsat_dv_128B :
+Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat,VI_ftype_VIVI,2)
+// tag : V6_vaddububb_sat
+def int_hexagon_V6_vaddububb_sat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddububb_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddububb_sat_128B
+def int_hexagon_V6_vaddububb_sat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat,VI_ftype_VIVI,2)
+// tag : V6_vsubububb_sat
+def int_hexagon_V6_vsubububb_sat :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubububb_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubububb_sat_128B
+def int_hexagon_V6_vsubububb_sat_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vaddhw_acc
+def int_hexagon_V6_vaddhw_acc :
+Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vaddhw_acc_128B
+def int_hexagon_V6_vaddhw_acc_128B :
+Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vadduhw_acc
+def int_hexagon_V6_vadduhw_acc :
+Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vadduhw_acc_128B
+def int_hexagon_V6_vadduhw_acc_128B :
+Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vaddubh_acc
+def int_hexagon_V6_vaddubh_acc :
+Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vaddubh_acc_128B
+def int_hexagon_V6_vaddubh_acc_128B :
+Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64,VD_ftype_VIVI,2)
+// tag : V6_vmpyewuh_64
+def int_hexagon_V6_vmpyewuh_64 :
+Hexagon_V62_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyewuh_64_128B
+def int_hexagon_V6_vmpyewuh_64_128B :
+Hexagon_V62_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyowh_64_acc
+def int_hexagon_V6_vmpyowh_64_acc :
+Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyowh_64_acc_128B
+def int_hexagon_V6_vmpyowh_64_acc_128B :
+Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhb,VD_ftype_VDSI,2)
+// tag : V6_vmpauhb
+def int_hexagon_V6_vmpauhb :
+Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhb_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpauhb_128B
+def int_hexagon_V6_vmpauhb_128B :
+Hexagon_V62_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpauhb_acc
+def int_hexagon_V6_vmpauhb_acc :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpauhb_acc_128B
+def int_hexagon_V6_vmpauhb_acc_128B :
+Hexagon_V62_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwub,VI_ftype_VISI,2)
+// tag : V6_vmpyiwub
+def int_hexagon_V6_vmpyiwub :
+Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyiwub_128B
+def int_hexagon_V6_vmpyiwub_128B :
+Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwub_acc
+def int_hexagon_V6_vmpyiwub_acc :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwub_acc_128B
+def int_hexagon_V6_vmpyiwub_acc_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandnqrt,VI_ftype_QVSI,2)
+// tag : V6_vandnqrt
+def int_hexagon_V6_vandnqrt :
+Hexagon_V62_v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandnqrt_128B,VI_ftype_QVSI,2)
+// tag : V6_vandnqrt_128B
+def int_hexagon_V6_vandnqrt_128B :
+Hexagon_V62_v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc,VI_ftype_VIQVSI,3)
+// tag : V6_vandnqrt_acc
+def int_hexagon_V6_vandnqrt_acc :
+Hexagon_V62_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc_128B,VI_ftype_VIQVSI,3)
+// tag : V6_vandnqrt_acc_128B
+def int_hexagon_V6_vandnqrt_acc_128B :
+Hexagon_V62_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvqv,VI_ftype_QVVI,2)
+// tag : V6_vandvqv
+def int_hexagon_V6_vandvqv :
+Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvqv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvqv_128B,VI_ftype_QVVI,2)
+// tag : V6_vandvqv_128B
+def int_hexagon_V6_vandvqv_128B :
+Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvqv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvnqv,VI_ftype_QVVI,2)
+// tag : V6_vandvnqv
+def int_hexagon_V6_vandvnqv :
+Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvnqv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvnqv_128B,VI_ftype_QVVI,2)
+// tag : V6_vandvnqv_128B
+def int_hexagon_V6_vandvnqv_128B :
+Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2,QV_ftype_SI,1)
+// tag : V6_pred_scalar2v2
+def int_hexagon_V6_pred_scalar2v2 :
+Hexagon_V62_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2_128B,QV_ftype_SI,1)
+// tag : V6_pred_scalar2v2_128B
+def int_hexagon_V6_pred_scalar2v2_128B :
+Hexagon_V62_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_shuffeqw,QV_ftype_QVQV,2)
+// tag : V6_shuffeqw
+def int_hexagon_V6_shuffeqw :
+Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_shuffeqw_128B,QV_ftype_QVQV,2)
+// tag : V6_shuffeqw_128B
+def int_hexagon_V6_shuffeqw_128B :
+Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_shuffeqh,QV_ftype_QVQV,2)
+// tag : V6_shuffeqh
+def int_hexagon_V6_shuffeqh :
+Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_shuffeqh_128B,QV_ftype_QVQV,2)
+// tag : V6_shuffeqh_128B
+def int_hexagon_V6_shuffeqh_128B :
+Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxb,VI_ftype_VIVI,2)
+// tag : V6_vmaxb
+def int_hexagon_V6_vmaxb :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxb_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxb_128B
+def int_hexagon_V6_vmaxb_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminb,VI_ftype_VIVI,2)
+// tag : V6_vminb
+def int_hexagon_V6_vminb :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vminb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminb_128B,VI_ftype_VIVI,2)
+// tag : V6_vminb_128B
+def int_hexagon_V6_vminb_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatuwuh,VI_ftype_VIVI,2)
+// tag : V6_vsatuwuh
+def int_hexagon_V6_vsatuwuh :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsatuwuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatuwuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vsatuwuh_128B
+def int_hexagon_V6_vsatuwuh_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplath,VI_ftype_SI,1)
+// tag : V6_lvsplath
+def int_hexagon_V6_lvsplath :
+Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplath">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplath_128B,VI_ftype_SI,1)
+// tag : V6_lvsplath_128B
+def int_hexagon_V6_lvsplath_128B :
+Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatb,VI_ftype_SI,1)
+// tag : V6_lvsplatb
+def int_hexagon_V6_lvsplatb :
+Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplatb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatb_128B,VI_ftype_SI,1)
+// tag : V6_lvsplatb_128B
+def int_hexagon_V6_lvsplatb_128B :
+Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddclbw,VI_ftype_VIVI,2)
+// tag : V6_vaddclbw
+def int_hexagon_V6_vaddclbw :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddclbw_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddclbw_128B
+def int_hexagon_V6_vaddclbw_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddclbh,VI_ftype_VIVI,2)
+// tag : V6_vaddclbh
+def int_hexagon_V6_vaddclbh :
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddclbh_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddclbh_128B
+def int_hexagon_V6_vaddclbh_128B :
+Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvbi,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvbi
+def int_hexagon_V6_vlutvvbi :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvbi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvbi_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvbi_128B
+def int_hexagon_V6_vlutvvbi_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvbi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracci
+def int_hexagon_V6_vlutvvb_oracci :
+Hexagon_V62_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci_128B,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracci_128B
+def int_hexagon_V6_vlutvvb_oracci_128B :
+Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwhi,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwhi
+def int_hexagon_V6_vlutvwhi :
+Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwhi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwhi_128B,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwhi_128B
+def int_hexagon_V6_vlutvwhi_128B :
+Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwhi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracci
+def int_hexagon_V6_vlutvwh_oracci :
+Hexagon_V62_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci_128B,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracci_128B
+def int_hexagon_V6_vlutvwh_oracci_128B :
+Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb_nm
+def int_hexagon_V6_vlutvvb_nm :
+Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb_nm_128B
+def int_hexagon_V6_vlutvvb_nm_128B :
+Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh_nm
+def int_hexagon_V6_vlutvwh_nm :
+Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm_128B,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh_nm_128B
+def int_hexagon_V6_vlutvwh_nm_128B :
+Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddcarry,VI_ftype_VIVIQV,3)
+// tag : V6_vaddcarry
+def int_hexagon_V6_vaddcarry :
+Hexagon_v512v64iv512v512v64i_Intrinsic<"HEXAGON_V6_vaddcarry">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddcarry_128B,VI_ftype_VIVIQV,3)
+// tag : V6_vaddcarry_128B
+def int_hexagon_V6_vaddcarry_128B :
+Hexagon_v1024v128iv1024v1024v128i_Intrinsic<"HEXAGON_V6_vaddcarry_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubcarry,VI_ftype_VIVIQV,3)
+// tag : V6_vsubcarry
+def int_hexagon_V6_vsubcarry :
+Hexagon_v512v64iv512v512v64i_Intrinsic<"HEXAGON_V6_vsubcarry">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubcarry_128B,VI_ftype_VIVIQV,3)
+// tag : V6_vsubcarry_128B
+def int_hexagon_V6_vsubcarry_128B :
+Hexagon_v1024v128iv1024v1024v128i_Intrinsic<"HEXAGON_V6_vsubcarry_128B">;
+
+
+///
+/// HexagonV65 intrinsics
+///
+
+//
+// Hexagon_V65_iLLiLLi_Intrinsic<string GCCIntSuffix>
+// tag : A6_vcmpbeq_notany
+class Hexagon_V65_iLLiLLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v512LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyub_rtt
+class Hexagon_V65_v1024v512LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v2048v1024LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyub_rtt_128B
+class Hexagon_V65_v2048v1024LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024v512LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyub_rtt_acc
+class Hexagon_V65_v1024v1024v512LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v2048v2048v1024LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyub_rtt_acc_128B
+class Hexagon_V65_v2048v2048v1024LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vasruwuhsat
+class Hexagon_V65_v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vasruwuhsat_128B
+class Hexagon_V65_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vavguw
+class Hexagon_V65_v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vavguw_128B
+class Hexagon_V65_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsb
+class Hexagon_V65_v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsb_128B
+class Hexagon_V65_v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpabuu
+class Hexagon_V65_v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpabuu_128B
+class Hexagon_V65_v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpabuu_acc_128B
+class Hexagon_V65_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyh_acc
+class Hexagon_V65_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyh_acc_128B
+class Hexagon_V65_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512v512LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpahhsat
+class Hexagon_V65_v512v512v512LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024v1024LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpahhsat_128B
+class Hexagon_V65_v1024v1024v1024LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlut4
+class Hexagon_V65_v512v512LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v1024LLi_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlut4_128B
+class Hexagon_V65_v1024v1024LLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyuhe
+class Hexagon_V65_v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v512v64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vprefixqb
+class Hexagon_V65_v512v64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i32_ty], [llvm_v512i1_ty],
+                          [IntrNoMem]>;
+
+//
+// Hexagon_V65_v1024v128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vprefixqb_128B
+class Hexagon_V65_v1024v128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v32i32_ty], [llvm_v1024i1_ty],
+                          [IntrNoMem]>;
+
+//
+// BUILTIN_INFO(HEXAGON.A6_vcmpbeq_notany,QI_ftype_DIDI,2)
+// tag : A6_vcmpbeq_notany
+def int_hexagon_A6_vcmpbeq_notany :
+Hexagon_V65_iLLiLLi_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;
+
+//
+// BUILTIN_INFO(HEXAGON.A6_vcmpbeq_notany_128B,QI_ftype_DIDI,2)
+// tag : A6_vcmpbeq_notany_128B
+def int_hexagon_A6_vcmpbeq_notany_128B :
+Hexagon_V65_iLLiLLi_Intrinsic<"HEXAGON_A6_vcmpbeq_notany_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt,VD_ftype_VIDI,2)
+// tag : V6_vrmpyub_rtt
+def int_hexagon_V6_vrmpyub_rtt :
+Hexagon_V65_v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_128B,VD_ftype_VIDI,2)
+// tag : V6_vrmpyub_rtt_128B
+def int_hexagon_V6_vrmpyub_rtt_128B :
+Hexagon_V65_v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_acc,VD_ftype_VDVIDI,3)
+// tag : V6_vrmpyub_rtt_acc
+def int_hexagon_V6_vrmpyub_rtt_acc :
+Hexagon_V65_v1024v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_acc_128B,VD_ftype_VDVIDI,3)
+// tag : V6_vrmpyub_rtt_acc_128B
+def int_hexagon_V6_vrmpyub_rtt_acc_128B :
+Hexagon_V65_v2048v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt,VD_ftype_VIDI,2)
+// tag : V6_vrmpybub_rtt
+def int_hexagon_V6_vrmpybub_rtt :
+Hexagon_V65_v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_128B,VD_ftype_VIDI,2)
+// tag : V6_vrmpybub_rtt_128B
+def int_hexagon_V6_vrmpybub_rtt_128B :
+Hexagon_V65_v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_acc,VD_ftype_VDVIDI,3)
+// tag : V6_vrmpybub_rtt_acc
+def int_hexagon_V6_vrmpybub_rtt_acc :
+Hexagon_V65_v1024v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_acc_128B,VD_ftype_VDVIDI,3)
+// tag : V6_vrmpybub_rtt_acc_128B
+def int_hexagon_V6_vrmpybub_rtt_acc_128B :
+Hexagon_V65_v2048v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruwuhsat,VI_ftype_VIVISI,3)
+// tag : V6_vasruwuhsat
+def int_hexagon_V6_vasruwuhsat :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruwuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruwuhsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasruwuhsat_128B
+def int_hexagon_V6_vasruwuhsat_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruhubsat,VI_ftype_VIVISI,3)
+// tag : V6_vasruhubsat
+def int_hexagon_V6_vasruhubsat :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruhubsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruhubsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasruhubsat_128B
+def int_hexagon_V6_vasruhubsat_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruhubrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasruhubrndsat
+def int_hexagon_V6_vasruhubrndsat :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasruhubrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasruhubrndsat_128B
+def int_hexagon_V6_vasruhubrndsat_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh_acc,VI_ftype_VIVISI,3)
+// tag : V6_vaslh_acc
+def int_hexagon_V6_vaslh_acc :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vaslh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vaslh_acc_128B
+def int_hexagon_V6_vaslh_acc_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh_acc,VI_ftype_VIVISI,3)
+// tag : V6_vasrh_acc
+def int_hexagon_V6_vasrh_acc :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrh_acc_128B
+def int_hexagon_V6_vasrh_acc_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguw,VI_ftype_VIVI,2)
+// tag : V6_vavguw
+def int_hexagon_V6_vavguw :
+Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavguw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguw_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguw_128B
+def int_hexagon_V6_vavguw_128B :
+Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguwrnd,VI_ftype_VIVI,2)
+// tag : V6_vavguwrnd
+def int_hexagon_V6_vavguwrnd :
+Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavguwrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguwrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguwrnd_128B
+def int_hexagon_V6_vavguwrnd_128B :
+Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgb,VI_ftype_VIVI,2)
+// tag : V6_vavgb
+def int_hexagon_V6_vavgb :
+Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavgb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgb_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgb_128B
+def int_hexagon_V6_vavgb_128B :
+Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgbrnd,VI_ftype_VIVI,2)
+// tag : V6_vavgbrnd
+def int_hexagon_V6_vavgbrnd :
+Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavgbrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgbrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgbrnd_128B
+def int_hexagon_V6_vavgbrnd_128B :
+Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgb,VI_ftype_VIVI,2)
+// tag : V6_vnavgb
+def int_hexagon_V6_vnavgb :
+Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgb_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgb_128B
+def int_hexagon_V6_vnavgb_128B :
+Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsb,VI_ftype_VI,1)
+// tag : V6_vabsb
+def int_hexagon_V6_vabsb :
+Hexagon_V65_v512v512_Intrinsic<"HEXAGON_V6_vabsb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsb_128B,VI_ftype_VI,1)
+// tag : V6_vabsb_128B
+def int_hexagon_V6_vabsb_128B :
+Hexagon_V65_v1024v1024_Intrinsic<"HEXAGON_V6_vabsb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsb_sat,VI_ftype_VI,1)
+// tag : V6_vabsb_sat
+def int_hexagon_V6_vabsb_sat :
+Hexagon_V65_v512v512_Intrinsic<"HEXAGON_V6_vabsb_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsb_sat_128B,VI_ftype_VI,1)
+// tag : V6_vabsb_sat_128B
+def int_hexagon_V6_vabsb_sat_128B :
+Hexagon_V65_v1024v1024_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuu,VD_ftype_VDSI,2)
+// tag : V6_vmpabuu
+def int_hexagon_V6_vmpabuu :
+Hexagon_V65_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabuu">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuu_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpabuu_128B
+def int_hexagon_V6_vmpabuu_128B :
+Hexagon_V65_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuu_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabuu_acc
+def int_hexagon_V6_vmpabuu_acc :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuu_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabuu_acc_128B
+def int_hexagon_V6_vmpabuu_acc_128B :
+Hexagon_V65_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyh_acc
+def int_hexagon_V6_vmpyh_acc :
+Hexagon_V65_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyh_acc_128B
+def int_hexagon_V6_vmpyh_acc_128B :
+Hexagon_V65_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahhsat,VI_ftype_VIVIDI,3)
+// tag : V6_vmpahhsat
+def int_hexagon_V6_vmpahhsat :
+Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpahhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahhsat_128B,VI_ftype_VIVIDI,3)
+// tag : V6_vmpahhsat_128B
+def int_hexagon_V6_vmpahhsat_128B :
+Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhuhsat,VI_ftype_VIVIDI,3)
+// tag : V6_vmpauhuhsat
+def int_hexagon_V6_vmpauhuhsat :
+Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpauhuhsat_128B,VI_ftype_VIVIDI,3)
+// tag : V6_vmpauhuhsat_128B
+def int_hexagon_V6_vmpauhuhsat_128B :
+Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpsuhuhsat,VI_ftype_VIVIDI,3)
+// tag : V6_vmpsuhuhsat
+def int_hexagon_V6_vmpsuhuhsat :
+Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpsuhuhsat_128B,VI_ftype_VIVIDI,3)
+// tag : V6_vmpsuhuhsat_128B
+def int_hexagon_V6_vmpsuhuhsat_128B :
+Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlut4,VI_ftype_VIDI,2)
+// tag : V6_vlut4
+def int_hexagon_V6_vlut4 :
+Hexagon_V65_v512v512LLi_Intrinsic<"HEXAGON_V6_vlut4">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlut4_128B,VI_ftype_VIDI,2)
+// tag : V6_vlut4_128B
+def int_hexagon_V6_vlut4_128B :
+Hexagon_V65_v1024v1024LLi_Intrinsic<"HEXAGON_V6_vlut4_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhe,VI_ftype_VISI,2)
+// tag : V6_vmpyuhe
+def int_hexagon_V6_vmpyuhe :
+Hexagon_V65_v512v512i_Intrinsic<"HEXAGON_V6_vmpyuhe">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyuhe_128B
+def int_hexagon_V6_vmpyuhe_128B :
+Hexagon_V65_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyuhe_acc
+def int_hexagon_V6_vmpyuhe_acc :
+Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyuhe_acc_128B
+def int_hexagon_V6_vmpyuhe_acc_128B :
+Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqb,VI_ftype_QV,1)
+// tag : V6_vprefixqb
+def int_hexagon_V6_vprefixqb :
+Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqb_128B,VI_ftype_QV,1)
+// tag : V6_vprefixqb_128B
+def int_hexagon_V6_vprefixqb_128B :
+Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqh,VI_ftype_QV,1)
+// tag : V6_vprefixqh
+def int_hexagon_V6_vprefixqh :
+Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqh_128B,VI_ftype_QV,1)
+// tag : V6_vprefixqh_128B
+def int_hexagon_V6_vprefixqh_128B :
+Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqw,VI_ftype_QV,1)
+// tag : V6_vprefixqw
+def int_hexagon_V6_vprefixqw :
+Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vprefixqw_128B,VI_ftype_QV,1)
+// tag : V6_vprefixqw_128B
+def int_hexagon_V6_vprefixqw_128B :
+Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;
+
+
+// The scatter/gather intrinsics below are not generated by iset.py, so take
+// care not to overwrite them when regenerating this file.
+class Hexagon_V65_vvmemiiv512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+                               llvm_v16i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemiiv1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+                               llvm_v32i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemiiv2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+                               llvm_v64i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemv64iiiv512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
+                               llvm_i32_ty,llvm_v16i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemv128iiiv1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
+                               llvm_i32_ty,llvm_v32i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemv64iiiv1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
+                               llvm_i32_ty,llvm_v32i32_ty],
+                          [IntrArgMemOnly]>;
+
+class Hexagon_V65_vvmemv128iiiv2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
+                               llvm_i32_ty,llvm_v64i32_ty],
+                          [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermw :
+Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermw">;
+
+def int_hexagon_V6_vgathermw_128B :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermw_128B">;
+
+def int_hexagon_V6_vgathermh :
+Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermh">;
+
+def int_hexagon_V6_vgathermh_128B :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermh_128B">;
+
+def int_hexagon_V6_vgathermhw :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermhw">;
+
+def int_hexagon_V6_vgathermhw_128B :
+Hexagon_V65_vvmemiiv2048_Intrinsic<"HEXAGON_V6_vgathermhw_128B">;
+
+def int_hexagon_V6_vgathermwq :
+Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermwq">;
+
+def int_hexagon_V6_vgathermwq_128B :
+Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermwq_128B">;
+
+def int_hexagon_V6_vgathermhq :
+Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermhq">;
+
+def int_hexagon_V6_vgathermhq_128B :
+Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhq_128B">;
+
+def int_hexagon_V6_vgathermhwq :
+Hexagon_V65_vvmemv64iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhwq">;
+
+def int_hexagon_V6_vgathermhwq_128B :
+Hexagon_V65_vvmemv128iiiv2048_Intrinsic<"HEXAGON_V6_vgathermhwq_128B">;
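+
+// An editorial usage sketch, not generated content: through the GCCIntSuffix
+// each gather def above also surfaces as a clang builtin of the same name
+// with "__builtin_" prepended.  Assuming HVX_Vector stands for the 64-byte
+// vector type (a placeholder typedef, not defined here), a word gather into
+// VTCM would look roughly like:
+//
+//   // dst: pointer into VTCM; base/size: i32; offsets: HVX_Vector (v16i32)
+//   __builtin_HEXAGON_V6_vgathermw(dst, base, size, offsets);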
+
+class Hexagon_V65_viiv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_i32_ty,llvm_i32_ty,
+                                           llvm_v16i32_ty,llvm_v16i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_viiv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_i32_ty,llvm_i32_ty,
+                                           llvm_v32i32_ty,llvm_v32i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_vv64iiiv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v512i1_ty,llvm_i32_ty,
+                                           llvm_i32_ty,llvm_v16i32_ty,
+                                           llvm_v16i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_vv128iiiv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v1024i1_ty,llvm_i32_ty,
+                                           llvm_i32_ty,llvm_v32i32_ty,
+                                           llvm_v32i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_viiv1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_i32_ty,llvm_i32_ty,
+                                           llvm_v32i32_ty,llvm_v16i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_viiv2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_i32_ty,llvm_i32_ty,
+                                           llvm_v64i32_ty,llvm_v32i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_vv64iiiv1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v512i1_ty,llvm_i32_ty,
+                                           llvm_i32_ty,llvm_v32i32_ty,
+                                           llvm_v16i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_vv128iiiv2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v1024i1_ty,llvm_i32_ty,
+                                           llvm_i32_ty,llvm_v64i32_ty,
+                                           llvm_v32i32_ty],
+                          [IntrWriteMem]>;
+
+class Hexagon_V65_v2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_v64i32_ty], [],
+                          [IntrNoMem]>;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermw,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw
+def int_hexagon_V6_vscattermw :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_128B
+def int_hexagon_V6_vscattermw_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermh,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh
+def int_hexagon_V6_vscattermh :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_128B
+def int_hexagon_V6_vscattermh_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_add,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_add
+def int_hexagon_V6_vscattermw_add :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw_add">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_add_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_add_128B
+def int_hexagon_V6_vscattermw_add_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_add_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_add,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_add
+def int_hexagon_V6_vscattermh_add :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh_add">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_add_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_add_128B
+def int_hexagon_V6_vscattermh_add_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_add_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermwq,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermwq
+def int_hexagon_V6_vscattermwq :
+Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermwq_128B,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermwq_128B
+def int_hexagon_V6_vscattermwq_128B :
+Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhq,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermhq
+def int_hexagon_V6_vscattermhq :
+Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermhq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhq_128B,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermhq_128B
+def int_hexagon_V6_vscattermhq_128B :
+Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermhq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw
+def int_hexagon_V6_vscattermhw :
+Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_128B,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_128B
+def int_hexagon_V6_vscattermhw_128B :
+Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhwq,v_ftype_QVSISIVDVI,5)
+// tag : V6_vscattermhwq
+def int_hexagon_V6_vscattermhwq :
+Hexagon_V65_vv64iiiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhwq_128B,v_ftype_QVSISIVDVI,5)
+// tag : V6_vscattermhwq_128B
+def int_hexagon_V6_vscattermhwq_128B :
+Hexagon_V65_vv128iiiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_add
+def int_hexagon_V6_vscattermhw_add :
+Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw_add">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add_128B,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_add_128B
+def int_hexagon_V6_vscattermhw_add_128B :
+Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdd0,VD_ftype_,0)
+// tag : V6_vdd0
+def int_hexagon_V6_vdd0 :
+Hexagon_v1024_Intrinsic<"HEXAGON_V6_vdd0">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdd0_128B,VD_ftype_,0)
+// tag : V6_vdd0_128B
+def int_hexagon_V6_vdd0_128B :
+Hexagon_V65_v2048_Intrinsic<"HEXAGON_V6_vdd0_128B">;
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsMips.td b/linux-x64/clang/include/llvm/IR/IntrinsicsMips.td
new file mode 100644
index 0000000..421a79b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsMips.td
@@ -0,0 +1,1771 @@
+//===- IntrinsicsMips.td - Defines Mips intrinsics ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the MIPS-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP data types
+def mips_v2q15_ty: LLVMType<v2i16>;
+def mips_v4q7_ty: LLVMType<v4i8>;
+def mips_q31_ty: LLVMType<i32>;
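+
+// For reference only (a sketch of the conventional GCC-side C typedefs these
+// types map to; not part of this file):
+//   typedef signed char v4i8 __attribute__((vector_size(4)));
+//   typedef short v2q15 __attribute__((vector_size(4)));
+//   typedef int q31;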
+
+let TargetPrefix = "mips" in {  // All intrinsics start with "llvm.mips.".
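+// For example, int_mips_addu_qb below becomes the IR intrinsic
+// llvm.mips.addu.qb ("int_" dropped, "_" replaced by "."), and its
+// GCCBuiltin entry exposes it in C source as __builtin_mips_addu_qb.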
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 1
+
+//===----------------------------------------------------------------------===//
+// Addition/subtraction
+
+def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+            [Commutative, IntrNoMem]>;
+def int_mips_addu_s_qb : GCCBuiltin<"__builtin_mips_addu_s_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+            [Commutative, IntrNoMem]>;
+def int_mips_subu_qb : GCCBuiltin<"__builtin_mips_subu_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subu_s_qb : GCCBuiltin<"__builtin_mips_subu_s_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+
+def int_mips_addq_ph : GCCBuiltin<"__builtin_mips_addq_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+            [Commutative, IntrNoMem]>;
+def int_mips_addq_s_ph : GCCBuiltin<"__builtin_mips_addq_s_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+            [Commutative, IntrNoMem]>;
+def int_mips_subq_ph : GCCBuiltin<"__builtin_mips_subq_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subq_s_ph : GCCBuiltin<"__builtin_mips_subq_s_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+
+def int_mips_madd: GCCBuiltin<"__builtin_mips_madd">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_maddu: GCCBuiltin<"__builtin_mips_maddu">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, Commutative]>;
+
+def int_mips_msub: GCCBuiltin<"__builtin_mips_msub">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_msubu: GCCBuiltin<"__builtin_mips_msubu">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_addq_s_w: GCCBuiltin<"__builtin_mips_addq_s_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_subq_s_w: GCCBuiltin<"__builtin_mips_subq_s_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], []>;
+
+def int_mips_addsc: GCCBuiltin<"__builtin_mips_addsc">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_addwc: GCCBuiltin<"__builtin_mips_addwc">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+
+def int_mips_modsub: GCCBuiltin<"__builtin_mips_modsub">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_raddu_w_qb: GCCBuiltin<"__builtin_mips_raddu_w_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Absolute value
+
+def int_mips_absq_s_ph: GCCBuiltin<"__builtin_mips_absq_s_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty], []>;
+def int_mips_absq_s_w: GCCBuiltin<"__builtin_mips_absq_s_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Precision reduce/expand
+
+def int_mips_precrq_qb_ph: GCCBuiltin<"__builtin_mips_precrq_qb_ph">,
+  Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precrqu_s_qb_ph: GCCBuiltin<"__builtin_mips_precrqu_s_qb_ph">,
+  Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_precrq_ph_w: GCCBuiltin<"__builtin_mips_precrq_ph_w">,
+  Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_precrq_rs_ph_w: GCCBuiltin<"__builtin_mips_precrq_rs_ph_w">,
+  Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], []>;
+def int_mips_preceq_w_phl: GCCBuiltin<"__builtin_mips_preceq_w_phl">,
+  Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_preceq_w_phr: GCCBuiltin<"__builtin_mips_preceq_w_phr">,
+  Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbl: GCCBuiltin<"__builtin_mips_precequ_ph_qbl">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbr: GCCBuiltin<"__builtin_mips_precequ_ph_qbr">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbla: GCCBuiltin<"__builtin_mips_precequ_ph_qbla">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbra: GCCBuiltin<"__builtin_mips_precequ_ph_qbra">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbl: GCCBuiltin<"__builtin_mips_preceu_ph_qbl">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbr: GCCBuiltin<"__builtin_mips_preceu_ph_qbr">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbla: GCCBuiltin<"__builtin_mips_preceu_ph_qbla">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbra: GCCBuiltin<"__builtin_mips_preceu_ph_qbra">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Shift
+
+def int_mips_shll_qb: GCCBuiltin<"__builtin_mips_shll_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], []>;
+def int_mips_shrl_qb: GCCBuiltin<"__builtin_mips_shrl_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_ph: GCCBuiltin<"__builtin_mips_shll_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shll_s_ph: GCCBuiltin<"__builtin_mips_shll_s_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shra_ph: GCCBuiltin<"__builtin_mips_shra_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_ph: GCCBuiltin<"__builtin_mips_shra_r_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_s_w: GCCBuiltin<"__builtin_mips_shll_s_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], []>;
+def int_mips_shra_r_w: GCCBuiltin<"__builtin_mips_shra_r_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shilo: GCCBuiltin<"__builtin_mips_shilo">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Multiplication
+
+def int_mips_muleu_s_ph_qbl: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbl">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_muleu_s_ph_qbr: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbr">,
+  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_mulq_rs_ph: GCCBuiltin<"__builtin_mips_mulq_rs_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phl: GCCBuiltin<"__builtin_mips_muleq_s_w_phl">,
+  Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phr: GCCBuiltin<"__builtin_mips_muleq_s_w_phr">,
+  Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulsaq_s_w_ph: GCCBuiltin<"__builtin_mips_mulsaq_s_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phl: GCCBuiltin<"__builtin_mips_maq_s_w_phl">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phr: GCCBuiltin<"__builtin_mips_maq_s_w_phr">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phl: GCCBuiltin<"__builtin_mips_maq_sa_w_phl">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phr: GCCBuiltin<"__builtin_mips_maq_sa_w_phr">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_mult: GCCBuiltin<"__builtin_mips_mult">,
+  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_multu: GCCBuiltin<"__builtin_mips_multu">,
+  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem, Commutative]>;
+
+//===----------------------------------------------------------------------===//
+// Dot product with accumulate/subtract
+
+def int_mips_dpau_h_qbl: GCCBuiltin<"__builtin_mips_dpau_h_qbl">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem]>;
+def int_mips_dpau_h_qbr: GCCBuiltin<"__builtin_mips_dpau_h_qbr">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem]>;
+def int_mips_dpsu_h_qbl: GCCBuiltin<"__builtin_mips_dpsu_h_qbl">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem]>;
+def int_mips_dpsu_h_qbr: GCCBuiltin<"__builtin_mips_dpsu_h_qbr">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem]>;
+def int_mips_dpaq_s_w_ph: GCCBuiltin<"__builtin_mips_dpaq_s_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsq_s_w_ph: GCCBuiltin<"__builtin_mips_dpsq_s_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaq_sa_l_w: GCCBuiltin<"__builtin_mips_dpaq_sa_l_w">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
+def int_mips_dpsq_sa_l_w: GCCBuiltin<"__builtin_mips_dpsq_sa_l_w">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Comparison
+
+def int_mips_cmpu_eq_qb: GCCBuiltin<"__builtin_mips_cmpu_eq_qb">,
+  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpu_lt_qb: GCCBuiltin<"__builtin_mips_cmpu_lt_qb">,
+  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpu_le_qb: GCCBuiltin<"__builtin_mips_cmpu_le_qb">,
+  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgu_eq_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgu_lt_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgu_le_qb: GCCBuiltin<"__builtin_mips_cmpgu_le_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmp_eq_ph: GCCBuiltin<"__builtin_mips_cmp_eq_ph">,
+  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_cmp_lt_ph: GCCBuiltin<"__builtin_mips_cmp_lt_ph">,
+  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_cmp_le_ph: GCCBuiltin<"__builtin_mips_cmp_le_ph">,
+  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Extracting
+
+def int_mips_extr_s_h: GCCBuiltin<"__builtin_mips_extr_s_h">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_w: GCCBuiltin<"__builtin_mips_extr_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_rs_w: GCCBuiltin<"__builtin_mips_extr_rs_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_r_w: GCCBuiltin<"__builtin_mips_extr_r_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extp: GCCBuiltin<"__builtin_mips_extp">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extpdp: GCCBuiltin<"__builtin_mips_extpdp">,
+  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Misc
+
+def int_mips_wrdsp: GCCBuiltin<"__builtin_mips_wrdsp">,
+  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_mips_rddsp: GCCBuiltin<"__builtin_mips_rddsp">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
+
+def int_mips_insv: GCCBuiltin<"__builtin_mips_insv">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+def int_mips_bitrev: GCCBuiltin<"__builtin_mips_bitrev">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_packrl_ph: GCCBuiltin<"__builtin_mips_packrl_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+
+def int_mips_repl_qb: GCCBuiltin<"__builtin_mips_repl_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_repl_ph: GCCBuiltin<"__builtin_mips_repl_ph">,
+  Intrinsic<[mips_v2q15_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_pick_qb: GCCBuiltin<"__builtin_mips_pick_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrReadMem]>;
+def int_mips_pick_ph: GCCBuiltin<"__builtin_mips_pick_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrReadMem]>;
+
+def int_mips_mthlip: GCCBuiltin<"__builtin_mips_mthlip">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+
+def int_mips_bposge32: GCCBuiltin<"__builtin_mips_bposge32">,
+  Intrinsic<[llvm_i32_ty], [], [IntrReadMem]>;
+
+def int_mips_lbux: GCCBuiltin<"__builtin_mips_lbux">,
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_lhx: GCCBuiltin<"__builtin_mips_lhx">,
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">,
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
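+
+// A minimal C usage sketch for the indexed loads above (variable names are
+// placeholders; the builtin names and types follow the defs):
+//
+//   int b = __builtin_mips_lbux(buf, off);  // unsigned byte at buf + off
+//   int h = __builtin_mips_lhx(buf, off);   // halfword at buf + off
+//   int w = __builtin_mips_lwx(buf, off);   // word at buf + off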
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 2
+
+def int_mips_absq_s_qb: GCCBuiltin<"__builtin_mips_absq_s_qb">,
+  Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>;
+
+def int_mips_addqh_ph: GCCBuiltin<"__builtin_mips_addqh_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_ph: GCCBuiltin<"__builtin_mips_addqh_r_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_addqh_w: GCCBuiltin<"__builtin_mips_addqh_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_w: GCCBuiltin<"__builtin_mips_addqh_r_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+            [IntrNoMem, Commutative]>;
+
+def int_mips_addu_ph: GCCBuiltin<"__builtin_mips_addu_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_addu_s_ph: GCCBuiltin<"__builtin_mips_addu_s_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_adduh_qb: GCCBuiltin<"__builtin_mips_adduh_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem, Commutative]>;
+def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+            [IntrNoMem, Commutative]>;
+
+def int_mips_append: GCCBuiltin<"__builtin_mips_append">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgdu_lt_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgdu_le_qb: GCCBuiltin<"__builtin_mips_cmpgdu_le_qb">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+
+def int_mips_dpa_w_ph: GCCBuiltin<"__builtin_mips_dpa_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+            [IntrNoMem]>;
+def int_mips_dps_w_ph: GCCBuiltin<"__builtin_mips_dps_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+            [IntrNoMem]>;
+
+def int_mips_dpaqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_s_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_sa_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpax_w_ph: GCCBuiltin<"__builtin_mips_dpax_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+            [IntrNoMem]>;
+def int_mips_dpsx_w_ph: GCCBuiltin<"__builtin_mips_dpsx_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+            [IntrNoMem]>;
+def int_mips_dpsqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_s_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_sa_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+
+def int_mips_mul_ph: GCCBuiltin<"__builtin_mips_mul_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_mul_s_ph: GCCBuiltin<"__builtin_mips_mul_s_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_mulq_rs_w: GCCBuiltin<"__builtin_mips_mulq_rs_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulq_s_ph: GCCBuiltin<"__builtin_mips_mulq_s_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulq_s_w: GCCBuiltin<"__builtin_mips_mulq_s_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulsa_w_ph: GCCBuiltin<"__builtin_mips_mulsa_w_ph">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+            [IntrNoMem]>;
+
+def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_qb: GCCBuiltin<"__builtin_mips_shra_r_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shrl_ph: GCCBuiltin<"__builtin_mips_shrl_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_subqh_ph: GCCBuiltin<"__builtin_mips_subqh_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_r_ph: GCCBuiltin<"__builtin_mips_subqh_r_ph">,
+  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_w: GCCBuiltin<"__builtin_mips_subqh_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_subqh_r_w: GCCBuiltin<"__builtin_mips_subqh_r_w">,
+  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+
+def int_mips_subu_ph: GCCBuiltin<"__builtin_mips_subu_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_subu_s_ph: GCCBuiltin<"__builtin_mips_subu_s_ph">,
+  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+
+def int_mips_subuh_qb: GCCBuiltin<"__builtin_mips_subuh_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subuh_r_qb: GCCBuiltin<"__builtin_mips_subuh_r_qb">,
+  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// MIPS MSA
+
+//===----------------------------------------------------------------------===//
+// Addition/subtraction
+
+def int_mips_add_a_b : GCCBuiltin<"__builtin_msa_add_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_add_a_h : GCCBuiltin<"__builtin_msa_add_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_add_a_w : GCCBuiltin<"__builtin_msa_add_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_add_a_d : GCCBuiltin<"__builtin_msa_add_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_adds_a_b : GCCBuiltin<"__builtin_msa_adds_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_a_h : GCCBuiltin<"__builtin_msa_adds_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_a_w : GCCBuiltin<"__builtin_msa_adds_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_a_d : GCCBuiltin<"__builtin_msa_adds_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_adds_s_b : GCCBuiltin<"__builtin_msa_adds_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_s_h : GCCBuiltin<"__builtin_msa_adds_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_s_w : GCCBuiltin<"__builtin_msa_adds_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_s_d : GCCBuiltin<"__builtin_msa_adds_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_adds_u_b : GCCBuiltin<"__builtin_msa_adds_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_u_h : GCCBuiltin<"__builtin_msa_adds_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_u_w : GCCBuiltin<"__builtin_msa_adds_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_adds_u_d : GCCBuiltin<"__builtin_msa_adds_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_addv_b : GCCBuiltin<"__builtin_msa_addv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addv_h : GCCBuiltin<"__builtin_msa_addv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addv_w : GCCBuiltin<"__builtin_msa_addv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addv_d : GCCBuiltin<"__builtin_msa_addv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_addvi_b : GCCBuiltin<"__builtin_msa_addvi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addvi_h : GCCBuiltin<"__builtin_msa_addvi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addvi_w : GCCBuiltin<"__builtin_msa_addvi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_addvi_d : GCCBuiltin<"__builtin_msa_addvi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_and_v : GCCBuiltin<"__builtin_msa_and_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_andi_b : GCCBuiltin<"__builtin_msa_andi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_asub_s_b : GCCBuiltin<"__builtin_msa_asub_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_asub_s_h : GCCBuiltin<"__builtin_msa_asub_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_asub_s_w : GCCBuiltin<"__builtin_msa_asub_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_asub_s_d : GCCBuiltin<"__builtin_msa_asub_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_asub_u_b : GCCBuiltin<"__builtin_msa_asub_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_asub_u_h : GCCBuiltin<"__builtin_msa_asub_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_asub_u_w : GCCBuiltin<"__builtin_msa_asub_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_asub_u_d : GCCBuiltin<"__builtin_msa_asub_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ave_s_b : GCCBuiltin<"__builtin_msa_ave_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_s_h : GCCBuiltin<"__builtin_msa_ave_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_s_w : GCCBuiltin<"__builtin_msa_ave_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_s_d : GCCBuiltin<"__builtin_msa_ave_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_ave_u_b : GCCBuiltin<"__builtin_msa_ave_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_u_h : GCCBuiltin<"__builtin_msa_ave_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_u_w : GCCBuiltin<"__builtin_msa_ave_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_ave_u_d : GCCBuiltin<"__builtin_msa_ave_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_aver_s_b : GCCBuiltin<"__builtin_msa_aver_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_s_h : GCCBuiltin<"__builtin_msa_aver_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_s_w : GCCBuiltin<"__builtin_msa_aver_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_s_d : GCCBuiltin<"__builtin_msa_aver_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_aver_u_b : GCCBuiltin<"__builtin_msa_aver_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_u_h : GCCBuiltin<"__builtin_msa_aver_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_u_w : GCCBuiltin<"__builtin_msa_aver_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+  [Commutative, IntrNoMem]>;
+def int_mips_aver_u_d : GCCBuiltin<"__builtin_msa_aver_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+  [Commutative, IntrNoMem]>;
+
+def int_mips_bclr_b : GCCBuiltin<"__builtin_msa_bclr_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bclr_h : GCCBuiltin<"__builtin_msa_bclr_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bclr_w : GCCBuiltin<"__builtin_msa_bclr_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bclr_d : GCCBuiltin<"__builtin_msa_bclr_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bclri_b : GCCBuiltin<"__builtin_msa_bclri_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_h : GCCBuiltin<"__builtin_msa_bclri_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_w : GCCBuiltin<"__builtin_msa_bclri_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_d : GCCBuiltin<"__builtin_msa_bclri_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_binsl_b : GCCBuiltin<"__builtin_msa_binsl_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+def int_mips_binsl_h : GCCBuiltin<"__builtin_msa_binsl_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+            [IntrNoMem]>;
+def int_mips_binsl_w : GCCBuiltin<"__builtin_msa_binsl_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsl_d : GCCBuiltin<"__builtin_msa_binsl_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+            [IntrNoMem]>;
+
+def int_mips_binsli_b : GCCBuiltin<"__builtin_msa_binsli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsli_h : GCCBuiltin<"__builtin_msa_binsli_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsli_w : GCCBuiltin<"__builtin_msa_binsli_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsli_d : GCCBuiltin<"__builtin_msa_binsli_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_binsr_b : GCCBuiltin<"__builtin_msa_binsr_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+def int_mips_binsr_h : GCCBuiltin<"__builtin_msa_binsr_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+            [IntrNoMem]>;
+def int_mips_binsr_w : GCCBuiltin<"__builtin_msa_binsr_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsr_d : GCCBuiltin<"__builtin_msa_binsr_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+            [IntrNoMem]>;
+
+def int_mips_binsri_b : GCCBuiltin<"__builtin_msa_binsri_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsri_h : GCCBuiltin<"__builtin_msa_binsri_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsri_w : GCCBuiltin<"__builtin_msa_binsri_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_binsri_d : GCCBuiltin<"__builtin_msa_binsri_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_bmnz_v : GCCBuiltin<"__builtin_msa_bmnz_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+
+def int_mips_bmnzi_b : GCCBuiltin<"__builtin_msa_bmnzi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_bmz_v : GCCBuiltin<"__builtin_msa_bmz_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+
+def int_mips_bmzi_b : GCCBuiltin<"__builtin_msa_bmzi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_bneg_b : GCCBuiltin<"__builtin_msa_bneg_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bneg_h : GCCBuiltin<"__builtin_msa_bneg_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bneg_w : GCCBuiltin<"__builtin_msa_bneg_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bneg_d : GCCBuiltin<"__builtin_msa_bneg_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bnegi_b : GCCBuiltin<"__builtin_msa_bnegi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_h : GCCBuiltin<"__builtin_msa_bnegi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_w : GCCBuiltin<"__builtin_msa_bnegi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_d : GCCBuiltin<"__builtin_msa_bnegi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_bnz_b : GCCBuiltin<"__builtin_msa_bnz_b">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bnz_h : GCCBuiltin<"__builtin_msa_bnz_h">,
+  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bnz_w : GCCBuiltin<"__builtin_msa_bnz_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bnz_d : GCCBuiltin<"__builtin_msa_bnz_d">,
+  Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bnz_v : GCCBuiltin<"__builtin_msa_bnz_v">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_bsel_v : GCCBuiltin<"__builtin_msa_bsel_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+
+def int_mips_bseli_b : GCCBuiltin<"__builtin_msa_bseli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_bset_b : GCCBuiltin<"__builtin_msa_bset_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bset_h : GCCBuiltin<"__builtin_msa_bset_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bset_w : GCCBuiltin<"__builtin_msa_bset_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bset_d : GCCBuiltin<"__builtin_msa_bset_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bseti_b : GCCBuiltin<"__builtin_msa_bseti_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_h : GCCBuiltin<"__builtin_msa_bseti_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_w : GCCBuiltin<"__builtin_msa_bseti_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_d : GCCBuiltin<"__builtin_msa_bseti_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_bz_b : GCCBuiltin<"__builtin_msa_bz_b">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bz_h : GCCBuiltin<"__builtin_msa_bz_h">,
+  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bz_w : GCCBuiltin<"__builtin_msa_bz_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bz_d : GCCBuiltin<"__builtin_msa_bz_d">,
+  Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bz_v : GCCBuiltin<"__builtin_msa_bz_v">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_ceq_b : GCCBuiltin<"__builtin_msa_ceq_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ceq_h : GCCBuiltin<"__builtin_msa_ceq_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ceq_w : GCCBuiltin<"__builtin_msa_ceq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ceq_d : GCCBuiltin<"__builtin_msa_ceq_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ceqi_b : GCCBuiltin<"__builtin_msa_ceqi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_h : GCCBuiltin<"__builtin_msa_ceqi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_w : GCCBuiltin<"__builtin_msa_ceqi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_d : GCCBuiltin<"__builtin_msa_ceqi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_cfcmsa : GCCBuiltin<"__builtin_msa_cfcmsa">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+
+def int_mips_cle_s_b : GCCBuiltin<"__builtin_msa_cle_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_cle_s_h : GCCBuiltin<"__builtin_msa_cle_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_cle_s_w : GCCBuiltin<"__builtin_msa_cle_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_cle_s_d : GCCBuiltin<"__builtin_msa_cle_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_cle_u_b : GCCBuiltin<"__builtin_msa_cle_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_cle_u_h : GCCBuiltin<"__builtin_msa_cle_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_cle_u_w : GCCBuiltin<"__builtin_msa_cle_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_cle_u_d : GCCBuiltin<"__builtin_msa_cle_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clei_s_b : GCCBuiltin<"__builtin_msa_clei_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_h : GCCBuiltin<"__builtin_msa_clei_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_w : GCCBuiltin<"__builtin_msa_clei_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_d : GCCBuiltin<"__builtin_msa_clei_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clei_u_b : GCCBuiltin<"__builtin_msa_clei_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_h : GCCBuiltin<"__builtin_msa_clei_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_w : GCCBuiltin<"__builtin_msa_clei_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_d : GCCBuiltin<"__builtin_msa_clei_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clt_s_b : GCCBuiltin<"__builtin_msa_clt_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_clt_s_h : GCCBuiltin<"__builtin_msa_clt_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_clt_s_w : GCCBuiltin<"__builtin_msa_clt_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_clt_s_d : GCCBuiltin<"__builtin_msa_clt_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clt_u_b : GCCBuiltin<"__builtin_msa_clt_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_clt_u_h : GCCBuiltin<"__builtin_msa_clt_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_clt_u_w : GCCBuiltin<"__builtin_msa_clt_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_clt_u_d : GCCBuiltin<"__builtin_msa_clt_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clti_s_b : GCCBuiltin<"__builtin_msa_clti_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_h : GCCBuiltin<"__builtin_msa_clti_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_w : GCCBuiltin<"__builtin_msa_clti_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_d : GCCBuiltin<"__builtin_msa_clti_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clti_u_b : GCCBuiltin<"__builtin_msa_clti_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_h : GCCBuiltin<"__builtin_msa_clti_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_w : GCCBuiltin<"__builtin_msa_clti_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_d : GCCBuiltin<"__builtin_msa_clti_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_copy_s_b : GCCBuiltin<"__builtin_msa_copy_s_b">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_h : GCCBuiltin<"__builtin_msa_copy_s_h">,
+  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_w : GCCBuiltin<"__builtin_msa_copy_s_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_d : GCCBuiltin<"__builtin_msa_copy_s_d">,
+  Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_copy_u_b : GCCBuiltin<"__builtin_msa_copy_u_b">,
+  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_h : GCCBuiltin<"__builtin_msa_copy_u_h">,
+  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_w : GCCBuiltin<"__builtin_msa_copy_u_w">,
+  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_d : GCCBuiltin<"__builtin_msa_copy_u_d">,
+  Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_ctcmsa : GCCBuiltin<"__builtin_msa_ctcmsa">,
+  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_mips_div_s_b : GCCBuiltin<"__builtin_msa_div_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_div_s_h : GCCBuiltin<"__builtin_msa_div_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_div_s_w : GCCBuiltin<"__builtin_msa_div_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_div_s_d : GCCBuiltin<"__builtin_msa_div_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_div_u_b : GCCBuiltin<"__builtin_msa_div_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_div_u_h : GCCBuiltin<"__builtin_msa_div_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_div_u_w : GCCBuiltin<"__builtin_msa_div_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_div_u_d : GCCBuiltin<"__builtin_msa_div_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+// This instruction is part of the MSA spec but it does not share the
+// __builtin_msa prefix because it operates on GP registers.
+def int_mips_dlsa : GCCBuiltin<"__builtin_mips_dlsa">,
+  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
+            [IntrNoMem]>;
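+
+// A hedged usage sketch: assuming the usual [D]LSA scaled-address semantics,
+// result = (a << shift) + b, with the last operand a compile-time constant
+// (whether it encodes the shift itself or shift-1 is an assumption here):
+//
+//   long long addr = __builtin_mips_dlsa(index, base, 2);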
+
+def int_mips_dotp_s_h : GCCBuiltin<"__builtin_msa_dotp_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_dotp_s_w : GCCBuiltin<"__builtin_msa_dotp_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_dotp_s_d : GCCBuiltin<"__builtin_msa_dotp_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_dotp_u_h : GCCBuiltin<"__builtin_msa_dotp_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_dotp_u_w : GCCBuiltin<"__builtin_msa_dotp_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_dotp_u_d : GCCBuiltin<"__builtin_msa_dotp_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_dpadd_s_h : GCCBuiltin<"__builtin_msa_dpadd_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_dpadd_s_w : GCCBuiltin<"__builtin_msa_dpadd_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_dpadd_s_d : GCCBuiltin<"__builtin_msa_dpadd_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_dpadd_u_h : GCCBuiltin<"__builtin_msa_dpadd_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_dpadd_u_w : GCCBuiltin<"__builtin_msa_dpadd_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_dpadd_u_d : GCCBuiltin<"__builtin_msa_dpadd_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_dpsub_s_h : GCCBuiltin<"__builtin_msa_dpsub_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_dpsub_s_w : GCCBuiltin<"__builtin_msa_dpsub_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_dpsub_s_d : GCCBuiltin<"__builtin_msa_dpsub_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_dpsub_u_h : GCCBuiltin<"__builtin_msa_dpsub_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_dpsub_u_w : GCCBuiltin<"__builtin_msa_dpsub_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_dpsub_u_d : GCCBuiltin<"__builtin_msa_dpsub_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_fadd_w : GCCBuiltin<"__builtin_msa_fadd_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fadd_d : GCCBuiltin<"__builtin_msa_fadd_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcaf_w : GCCBuiltin<"__builtin_msa_fcaf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcaf_d : GCCBuiltin<"__builtin_msa_fcaf_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fceq_w : GCCBuiltin<"__builtin_msa_fceq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fceq_d : GCCBuiltin<"__builtin_msa_fceq_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcle_w : GCCBuiltin<"__builtin_msa_fcle_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcle_d : GCCBuiltin<"__builtin_msa_fcle_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fclt_w : GCCBuiltin<"__builtin_msa_fclt_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fclt_d : GCCBuiltin<"__builtin_msa_fclt_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fclass_w : GCCBuiltin<"__builtin_msa_fclass_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fclass_d : GCCBuiltin<"__builtin_msa_fclass_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcne_w : GCCBuiltin<"__builtin_msa_fcne_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcne_d : GCCBuiltin<"__builtin_msa_fcne_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcor_w : GCCBuiltin<"__builtin_msa_fcor_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcor_d : GCCBuiltin<"__builtin_msa_fcor_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcueq_w : GCCBuiltin<"__builtin_msa_fcueq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcueq_d : GCCBuiltin<"__builtin_msa_fcueq_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcule_w : GCCBuiltin<"__builtin_msa_fcule_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcule_d : GCCBuiltin<"__builtin_msa_fcule_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcult_w : GCCBuiltin<"__builtin_msa_fcult_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcult_d : GCCBuiltin<"__builtin_msa_fcult_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcun_w : GCCBuiltin<"__builtin_msa_fcun_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcun_d : GCCBuiltin<"__builtin_msa_fcun_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcune_w : GCCBuiltin<"__builtin_msa_fcune_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcune_d : GCCBuiltin<"__builtin_msa_fcune_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fdiv_w : GCCBuiltin<"__builtin_msa_fdiv_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fdiv_d : GCCBuiltin<"__builtin_msa_fdiv_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fexdo_h : GCCBuiltin<"__builtin_msa_fexdo_h">,
+  Intrinsic<[llvm_v8f16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fexdo_w : GCCBuiltin<"__builtin_msa_fexdo_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fexp2_w : GCCBuiltin<"__builtin_msa_fexp2_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_fexp2_d : GCCBuiltin<"__builtin_msa_fexp2_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_fexupl_w : GCCBuiltin<"__builtin_msa_fexupl_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
+def int_mips_fexupl_d : GCCBuiltin<"__builtin_msa_fexupl_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+def int_mips_fexupr_w : GCCBuiltin<"__builtin_msa_fexupr_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
+def int_mips_fexupr_d : GCCBuiltin<"__builtin_msa_fexupr_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+def int_mips_ffint_s_w : GCCBuiltin<"__builtin_msa_ffint_s_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ffint_s_d : GCCBuiltin<"__builtin_msa_ffint_s_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ffint_u_w : GCCBuiltin<"__builtin_msa_ffint_u_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ffint_u_d : GCCBuiltin<"__builtin_msa_ffint_u_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ffql_w : GCCBuiltin<"__builtin_msa_ffql_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ffql_d : GCCBuiltin<"__builtin_msa_ffql_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_ffqr_w : GCCBuiltin<"__builtin_msa_ffqr_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ffqr_d : GCCBuiltin<"__builtin_msa_ffqr_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_fill_b : GCCBuiltin<"__builtin_msa_fill_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_h : GCCBuiltin<"__builtin_msa_fill_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_w : GCCBuiltin<"__builtin_msa_fill_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_d : GCCBuiltin<"__builtin_msa_fill_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+def int_mips_flog2_w : GCCBuiltin<"__builtin_msa_flog2_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_flog2_d : GCCBuiltin<"__builtin_msa_flog2_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmadd_w : GCCBuiltin<"__builtin_msa_fmadd_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+            [IntrNoMem]>;
+def int_mips_fmadd_d : GCCBuiltin<"__builtin_msa_fmadd_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+            [IntrNoMem]>;
+
+def int_mips_fmax_w : GCCBuiltin<"__builtin_msa_fmax_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmax_d : GCCBuiltin<"__builtin_msa_fmax_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmax_a_w : GCCBuiltin<"__builtin_msa_fmax_a_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmax_a_d : GCCBuiltin<"__builtin_msa_fmax_a_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmin_w : GCCBuiltin<"__builtin_msa_fmin_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmin_d : GCCBuiltin<"__builtin_msa_fmin_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmin_a_w : GCCBuiltin<"__builtin_msa_fmin_a_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmin_a_d : GCCBuiltin<"__builtin_msa_fmin_a_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmsub_w : GCCBuiltin<"__builtin_msa_fmsub_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+            [IntrNoMem]>;
+def int_mips_fmsub_d : GCCBuiltin<"__builtin_msa_fmsub_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+            [IntrNoMem]>;
+
+def int_mips_fmul_w : GCCBuiltin<"__builtin_msa_fmul_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmul_d : GCCBuiltin<"__builtin_msa_fmul_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frint_w : GCCBuiltin<"__builtin_msa_frint_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frint_d : GCCBuiltin<"__builtin_msa_frint_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frcp_w : GCCBuiltin<"__builtin_msa_frcp_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frcp_d : GCCBuiltin<"__builtin_msa_frcp_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frsqrt_w : GCCBuiltin<"__builtin_msa_frsqrt_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frsqrt_d : GCCBuiltin<"__builtin_msa_frsqrt_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsaf_w : GCCBuiltin<"__builtin_msa_fsaf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsaf_d : GCCBuiltin<"__builtin_msa_fsaf_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fseq_w : GCCBuiltin<"__builtin_msa_fseq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fseq_d : GCCBuiltin<"__builtin_msa_fseq_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsle_w : GCCBuiltin<"__builtin_msa_fsle_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsle_d : GCCBuiltin<"__builtin_msa_fsle_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fslt_w : GCCBuiltin<"__builtin_msa_fslt_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fslt_d : GCCBuiltin<"__builtin_msa_fslt_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsne_w : GCCBuiltin<"__builtin_msa_fsne_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsne_d : GCCBuiltin<"__builtin_msa_fsne_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsor_w : GCCBuiltin<"__builtin_msa_fsor_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsor_d : GCCBuiltin<"__builtin_msa_fsor_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsqrt_w : GCCBuiltin<"__builtin_msa_fsqrt_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsqrt_d : GCCBuiltin<"__builtin_msa_fsqrt_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsub_w : GCCBuiltin<"__builtin_msa_fsub_w">,
+  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsub_d : GCCBuiltin<"__builtin_msa_fsub_d">,
+  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsueq_w : GCCBuiltin<"__builtin_msa_fsueq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsueq_d : GCCBuiltin<"__builtin_msa_fsueq_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsule_w : GCCBuiltin<"__builtin_msa_fsule_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsule_d : GCCBuiltin<"__builtin_msa_fsule_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsult_w : GCCBuiltin<"__builtin_msa_fsult_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsult_d : GCCBuiltin<"__builtin_msa_fsult_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsun_w : GCCBuiltin<"__builtin_msa_fsun_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsun_d : GCCBuiltin<"__builtin_msa_fsun_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsune_w : GCCBuiltin<"__builtin_msa_fsune_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsune_d : GCCBuiltin<"__builtin_msa_fsune_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftint_s_w : GCCBuiltin<"__builtin_msa_ftint_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftint_s_d : GCCBuiltin<"__builtin_msa_ftint_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftint_u_w : GCCBuiltin<"__builtin_msa_ftint_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftint_u_d : GCCBuiltin<"__builtin_msa_ftint_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftq_h : GCCBuiltin<"__builtin_msa_ftq_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftq_w : GCCBuiltin<"__builtin_msa_ftq_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftrunc_s_w : GCCBuiltin<"__builtin_msa_ftrunc_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftrunc_s_d : GCCBuiltin<"__builtin_msa_ftrunc_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftrunc_u_w : GCCBuiltin<"__builtin_msa_ftrunc_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftrunc_u_d : GCCBuiltin<"__builtin_msa_ftrunc_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_hadd_s_h : GCCBuiltin<"__builtin_msa_hadd_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hadd_s_w : GCCBuiltin<"__builtin_msa_hadd_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hadd_s_d : GCCBuiltin<"__builtin_msa_hadd_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hadd_u_h : GCCBuiltin<"__builtin_msa_hadd_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hadd_u_w : GCCBuiltin<"__builtin_msa_hadd_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hadd_u_d : GCCBuiltin<"__builtin_msa_hadd_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hsub_s_h : GCCBuiltin<"__builtin_msa_hsub_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hsub_s_w : GCCBuiltin<"__builtin_msa_hsub_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hsub_s_d : GCCBuiltin<"__builtin_msa_hsub_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hsub_u_h : GCCBuiltin<"__builtin_msa_hsub_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hsub_u_w : GCCBuiltin<"__builtin_msa_hsub_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hsub_u_d : GCCBuiltin<"__builtin_msa_hsub_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_ilvev_b : GCCBuiltin<"__builtin_msa_ilvev_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvev_h : GCCBuiltin<"__builtin_msa_ilvev_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvev_w : GCCBuiltin<"__builtin_msa_ilvev_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvev_d : GCCBuiltin<"__builtin_msa_ilvev_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvl_b : GCCBuiltin<"__builtin_msa_ilvl_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvl_h : GCCBuiltin<"__builtin_msa_ilvl_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvl_w : GCCBuiltin<"__builtin_msa_ilvl_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvl_d : GCCBuiltin<"__builtin_msa_ilvl_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvod_b : GCCBuiltin<"__builtin_msa_ilvod_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvod_h : GCCBuiltin<"__builtin_msa_ilvod_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvod_w : GCCBuiltin<"__builtin_msa_ilvod_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvod_d : GCCBuiltin<"__builtin_msa_ilvod_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvr_b : GCCBuiltin<"__builtin_msa_ilvr_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvr_h : GCCBuiltin<"__builtin_msa_ilvr_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvr_w : GCCBuiltin<"__builtin_msa_ilvr_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvr_d : GCCBuiltin<"__builtin_msa_ilvr_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_insert_b : GCCBuiltin<"__builtin_msa_insert_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+def int_mips_insert_h : GCCBuiltin<"__builtin_msa_insert_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+def int_mips_insert_w : GCCBuiltin<"__builtin_msa_insert_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>;
+def int_mips_insert_d : GCCBuiltin<"__builtin_msa_insert_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty, llvm_i64_ty],
+  [IntrNoMem]>;
+
+def int_mips_insve_b : GCCBuiltin<"__builtin_msa_insve_b">,
+  Intrinsic<[llvm_v16i8_ty],
+            [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+def int_mips_insve_h : GCCBuiltin<"__builtin_msa_insve_h">,
+  Intrinsic<[llvm_v8i16_ty],
+            [llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
+            [IntrNoMem]>;
+def int_mips_insve_w : GCCBuiltin<"__builtin_msa_insve_w">,
+  Intrinsic<[llvm_v4i32_ty],
+            [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+            [IntrNoMem]>;
+def int_mips_insve_d : GCCBuiltin<"__builtin_msa_insve_d">,
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty],
+            [IntrNoMem]>;
+
+def int_mips_ld_b : GCCBuiltin<"__builtin_msa_ld_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_ld_h : GCCBuiltin<"__builtin_msa_ld_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_ld_w : GCCBuiltin<"__builtin_msa_ld_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_ld_d : GCCBuiltin<"__builtin_msa_ld_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly]>;
+
+def int_mips_ldi_b : GCCBuiltin<"__builtin_msa_ldi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_h : GCCBuiltin<"__builtin_msa_ldi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_w : GCCBuiltin<"__builtin_msa_ldi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_d : GCCBuiltin<"__builtin_msa_ldi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// This instruction is part of the MSA spec but it does not share the
+// __builtin_msa prefix because it operates on general-purpose registers
+// (GPRs) rather than MSA vector registers.
+def int_mips_lsa : GCCBuiltin<"__builtin_mips_lsa">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
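+// As with DLSA above, a hedged sketch under the same assumed operand
+// roles, on 32-bit operands:
+//   int addr = __builtin_mips_lsa(idx, base, 2); // base + (idx << 2)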
+
+def int_mips_madd_q_h : GCCBuiltin<"__builtin_msa_madd_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_madd_q_w : GCCBuiltin<"__builtin_msa_madd_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_maddr_q_h : GCCBuiltin<"__builtin_msa_maddr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_maddr_q_w : GCCBuiltin<"__builtin_msa_maddr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_maddv_b : GCCBuiltin<"__builtin_msa_maddv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_maddv_h : GCCBuiltin<"__builtin_msa_maddv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_maddv_w : GCCBuiltin<"__builtin_msa_maddv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+def int_mips_maddv_d : GCCBuiltin<"__builtin_msa_maddv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+  [IntrNoMem]>;
+
+def int_mips_max_a_b : GCCBuiltin<"__builtin_msa_max_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_a_h : GCCBuiltin<"__builtin_msa_max_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_a_w : GCCBuiltin<"__builtin_msa_max_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_a_d : GCCBuiltin<"__builtin_msa_max_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_max_s_b : GCCBuiltin<"__builtin_msa_max_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_s_h : GCCBuiltin<"__builtin_msa_max_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_s_w : GCCBuiltin<"__builtin_msa_max_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_s_d : GCCBuiltin<"__builtin_msa_max_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_max_u_b : GCCBuiltin<"__builtin_msa_max_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_u_h : GCCBuiltin<"__builtin_msa_max_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_u_w : GCCBuiltin<"__builtin_msa_max_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_u_d : GCCBuiltin<"__builtin_msa_max_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_maxi_s_b : GCCBuiltin<"__builtin_msa_maxi_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_h : GCCBuiltin<"__builtin_msa_maxi_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_w : GCCBuiltin<"__builtin_msa_maxi_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_d : GCCBuiltin<"__builtin_msa_maxi_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_maxi_u_b : GCCBuiltin<"__builtin_msa_maxi_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_h : GCCBuiltin<"__builtin_msa_maxi_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_w : GCCBuiltin<"__builtin_msa_maxi_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_d : GCCBuiltin<"__builtin_msa_maxi_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_min_a_b : GCCBuiltin<"__builtin_msa_min_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_a_h : GCCBuiltin<"__builtin_msa_min_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_a_w : GCCBuiltin<"__builtin_msa_min_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_a_d : GCCBuiltin<"__builtin_msa_min_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_min_s_b : GCCBuiltin<"__builtin_msa_min_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_s_h : GCCBuiltin<"__builtin_msa_min_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_s_w : GCCBuiltin<"__builtin_msa_min_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_s_d : GCCBuiltin<"__builtin_msa_min_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_min_u_b : GCCBuiltin<"__builtin_msa_min_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_u_h : GCCBuiltin<"__builtin_msa_min_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_u_w : GCCBuiltin<"__builtin_msa_min_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_u_d : GCCBuiltin<"__builtin_msa_min_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_mini_s_b : GCCBuiltin<"__builtin_msa_mini_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_h : GCCBuiltin<"__builtin_msa_mini_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_w : GCCBuiltin<"__builtin_msa_mini_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_d : GCCBuiltin<"__builtin_msa_mini_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_mini_u_b : GCCBuiltin<"__builtin_msa_mini_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_h : GCCBuiltin<"__builtin_msa_mini_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_w : GCCBuiltin<"__builtin_msa_mini_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_d : GCCBuiltin<"__builtin_msa_mini_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_mod_s_b : GCCBuiltin<"__builtin_msa_mod_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mod_s_h : GCCBuiltin<"__builtin_msa_mod_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mod_s_w : GCCBuiltin<"__builtin_msa_mod_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mod_s_d : GCCBuiltin<"__builtin_msa_mod_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_mod_u_b : GCCBuiltin<"__builtin_msa_mod_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mod_u_h : GCCBuiltin<"__builtin_msa_mod_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mod_u_w : GCCBuiltin<"__builtin_msa_mod_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mod_u_d : GCCBuiltin<"__builtin_msa_mod_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_move_v : GCCBuiltin<"__builtin_msa_move_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_msub_q_h : GCCBuiltin<"__builtin_msa_msub_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_msub_q_w : GCCBuiltin<"__builtin_msa_msub_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_msubr_q_h : GCCBuiltin<"__builtin_msa_msubr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_msubr_q_w : GCCBuiltin<"__builtin_msa_msubr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+
+def int_mips_msubv_b : GCCBuiltin<"__builtin_msa_msubv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+  [IntrNoMem]>;
+def int_mips_msubv_h : GCCBuiltin<"__builtin_msa_msubv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+  [IntrNoMem]>;
+def int_mips_msubv_w : GCCBuiltin<"__builtin_msa_msubv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+  [IntrNoMem]>;
+def int_mips_msubv_d : GCCBuiltin<"__builtin_msa_msubv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+  [IntrNoMem]>;
+
+def int_mips_mul_q_h : GCCBuiltin<"__builtin_msa_mul_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mul_q_w : GCCBuiltin<"__builtin_msa_mul_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_mulr_q_h : GCCBuiltin<"__builtin_msa_mulr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mulr_q_w : GCCBuiltin<"__builtin_msa_mulr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_mulv_b : GCCBuiltin<"__builtin_msa_mulv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mulv_h : GCCBuiltin<"__builtin_msa_mulv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mulv_w : GCCBuiltin<"__builtin_msa_mulv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mulv_d : GCCBuiltin<"__builtin_msa_mulv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nloc_b : GCCBuiltin<"__builtin_msa_nloc_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_nloc_h : GCCBuiltin<"__builtin_msa_nloc_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_nloc_w : GCCBuiltin<"__builtin_msa_nloc_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_nloc_d : GCCBuiltin<"__builtin_msa_nloc_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nlzc_b : GCCBuiltin<"__builtin_msa_nlzc_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_nlzc_h : GCCBuiltin<"__builtin_msa_nlzc_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_nlzc_w : GCCBuiltin<"__builtin_msa_nlzc_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_nlzc_d : GCCBuiltin<"__builtin_msa_nlzc_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nor_v : GCCBuiltin<"__builtin_msa_nor_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_nori_b : GCCBuiltin<"__builtin_msa_nori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_or_v : GCCBuiltin<"__builtin_msa_or_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_ori_b : GCCBuiltin<"__builtin_msa_ori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_pckev_b : GCCBuiltin<"__builtin_msa_pckev_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pckev_h : GCCBuiltin<"__builtin_msa_pckev_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pckev_w : GCCBuiltin<"__builtin_msa_pckev_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pckev_d : GCCBuiltin<"__builtin_msa_pckev_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_pckod_b : GCCBuiltin<"__builtin_msa_pckod_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pckod_h : GCCBuiltin<"__builtin_msa_pckod_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pckod_w : GCCBuiltin<"__builtin_msa_pckod_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pckod_d : GCCBuiltin<"__builtin_msa_pckod_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_pcnt_b : GCCBuiltin<"__builtin_msa_pcnt_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pcnt_h : GCCBuiltin<"__builtin_msa_pcnt_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pcnt_w : GCCBuiltin<"__builtin_msa_pcnt_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pcnt_d : GCCBuiltin<"__builtin_msa_pcnt_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_sat_s_b : GCCBuiltin<"__builtin_msa_sat_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_h : GCCBuiltin<"__builtin_msa_sat_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_w : GCCBuiltin<"__builtin_msa_sat_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_d : GCCBuiltin<"__builtin_msa_sat_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sat_u_b : GCCBuiltin<"__builtin_msa_sat_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_h : GCCBuiltin<"__builtin_msa_sat_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_w : GCCBuiltin<"__builtin_msa_sat_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_d : GCCBuiltin<"__builtin_msa_sat_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_shf_b : GCCBuiltin<"__builtin_msa_shf_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shf_h : GCCBuiltin<"__builtin_msa_shf_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shf_w : GCCBuiltin<"__builtin_msa_shf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sld_b : GCCBuiltin<"__builtin_msa_sld_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sld_h : GCCBuiltin<"__builtin_msa_sld_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sld_w : GCCBuiltin<"__builtin_msa_sld_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sld_d : GCCBuiltin<"__builtin_msa_sld_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_sldi_b : GCCBuiltin<"__builtin_msa_sldi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sldi_h : GCCBuiltin<"__builtin_msa_sldi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sldi_w : GCCBuiltin<"__builtin_msa_sldi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+def int_mips_sldi_d : GCCBuiltin<"__builtin_msa_sldi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrNoMem]>;
+
+def int_mips_sll_b : GCCBuiltin<"__builtin_msa_sll_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_sll_h : GCCBuiltin<"__builtin_msa_sll_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_sll_w : GCCBuiltin<"__builtin_msa_sll_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_sll_d : GCCBuiltin<"__builtin_msa_sll_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_slli_b : GCCBuiltin<"__builtin_msa_slli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_h : GCCBuiltin<"__builtin_msa_slli_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_w : GCCBuiltin<"__builtin_msa_slli_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_d : GCCBuiltin<"__builtin_msa_slli_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_splat_b : GCCBuiltin<"__builtin_msa_splat_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_h : GCCBuiltin<"__builtin_msa_splat_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_w : GCCBuiltin<"__builtin_msa_splat_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_d : GCCBuiltin<"__builtin_msa_splat_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_splati_b : GCCBuiltin<"__builtin_msa_splati_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_h : GCCBuiltin<"__builtin_msa_splati_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_w : GCCBuiltin<"__builtin_msa_splati_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_d : GCCBuiltin<"__builtin_msa_splati_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sra_b : GCCBuiltin<"__builtin_msa_sra_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_sra_h : GCCBuiltin<"__builtin_msa_sra_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_sra_w : GCCBuiltin<"__builtin_msa_sra_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_sra_d : GCCBuiltin<"__builtin_msa_sra_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srai_b : GCCBuiltin<"__builtin_msa_srai_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_h : GCCBuiltin<"__builtin_msa_srai_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_w : GCCBuiltin<"__builtin_msa_srai_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_d : GCCBuiltin<"__builtin_msa_srai_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srar_b : GCCBuiltin<"__builtin_msa_srar_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srar_h : GCCBuiltin<"__builtin_msa_srar_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srar_w : GCCBuiltin<"__builtin_msa_srar_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srar_d : GCCBuiltin<"__builtin_msa_srar_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srari_b : GCCBuiltin<"__builtin_msa_srari_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_h : GCCBuiltin<"__builtin_msa_srari_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_w : GCCBuiltin<"__builtin_msa_srari_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_d : GCCBuiltin<"__builtin_msa_srari_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srl_b : GCCBuiltin<"__builtin_msa_srl_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srl_h : GCCBuiltin<"__builtin_msa_srl_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srl_w : GCCBuiltin<"__builtin_msa_srl_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srl_d : GCCBuiltin<"__builtin_msa_srl_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srli_b : GCCBuiltin<"__builtin_msa_srli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_h : GCCBuiltin<"__builtin_msa_srli_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_w : GCCBuiltin<"__builtin_msa_srli_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_d : GCCBuiltin<"__builtin_msa_srli_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srlr_b : GCCBuiltin<"__builtin_msa_srlr_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srlr_h : GCCBuiltin<"__builtin_msa_srlr_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srlr_w : GCCBuiltin<"__builtin_msa_srlr_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srlr_d : GCCBuiltin<"__builtin_msa_srlr_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srlri_b : GCCBuiltin<"__builtin_msa_srlri_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_h : GCCBuiltin<"__builtin_msa_srlri_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_w : GCCBuiltin<"__builtin_msa_srlri_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_d : GCCBuiltin<"__builtin_msa_srlri_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_st_b : GCCBuiltin<"__builtin_msa_st_b">,
+  Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty, llvm_i32_ty],
+  [IntrArgMemOnly]>;
+def int_mips_st_h : GCCBuiltin<"__builtin_msa_st_h">,
+  Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty, llvm_i32_ty],
+  [IntrArgMemOnly]>;
+def int_mips_st_w : GCCBuiltin<"__builtin_msa_st_w">,
+  Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
+  [IntrArgMemOnly]>;
+def int_mips_st_d : GCCBuiltin<"__builtin_msa_st_d">,
+  Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
+  [IntrArgMemOnly]>;
+
+def int_mips_subs_s_b : GCCBuiltin<"__builtin_msa_subs_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subs_s_h : GCCBuiltin<"__builtin_msa_subs_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subs_s_w : GCCBuiltin<"__builtin_msa_subs_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subs_s_d : GCCBuiltin<"__builtin_msa_subs_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subs_u_b : GCCBuiltin<"__builtin_msa_subs_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subs_u_h : GCCBuiltin<"__builtin_msa_subs_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subs_u_w : GCCBuiltin<"__builtin_msa_subs_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subs_u_d : GCCBuiltin<"__builtin_msa_subs_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subsus_u_b : GCCBuiltin<"__builtin_msa_subsus_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subsus_u_h : GCCBuiltin<"__builtin_msa_subsus_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subsus_u_w : GCCBuiltin<"__builtin_msa_subsus_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subsus_u_d : GCCBuiltin<"__builtin_msa_subsus_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subsuu_s_b : GCCBuiltin<"__builtin_msa_subsuu_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_h : GCCBuiltin<"__builtin_msa_subsuu_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_w : GCCBuiltin<"__builtin_msa_subsuu_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_d : GCCBuiltin<"__builtin_msa_subsuu_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subv_b : GCCBuiltin<"__builtin_msa_subv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subv_h : GCCBuiltin<"__builtin_msa_subv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subv_w : GCCBuiltin<"__builtin_msa_subv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subv_d : GCCBuiltin<"__builtin_msa_subv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subvi_b : GCCBuiltin<"__builtin_msa_subvi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_h : GCCBuiltin<"__builtin_msa_subvi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_w : GCCBuiltin<"__builtin_msa_subvi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_d : GCCBuiltin<"__builtin_msa_subvi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_vshf_b : GCCBuiltin<"__builtin_msa_vshf_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem]>;
+def int_mips_vshf_h : GCCBuiltin<"__builtin_msa_vshf_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+            [IntrNoMem]>;
+def int_mips_vshf_w : GCCBuiltin<"__builtin_msa_vshf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+            [IntrNoMem]>;
+def int_mips_vshf_d : GCCBuiltin<"__builtin_msa_vshf_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+            [IntrNoMem]>;
+
+def int_mips_xor_v : GCCBuiltin<"__builtin_msa_xor_v">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_xori_b : GCCBuiltin<"__builtin_msa_xori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsNVVM.td b/linux-x64/clang/include/llvm/IR/IntrinsicsNVVM.td
new file mode 100644
index 0000000..609aebd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsNVVM.td
@@ -0,0 +1,4041 @@
+//===- IntrinsicsNVVM.td - Defines NVVM intrinsics ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the NVVM-specific intrinsics for use with NVPTX.
+//
+//===----------------------------------------------------------------------===//
+
+// The following intrinsics were once defined here, but are now auto-upgraded
+// to target-generic LLVM intrinsics.
+//
+//   * llvm.nvvm.brev32  --> llvm.bitreverse.i32
+//   * llvm.nvvm.brev64  --> llvm.bitreverse.i64
+//   * llvm.nvvm.clz.i   --> llvm.ctlz.i32
+//   * llvm.nvvm.clz.ll  --> trunc i64 llvm.ctlz.i64(x) to i32
+//   * llvm.nvvm.popc.i  --> llvm.ctpop.i32
+//   * llvm.nvvm.popc.ll --> trunc i64 llvm.ctpop.i64 to i32
+//   * llvm.nvvm.abs.i   --> select(x >= -x, x, -x)
+//   * llvm.nvvm.abs.ll  --> ibid.
+//   * llvm.nvvm.max.i   --> select(x sge y, x, y)
+//   * llvm.nvvm.max.ll  --> ibid.
+//   * llvm.nvvm.max.ui  --> select(x uge y, x, y)
+//   * llvm.nvvm.max.ull --> ibid.
+//   * llvm.nvvm.min.i   --> select(x sle y, x, y)
+//   * llvm.nvvm.min.ll  --> ibid.
+//   * llvm.nvvm.min.ui  --> select(x ule y, x, y)
+//   * llvm.nvvm.min.ull --> ibid.
+//   * llvm.nvvm.h2f     --> llvm.convert.to.fp16.f32
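+//
+// For example, a sketch of how the auto-upgrader rewrites the first max
+// mapping above (the value names %x, %y, %c and %r are illustrative):
+//
+//     %r = call i32 @llvm.nvvm.max.i(i32 %x, i32 %y)
+//   becomes
+//     %c = icmp sge i32 %x, %y
+//     %r = select i1 %c, i32 %x, i32 %y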
+
+def llvm_anyi64ptr_ty     : LLVMAnyPointerType<llvm_i64_ty>;     // (space)i64*
+
+//
+// MISC
+//
+
+let TargetPrefix = "nvvm" in {
+  def int_nvvm_prmt : GCCBuiltin<"__nvvm_prmt">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem]>;
+
+//
+// Min Max
+//
+
+  def int_nvvm_fmin_f : GCCBuiltin<"__nvvm_fmin_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fmin_ftz_f : GCCBuiltin<"__nvvm_fmin_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_fmax_f : GCCBuiltin<"__nvvm_fmax_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fmax_ftz_f : GCCBuiltin<"__nvvm_fmax_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_fmin_d : GCCBuiltin<"__nvvm_fmin_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fmax_d : GCCBuiltin<"__nvvm_fmax_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+
+//
+// Multiplication
+//
+
+  def int_nvvm_mulhi_i : GCCBuiltin<"__nvvm_mulhi_i">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mulhi_ui : GCCBuiltin<"__nvvm_mulhi_ui">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_mulhi_ll : GCCBuiltin<"__nvvm_mulhi_ll">,
+      Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mulhi_ull : GCCBuiltin<"__nvvm_mulhi_ull">,
+      Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_mul_rn_ftz_f : GCCBuiltin<"__nvvm_mul_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rn_f : GCCBuiltin<"__nvvm_mul_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rz_ftz_f : GCCBuiltin<"__nvvm_mul_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rz_f : GCCBuiltin<"__nvvm_mul_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rm_ftz_f : GCCBuiltin<"__nvvm_mul_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rm_f : GCCBuiltin<"__nvvm_mul_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rp_ftz_f : GCCBuiltin<"__nvvm_mul_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rp_f : GCCBuiltin<"__nvvm_mul_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_mul_rn_d : GCCBuiltin<"__nvvm_mul_rn_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rz_d : GCCBuiltin<"__nvvm_mul_rz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rm_d : GCCBuiltin<"__nvvm_mul_rm_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul_rp_d : GCCBuiltin<"__nvvm_mul_rp_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_mul24_i : GCCBuiltin<"__nvvm_mul24_i">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_mul24_ui : GCCBuiltin<"__nvvm_mul24_ui">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
+
+//
+// Div
+//
+
+  def int_nvvm_div_approx_ftz_f : GCCBuiltin<"__nvvm_div_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_approx_f : GCCBuiltin<"__nvvm_div_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_div_rn_ftz_f : GCCBuiltin<"__nvvm_div_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rn_f : GCCBuiltin<"__nvvm_div_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_div_rz_ftz_f : GCCBuiltin<"__nvvm_div_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rz_f : GCCBuiltin<"__nvvm_div_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_div_rm_ftz_f : GCCBuiltin<"__nvvm_div_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rm_f : GCCBuiltin<"__nvvm_div_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_div_rp_ftz_f : GCCBuiltin<"__nvvm_div_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rp_f : GCCBuiltin<"__nvvm_div_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_div_rn_d : GCCBuiltin<"__nvvm_div_rn_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rz_d : GCCBuiltin<"__nvvm_div_rz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rm_d : GCCBuiltin<"__nvvm_div_rm_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem]>;
+  def int_nvvm_div_rp_d : GCCBuiltin<"__nvvm_div_rp_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem]>;
+
+//
+// Sad
+//
+
+  def int_nvvm_sad_i : GCCBuiltin<"__nvvm_sad_i">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_sad_ui : GCCBuiltin<"__nvvm_sad_ui">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem, Commutative]>;
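+
+  // A hedged sketch: PTX 'sad' accumulates an absolute difference, i.e. it
+  // computes c + |a - b|.  From clang CUDA device code:
+  //   __device__ int sad_acc(int a, int b, int c) {
+  //     return __nvvm_sad_i(a, b, c);  // c + |a - b|
+  //   }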
+
+//
+// Floor  Ceil
+//
+
+  def int_nvvm_floor_ftz_f : GCCBuiltin<"__nvvm_floor_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_floor_f : GCCBuiltin<"__nvvm_floor_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_floor_d : GCCBuiltin<"__nvvm_floor_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_ceil_ftz_f : GCCBuiltin<"__nvvm_ceil_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_ceil_f : GCCBuiltin<"__nvvm_ceil_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_ceil_d : GCCBuiltin<"__nvvm_ceil_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Abs
+//
+
+  def int_nvvm_fabs_ftz_f : GCCBuiltin<"__nvvm_fabs_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_fabs_f : GCCBuiltin<"__nvvm_fabs_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_fabs_d : GCCBuiltin<"__nvvm_fabs_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Round
+//
+
+  def int_nvvm_round_ftz_f : GCCBuiltin<"__nvvm_round_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_round_f : GCCBuiltin<"__nvvm_round_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_round_d : GCCBuiltin<"__nvvm_round_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Trunc
+//
+
+  def int_nvvm_trunc_ftz_f : GCCBuiltin<"__nvvm_trunc_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_trunc_f : GCCBuiltin<"__nvvm_trunc_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_trunc_d : GCCBuiltin<"__nvvm_trunc_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Saturate
+//
+
+  def int_nvvm_saturate_ftz_f : GCCBuiltin<"__nvvm_saturate_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_saturate_f : GCCBuiltin<"__nvvm_saturate_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_saturate_d : GCCBuiltin<"__nvvm_saturate_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Exp2  Log2
+//
+
+  def int_nvvm_ex2_approx_ftz_f : GCCBuiltin<"__nvvm_ex2_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_ex2_approx_f : GCCBuiltin<"__nvvm_ex2_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_ex2_approx_d : GCCBuiltin<"__nvvm_ex2_approx_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_lg2_approx_ftz_f : GCCBuiltin<"__nvvm_lg2_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_lg2_approx_f : GCCBuiltin<"__nvvm_lg2_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_lg2_approx_d : GCCBuiltin<"__nvvm_lg2_approx_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Sin  Cos
+//
+
+  def int_nvvm_sin_approx_ftz_f : GCCBuiltin<"__nvvm_sin_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sin_approx_f : GCCBuiltin<"__nvvm_sin_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_cos_approx_ftz_f : GCCBuiltin<"__nvvm_cos_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_cos_approx_f : GCCBuiltin<"__nvvm_cos_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+//
+// Fma
+//
+
+  def int_nvvm_fma_rn_ftz_f : GCCBuiltin<"__nvvm_fma_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rn_f : GCCBuiltin<"__nvvm_fma_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rz_ftz_f : GCCBuiltin<"__nvvm_fma_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rz_f : GCCBuiltin<"__nvvm_fma_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rm_ftz_f : GCCBuiltin<"__nvvm_fma_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rm_f : GCCBuiltin<"__nvvm_fma_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rp_ftz_f : GCCBuiltin<"__nvvm_fma_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rp_f : GCCBuiltin<"__nvvm_fma_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_fma_rn_d : GCCBuiltin<"__nvvm_fma_rn_d">,
+      Intrinsic<[llvm_double_ty],
+        [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rz_d : GCCBuiltin<"__nvvm_fma_rz_d">,
+      Intrinsic<[llvm_double_ty],
+        [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rm_d : GCCBuiltin<"__nvvm_fma_rm_d">,
+      Intrinsic<[llvm_double_ty],
+        [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_fma_rp_d : GCCBuiltin<"__nvvm_fma_rp_d">,
+      Intrinsic<[llvm_double_ty],
+        [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
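+
+  // A minimal sketch: these are fused multiply-adds (a * b + c with a single
+  // rounding step) under an explicit rounding mode, e.g.:
+  //   __device__ float fmadd(float a, float b, float c) {
+  //     return __nvvm_fma_rn_f(a, b, c);  // fused, round to nearest even
+  //   }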
+
+//
+// Rcp
+//
+
+  def int_nvvm_rcp_rn_ftz_f : GCCBuiltin<"__nvvm_rcp_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rn_f : GCCBuiltin<"__nvvm_rcp_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rz_ftz_f : GCCBuiltin<"__nvvm_rcp_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rz_f : GCCBuiltin<"__nvvm_rcp_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rm_ftz_f : GCCBuiltin<"__nvvm_rcp_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rm_f : GCCBuiltin<"__nvvm_rcp_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rp_ftz_f : GCCBuiltin<"__nvvm_rcp_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rp_f : GCCBuiltin<"__nvvm_rcp_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_rcp_rn_d : GCCBuiltin<"__nvvm_rcp_rn_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rz_d : GCCBuiltin<"__nvvm_rcp_rz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rm_d : GCCBuiltin<"__nvvm_rcp_rm_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_rcp_rp_d : GCCBuiltin<"__nvvm_rcp_rp_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_rcp_approx_ftz_d : GCCBuiltin<"__nvvm_rcp_approx_ftz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
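+
+  // A minimal sketch contrasting the IEEE-rounded reciprocal with the faster
+  // approximate, flush-to-zero form:
+  //   __device__ double recip(double x)   { return __nvvm_rcp_rn_d(x); }
+  //   __device__ double recip_a(double x) { return __nvvm_rcp_approx_ftz_d(x); }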
+
+//
+// Sqrt
+//
+
+  def int_nvvm_sqrt_f : GCCBuiltin<"__nvvm_sqrt_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rn_ftz_f : GCCBuiltin<"__nvvm_sqrt_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rn_f : GCCBuiltin<"__nvvm_sqrt_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rz_ftz_f : GCCBuiltin<"__nvvm_sqrt_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rz_f : GCCBuiltin<"__nvvm_sqrt_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rm_ftz_f : GCCBuiltin<"__nvvm_sqrt_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rm_f : GCCBuiltin<"__nvvm_sqrt_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rp_ftz_f : GCCBuiltin<"__nvvm_sqrt_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rp_f : GCCBuiltin<"__nvvm_sqrt_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_approx_ftz_f : GCCBuiltin<"__nvvm_sqrt_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_approx_f : GCCBuiltin<"__nvvm_sqrt_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_sqrt_rn_d : GCCBuiltin<"__nvvm_sqrt_rn_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rz_d : GCCBuiltin<"__nvvm_sqrt_rz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rm_d : GCCBuiltin<"__nvvm_sqrt_rm_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_sqrt_rp_d : GCCBuiltin<"__nvvm_sqrt_rp_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Rsqrt
+//
+
+  def int_nvvm_rsqrt_approx_ftz_f : GCCBuiltin<"__nvvm_rsqrt_approx_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rsqrt_approx_f : GCCBuiltin<"__nvvm_rsqrt_approx_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_rsqrt_approx_d : GCCBuiltin<"__nvvm_rsqrt_approx_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Add
+//
+
+  def int_nvvm_add_rn_ftz_f : GCCBuiltin<"__nvvm_add_rn_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rn_f : GCCBuiltin<"__nvvm_add_rn_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rz_ftz_f : GCCBuiltin<"__nvvm_add_rz_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rz_f : GCCBuiltin<"__nvvm_add_rz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rm_ftz_f : GCCBuiltin<"__nvvm_add_rm_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rm_f : GCCBuiltin<"__nvvm_add_rm_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rp_ftz_f : GCCBuiltin<"__nvvm_add_rp_ftz_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rp_f : GCCBuiltin<"__nvvm_add_rp_f">,
+      Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+        [IntrNoMem, Commutative]>;
+
+  def int_nvvm_add_rn_d : GCCBuiltin<"__nvvm_add_rn_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rz_d : GCCBuiltin<"__nvvm_add_rz_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rm_d : GCCBuiltin<"__nvvm_add_rm_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+  def int_nvvm_add_rp_d : GCCBuiltin<"__nvvm_add_rp_d">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+        [IntrNoMem, Commutative]>;
+
+//
+// Convert
+//
+
+  def int_nvvm_d2f_rn_ftz : GCCBuiltin<"__nvvm_d2f_rn_ftz">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rn : GCCBuiltin<"__nvvm_d2f_rn">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rz_ftz : GCCBuiltin<"__nvvm_d2f_rz_ftz">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rz : GCCBuiltin<"__nvvm_d2f_rz">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rm_ftz : GCCBuiltin<"__nvvm_d2f_rm_ftz">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rm : GCCBuiltin<"__nvvm_d2f_rm">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rp_ftz : GCCBuiltin<"__nvvm_d2f_rp_ftz">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2f_rp : GCCBuiltin<"__nvvm_d2f_rp">,
+      Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
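+
+  // Sketch: double-to-float narrowing under an explicit rounding mode, e.g.:
+  //   __device__ float narrow_rz(double x) {
+  //     return __nvvm_d2f_rz(x);  // round toward zero while narrowing
+  //   }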
+
+  def int_nvvm_d2i_rn : GCCBuiltin<"__nvvm_d2i_rn">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2i_rz : GCCBuiltin<"__nvvm_d2i_rz">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2i_rm : GCCBuiltin<"__nvvm_d2i_rm">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2i_rp : GCCBuiltin<"__nvvm_d2i_rp">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_d2ui_rn : GCCBuiltin<"__nvvm_d2ui_rn">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ui_rz : GCCBuiltin<"__nvvm_d2ui_rz">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ui_rm : GCCBuiltin<"__nvvm_d2ui_rm">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ui_rp : GCCBuiltin<"__nvvm_d2ui_rp">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_i2d_rn : GCCBuiltin<"__nvvm_i2d_rn">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2d_rz : GCCBuiltin<"__nvvm_i2d_rz">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2d_rm : GCCBuiltin<"__nvvm_i2d_rm">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2d_rp : GCCBuiltin<"__nvvm_i2d_rp">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  def int_nvvm_ui2d_rn : GCCBuiltin<"__nvvm_ui2d_rn">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2d_rz : GCCBuiltin<"__nvvm_ui2d_rz">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2d_rm : GCCBuiltin<"__nvvm_ui2d_rm">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2d_rp : GCCBuiltin<"__nvvm_ui2d_rp">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  def int_nvvm_f2i_rn_ftz : GCCBuiltin<"__nvvm_f2i_rn_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rn : GCCBuiltin<"__nvvm_f2i_rn">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rz_ftz : GCCBuiltin<"__nvvm_f2i_rz_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rz : GCCBuiltin<"__nvvm_f2i_rz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rm_ftz : GCCBuiltin<"__nvvm_f2i_rm_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rm : GCCBuiltin<"__nvvm_f2i_rm">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rp_ftz : GCCBuiltin<"__nvvm_f2i_rp_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2i_rp : GCCBuiltin<"__nvvm_f2i_rp">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_f2ui_rn_ftz : GCCBuiltin<"__nvvm_f2ui_rn_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rn : GCCBuiltin<"__nvvm_f2ui_rn">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rz_ftz : GCCBuiltin<"__nvvm_f2ui_rz_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rz : GCCBuiltin<"__nvvm_f2ui_rz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rm_ftz : GCCBuiltin<"__nvvm_f2ui_rm_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rm : GCCBuiltin<"__nvvm_f2ui_rm">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rp_ftz : GCCBuiltin<"__nvvm_f2ui_rp_ftz">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ui_rp : GCCBuiltin<"__nvvm_f2ui_rp">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_i2f_rn : GCCBuiltin<"__nvvm_i2f_rn">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2f_rz : GCCBuiltin<"__nvvm_i2f_rz">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2f_rm : GCCBuiltin<"__nvvm_i2f_rm">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_i2f_rp : GCCBuiltin<"__nvvm_i2f_rp">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  def int_nvvm_ui2f_rn : GCCBuiltin<"__nvvm_ui2f_rn">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2f_rz : GCCBuiltin<"__nvvm_ui2f_rz">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2f_rm : GCCBuiltin<"__nvvm_ui2f_rm">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_nvvm_ui2f_rp : GCCBuiltin<"__nvvm_ui2f_rp">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  def int_nvvm_lohi_i2d : GCCBuiltin<"__nvvm_lohi_i2d">,
+      Intrinsic<[llvm_double_ty], [llvm_i32_ty, llvm_i32_ty],
+        [IntrNoMem]>;
+
+  def int_nvvm_d2i_lo : GCCBuiltin<"__nvvm_d2i_lo">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2i_hi : GCCBuiltin<"__nvvm_d2i_hi">,
+      Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_f2ll_rn_ftz : GCCBuiltin<"__nvvm_f2ll_rn_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rn : GCCBuiltin<"__nvvm_f2ll_rn">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rz_ftz : GCCBuiltin<"__nvvm_f2ll_rz_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rz : GCCBuiltin<"__nvvm_f2ll_rz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rm_ftz : GCCBuiltin<"__nvvm_f2ll_rm_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rm : GCCBuiltin<"__nvvm_f2ll_rm">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rp_ftz : GCCBuiltin<"__nvvm_f2ll_rp_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ll_rp : GCCBuiltin<"__nvvm_f2ll_rp">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_f2ull_rn_ftz : GCCBuiltin<"__nvvm_f2ull_rn_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rn : GCCBuiltin<"__nvvm_f2ull_rn">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rz_ftz : GCCBuiltin<"__nvvm_f2ull_rz_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rz : GCCBuiltin<"__nvvm_f2ull_rz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rm_ftz : GCCBuiltin<"__nvvm_f2ull_rm_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rm : GCCBuiltin<"__nvvm_f2ull_rm">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rp_ftz : GCCBuiltin<"__nvvm_f2ull_rp_ftz">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2ull_rp : GCCBuiltin<"__nvvm_f2ull_rp">,
+      Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+  def int_nvvm_d2ll_rn : GCCBuiltin<"__nvvm_d2ll_rn">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ll_rz : GCCBuiltin<"__nvvm_d2ll_rz">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ll_rm : GCCBuiltin<"__nvvm_d2ll_rm">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ll_rp : GCCBuiltin<"__nvvm_d2ll_rp">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_d2ull_rn : GCCBuiltin<"__nvvm_d2ull_rn">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ull_rz : GCCBuiltin<"__nvvm_d2ull_rz">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ull_rm : GCCBuiltin<"__nvvm_d2ull_rm">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+  def int_nvvm_d2ull_rp : GCCBuiltin<"__nvvm_d2ull_rp">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+  def int_nvvm_ll2f_rn : GCCBuiltin<"__nvvm_ll2f_rn">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2f_rz : GCCBuiltin<"__nvvm_ll2f_rz">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2f_rm : GCCBuiltin<"__nvvm_ll2f_rm">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2f_rp : GCCBuiltin<"__nvvm_ll2f_rp">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2f_rn : GCCBuiltin<"__nvvm_ull2f_rn">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2f_rz : GCCBuiltin<"__nvvm_ull2f_rz">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2f_rm : GCCBuiltin<"__nvvm_ull2f_rm">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2f_rp : GCCBuiltin<"__nvvm_ull2f_rp">,
+      Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+  def int_nvvm_ll2d_rn : GCCBuiltin<"__nvvm_ll2d_rn">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2d_rz : GCCBuiltin<"__nvvm_ll2d_rz">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2d_rm : GCCBuiltin<"__nvvm_ll2d_rm">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ll2d_rp : GCCBuiltin<"__nvvm_ll2d_rp">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2d_rn : GCCBuiltin<"__nvvm_ull2d_rn">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2d_rz : GCCBuiltin<"__nvvm_ull2d_rz">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2d_rm : GCCBuiltin<"__nvvm_ull2d_rm">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_ull2d_rp : GCCBuiltin<"__nvvm_ull2d_rp">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+  def int_nvvm_f2h_rn_ftz : GCCBuiltin<"__nvvm_f2h_rn_ftz">,
+      Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_f2h_rn : GCCBuiltin<"__nvvm_f2h_rn">,
+      Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
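+
+  // Hedged note: there is no half type in these signatures; the converted
+  // fp16 value is returned as its raw bit pattern in an i16 (assumed to be
+  // an unsigned short at the C level), e.g.:
+  //   __device__ unsigned short half_bits(float x) {
+  //     return __nvvm_f2h_rn(x);  // IEEE fp16 bits, round to nearest even
+  //   }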
+
+//
+// Bitcast
+//
+
+  def int_nvvm_bitcast_f2i : GCCBuiltin<"__nvvm_bitcast_f2i">,
+      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_nvvm_bitcast_i2f : GCCBuiltin<"__nvvm_bitcast_i2f">,
+      Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+  def int_nvvm_bitcast_ll2d : GCCBuiltin<"__nvvm_bitcast_ll2d">,
+      Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+  def int_nvvm_bitcast_d2ll : GCCBuiltin<"__nvvm_bitcast_d2ll">,
+      Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
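+
+  // Sketch: these reinterpret bits without any value conversion, e.g.:
+  //   __device__ int float_bits(float x) { return __nvvm_bitcast_f2i(x); }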
+
+// FNS (find the n-th set bit)
+
+  def int_nvvm_fns : GCCBuiltin<"__nvvm_fns">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem]>;
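+
+  // Hedged sketch (per the PTX 'fns' instruction: return the bit position of
+  // the n-th set bit in 'mask', searching from 'base'):
+  //   __device__ int nth_set(unsigned mask, unsigned base, int n) {
+  //     return __nvvm_fns(mask, base, n);
+  //   }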
+
+// Atomics not available as llvm intrinsics.
+  def int_nvvm_atomic_load_add_f32 : Intrinsic<[llvm_float_ty],
+          [LLVMAnyPointerType<llvm_float_ty>, llvm_float_ty],
+                                      [IntrArgMemOnly, NoCapture<0>]>;
+  // Atomic add of f64 requires sm_60.
+  def int_nvvm_atomic_load_add_f64 : Intrinsic<[llvm_double_ty],
+          [LLVMAnyPointerType<llvm_double_ty>, llvm_double_ty],
+                                      [IntrArgMemOnly, NoCapture<0>]>;
+
+  def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
+          [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+                                      [IntrArgMemOnly, NoCapture<0>]>;
+  def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
+          [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+                                      [IntrArgMemOnly, NoCapture<0>]>;
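+
+  // Hedged sketch (assumes clang's __nvvm_atom_add_gen_f builtin, which is
+  // believed to lower to llvm.nvvm.atomic.load.add.f32 here):
+  //   __device__ float atomic_fadd(float *p, float v) {
+  //     return __nvvm_atom_add_gen_f(p, v);  // returns the pre-add value
+  //   }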
+
+  class SCOPED_ATOMIC2_impl<LLVMType elty>
+        : Intrinsic<[elty],
+          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
+          [IntrArgMemOnly, NoCapture<0>]>;
+  class SCOPED_ATOMIC3_impl<LLVMType elty>
+        : Intrinsic<[elty],
+          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
+           LLVMMatchType<0>],
+          [IntrArgMemOnly, NoCapture<0>]>;
+
+  multiclass PTXAtomicWithScope2<LLVMType elty> {
+    def _cta : SCOPED_ATOMIC2_impl<elty>;
+    def _sys : SCOPED_ATOMIC2_impl<elty>;
+  }
+  multiclass PTXAtomicWithScope3<LLVMType elty> {
+    def _cta : SCOPED_ATOMIC3_impl<elty>;
+    def _sys : SCOPED_ATOMIC3_impl<elty>;
+  }
+  multiclass PTXAtomicWithScope2_fi {
+    defm _f: PTXAtomicWithScope2<llvm_anyfloat_ty>;
+    defm _i: PTXAtomicWithScope2<llvm_anyint_ty>;
+  }
+  defm int_nvvm_atomic_add_gen   : PTXAtomicWithScope2_fi;
+  defm int_nvvm_atomic_inc_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_dec_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_exch_gen_i: PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_xor_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_max_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_min_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_or_gen_i  : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_and_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+  defm int_nvvm_atomic_cas_gen_i : PTXAtomicWithScope3<llvm_anyint_ty>;
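+
+  // Note: each defm above expands into a _cta and a _sys variant of the same
+  // operation, i.e. atomics with CTA-wide vs. system-wide scope.  For
+  // example, int_nvvm_atomic_add_gen_f yields int_nvvm_atomic_add_gen_f_cta
+  // and int_nvvm_atomic_add_gen_f_sys (IR names llvm.nvvm.atomic.add.gen.f.cta
+  // and .sys, plus the usual overload suffixes).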
+
+// Bar.Sync
+
+  // The builtin for "bar.sync 0" is called __syncthreads.  Unlike most of the
+  // intrinsics in this file, this one is a user-facing API.
+  def int_nvvm_barrier0 : GCCBuiltin<"__syncthreads">,
+      Intrinsic<[], [], [IntrConvergent]>;
+  // Synchronize all threads in the CTA at barrier 'n'.
+  def int_nvvm_barrier_n : GCCBuiltin<"__nvvm_bar_n">,
+      Intrinsic<[], [llvm_i32_ty], [IntrConvergent]>;
+  // Synchronize 'm' threads (arg 2), where 'm' must be a multiple of the
+  // warp size, at barrier 'n' (arg 1) in the CTA.
+  def int_nvvm_barrier : GCCBuiltin<"__nvvm_bar">,
+      Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [IntrConvergent]>;
+  def int_nvvm_barrier0_popc : GCCBuiltin<"__nvvm_bar0_popc">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent]>;
+  def int_nvvm_barrier0_and : GCCBuiltin<"__nvvm_bar0_and">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent]>;
+  def int_nvvm_barrier0_or : GCCBuiltin<"__nvvm_bar0_or">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent]>;
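+
+  // Hedged sketch: the barrier0_* forms combine "bar.sync 0" with a
+  // reduction of a per-thread predicate across the CTA (cf. CUDA's
+  // __syncthreads_count / _and / _or), e.g.:
+  //   __device__ int count_true(int pred) { return __nvvm_bar0_popc(pred); }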
+
+  def int_nvvm_bar_sync :
+      Intrinsic<[], [llvm_i32_ty], [IntrConvergent]>,
+      GCCBuiltin<"__nvvm_bar_sync">;
+  def int_nvvm_bar_warp_sync :
+      Intrinsic<[], [llvm_i32_ty], [IntrConvergent]>,
+      GCCBuiltin<"__nvvm_bar_warp_sync">;
+
+  // barrier.sync id[, cnt]
+  def int_nvvm_barrier_sync :
+      Intrinsic<[], [llvm_i32_ty], [IntrConvergent]>,
+      GCCBuiltin<"__nvvm_barrier_sync">;
+  def int_nvvm_barrier_sync_cnt :
+      Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [IntrConvergent]>,
+      GCCBuiltin<"__nvvm_barrier_sync_cnt">;
+
+  // Membar
+  def int_nvvm_membar_cta : GCCBuiltin<"__nvvm_membar_cta">,
+      Intrinsic<[], [], []>;
+  def int_nvvm_membar_gl : GCCBuiltin<"__nvvm_membar_gl">,
+      Intrinsic<[], [], []>;
+  def int_nvvm_membar_sys : GCCBuiltin<"__nvvm_membar_sys">,
+      Intrinsic<[], [], []>;
+
+// Generated within nvvm. Use for ldu on sm_20 or later.  Second arg is the
+// pointer's alignment.
+def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldu.global.i">;
+def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldu.global.f">;
+def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldu.global.p">;
+
+// Generated within nvvm. Use for ldg on sm_35 or later.  Second arg is the
+// pointer's alignment.
+def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldg.global.i">;
+def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldg.global.f">;
+def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
+  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+  "llvm.nvvm.ldg.global.p">;
+
+// Use for generic pointers
+// - These intrinsics are used to convert address spaces.
+// - The input pointer and output pointer must have the same type, except for
+//   the address space. (This restriction is not enforced here, as there is
+//   currently no way to describe it.)
+// - This complements the llvm bitcast, which can be used to cast one type
+//   of pointer to another type of pointer while the address space remains
+//   the same.
+def int_nvvm_ptr_local_to_gen: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.local.to.gen">;
+def int_nvvm_ptr_shared_to_gen: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.shared.to.gen">;
+def int_nvvm_ptr_global_to_gen: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.global.to.gen">;
+def int_nvvm_ptr_constant_to_gen: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.constant.to.gen">;
+
+def int_nvvm_ptr_gen_to_global: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.gen.to.global">;
+def int_nvvm_ptr_gen_to_shared: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.gen.to.shared">;
+def int_nvvm_ptr_gen_to_local: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.gen.to.local">;
+def int_nvvm_ptr_gen_to_constant: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.gen.to.constant">;
+
+// Used internally by nvvm to help address-space optimization and PTX code
+// generation. This is for kernel parameters that are passed by pointer
+// by-value.
+def int_nvvm_ptr_gen_to_param: Intrinsic<[llvm_anyptr_ty],
+                 [llvm_anyptr_ty], [IntrNoMem],
+                 "llvm.nvvm.ptr.gen.to.param">;
+
+// Move intrinsics, used in nvvm internally
+
+def int_nvvm_move_i16 : Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem],
+  "llvm.nvvm.move.i16">;
+def int_nvvm_move_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem],
+  "llvm.nvvm.move.i32">;
+def int_nvvm_move_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem],
+  "llvm.nvvm.move.i64">;
+def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty],
+  [IntrNoMem], "llvm.nvvm.move.float">;
+def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty],
+  [IntrNoMem], "llvm.nvvm.move.double">;
+def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
+  [IntrNoMem, NoCapture<0>], "llvm.nvvm.move.ptr">;
+
+
+// For getting the handle from a texture or surface variable
+def int_nvvm_texsurf_handle
+  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyi64ptr_ty],
+              [IntrNoMem], "llvm.nvvm.texsurf.handle">;
+def int_nvvm_texsurf_handle_internal
+  : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
+              [IntrNoMem], "llvm.nvvm.texsurf.handle.internal">;
+
+// Error / Warn
+def int_nvvm_compiler_error :
+    Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.error">;
+def int_nvvm_compiler_warn :
+    Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.warn">;
+
+def int_nvvm_reflect :
+  Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty], [IntrNoMem], "llvm.nvvm.reflect">;
+
+// isspacep.{const, global, local, shared}
+def int_nvvm_isspacep_const
+  : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+              "llvm.nvvm.isspacep.const">,
+    GCCBuiltin<"__nvvm_isspacep_const">;
+def int_nvvm_isspacep_global
+  : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+              "llvm.nvvm.isspacep.global">,
+    GCCBuiltin<"__nvvm_isspacep_global">;
+def int_nvvm_isspacep_local
+  : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+              "llvm.nvvm.isspacep.local">,
+    GCCBuiltin<"__nvvm_isspacep_local">;
+def int_nvvm_isspacep_shared
+  : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+              "llvm.nvvm.isspacep.shared">,
+    GCCBuiltin<"__nvvm_isspacep_shared">;
+
+// Environment register read
+def int_nvvm_read_ptx_sreg_envreg0
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg0">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg0">;
+def int_nvvm_read_ptx_sreg_envreg1
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg1">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg1">;
+def int_nvvm_read_ptx_sreg_envreg2
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg2">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg2">;
+def int_nvvm_read_ptx_sreg_envreg3
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg3">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg3">;
+def int_nvvm_read_ptx_sreg_envreg4
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg4">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg4">;
+def int_nvvm_read_ptx_sreg_envreg5
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg5">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg5">;
+def int_nvvm_read_ptx_sreg_envreg6
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg6">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg6">;
+def int_nvvm_read_ptx_sreg_envreg7
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg7">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg7">;
+def int_nvvm_read_ptx_sreg_envreg8
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg8">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg8">;
+def int_nvvm_read_ptx_sreg_envreg9
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg9">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg9">;
+def int_nvvm_read_ptx_sreg_envreg10
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg10">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg10">;
+def int_nvvm_read_ptx_sreg_envreg11
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg11">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg11">;
+def int_nvvm_read_ptx_sreg_envreg12
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg12">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg12">;
+def int_nvvm_read_ptx_sreg_envreg13
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg13">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg13">;
+def int_nvvm_read_ptx_sreg_envreg14
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg14">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg14">;
+def int_nvvm_read_ptx_sreg_envreg15
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg15">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg15">;
+def int_nvvm_read_ptx_sreg_envreg16
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg16">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg16">;
+def int_nvvm_read_ptx_sreg_envreg17
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg17">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg17">;
+def int_nvvm_read_ptx_sreg_envreg18
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg18">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg18">;
+def int_nvvm_read_ptx_sreg_envreg19
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg19">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg19">;
+def int_nvvm_read_ptx_sreg_envreg20
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg20">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg20">;
+def int_nvvm_read_ptx_sreg_envreg21
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg21">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg21">;
+def int_nvvm_read_ptx_sreg_envreg22
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg22">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg22">;
+def int_nvvm_read_ptx_sreg_envreg23
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg23">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg23">;
+def int_nvvm_read_ptx_sreg_envreg24
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg24">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg24">;
+def int_nvvm_read_ptx_sreg_envreg25
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg25">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg25">;
+def int_nvvm_read_ptx_sreg_envreg26
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg26">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg26">;
+def int_nvvm_read_ptx_sreg_envreg27
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg27">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg27">;
+def int_nvvm_read_ptx_sreg_envreg28
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg28">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg28">;
+def int_nvvm_read_ptx_sreg_envreg29
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg29">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg29">;
+def int_nvvm_read_ptx_sreg_envreg30
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg30">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg30">;
+def int_nvvm_read_ptx_sreg_envreg31
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+              "llvm.nvvm.read.ptx.sreg.envreg31">,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_envreg31">;
+
+
+// Texture Fetch
+// texmode_independent
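+// Hedged note on the operand layout below: in texmode_independent mode the
+// first i64 operand is the texture handle and the second is the sampler
+// handle, followed by the coordinates (plus mip level or gradients for the
+// .level/.grad variants); each fetch returns the four texel components.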
+def int_nvvm_tex_1d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.v4f32.s32">;
+def int_nvvm_tex_1d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.v4f32.f32">;
+def int_nvvm_tex_1d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.level.v4f32.f32">;
+def int_nvvm_tex_1d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.grad.v4f32.f32">;
+def int_nvvm_tex_1d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.v4s32.s32">;
+def int_nvvm_tex_1d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.v4s32.f32">;
+def int_nvvm_tex_1d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.level.v4s32.f32">;
+def int_nvvm_tex_1d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.grad.v4s32.f32">;
+def int_nvvm_tex_1d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.v4u32.s32">;
+def int_nvvm_tex_1d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.v4u32.f32">;
+def int_nvvm_tex_1d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.level.v4u32.f32">;
+def int_nvvm_tex_1d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.grad.v4u32.f32">;
+
+def int_nvvm_tex_1d_array_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.array.v4f32.s32">;
+def int_nvvm_tex_1d_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.v4f32.f32">;
+def int_nvvm_tex_1d_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.level.v4f32.f32">;
+def int_nvvm_tex_1d_array_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.grad.v4f32.f32">;
+def int_nvvm_tex_1d_array_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.array.v4s32.s32">;
+def int_nvvm_tex_1d_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.v4s32.f32">;
+def int_nvvm_tex_1d_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.level.v4s32.f32">;
+def int_nvvm_tex_1d_array_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.grad.v4s32.f32">;
+def int_nvvm_tex_1d_array_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.1d.array.v4u32.s32">;
+def int_nvvm_tex_1d_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.v4u32.f32">;
+def int_nvvm_tex_1d_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.level.v4u32.f32">;
+def int_nvvm_tex_1d_array_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.1d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_2d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.v4f32.s32">;
+def int_nvvm_tex_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.v4f32.f32">;
+def int_nvvm_tex_2d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.level.v4f32.f32">;
+def int_nvvm_tex_2d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.grad.v4f32.f32">;
+def int_nvvm_tex_2d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.v4s32.s32">;
+def int_nvvm_tex_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.v4s32.f32">;
+def int_nvvm_tex_2d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.level.v4s32.f32">;
+def int_nvvm_tex_2d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.grad.v4s32.f32">;
+def int_nvvm_tex_2d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.v4u32.s32">;
+def int_nvvm_tex_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.v4u32.f32">;
+def int_nvvm_tex_2d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.level.v4u32.f32">;
+def int_nvvm_tex_2d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.grad.v4u32.f32">;
+
+def int_nvvm_tex_2d_array_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.array.v4f32.s32">;
+def int_nvvm_tex_2d_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.v4f32.f32">;
+def int_nvvm_tex_2d_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.level.v4f32.f32">;
+def int_nvvm_tex_2d_array_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.grad.v4f32.f32">;
+def int_nvvm_tex_2d_array_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.array.v4s32.s32">;
+def int_nvvm_tex_2d_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.v4s32.f32">;
+def int_nvvm_tex_2d_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.level.v4s32.f32">;
+def int_nvvm_tex_2d_array_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.grad.v4s32.f32">;
+def int_nvvm_tex_2d_array_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.2d.array.v4u32.s32">;
+def int_nvvm_tex_2d_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.v4u32.f32">;
+def int_nvvm_tex_2d_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.level.v4u32.f32">;
+def int_nvvm_tex_2d_array_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.2d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_3d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.3d.v4f32.s32">;
+def int_nvvm_tex_3d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.v4f32.f32">;
+def int_nvvm_tex_3d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.level.v4f32.f32">;
+def int_nvvm_tex_3d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.grad.v4f32.f32">;
+def int_nvvm_tex_3d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.3d.v4s32.s32">;
+def int_nvvm_tex_3d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.v4s32.f32">;
+def int_nvvm_tex_3d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.level.v4s32.f32">;
+def int_nvvm_tex_3d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.grad.v4s32.f32">;
+def int_nvvm_tex_3d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.3d.v4u32.s32">;
+def int_nvvm_tex_3d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.v4u32.f32">;
+def int_nvvm_tex_3d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.level.v4u32.f32">;
+def int_nvvm_tex_3d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.3d.grad.v4u32.f32">;
+
+def int_nvvm_tex_cube_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.v4f32.f32">;
+def int_nvvm_tex_cube_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.level.v4f32.f32">;
+def int_nvvm_tex_cube_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.v4s32.f32">;
+def int_nvvm_tex_cube_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.level.v4s32.f32">;
+def int_nvvm_tex_cube_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.v4u32.f32">;
+def int_nvvm_tex_cube_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.level.v4u32.f32">;
+
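+// Cube array fetches add an i32 layer index ahead of the direction vector.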
+def int_nvvm_tex_cube_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.v4f32.f32">;
+def int_nvvm_tex_cube_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.level.v4f32.f32">;
+def int_nvvm_tex_cube_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.v4s32.f32">;
+def int_nvvm_tex_cube_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.level.v4s32.f32">;
+def int_nvvm_tex_cube_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.v4u32.f32">;
+def int_nvvm_tex_cube_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.cube.array.level.v4u32.f32">;
+
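+// tld4 (texture gather) returns a single component, r, g, b, or a as named
+// by the intrinsic, from each of the four texels that bilinear filtering at
+// (x, y) would sample.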
+def int_nvvm_tld4_r_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.r.2d.v4f32.f32">;
+def int_nvvm_tld4_g_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.g.2d.v4f32.f32">;
+def int_nvvm_tld4_b_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.b.2d.v4f32.f32">;
+def int_nvvm_tld4_a_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.a.2d.v4f32.f32">;
+def int_nvvm_tld4_r_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.r.2d.v4s32.f32">;
+def int_nvvm_tld4_g_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.g.2d.v4s32.f32">;
+def int_nvvm_tld4_b_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.b.2d.v4s32.f32">;
+def int_nvvm_tld4_a_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.a.2d.v4s32.f32">;
+def int_nvvm_tld4_r_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.r.2d.v4u32.f32">;
+def int_nvvm_tld4_g_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.g.2d.v4u32.f32">;
+def int_nvvm_tld4_b_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.b.2d.v4u32.f32">;
+def int_nvvm_tld4_a_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.a.2d.v4u32.f32">;
+
+
+// texmode_unified
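+// In unified texture mode the sampler state is bound together with the
+// texture, so these intrinsics take a single i64 handle instead of the
+// separate texture and sampler handles used by the intrinsics above.
+// For example, a unified-mode 2d fetch in LLVM IR (operand names are
+// illustrative; multi-result intrinsics return a literal struct):
+//   %v = call { float, float, float, float }
+//        @llvm.nvvm.tex.unified.2d.v4f32.f32(i64 %tex, float %x, float %y)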
+def int_nvvm_tex_unified_1d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4f32.s32">;
+def int_nvvm_tex_unified_1d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4f32.f32">;
+def int_nvvm_tex_unified_1d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.level.v4f32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_1d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4s32.s32">;
+def int_nvvm_tex_unified_1d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4s32.f32">;
+def int_nvvm_tex_unified_1d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.level.v4s32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_1d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4u32.s32">;
+def int_nvvm_tex_unified_1d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.v4u32.f32">;
+def int_nvvm_tex_unified_1d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.level.v4u32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_1d_array_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4f32.s32">;
+def int_nvvm_tex_unified_1d_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4s32.s32">;
+def int_nvvm_tex_unified_1d_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4u32.s32">;
+def int_nvvm_tex_unified_1d_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.v4u32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.level.v4u32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_2d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4f32.s32">;
+def int_nvvm_tex_unified_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4f32.f32">;
+def int_nvvm_tex_unified_2d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.level.v4f32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_2d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4s32.s32">;
+def int_nvvm_tex_unified_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4s32.f32">;
+def int_nvvm_tex_unified_2d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.level.v4s32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_2d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4u32.s32">;
+def int_nvvm_tex_unified_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.v4u32.f32">;
+def int_nvvm_tex_unified_2d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.level.v4u32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_2d_array_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4f32.s32">;
+def int_nvvm_tex_unified_2d_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4s32.s32">;
+def int_nvvm_tex_unified_2d_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+               llvm_i32_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4u32.s32">;
+def int_nvvm_tex_unified_2d_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.v4u32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.level.v4u32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_3d_v4f32_s32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.unified.3d.v4f32.s32">;
+def int_nvvm_tex_unified_3d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.v4f32.f32">;
+def int_nvvm_tex_unified_3d_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.level.v4f32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_3d_v4s32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.unified.3d.v4s32.s32">;
+def int_nvvm_tex_unified_3d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.v4s32.f32">;
+def int_nvvm_tex_unified_3d_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.level.v4s32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_3d_v4u32_s32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [], "llvm.nvvm.tex.unified.3d.v4u32.s32">;
+def int_nvvm_tex_unified_3d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.v4u32.f32">;
+def int_nvvm_tex_unified_3d_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.level.v4u32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.3d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_cube_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.v4f32.f32">;
+def int_nvvm_tex_unified_cube_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.level.v4f32.f32">;
+def int_nvvm_tex_unified_cube_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.v4s32.f32">;
+def int_nvvm_tex_unified_cube_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.level.v4s32.f32">;
+def int_nvvm_tex_unified_cube_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.v4u32.f32">;
+def int_nvvm_tex_unified_cube_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.level.v4u32.f32">;
+
+def int_nvvm_tex_unified_cube_array_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.v4f32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_cube_array_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.v4s32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_cube_array_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.v4u32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty,
+               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tex.unified.cube.array.level.v4u32.f32">;
+
+def int_nvvm_tld4_unified_r_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.r.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.g.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.b.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4f32_f32
+  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.a.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_r_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.r.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.g.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.b.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4s32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.a.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_r_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.r.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.g.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.b.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4u32_f32
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+              "llvm.nvvm.tld4.unified.a.2d.v4u32.f32">;
+
+
+//===- Surface Load -------------------------------------------------------===//
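+// Surface loads (suld) read raw integer data from a surface. The intrinsic
+// name encodes the geometry (1d, 1d.array, 2d, ...), the result width and
+// count (i8 elements are widened to i16 results), and the handling of
+// out-of-range coordinates (.clamp, .trap, or .zero).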
+// .clamp variants
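+// .clamp: out-of-range coordinates are clamped to the surface bounds, so the
+// load always returns data from the nearest valid location. For example
+// (operand names are illustrative):
+//   %v = call { i32, i32, i32, i32 }
+//        @llvm.nvvm.suld.2d.v4i32.clamp(i64 %surf, i32 %x, i32 %y)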
+def int_nvvm_suld_1d_i8_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i8.clamp">;
+def int_nvvm_suld_1d_i16_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i16.clamp">;
+def int_nvvm_suld_1d_i32_clamp
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i32.clamp">;
+def int_nvvm_suld_1d_i64_clamp
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i64.clamp">;
+def int_nvvm_suld_1d_v2i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i8.clamp">;
+def int_nvvm_suld_1d_v2i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i16.clamp">;
+def int_nvvm_suld_1d_v2i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i32.clamp">;
+def int_nvvm_suld_1d_v2i64_clamp
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i64.clamp">;
+def int_nvvm_suld_1d_v4i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i8.clamp">;
+def int_nvvm_suld_1d_v4i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i16.clamp">;
+def int_nvvm_suld_1d_v4i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i32.clamp">;
+
+def int_nvvm_suld_1d_array_i8_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i8.clamp">;
+def int_nvvm_suld_1d_array_i16_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i16.clamp">;
+def int_nvvm_suld_1d_array_i32_clamp
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i32.clamp">;
+def int_nvvm_suld_1d_array_i64_clamp
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i64.clamp">;
+def int_nvvm_suld_1d_array_v2i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i8.clamp">;
+def int_nvvm_suld_1d_array_v2i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i16.clamp">;
+def int_nvvm_suld_1d_array_v2i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i32.clamp">;
+def int_nvvm_suld_1d_array_v2i64_clamp
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i64.clamp">;
+def int_nvvm_suld_1d_array_v4i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i8.clamp">;
+def int_nvvm_suld_1d_array_v4i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i16.clamp">;
+def int_nvvm_suld_1d_array_v4i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i32.clamp">;
+
+def int_nvvm_suld_2d_i8_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i8.clamp">;
+def int_nvvm_suld_2d_i16_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i16.clamp">;
+def int_nvvm_suld_2d_i32_clamp
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i32.clamp">;
+def int_nvvm_suld_2d_i64_clamp
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i64.clamp">;
+def int_nvvm_suld_2d_v2i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i8.clamp">;
+def int_nvvm_suld_2d_v2i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i16.clamp">;
+def int_nvvm_suld_2d_v2i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i32.clamp">;
+def int_nvvm_suld_2d_v2i64_clamp
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i64.clamp">;
+def int_nvvm_suld_2d_v4i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i8.clamp">;
+def int_nvvm_suld_2d_v4i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i16.clamp">;
+def int_nvvm_suld_2d_v4i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i32.clamp">;
+
+def int_nvvm_suld_2d_array_i8_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i8.clamp">;
+def int_nvvm_suld_2d_array_i16_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i16.clamp">;
+def int_nvvm_suld_2d_array_i32_clamp
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i32.clamp">;
+def int_nvvm_suld_2d_array_i64_clamp
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i64.clamp">;
+def int_nvvm_suld_2d_array_v2i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i8.clamp">;
+def int_nvvm_suld_2d_array_v2i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i16.clamp">;
+def int_nvvm_suld_2d_array_v2i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i32.clamp">;
+def int_nvvm_suld_2d_array_v2i64_clamp
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i64.clamp">;
+def int_nvvm_suld_2d_array_v4i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i8.clamp">;
+def int_nvvm_suld_2d_array_v4i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i16.clamp">;
+def int_nvvm_suld_2d_array_v4i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i32.clamp">;
+
+def int_nvvm_suld_3d_i8_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i8.clamp">;
+def int_nvvm_suld_3d_i16_clamp
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i16.clamp">;
+def int_nvvm_suld_3d_i32_clamp
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i32.clamp">;
+def int_nvvm_suld_3d_i64_clamp
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i64.clamp">;
+def int_nvvm_suld_3d_v2i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i8.clamp">;
+def int_nvvm_suld_3d_v2i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i16.clamp">;
+def int_nvvm_suld_3d_v2i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i32.clamp">;
+def int_nvvm_suld_3d_v2i64_clamp
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i64.clamp">;
+def int_nvvm_suld_3d_v4i8_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i8.clamp">;
+def int_nvvm_suld_3d_v4i16_clamp
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i16.clamp">;
+def int_nvvm_suld_3d_v4i32_clamp
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i32.clamp">;
+
+// .trap variants
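+// .trap: an out-of-range access raises an execution trap instead of
+// returning data.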
+def int_nvvm_suld_1d_i8_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i8.trap">;
+def int_nvvm_suld_1d_i16_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i16.trap">;
+def int_nvvm_suld_1d_i32_trap
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i32.trap">;
+def int_nvvm_suld_1d_i64_trap
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i64.trap">;
+def int_nvvm_suld_1d_v2i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i8.trap">;
+def int_nvvm_suld_1d_v2i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i16.trap">;
+def int_nvvm_suld_1d_v2i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i32.trap">;
+def int_nvvm_suld_1d_v2i64_trap
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i64.trap">;
+def int_nvvm_suld_1d_v4i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i8.trap">;
+def int_nvvm_suld_1d_v4i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i16.trap">;
+def int_nvvm_suld_1d_v4i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i32.trap">;
+
+def int_nvvm_suld_1d_array_i8_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i8.trap">;
+def int_nvvm_suld_1d_array_i16_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i16.trap">;
+def int_nvvm_suld_1d_array_i32_trap
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i32.trap">;
+def int_nvvm_suld_1d_array_i64_trap
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i64.trap">;
+def int_nvvm_suld_1d_array_v2i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i8.trap">;
+def int_nvvm_suld_1d_array_v2i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i16.trap">;
+def int_nvvm_suld_1d_array_v2i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i32.trap">;
+def int_nvvm_suld_1d_array_v2i64_trap
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i64.trap">;
+def int_nvvm_suld_1d_array_v4i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i8.trap">;
+def int_nvvm_suld_1d_array_v4i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i16.trap">;
+def int_nvvm_suld_1d_array_v4i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i32.trap">;
+
+def int_nvvm_suld_2d_i8_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i8.trap">;
+def int_nvvm_suld_2d_i16_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i16.trap">;
+def int_nvvm_suld_2d_i32_trap
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i32.trap">;
+def int_nvvm_suld_2d_i64_trap
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i64.trap">;
+def int_nvvm_suld_2d_v2i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i8.trap">;
+def int_nvvm_suld_2d_v2i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i16.trap">;
+def int_nvvm_suld_2d_v2i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i32.trap">;
+def int_nvvm_suld_2d_v2i64_trap
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i64.trap">;
+def int_nvvm_suld_2d_v4i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i8.trap">;
+def int_nvvm_suld_2d_v4i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i16.trap">;
+def int_nvvm_suld_2d_v4i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i32.trap">;
+
+def int_nvvm_suld_2d_array_i8_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i8.trap">;
+def int_nvvm_suld_2d_array_i16_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i16.trap">;
+def int_nvvm_suld_2d_array_i32_trap
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i32.trap">;
+def int_nvvm_suld_2d_array_i64_trap
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i64.trap">;
+def int_nvvm_suld_2d_array_v2i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i8.trap">;
+def int_nvvm_suld_2d_array_v2i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i16.trap">;
+def int_nvvm_suld_2d_array_v2i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i32.trap">;
+def int_nvvm_suld_2d_array_v2i64_trap
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i64.trap">;
+def int_nvvm_suld_2d_array_v4i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i8.trap">;
+def int_nvvm_suld_2d_array_v4i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i16.trap">;
+def int_nvvm_suld_2d_array_v4i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i32.trap">;
+
+def int_nvvm_suld_3d_i8_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i8.trap">;
+def int_nvvm_suld_3d_i16_trap
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i16.trap">;
+def int_nvvm_suld_3d_i32_trap
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i32.trap">;
+def int_nvvm_suld_3d_i64_trap
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i64.trap">;
+def int_nvvm_suld_3d_v2i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i8.trap">;
+def int_nvvm_suld_3d_v2i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i16.trap">;
+def int_nvvm_suld_3d_v2i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i32.trap">;
+def int_nvvm_suld_3d_v2i64_trap
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i64.trap">;
+def int_nvvm_suld_3d_v4i8_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i8.trap">;
+def int_nvvm_suld_3d_v4i16_trap
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i16.trap">;
+def int_nvvm_suld_3d_v4i32_trap
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i32.trap">;
+
+// .zero variants
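+// .zero: out-of-range loads return zero instead of trapping or clamping.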
+def int_nvvm_suld_1d_i8_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i8.zero">;
+def int_nvvm_suld_1d_i16_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i16.zero">;
+def int_nvvm_suld_1d_i32_zero
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i32.zero">;
+def int_nvvm_suld_1d_i64_zero
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.i64.zero">;
+def int_nvvm_suld_1d_v2i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i8.zero">;
+def int_nvvm_suld_1d_v2i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i16.zero">;
+def int_nvvm_suld_1d_v2i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i32.zero">;
+def int_nvvm_suld_1d_v2i64_zero
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v2i64.zero">;
+def int_nvvm_suld_1d_v4i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i8.zero">;
+def int_nvvm_suld_1d_v4i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i16.zero">;
+def int_nvvm_suld_1d_v4i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.v4i32.zero">;
+
+def int_nvvm_suld_1d_array_i8_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i8.zero">;
+def int_nvvm_suld_1d_array_i16_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i16.zero">;
+def int_nvvm_suld_1d_array_i32_zero
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i32.zero">;
+def int_nvvm_suld_1d_array_i64_zero
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.i64.zero">;
+def int_nvvm_suld_1d_array_v2i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i8.zero">;
+def int_nvvm_suld_1d_array_v2i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i16.zero">;
+def int_nvvm_suld_1d_array_v2i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i32.zero">;
+def int_nvvm_suld_1d_array_v2i64_zero
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v2i64.zero">;
+def int_nvvm_suld_1d_array_v4i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i8.zero">;
+def int_nvvm_suld_1d_array_v4i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i16.zero">;
+def int_nvvm_suld_1d_array_v4i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.1d.array.v4i32.zero">;
+
+def int_nvvm_suld_2d_i8_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i8.zero">;
+def int_nvvm_suld_2d_i16_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i16.zero">;
+def int_nvvm_suld_2d_i32_zero
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i32.zero">;
+def int_nvvm_suld_2d_i64_zero
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.i64.zero">;
+def int_nvvm_suld_2d_v2i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i8.zero">;
+def int_nvvm_suld_2d_v2i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i16.zero">;
+def int_nvvm_suld_2d_v2i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i32.zero">;
+def int_nvvm_suld_2d_v2i64_zero
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v2i64.zero">;
+def int_nvvm_suld_2d_v4i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i8.zero">;
+def int_nvvm_suld_2d_v4i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i16.zero">;
+def int_nvvm_suld_2d_v4i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.v4i32.zero">;
+
+def int_nvvm_suld_2d_array_i8_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i8.zero">;
+def int_nvvm_suld_2d_array_i16_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i16.zero">;
+def int_nvvm_suld_2d_array_i32_zero
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i32.zero">;
+def int_nvvm_suld_2d_array_i64_zero
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.i64.zero">;
+def int_nvvm_suld_2d_array_v2i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i8.zero">;
+def int_nvvm_suld_2d_array_v2i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i16.zero">;
+def int_nvvm_suld_2d_array_v2i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i32.zero">;
+def int_nvvm_suld_2d_array_v2i64_zero
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v2i64.zero">;
+def int_nvvm_suld_2d_array_v4i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i8.zero">;
+def int_nvvm_suld_2d_array_v4i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i16.zero">;
+def int_nvvm_suld_2d_array_v4i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.2d.array.v4i32.zero">;
+
+def int_nvvm_suld_3d_i8_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i8.zero">;
+def int_nvvm_suld_3d_i16_zero
+  : Intrinsic<[llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i16.zero">;
+def int_nvvm_suld_3d_i32_zero
+  : Intrinsic<[llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i32.zero">;
+def int_nvvm_suld_3d_i64_zero
+  : Intrinsic<[llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.i64.zero">;
+def int_nvvm_suld_3d_v2i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i8.zero">;
+def int_nvvm_suld_3d_v2i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i16.zero">;
+def int_nvvm_suld_3d_v2i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i32.zero">;
+def int_nvvm_suld_3d_v2i64_zero
+  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v2i64.zero">;
+def int_nvvm_suld_3d_v4i8_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i8.zero">;
+def int_nvvm_suld_3d_v4i16_zero
+  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i16.zero">;
+def int_nvvm_suld_3d_v4i32_zero
+  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.suld.3d.v4i32.zero">;
+
+//===- Texture Query ------------------------------------------------------===//
+
+def int_nvvm_txq_channel_order
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.channel.order">,
+    GCCBuiltin<"__nvvm_txq_channel_order">;
+def int_nvvm_txq_channel_data_type
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.channel.data.type">,
+    GCCBuiltin<"__nvvm_txq_channel_data_type">;
+def int_nvvm_txq_width
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.width">,
+    GCCBuiltin<"__nvvm_txq_width">;
+def int_nvvm_txq_height
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.height">,
+    GCCBuiltin<"__nvvm_txq_height">;
+def int_nvvm_txq_depth
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.depth">,
+    GCCBuiltin<"__nvvm_txq_depth">;
+def int_nvvm_txq_array_size
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.array.size">,
+    GCCBuiltin<"__nvvm_txq_array_size">;
+def int_nvvm_txq_num_samples
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.num.samples">,
+    GCCBuiltin<"__nvvm_txq_num_samples">;
+def int_nvvm_txq_num_mipmap_levels
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.txq.num.mipmap.levels">,
+    GCCBuiltin<"__nvvm_txq_num_mipmap_levels">;
+
+//===- Surface Query ------------------------------------------------------===//
+
+def int_nvvm_suq_channel_order
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.channel.order">,
+    GCCBuiltin<"__nvvm_suq_channel_order">;
+def int_nvvm_suq_channel_data_type
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.channel.data.type">,
+    GCCBuiltin<"__nvvm_suq_channel_data_type">;
+def int_nvvm_suq_width
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.width">,
+    GCCBuiltin<"__nvvm_suq_width">;
+def int_nvvm_suq_height
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.height">,
+    GCCBuiltin<"__nvvm_suq_height">;
+def int_nvvm_suq_depth
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.depth">,
+    GCCBuiltin<"__nvvm_suq_depth">;
+def int_nvvm_suq_array_size
+  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.suq.array.size">,
+    GCCBuiltin<"__nvvm_suq_array_size">;
+
+
+//===- Handle Query -------------------------------------------------------===//
+
+def int_nvvm_istypep_sampler
+  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.istypep.sampler">,
+    GCCBuiltin<"__nvvm_istypep_sampler">;
+def int_nvvm_istypep_surface
+  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.istypep.surface">,
+    GCCBuiltin<"__nvvm_istypep_surface">;
+def int_nvvm_istypep_texture
+  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+              "llvm.nvvm.istypep.texture">,
+    GCCBuiltin<"__nvvm_istypep_texture">;
+
+
+//===- Surface Stores -----------------------------------------------------===//
+
+// Unformatted
+// .clamp variant
+def int_nvvm_sust_b_1d_i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i8_clamp">;
+def int_nvvm_sust_b_1d_i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i16_clamp">;
+def int_nvvm_sust_b_1d_i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i32_clamp">;
+def int_nvvm_sust_b_1d_i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i64_clamp">;
+def int_nvvm_sust_b_1d_v2i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i8_clamp">;
+def int_nvvm_sust_b_1d_v2i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i16_clamp">;
+def int_nvvm_sust_b_1d_v2i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i32_clamp">;
+def int_nvvm_sust_b_1d_v2i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i64_clamp">;
+def int_nvvm_sust_b_1d_v4i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i8_clamp">;
+def int_nvvm_sust_b_1d_v4i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i16_clamp">;
+def int_nvvm_sust_b_1d_v4i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_1d_array_i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i8_clamp">;
+def int_nvvm_sust_b_1d_array_i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i16_clamp">;
+def int_nvvm_sust_b_1d_array_i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i32_clamp">;
+def int_nvvm_sust_b_1d_array_i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i64_clamp">;
+def int_nvvm_sust_b_1d_array_v2i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_clamp">;
+def int_nvvm_sust_b_1d_array_v2i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_clamp">;
+def int_nvvm_sust_b_1d_array_v2i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_clamp">;
+def int_nvvm_sust_b_1d_array_v2i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_clamp">;
+def int_nvvm_sust_b_1d_array_v4i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_clamp">;
+def int_nvvm_sust_b_1d_array_v4i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_clamp">;
+def int_nvvm_sust_b_1d_array_v4i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_2d_i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i8_clamp">;
+def int_nvvm_sust_b_2d_i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i16_clamp">;
+def int_nvvm_sust_b_2d_i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i32_clamp">;
+def int_nvvm_sust_b_2d_i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i64_clamp">;
+def int_nvvm_sust_b_2d_v2i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i8_clamp">;
+def int_nvvm_sust_b_2d_v2i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i16_clamp">;
+def int_nvvm_sust_b_2d_v2i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i32_clamp">;
+def int_nvvm_sust_b_2d_v2i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i64_clamp">;
+def int_nvvm_sust_b_2d_v4i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i8_clamp">;
+def int_nvvm_sust_b_2d_v4i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i16_clamp">;
+def int_nvvm_sust_b_2d_v4i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_2d_array_i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i8_clamp">;
+def int_nvvm_sust_b_2d_array_i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i16_clamp">;
+def int_nvvm_sust_b_2d_array_i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i32_clamp">;
+def int_nvvm_sust_b_2d_array_i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i64_clamp">;
+def int_nvvm_sust_b_2d_array_v2i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_clamp">;
+def int_nvvm_sust_b_2d_array_v2i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_clamp">;
+def int_nvvm_sust_b_2d_array_v2i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_clamp">;
+def int_nvvm_sust_b_2d_array_v2i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_clamp">;
+def int_nvvm_sust_b_2d_array_v4i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_clamp">;
+def int_nvvm_sust_b_2d_array_v4i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_clamp">;
+def int_nvvm_sust_b_2d_array_v4i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_3d_i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i8_clamp">;
+def int_nvvm_sust_b_3d_i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i16_clamp">;
+def int_nvvm_sust_b_3d_i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i32_clamp">;
+def int_nvvm_sust_b_3d_i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i64_clamp">;
+def int_nvvm_sust_b_3d_v2i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i8_clamp">;
+def int_nvvm_sust_b_3d_v2i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i16_clamp">;
+def int_nvvm_sust_b_3d_v2i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i32_clamp">;
+def int_nvvm_sust_b_3d_v2i64_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i64.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i64_clamp">;
+def int_nvvm_sust_b_3d_v4i8_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i8.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i8_clamp">;
+def int_nvvm_sust_b_3d_v4i16_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i16.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i16_clamp">;
+def int_nvvm_sust_b_3d_v4i32_clamp
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i32.clamp">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i32_clamp">;
+
+
+// .trap variant
+def int_nvvm_sust_b_1d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i8_trap">;
+def int_nvvm_sust_b_1d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i16_trap">;
+def int_nvvm_sust_b_1d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i32_trap">;
+def int_nvvm_sust_b_1d_i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i64_trap">;
+def int_nvvm_sust_b_1d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i8_trap">;
+def int_nvvm_sust_b_1d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i16_trap">;
+def int_nvvm_sust_b_1d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i32_trap">;
+def int_nvvm_sust_b_1d_v2i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i64_trap">;
+def int_nvvm_sust_b_1d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i8_trap">;
+def int_nvvm_sust_b_1d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i16_trap">;
+def int_nvvm_sust_b_1d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_1d_array_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i8_trap">;
+def int_nvvm_sust_b_1d_array_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i16_trap">;
+def int_nvvm_sust_b_1d_array_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i32_trap">;
+def int_nvvm_sust_b_1d_array_i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i64_trap">;
+def int_nvvm_sust_b_1d_array_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_trap">;
+def int_nvvm_sust_b_1d_array_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_trap">;
+def int_nvvm_sust_b_1d_array_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_trap">;
+def int_nvvm_sust_b_1d_array_v2i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_trap">;
+def int_nvvm_sust_b_1d_array_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_trap">;
+def int_nvvm_sust_b_1d_array_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_trap">;
+def int_nvvm_sust_b_1d_array_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i8_trap">;
+def int_nvvm_sust_b_2d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i16_trap">;
+def int_nvvm_sust_b_2d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i32_trap">;
+def int_nvvm_sust_b_2d_i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i64_trap">;
+def int_nvvm_sust_b_2d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i8_trap">;
+def int_nvvm_sust_b_2d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i16_trap">;
+def int_nvvm_sust_b_2d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i32_trap">;
+def int_nvvm_sust_b_2d_v2i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i64_trap">;
+def int_nvvm_sust_b_2d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i8_trap">;
+def int_nvvm_sust_b_2d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i16_trap">;
+def int_nvvm_sust_b_2d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_array_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i8_trap">;
+def int_nvvm_sust_b_2d_array_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i16_trap">;
+def int_nvvm_sust_b_2d_array_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i32_trap">;
+def int_nvvm_sust_b_2d_array_i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i64_trap">;
+def int_nvvm_sust_b_2d_array_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_trap">;
+def int_nvvm_sust_b_2d_array_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_trap">;
+def int_nvvm_sust_b_2d_array_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_trap">;
+def int_nvvm_sust_b_2d_array_v2i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_trap">;
+def int_nvvm_sust_b_2d_array_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_trap">;
+def int_nvvm_sust_b_2d_array_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_trap">;
+def int_nvvm_sust_b_2d_array_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_3d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i8_trap">;
+def int_nvvm_sust_b_3d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i16_trap">;
+def int_nvvm_sust_b_3d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i32_trap">;
+def int_nvvm_sust_b_3d_i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i64_trap">;
+def int_nvvm_sust_b_3d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i8_trap">;
+def int_nvvm_sust_b_3d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i16_trap">;
+def int_nvvm_sust_b_3d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i32_trap">;
+def int_nvvm_sust_b_3d_v2i64_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i64.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i64_trap">;
+def int_nvvm_sust_b_3d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i8_trap">;
+def int_nvvm_sust_b_3d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i16_trap">;
+def int_nvvm_sust_b_3d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i32_trap">;
+
+
+// .zero variant
+def int_nvvm_sust_b_1d_i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i8_zero">;
+def int_nvvm_sust_b_1d_i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i16_zero">;
+def int_nvvm_sust_b_1d_i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i32_zero">;
+def int_nvvm_sust_b_1d_i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_i64_zero">;
+def int_nvvm_sust_b_1d_v2i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i8_zero">;
+def int_nvvm_sust_b_1d_v2i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i16_zero">;
+def int_nvvm_sust_b_1d_v2i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i32_zero">;
+def int_nvvm_sust_b_1d_v2i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.v2i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v2i64_zero">;
+def int_nvvm_sust_b_1d_v4i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i8_zero">;
+def int_nvvm_sust_b_1d_v4i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i16_zero">;
+def int_nvvm_sust_b_1d_v4i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.v4i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_v4i32_zero">;
+
+
+def int_nvvm_sust_b_1d_array_i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i8_zero">;
+def int_nvvm_sust_b_1d_array_i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i16_zero">;
+def int_nvvm_sust_b_1d_array_i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i32_zero">;
+def int_nvvm_sust_b_1d_array_i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_i64_zero">;
+def int_nvvm_sust_b_1d_array_v2i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_zero">;
+def int_nvvm_sust_b_1d_array_v2i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_zero">;
+def int_nvvm_sust_b_1d_array_v2i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_zero">;
+def int_nvvm_sust_b_1d_array_v2i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v2i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_zero">;
+def int_nvvm_sust_b_1d_array_v4i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_zero">;
+def int_nvvm_sust_b_1d_array_v4i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_zero">;
+def int_nvvm_sust_b_1d_array_v4i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.1d.array.v4i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_zero">;
+
+
+def int_nvvm_sust_b_2d_i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i8_zero">;
+def int_nvvm_sust_b_2d_i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i16_zero">;
+def int_nvvm_sust_b_2d_i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i32_zero">;
+def int_nvvm_sust_b_2d_i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_i64_zero">;
+def int_nvvm_sust_b_2d_v2i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i8_zero">;
+def int_nvvm_sust_b_2d_v2i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i16_zero">;
+def int_nvvm_sust_b_2d_v2i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i32_zero">;
+def int_nvvm_sust_b_2d_v2i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.v2i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v2i64_zero">;
+def int_nvvm_sust_b_2d_v4i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i8_zero">;
+def int_nvvm_sust_b_2d_v4i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i16_zero">;
+def int_nvvm_sust_b_2d_v4i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.v4i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_v4i32_zero">;
+
+
+def int_nvvm_sust_b_2d_array_i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i8_zero">;
+def int_nvvm_sust_b_2d_array_i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i16_zero">;
+def int_nvvm_sust_b_2d_array_i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i32_zero">;
+def int_nvvm_sust_b_2d_array_i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_i64_zero">;
+def int_nvvm_sust_b_2d_array_v2i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_zero">;
+def int_nvvm_sust_b_2d_array_v2i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_zero">;
+def int_nvvm_sust_b_2d_array_v2i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_zero">;
+def int_nvvm_sust_b_2d_array_v2i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v2i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_zero">;
+def int_nvvm_sust_b_2d_array_v4i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_zero">;
+def int_nvvm_sust_b_2d_array_v4i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_zero">;
+def int_nvvm_sust_b_2d_array_v4i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.2d.array.v4i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_zero">;
+
+
+def int_nvvm_sust_b_3d_i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i8_zero">;
+def int_nvvm_sust_b_3d_i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i16_zero">;
+def int_nvvm_sust_b_3d_i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i32_zero">;
+def int_nvvm_sust_b_3d_i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_i64_zero">;
+def int_nvvm_sust_b_3d_v2i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i8_zero">;
+def int_nvvm_sust_b_3d_v2i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i16_zero">;
+def int_nvvm_sust_b_3d_v2i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i32_zero">;
+def int_nvvm_sust_b_3d_v2i64_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i64_ty, llvm_i64_ty], [],
+              "llvm.nvvm.sust.b.3d.v2i64.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v2i64_zero">;
+def int_nvvm_sust_b_3d_v4i8_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i8.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i8_zero">;
+def int_nvvm_sust_b_3d_v4i16_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i16.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i16_zero">;
+def int_nvvm_sust_b_3d_v4i32_zero
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.b.3d.v4i32.zero">,
+    GCCBuiltin<"__nvvm_sust_b_3d_v4i32_zero">;
+
+
+// Formatted
+
+def int_nvvm_sust_p_1d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_i8_trap">;
+def int_nvvm_sust_p_1d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_i16_trap">;
+def int_nvvm_sust_p_1d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_i32_trap">;
+def int_nvvm_sust_p_1d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v2i8_trap">;
+def int_nvvm_sust_p_1d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v2i16_trap">;
+def int_nvvm_sust_p_1d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v2i32_trap">;
+def int_nvvm_sust_p_1d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v4i8_trap">;
+def int_nvvm_sust_p_1d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v4i16_trap">;
+def int_nvvm_sust_p_1d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_1d_array_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_i8_trap">;
+def int_nvvm_sust_p_1d_array_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_i16_trap">;
+def int_nvvm_sust_p_1d_array_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.array.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_i32_trap">;
+def int_nvvm_sust_p_1d_array_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v2i8_trap">;
+def int_nvvm_sust_p_1d_array_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v2i16_trap">;
+def int_nvvm_sust_p_1d_array_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v2i32_trap">;
+def int_nvvm_sust_p_1d_array_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v4i8_trap">;
+def int_nvvm_sust_p_1d_array_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v4i16_trap">;
+def int_nvvm_sust_p_1d_array_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.1d.array.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_i8_trap">;
+def int_nvvm_sust_p_2d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_i16_trap">;
+def int_nvvm_sust_p_2d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_i32_trap">;
+def int_nvvm_sust_p_2d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v2i8_trap">;
+def int_nvvm_sust_p_2d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v2i16_trap">;
+def int_nvvm_sust_p_2d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v2i32_trap">;
+def int_nvvm_sust_p_2d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v4i8_trap">;
+def int_nvvm_sust_p_2d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v4i16_trap">;
+def int_nvvm_sust_p_2d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_array_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_i8_trap">;
+def int_nvvm_sust_p_2d_array_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_i16_trap">;
+def int_nvvm_sust_p_2d_array_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.array.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_i32_trap">;
+def int_nvvm_sust_p_2d_array_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v2i8_trap">;
+def int_nvvm_sust_p_2d_array_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v2i16_trap">;
+def int_nvvm_sust_p_2d_array_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v2i32_trap">;
+def int_nvvm_sust_p_2d_array_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v4i8_trap">;
+def int_nvvm_sust_p_2d_array_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v4i16_trap">;
+def int_nvvm_sust_p_2d_array_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.2d.array.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_3d_i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_i8_trap">;
+def int_nvvm_sust_p_3d_i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_i16_trap">;
+def int_nvvm_sust_p_3d_i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.3d.i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_i32_trap">;
+def int_nvvm_sust_p_3d_v2i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.v2i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v2i8_trap">;
+def int_nvvm_sust_p_3d_v2i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.v2i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v2i16_trap">;
+def int_nvvm_sust_p_3d_v2i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.3d.v2i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v2i32_trap">;
+def int_nvvm_sust_p_3d_v4i8_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.v4i8.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v4i8_trap">;
+def int_nvvm_sust_p_3d_v4i16_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+              "llvm.nvvm.sust.p.3d.v4i16.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v4i16_trap">;
+def int_nvvm_sust_p_3d_v4i32_trap
+  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+              "llvm.nvvm.sust.p.3d.v4i32.trap">,
+    GCCBuiltin<"__nvvm_sust_p_3d_v4i32_trap">;
+
+
+def int_nvvm_rotate_b32
+  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+              [IntrNoMem], "llvm.nvvm.rotate.b32">,
+              GCCBuiltin<"__nvvm_rotate_b32">;
+
+def int_nvvm_rotate_b64
+  :Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+             [IntrNoMem], "llvm.nvvm.rotate.b64">,
+             GCCBuiltin<"__nvvm_rotate_b64">;
+
+def int_nvvm_rotate_right_b64
+  : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+              [IntrNoMem], "llvm.nvvm.rotate.right.b64">,
+              GCCBuiltin<"__nvvm_rotate_right_b64">;
+
+def int_nvvm_swap_lo_hi_b64
+  : Intrinsic<[llvm_i64_ty], [llvm_i64_ty],
+              [IntrNoMem], "llvm.nvvm.swap.lo.hi.b64">,
+              GCCBuiltin<"__nvvm_swap_lo_hi_b64">;
+
+
+// Accessing special registers.
+multiclass PTXReadSRegIntrinsic_v4i32<string regname> {
+// FIXME: Do we need the 128-bit integer type version?
+//    def _r128  : Intrinsic<[llvm_i128_ty],   [], [IntrNoMem]>;
+
+// FIXME: Enable this once v4i32 support is enabled in back-end.
+//    def _v4i32 : Intrinsic<[llvm_v4i32_ty], [], [IntrNoMem]>;
+
+  def _x     : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+               GCCBuiltin<"__nvvm_read_ptx_sreg_" # regname # "_x">;
+  def _y     : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+               GCCBuiltin<"__nvvm_read_ptx_sreg_" # regname # "_y">;
+  def _z     : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+               GCCBuiltin<"__nvvm_read_ptx_sreg_" # regname # "_z">;
+  def _w     : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+               GCCBuiltin<"__nvvm_read_ptx_sreg_" # regname # "_w">;
+}
+
+class PTXReadSRegIntrinsic_r32<string name>
+  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_" # name>;
+
+class PTXReadSRegIntrinsic_r64<string name>
+  : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>,
+    GCCBuiltin<"__nvvm_read_ptx_sreg_" # name>;
+
+defm int_nvvm_read_ptx_sreg_tid : PTXReadSRegIntrinsic_v4i32<"tid">;
+defm int_nvvm_read_ptx_sreg_ntid : PTXReadSRegIntrinsic_v4i32<"ntid">;
+
+def int_nvvm_read_ptx_sreg_laneid : PTXReadSRegIntrinsic_r32<"laneid">;
+def int_nvvm_read_ptx_sreg_warpid : PTXReadSRegIntrinsic_r32<"warpid">;
+def int_nvvm_read_ptx_sreg_nwarpid : PTXReadSRegIntrinsic_r32<"nwarpid">;
+
+defm int_nvvm_read_ptx_sreg_ctaid : PTXReadSRegIntrinsic_v4i32<"ctaid">;
+defm int_nvvm_read_ptx_sreg_nctaid : PTXReadSRegIntrinsic_v4i32<"nctaid">;
+
+def int_nvvm_read_ptx_sreg_smid : PTXReadSRegIntrinsic_r32<"smid">;
+def int_nvvm_read_ptx_sreg_nsmid : PTXReadSRegIntrinsic_r32<"nsmid">;
+def int_nvvm_read_ptx_sreg_gridid : PTXReadSRegIntrinsic_r32<"gridid">;
+
+def int_nvvm_read_ptx_sreg_lanemask_eq :
+    PTXReadSRegIntrinsic_r32<"lanemask_eq">;
+def int_nvvm_read_ptx_sreg_lanemask_le :
+    PTXReadSRegIntrinsic_r32<"lanemask_le">;
+def int_nvvm_read_ptx_sreg_lanemask_lt :
+    PTXReadSRegIntrinsic_r32<"lanemask_lt">;
+def int_nvvm_read_ptx_sreg_lanemask_ge :
+    PTXReadSRegIntrinsic_r32<"lanemask_ge">;
+def int_nvvm_read_ptx_sreg_lanemask_gt :
+    PTXReadSRegIntrinsic_r32<"lanemask_gt">;
+
+def int_nvvm_read_ptx_sreg_clock : PTXReadSRegIntrinsic_r32<"clock">;
+def int_nvvm_read_ptx_sreg_clock64 : PTXReadSRegIntrinsic_r64<"clock64">;
+
+def int_nvvm_read_ptx_sreg_pm0 : PTXReadSRegIntrinsic_r32<"pm0">;
+def int_nvvm_read_ptx_sreg_pm1 : PTXReadSRegIntrinsic_r32<"pm1">;
+def int_nvvm_read_ptx_sreg_pm2 : PTXReadSRegIntrinsic_r32<"pm2">;
+def int_nvvm_read_ptx_sreg_pm3 : PTXReadSRegIntrinsic_r32<"pm3">;
+
+def int_nvvm_read_ptx_sreg_warpsize : PTXReadSRegIntrinsic_r32<"warpsize">;
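+
+// For example (illustrative CUDA device code), threadIdx.x and the CTA shape
+// are read through the builtins generated above:
+//   int tx = __nvvm_read_ptx_sreg_tid_x();    // threadIdx.x
+//   int nx = __nvvm_read_ptx_sreg_ntid_x();   // blockDim.x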
+
+//
+// SHUFFLE
+//
+
+// shfl.down.b32 dest, val, offset, mask_and_clamp
+def int_nvvm_shfl_down_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.down.i32">,
+  GCCBuiltin<"__nvvm_shfl_down_i32">;
+def int_nvvm_shfl_down_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.down.f32">,
+  GCCBuiltin<"__nvvm_shfl_down_f32">;
+
+// shfl.up.b32 dest, val, offset, mask_and_clamp
+def int_nvvm_shfl_up_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.up.i32">,
+  GCCBuiltin<"__nvvm_shfl_up_i32">;
+def int_nvvm_shfl_up_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.up.f32">,
+  GCCBuiltin<"__nvvm_shfl_up_f32">;
+
+// shfl.bfly.b32 dest, val, offset, mask_and_clamp
+def int_nvvm_shfl_bfly_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.bfly.i32">,
+  GCCBuiltin<"__nvvm_shfl_bfly_i32">;
+def int_nvvm_shfl_bfly_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.bfly.f32">,
+  GCCBuiltin<"__nvvm_shfl_bfly_f32">;
+
+// shfl.idx.b32 dest, val, lane, mask_and_clamp
+def int_nvvm_shfl_idx_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.idx.i32">,
+  GCCBuiltin<"__nvvm_shfl_idx_i32">;
+def int_nvvm_shfl_idx_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.idx.f32">,
+  GCCBuiltin<"__nvvm_shfl_idx_f32">;
+
+// Synchronizing shfl variants available in CUDA-9.
+// On sm_70 these don't have to be convergent, so we may eventually want to
+// implement a non-convergent variant of these intrinsics.
+
+// shfl.sync.down.b32 dest, threadmask, val, offset, mask_and_clamp
+def int_nvvm_shfl_sync_down_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.down.i32">,
+  GCCBuiltin<"__nvvm_shfl_sync_down_i32">;
+def int_nvvm_shfl_sync_down_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.down.f32">,
+  GCCBuiltin<"__nvvm_shfl_sync_down_f32">;
+
+// shfl.sync.up.b32 dest, threadmask, val, offset, mask_and_clamp
+def int_nvvm_shfl_sync_up_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.up.i32">,
+  GCCBuiltin<"__nvvm_shfl_sync_up_i32">;
+def int_nvvm_shfl_sync_up_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.up.f32">,
+  GCCBuiltin<"__nvvm_shfl_sync_up_f32">;
+
+// shfl.sync.bfly.b32 dest, threadmask, val, offset, mask_and_clamp
+def int_nvvm_shfl_sync_bfly_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.bfly.i32">,
+  GCCBuiltin<"__nvvm_shfl_sync_bfly_i32">;
+def int_nvvm_shfl_sync_bfly_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.bfly.f32">,
+  GCCBuiltin<"__nvvm_shfl_sync_bfly_f32">;
+
+// shfl.sync.idx.b32 dest, threadmask, val, lane, mask_and_clamp
+def int_nvvm_shfl_sync_idx_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.idx.i32">,
+  GCCBuiltin<"__nvvm_shfl_sync_idx_i32">;
+def int_nvvm_shfl_sync_idx_f32 :
+  Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.idx.f32">,
+  GCCBuiltin<"__nvvm_shfl_sync_idx_f32">;
+
+//
+// VOTE
+//
+
+// vote.all pred
+def int_nvvm_vote_all :
+  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.all">,
+  GCCBuiltin<"__nvvm_vote_all">;
+// vote.any pred
+def int_nvvm_vote_any :
+  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.any">,
+  GCCBuiltin<"__nvvm_vote_any">;
+// vote.uni pred
+def int_nvvm_vote_uni :
+  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.uni">,
+  GCCBuiltin<"__nvvm_vote_uni">;
+// vote.ballot pred
+def int_nvvm_vote_ballot :
+  Intrinsic<[llvm_i32_ty], [llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.ballot">,
+  GCCBuiltin<"__nvvm_vote_ballot">;
+
+//
+// VOTE.SYNC
+//
+
+// vote.sync.all mask, pred
+def int_nvvm_vote_all_sync :
+  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.all.sync">,
+  GCCBuiltin<"__nvvm_vote_all_sync">;
+// vote.sync.any mask, pred
+def int_nvvm_vote_any_sync :
+  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.any.sync">,
+  GCCBuiltin<"__nvvm_vote_any_sync">;
+// vote.sync.uni mask, pred
+def int_nvvm_vote_uni_sync :
+  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.uni.sync">,
+  GCCBuiltin<"__nvvm_vote_uni_sync">;
+// vote.sync.ballot mask, pred
+def int_nvvm_vote_ballot_sync :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i1_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.vote.ballot.sync">,
+  GCCBuiltin<"__nvvm_vote_ballot_sync">;
+
+//
+// MATCH.SYNC
+//
+// match.any.sync.b32 mask, value
+def int_nvvm_match_any_sync_i32 :
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.match.any.sync.i32">,
+  GCCBuiltin<"__nvvm_match_any_sync_i32">;
+// match.any.sync.b64 mask, value
+def int_nvvm_match_any_sync_i64 :
+  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.match.any.sync.i64">,
+  GCCBuiltin<"__nvvm_match_any_sync_i64">;
+
+// The match.all instruction has two variants -- one returns a single value, the
+// other returns a pair {value, predicate}. We currently only implement the
+// latter, as that is the variant exposed by the CUDA API.
+
+// match.all.sync.b32p mask, value
+def int_nvvm_match_all_sync_i32p :
+  Intrinsic<[llvm_i32_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.match.all.sync.i32p">;
+// match.all.sync.b64p mask, value
+def int_nvvm_match_all_sync_i64p :
+  Intrinsic<[llvm_i64_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
+            [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.match.all.sync.i64p">;
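+
+// For example (illustrative), CUDA 9's __match_any_sync maps onto the first
+// of these:
+//   unsigned peers = __nvvm_match_any_sync_i32(0xffffffff, key);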
+
+//
+// WMMA instructions
+//
+
+// WMMA.LOAD
+class NVVM_WMMA_LD_GALSTS<string Geometry, string Abc, string Layout,
+                          string Type, LLVMType regty, int WithStride>
+  : Intrinsic<!if(!eq(Abc#Type,"cf16"),
+                  [regty, regty, regty, regty],
+                  [regty, regty, regty, regty,
+                   regty, regty, regty, regty]),
+              !if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
+              [IntrReadMem, IntrArgMemOnly, ReadOnly<0>, NoCapture<0>],
+              "llvm.nvvm.wmma."
+                # Geometry
+                # ".load"
+                # "." # Abc
+                # "." # Layout
+                # !if(WithStride, ".stride", "")
+                # "." # Type>;
+
+multiclass NVVM_WMMA_LD_GALT<string Geometry, string Abc, string Layout,
+                             string Type, LLVMType regty> {
+  def _stride: NVVM_WMMA_LD_GALSTS<Geometry, Abc, Layout, Type, regty, 1>;
+  def NAME   : NVVM_WMMA_LD_GALSTS<Geometry, Abc, Layout, Type, regty, 0>;
+}
+
+multiclass NVVM_WMMA_LD_GAT<string Geometry, string Abc,
+                           string Type, LLVMType regty> {
+  defm _row: NVVM_WMMA_LD_GALT<Geometry, Abc, "row", Type, regty>;
+  defm _col: NVVM_WMMA_LD_GALT<Geometry, Abc, "col", Type, regty>;
+}
+
+multiclass NVVM_WMMA_LD_G<string Geometry> {
+  defm _a_f16: NVVM_WMMA_LD_GAT<Geometry, "a", "f16", llvm_v2f16_ty>;
+  defm _b_f16: NVVM_WMMA_LD_GAT<Geometry, "b", "f16", llvm_v2f16_ty>;
+  defm _c_f16: NVVM_WMMA_LD_GAT<Geometry, "c", "f16", llvm_v2f16_ty>;
+  defm _c_f32: NVVM_WMMA_LD_GAT<Geometry, "c", "f32", llvm_float_ty>;
+}
+
+multiclass NVVM_WMMA_LD {
+  defm _m16n16k16_load: NVVM_WMMA_LD_G<"m16n16k16">;
+}
+
+defm int_nvvm_wmma: NVVM_WMMA_LD;
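+
+// The defm above expands to intrinsics such as
+//   llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
+// which takes a pointer plus a stride and returns the eight <2 x half>
+// registers of a row-major f16 "a" fragment.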
+
+// WMMA.STORE.D
+class NVVM_WMMA_STD_GLSTS<string Geometry, string Layout,
+                          string Type, LLVMType regty, int WithStride,
+                          // This is only used to create a typed empty array we
+                          // need to pass to !if below.
+                          list<LLVMType>Empty=[]>
+  : Intrinsic<[],
+              !listconcat(
+                [llvm_anyptr_ty],
+                !if(!eq(Type,"f16"),
+                    [regty, regty, regty, regty],
+                    [regty, regty, regty, regty,
+                     regty, regty, regty, regty]),
+                !if(WithStride, [llvm_i32_ty], Empty)),
+              [IntrWriteMem, IntrArgMemOnly, WriteOnly<0>, NoCapture<0>],
+              "llvm.nvvm.wmma."
+                   # Geometry
+                   # ".store.d"
+                   # "." # Layout
+                   # !if(WithStride, ".stride", "")
+                   # "." # Type>;
+
+multiclass NVVM_WMMA_STD_GLT<string Geometry, string Layout, 
+                             string Type, LLVMType regty> {
+  def _stride: NVVM_WMMA_STD_GLSTS<Geometry, Layout, Type, regty, 1>;
+  def NAME:    NVVM_WMMA_STD_GLSTS<Geometry, Layout, Type, regty, 0>;
+}
+
+multiclass NVVM_WMMA_STD_GT<string Geometry, string Type, LLVMType regty> {
+  defm _row: NVVM_WMMA_STD_GLT<Geometry, "row", Type, regty>;
+  defm _col: NVVM_WMMA_STD_GLT<Geometry, "col", Type, regty>;
+}
+multiclass NVVM_WMMA_STD_G<string Geometry> {
+  defm _d_f16: NVVM_WMMA_STD_GT<Geometry, "f16", llvm_v2f16_ty>;
+  defm _d_f32: NVVM_WMMA_STD_GT<Geometry, "f32", llvm_float_ty>;
+}
+
+multiclass NVVM_WMMA_STD {
+  defm _m16n16k16_store: NVVM_WMMA_STD_G<"m16n16k16">;
+}
+
+defm int_nvvm_wmma: NVVM_WMMA_STD;
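+
+// Analogously, this defm yields names such as
+//   llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
+// taking a pointer, the eight f32 elements of a "d" fragment, and a stride.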
+
+// WMMA.MMA
+class NVVM_WMMA_MMA_GABDCS<string Geometry,
+                           string ALayout, string BLayout,
+                           string DType, LLVMType d_regty,
+                           string CType, LLVMType c_regty,
+                           string Satfinite = "">
+  : Intrinsic<!if(!eq(DType,"f16"),
+                      [d_regty, d_regty, d_regty, d_regty],
+                      [d_regty, d_regty, d_regty, d_regty,
+                       d_regty, d_regty, d_regty, d_regty]),
+              !listconcat(
+                [// A
+                llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty,
+                llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty,
+                // B
+                llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty,
+                llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty],
+                !if(!eq(CType,"f16"),
+                      [c_regty, c_regty, c_regty, c_regty],
+                      [c_regty, c_regty, c_regty, c_regty,
+                       c_regty, c_regty, c_regty, c_regty])),
+              [IntrNoMem],
+              "llvm.nvvm.wmma."
+                # Geometry
+                # ".mma"
+                # "." # ALayout
+                # "." # BLayout
+                # "." # DType
+                # "." # CType
+                # Satfinite> {
+}
+
+multiclass NVVM_WMMA_MMA_GABDC<string Geometry, string ALayout, string BLayout,
+                               string DType, LLVMType d_regty,
+                               string CType, LLVMType c_regty> {
+  def NAME : NVVM_WMMA_MMA_GABDCS<Geometry, ALayout, BLayout,
+                                  DType, d_regty, CType, c_regty>;
+  def _satfinite: NVVM_WMMA_MMA_GABDCS<Geometry, ALayout, BLayout,
+                                       DType, d_regty, CType, c_regty,".satfinite">;
+}
+
+multiclass NVVM_WMMA_MMA_GABD<string Geometry, string ALayout, string BLayout,
+                              string DType, LLVMType d_regty> {
+  defm _f16: NVVM_WMMA_MMA_GABDC<Geometry, ALayout, BLayout, DType, d_regty,
+                                "f16", llvm_v2f16_ty>;
+  defm _f32: NVVM_WMMA_MMA_GABDC<Geometry, ALayout, BLayout, DType, d_regty,
+                                "f32", llvm_float_ty>;
+}
+
+multiclass NVVM_WMMA_MMA_GAB<string Geometry, string ALayout, string BLayout> {
+  defm _f16: NVVM_WMMA_MMA_GABD<Geometry, ALayout, BLayout, "f16", llvm_v2f16_ty>;
+  defm _f32: NVVM_WMMA_MMA_GABD<Geometry, ALayout, BLayout, "f32", llvm_float_ty>;
+}
+
+multiclass NVVM_WMMA_MMA_GA<string Geometry, string ALayout> {
+  defm _col: NVVM_WMMA_MMA_GAB<Geometry, ALayout, "col">;
+  defm _row: NVVM_WMMA_MMA_GAB<Geometry, ALayout, "row">;
+}
+
+multiclass NVVM_WMMA_MMA_G<string Geometry> {
+  defm _col: NVVM_WMMA_MMA_GA<Geometry, "col">;
+  defm _row: NVVM_WMMA_MMA_GA<Geometry, "row">;
+}
+
+multiclass NVVM_WMMA_MMA {
+  defm _m16n16k16_mma : NVVM_WMMA_MMA_G<"m16n16k16">;
+}
+
+defm int_nvvm_wmma : NVVM_WMMA_MMA;
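+
+// This expands to the full set of MMA intrinsics, e.g.
+//   llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
+// (row-major A, col-major B, f32 accumulator out, f16 accumulator in,
+// saturating results to the finite range).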
+
+} // let TargetPrefix = "nvvm"
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsPowerPC.td b/linux-x64/clang/include/llvm/IR/IntrinsicsPowerPC.td
new file mode 100644
index 0000000..6321bb8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsPowerPC.td
@@ -0,0 +1,1137 @@
+//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===//
+// 
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// 
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the PowerPC-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all PowerPC intrinsics.
+//
+
+// Non-altivec intrinsics.
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+  // dcba/dcbf/dcbi/dcbst/dcbt/dcbz/dcbzl(PPC970) instructions.
+  def int_ppc_dcba  : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_ppc_dcbf  : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_ppc_dcbi  : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_ppc_dcbst : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_ppc_dcbt  : Intrinsic<[], [llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<0>]>;
+  def int_ppc_dcbtst: Intrinsic<[], [llvm_ptr_ty],
+    [IntrArgMemOnly, NoCapture<0>]>;
+  def int_ppc_dcbz  : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  // sync instruction (i.e. sync 0, a.k.a. hwsync)
+  def int_ppc_sync : Intrinsic<[], [], []>;
+  // lwsync is sync 1
+  def int_ppc_lwsync : Intrinsic<[], [], []>;
+
+  // Intrinsics used to generate ctr-based loops. These should only be
+  // generated by the PowerPC backend!
+  def int_ppc_mtctr : Intrinsic<[], [llvm_anyint_ty], []>;
+  def int_ppc_is_decremented_ctr_nonzero : Intrinsic<[llvm_i1_ty], [], []>;
+
+  // Intrinsics for [double]word extended forms of divide instructions
+  def int_ppc_divwe : GCCBuiltin<"__builtin_divwe">,
+                      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                                [IntrNoMem]>;
+  def int_ppc_divweu : GCCBuiltin<"__builtin_divweu">,
+                       Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+  def int_ppc_divde : GCCBuiltin<"__builtin_divde">,
+                      Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                                [IntrNoMem]>;
+  def int_ppc_divdeu : GCCBuiltin<"__builtin_divdeu">,
+                       Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                                 [IntrNoMem]>;
+
+  // Bit permute doubleword
+  def int_ppc_bpermd : GCCBuiltin<"__builtin_bpermd">,
+                       Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                                 [IntrNoMem]>;
+}
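+
+// For example (illustrative), the extended-divide builtins declared above are
+// usable directly from C:
+//   unsigned q = __builtin_divweu(a, b);   // quotient of ((u64)a << 32) / b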
+
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  /// PowerPC_Vec_Intrinsic - Base class for all altivec intrinsics.
+  class PowerPC_Vec_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_altivec_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+
+  /// PowerPC_VSX_Intrinsic - Base class for all VSX intrinsics.
+  class PowerPC_VSX_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_vsx_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Class Definitions.
+//
+
+/// PowerPC_Vec_FF_Intrinsic - A PowerPC intrinsic that takes one v4f32
+/// vector and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_FF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+/// PowerPC_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_FFF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix> 
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix> 
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix> 
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2i64
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_DDD_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_QQQ_Intrinsic - A PowerPC intrinsic that takes two v1i128
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_QQQ_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                         [llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
+                         [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC VSX Intrinsic Class Definitions.
+//
+
+/// PowerPC_VSX_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2f64
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_VSX_Vec_DDD_Intrinsic<string GCCIntSuffix>
+  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+                          [llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_VSX_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_VSX_Vec_FFF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_VSX_Sca_DDD_Intrinsic - A PowerPC intrinsic that takes two f64
+/// scalars and returns one.  These intrinsics have no side effects.
+class PowerPC_VSX_Sca_DDD_Intrinsic<string GCCIntSuffix>
+  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+                          [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+                          [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+  // Data Stream Control.
+  def int_ppc_altivec_dss : GCCBuiltin<"__builtin_altivec_dss">,
+              Intrinsic<[], [llvm_i32_ty], []>;
+  def int_ppc_altivec_dssall : GCCBuiltin<"__builtin_altivec_dssall">,
+              Intrinsic<[], [], []>;
+  def int_ppc_altivec_dst : GCCBuiltin<"__builtin_altivec_dst">,
+              Intrinsic<[],
+                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+                        []>;
+  def int_ppc_altivec_dstt : GCCBuiltin<"__builtin_altivec_dstt">,
+              Intrinsic<[],
+                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+                        []>;
+  def int_ppc_altivec_dstst : GCCBuiltin<"__builtin_altivec_dstst">,
+              Intrinsic<[],
+                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+                        []>;
+  def int_ppc_altivec_dststt : GCCBuiltin<"__builtin_altivec_dststt">,
+              Intrinsic<[],
+                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+                        []>;
+
+  // VSCR access.
+  def int_ppc_altivec_mfvscr : GCCBuiltin<"__builtin_altivec_mfvscr">,
+              Intrinsic<[llvm_v8i16_ty], [], [IntrReadMem]>;
+  def int_ppc_altivec_mtvscr : GCCBuiltin<"__builtin_altivec_mtvscr">,
+              Intrinsic<[], [llvm_v4i32_ty], []>;
+
+
+  // Loads.  These don't map directly to GCC builtins because they represent the
+  // source address with a single pointer.
+  def int_ppc_altivec_lvx :
+              Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_lvxl :
+              Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_lvebx :
+              Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_lvehx :
+              Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_lvewx :
+              Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+  // Stores.  These don't map directly to GCC builtins because they represent the
+  // destination address with a single pointer.
+  def int_ppc_altivec_stvx :
+              Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+                        [IntrWriteMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_stvxl :
+              Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+                        [IntrWriteMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_stvebx :
+              Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty],
+                        [IntrWriteMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_stvehx :
+              Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty],
+                        [IntrWriteMem, IntrArgMemOnly]>;
+  def int_ppc_altivec_stvewx :
+              Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+                        [IntrWriteMem, IntrArgMemOnly]>;
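+
+  // For example (illustrative), clang's altivec.h implements the portable
+  // vec_ld/vec_st operations on top of these:
+  //   vector int v = vec_ld(0, p);   // lowers to llvm.ppc.altivec.lvx
+  //   vec_st(v, 0, p);               // lowers to llvm.ppc.altivec.stvx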
+
+  // Comparisons setting a vector.
+  def int_ppc_altivec_vcmpbfp : GCCBuiltin<"__builtin_altivec_vcmpbfp">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpeqfp : GCCBuiltin<"__builtin_altivec_vcmpeqfp">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgefp : GCCBuiltin<"__builtin_altivec_vcmpgefp">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtfp : GCCBuiltin<"__builtin_altivec_vcmpgtfp">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequd : GCCBuiltin<"__builtin_altivec_vcmpequd">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsd : GCCBuiltin<"__builtin_altivec_vcmpgtsd">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtud : GCCBuiltin<"__builtin_altivec_vcmpgtud">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequw : GCCBuiltin<"__builtin_altivec_vcmpequw">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsw : GCCBuiltin<"__builtin_altivec_vcmpgtsw">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtuw : GCCBuiltin<"__builtin_altivec_vcmpgtuw">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnew : GCCBuiltin<"__builtin_altivec_vcmpnew">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezw : GCCBuiltin<"__builtin_altivec_vcmpnezw">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsh : GCCBuiltin<"__builtin_altivec_vcmpgtsh">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtuh : GCCBuiltin<"__builtin_altivec_vcmpgtuh">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpneh : GCCBuiltin<"__builtin_altivec_vcmpneh">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezh : GCCBuiltin<"__builtin_altivec_vcmpnezh">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequb : GCCBuiltin<"__builtin_altivec_vcmpequb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsb : GCCBuiltin<"__builtin_altivec_vcmpgtsb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtub : GCCBuiltin<"__builtin_altivec_vcmpgtub">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpneb : GCCBuiltin<"__builtin_altivec_vcmpneb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezb : GCCBuiltin<"__builtin_altivec_vcmpnezb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+
+  // Predicate Comparisons.  The first operand selects the CR6 interpretation.
+  def int_ppc_altivec_vcmpbfp_p : GCCBuiltin<"__builtin_altivec_vcmpbfp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpeqfp_p : GCCBuiltin<"__builtin_altivec_vcmpeqfp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgefp_p : GCCBuiltin<"__builtin_altivec_vcmpgefp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtfp_p : GCCBuiltin<"__builtin_altivec_vcmpgtfp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequd_p : GCCBuiltin<"__builtin_altivec_vcmpequd_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsd_p : GCCBuiltin<"__builtin_altivec_vcmpgtsd_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtud_p : GCCBuiltin<"__builtin_altivec_vcmpgtud_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequw_p : GCCBuiltin<"__builtin_altivec_vcmpequw_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsw_p : GCCBuiltin<"__builtin_altivec_vcmpgtsw_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtuw_p : GCCBuiltin<"__builtin_altivec_vcmpgtuw_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnew_p : GCCBuiltin<"__builtin_altivec_vcmpnew_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezw_p : GCCBuiltin<"__builtin_altivec_vcmpnezw_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsh_p : GCCBuiltin<"__builtin_altivec_vcmpgtsh_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtuh_p : GCCBuiltin<"__builtin_altivec_vcmpgtuh_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpneh_p : GCCBuiltin<"__builtin_altivec_vcmpneh_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezh_p : GCCBuiltin<"__builtin_altivec_vcmpnezh_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vcmpequb_p : GCCBuiltin<"__builtin_altivec_vcmpequb_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtsb_p : GCCBuiltin<"__builtin_altivec_vcmpgtsb_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpgtub_p : GCCBuiltin<"__builtin_altivec_vcmpgtub_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpneb_p : GCCBuiltin<"__builtin_altivec_vcmpneb_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcmpnezb_p : GCCBuiltin<"__builtin_altivec_vcmpnezb_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+                        [IntrNoMem]>;
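+
+  // For example (illustrative), altivec.h builds the portable vec_all_*/
+  // vec_any_* predicates on these, passing a CR6-interpretation constant:
+  //   if (__builtin_altivec_vcmpequw_p(__CR6_LT, a, b)) { /* all equal */ }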
+  def int_ppc_altivec_vclzlsbb : GCCBuiltin<"__builtin_altivec_vclzlsbb">,
+              Intrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
+  def int_ppc_altivec_vctzlsbb : GCCBuiltin<"__builtin_altivec_vctzlsbb">,
+              Intrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
+  def int_ppc_altivec_vprtybw : GCCBuiltin<"__builtin_altivec_vprtybw">,
+              Intrinsic<[llvm_v4i32_ty],[llvm_v4i32_ty],[IntrNoMem]>;
+  def int_ppc_altivec_vprtybd : GCCBuiltin<"__builtin_altivec_vprtybd">,
+              Intrinsic<[llvm_v2i64_ty],[llvm_v2i64_ty],[IntrNoMem]>;
+  def int_ppc_altivec_vprtybq : GCCBuiltin<"__builtin_altivec_vprtybq">,
+              Intrinsic<[llvm_v1i128_ty],[llvm_v1i128_ty],[IntrNoMem]>;
+
+}
+
+// Vector average.
+def int_ppc_altivec_vavgsb : PowerPC_Vec_BBB_Intrinsic<"vavgsb">;
+def int_ppc_altivec_vavgsh : PowerPC_Vec_HHH_Intrinsic<"vavgsh">;
+def int_ppc_altivec_vavgsw : PowerPC_Vec_WWW_Intrinsic<"vavgsw">;
+def int_ppc_altivec_vavgub : PowerPC_Vec_BBB_Intrinsic<"vavgub">;
+def int_ppc_altivec_vavguh : PowerPC_Vec_HHH_Intrinsic<"vavguh">;
+def int_ppc_altivec_vavguw : PowerPC_Vec_WWW_Intrinsic<"vavguw">;
+
+// Vector maximum.
+def int_ppc_altivec_vmaxfp : PowerPC_Vec_FFF_Intrinsic<"vmaxfp">;
+def int_ppc_altivec_vmaxsb : PowerPC_Vec_BBB_Intrinsic<"vmaxsb">;
+def int_ppc_altivec_vmaxsh : PowerPC_Vec_HHH_Intrinsic<"vmaxsh">;
+def int_ppc_altivec_vmaxsw : PowerPC_Vec_WWW_Intrinsic<"vmaxsw">;
+def int_ppc_altivec_vmaxsd : PowerPC_Vec_DDD_Intrinsic<"vmaxsd">;
+def int_ppc_altivec_vmaxub : PowerPC_Vec_BBB_Intrinsic<"vmaxub">;
+def int_ppc_altivec_vmaxuh : PowerPC_Vec_HHH_Intrinsic<"vmaxuh">;
+def int_ppc_altivec_vmaxuw : PowerPC_Vec_WWW_Intrinsic<"vmaxuw">;
+def int_ppc_altivec_vmaxud : PowerPC_Vec_DDD_Intrinsic<"vmaxud">;
+
+// Vector minimum.
+def int_ppc_altivec_vminfp : PowerPC_Vec_FFF_Intrinsic<"vminfp">;
+def int_ppc_altivec_vminsb : PowerPC_Vec_BBB_Intrinsic<"vminsb">;
+def int_ppc_altivec_vminsh : PowerPC_Vec_HHH_Intrinsic<"vminsh">;
+def int_ppc_altivec_vminsw : PowerPC_Vec_WWW_Intrinsic<"vminsw">;
+def int_ppc_altivec_vminsd : PowerPC_Vec_DDD_Intrinsic<"vminsd">;
+def int_ppc_altivec_vminub : PowerPC_Vec_BBB_Intrinsic<"vminub">;
+def int_ppc_altivec_vminuh : PowerPC_Vec_HHH_Intrinsic<"vminuh">;
+def int_ppc_altivec_vminuw : PowerPC_Vec_WWW_Intrinsic<"vminuw">;
+def int_ppc_altivec_vminud : PowerPC_Vec_DDD_Intrinsic<"vminud">;
+
+// Saturating adds.
+def int_ppc_altivec_vaddubs : PowerPC_Vec_BBB_Intrinsic<"vaddubs">;
+def int_ppc_altivec_vaddsbs : PowerPC_Vec_BBB_Intrinsic<"vaddsbs">;
+def int_ppc_altivec_vadduhs : PowerPC_Vec_HHH_Intrinsic<"vadduhs">;
+def int_ppc_altivec_vaddshs : PowerPC_Vec_HHH_Intrinsic<"vaddshs">;
+def int_ppc_altivec_vadduws : PowerPC_Vec_WWW_Intrinsic<"vadduws">;
+def int_ppc_altivec_vaddsws : PowerPC_Vec_WWW_Intrinsic<"vaddsws">;
+def int_ppc_altivec_vaddcuw : PowerPC_Vec_WWW_Intrinsic<"vaddcuw">;
+def int_ppc_altivec_vaddcuq : PowerPC_Vec_QQQ_Intrinsic<"vaddcuq">;
+
+// Saturating subs.
+def int_ppc_altivec_vsububs : PowerPC_Vec_BBB_Intrinsic<"vsububs">;
+def int_ppc_altivec_vsubsbs : PowerPC_Vec_BBB_Intrinsic<"vsubsbs">;
+def int_ppc_altivec_vsubuhs : PowerPC_Vec_HHH_Intrinsic<"vsubuhs">;
+def int_ppc_altivec_vsubshs : PowerPC_Vec_HHH_Intrinsic<"vsubshs">;
+def int_ppc_altivec_vsubuws : PowerPC_Vec_WWW_Intrinsic<"vsubuws">;
+def int_ppc_altivec_vsubsws : PowerPC_Vec_WWW_Intrinsic<"vsubsws">;
+def int_ppc_altivec_vsubcuw : PowerPC_Vec_WWW_Intrinsic<"vsubcuw">;
+def int_ppc_altivec_vsubcuq : PowerPC_Vec_QQQ_Intrinsic<"vsubcuq">;
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  // Saturating multiply-adds.
+  def int_ppc_altivec_vmhaddshs : GCCBuiltin<"__builtin_altivec_vmhaddshs">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmhraddshs : GCCBuiltin<"__builtin_altivec_vmhraddshs">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_ppc_altivec_vmaddfp : GCCBuiltin<"__builtin_altivec_vmaddfp">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vnmsubfp : GCCBuiltin<"__builtin_altivec_vnmsubfp">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Sum Instructions.
+  def int_ppc_altivec_vmsummbm : GCCBuiltin<"__builtin_altivec_vmsummbm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumshm : GCCBuiltin<"__builtin_altivec_vmsumshm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumshs : GCCBuiltin<"__builtin_altivec_vmsumshs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty, 
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumubm : GCCBuiltin<"__builtin_altivec_vmsumubm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty, 
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumuhs : GCCBuiltin<"__builtin_altivec_vmsumuhs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Instructions.
+  def int_ppc_altivec_vmulesb : GCCBuiltin<"__builtin_altivec_vmulesb">,
+          Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulesh : GCCBuiltin<"__builtin_altivec_vmulesh">,
+          Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulesw : GCCBuiltin<"__builtin_altivec_vmulesw">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmuleub : GCCBuiltin<"__builtin_altivec_vmuleub">,
+          Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmuleuh : GCCBuiltin<"__builtin_altivec_vmuleuh">,
+          Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmuleuw : GCCBuiltin<"__builtin_altivec_vmuleuw">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                    [IntrNoMem]>;
+
+  def int_ppc_altivec_vmulosb : GCCBuiltin<"__builtin_altivec_vmulosb">,
+          Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulosh : GCCBuiltin<"__builtin_altivec_vmulosh">,
+          Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulosw : GCCBuiltin<"__builtin_altivec_vmulosw">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmuloub : GCCBuiltin<"__builtin_altivec_vmuloub">,
+          Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulouh : GCCBuiltin<"__builtin_altivec_vmulouh">,
+          Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                    [IntrNoMem]>;
+  def int_ppc_altivec_vmulouw : GCCBuiltin<"__builtin_altivec_vmulouw">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                    [IntrNoMem]>;
+
+  // Vector Sum Instructions.
+  def int_ppc_altivec_vsumsws : GCCBuiltin<"__builtin_altivec_vsumsws">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum2sws : GCCBuiltin<"__builtin_altivec_vsum2sws">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4sbs : GCCBuiltin<"__builtin_altivec_vsum4sbs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4shs : GCCBuiltin<"__builtin_altivec_vsum4shs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4ubs : GCCBuiltin<"__builtin_altivec_vsum4ubs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+
+  // Other multiplies.
+  def int_ppc_altivec_vmladduhm : GCCBuiltin<"__builtin_altivec_vmladduhm">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, 
+                       llvm_v8i16_ty], [IntrNoMem]>;
+
+  // Packs.
+  def int_ppc_altivec_vpkpx : GCCBuiltin<"__builtin_altivec_vpkpx">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkshss : GCCBuiltin<"__builtin_altivec_vpkshss">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkshus : GCCBuiltin<"__builtin_altivec_vpkshus">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkswss : GCCBuiltin<"__builtin_altivec_vpkswss">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkswus : GCCBuiltin<"__builtin_altivec_vpkswus">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpksdss : GCCBuiltin<"__builtin_altivec_vpksdss">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpksdus : GCCBuiltin<"__builtin_altivec_vpksdus">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                      [IntrNoMem]>;
+  // vpkuhum is lowered to a shuffle.
+  def int_ppc_altivec_vpkuhus : GCCBuiltin<"__builtin_altivec_vpkuhus">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  // vpkuwum is lowered to a shuffle.
+  def int_ppc_altivec_vpkuwus : GCCBuiltin<"__builtin_altivec_vpkuwus">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  // vpkudum is lowered to a shuffle.
+  def int_ppc_altivec_vpkudus : GCCBuiltin<"__builtin_altivec_vpkudus">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                      [IntrNoMem]>;
+
+  // Unpacks.
+  def int_ppc_altivec_vupkhpx : GCCBuiltin<"__builtin_altivec_vupkhpx">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupkhsb : GCCBuiltin<"__builtin_altivec_vupkhsb">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupkhsh : GCCBuiltin<"__builtin_altivec_vupkhsh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupkhsw : GCCBuiltin<"__builtin_altivec_vupkhsw">,
+            Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupklpx : GCCBuiltin<"__builtin_altivec_vupklpx">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupklsb : GCCBuiltin<"__builtin_altivec_vupklsb">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupklsh : GCCBuiltin<"__builtin_altivec_vupklsh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vupklsw : GCCBuiltin<"__builtin_altivec_vupklsw">,
+            Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+
+  // FP <-> integer conversion.
+  def int_ppc_altivec_vcfsx : GCCBuiltin<"__builtin_altivec_vcfsx">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vcfux : GCCBuiltin<"__builtin_altivec_vcfux">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vctsxs : GCCBuiltin<"__builtin_altivec_vctsxs">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vctuxs : GCCBuiltin<"__builtin_altivec_vctuxs">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+
+  def int_ppc_altivec_vrfim : GCCBuiltin<"__builtin_altivec_vrfim">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vrfin : GCCBuiltin<"__builtin_altivec_vrfin">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vrfip : GCCBuiltin<"__builtin_altivec_vrfip">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vrfiz : GCCBuiltin<"__builtin_altivec_vrfiz">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+  // Add Extended Quadword
+  def int_ppc_altivec_vaddeuqm : GCCBuiltin<"__builtin_altivec_vaddeuqm">,
+              Intrinsic<[llvm_v1i128_ty], 
+                        [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vaddecuq : GCCBuiltin<"__builtin_altivec_vaddecuq">,
+              Intrinsic<[llvm_v1i128_ty], 
+                        [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+                        [IntrNoMem]>;
+
+  // Sub Extended Quadword
+  def int_ppc_altivec_vsubeuqm : GCCBuiltin<"__builtin_altivec_vsubeuqm">,
+              Intrinsic<[llvm_v1i128_ty], 
+                        [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vsubecuq : GCCBuiltin<"__builtin_altivec_vsubecuq">,
+              Intrinsic<[llvm_v1i128_ty], 
+                        [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+                        [IntrNoMem]>;
+}
+
+def int_ppc_altivec_vsl   : PowerPC_Vec_WWW_Intrinsic<"vsl">;
+def int_ppc_altivec_vslo  : PowerPC_Vec_WWW_Intrinsic<"vslo">;
+
+def int_ppc_altivec_vslb  : PowerPC_Vec_BBB_Intrinsic<"vslb">;
+def int_ppc_altivec_vslv  : PowerPC_Vec_BBB_Intrinsic<"vslv">;
+def int_ppc_altivec_vsrv  : PowerPC_Vec_BBB_Intrinsic<"vsrv">;
+def int_ppc_altivec_vslh  : PowerPC_Vec_HHH_Intrinsic<"vslh">;
+def int_ppc_altivec_vslw  : PowerPC_Vec_WWW_Intrinsic<"vslw">;
+
+// Right Shifts.
+def int_ppc_altivec_vsr   : PowerPC_Vec_WWW_Intrinsic<"vsr">;
+def int_ppc_altivec_vsro  : PowerPC_Vec_WWW_Intrinsic<"vsro">;
+  
+def int_ppc_altivec_vsrb  : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
+def int_ppc_altivec_vsrh  : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
+def int_ppc_altivec_vsrw  : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
+def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">;
+def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">;
+def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">;
+
+// Rotates.
+def int_ppc_altivec_vrlb  : PowerPC_Vec_BBB_Intrinsic<"vrlb">;
+def int_ppc_altivec_vrlh  : PowerPC_Vec_HHH_Intrinsic<"vrlh">;
+def int_ppc_altivec_vrlw  : PowerPC_Vec_WWW_Intrinsic<"vrlw">;
+def int_ppc_altivec_vrld  : PowerPC_Vec_DDD_Intrinsic<"vrld">;
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  // Miscellaneous.
+  def int_ppc_altivec_lvsl :
+              Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
+  def int_ppc_altivec_lvsr :
+              Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+  def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, 
+                         llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, 
+                         llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vgbbd : GCCBuiltin<"__builtin_altivec_vgbbd">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vbpermq : GCCBuiltin<"__builtin_altivec_vbpermq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+}
+
+def int_ppc_altivec_vexptefp  : PowerPC_Vec_FF_Intrinsic<"vexptefp">;
+def int_ppc_altivec_vlogefp   : PowerPC_Vec_FF_Intrinsic<"vlogefp">;
+def int_ppc_altivec_vrefp     : PowerPC_Vec_FF_Intrinsic<"vrefp">;
+def int_ppc_altivec_vrsqrtefp : PowerPC_Vec_FF_Intrinsic<"vrsqrtefp">;
+
+// Power8 Intrinsics
+// Crypto
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  def int_ppc_altivec_crypto_vsbox :
+              GCCBuiltin<"__builtin_altivec_crypto_vsbox">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+  def int_ppc_altivec_crypto_vpermxor :
+              GCCBuiltin<"__builtin_altivec_crypto_vpermxor">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_ppc_altivec_crypto_vshasigmad :
+            GCCBuiltin<"__builtin_altivec_crypto_vshasigmad">,
+            Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                       llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_altivec_crypto_vshasigmaw :
+            GCCBuiltin<"__builtin_altivec_crypto_vshasigmaw">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                       llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+def int_ppc_altivec_crypto_vcipher :
+            PowerPC_Vec_DDD_Intrinsic<"crypto_vcipher">;
+def int_ppc_altivec_crypto_vcipherlast :
+            PowerPC_Vec_DDD_Intrinsic<"crypto_vcipherlast">;
+def int_ppc_altivec_crypto_vncipher :
+            PowerPC_Vec_DDD_Intrinsic<"crypto_vncipher">;
+def int_ppc_altivec_crypto_vncipherlast :
+            PowerPC_Vec_DDD_Intrinsic<"crypto_vncipherlast">;
+def int_ppc_altivec_crypto_vpmsumb :
+            PowerPC_Vec_BBB_Intrinsic<"crypto_vpmsumb">;
+def int_ppc_altivec_crypto_vpmsumh :
+            PowerPC_Vec_HHH_Intrinsic<"crypto_vpmsumh">;
+def int_ppc_altivec_crypto_vpmsumw :
+            PowerPC_Vec_WWW_Intrinsic<"crypto_vpmsumw">;
+def int_ppc_altivec_crypto_vpmsumd :
+            PowerPC_Vec_DDD_Intrinsic<"crypto_vpmsumd">;
+
+// Absolute Difference intrinsics
+def int_ppc_altivec_vabsdub : PowerPC_Vec_BBB_Intrinsic<"vabsdub">;
+def int_ppc_altivec_vabsduh : PowerPC_Vec_HHH_Intrinsic<"vabsduh">;
+def int_ppc_altivec_vabsduw : PowerPC_Vec_WWW_Intrinsic<"vabsduw">;
+
+// Vector rotates
+def int_ppc_altivec_vrlwnm :
+      PowerPC_Vec_Intrinsic<"vrlwnm", [llvm_v4i32_ty],
+                            [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrlwmi :
+      PowerPC_Vec_Intrinsic<"vrlwmi", [llvm_v4i32_ty],
+                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                            [IntrNoMem]>;
+def int_ppc_altivec_vrldnm :
+      PowerPC_Vec_Intrinsic<"vrldnm", [llvm_v2i64_ty],
+                            [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrldmi :
+      PowerPC_Vec_Intrinsic<"vrldmi", [llvm_v2i64_ty],
+                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+                            [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC VSX Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+
+// Vector load.
+def int_ppc_vsx_lxvw4x :
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvd2x :
+      Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvw4x_be :
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvd2x_be :
+      Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvl :
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem,
+      IntrArgMemOnly]>;
+def int_ppc_vsx_lxvll :
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem,
+      IntrArgMemOnly]>;
+def int_ppc_vsx_stxvl :
+      Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
+      [IntrWriteMem, IntrArgMemOnly]>;
+def int_ppc_vsx_stxvll :
+      Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
+      [IntrWriteMem, IntrArgMemOnly]>;
+
+// Vector store.
+def int_ppc_vsx_stxvw4x : Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+                                    [IntrWriteMem, IntrArgMemOnly]>;
+def int_ppc_vsx_stxvd2x : Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty],
+                                    [IntrWriteMem, IntrArgMemOnly]>;
+def int_ppc_vsx_stxvw4x_be : Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+                                       [IntrWriteMem, IntrArgMemOnly]>;
+def int_ppc_vsx_stxvd2x_be : Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty],
+                                       [IntrWriteMem, IntrArgMemOnly]>;
+// Vector and scalar maximum.
+def int_ppc_vsx_xvmaxdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmaxdp">;
+def int_ppc_vsx_xvmaxsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvmaxsp">;
+def int_ppc_vsx_xsmaxdp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmaxdp">;
+
+// Vector and scalar minimum.
+def int_ppc_vsx_xvmindp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmindp">;
+def int_ppc_vsx_xvminsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvminsp">;
+def int_ppc_vsx_xsmindp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmindp">;
+
+// Vector divide.
+def int_ppc_vsx_xvdivdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvdivdp">;
+def int_ppc_vsx_xvdivsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvdivsp">;
+
+// Vector round toward +infinity (ceil)
+def int_ppc_vsx_xvrspip :
+      Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvrdpip :
+      Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector reciprocal estimate
+def int_ppc_vsx_xvresp : GCCBuiltin<"__builtin_vsx_xvresp">,
+      Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvredp : GCCBuiltin<"__builtin_vsx_xvredp">,
+      Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector reciprocal square-root estimate
+def int_ppc_vsx_xvrsqrtesp : GCCBuiltin<"__builtin_vsx_xvrsqrtesp">,
+      Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvrsqrtedp : GCCBuiltin<"__builtin_vsx_xvrsqrtedp">,
+      Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector compare
+def int_ppc_vsx_xvcmpeqdp :
+      PowerPC_VSX_Intrinsic<"xvcmpeqdp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqdp_p : GCCBuiltin<"__builtin_vsx_xvcmpeqdp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqsp :
+      PowerPC_VSX_Intrinsic<"xvcmpeqsp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqsp_p : GCCBuiltin<"__builtin_vsx_xvcmpeqsp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgedp :
+      PowerPC_VSX_Intrinsic<"xvcmpgedp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgedp_p : GCCBuiltin<"__builtin_vsx_xvcmpgedp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgesp :
+      PowerPC_VSX_Intrinsic<"xvcmpgesp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgesp_p : GCCBuiltin<"__builtin_vsx_xvcmpgesp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtdp :
+      PowerPC_VSX_Intrinsic<"xvcmpgtdp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtdp_p : GCCBuiltin<"__builtin_vsx_xvcmpgtdp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtsp :
+      PowerPC_VSX_Intrinsic<"xvcmpgtsp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtsp_p : GCCBuiltin<"__builtin_vsx_xvcmpgtsp_p">,
+              Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+                        [IntrNoMem]>;
+def int_ppc_vsx_xxleqv :
+      PowerPC_VSX_Intrinsic<"xxleqv", [llvm_v4i32_ty],
+                            [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xviexpdp :
+      PowerPC_VSX_Intrinsic<"xviexpdp",[llvm_v2f64_ty],
+                            [llvm_v2i64_ty, llvm_v2i64_ty],[IntrNoMem]>;
+def int_ppc_vsx_xviexpsp :
+      PowerPC_VSX_Intrinsic<"xviexpsp",[llvm_v4f32_ty],
+                            [llvm_v4i32_ty, llvm_v4i32_ty],[IntrNoMem]>;
+def int_ppc_vsx_xvcvdpsxws :
+      PowerPC_VSX_Intrinsic<"xvcvdpsxws", [llvm_v4i32_ty],
+                            [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvdpuxws :
+      PowerPC_VSX_Intrinsic<"xvcvdpuxws", [llvm_v4i32_ty],
+                            [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsxwdp :
+      PowerPC_VSX_Intrinsic<"xvcvsxwdp", [llvm_v2f64_ty],
+                            [llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvuxwdp :
+      PowerPC_VSX_Intrinsic<"xvcvuxwdp", [llvm_v2f64_ty],
+                            [llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvspdp :
+      PowerPC_VSX_Intrinsic<"xvcvspdp", [llvm_v2f64_ty],
+                            [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsxdsp :
+      PowerPC_VSX_Intrinsic<"xvcvsxdsp", [llvm_v4f32_ty],
+                            [llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvuxdsp :
+      PowerPC_VSX_Intrinsic<"xvcvuxdsp", [llvm_v4f32_ty],
+                            [llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvdpsp :
+      PowerPC_VSX_Intrinsic<"xvcvdpsp", [llvm_v4f32_ty],
+                            [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsphp :
+      PowerPC_VSX_Intrinsic<"xvcvsphp", [llvm_v4f32_ty],
+                            [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxexpdp :
+      PowerPC_VSX_Intrinsic<"xvxexpdp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxexpsp :
+      PowerPC_VSX_Intrinsic<"xvxexpsp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxsigdp :
+      PowerPC_VSX_Intrinsic<"xvxsigdp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxsigsp :
+      PowerPC_VSX_Intrinsic<"xvxsigsp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvtstdcdp :
+      PowerPC_VSX_Intrinsic<"xvtstdcdp", [llvm_v2i64_ty],
+                            [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvtstdcsp :
+      PowerPC_VSX_Intrinsic<"xvtstdcsp", [llvm_v4i32_ty],
+                            [llvm_v4f32_ty,llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvhpsp :
+      PowerPC_VSX_Intrinsic<"xvcvhpsp", [llvm_v4f32_ty],
+                            [llvm_v8i16_ty],[IntrNoMem]>;
+def int_ppc_vsx_xxextractuw :
+      PowerPC_VSX_Intrinsic<"xxextractuw",[llvm_v2i64_ty],
+                            [llvm_v2i64_ty,llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxinsertw :
+      PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
+                            [llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
+                            [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsics.
+//
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  /// PowerPC_QPX_Intrinsic - Base class for all QPX intrinsics.
+  class PowerPC_QPX_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_qpx_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsic Class Definitions.
+//
+
+/// PowerPC_QPX_FF_Intrinsic - A PowerPC intrinsic that takes one v4f64
+/// vector and returns one.  These intrinsics have no side effects.
+class PowerPC_QPX_FF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+
+/// PowerPC_QPX_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f64
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_QPX_FFF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_QPX_FFFF_Intrinsic - A PowerPC intrinsic that takes three v4f64
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_QPX_FFFF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f64_ty],
+                          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_QPX_Load_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and returns a v4f64.
+class PowerPC_QPX_Load_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+/// PowerPC_QPX_LoadPerm_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and returns a v4f64 permutation.
+class PowerPC_QPX_LoadPerm_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f64_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+/// PowerPC_QPX_Store_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and stores a v4f64.
+class PowerPC_QPX_Store_Intrinsic<string GCCIntSuffix>
+  : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+                          [], [llvm_v4f64_ty, llvm_ptr_ty],
+                          [IntrWriteMem, IntrArgMemOnly]>;
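+
+// Illustrative note (not part of the upstream header): instantiating one of
+// the classes above, e.g. PowerPC_QPX_FF_Intrinsic<"qvfre"> for the def
+// int_ppc_qpx_qvfre below, yields the intrinsic llvm.ppc.qpx.qvfre mapped to
+// the builtin __builtin_qpx_qvfre, declared in LLVM IR as:
+//   declare <4 x double> @llvm.ppc.qpx.qvfre(<4 x double>)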
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+  // Add Instructions
+  def int_ppc_qpx_qvfadd : PowerPC_QPX_FFF_Intrinsic<"qvfadd">;
+  def int_ppc_qpx_qvfadds : PowerPC_QPX_FFF_Intrinsic<"qvfadds">;
+  def int_ppc_qpx_qvfsub : PowerPC_QPX_FFF_Intrinsic<"qvfsub">;
+  def int_ppc_qpx_qvfsubs : PowerPC_QPX_FFF_Intrinsic<"qvfsubs">;
+
+  // Estimate Instructions
+  def int_ppc_qpx_qvfre : PowerPC_QPX_FF_Intrinsic<"qvfre">;
+  def int_ppc_qpx_qvfres : PowerPC_QPX_FF_Intrinsic<"qvfres">;
+  def int_ppc_qpx_qvfrsqrte : PowerPC_QPX_FF_Intrinsic<"qvfrsqrte">;
+  def int_ppc_qpx_qvfrsqrtes : PowerPC_QPX_FF_Intrinsic<"qvfrsqrtes">;
+
+  // Multiply Instructions
+  def int_ppc_qpx_qvfmul : PowerPC_QPX_FFF_Intrinsic<"qvfmul">;
+  def int_ppc_qpx_qvfmuls : PowerPC_QPX_FFF_Intrinsic<"qvfmuls">;
+  def int_ppc_qpx_qvfxmul : PowerPC_QPX_FFF_Intrinsic<"qvfxmul">;
+  def int_ppc_qpx_qvfxmuls : PowerPC_QPX_FFF_Intrinsic<"qvfxmuls">;
+
+  // Multiply-add instructions
+  def int_ppc_qpx_qvfmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfmadd">;
+  def int_ppc_qpx_qvfmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfmadds">;
+  def int_ppc_qpx_qvfnmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfnmadd">;
+  def int_ppc_qpx_qvfnmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfnmadds">;
+  def int_ppc_qpx_qvfmsub : PowerPC_QPX_FFFF_Intrinsic<"qvfmsub">;
+  def int_ppc_qpx_qvfmsubs : PowerPC_QPX_FFFF_Intrinsic<"qvfmsubs">;
+  def int_ppc_qpx_qvfnmsub : PowerPC_QPX_FFFF_Intrinsic<"qvfnmsub">;
+  def int_ppc_qpx_qvfnmsubs : PowerPC_QPX_FFFF_Intrinsic<"qvfnmsubs">;
+  def int_ppc_qpx_qvfxmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxmadd">;
+  def int_ppc_qpx_qvfxmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxmadds">;
+  def int_ppc_qpx_qvfxxnpmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxnpmadd">;
+  def int_ppc_qpx_qvfxxnpmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxnpmadds">;
+  def int_ppc_qpx_qvfxxcpnmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxcpnmadd">;
+  def int_ppc_qpx_qvfxxcpnmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxcpnmadds">;
+  def int_ppc_qpx_qvfxxmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxmadd">;
+  def int_ppc_qpx_qvfxxmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxmadds">;
+
+  // Select Instruction
+  def int_ppc_qpx_qvfsel : PowerPC_QPX_FFFF_Intrinsic<"qvfsel">;
+
+  // Permute Instruction
+  def int_ppc_qpx_qvfperm : PowerPC_QPX_FFFF_Intrinsic<"qvfperm">;
+
+  // Convert and Round Instructions
+  def int_ppc_qpx_qvfctid : PowerPC_QPX_FF_Intrinsic<"qvfctid">;
+  def int_ppc_qpx_qvfctidu : PowerPC_QPX_FF_Intrinsic<"qvfctidu">;
+  def int_ppc_qpx_qvfctidz : PowerPC_QPX_FF_Intrinsic<"qvfctidz">;
+  def int_ppc_qpx_qvfctiduz : PowerPC_QPX_FF_Intrinsic<"qvfctiduz">;
+  def int_ppc_qpx_qvfctiw : PowerPC_QPX_FF_Intrinsic<"qvfctiw">;
+  def int_ppc_qpx_qvfctiwu : PowerPC_QPX_FF_Intrinsic<"qvfctiwu">;
+  def int_ppc_qpx_qvfctiwz : PowerPC_QPX_FF_Intrinsic<"qvfctiwz">;
+  def int_ppc_qpx_qvfctiwuz : PowerPC_QPX_FF_Intrinsic<"qvfctiwuz">;
+  def int_ppc_qpx_qvfcfid : PowerPC_QPX_FF_Intrinsic<"qvfcfid">;
+  def int_ppc_qpx_qvfcfidu : PowerPC_QPX_FF_Intrinsic<"qvfcfidu">;
+  def int_ppc_qpx_qvfcfids : PowerPC_QPX_FF_Intrinsic<"qvfcfids">;
+  def int_ppc_qpx_qvfcfidus : PowerPC_QPX_FF_Intrinsic<"qvfcfidus">;
+  def int_ppc_qpx_qvfrsp : PowerPC_QPX_FF_Intrinsic<"qvfrsp">;
+  def int_ppc_qpx_qvfriz : PowerPC_QPX_FF_Intrinsic<"qvfriz">;
+  def int_ppc_qpx_qvfrin : PowerPC_QPX_FF_Intrinsic<"qvfrin">;
+  def int_ppc_qpx_qvfrip : PowerPC_QPX_FF_Intrinsic<"qvfrip">;
+  def int_ppc_qpx_qvfrim : PowerPC_QPX_FF_Intrinsic<"qvfrim">;
+
+  // Move Instructions
+  def int_ppc_qpx_qvfneg : PowerPC_QPX_FF_Intrinsic<"qvfneg">;
+  def int_ppc_qpx_qvfabs : PowerPC_QPX_FF_Intrinsic<"qvfabs">;
+  def int_ppc_qpx_qvfnabs : PowerPC_QPX_FF_Intrinsic<"qvfnabs">;
+  def int_ppc_qpx_qvfcpsgn : PowerPC_QPX_FFF_Intrinsic<"qvfcpsgn">;
+
+  // Compare Instructions
+  def int_ppc_qpx_qvftstnan : PowerPC_QPX_FFF_Intrinsic<"qvftstnan">;
+  def int_ppc_qpx_qvfcmplt : PowerPC_QPX_FFF_Intrinsic<"qvfcmplt">;
+  def int_ppc_qpx_qvfcmpgt : PowerPC_QPX_FFF_Intrinsic<"qvfcmpgt">;
+  def int_ppc_qpx_qvfcmpeq : PowerPC_QPX_FFF_Intrinsic<"qvfcmpeq">;
+
+  // Load instructions
+  def int_ppc_qpx_qvlfd : PowerPC_QPX_Load_Intrinsic<"qvlfd">;
+  def int_ppc_qpx_qvlfda : PowerPC_QPX_Load_Intrinsic<"qvlfda">;
+  def int_ppc_qpx_qvlfs : PowerPC_QPX_Load_Intrinsic<"qvlfs">;
+  def int_ppc_qpx_qvlfsa : PowerPC_QPX_Load_Intrinsic<"qvlfsa">;
+
+  def int_ppc_qpx_qvlfcda : PowerPC_QPX_Load_Intrinsic<"qvlfcda">;
+  def int_ppc_qpx_qvlfcd : PowerPC_QPX_Load_Intrinsic<"qvlfcd">;
+  def int_ppc_qpx_qvlfcsa : PowerPC_QPX_Load_Intrinsic<"qvlfcsa">;
+  def int_ppc_qpx_qvlfcs : PowerPC_QPX_Load_Intrinsic<"qvlfcs">;
+  def int_ppc_qpx_qvlfiwaa : PowerPC_QPX_Load_Intrinsic<"qvlfiwaa">;
+  def int_ppc_qpx_qvlfiwa : PowerPC_QPX_Load_Intrinsic<"qvlfiwa">;
+  def int_ppc_qpx_qvlfiwza : PowerPC_QPX_Load_Intrinsic<"qvlfiwza">;
+  def int_ppc_qpx_qvlfiwz : PowerPC_QPX_Load_Intrinsic<"qvlfiwz">;
+
+  def int_ppc_qpx_qvlpcld : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcld">;
+  def int_ppc_qpx_qvlpcls : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcls">;
+  def int_ppc_qpx_qvlpcrd : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcrd">;
+  def int_ppc_qpx_qvlpcrs : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcrs">;
+
+  // Store instructions
+  def int_ppc_qpx_qvstfd : PowerPC_QPX_Store_Intrinsic<"qvstfd">;
+  def int_ppc_qpx_qvstfda : PowerPC_QPX_Store_Intrinsic<"qvstfda">;
+  def int_ppc_qpx_qvstfs : PowerPC_QPX_Store_Intrinsic<"qvstfs">;
+  def int_ppc_qpx_qvstfsa : PowerPC_QPX_Store_Intrinsic<"qvstfsa">;
+
+  def int_ppc_qpx_qvstfcda : PowerPC_QPX_Store_Intrinsic<"qvstfcda">;
+  def int_ppc_qpx_qvstfcd : PowerPC_QPX_Store_Intrinsic<"qvstfcd">;
+  def int_ppc_qpx_qvstfcsa : PowerPC_QPX_Store_Intrinsic<"qvstfcsa">;
+  def int_ppc_qpx_qvstfcs : PowerPC_QPX_Store_Intrinsic<"qvstfcs">;
+  def int_ppc_qpx_qvstfiwa : PowerPC_QPX_Store_Intrinsic<"qvstfiwa">;
+  def int_ppc_qpx_qvstfiw : PowerPC_QPX_Store_Intrinsic<"qvstfiw">;
+
+  // Logical and permutation formation
+  def int_ppc_qpx_qvflogical : PowerPC_QPX_Intrinsic<"qvflogical",
+                          [llvm_v4f64_ty],
+                          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+  def int_ppc_qpx_qvgpci : PowerPC_QPX_Intrinsic<"qvgpci",
+                          [llvm_v4f64_ty], [llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC HTM Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+
+def int_ppc_tbegin : GCCBuiltin<"__builtin_tbegin">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_tend : GCCBuiltin<"__builtin_tend">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+
+def int_ppc_tabort : GCCBuiltin<"__builtin_tabort">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_tabortwc : GCCBuiltin<"__builtin_tabortwc">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortwci : GCCBuiltin<"__builtin_tabortwci">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortdc : GCCBuiltin<"__builtin_tabortdc">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortdci : GCCBuiltin<"__builtin_tabortdci">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_ppc_tcheck : GCCBuiltin<"__builtin_tcheck">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_treclaim : GCCBuiltin<"__builtin_treclaim">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_trechkpt : GCCBuiltin<"__builtin_trechkpt">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tsr : GCCBuiltin<"__builtin_tsr">,
+      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
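+
+// Illustrative note (not part of the upstream header): these map directly to
+// the HTM builtins named above, e.g. in C (assuming a target built with
+// -mhtm):
+//   unsigned ok = __builtin_tbegin(0);  /* start a transaction */
+//   /* ... transactional body ... */
+//   __builtin_tend(0);                  /* commit */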
+
+def int_ppc_get_texasr : GCCBuiltin<"__builtin_get_texasr">,
+      Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_texasru : GCCBuiltin<"__builtin_get_texasru">,
+      Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_tfhar : GCCBuiltin<"__builtin_get_tfhar">,
+      Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_tfiar : GCCBuiltin<"__builtin_get_tfiar">,
+      Intrinsic<[llvm_i64_ty], [], []>;
+
+def int_ppc_set_texasr : GCCBuiltin<"__builtin_set_texasr">,
+      Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_texasru : GCCBuiltin<"__builtin_set_texasru">,
+      Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_tfhar : GCCBuiltin<"__builtin_set_tfhar">,
+      Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_tfiar : GCCBuiltin<"__builtin_set_tfiar">,
+      Intrinsic<[], [llvm_i64_ty], []>;
+
+// Extended mnemonics
+def int_ppc_tendall : GCCBuiltin<"__builtin_tendall">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tresume : GCCBuiltin<"__builtin_tresume">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tsuspend : GCCBuiltin<"__builtin_tsuspend">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+
+def int_ppc_ttest : GCCBuiltin<"__builtin_ttest">,
+      Intrinsic<[llvm_i64_ty], [], []>;
+
+def int_ppc_cfence : Intrinsic<[], [llvm_anyint_ty], []>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsSystemZ.td b/linux-x64/clang/include/llvm/IR/IntrinsicsSystemZ.td
new file mode 100644
index 0000000..caa2ec2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsSystemZ.td
@@ -0,0 +1,431 @@
+//===- IntrinsicsSystemZ.td - Defines SystemZ intrinsics ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the SystemZ-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+class SystemZUnaryConv<string name, LLVMType result, LLVMType arg>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[result], [arg], [IntrNoMem]>;
+
+class SystemZUnary<string name, LLVMType type>
+  : SystemZUnaryConv<name, type, type>;
+
+class SystemZUnaryConvCC<LLVMType result, LLVMType arg>
+  : Intrinsic<[result, llvm_i32_ty], [arg], [IntrNoMem]>;
+
+class SystemZUnaryCC<LLVMType type>
+  : SystemZUnaryConvCC<type, type>;
+
+class SystemZBinaryConv<string name, LLVMType result, LLVMType arg>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[result], [arg, arg], [IntrNoMem]>;
+
+class SystemZBinary<string name, LLVMType type>
+  : SystemZBinaryConv<name, type, type>;
+
+class SystemZBinaryInt<string name, LLVMType type>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[type], [type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
+  : Intrinsic<[result, llvm_i32_ty], [arg, arg], [IntrNoMem]>;
+
+class SystemZBinaryConvIntCC<LLVMType result, LLVMType arg>
+  : Intrinsic<[result, llvm_i32_ty], [arg, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZBinaryCC<LLVMType type>
+  : SystemZBinaryConvCC<type, type>;
+
+class SystemZTernaryConv<string name, LLVMType result, LLVMType arg>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[result], [arg, arg, result], [IntrNoMem]>;
+
+class SystemZTernary<string name, LLVMType type>
+  : SystemZTernaryConv<name, type, type>;
+
+class SystemZTernaryInt<string name, LLVMType type>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZTernaryIntCC<LLVMType type>
+  : Intrinsic<[type, llvm_i32_ty], [type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZQuaternaryInt<string name, LLVMType type>
+  : GCCBuiltin<"__builtin_s390_" ## name>,
+    Intrinsic<[type], [type, type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZQuaternaryIntCC<LLVMType type>
+  : Intrinsic<[type, llvm_i32_ty], [type, type, type, llvm_i32_ty],
+              [IntrNoMem]>;
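+
+// Illustrative note (not part of the upstream header): the *CC classes model
+// instructions that also set the condition code, returned as a second result.
+// For example, a SystemZBinaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty> instance
+// such as int_s390_vpkshs (defined further below) is declared in LLVM IR as:
+//   declare { <16 x i8>, i32 } @llvm.s390.vpkshs(<8 x i16>, <8 x i16>)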
+
+multiclass SystemZUnaryExtBHF<string name> {
+  def b : SystemZUnaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+  def h : SystemZUnaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+  def f : SystemZUnaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZUnaryExtBHWF<string name> {
+  def b  : SystemZUnaryConv<name##"b",  llvm_v8i16_ty, llvm_v16i8_ty>;
+  def hw : SystemZUnaryConv<name##"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
+  def f  : SystemZUnaryConv<name##"f",  llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZUnaryBHF<string name> {
+  def b : SystemZUnary<name##"b", llvm_v16i8_ty>;
+  def h : SystemZUnary<name##"h", llvm_v8i16_ty>;
+  def f : SystemZUnary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZUnaryBHFG<string name> : SystemZUnaryBHF<name> {
+  def g : SystemZUnary<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZUnaryCCBHF {
+  def bs : SystemZUnaryCC<llvm_v16i8_ty>;
+  def hs : SystemZUnaryCC<llvm_v8i16_ty>;
+  def fs : SystemZUnaryCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryTruncHFG<string name> {
+  def h : SystemZBinaryConv<name##"h", llvm_v16i8_ty, llvm_v8i16_ty>;
+  def f : SystemZBinaryConv<name##"f", llvm_v8i16_ty, llvm_v4i32_ty>;
+  def g : SystemZBinaryConv<name##"g", llvm_v4i32_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryTruncCCHFG {
+  def hs : SystemZBinaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
+  def fs : SystemZBinaryConvCC<llvm_v8i16_ty, llvm_v4i32_ty>;
+  def gs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryExtBHF<string name> {
+  def b : SystemZBinaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+  def h : SystemZBinaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+  def f : SystemZBinaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryExtBHFG<string name> : SystemZBinaryExtBHF<name> {
+  def g : SystemZBinaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryBHF<string name> {
+  def b : SystemZBinary<name##"b", llvm_v16i8_ty>;
+  def h : SystemZBinary<name##"h", llvm_v8i16_ty>;
+  def f : SystemZBinary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryBHFG<string name> : SystemZBinaryBHF<name> {
+  def g : SystemZBinary<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryIntBHFG<string name> {
+  def b : SystemZBinaryInt<name##"b", llvm_v16i8_ty>;
+  def h : SystemZBinaryInt<name##"h", llvm_v8i16_ty>;
+  def f : SystemZBinaryInt<name##"f", llvm_v4i32_ty>;
+  def g : SystemZBinaryInt<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryCCBHF {
+  def bs : SystemZBinaryCC<llvm_v16i8_ty>;
+  def hs : SystemZBinaryCC<llvm_v8i16_ty>;
+  def fs : SystemZBinaryCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZCompareBHFG<string name> {
+  def bs : SystemZBinaryCC<llvm_v16i8_ty>;
+  def hs : SystemZBinaryCC<llvm_v8i16_ty>;
+  def fs : SystemZBinaryCC<llvm_v4i32_ty>;
+  def gs : SystemZBinaryCC<llvm_v2i64_ty>;
+}
+
+multiclass SystemZTernaryExtBHF<string name> {
+  def b : SystemZTernaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+  def h : SystemZTernaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+  def f : SystemZTernaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryExtBHFG<string name> : SystemZTernaryExtBHF<name> {
+  def g : SystemZTernaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZTernaryBHF<string name> {
+  def b : SystemZTernary<name##"b", llvm_v16i8_ty>;
+  def h : SystemZTernary<name##"h", llvm_v8i16_ty>;
+  def f : SystemZTernary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryIntBHF<string name> {
+  def b : SystemZTernaryInt<name##"b", llvm_v16i8_ty>;
+  def h : SystemZTernaryInt<name##"h", llvm_v8i16_ty>;
+  def f : SystemZTernaryInt<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryIntCCBHF {
+  def bs : SystemZTernaryIntCC<llvm_v16i8_ty>;
+  def hs : SystemZTernaryIntCC<llvm_v8i16_ty>;
+  def fs : SystemZTernaryIntCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZQuaternaryIntBHF<string name> {
+  def b : SystemZQuaternaryInt<name##"b", llvm_v16i8_ty>;
+  def h : SystemZQuaternaryInt<name##"h", llvm_v8i16_ty>;
+  def f : SystemZQuaternaryInt<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZQuaternaryIntBHFG<string name>
+    : SystemZQuaternaryIntBHF<name> {
+  def g : SystemZQuaternaryInt<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZQuaternaryIntCCBHF {
+  def bs : SystemZQuaternaryIntCC<llvm_v16i8_ty>;
+  def hs : SystemZQuaternaryIntCC<llvm_v8i16_ty>;
+  def fs : SystemZQuaternaryIntCC<llvm_v4i32_ty>;
+}
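+
+// Illustrative note (not part of the upstream header): the multiclasses above
+// fan a single defm out over the element widths, so e.g.
+//   defm int_s390_vuph : SystemZUnaryExtBHF<"vuph">;
+// defines int_s390_vuphb, int_s390_vuphh and int_s390_vuphf, mapping to the
+// builtins __builtin_s390_vuphb/h/f with the widening vector signatures.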
+
+//===----------------------------------------------------------------------===//
+//
+// Transactional-execution intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "s390" in {
+  def int_s390_tbegin : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                  [IntrNoDuplicate, IntrWriteMem]>;
+
+  def int_s390_tbegin_nofloat : Intrinsic<[llvm_i32_ty],
+                                          [llvm_ptr_ty, llvm_i32_ty],
+                                          [IntrNoDuplicate, IntrWriteMem]>;
+
+  def int_s390_tbeginc : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+                                   [IntrNoDuplicate, IntrWriteMem]>;
+
+  def int_s390_tabort : Intrinsic<[], [llvm_i64_ty],
+                                  [IntrNoReturn, Throws, IntrWriteMem]>;
+
+  def int_s390_tend : GCCBuiltin<"__builtin_tend">,
+                      Intrinsic<[llvm_i32_ty], []>;
+
+  def int_s390_etnd : GCCBuiltin<"__builtin_tx_nesting_depth">,
+                      Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+
+  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
+                                 [IntrArgMemOnly, IntrWriteMem]>;
+
+  def int_s390_ppa_txassist : GCCBuiltin<"__builtin_tx_assist">,
+                              Intrinsic<[], [llvm_i32_ty]>;
+}
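+
+// Illustrative note (not part of the upstream header): int_s390_tbegin takes
+// a pointer to an optional 256-byte transaction diagnostic block plus a
+// control mask, and yields the TBEGIN condition code:
+//   declare i32 @llvm.s390.tbegin(i8*, i32)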
+
+//===----------------------------------------------------------------------===//
+//
+// Vector intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "s390" in {
+  def int_s390_lcbb : GCCBuiltin<"__builtin_s390_lcbb">,
+                      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                [IntrNoMem]>;
+
+  def int_s390_vlbb : GCCBuiltin<"__builtin_s390_vlbb">,
+                      Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_s390_vll : GCCBuiltin<"__builtin_s390_vll">,
+                     Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
+                               [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_s390_vpdi : GCCBuiltin<"__builtin_s390_vpdi">,
+                      Intrinsic<[llvm_v2i64_ty],
+                                [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+                                [IntrNoMem]>;
+
+  def int_s390_vperm : GCCBuiltin<"__builtin_s390_vperm">,
+                       Intrinsic<[llvm_v16i8_ty],
+                                 [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+                                 [IntrNoMem]>;
+
+  defm int_s390_vpks : SystemZBinaryTruncHFG<"vpks">;
+  defm int_s390_vpks : SystemZBinaryTruncCCHFG;
+
+  defm int_s390_vpkls : SystemZBinaryTruncHFG<"vpkls">;
+  defm int_s390_vpkls : SystemZBinaryTruncCCHFG;
+
+  def int_s390_vstl : GCCBuiltin<"__builtin_s390_vstl">,
+                      Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
+                                [IntrArgMemOnly, IntrWriteMem]>;
+
+  defm int_s390_vupl  : SystemZUnaryExtBHWF<"vupl">;
+  defm int_s390_vupll : SystemZUnaryExtBHF<"vupll">;
+
+  defm int_s390_vuph  : SystemZUnaryExtBHF<"vuph">;
+  defm int_s390_vuplh : SystemZUnaryExtBHF<"vuplh">;
+
+  defm int_s390_vacc : SystemZBinaryBHFG<"vacc">;
+
+  def int_s390_vaq    : SystemZBinary<"vaq",     llvm_v16i8_ty>;
+  def int_s390_vacq   : SystemZTernary<"vacq",   llvm_v16i8_ty>;
+  def int_s390_vaccq  : SystemZBinary<"vaccq",   llvm_v16i8_ty>;
+  def int_s390_vacccq : SystemZTernary<"vacccq", llvm_v16i8_ty>;
+
+  defm int_s390_vavg  : SystemZBinaryBHFG<"vavg">;
+  defm int_s390_vavgl : SystemZBinaryBHFG<"vavgl">;
+
+  def int_s390_vcksm : SystemZBinary<"vcksm", llvm_v4i32_ty>;
+
+  defm int_s390_vgfm  : SystemZBinaryExtBHFG<"vgfm">;
+  defm int_s390_vgfma : SystemZTernaryExtBHFG<"vgfma">;
+
+  defm int_s390_vmah  : SystemZTernaryBHF<"vmah">;
+  defm int_s390_vmalh : SystemZTernaryBHF<"vmalh">;
+  defm int_s390_vmae  : SystemZTernaryExtBHF<"vmae">;
+  defm int_s390_vmale : SystemZTernaryExtBHF<"vmale">;
+  defm int_s390_vmao  : SystemZTernaryExtBHF<"vmao">;
+  defm int_s390_vmalo : SystemZTernaryExtBHF<"vmalo">;
+
+  defm int_s390_vmh  : SystemZBinaryBHF<"vmh">;
+  defm int_s390_vmlh : SystemZBinaryBHF<"vmlh">;
+  defm int_s390_vme  : SystemZBinaryExtBHF<"vme">;
+  defm int_s390_vmle : SystemZBinaryExtBHF<"vmle">;
+  defm int_s390_vmo  : SystemZBinaryExtBHF<"vmo">;
+  defm int_s390_vmlo : SystemZBinaryExtBHF<"vmlo">;
+
+  defm int_s390_verllv : SystemZBinaryBHFG<"verllv">;
+  defm int_s390_verll  : SystemZBinaryIntBHFG<"verll">;
+  defm int_s390_verim  : SystemZQuaternaryIntBHFG<"verim">;
+
+  def int_s390_vsl   : SystemZBinary<"vsl",   llvm_v16i8_ty>;
+  def int_s390_vslb  : SystemZBinary<"vslb",  llvm_v16i8_ty>;
+  def int_s390_vsra  : SystemZBinary<"vsra",  llvm_v16i8_ty>;
+  def int_s390_vsrab : SystemZBinary<"vsrab", llvm_v16i8_ty>;
+  def int_s390_vsrl  : SystemZBinary<"vsrl",  llvm_v16i8_ty>;
+  def int_s390_vsrlb : SystemZBinary<"vsrlb", llvm_v16i8_ty>;
+
+  def int_s390_vsldb : GCCBuiltin<"__builtin_s390_vsldb">,
+                       Intrinsic<[llvm_v16i8_ty],
+                                 [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+
+  defm int_s390_vscbi : SystemZBinaryBHFG<"vscbi">;
+
+  def int_s390_vsq     : SystemZBinary<"vsq",      llvm_v16i8_ty>;
+  def int_s390_vsbiq   : SystemZTernary<"vsbiq",   llvm_v16i8_ty>;
+  def int_s390_vscbiq  : SystemZBinary<"vscbiq",   llvm_v16i8_ty>;
+  def int_s390_vsbcbiq : SystemZTernary<"vsbcbiq", llvm_v16i8_ty>;
+
+  def int_s390_vsumb : SystemZBinaryConv<"vsumb", llvm_v4i32_ty, llvm_v16i8_ty>;
+  def int_s390_vsumh : SystemZBinaryConv<"vsumh", llvm_v4i32_ty, llvm_v8i16_ty>;
+
+  def int_s390_vsumgh : SystemZBinaryConv<"vsumgh", llvm_v2i64_ty,
+                                          llvm_v8i16_ty>;
+  def int_s390_vsumgf : SystemZBinaryConv<"vsumgf", llvm_v2i64_ty,
+                                          llvm_v4i32_ty>;
+
+  def int_s390_vsumqf : SystemZBinaryConv<"vsumqf", llvm_v16i8_ty,
+                                          llvm_v4i32_ty>;
+  def int_s390_vsumqg : SystemZBinaryConv<"vsumqg", llvm_v16i8_ty,
+                                          llvm_v2i64_ty>;
+
+  def int_s390_vtm : SystemZBinaryConv<"vtm", llvm_i32_ty, llvm_v16i8_ty>;
+
+  defm int_s390_vceq : SystemZCompareBHFG<"vceq">;
+  defm int_s390_vch  : SystemZCompareBHFG<"vch">;
+  defm int_s390_vchl : SystemZCompareBHFG<"vchl">;
+
+  defm int_s390_vfae  : SystemZTernaryIntBHF<"vfae">;
+  defm int_s390_vfae  : SystemZTernaryIntCCBHF;
+  defm int_s390_vfaez : SystemZTernaryIntBHF<"vfaez">;
+  defm int_s390_vfaez : SystemZTernaryIntCCBHF;
+
+  defm int_s390_vfee  : SystemZBinaryBHF<"vfee">;
+  defm int_s390_vfee  : SystemZBinaryCCBHF;
+  defm int_s390_vfeez : SystemZBinaryBHF<"vfeez">;
+  defm int_s390_vfeez : SystemZBinaryCCBHF;
+
+  defm int_s390_vfene  : SystemZBinaryBHF<"vfene">;
+  defm int_s390_vfene  : SystemZBinaryCCBHF;
+  defm int_s390_vfenez : SystemZBinaryBHF<"vfenez">;
+  defm int_s390_vfenez : SystemZBinaryCCBHF;
+
+  defm int_s390_vistr : SystemZUnaryBHF<"vistr">;
+  defm int_s390_vistr : SystemZUnaryCCBHF;
+
+  defm int_s390_vstrc  : SystemZQuaternaryIntBHF<"vstrc">;
+  defm int_s390_vstrc  : SystemZQuaternaryIntCCBHF;
+  defm int_s390_vstrcz : SystemZQuaternaryIntBHF<"vstrcz">;
+  defm int_s390_vstrcz : SystemZQuaternaryIntCCBHF;
+
+  def int_s390_vfcedbs  : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+  def int_s390_vfchdbs  : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+  def int_s390_vfchedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+
+  def int_s390_vftcidb : SystemZBinaryConvIntCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+
+  def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
+                                 [llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+
+  // Instructions from the Vector Enhancements Facility 1
+  def int_s390_vbperm : SystemZBinaryConv<"vbperm", llvm_v2i64_ty,
+                                          llvm_v16i8_ty>;
+
+  def int_s390_vmslg  : GCCBuiltin<"__builtin_s390_vmslg">,
+                        Intrinsic<[llvm_v16i8_ty],
+                                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v16i8_ty,
+                                   llvm_i32_ty], [IntrNoMem]>;
+
+  def int_s390_vfmaxdb : Intrinsic<[llvm_v2f64_ty],
+                                   [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
+                                   [IntrNoMem]>;
+  def int_s390_vfmindb : Intrinsic<[llvm_v2f64_ty],
+                                   [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
+                                   [IntrNoMem]>;
+  def int_s390_vfmaxsb : Intrinsic<[llvm_v4f32_ty],
+                                   [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
+                                   [IntrNoMem]>;
+  def int_s390_vfminsb : Intrinsic<[llvm_v4f32_ty],
+                                   [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
+                                   [IntrNoMem]>;
+
+  def int_s390_vfcesbs  : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
+  def int_s390_vfchsbs  : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
+  def int_s390_vfchesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
+
+  def int_s390_vftcisb : SystemZBinaryConvIntCC<llvm_v4i32_ty, llvm_v4f32_ty>;
+
+  def int_s390_vfisb : Intrinsic<[llvm_v4f32_ty],
+                                 [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+
+  // Instructions from the Vector Packed Decimal Facility
+  def int_s390_vlrl : GCCBuiltin<"__builtin_s390_vlrl">,
+                      Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
+                                [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_s390_vstrl : GCCBuiltin<"__builtin_s390_vstrl">,
+                       Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
+                                 [IntrArgMemOnly, IntrWriteMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Misc intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "s390" in {
+  def int_s390_sfpc : GCCBuiltin<"__builtin_s390_sfpc">,
+                      Intrinsic<[], [llvm_i32_ty], []>;
+  def int_s390_efpc : GCCBuiltin<"__builtin_s390_efpc">,
+                      Intrinsic<[llvm_i32_ty], [], []>;
+
+  def int_s390_tdc : Intrinsic<[llvm_i32_ty], [llvm_anyfloat_ty, llvm_i64_ty],
+                               [IntrNoMem]>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsWebAssembly.td b/linux-x64/clang/include/llvm/IR/IntrinsicsWebAssembly.td
new file mode 100644
index 0000000..e9e5e53
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -0,0 +1,48 @@
+//===- IntrinsicsWebAssembly.td - Defines wasm intrinsics --*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines all of the WebAssembly-specific intrinsics.
+///
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "wasm" in {  // All intrinsics start with "llvm.wasm.".
+
+// Query the current memory size, and grow the memory by a given number of
+// pages.
+// Note that mem.size is not IntrNoMem because it must be sequenced with
+// respect to mem.grow calls.
+// These are the new proposed names, which aren't yet official. Use at your own
+// risk.
+def int_wasm_mem_size : Intrinsic<[llvm_anyint_ty],
+                                  [llvm_i32_ty],
+                                  [IntrReadMem]>;
+def int_wasm_mem_grow : Intrinsic<[llvm_anyint_ty],
+                                  [llvm_i32_ty, LLVMMatchType<0>],
+                                  []>;
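+
+// Illustrative note (not part of the upstream header): with an i32 memory,
+// these instantiate as
+//   declare i32 @llvm.wasm.mem.size.i32(i32)
+//   declare i32 @llvm.wasm.mem.grow.i32(i32, i32)
+// where the leading i32 selects the memory (0 for the default memory).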
+
+// These are the existing names, which are currently official, but expected
+// to be deprecated in the future. They also lack the leading memory-index
+// immediate operand.
+def int_wasm_current_memory : Intrinsic<[llvm_anyint_ty], [], [IntrReadMem]>;
+def int_wasm_grow_memory : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], []>;
+
+//===----------------------------------------------------------------------===//
+// Exception handling intrinsics
+//===----------------------------------------------------------------------===//
+
+// throw / rethrow
+def int_wasm_throw : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty],
+                               [Throws, IntrNoReturn]>;
+def int_wasm_rethrow : Intrinsic<[], [], [Throws, IntrNoReturn]>;
+
+// Since wasm does not use landingpad instructions, these intrinsics return
+// the exception pointer and selector values until we lower them in
+// WasmEHPrepare.
+def int_wasm_get_exception : Intrinsic<[llvm_ptr_ty], [], [IntrHasSideEffects]>;
+def int_wasm_get_ehselector : Intrinsic<[llvm_i32_ty], [],
+                                        [IntrHasSideEffects]>;
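+
+// Illustrative note (not part of the upstream header): in a wasm EH pad these
+// appear as
+//   %exn = call i8* @llvm.wasm.get.exception()
+//   %sel = call i32 @llvm.wasm.get.ehselector()
+// and are rewritten by the WasmEHPrepare pass mentioned above.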
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsX86.td b/linux-x64/clang/include/llvm/IR/IntrinsicsX86.td
new file mode 100644
index 0000000..b0a9dc1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsX86.td
@@ -0,0 +1,6423 @@
+//===- IntrinsicsX86.td - Defines X86 intrinsics -----------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the X86-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Interrupt traps
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_int : Intrinsic<[], [llvm_i8_ty]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SEH intrinsics for Windows
+let TargetPrefix = "x86" in {
+  def int_x86_seh_lsda : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+  // Marks the EH registration node created in LLVM IR prior to code generation.
+  def int_x86_seh_ehregnode : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  // Marks the EH guard slot node created in LLVM IR prior to code generation.
+  def int_x86_seh_ehguard : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  // Given a pointer to the end of an EH registration object, returns the true
+  // parent frame address that can be used with llvm.localrecover.
+  def int_x86_seh_recoverfp : Intrinsic<[llvm_ptr_ty],
+                                        [llvm_ptr_ty, llvm_ptr_ty],
+                                        [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// FLAGS.
+let TargetPrefix = "x86" in {
+  def int_x86_flags_read_u32 : GCCBuiltin<"__builtin_ia32_readeflags_u32">,
+        Intrinsic<[llvm_i32_ty], [], []>;
+  def int_x86_flags_read_u64 : GCCBuiltin<"__builtin_ia32_readeflags_u64">,
+        Intrinsic<[llvm_i64_ty], [], []>;
+  def int_x86_flags_write_u32 : GCCBuiltin<"__builtin_ia32_writeeflags_u32">,
+        Intrinsic<[], [llvm_i32_ty], []>;
+  def int_x86_flags_write_u64 : GCCBuiltin<"__builtin_ia32_writeeflags_u64">,
+        Intrinsic<[], [llvm_i64_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Read Time Stamp Counter.
+let TargetPrefix = "x86" in {
+  def int_x86_rdtsc : GCCBuiltin<"__builtin_ia32_rdtsc">,
+              Intrinsic<[llvm_i64_ty], [], []>;
+  def int_x86_rdtscp : GCCBuiltin<"__builtin_ia32_rdtscp">,
+              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty], [IntrArgMemOnly]>;
+}
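+
+// Illustrative note (not part of the upstream header): rdtscp additionally
+// stores the IA32_TSC_AUX value through its pointer operand, e.g. via the
+// clang builtin:
+//   unsigned int aux;
+//   unsigned long long tsc = __builtin_ia32_rdtscp(&aux);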
+
+// Read Performance-Monitoring Counter.
+let TargetPrefix = "x86" in {
+  def int_x86_rdpmc : GCCBuiltin<"__builtin_ia32_rdpmc">,
+              Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
+}
+
+// Read processor ID.
+let TargetPrefix = "x86" in {
+  def int_x86_rdpid : GCCBuiltin<"__builtin_ia32_rdpid">,
+              Intrinsic<[llvm_i32_ty], [], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// CET SS
+let TargetPrefix = "x86" in {
+  def int_x86_incsspd : GCCBuiltin<"__builtin_ia32_incsspd">,
+              Intrinsic<[], [llvm_i32_ty], []>;
+  def int_x86_incsspq : GCCBuiltin<"__builtin_ia32_incsspq">,
+              Intrinsic<[], [llvm_i64_ty], []>;
+  def int_x86_rdsspd : GCCBuiltin<"__builtin_ia32_rdsspd">,
+              Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+  def int_x86_rdsspq : GCCBuiltin<"__builtin_ia32_rdsspq">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
+  def int_x86_saveprevssp : GCCBuiltin<"__builtin_ia32_saveprevssp">,
+              Intrinsic<[], [], []>;
+  def int_x86_rstorssp : GCCBuiltin<"__builtin_ia32_rstorssp">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_wrssd : GCCBuiltin<"__builtin_ia32_wrssd">,
+              Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
+  def int_x86_wrssq : GCCBuiltin<"__builtin_ia32_wrssq">,
+              Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
+  def int_x86_wrussd : GCCBuiltin<"__builtin_ia32_wrussd">,
+              Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
+  def int_x86_wrussq : GCCBuiltin<"__builtin_ia32_wrussq">,
+              Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
+  def int_x86_setssbsy : GCCBuiltin<"__builtin_ia32_setssbsy">,
+              Intrinsic<[], [], []>;
+  def int_x86_clrssbsy : GCCBuiltin<"__builtin_ia32_clrssbsy">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// 3DNow!
+
+let TargetPrefix = "x86" in {
+  def int_x86_3dnow_pavgusb : GCCBuiltin<"__builtin_ia32_pavgusb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pf2id : GCCBuiltin<"__builtin_ia32_pf2id">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnow_pfacc : GCCBuiltin<"__builtin_ia32_pfacc">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfadd : GCCBuiltin<"__builtin_ia32_pfadd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfcmpeq : GCCBuiltin<"__builtin_ia32_pfcmpeq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfcmpge : GCCBuiltin<"__builtin_ia32_pfcmpge">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfcmpgt : GCCBuiltin<"__builtin_ia32_pfcmpgt">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfmax : GCCBuiltin<"__builtin_ia32_pfmax">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfmin : GCCBuiltin<"__builtin_ia32_pfmin">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfmul : GCCBuiltin<"__builtin_ia32_pfmul">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfrcp : GCCBuiltin<"__builtin_ia32_pfrcp">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnow_pfrcpit1 : GCCBuiltin<"__builtin_ia32_pfrcpit1">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfrcpit2 : GCCBuiltin<"__builtin_ia32_pfrcpit2">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfrsqrt : GCCBuiltin<"__builtin_ia32_pfrsqrt">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnow_pfrsqit1 : GCCBuiltin<"__builtin_ia32_pfrsqit1">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfsub : GCCBuiltin<"__builtin_ia32_pfsub">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pfsubr : GCCBuiltin<"__builtin_ia32_pfsubr">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnow_pi2fd : GCCBuiltin<"__builtin_ia32_pi2fd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnow_pmulhrw : GCCBuiltin<"__builtin_ia32_pmulhrw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// 3DNow! extensions
+
+let TargetPrefix = "x86" in {
+  def int_x86_3dnowa_pf2iw : GCCBuiltin<"__builtin_ia32_pf2iw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnowa_pfnacc : GCCBuiltin<"__builtin_ia32_pfnacc">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnowa_pfpnacc : GCCBuiltin<"__builtin_ia32_pfpnacc">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_3dnowa_pi2fw : GCCBuiltin<"__builtin_ia32_pi2fw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_3dnowa_pswapd :
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE1
+
+// Arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_sqrt_ps : GCCBuiltin<"__builtin_ia32_sqrtps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_rcp_ss : GCCBuiltin<"__builtin_ia32_rcpss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_rcp_ps : GCCBuiltin<"__builtin_ia32_rcpps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_rsqrt_ss : GCCBuiltin<"__builtin_ia32_rsqrtss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_rsqrt_ps : GCCBuiltin<"__builtin_ia32_rsqrtps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse_min_ss : GCCBuiltin<"__builtin_ia32_minss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_min_ps : GCCBuiltin<"__builtin_ia32_minps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_max_ss : GCCBuiltin<"__builtin_ia32_maxss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_max_ps : GCCBuiltin<"__builtin_ia32_maxps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+}
+
+// Comparison ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_cmp_ss : GCCBuiltin<"__builtin_ia32_cmpss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_sse_cmp_ps :
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
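+
+// Illustrative note (not part of the upstream header): the trailing i8
+// immediate selects the comparison predicate in the CMPSS/CMPPS encoding
+// (0=EQ, 1=LT, 2=LE, 3=UNORD, 4=NEQ, 5=NLT, 6=NLE, 7=ORD).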
+  def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_comilt_ss : GCCBuiltin<"__builtin_ia32_comilt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_comile_ss : GCCBuiltin<"__builtin_ia32_comile">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_comigt_ss : GCCBuiltin<"__builtin_ia32_comigt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_comige_ss : GCCBuiltin<"__builtin_ia32_comige">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_comineq_ss : GCCBuiltin<"__builtin_ia32_comineq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomieq_ss : GCCBuiltin<"__builtin_ia32_ucomieq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomilt_ss : GCCBuiltin<"__builtin_ia32_ucomilt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomile_ss : GCCBuiltin<"__builtin_ia32_ucomile">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomigt_ss : GCCBuiltin<"__builtin_ia32_ucomigt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomige_ss : GCCBuiltin<"__builtin_ia32_ucomige">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_ucomineq_ss : GCCBuiltin<"__builtin_ia32_ucomineq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+}
+
+
+// Conversion ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_cvtss2si : GCCBuiltin<"__builtin_ia32_cvtss2si">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtss2si64 : GCCBuiltin<"__builtin_ia32_cvtss2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvttss2si : GCCBuiltin<"__builtin_ia32_cvttss2si">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvttss2si64 : GCCBuiltin<"__builtin_ia32_cvttss2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtsi2ss : // TODO: Remove this intrinsic.
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtsi642ss : // TODO: Remove this intrinsic.
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i64_ty], [IntrNoMem]>;
+
+  def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+}
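+
+// Illustrative sketch: cvtss2si rounds according to the current MXCSR
+// rounding mode, while the cvtt* forms truncate toward zero, e.g.:
+//   %i = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a)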
+
+// Cacheability support ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">,
+              Intrinsic<[], [], []>;
+}
+
+// Control register.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_stmxcsr :
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_sse_ldmxcsr :
+              Intrinsic<[], [llvm_ptr_ty], []>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse_movmsk_ps : GCCBuiltin<"__builtin_ia32_movmskps">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+}
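+
+// Illustrative sketch: movmsk.ps packs the four sign bits into the low bits
+// of an i32:
+//   %bits = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %v)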
+
+//===----------------------------------------------------------------------===//
+// SSE2
+
+// FP arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse2_sqrt_pd : GCCBuiltin<"__builtin_ia32_sqrtpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_sse2_min_sd : GCCBuiltin<"__builtin_ia32_minsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_min_pd : GCCBuiltin<"__builtin_ia32_minpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_max_sd : GCCBuiltin<"__builtin_ia32_maxsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_max_pd : GCCBuiltin<"__builtin_ia32_maxpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// FP comparison ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_cmp_sd : GCCBuiltin<"__builtin_ia32_cmpsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_sse2_cmp_pd :
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_comilt_sd : GCCBuiltin<"__builtin_ia32_comisdlt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_comile_sd : GCCBuiltin<"__builtin_ia32_comisdle">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_comigt_sd : GCCBuiltin<"__builtin_ia32_comisdgt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_comige_sd : GCCBuiltin<"__builtin_ia32_comisdge">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_comineq_sd : GCCBuiltin<"__builtin_ia32_comisdneq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomieq_sd : GCCBuiltin<"__builtin_ia32_ucomisdeq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomilt_sd : GCCBuiltin<"__builtin_ia32_ucomisdlt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomile_sd : GCCBuiltin<"__builtin_ia32_ucomisdle">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomigt_sd : GCCBuiltin<"__builtin_ia32_ucomisdgt">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomige_sd : GCCBuiltin<"__builtin_ia32_ucomisdge">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_ucomineq_sd : GCCBuiltin<"__builtin_ia32_ucomisdneq">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_sse2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_sse2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+}
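+
+// Illustrative sketch: the padds/paddus and psubs/psubus families saturate
+// rather than wrap, e.g. signed-saturating byte addition:
+//   %s = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)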
+
+// Integer shift ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_psll_w : GCCBuiltin<"__builtin_ia32_psllw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_psll_d : GCCBuiltin<"__builtin_ia32_pslld128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psll_q : GCCBuiltin<"__builtin_ia32_psllq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_sse2_psra_w : GCCBuiltin<"__builtin_ia32_psraw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_psra_d : GCCBuiltin<"__builtin_ia32_psrad128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_sse2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+}
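+
+// Illustrative sketch of the two shift forms: psll.w reads the shift count
+// from the low 64 bits of a vector operand, while pslli.w takes an i32 count:
+//   %v1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %x, <8 x i16> %cnt)
+//   %v2 = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %x, i32 3)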
+
+// Conversion ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_cvtdq2ps : GCCBuiltin<"__builtin_ia32_cvtdq2ps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtpd2dq : GCCBuiltin<"__builtin_ia32_cvtpd2dq">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvttpd2dq : GCCBuiltin<"__builtin_ia32_cvttpd2dq">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtpd2ps : GCCBuiltin<"__builtin_ia32_cvtpd2ps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtps2dq : GCCBuiltin<"__builtin_ia32_cvtps2dq">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvttps2dq : GCCBuiltin<"__builtin_ia32_cvttps2dq">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtsd2si : GCCBuiltin<"__builtin_ia32_cvtsd2si">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtsd2si64 : GCCBuiltin<"__builtin_ia32_cvtsd2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvttsd2si : GCCBuiltin<"__builtin_ia32_cvttsd2si">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_cvttsd2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtsi2sd : // TODO: Remove this intrinsic.
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtsi642sd : // TODO: Remove this intrinsic.
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtsd2ss : GCCBuiltin<"__builtin_ia32_cvtsd2ss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_cvtss2sd : // TODO: Remove this intrinsic.
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse_cvttpd2pi: GCCBuiltin<"__builtin_ia32_cvttpd2pi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse2_packsswb_128 : GCCBuiltin<"__builtin_ia32_packsswb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_packssdw_128 : GCCBuiltin<"__builtin_ia32_packssdw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_sse2_movmsk_pd : GCCBuiltin<"__builtin_ia32_movmskpd">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
+              Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">,
+              Intrinsic<[], [llvm_v16i8_ty,
+                         llvm_v16i8_ty, llvm_ptr_ty], []>;
+  def int_x86_sse2_clflush : GCCBuiltin<"__builtin_ia32_clflush">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_sse2_lfence : GCCBuiltin<"__builtin_ia32_lfence">,
+              Intrinsic<[], [], []>;
+  def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">,
+              Intrinsic<[], [], []>;
+  def int_x86_sse2_pause : GCCBuiltin<"__builtin_ia32_pause">,
+              Intrinsic<[], [], []>;
+}
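+
+// Illustrative sketch (typed-pointer IR of this era; %p assumed to be an
+// i8*): the cache/ordering intrinsics take no operands beyond the address:
+//   call void @llvm.x86.sse2.clflush(i8* %p)
+//   call void @llvm.x86.sse2.mfence()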
+
+//===----------------------------------------------------------------------===//
+// SSE3
+
+// Addition / subtraction ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse3_addsub_ps : GCCBuiltin<"__builtin_ia32_addsubps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse3_addsub_pd : GCCBuiltin<"__builtin_ia32_addsubpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// Horizontal ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse3_hadd_ps : GCCBuiltin<"__builtin_ia32_haddps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse3_hadd_pd : GCCBuiltin<"__builtin_ia32_haddpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse3_hsub_ps : GCCBuiltin<"__builtin_ia32_hsubps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_sse3_hsub_pd : GCCBuiltin<"__builtin_ia32_hsubpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty], [IntrNoMem]>;
+}
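+
+// Illustrative sketch: the horizontal ops reduce adjacent pairs, so
+//   %h = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a, <4 x float> %b)
+// yields roughly <a0+a1, a2+a3, b0+b1, b2+b3>.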
+
+// Specialized unaligned load.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse3_ldu_dq : GCCBuiltin<"__builtin_ia32_lddqu">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// Thread synchronization ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse3_monitor : GCCBuiltin<"__builtin_ia32_monitor">,
+              Intrinsic<[], [llvm_ptr_ty,
+                         llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_sse3_mwait : GCCBuiltin<"__builtin_ia32_mwait">,
+              Intrinsic<[], [llvm_i32_ty,
+                         llvm_i32_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSSE3
+
+// Horizontal arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_ssse3_phadd_w         : GCCBuiltin<"__builtin_ia32_phaddw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phadd_w_128     : GCCBuiltin<"__builtin_ia32_phaddw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_phadd_d         : GCCBuiltin<"__builtin_ia32_phaddd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phadd_d_128     : GCCBuiltin<"__builtin_ia32_phaddd128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_phadd_sw        : GCCBuiltin<"__builtin_ia32_phaddsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phadd_sw_128    : GCCBuiltin<"__builtin_ia32_phaddsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_phsub_w         : GCCBuiltin<"__builtin_ia32_phsubw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phsub_w_128     : GCCBuiltin<"__builtin_ia32_phsubw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_phsub_d         : GCCBuiltin<"__builtin_ia32_phsubd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phsub_d_128     : GCCBuiltin<"__builtin_ia32_phsubd128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_phsub_sw        : GCCBuiltin<"__builtin_ia32_phsubsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_phsub_sw_128    : GCCBuiltin<"__builtin_ia32_phsubsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_pmadd_ub_sw     : GCCBuiltin<"__builtin_ia32_pmaddubsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem]>;
+}
+
+// Packed multiply high with round and scale
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_ssse3_pmul_hr_sw      : GCCBuiltin<"__builtin_ia32_pmulhrsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_ssse3_pmul_hr_sw_128  : GCCBuiltin<"__builtin_ia32_pmulhrsw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+}
+
+// Shuffle ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_ssse3_pshuf_b         : GCCBuiltin<"__builtin_ia32_pshufb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_pshuf_b_128     : GCCBuiltin<"__builtin_ia32_pshufb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_sse_pshuf_w           : GCCBuiltin<"__builtin_ia32_pshufw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i8_ty],
+                         [IntrNoMem]>;
+}
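+
+// Illustrative sketch: pshufb selects each result byte by the low four bits
+// of the corresponding mask byte (a set sign bit zeroes that lane):
+//   %r = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %x, <16 x i8> %mask)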
+
+// Sign ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_ssse3_psign_b         : GCCBuiltin<"__builtin_ia32_psignb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_psign_b_128     : GCCBuiltin<"__builtin_ia32_psignb128">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                         llvm_v16i8_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_psign_w         : GCCBuiltin<"__builtin_ia32_psignw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_psign_w_128     : GCCBuiltin<"__builtin_ia32_psignw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_psign_d         : GCCBuiltin<"__builtin_ia32_psignd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_ssse3_psign_d_128     : GCCBuiltin<"__builtin_ia32_psignd128">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+}
+
+// Absolute value ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_ssse3_pabs_b     : GCCBuiltin<"__builtin_ia32_pabsb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_pabs_w     : GCCBuiltin<"__builtin_ia32_pabsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_ssse3_pabs_d     : GCCBuiltin<"__builtin_ia32_pabsd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE4.1
+
+// FP rounding ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_round_ss        : GCCBuiltin<"__builtin_ia32_roundss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse41_round_ps        : GCCBuiltin<"__builtin_ia32_roundps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse41_round_sd        : GCCBuiltin<"__builtin_ia32_roundsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_sse41_round_pd        : GCCBuiltin<"__builtin_ia32_roundpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+}
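+
+// Illustrative sketch: the i32 operand is the rounding-control immediate
+// (e.g. 9 = round toward -inf with precision exceptions suppressed):
+//   %f = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %v, i32 9)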
+
+// Vector min element
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_phminposuw     : GCCBuiltin<"__builtin_ia32_phminposuw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty],
+                        [IntrNoMem]>;
+}
+
+// Advanced Encryption Standard (AES) Instructions
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_aesni_aesimc          : GCCBuiltin<"__builtin_ia32_aesimc128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_aesni_aesenc          : GCCBuiltin<"__builtin_ia32_aesenc128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesenc_256      : GCCBuiltin<"__builtin_ia32_aesenc256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesenc_512      : GCCBuiltin<"__builtin_ia32_aesenc512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_aesni_aesenclast : GCCBuiltin<"__builtin_ia32_aesenclast128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesenclast_256 :
+    GCCBuiltin<"__builtin_ia32_aesenclast256">,
+    Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+              [IntrNoMem]>;
+  def int_x86_aesni_aesenclast_512 :
+    GCCBuiltin<"__builtin_ia32_aesenclast512">,
+    Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+              [IntrNoMem]>;
+
+  def int_x86_aesni_aesdec          : GCCBuiltin<"__builtin_ia32_aesdec128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesdec_256      : GCCBuiltin<"__builtin_ia32_aesdec256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesdec_512      : GCCBuiltin<"__builtin_ia32_aesdec512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_aesni_aesdeclast : GCCBuiltin<"__builtin_ia32_aesdeclast128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_aesni_aesdeclast_256 :
+    GCCBuiltin<"__builtin_ia32_aesdeclast256">,
+    Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+              [IntrNoMem]>;
+  def int_x86_aesni_aesdeclast_512 :
+    GCCBuiltin<"__builtin_ia32_aesdeclast512">,
+    Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+              [IntrNoMem]>;
+
+  def int_x86_aesni_aeskeygenassist :
+              GCCBuiltin<"__builtin_ia32_aeskeygenassist128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+}
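+
+// Illustrative sketch: aesenc performs one AES encryption round on a 128-bit
+// state with the given round key:
+//   %s1 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %state, <2 x i64> %rk)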
+
+// PCLMUL instructions
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+  def int_x86_pclmulqdq : GCCBuiltin<"__builtin_ia32_pclmulqdq128">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_pclmulqdq_256 : GCCBuiltin<"__builtin_ia32_pclmulqdq256">,
+          Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_pclmulqdq_512 : GCCBuiltin<"__builtin_ia32_pclmulqdq512">,
+          Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+}
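+
+// Illustrative sketch: the i8 immediate selects which 64-bit half of each
+// source enters the carry-less multiply (bit 0 for the first operand,
+// bit 4 for the second):
+//   %p = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a, <2 x i64> %b, i8 0)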
+
+// Vector pack
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_packusdw        : GCCBuiltin<"__builtin_ia32_packusdw128">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+}
+
+// Vector multiply
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_pmuldq          : GCCBuiltin<"__builtin_ia32_pmuldq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+}
+
+// Vector insert
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_insertps       : GCCBuiltin<"__builtin_ia32_insertps128">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+}
+
+// Vector blend
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_pblendvb         : GCCBuiltin<"__builtin_ia32_pblendvb128">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_v16i8_ty],
+                  [IntrNoMem]>;
+  def int_x86_sse41_blendvpd         : GCCBuiltin<"__builtin_ia32_blendvpd">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,llvm_v2f64_ty],
+                  [IntrNoMem]>;
+  def int_x86_sse41_blendvps         : GCCBuiltin<"__builtin_ia32_blendvps">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,llvm_v4f32_ty],
+                  [IntrNoMem]>;
+}
+
+// Vector dot product
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_dppd            : GCCBuiltin<"__builtin_ia32_dppd">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+                    [IntrNoMem, Commutative]>;
+  def int_x86_sse41_dpps            : GCCBuiltin<"__builtin_ia32_dpps">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+                    [IntrNoMem, Commutative]>;
+}
+
+// Vector sum of absolute differences
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_mpsadbw         : GCCBuiltin<"__builtin_ia32_mpsadbw128">,
+          Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_i8_ty],
+                    [IntrNoMem, Commutative]>;
+}
+
+// Test instruction with bitwise comparison.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+  def int_x86_sse41_ptestz          : GCCBuiltin<"__builtin_ia32_ptestz128">,
+          Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                    [IntrNoMem]>;
+  def int_x86_sse41_ptestc          : GCCBuiltin<"__builtin_ia32_ptestc128">,
+          Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                    [IntrNoMem]>;
+  def int_x86_sse41_ptestnzc        : GCCBuiltin<"__builtin_ia32_ptestnzc128">,
+          Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                    [IntrNoMem]>;
+}
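+
+// Illustrative sketch: ptestz returns 1 iff the bitwise AND of its operands
+// is all zero (the ZF result of PTEST):
+//   %z = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a, <2 x i64> %b)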
+
+//===----------------------------------------------------------------------===//
+// SSE4.2
+
+// Miscellaneous
+// CRC Instruction
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+  def int_x86_sse42_crc32_32_8       : GCCBuiltin<"__builtin_ia32_crc32qi">,
+          Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_sse42_crc32_32_16      : GCCBuiltin<"__builtin_ia32_crc32hi">,
+          Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_sse42_crc32_32_32      : GCCBuiltin<"__builtin_ia32_crc32si">,
+          Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                    [IntrNoMem]>;
+  def int_x86_sse42_crc32_64_64      : GCCBuiltin<"__builtin_ia32_crc32di">,
+          Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                    [IntrNoMem]>;
+}
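+
+// Illustrative sketch: these accumulate CRC-32C, so a running checksum
+// threads the previous result through as the first operand:
+//   %c1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %c0, i8 %byte)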
+
+// String/text processing ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+  def int_x86_sse42_pcmpistrm128  : GCCBuiltin<"__builtin_ia32_pcmpistrm128">,
+    Intrinsic<[llvm_v16i8_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistri128  : GCCBuiltin<"__builtin_ia32_pcmpistri128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistria128 : GCCBuiltin<"__builtin_ia32_pcmpistria128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistric128 : GCCBuiltin<"__builtin_ia32_pcmpistric128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistrio128 : GCCBuiltin<"__builtin_ia32_pcmpistrio128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistris128 : GCCBuiltin<"__builtin_ia32_pcmpistris128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpistriz128 : GCCBuiltin<"__builtin_ia32_pcmpistriz128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestrm128  : GCCBuiltin<"__builtin_ia32_pcmpestrm128">,
+    Intrinsic<[llvm_v16i8_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestri128  : GCCBuiltin<"__builtin_ia32_pcmpestri128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestria128 : GCCBuiltin<"__builtin_ia32_pcmpestria128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestric128 : GCCBuiltin<"__builtin_ia32_pcmpestric128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestrio128 : GCCBuiltin<"__builtin_ia32_pcmpestrio128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestris128 : GCCBuiltin<"__builtin_ia32_pcmpestris128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+  def int_x86_sse42_pcmpestriz128 : GCCBuiltin<"__builtin_ia32_pcmpestriz128">,
+    Intrinsic<[llvm_i32_ty],
+        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+         llvm_i8_ty],
+        [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE4A
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_sse4a_extrqi : GCCBuiltin<"__builtin_ia32_extrqi">,
+    Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty, llvm_i8_ty],
+              [IntrNoMem]>;
+  def int_x86_sse4a_extrq  : GCCBuiltin<"__builtin_ia32_extrq">,
+    Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+  def int_x86_sse4a_insertqi : GCCBuiltin<"__builtin_ia32_insertqi">,
+    Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                                llvm_i8_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_sse4a_insertq  : GCCBuiltin<"__builtin_ia32_insertq">,
+    Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// AVX
+
+// Arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_addsub_pd_256 : GCCBuiltin<"__builtin_ia32_addsubpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_addsub_ps_256 : GCCBuiltin<"__builtin_ia32_addsubps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_max_pd_256 : GCCBuiltin<"__builtin_ia32_maxpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_max_ps_256 : GCCBuiltin<"__builtin_ia32_maxps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_min_pd_256 : GCCBuiltin<"__builtin_ia32_minpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_min_ps_256 : GCCBuiltin<"__builtin_ia32_minps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx_sqrt_pd_256 : GCCBuiltin<"__builtin_ia32_sqrtpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_sqrt_ps_256 : GCCBuiltin<"__builtin_ia32_sqrtps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx_rsqrt_ps_256 : GCCBuiltin<"__builtin_ia32_rsqrtps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx_rcp_ps_256 : GCCBuiltin<"__builtin_ia32_rcpps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_avx_round_pd_256 : GCCBuiltin<"__builtin_ia32_roundpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx_round_ps_256 : GCCBuiltin<"__builtin_ia32_roundps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Horizontal ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_hadd_pd_256 : GCCBuiltin<"__builtin_ia32_haddpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_hsub_ps_256 : GCCBuiltin<"__builtin_ia32_hsubps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_hsub_pd_256 : GCCBuiltin<"__builtin_ia32_hsubpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_hadd_ps_256 : GCCBuiltin<"__builtin_ia32_haddps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector permutation
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_vpermilvar_pd : GCCBuiltin<"__builtin_ia32_vpermilvarpd">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                  llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx_vpermilvar_ps : GCCBuiltin<"__builtin_ia32_vpermilvarps">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                  llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx_vpermilvar_pd_256 :
+        GCCBuiltin<"__builtin_ia32_vpermilvarpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4i64_ty], [IntrNoMem]>;
+  def int_x86_avx_vpermilvar_ps_256 :
+        GCCBuiltin<"__builtin_ia32_vpermilvarps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_d_128 :
+       GCCBuiltin<"__builtin_ia32_vpermi2vard128_mask">,
+        Intrinsic<[llvm_v4i32_ty],
+        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+        [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2vard256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2vard512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_hi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+          [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_hi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi256_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+          [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_hi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varhi512_mask">,
+          Intrinsic<[llvm_v32i16_ty],
+          [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_pd_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_pd_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_pd_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_ps_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_ps_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_ps_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_d_512:
+        GCCBuiltin<"__builtin_ia32_vpermt2vard512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                  llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_q_512:
+        GCCBuiltin<"__builtin_ia32_vpermt2varq512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                  llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_ps_512:
+        GCCBuiltin<"__builtin_ia32_vpermt2varps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty,
+                  llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_pd_512:
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty,
+                  llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2vard128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2vard128_maskz">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2vard256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2vard256_maskz">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2vard512_maskz">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_hi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+          [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_hi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi128_maskz">,
+          Intrinsic<[llvm_v8i16_ty],
+          [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_hi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi256_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+          [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_hi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi256_maskz">,
+          Intrinsic<[llvm_v16i16_ty],
+          [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_hi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi512_mask">,
+          Intrinsic<[llvm_v32i16_ty],
+          [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_hi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varhi512_maskz">,
+          Intrinsic<[llvm_v32i16_ty],
+          [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_pd_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_pd_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd128_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_pd_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_pd_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd256_maskz">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_pd_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varpd512_maskz">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8i64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_ps_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_ps_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varps128_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_ps_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_ps_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varps256_maskz">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_ps_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varps512_maskz">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16i32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varq128_maskz">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varq256_maskz">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varq512_maskz">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_qi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi128_mask">,
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+          llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_qi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi128_mask">,
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+          llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_qi_128 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi128_maskz">,
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+          llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_qi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi256_mask">,
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+          llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_qi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi256_mask">,
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+          llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_qi_256 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi256_maskz">,
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+          llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermi2var_qi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermi2varqi512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
+          llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpermt2var_qi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
+          llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vpermt2var_qi_512 :
+        GCCBuiltin<"__builtin_ia32_vpermt2varqi512_maskz">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
+          llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_vpermilvar_pd_512 :
+        GCCBuiltin<"__builtin_ia32_vpermilvarpd512">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8i64_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_vpermilvar_ps_512 :
+        GCCBuiltin<"__builtin_ia32_vpermilvarps512">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_pshuf_b_512 :
+        GCCBuiltin<"__builtin_ia32_pshufb512">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
+          [IntrNoMem]>;
+
+}
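+
+// Illustrative sketch (operand order as in the records above; an all-ones
+// mask leaves every lane active): vpermt2var selects each result element
+// from the concatenation of the two data vectors using the index vector:
+//   %r = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(
+//            <16 x i32> %idx, <16 x i32> %a, <16 x i32> %b, i16 -1)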
+
+// GFNI Instructions
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_vgf2p8affineinvqb_128 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi">,
+          Intrinsic<[llvm_v16i8_ty],
+          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8affineinvqb_256 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi">,
+          Intrinsic<[llvm_v32i8_ty],
+          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8affineinvqb_512 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v64qi">,
+          Intrinsic<[llvm_v64i8_ty],
+          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_vgf2p8affineqb_128 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi">,
+          Intrinsic<[llvm_v16i8_ty],
+          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8affineqb_256 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi">,
+          Intrinsic<[llvm_v32i8_ty],
+          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8affineqb_512 :
+         GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v64qi">,
+          Intrinsic<[llvm_v64i8_ty],
+          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_vgf2p8mulb_128     :
+         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi">,
+          Intrinsic<[llvm_v16i8_ty],
+          [llvm_v16i8_ty, llvm_v16i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8mulb_256     :
+         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v32qi">,
+          Intrinsic<[llvm_v32i8_ty],
+          [llvm_v32i8_ty, llvm_v32i8_ty],
+          [IntrNoMem]>;
+  def int_x86_vgf2p8mulb_512     :
+         GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v64qi">,
+          Intrinsic<[llvm_v64i8_ty],
+          [llvm_v64i8_ty, llvm_v64i8_ty],
+          [IntrNoMem]>;
+}
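+
+// Illustrative sketch: gf2p8mulb multiplies corresponding bytes in GF(2^8)
+// modulo the AES polynomial x^8 + x^4 + x^3 + x + 1:
+//   %g = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %a, <16 x i8> %b)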
+
+// Vector blend
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_blendv_pd_256 : GCCBuiltin<"__builtin_ia32_blendvpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty, llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_blendv_ps_256 : GCCBuiltin<"__builtin_ia32_blendvps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty, llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector dot product
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_dp_ps_256 : GCCBuiltin<"__builtin_ia32_dpps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem, Commutative]>;
+}
+
+// Vector compare
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_cmp_pd_256 :
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx_cmp_ps_256 :
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+}
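+
+// Illustrative sketch: under AVX the i8 predicate immediate extends to 32
+// values (e.g. 17 = LT_OQ):
+//   %c = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a,
+//            <4 x double> %b, i8 17)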
+
+// Vector convert
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_cvtdq2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtdq2ps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+  def int_x86_avx_cvt_pd2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtpd2ps256">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_cvt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvtps2dq256">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_cvtt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvttpd2dq256">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_cvt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvtpd2dq256">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_cvtt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvttps2dq256">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
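+
+// Note: the "cvtt" forms truncate toward zero when converting to integer,
+// while the plain "cvt" forms round according to the current MXCSR
+// rounding mode.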
+
+// Vector bit test
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_vtestz_pd : GCCBuiltin<"__builtin_ia32_vtestzpd">,
+        Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                  llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestc_pd : GCCBuiltin<"__builtin_ia32_vtestcpd">,
+        Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                  llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestnzc_pd : GCCBuiltin<"__builtin_ia32_vtestnzcpd">,
+        Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                  llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestz_ps : GCCBuiltin<"__builtin_ia32_vtestzps">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                  llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestc_ps : GCCBuiltin<"__builtin_ia32_vtestcps">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                  llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestnzc_ps : GCCBuiltin<"__builtin_ia32_vtestnzcps">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                  llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestz_pd_256 : GCCBuiltin<"__builtin_ia32_vtestzpd256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestcpd256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestnzc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestnzcpd256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+                  llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestz_ps_256 : GCCBuiltin<"__builtin_ia32_vtestzps256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestcps256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_vtestnzc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestnzcps256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+                  llvm_v8f32_ty], [IntrNoMem]>;
+  def int_x86_avx_ptestz_256 : GCCBuiltin<"__builtin_ia32_ptestz256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+                  llvm_v4i64_ty], [IntrNoMem]>;
+  def int_x86_avx_ptestc_256 : GCCBuiltin<"__builtin_ia32_ptestc256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+                  llvm_v4i64_ty], [IntrNoMem]>;
+  def int_x86_avx_ptestnzc_256 : GCCBuiltin<"__builtin_ia32_ptestnzc256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+                  llvm_v4i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_fpclass_pd_128 :
+         GCCBuiltin<"__builtin_ia32_fpclasspd128_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_pd_256 :
+         GCCBuiltin<"__builtin_ia32_fpclasspd256_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v4f64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_pd_512 :
+         GCCBuiltin<"__builtin_ia32_fpclasspd512_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_ps_128 :
+         GCCBuiltin<"__builtin_ia32_fpclassps128_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_ps_256 :
+         GCCBuiltin<"__builtin_ia32_fpclassps256_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v8f32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_ps_512 :
+         GCCBuiltin<"__builtin_ia32_fpclassps512_mask">,
+          Intrinsic<[llvm_i16_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_sd :
+         GCCBuiltin<"__builtin_ia32_fpclasssd_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fpclass_ss :
+         GCCBuiltin<"__builtin_ia32_fpclassss_mask">,
+          Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+}
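+
+// Illustrative sketch (hypothetical %a, %b): vtestz returns 1 when the AND
+// of the operands' sign bits is all zero, vtestc when the AND-NOT is, and
+// vtestnzc when neither holds; the ptest* forms test all bits, not just
+// sign bits. The fpclass intrinsics return a lane mask of values matching
+// the categories (NaN, zero, denormal, infinity, negative, ...) selected by
+// the i32 immediate.
+//
+//   %z = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %a, <2 x double> %b)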
+
+// Vector extract sign mask
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_movmsk_pd_256 : GCCBuiltin<"__builtin_ia32_movmskpd256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_avx_movmsk_ps_256 : GCCBuiltin<"__builtin_ia32_movmskps256">,
+        Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector zero
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
+        Intrinsic<[], [], []>;
+  def int_x86_avx_vzeroupper : GCCBuiltin<"__builtin_ia32_vzeroupper">,
+        Intrinsic<[], [], []>;
+}
+
+// SIMD load ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// Conditional load ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+}
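+
+// Illustrative sketch (hypothetical %p, %mask): the sign bit of each mask
+// element decides whether that lane is loaded; masked-off lanes read as
+// zero and their memory is never accessed, so they cannot fault.
+//
+//   %v = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %p, <4 x i32> %mask)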
+
+// Conditional store ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
+        Intrinsic<[], [llvm_ptr_ty,
+                  llvm_v2i64_ty, llvm_v2f64_ty], [IntrArgMemOnly]>;
+  def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
+        Intrinsic<[], [llvm_ptr_ty,
+                  llvm_v4i32_ty, llvm_v4f32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx_maskstore_pd_256 :
+        GCCBuiltin<"__builtin_ia32_maskstorepd256">,
+        Intrinsic<[], [llvm_ptr_ty,
+                  llvm_v4i64_ty, llvm_v4f64_ty], [IntrArgMemOnly]>;
+  def int_x86_avx_maskstore_ps_256 :
+        GCCBuiltin<"__builtin_ia32_maskstoreps256">,
+        Intrinsic<[], [llvm_ptr_ty,
+                  llvm_v8i32_ty, llvm_v8f32_ty], [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_store_ss :
+        GCCBuiltin<"__builtin_ia32_storess_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty, llvm_i8_ty],
+                  [IntrArgMemOnly]>;
+}
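+
+// Illustrative sketch (hypothetical %p, %mask, %v): the store counterpart
+// writes only the lanes whose mask element has its sign bit set.
+//
+//   call void @llvm.x86.avx.maskstore.ps(i8* %p, <4 x i32> %mask,
+//                                        <4 x float> %v)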
+
+// BITALG bits shuffle
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_mask_vpshufbitqmb_128 :
+    GCCBuiltin<"__builtin_ia32_vpshufbitqmb128_mask">,
+    Intrinsic<[llvm_i16_ty],
+              [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+              [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshufbitqmb_256 :
+    GCCBuiltin<"__builtin_ia32_vpshufbitqmb256_mask">,
+    Intrinsic<[llvm_i32_ty],
+              [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshufbitqmb_512 :
+    GCCBuiltin<"__builtin_ia32_vpshufbitqmb512_mask">,
+    Intrinsic<[llvm_i64_ty],
+              [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+              [IntrNoMem]>;
+}
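+
+// Illustrative sketch (hypothetical %a, %ctl): for each byte of the control
+// operand, the low six bits select one bit from the corresponding 64-bit
+// lane of the first operand; the gathered bits form the result, ANDed with
+// the trailing write-mask argument.
+//
+//   %k = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(
+//            <16 x i8> %a, <16 x i8> %ctl, i16 -1)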
+
+//===----------------------------------------------------------------------===//
+// AVX2
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_pmul_dq : GCCBuiltin<"__builtin_ia32_pmuldq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+}
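+
+// Illustrative sketch (hypothetical %a, %b): the padds/paddus and
+// psubs/psubus families saturate to the signed or unsigned range of the
+// element type instead of wrapping, and psadbw sums absolute byte
+// differences into one 64-bit result per group of eight bytes.
+//
+//   %s = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)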
+
+// Integer shift ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_psll_w : GCCBuiltin<"__builtin_ia32_psllw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_psll_d : GCCBuiltin<"__builtin_ia32_pslld256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psll_q : GCCBuiltin<"__builtin_ia32_psllq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx2_psra_w : GCCBuiltin<"__builtin_ia32_psraw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_psra_d : GCCBuiltin<"__builtin_ia32_psrad256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_psra_q_128 : GCCBuiltin<"__builtin_ia32_psraq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_psra_q_256 : GCCBuiltin<"__builtin_ia32_psraq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_psrai_q_128 : GCCBuiltin<"__builtin_ia32_psraqi128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrai_q_256 : GCCBuiltin<"__builtin_ia32_psraqi256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_psll_w_512 : GCCBuiltin<"__builtin_ia32_psllw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_psll_d_512 : GCCBuiltin<"__builtin_ia32_pslld512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psll_q_512 : GCCBuiltin<"__builtin_ia32_psllq512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrl_w_512 : GCCBuiltin<"__builtin_ia32_psrlw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrl_d_512 : GCCBuiltin<"__builtin_ia32_psrld512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrl_q_512 : GCCBuiltin<"__builtin_ia32_psrlq512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_psra_w_512 : GCCBuiltin<"__builtin_ia32_psraw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_psra_d_512 : GCCBuiltin<"__builtin_ia32_psrad512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psra_q_512 : GCCBuiltin<"__builtin_ia32_psraq512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v2i64_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_pslli_w_512 : GCCBuiltin<"__builtin_ia32_psllwi512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_pslli_d_512 : GCCBuiltin<"__builtin_ia32_pslldi512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_pslli_q_512 : GCCBuiltin<"__builtin_ia32_psllqi512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrli_w_512 : GCCBuiltin<"__builtin_ia32_psrlwi512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrli_d_512 : GCCBuiltin<"__builtin_ia32_psrldi512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrli_q_512 : GCCBuiltin<"__builtin_ia32_psrlqi512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrai_w_512 : GCCBuiltin<"__builtin_ia32_psrawi512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrai_d_512 : GCCBuiltin<"__builtin_ia32_psradi512">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_psrai_q_512 : GCCBuiltin<"__builtin_ia32_psraqi512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pmultishift_qb_128 :
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb128_mask">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                   llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pmultishift_qb_256 :
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb256_mask">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                   llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pmultishift_qb_512 :
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb512_mask">,
+        Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
+                   llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+}
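+
+// Illustrative sketch (hypothetical %a, %cnt): psll/psrl/psra shift every
+// element by a single count taken from the low bits of a 128-bit vector
+// operand, while the *i forms take the count as an i32 immediate; counts at
+// or above the element width yield zero (or all sign bits for psra).
+//
+//   %r = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a, <8 x i16> %cnt)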
+
+// Pack ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_packsswb : GCCBuiltin<"__builtin_ia32_packsswb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_packssdw : GCCBuiltin<"__builtin_ia32_packssdw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_packuswb : GCCBuiltin<"__builtin_ia32_packuswb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_packusdw : GCCBuiltin<"__builtin_ia32_packusdw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem]>;
+}
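+
+// Illustrative sketch (hypothetical %a, %b): the pack ops narrow each input
+// element with signed (packss*) or unsigned (packus*) saturation, combining
+// the two sources 128-bit lane by lane.
+//
+//   %n = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)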
+
+// Horizontal arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem]>;
+  def int_x86_avx2_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+}
+
+// Sign ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_psign_b : GCCBuiltin<"__builtin_ia32_psignb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx2_psign_w : GCCBuiltin<"__builtin_ia32_psignw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem]>;
+  def int_x86_avx2_psign_d : GCCBuiltin<"__builtin_ia32_psignd256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty], [IntrNoMem]>;
+}
+
+// Packed multiply high with round and scale
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx512_pmul_hr_sw_512 : GCCBuiltin<"__builtin_ia32_pmulhrsw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v32i16_ty], [IntrNoMem, Commutative]>;
+}
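+
+// Illustrative sketch (hypothetical %a, %b): pmulhrsw computes, per i16
+// lane, (a * b + 0x4000) >> 15, a fixed-point multiply with rounding.
+//
+//   %h = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a,
+//                                                  <16 x i16> %b)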
+
+// Vector blend
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_pblendvb : GCCBuiltin<"__builtin_ia32_pblendvb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+}
+
+// Vector permutation
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_permd : GCCBuiltin<"__builtin_ia32_permvarsi256">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_permps : GCCBuiltin<"__builtin_ia32_permvarsf256">,
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty],
+                        [IntrNoMem]>;
+}
+
+// Conditional load ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+                  [IntrReadMem, IntrArgMemOnly]>;
+}
+
+// Conditional store ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                  [IntrArgMemOnly]>;
+  def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+                  [IntrArgMemOnly]>;
+  def int_x86_avx2_maskstore_d_256 :
+        GCCBuiltin<"__builtin_ia32_maskstored256">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty],
+                  [IntrArgMemOnly]>;
+  def int_x86_avx2_maskstore_q_256 :
+        GCCBuiltin<"__builtin_ia32_maskstoreq256">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+                  [IntrArgMemOnly]>;
+}
+
+// Variable bit shift ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_psllv_d : GCCBuiltin<"__builtin_ia32_psllv4si">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psllv_d_256 : GCCBuiltin<"__builtin_ia32_psllv8si">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psllv_q : GCCBuiltin<"__builtin_ia32_psllv2di">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psllv_q_256 : GCCBuiltin<"__builtin_ia32_psllv4di">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psllv_d_512 : GCCBuiltin<"__builtin_ia32_psllv16si">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psllv_q_512 : GCCBuiltin<"__builtin_ia32_psllv8di">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx2_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv4si">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psrlv_d_256 : GCCBuiltin<"__builtin_ia32_psrlv8si">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psrlv_q : GCCBuiltin<"__builtin_ia32_psrlv2di">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psrlv_q_256 : GCCBuiltin<"__builtin_ia32_psrlv4di">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psrlv_d_512 : GCCBuiltin<"__builtin_ia32_psrlv16si">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrlv_q_512 : GCCBuiltin<"__builtin_ia32_psrlv8di">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx2_psrav_d : GCCBuiltin<"__builtin_ia32_psrav4si">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx2_psrav_d_256 : GCCBuiltin<"__builtin_ia32_psrav8si">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psrav_d_512 : GCCBuiltin<"__builtin_ia32_psrav16si">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrav_q_128 : GCCBuiltin<"__builtin_ia32_psravq128">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrav_q_256 : GCCBuiltin<"__builtin_ia32_psravq256">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrav_q_512 : GCCBuiltin<"__builtin_ia32_psrav8di">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psllv_w_128 : GCCBuiltin<"__builtin_ia32_psllv8hi">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psllv_w_256 : GCCBuiltin<"__builtin_ia32_psllv16hi">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psllv_w_512 : GCCBuiltin<"__builtin_ia32_psllv32hi">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psrlv_w_128 : GCCBuiltin<"__builtin_ia32_psrlv8hi">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrlv_w_256 : GCCBuiltin<"__builtin_ia32_psrlv16hi">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrlv_w_512 : GCCBuiltin<"__builtin_ia32_psrlv32hi">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_psrav_w_128 : GCCBuiltin<"__builtin_ia32_psrav8hi">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrav_w_256 : GCCBuiltin<"__builtin_ia32_psrav16hi">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_psrav_w_512 : GCCBuiltin<"__builtin_ia32_psrav32hi">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_avx512_mask_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_prol_d_128 : GCCBuiltin<"__builtin_ia32_prold128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prol_d_256 : GCCBuiltin<"__builtin_ia32_prold256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prol_d_512 : GCCBuiltin<"__builtin_ia32_prold512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prol_q_128 : GCCBuiltin<"__builtin_ia32_prolq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prol_q_256 : GCCBuiltin<"__builtin_ia32_prolq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prol_q_512 : GCCBuiltin<"__builtin_ia32_prolq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_prolv_d_128 : GCCBuiltin<"__builtin_ia32_prolvd128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prolv_d_256 : GCCBuiltin<"__builtin_ia32_prolvd256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prolv_d_512 : GCCBuiltin<"__builtin_ia32_prolvd512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prolv_q_128 : GCCBuiltin<"__builtin_ia32_prolvq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prolv_q_256 : GCCBuiltin<"__builtin_ia32_prolvq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prolv_q_512 : GCCBuiltin<"__builtin_ia32_prolvq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_d_128 : GCCBuiltin<"__builtin_ia32_prord128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_d_256 : GCCBuiltin<"__builtin_ia32_prord256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_d_512 : GCCBuiltin<"__builtin_ia32_prord512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_q_128 : GCCBuiltin<"__builtin_ia32_prorq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_q_256 : GCCBuiltin<"__builtin_ia32_prorq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_pror_q_512 : GCCBuiltin<"__builtin_ia32_prorq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
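+
+// Illustrative sketch (hypothetical %a, %cnt): the psllv/psrlv/psrav
+// families shift each lane by its own per-lane count, and the prol/pror
+// (immediate) and prolv/prorv (per-lane) families rotate instead of
+// shifting; on the masked forms the extra vector operand is the
+// pass-through value and the trailing integer is the write mask.
+//
+//   %r = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a,
+//                                                  <8 x i32> %cnt)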
+
+// Gather ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_gather_d_pd : GCCBuiltin<"__builtin_ia32_gatherd_pd">,
+      Intrinsic<[llvm_v2f64_ty],
+        [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_pd_256 : GCCBuiltin<"__builtin_ia32_gatherd_pd256">,
+      Intrinsic<[llvm_v4f64_ty],
+        [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_pd : GCCBuiltin<"__builtin_ia32_gatherq_pd">,
+      Intrinsic<[llvm_v2f64_ty],
+        [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_pd_256 : GCCBuiltin<"__builtin_ia32_gatherq_pd256">,
+      Intrinsic<[llvm_v4f64_ty],
+        [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_ps : GCCBuiltin<"__builtin_ia32_gatherd_ps">,
+      Intrinsic<[llvm_v4f32_ty],
+        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_ps_256 : GCCBuiltin<"__builtin_ia32_gatherd_ps256">,
+      Intrinsic<[llvm_v8f32_ty],
+        [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_ps : GCCBuiltin<"__builtin_ia32_gatherq_ps">,
+      Intrinsic<[llvm_v4f32_ty],
+        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_ps_256 : GCCBuiltin<"__builtin_ia32_gatherq_ps256">,
+      Intrinsic<[llvm_v4f32_ty],
+        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx2_gather_d_q : GCCBuiltin<"__builtin_ia32_gatherd_q">,
+      Intrinsic<[llvm_v2i64_ty],
+        [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_q_256 : GCCBuiltin<"__builtin_ia32_gatherd_q256">,
+      Intrinsic<[llvm_v4i64_ty],
+        [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_q : GCCBuiltin<"__builtin_ia32_gatherq_q">,
+      Intrinsic<[llvm_v2i64_ty],
+        [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_q_256 : GCCBuiltin<"__builtin_ia32_gatherq_q256">,
+      Intrinsic<[llvm_v4i64_ty],
+        [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_d : GCCBuiltin<"__builtin_ia32_gatherd_d">,
+      Intrinsic<[llvm_v4i32_ty],
+        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_d_d_256 : GCCBuiltin<"__builtin_ia32_gatherd_d256">,
+      Intrinsic<[llvm_v8i32_ty],
+        [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_d : GCCBuiltin<"__builtin_ia32_gatherq_d">,
+      Intrinsic<[llvm_v4i32_ty],
+        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx2_gather_q_d_256 : GCCBuiltin<"__builtin_ia32_gatherq_d256">,
+      Intrinsic<[llvm_v4i32_ty],
+        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+        [IntrReadMem, IntrArgMemOnly]>;
+}
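+
+// Illustrative sketch (hypothetical %src, %base, %idx, %mask): each gather
+// loads from base + index * scale for every lane whose mask element has its
+// sign bit set; masked-off lanes keep the pass-through (first) operand, and
+// the trailing i8 is the scale (1, 2, 4, or 8).
+//
+//   %g = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %src,
+//            i8* %base, <4 x i32> %idx, <4 x float> %mask, i8 4)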
+
+// Misc.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
+              Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx2_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb256">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                         llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx2_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw256">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                         llvm_i8_ty], [IntrNoMem, Commutative]>;
+}
+
+//===----------------------------------------------------------------------===//
+// FMA3 and FMA4
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_fma_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss3">,
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd3">,
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma4_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss">,
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma4_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd">,
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_fma_vfmsub_ss : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsub_sd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsub_ps : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsub_pd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsub_ps_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsub_pd_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_ss : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_sd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_ps : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_pd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_ps_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmadd_pd_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_ss : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_sd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_ps : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_pd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_ps_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfnmsub_pd_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmaddsub_ps_256 :
+               GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmaddsub_pd_256 :
+              GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsubadd_ps : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f32_ty],
+                        [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsubadd_pd : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v2f64_ty],
+                        [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsubadd_ps_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v8f32_ty],
+                        [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+                        [IntrNoMem]>;
+  def int_x86_fma_vfmsubadd_pd_256 : // TODO: remove this intrinsic
+              Intrinsic<[llvm_v4f64_ty],
+                        [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+                        [IntrNoMem]>;
+
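+// Naming convention for the AVX-512 FMA definitions below: "mask" merges
+// masked-off lanes from the first source operand, "mask3" merges from the
+// third (accumulator) operand, and "maskz" zeroes them; on the 512-bit and
+// scalar forms the trailing i32 selects the static rounding mode.
+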
+  def int_x86_avx512_mask_vfmadd_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd128_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask3">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd256_maskz">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask3">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddpd512_maskz">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps128_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps128_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps256_mask3">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps256_maskz">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps512_mask3">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddps512_maskz">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask3">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_maskz">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask3">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_maskz">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps128_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask3">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps256_maskz">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmaddsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask3">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmaddsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmaddsubps512_maskz">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_sd :
+         GCCBuiltin<"__builtin_ia32_vfmaddsd3_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_ss :
+         GCCBuiltin<"__builtin_ia32_vfmaddss3_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_sd :
+         GCCBuiltin<"__builtin_ia32_vfmaddsd3_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_vfmadd_ss :
+         GCCBuiltin<"__builtin_ia32_vfmaddss3_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_sd :
+         GCCBuiltin<"__builtin_ia32_vfmaddsd3_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmadd_ss :
+         GCCBuiltin<"__builtin_ia32_vfmaddss3_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_sd :
+         GCCBuiltin<"__builtin_ia32_vfmsubsd3_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_ss :
+         GCCBuiltin<"__builtin_ia32_vfmsubss3_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmsubpd128_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmsubpd256_mask3">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmsubpd512_mask3">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmsubps128_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmsubps256_mask3">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmsubps512_mask3">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddpd128_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddpd256_mask3">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddpd512_mask3">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddps128_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddps256_mask3">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfmsubadd_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfmsubaddps512_mask3">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmadd_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmaddps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_sd :
+         GCCBuiltin<"__builtin_ia32_vfnmsubsd3_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_ss :
+         GCCBuiltin<"__builtin_ia32_vfnmsubss3_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_pd_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask3">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_pd_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask3">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_pd_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask3">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,  llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_ps_128 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask3">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_ps_256 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask3">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfnmsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask3_vfnmsub_ps_512 :
+         GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask3">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,  llvm_i16_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpmadd52h_uq_128 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52h_uq_128 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq128_maskz">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpmadd52l_uq_128 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52l_uq_128 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq128_maskz">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpmadd52h_uq_256 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52h_uq_256 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq256_maskz">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpmadd52l_uq_256 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52l_uq_256 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq256_maskz">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpmadd52h_uq_512 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52h_uq_512 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52huq512_maskz">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpmadd52l_uq_512 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpmadd52l_uq_512 :
+              GCCBuiltin<"__builtin_ia32_vpmadd52luq512_maskz">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
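+
+// The mask/maskz/mask3 variants above differ only in which operand supplies
+// the lanes whose mask bit is clear, and the extra llvm_i32_ty on the 512-bit
+// forms carries the rounding-mode immediate. A C sketch of the usual mapping,
+// for vectors a, b, c and a mask k (the _mm512_* wrapper names come from
+// <immintrin.h>, not from this file):
+//
+//   __m512d r1 = _mm512_mask_fmadd_pd(a, k, b, c);  // k==0 lanes keep a
+//   __m512d r2 = _mm512_maskz_fmadd_pd(k, a, b, c); // k==0 lanes become 0.0
+//   __m512d r3 = _mm512_mask3_fmadd_pd(a, b, c, k); // k==0 lanes keep c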
+
+// VNNI (Vector Neural Network Instructions)
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_mask_vpdpbusd_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusd_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd128_maskz">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpbusd_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusd_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd256_maskz">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpbusd_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusd_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusd512_maskz">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpdpbusds_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusds_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds128_maskz">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpbusds_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusds_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds256_maskz">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpbusds_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpbusds_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpbusds512_maskz">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpdpwssd_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssd_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd128_maskz">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpwssd_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssd_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd256_maskz">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpwssd_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssd_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssd512_maskz">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpdpwssds_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssds_128 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds128_maskz">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpwssds_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssds_256 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds256_maskz">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpdpwssds_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpdpwssds_512 :
+              GCCBuiltin<"__builtin_ia32_vpdpwssds512_maskz">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+}
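+
+// A scalar reference for the vpdpbusd lane semantics above (illustrative
+// only): each i32 lane accumulates four unsigned-by-signed byte products.
+//
+//   int32_t dpbusd_lane(int32_t acc, const uint8_t a[4], const int8_t b[4]) {
+//     for (int j = 0; j < 4; ++j)
+//       acc += a[j] * b[j];     // u8 x s8 products, widened to i32
+//     return acc;               // the vpdpbusds forms saturate this sum
+//   }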
+
+//===----------------------------------------------------------------------===//
+// XOP
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_xop_vpermil2pd : GCCBuiltin<"__builtin_ia32_vpermil2pd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                          llvm_v2i64_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_xop_vpermil2pd_256 :
+              GCCBuiltin<"__builtin_ia32_vpermil2pd256">,
+              Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                                          llvm_v4i64_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_xop_vpermil2ps : GCCBuiltin<"__builtin_ia32_vpermil2ps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                          llvm_v4i32_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpermil2ps_256 :
+              GCCBuiltin<"__builtin_ia32_vpermil2ps256">,
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                                          llvm_v8i32_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_xop_vfrcz_pd : GCCBuiltin<"__builtin_ia32_vfrczpd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_xop_vfrcz_ps : GCCBuiltin<"__builtin_ia32_vfrczps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_xop_vfrcz_sd : GCCBuiltin<"__builtin_ia32_vfrczsd">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_xop_vfrcz_ss : GCCBuiltin<"__builtin_ia32_vfrczss">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+  def int_x86_xop_vfrcz_pd_256 : GCCBuiltin<"__builtin_ia32_vfrczpd256">,
+              Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+  def int_x86_xop_vfrcz_ps_256 : GCCBuiltin<"__builtin_ia32_vfrczps256">,
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+  def int_x86_xop_vpcomb : GCCBuiltin<"__builtin_ia32_vpcomb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomw : GCCBuiltin<"__builtin_ia32_vpcomw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomd : GCCBuiltin<"__builtin_ia32_vpcomd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomq : GCCBuiltin<"__builtin_ia32_vpcomq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomub : GCCBuiltin<"__builtin_ia32_vpcomub">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomuw : GCCBuiltin<"__builtin_ia32_vpcomuw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomud : GCCBuiltin<"__builtin_ia32_vpcomud">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vpcomuq : GCCBuiltin<"__builtin_ia32_vpcomuq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                         llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_xop_vphaddbd :
+              GCCBuiltin<"__builtin_ia32_vphaddbd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddbq :
+              GCCBuiltin<"__builtin_ia32_vphaddbq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddbw :
+              GCCBuiltin<"__builtin_ia32_vphaddbw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphadddq :
+              GCCBuiltin<"__builtin_ia32_vphadddq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddubd :
+              GCCBuiltin<"__builtin_ia32_vphaddubd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddubq :
+              GCCBuiltin<"__builtin_ia32_vphaddubq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddubw :
+              GCCBuiltin<"__builtin_ia32_vphaddubw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddudq :
+              GCCBuiltin<"__builtin_ia32_vphaddudq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_xop_vphadduwd :
+              GCCBuiltin<"__builtin_ia32_vphadduwd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_xop_vphadduwq :
+              GCCBuiltin<"__builtin_ia32_vphadduwq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddwd :
+              GCCBuiltin<"__builtin_ia32_vphaddwd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_xop_vphaddwq :
+              GCCBuiltin<"__builtin_ia32_vphaddwq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_xop_vphsubbw :
+              GCCBuiltin<"__builtin_ia32_vphsubbw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_xop_vphsubdq :
+              GCCBuiltin<"__builtin_ia32_vphsubdq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_xop_vphsubwd :
+              GCCBuiltin<"__builtin_ia32_vphsubwd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_xop_vpmacsdd :
+              GCCBuiltin<"__builtin_ia32_vpmacsdd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacsdqh :
+              GCCBuiltin<"__builtin_ia32_vpmacsdqh">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacsdql :
+              GCCBuiltin<"__builtin_ia32_vpmacsdql">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacssdd :
+              GCCBuiltin<"__builtin_ia32_vpmacssdd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacssdqh :
+              GCCBuiltin<"__builtin_ia32_vpmacssdqh">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacssdql :
+              GCCBuiltin<"__builtin_ia32_vpmacssdql">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacsswd :
+              GCCBuiltin<"__builtin_ia32_vpmacsswd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacssww :
+              GCCBuiltin<"__builtin_ia32_vpmacssww">,
+              Intrinsic<[llvm_v8i16_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacswd :
+              GCCBuiltin<"__builtin_ia32_vpmacswd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmacsww :
+              GCCBuiltin<"__builtin_ia32_vpmacsww">,
+              Intrinsic<[llvm_v8i16_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmadcsswd :
+              GCCBuiltin<"__builtin_ia32_vpmadcsswd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpmadcswd :
+              GCCBuiltin<"__builtin_ia32_vpmadcswd">,
+              Intrinsic<[llvm_v4i32_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_xop_vpperm :
+              GCCBuiltin<"__builtin_ia32_vpperm">,
+              Intrinsic<[llvm_v16i8_ty],
+                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_xop_vprotb : GCCBuiltin<"__builtin_ia32_vprotb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotd : GCCBuiltin<"__builtin_ia32_vprotd">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotq : GCCBuiltin<"__builtin_ia32_vprotq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotw : GCCBuiltin<"__builtin_ia32_vprotw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotbi : GCCBuiltin<"__builtin_ia32_vprotbi">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotdi : GCCBuiltin<"__builtin_ia32_vprotdi">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotqi : GCCBuiltin<"__builtin_ia32_vprotqi">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vprotwi : GCCBuiltin<"__builtin_ia32_vprotwi">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_xop_vpshab :
+              GCCBuiltin<"__builtin_ia32_vpshab">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshad :
+              GCCBuiltin<"__builtin_ia32_vpshad">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshaq :
+              GCCBuiltin<"__builtin_ia32_vpshaq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshaw :
+              GCCBuiltin<"__builtin_ia32_vpshaw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshlb :
+              GCCBuiltin<"__builtin_ia32_vpshlb">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshld :
+              GCCBuiltin<"__builtin_ia32_vpshld">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshlq :
+              GCCBuiltin<"__builtin_ia32_vpshlq">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+  def int_x86_xop_vpshlw :
+              GCCBuiltin<"__builtin_ia32_vpshlw">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                        [IntrNoMem]>;
+}
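+
+// The vprot* definitions above take a per-lane signed rotate count: positive
+// rotates left, negative rotates right. A scalar sketch of one i32 lane
+// (illustrative only):
+//
+//   uint32_t rot32(uint32_t v, int c) {
+//     unsigned n = (unsigned)(c < 0 ? -c : c) & 31;
+//     return c < 0 ? (v >> n) | (v << ((32 - n) & 31))
+//                  : (v << n) | (v >> ((32 - n) & 31));
+//   }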
+
+//===----------------------------------------------------------------------===//
+// LWP
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_llwpcb :
+              GCCBuiltin<"__builtin_ia32_llwpcb">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_slwpcb :
+              GCCBuiltin<"__builtin_ia32_slwpcb">,
+              Intrinsic<[llvm_ptr_ty], [], []>;
+  def int_x86_lwpins32 :
+              GCCBuiltin<"__builtin_ia32_lwpins32">,
+              Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_lwpins64 :
+              GCCBuiltin<"__builtin_ia32_lwpins64">,
+              Intrinsic<[llvm_i8_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_lwpval32 :
+              GCCBuiltin<"__builtin_ia32_lwpval32">,
+              Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_lwpval64 :
+              GCCBuiltin<"__builtin_ia32_lwpval64">,
+              Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// MMX
+
+// Empty MMX state op.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_mmx_emms  : GCCBuiltin<"__builtin_ia32_emms">,
+              Intrinsic<[], [], []>;
+  def int_x86_mmx_femms : GCCBuiltin<"__builtin_ia32_femms">,
+              Intrinsic<[], [], []>;
+}
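+
+// EMMS/FEMMS reset the x87 tag word after MMX code, because the MMX registers
+// alias the x87 stack. Illustrative use for __m64 values a and b, assuming
+// the <mmintrin.h> wrappers (not defined in this file):
+//
+//   __m64 s = _mm_add_pi16(a, b);  // ... MMX work ...
+//   _mm_empty();                   // required before later x87 FP code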
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  // Addition
+  def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+
+  def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  // Subtraction
+  def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+
+  // Multiplication
+  def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  // Bitwise operations
+  def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem, Commutative]>;
+
+  // Averages
+  def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  // Maximum
+  def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  // Minimum
+  def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  // Packed sum of absolute differences
+  def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+}
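+
+// The padds/paddus and psubs/psubus forms saturate instead of wrapping. A
+// scalar reference for one signed-byte lane (illustrative only):
+//
+//   int8_t sat_add_i8(int8_t x, int8_t y) {
+//     int s = (int)x + (int)y;
+//     return (int8_t)(s > 127 ? 127 : (s < -128 ? -128 : s));
+//   }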
+
+// Integer shift ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  // Shift left logical
+  def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Permute
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_mask_permvar_df_256 : GCCBuiltin<"__builtin_ia32_permvardf256_mask">,
+              Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+                        llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_df_512 : GCCBuiltin<"__builtin_ia32_permvardf512_mask">,
+              Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty,
+                        llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_di_256 : GCCBuiltin<"__builtin_ia32_permvardi256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                        llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_di_512 : GCCBuiltin<"__builtin_ia32_permvardi512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                        llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_hi_128 : GCCBuiltin<"__builtin_ia32_permvarhi128_mask">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                        llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_hi_256 : GCCBuiltin<"__builtin_ia32_permvarhi256_mask">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+                        llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_hi_512 : GCCBuiltin<"__builtin_ia32_permvarhi512_mask">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                        llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_qi_128 : GCCBuiltin<"__builtin_ia32_permvarqi128_mask">,
+              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                        llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_qi_256 : GCCBuiltin<"__builtin_ia32_permvarqi256_mask">,
+              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+                        llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_qi_512 : GCCBuiltin<"__builtin_ia32_permvarqi512_mask">,
+              Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
+                        llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_sf_256 : // TODO: Remove this intrinsic
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+                        llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_sf_512 : GCCBuiltin<"__builtin_ia32_permvarsf512_mask">,
+              Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty,
+                        llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_si_256 : // TODO: Remove this intrinsic
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                        llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],  [IntrNoMem]>;
+  def int_x86_avx512_mask_permvar_si_512 : GCCBuiltin<"__builtin_ia32_permvarsi512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                        llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],  [IntrNoMem]>;
+}
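+
+// Each permvar definition selects lanes of its first operand using the index
+// vector, then merges with the passthrough operand under the mask. One-lane
+// sketch (illustrative only), for an N-lane vector:
+//
+//   dst[i] = ((k >> i) & 1) ? data[idx[i] % N] : passthru[i]
+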
+// Pack ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// Unpacking ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+  def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+                        [IntrNoMem]>;
+}
+
+// Integer comparison ops.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+  def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+  def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+  def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                         llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">,
+              Intrinsic<[], [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_ptr_ty], []>;
+
+  def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">,
+              Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">,
+              Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
+
+  def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                        llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_mmx_pextr_w : GCCBuiltin<"__builtin_ia32_vec_ext_v4hi">,
+              Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+
+  def int_x86_mmx_pinsr_w : GCCBuiltin<"__builtin_ia32_vec_set_v4hi">,
+              Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+                        llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// BMI
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_bmi_bextr_32 : GCCBuiltin<"__builtin_ia32_bextr_u32">,
+              Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_bmi_bextr_64 : GCCBuiltin<"__builtin_ia32_bextr_u64">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_bmi_bzhi_32 : GCCBuiltin<"__builtin_ia32_bzhi_si">,
+              Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_bmi_bzhi_64 : GCCBuiltin<"__builtin_ia32_bzhi_di">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_bmi_pdep_32 : GCCBuiltin<"__builtin_ia32_pdep_si">,
+              Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_bmi_pdep_64 : GCCBuiltin<"__builtin_ia32_pdep_di">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_bmi_pext_32 : GCCBuiltin<"__builtin_ia32_pext_si">,
+              Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_bmi_pext_64 : GCCBuiltin<"__builtin_ia32_pext_di">,
+              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+}
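+
+// pdep scatters the low source bits to the set positions of the mask; pext is
+// the inverse gather. A scalar reference (illustrative; _pdep_u32/_pext_u32
+// are the <immintrin.h> spellings, not defined here):
+//
+//   uint32_t pdep32(uint32_t src, uint32_t mask) {
+//     uint32_t dst = 0;
+//     for (uint32_t bit = 1; mask; bit <<= 1, mask &= mask - 1)
+//       if (src & bit)
+//         dst |= mask & -mask;   // lowest set bit of the remaining mask
+//     return dst;
+//   }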
+
+//===----------------------------------------------------------------------===//
+// FS/GS Base
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_rdfsbase_32 : GCCBuiltin<"__builtin_ia32_rdfsbase32">,
+              Intrinsic<[llvm_i32_ty], []>;
+  def int_x86_rdgsbase_32 : GCCBuiltin<"__builtin_ia32_rdgsbase32">,
+              Intrinsic<[llvm_i32_ty], []>;
+  def int_x86_rdfsbase_64 : GCCBuiltin<"__builtin_ia32_rdfsbase64">,
+              Intrinsic<[llvm_i64_ty], []>;
+  def int_x86_rdgsbase_64 : GCCBuiltin<"__builtin_ia32_rdgsbase64">,
+              Intrinsic<[llvm_i64_ty], []>;
+  def int_x86_wrfsbase_32 : GCCBuiltin<"__builtin_ia32_wrfsbase32">,
+              Intrinsic<[], [llvm_i32_ty]>;
+  def int_x86_wrgsbase_32 : GCCBuiltin<"__builtin_ia32_wrgsbase32">,
+              Intrinsic<[], [llvm_i32_ty]>;
+  def int_x86_wrfsbase_64 : GCCBuiltin<"__builtin_ia32_wrfsbase64">,
+              Intrinsic<[], [llvm_i64_ty]>;
+  def int_x86_wrgsbase_64 : GCCBuiltin<"__builtin_ia32_wrgsbase64">,
+              Intrinsic<[], [llvm_i64_ty]>;
+}
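+
+// These read and write the FS/GS segment base registers; user mode may use
+// them only when the OS enables CR4.FSGSBASE. Illustrative use, assuming the
+// <immintrin.h> wrappers (not defined in this file):
+//
+//   unsigned long long base = _readfsbase_u64();
+//   _writegsbase_u64(base);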
+
+//===----------------------------------------------------------------------===//
+// FXSR
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_fxrstor : GCCBuiltin<"__builtin_ia32_fxrstor">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_fxrstor64 : GCCBuiltin<"__builtin_ia32_fxrstor64">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_fxsave : GCCBuiltin<"__builtin_ia32_fxsave">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_x86_fxsave64 : GCCBuiltin<"__builtin_ia32_fxsave64">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+}
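+
+// FXSAVE/FXRSTOR save and restore the x87/MMX/SSE state; the pointer must
+// reference a 512-byte, 16-byte-aligned area. A sketch, assuming the _fxsave
+// and _fxrstor wrappers from <immintrin.h>:
+//
+//   _Alignas(16) char area[512];
+//   _fxsave(area);      // later: _fxrstor(area);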
+
+//===----------------------------------------------------------------------===//
+// XSAVE
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_xsave :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsave64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xrstor :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xrstor64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsaveopt :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsaveopt64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xrstors :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xrstors64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsavec :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsavec64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsaves :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xsaves64 :
+              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+  def int_x86_xgetbv :
+              Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
+  def int_x86_xsetbv :
+              Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+}
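+
+// The two llvm_i32_ty operands are the EDX:EAX halves of the 64-bit
+// state-component bitmap. Illustrative use via the <immintrin.h> wrappers,
+// which take the bitmap as a single 64-bit value:
+//
+//   _Alignas(64) char area[4096];          // real size: CPUID leaf 0DH
+//   unsigned long long xcr0 = _xgetbv(0);  // components the OS enabled
+//   _xsave(area, xcr0 & 0x7);              // x87 | SSE | AVX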
+
+//===----------------------------------------------------------------------===//
+// CLFLUSHOPT and CLWB
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_clflushopt : GCCBuiltin<"__builtin_ia32_clflushopt">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+
+  def int_x86_clwb : GCCBuiltin<"__builtin_ia32_clwb">,
+              Intrinsic<[], [llvm_ptr_ty], []>;
+}
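+
+// Both take the address of the cache line to operate on; clflushopt
+// invalidates the line, clwb writes it back but may keep it cached. Ordering
+// against later stores needs an explicit fence. A sketch with the
+// <immintrin.h> wrappers, for some pointer p:
+//
+//   _mm_clwb(p);
+//   _mm_sfence();   // order the write-back before subsequent stores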
+
+//===----------------------------------------------------------------------===//
+// Protection key (PKU) support
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_rdpkru : GCCBuiltin <"__builtin_ia32_rdpkru">,
+              Intrinsic<[llvm_i32_ty], [], []>;
+  def int_x86_wrpkru : GCCBuiltin<"__builtin_ia32_wrpkru">,
+              Intrinsic<[], [llvm_i32_ty], []>;
+}
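+
+// RDPKRU/WRPKRU access the user-mode protection-key rights register (two bits
+// per key: access-disable, then write-disable). Illustrative use for a
+// hypothetical key index `key` (wrapper names from <immintrin.h>):
+//
+//   unsigned int pkru = _rdpkru_u32();
+//   _wrpkru(pkru | (1u << (2 * key)));   // set access-disable for `key`
+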
+//===----------------------------------------------------------------------===//
+// Half float conversion
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256">,
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+  def int_x86_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtph2ps_512 : GCCBuiltin<"__builtin_ia32_vcvtph2ps512_mask">,
+              Intrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty, llvm_v16f32_ty,
+                                           llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256_mask">,
+              Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty, llvm_v8f32_ty,
+                                           llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps_mask">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty, llvm_v4f32_ty,
+                                           llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtps2ph_512 : GCCBuiltin<"__builtin_ia32_vcvtps2ph512_mask">,
+              Intrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty,
+                                           llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256_mask">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty,
+                                           llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph_mask">,
+              Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty,
+                                           llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+}
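+
+// For illustration, the unmasked 128-bit form from LLVM IR; it converts the
+// four low half-precision lanes of the <8 x i16> operand to single precision:
+//   %ps = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %h)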
+
+//===----------------------------------------------------------------------===//
+// TBM
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_tbm_bextri_u32 : GCCBuiltin<"__builtin_ia32_bextri_u32">,
+        Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_tbm_bextri_u64 : GCCBuiltin<"__builtin_ia32_bextri_u64">,
+        Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+}
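+
+// For illustration, a BEXTRI bit-field extract from LLVM IR. Per the AMD TBM
+// encoding, control bits 7:0 give the start bit and bits 15:8 the length, so
+// 0x0804 (2052) extracts 8 bits starting at bit 4:
+//   %f = call i32 @llvm.x86.tbm.bextri.u32(i32 %src, i32 2052)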
+
+//===----------------------------------------------------------------------===//
+// RDRAND intrinsics - Return a random value and whether it is valid.
+// RDSEED intrinsics - Return a NIST SP800-90B & C compliant random value and
+// whether it is valid.
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  // These are declared side-effecting so they don't get eliminated by CSE or
+  // LICM.
+  def int_x86_rdrand_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
+  def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
+  def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
+  def int_x86_rdseed_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
+  def int_x86_rdseed_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
+  def int_x86_rdseed_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
+}
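+
+// For illustration, drawing one 32-bit value from LLVM IR; the second struct
+// element is nonzero only if the hardware returned a valid value:
+//   %pair = call { i32, i32 } @llvm.x86.rdrand.32()
+//   %val  = extractvalue { i32, i32 } %pair, 0
+//   %ok   = extractvalue { i32, i32 } %pair, 1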
+
+//===----------------------------------------------------------------------===//
+// ADX
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_addcarryx_u32: GCCBuiltin<"__builtin_ia32_addcarryx_u32">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+  def int_x86_addcarryx_u64: GCCBuiltin<"__builtin_ia32_addcarryx_u64">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+  def int_x86_addcarry_u32: GCCBuiltin<"__builtin_ia32_addcarry_u32">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+  def int_x86_addcarry_u64: GCCBuiltin<"__builtin_ia32_addcarry_u64">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+  def int_x86_subborrow_u32: GCCBuiltin<"__builtin_ia32_subborrow_u32">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+  def int_x86_subborrow_u64: GCCBuiltin<"__builtin_ia32_subborrow_u64">,
+        Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+                                 llvm_ptr_ty], [IntrArgMemOnly]>;
+}
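+
+// For illustration, one limb of a carry-chained wide add from LLVM IR; the i8
+// result is the carry-out and the 32-bit sum is stored through the pointer:
+//   %cout = call i8 @llvm.x86.addcarry.u32(i8 %cin, i32 %a, i32 %b, i8* %sum)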
+
+//===----------------------------------------------------------------------===//
+// RTM intrinsics. Transactional Memory support.
+
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_xbegin : GCCBuiltin<"__builtin_ia32_xbegin">,
+              Intrinsic<[llvm_i32_ty], [], []>;
+  def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">,
+              Intrinsic<[], [], []>;
+  def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">,
+              Intrinsic<[], [llvm_i8_ty], []>;
+  def int_x86_xtest : GCCBuiltin<"__builtin_ia32_xtest">,
+              Intrinsic<[llvm_i32_ty], [], []>;
+}
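+
+// For illustration, a minimal transaction from LLVM IR; XBEGIN yields -1
+// (_XBEGIN_STARTED) when the transaction starts and an abort status otherwise:
+//   %status = call i32 @llvm.x86.xbegin()
+//   ; ... transactional body, guarded by a test of %status ...
+//   call void @llvm.x86.xend()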
+
+//===----------------------------------------------------------------------===//
+// AVX512
+
+// Conversion ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_cvttss2si : GCCBuiltin<"__builtin_ia32_vcvttss2si32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttss2si64 : GCCBuiltin<"__builtin_ia32_vcvttss2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttss2usi : GCCBuiltin<"__builtin_ia32_vcvttss2usi32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttss2usi64 : GCCBuiltin<"__builtin_ia32_vcvttss2usi64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtusi2ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss32">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtusi642ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss64">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttsd2si : GCCBuiltin<"__builtin_ia32_vcvttsd2si32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_vcvttsd2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttsd2usi : GCCBuiltin<"__builtin_ia32_vcvttsd2usi32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvttsd2usi64 : GCCBuiltin<"__builtin_ia32_vcvttsd2usi64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtusi2sd : GCCBuiltin<"__builtin_ia32_cvtusi2sd32">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtusi642sd : GCCBuiltin<"__builtin_ia32_cvtusi2sd64">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtss2usi32 : GCCBuiltin<"__builtin_ia32_vcvtss2usi32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtss2usi64 : GCCBuiltin<"__builtin_ia32_vcvtss2usi64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtss2si32 : GCCBuiltin<"__builtin_ia32_vcvtss2si32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtss2si64 : GCCBuiltin<"__builtin_ia32_vcvtss2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtsd2usi32 : GCCBuiltin<"__builtin_ia32_vcvtsd2usi32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtsd2usi64 : GCCBuiltin<"__builtin_ia32_vcvtsd2usi64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtsd2si32 : GCCBuiltin<"__builtin_ia32_vcvtsd2si32">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcvtsd2si64 : GCCBuiltin<"__builtin_ia32_vcvtsd2si64">,
+              Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtsi2ss32 : GCCBuiltin<"__builtin_ia32_cvtsi2ss32">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtsi2ss64 : GCCBuiltin<"__builtin_ia32_cvtsi2ss64">,
+              Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                         llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_cvtsi2sd64 : GCCBuiltin<"__builtin_ia32_cvtsi2sd64">,
+              Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+                         llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+}
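+
+// For illustration, a truncating scalar conversion from LLVM IR; the trailing
+// i32 is the SAE/rounding control (e.g. 8 = _MM_FROUND_NO_EXC, 4 = use the
+// current rounding mode):
+//   %i = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %v, i32 8)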
+
+// Pack ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_packsswb_512 : GCCBuiltin<"__builtin_ia32_packsswb512">,
+              Intrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+                         [IntrNoMem]>;
+  def int_x86_avx512_packssdw_512 : GCCBuiltin<"__builtin_ia32_packssdw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+                         [IntrNoMem]>;
+  def int_x86_avx512_packuswb_512 : GCCBuiltin<"__builtin_ia32_packuswb512">,
+              Intrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+                         [IntrNoMem]>;
+  def int_x86_avx512_packusdw_512 : GCCBuiltin<"__builtin_ia32_packusdw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+                         [IntrNoMem]>;
+}
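+
+// For illustration, a 512-bit signed saturating pack from LLVM IR:
+//   %p = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a,
+//                                                     <32 x i16> %b)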
+
+// Vector convert
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_mask_cvtdq2ps_128 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtdq2ps_256 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtdq2ps_512 :
+        GCCBuiltin<"__builtin_ia32_cvtdq2ps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16i32_ty, llvm_v16f32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2dq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2dq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2dq_256 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2dq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2dq512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2ps_256 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2ps_512 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2ps512_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f64_ty, llvm_v8f32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtsd2ss_round :
+        GCCBuiltin<"__builtin_ia32_cvtsd2ss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v2f64_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtss2sd_round :
+        GCCBuiltin<"__builtin_ia32_cvtss2sd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v4f32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2ps :
+        GCCBuiltin<"__builtin_ia32_cvtpd2ps_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v2f64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2qq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2qq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2qq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2qq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2qq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2qq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2udq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2udq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2udq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2udq256_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2udq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2udq512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2uqq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2uqq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2uqq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2uqq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtpd2uqq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtpd2uqq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2dq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtps2dq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2dq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtps2dq256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2dq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtps2dq512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2pd_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v4f32_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2pd_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f32_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2pd_512 :
+        GCCBuiltin<"__builtin_ia32_cvtps2pd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f32_ty, llvm_v8f64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2qq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtps2qq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2qq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtps2qq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2qq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtps2qq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2udq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtps2udq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2udq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtps2udq256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2udq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtps2udq512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2uqq_128 :
+        GCCBuiltin<"__builtin_ia32_cvtps2uqq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2uqq_256 :
+        GCCBuiltin<"__builtin_ia32_cvtps2uqq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtps2uqq_512 :
+        GCCBuiltin<"__builtin_ia32_cvtps2uqq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2pd_128 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2pd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2i64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2pd_256 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2pd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4i64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2pd_512 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2pd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8i64_ty, llvm_v8f64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2ps_128 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2ps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v2i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2ps_256 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2ps256_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtqq2ps_512 :
+        GCCBuiltin<"__builtin_ia32_cvtqq2ps512_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i64_ty, llvm_v8f32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2dq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2dq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2dq_256 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2dq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2dq512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2qq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2qq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2qq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2qq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2qq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2qq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2udq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2udq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2udq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2udq256_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2udq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2udq512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2uqq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2uqq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2uqq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2uqq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttpd2uqq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttpd2uqq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2dq_128 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2dq_256 : // TODO: remove this intrinsic
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2dq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttps2dq512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2qq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttps2qq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2qq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttps2qq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2qq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttps2qq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2udq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttps2udq128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2udq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttps2udq256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2udq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttps2udq512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2uqq_128 :
+        GCCBuiltin<"__builtin_ia32_cvttps2uqq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2uqq_256 :
+        GCCBuiltin<"__builtin_ia32_cvttps2uqq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvttps2uqq_512 :
+        GCCBuiltin<"__builtin_ia32_cvttps2uqq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtudq2ps_128 :
+        GCCBuiltin<"__builtin_ia32_cvtudq2ps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtudq2ps_256 :
+        GCCBuiltin<"__builtin_ia32_cvtudq2ps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtudq2ps_512 :
+        GCCBuiltin<"__builtin_ia32_cvtudq2ps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16i32_ty, llvm_v16f32_ty,  llvm_i16_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2pd_128 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2pd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2i64_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2pd_256 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2pd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4i64_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2pd_512 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2pd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8i64_ty, llvm_v8f64_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2ps_128 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2ps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v2i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2ps_256 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2ps256_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cvtuqq2ps_512 :
+        GCCBuiltin<"__builtin_ia32_cvtuqq2ps512_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8i64_ty, llvm_v8f32_ty,  llvm_i8_ty,  llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_rndscale_pd_128 : GCCBuiltin<"__builtin_ia32_rndscalepd_128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
+                                     llvm_v2f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_pd_256 : GCCBuiltin<"__builtin_ia32_rndscalepd_256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+                                     llvm_v4f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_pd_512 : GCCBuiltin<"__builtin_ia32_rndscalepd_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
+                                     llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_ps_128 : GCCBuiltin<"__builtin_ia32_rndscaleps_128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
+                                     llvm_v4f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_ps_256 : GCCBuiltin<"__builtin_ia32_rndscaleps_256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+                                     llvm_v8f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_ps_512 : GCCBuiltin<"__builtin_ia32_rndscaleps_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_pd_128 : GCCBuiltin<"__builtin_ia32_reducepd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
+                                     llvm_v2f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_pd_256 : GCCBuiltin<"__builtin_ia32_reducepd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+                                     llvm_v4f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_pd_512 : GCCBuiltin<"__builtin_ia32_reducepd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
+                                     llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_ps_128 : GCCBuiltin<"__builtin_ia32_reduceps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
+                                     llvm_v4f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_ps_256 : GCCBuiltin<"__builtin_ia32_reduceps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+                                     llvm_v8f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_ps_512 : GCCBuiltin<"__builtin_ia32_reduceps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_pd_128 : GCCBuiltin<"__builtin_ia32_rangepd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty,
+                                    llvm_v2f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_pd_256 : GCCBuiltin<"__builtin_ia32_rangepd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty,
+                                    llvm_v4f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_pd_512 : GCCBuiltin<"__builtin_ia32_rangepd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty,
+                                    llvm_v8f64_ty,  llvm_i8_ty,  llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_ps_128 : GCCBuiltin<"__builtin_ia32_rangeps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty,
+                                    llvm_v4f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_ps_256 : GCCBuiltin<"__builtin_ia32_rangeps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty,
+                                    llvm_v8f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_range_ps_512 : GCCBuiltin<"__builtin_ia32_rangeps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty,
+                                     llvm_v16f32_ty,  llvm_i16_ty,  llvm_i32_ty], [IntrNoMem]>;
+}
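+
+// For illustration, the operand pattern shared by the masked conversions
+// above: source, pass-through vector, mask, and (on the 512-bit forms) an
+// i32 rounding control; lanes whose mask bit is clear take the pass-through
+// value (4 = _MM_FROUND_CUR_DIRECTION):
+//   %r = call <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(
+//            <16 x i32> %src, <16 x float> %passthru, i16 %k, i32 4)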
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  // TODO: Remove the broadcast intrinsics that have no GCC builtin and
+  // autoupgrade them.
+  def int_x86_avx512_vbroadcast_ss_512 :
+        Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_vbroadcast_sd_512 :
+        Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_broadcastmw_512 :
+          GCCBuiltin<"__builtin_ia32_broadcastmw512">,
+          Intrinsic<[llvm_v16i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_broadcastmw_256 :
+          GCCBuiltin<"__builtin_ia32_broadcastmw256">,
+          Intrinsic<[llvm_v8i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_broadcastmw_128 :
+          GCCBuiltin<"__builtin_ia32_broadcastmw128">,
+          Intrinsic<[llvm_v4i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_broadcastmb_512 :
+          GCCBuiltin<"__builtin_ia32_broadcastmb512">,
+          Intrinsic<[llvm_v8i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_broadcastmb_256 :
+          GCCBuiltin<"__builtin_ia32_broadcastmb256">,
+          Intrinsic<[llvm_v4i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_broadcastmb_128 :
+          GCCBuiltin<"__builtin_ia32_broadcastmb128">,
+          Intrinsic<[llvm_v2i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+}
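+
+// For illustration, broadcasting a 16-bit mask register to all i32 lanes from
+// LLVM IR (VPBROADCASTMW2D; the mask is zero-extended in each lane):
+//   %v = call <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16 %k)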
+
+// Arithmetic ops
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+
+  def int_x86_avx512_mask_add_ps_512 : GCCBuiltin<"__builtin_ia32_addps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_add_pd_512 : GCCBuiltin<"__builtin_ia32_addpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sub_ps_512 : GCCBuiltin<"__builtin_ia32_subps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sub_pd_512 : GCCBuiltin<"__builtin_ia32_subpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_mul_ps_512 : GCCBuiltin<"__builtin_ia32_mulps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_mul_pd_512 : GCCBuiltin<"__builtin_ia32_mulpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_div_ps_512 : GCCBuiltin<"__builtin_ia32_divps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_div_pd_512 : GCCBuiltin<"__builtin_ia32_divpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_max_ps_512 : GCCBuiltin<"__builtin_ia32_maxps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_max_pd_512 : GCCBuiltin<"__builtin_ia32_maxpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_min_ps_512 : GCCBuiltin<"__builtin_ia32_minps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                     llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_min_pd_512 : GCCBuiltin<"__builtin_ia32_minpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                     llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_add_ss_round : GCCBuiltin<"__builtin_ia32_addss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_div_ss_round : GCCBuiltin<"__builtin_ia32_divss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_mul_ss_round : GCCBuiltin<"__builtin_ia32_mulss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sub_ss_round : GCCBuiltin<"__builtin_ia32_subss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_max_ss_round : GCCBuiltin<"__builtin_ia32_maxss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_min_ss_round : GCCBuiltin<"__builtin_ia32_minss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                     llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_add_sd_round : GCCBuiltin<"__builtin_ia32_addsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_div_sd_round : GCCBuiltin<"__builtin_ia32_divsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_mul_sd_round : GCCBuiltin<"__builtin_ia32_mulsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sub_sd_round : GCCBuiltin<"__builtin_ia32_subsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_max_sd_round : GCCBuiltin<"__builtin_ia32_maxsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_min_sd_round : GCCBuiltin<"__builtin_ia32_minsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                     llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_rndscale_ss : GCCBuiltin<"__builtin_ia32_rndscaless_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_rndscale_sd : GCCBuiltin<"__builtin_ia32_rndscalesd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                      llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_range_ss : GCCBuiltin<"__builtin_ia32_rangess128_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_range_sd : GCCBuiltin<"__builtin_ia32_rangesd128_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                      llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_ss : GCCBuiltin<"__builtin_ia32_reducess_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_reduce_sd : GCCBuiltin<"__builtin_ia32_reducesd_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                      llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+                                     [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_sd : GCCBuiltin<"__builtin_ia32_scalefsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                      llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_ss : GCCBuiltin<"__builtin_ia32_scalefss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                      llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_pd_128 : GCCBuiltin<"__builtin_ia32_scalefpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                    llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_pd_256 : GCCBuiltin<"__builtin_ia32_scalefpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                    llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_pd_512 : GCCBuiltin<"__builtin_ia32_scalefpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                    llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_ps_128 : GCCBuiltin<"__builtin_ia32_scalefps128_mask">,
+          Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                    llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_ps_256 : GCCBuiltin<"__builtin_ia32_scalefps256_mask">,
+          Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                    llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_scalef_ps_512 : GCCBuiltin<"__builtin_ia32_scalefps512_mask">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                    llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss_round_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd_round_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_sqrt_pd_128 : GCCBuiltin<"__builtin_ia32_sqrtpd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_pd_256 : GCCBuiltin<"__builtin_ia32_sqrtpd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_pd_512 : GCCBuiltin<"__builtin_ia32_sqrtpd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_ps_128 : GCCBuiltin<"__builtin_ia32_sqrtps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_ps_256 : GCCBuiltin<"__builtin_ia32_sqrtps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_sqrt_ps_512 : GCCBuiltin<"__builtin_ia32_sqrtps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_pd_128 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_pd_128 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd128_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_pd_256 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_pd_256 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd256_maskz">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_pd_512 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_pd_512 :
+         GCCBuiltin<"__builtin_ia32_fixupimmpd512_maskz">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_ps_128 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_ps_128 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps128_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_ps_256 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_ps_256 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps256_maskz">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_ps_512 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
+          llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_ps_512 :
+         GCCBuiltin<"__builtin_ia32_fixupimmps512_maskz">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
+          llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_sd :
+         GCCBuiltin<"__builtin_ia32_fixupimmsd_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_sd :
+         GCCBuiltin<"__builtin_ia32_fixupimmsd_maskz">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_fixupimm_ss :
+         GCCBuiltin<"__builtin_ia32_fixupimmss_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_fixupimm_ss :
+         GCCBuiltin<"__builtin_ia32_fixupimmss_maskz">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
+          llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_pd_128 : GCCBuiltin<"__builtin_ia32_getexppd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_pd_256 : GCCBuiltin<"__builtin_ia32_getexppd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_pd_512 : GCCBuiltin<"__builtin_ia32_getexppd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_ps_128 : GCCBuiltin<"__builtin_ia32_getexpps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_ps_256 : GCCBuiltin<"__builtin_ia32_getexpps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_ps_512 : GCCBuiltin<"__builtin_ia32_getexpps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getexp_ss : GCCBuiltin<"__builtin_ia32_getexpss128_round_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_getexp_sd : GCCBuiltin<"__builtin_ia32_getexpsd128_round_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_pd_128 :
+         GCCBuiltin<"__builtin_ia32_getmantpd128_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_pd_256 :
+         GCCBuiltin<"__builtin_ia32_getmantpd256_mask">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_pd_512 :
+         GCCBuiltin<"__builtin_ia32_getmantpd512_mask">,
+          Intrinsic<[llvm_v8f64_ty],
+          [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,  llvm_i8_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_ps_128 :
+         GCCBuiltin<"__builtin_ia32_getmantps128_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_ps_256 :
+         GCCBuiltin<"__builtin_ia32_getmantps256_mask">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty,  llvm_i8_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_ps_512 :
+         GCCBuiltin<"__builtin_ia32_getmantps512_mask">,
+          Intrinsic<[llvm_v16f32_ty],
+          [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_ss :
+         GCCBuiltin<"__builtin_ia32_getmantss_round_mask">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
+           llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_getmant_sd :
+         GCCBuiltin<"__builtin_ia32_getmantsd_round_mask">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
+           llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_rsqrt14_ss : GCCBuiltin<"__builtin_ia32_rsqrt14ss_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_sd : GCCBuiltin<"__builtin_ia32_rsqrt14sd_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_rsqrt14_pd_128 : GCCBuiltin<"__builtin_ia32_rsqrt14pd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_pd_256 : GCCBuiltin<"__builtin_ia32_rsqrt14pd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_pd_512 : GCCBuiltin<"__builtin_ia32_rsqrt14pd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_ps_128 : GCCBuiltin<"__builtin_ia32_rsqrt14ps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_ps_256 : GCCBuiltin<"__builtin_ia32_rsqrt14ps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rsqrt14_ps_512 : GCCBuiltin<"__builtin_ia32_rsqrt14ps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_ss : GCCBuiltin<"__builtin_ia32_rcp14ss_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_sd : GCCBuiltin<"__builtin_ia32_rcp14sd_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_rcp14_pd_128 : GCCBuiltin<"__builtin_ia32_rcp14pd128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_pd_256 : GCCBuiltin<"__builtin_ia32_rcp14pd256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_pd_512 : GCCBuiltin<"__builtin_ia32_rcp14pd512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_ps_128 : GCCBuiltin<"__builtin_ia32_rcp14ps128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                     llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_ps_256 : GCCBuiltin<"__builtin_ia32_rcp14ps256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                                    llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp14_ps_512 : GCCBuiltin<"__builtin_ia32_rcp14ps512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                     llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_rcp28_ps : GCCBuiltin<"__builtin_ia32_rcp28ps_mask">,
+            Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                         llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_rcp28_pd : GCCBuiltin<"__builtin_ia32_rcp28pd_mask">,
+            Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                        llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_exp2_ps : GCCBuiltin<"__builtin_ia32_exp2ps_mask">,
+            Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                         llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_exp2_pd : GCCBuiltin<"__builtin_ia32_exp2pd_mask">,
+            Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                        llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_rcp28_ss : GCCBuiltin<"__builtin_ia32_rcp28ss_round_mask">,
+            Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                        llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_rcp28_sd : GCCBuiltin<"__builtin_ia32_rcp28sd_round_mask">,
+            Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                        llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_rsqrt28_ps : GCCBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
+            Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                                         llvm_i16_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_rsqrt28_pd : GCCBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
+            Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                                        llvm_i8_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_rsqrt28_ss : GCCBuiltin<"__builtin_ia32_rsqrt28ss_round_mask">,
+            Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                                        llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_rsqrt28_sd : GCCBuiltin<"__builtin_ia32_rsqrt28sd_round_mask">,
+            Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                                        llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+                      [IntrNoMem]>;
+  def int_x86_avx512_psad_bw_512 : GCCBuiltin<"__builtin_ia32_psadbw512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
+                        [IntrNoMem, Commutative]>;
+}
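+
+// Illustrative note (not part of the upstream definitions): rcp28, rsqrt28,
+// and exp2 above carry a trailing i32 rounding/SAE control operand, e.g.
+//   %r = call <8 x double> @llvm.x86.avx512.rcp28.pd(
+//            <8 x double> %a, <8 x double> %src, i8 %mask, i32 4)
+// where 4 (_MM_FROUND_CUR_DIRECTION) keeps the current rounding mode.
+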
+// Integer arithmetic ops
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_padds_b_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                     llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_padds_b_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                     llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_padds_b_512 : GCCBuiltin<"__builtin_ia32_paddsb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                     llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_padds_w_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                     llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_padds_w_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                     llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_padds_w_512 : GCCBuiltin<"__builtin_ia32_paddsw512_mask">,
+          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                     llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_b_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                     llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_b_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                     llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_b_512 : GCCBuiltin<"__builtin_ia32_paddusb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                     llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_w_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                     llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_w_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                     llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_paddus_w_512 : GCCBuiltin<"__builtin_ia32_paddusw512_mask">,
+          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                     llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_b_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                     llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_b_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                     llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_b_512 : GCCBuiltin<"__builtin_ia32_psubsb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                     llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_w_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                     llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_w_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                     llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubs_w_512 : GCCBuiltin<"__builtin_ia32_psubsw512_mask">,
+          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                     llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_b_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                     llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_b_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                     llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_b_512 : GCCBuiltin<"__builtin_ia32_psubusb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                     llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_w_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                     llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_w_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                     llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_psubus_w_512 : GCCBuiltin<"__builtin_ia32_psubusw512_mask">,
+          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                     llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v32i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx512_pmulh_w_512 : GCCBuiltin<"__builtin_ia32_pmulhw512">,
+              Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+                         llvm_v32i16_ty], [IntrNoMem, Commutative]>;
+  def int_x86_avx512_mask_pmaddw_d_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_pmaddw_d_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v8i32_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_pmaddw_d_512 :
+         GCCBuiltin<"__builtin_ia32_pmaddwd512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+          [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v16i32_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_pmaddubs_w_128 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v8i16_ty],
+          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v8i16_ty, llvm_i8_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_pmaddubs_w_256 : // FIXME: remove this intrinsic
+          Intrinsic<[llvm_v16i16_ty],
+          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v16i16_ty, llvm_i16_ty],
+          [IntrNoMem]>;
+  def int_x86_avx512_mask_pmaddubs_w_512 :
+         GCCBuiltin<"__builtin_ia32_pmaddubsw512_mask">,
+          Intrinsic<[llvm_v32i16_ty],
+          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v32i16_ty, llvm_i32_ty],
+          [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_128 :
+         GCCBuiltin<"__builtin_ia32_dbpsadbw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v8i16_ty,
+           llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_256 :
+         GCCBuiltin<"__builtin_ia32_dbpsadbw256_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+          [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v16i16_ty,
+           llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_512 :
+         GCCBuiltin<"__builtin_ia32_dbpsadbw512_mask">,
+          Intrinsic<[llvm_v32i16_ty],
+          [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v32i16_ty,
+           llvm_i32_ty], [IntrNoMem]>;
+}
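+
+// Illustrative note (not part of the upstream definitions): the "_mask"
+// arithmetic intrinsics above take the pass-through vector and the mask as
+// trailing operands, e.g.
+//   %r = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(
+//            <64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64 %mask)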
+
+// Gather and Scatter ops
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_gather_dpd_512  : GCCBuiltin<"__builtin_ia32_gathersiv8df">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+                     llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_dps_512  : GCCBuiltin<"__builtin_ia32_gathersiv16sf">,
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
+                     llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_qpd_512  : GCCBuiltin<"__builtin_ia32_gatherdiv8df">,
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_qps_512  : GCCBuiltin<"__builtin_ia32_gatherdiv16sf">,
+          Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather_dpq_512  : GCCBuiltin<"__builtin_ia32_gathersiv8di">,
+          Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_dpi_512  : GCCBuiltin<"__builtin_ia32_gathersiv16si">,
+          Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
+                     llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_qpq_512  : GCCBuiltin<"__builtin_ia32_gatherdiv8di">,
+          Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_gather_qpi_512  : GCCBuiltin<"__builtin_ia32_gatherdiv16si">,
+          Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div2_df :
+        GCCBuiltin<"__builtin_ia32_gather3div2df">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div2_di :
+        GCCBuiltin<"__builtin_ia32_gather3div2di">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div4_df :
+        GCCBuiltin<"__builtin_ia32_gather3div4df">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div4_di :
+        GCCBuiltin<"__builtin_ia32_gather3div4di">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div4_sf :
+        GCCBuiltin<"__builtin_ia32_gather3div4sf">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div4_si :
+        GCCBuiltin<"__builtin_ia32_gather3div4si">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div8_sf :
+        GCCBuiltin<"__builtin_ia32_gather3div8sf">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3div8_si :
+        GCCBuiltin<"__builtin_ia32_gather3div8si">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv2_df :
+        GCCBuiltin<"__builtin_ia32_gather3siv2df">,
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv2_di :
+        GCCBuiltin<"__builtin_ia32_gather3siv2di">,
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv4_df :
+        GCCBuiltin<"__builtin_ia32_gather3siv4df">,
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv4_di :
+        GCCBuiltin<"__builtin_ia32_gather3siv4di">,
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv4_sf :
+        GCCBuiltin<"__builtin_ia32_gather3siv4sf">,
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv4_si :
+        GCCBuiltin<"__builtin_ia32_gather3siv4si">,
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv8_sf :
+        GCCBuiltin<"__builtin_ia32_gather3siv8sf">,
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_gather3siv8_si :
+        GCCBuiltin<"__builtin_ia32_gather3siv8si">,
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
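+  // Illustrative note (not part of the upstream definitions): gathers take
+  // the pass-through vector, base pointer, index vector, mask, and scale
+  // (1, 2, 4, or 8), e.g.
+  //   %g = call <8 x double> @llvm.x86.avx512.gather.dpd.512(
+  //            <8 x double> %src, i8* %base, <8 x i32> %idx, i8 %mask, i32 8)
+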
+// scatter
+  def int_x86_avx512_scatter_dpd_512  : GCCBuiltin<"__builtin_ia32_scattersiv8df">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+                        llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_dps_512  : GCCBuiltin<"__builtin_ia32_scattersiv16sf">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
+                       llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_qpd_512  : GCCBuiltin<"__builtin_ia32_scatterdiv8df">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+                     llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_qps_512  : GCCBuiltin<"__builtin_ia32_scatterdiv16sf">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+                     llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatter_dpq_512  : GCCBuiltin<"__builtin_ia32_scattersiv8di">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+                         llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_dpi_512  : GCCBuiltin<"__builtin_ia32_scattersiv16si">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
+                     llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_qpq_512  : GCCBuiltin<"__builtin_ia32_scatterdiv8di">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_scatter_qpi_512  : GCCBuiltin<"__builtin_ia32_scatterdiv16si">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i32_ty,
+                         llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv2_df :
+       GCCBuiltin<"__builtin_ia32_scatterdiv2df">,
+        Intrinsic<[],
+        [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
+        [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv2_di :
+        GCCBuiltin<"__builtin_ia32_scatterdiv2di">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv4_df :
+        GCCBuiltin<"__builtin_ia32_scatterdiv4df">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv4_di :
+        GCCBuiltin<"__builtin_ia32_scatterdiv4di">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv4_sf :
+        GCCBuiltin<"__builtin_ia32_scatterdiv4sf">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv4_si :
+        GCCBuiltin<"__builtin_ia32_scatterdiv4si">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv8_sf :
+        GCCBuiltin<"__builtin_ia32_scatterdiv8sf">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scatterdiv8_si :
+        GCCBuiltin<"__builtin_ia32_scatterdiv8si">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv2_df :
+        GCCBuiltin<"__builtin_ia32_scattersiv2df">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv2_di :
+        GCCBuiltin<"__builtin_ia32_scattersiv2di">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv4_df :
+        GCCBuiltin<"__builtin_ia32_scattersiv4df">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv4_di :
+        GCCBuiltin<"__builtin_ia32_scattersiv4di">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv4_sf :
+        GCCBuiltin<"__builtin_ia32_scattersiv4sf">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv4_si :
+        GCCBuiltin<"__builtin_ia32_scattersiv4si">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv8_sf :
+        GCCBuiltin<"__builtin_ia32_scattersiv8sf">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_scattersiv8_si :
+        GCCBuiltin<"__builtin_ia32_scattersiv8si">,
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
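+  // Illustrative note (not part of the upstream definitions): scatters return
+  // nothing; the stored vector precedes the trailing scale operand, e.g.
+  //   call void @llvm.x86.avx512.scatter.dpd.512(
+  //            i8* %base, i8 %mask, <8 x i32> %idx, <8 x double> %v, i32 8)
+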
+  // gather prefetch
+  def int_x86_avx512_gatherpf_dpd_512  : GCCBuiltin<"__builtin_ia32_gatherpfdpd">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_gatherpf_dps_512  : GCCBuiltin<"__builtin_ia32_gatherpfdps">,
+          Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_gatherpf_qpd_512  : GCCBuiltin<"__builtin_ia32_gatherpfqpd">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_gatherpf_qps_512  : GCCBuiltin<"__builtin_ia32_gatherpfqps">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+
+  // scatter prefetch
+  def int_x86_avx512_scatterpf_dpd_512  : GCCBuiltin<"__builtin_ia32_scatterpfdpd">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_scatterpf_dps_512  : GCCBuiltin<"__builtin_ia32_scatterpfdps">,
+          Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_scatterpf_qpd_512  : GCCBuiltin<"__builtin_ia32_scatterpfqpd">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_scatterpf_qps_512  : GCCBuiltin<"__builtin_ia32_scatterpfqps">,
+          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
+}
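+
+// Illustrative note (not part of the upstream definitions): the prefetch
+// forms only warm the caches, so they produce no result, e.g.
+//   call void @llvm.x86.avx512.gatherpf.dpd.512(
+//            i8 %mask, <8 x i32> %idx, i8* %base, i32 8, i32 %hint)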
+
+// AVX-512 conflict detection instructions (VPCONFLICTD/VPCONFLICTQ): each
+// result element reports, as a bitmask, which lower source elements hold the
+// same value.
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_conflict_d_128 :
+          GCCBuiltin<"__builtin_ia32_vpconflictsi_128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_conflict_d_256 :
+          GCCBuiltin<"__builtin_ia32_vpconflictsi_256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_conflict_d_512 :
+          GCCBuiltin<"__builtin_ia32_vpconflictsi_512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+                    [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+
+  def int_x86_avx512_mask_conflict_q_128 :
+          GCCBuiltin<"__builtin_ia32_vpconflictdi_128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+                    [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_conflict_q_256 :
+          GCCBuiltin<"__builtin_ia32_vpconflictdi_256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+                    [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_conflict_q_512 :
+          GCCBuiltin<"__builtin_ia32_vpconflictdi_512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+                    [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+}
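+
+// Illustrative note (not part of the upstream definitions):
+//   %c = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(
+//            <16 x i32> %a, <16 x i32> %src, i16 %mask)
+// yields, per element, a bitmask of the lower elements equal to it.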
+
+// Compares
+let TargetPrefix = "x86" in {
+  // Scalar compares taking an explicit predicate and rounding-control operand
+  def int_x86_avx512_vcomi_sd : GCCBuiltin<"__builtin_ia32_vcomisd">,
+              Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+                         llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_vcomi_ss : GCCBuiltin<"__builtin_ia32_vcomiss">,
+              Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+                         llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
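+
+// Illustrative note (not part of the upstream definitions): the two trailing
+// i32 operands are the comparison predicate and the rounding control, e.g.
+//   %eq = call i32 @llvm.x86.avx512.vcomi.sd(
+//            <2 x double> %a, <2 x double> %b, i32 0, i32 4)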
+
+// Compress, Expand
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_compress_ps_512 :
+                             GCCBuiltin<"__builtin_ia32_compresssf512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_pd_512 :
+                             GCCBuiltin<"__builtin_ia32_compressdf512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_ps_256 :
+                             GCCBuiltin<"__builtin_ia32_compresssf256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_pd_256 :
+                             GCCBuiltin<"__builtin_ia32_compressdf256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_ps_128 :
+                             GCCBuiltin<"__builtin_ia32_compresssf128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_pd_128 :
+                             GCCBuiltin<"__builtin_ia32_compressdf128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_compress_store_ps_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresf512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty,
+                   llvm_i16_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_pd_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredf512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8f64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_ps_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresf256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_pd_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredf256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_ps_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresf128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_pd_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredf128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_compress_d_512 :
+                             GCCBuiltin<"__builtin_ia32_compresssi512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_q_512 :
+                             GCCBuiltin<"__builtin_ia32_compressdi512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_d_256 :
+                             GCCBuiltin<"__builtin_ia32_compresssi256_mask">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_q_256 :
+                             GCCBuiltin<"__builtin_ia32_compressdi256_mask">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_d_128 :
+                             GCCBuiltin<"__builtin_ia32_compresssi128_mask">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_q_128 :
+                             GCCBuiltin<"__builtin_ia32_compressdi128_mask">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_compress_store_d_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresi512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_q_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredi512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_d_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresi256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_q_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredi256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_d_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstoresi128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_q_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstoredi128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_compress_b_512 :
+                             GCCBuiltin<"__builtin_ia32_compressqi512_mask">,
+        Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                   llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_w_512 :
+                             GCCBuiltin<"__builtin_ia32_compresshi512_mask">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_b_256 :
+                             GCCBuiltin<"__builtin_ia32_compressqi256_mask">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_w_256 :
+                             GCCBuiltin<"__builtin_ia32_compresshi256_mask">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_b_128 :
+                             GCCBuiltin<"__builtin_ia32_compressqi128_mask">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_compress_w_128 :
+                             GCCBuiltin<"__builtin_ia32_compresshi128_mask">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_compress_store_b_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstoreqi512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v64i8_ty,
+                   llvm_i64_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_w_512 :
+                            GCCBuiltin<"__builtin_ia32_compressstorehi512_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_b_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstoreqi256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty,
+                   llvm_i32_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_w_256 :
+                            GCCBuiltin<"__builtin_ia32_compressstorehi256_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_b_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstoreqi128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v16i8_ty,
+                   llvm_i16_ty], [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_compress_store_w_128 :
+                            GCCBuiltin<"__builtin_ia32_compressstorehi128_mask">,
+        Intrinsic<[], [llvm_ptr_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrArgMemOnly]>;
+
+// expand
+  def int_x86_avx512_mask_expand_ps_512 :
+                             GCCBuiltin<"__builtin_ia32_expandsf512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_pd_512 :
+                             GCCBuiltin<"__builtin_ia32_expanddf512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_ps_256 :
+                             GCCBuiltin<"__builtin_ia32_expandsf256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_pd_256 :
+                             GCCBuiltin<"__builtin_ia32_expanddf256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_ps_128 :
+                             GCCBuiltin<"__builtin_ia32_expandsf128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_pd_128 :
+                             GCCBuiltin<"__builtin_ia32_expanddf128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_expand_load_ps_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsf512_mask">,
+        Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty,
+                   llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_pd_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddf512_mask">,
+        Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty, llvm_v8f64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_ps_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsf256_mask">,
+        Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_pd_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddf256_mask">,
+        Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_ps_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsf128_mask">,
+        Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_pd_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddf128_mask">,
+        Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_expand_d_512 :
+                             GCCBuiltin<"__builtin_ia32_expandsi512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_q_512 :
+                             GCCBuiltin<"__builtin_ia32_expanddi512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_d_256 :
+                             GCCBuiltin<"__builtin_ia32_expandsi256_mask">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_q_256 :
+                             GCCBuiltin<"__builtin_ia32_expanddi256_mask">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_d_128 :
+                             GCCBuiltin<"__builtin_ia32_expandsi128_mask">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_q_128 :
+                             GCCBuiltin<"__builtin_ia32_expanddi128_mask">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_expand_load_d_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsi512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_ptr_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_q_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddi512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_d_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsi256_mask">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_q_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddi256_mask">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_d_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloadsi128_mask">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_q_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloaddi128_mask">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_expand_b_512 :
+                            GCCBuiltin<"__builtin_ia32_expandqi512_mask">,
+        Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+                   llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_w_512 :
+                            GCCBuiltin<"__builtin_ia32_expandhi512_mask">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_b_256 :
+                            GCCBuiltin<"__builtin_ia32_expandqi256_mask">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_w_256 :
+                            GCCBuiltin<"__builtin_ia32_expandhi256_mask">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_b_128 :
+                            GCCBuiltin<"__builtin_ia32_expandqi128_mask">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_expand_w_128 :
+                            GCCBuiltin<"__builtin_ia32_expandhi128_mask">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_expand_load_b_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloadqi512_mask">,
+        Intrinsic<[llvm_v64i8_ty], [llvm_ptr_ty, llvm_v64i8_ty,
+                   llvm_i64_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_w_512 :
+                            GCCBuiltin<"__builtin_ia32_expandloadhi512_mask">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_ptr_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_b_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloadqi256_mask">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty, llvm_v32i8_ty,
+                   llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_w_256 :
+                            GCCBuiltin<"__builtin_ia32_expandloadhi256_mask">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_ptr_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_b_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloadqi128_mask">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_v16i8_ty,
+                   llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_expand_load_w_128 :
+                            GCCBuiltin<"__builtin_ia32_expandloadhi128_mask">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
+}
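+
+// Illustrative note (not part of the upstream definitions): a masked
+// compress-store packs only the selected lanes contiguously into memory:
+//   call void @llvm.x86.avx512.mask.compress.store.ps.512(
+//            i8* %p, <16 x float> %v, i16 %mask)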
+
+// VBMI2 Concat & Shift
+let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
+  def int_x86_avx512_mask_vpshld_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldq512_mask">,
+        Intrinsic<[llvm_v8i64_ty],
+                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldq256_mask">,
+        Intrinsic<[llvm_v4i64_ty],
+                  [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldq128_mask">,
+        Intrinsic<[llvm_v2i64_ty],
+                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshld_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldd512_mask">,
+        Intrinsic<[llvm_v16i32_ty],
+                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldd256_mask">,
+        Intrinsic<[llvm_v8i32_ty],
+                  [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldd128_mask">,
+        Intrinsic<[llvm_v4i32_ty],
+                  [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshld_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldw512_mask">,
+        Intrinsic<[llvm_v32i16_ty],
+                  [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldw256_mask">,
+        Intrinsic<[llvm_v16i16_ty],
+                  [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshld_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldw128_mask">,
+        Intrinsic<[llvm_v8i16_ty],
+                  [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrd_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdq512_mask">,
+        Intrinsic<[llvm_v8i64_ty],
+                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdq256_mask">,
+        Intrinsic<[llvm_v4i64_ty],
+                  [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdq128_mask">,
+        Intrinsic<[llvm_v2i64_ty],
+                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrd_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdd512_mask">,
+        Intrinsic<[llvm_v16i32_ty],
+                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdd256_mask">,
+        Intrinsic<[llvm_v8i32_ty],
+                  [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdd128_mask">,
+        Intrinsic<[llvm_v4i32_ty],
+                  [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrd_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdw512_mask">,
+        Intrinsic<[llvm_v32i16_ty],
+                  [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdw256_mask">,
+        Intrinsic<[llvm_v16i16_ty],
+                  [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrd_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdw128_mask">,
+        Intrinsic<[llvm_v8i16_ty],
+                  [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshldv_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw128_mask">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw128_maskz">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw256_mask">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw256_maskz">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw512_mask">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvw512_maskz">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshldv_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq128_mask">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq128_maskz">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq256_mask">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq256_maskz">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvq512_maskz">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshldv_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd128_mask">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd128_maskz">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd256_mask">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd256_maskz">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshldv_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshldv_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshldvd512_maskz">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrdv_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw128_mask">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_w_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw128_maskz">,
+        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw256_mask">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_w_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw256_maskz">,
+        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw512_mask">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_w_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvw512_maskz">,
+        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrdv_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq128_mask">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_q_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq128_maskz">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq256_mask">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_q_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq256_maskz">,
+        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq512_mask">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_q_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvq512_maskz">,
+        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vpshrdv_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd128_mask">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_d_128 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd128_maskz">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd256_mask">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_d_256 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd256_maskz">,
+        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vpshrdv_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd512_mask">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_maskz_vpshrdv_d_512 :
+        GCCBuiltin<"__builtin_ia32_vpshrdvd512_maskz">,
+        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+}
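+
+// Illustrative note (not part of the upstream definitions): the variable
+// concat-shift forms take per-element shift counts plus a mask, e.g.
+//   %r = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(
+//            <32 x i16> %a, <32 x i16> %b, <32 x i16> %cnt, i32 %mask)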
+
+// truncate
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_pmov_qb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
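+
+  // Illustrative note (not part of the upstream definitions): the register
+  // forms return the narrowed vector while the "_mem" forms store it, e.g.
+  //   %t = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(
+  //            <2 x i64> %a, <16 x i8> %src, i8 %mask)
+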
+  def int_x86_avx512_mask_pmov_qb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqw512_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw512_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw512_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qd_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqd128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qd_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovqd128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qd_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qd_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qd_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qd_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qd_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqd256_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qd_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovqd256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qd_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd256_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qd_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qd_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd256_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qd_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_qd_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqd512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_qd_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovqd512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_qd_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_qd_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsqd512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_qd_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd512_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_qd_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusqd512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_db_128 :
+          GCCBuiltin<"__builtin_ia32_pmovdb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_db_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovdb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_db_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_db_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_db_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_db_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_db_256 :
+          GCCBuiltin<"__builtin_ia32_pmovdb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_db_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovdb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_db_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_db_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_db_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_db_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_db_512 :
+          GCCBuiltin<"__builtin_ia32_pmovdb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_db_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovdb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_db_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_db_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsdb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_db_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb512_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_db_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusdb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_dw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovdw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_dw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovdw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_dw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_dw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_dw_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw128_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_dw_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_dw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovdw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_dw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovdw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_dw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_dw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_dw_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw256_mask">,
+          Intrinsic<[llvm_v8i16_ty],
+                    [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_dw_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_dw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovdw512_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+                    [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_dw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovdw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_dw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw512_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+                    [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_dw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovsdw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_dw_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw512_mask">,
+          Intrinsic<[llvm_v16i16_ty],
+                    [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_dw_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovusdw512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_wb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovwb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_wb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovwb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_wb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovswb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_wb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovswb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_wb_128 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb128_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_wb_mem_128 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb128mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_wb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovwb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_wb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovwb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_wb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovswb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_wb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovswb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_wb_256 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb256_mask">,
+          Intrinsic<[llvm_v16i8_ty],
+                    [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_wb_mem_256 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb256mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmov_wb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovwb512_mask">,
+          Intrinsic<[llvm_v32i8_ty],
+                    [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmov_wb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovwb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovs_wb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovswb512_mask">,
+          Intrinsic<[llvm_v32i8_ty],
+                    [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovs_wb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovswb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_pmovus_wb_512 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb512_mask">,
+          Intrinsic<[llvm_v32i8_ty],
+                    [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+                    [IntrNoMem]>;
+  def int_x86_avx512_mask_pmovus_wb_mem_512 :
+          GCCBuiltin<"__builtin_ia32_pmovuswb512mem_mask">,
+          Intrinsic<[],
+                    [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+}
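+
+// Illustrative note (an assumption, not part of the upstream file): the
+// register forms above return the narrowed vector, with the vector operand
+// acting as a passthru for lanes whose mask bit is clear, while the *_mem_*
+// forms store the narrowed elements to memory under the mask.  In IR:
+//
+//   %r = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(
+//            <2 x i64> %src, <16 x i8> %passthru, i8 %mask)
+//   call void @llvm.x86.avx512.mask.pmov.qb.mem.128(
+//            i8* %dst, <2 x i64> %src, i8 %mask)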
+
+// Bitwise ternary logic
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_pternlog_d_128 :
+          GCCBuiltin<"__builtin_ia32_pternlogd128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_d_128 :
+          GCCBuiltin<"__builtin_ia32_pternlogd128_maskz">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pternlog_d_256 :
+          GCCBuiltin<"__builtin_ia32_pternlogd256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_d_256 :
+          GCCBuiltin<"__builtin_ia32_pternlogd256_maskz">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pternlog_d_512 :
+          GCCBuiltin<"__builtin_ia32_pternlogd512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+                    [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+                     llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_d_512 :
+          GCCBuiltin<"__builtin_ia32_pternlogd512_maskz">,
+          Intrinsic<[llvm_v16i32_ty],
+                    [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+                     llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pternlog_q_128 :
+          GCCBuiltin<"__builtin_ia32_pternlogq128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+                    [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_q_128 :
+          GCCBuiltin<"__builtin_ia32_pternlogq128_maskz">,
+          Intrinsic<[llvm_v2i64_ty],
+                    [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pternlog_q_256 :
+          GCCBuiltin<"__builtin_ia32_pternlogq256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+                    [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_q_256 :
+          GCCBuiltin<"__builtin_ia32_pternlogq256_maskz">,
+          Intrinsic<[llvm_v4i64_ty],
+                    [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_pternlog_q_512 :
+          GCCBuiltin<"__builtin_ia32_pternlogq512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+                    [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_maskz_pternlog_q_512 :
+          GCCBuiltin<"__builtin_ia32_pternlogq512_maskz">,
+          Intrinsic<[llvm_v8i64_ty],
+                    [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+                     llvm_i8_ty], [IntrNoMem]>;
+}
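+
+// Illustrative note (an assumption, not part of the upstream file): the i32
+// immediate is an 8-bit truth table; for every bit position the three source
+// bits index into it, so e.g. 0x96 computes a ^ b ^ c.  A sketch in IR:
+//
+//   %r = call <4 x i32> @llvm.x86.avx512.mask.pternlog.d.128(
+//            <4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i32 150, i8 %mask)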
+
+// Misc.
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_cmp_ps_512 :
+              Intrinsic<[llvm_v16i1_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+                         llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_pd_512 :
+              Intrinsic<[llvm_v8i1_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+                         llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_ps_256 :
+              Intrinsic<[llvm_v8i1_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_pd_256 :
+              Intrinsic<[llvm_v4i1_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+                         llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_ps_128 :
+            Intrinsic<[llvm_v4i1_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                       llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_pd_128 :
+            Intrinsic<[llvm_v2i1_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                       llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_cmp_ss :
+        GCCBuiltin<"__builtin_ia32_cmpss_mask">,
+              Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+                         llvm_i32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_cmp_sd :
+        GCCBuiltin<"__builtin_ia32_cmpsd_mask">,
+              Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+                         llvm_i32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+}
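+
+// Illustrative note (an assumption, not part of the upstream file): for the
+// comparisons above, the first i32 operand selects the predicate (e.g. 1 for
+// LT_OS) and, on the 512-bit forms, the extra trailing i32 carries the
+// SAE/rounding control.  A sketch in IR:
+//
+//   %m = call <16 x i1> @llvm.x86.avx512.mask.cmp.ps.512(
+//            <16 x float> %a, <16 x float> %b, i32 1, i32 4)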
+
+//===----------------------------------------------------------------------===//
+// SHA intrinsics
+let TargetPrefix = "x86" in {
+  def int_x86_sha1rnds4 : GCCBuiltin<"__builtin_ia32_sha1rnds4">,
+        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+                  [IntrNoMem]>;
+  def int_x86_sha1nexte : GCCBuiltin<"__builtin_ia32_sha1nexte">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sha1msg1 : GCCBuiltin<"__builtin_ia32_sha1msg1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sha1msg2 : GCCBuiltin<"__builtin_ia32_sha1msg2">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sha256rnds2 : GCCBuiltin<"__builtin_ia32_sha256rnds2">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+                [IntrNoMem]>;
+  def int_x86_sha256msg1 : GCCBuiltin<"__builtin_ia32_sha256msg1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+  def int_x86_sha256msg2 : GCCBuiltin<"__builtin_ia32_sha256msg2">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+}
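+
+// Illustrative note (an assumption, not part of the upstream file): the i8
+// immediate of sha1rnds4 selects which of the four groups of SHA-1 round
+// constants (0-3) to apply, e.g.:
+//
+//   %s = call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %abcd,
+//                                           <4 x i32> %wk, i8 0)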
+
+//===----------------------------------------------------------------------===//
+// Thread synchronization ops with timer.
+let TargetPrefix = "x86" in {
+  def int_x86_monitorx
+      : GCCBuiltin<"__builtin_ia32_monitorx">,
+        Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty ], []>;
+  def int_x86_mwaitx
+      : GCCBuiltin<"__builtin_ia32_mwaitx">,
+        Intrinsic<[], [ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ], []>;
+}
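+
+// Illustrative sketch (an assumption, not part of the upstream file): a
+// typical pairing arms the monitor on an address and then waits on it:
+//
+//   call void @llvm.x86.monitorx(i8* %addr, i32 0, i32 0)
+//   call void @llvm.x86.mwaitx(i32 0, i32 0, i32 %timer)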
+
+//===----------------------------------------------------------------------===//
+// Cache-line zero
+let TargetPrefix = "x86" in {
+  def int_x86_clzero : GCCBuiltin<"__builtin_ia32_clzero">,
+      Intrinsic<[], [llvm_ptr_ty], []>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/IntrinsicsXCore.td b/linux-x64/clang/include/llvm/IR/IntrinsicsXCore.td
new file mode 100644
index 0000000..b614e1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/IntrinsicsXCore.td
@@ -0,0 +1,121 @@
+//==- IntrinsicsXCore.td - XCore intrinsics                 -*- tablegen -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the XCore-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "xcore" in {  // All intrinsics start with "llvm.xcore.".
+  // Miscellaneous instructions.
+  def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>,
+                         GCCBuiltin<"__builtin_bitrev">;
+  def int_xcore_crc8 : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+                                 [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+                                 [IntrNoMem]>;
+  def int_xcore_crc32 : Intrinsic<[llvm_i32_ty],
+                                  [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+                                  [IntrNoMem]>;
+  def int_xcore_sext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+  def int_xcore_zext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+  def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>,
+                        GCCBuiltin<"__builtin_getid">;
+  def int_xcore_getps : Intrinsic<[llvm_i32_ty],[llvm_i32_ty]>,
+                        GCCBuiltin<"__builtin_getps">;
+  def int_xcore_setps : Intrinsic<[],[llvm_i32_ty, llvm_i32_ty]>,
+                        GCCBuiltin<"__builtin_setps">;
+  def int_xcore_geted : Intrinsic<[llvm_i32_ty],[]>;
+  def int_xcore_getet : Intrinsic<[llvm_i32_ty],[]>;
+  def int_xcore_setsr : Intrinsic<[],[llvm_i32_ty]>;
+  def int_xcore_clrsr : Intrinsic<[],[llvm_i32_ty]>;
+
+  // Resource instructions.
+  def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
+  def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
+                                   [NoCapture<0>]>;
+  def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<0>]>;
+  def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                [NoCapture<0>]>;
+  def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                 [NoCapture<0>]>;
+  def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                [NoCapture<0>]>;
+  def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                 [NoCapture<0>]>;
+  def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_testct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                   [NoCapture<0>]>;
+  def int_xcore_testwct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                    [NoCapture<0>]>;
+  def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_inshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_outshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_settw : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_setv : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                 [NoCapture<0>]>;
+  def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                  [NoCapture<0>]>;
+  def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+  def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+  def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
+                                   [NoCapture<0>, NoCapture<1>]>;
+  def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
+                                   [NoCapture<0>, NoCapture<1>]>;
+  def int_xcore_setpsc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+                                   [NoCapture<0>]>;
+  def int_xcore_peek : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                 [NoCapture<0>]>;
+  def int_xcore_endin : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+                                 [NoCapture<0>]>;
+
+  // Intrinsics for events.
+  def int_xcore_waitevent : Intrinsic<[llvm_ptr_ty],[], [IntrReadMem]>;
+
+  // If any of the resources owned by the thread are ready, this returns the
+  // vector of one of the ready resources. If no resources owned by the thread
+  // are ready then the operand passed to the intrinsic is returned.
+  def int_xcore_checkevent : Intrinsic<[llvm_ptr_ty],[llvm_ptr_ty]>;
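+  // Illustrative sketch (an assumption, not part of the upstream file): an
+  // event loop passes a fallback value and dispatches on the result, e.g.:
+  //   %ev = call i8* @llvm.xcore.checkevent(i8* %fallback)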
+
+  def int_xcore_clre : Intrinsic<[],[],[]>;
+
+  // Intrinsics for threads.
+  def int_xcore_getst : Intrinsic <[llvm_anyptr_ty],[llvm_anyptr_ty],
+                                   [NoCapture<0>]>;
+  def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+  def int_xcore_ssync : Intrinsic <[],[]>;
+  def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+  def int_xcore_initsp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                    [NoCapture<0>]>;
+  def int_xcore_initpc : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                    [NoCapture<0>]>;
+  def int_xcore_initlr : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                    [NoCapture<0>]>;
+  def int_xcore_initcp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                    [NoCapture<0>]>;
+  def int_xcore_initdp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+                                    [NoCapture<0>]>;
+}
diff --git a/linux-x64/clang/include/llvm/IR/LLVMContext.h b/linux-x64/clang/include/llvm/IR/LLVMContext.h
new file mode 100644
index 0000000..5778b41
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/LLVMContext.h
@@ -0,0 +1,349 @@
+//===- llvm/LLVMContext.h - Class for managing "global" state ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares LLVMContext, a container of "global" state in LLVM, such
+// as the global type and constant uniquing tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LLVMCONTEXT_H
+#define LLVM_IR_LLVMCONTEXT_H
+
+#include "llvm-c/Types.h"
+#include "llvm/IR/DiagnosticHandler.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Options.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class DiagnosticInfo;
+enum DiagnosticSeverity : char;
+class Function;
+class Instruction;
+class LLVMContextImpl;
+class Module;
+class OptPassGate;
+template <typename T> class SmallVectorImpl;
+class SMDiagnostic;
+class StringRef;
+class Twine;
+
+namespace yaml {
+
+class Output;
+
+} // end namespace yaml
+
+namespace SyncScope {
+
+typedef uint8_t ID;
+
+/// Known synchronization scope IDs, which always have the same value.  All
+/// synchronization scope IDs that LLVM has special knowledge of are listed
+/// here.  Additionally, this scheme allows LLVM to efficiently check for
+/// a specific synchronization scope ID without comparing strings.
+enum {
+  /// Synchronized with respect to signal handlers executing in the same thread.
+  SingleThread = 0,
+
+  /// Synchronized with respect to all concurrently executing threads.
+  System = 1
+};
+
+} // end namespace SyncScope
+
+/// This is an important class for using LLVM in a threaded context.  It
+/// (opaquely) owns and manages the core "global" data of LLVM's core
+/// infrastructure, including the type and constant uniquing tables.
+/// LLVMContext itself provides no locking guarantees, so you should be careful
+/// to have one context per thread.
+class LLVMContext {
+public:
+  LLVMContextImpl *const pImpl;
+  LLVMContext();
+  LLVMContext(LLVMContext &) = delete;
+  LLVMContext &operator=(const LLVMContext &) = delete;
+  ~LLVMContext();
+
+  // Pinned metadata names, which always have the same value.  This is a
+  // compile-time performance optimization, not a correctness optimization.
+  enum : unsigned {
+    MD_dbg = 0,                       // "dbg"
+    MD_tbaa = 1,                      // "tbaa"
+    MD_prof = 2,                      // "prof"
+    MD_fpmath = 3,                    // "fpmath"
+    MD_range = 4,                     // "range"
+    MD_tbaa_struct = 5,               // "tbaa.struct"
+    MD_invariant_load = 6,            // "invariant.load"
+    MD_alias_scope = 7,               // "alias.scope"
+    MD_noalias = 8,                   // "noalias"
+    MD_nontemporal = 9,               // "nontemporal"
+    MD_mem_parallel_loop_access = 10, // "llvm.mem.parallel_loop_access"
+    MD_nonnull = 11,                  // "nonnull"
+    MD_dereferenceable = 12,          // "dereferenceable"
+    MD_dereferenceable_or_null = 13,  // "dereferenceable_or_null"
+    MD_make_implicit = 14,            // "make.implicit"
+    MD_unpredictable = 15,            // "unpredictable"
+    MD_invariant_group = 16,          // "invariant.group"
+    MD_align = 17,                    // "align"
+    MD_loop = 18,                     // "llvm.loop"
+    MD_type = 19,                     // "type"
+    MD_section_prefix = 20,           // "section_prefix"
+    MD_absolute_symbol = 21,          // "absolute_symbol"
+    MD_associated = 22,               // "associated"
+    MD_callees = 23,                  // "callees"
+    MD_irr_loop = 24,                 // "irr_loop"
+  };
+
+  /// Known operand bundle tag IDs, which always have the same value.  All
+  /// operand bundle tags that LLVM has special knowledge of are listed here.
+  /// Additionally, this scheme allows LLVM to efficiently check for specific
+  /// operand bundle tags without comparing strings.
+  enum : unsigned {
+    OB_deopt = 0,         // "deopt"
+    OB_funclet = 1,       // "funclet"
+    OB_gc_transition = 2, // "gc-transition"
+  };
+
+  /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+  /// This ID is uniqued across modules in the current LLVMContext.
+  unsigned getMDKindID(StringRef Name) const;
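+  // Illustrative usage (a sketch, not part of the upstream header): the
+  // pinned kinds above resolve to their fixed IDs, e.g.
+  //   LLVMContext Ctx;
+  //   assert(Ctx.getMDKindID("dbg") == LLVMContext::MD_dbg);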
+
+  /// getMDKindNames - Populate the client-supplied SmallVector with the names
+  /// for custom metadata IDs registered in this LLVMContext.
+  void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+  /// getOperandBundleTags - Populate client supplied SmallVector with the
+  /// bundle tags registered in this LLVMContext.  The bundle tags are ordered
+  /// by increasing bundle IDs.
+  /// \see LLVMContext::getOperandBundleTagID
+  void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
+
+  /// getOperandBundleTagID - Maps a bundle tag to an integer ID.  Every bundle
+  /// tag registered with an LLVMContext has a unique ID.
+  uint32_t getOperandBundleTagID(StringRef Tag) const;
+
+  /// getOrInsertSyncScopeID - Maps a synchronization scope name to a
+  /// synchronization scope ID.  Every synchronization scope registered with an
+  /// LLVMContext has a unique ID, except for the pre-defined ones.
+  SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
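+  // Illustrative usage (a sketch; "workgroup" is a hypothetical scope name):
+  //   SyncScope::ID WG = Ctx.getOrInsertSyncScopeID("workgroup");
+  //   // WG differs from SyncScope::SingleThread and SyncScope::System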
+
+  /// getSyncScopeNames - Populates client supplied SmallVector with
+  /// synchronization scope names registered with LLVMContext.  Synchronization
+  /// scope names are ordered by increasing synchronization scope IDs.
+  void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
+  /// Define the GC for a function
+  void setGC(const Function &Fn, std::string GCName);
+
+  /// Return the GC for a function
+  const std::string &getGC(const Function &Fn);
+
+  /// Remove the GC for a function
+  void deleteGC(const Function &Fn);
+
+  /// Return true if the Context runtime configuration is set to discard all
+  /// value names. When true, only GlobalValue names will be available in the
+  /// IR.
+  bool shouldDiscardValueNames() const;
+
+  /// Set the Context runtime configuration to discard all value names (except
+  /// GlobalValue names). Clients can use this flag to save memory and runtime,
+  /// especially in release mode.
+  void setDiscardValueNames(bool Discard);
+
+  /// Whether there is a string map for uniquing debug info
+  /// identifiers across the context.  Off by default.
+  bool isODRUniquingDebugTypes() const;
+  void enableDebugTypeODRUniquing();
+  void disableDebugTypeODRUniquing();
+
+  using InlineAsmDiagHandlerTy = void (*)(const SMDiagnostic&, void *Context,
+                                          unsigned LocCookie);
+
+  /// Defines the type of a yield callback.
+  /// \see LLVMContext::setYieldCallback.
+  using YieldCallbackTy = void (*)(LLVMContext *Context, void *OpaqueHandle);
+
+  /// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
+  /// when problems with inline asm are detected by the backend.  The first
+  /// argument is a function pointer and the second is a context pointer that
+  /// gets passed into the DiagHandler.
+  ///
+  /// LLVMContext doesn't take ownership or interpret either of these
+  /// pointers.
+  void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
+                                     void *DiagContext = nullptr);
+
+  /// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
+  /// setInlineAsmDiagnosticHandler.
+  InlineAsmDiagHandlerTy getInlineAsmDiagnosticHandler() const;
+
+  /// getInlineAsmDiagnosticContext - Return the diagnostic context set by
+  /// setInlineAsmDiagnosticHandler.
+  void *getInlineAsmDiagnosticContext() const;
+
+  /// setDiagnosticHandlerCallBack - This method sets a handler callback that
+  /// is invoked when the backend needs to report anything to the user.
+  /// The first argument is a function pointer and the second is a context pointer
+  /// that gets passed into the DiagHandler.  The third argument should be set to
+  /// true if the handler only expects enabled diagnostics.
+  ///
+  /// LLVMContext doesn't take ownership or interpret either of these
+  /// pointers.
+  void setDiagnosticHandlerCallBack(
+      DiagnosticHandler::DiagnosticHandlerTy DiagHandler,
+      void *DiagContext = nullptr, bool RespectFilters = false);
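+  // Minimal sketch (illustrative, not part of the upstream header):
+  //   static void MyHandler(const DiagnosticInfo &DI, void *State) {
+  //     // e.g. inspect DI.getSeverity() and record it in *State
+  //   }
+  //   Ctx.setDiagnosticHandlerCallBack(MyHandler, &MyState);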
+
+  /// setDiagnosticHandler - This method installs a unique_ptr to a
+  /// DiagnosticHandler object to provide custom diagnostic handling. The first
+  /// argument is a unique_ptr to an object of type DiagnosticHandler or a
+  /// class derived from it.  The second argument should be set to true if the
+  /// handler only expects enabled diagnostics.
+  ///
+  /// Ownership of this pointer is moved to LLVMContextImpl.
+  void setDiagnosticHandler(std::unique_ptr<DiagnosticHandler> &&DH,
+                            bool RespectFilters = false);
+
+  /// getDiagnosticHandlerCallBack - Return the diagnostic handler callback set
+  /// by setDiagnosticHandlerCallBack.
+  DiagnosticHandler::DiagnosticHandlerTy getDiagnosticHandlerCallBack() const;
+
+  /// getDiagnosticContext - Return the diagnostic context set by
+  /// setDiagnosticHandlerCallBack.
+  void *getDiagnosticContext() const;
+
+  /// getDiagHandlerPtr - Returns a const raw pointer to the DiagnosticHandler
+  /// set by setDiagnosticHandler.
+  const DiagnosticHandler *getDiagHandlerPtr() const;
+
+  /// getDiagnosticHandler - Transfers ownership of the DiagnosticHandler
+  /// unique_ptr to the caller.
+  std::unique_ptr<DiagnosticHandler> getDiagnosticHandler();
+
+  /// \brief Return whether a code hotness metric should be included in
+  /// optimization diagnostics.
+  bool getDiagnosticsHotnessRequested() const;
+  /// \brief Set whether a code hotness metric should be included in
+  /// optimization diagnostics.
+  void setDiagnosticsHotnessRequested(bool Requested);
+
+  /// \brief Return the minimum hotness value a diagnostic would need in order
+  /// to be included in optimization diagnostics. If there is no minimum, this
+  /// returns zero.
+  uint64_t getDiagnosticsHotnessThreshold() const;
+
+  /// \brief Set the minimum hotness value a diagnostic needs in order to be
+  /// included in optimization diagnostics.
+  void setDiagnosticsHotnessThreshold(uint64_t Threshold);
+
+  /// \brief Return the YAML file used by the backend to save optimization
+  /// diagnostics.  If null, diagnostics are not saved in a file but only
+  /// emitted via the diagnostic handler.
+  yaml::Output *getDiagnosticsOutputFile();
+  /// Set the diagnostics output file used for optimization diagnostics.
+  ///
+  /// By default or if invoked with null, diagnostics are not saved in a file
+  /// but only emitted via the diagnostic handler.  Even if an output file is
+  /// set, the handler is invoked for each diagnostic message.
+  void setDiagnosticsOutputFile(std::unique_ptr<yaml::Output> F);
+
+  /// \brief Get the prefix that should be printed in front of a diagnostic of
+  ///        the given \p Severity
+  static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity);
+
+  /// \brief Report a message to the currently installed diagnostic handler.
+  ///
+  /// This function returns, in particular in the case of error reporting
+  /// (DI.Severity == \a DS_Error), so the caller should leave the compilation
+  /// process in a self-consistent state, even though the generated code
+  /// need not be correct.
+  ///
+  /// The diagnostic message will be implicitly prefixed with a severity keyword
+  /// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error,
+  /// "warning: " for \a DS_Warning, and "note: " for \a DS_Note.
+  void diagnose(const DiagnosticInfo &DI);
+
+  /// \brief Registers a yield callback with the given context.
+  ///
+  /// The yield callback function may be called by LLVM to transfer control back
+  /// to the client that invoked the LLVM compilation. This can be used to yield
+  /// control of the thread, or perform periodic work needed by the client.
+  /// There is no guaranteed frequency at which callbacks must occur; in fact,
+  /// the client is not guaranteed to ever receive this callback. It is at the
+  /// sole discretion of LLVM to do so and only if it can guarantee that
+  /// suspending the thread won't block any forward progress in other LLVM
+  /// contexts in the same process.
+  ///
+  /// At a suspend point, the state of the current LLVM context is intentionally
+  /// undefined. No assumptions about it can or should be made. Only LLVM
+  /// context API calls that explicitly state that they can be used during a
+  /// yield callback are allowed to be used. Any other API calls into the
+  /// context are not supported until the yield callback function returns
+  /// control to LLVM. Other LLVM contexts are unaffected by this restriction.
+  void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
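+  // Minimal sketch (illustrative; MyEventLoop is a hypothetical client type):
+  //   static void OnYield(LLVMContext *, void *Handle) {
+  //     static_cast<MyEventLoop *>(Handle)->pumpOnce();
+  //   }
+  //   Ctx.setYieldCallback(OnYield, &Loop);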
+
+  /// \brief Calls the yield callback (if applicable).
+  ///
+  /// This transfers control of the current thread back to the client, which may
+  /// suspend the current thread. Only call this method when LLVM doesn't hold
+  /// any global mutex or cannot block the execution in another LLVM context.
+  void yield();
+
+  /// emitError - Emit an error message to the currently installed error handler
+  /// with optional location information.  This function returns, so code should
+  /// be prepared to drop the erroneous construct on the floor and "not crash".
+  /// The generated code need not be correct.  The error message will be
+  /// implicitly prefixed with "error: " and should not end with a ".".
+  void emitError(unsigned LocCookie, const Twine &ErrorStr);
+  void emitError(const Instruction *I, const Twine &ErrorStr);
+  void emitError(const Twine &ErrorStr);
+
+  /// \brief Query for a debug option's value.
+  ///
+  /// This function returns typed data populated from command line parsing.
+  template <typename ValT, typename Base, ValT(Base::*Mem)>
+  ValT getOption() const {
+    return OptionRegistry::instance().template get<ValT, Base, Mem>();
+  }
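+  // Illustrative usage (a sketch; DebugOpts is a hypothetical registered
+  // option class):
+  //   bool V = Ctx.getOption<bool, DebugOpts, &DebugOpts::Verbose>();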
+
+  /// \brief Access the object which manages optimization bisection for failure
+  /// analysis.
+  OptPassGate &getOptPassGate();
+
+private:
+  // Module needs access to the add/removeModule methods.
+  friend class Module;
+
+  /// addModule - Register a module as being instantiated in this context.  If
+  /// the context is deleted, the module will be deleted as well.
+  void addModule(Module*);
+
+  /// removeModule - Unregister a module from this context.
+  void removeModule(Module*);
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)
+
+/* Specialized opaque context conversions.
+ */
+inline LLVMContext **unwrap(LLVMContextRef* Tys) {
+  return reinterpret_cast<LLVMContext**>(Tys);
+}
+
+inline LLVMContextRef *wrap(const LLVMContext **Tys) {
+  return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_LLVMCONTEXT_H
diff --git a/linux-x64/clang/include/llvm/IR/LegacyPassManager.h b/linux-x64/clang/include/llvm/IR/LegacyPassManager.h
new file mode 100644
index 0000000..9a376a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/LegacyPassManager.h
@@ -0,0 +1,106 @@
+//===- LegacyPassManager.h - Legacy Container for Passes --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the legacy PassManager class.  This class is used to hold,
+// maintain, and optimize execution of Passes.  The PassManager class ensures
+// that analysis results are available before a pass runs, and that Passes are
+// destroyed when the PassManager is destroyed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSMANAGER_H
+#define LLVM_IR_LEGACYPASSMANAGER_H
+
+#include "llvm/Pass.h"
+#include "llvm/Support/CBindingWrapping.h"
+
+namespace llvm {
+
+class Pass;
+class Module;
+
+namespace legacy {
+
+class PassManagerImpl;
+class FunctionPassManagerImpl;
+
+/// PassManagerBase - An abstract interface to allow code to add passes to
+/// a pass manager without having to hard-code what kind of pass manager
+/// it is.
+class PassManagerBase {
+public:
+  virtual ~PassManagerBase();
+
+  /// Add a pass to the queue of passes to run.  This passes ownership of
+  /// the Pass to the PassManager.  When the PassManager is destroyed, the pass
+  /// will be destroyed as well, so there is no need to delete the pass.  This
+  /// may even destroy the pass right away if it is found to be redundant. This
+  /// implies that all passes MUST be allocated with 'new'.
+  virtual void add(Pass *P) = 0;
+};
+
+/// PassManager manages ModulePassManagers
+class PassManager : public PassManagerBase {
+public:
+
+  PassManager();
+  ~PassManager() override;
+
+  void add(Pass *P) override;
+
+  /// run - Execute all of the passes scheduled for execution.  Keep track of
+  /// whether any of the passes modifies the module, and if so, return true.
+  bool run(Module &M);
+
+private:
+  /// PassManagerImpl is the actual class. PassManager is just a wrapper that
+  /// publishes the simple pass manager interface.
+  PassManagerImpl *PM;
+};
+
+/// FunctionPassManager manages FunctionPasses and BasicBlockPassManagers.
+class FunctionPassManager : public PassManagerBase {
+public:
+  /// FunctionPassManager ctor - This initializes the pass manager.  It needs,
+  /// but does not take ownership of, the specified Module.
+  explicit FunctionPassManager(Module *M);
+  ~FunctionPassManager() override;
+
+  void add(Pass *P) override;
+
+  /// run - Execute all of the passes scheduled for execution.  Keep
+  /// track of whether any of the passes modifies the function, and if
+  /// so, return true.
+  ///
+  bool run(Function &F);
+
+  /// doInitialization - Run all of the initializers for the function passes.
+  ///
+  bool doInitialization();
+
+  /// doFinalization - Run all of the finalizers for the function passes.
+  ///
+  bool doFinalization();
+
+private:
+  FunctionPassManagerImpl *FPM;
+  Module *M;
+};
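+
+// Minimal usage sketch (illustrative, not part of the upstream header; assumes
+// createVerifierPass() from llvm/IR/Verifier.h):
+//
+//   legacy::FunctionPassManager FPM(&M);   // M is an llvm::Module
+//   FPM.add(createVerifierPass());         // ownership moves to FPM
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();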
+
+} // End legacy namespace
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(legacy::PassManagerBase, LLVMPassManagerRef)
+
+/// If -time-passes has been specified, report the timings immediately and then
+/// reset the timers to zero.
+void reportAndResetTimings();
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/LegacyPassManagers.h b/linux-x64/clang/include/llvm/IR/LegacyPassManagers.h
new file mode 100644
index 0000000..3dc4a77
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/LegacyPassManagers.h
@@ -0,0 +1,505 @@
+//===- LegacyPassManagers.h - Legacy Pass Infrastructure --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LLVM Pass Manager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSMANAGERS_H
+#define LLVM_IR_LEGACYPASSMANAGERS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Pass.h"
+#include <vector>
+
+//===----------------------------------------------------------------------===//
+// Overview:
+// The Pass Manager Infrastructure manages passes. Its responsibilities are:
+//
+//   o Manage optimization pass execution order
+//   o Make required Analysis information available before pass P is run
+//   o Release memory occupied by dead passes
+//   o If Analysis information is dirtied by a pass then regenerate Analysis
+//     information before it is consumed by another pass.
+//
+// The Pass Manager Infrastructure uses multiple pass managers: PassManager,
+// FunctionPassManager, MPPassManager, FPPassManager, and BBPassManager.
+// This class hierarchy uses multiple inheritance, but pass managers do not
+// derive from one another.
+//
+// PassManager and FunctionPassManager are the two top-level pass managers
+// that represent the external interface of this entire pass manager
+// infrastructure.
+//
+// Important classes :
+//
+// [o] class PMTopLevelManager;
+//
+// Two top level managers, PassManager and FunctionPassManager, derive from
+// PMTopLevelManager. PMTopLevelManager manages information used by top level
+// managers such as last user info.
+//
+// [o] class PMDataManager;
+//
+// PMDataManager manages information, e.g. list of available analysis info,
+// used by a pass manager to manage execution order of passes. It also provides
+// a place to implement common pass manager APIs. All pass managers derive from
+// PMDataManager.
+//
+// [o] class BBPassManager : public FunctionPass, public PMDataManager;
+//
+// BBPassManager manages BasicBlockPasses.
+//
+// [o] class FunctionPassManager;
+//
+// This is an external interface used to manage FunctionPasses. This
+// interface relies on FunctionPassManagerImpl to do all the tasks.
+//
+// [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager,
+//                                     public PMTopLevelManager;
+//
+// FunctionPassManagerImpl is a top level manager. It manages FPPassManagers.
+//
+// [o] class FPPassManager : public ModulePass, public PMDataManager;
+//
+// FPPassManager manages FunctionPasses and BBPassManagers.
+//
+// [o] class MPPassManager : public Pass, public PMDataManager;
+//
+// MPPassManager manages ModulePasses and FPPassManagers.
+//
+// [o] class PassManager;
+//
+// This is an external interface used by various tools to manage passes. It
+// relies on PassManagerImpl to do all the tasks.
+//
+// [o] class PassManagerImpl : public Pass, public PMDataManager,
+//                             public PMTopLevelManager
+//
+// PassManagerImpl is a top level pass manager responsible for managing
+// MPPassManagers.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace llvm {
+template <typename T> class ArrayRef;
+class Module;
+class Pass;
+class StringRef;
+class Value;
+class Timer;
+class PMDataManager;
+
+// enums for debugging strings
+enum PassDebuggingString {
+  EXECUTION_MSG, // "Executing Pass '" + PassName
+  MODIFICATION_MSG, // "Made Modification '" + PassName
+  FREEING_MSG, // " Freeing Pass '" + PassName
+  ON_BASICBLOCK_MSG, // "' on BasicBlock '" + InstructionName + "'...\n"
+  ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
+  ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
+  ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'"
+  ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'"
+  ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'"
+};
+
+/// PassManagerPrettyStackEntry - This is used to print information about
+/// which pass is running when/if a stack trace is generated.
+class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
+  Pass *P;
+  Value *V;
+  Module *M;
+
+public:
+  explicit PassManagerPrettyStackEntry(Pass *p)
+    : P(p), V(nullptr), M(nullptr) {}  // When P is releaseMemory'd.
+  PassManagerPrettyStackEntry(Pass *p, Value &v)
+    : P(p), V(&v), M(nullptr) {} // When P is run on V
+  PassManagerPrettyStackEntry(Pass *p, Module &m)
+    : P(p), V(nullptr), M(&m) {} // When P is run on M
+
+  /// print - Emit information about this stack frame to OS.
+  void print(raw_ostream &OS) const override;
+};
+
+//===----------------------------------------------------------------------===//
+// PMStack
+//
+/// PMStack - This class implements a stack data structure of PMDataManager
+/// pointers.
+///
+/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
+/// using PMStack. Each Pass implements assignPassManager() to connect itself
+/// with the appropriate manager. assignPassManager() walks the PMStack to
+/// find a suitable manager.
+class PMStack {
+public:
+  typedef std::vector<PMDataManager *>::const_reverse_iterator iterator;
+  iterator begin() const { return S.rbegin(); }
+  iterator end() const { return S.rend(); }
+
+  void pop();
+  PMDataManager *top() const { return S.back(); }
+  void push(PMDataManager *PM);
+  bool empty() const { return S.empty(); }
+
+  void dump() const;
+
+private:
+  std::vector<PMDataManager *> S;
+};
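+
+// Illustrative sketch of how assignPassManager() implementations walk the
+// stack (this mirrors the pattern used by FunctionPass::assignPassManager):
+// pop managers that cannot host the pass, then use the one left on top.
+//
+//   while (!PMS.empty() &&
+//          PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
+//     PMS.pop();
+//   PMDataManager *PM = PMS.top();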
+
+//===----------------------------------------------------------------------===//
+// PMTopLevelManager
+//
+/// PMTopLevelManager manages LastUser info and collects common APIs used by
+/// top level pass managers.
+class PMTopLevelManager {
+protected:
+  explicit PMTopLevelManager(PMDataManager *PMDM);
+
+  unsigned getNumContainedManagers() const {
+    return (unsigned)PassManagers.size();
+  }
+
+  void initializeAllAnalysisInfo();
+
+private:
+  virtual PMDataManager *getAsPMDataManager() = 0;
+  virtual PassManagerType getTopLevelPassManagerType() = 0;
+
+public:
+  /// Schedule pass P for execution. Make sure that passes required by
+  /// P are run before P is run. Update analysis info maintained by
+  /// the manager. Remove dead passes. This is a recursive function.
+  void schedulePass(Pass *P);
+
+  /// Set pass P as the last user of the given analysis passes.
+  void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);
+
+  /// Collect passes whose last user is P
+  void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);
+
+  /// Find the pass that implements Analysis AID. Search immutable
+  /// passes and all pass managers. If the desired pass is not found
+  /// then return NULL.
+  Pass *findAnalysisPass(AnalysisID AID);
+
+  /// Retrieve the PassInfo for an analysis.
+  const PassInfo *findAnalysisPassInfo(AnalysisID AID) const;
+
+  /// Find analysis usage information for the pass P.
+  AnalysisUsage *findAnalysisUsage(Pass *P);
+
+  virtual ~PMTopLevelManager();
+
+  /// Add immutable pass and initialize it.
+  void addImmutablePass(ImmutablePass *P);
+
+  inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
+    return ImmutablePasses;
+  }
+
+  void addPassManager(PMDataManager *Manager) {
+    PassManagers.push_back(Manager);
+  }
+
+  // Add Manager into the list of managers that are not directly
+  // maintained by this top level pass manager.
+  inline void addIndirectPassManager(PMDataManager *Manager) {
+    IndirectPassManagers.push_back(Manager);
+  }
+
+  // Print passes managed by this top level manager.
+  void dumpPasses() const;
+  void dumpArguments() const;
+
+  // Active Pass Managers
+  PMStack activeStack;
+
+protected:
+  /// Collection of pass managers
+  SmallVector<PMDataManager *, 8> PassManagers;
+
+private:
+  /// Collection of pass managers that are not directly maintained
+  /// by this pass manager
+  SmallVector<PMDataManager *, 8> IndirectPassManagers;
+
+  // Map to keep track of the last user of an analysis pass.
+  // LastUser->second is the last user of LastUser->first.
+  DenseMap<Pass *, Pass *> LastUser;
+
+  // Map to keep track of passes that are last used by a pass.
+  // This inverse map is initialized at PM->run() based on
+  // LastUser map.
+  DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;
+
+  /// Immutable passes are managed by top level manager.
+  SmallVector<ImmutablePass *, 16> ImmutablePasses;
+
+  /// Map from ID to immutable passes.
+  SmallDenseMap<AnalysisID, ImmutablePass *, 8> ImmutablePassMap;
+
+  /// A wrapper around AnalysisUsage for the purpose of uniquing.  The wrapper
+  /// is used to avoid needing to make AnalysisUsage itself a folding set node.
+  struct AUFoldingSetNode : public FoldingSetNode {
+    AnalysisUsage AU;
+    AUFoldingSetNode(const AnalysisUsage &AU) : AU(AU) {}
+    void Profile(FoldingSetNodeID &ID) const {
+      Profile(ID, AU);
+    }
+    static void Profile(FoldingSetNodeID &ID, const AnalysisUsage &AU) {
+      // TODO: We could consider sorting the dependency arrays within the
+      // AnalysisUsage (since they are conceptually unordered).
+      ID.AddBoolean(AU.getPreservesAll());
+      auto ProfileVec = [&](const SmallVectorImpl<AnalysisID>& Vec) {
+        ID.AddInteger(Vec.size());
+        for(AnalysisID AID : Vec)
+          ID.AddPointer(AID);
+      };
+      ProfileVec(AU.getRequiredSet());
+      ProfileVec(AU.getRequiredTransitiveSet());
+      ProfileVec(AU.getPreservedSet());
+      ProfileVec(AU.getUsedSet());
+    }
+  };
+
+  // Contains all of the unique combinations of AnalysisUsage.  This is helpful
+  // when we have multiple instances of the same pass since they'll usually
+  // have the same analysis usage and can share storage.
+  FoldingSet<AUFoldingSetNode> UniqueAnalysisUsages;
+
+  // Allocator used for allocating AUFoldingSetNodes.  This handles deletion of
+  // all allocated nodes in one fell swoop.
+  SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;
+
+  // Maps from a pass to its associated entry in UniqueAnalysisUsages.  Does
+  // not own the storage associated with either key or value.
+  DenseMap<Pass *, AnalysisUsage*> AnUsageMap;
+
+  /// Collection of PassInfo objects found via analysis IDs and in this top
+  /// level manager. This is used to memoize queries to the pass registry.
+  /// FIXME: This is an egregious hack because querying the pass registry is
+  /// either slow or racy.
+  mutable DenseMap<AnalysisID, const PassInfo *> AnalysisPassInfos;
+};
+
+//===----------------------------------------------------------------------===//
+// PMDataManager
+
+/// PMDataManager provides the common place to manage the analysis data
+/// used by pass managers.
+class PMDataManager {
+public:
+  explicit PMDataManager() : TPM(nullptr), Depth(0) {
+    initializeAnalysisInfo();
+  }
+
+  virtual ~PMDataManager();
+
+  virtual Pass *getAsPass() = 0;
+
+  /// Augment AvailableAnalysis by adding analysis made available by pass P.
+  void recordAvailableAnalysis(Pass *P);
+
+  /// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
+  void verifyPreservedAnalysis(Pass *P);
+
+  /// Remove Analysis that is not preserved by the pass.
+  void removeNotPreservedAnalysis(Pass *P);
+
+  /// Remove dead passes used by P.
+  void removeDeadPasses(Pass *P, StringRef Msg,
+                        enum PassDebuggingString);
+
+  /// Remove P.
+  void freePass(Pass *P, StringRef Msg,
+                enum PassDebuggingString);
+
+  /// Add pass P into the PassVector. Update
+  /// AvailableAnalysis appropriately if ProcessAnalysis is true.
+  void add(Pass *P, bool ProcessAnalysis = true);
+
+  /// Add RequiredPass into list of lower level passes required by pass P.
+  /// RequiredPass is run on the fly by Pass Manager when P requests it
+  /// through getAnalysis interface.
+  virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
+
+  virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);
+
+  /// Initialize available analysis information.
+  void initializeAnalysisInfo() {
+    AvailableAnalysis.clear();
+    for (unsigned i = 0; i < PMT_Last; ++i)
+      InheritedAnalysis[i] = nullptr;
+  }
+
+  // Return true if P preserves high level analysis used by other
+  // passes that are managed by this manager.
+  bool preserveHigherLevelAnalysis(Pass *P);
+
+  /// Populate UsedPasses with analysis passes that are used or required by
+  /// pass P and are available. Populate ReqPassNotAvailable with analysis
+  /// passes that are required by pass P but are not available.
+  void collectRequiredAndUsedAnalyses(
+      SmallVectorImpl<Pass *> &UsedPasses,
+      SmallVectorImpl<AnalysisID> &ReqPassNotAvailable, Pass *P);
+
+  /// All Required analyses should be available to the pass as it runs!  Here
+  /// we fill in the AnalysisImpls member of the pass so that it can
+  /// successfully use the getAnalysis() method to retrieve the
+  /// implementations it needs.
+  void initializeAnalysisImpl(Pass *P);
+
+  /// Find the pass that implements Analysis AID. If desired pass is not found
+  /// then return NULL.
+  Pass *findAnalysisPass(AnalysisID AID, bool Direction);
+
+  // Access toplevel manager
+  PMTopLevelManager *getTopLevelManager() { return TPM; }
+  void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }
+
+  unsigned getDepth() const { return Depth; }
+  void setDepth(unsigned newDepth) { Depth = newDepth; }
+
+  // Print routines used by debug-pass
+  void dumpLastUses(Pass *P, unsigned Offset) const;
+  void dumpPassArguments() const;
+  void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
+                    enum PassDebuggingString S2, StringRef Msg);
+  void dumpRequiredSet(const Pass *P) const;
+  void dumpPreservedSet(const Pass *P) const;
+  void dumpUsedSet(const Pass *P) const;
+
+  unsigned getNumContainedPasses() const {
+    return (unsigned)PassVector.size();
+  }
+
+  virtual PassManagerType getPassManagerType() const {
+    assert(0 && "Invalid use of getPassManagerType");
+    return PMT_Unknown;
+  }
+
+  DenseMap<AnalysisID, Pass*> *getAvailableAnalysis() {
+    return &AvailableAnalysis;
+  }
+
+  // Collect AvailableAnalysis from all the active Pass Managers.
+  void populateInheritedAnalysis(PMStack &PMS) {
+    unsigned Index = 0;
+    for (PMStack::iterator I = PMS.begin(), E = PMS.end();
+         I != E; ++I)
+      InheritedAnalysis[Index++] = (*I)->getAvailableAnalysis();
+  }
+
+protected:
+  // Top level manager.
+  PMTopLevelManager *TPM;
+
+  // Collection of pass that are managed by this manager
+  SmallVector<Pass *, 16> PassVector;
+
+  // Collection of Analysis provided by the parent pass manager and
+  // used by the current pass manager. At any time there cannot be more
+  // than PMT_Last active pass managers.
+  DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
+
+  /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
+  /// or higher is specified.
+  bool isPassDebuggingExecutionsOrMore() const;
+
+private:
+  void dumpAnalysisUsage(StringRef Msg, const Pass *P,
+                         const AnalysisUsage::VectorType &Set) const;
+
+  // Set of available Analysis. This information is used while scheduling
+  // passes. If a pass requires an analysis which is not available then
+  // the required analysis pass is scheduled to run before the pass itself is
+  // scheduled to run.
+  DenseMap<AnalysisID, Pass*> AvailableAnalysis;
+
+  // Collection of higher level analysis used by the pass managed by
+  // this manager.
+  SmallVector<Pass *, 16> HigherLevelAnalysis;
+
+  unsigned Depth;
+};
+
+//===----------------------------------------------------------------------===//
+// FPPassManager
+//
+/// FPPassManager manages BBPassManagers and FunctionPasses.
+/// It batches all function passes and basic block pass managers together and
+/// sequences them to process one function at a time before processing the
+/// next function.
+class FPPassManager : public ModulePass, public PMDataManager {
+public:
+  static char ID;
+  explicit FPPassManager() : ModulePass(ID), PMDataManager() {}
+
+  /// run - Execute all of the passes scheduled for execution.  Keep track of
+  /// whether any of the passes modifies the module, and if so, return true.
+  bool runOnFunction(Function &F);
+  bool runOnModule(Module &M) override;
+
+  /// cleanup - After running all passes, clean up pass manager cache.
+  void cleanup();
+
+  /// doInitialization - Overrides ModulePass doInitialization for global
+  /// initialization tasks
+  ///
+  using ModulePass::doInitialization;
+
+  /// doInitialization - Run all of the initializers for the function passes.
+  ///
+  bool doInitialization(Module &M) override;
+
+  /// doFinalization - Overrides ModulePass doFinalization for global
+  /// finalization tasks
+  ///
+  using ModulePass::doFinalization;
+
+  /// doFinalization - Run all of the finalizers for the function passes.
+  ///
+  bool doFinalization(Module &M) override;
+
+  PMDataManager *getAsPMDataManager() override { return this; }
+  Pass *getAsPass() override { return this; }
+
+  /// Pass Manager itself does not invalidate any analysis info.
+  void getAnalysisUsage(AnalysisUsage &Info) const override {
+    Info.setPreservesAll();
+  }
+
+  // Print passes managed by this manager
+  void dumpPassStructure(unsigned Offset) override;
+
+  StringRef getPassName() const override { return "Function Pass Manager"; }
+
+  FunctionPass *getContainedPass(unsigned N) {
+    assert(N < PassVector.size() && "Pass number out of range!");
+    FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]);
+    return FP;
+  }
+
+  PassManagerType getPassManagerType() const override {
+    return PMT_FunctionPassManager;
+  }
+};
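+
+// Conceptual sketch of FPPassManager::runOnModule (simplified; the real
+// implementation also performs initialization, analysis bookkeeping, and
+// pass freeing):
+//
+//   bool Changed = false;
+//   for (Function &F : M)
+//     Changed |= runOnFunction(F);
+//   return Changed;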
+
+Timer *getPassTimer(Pass *);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/LegacyPassNameParser.h b/linux-x64/clang/include/llvm/IR/LegacyPassNameParser.h
new file mode 100644
index 0000000..4cec081
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/LegacyPassNameParser.h
@@ -0,0 +1,139 @@
+//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PassNameParser and FilteredPassNameParser<> classes,
+// which are used to add command line arguments to a utility for all of the
+// passes that have been registered into the system.
+//
+// The PassNameParser class adds ALL passes linked into the system (that are
+// creatable) as command line arguments to the tool (when instantiated with the
+// appropriate command line option template).  The FilteredPassNameParser<>
+// template is used for the same purposes as PassNameParser, except that it only
+// includes passes that have a PassType that is compatible with the filter
+// (which is the template argument).
+//
+// Note that this is part of the legacy pass manager infrastructure and will be
+// (eventually) going away.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
+#define LLVM_IR_LEGACYPASSNAMEPARSER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// PassNameParser class - Make use of the pass registration mechanism to
+// automatically add a command line argument to opt for each pass.
+//
+class PassNameParser : public PassRegistrationListener,
+                       public cl::parser<const PassInfo*> {
+public:
+  PassNameParser(cl::Option &O);
+  ~PassNameParser() override;
+
+  void initialize() {
+    cl::parser<const PassInfo*>::initialize();
+
+    // Add all of the passes to the map that got initialized before 'this' did.
+    enumeratePasses();
+  }
+
+  // ignorablePassImpl - Can be overridden in subclasses to refine the list of
+  // which passes we want to include.
+  //
+  virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }
+
+  inline bool ignorablePass(const PassInfo *P) const {
+    // Ignore non-selectable and non-constructible passes!  Ignore
+    // non-optimizations.
+    return P->getPassArgument().empty() || P->getNormalCtor() == nullptr ||
+           ignorablePassImpl(P);
+  }
+
+  // Implement the PassRegistrationListener callbacks used to populate our map
+  //
+  void passRegistered(const PassInfo *P) override {
+    if (ignorablePass(P)) return;
+    if (findOption(P->getPassArgument().data()) != getNumOptions()) {
+      errs() << "Two passes with the same argument (-"
+             << P->getPassArgument() << ") attempted to be registered!\n";
+      llvm_unreachable(nullptr);
+    }
+    addLiteralOption(P->getPassArgument().data(), P, P->getPassName().data());
+  }
+  void passEnumerate(const PassInfo *P) override { passRegistered(P); }
+
+  // printOptionInfo - Print out information about this option.  Override the
+  // default implementation to sort the table before we print...
+  void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
+    PassNameParser *PNP = const_cast<PassNameParser*>(this);
+    array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValCompare);
+    cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
+  }
+
+private:
+  // ValCompare - Provide a sorting comparator for Values elements...
+  static int ValCompare(const PassNameParser::OptionInfo *VT1,
+                        const PassNameParser::OptionInfo *VT2) {
+    return VT1->Name.compare(VT2->Name);
+  }
+};
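+
+// Typical use (as in the opt tool): declare a command-line list whose parser
+// is PassNameParser, so every registered pass becomes a -<passarg> option.
+//
+//   static cl::list<const PassInfo *, bool, PassNameParser>
+//       PassList(cl::desc("Optimizations available:"));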
+
+///===----------------------------------------------------------------------===//
+/// FilteredPassNameParser class - Make use of the pass registration
+/// mechanism to automatically add a command line argument to opt for
+/// each pass that satisfies a filter criteria.  Filter should return
+/// true for passes to be registered as command-line options.
+///
+template<typename Filter>
+class FilteredPassNameParser : public PassNameParser {
+private:
+  Filter filter;
+
+public:
+  bool ignorablePassImpl(const PassInfo *P) const override {
+    return !filter(*P);
+  }
+};
+
+///===----------------------------------------------------------------------===//
+/// PassArgFilter - A filter for use with FilteredPassNameParser that only
+/// accepts a Pass whose Arg matches certain strings.
+///
+/// Use like this:
+///
+/// extern const char AllowedPassArgs[] = "-anders_aa -dse";
+///
+/// static cl::list<
+///   const PassInfo*,
+///   bool,
+///   FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > >
+/// PassList(cl::desc("Passes available:"));
+///
+/// Only the -anders_aa and -dse options will be available to the user.
+///
+template<const char *Args>
+class PassArgFilter {
+public:
+  bool operator()(const PassInfo &P) const {
+    return StringRef(Args).contains(P.getPassArgument());
+  }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/MDBuilder.h b/linux-x64/clang/include/llvm/IR/MDBuilder.h
new file mode 100644
index 0000000..d5218ea
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/MDBuilder.h
@@ -0,0 +1,203 @@
+//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MDBuilder class, which is used as a convenient way to
+// create LLVM metadata with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MDBUILDER_H
+#define LLVM_IR_MDBUILDER_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/DataTypes.h"
+#include <utility>
+
+namespace llvm {
+
+class APInt;
+template <typename T> class ArrayRef;
+class LLVMContext;
+class Constant;
+class ConstantAsMetadata;
+class MDNode;
+class MDString;
+class Metadata;
+
+class MDBuilder {
+  LLVMContext &Context;
+
+public:
+  MDBuilder(LLVMContext &context) : Context(context) {}
+
+  /// \brief Return the given string as metadata.
+  MDString *createString(StringRef Str);
+
+  /// \brief Return the given constant as metadata.
+  ConstantAsMetadata *createConstant(Constant *C);
+
+  //===------------------------------------------------------------------===//
+  // FPMath metadata.
+  //===------------------------------------------------------------------===//
+
+  /// \brief Return metadata with the given settings.  The special value 0.0
+  /// for the Accuracy parameter indicates the default (maximal precision)
+  /// setting.
+  MDNode *createFPMath(float Accuracy);
+
+  //===------------------------------------------------------------------===//
+  // Prof metadata.
+  //===------------------------------------------------------------------===//
+
+  /// \brief Return metadata containing two branch weights.
+  MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);
+
+  /// \brief Return metadata containing a number of branch weights.
+  MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);
+
+  /// Return metadata specifying that a branch or switch is unpredictable.
+  MDNode *createUnpredictable();
+
+  /// Return metadata containing the entry \p Count for a function, a boolean
+  /// \p Synthetic indicating whether the counts were synthesized, and the
+  /// GUIDs stored in \p Imports that need to be imported for sample PGO, to
+  /// enable the same inlines as the profiled optimized binary.
+  MDNode *createFunctionEntryCount(uint64_t Count, bool Synthetic,
+                                   const DenseSet<GlobalValue::GUID> *Imports);
+
+  /// Return metadata containing the section prefix for a function.
+  MDNode *createFunctionSectionPrefix(StringRef Prefix);
+
+  //===------------------------------------------------------------------===//
+  // Range metadata.
+  //===------------------------------------------------------------------===//
+
+  /// \brief Return metadata describing the range [Lo, Hi).
+  MDNode *createRange(const APInt &Lo, const APInt &Hi);
+
+  /// \brief Return metadata describing the range [Lo, Hi).
+  MDNode *createRange(Constant *Lo, Constant *Hi);
+
+  //===------------------------------------------------------------------===//
+  // Callees metadata.
+  //===------------------------------------------------------------------===//
+
+  /// \brief Return metadata indicating the possible callees of indirect
+  /// calls.
+  MDNode *createCallees(ArrayRef<Function *> Callees);
+
+  //===------------------------------------------------------------------===//
+  // AA metadata.
+  //===------------------------------------------------------------------===//
+
+protected:
+  /// \brief Return metadata appropriate for an AA root node (scope or TBAA).
+  /// Each returned node is distinct from all other metadata and will never
+  /// be identified (uniqued) with anything else.
+  MDNode *createAnonymousAARoot(StringRef Name = StringRef(),
+                                MDNode *Extra = nullptr);
+
+public:
+  /// \brief Return metadata appropriate for a TBAA root node. Each returned
+  /// node is distinct from all other metadata and will never be identified
+  /// (uniqued) with anything else.
+  MDNode *createAnonymousTBAARoot() {
+    return createAnonymousAARoot();
+  }
+
+  /// \brief Return metadata appropriate for an alias scope domain node.
+  /// Each returned node is distinct from all other metadata and will never
+  /// be identified (uniqued) with anything else.
+  MDNode *createAnonymousAliasScopeDomain(StringRef Name = StringRef()) {
+    return createAnonymousAARoot(Name);
+  }
+
+  /// \brief Return metadata appropriate for an alias scope node.
+  /// Each returned node is distinct from all other metadata and will never
+  /// be identified (uniqued) with anything else.
+  MDNode *createAnonymousAliasScope(MDNode *Domain,
+                                    StringRef Name = StringRef()) {
+    return createAnonymousAARoot(Name, Domain);
+  }
+
+  /// \brief Return metadata appropriate for a TBAA root node with the given
+  /// name.  This may be identified (uniqued) with other roots with the same
+  /// name.
+  MDNode *createTBAARoot(StringRef Name);
+
+  /// \brief Return metadata appropriate for an alias scope domain node with
+  /// the given name. This may be identified (uniqued) with other roots with
+  /// the same name.
+  MDNode *createAliasScopeDomain(StringRef Name);
+
+  /// \brief Return metadata appropriate for an alias scope node with
+  /// the given name. This may be identified (uniqued) with other scopes with
+  /// the same name and domain.
+  MDNode *createAliasScope(StringRef Name, MDNode *Domain);
+
+  /// \brief Return metadata for a non-root TBAA node with the given name,
+  /// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
+  MDNode *createTBAANode(StringRef Name, MDNode *Parent,
+                         bool isConstant = false);
+
+  struct TBAAStructField {
+    uint64_t Offset;
+    uint64_t Size;
+    MDNode *Type;
+    TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *Type) :
+      Offset(Offset), Size(Size), Type(Type) {}
+  };
+
+  /// \brief Return metadata for a tbaa.struct node with the given
+  /// struct field descriptions.
+  MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);
+
+  /// \brief Return metadata for a TBAA struct node in the type DAG
+  /// with the given name and a list of (field type in the type DAG, offset)
+  /// pairs.
+  MDNode *
+  createTBAAStructTypeNode(StringRef Name,
+                           ArrayRef<std::pair<MDNode *, uint64_t>> Fields);
+
+  /// \brief Return metadata for a TBAA scalar type node with the
+  /// given name, an offset and a parent in the TBAA type DAG.
+  MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
+                                   uint64_t Offset = 0);
+
+  /// \brief Return metadata for a TBAA tag node with the given
+  /// base type, access type and offset relative to the base type.
+  MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
+                                  uint64_t Offset, bool IsConstant = false);
+
+  /// \brief Return metadata for a TBAA type node in the TBAA type DAG with the
+  /// given parent type, size in bytes, type identifier and a list of fields.
+  MDNode *createTBAATypeNode(MDNode *Parent, uint64_t Size, Metadata *Id,
+                             ArrayRef<TBAAStructField> Fields =
+                                 ArrayRef<TBAAStructField>());
+
+  /// \brief Return metadata for a TBAA access tag with the given base type,
+  /// final access type, offset of the access relative to the base type, size
+  /// of the access, and a flag indicating whether the accessed object can be
+  /// considered immutable for the purposes of the TBAA analysis.
+  MDNode *createTBAAAccessTag(MDNode *BaseType, MDNode *AccessType,
+                              uint64_t Offset, uint64_t Size,
+                              bool IsImmutable = false);
+
+  /// \brief Return mutable version of the given mutable or immutable TBAA
+  /// access tag.
+  MDNode *createMutableTBAAAccessTag(MDNode *Tag);
+
+  /// \brief Return metadata containing an irreducible loop header weight.
+  MDNode *createIrrLoopHeaderWeight(uint64_t Weight);
+};
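+
+// A small usage sketch (assumes an LLVMContext Ctx and a conditional branch
+// instruction Br): build branch-weight profile metadata and attach it.
+//
+//   MDBuilder MDB(Ctx);
+//   MDNode *Weights = MDB.createBranchWeights(/*TrueWeight=*/2000,
+//                                             /*FalseWeight=*/1);
+//   Br->setMetadata(LLVMContext::MD_prof, Weights);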
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Mangler.h b/linux-x64/clang/include/llvm/IR/Mangler.h
new file mode 100644
index 0000000..0261c00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Mangler.h
@@ -0,0 +1,58 @@
+//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for various backends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MANGLER_H
+#define LLVM_IR_MANGLER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/GlobalValue.h"
+
+namespace llvm {
+
+class DataLayout;
+template <typename T> class SmallVectorImpl;
+class Triple;
+class Twine;
+class raw_ostream;
+
+class Mangler {
+  /// We need to give global values the same name every time they are mangled.
+  /// This keeps track of the number we give to anonymous ones.
+  mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;
+
+public:
+  /// Print the appropriate prefix and the specified global variable's name.
+  /// If the global variable doesn't have a name, this fills in a unique name
+  /// for the global.
+  void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+                         bool CannotUsePrivateLabel) const;
+  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+                         bool CannotUsePrivateLabel) const;
+
+  /// Print the appropriate prefix and the specified name as the global variable
+  /// name. GVName must not be empty.
+  static void getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+                                const DataLayout &DL);
+  static void getNameWithPrefix(SmallVectorImpl<char> &OutName,
+                                const Twine &GVName, const DataLayout &DL);
+};
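+
+// Example sketch (assumes a GlobalValue *GV and llvm/ADT/SmallString.h):
+// compute the mangled symbol name into a small buffer.
+//
+//   Mangler Mang;
+//   SmallString<64> Name;
+//   Mang.getNameWithPrefix(Name, GV, /*CannotUsePrivateLabel=*/false);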
+
+void emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
+                                  const Triple &TT, Mangler &Mangler);
+
+void emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
+                                const Triple &T, Mangler &M);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/Metadata.def b/linux-x64/clang/include/llvm/IR/Metadata.def
new file mode 100644
index 0000000..03cdcab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Metadata.def
@@ -0,0 +1,125 @@
+//===- llvm/IR/Metadata.def - Metadata definitions --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through all types of metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#if !(defined HANDLE_METADATA || defined HANDLE_METADATA_LEAF ||               \
+      defined HANDLE_METADATA_BRANCH || defined HANDLE_MDNODE_LEAF ||          \
+      defined HANDLE_MDNODE_LEAF_UNIQUABLE || defined HANDLE_MDNODE_BRANCH ||  \
+      defined HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE ||                      \
+      defined HANDLE_SPECIALIZED_MDNODE_LEAF ||                                \
+      defined HANDLE_SPECIALIZED_MDNODE_BRANCH)
+#error "Missing macro definition of HANDLE_METADATA*"
+#endif
+
+// Handler for all types of metadata.
+#ifndef HANDLE_METADATA
+#define HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for leaf nodes in the class hierarchy.
+#ifndef HANDLE_METADATA_LEAF
+#define HANDLE_METADATA_LEAF(CLASS) HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for non-leaf nodes in the class hierarchy.
+#ifndef HANDLE_METADATA_BRANCH
+#define HANDLE_METADATA_BRANCH(CLASS) HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for specialized and uniquable leaf nodes under MDNode.  Defers to
+// HANDLE_MDNODE_LEAF_UNIQUABLE if it's defined, otherwise to
+// HANDLE_SPECIALIZED_MDNODE_LEAF.
+#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
+#ifdef HANDLE_MDNODE_LEAF_UNIQUABLE
+#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS)                        \
+  HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS)
+#else
+#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS)                        \
+  HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)
+#endif
+#endif
+
+// Handler for uniquable leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_LEAF_UNIQUABLE
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#endif
+
+// Handler for leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_LEAF
+#define HANDLE_MDNODE_LEAF(CLASS) HANDLE_METADATA_LEAF(CLASS)
+#endif
+
+// Handler for non-leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_BRANCH
+#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_METADATA_BRANCH(CLASS)
+#endif
+
+// Handler for specialized leaf nodes under MDNode.
+#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#endif
+
+// Handler for specialized non-leaf nodes under MDNode.
+#ifndef HANDLE_SPECIALIZED_MDNODE_BRANCH
+#define HANDLE_SPECIALIZED_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_BRANCH(CLASS)
+#endif
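+
+// Typical use (illustrative): define only the handler of interest before
+// including this file; the defaults above forward every other macro to it.
+// Metadata.h uses exactly this pattern to build its MetadataKind enum:
+//
+//   #define HANDLE_METADATA_LEAF(CLASS) CLASS##Kind,
+//   #include "llvm/IR/Metadata.def"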
+
+HANDLE_METADATA_LEAF(MDString)
+HANDLE_METADATA_BRANCH(ValueAsMetadata)
+HANDLE_METADATA_LEAF(ConstantAsMetadata)
+HANDLE_METADATA_LEAF(LocalAsMetadata)
+HANDLE_METADATA_LEAF(DistinctMDOperandPlaceholder)
+HANDLE_MDNODE_BRANCH(MDNode)
+HANDLE_MDNODE_LEAF_UNIQUABLE(MDTuple)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocation)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIExpression)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariableExpression)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(GenericDINode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubrange)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIEnumerator)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIScope)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIBasicType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIDerivedType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICompositeType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubroutineType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIFile)
+HANDLE_SPECIALIZED_MDNODE_LEAF(DICompileUnit)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DILocalScope)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubprogram)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DILexicalBlockBase)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlock)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlockFile)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DINamespace)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIModule)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DITemplateParameter)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateTypeParameter)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateValueParameter)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocalVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIObjCProperty)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIImportedEntity)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIMacroNode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacro)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacroFile)
+
+#undef HANDLE_METADATA
+#undef HANDLE_METADATA_LEAF
+#undef HANDLE_METADATA_BRANCH
+#undef HANDLE_MDNODE_LEAF
+#undef HANDLE_MDNODE_LEAF_UNIQUABLE
+#undef HANDLE_MDNODE_BRANCH
+#undef HANDLE_SPECIALIZED_MDNODE_LEAF
+#undef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
+#undef HANDLE_SPECIALIZED_MDNODE_BRANCH
diff --git a/linux-x64/clang/include/llvm/IR/Metadata.h b/linux-x64/clang/include/llvm/IR/Metadata.h
new file mode 100644
index 0000000..bc0b87a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Metadata.h
@@ -0,0 +1,1425 @@
+//===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for metadata subclasses.
+/// They represent the different flavors of metadata that live in LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_METADATA_H
+#define LLVM_IR_METADATA_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+class Module;
+class ModuleSlotTracker;
+class raw_ostream;
+class Type;
+
+enum LLVMConstants : uint32_t {
+  DEBUG_METADATA_VERSION = 3 // Current debug info version number.
+};
+
+/// \brief Root of the metadata hierarchy.
+///
+/// This is a root class for typeless data in the IR.
+class Metadata {
+  friend class ReplaceableMetadataImpl;
+
+  /// \brief RTTI.
+  const unsigned char SubclassID;
+
+protected:
+  /// \brief Active type of storage.
+  enum StorageType { Uniqued, Distinct, Temporary };
+
+  /// \brief Storage flag for non-uniqued, otherwise unowned, metadata.
+  unsigned char Storage;
+  // TODO: expose remaining bits to subclasses.
+
+  unsigned short SubclassData16 = 0;
+  unsigned SubclassData32 = 0;
+
+public:
+  enum MetadataKind {
+#define HANDLE_METADATA_LEAF(CLASS) CLASS##Kind,
+#include "llvm/IR/Metadata.def"
+  };
+
+protected:
+  Metadata(unsigned ID, StorageType Storage)
+      : SubclassID(ID), Storage(Storage) {
+    static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
+  }
+
+  ~Metadata() = default;
+
+  /// \brief Default handling of a changed operand, which asserts.
+  ///
+  /// If subclasses pass themselves in as owners to a tracking node reference,
+  /// they must provide an implementation of this method.
+  void handleChangedOperand(void *, Metadata *) {
+    llvm_unreachable("Unimplemented in Metadata subclass");
+  }
+
+public:
+  unsigned getMetadataID() const { return SubclassID; }
+
+  /// \brief User-friendly dump.
+  ///
+  /// If \c M is provided, metadata nodes will be numbered canonically;
+  /// otherwise, pointer addresses are substituted.
+  ///
+  /// Note: this uses an explicit overload instead of default arguments so that
+  /// the nullptr version is easy to call from a debugger.
+  ///
+  /// @{
+  void dump() const;
+  void dump(const Module *M) const;
+  /// @}
+
+  /// \brief Print.
+  ///
+  /// Prints definition of \c this.
+  ///
+  /// If \c M is provided, metadata nodes will be numbered canonically;
+  /// otherwise, pointer addresses are substituted.
+  /// @{
+  void print(raw_ostream &OS, const Module *M = nullptr,
+             bool IsForDebug = false) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr,
+             bool IsForDebug = false) const;
+  /// @}
+
+  /// \brief Print as operand.
+  ///
+  /// Prints reference of \c this.
+  ///
+  /// If \c M is provided, metadata nodes will be numbered canonically;
+  /// otherwise, pointer addresses are substituted.
+  /// @{
+  void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
+  void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
+                      const Module *M = nullptr) const;
+  /// @}
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef)
+
+// Specialized opaque metadata conversions.
+inline Metadata **unwrap(LLVMMetadataRef *MDs) {
+  return reinterpret_cast<Metadata**>(MDs);
+}
+
+#define HANDLE_METADATA(CLASS) class CLASS;
+#include "llvm/IR/Metadata.def"
+
+// Provide specializations of isa so that we don't need definitions of
+// subclasses to see if the metadata is a subclass.
+#define HANDLE_METADATA_LEAF(CLASS)                                            \
+  template <> struct isa_impl<CLASS, Metadata> {                               \
+    static inline bool doit(const Metadata &MD) {                              \
+      return MD.getMetadataID() == Metadata::CLASS##Kind;                      \
+    }                                                                          \
+  };
+#include "llvm/IR/Metadata.def"
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
+  MD.print(OS);
+  return OS;
+}
+
+/// \brief Metadata wrapper in the Value hierarchy.
+///
+/// A member of the \a Value hierarchy to represent a reference to metadata.
+/// This allows, e.g., intrinsics to have metadata as operands.
+///
+/// Notably, this is the only thing in either hierarchy that is allowed to
+/// reference \a LocalAsMetadata.
+class MetadataAsValue : public Value {
+  friend class ReplaceableMetadataImpl;
+  friend class LLVMContextImpl;
+
+  Metadata *MD;
+
+  MetadataAsValue(Type *Ty, Metadata *MD);
+
+  /// \brief Drop use of metadata (during teardown).
+  void dropUse() { MD = nullptr; }
+
+public:
+  ~MetadataAsValue();
+
+  static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
+  static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);
+
+  Metadata *getMetadata() const { return MD; }
+
+  static bool classof(const Value *V) {
+    return V->getValueID() == MetadataAsValueVal;
+  }
+
+private:
+  void handleChangedMetadata(Metadata *MD);
+  void track();
+  void untrack();
+};
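+
+// Sketch (assumes an LLVMContext Ctx and an MDNode *N): wrap metadata so it
+// can be passed where a Value is expected, e.g. as an intrinsic operand.
+//
+//   MetadataAsValue *MAV = MetadataAsValue::get(Ctx, N);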
+
+/// \brief API for tracking metadata references through RAUW and deletion.
+///
+/// Shared API for updating \a Metadata pointers in subclasses that support
+/// RAUW.
+///
+/// This API is not meant to be used directly.  See \a TrackingMDRef for a
+/// user-friendly tracking reference.
+class MetadataTracking {
+public:
+  /// \brief Track the reference to metadata.
+  ///
+  /// Register \c MD with \c *MD, if the subclass supports tracking.  If \c *MD
+  /// gets RAUW'ed, \c MD will be updated to the new address.  If \c *MD gets
+  /// deleted, \c MD will be set to \c nullptr.
+  ///
+  /// If tracking isn't supported, \c *MD will not change.
+  ///
+  /// \return true iff tracking is supported by \c MD.
+  static bool track(Metadata *&MD) {
+    return track(&MD, *MD, static_cast<Metadata *>(nullptr));
+  }
+
+  /// \brief Track the reference to metadata for \a Metadata.
+  ///
+  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
+  /// tell it that its operand changed.  This could trigger \c Owner being
+  /// re-uniqued.
+  static bool track(void *Ref, Metadata &MD, Metadata &Owner) {
+    return track(Ref, MD, &Owner);
+  }
+
+  /// \brief Track the reference to metadata for \a MetadataAsValue.
+  ///
+  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
+  /// tell it that its operand changed.  This could trigger \c Owner being
+  /// re-uniqued.
+  static bool track(void *Ref, Metadata &MD, MetadataAsValue &Owner) {
+    return track(Ref, MD, &Owner);
+  }
+
+  /// \brief Stop tracking a reference to metadata.
+  ///
+  /// Stops \c *MD from tracking \c MD.
+  static void untrack(Metadata *&MD) { untrack(&MD, *MD); }
+  static void untrack(void *Ref, Metadata &MD);
+
+  /// \brief Move tracking from one reference to another.
+  ///
+  /// Semantically equivalent to \c untrack(MD) followed by \c track(New),
+  /// except that ownership callbacks are maintained.
+  ///
+  /// Note: it is an error if \c *MD does not equal \c New.
+  ///
+  /// \return true iff tracking is supported by \c MD.
+  static bool retrack(Metadata *&MD, Metadata *&New) {
+    return retrack(&MD, *MD, &New);
+  }
+  static bool retrack(void *Ref, Metadata &MD, void *New);
+
+  /// \brief Check whether metadata is replaceable.
+  static bool isReplaceable(const Metadata &MD);
+
+  using OwnerTy = PointerUnion<MetadataAsValue *, Metadata *>;
+
+private:
+  /// \brief Track a reference to metadata for an owner.
+  ///
+  /// Generalized version of tracking.
+  static bool track(void *Ref, Metadata &MD, OwnerTy Owner);
+};
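+
+// Sketch: keep a Metadata pointer current across RAUW (assumes MD starts out
+// pointing at tracking-capable metadata, e.g. a ValueAsMetadata node).
+//
+//   Metadata *MD = SomeTrackableNode;  // hypothetical starting node
+//   MetadataTracking::track(MD);       // MD follows RAUW; nulled on deletion
+//   // ... MD stays valid across replaceAllUsesWith() on the node ...
+//   MetadataTracking::untrack(MD);     // stop tracking before MD goes away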
+
+/// \brief Shared implementation of use-lists for replaceable metadata.
+///
+/// Most metadata cannot be RAUW'ed.  This is a shared implementation of
+/// use-lists and associated API for the two that support it (\a ValueAsMetadata
+/// and \a TempMDNode).
+class ReplaceableMetadataImpl {
+  friend class MetadataTracking;
+
+public:
+  using OwnerTy = MetadataTracking::OwnerTy;
+
+private:
+  LLVMContext &Context;
+  uint64_t NextIndex = 0;
+  SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;
+
+public:
+  ReplaceableMetadataImpl(LLVMContext &Context) : Context(Context) {}
+
+  ~ReplaceableMetadataImpl() {
+    assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
+  }
+
+  LLVMContext &getContext() const { return Context; }
+
+  /// \brief Replace all uses of this with MD.
+  ///
+  /// Replace all uses of this with \c MD, which is allowed to be null.
+  void replaceAllUsesWith(Metadata *MD);
+
+  /// \brief Resolve all uses of this.
+  ///
+  /// Resolve all uses of this, turning off RAUW permanently.  If \c
+  /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
+  /// is resolved.
+  void resolveAllUses(bool ResolveUsers = true);
+
+private:
+  void addRef(void *Ref, OwnerTy Owner);
+  void dropRef(void *Ref);
+  void moveRef(void *Ref, void *New, const Metadata &MD);
+
+  /// Lazily construct RAUW support on MD.
+  ///
+  /// If this is an unresolved MDNode, RAUW support will be created on-demand.
+  /// ValueAsMetadata always has RAUW support.
+  static ReplaceableMetadataImpl *getOrCreate(Metadata &MD);
+
+  /// Get RAUW support on MD, if it exists.
+  static ReplaceableMetadataImpl *getIfExists(Metadata &MD);
+
+  /// Check whether this node will support RAUW.
+  ///
+  /// Returns \c true unless getOrCreate() would return null.
+  static bool isReplaceable(const Metadata &MD);
+};
+
+/// \brief Value wrapper in the Metadata hierarchy.
+///
+/// This is a custom value handle that allows other metadata to refer to
+/// classes in the Value hierarchy.
+///
+/// Because of full uniquing support, each value is only wrapped by a single \a
+/// ValueAsMetadata object, so the lookup maps are far more efficient than
+/// those using ValueHandleBase.
+class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl {
+  friend class ReplaceableMetadataImpl;
+  friend class LLVMContextImpl;
+
+  Value *V;
+
+  /// \brief Drop users without RAUW (during teardown).
+  void dropUsers() {
+    ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false);
+  }
+
+protected:
+  ValueAsMetadata(unsigned ID, Value *V)
+      : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
+    assert(V && "Expected valid value");
+  }
+
+  ~ValueAsMetadata() = default;
+
+public:
+  static ValueAsMetadata *get(Value *V);
+
+  static ConstantAsMetadata *getConstant(Value *C) {
+    return cast<ConstantAsMetadata>(get(C));
+  }
+
+  static LocalAsMetadata *getLocal(Value *Local) {
+    return cast<LocalAsMetadata>(get(Local));
+  }
+
+  static ValueAsMetadata *getIfExists(Value *V);
+
+  static ConstantAsMetadata *getConstantIfExists(Value *C) {
+    return cast_or_null<ConstantAsMetadata>(getIfExists(C));
+  }
+
+  static LocalAsMetadata *getLocalIfExists(Value *Local) {
+    return cast_or_null<LocalAsMetadata>(getIfExists(Local));
+  }
+
+  Value *getValue() const { return V; }
+  Type *getType() const { return V->getType(); }
+  LLVMContext &getContext() const { return V->getContext(); }
+
+  static void handleDeletion(Value *V);
+  static void handleRAUW(Value *From, Value *To);
+
+protected:
+  /// \brief Handle collisions after \a Value::replaceAllUsesWith().
+  ///
+  /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped
+  /// \a Value gets RAUW'ed and the target already exists, this is used to
+  /// merge the two metadata nodes.
+  void replaceAllUsesWith(Metadata *MD) {
+    ReplaceableMetadataImpl::replaceAllUsesWith(MD);
+  }
+
+public:
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == LocalAsMetadataKind ||
+           MD->getMetadataID() == ConstantAsMetadataKind;
+  }
+};
+
+class ConstantAsMetadata : public ValueAsMetadata {
+  friend class ValueAsMetadata;
+
+  ConstantAsMetadata(Constant *C)
+      : ValueAsMetadata(ConstantAsMetadataKind, C) {}
+
+public:
+  static ConstantAsMetadata *get(Constant *C) {
+    return ValueAsMetadata::getConstant(C);
+  }
+
+  static ConstantAsMetadata *getIfExists(Constant *C) {
+    return ValueAsMetadata::getConstantIfExists(C);
+  }
+
+  Constant *getValue() const {
+    return cast<Constant>(ValueAsMetadata::getValue());
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == ConstantAsMetadataKind;
+  }
+};
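+
+// Sketch (assumes an LLVMContext Ctx and llvm/IR/Constants.h): bridge a
+// constant into the metadata hierarchy so an MDNode can reference it.
+//
+//   auto *CI = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
+//   ConstantAsMetadata *CAM = ConstantAsMetadata::get(CI);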
+
+class LocalAsMetadata : public ValueAsMetadata {
+  friend class ValueAsMetadata;
+
+  LocalAsMetadata(Value *Local)
+      : ValueAsMetadata(LocalAsMetadataKind, Local) {
+    assert(!isa<Constant>(Local) && "Expected local value");
+  }
+
+public:
+  static LocalAsMetadata *get(Value *Local) {
+    return ValueAsMetadata::getLocal(Local);
+  }
+
+  static LocalAsMetadata *getIfExists(Value *Local) {
+    return ValueAsMetadata::getLocalIfExists(Local);
+  }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == LocalAsMetadataKind;
+  }
+};
+
+/// \brief Transitional API for extracting constants from Metadata.
+///
+/// This namespace contains transitional functions for metadata that points to
+/// \a Constants.
+///
+/// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode
+/// operands could refer to any \a Value.  There was a lot of code like this:
+///
+/// \code
+///     MDNode *N = ...;
+///     auto *CI = dyn_cast<ConstantInt>(N->getOperand(2));
+/// \endcode
+///
+/// Now that \a Value and \a Metadata are in separate hierarchies, maintaining
+/// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three
+/// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and
+/// cast in the \a Value hierarchy.  Besides creating boilerplate, this
+/// requires subtle control flow changes.
+///
+/// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt,
+/// so that metadata can refer to numbers without traversing a bridge to the \a
+/// Value hierarchy.  In this final state, the code above would look like this:
+///
+/// \code
+///     MDNode *N = ...;
+///     auto *MI = dyn_cast<MDInt>(N->getOperand(2));
+/// \endcode
+///
+/// The API in this namespace supports the transition.  \a MDInt doesn't exist
+/// yet, and even once it does, changing each metadata schema to use it is its
+/// own mini-project.  In the meantime this API prevents us from introducing
+/// complex and bug-prone control flow that will disappear in the end.  In
+/// particular, the above code looks like this:
+///
+/// \code
+///     MDNode *N = ...;
+///     auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2));
+/// \endcode
+///
+/// The full set of provided functions includes:
+///
+///   mdconst::hasa                <=> isa
+///   mdconst::extract             <=> cast
+///   mdconst::extract_or_null     <=> cast_or_null
+///   mdconst::dyn_extract         <=> dyn_cast
+///   mdconst::dyn_extract_or_null <=> dyn_cast_or_null
+///
+/// The target of the cast must be a subclass of \a Constant.
+namespace mdconst {
+
+namespace detail {
+
+template <class T> T &make();
+template <class T, class Result> struct HasDereference {
+  using Yes = char[1];
+  using No = char[2];
+  template <size_t N> struct SFINAE {};
+
+  template <class U, class V>
+  static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
+  template <class U, class V> static No &hasDereference(...);
+
+  static const bool value =
+      sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
+};
+template <class V, class M> struct IsValidPointer {
+  static const bool value = std::is_base_of<Constant, V>::value &&
+                            HasDereference<M, const Metadata &>::value;
+};
+template <class V, class M> struct IsValidReference {
+  static const bool value = std::is_base_of<Constant, V>::value &&
+                            std::is_convertible<M, const Metadata &>::value;
+};
+
+} // end namespace detail
+
+/// \brief Check whether Metadata has a Value.
+///
+/// As an analogue to \a isa(), check whether \c MD has a \a Value of type
+/// \c X inside.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, bool>::type
+hasa(Y &&MD) {
+  assert(MD && "Null pointer sent into hasa");
+  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
+    return isa<X>(V->getValue());
+  return false;
+}
+template <class X, class Y>
+inline
+    typename std::enable_if<detail::IsValidReference<X, Y &>::value, bool>::type
+    hasa(Y &MD) {
+  return hasa(&MD);
+}
+
+/// \brief Extract a Value from Metadata.
+///
+/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+extract(Y &&MD) {
+  return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
+}
+template <class X, class Y>
+inline
+    typename std::enable_if<detail::IsValidReference<X, Y &>::value, X *>::type
+    extract(Y &MD) {
+  return extract(&MD);
+}
+
+/// \brief Extract a Value from Metadata, allowing null.
+///
+/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
+/// from \c MD, allowing \c MD to be null.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+extract_or_null(Y &&MD) {
+  if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
+    return cast<X>(V->getValue());
+  return nullptr;
+}
+
+/// \brief Extract a Value from Metadata, if any.
+///
+/// As an analogue to \a dyn_cast(), extract the \a Value subclass \c X
+/// from \c MD, returning null if \c MD doesn't contain a \a Value or if the
+/// \a Value it does contain is of the wrong subclass.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+dyn_extract(Y &&MD) {
+  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
+    return dyn_cast<X>(V->getValue());
+  return nullptr;
+}
+
+/// \brief Extract a Value from Metadata, if any, allowing null.
+///
+/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass
+/// \c X from \c MD, returning null if \c MD doesn't contain a \a Value or if
+/// the \a Value it does contain is of the wrong subclass; \c MD itself may
+/// also be null.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+dyn_extract_or_null(Y &&MD) {
+  if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
+    return dyn_cast<X>(V->getValue());
+  return nullptr;
+}
+
+} // end namespace mdconst
+
+//===----------------------------------------------------------------------===//
+/// \brief A single uniqued string.
+///
+/// These are used to efficiently contain a byte sequence for metadata.
+/// MDString is always unnamed.
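+///
+/// A usage sketch (assuming \c Context is an in-scope \a LLVMContext):
+/// \code
+///     MDString *S = MDString::get(Context, "my.metadata.name");
+///     StringRef Str = S->getString();
+/// \endcode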
+class MDString : public Metadata {
+  friend class StringMapEntry<MDString>;
+
+  StringMapEntry<MDString> *Entry = nullptr;
+
+  MDString() : Metadata(MDStringKind, Uniqued) {}
+
+public:
+  MDString(const MDString &) = delete;
+  MDString &operator=(MDString &&) = delete;
+  MDString &operator=(const MDString &) = delete;
+
+  static MDString *get(LLVMContext &Context, StringRef Str);
+  static MDString *get(LLVMContext &Context, const char *Str) {
+    return get(Context, Str ? StringRef(Str) : StringRef());
+  }
+
+  StringRef getString() const;
+
+  unsigned getLength() const { return (unsigned)getString().size(); }
+
+  using iterator = StringRef::iterator;
+
+  /// \brief Pointer to the first byte of the string.
+  iterator begin() const { return getString().begin(); }
+
+  /// \brief Pointer to one byte past the end of the string.
+  iterator end() const { return getString().end(); }
+
+  const unsigned char *bytes_begin() const { return getString().bytes_begin(); }
+  const unsigned char *bytes_end() const { return getString().bytes_end(); }
+
+  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == MDStringKind;
+  }
+};
+
+/// \brief A collection of metadata nodes that might be associated with a
+/// memory access used by the alias-analysis infrastructure.
+struct AAMDNodes {
+  explicit AAMDNodes(MDNode *T = nullptr, MDNode *S = nullptr,
+                     MDNode *N = nullptr)
+      : TBAA(T), Scope(S), NoAlias(N) {}
+
+  bool operator==(const AAMDNodes &A) const {
+    return TBAA == A.TBAA && Scope == A.Scope && NoAlias == A.NoAlias;
+  }
+
+  bool operator!=(const AAMDNodes &A) const { return !(*this == A); }
+
+  explicit operator bool() const { return TBAA || Scope || NoAlias; }
+
+  /// \brief The tag for type-based alias analysis.
+  MDNode *TBAA;
+
+  /// \brief The tag for alias scope specification (used with noalias).
+  MDNode *Scope;
+
+  /// \brief The tag specifying the noalias scope.
+  MDNode *NoAlias;
+
+  /// \brief Given two sets of AAMDNodes that apply to the same pointer,
+  /// give the best AAMDNodes that are compatible with both (i.e. a set of
+  /// nodes whose allowable aliasing conclusions are a subset of those
+  /// allowable by both of the inputs). However, for efficiency
+  /// reasons, do not create any new MDNodes.
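+  ///
+  /// For example, if two sets share only their TBAA tag, the result keeps
+  /// TBAA and drops the rest (a sketch; the tag nodes are assumed):
+  /// \code
+  ///     AAMDNodes A(TBAATag, ScopeA, nullptr);
+  ///     AAMDNodes B(TBAATag, ScopeB, nullptr);
+  ///     AAMDNodes C = A.intersect(B); // C.TBAA == TBAATag, C.Scope == nullptr
+  /// \endcode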
+  AAMDNodes intersect(const AAMDNodes &Other) {
+    AAMDNodes Result;
+    Result.TBAA = Other.TBAA == TBAA ? TBAA : nullptr;
+    Result.Scope = Other.Scope == Scope ? Scope : nullptr;
+    Result.NoAlias = Other.NoAlias == NoAlias ? NoAlias : nullptr;
+    return Result;
+  }
+};
+
+// Specialize DenseMapInfo for AAMDNodes.
+template<>
+struct DenseMapInfo<AAMDNodes> {
+  static inline AAMDNodes getEmptyKey() {
+    return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
+                     nullptr, nullptr);
+  }
+
+  static inline AAMDNodes getTombstoneKey() {
+    return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
+                     nullptr, nullptr);
+  }
+
+  static unsigned getHashValue(const AAMDNodes &Val) {
+    return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
+           DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
+           DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
+  }
+
+  static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
+    return LHS == RHS;
+  }
+};
+
+/// \brief Tracking metadata reference owned by Metadata.
+///
+/// Similar to \a TrackingMDRef, but it's expected to be owned by an instance
+/// of \a Metadata, which has the option of registering itself for callbacks to
+/// re-unique itself.
+///
+/// In particular, this is used by \a MDNode.
+class MDOperand {
+  Metadata *MD = nullptr;
+
+public:
+  MDOperand() = default;
+  MDOperand(MDOperand &&) = delete;
+  MDOperand(const MDOperand &) = delete;
+  MDOperand &operator=(MDOperand &&) = delete;
+  MDOperand &operator=(const MDOperand &) = delete;
+  ~MDOperand() { untrack(); }
+
+  Metadata *get() const { return MD; }
+  operator Metadata *() const { return get(); }
+  Metadata *operator->() const { return get(); }
+  Metadata &operator*() const { return *get(); }
+
+  void reset() {
+    untrack();
+    MD = nullptr;
+  }
+  void reset(Metadata *MD, Metadata *Owner) {
+    untrack();
+    this->MD = MD;
+    track(Owner);
+  }
+
+private:
+  void track(Metadata *Owner) {
+    if (MD) {
+      if (Owner)
+        MetadataTracking::track(this, *MD, *Owner);
+      else
+        MetadataTracking::track(MD);
+    }
+  }
+
+  void untrack() {
+    assert(static_cast<void *>(this) == &MD && "Expected same address");
+    if (MD)
+      MetadataTracking::untrack(MD);
+  }
+};
+
+template <> struct simplify_type<MDOperand> {
+  using SimpleType = Metadata *;
+
+  static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); }
+};
+
+template <> struct simplify_type<const MDOperand> {
+  using SimpleType = Metadata *;
+
+  static SimpleType getSimplifiedValue(const MDOperand &MD) { return MD.get(); }
+};
+
+/// \brief Pointer to the context, with optional RAUW support.
+///
+/// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer
+/// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext).
+class ContextAndReplaceableUses {
+  PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr;
+
+public:
+  ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {}
+  ContextAndReplaceableUses(
+      std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses)
+      : Ptr(ReplaceableUses.release()) {
+    assert(getReplaceableUses() && "Expected non-null replaceable uses");
+  }
+  ContextAndReplaceableUses() = delete;
+  ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
+  ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
+  ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
+  ContextAndReplaceableUses &
+  operator=(const ContextAndReplaceableUses &) = delete;
+  ~ContextAndReplaceableUses() { delete getReplaceableUses(); }
+
+  operator LLVMContext &() { return getContext(); }
+
+  /// \brief Whether this contains RAUW support.
+  bool hasReplaceableUses() const {
+    return Ptr.is<ReplaceableMetadataImpl *>();
+  }
+
+  LLVMContext &getContext() const {
+    if (hasReplaceableUses())
+      return getReplaceableUses()->getContext();
+    return *Ptr.get<LLVMContext *>();
+  }
+
+  ReplaceableMetadataImpl *getReplaceableUses() const {
+    if (hasReplaceableUses())
+      return Ptr.get<ReplaceableMetadataImpl *>();
+    return nullptr;
+  }
+
+  /// Ensure that this has RAUW support, and then return it.
+  ReplaceableMetadataImpl *getOrCreateReplaceableUses() {
+    if (!hasReplaceableUses())
+      makeReplaceable(llvm::make_unique<ReplaceableMetadataImpl>(getContext()));
+    return getReplaceableUses();
+  }
+
+  /// \brief Assign RAUW support to this.
+  ///
+  /// Make this replaceable, taking ownership of \c ReplaceableUses (which must
+  /// not be null).
+  void
+  makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) {
+    assert(ReplaceableUses && "Expected non-null replaceable uses");
+    assert(&ReplaceableUses->getContext() == &getContext() &&
+           "Expected same context");
+    delete getReplaceableUses();
+    Ptr = ReplaceableUses.release();
+  }
+
+  /// \brief Drop RAUW support.
+  ///
+  /// Cede ownership of RAUW support, returning it.
+  std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() {
+    assert(hasReplaceableUses() && "Expected to own replaceable uses");
+    std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses(
+        getReplaceableUses());
+    Ptr = &ReplaceableUses->getContext();
+    return ReplaceableUses;
+  }
+};
+
+struct TempMDNodeDeleter {
+  inline void operator()(MDNode *Node) const;
+};
+
+#define HANDLE_MDNODE_LEAF(CLASS)                                              \
+  using Temp##CLASS = std::unique_ptr<CLASS, TempMDNodeDeleter>;
+#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#include "llvm/IR/Metadata.def"
+
+/// \brief Metadata node.
+///
+/// Metadata nodes can be uniqued, like constants, or distinct.  Temporary
+/// metadata nodes (with full support for RAUW) can be used to delay uniquing
+/// until forward references are known.  The basic metadata node is an \a
+/// MDTuple.
+///
+/// There is limited support for RAUW at construction time.  At construction
+/// time, if any operand is a temporary node (or an unresolved uniqued node,
+/// which indicates a transitive temporary operand), the node itself will be
+/// unresolved.  As soon as all operands become resolved, it will drop RAUW
+/// support permanently.
+///
+/// If an unresolved node is part of a cycle, \a resolveCycles() needs
+/// to be called on some member of the cycle once all temporary nodes have been
+/// replaced.
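+///
+/// For example, the usual pattern for building a self-referential node goes
+/// through a temporary (a sketch; \c Context is an assumed \a LLVMContext):
+/// \code
+///     TempMDTuple Temp = MDTuple::getTemporary(Context, None);
+///     Metadata *Ops[] = {Temp.get()};
+///     MDNode *Self = MDNode::get(Context, Ops);
+///     Self->replaceOperandWith(0, Self); // Close the cycle.
+/// \endcode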
+class MDNode : public Metadata {
+  friend class ReplaceableMetadataImpl;
+  friend class LLVMContextImpl;
+
+  unsigned NumOperands;
+  unsigned NumUnresolved;
+
+  ContextAndReplaceableUses Context;
+
+protected:
+  MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
+         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
+  ~MDNode() = default;
+
+  void *operator new(size_t Size, unsigned NumOps);
+  void operator delete(void *Mem);
+
+  /// \brief Required by std, but never called.
+  void operator delete(void *, unsigned) {
+    llvm_unreachable("Constructor throws?");
+  }
+
+  /// \brief Required by std, but never called.
+  void operator delete(void *, unsigned, bool) {
+    llvm_unreachable("Constructor throws?");
+  }
+
+  void dropAllReferences();
+
+  MDOperand *mutable_begin() { return mutable_end() - NumOperands; }
+  MDOperand *mutable_end() { return reinterpret_cast<MDOperand *>(this); }
+
+  using mutable_op_range = iterator_range<MDOperand *>;
+
+  mutable_op_range mutable_operands() {
+    return mutable_op_range(mutable_begin(), mutable_end());
+  }
+
+public:
+  MDNode(const MDNode &) = delete;
+  void operator=(const MDNode &) = delete;
+  void *operator new(size_t) = delete;
+
+  static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
+  static inline MDTuple *getIfExists(LLVMContext &Context,
+                                     ArrayRef<Metadata *> MDs);
+  static inline MDTuple *getDistinct(LLVMContext &Context,
+                                     ArrayRef<Metadata *> MDs);
+  static inline TempMDTuple getTemporary(LLVMContext &Context,
+                                         ArrayRef<Metadata *> MDs);
+
+  /// \brief Create a (temporary) clone of this.
+  TempMDNode clone() const;
+
+  /// \brief Deallocate a node created by getTemporary.
+  ///
+  /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
+  /// references will be reset.
+  static void deleteTemporary(MDNode *N);
+
+  LLVMContext &getContext() const { return Context.getContext(); }
+
+  /// \brief Replace a specific operand.
+  void replaceOperandWith(unsigned I, Metadata *New);
+
+  /// \brief Check if node is fully resolved.
+  ///
+  /// If \a isTemporary(), this always returns \c false; if \a isDistinct(),
+  /// this always returns \c true.
+  ///
+  /// If \a isUniqued(), returns \c true if this has already dropped RAUW
+  /// support (because all operands are resolved).
+  ///
+  /// As forward declarations are resolved, their containers should get
+  /// resolved automatically.  However, if this (or one of its operands) is
+  /// involved in a cycle, \a resolveCycles() needs to be called explicitly.
+  bool isResolved() const { return !isTemporary() && !NumUnresolved; }
+
+  bool isUniqued() const { return Storage == Uniqued; }
+  bool isDistinct() const { return Storage == Distinct; }
+  bool isTemporary() const { return Storage == Temporary; }
+
+  /// \brief RAUW a temporary.
+  ///
+  /// \pre \a isTemporary() must be \c true.
+  void replaceAllUsesWith(Metadata *MD) {
+    assert(isTemporary() && "Expected temporary node");
+    if (Context.hasReplaceableUses())
+      Context.getReplaceableUses()->replaceAllUsesWith(MD);
+  }
+
+  /// \brief Resolve cycles.
+  ///
+  /// Once all forward declarations have been resolved, force cycles to be
+  /// resolved.
+  ///
+  /// \pre No operands (or operands' operands, etc.) have \a isTemporary().
+  void resolveCycles();
+
+  /// Resolve a unique, unresolved node.
+  void resolve();
+
+  /// \brief Replace a temporary node with a permanent one.
+  ///
+  /// Try to create a uniqued version of \c N -- in place, if possible -- and
+  /// return it.  If \c N cannot be uniqued, return a distinct node instead.
+  template <class T>
+  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+  replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
+    return cast<T>(N.release()->replaceWithPermanentImpl());
+  }
+
+  /// \brief Replace a temporary node with a uniqued one.
+  ///
+  /// Create a uniqued version of \c N -- in place, if possible -- and return
+  /// it.  Takes ownership of the temporary node.
+  ///
+  /// \pre N does not self-reference.
+  template <class T>
+  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+  replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
+    return cast<T>(N.release()->replaceWithUniquedImpl());
+  }
+
+  /// \brief Replace a temporary node with a distinct one.
+  ///
+  /// Create a distinct version of \c N -- in place, if possible -- and return
+  /// it.  Takes ownership of the temporary node.
+  template <class T>
+  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+  replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
+    return cast<T>(N.release()->replaceWithDistinctImpl());
+  }
+
+private:
+  MDNode *replaceWithPermanentImpl();
+  MDNode *replaceWithUniquedImpl();
+  MDNode *replaceWithDistinctImpl();
+
+protected:
+  /// \brief Set an operand.
+  ///
+  /// Sets the operand directly, without worrying about uniquing.
+  void setOperand(unsigned I, Metadata *New);
+
+  void storeDistinctInContext();
+  template <class T, class StoreT>
+  static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
+  template <class T> static T *storeImpl(T *N, StorageType Storage);
+
+private:
+  void handleChangedOperand(void *Ref, Metadata *New);
+
+  /// Drop RAUW support, if any.
+  void dropReplaceableUses();
+
+  void resolveAfterOperandChange(Metadata *Old, Metadata *New);
+  void decrementUnresolvedOperandCount();
+  void countUnresolvedOperands();
+
+  /// \brief Mutate this to be "uniqued".
+  ///
+  /// Mutate this so that \a isUniqued().
+  /// \pre \a isTemporary().
+  /// \pre already added to uniquing set.
+  void makeUniqued();
+
+  /// \brief Mutate this to be "distinct".
+  ///
+  /// Mutate this so that \a isDistinct().
+  /// \pre \a isTemporary().
+  void makeDistinct();
+
+  void deleteAsSubclass();
+  MDNode *uniquify();
+  void eraseFromStore();
+
+  template <class NodeTy> struct HasCachedHash;
+  template <class NodeTy>
+  static void dispatchRecalculateHash(NodeTy *N, std::true_type) {
+    N->recalculateHash();
+  }
+  template <class NodeTy>
+  static void dispatchRecalculateHash(NodeTy *, std::false_type) {}
+  template <class NodeTy>
+  static void dispatchResetHash(NodeTy *N, std::true_type) {
+    N->setHash(0);
+  }
+  template <class NodeTy>
+  static void dispatchResetHash(NodeTy *, std::false_type) {}
+
+public:
+  using op_iterator = const MDOperand *;
+  using op_range = iterator_range<op_iterator>;
+
+  op_iterator op_begin() const {
+    return const_cast<MDNode *>(this)->mutable_begin();
+  }
+
+  op_iterator op_end() const {
+    return const_cast<MDNode *>(this)->mutable_end();
+  }
+
+  op_range operands() const { return op_range(op_begin(), op_end()); }
+
+  const MDOperand &getOperand(unsigned I) const {
+    assert(I < NumOperands && "Out of range");
+    return op_begin()[I];
+  }
+
+  /// \brief Return number of MDNode operands.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Metadata *MD) {
+    switch (MD->getMetadataID()) {
+    default:
+      return false;
+#define HANDLE_MDNODE_LEAF(CLASS)                                              \
+  case CLASS##Kind:                                                            \
+    return true;
+#include "llvm/IR/Metadata.def"
+    }
+  }
+
+  /// \brief Check whether MDNode is a vtable access.
+  bool isTBAAVtableAccess() const;
+
+  /// \brief Methods for metadata merging.
+  static MDNode *concatenate(MDNode *A, MDNode *B);
+  static MDNode *intersect(MDNode *A, MDNode *B);
+  static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
+  static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
+  static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
+  static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);
+  static MDNode *getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B);
+};
+
+/// \brief Tuple of metadata.
+///
+/// This is the simple \a MDNode arbitrary tuple.  Nodes are uniqued by
+/// default based on their operands.
+class MDTuple : public MDNode {
+  friend class LLVMContextImpl;
+  friend class MDNode;
+
+  MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash,
+          ArrayRef<Metadata *> Vals)
+      : MDNode(C, MDTupleKind, Storage, Vals) {
+    setHash(Hash);
+  }
+
+  ~MDTuple() { dropAllReferences(); }
+
+  void setHash(unsigned Hash) { SubclassData32 = Hash; }
+  void recalculateHash();
+
+  static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
+                          StorageType Storage, bool ShouldCreate = true);
+
+  TempMDTuple cloneImpl() const {
+    return getTemporary(getContext(),
+                        SmallVector<Metadata *, 4>(op_begin(), op_end()));
+  }
+
+public:
+  /// \brief Get the hash, if any.
+  unsigned getHash() const { return SubclassData32; }
+
+  static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+    return getImpl(Context, MDs, Uniqued);
+  }
+
+  static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+    return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
+  }
+
+  /// \brief Return a distinct node.
+  ///
+  /// Return a distinct node -- i.e., a node that is not uniqued.
+  static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+    return getImpl(Context, MDs, Distinct);
+  }
+
+  /// \brief Return a temporary node.
+  ///
+  /// For use in constructing cyclic MDNode structures. A temporary MDNode is
+  /// not uniqued, may be RAUW'd, and must be manually deleted with
+  /// deleteTemporary.
+  static TempMDTuple getTemporary(LLVMContext &Context,
+                                  ArrayRef<Metadata *> MDs) {
+    return TempMDTuple(getImpl(Context, MDs, Temporary));
+  }
+
+  /// \brief Return a (temporary) clone of this.
+  TempMDTuple clone() const { return cloneImpl(); }
+
+  static bool classof(const Metadata *MD) {
+    return MD->getMetadataID() == MDTupleKind;
+  }
+};
+
+MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+  return MDTuple::get(Context, MDs);
+}
+
+MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+  return MDTuple::getIfExists(Context, MDs);
+}
+
+MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+  return MDTuple::getDistinct(Context, MDs);
+}
+
+TempMDTuple MDNode::getTemporary(LLVMContext &Context,
+                                 ArrayRef<Metadata *> MDs) {
+  return MDTuple::getTemporary(Context, MDs);
+}
+
+void TempMDNodeDeleter::operator()(MDNode *Node) const {
+  MDNode::deleteTemporary(Node);
+}
+
+/// \brief Typed iterator through MDNode operands.
+///
+/// An iterator that transforms an \a MDNode::iterator into an iterator over a
+/// particular Metadata subclass.
+template <class T>
+class TypedMDOperandIterator
+    : public std::iterator<std::input_iterator_tag, T *, std::ptrdiff_t, void,
+                           T *> {
+  MDNode::op_iterator I = nullptr;
+
+public:
+  TypedMDOperandIterator() = default;
+  explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {}
+
+  T *operator*() const { return cast_or_null<T>(*I); }
+
+  TypedMDOperandIterator &operator++() {
+    ++I;
+    return *this;
+  }
+
+  TypedMDOperandIterator operator++(int) {
+    TypedMDOperandIterator Temp(*this);
+    ++I;
+    return Temp;
+  }
+
+  bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; }
+  bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; }
+};
+
+/// \brief Typed, array-like tuple of metadata.
+///
+/// This is a wrapper for \a MDTuple that makes it act like an array holding a
+/// particular type of metadata.
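+///
+/// For example (a sketch; \c N is an assumed \a MDTuple* whose operands are
+/// \a MDString, and \c consume() is hypothetical):
+/// \code
+///     MDTupleTypedArrayWrapper<MDString> Strings(N);
+///     for (MDString *S : Strings)
+///       if (S)
+///         consume(S->getString());
+/// \endcode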
+template <class T> class MDTupleTypedArrayWrapper {
+  const MDTuple *N = nullptr;
+
+public:
+  MDTupleTypedArrayWrapper() = default;
+  MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {}
+
+  template <class U>
+  MDTupleTypedArrayWrapper(
+      const MDTupleTypedArrayWrapper<U> &Other,
+      typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
+          nullptr)
+      : N(Other.get()) {}
+
+  template <class U>
+  explicit MDTupleTypedArrayWrapper(
+      const MDTupleTypedArrayWrapper<U> &Other,
+      typename std::enable_if<!std::is_convertible<U *, T *>::value>::type * =
+          nullptr)
+      : N(Other.get()) {}
+
+  explicit operator bool() const { return get(); }
+  explicit operator MDTuple *() const { return get(); }
+
+  MDTuple *get() const { return const_cast<MDTuple *>(N); }
+  MDTuple *operator->() const { return get(); }
+  MDTuple &operator*() const { return *get(); }
+
+  // FIXME: Fix callers and remove condition on N.
+  unsigned size() const { return N ? N->getNumOperands() : 0u; }
+  bool empty() const { return N ? N->getNumOperands() == 0 : true; }
+  T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }
+
+  // FIXME: Fix callers and remove condition on N.
+  using iterator = TypedMDOperandIterator<T>;
+
+  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
+  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
+};
+
+#define HANDLE_METADATA(CLASS)                                                 \
+  using CLASS##Array = MDTupleTypedArrayWrapper<CLASS>;
+#include "llvm/IR/Metadata.def"
+
+/// Placeholder metadata for operands of distinct MDNodes.
+///
+/// This is a lightweight placeholder for an operand of a distinct node.  Its
+/// purpose is to help track forward references when creating a distinct node.
+/// This allows distinct nodes involved in a cycle to be constructed before
+/// their operands without requiring a heavyweight temporary node with
+/// full-blown RAUW support.
+///
+/// Each placeholder supports only a single MDNode user.  Clients should pass
+/// an ID, retrieved via \a getID(), to indicate the "real" operand that this
+/// should be replaced with.
+///
+/// While it would be possible to implement move operators, they would be
+/// fairly expensive.  Leave them unimplemented to discourage their use
+/// (clients can use std::deque, std::list, BumpPtrAllocator, etc.).
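+///
+/// A sketch of the intended pattern (\c Context and \c Real are assumed):
+/// \code
+///     DistinctMDOperandPlaceholder PH(/* ID = */ 7);
+///     Metadata *Ops[] = {&PH};
+///     MDNode *N = MDTuple::getDistinct(Context, Ops);
+///     // ... later, once the real operand exists:
+///     PH.replaceUseWith(Real); // N's operand 0 now refers to Real.
+/// \endcode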
+class DistinctMDOperandPlaceholder : public Metadata {
+  friend class MetadataTracking;
+
+  Metadata **Use = nullptr;
+
+public:
+  explicit DistinctMDOperandPlaceholder(unsigned ID)
+      : Metadata(DistinctMDOperandPlaceholderKind, Distinct) {
+    SubclassData32 = ID;
+  }
+
+  DistinctMDOperandPlaceholder() = delete;
+  DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete;
+  DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete;
+
+  ~DistinctMDOperandPlaceholder() {
+    if (Use)
+      *Use = nullptr;
+  }
+
+  unsigned getID() const { return SubclassData32; }
+
+  /// Replace the use of this with MD.
+  void replaceUseWith(Metadata *MD) {
+    if (!Use)
+      return;
+    *Use = MD;
+
+    if (*Use)
+      MetadataTracking::track(*Use);
+
+    Metadata *T = cast<Metadata>(this);
+    MetadataTracking::untrack(T);
+    assert(!Use && "Use is still being tracked despite being untracked!");
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// \brief A tuple of MDNodes.
+///
+/// Despite its name, a NamedMDNode isn't itself an MDNode. NamedMDNodes belong
+/// to modules, have names, and contain lists of MDNodes.
+///
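+/// For example (a sketch; \c M is an assumed \a Module and \c SomeNode an
+/// assumed \a MDNode*):
+/// \code
+///     NamedMDNode *NMD = M.getOrInsertNamedMetadata("my.named.md");
+///     NMD->addOperand(SomeNode);
+/// \endcode
+///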
+/// TODO: Inherit from Metadata.
+class NamedMDNode : public ilist_node<NamedMDNode> {
+  friend class LLVMContextImpl;
+  friend class Module;
+
+  std::string Name;
+  Module *Parent = nullptr;
+  void *Operands; // SmallVector<TrackingMDRef, 4>
+
+  void setParent(Module *M) { Parent = M; }
+
+  explicit NamedMDNode(const Twine &N);
+
+  template<class T1, class T2>
+  class op_iterator_impl :
+      public std::iterator<std::bidirectional_iterator_tag, T2> {
+    friend class NamedMDNode;
+
+    const NamedMDNode *Node = nullptr;
+    unsigned Idx = 0;
+
+    op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) {}
+
+  public:
+    op_iterator_impl() = default;
+
+    bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
+    bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }
+
+    op_iterator_impl &operator++() {
+      ++Idx;
+      return *this;
+    }
+
+    op_iterator_impl operator++(int) {
+      op_iterator_impl tmp(*this);
+      operator++();
+      return tmp;
+    }
+
+    op_iterator_impl &operator--() {
+      --Idx;
+      return *this;
+    }
+
+    op_iterator_impl operator--(int) {
+      op_iterator_impl tmp(*this);
+      operator--();
+      return tmp;
+    }
+
+    T1 operator*() const { return Node->getOperand(Idx); }
+  };
+
+public:
+  NamedMDNode(const NamedMDNode &) = delete;
+  ~NamedMDNode();
+
+  /// \brief Drop all references and remove the node from parent module.
+  void eraseFromParent();
+
+  /// Remove all uses and clear node vector.
+  void dropAllReferences() { clearOperands(); }
+  /// Drop all references to this node's operands.
+  void clearOperands();
+
+  /// \brief Get the module that holds this named metadata collection.
+  inline Module *getParent() { return Parent; }
+  inline const Module *getParent() const { return Parent; }
+
+  MDNode *getOperand(unsigned i) const;
+  unsigned getNumOperands() const;
+  void addOperand(MDNode *M);
+  void setOperand(unsigned I, MDNode *New);
+  StringRef getName() const;
+  void print(raw_ostream &ROS, bool IsForDebug = false) const;
+  void print(raw_ostream &ROS, ModuleSlotTracker &MST,
+             bool IsForDebug = false) const;
+  void dump() const;
+
+  // ---------------------------------------------------------------------------
+  // Operand Iterator interface...
+  //
+  using op_iterator = op_iterator_impl<MDNode *, MDNode>;
+
+  op_iterator op_begin() { return op_iterator(this, 0); }
+  op_iterator op_end()   { return op_iterator(this, getNumOperands()); }
+
+  using const_op_iterator = op_iterator_impl<const MDNode *, MDNode>;
+
+  const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
+  const_op_iterator op_end() const {
+    return const_op_iterator(this, getNumOperands());
+  }
+
+  inline iterator_range<op_iterator>  operands() {
+    return make_range(op_begin(), op_end());
+  }
+  inline iterator_range<const_op_iterator> operands() const {
+    return make_range(op_begin(), op_end());
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_METADATA_H
diff --git a/linux-x64/clang/include/llvm/IR/Module.h b/linux-x64/clang/include/llvm/IR/Module.h
new file mode 100644
index 0000000..58e4bc4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Module.h
@@ -0,0 +1,876 @@
+//===- llvm/Module.h - C++ class to represent a VM module -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for the Module class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULE_H
+#define LLVM_IR_MODULE_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/CodeGen.h"
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class Error;
+class FunctionType;
+class GVMaterializer;
+class LLVMContext;
+class MemoryBuffer;
+class RandomNumberGenerator;
+template <class PtrType> class SmallPtrSetImpl;
+class StructType;
+
+/// A Module instance is used to store all the information related to an
+/// LLVM module. Modules are the top level container of all other LLVM
+/// Intermediate Representation (IR) objects. Each module directly contains a
+/// list of global variables, a list of functions, a list of libraries (or
+/// other modules) this module depends on, a symbol table, and various data
+/// about the target's characteristics.
+///
+/// A module maintains a GlobalValRefMap object that is used to hold all
+/// constant references to global variables in the module.  When a global
+/// variable is destroyed, it should have no entries in the GlobalValueRefMap.
+/// @brief The main container class for the LLVM Intermediate Representation.
+class Module {
+/// @name Types And Enumerations
+/// @{
+public:
+  /// The type for the list of global variables.
+  using GlobalListType = SymbolTableList<GlobalVariable>;
+  /// The type for the list of functions.
+  using FunctionListType = SymbolTableList<Function>;
+  /// The type for the list of aliases.
+  using AliasListType = SymbolTableList<GlobalAlias>;
+  /// The type for the list of ifuncs.
+  using IFuncListType = SymbolTableList<GlobalIFunc>;
+  /// The type for the list of named metadata.
+  using NamedMDListType = ilist<NamedMDNode>;
+  /// The type of the comdat "symbol" table.
+  using ComdatSymTabType = StringMap<Comdat>;
+
+  /// The Global Variable iterator.
+  using global_iterator = GlobalListType::iterator;
+  /// The Global Variable constant iterator.
+  using const_global_iterator = GlobalListType::const_iterator;
+
+  /// The Function iterators.
+  using iterator = FunctionListType::iterator;
+  /// The Function constant iterator
+  using const_iterator = FunctionListType::const_iterator;
+
+  /// The Function reverse iterator.
+  using reverse_iterator = FunctionListType::reverse_iterator;
+  /// The Function constant reverse iterator.
+  using const_reverse_iterator = FunctionListType::const_reverse_iterator;
+
+  /// The Global Alias iterators.
+  using alias_iterator = AliasListType::iterator;
+  /// The Global Alias constant iterator
+  using const_alias_iterator = AliasListType::const_iterator;
+
+  /// The Global IFunc iterators.
+  using ifunc_iterator = IFuncListType::iterator;
+  /// The Global IFunc constant iterator
+  using const_ifunc_iterator = IFuncListType::const_iterator;
+
+  /// The named metadata iterators.
+  using named_metadata_iterator = NamedMDListType::iterator;
+  /// The named metadata constant iterators.
+  using const_named_metadata_iterator = NamedMDListType::const_iterator;
+
+  /// This enumeration defines the supported behaviors of module flags.
+  enum ModFlagBehavior {
+    /// Emits an error if two values disagree, otherwise the resulting value is
+    /// that of the operands.
+    Error = 1,
+
+    /// Emits a warning if two values disagree. The result value will be the
+    /// operand for the flag from the first module being linked.
+    Warning = 2,
+
+    /// Adds a requirement that another module flag be present and have a
+    /// specified value after linking is performed. The value must be a metadata
+    /// pair, where the first element of the pair is the ID of the module flag
+    /// to be restricted, and the second element of the pair is the value the
+    /// module flag should be restricted to. This behavior can be used to
+    /// restrict the allowable results (via triggering of an error) of linking
+    /// IDs with the **Override** behavior.
+    Require = 3,
+
+    /// Uses the specified value, regardless of the behavior or value of the
+    /// other module. If both modules specify **Override**, but the values
+    /// differ, an error will be emitted.
+    Override = 4,
+
+    /// Appends the two values, which are required to be metadata nodes.
+    Append = 5,
+
+    /// Appends the two values, which are required to be metadata
+    /// nodes. However, duplicate entries in the second list are dropped
+    /// during the append operation.
+    AppendUnique = 6,
+
+    /// Takes the max of the two values, which are required to be integers.
+    Max = 7,
+
+    // Markers:
+    ModFlagBehaviorFirstVal = Error,
+    ModFlagBehaviorLastVal = Max
+  };
+
+  /// Checks if Metadata represents a valid ModFlagBehavior, and stores the
+  /// converted result in MFB.
+  static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);
+
+  struct ModuleFlagEntry {
+    ModFlagBehavior Behavior;
+    MDString *Key;
+    Metadata *Val;
+
+    ModuleFlagEntry(ModFlagBehavior B, MDString *K, Metadata *V)
+        : Behavior(B), Key(K), Val(V) {}
+  };
+
+/// @}
+/// @name Member Variables
+/// @{
+private:
+  LLVMContext &Context;           ///< The LLVMContext from which types and
+                                  ///< constants are allocated.
+  GlobalListType GlobalList;      ///< The Global Variables in the module
+  FunctionListType FunctionList;  ///< The Functions in the module
+  AliasListType AliasList;        ///< The Aliases in the module
+  IFuncListType IFuncList;        ///< The IFuncs in the module
+  NamedMDListType NamedMDList;    ///< The named metadata in the module
+  std::string GlobalScopeAsm;     ///< Inline Asm at global scope.
+  ValueSymbolTable *ValSymTab;    ///< Symbol table for values
+  ComdatSymTabType ComdatSymTab;  ///< Symbol table for COMDATs
+  std::unique_ptr<MemoryBuffer>
+  OwnedMemoryBuffer;              ///< Memory buffer directly owned by this
+                                  ///< module, for legacy clients only.
+  std::unique_ptr<GVMaterializer>
+  Materializer;                   ///< Used to materialize GlobalValues
+  std::string ModuleID;           ///< Human readable identifier for the module
+  std::string SourceFileName;     ///< Original source file name for module,
+                                  ///< recorded in bitcode.
+  std::string TargetTriple;       ///< Target triple the module was built for.
+                                  ///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
+  void *NamedMDSymTab;            ///< NamedMDNode names.
+  DataLayout DL;                  ///< DataLayout associated with the module
+
+  friend class Constant;
+
+/// @}
+/// @name Constructors
+/// @{
+public:
+  /// The Module constructor. Note that there is no default constructor. You
+  /// must provide a name for the module upon construction.
+  explicit Module(StringRef ModuleID, LLVMContext& C);
+  /// The module destructor. This will dropAllReferences.
+  ~Module();
+
+/// @}
+/// @name Module Level Accessors
+/// @{
+
+  /// Get the module identifier which is, essentially, the name of the module.
+  /// @returns the module identifier as a string
+  const std::string &getModuleIdentifier() const { return ModuleID; }
+
+  /// Get the module's original source file name. When compiling from
+  /// bitcode, this is taken from a bitcode record where it was recorded.
+  /// For other compiles it is the same as the ModuleID, which would
+  /// contain the source file name.
+  const std::string &getSourceFileName() const { return SourceFileName; }
+
+  /// \brief Get a short "name" for the module.
+  ///
+  /// This is useful for debugging or logging. It is essentially a convenience
+  /// wrapper around getModuleIdentifier().
+  StringRef getName() const { return ModuleID; }
+
+  /// Get the data layout string for the module's target platform. This is
+  /// equivalent to getDataLayout().getStringRepresentation().
+  const std::string &getDataLayoutStr() const {
+    return DL.getStringRepresentation();
+  }
+
+  /// Get the data layout for the module's target platform.
+  const DataLayout &getDataLayout() const;
+
+  /// Get the target triple which is a string describing the target host.
+  /// @returns a string containing the target triple.
+  const std::string &getTargetTriple() const { return TargetTriple; }
+
+  /// Get the global data context.
+  /// @returns LLVMContext - a container for LLVM's global information
+  LLVMContext &getContext() const { return Context; }
+
+  /// Get any module-scope inline assembly blocks.
+  /// @returns a string containing the module-scope inline assembly blocks.
+  const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
+
+  /// Get a RandomNumberGenerator salted for use with this module. The
+  /// RNG can be seeded via -rng-seed=<uint64> and is salted with the
+  /// ModuleID and the provided pass salt. The returned RNG should not
+  /// be shared across threads or passes.
+  ///
+  /// A unique RNG per pass ensures a reproducible random stream even
+  /// when other randomness consuming passes are added or removed. In
+  /// addition, the random stream will be reproducible across LLVM
+  /// versions when the pass does not change.
+  std::unique_ptr<RandomNumberGenerator> createRNG(const Pass* P) const;
+
+/// @}
+/// @name Module Level Mutators
+/// @{
+
+  /// Set the module identifier.
+  void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
+
+  /// Set the module's original source file name.
+  void setSourceFileName(StringRef Name) { SourceFileName = Name; }
+
+  /// Set the data layout
+  void setDataLayout(StringRef Desc);
+  void setDataLayout(const DataLayout &Other);
+
+  /// Set the target triple.
+  void setTargetTriple(StringRef T) { TargetTriple = T; }
+
+  /// Set the module-scope inline assembly blocks.
+  /// A trailing newline is added if the input doesn't have one.
+  void setModuleInlineAsm(StringRef Asm) {
+    GlobalScopeAsm = Asm;
+    if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
+      GlobalScopeAsm += '\n';
+  }
+
+  /// Append to the module-scope inline assembly blocks.
+  /// A trailing newline is added if the input doesn't have one.
+  void appendModuleInlineAsm(StringRef Asm) {
+    GlobalScopeAsm += Asm;
+    if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
+      GlobalScopeAsm += '\n';
+  }
+
+/// @}
+/// @name Generic Value Accessors
+/// @{
+
+  /// Return the global value in the module with the specified name, of
+  /// arbitrary type. This method returns null if a global with the specified
+  /// name is not found.
+  GlobalValue *getNamedValue(StringRef Name) const;
+
+  /// Return a unique non-zero ID for the specified metadata kind. This ID is
+  /// uniqued across modules in the current LLVMContext.
+  unsigned getMDKindID(StringRef Name) const;
+
+  /// Populate the client-supplied SmallVector with the names of the custom
+  /// metadata IDs registered in this LLVMContext.
+  void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+  /// Populate the client-supplied SmallVector with the bundle tags registered
+  /// in
+  /// this LLVMContext.  The bundle tags are ordered by increasing bundle IDs.
+  /// \see LLVMContext::getOperandBundleTagID
+  void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
+
+  /// Return the type with the specified name, or null if there is none by that
+  /// name.
+  StructType *getTypeByName(StringRef Name) const;
+
+  std::vector<StructType *> getIdentifiedStructTypes() const;
+
+/// @}
+/// @name Function Accessors
+/// @{
+
+  /// Look up the specified function in the module symbol table. Four
+  /// possibilities:
+  ///   1. If it does not exist, add a prototype for the function and return it.
+  ///   2. If it exists and has local linkage, the existing function is
+  ///      renamed and a new one is inserted.
+  ///   3. Otherwise, if the existing function has the correct prototype, return
+  ///      the existing function.
+  ///   4. Finally, the function exists but has the wrong prototype: return the
+  ///      function with a constantexpr cast to the right prototype.
+  Constant *getOrInsertFunction(StringRef Name, FunctionType *T,
+                                AttributeList AttributeList);
+
+  Constant *getOrInsertFunction(StringRef Name, FunctionType *T);
+
+  /// Look up the specified function in the module symbol table. If it does not
+  /// exist, add a prototype for the function and return it. This function
+  /// guarantees to return a constant of pointer to the specified function type
+  /// or a ConstantExpr BitCast of that type if the named function has a
+  /// different type. This version of the method takes a list of
+  /// function arguments, which makes it easier for clients to use.
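+  ///
+  /// For example (a sketch; \c RetTy and \c I32Ty are assumed \a Type*s):
+  /// \code
+  ///     Constant *Callee =
+  ///         M.getOrInsertFunction("my_fn", AttributeList{}, RetTy, I32Ty);
+  /// \endcode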
+  template<typename... ArgsTy>
+  Constant *getOrInsertFunction(StringRef Name,
+                                AttributeList AttributeList,
+                                Type *RetTy, ArgsTy... Args)
+  {
+    SmallVector<Type*, sizeof...(ArgsTy)> ArgTys{Args...};
+    return getOrInsertFunction(Name,
+                               FunctionType::get(RetTy, ArgTys, false),
+                               AttributeList);
+  }
+
+  /// Same as above, but without the attributes.
+  template<typename... ArgsTy>
+  Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ArgsTy... Args) {
+    return getOrInsertFunction(Name, AttributeList{}, RetTy, Args...);
+  }
+
+  /// Look up the specified function in the module symbol table. If it does not
+  /// exist, return null.
+  Function *getFunction(StringRef Name) const;
+
+/// @}
+/// @name Global Variable Accessors
+/// @{
+
+  /// Look up the specified global variable in the module symbol table. If it
+  /// does not exist, return null. If AllowInternal is set to true, this
+  /// function will also return globals that have InternalLinkage. By default,
+  /// such globals are not returned.
+  GlobalVariable *getGlobalVariable(StringRef Name) const {
+    return getGlobalVariable(Name, false);
+  }
+
+  GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal) const;
+
+  GlobalVariable *getGlobalVariable(StringRef Name,
+                                    bool AllowInternal = false) {
+    return static_cast<const Module *>(this)->getGlobalVariable(Name,
+                                                                AllowInternal);
+  }
+
+  /// Return the global variable in the module with the specified name, of
+  /// arbitrary type. This method returns null if a global with the specified
+  /// name is not found.
+  const GlobalVariable *getNamedGlobal(StringRef Name) const {
+    return getGlobalVariable(Name, true);
+  }
+  GlobalVariable *getNamedGlobal(StringRef Name) {
+    return const_cast<GlobalVariable *>(
+                       static_cast<const Module *>(this)->getNamedGlobal(Name));
+  }
+
+  /// Look up the specified global in the module symbol table.
+  ///   1. If it does not exist, add a declaration of the global and return it.
+  ///   2. Else, the global exists but has the wrong type: return the global
+  ///      with a constantexpr cast to the right type.
+  ///   3. Finally, if the existing global is the correct declaration, return
+  ///      the existing global.
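+  ///
+  /// For example (a sketch; \c Int32Ty is an assumed \a Type*):
+  /// \code
+  ///     Constant *G = M.getOrInsertGlobal("my_global", Int32Ty);
+  /// \endcode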
+  Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
+
+/// @}
+/// @name Global Alias Accessors
+/// @{
+
+  /// Return the global alias in the module with the specified name, of
+  /// arbitrary type. This method returns null if a global with the specified
+  /// name is not found.
+  GlobalAlias *getNamedAlias(StringRef Name) const;
+
+/// @}
+/// @name Global IFunc Accessors
+/// @{
+
+  /// Return the global ifunc in the module with the specified name, of
+  /// arbitrary type. This method returns null if a global with the specified
+  /// name is not found.
+  GlobalIFunc *getNamedIFunc(StringRef Name) const;
+
+/// @}
+/// @name Named Metadata Accessors
+/// @{
+
+  /// Return the first NamedMDNode in the module with the specified name. This
+  /// method returns null if a NamedMDNode with the specified name is not found.
+  NamedMDNode *getNamedMetadata(const Twine &Name) const;
+
+  /// Return the named MDNode in the module with the specified name. This method
+  /// returns a new NamedMDNode if a NamedMDNode with the specified name is not
+  /// found.
+  NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
+
+  /// Remove the given NamedMDNode from this module and delete it.
+  void eraseNamedMetadata(NamedMDNode *NMD);
+
+/// @}
+/// @name Comdat Accessors
+/// @{
+
+  /// Return the Comdat in the module with the specified name. It is created
+  /// if it didn't already exist.
+  Comdat *getOrInsertComdat(StringRef Name);
+
+/// @}
+/// @name Module Flags Accessors
+/// @{
+
+  /// Returns the module flags in the provided vector.
+  void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;
+
+  /// Return the corresponding value if Key appears in module flags, otherwise
+  /// return null.
+  Metadata *getModuleFlag(StringRef Key) const;
+
+  /// Returns the NamedMDNode in the module that represents module-level flags.
+  /// This method returns null if there are no module-level flags.
+  NamedMDNode *getModuleFlagsMetadata() const;
+
+  /// Returns the NamedMDNode in the module that represents module-level flags.
+  /// If module-level flags aren't found, it creates the named metadata that
+  /// contains them.
+  NamedMDNode *getOrInsertModuleFlagsMetadata();
+
+  /// Add a module-level flag to the module-level flags metadata. It will create
+  /// the module-level flags named metadata if it doesn't already exist.
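+  ///
+  /// For example (a sketch; the flag key is hypothetical):
+  /// \code
+  ///     M.addModuleFlag(Module::Error, "my-flag", 1);
+  /// \endcode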
+  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
+  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val);
+  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
+  void addModuleFlag(MDNode *Node);
+
+/// @}
+/// @name Materialization
+/// @{
+
+  /// Sets the GVMaterializer to GVM. This module must not yet have a
+  /// Materializer. To reset the materializer for a module that already has one,
+  /// call materializeAll first. Destroying this module will destroy
+  /// its materializer without materializing any more GlobalValues. Without
+  /// destroying the Module, there is no way to detach or destroy a materializer
+  /// without materializing all the GVs it controls, to avoid leaving orphan
+  /// unmaterialized GVs.
+  void setMaterializer(GVMaterializer *GVM);
+  /// Retrieves the GVMaterializer, if any, for this Module.
+  GVMaterializer *getMaterializer() const { return Materializer.get(); }
+  bool isMaterialized() const { return !getMaterializer(); }
+
+  /// Make sure the GlobalValue is fully read.
+  llvm::Error materialize(GlobalValue *GV);
+
+  /// Make sure all GlobalValues in this Module are fully read and clear the
+  /// Materializer.
+  llvm::Error materializeAll();
+
+  llvm::Error materializeMetadata();
+
+/// @}
+/// @name Direct access to the globals list, functions list, and symbol table
+/// @{
+
+  /// Get the Module's list of global variables (constant).
+  const GlobalListType   &getGlobalList() const       { return GlobalList; }
+  /// Get the Module's list of global variables.
+  GlobalListType         &getGlobalList()             { return GlobalList; }
+
+  static GlobalListType Module::*getSublistAccess(GlobalVariable*) {
+    return &Module::GlobalList;
+  }
+
+  /// Get the Module's list of functions (constant).
+  const FunctionListType &getFunctionList() const     { return FunctionList; }
+  /// Get the Module's list of functions.
+  FunctionListType       &getFunctionList()           { return FunctionList; }
+  static FunctionListType Module::*getSublistAccess(Function*) {
+    return &Module::FunctionList;
+  }
+
+  /// Get the Module's list of aliases (constant).
+  const AliasListType    &getAliasList() const        { return AliasList; }
+  /// Get the Module's list of aliases.
+  AliasListType          &getAliasList()              { return AliasList; }
+
+  static AliasListType Module::*getSublistAccess(GlobalAlias*) {
+    return &Module::AliasList;
+  }
+
+  /// Get the Module's list of ifuncs (constant).
+  const IFuncListType    &getIFuncList() const        { return IFuncList; }
+  /// Get the Module's list of ifuncs.
+  IFuncListType          &getIFuncList()              { return IFuncList; }
+
+  static IFuncListType Module::*getSublistAccess(GlobalIFunc*) {
+    return &Module::IFuncList;
+  }
+
+  /// Get the Module's list of named metadata (constant).
+  const NamedMDListType  &getNamedMDList() const      { return NamedMDList; }
+  /// Get the Module's list of named metadata.
+  NamedMDListType        &getNamedMDList()            { return NamedMDList; }
+
+  static NamedMDListType Module::*getSublistAccess(NamedMDNode*) {
+    return &Module::NamedMDList;
+  }
+
+  /// Get the symbol table of global variable and function identifiers
+  const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
+  /// Get the Module's symbol table of global variable and function identifiers.
+  ValueSymbolTable       &getValueSymbolTable()       { return *ValSymTab; }
+
+  /// Get the Module's symbol table for COMDATs (constant).
+  const ComdatSymTabType &getComdatSymbolTable() const { return ComdatSymTab; }
+  /// Get the Module's symbol table for COMDATs.
+  ComdatSymTabType &getComdatSymbolTable() { return ComdatSymTab; }
+
+/// @}
+/// @name Global Variable Iteration
+/// @{
+
+  global_iterator       global_begin()       { return GlobalList.begin(); }
+  const_global_iterator global_begin() const { return GlobalList.begin(); }
+  global_iterator       global_end  ()       { return GlobalList.end(); }
+  const_global_iterator global_end  () const { return GlobalList.end(); }
+  bool                  global_empty() const { return GlobalList.empty(); }
+
+  iterator_range<global_iterator> globals() {
+    return make_range(global_begin(), global_end());
+  }
+  iterator_range<const_global_iterator> globals() const {
+    return make_range(global_begin(), global_end());
+  }
+
+/// @}
+/// @name Function Iteration
+/// @{
+
+  iterator                begin()       { return FunctionList.begin(); }
+  const_iterator          begin() const { return FunctionList.begin(); }
+  iterator                end  ()       { return FunctionList.end();   }
+  const_iterator          end  () const { return FunctionList.end();   }
+  reverse_iterator        rbegin()      { return FunctionList.rbegin(); }
+  const_reverse_iterator  rbegin() const{ return FunctionList.rbegin(); }
+  reverse_iterator        rend()        { return FunctionList.rend(); }
+  const_reverse_iterator  rend() const  { return FunctionList.rend(); }
+  size_t                  size() const  { return FunctionList.size(); }
+  bool                    empty() const { return FunctionList.empty(); }
+
+  iterator_range<iterator> functions() {
+    return make_range(begin(), end());
+  }
+  iterator_range<const_iterator> functions() const {
+    return make_range(begin(), end());
+  }
+
+/// @}
+/// @name Alias Iteration
+/// @{
+
+  alias_iterator       alias_begin()            { return AliasList.begin(); }
+  const_alias_iterator alias_begin() const      { return AliasList.begin(); }
+  alias_iterator       alias_end  ()            { return AliasList.end();   }
+  const_alias_iterator alias_end  () const      { return AliasList.end();   }
+  size_t               alias_size () const      { return AliasList.size();  }
+  bool                 alias_empty() const      { return AliasList.empty(); }
+
+  iterator_range<alias_iterator> aliases() {
+    return make_range(alias_begin(), alias_end());
+  }
+  iterator_range<const_alias_iterator> aliases() const {
+    return make_range(alias_begin(), alias_end());
+  }
+
+/// @}
+/// @name IFunc Iteration
+/// @{
+
+  ifunc_iterator       ifunc_begin()            { return IFuncList.begin(); }
+  const_ifunc_iterator ifunc_begin() const      { return IFuncList.begin(); }
+  ifunc_iterator       ifunc_end  ()            { return IFuncList.end();   }
+  const_ifunc_iterator ifunc_end  () const      { return IFuncList.end();   }
+  size_t               ifunc_size () const      { return IFuncList.size();  }
+  bool                 ifunc_empty() const      { return IFuncList.empty(); }
+
+  iterator_range<ifunc_iterator> ifuncs() {
+    return make_range(ifunc_begin(), ifunc_end());
+  }
+  iterator_range<const_ifunc_iterator> ifuncs() const {
+    return make_range(ifunc_begin(), ifunc_end());
+  }
+
+  /// @}
+  /// @name Convenience iterators
+  /// @{
+
+  using global_object_iterator =
+      concat_iterator<GlobalObject, iterator, global_iterator>;
+  using const_global_object_iterator =
+      concat_iterator<const GlobalObject, const_iterator,
+                      const_global_iterator>;
+
+  iterator_range<global_object_iterator> global_objects() {
+    return concat<GlobalObject>(functions(), globals());
+  }
+  iterator_range<const_global_object_iterator> global_objects() const {
+    return concat<const GlobalObject>(functions(), globals());
+  }
+
+  global_object_iterator global_object_begin() {
+    return global_objects().begin();
+  }
+  global_object_iterator global_object_end() { return global_objects().end(); }
+
+  const_global_object_iterator global_object_begin() const {
+    return global_objects().begin();
+  }
+  const_global_object_iterator global_object_end() const {
+    return global_objects().end();
+  }
+
+  using global_value_iterator =
+      concat_iterator<GlobalValue, iterator, global_iterator, alias_iterator,
+                      ifunc_iterator>;
+  using const_global_value_iterator =
+      concat_iterator<const GlobalValue, const_iterator, const_global_iterator,
+                      const_alias_iterator, const_ifunc_iterator>;
+
+  iterator_range<global_value_iterator> global_values() {
+    return concat<GlobalValue>(functions(), globals(), aliases(), ifuncs());
+  }
+  iterator_range<const_global_value_iterator> global_values() const {
+    return concat<const GlobalValue>(functions(), globals(), aliases(),
+                                     ifuncs());
+  }
+
+  global_value_iterator global_value_begin() { return global_values().begin(); }
+  global_value_iterator global_value_end() { return global_values().end(); }
+
+  const_global_value_iterator global_value_begin() const {
+    return global_values().begin();
+  }
+  const_global_value_iterator global_value_end() const {
+    return global_values().end();
+  }
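+
+  // Usage sketch (editor's illustration): global_values() chains functions,
+  // globals, aliases and ifuncs into a single range of GlobalValue
+  // references, assuming an existing Module `M`:
+  //
+  //   unsigned NumLocals = 0;
+  //   for (GlobalValue &GV : M.global_values())
+  //     if (GV.hasLocalLinkage())
+  //       ++NumLocals;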
+
+  /// @}
+  /// @name Named Metadata Iteration
+  /// @{
+
+  named_metadata_iterator named_metadata_begin() { return NamedMDList.begin(); }
+  const_named_metadata_iterator named_metadata_begin() const {
+    return NamedMDList.begin();
+  }
+
+  named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
+  const_named_metadata_iterator named_metadata_end() const {
+    return NamedMDList.end();
+  }
+
+  size_t named_metadata_size() const { return NamedMDList.size();  }
+  bool named_metadata_empty() const { return NamedMDList.empty(); }
+
+  iterator_range<named_metadata_iterator> named_metadata() {
+    return make_range(named_metadata_begin(), named_metadata_end());
+  }
+  iterator_range<const_named_metadata_iterator> named_metadata() const {
+    return make_range(named_metadata_begin(), named_metadata_end());
+  }
+
+  /// An iterator for DICompileUnits that skips those marked NoDebug.
+  class debug_compile_units_iterator
+      : public std::iterator<std::input_iterator_tag, DICompileUnit *> {
+    NamedMDNode *CUs;
+    unsigned Idx;
+
+    void SkipNoDebugCUs();
+
+  public:
+    explicit debug_compile_units_iterator(NamedMDNode *CUs, unsigned Idx)
+        : CUs(CUs), Idx(Idx) {
+      SkipNoDebugCUs();
+    }
+
+    debug_compile_units_iterator &operator++() {
+      ++Idx;
+      SkipNoDebugCUs();
+      return *this;
+    }
+
+    debug_compile_units_iterator operator++(int) {
+      debug_compile_units_iterator T(*this);
+      ++Idx;
+      // Skip NoDebug CUs here as well, matching pre-increment, so the
+      // iterator never rests on a CU that should be filtered out.
+      SkipNoDebugCUs();
+      return T;
+    }
+
+    bool operator==(const debug_compile_units_iterator &I) const {
+      return Idx == I.Idx;
+    }
+
+    bool operator!=(const debug_compile_units_iterator &I) const {
+      return Idx != I.Idx;
+    }
+
+    DICompileUnit *operator*() const;
+    DICompileUnit *operator->() const;
+  };
+
+  debug_compile_units_iterator debug_compile_units_begin() const {
+    auto *CUs = getNamedMetadata("llvm.dbg.cu");
+    return debug_compile_units_iterator(CUs, 0);
+  }
+
+  debug_compile_units_iterator debug_compile_units_end() const {
+    auto *CUs = getNamedMetadata("llvm.dbg.cu");
+    return debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0);
+  }
+
+  /// Return an iterator range over all DICompileUnits that are listed in
+  /// this Module's llvm.dbg.cu named metadata node and that aren't
+  /// explicitly marked as NoDebug.
+  iterator_range<debug_compile_units_iterator> debug_compile_units() const {
+    auto *CUs = getNamedMetadata("llvm.dbg.cu");
+    return make_range(
+        debug_compile_units_iterator(CUs, 0),
+        debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0));
+  }
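+
+  // Usage sketch (editor's illustration), assuming an existing Module `M`
+  // plus the usual DebugInfoMetadata and raw_ostream includes:
+  //
+  //   for (DICompileUnit *CU : M.debug_compile_units())
+  //     outs() << CU->getFilename() << "\n";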
+/// @}
+
+  /// Destroy ConstantArrays in LLVMContext if they are not used.
+  /// ConstantArrays constructed during linking can cause quadratic memory
+  /// explosion. Releasing all unused constants can cause a 20% LTO compile-time
+  /// slowdown for a large application.
+  ///
+  /// NOTE: Constants are currently owned by LLVMContext, so this should only
+  /// be called where all uses of the LLVMContext are understood.
+  void dropTriviallyDeadConstantArrays();
+
+/// @name Utility functions for printing and dumping Module objects
+/// @{
+
+  /// Print the module to an output stream with an optional
+  /// AssemblyAnnotationWriter.  If \c ShouldPreserveUseListOrder, then include
+  /// uselistorder directives so that use-lists can be recreated when reading
+  /// the assembly.
+  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW,
+             bool ShouldPreserveUseListOrder = false,
+             bool IsForDebug = false) const;
+
+  /// Dump the module to stderr (for debugging).
+  void dump() const;
+
+  /// This function causes all the subinstructions to "let go" of all references
+  /// that they are maintaining.  This allows one to 'delete' a whole module at
+  /// a time, even though there may be circular references... first all
+  /// references are dropped, and all use counts go to zero.  Then everything
+  /// is deleted for real.  Note that no operations are valid on an object
+  /// that has "dropped all references", except operator delete.
+  void dropAllReferences();
+
+/// @}
+/// @name Utility functions for querying Debug information.
+/// @{
+
+  /// \brief Returns the number of register parameters by checking module
+  /// flags.
+  unsigned getNumberRegisterParameters() const;
+
+  /// \brief Returns the Dwarf Version by checking module flags.
+  unsigned getDwarfVersion() const;
+
+  /// \brief Returns the CodeView Version by checking module flags.
+  /// Returns zero if not present in module.
+  unsigned getCodeViewFlag() const;
+
+/// @}
+/// @name Utility functions for querying and setting PIC level
+/// @{
+
+  /// \brief Returns the PIC level (small or large model)
+  PICLevel::Level getPICLevel() const;
+
+  /// \brief Set the PIC level (small or large model)
+  void setPICLevel(PICLevel::Level PL);
+/// @}
+
+/// @name Utility functions for querying and setting PIE level
+/// @{
+
+  /// \brief Returns the PIE level (small or large model)
+  PIELevel::Level getPIELevel() const;
+
+  /// \brief Set the PIE level (small or large model)
+  void setPIELevel(PIELevel::Level PL);
+/// @}
+
+  /// @name Utility functions for querying and setting PGO summary
+  /// @{
+
+  /// \brief Attach profile summary metadata to this module.
+  void setProfileSummary(Metadata *M);
+
+  /// \brief Returns profile summary metadata
+  Metadata *getProfileSummary();
+  /// @}
+
+  /// Returns true if PLT should be avoided for RTLib calls.
+  bool getRtLibUseGOT() const;
+
+  /// Set that the PLT should be avoided for RTLib calls.
+  void setRtLibUseGOT();
+
+  /// Take ownership of the given memory buffer.
+  void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);
+};
+
+/// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect
+/// the initializer elements of that global in Set and return the global itself.
+GlobalVariable *collectUsedGlobalVariables(const Module &M,
+                                           SmallPtrSetImpl<GlobalValue *> &Set,
+                                           bool CompilerUsed);
+
+/// A raw_ostream inserter for modules.
+inline raw_ostream &operator<<(raw_ostream &O, const Module &M) {
+  M.print(O, nullptr);
+  return O;
+}
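+
+// Usage sketch (editor's illustration): both forms below print the textual
+// IR, assuming #include "llvm/Support/raw_ostream.h" and an existing Module
+// `M`:
+//
+//   errs() << M;                        // via the inserter above
+//   M.print(errs(), /*AAW=*/nullptr);   // equivalent explicit call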
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef)
+
+/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
+ * Module.
+ */
+inline Module *unwrap(LLVMModuleProviderRef MP) {
+  return reinterpret_cast<Module*>(MP);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_MODULE_H
diff --git a/linux-x64/clang/include/llvm/IR/ModuleSlotTracker.h b/linux-x64/clang/include/llvm/IR/ModuleSlotTracker.h
new file mode 100644
index 0000000..eb26fba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ModuleSlotTracker.h
@@ -0,0 +1,80 @@
+//===-- llvm/IR/ModuleSlotTracker.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULESLOTTRACKER_H
+#define LLVM_IR_MODULESLOTTRACKER_H
+
+#include <memory>
+
+namespace llvm {
+
+class Module;
+class Function;
+class SlotTracker;
+class Value;
+
+/// Manage lifetime of a slot tracker for printing IR.
+///
+/// Wrapper around the \a SlotTracker used internally by \a AsmWriter.  This
+/// class allows callers to share the cost of incorporating the metadata in a
+/// module or a function.
+///
+/// If the IR changes from underneath \a ModuleSlotTracker, strings like
+/// "<badref>" will be printed, or, worse, the wrong slots entirely.
+class ModuleSlotTracker {
+  /// Storage for a slot tracker.
+  std::unique_ptr<SlotTracker> MachineStorage;
+  bool ShouldCreateStorage = false;
+  bool ShouldInitializeAllMetadata = false;
+
+  const Module *M = nullptr;
+  const Function *F = nullptr;
+  SlotTracker *Machine = nullptr;
+
+public:
+  /// Wrap a preinitialized SlotTracker.
+  ModuleSlotTracker(SlotTracker &Machine, const Module *M,
+                    const Function *F = nullptr);
+
+  /// Construct a slot tracker from a module.
+  ///
+  /// If \a M is \c nullptr, uses a null slot tracker.  Otherwise, initializes
+  /// a slot tracker, and initializes all metadata slots.  \c
+  /// ShouldInitializeAllMetadata defaults to true because this is expected to
+  /// be shared between multiple callers, and otherwise MDNode references will
+  /// not match up.
+  explicit ModuleSlotTracker(const Module *M,
+                             bool ShouldInitializeAllMetadata = true);
+
+  /// Destructor to clean up storage.
+  ~ModuleSlotTracker();
+
+  /// Lazily creates a slot tracker.
+  SlotTracker *getMachine();
+
+  const Module *getModule() const { return M; }
+  const Function *getCurrentFunction() const { return F; }
+
+  /// Incorporate the given function.
+  ///
+  /// Purge the currently incorporated function and incorporate \c F.  If \c F
+  /// is currently incorporated, this is a no-op.
+  void incorporateFunction(const Function &F);
+
+  /// Return the slot number of the specified local value.
+  ///
+  /// A function that defines this value should be incorporated prior to calling
+  /// this method.
+  /// Return -1 if the value is not in the function's SlotTracker.
+  int getLocalSlot(const Value *V);
+};
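+
+// Usage sketch (editor's illustration): share one tracker across many value
+// printouts, assuming existing `M` (Module) and `F` (Function) objects:
+//
+//   ModuleSlotTracker MST(&M);
+//   MST.incorporateFunction(F);
+//   for (const BasicBlock &BB : F)
+//     errs() << MST.getLocalSlot(&BB) << "\n"; // -1 if BB has no numeric slot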
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/ModuleSummaryIndex.h b/linux-x64/clang/include/llvm/IR/ModuleSummaryIndex.h
new file mode 100644
index 0000000..45f8cd7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ModuleSummaryIndex.h
@@ -0,0 +1,1100 @@
+//===- llvm/ModuleSummaryIndex.h - Module Summary Index ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// ModuleSummaryIndex.h This file contains the declarations of the classes
+/// that hold the module index and summary for function importing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULESUMMARYINDEX_H
+#define LLVM_IR_MODULESUMMARYINDEX_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ScaledNumber.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+namespace yaml {
+
+template <typename T> struct MappingTraits;
+
+} // end namespace yaml
+
+/// \brief Class to accumulate and hold information about a callee.
+struct CalleeInfo {
+  enum class HotnessType : uint8_t {
+    Unknown = 0,
+    Cold = 1,
+    None = 2,
+    Hot = 3,
+    Critical = 4
+  };
+
+  // The size of the bit-field might need to be adjusted if more values are
+  // added to HotnessType enum.
+  uint32_t Hotness : 3;
+
+  /// The value stored in RelBlockFreq has to be interpreted as the digits of
+  /// a scaled number with a scale of \p -ScaleShift.
+  uint32_t RelBlockFreq : 29;
+  static constexpr int32_t ScaleShift = 8;
+  static constexpr uint64_t MaxRelBlockFreq = (1 << 29) - 1;
+
+  CalleeInfo()
+      : Hotness(static_cast<uint32_t>(HotnessType::Unknown)), RelBlockFreq(0) {}
+  explicit CalleeInfo(HotnessType Hotness, uint64_t RelBF)
+      : Hotness(static_cast<uint32_t>(Hotness)), RelBlockFreq(RelBF) {}
+
+  void updateHotness(const HotnessType OtherHotness) {
+    Hotness = std::max(Hotness, static_cast<uint32_t>(OtherHotness));
+  }
+
+  HotnessType getHotness() const { return HotnessType(Hotness); }
+
+  /// Update \p RelBlockFreq from \p BlockFreq and \p EntryFreq.
+  ///
+  /// BlockFreq is divided by EntryFreq and added to RelBlockFreq. To represent
+  /// fractional values, the result is represented as a fixed point number with
+  /// scale of -ScaleShift.
+  void updateRelBlockFreq(uint64_t BlockFreq, uint64_t EntryFreq) {
+    if (EntryFreq == 0)
+      return;
+    using Scaled64 = ScaledNumber<uint64_t>;
+    Scaled64 Temp(BlockFreq, ScaleShift);
+    Temp /= Scaled64::get(EntryFreq);
+
+    uint64_t Sum =
+        SaturatingAdd<uint64_t>(Temp.toInt<uint64_t>(), RelBlockFreq);
+    Sum = std::min(Sum, uint64_t(MaxRelBlockFreq));
+    RelBlockFreq = static_cast<uint32_t>(Sum);
+  }
+};
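+
+// Worked example (editor's illustration) of the fixed-point encoding above:
+// with ScaleShift = 8, a call-site block frequency of 24 in a function whose
+// entry frequency is 16 contributes 24 / 16 = 1.5, which is stored in
+// RelBlockFreq as 1.5 * 2^8 = 384.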
+
+class GlobalValueSummary;
+
+using GlobalValueSummaryList = std::vector<std::unique_ptr<GlobalValueSummary>>;
+
+struct GlobalValueSummaryInfo {
+  union NameOrGV {
+    NameOrGV(bool IsAnalysis) {
+      if (IsAnalysis)
+        GV = nullptr;
+      else
+        Name = "";
+    }
+
+    /// The GlobalValue corresponding to this summary. This is only used in
+    /// per-module summaries, when module analysis is being run.
+    const GlobalValue *GV;
+
+    /// Summary string representation. This StringRef points to the BC module
+    /// string table and is valid as long as the module data is kept in
+    /// memory, which is guaranteed until the runThinLTOBackend function is
+    /// called, so it is safe to use this field during the thin link. This
+    /// field is only valid if the summary index was loaded from a BC file.
+    StringRef Name;
+  } U;
+
+  GlobalValueSummaryInfo(bool IsAnalysis) : U(IsAnalysis) {}
+
+  /// List of global value summary structures for a particular value held
+  /// in the GlobalValueMap. Requires a vector in the case of multiple
+  /// COMDAT values of the same name.
+  GlobalValueSummaryList SummaryList;
+};
+
+/// Map from global value GUID to corresponding summary structures. Use a
+/// std::map rather than a DenseMap so that pointers to the map's value_type
+/// (which are used by ValueInfo) are not invalidated by insertion. A std::map
+/// will also likely incur less overhead here, since the value type is not
+/// very small and the final size of the map is unknown, which would lead to
+/// repeated insertions and resizing with a DenseMap.
+using GlobalValueSummaryMapTy =
+    std::map<GlobalValue::GUID, GlobalValueSummaryInfo>;
+
+/// Struct that holds a reference to a particular GUID in a global value
+/// summary.
+struct ValueInfo {
+  PointerIntPair<const GlobalValueSummaryMapTy::value_type *, 1, bool>
+      RefAndFlag;
+
+  ValueInfo() = default;
+  ValueInfo(bool IsAnalysis, const GlobalValueSummaryMapTy::value_type *R) {
+    RefAndFlag.setPointer(R);
+    RefAndFlag.setInt(IsAnalysis);
+  }
+
+  operator bool() const { return getRef(); }
+
+  GlobalValue::GUID getGUID() const { return getRef()->first; }
+  const GlobalValue *getValue() const {
+    assert(isFromAnalysis());
+    return getRef()->second.U.GV;
+  }
+
+  ArrayRef<std::unique_ptr<GlobalValueSummary>> getSummaryList() const {
+    return getRef()->second.SummaryList;
+  }
+
+  StringRef name() const {
+    return isFromAnalysis() ? getRef()->second.U.GV->getName()
+                            : getRef()->second.U.Name;
+  }
+
+  bool isFromAnalysis() const { return RefAndFlag.getInt(); }
+
+  const GlobalValueSummaryMapTy::value_type *getRef() const {
+    return RefAndFlag.getPointer();
+  }
+
+  bool isDSOLocal() const;
+};
+
+inline bool operator==(const ValueInfo &A, const ValueInfo &B) {
+  assert(A.getRef() && B.getRef() &&
+         "Need ValueInfo with non-null Ref to compare GUIDs");
+  return A.getRef() == B.getRef();
+}
+
+inline bool operator!=(const ValueInfo &A, const ValueInfo &B) {
+  assert(A.getRef() && B.getRef() &&
+         "Need ValueInfo with non-null Ref to compare GUIDs");
+  // Compare the refs to mirror operator==, so that A != B is !(A == B).
+  return A.getRef() != B.getRef();
+}
+
+inline bool operator<(const ValueInfo &A, const ValueInfo &B) {
+  assert(A.getRef() && B.getRef() &&
+         "Need ValueInfo with non-null Ref to compare GUIDs");
+  return A.getGUID() < B.getGUID();
+}
+
+template <> struct DenseMapInfo<ValueInfo> {
+  static inline ValueInfo getEmptyKey() {
+    return ValueInfo(false, (GlobalValueSummaryMapTy::value_type *)-8);
+  }
+
+  static inline ValueInfo getTombstoneKey() {
+    return ValueInfo(false, (GlobalValueSummaryMapTy::value_type *)-16);
+  }
+
+  static inline bool isSpecialKey(ValueInfo V) {
+    return V == getTombstoneKey() || V == getEmptyKey();
+  }
+
+  static bool isEqual(ValueInfo L, ValueInfo R) {
+    // We are not supposed to mix ValueInfo(s) with different analysis flags
+    // in the same container.
+    assert(isSpecialKey(L) || isSpecialKey(R) ||
+           (L.isFromAnalysis() == R.isFromAnalysis()));
+    return L.getRef() == R.getRef();
+  }
+  static unsigned getHashValue(ValueInfo I) { return (uintptr_t)I.getRef(); }
+};
+
+/// \brief Function and variable summary information to aid decisions and
+/// implementation of importing.
+class GlobalValueSummary {
+public:
+  /// \brief Subclass discriminator (for dyn_cast<> et al.)
+  enum SummaryKind : unsigned { AliasKind, FunctionKind, GlobalVarKind };
+
+  /// Group flags (Linkage, NotEligibleToImport, etc.) as a bitfield.
+  struct GVFlags {
+    /// \brief The linkage type of the associated global value.
+    ///
+    /// One use is to flag values that have local linkage types and need to
+    /// have module identifier appended before placing into the combined
+    /// index, to disambiguate from other values with the same name.
+    /// In the future this will be used to update and optimize linkage
+    /// types based on global summary-based analysis.
+    unsigned Linkage : 4;
+
+    /// Indicate if the global value cannot be imported (e.g. it cannot
+    /// be renamed or references something that can't be renamed).
+    unsigned NotEligibleToImport : 1;
+
+    /// In per-module summary, indicate that the global value must be considered
+    /// a live root for index-based liveness analysis. Used for special LLVM
+    /// values such as llvm.global_ctors that the linker does not know about.
+    ///
+    /// In combined summary, indicate that the global value is live.
+    unsigned Live : 1;
+
+    /// Indicates that the linker resolved the symbol to a definition from
+    /// within the same linkage unit.
+    unsigned DSOLocal : 1;
+
+    /// Convenience Constructors
+    explicit GVFlags(GlobalValue::LinkageTypes Linkage,
+                     bool NotEligibleToImport, bool Live, bool IsLocal)
+        : Linkage(Linkage), NotEligibleToImport(NotEligibleToImport),
+          Live(Live), DSOLocal(IsLocal) {}
+  };
+
+private:
+  /// Kind of summary for use in dyn_cast<> et al.
+  SummaryKind Kind;
+
+  GVFlags Flags;
+
+  /// This is the hash of the name of the symbol in the original file. It is
+  /// identical to the GUID for global symbols, but differs for local symbols,
+  /// since the GUID includes the module-level ID in the hash.
+  GlobalValue::GUID OriginalName = 0;
+
+  /// \brief Path of module IR containing value's definition, used to locate
+  /// module during importing.
+  ///
+  /// This is only used during parsing of the combined index, or when
+  /// parsing the per-module index for creation of the combined summary index,
+  /// not during writing of the per-module index which doesn't contain a
+  /// module path string table.
+  StringRef ModulePath;
+
+  /// List of values referenced by this global value's definition
+  /// (either by the initializer of a global variable, or referenced
+  /// from within a function). This does not include functions called, which
+  /// are listed in the derived FunctionSummary object.
+  std::vector<ValueInfo> RefEdgeList;
+
+protected:
+  GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs)
+      : Kind(K), Flags(Flags), RefEdgeList(std::move(Refs)) {
+    // Check the member rather than the moved-from Refs parameter.
+    assert((K != AliasKind || RefEdgeList.empty()) &&
+           "Expect no references for AliasSummary");
+  }
+
+public:
+  virtual ~GlobalValueSummary() = default;
+
+  /// Returns the hash of the original name; it is identical to the GUID for
+  /// externally visible symbols, but not for local ones.
+  GlobalValue::GUID getOriginalName() { return OriginalName; }
+
+  /// Initialize the original name hash in this summary.
+  void setOriginalName(GlobalValue::GUID Name) { OriginalName = Name; }
+
+  /// Which kind of summary subclass this is.
+  SummaryKind getSummaryKind() const { return Kind; }
+
+  /// Set the path to the module containing this function, for use in
+  /// the combined index.
+  void setModulePath(StringRef ModPath) { ModulePath = ModPath; }
+
+  /// Get the path to the module containing this function.
+  StringRef modulePath() const { return ModulePath; }
+
+  /// Get the flags for this GlobalValue (see \p struct GVFlags).
+  GVFlags flags() { return Flags; }
+
+  /// Return linkage type recorded for this global value.
+  GlobalValue::LinkageTypes linkage() const {
+    return static_cast<GlobalValue::LinkageTypes>(Flags.Linkage);
+  }
+
+  /// Sets the linkage to the value determined by global summary-based
+  /// optimization. Will be applied in the ThinLTO backends.
+  void setLinkage(GlobalValue::LinkageTypes Linkage) {
+    Flags.Linkage = Linkage;
+  }
+
+  /// Return true if this global value can't be imported.
+  bool notEligibleToImport() const { return Flags.NotEligibleToImport; }
+
+  bool isLive() const { return Flags.Live; }
+
+  void setLive(bool Live) { Flags.Live = Live; }
+
+  void setDSOLocal(bool Local) { Flags.DSOLocal = Local; }
+
+  bool isDSOLocal() const { return Flags.DSOLocal; }
+
+  /// Flag that this global value cannot be imported.
+  void setNotEligibleToImport() { Flags.NotEligibleToImport = true; }
+
+  /// Return the list of values referenced by this global value definition.
+  ArrayRef<ValueInfo> refs() const { return RefEdgeList; }
+
+  /// If this is an alias summary, returns the summary of the aliased object (a
+  /// global variable or function), otherwise returns itself.
+  GlobalValueSummary *getBaseObject();
+  const GlobalValueSummary *getBaseObject() const;
+
+  friend class ModuleSummaryIndex;
+};
+
+/// \brief Alias summary information.
+class AliasSummary : public GlobalValueSummary {
+  GlobalValueSummary *AliaseeSummary;
+  // AliaseeGUID is only set and accessed when we are building a combined index
+  // via the BitcodeReader.
+  GlobalValue::GUID AliaseeGUID;
+
+public:
+  AliasSummary(GVFlags Flags)
+      : GlobalValueSummary(AliasKind, Flags, ArrayRef<ValueInfo>{}),
+        AliaseeSummary(nullptr), AliaseeGUID(0) {}
+
+  /// Check if this is an alias summary.
+  static bool classof(const GlobalValueSummary *GVS) {
+    return GVS->getSummaryKind() == AliasKind;
+  }
+
+  void setAliasee(GlobalValueSummary *Aliasee) { AliaseeSummary = Aliasee; }
+  void setAliaseeGUID(GlobalValue::GUID GUID) { AliaseeGUID = GUID; }
+
+  const GlobalValueSummary &getAliasee() const {
+    assert(AliaseeSummary && "Unexpected missing aliasee summary");
+    return *AliaseeSummary;
+  }
+
+  GlobalValueSummary &getAliasee() {
+    return const_cast<GlobalValueSummary &>(
+                         static_cast<const AliasSummary *>(this)->getAliasee());
+  }
+  const GlobalValue::GUID &getAliaseeGUID() const {
+    assert(AliaseeGUID && "Unexpected missing aliasee GUID");
+    return AliaseeGUID;
+  }
+};
+
+const inline GlobalValueSummary *GlobalValueSummary::getBaseObject() const {
+  if (auto *AS = dyn_cast<AliasSummary>(this))
+    return &AS->getAliasee();
+  return this;
+}
+
+inline GlobalValueSummary *GlobalValueSummary::getBaseObject() {
+  if (auto *AS = dyn_cast<AliasSummary>(this))
+    return &AS->getAliasee();
+  return this;
+}
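+
+// Usage sketch (editor's illustration): resolve through aliases before
+// inspecting function-specific data, given some `GlobalValueSummary *S`:
+//
+//   if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
+//     errs() << FS->instCount() << "\n";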
+
+/// \brief Function summary information to aid decisions and implementation of
+/// importing.
+class FunctionSummary : public GlobalValueSummary {
+public:
+  /// <CalleeValueInfo, CalleeInfo> call edge pair.
+  using EdgeTy = std::pair<ValueInfo, CalleeInfo>;
+
+  /// An "identifier" for a virtual function. This contains the type identifier
+  /// represented as a GUID and the offset from the address point to the virtual
+  /// function pointer, where "address point" is as defined in the Itanium ABI:
+  /// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#vtable-general
+  struct VFuncId {
+    GlobalValue::GUID GUID;
+    uint64_t Offset;
+  };
+
+  /// A specification for a virtual function call with all constant integer
+  /// arguments. This is used to perform virtual constant propagation on the
+  /// summary.
+  struct ConstVCall {
+    VFuncId VFunc;
+    std::vector<uint64_t> Args;
+  };
+
+  /// Function attribute flags. Used to track if a function accesses memory,
+  /// recurses or aliases.
+  struct FFlags {
+    unsigned ReadNone : 1;
+    unsigned ReadOnly : 1;
+    unsigned NoRecurse : 1;
+    unsigned ReturnDoesNotAlias : 1;
+  };
+
+  /// Create an empty FunctionSummary (with specified call edges).
+  /// Used to represent external nodes and the dummy root node.
+  static FunctionSummary
+  makeDummyFunctionSummary(std::vector<FunctionSummary::EdgeTy> Edges) {
+    return FunctionSummary(
+        FunctionSummary::GVFlags(
+            GlobalValue::LinkageTypes::AvailableExternallyLinkage,
+            /*NotEligibleToImport=*/true, /*Live=*/true, /*IsLocal=*/false),
+        0, FunctionSummary::FFlags{}, std::vector<ValueInfo>(),
+        std::move(Edges), std::vector<GlobalValue::GUID>(),
+        std::vector<FunctionSummary::VFuncId>(),
+        std::vector<FunctionSummary::VFuncId>(),
+        std::vector<FunctionSummary::ConstVCall>(),
+        std::vector<FunctionSummary::ConstVCall>());
+  }
+
+  /// A dummy node to reference external functions that aren't in the index
+  static FunctionSummary ExternalNode;
+
+private:
+  /// Number of instructions (ignoring debug instructions, e.g.) computed
+  /// during the initial compile step when the summary index is first built.
+  unsigned InstCount;
+
+  /// Function attribute flags. Used to track if a function accesses memory,
+  /// recurses or aliases.
+  FFlags FunFlags;
+
+  /// List of <CalleeValueInfo, CalleeInfo> call edge pairs from this function.
+  std::vector<EdgeTy> CallGraphEdgeList;
+
+  /// All type identifier related information. Because these fields are
+  /// relatively uncommon we only allocate space for them if necessary.
+  struct TypeIdInfo {
+    /// List of type identifiers used by this function in llvm.type.test
+    /// intrinsics other than by an llvm.assume intrinsic, represented as GUIDs.
+    std::vector<GlobalValue::GUID> TypeTests;
+
+    /// List of virtual calls made by this function using (respectively)
+    /// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics that do
+    /// not have all constant integer arguments.
+    std::vector<VFuncId> TypeTestAssumeVCalls, TypeCheckedLoadVCalls;
+
+    /// List of virtual calls made by this function using (respectively)
+    /// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics with
+    /// all constant integer arguments.
+    std::vector<ConstVCall> TypeTestAssumeConstVCalls,
+        TypeCheckedLoadConstVCalls;
+  };
+
+  std::unique_ptr<TypeIdInfo> TIdInfo;
+
+public:
+  FunctionSummary(GVFlags Flags, unsigned NumInsts, FFlags FunFlags,
+                  std::vector<ValueInfo> Refs, std::vector<EdgeTy> CGEdges,
+                  std::vector<GlobalValue::GUID> TypeTests,
+                  std::vector<VFuncId> TypeTestAssumeVCalls,
+                  std::vector<VFuncId> TypeCheckedLoadVCalls,
+                  std::vector<ConstVCall> TypeTestAssumeConstVCalls,
+                  std::vector<ConstVCall> TypeCheckedLoadConstVCalls)
+      : GlobalValueSummary(FunctionKind, Flags, std::move(Refs)),
+        InstCount(NumInsts), FunFlags(FunFlags),
+        CallGraphEdgeList(std::move(CGEdges)) {
+    if (!TypeTests.empty() || !TypeTestAssumeVCalls.empty() ||
+        !TypeCheckedLoadVCalls.empty() || !TypeTestAssumeConstVCalls.empty() ||
+        !TypeCheckedLoadConstVCalls.empty())
+      TIdInfo = llvm::make_unique<TypeIdInfo>(TypeIdInfo{
+          std::move(TypeTests), std::move(TypeTestAssumeVCalls),
+          std::move(TypeCheckedLoadVCalls),
+          std::move(TypeTestAssumeConstVCalls),
+          std::move(TypeCheckedLoadConstVCalls)});
+  }
+
+  /// Check if this is a function summary.
+  static bool classof(const GlobalValueSummary *GVS) {
+    return GVS->getSummaryKind() == FunctionKind;
+  }
+
+  /// Get function attribute flags.
+  FFlags &fflags() { return FunFlags; }
+
+  /// Get the instruction count recorded for this function.
+  unsigned instCount() const { return InstCount; }
+
+  /// Return the list of <CalleeValueInfo, CalleeInfo> pairs.
+  ArrayRef<EdgeTy> calls() const { return CallGraphEdgeList; }
+
+  /// Returns the list of type identifiers used by this function in
+  /// llvm.type.test intrinsics other than by an llvm.assume intrinsic,
+  /// represented as GUIDs.
+  ArrayRef<GlobalValue::GUID> type_tests() const {
+    if (TIdInfo)
+      return TIdInfo->TypeTests;
+    return {};
+  }
+
+  /// Returns the list of virtual calls made by this function using
+  /// llvm.assume(llvm.type.test) intrinsics that do not have all constant
+  /// integer arguments.
+  ArrayRef<VFuncId> type_test_assume_vcalls() const {
+    if (TIdInfo)
+      return TIdInfo->TypeTestAssumeVCalls;
+    return {};
+  }
+
+  /// Returns the list of virtual calls made by this function using
+  /// llvm.type.checked.load intrinsics that do not have all constant integer
+  /// arguments.
+  ArrayRef<VFuncId> type_checked_load_vcalls() const {
+    if (TIdInfo)
+      return TIdInfo->TypeCheckedLoadVCalls;
+    return {};
+  }
+
+  /// Returns the list of virtual calls made by this function using
+  /// llvm.assume(llvm.type.test) intrinsics with all constant integer
+  /// arguments.
+  ArrayRef<ConstVCall> type_test_assume_const_vcalls() const {
+    if (TIdInfo)
+      return TIdInfo->TypeTestAssumeConstVCalls;
+    return {};
+  }
+
+  /// Returns the list of virtual calls made by this function using
+  /// llvm.type.checked.load intrinsics with all constant integer arguments.
+  ArrayRef<ConstVCall> type_checked_load_const_vcalls() const {
+    if (TIdInfo)
+      return TIdInfo->TypeCheckedLoadConstVCalls;
+    return {};
+  }
+
+  /// Add a type test to the summary. This is used by WholeProgramDevirt if we
+  /// were unable to devirtualize a checked call.
+  void addTypeTest(GlobalValue::GUID Guid) {
+    if (!TIdInfo)
+      TIdInfo = llvm::make_unique<TypeIdInfo>();
+    TIdInfo->TypeTests.push_back(Guid);
+  }
+
+  friend struct GraphTraits<ValueInfo>;
+};
+
+template <> struct DenseMapInfo<FunctionSummary::VFuncId> {
+  static FunctionSummary::VFuncId getEmptyKey() { return {0, uint64_t(-1)}; }
+
+  static FunctionSummary::VFuncId getTombstoneKey() {
+    return {0, uint64_t(-2)};
+  }
+
+  static bool isEqual(FunctionSummary::VFuncId L, FunctionSummary::VFuncId R) {
+    return L.GUID == R.GUID && L.Offset == R.Offset;
+  }
+
+  static unsigned getHashValue(FunctionSummary::VFuncId I) { return I.GUID; }
+};
+
+template <> struct DenseMapInfo<FunctionSummary::ConstVCall> {
+  static FunctionSummary::ConstVCall getEmptyKey() {
+    return {{0, uint64_t(-1)}, {}};
+  }
+
+  static FunctionSummary::ConstVCall getTombstoneKey() {
+    return {{0, uint64_t(-2)}, {}};
+  }
+
+  static bool isEqual(FunctionSummary::ConstVCall L,
+                      FunctionSummary::ConstVCall R) {
+    return DenseMapInfo<FunctionSummary::VFuncId>::isEqual(L.VFunc, R.VFunc) &&
+           L.Args == R.Args;
+  }
+
+  static unsigned getHashValue(FunctionSummary::ConstVCall I) {
+    return I.VFunc.GUID;
+  }
+};
+
+/// \brief Global variable summary information to aid decisions and
+/// implementation of importing.
+///
+/// Currently this doesn't add anything to the base \p GlobalValueSummary,
+/// but is a placeholder as additional info may be added to the summary
+/// for variables.
+class GlobalVarSummary : public GlobalValueSummary {
+
+public:
+  GlobalVarSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
+      : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)) {}
+
+  /// Check if this is a global variable summary.
+  static bool classof(const GlobalValueSummary *GVS) {
+    return GVS->getSummaryKind() == GlobalVarKind;
+  }
+};
+
+struct TypeTestResolution {
+  /// Specifies which kind of type check we should emit for this byte array.
+  /// See http://clang.llvm.org/docs/ControlFlowIntegrityDesign.html for full
+  /// details on each kind of check; the enumerators are described with
+  /// reference to that document.
+  enum Kind {
+    Unsat,     ///< Unsatisfiable type (i.e. no global has this type metadata)
+    ByteArray, ///< Test a byte array (first example)
+    Inline,    ///< Inlined bit vector ("Short Inline Bit Vectors")
+    Single,    ///< Single element (last example in "Short Inline Bit Vectors")
+    AllOnes,   ///< All-ones bit vector ("Eliminating Bit Vector Checks for
+               ///  All-Ones Bit Vectors")
+  } TheKind = Unsat;
+
+  /// Range of size-1 expressed as a bit width. For example, if the size is in
+  /// range [1,256], this number will be 8. This helps generate the most compact
+  /// instruction sequences.
+  unsigned SizeM1BitWidth = 0;
+
+  // The following fields are only used if the target does not support the use
+  // of absolute symbols to store constants. Their meanings are the same as the
+  // corresponding fields in LowerTypeTestsModule::TypeIdLowering in
+  // LowerTypeTests.cpp.
+
+  uint64_t AlignLog2 = 0;
+  uint64_t SizeM1 = 0;
+  uint8_t BitMask = 0;
+  uint64_t InlineBits = 0;
+};
+
+struct WholeProgramDevirtResolution {
+  enum Kind {
+    Indir,        ///< Just do a regular virtual call
+    SingleImpl,   ///< Single implementation devirtualization
+    BranchFunnel, ///< When retpoline mitigation is enabled, use a branch funnel
+                  ///< that is defined in the merged module. Otherwise same as
+                  ///< Indir.
+  } TheKind = Indir;
+
+  std::string SingleImplName;
+
+  struct ByArg {
+    enum Kind {
+      Indir,            ///< Just do a regular virtual call
+      UniformRetVal,    ///< Uniform return value optimization
+      UniqueRetVal,     ///< Unique return value optimization
+      VirtualConstProp, ///< Virtual constant propagation
+    } TheKind = Indir;
+
+    /// Additional information for the resolution:
+    /// - UniformRetVal: the uniform return value.
+    /// - UniqueRetVal: the return value associated with the unique vtable (0 or
+    ///   1).
+    uint64_t Info = 0;
+
+    // The following fields are only used if the target does not support the use
+    // of absolute symbols to store constants.
+
+    uint32_t Byte = 0;
+    uint32_t Bit = 0;
+  };
+
+  /// Resolutions for calls with all constant integer arguments (excluding the
+  /// first argument, "this"), where the key is the argument vector.
+  std::map<std::vector<uint64_t>, ByArg> ResByArg;
+};
+
+struct TypeIdSummary {
+  TypeTestResolution TTRes;
+
+  /// Mapping from byte offset to whole-program devirt resolution for that
+  /// (typeid, byte offset) pair.
+  std::map<uint64_t, WholeProgramDevirtResolution> WPDRes;
+};
+
+/// A 160-bit SHA1 hash.
+using ModuleHash = std::array<uint32_t, 5>;
+
+/// Type used for iterating through the global value summary map.
+using const_gvsummary_iterator = GlobalValueSummaryMapTy::const_iterator;
+using gvsummary_iterator = GlobalValueSummaryMapTy::iterator;
+
+/// String table to hold/own module path strings, which additionally holds the
+/// module ID assigned to each module during the plugin step, as well as a hash
+/// of the module. The StringMap makes a copy of and owns inserted strings.
+using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>;
+
+/// Map of global value GUID to its summary, used to identify values defined in
+/// a particular module, and provide efficient access to their summary.
+using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
+
+/// Class to hold module path string table and global value map,
+/// and encapsulate methods for operating on them.
+class ModuleSummaryIndex {
+private:
+  /// Map from value name to list of summary instances for values of that
+  /// name (may be duplicates in the COMDAT case, e.g.).
+  GlobalValueSummaryMapTy GlobalValueMap;
+
+  /// Holds strings for combined index, mapping to the corresponding module ID.
+  ModulePathStringTableTy ModulePathStringTable;
+
+  /// Mapping from type identifiers to summary information for that type
+  /// identifier.
+  std::map<std::string, TypeIdSummary> TypeIdMap;
+
+  /// Mapping from original ID to GUID. If original ID can map to multiple
+  /// GUIDs, it will be mapped to 0.
+  std::map<GlobalValue::GUID, GlobalValue::GUID> OidGuidMap;
+
+  /// Indicates that summary-based GlobalValue GC has run, and values with
+  /// GVFlags::Live==false are really dead. Otherwise, all values must be
+  /// considered live.
+  bool WithGlobalValueDeadStripping = false;
+
+  /// Indicates that the distributed backend should skip compilation of the
+  /// module. This flag is supposed to be set by the distributed ThinLTO
+  /// indexing step when it detects that the module is not needed during the
+  /// final link. As a result, the distributed backend should just output a
+  /// minimal valid object file.
+  bool SkipModuleByDistributedBackend = false;
+
+  /// If true, we're performing analysis of an IR module and filling the
+  /// summary accordingly. If false, we're reading the summary from a BC or
+  /// YAML source. This affects the type of value stored in the NameOrGV union.
+  bool IsAnalysis;
+
+  std::set<std::string> CfiFunctionDefs;
+  std::set<std::string> CfiFunctionDecls;
+
+  // YAML I/O support.
+  friend yaml::MappingTraits<ModuleSummaryIndex>;
+
+  GlobalValueSummaryMapTy::value_type *
+  getOrInsertValuePtr(GlobalValue::GUID GUID) {
+    return &*GlobalValueMap.emplace(GUID, GlobalValueSummaryInfo(IsAnalysis)).first;
+  }
+
+public:
+  // See IsAnalysis variable comment.
+  ModuleSummaryIndex(bool IsPerformingAnalysis)
+      : IsAnalysis(IsPerformingAnalysis) {}
+
+  bool isPerformingAnalysis() const { return IsAnalysis; }
+
+  gvsummary_iterator begin() { return GlobalValueMap.begin(); }
+  const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
+  gvsummary_iterator end() { return GlobalValueMap.end(); }
+  const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
+  size_t size() const { return GlobalValueMap.size(); }
+
+  /// Convenience function for doing a DFS on a ValueInfo. Marks the function in
+  /// the FunctionHasParent map.
+  static void discoverNodes(ValueInfo V,
+                            std::map<ValueInfo, bool> &FunctionHasParent) {
+    if (!V.getSummaryList().size())
+      return; // skip external functions that don't have summaries
+
+    // Mark discovered if we haven't yet
+    auto S = FunctionHasParent.emplace(V, false);
+
+    // Stop if we've already discovered this node
+    if (!S.second)
+      return;
+
+    FunctionSummary *F =
+        dyn_cast<FunctionSummary>(V.getSummaryList().front().get());
+    assert(F != nullptr && "Expected FunctionSummary node");
+
+    for (auto &C : F->calls()) {
+      // Insert node if necessary
+      auto S = FunctionHasParent.emplace(C.first, true);
+
+      // Skip nodes that we're sure have parents
+      if (!S.second && S.first->second)
+        continue;
+
+      if (S.second)
+        discoverNodes(C.first, FunctionHasParent);
+      else
+        S.first->second = true;
+    }
+  }
+
+  // Calculate the callgraph root
+  FunctionSummary calculateCallGraphRoot() {
+    // Functions that have a parent will be marked in the FunctionHasParent
+    // map. Once we've marked all functions, the entries in the map that are
+    // false have no parent (so they're the roots).
+    std::map<ValueInfo, bool> FunctionHasParent;
+
+    for (auto &S : *this) {
+      // Skip external functions
+      if (!S.second.SummaryList.size() ||
+          !isa<FunctionSummary>(S.second.SummaryList.front().get()))
+        continue;
+      discoverNodes(ValueInfo(IsAnalysis, &S), FunctionHasParent);
+    }
+
+    std::vector<FunctionSummary::EdgeTy> Edges;
+    // create edges to all roots in the Index
+    for (auto &P : FunctionHasParent) {
+      if (P.second)
+        continue; // skip over non-root nodes
+      Edges.push_back(std::make_pair(P.first, CalleeInfo{}));
+    }
+    if (Edges.empty()) {
+      // Failed to find root - return an empty node
+      return FunctionSummary::makeDummyFunctionSummary({});
+    }
+    auto CallGraphRoot = FunctionSummary::makeDummyFunctionSummary(Edges);
+    return CallGraphRoot;
+  }
+
+  bool withGlobalValueDeadStripping() const {
+    return WithGlobalValueDeadStripping;
+  }
+  void setWithGlobalValueDeadStripping() {
+    WithGlobalValueDeadStripping = true;
+  }
+
+  bool skipModuleByDistributedBackend() const {
+    return SkipModuleByDistributedBackend;
+  }
+  void setSkipModuleByDistributedBackend() {
+    SkipModuleByDistributedBackend = true;
+  }
+
+  bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
+    return !WithGlobalValueDeadStripping || GVS->isLive();
+  }
+  bool isGUIDLive(GlobalValue::GUID GUID) const;
+
+  /// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
+  ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
+    auto I = GlobalValueMap.find(GUID);
+    return ValueInfo(IsAnalysis, I == GlobalValueMap.end() ? nullptr : &*I);
+  }
+
+  /// Return a ValueInfo for \p GUID.
+  ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID) {
+    return ValueInfo(IsAnalysis, getOrInsertValuePtr(GUID));
+  }
+
+  /// Return a ValueInfo for \p GUID setting value \p Name.
+  ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID, StringRef Name) {
+    assert(!IsAnalysis);
+    auto VP = getOrInsertValuePtr(GUID);
+    VP->second.U.Name = Name;
+    return ValueInfo(IsAnalysis, VP);
+  }
+
+  /// Return a ValueInfo for \p GV and mark it as belonging to GV.
+  ValueInfo getOrInsertValueInfo(const GlobalValue *GV) {
+    assert(IsAnalysis);
+    auto VP = getOrInsertValuePtr(GV->getGUID());
+    VP->second.U.GV = GV;
+    return ValueInfo(IsAnalysis, VP);
+  }
+
+  /// Return the GUID for \p OriginalId in the OidGuidMap.
+  GlobalValue::GUID getGUIDFromOriginalID(GlobalValue::GUID OriginalID) const {
+    const auto I = OidGuidMap.find(OriginalID);
+    return I == OidGuidMap.end() ? 0 : I->second;
+  }
+
+  std::set<std::string> &cfiFunctionDefs() { return CfiFunctionDefs; }
+  const std::set<std::string> &cfiFunctionDefs() const { return CfiFunctionDefs; }
+
+  std::set<std::string> &cfiFunctionDecls() { return CfiFunctionDecls; }
+  const std::set<std::string> &cfiFunctionDecls() const { return CfiFunctionDecls; }
+
+  /// Add a global value summary for a value of the given name.
+  void addGlobalValueSummary(StringRef ValueName,
+                             std::unique_ptr<GlobalValueSummary> Summary) {
+    addGlobalValueSummary(getOrInsertValueInfo(GlobalValue::getGUID(ValueName)),
+                          std::move(Summary));
+  }
+
+  /// Add a global value summary for the given ValueInfo.
+  void addGlobalValueSummary(ValueInfo VI,
+                             std::unique_ptr<GlobalValueSummary> Summary) {
+    addOriginalName(VI.getGUID(), Summary->getOriginalName());
+    // Here we have a notionally const VI, but the value it points to is owned
+    // by the non-const *this.
+    const_cast<GlobalValueSummaryMapTy::value_type *>(VI.getRef())
+        ->second.SummaryList.push_back(std::move(Summary));
+  }
+
+  /// Add an original name for the value of the given GUID.
+  void addOriginalName(GlobalValue::GUID ValueGUID,
+                       GlobalValue::GUID OrigGUID) {
+    if (OrigGUID == 0 || ValueGUID == OrigGUID)
+      return;
+    if (OidGuidMap.count(OrigGUID) && OidGuidMap[OrigGUID] != ValueGUID)
+      OidGuidMap[OrigGUID] = 0;
+    else
+      OidGuidMap[OrigGUID] = ValueGUID;
+  }
+
+  /// Find the summary for global \p GUID in module \p ModuleId, or nullptr if
+  /// not found.
+  GlobalValueSummary *findSummaryInModule(GlobalValue::GUID ValueGUID,
+                                          StringRef ModuleId) const {
+    auto CalleeInfo = getValueInfo(ValueGUID);
+    if (!CalleeInfo) {
+      return nullptr; // This function does not have a summary
+    }
+    auto Summary =
+        llvm::find_if(CalleeInfo.getSummaryList(),
+                      [&](const std::unique_ptr<GlobalValueSummary> &Summary) {
+                        return Summary->modulePath() == ModuleId;
+                      });
+    if (Summary == CalleeInfo.getSummaryList().end())
+      return nullptr;
+    return Summary->get();
+  }
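+
+  // Usage sketch (editor's illustration), assuming an index `Index` and a
+  // function named "foo" defined in module "a.o":
+  //
+  //   GlobalValue::GUID G = GlobalValue::getGUID("foo");
+  //   if (GlobalValueSummary *S = Index.findSummaryInModule(G, "a.o"))
+  //     errs() << S->modulePath() << "\n";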
+
+  /// Returns the first GlobalValueSummary for \p GV, asserting that there
+  /// is only one if \p PerModuleIndex.
+  GlobalValueSummary *getGlobalValueSummary(const GlobalValue &GV,
+                                            bool PerModuleIndex = true) const {
+    assert(GV.hasName() && "Can't get GlobalValueSummary for GV with no name");
+    return getGlobalValueSummary(GlobalValue::getGUID(GV.getName()),
+                                 PerModuleIndex);
+  }
+
+  /// Returns the first GlobalValueSummary for \p ValueGUID, asserting that
+  /// there is only one if \p PerModuleIndex.
+  GlobalValueSummary *getGlobalValueSummary(GlobalValue::GUID ValueGUID,
+                                            bool PerModuleIndex = true) const;
+
+  /// Table of modules, containing module hash and id.
+  const StringMap<std::pair<uint64_t, ModuleHash>> &modulePaths() const {
+    return ModulePathStringTable;
+  }
+
+  /// Table of modules, containing hash and id.
+  StringMap<std::pair<uint64_t, ModuleHash>> &modulePaths() {
+    return ModulePathStringTable;
+  }
+
+  /// Get the module ID recorded for the given module path.
+  uint64_t getModuleId(const StringRef ModPath) const {
+    return ModulePathStringTable.lookup(ModPath).first;
+  }
+
+  /// Get the module SHA1 hash recorded for the given module path.
+  const ModuleHash &getModuleHash(const StringRef ModPath) const {
+    auto It = ModulePathStringTable.find(ModPath);
+    assert(It != ModulePathStringTable.end() && "Module not registered");
+    return It->second.second;
+  }
+
+  /// Convenience method for creating a promoted global name
+  /// for the given value name of a local, using its original module's hash.
+  static std::string getGlobalNameForLocal(StringRef Name, ModuleHash ModHash) {
+    SmallString<256> NewName(Name);
+    NewName += ".llvm.";
+    NewName += utostr((uint64_t(ModHash[0]) << 32) |
+                      ModHash[1]); // Take the first 64 bits
+    return NewName.str();
+  }
+
+  /// Helper to obtain the unpromoted name for a global value (or the original
+  /// name if not promoted).
+  static StringRef getOriginalNameBeforePromote(StringRef Name) {
+    std::pair<StringRef, StringRef> Pair = Name.split(".llvm.");
+    return Pair.first;
+  }
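+
+  // Example (editor's illustration): promoting a local named "bar" whose
+  // module hash begins {0x1, 0x2, ...} yields "bar.llvm.4294967298", since
+  // (1 << 32) | 2 == 4294967298, and getOriginalNameBeforePromote() then
+  // recovers "bar".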
+
+  using ModuleInfo = ModulePathStringTableTy::value_type;
+
+  /// Add a new module with the given \p Hash, mapped to the given \p ModId,
+  /// and return a pointer to the new entry.
+  ModuleInfo *addModule(StringRef ModPath, uint64_t ModId,
+                        ModuleHash Hash = ModuleHash{{0}}) {
+    return &*ModulePathStringTable.insert({ModPath, {ModId, Hash}}).first;
+  }
+
+  /// Check if the given Module has any functions available for exporting
+  /// in the index. We consider any module present in the ModulePathStringTable
+  /// to have exported functions.
+  bool hasExportedFunctions(const Module &M) const {
+    return ModulePathStringTable.count(M.getModuleIdentifier());
+  }
+
+  const std::map<std::string, TypeIdSummary> &typeIds() const {
+    return TypeIdMap;
+  }
+
+  /// This accessor should only be used when exporting because it can mutate the
+  /// map.
+  TypeIdSummary &getOrInsertTypeIdSummary(StringRef TypeId) {
+    return TypeIdMap[TypeId];
+  }
+
+  /// This returns either a pointer to the type id summary (if present in the
+  /// summary map) or null (if not present). This may be used when importing.
+  const TypeIdSummary *getTypeIdSummary(StringRef TypeId) const {
+    auto I = TypeIdMap.find(TypeId);
+    if (I == TypeIdMap.end())
+      return nullptr;
+    return &I->second;
+  }
+
+  /// Collect for the given module the list of functions it defines
+  /// (GUID -> Summary).
+  void collectDefinedFunctionsForModule(StringRef ModulePath,
+                                        GVSummaryMapTy &GVSummaryMap) const;
+
+  /// Collect for each module the list of Summaries it defines (GUID ->
+  /// Summary).
+  void collectDefinedGVSummariesPerModule(
+      StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries) const;
+
+  /// Export summary to dot file for GraphViz.
+  void exportToDot(raw_ostream& OS) const;
+
+  /// Print out strongly connected components for debugging.
+  void dumpSCCs(raw_ostream &OS);
+};
+
+/// GraphTraits definition to build SCCs for the index.
+template <> struct GraphTraits<ValueInfo> {
+  typedef ValueInfo NodeRef;
+
+  static NodeRef valueInfoFromEdge(FunctionSummary::EdgeTy &P) {
+    return P.first;
+  }
+  using ChildIteratorType =
+      mapped_iterator<std::vector<FunctionSummary::EdgeTy>::iterator,
+                      decltype(&valueInfoFromEdge)>;
+
+  static NodeRef getEntryNode(ValueInfo V) { return V; }
+
+  static ChildIteratorType child_begin(NodeRef N) {
+    if (!N.getSummaryList().size()) // handle external function
+      return ChildIteratorType(
+          FunctionSummary::ExternalNode.CallGraphEdgeList.begin(),
+          &valueInfoFromEdge);
+    FunctionSummary *F =
+        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
+    return ChildIteratorType(F->CallGraphEdgeList.begin(), &valueInfoFromEdge);
+  }
+
+  static ChildIteratorType child_end(NodeRef N) {
+    if (!N.getSummaryList().size()) // handle external function
+      return ChildIteratorType(
+          FunctionSummary::ExternalNode.CallGraphEdgeList.end(),
+          &valueInfoFromEdge);
+    FunctionSummary *F =
+        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
+    return ChildIteratorType(F->CallGraphEdgeList.end(), &valueInfoFromEdge);
+  }
+};
+
+template <>
+struct GraphTraits<ModuleSummaryIndex *> : public GraphTraits<ValueInfo> {
+  static NodeRef getEntryNode(ModuleSummaryIndex *I) {
+    std::unique_ptr<GlobalValueSummary> Root =
+        make_unique<FunctionSummary>(I->calculateCallGraphRoot());
+    GlobalValueSummaryInfo G(I->isPerformingAnalysis());
+    G.SummaryList.push_back(std::move(Root));
+    static auto P =
+        GlobalValueSummaryMapTy::value_type(GlobalValue::GUID(0), std::move(G));
+    return ValueInfo(I->isPerformingAnalysis(), &P);
+  }
+};
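+
+// Usage sketch (editor's illustration): the GraphTraits above let generic
+// graph algorithms walk the summary call graph, e.g. with
+// #include "llvm/ADT/SCCIterator.h" and an existing index `Index`:
+//
+//   for (auto I = scc_begin(&Index); !I.isAtEnd(); ++I)
+//     for (ValueInfo V : *I)
+//       errs() << V.getGUID() << "\n";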
+
+} // end namespace llvm
+
+#endif // LLVM_IR_MODULESUMMARYINDEX_H
diff --git a/linux-x64/clang/include/llvm/IR/ModuleSummaryIndexYAML.h b/linux-x64/clang/include/llvm/IR/ModuleSummaryIndexYAML.h
new file mode 100644
index 0000000..8f30de6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -0,0 +1,276 @@
+//===-- llvm/ModuleSummaryIndexYAML.h - YAML I/O for summary ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULESUMMARYINDEXYAML_H
+#define LLVM_IR_MODULESUMMARYINDEXYAML_H
+
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Support/YAMLTraits.h"
+
+namespace llvm {
+namespace yaml {
+
+template <> struct ScalarEnumerationTraits<TypeTestResolution::Kind> {
+  static void enumeration(IO &io, TypeTestResolution::Kind &value) {
+    io.enumCase(value, "Unsat", TypeTestResolution::Unsat);
+    io.enumCase(value, "ByteArray", TypeTestResolution::ByteArray);
+    io.enumCase(value, "Inline", TypeTestResolution::Inline);
+    io.enumCase(value, "Single", TypeTestResolution::Single);
+    io.enumCase(value, "AllOnes", TypeTestResolution::AllOnes);
+  }
+};
+
+template <> struct MappingTraits<TypeTestResolution> {
+  static void mapping(IO &io, TypeTestResolution &res) {
+    io.mapOptional("Kind", res.TheKind);
+    io.mapOptional("SizeM1BitWidth", res.SizeM1BitWidth);
+    io.mapOptional("AlignLog2", res.AlignLog2);
+    io.mapOptional("SizeM1", res.SizeM1);
+    io.mapOptional("BitMask", res.BitMask);
+    io.mapOptional("InlineBits", res.InlineBits);
+  }
+};
+
+template <>
+struct ScalarEnumerationTraits<WholeProgramDevirtResolution::ByArg::Kind> {
+  static void enumeration(IO &io,
+                          WholeProgramDevirtResolution::ByArg::Kind &value) {
+    io.enumCase(value, "Indir", WholeProgramDevirtResolution::ByArg::Indir);
+    io.enumCase(value, "UniformRetVal",
+                WholeProgramDevirtResolution::ByArg::UniformRetVal);
+    io.enumCase(value, "UniqueRetVal",
+                WholeProgramDevirtResolution::ByArg::UniqueRetVal);
+    io.enumCase(value, "VirtualConstProp",
+                WholeProgramDevirtResolution::ByArg::VirtualConstProp);
+  }
+};
+
+template <> struct MappingTraits<WholeProgramDevirtResolution::ByArg> {
+  static void mapping(IO &io, WholeProgramDevirtResolution::ByArg &res) {
+    io.mapOptional("Kind", res.TheKind);
+    io.mapOptional("Info", res.Info);
+    io.mapOptional("Byte", res.Byte);
+    io.mapOptional("Bit", res.Bit);
+  }
+};
+
+template <>
+struct CustomMappingTraits<
+    std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg>> {
+  static void inputOne(
+      IO &io, StringRef Key,
+      std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
+    std::vector<uint64_t> Args;
+    std::pair<StringRef, StringRef> P = {"", Key};
+    while (!P.second.empty()) {
+      P = P.second.split(',');
+      uint64_t Arg;
+      if (P.first.getAsInteger(0, Arg)) {
+        io.setError("key not an integer");
+        return;
+      }
+      Args.push_back(Arg);
+    }
+    io.mapRequired(Key.str().c_str(), V[Args]);
+  }
+  static void output(
+      IO &io,
+      std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
+    for (auto &P : V) {
+      std::string Key;
+      for (uint64_t Arg : P.first) {
+        if (!Key.empty())
+          Key += ',';
+        Key += llvm::utostr(Arg);
+      }
+      io.mapRequired(Key.c_str(), P.second);
+    }
+  }
+};
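+// For illustration, the map keys handled above are the comma-separated
+// argument values of a virtual call site; a hypothetical entry (under the
+// "ResByArg" field mapped further below) might look like:
+//
+//   ResByArg:
+//     1,16:
+//       Kind: UniformRetVal
+//       Info: 42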
+
+template <> struct ScalarEnumerationTraits<WholeProgramDevirtResolution::Kind> {
+  static void enumeration(IO &io, WholeProgramDevirtResolution::Kind &value) {
+    io.enumCase(value, "Indir", WholeProgramDevirtResolution::Indir);
+    io.enumCase(value, "SingleImpl", WholeProgramDevirtResolution::SingleImpl);
+    io.enumCase(value, "BranchFunnel",
+                WholeProgramDevirtResolution::BranchFunnel);
+  }
+};
+
+template <> struct MappingTraits<WholeProgramDevirtResolution> {
+  static void mapping(IO &io, WholeProgramDevirtResolution &res) {
+    io.mapOptional("Kind", res.TheKind);
+    io.mapOptional("SingleImplName", res.SingleImplName);
+    io.mapOptional("ResByArg", res.ResByArg);
+  }
+};
+
+template <>
+struct CustomMappingTraits<std::map<uint64_t, WholeProgramDevirtResolution>> {
+  static void inputOne(IO &io, StringRef Key,
+                       std::map<uint64_t, WholeProgramDevirtResolution> &V) {
+    uint64_t KeyInt;
+    if (Key.getAsInteger(0, KeyInt)) {
+      io.setError("key not an integer");
+      return;
+    }
+    io.mapRequired(Key.str().c_str(), V[KeyInt]);
+  }
+  static void output(IO &io,
+                     std::map<uint64_t, WholeProgramDevirtResolution> &V) {
+    for (auto &P : V)
+      io.mapRequired(llvm::utostr(P.first).c_str(), P.second);
+  }
+};
+
+template <> struct MappingTraits<TypeIdSummary> {
+  static void mapping(IO &io, TypeIdSummary &summary) {
+    io.mapOptional("TTRes", summary.TTRes);
+    io.mapOptional("WPDRes", summary.WPDRes);
+  }
+};
+
+struct FunctionSummaryYaml {
+  unsigned Linkage;
+  bool NotEligibleToImport, Live, IsLocal;
+  std::vector<uint64_t> TypeTests;
+  std::vector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
+      TypeCheckedLoadVCalls;
+  std::vector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
+      TypeCheckedLoadConstVCalls;
+};
+
+} // End yaml namespace
+} // End llvm namespace
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<FunctionSummary::VFuncId> {
+  static void mapping(IO &io, FunctionSummary::VFuncId &id) {
+    io.mapOptional("GUID", id.GUID);
+    io.mapOptional("Offset", id.Offset);
+  }
+};
+
+template <> struct MappingTraits<FunctionSummary::ConstVCall> {
+  static void mapping(IO &io, FunctionSummary::ConstVCall &id) {
+    io.mapOptional("VFunc", id.VFunc);
+    io.mapOptional("Args", id.Args);
+  }
+};
+
+} // End yaml namespace
+} // End llvm namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::VFuncId)
+LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::ConstVCall)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<FunctionSummaryYaml> {
+  static void mapping(IO &io, FunctionSummaryYaml& summary) {
+    io.mapOptional("Linkage", summary.Linkage);
+    io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport);
+    io.mapOptional("Live", summary.Live);
+    io.mapOptional("Local", summary.IsLocal);
+    io.mapOptional("TypeTests", summary.TypeTests);
+    io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls);
+    io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls);
+    io.mapOptional("TypeTestAssumeConstVCalls",
+                   summary.TypeTestAssumeConstVCalls);
+    io.mapOptional("TypeCheckedLoadConstVCalls",
+                   summary.TypeCheckedLoadConstVCalls);
+  }
+};
+
+} // End yaml namespace
+} // End llvm namespace
+
+LLVM_YAML_IS_STRING_MAP(TypeIdSummary)
+LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)
+
+namespace llvm {
+namespace yaml {
+
+// FIXME: Add YAML mappings for the rest of the module summary.
+template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
+  static void inputOne(IO &io, StringRef Key, GlobalValueSummaryMapTy &V) {
+    std::vector<FunctionSummaryYaml> FSums;
+    io.mapRequired(Key.str().c_str(), FSums);
+    uint64_t KeyInt;
+    if (Key.getAsInteger(0, KeyInt)) {
+      io.setError("key not an integer");
+      return;
+    }
+    auto P = V.emplace(KeyInt, /*IsAnalysis=*/false);
+    auto &Elem = (*P.first).second;
+    for (auto &FSum : FSums) {
+      Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
+          GlobalValueSummary::GVFlags(
+              static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
+              FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal),
+          0, FunctionSummary::FFlags{}, ArrayRef<ValueInfo>{},
+          ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
+          std::move(FSum.TypeTestAssumeVCalls),
+          std::move(FSum.TypeCheckedLoadVCalls),
+          std::move(FSum.TypeTestAssumeConstVCalls),
+          std::move(FSum.TypeCheckedLoadConstVCalls)));
+    }
+  }
+  static void output(IO &io, GlobalValueSummaryMapTy &V) {
+    for (auto &P : V) {
+      std::vector<FunctionSummaryYaml> FSums;
+      for (auto &Sum : P.second.SummaryList) {
+        if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get()))
+          FSums.push_back(FunctionSummaryYaml{
+              FSum->flags().Linkage,
+              static_cast<bool>(FSum->flags().NotEligibleToImport),
+              static_cast<bool>(FSum->flags().Live),
+              static_cast<bool>(FSum->flags().DSOLocal), FSum->type_tests(),
+              FSum->type_test_assume_vcalls(), FSum->type_checked_load_vcalls(),
+              FSum->type_test_assume_const_vcalls(),
+              FSum->type_checked_load_const_vcalls()});
+      }
+      if (!FSums.empty())
+        io.mapRequired(llvm::utostr(P.first).c_str(), FSums);
+    }
+  }
+};
+
+template <> struct MappingTraits<ModuleSummaryIndex> {
+  static void mapping(IO &io, ModuleSummaryIndex& index) {
+    io.mapOptional("GlobalValueMap", index.GlobalValueMap);
+    io.mapOptional("TypeIdMap", index.TypeIdMap);
+    io.mapOptional("WithGlobalValueDeadStripping",
+                   index.WithGlobalValueDeadStripping);
+
+    if (io.outputting()) {
+      std::vector<std::string> CfiFunctionDefs(index.CfiFunctionDefs.begin(),
+                                               index.CfiFunctionDefs.end());
+      io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
+      std::vector<std::string> CfiFunctionDecls(index.CfiFunctionDecls.begin(),
+                                                index.CfiFunctionDecls.end());
+      io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
+    } else {
+      std::vector<std::string> CfiFunctionDefs;
+      io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
+      index.CfiFunctionDefs = {CfiFunctionDefs.begin(), CfiFunctionDefs.end()};
+      std::vector<std::string> CfiFunctionDecls;
+      io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
+      index.CfiFunctionDecls = {CfiFunctionDecls.begin(),
+                                CfiFunctionDecls.end()};
+    }
+  }
+};
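+
+// A sketch of a document this mapping accepts (the GUIDs, names, and values
+// are made up for illustration):
+//
+//   ---
+//   GlobalValueMap:
+//     42:
+//       - Linkage: 0
+//         Live: true
+//         TypeTests: [ 123 ]
+//   TypeIdMap:
+//     _ZTS1A:
+//       TTRes:
+//         Kind: Single
+//         SizeM1BitWidth: 0
+//   WithGlobalValueDeadStripping: false
+//   ...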
+
+} // End yaml namespace
+} // End llvm namespace
+
+#endif // LLVM_IR_MODULESUMMARYINDEXYAML_H
diff --git a/linux-x64/clang/include/llvm/IR/NoFolder.h b/linux-x64/clang/include/llvm/IR/NoFolder.h
new file mode 100644
index 0000000..def07ff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/NoFolder.h
@@ -0,0 +1,342 @@
+//===- NoFolder.h - Constant folding helper ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NoFolder class, a helper for IRBuilder.  It provides
+// IRBuilder with a set of methods for creating unfolded constants.  This is
+// useful for learners trying to understand how LLVM IR works, and who don't
+// want details to be hidden by the constant folder.  For general constant
+// creation and folding, use ConstantExpr and the routines in
+// llvm/Analysis/ConstantFolding.h.
+//
+// Note: since it is not actually possible to create unfolded constants, this
+// class returns instructions rather than constants.
+//
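+// A short usage sketch (assuming an existing BasicBlock *BB and the
+// llvm/IR/IRBuilder.h header):
+//
+//   IRBuilder<NoFolder> Builder(BB);
+//   // Emits "add i32 2, 3" as an instruction instead of folding it to 5.
+//   Value *Sum = Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3));
+//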
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_NOFOLDER_H
+#define LLVM_IR_NOFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+
+/// NoFolder - Create "constants" (actually, instructions) with no folding.
+class NoFolder {
+public:
+  explicit NoFolder() = default;
+
+  //===--------------------------------------------------------------------===//
+  // Binary Operators
+  //===--------------------------------------------------------------------===//
+
+  Instruction *CreateAdd(Constant *LHS, Constant *RHS,
+                         bool HasNUW = false, bool HasNSW = false) const {
+    BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNSWAdd(LHS, RHS);
+  }
+
+  Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNUWAdd(LHS, RHS);
+  }
+
+  Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateFAdd(LHS, RHS);
+  }
+
+  Instruction *CreateSub(Constant *LHS, Constant *RHS,
+                         bool HasNUW = false, bool HasNSW = false) const {
+    BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNSWSub(LHS, RHS);
+  }
+
+  Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNUWSub(LHS, RHS);
+  }
+
+  Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateFSub(LHS, RHS);
+  }
+
+  Instruction *CreateMul(Constant *LHS, Constant *RHS,
+                         bool HasNUW = false, bool HasNSW = false) const {
+    BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNSWMul(LHS, RHS);
+  }
+
+  Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateNUWMul(LHS, RHS);
+  }
+
+  Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateFMul(LHS, RHS);
+  }
+
+  Instruction *CreateUDiv(Constant *LHS, Constant *RHS,
+                          bool isExact = false) const {
+    if (!isExact)
+      return BinaryOperator::CreateUDiv(LHS, RHS);
+    return BinaryOperator::CreateExactUDiv(LHS, RHS);
+  }
+
+  Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateExactUDiv(LHS, RHS);
+  }
+
+  Instruction *CreateSDiv(Constant *LHS, Constant *RHS,
+                          bool isExact = false) const {
+    if (!isExact)
+      return BinaryOperator::CreateSDiv(LHS, RHS);
+    return BinaryOperator::CreateExactSDiv(LHS, RHS);
+  }
+
+  Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateExactSDiv(LHS, RHS);
+  }
+
+  Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateFDiv(LHS, RHS);
+  }
+
+  Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateURem(LHS, RHS);
+  }
+
+  Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateSRem(LHS, RHS);
+  }
+
+  Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateFRem(LHS, RHS);
+  }
+
+  Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
+                         bool HasNSW = false) const {
+    BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *CreateLShr(Constant *LHS, Constant *RHS,
+                          bool isExact = false) const {
+    if (!isExact)
+      return BinaryOperator::CreateLShr(LHS, RHS);
+    return BinaryOperator::CreateExactLShr(LHS, RHS);
+  }
+
+  Instruction *CreateAShr(Constant *LHS, Constant *RHS,
+                          bool isExact = false) const {
+    if (!isExact)
+      return BinaryOperator::CreateAShr(LHS, RHS);
+    return BinaryOperator::CreateExactAShr(LHS, RHS);
+  }
+
+  Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateAnd(LHS, RHS);
+  }
+
+  Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateOr(LHS, RHS);
+  }
+
+  Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::CreateXor(LHS, RHS);
+  }
+
+  Instruction *CreateBinOp(Instruction::BinaryOps Opc,
+                           Constant *LHS, Constant *RHS) const {
+    return BinaryOperator::Create(Opc, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Unary Operators
+  //===--------------------------------------------------------------------===//
+
+  Instruction *CreateNeg(Constant *C,
+                         bool HasNUW = false, bool HasNSW = false) const {
+    BinaryOperator *BO = BinaryOperator::CreateNeg(C);
+    if (HasNUW) BO->setHasNoUnsignedWrap();
+    if (HasNSW) BO->setHasNoSignedWrap();
+    return BO;
+  }
+
+  Instruction *CreateNSWNeg(Constant *C) const {
+    return BinaryOperator::CreateNSWNeg(C);
+  }
+
+  Instruction *CreateNUWNeg(Constant *C) const {
+    return BinaryOperator::CreateNUWNeg(C);
+  }
+
+  Instruction *CreateFNeg(Constant *C) const {
+    return BinaryOperator::CreateFNeg(C);
+  }
+
+  Instruction *CreateNot(Constant *C) const {
+    return BinaryOperator::CreateNot(C);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Memory Instructions
+  //===--------------------------------------------------------------------===//
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+                                ArrayRef<Constant *> IdxList) const {
+    return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+  }
+
+  Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstantExpr::getGetElementPtr(Ty, C, Idx);
+  }
+
+  Instruction *CreateGetElementPtr(Type *Ty, Constant *C,
+                                   ArrayRef<Value *> IdxList) const {
+    return GetElementPtrInst::Create(Ty, C, IdxList);
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        ArrayRef<Constant *> IdxList) const {
+    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+  }
+
+  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                        Constant *Idx) const {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
+  }
+
+  Instruction *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                           ArrayRef<Value *> IdxList) const {
+    return GetElementPtrInst::CreateInBounds(Ty, C, IdxList);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Cast/Conversion Operators
+  //===--------------------------------------------------------------------===//
+
+  Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
+                    Type *DestTy) const {
+    return CastInst::Create(Op, C, DestTy);
+  }
+
+  Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
+    return CastInst::CreatePointerCast(C, DestTy);
+  }
+
+  Instruction *CreateIntCast(Constant *C, Type *DestTy,
+                       bool isSigned) const {
+    return CastInst::CreateIntegerCast(C, DestTy, isSigned);
+  }
+
+  Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
+    return CastInst::CreateFPCast(C, DestTy);
+  }
+
+  Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::BitCast, C, DestTy);
+  }
+
+  Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::IntToPtr, C, DestTy);
+  }
+
+  Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
+    return CreateCast(Instruction::PtrToInt, C, DestTy);
+  }
+
+  Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+    return CastInst::CreateZExtOrBitCast(C, DestTy);
+  }
+
+  Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+    return CastInst::CreateSExtOrBitCast(C, DestTy);
+  }
+
+  Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+    return CastInst::CreateTruncOrBitCast(C, DestTy);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Compare Instructions
+  //===--------------------------------------------------------------------===//
+
+  Instruction *CreateICmp(CmpInst::Predicate P,
+                          Constant *LHS, Constant *RHS) const {
+    return new ICmpInst(P, LHS, RHS);
+  }
+
+  Instruction *CreateFCmp(CmpInst::Predicate P,
+                          Constant *LHS, Constant *RHS) const {
+    return new FCmpInst(P, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other Instructions
+  //===--------------------------------------------------------------------===//
+
+  Instruction *CreateSelect(Constant *C,
+                            Constant *True, Constant *False) const {
+    return SelectInst::Create(C, True, False);
+  }
+
+  Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+    return ExtractElementInst::Create(Vec, Idx);
+  }
+
+  Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt,
+                                   Constant *Idx) const {
+    return InsertElementInst::Create(Vec, NewElt, Idx);
+  }
+
+  Instruction *CreateShuffleVector(Constant *V1, Constant *V2,
+                                   Constant *Mask) const {
+    return new ShuffleVectorInst(V1, V2, Mask);
+  }
+
+  Instruction *CreateExtractValue(Constant *Agg,
+                                  ArrayRef<unsigned> IdxList) const {
+    return ExtractValueInst::Create(Agg, IdxList);
+  }
+
+  Instruction *CreateInsertValue(Constant *Agg, Constant *Val,
+                                 ArrayRef<unsigned> IdxList) const {
+    return InsertValueInst::Create(Agg, Val, IdxList);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_NOFOLDER_H
diff --git a/linux-x64/clang/include/llvm/IR/OperandTraits.h b/linux-x64/clang/include/llvm/IR/OperandTraits.h
new file mode 100644
index 0000000..c618aff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/OperandTraits.h
@@ -0,0 +1,163 @@
+//===-- llvm/OperandTraits.h - OperandTraits class definition ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the traits classes that are handy for enforcing the correct
+// layout of various User subclasses. It also provides the means for accessing
+// the operands in the most efficient manner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPERANDTRAITS_H
+#define LLVM_IR_OPERANDTRAITS_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//                          FixedNumOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// FixedNumOperandTraits - determine the allocation regime of the Use array
+/// when it is a prefix to the User object, and the number of Use objects is
+/// known at compile time.
+
+template <typename SubClass, unsigned ARITY>
+struct FixedNumOperandTraits {
+  static Use *op_begin(SubClass* U) {
+    static_assert(
+        !std::is_polymorphic<SubClass>::value,
+        "adding virtual methods to subclasses of User breaks use lists");
+    return reinterpret_cast<Use*>(U) - ARITY;
+  }
+  static Use *op_end(SubClass* U) {
+    return reinterpret_cast<Use*>(U);
+  }
+  static unsigned operands(const User*) {
+    return ARITY;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                          OptionalOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// OptionalOperandTraits - when the number of operands may change at runtime.
+/// Naturally it may only decrease, because the allocations may not change.
+
+template <typename SubClass, unsigned ARITY = 1>
+struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> {
+  static unsigned operands(const User *U) {
+    return U->getNumOperands();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                          VariadicOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// VariadicOperandTraits - determine the allocation regime of the Use array
+/// when it is a prefix to the User object, and the number of Use objects is
+/// only known at allocation time.
+
+template <typename SubClass, unsigned MINARITY = 0>
+struct VariadicOperandTraits {
+  static Use *op_begin(SubClass* U) {
+    static_assert(
+        !std::is_polymorphic<SubClass>::value,
+        "adding virtual methods to subclasses of User breaks use lists");
+    return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands();
+  }
+  static Use *op_end(SubClass* U) {
+    return reinterpret_cast<Use*>(U);
+  }
+  static unsigned operands(const User *U) {
+    return U->getNumOperands();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                          HungoffOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// HungoffOperandTraits - determine the allocation regime of the Use array
+/// when it is not a prefix to the User object, but allocated at an unrelated
+/// heap address.
+///
+/// This is the traits class that is needed when the Use array must be
+/// resizable.
+
+template <unsigned MINARITY = 1>
+struct HungoffOperandTraits {
+  static Use *op_begin(User* U) {
+    return U->getOperandList();
+  }
+  static Use *op_end(User* U) {
+    return U->getOperandList() + U->getNumOperands();
+  }
+  static unsigned operands(const User *U) {
+    return U->getNumOperands();
+  }
+};
+
+/// Macro for generating in-class operand accessor declarations.
+/// It should only be called in the public section of the interface.
+///
+#define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS) \
+  public: \
+  inline VALUECLASS *getOperand(unsigned) const; \
+  inline void setOperand(unsigned, VALUECLASS*); \
+  inline op_iterator op_begin(); \
+  inline const_op_iterator op_begin() const; \
+  inline op_iterator op_end(); \
+  inline const_op_iterator op_end() const; \
+  protected: \
+  template <int> inline Use &Op(); \
+  template <int> inline const Use &Op() const; \
+  public: \
+  inline unsigned getNumOperands() const
+
+/// Macro for generating out-of-class operand accessor definitions.
+#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS) \
+CLASS::op_iterator CLASS::op_begin() { \
+  return OperandTraits<CLASS>::op_begin(this); \
+} \
+CLASS::const_op_iterator CLASS::op_begin() const { \
+  return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \
+} \
+CLASS::op_iterator CLASS::op_end() { \
+  return OperandTraits<CLASS>::op_end(this); \
+} \
+CLASS::const_op_iterator CLASS::op_end() const { \
+  return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \
+} \
+VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \
+  assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
+         && "getOperand() out of range!"); \
+  return cast_or_null<VALUECLASS>( \
+    OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture].get()); \
+} \
+void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \
+  assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
+         && "setOperand() out of range!"); \
+  OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \
+} \
+unsigned CLASS::getNumOperands() const { \
+  return OperandTraits<CLASS>::operands(this); \
+} \
+template <int Idx_nocapture> Use &CLASS::Op() { \
+  return this->OpFrom<Idx_nocapture>(this); \
+} \
+template <int Idx_nocapture> const Use &CLASS::Op() const { \
+  return this->OpFrom<Idx_nocapture>(this); \
+}
+
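+// A sketch of how the two macros are typically paired in a User subclass
+// (MyBinaryUser is a hypothetical class for illustration):
+//
+//   class MyBinaryUser : public User {
+//   public:
+//     DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+//   };
+//
+//   template <>
+//   struct OperandTraits<MyBinaryUser>
+//       : public FixedNumOperandTraits<MyBinaryUser, 2> {};
+//
+//   DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MyBinaryUser, Value)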
+
+} // End llvm namespace
+
+#endif // LLVM_IR_OPERANDTRAITS_H
diff --git a/linux-x64/clang/include/llvm/IR/Operator.h b/linux-x64/clang/include/llvm/IR/Operator.h
new file mode 100644
index 0000000..01746e4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Operator.h
@@ -0,0 +1,566 @@
+//===-- llvm/Operator.h - Operator utility subclass -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various classes for working with Instructions and
+// ConstantExprs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPERATOR_H
+#define LLVM_IR_OPERATOR_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstddef>
+
+namespace llvm {
+
+/// This is a utility class that provides an abstraction for the common
+/// functionality between Instructions and ConstantExprs.
+class Operator : public User {
+public:
+  // The Operator class is intended to be used as a utility, and is never itself
+  // instantiated.
+  Operator() = delete;
+  ~Operator() = delete;
+
+  void *operator new(size_t s) = delete;
+
+  /// Return the opcode for this Instruction or ConstantExpr.
+  unsigned getOpcode() const {
+    if (const Instruction *I = dyn_cast<Instruction>(this))
+      return I->getOpcode();
+    return cast<ConstantExpr>(this)->getOpcode();
+  }
+
+  /// If V is an Instruction or ConstantExpr, return its opcode.
+  /// Otherwise return UserOp1.
+  static unsigned getOpcode(const Value *V) {
+    if (const Instruction *I = dyn_cast<Instruction>(V))
+      return I->getOpcode();
+    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+      return CE->getOpcode();
+    return Instruction::UserOp1;
+  }
+
+  static bool classof(const Instruction *) { return true; }
+  static bool classof(const ConstantExpr *) { return true; }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) || isa<ConstantExpr>(V);
+  }
+};
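+
+// For example (a sketch), this lets code treat instructions and constant
+// expressions uniformly:
+//
+//   if (Operator::getOpcode(V) == Instruction::GetElementPtr) {
+//     // V is either a getelementptr instruction or an equivalent
+//     // constant expression.
+//   }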
+
+/// Utility class for integer operators which may exhibit overflow - Add, Sub,
+/// Mul, and Shl. It does not include SDiv, despite that operator having the
+/// potential for overflow.
+class OverflowingBinaryOperator : public Operator {
+public:
+  enum {
+    NoUnsignedWrap = (1 << 0),
+    NoSignedWrap   = (1 << 1)
+  };
+
+private:
+  friend class Instruction;
+  friend class ConstantExpr;
+
+  void setHasNoUnsignedWrap(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
+  }
+  void setHasNoSignedWrap(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
+  }
+
+public:
+  /// Test whether this operation is known to never
+  /// undergo unsigned overflow, aka the nuw property.
+  bool hasNoUnsignedWrap() const {
+    return (SubclassOptionalData & NoUnsignedWrap) != 0;
+  }
+
+  /// Test whether this operation is known to never
+  /// undergo signed overflow, aka the nsw property.
+  bool hasNoSignedWrap() const {
+    return (SubclassOptionalData & NoSignedWrap) != 0;
+  }
+
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Instruction::Add ||
+           I->getOpcode() == Instruction::Sub ||
+           I->getOpcode() == Instruction::Mul ||
+           I->getOpcode() == Instruction::Shl;
+  }
+  static bool classof(const ConstantExpr *CE) {
+    return CE->getOpcode() == Instruction::Add ||
+           CE->getOpcode() == Instruction::Sub ||
+           CE->getOpcode() == Instruction::Mul ||
+           CE->getOpcode() == Instruction::Shl;
+  }
+  static bool classof(const Value *V) {
+    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+  }
+};
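+
+// A sketch of querying the wrap flags through this class:
+//
+//   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V))
+//     if (OBO->hasNoSignedWrap()) {
+//       // The add/sub/mul/shl is marked nsw.
+//     }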
+
+/// A udiv or sdiv instruction, which can be marked as "exact",
+/// indicating that no bits are destroyed.
+class PossiblyExactOperator : public Operator {
+public:
+  enum {
+    IsExact = (1 << 0)
+  };
+
+private:
+  friend class Instruction;
+  friend class ConstantExpr;
+
+  void setIsExact(bool B) {
+    SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
+  }
+
+public:
+  /// Test whether this division is known to be exact, with zero remainder.
+  bool isExact() const {
+    return SubclassOptionalData & IsExact;
+  }
+
+  static bool isPossiblyExactOpcode(unsigned OpC) {
+    return OpC == Instruction::SDiv ||
+           OpC == Instruction::UDiv ||
+           OpC == Instruction::AShr ||
+           OpC == Instruction::LShr;
+  }
+
+  static bool classof(const ConstantExpr *CE) {
+    return isPossiblyExactOpcode(CE->getOpcode());
+  }
+  static bool classof(const Instruction *I) {
+    return isPossiblyExactOpcode(I->getOpcode());
+  }
+  static bool classof(const Value *V) {
+    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+  }
+};
+
+/// Convenience struct for specifying and reasoning about fast-math flags.
+class FastMathFlags {
+private:
+  friend class FPMathOperator;
+
+  unsigned Flags = 0;
+
+  FastMathFlags(unsigned F) {
+    // If all 7 bits are set, turn this into -1. If the number of bits grows,
+    // this must be updated. This is intended to provide some forward binary
+    // compatibility insurance for the meaning of 'fast' in case bits are added.
+    if (F == 0x7F) Flags = ~0U;
+    else Flags = F;
+  }
+
+public:
+  // This is how the bits are used in Value::SubclassOptionalData so they
+  // should fit there too.
+  // WARNING: We're out of space. SubclassOptionalData only has 7 bits. New
+  // functionality will require a change in how this information is stored.
+  enum {
+    AllowReassoc    = (1 << 0),
+    NoNaNs          = (1 << 1),
+    NoInfs          = (1 << 2),
+    NoSignedZeros   = (1 << 3),
+    AllowReciprocal = (1 << 4),
+    AllowContract   = (1 << 5),
+    ApproxFunc      = (1 << 6)
+  };
+
+  FastMathFlags() = default;
+
+  bool any() const { return Flags != 0; }
+  bool none() const { return Flags == 0; }
+  bool all() const { return Flags == ~0U; }
+
+  void clear() { Flags = 0; }
+  void set()   { Flags = ~0U; }
+
+  /// Flag queries
+  bool allowReassoc() const    { return 0 != (Flags & AllowReassoc); }
+  bool noNaNs() const          { return 0 != (Flags & NoNaNs); }
+  bool noInfs() const          { return 0 != (Flags & NoInfs); }
+  bool noSignedZeros() const   { return 0 != (Flags & NoSignedZeros); }
+  bool allowReciprocal() const { return 0 != (Flags & AllowReciprocal); }
+  bool allowContract() const   { return 0 != (Flags & AllowContract); }
+  bool approxFunc() const      { return 0 != (Flags & ApproxFunc); }
+  /// 'Fast' means all bits are set.
+  bool isFast() const          { return all(); }
+
+  /// Flag setters
+  void setAllowReassoc()    { Flags |= AllowReassoc; }
+  void setNoNaNs()          { Flags |= NoNaNs; }
+  void setNoInfs()          { Flags |= NoInfs; }
+  void setNoSignedZeros()   { Flags |= NoSignedZeros; }
+  void setAllowReciprocal() { Flags |= AllowReciprocal; }
+  // TODO: Change the other set* functions to take a parameter?
+  void setAllowContract(bool B) {
+    Flags = (Flags & ~AllowContract) | B * AllowContract;
+  }
+  void setApproxFunc()      { Flags |= ApproxFunc; }
+  void setFast()            { set(); }
+
+  void operator&=(const FastMathFlags &OtherFlags) {
+    Flags &= OtherFlags.Flags;
+  }
+};
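+
+// A sketch of building a flag set; applying it through IRBuilder's
+// setFastMathFlags (declared in llvm/IR/IRBuilder.h, with Builder an assumed
+// IRBuilder instance) is one common use:
+//
+//   FastMathFlags FMF;
+//   FMF.setNoNaNs();
+//   FMF.setAllowReciprocal();
+//   Builder.setFastMathFlags(FMF); // affects subsequently created FP ops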
+
+/// Utility class for floating point operations which can have
+/// information about relaxed accuracy requirements attached to them.
+class FPMathOperator : public Operator {
+private:
+  friend class Instruction;
+
+  /// 'Fast' means all bits are set.
+  void setFast(bool B) {
+    setHasAllowReassoc(B);
+    setHasNoNaNs(B);
+    setHasNoInfs(B);
+    setHasNoSignedZeros(B);
+    setHasAllowReciprocal(B);
+    setHasAllowContract(B);
+    setHasApproxFunc(B);
+  }
+
+  void setHasAllowReassoc(bool B) {
+    SubclassOptionalData =
+    (SubclassOptionalData & ~FastMathFlags::AllowReassoc) |
+    (B * FastMathFlags::AllowReassoc);
+  }
+
+  void setHasNoNaNs(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~FastMathFlags::NoNaNs) |
+      (B * FastMathFlags::NoNaNs);
+  }
+
+  void setHasNoInfs(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~FastMathFlags::NoInfs) |
+      (B * FastMathFlags::NoInfs);
+  }
+
+  void setHasNoSignedZeros(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~FastMathFlags::NoSignedZeros) |
+      (B * FastMathFlags::NoSignedZeros);
+  }
+
+  void setHasAllowReciprocal(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~FastMathFlags::AllowReciprocal) |
+      (B * FastMathFlags::AllowReciprocal);
+  }
+
+  void setHasAllowContract(bool B) {
+    SubclassOptionalData =
+        (SubclassOptionalData & ~FastMathFlags::AllowContract) |
+        (B * FastMathFlags::AllowContract);
+  }
+
+  void setHasApproxFunc(bool B) {
+    SubclassOptionalData =
+        (SubclassOptionalData & ~FastMathFlags::ApproxFunc) |
+        (B * FastMathFlags::ApproxFunc);
+  }
+
+  /// Convenience function for setting multiple fast-math flags.
+  /// FMF is a mask of the bits to set.
+  void setFastMathFlags(FastMathFlags FMF) {
+    SubclassOptionalData |= FMF.Flags;
+  }
+
+  /// Convenience function for copying all fast-math flags.
+  /// All values in FMF are transferred to this operator.
+  void copyFastMathFlags(FastMathFlags FMF) {
+    SubclassOptionalData = FMF.Flags;
+  }
+
+public:
+  /// Test if this operation allows all non-strict floating-point transforms.
+  bool isFast() const {
+    return ((SubclassOptionalData & FastMathFlags::AllowReassoc) != 0 &&
+            (SubclassOptionalData & FastMathFlags::NoNaNs) != 0 &&
+            (SubclassOptionalData & FastMathFlags::NoInfs) != 0 &&
+            (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0 &&
+            (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0 &&
+            (SubclassOptionalData & FastMathFlags::AllowContract) != 0 &&
+            (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0);
+  }
+
+  /// Test if this operation may be simplified with reassociative transforms.
+  bool hasAllowReassoc() const {
+    return (SubclassOptionalData & FastMathFlags::AllowReassoc) != 0;
+  }
+
+  /// Test if this operation's arguments and results are assumed not-NaN.
+  bool hasNoNaNs() const {
+    return (SubclassOptionalData & FastMathFlags::NoNaNs) != 0;
+  }
+
+  /// Test if this operation's arguments and results are assumed not-infinite.
+  bool hasNoInfs() const {
+    return (SubclassOptionalData & FastMathFlags::NoInfs) != 0;
+  }
+
+  /// Test if this operation can ignore the sign of zero.
+  bool hasNoSignedZeros() const {
+    return (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0;
+  }
+
+  /// Test if this operation can use reciprocal multiply instead of division.
+  bool hasAllowReciprocal() const {
+    return (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0;
+  }
+
+  /// Test if this operation can be floating-point contracted (FMA).
+  bool hasAllowContract() const {
+    return (SubclassOptionalData & FastMathFlags::AllowContract) != 0;
+  }
+
+  /// Test if this operation allows approximations of math library functions or
+  /// intrinsics.
+  bool hasApproxFunc() const {
+    return (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0;
+  }
+
+  /// Convenience function for getting all the fast-math flags
+  FastMathFlags getFastMathFlags() const {
+    return FastMathFlags(SubclassOptionalData);
+  }
+
+  /// Get the maximum error permitted by this operation in ULPs. An accuracy of
+  /// 0.0 means that the operation should be performed with the default
+  /// precision.
+  float getFPAccuracy() const;
+
+  static bool classof(const Instruction *I) {
+    return I->getType()->isFPOrFPVectorTy() ||
+      I->getOpcode() == Instruction::FCmp;
+  }
+
+  static bool classof(const ConstantExpr *CE) {
+    return CE->getType()->isFPOrFPVectorTy() ||
+           CE->getOpcode() == Instruction::FCmp;
+  }
+
+  static bool classof(const Value *V) {
+    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+  }
+};
+
+/// A helper template for defining operators for individual opcodes.
+template<typename SuperClass, unsigned Opc>
+class ConcreteOperator : public SuperClass {
+public:
+  static bool classof(const Instruction *I) {
+    return I->getOpcode() == Opc;
+  }
+  static bool classof(const ConstantExpr *CE) {
+    return CE->getOpcode() == Opc;
+  }
+  static bool classof(const Value *V) {
+    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+  }
+};
+
+class AddOperator
+  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
+};
+class SubOperator
+  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
+};
+class MulOperator
+  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
+};
+class ShlOperator
+  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
+};
+
+class SDivOperator
+  : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
+};
+class UDivOperator
+  : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
+};
+class AShrOperator
+  : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
+};
+class LShrOperator
+  : public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
+};
+
+class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {};
+
+class GEPOperator
+  : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
+  friend class GetElementPtrInst;
+  friend class ConstantExpr;
+
+  enum {
+    IsInBounds = (1 << 0),
+    // InRangeIndex: bits 1-6
+  };
+
+  void setIsInBounds(bool B) {
+    SubclassOptionalData =
+      (SubclassOptionalData & ~IsInBounds) | (B * IsInBounds);
+  }
+
+public:
+  /// Test whether this is an inbounds GEP, as defined by LangRef.html.
+  bool isInBounds() const {
+    return SubclassOptionalData & IsInBounds;
+  }
+
+  /// Returns the offset of the index with an inrange attachment, or None if
+  /// none.
+  Optional<unsigned> getInRangeIndex() const {
+    if (SubclassOptionalData >> 1 == 0) return None;
+    return (SubclassOptionalData >> 1) - 1;
+  }
+
+  inline op_iterator       idx_begin()       { return op_begin()+1; }
+  inline const_op_iterator idx_begin() const { return op_begin()+1; }
+  inline op_iterator       idx_end()         { return op_end(); }
+  inline const_op_iterator idx_end()   const { return op_end(); }
+
+  Value *getPointerOperand() {
+    return getOperand(0);
+  }
+  const Value *getPointerOperand() const {
+    return getOperand(0);
+  }
+  static unsigned getPointerOperandIndex() {
+    return 0U;                      // get index for modifying correct operand
+  }
+
+  /// Method to return the pointer operand as a PointerType.
+  Type *getPointerOperandType() const {
+    return getPointerOperand()->getType();
+  }
+
+  Type *getSourceElementType() const;
+  Type *getResultElementType() const;
+
+  /// Method to return the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return getPointerOperandType()->getPointerAddressSpace();
+  }
+
+  unsigned getNumIndices() const {  // Note: always non-negative
+    return getNumOperands() - 1;
+  }
+
+  bool hasIndices() const {
+    return getNumOperands() > 1;
+  }
+
+  /// Return true if all of the indices of this GEP are zeros.
+  /// If so, the result pointer and the first operand have the same
+  /// value, just potentially different types.
+  bool hasAllZeroIndices() const {
+    for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
+      if (ConstantInt *C = dyn_cast<ConstantInt>(I))
+        if (C->isZero())
+          continue;
+      return false;
+    }
+    return true;
+  }
+
+  /// Return true if all of the indices of this GEP are constant integers.
+  /// If so, the result pointer and the first operand have
+  /// a constant offset between them.
+  bool hasAllConstantIndices() const {
+    for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
+      if (!isa<ConstantInt>(I))
+        return false;
+    }
+    return true;
+  }
+
+  unsigned countNonConstantIndices() const {
+    return count_if(make_range(idx_begin(), idx_end()), [](const Use& use) {
+        return !isa<ConstantInt>(*use);
+      });
+  }
+
+  /// \brief Accumulate the constant address offset of this GEP if possible.
+  ///
+  /// This routine accepts an APInt into which it will accumulate the constant
+  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
+  /// all-constant, it returns false and the value of the offset APInt is
+  /// undefined (it is *not* preserved!). The APInt passed into this routine
+  /// must be exactly as wide as the IntPtr type for the address space of the
+  /// base GEP pointer.
+  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
+};
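+
+// A sketch of the intended calling convention for accumulateConstantOffset,
+// assuming an existing DataLayout DL and Value *V:
+//
+//   if (auto *GEP = dyn_cast<GEPOperator>(V)) {
+//     APInt Offset(DL.getPointerSizeInBits(GEP->getPointerAddressSpace()), 0);
+//     if (GEP->accumulateConstantOffset(DL, Offset)) {
+//       // Offset now holds the constant byte offset from the base pointer.
+//     }
+//   }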
+
+class PtrToIntOperator
+    : public ConcreteOperator<Operator, Instruction::PtrToInt> {
+  friend class PtrToIntInst;
+  friend class ConstantExpr;
+
+public:
+  Value *getPointerOperand() {
+    return getOperand(0);
+  }
+  const Value *getPointerOperand() const {
+    return getOperand(0);
+  }
+
+  static unsigned getPointerOperandIndex() {
+    return 0U;                      // get index for modifying correct operand
+  }
+
+  /// Method to return the pointer operand as a PointerType.
+  Type *getPointerOperandType() const {
+    return getPointerOperand()->getType();
+  }
+
+  /// Method to return the address space of the pointer operand.
+  unsigned getPointerAddressSpace() const {
+    return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+  }
+};
+
+class BitCastOperator
+    : public ConcreteOperator<Operator, Instruction::BitCast> {
+  friend class BitCastInst;
+  friend class ConstantExpr;
+
+public:
+  Type *getSrcTy() const {
+    return getOperand(0)->getType();
+  }
+
+  Type *getDestTy() const {
+    return getType();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_OPERATOR_H
diff --git a/linux-x64/clang/include/llvm/IR/OptBisect.h b/linux-x64/clang/include/llvm/IR/OptBisect.h
new file mode 100644
index 0000000..cfc724c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/OptBisect.h
@@ -0,0 +1,89 @@
+//===- llvm/IR/OptBisect.h - LLVM Bisect support ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the interface for bisecting optimizations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPTBISECT_H
+#define LLVM_IR_OPTBISECT_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class Pass;
+class Module;
+class Function;
+class BasicBlock;
+class Region;
+class Loop;
+class CallGraphSCC;
+
+/// Extensions to this class implement mechanisms to disable passes and
+/// individual optimizations at compile time.
+class OptPassGate {
+public:
+  virtual ~OptPassGate() = default;
+
+  virtual bool shouldRunPass(const Pass *P, const Module &U) { return true; }
+  virtual bool shouldRunPass(const Pass *P, const Function &U) { return true; }
+  virtual bool shouldRunPass(const Pass *P, const BasicBlock &U) { return true; }
+  virtual bool shouldRunPass(const Pass *P, const Region &U) { return true; }
+  virtual bool shouldRunPass(const Pass *P, const Loop &U) { return true; }
+  virtual bool shouldRunPass(const Pass *P, const CallGraphSCC &U) { return true; }
+};
+
+/// This class implements a mechanism to disable passes and individual
+/// optimizations at compile time based on a command line option
+/// (-opt-bisect-limit) in order to perform a bisecting search for
+/// optimization-related problems.
+class OptBisect : public OptPassGate {
+public:
+  /// \brief Default constructor, initializes the OptBisect state based on the
+  /// -opt-bisect-limit command line argument.
+  ///
+  /// By default, bisection is disabled.
+  ///
+  /// Clients should not instantiate this class directly.  All access should go
+  /// through LLVMContext.
+  OptBisect();
+
+  virtual ~OptBisect() = default;
+
+  /// Checks the bisect limit to determine if the specified pass should run.
+  ///
+  /// These functions immediately return true if bisection is disabled. If the
+  /// bisect limit is set to -1, they print a message describing the pass and
+  /// the bisect number assigned to it, then return true. Otherwise, they print
+  /// a message with the bisect number assigned to the pass and whether it will
+  /// be run, and return true if the bisect limit has not yet been exceeded or
+  /// false if it has.
+  ///
+  /// Most passes should not call these routines directly. Instead, they are
+  /// called through helper routines provided by the pass base classes.  For
+  /// instance, function passes should call FunctionPass::skipFunction().
+  bool shouldRunPass(const Pass *P, const Module &U) override;
+  bool shouldRunPass(const Pass *P, const Function &U) override;
+  bool shouldRunPass(const Pass *P, const BasicBlock &U) override;
+  bool shouldRunPass(const Pass *P, const Region &U) override;
+  bool shouldRunPass(const Pass *P, const Loop &U) override;
+  bool shouldRunPass(const Pass *P, const CallGraphSCC &U) override;
+
+private:
+  bool checkPass(const StringRef PassName, const StringRef TargetDesc);
+
+  bool BisectEnabled = false;
+  unsigned LastBisectNum = 0;
+};
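+
+// For illustration, a bisection session typically halves the limit until the
+// failing pass is isolated, e.g.:
+//
+//   opt -O2 -opt-bisect-limit=300 ...
+//   opt -O2 -opt-bisect-limit=150 ...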
+
+} // end namespace llvm
+
+#endif // LLVM_IR_OPTBISECT_H
diff --git a/linux-x64/clang/include/llvm/IR/PassManager.h b/linux-x64/clang/include/llvm/IR/PassManager.h
new file mode 100644
index 0000000..4f838a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/PassManager.h
@@ -0,0 +1,1324 @@
+//===- PassManager.h - Pass management infrastructure -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header defines various interfaces for pass management in LLVM. There
+/// is no "pass" interface in LLVM per se. Instead, an instance of any class
+/// which supports a method to 'run' it over a unit of IR can be used as
+/// a pass. A pass manager is generally a tool to collect a sequence of passes
+/// which run over a particular IR construct, and run each of them in sequence
+/// over each such construct in the containing IR construct. As there is no
+/// containing IR construct for a Module, a manager for passes over modules
+/// forms the base case which runs its managed passes in sequence over the
+/// single module provided.
+///
+/// The core IR library provides managers for running passes over
+/// modules and functions.
+///
+/// * FunctionPassManager can be run over a Module; it runs each pass over
+///   a Function.
+/// * ModulePassManager must be run directly; it runs each pass over the
+///   Module.
+///
+/// Note that the implementations of the pass managers use concept-based
+/// polymorphism as outlined in the "Value Semantics and Concept-based
+/// Polymorphism" talk (or its abbreviated sibling "Inheritance Is The Base
+/// Class of Evil") by Sean Parent:
+/// * http://github.com/sean-parent/sean-parent.github.com/wiki/Papers-and-Presentations
+/// * http://www.youtube.com/watch?v=_BpMYeUFXv8
+/// * http://channel9.msdn.com/Events/GoingNative/2013/Inheritance-Is-The-Base-Class-of-Evil
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSMANAGER_H
+#define LLVM_IR_PASSMANAGER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManagerInternal.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TypeName.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// A special type used by analysis passes to provide an address that
+/// identifies that particular analysis pass type.
+///
+/// Analysis passes should have a static data member of this type and derive
+/// from the \c AnalysisInfoMixin to get a static ID method used to identify
+/// the analysis in the pass management infrastructure.
+struct alignas(8) AnalysisKey {};
+
+/// A special type used to provide an address that identifies a set of related
+/// analyses.  These sets are primarily used below to mark sets of analyses as
+/// preserved.
+///
+/// For example, a transformation can indicate that it preserves the CFG of a
+/// function by preserving the appropriate AnalysisSetKey.  An analysis that
+/// depends only on the CFG can then check if that AnalysisSetKey is preserved;
+/// if it is, the analysis knows that it itself is preserved.
+struct alignas(8) AnalysisSetKey {};
+
+/// This templated class represents "all analyses that operate over \<a
+/// particular IR unit\>" (e.g. a Function or a Module) in instances of
+/// PreservedAnalysis.
+///
+/// This lets a transformation say e.g. "I preserved all function analyses".
+///
+/// Note that you must provide an explicit instantiation declaration and
+/// definition for this template in order to get the correct behavior on
+/// Windows. Otherwise, the address of SetKey will not be stable.
+template <typename IRUnitT> class AllAnalysesOn {
+public:
+  static AnalysisSetKey *ID() { return &SetKey; }
+
+private:
+  static AnalysisSetKey SetKey;
+};
+
+template <typename IRUnitT> AnalysisSetKey AllAnalysesOn<IRUnitT>::SetKey;
+
+extern template class AllAnalysesOn<Module>;
+extern template class AllAnalysesOn<Function>;
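+
+// A sketch of an analysis that plugs into this machinery (MyAnalysis is
+// hypothetical; AnalysisInfoMixin is defined later in this header):
+//
+//   class MyAnalysis : public AnalysisInfoMixin<MyAnalysis> {
+//     friend AnalysisInfoMixin<MyAnalysis>;
+//     static AnalysisKey Key;
+//
+//   public:
+//     struct Result { /* computed facts */ };
+//     Result run(Function &F, FunctionAnalysisManager &AM);
+//   };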
+
+/// Represents analyses that only rely on functions' control flow.
+///
+/// This can be used with \c PreservedAnalyses to mark the CFG as preserved and
+/// to query whether it has been preserved.
+///
+/// The CFG of a function is defined as the set of basic blocks and the edges
+/// between them. Changing the set of basic blocks in a function is enough to
+/// mutate the CFG. Mutating the condition of a branch or argument of an
+/// invoked function does not mutate the CFG, but changing the successor labels
+/// of those instructions does.
+class CFGAnalyses {
+public:
+  static AnalysisSetKey *ID() { return &SetKey; }
+
+private:
+  static AnalysisSetKey SetKey;
+};
+
+/// A set of analyses that are preserved following a run of a transformation
+/// pass.
+///
+/// Transformation passes build and return these objects to communicate which
+/// analyses are still valid after the transformation. For most passes this is
+/// fairly simple: if they don't change anything all analyses are preserved,
+/// otherwise only a short list of analyses that have been explicitly updated
+/// are preserved.
+///
+/// This class also lets transformation passes mark abstract *sets* of analyses
+/// as preserved. A transformation that (say) does not alter the CFG can
+/// indicate such by marking a particular AnalysisSetKey as preserved, and
+/// then analyses can query whether that AnalysisSetKey is preserved.
+///
+/// Finally, this class can represent an "abandoned" analysis, which is
+/// not preserved even if it would be covered by some abstract set of analyses.
+///
+/// Given a `PreservedAnalyses` object, an analysis will typically want to
+/// figure out whether it is preserved. In the example below, MyAnalysisType is
+/// preserved if it's not abandoned, and (a) it's explicitly marked as
+/// preserved, (b) the set AllAnalysesOn<MyIRUnit> is preserved, or (c) both
+/// AnalysisSetA and AnalysisSetB are preserved.
+///
+/// ```
+///   auto PAC = PA.getChecker<MyAnalysisType>();
+///   if (PAC.preserved() || PAC.preservedSet<AllAnalysesOn<MyIRUnit>>() ||
+///       (PAC.preservedSet<AnalysisSetA>() &&
+///        PAC.preservedSet<AnalysisSetB>())) {
+///     // The analysis has been successfully preserved ...
+///   }
+/// ```
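+///
+/// Conversely, a transformation that rewrote instructions but left the CFG
+/// intact might report (a sketch):
+///
+/// ```
+///   PreservedAnalyses PA;
+///   PA.preserveSet<CFGAnalyses>();
+///   return PA;
+/// ```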
+class PreservedAnalyses {
+public:
+  /// \brief Convenience factory function for the empty preserved set.
+  static PreservedAnalyses none() { return PreservedAnalyses(); }
+
+  /// \brief Construct a special preserved set that preserves all passes.
+  static PreservedAnalyses all() {
+    PreservedAnalyses PA;
+    PA.PreservedIDs.insert(&AllAnalysesKey);
+    return PA;
+  }
+
+  /// \brief Construct a preserved analyses object with a single preserved set.
+  template <typename AnalysisSetT>
+  static PreservedAnalyses allInSet() {
+    PreservedAnalyses PA;
+    PA.preserveSet<AnalysisSetT>();
+    return PA;
+  }
+
+  /// Mark an analysis as preserved.
+  template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); }
+
+  /// \brief Given an analysis's ID, mark the analysis as preserved, adding it
+  /// to the set.
+  void preserve(AnalysisKey *ID) {
+    // Clear this ID from the explicit not-preserved set if present.
+    NotPreservedAnalysisIDs.erase(ID);
+
+    // If we're not already preserving all analyses (other than those in
+    // NotPreservedAnalysisIDs), explicitly record this ID as preserved.
+    if (!areAllPreserved())
+      PreservedIDs.insert(ID);
+  }
+
+  /// Mark an analysis set as preserved.
+  template <typename AnalysisSetT> void preserveSet() {
+    preserveSet(AnalysisSetT::ID());
+  }
+
+  /// Mark an analysis set as preserved using its ID.
+  void preserveSet(AnalysisSetKey *ID) {
+    // If we're not already in the saturated 'all' state, add this set.
+    if (!areAllPreserved())
+      PreservedIDs.insert(ID);
+  }
+
+  /// Mark an analysis as abandoned.
+  ///
+  /// An abandoned analysis is not preserved, even if it is nominally covered
+  /// by some other set or was previously explicitly marked as preserved.
+  ///
+  /// Note that you can only abandon a specific analysis, not a *set* of
+  /// analyses.
+  template <typename AnalysisT> void abandon() { abandon(AnalysisT::ID()); }
+
+  /// Mark an analysis as abandoned using its ID.
+  ///
+  /// An abandoned analysis is not preserved, even if it is nominally covered
+  /// by some other set or was previously explicitly marked as preserved.
+  ///
+  /// Note that you can only abandon a specific analysis, not a *set* of
+  /// analyses.
+  void abandon(AnalysisKey *ID) {
+    PreservedIDs.erase(ID);
+    NotPreservedAnalysisIDs.insert(ID);
+  }
+
+  /// \brief Intersect this set with another in place.
+  ///
+  /// This is a mutating operation on this preserved set, removing all
+  /// preserved passes which are not also preserved in the argument.
+  void intersect(const PreservedAnalyses &Arg) {
+    if (Arg.areAllPreserved())
+      return;
+    if (areAllPreserved()) {
+      *this = Arg;
+      return;
+    }
+    // The intersection requires the *union* of the explicitly not-preserved
+    // IDs and the *intersection* of the preserved IDs.
+    for (auto ID : Arg.NotPreservedAnalysisIDs) {
+      PreservedIDs.erase(ID);
+      NotPreservedAnalysisIDs.insert(ID);
+    }
+    for (auto ID : PreservedIDs)
+      if (!Arg.PreservedIDs.count(ID))
+        PreservedIDs.erase(ID);
+  }
+
+  /// \brief Intersect this set with a temporary other set in place.
+  ///
+  /// This is a mutating operation on this preserved set, removing all
+  /// preserved passes which are not also preserved in the argument.
+  void intersect(PreservedAnalyses &&Arg) {
+    if (Arg.areAllPreserved())
+      return;
+    if (areAllPreserved()) {
+      *this = std::move(Arg);
+      return;
+    }
+    // The intersection requires the *union* of the explicitly not-preserved
+    // IDs and the *intersection* of the preserved IDs.
+    for (auto ID : Arg.NotPreservedAnalysisIDs) {
+      PreservedIDs.erase(ID);
+      NotPreservedAnalysisIDs.insert(ID);
+    }
+    for (auto ID : PreservedIDs)
+      if (!Arg.PreservedIDs.count(ID))
+        PreservedIDs.erase(ID);
+  }
+
+  /// A checker object that makes it easy to query whether an analysis or
+  /// some set covering it is preserved.
+  class PreservedAnalysisChecker {
+    friend class PreservedAnalyses;
+
+    const PreservedAnalyses &PA;
+    AnalysisKey *const ID;
+    const bool IsAbandoned;
+
+    /// A PreservedAnalysisChecker is tied to a particular Analysis because
+    /// `preserved()` and `preservedSet()` both return false if the Analysis
+    /// was abandoned.
+    PreservedAnalysisChecker(const PreservedAnalyses &PA, AnalysisKey *ID)
+        : PA(PA), ID(ID), IsAbandoned(PA.NotPreservedAnalysisIDs.count(ID)) {}
+
+  public:
+    /// Returns true if the checker's analysis was not abandoned and either
+    ///  - the analysis is explicitly preserved or
+    ///  - all analyses are preserved.
+    bool preserved() {
+      return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
+                              PA.PreservedIDs.count(ID));
+    }
+
+    /// Returns true if the checker's analysis was not abandoned and either
+    ///  - \p AnalysisSetT is explicitly preserved or
+    ///  - all analyses are preserved.
+    template <typename AnalysisSetT> bool preservedSet() {
+      AnalysisSetKey *SetID = AnalysisSetT::ID();
+      return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
+                              PA.PreservedIDs.count(SetID));
+    }
+  };
+
+  /// Build a checker for this `PreservedAnalyses` and the specified analysis
+  /// type.
+  ///
+  /// You can use the returned object to query whether an analysis was
+  /// preserved. See the example in the comment on `PreservedAnalyses`.
+  template <typename AnalysisT> PreservedAnalysisChecker getChecker() const {
+    return PreservedAnalysisChecker(*this, AnalysisT::ID());
+  }
+
+  /// Build a checker for this `PreservedAnalyses` and the specified analysis
+  /// ID.
+  ///
+  /// You can use the returned object to query whether an analysis was
+  /// preserved. See the example in the comment on `PreservedAnalyses`.
+  PreservedAnalysisChecker getChecker(AnalysisKey *ID) const {
+    return PreservedAnalysisChecker(*this, ID);
+  }
+
+  /// Test whether all analyses are preserved (and none are abandoned).
+  ///
+  /// This is used primarily to optimize for the common case of a transformation
+  /// which makes no changes to the IR.
+  bool areAllPreserved() const {
+    return NotPreservedAnalysisIDs.empty() &&
+           PreservedIDs.count(&AllAnalysesKey);
+  }
+
+  /// Directly test whether a set of analyses is preserved.
+  ///
+  /// This is only true when no analyses have been explicitly abandoned.
+  template <typename AnalysisSetT> bool allAnalysesInSetPreserved() const {
+    return allAnalysesInSetPreserved(AnalysisSetT::ID());
+  }
+
+  /// Directly test whether a set of analyses is preserved.
+  ///
+  /// This is only true when no analyses have been explicitly abandoned.
+  bool allAnalysesInSetPreserved(AnalysisSetKey *SetID) const {
+    return NotPreservedAnalysisIDs.empty() &&
+           (PreservedIDs.count(&AllAnalysesKey) || PreservedIDs.count(SetID));
+  }
+
+private:
+  /// A special key used to indicate all analyses.
+  static AnalysisSetKey AllAnalysesKey;
+
+  /// The IDs of analyses and analysis sets that are preserved.
+  SmallPtrSet<void *, 2> PreservedIDs;
+
+  /// The IDs of explicitly not-preserved analyses.
+  ///
+  /// If an analysis in this set is covered by a set in `PreservedIDs`, we
+  /// consider it not-preserved. That is, `NotPreservedAnalysisIDs` always
+  /// "wins" over analysis sets in `PreservedIDs`.
+  ///
+  /// Also, a given ID should never occur both here and in `PreservedIDs`.
+  SmallPtrSet<AnalysisKey *, 2> NotPreservedAnalysisIDs;
+};
+
+// Forward declare the analysis manager template.
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
+
+/// A CRTP mix-in to automatically provide informational APIs needed for
+/// passes.
+///
+/// This provides some boilerplate for types that are passes.
+template <typename DerivedT> struct PassInfoMixin {
+  /// Gets the name of the pass we are mixed into.
+  static StringRef name() {
+    static_assert(std::is_base_of<PassInfoMixin, DerivedT>::value,
+                  "Must pass the derived type as the template argument!");
+    StringRef Name = getTypeName<DerivedT>();
+    if (Name.startswith("llvm::"))
+      Name = Name.drop_front(strlen("llvm::"));
+    return Name;
+  }
+};
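+
+// For illustration, a minimal pass built on this mixin might look like the
+// following sketch (`HypotheticalNoopPass` is a made-up name, not an actual
+// LLVM pass):
+//
+//   struct HypotheticalNoopPass : PassInfoMixin<HypotheticalNoopPass> {
+//     PreservedAnalyses run(Function &F, FunctionAnalysisManager &) {
+//       return PreservedAnalyses::all();
+//     }
+//   };
+//
+// With this, name() returns "HypotheticalNoopPass" without any boilerplate in
+// the pass itself.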
+
+/// A CRTP mix-in that provides informational APIs needed for analysis passes.
+///
+/// This provides some boilerplate for types that are analysis passes. It
+/// automatically mixes in \c PassInfoMixin.
+template <typename DerivedT>
+struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
+  /// Returns an opaque, unique ID for this analysis type.
+  ///
+  /// This ID is a pointer type that is guaranteed to be 8-byte aligned and thus
+  /// suitable for use in sets, maps, and other data structures that use the low
+  /// bits of pointers.
+  ///
+  /// Note that this requires the derived type to provide a static
+  /// \c AnalysisKey member called \c Key.
+  ///
+  /// FIXME: The only reason the mixin type itself can't declare the Key value
+  /// is that some compilers cannot correctly unique a templated static variable
+  /// so that it has the same address in each instantiation. The only currently
+  /// known platform with this limitation is Windows DLL builds, specifically
+  /// building each part of LLVM as a DLL. If we ever remove that build
+  /// configuration, this mixin can provide the static key as well.
+  static AnalysisKey *ID() {
+    static_assert(std::is_base_of<AnalysisInfoMixin, DerivedT>::value,
+                  "Must pass the derived type as the template argument!");
+    return &DerivedT::Key;
+  }
+};
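+
+// For illustration, an analysis built on this mixin follows the pattern
+// sketched below (the analysis and its result are hypothetical; the `friend`
+// declaration plus out-of-line static `Key` mirrors the proxies later in this
+// file):
+//
+//   class HypotheticalAnalysis : public AnalysisInfoMixin<HypotheticalAnalysis> {
+//     friend AnalysisInfoMixin<HypotheticalAnalysis>;
+//     static AnalysisKey Key;
+//
+//   public:
+//     struct Result {};
+//     Result run(Function &F, FunctionAnalysisManager &AM) { return {}; }
+//   };
+//   AnalysisKey HypotheticalAnalysis::Key;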
+
+/// \brief Manages a sequence of passes over a particular unit of IR.
+///
+/// A pass manager contains a sequence of passes to run over a particular unit
+/// of IR (e.g. Functions, Modules). It is itself a valid pass over that unit of
+/// IR, and when run over some given IR will run each of its contained passes in
+/// sequence. Pass managers are the primary and most basic building block of a
+/// pass pipeline.
+///
+/// When you run a pass manager, you provide an \c AnalysisManager<IRUnitT>
+/// argument. The pass manager will propagate that analysis manager to each
+/// pass it runs, and will call the analysis manager's invalidation routine
+/// with the PreservedAnalyses returned by each pass.
+template <typename IRUnitT,
+          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+          typename... ExtraArgTs>
+class PassManager : public PassInfoMixin<
+                        PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...>> {
+public:
+  /// \brief Construct a pass manager.
+  ///
+  /// If \p DebugLogging is true, we'll log our progress to llvm::dbgs().
+  explicit PassManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+
+  // FIXME: These are equivalent to the default move constructor/move
+  // assignment. However, using = default triggers linker errors due to the
+  // explicit instantiations below. Find a way to use the default and remove the
+  // duplicated code here.
+  PassManager(PassManager &&Arg)
+      : Passes(std::move(Arg.Passes)),
+        DebugLogging(std::move(Arg.DebugLogging)) {}
+
+  PassManager &operator=(PassManager &&RHS) {
+    Passes = std::move(RHS.Passes);
+    DebugLogging = std::move(RHS.DebugLogging);
+    return *this;
+  }
+
+  /// \brief Run all of the passes in this manager over the given unit of IR.
+  /// ExtraArgs are passed to each pass.
+  PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+                        ExtraArgTs... ExtraArgs) {
+    PreservedAnalyses PA = PreservedAnalyses::all();
+
+    if (DebugLogging)
+      dbgs() << "Starting " << getTypeName<IRUnitT>() << " pass manager run.\n";
+
+    for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+      if (DebugLogging)
+        dbgs() << "Running pass: " << Passes[Idx]->name() << " on "
+               << IR.getName() << "\n";
+
+      PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM, ExtraArgs...);
+
+      // Update the analysis manager as each pass runs and potentially
+      // invalidates analyses.
+      AM.invalidate(IR, PassPA);
+
+      // Finally, intersect the preserved analyses to compute the aggregate
+      // preserved set for this pass manager.
+      PA.intersect(std::move(PassPA));
+
+      // FIXME: Historically, the pass managers all called the LLVM context's
+      // yield function here. We don't have a generic way to acquire the
+      // context and it isn't yet clear what the right pattern is for yielding
+      // in the new pass manager so it is currently omitted.
+      //IR.getContext().yield();
+    }
+
+    // Invalidation was handled after each pass in the above loop for the
+    // current unit of IR. Therefore, the remaining analysis results in the
+    // AnalysisManager are preserved. We mark this with a set so that we don't
+    // need to inspect each one individually.
+    PA.preserveSet<AllAnalysesOn<IRUnitT>>();
+
+    if (DebugLogging)
+      dbgs() << "Finished " << getTypeName<IRUnitT>() << " pass manager run.\n";
+
+    return PA;
+  }
+
+  /// \brief Append \p Pass to the sequence of passes run by this manager.
+  template <typename PassT> void addPass(PassT Pass) {
+    using PassModelT =
+        detail::PassModel<IRUnitT, PassT, PreservedAnalyses, AnalysisManagerT,
+                          ExtraArgTs...>;
+
+    Passes.emplace_back(new PassModelT(std::move(Pass)));
+  }
+
+private:
+  using PassConceptT =
+      detail::PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...>;
+
+  std::vector<std::unique_ptr<PassConceptT>> Passes;
+
+  /// \brief Flag indicating whether we should do debug logging.
+  bool DebugLogging;
+};
+
+extern template class PassManager<Module>;
+
+/// \brief Convenience typedef for a pass manager over modules.
+using ModulePassManager = PassManager<Module>;
+
+extern template class PassManager<Function>;
+
+/// \brief Convenience typedef for a pass manager over functions.
+using FunctionPassManager = PassManager<Function>;
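+
+// Putting the pieces together, a typical (sketched) use of these managers is:
+//
+//   FunctionAnalysisManager FAM;            // analyses must be registered first
+//   FunctionPassManager FPM;
+//   FPM.addPass(HypotheticalNoopPass());    // the illustrative pass from above
+//   PreservedAnalyses PA = FPM.run(F, FAM); // F is some llvm::Function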
+
+/// \brief A container for analyses that lazily runs them and caches their
+/// results.
+///
+/// This class can manage analyses for any IR unit where the address of the IR
+/// unit suffices as its identity.
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager {
+public:
+  class Invalidator;
+
+private:
+  // Now that we've defined our invalidator, we can define the concept types.
+  using ResultConceptT =
+      detail::AnalysisResultConcept<IRUnitT, PreservedAnalyses, Invalidator>;
+  using PassConceptT =
+      detail::AnalysisPassConcept<IRUnitT, PreservedAnalyses, Invalidator,
+                                  ExtraArgTs...>;
+
+  /// \brief List of analysis pass IDs and associated concept pointers.
+  ///
+  /// Requires iterators to be valid across appending new entries and arbitrary
+  /// erases. Provides the analysis ID to enable finding iterators to a given
+  /// entry in maps below, and provides the storage for the actual result
+  /// concept.
+  using AnalysisResultListT =
+      std::list<std::pair<AnalysisKey *, std::unique_ptr<ResultConceptT>>>;
+
+  /// \brief Map type from IRUnitT pointer to our custom list type.
+  using AnalysisResultListMapT = DenseMap<IRUnitT *, AnalysisResultListT>;
+
+  /// \brief Map type from a pair of analysis ID and IRUnitT pointer to an
+  /// iterator into a particular result list (which is where the actual analysis
+  /// result is stored).
+  using AnalysisResultMapT =
+      DenseMap<std::pair<AnalysisKey *, IRUnitT *>,
+               typename AnalysisResultListT::iterator>;
+
+public:
+  /// API to communicate dependencies between analyses during invalidation.
+  ///
+  /// When an analysis result embeds handles to other analysis results, it
+  /// needs to be invalidated both when its own information isn't preserved and
+  /// when any of its embedded analysis results end up invalidated. We pass an
+  /// \c Invalidator object as an argument to \c invalidate() in order to let
+  /// the analysis results themselves define the dependency graph on the fly.
+  /// This lets us avoid building an explicit representation of the
+  /// dependencies between analysis results.
+  class Invalidator {
+  public:
+    /// Trigger the invalidation of some other analysis pass if not already
+    /// handled and return whether it was in fact invalidated.
+    ///
+    /// This is expected to be called from within a given analysis result's \c
+    /// invalidate method to trigger a depth-first walk of all inter-analysis
+    /// dependencies. The same \p IR unit and \p PA passed to that result's \c
+    /// invalidate method should in turn be provided to this routine.
+    ///
+    /// The first time this is called for a given analysis pass, it will call
+    /// the corresponding result's \c invalidate method.  Subsequent calls will
+    /// use a cache of the results of that initial call.  It is an error to form
+    /// cyclic dependencies between analysis results.
+    ///
+    /// This returns true if the given analysis's result is invalid. Any
+    /// dependencies on it will become invalid as a result.
+    template <typename PassT>
+    bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
+      using ResultModelT =
+          detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+                                      PreservedAnalyses, Invalidator>;
+
+      return invalidateImpl<ResultModelT>(PassT::ID(), IR, PA);
+    }
+
+    /// A type-erased variant of the above invalidate method with the same core
+    /// API other than passing an analysis ID rather than an analysis type
+    /// parameter.
+    ///
+    /// This is sadly less efficient than the above routine, which leverages
+    /// the type parameter to avoid the type erasure overhead.
+    bool invalidate(AnalysisKey *ID, IRUnitT &IR, const PreservedAnalyses &PA) {
+      return invalidateImpl<>(ID, IR, PA);
+    }
+
+  private:
+    friend class AnalysisManager;
+
+    template <typename ResultT = ResultConceptT>
+    bool invalidateImpl(AnalysisKey *ID, IRUnitT &IR,
+                        const PreservedAnalyses &PA) {
+      // If we've already visited this pass, return true if it was invalidated
+      // and false otherwise.
+      auto IMapI = IsResultInvalidated.find(ID);
+      if (IMapI != IsResultInvalidated.end())
+        return IMapI->second;
+
+      // Otherwise look up the result object.
+      auto RI = Results.find({ID, &IR});
+      assert(RI != Results.end() &&
+             "Trying to invalidate a dependent result that isn't in the "
+             "manager's cache is always an error, likely due to a stale result "
+             "handle!");
+
+      auto &Result = static_cast<ResultT &>(*RI->second->second);
+
+      // Insert into the map whether the result should be invalidated, and
+      // return that value. Note that we cannot reuse IMapI and must do a
+      // fresh insert here, as calling invalidate could (recursively) insert
+      // things into the map, making any iterator or reference invalid.
+      bool Inserted;
+      std::tie(IMapI, Inserted) =
+          IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, *this)});
+      (void)Inserted;
+      assert(Inserted && "Should not have already inserted this ID, likely "
+                         "indicates a dependency cycle!");
+      return IMapI->second;
+    }
+
+    Invalidator(SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated,
+                const AnalysisResultMapT &Results)
+        : IsResultInvalidated(IsResultInvalidated), Results(Results) {}
+
+    SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated;
+    const AnalysisResultMapT &Results;
+  };
+
+  /// \brief Construct an empty analysis manager.
+  ///
+  /// If \p DebugLogging is true, we'll log our progress to llvm::dbgs().
+  AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+  AnalysisManager(AnalysisManager &&) = default;
+  AnalysisManager &operator=(AnalysisManager &&) = default;
+
+  /// \brief Returns true if the analysis manager has an empty results cache.
+  bool empty() const {
+    assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
+           "The storage and index of analysis results disagree on how many "
+           "there are!");
+    return AnalysisResults.empty();
+  }
+
+  /// \brief Clear any cached analysis results for a single unit of IR.
+  ///
+  /// This doesn't invalidate, but instead simply deletes, the relevant results.
+  /// It is useful when the IR is being removed and we want to clear out all the
+  /// memory pinned for it.
+  void clear(IRUnitT &IR, llvm::StringRef Name) {
+    if (DebugLogging)
+      dbgs() << "Clearing all analysis results for: " << Name << "\n";
+
+    auto ResultsListI = AnalysisResultLists.find(&IR);
+    if (ResultsListI == AnalysisResultLists.end())
+      return;
+    // Delete the map entries that point into the results list.
+    for (auto &IDAndResult : ResultsListI->second)
+      AnalysisResults.erase({IDAndResult.first, &IR});
+
+    // And actually destroy and erase the results associated with this IR.
+    AnalysisResultLists.erase(ResultsListI);
+  }
+
+  /// \brief Clear all analysis results cached by this AnalysisManager.
+  ///
+  /// Like \c clear(IRUnitT&), this doesn't invalidate the results; it simply
+  /// deletes them.  This lets you clean up the AnalysisManager when the set of
+  /// IR units itself has potentially changed, and thus we can't even look up
+  /// a result and invalidate/clear it directly.
+  void clear() {
+    AnalysisResults.clear();
+    AnalysisResultLists.clear();
+  }
+
+  /// \brief Get the result of an analysis pass for a given IR unit.
+  ///
+  /// Runs the analysis if a cached result is not available.
+  template <typename PassT>
+  typename PassT::Result &getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs) {
+    assert(AnalysisPasses.count(PassT::ID()) &&
+           "This analysis pass was not registered prior to being queried");
+    ResultConceptT &ResultConcept =
+        getResultImpl(PassT::ID(), IR, ExtraArgs...);
+
+    using ResultModelT =
+        detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+                                    PreservedAnalyses, Invalidator>;
+
+    return static_cast<ResultModelT &>(ResultConcept).Result;
+  }
+
+  /// \brief Get the cached result of an analysis pass for a given IR unit.
+  ///
+  /// This method never runs the analysis.
+  ///
+  /// \returns null if there is no cached result.
+  template <typename PassT>
+  typename PassT::Result *getCachedResult(IRUnitT &IR) const {
+    assert(AnalysisPasses.count(PassT::ID()) &&
+           "This analysis pass was not registered prior to being queried");
+
+    ResultConceptT *ResultConcept = getCachedResultImpl(PassT::ID(), IR);
+    if (!ResultConcept)
+      return nullptr;
+
+    using ResultModelT =
+        detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+                                    PreservedAnalyses, Invalidator>;
+
+    return &static_cast<ResultModelT *>(ResultConcept)->Result;
+  }
+
+  /// \brief Register an analysis pass with the manager.
+  ///
+  /// The parameter is a callable whose result is an analysis pass. This allows
+  /// passing in a lambda to construct the analysis.
+  ///
+  /// The analysis type to register is the type returned by calling the \c
+  /// PassBuilder argument. If that type has already been registered, then the
+  /// argument will not be called and this function will return false.
+  /// Otherwise, we register the analysis returned by calling \c PassBuilder(),
+  /// and this function returns true.
+  ///
+  /// (Note: Although the return value of this function indicates whether or not
+  /// an analysis was previously registered, there intentionally isn't a way to
+  /// query this directly.  Instead, you should just register all the analyses
+  /// you might want and let this class run them lazily.  This idiom lets us
+  /// minimize the number of times we have to look up analyses in our
+  /// hashtable.)
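+  ///
+  /// For example (a sketch; `HypotheticalAnalysis` is an illustrative analysis
+  /// built on \c AnalysisInfoMixin, not a real one):
+  ///
+  /// ```
+  ///   FunctionAnalysisManager FAM;
+  ///   FAM.registerPass([] { return HypotheticalAnalysis(); });
+  /// ```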
+  template <typename PassBuilderT>
+  bool registerPass(PassBuilderT &&PassBuilder) {
+    using PassT = decltype(PassBuilder());
+    using PassModelT =
+        detail::AnalysisPassModel<IRUnitT, PassT, PreservedAnalyses,
+                                  Invalidator, ExtraArgTs...>;
+
+    auto &PassPtr = AnalysisPasses[PassT::ID()];
+    if (PassPtr)
+      // Already registered this pass type!
+      return false;
+
+    // Construct a new model around the instance returned by the builder.
+    PassPtr.reset(new PassModelT(PassBuilder()));
+    return true;
+  }
+
+  /// \brief Invalidate a specific analysis pass for an IR module.
+  ///
+  /// Note that the analysis result can disregard invalidation, if it determines
+  /// it is in fact still valid.
+  template <typename PassT> void invalidate(IRUnitT &IR) {
+    assert(AnalysisPasses.count(PassT::ID()) &&
+           "This analysis pass was not registered prior to being invalidated");
+    invalidateImpl(PassT::ID(), IR);
+  }
+
+  /// \brief Invalidate cached analyses for an IR unit.
+  ///
+  /// Walk through all of the analyses pertaining to this unit of IR and
+  /// invalidate them, unless they are preserved by the PreservedAnalyses set.
+  void invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
+    // We're done if all analyses on this IR unit are preserved.
+    if (PA.allAnalysesInSetPreserved<AllAnalysesOn<IRUnitT>>())
+      return;
+
+    if (DebugLogging)
+      dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
+             << "\n";
+
+    // Track whether each analysis's result is invalidated in
+    // IsResultInvalidated.
+    SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
+    Invalidator Inv(IsResultInvalidated, AnalysisResults);
+    AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
+    for (auto &AnalysisResultPair : ResultsList) {
+      // This is basically the same thing as Invalidator::invalidate, but we
+      // can't call it here because we're operating on the type-erased result.
+      // Moreover, if we instead called invalidate() directly, it would do an
+      // unnecessary lookup in ResultsList.
+      AnalysisKey *ID = AnalysisResultPair.first;
+      auto &Result = *AnalysisResultPair.second;
+
+      auto IMapI = IsResultInvalidated.find(ID);
+      if (IMapI != IsResultInvalidated.end())
+        // This result was already handled via the Invalidator.
+        continue;
+
+      // Try to invalidate the result, giving it the Invalidator so it can
+      // recursively query for any dependencies it has and record the result.
+      // Note that we cannot reuse 'IMapI' here or pre-insert the ID, as
+      // Result.invalidate may insert things into the map, invalidating our
+      // iterator.
+      bool Inserted =
+          IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, Inv)})
+              .second;
+      (void)Inserted;
+      assert(Inserted && "Should never have already inserted this ID, likely "
+                         "indicates a cycle!");
+    }
+
+    // Now erase the results that were marked above as invalidated.
+    if (!IsResultInvalidated.empty()) {
+      for (auto I = ResultsList.begin(), E = ResultsList.end(); I != E;) {
+        AnalysisKey *ID = I->first;
+        if (!IsResultInvalidated.lookup(ID)) {
+          ++I;
+          continue;
+        }
+
+        if (DebugLogging)
+          dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
+                 << " on " << IR.getName() << "\n";
+
+        I = ResultsList.erase(I);
+        AnalysisResults.erase({ID, &IR});
+      }
+    }
+
+    if (ResultsList.empty())
+      AnalysisResultLists.erase(&IR);
+  }
+
+private:
+  /// \brief Look up a registered analysis pass.
+  PassConceptT &lookUpPass(AnalysisKey *ID) {
+    typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(ID);
+    assert(PI != AnalysisPasses.end() &&
+           "Analysis passes must be registered prior to being queried!");
+    return *PI->second;
+  }
+
+  /// \brief Look up a registered analysis pass.
+  const PassConceptT &lookUpPass(AnalysisKey *ID) const {
+    typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(ID);
+    assert(PI != AnalysisPasses.end() &&
+           "Analysis passes must be registered prior to being queried!");
+    return *PI->second;
+  }
+
+  /// \brief Get an analysis result, running the pass if necessary.
+  ResultConceptT &getResultImpl(AnalysisKey *ID, IRUnitT &IR,
+                                ExtraArgTs... ExtraArgs) {
+    typename AnalysisResultMapT::iterator RI;
+    bool Inserted;
+    std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
+        std::make_pair(ID, &IR), typename AnalysisResultListT::iterator()));
+
+    // If we don't have a cached result for this function, look up the pass and
+    // run it to produce a result, which we then add to the cache.
+    if (Inserted) {
+      auto &P = this->lookUpPass(ID);
+      if (DebugLogging)
+        dbgs() << "Running analysis: " << P.name() << " on " << IR.getName()
+               << "\n";
+      AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
+      ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));
+
+      // P.run may have inserted elements into AnalysisResults and invalidated
+      // RI.
+      RI = AnalysisResults.find({ID, &IR});
+      assert(RI != AnalysisResults.end() && "we just inserted it!");
+
+      RI->second = std::prev(ResultList.end());
+    }
+
+    return *RI->second->second;
+  }
+
+  /// \brief Get a cached analysis result or return null.
+  ResultConceptT *getCachedResultImpl(AnalysisKey *ID, IRUnitT &IR) const {
+    typename AnalysisResultMapT::const_iterator RI =
+        AnalysisResults.find({ID, &IR});
+    return RI == AnalysisResults.end() ? nullptr : &*RI->second->second;
+  }
+
+  /// \brief Invalidate a function pass result.
+  void invalidateImpl(AnalysisKey *ID, IRUnitT &IR) {
+    typename AnalysisResultMapT::iterator RI =
+        AnalysisResults.find({ID, &IR});
+    if (RI == AnalysisResults.end())
+      return;
+
+    if (DebugLogging)
+      dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
+             << " on " << IR.getName() << "\n";
+    AnalysisResultLists[&IR].erase(RI->second);
+    AnalysisResults.erase(RI);
+  }
+
+  /// \brief Map type from module analysis pass ID to pass concept pointer.
+  using AnalysisPassMapT =
+      DenseMap<AnalysisKey *, std::unique_ptr<PassConceptT>>;
+
+  /// \brief Collection of module analysis passes, indexed by ID.
+  AnalysisPassMapT AnalysisPasses;
+
+  /// \brief Map from function to a list of function analysis results.
+  ///
+  /// Provides linear time removal of all analysis results for a function and
+  /// the ultimate storage for a particular cached analysis result.
+  AnalysisResultListMapT AnalysisResultLists;
+
+  /// \brief Map from an analysis ID and function to a particular cached
+  /// analysis result.
+  AnalysisResultMapT AnalysisResults;
+
+  /// \brief Indicates whether we log to \c llvm::dbgs().
+  bool DebugLogging;
+};
+
+extern template class AnalysisManager<Module>;
+
+/// \brief Convenience typedef for the Module analysis manager.
+using ModuleAnalysisManager = AnalysisManager<Module>;
+
+extern template class AnalysisManager<Function>;
+
+/// \brief Convenience typedef for the Function analysis manager.
+using FunctionAnalysisManager = AnalysisManager<Function>;
+
+/// \brief An analysis over an "outer" IR unit that provides access to an
+/// analysis manager over an "inner" IR unit.  The inner unit must be contained
+/// in the outer unit.
+///
+/// For example, InnerAnalysisManagerProxy<FunctionAnalysisManager, Module> is
+/// an analysis over Modules (the "outer" unit) that provides access to a
+/// Function analysis manager.  The FunctionAnalysisManager is the "inner"
+/// manager being proxied, and Functions are the "inner" unit.  The inner/outer
+/// relationship is valid because each Function is contained in one Module.
+///
+/// If you're (transitively) within a pass manager for an IR unit U that
+/// contains IR unit V, you should never use an analysis manager over V, except
+/// via one of these proxies.
+///
+/// Note that the proxy's result is a move-only RAII object.  The validity of
+/// the analyses in the inner analysis manager is tied to its lifetime.
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+class InnerAnalysisManagerProxy
+    : public AnalysisInfoMixin<
+          InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>> {
+public:
+  class Result {
+  public:
+    explicit Result(AnalysisManagerT &InnerAM) : InnerAM(&InnerAM) {}
+
+    Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)) {
+      // We have to null out the analysis manager in the moved-from state
+      // because we are taking ownership of the responsibility to clear the
+      // analysis state.
+      Arg.InnerAM = nullptr;
+    }
+
+    ~Result() {
+      // InnerAM is null in a moved-from state, in which case there is
+      // nothing to do.
+      if (!InnerAM)
+        return;
+
+      // Clear out the analysis manager if we're being destroyed -- it means we
+      // didn't even see an invalidate call when we got invalidated.
+      InnerAM->clear();
+    }
+
+    Result &operator=(Result &&RHS) {
+      InnerAM = RHS.InnerAM;
+      // We have to null out the analysis manager in the moved-from state
+      // because we are taking ownership of the responsibility to clear the
+      // analysis state.
+      RHS.InnerAM = nullptr;
+      return *this;
+    }
+
+    /// \brief Accessor for the analysis manager.
+    AnalysisManagerT &getManager() { return *InnerAM; }
+
+    /// \brief Handler for invalidation of the outer IR unit, \c IRUnitT.
+    ///
+    /// If the proxy analysis itself is not preserved, we assume that the set of
+    /// inner IR objects contained in IRUnit may have changed.  In this case,
+    /// we have to call \c clear() on the inner analysis manager, as it may now
+    /// have stale pointers to its inner IR objects.
+    ///
+    /// Regardless of whether the proxy analysis is marked as preserved, all of
+    /// the analyses in the inner analysis manager are potentially invalidated
+    /// based on the set of preserved analyses.
+    bool invalidate(
+        IRUnitT &IR, const PreservedAnalyses &PA,
+        typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv);
+
+  private:
+    AnalysisManagerT *InnerAM;
+  };
+
+  explicit InnerAnalysisManagerProxy(AnalysisManagerT &InnerAM)
+      : InnerAM(&InnerAM) {}
+
+  /// \brief Run the analysis pass and create our proxy result object.
+  ///
+  /// This doesn't do any interesting work; it is primarily used to insert our
+  /// proxy result object into the outer analysis cache so that we can proxy
+  /// invalidation to the inner analysis manager.
+  Result run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+             ExtraArgTs...) {
+    return Result(*InnerAM);
+  }
+
+private:
+  friend AnalysisInfoMixin<
+      InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>>;
+
+  static AnalysisKey Key;
+
+  AnalysisManagerT *InnerAM;
+};
+
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+AnalysisKey
+    InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;
+
+/// Provide the \c FunctionAnalysisManager to \c Module proxy.
+using FunctionAnalysisManagerModuleProxy =
+    InnerAnalysisManagerProxy<FunctionAnalysisManager, Module>;
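+
+// Inside a module pass, the inner function analysis manager is typically
+// obtained through this proxy, as the function-pass adaptor later in this
+// file does:
+//
+//   FunctionAnalysisManager &FAM =
+//       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();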
+
+/// Specialization of the invalidate method for the \c
+/// FunctionAnalysisManagerModuleProxy's result.
+template <>
+bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
+    Module &M, const PreservedAnalyses &PA,
+    ModuleAnalysisManager::Invalidator &Inv);
+
+// Ensure the \c FunctionAnalysisManagerModuleProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
+                                                Module>;
+
+/// \brief An analysis over an "inner" IR unit that provides access to an
+/// analysis manager over a "outer" IR unit.  The inner unit must be contained
+/// in the outer unit.
+///
+/// For example OuterAnalysisManagerProxy<ModuleAnalysisManager, Function> is an
+/// analysis over Functions (the "inner" unit) which provides access to a Module
+/// analysis manager.  The ModuleAnalysisManager is the "outer" manager being
+/// proxied, and Modules are the "outer" IR unit.  The inner/outer relationship
+/// is valid because each Function is contained in one Module.
+///
+/// This proxy only exposes the const interface of the outer analysis manager,
+/// to indicate that you cannot cause an outer analysis to run from within an
+/// inner pass.  Instead, you must rely on the \c getCachedResult API.
+///
+/// This proxy doesn't manage invalidation in any way -- that is handled by the
+/// recursive return path of each layer of the pass manager.  A consequence of
+/// this is that the outer analyses may be stale.  We invalidate the outer
+/// analyses only when we're done running passes over the inner IR units.
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+class OuterAnalysisManagerProxy
+    : public AnalysisInfoMixin<
+          OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>> {
+public:
+  /// \brief Result proxy object for \c OuterAnalysisManagerProxy.
+  class Result {
+  public:
+    explicit Result(const AnalysisManagerT &AM) : AM(&AM) {}
+
+    const AnalysisManagerT &getManager() const { return *AM; }
+
+    /// When invalidation occurs, remove any registered invalidation events.
+    bool invalidate(
+        IRUnitT &IRUnit, const PreservedAnalyses &PA,
+        typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv) {
+      // Loop over the set of registered outer invalidation mappings and if any
+      // of them map to an analysis that is now invalid, clear it out.
+      SmallVector<AnalysisKey *, 4> DeadKeys;
+      for (auto &KeyValuePair : OuterAnalysisInvalidationMap) {
+        AnalysisKey *OuterID = KeyValuePair.first;
+        auto &InnerIDs = KeyValuePair.second;
+        InnerIDs.erase(
+            llvm::remove_if(InnerIDs,
+                            [&](AnalysisKey *InnerID) {
+                              return Inv.invalidate(InnerID, IRUnit, PA);
+                            }),
+            InnerIDs.end());
+        if (InnerIDs.empty())
+          DeadKeys.push_back(OuterID);
+      }
+
+      for (auto OuterID : DeadKeys)
+        OuterAnalysisInvalidationMap.erase(OuterID);
+
+      // The proxy itself remains valid regardless of anything else.
+      return false;
+    }
+
+    /// Register a deferred invalidation event for when the outer analysis
+    /// manager processes its invalidations.
+    template <typename OuterAnalysisT, typename InvalidatedAnalysisT>
+    void registerOuterAnalysisInvalidation() {
+      AnalysisKey *OuterID = OuterAnalysisT::ID();
+      AnalysisKey *InvalidatedID = InvalidatedAnalysisT::ID();
+
+      auto &InvalidatedIDList = OuterAnalysisInvalidationMap[OuterID];
+      // Note that this is a linear scan. If we end up with large numbers of
+      // analyses that all trigger invalidation on the same outer analysis,
+      // this entire system should be changed to use some other deterministic
+      // data structure such as a `SetVector` of a pair of pointers.
+      auto InvalidatedIt = std::find(InvalidatedIDList.begin(),
+                                     InvalidatedIDList.end(), InvalidatedID);
+      if (InvalidatedIt == InvalidatedIDList.end())
+        InvalidatedIDList.push_back(InvalidatedID);
+    }
+
+    /// Access the map from outer analyses to deferred invalidation requiring
+    /// analyses.
+    const SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2> &
+    getOuterInvalidations() const {
+      return OuterAnalysisInvalidationMap;
+    }
+
+  private:
+    const AnalysisManagerT *AM;
+
+    /// A map from an outer analysis ID to the set of this IR-unit's analyses
+    /// which need to be invalidated.
+    SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2>
+        OuterAnalysisInvalidationMap;
+  };
+
+  OuterAnalysisManagerProxy(const AnalysisManagerT &AM) : AM(&AM) {}
+
+  /// \brief Run the analysis pass and create our proxy result object.
+  /// Nothing to see here; it just forwards the \c AM reference into the
+  /// result.
+  Result run(IRUnitT &, AnalysisManager<IRUnitT, ExtraArgTs...> &,
+             ExtraArgTs...) {
+    return Result(*AM);
+  }
+
+private:
+  friend AnalysisInfoMixin<
+      OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>>;
+
+  static AnalysisKey Key;
+
+  const AnalysisManagerT *AM;
+};
+
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+AnalysisKey
+    OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;
+
+extern template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
+                                                Function>;
+/// Provide the \c ModuleAnalysisManager to \c Function proxy.
+using ModuleAnalysisManagerFunctionProxy =
+    OuterAnalysisManagerProxy<ModuleAnalysisManager, Function>;
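+
+// Inside a function pass, cached module-level results can be consulted through
+// this proxy. Because only the const interface of the outer manager is
+// exposed, \c getCachedResult is the appropriate query (a sketch;
+// `HypotheticalModuleAnalysis` is illustrative):
+//
+//   const ModuleAnalysisManager &MAM =
+//       AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
+//   if (auto *R = MAM.getCachedResult<HypotheticalModuleAnalysis>(*F.getParent()))
+//     ... use R, which may be stale ...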
+
+/// \brief Trivial adaptor that maps from a module to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a ModulePassManager, by running the FunctionPass(Manager) over every
+/// function in the module.
+///
+/// Function passes run within this adaptor can rely on having exclusive access
+/// to the function they are run over. They should not read or modify any other
+/// functions! Other threads or systems may be manipulating other functions in
+/// the module, and so their state should never be relied on.
+/// FIXME: Make the above true for all of LLVM's actual passes; some still
+/// violate this principle.
+///
+/// Function passes can also read the module containing the function, but they
+/// should not modify that module outside of the use lists of various globals.
+/// For example, a function pass is not permitted to add functions to the
+/// module.
+/// FIXME: Make the above true for all of LLVM's actual passes; some still
+/// violate this principle.
+///
+/// Note that although function passes can access module analyses, module
+/// analyses are not invalidated while the function passes are running, so they
+/// may be stale.  Function analyses will not be stale.
+template <typename FunctionPassT>
+class ModuleToFunctionPassAdaptor
+    : public PassInfoMixin<ModuleToFunctionPassAdaptor<FunctionPassT>> {
+public:
+  explicit ModuleToFunctionPassAdaptor(FunctionPassT Pass)
+      : Pass(std::move(Pass)) {}
+
+  /// \brief Runs the function pass across every function in the module.
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
+    FunctionAnalysisManager &FAM =
+        AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+    PreservedAnalyses PA = PreservedAnalyses::all();
+    for (Function &F : M) {
+      if (F.isDeclaration())
+        continue;
+
+      PreservedAnalyses PassPA = Pass.run(F, FAM);
+
+      // We know that the function pass couldn't have invalidated any other
+      // function's analyses (that's the contract of a function pass), so
+      // directly handle the function analysis manager's invalidation here.
+      FAM.invalidate(F, PassPA);
+
+      // Then intersect the preserved set so that invalidation of module
+      // analyses will eventually occur when the module pass completes.
+      PA.intersect(std::move(PassPA));
+    }
+
+    // The FunctionAnalysisManagerModuleProxy is preserved because (we assume)
+    // the function passes we ran didn't add or remove any functions.
+    //
+    // We also preserve all analyses on Functions, because we did all the
+    // invalidation we needed to do above.
+    PA.preserveSet<AllAnalysesOn<Function>>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }
+
+private:
+  FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+ModuleToFunctionPassAdaptor<FunctionPassT>
+createModuleToFunctionPassAdaptor(FunctionPassT Pass) {
+  return ModuleToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
+}
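+
+// For example, composing a function pass into a module pipeline (a sketch
+// using the illustrative pass from earlier in this file):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToFunctionPassAdaptor(HypotheticalNoopPass()));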
+
+/// \brief A utility pass template to force an analysis result to be available.
+///
+/// If there are extra arguments at the pass's run level, there may also be
+/// extra arguments to the analysis manager's \c getResult routine. We can't
+/// guess how to effectively map the arguments from one to the other, and so
+/// this specialization just ignores them.
+///
+/// Specific patterns of run-method extra arguments and analysis manager extra
+/// arguments will have to be defined as appropriate specializations.
+template <typename AnalysisT, typename IRUnitT,
+          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+          typename... ExtraArgTs>
+struct RequireAnalysisPass
+    : PassInfoMixin<RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
+                                        ExtraArgTs...>> {
+  /// \brief Run this pass over some unit of IR.
+  ///
+  /// This pass can be run over any unit of IR and use any analysis manager,
+  /// provided they satisfy the basic API requirements. When this pass is
+  /// created, these methods can be instantiated to satisfy whatever the
+  /// context requires.
+  PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM,
+                        ExtraArgTs &&... Args) {
+    (void)AM.template getResult<AnalysisT>(Arg,
+                                           std::forward<ExtraArgTs>(Args)...);
+
+    return PreservedAnalyses::all();
+  }
+};
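+
+// For example, forcing an analysis to be computed at a particular point in a
+// pipeline (a sketch; the analysis is illustrative):
+//
+//   FPM.addPass(RequireAnalysisPass<HypotheticalAnalysis, Function>());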
+
+/// \brief A no-op pass template which simply forces a specific analysis result
+/// to be invalidated.
+template <typename AnalysisT>
+struct InvalidateAnalysisPass
+    : PassInfoMixin<InvalidateAnalysisPass<AnalysisT>> {
+  /// \brief Run this pass over some unit of IR.
+  ///
+  /// This pass can be run over any unit of IR and use any analysis manager,
+  /// provided they satisfy the basic API requirements. When this pass is
+  /// created, these methods can be instantiated to satisfy whatever the
+  /// context requires.
+  template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+  PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, ExtraArgTs &&...) {
+    auto PA = PreservedAnalyses::all();
+    PA.abandon<AnalysisT>();
+    return PA;
+  }
+};
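+
+// For example, forcing one analysis to be recomputed on the next query
+// (a sketch; the analysis is illustrative):
+//
+//   FPM.addPass(InvalidateAnalysisPass<HypotheticalAnalysis>());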
+
+/// \brief A utility pass that does nothing, but preserves no analyses.
+///
+/// Because this preserves no analyses, any analysis passes queried after this
+/// pass runs will recompute fresh results.
+struct InvalidateAllAnalysesPass : PassInfoMixin<InvalidateAllAnalysesPass> {
+  /// \brief Run this pass over some unit of IR.
+  template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+  PreservedAnalyses run(IRUnitT &, AnalysisManagerT &, ExtraArgTs &&...) {
+    return PreservedAnalyses::none();
+  }
+};
+
+/// A utility pass template that simply runs another pass multiple times.
+///
+/// This can be useful when debugging or testing passes. It also serves as an
+/// example of how to extend the pass manager in ways beyond composition.
+template <typename PassT>
+class RepeatedPass : public PassInfoMixin<RepeatedPass<PassT>> {
+public:
+  RepeatedPass(int Count, PassT P) : Count(Count), P(std::move(P)) {}
+
+  template <typename IRUnitT, typename AnalysisManagerT, typename... Ts>
+  PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, Ts &&... Args) {
+    auto PA = PreservedAnalyses::all();
+    for (int i = 0; i < Count; ++i)
+      PA.intersect(P.run(Arg, AM, std::forward<Ts>(Args)...));
+    return PA;
+  }
+
+private:
+  int Count;
+  PassT P;
+};
+
+/// \brief A function to deduce the pass type and wrap it in the repeating
+/// adaptor.
+template <typename PassT>
+RepeatedPass<PassT> createRepeatedPass(int Count, PassT P) {
+  return RepeatedPass<PassT>(Count, std::move(P));
+}
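+
+// For example, running a pass three times in a row (a sketch using the
+// illustrative pass from earlier in this file):
+//
+//   FPM.addPass(createRepeatedPass(3, HypotheticalNoopPass()));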
+
+} // end namespace llvm
+
+#endif // LLVM_IR_PASSMANAGER_H
diff --git a/linux-x64/clang/include/llvm/IR/PassManagerInternal.h b/linux-x64/clang/include/llvm/IR/PassManagerInternal.h
new file mode 100644
index 0000000..9195d4d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/PassManagerInternal.h
@@ -0,0 +1,308 @@
+//===- PassManager internal APIs and implementation details -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides internal APIs and implementation details used by the
+/// pass management interfaces exposed in PassManager.h. To understand more
+/// context of why these particular interfaces are needed, see that header
+/// file. None of these APIs should be used elsewhere.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSMANAGERINTERNAL_H
+#define LLVM_IR_PASSMANAGERINTERNAL_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+template <typename IRUnitT> class AllAnalysesOn;
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
+class PreservedAnalyses;
+
+/// \brief Implementation details of the pass manager interfaces.
+namespace detail {
+
+/// \brief Template for the abstract base class used to dispatch
+/// polymorphically over pass objects.
+template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+struct PassConcept {
+  // Boilerplate necessary for the container of derived classes.
+  virtual ~PassConcept() = default;
+
+  /// \brief The polymorphic API which runs the pass over a given IR entity.
+  ///
+  /// Note that the actual pass object can omit the analysis manager argument
+  /// if desired. Note also that the analysis manager may be null if there is
+  /// no analysis manager in the pass pipeline.
+  virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+                                ExtraArgTs... ExtraArgs) = 0;
+
+  /// \brief Polymorphic method to access the name of a pass.
+  virtual StringRef name() = 0;
+};
+
+/// \brief A template wrapper used to implement the polymorphic API.
+///
+/// Can be instantiated for any object which provides a \c run method accepting
+/// an \c IRUnitT& and an \c AnalysisManager<IRUnitT>&. It requires the pass to
+/// be a copyable object.
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
+          typename AnalysisManagerT, typename... ExtraArgTs>
+struct PassModel : PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...> {
+  explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
+  PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+
+  friend void swap(PassModel &LHS, PassModel &RHS) {
+    using std::swap;
+    swap(LHS.Pass, RHS.Pass);
+  }
+
+  PassModel &operator=(PassModel RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  PreservedAnalysesT run(IRUnitT &IR, AnalysisManagerT &AM,
+                         ExtraArgTs... ExtraArgs) override {
+    return Pass.run(IR, AM, ExtraArgs...);
+  }
+
+  StringRef name() override { return PassT::name(); }
+
+  PassT Pass;
+};
+
+/// \brief Abstract concept of an analysis result.
+///
+/// This concept is parameterized over the IR unit that this result pertains
+/// to.
+template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultConcept {
+  virtual ~AnalysisResultConcept() = default;
+
+  /// \brief Method to try and mark a result as invalid.
+  ///
+  /// When the outer analysis manager detects a change in some underlying
+  /// unit of the IR, it will call this method on all of the results cached.
+  ///
+  /// \p PA is a set of preserved analyses which can be used to avoid
+  /// invalidation because the pass which changed the underlying IR took care
+  /// to update or preserve the analysis result in some way.
+  ///
+  /// \p Inv is typically an \c AnalysisManager::Invalidator object that can be
+  /// used by a particular analysis result to discover whether other analysis
+  /// results are also invalidated in the event that this result depends on
+  /// them. See the documentation in the \c AnalysisManager for more details.
+  ///
+  /// \returns true if the result is indeed invalid (the default).
+  virtual bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
+                          InvalidatorT &Inv) = 0;
+};
+
+/// \brief SFINAE metafunction for computing whether \c ResultT provides an
+/// \c invalidate member function.
+template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
+  using EnabledType = char;
+  struct DisabledType {
+    char a, b;
+  };
+
+  // Purely to help out MSVC which fails to disable the below specialization,
+  // explicitly enable using the result type's invalidate routine if we can
+  // successfully call that routine.
+  template <typename T> struct Nonce { using Type = EnabledType; };
+  template <typename T>
+  static typename Nonce<decltype(std::declval<T>().invalidate(
+      std::declval<IRUnitT &>(), std::declval<PreservedAnalyses>()))>::Type
+      check(rank<2>);
+
+  // First we define an overload that can only be taken if there is no
+  // invalidate member. We do this by taking the address of an invalidate
+  // member in an adjacent base class of a derived class. This would be
+  // ambiguous if there were an invalidate member in the result type.
+  template <typename T, typename U> static DisabledType NonceFunction(T U::*);
+  struct CheckerBase { int invalidate; };
+  template <typename T> struct Checker : CheckerBase, T {};
+  template <typename T>
+  static decltype(NonceFunction(&Checker<T>::invalidate)) check(rank<1>);
+
+  // Now we have the fallback that will only be reached when there is an
+  // invalidate member; it enables the trait.
+  template <typename T>
+  static EnabledType check(rank<0>);
+
+public:
+  enum { Value = sizeof(check<ResultT>(rank<2>())) == sizeof(EnabledType) };
+};
+
+/// \brief Wrapper to model the analysis result concept.
+///
+/// By default, this will implement the invalidate method with a trivial
+/// implementation so that the actual analysis result doesn't need to provide
+/// an invalidation handler. It is only selected when the invalidation handler
+/// is not part of the ResultT's interface.
+template <typename IRUnitT, typename PassT, typename ResultT,
+          typename PreservedAnalysesT, typename InvalidatorT,
+          bool HasInvalidateHandler =
+              ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
+struct AnalysisResultModel;
+
+/// \brief Specialization of \c AnalysisResultModel which provides the default
+/// invalidate functionality.
+template <typename IRUnitT, typename PassT, typename ResultT,
+          typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
+                           InvalidatorT, false>
+    : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
+  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+  AnalysisResultModel(AnalysisResultModel &&Arg)
+      : Result(std::move(Arg.Result)) {}
+
+  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+    using std::swap;
+    swap(LHS.Result, RHS.Result);
+  }
+
+  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  /// \brief The model bases invalidation solely on being in the preserved set.
+  //
+  // FIXME: We should actually use two different concepts for analysis results
+  // rather than two different models, and avoid the indirect function call for
+  // ones that use the trivial behavior.
+  bool invalidate(IRUnitT &, const PreservedAnalysesT &PA,
+                  InvalidatorT &) override {
+    auto PAC = PA.template getChecker<PassT>();
+    return !PAC.preserved() &&
+           !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
+  }
+
+  ResultT Result;
+};
+
+/// \brief Specialization of \c AnalysisResultModel which delegates invalidate
+/// handling to \c ResultT.
+template <typename IRUnitT, typename PassT, typename ResultT,
+          typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
+                           InvalidatorT, true>
+    : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
+  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+  AnalysisResultModel(AnalysisResultModel &&Arg)
+      : Result(std::move(Arg.Result)) {}
+
+  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+    using std::swap;
+    swap(LHS.Result, RHS.Result);
+  }
+
+  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  /// \brief The model delegates to the \c ResultT method.
+  bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
+                  InvalidatorT &Inv) override {
+    return Result.invalidate(IR, PA, Inv);
+  }
+
+  ResultT Result;
+};
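+
+// Consequently, a result type opts into custom invalidation by exposing a
+// member of this shape (sketched here for a Function-level result):
+//
+//   bool invalidate(Function &F, const PreservedAnalyses &PA,
+//                   FunctionAnalysisManager::Invalidator &Inv);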
+
+/// \brief Abstract concept of an analysis pass.
+///
+/// This concept is parameterized over the IR unit that it can run over and
+/// produce an analysis result.
+template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT,
+          typename... ExtraArgTs>
+struct AnalysisPassConcept {
+  virtual ~AnalysisPassConcept() = default;
+
+  /// \brief Method to run this analysis over a unit of IR.
+  /// \returns A unique_ptr to the analysis result object to be queried by
+  /// users.
+  virtual std::unique_ptr<
+      AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
+  run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+      ExtraArgTs... ExtraArgs) = 0;
+
+  /// \brief Polymorphic method to access the name of a pass.
+  virtual StringRef name() = 0;
+};
+
+/// \brief Wrapper to model the analysis pass concept.
+///
+/// Can wrap any type which implements a suitable \c run method. The method
+/// must accept an \c IRUnitT& and an \c AnalysisManager<IRUnitT>& as arguments
+/// and produce an object which can be wrapped in an \c AnalysisResultModel.
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
+          typename InvalidatorT, typename... ExtraArgTs>
+struct AnalysisPassModel : AnalysisPassConcept<IRUnitT, PreservedAnalysesT,
+                                               InvalidatorT, ExtraArgTs...> {
+  explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
+  // We have to explicitly define all the special member functions because MSVC
+  // refuses to generate them.
+  AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
+  AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+
+  friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
+    using std::swap;
+    swap(LHS.Pass, RHS.Pass);
+  }
+
+  AnalysisPassModel &operator=(AnalysisPassModel RHS) {
+    swap(*this, RHS);
+    return *this;
+  }
+
+  // FIXME: Replace PassT::Result with type traits when we use C++11.
+  using ResultModelT =
+      AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+                          PreservedAnalysesT, InvalidatorT>;
+
+  /// \brief The model delegates to the \c PassT::run method.
+  ///
+  /// The return is wrapped in an \c AnalysisResultModel.
+  std::unique_ptr<
+      AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
+  run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+      ExtraArgTs... ExtraArgs) override {
+    return llvm::make_unique<ResultModelT>(Pass.run(IR, AM, ExtraArgs...));
+  }
+
+  /// \brief The model delegates to a static \c PassT::name method.
+  ///
+  /// The returned string ref must point to constant immutable data!
+  StringRef name() override { return PassT::name(); }
+
+  PassT Pass;
+};
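+
+// As an illustration, any analysis shaped roughly like the following can be
+// wrapped (a sketch, not part of this header; 'MyAnalysis' and its members
+// are placeholder names, shown for the Function IR unit):
+//
+//   struct MyAnalysis {
+//     struct Result { /* analysis data */ };
+//     Result run(Function &F, FunctionAnalysisManager &AM);
+//     static StringRef name() { return "MyAnalysis"; }
+//   };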
+
+} // end namespace detail
+
+} // end namespace llvm
+
+#endif // LLVM_IR_PASSMANAGERINTERNAL_H
diff --git a/linux-x64/clang/include/llvm/IR/PatternMatch.h b/linux-x64/clang/include/llvm/IR/PatternMatch.h
new file mode 100644
index 0000000..304b84b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/PatternMatch.h
@@ -0,0 +1,1736 @@
+//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a simple and efficient mechanism for performing general
+// tree-based pattern matches on the LLVM IR. The power of these routines is
+// that they allow you to write concise patterns that are expressive and easy
+// to understand. The other major advantage is that they allow you to
+// trivially capture/bind elements in the pattern to variables. For example,
+// you can do something like this:
+//
+//  Value *Exp = ...
+//  Value *X, *Y;  ConstantInt *C1, *C2;      // (X & C1) | (Y & C2)
+//  if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
+//                      m_And(m_Value(Y), m_ConstantInt(C2))))) {
+//    ... Pattern is matched and variables are bound ...
+//  }
+//
+// This is primarily useful for things like the instruction combiner, but can
+// also be useful for static analysis tools or code generators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PATTERNMATCH_H
+#define LLVM_IR_PATTERNMATCH_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+
+namespace llvm {
+namespace PatternMatch {
+
+template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
+  return const_cast<Pattern &>(P).match(V);
+}
+
+template <typename SubPattern_t> struct OneUse_match {
+  SubPattern_t SubPattern;
+
+  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    return V->hasOneUse() && SubPattern.match(V);
+  }
+};
+
+template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
+  return SubPattern;
+}
+
+template <typename Class> struct class_match {
+  template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
+};
+
+/// Match an arbitrary value and ignore it.
+inline class_match<Value> m_Value() { return class_match<Value>(); }
+
+/// Match an arbitrary binary operation and ignore it.
+inline class_match<BinaryOperator> m_BinOp() {
+  return class_match<BinaryOperator>();
+}
+
+/// Match an arbitrary compare instruction and ignore it.
+inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
+
+/// Match an arbitrary ConstantInt and ignore it.
+inline class_match<ConstantInt> m_ConstantInt() {
+  return class_match<ConstantInt>();
+}
+
+/// Match an arbitrary undef constant.
+inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
+
+/// Match an arbitrary Constant and ignore it.
+inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
+
+/// Matching combinators
+template <typename LTy, typename RTy> struct match_combine_or {
+  LTy L;
+  RTy R;
+
+  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (L.match(V))
+      return true;
+    if (R.match(V))
+      return true;
+    return false;
+  }
+};
+
+template <typename LTy, typename RTy> struct match_combine_and {
+  LTy L;
+  RTy R;
+
+  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (L.match(V))
+      if (R.match(V))
+        return true;
+    return false;
+  }
+};
+
+/// Combine two pattern matchers matching L || R
+template <typename LTy, typename RTy>
+inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
+  return match_combine_or<LTy, RTy>(L, R);
+}
+
+/// Combine two pattern matchers matching L && R
+template <typename LTy, typename RTy>
+inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
+  return match_combine_and<LTy, RTy>(L, R);
+}
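+
+// For example, given some Value *V, matching either a zero or an all-ones
+// constant can be written as (a sketch):
+//
+//   if (match(V, m_CombineOr(m_Zero(), m_AllOnes()))) {
+//     ... V is 0 or -1 ...
+//   }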
+
+struct match_zero {
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *C = dyn_cast<Constant>(V))
+      return C->isNullValue();
+    return false;
+  }
+};
+
+/// Match an arbitrary zero/null constant. This includes
+/// zeroinitializer for vectors and ConstantPointerNull for pointers.
+inline match_zero m_Zero() { return match_zero(); }
+
+struct apint_match {
+  const APInt *&Res;
+
+  apint_match(const APInt *&R) : Res(R) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (auto *CI = dyn_cast<ConstantInt>(V)) {
+      Res = &CI->getValue();
+      return true;
+    }
+    if (V->getType()->isVectorTy())
+      if (const auto *C = dyn_cast<Constant>(V))
+        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
+          Res = &CI->getValue();
+          return true;
+        }
+    return false;
+  }
+};
+// Either 'constexpr if' or renaming ConstantFP::getValueAPF to
+// ConstantFP::getValue would be needed to handle both apint and apfloat
+// with a single template function.
+struct apfloat_match {
+  const APFloat *&Res;
+  apfloat_match(const APFloat *&R) : Res(R) {}
+  template <typename ITy> bool match(ITy *V) {
+    if (auto *CI = dyn_cast<ConstantFP>(V)) {
+      Res = &CI->getValueAPF();
+      return true;
+    }
+    if (V->getType()->isVectorTy())
+      if (const auto *C = dyn_cast<Constant>(V))
+        if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
+          Res = &CI->getValueAPF();
+          return true;
+        }
+    return false;
+  }
+};
+
+/// Match a ConstantInt or splatted ConstantVector, binding the
+/// specified pointer to the contained APInt.
+inline apint_match m_APInt(const APInt *&Res) { return Res; }
+
+/// Match a ConstantFP or splatted ConstantVector, binding the
+/// specified pointer to the contained APFloat.
+inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
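+
+// For example, binding the scalar or splatted constant value (a sketch; 'V'
+// is a placeholder):
+//
+//   const APInt *C;
+//   if (match(V, m_APInt(C))) {
+//     ... *C holds the value for scalars and splat vectors alike ...
+//   }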
+
+template <int64_t Val> struct constantint_match {
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
+      const APInt &CIV = CI->getValue();
+      if (Val >= 0)
+        return CIV == static_cast<uint64_t>(Val);
+      // If Val is negative and CI is shorter than it, truncate CI to the
+      // right number of bits.  If CI is wider, we have to sign extend Val.
+      // Comparing the negated values handles both cases.
+      return -CIV == -Val;
+    }
+    return false;
+  }
+};
+
+/// Match a ConstantInt with a specific value.
+template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
+  return constantint_match<Val>();
+}
+
+/// This helper class is used to match scalar and vector integer constants that
+/// satisfy a specified predicate.
+/// For vector constants, undefined elements are ignored.
+template <typename Predicate> struct cst_pred_ty : public Predicate {
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CI = dyn_cast<ConstantInt>(V))
+      return this->isValue(CI->getValue());
+    if (V->getType()->isVectorTy()) {
+      if (const auto *C = dyn_cast<Constant>(V)) {
+        if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+          return this->isValue(CI->getValue());
+
+        // Non-splat vector constant: check each element for a match.
+        unsigned NumElts = V->getType()->getVectorNumElements();
+        assert(NumElts != 0 && "Constant vector with no elements?");
+        for (unsigned i = 0; i != NumElts; ++i) {
+          Constant *Elt = C->getAggregateElement(i);
+          if (!Elt)
+            return false;
+          if (isa<UndefValue>(Elt))
+            continue;
+          auto *CI = dyn_cast<ConstantInt>(Elt);
+          if (!CI || !this->isValue(CI->getValue()))
+            return false;
+        }
+        return true;
+      }
+    }
+    return false;
+  }
+};
+
+/// This helper class is used to match scalar and vector constants that
+/// satisfy a specified predicate, and bind them to an APInt.
+template <typename Predicate> struct api_pred_ty : public Predicate {
+  const APInt *&Res;
+
+  api_pred_ty(const APInt *&R) : Res(R) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CI = dyn_cast<ConstantInt>(V))
+      if (this->isValue(CI->getValue())) {
+        Res = &CI->getValue();
+        return true;
+      }
+    if (V->getType()->isVectorTy())
+      if (const auto *C = dyn_cast<Constant>(V))
+        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+          if (this->isValue(CI->getValue())) {
+            Res = &CI->getValue();
+            return true;
+          }
+
+    return false;
+  }
+};
+
+/// This helper class is used to match scalar and vector floating-point
+/// constants that satisfy a specified predicate.
+/// For vector constants, undefined elements are ignored.
+template <typename Predicate> struct cstfp_pred_ty : public Predicate {
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CF = dyn_cast<ConstantFP>(V))
+      return this->isValue(CF->getValueAPF());
+    if (V->getType()->isVectorTy()) {
+      if (const auto *C = dyn_cast<Constant>(V)) {
+        if (const auto *CF = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
+          return this->isValue(CF->getValueAPF());
+
+        // Non-splat vector constant: check each element for a match.
+        unsigned NumElts = V->getType()->getVectorNumElements();
+        assert(NumElts != 0 && "Constant vector with no elements?");
+        for (unsigned i = 0; i != NumElts; ++i) {
+          Constant *Elt = C->getAggregateElement(i);
+          if (!Elt)
+            return false;
+          if (isa<UndefValue>(Elt))
+            continue;
+          auto *CF = dyn_cast<ConstantFP>(Elt);
+          if (!CF || !this->isValue(CF->getValueAPF()))
+            return false;
+        }
+        return true;
+      }
+    }
+    return false;
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Encapsulate constant value queries for use in templated predicate matchers.
+// This allows checking whether constants match using compound predicates and
+// works with vector constants, possibly with relaxed constraints (for
+// example, ignoring undef values).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+struct is_all_ones {
+  bool isValue(const APInt &C) { return C.isAllOnesValue(); }
+};
+/// Match an integer or vector with all bits set.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_all_ones> m_AllOnes() {
+  return cst_pred_ty<is_all_ones>();
+}
+
+struct is_maxsignedvalue {
+  bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
+};
+/// Match an integer or vector whose values have all bits set except the
+/// high bit (0x7f...).
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
+  return cst_pred_ty<is_maxsignedvalue>();
+}
+inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
+  return V;
+}
+
+struct is_negative {
+  bool isValue(const APInt &C) { return C.isNegative(); }
+};
+/// Match an integer or vector of negative values.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_negative> m_Negative() {
+  return cst_pred_ty<is_negative>();
+}
+inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
+  return V;
+}
+
+struct is_nonnegative {
+  bool isValue(const APInt &C) { return C.isNonNegative(); }
+};
+/// Match an integer or vector of nonnegative values.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_nonnegative> m_NonNegative() {
+  return cst_pred_ty<is_nonnegative>();
+}
+inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
+  return V;
+}
+
+struct is_one {
+  bool isValue(const APInt &C) { return C.isOneValue(); }
+};
+/// Match an integer 1 or a vector with all elements equal to 1.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_one> m_One() {
+  return cst_pred_ty<is_one>();
+}
+
+struct is_power2 {
+  bool isValue(const APInt &C) { return C.isPowerOf2(); }
+};
+/// Match an integer or vector power-of-2.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_power2> m_Power2() {
+  return cst_pred_ty<is_power2>();
+}
+inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
+  return V;
+}
+
+struct is_power2_or_zero {
+  bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
+};
+/// Match an integer or vector of 0 or power-of-2 values.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
+  return cst_pred_ty<is_power2_or_zero>();
+}
+inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
+  return V;
+}
+
+struct is_sign_mask {
+  bool isValue(const APInt &C) { return C.isSignMask(); }
+};
+/// Match an integer or vector with only the sign bit(s) set.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_sign_mask> m_SignMask() {
+  return cst_pred_ty<is_sign_mask>();
+}
+
+struct is_nan {
+  bool isValue(const APFloat &C) { return C.isNaN(); }
+};
+/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
+/// For vectors, this includes constants with undefined elements.
+inline cstfp_pred_ty<is_nan> m_NaN() {
+  return cstfp_pred_ty<is_nan>();
+}
+
+struct is_any_zero_fp {
+  bool isValue(const APFloat &C) { return C.isZero(); }
+};
+/// Match a floating-point negative zero or positive zero.
+/// For vectors, this includes constants with undefined elements.
+inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
+  return cstfp_pred_ty<is_any_zero_fp>();
+}
+
+struct is_pos_zero_fp {
+  bool isValue(const APFloat &C) { return C.isPosZero(); }
+};
+/// Match a floating-point positive zero.
+/// For vectors, this includes constants with undefined elements.
+inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
+  return cstfp_pred_ty<is_pos_zero_fp>();
+}
+
+struct is_neg_zero_fp {
+  bool isValue(const APFloat &C) { return C.isNegZero(); }
+};
+/// Match a floating-point negative zero.
+/// For vectors, this includes constants with undefined elements.
+inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
+  return cstfp_pred_ty<is_neg_zero_fp>();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename Class> struct bind_ty {
+  Class *&VR;
+
+  bind_ty(Class *&V) : VR(V) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (auto *CV = dyn_cast<Class>(V)) {
+      VR = CV;
+      return true;
+    }
+    return false;
+  }
+};
+
+/// Match a value, capturing it if we match.
+inline bind_ty<Value> m_Value(Value *&V) { return V; }
+inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
+
+/// Match an instruction, capturing it if we match.
+inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
+/// Match a binary operator, capturing it if we match.
+inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
+
+/// Match a ConstantInt, capturing the value if we match.
+inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
+
+/// Match a Constant, capturing the value if we match.
+inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
+
+/// Match a ConstantFP, capturing the value if we match.
+inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
+
+/// Match a specified Value*.
+struct specificval_ty {
+  const Value *Val;
+
+  specificval_ty(const Value *V) : Val(V) {}
+
+  template <typename ITy> bool match(ITy *V) { return V == Val; }
+};
+
+/// Match if we have a specific specified value.
+inline specificval_ty m_Specific(const Value *V) { return V; }
+
+/// Match a specified floating point value or a vector with all elements
+/// equal to that value.
+struct specific_fpval {
+  double Val;
+
+  specific_fpval(double V) : Val(V) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CFP = dyn_cast<ConstantFP>(V))
+      return CFP->isExactlyValue(Val);
+    if (V->getType()->isVectorTy())
+      if (const auto *C = dyn_cast<Constant>(V))
+        if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
+          return CFP->isExactlyValue(Val);
+    return false;
+  }
+};
+
+/// Match a specific floating point value or vector with all elements
+/// equal to the value.
+inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
+
+/// Match a float 1.0 or vector with all elements equal to 1.0.
+inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
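+
+// For example, matching "X * 1.0" (a sketch; names are placeholders):
+//
+//   Value *X;
+//   if (match(V, m_FMul(m_Value(X), m_FPOne()))) {
+//     ... V multiplies X by 1.0 ...
+//   }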
+
+struct bind_const_intval_ty {
+  uint64_t &VR;
+
+  bind_const_intval_ty(uint64_t &V) : VR(V) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    if (const auto *CV = dyn_cast<ConstantInt>(V))
+      if (CV->getValue().ule(UINT64_MAX)) {
+        VR = CV->getZExtValue();
+        return true;
+      }
+    return false;
+  }
+};
+
+/// Match a specified integer value or a vector with all elements equal to
+/// that value.
+struct specific_intval {
+  uint64_t Val;
+
+  specific_intval(uint64_t V) : Val(V) {}
+
+  template <typename ITy> bool match(ITy *V) {
+    const auto *CI = dyn_cast<ConstantInt>(V);
+    if (!CI && V->getType()->isVectorTy())
+      if (const auto *C = dyn_cast<Constant>(V))
+        CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
+
+    return CI && CI->getValue() == Val;
+  }
+};
+
+/// Match a specific integer value or vector with all elements equal to
+/// the value.
+inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }
+
+/// Match a ConstantInt and bind to its value.  This does not match
+/// ConstantInts wider than 64-bits.
+inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
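+
+// For example, matching a shift by exactly 3, or binding an arbitrary
+// 64-bit constant shift amount (a sketch; names are placeholders):
+//
+//   Value *X; uint64_t Amt;
+//   if (match(V, m_Shl(m_Value(X), m_SpecificInt(3)))) { ... X << 3 ... }
+//   if (match(V, m_Shl(m_Value(X), m_ConstantInt(Amt)))) { ... X << Amt ... }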
+
+//===----------------------------------------------------------------------===//
+// Matcher for any binary operator.
+//
+template <typename LHS_t, typename RHS_t, bool Commutable = false>
+struct AnyBinaryOp_match {
+  LHS_t L;
+  RHS_t R;
+
+  AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *I = dyn_cast<BinaryOperator>(V))
+      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
+             (Commutable && R.match(I->getOperand(0)) &&
+              L.match(I->getOperand(1)));
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
+  return AnyBinaryOp_match<LHS, RHS>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for specific binary operators.
+//
+
+template <typename LHS_t, typename RHS_t, unsigned Opcode,
+          bool Commutable = false>
+struct BinaryOp_match {
+  LHS_t L;
+  RHS_t R;
+
+  BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (V->getValueID() == Value::InstructionVal + Opcode) {
+      auto *I = cast<BinaryOperator>(V);
+      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
+             (Commutable && R.match(I->getOperand(0)) &&
+              L.match(I->getOperand(1)));
+    }
+    if (auto *CE = dyn_cast<ConstantExpr>(V))
+      return CE->getOpcode() == Opcode &&
+             ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
+              (Commutable && R.match(CE->getOperand(0)) &&
+               L.match(CE->getOperand(1))));
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
+                                                      const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
+                                                        const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
+                                                          const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
+}
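+
+// These matchers nest; for example, "(X + Y) * Z" can be recognized and its
+// leaves bound in one pattern (a sketch; names are placeholders):
+//
+//   Value *X, *Y, *Z;
+//   if (match(V, m_Mul(m_Add(m_Value(X), m_Value(Y)), m_Value(Z)))) {
+//     ... X, Y and Z are bound ...
+//   }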
+
+template <typename LHS_t, typename RHS_t, unsigned Opcode,
+          unsigned WrapFlags = 0>
+struct OverflowingBinaryOp_match {
+  LHS_t L;
+  RHS_t R;
+
+  OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
+      : L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
+      if (Op->getOpcode() != Opcode)
+        return false;
+      if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
+          !Op->hasNoUnsignedWrap())
+        return false;
+      if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
+          !Op->hasNoSignedWrap())
+        return false;
+      return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
+    }
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                 OverflowingBinaryOperator::NoSignedWrap>
+m_NSWAdd(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                   OverflowingBinaryOperator::NoSignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+                                 OverflowingBinaryOperator::NoSignedWrap>
+m_NSWSub(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+                                   OverflowingBinaryOperator::NoSignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+                                 OverflowingBinaryOperator::NoSignedWrap>
+m_NSWMul(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+                                   OverflowingBinaryOperator::NoSignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+                                 OverflowingBinaryOperator::NoSignedWrap>
+m_NSWShl(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+                                   OverflowingBinaryOperator::NoSignedWrap>(
+      L, R);
+}
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                 OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWAdd(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                   OverflowingBinaryOperator::NoUnsignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+                                 OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWSub(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+                                   OverflowingBinaryOperator::NoUnsignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+                                 OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWMul(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+                                   OverflowingBinaryOperator::NoUnsignedWrap>(
+      L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+                                 OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWShl(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+                                   OverflowingBinaryOperator::NoUnsignedWrap>(
+      L, R);
+}
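+
+// For example, matching an add known not to wrap in the signed sense (a
+// sketch; names are placeholders):
+//
+//   Value *X;
+//   if (match(V, m_NSWAdd(m_Value(X), m_One()))) {
+//     ... V is "add nsw X, 1" ...
+//   }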
+
+//===----------------------------------------------------------------------===//
+// Class that matches a group of binary opcodes.
+//
+template <typename LHS_t, typename RHS_t, typename Predicate>
+struct BinOpPred_match : Predicate {
+  LHS_t L;
+  RHS_t R;
+
+  BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *I = dyn_cast<Instruction>(V))
+      return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
+             R.match(I->getOperand(1));
+    if (auto *CE = dyn_cast<ConstantExpr>(V))
+      return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
+             R.match(CE->getOperand(1));
+    return false;
+  }
+};
+
+struct is_shift_op {
+  bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
+};
+
+struct is_right_shift_op {
+  bool isOpType(unsigned Opcode) {
+    return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
+  }
+};
+
+struct is_logical_shift_op {
+  bool isOpType(unsigned Opcode) {
+    return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
+  }
+};
+
+struct is_bitwiselogic_op {
+  bool isOpType(unsigned Opcode) {
+    return Instruction::isBitwiseLogicOp(Opcode);
+  }
+};
+
+struct is_idiv_op {
+  bool isOpType(unsigned Opcode) {
+    return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
+  }
+};
+
+/// Matches shift operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
+                                                      const RHS &R) {
+  return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
+}
+
+/// Matches logical and arithmetic right shift operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
+                                                          const RHS &R) {
+  return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
+}
+
+/// Matches logical shift operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
+m_LogicalShift(const LHS &L, const RHS &R) {
+  return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
+}
+
+/// Matches bitwise logic operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
+m_BitwiseLogic(const LHS &L, const RHS &R) {
+  return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
+}
+
+/// Matches integer division operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
+                                                    const RHS &R) {
+  return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Class that matches exact binary ops.
+//
+template <typename SubPattern_t> struct Exact_match {
+  SubPattern_t SubPattern;
+
+  Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
+      return PEO->isExact() && SubPattern.match(V);
+    return false;
+  }
+};
+
+template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
+  return SubPattern;
+}
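+
+// For example, matching an exact unsigned division by a power of two (a
+// sketch; names are placeholders):
+//
+//   Value *X; const APInt *Pow2;
+//   if (match(V, m_Exact(m_UDiv(m_Value(X), m_Power2(Pow2))))) {
+//     ... V is "udiv exact X, <power of 2>" with the divisor in *Pow2 ...
+//   }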
+
+//===----------------------------------------------------------------------===//
+// Matchers for CmpInst classes
+//
+
+template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
+          bool Commutable = false>
+struct CmpClass_match {
+  PredicateTy &Predicate;
+  LHS_t L;
+  RHS_t R;
+
+  CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
+      : Predicate(Pred), L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *I = dyn_cast<Class>(V))
+      if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
+          (Commutable && R.match(I->getOperand(0)) &&
+           L.match(I->getOperand(1)))) {
+        Predicate = I->getPredicate();
+        return true;
+      }
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
+m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+  return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
+m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
+m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+  return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
+}
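+
+// For example, capturing the predicate along with both operands (a sketch;
+// names are placeholders):
+//
+//   ICmpInst::Predicate Pred;
+//   Value *A, *B;
+//   if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
+//     ... Pred, A and B describe the integer comparison ...
+//   }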
+
+//===----------------------------------------------------------------------===//
+// Matchers for SelectInst classes
+//
+
+template <typename Cond_t, typename LHS_t, typename RHS_t>
+struct SelectClass_match {
+  Cond_t C;
+  LHS_t L;
+  RHS_t R;
+
+  SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
+      : C(Cond), L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *I = dyn_cast<SelectInst>(V))
+      return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
+             R.match(I->getOperand(2));
+    return false;
+  }
+};
+
+template <typename Cond, typename LHS, typename RHS>
+inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
+                                                  const RHS &R) {
+  return SelectClass_match<Cond, LHS, RHS>(C, L, R);
+}
+
+/// This matches a select of two constants, e.g.:
+/// m_SelectCst<-1, 0>(m_Value(V))
+template <int64_t L, int64_t R, typename Cond>
+inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
+m_SelectCst(const Cond &C) {
+  return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
+}
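+
+// For example, matching "Cond ? -1 : 0" (a sketch; names are placeholders):
+//
+//   Value *Cond;
+//   if (match(V, m_SelectCst<-1, 0>(m_Value(Cond)))) {
+//     ... V selects between all-ones and zero on Cond ...
+//   }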
+
+//===----------------------------------------------------------------------===//
+// Matchers for InsertElementInst classes
+//
+
+template <typename Val_t, typename Elt_t, typename Idx_t>
+struct InsertElementClass_match {
+  Val_t V;
+  Elt_t E;
+  Idx_t I;
+
+  InsertElementClass_match(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
+      : V(Val), E(Elt), I(Idx) {}
+
+  template <typename OpTy> bool match(OpTy *VV) {
+    if (auto *II = dyn_cast<InsertElementInst>(VV))
+      return V.match(II->getOperand(0)) && E.match(II->getOperand(1)) &&
+             I.match(II->getOperand(2));
+    return false;
+  }
+};
+
+template <typename Val_t, typename Elt_t, typename Idx_t>
+inline InsertElementClass_match<Val_t, Elt_t, Idx_t>
+m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
+  return InsertElementClass_match<Val_t, Elt_t, Idx_t>(Val, Elt, Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for ExtractElementInst classes
+//
+
+template <typename Val_t, typename Idx_t> struct ExtractElementClass_match {
+  Val_t V;
+  Idx_t I;
+
+  ExtractElementClass_match(const Val_t &Val, const Idx_t &Idx)
+      : V(Val), I(Idx) {}
+
+  template <typename OpTy> bool match(OpTy *VV) {
+    if (auto *II = dyn_cast<ExtractElementInst>(VV))
+      return V.match(II->getOperand(0)) && I.match(II->getOperand(1));
+    return false;
+  }
+};
+
+template <typename Val_t, typename Idx_t>
+inline ExtractElementClass_match<Val_t, Idx_t>
+m_ExtractElement(const Val_t &Val, const Idx_t &Idx) {
+  return ExtractElementClass_match<Val_t, Idx_t>(Val, Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for ShuffleVectorInst classes
+//
+
+template <typename V1_t, typename V2_t, typename Mask_t>
+struct ShuffleVectorClass_match {
+  V1_t V1;
+  V2_t V2;
+  Mask_t M;
+
+  ShuffleVectorClass_match(const V1_t &v1, const V2_t &v2, const Mask_t &m)
+      : V1(v1), V2(v2), M(m) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
+      return V1.match(SI->getOperand(0)) && V2.match(SI->getOperand(1)) &&
+             M.match(SI->getOperand(2));
+    return false;
+  }
+};
+
+template <typename V1_t, typename V2_t, typename Mask_t>
+inline ShuffleVectorClass_match<V1_t, V2_t, Mask_t>
+m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) {
+  return ShuffleVectorClass_match<V1_t, V2_t, Mask_t>(v1, v2, m);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for CastInst classes
+//
+
+template <typename Op_t, unsigned Opcode> struct CastClass_match {
+  Op_t Op;
+
+  CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *O = dyn_cast<Operator>(V))
+      return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
+    return false;
+  }
+};
+
+/// Matches BitCast.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::BitCast>(Op);
+}
+
+/// Matches PtrToInt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
+}
+
+/// Matches Trunc.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::Trunc>(Op);
+}
+
+/// Matches SExt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::SExt>(Op);
+}
+
+/// Matches ZExt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::ZExt>(Op);
+}
+
+template <typename OpTy>
+inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
+                        CastClass_match<OpTy, Instruction::SExt>>
+m_ZExtOrSExt(const OpTy &Op) {
+  return m_CombineOr(m_ZExt(Op), m_SExt(Op));
+}
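+
+// For example, looking through either flavor of integer extension (a
+// sketch; names are placeholders):
+//
+//   Value *X;
+//   if (match(V, m_ZExtOrSExt(m_Value(X)))) {
+//     ... X is the narrower source operand ...
+//   }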
+
+/// Matches UIToFP.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::UIToFP>(Op);
+}
+
+/// Matches SIToFP.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::SIToFP>(Op);
+}
+
+/// Matches FPTrunc.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
+}
+
+/// Matches FPExt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
+  return CastClass_match<OpTy, Instruction::FPExt>(Op);
+}
+
+//===----------------------------------------------------------------------===//
+// Matcher for LoadInst classes
+//
+
+template <typename Op_t> struct LoadClass_match {
+  Op_t Op;
+
+  LoadClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *LI = dyn_cast<LoadInst>(V))
+      return Op.match(LI->getPointerOperand());
+    return false;
+  }
+};
+
+/// Matches LoadInst.
+template <typename OpTy> inline LoadClass_match<OpTy> m_Load(const OpTy &Op) {
+  return LoadClass_match<OpTy>(Op);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for unary operators
+//
+
+template <typename LHS_t> struct neg_match {
+  LHS_t L;
+
+  neg_match(const LHS_t &LHS) : L(LHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *O = dyn_cast<Operator>(V))
+      if (O->getOpcode() == Instruction::Sub)
+        return matchIfNeg(O->getOperand(0), O->getOperand(1));
+    return false;
+  }
+
+private:
+  bool matchIfNeg(Value *LHS, Value *RHS) {
+    return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
+            isa<ConstantAggregateZero>(LHS)) &&
+           L.match(RHS);
+  }
+};
+
+/// Match an integer negate.
+template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
+
+template <typename LHS_t> struct fneg_match {
+  LHS_t L;
+
+  fneg_match(const LHS_t &LHS) : L(LHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *O = dyn_cast<Operator>(V))
+      if (O->getOpcode() == Instruction::FSub)
+        return matchIfFNeg(O->getOperand(0), O->getOperand(1));
+    return false;
+  }
+
+private:
+  bool matchIfFNeg(Value *LHS, Value *RHS) {
+    if (const auto *C = dyn_cast<Constant>(LHS))
+      return C->isNegativeZeroValue() && L.match(RHS);
+    return false;
+  }
+};
+
+/// Match a floating point negate.
+template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
+  return L;
+}
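+
+// For example, matching integer and floating point negation respectively
+// (a sketch; names are placeholders):
+//
+//   Value *X;
+//   if (match(V, m_Neg(m_Value(X))))  { ... V is "sub 0, X" ... }
+//   if (match(V, m_FNeg(m_Value(X)))) { ... V is "fsub -0.0, X" ... }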
+
+//===----------------------------------------------------------------------===//
+// Matchers for control flow.
+//
+
+struct br_match {
+  BasicBlock *&Succ;
+
+  br_match(BasicBlock *&Succ) : Succ(Succ) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *BI = dyn_cast<BranchInst>(V))
+      if (BI->isUnconditional()) {
+        Succ = BI->getSuccessor(0);
+        return true;
+      }
+    return false;
+  }
+};
+
+inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
+
+template <typename Cond_t> struct brc_match {
+  Cond_t Cond;
+  BasicBlock *&T, *&F;
+
+  brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
+      : Cond(C), T(t), F(f) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (auto *BI = dyn_cast<BranchInst>(V))
+      if (BI->isConditional() && Cond.match(BI->getCondition())) {
+        T = BI->getSuccessor(0);
+        F = BI->getSuccessor(1);
+        return true;
+      }
+    return false;
+  }
+};
+
+template <typename Cond_t>
+inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
+  return brc_match<Cond_t>(C, T, F);
+}
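+
+// For example, decomposing a conditional branch on an integer comparison
+// (a sketch; 'BI' and the other names are placeholders):
+//
+//   ICmpInst::Predicate Pred;
+//   Value *A, *B;
+//   BasicBlock *TrueBB, *FalseBB;
+//   if (match(BI, m_Br(m_ICmp(Pred, m_Value(A), m_Value(B)),
+//                      TrueBB, FalseBB))) {
+//     ... the branch condition and both successors are bound ...
+//   }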
+
+//===----------------------------------------------------------------------===//
+// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x,y).
+//
+
+template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
+          bool Commutable = false>
+struct MaxMin_match {
+  LHS_t L;
+  RHS_t R;
+
+  MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
+    auto *SI = dyn_cast<SelectInst>(V);
+    if (!SI)
+      return false;
+    auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
+    if (!Cmp)
+      return false;
+    // At this point we have a select conditioned on a comparison.  Check that
+    // it is the values returned by the select that are being compared.
+    Value *TrueVal = SI->getTrueValue();
+    Value *FalseVal = SI->getFalseValue();
+    Value *LHS = Cmp->getOperand(0);
+    Value *RHS = Cmp->getOperand(1);
+    if ((TrueVal != LHS || FalseVal != RHS) &&
+        (TrueVal != RHS || FalseVal != LHS))
+      return false;
+    typename CmpInst_t::Predicate Pred =
+        LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
+    // Does "(x pred y) ? x : y" represent the desired max/min operation?
+    if (!Pred_t::match(Pred))
+      return false;
+    // It does!  Bind the operands.
+    return (L.match(LHS) && R.match(RHS)) ||
+           (Commutable && R.match(LHS) && L.match(RHS));
+  }
+};
+
+/// Helper class for identifying signed max predicates.
+struct smax_pred_ty {
+  static bool match(ICmpInst::Predicate Pred) {
+    return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
+  }
+};
+
+/// Helper class for identifying signed min predicates.
+struct smin_pred_ty {
+  static bool match(ICmpInst::Predicate Pred) {
+    return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
+  }
+};
+
+/// Helper class for identifying unsigned max predicates.
+struct umax_pred_ty {
+  static bool match(ICmpInst::Predicate Pred) {
+    return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
+  }
+};
+
+/// Helper class for identifying unsigned min predicates.
+struct umin_pred_ty {
+  static bool match(ICmpInst::Predicate Pred) {
+    return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
+  }
+};
+
+/// Helper class for identifying ordered max predicates.
+struct ofmax_pred_ty {
+  static bool match(FCmpInst::Predicate Pred) {
+    return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
+  }
+};
+
+/// Helper class for identifying ordered min predicates.
+struct ofmin_pred_ty {
+  static bool match(FCmpInst::Predicate Pred) {
+    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
+  }
+};
+
+/// Helper class for identifying unordered max predicates.
+struct ufmax_pred_ty {
+  static bool match(FCmpInst::Predicate Pred) {
+    return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
+  }
+};
+
+/// Helper class for identifying unordered min predicates.
+struct ufmin_pred_ty {
+  static bool match(FCmpInst::Predicate Pred) {
+    return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
+                                                             const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
+                                                             const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
+                                                             const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
+                                                             const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
+}
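+
+// For example, recognizing "select (icmp sgt A, B), A, B" as a signed
+// maximum (a sketch; names are placeholders):
+//
+//   Value *A, *B;
+//   if (match(V, m_SMax(m_Value(A), m_Value(B)))) {
+//     ... V computes smax(A, B) ...
+//   }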
+
+/// Match an 'ordered' floating point maximum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of an fcmp and a select has
+/// 'maximum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate.
+///
+///                         max(L, R)  iff L and R are not NaN
+///  m_OrdFMax(L, R) =      R          iff L or R are NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
+                                                                 const RHS &R) {
+  return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
+}
+
+/// Match an 'ordered' floating point minimum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of an fcmp and a select has
+/// 'minimum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
+///
+///                         min(L, R)  iff L and R are not NaN
+///  m_OrdFMin(L, R) =      R          iff L or R are NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
+                                                                 const RHS &R) {
+  return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
+}
+
+/// Match an 'unordered' floating point maximum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of an fcmp and a select has
+/// 'maximum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
+///
+///                         max(L, R)  iff L and R are not NaN
+///  m_UnordFMax(L, R) =    L          iff L or R are NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
+m_UnordFMax(const LHS &L, const RHS &R) {
+  return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
+}
+
+/// Match an 'unordered' floating point minimum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of an fcmp and a select has
+/// 'minimum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
+///
+///                          min(L, R)  iff L and R are not NaN
+///  m_UnordFMin(L, R) =     L          iff L or R are NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
+m_UnordFMin(const LHS &L, const RHS &R) {
+  return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for overflow check patterns: e.g. (a + b) u< a
+//
+
+template <typename LHS_t, typename RHS_t, typename Sum_t>
+struct UAddWithOverflow_match {
+  LHS_t L;
+  RHS_t R;
+  Sum_t S;
+
+  UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
+      : L(L), R(R), S(S) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    Value *ICmpLHS, *ICmpRHS;
+    ICmpInst::Predicate Pred;
+    if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
+      return false;
+
+    Value *AddLHS, *AddRHS;
+    auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
+
+    // (a + b) u< a, (a + b) u< b
+    if (Pred == ICmpInst::ICMP_ULT)
+      if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
+        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
+
+    // a >u (a + b), b >u (a + b)
+    if (Pred == ICmpInst::ICMP_UGT)
+      if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
+        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
+
+    return false;
+  }
+};
+
+/// Match an icmp instruction checking for unsigned overflow on addition.
+///
+/// S is matched to the addition whose result is being checked for overflow, and
+/// L and R are matched to the LHS and RHS of S.
+template <typename LHS_t, typename RHS_t, typename Sum_t>
+UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
+m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
+  return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
+}
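+
+// For example (a sketch; names are placeholders):
+//
+//   Value *A, *B, *Sum;
+//   if (match(V, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)))) {
+//     ... V is an icmp checking whether Sum = A + B wrapped ...
+//   }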
+
+template <typename Opnd_t> struct Argument_match {
+  unsigned OpI;
+  Opnd_t Val;
+
+  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    CallSite CS(V);
+    return CS.isCall() && Val.match(CS.getArgument(OpI));
+  }
+};
+
+/// Match an argument.
+template <unsigned OpI, typename Opnd_t>
+inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
+  return Argument_match<Opnd_t>(OpI, Op);
+}
+
+/// Intrinsic matchers.
+struct IntrinsicID_match {
+  unsigned ID;
+
+  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    if (const auto *CI = dyn_cast<CallInst>(V))
+      if (const auto *F = CI->getCalledFunction())
+        return F->getIntrinsicID() == ID;
+    return false;
+  }
+};
+
+/// Intrinsic matchers are combinations of ID matchers and argument matchers.
+/// Higher-arity matchers are defined recursively in terms of and-ing them
+/// with lower-arity matchers. Here are some convenient typedefs for up to
+/// several arguments; more can be added as needed.
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+          typename T3 = void, typename T4 = void, typename T5 = void,
+          typename T6 = void, typename T7 = void, typename T8 = void,
+          typename T9 = void, typename T10 = void>
+struct m_Intrinsic_Ty;
+template <typename T0> struct m_Intrinsic_Ty<T0> {
+  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
+};
+template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
+  using Ty =
+      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
+};
+template <typename T0, typename T1, typename T2>
+struct m_Intrinsic_Ty<T0, T1, T2> {
+  using Ty =
+      match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
+                        Argument_match<T2>>;
+};
+template <typename T0, typename T1, typename T2, typename T3>
+struct m_Intrinsic_Ty<T0, T1, T2, T3> {
+  using Ty =
+      match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
+                        Argument_match<T3>>;
+};
+
+/// Match intrinsic calls like this:
+/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
+template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
+  return IntrinsicID_match(IntrID);
+}
+
+template <Intrinsic::ID IntrID, typename T0>
+inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
+  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1>
+inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
+                                                       const T1 &Op1) {
+  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
+inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
+  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
+          typename T3>
+inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
+  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
+}
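+
+// Illustrative sketch of a multi-argument form (I, A, B and C are placeholder
+// names):
+//
+//   Value *A, *B, *C;
+//   if (match(I, m_Intrinsic<Intrinsic::fma>(m_Value(A), m_Value(B),
+//                                            m_Value(C))))
+//     ; // I is a call to llvm.fma; A, B and C bind to its operands.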
+
+// Helper intrinsic matching specializations.
+template <typename Opnd0>
+inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
+  return m_Intrinsic<Intrinsic::bitreverse>(Op0);
+}
+
+template <typename Opnd0>
+inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
+  return m_Intrinsic<Intrinsic::bswap>(Op0);
+}
+
+template <typename Opnd0, typename Opnd1>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
+                                                        const Opnd1 &Op1) {
+  return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
+}
+
+template <typename Opnd0, typename Opnd1>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
+                                                        const Opnd1 &Op1) {
+  return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
+}
+
+template <typename Opnd_t> struct Signum_match {
+  Opnd_t Val;
+  Signum_match(const Opnd_t &V) : Val(V) {}
+
+  template <typename OpTy> bool match(OpTy *V) {
+    unsigned TypeSize = V->getType()->getScalarSizeInBits();
+    if (TypeSize == 0)
+      return false;
+
+    unsigned ShiftWidth = TypeSize - 1;
+    Value *OpL = nullptr, *OpR = nullptr;
+
+    // This is the representation of signum we match:
+    //
+    //  signum(x) == (x >> 63) | (-x >>u 63)
+    //
+    // An i1 value is its own signum, so it's correct to match
+    //
+    //  signum(x) == (x >> 0)  | (-x >>u 0)
+    //
+    // for i1 values.
+
+    auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
+    auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
+    auto Signum = m_Or(LHS, RHS);
+
+    return Signum.match(V) && OpL == OpR && Val.match(OpL);
+  }
+};
+
+/// Matches a signum pattern.
+///
+/// signum(x) =
+///      x >  0  ->  1
+///      x == 0  ->  0
+///      x <  0  -> -1
+template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
+  return Signum_match<Val_t>(V);
+}
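+
+// Illustrative sketch (V and X are placeholder names):
+//
+//   Value *X;
+//   if (match(V, m_Signum(m_Value(X))))
+//     ; // V computes (X >> BW-1) | (-X >>u BW-1), i.e. signum(X).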
+
+//===----------------------------------------------------------------------===//
+// Matchers for two-operand operators with the operands in either order
+//
+
+/// Matches a BinaryOperator with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
+  return AnyBinaryOp_match<LHS, RHS, true>(L, R);
+}
+
+/// Matches an ICmp with a predicate over LHS and RHS in either order.
+/// Does not swap the predicate.
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
+m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
+                                                                       R);
+}
+
+/// Matches an Add with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
+}
+
+/// Matches a Mul with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
+}
+
+/// Matches an And with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
+}
+
+/// Matches an Or with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
+                                                              const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
+}
+
+/// Matches an Xor with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
+}
+
+/// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
+template <typename ValTy>
+inline BinaryOp_match<ValTy, cst_pred_ty<is_all_ones>, Instruction::Xor, true>
+m_Not(const ValTy &V) {
+  return m_c_Xor(V, m_AllOnes());
+}
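+
+// Illustrative sketch (V and X are placeholder names):
+//
+//   Value *X;
+//   if (match(V, m_Not(m_Value(X))))
+//     ; // V is 'xor X, -1' or 'xor -1, X'; X binds to the other operand.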
+
+/// Matches an SMin with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
+m_c_SMin(const LHS &L, const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
+}
+/// Matches an SMax with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
+m_c_SMax(const LHS &L, const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
+}
+/// Matches a UMin with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
+m_c_UMin(const LHS &L, const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
+}
+/// Matches a UMax with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
+m_c_UMax(const LHS &L, const RHS &R) {
+  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
+}
+
+/// Matches FAdd with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
+m_c_FAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
+}
+
+/// Matches FMul with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
+m_c_FMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
+}
+
+} // end namespace PatternMatch
+} // end namespace llvm
+
+#endif // LLVM_IR_PATTERNMATCH_H
diff --git a/linux-x64/clang/include/llvm/IR/PredIteratorCache.h b/linux-x64/clang/include/llvm/IR/PredIteratorCache.h
new file mode 100644
index 0000000..81f5353
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/PredIteratorCache.h
@@ -0,0 +1,81 @@
+//===- PredIteratorCache.h - pred_iterator Cache ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PredIteratorCache class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PREDITERATORCACHE_H
+#define LLVM_IR_PREDITERATORCACHE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// PredIteratorCache - This class is an extremely trivial cache for
+/// predecessor iterator queries.  This is useful for code that repeatedly
+/// wants the predecessor list for the same blocks.
+class PredIteratorCache {
+  /// BlockToPredsMap - Pointer to null-terminated list.
+  mutable DenseMap<BasicBlock *, BasicBlock **> BlockToPredsMap;
+  mutable DenseMap<BasicBlock *, unsigned> BlockToPredCountMap;
+
+  /// Memory - This is the space that holds cached preds.
+  BumpPtrAllocator Memory;
+
+private:
+  /// GetPreds - Get a cached list for the null-terminated predecessor list of
+  /// the specified block.  This can be used in a loop like this:
+  ///   for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI)
+  ///      use(*PI);
+  /// instead of:
+  /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+  BasicBlock **GetPreds(BasicBlock *BB) {
+    BasicBlock **&Entry = BlockToPredsMap[BB];
+    if (Entry)
+      return Entry;
+
+    SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB));
+    PredCache.push_back(nullptr); // null terminator.
+
+    BlockToPredCountMap[BB] = PredCache.size() - 1;
+
+    Entry = Memory.Allocate<BasicBlock *>(PredCache.size());
+    std::copy(PredCache.begin(), PredCache.end(), Entry);
+    return Entry;
+  }
+
+  unsigned GetNumPreds(BasicBlock *BB) const {
+    auto Result = BlockToPredCountMap.find(BB);
+    if (Result != BlockToPredCountMap.end())
+      return Result->second;
+    return BlockToPredCountMap[BB] = std::distance(pred_begin(BB), pred_end(BB));
+  }
+
+public:
+  size_t size(BasicBlock *BB) const { return GetNumPreds(BB); }
+  ArrayRef<BasicBlock *> get(BasicBlock *BB) {
+    return makeArrayRef(GetPreds(BB), GetNumPreds(BB));
+  }
+
+  /// clear - Remove all information.
+  void clear() {
+    BlockToPredsMap.clear();
+    BlockToPredCountMap.clear();
+    Memory.Reset();
+  }
+};
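+
+// Illustrative sketch (BB and visit are placeholder names):
+//
+//   PredIteratorCache PredCache;
+//   for (BasicBlock *Pred : PredCache.get(BB))
+//     visit(Pred); // Repeated queries for the same BB reuse the cached array.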
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/ProfileSummary.h b/linux-x64/clang/include/llvm/IR/ProfileSummary.h
new file mode 100644
index 0000000..d85ce8c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ProfileSummary.h
@@ -0,0 +1,85 @@
+//===- ProfileSummary.h - Profile summary data structure. -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the profile summary data structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PROFILESUMMARY_H
+#define LLVM_IR_PROFILESUMMARY_H
+
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class LLVMContext;
+class Metadata;
+
+// The profile summary is one or more (Cutoff, MinCount, NumCounts) triplets.
+// The semantics of counts depend on the type of profile. For instrumentation
+// profile, counts are block counts and for sample profile, counts are
+// per-line samples. Given a target counts percentile, we compute the minimum
+// number of counts needed to reach this target and the minimum among these
+// counts.
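+// For example (hypothetical values), the entry (Cutoff=990000, MinCount=100,
+// NumCounts=2000) says that covering 99% of the total count (cutoffs are
+// scaled by ProfileSummary::Scale) requires the 2000 hottest counts, the
+// coldest of which is 100.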
+struct ProfileSummaryEntry {
+  uint32_t Cutoff;    ///< The required percentile of counts.
+  uint64_t MinCount;  ///< The minimum count for this percentile.
+  uint64_t NumCounts; ///< Number of counts >= the minimum count.
+
+  ProfileSummaryEntry(uint32_t TheCutoff, uint64_t TheMinCount,
+                      uint64_t TheNumCounts)
+      : Cutoff(TheCutoff), MinCount(TheMinCount), NumCounts(TheNumCounts) {}
+};
+
+using SummaryEntryVector = std::vector<ProfileSummaryEntry>;
+
+class ProfileSummary {
+public:
+  enum Kind { PSK_Instr, PSK_Sample };
+
+private:
+  const Kind PSK;
+  static const char *KindStr[2];
+  SummaryEntryVector DetailedSummary;
+  uint64_t TotalCount, MaxCount, MaxInternalCount, MaxFunctionCount;
+  uint32_t NumCounts, NumFunctions;
+  /// \brief Return detailed summary as metadata.
+  Metadata *getDetailedSummaryMD(LLVMContext &Context);
+
+public:
+  static const int Scale = 1000000;
+
+  ProfileSummary(Kind K, SummaryEntryVector DetailedSummary,
+                 uint64_t TotalCount, uint64_t MaxCount,
+                 uint64_t MaxInternalCount, uint64_t MaxFunctionCount,
+                 uint32_t NumCounts, uint32_t NumFunctions)
+      : PSK(K), DetailedSummary(std::move(DetailedSummary)),
+        TotalCount(TotalCount), MaxCount(MaxCount),
+        MaxInternalCount(MaxInternalCount), MaxFunctionCount(MaxFunctionCount),
+        NumCounts(NumCounts), NumFunctions(NumFunctions) {}
+
+  Kind getKind() const { return PSK; }
+  /// \brief Return summary information as metadata.
+  Metadata *getMD(LLVMContext &Context);
+  /// \brief Construct profile summary from metadata.
+  static ProfileSummary *getFromMD(Metadata *MD);
+  SummaryEntryVector &getDetailedSummary() { return DetailedSummary; }
+  uint32_t getNumFunctions() { return NumFunctions; }
+  uint64_t getMaxFunctionCount() { return MaxFunctionCount; }
+  uint32_t getNumCounts() { return NumCounts; }
+  uint64_t getTotalCount() { return TotalCount; }
+  uint64_t getMaxCount() { return MaxCount; }
+  uint64_t getMaxInternalCount() { return MaxInternalCount; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_PROFILESUMMARY_H
diff --git a/linux-x64/clang/include/llvm/IR/SafepointIRVerifier.h b/linux-x64/clang/include/llvm/IR/SafepointIRVerifier.h
new file mode 100644
index 0000000..092050d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/SafepointIRVerifier.h
@@ -0,0 +1,35 @@
+//===- SafepointIRVerifier.h - Checks for GC relocation problems *- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a verifier which is useful for enforcing the relocation
+// properties required by a relocating GC.  Specifically, it looks for uses of
+// the unrelocated value of pointer SSA values after a possible safepoint. It
+// attempts to report no false negatives, but may end up reporting false
+// positives in rare cases (see the note at the top of the corresponding cpp
+// file.)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SAFEPOINT_IR_VERIFIER
+#define LLVM_IR_SAFEPOINT_IR_VERIFIER
+
+namespace llvm {
+
+class Function;
+class FunctionPass;
+
+/// Run the safepoint verifier over a single function.  Crashes on failure.
+void verifySafepointIR(Function &F);
+
+/// Create an instance of the safepoint verifier pass which can be added to
+/// a pass pipeline to check for relocation bugs.
+FunctionPass *createSafepointIRVerifierPass();
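+
+// Illustrative sketch (PM and F are placeholder names): with the legacy pass
+// manager the verifier can be scheduled as
+//
+//   PM.add(createSafepointIRVerifierPass());
+//
+// or run directly on one function with verifySafepointIR(F).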
+}
+
+#endif // LLVM_IR_SAFEPOINT_IR_VERIFIER
diff --git a/linux-x64/clang/include/llvm/IR/Statepoint.h b/linux-x64/clang/include/llvm/IR/Statepoint.h
new file mode 100644
index 0000000..a87f67c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Statepoint.h
@@ -0,0 +1,474 @@
+//===- llvm/IR/Statepoint.h - gc.statepoint utilities -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains utility functions and a wrapper class analogous to
+// CallSite for accessing the fields of gc.statepoint, gc.relocate,
+// gc.result intrinsics; and some general utilities helpful when dealing with
+// gc.statepoint.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_STATEPOINT_H
+#define LLVM_IR_STATEPOINT_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+/// The statepoint intrinsic accepts a set of flags as its third argument.
+/// Valid values come out of this set.
+enum class StatepointFlags {
+  None = 0,
+  GCTransition = 1, ///< Indicates that this statepoint is a transition from
+                    ///< GC-aware code to code that is not GC-aware.
+  /// Mark the deopt arguments associated with the statepoint as only being
+  /// "live-in". By default, deopt arguments are "live-through".  "live-through"
+  /// requires that the value be live on entry, on exit, and at any point
+  /// during the call.  "live-in" only requires the value be available at the
+  /// start of the call.  In particular, "live-in" values can be placed in
+  /// unused argument registers or other non-callee saved registers.
+  DeoptLiveIn = 2,
+
+  MaskAll = 3 ///< A bitmask that includes all valid flags.
+};
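+
+// Illustrative sketch (Flags is a placeholder raw flags value, e.g. from
+// StatepointBase::getFlags() below):
+//
+//   assert((Flags & ~uint64_t(StatepointFlags::MaskAll)) == 0 &&
+//          "unknown statepoint flag set");
+//   bool IsTransition = Flags & uint64_t(StatepointFlags::GCTransition);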
+
+class GCRelocateInst;
+class GCResultInst;
+
+bool isStatepoint(ImmutableCallSite CS);
+bool isStatepoint(const Value *V);
+bool isStatepoint(const Value &V);
+
+bool isGCRelocate(ImmutableCallSite CS);
+bool isGCRelocate(const Value *V);
+
+bool isGCResult(ImmutableCallSite CS);
+bool isGCResult(const Value *V);
+
+/// Analogous to CallSiteBase, this provides most of the actual
+/// functionality for Statepoint and ImmutableStatepoint.  It is
+/// templatized to allow easy specialization of const and non-const
+/// concrete subtypes.  It is structured analogously to CallSite
+/// rather than the IntrinsicInst.h helpers since we need to support
+/// invokable statepoints.
+template <typename FunTy, typename InstructionTy, typename ValueTy,
+          typename CallSiteTy>
+class StatepointBase {
+  CallSiteTy StatepointCS;
+
+protected:
+  explicit StatepointBase(InstructionTy *I) {
+    if (isStatepoint(I)) {
+      StatepointCS = CallSiteTy(I);
+      assert(StatepointCS && "isStatepoint implies CallSite");
+    }
+  }
+
+  explicit StatepointBase(CallSiteTy CS) {
+    if (isStatepoint(CS))
+      StatepointCS = CS;
+  }
+
+public:
+  using arg_iterator = typename CallSiteTy::arg_iterator;
+
+  enum {
+    IDPos = 0,
+    NumPatchBytesPos = 1,
+    CalledFunctionPos = 2,
+    NumCallArgsPos = 3,
+    FlagsPos = 4,
+    CallArgsBeginPos = 5,
+  };
+
+  void *operator new(size_t, unsigned) = delete;
+  void *operator new(size_t s) = delete;
+
+  explicit operator bool() const {
+    // We do not assign non-statepoint CallSites to StatepointCS.
+    return (bool)StatepointCS;
+  }
+
+  /// Return the underlying CallSite.
+  CallSiteTy getCallSite() const {
+    assert(*this && "check validity first!");
+    return StatepointCS;
+  }
+
+  uint64_t getFlags() const {
+    return cast<ConstantInt>(getCallSite().getArgument(FlagsPos))
+        ->getZExtValue();
+  }
+
+  /// Return the ID associated with this statepoint.
+  uint64_t getID() const {
+    const Value *IDVal = getCallSite().getArgument(IDPos);
+    return cast<ConstantInt>(IDVal)->getZExtValue();
+  }
+
+  /// Return the number of patchable bytes associated with this statepoint.
+  uint32_t getNumPatchBytes() const {
+    const Value *NumPatchBytesVal = getCallSite().getArgument(NumPatchBytesPos);
+    uint64_t NumPatchBytes =
+      cast<ConstantInt>(NumPatchBytesVal)->getZExtValue();
+    assert(isInt<32>(NumPatchBytes) && "should fit in 32 bits!");
+    return NumPatchBytes;
+  }
+
+  /// Return the value actually being called or invoked.
+  ValueTy *getCalledValue() const {
+    return getCallSite().getArgument(CalledFunctionPos);
+  }
+
+  InstructionTy *getInstruction() const {
+    return getCallSite().getInstruction();
+  }
+
+  /// Return the function being called if this is a direct call, otherwise
+  /// return null (if it's an indirect call).
+  FunTy *getCalledFunction() const {
+    return dyn_cast<Function>(getCalledValue());
+  }
+
+  /// Return the caller function for this statepoint.
+  FunTy *getCaller() const { return getCallSite().getCaller(); }
+
+  /// Determine if the statepoint cannot unwind.
+  bool doesNotThrow() const {
+    Function *F = getCalledFunction();
+    return getCallSite().doesNotThrow() || (F ? F->doesNotThrow() : false);
+  }
+
+  /// Return the type of the value returned by the call underlying the
+  /// statepoint.
+  Type *getActualReturnType() const {
+    auto *FTy = cast<FunctionType>(
+        cast<PointerType>(getCalledValue()->getType())->getElementType());
+    return FTy->getReturnType();
+  }
+
+  /// Number of arguments to be passed to the actual callee.
+  int getNumCallArgs() const {
+    const Value *NumCallArgsVal = getCallSite().getArgument(NumCallArgsPos);
+    return cast<ConstantInt>(NumCallArgsVal)->getZExtValue();
+  }
+
+  size_t arg_size() const { return getNumCallArgs(); }
+  typename CallSiteTy::arg_iterator arg_begin() const {
+    assert(CallArgsBeginPos <= (int)getCallSite().arg_size());
+    return getCallSite().arg_begin() + CallArgsBeginPos;
+  }
+  typename CallSiteTy::arg_iterator arg_end() const {
+    auto I = arg_begin() + arg_size();
+    assert((getCallSite().arg_end() - I) >= 0);
+    return I;
+  }
+
+  ValueTy *getArgument(unsigned Index) {
+    assert(Index < arg_size() && "out of bounds!");
+    return *(arg_begin() + Index);
+  }
+
+  /// range adapter for call arguments
+  iterator_range<arg_iterator> call_args() const {
+    return make_range(arg_begin(), arg_end());
+  }
+
+  /// \brief Return true if the call or the callee has the given attribute.
+  bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+    Function *F = getCalledFunction();
+    return getCallSite().paramHasAttr(i + CallArgsBeginPos, A) ||
+          (F ? F->getAttributes().hasAttribute(i, A) : false);
+  }
+
+  /// Number of GC transition args.
+  int getNumTotalGCTransitionArgs() const {
+    const Value *NumGCTransitionArgs = *arg_end();
+    return cast<ConstantInt>(NumGCTransitionArgs)->getZExtValue();
+  }
+  typename CallSiteTy::arg_iterator gc_transition_args_begin() const {
+    auto I = arg_end() + 1;
+    assert((getCallSite().arg_end() - I) >= 0);
+    return I;
+  }
+  typename CallSiteTy::arg_iterator gc_transition_args_end() const {
+    auto I = gc_transition_args_begin() + getNumTotalGCTransitionArgs();
+    assert((getCallSite().arg_end() - I) >= 0);
+    return I;
+  }
+
+  /// range adapter for GC transition arguments
+  iterator_range<arg_iterator> gc_transition_args() const {
+    return make_range(gc_transition_args_begin(), gc_transition_args_end());
+  }
+
+  /// Number of additional arguments excluding those intended
+  /// for garbage collection.
+  int getNumTotalVMSArgs() const {
+    const Value *NumVMSArgs = *gc_transition_args_end();
+    return cast<ConstantInt>(NumVMSArgs)->getZExtValue();
+  }
+
+  typename CallSiteTy::arg_iterator deopt_begin() const {
+    auto I = gc_transition_args_end() + 1;
+    assert((getCallSite().arg_end() - I) >= 0);
+    return I;
+  }
+  typename CallSiteTy::arg_iterator deopt_end() const {
+    auto I = deopt_begin() + getNumTotalVMSArgs();
+    assert((getCallSite().arg_end() - I) >= 0);
+    return I;
+  }
+
+  /// range adapter for vm state arguments
+  iterator_range<arg_iterator> deopt_operands() const {
+    return make_range(deopt_begin(), deopt_end());
+  }
+
+  typename CallSiteTy::arg_iterator gc_args_begin() const {
+    return deopt_end();
+  }
+  typename CallSiteTy::arg_iterator gc_args_end() const {
+    return getCallSite().arg_end();
+  }
+
+  unsigned gcArgsStartIdx() const {
+    return gc_args_begin() - getInstruction()->op_begin();
+  }
+
+  /// range adapter for gc arguments
+  iterator_range<arg_iterator> gc_args() const {
+    return make_range(gc_args_begin(), gc_args_end());
+  }
+
+  /// Get a list of all gc.relocates linked to this statepoint.
+  /// It may contain several relocations for the same base/derived pair;
+  /// for example, this can happen due to relocations on the unwinding
+  /// path of an invoke.
+  std::vector<const GCRelocateInst *> getRelocates() const;
+
+  /// Get the experimental_gc_result call tied to this statepoint.  Can be
+  /// nullptr if there isn't a gc_result tied to this statepoint.  Guaranteed to
+  /// be a CallInst if non-null.
+  const GCResultInst *getGCResult() const {
+    for (auto *U : getInstruction()->users())
+      if (auto *GRI = dyn_cast<GCResultInst>(U))
+        return GRI;
+    return nullptr;
+  }
+
+#ifndef NDEBUG
+  /// Asserts if this statepoint is malformed.  Common cases for failure
+  /// include incorrect length prefixes for variable length sections or
+  /// illegal values for parameters.
+  void verify() {
+    assert(getNumCallArgs() >= 0 &&
+           "number of arguments to actually callee can't be negative");
+
+    // The internal asserts in the iterator accessors do the rest.
+    (void)arg_begin();
+    (void)arg_end();
+    (void)gc_transition_args_begin();
+    (void)gc_transition_args_end();
+    (void)deopt_begin();
+    (void)deopt_end();
+    (void)gc_args_begin();
+    (void)gc_args_end();
+  }
+#endif
+};
+
+/// A specialization of its base class for read-only access
+/// to a gc.statepoint.
+class ImmutableStatepoint
+    : public StatepointBase<const Function, const Instruction, const Value,
+                            ImmutableCallSite> {
+  using Base =
+      StatepointBase<const Function, const Instruction, const Value,
+                     ImmutableCallSite>;
+
+public:
+  explicit ImmutableStatepoint(const Instruction *I) : Base(I) {}
+  explicit ImmutableStatepoint(ImmutableCallSite CS) : Base(CS) {}
+};
+
+/// A specialization of its base class for read-write access
+/// to a gc.statepoint.
+class Statepoint
+    : public StatepointBase<Function, Instruction, Value, CallSite> {
+  using Base = StatepointBase<Function, Instruction, Value, CallSite>;
+
+public:
+  explicit Statepoint(Instruction *I) : Base(I) {}
+  explicit Statepoint(CallSite CS) : Base(CS) {}
+};
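+
+// Illustrative sketch (I is a placeholder instruction):
+//
+//   Statepoint SP(I);
+//   if (SP) // I was a gc.statepoint call or invoke.
+//     for (const GCRelocateInst *Reloc : SP.getRelocates())
+//       ; // Visit each value relocated across this statepoint.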
+
+/// Common base class for representing values projected from a statepoint.
+/// Currently, the only projections available are gc.result and gc.relocate.
+class GCProjectionInst : public IntrinsicInst {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate ||
+      I->getIntrinsicID() == Intrinsic::experimental_gc_result;
+  }
+
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+
+  /// Return true if this relocate is tied to the invoke statepoint.
+  /// This includes relocates which are on the unwinding path.
+  bool isTiedToInvoke() const {
+    const Value *Token = getArgOperand(0);
+
+    return isa<LandingPadInst>(Token) || isa<InvokeInst>(Token);
+  }
+
+  /// The statepoint with which this gc.relocate is associated.
+  const Instruction *getStatepoint() const {
+    const Value *Token = getArgOperand(0);
+
+    // This takes care both of relocates for call statepoints and relocates
+    // on normal path of invoke statepoint.
+    if (!isa<LandingPadInst>(Token)) {
+      assert(isStatepoint(Token));
+      return cast<Instruction>(Token);
+    }
+
+    // This relocate is on exceptional path of an invoke statepoint
+    const BasicBlock *InvokeBB =
+        cast<Instruction>(Token)->getParent()->getUniquePredecessor();
+
+    assert(InvokeBB && "safepoints should have unique landingpads");
+    assert(InvokeBB->getTerminator() &&
+           "safepoint block should be well formed");
+    assert(isStatepoint(InvokeBB->getTerminator()));
+
+    return InvokeBB->getTerminator();
+  }
+};
+
+/// Represents calls to the gc.relocate intrinsic.
+class GCRelocateInst : public GCProjectionInst {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate;
+  }
+
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+
+  /// The index into the associate statepoint's argument list
+  /// which contains the base pointer of the pointer whose
+  /// relocation this gc.relocate describes.
+  unsigned getBasePtrIndex() const {
+    return cast<ConstantInt>(getArgOperand(1))->getZExtValue();
+  }
+
+  /// The index into the associate statepoint's argument list which
+  /// contains the pointer whose relocation this gc.relocate describes.
+  unsigned getDerivedPtrIndex() const {
+    return cast<ConstantInt>(getArgOperand(2))->getZExtValue();
+  }
+
+  Value *getBasePtr() const {
+    ImmutableCallSite CS(getStatepoint());
+    return *(CS.arg_begin() + getBasePtrIndex());
+  }
+
+  Value *getDerivedPtr() const {
+    ImmutableCallSite CS(getStatepoint());
+    return *(CS.arg_begin() + getDerivedPtrIndex());
+  }
+};
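+
+// Illustrative sketch (Reloc is a placeholder GCRelocateInst*):
+//
+//   Value *Base = Reloc->getBasePtr();       // Pre-relocation base pointer.
+//   Value *Derived = Reloc->getDerivedPtr(); // Pre-relocation derived pointer.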
+
+/// Represents calls to the gc.result intrinsic.
+class GCResultInst : public GCProjectionInst {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    return I->getIntrinsicID() == Intrinsic::experimental_gc_result;
+  }
+
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
+template <typename FunTy, typename InstructionTy, typename ValueTy,
+          typename CallSiteTy>
+std::vector<const GCRelocateInst *>
+StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
+    const {
+
+  std::vector<const GCRelocateInst *> Result;
+
+  CallSiteTy StatepointCS = getCallSite();
+
+  // Search for relocated pointers.  Note that working backwards from the
+  // gc_relocates ensures that we only get pairs which are actually relocated
+  // and used after the statepoint.
+  for (const User *U : getInstruction()->users())
+    if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
+      Result.push_back(Relocate);
+
+  if (!StatepointCS.isInvoke())
+    return Result;
+
+  // We need to scan through exceptional relocations if this is an invoke statepoint.
+  LandingPadInst *LandingPad =
+      cast<InvokeInst>(getInstruction())->getLandingPadInst();
+
+  // Search for gc relocates that are attached to this landingpad.
+  for (const User *LandingPadUser : LandingPad->users()) {
+    if (auto *Relocate = dyn_cast<GCRelocateInst>(LandingPadUser))
+      Result.push_back(Relocate);
+  }
+  return Result;
+}
+
+/// Call sites that get wrapped by a gc.statepoint (currently only in
+/// RewriteStatepointsForGC and potentially in other passes in the future) can
+/// have attributes that describe properties of the gc.statepoint call they
+/// will eventually be wrapped in.  This struct is used to represent such
+/// directives.
+struct StatepointDirectives {
+  Optional<uint32_t> NumPatchBytes;
+  Optional<uint64_t> StatepointID;
+
+  static const uint64_t DefaultStatepointID = 0xABCDEF00;
+  static const uint64_t DeoptBundleStatepointID = 0xABCDEF0F;
+};
+
+/// Parse out statepoint directives from the function attributes present in \p
+/// AS.
+StatepointDirectives parseStatepointDirectivesFromAttrs(AttributeList AS);
+
+/// Return \c true if the \p Attr is an attribute that is a statepoint
+/// directive.
+bool isStatepointDirectiveAttr(Attribute Attr);
+
+} // end namespace llvm
+
+#endif // LLVM_IR_STATEPOINT_H
diff --git a/linux-x64/clang/include/llvm/IR/SymbolTableListTraits.h b/linux-x64/clang/include/llvm/IR/SymbolTableListTraits.h
new file mode 100644
index 0000000..87ce902
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/SymbolTableListTraits.h
@@ -0,0 +1,118 @@
+//===- llvm/SymbolTableListTraits.h - Traits for iplist ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic class that is used to implement the automatic
+// symbol table manipulation that occurs when you put (for example) a named
+// instruction into a basic block.
+//
+// The way that this is implemented is by using a special traits class with the
+// intrusive list that makes up the list of instructions in a basic block.  When
+// a new element is added to the list of instructions, the traits class is
+// notified, allowing the symbol table to be updated.
+//
+// This generic class implements the traits class.  It must be generic so that
+// it can work for all of its users, which include lists of instructions, basic
+// blocks, arguments, functions, global variables, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SYMBOLTABLELISTTRAITS_H
+#define LLVM_IR_SYMBOLTABLELISTTRAITS_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/simple_ilist.h"
+#include <cstddef>
+
+namespace llvm {
+
+class Argument;
+class BasicBlock;
+class Function;
+class GlobalAlias;
+class GlobalIFunc;
+class GlobalVariable;
+class Instruction;
+class Module;
+class ValueSymbolTable;
+
+/// Template metafunction to get the parent type for a symbol table list.
+///
+/// Implementations create a typedef called \c type so that we only need a
+/// single template parameter for the list and traits.
+template <typename NodeTy> struct SymbolTableListParentType {};
+
+#define DEFINE_SYMBOL_TABLE_PARENT_TYPE(NODE, PARENT)                          \
+  template <> struct SymbolTableListParentType<NODE> { using type = PARENT; };
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Instruction, BasicBlock)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(BasicBlock, Function)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Argument, Function)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Function, Module)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalVariable, Module)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalAlias, Module)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalIFunc, Module)
+#undef DEFINE_SYMBOL_TABLE_PARENT_TYPE
+
+template <typename NodeTy> class SymbolTableList;
+
+// ValueSubClass   - The type of objects that I hold, e.g. Instruction.
+// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
+//
+template <typename ValueSubClass>
+class SymbolTableListTraits : public ilist_alloc_traits<ValueSubClass> {
+  using ListTy = SymbolTableList<ValueSubClass>;
+  using iterator = typename simple_ilist<ValueSubClass>::iterator;
+  using ItemParentClass =
+      typename SymbolTableListParentType<ValueSubClass>::type;
+
+public:
+  SymbolTableListTraits() = default;
+
+private:
+  /// getListOwner - Return the object that owns this list.  If this is a list
+  /// of instructions, it returns the BasicBlock that owns them.
+  ItemParentClass *getListOwner() {
+    size_t Offset(size_t(&((ItemParentClass*)nullptr->*ItemParentClass::
+                           getSublistAccess(static_cast<ValueSubClass*>(nullptr)))));
+    ListTy *Anchor(static_cast<ListTy *>(this));
+    return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
+                                              Offset);
+  }
+
+  static ListTy &getList(ItemParentClass *Par) {
+    return Par->*(Par->getSublistAccess((ValueSubClass*)nullptr));
+  }
+
+  static ValueSymbolTable *getSymTab(ItemParentClass *Par) {
+    return Par ? toPtr(Par->getValueSymbolTable()) : nullptr;
+  }
+
+public:
+  void addNodeToList(ValueSubClass *V);
+  void removeNodeFromList(ValueSubClass *V);
+  void transferNodesFromList(SymbolTableListTraits &L2, iterator first,
+                             iterator last);
+  // private:
+  template<typename TPtr>
+  void setSymTabObject(TPtr *, TPtr);
+  static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; }
+  static ValueSymbolTable *toPtr(ValueSymbolTable &R) { return &R; }
+};
+
+/// List that automatically updates parent links and symbol tables.
+///
+/// When nodes are inserted into and removed from this list, the associated
+/// symbol table will be automatically updated.  Similarly, parent links get
+/// updated automatically.
+template <class T>
+class SymbolTableList
+    : public iplist_impl<simple_ilist<T>, SymbolTableListTraits<T>> {};
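+
+// For example, elsewhere in LLVM a Function's basic-block list is a
+// SymbolTableList<BasicBlock>, so inserting a named block automatically
+// registers its name in the function's value symbol table.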
+
+} // end namespace llvm
+
+#endif // LLVM_IR_SYMBOLTABLELISTTRAITS_H
diff --git a/linux-x64/clang/include/llvm/IR/TrackingMDRef.h b/linux-x64/clang/include/llvm/IR/TrackingMDRef.h
new file mode 100644
index 0000000..bdec904
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/TrackingMDRef.h
@@ -0,0 +1,178 @@
+//===- llvm/IR/TrackingMDRef.h - Tracking Metadata references ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// References to metadata that track RAUW.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TRACKINGMDREF_H
+#define LLVM_IR_TRACKINGMDREF_H
+
+#include "llvm/IR/Metadata.h"
+#include <algorithm>
+#include <cassert>
+
+namespace llvm {
+
+/// \brief Tracking metadata reference.
+///
+/// This class behaves like \a TrackingVH, but for metadata.
+class TrackingMDRef {
+  Metadata *MD = nullptr;
+
+public:
+  TrackingMDRef() = default;
+  explicit TrackingMDRef(Metadata *MD) : MD(MD) { track(); }
+
+  TrackingMDRef(TrackingMDRef &&X) : MD(X.MD) { retrack(X); }
+  TrackingMDRef(const TrackingMDRef &X) : MD(X.MD) { track(); }
+
+  TrackingMDRef &operator=(TrackingMDRef &&X) {
+    if (&X == this)
+      return *this;
+
+    untrack();
+    MD = X.MD;
+    retrack(X);
+    return *this;
+  }
+
+  TrackingMDRef &operator=(const TrackingMDRef &X) {
+    if (&X == this)
+      return *this;
+
+    untrack();
+    MD = X.MD;
+    track();
+    return *this;
+  }
+
+  ~TrackingMDRef() { untrack(); }
+
+  Metadata *get() const { return MD; }
+  operator Metadata *() const { return get(); }
+  Metadata *operator->() const { return get(); }
+  Metadata &operator*() const { return *get(); }
+
+  void reset() {
+    untrack();
+    MD = nullptr;
+  }
+  void reset(Metadata *MD) {
+    untrack();
+    this->MD = MD;
+    track();
+  }
+
+  /// \brief Check whether this has a trivial destructor.
+  ///
+  /// If \c MD isn't replaceable, the destructor will be a no-op.
+  bool hasTrivialDestructor() const {
+    return !MD || !MetadataTracking::isReplaceable(*MD);
+  }
+
+  bool operator==(const TrackingMDRef &X) const { return MD == X.MD; }
+  bool operator!=(const TrackingMDRef &X) const { return MD != X.MD; }
+
+private:
+  void track() {
+    if (MD)
+      MetadataTracking::track(MD);
+  }
+
+  void untrack() {
+    if (MD)
+      MetadataTracking::untrack(MD);
+  }
+
+  void retrack(TrackingMDRef &X) {
+    assert(MD == X.MD && "Expected values to match");
+    if (X.MD) {
+      MetadataTracking::retrack(X.MD, MD);
+      X.MD = nullptr;
+    }
+  }
+};
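+
+// Illustrative sketch (Ctx and Real are placeholder names):
+//
+//   auto Temp = MDNode::getTemporary(Ctx, None); // RAUW-able node.
+//   TrackingMDRef Ref(Temp.get());
+//   Temp->replaceAllUsesWith(Real);
+//   assert(Ref.get() == Real); // The reference followed the replacement.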
+
+/// \brief Typed tracking ref.
+///
+/// Track references of a particular type.  It's useful to use this for \a
+/// MDNode and \a ValueAsMetadata.
+template <class T> class TypedTrackingMDRef {
+  TrackingMDRef Ref;
+
+public:
+  TypedTrackingMDRef() = default;
+  explicit TypedTrackingMDRef(T *MD) : Ref(static_cast<Metadata *>(MD)) {}
+
+  TypedTrackingMDRef(TypedTrackingMDRef &&X) : Ref(std::move(X.Ref)) {}
+  TypedTrackingMDRef(const TypedTrackingMDRef &X) : Ref(X.Ref) {}
+
+  TypedTrackingMDRef &operator=(TypedTrackingMDRef &&X) {
+    Ref = std::move(X.Ref);
+    return *this;
+  }
+
+  TypedTrackingMDRef &operator=(const TypedTrackingMDRef &X) {
+    Ref = X.Ref;
+    return *this;
+  }
+
+  T *get() const { return (T *)Ref.get(); }
+  operator T *() const { return get(); }
+  T *operator->() const { return get(); }
+  T &operator*() const { return *get(); }
+
+  bool operator==(const TypedTrackingMDRef &X) const { return Ref == X.Ref; }
+  bool operator!=(const TypedTrackingMDRef &X) const { return Ref != X.Ref; }
+
+  void reset() { Ref.reset(); }
+  void reset(T *MD) { Ref.reset(static_cast<Metadata *>(MD)); }
+
+  /// \brief Check whether this has a trivial destructor.
+  bool hasTrivialDestructor() const { return Ref.hasTrivialDestructor(); }
+};
+
+using TrackingMDNodeRef = TypedTrackingMDRef<MDNode>;
+using TrackingValueAsMetadataRef = TypedTrackingMDRef<ValueAsMetadata>;
+
+// Expose the underlying metadata to casting.
+template <> struct simplify_type<TrackingMDRef> {
+  using SimpleType = Metadata *;
+
+  static SimpleType getSimplifiedValue(TrackingMDRef &MD) { return MD.get(); }
+};
+
+template <> struct simplify_type<const TrackingMDRef> {
+  using SimpleType = Metadata *;
+
+  static SimpleType getSimplifiedValue(const TrackingMDRef &MD) {
+    return MD.get();
+  }
+};
+
+template <class T> struct simplify_type<TypedTrackingMDRef<T>> {
+  using SimpleType = T *;
+
+  static SimpleType getSimplifiedValue(TypedTrackingMDRef<T> &MD) {
+    return MD.get();
+  }
+};
+
+template <class T> struct simplify_type<const TypedTrackingMDRef<T>> {
+  using SimpleType = T *;
+
+  static SimpleType getSimplifiedValue(const TypedTrackingMDRef<T> &MD) {
+    return MD.get();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_TRACKINGMDREF_H
diff --git a/linux-x64/clang/include/llvm/IR/Type.h b/linux-x64/clang/include/llvm/IR/Type.h
new file mode 100644
index 0000000..39a7976
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Type.h
@@ -0,0 +1,504 @@
+//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Type class.  For more "Type"
+// stuff, look in DerivedTypes.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPE_H
+#define LLVM_IR_TYPE_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+template<class GraphType> struct GraphTraits;
+class IntegerType;
+class LLVMContext;
+class PointerType;
+class raw_ostream;
+class StringRef;
+
+/// The instances of the Type class are immutable: once they are created,
+/// they are never changed.  Also note that only one instance of a particular
+/// type is ever created.  Thus seeing if two types are equal is a matter of
+/// doing a trivial pointer comparison. To enforce that no two equal instances
+/// are created, Type instances can only be created via static factory methods
+/// in class Type and in derived classes.  Once allocated, Types are never
+/// free'd.
+///
+class Type {
+public:
+  //===--------------------------------------------------------------------===//
+  /// Definitions of all of the base types for the Type system.  Based on this
+  /// value, you can cast to a class defined in DerivedTypes.h.
+  /// Note: If you add an element to this, you need to add an element to the
+  /// Type::getPrimitiveType function, or else things will break!
+  /// Also update LLVMTypeKind and LLVMGetTypeKind() in the C binding.
+  ///
+  enum TypeID {
+    // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date.
+    VoidTyID = 0,    ///<  0: type with no size
+    HalfTyID,        ///<  1: 16-bit floating point type
+    FloatTyID,       ///<  2: 32-bit floating point type
+    DoubleTyID,      ///<  3: 64-bit floating point type
+    X86_FP80TyID,    ///<  4: 80-bit floating point type (X87)
+    FP128TyID,       ///<  5: 128-bit floating point type (112-bit mantissa)
+    PPC_FP128TyID,   ///<  6: 128-bit floating point type (two 64-bits, PowerPC)
+    LabelTyID,       ///<  7: Labels
+    MetadataTyID,    ///<  8: Metadata
+    X86_MMXTyID,     ///<  9: MMX vectors (64 bits, X86 specific)
+    TokenTyID,       ///< 10: Tokens
+
+    // Derived types... see DerivedTypes.h file.
+    // Make sure FirstDerivedTyID stays up to date!
+    IntegerTyID,     ///< 11: Arbitrary bit width integers
+    FunctionTyID,    ///< 12: Functions
+    StructTyID,      ///< 13: Structures
+    ArrayTyID,       ///< 14: Arrays
+    PointerTyID,     ///< 15: Pointers
+    VectorTyID       ///< 16: SIMD 'packed' format, or other vector type
+  };
+
+private:
+  /// This refers to the LLVMContext in which this type was uniqued.
+  LLVMContext &Context;
+
+  TypeID   ID : 8;            // The current base type of this type.
+  unsigned SubclassData : 24; // Space for subclasses to store data.
+                              // Note that this should be synchronized with
+                              // MAX_INT_BITS value in IntegerType class.
+
+protected:
+  friend class LLVMContextImpl;
+
+  explicit Type(LLVMContext &C, TypeID tid)
+    : Context(C), ID(tid), SubclassData(0) {}
+  ~Type() = default;
+
+  unsigned getSubclassData() const { return SubclassData; }
+
+  void setSubclassData(unsigned val) {
+    SubclassData = val;
+    // Ensure we don't have any accidental truncation.
+    assert(getSubclassData() == val && "Subclass data too large for field");
+  }
+
+  /// Keeps track of how many Type*'s there are in the ContainedTys list.
+  unsigned NumContainedTys = 0;
+
+  /// A pointer to the array of Types contained by this Type. For example, this
+  /// includes the arguments of a function type, the elements of a structure,
+  /// the pointee of a pointer, the element type of an array, etc. This pointer
+  /// may be 0 for types that don't contain other types (Integer, Double,
+  /// Float).
+  Type * const *ContainedTys = nullptr;
+
+  static bool isSequentialType(TypeID TyID) {
+    return TyID == ArrayTyID || TyID == VectorTyID;
+  }
+
+public:
+  /// Print the current type.
+  /// Omit the type details if \p NoDetails == true.
+  /// E.g., let %st = type { i32, i16 }
+  /// When \p NoDetails is true, we only print %st.
+  /// Put differently, \p NoDetails prints the type as if
+  /// inlined with the operands when printing an instruction.
+  void print(raw_ostream &O, bool IsForDebug = false,
+             bool NoDetails = false) const;
+
+  void dump() const;
+
+  /// Return the LLVMContext in which this type was uniqued.
+  LLVMContext &getContext() const { return Context; }
+
+  //===--------------------------------------------------------------------===//
+  // Accessors for working with types.
+  //
+
+  /// Return the type id for the type. This will return one of the TypeID enum
+  /// elements defined above.
+  TypeID getTypeID() const { return ID; }
+
+  /// Return true if this is 'void'.
+  bool isVoidTy() const { return getTypeID() == VoidTyID; }
+
+  /// Return true if this is 'half', a 16-bit IEEE fp type.
+  bool isHalfTy() const { return getTypeID() == HalfTyID; }
+
+  /// Return true if this is 'float', a 32-bit IEEE fp type.
+  bool isFloatTy() const { return getTypeID() == FloatTyID; }
+
+  /// Return true if this is 'double', a 64-bit IEEE fp type.
+  bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
+
+  /// Return true if this is x86 long double.
+  bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
+
+  /// Return true if this is 'fp128'.
+  bool isFP128Ty() const { return getTypeID() == FP128TyID; }
+
+  /// Return true if this is powerpc long double.
+  bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
+
+  /// Return true if this is one of the six floating-point types
+  bool isFloatingPointTy() const {
+    return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
+           getTypeID() == DoubleTyID ||
+           getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
+           getTypeID() == PPC_FP128TyID;
+  }
+
+  const fltSemantics &getFltSemantics() const {
+    switch (getTypeID()) {
+    case HalfTyID: return APFloat::IEEEhalf();
+    case FloatTyID: return APFloat::IEEEsingle();
+    case DoubleTyID: return APFloat::IEEEdouble();
+    case X86_FP80TyID: return APFloat::x87DoubleExtended();
+    case FP128TyID: return APFloat::IEEEquad();
+    case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
+    default: llvm_unreachable("Invalid floating type");
+    }
+  }
+
+  /// Return true if this is X86 MMX.
+  bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
+
+  /// Return true if this is a FP type or a vector of FP.
+  bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
+
+  /// Return true if this is 'label'.
+  bool isLabelTy() const { return getTypeID() == LabelTyID; }
+
+  /// Return true if this is 'metadata'.
+  bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
+
+  /// Return true if this is 'token'.
+  bool isTokenTy() const { return getTypeID() == TokenTyID; }
+
+  /// True if this is an instance of IntegerType.
+  bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
+
+  /// Return true if this is an IntegerType of the given width.
+  bool isIntegerTy(unsigned Bitwidth) const;
+
+  /// Return true if this is an integer type or a vector of integer types.
+  bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
+
+  /// Return true if this is an integer type or a vector of integer types of
+  /// the given width.
+  bool isIntOrIntVectorTy(unsigned BitWidth) const {
+    return getScalarType()->isIntegerTy(BitWidth);
+  }
+
+  /// True if this is an instance of FunctionType.
+  bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
+
+  /// True if this is an instance of StructType.
+  bool isStructTy() const { return getTypeID() == StructTyID; }
+
+  /// True if this is an instance of ArrayType.
+  bool isArrayTy() const { return getTypeID() == ArrayTyID; }
+
+  /// True if this is an instance of PointerType.
+  bool isPointerTy() const { return getTypeID() == PointerTyID; }
+
+  /// Return true if this is a pointer type or a vector of pointer types.
+  bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
+
+  /// True if this is an instance of VectorType.
+  bool isVectorTy() const { return getTypeID() == VectorTyID; }
+
+  /// Return true if this type could be converted with a lossless BitCast to
+  /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
+  /// same size only where no re-interpretation of the bits is done.
+  /// @brief Determine if this type could be losslessly bitcast to Ty
+  bool canLosslesslyBitCastTo(Type *Ty) const;
+
+  /// Return true if this type is empty, that is, it has no elements or all of
+  /// its elements are empty.
+  bool isEmptyTy() const;
+
+  /// Return true if the type is "first class", meaning it is a valid type for a
+  /// Value.
+  bool isFirstClassType() const {
+    return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
+  }
+
+  /// Return true if the type is a valid type for a register in codegen. This
+  /// includes all first-class types except struct and array types.
+  bool isSingleValueType() const {
+    return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
+           isPointerTy() || isVectorTy();
+  }
+
+  /// Return true if the type is an aggregate type. This means it is valid as
+  /// the first operand of an insertvalue or extractvalue instruction. This
+  /// includes struct and array types, but does not include vector types.
+  bool isAggregateType() const {
+    return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
+  }
+
+  /// Return true if it makes sense to take the size of this type. To get the
+  /// actual size for a particular target, use the DataLayout subsystem.
+  bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
+    // If it's a primitive, it is always sized.
+    if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
+        getTypeID() == PointerTyID ||
+        getTypeID() == X86_MMXTyID)
+      return true;
+    // If it is not something that can have a size (e.g. a function or label),
+    // it doesn't have a size.
+    if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
+        getTypeID() != VectorTyID)
+      return false;
+    // Otherwise we have to try harder to decide.
+    return isSizedDerivedType(Visited);
+  }
+
+  /// Return the basic size of this type if it is a primitive type. These are
+  /// fixed by LLVM and are not target-dependent.
+  /// This will return zero if the type does not have a size or is not a
+  /// primitive type.
+  ///
+  /// Note that this may not reflect the size of memory allocated for an
+  /// instance of the type or the number of bytes that are written when an
+  /// instance of the type is stored to memory. The DataLayout class provides
+  /// additional query functions to provide this information.
+  ///
+  unsigned getPrimitiveSizeInBits() const LLVM_READONLY;
+
+  /// If this is a vector type, return the getPrimitiveSizeInBits value for the
+  /// element type. Otherwise return the getPrimitiveSizeInBits value for this
+  /// type.
+  unsigned getScalarSizeInBits() const LLVM_READONLY;
+
+  /// Return the width of the mantissa of this type. This is only valid on
+  /// floating-point types. If the FP type does not have a stable mantissa (e.g.
+  /// ppc long double), this method returns -1.
+  int getFPMantissaWidth() const;
+
+  /// If this is a vector type, return the element type, otherwise return
+  /// 'this'.
+  Type *getScalarType() const {
+    if (isVectorTy())
+      return getVectorElementType();
+    return const_cast<Type*>(this);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Type Iteration support.
+  //
+  using subtype_iterator = Type * const *;
+
+  subtype_iterator subtype_begin() const { return ContainedTys; }
+  subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
+  ArrayRef<Type*> subtypes() const {
+    return makeArrayRef(subtype_begin(), subtype_end());
+  }
+
+  using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
+
+  subtype_reverse_iterator subtype_rbegin() const {
+    return subtype_reverse_iterator(subtype_end());
+  }
+  subtype_reverse_iterator subtype_rend() const {
+    return subtype_reverse_iterator(subtype_begin());
+  }
+
+  /// This method is used to implement the type iterator (defined at the end of
+  /// the file). For derived types, this returns the types 'contained' in the
+  /// derived type.
+  Type *getContainedType(unsigned i) const {
+    assert(i < NumContainedTys && "Index out of range!");
+    return ContainedTys[i];
+  }
+
+  /// Return the number of types in the derived type.
+  unsigned getNumContainedTypes() const { return NumContainedTys; }
+
+  //===--------------------------------------------------------------------===//
+  // Helper methods corresponding to subclass methods.  This forces a cast to
+  // the specified subclass and calls its accessor.  "getVectorNumElements" (for
+  // example) is shorthand for cast<VectorType>(Ty)->getNumElements().  This is
+  // only intended to cover the core methods that are frequently used, helper
+  // methods should not be added here.
+
+  inline unsigned getIntegerBitWidth() const;
+
+  inline Type *getFunctionParamType(unsigned i) const;
+  inline unsigned getFunctionNumParams() const;
+  inline bool isFunctionVarArg() const;
+
+  inline StringRef getStructName() const;
+  inline unsigned getStructNumElements() const;
+  inline Type *getStructElementType(unsigned N) const;
+
+  inline Type *getSequentialElementType() const {
+    assert(isSequentialType(getTypeID()) && "Not a sequential type!");
+    return ContainedTys[0];
+  }
+
+  inline uint64_t getArrayNumElements() const;
+
+  Type *getArrayElementType() const {
+    assert(getTypeID() == ArrayTyID);
+    return ContainedTys[0];
+  }
+
+  inline unsigned getVectorNumElements() const;
+  Type *getVectorElementType() const {
+    assert(getTypeID() == VectorTyID);
+    return ContainedTys[0];
+  }
+
+  Type *getPointerElementType() const {
+    assert(getTypeID() == PointerTyID);
+    return ContainedTys[0];
+  }
+
+  /// Get the address space of this pointer or pointer vector type.
+  inline unsigned getPointerAddressSpace() const;
+
+  //===--------------------------------------------------------------------===//
+  // Static members exported by the Type class itself.  Useful for getting
+  // instances of Type.
+  //
+
+  /// Return a type based on an identifier.
+  static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
+
+  //===--------------------------------------------------------------------===//
+  // These are the builtin types that are always available.
+  //
+  static Type *getVoidTy(LLVMContext &C);
+  static Type *getLabelTy(LLVMContext &C);
+  static Type *getHalfTy(LLVMContext &C);
+  static Type *getFloatTy(LLVMContext &C);
+  static Type *getDoubleTy(LLVMContext &C);
+  static Type *getMetadataTy(LLVMContext &C);
+  static Type *getX86_FP80Ty(LLVMContext &C);
+  static Type *getFP128Ty(LLVMContext &C);
+  static Type *getPPC_FP128Ty(LLVMContext &C);
+  static Type *getX86_MMXTy(LLVMContext &C);
+  static Type *getTokenTy(LLVMContext &C);
+  static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
+  static IntegerType *getInt1Ty(LLVMContext &C);
+  static IntegerType *getInt8Ty(LLVMContext &C);
+  static IntegerType *getInt16Ty(LLVMContext &C);
+  static IntegerType *getInt32Ty(LLVMContext &C);
+  static IntegerType *getInt64Ty(LLVMContext &C);
+  static IntegerType *getInt128Ty(LLVMContext &C);
+  template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
+    int noOfBits = sizeof(ScalarTy) * CHAR_BIT;
+    if (std::is_integral<ScalarTy>::value) {
+      return (Type*) Type::getIntNTy(C, noOfBits);
+    } else if (std::is_floating_point<ScalarTy>::value) {
+      switch (noOfBits) {
+      case 32:
+        return Type::getFloatTy(C);
+      case 64:
+        return Type::getDoubleTy(C);
+      }
+    }
+    llvm_unreachable("Unsupported type in Type::getScalarTy");
+  }
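+  // For example (illustrative): Type::getScalarTy<float>(C) yields the float
+  // type on hosts where float is 32 bits wide, and Type::getScalarTy<int>(C)
+  // yields an integer type whose width matches the host 'int' (typically i32).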
+
+  //===--------------------------------------------------------------------===//
+  // Convenience methods for getting pointer types with one of the above builtin
+  // types as pointee.
+  //
+  static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
+  static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
+  static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
+
+  /// Return a pointer to the current type. This is equivalent to
+  /// PointerType::get(Foo, AddrSpace).
+  PointerType *getPointerTo(unsigned AddrSpace = 0) const;
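+  // Illustrative equivalence: Type::getInt8PtrTy(C, AS) produces the same
+  // type as Type::getInt8Ty(C)->getPointerTo(AS); the *PtrTy helpers above
+  // are shorthand for that two-step form.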
+
+private:
+  /// Derived types like structures and arrays are sized iff all of the members
+  /// of the type are sized as well. Since asking for their size is relatively
+  /// uncommon, this operation is kept out-of-line.
+  bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
+};
+
+// Printing of types.
+inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
+  T.print(OS);
+  return OS;
+}
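+// Illustrative usage of the stream operator above (assuming 'Ty' is a Type*
+// for a struct of an i32 and an i32*; errs() comes from
+// llvm/Support/raw_ostream.h):
+//
+//   errs() << *Ty << "\n";   // prints something like "{ i32, i32* }"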
+
+// allow isa<PointerType>(x) to work without DerivedTypes.h included.
+template <> struct isa_impl<PointerType, Type> {
+  static inline bool doit(const Type &Ty) {
+    return Ty.getTypeID() == Type::PointerTyID;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Provide specializations of GraphTraits to be able to treat a type as a
+// graph of sub types.
+
+template <> struct GraphTraits<Type *> {
+  using NodeRef = Type *;
+  using ChildIteratorType = Type::subtype_iterator;
+
+  static NodeRef getEntryNode(Type *T) { return T; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->subtype_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->subtype_end(); }
+};
+
+template <> struct GraphTraits<const Type*> {
+  using NodeRef = const Type *;
+  using ChildIteratorType = Type::subtype_iterator;
+
+  static NodeRef getEntryNode(NodeRef T) { return T; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->subtype_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->subtype_end(); }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)
+
+/* Specialized opaque type conversions.
+ */
+inline Type **unwrap(LLVMTypeRef* Tys) {
+  return reinterpret_cast<Type**>(Tys);
+}
+
+inline LLVMTypeRef *wrap(Type **Tys) {
+  return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_TYPE_H
diff --git a/linux-x64/clang/include/llvm/IR/TypeBuilder.h b/linux-x64/clang/include/llvm/IR/TypeBuilder.h
new file mode 100644
index 0000000..d2c6f00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/TypeBuilder.h
@@ -0,0 +1,407 @@
+//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeBuilder class, which is used as a convenient way to
+// create LLVM types with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPEBUILDER_H
+#define LLVM_IR_TYPEBUILDER_H
+
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include <climits>
+
+namespace llvm {
+
+/// TypeBuilder - This provides a uniform API for looking up types
+/// known at compile time.  To support cross-compilation, we define a
+/// series of tag types in the llvm::types namespace, like i<N>,
+/// ieee_float, ppc_fp128, etc.  TypeBuilder<T, false> allows T to be
+/// any of these, a native C type (whose size may depend on the host
+/// compiler), or a pointer, function, or struct type built out of
+/// these.  TypeBuilder<T, true> removes native C types from this set
+/// to guarantee that its result is suitable for cross-compilation.
+/// We define the primitive types, pointer types, and functions up to
+/// 5 arguments here, but to use this class with your own types,
+/// you'll need to specialize it.  For example, say you want to call a
+/// function defined externally as:
+///
+/// \code{.cpp}
+///
+///   struct MyType {
+///     int32 a;
+///     int32 *b;
+///     void *array[1];  // Intended as a flexible array.
+///   };
+///   int8 AFunction(struct MyType *value);
+///
+/// \endcode
+///
+/// You'll want to use
+///   Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(Context), ...)
+/// to declare the function, but when you first try this, your compiler will
+/// complain that TypeBuilder<MyType, true>::get() doesn't exist. To fix this,
+/// write:
+///
+/// \code{.cpp}
+///
+///   namespace llvm {
+///   template<bool xcompile> class TypeBuilder<MyType, xcompile> {
+///   public:
+///     static StructType *get(LLVMContext &Context) {
+///       // If you cache this result, be sure to cache it separately
+///       // for each LLVMContext.
+///       return StructType::get(
+///         TypeBuilder<types::i<32>, xcompile>::get(Context),
+///         TypeBuilder<types::i<32>*, xcompile>::get(Context),
+///         TypeBuilder<types::i<8>*[], xcompile>::get(Context));
+///     }
+///
+///     // You may find this a convenient place to put some constants
+///     // to help with getelementptr.  They don't have any effect on
+///     // the operation of TypeBuilder.
+///     enum Fields {
+///       FIELD_A,
+///       FIELD_B,
+///       FIELD_ARRAY
+///     };
+///   };
+///   }  // namespace llvm
+///
+/// \endcode
+///
+/// TypeBuilder cannot handle recursive types or types you only know at runtime.
+/// If you try to give it a recursive type, it will deadlock, infinitely
+/// recurse, or do something similarly undesirable.
+template<typename T, bool cross_compilable> class TypeBuilder {};
+
+// Types for use with cross-compilable TypeBuilders.  These correspond
+// exactly with an LLVM-native type.
+namespace types {
+/// i<N> corresponds to the LLVM IntegerType with N bits.
+template<uint32_t num_bits> class i {};
+
+// The following classes represent the LLVM floating types.
+class ieee_float {};
+class ieee_double {};
+class x86_fp80 {};
+class fp128 {};
+class ppc_fp128 {};
+// X86 MMX.
+class x86_mmx {};
+}  // namespace types
+
+// LLVM doesn't have const or volatile types.
+template<typename T, bool cross> class TypeBuilder<const T, cross>
+  : public TypeBuilder<T, cross> {};
+template<typename T, bool cross> class TypeBuilder<volatile T, cross>
+  : public TypeBuilder<T, cross> {};
+template<typename T, bool cross> class TypeBuilder<const volatile T, cross>
+  : public TypeBuilder<T, cross> {};
+
+// Pointers
+template<typename T, bool cross> class TypeBuilder<T*, cross> {
+public:
+  static PointerType *get(LLVMContext &Context) {
+    return PointerType::getUnqual(TypeBuilder<T,cross>::get(Context));
+  }
+};
+
+/// There is no support for references
+template<typename T, bool cross> class TypeBuilder<T&, cross> {};
+
+// Arrays
+template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> {
+public:
+  static ArrayType *get(LLVMContext &Context) {
+    return ArrayType::get(TypeBuilder<T, cross>::get(Context), N);
+  }
+};
+/// LLVM uses an array of length 0 to represent an unknown-length array.
+template<typename T, bool cross> class TypeBuilder<T[], cross> {
+public:
+  static ArrayType *get(LLVMContext &Context) {
+    return ArrayType::get(TypeBuilder<T, cross>::get(Context), 0);
+  }
+};
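+// Illustrative sketch of the array specializations above (assuming an
+// LLVMContext 'C'):
+//
+//   ArrayType *A10 = TypeBuilder<types::i<8>[10], true>::get(C); // [10 x i8]
+//   ArrayType *A0  = TypeBuilder<types::i<8>[], true>::get(C);   // [0 x i8]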
+
+// Define the C integral types only for TypeBuilder<T, false>.
+//
+// C integral types do not have a defined size. It would be nice to use the
+// stdint.h-defined typedefs that do have defined sizes, but we'd run into the
+// following problem:
+//
+// On an ILP32 machine, stdint.h might define:
+//
+//   typedef int int32_t;
+//   typedef long long int64_t;
+//   typedef long size_t;
+//
+// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of
+// TypeBuilder<size_t> would fail.  We couldn't define TypeBuilder<size_t> in
+// addition to the defined-size types because we'd get duplicate definitions on
+// platforms where stdint.h instead defines:
+//
+//   typedef int int32_t;
+//   typedef long long int64_t;
+//   typedef int size_t;
+//
+// So we define all the primitive C types and nothing else.
+#define DEFINE_INTEGRAL_TYPEBUILDER(T) \
+template<> class TypeBuilder<T, false> { \
+public: \
+  static IntegerType *get(LLVMContext &Context) { \
+    return IntegerType::get(Context, sizeof(T) * CHAR_BIT); \
+  } \
+}; \
+template<> class TypeBuilder<T, true> { \
+  /* We provide a definition here so users don't accidentally */ \
+  /* define these types to work. */ \
+}
+DEFINE_INTEGRAL_TYPEBUILDER(char);
+DEFINE_INTEGRAL_TYPEBUILDER(signed char);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned char);
+DEFINE_INTEGRAL_TYPEBUILDER(short);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned short);
+DEFINE_INTEGRAL_TYPEBUILDER(int);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned int);
+DEFINE_INTEGRAL_TYPEBUILDER(long);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned long);
+#ifdef _MSC_VER
+DEFINE_INTEGRAL_TYPEBUILDER(__int64);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64);
+#else /* _MSC_VER */
+DEFINE_INTEGRAL_TYPEBUILDER(long long);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long);
+#endif /* _MSC_VER */
+#undef DEFINE_INTEGRAL_TYPEBUILDER
+
+template<uint32_t num_bits, bool cross>
+class TypeBuilder<types::i<num_bits>, cross> {
+public:
+  static IntegerType *get(LLVMContext &C) {
+    return IntegerType::get(C, num_bits);
+  }
+};
+
+template<> class TypeBuilder<float, false> {
+public:
+  static Type *get(LLVMContext& C) {
+    return Type::getFloatTy(C);
+  }
+};
+template<> class TypeBuilder<float, true> {};
+
+template<> class TypeBuilder<double, false> {
+public:
+  static Type *get(LLVMContext& C) {
+    return Type::getDoubleTy(C);
+  }
+};
+template<> class TypeBuilder<double, true> {};
+
+template<bool cross> class TypeBuilder<types::ieee_float, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getFloatTy(C); }
+};
+template<bool cross> class TypeBuilder<types::ieee_double, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getDoubleTy(C); }
+};
+template<bool cross> class TypeBuilder<types::x86_fp80, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getX86_FP80Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::fp128, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getFP128Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
+public:
+  static Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
+};
+
+template<bool cross> class TypeBuilder<void, cross> {
+public:
+  static Type *get(LLVMContext &C) {
+    return Type::getVoidTy(C);
+  }
+};
+
+/// void* is disallowed in LLVM types, but it occurs often enough in C code that
+/// we special case it.
+template<> class TypeBuilder<void*, false>
+  : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<const void*, false>
+  : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<volatile void*, false>
+  : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<const volatile void*, false>
+  : public TypeBuilder<types::i<8>*, false> {};
+
+template<typename R, bool cross> class TypeBuilder<R(), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context), false);
+  }
+};
+template<typename R, typename A1, bool cross> class TypeBuilder<R(A1), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, false);
+  }
+};
+template<typename R, typename A1, typename A2, bool cross>
+class TypeBuilder<R(A1, A2), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, false);
+  }
+};
+template<typename R, typename A1, typename A2, typename A3, bool cross>
+class TypeBuilder<R(A1, A2, A3), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, false);
+  }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+         bool cross>
+class TypeBuilder<R(A1, A2, A3, A4), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+      TypeBuilder<A4, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, false);
+  }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+         typename A5, bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, A5), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+      TypeBuilder<A4, cross>::get(Context),
+      TypeBuilder<A5, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, false);
+  }
+};
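+// Illustrative sketch: composing the specializations above into a
+// cross-compilable 'i32 (i8*, i64)' signature (assuming an LLVMContext 'C'):
+//
+//   FunctionType *FT =
+//       TypeBuilder<types::i<32>(types::i<8>*, types::i<64>), true>::get(C);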
+
+template<typename R, bool cross> class TypeBuilder<R(...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context), true);
+  }
+};
+template<typename R, typename A1, bool cross>
+class TypeBuilder<R(A1, ...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context), params, true);
+  }
+};
+template<typename R, typename A1, typename A2, bool cross>
+class TypeBuilder<R(A1, A2, ...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, true);
+  }
+};
+template<typename R, typename A1, typename A2, typename A3, bool cross>
+class TypeBuilder<R(A1, A2, A3, ...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, true);
+  }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+         bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, ...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+      TypeBuilder<A4, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, true);
+  }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+         typename A5, bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> {
+public:
+  static FunctionType *get(LLVMContext &Context) {
+    Type *params[] = {
+      TypeBuilder<A1, cross>::get(Context),
+      TypeBuilder<A2, cross>::get(Context),
+      TypeBuilder<A3, cross>::get(Context),
+      TypeBuilder<A4, cross>::get(Context),
+      TypeBuilder<A5, cross>::get(Context),
+    };
+    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+                             params, true);
+  }
+};
+
+}  // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/IR/TypeFinder.h b/linux-x64/clang/include/llvm/IR/TypeFinder.h
new file mode 100644
index 0000000..c050c38
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/TypeFinder.h
@@ -0,0 +1,82 @@
+//===- llvm/IR/TypeFinder.h - Class to find used struct types ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the TypeFinder class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPEFINDER_H
+#define LLVM_IR_TYPEFINDER_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <cstddef>
+#include <vector>
+
+namespace llvm {
+
+class MDNode;
+class Module;
+class StructType;
+class Type;
+class Value;
+
+/// TypeFinder - Walk over a module, identifying all of the types that are
+/// used by the module.
+class TypeFinder {
+  // To avoid walking constant expressions multiple times and other IR
+  // objects, we keep several helper maps.
+  DenseSet<const Value*> VisitedConstants;
+  DenseSet<const MDNode *> VisitedMetadata;
+  DenseSet<Type*> VisitedTypes;
+
+  std::vector<StructType*> StructTypes;
+  bool OnlyNamed = false;
+
+public:
+  TypeFinder() = default;
+
+  void run(const Module &M, bool onlyNamed);
+  void clear();
+
+  using iterator = std::vector<StructType*>::iterator;
+  using const_iterator = std::vector<StructType*>::const_iterator;
+
+  iterator begin() { return StructTypes.begin(); }
+  iterator end() { return StructTypes.end(); }
+
+  const_iterator begin() const { return StructTypes.begin(); }
+  const_iterator end() const { return StructTypes.end(); }
+
+  bool empty() const { return StructTypes.empty(); }
+  size_t size() const { return StructTypes.size(); }
+  iterator erase(iterator I, iterator E) { return StructTypes.erase(I, E); }
+
+  StructType *&operator[](unsigned Idx) { return StructTypes[Idx]; }
+
+  DenseSet<const MDNode *> &getVisitedMetadata() { return VisitedMetadata; }
+
+private:
+  /// incorporateType - This method adds the type to the list of used
+  /// structures if it's not in there already.
+  void incorporateType(Type *Ty);
+
+  /// incorporateValue - This method is used to walk operand lists finding types
+  /// hiding in constant expressions and other operands that won't be walked in
+  /// other ways.  GlobalValues, basic blocks, instructions, and inst operands
+  /// are all explicitly enumerated.
+  void incorporateValue(const Value *V);
+
+  /// incorporateMDNode - This method is used to walk the operands of an MDNode
+  /// to find types hiding within.
+  void incorporateMDNode(const MDNode *V);
+};
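+// Illustrative usage sketch (assuming 'M' is a fully constructed Module):
+//
+//   TypeFinder Finder;
+//   Finder.run(M, /*onlyNamed=*/true);
+//   for (StructType *ST : Finder)   // the named struct types used by M
+//     ST->dump();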
+
+} // end namespace llvm
+
+#endif // LLVM_IR_TYPEFINDER_H
diff --git a/linux-x64/clang/include/llvm/IR/Use.h b/linux-x64/clang/include/llvm/IR/Use.h
new file mode 100644
index 0000000..0ac1393
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Use.h
@@ -0,0 +1,182 @@
+//===- llvm/Use.h - Definition of the Use class -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This defines the Use class.  The Use class represents the operand of an
+/// instruction or some other User instance which refers to a Value.  The Use
+/// class keeps the "use list" of the referenced value up to date.
+///
+/// Pointer tagging is used to efficiently find the User corresponding to a Use
+/// without having to store a User pointer in every Use. A User is preceded in
+/// memory by all the Uses corresponding to its operands, and the low bits of
+/// one of the fields (Prev) of the Use class are used to encode offsets to be
+/// able to find that User given a pointer to any Use. For details, see:
+///
+///   http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USE_H
+#define LLVM_IR_USE_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+template <typename> struct simplify_type;
+class User;
+class Value;
+
+/// \brief A Use represents the edge between a Value definition and its users.
+///
+/// This is notionally a two-dimensional linked list. It supports traversing
+/// all of the uses for a particular value definition. It also supports jumping
+/// directly to the used value when we arrive from the User's operands, and
+/// jumping directly to the User when we arrive from the Value's uses.
+///
+/// The pointer to the used Value is explicit, and the pointer to the User is
+/// implicit. The implicit pointer is found via a waymarking algorithm
+/// described in the programmer's manual:
+///
+///   http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
+///
+/// This is essentially the single most memory intensive object in LLVM because
+/// of the number of uses in the system. At the same time, the constant time
+/// operations it allows are essential to many optimizations having reasonable
+/// time complexity.
+class Use {
+public:
+  Use(const Use &U) = delete;
+
+  /// \brief Provide a fast substitute to std::swap<Use>
+  /// that also works with less standard-compliant compilers
+  void swap(Use &RHS);
+
+  /// Pointer traits for the UserRef PointerIntPair. This ensures we always
+  /// use the LSB regardless of pointer alignment on different targets.
+  struct UserRefPointerTraits {
+    static inline void *getAsVoidPointer(User *P) { return P; }
+
+    static inline User *getFromVoidPointer(void *P) {
+      return (User *)P;
+    }
+
+    enum { NumLowBitsAvailable = 1 };
+  };
+
+  // A type for the word following an array of hung-off Uses in memory, which is
+  // a pointer back to their User with the bottom bit set.
+  using UserRef = PointerIntPair<User *, 1, unsigned, UserRefPointerTraits>;
+
+  /// Pointer traits for the Prev PointerIntPair. This ensures we always use
+  /// the two LSBs regardless of pointer alignment on different targets.
+  struct PrevPointerTraits {
+    static inline void *getAsVoidPointer(Use **P) { return P; }
+
+    static inline Use **getFromVoidPointer(void *P) {
+      return (Use **)P;
+    }
+
+    enum { NumLowBitsAvailable = 2 };
+  };
+
+private:
+  /// Destructor - Only for zap()
+  ~Use() {
+    if (Val)
+      removeFromList();
+  }
+
+  enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };
+
+  /// Constructor
+  Use(PrevPtrTag tag) { Prev.setInt(tag); }
+
+public:
+  friend class Value;
+
+  operator Value *() const { return Val; }
+  Value *get() const { return Val; }
+
+  /// \brief Returns the User that contains this Use.
+  ///
+  /// For an instruction operand, for example, this will return the
+  /// instruction.
+  User *getUser() const LLVM_READONLY;
+
+  inline void set(Value *Val);
+
+  inline Value *operator=(Value *RHS);
+  inline const Use &operator=(const Use &RHS);
+
+  Value *operator->() { return Val; }
+  const Value *operator->() const { return Val; }
+
+  Use *getNext() const { return Next; }
+
+  /// \brief Return the operand # of this use in its User.
+  unsigned getOperandNo() const;
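+  // Illustrative invariant relating the accessors above: for any Use 'U'
+  // reaching a User's operand,
+  //
+  //   U.getUser()->getOperand(U.getOperandNo()) == U.get()
+  //
+  // i.e. the operand number identifies this Use within its User.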
+
+  /// \brief Initializes the waymarking tags on an array of Uses.
+  ///
+  /// This sets up the array of Uses such that getUser() can find the User from
+  /// any of those Uses.
+  static Use *initTags(Use *Start, Use *Stop);
+
+  /// \brief Destroys Use operands when the number of operands of
+  /// a User changes.
+  static void zap(Use *Start, const Use *Stop, bool del = false);
+
+private:
+  const Use *getImpliedUser() const LLVM_READONLY;
+
+  Value *Val = nullptr;
+  Use *Next;
+  PointerIntPair<Use **, 2, PrevPtrTag, PrevPointerTraits> Prev;
+
+  void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
+
+  void addToList(Use **List) {
+    Next = *List;
+    if (Next)
+      Next->setPrev(&Next);
+    setPrev(List);
+    *List = this;
+  }
+
+  void removeFromList() {
+    Use **StrippedPrev = Prev.getPointer();
+    *StrippedPrev = Next;
+    if (Next)
+      Next->setPrev(StrippedPrev);
+  }
+};
+
+/// \brief Allow clients to treat uses just like values when using
+/// casting operators.
+template <> struct simplify_type<Use> {
+  using SimpleType = Value *;
+
+  static SimpleType getSimplifiedValue(Use &Val) { return Val.get(); }
+};
+template <> struct simplify_type<const Use> {
+  using SimpleType = /*const*/ Value *;
+
+  static SimpleType getSimplifiedValue(const Use &Val) { return Val.get(); }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_USE_H
diff --git a/linux-x64/clang/include/llvm/IR/UseListOrder.h b/linux-x64/clang/include/llvm/IR/UseListOrder.h
new file mode 100644
index 0000000..a8b394f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/UseListOrder.h
@@ -0,0 +1,44 @@
+//===- llvm/IR/UseListOrder.h - LLVM Use List Order -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has structures and command-line options for preserving use-list
+// order.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USELISTORDER_H
+#define LLVM_IR_USELISTORDER_H
+
+#include <cstddef>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class Value;
+
+/// \brief Structure to hold a use-list order.
+struct UseListOrder {
+  const Value *V = nullptr;
+  const Function *F = nullptr;
+  std::vector<unsigned> Shuffle;
+
+  UseListOrder(const Value *V, const Function *F, size_t ShuffleSize)
+      : V(V), F(F), Shuffle(ShuffleSize) {}
+
+  UseListOrder() = default;
+  UseListOrder(UseListOrder &&) = default;
+  UseListOrder &operator=(UseListOrder &&) = default;
+};
+
+using UseListOrderStack = std::vector<UseListOrder>;
+
+} // end namespace llvm
+
+#endif // LLVM_IR_USELISTORDER_H
diff --git a/linux-x64/clang/include/llvm/IR/User.h b/linux-x64/clang/include/llvm/IR/User.h
new file mode 100644
index 0000000..9d30be0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/User.h
@@ -0,0 +1,335 @@
+//===- llvm/User.h - User class definition ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class defines the interface that one who uses a Value must implement.
+// Each instance of the Value class keeps track of which Users have handles
+// to it.
+//
+//  * Instructions are the largest class of Users.
+//  * Constants may be users of other constants (think arrays and stuff)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USER_H
+#define LLVM_IR_USER_H
+
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+template <typename T> class MutableArrayRef;
+
+/// \brief Compile-time customization of User operands.
+///
+/// Customizes operand-related allocators and accessors.
+template <class>
+struct OperandTraits;
+
+class User : public Value {
+  template <unsigned>
+  friend struct HungoffOperandTraits;
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE inline static void *
+  allocateFixedOperandUser(size_t, unsigned, unsigned);
+
+protected:
+  /// Allocate a User with an operand pointer co-allocated.
+  ///
+  /// This is used for subclasses which need to allocate a variable number
+  /// of operands, i.e., 'hung off uses'.
+  void *operator new(size_t Size);
+
+  /// Allocate a User with the operands co-allocated.
+  ///
+  /// This is used for subclasses which have a fixed number of operands.
+  void *operator new(size_t Size, unsigned Us);
+
+  /// Allocate a User with the operands co-allocated.  If DescBytes is non-zero
+  /// then allocate an additional DescBytes bytes before the operands. These
+  /// bytes can be accessed by calling getDescriptor.
+  ///
+  /// DescBytes needs to be divisible by sizeof(void *).  The allocated
+  /// descriptor, if any, is aligned to sizeof(void *) bytes.
+  ///
+  /// This is used for subclasses which have a fixed number of operands.
+  void *operator new(size_t Size, unsigned Us, unsigned DescBytes);
+
+  User(Type *ty, unsigned vty, Use *, unsigned NumOps)
+      : Value(ty, vty) {
+    assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
+    NumUserOperands = NumOps;
+    // If we have hung off uses, then the operand list should initially be
+    // null.
+    assert((!HasHungOffUses || !getOperandList()) &&
+           "Error in initializing hung off uses for User");
+  }
+
+  /// \brief Allocate the array of Uses, followed by a pointer
+  /// (with bottom bit set) to the User.
+  /// \param IsPhi identifies callers which are phi nodes and which need
+  /// N BasicBlock* allocated along with N
+  void allocHungoffUses(unsigned N, bool IsPhi = false);
+
+  /// \brief Grow the number of hung off uses.  Note that allocHungoffUses
+  /// should be called if there are no uses.
+  void growHungoffUses(unsigned N, bool IsPhi = false);
+
+protected:
+  ~User() = default; // Use deleteValue() to delete a generic Instruction.
+
+public:
+  User(const User &) = delete;
+
+  /// \brief Free memory allocated for User and Use objects.
+  void operator delete(void *Usr);
+  /// \brief Placement delete - required by std, called if the ctor throws.
+  void operator delete(void *Usr, unsigned) {
+    // Note: If a subclass manipulates state that is needed to compute the Usr
+    // memory pointer (e.g. NumUserOperands), its operator delete must restore
+    // that state to the original value, since the dtor of that class is not
+    // called if the ctor fails.
+    User::operator delete(Usr);
+
+#ifndef LLVM_ENABLE_EXCEPTIONS
+    llvm_unreachable("Constructor throws?");
+#endif
+  }
+  /// \brief Placement delete - required by std, called if the ctor throws.
+  void operator delete(void *Usr, unsigned, bool) {
+    // Note: If a subclass manipulates state that is needed to compute the Usr
+    // memory pointer (e.g. NumUserOperands), its operator delete must restore
+    // that state to the original value, since the dtor of that class is not
+    // called if the ctor fails.
+    User::operator delete(Usr);
+
+#ifndef LLVM_ENABLE_EXCEPTIONS
+    llvm_unreachable("Constructor throws?");
+#endif
+  }
+
+protected:
+  template <int Idx, typename U> static Use &OpFrom(const U *that) {
+    return Idx < 0
+      ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx]
+      : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx];
+  }
+
+  template <int Idx> Use &Op() {
+    return OpFrom<Idx>(this);
+  }
+  template <int Idx> const Use &Op() const {
+    return OpFrom<Idx>(this);
+  }
+
+private:
+  const Use *getHungOffOperands() const {
+    return *(reinterpret_cast<const Use *const *>(this) - 1);
+  }
+
+  Use *&getHungOffOperands() { return *(reinterpret_cast<Use **>(this) - 1); }
+
+  const Use *getIntrusiveOperands() const {
+    return reinterpret_cast<const Use *>(this) - NumUserOperands;
+  }
+
+  Use *getIntrusiveOperands() {
+    return reinterpret_cast<Use *>(this) - NumUserOperands;
+  }
+
+  void setOperandList(Use *NewList) {
+    assert(HasHungOffUses &&
+           "Setting operand list only required for hung off uses");
+    getHungOffOperands() = NewList;
+  }
+
+public:
+  const Use *getOperandList() const {
+    return HasHungOffUses ? getHungOffOperands() : getIntrusiveOperands();
+  }
+  Use *getOperandList() {
+    return const_cast<Use *>(static_cast<const User *>(this)->getOperandList());
+  }
+
+  Value *getOperand(unsigned i) const {
+    assert(i < NumUserOperands && "getOperand() out of range!");
+    return getOperandList()[i];
+  }
+
+  void setOperand(unsigned i, Value *Val) {
+    assert(i < NumUserOperands && "setOperand() out of range!");
+    assert((!isa<Constant>((const Value*)this) ||
+            isa<GlobalValue>((const Value*)this)) &&
+           "Cannot mutate a constant with setOperand!");
+    getOperandList()[i] = Val;
+  }
+
+  const Use &getOperandUse(unsigned i) const {
+    assert(i < NumUserOperands && "getOperandUse() out of range!");
+    return getOperandList()[i];
+  }
+  Use &getOperandUse(unsigned i) {
+    assert(i < NumUserOperands && "getOperandUse() out of range!");
+    return getOperandList()[i];
+  }
+
+  unsigned getNumOperands() const { return NumUserOperands; }
+
+  /// Returns the descriptor co-allocated with this User instance.
+  ArrayRef<const uint8_t> getDescriptor() const;
+
+  /// Returns the descriptor co-allocated with this User instance.
+  MutableArrayRef<uint8_t> getDescriptor();
+
+  /// Set the number of operands on a GlobalVariable.
+  ///
+  /// GlobalVariable always allocates space for a single operand, but
+  /// doesn't always use it.
+  ///
+  /// FIXME: Because the number of operands is used to find the start of
+  /// the allocated memory in operator delete, we need to always think we have
+  /// 1 operand before delete.
+  void setGlobalVariableNumOperands(unsigned NumOps) {
+    assert(NumOps <= 1 && "GlobalVariable can only have 0 or 1 operands");
+    NumUserOperands = NumOps;
+  }
+
+  /// \brief Subclasses with hung off uses need to manage the operand count
+  /// themselves.  In these instances, the operand count isn't used to find the
+  /// OperandList, so there's no issue in having the operand count change.
+  void setNumHungOffUseOperands(unsigned NumOps) {
+    assert(HasHungOffUses && "Must have hung off uses to use this method");
+    assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
+    NumUserOperands = NumOps;
+  }
+
+  // ---------------------------------------------------------------------------
+  // Operand Iterator interface...
+  //
+  using op_iterator = Use*;
+  using const_op_iterator = const Use*;
+  using op_range = iterator_range<op_iterator>;
+  using const_op_range = iterator_range<const_op_iterator>;
+
+  op_iterator       op_begin()       { return getOperandList(); }
+  const_op_iterator op_begin() const { return getOperandList(); }
+  op_iterator       op_end()         {
+    return getOperandList() + NumUserOperands;
+  }
+  const_op_iterator op_end()   const {
+    return getOperandList() + NumUserOperands;
+  }
+  op_range operands() {
+    return op_range(op_begin(), op_end());
+  }
+  const_op_range operands() const {
+    return const_op_range(op_begin(), op_end());
+  }
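+  // Illustrative sketch: rewriting operands through their Use objects
+  // ('Usr' is a hypothetical User*; 'OldV'/'NewV' are hypothetical Value*
+  // of matching type):
+  //
+  //   for (Use &U : Usr->operands())
+  //     if (U.get() == OldV)
+  //       U.set(NewV);
+  //
+  // This is essentially what replaceUsesOfWith (declared below) does.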
+
+  /// \brief Iterator for directly iterating over the operand Values.
+  struct value_op_iterator
+      : iterator_adaptor_base<value_op_iterator, op_iterator,
+                              std::random_access_iterator_tag, Value *,
+                              ptrdiff_t, Value *, Value *> {
+    explicit value_op_iterator(Use *U = nullptr) : iterator_adaptor_base(U) {}
+
+    Value *operator*() const { return *I; }
+    Value *operator->() const { return operator*(); }
+  };
+
+  value_op_iterator value_op_begin() {
+    return value_op_iterator(op_begin());
+  }
+  value_op_iterator value_op_end() {
+    return value_op_iterator(op_end());
+  }
+  iterator_range<value_op_iterator> operand_values() {
+    return make_range(value_op_begin(), value_op_end());
+  }
+
+  struct const_value_op_iterator
+      : iterator_adaptor_base<const_value_op_iterator, const_op_iterator,
+                              std::random_access_iterator_tag, const Value *,
+                              ptrdiff_t, const Value *, const Value *> {
+    explicit const_value_op_iterator(const Use *U = nullptr) :
+      iterator_adaptor_base(U) {}
+
+    const Value *operator*() const { return *I; }
+    const Value *operator->() const { return operator*(); }
+  };
+
+  const_value_op_iterator value_op_begin() const {
+    return const_value_op_iterator(op_begin());
+  }
+  const_value_op_iterator value_op_end() const {
+    return const_value_op_iterator(op_end());
+  }
+  iterator_range<const_value_op_iterator> operand_values() const {
+    return make_range(value_op_begin(), value_op_end());
+  }
+
+  /// \brief Drop all references to operands.
+  ///
+  /// This function is in charge of "letting go" of all objects that this User
+  /// refers to.  This allows one to 'delete' a whole class at a time, even
+  /// though there may be circular references...  First all references are
+  /// dropped, and all use counts go to zero.  Then everything is deleted for
+  /// real.  Note that no operations are valid on an object that has "dropped
+  /// all references", except operator delete.
+  void dropAllReferences() {
+    for (Use &U : operands())
+      U.set(nullptr);
+  }
+
+  /// \brief Replace uses of one Value with another.
+  ///
+  /// Replaces all references to the "From" definition with references to the
+  /// "To" definition.
+  void replaceUsesOfWith(Value *From, Value *To);
+
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) || isa<Constant>(V);
+  }
+};
+
+// Either Use objects, or a Use pointer can be prepended to User.
+static_assert(alignof(Use) >= alignof(User),
+              "Alignment is insufficient after objects prepended to User");
+static_assert(alignof(Use *) >= alignof(User),
+              "Alignment is insufficient after objects prepended to User");
+
+template<> struct simplify_type<User::op_iterator> {
+  using SimpleType = Value*;
+
+  static SimpleType getSimplifiedValue(User::op_iterator &Val) {
+    return Val->get();
+  }
+};
+template<> struct simplify_type<User::const_op_iterator> {
+  using SimpleType = /*const*/ Value*;
+
+  static SimpleType getSimplifiedValue(User::const_op_iterator &Val) {
+    return Val->get();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_USER_H
diff --git a/linux-x64/clang/include/llvm/IR/Value.def b/linux-x64/clang/include/llvm/IR/Value.def
new file mode 100644
index 0000000..e2ddba0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Value.def
@@ -0,0 +1,117 @@
+//===-------- llvm/IR/Value.def - File that describes Values ---v-*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains descriptions of the various LLVM values.  This is
+// used as a central place for enumerating the different values.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+//
+#if !(defined HANDLE_GLOBAL_VALUE || defined HANDLE_CONSTANT ||                \
+      defined HANDLE_INSTRUCTION || defined HANDLE_INLINE_ASM_VALUE ||         \
+      defined HANDLE_METADATA_VALUE || defined HANDLE_VALUE ||                 \
+      defined HANDLE_CONSTANT_MARKER || defined HANDLE_MEMORY_VALUE)
+#error "Missing macro definition of HANDLE_VALUE*"
+#endif
+
+#ifndef HANDLE_MEMORY_VALUE
+#define HANDLE_MEMORY_VALUE(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_GLOBAL_VALUE
+#define HANDLE_GLOBAL_VALUE(ValueName) HANDLE_CONSTANT(ValueName)
+#endif
+
+#ifndef HANDLE_CONSTANT
+#define HANDLE_CONSTANT(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_INSTRUCTION
+#define HANDLE_INSTRUCTION(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_INLINE_ASM_VALUE
+#define HANDLE_INLINE_ASM_VALUE(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_METADATA_VALUE
+#define HANDLE_METADATA_VALUE(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_VALUE
+#define HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_CONSTANT_MARKER
+#define HANDLE_CONSTANT_MARKER(MarkerName, ValueName)
+#endif
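+// Illustrative sketch of the intended usage: define only the handler you care
+// about before including this file, and the defaults above absorb the rest.
+// For example, to emit a switch case per constant kind (the Name##Val
+// enumerators correspond to Value's ValueTy enum):
+//
+//   #define HANDLE_CONSTANT(Name) case Value::Name##Val: /* ... */ break;
+//   #include "llvm/IR/Value.def"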
+
+// Having constant first makes the range check for isa<Constant> faster
+// and smaller by one operation.
+
+// Constant
+HANDLE_GLOBAL_VALUE(Function)
+HANDLE_GLOBAL_VALUE(GlobalAlias)
+HANDLE_GLOBAL_VALUE(GlobalIFunc)
+HANDLE_GLOBAL_VALUE(GlobalVariable)
+HANDLE_CONSTANT(BlockAddress)
+HANDLE_CONSTANT(ConstantExpr)
+
+// ConstantAggregate.
+HANDLE_CONSTANT(ConstantArray)
+HANDLE_CONSTANT(ConstantStruct)
+HANDLE_CONSTANT(ConstantVector)
+
+// ConstantData.
+HANDLE_CONSTANT(UndefValue)
+HANDLE_CONSTANT(ConstantAggregateZero)
+HANDLE_CONSTANT(ConstantDataArray)
+HANDLE_CONSTANT(ConstantDataVector)
+HANDLE_CONSTANT(ConstantInt)
+HANDLE_CONSTANT(ConstantFP)
+HANDLE_CONSTANT(ConstantPointerNull)
+HANDLE_CONSTANT(ConstantTokenNone)
+
+HANDLE_CONSTANT_MARKER(ConstantFirstVal, Function)
+HANDLE_CONSTANT_MARKER(ConstantLastVal, ConstantTokenNone)
+HANDLE_CONSTANT_MARKER(ConstantDataFirstVal, UndefValue)
+HANDLE_CONSTANT_MARKER(ConstantDataLastVal, ConstantTokenNone)
+HANDLE_CONSTANT_MARKER(ConstantAggregateFirstVal, ConstantArray)
+HANDLE_CONSTANT_MARKER(ConstantAggregateLastVal, ConstantVector)
+
+HANDLE_VALUE(Argument)
+HANDLE_VALUE(BasicBlock)
+
+
+HANDLE_METADATA_VALUE(MetadataAsValue)
+HANDLE_INLINE_ASM_VALUE(InlineAsm)
+
+// FIXME: It's awkward that Value.def knows about classes in Analysis. While
+// this doesn't introduce a strict link or include dependency, we should remove
+// the circular dependency eventually.
+HANDLE_MEMORY_VALUE(MemoryUse)
+HANDLE_MEMORY_VALUE(MemoryDef)
+HANDLE_MEMORY_VALUE(MemoryPhi)
+
+HANDLE_INSTRUCTION(Instruction)
+// Enum values starting at InstructionVal are used for Instructions;
+// don't add new values here!
+
+#undef HANDLE_MEMORY_VALUE
+#undef HANDLE_GLOBAL_VALUE
+#undef HANDLE_CONSTANT
+#undef HANDLE_INSTRUCTION
+#undef HANDLE_METADATA_VALUE
+#undef HANDLE_INLINE_ASM_VALUE
+#undef HANDLE_VALUE
+#undef HANDLE_CONSTANT_MARKER
diff --git a/linux-x64/clang/include/llvm/IR/Value.h b/linux-x64/clang/include/llvm/IR/Value.h
new file mode 100644
index 0000000..d848fe9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Value.h
@@ -0,0 +1,872 @@
+//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Value class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUE_H
+#define LLVM_IR_VALUE_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Use.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <iterator>
+#include <memory>
+
+namespace llvm {
+
+class APInt;
+class Argument;
+class BasicBlock;
+class Constant;
+class ConstantData;
+class ConstantAggregate;
+class DataLayout;
+class Function;
+class GlobalAlias;
+class GlobalIFunc;
+class GlobalIndirectSymbol;
+class GlobalObject;
+class GlobalValue;
+class GlobalVariable;
+class InlineAsm;
+class Instruction;
+class LLVMContext;
+class Module;
+class ModuleSlotTracker;
+class raw_ostream;
+template<typename ValueTy> class StringMapEntry;
+class StringRef;
+class Twine;
+class Type;
+class User;
+
+using ValueName = StringMapEntry<Value *>;
+
+//===----------------------------------------------------------------------===//
+//                                 Value Class
+//===----------------------------------------------------------------------===//
+
+/// \brief LLVM Value Representation
+///
+/// This is a very important LLVM class. It is the base class of all values
+/// computed by a program that may be used as operands to other values. Value is
+/// the super class of other important classes such as Instruction and Function.
+/// All Values have a Type. Type is not a subclass of Value. Some values can
+/// have a name and they belong to some Module.  Setting the name on the Value
+/// automatically updates the module's symbol table.
+///
+/// Every value has a "use list" that keeps track of which other Values are
+/// using this Value.  A Value can also have an arbitrary number of ValueHandle
+/// objects that watch it and listen to RAUW and Destroy events.  See
+/// llvm/IR/ValueHandle.h for details.
+class Value {
+  // The least-significant bit of the first word of Value *must* be zero:
+  //   http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
+  Type *VTy;
+  Use *UseList;
+
+  friend class ValueAsMetadata; // Allow access to IsUsedByMD.
+  friend class ValueHandleBase;
+
+  const unsigned char SubclassID;   // Subclass identifier (for isa/dyn_cast)
+  unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
+
+protected:
+  /// \brief Hold subclass data that can be dropped.
+  ///
+  /// This member is similar to SubclassData; however, it is for holding
+  /// information which may be used to aid optimization, but which may be
+  /// cleared to zero without affecting conservative interpretation.
+  unsigned char SubclassOptionalData : 7;
+
+private:
+  /// \brief Hold arbitrary subclass data.
+  ///
+  /// This member is defined by this class, but is not used for anything.
+  /// Subclasses can use it to hold whatever state they find useful.  This
+  /// field is initialized to zero by the ctor.
+  unsigned short SubclassData;
+
+protected:
+  /// \brief The number of operands in the subclass.
+  ///
+  /// This member is defined by this class, but not used for anything.
+  /// Subclasses can use it to store their number of operands, if they have
+  /// any.
+  ///
+  /// This is stored here to save space in User on 64-bit hosts.  Since most
+  /// instances of Value have operands, 32-bit hosts aren't significantly
+  /// affected.
+  ///
+  /// Note, this should *NOT* be used directly by any class other than User.
+  /// User uses this value to find the Use list.
+  enum : unsigned { NumUserOperandsBits = 28 };
+  unsigned NumUserOperands : NumUserOperandsBits;
+
+  // Use the same type as the bitfield above so that MSVC will pack them.
+  unsigned IsUsedByMD : 1;
+  unsigned HasName : 1;
+  unsigned HasHungOffUses : 1;
+  unsigned HasDescriptor : 1;
+
+private:
+  template <typename UseT> // UseT == 'Use' or 'const Use'
+  class use_iterator_impl
+      : public std::iterator<std::forward_iterator_tag, UseT *> {
+    friend class Value;
+
+    UseT *U;
+
+    explicit use_iterator_impl(UseT *u) : U(u) {}
+
+  public:
+    use_iterator_impl() : U() {}
+
+    bool operator==(const use_iterator_impl &x) const { return U == x.U; }
+    bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
+
+    use_iterator_impl &operator++() { // Preincrement
+      assert(U && "Cannot increment end iterator!");
+      U = U->getNext();
+      return *this;
+    }
+
+    use_iterator_impl operator++(int) { // Postincrement
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    UseT &operator*() const {
+      assert(U && "Cannot dereference end iterator!");
+      return *U;
+    }
+
+    UseT *operator->() const { return &operator*(); }
+
+    operator use_iterator_impl<const UseT>() const {
+      return use_iterator_impl<const UseT>(U);
+    }
+  };
+
+  template <typename UserTy> // UserTy == 'User' or 'const User'
+  class user_iterator_impl
+      : public std::iterator<std::forward_iterator_tag, UserTy *> {
+    use_iterator_impl<Use> UI;
+    explicit user_iterator_impl(Use *U) : UI(U) {}
+    friend class Value;
+
+  public:
+    user_iterator_impl() = default;
+
+    bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
+    bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
+
+    /// \brief Returns true if this iterator is equal to user_end() on the value.
+    bool atEnd() const { return *this == user_iterator_impl(); }
+
+    user_iterator_impl &operator++() { // Preincrement
+      ++UI;
+      return *this;
+    }
+
+    user_iterator_impl operator++(int) { // Postincrement
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    // Retrieve a pointer to the current User.
+    UserTy *operator*() const {
+      return UI->getUser();
+    }
+
+    UserTy *operator->() const { return operator*(); }
+
+    operator user_iterator_impl<const UserTy>() const {
+      return user_iterator_impl<const UserTy>(*UI);
+    }
+
+    Use &getUse() const { return *UI; }
+  };
+
+protected:
+  Value(Type *Ty, unsigned scid);
+
+  /// Value's destructor should be virtual by design, but that would require
+  /// that Value and all of its subclasses have a vtable that effectively
+  /// duplicates the information in the value ID. As a size optimization, the
+  /// destructor has been protected, and the caller should manually call
+  /// deleteValue.
+  ~Value(); // Use deleteValue() to delete a generic Value.
+
+public:
+  Value(const Value &) = delete;
+  Value &operator=(const Value &) = delete;
+
+  /// Delete a pointer to a generic Value.
+  void deleteValue();
+
+  /// \brief Support for debugging, callable in GDB: V->dump()
+  void dump() const;
+
+  /// \brief Implement operator<< on Value.
+  /// @{
+  void print(raw_ostream &O, bool IsForDebug = false) const;
+  void print(raw_ostream &O, ModuleSlotTracker &MST,
+             bool IsForDebug = false) const;
+  /// @}
+
+  /// \brief Print the name of this Value out to the specified raw_ostream.
+  ///
+  /// This is useful when you just want to print 'int %reg126', not the
+  /// instruction that generated it. If you specify a Module for context, then
+  /// even constants get pretty-printed; for example, the type of a null
+  /// pointer is printed symbolically.
+  /// @{
+  void printAsOperand(raw_ostream &O, bool PrintType = true,
+                      const Module *M = nullptr) const;
+  void printAsOperand(raw_ostream &O, bool PrintType,
+                      ModuleSlotTracker &MST) const;
+  /// @}
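+  // Illustrative: V->printAsOperand(errs(), /*PrintType=*/true, M) prints
+  // just the operand form, e.g. "i32 %x", rather than the instruction that
+  // defines V; passing a Module gives the printer symbol-table context.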
+
+  /// \brief All values are typed, get the type of this value.
+  Type *getType() const { return VTy; }
+
+  /// \brief All values hold a context through their type.
+  LLVMContext &getContext() const;
+
+  /// \brief All values can potentially be named.
+  bool hasName() const { return HasName; }
+  ValueName *getValueName() const;
+  void setValueName(ValueName *VN);
+
+private:
+  void destroyValueName();
+  void doRAUW(Value *New, bool NoMetadata);
+  void setNameImpl(const Twine &Name);
+
+public:
+  /// \brief Return a constant reference to the value's name.
+  ///
+  /// This is guaranteed to return the same reference as long as the value is
+  /// not modified.  If the value has a name, this does a hashtable lookup, so
+  /// it's not free.
+  StringRef getName() const;
+
+  /// \brief Change the name of the value.
+  ///
+  /// Choose a new unique name if the provided name is taken.
+  ///
+  /// \param Name The new name; or "" if the value's name should be removed.
+  void setName(const Twine &Name);
+
+  /// \brief Transfer the name from V to this value.
+  ///
+  /// After taking V's name, sets V's name to empty.
+  ///
+  /// \note It is an error to call V->takeName(V).
+  void takeName(Value *V);
+
+  /// \brief Change all uses of this to point to a new Value.
+  ///
+  /// Go through the uses list for this definition and make each use point to
+  /// "V" instead of "this".  After this completes, 'this's use list is
+  /// guaranteed to be empty.
+  void replaceAllUsesWith(Value *V);
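+  // Illustrative sketch ('Old' and 'New' are hypothetical Value* of the same
+  // type):
+  //
+  //   Old->replaceAllUsesWith(New);
+  //   assert(Old->use_empty() && "every former use of Old now points at New");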
+
+  /// \brief Change non-metadata uses of this to point to a new Value.
+  ///
+  /// Go through the uses list for this definition and make each use point to
+  /// "V" instead of "this". This function skips metadata entries in the list.
+  void replaceNonMetadataUsesWith(Value *V);
+
+  /// replaceUsesOutsideBlock - Go through the uses list for this definition and
+  /// make each use point to "V" instead of "this" when the use is outside the
+  /// block. 'This's use list is expected to have at least one element.
+  /// Unlike replaceAllUsesWith this function does not support basic block
+  /// values or constant users.
+  void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
+
+  /// replaceUsesExceptBlockAddr - Go through the uses list for this definition
+  /// and make each use point to "V" instead of "this". 'This's use list is
+  /// expected to have at least one element.
+  /// Unlike replaceAllUsesWith this function skips blockaddr uses.
+  void replaceUsesExceptBlockAddr(Value *New);
+
+  //----------------------------------------------------------------------
+  // Methods for handling the chain of uses of this Value.
+  //
+  // Materializing a function can introduce new uses, so these methods come in
+  // two variants:
+  // The methods that start with materialized_ check the uses that are
+  // currently known given which functions are materialized. Be very careful
+  // when using them since you might not get all uses.
+  // The methods that don't start with materialized_ assert that the module
+  // is fully materialized.
+  void assertModuleIsMaterializedImpl() const;
+  // This indirection exists so we can keep assertModuleIsMaterializedImpl()
+  // around in release builds of Value.cpp (to be linked with other code built
+  // in debug mode) while avoiding calls to it from any release-built code.
+  void assertModuleIsMaterialized() const {
+#ifndef NDEBUG
+    assertModuleIsMaterializedImpl();
+#endif
+  }
+
+  bool use_empty() const {
+    assertModuleIsMaterialized();
+    return UseList == nullptr;
+  }
+
+  bool materialized_use_empty() const {
+    return UseList == nullptr;
+  }
+
+  using use_iterator = use_iterator_impl<Use>;
+  using const_use_iterator = use_iterator_impl<const Use>;
+
+  use_iterator materialized_use_begin() { return use_iterator(UseList); }
+  const_use_iterator materialized_use_begin() const {
+    return const_use_iterator(UseList);
+  }
+  use_iterator use_begin() {
+    assertModuleIsMaterialized();
+    return materialized_use_begin();
+  }
+  const_use_iterator use_begin() const {
+    assertModuleIsMaterialized();
+    return materialized_use_begin();
+  }
+  use_iterator use_end() { return use_iterator(); }
+  const_use_iterator use_end() const { return const_use_iterator(); }
+  iterator_range<use_iterator> materialized_uses() {
+    return make_range(materialized_use_begin(), use_end());
+  }
+  iterator_range<const_use_iterator> materialized_uses() const {
+    return make_range(materialized_use_begin(), use_end());
+  }
+  iterator_range<use_iterator> uses() {
+    assertModuleIsMaterialized();
+    return materialized_uses();
+  }
+  iterator_range<const_use_iterator> uses() const {
+    assertModuleIsMaterialized();
+    return materialized_uses();
+  }
+
+  bool user_empty() const {
+    assertModuleIsMaterialized();
+    return UseList == nullptr;
+  }
+
+  using user_iterator = user_iterator_impl<User>;
+  using const_user_iterator = user_iterator_impl<const User>;
+
+  user_iterator materialized_user_begin() { return user_iterator(UseList); }
+  const_user_iterator materialized_user_begin() const {
+    return const_user_iterator(UseList);
+  }
+  user_iterator user_begin() {
+    assertModuleIsMaterialized();
+    return materialized_user_begin();
+  }
+  const_user_iterator user_begin() const {
+    assertModuleIsMaterialized();
+    return materialized_user_begin();
+  }
+  user_iterator user_end() { return user_iterator(); }
+  const_user_iterator user_end() const { return const_user_iterator(); }
+  User *user_back() {
+    assertModuleIsMaterialized();
+    return *materialized_user_begin();
+  }
+  const User *user_back() const {
+    assertModuleIsMaterialized();
+    return *materialized_user_begin();
+  }
+  iterator_range<user_iterator> materialized_users() {
+    return make_range(materialized_user_begin(), user_end());
+  }
+  iterator_range<const_user_iterator> materialized_users() const {
+    return make_range(materialized_user_begin(), user_end());
+  }
+  iterator_range<user_iterator> users() {
+    assertModuleIsMaterialized();
+    return materialized_users();
+  }
+  iterator_range<const_user_iterator> users() const {
+    assertModuleIsMaterialized();
+    return materialized_users();
+  }
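+
+  // Illustrative sketch (not in the original header): uses() visits Use
+  // edges, users() the Users that own them, e.g.
+  //
+  //   for (User *U : V->users())
+  //     if (auto *I = dyn_cast<Instruction>(U))
+  //       visit(I);   // visit is a hypothetical callback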
+
+  /// \brief Return true if there is exactly one user of this value.
+  ///
+  /// This is specialized because it is a common request and does not require
+  /// traversing the whole use list.
+  bool hasOneUse() const {
+    const_use_iterator I = use_begin(), E = use_end();
+    if (I == E) return false;
+    return ++I == E;
+  }
+
+  /// \brief Return true if this Value has exactly N users.
+  bool hasNUses(unsigned N) const;
+
+  /// \brief Return true if this value has N users or more.
+  ///
+  /// This is logically equivalent to getNumUses() >= N.
+  bool hasNUsesOrMore(unsigned N) const;
+
+  /// \brief Check if this value is used in the specified basic block.
+  bool isUsedInBasicBlock(const BasicBlock *BB) const;
+
+  /// \brief This method computes the number of uses of this Value.
+  ///
+  /// This is a linear time operation.  Use hasOneUse, hasNUses, or
+  /// hasNUsesOrMore to check for specific values.
+  unsigned getNumUses() const;
+
+  /// \brief This method should only be used by the Use class.
+  void addUse(Use &U) { U.addToList(&UseList); }
+
+  /// \brief Concrete subclass of this.
+  ///
+  /// An enumeration for keeping track of the concrete subclass of Value that
+  /// is actually instantiated. Values of this enumeration are kept in the
+  /// Value classes SubclassID field. They are used for concrete type
+  /// identification.
+  enum ValueTy {
+#define HANDLE_VALUE(Name) Name##Val,
+#include "llvm/IR/Value.def"
+
+    // Markers:
+#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
+#include "llvm/IR/Value.def"
+  };
+
+  /// \brief Return an ID for the concrete type of this object.
+  ///
+  /// This is used to implement the classof checks.  This should not be used
+  /// for any other purpose, as the values may change as LLVM evolves.  Also,
+  /// note that for instructions, the Instruction's opcode is added to
+  /// InstructionVal. So this means three things:
+  /// # there is no value with code InstructionVal (no opcode==0).
+  /// # there are more possible values for the value type than in ValueTy enum.
+  /// # the InstructionVal enumerator must be the highest valued enumerator in
+  ///   the ValueTy enum.
+  unsigned getValueID() const {
+    return SubclassID;
+  }
+
+  /// \brief Return the raw optional flags value contained in this value.
+  ///
+  /// This should only be used when testing two Values for equivalence.
+  unsigned getRawSubclassOptionalData() const {
+    return SubclassOptionalData;
+  }
+
+  /// \brief Clear the optional flags contained in this value.
+  void clearSubclassOptionalData() {
+    SubclassOptionalData = 0;
+  }
+
+  /// \brief Check the optional flags for equality.
+  bool hasSameSubclassOptionalData(const Value *V) const {
+    return SubclassOptionalData == V->SubclassOptionalData;
+  }
+
+  /// \brief Return true if there is a value handle associated with this value.
+  bool hasValueHandle() const { return HasValueHandle; }
+
+  /// \brief Return true if there is metadata referencing this value.
+  bool isUsedByMetadata() const { return IsUsedByMD; }
+
+  /// \brief Return true if this value is a swifterror value.
+  ///
+  /// swifterror values can be either a function argument or an alloca with a
+  /// swifterror attribute.
+  bool isSwiftError() const;
+
+  /// \brief Strip off pointer casts, all-zero GEPs, and aliases.
+  ///
+  /// Returns the original uncasted value.  If this is called on a non-pointer
+  /// value, it returns 'this'.
+  const Value *stripPointerCasts() const;
+  Value *stripPointerCasts() {
+    return const_cast<Value *>(
+                         static_cast<const Value *>(this)->stripPointerCasts());
+  }
+
+  /// \brief Strip off pointer casts, all-zero GEPs, aliases and barriers.
+  ///
+  /// Returns the original uncasted value.  If this is called on a non-pointer
+  /// value, it returns 'this'. This function should be used only in
+  /// Alias analysis.
+  const Value *stripPointerCastsAndBarriers() const;
+  Value *stripPointerCastsAndBarriers() {
+    return const_cast<Value *>(
+        static_cast<const Value *>(this)->stripPointerCastsAndBarriers());
+  }
+
+  /// \brief Strip off pointer casts and all-zero GEPs.
+  ///
+  /// Returns the original uncasted value.  If this is called on a non-pointer
+  /// value, it returns 'this'.
+  const Value *stripPointerCastsNoFollowAliases() const;
+  Value *stripPointerCastsNoFollowAliases() {
+    return const_cast<Value *>(
+          static_cast<const Value *>(this)->stripPointerCastsNoFollowAliases());
+  }
+
+  /// \brief Strip off pointer casts and all-constant inbounds GEPs.
+  ///
+  /// Returns the original pointer value.  If this is called on a non-pointer
+  /// value, it returns 'this'.
+  const Value *stripInBoundsConstantOffsets() const;
+  Value *stripInBoundsConstantOffsets() {
+    return const_cast<Value *>(
+              static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
+  }
+
+  /// \brief Accumulate offsets from \a stripInBoundsConstantOffsets().
+  ///
+  /// Stores the resulting constant offset stripped into the APInt provided.
+  /// The provided APInt will be extended or truncated as needed to be the
+  /// correct bitwidth for an offset of this pointer type.
+  ///
+  /// If this is called on a non-pointer value, it returns 'this'.
+  const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+                                                         APInt &Offset) const;
+  Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+                                                   APInt &Offset) {
+    return const_cast<Value *>(static_cast<const Value *>(this)
+        ->stripAndAccumulateInBoundsConstantOffsets(DL, Offset));
+  }
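+
+  // Illustrative sketch (not part of the original header), assuming Ptr is a
+  // Value* and DL is the module's DataLayout:
+  //
+  //   APInt Offset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
+  //   Value *Base = Ptr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+  //   // Base plus Offset bytes now locates Ptr, provided only inbounds,
+  //   // all-constant GEPs were stripped.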
+
+  /// \brief Strip off pointer casts and inbounds GEPs.
+  ///
+  /// Returns the original pointer value.  If this is called on a non-pointer
+  /// value, it returns 'this'.
+  const Value *stripInBoundsOffsets() const;
+  Value *stripInBoundsOffsets() {
+    return const_cast<Value *>(
+                      static_cast<const Value *>(this)->stripInBoundsOffsets());
+  }
+
+  /// \brief Returns the number of bytes known to be dereferenceable for the
+  /// pointer value.
+  ///
+  /// If CanBeNull is set by this function, the pointer can either be null or
+  /// be dereferenceable up to the returned number of bytes.
+  uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
+                                          bool &CanBeNull) const;
+
+  /// \brief Returns an alignment of the pointer value.
+  ///
+  /// Returns an alignment which is either specified explicitly, e.g. via
+  /// align attribute of a function argument, or guaranteed by DataLayout.
+  unsigned getPointerAlignment(const DataLayout &DL) const;
+
+  /// \brief Translate PHI node to its predecessor from the given basic block.
+  ///
+  /// If this value is a PHI node with CurBB as its parent, return the value in
+  /// the PHI node corresponding to PredBB.  If not, return ourself.  This is
+  /// useful if you want to know the value something has in a predecessor
+  /// block.
+  const Value *DoPHITranslation(const BasicBlock *CurBB,
+                                const BasicBlock *PredBB) const;
+  Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
+    return const_cast<Value *>(
+             static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
+  }
+
+  /// \brief The maximum alignment for instructions.
+  ///
+  /// This is the greatest alignment value supported by load, store, and alloca
+  /// instructions, and global values.
+  static const unsigned MaxAlignmentExponent = 29;
+  static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
+
+  /// \brief Mutate the type of this Value to be of the specified type.
+  ///
+  /// Note that this is an extremely dangerous operation which can create
+  /// completely invalid IR very easily.  It is strongly recommended that you
+  /// recreate IR objects with the right types instead of mutating them in
+  /// place.
+  void mutateType(Type *Ty) {
+    VTy = Ty;
+  }
+
+  /// \brief Sort the use-list.
+  ///
+  /// Sorts the Value's use-list by Cmp using a stable mergesort.  Cmp is
+  /// expected to compare two \a Use references.
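+  ///
+  /// A minimal sketch of a comparator (illustrative, not from the original
+  /// header): order uses by the operand slot they occupy in their user.
+  /// \code
+  ///   V->sortUseList([](const Use &L, const Use &R) {
+  ///     return L.getOperandNo() < R.getOperandNo();
+  ///   });
+  /// \endcode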
+  template <class Compare> void sortUseList(Compare Cmp);
+
+  /// \brief Reverse the use-list.
+  void reverseUseList();
+
+private:
+  /// \brief Merge two lists together.
+  ///
+  /// Merges \c L and \c R using \c Cmp.  To enable stable sorts, always pushes
+  /// "equal" items from L before items from R.
+  ///
+  /// \return the first element in the list.
+  ///
+  /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
+  template <class Compare>
+  static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
+    Use *Merged;
+    Use **Next = &Merged;
+
+    while (true) {
+      if (!L) {
+        *Next = R;
+        break;
+      }
+      if (!R) {
+        *Next = L;
+        break;
+      }
+      if (Cmp(*R, *L)) {
+        *Next = R;
+        Next = &R->Next;
+        R = R->Next;
+      } else {
+        *Next = L;
+        Next = &L->Next;
+        L = L->Next;
+      }
+    }
+
+    return Merged;
+  }
+
+protected:
+  unsigned short getSubclassDataFromValue() const { return SubclassData; }
+  void setValueSubclassData(unsigned short D) { SubclassData = D; }
+};
+
+struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
+
+/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
+/// Those don't work because Value and Instruction's destructors are protected,
+/// aren't virtual, and won't destroy the complete object.
+using unique_value = std::unique_ptr<Value, ValueDeleter>;
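+
+// Illustrative use (assuming I is a newly created instruction that has not
+// been inserted into a BasicBlock):
+//
+//   unique_value V(I);   // calls I->deleteValue() when V goes out of scope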
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
+  V.print(OS);
+  return OS;
+}
+
+void Use::set(Value *V) {
+  if (Val) removeFromList();
+  Val = V;
+  if (V) V->addUse(*this);
+}
+
+Value *Use::operator=(Value *RHS) {
+  set(RHS);
+  return RHS;
+}
+
+const Use &Use::operator=(const Use &RHS) {
+  set(RHS.Val);
+  return *this;
+}
+
+template <class Compare> void Value::sortUseList(Compare Cmp) {
+  if (!UseList || !UseList->Next)
+    // No need to sort 0 or 1 uses.
+    return;
+
+  // Note: this function completely ignores Prev pointers until the end when
+  // they're fixed en masse.
+
+  // Create a binomial vector of sorted lists, visiting uses one at a time and
+  // merging lists as necessary.
+  const unsigned MaxSlots = 32;
+  Use *Slots[MaxSlots];
+
+  // Collect the first use, turning it into a single-item list.
+  Use *Next = UseList->Next;
+  UseList->Next = nullptr;
+  unsigned NumSlots = 1;
+  Slots[0] = UseList;
+
+  // Collect all but the last use.
+  while (Next->Next) {
+    Use *Current = Next;
+    Next = Current->Next;
+
+    // Turn Current into a single-item list.
+    Current->Next = nullptr;
+
+    // Save Current in the first available slot, merging on collisions.
+    unsigned I;
+    for (I = 0; I < NumSlots; ++I) {
+      if (!Slots[I])
+        break;
+
+      // Merge two lists, doubling the size of Current and emptying slot I.
+      //
+      // Since the uses in Slots[I] originally preceded those in Current, send
+      // Slots[I] in as the left parameter to maintain a stable sort.
+      Current = mergeUseLists(Slots[I], Current, Cmp);
+      Slots[I] = nullptr;
+    }
+    // Check if this is a new slot.
+    if (I == NumSlots) {
+      ++NumSlots;
+      assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
+    }
+
+    // Found an open slot.
+    Slots[I] = Current;
+  }
+
+  // Merge all the lists together.
+  assert(Next && "Expected one more Use");
+  assert(!Next->Next && "Expected only one Use");
+  UseList = Next;
+  for (unsigned I = 0; I < NumSlots; ++I)
+    if (Slots[I])
+      // Since the uses in Slots[I] originally preceded those in UseList, send
+      // Slots[I] in as the left parameter to maintain a stable sort.
+      UseList = mergeUseLists(Slots[I], UseList, Cmp);
+
+  // Fix the Prev pointers.
+  for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
+    I->setPrev(Prev);
+    Prev = &I->Next;
+  }
+}
+
+// isa - Provide some specializations of isa so that we don't have to include
+// the subtype header files to test to see if the value is a subclass...
+//
+template <> struct isa_impl<Constant, Value> {
+  static inline bool doit(const Value &Val) {
+    static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
+    return Val.getValueID() <= Value::ConstantLastVal;
+  }
+};
+
+template <> struct isa_impl<ConstantData, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() >= Value::ConstantDataFirstVal &&
+           Val.getValueID() <= Value::ConstantDataLastVal;
+  }
+};
+
+template <> struct isa_impl<ConstantAggregate, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
+           Val.getValueID() <= Value::ConstantAggregateLastVal;
+  }
+};
+
+template <> struct isa_impl<Argument, Value> {
+  static inline bool doit (const Value &Val) {
+    return Val.getValueID() == Value::ArgumentVal;
+  }
+};
+
+template <> struct isa_impl<InlineAsm, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::InlineAsmVal;
+  }
+};
+
+template <> struct isa_impl<Instruction, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() >= Value::InstructionVal;
+  }
+};
+
+template <> struct isa_impl<BasicBlock, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::BasicBlockVal;
+  }
+};
+
+template <> struct isa_impl<Function, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::FunctionVal;
+  }
+};
+
+template <> struct isa_impl<GlobalVariable, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::GlobalVariableVal;
+  }
+};
+
+template <> struct isa_impl<GlobalAlias, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::GlobalAliasVal;
+  }
+};
+
+template <> struct isa_impl<GlobalIFunc, Value> {
+  static inline bool doit(const Value &Val) {
+    return Val.getValueID() == Value::GlobalIFuncVal;
+  }
+};
+
+template <> struct isa_impl<GlobalIndirectSymbol, Value> {
+  static inline bool doit(const Value &Val) {
+    return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
+  }
+};
+
+template <> struct isa_impl<GlobalValue, Value> {
+  static inline bool doit(const Value &Val) {
+    return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
+  }
+};
+
+template <> struct isa_impl<GlobalObject, Value> {
+  static inline bool doit(const Value &Val) {
+    return isa<GlobalVariable>(Val) || isa<Function>(Val);
+  }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
+
+// Specialized opaque value conversions.
+inline Value **unwrap(LLVMValueRef *Vals) {
+  return reinterpret_cast<Value**>(Vals);
+}
+
+template<typename T>
+inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
+#ifndef NDEBUG
+  for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
+    unwrap<T>(*I); // For side effect of calling assert on invalid usage.
+#endif
+  (void)Length;
+  return reinterpret_cast<T**>(Vals);
+}
+
+inline LLVMValueRef *wrap(const Value **Vals) {
+  return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_VALUE_H
diff --git a/linux-x64/clang/include/llvm/IR/ValueHandle.h b/linux-x64/clang/include/llvm/IR/ValueHandle.h
new file mode 100644
index 0000000..b45cc7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ValueHandle.h
@@ -0,0 +1,562 @@
+//===- ValueHandle.h - Value Smart Pointer classes --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ValueHandle class and its sub-classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEHANDLE_H
+#define LLVM_IR_VALUEHANDLE_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+
+namespace llvm {
+
+/// \brief This is the common base class of value handles.
+///
+/// ValueHandles are smart pointers to Values that have special behavior when
+/// the value is deleted or ReplaceAllUsesWith'd.  See the specific handles
+/// below for details.
+class ValueHandleBase {
+  friend class Value;
+
+protected:
+  /// \brief This indicates what sub class the handle actually is.
+  ///
+  /// This is to avoid having a vtable for the light-weight handle pointers. The
+  /// fully general Callback version does have a vtable.
+  enum HandleBaseKind { Assert, Callback, Weak, WeakTracking };
+
+  ValueHandleBase(const ValueHandleBase &RHS)
+      : ValueHandleBase(RHS.PrevPair.getInt(), RHS) {}
+
+  ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
+      : PrevPair(nullptr, Kind), Val(RHS.getValPtr()) {
+    if (isValid(getValPtr()))
+      AddToExistingUseList(RHS.getPrevPtr());
+  }
+
+private:
+  PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
+  ValueHandleBase *Next = nullptr;
+  Value *Val = nullptr;
+
+  void setValPtr(Value *V) { Val = V; }
+
+public:
+  explicit ValueHandleBase(HandleBaseKind Kind)
+      : PrevPair(nullptr, Kind) {}
+  ValueHandleBase(HandleBaseKind Kind, Value *V)
+      : PrevPair(nullptr, Kind), Val(V) {
+    if (isValid(getValPtr()))
+      AddToUseList();
+  }
+
+  ~ValueHandleBase() {
+    if (isValid(getValPtr()))
+      RemoveFromUseList();
+  }
+
+  Value *operator=(Value *RHS) {
+    if (getValPtr() == RHS)
+      return RHS;
+    if (isValid(getValPtr()))
+      RemoveFromUseList();
+    setValPtr(RHS);
+    if (isValid(getValPtr()))
+      AddToUseList();
+    return RHS;
+  }
+
+  Value *operator=(const ValueHandleBase &RHS) {
+    if (getValPtr() == RHS.getValPtr())
+      return RHS.getValPtr();
+    if (isValid(getValPtr()))
+      RemoveFromUseList();
+    setValPtr(RHS.getValPtr());
+    if (isValid(getValPtr()))
+      AddToExistingUseList(RHS.getPrevPtr());
+    return getValPtr();
+  }
+
+  Value *operator->() const { return getValPtr(); }
+  Value &operator*() const { return *getValPtr(); }
+
+protected:
+  Value *getValPtr() const { return Val; }
+
+  static bool isValid(Value *V) {
+    return V &&
+           V != DenseMapInfo<Value *>::getEmptyKey() &&
+           V != DenseMapInfo<Value *>::getTombstoneKey();
+  }
+
+  /// \brief Remove this ValueHandle from its current use list.
+  void RemoveFromUseList();
+
+  /// \brief Clear the underlying pointer without clearing the use list.
+  ///
+  /// This should only be used if a derived class has manually removed the
+  /// handle from the use list.
+  void clearValPtr() { setValPtr(nullptr); }
+
+public:
+  // Callbacks made from Value.
+  static void ValueIsDeleted(Value *V);
+  static void ValueIsRAUWd(Value *Old, Value *New);
+
+private:
+  // Internal implementation details.
+  ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
+  HandleBaseKind getKind() const { return PrevPair.getInt(); }
+  void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); }
+
+  /// \brief Add this ValueHandle to the use list for V.
+  ///
+  /// List is the address of either the head of the list or a Next node within
+  /// the existing use list.
+  void AddToExistingUseList(ValueHandleBase **List);
+
+  /// \brief Add this ValueHandle to the use list after Node.
+  void AddToExistingUseListAfter(ValueHandleBase *Node);
+
+  /// \brief Add this ValueHandle to the use list for V.
+  void AddToUseList();
+};
+
+/// \brief A nullable Value handle.
+///
+/// This is a value handle that points to a value, and nulls itself
+/// out if that value is deleted.
+class WeakVH : public ValueHandleBase {
+public:
+  WeakVH() : ValueHandleBase(Weak) {}
+  WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
+  WeakVH(const WeakVH &RHS)
+      : ValueHandleBase(Weak, RHS) {}
+
+  WeakVH &operator=(const WeakVH &RHS) = default;
+
+  Value *operator=(Value *RHS) {
+    return ValueHandleBase::operator=(RHS);
+  }
+  Value *operator=(const ValueHandleBase &RHS) {
+    return ValueHandleBase::operator=(RHS);
+  }
+
+  operator Value*() const {
+    return getValPtr();
+  }
+};
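+
+// Illustrative behavior sketch (not part of the original header), where I is
+// an Instruction*:
+//
+//   WeakVH H(I);
+//   I->eraseFromParent();   // destroys the instruction
+//   assert(H == nullptr);   // the handle nulled itself out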
+
+// Specialize simplify_type to allow WeakVH to participate in
+// dyn_cast, isa, etc.
+template <> struct simplify_type<WeakVH> {
+  using SimpleType = Value *;
+
+  static SimpleType getSimplifiedValue(WeakVH &WVH) { return WVH; }
+};
+template <> struct simplify_type<const WeakVH> {
+  using SimpleType = Value *;
+
+  static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; }
+};
+
+/// \brief Value handle that is nullable, but tries to track the Value.
+///
+/// This is a value handle that tries hard to point to a Value, even across
+/// RAUW operations, but will null itself out if the value is destroyed.  This
+/// is useful for advisory sorts of information, but should not be used as the
+/// key of a map (since the map would have to rearrange itself when the pointer
+/// changes).
+class WeakTrackingVH : public ValueHandleBase {
+public:
+  WeakTrackingVH() : ValueHandleBase(WeakTracking) {}
+  WeakTrackingVH(Value *P) : ValueHandleBase(WeakTracking, P) {}
+  WeakTrackingVH(const WeakTrackingVH &RHS)
+      : ValueHandleBase(WeakTracking, RHS) {}
+
+  WeakTrackingVH &operator=(const WeakTrackingVH &RHS) = default;
+
+  Value *operator=(Value *RHS) {
+    return ValueHandleBase::operator=(RHS);
+  }
+  Value *operator=(const ValueHandleBase &RHS) {
+    return ValueHandleBase::operator=(RHS);
+  }
+
+  operator Value*() const {
+    return getValPtr();
+  }
+
+  bool pointsToAliveValue() const {
+    return ValueHandleBase::isValid(getValPtr());
+  }
+};
+
+// Specialize simplify_type to allow WeakTrackingVH to participate in
+// dyn_cast, isa, etc.
+template <> struct simplify_type<WeakTrackingVH> {
+  using SimpleType = Value *;
+
+  static SimpleType getSimplifiedValue(WeakTrackingVH &WVH) { return WVH; }
+};
+template <> struct simplify_type<const WeakTrackingVH> {
+  using SimpleType = Value *;
+
+  static SimpleType getSimplifiedValue(const WeakTrackingVH &WVH) {
+    return WVH;
+  }
+};
+
+/// \brief Value handle that asserts if the Value is deleted.
+///
+/// This is a Value Handle that points to a value and asserts out if the value
+/// is destroyed while the handle is still live.  This is very useful for
+/// catching dangling pointer bugs and other things which can be non-obvious.
+/// One particularly useful place to use this is as the Key of a map.  Dangling
+/// pointer bugs often lead to really subtle bugs that only occur if another
+/// object happens to get allocated to the same address as the old one.  Using
+/// an AssertingVH ensures that an assert is triggered as soon as the bad
+/// delete occurs.
+///
+/// Note that an AssertingVH handle does *not* follow values across RAUW
+/// operations.  This means that RAUWs need to explicitly update the
+/// AssertingVH as the value moves.  This is required because in non-assert
+/// mode this class turns into a trivial wrapper around a pointer.
+template <typename ValueTy>
+class AssertingVH
+#ifndef NDEBUG
+  : public ValueHandleBase
+#endif
+  {
+  friend struct DenseMapInfo<AssertingVH<ValueTy>>;
+
+#ifndef NDEBUG
+  Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); }
+  void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); }
+#else
+  Value *ThePtr;
+  Value *getRawValPtr() const { return ThePtr; }
+  void setRawValPtr(Value *P) { ThePtr = P; }
+#endif
+  // Convert a ValueTy*, which may be const, to the raw Value*.
+  static Value *GetAsValue(Value *V) { return V; }
+  static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+  ValueTy *getValPtr() const { return static_cast<ValueTy *>(getRawValPtr()); }
+  void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); }
+
+public:
+#ifndef NDEBUG
+  AssertingVH() : ValueHandleBase(Assert) {}
+  AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {}
+  AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {}
+#else
+  AssertingVH() : ThePtr(nullptr) {}
+  AssertingVH(ValueTy *P) : ThePtr(GetAsValue(P)) {}
+#endif
+
+  operator ValueTy*() const {
+    return getValPtr();
+  }
+
+  ValueTy *operator=(ValueTy *RHS) {
+    setValPtr(RHS);
+    return getValPtr();
+  }
+  ValueTy *operator=(const AssertingVH<ValueTy> &RHS) {
+    setValPtr(RHS.getValPtr());
+    return getValPtr();
+  }
+
+  ValueTy *operator->() const { return getValPtr(); }
+  ValueTy &operator*() const { return *getValPtr(); }
+};
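+
+// Illustrative sketch: AssertingVH is commonly used as a DenseMap key (the
+// DenseMapInfo specialization below enables this) so that deleting a value
+// that is still mapped asserts instead of leaving a dangling entry:
+//
+//   DenseMap<AssertingVH<Instruction>, unsigned> Seen;
+//   Seen[I] = 1;
+//   // Erasing I while the entry remains would trigger the assertion (in
+//   // builds with assertions enabled).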
+
+// Specialize DenseMapInfo to allow AssertingVH to participate in DenseMap.
+template<typename T>
+struct DenseMapInfo<AssertingVH<T>> {
+  static inline AssertingVH<T> getEmptyKey() {
+    AssertingVH<T> Res;
+    Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
+    return Res;
+  }
+
+  static inline AssertingVH<T> getTombstoneKey() {
+    AssertingVH<T> Res;
+    Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
+    return Res;
+  }
+
+  static unsigned getHashValue(const AssertingVH<T> &Val) {
+    return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
+  }
+
+  static bool isEqual(const AssertingVH<T> &LHS, const AssertingVH<T> &RHS) {
+    return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
+                                          RHS.getRawValPtr());
+  }
+};
+
+template <typename T>
+struct isPodLike<AssertingVH<T>> {
+#ifdef NDEBUG
+  static const bool value = true;
+#else
+  static const bool value = false;
+#endif
+};
+
+/// \brief Value handle that tracks a Value across RAUW.
+///
+/// TrackingVH is designed for situations where a client needs to hold a handle
+/// to a Value (or subclass) across some operations which may move that value,
+/// but should never destroy it or replace it with some unacceptable type.
+///
+/// It is an error to attempt to replace a value with one of a type which is
+/// incompatible with any of its outstanding TrackingVHs.
+///
+/// It is an error to read from a TrackingVH that does not point to a valid
+/// value.  A TrackingVH is said to not point to a valid value if either it
+/// has not yet been assigned a value or the value it was tracking has since
+/// been deleted.
+///
+/// Assigning a value to a TrackingVH is always allowed, even if said TrackingVH
+/// no longer points to a valid value.
+template <typename ValueTy> class TrackingVH {
+  WeakTrackingVH InnerHandle;
+
+public:
+  ValueTy *getValPtr() const {
+    assert(InnerHandle.pointsToAliveValue() &&
+           "TrackingVH must be non-null and valid on dereference!");
+
+    // Check that the value is a member of the correct subclass. We would like
+    // to check this property on assignment for better debugging, but we don't
+    // want to require a virtual interface on this VH. Instead we allow RAUW to
+    // replace this value with a value of an invalid type, and check it here.
+    assert(isa<ValueTy>(InnerHandle) &&
+           "Tracked Value was replaced by one with an invalid type!");
+    return cast<ValueTy>(InnerHandle);
+  }
+
+  void setValPtr(ValueTy *P) {
+    // Assigning to a non-valid TrackingVH is fine, so we just unconditionally
+    // assign here.
+    InnerHandle = GetAsValue(P);
+  }
+
+  // Convert a ValueTy*, which may be const, to the type the base
+  // class expects.
+  static Value *GetAsValue(Value *V) { return V; }
+  static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+public:
+  TrackingVH() = default;
+  TrackingVH(ValueTy *P) { setValPtr(P); }
+
+  operator ValueTy*() const {
+    return getValPtr();
+  }
+
+  ValueTy *operator=(ValueTy *RHS) {
+    setValPtr(RHS);
+    return getValPtr();
+  }
+
+  ValueTy *operator->() const { return getValPtr(); }
+  ValueTy &operator*() const { return *getValPtr(); }
+};
+
+/// \brief Value handle with callbacks on RAUW and destruction.
+///
+/// This is a value handle that allows subclasses to define callbacks that run
+/// when the underlying Value has RAUW called on it or is destroyed.  This
+/// class can be used as the key of a map, as long as the user takes it out of
+/// the map before calling setValPtr() (since the map has to rearrange itself
+/// when the pointer changes).  Unlike ValueHandleBase, this class has a vtable.
+class CallbackVH : public ValueHandleBase {
+  virtual void anchor();
+protected:
+  ~CallbackVH() = default;
+  CallbackVH(const CallbackVH &) = default;
+  CallbackVH &operator=(const CallbackVH &) = default;
+
+  void setValPtr(Value *P) {
+    ValueHandleBase::operator=(P);
+  }
+
+public:
+  CallbackVH() : ValueHandleBase(Callback) {}
+  CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}
+
+  operator Value*() const {
+    return getValPtr();
+  }
+
+  /// \brief Callback for Value destruction.
+  ///
+  /// Called when this->getValPtr() is destroyed, inside ~Value(), so you
+  /// may call any non-virtual Value method on getValPtr(), but no subclass
+  /// methods.  If WeakTrackingVH were implemented as a CallbackVH, it would
+  /// use this method to call setValPtr(NULL).  AssertingVH would use this
+  /// method to cause an assertion failure.
+  ///
+  /// All implementations must remove the reference from this object to the
+  /// Value that's being destroyed.
+  virtual void deleted() { setValPtr(nullptr); }
+
+  /// \brief Callback for Value RAUW.
+  ///
+  /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
+  /// _before_ any of the uses have actually been replaced.  If WeakTrackingVH
+  /// were implemented as a CallbackVH, it would use this method to call
+  /// setValPtr(new_value).  AssertingVH would do nothing in this method.
+  virtual void allUsesReplacedWith(Value *) {}
+};
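+
+// A minimal subclass sketch (illustrative; ExternalCache is a hypothetical
+// type): drop a cache entry when its key dies and re-key it on RAUW.
+//
+//   struct CacheVH final : CallbackVH {
+//     ExternalCache *C;
+//     CacheVH(Value *V, ExternalCache *C) : CallbackVH(V), C(C) {}
+//     void deleted() override {
+//       C->erase(getValPtr());   // remove our reference to the dying Value
+//       setValPtr(nullptr);
+//     }
+//     void allUsesReplacedWith(Value *New) override {
+//       C->rekey(getValPtr(), New);
+//     }
+//   };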
+
+/// Value handle that poisons itself if the Value is deleted.
+///
+/// This is a Value Handle that points to a value and poisons itself if the
+/// value is destroyed while the handle is still live.  This is very useful for
+/// catching dangling pointer bugs where an \c AssertingVH cannot be used
+/// because the dangling handle needs to outlive the value without ever being
+/// used.
+///
+/// One particularly useful place to use this is as the Key of a map. Dangling
+/// pointer bugs often lead to really subtle bugs that only occur if another
+/// object happens to get allocated to the same address as the old one. Using
+/// a PoisoningVH ensures that an assert is triggered if looking up a new value
+/// in the map finds a handle from the old value.
+///
+/// Note that a PoisoningVH handle does *not* follow values across RAUW
+/// operations. This means that RAUWs need to explicitly update the
+/// PoisoningVH as the value moves. This is required because in non-assert
+/// mode this class turns into a trivial wrapper around a pointer.
+template <typename ValueTy>
+class PoisoningVH
+#ifndef NDEBUG
+    final : public CallbackVH
+#endif
+{
+  friend struct DenseMapInfo<PoisoningVH<ValueTy>>;
+
+  // Convert a ValueTy*, which may be const, to the raw Value*.
+  static Value *GetAsValue(Value *V) { return V; }
+  static Value *GetAsValue(const Value *V) { return const_cast<Value *>(V); }
+
+#ifndef NDEBUG
+  /// A flag tracking whether this value has been poisoned.
+  ///
+  /// On delete and RAUW, we leave the value pointer alone so that as a raw
+  /// pointer it produces the same value (and we fit into the same key of
+  /// a hash table, etc), but we poison the handle so that any top-level usage
+  /// will fail.
+  bool Poisoned = false;
+
+  Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); }
+  void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); }
+
+  /// Handle deletion by poisoning the handle.
+  void deleted() override {
+    assert(!Poisoned && "Tried to delete an already poisoned handle!");
+    Poisoned = true;
+    RemoveFromUseList();
+  }
+
+  /// Handle RAUW by poisoning the handle.
+  void allUsesReplacedWith(Value *) override {
+    assert(!Poisoned && "Tried to RAUW an already poisoned handle!");
+    Poisoned = true;
+    RemoveFromUseList();
+  }
+#else // NDEBUG
+  Value *ThePtr = nullptr;
+
+  Value *getRawValPtr() const { return ThePtr; }
+  void setRawValPtr(Value *P) { ThePtr = P; }
+#endif
+
+  ValueTy *getValPtr() const {
+    assert(!Poisoned && "Accessed a poisoned value handle!");
+    return static_cast<ValueTy *>(getRawValPtr());
+  }
+  void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); }
+
+public:
+  PoisoningVH() = default;
+#ifndef NDEBUG
+  PoisoningVH(ValueTy *P) : CallbackVH(GetAsValue(P)) {}
+  PoisoningVH(const PoisoningVH &RHS)
+      : CallbackVH(RHS), Poisoned(RHS.Poisoned) {}
+
+  ~PoisoningVH() {
+    if (Poisoned)
+      clearValPtr();
+  }
+
+  PoisoningVH &operator=(const PoisoningVH &RHS) {
+    if (Poisoned)
+      clearValPtr();
+    CallbackVH::operator=(RHS);
+    Poisoned = RHS.Poisoned;
+    return *this;
+  }
+#else
+  PoisoningVH(ValueTy *P) : ThePtr(GetAsValue(P)) {}
+#endif
+
+  operator ValueTy *() const { return getValPtr(); }
+
+  ValueTy *operator->() const { return getValPtr(); }
+  ValueTy &operator*() const { return *getValPtr(); }
+};
+
+// Specialize DenseMapInfo to allow PoisoningVH to participate in DenseMap.
+template <typename T> struct DenseMapInfo<PoisoningVH<T>> {
+  static inline PoisoningVH<T> getEmptyKey() {
+    PoisoningVH<T> Res;
+    Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
+    return Res;
+  }
+
+  static inline PoisoningVH<T> getTombstoneKey() {
+    PoisoningVH<T> Res;
+    Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
+    return Res;
+  }
+
+  static unsigned getHashValue(const PoisoningVH<T> &Val) {
+    return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
+  }
+
+  static bool isEqual(const PoisoningVH<T> &LHS, const PoisoningVH<T> &RHS) {
+    return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
+                                          RHS.getRawValPtr());
+  }
+};
+
+template <typename T> struct isPodLike<PoisoningVH<T>> {
+#ifdef NDEBUG
+  static const bool value = true;
+#else
+  static const bool value = false;
+#endif
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_VALUEHANDLE_H
diff --git a/linux-x64/clang/include/llvm/IR/ValueMap.h b/linux-x64/clang/include/llvm/IR/ValueMap.h
new file mode 100644
index 0000000..11d5823
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ValueMap.h
@@ -0,0 +1,434 @@
+//===- ValueMap.h - Safe map from Values to data ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ValueMap class.  ValueMap maps Value* or any subclass
+// to an arbitrary other type.  It provides the DenseMap interface but updates
+// itself to remain safe when keys are RAUWed or deleted.  By default, when a
+// key is RAUWed from V1 to V2, the old mapping V1->target is removed, and a new
+// mapping V2->target is added.  If V2 already existed, its old target is
+// overwritten.  When a key is deleted, its mapping is removed.
+//
+// You can override a ValueMap's Config parameter to control exactly what
+// happens on RAUW and destruction and to get called back on each event.  It's
+// legal to call back into the ValueMap from a Config's callbacks.  Config
+// parameters should inherit from ValueMapConfig<KeyT> to get default
+// implementations of all the methods ValueMap uses.  See ValueMapConfig for
+// documentation of the functions you can override.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEMAP_H
+#define LLVM_IR_VALUEMAP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/UniqueLock.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+template<typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH;
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator;
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator;
+
+/// This class defines the default behavior for configurable aspects of
+/// ValueMap<>.  User Configs should inherit from this class to be as compatible
+/// as possible with future versions of ValueMap.
+template<typename KeyT, typename MutexT = sys::Mutex>
+struct ValueMapConfig {
+  using mutex_type = MutexT;
+
+  /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
+  /// false, the ValueMap will leave the original mapping in place.
+  enum { FollowRAUW = true };
+
+  // All methods will be called with a first argument of type ExtraData.  The
+  // default implementations in this class take a templated first argument so
+  // that users' subclasses can use any type they want without having to
+  // override all the defaults.
+  struct ExtraData {};
+
+  template<typename ExtraDataT>
+  static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
+  template<typename ExtraDataT>
+  static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
+
+  /// Returns a mutex that should be acquired around any changes to the map.
+  /// This is only acquired from the CallbackVH (and held around calls to onRAUW
+  /// and onDelete) and not inside other ValueMap methods.  NULL means that no
+  /// mutex is necessary.
+  template<typename ExtraDataT>
+  static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
+};
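+
+// A custom Config sketch (illustrative): keep stale mappings on RAUW and
+// observe deletions.
+//
+//   struct NoFollowConfig : ValueMapConfig<Value *> {
+//     enum { FollowRAUW = false };
+//     template <typename ExtraDataT>
+//     static void onDelete(const ExtraDataT &, Value *Old) {
+//       // called (with getMutex() held, if non-null) as Old is destroyed
+//     }
+//   };
+//   ValueMap<Value *, unsigned, NoFollowConfig> VM;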
+
+/// See the file comment.
+template <typename KeyT, typename ValueT,
+          typename Config = ValueMapConfig<KeyT>>
+class ValueMap {
+  friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
+
+  using ValueMapCVH = ValueMapCallbackVH<KeyT, ValueT, Config>;
+  using MapT = DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH>>;
+  using MDMapT = DenseMap<const Metadata *, TrackingMDRef>;
+  using ExtraData = typename Config::ExtraData;
+
+  MapT Map;
+  Optional<MDMapT> MDMap;
+  ExtraData Data;
+  bool MayMapMetadata = true;
+
+public:
+  using key_type = KeyT;
+  using mapped_type = ValueT;
+  using value_type = std::pair<KeyT, ValueT>;
+  using size_type = unsigned;
+
+  explicit ValueMap(unsigned NumInitBuckets = 64)
+      : Map(NumInitBuckets), Data() {}
+  explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
+      : Map(NumInitBuckets), Data(Data) {}
+  ValueMap(const ValueMap &) = delete;
+  ValueMap &operator=(const ValueMap &) = delete;
+
+  bool hasMD() const { return bool(MDMap); }
+  MDMapT &MD() {
+    if (!MDMap)
+      MDMap.emplace();
+    return *MDMap;
+  }
+  Optional<MDMapT> &getMDMap() { return MDMap; }
+
+  bool mayMapMetadata() const { return MayMapMetadata; }
+  void enableMapMetadata() { MayMapMetadata = true; }
+  void disableMapMetadata() { MayMapMetadata = false; }
+
+  /// Get the mapped metadata, if it's in the map.
+  Optional<Metadata *> getMappedMD(const Metadata *MD) const {
+    if (!MDMap)
+      return None;
+    auto Where = MDMap->find(MD);
+    if (Where == MDMap->end())
+      return None;
+    return Where->second.get();
+  }
+
+  using iterator = ValueMapIterator<MapT, KeyT>;
+  using const_iterator = ValueMapConstIterator<MapT, KeyT>;
+
+  inline iterator begin() { return iterator(Map.begin()); }
+  inline iterator end() { return iterator(Map.end()); }
+  inline const_iterator begin() const { return const_iterator(Map.begin()); }
+  inline const_iterator end() const { return const_iterator(Map.end()); }
+
+  bool empty() const { return Map.empty(); }
+  size_type size() const { return Map.size(); }
+
+  /// Grow the map so that it has at least Size buckets. Does not shrink.
+  void resize(size_t Size) { Map.resize(Size); }
+
+  void clear() {
+    Map.clear();
+    MDMap.reset();
+  }
+
+  /// Return 1 if the specified key is in the map, 0 otherwise.
+  size_type count(const KeyT &Val) const {
+    return Map.find_as(Val) == Map.end() ? 0 : 1;
+  }
+
+  iterator find(const KeyT &Val) {
+    return iterator(Map.find_as(Val));
+  }
+  const_iterator find(const KeyT &Val) const {
+    return const_iterator(Map.find_as(Val));
+  }
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueT lookup(const KeyT &Val) const {
+    typename MapT::const_iterator I = Map.find_as(Val);
+    return I != Map.end() ? I->second : ValueT();
+  }
+
+  // Inserts a (key, value) pair into the map if the key isn't already in the
+  // map. If the key is already in the map, it returns false and doesn't
+  // update the value.
+  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    auto MapResult = Map.insert(std::make_pair(Wrap(KV.first), KV.second));
+    return std::make_pair(iterator(MapResult.first), MapResult.second);
+  }
+
+  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+    auto MapResult =
+        Map.insert(std::make_pair(Wrap(KV.first), std::move(KV.second)));
+    return std::make_pair(iterator(MapResult.first), MapResult.second);
+  }
+
+  /// insert - Range insertion of pairs.
+  template<typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  bool erase(const KeyT &Val) {
+    typename MapT::iterator I = Map.find_as(Val);
+    if (I == Map.end())
+      return false;
+
+    Map.erase(I);
+    return true;
+  }
+  void erase(iterator I) {
+    return Map.erase(I.base());
+  }
+
+  value_type& FindAndConstruct(const KeyT &Key) {
+    return Map.FindAndConstruct(Wrap(Key));
+  }
+
+  ValueT &operator[](const KeyT &Key) {
+    return Map[Wrap(Key)];
+  }
+
+  /// isPointerIntoBucketsArray - Return true if the specified pointer points
+  /// somewhere into the ValueMap's array of buckets (i.e. either to a key or
+  /// value in the ValueMap).
+  bool isPointerIntoBucketsArray(const void *Ptr) const {
+    return Map.isPointerIntoBucketsArray(Ptr);
+  }
+
+  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+  /// array.  In conjunction with the previous method, this can be used to
+  /// determine whether an insertion caused the ValueMap to reallocate.
+  const void *getPointerIntoBucketsArray() const {
+    return Map.getPointerIntoBucketsArray();
+  }
+
+private:
+  // Takes a key being looked up in the map and wraps it into a
+  // ValueMapCallbackVH, the actual key type of the map.  We use a helper
+  // function because ValueMapCVH is constructed with a second parameter.
+  ValueMapCVH Wrap(KeyT key) const {
+    // The only way the resulting CallbackVH could try to modify *this (making
+    // the const_cast incorrect) is if it gets inserted into the map.  But then
+    // this function must have been called from a non-const method, making the
+    // const_cast ok.
+    return ValueMapCVH(key, const_cast<ValueMap*>(this));
+  }
+};
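+
+// Illustrative sketch of the default RAUW behavior described in the file
+// comment (V1, V2 : Value*):
+//
+//   ValueMap<Value *, unsigned> VM;
+//   VM[V1] = 7;
+//   V1->replaceAllUsesWith(V2);   // mapping migrates: VM.lookup(V2) == 7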
+
+// This CallbackVH updates its ValueMap when the contained Value changes,
+// according to the user's preferences expressed through the Config object.
+template <typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH final : public CallbackVH {
+  friend class ValueMap<KeyT, ValueT, Config>;
+  friend struct DenseMapInfo<ValueMapCallbackVH>;
+
+  using ValueMapT = ValueMap<KeyT, ValueT, Config>;
+  using KeySansPointerT = typename std::remove_pointer<KeyT>::type;
+
+  ValueMapT *Map;
+
+  ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
+      : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
+        Map(Map) {}
+
+  // Private constructor used to create empty/tombstone DenseMap keys.
+  ValueMapCallbackVH(Value *V) : CallbackVH(V), Map(nullptr) {}
+
+public:
+  KeyT Unwrap() const { return cast_or_null<KeySansPointerT>(getValPtr()); }
+
+  void deleted() override {
+    // Make a copy that won't get changed even when *this is destroyed.
+    ValueMapCallbackVH Copy(*this);
+    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
+    unique_lock<typename Config::mutex_type> Guard;
+    if (M)
+      Guard = unique_lock<typename Config::mutex_type>(*M);
+    Config::onDelete(Copy.Map->Data, Copy.Unwrap());  // May destroy *this.
+    Copy.Map->Map.erase(Copy);  // Definitely destroys *this.
+  }
+
+  void allUsesReplacedWith(Value *new_key) override {
+    assert(isa<KeySansPointerT>(new_key) &&
+           "Invalid RAUW on key of ValueMap<>");
+    // Make a copy that won't get changed even when *this is destroyed.
+    ValueMapCallbackVH Copy(*this);
+    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
+    unique_lock<typename Config::mutex_type> Guard;
+    if (M)
+      Guard = unique_lock<typename Config::mutex_type>(*M);
+
+    KeyT typed_new_key = cast<KeySansPointerT>(new_key);
+    // Can destroy *this:
+    Config::onRAUW(Copy.Map->Data, Copy.Unwrap(), typed_new_key);
+    if (Config::FollowRAUW) {
+      typename ValueMapT::MapT::iterator I = Copy.Map->Map.find(Copy);
+      // I could == Copy.Map->Map.end() if the onRAUW callback already
+      // removed the old mapping.
+      if (I != Copy.Map->Map.end()) {
+        ValueT Target(std::move(I->second));
+        Copy.Map->Map.erase(I);  // Definitely destroys *this.
+        Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
+      }
+    }
+  }
+};
+
+template<typename KeyT, typename ValueT, typename Config>
+struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
+  using VH = ValueMapCallbackVH<KeyT, ValueT, Config>;
+
+  static inline VH getEmptyKey() {
+    return VH(DenseMapInfo<Value *>::getEmptyKey());
+  }
+
+  static inline VH getTombstoneKey() {
+    return VH(DenseMapInfo<Value *>::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const VH &Val) {
+    return DenseMapInfo<KeyT>::getHashValue(Val.Unwrap());
+  }
+
+  static unsigned getHashValue(const KeyT &Val) {
+    return DenseMapInfo<KeyT>::getHashValue(Val);
+  }
+
+  static bool isEqual(const VH &LHS, const VH &RHS) {
+    return LHS == RHS;
+  }
+
+  static bool isEqual(const KeyT &LHS, const VH &RHS) {
+    return LHS == RHS.getValPtr();
+  }
+};
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator :
+    public std::iterator<std::forward_iterator_tag,
+                         std::pair<KeyT, typename DenseMapT::mapped_type>,
+                         ptrdiff_t> {
+  using BaseT = typename DenseMapT::iterator;
+  using ValueT = typename DenseMapT::mapped_type;
+
+  BaseT I;
+
+public:
+  ValueMapIterator() : I() {}
+  ValueMapIterator(BaseT I) : I(I) {}
+
+  BaseT base() const { return I; }
+
+  struct ValueTypeProxy {
+    const KeyT first;
+    ValueT& second;
+
+    ValueTypeProxy *operator->() { return this; }
+
+    operator std::pair<KeyT, ValueT>() const {
+      return std::make_pair(first, second);
+    }
+  };
+
+  ValueTypeProxy operator*() const {
+    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+    return Result;
+  }
+
+  ValueTypeProxy operator->() const {
+    return operator*();
+  }
+
+  bool operator==(const ValueMapIterator &RHS) const {
+    return I == RHS.I;
+  }
+  bool operator!=(const ValueMapIterator &RHS) const {
+    return I != RHS.I;
+  }
+
+  inline ValueMapIterator& operator++() {  // Preincrement
+    ++I;
+    return *this;
+  }
+  ValueMapIterator operator++(int) {  // Postincrement
+    ValueMapIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator :
+    public std::iterator<std::forward_iterator_tag,
+                         std::pair<KeyT, typename DenseMapT::mapped_type>,
+                         ptrdiff_t> {
+  using BaseT = typename DenseMapT::const_iterator;
+  using ValueT = typename DenseMapT::mapped_type;
+
+  BaseT I;
+
+public:
+  ValueMapConstIterator() : I() {}
+  ValueMapConstIterator(BaseT I) : I(I) {}
+  ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
+    : I(Other.base()) {}
+
+  BaseT base() const { return I; }
+
+  struct ValueTypeProxy {
+    const KeyT first;
+    const ValueT& second;
+    ValueTypeProxy *operator->() { return this; }
+    operator std::pair<KeyT, ValueT>() const {
+      return std::make_pair(first, second);
+    }
+  };
+
+  ValueTypeProxy operator*() const {
+    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+    return Result;
+  }
+
+  ValueTypeProxy operator->() const {
+    return operator*();
+  }
+
+  bool operator==(const ValueMapConstIterator &RHS) const {
+    return I == RHS.I;
+  }
+  bool operator!=(const ValueMapConstIterator &RHS) const {
+    return I != RHS.I;
+  }
+
+  inline ValueMapConstIterator& operator++() {  // Preincrement
+    ++I;
+    return *this;
+  }
+  ValueMapConstIterator operator++(int) {  // Postincrement
+    ValueMapConstIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_VALUEMAP_H
diff --git a/linux-x64/clang/include/llvm/IR/ValueSymbolTable.h b/linux-x64/clang/include/llvm/IR/ValueSymbolTable.h
new file mode 100644
index 0000000..26cbbfa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/ValueSymbolTable.h
@@ -0,0 +1,139 @@
+//===- llvm/ValueSymbolTable.h - Implement a Value Symtab -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the name/Value symbol table for LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUESYMBOLTABLE_H
+#define LLVM_IR_VALUESYMBOLTABLE_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Value.h"
+#include <cstdint>
+
+namespace llvm {
+
+class Argument;
+class BasicBlock;
+class Function;
+class GlobalAlias;
+class GlobalIFunc;
+class GlobalVariable;
+class Instruction;
+template <unsigned InternalLen> class SmallString;
+template <typename ValueSubClass> class SymbolTableListTraits;
+
+/// This class provides a symbol table of name/value pairs. It is essentially
+/// a std::map<std::string,Value*> but has a controlled interface provided by
+/// LLVM as well as ensuring uniqueness of names.
+///
+class ValueSymbolTable {
+  friend class SymbolTableListTraits<Argument>;
+  friend class SymbolTableListTraits<BasicBlock>;
+  friend class SymbolTableListTraits<Function>;
+  friend class SymbolTableListTraits<GlobalAlias>;
+  friend class SymbolTableListTraits<GlobalIFunc>;
+  friend class SymbolTableListTraits<GlobalVariable>;
+  friend class SymbolTableListTraits<Instruction>;
+  friend class Value;
+
+/// @name Types
+/// @{
+public:
+  /// @brief A mapping of names to values.
+  using ValueMap = StringMap<Value*>;
+
+  /// @brief An iterator over a ValueMap.
+  using iterator = ValueMap::iterator;
+
+  /// @brief A const_iterator over a ValueMap.
+  using const_iterator = ValueMap::const_iterator;
+
+/// @}
+/// @name Constructors
+/// @{
+
+  ValueSymbolTable() : vmap(0) {}
+  ~ValueSymbolTable();
+
+/// @}
+/// @name Accessors
+/// @{
+
+  /// This method finds the value with the given \p Name in the
+  /// symbol table.
+  /// @returns the value associated with the \p Name
+  /// @brief Lookup a named Value.
+  Value *lookup(StringRef Name) const { return vmap.lookup(Name); }
+
+  /// @returns true iff the symbol table is empty
+  /// @brief Determine if the symbol table is empty
+  inline bool empty() const { return vmap.empty(); }
+
+  /// @brief The number of name/value pairs is returned.
+  inline unsigned size() const { return unsigned(vmap.size()); }
+
+  /// This function can be used from the debugger to display the
+  /// content of the symbol table while debugging.
+  /// @brief Print out symbol table on stderr
+  void dump() const;
+
+/// @}
+/// @name Iteration
+/// @{
+
+  /// @brief Get an iterator to the beginning of the symbol table.
+  inline iterator begin() { return vmap.begin(); }
+
+  /// @brief Get a const_iterator to the beginning of the symbol table.
+  inline const_iterator begin() const { return vmap.begin(); }
+
+  /// @brief Get an iterator to the end of the symbol table.
+  inline iterator end() { return vmap.end(); }
+
+  /// @brief Get a const_iterator to the end of the symbol table.
+  inline const_iterator end() const { return vmap.end(); }
+
+  /// @}
+  /// @name Mutators
+  /// @{
+private:
+  ValueName *makeUniqueName(Value *V, SmallString<256> &UniqueName);
+
+  /// This method adds the provided value \p V to the symbol table.  The Value
+  /// must have a name which is used to place the value in the symbol table.
+  /// If the inserted name conflicts, this renames the value.
+  /// @brief Add a named value to the symbol table
+  void reinsertValue(Value *V);
+
+  /// createValueName - This method attempts to create a value name and insert
+  /// it into the symbol table with the specified name.  If it conflicts, it
+  /// auto-renames the name and returns that instead.
+  ValueName *createValueName(StringRef Name, Value *V);
+
+  /// This method removes a value from the symbol table.  It leaves the
+  /// ValueName attached to the value, but it is no longer inserted in the
+  /// symtab.
+  void removeValueName(ValueName *V);
+
+  /// @}
+  /// @name Internal Data
+  /// @{
+
+  ValueMap vmap;                    ///< The map that holds the symbol table.
+  mutable uint32_t LastUnique = 0;  ///< Counter for tracking unique names
+
+/// @}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_VALUESYMBOLTABLE_H
diff --git a/linux-x64/clang/include/llvm/IR/Verifier.h b/linux-x64/clang/include/llvm/IR/Verifier.h
new file mode 100644
index 0000000..bc10f33
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IR/Verifier.h
@@ -0,0 +1,149 @@
+//===- Verifier.h - LLVM IR Verifier ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function verifier interface, which can be used for
+// some sanity checking of input to the system, and for checking that
+// transformations haven't done something bad.
+//
+// Note that this does not provide full 'Java-style' security and verification;
+// instead it just tries to ensure that code is well formed.
+//
+// To see what specifically is checked, look at the top of Verifier.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VERIFIER_H
+#define LLVM_IR_VERIFIER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/PassManager.h"
+#include <utility>
+
+namespace llvm {
+
+class APInt;
+class Function;
+class FunctionPass;
+class Instruction;
+class MDNode;
+class Module;
+class raw_ostream;
+struct VerifierSupport;
+
+/// Verify that the TBAA metadata is valid.
+class TBAAVerifier {
+  VerifierSupport *Diagnostic = nullptr;
+
+  /// Helper to diagnose a failure
+  template <typename... Tys> void CheckFailed(Tys &&... Args);
+
+  /// Cache of TBAA base nodes that have already been visited.  This cache maps
+  /// a node that has been visited to a pair (IsInvalid, BitWidth) where
+  ///
+  ///  \c IsInvalid is true iff the node is invalid.
+  ///  \c BitWidth, if non-zero, is the bitwidth of the integer used to denote
+  ///    the offset of the access.  If zero, only a zero offset is allowed.
+  ///
+  /// \c BitWidth has no meaning if \c IsInvalid is true.
+  using TBAABaseNodeSummary = std::pair<bool, unsigned>;
+  DenseMap<const MDNode *, TBAABaseNodeSummary> TBAABaseNodes;
+
+  /// Maps an alleged scalar TBAA node to a boolean that is true if the node is
+  /// a valid scalar TBAA node, and false otherwise.
+  DenseMap<const MDNode *, bool> TBAAScalarNodes;
+
+  /// \name Helper functions used by \c visitTBAAMetadata.
+  /// @{
+  MDNode *getFieldNodeFromTBAABaseNode(Instruction &I, const MDNode *BaseNode,
+                                       APInt &Offset, bool IsNewFormat);
+  TBAAVerifier::TBAABaseNodeSummary verifyTBAABaseNode(Instruction &I,
+                                                       const MDNode *BaseNode,
+                                                       bool IsNewFormat);
+  TBAABaseNodeSummary verifyTBAABaseNodeImpl(Instruction &I,
+                                             const MDNode *BaseNode,
+                                             bool IsNewFormat);
+
+  bool isValidScalarTBAANode(const MDNode *MD);
+  /// @}
+
+public:
+  TBAAVerifier(VerifierSupport *Diagnostic = nullptr)
+      : Diagnostic(Diagnostic) {}
+  /// Visit an instruction and return true if it is valid; return false if
+  /// invalid TBAA metadata is attached.
+  bool visitTBAAMetadata(Instruction &I, const MDNode *MD);
+};
+
+/// \brief Check a function for errors, useful for use when debugging a
+/// pass.
+///
+/// If there are no errors, the function returns false. If an error is found,
+/// a message describing the error is written to OS (if non-null) and true is
+/// returned.
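+///
+/// A typical debugging call looks like this (illustrative sketch; assumes a
+/// Function &F and errs() from llvm/Support/raw_ostream.h):
+/// \code
+///   if (verifyFunction(F, &errs()))
+///     errs() << "verification failed\n";
+/// \endcode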
+bool verifyFunction(const Function &F, raw_ostream *OS = nullptr);
+
+/// \brief Check a module for errors.
+///
+/// If there are no errors, the function returns false. If an error is
+/// found, a message describing the error is written to OS (if
+/// non-null) and true is returned.
+///
+/// \return true if the module is broken. If BrokenDebugInfo is
+/// supplied, DebugInfo verification failures won't be considered as
+/// error and instead *BrokenDebugInfo will be set to true. Debug
+/// info errors can be "recovered" from by stripping the debug info.
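+///
+/// For example (illustrative; assumes a Module &M):
+/// \code
+///   bool BrokenDebugInfo = false;
+///   if (verifyModule(M, &errs(), &BrokenDebugInfo))
+///     errs() << "module is broken\n";
+/// \endcode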
+bool verifyModule(const Module &M, raw_ostream *OS = nullptr,
+                  bool *BrokenDebugInfo = nullptr);
+
+FunctionPass *createVerifierPass(bool FatalErrors = true);
+
+/// Check a module for errors, and report separate error states for IR
+/// and debug info errors.
+class VerifierAnalysis : public AnalysisInfoMixin<VerifierAnalysis> {
+  friend AnalysisInfoMixin<VerifierAnalysis>;
+
+  static AnalysisKey Key;
+
+public:
+  struct Result {
+    bool IRBroken, DebugInfoBroken;
+  };
+
+  Result run(Module &M, ModuleAnalysisManager &);
+  Result run(Function &F, FunctionAnalysisManager &);
+};
+
+/// Check a module for errors, but report debug info errors separately.
+/// Otherwise behaves as the normal verifyModule. Debug info errors can be
+/// "recovered" from by stripping the debug info.
+bool verifyModule(bool &BrokenDebugInfo, const Module &M, raw_ostream *OS);
+
+/// \brief Create a verifier pass.
+///
+/// Check a module or function for validity. This is essentially a pass wrapped
+/// around the above verifyFunction and verifyModule routines and
+/// functionality. When the pass detects a verification error it is always
+/// printed to stderr, and by default they are fatal. You can override that by
+/// passing \c false to \p FatalErrors.
+///
+/// Note that this creates a pass suitable for the legacy pass manager. It has
+/// nothing to do with \c VerifierPass.
+class VerifierPass : public PassInfoMixin<VerifierPass> {
+  bool FatalErrors;
+
+public:
+  explicit VerifierPass(bool FatalErrors = true) : FatalErrors(FatalErrors) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_VERIFIER_H
diff --git a/linux-x64/clang/include/llvm/IRReader/IRReader.h b/linux-x64/clang/include/llvm/IRReader/IRReader.h
new file mode 100644
index 0000000..bedde89
--- /dev/null
+++ b/linux-x64/clang/include/llvm/IRReader/IRReader.h
@@ -0,0 +1,63 @@
+//===---- llvm/IRReader/IRReader.h - Reader for LLVM IR files ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions for reading LLVM IR. They support both
+// Bitcode and Assembly, automatically detecting the input format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IRREADER_IRREADER_H
+#define LLVM_IRREADER_IRREADER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+
+namespace llvm {
+
+class StringRef;
+class MemoryBufferRef;
+class Module;
+class SMDiagnostic;
+class LLVMContext;
+
+/// If the given file holds a bitcode image, return a Module
+/// for it which does lazy deserialization of function bodies.  Otherwise,
+/// attempt to parse it as LLVM Assembly and return a fully populated
+/// Module. The ShouldLazyLoadMetadata flag is passed down to the bitcode
+/// reader to optionally enable lazy metadata loading.
+std::unique_ptr<Module>
+getLazyIRFileModule(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
+                    bool ShouldLazyLoadMetadata = false);
+
+/// If the given MemoryBuffer holds a bitcode image, return a Module
+/// for it.  Otherwise, attempt to parse it as LLVM Assembly and return
+/// a Module for it.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM test suite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
+std::unique_ptr<Module> parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
+                                LLVMContext &Context,
+                                bool UpgradeDebugInfo = true,
+                                StringRef DataLayoutString = "");
+
+/// If the given file holds a bitcode image, return a Module for it.
+/// Otherwise, attempt to parse it as LLVM Assembly and return a Module
+/// for it.
+/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
+///                         This option should only be set to false by llvm-as
+///                         for use inside the LLVM test suite!
+/// \param DataLayoutString Override datalayout in the llvm assembly.
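+///
+/// A minimal usage sketch (illustrative; "input.ll" and "mytool" are
+/// placeholders):
+/// \code
+///   LLVMContext Ctx;
+///   SMDiagnostic Err;
+///   std::unique_ptr<Module> M = parseIRFile("input.ll", Err, Ctx);
+///   if (!M)
+///     Err.print("mytool", errs());
+/// \endcode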
+std::unique_ptr<Module> parseIRFile(StringRef Filename, SMDiagnostic &Err,
+                                    LLVMContext &Context,
+                                    bool UpgradeDebugInfo = true,
+                                    StringRef DataLayoutString = "");
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/InitializePasses.h b/linux-x64/clang/include/llvm/InitializePasses.h
new file mode 100644
index 0000000..5aa5112
--- /dev/null
+++ b/linux-x64/clang/include/llvm/InitializePasses.h
@@ -0,0 +1,395 @@
+//===- llvm/InitializePasses.h - Initialize All Passes ----------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for the pass initialization routines
+// for the entire LLVM project.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INITIALIZEPASSES_H
+#define LLVM_INITIALIZEPASSES_H
+
+namespace llvm {
+
+class PassRegistry;
+
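+// A client typically initializes the pass libraries it links against once at
+// startup. An illustrative sketch (the libraries initialized here are
+// examples only):
+//
+//   PassRegistry &Registry = *PassRegistry::getPassRegistry();
+//   initializeCore(Registry);
+//   initializeScalarOpts(Registry);
+//   initializeAnalysis(Registry);
+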
+/// Initialize all passes linked into the Core library.
+void initializeCore(PassRegistry&);
+
+/// Initialize all passes linked into the TransformUtils library.
+void initializeTransformUtils(PassRegistry&);
+
+/// Initialize all passes linked into the ScalarOpts library.
+void initializeScalarOpts(PassRegistry&);
+
+/// Initialize all passes linked into the ObjCARCOpts library.
+void initializeObjCARCOpts(PassRegistry&);
+
+/// Initialize all passes linked into the Vectorize library.
+void initializeVectorization(PassRegistry&);
+
+/// Initialize all passes linked into the InstCombine library.
+void initializeInstCombine(PassRegistry&);
+
+/// Initialize all passes linked into the IPO library.
+void initializeIPO(PassRegistry&);
+
+/// Initialize all passes linked into the Instrumentation library.
+void initializeInstrumentation(PassRegistry&);
+
+/// Initialize all passes linked into the Analysis library.
+void initializeAnalysis(PassRegistry&);
+
+/// Initialize all passes linked into the Coroutines library.
+void initializeCoroutines(PassRegistry&);
+
+/// Initialize all passes linked into the CodeGen library.
+void initializeCodeGen(PassRegistry&);
+
+/// Initialize all passes linked into the GlobalISel library.
+void initializeGlobalISel(PassRegistry&);
+
+/// Initialize all passes linked into the Target library.
+void initializeTarget(PassRegistry&);
+
+void initializeAAEvalLegacyPassPass(PassRegistry&);
+void initializeAAResultsWrapperPassPass(PassRegistry&);
+void initializeADCELegacyPassPass(PassRegistry&);
+void initializeAddDiscriminatorsLegacyPassPass(PassRegistry&);
+void initializeAddressSanitizerModulePass(PassRegistry&);
+void initializeAddressSanitizerPass(PassRegistry&);
+void initializeAggressiveInstCombinerLegacyPassPass(PassRegistry&);
+void initializeAliasSetPrinterPass(PassRegistry&);
+void initializeAlignmentFromAssumptionsPass(PassRegistry&);
+void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
+void initializeArgPromotionPass(PassRegistry&);
+void initializeAssumptionCacheTrackerPass(PassRegistry&);
+void initializeAtomicExpandPass(PassRegistry&);
+void initializeBDCELegacyPassPass(PassRegistry&);
+void initializeBarrierNoopPass(PassRegistry&);
+void initializeBasicAAWrapperPassPass(PassRegistry&);
+void initializeBlockExtractorPass(PassRegistry &);
+void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry&);
+void initializeBoundsCheckingLegacyPassPass(PassRegistry&);
+void initializeBranchFolderPassPass(PassRegistry&);
+void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
+void initializeBranchRelaxationPass(PassRegistry&);
+void initializeBreakCriticalEdgesPass(PassRegistry&);
+void initializeBreakFalseDepsPass(PassRegistry&);
+void initializeCallSiteSplittingLegacyPassPass(PassRegistry&);
+void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
+void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
+void initializeCFGPrinterLegacyPassPass(PassRegistry&);
+void initializeCFGSimplifyPassPass(PassRegistry&);
+void initializeCFGViewerLegacyPassPass(PassRegistry&);
+void initializeCFLAndersAAWrapperPassPass(PassRegistry&);
+void initializeCFLSteensAAWrapperPassPass(PassRegistry&);
+void initializeCallGraphDOTPrinterPass(PassRegistry&);
+void initializeCallGraphPrinterLegacyPassPass(PassRegistry&);
+void initializeCallGraphViewerPass(PassRegistry&);
+void initializeCallGraphWrapperPassPass(PassRegistry&);
+void initializeCodeGenPreparePass(PassRegistry&);
+void initializeConstantHoistingLegacyPassPass(PassRegistry&);
+void initializeCalledValuePropagationLegacyPassPass(PassRegistry &);
+void initializeConstantMergeLegacyPassPass(PassRegistry&);
+void initializeConstantPropagationPass(PassRegistry&);
+void initializeCorrelatedValuePropagationPass(PassRegistry&);
+void initializeCostModelAnalysisPass(PassRegistry&);
+void initializeEarlyMachineLICMPass(PassRegistry&);
+void initializeEarlyTailDuplicatePass(PassRegistry&);
+void initializeEntryExitInstrumenterPass(PassRegistry&);
+void initializePostInlineEntryExitInstrumenterPass(PassRegistry&);
+void initializeCrossDSOCFIPass(PassRegistry&);
+void initializeDAEPass(PassRegistry&);
+void initializeDAHPass(PassRegistry&);
+void initializeDCELegacyPassPass(PassRegistry&);
+void initializeDSELegacyPassPass(PassRegistry&);
+void initializeDataFlowSanitizerPass(PassRegistry&);
+void initializeDeadInstEliminationPass(PassRegistry&);
+void initializeDeadMachineInstructionElimPass(PassRegistry&);
+void initializeDelinearizationPass(PassRegistry&);
+void initializeDemandedBitsWrapperPassPass(PassRegistry&);
+void initializeDependenceAnalysisPass(PassRegistry&);
+void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
+void initializeDetectDeadLanesPass(PassRegistry&);
+void initializeDivergenceAnalysisPass(PassRegistry&);
+void initializeDivRemPairsLegacyPassPass(PassRegistry&);
+void initializeDomOnlyPrinterPass(PassRegistry&);
+void initializeDomOnlyViewerPass(PassRegistry&);
+void initializeDomPrinterPass(PassRegistry&);
+void initializeDomViewerPass(PassRegistry&);
+void initializeDominanceFrontierWrapperPassPass(PassRegistry&);
+void initializeDominatorTreeWrapperPassPass(PassRegistry&);
+void initializeDwarfEHPreparePass(PassRegistry&);
+void initializeEarlyCSELegacyPassPass(PassRegistry&);
+void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry&);
+void initializeEarlyIfConverterPass(PassRegistry&);
+void initializeEdgeBundlesPass(PassRegistry&);
+void initializeEfficiencySanitizerPass(PassRegistry&);
+void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&);
+void initializeExpandISelPseudosPass(PassRegistry&);
+void initializeExpandMemCmpPassPass(PassRegistry&);
+void initializeExpandPostRAPass(PassRegistry&);
+void initializeExpandReductionsPass(PassRegistry&);
+void initializeExternalAAWrapperPassPass(PassRegistry&);
+void initializeFEntryInserterPass(PassRegistry&);
+void initializeFinalizeMachineBundlesPass(PassRegistry&);
+void initializeFlattenCFGPassPass(PassRegistry&);
+void initializeFloat2IntLegacyPassPass(PassRegistry&);
+void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializeForwardControlFlowIntegrityPass(PassRegistry&);
+void initializeFuncletLayoutPass(PassRegistry&);
+void initializeFunctionImportLegacyPassPass(PassRegistry&);
+void initializeGCMachineCodeAnalysisPass(PassRegistry&);
+void initializeGCModuleInfoPass(PassRegistry&);
+void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
+void initializeGVNHoistLegacyPassPass(PassRegistry&);
+void initializeGVNLegacyPassPass(PassRegistry&);
+void initializeGVNSinkLegacyPassPass(PassRegistry&);
+void initializeGlobalDCELegacyPassPass(PassRegistry&);
+void initializeGlobalMergePass(PassRegistry&);
+void initializeGlobalOptLegacyPassPass(PassRegistry&);
+void initializeGlobalSplitPass(PassRegistry&);
+void initializeGlobalsAAWrapperPassPass(PassRegistry&);
+void initializeGuardWideningLegacyPassPass(PassRegistry&);
+void initializeIPCPPass(PassRegistry&);
+void initializeIPSCCPLegacyPassPass(PassRegistry&);
+void initializeIRTranslatorPass(PassRegistry&);
+void initializeIVUsersWrapperPassPass(PassRegistry&);
+void initializeIfConverterPass(PassRegistry&);
+void initializeImplicitNullChecksPass(PassRegistry&);
+void initializeIndVarSimplifyLegacyPassPass(PassRegistry&);
+void initializeIndirectBrExpandPassPass(PassRegistry&);
+void initializeIRCELegacyPassPass(PassRegistry&);
+void initializeInferAddressSpacesPass(PassRegistry&);
+void initializeInferFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializeInlineCostAnalysisPass(PassRegistry&);
+void initializeInstCountPass(PassRegistry&);
+void initializeInstNamerPass(PassRegistry&);
+void initializeInstSimplifierPass(PassRegistry&);
+void initializeInstrProfilingLegacyPassPass(PassRegistry&);
+void initializeInstructionCombiningPassPass(PassRegistry&);
+void initializeInstructionSelectPass(PassRegistry&);
+void initializeInterleavedAccessPass(PassRegistry&);
+void initializeInternalizeLegacyPassPass(PassRegistry&);
+void initializeIntervalPartitionPass(PassRegistry&);
+void initializeJumpThreadingPass(PassRegistry&);
+void initializeLCSSAVerificationPassPass(PassRegistry&);
+void initializeLCSSAWrapperPassPass(PassRegistry&);
+void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
+void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
+void initializeLazyMachineBlockFrequencyInfoPassPass(PassRegistry&);
+void initializeLazyValueInfoPrinterPass(PassRegistry&);
+void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
+void initializeLegacyLICMPassPass(PassRegistry&);
+void initializeLegacyLoopSinkPassPass(PassRegistry&);
+void initializeLegalizerPass(PassRegistry&);
+void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
+void initializeLintPass(PassRegistry&);
+void initializeLiveDebugValuesPass(PassRegistry&);
+void initializeLiveDebugVariablesPass(PassRegistry&);
+void initializeLiveIntervalsPass(PassRegistry&);
+void initializeLiveRangeShrinkPass(PassRegistry&);
+void initializeLiveRegMatrixPass(PassRegistry&);
+void initializeLiveStacksPass(PassRegistry&);
+void initializeLiveVariablesPass(PassRegistry&);
+void initializeLoadStoreVectorizerPass(PassRegistry&);
+void initializeLoaderPassPass(PassRegistry&);
+void initializeLocalStackSlotPassPass(PassRegistry&);
+void initializeLocalizerPass(PassRegistry&);
+void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
+void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
+void initializeLoopDeletionLegacyPassPass(PassRegistry&);
+void initializeLoopDistributeLegacyPass(PassRegistry&);
+void initializeLoopExtractorPass(PassRegistry&);
+void initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry&);
+void initializeLoopInfoWrapperPassPass(PassRegistry&);
+void initializeLoopInterchangePass(PassRegistry&);
+void initializeLoopLoadEliminationPass(PassRegistry&);
+void initializeLoopPassPass(PassRegistry&);
+void initializeLoopPredicationLegacyPassPass(PassRegistry&);
+void initializeLoopRerollPass(PassRegistry&);
+void initializeLoopRotateLegacyPassPass(PassRegistry&);
+void initializeLoopSimplifyCFGLegacyPassPass(PassRegistry&);
+void initializeLoopSimplifyPass(PassRegistry&);
+void initializeLoopStrengthReducePass(PassRegistry&);
+void initializeLoopUnrollPass(PassRegistry&);
+void initializeLoopUnswitchPass(PassRegistry&);
+void initializeLoopVectorizePass(PassRegistry&);
+void initializeLoopVersioningLICMPass(PassRegistry&);
+void initializeLoopVersioningPassPass(PassRegistry&);
+void initializeLowerAtomicLegacyPassPass(PassRegistry&);
+void initializeLowerEmuTLSPass(PassRegistry&);
+void initializeLowerExpectIntrinsicPass(PassRegistry&);
+void initializeLowerGuardIntrinsicLegacyPassPass(PassRegistry&);
+void initializeLowerIntrinsicsPass(PassRegistry&);
+void initializeLowerInvokeLegacyPassPass(PassRegistry&);
+void initializeLowerSwitchPass(PassRegistry&);
+void initializeLowerTypeTestsPass(PassRegistry&);
+void initializeMIRPrintingPassPass(PassRegistry&);
+void initializeMachineBlockFrequencyInfoPass(PassRegistry&);
+void initializeMachineBlockPlacementPass(PassRegistry&);
+void initializeMachineBlockPlacementStatsPass(PassRegistry&);
+void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
+void initializeMachineCSEPass(PassRegistry&);
+void initializeMachineCombinerPass(PassRegistry&);
+void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeMachineDominanceFrontierPass(PassRegistry&);
+void initializeMachineDominatorTreePass(PassRegistry&);
+void initializeMachineFunctionPrinterPassPass(PassRegistry&);
+void initializeMachineLICMPass(PassRegistry&);
+void initializeMachineLoopInfoPass(PassRegistry&);
+void initializeMachineModuleInfoPass(PassRegistry&);
+void initializeMachineOptimizationRemarkEmitterPassPass(PassRegistry&);
+void initializeMachineOutlinerPass(PassRegistry&);
+void initializeMachinePipelinerPass(PassRegistry&);
+void initializeMachinePostDominatorTreePass(PassRegistry&);
+void initializeMachineRegionInfoPassPass(PassRegistry&);
+void initializeMachineSchedulerPass(PassRegistry&);
+void initializeMachineSinkingPass(PassRegistry&);
+void initializeMachineTraceMetricsPass(PassRegistry&);
+void initializeMachineVerifierPassPass(PassRegistry&);
+void initializeMemCpyOptLegacyPassPass(PassRegistry&);
+void initializeMemDepPrinterPass(PassRegistry&);
+void initializeMemDerefPrinterPass(PassRegistry&);
+void initializeMemoryDependenceWrapperPassPass(PassRegistry&);
+void initializeMemorySSAPrinterLegacyPassPass(PassRegistry&);
+void initializeMemorySSAWrapperPassPass(PassRegistry&);
+void initializeMemorySanitizerPass(PassRegistry&);
+void initializeMergeFunctionsPass(PassRegistry&);
+void initializeMergeICmpsPass(PassRegistry&);
+void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry&);
+void initializeMetaRenamerPass(PassRegistry&);
+void initializeModuleDebugInfoPrinterPass(PassRegistry&);
+void initializeModuleSummaryIndexWrapperPassPass(PassRegistry&);
+void initializeMustExecutePrinterPass(PassRegistry&);
+void initializeNameAnonGlobalLegacyPassPass(PassRegistry&);
+void initializeNaryReassociateLegacyPassPass(PassRegistry&);
+void initializeNewGVNLegacyPassPass(PassRegistry&);
+void initializeObjCARCAAWrapperPassPass(PassRegistry&);
+void initializeObjCARCAPElimPass(PassRegistry&);
+void initializeObjCARCContractPass(PassRegistry&);
+void initializeObjCARCExpandPass(PassRegistry&);
+void initializeObjCARCOptPass(PassRegistry&);
+void initializeOptimizationRemarkEmitterWrapperPassPass(PassRegistry&);
+void initializeOptimizePHIsPass(PassRegistry&);
+void initializePAEvalPass(PassRegistry&);
+void initializePEIPass(PassRegistry&);
+void initializePGOIndirectCallPromotionLegacyPassPass(PassRegistry&);
+void initializePGOInstrumentationGenLegacyPassPass(PassRegistry&);
+void initializePGOInstrumentationUseLegacyPassPass(PassRegistry&);
+void initializePGOMemOPSizeOptLegacyPassPass(PassRegistry&);
+void initializePHIEliminationPass(PassRegistry&);
+void initializePartialInlinerLegacyPassPass(PassRegistry&);
+void initializePartiallyInlineLibCallsLegacyPassPass(PassRegistry&);
+void initializePatchableFunctionPass(PassRegistry&);
+void initializePeepholeOptimizerPass(PassRegistry&);
+void initializePhysicalRegisterUsageInfoPass(PassRegistry&);
+void initializePlaceBackedgeSafepointsImplPass(PassRegistry&);
+void initializePlaceSafepointsPass(PassRegistry&);
+void initializePostDomOnlyPrinterPass(PassRegistry&);
+void initializePostDomOnlyViewerPass(PassRegistry&);
+void initializePostDomPrinterPass(PassRegistry&);
+void initializePostDomViewerPass(PassRegistry&);
+void initializePostDominatorTreeWrapperPassPass(PassRegistry&);
+void initializePostMachineSchedulerPass(PassRegistry&);
+void initializePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializePostRAHazardRecognizerPass(PassRegistry&);
+void initializePostRAMachineSinkingPass(PassRegistry&);
+void initializePostRASchedulerPass(PassRegistry&);
+void initializePreISelIntrinsicLoweringLegacyPassPass(PassRegistry&);
+void initializePredicateInfoPrinterLegacyPassPass(PassRegistry&);
+void initializePrintBasicBlockPassPass(PassRegistry&);
+void initializePrintFunctionPassWrapperPass(PassRegistry&);
+void initializePrintModulePassWrapperPass(PassRegistry&);
+void initializeProcessImplicitDefsPass(PassRegistry&);
+void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
+void initializePromoteLegacyPassPass(PassRegistry&);
+void initializePruneEHPass(PassRegistry&);
+void initializeRABasicPass(PassRegistry&);
+void initializeRegAllocFastPass(PassRegistry&);
+void initializeRAGreedyPass(PassRegistry&);
+void initializeReassociateLegacyPassPass(PassRegistry&);
+void initializeRegBankSelectPass(PassRegistry&);
+void initializeReachingDefAnalysisPass(PassRegistry&);
+void initializeRegToMemPass(PassRegistry&);
+void initializeRegionInfoPassPass(PassRegistry&);
+void initializeRegionOnlyPrinterPass(PassRegistry&);
+void initializeRegionOnlyViewerPass(PassRegistry&);
+void initializeRegionPrinterPass(PassRegistry&);
+void initializeRegionViewerPass(PassRegistry&);
+void initializeRegisterCoalescerPass(PassRegistry&);
+void initializeRenameIndependentSubregsPass(PassRegistry&);
+void initializeResetMachineFunctionPass(PassRegistry&);
+void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializeRewriteStatepointsForGCLegacyPassPass(PassRegistry &);
+void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);
+void initializeSafepointIRVerifierPass(PassRegistry&);
+void initializeSCCPLegacyPassPass(PassRegistry&);
+void initializeSCEVAAWrapperPassPass(PassRegistry&);
+void initializeSLPVectorizerPass(PassRegistry&);
+void initializeSROALegacyPassPass(PassRegistry&);
+void initializeSafeStackLegacyPassPass(PassRegistry&);
+void initializeSampleProfileLoaderLegacyPassPass(PassRegistry&);
+void initializeSanitizerCoverageModulePass(PassRegistry&);
+void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
+void initializeScalarizeMaskedMemIntrinPass(PassRegistry&);
+void initializeScalarizerPass(PassRegistry&);
+void initializeScavengerTestPass(PassRegistry&);
+void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
+void initializeSeparateConstOffsetFromGEPPass(PassRegistry&);
+void initializeShadowStackGCLoweringPass(PassRegistry&);
+void initializeShrinkWrapPass(PassRegistry&);
+void initializeSimpleInlinerPass(PassRegistry&);
+void initializeSimpleLoopUnswitchLegacyPassPass(PassRegistry&);
+void initializeSingleLoopExtractorPass(PassRegistry&);
+void initializeSinkingLegacyPassPass(PassRegistry&);
+void initializeSjLjEHPreparePass(PassRegistry&);
+void initializeSlotIndexesPass(PassRegistry&);
+void initializeSpeculativeExecutionLegacyPassPass(PassRegistry&);
+void initializeSpillPlacementPass(PassRegistry&);
+void initializeStackColoringPass(PassRegistry&);
+void initializeStackMapLivenessPass(PassRegistry&);
+void initializeStackProtectorPass(PassRegistry&);
+void initializeStackSlotColoringPass(PassRegistry&);
+void initializeStraightLineStrengthReducePass(PassRegistry&);
+void initializeStripDeadDebugInfoPass(PassRegistry&);
+void initializeStripDeadPrototypesLegacyPassPass(PassRegistry&);
+void initializeStripDebugDeclarePass(PassRegistry&);
+void initializeStripGCRelocatesPass(PassRegistry&);
+void initializeStripNonDebugSymbolsPass(PassRegistry&);
+void initializeStripNonLineTableDebugInfoPass(PassRegistry&);
+void initializeStripSymbolsPass(PassRegistry&);
+void initializeStructurizeCFGPass(PassRegistry&);
+void initializeHWAddressSanitizerPass(PassRegistry&);
+void initializeTailCallElimPass(PassRegistry&);
+void initializeTailDuplicatePass(PassRegistry&);
+void initializeTargetLibraryInfoWrapperPassPass(PassRegistry&);
+void initializeTargetPassConfigPass(PassRegistry&);
+void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
+void initializeThreadSanitizerPass(PassRegistry&);
+void initializeTwoAddressInstructionPassPass(PassRegistry&);
+void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
+void initializeUnifyFunctionExitNodesPass(PassRegistry&);
+void initializeUnpackMachineBundlesPass(PassRegistry&);
+void initializeUnreachableBlockElimLegacyPassPass(PassRegistry&);
+void initializeUnreachableMachineBlockElimPass(PassRegistry&);
+void initializeVerifierLegacyPassPass(PassRegistry&);
+void initializeVirtRegMapPass(PassRegistry&);
+void initializeVirtRegRewriterPass(PassRegistry&);
+void initializeWholeProgramDevirtPass(PassRegistry&);
+void initializeWinEHPreparePass(PassRegistry&);
+void initializeWriteBitcodePassPass(PassRegistry&);
+void initializeWriteThinLTOBitcodePass(PassRegistry&);
+void initializeXRayInstrumentationPass(PassRegistry&);
+void initializeMIRCanonicalizerPass(PassRegistry &);
+
+} // end namespace llvm
+
+#endif // LLVM_INITIALIZEPASSES_H
diff --git a/linux-x64/clang/include/llvm/LTO/Caching.h b/linux-x64/clang/include/llvm/LTO/Caching.h
new file mode 100644
index 0000000..7201ab3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/Caching.h
@@ -0,0 +1,40 @@
+//===- Caching.h - LLVM Link Time Optimizer Caching -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the localCache function, which allows clients to add a
+// filesystem cache to ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_CACHING_H
+#define LLVM_LTO_CACHING_H
+
+#include "llvm/LTO/LTO.h"
+#include <string>
+
+namespace llvm {
+namespace lto {
+
+/// This type defines the callback to add a pre-existing native object file
+/// (e.g. in a cache).
+///
+/// Buffer callbacks must be thread safe.
+typedef std::function<void(unsigned Task, std::unique_ptr<MemoryBuffer> MB)>
+    AddBufferFn;
+
+/// Create a local file system cache which uses the given cache directory and
+/// file callback. This function also creates the cache directory if it does not
+/// already exist.
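+///
+/// A wiring sketch (illustrative; the AddBuffer body and cache path are
+/// placeholders):
+/// \code
+///   AddBufferFn AddBuffer = [](unsigned Task,
+///                              std::unique_ptr<MemoryBuffer> MB) {
+///     // Hand the native object for task Task to the linker.
+///   };
+///   Expected<NativeObjectCache> Cache = localCache("lto.cache", AddBuffer);
+/// \endcode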
+Expected<NativeObjectCache> localCache(StringRef CacheDirectoryPath,
+                                       AddBufferFn AddBuffer);
+
+} // namespace lto
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/Config.h b/linux-x64/clang/include/llvm/LTO/Config.h
new file mode 100644
index 0000000..4bd981c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/Config.h
@@ -0,0 +1,202 @@
+//===- Config.h - LLVM Link Time Optimizer Configuration ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the lto::Config data structure, which allows clients to
+// configure LTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_CONFIG_H
+#define LLVM_LTO_CONFIG_H
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+
+#include <functional>
+
+namespace llvm {
+
+class Error;
+class Module;
+class ModuleSummaryIndex;
+class raw_pwrite_stream;
+
+namespace lto {
+
+/// LTO configuration. A linker can configure LTO by setting fields in this data
+/// structure and passing it to the lto::LTO constructor.
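+///
+/// For example, a linker might configure code generation like this
+/// (illustrative values):
+/// \code
+///   lto::Config Conf;
+///   Conf.OptLevel = 2;
+///   Conf.RelocModel = Reloc::PIC_;
+///   Conf.CGOptLevel = CodeGenOpt::Default;
+/// \endcode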
+struct Config {
+  // Note: when adding fields here, consider whether they need to be added to
+  // computeCacheKey in LTO.cpp.
+  std::string CPU;
+  TargetOptions Options;
+  std::vector<std::string> MAttrs;
+  Optional<Reloc::Model> RelocModel = Reloc::PIC_;
+  Optional<CodeModel::Model> CodeModel = None;
+  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
+  TargetMachine::CodeGenFileType CGFileType = TargetMachine::CGFT_ObjectFile;
+  unsigned OptLevel = 2;
+  bool DisableVerify = false;
+
+  /// Use the new pass manager.
+  bool UseNewPM = false;
+
+  /// Disable the optimizer entirely, including function importing for ThinLTO.
+  bool CodeGenOnly = false;
+
+  /// If this field is set, the set of passes run in the middle-end optimizer
+  /// will be the one specified by the string. Only works with the new pass
+  /// manager as the old one doesn't have this ability.
+  std::string OptPipeline;
+
+  /// If this field is set, it has the same effect as specifying an AA pipeline
+  /// identified by the string. Only works with the new pass manager, in
+  /// conjunction with OptPipeline.
+  std::string AAPipeline;
+
+  /// Setting this field will replace target triples in input files with this
+  /// triple.
+  std::string OverrideTriple;
+
+  /// Setting this field will replace unspecified target triples in input files
+  /// with this triple.
+  std::string DefaultTriple;
+
+  /// Sample PGO profile path.
+  std::string SampleProfile;
+
+  /// Optimization remarks file path.
+  std::string RemarksFilename = "";
+
+  /// Whether to emit optimization remarks with hotness information.
+  bool RemarksWithHotness = false;
+
+  /// Whether to emit pass manager debugging information.
+  bool DebugPassManager = false;
+
+  bool ShouldDiscardValueNames = true;
+  DiagnosticHandlerFunction DiagHandler;
+
+  /// If this field is set, LTO will write input file paths and symbol
+  /// resolutions here in llvm-lto2 command line flag format. This can be
+  /// used for testing and for running the LTO pipeline outside of the linker
+  /// with llvm-lto2.
+  std::unique_ptr<raw_ostream> ResolutionFile;
+
+  /// The following callbacks deal with tasks, which normally represent the
+  /// entire optimization and code generation pipeline for what will become a
+  /// single native object file. Each task has a unique identifier between 0 and
+  /// getMaxTasks()-1, which is supplied to the callback via the Task parameter.
+  /// A task represents the entire pipeline for ThinLTO and regular
+  /// (non-parallel) LTO, but a parallel code generation task will be split into
+  /// N tasks before code generation, where N is the parallelism level.
+  ///
+  /// LTO may decide to stop processing a task at any time, for example if the
+  /// module is empty or if a module hook (see below) returns false. For this
+  /// reason, the client should not expect to receive exactly getMaxTasks()
+  /// native object files.
+
+  /// A module hook may be used by a linker to perform actions during the LTO
+  /// pipeline. For example, a linker may use this function to implement
+  /// -save-temps. If this function returns false, any further processing for
+  /// that task is aborted.
+  ///
+  /// Module hooks must be thread safe with respect to the linker's internal
+  /// data structures. A module hook will never be called concurrently from
+  /// multiple threads with the same task ID, or the same module.
+  ///
+  /// Note that in out-of-process backend scenarios, none of the hooks will be
+  /// called for ThinLTO tasks.
+  typedef std::function<bool(unsigned Task, const Module &)> ModuleHookFn;
+
+  /// This module hook is called after linking (regular LTO) or loading
+  /// (ThinLTO) the module, before modifying it.
+  ModuleHookFn PreOptModuleHook;
+
+  /// This hook is called after promoting any internal functions
+  /// (ThinLTO-specific).
+  ModuleHookFn PostPromoteModuleHook;
+
+  /// This hook is called after internalizing the module.
+  ModuleHookFn PostInternalizeModuleHook;
+
+  /// This hook is called after importing from other modules (ThinLTO-specific).
+  ModuleHookFn PostImportModuleHook;
+
+  /// This module hook is called after optimization is complete.
+  ModuleHookFn PostOptModuleHook;
+
+  /// This module hook is called before code generation. It is similar to the
+  /// PostOptModuleHook, but for parallel code generation it is called after
+  /// splitting the module.
+  ModuleHookFn PreCodeGenModuleHook;
+
+  /// A combined index hook is called after all per-module indexes have been
+  /// combined (ThinLTO-specific). It can be used to implement -save-temps for
+  /// the combined index.
+  ///
+  /// If this function returns false, any further processing for ThinLTO tasks
+  /// is aborted.
+  ///
+  /// It is called regardless of whether the backend is in-process, although it
+  /// is not called from individual backend processes.
+  typedef std::function<bool(const ModuleSummaryIndex &Index)>
+      CombinedIndexHookFn;
+  CombinedIndexHookFn CombinedIndexHook;
+
+  /// This is a convenience function that configures this Config object to write
+  /// temporary files named after the given OutputFileName for each of the LTO
+  /// phases to disk. A client can use this function to implement -save-temps.
+  ///
+  /// FIXME: Temporary files derived from ThinLTO backends are currently named
+  /// after the input file name, rather than the output file name, when
+  /// UseInputModulePath is set to true.
+  ///
+  /// Specifically, it (1) sets each of the above module hooks and the combined
+  /// index hook to a function that calls the hook function (if any) that was
+  /// present in the appropriate field when the addSaveTemps function was
+  /// called, and writes the module to a bitcode file with a name prefixed by
+  /// the given output file name, and (2) creates a resolution file whose name
+  /// is prefixed by the given output file name and sets ResolutionFile to its
+  /// file handle.
+  Error addSaveTemps(std::string OutputFileName,
+                     bool UseInputModulePath = false);
+};
+
+struct LTOLLVMDiagnosticHandler : public DiagnosticHandler {
+  DiagnosticHandlerFunction *Fn;
+  LTOLLVMDiagnosticHandler(DiagnosticHandlerFunction *DiagHandlerFn)
+      : Fn(DiagHandlerFn) {}
+  bool handleDiagnostics(const DiagnosticInfo &DI) override {
+    (*Fn)(DI);
+    return true;
+  }
+};
+/// A derived class of LLVMContext that initializes itself according to a given
+/// Config object. The purpose of this class is to tie ownership of the
+/// diagnostic handler to the context, as opposed to the Config object (which
+/// may be ephemeral).
+// FIXME: This should not be required, as the diagnostic handler is not a
+// callback.
+struct LTOLLVMContext : LLVMContext {
+
+  LTOLLVMContext(const Config &C) : DiagHandler(C.DiagHandler) {
+    setDiscardValueNames(C.ShouldDiscardValueNames);
+    enableDebugTypeODRUniquing();
+    setDiagnosticHandler(
+        llvm::make_unique<LTOLLVMDiagnosticHandler>(&DiagHandler), true);
+  }
+  DiagnosticHandlerFunction DiagHandler;
+};
+
+} // namespace lto
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/LTO.h b/linux-x64/clang/include/llvm/LTO/LTO.h
new file mode 100644
index 0000000..7d6beab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/LTO.h
@@ -0,0 +1,417 @@
+//===- LTO.h - LLVM Link Time Optimizer -----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares functions and classes used to support LTO. It is intended
+// to be used both by LTO classes and by clients (such as the gold plugin) that
+// don't utilize the LTO code generator interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTO_H
+#define LLVM_LTO_LTO_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/LTO/Config.h"
+#include "llvm/Linker/IRMover.h"
+#include "llvm/Object/IRSymtab.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/thread.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+
+namespace llvm {
+
+class BitcodeModule;
+class Error;
+class LLVMContext;
+class MemoryBufferRef;
+class Module;
+class Target;
+class raw_pwrite_stream;
+
+/// Resolve Weak and LinkOnce values in the \p Index. Linkage changes are
+/// recorded in the index, and the ThinLTO backends must apply the changes to
+/// the Module via thinLTOResolveWeakForLinkerModule.
+///
+/// This is done for correctness (if a value is exported, ensure we always
+/// emit a copy) and for compile-time optimization (duplicates may be dropped).
+void thinLTOResolveWeakForLinkerInIndex(
+    ModuleSummaryIndex &Index,
+    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
+        isPrevailing,
+    function_ref<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
+        recordNewLinkage);
+
+/// Update the linkages in the given \p Index to mark exported values
+/// as external and non-exported values as internal. The ThinLTO backends
+/// must apply the changes to the Module via thinLTOInternalizeModule.
+void thinLTOInternalizeAndPromoteInIndex(
+    ModuleSummaryIndex &Index,
+    function_ref<bool(StringRef, GlobalValue::GUID)> isExported);
+
+namespace lto {
+
+/// Given the original \p Path to an output file, replace any path
+/// prefix matching \p OldPrefix with \p NewPrefix. Also, create the
+/// resulting directory if it does not yet exist.
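+///
+/// For example (illustrative): with OldPrefix "old" and NewPrefix "new", the
+/// path "old/foo.o" is mapped to "new/foo.o".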
+std::string getThinLTOOutputFile(const std::string &Path,
+                                 const std::string &OldPrefix,
+                                 const std::string &NewPrefix);
+
+/// Setup optimization remarks.
+Expected<std::unique_ptr<ToolOutputFile>>
+setupOptimizationRemarks(LLVMContext &Context, StringRef LTORemarksFilename,
+                         bool LTOPassRemarksWithHotness, int Count = -1);
+
+class LTO;
+struct SymbolResolution;
+class ThinBackendProc;
+
+/// An input file. This is a symbol table wrapper that only exposes the
+/// information that an LTO client should need in order to do symbol resolution.
+class InputFile {
+public:
+  class Symbol;
+
+private:
+  // FIXME: Remove LTO class friendship once we have bitcode symbol tables.
+  friend LTO;
+  InputFile() = default;
+
+  std::vector<BitcodeModule> Mods;
+  SmallVector<char, 0> Strtab;
+  std::vector<Symbol> Symbols;
+
+  // [begin, end) for each module
+  std::vector<std::pair<size_t, size_t>> ModuleSymIndices;
+
+  StringRef TargetTriple, SourceFileName, COFFLinkerOpts;
+  std::vector<StringRef> ComdatTable;
+
+public:
+  ~InputFile();
+
+  /// Create an InputFile.
+  static Expected<std::unique_ptr<InputFile>> create(MemoryBufferRef Object);
+
+  /// The purpose of this class is to only expose the symbol information that an
+  /// LTO client should need in order to do symbol resolution.
+  class Symbol : irsymtab::Symbol {
+    friend LTO;
+
+  public:
+    Symbol(const irsymtab::Symbol &S) : irsymtab::Symbol(S) {}
+
+    using irsymtab::Symbol::isUndefined;
+    using irsymtab::Symbol::isCommon;
+    using irsymtab::Symbol::isWeak;
+    using irsymtab::Symbol::isIndirect;
+    using irsymtab::Symbol::getName;
+    using irsymtab::Symbol::getVisibility;
+    using irsymtab::Symbol::canBeOmittedFromSymbolTable;
+    using irsymtab::Symbol::isTLS;
+    using irsymtab::Symbol::getComdatIndex;
+    using irsymtab::Symbol::getCommonSize;
+    using irsymtab::Symbol::getCommonAlignment;
+    using irsymtab::Symbol::getCOFFWeakExternalFallback;
+    using irsymtab::Symbol::getSectionName;
+    using irsymtab::Symbol::isExecutable;
+  };
+
+  /// A range over the symbols in this InputFile.
+  ArrayRef<Symbol> symbols() const { return Symbols; }
+
+  /// Returns linker options specified in the input file.
+  StringRef getCOFFLinkerOpts() const { return COFFLinkerOpts; }
+
+  /// Returns the path to the InputFile.
+  StringRef getName() const;
+
+  /// Returns the input file's target triple.
+  StringRef getTargetTriple() const { return TargetTriple; }
+
+  /// Returns the source file path specified at compile time.
+  StringRef getSourceFileName() const { return SourceFileName; }
+
+  /// Returns a table with all the comdats used by this file.
+  ArrayRef<StringRef> getComdatTable() const { return ComdatTable; }
+
+private:
+  ArrayRef<Symbol> module_symbols(unsigned I) const {
+    const auto &Indices = ModuleSymIndices[I];
+    return {Symbols.data() + Indices.first, Symbols.data() + Indices.second};
+  }
+};
+
+/// This class wraps an output stream for a native object. Most clients should
+/// just be able to return an instance of this base class from the stream
+/// callback, but if a client needs to perform some action after the stream is
+/// written to, that can be done by deriving from this class and overriding the
+/// destructor.
+class NativeObjectStream {
+public:
+  NativeObjectStream(std::unique_ptr<raw_pwrite_stream> OS) : OS(std::move(OS)) {}
+  std::unique_ptr<raw_pwrite_stream> OS;
+  virtual ~NativeObjectStream() = default;
+};
+
+/// This type defines the callback to add a native object that is generated on
+/// the fly.
+///
+/// Stream callbacks must be thread safe.
+typedef std::function<std::unique_ptr<NativeObjectStream>(unsigned Task)>
+    AddStreamFn;
+
+/// This is the type of a native object cache. To request an item from the
+/// cache, pass a unique string as the Key. For hits, the cached file will be
+/// added to the link and this function will return AddStreamFn(). For misses,
+/// the cache will return a stream callback which must be called at most once to
+/// produce content for the stream. The native object stream produced by the
+/// stream callback will add the file to the link after the stream is written
+/// to.
+///
+/// Clients generally look like this:
+///
+/// if (AddStreamFn AddStream = Cache(Task, Key))
+///   ProduceContent(AddStream);
+typedef std::function<AddStreamFn(unsigned Task, StringRef Key)>
+    NativeObjectCache;
+
+/// A ThinBackend defines what happens after the thin-link phase during ThinLTO.
+/// The details of this type definition aren't important; clients can only
+/// create a ThinBackend using one of the create*ThinBackend() functions below.
+typedef std::function<std::unique_ptr<ThinBackendProc>(
+    Config &C, ModuleSummaryIndex &CombinedIndex,
+    StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+    AddStreamFn AddStream, NativeObjectCache Cache)>
+    ThinBackend;
+
+/// This ThinBackend runs the individual backend jobs in-process.
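+///
+/// For example (illustrative parallelism level):
+/// \code
+///   ThinBackend Backend = createInProcessThinBackend(4);
+/// \endcode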
+ThinBackend createInProcessThinBackend(unsigned ParallelismLevel);
+
+/// This ThinBackend writes individual module indexes to files, instead of
+/// running the individual backend jobs. This backend is for distributed builds
+/// where separate processes will invoke the real backends.
+///
+/// To find the path to write the index to, the backend checks if the path has a
+/// prefix of OldPrefix; if so, it replaces that prefix with NewPrefix. It then
+/// appends ".thinlto.bc" and writes the index to that path. If
+/// ShouldEmitImportsFiles is true it also writes a list of imported files to a
+/// similar path with ".imports" appended instead.
+/// LinkedObjectsFile is an output stream to write the list of object files for
+/// the final ThinLTO linking. Can be nullptr.
+/// OnWrite is a callback which receives the module identifier and notifies the
+/// LTO user that the index file for the module (and optionally the imports
+/// file) has been created.
+using IndexWriteCallback = std::function<void(const std::string &)>;
+ThinBackend createWriteIndexesThinBackend(std::string OldPrefix,
+                                          std::string NewPrefix,
+                                          bool ShouldEmitImportsFiles,
+                                          raw_fd_ostream *LinkedObjectsFile,
+                                          IndexWriteCallback OnWrite);
+
+/// This class implements a resolution-based interface to LLVM's LTO
+/// functionality. It supports regular LTO, parallel LTO code generation and
+/// ThinLTO. You can use it from a linker in the following way:
+/// - Set hooks and code generation options (see lto::Config struct defined in
+///   Config.h), and use the lto::Config object to create an lto::LTO object.
+/// - Create lto::InputFile objects using lto::InputFile::create(), then use
+///   the symbols() function to enumerate its symbols and compute a resolution
+///   for each symbol (see SymbolResolution below).
+/// - After the linker has visited each input file (and each regular object
+///   file) and computed a resolution for each symbol, take each lto::InputFile
+///   and pass it and an array of symbol resolutions to the add() function.
+/// - Call the getMaxTasks() function to get an upper bound on the number of
+///   native object files that LTO may add to the link.
+/// - Call the run() function. This function will use the supplied AddStream
+///   and Cache functions to add up to getMaxTasks() native object files to
+///   the link.
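+///
+/// A condensed sketch of that flow (illustrative; Conf, Backend, Inputs,
+/// Resolutions, AddStream and Cache are assumed to exist, and cantFail() is
+/// used for brevity instead of real error handling):
+/// \code
+///   lto::LTO Link(std::move(Conf), Backend);
+///   for (std::unique_ptr<InputFile> &Input : Inputs) {
+///     ArrayRef<SymbolResolution> R = Resolutions[Input->getName()];
+///     cantFail(Link.add(std::move(Input), R));
+///   }
+///   cantFail(Link.run(AddStream, Cache));
+/// \endcode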
+class LTO {
+  friend InputFile;
+
+public:
+  /// Create an LTO object. A default constructed LTO object has a reasonable
+  /// production configuration, but you can customize it by passing arguments to
+  /// this constructor.
+  /// FIXME: We currently require the DiagHandler field to be set in Conf.
+  /// Until that is fixed, a Config argument is required.
+  LTO(Config Conf, ThinBackend Backend = nullptr,
+      unsigned ParallelCodeGenParallelismLevel = 1);
+  ~LTO();
+
+  /// Add an input file to the LTO link, using the provided symbol resolutions.
+  /// The symbol resolutions must appear in the enumeration order given by
+  /// InputFile::symbols().
+  Error add(std::unique_ptr<InputFile> Obj, ArrayRef<SymbolResolution> Res);
+
+  /// Returns an upper bound on the number of tasks that the client may expect.
+  /// This may only be called after all IR object files have been added. For a
+  /// full description of tasks see LTOBackend.h.
+  unsigned getMaxTasks() const;
+
+  /// Runs the LTO pipeline. This function calls the supplied AddStream
+  /// function to add native object files to the link.
+  ///
+  /// The Cache parameter is optional. If supplied, it will be used to cache
+  /// native object files and add them to the link.
+  ///
+  /// The client will receive at most one callback (via either AddStream or
+  /// Cache) for each task identifier.
+  Error run(AddStreamFn AddStream, NativeObjectCache Cache = nullptr);
+
+private:
+  Config Conf;
+
+  struct RegularLTOState {
+    RegularLTOState(unsigned ParallelCodeGenParallelismLevel, Config &Conf);
+    struct CommonResolution {
+      uint64_t Size = 0;
+      unsigned Align = 0;
+      /// Record whether at least one instance of the common was marked as
+      /// prevailing.
+      bool Prevailing = false;
+    };
+    std::map<std::string, CommonResolution> Commons;
+
+    unsigned ParallelCodeGenParallelismLevel;
+    LTOLLVMContext Ctx;
+    std::unique_ptr<Module> CombinedModule;
+    std::unique_ptr<IRMover> Mover;
+
+    // This stores the information about a regular LTO module that we have added
+    // to the link. It will either be linked immediately (for modules without
+    // summaries) or after summary-based dead stripping (for modules with
+    // summaries).
+    struct AddedModule {
+      std::unique_ptr<Module> M;
+      std::vector<GlobalValue *> Keep;
+    };
+    std::vector<AddedModule> ModsWithSummaries;
+  } RegularLTO;
+
+  struct ThinLTOState {
+    ThinLTOState(ThinBackend Backend);
+
+    ThinBackend Backend;
+    ModuleSummaryIndex CombinedIndex;
+    MapVector<StringRef, BitcodeModule> ModuleMap;
+    DenseMap<GlobalValue::GUID, StringRef> PrevailingModuleForGUID;
+  } ThinLTO;
+
+  // The global resolution for a particular (mangled) symbol name. This is in
+  // particular necessary to track whether each symbol can be internalized.
+  // Because any input file may introduce a new cross-partition reference, we
+  // cannot make any final internalization decisions until all input files have
+  // been added and the client has called run(). During run() we apply
+  // internalization decisions either directly to the module (for regular LTO)
+  // or to the combined index (for ThinLTO).
+  struct GlobalResolution {
+    /// The unmangled name of the global.
+    std::string IRName;
+
+    /// Keeps track of whether the symbol is visible outside of a module with a
+    /// summary (i.e. in either a regular object or a regular LTO module
+    /// without a summary).
+    bool VisibleOutsideSummary = false;
+
+    bool UnnamedAddr = true;
+
+    /// True if the module contains the prevailing definition.
+    bool Prevailing = false;
+
+    /// Returns true if the module contains the prevailing definition and the
+    /// symbol is an IR symbol. For example, when a module-level inline asm
+    /// block is used, a symbol can be prevailing in the module but have no
+    /// IR name.
+    bool isPrevailingIRSymbol() const { return Prevailing && !IRName.empty(); }
+
+    /// This field keeps track of the partition number of this global. The
+    /// regular LTO object is partition 0, while each ThinLTO object has its own
+    /// partition number from 1 onwards.
+    ///
+    /// Any global that is defined or used by more than one partition, or that
+    /// is referenced externally, may not be internalized.
+    ///
+    /// Partitions generally have a one-to-one correspondence with tasks, except
+    /// that we use partition 0 for all parallel LTO code generation partitions.
+    /// Any partitioning of the combined LTO object is done internally by the
+    /// LTO backend.
+    unsigned Partition = Unknown;
+
+    /// Special partition numbers.
+    enum : unsigned {
+      /// A partition number has not yet been assigned to this global.
+      Unknown = -1u,
+
+      /// This global is either used by more than one partition or has an
+      /// external reference, and therefore cannot be internalized.
+      External = -2u,
+
+      /// The RegularLTO partition
+      RegularLTO = 0,
+    };
+  };
+
+  // Global mapping from mangled symbol names to resolutions.
+  StringMap<GlobalResolution> GlobalResolutions;
+
+  void addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
+                            ArrayRef<SymbolResolution> Res, unsigned Partition,
+                            bool InSummary);
+
+  // These functions take a range of symbol resolutions [ResI, ResE) and consume
+  // the resolutions used by a single input module by incrementing ResI. After
+  // these functions return, [ResI, ResE) will refer to the resolution range for
+  // the remaining modules in the InputFile.
+  Error addModule(InputFile &Input, unsigned ModI,
+                  const SymbolResolution *&ResI, const SymbolResolution *ResE);
+
+  Expected<RegularLTOState::AddedModule>
+  addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+                const SymbolResolution *&ResI, const SymbolResolution *ResE);
+  Error linkRegularLTO(RegularLTOState::AddedModule Mod,
+                       bool LivenessFromIndex);
+
+  Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+                   const SymbolResolution *&ResI, const SymbolResolution *ResE);
+
+  Error runRegularLTO(AddStreamFn AddStream);
+  Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache);
+
+  mutable bool CalledGetMaxTasks = false;
+};
+
+/// The resolution for a symbol. The linker must provide a SymbolResolution for
+/// each global symbol based on its internal resolution of that symbol.
+struct SymbolResolution {
+  SymbolResolution()
+      : Prevailing(0), FinalDefinitionInLinkageUnit(0), VisibleToRegularObj(0),
+        LinkerRedefined(0) {}
+
+  /// The linker has chosen this definition of the symbol.
+  unsigned Prevailing : 1;
+
+  /// The definition of this symbol is unpreemptable at runtime and is known to
+  /// be in this linkage unit.
+  unsigned FinalDefinitionInLinkageUnit : 1;
+
+  /// The definition of this symbol is visible outside of the LTO unit.
+  unsigned VisibleToRegularObj : 1;
+
+  /// Linker-redefined version of the symbol, which appeared in a -wrap or
+  /// -defsym linker option.
+  unsigned LinkerRedefined : 1;
+};
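+
+// Illustrative sketch (not part of the original header): a hypothetical
+// linker driver could fill in one resolution per symbol, e.g.:
+//
+//   SymbolResolution R;
+//   R.Prevailing = 1;              // the linker chose this definition
+//   R.VisibleToRegularObj = 1;     // also referenced from a regular object
+//   Resolutions.push_back(R);      // 'Resolutions' is a hypothetical
+//                                  // std::vector<SymbolResolution>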
+
+} // namespace lto
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/LTOBackend.h b/linux-x64/clang/include/llvm/LTO/LTOBackend.h
new file mode 100644
index 0000000..d4743f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/LTOBackend.h
@@ -0,0 +1,52 @@
+//===-LTOBackend.h - LLVM Link Time Optimizer Backend ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the "backend" phase of LTO, i.e. it performs
+// optimization and code generation on a loaded module. It is generally used
+// internally by the LTO class but can also be used independently, for example
+// to implement a standalone ThinLTO backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOBACKEND_H
+#define LLVM_LTO_LTOBACKEND_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+
+namespace llvm {
+
+class BitcodeModule;
+class Error;
+class Module;
+class Target;
+
+namespace lto {
+
+/// Runs a regular LTO backend. The regular LTO backend can also act as the
+/// regular LTO phase of ThinLTO, which may need to access the combined index.
+Error backend(Config &C, AddStreamFn AddStream,
+              unsigned ParallelCodeGenParallelismLevel,
+              std::unique_ptr<Module> M, ModuleSummaryIndex &CombinedIndex);
+
+/// Runs a ThinLTO backend.
+Error thinBackend(Config &C, unsigned Task, AddStreamFn AddStream, Module &M,
+                  const ModuleSummaryIndex &CombinedIndex,
+                  const FunctionImporter::ImportMapTy &ImportList,
+                  const GVSummaryMapTy &DefinedGlobals,
+                  MapVector<StringRef, BitcodeModule> &ModuleMap);
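+
+// Illustrative sketch (an assumption, not from this header): a standalone
+// ThinLTO backend could be driven roughly like so, where Conf, M, Index,
+// Imports, Defined and Map are caller-provided objects of the types above:
+//
+//   AddStreamFn AddStream = ...;   // hands back a stream per output task
+//   if (Error E = thinBackend(Conf, /*Task=*/0, AddStream, *M, Index,
+//                             Imports, Defined, Map))
+//     /* handle the error */;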
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/legacy/LTOCodeGenerator.h b/linux-x64/clang/include/llvm/LTO/legacy/LTOCodeGenerator.h
new file mode 100644
index 0000000..f48ab02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/legacy/LTOCodeGenerator.h
@@ -0,0 +1,244 @@
+//===-LTOCodeGenerator.h - LLVM Link Time Optimizer -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LTOCodeGenerator class.
+//
+//   LTO compilation consists of three phases: Pre-IPO, IPO and Post-IPO.
+//
+//   The Pre-IPO phase compiles source code into bitcode files. The resulting
+// bitcode files, along with object files and libraries, are fed to the
+// linker, which drives the IPO and Post-IPO phases. By using the object-file
+// extension, a bitcode file disguises itself as an object file and therefore
+// obviates the need to write a special set of make-rules just for LTO
+// compilation.
+//
+//   The IPO phase performs inter-procedural analyses and optimizations, and
+// the Post-IPO phase consists of two sub-phases: intra-procedural scalar
+// optimizations (SOPT), and the intra-procedural target-dependent code
+// generator (CG).
+//
+//   As of this writing, we don't separate IPO and the Post-IPO SOPT. They
+// are intermingled and driven by a single pass manager (see
+// PassManagerBuilder::populateLTOPassManager()).
+//
+//   The "LTOCodeGenerator" is the driver for the IPO and Post-IPO stages.
+// The name "CodeGenerator" here is a bit confusing; don't confuse it with
+// the machine-specific code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOCODEGENERATOR_H
+#define LLVM_LTO_LTOCODEGENERATOR_H
+
+#include "llvm-c/lto.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+template <typename T> class ArrayRef;
+class LLVMContext;
+class DiagnosticInfo;
+class Linker;
+class Mangler;
+class MemoryBuffer;
+class TargetLibraryInfo;
+class TargetMachine;
+class raw_ostream;
+class raw_pwrite_stream;
+
+//===----------------------------------------------------------------------===//
+/// C++ class which implements the opaque lto_code_gen_t type.
+///
+struct LTOCodeGenerator {
+  static const char *getVersionString();
+
+  LTOCodeGenerator(LLVMContext &Context);
+  ~LTOCodeGenerator();
+
+  /// Merge the given module.  Returns true on success.
+  ///
+  /// Resets \a HasVerifiedInput.
+  bool addModule(struct LTOModule *);
+
+  /// Set the destination module.
+  ///
+  /// Resets \a HasVerifiedInput.
+  void setModule(std::unique_ptr<LTOModule> M);
+
+  void setAsmUndefinedRefs(struct LTOModule *);
+  void setTargetOptions(const TargetOptions &Options);
+  void setDebugInfo(lto_debug_model);
+  void setCodePICModel(Optional<Reloc::Model> Model) { RelocModel = Model; }
+
+  /// Set the file type to be emitted (assembly or object code).
+  /// The default is TargetMachine::CGFT_ObjectFile.
+  void setFileType(TargetMachine::CodeGenFileType FT) { FileType = FT; }
+
+  void setCpu(StringRef MCpu) { this->MCpu = MCpu; }
+  void setAttr(StringRef MAttr) { this->MAttr = MAttr; }
+  void setOptLevel(unsigned OptLevel);
+
+  void setShouldInternalize(bool Value) { ShouldInternalize = Value; }
+  void setShouldEmbedUselists(bool Value) { ShouldEmbedUselists = Value; }
+
+  /// Restore linkage of globals
+  ///
+  /// When set, the linkage of globals will be restored prior to code
+  /// generation. That is, a global symbol that had external linkage prior to
+  /// LTO will be emitted with external linkage again; and a local will remain
+  /// local. Note that this option only affects the end result - globals may
+  /// still be internalized in the process of LTO and may be modified and/or
+  /// deleted where legal.
+  ///
+  /// The default behavior will internalize globals (unless on the preserve
+  /// list) and, if parallel code generation is enabled, will externalize
+  /// all locals.
+  void setShouldRestoreGlobalsLinkage(bool Value) {
+    ShouldRestoreGlobalsLinkage = Value;
+  }
+
+  void addMustPreserveSymbol(StringRef Sym) { MustPreserveSymbols[Sym] = 1; }
+
+  /// Pass options to the driver and optimization passes.
+  ///
+  /// These options are not necessarily for debugging purposes (the function
+  /// name is misleading).  This function should be called before
+  /// LTOCodeGenerator::compilexxx() and
+  /// LTOCodeGenerator::writeMergedModules().
+  void setCodeGenDebugOptions(StringRef Opts);
+
+  /// Parse the options set in setCodeGenDebugOptions.
+  ///
+  /// Like \a setCodeGenDebugOptions(), this must be called before
+  /// LTOCodeGenerator::compilexxx() and
+  /// LTOCodeGenerator::writeMergedModules().
+  void parseCodeGenDebugOptions();
+
+  /// Write the merged module to the file specified by the given path.  Return
+  /// true on success.
+  ///
+  /// Calls \a verifyMergedModuleOnce().
+  bool writeMergedModules(StringRef Path);
+
+  /// Compile the merged module into a *single* output file; the path to output
+  /// file is returned to the caller via argument "name". Return true on
+  /// success.
+  ///
+  /// \note It is up to the linker to remove the intermediate output file.  Do
+  /// not try to remove the object file in LTOCodeGenerator's destructor, as
+  /// we don't know which (LTOCodeGenerator or the output file) will last
+  /// longer.
+  bool compile_to_file(const char **Name, bool DisableVerify,
+                       bool DisableInline, bool DisableGVNLoadPRE,
+                       bool DisableVectorization);
+
+  /// As with compile_to_file(), this function compiles the merged module into
+  /// a single output file. Instead of returning the output file path to the
+  /// caller (linker), it brings the output into a buffer and returns the
+  /// buffer to the caller. This function deletes the intermediate file once
+  /// its content has been brought into memory. Returns NULL if the
+  /// compilation was not successful.
+  std::unique_ptr<MemoryBuffer> compile(bool DisableVerify, bool DisableInline,
+                                        bool DisableGVNLoadPRE,
+                                        bool DisableVectorization);
+
+  /// Optimizes the merged module.  Returns true on success.
+  ///
+  /// Calls \a verifyMergedModuleOnce().
+  bool optimize(bool DisableVerify, bool DisableInline, bool DisableGVNLoadPRE,
+                bool DisableVectorization);
+
+  /// Compiles the merged optimized module into a single output file. It brings
+  /// the output to a buffer, and returns the buffer to the caller. Return NULL
+  /// if the compilation was not successful.
+  std::unique_ptr<MemoryBuffer> compileOptimized();
+
+  /// Compile the merged optimized module into out.size() output files, each
+  /// representing a linkable partition of the module. If out contains more
+  /// than one element, code generation is done in parallel with out.size()
+  /// threads.  Output files will be written to members of out. Returns true on
+  /// success.
+  ///
+  /// Calls \a verifyMergedModuleOnce().
+  bool compileOptimized(ArrayRef<raw_pwrite_stream *> Out);
+
+  /// Enable the Freestanding mode: indicate that the optimizer should not
+  /// assume builtins are present on the target.
+  void setFreestanding(bool Enabled) { Freestanding = Enabled; }
+
+  void setDiagnosticHandler(lto_diagnostic_handler_t, void *);
+
+  LLVMContext &getContext() { return Context; }
+
+  void resetMergedModule() { MergedModule.reset(); }
+  void DiagnosticHandler(const DiagnosticInfo &DI);
+
+private:
+  void initializeLTOPasses();
+
+  /// Verify the merged module on first call.
+  ///
+  /// Sets \a HasVerifiedInput on first call and doesn't run again on the same
+  /// input.
+  void verifyMergedModuleOnce();
+
+  bool compileOptimizedToFile(const char **Name);
+  void restoreLinkageForExternals();
+  void applyScopeRestrictions();
+  void preserveDiscardableGVs(
+      Module &TheModule,
+      llvm::function_ref<bool(const GlobalValue &)> mustPreserveGV);
+
+  bool determineTarget();
+  std::unique_ptr<TargetMachine> createTargetMachine();
+
+  void emitError(const std::string &ErrMsg);
+  void emitWarning(const std::string &ErrMsg);
+
+  void finishOptimizationRemarks();
+
+  LLVMContext &Context;
+  std::unique_ptr<Module> MergedModule;
+  std::unique_ptr<Linker> TheLinker;
+  std::unique_ptr<TargetMachine> TargetMach;
+  bool EmitDwarfDebugInfo = false;
+  bool ScopeRestrictionsDone = false;
+  bool HasVerifiedInput = false;
+  Optional<Reloc::Model> RelocModel;
+  StringSet<> MustPreserveSymbols;
+  StringSet<> AsmUndefinedRefs;
+  StringMap<GlobalValue::LinkageTypes> ExternalSymbols;
+  std::vector<std::string> CodegenOptions;
+  std::string FeatureStr;
+  std::string MCpu;
+  std::string MAttr;
+  std::string NativeObjectPath;
+  TargetOptions Options;
+  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
+  const Target *MArch = nullptr;
+  std::string TripleStr;
+  unsigned OptLevel = 2;
+  lto_diagnostic_handler_t DiagHandler = nullptr;
+  void *DiagContext = nullptr;
+  bool ShouldInternalize = true;
+  bool ShouldEmbedUselists = false;
+  bool ShouldRestoreGlobalsLinkage = false;
+  TargetMachine::CodeGenFileType FileType = TargetMachine::CGFT_ObjectFile;
+  std::unique_ptr<ToolOutputFile> DiagnosticOutputFile;
+  bool Freestanding = false;
+};
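+
+// Illustrative usage sketch (an assumption, not part of this header): a
+// legacy LTO driver typically merges modules and then compiles, e.g.:
+//
+//   LLVMContext Ctx;
+//   LTOCodeGenerator CG(Ctx);
+//   CG.addModule(Mod);             // 'Mod' is a caller-created LTOModule*
+//   CG.setOptLevel(2);
+//   const char *Name = nullptr;
+//   if (!CG.compile_to_file(&Name, false, false, false, false))
+//     /* report failure */;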
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/legacy/LTOModule.h b/linux-x64/clang/include/llvm/LTO/legacy/LTOModule.h
new file mode 100644
index 0000000..017e223
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/legacy/LTOModule.h
@@ -0,0 +1,208 @@
+//===-LTOModule.h - LLVM Link Time Optimizer ------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LTOModule class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOMODULE_H
+#define LLVM_LTO_LTOMODULE_H
+
+#include "llvm-c/lto.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/ModuleSymbolTable.h"
+#include "llvm/Target/TargetMachine.h"
+#include <string>
+#include <vector>
+
+// Forward references to llvm classes.
+namespace llvm {
+  class Function;
+  class GlobalValue;
+  class MemoryBuffer;
+  class TargetOptions;
+  class Value;
+
+//===----------------------------------------------------------------------===//
+/// C++ class which implements the opaque lto_module_t type.
+///
+struct LTOModule {
+private:
+  struct NameAndAttributes {
+    StringRef name;
+    uint32_t attributes = 0;
+    bool isFunction = false;
+    const GlobalValue *symbol = nullptr;
+  };
+
+  std::unique_ptr<LLVMContext> OwnedContext;
+
+  std::string LinkerOpts;
+
+  std::unique_ptr<Module> Mod;
+  MemoryBufferRef MBRef;
+  ModuleSymbolTable SymTab;
+  std::unique_ptr<TargetMachine> _target;
+  std::vector<NameAndAttributes> _symbols;
+
+  // _defines and _undefines are only needed to disambiguate tentative
+  // definitions.
+  StringSet<> _defines;
+  StringMap<NameAndAttributes> _undefines;
+  std::vector<StringRef> _asm_undefines;
+
+  LTOModule(std::unique_ptr<Module> M, MemoryBufferRef MBRef,
+            TargetMachine *TM);
+
+public:
+  ~LTOModule();
+
+  /// Returns 'true' if the file or memory contents are LLVM bitcode.
+  static bool isBitcodeFile(const void *mem, size_t length);
+  static bool isBitcodeFile(StringRef path);
+
+  /// Returns 'true' if the Module was produced for ThinLTO.
+  bool isThinLTO();
+
+  /// Returns 'true' if the memory buffer is LLVM bitcode for the specified
+  /// triple.
+  static bool isBitcodeForTarget(MemoryBuffer *memBuffer,
+                                 StringRef triplePrefix);
+
+  /// Returns a string representing the producer identification stored in the
+  /// bitcode, or "" if the bitcode does not contain any.
+  ///
+  static std::string getProducerString(MemoryBuffer *Buffer);
+
+  /// Create a MemoryBuffer from a memory range with an optional name.
+  static std::unique_ptr<MemoryBuffer>
+  makeBuffer(const void *mem, size_t length, StringRef name = "");
+
+  /// Create an LTOModule. N.B. These methods take ownership of the buffer. The
+  /// caller must have initialized the Targets, the TargetMCs, the AsmPrinters,
+  /// and the AsmParsers by calling:
+  ///
+  /// InitializeAllTargets();
+  /// InitializeAllTargetMCs();
+  /// InitializeAllAsmPrinters();
+  /// InitializeAllAsmParsers();
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  createFromFile(LLVMContext &Context, StringRef path,
+                 const TargetOptions &options);
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  createFromOpenFile(LLVMContext &Context, int fd, StringRef path, size_t size,
+                     const TargetOptions &options);
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  createFromOpenFileSlice(LLVMContext &Context, int fd, StringRef path,
+                          size_t map_size, off_t offset,
+                          const TargetOptions &options);
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  createFromBuffer(LLVMContext &Context, const void *mem, size_t length,
+                   const TargetOptions &options, StringRef path = "");
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  createInLocalContext(std::unique_ptr<LLVMContext> Context, const void *mem,
+                       size_t length, const TargetOptions &options,
+                       StringRef path);
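+
+  // Illustrative sketch (an assumption): after initializing the targets as
+  // documented above, a client might create a module from a file:
+  //
+  //   ErrorOr<std::unique_ptr<LTOModule>> ModOrErr =
+  //       LTOModule::createFromFile(Ctx, "input.o", Options);
+  //   if (std::error_code EC = ModOrErr.getError())
+  //     /* report EC.message() */;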
+
+  const Module &getModule() const { return *Mod; }
+  Module &getModule() { return *Mod; }
+
+  std::unique_ptr<Module> takeModule() { return std::move(Mod); }
+
+  /// Return the Module's target triple.
+  const std::string &getTargetTriple() {
+    return getModule().getTargetTriple();
+  }
+
+  /// Set the Module's target triple.
+  void setTargetTriple(StringRef Triple) {
+    getModule().setTargetTriple(Triple);
+  }
+
+  /// Get the number of symbols
+  uint32_t getSymbolCount() {
+    return _symbols.size();
+  }
+
+  /// Get the attributes for a symbol at the specified index.
+  lto_symbol_attributes getSymbolAttributes(uint32_t index) {
+    if (index < _symbols.size())
+      return lto_symbol_attributes(_symbols[index].attributes);
+    return lto_symbol_attributes(0);
+  }
+
+  /// Get the name of the symbol at the specified index.
+  StringRef getSymbolName(uint32_t index) {
+    if (index < _symbols.size())
+      return _symbols[index].name;
+    return StringRef();
+  }
+
+  const GlobalValue *getSymbolGV(uint32_t index) {
+    if (index < _symbols.size())
+      return _symbols[index].symbol;
+    return nullptr;
+  }
+
+  StringRef getLinkerOpts() { return LinkerOpts; }
+
+  const std::vector<StringRef> &getAsmUndefinedRefs() { return _asm_undefines; }
+
+private:
+  /// Parse metadata from the module
+  // FIXME: it only parses "llvm.linker.options" metadata at the moment
+  void parseMetadata();
+
+  /// Parse the symbols from the module and module-level ASM and add them to
+  /// either the defined or undefined lists.
+  void parseSymbols();
+
+  /// Add a symbol which isn't defined just yet to a list to be resolved later.
+  void addPotentialUndefinedSymbol(ModuleSymbolTable::Symbol Sym,
+                                   bool isFunc);
+
+  /// Add a defined symbol to the list.
+  void addDefinedSymbol(StringRef Name, const GlobalValue *def,
+                        bool isFunction);
+
+  /// Add a data symbol as defined to the list.
+  void addDefinedDataSymbol(ModuleSymbolTable::Symbol Sym);
+  void addDefinedDataSymbol(StringRef Name, const GlobalValue *v);
+
+  /// Add a function symbol as defined to the list.
+  void addDefinedFunctionSymbol(ModuleSymbolTable::Symbol Sym);
+  void addDefinedFunctionSymbol(StringRef Name, const Function *F);
+
+  /// Add a global symbol from module-level ASM to the defined list.
+  void addAsmGlobalSymbol(StringRef, lto_symbol_attributes scope);
+
+  /// Add a global symbol from module-level ASM to the undefined list.
+  void addAsmGlobalSymbolUndef(StringRef);
+
+  /// Parse i386/ppc ObjC class data structure.
+  void addObjCClass(const GlobalVariable *clgv);
+
+  /// Parse i386/ppc ObjC category data structure.
+  void addObjCCategory(const GlobalVariable *clgv);
+
+  /// Parse i386/ppc ObjC class list data structure.
+  void addObjCClassRef(const GlobalVariable *clgv);
+
+  /// Get the string that the data pointer points to.
+  bool objcClassNameFromExpression(const Constant *c, std::string &name);
+
+  /// Create an LTOModule (private version).
+  static ErrorOr<std::unique_ptr<LTOModule>>
+  makeLTOModule(MemoryBufferRef Buffer, const TargetOptions &options,
+                LLVMContext &Context, bool ShouldBeLazy);
+};
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/linux-x64/clang/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
new file mode 100644
index 0000000..b32a972
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
@@ -0,0 +1,356 @@
+//===-ThinLTOCodeGenerator.h - LLVM Link Time Optimizer -------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ThinLTOCodeGenerator class, similar to the
+// LTOCodeGenerator but for the ThinLTO scheme. It provides an interface for
+// a linker plugin.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_THINLTOCODEGENERATOR_H
+#define LLVM_LTO_THINLTOCODEGENERATOR_H
+
+#include "llvm-c/lto.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Support/CachePruning.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Target/TargetOptions.h"
+
+#include <string>
+
+namespace llvm {
+class StringRef;
+class LLVMContext;
+class TargetMachine;
+
+/// Wrapper around MemoryBufferRef, owning the identifier
+class ThinLTOBuffer {
+  std::string OwnedIdentifier;
+  StringRef Buffer;
+
+public:
+  ThinLTOBuffer(StringRef Buffer, StringRef Identifier)
+      : OwnedIdentifier(Identifier), Buffer(Buffer) {}
+
+  MemoryBufferRef getMemBuffer() const {
+    return MemoryBufferRef(Buffer,
+                           {OwnedIdentifier.c_str(), OwnedIdentifier.size()});
+  }
+  StringRef getBuffer() const { return Buffer; }
+  StringRef getBufferIdentifier() const { return OwnedIdentifier; }
+};
+
+/// Helper to gather options relevant to the target machine creation
+struct TargetMachineBuilder {
+  Triple TheTriple;
+  std::string MCpu;
+  std::string MAttr;
+  TargetOptions Options;
+  Optional<Reloc::Model> RelocModel;
+  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Aggressive;
+
+  std::unique_ptr<TargetMachine> create() const;
+};
+
+/// This class defines an interface similar to the LTOCodeGenerator, but
+/// adapted for ThinLTO processing.
+/// The ThinLTOCodeGenerator is not intended to be reused for multiple
+/// compilations: the model is that the client adds modules to the generator,
+/// asks it to perform the ThinLTO optimizations / codegen, and finally
+/// destroys the code generator.
+class ThinLTOCodeGenerator {
+public:
+  /// Add given module to the code generator.
+  void addModule(StringRef Identifier, StringRef Data);
+
+  /**
+   * Adds to a list of all global symbols that must exist in the final generated
+   * code. If a symbol is not listed there, it will be optimized away if it is
+   * inlined into every usage.
+   */
+  void preserveSymbol(StringRef Name);
+
+  /**
+   * Adds to a list of all global symbols that are cross-referenced between
+   * ThinLTO files. If the ThinLTO CodeGenerator can ensure that every
+   * reference from a ThinLTO module to this symbol is optimized away, then
+   * the symbol can be discarded.
+   */
+  void crossReferenceSymbol(StringRef Name);
+
+  /**
+   * Process all the modules that were added to the code generator in parallel.
+   *
+   * Clients can access the resulting object files using getProducedBinaries(),
+   * unless setGeneratedObjectsDirectory() has been called, in which case
+   * results are available through getProducedBinaryFiles().
+   */
+  void run();
+
+  /**
+   * Return the "in memory" binaries produced by the code generator. This is
+   * filled after run() unless setGeneratedObjectsDirectory() has been
+   * called, in which case results are available through
+   * getProducedBinaryFiles().
+   */
+  std::vector<std::unique_ptr<MemoryBuffer>> &getProducedBinaries() {
+    return ProducedBinaries;
+  }
+
+  /**
+   * Return the "on-disk" binaries produced by the code generator. This is
+   * filled after run() when setGeneratedObjectsDirectory() has been
+   * called; otherwise results are available through getProducedBinaries().
+   */
+  std::vector<std::string> &getProducedBinaryFiles() {
+    return ProducedBinaryFiles;
+  }
+
+  /**
+   * \defgroup Options setters
+   * @{
+   */
+
+  /**
+   * \defgroup Cache controlling options
+   *
+   * These entry points control the ThinLTO cache. The cache is intended to
+   * support incremental builds, and thus needs to be persistent across
+   * builds. The client enables the cache by supplying a path to an existing
+   * directory. The code generator will use this to store object files that
+   * may be reused during a subsequent build.
+   * To avoid filling the disk, a few knobs are provided:
+   *  - The pruning interval limits the frequency at which the garbage
+   *    collector will scan the cache directory to prune expired entries.
+   *    Setting it to -1 disables pruning (the default); setting it to 0
+   *    forces pruning to occur.
+   *  - The pruning expiration time indicates to the garbage collector how old
+   *    an entry needs to be before it is removed.
+   *  - Finally, the garbage collector can be instructed to prune the cache
+   *    until the occupied space goes below a threshold.
+   * @{
+   */
+
+  struct CachingOptions {
+    std::string Path;                    // Path to the cache, empty to disable.
+    CachePruningPolicy Policy;
+  };
+
+  /// Provide a path to a directory in which to store the cached files for
+  /// incremental builds.
+  void setCacheDir(std::string Path) { CacheOptions.Path = std::move(Path); }
+
+  /// Cache policy: interval (seconds) between two prunes of the cache. Set to a
+  /// negative value to disable pruning. A value of 0 will force pruning to
+  /// occur.
+  void setCachePruningInterval(int Interval) {
+    if (Interval < 0)
+      CacheOptions.Policy.Interval.reset();
+    else
+      CacheOptions.Policy.Interval = std::chrono::seconds(Interval);
+  }
+
+  /// Cache policy: expiration (in seconds) for an entry.
+  /// A value of 0 will be ignored.
+  void setCacheEntryExpiration(unsigned Expiration) {
+    if (Expiration)
+      CacheOptions.Policy.Expiration = std::chrono::seconds(Expiration);
+  }
+
+  /**
+   * Sets the maximum cache size that can be persistent across builds, as a
+   * percentage of the available space on the disk. Set to 100 to indicate
+   * no limit, or 50 to indicate that the cache size will not exceed half the
+   * available space. A value over 100 will be reduced to 100, and a value of
+   * 0 will be ignored.
+   *
+   * The formula looks like:
+   *  AvailableSpace = FreeSpace + ExistingCacheSize
+   *  NewCacheSize = AvailableSpace * P/100
+   */
+  void setMaxCacheSizeRelativeToAvailableSpace(unsigned Percentage) {
+    if (Percentage)
+      CacheOptions.Policy.MaxSizePercentageOfAvailableSpace = Percentage;
+  }
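+
+  // Worked example (illustrative): with 60 GB free and a 20 GB existing
+  // cache, AvailableSpace = 80 GB, so passing 50 caps the cache at 40 GB.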
+
+  /// Cache policy: the maximum size for the cache directory in bytes. A value
+  /// over the amount of available space on the disk will be reduced to the
+  /// amount of available space. A value of 0 will be ignored.
+  void setCacheMaxSizeBytes(unsigned MaxSizeBytes) {
+    if (MaxSizeBytes)
+      CacheOptions.Policy.MaxSizeBytes = MaxSizeBytes;
+  }
+
+  /// Cache policy: the maximum number of files in the cache directory. A value
+  /// of 0 will be ignored.
+  void setCacheMaxSizeFiles(unsigned MaxSizeFiles) {
+    if (MaxSizeFiles)
+      CacheOptions.Policy.MaxSizeFiles = MaxSizeFiles;
+  }
+
+  /**@}*/
+
+  /// Set the path to a directory in which to save temporaries at various
+  /// stages of the processing.
+  void setSaveTempsDir(std::string Path) { SaveTempsDir = std::move(Path); }
+
+  /// Set the path to a directory in which to save generated object files.
+  /// This
+  /// path can be used by a linker to request on-disk files instead of in-memory
+  /// buffers. When set, results are available through getProducedBinaryFiles()
+  /// instead of getProducedBinaries().
+  void setGeneratedObjectsDirectory(std::string Path) {
+    SavedObjectsDirectoryPath = std::move(Path);
+  }
+
+  /// CPU to use to initialize the TargetMachine
+  void setCpu(std::string Cpu) { TMBuilder.MCpu = std::move(Cpu); }
+
+  /// Subtarget attributes
+  void setAttr(std::string MAttr) { TMBuilder.MAttr = std::move(MAttr); }
+
+  /// TargetMachine options
+  void setTargetOptions(TargetOptions Options) {
+    TMBuilder.Options = std::move(Options);
+  }
+
+  /// Enable the Freestanding mode: indicate that the optimizer should not
+  /// assume builtins are present on the target.
+  void setFreestanding(bool Enabled) { Freestanding = Enabled; }
+
+  /// Relocation model
+  void setCodePICModel(Optional<Reloc::Model> Model) {
+    TMBuilder.RelocModel = Model;
+  }
+
+  /// CodeGen optimization level
+  void setCodeGenOptLevel(CodeGenOpt::Level CGOptLevel) {
+    TMBuilder.CGOptLevel = CGOptLevel;
+  }
+
+  /// IR optimization level: from 0 to 3.
+  void setOptLevel(unsigned NewOptLevel) {
+    OptLevel = (NewOptLevel > 3) ? 3 : NewOptLevel;
+  }
+
+  /// Disable CodeGen: run only the stages up to codegen and then stop. The
+  /// output will be bitcode.
+  void disableCodeGen(bool Disable) { DisableCodeGen = Disable; }
+
+  /// Perform CodeGen only: disable all other stages.
+  void setCodeGenOnly(bool CGOnly) { CodeGenOnly = CGOnly; }
+
+  /**@}*/
+
+  /**
+   * \defgroup Set of APIs to run individual stages in isolation.
+   * @{
+   */
+
+  /**
+   * Produce the combined summary index from all the bitcode files:
+   * "thin-link".
+   */
+  std::unique_ptr<ModuleSummaryIndex> linkCombinedIndex();
+
+  /**
+   * Perform promotion and renaming of exported internal functions,
+   * and additionally resolve weak and linkonce symbols.
+   * Index is updated to reflect linkage changes from weak resolution.
+   */
+  void promote(Module &Module, ModuleSummaryIndex &Index);
+
+  /**
+   * Compute and emit the imported files for the module at \p ModulePath.
+   */
+  static void emitImports(StringRef ModulePath, StringRef OutputName,
+                          ModuleSummaryIndex &Index);
+
+  /**
+   * Perform cross-module importing for the module identified by
+   * ModuleIdentifier.
+   */
+  void crossModuleImport(Module &Module, ModuleSummaryIndex &Index);
+
+  /**
+   * Compute the list of summaries needed for importing into the module.
+   */
+  static void gatherImportedSummariesForModule(
+      StringRef ModulePath, ModuleSummaryIndex &Index,
+      std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
+
+  /**
+   * Perform internalization. Index is updated to reflect linkage changes.
+   */
+  void internalize(Module &Module, ModuleSummaryIndex &Index);
+
+  /**
+   * Perform post-importing ThinLTO optimizations.
+   */
+  void optimize(Module &Module);
+
+  /**
+   * Perform ThinLTO CodeGen.
+   */
+  std::unique_ptr<MemoryBuffer> codegen(Module &Module);
+
+  /**@}*/
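+
+  // Illustrative sketch (an assumption, not from this header): the stages
+  // above compose into a manual per-module pipeline roughly as follows:
+  //
+  //   auto Index = CG.linkCombinedIndex();  // thin-link over all modules
+  //   CG.promote(M, *Index);                // promote/rename internals
+  //   CG.crossModuleImport(M, *Index);      // pull in imported functions
+  //   CG.internalize(M, *Index);            // internalize what we can
+  //   CG.optimize(M);                       // post-import optimizations
+  //   auto Obj = CG.codegen(M);             // final object buffer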
+
+private:
+  /// Helper factory to build a TargetMachine
+  TargetMachineBuilder TMBuilder;
+
+  /// Vector holding the in-memory buffers containing the produced binaries,
+  /// when SavedObjectsDirectoryPath isn't set.
+  std::vector<std::unique_ptr<MemoryBuffer>> ProducedBinaries;
+
+  /// Paths to the generated files in the supplied SavedObjectsDirectoryPath,
+  /// if any.
+  std::vector<std::string> ProducedBinaryFiles;
+
+  /// Vector holding the input buffers containing the bitcode modules to
+  /// process.
+  std::vector<ThinLTOBuffer> Modules;
+
+  /// Set of symbols that need to be preserved outside of the set of bitcode
+  /// files.
+  StringSet<> PreservedSymbols;
+
+  /// Set of symbols that are cross-referenced between bitcode files.
+  StringSet<> CrossReferencedSymbols;
+
+  /// Control the caching behavior.
+  CachingOptions CacheOptions;
+
+  /// Path to a directory to save the temporary bitcode files.
+  std::string SaveTempsDir;
+
+  /// Path to a directory to save the generated object files.
+  std::string SavedObjectsDirectoryPath;
+
+  /// Flag to enable/disable CodeGen. When set to true, the process stops after
+  /// optimizations and bitcode is produced.
+  bool DisableCodeGen = false;
+
+  /// Flag to indicate that only the CodeGen will be performed, no cross-module
+  /// importing or optimization.
+  bool CodeGenOnly = false;
+
+  /// Flag to indicate that the optimizer should not assume builtins are present
+  /// on the target.
+  bool Freestanding = false;
+
+  /// IR Optimization Level [0-3].
+  unsigned OptLevel = 3;
+};
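+
+// Illustrative usage sketch (an assumption, not from this header): the
+// all-in-one path is to add bitcode buffers and call run():
+//
+//   ThinLTOCodeGenerator CG;
+//   CG.addModule("a.o", BufferA);  // BufferA/BufferB are bitcode StringRefs
+//   CG.addModule("b.o", BufferB);
+//   CG.setCacheDir("thinlto.cache");
+//   CG.run();
+//   for (auto &Buf : CG.getProducedBinaries())
+//     /* hand Buf to the linker */;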
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/LTO/legacy/UpdateCompilerUsed.h b/linux-x64/clang/include/llvm/LTO/legacy/UpdateCompilerUsed.h
new file mode 100644
index 0000000..4be0027
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LTO/legacy/UpdateCompilerUsed.h
@@ -0,0 +1,32 @@
+//==------ UpdateCompilerUsed.h - LLVM Link Time Optimizer Utility --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a helper function to update the llvm.compiler.used
+// global.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_UPDATE_COMPILER_USED_H
+#define LLVM_LTO_UPDATE_COMPILER_USED_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/GlobalValue.h"
+
+namespace llvm {
+class Module;
+class TargetMachine;
+
+/// Find all globals in \p TheModule that are referenced in
+/// \p AsmUndefinedRefs, as well as the user-supplied function definitions
+/// that are also libcalls, and create or update the magic
+/// "llvm.compiler.used" global in \p TheModule.
+void updateCompilerUsed(Module &TheModule, const TargetMachine &TM,
+                        const StringSet<> &AsmUndefinedRefs);
+}
+
+#endif // LLVM_LTO_UPDATE_COMPILER_USED_H
diff --git a/linux-x64/clang/include/llvm/LineEditor/LineEditor.h b/linux-x64/clang/include/llvm/LineEditor/LineEditor.h
new file mode 100644
index 0000000..68995d0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LineEditor/LineEditor.h
@@ -0,0 +1,154 @@
+//===-- llvm/LineEditor/LineEditor.h - line editor --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINEEDITOR_LINEEDITOR_H
+#define LLVM_LINEEDITOR_LINEEDITOR_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class LineEditor {
+public:
+  /// Create a LineEditor object.
+  ///
+  /// \param ProgName The name of the current program. Used to form a default
+  /// prompt.
+  /// \param HistoryPath Path to the file in which to store history data, if
+  /// possible.
+  /// \param In The input stream used by the editor.
+  /// \param Out The output stream used by the editor.
+  /// \param Err The error stream used by the editor.
+  LineEditor(StringRef ProgName, StringRef HistoryPath = "", FILE *In = stdin,
+             FILE *Out = stdout, FILE *Err = stderr);
+  ~LineEditor();
+
+  /// Reads a line.
+  ///
+  /// \return The line, or llvm::Optional<std::string>() on EOF.
+  llvm::Optional<std::string> readLine() const;
+
+  void saveHistory();
+  void loadHistory();
+
+  static std::string getDefaultHistoryPath(StringRef ProgName);
+
+  /// The action to perform upon a completion request.
+  struct CompletionAction {
+    enum ActionKind {
+      /// Insert Text at the cursor position.
+      AK_Insert,
+      /// Show Completions, or beep if the list is empty.
+      AK_ShowCompletions
+    };
+
+    ActionKind Kind;
+
+    /// The text to insert.
+    std::string Text;
+
+    /// The list of completions to show.
+    std::vector<std::string> Completions;
+  };
+
+  /// A possible completion at a given cursor position.
+  struct Completion {
+    Completion() {}
+    Completion(const std::string &TypedText, const std::string &DisplayText)
+        : TypedText(TypedText), DisplayText(DisplayText) {}
+
+    /// The text to insert. If the user has already input some of the
+    /// completion, this should only include the rest of the text.
+    std::string TypedText;
+
+    /// A description of this completion. This may be the completion itself,
+    /// or a summary of its type or arguments.
+    std::string DisplayText;
+  };
+
+  /// Set the completer for this LineEditor. A completer is a function object
+  /// which takes arguments of type StringRef (the string to complete) and
+  /// size_t (the zero-based cursor position in the StringRef) and returns a
+  /// CompletionAction.
+  template <typename T> void setCompleter(T Comp) {
+    Completer.reset(new CompleterModel<T>(Comp));
+  }
+
+  /// Set the completer for this LineEditor to the given list completer.
+  /// A list completer is a function object which takes arguments of type
+  /// StringRef (the string to complete) and size_t (the zero-based cursor
+  /// position in the StringRef) and returns a std::vector<Completion>.
+  template <typename T> void setListCompleter(T Comp) {
+    Completer.reset(new ListCompleterModel<T>(Comp));
+  }
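+
+  // Illustrative sketch (an assumption): a minimal list completer can be a
+  // lambda returning fixed candidates:
+  //
+  //   LineEditor LE("tool");
+  //   LE.setListCompleter([](StringRef Buf, size_t Pos) {
+  //     std::vector<LineEditor::Completion> Comps;
+  //     Comps.emplace_back("help", "help - show commands");
+  //     return Comps;
+  //   });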
+
+  /// Use the current completer to produce a CompletionAction for the given
+  /// completion request. If the current completer is a list completer, this
+  /// will return an AK_Insert CompletionAction if the completions share a
+  /// common prefix, or an AK_ShowCompletions CompletionAction otherwise.
+  ///
+  /// \param Buffer The string to complete
+  /// \param Pos The zero-based cursor position in the StringRef
+  CompletionAction getCompletionAction(StringRef Buffer, size_t Pos) const;
+
+  const std::string &getPrompt() const { return Prompt; }
+  void setPrompt(const std::string &P) { Prompt = P; }
+
+  // Public so callbacks in LineEditor.cpp can use it.
+  struct InternalData;
+
+private:
+  std::string Prompt;
+  std::string HistoryPath;
+  std::unique_ptr<InternalData> Data;
+
+  struct CompleterConcept {
+    virtual ~CompleterConcept();
+    virtual CompletionAction complete(StringRef Buffer, size_t Pos) const = 0;
+  };
+
+  struct ListCompleterConcept : CompleterConcept {
+    ~ListCompleterConcept() override;
+    CompletionAction complete(StringRef Buffer, size_t Pos) const override;
+    static std::string getCommonPrefix(const std::vector<Completion> &Comps);
+    virtual std::vector<Completion> getCompletions(StringRef Buffer,
+                                                   size_t Pos) const = 0;
+  };
+
+  template <typename T>
+  struct CompleterModel : CompleterConcept {
+    CompleterModel(T Value) : Value(Value) {}
+    CompletionAction complete(StringRef Buffer, size_t Pos) const override {
+      return Value(Buffer, Pos);
+    }
+    T Value;
+  };
+
+  template <typename T>
+  struct ListCompleterModel : ListCompleterConcept {
+    ListCompleterModel(T Value) : Value(std::move(Value)) {}
+    std::vector<Completion> getCompletions(StringRef Buffer,
+                                           size_t Pos) const override {
+      return Value(Buffer, Pos);
+    }
+    T Value;
+  };
+
+  std::unique_ptr<const CompleterConcept> Completer;
+};
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LinkAllIR.h b/linux-x64/clang/include/llvm/LinkAllIR.h
new file mode 100644
index 0000000..9a9f3d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LinkAllIR.h
@@ -0,0 +1,52 @@
+//===----- LinkAllIR.h - Reference All VMCore Code --------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all the object modules of the VMCore library so
+// that tools like llc, opt, and lli can ensure they are linked with all
+// symbols from libVMCore.a. It should only be used from a tool's main
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKALLIR_H
+#define LLVM_LINKALLIR_H
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include <cstdlib>
+
+namespace {
+  struct ForceVMCoreLinking {
+    ForceVMCoreLinking() {
+      // We must reference VMCore in such a way that compilers will not
+      // delete it all as dead code, even with whole program optimization,
+      // yet in a way that is effectively a NO-OP. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+      llvm::LLVMContext Context;
+      (void)new llvm::Module("", Context);
+      (void)new llvm::UnreachableInst(Context);
+      (void) llvm::createVerifierPass();
+    }
+  } ForceVMCoreLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/LinkAllPasses.h b/linux-x64/clang/include/llvm/LinkAllPasses.h
new file mode 100644
index 0000000..2d7956d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/LinkAllPasses.h
@@ -0,0 +1,231 @@
+//===- llvm/LinkAllPasses.h ------------ Reference All Passes ---*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all transformation and analysis passes for tools
+// like opt and bugpoint that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKALLPASSES_H
+#define LLVM_LINKALLPASSES_H
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysisEvaluator.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
+#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
+#include "llvm/Analysis/CallPrinter.h"
+#include "llvm/Analysis/DomPrinter.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/IntervalPartition.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionPass.h"
+#include "llvm/Analysis/RegionPrinter.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/ScopedNoAliasAA.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/Support/Valgrind.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/Transforms/IPO/FunctionAttrs.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Utils.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
+#include "llvm/Transforms/Vectorize.h"
+#include <cstdlib>
+
+namespace {
+  struct ForcePassLinking {
+    ForcePassLinking() {
+      // We must reference the passes in such a way that compilers will not
+      // delete it all as dead code, even with whole program optimization,
+      // yet in a way that is effectively a NO-OP. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      (void) llvm::createAAEvalPass();
+      (void) llvm::createAggressiveDCEPass();
+      (void) llvm::createBitTrackingDCEPass();
+      (void) llvm::createArgumentPromotionPass();
+      (void) llvm::createAlignmentFromAssumptionsPass();
+      (void) llvm::createBasicAAWrapperPass();
+      (void) llvm::createSCEVAAWrapperPass();
+      (void) llvm::createTypeBasedAAWrapperPass();
+      (void) llvm::createScopedNoAliasAAWrapperPass();
+      (void) llvm::createBoundsCheckingLegacyPass();
+      (void) llvm::createBreakCriticalEdgesPass();
+      (void) llvm::createCallGraphDOTPrinterPass();
+      (void) llvm::createCallGraphViewerPass();
+      (void) llvm::createCFGSimplificationPass();
+      (void) llvm::createCFLAndersAAWrapperPass();
+      (void) llvm::createCFLSteensAAWrapperPass();
+      (void) llvm::createStructurizeCFGPass();
+      (void) llvm::createLibCallsShrinkWrapPass();
+      (void) llvm::createCalledValuePropagationPass();
+      (void) llvm::createConstantMergePass();
+      (void) llvm::createConstantPropagationPass();
+      (void) llvm::createCostModelAnalysisPass();
+      (void) llvm::createDeadArgEliminationPass();
+      (void) llvm::createDeadCodeEliminationPass();
+      (void) llvm::createDeadInstEliminationPass();
+      (void) llvm::createDeadStoreEliminationPass();
+      (void) llvm::createDependenceAnalysisWrapperPass();
+      (void) llvm::createDivergenceAnalysisPass();
+      (void) llvm::createDomOnlyPrinterPass();
+      (void) llvm::createDomPrinterPass();
+      (void) llvm::createDomOnlyViewerPass();
+      (void) llvm::createDomViewerPass();
+      (void) llvm::createGCOVProfilerPass();
+      (void) llvm::createPGOInstrumentationGenLegacyPass();
+      (void) llvm::createPGOInstrumentationUseLegacyPass();
+      (void) llvm::createPGOIndirectCallPromotionLegacyPass();
+      (void) llvm::createPGOMemOPSizeOptLegacyPass();
+      (void) llvm::createInstrProfilingLegacyPass();
+      (void) llvm::createFunctionImportPass();
+      (void) llvm::createFunctionInliningPass();
+      (void) llvm::createAlwaysInlinerLegacyPass();
+      (void) llvm::createGlobalDCEPass();
+      (void) llvm::createGlobalOptimizerPass();
+      (void) llvm::createGlobalsAAWrapperPass();
+      (void) llvm::createGuardWideningPass();
+      (void) llvm::createIPConstantPropagationPass();
+      (void) llvm::createIPSCCPPass();
+      (void) llvm::createInductiveRangeCheckEliminationPass();
+      (void) llvm::createIndVarSimplifyPass();
+      (void) llvm::createInstructionCombiningPass();
+      (void) llvm::createInternalizePass();
+      (void) llvm::createLCSSAPass();
+      (void) llvm::createLICMPass();
+      (void) llvm::createLoopSinkPass();
+      (void) llvm::createLazyValueInfoPass();
+      (void) llvm::createLoopExtractorPass();
+      (void) llvm::createLoopInterchangePass();
+      (void) llvm::createLoopPredicationPass();
+      (void) llvm::createLoopSimplifyPass();
+      (void) llvm::createLoopSimplifyCFGPass();
+      (void) llvm::createLoopStrengthReducePass();
+      (void) llvm::createLoopRerollPass();
+      (void) llvm::createLoopUnrollPass();
+      (void) llvm::createLoopUnswitchPass();
+      (void) llvm::createLoopVersioningLICMPass();
+      (void) llvm::createLoopIdiomPass();
+      (void) llvm::createLoopRotatePass();
+      (void) llvm::createLowerExpectIntrinsicPass();
+      (void) llvm::createLowerInvokePass();
+      (void) llvm::createLowerSwitchPass();
+      (void) llvm::createNaryReassociatePass();
+      (void) llvm::createObjCARCAAWrapperPass();
+      (void) llvm::createObjCARCAPElimPass();
+      (void) llvm::createObjCARCExpandPass();
+      (void) llvm::createObjCARCContractPass();
+      (void) llvm::createObjCARCOptPass();
+      (void) llvm::createPAEvalPass();
+      (void) llvm::createPromoteMemoryToRegisterPass();
+      (void) llvm::createDemoteRegisterToMemoryPass();
+      (void) llvm::createPruneEHPass();
+      (void) llvm::createPostDomOnlyPrinterPass();
+      (void) llvm::createPostDomPrinterPass();
+      (void) llvm::createPostDomOnlyViewerPass();
+      (void) llvm::createPostDomViewerPass();
+      (void) llvm::createReassociatePass();
+      (void) llvm::createRegionInfoPass();
+      (void) llvm::createRegionOnlyPrinterPass();
+      (void) llvm::createRegionOnlyViewerPass();
+      (void) llvm::createRegionPrinterPass();
+      (void) llvm::createRegionViewerPass();
+      (void) llvm::createSCCPPass();
+      (void) llvm::createSafeStackPass();
+      (void) llvm::createSROAPass();
+      (void) llvm::createSingleLoopExtractorPass();
+      (void) llvm::createStripSymbolsPass();
+      (void) llvm::createStripNonDebugSymbolsPass();
+      (void) llvm::createStripDeadDebugInfoPass();
+      (void) llvm::createStripDeadPrototypesPass();
+      (void) llvm::createTailCallEliminationPass();
+      (void) llvm::createJumpThreadingPass();
+      (void) llvm::createUnifyFunctionExitNodesPass();
+      (void) llvm::createInstCountPass();
+      (void) llvm::createConstantHoistingPass();
+      (void) llvm::createCodeGenPreparePass();
+      (void) llvm::createEntryExitInstrumenterPass();
+      (void) llvm::createPostInlineEntryExitInstrumenterPass();
+      (void) llvm::createEarlyCSEPass();
+      (void) llvm::createGVNHoistPass();
+      (void) llvm::createMergedLoadStoreMotionPass();
+      (void) llvm::createGVNPass();
+      (void) llvm::createNewGVNPass();
+      (void) llvm::createMemCpyOptPass();
+      (void) llvm::createLoopDeletionPass();
+      (void) llvm::createPostDomTree();
+      (void) llvm::createInstructionNamerPass();
+      (void) llvm::createMetaRenamerPass();
+      (void) llvm::createPostOrderFunctionAttrsLegacyPass();
+      (void) llvm::createReversePostOrderFunctionAttrsPass();
+      (void) llvm::createMergeFunctionsPass();
+      (void) llvm::createMergeICmpsPass();
+      (void) llvm::createExpandMemCmpPass();
+      std::string buf;
+      llvm::raw_string_ostream os(buf);
+      (void) llvm::createPrintModulePass(os);
+      (void) llvm::createPrintFunctionPass(os);
+      (void) llvm::createPrintBasicBlockPass(os);
+      (void) llvm::createModuleDebugInfoPrinterPass();
+      (void) llvm::createPartialInliningPass();
+      (void) llvm::createLintPass();
+      (void) llvm::createSinkingPass();
+      (void) llvm::createLowerAtomicPass();
+      (void) llvm::createCorrelatedValuePropagationPass();
+      (void) llvm::createMemDepPrinter();
+      (void) llvm::createInstructionSimplifierPass();
+      (void) llvm::createLoopVectorizePass();
+      (void) llvm::createSLPVectorizerPass();
+      (void) llvm::createLoadStoreVectorizerPass();
+      (void) llvm::createPartiallyInlineLibCallsPass();
+      (void) llvm::createScalarizerPass();
+      (void) llvm::createSeparateConstOffsetFromGEPPass();
+      (void) llvm::createSpeculativeExecutionPass();
+      (void) llvm::createSpeculativeExecutionIfHasBranchDivergencePass();
+      (void) llvm::createRewriteSymbolsPass();
+      (void) llvm::createStraightLineStrengthReducePass();
+      (void) llvm::createMemDerefPrinter();
+      (void) llvm::createMustExecutePrinter();
+      (void) llvm::createFloat2IntPass();
+      (void) llvm::createEliminateAvailableExternallyPass();
+      (void) llvm::createScalarizeMaskedMemIntrinPass();
+
+      (void)new llvm::IntervalPartition();
+      (void)new llvm::ScalarEvolutionWrapperPass();
+      llvm::Function::Create(nullptr, llvm::GlobalValue::ExternalLinkage)->viewCFGOnly();
+      llvm::RGPassManager RGM;
+      llvm::TargetLibraryInfoImpl TLII;
+      llvm::TargetLibraryInfo TLI(TLII);
+      llvm::AliasAnalysis AA(TLI);
+      llvm::AliasSetTracker X(AA);
+      X.add(nullptr, 0, llvm::AAMDNodes()); // for -print-alias-sets
+      (void) llvm::AreStatisticsEnabled();
+      (void) llvm::sys::RunningOnValgrind();
+    }
+  } ForcePassLinking; // Force link by creating a global definition.
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Linker/IRMover.h b/linux-x64/clang/include/llvm/Linker/IRMover.h
new file mode 100644
index 0000000..235ada4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Linker/IRMover.h
@@ -0,0 +1,89 @@
+//===- IRMover.h ------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKER_IRMOVER_H
+#define LLVM_LINKER_IRMOVER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include <functional>
+
+namespace llvm {
+class Error;
+class GlobalValue;
+class Metadata;
+class Module;
+class StructType;
+class TrackingMDRef;
+class Type;
+
+class IRMover {
+  struct StructTypeKeyInfo {
+    struct KeyTy {
+      ArrayRef<Type *> ETypes;
+      bool IsPacked;
+      KeyTy(ArrayRef<Type *> E, bool P);
+      KeyTy(const StructType *ST);
+      bool operator==(const KeyTy &that) const;
+      bool operator!=(const KeyTy &that) const;
+    };
+    static StructType *getEmptyKey();
+    static StructType *getTombstoneKey();
+    static unsigned getHashValue(const KeyTy &Key);
+    static unsigned getHashValue(const StructType *ST);
+    static bool isEqual(const KeyTy &LHS, const StructType *RHS);
+    static bool isEqual(const StructType *LHS, const StructType *RHS);
+  };
+
+  /// Type of the Metadata map in \a ValueToValueMapTy.
+  typedef DenseMap<const Metadata *, TrackingMDRef> MDMapT;
+
+public:
+  class IdentifiedStructTypeSet {
+    // The set of opaque types in the composite module.
+    DenseSet<StructType *> OpaqueStructTypes;
+
+    // The set of identified but non-opaque structures in the composite module.
+    DenseSet<StructType *, StructTypeKeyInfo> NonOpaqueStructTypes;
+
+  public:
+    void addNonOpaque(StructType *Ty);
+    void switchToNonOpaque(StructType *Ty);
+    void addOpaque(StructType *Ty);
+    StructType *findNonOpaque(ArrayRef<Type *> ETypes, bool IsPacked);
+    bool hasType(StructType *Ty);
+  };
+
+  IRMover(Module &M);
+
+  typedef std::function<void(GlobalValue &)> ValueAdder;
+
+  /// Move in the provided values in \p ValuesToLink from \p Src.
+  ///
+  /// - \p AddLazyFor is a call back that the IRMover will call when a global
+  ///   value is referenced by one of the ValuesToLink (transitively) but was
+  ///   not present in ValuesToLink. The GlobalValue and a ValueAdder callback
+  ///   are passed as an argument, and the callback is expected to be called
+  ///   if the GlobalValue needs to be added to the \p ValuesToLink and linked.
+  /// - \p IsPerformingImport is true when this IR link is to perform ThinLTO
+  ///   function importing from Src.
+  Error move(std::unique_ptr<Module> Src, ArrayRef<GlobalValue *> ValuesToLink,
+             std::function<void(GlobalValue &GV, ValueAdder Add)> AddLazyFor,
+             bool IsPerformingImport);
+  Module &getModule() { return Composite; }
+
+private:
+  Module &Composite;
+  IdentifiedStructTypeSet IdentifiedStructTypes;
+  MDMapT SharedMDs; ///< A Metadata map to use for all calls to \a move().
+};
+
+} // End llvm namespace
+
+#endif
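A minimal usage sketch of IRMover::move, assuming a destination module and a single GlobalValue to pull in; the lazy-add callback here simply accepts every transitively referenced global. The function and variable names are illustrative, not part of the API:

#include "llvm/IR/Module.h"
#include "llvm/Linker/IRMover.h"
#include "llvm/Support/Error.h"

// Sketch: move one global value from Src into the composite module Dst.
static llvm::Error moveOneValue(llvm::Module &Dst,
                                std::unique_ptr<llvm::Module> Src,
                                llvm::GlobalValue *GV) {
  llvm::IRMover Mover(Dst);
  return Mover.move(
      std::move(Src), {GV},
      // Called for globals referenced by GV but not in ValuesToLink;
      // adding them lazily keeps the moved IR well formed.
      [](llvm::GlobalValue &Needed, llvm::IRMover::ValueAdder Add) {
        Add(Needed);
      },
      /*IsPerformingImport=*/false);
}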
diff --git a/linux-x64/clang/include/llvm/Linker/Linker.h b/linux-x64/clang/include/llvm/Linker/Linker.h
new file mode 100644
index 0000000..628e011
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Linker/Linker.h
@@ -0,0 +1,59 @@
+//===- Linker.h - Module Linker Interface -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKER_LINKER_H
+#define LLVM_LINKER_LINKER_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Linker/IRMover.h"
+
+namespace llvm {
+class Module;
+class StructType;
+class Type;
+
+/// This class provides the core functionality of linking in LLVM. It keeps a
+/// pointer to the merged module so far. It doesn't take ownership of the
+/// module since it is assumed that the user of this class will want to do
+/// something with it after the linking.
+class Linker {
+  IRMover Mover;
+
+public:
+  enum Flags {
+    None = 0,
+    OverrideFromSrc = (1 << 0),
+    LinkOnlyNeeded = (1 << 1),
+  };
+
+  Linker(Module &M);
+
+  /// \brief Link \p Src into the composite.
+  ///
+  /// Passing the OverrideFromSrc flag will have symbols from Src
+  /// shadow those in the destination.
+  ///
+  /// Passing InternalizeCallback will have the linker call the function with
+  /// the new module and a list of global value names to be internalized by the
+  /// callback.
+  ///
+  /// Returns true on error.
+  bool linkInModule(std::unique_ptr<Module> Src, unsigned Flags = Flags::None,
+                    std::function<void(Module &, const StringSet<> &)>
+                        InternalizeCallback = {});
+
+  static bool linkModules(Module &Dest, std::unique_ptr<Module> Src,
+                          unsigned Flags = Flags::None,
+                          std::function<void(Module &, const StringSet<> &)>
+                              InternalizeCallback = {});
+};
+
+} // End llvm namespace
+
+#endif
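A short sketch of the static entry point, assuming both modules share one LLVMContext; mergeModules is a hypothetical wrapper name:

#include "llvm/IR/Module.h"
#include "llvm/Linker/Linker.h"

// Sketch: merge Src into Dst. Mirrors the API's convention of
// returning true on error.
static bool mergeModules(llvm::Module &Dst,
                         std::unique_ptr<llvm::Module> Src) {
  // Linker::Flags::LinkOnlyNeeded would instead pull in only symbols
  // that Dst actually references.
  return llvm::Linker::linkModules(Dst, std::move(Src),
                                   llvm::Linker::Flags::None);
}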
diff --git a/linux-x64/clang/include/llvm/MC/ConstantPools.h b/linux-x64/clang/include/llvm/MC/ConstantPools.h
new file mode 100644
index 0000000..ef33250
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/ConstantPools.h
@@ -0,0 +1,103 @@
+//===- ConstantPools.h - Assembler-generated constant pools -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ConstantPool and AssemblerConstantPools classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_CONSTANTPOOLS_H
+#define LLVM_MC_CONSTANTPOOLS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/SMLoc.h"
+#include <cstdint>
+#include <map>
+
+namespace llvm {
+
+class MCContext;
+class MCExpr;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+class MCSymbolRefExpr;
+
+struct ConstantPoolEntry {
+  ConstantPoolEntry(MCSymbol *L, const MCExpr *Val, unsigned Sz, SMLoc Loc_)
+    : Label(L), Value(Val), Size(Sz), Loc(Loc_) {}
+
+  MCSymbol *Label;
+  const MCExpr *Value;
+  unsigned Size;
+  SMLoc Loc;
+};
+
+// A class to keep track of assembler-generated constant pools that are used to
+// implement the ldr-pseudo.
+class ConstantPool {
+  using EntryVecTy = SmallVector<ConstantPoolEntry, 4>;
+  EntryVecTy Entries;
+  std::map<int64_t, const MCSymbolRefExpr *> CachedEntries;
+
+public:
+  // Initialize a new empty constant pool
+  ConstantPool() = default;
+
+  // Add a new entry to the constant pool in the next slot.
+  // \param Value is the new entry to put in the constant pool.
+  // \param Size is the size in bytes of the entry
+  //
+  // \returns a MCExpr that references the newly inserted value
+  const MCExpr *addEntry(const MCExpr *Value, MCContext &Context,
+                         unsigned Size, SMLoc Loc);
+
+  // Emit the contents of the constant pool using the provided streamer.
+  void emitEntries(MCStreamer &Streamer);
+
+  // Return true if the constant pool is empty
+  bool empty();
+
+  void clearCache();
+};
+
+class AssemblerConstantPools {
+  // Map type used to keep track of per-Section constant pools used by the
+  // ldr-pseudo opcode. The map associates a section to its constant pool. The
+  // constant pool is a vector of (label, value) pairs. When the ldr
+  // pseudo is parsed we insert a new (label, value) pair into the constant
+  // pool for the current section and attach an MCSymbolRefExpr to the new
+  // label as an operand of the ldr. After we have parsed all the user input we
+  // output the (label, value) pairs in each constant pool at the end of the
+  // section.
+  //
+  // We use the MapVector for the map type to ensure stable iteration of
+  // the sections at the end of the parse. We need to iterate over the
+  // sections in a stable order to ensure that we print the
+  // constant pools in a deterministic order when printing an assembly
+  // file.
+  using ConstantPoolMapTy = MapVector<MCSection *, ConstantPool>;
+  ConstantPoolMapTy ConstantPools;
+
+public:
+  void emitAll(MCStreamer &Streamer);
+  void emitForCurrentSection(MCStreamer &Streamer);
+  void clearCacheForCurrentSection(MCStreamer &Streamer);
+  const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
+                         unsigned Size, SMLoc Loc);
+
+private:
+  ConstantPool *getConstantPool(MCSection *Section);
+  ConstantPool &getOrCreateConstantPool(MCSection *Section);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_CONSTANTPOOLS_H
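To make the ldr-pseudo flow above concrete, here is a hedged sketch of the parser side: the constant is parked in the current section's pool and the returned label reference becomes the load's operand. lowerLdrPseudo is an illustrative name, not an LLVM function:

#include "llvm/MC/ConstantPools.h"

// Sketch: stash a parsed constant in the current section's pool and hand
// back the label expression to use as the ldr operand. The pool itself is
// emitted later, e.g. via emitAll() at the end of parsing.
static const llvm::MCExpr *lowerLdrPseudo(llvm::AssemblerConstantPools &Pools,
                                          llvm::MCStreamer &Streamer,
                                          const llvm::MCExpr *Value,
                                          llvm::SMLoc Loc) {
  return Pools.addEntry(Streamer, Value, /*Size=*/4, Loc);
}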
diff --git a/linux-x64/clang/include/llvm/MC/LaneBitmask.h b/linux-x64/clang/include/llvm/MC/LaneBitmask.h
new file mode 100644
index 0000000..8c0b4ec
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/LaneBitmask.h
@@ -0,0 +1,102 @@
+//===- llvm/MC/LaneBitmask.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// A common definition of LaneBitmask for use in TableGen and CodeGen.
+///
+/// A lane mask is a bitmask representing the covering of a register with
+/// sub-registers.
+///
+/// This is typically used to track liveness at sub-register granularity.
+/// Lane masks for sub-register indices are similar to register units for
+/// physical registers. The individual bits in a lane mask can't be assigned
+/// any specific meaning. They can be used to check if two sub-register
+/// indices overlap.
+///
+/// Iff the target has a register such that:
+///
+///   getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+///
+/// then:
+///
+///   (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
+
+#ifndef LLVM_MC_LANEBITMASK_H
+#define LLVM_MC_LANEBITMASK_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Printable.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+  struct LaneBitmask {
+    // When changing the underlying type, change the format string as well.
+    using Type = unsigned;
+    enum : unsigned { BitWidth = 8*sizeof(Type) };
+    constexpr static const char *const FormatStr = "%08X";
+
+    constexpr LaneBitmask() = default;
+    explicit constexpr LaneBitmask(Type V) : Mask(V) {}
+
+    constexpr bool operator== (LaneBitmask M) const { return Mask == M.Mask; }
+    constexpr bool operator!= (LaneBitmask M) const { return Mask != M.Mask; }
+    constexpr bool operator< (LaneBitmask M)  const { return Mask < M.Mask; }
+    constexpr bool none() const { return Mask == 0; }
+    constexpr bool any()  const { return Mask != 0; }
+    constexpr bool all()  const { return ~Mask == 0; }
+
+    constexpr LaneBitmask operator~() const {
+      return LaneBitmask(~Mask);
+    }
+    constexpr LaneBitmask operator|(LaneBitmask M) const {
+      return LaneBitmask(Mask | M.Mask);
+    }
+    constexpr LaneBitmask operator&(LaneBitmask M) const {
+      return LaneBitmask(Mask & M.Mask);
+    }
+    LaneBitmask &operator|=(LaneBitmask M) {
+      Mask |= M.Mask;
+      return *this;
+    }
+    LaneBitmask &operator&=(LaneBitmask M) {
+      Mask &= M.Mask;
+      return *this;
+    }
+
+    constexpr Type getAsInteger() const { return Mask; }
+
+    unsigned getNumLanes() const {
+      return countPopulation(Mask);
+    }
+    unsigned getHighestLane() const {
+      return Log2_32(Mask);
+    }
+
+    static constexpr LaneBitmask getNone() { return LaneBitmask(0); }
+    static constexpr LaneBitmask getAll() { return ~LaneBitmask(0); }
+    static constexpr LaneBitmask getLane(unsigned Lane) {
+      return LaneBitmask(Type(1) << Lane);
+    }
+
+  private:
+    Type Mask = 0;
+  };
+
+  /// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
+  inline Printable PrintLaneMask(LaneBitmask LaneMask) {
+    return Printable([LaneMask](raw_ostream &OS) {
+      OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
+    });
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_MC_LANEBITMASK_H
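Since individual mask bits carry no fixed meaning, the one reliable query is intersection. A tiny sketch of the overlap test implied by the header comment:

#include "llvm/MC/LaneBitmask.h"

// Sketch: two sub-register indices overlap iff their lane masks intersect.
static bool lanesOverlap(llvm::LaneBitmask A, llvm::LaneBitmask B) {
  return (A & B).any();
}
// e.g. lanesOverlap(LaneBitmask::getLane(0), LaneBitmask::getAll()) is true,
// while two distinct getLane() masks give false.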
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmBackend.h b/linux-x64/clang/include/llvm/MC/MCAsmBackend.h
new file mode 100644
index 0000000..a8a5850
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmBackend.h
@@ -0,0 +1,182 @@
+//===- llvm/MC/MCAsmBackend.h - MC Asm Backend ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMBACKEND_H
+#define LLVM_MC_MCASMBACKEND_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFragment.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class MCAsmLayout;
+class MCAssembler;
+class MCCFIInstruction;
+class MCCodePadder;
+struct MCFixupKindInfo;
+class MCFragment;
+class MCInst;
+class MCObjectStreamer;
+class MCObjectWriter;
+struct MCCodePaddingContext;
+class MCRelaxableFragment;
+class MCSubtargetInfo;
+class MCValue;
+class raw_pwrite_stream;
+
+/// Generic interface to target specific assembler backends.
+class MCAsmBackend {
+  std::unique_ptr<MCCodePadder> CodePadder;
+
+protected: // Can only create subclasses.
+  MCAsmBackend();
+  MCAsmBackend(std::unique_ptr<MCCodePadder> TargetCodePadder);
+
+public:
+  MCAsmBackend(const MCAsmBackend &) = delete;
+  MCAsmBackend &operator=(const MCAsmBackend &) = delete;
+  virtual ~MCAsmBackend();
+
+  /// lifetime management
+  virtual void reset() {}
+
+  /// Create a new MCObjectWriter instance for use by the assembler backend to
+  /// emit the final object file.
+  virtual std::unique_ptr<MCObjectWriter>
+  createObjectWriter(raw_pwrite_stream &OS) const = 0;
+
+  /// \name Target Fixup Interfaces
+  /// @{
+
+  /// Get the number of target specific fixup kinds.
+  virtual unsigned getNumFixupKinds() const = 0;
+
+  /// Map a relocation name used in .reloc to a fixup kind.
+  virtual Optional<MCFixupKind> getFixupKind(StringRef Name) const;
+
+  /// Get information on a fixup kind.
+  virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
+
+  /// Hook to check if a relocation is needed for some target specific reason.
+  virtual bool shouldForceRelocation(const MCAssembler &Asm,
+                                     const MCFixup &Fixup,
+                                     const MCValue &Target) {
+    return false;
+  }
+
+  /// Apply \p Value for the given \p Fixup into the provided data fragment, at
+  /// the offset specified by the fixup and following the fixup kind as
+  /// appropriate. Errors (such as an out of range fixup value) should be
+  /// reported via the assembler's context.
+  virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+                          const MCValue &Target, MutableArrayRef<char> Data,
+                          uint64_t Value, bool IsResolved) const = 0;
+
+  /// @}
+
+  /// \name Target Relaxation Interfaces
+  /// @{
+
+  /// Check whether the given instruction may need relaxation.
+  ///
+  /// \param Inst - The instruction to test.
+  virtual bool mayNeedRelaxation(const MCInst &Inst) const = 0;
+
+  /// Target specific predicate for whether a given fixup requires the
+  /// associated instruction to be relaxed.
+  virtual bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
+                                            uint64_t Value,
+                                            const MCRelaxableFragment *DF,
+                                            const MCAsmLayout &Layout) const;
+
+  /// Simple predicate for targets where !Resolved implies requiring relaxation
+  virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+                                    const MCRelaxableFragment *DF,
+                                    const MCAsmLayout &Layout) const = 0;
+
+  /// Relax the instruction in the given fragment to the next wider instruction.
+  ///
+  /// \param Inst The instruction to relax, which may be the same as the
+  /// output.
+  /// \param STI the subtarget information for the associated instruction.
+  /// \param [out] Res On return, the relaxed instruction.
+  virtual void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+                                MCInst &Res) const = 0;
+
+  /// @}
+
+  /// Returns the minimum size of a nop in bytes on this target. The assembler
+  /// will use this to emit excess padding in situations where the padding
+  /// required for simple alignment would be less than the minimum nop size.
+  ///
+  virtual unsigned getMinimumNopSize() const { return 1; }
+
+  /// Write an (optimal) nop sequence of Count bytes to the given output. If the
+  /// target cannot generate such a sequence, it should return false.
+  ///
+  /// \return - True on success.
+  virtual bool writeNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
+
+  /// Give backend an opportunity to finish layout after relaxation
+  virtual void finishLayout(MCAssembler const &Asm,
+                            MCAsmLayout &Layout) const {}
+
+  /// Handle any target-specific assembler flags. By default, do nothing.
+  virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}
+
+  /// \brief Generate the compact unwind encoding for the CFI instructions.
+  virtual uint32_t
+      generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction>) const {
+    return 0;
+  }
+
+  /// Handles all target related code padding when starting to write a new
+  /// basic block to an object file.
+  ///
+  /// \param OS The streamer used for writing the padding data and function.
+  /// \param Context the context of the padding, which embeds the basic block's
+  /// parameters.
+  void handleCodePaddingBasicBlockStart(MCObjectStreamer *OS,
+                                        const MCCodePaddingContext &Context);
+  /// Handles all target related code padding after writing a block to an object
+  /// file.
+  ///
+  /// \param Context the context of the padding, which embeds the basic block's
+  /// parameters.
+  void handleCodePaddingBasicBlockEnd(const MCCodePaddingContext &Context);
+  /// Handles all target related code padding before writing a new instruction
+  /// to an object file.
+  ///
+  /// \param Inst the instruction.
+  void handleCodePaddingInstructionBegin(const MCInst &Inst);
+  /// Handles all target related code padding after writing an instruction to an
+  /// object file.
+  ///
+  /// \param Inst the instruction.
+  void handleCodePaddingInstructionEnd(const MCInst &Inst);
+
+  /// Relaxes a fragment (changes the size of the padding) according to target
+  /// requirements. The new size computation is done w.r.t a layout.
+  ///
+  /// \param PF The fragment to relax.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns true iff any relaxation occurred.
+  bool relaxFragment(MCPaddingFragment *PF, MCAsmLayout &Layout);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASMBACKEND_H
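A schematic sketch of what a target subclass must supply: the pure virtual methods above, stubbed here for a hypothetical "Toy" target. Real backends fill these with fixup patching, relaxation tables, and nop encodings:

#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"

namespace {
// Schematic backend for a hypothetical target; every override is a stub
// standing in for real target logic.
class ToyAsmBackend : public llvm::MCAsmBackend {
public:
  std::unique_ptr<llvm::MCObjectWriter>
  createObjectWriter(llvm::raw_pwrite_stream &OS) const override {
    return nullptr; // a real target returns its ELF/MachO/COFF writer here
  }

  unsigned getNumFixupKinds() const override { return 0; }

  void applyFixup(const llvm::MCAssembler &Asm, const llvm::MCFixup &Fixup,
                  const llvm::MCValue &Target,
                  llvm::MutableArrayRef<char> Data, uint64_t Value,
                  bool IsResolved) const override {
    // A real target patches Data at Fixup.getOffset() per the fixup kind.
  }

  bool mayNeedRelaxation(const llvm::MCInst &Inst) const override {
    return false; // nothing on this toy target ever relaxes
  }

  bool fixupNeedsRelaxation(const llvm::MCFixup &Fixup, uint64_t Value,
                            const llvm::MCRelaxableFragment *DF,
                            const llvm::MCAsmLayout &Layout) const override {
    return false;
  }

  void relaxInstruction(const llvm::MCInst &Inst,
                        const llvm::MCSubtargetInfo &STI,
                        llvm::MCInst &Res) const override {
    Res = Inst; // a real target rewrites Inst into its wider form
  }

  bool writeNopData(uint64_t Count, llvm::MCObjectWriter *OW) const override {
    return Count == 0; // succeed only for zero bytes; real targets emit nops
  }
};
} // namespace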
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmInfo.h b/linux-x64/clang/include/llvm/MC/MCAsmInfo.h
new file mode 100644
index 0000000..c538c46
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmInfo.h
@@ -0,0 +1,624 @@
+//===-- llvm/MC/MCAsmInfo.h - Asm info --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the basis for target specific
+// asm writers.  This class primarily takes care of global printing constants,
+// which are used in very similar ways across all targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFO_H
+#define LLVM_MC_MCASMINFO_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include <vector>
+
+namespace llvm {
+
+class MCContext;
+class MCExpr;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+
+namespace WinEH {
+
+enum class EncodingType {
+  Invalid, ///< Invalid
+  Alpha,   ///< Windows Alpha
+  Alpha64, ///< Windows AXP64
+  ARM,     ///< Windows NT (Windows on ARM)
+  CE,      ///< Windows CE ARM, PowerPC, SH3, SH4
+  Itanium, ///< Windows x64, Windows Itanium (IA-64)
+  X86,     ///< Windows x86, uses no CFI, just EH tables
+  MIPS = Alpha,
+};
+
+} // end namespace WinEH
+
+namespace LCOMM {
+
+enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };
+
+} // end namespace LCOMM
+
+/// This class is intended to be used as a base class for asm
+/// properties and features specific to the target.
+class MCAsmInfo {
+protected:
+  //===------------------------------------------------------------------===//
+  // Properties to be set by the target writer, used to configure asm printer.
+  //
+
+  /// Code pointer size in bytes.  Default is 4.
+  unsigned CodePointerSize = 4;
+
+  /// Size of the stack slot reserved for callee-saved registers, in bytes.
+  /// Default is same as pointer size.
+  unsigned CalleeSaveStackSlotSize = 4;
+
+  /// True if target is little endian.  Default is true.
+  bool IsLittleEndian = true;
+
+  /// True if the target's stack grows up.  Default is false.
+  bool StackGrowsUp = false;
+
+  /// True if this target has the MachO .subsections_via_symbols directive.
+  /// Default is false.
+  bool HasSubsectionsViaSymbols = false;
+
+  /// True if this is a MachO target that supports the macho-specific .zerofill
+  /// directive for emitting BSS Symbols.  Default is false.
+  bool HasMachoZeroFillDirective = false;
+
+  /// True if this is a MachO target that supports the macho-specific .tbss
+  /// directive for emitting thread local BSS Symbols.  Default is false.
+  bool HasMachoTBSSDirective = false;
+
+  /// This is the maximum possible length of an instruction, which is needed to
+  /// compute the size of an inline asm.  Defaults to 4.
+  unsigned MaxInstLength = 4;
+
+  /// Every possible instruction length is a multiple of this value.  Factored
+  /// out in .debug_frame and .debug_line.  Defaults to 1.
+  unsigned MinInstAlignment = 1;
+
+  /// The '$' token, when not referencing an identifier or constant, refers to
+  /// the current PC.  Defaults to false.
+  bool DollarIsPC = false;
+
+  /// This string, if specified, is used to separate instructions from each
+  /// other when on the same line.  Defaults to ';'
+  const char *SeparatorString;
+
+  /// This indicates the comment character used by the assembler.  Defaults to
+  /// "#"
+  StringRef CommentString;
+
+  /// This is appended to emitted labels.  Defaults to ":"
+  const char *LabelSuffix;
+
+  // Print the EH begin symbol with an assignment. Defaults to false.
+  bool UseAssignmentForEHBegin = false;
+
+  // Do we need to create a local symbol for .size?
+  bool NeedsLocalForSize = false;
+
+  /// This prefix is used for globals like constant pool entries that are
+  /// completely private to the .s file and should not have names in the .o
+  /// file.  Defaults to "L"
+  StringRef PrivateGlobalPrefix;
+
+  /// This prefix is used for labels for basic blocks. Defaults to the same as
+  /// PrivateGlobalPrefix.
+  StringRef PrivateLabelPrefix;
+
+  /// This prefix is used for symbols that should be passed through the
+  /// assembler but be removed by the linker.  This is 'l' on Darwin, currently
+  /// used for some ObjC metadata.  The default of "" means that for this system
+  /// a plain private symbol should be used.  Defaults to "".
+  StringRef LinkerPrivateGlobalPrefix;
+
+  /// If these are nonempty, they contain a directive to emit before and after
+  /// an inline assembly statement.  Defaults to "#APP\n", "#NO_APP\n"
+  const char *InlineAsmStart;
+  const char *InlineAsmEnd;
+
+  /// These are assembly directives that tell the assembler to interpret the
+  /// following instructions differently.  Defaults to ".code16", ".code32",
+  /// ".code64".
+  const char *Code16Directive;
+  const char *Code32Directive;
+  const char *Code64Directive;
+
+  /// Which dialect of an assembler variant to use.  Defaults to 0
+  unsigned AssemblerDialect = 0;
+
+  /// This is true if the assembler allows @ characters in symbol names.
+  /// Defaults to false.
+  bool AllowAtInName = false;
+
+  /// If this is true, symbol names with invalid characters will be printed in
+  /// quotes.
+  bool SupportsQuotedNames = true;
+
+  /// This is true if data region markers should be printed as
+  /// ".data_region/.end_data_region" directives. If false, use "$d/$a" labels
+  /// instead.
+  bool UseDataRegionDirectives = false;
+
+  //===--- Data Emission Directives -------------------------------------===//
+
+  /// This should be set to the directive used to get some number of zero bytes
+  /// emitted to the current section.  Common cases are "\t.zero\t" and
+  /// "\t.space\t".  If this is set to null, the Data*bitsDirective's will be
+  /// used to emit zero bytes.  Defaults to "\t.zero\t"
+  const char *ZeroDirective;
+
+  /// This directive allows emission of an ascii string with the standard C
+  /// escape characters embedded into it.  If a target doesn't support this, it
+  /// can be set to null. Defaults to "\t.ascii\t"
+  const char *AsciiDirective;
+
+  /// If not null, this allows for special handling of zero terminated strings
+  /// on this target.  This is commonly supported as ".asciz".  If a target
+  /// doesn't support this, it can be set to null.  Defaults to "\t.asciz\t"
+  const char *AscizDirective;
+
+  /// These directives are used to output some unit of integer data to the
+  /// current section.  If a data directive is set to null, smaller data
+  /// directives will be used to emit the large sizes.  Defaults to "\t.byte\t",
+  /// "\t.short\t", "\t.long\t", "\t.quad\t"
+  const char *Data8bitsDirective;
+  const char *Data16bitsDirective;
+  const char *Data32bitsDirective;
+  const char *Data64bitsDirective;
+
+  /// If non-null, a directive that is used to emit a word which should be
+  /// relocated as a 64-bit GP-relative offset, e.g. .gpdword on Mips.  Defaults
+  /// to nullptr.
+  const char *GPRel64Directive = nullptr;
+
+  /// If non-null, a directive that is used to emit a word which should be
+  /// relocated as a 32-bit GP-relative offset, e.g. .gpword on Mips or .gprel32
+  /// on Alpha.  Defaults to nullptr.
+  const char *GPRel32Directive = nullptr;
+
+  /// If non-null, directives that are used to emit a word/dword which should
+  /// be relocated as a 32/64-bit DTP/TP-relative offset, e.g. .dtprelword/
+  /// .dtpreldword/.tprelword/.tpreldword on Mips.
+  const char *DTPRel32Directive = nullptr;
+  const char *DTPRel64Directive = nullptr;
+  const char *TPRel32Directive = nullptr;
+  const char *TPRel64Directive = nullptr;
+
+  /// This is true if this target uses "Sun Style" syntax for section switching
+  /// ("#alloc,#write" etc) instead of the normal ELF syntax (,"a,w") in
+  /// .section directives.  Defaults to false.
+  bool SunStyleELFSectionSwitchSyntax = false;
+
+  /// This is true if this target uses ELF '.section' directive before the
+  /// '.bss' one. It's used for PPC/Linux, which doesn't support the bare
+  /// '.bss' directive.  Defaults to false.
+  bool UsesELFSectionDirectiveForBSS = false;
+
+  bool NeedsDwarfSectionOffsetDirective = false;
+
+  //===--- Alignment Information ----------------------------------------===//
+
+  /// If this is true (the default) then the asmprinter emits ".align N"
+  /// directives, where N is the number of bytes to align to.  Otherwise, it
+  /// emits ".align log2(N)", e.g. 3 to align to an 8 byte boundary.  Defaults
+  /// to true.
+  bool AlignmentIsInBytes = true;
+
+  /// If non-zero, this is used to fill the executable space created as the
+  /// result of an alignment directive.  Defaults to 0.
+  unsigned TextAlignFillValue = 0;
+
+  //===--- Global Variable Emission Directives --------------------------===//
+
+  /// This is the directive used to declare a global entity. Defaults to
+  /// ".globl".
+  const char *GlobalDirective;
+
+  /// True if the expression
+  ///   .long f - g
+  /// uses a relocation but it can be suppressed by writing
+  ///   a = f - g
+  ///   .long a
+  bool SetDirectiveSuppressesReloc = false;
+
+  /// False if the assembler requires that we use
+  /// \code
+  ///   Lc = a - b
+  ///   .long Lc
+  /// \endcode
+  ///
+  /// instead of
+  ///
+  /// \code
+  ///   .long a - b
+  /// \endcode
+  ///
+  ///  Defaults to true.
+  bool HasAggressiveSymbolFolding = true;
+
+  /// True if .comm's and .lcomm's optional alignment is to be specified in bytes
+  /// instead of log2(n).  Defaults to true.
+  bool COMMDirectiveAlignmentIsInBytes = true;
+
+  /// Describes if the .lcomm directive for the target supports an alignment
+  /// argument and how it is interpreted.  Defaults to NoAlignment.
+  LCOMM::LCOMMType LCOMMDirectiveAlignmentType = LCOMM::NoAlignment;
+
+  // True if the target allows .align directives on functions. This is true for
+  // most targets, so defaults to true.
+  bool HasFunctionAlignment = true;
+
+  /// True if the target has .type and .size directives; this is true for most
+  /// ELF targets.  Defaults to true.
+  bool HasDotTypeDotSizeDirective = true;
+
+  /// True if the target has a single parameter .file directive; this is true
+  /// for ELF targets.  Defaults to true.
+  bool HasSingleParameterDotFile = true;
+
+  /// True if the target has a .ident directive; this is true for ELF targets.
+  /// Defaults to false.
+  bool HasIdentDirective = false;
+
+  /// True if this target supports the MachO .no_dead_strip directive.  Defaults
+  /// to false.
+  bool HasNoDeadStrip = false;
+
+  /// True if this target supports the MachO .alt_entry directive.  Defaults to
+  /// false.
+  bool HasAltEntry = false;
+
+  /// Used to declare a global as being a weak symbol. Defaults to ".weak".
+  const char *WeakDirective;
+
+  /// This directive, if non-null, is used to declare a global as being a weak
+  /// undefined symbol.  Defaults to nullptr.
+  const char *WeakRefDirective = nullptr;
+
+  /// True if we have a directive to declare a global as being a weak defined
+  /// symbol.  Defaults to false.
+  bool HasWeakDefDirective = false;
+
+  /// True if we have a directive to declare a global as being a weak defined
+  /// symbol that can be hidden (unexported).  Defaults to false.
+  bool HasWeakDefCanBeHiddenDirective = false;
+
+  /// True if we have a .linkonce directive.  This is used on cygwin/mingw.
+  /// Defaults to false.
+  bool HasLinkOnceDirective = false;
+
+  /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
+  /// hidden visibility.  Defaults to MCSA_Hidden.
+  MCSymbolAttr HiddenVisibilityAttr = MCSA_Hidden;
+
+  /// This attribute, if not MCSA_Invalid, is used to declare an undefined
+  /// symbol as having hidden visibility. Defaults to MCSA_Hidden.
+  MCSymbolAttr HiddenDeclarationVisibilityAttr = MCSA_Hidden;
+
+  /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
+  /// protected visibility.  Defaults to MCSA_Protected
+  MCSymbolAttr ProtectedVisibilityAttr = MCSA_Protected;
+
+  //===--- Dwarf Emission Directives -----------------------------------===//
+
+  /// True if target supports emission of debugging information.  Defaults to
+  /// false.
+  bool SupportsDebugInformation = false;
+
+  /// Exception handling format for the target.  Defaults to None.
+  ExceptionHandling ExceptionsType = ExceptionHandling::None;
+
+  /// Windows exception handling data (.pdata) encoding.  Defaults to Invalid.
+  WinEH::EncodingType WinEHEncodingType = WinEH::EncodingType::Invalid;
+
+  /// True if Dwarf2 output generally uses relocations for references to other
+  /// .debug_* sections.
+  bool DwarfUsesRelocationsAcrossSections = true;
+
+  /// True if DWARF FDE symbol reference relocations should be replaced by an
+  /// absolute difference.
+  bool DwarfFDESymbolsUseAbsDiff = false;
+
+  /// True if dwarf register numbers are printed instead of symbolic register
+  /// names in .cfi_* directives.  Defaults to false.
+  bool DwarfRegNumForCFI = false;
+
+  /// True if target uses parens to indicate the symbol variant instead of @.
+  /// For example, foo(plt) instead of foo@plt.  Defaults to false.
+  bool UseParensForSymbolVariant = false;
+
+  //===--- Prologue State ----------------------------------------------===//
+
+  std::vector<MCCFIInstruction> InitialFrameState;
+
+  //===--- Integrated Assembler Information ----------------------------===//
+
+  /// Should we use the integrated assembler?
+  /// The integrated assembler should be enabled by default (by the
+  /// constructors) when failing to parse a valid piece of assembly (inline
+  /// or otherwise) is considered a bug. It may then be overridden after
+  /// construction (see LLVMTargetMachine::initAsmInfo()).
+  bool UseIntegratedAssembler;
+
+  /// Preserve comments in assembly.
+  bool PreserveAsmComments;
+
+  /// Compress DWARF debug sections. Defaults to no compression.
+  DebugCompressionType CompressDebugSections = DebugCompressionType::None;
+
+  /// True if the integrated assembler should interpret 'a >> b' constant
+  /// expressions as logical rather than arithmetic.
+  bool UseLogicalShr = true;
+
+  // If true, emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL, on
+  // X86_64 ELF.
+  bool RelaxELFRelocations = true;
+
+  // If true, then the lexer and expression parser will support %neg(),
+  // %hi(), and similar unary operators.
+  bool HasMipsExpressions = false;
+
+public:
+  explicit MCAsmInfo();
+  virtual ~MCAsmInfo();
+
+  /// Get the code pointer size in bytes.
+  unsigned getCodePointerSize() const { return CodePointerSize; }
+
+  /// Get the callee-saved register stack slot
+  /// size in bytes.
+  unsigned getCalleeSaveStackSlotSize() const {
+    return CalleeSaveStackSlotSize;
+  }
+
+  /// True if the target is little endian.
+  bool isLittleEndian() const { return IsLittleEndian; }
+
+  /// True if the target's stack grows up.
+  bool isStackGrowthDirectionUp() const { return StackGrowsUp; }
+
+  bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }
+
+  // Data directive accessors.
+
+  const char *getData8bitsDirective() const { return Data8bitsDirective; }
+  const char *getData16bitsDirective() const { return Data16bitsDirective; }
+  const char *getData32bitsDirective() const { return Data32bitsDirective; }
+  const char *getData64bitsDirective() const { return Data64bitsDirective; }
+  const char *getGPRel64Directive() const { return GPRel64Directive; }
+  const char *getGPRel32Directive() const { return GPRel32Directive; }
+  const char *getDTPRel64Directive() const { return DTPRel64Directive; }
+  const char *getDTPRel32Directive() const { return DTPRel32Directive; }
+  const char *getTPRel64Directive() const { return TPRel64Directive; }
+  const char *getTPRel32Directive() const { return TPRel32Directive; }
+
+  /// Targets can implement this method to specify a section to switch to if the
+  /// translation unit doesn't have any trampolines that require an executable
+  /// stack.
+  virtual MCSection *getNonexecutableStackSection(MCContext &Ctx) const {
+    return nullptr;
+  }
+
+  /// \brief True if the section is atomized using the symbols in it.
+  /// This is false if the section is not atomized at all (most ELF sections) or
+  /// if it is atomized based on its contents (MachO's __TEXT,__cstring for
+  /// example).
+  virtual bool isSectionAtomizableBySymbols(const MCSection &Section) const;
+
+  virtual const MCExpr *getExprForPersonalitySymbol(const MCSymbol *Sym,
+                                                    unsigned Encoding,
+                                                    MCStreamer &Streamer) const;
+
+  virtual const MCExpr *getExprForFDESymbol(const MCSymbol *Sym,
+                                            unsigned Encoding,
+                                            MCStreamer &Streamer) const;
+
+  /// Return true if the identifier \p Name does not need quotes to be
+  /// syntactically correct.
+  virtual bool isValidUnquotedName(StringRef Name) const;
+
+  /// Return true if the .section directive should be omitted when
+  /// emitting \p SectionName.  For example:
+  ///
+  /// shouldOmitSectionDirective(".text")
+  ///
+  /// returns false => .section .text,#alloc,#execinstr
+  /// returns true  => .text
+  virtual bool shouldOmitSectionDirective(StringRef SectionName) const;
+
+  bool usesSunStyleELFSectionSwitchSyntax() const {
+    return SunStyleELFSectionSwitchSyntax;
+  }
+
+  bool usesELFSectionDirectiveForBSS() const {
+    return UsesELFSectionDirectiveForBSS;
+  }
+
+  bool needsDwarfSectionOffsetDirective() const {
+    return NeedsDwarfSectionOffsetDirective;
+  }
+
+  // Accessors.
+
+  bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
+  bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
+  unsigned getMaxInstLength() const { return MaxInstLength; }
+  unsigned getMinInstAlignment() const { return MinInstAlignment; }
+  bool getDollarIsPC() const { return DollarIsPC; }
+  const char *getSeparatorString() const { return SeparatorString; }
+
+  /// This indicates the column (zero-based) at which asm comments should be
+  /// printed.
+  unsigned getCommentColumn() const { return 40; }
+
+  StringRef getCommentString() const { return CommentString; }
+  const char *getLabelSuffix() const { return LabelSuffix; }
+
+  bool useAssignmentForEHBegin() const { return UseAssignmentForEHBegin; }
+  bool needsLocalForSize() const { return NeedsLocalForSize; }
+  StringRef getPrivateGlobalPrefix() const { return PrivateGlobalPrefix; }
+  StringRef getPrivateLabelPrefix() const { return PrivateLabelPrefix; }
+
+  bool hasLinkerPrivateGlobalPrefix() const {
+    return LinkerPrivateGlobalPrefix[0] != '\0';
+  }
+
+  StringRef getLinkerPrivateGlobalPrefix() const {
+    if (hasLinkerPrivateGlobalPrefix())
+      return LinkerPrivateGlobalPrefix;
+    return getPrivateGlobalPrefix();
+  }
+
+  const char *getInlineAsmStart() const { return InlineAsmStart; }
+  const char *getInlineAsmEnd() const { return InlineAsmEnd; }
+  const char *getCode16Directive() const { return Code16Directive; }
+  const char *getCode32Directive() const { return Code32Directive; }
+  const char *getCode64Directive() const { return Code64Directive; }
+  unsigned getAssemblerDialect() const { return AssemblerDialect; }
+  bool doesAllowAtInName() const { return AllowAtInName; }
+  bool supportsNameQuoting() const { return SupportsQuotedNames; }
+
+  bool doesSupportDataRegionDirectives() const {
+    return UseDataRegionDirectives;
+  }
+
+  const char *getZeroDirective() const { return ZeroDirective; }
+  const char *getAsciiDirective() const { return AsciiDirective; }
+  const char *getAscizDirective() const { return AscizDirective; }
+  bool getAlignmentIsInBytes() const { return AlignmentIsInBytes; }
+  unsigned getTextAlignFillValue() const { return TextAlignFillValue; }
+  const char *getGlobalDirective() const { return GlobalDirective; }
+
+  bool doesSetDirectiveSuppressReloc() const {
+    return SetDirectiveSuppressesReloc;
+  }
+
+  bool hasAggressiveSymbolFolding() const { return HasAggressiveSymbolFolding; }
+
+  bool getCOMMDirectiveAlignmentIsInBytes() const {
+    return COMMDirectiveAlignmentIsInBytes;
+  }
+
+  LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const {
+    return LCOMMDirectiveAlignmentType;
+  }
+
+  bool hasFunctionAlignment() const { return HasFunctionAlignment; }
+  bool hasDotTypeDotSizeDirective() const { return HasDotTypeDotSizeDirective; }
+  bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
+  bool hasIdentDirective() const { return HasIdentDirective; }
+  bool hasNoDeadStrip() const { return HasNoDeadStrip; }
+  bool hasAltEntry() const { return HasAltEntry; }
+  const char *getWeakDirective() const { return WeakDirective; }
+  const char *getWeakRefDirective() const { return WeakRefDirective; }
+  bool hasWeakDefDirective() const { return HasWeakDefDirective; }
+
+  bool hasWeakDefCanBeHiddenDirective() const {
+    return HasWeakDefCanBeHiddenDirective;
+  }
+
+  bool hasLinkOnceDirective() const { return HasLinkOnceDirective; }
+
+  MCSymbolAttr getHiddenVisibilityAttr() const { return HiddenVisibilityAttr; }
+
+  MCSymbolAttr getHiddenDeclarationVisibilityAttr() const {
+    return HiddenDeclarationVisibilityAttr;
+  }
+
+  MCSymbolAttr getProtectedVisibilityAttr() const {
+    return ProtectedVisibilityAttr;
+  }
+
+  bool doesSupportDebugInformation() const { return SupportsDebugInformation; }
+
+  bool doesSupportExceptionHandling() const {
+    return ExceptionsType != ExceptionHandling::None;
+  }
+
+  ExceptionHandling getExceptionHandlingType() const { return ExceptionsType; }
+  WinEH::EncodingType getWinEHEncodingType() const { return WinEHEncodingType; }
+
+  void setExceptionsType(ExceptionHandling EH) {
+    ExceptionsType = EH;
+  }
+
+  /// Returns true if the exception handling method for the platform uses call
+  /// frame information to unwind.
+  bool usesCFIForEH() const {
+    return (ExceptionsType == ExceptionHandling::DwarfCFI ||
+            ExceptionsType == ExceptionHandling::ARM || usesWindowsCFI());
+  }
+
+  bool usesWindowsCFI() const {
+    return ExceptionsType == ExceptionHandling::WinEH &&
+           (WinEHEncodingType != WinEH::EncodingType::Invalid &&
+            WinEHEncodingType != WinEH::EncodingType::X86);
+  }
+
+  bool doesDwarfUseRelocationsAcrossSections() const {
+    return DwarfUsesRelocationsAcrossSections;
+  }
+
+  bool doDwarfFDESymbolsUseAbsDiff() const { return DwarfFDESymbolsUseAbsDiff; }
+  bool useDwarfRegNumForCFI() const { return DwarfRegNumForCFI; }
+  bool useParensForSymbolVariant() const { return UseParensForSymbolVariant; }
+
+  void addInitialFrameState(const MCCFIInstruction &Inst) {
+    InitialFrameState.push_back(Inst);
+  }
+
+  const std::vector<MCCFIInstruction> &getInitialFrameState() const {
+    return InitialFrameState;
+  }
+
+  /// Return true if assembly (inline or otherwise) should be parsed.
+  bool useIntegratedAssembler() const { return UseIntegratedAssembler; }
+
+  /// Set whether assembly (inline or otherwise) should be parsed.
+  virtual void setUseIntegratedAssembler(bool Value) {
+    UseIntegratedAssembler = Value;
+  }
+
+  /// Return true if comments in assembly (inline or otherwise) should be
+  /// preserved.
+  bool preserveAsmComments() const { return PreserveAsmComments; }
+
+  /// Set whether comments in assembly (inline or otherwise) should be
+  /// preserved.
+  virtual void setPreserveAsmComments(bool Value) {
+    PreserveAsmComments = Value;
+  }
+
+  DebugCompressionType compressDebugSections() const {
+    return CompressDebugSections;
+  }
+
+  void setCompressDebugSections(DebugCompressionType CompressDebugSections) {
+    this->CompressDebugSections = CompressDebugSections;
+  }
+
+  bool shouldUseLogicalShr() const { return UseLogicalShr; }
+
+  bool canRelaxRelocations() const { return RelaxELFRelocations; }
+  void setRelaxELFRelocations(bool V) { RelaxELFRelocations = V; }
+  bool hasMipsExpressions() const { return HasMipsExpressions; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASMINFO_H
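Targets consume this class by subclassing it and overwriting defaults in the constructor. A hedged sketch for a hypothetical target (ToyMCAsmInfo and the chosen values are illustrative):

#include "llvm/MC/MCAsmInfo.h"

namespace {
// Sketch: a target's asm-info only needs to set the fields whose
// defaults don't match its assembler.
class ToyMCAsmInfo : public llvm::MCAsmInfo {
public:
  ToyMCAsmInfo() {
    CodePointerSize = 8;        // 64-bit code pointers
    CommentString = ";";        // this assembler comments with ';'
    AlignmentIsInBytes = false; // .align takes log2(N), not bytes
    SupportsDebugInformation = true;
  }
};
} // namespace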
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmInfoCOFF.h b/linux-x64/clang/include/llvm/MC/MCAsmInfoCOFF.h
new file mode 100644
index 0000000..01c8ae4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmInfoCOFF.h
@@ -0,0 +1,40 @@
+//===- MCAsmInfoCOFF.h - COFF asm properties --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFOCOFF_H
+#define LLVM_MC_MCASMINFOCOFF_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+
+class MCAsmInfoCOFF : public MCAsmInfo {
+  virtual void anchor();
+
+protected:
+  explicit MCAsmInfoCOFF();
+};
+
+class MCAsmInfoMicrosoft : public MCAsmInfoCOFF {
+  void anchor() override;
+
+protected:
+  explicit MCAsmInfoMicrosoft();
+};
+
+class MCAsmInfoGNUCOFF : public MCAsmInfoCOFF {
+  void anchor() override;
+
+protected:
+  explicit MCAsmInfoGNUCOFF();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASMINFOCOFF_H
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmInfoDarwin.h b/linux-x64/clang/include/llvm/MC/MCAsmInfoDarwin.h
new file mode 100644
index 0000000..a533d60
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmInfoDarwin.h
@@ -0,0 +1,31 @@
+//===- MCAsmInfoDarwin.h - Darwin asm properties ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines target asm properties related what form asm statements
+// should take in general on Darwin-based targets
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFODARWIN_H
+#define LLVM_MC_MCASMINFODARWIN_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+
+class MCAsmInfoDarwin : public MCAsmInfo {
+public:
+  explicit MCAsmInfoDarwin();
+
+  bool isSectionAtomizableBySymbols(const MCSection &Section) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASMINFODARWIN_H
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmInfoELF.h b/linux-x64/clang/include/llvm/MC/MCAsmInfoELF.h
new file mode 100644
index 0000000..f113afc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmInfoELF.h
@@ -0,0 +1,31 @@
+//===- llvm/MC/MCAsmInfoELF.h - ELF Asm info --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFOELF_H
+#define LLVM_MC_MCASMINFOELF_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+
+class MCAsmInfoELF : public MCAsmInfo {
+  virtual void anchor();
+  MCSection *getNonexecutableStackSection(MCContext &Ctx) const final;
+
+protected:
+  /// Targets which have non-executable stacks by default can set this to false
+  /// to disable the special section which requests a non-executable stack.
+  bool UsesNonexecutableStackSection = true;
+
+  MCAsmInfoELF();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASMINFOELF_H
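The only knob here is the non-executable stack note; a sketch of an ELF target opting out, with ToyELFAsmInfo a hypothetical name:

#include "llvm/MC/MCAsmInfoELF.h"

namespace {
// Sketch: targets whose stacks are non-executable by default can skip
// emitting the special section that requests a non-executable stack.
class ToyELFAsmInfo : public llvm::MCAsmInfoELF {
public:
  ToyELFAsmInfo() { UsesNonexecutableStackSection = false; }
};
} // namespace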
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmInfoWasm.h b/linux-x64/clang/include/llvm/MC/MCAsmInfoWasm.h
new file mode 100644
index 0000000..bc46cfd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmInfoWasm.h
@@ -0,0 +1,24 @@
+//===-- llvm/MC/MCAsmInfoWasm.h - Wasm Asm info -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFOWASM_H
+#define LLVM_MC_MCASMINFOWASM_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+class MCAsmInfoWasm : public MCAsmInfo {
+  virtual void anchor();
+
+protected:
+  MCAsmInfoWasm();
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmLayout.h b/linux-x64/clang/include/llvm/MC/MCAsmLayout.h
new file mode 100644
index 0000000..1b20d5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmLayout.h
@@ -0,0 +1,107 @@
+//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMLAYOUT_H
+#define LLVM_MC_MCASMLAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+class MCAssembler;
+class MCFragment;
+class MCSection;
+class MCSymbol;
+
+/// Encapsulates the layout of an assembly file at a particular point in time.
+///
+/// Assembly may require computing multiple layouts for a particular assembly
+/// file as part of the relaxation process. This class encapsulates the layout
+/// at a single point in time in such a way that it is always possible to
+/// efficiently compute the exact address of any symbol in the assembly file,
+/// even during the relaxation process.
+class MCAsmLayout {
+  MCAssembler &Assembler;
+
+  /// List of sections in layout order.
+  llvm::SmallVector<MCSection *, 16> SectionOrder;
+
+  /// The last fragment which was laid out, or 0 if nothing has been laid
+  /// out. Fragments are always laid out in order, so all fragments with a
+  /// lower ordinal will be valid.
+  mutable DenseMap<const MCSection *, MCFragment *> LastValidFragment;
+
+  /// \brief Make sure that the layout for the given fragment is valid, lazily
+  /// computing it if necessary.
+  void ensureValid(const MCFragment *F) const;
+
+  /// \brief Is the layout for this fragment valid?
+  bool isFragmentValid(const MCFragment *F) const;
+
+public:
+  MCAsmLayout(MCAssembler &Assembler);
+
+  /// Get the assembler object this is a layout for.
+  MCAssembler &getAssembler() const { return Assembler; }
+
+  /// \brief Invalidate the fragments starting with F because it has been
+  /// resized. The fragment's size should have already been updated, but
+  /// its bundle padding will be recomputed.
+  void invalidateFragmentsFrom(MCFragment *F);
+
+  /// \brief Perform layout for a single fragment, assuming that the previous
+  /// fragment has already been laid out correctly, and the parent section has
+  /// been initialized.
+  void layoutFragment(MCFragment *Fragment);
+
+  /// \name Section Access (in layout order)
+  /// @{
+
+  llvm::SmallVectorImpl<MCSection *> &getSectionOrder() { return SectionOrder; }
+  const llvm::SmallVectorImpl<MCSection *> &getSectionOrder() const {
+    return SectionOrder;
+  }
+
+  /// @}
+  /// \name Fragment Layout Data
+  /// @{
+
+  /// \brief Get the offset of the given fragment inside its containing section.
+  uint64_t getFragmentOffset(const MCFragment *F) const;
+
+  /// @}
+  /// \name Utility Functions
+  /// @{
+
+  /// \brief Get the address space size of the given section, as it affects
+  /// layout. This may differ from the size reported by \see getSectionSize() by
+  /// not including section tail padding.
+  uint64_t getSectionAddressSize(const MCSection *Sec) const;
+
+  /// \brief Get the data size of the given section, as emitted to the object
+  /// file. This may include additional padding, or be 0 for virtual sections.
+  uint64_t getSectionFileSize(const MCSection *Sec) const;
+
+  /// \brief Get the offset of the given symbol, as computed in the current
+  /// layout.
+  /// \return True on success.
+  bool getSymbolOffset(const MCSymbol &S, uint64_t &Val) const;
+
+  /// \brief Variant that reports a fatal error if the offset is not computable.
+  uint64_t getSymbolOffset(const MCSymbol &S) const;
+
+  /// \brief If this symbol is equivalent to A + Constant, return A.
+  const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;
+
+  /// @}
+};
+
+} // end namespace llvm
+
+#endif
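A small sketch of the symbol-offset API above: the bool-returning overload is the safe choice while relaxation is still in flight, since an offset may not yet be computable. tryGetOffset is an illustrative wrapper name:

#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCSymbol.h"
#include <cstdint>

// Sketch: query a symbol's offset, tolerating not-yet-computable symbols.
static bool tryGetOffset(const llvm::MCAsmLayout &Layout,
                         const llvm::MCSymbol &Sym, uint64_t &Offset) {
  // The reference-parameter overload returns false instead of raising the
  // fatal error that the uint64_t-returning variant would.
  return Layout.getSymbolOffset(Sym, Offset);
}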
diff --git a/linux-x64/clang/include/llvm/MC/MCAsmMacro.h b/linux-x64/clang/include/llvm/MC/MCAsmMacro.h
new file mode 100644
index 0000000..09b32c7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAsmMacro.h
@@ -0,0 +1,158 @@
+//===- MCAsmMacro.h - Assembly Macros ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMMACRO_H
+#define LLVM_MC_MCASMMACRO_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/SMLoc.h"
+#include <vector>
+
+namespace llvm {
+
+/// Target independent representation for an assembler token.
+class AsmToken {
+public:
+  enum TokenKind {
+    // Markers
+    Eof, Error,
+
+    // String values.
+    Identifier,
+    String,
+
+    // Integer values.
+    Integer,
+    BigNum, // larger than 64 bits
+
+    // Real values.
+    Real,
+
+    // Comments
+    Comment,
+    HashDirective,
+    // No-value.
+    EndOfStatement,
+    Colon,
+    Space,
+    Plus, Minus, Tilde,
+    Slash,     // '/'
+    BackSlash, // '\'
+    LParen, RParen, LBrac, RBrac, LCurly, RCurly,
+    Star, Dot, Comma, Dollar, Equal, EqualEqual,
+
+    Pipe, PipePipe, Caret,
+    Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
+    Less, LessEqual, LessLess, LessGreater,
+    Greater, GreaterEqual, GreaterGreater, At,
+
+    // MIPS unary expression operators such as %neg.
+    PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
+    PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
+    PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
+    PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
+    PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
+    PercentTprel_Lo
+  };
+
+private:
+  TokenKind Kind;
+
+  /// A reference to the entire token contents; this is always a pointer into
+  /// a memory buffer owned by the source manager.
+  StringRef Str;
+
+  APInt IntVal;
+
+public:
+  AsmToken() = default;
+  AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
+      : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
+  AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
+      : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
+
+  TokenKind getKind() const { return Kind; }
+  bool is(TokenKind K) const { return Kind == K; }
+  bool isNot(TokenKind K) const { return Kind != K; }
+
+  SMLoc getLoc() const;
+  SMLoc getEndLoc() const;
+  SMRange getLocRange() const;
+
+  /// Get the contents of a string token (without quotes).
+  StringRef getStringContents() const {
+    assert(Kind == String && "This token isn't a string!");
+    return Str.slice(1, Str.size() - 1);
+  }
+
+  /// Get the identifier string for the current token, which should be an
+  /// identifier or a string. This gets the portion of the string which should
+  /// be used as the identifier, e.g., it does not include the quotes on
+  /// strings.
+  StringRef getIdentifier() const {
+    if (Kind == Identifier)
+      return getString();
+    return getStringContents();
+  }
+
+  /// Get the string for the current token, this includes all characters (for
+  /// example, the quotes on strings) in the token.
+  ///
+  /// The returned StringRef points into the source manager's memory buffer, and
+  /// is safe to store across calls to Lex().
+  StringRef getString() const { return Str; }
+
+  // FIXME: Don't compute this in advance, it makes every token larger, and is
+  // also not generally what we want (it is nicer for recovery etc. to lex 123br
+  // as a single token, then diagnose as an invalid number).
+  int64_t getIntVal() const {
+    assert(Kind == Integer && "This token isn't an integer!");
+    return IntVal.getZExtValue();
+  }
+
+  APInt getAPIntVal() const {
+    assert((Kind == Integer || Kind == BigNum) &&
+           "This token isn't an integer!");
+    return IntVal;
+  }
+
+  void dump(raw_ostream &OS) const;
+  void dump() const { dump(dbgs()); }
+};
+
+struct MCAsmMacroParameter {
+  StringRef Name;
+  std::vector<AsmToken> Value;
+  bool Required = false;
+  bool Vararg = false;
+
+  MCAsmMacroParameter() = default;
+
+  void dump() const { dump(dbgs()); }
+  void dump(raw_ostream &OS) const;
+};
+
+typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
+struct MCAsmMacro {
+  StringRef Name;
+  StringRef Body;
+  MCAsmMacroParameters Parameters;
+
+public:
+  MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P)
+      : Name(N), Body(B), Parameters(std::move(P)) {}
+
+  void dump() const { dump(dbgs()); }
+  void dump(raw_ostream &OS) const;
+};
+} // namespace llvm
+
+#endif
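A tiny sketch of how parser code typically consumes AsmToken, using the is() predicate declared above; endsOperandList is an illustrative helper, not an LLVM function:

#include "llvm/MC/MCAsmMacro.h"

// Sketch: a typical operand-list loop condition in an assembly parser.
static bool endsOperandList(const llvm::AsmToken &Tok) {
  return Tok.is(llvm::AsmToken::EndOfStatement) ||
         Tok.is(llvm::AsmToken::Eof);
}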
diff --git a/linux-x64/clang/include/llvm/MC/MCAssembler.h b/linux-x64/clang/include/llvm/MC/MCAssembler.h
new file mode 100644
index 0000000..b91b044
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCAssembler.h
@@ -0,0 +1,445 @@
+//===- MCAssembler.h - Object File Generation -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASSEMBLER_H
+#define LLVM_MC_MCASSEMBLER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCSymbol.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCAsmLayout;
+class MCContext;
+class MCCodeEmitter;
+class MCFragment;
+class MCObjectWriter;
+class MCSection;
+class MCValue;
+
+// FIXME: This really doesn't belong here. See comments below.
+struct IndirectSymbolData {
+  MCSymbol *Symbol;
+  MCSection *Section;
+};
+
+// FIXME: Ditto this. Purely so the Streamer and the ObjectWriter can talk
+// to one another.
+struct DataRegionData {
+  // This enum should be kept in sync w/ the mach-o definition in
+  // llvm/BinaryFormat/MachO.h.
+  enum KindTy { Data = 1, JumpTable8, JumpTable16, JumpTable32 } Kind;
+  MCSymbol *Start;
+  MCSymbol *End;
+};
+
+class MCAssembler {
+  friend class MCAsmLayout;
+
+public:
+  using SectionListType = std::vector<MCSection *>;
+  using SymbolDataListType = std::vector<const MCSymbol *>;
+
+  using const_iterator = pointee_iterator<SectionListType::const_iterator>;
+  using iterator = pointee_iterator<SectionListType::iterator>;
+
+  using const_symbol_iterator =
+      pointee_iterator<SymbolDataListType::const_iterator>;
+  using symbol_iterator = pointee_iterator<SymbolDataListType::iterator>;
+
+  using symbol_range = iterator_range<symbol_iterator>;
+  using const_symbol_range = iterator_range<const_symbol_iterator>;
+
+  using const_indirect_symbol_iterator =
+      std::vector<IndirectSymbolData>::const_iterator;
+  using indirect_symbol_iterator = std::vector<IndirectSymbolData>::iterator;
+
+  using const_data_region_iterator =
+      std::vector<DataRegionData>::const_iterator;
+  using data_region_iterator = std::vector<DataRegionData>::iterator;
+
+  /// MachO specific deployment target version info.
+  // A Major version of 0 indicates that no version information was supplied
+  // and so the corresponding load command should not be emitted.
+  using VersionInfoType = struct {
+    bool EmitBuildVersion;
+    union {
+      MCVersionMinType Type;          ///< Used when EmitBuildVersion==false.
+      MachO::PlatformType Platform;   ///< Used when EmitBuildVersion==true.
+    } TypeOrPlatform;
+    unsigned Major;
+    unsigned Minor;
+    unsigned Update;
+  };
+
+private:
+  MCContext &Context;
+
+  MCAsmBackend &Backend;
+
+  MCCodeEmitter &Emitter;
+
+  MCObjectWriter &Writer;
+
+  SectionListType Sections;
+
+  SymbolDataListType Symbols;
+
+  std::vector<IndirectSymbolData> IndirectSymbols;
+
+  std::vector<DataRegionData> DataRegions;
+
+  /// The list of linker options to propagate into the object file.
+  std::vector<std::vector<std::string>> LinkerOptions;
+
+  /// List of declared file names
+  std::vector<std::string> FileNames;
+
+  MCDwarfLineTableParams LTParams;
+
+  /// The set of function symbols for which a .thumb_func directive has
+  /// been seen.
+  //
+  // FIXME: We really would like this in target specific code rather than
+  // here. Maybe when the relocation stuff moves to target specific,
+  // this can go with it? The streamer would need some target specific
+  // refactoring too.
+  mutable SmallPtrSet<const MCSymbol *, 32> ThumbFuncs;
+
+  /// \brief The bundle alignment size currently set in the assembler.
+  ///
+  /// By default it's 0, which means bundling is disabled.
+  unsigned BundleAlignSize;
+
+  bool RelaxAll : 1;
+  bool SubsectionsViaSymbols : 1;
+  bool IncrementalLinkerCompatible : 1;
+
+  /// ELF specific e_header flags
+  // It would be good if there were an MCELFAssembler class to hold this.
+  // ELF header flags are used both by the integrated and standalone assemblers.
+  // Access to the flags is necessary in cases where assembler directives affect
+  // which flags are set.
+  unsigned ELFHeaderEFlags;
+
+  /// Used to communicate Linker Optimization Hint information between
+  /// the Streamer and the .o writer
+  MCLOHContainer LOHContainer;
+
+  VersionInfoType VersionInfo;
+
+  /// Evaluate a fixup to a relocatable expression and the value which should be
+  /// placed into the fixup.
+  ///
+  /// \param Layout The layout to use for evaluation.
+  /// \param Fixup The fixup to evaluate.
+  /// \param DF The fragment the fixup is inside.
+  /// \param Target [out] On return, the relocatable expression the fixup
+  /// evaluates to.
+  /// \param Value [out] On return, the value of the fixup as currently laid
+  /// out.
+  /// \return Whether the fixup value was fully resolved. This is true if the
+  /// \p Value result is fixed, otherwise the value may change due to
+  /// relocation.
+  bool evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
+                     const MCFragment *DF, MCValue &Target,
+                     uint64_t &Value) const;
+
+  /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
+  /// (increased in size, in order to hold its value correctly).
+  bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF,
+                            const MCAsmLayout &Layout) const;
+
+  /// Check whether the given fragment needs relaxation.
+  bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF,
+                               const MCAsmLayout &Layout) const;
+
+  /// \brief Perform one layout iteration and return true if any offsets
+  /// were adjusted.
+  bool layoutOnce(MCAsmLayout &Layout);
+
+  /// \brief Perform one layout iteration of the given section and return true
+  /// if any offsets were adjusted.
+  bool layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec);
+
+  bool relaxInstruction(MCAsmLayout &Layout, MCRelaxableFragment &IF);
+
+  bool relaxPaddingFragment(MCAsmLayout &Layout, MCPaddingFragment &PF);
+
+  bool relaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
+
+  bool relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
+  bool relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+                                   MCDwarfCallFrameFragment &DF);
+  bool relaxCVInlineLineTable(MCAsmLayout &Layout,
+                              MCCVInlineLineTableFragment &DF);
+  bool relaxCVDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &DF);
+
+  /// finishLayout - Finalize a layout, including fragment lowering.
+  void finishLayout(MCAsmLayout &Layout);
+
+  std::tuple<MCValue, uint64_t, bool>
+  handleFixup(const MCAsmLayout &Layout, MCFragment &F, const MCFixup &Fixup);
+
+public:
+  std::vector<std::pair<StringRef, const MCSymbol *>> Symvers;
+
+  /// Construct a new assembler instance.
+  //
+  // FIXME: How are we going to parameterize this? Two obvious options are stay
+  // concrete and require clients to pass in a target like object. The other
+  // option is to make this abstract, and have targets provide concrete
+  // implementations as we do with AsmParser.
+  MCAssembler(MCContext &Context, MCAsmBackend &Backend,
+              MCCodeEmitter &Emitter, MCObjectWriter &Writer);
+  MCAssembler(const MCAssembler &) = delete;
+  MCAssembler &operator=(const MCAssembler &) = delete;
+  ~MCAssembler();
+
+  /// Compute the effective fragment size of \p F, assuming it is laid out
+  /// according to the given \p Layout.
+  uint64_t computeFragmentSize(const MCAsmLayout &Layout,
+                               const MCFragment &F) const;
+
+  /// Find the symbol which defines the atom containing the given symbol, or
+  /// null if there is no such symbol.
+  const MCSymbol *getAtom(const MCSymbol &S) const;
+
+  /// Check whether a particular symbol is visible to the linker and is required
+  /// in the symbol table, or whether it can be discarded by the assembler. This
+  /// also affects whether the assembler treats the label as potentially
+  /// defining a separate atom.
+  bool isSymbolLinkerVisible(const MCSymbol &SD) const;
+
+  /// Emit the section contents using the given object writer.
+  void writeSectionData(const MCSection *Section,
+                        const MCAsmLayout &Layout) const;
+
+  /// Check whether a given symbol has been flagged with .thumb_func.
+  bool isThumbFunc(const MCSymbol *Func) const;
+
+  /// Flag a function symbol as the target of a .thumb_func directive.
+  void setIsThumbFunc(const MCSymbol *Func) { ThumbFuncs.insert(Func); }
+
+  /// ELF e_header flags
+  unsigned getELFHeaderEFlags() const { return ELFHeaderEFlags; }
+  void setELFHeaderEFlags(unsigned Flags) { ELFHeaderEFlags = Flags; }
+
+  /// MachO deployment target version information.
+  const VersionInfoType &getVersionInfo() const { return VersionInfo; }
+  void setVersionMin(MCVersionMinType Type, unsigned Major, unsigned Minor,
+                     unsigned Update) {
+    VersionInfo.EmitBuildVersion = false;
+    VersionInfo.TypeOrPlatform.Type = Type;
+    VersionInfo.Major = Major;
+    VersionInfo.Minor = Minor;
+    VersionInfo.Update = Update;
+  }
+  void setBuildVersion(MachO::PlatformType Platform, unsigned Major,
+                       unsigned Minor, unsigned Update) {
+    VersionInfo.EmitBuildVersion = true;
+    VersionInfo.TypeOrPlatform.Platform = Platform;
+    VersionInfo.Major = Major;
+    VersionInfo.Minor = Minor;
+    VersionInfo.Update = Update;
+  }
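+
+  // For example (illustrative), a Darwin target might record either form:
+  //   Asm.setVersionMin(MCVM_OSXVersionMin, 10, 14, 0);      // .macosx_version_min 10, 14
+  //   Asm.setBuildVersion(MachO::PLATFORM_MACOS, 10, 14, 0); // .build_version macos, 10, 14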
+
+  /// Reuse an assembler instance
+  ///
+  void reset();
+
+  MCContext &getContext() const { return Context; }
+
+  MCAsmBackend &getBackend() const { return Backend; }
+
+  MCCodeEmitter &getEmitter() const { return Emitter; }
+
+  MCObjectWriter &getWriter() const { return Writer; }
+
+  MCDwarfLineTableParams getDWARFLinetableParams() const { return LTParams; }
+  void setDWARFLinetableParams(MCDwarfLineTableParams P) { LTParams = P; }
+
+  /// Finish - Do final processing and write the object to the output stream
+  /// using the MCObjectWriter supplied at construction time.
+  void Finish();
+
+  // Layout all sections and prepare them for emission.
+  void layout(MCAsmLayout &Layout);
+
+  // FIXME: This does not belong here.
+  bool getSubsectionsViaSymbols() const { return SubsectionsViaSymbols; }
+  void setSubsectionsViaSymbols(bool Value) { SubsectionsViaSymbols = Value; }
+
+  bool isIncrementalLinkerCompatible() const {
+    return IncrementalLinkerCompatible;
+  }
+  void setIncrementalLinkerCompatible(bool Value) {
+    IncrementalLinkerCompatible = Value;
+  }
+
+  bool getRelaxAll() const { return RelaxAll; }
+  void setRelaxAll(bool Value) { RelaxAll = Value; }
+
+  bool isBundlingEnabled() const { return BundleAlignSize != 0; }
+
+  unsigned getBundleAlignSize() const { return BundleAlignSize; }
+
+  void setBundleAlignSize(unsigned Size) {
+    assert((Size == 0 || !(Size & (Size - 1))) &&
+           "Expect a power-of-two bundle align size");
+    BundleAlignSize = Size;
+  }
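+
+  // A minimal usage sketch (assuming an existing assembler instance 'Asm'):
+  //   Asm.setBundleAlignSize(16);       // enable 16-byte bundling
+  //   assert(Asm.isBundlingEnabled());  // BundleAlignSize != 0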
+
+  /// \name Section List Access
+  /// @{
+
+  iterator begin() { return Sections.begin(); }
+  const_iterator begin() const { return Sections.begin(); }
+
+  iterator end() { return Sections.end(); }
+  const_iterator end() const { return Sections.end(); }
+
+  size_t size() const { return Sections.size(); }
+
+  /// @}
+  /// \name Symbol List Access
+  /// @{
+  symbol_iterator symbol_begin() { return Symbols.begin(); }
+  const_symbol_iterator symbol_begin() const { return Symbols.begin(); }
+
+  symbol_iterator symbol_end() { return Symbols.end(); }
+  const_symbol_iterator symbol_end() const { return Symbols.end(); }
+
+  symbol_range symbols() { return make_range(symbol_begin(), symbol_end()); }
+  const_symbol_range symbols() const {
+    return make_range(symbol_begin(), symbol_end());
+  }
+
+  size_t symbol_size() const { return Symbols.size(); }
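+
+  // For example (illustrative), clients can walk the symbol list directly:
+  //   for (const MCSymbol &Sym : Asm.symbols())
+  //     if (Asm.isSymbolLinkerVisible(Sym))
+  //       ++NumVisible;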
+
+  /// @}
+  /// \name Indirect Symbol List Access
+  /// @{
+
+  // FIXME: This is a total hack, this should not be here. Once things are
+  // factored so that the streamer has direct access to the .o writer, it can
+  // disappear.
+  std::vector<IndirectSymbolData> &getIndirectSymbols() {
+    return IndirectSymbols;
+  }
+
+  indirect_symbol_iterator indirect_symbol_begin() {
+    return IndirectSymbols.begin();
+  }
+  const_indirect_symbol_iterator indirect_symbol_begin() const {
+    return IndirectSymbols.begin();
+  }
+
+  indirect_symbol_iterator indirect_symbol_end() {
+    return IndirectSymbols.end();
+  }
+  const_indirect_symbol_iterator indirect_symbol_end() const {
+    return IndirectSymbols.end();
+  }
+
+  size_t indirect_symbol_size() const { return IndirectSymbols.size(); }
+
+  /// @}
+  /// \name Linker Option List Access
+  /// @{
+
+  std::vector<std::vector<std::string>> &getLinkerOptions() {
+    return LinkerOptions;
+  }
+
+  /// @}
+  /// \name Data Region List Access
+  /// @{
+
+  // FIXME: This is a total hack, this should not be here. Once things are
+  // factored so that the streamer has direct access to the .o writer, it can
+  // disappear.
+  std::vector<DataRegionData> &getDataRegions() { return DataRegions; }
+
+  data_region_iterator data_region_begin() { return DataRegions.begin(); }
+  const_data_region_iterator data_region_begin() const {
+    return DataRegions.begin();
+  }
+
+  data_region_iterator data_region_end() { return DataRegions.end(); }
+  const_data_region_iterator data_region_end() const {
+    return DataRegions.end();
+  }
+
+  size_t data_region_size() const { return DataRegions.size(); }
+
+  /// @}
+  /// \name Linker Optimization Hint List Access
+  /// @{
+
+  // FIXME: This is a total hack, this should not be here. Once things are
+  // factored so that the streamer has direct access to the .o writer, it can
+  // disappear.
+  MCLOHContainer &getLOHContainer() { return LOHContainer; }
+  const MCLOHContainer &getLOHContainer() const {
+    return const_cast<MCAssembler *>(this)->getLOHContainer();
+  }
+  /// @}
+  /// \name Backend Data Access
+  /// @{
+
+  bool registerSection(MCSection &Section);
+
+  void registerSymbol(const MCSymbol &Symbol, bool *Created = nullptr);
+
+  ArrayRef<std::string> getFileNames() { return FileNames; }
+
+  void addFileName(StringRef FileName) {
+    if (!is_contained(FileNames, FileName))
+      FileNames.push_back(FileName);
+  }
+
+  /// \brief Write the necessary bundle padding to the given object writer.
+  /// Expects a fragment \p F containing instructions and its size \p FSize.
+  void writeFragmentPadding(const MCFragment &F, uint64_t FSize,
+                            MCObjectWriter *OW) const;
+
+  /// @}
+
+  void dump() const;
+};
+
+/// \brief Compute the amount of padding required before the fragment \p F to
+/// obey bundling restrictions, where \p FOffset is the fragment's offset in
+/// its section and \p FSize is the fragment's size.
+uint64_t computeBundlePadding(const MCAssembler &Assembler, const MCFragment *F,
+                              uint64_t FOffset, uint64_t FSize);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCASSEMBLER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCCodeEmitter.h b/linux-x64/clang/include/llvm/MC/MCCodeEmitter.h
new file mode 100644
index 0000000..f1b0b78
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCCodeEmitter.h
@@ -0,0 +1,43 @@
+//===- llvm/MC/MCCodeEmitter.h - Instruction Encoding -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCODEEMITTER_H
+#define LLVM_MC_MCCODEEMITTER_H
+
+namespace llvm {
+
+class MCFixup;
+class MCInst;
+class MCSubtargetInfo;
+class raw_ostream;
+template<typename T> class SmallVectorImpl;
+
+/// MCCodeEmitter - Generic instruction encoding interface.
+class MCCodeEmitter {
+protected: // Can only create subclasses.
+  MCCodeEmitter();
+
+public:
+  MCCodeEmitter(const MCCodeEmitter &) = delete;
+  MCCodeEmitter &operator=(const MCCodeEmitter &) = delete;
+  virtual ~MCCodeEmitter();
+
+  /// Lifetime management
+  virtual void reset() {}
+
+  /// encodeInstruction - Encode the given \p Inst to bytes on the output
+  /// stream \p OS.
+  virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS,
+                                 SmallVectorImpl<MCFixup> &Fixups,
+                                 const MCSubtargetInfo &STI) const = 0;
+};
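+
+// A minimal illustrative subclass for a hypothetical target; real emitters
+// translate each MCInst into its machine encoding and record fixups for
+// operands that cannot be resolved until relocation:
+//   class ToyCodeEmitter : public MCCodeEmitter {
+//     void encodeInstruction(const MCInst &Inst, raw_ostream &OS,
+//                            SmallVectorImpl<MCFixup> &Fixups,
+//                            const MCSubtargetInfo &STI) const override {
+//       OS << char(0x90); // emit a fixed one-byte encoding (sketch only)
+//     }
+//   };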
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCCODEEMITTER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCCodePadder.h b/linux-x64/clang/include/llvm/MC/MCCodePadder.h
new file mode 100644
index 0000000..b7772b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCCodePadder.h
@@ -0,0 +1,243 @@
+//===- llvm/MC/MCCodePadder.h - MC Code Padder ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCODEPADDER_H
+#define LLVM_MC_MCCODEPADDER_H
+
+#include "MCFragment.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MCAsmLayout;
+class MCCodePaddingPolicy;
+class MCFragment;
+class MCInst;
+class MCObjectStreamer;
+class MCSection;
+
+typedef SmallVector<const MCPaddingFragment *, 8> MCPFRange;
+
+struct MCCodePaddingContext {
+  bool IsPaddingActive;
+  bool IsBasicBlockInsideInnermostLoop;
+  bool IsBasicBlockReachableViaFallthrough;
+  bool IsBasicBlockReachableViaBranch;
+};
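+
+// For example (illustrative), a streamer could describe a loop header block
+// before asking the padder to handle it:
+//   MCCodePaddingContext Context;
+//   Context.IsPaddingActive = true;
+//   Context.IsBasicBlockInsideInnermostLoop = true;
+//   Context.IsBasicBlockReachableViaFallthrough = true;
+//   Context.IsBasicBlockReachableViaBranch = false;
+//   Padder.handleBasicBlockStart(&Streamer, Context);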
+
+/// Target-independent base class in charge of all code padding decisions for a
+/// target. During encoding it determines if and where MCPaddingFragments will
+/// be located; later on, when layout information is available, it determines
+/// their sizes.
+class MCCodePadder {
+  MCCodePadder(const MCCodePadder &) = delete;
+  void operator=(const MCCodePadder &) = delete;
+
+  /// Determines if the MCCodePaddingPolicies are active.
+  bool ArePoliciesActive;
+
+  /// All the supported MCCodePaddingPolicies.
+  SmallPtrSet<MCCodePaddingPolicy *, 4> CodePaddingPolicies;
+
+  /// A pointer to the fragment of the instruction whose padding is currently
+  /// being computed.
+  MCPaddingFragment *CurrHandledInstFragment;
+
+  /// A map holding the jurisdiction for each padding fragment. Key: padding
+  /// fragment. Value: The fragment's jurisdiction. A jurisdiction is a vector
+  /// of padding fragments whose conditions are being controlled by another
+  /// fragment, the key fragment.
+  DenseMap<MCPaddingFragment *, MCPFRange> FragmentToJurisdiction;
+  MCPFRange &getJurisdiction(MCPaddingFragment *Fragment, MCAsmLayout &Layout);
+
+  /// A map holding the maximal instruction window size relevant for a padding
+  /// fragment.
+  DenseMap<MCPaddingFragment *, uint64_t> FragmentToMaxWindowSize;
+  uint64_t getMaxWindowSize(MCPaddingFragment *Fragment, MCAsmLayout &Layout);
+
+protected:
+  /// The current streamer, used to stream code padding.
+  MCObjectStreamer *OS;
+
+  bool addPolicy(MCCodePaddingPolicy *Policy);
+
+  virtual bool
+  basicBlockRequiresInsertionPoint(const MCCodePaddingContext &Context) {
+    return false;
+  }
+
+  virtual bool instructionRequiresInsertionPoint(const MCInst &Inst) {
+    return false;
+  }
+
+  virtual bool usePoliciesForBasicBlock(const MCCodePaddingContext &Context) {
+    return Context.IsPaddingActive;
+  }
+
+public:
+  MCCodePadder()
+      : ArePoliciesActive(false), CurrHandledInstFragment(nullptr),
+        OS(nullptr) {}
+  virtual ~MCCodePadder();
+
+  /// Handles all target related code padding when starting to write a new
+  /// basic block to an object file.
+  ///
+  /// \param OS The streamer used for writing the padding data.
+  /// \param Context The context of the padding; embeds the basic block's
+  /// parameters.
+  void handleBasicBlockStart(MCObjectStreamer *OS,
+                             const MCCodePaddingContext &Context);
+  /// Handles all target related code padding when done writing a block to an
+  /// object file.
+  ///
+  /// \param Context The context of the padding; embeds the basic block's
+  /// parameters.
+  void handleBasicBlockEnd(const MCCodePaddingContext &Context);
+  /// Handles all target related code padding before writing a new instruction
+  /// to an object file.
+  ///
+  /// \param Inst the instruction.
+  void handleInstructionBegin(const MCInst &Inst);
+  /// Handles all target related code padding after writing an instruction to an
+  /// object file.
+  ///
+  /// \param Inst the instruction.
+  void handleInstructionEnd(const MCInst &Inst);
+
+  /// Relaxes a fragment (changes the size of the padding) according to target
+  /// requirements. The new size computation is done w.r.t. a layout.
+  ///
+  /// \param Fragment The fragment to relax.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns true iff any relaxation occurred.
+  bool relaxFragment(MCPaddingFragment *Fragment, MCAsmLayout &Layout);
+};
+
+/// The base class for all padding policies, i.e. a rule or set of rules to pad
+/// the generated code.
+class MCCodePaddingPolicy {
+  MCCodePaddingPolicy() = delete;
+  MCCodePaddingPolicy(const MCCodePaddingPolicy &) = delete;
+  void operator=(const MCCodePaddingPolicy &) = delete;
+
+protected:
+  /// A mask holding the kind of this policy, i.e. only the i'th bit will be set
+  /// where i is the kind number.
+  const uint64_t KindMask;
+  /// Instruction window size relevant to this policy.
+  const uint64_t WindowSize;
+  /// A boolean indicating which byte of the instruction determines its
+  /// instruction window. If true, the last byte of the instruction; otherwise,
+  /// the first byte of the instruction.
+  const bool InstByteIsLastByte;
+
+  MCCodePaddingPolicy(uint64_t Kind, uint64_t WindowSize,
+                      bool InstByteIsLastByte)
+      : KindMask(UINT64_C(1) << Kind), WindowSize(WindowSize),
+        InstByteIsLastByte(InstByteIsLastByte) {}
+
+  /// Computes and returns the offset of the consecutive fragment of a given
+  /// fragment.
+  ///
+  /// \param Fragment The fragment whose consecutive offset will be computed.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns the offset of the consecutive fragment of \p Fragment.
+  static uint64_t getNextFragmentOffset(const MCFragment *Fragment,
+                                        const MCAsmLayout &Layout);
+  /// Returns the instruction byte of an instruction pointed to by a given
+  /// MCPaddingFragment. An instruction byte is the address of the byte of an
+  /// instruction which determines its instruction window.
+  ///
+  /// \param Fragment The fragment pointing to the instruction.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns the instruction byte of an instruction pointed to by \p Fragment.
+  uint64_t getFragmentInstByte(const MCPaddingFragment *Fragment,
+                               MCAsmLayout &Layout) const;
+  uint64_t computeWindowEndAddress(const MCPaddingFragment *Fragment,
+                                   uint64_t Offset, MCAsmLayout &Layout) const;
+
+  /// Computes and returns the penalty weight of a first instruction window in a
+  /// range. This requires a special function since the first window does not
+  /// contain all the padding fragments in that window. It only contains all the
+  /// padding fragments starting from the relevant insertion point.
+  ///
+  /// \param Window The first window.
+  /// \param Offset The offset of the parent section relative to the beginning
+  /// of the file, mod the window size.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns the penalty weight of a first instruction window in a range, \p
+  /// Window.
+  double computeFirstWindowPenaltyWeight(const MCPFRange &Window,
+                                         uint64_t Offset,
+                                         MCAsmLayout &Layout) const;
+  /// Computes and returns the penalty caused by an instruction window.
+  ///
+  /// \param Window The instruction window.
+  /// \param Offset The offset of the parent section relative to the beginning
+  /// of the file, mod the window size.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns the penalty caused by \p Window.
+  virtual double computeWindowPenaltyWeight(const MCPFRange &Window,
+                                            uint64_t Offset,
+                                            MCAsmLayout &Layout) const = 0;
+
+public:
+  virtual ~MCCodePaddingPolicy() {}
+
+  /// Returns the kind mask of this policy - a mask holding the kind of this
+  /// policy, i.e. only the i'th bit will be set where i is the kind number.
+  uint64_t getKindMask() const { return KindMask; }
+  /// Returns the instruction window size relevant to this policy.
+  uint64_t getWindowSize() const { return WindowSize; }
+  /// Returns true if the last byte of an instruction determines its instruction
+  /// window, or false if the first byte of an instruction determines it.
+  bool isInstByteLastByte() const { return InstByteIsLastByte; }
+
+  /// Returns true iff this policy needs padding for a given basic block.
+  ///
+  /// \param Context The context of the padding; embeds the basic block's
+  /// parameters.
+  ///
+  /// \returns true iff this policy needs padding for the basic block.
+  virtual bool
+  basicBlockRequiresPaddingFragment(const MCCodePaddingContext &Context) const {
+    return false;
+  }
+  /// Returns true iff this policy needs padding for a given instruction.
+  ///
+  /// \param Inst The given instruction.
+  ///
+  /// \returns true iff this policy needs padding for \p Inst.
+  virtual bool instructionRequiresPaddingFragment(const MCInst &Inst) const {
+    return false;
+  }
+  /// Computes and returns the penalty caused by a range of instruction windows.
+  /// The weight is computed for each window separately and then accumulated.
+  ///
+  /// \param Range The range.
+  /// \param Offset The offset of the parent section relative to the beginning
+  /// of the file, mod the window size.
+  /// \param Layout Code layout information.
+  ///
+  /// \returns the penalty caused by \p Range.
+  double computeRangePenaltyWeight(const MCPFRange &Range, uint64_t Offset,
+                                   MCAsmLayout &Layout) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_MC_MCCODEPADDER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCCodeView.h b/linux-x64/clang/include/llvm/MC/MCCodeView.h
new file mode 100644
index 0000000..c8f1451
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCCodeView.h
@@ -0,0 +1,301 @@
+//===- MCCodeView.h - Machine Code CodeView support -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Holds state from .cv_file and .cv_loc directives for later emission.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCODEVIEW_H
+#define LLVM_MC_MCCODEVIEW_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/MCObjectStreamer.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+class MCContext;
+class MCObjectStreamer;
+class MCStreamer;
+class CodeViewContext;
+
+/// \brief Instances of this class represent the information from a
+/// .cv_loc directive.
+class MCCVLoc {
+  uint32_t FunctionId;
+  uint32_t FileNum;
+  uint32_t Line;
+  uint16_t Column;
+  uint16_t PrologueEnd : 1;
+  uint16_t IsStmt : 1;
+
+private: // CodeViewContext manages these
+  friend class CodeViewContext;
+  MCCVLoc(unsigned functionid, unsigned fileNum, unsigned line, unsigned column,
+          bool prologueend, bool isstmt)
+      : FunctionId(functionid), FileNum(fileNum), Line(line), Column(column),
+        PrologueEnd(prologueend), IsStmt(isstmt) {}
+
+  // Allow the default copy constructor and assignment operator to be used
+  // for an MCCVLoc object.
+
+public:
+  unsigned getFunctionId() const { return FunctionId; }
+
+  /// \brief Get the FileNum of this MCCVLoc.
+  unsigned getFileNum() const { return FileNum; }
+
+  /// \brief Get the Line of this MCCVLoc.
+  unsigned getLine() const { return Line; }
+
+  /// \brief Get the Column of this MCCVLoc.
+  unsigned getColumn() const { return Column; }
+
+  bool isPrologueEnd() const { return PrologueEnd; }
+  bool isStmt() const { return IsStmt; }
+
+  void setFunctionId(unsigned FID) { FunctionId = FID; }
+
+  /// \brief Set the FileNum of this MCCVLoc.
+  void setFileNum(unsigned fileNum) { FileNum = fileNum; }
+
+  /// \brief Set the Line of this MCCVLoc.
+  void setLine(unsigned line) { Line = line; }
+
+  /// \brief Set the Column of this MCCVLoc.
+  void setColumn(unsigned column) {
+    assert(column <= UINT16_MAX);
+    Column = column;
+  }
+
+  void setPrologueEnd(bool PE) { PrologueEnd = PE; }
+  void setIsStmt(bool IS) { IsStmt = IS; }
+};
+
+/// \brief Instances of this class represent the line information for
+/// the CodeView line table entries.  An entry is created after a machine
+/// instruction is assembled, and it uses an address from a temporary label
+/// created at the current address in the current section and the info from
+/// the last .cv_loc directive seen, as stored in the context.
+class MCCVLineEntry : public MCCVLoc {
+  const MCSymbol *Label;
+
+private:
+  // Allow the default copy constructor and assignment operator to be used
+  // for an MCCVLineEntry object.
+
+public:
+  // Constructor to create an MCCVLineEntry given a symbol and the dwarf loc.
+  MCCVLineEntry(const MCSymbol *Label, const MCCVLoc loc)
+      : MCCVLoc(loc), Label(Label) {}
+
+  const MCSymbol *getLabel() const { return Label; }
+
+  // This is called when an instruction is assembled into the specified
+  // section; if there is information from the last .cv_loc directive that
+  // does not yet have a line entry, one is made for it here.
+  static void Make(MCObjectStreamer *MCOS);
+};
+
+/// Information describing a function or inlined call site introduced by
+/// .cv_func_id or .cv_inline_site_id. Accumulates information from .cv_loc
+/// directives used with this function's id or the id of an inlined call site
+/// within this function or inlined call site.
+struct MCCVFunctionInfo {
+  /// If this represents an inlined call site, then ParentFuncIdPlusOne will be
+  /// the parent function id plus one. If this represents a normal function,
+  /// then there is no parent, and ParentFuncIdPlusOne will be FunctionSentinel.
+  /// If this struct is an unallocated slot in the function info vector, then
+  /// ParentFuncIdPlusOne will be zero.
+  unsigned ParentFuncIdPlusOne = 0;
+
+  enum : unsigned { FunctionSentinel = ~0U };
+
+  struct LineInfo {
+    unsigned File;
+    unsigned Line;
+    unsigned Col;
+  };
+
+  LineInfo InlinedAt;
+
+  /// The section of the first .cv_loc directive used for this function, or null
+  /// if none has been seen yet.
+  MCSection *Section = nullptr;
+
+  /// Map from inlined call site id to the inlined at location to use for that
+  /// call site. Call chains are collapsed, so for the call chain 'f -> g -> h',
+  /// the InlinedAtMap of 'f' will contain entries for 'g' and 'h' that both
+  /// list the line info for the 'g' call site.
+  DenseMap<unsigned, LineInfo> InlinedAtMap;
+
+  /// Returns true if this function info has not yet been used in a
+  /// .cv_func_id or .cv_inline_site_id directive.
+  bool isUnallocatedFunctionInfo() const { return ParentFuncIdPlusOne == 0; }
+
+  /// Returns true if this represents an inlined call site, meaning
+  /// ParentFuncIdPlusOne is neither zero nor ~0U.
+  bool isInlinedCallSite() const {
+    return !isUnallocatedFunctionInfo() &&
+           ParentFuncIdPlusOne != FunctionSentinel;
+  }
+
+  unsigned getParentFuncId() const {
+    assert(isInlinedCallSite());
+    return ParentFuncIdPlusOne - 1;
+  }
+};
+
+/// Holds state from .cv_file and .cv_loc directives for later emission.
+class CodeViewContext {
+public:
+  CodeViewContext();
+  ~CodeViewContext();
+
+  bool isValidFileNumber(unsigned FileNumber) const;
+  bool addFile(MCStreamer &OS, unsigned FileNumber, StringRef Filename,
+               ArrayRef<uint8_t> ChecksumBytes, uint8_t ChecksumKind);
+
+  /// Records the function id of a normal function. Returns false if the
+  /// function id has already been used, and true otherwise.
+  bool recordFunctionId(unsigned FuncId);
+
+  /// Records the function id of an inlined call site. Records the "inlined at"
+  /// location info of the call site, including what function or inlined call
+  /// site it was inlined into. Returns false if the function id has already
+  /// been used, and true otherwise.
+  bool recordInlinedCallSiteId(unsigned FuncId, unsigned IAFunc,
+                               unsigned IAFile, unsigned IALine,
+                               unsigned IACol);
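+
+  // For example (illustrative), for a call site of 'g' inlined into 'f':
+  //   CVCtx.recordFunctionId(/*FuncId=*/0);          // 'f' itself
+  //   CVCtx.recordInlinedCallSiteId(/*FuncId=*/1, /*IAFunc=*/0,
+  //                                 /*IAFile=*/1, /*IALine=*/10,
+  //                                 /*IACol=*/4);    // 'g' inlined at f:10:4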
+
+  /// Retrieve the function info if this is a valid function id, or nullptr.
+  MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId);
+
+  /// Saves the information from the currently parsed .cv_loc directive
+  /// and sets CVLocSeen.  When the next instruction is assembled, an entry
+  /// in the line number table with this information and the address of the
+  /// instruction will be created.
+  void setCurrentCVLoc(unsigned FunctionId, unsigned FileNo, unsigned Line,
+                       unsigned Column, bool PrologueEnd, bool IsStmt) {
+    CurrentCVLoc.setFunctionId(FunctionId);
+    CurrentCVLoc.setFileNum(FileNo);
+    CurrentCVLoc.setLine(Line);
+    CurrentCVLoc.setColumn(Column);
+    CurrentCVLoc.setPrologueEnd(PrologueEnd);
+    CurrentCVLoc.setIsStmt(IsStmt);
+    CVLocSeen = true;
+  }
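+
+  // For example (illustrative), the directive '.cv_loc 0 1 10 4' would be
+  // recorded roughly as:
+  //   CVCtx.setCurrentCVLoc(/*FunctionId=*/0, /*FileNo=*/1, /*Line=*/10,
+  //                         /*Column=*/4, /*PrologueEnd=*/false,
+  //                         /*IsStmt=*/true);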
+
+  bool getCVLocSeen() { return CVLocSeen; }
+  void clearCVLocSeen() { CVLocSeen = false; }
+
+  const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
+
+  bool isValidCVFileNumber(unsigned FileNumber);
+
+  /// \brief Add a line entry.
+  void addLineEntry(const MCCVLineEntry &LineEntry);
+
+  std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId);
+
+  std::pair<size_t, size_t> getLineExtent(unsigned FuncId);
+
+  ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R);
+
+  /// Emits a line table substream.
+  void emitLineTableForFunction(MCObjectStreamer &OS, unsigned FuncId,
+                                const MCSymbol *FuncBegin,
+                                const MCSymbol *FuncEnd);
+
+  void emitInlineLineTableForFunction(MCObjectStreamer &OS,
+                                      unsigned PrimaryFunctionId,
+                                      unsigned SourceFileId,
+                                      unsigned SourceLineNum,
+                                      const MCSymbol *FnStartSym,
+                                      const MCSymbol *FnEndSym);
+
+  /// Encodes the binary annotations once we have a layout.
+  void encodeInlineLineTable(MCAsmLayout &Layout,
+                             MCCVInlineLineTableFragment &F);
+
+  void
+  emitDefRange(MCObjectStreamer &OS,
+               ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
+               StringRef FixedSizePortion);
+
+  void encodeDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &F);
+
+  /// Emits the string table substream.
+  void emitStringTable(MCObjectStreamer &OS);
+
+  /// Emits the file checksum substream.
+  void emitFileChecksums(MCObjectStreamer &OS);
+
+  /// Emits the offset into the checksum table of the given file number.
+  void emitFileChecksumOffset(MCObjectStreamer &OS, unsigned FileNo);
+
+  /// Add something to the string table.  Returns the final string as well as
+  /// its offset into the string table.
+  std::pair<StringRef, unsigned> addToStringTable(StringRef S);
+
+private:
+  /// The current CodeView line information from the last .cv_loc directive.
+  MCCVLoc CurrentCVLoc = MCCVLoc(0, 0, 0, 0, false, true);
+  bool CVLocSeen = false;
+
+  /// Map from string to string table offset.
+  StringMap<unsigned> StringTable;
+
+  /// The fragment that ultimately holds our strings.
+  MCDataFragment *StrTabFragment = nullptr;
+  bool InsertedStrTabFragment = false;
+
+  MCDataFragment *getStringTableFragment();
+
+  /// Get a string table offset.
+  unsigned getStringTableOffset(StringRef S);
+
+  struct FileInfo {
+    unsigned StringTableOffset;
+
+    // Indicates if this FileInfo corresponds to an actual file, or hasn't been
+    // set yet.
+    bool Assigned = false;
+
+    uint8_t ChecksumKind;
+
+    ArrayRef<uint8_t> Checksum;
+
+    // Checksum offset stored as a symbol because it might be requested
+    // before it has been calculated, so a fixup may be needed.
+    MCSymbol *ChecksumTableOffset;
+  };
+
+  /// Array storing added file information.
+  SmallVector<FileInfo, 4> Files;
+
+  /// The offset of the first and last .cv_loc directive for a given function
+  /// id.
+  std::map<unsigned, std::pair<size_t, size_t>> MCCVLineStartStop;
+
+  /// A collection of MCCVLineEntry for each section.
+  std::vector<MCCVLineEntry> MCCVLines;
+
+  /// All known functions and inlined call sites, indexed by function id.
+  std::vector<MCCVFunctionInfo> Functions;
+
+  /// Indicate whether we have already laid out the checksum table addresses or
+  /// not.
+  bool ChecksumOffsetsAssigned = false;
+};
+
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCContext.h b/linux-x64/clang/include/llvm/MC/MCContext.h
new file mode 100644
index 0000000..c110ffd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCContext.h
@@ -0,0 +1,722 @@
+//===- MCContext.h - Machine Code Context -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCONTEXT_H
+#define LLVM_MC_MCCONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/MC/MCAsmMacro.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+  class CodeViewContext;
+  class MCAsmInfo;
+  class MCLabel;
+  class MCObjectFileInfo;
+  class MCRegisterInfo;
+  class MCSection;
+  class MCSectionCOFF;
+  class MCSectionELF;
+  class MCSectionMachO;
+  class MCSectionWasm;
+  class MCStreamer;
+  class MCSymbol;
+  class MCSymbolELF;
+  class MCSymbolWasm;
+  class SMLoc;
+  class SourceMgr;
+
+  /// Context object for machine code objects.  This class owns all of the
+  /// sections that it creates.
+  ///
+  class MCContext {
+  public:
+    using SymbolTable = StringMap<MCSymbol *, BumpPtrAllocator &>;
+
+  private:
+    /// The SourceMgr for this object, if any.
+    const SourceMgr *SrcMgr;
+
+    /// The SourceMgr for inline assembly, if any.
+    SourceMgr *InlineSrcMgr;
+
+    /// The MCAsmInfo for this target.
+    const MCAsmInfo *MAI;
+
+    /// The MCRegisterInfo for this target.
+    const MCRegisterInfo *MRI;
+
+    /// The MCObjectFileInfo for this target.
+    const MCObjectFileInfo *MOFI;
+
+    std::unique_ptr<CodeViewContext> CVContext;
+
+    /// Allocator object used for creating machine code objects.
+    ///
+    /// We use a bump pointer allocator to avoid the need to track all allocated
+    /// objects.
+    BumpPtrAllocator Allocator;
+
+    SpecificBumpPtrAllocator<MCSectionCOFF> COFFAllocator;
+    SpecificBumpPtrAllocator<MCSectionELF> ELFAllocator;
+    SpecificBumpPtrAllocator<MCSectionMachO> MachOAllocator;
+    SpecificBumpPtrAllocator<MCSectionWasm> WasmAllocator;
+
+    /// Bindings of names to symbols.
+    SymbolTable Symbols;
+
+    /// A mapping from a local label number and an instance count to a symbol.
+    /// For example, in the assembly
+    ///     1:
+    ///     2:
+    ///     1:
+    /// We have three labels represented by the pairs (1, 0), (2, 0) and (1, 1)
+    DenseMap<std::pair<unsigned, unsigned>, MCSymbol *> LocalSymbols;
+
+    /// Keeps track of names that were used for both declared and
+    /// artificial symbols. The value is "true" if the name has been used for a
+    /// non-section symbol (there can be at most one of those, plus an unlimited
+    /// number of section symbols with the same name).
+    StringMap<bool, BumpPtrAllocator &> UsedNames;
+
+    /// The next ID to dole out to an unnamed assembler temporary symbol with
+    /// a given prefix.
+    StringMap<unsigned> NextID;
+
+    /// Instances of directional local labels.
+    DenseMap<unsigned, MCLabel *> Instances;
+    /// NextInstance() creates the next instance of the directional local label
+    /// for the LocalLabelVal and adds it to the map if needed.
+    unsigned NextInstance(unsigned LocalLabelVal);
+    /// GetInstance() gets the current instance of the directional local label
+    /// for the LocalLabelVal and adds it to the map if needed.
+    unsigned GetInstance(unsigned LocalLabelVal);
+
+    /// The file name of the log file from the environment variable
+    /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+    /// directive is used, or it is an error.
+    char *SecureLogFile;
+    /// The stream that gets written to for the .secure_log_unique directive.
+    std::unique_ptr<raw_fd_ostream> SecureLog;
+    /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+    /// catch errors if .secure_log_unique appears twice without
+    /// .secure_log_reset appearing between them.
+    bool SecureLogUsed = false;
+
+    /// The compilation directory to use for DW_AT_comp_dir.
+    SmallString<128> CompilationDir;
+
+    /// The main file name if passed in explicitly.
+    std::string MainFileName;
+
+    /// The dwarf file and directory tables from the dwarf .file directive.
+    /// We now emit a line table for each compile unit. To reduce the prologue
+    /// size of each line table, the files and directories used by each compile
+    /// unit are separated.
+    std::map<unsigned, MCDwarfLineTable> MCDwarfLineTablesCUMap;
+
+    /// The current dwarf line information from the last dwarf .loc directive.
+    MCDwarfLoc CurrentDwarfLoc;
+    bool DwarfLocSeen = false;
+
+    /// Generate dwarf debugging info for assembly source files.
+    bool GenDwarfForAssembly = false;
+
+    /// The current dwarf file number when generating dwarf debugging info for
+    /// assembly source files.
+    unsigned GenDwarfFileNumber = 0;
+
+    /// Sections for generating the .debug_ranges and .debug_aranges sections.
+    SetVector<MCSection *> SectionsForRanges;
+
+    /// The information gathered from labels that will have dwarf label
+    /// entries when generating dwarf assembly source files.
+    std::vector<MCGenDwarfLabelEntry> MCGenDwarfLabelEntries;
+
+    /// The string to embed in the debug information for the compile unit, if
+    /// non-empty.
+    StringRef DwarfDebugFlags;
+
+    /// The string to embed in as the dwarf AT_producer for the compile unit, if
+    /// non-empty.
+    StringRef DwarfDebugProducer;
+
+    /// The maximum version of dwarf that we should emit.
+    uint16_t DwarfVersion = 4;
+
+    /// Honor temporary labels; this is useful for debugging semantic
+    /// differences between temporary and non-temporary labels (primarily on
+    /// Darwin).
+    bool AllowTemporaryLabels = true;
+    bool UseNamesOnTempLabels = true;
+
+    /// The Compile Unit ID that we are currently processing.
+    unsigned DwarfCompileUnitID = 0;
+
+    struct ELFSectionKey {
+      std::string SectionName;
+      StringRef GroupName;
+      unsigned UniqueID;
+
+      ELFSectionKey(StringRef SectionName, StringRef GroupName,
+                    unsigned UniqueID)
+          : SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {
+      }
+
+      bool operator<(const ELFSectionKey &Other) const {
+        if (SectionName != Other.SectionName)
+          return SectionName < Other.SectionName;
+        if (GroupName != Other.GroupName)
+          return GroupName < Other.GroupName;
+        return UniqueID < Other.UniqueID;
+      }
+    };
+
+    struct COFFSectionKey {
+      std::string SectionName;
+      StringRef GroupName;
+      int SelectionKey;
+      unsigned UniqueID;
+
+      COFFSectionKey(StringRef SectionName, StringRef GroupName,
+                     int SelectionKey, unsigned UniqueID)
+          : SectionName(SectionName), GroupName(GroupName),
+            SelectionKey(SelectionKey), UniqueID(UniqueID) {}
+
+      bool operator<(const COFFSectionKey &Other) const {
+        if (SectionName != Other.SectionName)
+          return SectionName < Other.SectionName;
+        if (GroupName != Other.GroupName)
+          return GroupName < Other.GroupName;
+        if (SelectionKey != Other.SelectionKey)
+          return SelectionKey < Other.SelectionKey;
+        return UniqueID < Other.UniqueID;
+      }
+    };
+
+    struct WasmSectionKey {
+      std::string SectionName;
+      StringRef GroupName;
+      unsigned UniqueID;
+
+      WasmSectionKey(StringRef SectionName, StringRef GroupName,
+                     unsigned UniqueID)
+          : SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {
+      }
+
+      bool operator<(const WasmSectionKey &Other) const {
+        if (SectionName != Other.SectionName)
+          return SectionName < Other.SectionName;
+        if (GroupName != Other.GroupName)
+          return GroupName < Other.GroupName;
+        return UniqueID < Other.UniqueID;
+      }
+    };
+
+    StringMap<MCSectionMachO *> MachOUniquingMap;
+    std::map<ELFSectionKey, MCSectionELF *> ELFUniquingMap;
+    std::map<COFFSectionKey, MCSectionCOFF *> COFFUniquingMap;
+    std::map<WasmSectionKey, MCSectionWasm *> WasmUniquingMap;
+    StringMap<bool> RelSecNames;
+
+    SpecificBumpPtrAllocator<MCSubtargetInfo> MCSubtargetAllocator;
+
+    /// Do automatic reset in destructor
+    bool AutoReset;
+
+    bool HadError = false;
+
+    MCSymbol *createSymbolImpl(const StringMapEntry<bool> *Name,
+                               bool CanBeUnnamed);
+    MCSymbol *createSymbol(StringRef Name, bool AlwaysAddSuffix,
+                           bool IsTemporary);
+
+    MCSymbol *getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal,
+                                                unsigned Instance);
+
+    MCSectionELF *createELFSectionImpl(StringRef Section, unsigned Type,
+                                       unsigned Flags, SectionKind K,
+                                       unsigned EntrySize,
+                                       const MCSymbolELF *Group,
+                                       unsigned UniqueID,
+                                       const MCSymbolELF *Associated);
+
+    /// \brief Map of currently defined macros.
+    StringMap<MCAsmMacro> MacroMap;
+
+  public:
+    explicit MCContext(const MCAsmInfo *MAI, const MCRegisterInfo *MRI,
+                       const MCObjectFileInfo *MOFI,
+                       const SourceMgr *Mgr = nullptr, bool DoAutoReset = true);
+    MCContext(const MCContext &) = delete;
+    MCContext &operator=(const MCContext &) = delete;
+    ~MCContext();
+
+    const SourceMgr *getSourceManager() const { return SrcMgr; }
+
+    void setInlineSourceManager(SourceMgr *SM) { InlineSrcMgr = SM; }
+
+    const MCAsmInfo *getAsmInfo() const { return MAI; }
+
+    const MCRegisterInfo *getRegisterInfo() const { return MRI; }
+
+    const MCObjectFileInfo *getObjectFileInfo() const { return MOFI; }
+
+    CodeViewContext &getCVContext();
+
+    void setAllowTemporaryLabels(bool Value) { AllowTemporaryLabels = Value; }
+    void setUseNamesOnTempLabels(bool Value) { UseNamesOnTempLabels = Value; }
+
+    /// \name Module Lifetime Management
+    /// @{
+
+    /// reset - Return the object to its state right after construction, to
+    /// prepare for processing a new module.
+    void reset();
+
+    /// @}
+
+    /// \name Symbol Management
+    /// @{
+
+    /// Create and return a new linker temporary symbol with a unique but
+    /// unspecified name.
+    MCSymbol *createLinkerPrivateTempSymbol();
+
+    /// Create and return a new assembler temporary symbol with a unique but
+    /// unspecified name.
+    MCSymbol *createTempSymbol(bool CanBeUnnamed = true);
+
+    MCSymbol *createTempSymbol(const Twine &Name, bool AlwaysAddSuffix,
+                               bool CanBeUnnamed = true);
+
+    /// Create the definition of a directional local symbol for numbered label
+    /// (used for "1:" definitions).
+    MCSymbol *createDirectionalLocalSymbol(unsigned LocalLabelVal);
+
+    /// Create and return a directional local symbol for numbered label (used
+    /// for "1b" or 1f" references).
+    MCSymbol *getDirectionalLocalSymbol(unsigned LocalLabelVal, bool Before);
+
+    /// Look up the symbol with the specified \p Name.  If it exists,
+    /// return it.  If not, create a forward reference and return it.
+    ///
+    /// \param Name - The symbol name, which must be unique across all symbols.
+    MCSymbol *getOrCreateSymbol(const Twine &Name);
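+
+    // For example (illustrative):
+    //   MCSymbol *Foo = Ctx.getOrCreateSymbol("foo"); // created on first use
+    //   assert(Ctx.lookupSymbol("foo") == Foo);       // subsequently visible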
+
+    /// Gets a symbol that will be defined to the final stack offset of a local
+    /// variable after codegen.
+    ///
+    /// \param Idx - The index of a local variable passed to @llvm.localescape.
+    MCSymbol *getOrCreateFrameAllocSymbol(StringRef FuncName, unsigned Idx);
+
+    MCSymbol *getOrCreateParentFrameOffsetSymbol(StringRef FuncName);
+
+    MCSymbol *getOrCreateLSDASymbol(StringRef FuncName);
+
+    /// Get the symbol for \p Name, or null.
+    MCSymbol *lookupSymbol(const Twine &Name) const;
+
+    /// Set value for a symbol.
+    void setSymbolValue(MCStreamer &Streamer, StringRef Sym, uint64_t Val);
+
+    /// getSymbols - Get a reference for the symbol table for clients that
+    /// want to, for example, iterate over all symbols. 'const' because we
+    /// still want any modifications to the table itself to use the MCContext
+    /// APIs.
+    const SymbolTable &getSymbols() const { return Symbols; }
+
+    /// @}
+
+    /// \name Section Management
+    /// @{
+
+    enum : unsigned {
+      /// Pass this value as the UniqueID during section creation to get the
+      /// generic section with the given name and characteristics. The usual
+      /// sections such as .text use this ID.
+      GenericSectionID = ~0U
+    };
+
+    /// Return the MCSection for the specified mach-o section.  This requires
+    /// the operands to be valid.
+    MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
+                                    unsigned TypeAndAttributes,
+                                    unsigned Reserved2, SectionKind K,
+                                    const char *BeginSymName = nullptr);
+
+    MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
+                                    unsigned TypeAndAttributes, SectionKind K,
+                                    const char *BeginSymName = nullptr) {
+      return getMachOSection(Segment, Section, TypeAndAttributes, 0, K,
+                             BeginSymName);
+    }
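+
+    // For example (illustrative), the canonical Mach-O text section:
+    //   MCSectionMachO *Text = Ctx.getMachOSection(
+    //       "__TEXT", "__text", MachO::S_ATTR_PURE_INSTRUCTIONS,
+    //       SectionKind::getText());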
+
+    MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
+                                unsigned Flags) {
+      return getELFSection(Section, Type, Flags, 0, "");
+    }
+
+    MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
+                                unsigned Flags, unsigned EntrySize,
+                                const Twine &Group) {
+      return getELFSection(Section, Type, Flags, EntrySize, Group, ~0);
+    }
+
+    MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
+                                unsigned Flags, unsigned EntrySize,
+                                const Twine &Group, unsigned UniqueID) {
+      return getELFSection(Section, Type, Flags, EntrySize, Group, UniqueID,
+                           nullptr);
+    }
+
+    MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
+                                unsigned Flags, unsigned EntrySize,
+                                const Twine &Group, unsigned UniqueID,
+                                const MCSymbolELF *Associated);
+
+    MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
+                                unsigned Flags, unsigned EntrySize,
+                                const MCSymbolELF *Group, unsigned UniqueID,
+                                const MCSymbolELF *Associated);
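+
+    // For example (illustrative), a target's .text section might be obtained
+    // as:
+    //   MCSectionELF *Text = Ctx.getELFSection(
+    //       ".text", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_EXECINSTR);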
+
+    /// Get a section with the provided group identifier. This section is
+    /// named by concatenating \p Prefix with '.' then \p Suffix. The \p Type
+    /// describes the type of the section and \p Flags are used to further
+    /// configure this named section.
+    MCSectionELF *getELFNamedSection(const Twine &Prefix, const Twine &Suffix,
+                                     unsigned Type, unsigned Flags,
+                                     unsigned EntrySize = 0);
+
+    MCSectionELF *createELFRelSection(const Twine &Name, unsigned Type,
+                                      unsigned Flags, unsigned EntrySize,
+                                      const MCSymbolELF *Group,
+                                      const MCSectionELF *RelInfoSection);
+
+    void renameELFSection(MCSectionELF *Section, StringRef Name);
+
+    MCSectionELF *createELFGroupSection(const MCSymbolELF *Group);
+
+    MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
+                                  SectionKind Kind, StringRef COMDATSymName,
+                                  int Selection,
+                                  unsigned UniqueID = GenericSectionID,
+                                  const char *BeginSymName = nullptr);
+
+    MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
+                                  SectionKind Kind,
+                                  const char *BeginSymName = nullptr);
+
+    MCSectionCOFF *getCOFFSection(StringRef Section);
+
+    /// Gets or creates a section equivalent to Sec that is associated with the
+    /// section containing KeySym. For example, to create a debug info section
+    /// associated with an inline function, pass the normal debug info section
+    /// as Sec and the function symbol as KeySym.
+    MCSectionCOFF *
+    getAssociativeCOFFSection(MCSectionCOFF *Sec, const MCSymbol *KeySym,
+                              unsigned UniqueID = GenericSectionID);
+
+    MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K) {
+      return getWasmSection(Section, K, nullptr);
+    }
+
+    MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
+                                  const char *BeginSymName) {
+      return getWasmSection(Section, K, "", ~0, BeginSymName);
+    }
+
+    MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
+                                  const Twine &Group, unsigned UniqueID) {
+      return getWasmSection(Section, K, Group, UniqueID, nullptr);
+    }
+
+    MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
+                                  const Twine &Group, unsigned UniqueID,
+                                  const char *BeginSymName);
+
+    MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
+                                  const MCSymbolWasm *Group, unsigned UniqueID,
+                                  const char *BeginSymName);
+
+    // Create and save a copy of STI and return a reference to the copy.
+    MCSubtargetInfo &getSubtargetCopy(const MCSubtargetInfo &STI);
+
+    /// @}
+
+    /// \name Dwarf Management
+    /// @{
+
+    /// \brief Get the compilation directory for DW_AT_comp_dir.
+    /// The compilation directory should be set with \c setCompilationDir before
+    /// calling this function. If it is unset, an empty string will be returned.
+    StringRef getCompilationDir() const { return CompilationDir; }
+
+    /// \brief Set the compilation directory for DW_AT_comp_dir.
+    void setCompilationDir(StringRef S) { CompilationDir = S.str(); }
+
+    /// \brief Get the main file name for use in error messages and debug
+    /// info. This can be set to ensure we've got the correct file name
+    /// after preprocessing or for -save-temps.
+    const std::string &getMainFileName() const { return MainFileName; }
+
+    /// \brief Set the main file name and override the default.
+    void setMainFileName(StringRef S) { MainFileName = S; }
+
+    /// Creates an entry in the dwarf file and directory tables.
+    Expected<unsigned> getDwarfFile(StringRef Directory, StringRef FileName,
+                                    unsigned FileNumber,
+                                    MD5::MD5Result *Checksum,
+                                    Optional<StringRef> Source, unsigned CUID);
+
+    bool isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID = 0);
+
+    const std::map<unsigned, MCDwarfLineTable> &getMCDwarfLineTables() const {
+      return MCDwarfLineTablesCUMap;
+    }
+
+    MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) {
+      return MCDwarfLineTablesCUMap[CUID];
+    }
+
+    const MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) const {
+      auto I = MCDwarfLineTablesCUMap.find(CUID);
+      assert(I != MCDwarfLineTablesCUMap.end());
+      return I->second;
+    }
+
+    const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles(unsigned CUID = 0) {
+      return getMCDwarfLineTable(CUID).getMCDwarfFiles();
+    }
+
+    const SmallVectorImpl<std::string> &getMCDwarfDirs(unsigned CUID = 0) {
+      return getMCDwarfLineTable(CUID).getMCDwarfDirs();
+    }
+
+    bool hasMCLineSections() const {
+      for (const auto &Table : MCDwarfLineTablesCUMap)
+        if (!Table.second.getMCDwarfFiles().empty() || Table.second.getLabel())
+          return true;
+      return false;
+    }
+
+    unsigned getDwarfCompileUnitID() { return DwarfCompileUnitID; }
+
+    void setDwarfCompileUnitID(unsigned CUIndex) {
+      DwarfCompileUnitID = CUIndex;
+    }
+
+    /// Specifies the "root" file and directory of the compilation unit.
+    /// These are "file 0" and "directory 0" in DWARF v5.
+    void setMCLineTableRootFile(unsigned CUID, StringRef CompilationDir,
+                                StringRef Filename, MD5::MD5Result *Checksum,
+                                Optional<StringRef> Source) {
+      getMCDwarfLineTable(CUID).setRootFile(CompilationDir, Filename, Checksum,
+                                            Source);
+    }
+
+    /// Saves the information from the currently parsed dwarf .loc directive
+    /// and sets DwarfLocSeen.  When the next instruction is assembled an entry
+    /// in the line number table with this information and the address of the
+    /// instruction will be created.
+    void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column,
+                            unsigned Flags, unsigned Isa,
+                            unsigned Discriminator) {
+      CurrentDwarfLoc.setFileNum(FileNum);
+      CurrentDwarfLoc.setLine(Line);
+      CurrentDwarfLoc.setColumn(Column);
+      CurrentDwarfLoc.setFlags(Flags);
+      CurrentDwarfLoc.setIsa(Isa);
+      CurrentDwarfLoc.setDiscriminator(Discriminator);
+      DwarfLocSeen = true;
+    }
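+
+    // A minimal sketch of the intended call pattern (names are illustrative):
+    // a parser handling ".loc 1 42 7" would do
+    //
+    //   Ctx.setCurrentDwarfLoc(/*FileNum=*/1, /*Line=*/42, /*Column=*/7,
+    //                          DWARF2_FLAG_IS_STMT, /*Isa=*/0,
+    //                          /*Discriminator=*/0);
+    //
+    // and the next assembled instruction picks up this location for its line
+    // table entry (DWARF2_FLAG_IS_STMT comes from MCDwarf.h).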
+
+    void clearDwarfLocSeen() { DwarfLocSeen = false; }
+
+    bool getDwarfLocSeen() { return DwarfLocSeen; }
+    const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }
+
+    bool getGenDwarfForAssembly() { return GenDwarfForAssembly; }
+    void setGenDwarfForAssembly(bool Value) { GenDwarfForAssembly = Value; }
+    unsigned getGenDwarfFileNumber() { return GenDwarfFileNumber; }
+
+    void setGenDwarfFileNumber(unsigned FileNumber) {
+      GenDwarfFileNumber = FileNumber;
+    }
+
+    const SetVector<MCSection *> &getGenDwarfSectionSyms() {
+      return SectionsForRanges;
+    }
+
+    bool addGenDwarfSection(MCSection *Sec) {
+      return SectionsForRanges.insert(Sec);
+    }
+
+    void finalizeDwarfSections(MCStreamer &MCOS);
+
+    const std::vector<MCGenDwarfLabelEntry> &getMCGenDwarfLabelEntries() const {
+      return MCGenDwarfLabelEntries;
+    }
+
+    void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry &E) {
+      MCGenDwarfLabelEntries.push_back(E);
+    }
+
+    void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; }
+    StringRef getDwarfDebugFlags() { return DwarfDebugFlags; }
+
+    void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; }
+    StringRef getDwarfDebugProducer() { return DwarfDebugProducer; }
+
+    dwarf::DwarfFormat getDwarfFormat() const {
+      // TODO: Support DWARF64
+      return dwarf::DWARF32;
+    }
+
+    void setDwarfVersion(uint16_t v) { DwarfVersion = v; }
+    uint16_t getDwarfVersion() const { return DwarfVersion; }
+
+    /// @}
+
+    char *getSecureLogFile() { return SecureLogFile; }
+    raw_fd_ostream *getSecureLog() { return SecureLog.get(); }
+
+    void setSecureLog(std::unique_ptr<raw_fd_ostream> Value) {
+      SecureLog = std::move(Value);
+    }
+
+    bool getSecureLogUsed() { return SecureLogUsed; }
+    void setSecureLogUsed(bool Value) { SecureLogUsed = Value; }
+
+    void *allocate(unsigned Size, unsigned Align = 8) {
+      return Allocator.Allocate(Size, Align);
+    }
+
+    void deallocate(void *Ptr) {}
+
+    bool hadError() { return HadError; }
+    void reportError(SMLoc L, const Twine &Msg);
+    // An unrecoverable error has occurred. Display the best diagnostic we can
+    // and bail out via exit(1). For now, most MC backend errors are
+    // unrecoverable.
+    // FIXME: We should really do something about that.
+    LLVM_ATTRIBUTE_NORETURN void reportFatalError(SMLoc L,
+                                                  const Twine &Msg);
+
+    const MCAsmMacro *lookupMacro(StringRef Name) {
+      StringMap<MCAsmMacro>::iterator I = MacroMap.find(Name);
+      return (I == MacroMap.end()) ? nullptr : &I->getValue();
+    }
+
+    void defineMacro(StringRef Name, MCAsmMacro Macro) {
+      MacroMap.insert(std::make_pair(Name, std::move(Macro)));
+    }
+
+    void undefineMacro(StringRef Name) { MacroMap.erase(Name); }
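+
+    // Macro bookkeeping sketch (illustrative; the macro name and the 'Macro'
+    // object are made up):
+    //
+    //   Ctx.defineMacro("mymac", std::move(Macro));
+    //   if (const MCAsmMacro *M = Ctx.lookupMacro("mymac"))
+    //     /* expand *M */;
+    //   Ctx.undefineMacro("mymac");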
+  };
+
+} // end namespace llvm
+
+// operator new and delete aren't allowed inside namespaces.
+// The exception specifications are mandated by the standard.
+/// \brief Placement new for using the MCContext's allocator.
+///
+/// This placement form of operator new uses the MCContext's allocator for
+/// obtaining memory. It is a non-throwing new, which means that it returns
+/// null on error (if that is what the allocator does; the current one does, so
+/// if this ever changes, this operator will have to be changed, too).
+/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
+/// \code
+/// // Default alignment (8)
+/// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments);
+/// // Specific alignment
+/// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments);
+/// \endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// \c Context.deallocate(Ptr).
+///
+/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// \param C The MCContext that provides the allocator.
+/// \param Alignment The alignment of the allocated memory (if the underlying
+///                  allocator supports it).
+/// \return The allocated memory. Could be NULL.
+inline void *operator new(size_t Bytes, llvm::MCContext &C,
+                          size_t Alignment = 8) noexcept {
+  return C.allocate(Bytes, Alignment);
+}
+/// \brief Placement delete companion to the new above.
+///
+/// This operator is just a companion to the new above. There is no way of
+/// invoking it directly; see the new operator for more details. This operator
+/// is called implicitly by the compiler if a placement new expression using
+/// the MCContext throws in the object constructor.
+inline void operator delete(void *Ptr, llvm::MCContext &C, size_t) noexcept {
+  C.deallocate(Ptr);
+}
+
+/// This placement form of operator new[] uses the MCContext's allocator for
+/// obtaining memory. It is a non-throwing new[], which means that it returns
+/// null on error.
+/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
+/// \code
+/// // Default alignment (8)
+/// char *data = new (Context) char[10];
+/// // Specific alignment
+/// char *data2 = new (Context, 4) char[10];
+/// \endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// \c Context.deallocate(Ptr).
+///
+/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// \param C The MCContext that provides the allocator.
+/// \param Alignment The alignment of the allocated memory (if the underlying
+///                  allocator supports it).
+/// \return The allocated memory. Could be NULL.
+inline void *operator new[](size_t Bytes, llvm::MCContext &C,
+                            size_t Alignment = 8) noexcept {
+  return C.allocate(Bytes, Alignment);
+}
+
+/// \brief Placement delete[] companion to the new[] above.
+///
+/// This operator is just a companion to the new[] above. There is no way of
+/// invoking it directly; see the new[] operator for more details. This operator
+/// is called implicitly by the compiler if a placement new[] expression using
+/// the MCContext throws in the object constructor.
+inline void operator delete[](void *Ptr, llvm::MCContext &C) noexcept {
+  C.deallocate(Ptr);
+}
+
+#endif // LLVM_MC_MCCONTEXT_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDirectives.h b/linux-x64/clang/include/llvm/MC/MCDirectives.h
new file mode 100644
index 0000000..8c74b16
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDirectives.h
@@ -0,0 +1,73 @@
+//===- MCDirectives.h - Enums for directives on various targets -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various enums that represent target-specific directives.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDIRECTIVES_H
+#define LLVM_MC_MCDIRECTIVES_H
+
+namespace llvm {
+
+enum MCSymbolAttr {
+  MCSA_Invalid = 0,    ///< Not a valid directive.
+
+  // Various directives in alphabetical order.
+  MCSA_ELF_TypeFunction,    ///< .type _foo, STT_FUNC  # aka @function
+  MCSA_ELF_TypeIndFunction, ///< .type _foo, STT_GNU_IFUNC
+  MCSA_ELF_TypeObject,      ///< .type _foo, STT_OBJECT  # aka @object
+  MCSA_ELF_TypeTLS,         ///< .type _foo, STT_TLS     # aka @tls_object
+  MCSA_ELF_TypeCommon,      ///< .type _foo, STT_COMMON  # aka @common
+  MCSA_ELF_TypeNoType,      ///< .type _foo, STT_NOTYPE  # aka @notype
+  MCSA_ELF_TypeGnuUniqueObject, ///< .type _foo, @gnu_unique_object
+  MCSA_Global,              ///< .globl
+  MCSA_Hidden,              ///< .hidden (ELF)
+  MCSA_IndirectSymbol,      ///< .indirect_symbol (MachO)
+  MCSA_Internal,            ///< .internal (ELF)
+  MCSA_LazyReference,       ///< .lazy_reference (MachO)
+  MCSA_Local,               ///< .local (ELF)
+  MCSA_NoDeadStrip,         ///< .no_dead_strip (MachO)
+  MCSA_SymbolResolver,      ///< .symbol_resolver (MachO)
+  MCSA_AltEntry,            ///< .alt_entry (MachO)
+  MCSA_PrivateExtern,       ///< .private_extern (MachO)
+  MCSA_Protected,           ///< .protected (ELF)
+  MCSA_Reference,           ///< .reference (MachO)
+  MCSA_Weak,                ///< .weak
+  MCSA_WeakDefinition,      ///< .weak_definition (MachO)
+  MCSA_WeakReference,       ///< .weak_reference (MachO)
+  MCSA_WeakDefAutoPrivate   ///< .weak_def_can_be_hidden (MachO)
+};
+
+enum MCAssemblerFlag {
+  MCAF_SyntaxUnified,         ///< .syntax (ARM/ELF)
+  MCAF_SubsectionsViaSymbols, ///< .subsections_via_symbols (MachO)
+  MCAF_Code16,                ///< .code16 (X86) / .code 16 (ARM)
+  MCAF_Code32,                ///< .code32 (X86) / .code 32 (ARM)
+  MCAF_Code64                 ///< .code64 (X86)
+};
+
+enum MCDataRegionType {
+  MCDR_DataRegion,            ///< .data_region
+  MCDR_DataRegionJT8,         ///< .data_region jt8
+  MCDR_DataRegionJT16,        ///< .data_region jt16
+  MCDR_DataRegionJT32,        ///< .data_region jt32
+  MCDR_DataRegionEnd          ///< .end_data_region
+};
+
+enum MCVersionMinType {
+  MCVM_IOSVersionMin,         ///< .ios_version_min
+  MCVM_OSXVersionMin,         ///< .macosx_version_min
+  MCVM_TvOSVersionMin,        ///< .tvos_version_min
+  MCVM_WatchOSVersionMin,     ///< .watchos_version_min
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDIRECTIVES_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDisassembler/MCDisassembler.h b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCDisassembler.h
new file mode 100644
index 0000000..7f09c05
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCDisassembler.h
@@ -0,0 +1,115 @@
+//===- llvm/MC/MCDisassembler.h - Disassembler interface --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H
+#define LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H
+
+#include "llvm/MC/MCDisassembler/MCSymbolizer.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class MCContext;
+class MCInst;
+class MCSubtargetInfo;
+class raw_ostream;
+
+/// Superclass for all disassemblers. Consumes a memory region and provides an
+/// array of assembly instructions.
+class MCDisassembler {
+public:
+  /// Ternary decode status. Most backends will just use Fail and
+  /// Success; however, some have a concept of an instruction with
+  /// understandable semantics that is nonetheless architecturally
+  /// incorrect. An example of this is ARM UNPREDICTABLE instructions,
+  /// which are disassemblable but cause undefined behaviour.
+  ///
+  /// Because it makes sense to disassemble these instructions, there
+  /// is a "soft fail" failure mode that indicates the MCInst& is
+  /// valid but architecturally incorrect.
+  ///
+  /// The enum numbers are deliberately chosen such that reduction
+  /// from Success -> SoftFail -> Fail can be done with a simple
+  /// bitwise-AND:
+  ///
+  ///   LEFT & TOP =  | Success   SoftFail   Fail
+  ///   --------------+---------------------------
+  ///   Success       | Success   SoftFail   Fail
+  ///   SoftFail      | SoftFail  SoftFail   Fail
+  ///   Fail          | Fail      Fail       Fail
+  ///
+  /// An easy way of encoding this is as 0b11, 0b01, 0b00 for
+  /// Success, SoftFail, Fail respectively.
+  enum DecodeStatus {
+    Fail = 0,
+    SoftFail = 1,
+    Success = 3
+  };
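+
+  // A sketch of the bitwise-AND reduction described above (illustrative;
+  // 'decodeOperand' is a hypothetical helper returning a DecodeStatus):
+  //
+  //   DecodeStatus S = Success;
+  //   S = static_cast<DecodeStatus>(S & decodeOperand(...));
+  //   S = static_cast<DecodeStatus>(S & decodeOperand(...));
+  //   return S; // Fail if any step failed, SoftFail if any soft-failed.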
+
+  MCDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+    : Ctx(Ctx), STI(STI) {}
+
+  virtual ~MCDisassembler();
+
+  /// Returns the disassembly of a single instruction.
+  ///
+  /// \param Instr    - An MCInst to populate with the contents of the
+  ///                   instruction.
+  /// \param Size     - A value to populate with the size of the instruction, or
+  ///                   the number of bytes consumed while attempting to decode
+  ///                   an invalid instruction.
+  /// \param Address  - The address, in the memory space of region, of the first
+  ///                   byte of the instruction.
+  /// \param Bytes    - A reference to the actual bytes of the instruction.
+  /// \param VStream  - The stream to print warnings and diagnostic messages on.
+  /// \param CStream  - The stream to print comments and annotations on.
+  /// \return         - MCDisassembler::Success if the instruction is valid,
+  ///                   MCDisassembler::SoftFail if the instruction was
+  ///                                            disassemblable but invalid,
+  ///                   MCDisassembler::Fail if the instruction was invalid.
+  virtual DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+                                      ArrayRef<uint8_t> Bytes, uint64_t Address,
+                                      raw_ostream &VStream,
+                                      raw_ostream &CStream) const = 0;
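+
+  // A typical decode loop (a sketch; 'DisAsm' and 'Bytes' are assumed to come
+  // from target setup and the input buffer respectively):
+  //
+  //   MCInst Inst;
+  //   uint64_t Size;
+  //   MCDisassembler::DecodeStatus S = DisAsm.getInstruction(
+  //       Inst, Size, Bytes, /*Address=*/0x1000, nulls(), nulls());
+  //   if (S != MCDisassembler::Fail)
+  //     /* use Inst, then advance Bytes by Size */;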
+
+private:
+  MCContext &Ctx;
+
+protected:
+  // Subtarget information, for instruction decoding predicates if required.
+  const MCSubtargetInfo &STI;
+  std::unique_ptr<MCSymbolizer> Symbolizer;
+
+public:
+  // Helpers around MCSymbolizer
+  bool tryAddingSymbolicOperand(MCInst &Inst,
+                                int64_t Value,
+                                uint64_t Address, bool IsBranch,
+                                uint64_t Offset, uint64_t InstSize) const;
+
+  void tryAddingPcLoadReferenceComment(int64_t Value, uint64_t Address) const;
+
+  /// Set \p Symzer as the current symbolizer.
+  /// This takes ownership of \p Symzer, and deletes the previously set one.
+  void setSymbolizer(std::unique_ptr<MCSymbolizer> Symzer);
+
+  MCContext& getContext() const { return Ctx; }
+
+  const MCSubtargetInfo& getSubtargetInfo() const { return STI; }
+
+  // Marked mutable because we cache it inside the disassembler, rather than
+  // having to pass it around as an argument through all the autogenerated code.
+  mutable raw_ostream *CommentStream = nullptr;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h
new file mode 100644
index 0000000..bd3e5d4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h
@@ -0,0 +1,58 @@
+//===-- llvm/MC/MCExternalSymbolizer.h - ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCExternalSymbolizer class, which
+// enables library users to provide callbacks (through the C API) to do the
+// symbolization externally.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDISASSEMBLER_MCEXTERNALSYMBOLIZER_H
+#define LLVM_MC_MCDISASSEMBLER_MCEXTERNALSYMBOLIZER_H
+
+#include "llvm-c/Disassembler.h"
+#include "llvm/MC/MCDisassembler/MCSymbolizer.h"
+#include <memory>
+
+namespace llvm {
+
+/// \brief Symbolize using user-provided, C API, callbacks.
+///
+/// See llvm-c/Disassembler.h.
+class MCExternalSymbolizer : public MCSymbolizer {
+protected:
+  /// \name Hooks for symbolic disassembly via the public 'C' interface.
+  /// @{
+  /// The function to get the symbolic information for operands.
+  LLVMOpInfoCallback GetOpInfo;
+  /// The function to lookup a symbol name.
+  LLVMSymbolLookupCallback SymbolLookUp;
+  /// The pointer to the block of symbolic information for the above callbacks.
+  void *DisInfo;
+  /// @}
+
+public:
+  MCExternalSymbolizer(MCContext &Ctx,
+                       std::unique_ptr<MCRelocationInfo> RelInfo,
+                       LLVMOpInfoCallback getOpInfo,
+                       LLVMSymbolLookupCallback symbolLookUp, void *disInfo)
+    : MCSymbolizer(Ctx, std::move(RelInfo)), GetOpInfo(getOpInfo),
+      SymbolLookUp(symbolLookUp), DisInfo(disInfo) {}
+
+  bool tryAddingSymbolicOperand(MCInst &MI, raw_ostream &CommentStream,
+                                int64_t Value, uint64_t Address, bool IsBranch,
+                                uint64_t Offset, uint64_t InstSize) override;
+  void tryAddingPcLoadReferenceComment(raw_ostream &CommentStream,
+                                       int64_t Value,
+                                       uint64_t Address) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDISASSEMBLER_MCEXTERNALSYMBOLIZER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDisassembler/MCRelocationInfo.h b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCRelocationInfo.h
new file mode 100644
index 0000000..7836e88
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCRelocationInfo.h
@@ -0,0 +1,45 @@
+//===- llvm/MC/MCRelocationInfo.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCRelocationInfo class, which provides methods to
+// create MCExprs from relocations, either found in an object::ObjectFile
+// (object::RelocationRef), or provided through the C API.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H
+#define LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H
+
+namespace llvm {
+
+class MCContext;
+class MCExpr;
+
+/// \brief Create MCExprs from relocations found in an object file.
+class MCRelocationInfo {
+protected:
+  MCContext &Ctx;
+
+public:
+  MCRelocationInfo(MCContext &Ctx);
+  MCRelocationInfo(const MCRelocationInfo &) = delete;
+  MCRelocationInfo &operator=(const MCRelocationInfo &) = delete;
+  virtual ~MCRelocationInfo();
+
+  /// \brief Create an MCExpr for the target-specific \p VariantKind.
+  /// The VariantKinds are defined in llvm-c/Disassembler.h.
+  /// Used by MCExternalSymbolizer.
+  /// \returns If possible, an MCExpr corresponding to VariantKind, else 0.
+  virtual const MCExpr *createExprForCAPIVariantKind(const MCExpr *SubExpr,
+                                                     unsigned VariantKind);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDisassembler/MCSymbolizer.h b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCSymbolizer.h
new file mode 100644
index 0000000..d85cf5e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDisassembler/MCSymbolizer.h
@@ -0,0 +1,83 @@
+//===- llvm/MC/MCSymbolizer.h - MCSymbolizer class --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCSymbolizer class, which is used
+// to symbolize instructions decoded from an object, that is, transform their
+// immediate operands to MCExprs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H
+#define LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H
+
+#include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class MCContext;
+class MCInst;
+class raw_ostream;
+
+/// \brief Symbolize and annotate disassembled instructions.
+///
+/// For now this mimics the old symbolization logic (from both ARM and x86),
+/// which relied on user-provided (C API) callbacks to do the actual symbol
+/// lookup in the object file. This was moved to MCExternalSymbolizer.
+/// A better API would not rely on actually calling the two methods here from
+/// inside each disassembler, but would use the instr info to determine what
+/// operands are actually symbolizable, and in what way. I don't think this
+/// information exists right now.
+class MCSymbolizer {
+protected:
+  MCContext &Ctx;
+  std::unique_ptr<MCRelocationInfo> RelInfo;
+
+public:
+  /// \brief Construct an MCSymbolizer, taking ownership of \p RelInfo.
+  MCSymbolizer(MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo)
+    : Ctx(Ctx), RelInfo(std::move(RelInfo)) {
+  }
+
+  MCSymbolizer(const MCSymbolizer &) = delete;
+  MCSymbolizer &operator=(const MCSymbolizer &) = delete;
+  virtual ~MCSymbolizer();
+
+  /// \brief Try to add a symbolic operand instead of \p Value to the MCInst.
+  ///
+  /// Instead of having a difficult to read immediate, a symbolic operand would
+  /// represent this immediate in a more understandable way, for instance as a
+  /// symbol or an offset from a symbol. Relocations can also be used to enrich
+  /// the symbolic expression.
+  /// \param Inst      - The MCInst where to insert the symbolic operand.
+  /// \param cStream   - Stream to print comments and annotations on.
+  /// \param Value     - Operand value, pc-adjusted by the caller if necessary.
+  /// \param Address   - Load address of the instruction.
+  /// \param IsBranch  - Is the instruction a branch?
+  /// \param Offset    - Byte offset of the operand inside the inst.
+  /// \param InstSize  - Size of the instruction in bytes.
+  /// \return Whether a symbolic operand was added.
+  virtual bool tryAddingSymbolicOperand(MCInst &Inst, raw_ostream &cStream,
+                                        int64_t Value, uint64_t Address,
+                                        bool IsBranch, uint64_t Offset,
+                                        uint64_t InstSize) = 0;
+
+  /// \brief Try to add a comment on the PC-relative load.
+  /// For instance, in Mach-O, this is used to add annotations to instructions
+  /// that use C string literals, as found in __cstring.
+  virtual void tryAddingPcLoadReferenceComment(raw_ostream &cStream,
+                                               int64_t Value,
+                                               uint64_t Address) = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCDwarf.h b/linux-x64/clang/include/llvm/MC/MCDwarf.h
new file mode 100644
index 0000000..5cdb176
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCDwarf.h
@@ -0,0 +1,578 @@
+//===- MCDwarf.h - Machine Code Dwarf support -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCDwarfFile to support the dwarf
+// .file directive and the .loc directive.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDWARF_H
+#define LLVM_MC_MCDWARF_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MD5.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class MCAsmBackend;
+class MCContext;
+class MCDwarfLineStr;
+class MCObjectStreamer;
+class MCStreamer;
+class MCSymbol;
+class raw_ostream;
+class SMLoc;
+class SourceMgr;
+
+/// \brief Instances of this class represent the name of the dwarf
+/// .file directive and its associated dwarf file number in the MC file.
+/// MCDwarfFiles are created and uniqued by the MCContext class, where
+/// the file number for each is its index into the vector of DwarfFiles (note
+/// that index 0 is not used and is not a valid dwarf file number).
+struct MCDwarfFile {
+  /// \brief The base name of the file without its directory path.
+  std::string Name;
+
+  /// \brief The index into the list of directory names for this file name.
+  unsigned DirIndex;
+
+  /// The MD5 checksum, if there is one. Non-owning pointer to data allocated
+  /// in MCContext.
+  MD5::MD5Result *Checksum = nullptr;
+
+  /// The source code of the file. Non-owning reference to data allocated in
+  /// MCContext.
+  Optional<StringRef> Source;
+};
+
+/// \brief Instances of this class represent the information from a
+/// dwarf .loc directive.
+class MCDwarfLoc {
+  uint32_t FileNum;
+  uint32_t Line;
+  uint16_t Column;
+  // Flags (see #define's below)
+  uint8_t Flags;
+  uint8_t Isa;
+  uint32_t Discriminator;
+
+// Flag that indicates the initial value of the is_stmt_start flag.
+#define DWARF2_LINE_DEFAULT_IS_STMT 1
+
+#define DWARF2_FLAG_IS_STMT (1 << 0)
+#define DWARF2_FLAG_BASIC_BLOCK (1 << 1)
+#define DWARF2_FLAG_PROLOGUE_END (1 << 2)
+#define DWARF2_FLAG_EPILOGUE_BEGIN (1 << 3)
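+
+// For example, the default flags for a fresh location are commonly built as
+// (a sketch of typical initialization, not mandated by this header):
+//
+//   unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;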
+
+private: // MCContext manages these
+  friend class MCContext;
+  friend class MCDwarfLineEntry;
+
+  MCDwarfLoc(unsigned fileNum, unsigned line, unsigned column, unsigned flags,
+             unsigned isa, unsigned discriminator)
+      : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa),
+        Discriminator(discriminator) {}
+
+  // Allow the default copy constructor and assignment operator to be used
+  // for an MCDwarfLoc object.
+
+public:
+  /// \brief Get the FileNum of this MCDwarfLoc.
+  unsigned getFileNum() const { return FileNum; }
+
+  /// \brief Get the Line of this MCDwarfLoc.
+  unsigned getLine() const { return Line; }
+
+  /// \brief Get the Column of this MCDwarfLoc.
+  unsigned getColumn() const { return Column; }
+
+  /// \brief Get the Flags of this MCDwarfLoc.
+  unsigned getFlags() const { return Flags; }
+
+  /// \brief Get the Isa of this MCDwarfLoc.
+  unsigned getIsa() const { return Isa; }
+
+  /// \brief Get the Discriminator of this MCDwarfLoc.
+  unsigned getDiscriminator() const { return Discriminator; }
+
+  /// \brief Set the FileNum of this MCDwarfLoc.
+  void setFileNum(unsigned fileNum) { FileNum = fileNum; }
+
+  /// \brief Set the Line of this MCDwarfLoc.
+  void setLine(unsigned line) { Line = line; }
+
+  /// \brief Set the Column of this MCDwarfLoc.
+  void setColumn(unsigned column) {
+    assert(column <= UINT16_MAX);
+    Column = column;
+  }
+
+  /// \brief Set the Flags of this MCDwarfLoc.
+  void setFlags(unsigned flags) {
+    assert(flags <= UINT8_MAX);
+    Flags = flags;
+  }
+
+  /// \brief Set the Isa of this MCDwarfLoc.
+  void setIsa(unsigned isa) {
+    assert(isa <= UINT8_MAX);
+    Isa = isa;
+  }
+
+  /// \brief Set the Discriminator of this MCDwarfLoc.
+  void setDiscriminator(unsigned discriminator) {
+    Discriminator = discriminator;
+  }
+};
+
+/// \brief Instances of this class represent the line information for
+/// the dwarf line table entries, which are created after a machine
+/// instruction is assembled, using an address from a temporary label
+/// created at the current address in the current section and the info from
+/// the last .loc directive seen, as stored in the context.
+class MCDwarfLineEntry : public MCDwarfLoc {
+  MCSymbol *Label;
+
+private:
+  // Allow the default copy constructor and assignment operator to be used
+  // for an MCDwarfLineEntry object.
+
+public:
+  // Constructor to create an MCDwarfLineEntry given a symbol and the dwarf loc.
+  MCDwarfLineEntry(MCSymbol *label, const MCDwarfLoc loc)
+      : MCDwarfLoc(loc), Label(label) {}
+
+  MCSymbol *getLabel() const { return Label; }
+
+  // This is called when an instruction is assembled into the specified
+  // section; if there is information from the last .loc directive that does
+  // not yet have a line entry, one is made.
+  static void Make(MCObjectStreamer *MCOS, MCSection *Section);
+};
+
+/// \brief Instances of this class represent the line information for a compile
+/// unit where machine instructions have been assembled after seeing .loc
+/// directives.  This is the information used to build the dwarf line
+/// table for a section.
+class MCLineSection {
+public:
+  /// \brief Add an entry to this MCLineSection's line entries.
+  void addLineEntry(const MCDwarfLineEntry &LineEntry, MCSection *Sec) {
+    MCLineDivisions[Sec].push_back(LineEntry);
+  }
+
+  using MCDwarfLineEntryCollection = std::vector<MCDwarfLineEntry>;
+  using iterator = MCDwarfLineEntryCollection::iterator;
+  using const_iterator = MCDwarfLineEntryCollection::const_iterator;
+  using MCLineDivisionMap = MapVector<MCSection *, MCDwarfLineEntryCollection>;
+
+private:
+  // A collection of MCDwarfLineEntry for each section.
+  MCLineDivisionMap MCLineDivisions;
+
+public:
+  // Returns the map from each section to its collection of MCDwarfLineEntry.
+  const MCLineDivisionMap &getMCLineEntries() const {
+    return MCLineDivisions;
+  }
+};
+
+struct MCDwarfLineTableParams {
+  /// First special line opcode - leave room for the standard opcodes.
+  /// Note: If you want to change this, you'll have to update the
+  /// "StandardOpcodeLengths" table that is emitted in
+  /// \c Emit().
+  uint8_t DWARF2LineOpcodeBase = 13;
+  /// Minimum line offset in a special line info. opcode.  The value
+  /// -5 was chosen to give a reasonable range of values.
+  int8_t DWARF2LineBase = -5;
+  /// Range of line offsets in a special line info. opcode.
+  uint8_t DWARF2LineRange = 14;
+};
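+
+// With these parameters a DWARF special opcode encodes a (line, address)
+// advance pair using the standard DWARF formula (restated here for
+// reference):
+//
+//   Opcode = (LineDelta - DWARF2LineBase)
+//          + (DWARF2LineRange * AddrAdvance) + DWARF2LineOpcodeBase
+//
+// and is only usable when the result fits in a single byte (<= 255).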
+
+struct MCDwarfLineTableHeader {
+  MCSymbol *Label = nullptr;
+  SmallVector<std::string, 3> MCDwarfDirs;
+  SmallVector<MCDwarfFile, 3> MCDwarfFiles;
+  StringMap<unsigned> SourceIdMap;
+  StringRef CompilationDir;
+  MCDwarfFile RootFile;
+  bool HasMD5 = false;
+  bool HasSource = false;
+
+  MCDwarfLineTableHeader() = default;
+
+  Expected<unsigned> tryGetFile(StringRef &Directory, StringRef &FileName,
+                                MD5::MD5Result *Checksum,
+                                Optional<StringRef> &Source,
+                                unsigned FileNumber = 0);
+  std::pair<MCSymbol *, MCSymbol *>
+  Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
+       Optional<MCDwarfLineStr> &LineStr) const;
+  std::pair<MCSymbol *, MCSymbol *>
+  Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
+       ArrayRef<char> SpecialOpcodeLengths,
+       Optional<MCDwarfLineStr> &LineStr) const;
+
+private:
+  void emitV2FileDirTables(MCStreamer *MCOS) const;
+  void emitV5FileDirTables(MCStreamer *MCOS,
+                           Optional<MCDwarfLineStr> &LineStr) const;
+};
+
+class MCDwarfDwoLineTable {
+  MCDwarfLineTableHeader Header;
+
+public:
+  void maybeSetRootFile(StringRef Directory, StringRef FileName,
+                        MD5::MD5Result *Checksum, Optional<StringRef> Source) {
+    if (!Header.RootFile.Name.empty())
+      return;
+    Header.CompilationDir = Directory;
+    Header.RootFile.Name = FileName;
+    Header.RootFile.DirIndex = 0;
+    Header.RootFile.Checksum = Checksum;
+    Header.RootFile.Source = Source;
+    Header.HasMD5 = (Checksum != nullptr);
+    Header.HasSource = Source.hasValue();
+  }
+
+  unsigned getFile(StringRef Directory, StringRef FileName,
+                   MD5::MD5Result *Checksum, Optional<StringRef> Source) {
+    return cantFail(Header.tryGetFile(Directory, FileName, Checksum, Source));
+  }
+
+  void Emit(MCStreamer &MCOS, MCDwarfLineTableParams Params,
+            MCSection *Section) const;
+};
+
+class MCDwarfLineTable {
+  MCDwarfLineTableHeader Header;
+  MCLineSection MCLineSections;
+
+public:
+  // This emits the Dwarf file and the line tables for all Compile Units.
+  static void Emit(MCObjectStreamer *MCOS, MCDwarfLineTableParams Params);
+
+  // This emits the Dwarf file and the line tables for a given Compile Unit.
+  void EmitCU(MCObjectStreamer *MCOS, MCDwarfLineTableParams Params,
+              Optional<MCDwarfLineStr> &LineStr) const;
+
+  Expected<unsigned> tryGetFile(StringRef &Directory, StringRef &FileName,
+                                MD5::MD5Result *Checksum,
+                                Optional<StringRef> Source,
+                                unsigned FileNumber = 0);
+  unsigned getFile(StringRef &Directory, StringRef &FileName,
+                   MD5::MD5Result *Checksum, Optional<StringRef> &Source,
+                   unsigned FileNumber = 0) {
+    return cantFail(tryGetFile(Directory, FileName, Checksum, Source,
+                               FileNumber));
+  }
+
+  void setRootFile(StringRef Directory, StringRef FileName,
+                   MD5::MD5Result *Checksum, Optional<StringRef> Source) {
+    Header.CompilationDir = Directory;
+    Header.RootFile.Name = FileName;
+    Header.RootFile.DirIndex = 0;
+    Header.RootFile.Checksum = Checksum;
+    Header.RootFile.Source = Source;
+    Header.HasMD5 = (Checksum != nullptr);
+    Header.HasSource = Source.hasValue();
+  }
+
+  MCSymbol *getLabel() const {
+    return Header.Label;
+  }
+
+  void setLabel(MCSymbol *Label) {
+    Header.Label = Label;
+  }
+
+  const SmallVectorImpl<std::string> &getMCDwarfDirs() const {
+    return Header.MCDwarfDirs;
+  }
+
+  SmallVectorImpl<std::string> &getMCDwarfDirs() {
+    return Header.MCDwarfDirs;
+  }
+
+  const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() const {
+    return Header.MCDwarfFiles;
+  }
+
+  SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() {
+    return Header.MCDwarfFiles;
+  }
+
+  const MCLineSection &getMCLineSections() const {
+    return MCLineSections;
+  }
+  MCLineSection &getMCLineSections() {
+    return MCLineSections;
+  }
+};
+
+class MCDwarfLineAddr {
+public:
+  /// Utility function to encode a Dwarf pair of LineDelta and AddrDelta.
+  static void Encode(MCContext &Context, MCDwarfLineTableParams Params,
+                     int64_t LineDelta, uint64_t AddrDelta, raw_ostream &OS);
+
+  /// Utility function to emit the encoding to a streamer.
+  static void Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
+                   int64_t LineDelta, uint64_t AddrDelta);
+};
+
+class MCGenDwarfInfo {
+public:
+  //
+  // When generating dwarf for assembly source files, this emits the Dwarf
+  // sections.
+  //
+  static void Emit(MCStreamer *MCOS);
+};
+
+// When generating dwarf for assembly source files, this is the info that
+// needs to be gathered for each symbol that will have a dwarf label.
+class MCGenDwarfLabelEntry {
+private:
+  // Name of the symbol without a leading underbar, if any.
+  StringRef Name;
+  // The dwarf file number this symbol is in.
+  unsigned FileNumber;
+  // The line number this symbol is at.
+  unsigned LineNumber;
+  // The low_pc for the dwarf label is taken from this symbol.
+  MCSymbol *Label;
+
+public:
+  MCGenDwarfLabelEntry(StringRef name, unsigned fileNumber, unsigned lineNumber,
+                       MCSymbol *label)
+      : Name(name), FileNumber(fileNumber), LineNumber(lineNumber),
+        Label(label) {}
+
+  StringRef getName() const { return Name; }
+  unsigned getFileNumber() const { return FileNumber; }
+  unsigned getLineNumber() const { return LineNumber; }
+  MCSymbol *getLabel() const { return Label; }
+
+  // This is called when a label is created while generating dwarf for
+  // assembly source files.
+  static void Make(MCSymbol *Symbol, MCStreamer *MCOS, SourceMgr &SrcMgr,
+                   SMLoc &Loc);
+};
+
+class MCCFIInstruction {
+public:
+  enum OpType {
+    OpSameValue,
+    OpRememberState,
+    OpRestoreState,
+    OpOffset,
+    OpDefCfaRegister,
+    OpDefCfaOffset,
+    OpDefCfa,
+    OpRelOffset,
+    OpAdjustCfaOffset,
+    OpEscape,
+    OpRestore,
+    OpUndefined,
+    OpRegister,
+    OpWindowSave,
+    OpGnuArgsSize
+  };
+
+private:
+  OpType Operation;
+  MCSymbol *Label;
+  unsigned Register;
+  union {
+    int Offset;
+    unsigned Register2;
+  };
+  std::vector<char> Values;
+
+  MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R, int O, StringRef V)
+      : Operation(Op), Label(L), Register(R), Offset(O),
+        Values(V.begin(), V.end()) {
+    assert(Op != OpRegister);
+  }
+
+  MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R1, unsigned R2)
+      : Operation(Op), Label(L), Register(R1), Register2(R2) {
+    assert(Op == OpRegister);
+  }
+
+public:
+  /// \brief .cfi_def_cfa defines a rule for computing CFA as: take address from
+  /// Register and add Offset to it.
+  static MCCFIInstruction createDefCfa(MCSymbol *L, unsigned Register,
+                                       int Offset) {
+    return MCCFIInstruction(OpDefCfa, L, Register, -Offset, "");
+  }
+
+  /// \brief .cfi_def_cfa_register modifies a rule for computing CFA. From now
+  /// on Register will be used instead of the old one. Offset remains the same.
+  static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register) {
+    return MCCFIInstruction(OpDefCfaRegister, L, Register, 0, "");
+  }
+
+  /// \brief .cfi_def_cfa_offset modifies a rule for computing CFA. Register
+  /// remains the same, but offset is new. Note that it is the absolute offset
+  /// that will be added to a defined register to compute the CFA address.
+  static MCCFIInstruction createDefCfaOffset(MCSymbol *L, int Offset) {
+    return MCCFIInstruction(OpDefCfaOffset, L, 0, -Offset, "");
+  }
+
+  /// \brief .cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but
+  /// Offset is a relative value that is added/subtracted from the previous
+  /// offset.
+  static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int Adjustment) {
+    return MCCFIInstruction(OpAdjustCfaOffset, L, 0, Adjustment, "");
+  }
+
+  /// \brief .cfi_offset Previous value of Register is saved at offset Offset
+  /// from CFA.
+  static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register,
+                                       int Offset) {
+    return MCCFIInstruction(OpOffset, L, Register, Offset, "");
+  }
+
+  /// \brief .cfi_rel_offset Previous value of Register is saved at offset
+  /// Offset from the current CFA register. This is transformed to .cfi_offset
+  /// using the known displacement of the CFA register from the CFA.
+  static MCCFIInstruction createRelOffset(MCSymbol *L, unsigned Register,
+                                          int Offset) {
+    return MCCFIInstruction(OpRelOffset, L, Register, Offset, "");
+  }
+
+  /// \brief .cfi_register Previous value of Register1 is saved in
+  /// register Register2.
+  static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1,
+                                         unsigned Register2) {
+    return MCCFIInstruction(OpRegister, L, Register1, Register2);
+  }
+
+  /// \brief .cfi_window_save SPARC register window is saved.
+  static MCCFIInstruction createWindowSave(MCSymbol *L) {
+    return MCCFIInstruction(OpWindowSave, L, 0, 0, "");
+  }
+
+  /// \brief .cfi_restore says that the rule for Register is now the same as it
+  /// was at the beginning of the function, after all initial instructions added
+  /// by .cfi_startproc were executed.
+  static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register) {
+    return MCCFIInstruction(OpRestore, L, Register, 0, "");
+  }
+
+  /// \brief .cfi_undefined From now on the previous value of Register can't be
+  /// restored anymore.
+  static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register) {
+    return MCCFIInstruction(OpUndefined, L, Register, 0, "");
+  }
+
+  /// \brief .cfi_same_value Current value of Register is the same as in the
+  /// previous frame. I.e., no restoration is needed.
+  static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register) {
+    return MCCFIInstruction(OpSameValue, L, Register, 0, "");
+  }
+
+  /// \brief .cfi_remember_state Save all current rules for all registers.
+  static MCCFIInstruction createRememberState(MCSymbol *L) {
+    return MCCFIInstruction(OpRememberState, L, 0, 0, "");
+  }
+
+  /// \brief .cfi_restore_state Restore the previously saved state.
+  static MCCFIInstruction createRestoreState(MCSymbol *L) {
+    return MCCFIInstruction(OpRestoreState, L, 0, 0, "");
+  }
+
+  /// \brief .cfi_escape Allows the user to add arbitrary bytes to the unwind
+  /// info.
+  static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals) {
+    return MCCFIInstruction(OpEscape, L, 0, 0, Vals);
+  }
+
+  /// \brief A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE
+  static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int Size) {
+    return MCCFIInstruction(OpGnuArgsSize, L, 0, Size, "");
+  }
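+
+  // Sketch of describing a simple x86-64 prologue (illustrative; 'FrameLabel'
+  // is a hypothetical MCSymbol and 6 is the DWARF register number of %rbp):
+  //
+  //   // %rbp saved at [CFA - 16], then made the CFA register:
+  //   MCCFIInstruction Save = MCCFIInstruction::createOffset(FrameLabel, 6, -16);
+  //   MCCFIInstruction Use  = MCCFIInstruction::createDefCfaRegister(FrameLabel, 6);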
+
+  OpType getOperation() const { return Operation; }
+  MCSymbol *getLabel() const { return Label; }
+
+  unsigned getRegister() const {
+    assert(Operation == OpDefCfa || Operation == OpOffset ||
+           Operation == OpRestore || Operation == OpUndefined ||
+           Operation == OpSameValue || Operation == OpDefCfaRegister ||
+           Operation == OpRelOffset || Operation == OpRegister);
+    return Register;
+  }
+
+  unsigned getRegister2() const {
+    assert(Operation == OpRegister);
+    return Register2;
+  }
+
+  int getOffset() const {
+    assert(Operation == OpDefCfa || Operation == OpOffset ||
+           Operation == OpRelOffset || Operation == OpDefCfaOffset ||
+           Operation == OpAdjustCfaOffset || Operation == OpGnuArgsSize);
+    return Offset;
+  }
+
+  StringRef getValues() const {
+    assert(Operation == OpEscape);
+    return StringRef(&Values[0], Values.size());
+  }
+};
+
+struct MCDwarfFrameInfo {
+  MCDwarfFrameInfo() = default;
+
+  MCSymbol *Begin = nullptr;
+  MCSymbol *End = nullptr;
+  const MCSymbol *Personality = nullptr;
+  const MCSymbol *Lsda = nullptr;
+  std::vector<MCCFIInstruction> Instructions;
+  unsigned CurrentCfaRegister = 0;
+  unsigned PersonalityEncoding = 0;
+  unsigned LsdaEncoding = 0;
+  uint32_t CompactUnwindEncoding = 0;
+  bool IsSignalFrame = false;
+  bool IsSimple = false;
+  unsigned RAReg = static_cast<unsigned>(INT_MAX);
+};
+
+class MCDwarfFrameEmitter {
+public:
+  //
+  // This emits the frame info section.
+  //
+  static void Emit(MCObjectStreamer &streamer, MCAsmBackend *MAB, bool isEH);
+  static void EmitAdvanceLoc(MCObjectStreamer &Streamer, uint64_t AddrDelta);
+  static void EncodeAdvanceLoc(MCContext &Context, uint64_t AddrDelta,
+                               raw_ostream &OS);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCDWARF_H
diff --git a/linux-x64/clang/include/llvm/MC/MCELFObjectWriter.h b/linux-x64/clang/include/llvm/MC/MCELFObjectWriter.h
new file mode 100644
index 0000000..fd8d118
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCELFObjectWriter.h
@@ -0,0 +1,146 @@
+//===- llvm/MC/MCELFObjectWriter.h - ELF Object Writer ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFOBJECTWRITER_H
+#define LLVM_MC_MCELFOBJECTWRITER_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class MCAssembler;
+class MCContext;
+class MCFixup;
+class MCObjectWriter;
+class MCSymbol;
+class MCSymbolELF;
+class MCValue;
+
+struct ELFRelocationEntry {
+  uint64_t Offset; // Where is the relocation.
+  const MCSymbolELF *Symbol; // The symbol to relocate with.
+  unsigned Type;   // The type of the relocation.
+  uint64_t Addend; // The addend to use.
+  const MCSymbolELF *OriginalSymbol; // The original value of Symbol if we changed it.
+  uint64_t OriginalAddend; // The original value of addend.
+
+  ELFRelocationEntry(uint64_t Offset, const MCSymbolELF *Symbol, unsigned Type,
+                     uint64_t Addend, const MCSymbolELF *OriginalSymbol,
+                     uint64_t OriginalAddend)
+      : Offset(Offset), Symbol(Symbol), Type(Type), Addend(Addend),
+        OriginalSymbol(OriginalSymbol), OriginalAddend(OriginalAddend) {}
+
+  void print(raw_ostream &Out) const {
+    Out << "Off=" << Offset << ", Sym=" << Symbol << ", Type=" << Type
+        << ", Addend=" << Addend << ", OriginalSymbol=" << OriginalSymbol
+        << ", OriginalAddend=" << OriginalAddend;
+  }
+
+  void dump() const { print(errs()); }
+};
+
+class MCELFObjectTargetWriter {
+  const uint8_t OSABI;
+  const uint16_t EMachine;
+  const unsigned HasRelocationAddend : 1;
+  const unsigned Is64Bit : 1;
+
+protected:
+  MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_, uint16_t EMachine_,
+                          bool HasRelocationAddend);
+
+public:
+  virtual ~MCELFObjectTargetWriter() = default;
+
+  static uint8_t getOSABI(Triple::OSType OSType) {
+    switch (OSType) {
+      case Triple::CloudABI:
+        return ELF::ELFOSABI_CLOUDABI;
+      case Triple::PS4:
+      case Triple::FreeBSD:
+        return ELF::ELFOSABI_FREEBSD;
+      default:
+        return ELF::ELFOSABI_NONE;
+    }
+  }
+
+  virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+                                const MCFixup &Fixup, bool IsPCRel) const = 0;
+
+  virtual bool needsRelocateWithSymbol(const MCSymbol &Sym,
+                                       unsigned Type) const;
+
+  virtual void sortRelocs(const MCAssembler &Asm,
+                          std::vector<ELFRelocationEntry> &Relocs);
+
+  /// \name Accessors
+  /// @{
+  uint8_t getOSABI() const { return OSABI; }
+  uint16_t getEMachine() const { return EMachine; }
+  bool hasRelocationAddend() const { return HasRelocationAddend; }
+  bool is64Bit() const { return Is64Bit; }
+  /// @}
+
+  // Instead of changing everyone's API, we pack the N64 Type fields
+  // into the existing 32-bit unsigned Type value.
+#define R_TYPE_SHIFT 0
+#define R_TYPE_MASK 0xffffff00
+#define R_TYPE2_SHIFT 8
+#define R_TYPE2_MASK 0xffff00ff
+#define R_TYPE3_SHIFT 16
+#define R_TYPE3_MASK 0xff00ffff
+#define R_SSYM_SHIFT 24
+#define R_SSYM_MASK 0x00ffffff
+
+  // N64 relocation type accessors
+  uint8_t getRType(uint32_t Type) const {
+    return (unsigned)((Type >> R_TYPE_SHIFT) & 0xff);
+  }
+  uint8_t getRType2(uint32_t Type) const {
+    return (unsigned)((Type >> R_TYPE2_SHIFT) & 0xff);
+  }
+  uint8_t getRType3(uint32_t Type) const {
+    return (unsigned)((Type >> R_TYPE3_SHIFT) & 0xff);
+  }
+  uint8_t getRSsym(uint32_t Type) const {
+    return (unsigned)((Type >> R_SSYM_SHIFT) & 0xff);
+  }
+
+  // N64 relocation type setting
+  unsigned setRType(unsigned Value, unsigned Type) const {
+    return ((Type & R_TYPE_MASK) | ((Value & 0xff) << R_TYPE_SHIFT));
+  }
+  unsigned setRType2(unsigned Value, unsigned Type) const {
+    return (Type & R_TYPE2_MASK) | ((Value & 0xff) << R_TYPE2_SHIFT);
+  }
+  unsigned setRType3(unsigned Value, unsigned Type) const {
+    return (Type & R_TYPE3_MASK) | ((Value & 0xff) << R_TYPE3_SHIFT);
+  }
+  unsigned setRSsym(unsigned Value, unsigned Type) const {
+    return (Type & R_SSYM_MASK) | ((Value & 0xff) << R_SSYM_SHIFT);
+  }
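+
+  // Packing sketch for a MIPS N64 composite relocation (illustrative; 'W' is
+  // a writer instance and the R_MIPS_* values come from
+  // llvm/BinaryFormat/ELF.h):
+  //
+  //   unsigned Packed = 0;
+  //   Packed = W.setRType(ELF::R_MIPS_GPREL16, Packed);
+  //   Packed = W.setRType2(ELF::R_MIPS_SUB, Packed);
+  //   Packed = W.setRType3(ELF::R_MIPS_HI16, Packed);
+  //   // W.getRType(Packed) == ELF::R_MIPS_GPREL16, and so on.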
+};
+
+/// \brief Construct a new ELF writer instance.
+///
+/// \param MOTW - The target specific ELF writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+std::unique_ptr<MCObjectWriter>
+createELFObjectWriter(std::unique_ptr<MCELFObjectTargetWriter> MOTW,
+                      raw_pwrite_stream &OS, bool IsLittleEndian);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCELFOBJECTWRITER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCELFStreamer.h b/linux-x64/clang/include/llvm/MC/MCELFStreamer.h
new file mode 100644
index 0000000..2f23cd6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCELFStreamer.h
@@ -0,0 +1,102 @@
+//===- MCELFStreamer.h - MCStreamer ELF Object File Interface ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFSTREAMER_H
+#define LLVM_MC_MCELFSTREAMER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCExpr;
+class MCInst;
+
+class MCELFStreamer : public MCObjectStreamer {
+public:
+  MCELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
+                raw_pwrite_stream &OS, std::unique_ptr<MCCodeEmitter> Emitter);
+
+  ~MCELFStreamer() override = default;
+
+  /// state management
+  void reset() override {
+    SeenIdent = false;
+    BundleGroups.clear();
+    MCObjectStreamer::reset();
+  }
+
+  /// \name MCStreamer Interface
+  /// @{
+
+  void InitSections(bool NoExecStack) override;
+  void ChangeSection(MCSection *Section, const MCExpr *Subsection) override;
+  void EmitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
+  void EmitLabel(MCSymbol *Symbol, SMLoc Loc, MCFragment *F) override;
+  void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+  void EmitThumbFunc(MCSymbol *Func) override;
+  void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+  bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+  void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+  void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                        unsigned ByteAlignment) override;
+
+  void emitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
+  void emitELFSymverDirective(StringRef AliasName,
+                              const MCSymbol *Aliasee) override;
+
+  void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                             unsigned ByteAlignment) override;
+
+  void EmitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
+                    uint64_t Size = 0, unsigned ByteAlignment = 0) override;
+  void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+                      unsigned ByteAlignment = 0) override;
+  void EmitValueImpl(const MCExpr *Value, unsigned Size,
+                     SMLoc Loc = SMLoc()) override;
+
+  void EmitIdent(StringRef IdentString) override;
+
+  void EmitValueToAlignment(unsigned, int64_t, unsigned, unsigned) override;
+
+  void FinishImpl() override;
+
+  void EmitBundleAlignMode(unsigned AlignPow2) override;
+  void EmitBundleLock(bool AlignToEnd) override;
+  void EmitBundleUnlock() override;
+
+private:
+  bool isBundleLocked() const;
+  void EmitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &) override;
+  void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;
+
+  void fixSymbolsInTLSFixups(const MCExpr *expr);
+
+  /// \brief Merge the content of the fragment \p EF into the fragment \p DF.
+  void mergeFragment(MCDataFragment *, MCDataFragment *);
+
+  bool SeenIdent = false;
+
+  /// BundleGroups - The stack of fragments holding the bundle-locked
+  /// instructions.
+  SmallVector<MCDataFragment *, 4> BundleGroups;
+};
+
+MCELFStreamer *createARMELFStreamer(MCContext &Context,
+                                    std::unique_ptr<MCAsmBackend> TAB,
+                                    raw_pwrite_stream &OS,
+                                    std::unique_ptr<MCCodeEmitter> Emitter,
+                                    bool RelaxAll, bool IsThumb);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCELFSTREAMER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCExpr.h b/linux-x64/clang/include/llvm/MC/MCExpr.h
new file mode 100644
index 0000000..fcbbe65
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCExpr.h
@@ -0,0 +1,595 @@
+//===- MCExpr.h - Assembly Level Expressions --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCEXPR_H
+#define LLVM_MC_MCEXPR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/SMLoc.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MCAsmInfo;
+class MCAsmLayout;
+class MCAssembler;
+class MCContext;
+class MCFixup;
+class MCFragment;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+class MCValue;
+class raw_ostream;
+class StringRef;
+
+using SectionAddrMap = DenseMap<const MCSection *, uint64_t>;
+
+/// \brief Base class for the full range of assembler expressions which are
+/// needed for parsing.
+class MCExpr {
+public:
+  enum ExprKind {
+    Binary,    ///< Binary expressions.
+    Constant,  ///< Constant expressions.
+    SymbolRef, ///< References to labels and assigned expressions.
+    Unary,     ///< Unary expressions.
+    Target     ///< Target specific expression.
+  };
+
+private:
+  ExprKind Kind;
+  SMLoc Loc;
+
+  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+                          const MCAsmLayout *Layout,
+                          const SectionAddrMap *Addrs) const;
+
+  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+                          const MCAsmLayout *Layout,
+                          const SectionAddrMap *Addrs, bool InSet) const;
+
+protected:
+  explicit MCExpr(ExprKind Kind, SMLoc Loc) : Kind(Kind), Loc(Loc) {}
+
+  bool evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
+                                 const MCAsmLayout *Layout,
+                                 const MCFixup *Fixup,
+                                 const SectionAddrMap *Addrs, bool InSet) const;
+
+public:
+  MCExpr(const MCExpr &) = delete;
+  MCExpr &operator=(const MCExpr &) = delete;
+
+  /// \name Accessors
+  /// @{
+
+  ExprKind getKind() const { return Kind; }
+  SMLoc getLoc() const { return Loc; }
+
+  /// @}
+  /// \name Utility Methods
+  /// @{
+
+  void print(raw_ostream &OS, const MCAsmInfo *MAI,
+             bool InParens = false) const;
+  void dump() const;
+
+  /// @}
+  /// \name Expression Evaluation
+  /// @{
+
+  /// \brief Try to evaluate the expression to an absolute value.
+  ///
+  /// \param Res - The absolute value, if evaluation succeeds.
+  /// \param Layout - The assembler layout object to use for evaluating symbol
+  /// values. If not given, then only non-symbolic expressions will be
+  /// evaluated.
+  /// \return - True on success.
+  bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+                          const SectionAddrMap &Addrs) const;
+  bool evaluateAsAbsolute(int64_t &Res) const;
+  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
+  bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
+
+  bool evaluateKnownAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
+
+  /// \brief Try to evaluate the expression to a relocatable value, i.e. an
+  /// expression of the fixed form (a - b + constant).
+  ///
+  /// \param Res - The relocatable value, if evaluation succeeds.
+  /// \param Layout - The assembler layout object to use for evaluating values.
+  /// \param Fixup - The Fixup object if available.
+  /// \return - True on success.
+  bool evaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout,
+                             const MCFixup *Fixup) const;
+
+  /// \brief Try to evaluate the expression to the form (a - b + constant) where
+  /// neither a nor b are variables.
+  ///
+  /// This is a more aggressive variant of evaluateAsRelocatable. The intended
+  /// use is for when relocations are not available, like the .size directive.
+  bool evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const;
+
+  /// \brief Find the "associated section" for this expression, which is
+  /// currently defined as the absolute section for constants, or
+  /// otherwise the section associated with the first defined symbol in the
+  /// expression.
+  MCFragment *findAssociatedFragment() const;
+
+  /// @}
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
+  E.print(OS, nullptr);
+  return OS;
+}
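+
+/// Example (editorial sketch, not part of the original header): once an
+/// expression tree has been built, a client may try to fold it to a plain
+/// integer. `E` stands for any `const MCExpr *` built from the classes
+/// declared below.
+///
+/// \code
+///   int64_t Res;
+///   if (E->evaluateAsAbsolute(Res))
+///     ; // Res holds the folded value, e.g. 8 for the expression 3 + 5.
+///   // Otherwise E still depends on symbols whose values are unknown.
+/// \endcode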
+
+/// \brief Represent a constant integer expression.
+class MCConstantExpr : public MCExpr {
+  int64_t Value;
+
+  explicit MCConstantExpr(int64_t Value)
+      : MCExpr(MCExpr::Constant, SMLoc()), Value(Value) {}
+
+public:
+  /// \name Construction
+  /// @{
+
+  static const MCConstantExpr *create(int64_t Value, MCContext &Ctx);
+
+  /// @}
+  /// \name Accessors
+  /// @{
+
+  int64_t getValue() const { return Value; }
+
+  /// @}
+
+  static bool classof(const MCExpr *E) {
+    return E->getKind() == MCExpr::Constant;
+  }
+};
+
+/// \brief Represent a reference to a symbol from inside an expression.
+///
+/// A symbol reference in an expression may be a use of a label, a use of an
+/// assembler variable (defined constant), or constitute an implicit definition
+/// of the symbol as external.
+class MCSymbolRefExpr : public MCExpr {
+public:
+  enum VariantKind : uint16_t {
+    VK_None,
+    VK_Invalid,
+
+    VK_GOT,
+    VK_GOTOFF,
+    VK_GOTREL,
+    VK_GOTPCREL,
+    VK_GOTTPOFF,
+    VK_INDNTPOFF,
+    VK_NTPOFF,
+    VK_GOTNTPOFF,
+    VK_PLT,
+    VK_TLSGD,
+    VK_TLSLD,
+    VK_TLSLDM,
+    VK_TPOFF,
+    VK_DTPOFF,
+    VK_TLSCALL,   // symbol(tlscall)
+    VK_TLSDESC,   // symbol(tlsdesc)
+    VK_TLVP,      // Mach-O thread local variable relocations
+    VK_TLVPPAGE,
+    VK_TLVPPAGEOFF,
+    VK_PAGE,
+    VK_PAGEOFF,
+    VK_GOTPAGE,
+    VK_GOTPAGEOFF,
+    VK_SECREL,
+    VK_SIZE,      // symbol@SIZE
+    VK_WEAKREF,   // The link between the symbols in .weakref foo, bar
+
+    VK_X86_ABS8,
+
+    VK_ARM_NONE,
+    VK_ARM_GOT_PREL,
+    VK_ARM_TARGET1,
+    VK_ARM_TARGET2,
+    VK_ARM_PREL31,
+    VK_ARM_SBREL,          // symbol(sbrel)
+    VK_ARM_TLSLDO,         // symbol(tlsldo)
+    VK_ARM_TLSDESCSEQ,
+
+    VK_AVR_NONE,
+    VK_AVR_LO8,
+    VK_AVR_HI8,
+    VK_AVR_HLO8,
+    VK_AVR_DIFF8,
+    VK_AVR_DIFF16,
+    VK_AVR_DIFF32,
+
+    VK_PPC_LO,             // symbol@l
+    VK_PPC_HI,             // symbol@h
+    VK_PPC_HA,             // symbol@ha
+    VK_PPC_HIGHER,         // symbol@higher
+    VK_PPC_HIGHERA,        // symbol@highera
+    VK_PPC_HIGHEST,        // symbol@highest
+    VK_PPC_HIGHESTA,       // symbol@highesta
+    VK_PPC_GOT_LO,         // symbol@got@l
+    VK_PPC_GOT_HI,         // symbol@got@h
+    VK_PPC_GOT_HA,         // symbol@got@ha
+    VK_PPC_TOCBASE,        // symbol@tocbase
+    VK_PPC_TOC,            // symbol@toc
+    VK_PPC_TOC_LO,         // symbol@toc@l
+    VK_PPC_TOC_HI,         // symbol@toc@h
+    VK_PPC_TOC_HA,         // symbol@toc@ha
+    VK_PPC_DTPMOD,         // symbol@dtpmod
+    VK_PPC_TPREL_LO,       // symbol@tprel@l
+    VK_PPC_TPREL_HI,       // symbol@tprel@h
+    VK_PPC_TPREL_HA,       // symbol@tprel@ha
+    VK_PPC_TPREL_HIGHER,   // symbol@tprel@higher
+    VK_PPC_TPREL_HIGHERA,  // symbol@tprel@highera
+    VK_PPC_TPREL_HIGHEST,  // symbol@tprel@highest
+    VK_PPC_TPREL_HIGHESTA, // symbol@tprel@highesta
+    VK_PPC_DTPREL_LO,      // symbol@dtprel@l
+    VK_PPC_DTPREL_HI,      // symbol@dtprel@h
+    VK_PPC_DTPREL_HA,      // symbol@dtprel@ha
+    VK_PPC_DTPREL_HIGHER,  // symbol@dtprel@higher
+    VK_PPC_DTPREL_HIGHERA, // symbol@dtprel@highera
+    VK_PPC_DTPREL_HIGHEST, // symbol@dtprel@highest
+    VK_PPC_DTPREL_HIGHESTA, // symbol@dtprel@highesta
+    VK_PPC_GOT_TPREL,      // symbol@got@tprel
+    VK_PPC_GOT_TPREL_LO,   // symbol@got@tprel@l
+    VK_PPC_GOT_TPREL_HI,   // symbol@got@tprel@h
+    VK_PPC_GOT_TPREL_HA,   // symbol@got@tprel@ha
+    VK_PPC_GOT_DTPREL,     // symbol@got@dtprel
+    VK_PPC_GOT_DTPREL_LO,  // symbol@got@dtprel@l
+    VK_PPC_GOT_DTPREL_HI,  // symbol@got@dtprel@h
+    VK_PPC_GOT_DTPREL_HA,  // symbol@got@dtprel@ha
+    VK_PPC_TLS,            // symbol@tls
+    VK_PPC_GOT_TLSGD,      // symbol@got@tlsgd
+    VK_PPC_GOT_TLSGD_LO,   // symbol@got@tlsgd@l
+    VK_PPC_GOT_TLSGD_HI,   // symbol@got@tlsgd@h
+    VK_PPC_GOT_TLSGD_HA,   // symbol@got@tlsgd@ha
+    VK_PPC_TLSGD,          // symbol@tlsgd
+    VK_PPC_GOT_TLSLD,      // symbol@got@tlsld
+    VK_PPC_GOT_TLSLD_LO,   // symbol@got@tlsld@l
+    VK_PPC_GOT_TLSLD_HI,   // symbol@got@tlsld@h
+    VK_PPC_GOT_TLSLD_HA,   // symbol@got@tlsld@ha
+    VK_PPC_TLSLD,          // symbol@tlsld
+    VK_PPC_LOCAL,          // symbol@local
+
+    VK_COFF_IMGREL32, // symbol@imgrel (image-relative)
+
+    VK_Hexagon_PCREL,
+    VK_Hexagon_LO16,
+    VK_Hexagon_HI16,
+    VK_Hexagon_GPREL,
+    VK_Hexagon_GD_GOT,
+    VK_Hexagon_LD_GOT,
+    VK_Hexagon_GD_PLT,
+    VK_Hexagon_LD_PLT,
+    VK_Hexagon_IE,
+    VK_Hexagon_IE_GOT,
+
+    VK_WebAssembly_FUNCTION, // Function table index, rather than virtual addr
+    VK_WebAssembly_TYPEINDEX, // Type table index
+
+    VK_AMDGPU_GOTPCREL32_LO, // symbol@gotpcrel32@lo
+    VK_AMDGPU_GOTPCREL32_HI, // symbol@gotpcrel32@hi
+    VK_AMDGPU_REL32_LO,      // symbol@rel32@lo
+    VK_AMDGPU_REL32_HI,      // symbol@rel32@hi
+
+    VK_TPREL,
+    VK_DTPREL
+  };
+
+private:
+  /// The symbol reference modifier.
+  const VariantKind Kind;
+
+  /// Specifies how the variant kind should be printed.
+  const unsigned UseParensForSymbolVariant : 1;
+
+  // FIXME: Remove this bit.
+  const unsigned HasSubsectionsViaSymbols : 1;
+
+  /// The symbol being referenced.
+  const MCSymbol *Symbol;
+
+  explicit MCSymbolRefExpr(const MCSymbol *Symbol, VariantKind Kind,
+                           const MCAsmInfo *MAI, SMLoc Loc = SMLoc());
+
+public:
+  /// \name Construction
+  /// @{
+
+  static const MCSymbolRefExpr *create(const MCSymbol *Symbol, MCContext &Ctx) {
+    return MCSymbolRefExpr::create(Symbol, VK_None, Ctx);
+  }
+
+  static const MCSymbolRefExpr *create(const MCSymbol *Symbol, VariantKind Kind,
+                                       MCContext &Ctx, SMLoc Loc = SMLoc());
+  static const MCSymbolRefExpr *create(StringRef Name, VariantKind Kind,
+                                       MCContext &Ctx);
+
+  /// @}
+  /// \name Accessors
+  /// @{
+
+  const MCSymbol &getSymbol() const { return *Symbol; }
+
+  VariantKind getKind() const { return Kind; }
+
+  void printVariantKind(raw_ostream &OS) const;
+
+  bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }
+
+  /// @}
+  /// \name Static Utility Functions
+  /// @{
+
+  static StringRef getVariantKindName(VariantKind Kind);
+
+  static VariantKind getVariantKindForName(StringRef Name);
+
+  /// @}
+
+  static bool classof(const MCExpr *E) {
+    return E->getKind() == MCExpr::SymbolRef;
+  }
+};
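+
+/// Example (editorial sketch, not part of the original header): creating a
+/// reference to `foo@GOT`, roughly as an assembly parser would. `Ctx` is
+/// assumed to be a live MCContext; the symbol name "foo" is a placeholder.
+///
+/// \code
+///   MCSymbol *Foo = Ctx.getOrCreateSymbol("foo");
+///   const MCExpr *Ref =
+///       MCSymbolRefExpr::create(Foo, MCSymbolRefExpr::VK_GOT, Ctx);
+/// \endcode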
+
+/// \brief Unary assembler expressions.
+class MCUnaryExpr : public MCExpr {
+public:
+  enum Opcode {
+    LNot,  ///< Logical negation.
+    Minus, ///< Unary minus.
+    Not,   ///< Bitwise negation.
+    Plus   ///< Unary plus.
+  };
+
+private:
+  Opcode Op;
+  const MCExpr *Expr;
+
+  MCUnaryExpr(Opcode Op, const MCExpr *Expr, SMLoc Loc)
+      : MCExpr(MCExpr::Unary, Loc), Op(Op), Expr(Expr) {}
+
+public:
+  /// \name Construction
+  /// @{
+
+  static const MCUnaryExpr *create(Opcode Op, const MCExpr *Expr,
+                                   MCContext &Ctx, SMLoc Loc = SMLoc());
+
+  static const MCUnaryExpr *createLNot(const MCExpr *Expr, MCContext &Ctx,
+                                       SMLoc Loc = SMLoc()) {
+    return create(LNot, Expr, Ctx, Loc);
+  }
+
+  static const MCUnaryExpr *createMinus(const MCExpr *Expr, MCContext &Ctx,
+                                        SMLoc Loc = SMLoc()) {
+    return create(Minus, Expr, Ctx, Loc);
+  }
+
+  static const MCUnaryExpr *createNot(const MCExpr *Expr, MCContext &Ctx,
+                                      SMLoc Loc = SMLoc()) {
+    return create(Not, Expr, Ctx, Loc);
+  }
+
+  static const MCUnaryExpr *createPlus(const MCExpr *Expr, MCContext &Ctx,
+                                       SMLoc Loc = SMLoc()) {
+    return create(Plus, Expr, Ctx, Loc);
+  }
+
+  /// @}
+  /// \name Accessors
+  /// @{
+
+  /// \brief Get the kind of this unary expression.
+  Opcode getOpcode() const { return Op; }
+
+  /// \brief Get the child of this unary expression.
+  const MCExpr *getSubExpr() const { return Expr; }
+
+  /// @}
+
+  static bool classof(const MCExpr *E) {
+    return E->getKind() == MCExpr::Unary;
+  }
+};
+
+/// \brief Binary assembler expressions.
+class MCBinaryExpr : public MCExpr {
+public:
+  enum Opcode {
+    Add,  ///< Addition.
+    And,  ///< Bitwise and.
+    Div,  ///< Signed division.
+    EQ,   ///< Equality comparison.
+    GT,   ///< Signed greater than comparison (result is either 0 or some
+          ///< target-specific non-zero value)
+    GTE,  ///< Signed greater than or equal comparison (result is either 0 or
+          ///< some target-specific non-zero value).
+    LAnd, ///< Logical and.
+    LOr,  ///< Logical or.
+    LT,   ///< Signed less than comparison (result is either 0 or
+          ///< some target-specific non-zero value).
+    LTE,  ///< Signed less than or equal comparison (result is either 0 or
+          ///< some target-specific non-zero value).
+    Mod,  ///< Signed remainder.
+    Mul,  ///< Multiplication.
+    NE,   ///< Inequality comparison.
+    Or,   ///< Bitwise or.
+    Shl,  ///< Shift left.
+    AShr, ///< Arithmetic shift right.
+    LShr, ///< Logical shift right.
+    Sub,  ///< Subtraction.
+    Xor   ///< Bitwise exclusive or.
+  };
+
+private:
+  Opcode Op;
+  const MCExpr *LHS, *RHS;
+
+  MCBinaryExpr(Opcode Op, const MCExpr *LHS, const MCExpr *RHS,
+               SMLoc Loc = SMLoc())
+      : MCExpr(MCExpr::Binary, Loc), Op(Op), LHS(LHS), RHS(RHS) {}
+
+public:
+  /// \name Construction
+  /// @{
+
+  static const MCBinaryExpr *create(Opcode Op, const MCExpr *LHS,
+                                    const MCExpr *RHS, MCContext &Ctx,
+                                    SMLoc Loc = SMLoc());
+
+  static const MCBinaryExpr *createAdd(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Add, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createAnd(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(And, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createDiv(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Div, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createEQ(const MCExpr *LHS, const MCExpr *RHS,
+                                      MCContext &Ctx) {
+    return create(EQ, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createGT(const MCExpr *LHS, const MCExpr *RHS,
+                                      MCContext &Ctx) {
+    return create(GT, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createGTE(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(GTE, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createLAnd(const MCExpr *LHS, const MCExpr *RHS,
+                                        MCContext &Ctx) {
+    return create(LAnd, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createLOr(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(LOr, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createLT(const MCExpr *LHS, const MCExpr *RHS,
+                                      MCContext &Ctx) {
+    return create(LT, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createLTE(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(LTE, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createMod(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Mod, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createMul(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Mul, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createNE(const MCExpr *LHS, const MCExpr *RHS,
+                                      MCContext &Ctx) {
+    return create(NE, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createOr(const MCExpr *LHS, const MCExpr *RHS,
+                                      MCContext &Ctx) {
+    return create(Or, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createShl(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Shl, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createAShr(const MCExpr *LHS, const MCExpr *RHS,
+                                        MCContext &Ctx) {
+    return create(AShr, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createLShr(const MCExpr *LHS, const MCExpr *RHS,
+                                        MCContext &Ctx) {
+    return create(LShr, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createSub(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Sub, LHS, RHS, Ctx);
+  }
+
+  static const MCBinaryExpr *createXor(const MCExpr *LHS, const MCExpr *RHS,
+                                       MCContext &Ctx) {
+    return create(Xor, LHS, RHS, Ctx);
+  }
+
+  /// @}
+  /// \name Accessors
+  /// @{
+
+  /// \brief Get the kind of this binary expression.
+  Opcode getOpcode() const { return Op; }
+
+  /// \brief Get the left-hand side expression of the binary operator.
+  const MCExpr *getLHS() const { return LHS; }
+
+  /// \brief Get the right-hand side expression of the binary operator.
+  const MCExpr *getRHS() const { return RHS; }
+
+  /// @}
+
+  static bool classof(const MCExpr *E) {
+    return E->getKind() == MCExpr::Binary;
+  }
+};
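+
+/// Example (editorial sketch, not part of the original header): composing
+/// the relocatable expression `foo + 4` from the node types above. `Ctx` is
+/// assumed to be a live MCContext; the symbol name "foo" is a placeholder.
+///
+/// \code
+///   MCSymbol *Foo = Ctx.getOrCreateSymbol("foo");
+///   const MCExpr *E = MCBinaryExpr::createAdd(
+///       MCSymbolRefExpr::create(Foo, Ctx),
+///       MCConstantExpr::create(4, Ctx), Ctx);
+/// \endcode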
+
+/// \brief This is an extension point for target-specific MCExpr subclasses to
+/// implement.
+///
+/// NOTE: All subclasses are required to have trivial destructors because
+/// MCExprs are bump pointer allocated and not destructed.
+class MCTargetExpr : public MCExpr {
+  virtual void anchor();
+
+protected:
+  MCTargetExpr() : MCExpr(Target, SMLoc()) {}
+  virtual ~MCTargetExpr() = default;
+
+public:
+  virtual void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const = 0;
+  virtual bool evaluateAsRelocatableImpl(MCValue &Res,
+                                         const MCAsmLayout *Layout,
+                                         const MCFixup *Fixup) const = 0;
+  virtual void visitUsedExpr(MCStreamer& Streamer) const = 0;
+  virtual MCFragment *findAssociatedFragment() const = 0;
+
+  virtual void fixELFSymbolsInTLSFixups(MCAssembler &) const = 0;
+
+  static bool classof(const MCExpr *E) {
+    return E->getKind() == MCExpr::Target;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCEXPR_H
diff --git a/linux-x64/clang/include/llvm/MC/MCFixedLenDisassembler.h b/linux-x64/clang/include/llvm/MC/MCFixedLenDisassembler.h
new file mode 100644
index 0000000..ad34d94
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCFixedLenDisassembler.h
@@ -0,0 +1,34 @@
+//===-- llvm/MC/MCFixedLenDisassembler.h - Decoder driver -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Fixed length disassembler decoder state machine driver.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCFIXEDLENDISASSEMBLER_H
+#define LLVM_MC_MCFIXEDLENDISASSEMBLER_H
+
+namespace llvm {
+
+namespace MCD {
+// Disassembler state machine opcodes.
+enum DecoderOps {
+  OPC_ExtractField = 1, // OPC_ExtractField(uint8_t Start, uint8_t Len)
+  OPC_FilterValue,      // OPC_FilterValue(uleb128 Val, uint16_t NumToSkip)
+  OPC_CheckField,       // OPC_CheckField(uint8_t Start, uint8_t Len,
+                        //                uleb128 Val, uint16_t NumToSkip)
+  OPC_CheckPredicate,   // OPC_CheckPredicate(uleb128 PIdx, uint16_t NumToSkip)
+  OPC_Decode,           // OPC_Decode(uleb128 Opcode, uleb128 DIdx)
+  OPC_TryDecode,        // OPC_TryDecode(uleb128 Opcode, uleb128 DIdx,
+                        //               uint16_t NumToSkip)
+  OPC_SoftFail,         // OPC_SoftFail(uleb128 PMask, uleb128 NMask)
+  OPC_Fail              // OPC_Fail()
+};
+
+} // end namespace MCD
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCFixup.h b/linux-x64/clang/include/llvm/MC/MCFixup.h
new file mode 100644
index 0000000..b83086c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCFixup.h
@@ -0,0 +1,117 @@
+//===-- llvm/MC/MCFixup.h - Instruction Relocation and Patching -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFIXUP_H
+#define LLVM_MC_MCFIXUP_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
+#include <cassert>
+
+namespace llvm {
+class MCExpr;
+
+/// \brief Extensible enumeration to represent the type of a fixup.
+enum MCFixupKind {
+  FK_Data_1 = 0, ///< A one-byte fixup.
+  FK_Data_2,     ///< A two-byte fixup.
+  FK_Data_4,     ///< A four-byte fixup.
+  FK_Data_8,     ///< An eight-byte fixup.
+  FK_PCRel_1,    ///< A one-byte pc relative fixup.
+  FK_PCRel_2,    ///< A two-byte pc relative fixup.
+  FK_PCRel_4,    ///< A four-byte pc relative fixup.
+  FK_PCRel_8,    ///< An eight-byte pc relative fixup.
+  FK_GPRel_1,    ///< A one-byte gp relative fixup.
+  FK_GPRel_2,    ///< A two-byte gp relative fixup.
+  FK_GPRel_4,    ///< A four-byte gp relative fixup.
+  FK_GPRel_8,    ///< An eight-byte gp relative fixup.
+  FK_DTPRel_4,   ///< A four-byte dtp relative fixup.
+  FK_DTPRel_8,   ///< An eight-byte dtp relative fixup.
+  FK_TPRel_4,    ///< A four-byte tp relative fixup.
+  FK_TPRel_8,    ///< An eight-byte tp relative fixup.
+  FK_SecRel_1,   ///< A one-byte section relative fixup.
+  FK_SecRel_2,   ///< A two-byte section relative fixup.
+  FK_SecRel_4,   ///< A four-byte section relative fixup.
+  FK_SecRel_8,   ///< An eight-byte section relative fixup.
+
+  FirstTargetFixupKind = 128,
+
+  // Limit range of target fixups, in case we want to pack more efficiently
+  // later.
+  MaxTargetFixupKind = (1 << 8)
+};
+
+/// \brief Encode information on a single operation to perform on a byte
+/// sequence (e.g., an encoded instruction) which requires assemble- or run-
+/// time patching.
+///
+/// Fixups are used any time the target instruction encoder needs to represent
+/// some value in an instruction which is not yet concrete. The encoder will
+/// encode the instruction assuming the value is 0, and emit a fixup which
+/// communicates to the assembler backend how it should rewrite the encoded
+/// value.
+///
+/// During the process of relaxation, the assembler will apply fixups as
+/// symbolic values become concrete. When relaxation is complete, any remaining
+/// fixups become relocations in the object file (or errors, if the fixup cannot
+/// be encoded on the target).
+class MCFixup {
+  /// The value to put into the fixup location. The exact interpretation of the
+  /// expression is target dependent, usually it will be one of the operands to
+  /// an instruction or an assembler directive.
+  const MCExpr *Value;
+
+  /// The byte index of start of the relocation inside the MCFragment.
+  uint32_t Offset;
+
+  /// The target dependent kind of fixup item this is. The kind is used to
+  /// determine how the operand value should be encoded into the instruction.
+  unsigned Kind;
+
+  /// The source location which gave rise to the fixup, if any.
+  SMLoc Loc;
+public:
+  static MCFixup create(uint32_t Offset, const MCExpr *Value,
+                        MCFixupKind Kind, SMLoc Loc = SMLoc()) {
+    assert(unsigned(Kind) < MaxTargetFixupKind && "Kind out of range!");
+    MCFixup FI;
+    FI.Value = Value;
+    FI.Offset = Offset;
+    FI.Kind = unsigned(Kind);
+    FI.Loc = Loc;
+    return FI;
+  }
+
+  MCFixupKind getKind() const { return MCFixupKind(Kind); }
+
+  uint32_t getOffset() const { return Offset; }
+  void setOffset(uint32_t Value) { Offset = Value; }
+
+  const MCExpr *getValue() const { return Value; }
+
+  /// \brief Return the generic fixup kind for a value with the given size. It
+  /// is an error to pass an unsupported size.
+  static MCFixupKind getKindForSize(unsigned Size, bool isPCRel) {
+    switch (Size) {
+    default: llvm_unreachable("Invalid generic fixup size!");
+    case 1: return isPCRel ? FK_PCRel_1 : FK_Data_1;
+    case 2: return isPCRel ? FK_PCRel_2 : FK_Data_2;
+    case 4: return isPCRel ? FK_PCRel_4 : FK_Data_4;
+    case 8: return isPCRel ? FK_PCRel_8 : FK_Data_8;
+    }
+  }
+
+  SMLoc getLoc() const { return Loc; }
+};
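+
+/// Example (editorial sketch, not part of the original header): recording
+/// that the four bytes at offset 0 of the current fragment must eventually
+/// be patched with the value of an expression `E` built elsewhere.
+///
+/// \code
+///   MCFixup F = MCFixup::create(
+///       0, E, MCFixup::getKindForSize(4, /*isPCRel=*/false));
+/// \endcode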
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCFixupKindInfo.h b/linux-x64/clang/include/llvm/MC/MCFixupKindInfo.h
new file mode 100644
index 0000000..58183bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCFixupKindInfo.h
@@ -0,0 +1,43 @@
+//===-- llvm/MC/MCFixupKindInfo.h - Fixup Descriptors -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFIXUPKINDINFO_H
+#define LLVM_MC_MCFIXUPKINDINFO_H
+
+namespace llvm {
+
+/// \brief Target independent information on a fixup kind.
+struct MCFixupKindInfo {
+  enum FixupKindFlags {
+    /// Is this fixup kind PC-relative? This is used by the assembler backend
+    /// to evaluate fixup values in a target-independent manner when possible.
+    FKF_IsPCRel = (1 << 0),
+
+    /// Should this fixup kind force a 4-byte aligned effective PC value?
+    FKF_IsAlignedDownTo32Bits = (1 << 1)
+  };
+
+  /// A target specific name for the fixup kind. The names will be unique for
+  /// distinct kinds on any given target.
+  const char *Name;
+
+  /// The bit offset to write the relocation into.
+  unsigned TargetOffset;
+
+  /// The number of bits written by this fixup. The bits are assumed to be
+  /// contiguous.
+  unsigned TargetSize;
+
+  /// Flags describing additional information on this fixup kind.
+  unsigned Flags;
+};
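+
+/// Example (editorial sketch, not part of the original header): a backend's
+/// getFixupKindInfo() typically indexes a static table of these records. The
+/// entry below describes a hypothetical 24-bit PC-relative fixup written
+/// starting at bit 0 of the instruction.
+///
+/// \code
+///   // Name                       Offset  Size  Flags
+///   static const MCFixupKindInfo Info = {
+///       "fixup_example_pcrel_24", 0, 24, MCFixupKindInfo::FKF_IsPCRel};
+/// \endcode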
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCFragment.h b/linux-x64/clang/include/llvm/MC/MCFragment.h
new file mode 100644
index 0000000..38c3655
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCFragment.h
@@ -0,0 +1,666 @@
+//===- MCFragment.h - Fragment type hierarchy -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFRAGMENT_H
+#define LLVM_MC_MCFRAGMENT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/SMLoc.h"
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class MCSection;
+class MCSubtargetInfo;
+class MCSymbol;
+
+class MCFragment : public ilist_node_with_parent<MCFragment, MCSection> {
+  friend class MCAsmLayout;
+
+public:
+  enum FragmentType : uint8_t {
+    FT_Align,
+    FT_Data,
+    FT_CompactEncodedInst,
+    FT_Fill,
+    FT_Relaxable,
+    FT_Org,
+    FT_Dwarf,
+    FT_DwarfFrame,
+    FT_LEB,
+    FT_Padding,
+    FT_SymbolId,
+    FT_CVInlineLines,
+    FT_CVDefRange,
+    FT_Dummy
+  };
+
+private:
+  FragmentType Kind;
+
+protected:
+  bool HasInstructions;
+
+private:
+  /// \brief Should this fragment be aligned to the end of a bundle?
+  bool AlignToBundleEnd;
+
+  uint8_t BundlePadding;
+
+  /// LayoutOrder - The layout order of this fragment.
+  unsigned LayoutOrder;
+
+  /// The data for the section this fragment is in.
+  MCSection *Parent;
+
+  /// Atom - The atom this fragment is in, as represented by its defining
+  /// symbol.
+  const MCSymbol *Atom;
+
+  /// \name Assembler Backend Data
+  /// @{
+  //
+  // FIXME: This could all be kept private to the assembler implementation.
+
+  /// Offset - The offset of this fragment in its section. This is ~0 until
+  /// initialized.
+  uint64_t Offset;
+
+  /// @}
+
+protected:
+  MCFragment(FragmentType Kind, bool HasInstructions,
+             uint8_t BundlePadding, MCSection *Parent = nullptr);
+
+  ~MCFragment();
+
+public:
+  MCFragment() = delete;
+  MCFragment(const MCFragment &) = delete;
+  MCFragment &operator=(const MCFragment &) = delete;
+
+  /// Destroys the current fragment.
+  ///
+  /// This must be used instead of delete as MCFragment is non-virtual.
+  /// This method will dispatch to the appropriate subclass.
+  void destroy();
+
+  FragmentType getKind() const { return Kind; }
+
+  MCSection *getParent() const { return Parent; }
+  void setParent(MCSection *Value) { Parent = Value; }
+
+  const MCSymbol *getAtom() const { return Atom; }
+  void setAtom(const MCSymbol *Value) { Atom = Value; }
+
+  unsigned getLayoutOrder() const { return LayoutOrder; }
+  void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
+  /// \brief Does this fragment have instructions emitted into it? By default
+  /// this is false, but specific fragment types may set it to true.
+  bool hasInstructions() const { return HasInstructions; }
+
+  /// \brief Should this fragment be placed at the end of an aligned bundle?
+  bool alignToBundleEnd() const { return AlignToBundleEnd; }
+  void setAlignToBundleEnd(bool V) { AlignToBundleEnd = V; }
+
+  /// \brief Get the padding size that must be inserted before this fragment.
+  /// Used for bundling. By default, no padding is inserted.
+  /// Note that padding size is restricted to 8 bits. This is an optimization
+  /// to reduce the amount of space used for each fragment. In practice, larger
+  /// padding should never be required.
+  uint8_t getBundlePadding() const { return BundlePadding; }
+
+  /// \brief Set the padding size for this fragment. By default it's a no-op,
+  /// and only some fragments have a meaningful implementation.
+  void setBundlePadding(uint8_t N) { BundlePadding = N; }
+
+  /// \brief Return true if the given fragment has FT_Dummy type.
+  bool isDummy() const { return Kind == FT_Dummy; }
+
+  void dump() const;
+};
+
+class MCDummyFragment : public MCFragment {
+public:
+  explicit MCDummyFragment(MCSection *Sec)
+      : MCFragment(FT_Dummy, false, 0, Sec) {}
+
+  static bool classof(const MCFragment *F) { return F->getKind() == FT_Dummy; }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data.
+///
+class MCEncodedFragment : public MCFragment {
+protected:
+  MCEncodedFragment(MCFragment::FragmentType FType, bool HasInstructions,
+                    MCSection *Sec)
+      : MCFragment(FType, HasInstructions, 0, Sec) {}
+
+public:
+  static bool classof(const MCFragment *F) {
+    MCFragment::FragmentType Kind = F->getKind();
+    switch (Kind) {
+    default:
+      return false;
+    case MCFragment::FT_Relaxable:
+    case MCFragment::FT_CompactEncodedInst:
+    case MCFragment::FT_Data:
+      return true;
+    }
+  }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data.
+///
+template<unsigned ContentsSize>
+class MCEncodedFragmentWithContents : public MCEncodedFragment {
+  SmallVector<char, ContentsSize> Contents;
+
+protected:
+  MCEncodedFragmentWithContents(MCFragment::FragmentType FType,
+                                bool HasInstructions,
+                                MCSection *Sec)
+      : MCEncodedFragment(FType, HasInstructions, Sec) {}
+
+public:
+  SmallVectorImpl<char> &getContents() { return Contents; }
+  const SmallVectorImpl<char> &getContents() const { return Contents; }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data and also have fixups registered.
+///
+template<unsigned ContentsSize, unsigned FixupsSize>
+class MCEncodedFragmentWithFixups :
+  public MCEncodedFragmentWithContents<ContentsSize> {
+
+  /// Fixups - The list of fixups in this fragment.
+  SmallVector<MCFixup, FixupsSize> Fixups;
+
+protected:
+  MCEncodedFragmentWithFixups(MCFragment::FragmentType FType,
+                              bool HasInstructions,
+                              MCSection *Sec)
+      : MCEncodedFragmentWithContents<ContentsSize>(FType, HasInstructions,
+                                                    Sec) {}
+
+public:
+  using const_fixup_iterator = SmallVectorImpl<MCFixup>::const_iterator;
+  using fixup_iterator = SmallVectorImpl<MCFixup>::iterator;
+
+  SmallVectorImpl<MCFixup> &getFixups() { return Fixups; }
+  const SmallVectorImpl<MCFixup> &getFixups() const { return Fixups; }
+
+  fixup_iterator fixup_begin() { return Fixups.begin(); }
+  const_fixup_iterator fixup_begin() const { return Fixups.begin(); }
+
+  fixup_iterator fixup_end() { return Fixups.end(); }
+  const_fixup_iterator fixup_end() const { return Fixups.end(); }
+
+  static bool classof(const MCFragment *F) {
+    MCFragment::FragmentType Kind = F->getKind();
+    return Kind == MCFragment::FT_Relaxable || Kind == MCFragment::FT_Data ||
+           Kind == MCFragment::FT_CVDefRange;
+  }
+};
+
+/// Fragment for data and encoded instructions.
+///
+class MCDataFragment : public MCEncodedFragmentWithFixups<32, 4> {
+public:
+  MCDataFragment(MCSection *Sec = nullptr)
+      : MCEncodedFragmentWithFixups<32, 4>(FT_Data, false, Sec) {}
+
+  void setHasInstructions(bool V) { HasInstructions = V; }
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Data;
+  }
+};
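+
+/// Example (editorial sketch, not part of the original header): an object
+/// streamer typically appends encoded bytes to a data fragment and registers
+/// fixups at the matching offsets. `DF`, `Code`, and `E` (a fragment, an
+/// encoded-byte buffer, and an expression) are assumed to exist.
+///
+/// \code
+///   uint32_t Off = DF->getContents().size();
+///   DF->getContents().append(Code.begin(), Code.end());
+///   DF->getFixups().push_back(MCFixup::create(Off, E, FK_Data_4));
+/// \endcode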
+
+/// This is a compact (memory-size-wise) fragment for holding an encoded
+/// instruction (non-relaxable) that has no fixups registered. When applicable,
+/// it can be used instead of MCDataFragment and lead to lower memory
+/// consumption.
+///
+class MCCompactEncodedInstFragment : public MCEncodedFragmentWithContents<4> {
+public:
+  MCCompactEncodedInstFragment(MCSection *Sec = nullptr)
+      : MCEncodedFragmentWithContents(FT_CompactEncodedInst, true, Sec) {
+  }
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_CompactEncodedInst;
+  }
+};
+
+/// A relaxable fragment holds on to its MCInst, since it may need to be
+/// relaxed during the assembler layout and relaxation stage.
+///
+class MCRelaxableFragment : public MCEncodedFragmentWithFixups<8, 1> {
+
+  /// Inst - The instruction this is a fragment for.
+  MCInst Inst;
+
+  /// STI - The MCSubtargetInfo in effect when the instruction was encoded.
+  const MCSubtargetInfo &STI;
+
+public:
+  MCRelaxableFragment(const MCInst &Inst, const MCSubtargetInfo &STI,
+                      MCSection *Sec = nullptr)
+      : MCEncodedFragmentWithFixups(FT_Relaxable, true, Sec),
+        Inst(Inst), STI(STI) {}
+
+  const MCInst &getInst() const { return Inst; }
+  void setInst(const MCInst &Value) { Inst = Value; }
+
+  const MCSubtargetInfo &getSubtargetInfo() { return STI; }
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Relaxable;
+  }
+};
+
+class MCAlignFragment : public MCFragment {
+  /// Alignment - The alignment to ensure, in bytes.
+  unsigned Alignment;
+
+  /// EmitNops - Flag to indicate that (optimal) NOPs should be emitted instead
+  /// of using the provided value. The exact interpretation of this flag is
+  /// target dependent.
+  bool EmitNops : 1;
+
+  /// Value - Value to use for filling padding bytes.
+  int64_t Value;
+
+  /// ValueSize - The size of the integer (in bytes) of \p Value.
+  unsigned ValueSize;
+
+  /// MaxBytesToEmit - The maximum number of bytes to emit; if the alignment
+  /// cannot be satisfied in this width then this fragment is ignored.
+  unsigned MaxBytesToEmit;
+
+public:
+  MCAlignFragment(unsigned Alignment, int64_t Value, unsigned ValueSize,
+                  unsigned MaxBytesToEmit, MCSection *Sec = nullptr)
+      : MCFragment(FT_Align, false, 0, Sec), Alignment(Alignment),
+        EmitNops(false), Value(Value),
+        ValueSize(ValueSize), MaxBytesToEmit(MaxBytesToEmit) {}
+
+  /// \name Accessors
+  /// @{
+
+  unsigned getAlignment() const { return Alignment; }
+
+  int64_t getValue() const { return Value; }
+
+  unsigned getValueSize() const { return ValueSize; }
+
+  unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
+
+  bool hasEmitNops() const { return EmitNops; }
+  void setEmitNops(bool Value) { EmitNops = Value; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Align;
+  }
+};
+
+/// Fragment for adding required padding.
+/// This fragment is always inserted before an instruction, and holds that
+/// instruction as context information (as well as a mask of kinds) for
+/// determining the padding size.
+///
+class MCPaddingFragment : public MCFragment {
+  /// A mask containing all the kinds relevant to this fragment, i.e. the i'th
+  /// bit will be set iff kind i is relevant to this fragment.
+  uint64_t PaddingPoliciesMask;
+  /// A boolean indicating if this fragment will actually hold padding. If its
+  /// value is false, then this fragment serves only as a placeholder,
+  /// containing data to assist other insertion points in their decision
+  /// making.
+  bool IsInsertionPoint;
+
+  uint64_t Size;
+
+  struct MCInstInfo {
+    bool IsInitialized;
+    MCInst Inst;
+    /// A boolean indicating whether the instruction pointed to by this
+    /// fragment is a fixed-size instruction or a relaxable instruction held
+    /// by an MCRelaxableFragment.
+    bool IsImmutableSizedInst;
+    union {
+      /// If the instruction is a fixed size instruction, hold its size.
+      size_t InstSize;
+      /// Otherwise, hold a pointer to the MCRelaxableFragment holding it.
+      MCRelaxableFragment *InstFragment;
+    };
+  };
+  MCInstInfo InstInfo;
+
+public:
+  static const uint64_t PFK_None = UINT64_C(0);
+
+  enum MCPaddingFragmentKind {
+    // Values 0-7 are reserved for future target-independent values.
+
+    FirstTargetPerfNopFragmentKind = 8,
+
+    /// Limit range of target MCPerfNopFragment kinds to fit in uint64_t
+    MaxTargetPerfNopFragmentKind = 63
+  };
+
+  MCPaddingFragment(MCSection *Sec = nullptr)
+      : MCFragment(FT_Padding, false, 0, Sec), PaddingPoliciesMask(PFK_None),
+        IsInsertionPoint(false), Size(UINT64_C(0)),
+        InstInfo({false, MCInst(), false, {0}}) {}
+
+  bool isInsertionPoint() const { return IsInsertionPoint; }
+  void setAsInsertionPoint() { IsInsertionPoint = true; }
+  uint64_t getPaddingPoliciesMask() const { return PaddingPoliciesMask; }
+  void setPaddingPoliciesMask(uint64_t Value) { PaddingPoliciesMask = Value; }
+  bool hasPaddingPolicy(uint64_t PolicyMask) const {
+    assert(isPowerOf2_64(PolicyMask) &&
+           "Policy mask must contain exactly one policy");
+    return (getPaddingPoliciesMask() & PolicyMask) != PFK_None;
+  }
+  const MCInst &getInst() const {
+    assert(isInstructionInitialized() && "Fragment has no instruction!");
+    return InstInfo.Inst;
+  }
+  size_t getInstSize() const {
+    assert(isInstructionInitialized() && "Fragment has no instruction!");
+    if (InstInfo.IsImmutableSizedInst)
+      return InstInfo.InstSize;
+    assert(InstInfo.InstFragment != nullptr &&
+           "Must have a valid InstFragment to retrieve InstSize from");
+    return InstInfo.InstFragment->getContents().size();
+  }
+  void setInstAndInstSize(const MCInst &Inst, size_t InstSize) {
+    InstInfo.IsInitialized = true;
+    InstInfo.IsImmutableSizedInst = true;
+    InstInfo.Inst = Inst;
+    InstInfo.InstSize = InstSize;
+  }
+  void setInstAndInstFragment(const MCInst &Inst,
+                              MCRelaxableFragment *InstFragment) {
+    InstInfo.IsInitialized = true;
+    InstInfo.IsImmutableSizedInst = false;
+    InstInfo.Inst = Inst;
+    InstInfo.InstFragment = InstFragment;
+  }
+  uint64_t getSize() const { return Size; }
+  void setSize(uint64_t Value) { Size = Value; }
+  bool isInstructionInitialized() const { return InstInfo.IsInitialized; }
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Padding;
+  }
+};
+
+class MCFillFragment : public MCFragment {
+  /// Value to use for filling bytes.
+  uint8_t Value;
+
+  /// The number of bytes to insert.
+  const MCExpr &Size;
+
+  /// Source location of the directive that this fragment was created for.
+  SMLoc Loc;
+
+public:
+  MCFillFragment(uint8_t Value, const MCExpr &Size, SMLoc Loc,
+                 MCSection *Sec = nullptr)
+      : MCFragment(FT_Fill, false, 0, Sec), Value(Value), Size(Size), Loc(Loc) {
+  }
+
+  uint8_t getValue() const { return Value; }
+  const MCExpr &getSize() const { return Size; }
+
+  SMLoc getLoc() const { return Loc; }
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Fill;
+  }
+};
+
+class MCOrgFragment : public MCFragment {
+  /// The offset this fragment should start at.
+  const MCExpr *Offset;
+
+  /// Value to use for filling bytes.
+  int8_t Value;
+
+  /// Source location of the directive that this fragment was created for.
+  SMLoc Loc;
+
+public:
+  MCOrgFragment(const MCExpr &Offset, int8_t Value, SMLoc Loc,
+                MCSection *Sec = nullptr)
+      : MCFragment(FT_Org, false, 0, Sec), Offset(&Offset), Value(Value), Loc(Loc) {}
+
+  /// \name Accessors
+  /// @{
+
+  const MCExpr &getOffset() const { return *Offset; }
+
+  uint8_t getValue() const { return Value; }
+
+  SMLoc getLoc() const { return Loc; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Org;
+  }
+};
+
+class MCLEBFragment : public MCFragment {
+  /// Value - The value this fragment should contain.
+  const MCExpr *Value;
+
+  /// IsSigned - True if this is a sleb128, false if uleb128.
+  bool IsSigned;
+
+  SmallString<8> Contents;
+
+public:
+  MCLEBFragment(const MCExpr &Value_, bool IsSigned_, MCSection *Sec = nullptr)
+      : MCFragment(FT_LEB, false, 0, Sec), Value(&Value_), IsSigned(IsSigned_) {
+    Contents.push_back(0);
+  }
+
+  /// \name Accessors
+  /// @{
+
+  const MCExpr &getValue() const { return *Value; }
+
+  bool isSigned() const { return IsSigned; }
+
+  SmallString<8> &getContents() { return Contents; }
+  const SmallString<8> &getContents() const { return Contents; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_LEB;
+  }
+};
+
+class MCDwarfLineAddrFragment : public MCFragment {
+  /// LineDelta - the value of the difference between the two line numbers
+  /// between two .loc dwarf directives.
+  int64_t LineDelta;
+
+  /// AddrDelta - The expression for the difference of the two symbols that
+  /// make up the address delta between two .loc dwarf directives.
+  const MCExpr *AddrDelta;
+
+  SmallString<8> Contents;
+
+public:
+  MCDwarfLineAddrFragment(int64_t LineDelta, const MCExpr &AddrDelta,
+                          MCSection *Sec = nullptr)
+      : MCFragment(FT_Dwarf, false, 0, Sec), LineDelta(LineDelta),
+        AddrDelta(&AddrDelta) {
+    Contents.push_back(0);
+  }
+
+  /// \name Accessors
+  /// @{
+
+  int64_t getLineDelta() const { return LineDelta; }
+
+  const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+  SmallString<8> &getContents() { return Contents; }
+  const SmallString<8> &getContents() const { return Contents; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_Dwarf;
+  }
+};
+
+class MCDwarfCallFrameFragment : public MCFragment {
+  /// AddrDelta - The expression for the difference of the two symbols that
+  /// make up the address delta between two .cfi_* dwarf directives.
+  const MCExpr *AddrDelta;
+
+  SmallString<8> Contents;
+
+public:
+  MCDwarfCallFrameFragment(const MCExpr &AddrDelta, MCSection *Sec = nullptr)
+      : MCFragment(FT_DwarfFrame, false, 0, Sec), AddrDelta(&AddrDelta) {
+    Contents.push_back(0);
+  }
+
+  /// \name Accessors
+  /// @{
+
+  const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+  SmallString<8> &getContents() { return Contents; }
+  const SmallString<8> &getContents() const { return Contents; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_DwarfFrame;
+  }
+};
+
+/// Represents a symbol table index fragment.
+class MCSymbolIdFragment : public MCFragment {
+  const MCSymbol *Sym;
+
+public:
+  MCSymbolIdFragment(const MCSymbol *Sym, MCSection *Sec = nullptr)
+      : MCFragment(FT_SymbolId, false, 0, Sec), Sym(Sym) {}
+
+  /// \name Accessors
+  /// @{
+
+  const MCSymbol *getSymbol() { return Sym; }
+  const MCSymbol *getSymbol() const { return Sym; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_SymbolId;
+  }
+};
+
+/// Fragment representing the binary annotations produced by the
+/// .cv_inline_linetable directive.
+class MCCVInlineLineTableFragment : public MCFragment {
+  unsigned SiteFuncId;
+  unsigned StartFileId;
+  unsigned StartLineNum;
+  const MCSymbol *FnStartSym;
+  const MCSymbol *FnEndSym;
+  SmallString<8> Contents;
+
+  /// CodeViewContext has the real knowledge about this format, so let it access
+  /// our members.
+  friend class CodeViewContext;
+
+public:
+  MCCVInlineLineTableFragment(unsigned SiteFuncId, unsigned StartFileId,
+                              unsigned StartLineNum, const MCSymbol *FnStartSym,
+                              const MCSymbol *FnEndSym,
+                              MCSection *Sec = nullptr)
+      : MCFragment(FT_CVInlineLines, false, 0, Sec), SiteFuncId(SiteFuncId),
+        StartFileId(StartFileId), StartLineNum(StartLineNum),
+        FnStartSym(FnStartSym), FnEndSym(FnEndSym) {}
+
+  /// \name Accessors
+  /// @{
+
+  const MCSymbol *getFnStartSym() const { return FnStartSym; }
+  const MCSymbol *getFnEndSym() const { return FnEndSym; }
+
+  SmallString<8> &getContents() { return Contents; }
+  const SmallString<8> &getContents() const { return Contents; }
+
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_CVInlineLines;
+  }
+};
+
+/// Fragment representing the .cv_def_range directive.
+class MCCVDefRangeFragment : public MCEncodedFragmentWithFixups<32, 4> {
+  SmallVector<std::pair<const MCSymbol *, const MCSymbol *>, 2> Ranges;
+  SmallString<32> FixedSizePortion;
+
+  /// CodeViewContext has the real knowledge about this format, so let it access
+  /// our members.
+  friend class CodeViewContext;
+
+public:
+  MCCVDefRangeFragment(
+      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
+      StringRef FixedSizePortion, MCSection *Sec = nullptr)
+      : MCEncodedFragmentWithFixups<32, 4>(FT_CVDefRange, false, Sec),
+        Ranges(Ranges.begin(), Ranges.end()),
+        FixedSizePortion(FixedSizePortion) {}
+
+  /// \name Accessors
+  /// @{
+  ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> getRanges() const {
+    return Ranges;
+  }
+
+  StringRef getFixedSizePortion() const { return FixedSizePortion; }
+  /// @}
+
+  static bool classof(const MCFragment *F) {
+    return F->getKind() == MCFragment::FT_CVDefRange;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCFRAGMENT_H
diff --git a/linux-x64/clang/include/llvm/MC/MCInst.h b/linux-x64/clang/include/llvm/MC/MCInst.h
new file mode 100644
index 0000000..db28fd0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInst.h
@@ -0,0 +1,223 @@
+//===- llvm/MC/MCInst.h - MCInst class --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCInst and MCOperand classes, which
+// is the basic representation used to represent low-level machine code
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINST_H
+#define LLVM_MC_MCINST_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SMLoc.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+class MCExpr;
+class MCInst;
+class MCInstPrinter;
+class raw_ostream;
+
+/// \brief Instances of this class represent operands of the MCInst class.
+/// This is a simple discriminated union.
+class MCOperand {
+  enum MachineOperandType : unsigned char {
+    kInvalid,     ///< Uninitialized.
+    kRegister,    ///< Register operand.
+    kImmediate,   ///< Immediate operand.
+    kFPImmediate, ///< Floating-point immediate operand.
+    kExpr,        ///< Relocatable immediate operand.
+    kInst         ///< Sub-instruction operand.
+  };
+  MachineOperandType Kind = kInvalid;
+
+  union {
+    unsigned RegVal;
+    int64_t ImmVal;
+    double FPImmVal;
+    const MCExpr *ExprVal;
+    const MCInst *InstVal;
+  };
+
+public:
+  MCOperand() : FPImmVal(0.0) {}
+
+  bool isValid() const { return Kind != kInvalid; }
+  bool isReg() const { return Kind == kRegister; }
+  bool isImm() const { return Kind == kImmediate; }
+  bool isFPImm() const { return Kind == kFPImmediate; }
+  bool isExpr() const { return Kind == kExpr; }
+  bool isInst() const { return Kind == kInst; }
+
+  /// \brief Returns the register number.
+  unsigned getReg() const {
+    assert(isReg() && "This is not a register operand!");
+    return RegVal;
+  }
+
+  /// \brief Set the register number.
+  void setReg(unsigned Reg) {
+    assert(isReg() && "This is not a register operand!");
+    RegVal = Reg;
+  }
+
+  int64_t getImm() const {
+    assert(isImm() && "This is not an immediate");
+    return ImmVal;
+  }
+
+  void setImm(int64_t Val) {
+    assert(isImm() && "This is not an immediate");
+    ImmVal = Val;
+  }
+
+  double getFPImm() const {
+    assert(isFPImm() && "This is not an FP immediate");
+    return FPImmVal;
+  }
+
+  void setFPImm(double Val) {
+    assert(isFPImm() && "This is not an FP immediate");
+    FPImmVal = Val;
+  }
+
+  const MCExpr *getExpr() const {
+    assert(isExpr() && "This is not an expression");
+    return ExprVal;
+  }
+
+  void setExpr(const MCExpr *Val) {
+    assert(isExpr() && "This is not an expression");
+    ExprVal = Val;
+  }
+
+  const MCInst *getInst() const {
+    assert(isInst() && "This is not a sub-instruction");
+    return InstVal;
+  }
+
+  void setInst(const MCInst *Val) {
+    assert(isInst() && "This is not a sub-instruction");
+    InstVal = Val;
+  }
+
+  static MCOperand createReg(unsigned Reg) {
+    MCOperand Op;
+    Op.Kind = kRegister;
+    Op.RegVal = Reg;
+    return Op;
+  }
+
+  static MCOperand createImm(int64_t Val) {
+    MCOperand Op;
+    Op.Kind = kImmediate;
+    Op.ImmVal = Val;
+    return Op;
+  }
+
+  static MCOperand createFPImm(double Val) {
+    MCOperand Op;
+    Op.Kind = kFPImmediate;
+    Op.FPImmVal = Val;
+    return Op;
+  }
+
+  static MCOperand createExpr(const MCExpr *Val) {
+    MCOperand Op;
+    Op.Kind = kExpr;
+    Op.ExprVal = Val;
+    return Op;
+  }
+
+  static MCOperand createInst(const MCInst *Val) {
+    MCOperand Op;
+    Op.Kind = kInst;
+    Op.InstVal = Val;
+    return Op;
+  }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+};
+
+template <> struct isPodLike<MCOperand> { static const bool value = true; };
+
+/// \brief Instances of this class represent a single low-level machine
+/// instruction.
+class MCInst {
+  unsigned Opcode = 0;
+  SMLoc Loc;
+  SmallVector<MCOperand, 8> Operands;
+  // These flags can be used to pass information from one target subcomponent
+  // to another, for example, from the disassembler to the asm printer. The
+  // values of the flags are meaningful only at the target level (e.g.
+  // prefixes on x86).
+  unsigned Flags = 0;
+
+public:
+  MCInst() = default;
+
+  void setOpcode(unsigned Op) { Opcode = Op; }
+  unsigned getOpcode() const { return Opcode; }
+
+  void setFlags(unsigned F) { Flags = F; }
+  unsigned getFlags() const { return Flags; }
+
+  void setLoc(SMLoc loc) { Loc = loc; }
+  SMLoc getLoc() const { return Loc; }
+
+  const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
+  MCOperand &getOperand(unsigned i) { return Operands[i]; }
+  unsigned getNumOperands() const { return Operands.size(); }
+
+  void addOperand(const MCOperand &Op) { Operands.push_back(Op); }
+
+  using iterator = SmallVectorImpl<MCOperand>::iterator;
+  using const_iterator = SmallVectorImpl<MCOperand>::const_iterator;
+
+  void clear() { Operands.clear(); }
+  void erase(iterator I) { Operands.erase(I); }
+  size_t size() const { return Operands.size(); }
+  iterator begin() { return Operands.begin(); }
+  const_iterator begin() const { return Operands.begin(); }
+  iterator end() { return Operands.end(); }
+  const_iterator end() const { return Operands.end(); }
+
+  iterator insert(iterator I, const MCOperand &Op) {
+    return Operands.insert(I, Op);
+  }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  /// \brief Dump the MCInst as prettily as possible using the additional MC
+  /// structures, if given. Operands are separated by the \p Separator
+  /// string.
+  void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer = nullptr,
+                   StringRef Separator = " ") const;
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
+  MO.print(OS);
+  return OS;
+}
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
+  MI.print(OS);
+  return OS;
+}
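+
+/// Example (editorial sketch, not part of the original header): building an
+/// MCInst by hand and walking its operands. The opcode and register numbers
+/// are target-specific placeholders.
+///
+/// \code
+///   MCInst MI;
+///   MI.setOpcode(PlaceholderOpcode);
+///   MI.addOperand(MCOperand::createReg(PlaceholderReg));
+///   MI.addOperand(MCOperand::createImm(42));
+///   for (const MCOperand &MO : MI)
+///     if (MO.isImm())
+///       ; // Found the immediate operand, 42.
+/// \endcode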
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCINST_H
diff --git a/linux-x64/clang/include/llvm/MC/MCInstBuilder.h b/linux-x64/clang/include/llvm/MC/MCInstBuilder.h
new file mode 100644
index 0000000..30609bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstBuilder.h
@@ -0,0 +1,74 @@
+//===-- llvm/MC/MCInstBuilder.h - Simplify creation of MCInsts --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MCInstBuilder class for convenient creation of
+// MCInsts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTBUILDER_H
+#define LLVM_MC_MCINSTBUILDER_H
+
+#include "llvm/MC/MCInst.h"
+
+namespace llvm {
+
+class MCInstBuilder {
+  MCInst Inst;
+
+public:
+  /// \brief Create a new MCInstBuilder for an MCInst with a specific opcode.
+  MCInstBuilder(unsigned Opcode) {
+    Inst.setOpcode(Opcode);
+  }
+
+  /// \brief Add a new register operand.
+  MCInstBuilder &addReg(unsigned Reg) {
+    Inst.addOperand(MCOperand::createReg(Reg));
+    return *this;
+  }
+
+  /// \brief Add a new integer immediate operand.
+  MCInstBuilder &addImm(int64_t Val) {
+    Inst.addOperand(MCOperand::createImm(Val));
+    return *this;
+  }
+
+  /// \brief Add a new floating point immediate operand.
+  MCInstBuilder &addFPImm(double Val) {
+    Inst.addOperand(MCOperand::createFPImm(Val));
+    return *this;
+  }
+
+  /// \brief Add a new MCExpr operand.
+  MCInstBuilder &addExpr(const MCExpr *Val) {
+    Inst.addOperand(MCOperand::createExpr(Val));
+    return *this;
+  }
+
+  /// \brief Add a new MCInst operand.
+  MCInstBuilder &addInst(const MCInst *Val) {
+    Inst.addOperand(MCOperand::createInst(Val));
+    return *this;
+  }
+
+  /// \brief Add an operand.
+  MCInstBuilder &addOperand(const MCOperand &Op) {
+    Inst.addOperand(Op);
+    return *this;
+  }
+
+  operator MCInst&() {
+    return Inst;
+  }
+};
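+
+/// Example (editorial sketch, not part of the original header): the builder
+/// enables one-expression instruction creation, as is common in instruction
+/// lowering code. The opcode and register numbers are placeholders.
+///
+/// \code
+///   MCInst Inst = MCInstBuilder(PlaceholderOpcode)
+///                     .addReg(DstReg)
+///                     .addReg(SrcReg)
+///                     .addImm(4);
+/// \endcode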
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCInstPrinter.h b/linux-x64/clang/include/llvm/MC/MCInstPrinter.h
new file mode 100644
index 0000000..0694030
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstPrinter.h
@@ -0,0 +1,109 @@
+//===- MCInstPrinter.h - MCInst to target assembly syntax -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTPRINTER_H
+#define LLVM_MC_MCINSTPRINTER_H
+
+#include "llvm/Support/Format.h"
+#include <cstdint>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class MCAsmInfo;
+class MCInst;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class raw_ostream;
+class StringRef;
+
+/// Convert `Bytes' to a hex string and output to `OS'
+void dumpBytes(ArrayRef<uint8_t> Bytes, raw_ostream &OS);
+
+namespace HexStyle {
+
+enum Style {
+  C,  ///< 0xff
+  Asm ///< 0ffh
+};
+
+} // end namespace HexStyle
+
+/// \brief This is an instance of a target assembly language printer that
+/// converts an MCInst to valid target assembly syntax.
+class MCInstPrinter {
+protected:
+  /// \brief A stream that comments can be emitted to if desired.  Each comment
+  /// must end with a newline.  This will be null if verbose assembly emission
+  /// is disabled.
+  raw_ostream *CommentStream = nullptr;
+  const MCAsmInfo &MAI;
+  const MCInstrInfo &MII;
+  const MCRegisterInfo &MRI;
+
+  /// True if we are printing marked up assembly.
+  bool UseMarkup = false;
+
+  /// True if we are printing immediates as hex.
+  bool PrintImmHex = false;
+
+  /// Which style to use for printing hexadecimal values.
+  HexStyle::Style PrintHexStyle = HexStyle::C;
+
+  /// Utility function for printing annotations.
+  void printAnnotation(raw_ostream &OS, StringRef Annot);
+
+public:
+  MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
+                const MCRegisterInfo &mri) : MAI(mai), MII(mii), MRI(mri) {}
+
+  virtual ~MCInstPrinter();
+
+  /// \brief Specify a stream to emit comments to.
+  void setCommentStream(raw_ostream &OS) { CommentStream = &OS; }
+
+  /// \brief Print the specified MCInst to the specified raw_ostream.
+  virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
+                         const MCSubtargetInfo &STI) = 0;
+
+  /// \brief Return the name of the specified opcode enum (e.g. "MOV32ri") or
+  /// empty if we can't resolve it.
+  StringRef getOpcodeName(unsigned Opcode) const;
+
+  /// \brief Print the assembler register name.
+  virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
+
+  bool getUseMarkup() const { return UseMarkup; }
+  void setUseMarkup(bool Value) { UseMarkup = Value; }
+
+  /// Utility functions to make adding markups simpler.
+  StringRef markup(StringRef s) const;
+  StringRef markup(StringRef a, StringRef b) const;
+
+  bool getPrintImmHex() const { return PrintImmHex; }
+  void setPrintImmHex(bool Value) { PrintImmHex = Value; }
+
+  HexStyle::Style getPrintHexStyle() const { return PrintHexStyle; }
+  void setPrintHexStyle(HexStyle::Style Value) { PrintHexStyle = Value; }
+
+  /// Utility function to print immediates in decimal or hex.
+  format_object<int64_t> formatImm(int64_t Value) const {
+    return PrintImmHex ? formatHex(Value) : formatDec(Value);
+  }
+
+  /// Utility functions to print decimal/hexadecimal values.
+  format_object<int64_t> formatDec(int64_t Value) const;
+  format_object<int64_t> formatHex(int64_t Value) const;
+  format_object<uint64_t> formatHex(uint64_t Value) const;
+};
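+
+// Configuration sketch (illustrative): a disassembler driver that wants
+// MASM-style hex immediates could set up a target-created printer roughly as
+// follows:
+//
+//   MCInstPrinter *IP = TheTarget->createMCInstPrinter(...); // target hook
+//   IP->setPrintImmHex(true);
+//   IP->setPrintHexStyle(HexStyle::Asm); // 255 prints as 0ffh, not 0xff
+//   IP->printInst(&Inst, OS, /*Annot=*/"", STI);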
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCINSTPRINTER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCInstrAnalysis.h b/linux-x64/clang/include/llvm/MC/MCInstrAnalysis.h
new file mode 100644
index 0000000..dd3e1df
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstrAnalysis.h
@@ -0,0 +1,72 @@
+//===- llvm/MC/MCInstrAnalysis.h - InstrDesc target hooks -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MCInstrAnalysis class which the MCTargetDescs can
+// derive from to give additional information to MC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRANALYSIS_H
+#define LLVM_MC_MCINSTRANALYSIS_H
+
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MCInstrAnalysis {
+protected:
+  friend class Target;
+
+  const MCInstrInfo *Info;
+
+public:
+  MCInstrAnalysis(const MCInstrInfo *Info) : Info(Info) {}
+  virtual ~MCInstrAnalysis() = default;
+
+  virtual bool isBranch(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isBranch();
+  }
+
+  virtual bool isConditionalBranch(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isConditionalBranch();
+  }
+
+  virtual bool isUnconditionalBranch(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isUnconditionalBranch();
+  }
+
+  virtual bool isIndirectBranch(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isIndirectBranch();
+  }
+
+  virtual bool isCall(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isCall();
+  }
+
+  virtual bool isReturn(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isReturn();
+  }
+
+  virtual bool isTerminator(const MCInst &Inst) const {
+    return Info->get(Inst.getOpcode()).isTerminator();
+  }
+
+  /// \brief Given a branch instruction, try to get the address the branch
+  /// targets. Return true on success, with the address in \p Target.
+  virtual bool
+  evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
+                 uint64_t &Target) const;
+};
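+
+// Typical client sketch (names are illustrative): a disassembler walking
+// decoded instructions can use the hooks above to recover control flow:
+//
+//   if (MIA->isBranch(Inst) || MIA->isCall(Inst)) {
+//     uint64_t Target;
+//     if (MIA->evaluateBranch(Inst, Addr, Size, Target))
+//       addCodeReference(Target); // hypothetical callback
+//   }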
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCINSTRANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/MC/MCInstrDesc.h b/linux-x64/clang/include/llvm/MC/MCInstrDesc.h
new file mode 100644
index 0000000..ff4c756
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstrDesc.h
@@ -0,0 +1,591 @@
+//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MCOperandInfo and MCInstrDesc classes, which
+// are used to describe target instructions and their operands.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRDESC_H
+#define LLVM_MC_MCINSTRDESC_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+  class MCInst;
+  class MCSubtargetInfo;
+  class FeatureBitset;
+
+//===----------------------------------------------------------------------===//
+// Machine Operand Flags and Description
+//===----------------------------------------------------------------------===//
+
+namespace MCOI {
+// Operand constraints
+enum OperandConstraint {
+  TIED_TO = 0,  // Must be allocated the same register as.
+  EARLY_CLOBBER // Operand is an early clobber register operand
+};
+
+/// \brief These are flags set on operands, but should be considered
+/// private; all access should go through the MCOperandInfo accessors.
+/// See the accessors for a description of what these are.
+enum OperandFlags { LookupPtrRegClass = 0, Predicate, OptionalDef };
+
+/// \brief Operands are tagged with one of the values of this enum.
+enum OperandType {
+  OPERAND_UNKNOWN = 0,
+  OPERAND_IMMEDIATE = 1,
+  OPERAND_REGISTER = 2,
+  OPERAND_MEMORY = 3,
+  OPERAND_PCREL = 4,
+
+  OPERAND_FIRST_GENERIC = 6,
+  OPERAND_GENERIC_0 = 6,
+  OPERAND_GENERIC_1 = 7,
+  OPERAND_GENERIC_2 = 8,
+  OPERAND_GENERIC_3 = 9,
+  OPERAND_GENERIC_4 = 10,
+  OPERAND_GENERIC_5 = 11,
+  OPERAND_LAST_GENERIC = 11,
+
+  OPERAND_FIRST_TARGET = 12,
+};
+
+enum GenericOperandType {
+};
+
+} // end namespace MCOI
+
+/// \brief This holds information about one operand of a machine instruction,
+/// indicating the register class for register operands, etc.
+class MCOperandInfo {
+public:
+  /// \brief This specifies the register class enumeration of the operand
+  /// if the operand is a register.  If isLookupPtrRegClass is set, then this is
+  /// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to
+  /// get a dynamic register class.
+  int16_t RegClass;
+
+  /// \brief These are flags from the MCOI::OperandFlags enum.
+  uint8_t Flags;
+
+  /// \brief Information about the type of the operand.
+  uint8_t OperandType;
+  /// \brief The lower 16 bits are used to specify which constraints are set.
+  /// The higher 16 bits are used to specify the value of constraints (4 bits
+  /// each).
+  uint32_t Constraints;
+
+  /// \brief Set if this operand is a pointer value and it requires a callback
+  /// to look up its register class.
+  bool isLookupPtrRegClass() const {
+    return Flags & (1 << MCOI::LookupPtrRegClass);
+  }
+
+  /// \brief Set if this operand is one of the operands that make up the
+  /// predicate operand that controls an isPredicable() instruction.
+  bool isPredicate() const { return Flags & (1 << MCOI::Predicate); }
+
+  /// \brief Set if this operand is an optional def.
+  bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
+
+  bool isGenericType() const {
+    return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
+           OperandType <= MCOI::OPERAND_LAST_GENERIC;
+  }
+
+  unsigned getGenericTypeIndex() const {
+    assert(isGenericType() && "non-generic types don't have an index");
+    return OperandType - MCOI::OPERAND_FIRST_GENERIC;
+  }
+};
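+
+// Decoding sketch for the Constraints field described above (illustrative):
+// if operand I is tied to another operand, bit MCOI::TIED_TO of the low 16
+// bits is set and the 4-bit field at position 16 + TIED_TO * 4 holds the
+// index of the operand it is tied to:
+//
+//   const MCOperandInfo &OpI = Desc.OpInfo[I];
+//   if (OpI.Constraints & (1 << MCOI::TIED_TO)) {
+//     unsigned TiedTo = (OpI.Constraints >> (16 + MCOI::TIED_TO * 4)) & 0xf;
+//   }
+//
+// MCInstrDesc::getOperandConstraint (below) wraps exactly this computation.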
+
+//===----------------------------------------------------------------------===//
+// Machine Instruction Flags and Description
+//===----------------------------------------------------------------------===//
+
+namespace MCID {
+/// \brief These should be considered private to the implementation of the
+/// MCInstrDesc class.  Clients should use the predicate methods on MCInstrDesc,
+/// not use these directly.  These all correspond to bitfields in the
+/// MCInstrDesc::Flags field.
+enum Flag {
+  Variadic = 0,
+  HasOptionalDef,
+  Pseudo,
+  Return,
+  Call,
+  Barrier,
+  Terminator,
+  Branch,
+  IndirectBranch,
+  Compare,
+  MoveImm,
+  Bitcast,
+  Select,
+  DelaySlot,
+  FoldableAsLoad,
+  MayLoad,
+  MayStore,
+  Predicable,
+  NotDuplicable,
+  UnmodeledSideEffects,
+  Commutable,
+  ConvertibleTo3Addr,
+  UsesCustomInserter,
+  HasPostISelHook,
+  Rematerializable,
+  CheapAsAMove,
+  ExtraSrcRegAllocReq,
+  ExtraDefRegAllocReq,
+  RegSequence,
+  ExtractSubreg,
+  InsertSubreg,
+  Convergent,
+  Add
+};
+} // end namespace MCID
+
+/// \brief Describe properties that are true of each instruction in the target
+/// description file.  This captures information about side effects, register
+/// use and many other things.  There is one instance of this struct for each
+/// target instruction class, and the MachineInstr class points to this struct
+/// directly to describe itself.
+class MCInstrDesc {
+public:
+  unsigned short Opcode;         // The opcode number
+  unsigned short NumOperands;    // Num of args (may be more if variable_ops)
+  unsigned char NumDefs;         // Num of args that are definitions
+  unsigned char Size;            // Number of bytes in encoding.
+  unsigned short SchedClass;     // enum identifying instr sched class
+  uint64_t Flags;                // Flags identifying machine instr class
+  uint64_t TSFlags;              // Target Specific Flag values
+  const MCPhysReg *ImplicitUses; // Registers implicitly read by this instr
+  const MCPhysReg *ImplicitDefs; // Registers implicitly defined by this instr
+  const MCOperandInfo *OpInfo;   // 'NumOperands' entries about operands
+  // Subtarget feature that this is deprecated on, if any
+  // -1 implies this is not deprecated by any single feature. It may still be
+  // deprecated due to a "complex" reason, below.
+  int64_t DeprecatedFeature;
+
+  // A complex method to determine whether a certain instruction is
+  // deprecated or not, and to return the reason for deprecation.
+  bool (*ComplexDeprecationInfo)(MCInst &, const MCSubtargetInfo &,
+                                 std::string &);
+
+  /// \brief Returns the value of the specific constraint if
+  /// it is set. Returns -1 if it is not set.
+  int getOperandConstraint(unsigned OpNum,
+                           MCOI::OperandConstraint Constraint) const {
+    if (OpNum < NumOperands &&
+        (OpInfo[OpNum].Constraints & (1 << Constraint))) {
+      unsigned Pos = 16 + Constraint * 4;
+      return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf;
+    }
+    return -1;
+  }
+
+  /// \brief Returns true if a certain instruction is deprecated and if so
+  /// returns the reason in \p Info.
+  bool getDeprecatedInfo(MCInst &MI, const MCSubtargetInfo &STI,
+                         std::string &Info) const;
+
+  /// \brief Return the opcode number for this descriptor.
+  unsigned getOpcode() const { return Opcode; }
+
+  /// \brief Return the number of declared MachineOperands for this
+  /// MachineInstruction.  Note that variadic (isVariadic() returns true)
+  /// instructions may have additional operands at the end of the list, and note
+  /// that the machine instruction may include implicit register def/uses as
+  /// well.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  using const_opInfo_iterator = const MCOperandInfo *;
+
+  const_opInfo_iterator opInfo_begin() const { return OpInfo; }
+  const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; }
+
+  iterator_range<const_opInfo_iterator> operands() const {
+    return make_range(opInfo_begin(), opInfo_end());
+  }
+
+  /// \brief Return the number of MachineOperands that are register
+  /// definitions.  Register definitions always occur at the start of the
+  /// machine operand list.  This is the number of "outs" in the .td file,
+  /// and does not include implicit defs.
+  unsigned getNumDefs() const { return NumDefs; }
+
+  /// \brief Return flags of this instruction.
+  uint64_t getFlags() const { return Flags; }
+
+  /// \brief Return true if this instruction can have a variable number of
+  /// operands.  In this case, the variable operands will be after the normal
+  /// operands but before the implicit definitions and uses (if any are
+  /// present).
+  bool isVariadic() const { return Flags & (1ULL << MCID::Variadic); }
+
+  /// \brief Set if this instruction has an optional definition, e.g.
+  /// ARM instructions which can set condition code if 's' bit is set.
+  bool hasOptionalDef() const { return Flags & (1ULL << MCID::HasOptionalDef); }
+
+  /// \brief Return true if this is a pseudo instruction that doesn't
+  /// correspond to a real machine instruction.
+  bool isPseudo() const { return Flags & (1ULL << MCID::Pseudo); }
+
+  /// \brief Return true if the instruction is a return.
+  bool isReturn() const { return Flags & (1ULL << MCID::Return); }
+
+  /// \brief Return true if the instruction is an add instruction.
+  bool isAdd() const { return Flags & (1ULL << MCID::Add); }
+
+  /// \brief  Return true if the instruction is a call.
+  bool isCall() const { return Flags & (1ULL << MCID::Call); }
+
+  /// \brief Returns true if the specified instruction stops control flow
+  /// from executing the instruction immediately following it.  Examples include
+  /// unconditional branches and return instructions.
+  bool isBarrier() const { return Flags & (1ULL << MCID::Barrier); }
+
+  /// \brief Returns true if this instruction is part of the terminator for
+  /// a basic block.  Typically this is things like return and branch
+  /// instructions.
+  ///
+  /// Various passes use this to insert code into the bottom of a basic block,
+  /// but before control flow occurs.
+  bool isTerminator() const { return Flags & (1ULL << MCID::Terminator); }
+
+  /// \brief Returns true if this is a conditional, unconditional, or
+  /// indirect branch.  Predicates below can be used to discriminate between
+  /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+  /// get more information.
+  bool isBranch() const { return Flags & (1ULL << MCID::Branch); }
+
+  /// \brief Return true if this is an indirect branch, such as a
+  /// branch through a register.
+  bool isIndirectBranch() const { return Flags & (1ULL << MCID::IndirectBranch); }
+
+  /// \brief Return true if this is a branch which may fall
+  /// through to the next instruction or may transfer control flow to some other
+  /// block.  The TargetInstrInfo::AnalyzeBranch method can be used to get more
+  /// information about this branch.
+  bool isConditionalBranch() const {
+    return isBranch() && !isBarrier() && !isIndirectBranch();
+  }
+
+  /// \brief Return true if this is a branch which always
+  /// transfers control flow to some other block.  The
+  /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+  /// about this branch.
+  bool isUnconditionalBranch() const {
+    return isBranch() && isBarrier() && !isIndirectBranch();
+  }
+
+  /// \brief Return true if this is a branch or an instruction which directly
+  /// writes to the program counter. Considered 'may' affect rather than
+  /// 'does' affect as things like predication are not taken into account.
+  bool mayAffectControlFlow(const MCInst &MI, const MCRegisterInfo &RI) const;
+
+  /// \brief Return true if this instruction has a predicate operand
+  /// that controls execution. It may be set to 'always', or may be set to other
+  /// values. There are various methods in TargetInstrInfo that can be used to
+  /// control and modify the predicate in this instruction.
+  bool isPredicable() const { return Flags & (1ULL << MCID::Predicable); }
+
+  /// \brief Return true if this instruction is a comparison.
+  bool isCompare() const { return Flags & (1ULL << MCID::Compare); }
+
+  /// \brief Return true if this instruction is a move immediate
+  /// (including conditional moves) instruction.
+  bool isMoveImmediate() const { return Flags & (1ULL << MCID::MoveImm); }
+
+  /// \brief Return true if this instruction is a bitcast instruction.
+  bool isBitcast() const { return Flags & (1ULL << MCID::Bitcast); }
+
+  /// \brief Return true if this is a select instruction.
+  bool isSelect() const { return Flags & (1ULL << MCID::Select); }
+
+  /// \brief Return true if this instruction cannot be safely
+  /// duplicated.  For example, if the instruction has a unique labels attached
+  /// to it, duplicating it would cause multiple definition errors.
+  bool isNotDuplicable() const { return Flags & (1ULL << MCID::NotDuplicable); }
+
+  /// \brief Returns true if the specified instruction has a delay slot which
+  /// must be filled by the code generator.
+  bool hasDelaySlot() const { return Flags & (1ULL << MCID::DelaySlot); }
+
+  /// \brief Return true for instructions that can be folded as memory operands
+  /// in other instructions. The most common use for this is instructions that
+  /// are simple loads from memory that don't modify the loaded value in any
+  /// way, but it can also be used for instructions that can be expressed as
+  /// constant-pool loads, such as V_SETALLONES on x86, to allow them to be
+  /// folded when it is beneficial.  This should only be set on instructions
+  /// that return a value in their only virtual register definition.
+  bool canFoldAsLoad() const { return Flags & (1ULL << MCID::FoldableAsLoad); }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic REG_SEQUENCE instructions.
+  /// E.g., on ARM,
+  /// dX VMOVDRR rY, rZ
+  /// is equivalent to
+  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
+  /// overridden accordingly.
+  bool isRegSequenceLike() const { return Flags & (1ULL << MCID::RegSequence); }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic EXTRACT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// rX, rY VMOVRRD dZ
+  /// is equivalent to two EXTRACT_SUBREG:
+  /// rX = EXTRACT_SUBREG dZ, ssub_0
+  /// rY = EXTRACT_SUBREG dZ, ssub_1
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isExtractSubregLike() const {
+    return Flags & (1ULL << MCID::ExtractSubreg);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic INSERT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// dX = VSETLNi32 dY, rZ, Imm
+  /// is equivalent to an INSERT_SUBREG:
+  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isInsertSubregLike() const { return Flags & (1ULL << MCID::InsertSubreg); }
+
+  /// \brief Return true if this instruction is convergent.
+  ///
+  /// Convergent instructions may not be made control-dependent on any
+  /// additional values.
+  bool isConvergent() const { return Flags & (1ULL << MCID::Convergent); }
+
+  //===--------------------------------------------------------------------===//
+  // Side Effect Analysis
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return true if this instruction could possibly read memory.
+  /// Instructions with this flag set are not necessarily simple load
+  /// instructions, they may load a value and modify it, for example.
+  bool mayLoad() const { return Flags & (1ULL << MCID::MayLoad); }
+
+  /// \brief Return true if this instruction could possibly modify memory.
+  /// Instructions with this flag set are not necessarily simple store
+  /// instructions, they may store a modified value based on their operands, or
+  /// may not actually modify anything, for example.
+  bool mayStore() const { return Flags & (1ULL << MCID::MayStore); }
+
+  /// \brief Return true if this instruction has side
+  /// effects that are not modeled by other flags.  This does not return true
+  /// for instructions whose effects are captured by:
+  ///
+  ///  1. Their operand list and implicit definition/use list.  Register use/def
+  ///     info is explicit for instructions.
+  ///  2. Memory accesses.  Use mayLoad/mayStore.
+  ///  3. Calling, branching, returning: use isCall/isReturn/isBranch.
+  ///
+  /// Examples of side effects would be modifying 'invisible' machine state like
+  /// a control register, flushing a cache, modifying a register invisible to
+  /// LLVM, etc.
+  bool hasUnmodeledSideEffects() const {
+    return Flags & (1ULL << MCID::UnmodeledSideEffects);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Flags that indicate whether an instruction can be modified by a method.
+  //===--------------------------------------------------------------------===//
+
+  /// \brief Return true if this may be a 2- or 3-address instruction (of the
+  /// form "X = op Y, Z, ..."), which produces the same result if Y and Z are
+  /// exchanged.  If this flag is set, then the
+  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+  /// instruction.
+  ///
+  /// Note that this flag may be set on instructions that are only commutable
+  /// sometimes.  In these cases, the call to commuteInstruction will fail.
+  /// Also note that some instructions require non-trivial modification to
+  /// commute them.
+  bool isCommutable() const { return Flags & (1ULL << MCID::Commutable); }
+
+  /// \brief Return true if this is a 2-address instruction which can be changed
+  /// into a 3-address instruction if needed.  Doing this transformation can be
+  /// profitable in the register allocator, because it means that the
+  /// instruction can use a 2-address form if possible, but degrade into a less
+  /// efficient form if the source and dest register cannot be assigned to the
+  /// same register.  For example, this allows the x86 backend to turn a "shl
+  /// reg, 3" instruction into an LEA instruction, which is the same speed as
+  /// the shift but has bigger code size.
+  ///
+  /// If this returns true, then the target must implement the
+  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+  /// is allowed to fail if the transformation isn't valid for this specific
+  /// instruction (e.g. shl reg, 4 on x86).
+  ///
+  bool isConvertibleTo3Addr() const {
+    return Flags & (1ULL << MCID::ConvertibleTo3Addr);
+  }
+
+  /// \brief Return true if this instruction requires custom insertion support
+  /// when the DAG scheduler is inserting it into a machine basic block.  If
+  /// this is true for the instruction, it basically means that it is a pseudo
+  /// instruction used at SelectionDAG time that is expanded out into magic code
+  /// by the target when MachineInstrs are formed.
+  ///
+  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+  /// is used to insert this into the MachineBasicBlock.
+  bool usesCustomInsertionHook() const {
+    return Flags & (1ULL << MCID::UsesCustomInserter);
+  }
+
+  /// \brief Return true if this instruction requires *adjustment* after
+  /// instruction selection by calling a target hook. For example, this can be
+  /// used to fill in ARM 's' optional operand depending on whether the
+  /// conditional flag register is used.
+  bool hasPostISelHook() const { return Flags & (1ULL << MCID::HasPostISelHook); }
+
+  /// \brief Returns true if this instruction is a candidate for remat. This
+  /// flag is only used in TargetInstrInfo method isTriviallyRematerializable.
+  ///
+  /// If this flag is set, the isReallyTriviallyReMaterializable()
+  /// or isReallyTriviallyReMaterializableGeneric methods are called to verify
+  /// the instruction is really rematerializable.
+  bool isRematerializable() const {
+    return Flags & (1ULL << MCID::Rematerializable);
+  }
+
+  /// \brief Returns true if this instruction has the same cost (or less) than a
+  /// move instruction. This is useful during certain types of optimizations
+  /// (e.g., remat during two-address conversion or machine licm) where we would
+  /// like to remat or hoist the instruction, but not if it costs more than
+  /// moving the instruction into the appropriate register. Note, we are not
+  /// marking copies from and to the same register class with this flag.
+  ///
+  /// This method could be called by interface TargetInstrInfo::isAsCheapAsAMove
+  /// for different subtargets.
+  bool isAsCheapAsAMove() const { return Flags & (1ULL << MCID::CheapAsAMove); }
+
+  /// \brief Returns true if this instruction's source operands have special
+  /// register allocation requirements that are not captured by the operand
+  /// register classes. e.g. ARM::STRD's two source registers must be an even /
+  /// odd pair, ARM::STM registers have to be in ascending order.  Post-register
+  /// allocation passes should not attempt to change allocations for sources of
+  /// instructions with this flag.
+  bool hasExtraSrcRegAllocReq() const {
+    return Flags & (1ULL << MCID::ExtraSrcRegAllocReq);
+  }
+
+  /// \brief Returns true if this instruction's def operands have special register
+  /// allocation requirements that are not captured by the operand register
+  /// classes. e.g. ARM::LDRD's two def registers must be an even / odd pair,
+  /// ARM::LDM registers have to be in ascending order.  Post-register
+  /// allocation passes should not attempt to change allocations for definitions
+  /// of instructions with this flag.
+  bool hasExtraDefRegAllocReq() const {
+    return Flags & (1ULL << MCID::ExtraDefRegAllocReq);
+  }
+
+  /// \brief Return a list of registers that are potentially read by any
+  /// instance of this machine instruction.  For example, on X86, the "adc"
+  /// instruction adds two register operands and adds the carry bit in from the
+  /// flags register.  In this case, the instruction is marked as implicitly
+  /// reading the flags.  Likewise, the variable shift instruction on X86 is
+  /// marked as implicitly reading the 'CL' register, which it always does.
+  ///
+  /// This method returns null if the instruction has no implicit uses.
+  const MCPhysReg *getImplicitUses() const { return ImplicitUses; }
+
+  /// \brief Return the number of implicit uses this instruction has.
+  unsigned getNumImplicitUses() const {
+    if (!ImplicitUses)
+      return 0;
+    unsigned i = 0;
+    for (; ImplicitUses[i]; ++i) /*empty*/
+      ;
+    return i;
+  }
+
+  /// \brief Return a list of registers that are potentially written by any
+  /// instance of this machine instruction.  For example, on X86, many
+  /// instructions implicitly set the flags register.  In this case, they are
+  /// marked as setting the FLAGS.  Likewise, many instructions always deposit
+  /// their result in a physical register.  For example, the X86 divide
+  /// instruction always deposits the quotient and remainder in the EAX/EDX
+  /// registers.  For that instruction, this will return a list containing the
+  /// EAX/EDX/EFLAGS registers.
+  ///
+  /// This method returns null if the instruction has no implicit defs.
+  const MCPhysReg *getImplicitDefs() const { return ImplicitDefs; }
+
+  /// \brief Return the number of implicit defs this instruction has.
+  unsigned getNumImplicitDefs() const {
+    if (!ImplicitDefs)
+      return 0;
+    unsigned i = 0;
+    for (; ImplicitDefs[i]; ++i) /*empty*/
+      ;
+    return i;
+  }
+
+  /// \brief Return true if this instruction implicitly
+  /// uses the specified physical register.
+  bool hasImplicitUseOfPhysReg(unsigned Reg) const {
+    if (const MCPhysReg *ImpUses = ImplicitUses)
+      for (; *ImpUses; ++ImpUses)
+        if (*ImpUses == Reg)
+          return true;
+    return false;
+  }
+
+  /// \brief Return true if this instruction implicitly
+  /// defines the specified physical register.
+  bool hasImplicitDefOfPhysReg(unsigned Reg,
+                               const MCRegisterInfo *MRI = nullptr) const;
+
+  /// \brief Return the scheduling class for this instruction.  The
+  /// scheduling class is an index into the InstrItineraryData table.  This
+  /// returns zero if there is no known scheduling information for the
+  /// instruction.
+  unsigned getSchedClass() const { return SchedClass; }
+
+  /// \brief Return the number of bytes in the encoding of this instruction,
+  /// or zero if the encoding size cannot be known from the opcode.
+  unsigned getSize() const { return Size; }
+
+  /// \brief Find the index of the first operand in the
+  /// operand list that is used to represent the predicate. It returns -1 if
+  /// none is found.
+  int findFirstPredOperandIdx() const {
+    if (isPredicable()) {
+      for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+        if (OpInfo[i].isPredicate())
+          return i;
+    }
+    return -1;
+  }
+
+  /// \brief Return true if this instruction defines the specified physical
+  /// register, either explicitly or implicitly.
+  bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg,
+                       const MCRegisterInfo &RI) const;
+};
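+
+// Classification sketch (illustrative): the branch predicates above partition
+// branch descriptors by the Barrier and IndirectBranch flags:
+//
+//   const MCInstrDesc &D = MII.get(Inst.getOpcode());
+//   if (D.isBranch()) {
+//     if (D.isConditionalBranch())        // may fall through
+//       ...
+//     else if (D.isUnconditionalBranch()) // always transfers control
+//       ...
+//     else                                // indirect (e.g. through a register)
+//       ...
+//   }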
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCInstrInfo.h b/linux-x64/clang/include/llvm/MC/MCInstrInfo.h
new file mode 100644
index 0000000..80f1f32
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstrInfo.h
@@ -0,0 +1,59 @@
+//===-- llvm/MC/MCInstrInfo.h - Target Instruction Info ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target machine instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRINFO_H
+#define LLVM_MC_MCINSTRINFO_H
+
+#include "llvm/MC/MCInstrDesc.h"
+#include <cassert>
+
+namespace llvm {
+
+//---------------------------------------------------------------------------
+/// \brief Interface to description of machine instruction set.
+class MCInstrInfo {
+  const MCInstrDesc *Desc;          // Raw array to allow static init'n
+  const unsigned *InstrNameIndices; // Array for name indices in InstrNameData
+  const char *InstrNameData;        // Instruction name string pool
+  unsigned NumOpcodes;              // Number of entries in the desc array
+
+public:
+  /// \brief Initialize MCInstrInfo, called by TableGen auto-generated routines.
+  /// *DO NOT USE*.
+  void InitMCInstrInfo(const MCInstrDesc *D, const unsigned *NI, const char *ND,
+                       unsigned NO) {
+    Desc = D;
+    InstrNameIndices = NI;
+    InstrNameData = ND;
+    NumOpcodes = NO;
+  }
+
+  unsigned getNumOpcodes() const { return NumOpcodes; }
+
+  /// \brief Return the machine instruction descriptor that corresponds to the
+  /// specified instruction opcode.
+  const MCInstrDesc &get(unsigned Opcode) const {
+    assert(Opcode < NumOpcodes && "Invalid opcode!");
+    return Desc[Opcode];
+  }
+
+  /// \brief Returns the name for the instructions with the given opcode.
+  StringRef getName(unsigned Opcode) const {
+    assert(Opcode < NumOpcodes && "Invalid opcode!");
+    return StringRef(&InstrNameData[InstrNameIndices[Opcode]]);
+  }
+};
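+
+// Usage sketch (illustrative; MII is obtained from the target's MCTargetDesc
+// registration, e.g. via the TargetRegistry):
+//
+//   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+//   StringRef Name = MII.getName(Inst.getOpcode()); // e.g. "MOV32ri"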
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCInstrItineraries.h b/linux-x64/clang/include/llvm/MC/MCInstrItineraries.h
new file mode 100644
index 0000000..f0824e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCInstrItineraries.h
@@ -0,0 +1,235 @@
+//===- llvm/MC/MCInstrItineraries.h - Scheduling ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the structures used for instruction
+// itineraries, stages, and operand reads/writes.  This is used by
+// schedulers to determine instruction stages and latencies.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRITINERARIES_H
+#define LLVM_MC_MCINSTRITINERARIES_H
+
+#include "llvm/MC/MCSchedule.h"
+#include <algorithm>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// These values represent a non-pipelined step in
+/// the execution of an instruction.  Cycles represents the number of
+/// discrete time slots needed to complete the stage.  Units represent
+/// the choice of functional units that can be used to complete the
+/// stage.  E.g., IntUnit1, IntUnit2.  NextCycles indicates how many
+/// cycles should elapse from the start of this stage to the start of
+/// the next stage in the itinerary. A value of -1 indicates that the
+/// next stage should start immediately after the current one.
+/// For example:
+///
+///   { 1, x, -1 }
+///      indicates that the stage occupies FU x for 1 cycle and that
+///      the next stage starts immediately after this one.
+///
+///   { 2, x|y, 1 }
+///      indicates that the stage occupies either FU x or FU y for 2
+///      consecutive cycles and that the next stage starts one cycle
+///      after this stage starts. That is, the stage requirements
+///      overlap in time.
+///
+///   { 1, x, 0 }
+///      indicates that the stage occupies FU x for 1 cycle and that
+///      the next stage starts in this same cycle. This can be used to
+///      indicate that the instruction requires multiple stages at the
+///      same time.
+///
+/// FU reservation can be of two different kinds:
+///  - FUs which the instruction actually requires.
+///  - FUs which the instruction just reserves. A reserved unit is not
+///    available for the execution of other instructions, but several
+///    instructions can reserve the same unit several times.
+/// These two kinds of reservation are used to model instruction domain
+/// change stalls, FUs sharing the same resource (e.g. the same register
+/// file), etc.
+
+struct InstrStage {
+  enum ReservationKinds {
+    Required = 0,
+    Reserved = 1
+  };
+
+  unsigned Cycles_;  ///< Length of stage in machine cycles
+  unsigned Units_;   ///< Choice of functional units
+  int NextCycles_;   ///< Number of machine cycles to next stage
+  ReservationKinds Kind_; ///< Kind of the FU reservation
+
+  /// \brief Returns the number of cycles the stage is occupied.
+  unsigned getCycles() const {
+    return Cycles_;
+  }
+
+  /// \brief Returns the choice of FUs.
+  unsigned getUnits() const {
+    return Units_;
+  }
+
+  ReservationKinds getReservationKind() const {
+    return Kind_;
+  }
+
+  /// \brief Returns the number of cycles from the start of this stage to the
+  /// start of the next stage in the itinerary.
+  unsigned getNextCycles() const {
+    return (NextCycles_ >= 0) ? (unsigned)NextCycles_ : Cycles_;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// An itinerary represents the scheduling information for an instruction.
+/// This includes a set of stages occupied by the instruction and the pipeline
+/// cycle in which operands are read and written.
+///
+struct InstrItinerary {
+  int16_t  NumMicroOps;        ///< # of micro-ops, -1 means it's variable
+  uint16_t FirstStage;         ///< Index of first stage in itinerary
+  uint16_t LastStage;          ///< Index of last + 1 stage in itinerary
+  uint16_t FirstOperandCycle;  ///< Index of first operand rd/wr
+  uint16_t LastOperandCycle;   ///< Index of last + 1 operand rd/wr
+};
+
+//===----------------------------------------------------------------------===//
+/// Itinerary data supplied by a subtarget to be used by a target.
+///
+class InstrItineraryData {
+public:
+  MCSchedModel SchedModel =
+      MCSchedModel::GetDefaultSchedModel(); ///< Basic machine properties.
+  const InstrStage *Stages = nullptr;       ///< Array of stages selected
+  const unsigned *OperandCycles = nullptr; ///< Array of operand cycles selected
+  const unsigned *Forwardings = nullptr; ///< Array of pipeline forwarding paths
+  const InstrItinerary *Itineraries =
+      nullptr; ///< Array of itineraries selected
+
+  InstrItineraryData() = default;
+  InstrItineraryData(const MCSchedModel &SM, const InstrStage *S,
+                     const unsigned *OS, const unsigned *F)
+    : SchedModel(SM), Stages(S), OperandCycles(OS), Forwardings(F),
+      Itineraries(SchedModel.InstrItineraries) {}
+
+  /// \brief Returns true if there are no itineraries.
+  bool isEmpty() const { return Itineraries == nullptr; }
+
+  /// \brief Returns true if the index is for the end marker itinerary.
+  bool isEndMarker(unsigned ItinClassIndx) const {
+    return ((Itineraries[ItinClassIndx].FirstStage == UINT16_MAX) &&
+            (Itineraries[ItinClassIndx].LastStage == UINT16_MAX));
+  }
+
+  /// \brief Return the first stage of the itinerary.
+  const InstrStage *beginStage(unsigned ItinClassIndx) const {
+    unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
+    return Stages + StageIdx;
+  }
+
+  /// \brief Return the last+1 stage of the itinerary.
+  const InstrStage *endStage(unsigned ItinClassIndx) const {
+    unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
+    return Stages + StageIdx;
+  }
+
+  /// \brief Return the total stage latency of the given class.  The latency is
+  /// the maximum completion time for any stage in the itinerary.  If no stages
+  /// exist, it defaults to one cycle.
+  unsigned getStageLatency(unsigned ItinClassIndx) const {
+    // If the target doesn't provide itinerary information, use a simple
+    // non-zero default value for all instructions.
+    if (isEmpty())
+      return 1;
+
+    // Calculate the maximum completion time for any stage.
+    unsigned Latency = 0, StartCycle = 0;
+    for (const InstrStage *IS = beginStage(ItinClassIndx),
+           *E = endStage(ItinClassIndx); IS != E; ++IS) {
+      Latency = std::max(Latency, StartCycle + IS->getCycles());
+      StartCycle += IS->getNextCycles();
+    }
+    return Latency;
+  }
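+
+  // Worked example (illustrative): for stages { 2, x, 1 } followed by
+  // { 3, y, -1 }, the first stage completes at cycle 0 + 2 = 2 and the
+  // second starts at cycle 1 and completes at 1 + 3 = 4, so
+  // getStageLatency() returns 4.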
+
+  /// \brief Return the cycle for the given class and operand.  Return -1 if no
+  /// cycle is specified for the operand.
+  int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const {
+    if (isEmpty())
+      return -1;
+
+    unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
+    unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
+    if ((FirstIdx + OperandIdx) >= LastIdx)
+      return -1;
+
+    return (int)OperandCycles[FirstIdx + OperandIdx];
+  }
+
+  /// \brief Return true if there is a pipeline forwarding between instructions
+  /// of itinerary classes DefClass and UseClass so that the value produced by an
+  /// instruction of itinerary class DefClass, operand index DefIdx can be
+  /// bypassed when it's read by an instruction of itinerary class UseClass,
+  /// operand index UseIdx.
+  bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
+                             unsigned UseClass, unsigned UseIdx) const {
+    unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
+    unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
+    if ((FirstDefIdx + DefIdx) >= LastDefIdx)
+      return false;
+    if (Forwardings[FirstDefIdx + DefIdx] == 0)
+      return false;
+
+    unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
+    unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
+    if ((FirstUseIdx + UseIdx) >= LastUseIdx)
+      return false;
+
+    return Forwardings[FirstDefIdx + DefIdx] ==
+      Forwardings[FirstUseIdx + UseIdx];
+  }
+
+  /// \brief Compute and return the use operand latency of a given itinerary
+  /// class and operand index if the value is produced by an instruction of the
+  /// specified itinerary class and def operand index.
+  int getOperandLatency(unsigned DefClass, unsigned DefIdx,
+                        unsigned UseClass, unsigned UseIdx) const {
+    if (isEmpty())
+      return -1;
+
+    int DefCycle = getOperandCycle(DefClass, DefIdx);
+    if (DefCycle == -1)
+      return -1;
+
+    int UseCycle = getOperandCycle(UseClass, UseIdx);
+    if (UseCycle == -1)
+      return -1;
+
+    UseCycle = DefCycle - UseCycle + 1;
+    if (UseCycle > 0 &&
+        hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
+      // FIXME: This assumes one cycle benefit for every pipeline forwarding.
+      --UseCycle;
+    return UseCycle;
+  }
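+
+  // Worked example (illustrative): if the def operand is written in cycle 3
+  // and the use operand is read in cycle 1, the latency is 3 - 1 + 1 = 3
+  // cycles, reduced to 2 when hasPipelineForwarding() reports a forwarding
+  // path between the two operands.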
+
+  /// \brief Return the number of micro-ops that the given class decodes to.
+  /// Return -1 for classes that require dynamic lookup via TargetInstrInfo.
+  int getNumMicroOps(unsigned ItinClassIndx) const {
+    if (isEmpty())
+      return 1;
+    return Itineraries[ItinClassIndx].NumMicroOps;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCINSTRITINERARIES_H
diff --git a/linux-x64/clang/include/llvm/MC/MCLabel.h b/linux-x64/clang/include/llvm/MC/MCLabel.h
new file mode 100644
index 0000000..b6579fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCLabel.h
@@ -0,0 +1,57 @@
+//===- MCLabel.h - Machine Code Directional Local Labels --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCLabel class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCLABEL_H
+#define LLVM_MC_MCLABEL_H
+
+namespace llvm {
+
+class raw_ostream;
+
+/// \brief Instances of this class represent a label name in the MC file.
+/// MCLabels are created and uniqued by the MCContext class, and an MCLabel
+/// should only be constructed for valid instances in the object file.
+class MCLabel {
+  /// \brief The instance number of this Directional Local Label.
+  unsigned Instance;
+
+private: // MCContext creates and uniques these.
+  friend class MCContext;
+
+  MCLabel(unsigned instance) : Instance(instance) {}
+
+public:
+  MCLabel(const MCLabel &) = delete;
+  MCLabel &operator=(const MCLabel &) = delete;
+
+  /// \brief Get the current instance of this Directional Local Label.
+  unsigned getInstance() const { return Instance; }
+
+  /// \brief Increment the current instance of this Directional Local Label.
+  unsigned incInstance() { return ++Instance; }
+
+  /// \brief Print the value to the stream \p OS.
+  void print(raw_ostream &OS) const;
+
+  /// \brief Print the value to stderr.
+  void dump() const;
+};
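+
+// Background sketch (illustrative): directional local labels are the
+// assembler's numeric labels; each redefinition bumps the instance number
+// that MCContext tracks through this class:
+//
+//   1:  ...
+//       b 1b   // refers back to the most recent definition of "1:"
+//   1:  ...    // redefining "1:" increments the instance
+//       b 1f   // refers forward to the next definition of "1:"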
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCLabel &Label) {
+  Label.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCLABEL_H
diff --git a/linux-x64/clang/include/llvm/MC/MCLinkerOptimizationHint.h b/linux-x64/clang/include/llvm/MC/MCLinkerOptimizationHint.h
new file mode 100644
index 0000000..f0fd07f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCLinkerOptimizationHint.h
@@ -0,0 +1,187 @@
+//===- MCLinkerOptimizationHint.h - LOH interface ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some helper classes to handle Linker Optimization Hints
+// (LOH).
+//
+// FIXME: LOH interface supports only MachO format at the moment.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
+#define LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+class MachObjectWriter;
+class MCAsmLayout;
+class MCSymbol;
+
+/// Linker Optimization Hint Type.
+enum MCLOHType {
+  MCLOH_AdrpAdrp = 0x1u,      ///< Adrp xY, _v1@PAGE -> Adrp xY, _v2@PAGE.
+  MCLOH_AdrpLdr = 0x2u,       ///< Adrp _v@PAGE -> Ldr _v@PAGEOFF.
+  MCLOH_AdrpAddLdr = 0x3u,    ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Ldr.
+  MCLOH_AdrpLdrGotLdr = 0x4u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Ldr.
+  MCLOH_AdrpAddStr = 0x5u,    ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Str.
+  MCLOH_AdrpLdrGotStr = 0x6u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Str.
+  MCLOH_AdrpAdd = 0x7u,       ///< Adrp _v@PAGE -> Add _v@PAGEOFF.
+  MCLOH_AdrpLdrGot = 0x8u     ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF.
+};
+
+static inline StringRef MCLOHDirectiveName() {
+  return StringRef(".loh");
+}
+
+static inline bool isValidMCLOHType(unsigned Kind) {
+  return Kind >= MCLOH_AdrpAdrp && Kind <= MCLOH_AdrpLdrGot;
+}
+
+static inline int MCLOHNameToId(StringRef Name) {
+#define MCLOHCaseNameToId(Name)     .Case(#Name, MCLOH_ ## Name)
+  return StringSwitch<int>(Name)
+    MCLOHCaseNameToId(AdrpAdrp)
+    MCLOHCaseNameToId(AdrpLdr)
+    MCLOHCaseNameToId(AdrpAddLdr)
+    MCLOHCaseNameToId(AdrpLdrGotLdr)
+    MCLOHCaseNameToId(AdrpAddStr)
+    MCLOHCaseNameToId(AdrpLdrGotStr)
+    MCLOHCaseNameToId(AdrpAdd)
+    MCLOHCaseNameToId(AdrpLdrGot)
+    .Default(-1);
+}
+
+static inline StringRef MCLOHIdToName(MCLOHType Kind) {
+#define MCLOHCaseIdToName(Name)      case MCLOH_ ## Name: return StringRef(#Name);
+  switch (Kind) {
+    MCLOHCaseIdToName(AdrpAdrp);
+    MCLOHCaseIdToName(AdrpLdr);
+    MCLOHCaseIdToName(AdrpAddLdr);
+    MCLOHCaseIdToName(AdrpLdrGotLdr);
+    MCLOHCaseIdToName(AdrpAddStr);
+    MCLOHCaseIdToName(AdrpLdrGotStr);
+    MCLOHCaseIdToName(AdrpAdd);
+    MCLOHCaseIdToName(AdrpLdrGot);
+  }
+  return StringRef();
+}
+
+static inline int MCLOHIdToNbArgs(MCLOHType Kind) {
+  switch (Kind) {
+    // LOH with two arguments
+  case MCLOH_AdrpAdrp:
+  case MCLOH_AdrpLdr:
+  case MCLOH_AdrpAdd:
+  case MCLOH_AdrpLdrGot:
+    return 2;
+    // LOH with three arguments
+  case MCLOH_AdrpAddLdr:
+  case MCLOH_AdrpLdrGotLdr:
+  case MCLOH_AdrpAddStr:
+  case MCLOH_AdrpLdrGotStr:
+    return 3;
+  }
+  return -1;
+}
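+
+// Assembly-level sketch (illustrative labels): the directive name from
+// MCLOHDirectiveName() combines with an MCLOHIdToName() kind and the
+// MCLOHIdToNbArgs() argument count, so a two-argument AdrpAdd hint is
+// written as:
+//
+//   .loh AdrpAdd Lloh0, Lloh1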
+
+/// Store Linker Optimization Hint information (LOH).
+class MCLOHDirective {
+  MCLOHType Kind;
+
+  /// Arguments of this directive. Order matters.
+  SmallVector<MCSymbol *, 3> Args;
+
+  /// Emit this directive in \p OutStream using the information available
+  /// in the given \p ObjWriter and \p Layout to get the address of the
+  /// arguments within the object file.
+  void emit_impl(raw_ostream &OutStream, const MachObjectWriter &ObjWriter,
+                 const MCAsmLayout &Layout) const;
+
+public:
+  using LOHArgs = SmallVectorImpl<MCSymbol *>;
+
+  MCLOHDirective(MCLOHType Kind, const LOHArgs &Args)
+      : Kind(Kind), Args(Args.begin(), Args.end()) {
+    assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
+  }
+
+  MCLOHType getKind() const { return Kind; }
+
+  const LOHArgs &getArgs() const { return Args; }
+
+  /// Emit this directive as:
+  /// <kind, numArgs, addr1, ..., addrN>
+  void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const;
+
+  /// Get the size in bytes of this directive if emitted in \p ObjWriter with
+  /// the given \p Layout.
+  uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
+                       const MCAsmLayout &Layout) const;
+};
+
+class MCLOHContainer {
+  /// Keep track of the emit size of all the LOHs.
+  mutable uint64_t EmitSize = 0;
+
+  /// Keep track of all LOH directives.
+  SmallVector<MCLOHDirective, 32> Directives;
+
+public:
+  using LOHDirectives = SmallVectorImpl<MCLOHDirective>;
+
+  MCLOHContainer() = default;
+
+  /// Const accessor to the directives.
+  const LOHDirectives &getDirectives() const {
+    return Directives;
+  }
+
+  /// Add the directive of the given kind \p Kind with the given arguments
+  /// \p Args to the container.
+  void addDirective(MCLOHType Kind, const MCLOHDirective::LOHArgs &Args) {
+    Directives.push_back(MCLOHDirective(Kind, Args));
+  }
+
+  /// Get the size of the directives if emitted.
+  uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
+                       const MCAsmLayout &Layout) const {
+    if (!EmitSize) {
+      for (const MCLOHDirective &D : Directives)
+        EmitSize += D.getEmitSize(ObjWriter, Layout);
+    }
+    return EmitSize;
+  }
+
+  /// Emit all Linker Optimization Hints in one big table.
+  /// Each line of the table is emitted by LOHDirective::emit.
+  void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const {
+    for (const MCLOHDirective &D : Directives)
+      D.emit(ObjWriter, Layout);
+  }
+
+  void reset() {
+    Directives.clear();
+    EmitSize = 0;
+  }
+};
+
+// Add types for specialized template using MCSymbol.
+using MCLOHArgs = MCLOHDirective::LOHArgs;
+using MCLOHDirectives = MCLOHContainer::LOHDirectives;
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
diff --git a/linux-x64/clang/include/llvm/MC/MCMachObjectWriter.h b/linux-x64/clang/include/llvm/MC/MCMachObjectWriter.h
new file mode 100644
index 0000000..594869f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCMachObjectWriter.h
@@ -0,0 +1,279 @@
+//===- llvm/MC/MCMachObjectWriter.h - Mach Object Writer --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCMACHOBJECTWRITER_H
+#define LLVM_MC_MCMACHOBJECTWRITER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class MachObjectWriter;
+
+class MCMachObjectTargetWriter {
+  const unsigned Is64Bit : 1;
+  const uint32_t CPUType;
+  const uint32_t CPUSubtype;
+  unsigned LocalDifference_RIT;
+
+protected:
+  MCMachObjectTargetWriter(bool Is64Bit_, uint32_t CPUType_,
+                           uint32_t CPUSubtype_);
+
+  void setLocalDifferenceRelocationType(unsigned Type) {
+    LocalDifference_RIT = Type;
+  }
+
+public:
+  virtual ~MCMachObjectTargetWriter();
+
+  /// \name Lifetime Management
+  /// @{
+
+  virtual void reset() {}
+
+  /// @}
+
+  /// \name Accessors
+  /// @{
+
+  bool is64Bit() const { return Is64Bit; }
+  uint32_t getCPUType() const { return CPUType; }
+  uint32_t getCPUSubtype() const { return CPUSubtype; }
+  unsigned getLocalDifferenceRelocationType() const {
+    return LocalDifference_RIT;
+  }
+
+  /// @}
+
+  /// \name API
+  /// @{
+
+  virtual void recordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
+                                const MCAsmLayout &Layout,
+                                const MCFragment *Fragment,
+                                const MCFixup &Fixup, MCValue Target,
+                                uint64_t &FixedValue) = 0;
+
+  /// @}
+};
+
+class MachObjectWriter : public MCObjectWriter {
+  /// Helper struct for containing some precomputed information on symbols.
+  struct MachSymbolData {
+    const MCSymbol *Symbol;
+    uint64_t StringIndex;
+    uint8_t SectionIndex;
+
+    // Support lexicographic sorting.
+    bool operator<(const MachSymbolData &RHS) const;
+  };
+
+  /// The target specific Mach-O writer instance.
+  std::unique_ptr<MCMachObjectTargetWriter> TargetObjectWriter;
+
+  /// \name Relocation Data
+  /// @{
+
+  struct RelAndSymbol {
+    const MCSymbol *Sym;
+    MachO::any_relocation_info MRE;
+    RelAndSymbol(const MCSymbol *Sym, const MachO::any_relocation_info &MRE)
+        : Sym(Sym), MRE(MRE) {}
+  };
+
+  DenseMap<const MCSection *, std::vector<RelAndSymbol>> Relocations;
+  DenseMap<const MCSection *, unsigned> IndirectSymBase;
+
+  SectionAddrMap SectionAddress;
+
+  /// @}
+  /// \name Symbol Table Data
+  /// @{
+
+  StringTableBuilder StringTable{StringTableBuilder::MachO};
+  std::vector<MachSymbolData> LocalSymbolData;
+  std::vector<MachSymbolData> ExternalSymbolData;
+  std::vector<MachSymbolData> UndefinedSymbolData;
+
+  /// @}
+
+  MachSymbolData *findSymbolData(const MCSymbol &Sym);
+
+public:
+  MachObjectWriter(std::unique_ptr<MCMachObjectTargetWriter> MOTW,
+                   raw_pwrite_stream &OS, bool IsLittleEndian)
+      : MCObjectWriter(OS, IsLittleEndian),
+        TargetObjectWriter(std::move(MOTW)) {}
+
+  const MCSymbol &findAliasedSymbol(const MCSymbol &Sym) const;
+
+  /// \name Lifetime management Methods
+  /// @{
+
+  void reset() override;
+
+  /// @}
+
+  /// \name Utility Methods
+  /// @{
+
+  bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
+
+  SectionAddrMap &getSectionAddressMap() { return SectionAddress; }
+
+  uint64_t getSectionAddress(const MCSection *Sec) const {
+    return SectionAddress.lookup(Sec);
+  }
+  uint64_t getSymbolAddress(const MCSymbol &S, const MCAsmLayout &Layout) const;
+
+  uint64_t getFragmentAddress(const MCFragment *Fragment,
+                              const MCAsmLayout &Layout) const;
+
+  uint64_t getPaddingSize(const MCSection *SD, const MCAsmLayout &Layout) const;
+
+  bool doesSymbolRequireExternRelocation(const MCSymbol &S);
+
+  /// @}
+
+  /// \name Target Writer Proxy Accessors
+  /// @{
+
+  bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+  bool isX86_64() const {
+    uint32_t CPUType = TargetObjectWriter->getCPUType();
+    return CPUType == MachO::CPU_TYPE_X86_64;
+  }
+
+  /// @}
+
+  void writeHeader(MachO::HeaderFileType Type, unsigned NumLoadCommands,
+                   unsigned LoadCommandsSize, bool SubsectionsViaSymbols);
+
+  /// Write a segment load command.
+  ///
+  /// \param NumSections The number of sections in this segment.
+  /// \param SectionDataSize The total size of the sections.
+  void writeSegmentLoadCommand(StringRef Name, unsigned NumSections,
+                               uint64_t VMAddr, uint64_t VMSize,
+                               uint64_t SectionDataStartOffset,
+                               uint64_t SectionDataSize, uint32_t MaxProt,
+                               uint32_t InitProt);
+
+  void writeSection(const MCAsmLayout &Layout, const MCSection &Sec,
+                    uint64_t VMAddr, uint64_t FileOffset, unsigned Flags,
+                    uint64_t RelocationsStart, unsigned NumRelocations);
+
+  void writeSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
+                              uint32_t StringTableOffset,
+                              uint32_t StringTableSize);
+
+  void writeDysymtabLoadCommand(
+      uint32_t FirstLocalSymbol, uint32_t NumLocalSymbols,
+      uint32_t FirstExternalSymbol, uint32_t NumExternalSymbols,
+      uint32_t FirstUndefinedSymbol, uint32_t NumUndefinedSymbols,
+      uint32_t IndirectSymbolOffset, uint32_t NumIndirectSymbols);
+
+  void writeNlist(MachSymbolData &MSD, const MCAsmLayout &Layout);
+
+  void writeLinkeditLoadCommand(uint32_t Type, uint32_t DataOffset,
+                                uint32_t DataSize);
+
+  void writeLinkerOptionsLoadCommand(const std::vector<std::string> &Options);
+
+  // FIXME: We really need to improve the relocation validation. Basically, we
+  // want to implement a separate computation which evaluates the relocation
+  // entry as the linker would, and verifies that the resultant fixup value is
+  // exactly what the encoder wanted. This will catch several classes of
+  // problems:
+  //
+  //  - Relocation entry bugs, the two algorithms are unlikely to have the same
+  //    exact bug.
+  //
+  //  - Relaxation issues, where we forget to relax something.
+  //
+  //  - Input errors, where something cannot be correctly encoded. 'as' allows
+  //    these through in many cases.
+
+  // Add a relocation to be output in the object file. At the time this is
+  // called, the symbol indices are not yet known, so if the relocation refers
+  // to a symbol it should be passed as \p RelSymbol so that it can be updated
+  // afterwards. If the relocation doesn't refer to a symbol, nullptr should be
+  // used.
+  void addRelocation(const MCSymbol *RelSymbol, const MCSection *Sec,
+                     MachO::any_relocation_info &MRE) {
+    RelAndSymbol P(RelSymbol, MRE);
+    Relocations[Sec].push_back(P);
+  }
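+
+  // Illustrative sketch (hypothetical target-writer code, not part of this
+  // header): after computing a MachO::any_relocation_info MRE for a fixup in
+  // section Sec, a target writer records it with
+  //   Writer->addRelocation(RelSymbol, Sec, MRE);
+  // passing nullptr for RelSymbol when the entry does not refer to a symbol.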
+
+  void recordScatteredRelocation(const MCAssembler &Asm,
+                                 const MCAsmLayout &Layout,
+                                 const MCFragment *Fragment,
+                                 const MCFixup &Fixup, MCValue Target,
+                                 unsigned Log2Size, uint64_t &FixedValue);
+
+  void recordTLVPRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+                            const MCFragment *Fragment, const MCFixup &Fixup,
+                            MCValue Target, uint64_t &FixedValue);
+
+  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
+                        const MCFragment *Fragment, const MCFixup &Fixup,
+                        MCValue Target, uint64_t &FixedValue) override;
+
+  void bindIndirectSymbols(MCAssembler &Asm);
+
+  /// Compute the symbol table data.
+  void computeSymbolTable(MCAssembler &Asm,
+                          std::vector<MachSymbolData> &LocalSymbolData,
+                          std::vector<MachSymbolData> &ExternalSymbolData,
+                          std::vector<MachSymbolData> &UndefinedSymbolData);
+
+  void computeSectionAddresses(const MCAssembler &Asm,
+                               const MCAsmLayout &Layout);
+
+  void executePostLayoutBinding(MCAssembler &Asm,
+                                const MCAsmLayout &Layout) override;
+
+  bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                              const MCSymbol &A,
+                                              const MCSymbol &B,
+                                              bool InSet) const override;
+
+  bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                              const MCSymbol &SymA,
+                                              const MCFragment &FB, bool InSet,
+                                              bool IsPCRel) const override;
+
+  void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
+};
+
+/// Construct a new Mach-O writer instance.
+///
+/// This routine takes ownership of the target writer subclass.
+///
+/// \param MOTW - The target specific Mach-O writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+std::unique_ptr<MCObjectWriter>
+createMachObjectWriter(std::unique_ptr<MCMachObjectTargetWriter> MOTW,
+                       raw_pwrite_stream &OS, bool IsLittleEndian);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCMACHOBJECTWRITER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCObjectFileInfo.h b/linux-x64/clang/include/llvm/MC/MCObjectFileInfo.h
new file mode 100644
index 0000000..c99f252
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCObjectFileInfo.h
@@ -0,0 +1,383 @@
+//===-- llvm/MC/MCObjectFileInfo.h - Object File Info -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTFILEINFO_H
+#define LLVM_MC_MCOBJECTFILEINFO_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+class MCContext;
+class MCSection;
+
+class MCObjectFileInfo {
+protected:
+  /// True if .comm supports alignment.  This is a hack for as long as we
+  /// support 10.4 Tiger, whose assembler doesn't support alignment on comm.
+  bool CommDirectiveSupportsAlignment;
+
+  /// True if target object file supports a weak_definition of constant 0 for an
+  /// omitted EH frame.
+  bool SupportsWeakOmittedEHFrame;
+
+  /// True if the target object file supports emitting a compact unwind section
+  /// without an associated EH frame section.
+  bool SupportsCompactUnwindWithoutEHFrame;
+
+  /// OmitDwarfIfHaveCompactUnwind - True if the target object file
+  /// supports having some functions with compact unwind and others with
+  /// DWARF unwind.
+  bool OmitDwarfIfHaveCompactUnwind;
+
+  /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
+  /// for EH.
+  unsigned PersonalityEncoding;
+  unsigned LSDAEncoding;
+  unsigned FDECFIEncoding;
+  unsigned TTypeEncoding;
+
+  /// Compact unwind encoding indicating that we should emit only an EH frame.
+  unsigned CompactUnwindDwarfEHFrameOnly;
+
+  /// Section directive for standard text.
+  MCSection *TextSection;
+
+  /// Section directive for standard data.
+  MCSection *DataSection;
+
+  /// Section that is default initialized to zero.
+  MCSection *BSSSection;
+
+  /// Section that is readonly and can contain arbitrary initialized data.
+  /// Targets are not required to have a readonly section. If they don't,
+  /// various bits of code will fall back to using the data section for
+  /// constants.
+  MCSection *ReadOnlySection;
+
+  /// If exception handling is supported by the target, this is the section the
+  /// Language Specific Data Area information is emitted to.
+  MCSection *LSDASection;
+
+  /// If exception handling is supported by the target and the target can
+  /// support a compact representation of the CIE and FDE, this is the section
+  /// to emit them into.
+  MCSection *CompactUnwindSection;
+
+  // Dwarf sections for debug info.  If a target supports debug info, these must
+  // be set.
+  MCSection *DwarfAbbrevSection;
+  MCSection *DwarfInfoSection;
+  MCSection *DwarfLineSection;
+  MCSection *DwarfLineStrSection;
+  MCSection *DwarfFrameSection;
+  MCSection *DwarfPubTypesSection;
+  const MCSection *DwarfDebugInlineSection;
+  MCSection *DwarfStrSection;
+  MCSection *DwarfLocSection;
+  MCSection *DwarfARangesSection;
+  MCSection *DwarfRangesSection;
+  MCSection *DwarfMacinfoSection;
+  // The pubnames section is no longer generated by default.  The generation
+  // can be enabled by a compiler flag.
+  MCSection *DwarfPubNamesSection;
+
+  /// DWARF5 Experimental Debug Info Sections
+  /// DwarfAccelNamesSection, DwarfAccelObjCSection,
+  /// DwarfAccelNamespaceSection, DwarfAccelTypesSection -
+  /// If we use the DWARF accelerated hash tables then we want to emit these
+  /// sections.
+  MCSection *DwarfAccelNamesSection;
+  MCSection *DwarfAccelObjCSection;
+  MCSection *DwarfAccelNamespaceSection;
+  MCSection *DwarfAccelTypesSection;
+
+  // These are used for the Fission separate debug information files.
+  MCSection *DwarfInfoDWOSection;
+  MCSection *DwarfTypesDWOSection;
+  MCSection *DwarfAbbrevDWOSection;
+  MCSection *DwarfStrDWOSection;
+  MCSection *DwarfLineDWOSection;
+  MCSection *DwarfLocDWOSection;
+  MCSection *DwarfStrOffDWOSection;
+
+  /// The DWARF v5 string offset and address table sections.
+  MCSection *DwarfStrOffSection;
+  MCSection *DwarfAddrSection;
+
+  // These are for Fission DWP files.
+  MCSection *DwarfCUIndexSection;
+  MCSection *DwarfTUIndexSection;
+
+  /// Section for newer gnu pubnames.
+  MCSection *DwarfGnuPubNamesSection;
+  /// Section for newer gnu pubtypes.
+  MCSection *DwarfGnuPubTypesSection;
+
+  // Section for Swift AST
+  MCSection *DwarfSwiftASTSection;
+
+  MCSection *COFFDebugSymbolsSection;
+  MCSection *COFFDebugTypesSection;
+  MCSection *COFFGlobalTypeHashesSection;
+
+  /// Extra TLS Variable Data section.
+  ///
+  /// If the target needs to put additional information for a TLS variable,
+  /// it'll go here.
+  MCSection *TLSExtraDataSection;
+
+  /// Section directive for Thread Local data. ELF, MachO, COFF, and Wasm.
+  MCSection *TLSDataSection; // Defaults to ".tdata".
+
+  /// Section directive for Thread Local uninitialized data.
+  ///
+  /// Null if this target doesn't support a BSS section. ELF and MachO only.
+  MCSection *TLSBSSSection; // Defaults to ".tbss".
+
+  /// StackMap section.
+  MCSection *StackMapSection;
+
+  /// FaultMap section.
+  MCSection *FaultMapSection;
+
+  /// EH frame section.
+  ///
+  /// It is initialized on demand so it can be overwritten (with uniquing).
+  MCSection *EHFrameSection;
+
+  /// Section containing metadata on function stack sizes.
+  MCSection *StackSizesSection;
+
+  // ELF specific sections.
+  MCSection *DataRelROSection;
+  MCSection *MergeableConst4Section;
+  MCSection *MergeableConst8Section;
+  MCSection *MergeableConst16Section;
+  MCSection *MergeableConst32Section;
+
+  // MachO specific sections.
+
+  /// Section for thread local structure information.
+  ///
+  /// Contains the source code name of the variable, visibility and a pointer to
+  /// the initial value (.tdata or .tbss).
+  MCSection *TLSTLVSection; // Defaults to ".tlv".
+
+  /// Section for thread local data initialization functions.
+  const MCSection *TLSThreadInitSection; // Defaults to ".thread_init_func".
+
+  MCSection *CStringSection;
+  MCSection *UStringSection;
+  MCSection *TextCoalSection;
+  MCSection *ConstTextCoalSection;
+  MCSection *ConstDataSection;
+  MCSection *DataCoalSection;
+  MCSection *DataCommonSection;
+  MCSection *DataBSSSection;
+  MCSection *FourByteConstantSection;
+  MCSection *EightByteConstantSection;
+  MCSection *SixteenByteConstantSection;
+  MCSection *LazySymbolPointerSection;
+  MCSection *NonLazySymbolPointerSection;
+  MCSection *ThreadLocalPointerSection;
+
+  /// COFF specific sections.
+  MCSection *DrectveSection;
+  MCSection *PDataSection;
+  MCSection *XDataSection;
+  MCSection *SXDataSection;
+  MCSection *GFIDsSection;
+
+public:
+  void InitMCObjectFileInfo(const Triple &TT, bool PIC, MCContext &ctx,
+                            bool LargeCodeModel = false);
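+
+  // Illustrative usage sketch (hypothetical caller; Ctx is an
+  // already-constructed MCContext):
+  //   MCObjectFileInfo MOFI;
+  //   MOFI.InitMCObjectFileInfo(Triple("x86_64-unknown-linux-gnu"),
+  //                             /*PIC=*/true, Ctx);
+  //   MCSection *Text = MOFI.getTextSection();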
+
+  bool getSupportsWeakOmittedEHFrame() const {
+    return SupportsWeakOmittedEHFrame;
+  }
+  bool getSupportsCompactUnwindWithoutEHFrame() const {
+    return SupportsCompactUnwindWithoutEHFrame;
+  }
+  bool getOmitDwarfIfHaveCompactUnwind() const {
+    return OmitDwarfIfHaveCompactUnwind;
+  }
+
+  bool getCommDirectiveSupportsAlignment() const {
+    return CommDirectiveSupportsAlignment;
+  }
+
+  unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
+  unsigned getLSDAEncoding() const { return LSDAEncoding; }
+  unsigned getFDEEncoding() const { return FDECFIEncoding; }
+  unsigned getTTypeEncoding() const { return TTypeEncoding; }
+
+  unsigned getCompactUnwindDwarfEHFrameOnly() const {
+    return CompactUnwindDwarfEHFrameOnly;
+  }
+
+  MCSection *getTextSection() const { return TextSection; }
+  MCSection *getDataSection() const { return DataSection; }
+  MCSection *getBSSSection() const { return BSSSection; }
+  MCSection *getReadOnlySection() const { return ReadOnlySection; }
+  MCSection *getLSDASection() const { return LSDASection; }
+  MCSection *getCompactUnwindSection() const { return CompactUnwindSection; }
+  MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
+  MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
+  MCSection *getDwarfLineSection() const { return DwarfLineSection; }
+  MCSection *getDwarfLineStrSection() const { return DwarfLineStrSection; }
+  MCSection *getDwarfFrameSection() const { return DwarfFrameSection; }
+  MCSection *getDwarfPubNamesSection() const { return DwarfPubNamesSection; }
+  MCSection *getDwarfPubTypesSection() const { return DwarfPubTypesSection; }
+  MCSection *getDwarfGnuPubNamesSection() const {
+    return DwarfGnuPubNamesSection;
+  }
+  MCSection *getDwarfGnuPubTypesSection() const {
+    return DwarfGnuPubTypesSection;
+  }
+  const MCSection *getDwarfDebugInlineSection() const {
+    return DwarfDebugInlineSection;
+  }
+  MCSection *getDwarfStrSection() const { return DwarfStrSection; }
+  MCSection *getDwarfLocSection() const { return DwarfLocSection; }
+  MCSection *getDwarfARangesSection() const { return DwarfARangesSection; }
+  MCSection *getDwarfRangesSection() const { return DwarfRangesSection; }
+  MCSection *getDwarfMacinfoSection() const { return DwarfMacinfoSection; }
+
+  // DWARF5 Experimental Debug Info Sections
+  MCSection *getDwarfAccelNamesSection() const {
+    return DwarfAccelNamesSection;
+  }
+  MCSection *getDwarfAccelObjCSection() const { return DwarfAccelObjCSection; }
+  MCSection *getDwarfAccelNamespaceSection() const {
+    return DwarfAccelNamespaceSection;
+  }
+  MCSection *getDwarfAccelTypesSection() const {
+    return DwarfAccelTypesSection;
+  }
+  MCSection *getDwarfInfoDWOSection() const { return DwarfInfoDWOSection; }
+  MCSection *getDwarfTypesSection(uint64_t Hash) const;
+  MCSection *getDwarfTypesDWOSection() const { return DwarfTypesDWOSection; }
+  MCSection *getDwarfAbbrevDWOSection() const { return DwarfAbbrevDWOSection; }
+  MCSection *getDwarfStrDWOSection() const { return DwarfStrDWOSection; }
+  MCSection *getDwarfLineDWOSection() const { return DwarfLineDWOSection; }
+  MCSection *getDwarfLocDWOSection() const { return DwarfLocDWOSection; }
+  MCSection *getDwarfStrOffDWOSection() const { return DwarfStrOffDWOSection; }
+  MCSection *getDwarfStrOffSection() const { return DwarfStrOffSection; }
+  MCSection *getDwarfAddrSection() const { return DwarfAddrSection; }
+  MCSection *getDwarfCUIndexSection() const { return DwarfCUIndexSection; }
+  MCSection *getDwarfTUIndexSection() const { return DwarfTUIndexSection; }
+  MCSection *getDwarfSwiftASTSection() const { return DwarfSwiftASTSection; }
+
+  MCSection *getCOFFDebugSymbolsSection() const {
+    return COFFDebugSymbolsSection;
+  }
+  MCSection *getCOFFDebugTypesSection() const {
+    return COFFDebugTypesSection;
+  }
+  MCSection *getCOFFGlobalTypeHashesSection() const {
+    return COFFGlobalTypeHashesSection;
+  }
+
+  MCSection *getTLSExtraDataSection() const { return TLSExtraDataSection; }
+  const MCSection *getTLSDataSection() const { return TLSDataSection; }
+  MCSection *getTLSBSSSection() const { return TLSBSSSection; }
+
+  MCSection *getStackMapSection() const { return StackMapSection; }
+  MCSection *getFaultMapSection() const { return FaultMapSection; }
+
+  MCSection *getStackSizesSection() const { return StackSizesSection; }
+
+  // ELF specific sections.
+  MCSection *getDataRelROSection() const { return DataRelROSection; }
+  const MCSection *getMergeableConst4Section() const {
+    return MergeableConst4Section;
+  }
+  const MCSection *getMergeableConst8Section() const {
+    return MergeableConst8Section;
+  }
+  const MCSection *getMergeableConst16Section() const {
+    return MergeableConst16Section;
+  }
+  const MCSection *getMergeableConst32Section() const {
+    return MergeableConst32Section;
+  }
+
+  // MachO specific sections.
+  const MCSection *getTLSTLVSection() const { return TLSTLVSection; }
+  const MCSection *getTLSThreadInitSection() const {
+    return TLSThreadInitSection;
+  }
+  const MCSection *getCStringSection() const { return CStringSection; }
+  const MCSection *getUStringSection() const { return UStringSection; }
+  MCSection *getTextCoalSection() const { return TextCoalSection; }
+  const MCSection *getConstTextCoalSection() const {
+    return ConstTextCoalSection;
+  }
+  const MCSection *getConstDataSection() const { return ConstDataSection; }
+  const MCSection *getDataCoalSection() const { return DataCoalSection; }
+  const MCSection *getDataCommonSection() const { return DataCommonSection; }
+  MCSection *getDataBSSSection() const { return DataBSSSection; }
+  const MCSection *getFourByteConstantSection() const {
+    return FourByteConstantSection;
+  }
+  const MCSection *getEightByteConstantSection() const {
+    return EightByteConstantSection;
+  }
+  const MCSection *getSixteenByteConstantSection() const {
+    return SixteenByteConstantSection;
+  }
+  MCSection *getLazySymbolPointerSection() const {
+    return LazySymbolPointerSection;
+  }
+  MCSection *getNonLazySymbolPointerSection() const {
+    return NonLazySymbolPointerSection;
+  }
+  MCSection *getThreadLocalPointerSection() const {
+    return ThreadLocalPointerSection;
+  }
+
+  // COFF specific sections.
+  MCSection *getDrectveSection() const { return DrectveSection; }
+  MCSection *getPDataSection() const { return PDataSection; }
+  MCSection *getXDataSection() const { return XDataSection; }
+  MCSection *getSXDataSection() const { return SXDataSection; }
+  MCSection *getGFIDsSection() const { return GFIDsSection; }
+
+  MCSection *getEHFrameSection() {
+    return EHFrameSection;
+  }
+
+  enum Environment { IsMachO, IsELF, IsCOFF, IsWasm };
+  Environment getObjectFileType() const { return Env; }
+
+  bool isPositionIndependent() const { return PositionIndependent; }
+
+private:
+  Environment Env;
+  bool PositionIndependent;
+  MCContext *Ctx;
+  Triple TT;
+
+  void initMachOMCObjectFileInfo(const Triple &T);
+  void initELFMCObjectFileInfo(const Triple &T, bool Large);
+  void initCOFFMCObjectFileInfo(const Triple &T);
+  void initWasmMCObjectFileInfo(const Triple &T);
+
+public:
+  const Triple &getTargetTriple() const { return TT; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCObjectStreamer.h b/linux-x64/clang/include/llvm/MC/MCObjectStreamer.h
new file mode 100644
index 0000000..8e9b4ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCObjectStreamer.h
@@ -0,0 +1,190 @@
+//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTSTREAMER_H
+#define LLVM_MC_MCOBJECTSTREAMER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MCAssembler;
+class MCCodeEmitter;
+class MCSubtargetInfo;
+class MCExpr;
+class MCFragment;
+class MCDataFragment;
+class MCAsmBackend;
+class raw_ostream;
+class raw_pwrite_stream;
+
+/// \brief Streaming object file generation interface.
+///
+/// This class provides an implementation of the MCStreamer interface which is
+/// suitable for use with the assembler backend. Specific object file formats
+/// are expected to subclass this interface to implement directives specific
+/// to that file format or custom semantics expected by the object writer
+/// implementation.
+class MCObjectStreamer : public MCStreamer {
+  std::unique_ptr<MCObjectWriter> ObjectWriter;
+  std::unique_ptr<MCAsmBackend> TAB;
+  std::unique_ptr<MCCodeEmitter> Emitter;
+  std::unique_ptr<MCAssembler> Assembler;
+  MCSection::iterator CurInsertionPoint;
+  bool EmitEHFrame;
+  bool EmitDebugFrame;
+  SmallVector<MCSymbol *, 2> PendingLabels;
+
+  virtual void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
+  void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
+  void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
+  MCSymbol *EmitCFILabel() override;
+  void EmitInstructionImpl(const MCInst &Inst, const MCSubtargetInfo &STI);
+
+protected:
+  MCObjectStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
+                   raw_pwrite_stream &OS,
+                   std::unique_ptr<MCCodeEmitter> Emitter);
+  ~MCObjectStreamer();
+
+public:
+  /// state management
+  void reset() override;
+
+  /// Object streamers require the integrated assembler.
+  bool isIntegratedAssemblerRequired() const override { return true; }
+
+  void EmitFrames(MCAsmBackend *MAB);
+  void EmitCFISections(bool EH, bool Debug) override;
+
+  MCFragment *getCurrentFragment() const;
+
+  void insert(MCFragment *F) {
+    flushPendingLabels(F);
+    MCSection *CurSection = getCurrentSectionOnly();
+    CurSection->getFragmentList().insert(CurInsertionPoint, F);
+    F->setParent(CurSection);
+  }
+
+  /// Get a data fragment to write into, creating a new one if the current
+  /// fragment is not a data fragment.
+  MCDataFragment *getOrCreateDataFragment();
+  MCPaddingFragment *getOrCreatePaddingFragment();
+
+protected:
+  bool changeSectionImpl(MCSection *Section, const MCExpr *Subsection);
+
+  /// If any labels have been emitted but not assigned fragments, ensure that
+  /// they get assigned, either to F if possible or to a new data fragment.
+  /// Optionally, it is also possible to provide an offset \p FOffset, which
+  /// will be used as a symbol offset within the fragment.
+  void flushPendingLabels(MCFragment *F, uint64_t FOffset = 0);
+
+public:
+  void visitUsedSymbol(const MCSymbol &Sym) override;
+
+  MCAssembler &getAssembler() { return *Assembler; }
+
+  /// \name MCStreamer Interface
+  /// @{
+
+  void EmitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
+  virtual void EmitLabel(MCSymbol *Symbol, SMLoc Loc, MCFragment *F);
+  void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+  void EmitValueImpl(const MCExpr *Value, unsigned Size,
+                     SMLoc Loc = SMLoc()) override;
+  void EmitULEB128Value(const MCExpr *Value) override;
+  void EmitSLEB128Value(const MCExpr *Value) override;
+  void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+  void ChangeSection(MCSection *Section, const MCExpr *Subsection) override;
+  void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+                       bool = false) override;
+
+  /// \brief Emit an instruction to a special fragment, because this instruction
+  /// can change its size during relaxation.
+  virtual void EmitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &);
+
+  void EmitBundleAlignMode(unsigned AlignPow2) override;
+  void EmitBundleLock(bool AlignToEnd) override;
+  void EmitBundleUnlock() override;
+  void EmitBytes(StringRef Data) override;
+  void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+                            unsigned ValueSize = 1,
+                            unsigned MaxBytesToEmit = 0) override;
+  void EmitCodeAlignment(unsigned ByteAlignment,
+                         unsigned MaxBytesToEmit = 0) override;
+  void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
+                         SMLoc Loc) override;
+  void
+  EmitCodePaddingBasicBlockStart(const MCCodePaddingContext &Context) override;
+  void
+  EmitCodePaddingBasicBlockEnd(const MCCodePaddingContext &Context) override;
+  void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+                             unsigned Column, unsigned Flags,
+                             unsigned Isa, unsigned Discriminator,
+                             StringRef FileName) override;
+  void EmitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel,
+                                const MCSymbol *Label,
+                                unsigned PointerSize);
+  void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
+                                 const MCSymbol *Label);
+  void EmitCVLocDirective(unsigned FunctionId, unsigned FileNo, unsigned Line,
+                          unsigned Column, bool PrologueEnd, bool IsStmt,
+                          StringRef FileName, SMLoc Loc) override;
+  void EmitCVLinetableDirective(unsigned FunctionId, const MCSymbol *Begin,
+                                const MCSymbol *End) override;
+  void EmitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
+                                      unsigned SourceFileId,
+                                      unsigned SourceLineNum,
+                                      const MCSymbol *FnStartSym,
+                                      const MCSymbol *FnEndSym) override;
+  void EmitCVDefRangeDirective(
+      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
+      StringRef FixedSizePortion) override;
+  void EmitCVStringTableDirective() override;
+  void EmitCVFileChecksumsDirective() override;
+  void EmitCVFileChecksumOffsetDirective(unsigned FileNo) override;
+  void EmitDTPRel32Value(const MCExpr *Value) override;
+  void EmitDTPRel64Value(const MCExpr *Value) override;
+  void EmitTPRel32Value(const MCExpr *Value) override;
+  void EmitTPRel64Value(const MCExpr *Value) override;
+  void EmitGPRel32Value(const MCExpr *Value) override;
+  void EmitGPRel64Value(const MCExpr *Value) override;
+  bool EmitRelocDirective(const MCExpr &Offset, StringRef Name,
+                          const MCExpr *Expr, SMLoc Loc) override;
+  using MCStreamer::emitFill;
+  void emitFill(const MCExpr &NumBytes, uint64_t FillValue,
+                SMLoc Loc = SMLoc()) override;
+  void emitFill(const MCExpr &NumValues, int64_t Size, int64_t Expr,
+                SMLoc Loc = SMLoc()) override;
+  void EmitFileDirective(StringRef Filename) override;
+
+  void FinishImpl() override;
+
+  /// Emit the absolute difference between two symbols if possible.
+  ///
+  /// Emit the absolute difference between \c Hi and \c Lo as a constant, as
+  /// long as we can compute it.  Currently, that requires that both symbols
+  /// are in the same data fragment.  Otherwise, the difference is emitted as
+  /// a symbolic (\c Hi - \c Lo) expression.
+  ///
+  /// \pre The offset of \c Hi is greater than the offset of \c Lo.
+  void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
+                              unsigned Size) override;
+
+  void emitAbsoluteSymbolDiffAsULEB128(const MCSymbol *Hi,
+                                       const MCSymbol *Lo) override;
+
+  bool mayHaveInstructions(MCSection &Sec) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCObjectWriter.h b/linux-x64/clang/include/llvm/MC/MCObjectWriter.h
new file mode 100644
index 0000000..cd90690
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCObjectWriter.h
@@ -0,0 +1,200 @@
+//===- llvm/MC/MCObjectWriter.h - Object File Writer Interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTWRITER_H
+#define LLVM_MC_MCOBJECTWRITER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+class MCAsmLayout;
+class MCAssembler;
+class MCFixup;
+class MCFragment;
+class MCSymbol;
+class MCSymbolRefExpr;
+class MCValue;
+
+/// Defines the object file and target independent interfaces used by the
+/// assembler backend to write native file format object files.
+///
+/// The object writer contains a few callbacks used by the assembler to allow
+/// the object writer to modify the assembler data structures at appropriate
+/// points. Once assembly is complete, the object writer is given the
+/// MCAssembler instance, which contains all the symbol and section data which
+/// should be emitted as part of writeObject().
+///
+/// The object writer also contains a number of helper methods for writing
+/// binary data to the output stream.
+class MCObjectWriter {
+  raw_pwrite_stream *OS;
+
+protected:
+  unsigned IsLittleEndian : 1;
+
+  // Can only create subclasses.
+  MCObjectWriter(raw_pwrite_stream &OS, bool IsLittleEndian)
+      : OS(&OS), IsLittleEndian(IsLittleEndian) {}
+
+  unsigned getInitialOffset() {
+    return OS->tell();
+  }
+
+public:
+  MCObjectWriter(const MCObjectWriter &) = delete;
+  MCObjectWriter &operator=(const MCObjectWriter &) = delete;
+  virtual ~MCObjectWriter();
+
+  /// lifetime management
+  virtual void reset() {}
+
+  bool isLittleEndian() const { return IsLittleEndian; }
+
+  raw_pwrite_stream &getStream() { return *OS; }
+  void setStream(raw_pwrite_stream &NewOS) { OS = &NewOS; }
+
+  /// \name High-Level API
+  /// @{
+
+  /// Perform any late binding of symbols (for example, to assign symbol
+  /// indices for use when generating relocations).
+  ///
+  /// This routine is called by the assembler after layout and relaxation is
+  /// complete.
+  virtual void executePostLayoutBinding(MCAssembler &Asm,
+                                        const MCAsmLayout &Layout) = 0;
+
+  /// Record a relocation entry.
+  ///
+  /// This routine is called by the assembler after layout and relaxation, and
+  /// post layout binding. The implementation is responsible for storing
+  /// information about the relocation so that it can be emitted during
+  /// writeObject().
+  virtual void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
+                                const MCFragment *Fragment,
+                                const MCFixup &Fixup, MCValue Target,
+                                uint64_t &FixedValue) = 0;
+
+  /// Check whether the difference (A - B) between two symbol references is
+  /// fully resolved.
+  ///
+  /// Clients are not required to answer precisely and may conservatively return
+  /// false, even when a difference is fully resolved.
+  bool isSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
+                                          const MCSymbolRefExpr *A,
+                                          const MCSymbolRefExpr *B,
+                                          bool InSet) const;
+
+  virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                                      const MCSymbol &A,
+                                                      const MCSymbol &B,
+                                                      bool InSet) const;
+
+  virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                                      const MCSymbol &SymA,
+                                                      const MCFragment &FB,
+                                                      bool InSet,
+                                                      bool IsPCRel) const;
+
+  /// Write the object file.
+  ///
+  /// This routine is called by the assembler after layout and relaxation is
+  /// complete, fixups have been evaluated and applied, and relocations
+  /// generated.
+  virtual void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) = 0;
+
+  /// @}
+  /// \name Binary Output
+  /// @{
+
+  void write8(uint8_t Value) { *OS << char(Value); }
+
+  void writeLE16(uint16_t Value) {
+    support::endian::Writer<support::little>(*OS).write(Value);
+  }
+
+  void writeLE32(uint32_t Value) {
+    support::endian::Writer<support::little>(*OS).write(Value);
+  }
+
+  void writeLE64(uint64_t Value) {
+    support::endian::Writer<support::little>(*OS).write(Value);
+  }
+
+  void writeBE16(uint16_t Value) {
+    support::endian::Writer<support::big>(*OS).write(Value);
+  }
+
+  void writeBE32(uint32_t Value) {
+    support::endian::Writer<support::big>(*OS).write(Value);
+  }
+
+  void writeBE64(uint64_t Value) {
+    support::endian::Writer<support::big>(*OS).write(Value);
+  }
+
+  void write16(uint16_t Value) {
+    if (IsLittleEndian)
+      writeLE16(Value);
+    else
+      writeBE16(Value);
+  }
+
+  void write32(uint32_t Value) {
+    if (IsLittleEndian)
+      writeLE32(Value);
+    else
+      writeBE32(Value);
+  }
+
+  void write64(uint64_t Value) {
+    if (IsLittleEndian)
+      writeLE64(Value);
+    else
+      writeBE64(Value);
+  }
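+
+  // Illustrative sketch (hypothetical values): the helpers above dispatch on
+  // the writer's byte order, so
+  //   W.write32(0x11223344);
+  // emits the bytes 44 33 22 11 on a little-endian writer and 11 22 33 44 on
+  // a big-endian one.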
+
+  void WriteZeros(unsigned N) {
+    const char Zeros[16] = {0};
+
+    for (unsigned i = 0, e = N / 16; i != e; ++i)
+      *OS << StringRef(Zeros, 16);
+
+    *OS << StringRef(Zeros, N % 16);
+  }
+
+  void writeBytes(const SmallVectorImpl<char> &ByteVec,
+                  unsigned ZeroFillSize = 0) {
+    writeBytes(StringRef(ByteVec.data(), ByteVec.size()), ZeroFillSize);
+  }
+
+  void writeBytes(StringRef Str, unsigned ZeroFillSize = 0) {
+    // TODO: this version may need to go away once all fragment contents are
+    // converted to SmallVector<char, N>
+    assert(
+        (ZeroFillSize == 0 || Str.size() <= ZeroFillSize) &&
+        "data size greater than fill size, unexpected large write will occur");
+    *OS << Str;
+    if (ZeroFillSize)
+      WriteZeros(ZeroFillSize - Str.size());
+  }
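+
+  // Illustrative sketch (hypothetical values): padding a fixed-width field,
+  //   W.writeBytes(StringRef("__text"), /*ZeroFillSize=*/16);
+  // writes the six name bytes followed by ten zero bytes.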
+
+  /// @}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCOBJECTWRITER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/AsmCond.h b/linux-x64/clang/include/llvm/MC/MCParser/AsmCond.h
new file mode 100644
index 0000000..8e7bfc5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/AsmCond.h
@@ -0,0 +1,40 @@
+//===- AsmCond.h - Assembly file conditional assembly  ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_ASMCOND_H
+#define LLVM_MC_MCPARSER_ASMCOND_H
+
+namespace llvm {
+
+/// AsmCond - Class to support conditional assembly
+///
+/// The conditional assembly feature (.if, .else, .elseif and .endif) is
+/// implemented with AsmCond, which records what we are in the middle of
+/// processing.  Ignore can be either true or false.  When true, we are
+/// ignoring the block of code in the middle of a conditional.
+
+class AsmCond {
+public:
+  enum ConditionalAssemblyType {
+    NoCond,     // no conditional is being processed
+    IfCond,     // inside if conditional
+    ElseIfCond, // inside elseif conditional
+    ElseCond    // inside else conditional
+  };
+
+  ConditionalAssemblyType TheCond = NoCond;
+  bool CondMet = false;
+  bool Ignore = false;
+
+  AsmCond() = default;
+};
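+
+// Illustrative sketch (hypothetical parser logic, not part of this header):
+// a parser typically keeps a stack of AsmCond records, one per nesting level.
+//   .if EXPR     -> push a record with TheCond = IfCond; CondMet is set when
+//                   EXPR is nonzero, and Ignore when CondMet is false.
+//   .elseif EXPR -> update the top record; the new block is ignored unless no
+//                   earlier branch matched and EXPR is nonzero.
+//   .endif       -> pop the top record.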
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_ASMCOND_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/AsmLexer.h b/linux-x64/clang/include/llvm/MC/MCParser/AsmLexer.h
new file mode 100644
index 0000000..207183a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/AsmLexer.h
@@ -0,0 +1,76 @@
+//===- AsmLexer.h - Lexer for Assembly Files --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class declares the lexer for assembly files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_ASMLEXER_H
+#define LLVM_MC_MCPARSER_ASMLEXER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include <string>
+
+namespace llvm {
+
+class MCAsmInfo;
+
+/// AsmLexer - Lexer class for assembly files.
+class AsmLexer : public MCAsmLexer {
+  const MCAsmInfo &MAI;
+
+  const char *CurPtr = nullptr;
+  StringRef CurBuf;
+  bool IsAtStartOfLine = true;
+  bool IsAtStartOfStatement = true;
+  bool IsParsingMSInlineAsm = false;
+  bool IsPeeking = false;
+
+protected:
+  /// LexToken - Read the next token and return its code.
+  AsmToken LexToken() override;
+
+public:
+  AsmLexer(const MCAsmInfo &MAI);
+  AsmLexer(const AsmLexer &) = delete;
+  AsmLexer &operator=(const AsmLexer &) = delete;
+  ~AsmLexer() override;
+
+  void setBuffer(StringRef Buf, const char *ptr = nullptr);
+  void setParsingMSInlineAsm(bool V) { IsParsingMSInlineAsm = V; }
+
+  StringRef LexUntilEndOfStatement() override;
+
+  size_t peekTokens(MutableArrayRef<AsmToken> Buf,
+                    bool ShouldSkipSpace = true) override;
+
+  const MCAsmInfo &getMAI() const { return MAI; }
+
+private:
+  bool isAtStartOfComment(const char *Ptr);
+  bool isAtStatementSeparator(const char *Ptr);
+  int getNextChar();
+  AsmToken ReturnError(const char *Loc, const std::string &Msg);
+
+  AsmToken LexIdentifier();
+  AsmToken LexSlash();
+  AsmToken LexLineComment();
+  AsmToken LexDigit();
+  AsmToken LexSingleQuote();
+  AsmToken LexQuote();
+  AsmToken LexFloatLiteral();
+  AsmToken LexHexFloatLiteral(bool NoIntDigits);
+
+  StringRef LexUntilEndOfLine();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_ASMLEXER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCAsmLexer.h b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmLexer.h
new file mode 100644
index 0000000..10550b3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -0,0 +1,162 @@
+//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
+#define LLVM_MC_MCPARSER_MCASMLEXER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCAsmMacro.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+/// A callback class which is notified of each comment in an assembly file as
+/// it is lexed.
+class AsmCommentConsumer {
+public:
+  virtual ~AsmCommentConsumer() = default;
+
+  /// Callback function for when a comment is lexed. Loc is the start of the
+  /// comment text (excluding the comment-start marker). CommentText is the text
+  /// of the comment, excluding the comment start and end markers, and the
+  /// newline for single-line comments.
+  virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
+};
+
+/// Generic assembler lexer interface, for use by target specific assembly
+/// lexers.
+class MCAsmLexer {
+  /// The current token, stored in the base class for faster access.
+  SmallVector<AsmToken, 1> CurTok;
+
+  /// The location and description of the current error
+  SMLoc ErrLoc;
+  std::string Err;
+
+protected: // Can only create subclasses.
+  const char *TokStart = nullptr;
+  bool SkipSpace = true;
+  bool AllowAtInIdentifier;
+  bool IsAtStartOfStatement = true;
+  AsmCommentConsumer *CommentConsumer = nullptr;
+
+  bool AltMacroMode;
+  MCAsmLexer();
+
+  virtual AsmToken LexToken() = 0;
+
+  void SetError(SMLoc errLoc, const std::string &err) {
+    ErrLoc = errLoc;
+    Err = err;
+  }
+
+public:
+  MCAsmLexer(const MCAsmLexer &) = delete;
+  MCAsmLexer &operator=(const MCAsmLexer &) = delete;
+  virtual ~MCAsmLexer();
+
+  bool IsaAltMacroMode() {
+    return AltMacroMode;
+  }
+
+  void SetAltMacroMode(bool AltMacroSet) {
+    AltMacroMode = AltMacroSet;
+  }
+
+  /// Consume the next token from the input stream and return it.
+  ///
+  /// The lexer will continuously return the end-of-file token once the end of
+  /// the main input file has been reached.
+  const AsmToken &Lex() {
+    assert(!CurTok.empty());
+    // Record whether the token we are consuming is an EndOfStatement.
+    IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
+    CurTok.erase(CurTok.begin());
+    // LexToken may generate multiple tokens via UnLex but will always return
+    // the first one. Place returned value at head of CurTok vector.
+    if (CurTok.empty()) {
+      AsmToken T = LexToken();
+      CurTok.insert(CurTok.begin(), T);
+    }
+    return CurTok.front();
+  }
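+
+  // Illustrative driver loop (hypothetical, not part of this header):
+  //   while (Lexer.isNot(AsmToken::Eof)) {
+  //     const AsmToken &Tok = Lexer.getTok();
+  //     // ... inspect Tok ...
+  //     Lexer.Lex(); // advance; Eof is returned repeatedly at end of input
+  //   }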
+
+  void UnLex(AsmToken const &Token) {
+    IsAtStartOfStatement = false;
+    CurTok.insert(CurTok.begin(), Token);
+  }
+
+  bool isAtStartOfStatement() { return IsAtStartOfStatement; }
+
+  virtual StringRef LexUntilEndOfStatement() = 0;
+
+  /// Get the current source location.
+  SMLoc getLoc() const;
+
+  /// Get the current (last) lexed token.
+  const AsmToken &getTok() const {
+    return CurTok[0];
+  }
+
+  /// Look ahead at the next token to be lexed.
+  const AsmToken peekTok(bool ShouldSkipSpace = true) {
+    AsmToken Tok;
+
+    MutableArrayRef<AsmToken> Buf(Tok);
+    size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
+
+    assert(ReadCount == 1);
+    (void)ReadCount;
+
+    return Tok;
+  }
+
+  /// Look ahead an arbitrary number of tokens.
+  virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
+                            bool ShouldSkipSpace = true) = 0;
+
+  /// Get the current error location
+  SMLoc getErrLoc() {
+    return ErrLoc;
+  }
+
+  /// Get the current error string
+  const std::string &getErr() {
+    return Err;
+  }
+
+  /// Get the kind of current token.
+  AsmToken::TokenKind getKind() const { return getTok().getKind(); }
+
+  /// Check if the current token has kind \p K.
+  bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
+
+  /// Check if the current token has kind \p K.
+  bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
+
+  /// Set whether spaces should be ignored by the lexer
+  void setSkipSpace(bool val) { SkipSpace = val; }
+
+  bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
+  void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
+
+  void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
+    this->CommentConsumer = CommentConsumer;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCASMLEXER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParser.h b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParser.h
new file mode 100644
index 0000000..0f79c47
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParser.h
@@ -0,0 +1,313 @@
+//===- llvm/MC/MCAsmParser.h - Abstract Asm Parser Interface ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSER_H
+#define LLVM_MC_MCPARSER_MCASMPARSER_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/Support/SMLoc.h"
+#include <cstdint>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class MCAsmInfo;
+class MCAsmParserExtension;
+class MCContext;
+class MCExpr;
+class MCInstPrinter;
+class MCInstrInfo;
+class MCStreamer;
+class MCTargetAsmParser;
+class SourceMgr;
+
+struct InlineAsmIdentifierInfo {
+  enum IdKind {
+    IK_Invalid,  // Initial state. Unexpected after successful parsing.
+    IK_Label,    // Function/Label reference.
+    IK_EnumVal,  // Value of enumeration type.
+    IK_Var       // Variable.
+  };
+  // Represents an Enum value
+  struct EnumIdentifier {
+    int64_t EnumVal;
+  };
+  // Represents a label/function reference
+  struct LabelIdentifier {
+    void *Decl;
+  };
+  // Represents a variable
+  struct VariableIdentifier {
+    void *Decl;
+    bool IsGlobalLV;
+    unsigned Length;
+    unsigned Size;
+    unsigned Type;
+  };
+  // An InlineAsm identifier can only be one of those
+  union {
+    EnumIdentifier Enum;
+    LabelIdentifier Label;
+    VariableIdentifier Var;
+  };
+  bool isKind(IdKind kind) const { return Kind == kind; }
+  // Initializers
+  void setEnum(int64_t enumVal) {
+    assert(isKind(IK_Invalid) && "should be initialized only once");
+    Kind = IK_EnumVal;
+    Enum.EnumVal = enumVal;
+  }
+  void setLabel(void *decl) {
+    assert(isKind(IK_Invalid) && "should be initialized only once");
+    Kind = IK_Label;
+    Label.Decl = decl;
+  }
+  void setVar(void *decl, bool isGlobalLV, unsigned size, unsigned type) {
+    assert(isKind(IK_Invalid) && "should be initialized only once");
+    Kind = IK_Var;
+    Var.Decl = decl;
+    Var.IsGlobalLV = isGlobalLV;
+    Var.Size = size;
+    Var.Type = type;
+    Var.Length = size / type;
+  }
+  InlineAsmIdentifierInfo() : Kind(IK_Invalid) {}
+
+private:
+  // Discriminate using the current kind.
+  IdKind Kind;
+};
+
+/// \brief Generic Sema callback for assembly parser.
+class MCAsmParserSemaCallback {
+public:
+  virtual ~MCAsmParserSemaCallback();
+
+  virtual void LookupInlineAsmIdentifier(StringRef &LineBuf,
+                                         InlineAsmIdentifierInfo &Info,
+                                         bool IsUnevaluatedContext) = 0;
+  virtual StringRef LookupInlineAsmLabel(StringRef Identifier, SourceMgr &SM,
+                                         SMLoc Location, bool Create) = 0;
+  virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
+                                    unsigned &Offset) = 0;
+};
+
+/// \brief Generic assembler parser interface, for use by target specific
+/// assembly parsers.
+class MCAsmParser {
+public:
+  using DirectiveHandler = bool (*)(MCAsmParserExtension*, StringRef, SMLoc);
+  using ExtensionDirectiveHandler =
+      std::pair<MCAsmParserExtension*, DirectiveHandler>;
+
+  struct MCPendingError {
+    SMLoc Loc;
+    SmallString<64> Msg;
+    SMRange Range;
+  };
+
+private:
+  MCTargetAsmParser *TargetParser = nullptr;
+
+  unsigned ShowParsedOperands : 1;
+
+protected: // Can only create subclasses.
+  MCAsmParser();
+
+  /// Flag tracking whether any errors have been encountered.
+  bool HadError = false;
+  /// Enable print [latency:throughput] in output file.
+  bool EnablePrintSchedInfo = false;
+
+  SmallVector<MCPendingError, 1> PendingErrors;
+
+public:
+  MCAsmParser(const MCAsmParser &) = delete;
+  MCAsmParser &operator=(const MCAsmParser &) = delete;
+  virtual ~MCAsmParser();
+
+  virtual void addDirectiveHandler(StringRef Directive,
+                                   ExtensionDirectiveHandler Handler) = 0;
+
+  virtual void addAliasForDirective(StringRef Directive, StringRef Alias) = 0;
+
+  virtual SourceMgr &getSourceManager() = 0;
+
+  virtual MCAsmLexer &getLexer() = 0;
+  const MCAsmLexer &getLexer() const {
+    return const_cast<MCAsmParser*>(this)->getLexer();
+  }
+
+  virtual MCContext &getContext() = 0;
+
+  /// \brief Return the output streamer for the assembler.
+  virtual MCStreamer &getStreamer() = 0;
+
+  MCTargetAsmParser &getTargetParser() const { return *TargetParser; }
+  void setTargetParser(MCTargetAsmParser &P);
+
+  virtual unsigned getAssemblerDialect() { return 0; }
+  virtual void setAssemblerDialect(unsigned i) {}
+
+  bool getShowParsedOperands() const { return ShowParsedOperands; }
+  void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }
+
+  void setEnablePrintSchedInfo(bool Value) { EnablePrintSchedInfo = Value; }
+  bool shouldPrintSchedInfo() { return EnablePrintSchedInfo; }
+
+  /// \brief Run the parser on the input source buffer.
+  virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
+
+  virtual void setParsingInlineAsm(bool V) = 0;
+  virtual bool isParsingInlineAsm() = 0;
+
+  /// \brief Parse MS-style inline assembly.
+  virtual bool parseMSInlineAsm(
+      void *AsmLoc, std::string &AsmString, unsigned &NumOutputs,
+      unsigned &NumInputs, SmallVectorImpl<std::pair<void *, bool>> &OpDecls,
+      SmallVectorImpl<std::string> &Constraints,
+      SmallVectorImpl<std::string> &Clobbers, const MCInstrInfo *MII,
+      const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) = 0;
+
+  /// \brief Emit a note at the location \p L, with the message \p Msg.
+  virtual void Note(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
+
+  /// \brief Emit a warning at the location \p L, with the message \p Msg.
+  ///
+  /// \return The return value is true, if warnings are fatal.
+  virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
+
+  /// \brief Return an error at the location \p L, with the message \p Msg. This
+  /// may be modified before being emitted.
+  ///
+  /// \return The return value is always true, as an idiomatic convenience to
+  /// clients.
+  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None);
+
+  /// \brief Emit an error at the location \p L, with the message \p Msg.
+  ///
+  /// \return The return value is always true, as an idiomatic convenience to
+  /// clients.
+  virtual bool printError(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
+
+  bool hasPendingError() { return !PendingErrors.empty(); }
+
+  bool printPendingErrors() {
+    bool rv = !PendingErrors.empty();
+    for (const auto &Err : PendingErrors) {
+      printError(Err.Loc, Twine(Err.Msg), Err.Range);
+    }
+    PendingErrors.clear();
+    return rv;
+  }
+
+  bool addErrorSuffix(const Twine &Suffix);
+
+  /// \brief Get the next AsmToken in the stream, possibly handling file
+  /// inclusion first.
+  virtual const AsmToken &Lex() = 0;
+
+  /// \brief Get the current AsmToken from the stream.
+  const AsmToken &getTok() const;
+
+  /// \brief Report an error at the current lexer location.
+  bool TokError(const Twine &Msg, SMRange Range = None);
+
+  bool parseTokenLoc(SMLoc &Loc);
+  bool parseToken(AsmToken::TokenKind T, const Twine &Msg = "unexpected token");
+  /// \brief Attempt to parse and consume a token, returning true on
+  /// success.
+  bool parseOptionalToken(AsmToken::TokenKind T);
+
+  bool parseEOL(const Twine &ErrMsg);
+
+  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true);
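+
+  // Illustrative sketch (hypothetical): parsing a comma-separated identifier
+  // list, where the per-element callback returns true on error:
+  //   auto parseOne = [&]() -> bool {
+  //     StringRef Name;
+  //     return parseIdentifier(Name);
+  //   };
+  //   if (parseMany(parseOne))
+  //     return true; // error already reported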
+
+  bool parseIntToken(int64_t &V, const Twine &ErrMsg);
+
+  bool check(bool P, const Twine &Msg);
+  bool check(bool P, SMLoc Loc, const Twine &Msg);
+
+  /// \brief Parse an identifier or string (as a quoted identifier) and set \p
+  /// Res to the identifier contents.
+  virtual bool parseIdentifier(StringRef &Res) = 0;
+
+  /// \brief Parse up to the end of statement and return the contents from the
+  /// current token until the end of the statement; the current token on exit
+  /// will be either the EndOfStatement or EOF.
+  virtual StringRef parseStringToEndOfStatement() = 0;
+
+  /// \brief Parse the current token as a string which may include escaped
+  /// characters and return the string contents.
+  virtual bool parseEscapedString(std::string &Data) = 0;
+
+  /// \brief Skip to the end of the current statement, for error recovery.
+  virtual void eatToEndOfStatement() = 0;
+
+  /// \brief Parse an arbitrary expression.
+  ///
+  /// \param Res - The value of the expression. The result is undefined
+  /// on error.
+  /// \return - False on success.
+  virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+  bool parseExpression(const MCExpr *&Res);
+
+  /// \brief Parse a primary expression.
+  ///
+  /// \param Res - The value of the expression. The result is undefined
+  /// on error.
+  /// \return - False on success.
+  virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+
+  /// \brief Parse an arbitrary expression, assuming that an initial '(' has
+  /// already been consumed.
+  ///
+  /// \param Res - The value of the expression. The result is undefined
+  /// on error.
+  /// \return - False on success.
+  virtual bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+
+  /// \brief Parse an expression which must evaluate to an absolute value.
+  ///
+  /// \param Res - The value of the absolute expression. The result is undefined
+  /// on error.
+  /// \return - False on success.
+  virtual bool parseAbsoluteExpression(int64_t &Res) = 0;
+
+  /// \brief Ensure that we have a valid section set in the streamer. Otherwise,
+  /// report an error and switch to .text.
+  /// \return - False on success.
+  virtual bool checkForValidSection() = 0;
+
+  /// \brief Parse an arbitrary expression of a specified parenthesis depth,
+  /// assuming that the initial '(' characters have already been consumed.
+  ///
+  /// \param ParenDepth - Specifies how many trailing expressions outside the
+  /// current parentheses we have to parse.
+  /// \param Res - The value of the expression. The result is undefined
+  /// on error.
+  /// \return - False on success.
+  virtual bool parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
+                                     SMLoc &EndLoc) = 0;
+};
+
+/// \brief Create an MCAsmParser instance.
+MCAsmParser *createMCAsmParser(SourceMgr &, MCContext &, MCStreamer &,
+                               const MCAsmInfo &, unsigned CB = 0);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCASMPARSER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserExtension.h b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserExtension.h
new file mode 100644
index 0000000..ffb8d7a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -0,0 +1,121 @@
+//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
+#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+
+class Twine;
+
+/// \brief Generic interface for extending the MCAsmParser,
+/// which is implemented by target and object file assembly parser
+/// implementations.
+class MCAsmParserExtension {
+  MCAsmParser *Parser;
+
+protected:
+  MCAsmParserExtension();
+
+  // Helper template for implementing static dispatch functions.
+  template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
+  static bool HandleDirective(MCAsmParserExtension *Target,
+                              StringRef Directive,
+                              SMLoc DirectiveLoc) {
+    T *Obj = static_cast<T*>(Target);
+    return (Obj->*Handler)(Directive, DirectiveLoc);
+  }
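+
+  // Illustrative registration sketch (hypothetical extension class MyParser
+  // with a member bool parseDirectiveFoo(StringRef, SMLoc)):
+  //   getParser().addDirectiveHandler(
+  //       ".foo",
+  //       std::make_pair(this,
+  //                      HandleDirective<MyParser, &MyParser::parseDirectiveFoo>));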
+
+  bool BracketExpressionsSupported = false;
+
+public:
+  MCAsmParserExtension(const MCAsmParserExtension &) = delete;
+  MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
+  virtual ~MCAsmParserExtension();
+
+  /// \brief Initialize the extension for parsing using the given \p Parser.
+  /// The extension should use the AsmParser interfaces to register its
+  /// parsing routines.
+  virtual void Initialize(MCAsmParser &Parser);
+
+  /// \name MCAsmParser Proxy Interfaces
+  /// @{
+
+  MCContext &getContext() { return getParser().getContext(); }
+
+  MCAsmLexer &getLexer() { return getParser().getLexer(); }
+  const MCAsmLexer &getLexer() const {
+    return const_cast<MCAsmParserExtension *>(this)->getLexer();
+  }
+
+  MCAsmParser &getParser() { return *Parser; }
+  const MCAsmParser &getParser() const {
+    return const_cast<MCAsmParserExtension*>(this)->getParser();
+  }
+
+  SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
+  MCStreamer &getStreamer() { return getParser().getStreamer(); }
+
+  bool Warning(SMLoc L, const Twine &Msg) {
+    return getParser().Warning(L, Msg);
+  }
+
+  bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
+    return getParser().Error(L, Msg, Range);
+  }
+
+  void Note(SMLoc L, const Twine &Msg) {
+    getParser().Note(L, Msg);
+  }
+
+  bool TokError(const Twine &Msg) {
+    return getParser().TokError(Msg);
+  }
+
+  const AsmToken &Lex() { return getParser().Lex(); }
+  const AsmToken &getTok() { return getParser().getTok(); }
+  bool parseToken(AsmToken::TokenKind T,
+                  const Twine &Msg = "unexpected token") {
+    return getParser().parseToken(T, Msg);
+  }
+
+  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
+    return getParser().parseMany(parseOne, hasComma);
+  }
+
+  bool parseOptionalToken(AsmToken::TokenKind T) {
+    return getParser().parseOptionalToken(T);
+  }
+
+  bool check(bool P, const Twine &Msg) {
+    return getParser().check(P, Msg);
+  }
+
+  bool check(bool P, SMLoc Loc, const Twine &Msg) {
+    return getParser().check(P, Loc, Msg);
+  }
+
+  bool addErrorSuffix(const Twine &Suffix) {
+    return getParser().addErrorSuffix(Suffix);
+  }
+
+  bool HasBracketExpressions() const { return BracketExpressionsSupported; }
+
+  /// @}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserUtils.h b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserUtils.h
new file mode 100644
index 0000000..84173bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCAsmParserUtils.h
@@ -0,0 +1,34 @@
+//===- llvm/MC/MCAsmParserUtils.h - Asm Parser Utilities --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
+#define LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
+
+namespace llvm {
+
+class MCAsmParser;
+class MCExpr;
+class MCSymbol;
+class StringRef;
+
+namespace MCParserUtils {
+
+/// Parse a value expression and return whether it can be assigned to a symbol
+/// with the given name.
+///
+/// On success, returns false and sets the Symbol and Value output parameters.
+bool parseAssignmentExpression(StringRef Name, bool allow_redef,
+                               MCAsmParser &Parser, MCSymbol *&Symbol,
+                               const MCExpr *&Value);
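+
+// For example (a sketch; assumes an MCAsmParser P positioned after the '='
+// of an assignment statement):
+//
+//   MCSymbol *Sym;
+//   const MCExpr *Value;
+//   if (MCParserUtils::parseAssignmentExpression("sym", /*allow_redef=*/true,
+//                                                P, Sym, Value))
+//     return true; // failure; an error has already been reported
+//   // On success (false), Sym and Value describe the assignment.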
+
+} // namespace MCParserUtils
+
+} // namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/linux-x64/clang/include/llvm/MC/MCParser/MCParsedAsmOperand.h
new file mode 100644
index 0000000..4af76ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -0,0 +1,100 @@
+//===- llvm/MC/MCParsedAsmOperand.h - Asm Parser Operand --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
+#define LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SMLoc.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// MCParsedAsmOperand - This abstract class represents a source-level assembly
+/// instruction operand.  It should be subclassed by target-specific code.  This
+/// base class is used by target-independent clients and is the interface
+/// between parsing an asm instruction and recognizing it.
+class MCParsedAsmOperand {
+  /// MCOperandNum - The corresponding MCInst operand number.  Only valid when
+  /// parsing MS-style inline assembly.
+  unsigned MCOperandNum;
+
+  /// Constraint - The constraint on this operand.  Only valid when parsing
+  /// MS-style inline assembly.
+  std::string Constraint;
+
+protected:
+  // This only seems to need to be movable (by ARMOperand) but ARMOperand has
+  // lots of members and MSVC doesn't support defaulted move ops, so to avoid
+  // that verbosity, just rely on defaulted copy ops. It's only the Constraint
+  // string member that would benefit from movement anyway.
+  MCParsedAsmOperand() = default;
+  MCParsedAsmOperand(const MCParsedAsmOperand &RHS) = default;
+  MCParsedAsmOperand &operator=(const MCParsedAsmOperand &) = default;
+
+public:
+  virtual ~MCParsedAsmOperand() = default;
+
+  void setConstraint(StringRef C) { Constraint = C.str(); }
+  StringRef getConstraint() { return Constraint; }
+
+  void setMCOperandNum(unsigned OpNum) { MCOperandNum = OpNum; }
+  unsigned getMCOperandNum() { return MCOperandNum; }
+
+  virtual StringRef getSymName() { return StringRef(); }
+  virtual void *getOpDecl() { return nullptr; }
+
+  /// isToken - Is this a token operand?
+  virtual bool isToken() const = 0;
+  /// isImm - Is this an immediate operand?
+  virtual bool isImm() const = 0;
+  /// isReg - Is this a register operand?
+  virtual bool isReg() const = 0;
+  virtual unsigned getReg() const = 0;
+
+  /// isMem - Is this a memory operand?
+  virtual bool isMem() const = 0;
+
+  /// getStartLoc - Get the location of the first token of this operand.
+  virtual SMLoc getStartLoc() const = 0;
+  /// getEndLoc - Get the location of the last token of this operand.
+  virtual SMLoc getEndLoc() const = 0;
+
+  /// needAddressOf - Do we need to emit code to get the address of the
+  /// variable/label?  Only valid when parsing MS-style inline assembly.
+  virtual bool needAddressOf() const { return false; }
+
+  /// isOffsetOf - Do we need to emit code to get the offset of the variable,
+  /// rather than the value of the variable?  Only valid when parsing MS-style
+  /// inline assembly.
+  virtual bool isOffsetOf() const { return false; }
+
+  /// getOffsetOfLoc - Get the location of the offset operator.
+  virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }
+
+  /// print - Print a debug representation of the operand to the given stream.
+  virtual void print(raw_ostream &OS) const = 0;
+
+  /// dump - Print to the debug stream.
+  virtual void dump() const;
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCParsedAsmOperand &MO) {
+  MO.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
diff --git a/linux-x64/clang/include/llvm/MC/MCParser/MCTargetAsmParser.h b/linux-x64/clang/include/llvm/MC/MCParser/MCTargetAsmParser.h
new file mode 100644
index 0000000..d628794
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -0,0 +1,432 @@
+//===- llvm/MC/MCTargetAsmParser.h - Target Assembly Parser -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCTARGETASMPARSER_H
+#define LLVM_MC_MCPARSER_MCTARGETASMPARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/SMLoc.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class MCInst;
+class MCParsedAsmOperand;
+class MCStreamer;
+class MCSubtargetInfo;
+template <typename T> class SmallVectorImpl;
+
+using OperandVector = SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>>;
+
+enum AsmRewriteKind {
+  AOK_Align,          // Rewrite align as .align.
+  AOK_EVEN,           // Rewrite even as .even.
+  AOK_Emit,           // Rewrite _emit as .byte.
+  AOK_Input,          // Rewrite in terms of $N.
+  AOK_Output,         // Rewrite in terms of $N.
+  AOK_SizeDirective,  // Add a sizing directive (e.g., dword ptr).
+  AOK_Label,          // Rewrite local labels.
+  AOK_EndOfStatement, // Add EndOfStatement (e.g., "\n\t").
+  AOK_Skip,           // Skip emission (e.g., offset/type operators).
+  AOK_IntelExpr       // SizeDirective SymDisp [BaseReg + IndexReg * Scale + ImmDisp]
+};
+
+const char AsmRewritePrecedence[] = {
+  2, // AOK_Align
+  2, // AOK_EVEN
+  2, // AOK_Emit
+  3, // AOK_Input
+  3, // AOK_Output
+  5, // AOK_SizeDirective
+  1, // AOK_Label
+  5, // AOK_EndOfStatement
+  2, // AOK_Skip
+  2  // AOK_IntelExpr
+};
+
+// Represent the various parts that make up an Intel expression,
+// used for emitting compound Intel expressions.
+struct IntelExpr {
+  bool NeedBracs;
+  int64_t Imm;
+  StringRef BaseReg;
+  StringRef IndexReg;
+  unsigned Scale;
+
+  IntelExpr(bool needBracs = false) : NeedBracs(needBracs), Imm(0),
+    BaseReg(StringRef()), IndexReg(StringRef()),
+    Scale(1) {}
+  // Compound immediate expression
+  IntelExpr(int64_t imm, bool needBracs) : IntelExpr(needBracs) {
+    Imm = imm;
+  }
+  // [Reg + ImmediateExpression]
+  // We don't bother to emit an immediate expression that evaluates to zero.
+  IntelExpr(StringRef reg, int64_t imm = 0, unsigned scale = 0,
+    bool needBracs = true) :
+    IntelExpr(imm, needBracs) {
+    IndexReg = reg;
+    if (scale)
+      Scale = scale;
+  }
+  // [BaseReg + IndexReg * ScaleExpression + ImmediateExpression]
+  IntelExpr(StringRef baseReg, StringRef indexReg, unsigned scale = 0,
+    int64_t imm = 0, bool needBracs = true) :
+    IntelExpr(indexReg, imm, scale, needBracs) {
+    BaseReg = baseReg;
+  }
+  bool hasBaseReg() const {
+    return BaseReg.size();
+  }
+  bool hasIndexReg() const {
+    return IndexReg.size();
+  }
+  bool hasRegs() const {
+    return hasBaseReg() || hasIndexReg();
+  }
+  bool isValid() const {
+    return (Scale == 1) ||
+           (hasIndexReg() && (Scale == 2 || Scale == 4 || Scale == 8));
+  }
+};
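+
+// For example, the Intel operand "[rax + rbx*4 + 16]" could be described as
+// follows (a sketch; the register names are purely illustrative):
+//
+//   IntelExpr E("rax", "rbx", /*scale=*/4, /*imm=*/16);
+//   // E.NeedBracs is true; E.isValid() holds since Scale is 4 and an
+//   // index register is present.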
+
+struct AsmRewrite {
+  AsmRewriteKind Kind;
+  SMLoc Loc;
+  unsigned Len;
+  int64_t Val;
+  StringRef Label;
+  IntelExpr IntelExp;
+
+public:
+  AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, int64_t val = 0)
+    : Kind(kind), Loc(loc), Len(len), Val(val) {}
+  AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len, StringRef label)
+    : AsmRewrite(kind, loc, len) { Label = label; }
+  AsmRewrite(SMLoc loc, unsigned len, IntelExpr exp)
+    : AsmRewrite(AOK_IntelExpr, loc, len) { IntelExp = exp; }
+};
+
+struct ParseInstructionInfo {
+  SmallVectorImpl<AsmRewrite> *AsmRewrites = nullptr;
+
+  ParseInstructionInfo() = default;
+  ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
+    : AsmRewrites(rewrites) {}
+};
+
+enum OperandMatchResultTy {
+  MatchOperand_Success,  // operand matched successfully
+  MatchOperand_NoMatch,  // operand did not match
+  MatchOperand_ParseFail // operand matched but had errors
+};
+
+// When matching of an assembly instruction fails, there may be multiple
+// encodings that are close to being a match. It's often ambiguous which one
+// the programmer intended to use, so we want to report an error which mentions
+// each of these "near-miss" encodings. This struct contains information about
+// one such encoding, and why it did not match the parsed instruction.
+class NearMissInfo {
+public:
+  enum NearMissKind {
+    NoNearMiss,
+    NearMissOperand,
+    NearMissFeature,
+    NearMissPredicate,
+    NearMissTooFewOperands,
+  };
+
+  // The encoding is valid for the parsed assembly string. This is only used
+  // internally to the table-generated assembly matcher.
+  static NearMissInfo getSuccess() { return NearMissInfo(); }
+
+  // The instruction encoding is not valid because it requires some target
+  // features that are not currently enabled. MissingFeatures has a bit set for
+  // each feature that the encoding needs but which is not enabled.
+  static NearMissInfo getMissedFeature(uint64_t MissingFeatures) {
+    NearMissInfo Result;
+    Result.Kind = NearMissFeature;
+    Result.Features = MissingFeatures;
+    return Result;
+  }
+
+  // The instruction encoding is not valid because the target-specific
+  // predicate function returned an error code. FailureCode is the
+  // target-specific error code returned by the predicate.
+  static NearMissInfo getMissedPredicate(unsigned FailureCode) {
+    NearMissInfo Result;
+    Result.Kind = NearMissPredicate;
+    Result.PredicateError = FailureCode;
+    return Result;
+  }
+
+  // The instruction encoding is not valid because one (and only one) parsed
+  // operand is not of the correct type. OperandError is the error code
+  // relating to the operand class expected by the encoding. OperandClass is
+  // the type of the expected operand. Opcode is the opcode of the encoding.
+  // OperandIndex is the index into the parsed operand list.
+  static NearMissInfo getMissedOperand(unsigned OperandError,
+                                       unsigned OperandClass, unsigned Opcode,
+                                       unsigned OperandIndex) {
+    NearMissInfo Result;
+    Result.Kind = NearMissOperand;
+    Result.MissedOperand.Error = OperandError;
+    Result.MissedOperand.Class = OperandClass;
+    Result.MissedOperand.Opcode = Opcode;
+    Result.MissedOperand.Index = OperandIndex;
+    return Result;
+  }
+
+  // The instruction encoding is not valid because it expects more operands
+  // than were parsed. OperandClass is the class of the expected operand that
+  // was not provided. Opcode is the instruction encoding.
+  static NearMissInfo getTooFewOperands(unsigned OperandClass,
+                                        unsigned Opcode) {
+    NearMissInfo Result;
+    Result.Kind = NearMissTooFewOperands;
+    Result.TooFewOperands.Class = OperandClass;
+    Result.TooFewOperands.Opcode = Opcode;
+    return Result;
+  }
+
+  operator bool() const { return Kind != NoNearMiss; }
+
+  NearMissKind getKind() const { return Kind; }
+
+  // Feature flags required by the instruction, that the current target does
+  // not have.
+  uint64_t getFeatures() const {
+    assert(Kind == NearMissFeature);
+    return Features;
+  }
+  // Error code returned by the target predicate when validating this
+  // instruction encoding.
+  unsigned getPredicateError() const {
+    assert(Kind == NearMissPredicate);
+    return PredicateError;
+  }
+  // MatchClassKind of the operand that we expected to see.
+  unsigned getOperandClass() const {
+    assert(Kind == NearMissOperand || Kind == NearMissTooFewOperands);
+    return MissedOperand.Class;
+  }
+  // Opcode of the encoding we were trying to match.
+  unsigned getOpcode() const {
+    assert(Kind == NearMissOperand || Kind == NearMissTooFewOperands);
+    return MissedOperand.Opcode;
+  }
+  // Error code returned when validating the operand.
+  unsigned getOperandError() const {
+    assert(Kind == NearMissOperand);
+    return MissedOperand.Error;
+  }
+  // Index of the actual operand we were trying to match in the list of parsed
+  // operands.
+  unsigned getOperandIndex() const {
+    assert(Kind == NearMissOperand);
+    return MissedOperand.Index;
+  }
+
+private:
+  NearMissKind Kind;
+
+  // These two structs share a common prefix, so we can safely rely on the fact
+  // that they overlap in the union.
+  struct MissedOpInfo {
+    unsigned Class;
+    unsigned Opcode;
+    unsigned Error;
+    unsigned Index;
+  };
+
+  struct TooFewOperandsInfo {
+    unsigned Class;
+    unsigned Opcode;
+  };
+
+  union {
+    uint64_t Features;
+    unsigned PredicateError;
+    MissedOpInfo MissedOperand;
+    TooFewOperandsInfo TooFewOperands;
+  };
+
+  NearMissInfo() : Kind(NoNearMiss) {}
+};
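+
+// For example, a diagnostic loop over collected near misses might look like
+// this (a sketch; the NearMisses vector and the report helpers are
+// hypothetical, not part of this header):
+//
+//   SmallVector<NearMissInfo, 4> NearMisses;
+//   // ... filled in by the target's matcher ...
+//   for (const NearMissInfo &NM : NearMisses) {
+//     if (NM.getKind() == NearMissInfo::NearMissFeature)
+//       reportMissingFeatures(NM.getFeatures());
+//     else if (NM.getKind() == NearMissInfo::NearMissOperand)
+//       reportOperandError(NM.getOperandIndex(), NM.getOperandError());
+//   }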
+
+/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
+class MCTargetAsmParser : public MCAsmParserExtension {
+public:
+  enum MatchResultTy {
+    Match_InvalidOperand,
+    Match_InvalidTiedOperand,
+    Match_MissingFeature,
+    Match_MnemonicFail,
+    Match_Success,
+    Match_NearMisses,
+    FIRST_TARGET_MATCH_RESULT_TY
+  };
+
+protected: // Can only create subclasses.
+  MCTargetAsmParser(MCTargetOptions const &, const MCSubtargetInfo &STI,
+                    const MCInstrInfo &MII);
+
+  /// Create a copy of STI and return a non-const reference to it.
+  MCSubtargetInfo &copySTI();
+
+  /// AvailableFeatures - The current set of available features.
+  uint64_t AvailableFeatures = 0;
+
+  /// ParsingInlineAsm - Are we parsing ms-style inline assembly?
+  bool ParsingInlineAsm = false;
+
+  /// SemaCallback - The Sema callback implementation.  Must be set when parsing
+  /// ms-style inline assembly.
+  MCAsmParserSemaCallback *SemaCallback;
+
+  /// Set of options which affects instrumentation of inline assembly.
+  MCTargetOptions MCOptions;
+
+  /// Current STI.
+  const MCSubtargetInfo *STI;
+
+  const MCInstrInfo &MII;
+
+public:
+  MCTargetAsmParser(const MCTargetAsmParser &) = delete;
+  MCTargetAsmParser &operator=(const MCTargetAsmParser &) = delete;
+
+  ~MCTargetAsmParser() override;
+
+  const MCSubtargetInfo &getSTI() const;
+
+  uint64_t getAvailableFeatures() const { return AvailableFeatures; }
+  void setAvailableFeatures(uint64_t Value) { AvailableFeatures = Value; }
+
+  bool isParsingInlineAsm() { return ParsingInlineAsm; }
+  void setParsingInlineAsm(bool Value) { ParsingInlineAsm = Value; }
+
+  MCTargetOptions getTargetOptions() const { return MCOptions; }
+
+  void setSemaCallback(MCAsmParserSemaCallback *Callback) {
+    SemaCallback = Callback;
+  }
+
+  virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+                             SMLoc &EndLoc) = 0;
+
+  /// Sets frame register corresponding to the current MachineFunction.
+  virtual void SetFrameRegister(unsigned RegNo) {}
+
+  /// ParseInstruction - Parse one assembly instruction.
+  ///
+  /// The parser is positioned following the instruction name. The target
+  /// specific instruction parser should parse the entire instruction and
+  /// construct the appropriate MCInst, or emit an error. On success, the entire
+  /// line should be parsed up to and including the end-of-statement token. On
+  /// failure, the parser is not required to read to the end of the line.
+  ///
+  /// \param Name - The instruction name.
+  /// \param NameLoc - The source location of the name.
+  /// \param Operands [out] - The list of parsed operands, this returns
+  ///        ownership of them to the caller.
+  /// \return True on failure.
+  virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+                                SMLoc NameLoc, OperandVector &Operands) = 0;
+  virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+                                AsmToken Token, OperandVector &Operands) {
+    return ParseInstruction(Info, Name, Token.getLoc(), Operands);
+  }
+
+  /// ParseDirective - Parse a target specific assembler directive.
+  ///
+  /// The parser is positioned following the directive name.  The target
+  /// specific directive parser should parse the entire directive doing or
+  /// recording any target specific work, or return true and do nothing if the
+  /// directive is not target specific. If the directive is specific for
+  /// the target, the entire line is parsed up to and including the
+  /// end-of-statement token and false is returned.
+  ///
+  /// \param DirectiveID - the identifier token of the directive.
+  virtual bool ParseDirective(AsmToken DirectiveID) = 0;
+
+  /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
+  /// instruction as an actual MCInst and emit it to the specified MCStreamer.
+  /// This returns false on success and returns true on failure to match.
+  ///
+  /// On failure, the target parser is responsible for emitting a diagnostic
+  /// explaining the match failure.
+  virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+                                       OperandVector &Operands, MCStreamer &Out,
+                                       uint64_t &ErrorInfo,
+                                       bool MatchingInlineAsm) = 0;
+
+  /// Allows targets to let registers opt out of clobber lists.
+  virtual bool OmitRegisterFromClobberLists(unsigned RegNo) { return false; }
+
+  /// Allow a target to add special case operand matching for things that
+  /// tblgen doesn't/can't handle effectively. For example, literal
+  /// immediates on ARM. TableGen expects a token operand, but the parser
+  /// will recognize them as immediates.
+  virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+                                              unsigned Kind) {
+    return Match_InvalidOperand;
+  }
+
+  /// Validate the instruction match against any complex target predicates
+  /// before rendering any operands to it.
+  virtual unsigned
+  checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands) {
+    return Match_Success;
+  }
+
+  /// checkTargetMatchPredicate - Validate the instruction match against
+  /// any complex target predicates not expressible via match classes.
+  virtual unsigned checkTargetMatchPredicate(MCInst &Inst) {
+    return Match_Success;
+  }
+
+  virtual void convertToMapAndConstraints(unsigned Kind,
+                                          const OperandVector &Operands) = 0;
+
+  // Return whether this parser uses assignment statements with equals tokens.
+  virtual bool equalIsAsmAssignment() { return true; }
+  // Return whether this start-of-statement identifier is a label.
+  virtual bool isLabel(AsmToken &Token) { return true; }
+  // Return whether this parser accepts a star as the start of a statement.
+  virtual bool starIsStartOfStatement() { return false; }
+
+  virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
+                                            MCSymbolRefExpr::VariantKind,
+                                            MCContext &Ctx) {
+    return nullptr;
+  }
+
+  virtual void onLabelParsed(MCSymbol *Symbol) {}
+
+  /// Ensure that all previously parsed instructions have been emitted to the
+  /// output streamer, if the target does not emit them immediately.
+  virtual void flushPendingInstructions(MCStreamer &Out) {}
+
+  virtual const MCExpr *createTargetUnaryExpr(const MCExpr *E,
+                                              AsmToken::TokenKind OperatorToken,
+                                              MCContext &Ctx) {
+    return nullptr;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCPARSER_MCTARGETASMPARSER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCRegisterInfo.h b/linux-x64/clang/include/llvm/MC/MCRegisterInfo.h
new file mode 100644
index 0000000..c57c9ef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCRegisterInfo.h
@@ -0,0 +1,734 @@
+//===- MC/MCRegisterInfo.h - Target Register Description --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes an abstract interface used to get information about a
+// target machine's register file.  This information is used for a variety of
+// purposes, especially register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCREGISTERINFO_H
+#define LLVM_MC_MCREGISTERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+/// An unsigned integer type large enough to represent all physical registers,
+/// but not necessarily virtual registers.
+using MCPhysReg = uint16_t;
+
+/// MCRegisterClass - Base class of TargetRegisterClass.
+class MCRegisterClass {
+public:
+  using iterator = const MCPhysReg*;
+  using const_iterator = const MCPhysReg*;
+
+  const iterator RegsBegin;
+  const uint8_t *const RegSet;
+  const uint32_t NameIdx;
+  const uint16_t RegsSize;
+  const uint16_t RegSetSize;
+  const uint16_t ID;
+  const uint16_t PhysRegSize;
+  const int8_t CopyCost;
+  const bool Allocatable;
+
+  /// getID() - Return the register class ID number.
+  ///
+  unsigned getID() const { return ID; }
+
+  /// begin/end - Return all of the registers in this class.
+  ///
+  iterator       begin() const { return RegsBegin; }
+  iterator         end() const { return RegsBegin + RegsSize; }
+
+  /// getNumRegs - Return the number of registers in this class.
+  ///
+  unsigned getNumRegs() const { return RegsSize; }
+
+  /// getRegister - Return the specified register in the class.
+  ///
+  unsigned getRegister(unsigned i) const {
+    assert(i < getNumRegs() && "Register number out of range!");
+    return RegsBegin[i];
+  }
+
+  /// contains - Return true if the specified register is included in this
+  /// register class.  This does not include virtual registers.
+  bool contains(unsigned Reg) const {
+    unsigned InByte = Reg % 8;
+    unsigned Byte = Reg / 8;
+    if (Byte >= RegSetSize)
+      return false;
+    return (RegSet[Byte] & (1 << InByte)) != 0;
+  }
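+
+  // For example, for Reg == 19 this tests bit 3 (19 % 8) of RegSet byte 2
+  // (19 / 8), i.e. (RegSet[2] & (1 << 3)) != 0.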
+
+  /// contains - Return true if both registers are in this class.
+  bool contains(unsigned Reg1, unsigned Reg2) const {
+    return contains(Reg1) && contains(Reg2);
+  }
+
+  /// Return the size of the physical register in bytes.
+  unsigned getPhysRegSize() const { return PhysRegSize; }
+  /// Temporary function to allow out-of-tree targets to switch.
+  unsigned getSize() const { return getPhysRegSize(); }
+
+  /// getCopyCost - Return the cost of copying a value between two registers in
+  /// this class. A negative number means the register class is very expensive
+  /// to copy e.g. status flag register classes.
+  int getCopyCost() const { return CopyCost; }
+
+  /// isAllocatable - Return true if this register class may be used to create
+  /// virtual registers.
+  bool isAllocatable() const { return Allocatable; }
+};
+
+/// MCRegisterDesc - This record contains information about a particular
+/// register.  The SubRegs field is a zero terminated array of registers that
+/// are sub-registers of the specific register, e.g. AL, AH are sub-registers
+/// of AX. The SuperRegs field is a zero terminated array of registers that are
+/// super-registers of the specific register, e.g. RAX, EAX, are
+/// super-registers of AX.
+///
+struct MCRegisterDesc {
+  uint32_t Name;      // Printable name for the reg (for debugging)
+  uint32_t SubRegs;   // Sub-register set, described above
+  uint32_t SuperRegs; // Super-register set, described above
+
+  // Offset into MCRI::SubRegIndices of a list of sub-register indices for each
+  // sub-register in SubRegs.
+  uint32_t SubRegIndices;
+
+  // RegUnits - Points to the list of register units. The low 4 bits holds the
+  // Scale, the high bits hold an offset into DiffLists. See MCRegUnitIterator.
+  uint32_t RegUnits;
+
+  /// Index into list with lane mask sequences. The sequence contains a lanemask
+  /// for every register unit.
+  uint16_t RegUnitLaneMasks;
+};
+
+/// MCRegisterInfo base class - We assume that the target defines a static
+/// array of MCRegisterDesc objects that represent all of the machine
+/// registers that the target has.  As such, we simply have to track a pointer
+/// to this array so that we can turn register number into a register
+/// descriptor.
+///
+/// Note this class is designed to be a base class of TargetRegisterInfo, which
+/// is the interface used by codegen. However, specific targets *should never*
+/// specialize this class. MCRegisterInfo should only contain getters to access
+/// TableGen generated physical register data. It must not be extended with
+/// virtual methods.
+///
+class MCRegisterInfo {
+public:
+  using regclass_iterator = const MCRegisterClass *;
+
+  /// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
+  /// performed with a binary search.
+  struct DwarfLLVMRegPair {
+    unsigned FromReg;
+    unsigned ToReg;
+
+    bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
+  };
+
+  /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
+  /// index; a value of -1 in either field marks the range invalid.
+  struct SubRegCoveredBits {
+    uint16_t Offset;
+    uint16_t Size;
+  };
+
+private:
+  const MCRegisterDesc *Desc;                 // Pointer to the descriptor array
+  unsigned NumRegs;                           // Number of entries in the array
+  unsigned RAReg;                             // Return address register
+  unsigned PCReg;                             // Program counter register
+  const MCRegisterClass *Classes;             // Pointer to the regclass array
+  unsigned NumClasses;                        // Number of entries in the array
+  unsigned NumRegUnits;                       // Number of regunits.
+  const MCPhysReg (*RegUnitRoots)[2];         // Pointer to regunit root table.
+  const MCPhysReg *DiffLists;                 // Pointer to the difflists array
+  const LaneBitmask *RegUnitMaskSequences;    // Pointer to lane mask sequences
+                                              // for register units.
+  const char *RegStrings;                     // Pointer to the string table.
+  const char *RegClassStrings;                // Pointer to the class strings.
+  const uint16_t *SubRegIndices;              // Pointer to the subreg lookup
+                                              // array.
+  const SubRegCoveredBits *SubRegIdxRanges;   // Pointer to the subreg covered
+                                              // bit ranges array.
+  unsigned NumSubRegIndices;                  // Number of subreg indices.
+  const uint16_t *RegEncodingTable;           // Pointer to array of register
+                                              // encodings.
+
+  unsigned L2DwarfRegsSize;
+  unsigned EHL2DwarfRegsSize;
+  unsigned Dwarf2LRegsSize;
+  unsigned EHDwarf2LRegsSize;
+  const DwarfLLVMRegPair *L2DwarfRegs;        // LLVM to Dwarf regs mapping
+  const DwarfLLVMRegPair *EHL2DwarfRegs;      // LLVM to Dwarf regs mapping EH
+  const DwarfLLVMRegPair *Dwarf2LRegs;        // Dwarf to LLVM regs mapping
+  const DwarfLLVMRegPair *EHDwarf2LRegs;      // Dwarf to LLVM regs mapping EH
+  DenseMap<unsigned, int> L2SEHRegs;          // LLVM to SEH regs mapping
+  DenseMap<unsigned, int> L2CVRegs;           // LLVM to CV regs mapping
+
+public:
+  /// DiffListIterator - Base iterator class that can traverse the
+  /// differentially encoded register and regunit lists in DiffLists.
+  /// Don't use this class directly, use one of the specialized sub-classes
+  /// defined below.
+  class DiffListIterator {
+    uint16_t Val = 0;
+    const MCPhysReg *List = nullptr;
+
+  protected:
+    /// Create an invalid iterator. Call init() to point to something useful.
+    DiffListIterator() = default;
+
+    /// init - Point the iterator to InitVal, decoding subsequent values from
+    /// DiffList. The iterator will initially point to InitVal, sub-classes are
+    /// responsible for skipping the seed value if it is not part of the list.
+    void init(MCPhysReg InitVal, const MCPhysReg *DiffList) {
+      Val = InitVal;
+      List = DiffList;
+    }
+
+    /// advance - Move to the next list position, return the applied
+    /// differential. This function does not detect the end of the list, that
+    /// is the caller's responsibility (by checking for a 0 return value).
+    unsigned advance() {
+      assert(isValid() && "Cannot move off the end of the list.");
+      MCPhysReg D = *List++;
+      Val += D;
+      return D;
+    }
+
+  public:
+    /// isValid - returns true if this iterator is not yet at the end.
+    bool isValid() const { return List; }
+
+    /// Dereference the iterator to get the value at the current position.
+    unsigned operator*() const { return Val; }
+
+    /// Pre-increment to move to the next position.
+    void operator++() {
+      // The end of the list is encoded as a 0 differential.
+      if (!advance())
+        List = nullptr;
+    }
+  };
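+
+  // As a worked illustration (not data from any real target): the value
+  // sequence {42, 45, 44} is stored as the differentials {3, 65535, 0}.
+  // MCPhysReg arithmetic wraps modulo 2^16, so adding 65535 steps from 45
+  // back to 44, and the trailing 0 differential terminates the list.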
+
+  // These iterators are allowed to sub-class DiffListIterator and access
+  // internal list pointers.
+  friend class MCSubRegIterator;
+  friend class MCSubRegIndexIterator;
+  friend class MCSuperRegIterator;
+  friend class MCRegUnitIterator;
+  friend class MCRegUnitMaskIterator;
+  friend class MCRegUnitRootIterator;
+
+  /// \brief Initialize MCRegisterInfo, called by TableGen
+  /// auto-generated routines. *DO NOT USE*.
+  void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
+                          unsigned PC,
+                          const MCRegisterClass *C, unsigned NC,
+                          const MCPhysReg (*RURoots)[2],
+                          unsigned NRU,
+                          const MCPhysReg *DL,
+                          const LaneBitmask *RUMS,
+                          const char *Strings,
+                          const char *ClassStrings,
+                          const uint16_t *SubIndices,
+                          unsigned NumIndices,
+                          const SubRegCoveredBits *SubIdxRanges,
+                          const uint16_t *RET) {
+    Desc = D;
+    NumRegs = NR;
+    RAReg = RA;
+    PCReg = PC;
+    Classes = C;
+    DiffLists = DL;
+    RegUnitMaskSequences = RUMS;
+    RegStrings = Strings;
+    RegClassStrings = ClassStrings;
+    NumClasses = NC;
+    RegUnitRoots = RURoots;
+    NumRegUnits = NRU;
+    SubRegIndices = SubIndices;
+    NumSubRegIndices = NumIndices;
+    SubRegIdxRanges = SubIdxRanges;
+    RegEncodingTable = RET;
+
+    // Initialize DWARF register mapping variables
+    EHL2DwarfRegs = nullptr;
+    EHL2DwarfRegsSize = 0;
+    L2DwarfRegs = nullptr;
+    L2DwarfRegsSize = 0;
+    EHDwarf2LRegs = nullptr;
+    EHDwarf2LRegsSize = 0;
+    Dwarf2LRegs = nullptr;
+    Dwarf2LRegsSize = 0;
+  }
+
+  /// \brief Used to initialize LLVM register to Dwarf
+  /// register number mapping. Called by TableGen auto-generated routines.
+  /// *DO NOT USE*.
+  void mapLLVMRegsToDwarfRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+                              bool isEH) {
+    if (isEH) {
+      EHL2DwarfRegs = Map;
+      EHL2DwarfRegsSize = Size;
+    } else {
+      L2DwarfRegs = Map;
+      L2DwarfRegsSize = Size;
+    }
+  }
+
+  /// \brief Used to initialize Dwarf register to LLVM
+  /// register number mapping. Called by TableGen auto-generated routines.
+  /// *DO NOT USE*.
+  void mapDwarfRegsToLLVMRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+                              bool isEH) {
+    if (isEH) {
+      EHDwarf2LRegs = Map;
+      EHDwarf2LRegsSize = Size;
+    } else {
+      Dwarf2LRegs = Map;
+      Dwarf2LRegsSize = Size;
+    }
+  }
+
+  /// mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register
+  /// number mapping. By default the SEH register number is just the same
+  /// as the LLVM register number.
+  /// FIXME: TableGen these numbers. Currently this requires target specific
+  /// initialization code.
+  void mapLLVMRegToSEHReg(unsigned LLVMReg, int SEHReg) {
+    L2SEHRegs[LLVMReg] = SEHReg;
+  }
+
+  void mapLLVMRegToCVReg(unsigned LLVMReg, int CVReg) {
+    L2CVRegs[LLVMReg] = CVReg;
+  }
+
+  /// \brief This method should return the register where the return
+  /// address can be found.
+  unsigned getRARegister() const {
+    return RAReg;
+  }
+
+  /// Return the register which is the program counter.
+  unsigned getProgramCounter() const {
+    return PCReg;
+  }
+
+  const MCRegisterDesc &operator[](unsigned RegNo) const {
+    assert(RegNo < NumRegs &&
+           "Attempting to access record for invalid register number!");
+    return Desc[RegNo];
+  }
+
+  /// \brief Provide a get method, equivalent to [], but more useful with a
+  /// pointer to this object.
+  const MCRegisterDesc &get(unsigned RegNo) const {
+    return operator[](RegNo);
+  }
+
+  /// \brief Returns the physical register number of sub-register "Index"
+  /// for physical register RegNo. Return zero if the sub-register does not
+  /// exist.
+  unsigned getSubReg(unsigned Reg, unsigned Idx) const;
+
+  /// \brief Return a super-register of the specified register
+  /// Reg so its sub-register of index SubIdx is Reg.
+  unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+                               const MCRegisterClass *RC) const;
+
+  /// \brief For a given register pair, return the sub-register index
+  /// if the second register is a sub-register of the first. Return zero
+  /// otherwise.
+  unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
+
+  /// \brief Get the size of the bit range covered by a sub-register index.
+  /// If the index isn't contiguous, return the sum of the sizes of its parts.
+  /// If the index is used to access subregisters of different sizes, return -1.
+  unsigned getSubRegIdxSize(unsigned Idx) const;
+
+  /// \brief Get the offset of the bit range covered by a sub-register index.
+  /// If an Offset doesn't make sense (the index isn't contiguous, or is used to
+  /// access sub-registers at different offsets), return -1.
+  unsigned getSubRegIdxOffset(unsigned Idx) const;
+
+  /// \brief Return the human-readable symbolic target-specific name for the
+  /// specified physical register.
+  const char *getName(unsigned RegNo) const {
+    return RegStrings + get(RegNo).Name;
+  }
+
+  /// \brief Return the number of registers this target has (useful for
+  /// sizing arrays holding per register information)
+  unsigned getNumRegs() const {
+    return NumRegs;
+  }
+
+  /// \brief Return the number of sub-register indices
+  /// understood by the target. Index 0 is reserved for the no-op sub-register,
+  /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
+  unsigned getNumSubRegIndices() const {
+    return NumSubRegIndices;
+  }
+
+  /// \brief Return the number of (native) register units in the
+  /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
+  /// can be accessed through MCRegUnitIterator defined below.
+  unsigned getNumRegUnits() const {
+    return NumRegUnits;
+  }
+
+  /// \brief Map a target register to an equivalent dwarf register
+  /// number.  Returns -1 if there is no equivalent value.  The second
+  /// parameter allows targets to use different numberings for EH info and
+  /// debugging info.
+  int getDwarfRegNum(unsigned RegNum, bool isEH) const;
+
+  /// \brief Map a dwarf register back to a target register.
+  int getLLVMRegNum(unsigned RegNum, bool isEH) const;
+
+  /// \brief Map a DWARF EH register back to a target register (same as
+  /// getLLVMRegNum(RegNum, true)) but return -1 if there is no mapping,
+  /// rather than asserting that there must be one.
+  int getLLVMRegNumFromEH(unsigned RegNum) const;
+
+  /// \brief Map a target EH register number to an equivalent DWARF register
+  /// number.
+  int getDwarfRegNumFromDwarfEHRegNum(unsigned RegNum) const;
+
+  /// \brief Map a target register to an equivalent SEH register
+  /// number.  Returns LLVM register number if there is no equivalent value.
+  int getSEHRegNum(unsigned RegNum) const;
+
+  /// \brief Map a target register to an equivalent CodeView register
+  /// number.
+  int getCodeViewRegNum(unsigned RegNum) const;
+
+  regclass_iterator regclass_begin() const { return Classes; }
+  regclass_iterator regclass_end() const { return Classes+NumClasses; }
+  iterator_range<regclass_iterator> regclasses() const {
+    return make_range(regclass_begin(), regclass_end());
+  }
+
+  unsigned getNumRegClasses() const {
+    return (unsigned)(regclass_end()-regclass_begin());
+  }
+
+  /// \brief Returns the register class associated with the enumeration
+  /// value.  See class MCOperandInfo.
+  const MCRegisterClass& getRegClass(unsigned i) const {
+    assert(i < getNumRegClasses() && "Register Class ID out of range");
+    return Classes[i];
+  }
+
+  const char *getRegClassName(const MCRegisterClass *Class) const {
+    return RegClassStrings + Class->NameIdx;
+  }
+
+  /// \brief Returns the encoding for RegNo.
+  uint16_t getEncodingValue(unsigned RegNo) const {
+    assert(RegNo < NumRegs &&
+           "Attempting to get encoding for invalid register number!");
+    return RegEncodingTable[RegNo];
+  }
+
+  /// \brief Returns true if RegB is a sub-register of RegA.
+  bool isSubRegister(unsigned RegA, unsigned RegB) const {
+    return isSuperRegister(RegB, RegA);
+  }
+
+  /// \brief Returns true if RegB is a super-register of RegA.
+  bool isSuperRegister(unsigned RegA, unsigned RegB) const;
+
+  /// \brief Returns true if RegB is a sub-register of RegA or if RegB == RegA.
+  bool isSubRegisterEq(unsigned RegA, unsigned RegB) const {
+    return isSuperRegisterEq(RegB, RegA);
+  }
+
+  /// \brief Returns true if RegB is a super-register of RegA or if
+  /// RegB == RegA.
+  bool isSuperRegisterEq(unsigned RegA, unsigned RegB) const {
+    return RegA == RegB || isSuperRegister(RegA, RegB);
+  }
+
+  /// \brief Returns true if RegB is a super-register or sub-register of RegA
+  /// or if RegB == RegA.
+  bool isSuperOrSubRegisterEq(unsigned RegA, unsigned RegB) const {
+    return isSubRegisterEq(RegA, RegB) || isSuperRegister(RegA, RegB);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//                          Register List Iterators
+//===----------------------------------------------------------------------===//
+
+// MCRegisterInfo provides lists of super-registers, sub-registers, and
+// aliasing registers. Use these iterator classes to traverse the lists.
+
+/// MCSubRegIterator enumerates all sub-registers of Reg.
+/// If IncludeSelf is set, Reg itself is included in the list.
+class MCSubRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+  MCSubRegIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+                     bool IncludeSelf = false) {
+    init(Reg, MCRI->DiffLists + MCRI->get(Reg).SubRegs);
+    // Initially, the iterator points to Reg itself.
+    if (!IncludeSelf)
+      ++*this;
+  }
+};
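+
+// For example, printing the names of all sub-registers of Reg (a sketch;
+// assumes dbgs() from llvm/Support/Debug.h and an initialized MCRI):
+//
+//   for (MCSubRegIterator SubRegs(Reg, MCRI); SubRegs.isValid(); ++SubRegs)
+//     dbgs() << MCRI->getName(*SubRegs) << "\n";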
+
+/// Iterator that enumerates the sub-registers of a Reg and the associated
+/// sub-register indices.
+class MCSubRegIndexIterator {
+  MCSubRegIterator SRIter;
+  const uint16_t *SRIndex;
+
+public:
+  /// Constructs an iterator that traverses subregisters and their
+  /// associated subregister indices.
+  MCSubRegIndexIterator(unsigned Reg, const MCRegisterInfo *MCRI)
+    : SRIter(Reg, MCRI) {
+    SRIndex = MCRI->SubRegIndices + MCRI->get(Reg).SubRegIndices;
+  }
+
+  /// Returns current sub-register.
+  unsigned getSubReg() const {
+    return *SRIter;
+  }
+
+  /// Returns sub-register index of the current sub-register.
+  unsigned getSubRegIndex() const {
+    return *SRIndex;
+  }
+
+  /// Returns true if this iterator is not yet at the end.
+  bool isValid() const { return SRIter.isValid(); }
+
+  /// Moves to the next position.
+  void operator++() {
+    ++SRIter;
+    ++SRIndex;
+  }
+};
+
+/// MCSuperRegIterator enumerates all super-registers of Reg.
+/// If IncludeSelf is set, Reg itself is included in the list.
+class MCSuperRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+  MCSuperRegIterator() = default;
+
+  MCSuperRegIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+                     bool IncludeSelf = false) {
+    init(Reg, MCRI->DiffLists + MCRI->get(Reg).SuperRegs);
+    // Initially, the iterator points to Reg itself.
+    if (!IncludeSelf)
+      ++*this;
+  }
+};
+
+// Definition for isSuperRegister. Put it down here since it needs the
+// iterator defined above in addition to the MCRegisterInfo class itself.
+inline bool MCRegisterInfo::isSuperRegister(unsigned RegA, unsigned RegB) const{
+  for (MCSuperRegIterator I(RegA, this); I.isValid(); ++I)
+    if (*I == RegB)
+      return true;
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+//                               Register Units
+//===----------------------------------------------------------------------===//
+
+// Register units are used to compute register aliasing. Every register has at
+// least one register unit, but it can have more. Two registers overlap if and
+// only if they have a common register unit.
+//
+// A target with a complicated sub-register structure will typically have many
+// fewer register units than actual registers. MCRI::getNumRegUnits() returns
+// the number of register units in the target.
+
+// MCRegUnitIterator enumerates a list of register units for Reg. The list is
+// in ascending numerical order.
+class MCRegUnitIterator : public MCRegisterInfo::DiffListIterator {
+public:
+  /// MCRegUnitIterator - Create an iterator that traverses the register units
+  /// in Reg.
+  MCRegUnitIterator() = default;
+
+  MCRegUnitIterator(unsigned Reg, const MCRegisterInfo *MCRI) {
+    assert(Reg && "Null register has no regunits");
+    // Decode the RegUnits MCRegisterDesc field.
+    unsigned RU = MCRI->get(Reg).RegUnits;
+    unsigned Scale = RU & 15;
+    unsigned Offset = RU >> 4;
+
+    // Initialize the iterator to Reg * Scale, and the List pointer to
+    // DiffLists + Offset.
+    init(Reg * Scale, MCRI->DiffLists + Offset);
+
+    // That may not be a valid unit; we need to advance by one to get the real
+    // unit number. The first differential can be 0, which would normally
+    // terminate the list, but since we know every register has at least one
+    // unit, we can allow a 0 differential here.
+    advance();
+  }
+};
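+
+// As a worked example of the encoding above: a RegUnits value of 0x42
+// decodes to Scale = 0x42 & 15 = 2 and Offset = 0x42 >> 4 = 4, so the
+// iterator starts at Reg * 2 and reads differentials from DiffLists + 4.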
+
+/// MCRegUnitMaskIterator enumerates a list of register units and their
+/// associated lane masks for Reg. The register units are in ascending
+/// numerical order.
+class MCRegUnitMaskIterator {
+  MCRegUnitIterator RUIter;
+  const LaneBitmask *MaskListIter;
+
+public:
+  MCRegUnitMaskIterator() = default;
+
+  /// Constructs an iterator that traverses the register units and their
+  /// associated LaneMasks in Reg.
+  MCRegUnitMaskIterator(unsigned Reg, const MCRegisterInfo *MCRI)
+    : RUIter(Reg, MCRI) {
+      uint16_t Idx = MCRI->get(Reg).RegUnitLaneMasks;
+      MaskListIter = &MCRI->RegUnitMaskSequences[Idx];
+  }
+
+  /// Returns a (RegUnit, LaneMask) pair.
+  std::pair<unsigned,LaneBitmask> operator*() const {
+    return std::make_pair(*RUIter, *MaskListIter);
+  }
+
+  /// Returns true if this iterator is not yet at the end.
+  bool isValid() const { return RUIter.isValid(); }
+
+  /// Moves to the next position.
+  void operator++() {
+    ++MaskListIter;
+    ++RUIter;
+  }
+};
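+
+// For example, visiting every (unit, lane mask) pair of Reg (a sketch):
+//
+//   for (MCRegUnitMaskIterator UI(Reg, MCRI); UI.isValid(); ++UI) {
+//     std::pair<unsigned, LaneBitmask> P = *UI;
+//     // P.first is the register unit, P.second the lane mask it covers.
+//   }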
+
+// Each register unit has one or two root registers. The complete set of
+// registers containing a register unit is the union of the roots and their
+// super-registers. All registers aliasing Unit can be visited like this:
+//
+//   for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
+//     for (MCSuperRegIterator SI(*RI, MCRI, true); SI.isValid(); ++SI)
+//       visit(*SI);
+//    }
+
+/// MCRegUnitRootIterator enumerates the root registers of a register unit.
+class MCRegUnitRootIterator {
+  uint16_t Reg0 = 0;
+  uint16_t Reg1 = 0;
+
+public:
+  MCRegUnitRootIterator() = default;
+
+  MCRegUnitRootIterator(unsigned RegUnit, const MCRegisterInfo *MCRI) {
+    assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit");
+    Reg0 = MCRI->RegUnitRoots[RegUnit][0];
+    Reg1 = MCRI->RegUnitRoots[RegUnit][1];
+  }
+
+  /// \brief Dereference to get the current root register.
+  unsigned operator*() const {
+    return Reg0;
+  }
+
+  /// \brief Check if the iterator is at the end of the list.
+  bool isValid() const {
+    return Reg0;
+  }
+
+  /// \brief Preincrement to move to the next root register.
+  void operator++() {
+    assert(isValid() && "Cannot move off the end of the list.");
+    Reg0 = Reg1;
+    Reg1 = 0;
+  }
+};
+
+/// MCRegAliasIterator enumerates all registers aliasing Reg.  If IncludeSelf is
+/// set, Reg itself is included in the list.  This iterator does not guarantee
+/// any ordering or that entries are unique.
+class MCRegAliasIterator {
+private:
+  unsigned Reg;
+  const MCRegisterInfo *MCRI;
+  bool IncludeSelf;
+
+  MCRegUnitIterator RI;
+  MCRegUnitRootIterator RRI;
+  MCSuperRegIterator SI;
+
+public:
+  MCRegAliasIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+                     bool IncludeSelf)
+    : Reg(Reg), MCRI(MCRI), IncludeSelf(IncludeSelf) {
+    // Initialize the iterators.
+    for (RI = MCRegUnitIterator(Reg, MCRI); RI.isValid(); ++RI) {
+      for (RRI = MCRegUnitRootIterator(*RI, MCRI); RRI.isValid(); ++RRI) {
+        for (SI = MCSuperRegIterator(*RRI, MCRI, true); SI.isValid(); ++SI) {
+          if (!(!IncludeSelf && Reg == *SI))
+            return;
+        }
+      }
+    }
+  }
+
+  bool isValid() const { return RI.isValid(); }
+
+  unsigned operator*() const {
+    assert(SI.isValid() && "Cannot dereference an invalid iterator.");
+    return *SI;
+  }
+
+  void advance() {
+    // Assuming SI is valid.
+    ++SI;
+    if (SI.isValid()) return;
+
+    ++RRI;
+    if (RRI.isValid()) {
+      SI = MCSuperRegIterator(*RRI, MCRI, true);
+      return;
+    }
+
+    ++RI;
+    if (RI.isValid()) {
+      RRI = MCRegUnitRootIterator(*RI, MCRI);
+      SI = MCSuperRegIterator(*RRI, MCRI, true);
+    }
+  }
+
+  void operator++() {
+    assert(isValid() && "Cannot move off the end of the list.");
+    do advance();
+    while (!IncludeSelf && isValid() && *SI == Reg);
+  }
+};
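+
+// Since two registers overlap exactly when they share a register unit, an
+// overlap check can be phrased with this iterator (a sketch):
+//
+//   bool regsOverlap(unsigned A, unsigned B, const MCRegisterInfo *MCRI) {
+//     for (MCRegAliasIterator AI(A, MCRI, /*IncludeSelf=*/true);
+//          AI.isValid(); ++AI)
+//       if (*AI == B)
+//         return true;
+//     return false;
+//   }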
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCREGISTERINFO_H
diff --git a/linux-x64/clang/include/llvm/MC/MCSchedule.h b/linux-x64/clang/include/llvm/MC/MCSchedule.h
new file mode 100644
index 0000000..62f8efd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSchedule.h
@@ -0,0 +1,247 @@
+//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to describe a subtarget's machine model
+// for scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSCHEDULE_H
+#define LLVM_MC_MCSCHEDULE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+struct InstrItinerary;
+class MCSubtargetInfo;
+
+/// Define a kind of processor resource that will be modeled by the scheduler.
+struct MCProcResourceDesc {
+  const char *Name;
+  unsigned NumUnits; // Number of resources of this kind.
+  unsigned SuperIdx; // Index of the resource kind that contains this kind.
+
+  // Number of resources that may be buffered.
+  //
+  // Buffered resources (BufferSize != 0) may be consumed at some indeterminate
+  // cycle after dispatch. This should be used for out-of-order cpus when
+  // instructions that use this resource can be buffered in a reservaton
+  // station.
+  //
+  // Unbuffered resources (BufferSize == 0) always consume their resource some
+  // fixed number of cycles after dispatch. If a resource is unbuffered, then
+  // the scheduler will avoid scheduling instructions with conflicting resources
+  // in the same cycle. This is for in-order CPUs, or the in-order portion of
+  // an out-of-order CPU.
+  int BufferSize;
+
+  // If the resource has sub-units, a pointer to the first element of an array
+  // of `NumUnits` elements containing the ProcResourceIdx of the sub units.
+  // nullptr if the resource does not have sub-units.
+  const unsigned *SubUnitsIdxBegin;
+
+  bool operator==(const MCProcResourceDesc &Other) const {
+    return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
+      && BufferSize == Other.BufferSize;
+  }
+};
+
+/// Identify one of the processor resource kinds consumed by a particular
+/// scheduling class for the specified number of cycles.
+struct MCWriteProcResEntry {
+  uint16_t ProcResourceIdx;
+  uint16_t Cycles;
+
+  bool operator==(const MCWriteProcResEntry &Other) const {
+    return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles;
+  }
+};
+
+/// Specify the latency in cpu cycles for a particular scheduling class and def
+/// index. -1 indicates an invalid latency. Heuristics would typically consider
+/// an instruction with invalid latency to have infinite latency.  Also identify
+/// the WriteResources of this def. When the operand expands to a sequence of
+/// writes, this ID is the last write in the sequence.
+struct MCWriteLatencyEntry {
+  int16_t Cycles;
+  uint16_t WriteResourceID;
+
+  bool operator==(const MCWriteLatencyEntry &Other) const {
+    return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
+  }
+};
+
+/// Specify the number of cycles allowed after instruction issue before a
+/// particular use operand reads its registers. This effectively reduces the
+/// write's latency. Here we allow negative cycles for corner cases where
+/// latency increases. This rule only applies when the entry's WriteResource
+/// matches the write's WriteResource.
+///
+/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
+/// WriteResourceIdx.
+struct MCReadAdvanceEntry {
+  unsigned UseIdx;
+  unsigned WriteResourceID;
+  int Cycles;
+
+  bool operator==(const MCReadAdvanceEntry &Other) const {
+    return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
+      && Cycles == Other.Cycles;
+  }
+};
+
+/// Summarize the scheduling resources required for an instruction of a
+/// particular scheduling class.
+///
+/// Defined as an aggregate struct for creating tables with initializer lists.
+struct MCSchedClassDesc {
+  static const unsigned short InvalidNumMicroOps = (1U << 14) - 1;
+  static const unsigned short VariantNumMicroOps = InvalidNumMicroOps - 1;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  const char* Name;
+#endif
+  uint16_t NumMicroOps : 14;
+  bool     BeginGroup : 1;
+  bool     EndGroup : 1;
+  uint16_t WriteProcResIdx; // First index into WriteProcResTable.
+  uint16_t NumWriteProcResEntries;
+  uint16_t WriteLatencyIdx; // First index into WriteLatencyTable.
+  uint16_t NumWriteLatencyEntries;
+  uint16_t ReadAdvanceIdx; // First index into ReadAdvanceTable.
+  uint16_t NumReadAdvanceEntries;
+
+  bool isValid() const {
+    return NumMicroOps != InvalidNumMicroOps;
+  }
+  bool isVariant() const {
+    return NumMicroOps == VariantNumMicroOps;
+  }
+};
+
+/// Machine model for scheduling, bundling, and heuristics.
+///
+/// The machine model directly provides basic information about the
+/// microarchitecture to the scheduler in the form of properties. It also
+/// optionally refers to scheduler resource tables and itinerary
+/// tables. Scheduler resource tables model the latency and cost for each
+/// instruction type. Itinerary tables are an independent mechanism that
+/// provides a detailed reservation table describing each cycle of instruction
+/// execution. Subtargets may define any or all of the above categories of data
+/// depending on the type of CPU and selected scheduler.
+struct MCSchedModel {
+  // IssueWidth is the maximum number of instructions that may be scheduled in
+  // the same per-cycle group.
+  unsigned IssueWidth;
+  static const unsigned DefaultIssueWidth = 1;
+
+  // MicroOpBufferSize is the number of micro-ops that the processor may buffer
+  // for out-of-order execution.
+  //
+  // "0" means operations that are not ready in this cycle are not considered
+  // for scheduling (they go in the pending queue). Latency is paramount. This
+  // may be more efficient if many instructions are pending in a schedule.
+  //
+  // "1" means all instructions are considered for scheduling regardless of
+  // whether they are ready in this cycle. Latency still causes issue stalls,
+  // but we balance those stalls against other heuristics.
+  //
+  // "> 1" means the processor is out-of-order. This is a machine independent
+  // estimate of highly machine specific characteristics such as the register
+  // renaming pool and reorder buffer.
+  unsigned MicroOpBufferSize;
+  static const unsigned DefaultMicroOpBufferSize = 0;
+
+  // LoopMicroOpBufferSize is the number of micro-ops that the processor may
+  // buffer for optimized loop execution. More generally, this represents the
+  // optimal number of micro-ops in a loop body. A loop may be partially
+  // unrolled to bring the count of micro-ops in the loop body closer to this
+  // number.
+  unsigned LoopMicroOpBufferSize;
+  static const unsigned DefaultLoopMicroOpBufferSize = 0;
+
+  // LoadLatency is the expected latency of load instructions.
+  unsigned LoadLatency;
+  static const unsigned DefaultLoadLatency = 4;
+
+  // HighLatency is the expected latency of "very high latency" operations.
+  // See TargetInstrInfo::isHighLatencyDef().
+  // By default, this is set to an arbitrarily high number of cycles
+  // likely to have some impact on scheduling heuristics.
+  unsigned HighLatency;
+  static const unsigned DefaultHighLatency = 10;
+
+  // MispredictPenalty is the typical number of extra cycles the processor
+  // takes to recover from a branch misprediction.
+  unsigned MispredictPenalty;
+  static const unsigned DefaultMispredictPenalty = 10;
+
+  bool PostRAScheduler; // default value is false
+
+  bool CompleteModel;
+
+  unsigned ProcID;
+  const MCProcResourceDesc *ProcResourceTable;
+  const MCSchedClassDesc *SchedClassTable;
+  unsigned NumProcResourceKinds;
+  unsigned NumSchedClasses;
+  // Instruction itinerary tables used by InstrItineraryData.
+  friend class InstrItineraryData;
+  const InstrItinerary *InstrItineraries;
+
+  unsigned getProcessorID() const { return ProcID; }
+
+  /// Does this machine model include instruction-level scheduling?
+  bool hasInstrSchedModel() const { return SchedClassTable; }
+
+  /// Return true if this machine model has data for all instructions with a
+  /// scheduling class (itinerary class or SchedRW list).
+  bool isComplete() const { return CompleteModel; }
+
+  /// Return true if the machine supports out-of-order execution.
+  bool isOutOfOrder() const { return MicroOpBufferSize > 1; }
+
+  unsigned getNumProcResourceKinds() const {
+    return NumProcResourceKinds;
+  }
+
+  const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
+    assert(hasInstrSchedModel() && "No scheduling machine model");
+
+    assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
+    return &ProcResourceTable[ProcResourceIdx];
+  }
+
+  const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
+    assert(hasInstrSchedModel() && "No scheduling machine model");
+
+    assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
+    return &SchedClassTable[SchedClassIdx];
+  }
+
+  /// Returns the latency value for the scheduling class.
+  static int computeInstrLatency(const MCSubtargetInfo &STI,
+                                 const MCSchedClassDesc &SCDesc);
+
+  /// Returns the reciprocal throughput information from an MCSchedClassDesc.
+  static Optional<double>
+  getReciprocalThroughput(const MCSubtargetInfo &STI,
+                          const MCSchedClassDesc &SCDesc);
+
+  /// Returns the default-initialized model.
+  static const MCSchedModel &GetDefaultSchedModel() { return Default; }
+  static const MCSchedModel Default;
+};
+
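+// Editorial sketch (not part of the upstream header): given a resolved
+// scheduling class ID, a client might query the model as follows; STI and
+// SchedClassID are hypothetical, with STI a fully initialized
+// MCSubtargetInfo.
+//
+//   const MCSchedModel &SM = STI.getSchedModel();
+//   if (SM.hasInstrSchedModel()) {
+//     const MCSchedClassDesc *SC = SM.getSchedClassDesc(SchedClassID);
+//     if (SC->isValid() && !SC->isVariant()) {
+//       int Latency = MCSchedModel::computeInstrLatency(STI, *SC);
+//       (void)Latency;
+//     }
+//   }
+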
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCSection.h b/linux-x64/clang/include/llvm/MC/MCSection.h
new file mode 100644
index 0000000..2771b1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSection.h
@@ -0,0 +1,187 @@
+//===- MCSection.h - Machine Code Sections ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSection class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTION_H
+#define LLVM_MC_MCSECTION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/SectionKind.h"
+#include <cassert>
+#include <utility>
+
+namespace llvm {
+
+class MCAsmInfo;
+class MCContext;
+class MCExpr;
+class MCSymbol;
+class raw_ostream;
+class Triple;
+
+template <> struct ilist_alloc_traits<MCFragment> {
+  static void deleteNode(MCFragment *V);
+};
+
+/// Instances of this class represent a uniqued identifier for a section in the
+/// current translation unit.  The MCContext class uniques and creates these.
+class MCSection {
+public:
+  enum SectionVariant { SV_COFF = 0, SV_ELF, SV_MachO, SV_Wasm };
+
+  /// \brief Express the state of bundle locked groups while emitting code.
+  enum BundleLockStateType {
+    NotBundleLocked,
+    BundleLocked,
+    BundleLockedAlignToEnd
+  };
+
+  using FragmentListType = iplist<MCFragment>;
+
+  using const_iterator = FragmentListType::const_iterator;
+  using iterator = FragmentListType::iterator;
+
+  using const_reverse_iterator = FragmentListType::const_reverse_iterator;
+  using reverse_iterator = FragmentListType::reverse_iterator;
+
+private:
+  MCSymbol *Begin;
+  MCSymbol *End = nullptr;
+  /// The alignment requirement of this section.
+  unsigned Alignment = 1;
+  /// The section index in the assembler's section list.
+  unsigned Ordinal = 0;
+  /// The index of this section in the layout order.
+  unsigned LayoutOrder;
+
+  /// \brief Keeping track of bundle-locked state.
+  BundleLockStateType BundleLockState = NotBundleLocked;
+
+  /// \brief Current nesting depth of bundle_lock directives.
+  unsigned BundleLockNestingDepth = 0;
+
+  /// \brief We've seen a bundle_lock directive but not its first instruction
+  /// yet.
+  bool BundleGroupBeforeFirstInst : 1;
+
+  /// Whether this section has had instructions emitted into it.
+  bool HasInstructions : 1;
+
+  bool IsRegistered : 1;
+
+  MCDummyFragment DummyFragment;
+
+  FragmentListType Fragments;
+
+  /// Mapping from subsection number to insertion point for subsection numbers
+  /// below that number.
+  SmallVector<std::pair<unsigned, MCFragment *>, 1> SubsectionFragmentMap;
+
+protected:
+  SectionVariant Variant;
+  SectionKind Kind;
+
+  MCSection(SectionVariant V, SectionKind K, MCSymbol *Begin);
+  ~MCSection();
+
+public:
+  MCSection(const MCSection &) = delete;
+  MCSection &operator=(const MCSection &) = delete;
+
+  SectionKind getKind() const { return Kind; }
+
+  SectionVariant getVariant() const { return Variant; }
+
+  MCSymbol *getBeginSymbol() { return Begin; }
+  const MCSymbol *getBeginSymbol() const {
+    return const_cast<MCSection *>(this)->getBeginSymbol();
+  }
+  void setBeginSymbol(MCSymbol *Sym) {
+    assert(!Begin);
+    Begin = Sym;
+  }
+  MCSymbol *getEndSymbol(MCContext &Ctx);
+  bool hasEnded() const;
+
+  unsigned getAlignment() const { return Alignment; }
+  void setAlignment(unsigned Value) { Alignment = Value; }
+
+  unsigned getOrdinal() const { return Ordinal; }
+  void setOrdinal(unsigned Value) { Ordinal = Value; }
+
+  unsigned getLayoutOrder() const { return LayoutOrder; }
+  void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
+  BundleLockStateType getBundleLockState() const { return BundleLockState; }
+  void setBundleLockState(BundleLockStateType NewState);
+  bool isBundleLocked() const { return BundleLockState != NotBundleLocked; }
+
+  bool isBundleGroupBeforeFirstInst() const {
+    return BundleGroupBeforeFirstInst;
+  }
+  void setBundleGroupBeforeFirstInst(bool IsFirst) {
+    BundleGroupBeforeFirstInst = IsFirst;
+  }
+
+  bool hasInstructions() const { return HasInstructions; }
+  void setHasInstructions(bool Value) { HasInstructions = Value; }
+
+  bool isRegistered() const { return IsRegistered; }
+  void setIsRegistered(bool Value) { IsRegistered = Value; }
+
+  MCSection::FragmentListType &getFragmentList() { return Fragments; }
+  const MCSection::FragmentListType &getFragmentList() const {
+    return const_cast<MCSection *>(this)->getFragmentList();
+  }
+
+  /// Support for MCFragment::getNextNode().
+  static FragmentListType MCSection::*getSublistAccess(MCFragment *) {
+    return &MCSection::Fragments;
+  }
+
+  const MCDummyFragment &getDummyFragment() const { return DummyFragment; }
+  MCDummyFragment &getDummyFragment() { return DummyFragment; }
+
+  iterator begin() { return Fragments.begin(); }
+  const_iterator begin() const { return Fragments.begin(); }
+
+  iterator end() { return Fragments.end(); }
+  const_iterator end() const { return Fragments.end(); }
+
+  reverse_iterator rbegin() { return Fragments.rbegin(); }
+  const_reverse_iterator rbegin() const { return Fragments.rbegin(); }
+
+  reverse_iterator rend() { return Fragments.rend(); }
+  const_reverse_iterator rend() const  { return Fragments.rend(); }
+
+  MCSection::iterator getSubsectionInsertionPoint(unsigned Subsection);
+
+  void dump() const;
+
+  virtual void PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
+                                    raw_ostream &OS,
+                                    const MCExpr *Subsection) const = 0;
+
+  /// Return true if a .align directive should use "optimized nops" to fill
+  /// instead of 0s.
+  virtual bool UseCodeAlign() const = 0;
+
+  /// Check whether this section is "virtual", that is, has no actual object
+  /// file contents.
+  virtual bool isVirtualSection() const = 0;
+};
+
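+// Editorial sketch (not part of the upstream header): the begin()/end()
+// accessors expose the fragment list, so a section's fragments can be walked
+// with a range-based for loop:
+//
+//   void walkFragments(const MCSection &Sec) {
+//     for (const MCFragment &F : Sec)
+//       (void)F.getKind(); // Dispatch on the fragment kind here.
+//   }
+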
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSECTION_H
diff --git a/linux-x64/clang/include/llvm/MC/MCSectionCOFF.h b/linux-x64/clang/include/llvm/MC/MCSectionCOFF.h
new file mode 100644
index 0000000..24b9f88
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSectionCOFF.h
@@ -0,0 +1,100 @@
+//===- MCSectionCOFF.h - COFF Machine Code Sections -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionCOFF class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONCOFF_H
+#define LLVM_MC_MCSECTIONCOFF_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/SectionKind.h"
+#include <cassert>
+
+namespace llvm {
+
+class MCSymbol;
+
+/// This represents a section on Windows
+class MCSectionCOFF final : public MCSection {
+  // The memory for this string is stored in the same MCContext as *this.
+  StringRef SectionName;
+
+  // FIXME: The following fields should not be mutable, but are for now so the
+  // asm parser can honor the .linkonce directive.
+
+  /// This is the Characteristics field of a section, drawn from the enums
+  /// below.
+  mutable unsigned Characteristics;
+
+  /// The unique IDs used with the .pdata and .xdata sections created internally
+  /// by the assembler. This ID is used to ensure that for every .text section,
+  /// there is exactly one .pdata and one .xdata section, which is required by
+  /// the Microsoft incremental linker. This data is mutable because this ID is
+  /// not notionally part of the section.
+  mutable unsigned WinCFISectionID = ~0U;
+
+  /// The COMDAT symbol of this section. Only valid if this is a COMDAT section.
+  /// Two COMDAT sections are merged if they have the same COMDAT symbol.
+  MCSymbol *COMDATSymbol;
+
+  /// This is the Selection field for the section symbol, if it is a COMDAT
+  /// section (Characteristics & IMAGE_SCN_LNK_COMDAT) != 0
+  mutable int Selection;
+
+private:
+  friend class MCContext;
+  MCSectionCOFF(StringRef Section, unsigned Characteristics,
+                MCSymbol *COMDATSymbol, int Selection, SectionKind K,
+                MCSymbol *Begin)
+      : MCSection(SV_COFF, K, Begin), SectionName(Section),
+        Characteristics(Characteristics), COMDATSymbol(COMDATSymbol),
+        Selection(Selection) {
+    assert((Characteristics & 0x00F00000) == 0 &&
+           "alignment must not be set upon section creation");
+  }
+
+public:
+  ~MCSectionCOFF();
+
+  /// Decides whether a '.section' directive should be printed before the
+  /// section name.
+  bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+  StringRef getSectionName() const { return SectionName; }
+  unsigned getCharacteristics() const { return Characteristics; }
+  MCSymbol *getCOMDATSymbol() const { return COMDATSymbol; }
+  int getSelection() const { return Selection; }
+
+  void setSelection(int Selection) const;
+
+  void PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
+                            raw_ostream &OS,
+                            const MCExpr *Subsection) const override;
+  bool UseCodeAlign() const override;
+  bool isVirtualSection() const override;
+
+  unsigned getOrAssignWinCFISectionID(unsigned *NextID) const {
+    if (WinCFISectionID == ~0U)
+      WinCFISectionID = (*NextID)++;
+    return WinCFISectionID;
+  }
+
+  static bool isImplicitlyDiscardable(StringRef Name) {
+    return Name.startswith(".debug");
+  }
+
+  static bool classof(const MCSection *S) { return S->getVariant() == SV_COFF; }
+};
+
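+// Editorial sketch (not part of the upstream header): classof() lets LLVM's
+// RTTI recover the COFF-specific interface from a plain MCSection pointer;
+// Sec is a hypothetical 'const MCSection *'.
+//
+//   if (const auto *CS = dyn_cast<MCSectionCOFF>(Sec)) {
+//     unsigned Flags = CS->getCharacteristics();
+//     (void)Flags;
+//   }
+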
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSECTIONCOFF_H
diff --git a/linux-x64/clang/include/llvm/MC/MCSectionELF.h b/linux-x64/clang/include/llvm/MC/MCSectionELF.h
new file mode 100644
index 0000000..00c289c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSectionELF.h
@@ -0,0 +1,99 @@
+//===- MCSectionELF.h - ELF Machine Code Sections ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionELF class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONELF_H
+#define LLVM_MC_MCSECTIONELF_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/SectionKind.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// This represents a section on Linux, most Unix variants, and some
+/// bare-metal systems.
+class MCSectionELF final : public MCSection {
+  /// This is the name of the section.  The referenced memory is owned by
+  /// TargetLoweringObjectFileELF's ELFUniqueMap.
+  StringRef SectionName;
+
+  /// This is the sh_type field of a section, drawn from the enums below.
+  unsigned Type;
+
+  /// This is the sh_flags field of a section, drawn from the enums below.
+  unsigned Flags;
+
+  unsigned UniqueID;
+
+  /// The size of each entry in this section. This size only makes sense for
+  /// sections that contain fixed-sized entries. If a section does not contain
+  /// fixed-sized entries, 'EntrySize' will be 0.
+  unsigned EntrySize;
+
+  const MCSymbolELF *Group;
+
+  /// sh_info for SHF_LINK_ORDER (can be null).
+  const MCSymbol *AssociatedSymbol;
+
+private:
+  friend class MCContext;
+
+  MCSectionELF(StringRef Section, unsigned type, unsigned flags, SectionKind K,
+               unsigned entrySize, const MCSymbolELF *group, unsigned UniqueID,
+               MCSymbol *Begin, const MCSymbolELF *AssociatedSymbol)
+      : MCSection(SV_ELF, K, Begin), SectionName(Section), Type(type),
+        Flags(flags), UniqueID(UniqueID), EntrySize(entrySize), Group(group),
+        AssociatedSymbol(AssociatedSymbol) {
+    if (Group)
+      Group->setIsSignature();
+  }
+
+  void setSectionName(StringRef Name) { SectionName = Name; }
+
+public:
+  ~MCSectionELF();
+
+  /// Decides whether a '.section' directive should be printed before the
+  /// section name
+  bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+  StringRef getSectionName() const { return SectionName; }
+  unsigned getType() const { return Type; }
+  unsigned getFlags() const { return Flags; }
+  unsigned getEntrySize() const { return EntrySize; }
+  void setFlags(unsigned F) { Flags = F; }
+  const MCSymbolELF *getGroup() const { return Group; }
+
+  void PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
+                            raw_ostream &OS,
+                            const MCExpr *Subsection) const override;
+  bool UseCodeAlign() const override;
+  bool isVirtualSection() const override;
+
+  bool isUnique() const { return UniqueID != ~0U; }
+  unsigned getUniqueID() const { return UniqueID; }
+
+  const MCSection *getAssociatedSection() const { return &AssociatedSymbol->getSection(); }
+  const MCSymbol *getAssociatedSymbol() const { return AssociatedSymbol; }
+
+  static bool classof(const MCSection *S) {
+    return S->getVariant() == SV_ELF;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSECTIONELF_H
diff --git a/linux-x64/clang/include/llvm/MC/MCSectionMachO.h b/linux-x64/clang/include/llvm/MC/MCSectionMachO.h
new file mode 100644
index 0000000..89db09c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSectionMachO.h
@@ -0,0 +1,92 @@
+//===- MCSectionMachO.h - MachO Machine Code Sections -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionMachO class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONMACHO_H
+#define LLVM_MC_MCSECTIONMACHO_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/MCSection.h"
+
+namespace llvm {
+
+/// This represents a section on a Mach-O system (used by Mac OS X).  On a Mac
+/// system, these are also described in /usr/include/mach-o/loader.h.
+class MCSectionMachO final : public MCSection {
+  char SegmentName[16];  // Not necessarily null terminated!
+  char SectionName[16];  // Not necessarily null terminated!
+
+  /// This is the SECTION_TYPE and SECTION_ATTRIBUTES field of a section, drawn
+  /// from the enums below.
+  unsigned TypeAndAttributes;
+
+  /// The 'reserved2' field of a section, used to represent the size of stubs,
+  /// for example.
+  unsigned Reserved2;
+
+  MCSectionMachO(StringRef Segment, StringRef Section, unsigned TAA,
+                 unsigned reserved2, SectionKind K, MCSymbol *Begin);
+  friend class MCContext;
+public:
+
+  StringRef getSegmentName() const {
+    // SegmentName is not necessarily null terminated!
+    if (SegmentName[15])
+      return StringRef(SegmentName, 16);
+    return StringRef(SegmentName);
+  }
+  StringRef getSectionName() const {
+    // SectionName is not necessarily null terminated!
+    if (SectionName[15])
+      return StringRef(SectionName, 16);
+    return StringRef(SectionName);
+  }
+
+  unsigned getTypeAndAttributes() const { return TypeAndAttributes; }
+  unsigned getStubSize() const { return Reserved2; }
+
+  MachO::SectionType getType() const {
+    return static_cast<MachO::SectionType>(TypeAndAttributes &
+                                           MachO::SECTION_TYPE);
+  }
+  bool hasAttribute(unsigned Value) const {
+    return (TypeAndAttributes & Value) != 0;
+  }
+
+  /// Parse the section specifier indicated by "Spec". This is a string that can
+  /// appear after a .section directive in a mach-o flavored .s file.  If
+  /// successful, this fills in the specified Out parameters and returns an
+  /// empty string.  When an invalid section specifier is present, this returns
+  /// a string indicating the problem. If no TAA was parsed, TAA is not altered,
+  /// and TAAParsed becomes false.
+  static std::string ParseSectionSpecifier(StringRef Spec,       // In.
+                                           StringRef &Segment,   // Out.
+                                           StringRef &Section,   // Out.
+                                           unsigned  &TAA,       // Out.
+                                           bool      &TAAParsed, // Out.
+                                           unsigned  &StubSize); // Out.
+
+  void PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
+                            raw_ostream &OS,
+                            const MCExpr *Subsection) const override;
+  bool UseCodeAlign() const override;
+  bool isVirtualSection() const override;
+
+  static bool classof(const MCSection *S) {
+    return S->getVariant() == SV_MachO;
+  }
+};
+
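+// Editorial sketch (not part of the upstream header): parsing a Mach-O
+// section specifier of the form accepted by a .section directive might look
+// like:
+//
+//   StringRef Segment, Section;
+//   unsigned TAA = 0, StubSize = 0;
+//   bool TAAParsed = false;
+//   std::string Err = MCSectionMachO::ParseSectionSpecifier(
+//       "__TEXT,__text,regular,pure_instructions", Segment, Section, TAA,
+//       TAAParsed, StubSize);
+//   if (!Err.empty()) {
+//     // Err describes what is wrong with the specifier.
+//   }
+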
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCSectionWasm.h b/linux-x64/clang/include/llvm/MC/MCSectionWasm.h
new file mode 100644
index 0000000..ab4cd7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSectionWasm.h
@@ -0,0 +1,88 @@
+//===- MCSectionWasm.h - Wasm Machine Code Sections -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionWasm class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONWASM_H
+#define LLVM_MC_MCSECTIONWASM_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// This represents a section on wasm.
+class MCSectionWasm final : public MCSection {
+  /// This is the name of the section.  The referenced memory is owned by
+  /// TargetLoweringObjectFileWasm's WasmUniqueMap.
+  StringRef SectionName;
+
+  unsigned UniqueID;
+
+  const MCSymbolWasm *Group;
+
+  // The offset of the MC function/data section in the wasm code/data section.
+  // For data relocations the offset is relative to start of the data payload
+  // itself and does not include the size of the section header.
+  uint64_t SectionOffset = 0;
+
+  // For data sections, this is the index of the corresponding wasm data
+  // segment.
+  uint32_t SegmentIndex = 0;
+
+  friend class MCContext;
+  MCSectionWasm(StringRef Section, SectionKind K, const MCSymbolWasm *group,
+                unsigned UniqueID, MCSymbol *Begin)
+      : MCSection(SV_Wasm, K, Begin), SectionName(Section), UniqueID(UniqueID),
+        Group(group) {}
+
+  void setSectionName(StringRef Name) { SectionName = Name; }
+
+public:
+  ~MCSectionWasm();
+
+  /// Decides whether a '.section' directive should be printed before the
+  /// section name
+  bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+  StringRef getSectionName() const { return SectionName; }
+  const MCSymbolWasm *getGroup() const { return Group; }
+
+  void PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
+                            raw_ostream &OS,
+                            const MCExpr *Subsection) const override;
+  bool UseCodeAlign() const override;
+  bool isVirtualSection() const override;
+
+  bool isWasmData() const {
+    return Kind.isGlobalWriteableData() || Kind.isReadOnly();
+  }
+
+  bool isUnique() const { return UniqueID != ~0U; }
+  unsigned getUniqueID() const { return UniqueID; }
+
+  uint64_t getSectionOffset() const { return SectionOffset; }
+  void setSectionOffset(uint64_t Offset) { SectionOffset = Offset; }
+
+  uint32_t getSegmentIndex() const { return SegmentIndex; }
+  void setSegmentIndex(uint32_t Index) { SegmentIndex = Index; }
+
+  static bool classof(const MCSection *S) { return S->getVariant() == SV_Wasm; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCStreamer.h b/linux-x64/clang/include/llvm/MC/MCStreamer.h
new file mode 100644
index 0000000..582a836
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCStreamer.h
@@ -0,0 +1,975 @@
+//===- MCStreamer.h - High-level Streaming Machine Code Output --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCStreamer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSTREAMER_H
+#define LLVM_MC_MCSTREAMER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCWinEH.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/TargetParser.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AssemblerConstantPools;
+class formatted_raw_ostream;
+class MCAsmBackend;
+class MCCodeEmitter;
+struct MCCodePaddingContext;
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCInstPrinter;
+class MCSection;
+class MCStreamer;
+class MCSymbolRefExpr;
+class MCSubtargetInfo;
+class raw_ostream;
+class Twine;
+
+using MCSectionSubPair = std::pair<MCSection *, const MCExpr *>;
+
+/// Target specific streamer interface. This is used so that targets can
+/// implement support for target specific assembly directives.
+///
+/// If target foo wants to use this, it should implement 3 classes:
+/// * FooTargetStreamer : public MCTargetStreamer
+/// * FooTargetAsmStreamer : public FooTargetStreamer
+/// * FooTargetELFStreamer : public FooTargetStreamer
+///
+/// FooTargetStreamer should have a pure virtual method for each directive. For
+/// example, for a ".bar symbol_name" directive, it should have
+/// virtual void emitBar(const MCSymbol &Symbol) = 0;
+///
+/// The FooTargetAsmStreamer and FooTargetELFStreamer classes implement the
+/// method. The assembly streamer just prints ".bar symbol_name". The object
+/// streamer does whatever is needed to implement .bar in the object file.
+///
+/// In the assembly printer and parser the target streamer can be used by
+/// calling getTargetStreamer and casting it to FooTargetStreamer:
+///
+/// MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+/// FooTargetStreamer &ATS = static_cast<FooTargetStreamer &>(TS);
+///
+/// The base classes FooTargetAsmStreamer and FooTargetELFStreamer should
+/// *never* be treated differently. Callers should always talk to a
+/// FooTargetStreamer.
+class MCTargetStreamer {
+protected:
+  MCStreamer &Streamer;
+
+public:
+  MCTargetStreamer(MCStreamer &S);
+  virtual ~MCTargetStreamer();
+
+  MCStreamer &getStreamer() { return Streamer; }
+
+  // Allow a target to add behavior to the EmitLabel of MCStreamer.
+  virtual void emitLabel(MCSymbol *Symbol);
+  // Allow a target to add behavior to the emitAssignment of MCStreamer.
+  virtual void emitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+
+  virtual void prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS,
+                              const MCInst &Inst, const MCSubtargetInfo &STI);
+
+  virtual void emitDwarfFileDirective(StringRef Directive);
+
+  /// Update streamer for a new active section.
+  ///
+  /// This is called by PopSection and SwitchSection, if the current
+  /// section changes.
+  virtual void changeSection(const MCSection *CurSection, MCSection *Section,
+                             const MCExpr *SubSection, raw_ostream &OS);
+
+  virtual void emitValue(const MCExpr *Value);
+
+  virtual void finish();
+};
+
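+// Editorial sketch (not part of the upstream header): the three-class pattern
+// described above, for the hypothetical ".bar" directive:
+//
+//   class FooTargetStreamer : public MCTargetStreamer {
+//   public:
+//     FooTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+//     virtual void emitBar(const MCSymbol &Symbol) = 0;
+//   };
+//
+//   class FooTargetAsmStreamer : public FooTargetStreamer {
+//     formatted_raw_ostream &OS;
+//   public:
+//     FooTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS)
+//         : FooTargetStreamer(S), OS(OS) {}
+//     void emitBar(const MCSymbol &Symbol) override {
+//       OS << "\t.bar\t" << Symbol.getName() << '\n';
+//     }
+//   };
+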
+// FIXME: declared here because it is used from
+// lib/CodeGen/AsmPrinter/ARMException.cpp.
+class ARMTargetStreamer : public MCTargetStreamer {
+public:
+  ARMTargetStreamer(MCStreamer &S);
+  ~ARMTargetStreamer() override;
+
+  virtual void emitFnStart();
+  virtual void emitFnEnd();
+  virtual void emitCantUnwind();
+  virtual void emitPersonality(const MCSymbol *Personality);
+  virtual void emitPersonalityIndex(unsigned Index);
+  virtual void emitHandlerData();
+  virtual void emitSetFP(unsigned FpReg, unsigned SpReg,
+                         int64_t Offset = 0);
+  virtual void emitMovSP(unsigned Reg, int64_t Offset = 0);
+  virtual void emitPad(int64_t Offset);
+  virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+                           bool isVector);
+  virtual void emitUnwindRaw(int64_t StackOffset,
+                             const SmallVectorImpl<uint8_t> &Opcodes);
+
+  virtual void switchVendor(StringRef Vendor);
+  virtual void emitAttribute(unsigned Attribute, unsigned Value);
+  virtual void emitTextAttribute(unsigned Attribute, StringRef String);
+  virtual void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
+                                    StringRef StringValue = "");
+  virtual void emitFPU(unsigned FPU);
+  virtual void emitArch(ARM::ArchKind Arch);
+  virtual void emitArchExtension(unsigned ArchExt);
+  virtual void emitObjectArch(ARM::ArchKind Arch);
+  void emitTargetAttributes(const MCSubtargetInfo &STI);
+  virtual void finishAttributeSection();
+  virtual void emitInst(uint32_t Inst, char Suffix = '\0');
+
+  virtual void AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE);
+
+  virtual void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value);
+
+  void finish() override;
+
+  /// Reset any state between object emissions, i.e. the equivalent of
+  /// MCStreamer's reset method.
+  virtual void reset();
+
+  /// Callback used to implement the ldr= pseudo.
+  /// Add a new entry to the constant pool for the current section and return an
+  /// MCExpr that can be used to refer to the constant pool location.
+  const MCExpr *addConstantPoolEntry(const MCExpr *, SMLoc Loc);
+
+  /// Callback used to implement the .ltorg directive.
+  /// Emit contents of constant pool for the current section.
+  void emitCurrentConstantPool();
+
+private:
+  std::unique_ptr<AssemblerConstantPools> ConstantPools;
+};
+
+/// \brief Streaming machine code generation interface.
+///
+/// This interface is intended to provide a programmatic interface that is very
+/// similar to the level that an assembler .s file provides.  It has callbacks
+/// to emit bytes, handle directives, etc.  The implementation of this interface
+/// retains state to know what the current section is etc.
+///
+/// There are multiple implementations of this interface: one for writing out
+/// a .s file, and implementations that write out .o files of various formats.
+///
+class MCStreamer {
+  MCContext &Context;
+  std::unique_ptr<MCTargetStreamer> TargetStreamer;
+
+  std::vector<MCDwarfFrameInfo> DwarfFrameInfos;
+  MCDwarfFrameInfo *getCurrentDwarfFrameInfo();
+
+  /// Similar to DwarfFrameInfos, but for SEH unwind info. Chained frames may
+  /// refer to each other, so use std::unique_ptr to provide pointer stability.
+  std::vector<std::unique_ptr<WinEH::FrameInfo>> WinFrameInfos;
+
+  WinEH::FrameInfo *CurrentWinFrameInfo;
+
+  /// Retrieve the current frame info if one is available and it is not yet
+  /// closed. Otherwise, issue an error and return null.
+  WinEH::FrameInfo *EnsureValidWinFrameInfo(SMLoc Loc);
+
+  /// \brief Tracks an index to represent the order a symbol was emitted in.
+  /// Zero means we did not emit that symbol.
+  DenseMap<const MCSymbol *, unsigned> SymbolOrdering;
+
+  /// \brief This is the stack of current and previous section values saved by
+  /// PushSection.
+  SmallVector<std::pair<MCSectionSubPair, MCSectionSubPair>, 4> SectionStack;
+
+  /// The next unique ID to use when creating a WinCFI-related section (.pdata
+  /// or .xdata). This ID ensures that we have a one-to-one mapping from
+  /// code section to unwind info section, which MSVC's incremental linker
+  /// requires.
+  unsigned NextWinCFIID = 0;
+
+protected:
+  MCStreamer(MCContext &Ctx);
+
+  virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
+  virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);
+
+  /// When emitting an object file, create and emit a real label. When emitting
+  /// textual assembly, this should do nothing to avoid polluting our output.
+  virtual MCSymbol *EmitCFILabel();
+
+  WinEH::FrameInfo *getCurrentWinFrameInfo() {
+    return CurrentWinFrameInfo;
+  }
+
+  virtual void EmitWindowsUnwindTables();
+
+  virtual void EmitRawTextImpl(StringRef String);
+
+public:
+  MCStreamer(const MCStreamer &) = delete;
+  MCStreamer &operator=(const MCStreamer &) = delete;
+  virtual ~MCStreamer();
+
+  void visitUsedExpr(const MCExpr &Expr);
+  virtual void visitUsedSymbol(const MCSymbol &Sym);
+
+  void setTargetStreamer(MCTargetStreamer *TS) {
+    TargetStreamer.reset(TS);
+  }
+
+  /// State management
+  ///
+  virtual void reset();
+
+  MCContext &getContext() const { return Context; }
+
+  MCTargetStreamer *getTargetStreamer() {
+    return TargetStreamer.get();
+  }
+
+  unsigned getNumFrameInfos() { return DwarfFrameInfos.size(); }
+  ArrayRef<MCDwarfFrameInfo> getDwarfFrameInfos() const {
+    return DwarfFrameInfos;
+  }
+
+  bool hasUnfinishedDwarfFrameInfo();
+
+  unsigned getNumWinFrameInfos() { return WinFrameInfos.size(); }
+  ArrayRef<std::unique_ptr<WinEH::FrameInfo>> getWinFrameInfos() const {
+    return WinFrameInfos;
+  }
+
+  void generateCompactUnwindEncodings(MCAsmBackend *MAB);
+
+  /// \name Assembly File Formatting.
+  /// @{
+
+  /// \brief Return true if this streamer supports verbose assembly and if it is
+  /// enabled.
+  virtual bool isVerboseAsm() const { return false; }
+
+  /// \brief Return true if this asm streamer supports emitting unformatted text
+  /// to the .s file with EmitRawText.
+  virtual bool hasRawTextSupport() const { return false; }
+
+  /// \brief Is the integrated assembler required for this streamer to function
+  /// correctly?
+  virtual bool isIntegratedAssemblerRequired() const { return false; }
+
+  /// \brief Add a textual comment.
+  ///
+  /// Typically used for comments emitted to the generated .s file, as a
+  /// quality-of-implementation (QoI) nicety that makes the compiler's output
+  /// more readable.  This only affects the MCAsmStreamer, and only when
+  /// verbose assembly output is enabled.
+  ///
+  /// If the comment includes embedded \n's, they will each get the comment
+  /// prefix as appropriate.  The added comment should not end with a \n.
+  /// Each comment is terminated with an end of line by default (the EOL
+  /// param is true); pass EOL = false to leave the comment unterminated.
+  virtual void AddComment(const Twine &T, bool EOL = true) {}
+
+  /// \brief Return a raw_ostream that comments can be written to. Unlike
+  /// AddComment, you are required to terminate comments with \n if you use this
+  /// method.
+  virtual raw_ostream &GetCommentOS();
+
+  /// \brief Print T and prefix it with the comment string (normally #) and
+  /// optionally a tab. This prints the comment immediately, not at the end of
+  /// the current line. It is basically a safe version of EmitRawText: since it
+  /// only prints comments, the object streamer ignores it instead of asserting.
+  virtual void emitRawComment(const Twine &T, bool TabPrefix = true);
+
+  /// \brief Add explicit comment T. T is required to be a valid
+  /// comment in the output and does not need to be escaped.
+  virtual void addExplicitComment(const Twine &T);
+
+  /// \brief Emit added explicit comments.
+  virtual void emitExplicitComments();
+
+  /// AddBlankLine - Emit a blank line to a .s file to pretty it up.
+  virtual void AddBlankLine() {}
+
+  /// @}
+
+  /// \name Symbol & Section Management
+  /// @{
+
+  /// \brief Return the current section that the streamer is emitting code to.
+  MCSectionSubPair getCurrentSection() const {
+    if (!SectionStack.empty())
+      return SectionStack.back().first;
+    return MCSectionSubPair();
+  }
+  MCSection *getCurrentSectionOnly() const { return getCurrentSection().first; }
+
+  /// \brief Return the previous section that the streamer is emitting code to.
+  MCSectionSubPair getPreviousSection() const {
+    if (!SectionStack.empty())
+      return SectionStack.back().second;
+    return MCSectionSubPair();
+  }
+
+  /// \brief Returns an index to represent the order a symbol was emitted in.
+  /// (zero if we did not emit that symbol)
+  unsigned GetSymbolOrder(const MCSymbol *Sym) const {
+    return SymbolOrdering.lookup(Sym);
+  }
+
+  /// \brief Update streamer for a new active section.
+  ///
+  /// This is called by PopSection and SwitchSection, if the current
+  /// section changes.
+  virtual void ChangeSection(MCSection *, const MCExpr *);
+
+  /// \brief Save the current and previous section on the section stack.
+  void PushSection() {
+    SectionStack.push_back(
+        std::make_pair(getCurrentSection(), getPreviousSection()));
+  }
+
+  /// \brief Restore the current and previous section from the section stack.
+  /// Calls ChangeSection as needed.
+  ///
+  /// Returns false if the stack was empty.
+  bool PopSection() {
+    if (SectionStack.size() <= 1)
+      return false;
+    auto I = SectionStack.end();
+    --I;
+    MCSectionSubPair OldSection = I->first;
+    --I;
+    MCSectionSubPair NewSection = I->first;
+
+    if (OldSection != NewSection)
+      ChangeSection(NewSection.first, NewSection.second);
+    SectionStack.pop_back();
+    return true;
+  }
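+
+  // Editorial sketch (not part of the upstream header): a client that emits
+  // into another section and then returns typically brackets the work with
+  // PushSection()/PopSection(); S and OtherSection are hypothetical.
+  //
+  //   S.PushSection();
+  //   S.SwitchSection(OtherSection);
+  //   S.EmitIntValue(0, 4);
+  //   S.PopSection();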
+
+  bool SubSection(const MCExpr *Subsection) {
+    if (SectionStack.empty())
+      return false;
+
+    SwitchSection(SectionStack.back().first.first, Subsection);
+    return true;
+  }
+
+  /// Set the current section where code is being emitted to \p Section.  This
+  /// is required to update CurSection.
+  ///
+  /// This corresponds to assembler directives like .section, .text, etc.
+  virtual void SwitchSection(MCSection *Section,
+                             const MCExpr *Subsection = nullptr);
+
+  /// \brief Set the current section where code is being emitted to \p Section.
+  /// This is required to update CurSection. This version does not call
+  /// ChangeSection.
+  void SwitchSectionNoChange(MCSection *Section,
+                             const MCExpr *Subsection = nullptr) {
+    assert(Section && "Cannot switch to a null section!");
+    MCSectionSubPair curSection = SectionStack.back().first;
+    SectionStack.back().second = curSection;
+    if (MCSectionSubPair(Section, Subsection) != curSection)
+      SectionStack.back().first = MCSectionSubPair(Section, Subsection);
+  }
+
+  /// \brief Create the default sections and set the initial one.
+  virtual void InitSections(bool NoExecStack);
+
+  MCSymbol *endSection(MCSection *Section);
+
+  /// \brief Sets the symbol's section.
+  ///
+  /// Each emitted symbol will be tracked in the ordering table,
+  /// so we can sort on them later.
+  void AssignFragment(MCSymbol *Symbol, MCFragment *Fragment);
+
+  /// \brief Emit a label for \p Symbol into the current section.
+  ///
+  /// This corresponds to an assembler statement such as:
+  ///   foo:
+  ///
+  /// \param Symbol - The symbol to emit. A given symbol should only be
+  /// emitted as a label once, and symbols emitted as a label should never be
+  /// used in an assignment.
+  // FIXME: These emissions are non-const because we mutate the symbol to
+  // add the section we're emitting it to later.
+  virtual void EmitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc());
+
+  virtual void EmitEHSymAttributes(const MCSymbol *Symbol, MCSymbol *EHSymbol);
+
+  /// \brief Note in the output the specified \p Flag.
+  virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+
+  /// \brief Emit the given list \p Options of strings as linker
+  /// options into the output.
+  virtual void EmitLinkerOptions(ArrayRef<std::string> Kind) {}
+
+  /// \brief Note in the output the specified region \p Kind.
+  virtual void EmitDataRegion(MCDataRegionType Kind) {}
+
+  /// \brief Specify the Mach-O minimum deployment target version.
+  virtual void EmitVersionMin(MCVersionMinType Type, unsigned Major,
+                              unsigned Minor, unsigned Update) {}
+
+  /// Emit/Specify Mach-O build version command.
+  /// \p Platform should be one of MachO::PlatformType.
+  virtual void EmitBuildVersion(unsigned Platform, unsigned Major,
+                                unsigned Minor, unsigned Update) {}
+
+  void EmitVersionForTarget(const Triple &Target);
+
+  /// \brief Note in the output that the specified \p Func is a Thumb mode
+  /// function (ARM target only).
+  virtual void EmitThumbFunc(MCSymbol *Func);
+
+  /// \brief Emit an assignment of \p Value to \p Symbol.
+  ///
+  /// This corresponds to an assembler statement such as:
+  ///  symbol = value
+  ///
+  /// The assignment generates no code, but has the side effect of binding the
+  /// value in the current context. For the assembly streamer, this prints the
+  /// binding into the .s file.
+  ///
+  /// \param Symbol - The symbol being assigned to.
+  /// \param Value - The value for the symbol.
+  virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
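+
+  // Editorial sketch (not part of the upstream header): 'foo = bar + 4' can
+  // be emitted as follows, where S, Foo, Bar, and Ctx (the streamer's
+  // MCContext) are hypothetical:
+  //
+  //   const MCExpr *Value = MCBinaryExpr::createAdd(
+  //       MCSymbolRefExpr::create(Bar, Ctx), MCConstantExpr::create(4, Ctx),
+  //       Ctx);
+  //   S.EmitAssignment(Foo, Value);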
+
+  /// \brief Emit a weak reference from \p Alias to \p Symbol.
+  ///
+  /// This corresponds to an assembler statement such as:
+  ///  .weakref alias, symbol
+  ///
+  /// \param Alias - The alias that is being created.
+  /// \param Symbol - The symbol being aliased.
+  virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+
+  /// \brief Add the given \p Attribute to \p Symbol.
+  virtual bool EmitSymbolAttribute(MCSymbol *Symbol,
+                                   MCSymbolAttr Attribute) = 0;
+
+  /// \brief Set the \p DescValue for the \p Symbol.
+  ///
+  /// \param Symbol - The symbol to have its n_desc field set.
+  /// \param DescValue - The value to set into the n_desc field.
+  virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
+
+  /// \brief Start emitting COFF symbol definition
+  ///
+  /// \param Symbol - The symbol to have its External & Type fields set.
+  virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
+
+  /// \brief Emit the storage class of the symbol.
+  ///
+  /// \param StorageClass - The storage class the symbol should have.
+  virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+
+  /// \brief Emit the type of the symbol.
+  ///
+  /// \param Type - A COFF type identifier (see COFF::SymbolType in X86COFF.h)
+  virtual void EmitCOFFSymbolType(int Type);
+
+  /// \brief Marks the end of the symbol definition.
+  virtual void EndCOFFSymbolDef();
+
+  virtual void EmitCOFFSafeSEH(MCSymbol const *Symbol);
+
+  /// \brief Emits the symbol table index of a Symbol into the current section.
+  virtual void EmitCOFFSymbolIndex(MCSymbol const *Symbol);
+
+  /// \brief Emits a COFF section index.
+  ///
+  /// \param Symbol - Symbol the section number relocation should point to.
+  virtual void EmitCOFFSectionIndex(MCSymbol const *Symbol);
+
+  /// \brief Emits a COFF section relative relocation.
+  ///
+  /// \param Symbol - Symbol the section relative relocation should point to.
+  virtual void EmitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset);
+
+  /// \brief Emit an ELF .size directive.
+  ///
+  /// This corresponds to an assembler statement such as:
+  ///  .size symbol, expression
+  virtual void emitELFSize(MCSymbol *Symbol, const MCExpr *Value);
+
+  /// \brief Emit an ELF .symver directive.
+  ///
+  /// This corresponds to an assembler statement such as:
+  ///  .symver _start, foo@@SOME_VERSION
+  /// \param AliasName - The versioned alias (i.e. "foo@@SOME_VERSION")
+  /// \param Aliasee - The aliased symbol (i.e. "_start")
+  virtual void emitELFSymverDirective(StringRef AliasName,
+                                      const MCSymbol *Aliasee);
+
+  /// \brief Emit a Linker Optimization Hint (LOH) directive.
+  /// \param Args - Arguments of the LOH.
+  virtual void EmitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) {}
+
+  /// \brief Emit a common symbol.
+  ///
+  /// \param Symbol - The common symbol to emit.
+  /// \param Size - The size of the common symbol.
+  /// \param ByteAlignment - The alignment of the symbol if
+  /// non-zero. This must be a power of 2.
+  virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                                unsigned ByteAlignment) = 0;
+
+  /// \brief Emit a local common (.lcomm) symbol.
+  ///
+  /// \param Symbol - The common symbol to emit.
+  /// \param Size - The size of the common symbol.
+  /// \param ByteAlignment - The alignment of the common symbol in bytes.
+  virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                                     unsigned ByteAlignment);
+
+  /// \brief Emit the zerofill section and an optional symbol.
+  ///
+  /// \param Section - The zerofill section to create and/or to put the symbol
+  /// \param Symbol - The zerofill symbol to emit, if non-NULL.
+  /// \param Size - The size of the zerofill symbol.
+  /// \param ByteAlignment - The alignment of the zerofill symbol if
+  /// non-zero. This must be a power of 2 on some targets.
+  virtual void EmitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
+                            uint64_t Size = 0, unsigned ByteAlignment = 0) = 0;
+
+  /// \brief Emit a thread local bss (.tbss) symbol.
+  ///
+  /// \param Section - The thread local common section.
+  /// \param Symbol - The thread local common symbol to emit.
+  /// \param Size - The size of the symbol.
+  /// \param ByteAlignment - The alignment of the thread local common symbol
+  /// if non-zero.  This must be a power of 2 on some targets.
+  virtual void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol,
+                              uint64_t Size, unsigned ByteAlignment = 0);
+
+  /// @}
+  /// \name Generating Data
+  /// @{
+
+  /// \brief Emit the bytes in \p Data into the output.
+  ///
+  /// This is used to implement assembler directives such as .byte, .ascii,
+  /// etc.
+  virtual void EmitBytes(StringRef Data);
+
+  /// Functionally identical to EmitBytes. When emitting textual assembly, this
+  /// method uses .byte directives instead of .ascii or .asciz for readability.
+  virtual void EmitBinaryData(StringRef Data);
+
+  /// \brief Emit the expression \p Value into the output as a native
+  /// integer of the given \p Size bytes.
+  ///
+  /// This is used to implement assembler directives such as .word, .quad,
+  /// etc.
+  ///
+  /// \param Value - The value to emit.
+  /// \param Size - The size of the integer (in bytes) to emit. This must
+  /// match a native machine width.
+  /// \param Loc - The location of the expression for error reporting.
+  virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+                             SMLoc Loc = SMLoc());
+
+  void EmitValue(const MCExpr *Value, unsigned Size, SMLoc Loc = SMLoc());
+
+  /// \brief Special case of EmitValue that avoids the client having
+  /// to pass in a MCExpr for constant integers.
+  virtual void EmitIntValue(uint64_t Value, unsigned Size);
+
+  virtual void EmitULEB128Value(const MCExpr *Value);
+
+  virtual void EmitSLEB128Value(const MCExpr *Value);
+
+  /// \brief Special case of EmitULEB128Value that avoids the client having to
+  /// pass in a MCExpr for constant integers.
+  void EmitULEB128IntValue(uint64_t Value);
+
+  /// \brief Special case of EmitSLEB128Value that avoids the client having to
+  /// pass in a MCExpr for constant integers.
+  void EmitSLEB128IntValue(int64_t Value);
+
+  /// \brief Special case of EmitValue that avoids the client having to pass in
+  /// a MCExpr for MCSymbols.
+  void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+                       bool IsSectionRelative = false);
+
+  /// \brief Emit the expression \p Value into the output as a dtprel
+  /// (64-bit DTP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .dtpreldword on
+  /// targets that support them.
+  virtual void EmitDTPRel64Value(const MCExpr *Value);
+
+  /// \brief Emit the expression \p Value into the output as a dtprel
+  /// (32-bit DTP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .dtprelword on
+  /// targets that support them.
+  virtual void EmitDTPRel32Value(const MCExpr *Value);
+
+  /// \brief Emit the expression \p Value into the output as a tprel
+  /// (64-bit TP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .tpreldword on
+  /// targets that support them.
+  virtual void EmitTPRel64Value(const MCExpr *Value);
+
+  /// \brief Emit the expression \p Value into the output as a tprel
+  /// (32-bit TP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .tprelword on
+  /// targets that support them.
+  virtual void EmitTPRel32Value(const MCExpr *Value);
+
+  /// \brief Emit the expression \p Value into the output as a gprel64 (64-bit
+  /// GP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .gpdword on
+  /// targets that support them.
+  virtual void EmitGPRel64Value(const MCExpr *Value);
+
+  /// \brief Emit the expression \p Value into the output as a gprel32 (32-bit
+  /// GP relative) value.
+  ///
+  /// This is used to implement assembler directives such as .gprel32 on
+  /// targets that support them.
+  virtual void EmitGPRel32Value(const MCExpr *Value);
+
+  /// \brief Emit NumBytes bytes worth of the value specified by FillValue.
+  /// This implements directives such as '.space'.
+  void emitFill(uint64_t NumBytes, uint8_t FillValue);
+
+  /// \brief Emit \p Size bytes worth of the value specified by \p FillValue.
+  ///
+  /// This is used to implement assembler directives such as .space or .skip.
+  ///
+  /// \param NumBytes - The number of bytes to emit.
+  /// \param FillValue - The value to use when filling bytes.
+  /// \param Loc - The location of the expression for error reporting.
+  virtual void emitFill(const MCExpr &NumBytes, uint64_t FillValue,
+                        SMLoc Loc = SMLoc());
+
+  /// \brief Emit \p NumValues copies of \p Size bytes. Each \p Size bytes is
+  /// taken from the lowest order 4 bytes of the \p Expr expression.
+  ///
+  /// This is used to implement assembler directives such as .fill.
+  ///
+  /// \param NumValues - The number of copies of \p Size bytes to emit.
+  /// \param Size - The size (in bytes) of each repeated value.
+  /// \param Expr - The expression from which \p Size bytes are used.
+  virtual void emitFill(const MCExpr &NumValues, int64_t Size, int64_t Expr,
+                        SMLoc Loc = SMLoc());
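+
+  // Editorial sketch (not part of the upstream header): '.fill 16, 2, 0x1234'
+  // corresponds to a call such as the following, where S is the streamer:
+  //
+  //   const MCExpr *Count = MCConstantExpr::create(16, S.getContext());
+  //   S.emitFill(*Count, 2, 0x1234);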
+
+  /// \brief Emit NumBytes worth of zeros.
+  /// This function properly handles data in virtual sections.
+  void EmitZeros(uint64_t NumBytes);
+
+  /// \brief Emit some number of copies of \p Value until the byte alignment \p
+  /// ByteAlignment is reached.
+  ///
+  /// If the number of bytes that must be emitted to reach the alignment is
+  /// not a multiple of \p ValueSize, the contents of the emitted fill bytes
+  /// are undefined.
+  ///
+  /// This is used to implement the .align assembler directive.
+  ///
+  /// \param ByteAlignment - The alignment to reach. This must be a power of
+  /// two on some targets.
+  /// \param Value - The value to use when filling bytes.
+  /// \param ValueSize - The size of the integer (in bytes) to emit for
+  /// \p Value. This must match a native machine width.
+  /// \param MaxBytesToEmit - The maximum numbers of bytes to emit, or 0. If
+  /// the alignment cannot be reached in this many bytes, no bytes are
+  /// emitted.
+  virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+                                    unsigned ValueSize = 1,
+                                    unsigned MaxBytesToEmit = 0);
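+
+  // Editorial sketch (not part of the upstream header): a '.balign 8' style
+  // request that pads with zero bytes could be expressed as:
+  //
+  //   S.EmitValueToAlignment(8 /*ByteAlignment*/, 0 /*Value*/);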
+
+  /// \brief Emit nops until the byte alignment \p ByteAlignment is reached.
+  ///
+  /// This is used to align code where the alignment bytes may be executed.  This
+  /// can emit different bytes for different sizes to optimize execution.
+  ///
+  /// \param ByteAlignment - The alignment to reach. This must be a power of
+  /// two on some targets.
+  /// \param MaxBytesToEmit - The maximum numbers of bytes to emit, or 0. If
+  /// the alignment cannot be reached in this many bytes, no bytes are
+  /// emitted.
+  virtual void EmitCodeAlignment(unsigned ByteAlignment,
+                                 unsigned MaxBytesToEmit = 0);
+
+  /// \brief Emit some number of copies of \p Value until the byte offset \p
+  /// Offset is reached.
+  ///
+  /// This is used to implement assembler directives such as .org.
+  ///
+  /// \param Offset - The offset to reach. This may be an expression, but the
+  /// expression must be associated with the current section.
+  /// \param Value - The value to use when filling bytes.
+  virtual void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
+                                 SMLoc Loc);
+
+  virtual void
+  EmitCodePaddingBasicBlockStart(const MCCodePaddingContext &Context) {}
+
+  virtual void
+  EmitCodePaddingBasicBlockEnd(const MCCodePaddingContext &Context) {}
+
+  /// @}
+
+  /// \brief Switch to a new logical file.  This is used to implement the '.file
+  /// "foo.c"' assembler directive.
+  virtual void EmitFileDirective(StringRef Filename);
+
+  /// \brief Emit the "identifiers" directive.  This implements the
+  /// '.ident "version foo"' assembler directive.
+  virtual void EmitIdent(StringRef IdentString) {}
+
+  /// \brief Associate a filename with a specified logical file number.  This
+  /// implements the DWARF2 '.file 4 "foo.c"' assembler directive.
+  unsigned EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+                                  StringRef Filename,
+                                  MD5::MD5Result *Checksum = nullptr,
+                                  Optional<StringRef> Source = None,
+                                  unsigned CUID = 0) {
+    return cantFail(
+        tryEmitDwarfFileDirective(FileNo, Directory, Filename, Checksum,
+                                  Source, CUID));
+  }
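+
+  // Editorial sketch (not part of the upstream header): the DWARF2 directive
+  // '.file 4 "dir/foo.c"' corresponds to:
+  //
+  //   unsigned FileNo = S.EmitDwarfFileDirective(4, "dir", "foo.c");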
+
+  /// Associate a filename with a specified logical file number.
+  /// Also associate a directory, optional checksum, and optional source
+  /// text with the logical file.  This implements the DWARF2
+  /// '.file 4 "dir/foo.c"' assembler directive, and the DWARF5
+  /// '.file 4 "dir/foo.c" md5 "..." source "..."' assembler directive.
+  virtual Expected<unsigned> tryEmitDwarfFileDirective(
+      unsigned FileNo, StringRef Directory, StringRef Filename,
+      MD5::MD5Result *Checksum = nullptr, Optional<StringRef> Source = None,
+      unsigned CUID = 0);
+
+  /// Specify the "root" file of the compilation, using the ".file 0" extension.
+  virtual void emitDwarfFile0Directive(StringRef Directory, StringRef Filename,
+                                       MD5::MD5Result *Checksum,
+                                       Optional<StringRef> Source,
+                                       unsigned CUID = 0);
+
+  /// \brief This implements the DWARF2 '.loc fileno lineno ...' assembler
+  /// directive.
+  virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+                                     unsigned Column, unsigned Flags,
+                                     unsigned Isa, unsigned Discriminator,
+                                     StringRef FileName);
+
+  /// Associate a filename with a specified logical file number, and also
+  /// specify that file's checksum information.  This implements the '.cv_file 4
+  /// "foo.c"' assembler directive. Returns true on success.
+  virtual bool EmitCVFileDirective(unsigned FileNo, StringRef Filename,
+                                   ArrayRef<uint8_t> Checksum,
+                                   unsigned ChecksumKind);
+
+  /// \brief Introduces a function id for use with .cv_loc.
+  virtual bool EmitCVFuncIdDirective(unsigned FunctionId);
+
+  /// \brief Introduces an inline call site id for use with .cv_loc. Includes
+  /// extra information for inline line table generation.
+  virtual bool EmitCVInlineSiteIdDirective(unsigned FunctionId, unsigned IAFunc,
+                                           unsigned IAFile, unsigned IALine,
+                                           unsigned IACol, SMLoc Loc);
+
+  /// \brief This implements the CodeView '.cv_loc' assembler directive.
+  virtual void EmitCVLocDirective(unsigned FunctionId, unsigned FileNo,
+                                  unsigned Line, unsigned Column,
+                                  bool PrologueEnd, bool IsStmt,
+                                  StringRef FileName, SMLoc Loc);
+
+  /// \brief This implements the CodeView '.cv_linetable' assembler directive.
+  virtual void EmitCVLinetableDirective(unsigned FunctionId,
+                                        const MCSymbol *FnStart,
+                                        const MCSymbol *FnEnd);
+
+  /// \brief This implements the CodeView '.cv_inline_linetable' assembler
+  /// directive.
+  virtual void EmitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
+                                              unsigned SourceFileId,
+                                              unsigned SourceLineNum,
+                                              const MCSymbol *FnStartSym,
+                                              const MCSymbol *FnEndSym);
+
+  /// \brief This implements the CodeView '.cv_def_range' assembler
+  /// directive.
+  virtual void EmitCVDefRangeDirective(
+      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
+      StringRef FixedSizePortion);
+
+  /// \brief This implements the CodeView '.cv_stringtable' assembler directive.
+  virtual void EmitCVStringTableDirective() {}
+
+  /// \brief This implements the CodeView '.cv_filechecksums' assembler directive.
+  virtual void EmitCVFileChecksumsDirective() {}
+
+  /// This implements the CodeView '.cv_filechecksumoffset' assembler
+  /// directive.
+  virtual void EmitCVFileChecksumOffsetDirective(unsigned FileNo) {}
+
+  /// This implements the CodeView '.cv_fpo_data' assembler directive.
+  virtual void EmitCVFPOData(const MCSymbol *ProcSym, SMLoc Loc = {}) {}
+
+  /// Emit the absolute difference between two symbols.
+  ///
+  /// \pre Offset of \c Hi is greater than the offset \c Lo.
+  virtual void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
+                                      unsigned Size);
+
+  /// Emit the absolute difference between two symbols encoded with ULEB128.
+  virtual void emitAbsoluteSymbolDiffAsULEB128(const MCSymbol *Hi,
+                                               const MCSymbol *Lo);
+
+  virtual MCSymbol *getDwarfLineTableSymbol(unsigned CUID);
+  virtual void EmitCFISections(bool EH, bool Debug);
+  void EmitCFIStartProc(bool IsSimple);
+  void EmitCFIEndProc();
+  virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
+  virtual void EmitCFIDefCfaOffset(int64_t Offset);
+  virtual void EmitCFIDefCfaRegister(int64_t Register);
+  virtual void EmitCFIOffset(int64_t Register, int64_t Offset);
+  virtual void EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding);
+  virtual void EmitCFILsda(const MCSymbol *Sym, unsigned Encoding);
+  virtual void EmitCFIRememberState();
+  virtual void EmitCFIRestoreState();
+  virtual void EmitCFISameValue(int64_t Register);
+  virtual void EmitCFIRestore(int64_t Register);
+  virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
+  virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
+  virtual void EmitCFIEscape(StringRef Values);
+  virtual void EmitCFIReturnColumn(int64_t Register);
+  virtual void EmitCFIGnuArgsSize(int64_t Size);
+  virtual void EmitCFISignalFrame();
+  virtual void EmitCFIUndefined(int64_t Register);
+  virtual void EmitCFIRegister(int64_t Register1, int64_t Register2);
+  virtual void EmitCFIWindowSave();
+
+  virtual void EmitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIEndProc(SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIStartChained(SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIEndChained(SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIPushReg(unsigned Register, SMLoc Loc = SMLoc());
+  virtual void EmitWinCFISetFrame(unsigned Register, unsigned Offset,
+                                  SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIAllocStack(unsigned Size, SMLoc Loc = SMLoc());
+  virtual void EmitWinCFISaveReg(unsigned Register, unsigned Offset,
+                                 SMLoc Loc = SMLoc());
+  virtual void EmitWinCFISaveXMM(unsigned Register, unsigned Offset,
+                                 SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIPushFrame(bool Code, SMLoc Loc = SMLoc());
+  virtual void EmitWinCFIEndProlog(SMLoc Loc = SMLoc());
+  virtual void EmitWinEHHandler(const MCSymbol *Sym, bool Unwind, bool Except,
+                                SMLoc Loc = SMLoc());
+  virtual void EmitWinEHHandlerData(SMLoc Loc = SMLoc());
+
+  /// Get the .pdata section used for the given section. Typically the given
+  /// section is either the main .text section or some other COMDAT .text
+  /// section, but it may be any section containing code.
+  MCSection *getAssociatedPDataSection(const MCSection *TextSec);
+
+  /// Get the .xdata section used for the given section.
+  MCSection *getAssociatedXDataSection(const MCSection *TextSec);
+
+  virtual void EmitSyntaxDirective();
+
+  /// \brief Emit a .reloc directive.
+  /// Returns true if the relocation could not be emitted because Name is not
+  /// known.
+  virtual bool EmitRelocDirective(const MCExpr &Offset, StringRef Name,
+                                  const MCExpr *Expr, SMLoc Loc) {
+    return true;
+  }
+
+  /// \brief Emit the given \p Instruction into the current section.
+  /// If \p PrintSchedInfo is true, then a scheduling comment should be added
+  /// to the output.
+  virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+                               bool PrintSchedInfo = false);
+
+  /// \brief Set the bundle alignment mode from now on in the section.
+  /// The argument is the power of 2 to which the alignment is set. The
+  /// value 0 means turn the bundle alignment off.
+  virtual void EmitBundleAlignMode(unsigned AlignPow2);
+
+  /// \brief The following instructions are a bundle-locked group.
+  ///
+  /// \param AlignToEnd - If true, the bundle-locked group will be aligned to
+  ///                     the end of a bundle.
+  virtual void EmitBundleLock(bool AlignToEnd);
+
+  /// \brief Ends a bundle-locked group.
+  virtual void EmitBundleUnlock();
+
+  /// \brief If this file is backed by an assembly streamer, this dumps the
+  /// specified string in the output .s file.  This capability is indicated by
+  /// the hasRawTextSupport() predicate.  By default this aborts.
+  void EmitRawText(const Twine &String);
+
+  /// \brief Streamer specific finalization.
+  virtual void FinishImpl();
+  /// \brief Finish emission of machine code.
+  void Finish();
+
+  virtual bool mayHaveInstructions(MCSection &Sec) const { return true; }
+};
+
+/// Create a dummy machine code streamer, which does nothing. This is useful for
+/// timing the assembler front end.
+MCStreamer *createNullStreamer(MCContext &Ctx);
+
+/// Create a machine code streamer which will print out assembly for the native
+/// target, suitable for compiling with a native assembler.
+///
+/// \param InstPrint - If given, the instruction printer to use. If not given
+/// the MCInst representation will be printed.  This method takes ownership of
+/// InstPrint.
+///
+/// \param CE - If given, a code emitter to use to show the instruction
+/// encoding inline with the assembly. This method takes ownership of \p CE.
+///
+/// \param TAB - If given, a target asm backend to use to show the fixup
+/// information in conjunction with encoding information. This method takes
+/// ownership of \p TAB.
+///
+/// \param ShowInst - Whether to show the MCInst representation inline with
+/// the assembly.
+MCStreamer *createAsmStreamer(MCContext &Ctx,
+                              std::unique_ptr<formatted_raw_ostream> OS,
+                              bool isVerboseAsm, bool useDwarfDirectory,
+                              MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+                              MCAsmBackend *TAB, bool ShowInst);
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSTREAMER_H
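
The Expected-returning tryEmitDwarfFileDirective above lets a caller report a
conflicting file number instead of aborting, while EmitDwarfFileDirective wraps
it in cantFail for callers that know the assignment is fresh. A minimal sketch
of the error-aware path, assuming an in-scope MCStreamer reference (the helper
name and the diagnostic text are illustrative):

    #include "llvm/MC/MCStreamer.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    // Register a DWARF file number, then emit a .loc against it. A duplicate
    // FileNo with different contents comes back as an llvm::Error here rather
    // than an assertion failure.
    static void emitFileAndLoc(llvm::MCStreamer &Streamer) {
      llvm::Expected<unsigned> FileNo = Streamer.tryEmitDwarfFileDirective(
          /*FileNo=*/4, /*Directory=*/"src", /*Filename=*/"foo.c");
      if (!FileNo) {
        llvm::errs() << "cannot emit .file: "
                     << llvm::toString(FileNo.takeError()) << '\n';
        return;
      }
      Streamer.EmitDwarfLocDirective(*FileNo, /*Line=*/10, /*Column=*/1,
                                     /*Flags=*/0, /*Isa=*/0,
                                     /*Discriminator=*/0, "foo.c");
    }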
diff --git a/linux-x64/clang/include/llvm/MC/MCSubtargetInfo.h b/linux-x64/clang/include/llvm/MC/MCSubtargetInfo.h
new file mode 100644
index 0000000..0a2b247
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSubtargetInfo.h
@@ -0,0 +1,181 @@
+//===- llvm/MC/MCSubtargetInfo.h - Subtarget Information --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the subtarget options of a Target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSUBTARGETINFO_H
+#define LLVM_MC_MCSUBTARGETINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class MachineInstr;
+class MCInst;
+
+//===----------------------------------------------------------------------===//
+///
+/// Generic base class for all target subtargets.
+///
+class MCSubtargetInfo {
+  Triple TargetTriple;
+  std::string CPU; // CPU being targeted.
+  ArrayRef<SubtargetFeatureKV> ProcFeatures;  // Processor feature list
+  ArrayRef<SubtargetFeatureKV> ProcDesc;  // Processor descriptions
+
+  // Scheduler machine model
+  const SubtargetInfoKV *ProcSchedModels;
+  const MCWriteProcResEntry *WriteProcResTable;
+  const MCWriteLatencyEntry *WriteLatencyTable;
+  const MCReadAdvanceEntry *ReadAdvanceTable;
+  const MCSchedModel *CPUSchedModel;
+
+  const InstrStage *Stages;            // Instruction itinerary stages
+  const unsigned *OperandCycles;       // Itinerary operand cycles
+  const unsigned *ForwardingPaths;
+  FeatureBitset FeatureBits;           // Feature bits for current CPU + FS
+
+public:
+  MCSubtargetInfo(const MCSubtargetInfo &) = default;
+  MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
+                  ArrayRef<SubtargetFeatureKV> PF,
+                  ArrayRef<SubtargetFeatureKV> PD,
+                  const SubtargetInfoKV *ProcSched,
+                  const MCWriteProcResEntry *WPR, const MCWriteLatencyEntry *WL,
+                  const MCReadAdvanceEntry *RA, const InstrStage *IS,
+                  const unsigned *OC, const unsigned *FP);
+  MCSubtargetInfo() = delete;
+  MCSubtargetInfo &operator=(const MCSubtargetInfo &) = delete;
+  MCSubtargetInfo &operator=(MCSubtargetInfo &&) = delete;
+  virtual ~MCSubtargetInfo() = default;
+
+  const Triple &getTargetTriple() const { return TargetTriple; }
+  StringRef getCPU() const { return CPU; }
+
+  const FeatureBitset& getFeatureBits() const { return FeatureBits; }
+  void setFeatureBits(const FeatureBitset &FeatureBits_) {
+    FeatureBits = FeatureBits_;
+  }
+
+  bool hasFeature(unsigned Feature) const {
+    return FeatureBits[Feature];
+  }
+
+protected:
+  /// Initialize the scheduling model and feature bits.
+  ///
+  /// FIXME: Find a way to stick this in the constructor, since it should only
+  /// be called during initialization.
+  void InitMCProcessorInfo(StringRef CPU, StringRef FS);
+
+public:
+  /// Set the features to the default for the given CPU with an appended feature
+  /// string.
+  void setDefaultFeatures(StringRef CPU, StringRef FS);
+
+  /// Toggle a feature and return the re-computed feature bits.
+  /// This version does not change the implied bits.
+  FeatureBitset ToggleFeature(uint64_t FB);
+
+  /// Toggle a feature and return the re-computed feature bits.
+  /// This version does not change the implied bits.
+  FeatureBitset ToggleFeature(const FeatureBitset& FB);
+
+  /// Toggle a set of features and return the re-computed feature bits.
+  /// This version will also change all implied bits.
+  FeatureBitset ToggleFeature(StringRef FS);
+
+  /// Apply a feature flag and return the re-computed feature bits, including
+  /// all feature bits implied by the flag.
+  FeatureBitset ApplyFeatureFlag(StringRef FS);
+
+  /// Check whether the subtarget features are enabled/disabled as per
+  /// the provided string, ignoring all other features.
+  bool checkFeatures(StringRef FS) const;
+
+  /// Get the machine model of a CPU.
+  const MCSchedModel &getSchedModelForCPU(StringRef CPU) const;
+
+  /// Get the machine model for this subtarget's CPU.
+  const MCSchedModel &getSchedModel() const { return *CPUSchedModel; }
+
+  /// Return an iterator at the first processor resource consumed by the given
+  /// scheduling class.
+  const MCWriteProcResEntry *getWriteProcResBegin(
+    const MCSchedClassDesc *SC) const {
+    return &WriteProcResTable[SC->WriteProcResIdx];
+  }
+  const MCWriteProcResEntry *getWriteProcResEnd(
+    const MCSchedClassDesc *SC) const {
+    return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries;
+  }
+
+  const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC,
+                                                  unsigned DefIdx) const {
+    assert(DefIdx < SC->NumWriteLatencyEntries &&
+           "MachineModel does not specify a WriteResource for DefIdx");
+
+    return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx];
+  }
+
+  int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx,
+                           unsigned WriteResID) const {
+    // TODO: The number of read advance entries in a class can be significant
+    // (~50). Consider compressing the WriteID into a dense ID of those that are
+    // used by ReadAdvance and representing them as a bitset.
+    for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx],
+           *E = I + SC->NumReadAdvanceEntries; I != E; ++I) {
+      if (I->UseIdx < UseIdx)
+        continue;
+      if (I->UseIdx > UseIdx)
+        break;
+      // Find the first WriteResIdx match, which has the highest cycle count.
+      if (!I->WriteResourceID || I->WriteResourceID == WriteResID) {
+        return I->Cycles;
+      }
+    }
+    return 0;
+  }
+
+  /// Get scheduling itinerary of a CPU.
+  InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;
+
+  /// Initialize an InstrItineraryData instance.
+  void initInstrItins(InstrItineraryData &InstrItins) const;
+
+  /// Check whether the CPU string is valid.
+  bool isCPUStringValid(StringRef CPU) const {
+    auto Found = std::lower_bound(ProcDesc.begin(), ProcDesc.end(), CPU);
+    return Found != ProcDesc.end() && StringRef(Found->Key) == CPU;
+  }
+
+  /// Returns a string representation of the scheduler comment.
+  virtual std::string getSchedInfoStr(const MachineInstr &MI) const {
+    return {};
+  }
+
+  virtual std::string getSchedInfoStr(MCInst const &MCI) const {
+    return {};
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSUBTARGETINFO_H
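
ApplyFeatureFlag and checkFeatures together give the usual pattern for toggling
subtarget features by name; a brief sketch (the feature string "+sse4.2" is a
target-specific example, not something this header defines):

    #include "llvm/MC/MCSubtargetInfo.h"

    // Enable a feature by name and verify it stuck. ApplyFeatureFlag also
    // switches on any bits the flag implies; checkFeatures inspects only the
    // named features and ignores the rest.
    static bool enableAndCheck(llvm::MCSubtargetInfo &STI) {
      STI.ApplyFeatureFlag("+sse4.2");
      return STI.checkFeatures("+sse4.2");
    }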
diff --git a/linux-x64/clang/include/llvm/MC/MCSymbol.h b/linux-x64/clang/include/llvm/MC/MCSymbol.h
new file mode 100644
index 0000000..cc8fc02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSymbol.h
@@ -0,0 +1,427 @@
+//===- MCSymbol.h - Machine Code Symbols ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCSymbol class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSYMBOL_H
+#define LLVM_MC_MCSYMBOL_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+class MCAsmInfo;
+class MCContext;
+class MCExpr;
+class MCSection;
+class raw_ostream;
+
+/// MCSymbol - Instances of this class represent a symbol name in the MC file,
+/// and MCSymbols are created and uniqued by the MCContext class.  MCSymbols
+/// should only be constructed with valid names for the object file.
+///
+/// If the symbol is defined/emitted into the current translation unit, the
+/// Section member is set to indicate what section it lives in.  Otherwise, if
+/// it is a reference to an external entity, it has a null section.
+class MCSymbol {
+protected:
+  /// The kind of the symbol.  If it is any value other than unset then this
+  /// class is actually one of the appropriate subclasses of MCSymbol.
+  enum SymbolKind {
+    SymbolKindUnset,
+    SymbolKindCOFF,
+    SymbolKindELF,
+    SymbolKindMachO,
+    SymbolKindWasm,
+  };
+
+  /// A symbol can contain an Offset, or Value, or be Common, but never more
+  /// than one of these.
+  enum Contents : uint8_t {
+    SymContentsUnset,
+    SymContentsOffset,
+    SymContentsVariable,
+    SymContentsCommon,
+  };
+
+  // Special sentinel value for the absolute pseudo fragment.
+  static MCFragment *AbsolutePseudoFragment;
+
+  /// If a symbol has a Fragment, the section is implied, so we only need
+  /// one pointer.
+  /// The special AbsolutePseudoFragment value is for absolute symbols.
+  /// If this is a variable symbol, this caches the variable value's fragment.
+  /// FIXME: We might be able to simplify this by having the asm streamer create
+  /// dummy fragments.
+  /// If this is a section, then it gives the section this symbol is defined
+  /// in. This is null for undefined symbols.
+  ///
+  /// If this is a fragment, then it gives the fragment this symbol's value is
+  /// relative to, if any.
+  ///
+  /// For the 'HasName' integer, this is true if this symbol is named.
+  /// A named symbol will have a pointer to the name allocated in the bytes
+  /// immediately prior to the MCSymbol.
+  mutable PointerIntPair<MCFragment *, 1> FragmentAndHasName;
+
+  /// IsTemporary - True if this is an assembler temporary label, which
+  /// typically does not survive in the .o file's symbol table.  Usually
+  /// "Lfoo" or ".foo".
+  unsigned IsTemporary : 1;
+
+  /// \brief True if this symbol can be redefined.
+  unsigned IsRedefinable : 1;
+
+  /// IsUsed - True if this symbol has been used.
+  mutable unsigned IsUsed : 1;
+
+  mutable unsigned IsRegistered : 1;
+
+  /// This symbol is visible outside this translation unit.
+  mutable unsigned IsExternal : 1;
+
+  /// This symbol is private extern.
+  mutable unsigned IsPrivateExtern : 1;
+
+  /// LLVM RTTI discriminator. This is actually a SymbolKind enumerator, but is
+  /// unsigned to avoid sign extension and achieve better bitpacking with MSVC.
+  unsigned Kind : 3;
+
+  /// True if we have created a relocation that uses this symbol.
+  mutable unsigned IsUsedInReloc : 1;
+
+  /// This is actually a Contents enumerator, but is unsigned to avoid sign
+  /// extension and achieve better bitpacking with MSVC.
+  unsigned SymbolContents : 2;
+
+  /// The alignment of the symbol, if it is 'common', or -1.
+  ///
+  /// The alignment is stored as log2(align) + 1.  This allows all values from
+  /// 0 to 2^31 to be stored which is every power of 2 representable by an
+  /// unsigned.
+  enum : unsigned { NumCommonAlignmentBits = 5 };
+  unsigned CommonAlignLog2 : NumCommonAlignmentBits;
+
+  /// The Flags field is used by object file implementations to store
+  /// additional per symbol information which is not easily classified.
+  enum : unsigned { NumFlagsBits = 16 };
+  mutable uint32_t Flags : NumFlagsBits;
+
+  /// Index field, for use by the object file implementation.
+  mutable uint32_t Index = 0;
+
+  union {
+    /// The offset to apply to the fragment address to form this symbol's value.
+    uint64_t Offset;
+
+    /// The size of the symbol, if it is 'common'.
+    uint64_t CommonSize;
+
+    /// If non-null, the value for a variable symbol.
+    const MCExpr *Value;
+  };
+
+  // MCContext creates and uniques these.
+  friend class MCExpr;
+  friend class MCContext;
+
+  /// \brief The name for a symbol.
+  /// MCSymbol contains a uint64_t so is probably aligned to 8.  On a 32-bit
+  /// system, the name is a pointer so isn't going to satisfy the 8 byte
+  /// alignment of uint64_t.  Account for that here.
+  using NameEntryStorageTy = union {
+    const StringMapEntry<bool> *NameEntry;
+    uint64_t AlignmentPadding;
+  };
+
+  MCSymbol(SymbolKind Kind, const StringMapEntry<bool> *Name, bool isTemporary)
+      : IsTemporary(isTemporary), IsRedefinable(false), IsUsed(false),
+        IsRegistered(false), IsExternal(false), IsPrivateExtern(false),
+        Kind(Kind), IsUsedInReloc(false), SymbolContents(SymContentsUnset),
+        CommonAlignLog2(0), Flags(0) {
+    Offset = 0;
+    FragmentAndHasName.setInt(!!Name);
+    if (Name)
+      getNameEntryPtr() = Name;
+  }
+
+  // Provide custom new/delete as we will only allocate space for a name
+  // if we need one.
+  void *operator new(size_t s, const StringMapEntry<bool> *Name,
+                     MCContext &Ctx);
+
+private:
+  void operator delete(void *);
+  /// \brief Placement delete - required by std, but never called.
+  void operator delete(void*, unsigned) {
+    llvm_unreachable("Constructor throws?");
+  }
+  /// \brief Placement delete - required by std, but never called.
+  void operator delete(void*, unsigned, bool) {
+    llvm_unreachable("Constructor throws?");
+  }
+
+  MCSection *getSectionPtr() const {
+    if (MCFragment *F = getFragment()) {
+      assert(F != AbsolutePseudoFragment);
+      return F->getParent();
+    }
+    return nullptr;
+  }
+
+  /// \brief Get a reference to the name field.  Requires that we have a name.
+  const StringMapEntry<bool> *&getNameEntryPtr() {
+    assert(FragmentAndHasName.getInt() && "Name is required");
+    NameEntryStorageTy *Name = reinterpret_cast<NameEntryStorageTy *>(this);
+    return (*(Name - 1)).NameEntry;
+  }
+  const StringMapEntry<bool> *&getNameEntryPtr() const {
+    return const_cast<MCSymbol*>(this)->getNameEntryPtr();
+  }
+
+public:
+  MCSymbol(const MCSymbol &) = delete;
+  MCSymbol &operator=(const MCSymbol &) = delete;
+
+  /// getName - Get the symbol name.
+  StringRef getName() const {
+    if (!FragmentAndHasName.getInt())
+      return StringRef();
+
+    return getNameEntryPtr()->first();
+  }
+
+  bool isRegistered() const { return IsRegistered; }
+  void setIsRegistered(bool Value) const { IsRegistered = Value; }
+
+  void setUsedInReloc() const { IsUsedInReloc = true; }
+  bool isUsedInReloc() const { return IsUsedInReloc; }
+
+  /// \name Accessors
+  /// @{
+
+  /// isTemporary - Check if this is an assembler temporary symbol.
+  bool isTemporary() const { return IsTemporary; }
+
+  /// isUsed - Check if this is used.
+  bool isUsed() const { return IsUsed; }
+
+  /// \brief Check if this symbol is redefinable.
+  bool isRedefinable() const { return IsRedefinable; }
+  /// \brief Mark this symbol as redefinable.
+  void setRedefinable(bool Value) { IsRedefinable = Value; }
+  /// \brief Prepare this symbol to be redefined.
+  void redefineIfPossible() {
+    if (IsRedefinable) {
+      if (SymbolContents == SymContentsVariable) {
+        Value = nullptr;
+        SymbolContents = SymContentsUnset;
+      }
+      setUndefined();
+      IsRedefinable = false;
+    }
+  }
+
+  /// @}
+  /// \name Associated Sections
+  /// @{
+
+  /// isDefined - Check if this symbol is defined (i.e., it has an address).
+  ///
+  /// Defined symbols are either absolute or in some section.
+  bool isDefined() const { return !isUndefined(); }
+
+  /// isInSection - Check if this symbol is defined in some section (i.e., it
+  /// is defined but not absolute).
+  bool isInSection() const {
+    return isDefined() && !isAbsolute();
+  }
+
+  /// isUndefined - Check if this symbol is undefined (i.e., implicitly
+  /// defined).
+  bool isUndefined(bool SetUsed = true) const {
+    return getFragment(SetUsed) == nullptr;
+  }
+
+  /// isAbsolute - Check if this is an absolute symbol.
+  bool isAbsolute() const {
+    return getFragment() == AbsolutePseudoFragment;
+  }
+
+  /// Get the section associated with a defined, non-absolute symbol.
+  MCSection &getSection() const {
+    assert(isInSection() && "Invalid accessor!");
+    return *getSectionPtr();
+  }
+
+  /// Mark the symbol as defined in the fragment \p F.
+  void setFragment(MCFragment *F) const {
+    assert(!isVariable() && "Cannot set fragment of variable");
+    FragmentAndHasName.setPointer(F);
+  }
+
+  /// Mark the symbol as undefined.
+  void setUndefined() { FragmentAndHasName.setPointer(nullptr); }
+
+  bool isELF() const { return Kind == SymbolKindELF; }
+
+  bool isCOFF() const { return Kind == SymbolKindCOFF; }
+
+  bool isMachO() const { return Kind == SymbolKindMachO; }
+
+  bool isWasm() const { return Kind == SymbolKindWasm; }
+
+  /// @}
+  /// \name Variable Symbols
+  /// @{
+
+  /// isVariable - Check if this is a variable symbol.
+  bool isVariable() const {
+    return SymbolContents == SymContentsVariable;
+  }
+
+  /// getVariableValue - Get the value for variable symbols.
+  const MCExpr *getVariableValue(bool SetUsed = true) const {
+    assert(isVariable() && "Invalid accessor!");
+    IsUsed |= SetUsed;
+    return Value;
+  }
+
+  void setVariableValue(const MCExpr *Value);
+
+  /// @}
+
+  /// Get the (implementation defined) index.
+  uint32_t getIndex() const {
+    return Index;
+  }
+
+  /// Set the (implementation defined) index.
+  void setIndex(uint32_t Value) const {
+    Index = Value;
+  }
+
+  uint64_t getOffset() const {
+    assert((SymbolContents == SymContentsUnset ||
+            SymbolContents == SymContentsOffset) &&
+           "Cannot get offset for a common/variable symbol");
+    return Offset;
+  }
+  void setOffset(uint64_t Value) {
+    assert((SymbolContents == SymContentsUnset ||
+            SymbolContents == SymContentsOffset) &&
+           "Cannot set offset for a common/variable symbol");
+    Offset = Value;
+    SymbolContents = SymContentsOffset;
+  }
+
+  /// Return the size of a 'common' symbol.
+  uint64_t getCommonSize() const {
+    assert(isCommon() && "Not a 'common' symbol!");
+    return CommonSize;
+  }
+
+  /// Mark this symbol as being 'common'.
+  ///
+  /// \param Size - The size of the symbol.
+  /// \param Align - The alignment of the symbol.
+  void setCommon(uint64_t Size, unsigned Align) {
+    assert(getOffset() == 0);
+    CommonSize = Size;
+    SymbolContents = SymContentsCommon;
+
+    assert((!Align || isPowerOf2_32(Align)) &&
+           "Alignment must be a power of 2");
+    unsigned Log2Align = Log2_32(Align) + 1;
+    assert(Log2Align < (1U << NumCommonAlignmentBits) &&
+           "Out of range alignment");
+    CommonAlignLog2 = Log2Align;
+  }
+
+  ///  Return the alignment of a 'common' symbol.
+  unsigned getCommonAlignment() const {
+    assert(isCommon() && "Not a 'common' symbol!");
+    return CommonAlignLog2 ? (1U << (CommonAlignLog2 - 1)) : 0;
+  }
+
+  /// Declare this symbol as being 'common'.
+  ///
+  /// \param Size - The size of the symbol.
+  /// \param Align - The alignment of the symbol.
+  /// \return True if the symbol was already declared as a different type.
+  bool declareCommon(uint64_t Size, unsigned Align) {
+    assert(isCommon() || getOffset() == 0);
+    if (isCommon()) {
+      if (CommonSize != Size || getCommonAlignment() != Align)
+        return true;
+    } else
+      setCommon(Size, Align);
+    return false;
+  }
+
+  /// Is this a 'common' symbol.
+  bool isCommon() const {
+    return SymbolContents == SymContentsCommon;
+  }
+
+  MCFragment *getFragment(bool SetUsed = true) const {
+    MCFragment *Fragment = FragmentAndHasName.getPointer();
+    if (Fragment || !isVariable())
+      return Fragment;
+    Fragment = getVariableValue(SetUsed)->findAssociatedFragment();
+    FragmentAndHasName.setPointer(Fragment);
+    return Fragment;
+  }
+
+  bool isExternal() const { return IsExternal; }
+  void setExternal(bool Value) const { IsExternal = Value; }
+
+  bool isPrivateExtern() const { return IsPrivateExtern; }
+  void setPrivateExtern(bool Value) { IsPrivateExtern = Value; }
+
+  /// print - Print the value to the stream \p OS.
+  void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
+
+  /// dump - Print the value to stderr.
+  void dump() const;
+
+protected:
+  /// Get the (implementation defined) symbol flags.
+  uint32_t getFlags() const { return Flags; }
+
+  /// Set the (implementation defined) symbol flags.
+  void setFlags(uint32_t Value) const {
+    assert(Value < (1U << NumFlagsBits) && "Out of range flags");
+    Flags = Value;
+  }
+
+  /// Modify the flags via a mask
+  void modifyFlags(uint32_t Value, uint32_t Mask) const {
+    assert(Value < (1U << NumFlagsBits) && "Out of range flags");
+    Flags = (Flags & ~Mask) | Value;
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCSymbol &Sym) {
+  Sym.print(OS, nullptr);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSYMBOL_H
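
The common-symbol setters above store the alignment as log2(align) + 1 in a
five-bit field, so a sketch of the round trip makes the encoding concrete
(Sym is any symbol obtained from an MCContext; the helper name is
illustrative):

    #include "llvm/MC/MCSymbol.h"

    // Declare a 64-byte, 16-byte-aligned common symbol and read it back.
    // Internally 16 is stored as Log2_32(16) + 1 == 5 and decoded as
    // 1 << (5 - 1) == 16; a stored value of 0 means "no alignment".
    static void makeCommon(llvm::MCSymbol &Sym) {
      Sym.setCommon(/*Size=*/64, /*Align=*/16);
      uint64_t Size = Sym.getCommonSize();       // 64
      unsigned Align = Sym.getCommonAlignment(); // 16
      (void)Size;
      (void)Align;
    }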
diff --git a/linux-x64/clang/include/llvm/MC/MCSymbolCOFF.h b/linux-x64/clang/include/llvm/MC/MCSymbolCOFF.h
new file mode 100644
index 0000000..7918c35
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSymbolCOFF.h
@@ -0,0 +1,67 @@
+//===- MCSymbolCOFF.h -  ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSYMBOLCOFF_H
+#define LLVM_MC_MCSYMBOLCOFF_H
+
+#include "llvm/MC/MCSymbol.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MCSymbolCOFF : public MCSymbol {
+  /// This corresponds to the e_type field of the COFF symbol.
+  mutable uint16_t Type = 0;
+
+  enum SymbolFlags : uint16_t {
+    SF_ClassMask = 0x00FF,
+    SF_ClassShift = 0,
+
+    SF_WeakExternal = 0x0100,
+    SF_SafeSEH = 0x0200,
+  };
+
+public:
+  MCSymbolCOFF(const StringMapEntry<bool> *Name, bool isTemporary)
+      : MCSymbol(SymbolKindCOFF, Name, isTemporary) {}
+
+  uint16_t getType() const {
+    return Type;
+  }
+  void setType(uint16_t Ty) const {
+    Type = Ty;
+  }
+
+  uint16_t getClass() const {
+    return (getFlags() & SF_ClassMask) >> SF_ClassShift;
+  }
+  void setClass(uint16_t StorageClass) const {
+    modifyFlags(StorageClass << SF_ClassShift, SF_ClassMask);
+  }
+
+  bool isWeakExternal() const {
+    return getFlags() & SF_WeakExternal;
+  }
+  void setIsWeakExternal() const {
+    modifyFlags(SF_WeakExternal, SF_WeakExternal);
+  }
+
+  bool isSafeSEH() const {
+    return getFlags() & SF_SafeSEH;
+  }
+  void setIsSafeSEH() const {
+    modifyFlags(SF_SafeSEH, SF_SafeSEH);
+  }
+
+  static bool classof(const MCSymbol *S) { return S->isCOFF(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSYMBOLCOFF_H
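
The classof hook at the bottom is what plugs MCSymbolCOFF into LLVM-style RTTI,
so generic streamer code can downcast safely; a small sketch:

    #include "llvm/MC/MCSymbolCOFF.h"
    #include "llvm/Support/Casting.h"

    // dyn_cast consults MCSymbolCOFF::classof (i.e. Sym.isCOFF()), so this is
    // a no-op for ELF/MachO/Wasm symbols.
    static void markSafeSEH(llvm::MCSymbol &Sym) {
      if (auto *CS = llvm::dyn_cast<llvm::MCSymbolCOFF>(&Sym))
        CS->setIsSafeSEH(); // sets the SF_SafeSEH bit in the flags word
    }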
diff --git a/linux-x64/clang/include/llvm/MC/MCSymbolELF.h b/linux-x64/clang/include/llvm/MC/MCSymbolELF.h
new file mode 100644
index 0000000..bbcd22e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSymbolELF.h
@@ -0,0 +1,54 @@
+//===- MCSymbolELF.h -  -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLELF_H
+#define LLVM_MC_MCSYMBOLELF_H
+
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class MCSymbolELF : public MCSymbol {
+  /// An expression describing how to calculate the size of a symbol. If a
+  /// symbol has no size this field will be NULL.
+  const MCExpr *SymbolSize = nullptr;
+
+public:
+  MCSymbolELF(const StringMapEntry<bool> *Name, bool isTemporary)
+      : MCSymbol(SymbolKindELF, Name, isTemporary) {}
+  void setSize(const MCExpr *SS) { SymbolSize = SS; }
+
+  const MCExpr *getSize() const { return SymbolSize; }
+
+  void setVisibility(unsigned Visibility);
+  unsigned getVisibility() const;
+
+  void setOther(unsigned Other);
+  unsigned getOther() const;
+
+  void setType(unsigned Type) const;
+  unsigned getType() const;
+
+  void setBinding(unsigned Binding) const;
+  unsigned getBinding() const;
+
+  bool isBindingSet() const;
+
+  void setIsWeakrefUsedInReloc() const;
+  bool isWeakrefUsedInReloc() const;
+
+  void setIsSignature() const;
+  bool isSignature() const;
+
+  static bool classof(const MCSymbol *S) { return S->isELF(); }
+
+private:
+  void setIsBindingSet() const;
+};
+}
+
+#endif
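
The MCSymbolELF setters take raw ELF constants rather than wrapper enums, so
callers pass values from llvm/BinaryFormat/ELF.h directly; a sketch:

    #include "llvm/BinaryFormat/ELF.h"
    #include "llvm/MC/MCSymbolELF.h"

    // Mark a symbol as a hidden global function, as an assembler would for
    // ".type f,@function" / ".globl f" / ".hidden f".
    static void markGlobalFunction(llvm::MCSymbolELF &Sym) {
      Sym.setType(llvm::ELF::STT_FUNC);
      Sym.setBinding(llvm::ELF::STB_GLOBAL);
      Sym.setVisibility(llvm::ELF::STV_HIDDEN);
    }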
diff --git a/linux-x64/clang/include/llvm/MC/MCSymbolMachO.h b/linux-x64/clang/include/llvm/MC/MCSymbolMachO.h
new file mode 100644
index 0000000..25220e4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSymbolMachO.h
@@ -0,0 +1,136 @@
+//===- MCSymbolMachO.h -  ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLMACHO_H
+#define LLVM_MC_MCSYMBOLMACHO_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class MCSymbolMachO : public MCSymbol {
+  /// \brief We store the value for the 'desc' symbol field in the
+  /// lowest 16 bits of the implementation defined flags.
+  enum MachOSymbolFlags : uint16_t { // See <mach-o/nlist.h>.
+    SF_DescFlagsMask                        = 0xFFFF,
+
+    // Reference type flags.
+    SF_ReferenceTypeMask                    = 0x0007,
+    SF_ReferenceTypeUndefinedNonLazy        = 0x0000,
+    SF_ReferenceTypeUndefinedLazy           = 0x0001,
+    SF_ReferenceTypeDefined                 = 0x0002,
+    SF_ReferenceTypePrivateDefined          = 0x0003,
+    SF_ReferenceTypePrivateUndefinedNonLazy = 0x0004,
+    SF_ReferenceTypePrivateUndefinedLazy    = 0x0005,
+
+    // Other 'desc' flags.
+    SF_ThumbFunc                            = 0x0008,
+    SF_NoDeadStrip                          = 0x0020,
+    SF_WeakReference                        = 0x0040,
+    SF_WeakDefinition                       = 0x0080,
+    SF_SymbolResolver                       = 0x0100,
+    SF_AltEntry                             = 0x0200,
+
+    // Common alignment
+    SF_CommonAlignmentMask                  = 0xF0FF,
+    SF_CommonAlignmentShift                 = 8
+  };
+
+public:
+  MCSymbolMachO(const StringMapEntry<bool> *Name, bool isTemporary)
+      : MCSymbol(SymbolKindMachO, Name, isTemporary) {}
+
+  // Reference type methods.
+
+  void clearReferenceType() const {
+    modifyFlags(0, SF_ReferenceTypeMask);
+  }
+
+  void setReferenceTypeUndefinedLazy(bool Value) const {
+    modifyFlags(Value ? SF_ReferenceTypeUndefinedLazy : 0,
+                SF_ReferenceTypeUndefinedLazy);
+  }
+
+  // Other 'desc' methods.
+
+  void setThumbFunc() const {
+    modifyFlags(SF_ThumbFunc, SF_ThumbFunc);
+  }
+
+  bool isNoDeadStrip() const {
+    return getFlags() & SF_NoDeadStrip;
+  }
+  void setNoDeadStrip() const {
+    modifyFlags(SF_NoDeadStrip, SF_NoDeadStrip);
+  }
+
+  bool isWeakReference() const {
+    return getFlags() & SF_WeakReference;
+  }
+  void setWeakReference() const {
+    modifyFlags(SF_WeakReference, SF_WeakReference);
+  }
+
+  bool isWeakDefinition() const {
+    return getFlags() & SF_WeakDefinition;
+  }
+  void setWeakDefinition() const {
+    modifyFlags(SF_WeakDefinition, SF_WeakDefinition);
+  }
+
+  bool isSymbolResolver() const {
+    return getFlags() & SF_SymbolResolver;
+  }
+  void setSymbolResolver() const {
+    modifyFlags(SF_SymbolResolver, SF_SymbolResolver);
+  }
+
+  void setAltEntry() const {
+    modifyFlags(SF_AltEntry, SF_AltEntry);
+  }
+
+  bool isAltEntry() const {
+    return getFlags() & SF_AltEntry;
+  }
+
+  void setDesc(unsigned Value) const {
+    assert(Value == (Value & SF_DescFlagsMask) &&
+           "Invalid .desc value!");
+    setFlags(Value & SF_DescFlagsMask);
+  }
+
+  /// \brief Get the encoded value of the flags as they will be emitted in to
+  /// the MachO binary
+  uint16_t getEncodedFlags(bool EncodeAsAltEntry) const {
+    uint16_t Flags = getFlags();
+
+    // Common alignment is packed into the 'desc' bits.
+    if (isCommon()) {
+      if (unsigned Align = getCommonAlignment()) {
+        unsigned Log2Size = Log2_32(Align);
+        assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
+        if (Log2Size > 15)
+          report_fatal_error("invalid 'common' alignment '" +
+                             Twine(Align) + "' for '" + getName() + "'",
+                             false);
+        Flags = (Flags & SF_CommonAlignmentMask) |
+                (Log2Size << SF_CommonAlignmentShift);
+      }
+    }
+
+    if (EncodeAsAltEntry)
+      Flags |= SF_AltEntry;
+
+    return Flags;
+  }
+
+  static bool classof(const MCSymbol *S) { return S->isMachO(); }
+};
+}
+
+#endif
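
A short sketch of the 'desc' round trip described above: the setters accumulate
flag bits, and getEncodedFlags() additionally packs a common symbol's log2
alignment into bits 8-11 on the way out (the helper name is illustrative):

    #include "llvm/MC/MCSymbolMachO.h"

    // Accumulate weak-definition and no-dead-strip desc bits, then fetch the
    // value that would be written to the nlist 'desc' field.
    static uint16_t encodeDesc(llvm::MCSymbolMachO &Sym) {
      Sym.setWeakDefinition();
      Sym.setNoDeadStrip();
      return Sym.getEncodedFlags(/*EncodeAsAltEntry=*/false);
    }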
diff --git a/linux-x64/clang/include/llvm/MC/MCSymbolWasm.h b/linux-x64/clang/include/llvm/MC/MCSymbolWasm.h
new file mode 100644
index 0000000..10eadb0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCSymbolWasm.h
@@ -0,0 +1,96 @@
+//===- MCSymbolWasm.h -  ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLWASM_H
+#define LLVM_MC_MCSYMBOLWASM_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+
+class MCSymbolWasm : public MCSymbol {
+  wasm::WasmSymbolType Type = wasm::WASM_SYMBOL_TYPE_DATA;
+  bool IsWeak = false;
+  bool IsHidden = false;
+  bool IsComdat = false;
+  std::string ModuleName;
+  SmallVector<wasm::ValType, 1> Returns;
+  SmallVector<wasm::ValType, 4> Params;
+  wasm::WasmGlobalType GlobalType;
+  bool ParamsSet = false;
+  bool ReturnsSet = false;
+  bool GlobalTypeSet = false;
+
+  /// An expression describing how to calculate the size of a symbol. If a
+  /// symbol has no size this field will be NULL.
+  const MCExpr *SymbolSize = nullptr;
+
+public:
+  // Use a module name of "env" for now, for compatibility with existing tools.
+  // This is temporary, and may change, as the ABI is not yet stable.
+  MCSymbolWasm(const StringMapEntry<bool> *Name, bool isTemporary)
+      : MCSymbol(SymbolKindWasm, Name, isTemporary),
+        ModuleName("env") {}
+  static bool classof(const MCSymbol *S) { return S->isWasm(); }
+
+  const MCExpr *getSize() const { return SymbolSize; }
+  void setSize(const MCExpr *SS) { SymbolSize = SS; }
+
+  bool isFunction() const { return Type == wasm::WASM_SYMBOL_TYPE_FUNCTION; }
+  bool isData() const { return Type == wasm::WASM_SYMBOL_TYPE_DATA; }
+  bool isGlobal() const { return Type == wasm::WASM_SYMBOL_TYPE_GLOBAL; }
+  wasm::WasmSymbolType getType() const { return Type; }
+  void setType(wasm::WasmSymbolType type) { Type = type; }
+
+  bool isWeak() const { return IsWeak; }
+  void setWeak(bool isWeak) { IsWeak = isWeak; }
+
+  bool isHidden() const { return IsHidden; }
+  void setHidden(bool isHidden) { IsHidden = isHidden; }
+
+  bool isComdat() const { return IsComdat; }
+  void setComdat(bool isComdat) { IsComdat = isComdat; }
+
+  StringRef getModuleName() const { return ModuleName; }
+  void setModuleName(StringRef Name) { ModuleName = Name; }
+
+  const SmallVector<wasm::ValType, 1> &getReturns() const {
+    assert(ReturnsSet);
+    return Returns;
+  }
+
+  void setReturns(SmallVectorImpl<wasm::ValType> &&Rets) {
+    ReturnsSet = true;
+    Returns = std::move(Rets);
+  }
+
+  const SmallVector<wasm::ValType, 4> &getParams() const {
+    assert(ParamsSet);
+    return Params;
+  }
+
+  void setParams(SmallVectorImpl<wasm::ValType> &&Pars) {
+    ParamsSet = true;
+    Params = std::move(Pars);
+  }
+
+  const wasm::WasmGlobalType &getGlobalType() const {
+    assert(GlobalTypeSet);
+    return GlobalType;
+  }
+
+  void setGlobalType(wasm::WasmGlobalType GT) {
+    GlobalTypeSet = true;
+    GlobalType = GT;
+  }
+};
+
+}  // end namespace llvm
+
+#endif // LLVM_MC_MCSYMBOLWASM_H
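
Params and returns are moved into the symbol, and the ParamsSet/ReturnsSet
flags guard the getters' asserts; a sketch describing an (i32, i32) -> i32
function (the function being described is hypothetical):

    #include "llvm/BinaryFormat/Wasm.h"
    #include "llvm/MC/MCSymbolWasm.h"
    #include <utility>

    // Describe the signature of a wasm function symbol such as
    // "i32 add(i32, i32)". Until setParams/setReturns run, calling the
    // corresponding getters would trip their asserts.
    static void describeAdd(llvm::MCSymbolWasm &Sym) {
      Sym.setType(llvm::wasm::WASM_SYMBOL_TYPE_FUNCTION);
      llvm::SmallVector<llvm::wasm::ValType, 4> Params = {
          llvm::wasm::ValType::I32, llvm::wasm::ValType::I32};
      llvm::SmallVector<llvm::wasm::ValType, 1> Returns = {
          llvm::wasm::ValType::I32};
      Sym.setParams(std::move(Params));
      Sym.setReturns(std::move(Returns));
    }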
diff --git a/linux-x64/clang/include/llvm/MC/MCTargetOptions.h b/linux-x64/clang/include/llvm/MC/MCTargetOptions.h
new file mode 100644
index 0000000..f5d330f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCTargetOptions.h
@@ -0,0 +1,80 @@
+//===- MCTargetOptions.h - MC Target Options --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONS_H
+#define LLVM_MC_MCTARGETOPTIONS_H
+
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+enum class ExceptionHandling {
+  None,     /// No exception support
+  DwarfCFI, /// DWARF-like instruction based exceptions
+  SjLj,     /// setjmp/longjmp based exceptions
+  ARM,      /// ARM EHABI
+  WinEH,    /// Windows Exception Handling
+  Wasm,     /// WebAssembly Exception Handling
+};
+
+enum class DebugCompressionType {
+  None, /// No compression
+  GNU,  /// zlib-gnu style compression
+  Z,    /// zlib style compression
+};
+
+class StringRef;
+
+class MCTargetOptions {
+public:
+  enum AsmInstrumentation {
+    AsmInstrumentationNone,
+    AsmInstrumentationAddress
+  };
+
+  /// Enables AddressSanitizer instrumentation at machine level.
+  bool SanitizeAddress : 1;
+
+  bool MCRelaxAll : 1;
+  bool MCNoExecStack : 1;
+  bool MCFatalWarnings : 1;
+  bool MCNoWarn : 1;
+  bool MCNoDeprecatedWarn : 1;
+  bool MCSaveTempLabels : 1;
+  bool MCUseDwarfDirectory : 1;
+  bool MCIncrementalLinkerCompatible : 1;
+  bool MCPIECopyRelocations : 1;
+  bool ShowMCEncoding : 1;
+  bool ShowMCInst : 1;
+  bool AsmVerbose : 1;
+
+  /// Preserve Comments in Assembly.
+  bool PreserveAsmComments : 1;
+
+  int DwarfVersion = 0;
+
+  std::string ABIName;
+  std::string SplitDwarfFile;
+
+  /// Additional paths to search for `.include` directives when using the
+  /// integrated assembler.
+  std::vector<std::string> IASSearchPaths;
+
+  MCTargetOptions();
+
+  /// getABIName - If this returns a non-empty string this represents the
+  /// textual name of the ABI that we want the backend to use, e.g. o32, or
+  /// aapcs-linux.
+  StringRef getABIName() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCTARGETOPTIONS_H
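
A sketch of typical driver-side use: default-construct the options, flip the
handful of bits the tool cares about, and hand the struct to the MC layer
(the particular field choices below are illustrative):

    #include "llvm/MC/MCTargetOptions.h"

    static llvm::MCTargetOptions makeOptions() {
      llvm::MCTargetOptions Opts;       // constructor zeroes the flags
      Opts.MCRelaxAll = true;           // relax all fixups in object output
      Opts.PreserveAsmComments = true;  // keep comments in emitted assembly
      Opts.ABIName = "aapcs-linux";     // later read back via getABIName()
      return Opts;
    }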
diff --git a/linux-x64/clang/include/llvm/MC/MCTargetOptionsCommandFlags.def b/linux-x64/clang/include/llvm/MC/MCTargetOptionsCommandFlags.def
new file mode 100644
index 0000000..5172fa4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCTargetOptionsCommandFlags.def
@@ -0,0 +1,80 @@
+//===-- MCTargetOptionsCommandFlags.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains machine code-specific flags that are shared between
+// different command line tools.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+#define LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+static cl::opt<MCTargetOptions::AsmInstrumentation> AsmInstrumentation(
+    "asm-instrumentation", cl::desc("Instrumentation of inline assembly and "
+                                    "assembly source files"),
+    cl::init(MCTargetOptions::AsmInstrumentationNone),
+    cl::values(clEnumValN(MCTargetOptions::AsmInstrumentationNone, "none",
+                          "no instrumentation at all"),
+               clEnumValN(MCTargetOptions::AsmInstrumentationAddress, "address",
+                          "instrument instructions with memory arguments")));
+
+static cl::opt<bool> RelaxAll("mc-relax-all",
+                       cl::desc("When used with filetype=obj, "
+                                "relax all fixups in the emitted object file"));
+
+static cl::opt<bool> IncrementalLinkerCompatible(
+    "incremental-linker-compatible",
+    cl::desc(
+        "When used with filetype=obj, "
+        "emit an object file which can be used with an incremental linker"));
+
+static cl::opt<bool> PIECopyRelocations("pie-copy-relocations", cl::desc("PIE Copy Relocations"));
+
+static cl::opt<int> DwarfVersion("dwarf-version", cl::desc("Dwarf version"),
+                          cl::init(0));
+
+static cl::opt<bool> ShowMCInst("asm-show-inst",
+                         cl::desc("Emit internal instruction representation to "
+                                  "assembly file"));
+
+static cl::opt<bool> FatalWarnings("fatal-warnings",
+                            cl::desc("Treat warnings as errors"));
+
+static cl::opt<bool> NoWarn("no-warn", cl::desc("Suppress all warnings"));
+static cl::alias NoWarnW("W", cl::desc("Alias for --no-warn"), cl::aliasopt(NoWarn));
+
+static cl::opt<bool> NoDeprecatedWarn("no-deprecated-warn",
+                               cl::desc("Suppress all deprecated warnings"));
+
+static cl::opt<std::string>
+ABIName("target-abi", cl::Hidden,
+        cl::desc("The name of the ABI to be targeted from the backend."),
+        cl::init(""));
+
+static MCTargetOptions InitMCTargetOptionsFromFlags() {
+  MCTargetOptions Options;
+  Options.SanitizeAddress =
+      (AsmInstrumentation == MCTargetOptions::AsmInstrumentationAddress);
+  Options.MCRelaxAll = RelaxAll;
+  Options.MCIncrementalLinkerCompatible = IncrementalLinkerCompatible;
+  Options.MCPIECopyRelocations = PIECopyRelocations;
+  Options.DwarfVersion = DwarfVersion;
+  Options.ShowMCInst = ShowMCInst;
+  Options.ABIName = ABIName;
+  Options.MCFatalWarnings = FatalWarnings;
+  Options.MCNoWarn = NoWarn;
+  Options.MCNoDeprecatedWarn = NoDeprecatedWarn;
+  return Options;
+}
+
+#endif
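
Because the .def defines static cl::opt globals plus
InitMCTargetOptionsFromFlags(), a standalone tool includes it exactly once in
a single .cpp and snapshots the flags after parsing the command line; a sketch
(the tool name is illustrative):

    #include "llvm/MC/MCTargetOptionsCommandFlags.def"

    int main(int argc, char **argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv, "my-mc-tool\n");
      // Snapshot the parsed -mc-relax-all, -dwarf-version, ... flags.
      llvm::MCTargetOptions Options = InitMCTargetOptionsFromFlags();
      (void)Options; // hand off to the target's MC layer from here
      return 0;
    }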
diff --git a/linux-x64/clang/include/llvm/MC/MCValue.h b/linux-x64/clang/include/llvm/MC/MCValue.h
new file mode 100644
index 0000000..ff223f7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCValue.h
@@ -0,0 +1,87 @@
+//===-- llvm/MC/MCValue.h - MCValue class -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCVALUE_H
+#define LLVM_MC_MCVALUE_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+class MCAsmInfo;
+class raw_ostream;
+
+/// \brief This represents an "assembler immediate".
+///
+///  In its most general form, this can hold ":Kind:(SymbolA - SymbolB +
+///  imm64)".  Not all targets supports relocations of this general form, but we
+///  need to represent this anyway.
+///
+/// In general both SymbolA and SymbolB will also have a modifier
+/// analogous to the top-level Kind. Current targets are not expected
+/// to make use of both though. The choice comes down to whether
+/// relocation modifiers apply to the closest symbol or the whole
+/// expression.
+///
+/// Note that this class must remain a simple POD value class, because we need
+/// it to live in unions etc.
+class MCValue {
+  const MCSymbolRefExpr *SymA = nullptr, *SymB = nullptr;
+  int64_t Cst = 0;
+  uint32_t RefKind = 0;
+
+public:
+  MCValue() = default;
+  int64_t getConstant() const { return Cst; }
+  const MCSymbolRefExpr *getSymA() const { return SymA; }
+  const MCSymbolRefExpr *getSymB() const { return SymB; }
+  uint32_t getRefKind() const { return RefKind; }
+
+  /// \brief Is this an absolute (as opposed to relocatable) value.
+  bool isAbsolute() const { return !SymA && !SymB; }
+
+  /// \brief Print the value to the stream \p OS.
+  void print(raw_ostream &OS) const;
+
+  /// \brief Print the value to stderr.
+  void dump() const;
+
+  MCSymbolRefExpr::VariantKind getAccessVariant() const;
+
+  static MCValue get(const MCSymbolRefExpr *SymA,
+                     const MCSymbolRefExpr *SymB = nullptr,
+                     int64_t Val = 0, uint32_t RefKind = 0) {
+    MCValue R;
+    R.Cst = Val;
+    R.SymA = SymA;
+    R.SymB = SymB;
+    R.RefKind = RefKind;
+    return R;
+  }
+
+  static MCValue get(int64_t Val) {
+    MCValue R;
+    R.Cst = Val;
+    R.SymA = nullptr;
+    R.SymB = nullptr;
+    R.RefKind = 0;
+    return R;
+  }
+
+};
+
+} // end namespace llvm
+
+#endif
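
A sketch of the general (SymbolA - SymbolB + constant) form the class comment
describes; with both symbol references present, isAbsolute() is false and the
target must lower the value to a difference-style relocation:

    #include "llvm/MC/MCValue.h"

    // Build the immediate "(A - B + 4)". MCValue stays a plain POD value, so
    // it can be returned and stored in unions freely.
    static llvm::MCValue makeDiff(const llvm::MCSymbolRefExpr *A,
                                  const llvm::MCSymbolRefExpr *B) {
      llvm::MCValue V = llvm::MCValue::get(A, B, /*Val=*/4);
      assert(!V.isAbsolute() && "both symbolic terms are present");
      return V;
    }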
diff --git a/linux-x64/clang/include/llvm/MC/MCWasmObjectWriter.h b/linux-x64/clang/include/llvm/MC/MCWasmObjectWriter.h
new file mode 100644
index 0000000..a4d5eb8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWasmObjectWriter.h
@@ -0,0 +1,53 @@
+//===-- llvm/MC/MCWasmObjectWriter.h - Wasm Object Writer -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWASMOBJECTWRITER_H
+#define LLVM_MC_MCWASMOBJECTWRITER_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MCFixup;
+class MCObjectWriter;
+class MCValue;
+class raw_pwrite_stream;
+
+class MCWasmObjectTargetWriter {
+  const unsigned Is64Bit : 1;
+
+protected:
+  explicit MCWasmObjectTargetWriter(bool Is64Bit_);
+
+public:
+  virtual ~MCWasmObjectTargetWriter();
+
+  virtual unsigned getRelocType(const MCValue &Target,
+                                const MCFixup &Fixup) const = 0;
+
+  /// \name Accessors
+  /// @{
+  bool is64Bit() const { return Is64Bit; }
+  /// @}
+};
+
+/// \brief Construct a new Wasm writer instance.
+///
+/// \param MOTW - The target specific Wasm writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+std::unique_ptr<MCObjectWriter>
+createWasmObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
+                       raw_pwrite_stream &OS);
+
+} // End llvm namespace
+
+#endif
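
Targets plug in by subclassing MCWasmObjectTargetWriter and implementing the
pure virtual getRelocType; a skeleton (the class name and the single
relocation constant returned are illustrative stand-ins, not a real mapping):

    #include "llvm/MC/MCWasmObjectWriter.h"

    namespace {
    // Minimal target hook: map every fixup to one wasm relocation type.
    // A real backend switches on Fixup.getKind() and Target's symbol form.
    class MyWasmObjectWriter : public llvm::MCWasmObjectTargetWriter {
    public:
      MyWasmObjectWriter() : MCWasmObjectTargetWriter(/*Is64Bit_=*/false) {}

      unsigned getRelocType(const llvm::MCValue &Target,
                            const llvm::MCFixup &Fixup) const override {
        return llvm::wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32; // placeholder
      }
    };
    } // end anonymous namespace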
diff --git a/linux-x64/clang/include/llvm/MC/MCWasmStreamer.h b/linux-x64/clang/include/llvm/MC/MCWasmStreamer.h
new file mode 100644
index 0000000..c0d4545
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWasmStreamer.h
@@ -0,0 +1,84 @@
+//===- MCWasmStreamer.h - MCStreamer Wasm Object File Interface -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWASMSTREAMER_H
+#define LLVM_MC_MCWASMSTREAMER_H
+
+#include "MCAsmBackend.h"
+#include "MCCodeEmitter.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class MCAssembler;
+class MCExpr;
+class MCInst;
+class raw_ostream;
+
+class MCWasmStreamer : public MCObjectStreamer {
+public:
+  MCWasmStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
+                 raw_pwrite_stream &OS, std::unique_ptr<MCCodeEmitter> Emitter)
+      : MCObjectStreamer(Context, std::move(TAB), OS, std::move(Emitter)),
+        SeenIdent(false) {}
+
+  ~MCWasmStreamer() override;
+
+  /// state management
+  void reset() override {
+    SeenIdent = false;
+    MCObjectStreamer::reset();
+  }
+
+  /// \name MCStreamer Interface
+  /// @{
+
+  void ChangeSection(MCSection *Section, const MCExpr *Subsection) override;
+  void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+  void EmitThumbFunc(MCSymbol *Func) override;
+  void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+  bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+  void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+  void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                        unsigned ByteAlignment) override;
+
+  void emitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
+
+  void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                             unsigned ByteAlignment) override;
+
+  void EmitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
+                    uint64_t Size = 0, unsigned ByteAlignment = 0) override;
+  void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+                      unsigned ByteAlignment = 0) override;
+  void EmitValueImpl(const MCExpr *Value, unsigned Size,
+                     SMLoc Loc = SMLoc()) override;
+
+  void EmitIdent(StringRef IdentString) override;
+
+  void EmitValueToAlignment(unsigned, int64_t, unsigned, unsigned) override;
+
+  void FinishImpl() override;
+
+private:
+  void EmitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &) override;
+  void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;
+
+  /// \brief Merge the content of the fragment \p EF into the fragment \p DF.
+  void mergeFragment(MCDataFragment *, MCDataFragment *);
+
+  bool SeenIdent;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCWin64EH.h b/linux-x64/clang/include/llvm/MC/MCWin64EH.h
new file mode 100644
index 0000000..83ea738
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWin64EH.h
@@ -0,0 +1,62 @@
+//===- MCWin64EH.h - Machine Code Win64 EH support --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains declarations to support the Win64 Exception Handling
+// scheme in MC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWIN64EH_H
+#define LLVM_MC_MCWIN64EH_H
+
+#include "llvm/MC/MCWinEH.h"
+#include "llvm/Support/Win64EH.h"
+
+namespace llvm {
+class MCStreamer;
+class MCSymbol;
+
+namespace Win64EH {
+struct Instruction {
+  static WinEH::Instruction PushNonVol(MCSymbol *L, unsigned Reg) {
+    return WinEH::Instruction(Win64EH::UOP_PushNonVol, L, Reg, -1);
+  }
+  static WinEH::Instruction Alloc(MCSymbol *L, unsigned Size) {
+    return WinEH::Instruction(Size > 128 ? UOP_AllocLarge : UOP_AllocSmall, L,
+                              -1, Size);
+  }
+  static WinEH::Instruction PushMachFrame(MCSymbol *L, bool Code) {
+    return WinEH::Instruction(UOP_PushMachFrame, L, -1, Code ? 1 : 0);
+  }
+  static WinEH::Instruction SaveNonVol(MCSymbol *L, unsigned Reg,
+                                       unsigned Offset) {
+    return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveNonVolBig
+                                                      : UOP_SaveNonVol,
+                              L, Reg, Offset);
+  }
+  static WinEH::Instruction SaveXMM(MCSymbol *L, unsigned Reg,
+                                    unsigned Offset) {
+    return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveXMM128Big
+                                                      : UOP_SaveXMM128,
+                              L, Reg, Offset);
+  }
+  static WinEH::Instruction SetFPReg(MCSymbol *L, unsigned Reg, unsigned Off) {
+    return WinEH::Instruction(UOP_SetFPReg, L, Reg, Off);
+  }
+};
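+
+// Usage sketch (illustrative, not part of the API): the static helpers above
+// select the large or small unwind opcode based on the operand value.
+//
+// \code
+//   // 'Label' is a hypothetical MCSymbol* marking the current prologue offset.
+//   WinEH::Instruction Push = Instruction::PushNonVol(Label, /*Reg=*/5);
+//   WinEH::Instruction Big  = Instruction::Alloc(Label, /*Size=*/4096);
+//   // Alloc picks UOP_AllocLarge here because 4096 > 128.
+// \endcode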
+
+class UnwindEmitter : public WinEH::UnwindEmitter {
+public:
+  void Emit(MCStreamer &Streamer) const override;
+  void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI) const override;
+};
+} // end namespace Win64EH
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MCWinCOFFObjectWriter.h b/linux-x64/clang/include/llvm/MC/MCWinCOFFObjectWriter.h
new file mode 100644
index 0000000..3234bd9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWinCOFFObjectWriter.h
@@ -0,0 +1,52 @@
+//===- llvm/MC/MCWinCOFFObjectWriter.h - Win COFF Object Writer -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFOBJECTWRITER_H
+#define LLVM_MC_MCWINCOFFOBJECTWRITER_H
+
+#include <memory>
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCContext;
+class MCFixup;
+class MCObjectWriter;
+class MCValue;
+class raw_pwrite_stream;
+
+class MCWinCOFFObjectTargetWriter {
+  virtual void anchor();
+
+  const unsigned Machine;
+
+protected:
+  MCWinCOFFObjectTargetWriter(unsigned Machine_);
+
+public:
+  virtual ~MCWinCOFFObjectTargetWriter() = default;
+
+  unsigned getMachine() const { return Machine; }
+  virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+                                const MCFixup &Fixup, bool IsCrossSection,
+                                const MCAsmBackend &MAB) const = 0;
+  virtual bool recordRelocation(const MCFixup &) const { return true; }
+};
+
+/// \brief Construct a new Win COFF writer instance.
+///
+/// \param MOTW - The target specific WinCOFF writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+std::unique_ptr<MCObjectWriter>
+createWinCOFFObjectWriter(std::unique_ptr<MCWinCOFFObjectTargetWriter> MOTW,
+                          raw_pwrite_stream &OS);
+} // end namespace llvm
+
+#endif // LLVM_MC_MCWINCOFFOBJECTWRITER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCWinCOFFStreamer.h b/linux-x64/clang/include/llvm/MC/MCWinCOFFStreamer.h
new file mode 100644
index 0000000..60c17ca
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWinCOFFStreamer.h
@@ -0,0 +1,81 @@
+//===- MCWinCOFFStreamer.h - COFF Object File Interface ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFSTREAMER_H
+#define LLVM_MC_MCWINCOFFSTREAMER_H
+
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCContext;
+class MCCodeEmitter;
+class MCInst;
+class MCSection;
+class MCSubtargetInfo;
+class MCSymbol;
+class StringRef;
+class raw_pwrite_stream;
+
+class MCWinCOFFStreamer : public MCObjectStreamer {
+public:
+  MCWinCOFFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
+                    std::unique_ptr<MCCodeEmitter> CE, raw_pwrite_stream &OS);
+
+  /// state management
+  void reset() override {
+    CurSymbol = nullptr;
+    MCObjectStreamer::reset();
+  }
+
+  /// \name MCStreamer interface
+  /// \{
+
+  void InitSections(bool NoExecStack) override;
+  void EmitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
+  void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+  void EmitThumbFunc(MCSymbol *Func) override;
+  bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+  void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+  void BeginCOFFSymbolDef(MCSymbol const *Symbol) override;
+  void EmitCOFFSymbolStorageClass(int StorageClass) override;
+  void EmitCOFFSymbolType(int Type) override;
+  void EndCOFFSymbolDef() override;
+  void EmitCOFFSafeSEH(MCSymbol const *Symbol) override;
+  void EmitCOFFSymbolIndex(MCSymbol const *Symbol) override;
+  void EmitCOFFSectionIndex(MCSymbol const *Symbol) override;
+  void EmitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset) override;
+  void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                        unsigned ByteAlignment) override;
+  void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+                             unsigned ByteAlignment) override;
+  void EmitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+                    unsigned ByteAlignment) override;
+  void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+                      unsigned ByteAlignment) override;
+  void EmitIdent(StringRef IdentString) override;
+  void EmitWinEHHandlerData(SMLoc Loc) override;
+  void FinishImpl() override;
+
+  /// \}
+
+protected:
+  const MCSymbol *CurSymbol;
+
+  void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+
+private:
+  void Error(const Twine &Msg) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MCWINCOFFSTREAMER_H
diff --git a/linux-x64/clang/include/llvm/MC/MCWinEH.h b/linux-x64/clang/include/llvm/MC/MCWinEH.h
new file mode 100644
index 0000000..4ca52a6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MCWinEH.h
@@ -0,0 +1,67 @@
+//===- MCWinEH.h - Windows Unwinding Support --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINEH_H
+#define LLVM_MC_MCWINEH_H
+
+#include <vector>
+
+namespace llvm {
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+
+namespace WinEH {
+struct Instruction {
+  const MCSymbol *Label;
+  const unsigned Offset;
+  const unsigned Register;
+  const unsigned Operation;
+
+  Instruction(unsigned Op, MCSymbol *L, unsigned Reg, unsigned Off)
+    : Label(L), Offset(Off), Register(Reg), Operation(Op) {}
+};
+
+struct FrameInfo {
+  const MCSymbol *Begin = nullptr;
+  const MCSymbol *End = nullptr;
+  const MCSymbol *ExceptionHandler = nullptr;
+  const MCSymbol *Function = nullptr;
+  const MCSymbol *PrologEnd = nullptr;
+  const MCSymbol *Symbol = nullptr;
+  const MCSection *TextSection = nullptr;
+
+  bool HandlesUnwind = false;
+  bool HandlesExceptions = false;
+
+  int LastFrameInst = -1;
+  const FrameInfo *ChainedParent = nullptr;
+  std::vector<Instruction> Instructions;
+
+  FrameInfo() = default;
+  FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel)
+      : Begin(BeginFuncEHLabel), Function(Function) {}
+  FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel,
+            const FrameInfo *ChainedParent)
+      : Begin(BeginFuncEHLabel), Function(Function),
+        ChainedParent(ChainedParent) {}
+};
+
+class UnwindEmitter {
+public:
+  virtual ~UnwindEmitter();
+
+  /// This emits the unwind info sections (.pdata and .xdata in PE/COFF).
+  virtual void Emit(MCStreamer &Streamer) const = 0;
+  virtual void EmitUnwindInfo(MCStreamer &Streamer, FrameInfo *FI) const = 0;
+};
+} // end namespace WinEH
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/MachineLocation.h b/linux-x64/clang/include/llvm/MC/MachineLocation.h
new file mode 100644
index 0000000..91ed661
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/MachineLocation.h
@@ -0,0 +1,59 @@
+//===- llvm/MC/MachineLocation.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MachineLocation class is used to represent a simple location in a
+// machine frame.  Locations take one of two forms: a register, or an address
+// formed from a base address plus an offset.  Register indirection can be
+// specified by passing Indirect = true to the constructor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MACHINELOCATION_H
+#define LLVM_MC_MACHINELOCATION_H
+
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+class MachineLocation {
+private:
+  bool IsRegister = false;              ///< True if location is a register.
+  unsigned Register = 0;                ///< gcc/gdb register number.
+
+public:
+  enum : uint32_t {
+    // The target register number for an abstract frame pointer. The value is
+    // an arbitrary value that doesn't collide with any real target register.
+    VirtualFP = ~0U
+  };
+
+  MachineLocation() = default;
+  /// Create a direct register location.
+  explicit MachineLocation(unsigned R, bool Indirect = false)
+      : IsRegister(!Indirect), Register(R) {}
+
+  bool operator==(const MachineLocation &Other) const {
+    return IsRegister == Other.IsRegister && Register == Other.Register;
+  }
+
+  // Accessors.
+  /// \return true iff this is a register-indirect location.
+  bool isIndirect()      const { return !IsRegister; }
+  bool isReg()           const { return IsRegister; }
+  unsigned getReg()      const { return Register; }
+  void setIsRegister(bool Is)  { IsRegister = Is; }
+  void setRegister(unsigned R) { Register = R; }
+};
+
+inline bool operator!=(const MachineLocation &LHS, const MachineLocation &RHS) {
+  return !(LHS == RHS);
+}
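+
+// Illustrative sketch: a direct register location compares unequal to a
+// register-indirect location for the same register.
+//
+// \code
+//   MachineLocation Direct(/*R=*/7);
+//   MachineLocation Indirect(/*R=*/7, /*Indirect=*/true);
+//   assert(Direct != Indirect && Indirect.isIndirect());
+// \endcode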
+
+} // end namespace llvm
+
+#endif // LLVM_MC_MACHINELOCATION_H
diff --git a/linux-x64/clang/include/llvm/MC/SectionKind.h b/linux-x64/clang/include/llvm/MC/SectionKind.h
new file mode 100644
index 0000000..66eb9ec
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/SectionKind.h
@@ -0,0 +1,208 @@
+//===-- llvm/MC/SectionKind.h - Classification of sections ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_SECTIONKIND_H
+#define LLVM_MC_SECTIONKIND_H
+
+namespace llvm {
+
+/// SectionKind - This is a simple POD value that classifies the properties of
+/// a section.  A section is classified into the deepest possible
+/// classification, and then the target maps each classification onto its own
+/// sections based on what capabilities it has.
+///
+/// The comments below describe these as if they were an inheritance hierarchy
+/// in order to explain the predicates below.
+///
+class SectionKind {
+  enum Kind {
+    /// Metadata - Debug info sections or other metadata.
+    Metadata,
+
+    /// Text - Text section, used for functions and other executable code.
+    Text,
+
+           /// ExecuteOnly - Text section that is not readable.
+           ExecuteOnly,
+
+    /// ReadOnly - Data that is never written to at program runtime by the
+    /// program or the dynamic linker.  Things in the top-level readonly
+    /// SectionKind are not mergeable.
+    ReadOnly,
+
+        /// MergeableCString - Any null-terminated string which allows merging.
+        /// These values are known to end in a nul value of the specified size,
+        /// not otherwise contain a nul value, and be mergeable.  This allows
+        /// the linker to unique the strings if it so desires.
+
+           /// Mergeable1ByteCString - 1-byte mergeable, null-terminated string.
+           Mergeable1ByteCString,
+
+           /// Mergeable2ByteCString - 2-byte mergeable, null-terminated string.
+           Mergeable2ByteCString,
+
+           /// Mergeable4ByteCString - 4-byte mergeable, null-terminated string.
+           Mergeable4ByteCString,
+
+        /// MergeableConst - These are sections for merging fixed-length
+        /// constants together.  For example, this can be used to unique
+        /// constant pool entries etc.
+
+            /// MergeableConst4 - This is a section used by 4-byte constants,
+            /// for example, floats.
+            MergeableConst4,
+
+            /// MergeableConst8 - This is a section used by 8-byte constants,
+            /// for example, doubles.
+            MergeableConst8,
+
+            /// MergeableConst16 - This is a section used by 16-byte constants,
+            /// for example, vectors.
+            MergeableConst16,
+
+            /// MergeableConst32 - This is a section used by 32-byte constants,
+            /// for example, vectors.
+            MergeableConst32,
+
+    /// Writeable - This is the base of all segments that need to be written
+    /// to during program runtime.
+
+       /// ThreadLocal - This is the base of all TLS segments.  All TLS
+       /// objects must be writeable, otherwise there is no reason for them to
+       /// be thread local!
+
+           /// ThreadBSS - Zero-initialized TLS data objects.
+           ThreadBSS,
+
+           /// ThreadData - Initialized TLS data objects.
+           ThreadData,
+
+       /// GlobalWriteableData - Writeable data that is global (not thread
+       /// local).
+
+           /// BSS - Zero initialized writeable data.
+           BSS,
+
+               /// BSSLocal - This is BSS (zero initialized and writable) data
+               /// which has local linkage.
+               BSSLocal,
+
+               /// BSSExtern - This is BSS data with normal external linkage.
+               BSSExtern,
+
+           /// Common - Data with common linkage.  These represent tentative
+           /// definitions, which always have a zero initializer and are never
+           /// marked 'constant'.
+           Common,
+
+           /// Data - Writeable data that has a non-zero initializer.
+           Data,
+
+           /// ReadOnlyWithRel - These are global variables that are never
+           /// written to by the program, but that have relocations, so they
+           /// must be stuck in a writeable section so that the dynamic linker
+           /// can write to them.  If it chooses to, the dynamic linker can
+           /// mark the pages these globals end up on as read-only after it is
+           /// done with its relocation phase.
+           ReadOnlyWithRel
+  } K : 8;
+public:
+
+  bool isMetadata() const { return K == Metadata; }
+
+  bool isText() const { return K == Text || K == ExecuteOnly; }
+
+  bool isExecuteOnly() const { return K == ExecuteOnly; }
+
+  bool isReadOnly() const {
+    return K == ReadOnly || isMergeableCString() ||
+           isMergeableConst();
+  }
+
+  bool isMergeableCString() const {
+    return K == Mergeable1ByteCString || K == Mergeable2ByteCString ||
+           K == Mergeable4ByteCString;
+  }
+  bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
+  bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
+  bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
+
+  bool isMergeableConst() const {
+    return K == MergeableConst4 || K == MergeableConst8 ||
+           K == MergeableConst16 || K == MergeableConst32;
+  }
+  bool isMergeableConst4() const { return K == MergeableConst4; }
+  bool isMergeableConst8() const { return K == MergeableConst8; }
+  bool isMergeableConst16() const { return K == MergeableConst16; }
+  bool isMergeableConst32() const { return K == MergeableConst32; }
+
+  bool isWriteable() const {
+    return isThreadLocal() || isGlobalWriteableData();
+  }
+
+  bool isThreadLocal() const {
+    return K == ThreadData || K == ThreadBSS;
+  }
+
+  bool isThreadBSS() const { return K == ThreadBSS; }
+  bool isThreadData() const { return K == ThreadData; }
+
+  bool isGlobalWriteableData() const {
+    return isBSS() || isCommon() || isData() || isReadOnlyWithRel();
+  }
+
+  bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
+  bool isBSSLocal() const { return K == BSSLocal; }
+  bool isBSSExtern() const { return K == BSSExtern; }
+
+  bool isCommon() const { return K == Common; }
+
+  bool isData() const { return K == Data; }
+
+  bool isReadOnlyWithRel() const {
+    return K == ReadOnlyWithRel;
+  }
+private:
+  static SectionKind get(Kind K) {
+    SectionKind Res;
+    Res.K = K;
+    return Res;
+  }
+public:
+
+  static SectionKind getMetadata() { return get(Metadata); }
+  static SectionKind getText() { return get(Text); }
+  static SectionKind getExecuteOnly() { return get(ExecuteOnly); }
+  static SectionKind getReadOnly() { return get(ReadOnly); }
+  static SectionKind getMergeable1ByteCString() {
+    return get(Mergeable1ByteCString);
+  }
+  static SectionKind getMergeable2ByteCString() {
+    return get(Mergeable2ByteCString);
+  }
+  static SectionKind getMergeable4ByteCString() {
+    return get(Mergeable4ByteCString);
+  }
+  static SectionKind getMergeableConst4() { return get(MergeableConst4); }
+  static SectionKind getMergeableConst8() { return get(MergeableConst8); }
+  static SectionKind getMergeableConst16() { return get(MergeableConst16); }
+  static SectionKind getMergeableConst32() { return get(MergeableConst32); }
+  static SectionKind getThreadBSS() { return get(ThreadBSS); }
+  static SectionKind getThreadData() { return get(ThreadData); }
+  static SectionKind getBSS() { return get(BSS); }
+  static SectionKind getBSSLocal() { return get(BSSLocal); }
+  static SectionKind getBSSExtern() { return get(BSSExtern); }
+  static SectionKind getCommon() { return get(Common); }
+  static SectionKind getData() { return get(Data); }
+  static SectionKind getReadOnlyWithRel() { return get(ReadOnlyWithRel); }
+};
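+
+// Illustrative sketch: the predicates mirror the hierarchy documented above,
+// so a mergeable C string kind is also read-only but never writeable.
+//
+// \code
+//   SectionKind K = SectionKind::getMergeable1ByteCString();
+//   assert(K.isMergeableCString() && K.isReadOnly() && !K.isWriteable());
+// \endcode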
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/MC/StringTableBuilder.h b/linux-x64/clang/include/llvm/MC/StringTableBuilder.h
new file mode 100644
index 0000000..89bc55a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/StringTableBuilder.h
@@ -0,0 +1,75 @@
+//===- StringTableBuilder.h - String table building utility -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_STRINGTABLEBUILDER_H
+#define LLVM_MC_STRINGTABLEBUILDER_H
+
+#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// \brief Utility for building string tables with deduplicated suffixes.
+class StringTableBuilder {
+public:
+  enum Kind { ELF, WinCOFF, MachO, RAW, DWARF };
+
+private:
+  DenseMap<CachedHashStringRef, size_t> StringIndexMap;
+  size_t Size = 0;
+  Kind K;
+  unsigned Alignment;
+  bool Finalized = false;
+
+  void finalizeStringTable(bool Optimize);
+  void initSize();
+
+public:
+  StringTableBuilder(Kind K, unsigned Alignment = 1);
+  ~StringTableBuilder();
+
+  /// \brief Add a string to the builder. Returns the position of S in the
+  /// table; the position may change if finalize() reorders the table.
+  /// Can only be called before the table is finalized.
+  size_t add(CachedHashStringRef S);
+  size_t add(StringRef S) { return add(CachedHashStringRef(S)); }
+
+  /// \brief Analyze the strings and build the final table. No more strings can
+  /// be added after this point.
+  void finalize();
+
+  /// Finalize the string table without reordering it. In this mode, offsets
+  /// returned by add will still be valid.
+  void finalizeInOrder();
+
+  /// \brief Get the offset of a string in the string table. Can only be used
+  /// after the table is finalized.
+  size_t getOffset(CachedHashStringRef S) const;
+  size_t getOffset(StringRef S) const {
+    return getOffset(CachedHashStringRef(S));
+  }
+
+  size_t getSize() const { return Size; }
+  void clear();
+
+  void write(raw_ostream &OS) const;
+  void write(uint8_t *Buf) const;
+
+private:
+  bool isFinalized() const { return Finalized; }
+};
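+
+// Usage sketch (illustrative): add strings, finalize, then query offsets.
+// finalize() may reorder the table, so offsets returned by add() are only
+// stable when finalizeInOrder() is used instead.
+//
+// \code
+//   StringTableBuilder Builder(StringTableBuilder::ELF);
+//   Builder.add("hello");
+//   Builder.add("lo");      // may be stored as a suffix of "hello"
+//   Builder.finalize();
+//   size_t Off = Builder.getOffset("lo");
+// \endcode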
+
+} // end namespace llvm
+
+#endif // LLVM_MC_STRINGTABLEBUILDER_H
diff --git a/linux-x64/clang/include/llvm/MC/SubtargetFeature.h b/linux-x64/clang/include/llvm/MC/SubtargetFeature.h
new file mode 100644
index 0000000..76c7dd5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/MC/SubtargetFeature.h
@@ -0,0 +1,133 @@
+//===- llvm/MC/SubtargetFeature.h - CPU characteristics ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Defines and manages user or tool specified CPU characteristics.
+/// The intent is to be able to package specific features that should or should
+/// not be used on a specific target processor.  A tool, such as llc, could, for
+/// example, gather chip info from the command line along with features that
+/// should be used on that chip.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_SUBTARGETFEATURE_H
+#define LLVM_MC_SUBTARGETFEATURE_H
+
+#include "llvm/ADT/StringRef.h"
+#include <bitset>
+#include <initializer_list>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class raw_ostream;
+class Triple;
+
+const unsigned MAX_SUBTARGET_FEATURES = 192;
+
+/// Container class for subtarget features.
+/// This is convenient because std::bitset does not have a constructor
+/// with an initializer list of set bits.
+class FeatureBitset : public std::bitset<MAX_SUBTARGET_FEATURES> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+  FeatureBitset() = default;
+
+  FeatureBitset(const bitset<MAX_SUBTARGET_FEATURES> &B) : bitset(B) {}
+
+  FeatureBitset(std::initializer_list<unsigned> Init) {
+    for (auto I : Init)
+      set(I);
+  }
+};
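+
+// e.g. (illustrative): FeatureBitset Bits({1, 5}); sets bits 1 and 5 via the
+// initializer-list constructor above.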
+
+//===----------------------------------------------------------------------===//
+
+/// Used to provide key value pairs for feature and CPU bit flags.
+struct SubtargetFeatureKV {
+  const char *Key;                      ///< K-V key string
+  const char *Desc;                     ///< Help descriptor
+  FeatureBitset Value;                  ///< Feature bits set by this entry
+  FeatureBitset Implies;                ///< Feature bits implied by this entry
+
+  /// Compare routine for std::lower_bound
+  bool operator<(StringRef S) const {
+    return StringRef(Key) < S;
+  }
+
+  /// Compare routine for std::is_sorted.
+  bool operator<(const SubtargetFeatureKV &Other) const {
+    return StringRef(Key) < StringRef(Other.Key);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+
+/// Used to provide key value pairs for CPU and arbitrary pointers.
+struct SubtargetInfoKV {
+  const char *Key;                      ///< K-V key string
+  const void *Value;                    ///< K-V pointer value
+
+  /// Compare routine for std::lower_bound
+  bool operator<(StringRef S) const {
+    return StringRef(Key) < S;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+
+/// Manages the enabling and disabling of subtarget specific features.
+///
+/// Features are encoded as a string of the form
+///   "+attr1,+attr2,-attr3,...,+attrN"
+/// A comma separates each feature from the next (all lowercase).
+/// Each feature is prefixed with + or -, indicating whether it should be
+/// enabled or disabled contrary to the CPU specification.
+class SubtargetFeatures {
+  std::vector<std::string> Features;    ///< Subtarget features as a vector
+
+public:
+  explicit SubtargetFeatures(StringRef Initial = "");
+
+  /// Returns features as a string.
+  std::string getString() const;
+
+  /// Adds a feature to the feature list.
+  void AddFeature(StringRef String, bool Enable = true);
+
+  /// Toggles a feature and updates the feature bits.
+  static void ToggleFeature(FeatureBitset &Bits, StringRef String,
+                            ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+  /// Applies the feature flag and updates the feature bits.
+  static void ApplyFeatureFlag(FeatureBitset &Bits, StringRef Feature,
+                               ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+  /// Returns feature bits of a CPU.
+  FeatureBitset getFeatureBits(StringRef CPU,
+                               ArrayRef<SubtargetFeatureKV> CPUTable,
+                               ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+  /// Returns the vector of individual subtarget features.
+  const std::vector<std::string> &getFeatures() const { return Features; }
+
+  /// Prints feature string.
+  void print(raw_ostream &OS) const;
+
+  // Dumps feature info.
+  void dump() const;
+
+  /// Adds the default features for the specified target triple.
+  void getDefaultSubtargetFeatures(const Triple& Triple);
+};
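+
+// Usage sketch (illustrative; "sse2", "avx" and "fma" are example feature
+// names, not guaranteed to exist for any target):
+//
+// \code
+//   SubtargetFeatures Features("+sse2");
+//   Features.AddFeature("avx");            // enables avx
+//   Features.AddFeature("fma", false);     // disables fma
+//   std::string FS = Features.getString(); // e.g. "+sse2,+avx,-fma"
+// \endcode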
+
+} // end namespace llvm
+
+#endif // LLVM_MC_SUBTARGETFEATURE_H
diff --git a/linux-x64/clang/include/llvm/Object/Archive.h b/linux-x64/clang/include/llvm/Object/Archive.h
new file mode 100644
index 0000000..5a1512b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/Archive.h
@@ -0,0 +1,289 @@
+//===- Archive.h - ar archive file format -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ar archive file format class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ARCHIVE_H
+#define LLVM_OBJECT_ARCHIVE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace object {
+
+class Archive;
+
+class ArchiveMemberHeader {
+public:
+  friend class Archive;
+
+  ArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
+                      uint64_t Size, Error *Err);
+
+  /// Get the name without looking up long names.
+  Expected<StringRef> getRawName() const;
+
+  /// Get the name looking up long names.
+  Expected<StringRef> getName(uint64_t Size) const;
+
+  /// Members are not larger than 4GB.
+  Expected<uint32_t> getSize() const;
+
+  Expected<sys::fs::perms> getAccessMode() const;
+  Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const;
+
+  StringRef getRawLastModified() const {
+    return StringRef(ArMemHdr->LastModified,
+                     sizeof(ArMemHdr->LastModified)).rtrim(' ');
+  }
+
+  Expected<unsigned> getUID() const;
+  Expected<unsigned> getGID() const;
+
+  /// Returns the size of the private struct ArMemHdrType.
+  uint64_t getSizeOf() const {
+    return sizeof(ArMemHdrType);
+  }
+
+private:
+  struct ArMemHdrType {
+    char Name[16];
+    char LastModified[12];
+    char UID[6];
+    char GID[6];
+    char AccessMode[8];
+    char Size[10]; ///< Size of data, not including header or padding.
+    char Terminator[2];
+  };
+  Archive const *Parent;
+  ArMemHdrType const *ArMemHdr;
+};
+
+class Archive : public Binary {
+  virtual void anchor();
+
+public:
+  class Child {
+    friend Archive;
+    friend ArchiveMemberHeader;
+
+    const Archive *Parent;
+    ArchiveMemberHeader Header;
+    /// \brief Includes header but not padding byte.
+    StringRef Data;
+    /// \brief Offset from Data to the start of the file.
+    uint16_t StartOfFile;
+
+    Expected<bool> isThinMember() const;
+
+  public:
+    Child(const Archive *Parent, const char *Start, Error *Err);
+    Child(const Archive *Parent, StringRef Data, uint16_t StartOfFile);
+
+    bool operator==(const Child &other) const {
+      assert(!Parent || !other.Parent || Parent == other.Parent);
+      return Data.begin() == other.Data.begin();
+    }
+
+    const Archive *getParent() const { return Parent; }
+    Expected<Child> getNext() const;
+
+    Expected<StringRef> getName() const;
+    Expected<std::string> getFullName() const;
+    Expected<StringRef> getRawName() const { return Header.getRawName(); }
+
+    Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const {
+      return Header.getLastModified();
+    }
+
+    StringRef getRawLastModified() const {
+      return Header.getRawLastModified();
+    }
+
+    Expected<unsigned> getUID() const { return Header.getUID(); }
+    Expected<unsigned> getGID() const { return Header.getGID(); }
+
+    Expected<sys::fs::perms> getAccessMode() const {
+      return Header.getAccessMode();
+    }
+
+    /// \return the size of the archive member without the header or padding.
+    Expected<uint64_t> getSize() const;
+    /// \return the size in the archive header for this member.
+    Expected<uint64_t> getRawSize() const;
+
+    Expected<StringRef> getBuffer() const;
+    uint64_t getChildOffset() const;
+
+    Expected<MemoryBufferRef> getMemoryBufferRef() const;
+
+    Expected<std::unique_ptr<Binary>>
+    getAsBinary(LLVMContext *Context = nullptr) const;
+  };
+
+  class child_iterator {
+    Child C;
+    Error *E = nullptr;
+
+  public:
+    child_iterator() : C(Child(nullptr, nullptr, nullptr)) {}
+    child_iterator(const Child &C, Error *E) : C(C), E(E) {}
+
+    const Child *operator->() const { return &C; }
+    const Child &operator*() const { return C; }
+
+    bool operator==(const child_iterator &other) const {
+      // Ignore errors here: If an error occurred during increment then getNext
+      // will have been set to child_end(), and the following comparison should
+      // do the right thing.
+      return C == other.C;
+    }
+
+    bool operator!=(const child_iterator &other) const {
+      return !(*this == other);
+    }
+
+    // Code in loops with child_iterators must check for errors on each
+    // iteration and, if an error occurs, break out of the loop.
+    child_iterator &operator++() { // Preincrement
+      assert(E && "Can't increment iterator with no Error attached");
+      ErrorAsOutParameter ErrAsOutParam(E);
+      if (auto ChildOrErr = C.getNext())
+        C = *ChildOrErr;
+      else {
+        C = C.getParent()->child_end().C;
+        *E = ChildOrErr.takeError();
+        E = nullptr;
+      }
+      return *this;
+    }
+  };
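+
+  // Illustrative sketch (not part of the API) of the error-checked loop
+  // pattern required by child_iterator, assuming 'A' is an Archive:
+  //
+  // \code
+  //   Error Err = Error::success();
+  //   for (const Archive::Child &C : A.children(Err)) {
+  //     // ... use C ...
+  //   }
+  //   if (Err)
+  //     report_fatal_error(std::move(Err)); // an increment failed
+  // \endcode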
+
+  class Symbol {
+    const Archive *Parent;
+    uint32_t SymbolIndex;
+    uint32_t StringIndex; // Extra index to the string.
+
+  public:
+    Symbol(const Archive *p, uint32_t symi, uint32_t stri)
+      : Parent(p)
+      , SymbolIndex(symi)
+      , StringIndex(stri) {}
+
+    bool operator==(const Symbol &other) const {
+      return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
+    }
+
+    StringRef getName() const;
+    Expected<Child> getMember() const;
+    Symbol getNext() const;
+  };
+
+  class symbol_iterator {
+    Symbol symbol;
+
+  public:
+    symbol_iterator(const Symbol &s) : symbol(s) {}
+
+    const Symbol *operator->() const { return &symbol; }
+    const Symbol &operator*() const { return symbol; }
+
+    bool operator==(const symbol_iterator &other) const {
+      return symbol == other.symbol;
+    }
+
+    bool operator!=(const symbol_iterator &other) const {
+      return !(*this == other);
+    }
+
+    symbol_iterator &operator++() { // Preincrement
+      symbol = symbol.getNext();
+      return *this;
+    }
+  };
+
+  Archive(MemoryBufferRef Source, Error &Err);
+  static Expected<std::unique_ptr<Archive>> create(MemoryBufferRef Source);
+
+  enum Kind {
+    K_GNU,
+    K_GNU64,
+    K_BSD,
+    K_DARWIN,
+    K_DARWIN64,
+    K_COFF
+  };
+
+  Kind kind() const { return (Kind)Format; }
+  bool isThin() const { return IsThin; }
+
+  child_iterator child_begin(Error &Err, bool SkipInternal = true) const;
+  child_iterator child_end() const;
+  iterator_range<child_iterator> children(Error &Err,
+                                          bool SkipInternal = true) const {
+    return make_range(child_begin(Err, SkipInternal), child_end());
+  }
+
+  symbol_iterator symbol_begin() const;
+  symbol_iterator symbol_end() const;
+  iterator_range<symbol_iterator> symbols() const {
+    return make_range(symbol_begin(), symbol_end());
+  }
+
+  // Cast methods.
+  static bool classof(Binary const *v) {
+    return v->isArchive();
+  }
+
+  /// Check whether a symbol is present in the archive.
+  Expected<Optional<Child>> findSym(StringRef name) const;
+
+  bool isEmpty() const;
+  bool hasSymbolTable() const;
+  StringRef getSymbolTable() const { return SymbolTable; }
+  StringRef getStringTable() const { return StringTable; }
+  uint32_t getNumberOfSymbols() const;
+
+  std::vector<std::unique_ptr<MemoryBuffer>> takeThinBuffers() {
+    return std::move(ThinBuffers);
+  }
+
+private:
+  StringRef SymbolTable;
+  StringRef StringTable;
+
+  StringRef FirstRegularData;
+  uint16_t FirstRegularStartOfFile = -1;
+  void setFirstRegular(const Child &C);
+
+  unsigned Format : 3;
+  unsigned IsThin : 1;
+  mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
+};
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_ARCHIVE_H
diff --git a/linux-x64/clang/include/llvm/Object/ArchiveWriter.h b/linux-x64/clang/include/llvm/Object/ArchiveWriter.h
new file mode 100644
index 0000000..495b943
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ArchiveWriter.h
@@ -0,0 +1,47 @@
+//===- ArchiveWriter.h - ar archive file format writer ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the writeArchive function for writing an archive file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ARCHIVEWRITER_H
+#define LLVM_OBJECT_ARCHIVEWRITER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+
+struct NewArchiveMember {
+  std::unique_ptr<MemoryBuffer> Buf;
+  StringRef MemberName;
+  sys::TimePoint<std::chrono::seconds> ModTime;
+  unsigned UID = 0, GID = 0, Perms = 0644;
+
+  bool IsNew = false;
+  NewArchiveMember() = default;
+  NewArchiveMember(MemoryBufferRef BufRef);
+
+  static Expected<NewArchiveMember>
+  getOldMember(const object::Archive::Child &OldMember, bool Deterministic);
+
+  static Expected<NewArchiveMember> getFile(StringRef FileName,
+                                            bool Deterministic);
+};
+
+Error writeArchive(StringRef ArcName, ArrayRef<NewArchiveMember> NewMembers,
+                   bool WriteSymtab, object::Archive::Kind Kind,
+                   bool Deterministic, bool Thin,
+                   std::unique_ptr<MemoryBuffer> OldArchiveBuf = nullptr);
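+
+// Usage sketch (illustrative; the file names are hypothetical):
+//
+// \code
+//   std::vector<NewArchiveMember> Members;
+//   Expected<NewArchiveMember> M =
+//       NewArchiveMember::getFile("a.o", /*Deterministic=*/true);
+//   if (!M)
+//     return M.takeError();
+//   Members.push_back(std::move(*M));
+//   Error E = writeArchive("lib.a", Members, /*WriteSymtab=*/true,
+//                          object::Archive::K_GNU, /*Deterministic=*/true,
+//                          /*Thin=*/false);
+// \endcode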
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Object/Binary.h b/linux-x64/clang/include/llvm/Object/Binary.h
new file mode 100644
index 0000000..5e93691
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/Binary.h
@@ -0,0 +1,219 @@
+//===- Binary.h - A generic binary file -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Binary class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_BINARY_H
+#define LLVM_OBJECT_BINARY_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class LLVMContext;
+class StringRef;
+
+namespace object {
+
+class Binary {
+private:
+  unsigned int TypeID;
+
+protected:
+  MemoryBufferRef Data;
+
+  Binary(unsigned int Type, MemoryBufferRef Source);
+
+  enum {
+    ID_Archive,
+    ID_MachOUniversalBinary,
+    ID_COFFImportFile,
+    ID_IR,                 // LLVM IR
+
+    ID_WinRes, // Windows resource (.res) file.
+
+    // Object and children.
+    ID_StartObjects,
+    ID_COFF,
+
+    ID_ELF32L, // ELF 32-bit, little endian
+    ID_ELF32B, // ELF 32-bit, big endian
+    ID_ELF64L, // ELF 64-bit, little endian
+    ID_ELF64B, // ELF 64-bit, big endian
+
+    ID_MachO32L, // MachO 32-bit, little endian
+    ID_MachO32B, // MachO 32-bit, big endian
+    ID_MachO64L, // MachO 64-bit, little endian
+    ID_MachO64B, // MachO 64-bit, big endian
+
+    ID_Wasm,
+
+    ID_EndObjects
+  };
+
+  static inline unsigned int getELFType(bool isLE, bool is64Bits) {
+    if (isLE)
+      return is64Bits ? ID_ELF64L : ID_ELF32L;
+    else
+      return is64Bits ? ID_ELF64B : ID_ELF32B;
+  }
+
+  static unsigned int getMachOType(bool isLE, bool is64Bits) {
+    if (isLE)
+      return is64Bits ? ID_MachO64L : ID_MachO32L;
+    else
+      return is64Bits ? ID_MachO64B : ID_MachO32B;
+  }
+
+public:
+  Binary() = delete;
+  Binary(const Binary &other) = delete;
+  virtual ~Binary();
+
+  StringRef getData() const;
+  StringRef getFileName() const;
+  MemoryBufferRef getMemoryBufferRef() const;
+
+  // Cast methods.
+  unsigned int getType() const { return TypeID; }
+
+  // Convenience methods
+  bool isObject() const {
+    return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
+  }
+
+  bool isSymbolic() const { return isIR() || isObject() || isCOFFImportFile(); }
+
+  bool isArchive() const {
+    return TypeID == ID_Archive;
+  }
+
+  bool isMachOUniversalBinary() const {
+    return TypeID == ID_MachOUniversalBinary;
+  }
+
+  bool isELF() const {
+    return TypeID >= ID_ELF32L && TypeID <= ID_ELF64B;
+  }
+
+  bool isMachO() const {
+    return TypeID >= ID_MachO32L && TypeID <= ID_MachO64B;
+  }
+
+  bool isCOFF() const {
+    return TypeID == ID_COFF;
+  }
+
+  bool isWasm() const { return TypeID == ID_Wasm; }
+
+  bool isCOFFImportFile() const {
+    return TypeID == ID_COFFImportFile;
+  }
+
+  bool isIR() const {
+    return TypeID == ID_IR;
+  }
+
+  bool isLittleEndian() const {
+    return !(TypeID == ID_ELF32B || TypeID == ID_ELF64B ||
+             TypeID == ID_MachO32B || TypeID == ID_MachO64B);
+  }
+
+  bool isWinRes() const { return TypeID == ID_WinRes; }
+
+  Triple::ObjectFormatType getTripleObjectFormat() const {
+    if (isCOFF())
+      return Triple::COFF;
+    if (isMachO())
+      return Triple::MachO;
+    if (isELF())
+      return Triple::ELF;
+    return Triple::UnknownObjectFormat;
+  }
+
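+  // Checks that the half-open range [Addr, Addr + Size) lies entirely within
+  // the buffer M. The first two comparisons guard against unsigned overflow
+  // in Addr + Size.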
+  static std::error_code checkOffset(MemoryBufferRef M, uintptr_t Addr,
+                                     const uint64_t Size) {
+    if (Addr + Size < Addr || Addr + Size < Size ||
+        Addr + Size > uintptr_t(M.getBufferEnd()) ||
+        Addr < uintptr_t(M.getBufferStart())) {
+      return object_error::unexpected_eof;
+    }
+    return std::error_code();
+  }
+};
+
+/// \brief Create a Binary from Source, autodetecting the file type.
+///
+/// \param Source The data to create the Binary from.
+Expected<std::unique_ptr<Binary>> createBinary(MemoryBufferRef Source,
+                                               LLVMContext *Context = nullptr);
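+
+// Usage sketch (illustrative; 'Buffer' is a hypothetical MemoryBufferRef):
+//
+// \code
+//   Expected<std::unique_ptr<Binary>> BinOrErr = createBinary(Buffer);
+//   if (!BinOrErr)
+//     return BinOrErr.takeError();
+//   if (auto *A = dyn_cast<Archive>(BinOrErr->get()))
+//     ; // handle archives via the classof-based cast machinery
+// \endcode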
+
+template <typename T> class OwningBinary {
+  std::unique_ptr<T> Bin;
+  std::unique_ptr<MemoryBuffer> Buf;
+
+public:
+  OwningBinary();
+  OwningBinary(std::unique_ptr<T> Bin, std::unique_ptr<MemoryBuffer> Buf);
+  OwningBinary(OwningBinary<T>&& Other);
+  OwningBinary<T> &operator=(OwningBinary<T> &&Other);
+
+  std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>> takeBinary();
+
+  T* getBinary();
+  const T* getBinary() const;
+};
+
+template <typename T>
+OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin,
+                              std::unique_ptr<MemoryBuffer> Buf)
+    : Bin(std::move(Bin)), Buf(std::move(Buf)) {}
+
+template <typename T> OwningBinary<T>::OwningBinary() = default;
+
+template <typename T>
+OwningBinary<T>::OwningBinary(OwningBinary &&Other)
+    : Bin(std::move(Other.Bin)), Buf(std::move(Other.Buf)) {}
+
+template <typename T>
+OwningBinary<T> &OwningBinary<T>::operator=(OwningBinary &&Other) {
+  Bin = std::move(Other.Bin);
+  Buf = std::move(Other.Buf);
+  return *this;
+}
+
+template <typename T>
+std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>>
+OwningBinary<T>::takeBinary() {
+  return std::make_pair(std::move(Bin), std::move(Buf));
+}
+
+template <typename T> T* OwningBinary<T>::getBinary() {
+  return Bin.get();
+}
+
+template <typename T> const T* OwningBinary<T>::getBinary() const {
+  return Bin.get();
+}
+
+Expected<OwningBinary<Binary>> createBinary(StringRef Path);
+
+} // end namespace object
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_BINARY_H
diff --git a/linux-x64/clang/include/llvm/Object/COFF.h b/linux-x64/clang/include/llvm/Object/COFF.h
new file mode 100644
index 0000000..9190149
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/COFF.h
@@ -0,0 +1,1226 @@
+//===- COFF.h - COFF object file implementation -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the COFFObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFF_H
+#define LLVM_OBJECT_COFF_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/CVDebugRecord.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <system_error>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+
+namespace object {
+
+class BaseRelocRef;
+class DelayImportDirectoryEntryRef;
+class ExportDirectoryEntryRef;
+class ImportDirectoryEntryRef;
+class ImportedSymbolRef;
+class ResourceSectionRef;
+
+using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
+using delay_import_directory_iterator =
+    content_iterator<DelayImportDirectoryEntryRef>;
+using export_directory_iterator = content_iterator<ExportDirectoryEntryRef>;
+using imported_symbol_iterator = content_iterator<ImportedSymbolRef>;
+using base_reloc_iterator = content_iterator<BaseRelocRef>;
+
+/// The DOS compatible header at the front of all PE/COFF executables.
+struct dos_header {
+  char                 Magic[2];
+  support::ulittle16_t UsedBytesInTheLastPage;
+  support::ulittle16_t FileSizeInPages;
+  support::ulittle16_t NumberOfRelocationItems;
+  support::ulittle16_t HeaderSizeInParagraphs;
+  support::ulittle16_t MinimumExtraParagraphs;
+  support::ulittle16_t MaximumExtraParagraphs;
+  support::ulittle16_t InitialRelativeSS;
+  support::ulittle16_t InitialSP;
+  support::ulittle16_t Checksum;
+  support::ulittle16_t InitialIP;
+  support::ulittle16_t InitialRelativeCS;
+  support::ulittle16_t AddressOfRelocationTable;
+  support::ulittle16_t OverlayNumber;
+  support::ulittle16_t Reserved[4];
+  support::ulittle16_t OEMid;
+  support::ulittle16_t OEMinfo;
+  support::ulittle16_t Reserved2[10];
+  support::ulittle32_t AddressOfNewExeHeader;
+};
+
+struct coff_file_header {
+  support::ulittle16_t Machine;
+  support::ulittle16_t NumberOfSections;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle32_t PointerToSymbolTable;
+  support::ulittle32_t NumberOfSymbols;
+  support::ulittle16_t SizeOfOptionalHeader;
+  support::ulittle16_t Characteristics;
+
+  bool isImportLibrary() const { return NumberOfSections == 0xffff; }
+};
+
+struct coff_bigobj_file_header {
+  support::ulittle16_t Sig1;
+  support::ulittle16_t Sig2;
+  support::ulittle16_t Version;
+  support::ulittle16_t Machine;
+  support::ulittle32_t TimeDateStamp;
+  uint8_t              UUID[16];
+  support::ulittle32_t unused1;
+  support::ulittle32_t unused2;
+  support::ulittle32_t unused3;
+  support::ulittle32_t unused4;
+  support::ulittle32_t NumberOfSections;
+  support::ulittle32_t PointerToSymbolTable;
+  support::ulittle32_t NumberOfSymbols;
+};
+
+/// The 32-bit PE header that follows the COFF header.
+struct pe32_header {
+  support::ulittle16_t Magic;
+  uint8_t MajorLinkerVersion;
+  uint8_t MinorLinkerVersion;
+  support::ulittle32_t SizeOfCode;
+  support::ulittle32_t SizeOfInitializedData;
+  support::ulittle32_t SizeOfUninitializedData;
+  support::ulittle32_t AddressOfEntryPoint;
+  support::ulittle32_t BaseOfCode;
+  support::ulittle32_t BaseOfData;
+  support::ulittle32_t ImageBase;
+  support::ulittle32_t SectionAlignment;
+  support::ulittle32_t FileAlignment;
+  support::ulittle16_t MajorOperatingSystemVersion;
+  support::ulittle16_t MinorOperatingSystemVersion;
+  support::ulittle16_t MajorImageVersion;
+  support::ulittle16_t MinorImageVersion;
+  support::ulittle16_t MajorSubsystemVersion;
+  support::ulittle16_t MinorSubsystemVersion;
+  support::ulittle32_t Win32VersionValue;
+  support::ulittle32_t SizeOfImage;
+  support::ulittle32_t SizeOfHeaders;
+  support::ulittle32_t CheckSum;
+  support::ulittle16_t Subsystem;
+  // FIXME: This should be DllCharacteristics.
+  support::ulittle16_t DLLCharacteristics;
+  support::ulittle32_t SizeOfStackReserve;
+  support::ulittle32_t SizeOfStackCommit;
+  support::ulittle32_t SizeOfHeapReserve;
+  support::ulittle32_t SizeOfHeapCommit;
+  support::ulittle32_t LoaderFlags;
+  // FIXME: This should be NumberOfRvaAndSizes.
+  support::ulittle32_t NumberOfRvaAndSize;
+};
+
+/// The 64-bit PE header that follows the COFF header.
+struct pe32plus_header {
+  support::ulittle16_t Magic;
+  uint8_t MajorLinkerVersion;
+  uint8_t MinorLinkerVersion;
+  support::ulittle32_t SizeOfCode;
+  support::ulittle32_t SizeOfInitializedData;
+  support::ulittle32_t SizeOfUninitializedData;
+  support::ulittle32_t AddressOfEntryPoint;
+  support::ulittle32_t BaseOfCode;
+  support::ulittle64_t ImageBase;
+  support::ulittle32_t SectionAlignment;
+  support::ulittle32_t FileAlignment;
+  support::ulittle16_t MajorOperatingSystemVersion;
+  support::ulittle16_t MinorOperatingSystemVersion;
+  support::ulittle16_t MajorImageVersion;
+  support::ulittle16_t MinorImageVersion;
+  support::ulittle16_t MajorSubsystemVersion;
+  support::ulittle16_t MinorSubsystemVersion;
+  support::ulittle32_t Win32VersionValue;
+  support::ulittle32_t SizeOfImage;
+  support::ulittle32_t SizeOfHeaders;
+  support::ulittle32_t CheckSum;
+  support::ulittle16_t Subsystem;
+  support::ulittle16_t DLLCharacteristics;
+  support::ulittle64_t SizeOfStackReserve;
+  support::ulittle64_t SizeOfStackCommit;
+  support::ulittle64_t SizeOfHeapReserve;
+  support::ulittle64_t SizeOfHeapCommit;
+  support::ulittle32_t LoaderFlags;
+  support::ulittle32_t NumberOfRvaAndSize;
+};
+
+struct data_directory {
+  support::ulittle32_t RelativeVirtualAddress;
+  support::ulittle32_t Size;
+};
+
+struct debug_directory {
+  support::ulittle32_t Characteristics;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle16_t MajorVersion;
+  support::ulittle16_t MinorVersion;
+  support::ulittle32_t Type;
+  support::ulittle32_t SizeOfData;
+  support::ulittle32_t AddressOfRawData;
+  support::ulittle32_t PointerToRawData;
+};
+
+template <typename IntTy>
+struct import_lookup_table_entry {
+  IntTy Data;
+
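+  // The most significant bit of an ILT entry selects import-by-ordinal; the
+  // entry type is signed, so a simple sign test suffices.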
+  bool isOrdinal() const { return Data < 0; }
+
+  uint16_t getOrdinal() const {
+    assert(isOrdinal() && "ILT entry is not an ordinal!");
+    return Data & 0xFFFF;
+  }
+
+  uint32_t getHintNameRVA() const {
+    assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
+    return Data & 0xFFFFFFFF;
+  }
+};
+
+using import_lookup_table_entry32 =
+    import_lookup_table_entry<support::little32_t>;
+using import_lookup_table_entry64 =
+    import_lookup_table_entry<support::little64_t>;
+
+struct delay_import_directory_table_entry {
+  // dumpbin reports this field as "Characteristics" instead of "Attributes".
+  support::ulittle32_t Attributes;
+  support::ulittle32_t Name;
+  support::ulittle32_t ModuleHandle;
+  support::ulittle32_t DelayImportAddressTable;
+  support::ulittle32_t DelayImportNameTable;
+  support::ulittle32_t BoundDelayImportTable;
+  support::ulittle32_t UnloadDelayImportTable;
+  support::ulittle32_t TimeStamp;
+};
+
+struct export_directory_table_entry {
+  support::ulittle32_t ExportFlags;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle16_t MajorVersion;
+  support::ulittle16_t MinorVersion;
+  support::ulittle32_t NameRVA;
+  support::ulittle32_t OrdinalBase;
+  support::ulittle32_t AddressTableEntries;
+  support::ulittle32_t NumberOfNamePointers;
+  support::ulittle32_t ExportAddressTableRVA;
+  support::ulittle32_t NamePointerRVA;
+  support::ulittle32_t OrdinalTableRVA;
+};
+
+union export_address_table_entry {
+  support::ulittle32_t ExportRVA;
+  support::ulittle32_t ForwarderRVA;
+};
+
+using export_name_pointer_table_entry = support::ulittle32_t;
+using export_ordinal_table_entry = support::ulittle16_t;
+
+struct StringTableOffset {
+  support::ulittle32_t Zeroes;
+  support::ulittle32_t Offset;
+};
+
+template <typename SectionNumberType>
+struct coff_symbol {
+  union {
+    char ShortName[COFF::NameSize];
+    StringTableOffset Offset;
+  } Name;
+
+  support::ulittle32_t Value;
+  SectionNumberType SectionNumber;
+
+  support::ulittle16_t Type;
+
+  uint8_t StorageClass;
+  uint8_t NumberOfAuxSymbols;
+};
+
+using coff_symbol16 = coff_symbol<support::ulittle16_t>;
+using coff_symbol32 = coff_symbol<support::ulittle32_t>;
+
+// Contains only common parts of coff_symbol16 and coff_symbol32.
+struct coff_symbol_generic {
+  union {
+    char ShortName[COFF::NameSize];
+    StringTableOffset Offset;
+  } Name;
+  support::ulittle32_t Value;
+};
+
+struct coff_aux_section_definition;
+
+class COFFSymbolRef {
+public:
+  COFFSymbolRef() = default;
+  COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS) {}
+  COFFSymbolRef(const coff_symbol32 *CS) : CS32(CS) {}
+
+  const void *getRawPtr() const {
+    return CS16 ? static_cast<const void *>(CS16) : CS32;
+  }
+
+  const coff_symbol_generic *getGeneric() const {
+    if (CS16)
+      return reinterpret_cast<const coff_symbol_generic *>(CS16);
+    return reinterpret_cast<const coff_symbol_generic *>(CS32);
+  }
+
+  friend bool operator<(COFFSymbolRef A, COFFSymbolRef B) {
+    return A.getRawPtr() < B.getRawPtr();
+  }
+
+  bool isBigObj() const {
+    if (CS16)
+      return false;
+    if (CS32)
+      return true;
+    llvm_unreachable("COFFSymbolRef points to nothing!");
+  }
+
+  const char *getShortName() const {
+    return CS16 ? CS16->Name.ShortName : CS32->Name.ShortName;
+  }
+
+  const StringTableOffset &getStringTableOffset() const {
+    assert(isSet() && "COFFSymbolRef points to nothing!");
+    return CS16 ? CS16->Name.Offset : CS32->Name.Offset;
+  }
+
+  uint32_t getValue() const { return CS16 ? CS16->Value : CS32->Value; }
+
+  int32_t getSectionNumber() const {
+    assert(isSet() && "COFFSymbolRef points to nothing!");
+    if (CS16) {
+      // Reserved sections are returned as negative numbers.
+      if (CS16->SectionNumber <= COFF::MaxNumberOfSections16)
+        return CS16->SectionNumber;
+      return static_cast<int16_t>(CS16->SectionNumber);
+    }
+    return static_cast<int32_t>(CS32->SectionNumber);
+  }
+
+  uint16_t getType() const {
+    assert(isSet() && "COFFSymbolRef points to nothing!");
+    return CS16 ? CS16->Type : CS32->Type;
+  }
+
+  uint8_t getStorageClass() const {
+    assert(isSet() && "COFFSymbolRef points to nothing!");
+    return CS16 ? CS16->StorageClass : CS32->StorageClass;
+  }
+
+  uint8_t getNumberOfAuxSymbols() const {
+    assert(isSet() && "COFFSymbolRef points to nothing!");
+    return CS16 ? CS16->NumberOfAuxSymbols : CS32->NumberOfAuxSymbols;
+  }
+
+  uint8_t getBaseType() const { return getType() & 0x0F; }
+
+  uint8_t getComplexType() const {
+    return (getType() & 0xF0) >> COFF::SCT_COMPLEX_TYPE_SHIFT;
+  }
+
+  template <typename T> const T *getAux() const {
+    return CS16 ? reinterpret_cast<const T *>(CS16 + 1)
+                : reinterpret_cast<const T *>(CS32 + 1);
+  }
+
+  const coff_aux_section_definition *getSectionDefinition() const {
+    if (!getNumberOfAuxSymbols() ||
+        getStorageClass() != COFF::IMAGE_SYM_CLASS_STATIC)
+      return nullptr;
+    return getAux<coff_aux_section_definition>();
+  }
+
+  bool isAbsolute() const {
+    return getSectionNumber() == -1;
+  }
+
+  bool isExternal() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL;
+  }
+
+  bool isCommon() const {
+    return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
+           getValue() != 0;
+  }
+
+  bool isUndefined() const {
+    return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
+           getValue() == 0;
+  }
+
+  bool isWeakExternal() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
+  }
+
+  bool isFunctionDefinition() const {
+    return isExternal() && getBaseType() == COFF::IMAGE_SYM_TYPE_NULL &&
+           getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION &&
+           !COFF::isReservedSectionNumber(getSectionNumber());
+  }
+
+  bool isFunctionLineInfo() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_FUNCTION;
+  }
+
+  bool isAnyUndefined() const {
+    return isUndefined() || isWeakExternal();
+  }
+
+  bool isFileRecord() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_FILE;
+  }
+
+  bool isSection() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_SECTION;
+  }
+
+  bool isSectionDefinition() const {
+    // C++/CLI creates external ABS symbols for non-const appdomain globals.
+    // These are also followed by an auxiliary section definition.
+    bool isAppdomainGlobal =
+        getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
+        getSectionNumber() == COFF::IMAGE_SYM_ABSOLUTE;
+    bool isOrdinarySection = getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC;
+    if (!getNumberOfAuxSymbols())
+      return false;
+    return isAppdomainGlobal || isOrdinarySection;
+  }
+
+  bool isCLRToken() const {
+    return getStorageClass() == COFF::IMAGE_SYM_CLASS_CLR_TOKEN;
+  }
+
+private:
+  bool isSet() const { return CS16 || CS32; }
+
+  const coff_symbol16 *CS16 = nullptr;
+  const coff_symbol32 *CS32 = nullptr;
+};
+
+struct coff_section {
+  char Name[COFF::NameSize];
+  support::ulittle32_t VirtualSize;
+  support::ulittle32_t VirtualAddress;
+  support::ulittle32_t SizeOfRawData;
+  support::ulittle32_t PointerToRawData;
+  support::ulittle32_t PointerToRelocations;
+  support::ulittle32_t PointerToLinenumbers;
+  support::ulittle16_t NumberOfRelocations;
+  support::ulittle16_t NumberOfLinenumbers;
+  support::ulittle32_t Characteristics;
+
+  // Returns true if the actual number of relocations is stored in
+  // VirtualAddress field of the first relocation table entry.
+  bool hasExtendedRelocations() const {
+    return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) &&
+           NumberOfRelocations == UINT16_MAX;
+  }
+
+  uint32_t getAlignment() const {
+    // The IMAGE_SCN_TYPE_NO_PAD bit is a legacy way of getting to
+    // IMAGE_SCN_ALIGN_1BYTES.
+    if (Characteristics & COFF::IMAGE_SCN_TYPE_NO_PAD)
+      return 1;
+
+    // Bits [20:23] contain the section alignment. Both 0 and 1 mean alignment 1.
+    uint32_t Shift = (Characteristics >> 20) & 0xF;
+    if (Shift > 0)
+      return 1U << (Shift - 1);
+    return 1;
+  }
+};
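+// Editor's worked example: COFF::IMAGE_SCN_ALIGN_16BYTES is 0x00500000, so
+// the alignment bits hold 5 and getAlignment() yields 1U << (5 - 1) == 16.
+//
+// \code
+//   coff_section S = {};
+//   S.Characteristics = COFF::IMAGE_SCN_ALIGN_16BYTES;
+//   assert(S.getAlignment() == 16);
+// \endcode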
+
+struct coff_relocation {
+  support::ulittle32_t VirtualAddress;
+  support::ulittle32_t SymbolTableIndex;
+  support::ulittle16_t Type;
+};
+
+struct coff_aux_function_definition {
+  support::ulittle32_t TagIndex;
+  support::ulittle32_t TotalSize;
+  support::ulittle32_t PointerToLinenumber;
+  support::ulittle32_t PointerToNextFunction;
+  char Unused1[2];
+};
+
+static_assert(sizeof(coff_aux_function_definition) == 18,
+              "auxiliary entry must be 18 bytes");
+
+struct coff_aux_bf_and_ef_symbol {
+  char Unused1[4];
+  support::ulittle16_t Linenumber;
+  char Unused2[6];
+  support::ulittle32_t PointerToNextFunction;
+  char Unused3[2];
+};
+
+static_assert(sizeof(coff_aux_bf_and_ef_symbol) == 18,
+              "auxiliary entry must be 18 bytes");
+
+struct coff_aux_weak_external {
+  support::ulittle32_t TagIndex;
+  support::ulittle32_t Characteristics;
+  char Unused1[10];
+};
+
+static_assert(sizeof(coff_aux_weak_external) == 18,
+              "auxiliary entry must be 18 bytes");
+
+struct coff_aux_section_definition {
+  support::ulittle32_t Length;
+  support::ulittle16_t NumberOfRelocations;
+  support::ulittle16_t NumberOfLinenumbers;
+  support::ulittle32_t CheckSum;
+  support::ulittle16_t NumberLowPart;
+  uint8_t              Selection;
+  uint8_t              Unused;
+  support::ulittle16_t NumberHighPart;
+  int32_t getNumber(bool IsBigObj) const {
+    uint32_t Number = static_cast<uint32_t>(NumberLowPart);
+    if (IsBigObj)
+      Number |= static_cast<uint32_t>(NumberHighPart) << 16;
+    return static_cast<int32_t>(Number);
+  }
+};
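+// Editor's sketch: in a bigobj file the associated-section number is split
+// across NumberLowPart and NumberHighPart; getNumber(true) reassembles it.
+//
+// \code
+//   coff_aux_section_definition Aux = {};
+//   Aux.NumberLowPart = 0x0001;
+//   Aux.NumberHighPart = 0x0002;
+//   assert(Aux.getNumber(/*IsBigObj=*/false) == 0x00000001);
+//   assert(Aux.getNumber(/*IsBigObj=*/true) == 0x00020001);
+// \endcode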
+
+static_assert(sizeof(coff_aux_section_definition) == 18,
+              "auxiliary entry must be 18 bytes");
+
+struct coff_aux_clr_token {
+  uint8_t              AuxType;
+  uint8_t              Reserved;
+  support::ulittle32_t SymbolTableIndex;
+  char                 MBZ[12];
+};
+
+static_assert(sizeof(coff_aux_clr_token) == 18,
+              "auxiliary entry must be 18 bytes");
+
+struct coff_import_header {
+  support::ulittle16_t Sig1;
+  support::ulittle16_t Sig2;
+  support::ulittle16_t Version;
+  support::ulittle16_t Machine;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle32_t SizeOfData;
+  support::ulittle16_t OrdinalHint;
+  support::ulittle16_t TypeInfo;
+
+  int getType() const { return TypeInfo & 0x3; }
+  int getNameType() const { return (TypeInfo >> 2) & 0x7; }
+};
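+// Editor's example: TypeInfo packs the import type into bits 0-1 and the
+// name type into bits 2-4 (enum values from llvm/BinaryFormat/COFF.h).
+//
+// \code
+//   coff_import_header H = {};
+//   H.TypeInfo = (COFF::IMPORT_NAME_UNDECORATE << 2) | COFF::IMPORT_DATA;
+//   assert(H.getType() == COFF::IMPORT_DATA);
+//   assert(H.getNameType() == COFF::IMPORT_NAME_UNDECORATE);
+// \endcode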
+
+struct coff_import_directory_table_entry {
+  support::ulittle32_t ImportLookupTableRVA;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle32_t ForwarderChain;
+  support::ulittle32_t NameRVA;
+  support::ulittle32_t ImportAddressTableRVA;
+
+  bool isNull() const {
+    return ImportLookupTableRVA == 0 && TimeDateStamp == 0 &&
+           ForwarderChain == 0 && NameRVA == 0 && ImportAddressTableRVA == 0;
+  }
+};
+
+template <typename IntTy>
+struct coff_tls_directory {
+  IntTy StartAddressOfRawData;
+  IntTy EndAddressOfRawData;
+  IntTy AddressOfIndex;
+  IntTy AddressOfCallBacks;
+  support::ulittle32_t SizeOfZeroFill;
+  support::ulittle32_t Characteristics;
+
+  uint32_t getAlignment() const {
+    // Bits [20:23] contain the section alignment.
+    uint32_t Shift = (Characteristics & 0x00F00000) >> 20;
+    if (Shift > 0)
+      return 1U << (Shift - 1);
+    return 0;
+  }
+};
+
+using coff_tls_directory32 = coff_tls_directory<support::little32_t>;
+using coff_tls_directory64 = coff_tls_directory<support::little64_t>;
+
+/// Bits in control flow guard flags as we understand them.
+enum class coff_guard_flags : uint32_t {
+  CFInstrumented = 0x00000100,
+  HasFidTable = 0x00000400,
+  ProtectDelayLoadIAT = 0x00001000,
+  DelayLoadIATSection = 0x00002000, // Delay load in separate section
+  HasLongJmpTable = 0x00010000,
+  FidTableHasFlags = 0x10000000, // Indicates that fid tables are 5 bytes
+};
+
+struct coff_load_config_code_integrity {
+  support::ulittle16_t Flags;
+  support::ulittle16_t Catalog;
+  support::ulittle32_t CatalogOffset;
+  support::ulittle32_t Reserved;
+};
+
+/// 32-bit load config (IMAGE_LOAD_CONFIG_DIRECTORY32)
+struct coff_load_configuration32 {
+  support::ulittle32_t Size;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle16_t MajorVersion;
+  support::ulittle16_t MinorVersion;
+  support::ulittle32_t GlobalFlagsClear;
+  support::ulittle32_t GlobalFlagsSet;
+  support::ulittle32_t CriticalSectionDefaultTimeout;
+  support::ulittle32_t DeCommitFreeBlockThreshold;
+  support::ulittle32_t DeCommitTotalFreeThreshold;
+  support::ulittle32_t LockPrefixTable;
+  support::ulittle32_t MaximumAllocationSize;
+  support::ulittle32_t VirtualMemoryThreshold;
+  support::ulittle32_t ProcessAffinityMask;
+  support::ulittle32_t ProcessHeapFlags;
+  support::ulittle16_t CSDVersion;
+  support::ulittle16_t DependentLoadFlags;
+  support::ulittle32_t EditList;
+  support::ulittle32_t SecurityCookie;
+  support::ulittle32_t SEHandlerTable;
+  support::ulittle32_t SEHandlerCount;
+
+  // Added in MSVC 2015 for /guard:cf.
+  support::ulittle32_t GuardCFCheckFunction;
+  support::ulittle32_t GuardCFCheckDispatch;
+  support::ulittle32_t GuardCFFunctionTable;
+  support::ulittle32_t GuardCFFunctionCount;
+  support::ulittle32_t GuardFlags; // coff_guard_flags
+
+  // Added in MSVC 2017
+  coff_load_config_code_integrity CodeIntegrity;
+  support::ulittle32_t GuardAddressTakenIatEntryTable;
+  support::ulittle32_t GuardAddressTakenIatEntryCount;
+  support::ulittle32_t GuardLongJumpTargetTable;
+  support::ulittle32_t GuardLongJumpTargetCount;
+  support::ulittle32_t DynamicValueRelocTable;
+  support::ulittle32_t CHPEMetadataPointer;
+  support::ulittle32_t GuardRFFailureRoutine;
+  support::ulittle32_t GuardRFFailureRoutineFunctionPointer;
+  support::ulittle32_t DynamicValueRelocTableOffset;
+  support::ulittle16_t DynamicValueRelocTableSection;
+  support::ulittle16_t Reserved2;
+  support::ulittle32_t GuardRFVerifyStackPointerFunctionPointer;
+  support::ulittle32_t HotPatchTableOffset;
+};
+
+/// 64-bit load config (IMAGE_LOAD_CONFIG_DIRECTORY64)
+struct coff_load_configuration64 {
+  support::ulittle32_t Size;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle16_t MajorVersion;
+  support::ulittle16_t MinorVersion;
+  support::ulittle32_t GlobalFlagsClear;
+  support::ulittle32_t GlobalFlagsSet;
+  support::ulittle32_t CriticalSectionDefaultTimeout;
+  support::ulittle64_t DeCommitFreeBlockThreshold;
+  support::ulittle64_t DeCommitTotalFreeThreshold;
+  support::ulittle64_t LockPrefixTable;
+  support::ulittle64_t MaximumAllocationSize;
+  support::ulittle64_t VirtualMemoryThreshold;
+  support::ulittle64_t ProcessAffinityMask;
+  support::ulittle32_t ProcessHeapFlags;
+  support::ulittle16_t CSDVersion;
+  support::ulittle16_t DependentLoadFlags;
+  support::ulittle64_t EditList;
+  support::ulittle64_t SecurityCookie;
+  support::ulittle64_t SEHandlerTable;
+  support::ulittle64_t SEHandlerCount;
+
+  // Added in MSVC 2015 for /guard:cf.
+  support::ulittle64_t GuardCFCheckFunction;
+  support::ulittle64_t GuardCFCheckDispatch;
+  support::ulittle64_t GuardCFFunctionTable;
+  support::ulittle64_t GuardCFFunctionCount;
+  support::ulittle32_t GuardFlags;
+
+  // Added in MSVC 2017
+  coff_load_config_code_integrity CodeIntegrity;
+  support::ulittle64_t GuardAddressTakenIatEntryTable;
+  support::ulittle64_t GuardAddressTakenIatEntryCount;
+  support::ulittle64_t GuardLongJumpTargetTable;
+  support::ulittle64_t GuardLongJumpTargetCount;
+  support::ulittle64_t DynamicValueRelocTable;
+  support::ulittle64_t CHPEMetadataPointer;
+  support::ulittle64_t GuardRFFailureRoutine;
+  support::ulittle64_t GuardRFFailureRoutineFunctionPointer;
+  support::ulittle32_t DynamicValueRelocTableOffset;
+  support::ulittle16_t DynamicValueRelocTableSection;
+  support::ulittle16_t Reserved2;
+  support::ulittle64_t GuardRFVerifyStackPointerFunctionPointer;
+  support::ulittle32_t HotPatchTableOffset;
+};
+
+struct coff_runtime_function_x64 {
+  support::ulittle32_t BeginAddress;
+  support::ulittle32_t EndAddress;
+  support::ulittle32_t UnwindInformation;
+};
+
+struct coff_base_reloc_block_header {
+  support::ulittle32_t PageRVA;
+  support::ulittle32_t BlockSize;
+};
+
+struct coff_base_reloc_block_entry {
+  support::ulittle16_t Data;
+
+  int getType() const { return Data >> 12; }
+  int getOffset() const { return Data & ((1 << 12) - 1); }
+};
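+// Editor's example: each entry packs the relocation type into the top four
+// bits and the page-relative offset into the low twelve.
+//
+// \code
+//   coff_base_reloc_block_entry E = {};
+//   E.Data = 0xA123; // IMAGE_REL_BASED_DIR64 (10) at offset 0x123
+//   assert(E.getType() == 10 && E.getOffset() == 0x123);
+// \endcode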
+
+struct coff_resource_dir_entry {
+  union {
+    support::ulittle32_t NameOffset;
+    support::ulittle32_t ID;
+    uint32_t getNameOffset() const {
+      return maskTrailingOnes<uint32_t>(31) & NameOffset;
+    }
+    // Even though the PE/COFF spec doesn't mention this, the high bit of a name
+    // offset is set.
+    void setNameOffset(uint32_t Offset) { NameOffset = Offset | (1 << 31); }
+  } Identifier;
+  union {
+    support::ulittle32_t DataEntryOffset;
+    support::ulittle32_t SubdirOffset;
+
+    bool isSubDir() const { return SubdirOffset >> 31; }
+    uint32_t value() const {
+      return maskTrailingOnes<uint32_t>(31) & SubdirOffset;
+    }
+
+  } Offset;
+};
+
+struct coff_resource_data_entry {
+  support::ulittle32_t DataRVA;
+  support::ulittle32_t DataSize;
+  support::ulittle32_t Codepage;
+  support::ulittle32_t Reserved;
+};
+
+struct coff_resource_dir_table {
+  support::ulittle32_t Characteristics;
+  support::ulittle32_t TimeDateStamp;
+  support::ulittle16_t MajorVersion;
+  support::ulittle16_t MinorVersion;
+  support::ulittle16_t NumberOfNameEntries;
+  support::ulittle16_t NumberOfIDEntries;
+};
+
+struct debug_h_header {
+  support::ulittle32_t Magic;
+  support::ulittle16_t Version;
+  support::ulittle16_t HashAlgorithm;
+};
+
+class COFFObjectFile : public ObjectFile {
+private:
+  friend class ImportDirectoryEntryRef;
+  friend class ExportDirectoryEntryRef;
+  const coff_file_header *COFFHeader;
+  const coff_bigobj_file_header *COFFBigObjHeader;
+  const pe32_header *PE32Header;
+  const pe32plus_header *PE32PlusHeader;
+  const data_directory *DataDirectory;
+  const coff_section *SectionTable;
+  const coff_symbol16 *SymbolTable16;
+  const coff_symbol32 *SymbolTable32;
+  const char *StringTable;
+  uint32_t StringTableSize;
+  const coff_import_directory_table_entry *ImportDirectory;
+  const delay_import_directory_table_entry *DelayImportDirectory;
+  uint32_t NumberOfDelayImportDirectory;
+  const export_directory_table_entry *ExportDirectory;
+  const coff_base_reloc_block_header *BaseRelocHeader;
+  const coff_base_reloc_block_header *BaseRelocEnd;
+  const debug_directory *DebugDirectoryBegin;
+  const debug_directory *DebugDirectoryEnd;
+  // Either coff_load_configuration32 or coff_load_configuration64.
+  const void *LoadConfig = nullptr;
+
+  std::error_code getString(uint32_t offset, StringRef &Res) const;
+
+  template <typename coff_symbol_type>
+  const coff_symbol_type *toSymb(DataRefImpl Symb) const;
+  const coff_section *toSec(DataRefImpl Sec) const;
+  const coff_relocation *toRel(DataRefImpl Rel) const;
+
+  std::error_code initSymbolTablePtr();
+  std::error_code initImportTablePtr();
+  std::error_code initDelayImportTablePtr();
+  std::error_code initExportTablePtr();
+  std::error_code initBaseRelocPtr();
+  std::error_code initDebugDirectoryPtr();
+  std::error_code initLoadConfigPtr();
+
+public:
+  uintptr_t getSymbolTable() const {
+    if (SymbolTable16)
+      return reinterpret_cast<uintptr_t>(SymbolTable16);
+    if (SymbolTable32)
+      return reinterpret_cast<uintptr_t>(SymbolTable32);
+    return uintptr_t(0);
+  }
+
+  uint16_t getMachine() const {
+    if (COFFHeader)
+      return COFFHeader->Machine;
+    if (COFFBigObjHeader)
+      return COFFBigObjHeader->Machine;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint16_t getSizeOfOptionalHeader() const {
+    if (COFFHeader)
+      return COFFHeader->isImportLibrary() ? 0
+                                           : COFFHeader->SizeOfOptionalHeader;
+    // bigobj doesn't have this field.
+    if (COFFBigObjHeader)
+      return 0;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint16_t getCharacteristics() const {
+    if (COFFHeader)
+      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics;
+    // bigobj doesn't have characteristics to speak of; editbin will
+    // silently lie to you if you attempt to set any.
+    if (COFFBigObjHeader)
+      return 0;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint32_t getTimeDateStamp() const {
+    if (COFFHeader)
+      return COFFHeader->TimeDateStamp;
+    if (COFFBigObjHeader)
+      return COFFBigObjHeader->TimeDateStamp;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint32_t getNumberOfSections() const {
+    if (COFFHeader)
+      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections;
+    if (COFFBigObjHeader)
+      return COFFBigObjHeader->NumberOfSections;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint32_t getPointerToSymbolTable() const {
+    if (COFFHeader)
+      return COFFHeader->isImportLibrary() ? 0
+                                           : COFFHeader->PointerToSymbolTable;
+    if (COFFBigObjHeader)
+      return COFFBigObjHeader->PointerToSymbolTable;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint32_t getRawNumberOfSymbols() const {
+    if (COFFHeader)
+      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
+    if (COFFBigObjHeader)
+      return COFFBigObjHeader->NumberOfSymbols;
+    llvm_unreachable("no COFF header!");
+  }
+
+  uint32_t getNumberOfSymbols() const {
+    if (!SymbolTable16 && !SymbolTable32)
+      return 0;
+    return getRawNumberOfSymbols();
+  }
+
+  const coff_load_configuration32 *getLoadConfig32() const {
+    assert(!is64());
+    return reinterpret_cast<const coff_load_configuration32 *>(LoadConfig);
+  }
+
+  const coff_load_configuration64 *getLoadConfig64() const {
+    assert(is64());
+    return reinterpret_cast<const coff_load_configuration64 *>(LoadConfig);
+  }
+
+protected:
+  void moveSymbolNext(DataRefImpl &Symb) const override;
+  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
+  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
+  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+  void moveSectionNext(DataRefImpl &Sec) const override;
+  std::error_code getSectionName(DataRefImpl Sec,
+                                 StringRef &Res) const override;
+  uint64_t getSectionAddress(DataRefImpl Sec) const override;
+  uint64_t getSectionIndex(DataRefImpl Sec) const override;
+  uint64_t getSectionSize(DataRefImpl Sec) const override;
+  std::error_code getSectionContents(DataRefImpl Sec,
+                                     StringRef &Res) const override;
+  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+  bool isSectionCompressed(DataRefImpl Sec) const override;
+  bool isSectionText(DataRefImpl Sec) const override;
+  bool isSectionData(DataRefImpl Sec) const override;
+  bool isSectionBSS(DataRefImpl Sec) const override;
+  bool isSectionVirtual(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+
+  void moveRelocationNext(DataRefImpl &Rel) const override;
+  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+  uint64_t getRelocationType(DataRefImpl Rel) const override;
+  void getRelocationTypeName(DataRefImpl Rel,
+                             SmallVectorImpl<char> &Result) const override;
+
+public:
+  COFFObjectFile(MemoryBufferRef Object, std::error_code &EC);
+
+  basic_symbol_iterator symbol_begin() const override;
+  basic_symbol_iterator symbol_end() const override;
+  section_iterator section_begin() const override;
+  section_iterator section_end() const override;
+
+  const coff_section *getCOFFSection(const SectionRef &Section) const;
+  COFFSymbolRef getCOFFSymbol(const DataRefImpl &Ref) const;
+  COFFSymbolRef getCOFFSymbol(const SymbolRef &Symbol) const;
+  const coff_relocation *getCOFFRelocation(const RelocationRef &Reloc) const;
+  unsigned getSectionID(SectionRef Sec) const;
+  unsigned getSymbolSectionID(SymbolRef Sym) const;
+
+  uint8_t getBytesInAddress() const override;
+  StringRef getFileFormatName() const override;
+  Triple::ArchType getArch() const override;
+  SubtargetFeatures getFeatures() const override { return SubtargetFeatures(); }
+
+  import_directory_iterator import_directory_begin() const;
+  import_directory_iterator import_directory_end() const;
+  delay_import_directory_iterator delay_import_directory_begin() const;
+  delay_import_directory_iterator delay_import_directory_end() const;
+  export_directory_iterator export_directory_begin() const;
+  export_directory_iterator export_directory_end() const;
+  base_reloc_iterator base_reloc_begin() const;
+  base_reloc_iterator base_reloc_end() const;
+  const debug_directory *debug_directory_begin() const {
+    return DebugDirectoryBegin;
+  }
+  const debug_directory *debug_directory_end() const {
+    return DebugDirectoryEnd;
+  }
+
+  iterator_range<import_directory_iterator> import_directories() const;
+  iterator_range<delay_import_directory_iterator>
+      delay_import_directories() const;
+  iterator_range<export_directory_iterator> export_directories() const;
+  iterator_range<base_reloc_iterator> base_relocs() const;
+  iterator_range<const debug_directory *> debug_directories() const {
+    return make_range(debug_directory_begin(), debug_directory_end());
+  }
+
+  const dos_header *getDOSHeader() const {
+    if (!PE32Header && !PE32PlusHeader)
+      return nullptr;
+    return reinterpret_cast<const dos_header *>(base());
+  }
+  std::error_code getPE32Header(const pe32_header *&Res) const;
+  std::error_code getPE32PlusHeader(const pe32plus_header *&Res) const;
+  std::error_code getDataDirectory(uint32_t index,
+                                   const data_directory *&Res) const;
+  std::error_code getSection(int32_t index, const coff_section *&Res) const;
+
+  template <typename coff_symbol_type>
+  std::error_code getSymbol(uint32_t Index,
+                            const coff_symbol_type *&Res) const {
+    if (Index >= getNumberOfSymbols())
+      return object_error::parse_failed;
+
+    Res = reinterpret_cast<coff_symbol_type *>(getSymbolTable()) + Index;
+    return std::error_code();
+  }
+  Expected<COFFSymbolRef> getSymbol(uint32_t index) const {
+    if (SymbolTable16) {
+      const coff_symbol16 *Symb = nullptr;
+      if (std::error_code EC = getSymbol(index, Symb))
+        return errorCodeToError(EC);
+      return COFFSymbolRef(Symb);
+    }
+    if (SymbolTable32) {
+      const coff_symbol32 *Symb = nullptr;
+      if (std::error_code EC = getSymbol(index, Symb))
+        return errorCodeToError(EC);
+      return COFFSymbolRef(Symb);
+    }
+    return errorCodeToError(object_error::parse_failed);
+  }
+
+  template <typename T>
+  std::error_code getAuxSymbol(uint32_t index, const T *&Res) const {
+    Expected<COFFSymbolRef> S = getSymbol(index);
+    if (Error E = S.takeError())
+      return errorToErrorCode(std::move(E));
+    Res = reinterpret_cast<const T *>(S->getRawPtr());
+    return std::error_code();
+  }
+
+  std::error_code getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const;
+  std::error_code getSymbolName(const coff_symbol_generic *Symbol,
+                                StringRef &Res) const;
+
+  ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;
+
+  size_t getSymbolTableEntrySize() const {
+    if (COFFHeader)
+      return sizeof(coff_symbol16);
+    if (COFFBigObjHeader)
+      return sizeof(coff_symbol32);
+    llvm_unreachable("null symbol table pointer!");
+  }
+
+  iterator_range<const coff_relocation *>
+  getRelocations(const coff_section *Sec) const;
+
+  std::error_code getSectionName(const coff_section *Sec, StringRef &Res) const;
+  uint64_t getSectionSize(const coff_section *Sec) const;
+  std::error_code getSectionContents(const coff_section *Sec,
+                                     ArrayRef<uint8_t> &Res) const;
+
+  uint64_t getImageBase() const;
+  std::error_code getVaPtr(uint64_t VA, uintptr_t &Res) const;
+  std::error_code getRvaPtr(uint32_t Rva, uintptr_t &Res) const;
+
+  /// Given an RVA base and size, returns a valid array of bytes or an error
+  /// code if the RVA and size is not contained completely within a valid
+  /// section.
+  std::error_code getRvaAndSizeAsBytes(uint32_t RVA, uint32_t Size,
+                                       ArrayRef<uint8_t> &Contents) const;
+
+  std::error_code getHintName(uint32_t Rva, uint16_t &Hint,
+                              StringRef &Name) const;
+
+  /// Get PDB information out of a codeview debug directory entry.
+  std::error_code getDebugPDBInfo(const debug_directory *DebugDir,
+                                  const codeview::DebugInfo *&Info,
+                                  StringRef &PDBFileName) const;
+
+  /// Get PDB information from an executable. If the information is not present,
+  /// Info will be set to nullptr and PDBFileName will be empty. An error is
+  /// returned only on corrupt object files. Convenience accessor that can be
+  /// used if the debug directory is not already handy.
+  std::error_code getDebugPDBInfo(const codeview::DebugInfo *&Info,
+                                  StringRef &PDBFileName) const;
+
+  bool isRelocatableObject() const override;
+  bool is64() const { return PE32PlusHeader; }
+
+  static bool classof(const Binary *v) { return v->isCOFF(); }
+};
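+// Editor's usage sketch, assuming a function returning llvm::Error;
+// createBinary is declared in llvm/Object/Binary.h, which this header does
+// not include.
+//
+// \code
+//   Expected<OwningBinary<Binary>> BinOrErr = createBinary("a.obj");
+//   if (!BinOrErr)
+//     return BinOrErr.takeError();
+//   if (auto *Obj = dyn_cast<COFFObjectFile>(BinOrErr->getBinary()))
+//     for (const SectionRef &S : Obj->sections())
+//       (void)Obj->getCOFFSection(S)->VirtualSize;
+//   return Error::success();
+// \endcode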
+
+// The iterator for the import directory table.
+class ImportDirectoryEntryRef {
+public:
+  ImportDirectoryEntryRef() = default;
+  ImportDirectoryEntryRef(const coff_import_directory_table_entry *Table,
+                          uint32_t I, const COFFObjectFile *Owner)
+      : ImportTable(Table), Index(I), OwningObject(Owner) {}
+
+  bool operator==(const ImportDirectoryEntryRef &Other) const;
+  void moveNext();
+
+  imported_symbol_iterator imported_symbol_begin() const;
+  imported_symbol_iterator imported_symbol_end() const;
+  iterator_range<imported_symbol_iterator> imported_symbols() const;
+
+  imported_symbol_iterator lookup_table_begin() const;
+  imported_symbol_iterator lookup_table_end() const;
+  iterator_range<imported_symbol_iterator> lookup_table_symbols() const;
+
+  std::error_code getName(StringRef &Result) const;
+  std::error_code getImportLookupTableRVA(uint32_t &Result) const;
+  std::error_code getImportAddressTableRVA(uint32_t &Result) const;
+
+  std::error_code
+  getImportTableEntry(const coff_import_directory_table_entry *&Result) const;
+
+private:
+  const coff_import_directory_table_entry *ImportTable;
+  uint32_t Index;
+  const COFFObjectFile *OwningObject = nullptr;
+};
+
+class DelayImportDirectoryEntryRef {
+public:
+  DelayImportDirectoryEntryRef() = default;
+  DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T,
+                               uint32_t I, const COFFObjectFile *Owner)
+      : Table(T), Index(I), OwningObject(Owner) {}
+
+  bool operator==(const DelayImportDirectoryEntryRef &Other) const;
+  void moveNext();
+
+  imported_symbol_iterator imported_symbol_begin() const;
+  imported_symbol_iterator imported_symbol_end() const;
+  iterator_range<imported_symbol_iterator> imported_symbols() const;
+
+  std::error_code getName(StringRef &Result) const;
+  std::error_code getDelayImportTable(
+      const delay_import_directory_table_entry *&Result) const;
+  std::error_code getImportAddress(int AddrIndex, uint64_t &Result) const;
+
+private:
+  const delay_import_directory_table_entry *Table;
+  uint32_t Index;
+  const COFFObjectFile *OwningObject = nullptr;
+};
+
+// The iterator for the export directory table entry.
+class ExportDirectoryEntryRef {
+public:
+  ExportDirectoryEntryRef() = default;
+  ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
+                          const COFFObjectFile *Owner)
+      : ExportTable(Table), Index(I), OwningObject(Owner) {}
+
+  bool operator==(const ExportDirectoryEntryRef &Other) const;
+  void moveNext();
+
+  std::error_code getDllName(StringRef &Result) const;
+  std::error_code getOrdinalBase(uint32_t &Result) const;
+  std::error_code getOrdinal(uint32_t &Result) const;
+  std::error_code getExportRVA(uint32_t &Result) const;
+  std::error_code getSymbolName(StringRef &Result) const;
+
+  std::error_code isForwarder(bool &Result) const;
+  std::error_code getForwardTo(StringRef &Result) const;
+
+private:
+  const export_directory_table_entry *ExportTable;
+  uint32_t Index;
+  const COFFObjectFile *OwningObject = nullptr;
+};
+
+class ImportedSymbolRef {
+public:
+  ImportedSymbolRef() = default;
+  ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I,
+                    const COFFObjectFile *Owner)
+      : Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {}
+  ImportedSymbolRef(const import_lookup_table_entry64 *Entry, uint32_t I,
+                    const COFFObjectFile *Owner)
+      : Entry32(nullptr), Entry64(Entry), Index(I), OwningObject(Owner) {}
+
+  bool operator==(const ImportedSymbolRef &Other) const;
+  void moveNext();
+
+  std::error_code getSymbolName(StringRef &Result) const;
+  std::error_code isOrdinal(bool &Result) const;
+  std::error_code getOrdinal(uint16_t &Result) const;
+  std::error_code getHintNameRVA(uint32_t &Result) const;
+
+private:
+  const import_lookup_table_entry32 *Entry32;
+  const import_lookup_table_entry64 *Entry64;
+  uint32_t Index;
+  const COFFObjectFile *OwningObject = nullptr;
+};
+
+class BaseRelocRef {
+public:
+  BaseRelocRef() = default;
+  BaseRelocRef(const coff_base_reloc_block_header *Header,
+               const COFFObjectFile *Owner)
+      : Header(Header), Index(0) {}
+
+  bool operator==(const BaseRelocRef &Other) const;
+  void moveNext();
+
+  std::error_code getType(uint8_t &Type) const;
+  std::error_code getRVA(uint32_t &Result) const;
+
+private:
+  const coff_base_reloc_block_header *Header;
+  uint32_t Index;
+};
+
+class ResourceSectionRef {
+public:
+  ResourceSectionRef() = default;
+  explicit ResourceSectionRef(StringRef Ref) : BBS(Ref, support::little) {}
+
+  Expected<ArrayRef<UTF16>>
+  getEntryNameString(const coff_resource_dir_entry &Entry);
+  Expected<const coff_resource_dir_table &>
+  getEntrySubDir(const coff_resource_dir_entry &Entry);
+  Expected<const coff_resource_dir_table &> getBaseTable();
+
+private:
+  BinaryByteStream BBS;
+
+  Expected<const coff_resource_dir_table &> getTableAtOffset(uint32_t Offset);
+  Expected<ArrayRef<UTF16>> getDirStringAtOffset(uint32_t Offset);
+};
+
+// Corresponds to `_FPO_DATA` structure in the PE/COFF spec.
+struct FpoData {
+  support::ulittle32_t Offset; // ulOffStart: Offset 1st byte of function code
+  support::ulittle32_t Size;   // cbProcSize: # bytes in function
+  support::ulittle32_t NumLocals; // cdwLocals: # bytes in locals/4
+  support::ulittle16_t NumParams; // cdwParams: # bytes in params/4
+  support::ulittle16_t Attributes;
+
+  // Attributes is a bitfield; the documented _FPO_DATA layout is
+  // cbProlog:8, cbRegs:3, fHasSEH:1, fUseBP:1, reserved:1, cbFrame:2.
+
+  // cbProlog: # bytes in prolog (bits 0-7)
+  int getPrologSize() const { return Attributes & 0xFF; }
+
+  // cbRegs: # regs saved (bits 8-10)
+  int getNumSavedRegs() const { return (Attributes >> 8) & 0x7; }
+
+  // fHasSEH: true if SEH is used in the function (bit 11)
+  bool hasSEH() const { return (Attributes >> 11) & 1; }
+
+  // fUseBP: true if EBP has been allocated (bit 12)
+  bool useBP() const { return (Attributes >> 12) & 1; }
+
+  // cbFrame: frame type (bits 14-15)
+  int getFP() const { return Attributes >> 14; }
+};
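+// Editor's worked example against the bit layout above:
+//
+// \code
+//   FpoData F = {};
+//   F.Attributes = 5 | (3 << 8) | (1 << 12); // prolog=5, regs=3, uses EBP
+//   assert(F.getPrologSize() == 5);
+//   assert(F.getNumSavedRegs() == 3);
+//   assert(F.useBP() && !F.hasSEH());
+// \endcode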
+
+} // end namespace object
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_COFF_H
diff --git a/linux-x64/clang/include/llvm/Object/COFFImportFile.h b/linux-x64/clang/include/llvm/Object/COFFImportFile.h
new file mode 100644
index 0000000..7ca416f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/COFFImportFile.h
@@ -0,0 +1,107 @@
+//===- COFFImportFile.h - COFF short import file implementation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A COFF short import file is a special kind of file which contains only
+// symbol names for DLL-exported symbols. This class implements exporting
+// symbols to create import libraries and provides a SymbolicFile interface
+// for the file type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFF_IMPORT_FILE_H
+#define LLVM_OBJECT_COFF_IMPORT_FILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace object {
+
+class COFFImportFile : public SymbolicFile {
+public:
+  COFFImportFile(MemoryBufferRef Source)
+      : SymbolicFile(ID_COFFImportFile, Source) {}
+
+  static bool classof(Binary const *V) { return V->isCOFFImportFile(); }
+
+  void moveSymbolNext(DataRefImpl &Symb) const override { ++Symb.p; }
+
+  std::error_code printSymbolName(raw_ostream &OS,
+                                  DataRefImpl Symb) const override {
+    if (Symb.p == 0)
+      OS << "__imp_";
+    OS << StringRef(Data.getBufferStart() + sizeof(coff_import_header));
+    return std::error_code();
+  }
+
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override {
+    return SymbolRef::SF_Global;
+  }
+
+  basic_symbol_iterator symbol_begin() const override {
+    return BasicSymbolRef(DataRefImpl(), this);
+  }
+
+  basic_symbol_iterator symbol_end() const override {
+    DataRefImpl Symb;
+    Symb.p = isData() ? 1 : 2;
+    return BasicSymbolRef(Symb, this);
+  }
+
+  const coff_import_header *getCOFFImportHeader() const {
+    return reinterpret_cast<const object::coff_import_header *>(
+        Data.getBufferStart());
+  }
+
+private:
+  bool isData() const {
+    return getCOFFImportHeader()->getType() == COFF::IMPORT_DATA;
+  }
+};
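+// Editor's sketch: iterating the symbols of an import entry for a function
+// `foo` prints "__imp_foo" and then "foo"; data imports expose only the
+// "__imp_" form (see symbol_end() above). `Buffer` is an assumed
+// MemoryBufferRef over a short import file.
+//
+// \code
+//   COFFImportFile Imp(Buffer);
+//   for (const BasicSymbolRef &S : Imp.symbols()) {
+//     S.printName(outs());
+//     outs() << "\n";
+//   }
+// \endcode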
+
+struct COFFShortExport {
+  std::string Name;
+  std::string ExtName;
+  std::string SymbolName;
+
+  uint16_t Ordinal = 0;
+  bool Noname = false;
+  bool Data = false;
+  bool Private = false;
+  bool Constant = false;
+
+  bool isWeak() {
+    return ExtName.size() && ExtName != Name;
+  }
+
+  friend bool operator==(const COFFShortExport &L, const COFFShortExport &R) {
+    return L.Name == R.Name && L.ExtName == R.ExtName &&
+            L.Ordinal == R.Ordinal && L.Noname == R.Noname &&
+            L.Data == R.Data && L.Private == R.Private;
+  }
+
+  friend bool operator!=(const COFFShortExport &L, const COFFShortExport &R) {
+    return !(L == R);
+  }
+};
+
+Error writeImportLibrary(StringRef ImportName, StringRef Path,
+                         ArrayRef<COFFShortExport> Exports,
+                         COFF::MachineTypes Machine, bool MakeWeakAliases,
+                         bool MinGW);
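+// Editor's usage sketch (file names and the export entry are hypothetical;
+// assumed context is a function returning llvm::Error):
+//
+// \code
+//   COFFShortExport E;
+//   E.Name = "foo";
+//   if (Error Err = writeImportLibrary("mylib.dll", "mylib.lib", {E},
+//                                      COFF::IMAGE_FILE_MACHINE_AMD64,
+//                                      /*MakeWeakAliases=*/true,
+//                                      /*MinGW=*/false))
+//     return Err;
+//   return Error::success();
+// \endcode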
+
+} // namespace object
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Object/COFFModuleDefinition.h b/linux-x64/clang/include/llvm/Object/COFFModuleDefinition.h
new file mode 100644
index 0000000..be139a2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/COFFModuleDefinition.h
@@ -0,0 +1,53 @@
+//===--- COFFModuleDefinition.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Windows-specific.
+// A parser for the module-definition file (.def file).
+// Parsed results are directly written to the Config global variable.
+//
+// The format of module-definition files is described in this document:
+// https://msdn.microsoft.com/en-us/library/28d6s79h.aspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFF_MODULE_DEFINITION_H
+#define LLVM_OBJECT_COFF_MODULE_DEFINITION_H
+
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/COFFImportFile.h"
+
+namespace llvm {
+namespace object {
+
+struct COFFModuleDefinition {
+  std::vector<COFFShortExport> Exports;
+  std::string OutputFile;
+  std::string ImportName;
+  uint64_t ImageBase = 0;
+  uint64_t StackReserve = 0;
+  uint64_t StackCommit = 0;
+  uint64_t HeapReserve = 0;
+  uint64_t HeapCommit = 0;
+  uint32_t MajorImageVersion = 0;
+  uint32_t MinorImageVersion = 0;
+  uint32_t MajorOSVersion = 0;
+  uint32_t MinorOSVersion = 0;
+};
+
+// mingw and wine def files do not mangle _ for x86, which is a consequence
+// of legacy binutils' dlltool functionality. This MingwDef flag should be
+// removed once mingw stops this practice.
+Expected<COFFModuleDefinition>
+parseCOFFModuleDefinition(MemoryBufferRef MB, COFF::MachineTypes Machine,
+                          bool MingwDef = false);
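+// Editor's sketch: parsing a minimal (hypothetical) .def file.
+//
+// \code
+//   std::unique_ptr<MemoryBuffer> MB = MemoryBuffer::getMemBuffer(
+//       "LIBRARY mylib.dll\nEXPORTS\n  foo\n  bar @2\n");
+//   Expected<COFFModuleDefinition> Def = parseCOFFModuleDefinition(
+//       MB->getMemBufferRef(), COFF::IMAGE_FILE_MACHINE_AMD64);
+//   if (!Def)
+//     return Def.takeError(); // assumed context: function returning Error
+//   // Def->Exports now holds entries for "foo" and "bar" (ordinal 2).
+// \endcode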
+
+} // End namespace object.
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Object/CVDebugRecord.h b/linux-x64/clang/include/llvm/Object/CVDebugRecord.h
new file mode 100644
index 0000000..faad72c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/CVDebugRecord.h
@@ -0,0 +1,55 @@
+//===- CVDebugRecord.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_CVDEBUGRECORD_H
+#define LLVM_OBJECT_CVDEBUGRECORD_H
+
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace OMF {
+struct Signature {
+  enum ID : uint32_t {
+    PDB70 = 0x53445352, // RSDS
+    PDB20 = 0x3031424e, // NB10
+    CV50 = 0x3131424e,  // NB11
+    CV41 = 0x3930424e,  // NB09
+  };
+
+  support::ulittle32_t CVSignature;
+  support::ulittle32_t Offset;
+};
+} // end namespace OMF
+
+namespace codeview {
+struct PDB70DebugInfo {
+  support::ulittle32_t CVSignature;
+  uint8_t Signature[16];
+  support::ulittle32_t Age;
+  // char PDBFileName[];
+};
+
+struct PDB20DebugInfo {
+  support::ulittle32_t CVSignature;
+  support::ulittle32_t Offset;
+  support::ulittle32_t Signature;
+  support::ulittle32_t Age;
+  // char PDBFileName[];
+};
+
+union DebugInfo {
+  struct OMF::Signature Signature;
+  struct PDB20DebugInfo PDB20;
+  struct PDB70DebugInfo PDB70;
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Object/Decompressor.h b/linux-x64/clang/include/llvm/Object/Decompressor.h
new file mode 100644
index 0000000..c8e888d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/Decompressor.h
@@ -0,0 +1,67 @@
+//===-- Decompressor.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_DECOMPRESSOR_H
+#define LLVM_OBJECT_DECOMPRESSOR_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/ObjectFile.h"
+
+namespace llvm {
+namespace object {
+
+/// @brief Decompressor handles decompression of compressed sections.
+class Decompressor {
+public:
+  /// @brief Create decompressor object.
+  /// @param Name        Section name.
+  /// @param Data        Section content.
+  /// @param IsLE        Flag determines if Data is in little endian form.
+  /// @param Is64Bit     Flag determines if object is 64 bit.
+  static Expected<Decompressor> create(StringRef Name, StringRef Data,
+                                       bool IsLE, bool Is64Bit);
+
+  /// @brief Resize the buffer and uncompress section data into it.
+  /// @param Out         Destination buffer.
+  template <class T> Error resizeAndDecompress(T &Out) {
+    Out.resize(DecompressedSize);
+    return decompress({Out.data(), (size_t)DecompressedSize});
+  }
+
+  /// @brief Uncompress section data to raw buffer provided.
+  /// @param Buffer      Destination buffer.
+  Error decompress(MutableArrayRef<char> Buffer);
+
+  /// @brief Return memory buffer size required for decompression.
+  uint64_t getDecompressedSize() { return DecompressedSize; }
+
+  /// @brief Return true if section is compressed, including the GNU-style case.
+  static bool isCompressed(const object::SectionRef &Section);
+
+  /// @brief Return true if section is an ELF compressed one.
+  static bool isCompressedELFSection(uint64_t Flags, StringRef Name);
+
+  /// @brief Return true if the section name matches the GNU-style compressed one.
+  static bool isGnuStyle(StringRef Name);
+
+private:
+  Decompressor(StringRef Data);
+
+  Error consumeCompressedGnuHeader();
+  Error consumeCompressedZLibHeader(bool Is64Bit, bool IsLittleEndian);
+
+  StringRef SectionData;
+  uint64_t DecompressedSize;
+};
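+// Editor's usage sketch, assuming a function returning llvm::Error; `Name`
+// and `Contents` are an assumed section name and its raw bytes.
+//
+// \code
+//   Expected<Decompressor> D =
+//       Decompressor::create(Name, Contents, /*IsLE=*/true, /*Is64Bit=*/true);
+//   if (!D)
+//     return D.takeError();
+//   SmallString<0> Out;
+//   if (Error Err = D->resizeAndDecompress(Out))
+//     return Err;
+//   return Error::success();
+// \endcode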
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_DECOMPRESSOR_H
diff --git a/linux-x64/clang/include/llvm/Object/ELF.h b/linux-x64/clang/include/llvm/Object/ELF.h
new file mode 100644
index 0000000..46504e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ELF.h
@@ -0,0 +1,611 @@
+//===- ELF.h - ELF object file implementation -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ELFFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELF_H
+#define LLVM_OBJECT_ELF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+namespace object {
+
+StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
+StringRef getELFSectionTypeName(uint32_t Machine, uint32_t Type);
+
+// Subclasses of ELFFile may need this for template instantiation
+inline std::pair<unsigned char, unsigned char>
+getElfArchType(StringRef Object) {
+  if (Object.size() < ELF::EI_NIDENT)
+    return std::make_pair((uint8_t)ELF::ELFCLASSNONE,
+                          (uint8_t)ELF::ELFDATANONE);
+  return std::make_pair((uint8_t)Object[ELF::EI_CLASS],
+                        (uint8_t)Object[ELF::EI_DATA]);
+}
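+// Editor's example: for a 64-bit little-endian ELF buffer this returns
+// (ELFCLASS64, ELFDATA2LSB); `Buf` is an assumed StringRef over the file.
+//
+// \code
+//   std::pair<unsigned char, unsigned char> P = getElfArchType(Buf);
+//   bool Is64 = P.first == ELF::ELFCLASS64;
+//   bool IsLE = P.second == ELF::ELFDATA2LSB;
+// \endcode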
+
+static inline Error createError(StringRef Err) {
+  return make_error<StringError>(Err, object_error::parse_failed);
+}
+
+template <class ELFT>
+class ELFFile {
+public:
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  using uintX_t = typename ELFT::uint;
+  using Elf_Ehdr = typename ELFT::Ehdr;
+  using Elf_Shdr = typename ELFT::Shdr;
+  using Elf_Sym = typename ELFT::Sym;
+  using Elf_Dyn = typename ELFT::Dyn;
+  using Elf_Phdr = typename ELFT::Phdr;
+  using Elf_Rel = typename ELFT::Rel;
+  using Elf_Rela = typename ELFT::Rela;
+  using Elf_Verdef = typename ELFT::Verdef;
+  using Elf_Verdaux = typename ELFT::Verdaux;
+  using Elf_Verneed = typename ELFT::Verneed;
+  using Elf_Vernaux = typename ELFT::Vernaux;
+  using Elf_Versym = typename ELFT::Versym;
+  using Elf_Hash = typename ELFT::Hash;
+  using Elf_GnuHash = typename ELFT::GnuHash;
+  using Elf_Nhdr = typename ELFT::Nhdr;
+  using Elf_Note = typename ELFT::Note;
+  using Elf_Note_Iterator = typename ELFT::NoteIterator;
+  using Elf_Dyn_Range = typename ELFT::DynRange;
+  using Elf_Shdr_Range = typename ELFT::ShdrRange;
+  using Elf_Sym_Range = typename ELFT::SymRange;
+  using Elf_Rel_Range = typename ELFT::RelRange;
+  using Elf_Rela_Range = typename ELFT::RelaRange;
+  using Elf_Phdr_Range = typename ELFT::PhdrRange;
+
+  const uint8_t *base() const {
+    return reinterpret_cast<const uint8_t *>(Buf.data());
+  }
+
+  size_t getBufSize() const { return Buf.size(); }
+
+private:
+  StringRef Buf;
+
+  ELFFile(StringRef Object);
+
+public:
+  const Elf_Ehdr *getHeader() const {
+    return reinterpret_cast<const Elf_Ehdr *>(base());
+  }
+
+  template <typename T>
+  Expected<const T *> getEntry(uint32_t Section, uint32_t Entry) const;
+  template <typename T>
+  Expected<const T *> getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
+
+  Expected<StringRef> getStringTable(const Elf_Shdr *Section) const;
+  Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section) const;
+  Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section,
+                                              Elf_Shdr_Range Sections) const;
+
+  Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section) const;
+  Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section,
+                                             Elf_Shdr_Range Sections) const;
+
+  StringRef getRelocationTypeName(uint32_t Type) const;
+  void getRelocationTypeName(uint32_t Type,
+                             SmallVectorImpl<char> &Result) const;
+
+  /// \brief Get the symbol for a given relocation.
+  Expected<const Elf_Sym *> getRelocationSymbol(const Elf_Rel *Rel,
+                                                const Elf_Shdr *SymTab) const;
+
+  static Expected<ELFFile> create(StringRef Object);
+
+  bool isMipsELF64() const {
+    return getHeader()->e_machine == ELF::EM_MIPS &&
+           getHeader()->getFileClass() == ELF::ELFCLASS64;
+  }
+
+  bool isMips64EL() const {
+    return isMipsELF64() &&
+           getHeader()->getDataEncoding() == ELF::ELFDATA2LSB;
+  }
+
+  Expected<Elf_Shdr_Range> sections() const;
+
+  Expected<Elf_Sym_Range> symbols(const Elf_Shdr *Sec) const {
+    if (!Sec)
+      return makeArrayRef<Elf_Sym>(nullptr, nullptr);
+    return getSectionContentsAsArray<Elf_Sym>(Sec);
+  }
+
+  Expected<Elf_Rela_Range> relas(const Elf_Shdr *Sec) const {
+    return getSectionContentsAsArray<Elf_Rela>(Sec);
+  }
+
+  Expected<Elf_Rel_Range> rels(const Elf_Shdr *Sec) const {
+    return getSectionContentsAsArray<Elf_Rel>(Sec);
+  }
+
+  Expected<std::vector<Elf_Rela>> android_relas(const Elf_Shdr *Sec) const;
+
+  /// \brief Iterate over program header table.
+  Expected<Elf_Phdr_Range> program_headers() const {
+    if (getHeader()->e_phnum && getHeader()->e_phentsize != sizeof(Elf_Phdr))
+      return createError("invalid e_phentsize");
+    if (getHeader()->e_phoff +
+            (getHeader()->e_phnum * getHeader()->e_phentsize) >
+        getBufSize())
+      return createError("program headers longer than binary");
+    auto *Begin =
+        reinterpret_cast<const Elf_Phdr *>(base() + getHeader()->e_phoff);
+    return makeArrayRef(Begin, Begin + getHeader()->e_phnum);
+  }
+
+  /// Get an iterator over notes in a program header.
+  ///
+  /// The program header must be of type \c PT_NOTE.
+  ///
+  /// \param Phdr the program header to iterate over.
+  /// \param Err [out] an error to support fallible iteration, which should
+  ///  be checked after iteration ends.
+  Elf_Note_Iterator notes_begin(const Elf_Phdr &Phdr, Error &Err) const {
+    if (Phdr.p_type != ELF::PT_NOTE) {
+      Err = createError("attempt to iterate notes of non-note program header");
+      return Elf_Note_Iterator(Err);
+    }
+    if (Phdr.p_offset + Phdr.p_filesz > getBufSize()) {
+      Err = createError("invalid program header offset/size");
+      return Elf_Note_Iterator(Err);
+    }
+    return Elf_Note_Iterator(base() + Phdr.p_offset, Phdr.p_filesz, Err);
+  }
+
+  /// Get an iterator over notes in a section.
+  ///
+  /// The section must be of type \c SHT_NOTE.
+  ///
+  /// \param Shdr the section to iterate over.
+  /// \param Err [out] an error to support fallible iteration, which should
+  ///  be checked after iteration ends.
+  Elf_Note_Iterator notes_begin(const Elf_Shdr &Shdr, Error &Err) const {
+    if (Shdr.sh_type != ELF::SHT_NOTE) {
+      Err = createError("attempt to iterate notes of non-note section");
+      return Elf_Note_Iterator(Err);
+    }
+    if (Shdr.sh_offset + Shdr.sh_size > getBufSize()) {
+      Err = createError("invalid section offset/size");
+      return Elf_Note_Iterator(Err);
+    }
+    return Elf_Note_Iterator(base() + Shdr.sh_offset, Shdr.sh_size, Err);
+  }
+
+  /// Get the end iterator for notes.
+  Elf_Note_Iterator notes_end() const {
+    return Elf_Note_Iterator();
+  }
+
+  /// Get an iterator range over notes of a program header.
+  ///
+  /// The program header must be of type \c PT_NOTE.
+  ///
+  /// \param Phdr the program header to iterate over.
+  /// \param Err [out] an error to support fallible iteration, which should
+  ///  be checked after iteration ends.
+  iterator_range<Elf_Note_Iterator> notes(const Elf_Phdr &Phdr,
+                                          Error &Err) const {
+    return make_range(notes_begin(Phdr, Err), notes_end());
+  }
+
+  /// Get an iterator range over notes of a section.
+  ///
+  /// The section must be of type \c SHT_NOTE.
+  ///
+  /// \param Shdr the section to iterate over.
+  /// \param Err [out] an error to support fallible iteration, which should
+  ///  be checked after iteration ends.
+  iterator_range<Elf_Note_Iterator> notes(const Elf_Shdr &Shdr,
+                                          Error &Err) const {
+    return make_range(notes_begin(Shdr, Err), notes_end());
+  }
+
+  Expected<StringRef> getSectionStringTable(Elf_Shdr_Range Sections) const;
+  Expected<uint32_t> getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms,
+                                     ArrayRef<Elf_Word> ShndxTable) const;
+  Expected<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
+                                        const Elf_Shdr *SymTab,
+                                        ArrayRef<Elf_Word> ShndxTable) const;
+  Expected<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
+                                        Elf_Sym_Range Symtab,
+                                        ArrayRef<Elf_Word> ShndxTable) const;
+  Expected<const Elf_Shdr *> getSection(uint32_t Index) const;
+
+  Expected<const Elf_Sym *> getSymbol(const Elf_Shdr *Sec,
+                                      uint32_t Index) const;
+
+  Expected<StringRef> getSectionName(const Elf_Shdr *Section) const;
+  Expected<StringRef> getSectionName(const Elf_Shdr *Section,
+                                     StringRef DotShstrtab) const;
+  template <typename T>
+  Expected<ArrayRef<T>> getSectionContentsAsArray(const Elf_Shdr *Sec) const;
+  Expected<ArrayRef<uint8_t>> getSectionContents(const Elf_Shdr *Sec) const;
+};
+
+using ELF32LEFile = ELFFile<ELF32LE>;
+using ELF64LEFile = ELFFile<ELF64LE>;
+using ELF32BEFile = ELFFile<ELF32BE>;
+using ELF64BEFile = ELFFile<ELF64BE>;
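+// Editor's usage sketch: wrap a raw buffer (`Buf`, an assumed StringRef over
+// the whole file) and walk its section headers; outs() assumes
+// llvm/Support/raw_ostream.h, and the assumed context is a function
+// returning llvm::Error.
+//
+// \code
+//   Expected<ELF64LEFile> F = ELF64LEFile::create(Buf);
+//   if (!F)
+//     return F.takeError();
+//   Expected<ELF64LEFile::Elf_Shdr_Range> Secs = F->sections();
+//   if (!Secs)
+//     return Secs.takeError();
+//   for (const ELF64LE::Shdr &Sec : *Secs) {
+//     if (Expected<StringRef> Name = F->getSectionName(&Sec))
+//       outs() << *Name << "\n";
+//     else
+//       return Name.takeError();
+//   }
+//   return Error::success();
+// \endcode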
+
+template <class ELFT>
+inline Expected<const typename ELFT::Shdr *>
+getSection(typename ELFT::ShdrRange Sections, uint32_t Index) {
+  if (Index >= Sections.size())
+    return createError("invalid section index");
+  return &Sections[Index];
+}
+
+template <class ELFT>
+inline Expected<uint32_t>
+getExtendedSymbolTableIndex(const typename ELFT::Sym *Sym,
+                            const typename ELFT::Sym *FirstSym,
+                            ArrayRef<typename ELFT::Word> ShndxTable) {
+  assert(Sym->st_shndx == ELF::SHN_XINDEX);
+  unsigned Index = Sym - FirstSym;
+  if (Index >= ShndxTable.size())
+    return createError("index past the end of the symbol table");
+
+  // The size of the table was checked in getSHNDXTable.
+  return ShndxTable[Index];
+}
+
+template <class ELFT>
+Expected<uint32_t>
+ELFFile<ELFT>::getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms,
+                               ArrayRef<Elf_Word> ShndxTable) const {
+  uint32_t Index = Sym->st_shndx;
+  if (Index == ELF::SHN_XINDEX) {
+    auto ErrorOrIndex = getExtendedSymbolTableIndex<ELFT>(
+        Sym, Syms.begin(), ShndxTable);
+    if (!ErrorOrIndex)
+      return ErrorOrIndex.takeError();
+    return *ErrorOrIndex;
+  }
+  if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
+    return 0;
+  return Index;
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Shdr *>
+ELFFile<ELFT>::getSection(const Elf_Sym *Sym, const Elf_Shdr *SymTab,
+                          ArrayRef<Elf_Word> ShndxTable) const {
+  auto SymsOrErr = symbols(SymTab);
+  if (!SymsOrErr)
+    return SymsOrErr.takeError();
+  return getSection(Sym, *SymsOrErr, ShndxTable);
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Shdr *>
+ELFFile<ELFT>::getSection(const Elf_Sym *Sym, Elf_Sym_Range Symbols,
+                          ArrayRef<Elf_Word> ShndxTable) const {
+  auto IndexOrErr = getSectionIndex(Sym, Symbols, ShndxTable);
+  if (!IndexOrErr)
+    return IndexOrErr.takeError();
+  uint32_t Index = *IndexOrErr;
+  if (Index == 0)
+    return nullptr;
+  return getSection(Index);
+}
+
+template <class ELFT>
+inline Expected<const typename ELFT::Sym *>
+getSymbol(typename ELFT::SymRange Symbols, uint32_t Index) {
+  if (Index >= Symbols.size())
+    return createError("invalid symbol index");
+  return &Symbols[Index];
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Sym *>
+ELFFile<ELFT>::getSymbol(const Elf_Shdr *Sec, uint32_t Index) const {
+  auto SymtabOrErr = symbols(Sec);
+  if (!SymtabOrErr)
+    return SymtabOrErr.takeError();
+  return object::getSymbol<ELFT>(*SymtabOrErr, Index);
+}
+
+template <class ELFT>
+template <typename T>
+Expected<ArrayRef<T>>
+ELFFile<ELFT>::getSectionContentsAsArray(const Elf_Shdr *Sec) const {
+  if (Sec->sh_entsize != sizeof(T) && sizeof(T) != 1)
+    return createError("invalid sh_entsize");
+
+  uintX_t Offset = Sec->sh_offset;
+  uintX_t Size = Sec->sh_size;
+
+  if (Size % sizeof(T))
+    return createError("size is not a multiple of sh_entsize");
+  if ((std::numeric_limits<uintX_t>::max() - Offset < Size) ||
+      Offset + Size > Buf.size())
+    return createError("invalid section offset");
+
+  if (Offset % alignof(T))
+    return createError("unaligned data");
+
+  const T *Start = reinterpret_cast<const T *>(base() + Offset);
+  return makeArrayRef(Start, Size / sizeof(T));
+}
+
+template <class ELFT>
+Expected<ArrayRef<uint8_t>>
+ELFFile<ELFT>::getSectionContents(const Elf_Shdr *Sec) const {
+  return getSectionContentsAsArray<uint8_t>(Sec);
+}
+
+template <class ELFT>
+StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
+  return getELFRelocationTypeName(getHeader()->e_machine, Type);
+}
+
+template <class ELFT>
+void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
+                                          SmallVectorImpl<char> &Result) const {
+  if (!isMipsELF64()) {
+    StringRef Name = getRelocationTypeName(Type);
+    Result.append(Name.begin(), Name.end());
+  } else {
+    // The Mips N64 ABI allows up to three operations to be specified per
+    // relocation record. Unfortunately there's no easy way to test for the
+    // presence of N64 ELFs as they have no special flag that identifies them
+    // as being N64. We can safely assume at the moment that all Mips
+    // ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough
+    // information to disambiguate between old vs new ABIs.
+    uint8_t Type1 = (Type >> 0) & 0xFF;
+    uint8_t Type2 = (Type >> 8) & 0xFF;
+    uint8_t Type3 = (Type >> 16) & 0xFF;
+
+    // Concat all three relocation type names.
+    StringRef Name = getRelocationTypeName(Type1);
+    Result.append(Name.begin(), Name.end());
+
+    Name = getRelocationTypeName(Type2);
+    Result.append(1, '/');
+    Result.append(Name.begin(), Name.end());
+
+    Name = getRelocationTypeName(Type3);
+    Result.append(1, '/');
+    Result.append(Name.begin(), Name.end());
+  }
+}
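+// Editor's note: on Mips N64 a packed value such as
+// (R_MIPS_HI16 << 16) | (R_MIPS_SUB << 8) | R_MIPS_GPREL16 therefore renders
+// as "R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16".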
+
+template <class ELFT>
+Expected<const typename ELFT::Sym *>
+ELFFile<ELFT>::getRelocationSymbol(const Elf_Rel *Rel,
+                                   const Elf_Shdr *SymTab) const {
+  uint32_t Index = Rel->getSymbol(isMips64EL());
+  if (Index == 0)
+    return nullptr;
+  return getEntry<Elf_Sym>(SymTab, Index);
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getSectionStringTable(Elf_Shdr_Range Sections) const {
+  uint32_t Index = getHeader()->e_shstrndx;
+  if (Index == ELF::SHN_XINDEX)
+    Index = Sections[0].sh_link;
+
+  if (!Index) // no section string table.
+    return "";
+  if (Index >= Sections.size())
+    return createError("invalid section index");
+  return getStringTable(&Sections[Index]);
+}
+
+template <class ELFT> ELFFile<ELFT>::ELFFile(StringRef Object) : Buf(Object) {}
+
+template <class ELFT>
+Expected<ELFFile<ELFT>> ELFFile<ELFT>::create(StringRef Object) {
+  if (sizeof(Elf_Ehdr) > Object.size())
+    return createError("invalid buffer");
+  return ELFFile(Object);
+}
+
+template <class ELFT>
+Expected<typename ELFT::ShdrRange> ELFFile<ELFT>::sections() const {
+  const uintX_t SectionTableOffset = getHeader()->e_shoff;
+  if (SectionTableOffset == 0)
+    return ArrayRef<Elf_Shdr>();
+
+  if (getHeader()->e_shentsize != sizeof(Elf_Shdr))
+    return createError(
+        "invalid section header entry size (e_shentsize) in ELF header");
+
+  const uint64_t FileSize = Buf.size();
+
+  if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize)
+    return createError("section header table goes past the end of the file");
+
+  // Invalid address alignment of section headers
+  if (SectionTableOffset & (alignof(Elf_Shdr) - 1))
+    return createError("invalid alignment of section headers");
+
+  const Elf_Shdr *First =
+      reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
+
+  uintX_t NumSections = getHeader()->e_shnum;
+  if (NumSections == 0)
+    NumSections = First->sh_size;
+
+  if (NumSections > UINT64_MAX / sizeof(Elf_Shdr))
+    return createError("section table goes past the end of file");
+
+  const uint64_t SectionTableSize = NumSections * sizeof(Elf_Shdr);
+
+  // Section table goes past end of file!
+  if (SectionTableOffset + SectionTableSize > FileSize)
+    return createError("section table goes past the end of file");
+
+  return makeArrayRef(First, NumSections);
+}
+
+template <class ELFT>
+template <typename T>
+Expected<const T *> ELFFile<ELFT>::getEntry(uint32_t Section,
+                                            uint32_t Entry) const {
+  auto SecOrErr = getSection(Section);
+  if (!SecOrErr)
+    return SecOrErr.takeError();
+  return getEntry<T>(*SecOrErr, Entry);
+}
+
+template <class ELFT>
+template <typename T>
+Expected<const T *> ELFFile<ELFT>::getEntry(const Elf_Shdr *Section,
+                                            uint32_t Entry) const {
+  if (sizeof(T) != Section->sh_entsize)
+    return createError("invalid sh_entsize");
+  size_t Pos = Section->sh_offset + Entry * sizeof(T);
+  if (Pos + sizeof(T) > Buf.size())
+    return createError("invalid section offset");
+  return reinterpret_cast<const T *>(base() + Pos);
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Shdr *>
+ELFFile<ELFT>::getSection(uint32_t Index) const {
+  auto TableOrErr = sections();
+  if (!TableOrErr)
+    return TableOrErr.takeError();
+  return object::getSection<ELFT>(*TableOrErr, Index);
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getStringTable(const Elf_Shdr *Section) const {
+  if (Section->sh_type != ELF::SHT_STRTAB)
+    return createError("invalid sh_type for string table, expected SHT_STRTAB");
+  auto V = getSectionContentsAsArray<char>(Section);
+  if (!V)
+    return V.takeError();
+  ArrayRef<char> Data = *V;
+  if (Data.empty())
+    return createError("empty string table");
+  if (Data.back() != '\0')
+    return createError("string table is not null terminated");
+  return StringRef(Data.begin(), Data.size());
+}
+
+template <class ELFT>
+Expected<ArrayRef<typename ELFT::Word>>
+ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section) const {
+  auto SectionsOrErr = sections();
+  if (!SectionsOrErr)
+    return SectionsOrErr.takeError();
+  return getSHNDXTable(Section, *SectionsOrErr);
+}
+
+template <class ELFT>
+Expected<ArrayRef<typename ELFT::Word>>
+ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section,
+                             Elf_Shdr_Range Sections) const {
+  assert(Section.sh_type == ELF::SHT_SYMTAB_SHNDX);
+  auto VOrErr = getSectionContentsAsArray<Elf_Word>(&Section);
+  if (!VOrErr)
+    return VOrErr.takeError();
+  ArrayRef<Elf_Word> V = *VOrErr;
+  auto SymTableOrErr = object::getSection<ELFT>(Sections, Section.sh_link);
+  if (!SymTableOrErr)
+    return SymTableOrErr.takeError();
+  const Elf_Shdr &SymTable = **SymTableOrErr;
+  if (SymTable.sh_type != ELF::SHT_SYMTAB &&
+      SymTable.sh_type != ELF::SHT_DYNSYM)
+    return createError("invalid sh_type");
+  if (V.size() != (SymTable.sh_size / sizeof(Elf_Sym)))
+    return createError("invalid section contents size");
+  return V;
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec) const {
+  auto SectionsOrErr = sections();
+  if (!SectionsOrErr)
+    return SectionsOrErr.takeError();
+  return getStringTableForSymtab(Sec, *SectionsOrErr);
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec,
+                                       Elf_Shdr_Range Sections) const {
+  if (Sec.sh_type != ELF::SHT_SYMTAB && Sec.sh_type != ELF::SHT_DYNSYM)
+    return createError(
+        "invalid sh_type for symbol table, expected SHT_SYMTAB or SHT_DYNSYM");
+  auto SectionOrErr = object::getSection<ELFT>(Sections, Sec.sh_link);
+  if (!SectionOrErr)
+    return SectionOrErr.takeError();
+  return getStringTable(*SectionOrErr);
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section) const {
+  auto SectionsOrErr = sections();
+  if (!SectionsOrErr)
+    return SectionsOrErr.takeError();
+  auto Table = getSectionStringTable(*SectionsOrErr);
+  if (!Table)
+    return Table.takeError();
+  return getSectionName(Section, *Table);
+}
+
+template <class ELFT>
+Expected<StringRef> ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section,
+                                                  StringRef DotShstrtab) const {
+  uint32_t Offset = Section->sh_name;
+  if (Offset == 0)
+    return StringRef();
+  if (Offset >= DotShstrtab.size())
+    return createError("invalid string offset");
+  return StringRef(DotShstrtab.data() + Offset);
+}
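+
+// For example (illustrative): with a DotShstrtab of "\0.text\0.data\0", a
+// section whose sh_name is 1 resolves to ".text" and one whose sh_name is 7
+// resolves to ".data".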
+
+/// This function returns the hash value for a symbol in the .dynsym section.
+/// The name of the API remains consistent with the one specified in libelf.
+/// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
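+///
+/// Example (illustrative):
+/// \code
+///   unsigned H = hashSysV("printf"); // H == 0x077905a6
+/// \endcode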
+inline unsigned hashSysV(StringRef SymbolName) {
+  unsigned h = 0, g;
+  for (char C : SymbolName) {
+    h = (h << 4) + C;
+    g = h & 0xf0000000L;
+    if (g != 0)
+      h ^= g >> 24;
+    h &= ~g;
+  }
+  return h;
+}
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_ELF_H
diff --git a/linux-x64/clang/include/llvm/Object/ELFObjectFile.h b/linux-x64/clang/include/llvm/Object/ELFObjectFile.h
new file mode 100644
index 0000000..4d00103
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ELFObjectFile.h
@@ -0,0 +1,1122 @@
+//===- ELFObjectFile.h - ELF object file implementation ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ELFObjectFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELFOBJECTFILE_H
+#define LLVM_OBJECT_ELFOBJECTFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/ARMAttributeParser.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cassert>
+#include <cstdint>
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+class elf_symbol_iterator;
+
+class ELFObjectFileBase : public ObjectFile {
+  friend class ELFRelocationRef;
+  friend class ELFSectionRef;
+  friend class ELFSymbolRef;
+
+protected:
+  ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source);
+
+  virtual uint16_t getEMachine() const = 0;
+  virtual uint64_t getSymbolSize(DataRefImpl Symb) const = 0;
+  virtual uint8_t getSymbolOther(DataRefImpl Symb) const = 0;
+  virtual uint8_t getSymbolELFType(DataRefImpl Symb) const = 0;
+
+  virtual uint32_t getSectionType(DataRefImpl Sec) const = 0;
+  virtual uint64_t getSectionFlags(DataRefImpl Sec) const = 0;
+  virtual uint64_t getSectionOffset(DataRefImpl Sec) const = 0;
+
+  virtual Expected<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;
+
+public:
+  using elf_symbol_iterator_range = iterator_range<elf_symbol_iterator>;
+
+  virtual elf_symbol_iterator_range getDynamicSymbolIterators() const = 0;
+
+  /// Returns platform-specific object flags, if any.
+  virtual unsigned getPlatformFlags() const = 0;
+
+  elf_symbol_iterator_range symbols() const;
+
+  static bool classof(const Binary *v) { return v->isELF(); }
+
+  SubtargetFeatures getFeatures() const override;
+
+  SubtargetFeatures getMIPSFeatures() const;
+
+  SubtargetFeatures getARMFeatures() const;
+
+  SubtargetFeatures getRISCVFeatures() const;
+
+  void setARMSubArch(Triple &TheTriple) const override;
+};
+
+class ELFSectionRef : public SectionRef {
+public:
+  ELFSectionRef(const SectionRef &B) : SectionRef(B) {
+    assert(isa<ELFObjectFileBase>(SectionRef::getObject()));
+  }
+
+  const ELFObjectFileBase *getObject() const {
+    return cast<ELFObjectFileBase>(SectionRef::getObject());
+  }
+
+  uint32_t getType() const {
+    return getObject()->getSectionType(getRawDataRefImpl());
+  }
+
+  uint64_t getFlags() const {
+    return getObject()->getSectionFlags(getRawDataRefImpl());
+  }
+
+  uint64_t getOffset() const {
+    return getObject()->getSectionOffset(getRawDataRefImpl());
+  }
+};
+
+class elf_section_iterator : public section_iterator {
+public:
+  elf_section_iterator(const section_iterator &B) : section_iterator(B) {
+    assert(isa<ELFObjectFileBase>(B->getObject()));
+  }
+
+  const ELFSectionRef *operator->() const {
+    return static_cast<const ELFSectionRef *>(section_iterator::operator->());
+  }
+
+  const ELFSectionRef &operator*() const {
+    return static_cast<const ELFSectionRef &>(section_iterator::operator*());
+  }
+};
+
+class ELFSymbolRef : public SymbolRef {
+public:
+  ELFSymbolRef(const SymbolRef &B) : SymbolRef(B) {
+    assert(isa<ELFObjectFileBase>(SymbolRef::getObject()));
+  }
+
+  const ELFObjectFileBase *getObject() const {
+    return cast<ELFObjectFileBase>(BasicSymbolRef::getObject());
+  }
+
+  uint64_t getSize() const {
+    return getObject()->getSymbolSize(getRawDataRefImpl());
+  }
+
+  uint8_t getOther() const {
+    return getObject()->getSymbolOther(getRawDataRefImpl());
+  }
+
+  uint8_t getELFType() const {
+    return getObject()->getSymbolELFType(getRawDataRefImpl());
+  }
+};
+
+class elf_symbol_iterator : public symbol_iterator {
+public:
+  elf_symbol_iterator(const basic_symbol_iterator &B)
+      : symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
+                                  cast<ELFObjectFileBase>(B->getObject()))) {}
+
+  const ELFSymbolRef *operator->() const {
+    return static_cast<const ELFSymbolRef *>(symbol_iterator::operator->());
+  }
+
+  const ELFSymbolRef &operator*() const {
+    return static_cast<const ELFSymbolRef &>(symbol_iterator::operator*());
+  }
+};
+
+class ELFRelocationRef : public RelocationRef {
+public:
+  ELFRelocationRef(const RelocationRef &B) : RelocationRef(B) {
+    assert(isa<ELFObjectFileBase>(RelocationRef::getObject()));
+  }
+
+  const ELFObjectFileBase *getObject() const {
+    return cast<ELFObjectFileBase>(RelocationRef::getObject());
+  }
+
+  Expected<int64_t> getAddend() const {
+    return getObject()->getRelocationAddend(getRawDataRefImpl());
+  }
+};
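+
+// Illustrative sketch (assuming `Rel` is a RelocationRef into an ELF object):
+//
+//   ELFRelocationRef ERel(Rel);
+//   if (Expected<int64_t> AddendOrErr = ERel.getAddend())
+//     /* use *AddendOrErr */;
+//   else
+//     consumeError(AddendOrErr.takeError());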
+
+class elf_relocation_iterator : public relocation_iterator {
+public:
+  elf_relocation_iterator(const relocation_iterator &B)
+      : relocation_iterator(RelocationRef(
+            B->getRawDataRefImpl(), cast<ELFObjectFileBase>(B->getObject()))) {}
+
+  const ELFRelocationRef *operator->() const {
+    return static_cast<const ELFRelocationRef *>(
+        relocation_iterator::operator->());
+  }
+
+  const ELFRelocationRef &operator*() const {
+    return static_cast<const ELFRelocationRef &>(
+        relocation_iterator::operator*());
+  }
+};
+
+inline ELFObjectFileBase::elf_symbol_iterator_range
+ELFObjectFileBase::symbols() const {
+  return elf_symbol_iterator_range(symbol_begin(), symbol_end());
+}
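+
+// Illustrative sketch (assuming `Obj` is a reference to an ELFObjectFileBase):
+//
+//   for (const ELFSymbolRef &Sym : Obj.symbols()) {
+//     uint64_t Size = Sym.getSize();   // ELF-specific accessors from above
+//     uint8_t Type = Sym.getELFType(); // e.g. ELF::STT_FUNC
+//   }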
+
+template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
+  uint16_t getEMachine() const override;
+  uint64_t getSymbolSize(DataRefImpl Sym) const override;
+
+public:
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+  using uintX_t = typename ELFT::uint;
+
+  using Elf_Sym = typename ELFT::Sym;
+  using Elf_Shdr = typename ELFT::Shdr;
+  using Elf_Ehdr = typename ELFT::Ehdr;
+  using Elf_Rel = typename ELFT::Rel;
+  using Elf_Rela = typename ELFT::Rela;
+  using Elf_Dyn = typename ELFT::Dyn;
+
+private:
+  ELFObjectFile(MemoryBufferRef Object, ELFFile<ELFT> EF,
+                const Elf_Shdr *DotDynSymSec, const Elf_Shdr *DotSymtabSec,
+                ArrayRef<Elf_Word> ShndxTable);
+
+protected:
+  ELFFile<ELFT> EF;
+
+  const Elf_Shdr *DotDynSymSec = nullptr; // Dynamic symbol table section.
+  const Elf_Shdr *DotSymtabSec = nullptr; // Symbol table section.
+  ArrayRef<Elf_Word> ShndxTable;
+
+  void moveSymbolNext(DataRefImpl &Symb) const override;
+  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
+  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+  uint8_t getSymbolOther(DataRefImpl Symb) const override;
+  uint8_t getSymbolELFType(DataRefImpl Symb) const override;
+  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
+  Expected<section_iterator> getSymbolSection(const Elf_Sym *Symb,
+                                              const Elf_Shdr *SymTab) const;
+  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+
+  void moveSectionNext(DataRefImpl &Sec) const override;
+  std::error_code getSectionName(DataRefImpl Sec,
+                                 StringRef &Res) const override;
+  uint64_t getSectionAddress(DataRefImpl Sec) const override;
+  uint64_t getSectionIndex(DataRefImpl Sec) const override;
+  uint64_t getSectionSize(DataRefImpl Sec) const override;
+  std::error_code getSectionContents(DataRefImpl Sec,
+                                     StringRef &Res) const override;
+  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+  bool isSectionCompressed(DataRefImpl Sec) const override;
+  bool isSectionText(DataRefImpl Sec) const override;
+  bool isSectionData(DataRefImpl Sec) const override;
+  bool isSectionBSS(DataRefImpl Sec) const override;
+  bool isSectionVirtual(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+  section_iterator getRelocatedSection(DataRefImpl Sec) const override;
+
+  void moveRelocationNext(DataRefImpl &Rel) const override;
+  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+  uint64_t getRelocationType(DataRefImpl Rel) const override;
+  void getRelocationTypeName(DataRefImpl Rel,
+                             SmallVectorImpl<char> &Result) const override;
+
+  uint32_t getSectionType(DataRefImpl Sec) const override;
+  uint64_t getSectionFlags(DataRefImpl Sec) const override;
+  uint64_t getSectionOffset(DataRefImpl Sec) const override;
+  StringRef getRelocationTypeName(uint32_t Type) const;
+
+  /// \brief Get the relocation section that contains \a Rel.
+  const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
+    auto RelSecOrErr = EF.getSection(Rel.d.a);
+    if (!RelSecOrErr)
+      report_fatal_error(errorToErrorCode(RelSecOrErr.takeError()).message());
+    return *RelSecOrErr;
+  }
+
+  DataRefImpl toDRI(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
+    DataRefImpl DRI;
+    if (!SymTable) {
+      DRI.d.a = 0;
+      DRI.d.b = 0;
+      return DRI;
+    }
+    assert(SymTable->sh_type == ELF::SHT_SYMTAB ||
+           SymTable->sh_type == ELF::SHT_DYNSYM);
+
+    auto SectionsOrErr = EF.sections();
+    if (!SectionsOrErr) {
+      DRI.d.a = 0;
+      DRI.d.b = 0;
+      return DRI;
+    }
+    uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
+    unsigned SymTableIndex =
+        (reinterpret_cast<uintptr_t>(SymTable) - SHT) / sizeof(Elf_Shdr);
+
+    DRI.d.a = SymTableIndex;
+    DRI.d.b = SymbolNum;
+    return DRI;
+  }
+
+  const Elf_Shdr *toELFShdrIter(DataRefImpl Sec) const {
+    return reinterpret_cast<const Elf_Shdr *>(Sec.p);
+  }
+
+  DataRefImpl toDRI(const Elf_Shdr *Sec) const {
+    DataRefImpl DRI;
+    DRI.p = reinterpret_cast<uintptr_t>(Sec);
+    return DRI;
+  }
+
+  DataRefImpl toDRI(const Elf_Dyn *Dyn) const {
+    DataRefImpl DRI;
+    DRI.p = reinterpret_cast<uintptr_t>(Dyn);
+    return DRI;
+  }
+
+  bool isExportedToOtherDSO(const Elf_Sym *ESym) const {
+    unsigned char Binding = ESym->getBinding();
+    unsigned char Visibility = ESym->getVisibility();
+
+    // A symbol is exported if its binding is either GLOBAL or WEAK, and its
+    // visibility is either DEFAULT or PROTECTED. All other symbols are not
+    // exported.
+    return ((Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK) &&
+            (Visibility == ELF::STV_DEFAULT ||
+             Visibility == ELF::STV_PROTECTED));
+  }
+
+  // This flag is used for classof, to distinguish ELFObjectFile from
+  // its subclass. If more subclasses are created, this flag will
+  // have to become an enum.
+  bool isDyldELFObject;
+
+public:
+  ELFObjectFile(ELFObjectFile<ELFT> &&Other);
+  static Expected<ELFObjectFile<ELFT>> create(MemoryBufferRef Object);
+
+  const Elf_Rel *getRel(DataRefImpl Rel) const;
+  const Elf_Rela *getRela(DataRefImpl Rela) const;
+
+  const Elf_Sym *getSymbol(DataRefImpl Sym) const {
+    auto Ret = EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
+    if (!Ret)
+      report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+    return *Ret;
+  }
+
+  const Elf_Shdr *getSection(DataRefImpl Sec) const {
+    return reinterpret_cast<const Elf_Shdr *>(Sec.p);
+  }
+
+  basic_symbol_iterator symbol_begin() const override;
+  basic_symbol_iterator symbol_end() const override;
+
+  elf_symbol_iterator dynamic_symbol_begin() const;
+  elf_symbol_iterator dynamic_symbol_end() const;
+
+  section_iterator section_begin() const override;
+  section_iterator section_end() const override;
+
+  Expected<int64_t> getRelocationAddend(DataRefImpl Rel) const override;
+
+  uint8_t getBytesInAddress() const override;
+  StringRef getFileFormatName() const override;
+  Triple::ArchType getArch() const override;
+
+  unsigned getPlatformFlags() const override { return EF.getHeader()->e_flags; }
+
+  std::error_code getBuildAttributes(ARMAttributeParser &Attributes) const override {
+    auto SectionsOrErr = EF.sections();
+    if (!SectionsOrErr)
+      return errorToErrorCode(SectionsOrErr.takeError());
+
+    for (const Elf_Shdr &Sec : *SectionsOrErr) {
+      if (Sec.sh_type == ELF::SHT_ARM_ATTRIBUTES) {
+        auto ErrorOrContents = EF.getSectionContents(&Sec);
+        if (!ErrorOrContents)
+          return errorToErrorCode(ErrorOrContents.takeError());
+
+        auto Contents = ErrorOrContents.get();
+        if (Contents[0] != ARMBuildAttrs::Format_Version || Contents.size() == 1)
+          return std::error_code();
+
+        Attributes.Parse(Contents, ELFT::TargetEndianness == support::little);
+        break;
+      }
+    }
+    return std::error_code();
+  }
+
+  const ELFFile<ELFT> *getELFFile() const { return &EF; }
+
+  bool isDyldType() const { return isDyldELFObject; }
+  static bool classof(const Binary *v) {
+    return v->getType() == getELFType(ELFT::TargetEndianness == support::little,
+                                      ELFT::Is64Bits);
+  }
+
+  elf_symbol_iterator_range getDynamicSymbolIterators() const override;
+
+  bool isRelocatableObject() const override;
+};
+
+using ELF32LEObjectFile = ELFObjectFile<ELF32LE>;
+using ELF64LEObjectFile = ELFObjectFile<ELF64LE>;
+using ELF32BEObjectFile = ELFObjectFile<ELF32BE>;
+using ELF64BEObjectFile = ELFObjectFile<ELF64BE>;
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {
+  ++Sym.d.b;
+}
+
+template <class ELFT>
+Expected<StringRef> ELFObjectFile<ELFT>::getSymbolName(DataRefImpl Sym) const {
+  const Elf_Sym *ESym = getSymbol(Sym);
+  auto SymTabOrErr = EF.getSection(Sym.d.a);
+  if (!SymTabOrErr)
+    return SymTabOrErr.takeError();
+  const Elf_Shdr *SymTableSec = *SymTabOrErr;
+  auto StrTabOrErr = EF.getSection(SymTableSec->sh_link);
+  if (!StrTabOrErr)
+    return StrTabOrErr.takeError();
+  const Elf_Shdr *StringTableSec = *StrTabOrErr;
+  auto SymStrTabOrErr = EF.getStringTable(StringTableSec);
+  if (!SymStrTabOrErr)
+    return SymStrTabOrErr.takeError();
+  return ESym->getName(*SymStrTabOrErr);
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionFlags(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_flags;
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSectionType(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_type;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionOffset(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_offset;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSymbolValueImpl(DataRefImpl Symb) const {
+  const Elf_Sym *ESym = getSymbol(Symb);
+  uint64_t Ret = ESym->st_value;
+  if (ESym->st_shndx == ELF::SHN_ABS)
+    return Ret;
+
+  const Elf_Ehdr *Header = EF.getHeader();
+  // Clear the ARM/Thumb or microMIPS indicator flag.
+  if ((Header->e_machine == ELF::EM_ARM || Header->e_machine == ELF::EM_MIPS) &&
+      ESym->getType() == ELF::STT_FUNC)
+    Ret &= ~1;
+
+  return Ret;
+}
+
+template <class ELFT>
+Expected<uint64_t>
+ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb) const {
+  uint64_t Result = getSymbolValue(Symb);
+  const Elf_Sym *ESym = getSymbol(Symb);
+  switch (ESym->st_shndx) {
+  case ELF::SHN_COMMON:
+  case ELF::SHN_UNDEF:
+  case ELF::SHN_ABS:
+    return Result;
+  }
+
+  const Elf_Ehdr *Header = EF.getHeader();
+  auto SymTabOrErr = EF.getSection(Symb.d.a);
+  if (!SymTabOrErr)
+    return SymTabOrErr.takeError();
+  const Elf_Shdr *SymTab = *SymTabOrErr;
+
+  if (Header->e_type == ELF::ET_REL) {
+    auto SectionOrErr = EF.getSection(ESym, SymTab, ShndxTable);
+    if (!SectionOrErr)
+      return SectionOrErr.takeError();
+    const Elf_Shdr *Section = *SectionOrErr;
+    if (Section)
+      Result += Section->sh_addr;
+  }
+
+  return Result;
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSymbolAlignment(DataRefImpl Symb) const {
+  const Elf_Sym *Sym = getSymbol(Symb);
+  if (Sym->st_shndx == ELF::SHN_COMMON)
+    return Sym->st_value;
+  return 0;
+}
+
+template <class ELFT>
+uint16_t ELFObjectFile<ELFT>::getEMachine() const {
+  return EF.getHeader()->e_machine;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSymbolSize(DataRefImpl Sym) const {
+  return getSymbol(Sym)->st_size;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
+  return getSymbol(Symb)->st_size;
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getSymbolOther(DataRefImpl Symb) const {
+  return getSymbol(Symb)->st_other;
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getSymbolELFType(DataRefImpl Symb) const {
+  return getSymbol(Symb)->getType();
+}
+
+template <class ELFT>
+Expected<SymbolRef::Type>
+ELFObjectFile<ELFT>::getSymbolType(DataRefImpl Symb) const {
+  const Elf_Sym *ESym = getSymbol(Symb);
+
+  switch (ESym->getType()) {
+  case ELF::STT_NOTYPE:
+    return SymbolRef::ST_Unknown;
+  case ELF::STT_SECTION:
+    return SymbolRef::ST_Debug;
+  case ELF::STT_FILE:
+    return SymbolRef::ST_File;
+  case ELF::STT_FUNC:
+    return SymbolRef::ST_Function;
+  case ELF::STT_OBJECT:
+  case ELF::STT_COMMON:
+  case ELF::STT_TLS:
+    return SymbolRef::ST_Data;
+  default:
+    return SymbolRef::ST_Other;
+  }
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSymbolFlags(DataRefImpl Sym) const {
+  const Elf_Sym *ESym = getSymbol(Sym);
+
+  uint32_t Result = SymbolRef::SF_None;
+
+  if (ESym->getBinding() != ELF::STB_LOCAL)
+    Result |= SymbolRef::SF_Global;
+
+  if (ESym->getBinding() == ELF::STB_WEAK)
+    Result |= SymbolRef::SF_Weak;
+
+  if (ESym->st_shndx == ELF::SHN_ABS)
+    Result |= SymbolRef::SF_Absolute;
+
+  if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION)
+    Result |= SymbolRef::SF_FormatSpecific;
+
+  auto DotSymtabSecSyms = EF.symbols(DotSymtabSec);
+  if (DotSymtabSecSyms && ESym == (*DotSymtabSecSyms).begin())
+    Result |= SymbolRef::SF_FormatSpecific;
+  auto DotDynSymSecSyms = EF.symbols(DotDynSymSec);
+  if (DotDynSymSecSyms && ESym == (*DotDynSymSecSyms).begin())
+    Result |= SymbolRef::SF_FormatSpecific;
+
+  if (EF.getHeader()->e_machine == ELF::EM_ARM) {
+    if (Expected<StringRef> NameOrErr = getSymbolName(Sym)) {
+      StringRef Name = *NameOrErr;
+      if (Name.startswith("$d") || Name.startswith("$t") ||
+          Name.startswith("$a"))
+        Result |= SymbolRef::SF_FormatSpecific;
+    } else {
+      // TODO: Actually report errors helpfully.
+      consumeError(NameOrErr.takeError());
+    }
+    if (ESym->getType() == ELF::STT_FUNC && (ESym->st_value & 1) == 1)
+      Result |= SymbolRef::SF_Thumb;
+  }
+
+  if (ESym->st_shndx == ELF::SHN_UNDEF)
+    Result |= SymbolRef::SF_Undefined;
+
+  if (ESym->getType() == ELF::STT_COMMON || ESym->st_shndx == ELF::SHN_COMMON)
+    Result |= SymbolRef::SF_Common;
+
+  if (isExportedToOtherDSO(ESym))
+    Result |= SymbolRef::SF_Exported;
+
+  if (ESym->getVisibility() == ELF::STV_HIDDEN)
+    Result |= SymbolRef::SF_Hidden;
+
+  return Result;
+}
+
+template <class ELFT>
+Expected<section_iterator>
+ELFObjectFile<ELFT>::getSymbolSection(const Elf_Sym *ESym,
+                                      const Elf_Shdr *SymTab) const {
+  auto ESecOrErr = EF.getSection(ESym, SymTab, ShndxTable);
+  if (!ESecOrErr)
+    return ESecOrErr.takeError();
+
+  const Elf_Shdr *ESec = *ESecOrErr;
+  if (!ESec)
+    return section_end();
+
+  DataRefImpl Sec;
+  Sec.p = reinterpret_cast<intptr_t>(ESec);
+  return section_iterator(SectionRef(Sec, this));
+}
+
+template <class ELFT>
+Expected<section_iterator>
+ELFObjectFile<ELFT>::getSymbolSection(DataRefImpl Symb) const {
+  const Elf_Sym *Sym = getSymbol(Symb);
+  auto SymTabOrErr = EF.getSection(Symb.d.a);
+  if (!SymTabOrErr)
+    return SymTabOrErr.takeError();
+  const Elf_Shdr *SymTab = *SymTabOrErr;
+  return getSymbolSection(Sym, SymTab);
+}
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveSectionNext(DataRefImpl &Sec) const {
+  const Elf_Shdr *ESec = getSection(Sec);
+  Sec = toDRI(++ESec);
+}
+
+template <class ELFT>
+std::error_code ELFObjectFile<ELFT>::getSectionName(DataRefImpl Sec,
+                                                    StringRef &Result) const {
+  auto Name = EF.getSectionName(&*getSection(Sec));
+  if (!Name)
+    return errorToErrorCode(Name.takeError());
+  Result = *Name;
+  return std::error_code();
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_addr;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionIndex(DataRefImpl Sec) const {
+  auto SectionsOrErr = EF.sections();
+  handleAllErrors(std::move(SectionsOrErr.takeError()),
+                  [](const ErrorInfoBase &) {
+                    llvm_unreachable("unable to get section index");
+                  });
+  const Elf_Shdr *First = SectionsOrErr->begin();
+  return getSection(Sec) - First;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_size;
+}
+
+template <class ELFT>
+std::error_code
+ELFObjectFile<ELFT>::getSectionContents(DataRefImpl Sec,
+                                        StringRef &Result) const {
+  const Elf_Shdr *EShdr = getSection(Sec);
+  if (std::error_code EC =
+          checkOffset(getMemoryBufferRef(),
+                      (uintptr_t)base() + EShdr->sh_offset, EShdr->sh_size))
+    return EC;
+  Result = StringRef((const char *)base() + EShdr->sh_offset, EShdr->sh_size);
+  return std::error_code();
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionAlignment(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_addralign;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionCompressed(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_flags & ELF::SHF_COMPRESSED;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionText(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_flags & ELF::SHF_EXECINSTR;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionData(DataRefImpl Sec) const {
+  const Elf_Shdr *EShdr = getSection(Sec);
+  return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
+         EShdr->sh_type == ELF::SHT_PROGBITS;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionBSS(DataRefImpl Sec) const {
+  const Elf_Shdr *EShdr = getSection(Sec);
+  return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
+         EShdr->sh_type == ELF::SHT_NOBITS;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionVirtual(DataRefImpl Sec) const {
+  return getSection(Sec)->sh_type == ELF::SHT_NOBITS;
+}
+
+template <class ELFT>
+relocation_iterator
+ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
+  DataRefImpl RelData;
+  auto SectionsOrErr = EF.sections();
+  if (!SectionsOrErr)
+    return relocation_iterator(RelocationRef());
+  uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
+  RelData.d.a = (Sec.p - SHT) / EF.getHeader()->e_shentsize;
+  RelData.d.b = 0;
+  return relocation_iterator(RelocationRef(RelData, this));
+}
+
+template <class ELFT>
+relocation_iterator
+ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
+  const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+  relocation_iterator Begin = section_rel_begin(Sec);
+  if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
+    return Begin;
+  DataRefImpl RelData = Begin->getRawDataRefImpl();
+  const Elf_Shdr *RelSec = getRelSection(RelData);
+
+  // Error check sh_link here so that getRelocationSymbol can just use it.
+  auto SymSecOrErr = EF.getSection(RelSec->sh_link);
+  if (!SymSecOrErr)
+    report_fatal_error(errorToErrorCode(SymSecOrErr.takeError()).message());
+
+  RelData.d.b += S->sh_size / S->sh_entsize;
+  return relocation_iterator(RelocationRef(RelData, this));
+}
+
+template <class ELFT>
+section_iterator
+ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
+  if (EF.getHeader()->e_type != ELF::ET_REL)
+    return section_end();
+
+  const Elf_Shdr *EShdr = getSection(Sec);
+  uintX_t Type = EShdr->sh_type;
+  if (Type != ELF::SHT_REL && Type != ELF::SHT_RELA)
+    return section_end();
+
+  auto R = EF.getSection(EShdr->sh_info);
+  if (!R)
+    report_fatal_error(errorToErrorCode(R.takeError()).message());
+  return section_iterator(SectionRef(toDRI(*R), this));
+}
+
+// Relocations
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveRelocationNext(DataRefImpl &Rel) const {
+  ++Rel.d.b;
+}
+
+template <class ELFT>
+symbol_iterator
+ELFObjectFile<ELFT>::getRelocationSymbol(DataRefImpl Rel) const {
+  uint32_t symbolIdx;
+  const Elf_Shdr *sec = getRelSection(Rel);
+  if (sec->sh_type == ELF::SHT_REL)
+    symbolIdx = getRel(Rel)->getSymbol(EF.isMips64EL());
+  else
+    symbolIdx = getRela(Rel)->getSymbol(EF.isMips64EL());
+  if (!symbolIdx)
+    return symbol_end();
+
+  // FIXME: error check symbolIdx
+  DataRefImpl SymbolData;
+  SymbolData.d.a = sec->sh_link;
+  SymbolData.d.b = symbolIdx;
+  return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getRelocationOffset(DataRefImpl Rel) const {
+  assert(EF.getHeader()->e_type == ELF::ET_REL &&
+         "Only relocatable object files have relocation offsets");
+  const Elf_Shdr *sec = getRelSection(Rel);
+  if (sec->sh_type == ELF::SHT_REL)
+    return getRel(Rel)->r_offset;
+
+  return getRela(Rel)->r_offset;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getRelocationType(DataRefImpl Rel) const {
+  const Elf_Shdr *sec = getRelSection(Rel);
+  if (sec->sh_type == ELF::SHT_REL)
+    return getRel(Rel)->getType(EF.isMips64EL());
+  else
+    return getRela(Rel)->getType(EF.isMips64EL());
+}
+
+template <class ELFT>
+StringRef ELFObjectFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
+  return getELFRelocationTypeName(EF.getHeader()->e_machine, Type);
+}
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::getRelocationTypeName(
+    DataRefImpl Rel, SmallVectorImpl<char> &Result) const {
+  uint32_t type = getRelocationType(Rel);
+  EF.getRelocationTypeName(type, Result);
+}
+
+template <class ELFT>
+Expected<int64_t>
+ELFObjectFile<ELFT>::getRelocationAddend(DataRefImpl Rel) const {
+  if (getRelSection(Rel)->sh_type != ELF::SHT_RELA)
+    return createError("Section is not SHT_RELA");
+  return (int64_t)getRela(Rel)->r_addend;
+}
+
+template <class ELFT>
+const typename ELFObjectFile<ELFT>::Elf_Rel *
+ELFObjectFile<ELFT>::getRel(DataRefImpl Rel) const {
+  assert(getRelSection(Rel)->sh_type == ELF::SHT_REL);
+  auto Ret = EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b);
+  if (!Ret)
+    report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+  return *Ret;
+}
+
+template <class ELFT>
+const typename ELFObjectFile<ELFT>::Elf_Rela *
+ELFObjectFile<ELFT>::getRela(DataRefImpl Rela) const {
+  assert(getRelSection(Rela)->sh_type == ELF::SHT_RELA);
+  auto Ret = EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b);
+  if (!Ret)
+    report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+  return *Ret;
+}
+
+template <class ELFT>
+Expected<ELFObjectFile<ELFT>>
+ELFObjectFile<ELFT>::create(MemoryBufferRef Object) {
+  auto EFOrErr = ELFFile<ELFT>::create(Object.getBuffer());
+  if (Error E = EFOrErr.takeError())
+    return std::move(E);
+  auto EF = std::move(*EFOrErr);
+
+  auto SectionsOrErr = EF.sections();
+  if (!SectionsOrErr)
+    return SectionsOrErr.takeError();
+
+  const Elf_Shdr *DotDynSymSec = nullptr;
+  const Elf_Shdr *DotSymtabSec = nullptr;
+  ArrayRef<Elf_Word> ShndxTable;
+  for (const Elf_Shdr &Sec : *SectionsOrErr) {
+    switch (Sec.sh_type) {
+    case ELF::SHT_DYNSYM: {
+      if (DotDynSymSec)
+        return createError("More than one dynamic symbol table!");
+      DotDynSymSec = &Sec;
+      break;
+    }
+    case ELF::SHT_SYMTAB: {
+      if (DotSymtabSec)
+        return createError("More than one static symbol table!");
+      DotSymtabSec = &Sec;
+      break;
+    }
+    case ELF::SHT_SYMTAB_SHNDX: {
+      auto TableOrErr = EF.getSHNDXTable(Sec);
+      if (!TableOrErr)
+        return TableOrErr.takeError();
+      ShndxTable = *TableOrErr;
+      break;
+    }
+    }
+  }
+  return ELFObjectFile<ELFT>(Object, EF, DotDynSymSec, DotSymtabSec,
+                             ShndxTable);
+}
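+
+// Illustrative usage sketch (assuming `MB` is a MemoryBufferRef over the
+// object's bytes):
+//
+//   auto ObjOrErr = ELFObjectFile<ELF64LE>::create(MB);
+//   if (!ObjOrErr)
+//     return ObjOrErr.takeError();
+//   ELFObjectFile<ELF64LE> Obj = std::move(*ObjOrErr);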
+
+template <class ELFT>
+ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, ELFFile<ELFT> EF,
+                                   const Elf_Shdr *DotDynSymSec,
+                                   const Elf_Shdr *DotSymtabSec,
+                                   ArrayRef<Elf_Word> ShndxTable)
+    : ELFObjectFileBase(
+          getELFType(ELFT::TargetEndianness == support::little, ELFT::Is64Bits),
+          Object),
+      EF(EF), DotDynSymSec(DotDynSymSec), DotSymtabSec(DotSymtabSec),
+      ShndxTable(ShndxTable) {}
+
+template <class ELFT>
+ELFObjectFile<ELFT>::ELFObjectFile(ELFObjectFile<ELFT> &&Other)
+    : ELFObjectFile(Other.Data, Other.EF, Other.DotDynSymSec,
+                    Other.DotSymtabSec, Other.ShndxTable) {}
+
+template <class ELFT>
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin() const {
+  DataRefImpl Sym = toDRI(DotSymtabSec, 0);
+  return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end() const {
+  const Elf_Shdr *SymTab = DotSymtabSec;
+  if (!SymTab)
+    return symbol_begin();
+  DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
+  return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_begin() const {
+  DataRefImpl Sym = toDRI(DotDynSymSec, 0);
+  return symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_end() const {
+  const Elf_Shdr *SymTab = DotDynSymSec;
+  if (!SymTab)
+    return dynamic_symbol_begin();
+  DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
+  return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+section_iterator ELFObjectFile<ELFT>::section_begin() const {
+  auto SectionsOrErr = EF.sections();
+  if (!SectionsOrErr)
+    return section_iterator(SectionRef());
+  return section_iterator(SectionRef(toDRI((*SectionsOrErr).begin()), this));
+}
+
+template <class ELFT>
+section_iterator ELFObjectFile<ELFT>::section_end() const {
+  auto SectionsOrErr = EF.sections();
+  if (!SectionsOrErr)
+    return section_iterator(SectionRef());
+  return section_iterator(SectionRef(toDRI((*SectionsOrErr).end()), this));
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getBytesInAddress() const {
+  return ELFT::Is64Bits ? 8 : 4;
+}
+
+template <class ELFT>
+StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
+  bool IsLittleEndian = ELFT::TargetEndianness == support::little;
+  switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+  case ELF::ELFCLASS32:
+    switch (EF.getHeader()->e_machine) {
+    case ELF::EM_386:
+      return "ELF32-i386";
+    case ELF::EM_IAMCU:
+      return "ELF32-iamcu";
+    case ELF::EM_X86_64:
+      return "ELF32-x86-64";
+    case ELF::EM_ARM:
+      return (IsLittleEndian ? "ELF32-arm-little" : "ELF32-arm-big");
+    case ELF::EM_AVR:
+      return "ELF32-avr";
+    case ELF::EM_HEXAGON:
+      return "ELF32-hexagon";
+    case ELF::EM_LANAI:
+      return "ELF32-lanai";
+    case ELF::EM_MIPS:
+      return "ELF32-mips";
+    case ELF::EM_PPC:
+      return "ELF32-ppc";
+    case ELF::EM_RISCV:
+      return "ELF32-riscv";
+    case ELF::EM_SPARC:
+    case ELF::EM_SPARC32PLUS:
+      return "ELF32-sparc";
+    case ELF::EM_WEBASSEMBLY:
+      return "ELF32-wasm";
+    case ELF::EM_AMDGPU:
+      return "ELF32-amdgpu";
+    default:
+      return "ELF32-unknown";
+    }
+  case ELF::ELFCLASS64:
+    switch (EF.getHeader()->e_machine) {
+    case ELF::EM_386:
+      return "ELF64-i386";
+    case ELF::EM_X86_64:
+      return "ELF64-x86-64";
+    case ELF::EM_AARCH64:
+      return (IsLittleEndian ? "ELF64-aarch64-little" : "ELF64-aarch64-big");
+    case ELF::EM_PPC64:
+      return "ELF64-ppc64";
+    case ELF::EM_RISCV:
+      return "ELF64-riscv";
+    case ELF::EM_S390:
+      return "ELF64-s390";
+    case ELF::EM_SPARCV9:
+      return "ELF64-sparc";
+    case ELF::EM_MIPS:
+      return "ELF64-mips";
+    case ELF::EM_WEBASSEMBLY:
+      return "ELF64-wasm";
+    case ELF::EM_AMDGPU:
+      return "ELF64-amdgpu";
+    case ELF::EM_BPF:
+      return "ELF64-BPF";
+    default:
+      return "ELF64-unknown";
+    }
+  default:
+    // FIXME: Proper error handling.
+    report_fatal_error("Invalid ELFCLASS!");
+  }
+}
+
+template <class ELFT> Triple::ArchType ELFObjectFile<ELFT>::getArch() const {
+  bool IsLittleEndian = ELFT::TargetEndianness == support::little;
+  switch (EF.getHeader()->e_machine) {
+  case ELF::EM_386:
+  case ELF::EM_IAMCU:
+    return Triple::x86;
+  case ELF::EM_X86_64:
+    return Triple::x86_64;
+  case ELF::EM_AARCH64:
+    return IsLittleEndian ? Triple::aarch64 : Triple::aarch64_be;
+  case ELF::EM_ARM:
+    return Triple::arm;
+  case ELF::EM_AVR:
+    return Triple::avr;
+  case ELF::EM_HEXAGON:
+    return Triple::hexagon;
+  case ELF::EM_LANAI:
+    return Triple::lanai;
+  case ELF::EM_MIPS:
+    switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+    case ELF::ELFCLASS32:
+      return IsLittleEndian ? Triple::mipsel : Triple::mips;
+    case ELF::ELFCLASS64:
+      return IsLittleEndian ? Triple::mips64el : Triple::mips64;
+    default:
+      report_fatal_error("Invalid ELFCLASS!");
+    }
+  case ELF::EM_PPC:
+    return Triple::ppc;
+  case ELF::EM_PPC64:
+    return IsLittleEndian ? Triple::ppc64le : Triple::ppc64;
+  case ELF::EM_RISCV:
+    switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+    case ELF::ELFCLASS32:
+      return Triple::riscv32;
+    case ELF::ELFCLASS64:
+      return Triple::riscv64;
+    default:
+      report_fatal_error("Invalid ELFCLASS!");
+    }
+  case ELF::EM_S390:
+    return Triple::systemz;
+
+  case ELF::EM_SPARC:
+  case ELF::EM_SPARC32PLUS:
+    return IsLittleEndian ? Triple::sparcel : Triple::sparc;
+  case ELF::EM_SPARCV9:
+    return Triple::sparcv9;
+  case ELF::EM_WEBASSEMBLY:
+    switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+    case ELF::ELFCLASS32: return Triple::wasm32;
+    case ELF::ELFCLASS64: return Triple::wasm64;
+    default: return Triple::UnknownArch;
+    }
+
+  case ELF::EM_AMDGPU: {
+    if (!IsLittleEndian)
+      return Triple::UnknownArch;
+
+    unsigned MACH = EF.getHeader()->e_flags & ELF::EF_AMDGPU_MACH;
+    if (MACH >= ELF::EF_AMDGPU_MACH_R600_FIRST &&
+        MACH <= ELF::EF_AMDGPU_MACH_R600_LAST)
+      return Triple::r600;
+    if (MACH >= ELF::EF_AMDGPU_MACH_AMDGCN_FIRST &&
+        MACH <= ELF::EF_AMDGPU_MACH_AMDGCN_LAST)
+      return Triple::amdgcn;
+
+    return Triple::UnknownArch;
+  }
+
+  case ELF::EM_BPF:
+    return IsLittleEndian ? Triple::bpfel : Triple::bpfeb;
+
+  default:
+    return Triple::UnknownArch;
+  }
+}
+
+template <class ELFT>
+ELFObjectFileBase::elf_symbol_iterator_range
+ELFObjectFile<ELFT>::getDynamicSymbolIterators() const {
+  return make_range(dynamic_symbol_begin(), dynamic_symbol_end());
+}
+
+template <class ELFT> bool ELFObjectFile<ELFT>::isRelocatableObject() const {
+  return EF.getHeader()->e_type == ELF::ET_REL;
+}
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_ELFOBJECTFILE_H
diff --git a/linux-x64/clang/include/llvm/Object/ELFTypes.h b/linux-x64/clang/include/llvm/Object/ELFTypes.h
new file mode 100644
index 0000000..260ca96
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ELFTypes.h
@@ -0,0 +1,740 @@
+//===- ELFTypes.h - Endian specific types for ELF ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELFTYPES_H
+#define LLVM_OBJECT_ELFTYPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+namespace object {
+
+using support::endianness;
+
+template <class ELFT> struct Elf_Ehdr_Impl;
+template <class ELFT> struct Elf_Shdr_Impl;
+template <class ELFT> struct Elf_Sym_Impl;
+template <class ELFT> struct Elf_Dyn_Impl;
+template <class ELFT> struct Elf_Phdr_Impl;
+template <class ELFT, bool isRela> struct Elf_Rel_Impl;
+template <class ELFT> struct Elf_Verdef_Impl;
+template <class ELFT> struct Elf_Verdaux_Impl;
+template <class ELFT> struct Elf_Verneed_Impl;
+template <class ELFT> struct Elf_Vernaux_Impl;
+template <class ELFT> struct Elf_Versym_Impl;
+template <class ELFT> struct Elf_Hash_Impl;
+template <class ELFT> struct Elf_GnuHash_Impl;
+template <class ELFT> struct Elf_Chdr_Impl;
+template <class ELFT> struct Elf_Nhdr_Impl;
+template <class ELFT> class Elf_Note_Impl;
+template <class ELFT> class Elf_Note_Iterator_Impl;
+
+template <endianness E, bool Is64> struct ELFType {
+private:
+  template <typename Ty>
+  using packed = support::detail::packed_endian_specific_integral<Ty, E, 1>;
+
+public:
+  static const endianness TargetEndianness = E;
+  static const bool Is64Bits = Is64;
+
+  using uint = typename std::conditional<Is64, uint64_t, uint32_t>::type;
+  using Ehdr = Elf_Ehdr_Impl<ELFType<E, Is64>>;
+  using Shdr = Elf_Shdr_Impl<ELFType<E, Is64>>;
+  using Sym = Elf_Sym_Impl<ELFType<E, Is64>>;
+  using Dyn = Elf_Dyn_Impl<ELFType<E, Is64>>;
+  using Phdr = Elf_Phdr_Impl<ELFType<E, Is64>>;
+  using Rel = Elf_Rel_Impl<ELFType<E, Is64>, false>;
+  using Rela = Elf_Rel_Impl<ELFType<E, Is64>, true>;
+  using Verdef = Elf_Verdef_Impl<ELFType<E, Is64>>;
+  using Verdaux = Elf_Verdaux_Impl<ELFType<E, Is64>>;
+  using Verneed = Elf_Verneed_Impl<ELFType<E, Is64>>;
+  using Vernaux = Elf_Vernaux_Impl<ELFType<E, Is64>>;
+  using Versym = Elf_Versym_Impl<ELFType<E, Is64>>;
+  using Hash = Elf_Hash_Impl<ELFType<E, Is64>>;
+  using GnuHash = Elf_GnuHash_Impl<ELFType<E, Is64>>;
+  using Chdr = Elf_Chdr_Impl<ELFType<E, Is64>>;
+  using Nhdr = Elf_Nhdr_Impl<ELFType<E, Is64>>;
+  using Note = Elf_Note_Impl<ELFType<E, Is64>>;
+  using NoteIterator = Elf_Note_Iterator_Impl<ELFType<E, Is64>>;
+  using DynRange = ArrayRef<Dyn>;
+  using ShdrRange = ArrayRef<Shdr>;
+  using SymRange = ArrayRef<Sym>;
+  using RelRange = ArrayRef<Rel>;
+  using RelaRange = ArrayRef<Rela>;
+  using PhdrRange = ArrayRef<Phdr>;
+
+  using Half = packed<uint16_t>;
+  using Word = packed<uint32_t>;
+  using Sword = packed<int32_t>;
+  using Xword = packed<uint64_t>;
+  using Sxword = packed<int64_t>;
+  using Addr = packed<uint>;
+  using Off = packed<uint>;
+};
+
+using ELF32LE = ELFType<support::little, false>;
+using ELF32BE = ELFType<support::big, false>;
+using ELF64LE = ELFType<support::little, true>;
+using ELF64BE = ELFType<support::big, true>;
+
+// Use an alignment of 2 for the typedefs since that is the worst case for
+// ELF files in archives.
+
+// I really don't like doing this, but the alternative is copypasta.
+#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)                                       \
+  using Elf_Addr = typename ELFT::Addr;                                        \
+  using Elf_Off = typename ELFT::Off;                                          \
+  using Elf_Half = typename ELFT::Half;                                        \
+  using Elf_Word = typename ELFT::Word;                                        \
+  using Elf_Sword = typename ELFT::Sword;                                      \
+  using Elf_Xword = typename ELFT::Xword;                                      \
+  using Elf_Sxword = typename ELFT::Sxword;
+
+#define LLVM_ELF_COMMA ,
+#define LLVM_ELF_IMPORT_TYPES(E, W)                                            \
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFType<E LLVM_ELF_COMMA W>)
+
+// Section header.
+template <class ELFT> struct Elf_Shdr_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Word sh_name;      // Section name (index into string table)
+  Elf_Word sh_type;      // Section type (SHT_*)
+  Elf_Word sh_flags;     // Section flags (SHF_*)
+  Elf_Addr sh_addr;      // Address where section is to be loaded
+  Elf_Off sh_offset;     // File offset of section data, in bytes
+  Elf_Word sh_size;      // Size of section, in bytes
+  Elf_Word sh_link;      // Section type-specific header table index link
+  Elf_Word sh_info;      // Section type-specific extra information
+  Elf_Word sh_addralign; // Section address alignment
+  Elf_Word sh_entsize;   // Size of records contained within the section
+};
+
+template <endianness TargetEndianness>
+struct Elf_Shdr_Base<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Word sh_name;       // Section name (index into string table)
+  Elf_Word sh_type;       // Section type (SHT_*)
+  Elf_Xword sh_flags;     // Section flags (SHF_*)
+  Elf_Addr sh_addr;       // Address where section is to be loaded
+  Elf_Off sh_offset;      // File offset of section data, in bytes
+  Elf_Xword sh_size;      // Size of section, in bytes
+  Elf_Word sh_link;       // Section type-specific header table index link
+  Elf_Word sh_info;       // Section type-specific extra information
+  Elf_Xword sh_addralign; // Section address alignment
+  Elf_Xword sh_entsize;   // Size of records contained within the section
+};
+
+template <class ELFT>
+struct Elf_Shdr_Impl : Elf_Shdr_Base<ELFT> {
+  using Elf_Shdr_Base<ELFT>::sh_entsize;
+  using Elf_Shdr_Base<ELFT>::sh_size;
+
+  /// @brief Get the number of entities this section contains if it has any.
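+  ///
+  /// For example (illustrative), a 64-bit .symtab with sh_size == 240 and
+  /// sh_entsize == sizeof(Elf64_Sym) == 24 contains 10 symbols.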
+  unsigned getEntityCount() const {
+    if (sh_entsize == 0)
+      return 0;
+    return sh_size / sh_entsize;
+  }
+};
+
+template <class ELFT> struct Elf_Sym_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Word st_name;       // Symbol name (index into string table)
+  Elf_Addr st_value;      // Value or address associated with the symbol
+  Elf_Word st_size;       // Size of the symbol
+  unsigned char st_info;  // Symbol's type and binding attributes
+  unsigned char st_other; // Must be zero; reserved
+  Elf_Half st_shndx;      // Which section (header table index) it's defined in
+};
+
+template <endianness TargetEndianness>
+struct Elf_Sym_Base<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Word st_name;       // Symbol name (index into string table)
+  unsigned char st_info;  // Symbol's type and binding attributes
+  unsigned char st_other; // Must be zero; reserved
+  Elf_Half st_shndx;      // Which section (header table index) it's defined in
+  Elf_Addr st_value;      // Value or address associated with the symbol
+  Elf_Xword st_size;      // Size of the symbol
+};
+
+template <class ELFT>
+struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
+  using Elf_Sym_Base<ELFT>::st_info;
+  using Elf_Sym_Base<ELFT>::st_shndx;
+  using Elf_Sym_Base<ELFT>::st_other;
+  using Elf_Sym_Base<ELFT>::st_value;
+
+  // These accessors and mutators correspond to the ELF32_ST_BIND,
+  // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+  unsigned char getBinding() const { return st_info >> 4; }
+  unsigned char getType() const { return st_info & 0x0f; }
+  uint64_t getValue() const { return st_value; }
+  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+
+  void setBindingAndType(unsigned char b, unsigned char t) {
+    st_info = (b << 4) + (t & 0x0f);
+  }
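+
+  // For example (illustrative): setBindingAndType(ELF::STB_GLOBAL,
+  // ELF::STT_FUNC) stores st_info == 0x12, i.e. binding 1 in the high
+  // nibble and type 2 in the low nibble.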
+
+  /// Access to the STV_xxx flag stored in the first two bits of st_other.
+  /// STV_DEFAULT: 0
+  /// STV_INTERNAL: 1
+  /// STV_HIDDEN: 2
+  /// STV_PROTECTED: 3
+  unsigned char getVisibility() const { return st_other & 0x3; }
+  void setVisibility(unsigned char v) {
+    assert(v < 4 && "Invalid value for visibility");
+    st_other = (st_other & ~0x3) | v;
+  }
+
+  bool isAbsolute() const { return st_shndx == ELF::SHN_ABS; }
+
+  bool isCommon() const {
+    return getType() == ELF::STT_COMMON || st_shndx == ELF::SHN_COMMON;
+  }
+
+  bool isDefined() const { return !isUndefined(); }
+
+  bool isProcessorSpecific() const {
+    return st_shndx >= ELF::SHN_LOPROC && st_shndx <= ELF::SHN_HIPROC;
+  }
+
+  bool isOSSpecific() const {
+    return st_shndx >= ELF::SHN_LOOS && st_shndx <= ELF::SHN_HIOS;
+  }
+
+  bool isReserved() const {
+    // ELF::SHN_HIRESERVE is 0xffff so st_shndx <= ELF::SHN_HIRESERVE is always
+    // true and some compilers warn about it.
+    return st_shndx >= ELF::SHN_LORESERVE;
+  }
+
+  bool isUndefined() const { return st_shndx == ELF::SHN_UNDEF; }
+
+  bool isExternal() const {
+    return getBinding() != ELF::STB_LOCAL;
+  }
+
+  Expected<StringRef> getName(StringRef StrTab) const;
+};
+
+template <class ELFT>
+Expected<StringRef> Elf_Sym_Impl<ELFT>::getName(StringRef StrTab) const {
+  uint32_t Offset = this->st_name;
+  if (Offset >= StrTab.size())
+    return errorCodeToError(object_error::parse_failed);
+  return StringRef(StrTab.data() + Offset);
+}
+
+/// Elf_Versym: This is the structure of entries in the SHT_GNU_versym section
+/// (.gnu.version). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Versym_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
+};
+
+/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
+/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verdef_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  using Elf_Verdaux = Elf_Verdaux_Impl<ELFT>;
+  Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
+  Elf_Half vd_flags;   // Bitwise flags (VER_DEF_*)
+  Elf_Half vd_ndx;     // Version index, used in .gnu.version entries
+  Elf_Half vd_cnt;     // Number of Verdaux entries
+  Elf_Word vd_hash;    // Hash of name
+  Elf_Word vd_aux;     // Offset to the first Verdaux entry (in bytes)
+  Elf_Word vd_next;    // Offset to the next Verdef entry (in bytes)
+
+  /// Get the first Verdaux entry for this Verdef.
+  const Elf_Verdaux *getAux() const {
+    return reinterpret_cast<const Elf_Verdaux *>((const char *)this + vd_aux);
+  }
+};
+
+/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
+/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verdaux_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Word vda_name; // Version name (offset in string table)
+  Elf_Word vda_next; // Offset to next Verdaux entry (in bytes)
+};
+
+/// Elf_Verneed: This is the structure of entries in the SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verneed_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Half vn_version; // Version of this structure (e.g. VER_NEED_CURRENT)
+  Elf_Half vn_cnt;     // Number of associated Vernaux entries
+  Elf_Word vn_file;    // Library name (string table offset)
+  Elf_Word vn_aux;     // Offset to first Vernaux entry (in bytes)
+  Elf_Word vn_next;    // Offset to next Verneed entry (in bytes)
+};
+
+/// Elf_Vernaux: This is the structure of auxiliary data in the SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Vernaux_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Word vna_hash;  // Hash of dependency name
+  Elf_Half vna_flags; // Bitwise Flags (VER_FLAG_*)
+  Elf_Half vna_other; // Version index, used in .gnu.version entries
+  Elf_Word vna_name;  // Dependency name
+  Elf_Word vna_next;  // Offset to next Vernaux entry (in bytes)
+};
+
+/// Elf_Dyn_Base: This structure matches the form that entries in the dynamic
+///               table section (.dynamic) take.
+template <class ELFT> struct Elf_Dyn_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Sword d_tag;
+  union {
+    Elf_Word d_val;
+    Elf_Addr d_ptr;
+  } d_un;
+};
+
+template <endianness TargetEndianness>
+struct Elf_Dyn_Base<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Sxword d_tag;
+  union {
+    Elf_Xword d_val;
+    Elf_Addr d_ptr;
+  } d_un;
+};
+
+/// Elf_Dyn_Impl: This inherits from Elf_Dyn_Base, adding getters.
+template <class ELFT>
+struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
+  using Elf_Dyn_Base<ELFT>::d_tag;
+  using Elf_Dyn_Base<ELFT>::d_un;
+  using intX_t = typename std::conditional<ELFT::Is64Bits,
+                                           int64_t, int32_t>::type;
+  using uintX_t = typename std::conditional<ELFT::Is64Bits,
+                                            uint64_t, uint32_t>::type;
+  intX_t getTag() const { return d_tag; }
+  uintX_t getVal() const { return d_un.d_val; }
+  uintX_t getPtr() const { return d_un.d_ptr; }
+};
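+
+// For example (illustrative): a DT_NEEDED entry has getTag() == ELF::DT_NEEDED
+// and getVal() holding the offset of the needed library's name in the dynamic
+// string table.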
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  static const bool IsRela = false;
+  Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+  Elf_Word r_info;   // Symbol table index and type of relocation to apply
+
+  uint32_t getRInfo(bool isMips64EL) const {
+    assert(!isMips64EL);
+    return r_info;
+  }
+  void setRInfo(uint32_t R, bool IsMips64EL) {
+    assert(!IsMips64EL);
+    r_info = R;
+  }
+
+  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+  // and ELF32_R_INFO macros defined in the ELF specification:
+  uint32_t getSymbol(bool isMips64EL) const {
+    return this->getRInfo(isMips64EL) >> 8;
+  }
+  unsigned char getType(bool isMips64EL) const {
+    return (unsigned char)(this->getRInfo(isMips64EL) & 0x0ff);
+  }
+  void setSymbol(uint32_t s, bool IsMips64EL) {
+    setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
+  }
+  void setType(unsigned char t, bool IsMips64EL) {
+    setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
+  }
+  void setSymbolAndType(uint32_t s, unsigned char t, bool IsMips64EL) {
+    this->setRInfo((s << 8) + t, IsMips64EL);
+  }
+};
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, true>
+    : public Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  static const bool IsRela = true;
+  Elf_Sword r_addend; // Compute value for relocatable field by adding this
+};
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  static const bool IsRela = false;
+  Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+  Elf_Xword r_info;  // Symbol table index and type of relocation to apply
+
+  uint64_t getRInfo(bool isMips64EL) const {
+    uint64_t t = r_info;
+    if (!isMips64EL)
+      return t;
+    // Mips64 little endian has a "special" encoding of r_info. Instead of one
+    // 64 bit little endian number, it is a little endian 32 bit number followed
+    // by a 32 bit big endian number.
+    return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
+           ((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
+  }
+
+  void setRInfo(uint64_t R, bool IsMips64EL) {
+    if (IsMips64EL)
+      r_info = (R >> 32) | ((R & 0xff000000) << 8) | ((R & 0x00ff0000) << 24) |
+               ((R & 0x0000ff00) << 40) | ((R & 0x000000ff) << 56);
+    else
+      r_info = R;
+  }
+
+  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+  // and ELF64_R_INFO macros defined in the ELF specification:
+  uint32_t getSymbol(bool isMips64EL) const {
+    return (uint32_t)(this->getRInfo(isMips64EL) >> 32);
+  }
+  uint32_t getType(bool isMips64EL) const {
+    return (uint32_t)(this->getRInfo(isMips64EL) & 0xffffffffL);
+  }
+  void setSymbol(uint32_t s, bool IsMips64EL) {
+    setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
+  }
+  void setType(uint32_t t, bool IsMips64EL) {
+    setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
+  }
+  void setSymbolAndType(uint32_t s, uint32_t t, bool IsMips64EL) {
+    this->setRInfo(((uint64_t)s << 32) + (t & 0xffffffffL), IsMips64EL);
+  }
+};
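+
+// Worked example (illustrative) of the Mips64 EL re-encoding above: for a raw
+// r_info of 0x0807060504030201 (on-disk bytes 01..08), getRInfo(true) yields
+// 0x0403020105060708, so getSymbol(true) == 0x04030201 (the leading
+// little-endian 32-bit word) and getType(true) == 0x05060708 (the trailing
+// big-endian 32-bit word).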
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, true>
+    : public Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  static const bool IsRela = true;
+  Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
+};
+
+template <class ELFT>
+struct Elf_Ehdr_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+  Elf_Half e_type;                       // Type of file (see ET_*)
+  Elf_Half e_machine;   // Required architecture for this file (see EM_*)
+  Elf_Word e_version;   // Must be equal to 1
+  Elf_Addr e_entry;     // Address to jump to in order to start program
+  Elf_Off e_phoff;      // Program header table's file offset, in bytes
+  Elf_Off e_shoff;      // Section header table's file offset, in bytes
+  Elf_Word e_flags;     // Processor-specific flags
+  Elf_Half e_ehsize;    // Size of ELF header, in bytes
+  Elf_Half e_phentsize; // Size of an entry in the program header table
+  Elf_Half e_phnum;     // Number of entries in the program header table
+  Elf_Half e_shentsize; // Size of an entry in the section header table
+  Elf_Half e_shnum;     // Number of entries in the section header table
+  Elf_Half e_shstrndx;  // Section header table index of section name
+                        // string table
+
+  bool checkMagic() const {
+    return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+  }
+
+  unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+  unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+};
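+
+// Usage sketch (illustrative): validating a buffer assumed to hold a 64-bit
+// little-endian ELF header, using the ELF64LE typedef from this header:
+//
+//   const auto *Ehdr = reinterpret_cast<const Elf_Ehdr_Impl<ELF64LE> *>(Buf);
+//   if (!Ehdr->checkMagic() || Ehdr->getFileClass() != ELF::ELFCLASS64 ||
+//       Ehdr->getDataEncoding() != ELF::ELFDATA2LSB)
+//     reportError(); // hypothetical error path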
+
+template <endianness TargetEndianness>
+struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Word p_type;   // Type of segment
+  Elf_Off p_offset;  // FileOffset where segment is located, in bytes
+  Elf_Addr p_vaddr;  // Virtual Address of beginning of segment
+  Elf_Addr p_paddr;  // Physical address of beginning of segment (OS-specific)
+  Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+  Elf_Word p_memsz;  // Num. of bytes in mem image of segment (may be zero)
+  Elf_Word p_flags;  // Segment flags
+  Elf_Word p_align;  // Segment alignment constraint
+};
+
+template <endianness TargetEndianness>
+struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Word p_type;    // Type of segment
+  Elf_Word p_flags;   // Segment flags
+  Elf_Off p_offset;   // FileOffset where segment is located, in bytes
+  Elf_Addr p_vaddr;   // Virtual Address of beginning of segment
+  Elf_Addr p_paddr;   // Physical address of beginning of segment (OS-specific)
+  Elf_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+  Elf_Xword p_memsz;  // Num. of bytes in mem image of segment (may be zero)
+  Elf_Xword p_align;  // Segment alignment constraint
+};
+
+// ELFT needed for endianness.
+template <class ELFT>
+struct Elf_Hash_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Word nbucket;
+  Elf_Word nchain;
+
+  ArrayRef<Elf_Word> buckets() const {
+    return ArrayRef<Elf_Word>(&nbucket + 2, &nbucket + 2 + nbucket);
+  }
+
+  ArrayRef<Elf_Word> chains() const {
+    return ArrayRef<Elf_Word>(&nbucket + 2 + nbucket,
+                              &nbucket + 2 + nbucket + nchain);
+  }
+};
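+
+// Usage sketch (illustrative): a classic SysV hash lookup walks one bucket's
+// chain until STN_UNDEF. `H` (the SysV hash of the queried name), `Syms`,
+// `StrTab` and `symbolMatches` are assumed inputs/helpers.
+//
+//   for (Elf_Word I = Table.buckets()[H % Table.nbucket];
+//        I != ELF::STN_UNDEF; I = Table.chains()[I])
+//     if (symbolMatches(Syms[I], StrTab, Name))
+//       return I;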
+
+// .gnu.hash section
+template <class ELFT>
+struct Elf_GnuHash_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Word nbuckets;
+  Elf_Word symndx;
+  Elf_Word maskwords;
+  Elf_Word shift2;
+
+  ArrayRef<Elf_Off> filter() const {
+    return ArrayRef<Elf_Off>(reinterpret_cast<const Elf_Off *>(&shift2 + 1),
+                             maskwords);
+  }
+
+  ArrayRef<Elf_Word> buckets() const {
+    return ArrayRef<Elf_Word>(
+        reinterpret_cast<const Elf_Word *>(filter().end()), nbuckets);
+  }
+
+  ArrayRef<Elf_Word> values(unsigned DynamicSymCount) const {
+    return ArrayRef<Elf_Word>(buckets().end(), DynamicSymCount - symndx);
+  }
+};
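+
+// Usage sketch (illustrative, omitting the Bloom-filter probe and the empty
+// bucket check): each chain in values() ends at an entry whose low bit is
+// set, so a lookup compares hash values with that bit masked off. `H` is
+// assumed to be the GNU hash of the queried name, `symbolMatches` a
+// hypothetical helper.
+//
+//   for (Elf_Word I = Table.buckets()[H % Table.nbuckets];; ++I) {
+//     Elf_Word HashVal = Table.values(DynSymCount)[I - Table.symndx];
+//     if ((HashVal | 1) == (H | 1) && symbolMatches(I))
+//       return I;
+//     if (HashVal & 1) // end-of-chain marker
+//       break;
+//   }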
+
+// Compressed section headers.
+// http://www.sco.com/developers/gabi/latest/ch4.sheader.html#compression_header
+template <endianness TargetEndianness>
+struct Elf_Chdr_Impl<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Word ch_type;
+  Elf_Word ch_size;
+  Elf_Word ch_addralign;
+};
+
+template <endianness TargetEndianness>
+struct Elf_Chdr_Impl<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Word ch_type;
+  Elf_Word ch_reserved;
+  Elf_Xword ch_size;
+  Elf_Xword ch_addralign;
+};
+
+/// Note header
+template <class ELFT>
+struct Elf_Nhdr_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Word n_namesz;
+  Elf_Word n_descsz;
+  Elf_Word n_type;
+
+  /// The alignment of the name and descriptor.
+  ///
+  /// Implementations differ from the specification here: in practice all
+/// variants align both the name and descriptor to 4 bytes.
+  static const unsigned int Align = 4;
+
+  /// Get the size of the note, including name, descriptor, and padding.
+  size_t getSize() const {
+    return sizeof(*this) + alignTo<Align>(n_namesz) + alignTo<Align>(n_descsz);
+  }
+};
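+
+// Worked example (illustrative): for a note named "GNU" (n_namesz == 4,
+// including the null byte) with n_descsz == 20, getSize() is
+// 12 + alignTo<4>(4) + alignTo<4>(20) == 36 bytes.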
+
+/// An ELF note.
+///
+/// Wraps a note header, providing methods for accessing the name and
+/// descriptor safely.
+template <class ELFT>
+class Elf_Note_Impl {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+  const Elf_Nhdr_Impl<ELFT> &Nhdr;
+
+  template <class NoteIteratorELFT> friend class Elf_Note_Iterator_Impl;
+
+  Elf_Note_Impl(const Elf_Nhdr_Impl<ELFT> &Nhdr) : Nhdr(Nhdr) {}
+
+public:
+  /// Get the note's name, excluding the terminating null byte.
+  StringRef getName() const {
+    if (!Nhdr.n_namesz)
+      return StringRef();
+    return StringRef(reinterpret_cast<const char *>(&Nhdr) + sizeof(Nhdr),
+                     Nhdr.n_namesz - 1);
+  }
+
+  /// Get the note's descriptor.
+  ArrayRef<Elf_Word> getDesc() const {
+    if (!Nhdr.n_descsz)
+      return ArrayRef<Elf_Word>();
+    return ArrayRef<Elf_Word>(
+        reinterpret_cast<const Elf_Word *>(
+            reinterpret_cast<const uint8_t *>(&Nhdr) + sizeof(Nhdr) +
+            alignTo<Elf_Nhdr_Impl<ELFT>::Align>(Nhdr.n_namesz)),
+        Nhdr.n_descsz);
+  }
+
+  /// Get the note's type.
+  Elf_Word getType() const { return Nhdr.n_type; }
+};
+
+template <class ELFT>
+class Elf_Note_Iterator_Impl
+    : std::iterator<std::forward_iterator_tag, Elf_Note_Impl<ELFT>> {
+  // Nhdr being a nullptr marks the end of iteration.
+  const Elf_Nhdr_Impl<ELFT> *Nhdr = nullptr;
+  size_t RemainingSize = 0u;
+  Error *Err = nullptr;
+
+  template <class ELFFileELFT> friend class ELFFile;
+
+  // Stop iteration and indicate an overflow.
+  void stopWithOverflowError() {
+    Nhdr = nullptr;
+    *Err = make_error<StringError>("ELF note overflows container",
+                                   object_error::parse_failed);
+  }
+
+  // Advance Nhdr by NoteSize bytes, starting from NhdrPos.
+  //
+  // Assumes NoteSize <= RemainingSize. Ensures Nhdr->getSize() <= RemainingSize
+  // upon returning. Handles stopping iteration when reaching the end of the
+  // container, either cleanly or with an overflow error.
+  void advanceNhdr(const uint8_t *NhdrPos, size_t NoteSize) {
+    RemainingSize -= NoteSize;
+    if (RemainingSize == 0u)
+      Nhdr = nullptr;
+    else if (sizeof(*Nhdr) > RemainingSize)
+      stopWithOverflowError();
+    else {
+      Nhdr = reinterpret_cast<const Elf_Nhdr_Impl<ELFT> *>(NhdrPos + NoteSize);
+      if (Nhdr->getSize() > RemainingSize)
+        stopWithOverflowError();
+    }
+  }
+
+  Elf_Note_Iterator_Impl() {}
+  explicit Elf_Note_Iterator_Impl(Error &Err) : Err(&Err) {}
+  Elf_Note_Iterator_Impl(const uint8_t *Start, size_t Size, Error &Err)
+      : RemainingSize(Size), Err(&Err) {
+    assert(Start && "ELF note iterator starting at NULL");
+    advanceNhdr(Start, 0u);
+  }
+
+public:
+  Elf_Note_Iterator_Impl &operator++() {
+    assert(Nhdr && "incremented ELF note end iterator");
+    const uint8_t *NhdrPos = reinterpret_cast<const uint8_t *>(Nhdr);
+    size_t NoteSize = Nhdr->getSize();
+    advanceNhdr(NhdrPos, NoteSize);
+    return *this;
+  }
+  bool operator==(Elf_Note_Iterator_Impl Other) const {
+    return Nhdr == Other.Nhdr;
+  }
+  bool operator!=(Elf_Note_Iterator_Impl Other) const {
+    return !(*this == Other);
+  }
+  Elf_Note_Impl<ELFT> operator*() const {
+    assert(Nhdr && "dereferenced ELF note end iterator");
+    return Elf_Note_Impl<ELFT>(*Nhdr);
+  }
+};
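+
+// Usage sketch (illustrative): these iterators are normally obtained through
+// ELFFile (a friend of this class), e.g. from a PT_NOTE program header; `EF`,
+// `Phdr` and `handleNote` are assumed inputs/helpers.
+//
+//   Error Err = Error::success();
+//   for (const auto &Note : EF.notes(Phdr, Err))
+//     handleNote(Note.getName(), Note.getType(), Note.getDesc());
+//   if (Err)
+//     return std::move(Err);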
+
+// MIPS .reginfo section
+template <class ELFT>
+struct Elf_Mips_RegInfo;
+
+template <support::endianness TargetEndianness>
+struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+  Elf_Word ri_gprmask;     // bit-mask of used general registers
+  Elf_Word ri_cprmask[4];  // bit-mask of used co-processor registers
+  Elf_Addr ri_gp_value;    // gp register value
+};
+
+template <support::endianness TargetEndianness>
+struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
+  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+  Elf_Word ri_gprmask;     // bit-mask of used general registers
+  Elf_Word ri_pad;         // unused padding field
+  Elf_Word ri_cprmask[4];  // bit-mask of used co-processor registers
+  Elf_Addr ri_gp_value;    // gp register value
+};
+
+// .MIPS.options section
+template <class ELFT> struct Elf_Mips_Options {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  uint8_t kind;     // Determines interpretation of variable part of descriptor
+  uint8_t size;     // Byte size of descriptor, including this header
+  Elf_Half section; // Section header index of section affected,
+                    // or 0 for global options
+  Elf_Word info;    // Kind-specific information
+
+  Elf_Mips_RegInfo<ELFT> &getRegInfo() {
+    assert(kind == ELF::ODK_REGINFO);
+    return *reinterpret_cast<Elf_Mips_RegInfo<ELFT> *>(
+        (uint8_t *)this + sizeof(Elf_Mips_Options));
+  }
+  const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
+    return const_cast<Elf_Mips_Options *>(this)->getRegInfo();
+  }
+};
+
+// .MIPS.abiflags section content
+template <class ELFT> struct Elf_Mips_ABIFlags {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+  Elf_Half version;  // Version of the structure
+  uint8_t isa_level; // ISA level: 1-5, 32, and 64
+  uint8_t isa_rev;   // ISA revision (0 for MIPS I - MIPS V)
+  uint8_t gpr_size;  // General purpose registers size
+  uint8_t cpr1_size; // Co-processor 1 registers size
+  uint8_t cpr2_size; // Co-processor 2 registers size
+  uint8_t fp_abi;    // Floating-point ABI flag
+  Elf_Word isa_ext;  // Processor-specific extension
+  Elf_Word ases;     // ASEs flags
+  Elf_Word flags1;   // General flags
+  Elf_Word flags2;   // General flags
+};
+
+} // end namespace object.
+} // end namespace llvm.
+
+#endif // LLVM_OBJECT_ELFTYPES_H
diff --git a/linux-x64/clang/include/llvm/Object/Error.h b/linux-x64/clang/include/llvm/Object/Error.h
new file mode 100644
index 0000000..eb93833
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/Error.h
@@ -0,0 +1,92 @@
+//===- Error.h - system_error extensions for Object -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This declares a new error_category for the Object library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ERROR_H
+#define LLVM_OBJECT_ERROR_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Error.h"
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+class Binary;
+
+const std::error_category &object_category();
+
+enum class object_error {
+  // Error code 0 is absent. Use std::error_code() instead.
+  arch_not_found = 1,
+  invalid_file_type,
+  parse_failed,
+  unexpected_eof,
+  string_table_non_null_end,
+  invalid_section_index,
+  bitcode_section_not_found,
+  invalid_symbol_index,
+};
+
+inline std::error_code make_error_code(object_error e) {
+  return std::error_code(static_cast<int>(e), object_category());
+}
+
+/// Base class for all errors indicating malformed binary files.
+///
+/// Having a subclass for all malformed binary files allows archive-walking
+/// code to skip malformed files without having to understand every possible
+/// way that a binary file might be malformed.
+///
+/// Currently inherits from ECError for easy interoperability with
+/// std::error_code, but this will be removed in the future.
+class BinaryError : public ErrorInfo<BinaryError, ECError> {
+public:
+  static char ID;
+  BinaryError() {
+    // Default to parse_failed, can be overridden with setErrorCode.
+    setErrorCode(make_error_code(object_error::parse_failed));
+  }
+};
+
+/// Generic binary error.
+///
+/// For errors that don't require their own specific sub-error (most errors)
+/// this class can be used to describe the error via a string message.
+class GenericBinaryError : public ErrorInfo<GenericBinaryError, BinaryError> {
+public:
+  static char ID;
+  GenericBinaryError(Twine Msg);
+  GenericBinaryError(Twine Msg, object_error ECOverride);
+  const std::string &getMessage() const { return Msg; }
+  void log(raw_ostream &OS) const override;
+private:
+  std::string Msg;
+};
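+
+// Usage sketch (illustrative): constructing one of these errors and consuming
+// it with the standard llvm::Error machinery:
+//
+//   Error E = make_error<GenericBinaryError>("truncated section header",
+//                                            object_error::parse_failed);
+//   handleAllErrors(std::move(E), [](BinaryError &BE) {
+//     errs() << BE.message() << "\n";
+//   });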
+
+/// isNotObjectErrorInvalidFileType() is used when looping through the children
+/// of an archive after calling getAsBinary() on the child and it returns an
+/// llvm::Error. In the cases where we want to loop through the children and
+/// ignore the non-objects in the archive, this is used to test the error to
+/// see if an error() function needs to be called on the llvm::Error.
+Error isNotObjectErrorInvalidFileType(llvm::Error Err);
+
+} // end namespace object.
+
+} // end namespace llvm.
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::object::object_error> : std::true_type {};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Object/IRObjectFile.h b/linux-x64/clang/include/llvm/Object/IRObjectFile.h
new file mode 100644
index 0000000..6c271b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/IRObjectFile.h
@@ -0,0 +1,82 @@
+//===- IRObjectFile.h - LLVM IR object file implementation ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the IRObjectFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_IROBJECTFILE_H
+#define LLVM_OBJECT_IROBJECTFILE_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Object/IRSymtab.h"
+#include "llvm/Object/ModuleSymbolTable.h"
+#include "llvm/Object/SymbolicFile.h"
+
+namespace llvm {
+class BitcodeModule;
+class Mangler;
+class Module;
+class GlobalValue;
+class Triple;
+
+namespace object {
+class ObjectFile;
+
+class IRObjectFile : public SymbolicFile {
+  std::vector<std::unique_ptr<Module>> Mods;
+  ModuleSymbolTable SymTab;
+  IRObjectFile(MemoryBufferRef Object,
+               std::vector<std::unique_ptr<Module>> Mods);
+
+public:
+  ~IRObjectFile() override;
+  void moveSymbolNext(DataRefImpl &Symb) const override;
+  std::error_code printSymbolName(raw_ostream &OS,
+                                  DataRefImpl Symb) const override;
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+  basic_symbol_iterator symbol_begin() const override;
+  basic_symbol_iterator symbol_end() const override;
+
+  StringRef getTargetTriple() const;
+
+  static bool classof(const Binary *v) {
+    return v->isIR();
+  }
+
+  /// \brief Finds and returns bitcode embedded in the given object file, or an
+  /// error code if not found.
+  static Expected<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
+
+  /// \brief Finds and returns bitcode in the given memory buffer (which may
+  /// be either a bitcode file or a native object file with embedded bitcode),
+  /// or an error code if not found.
+  static Expected<MemoryBufferRef>
+  findBitcodeInMemBuffer(MemoryBufferRef Object);
+
+  static Expected<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
+                                                        LLVMContext &Context);
+};
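+
+// Usage sketch (illustrative): locating bitcode in an arbitrary buffer and
+// materializing an IRObjectFile from it. `MB` and `Ctx` are assumed inputs.
+//
+//   Expected<MemoryBufferRef> BC = IRObjectFile::findBitcodeInMemBuffer(MB);
+//   if (!BC)
+//     return BC.takeError();
+//   Expected<std::unique_ptr<IRObjectFile>> Obj =
+//       IRObjectFile::create(*BC, Ctx);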
+
+/// The contents of a bitcode file and its irsymtab. Any underlying data
+/// for the irsymtab are owned by Symtab and Strtab.
+struct IRSymtabFile {
+  std::vector<BitcodeModule> Mods;
+  SmallVector<char, 0> Symtab, Strtab;
+  irsymtab::Reader TheReader;
+};
+
+/// Reads a bitcode file, creating its irsymtab if necessary.
+Expected<IRSymtabFile> readIRSymtab(MemoryBufferRef MBRef);
+
+}
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Object/IRSymtab.h b/linux-x64/clang/include/llvm/Object/IRSymtab.h
new file mode 100644
index 0000000..5f6a024
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/IRSymtab.h
@@ -0,0 +1,358 @@
+//===- IRSymtab.h - data definitions for IR symbol tables -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains data definitions and a reader and builder for a symbol
+// table for LLVM IR. Its purpose is to allow linkers and other consumers of
+// bitcode files to efficiently read the symbol table for symbol resolution
+// purposes without needing to construct a module in memory.
+//
+// As with most object files the symbol table has two parts: the symbol table
+// itself and a string table which is referenced by the symbol table.
+//
+// A symbol table corresponds to a single bitcode file, which may consist of
+// multiple modules, so symbol tables may likewise contain symbols for multiple
+// modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_IRSYMTAB_H
+#define LLVM_OBJECT_IRSYMTAB_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+struct BitcodeFileContents;
+class StringTableBuilder;
+
+namespace irsymtab {
+
+namespace storage {
+
+// The data structures in this namespace define the low-level serialization
+// format. Clients that just want to read a symbol table should use the
+// irsymtab::Reader class.
+
+using Word = support::ulittle32_t;
+
+/// A reference to a string in the string table.
+struct Str {
+  Word Offset, Size;
+
+  StringRef get(StringRef Strtab) const {
+    return {Strtab.data() + Offset, Size};
+  }
+};
+
+/// A reference to a range of objects in the symbol table.
+template <typename T> struct Range {
+  Word Offset, Size;
+
+  ArrayRef<T> get(StringRef Symtab) const {
+    return {reinterpret_cast<const T *>(Symtab.data() + Offset), Size};
+  }
+};
+
+/// Describes the range of a particular module's symbols within the symbol
+/// table.
+struct Module {
+  Word Begin, End;
+
+  /// The index of the first Uncommon for this Module.
+  Word UncBegin;
+};
+
+/// This is equivalent to an IR comdat.
+struct Comdat {
+  Str Name;
+};
+
+/// Contains the information needed by linkers for symbol resolution, as well as
+/// by the LTO implementation itself.
+struct Symbol {
+  /// The mangled symbol name.
+  Str Name;
+
+  /// The unmangled symbol name, or the empty string if this is not an IR
+  /// symbol.
+  Str IRName;
+
+  /// The index into Header::Comdats, or -1 if not a comdat member.
+  Word ComdatIndex;
+
+  Word Flags;
+  enum FlagBits {
+    FB_visibility, // 2 bits
+    FB_has_uncommon = FB_visibility + 2,
+    FB_undefined,
+    FB_weak,
+    FB_common,
+    FB_indirect,
+    FB_used,
+    FB_tls,
+    FB_may_omit,
+    FB_global,
+    FB_format_specific,
+    FB_unnamed_addr,
+    FB_executable,
+  };
+};
+
+/// This data structure contains rarely used symbol fields and is optionally
+/// referenced by a Symbol.
+struct Uncommon {
+  Word CommonSize, CommonAlign;
+
+  /// COFF-specific: the name of the symbol that a weak external resolves to
+  /// if not defined.
+  Str COFFWeakExternFallbackName;
+
+  /// Specified section name, if any.
+  Str SectionName;
+};
+
+struct Header {
+  /// Version number of the symtab format. This number should be incremented
+  /// when the format changes, but it does not need to be incremented if a
+  /// change to LLVM would cause it to create a different symbol table.
+  Word Version;
+  enum { kCurrentVersion = 1 };
+
+  /// The producer's version string (LLVM_VERSION_STRING " " LLVM_REVISION).
+  /// Consumers should rebuild the symbol table from IR if the producer's
+  /// version does not match the consumer's version due to potential differences
+  /// in symbol table format, symbol enumeration order and so on.
+  Str Producer;
+
+  Range<Module> Modules;
+  Range<Comdat> Comdats;
+  Range<Symbol> Symbols;
+  Range<Uncommon> Uncommons;
+
+  Str TargetTriple, SourceFileName;
+
+  /// COFF-specific: linker directives.
+  Str COFFLinkerOpts;
+};
+
+} // end namespace storage
+
+/// Fills in Symtab and StrtabBuilder with a valid symbol and string table for
+/// Mods.
+Error build(ArrayRef<Module *> Mods, SmallVector<char, 0> &Symtab,
+            StringTableBuilder &StrtabBuilder, BumpPtrAllocator &Alloc);
+
+/// This represents a symbol that has been read from a storage::Symbol and
+/// possibly a storage::Uncommon.
+struct Symbol {
+  // Copied from storage::Symbol.
+  StringRef Name, IRName;
+  int ComdatIndex;
+  uint32_t Flags;
+
+  // Copied from storage::Uncommon.
+  uint32_t CommonSize, CommonAlign;
+  StringRef COFFWeakExternFallbackName;
+  StringRef SectionName;
+
+  /// Returns the mangled symbol name.
+  StringRef getName() const { return Name; }
+
+  /// Returns the unmangled symbol name, or the empty string if this is not an
+  /// IR symbol.
+  StringRef getIRName() const { return IRName; }
+
+  /// Returns the index into the comdat table (see Reader::getComdatTable()), or
+  /// -1 if not a comdat member.
+  int getComdatIndex() const { return ComdatIndex; }
+
+  using S = storage::Symbol;
+
+  GlobalValue::VisibilityTypes getVisibility() const {
+    return GlobalValue::VisibilityTypes((Flags >> S::FB_visibility) & 3);
+  }
+
+  bool isUndefined() const { return (Flags >> S::FB_undefined) & 1; }
+  bool isWeak() const { return (Flags >> S::FB_weak) & 1; }
+  bool isCommon() const { return (Flags >> S::FB_common) & 1; }
+  bool isIndirect() const { return (Flags >> S::FB_indirect) & 1; }
+  bool isUsed() const { return (Flags >> S::FB_used) & 1; }
+  bool isTLS() const { return (Flags >> S::FB_tls) & 1; }
+
+  bool canBeOmittedFromSymbolTable() const {
+    return (Flags >> S::FB_may_omit) & 1;
+  }
+
+  bool isGlobal() const { return (Flags >> S::FB_global) & 1; }
+  bool isFormatSpecific() const { return (Flags >> S::FB_format_specific) & 1; }
+  bool isUnnamedAddr() const { return (Flags >> S::FB_unnamed_addr) & 1; }
+  bool isExecutable() const { return (Flags >> S::FB_executable) & 1; }
+
+  uint64_t getCommonSize() const {
+    assert(isCommon());
+    return CommonSize;
+  }
+
+  uint32_t getCommonAlignment() const {
+    assert(isCommon());
+    return CommonAlign;
+  }
+
+  /// COFF-specific: for weak externals, returns the name of the symbol that is
+  /// used as a fallback if the weak external remains undefined.
+  StringRef getCOFFWeakExternalFallback() const {
+    assert(isWeak() && isIndirect());
+    return COFFWeakExternFallbackName;
+  }
+
+  StringRef getSectionName() const { return SectionName; }
+};
+
+/// This class can be used to read a Symtab and Strtab produced by
+/// irsymtab::build.
+class Reader {
+  StringRef Symtab, Strtab;
+
+  ArrayRef<storage::Module> Modules;
+  ArrayRef<storage::Comdat> Comdats;
+  ArrayRef<storage::Symbol> Symbols;
+  ArrayRef<storage::Uncommon> Uncommons;
+
+  StringRef str(storage::Str S) const { return S.get(Strtab); }
+
+  template <typename T> ArrayRef<T> range(storage::Range<T> R) const {
+    return R.get(Symtab);
+  }
+
+  const storage::Header &header() const {
+    return *reinterpret_cast<const storage::Header *>(Symtab.data());
+  }
+
+public:
+  class SymbolRef;
+
+  Reader() = default;
+  Reader(StringRef Symtab, StringRef Strtab) : Symtab(Symtab), Strtab(Strtab) {
+    Modules = range(header().Modules);
+    Comdats = range(header().Comdats);
+    Symbols = range(header().Symbols);
+    Uncommons = range(header().Uncommons);
+  }
+
+  using symbol_range = iterator_range<object::content_iterator<SymbolRef>>;
+
+  /// Returns the symbol table for the entire bitcode file.
+  /// The symbols enumerated by this method are ephemeral, but they can be
+  /// copied into an irsymtab::Symbol object.
+  symbol_range symbols() const;
+
+  size_t getNumModules() const { return Modules.size(); }
+
+  /// Returns a slice of the symbol table for the I'th module in the file.
+  /// The symbols enumerated by this method are ephemeral, but they can be
+  /// copied into an irsymtab::Symbol object.
+  symbol_range module_symbols(unsigned I) const;
+
+  StringRef getTargetTriple() const { return str(header().TargetTriple); }
+
+  /// Returns the source file path specified at compile time.
+  StringRef getSourceFileName() const { return str(header().SourceFileName); }
+
+  /// Returns a table with all the comdats used by this file.
+  std::vector<StringRef> getComdatTable() const {
+    std::vector<StringRef> ComdatTable;
+    ComdatTable.reserve(Comdats.size());
+    for (auto C : Comdats)
+      ComdatTable.push_back(str(C.Name));
+    return ComdatTable;
+  }
+
+  /// COFF-specific: returns linker options specified in the input file.
+  StringRef getCOFFLinkerOpts() const { return str(header().COFFLinkerOpts); }
+};
+
+/// Ephemeral symbols produced by Reader::symbols() and
+/// Reader::module_symbols().
+class Reader::SymbolRef : public Symbol {
+  const storage::Symbol *SymI, *SymE;
+  const storage::Uncommon *UncI;
+  const Reader *R;
+
+  void read() {
+    if (SymI == SymE)
+      return;
+
+    Name = R->str(SymI->Name);
+    IRName = R->str(SymI->IRName);
+    ComdatIndex = SymI->ComdatIndex;
+    Flags = SymI->Flags;
+
+    if (Flags & (1 << storage::Symbol::FB_has_uncommon)) {
+      CommonSize = UncI->CommonSize;
+      CommonAlign = UncI->CommonAlign;
+      COFFWeakExternFallbackName = R->str(UncI->COFFWeakExternFallbackName);
+      SectionName = R->str(UncI->SectionName);
+    } else
+      // Reset this field so it can be queried unconditionally for all symbols.
+      SectionName = "";
+  }
+
+public:
+  SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE,
+            const storage::Uncommon *UncI, const Reader *R)
+      : SymI(SymI), SymE(SymE), UncI(UncI), R(R) {
+    read();
+  }
+
+  void moveNext() {
+    ++SymI;
+    if (Flags & (1 << storage::Symbol::FB_has_uncommon))
+      ++UncI;
+    read();
+  }
+
+  bool operator==(const SymbolRef &Other) const { return SymI == Other.SymI; }
+};
+
+inline Reader::symbol_range Reader::symbols() const {
+  return {SymbolRef(Symbols.begin(), Symbols.end(), Uncommons.begin(), this),
+          SymbolRef(Symbols.end(), Symbols.end(), nullptr, this)};
+}
+
+inline Reader::symbol_range Reader::module_symbols(unsigned I) const {
+  const storage::Module &M = Modules[I];
+  const storage::Symbol *MBegin = Symbols.begin() + M.Begin,
+                        *MEnd = Symbols.begin() + M.End;
+  return {SymbolRef(MBegin, MEnd, Uncommons.begin() + M.UncBegin, this),
+          SymbolRef(MEnd, MEnd, nullptr, this)};
+}
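+
+// Usage sketch (illustrative): enumerating the ephemeral SymbolRefs and
+// copying interesting ones into persistent irsymtab::Symbol values. `R` is an
+// assumed Reader over valid Symtab/Strtab blobs.
+//
+//   std::vector<Symbol> Defined;
+//   for (const Reader::SymbolRef &S : R.symbols())
+//     if (!S.isUndefined() && S.isGlobal())
+//       Defined.push_back(S); // slices SymbolRef down to its Symbol base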
+
+/// The contents of the irsymtab in a bitcode file. Any underlying data for the
+/// irsymtab are owned by Symtab and Strtab.
+struct FileContents {
+  SmallVector<char, 0> Symtab, Strtab;
+  Reader TheReader;
+};
+
+/// Reads the contents of a bitcode file, creating its irsymtab if necessary.
+Expected<FileContents> readBitcode(const BitcodeFileContents &BFC);
+
+} // end namespace irsymtab
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_IRSYMTAB_H
diff --git a/linux-x64/clang/include/llvm/Object/MachO.h b/linux-x64/clang/include/llvm/Object/MachO.h
new file mode 100644
index 0000000..bfd3462
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/MachO.h
@@ -0,0 +1,737 @@
+//===- MachO.h - MachO object file implementation ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachOObjectFile class, which implements the ObjectFile
+// interface for MachO files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHO_H
+#define LLVM_OBJECT_MACHO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+/// DiceRef - This is a value type class that represents a single
+/// data-in-code entry in the data-in-code table of a Mach-O object file.
+class DiceRef {
+  DataRefImpl DicePimpl;
+  const ObjectFile *OwningObject = nullptr;
+
+public:
+  DiceRef() = default;
+  DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);
+
+  bool operator==(const DiceRef &Other) const;
+  bool operator<(const DiceRef &Other) const;
+
+  void moveNext();
+
+  std::error_code getOffset(uint32_t &Result) const;
+  std::error_code getLength(uint16_t &Result) const;
+  std::error_code getKind(uint16_t &Result) const;
+
+  DataRefImpl getRawDataRefImpl() const;
+  const ObjectFile *getObjectFile() const;
+};
+using dice_iterator = content_iterator<DiceRef>;
+
+/// ExportEntry encapsulates the current-state-of-the-walk used when doing a
+/// non-recursive walk of the trie data structure.  This allows you to iterate
+/// across all exported symbols using:
+///      Error Err;
+///      for (const llvm::object::ExportEntry &AnExport : Obj->exports(&Err)) {
+///      }
+///      if (Err) { report error ...
+class ExportEntry {
+public:
+  ExportEntry(Error *Err, const MachOObjectFile *O, ArrayRef<uint8_t> Trie);
+
+  StringRef name() const;
+  uint64_t flags() const;
+  uint64_t address() const;
+  uint64_t other() const;
+  StringRef otherName() const;
+  uint32_t nodeOffset() const;
+
+  bool operator==(const ExportEntry &) const;
+
+  void moveNext();
+
+private:
+  friend class MachOObjectFile;
+
+  void moveToFirst();
+  void moveToEnd();
+  uint64_t readULEB128(const uint8_t *&p, const char **error);
+  void pushDownUntilBottom();
+  void pushNode(uint64_t Offset);
+
+  // Represents a node in the mach-o exports trie.
+  struct NodeState {
+    NodeState(const uint8_t *Ptr);
+
+    const uint8_t *Start;
+    const uint8_t *Current;
+    uint64_t Flags = 0;
+    uint64_t Address = 0;
+    uint64_t Other = 0;
+    const char *ImportName = nullptr;
+    unsigned ChildCount = 0;
+    unsigned NextChildIndex = 0;
+    unsigned ParentStringLength = 0;
+    bool IsExportNode = false;
+  };
+  using NodeList = SmallVector<NodeState, 16>;
+  using node_iterator = NodeList::const_iterator;
+
+  Error *E;
+  const MachOObjectFile *O;
+  ArrayRef<uint8_t> Trie;
+  SmallString<256> CumulativeString;
+  NodeList Stack;
+  bool Done = false;
+
+  iterator_range<node_iterator> nodes() const {
+    return make_range(Stack.begin(), Stack.end());
+  }
+};
+using export_iterator = content_iterator<ExportEntry>;
+
+// Segment info so SegIndex/SegOffset pairs in a Mach-O Bind or Rebase entry
+// can be checked and translated.  Only the SegIndex/SegOffset pairs from
+// checked entries are to be used with the segmentName(), sectionName() and
+// address() methods below.
+class BindRebaseSegInfo {
+public:
+  BindRebaseSegInfo(const MachOObjectFile *Obj);
+
+  // Used to check a Mach-O Bind or Rebase entry for errors when iterating.
+  const char *checkSegAndOffset(int32_t SegIndex, uint64_t SegOffset,
+                                bool endInvalid);
+  const char *checkCountAndSkip(uint32_t Count, uint32_t Skip,
+                                uint8_t PointerSize, int32_t SegIndex,
+                                uint64_t SegOffset);
+  // Used with valid SegIndex/SegOffset values from checked entries.
+  StringRef segmentName(int32_t SegIndex);
+  StringRef sectionName(int32_t SegIndex, uint64_t SegOffset);
+  uint64_t address(uint32_t SegIndex, uint64_t SegOffset);
+
+private:
+  struct SectionInfo {
+    uint64_t Address;
+    uint64_t Size;
+    StringRef SectionName;
+    StringRef SegmentName;
+    uint64_t OffsetInSegment;
+    uint64_t SegmentStartAddress;
+    int32_t SegmentIndex;
+  };
+  const SectionInfo &findSection(int32_t SegIndex, uint64_t SegOffset);
+
+  SmallVector<SectionInfo, 32> Sections;
+  int32_t MaxSegIndex;
+};
+
+/// MachORebaseEntry encapsulates the current state in the decompression of
+/// rebasing opcodes. This allows you to iterate through the compressed table of
+/// rebasing using:
+///    Error Err;
+///    for (const llvm::object::MachORebaseEntry &Entry : Obj->rebaseTable(&Err)) {
+///    }
+///    if (Err) { report error ...
+class MachORebaseEntry {
+public:
+  MachORebaseEntry(Error *Err, const MachOObjectFile *O,
+                   ArrayRef<uint8_t> opcodes, bool is64Bit);
+
+  int32_t segmentIndex() const;
+  uint64_t segmentOffset() const;
+  StringRef typeName() const;
+  StringRef segmentName() const;
+  StringRef sectionName() const;
+  uint64_t address() const;
+
+  bool operator==(const MachORebaseEntry &) const;
+
+  void moveNext();
+
+private:
+  friend class MachOObjectFile;
+
+  void moveToFirst();
+  void moveToEnd();
+  uint64_t readULEB128(const char **error);
+
+  Error *E;
+  const MachOObjectFile *O;
+  ArrayRef<uint8_t> Opcodes;
+  const uint8_t *Ptr;
+  uint64_t SegmentOffset = 0;
+  int32_t SegmentIndex = -1;
+  uint64_t RemainingLoopCount = 0;
+  uint64_t AdvanceAmount = 0;
+  uint8_t  RebaseType = 0;
+  uint8_t  PointerSize;
+  bool     Done = false;
+};
+using rebase_iterator = content_iterator<MachORebaseEntry>;
+
+/// MachOBindEntry encapsulates the current state in the decompression of
+/// binding opcodes. This allows you to iterate through the compressed table of
+/// bindings using:
+///    Error Err;
+///    for (const llvm::object::MachOBindEntry &Entry : Obj->bindTable(&Err)) {
+///    }
+///    if (Err) { report error ...
+class MachOBindEntry {
+public:
+  enum class Kind { Regular, Lazy, Weak };
+
+  MachOBindEntry(Error *Err, const MachOObjectFile *O,
+                 ArrayRef<uint8_t> Opcodes, bool is64Bit, MachOBindEntry::Kind);
+
+  int32_t segmentIndex() const;
+  uint64_t segmentOffset() const;
+  StringRef typeName() const;
+  StringRef symbolName() const;
+  uint32_t flags() const;
+  int64_t addend() const;
+  int ordinal() const;
+
+  StringRef segmentName() const;
+  StringRef sectionName() const;
+  uint64_t address() const;
+
+  bool operator==(const MachOBindEntry &) const;
+
+  void moveNext();
+
+private:
+  friend class MachOObjectFile;
+
+  void moveToFirst();
+  void moveToEnd();
+  uint64_t readULEB128(const char **error);
+  int64_t readSLEB128(const char **error);
+
+  Error *E;
+  const MachOObjectFile *O;
+  ArrayRef<uint8_t> Opcodes;
+  const uint8_t *Ptr;
+  uint64_t SegmentOffset = 0;
+  int32_t  SegmentIndex = -1;
+  StringRef SymbolName;
+  bool     LibraryOrdinalSet = false;
+  int      Ordinal = 0;
+  uint32_t Flags = 0;
+  int64_t  Addend = 0;
+  uint64_t RemainingLoopCount = 0;
+  uint64_t AdvanceAmount = 0;
+  uint8_t  BindType = 0;
+  uint8_t  PointerSize;
+  Kind     TableKind;
+  bool     Done = false;
+};
+using bind_iterator = content_iterator<MachOBindEntry>;
+
+class MachOObjectFile : public ObjectFile {
+public:
+  struct LoadCommandInfo {
+    const char *Ptr;      // Where in memory the load command is.
+    MachO::load_command C; // The command itself.
+  };
+  using LoadCommandList = SmallVector<LoadCommandInfo, 4>;
+  using load_command_iterator = LoadCommandList::const_iterator;
+
+  static Expected<std::unique_ptr<MachOObjectFile>>
+  create(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
+         uint32_t UniversalCputype = 0, uint32_t UniversalIndex = 0);
+
+  void moveSymbolNext(DataRefImpl &Symb) const override;
+
+  uint64_t getNValue(DataRefImpl Sym) const;
+  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
+
+  // MachO specific.
+  Error checkSymbolTable() const;
+
+  std::error_code getIndirectName(DataRefImpl Symb, StringRef &Res) const;
+  unsigned getSectionType(SectionRef Sec) const;
+
+  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+  unsigned getSymbolSectionID(SymbolRef Symb) const;
+  unsigned getSectionID(SectionRef Sec) const;
+
+  void moveSectionNext(DataRefImpl &Sec) const override;
+  std::error_code getSectionName(DataRefImpl Sec,
+                                 StringRef &Res) const override;
+  uint64_t getSectionAddress(DataRefImpl Sec) const override;
+  uint64_t getSectionIndex(DataRefImpl Sec) const override;
+  uint64_t getSectionSize(DataRefImpl Sec) const override;
+  std::error_code getSectionContents(DataRefImpl Sec,
+                                     StringRef &Res) const override;
+  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+  bool isSectionCompressed(DataRefImpl Sec) const override;
+  bool isSectionText(DataRefImpl Sec) const override;
+  bool isSectionData(DataRefImpl Sec) const override;
+  bool isSectionBSS(DataRefImpl Sec) const override;
+  bool isSectionVirtual(DataRefImpl Sec) const override;
+  bool isSectionBitcode(DataRefImpl Sec) const override;
+
+  /// When dsymutil generates the companion file, it strips all unnecessary
+  /// sections (e.g. everything in the _TEXT segment) by omitting their body
+  /// and setting the offset in their corresponding load command to zero.
+  ///
+  /// While the load command itself is valid, reading the section corresponds
+  /// to reading the number of bytes specified in the load command, starting
+  /// from offset 0 (i.e. the Mach-O header at the beginning of the file).
+  bool isSectionStripped(DataRefImpl Sec) const override;
+
+  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+
+  relocation_iterator extrel_begin() const;
+  relocation_iterator extrel_end() const;
+  iterator_range<relocation_iterator> external_relocations() const {
+    return make_range(extrel_begin(), extrel_end());
+  }
+
+  relocation_iterator locrel_begin() const;
+  relocation_iterator locrel_end() const;
+
+  void moveRelocationNext(DataRefImpl &Rel) const override;
+  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+  section_iterator getRelocationSection(DataRefImpl Rel) const;
+  uint64_t getRelocationType(DataRefImpl Rel) const override;
+  void getRelocationTypeName(DataRefImpl Rel,
+                             SmallVectorImpl<char> &Result) const override;
+  uint8_t getRelocationLength(DataRefImpl Rel) const;
+
+  // MachO specific.
+  std::error_code getLibraryShortNameByIndex(unsigned Index, StringRef &) const;
+  uint32_t getLibraryCount() const;
+
+  section_iterator getRelocationRelocatedSection(relocation_iterator Rel) const;
+
+  // TODO: Would be useful to have an iterator based version
+  // of the load command interface too.
+
+  basic_symbol_iterator symbol_begin() const override;
+  basic_symbol_iterator symbol_end() const override;
+
+  // MachO specific.
+  basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
+  uint64_t getSymbolIndex(DataRefImpl Symb) const;
+
+  section_iterator section_begin() const override;
+  section_iterator section_end() const override;
+
+  uint8_t getBytesInAddress() const override;
+
+  StringRef getFileFormatName() const override;
+  Triple::ArchType getArch() const override;
+  SubtargetFeatures getFeatures() const override { return SubtargetFeatures(); }
+  Triple getArchTriple(const char **McpuDefault = nullptr) const;
+
+  relocation_iterator section_rel_begin(unsigned Index) const;
+  relocation_iterator section_rel_end(unsigned Index) const;
+
+  dice_iterator begin_dices() const;
+  dice_iterator end_dices() const;
+
+  load_command_iterator begin_load_commands() const;
+  load_command_iterator end_load_commands() const;
+  iterator_range<load_command_iterator> load_commands() const;
+
+  /// For use iterating over all exported symbols.
+  iterator_range<export_iterator> exports(Error &Err) const;
+
+  /// For use examining a trie not in a MachOObjectFile.
+  static iterator_range<export_iterator> exports(Error &Err,
+                                                 ArrayRef<uint8_t> Trie,
+                                                 const MachOObjectFile *O =
+                                                                      nullptr);
+
+  /// For use iterating over all rebase table entries.
+  iterator_range<rebase_iterator> rebaseTable(Error &Err);
+
+  /// For use examining rebase opcodes in a MachOObjectFile.
+  static iterator_range<rebase_iterator> rebaseTable(Error &Err,
+                                                     MachOObjectFile *O,
+                                                     ArrayRef<uint8_t> Opcodes,
+                                                     bool is64);
+
+  /// For use iterating over all bind table entries.
+  iterator_range<bind_iterator> bindTable(Error &Err);
+
+  /// For use iterating over all lazy bind table entries.
+  iterator_range<bind_iterator> lazyBindTable(Error &Err);
+
+  /// For use iterating over all weak bind table entries.
+  iterator_range<bind_iterator> weakBindTable(Error &Err);
+
+  /// For use examining bind opcodes in a MachOObjectFile.
+  static iterator_range<bind_iterator> bindTable(Error &Err,
+                                                 MachOObjectFile *O,
+                                                 ArrayRef<uint8_t> Opcodes,
+                                                 bool is64,
+                                                 MachOBindEntry::Kind);
+
+  /// For use with a SegIndex,SegOffset pair in MachOBindEntry::moveNext() to
+  /// validate a MachOBindEntry.
+  const char *BindEntryCheckSegAndOffset(int32_t SegIndex, uint64_t SegOffset,
+                                         bool endInvalid) const {
+    return BindRebaseSectionTable->checkSegAndOffset(SegIndex, SegOffset,
+                                                     endInvalid);
+  }
+  /// For use in MachOBindEntry::moveNext() to validate a MachOBindEntry for
+  /// the BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB opcode.
+  const char *BindEntryCheckCountAndSkip(uint32_t Count, uint32_t Skip,
+                                         uint8_t PointerSize, int32_t SegIndex,
+                                         uint64_t SegOffset) const {
+    return BindRebaseSectionTable->checkCountAndSkip(Count, Skip, PointerSize,
+                                                     SegIndex, SegOffset);
+  }
+
+  /// For use with a SegIndex,SegOffset pair in MachORebaseEntry::moveNext() to
+  /// validate a MachORebaseEntry.
+  const char *RebaseEntryCheckSegAndOffset(int32_t SegIndex, uint64_t SegOffset,
+                                           bool endInvalid) const {
+    return BindRebaseSectionTable->checkSegAndOffset(SegIndex, SegOffset,
+                                                     endInvalid);
+  }
+  /// For use in MachORebaseEntry::moveNext() to validate a MachORebaseEntry for
+  /// the REBASE_OPCODE_DO_*_TIMES* opcodes.
+  const char *RebaseEntryCheckCountAndSkip(uint32_t Count, uint32_t Skip,
+                                         uint8_t PointerSize, int32_t SegIndex,
+                                         uint64_t SegOffset) const {
+    return BindRebaseSectionTable->checkCountAndSkip(Count, Skip, PointerSize,
+                                                     SegIndex, SegOffset);
+  }
+
+  /// For use with the SegIndex of a checked Mach-O Bind or Rebase entry to
+  /// get the segment name.
+  StringRef BindRebaseSegmentName(int32_t SegIndex) const {
+    return BindRebaseSectionTable->segmentName(SegIndex);
+  }
+
+  /// For use with a SegIndex,SegOffset pair from a checked Mach-O Bind or
+  /// Rebase entry to get the section name.
+  StringRef BindRebaseSectionName(uint32_t SegIndex, uint64_t SegOffset) const {
+    return BindRebaseSectionTable->sectionName(SegIndex, SegOffset);
+  }
+
+  /// For use with a SegIndex,SegOffset pair from a checked Mach-O Bind or
+  /// Rebase entry to get the address.
+  uint64_t BindRebaseAddress(uint32_t SegIndex, uint64_t SegOffset) const {
+    return BindRebaseSectionTable->address(SegIndex, SegOffset);
+  }
+
+  // In a MachO file, sections have a segment name. This is used in .o files,
+  // which have a single segment, but the field specifies which segment of the
+  // final object a section should be placed in.
+  StringRef getSectionFinalSegmentName(DataRefImpl Sec) const;
+
+  // Names are stored as 16 bytes. These return the raw 16 bytes without
+  // interpreting them as a C string.
+  ArrayRef<char> getSectionRawName(DataRefImpl Sec) const;
+  ArrayRef<char> getSectionRawFinalSegmentName(DataRefImpl Sec) const;
+
+  // MachO specific Info about relocations.
+  bool isRelocationScattered(const MachO::any_relocation_info &RE) const;
+  unsigned getPlainRelocationSymbolNum(
+                                    const MachO::any_relocation_info &RE) const;
+  bool getPlainRelocationExternal(const MachO::any_relocation_info &RE) const;
+  bool getScatteredRelocationScattered(
+                                    const MachO::any_relocation_info &RE) const;
+  uint32_t getScatteredRelocationValue(
+                                    const MachO::any_relocation_info &RE) const;
+  uint32_t getScatteredRelocationType(
+                                    const MachO::any_relocation_info &RE) const;
+  unsigned getAnyRelocationAddress(const MachO::any_relocation_info &RE) const;
+  unsigned getAnyRelocationPCRel(const MachO::any_relocation_info &RE) const;
+  unsigned getAnyRelocationLength(const MachO::any_relocation_info &RE) const;
+  unsigned getAnyRelocationType(const MachO::any_relocation_info &RE) const;
+  SectionRef getAnyRelocationSection(const MachO::any_relocation_info &RE) const;
+
+  // MachO specific structures.
+  MachO::section getSection(DataRefImpl DRI) const;
+  MachO::section_64 getSection64(DataRefImpl DRI) const;
+  MachO::section getSection(const LoadCommandInfo &L, unsigned Index) const;
+  MachO::section_64 getSection64(const LoadCommandInfo &L,unsigned Index) const;
+  MachO::nlist getSymbolTableEntry(DataRefImpl DRI) const;
+  MachO::nlist_64 getSymbol64TableEntry(DataRefImpl DRI) const;
+
+  MachO::linkedit_data_command
+  getLinkeditDataLoadCommand(const LoadCommandInfo &L) const;
+  MachO::segment_command
+  getSegmentLoadCommand(const LoadCommandInfo &L) const;
+  MachO::segment_command_64
+  getSegment64LoadCommand(const LoadCommandInfo &L) const;
+  MachO::linker_option_command
+  getLinkerOptionLoadCommand(const LoadCommandInfo &L) const;
+  MachO::version_min_command
+  getVersionMinLoadCommand(const LoadCommandInfo &L) const;
+  MachO::note_command
+  getNoteLoadCommand(const LoadCommandInfo &L) const;
+  MachO::build_version_command
+  getBuildVersionLoadCommand(const LoadCommandInfo &L) const;
+  MachO::build_tool_version
+  getBuildToolVersion(unsigned index) const;
+  MachO::dylib_command
+  getDylibIDLoadCommand(const LoadCommandInfo &L) const;
+  MachO::dyld_info_command
+  getDyldInfoLoadCommand(const LoadCommandInfo &L) const;
+  MachO::dylinker_command
+  getDylinkerCommand(const LoadCommandInfo &L) const;
+  MachO::uuid_command
+  getUuidCommand(const LoadCommandInfo &L) const;
+  MachO::rpath_command
+  getRpathCommand(const LoadCommandInfo &L) const;
+  MachO::source_version_command
+  getSourceVersionCommand(const LoadCommandInfo &L) const;
+  MachO::entry_point_command
+  getEntryPointCommand(const LoadCommandInfo &L) const;
+  MachO::encryption_info_command
+  getEncryptionInfoCommand(const LoadCommandInfo &L) const;
+  MachO::encryption_info_command_64
+  getEncryptionInfoCommand64(const LoadCommandInfo &L) const;
+  MachO::sub_framework_command
+  getSubFrameworkCommand(const LoadCommandInfo &L) const;
+  MachO::sub_umbrella_command
+  getSubUmbrellaCommand(const LoadCommandInfo &L) const;
+  MachO::sub_library_command
+  getSubLibraryCommand(const LoadCommandInfo &L) const;
+  MachO::sub_client_command
+  getSubClientCommand(const LoadCommandInfo &L) const;
+  MachO::routines_command
+  getRoutinesCommand(const LoadCommandInfo &L) const;
+  MachO::routines_command_64
+  getRoutinesCommand64(const LoadCommandInfo &L) const;
+  MachO::thread_command
+  getThreadCommand(const LoadCommandInfo &L) const;
+
+  MachO::any_relocation_info getRelocation(DataRefImpl Rel) const;
+  MachO::data_in_code_entry getDice(DataRefImpl Rel) const;
+  const MachO::mach_header &getHeader() const;
+  const MachO::mach_header_64 &getHeader64() const;
+  uint32_t
+  getIndirectSymbolTableEntry(const MachO::dysymtab_command &DLC,
+                              unsigned Index) const;
+  MachO::data_in_code_entry getDataInCodeTableEntry(uint32_t DataOffset,
+                                                    unsigned Index) const;
+  MachO::symtab_command getSymtabLoadCommand() const;
+  MachO::dysymtab_command getDysymtabLoadCommand() const;
+  MachO::linkedit_data_command getDataInCodeLoadCommand() const;
+  MachO::linkedit_data_command getLinkOptHintsLoadCommand() const;
+  ArrayRef<uint8_t> getDyldInfoRebaseOpcodes() const;
+  ArrayRef<uint8_t> getDyldInfoBindOpcodes() const;
+  ArrayRef<uint8_t> getDyldInfoWeakBindOpcodes() const;
+  ArrayRef<uint8_t> getDyldInfoLazyBindOpcodes() const;
+  ArrayRef<uint8_t> getDyldInfoExportsTrie() const;
+  ArrayRef<uint8_t> getUuid() const;
+
+  StringRef getStringTableData() const;
+  bool is64Bit() const;
+  void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;
+
+  static StringRef guessLibraryShortName(StringRef Name, bool &isFramework,
+                                         StringRef &Suffix);
+
+  static Triple::ArchType getArch(uint32_t CPUType);
+  static Triple getArchTriple(uint32_t CPUType, uint32_t CPUSubType,
+                              const char **McpuDefault = nullptr,
+                              const char **ArchFlag = nullptr);
+  static bool isValidArch(StringRef ArchFlag);
+  static Triple getHostArch();
+
+  bool isRelocatableObject() const override;
+
+  StringRef mapDebugSectionName(StringRef Name) const override;
+
+  bool hasPageZeroSegment() const { return HasPageZeroSegment; }
+
+  static bool classof(const Binary *v) {
+    return v->isMachO();
+  }
+
+  static uint32_t
+  getVersionMinMajor(MachO::version_min_command &C, bool SDK) {
+    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+    return (VersionOrSDK >> 16) & 0xffff;
+  }
+
+  static uint32_t
+  getVersionMinMinor(MachO::version_min_command &C, bool SDK) {
+    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+    return (VersionOrSDK >> 8) & 0xff;
+  }
+
+  static uint32_t
+  getVersionMinUpdate(MachO::version_min_command &C, bool SDK) {
+    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+    return VersionOrSDK & 0xff;
+  }
+
+  static std::string getBuildPlatform(uint32_t platform) {
+    switch (platform) {
+    case MachO::PLATFORM_MACOS: return "macos";
+    case MachO::PLATFORM_IOS: return "ios";
+    case MachO::PLATFORM_TVOS: return "tvos";
+    case MachO::PLATFORM_WATCHOS: return "watchos";
+    case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
+    default:
+      std::string ret;
+      raw_string_ostream ss(ret);
+      ss << format_hex(platform, 8, true);
+      return ss.str();
+    }
+  }
+
+  static std::string getBuildTool(uint32_t tools) {
+    switch (tools) {
+    case MachO::TOOL_CLANG: return "clang";
+    case MachO::TOOL_SWIFT: return "swift";
+    case MachO::TOOL_LD: return "ld";
+    default:
+      std::string ret;
+      raw_string_ostream ss(ret);
+      ss << format_hex(tools, 8, true);
+      return ss.str();
+    }
+  }
+
+  static std::string getVersionString(uint32_t version) {
+    uint32_t major = (version >> 16) & 0xffff;
+    uint32_t minor = (version >> 8) & 0xff;
+    uint32_t update = version & 0xff;
+
+    SmallString<32> Version;
+    Version = utostr(major) + "." + utostr(minor);
+    if (update != 0)
+      Version += "." + utostr(update);
+    return Version.str();
+  }
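+
+  // Worked example (illustrative): getVersionString(0x000A0D02) == "10.13.2",
+  // since major == 0x000A (10), minor == 0x0D (13) and update == 0x02 under
+  // the xxxx.yy.zz encoding decoded above.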
+
+private:
+  MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
+                  Error &Err, uint32_t UniversalCputype = 0,
+                  uint32_t UniversalIndex = 0);
+
+  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+
+  union {
+    MachO::mach_header_64 Header64;
+    MachO::mach_header Header;
+  };
+  using SectionList = SmallVector<const char*, 1>;
+  SectionList Sections;
+  using LibraryList = SmallVector<const char*, 1>;
+  LibraryList Libraries;
+  LoadCommandList LoadCommands;
+  using LibraryShortName = SmallVector<StringRef, 1>;
+  using BuildToolList = SmallVector<const char*, 1>;
+  BuildToolList BuildTools;
+  mutable LibraryShortName LibrariesShortNames;
+  std::unique_ptr<BindRebaseSegInfo> BindRebaseSectionTable;
+  const char *SymtabLoadCmd = nullptr;
+  const char *DysymtabLoadCmd = nullptr;
+  const char *DataInCodeLoadCmd = nullptr;
+  const char *LinkOptHintsLoadCmd = nullptr;
+  const char *DyldInfoLoadCmd = nullptr;
+  const char *UuidLoadCmd = nullptr;
+  bool HasPageZeroSegment = false;
+};
+
+/// DiceRef
+inline DiceRef::DiceRef(DataRefImpl DiceP, const ObjectFile *Owner)
+  : DicePimpl(DiceP) , OwningObject(Owner) {}
+
+inline bool DiceRef::operator==(const DiceRef &Other) const {
+  return DicePimpl == Other.DicePimpl;
+}
+
+inline bool DiceRef::operator<(const DiceRef &Other) const {
+  return DicePimpl < Other.DicePimpl;
+}
+
+inline void DiceRef::moveNext() {
+  const MachO::data_in_code_entry *P =
+    reinterpret_cast<const MachO::data_in_code_entry *>(DicePimpl.p);
+  DicePimpl.p = reinterpret_cast<uintptr_t>(P + 1);
+}
+
+// Since a Mach-O data in code reference, a DiceRef, can only be created when
+// the OwningObject ObjectFile is a MachOObjectFile, a static_cast<> is used in
+// the methods that read the fields of the reference.
+
+inline std::error_code DiceRef::getOffset(uint32_t &Result) const {
+  const MachOObjectFile *MachOOF =
+    static_cast<const MachOObjectFile *>(OwningObject);
+  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+  Result = Dice.offset;
+  return std::error_code();
+}
+
+inline std::error_code DiceRef::getLength(uint16_t &Result) const {
+  const MachOObjectFile *MachOOF =
+    static_cast<const MachOObjectFile *>(OwningObject);
+  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+  Result = Dice.length;
+  return std::error_code();
+}
+
+inline std::error_code DiceRef::getKind(uint16_t &Result) const {
+  const MachOObjectFile *MachOOF =
+    static_cast<const MachOObjectFile *>(OwningObject);
+  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+  Result = Dice.kind;
+  return std::error_code();
+}
+
+inline DataRefImpl DiceRef::getRawDataRefImpl() const {
+  return DicePimpl;
+}
+
+inline const ObjectFile *DiceRef::getObjectFile() const {
+  return OwningObject;
+}
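+
+// Example (illustrative sketch; `Dice` is an assumed DiceRef obtained from the
+// owning MachOObjectFile's data-in-code entries):
+//
+//   uint32_t Offset;
+//   uint16_t Length, Kind;
+//   if (!Dice.getOffset(Offset) && !Dice.getLength(Length) &&
+//       !Dice.getKind(Kind))
+//     outs() << "data in code at " << Offset << ", " << Length << " bytes\n";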
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_MACHO_H
diff --git a/linux-x64/clang/include/llvm/Object/MachOUniversal.h b/linux-x64/clang/include/llvm/Object/MachOUniversal.h
new file mode 100644
index 0000000..72837d0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/MachOUniversal.h
@@ -0,0 +1,168 @@
+//===- MachOUniversal.h - Mach-O universal binaries -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares Mach-O fat/universal binaries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHOUNIVERSAL_H
+#define LLVM_OBJECT_MACHOUNIVERSAL_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/MachO.h"
+
+namespace llvm {
+class StringRef;
+
+namespace object {
+
+class MachOUniversalBinary : public Binary {
+  virtual void anchor();
+
+  uint32_t Magic;
+  uint32_t NumberOfObjects;
+public:
+  class ObjectForArch {
+    const MachOUniversalBinary *Parent;
+    /// \brief Index of object in the universal binary.
+    uint32_t Index;
+    /// \brief Descriptor of the object.
+    MachO::fat_arch Header;
+    MachO::fat_arch_64 Header64;
+
+  public:
+    ObjectForArch(const MachOUniversalBinary *Parent, uint32_t Index);
+
+    void clear() {
+      Parent = nullptr;
+      Index = 0;
+    }
+
+    bool operator==(const ObjectForArch &Other) const {
+      return (Parent == Other.Parent) && (Index == Other.Index);
+    }
+
+    ObjectForArch getNext() const { return ObjectForArch(Parent, Index + 1); }
+    uint32_t getCPUType() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return Header.cputype;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.cputype;
+    }
+    uint32_t getCPUSubType() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return Header.cpusubtype;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.cpusubtype;
+    }
+    uint32_t getOffset() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return Header.offset;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.offset;
+    }
+    uint32_t getSize() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return Header.size;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.size;
+    }
+    uint32_t getAlign() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return Header.align;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.align;
+    }
+    uint32_t getReserved() const {
+      if (Parent->getMagic() == MachO::FAT_MAGIC)
+        return 0;
+      else // Parent->getMagic() == MachO::FAT_MAGIC_64
+        return Header64.reserved;
+    }
+    std::string getArchFlagName() const {
+      const char *McpuDefault, *ArchFlag;
+      if (Parent->getMagic() == MachO::FAT_MAGIC) {
+        MachOObjectFile::getArchTriple(Header.cputype, Header.cpusubtype,
+                                       &McpuDefault, &ArchFlag);
+      } else { // Parent->getMagic() == MachO::FAT_MAGIC_64
+        MachOObjectFile::getArchTriple(Header64.cputype, Header64.cpusubtype,
+                                       &McpuDefault, &ArchFlag);
+      }
+      return ArchFlag ? std::string(ArchFlag) : std::string();
+    }
+
+    Expected<std::unique_ptr<MachOObjectFile>> getAsObjectFile() const;
+
+    Expected<std::unique_ptr<Archive>> getAsArchive() const;
+  };
+
+  class object_iterator {
+    ObjectForArch Obj;
+  public:
+    object_iterator(const ObjectForArch &Obj) : Obj(Obj) {}
+    const ObjectForArch *operator->() const { return &Obj; }
+    const ObjectForArch &operator*() const { return Obj; }
+
+    bool operator==(const object_iterator &Other) const {
+      return Obj == Other.Obj;
+    }
+    bool operator!=(const object_iterator &Other) const {
+      return !(*this == Other);
+    }
+
+    object_iterator& operator++() {  // Preincrement
+      Obj = Obj.getNext();
+      return *this;
+    }
+  };
+
+  MachOUniversalBinary(MemoryBufferRef Source, Error &Err);
+  static Expected<std::unique_ptr<MachOUniversalBinary>>
+  create(MemoryBufferRef Source);
+
+  object_iterator begin_objects() const {
+    return ObjectForArch(this, 0);
+  }
+  object_iterator end_objects() const {
+    return ObjectForArch(nullptr, 0);
+  }
+
+  iterator_range<object_iterator> objects() const {
+    return make_range(begin_objects(), end_objects());
+  }
+
+  uint32_t getMagic() const { return Magic; }
+  uint32_t getNumberOfObjects() const { return NumberOfObjects; }
+
+  // Cast methods.
+  static bool classof(Binary const *V) {
+    return V->isMachOUniversalBinary();
+  }
+
+  Expected<std::unique_ptr<MachOObjectFile>>
+  getObjectForArch(StringRef ArchName) const;
+};
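+
+// Example (illustrative sketch; `MB` is an assumed MemoryBufferRef holding a
+// fat binary):
+//
+//   Expected<std::unique_ptr<MachOUniversalBinary>> BinOrErr =
+//       MachOUniversalBinary::create(MB);
+//   if (BinOrErr) {
+//     for (const MachOUniversalBinary::ObjectForArch &O :
+//          (*BinOrErr)->objects())
+//       outs() << O.getArchFlagName() << " at offset " << O.getOffset() << "\n";
+//   } else
+//     consumeError(BinOrErr.takeError());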
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_MACHOUNIVERSAL_H
diff --git a/linux-x64/clang/include/llvm/Object/ModuleSymbolTable.h b/linux-x64/clang/include/llvm/Object/ModuleSymbolTable.h
new file mode 100644
index 0000000..9e93228
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ModuleSymbolTable.h
@@ -0,0 +1,64 @@
+//===- ModuleSymbolTable.h - symbol table for in-memory IR ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents a symbol table built from in-memory IR. It provides
+// access to GlobalValues and should only be used if such access is required
+// (e.g. in the LTO implementation).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MODULESYMBOLTABLE_H
+#define LLVM_OBJECT_MODULESYMBOLTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/Allocator.h"
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class GlobalValue;
+
+class ModuleSymbolTable {
+public:
+  using AsmSymbol = std::pair<std::string, uint32_t>;
+  using Symbol = PointerUnion<GlobalValue *, AsmSymbol *>;
+
+private:
+  Module *FirstMod = nullptr;
+
+  SpecificBumpPtrAllocator<AsmSymbol> AsmSymbols;
+  std::vector<Symbol> SymTab;
+  Mangler Mang;
+
+public:
+  ArrayRef<Symbol> symbols() const { return SymTab; }
+  void addModule(Module *M);
+
+  void printSymbolName(raw_ostream &OS, Symbol S) const;
+  uint32_t getSymbolFlags(Symbol S) const;
+
+  /// Parse inline ASM and collect the symbols that are defined or referenced
+  /// in the given module \p M.
+  ///
+  /// For each symbol found, invoke the \p AsmSymbol callback with its name and
+  /// the associated flags.
+  static void CollectAsmSymbols(
+      const Module &M,
+      function_ref<void(StringRef, object::BasicSymbolRef::Flags)> AsmSymbol);
+};
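+
+// Example (illustrative sketch; `M` is an assumed in-memory llvm::Module):
+//
+//   ModuleSymbolTable SymTab;
+//   SymTab.addModule(&M);
+//   for (ModuleSymbolTable::Symbol S : SymTab.symbols()) {
+//     SymTab.printSymbolName(outs(), S);
+//     outs() << " flags=" << SymTab.getSymbolFlags(S) << "\n";
+//   }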
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_MODULESYMBOLTABLE_H
diff --git a/linux-x64/clang/include/llvm/Object/ObjectFile.h b/linux-x64/clang/include/llvm/Object/ObjectFile.h
new file mode 100644
index 0000000..9c4ae94
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/ObjectFile.h
@@ -0,0 +1,507 @@
+//===- ObjectFile.h - File format independent object file -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a file format independent ObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_OBJECTFILE_H
+#define LLVM_OBJECT_OBJECTFILE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <system_error>
+
+namespace llvm {
+
+class ARMAttributeParser;
+
+namespace object {
+
+class COFFObjectFile;
+class MachOObjectFile;
+class ObjectFile;
+class SectionRef;
+class SymbolRef;
+class symbol_iterator;
+class WasmObjectFile;
+
+using section_iterator = content_iterator<SectionRef>;
+
+/// This is a value type class that represents a single relocation in the list
+/// of relocations in the object file.
+class RelocationRef {
+  DataRefImpl RelocationPimpl;
+  const ObjectFile *OwningObject = nullptr;
+
+public:
+  RelocationRef() = default;
+  RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
+
+  bool operator==(const RelocationRef &Other) const;
+
+  void moveNext();
+
+  uint64_t getOffset() const;
+  symbol_iterator getSymbol() const;
+  uint64_t getType() const;
+
+  /// @brief Get a string that represents the type of this relocation.
+  ///
+  /// This is for display purposes only.
+  void getTypeName(SmallVectorImpl<char> &Result) const;
+
+  DataRefImpl getRawDataRefImpl() const;
+  const ObjectFile *getObject() const;
+};
+
+using relocation_iterator = content_iterator<RelocationRef>;
+
+/// This is a value type class that represents a single section in the list of
+/// sections in the object file.
+class SectionRef {
+  friend class SymbolRef;
+
+  DataRefImpl SectionPimpl;
+  const ObjectFile *OwningObject = nullptr;
+
+public:
+  SectionRef() = default;
+  SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
+
+  bool operator==(const SectionRef &Other) const;
+  bool operator!=(const SectionRef &Other) const;
+  bool operator<(const SectionRef &Other) const;
+
+  void moveNext();
+
+  std::error_code getName(StringRef &Result) const;
+  uint64_t getAddress() const;
+  uint64_t getIndex() const;
+  uint64_t getSize() const;
+  std::error_code getContents(StringRef &Result) const;
+
+  /// @brief Get the alignment of this section as the actual value (not log 2).
+  uint64_t getAlignment() const;
+
+  bool isCompressed() const;
+  bool isText() const;
+  bool isData() const;
+  bool isBSS() const;
+  bool isVirtual() const;
+  bool isBitcode() const;
+  bool isStripped() const;
+
+  bool containsSymbol(SymbolRef S) const;
+
+  relocation_iterator relocation_begin() const;
+  relocation_iterator relocation_end() const;
+  iterator_range<relocation_iterator> relocations() const {
+    return make_range(relocation_begin(), relocation_end());
+  }
+  section_iterator getRelocatedSection() const;
+
+  DataRefImpl getRawDataRefImpl() const;
+  const ObjectFile *getObject() const;
+};
+
+/// This is a value type class that represents a single symbol in the list of
+/// symbols in the object file.
+class SymbolRef : public BasicSymbolRef {
+  friend class SectionRef;
+
+public:
+  enum Type {
+    ST_Unknown, // Type not specified
+    ST_Data,
+    ST_Debug,
+    ST_File,
+    ST_Function,
+    ST_Other
+  };
+
+  SymbolRef() = default;
+  SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
+  SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) {
+    assert(isa<ObjectFile>(BasicSymbolRef::getObject()));
+  }
+
+  Expected<StringRef> getName() const;
+  /// Returns the symbol virtual address (i.e. address at which it will be
+  /// mapped).
+  Expected<uint64_t> getAddress() const;
+
+  /// Return the value of the symbol; depending on the object, this can be an
+  /// offset or a virtual address.
+  uint64_t getValue() const;
+
+  /// @brief Get the alignment of this symbol as the actual value (not log 2).
+  uint32_t getAlignment() const;
+  uint64_t getCommonSize() const;
+  Expected<SymbolRef::Type> getType() const;
+
+  /// @brief Get the section this symbol is defined in. The result is
+  /// section_end() if the symbol is undefined or absolute.
+  Expected<section_iterator> getSection() const;
+
+  const ObjectFile *getObject() const;
+};
+
+class symbol_iterator : public basic_symbol_iterator {
+public:
+  symbol_iterator(SymbolRef Sym) : basic_symbol_iterator(Sym) {}
+  symbol_iterator(const basic_symbol_iterator &B)
+      : basic_symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
+                                        cast<ObjectFile>(B->getObject()))) {}
+
+  const SymbolRef *operator->() const {
+    const BasicSymbolRef &P = basic_symbol_iterator::operator *();
+    return static_cast<const SymbolRef*>(&P);
+  }
+
+  const SymbolRef &operator*() const {
+    const BasicSymbolRef &P = basic_symbol_iterator::operator *();
+    return static_cast<const SymbolRef&>(P);
+  }
+};
+
+/// This class is the base class for all object file types. Concrete instances
+/// of this object are created by createObjectFile, which figures out which type
+/// to create.
+class ObjectFile : public SymbolicFile {
+  virtual void anchor();
+
+protected:
+  ObjectFile(unsigned int Type, MemoryBufferRef Source);
+
+  const uint8_t *base() const {
+    return reinterpret_cast<const uint8_t *>(Data.getBufferStart());
+  }
+
+  // These functions are for SymbolRef to call internally. The main goal of
+  // this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
+  // entry in the memory mapped object file. SymbolPimpl cannot contain any
+  // virtual functions because then it could not point into the memory mapped
+  // file.
+  //
+  // Implementations assume that the DataRefImpl is valid and has not been
+  // modified externally. It's UB otherwise.
+  friend class SymbolRef;
+
+  virtual Expected<StringRef> getSymbolName(DataRefImpl Symb) const = 0;
+  std::error_code printSymbolName(raw_ostream &OS,
+                                  DataRefImpl Symb) const override;
+  virtual Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const = 0;
+  virtual uint64_t getSymbolValueImpl(DataRefImpl Symb) const = 0;
+  virtual uint32_t getSymbolAlignment(DataRefImpl Symb) const;
+  virtual uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const = 0;
+  virtual Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const = 0;
+  virtual Expected<section_iterator>
+  getSymbolSection(DataRefImpl Symb) const = 0;
+
+  // Same as above for SectionRef.
+  friend class SectionRef;
+
+  virtual void moveSectionNext(DataRefImpl &Sec) const = 0;
+  virtual std::error_code getSectionName(DataRefImpl Sec,
+                                         StringRef &Res) const = 0;
+  virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
+  virtual uint64_t getSectionIndex(DataRefImpl Sec) const = 0;
+  virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
+  virtual std::error_code getSectionContents(DataRefImpl Sec,
+                                             StringRef &Res) const = 0;
+  virtual uint64_t getSectionAlignment(DataRefImpl Sec) const = 0;
+  virtual bool isSectionCompressed(DataRefImpl Sec) const = 0;
+  virtual bool isSectionText(DataRefImpl Sec) const = 0;
+  virtual bool isSectionData(DataRefImpl Sec) const = 0;
+  virtual bool isSectionBSS(DataRefImpl Sec) const = 0;
+  // A section is 'virtual' if its contents aren't present in the object image.
+  virtual bool isSectionVirtual(DataRefImpl Sec) const = 0;
+  virtual bool isSectionBitcode(DataRefImpl Sec) const;
+  virtual bool isSectionStripped(DataRefImpl Sec) const;
+  virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
+  virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
+  virtual section_iterator getRelocatedSection(DataRefImpl Sec) const;
+
+  // Same as above for RelocationRef.
+  friend class RelocationRef;
+  virtual void moveRelocationNext(DataRefImpl &Rel) const = 0;
+  virtual uint64_t getRelocationOffset(DataRefImpl Rel) const = 0;
+  virtual symbol_iterator getRelocationSymbol(DataRefImpl Rel) const = 0;
+  virtual uint64_t getRelocationType(DataRefImpl Rel) const = 0;
+  virtual void getRelocationTypeName(DataRefImpl Rel,
+                                     SmallVectorImpl<char> &Result) const = 0;
+
+  uint64_t getSymbolValue(DataRefImpl Symb) const;
+
+public:
+  ObjectFile() = delete;
+  ObjectFile(const ObjectFile &other) = delete;
+
+  uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
+    assert(getSymbolFlags(Symb) & SymbolRef::SF_Common);
+    return getCommonSymbolSizeImpl(Symb);
+  }
+
+  using symbol_iterator_range = iterator_range<symbol_iterator>;
+  symbol_iterator_range symbols() const {
+    return symbol_iterator_range(symbol_begin(), symbol_end());
+  }
+
+  virtual section_iterator section_begin() const = 0;
+  virtual section_iterator section_end() const = 0;
+
+  using section_iterator_range = iterator_range<section_iterator>;
+  section_iterator_range sections() const {
+    return section_iterator_range(section_begin(), section_end());
+  }
+
+  /// @brief The number of bytes used to represent an address in this object
+  ///        file format.
+  virtual uint8_t getBytesInAddress() const = 0;
+
+  virtual StringRef getFileFormatName() const = 0;
+  virtual Triple::ArchType getArch() const = 0;
+  virtual SubtargetFeatures getFeatures() const = 0;
+  virtual void setARMSubArch(Triple &TheTriple) const { }
+
+  /// @brief Create a triple from the data in this object file.
+  Triple makeTriple() const;
+
+  virtual std::error_code
+  getBuildAttributes(ARMAttributeParser &Attributes) const {
+    return std::error_code();
+  }
+
+  /// Maps a debug section name to a standard DWARF section name.
+  virtual StringRef mapDebugSectionName(StringRef Name) const { return Name; }
+
+  /// True if this is a relocatable object (.o/.obj).
+  virtual bool isRelocatableObject() const = 0;
+
+  /// @returns Pointer to ObjectFile subclass to handle this type of object.
+  /// @param ObjectPath The path to the object file; it must name a valid
+  ///        object file.
+  /// @brief Create ObjectFile from path.
+  static Expected<OwningBinary<ObjectFile>>
+  createObjectFile(StringRef ObjectPath);
+
+  static Expected<std::unique_ptr<ObjectFile>>
+  createObjectFile(MemoryBufferRef Object, llvm::file_magic Type);
+  static Expected<std::unique_ptr<ObjectFile>>
+  createObjectFile(MemoryBufferRef Object) {
+    return createObjectFile(Object, llvm::file_magic::unknown);
+  }
+
+  static bool classof(const Binary *v) {
+    return v->isObject();
+  }
+
+  static Expected<std::unique_ptr<COFFObjectFile>>
+  createCOFFObjectFile(MemoryBufferRef Object);
+
+  static Expected<std::unique_ptr<ObjectFile>>
+  createELFObjectFile(MemoryBufferRef Object);
+
+  static Expected<std::unique_ptr<MachOObjectFile>>
+  createMachOObjectFile(MemoryBufferRef Object,
+                        uint32_t UniversalCputype = 0,
+                        uint32_t UniversalIndex = 0);
+
+  static Expected<std::unique_ptr<WasmObjectFile>>
+  createWasmObjectFile(MemoryBufferRef Object);
+};
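+
+// Example (illustrative sketch; `Path` is an assumed path to an object file):
+//
+//   Expected<OwningBinary<ObjectFile>> BinOrErr =
+//       ObjectFile::createObjectFile(Path);
+//   if (BinOrErr) {
+//     for (const SectionRef &Sec : BinOrErr->getBinary()->sections()) {
+//       StringRef Name;
+//       if (!Sec.getName(Name))
+//         outs() << Name << " (" << Sec.getSize() << " bytes)\n";
+//     }
+//   } else
+//     consumeError(BinOrErr.takeError());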
+
+// Inline function definitions.
+inline SymbolRef::SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner)
+    : BasicSymbolRef(SymbolP, Owner) {}
+
+inline Expected<StringRef> SymbolRef::getName() const {
+  return getObject()->getSymbolName(getRawDataRefImpl());
+}
+
+inline Expected<uint64_t> SymbolRef::getAddress() const {
+  return getObject()->getSymbolAddress(getRawDataRefImpl());
+}
+
+inline uint64_t SymbolRef::getValue() const {
+  return getObject()->getSymbolValue(getRawDataRefImpl());
+}
+
+inline uint32_t SymbolRef::getAlignment() const {
+  return getObject()->getSymbolAlignment(getRawDataRefImpl());
+}
+
+inline uint64_t SymbolRef::getCommonSize() const {
+  return getObject()->getCommonSymbolSize(getRawDataRefImpl());
+}
+
+inline Expected<section_iterator> SymbolRef::getSection() const {
+  return getObject()->getSymbolSection(getRawDataRefImpl());
+}
+
+inline Expected<SymbolRef::Type> SymbolRef::getType() const {
+  return getObject()->getSymbolType(getRawDataRefImpl());
+}
+
+inline const ObjectFile *SymbolRef::getObject() const {
+  const SymbolicFile *O = BasicSymbolRef::getObject();
+  return cast<ObjectFile>(O);
+}
+
+/// SectionRef
+inline SectionRef::SectionRef(DataRefImpl SectionP,
+                              const ObjectFile *Owner)
+  : SectionPimpl(SectionP)
+  , OwningObject(Owner) {}
+
+inline bool SectionRef::operator==(const SectionRef &Other) const {
+  return SectionPimpl == Other.SectionPimpl;
+}
+
+inline bool SectionRef::operator!=(const SectionRef &Other) const {
+  return SectionPimpl != Other.SectionPimpl;
+}
+
+inline bool SectionRef::operator<(const SectionRef &Other) const {
+  return SectionPimpl < Other.SectionPimpl;
+}
+
+inline void SectionRef::moveNext() {
+  return OwningObject->moveSectionNext(SectionPimpl);
+}
+
+inline std::error_code SectionRef::getName(StringRef &Result) const {
+  return OwningObject->getSectionName(SectionPimpl, Result);
+}
+
+inline uint64_t SectionRef::getAddress() const {
+  return OwningObject->getSectionAddress(SectionPimpl);
+}
+
+inline uint64_t SectionRef::getIndex() const {
+  return OwningObject->getSectionIndex(SectionPimpl);
+}
+
+inline uint64_t SectionRef::getSize() const {
+  return OwningObject->getSectionSize(SectionPimpl);
+}
+
+inline std::error_code SectionRef::getContents(StringRef &Result) const {
+  return OwningObject->getSectionContents(SectionPimpl, Result);
+}
+
+inline uint64_t SectionRef::getAlignment() const {
+  return OwningObject->getSectionAlignment(SectionPimpl);
+}
+
+inline bool SectionRef::isCompressed() const {
+  return OwningObject->isSectionCompressed(SectionPimpl);
+}
+
+inline bool SectionRef::isText() const {
+  return OwningObject->isSectionText(SectionPimpl);
+}
+
+inline bool SectionRef::isData() const {
+  return OwningObject->isSectionData(SectionPimpl);
+}
+
+inline bool SectionRef::isBSS() const {
+  return OwningObject->isSectionBSS(SectionPimpl);
+}
+
+inline bool SectionRef::isVirtual() const {
+  return OwningObject->isSectionVirtual(SectionPimpl);
+}
+
+inline bool SectionRef::isBitcode() const {
+  return OwningObject->isSectionBitcode(SectionPimpl);
+}
+
+inline bool SectionRef::isStripped() const {
+  return OwningObject->isSectionStripped(SectionPimpl);
+}
+
+inline relocation_iterator SectionRef::relocation_begin() const {
+  return OwningObject->section_rel_begin(SectionPimpl);
+}
+
+inline relocation_iterator SectionRef::relocation_end() const {
+  return OwningObject->section_rel_end(SectionPimpl);
+}
+
+inline section_iterator SectionRef::getRelocatedSection() const {
+  return OwningObject->getRelocatedSection(SectionPimpl);
+}
+
+inline DataRefImpl SectionRef::getRawDataRefImpl() const {
+  return SectionPimpl;
+}
+
+inline const ObjectFile *SectionRef::getObject() const {
+  return OwningObject;
+}
+
+/// RelocationRef
+inline RelocationRef::RelocationRef(DataRefImpl RelocationP,
+                                    const ObjectFile *Owner)
+  : RelocationPimpl(RelocationP)
+  , OwningObject(Owner) {}
+
+inline bool RelocationRef::operator==(const RelocationRef &Other) const {
+  return RelocationPimpl == Other.RelocationPimpl;
+}
+
+inline void RelocationRef::moveNext() {
+  return OwningObject->moveRelocationNext(RelocationPimpl);
+}
+
+inline uint64_t RelocationRef::getOffset() const {
+  return OwningObject->getRelocationOffset(RelocationPimpl);
+}
+
+inline symbol_iterator RelocationRef::getSymbol() const {
+  return OwningObject->getRelocationSymbol(RelocationPimpl);
+}
+
+inline uint64_t RelocationRef::getType() const {
+  return OwningObject->getRelocationType(RelocationPimpl);
+}
+
+inline void RelocationRef::getTypeName(SmallVectorImpl<char> &Result) const {
+  return OwningObject->getRelocationTypeName(RelocationPimpl, Result);
+}
+
+inline DataRefImpl RelocationRef::getRawDataRefImpl() const {
+  return RelocationPimpl;
+}
+
+inline const ObjectFile *RelocationRef::getObject() const {
+  return OwningObject;
+}
+
+} // end namespace object
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_OBJECTFILE_H
diff --git a/linux-x64/clang/include/llvm/Object/RelocVisitor.h b/linux-x64/clang/include/llvm/Object/RelocVisitor.h
new file mode 100644
index 0000000..2d0e938
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/RelocVisitor.h
@@ -0,0 +1,324 @@
+//===- RelocVisitor.h - Visitor for object file relocations -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a wrapper around all the different types of relocations
+// in different file formats, such that a client can handle them in a unified
+// manner by only implementing a minimal number of functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_RELOCVISITOR_H
+#define LLVM_OBJECT_RELOCVISITOR_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdint>
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+/// @brief Base class for object file relocation visitors.
+class RelocVisitor {
+public:
+  explicit RelocVisitor(const ObjectFile &Obj) : ObjToVisit(Obj) {}
+
+  // TODO: Should handle multiple applied relocations, either by passing in the
+  // previously computed value or by counting paired relocations as a single
+  // visit.
+  uint64_t visit(uint32_t Rel, RelocationRef R, uint64_t Value = 0) {
+    if (isa<ELFObjectFileBase>(ObjToVisit))
+      return visitELF(Rel, R, Value);
+    if (isa<COFFObjectFile>(ObjToVisit))
+      return visitCOFF(Rel, R, Value);
+    if (isa<MachOObjectFile>(ObjToVisit))
+      return visitMachO(Rel, R, Value);
+
+    HasError = true;
+    return 0;
+  }
+
+  bool error() { return HasError; }
+
+private:
+  const ObjectFile &ObjToVisit;
+  bool HasError = false;
+
+  uint64_t visitELF(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
+      switch (ObjToVisit.getArch()) {
+      case Triple::x86_64:
+        return visitX86_64(Rel, R, Value);
+      case Triple::aarch64:
+      case Triple::aarch64_be:
+        return visitAarch64(Rel, R, Value);
+      case Triple::bpfel:
+      case Triple::bpfeb:
+        return visitBpf(Rel, R, Value);
+      case Triple::mips64el:
+      case Triple::mips64:
+        return visitMips64(Rel, R, Value);
+      case Triple::ppc64le:
+      case Triple::ppc64:
+        return visitPPC64(Rel, R, Value);
+      case Triple::systemz:
+        return visitSystemz(Rel, R, Value);
+      case Triple::sparcv9:
+        return visitSparc64(Rel, R, Value);
+      case Triple::amdgcn:
+        return visitAmdgpu(Rel, R, Value);
+      default:
+        HasError = true;
+        return 0;
+      }
+    }
+
+    // 32-bit object file
+    assert(ObjToVisit.getBytesInAddress() == 4 &&
+           "Invalid word size in object file");
+
+    switch (ObjToVisit.getArch()) {
+    case Triple::x86:
+      return visitX86(Rel, R, Value);
+    case Triple::ppc:
+      return visitPPC32(Rel, R, Value);
+    case Triple::arm:
+    case Triple::armeb:
+      return visitARM(Rel, R, Value);
+    case Triple::lanai:
+      return visitLanai(Rel, R, Value);
+    case Triple::mipsel:
+    case Triple::mips:
+      return visitMips32(Rel, R, Value);
+    case Triple::sparc:
+      return visitSparc32(Rel, R, Value);
+    case Triple::hexagon:
+      return visitHexagon(Rel, R, Value);
+    default:
+      HasError = true;
+      return 0;
+    }
+  }
+
+  int64_t getELFAddend(RelocationRef R) {
+    Expected<int64_t> AddendOrErr = ELFRelocationRef(R).getAddend();
+    handleAllErrors(AddendOrErr.takeError(), [](const ErrorInfoBase &EI) {
+      report_fatal_error(EI.message());
+    });
+    return *AddendOrErr;
+  }
+
+  uint64_t visitX86_64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_X86_64_NONE:
+      return 0;
+    case ELF::R_X86_64_64:
+      return Value + getELFAddend(R);
+    case ELF::R_X86_64_PC32:
+      return Value + getELFAddend(R) - R.getOffset();
+    case ELF::R_X86_64_32:
+    case ELF::R_X86_64_32S:
+      return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitAarch64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_AARCH64_ABS32: {
+      int64_t Res = Value + getELFAddend(R);
+      if (Res < INT32_MIN || Res > UINT32_MAX)
+        HasError = true;
+      return static_cast<uint32_t>(Res);
+    }
+    case ELF::R_AARCH64_ABS64:
+      return Value + getELFAddend(R);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitBpf(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_BPF_64_32:
+      return Value & 0xFFFFFFFF;
+    case ELF::R_BPF_64_64:
+      return Value;
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitMips64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_MIPS_32:
+      return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+    case ELF::R_MIPS_64:
+      return Value + getELFAddend(R);
+    case ELF::R_MIPS_TLS_DTPREL64:
+      return Value + getELFAddend(R) - 0x8000;
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitPPC64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_PPC64_ADDR32:
+      return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+    case ELF::R_PPC64_ADDR64:
+      return Value + getELFAddend(R);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitSystemz(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_390_32: {
+      int64_t Res = Value + getELFAddend(R);
+      if (Res < INT32_MIN || Res > UINT32_MAX)
+        HasError = true;
+      return static_cast<uint32_t>(Res);
+    }
+    case ELF::R_390_64:
+      return Value + getELFAddend(R);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitSparc64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_SPARC_32:
+    case ELF::R_SPARC_64:
+    case ELF::R_SPARC_UA32:
+    case ELF::R_SPARC_UA64:
+      return Value + getELFAddend(R);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitAmdgpu(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_AMDGPU_ABS32:
+    case ELF::R_AMDGPU_ABS64:
+      return Value + getELFAddend(R);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitX86(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (Rel) {
+    case ELF::R_386_NONE:
+      return 0;
+    case ELF::R_386_32:
+      return Value;
+    case ELF::R_386_PC32:
+      return Value - R.getOffset();
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitPPC32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (Rel == ELF::R_PPC_ADDR32)
+      return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitARM(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (Rel == ELF::R_ARM_ABS32) {
+      if ((int64_t)Value < INT32_MIN || (int64_t)Value > UINT32_MAX)
+        HasError = true;
+      return static_cast<uint32_t>(Value);
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitLanai(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (Rel == ELF::R_LANAI_32)
+      return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitMips32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    // FIXME: Take into account implicit addends to get correct results.
+    if (Rel == ELF::R_MIPS_32)
+      return Value & 0xFFFFFFFF;
+    if (Rel == ELF::R_MIPS_TLS_DTPREL32)
+      return Value & 0xFFFFFFFF;
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitSparc32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (Rel == ELF::R_SPARC_32 || Rel == ELF::R_SPARC_UA32)
+      return Value + getELFAddend(R);
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitHexagon(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (Rel == ELF::R_HEX_32)
+      return Value + getELFAddend(R);
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitCOFF(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    switch (ObjToVisit.getArch()) {
+    case Triple::x86:
+      switch (Rel) {
+      case COFF::IMAGE_REL_I386_SECREL:
+      case COFF::IMAGE_REL_I386_DIR32:
+        return static_cast<uint32_t>(Value);
+      }
+      break;
+    case Triple::x86_64:
+      switch (Rel) {
+      case COFF::IMAGE_REL_AMD64_SECREL:
+        return static_cast<uint32_t>(Value);
+      case COFF::IMAGE_REL_AMD64_ADDR64:
+        return Value;
+      }
+      break;
+    default:
+      break;
+    }
+    HasError = true;
+    return 0;
+  }
+
+  uint64_t visitMachO(uint32_t Rel, RelocationRef R, uint64_t Value) {
+    if (ObjToVisit.getArch() == Triple::x86_64 &&
+        Rel == MachO::X86_64_RELOC_UNSIGNED)
+      return Value;
+    HasError = true;
+    return 0;
+  }
+};
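+
+// Example (illustrative sketch; `Obj` is an assumed ObjectFile and `R` one of
+// its relocations, with `SymValue` the resolved symbol value):
+//
+//   RelocVisitor V(Obj);
+//   uint64_t Result = V.visit(R.getType(), R, SymValue);
+//   if (V.error())
+//     report_fatal_error("unsupported relocation");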
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_RELOCVISITOR_H
diff --git a/linux-x64/clang/include/llvm/Object/StackMapParser.h b/linux-x64/clang/include/llvm/Object/StackMapParser.h
new file mode 100644
index 0000000..557db5a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/StackMapParser.h
@@ -0,0 +1,439 @@
+//===- StackMapParser.h - StackMap Parsing Support --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKMAPPARSER_H
+#define LLVM_CODEGEN_STACKMAPPARSER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Endian.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+template <support::endianness Endianness>
+class StackMapV2Parser {
+public:
+  template <typename AccessorT>
+  class AccessorIterator {
+  public:
+    AccessorIterator(AccessorT A) : A(A) {}
+
+    AccessorIterator& operator++() { A = A.next(); return *this; }
+    AccessorIterator operator++(int) {
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    bool operator==(const AccessorIterator &Other) {
+      return A.P == Other.A.P;
+    }
+
+    bool operator!=(const AccessorIterator &Other) { return !(*this == Other); }
+
+    AccessorT& operator*() { return A; }
+    AccessorT* operator->() { return &A; }
+
+  private:
+    AccessorT A;
+  };
+
+  /// Accessor for function records.
+  class FunctionAccessor {
+    friend class StackMapV2Parser;
+
+  public:
+    /// Get the function address.
+    uint64_t getFunctionAddress() const {
+      return read<uint64_t>(P);
+    }
+
+    /// Get the function's stack size.
+    uint64_t getStackSize() const {
+      return read<uint64_t>(P + sizeof(uint64_t));
+    }
+
+    /// Get the number of callsite records.
+    uint64_t getRecordCount() const {
+      return read<uint64_t>(P + (2 * sizeof(uint64_t)));
+    }
+
+  private:
+    FunctionAccessor(const uint8_t *P) : P(P) {}
+
+    const static int FunctionAccessorSize = 3 * sizeof(uint64_t);
+
+    FunctionAccessor next() const {
+      return FunctionAccessor(P + FunctionAccessorSize);
+    }
+
+    const uint8_t *P;
+  };
+
+  /// Accessor for constants.
+  class ConstantAccessor {
+    friend class StackMapV2Parser;
+
+  public:
+    /// Return the value of this constant.
+    uint64_t getValue() const { return read<uint64_t>(P); }
+
+  private:
+    ConstantAccessor(const uint8_t *P) : P(P) {}
+
+    const static int ConstantAccessorSize = sizeof(uint64_t);
+
+    ConstantAccessor next() const {
+      return ConstantAccessor(P + ConstantAccessorSize);
+    }
+
+    const uint8_t *P;
+  };
+
+  enum class LocationKind : uint8_t {
+    Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5
+  };
+
+  /// Accessor for location records.
+  class LocationAccessor {
+    friend class StackMapV2Parser;
+    friend class RecordAccessor;
+
+  public:
+    /// Get the Kind for this location.
+    LocationKind getKind() const {
+      return LocationKind(P[KindOffset]);
+    }
+
+    /// Get the Dwarf register number for this location.
+    uint16_t getDwarfRegNum() const {
+      return read<uint16_t>(P + DwarfRegNumOffset);
+    }
+
+    /// Get the small-constant for this location. (Kind must be Constant).
+    uint32_t getSmallConstant() const {
+      assert(getKind() == LocationKind::Constant && "Not a small constant.");
+      return read<uint32_t>(P + SmallConstantOffset);
+    }
+
+    /// Get the constant-index for this location. (Kind must be ConstantIndex).
+    uint32_t getConstantIndex() const {
+      assert(getKind() == LocationKind::ConstantIndex &&
+             "Not a constant-index.");
+      return read<uint32_t>(P + SmallConstantOffset);
+    }
+
+    /// Get the offset for this location. (Kind must be Direct or Indirect).
+    int32_t getOffset() const {
+      assert((getKind() == LocationKind::Direct ||
+              getKind() == LocationKind::Indirect) &&
+             "Not direct or indirect.");
+      return read<int32_t>(P + SmallConstantOffset);
+    }
+
+  private:
+    LocationAccessor(const uint8_t *P) : P(P) {}
+
+    LocationAccessor next() const {
+      return LocationAccessor(P + LocationAccessorSize);
+    }
+
+    static const int KindOffset = 0;
+    static const int DwarfRegNumOffset = KindOffset + sizeof(uint16_t);
+    static const int SmallConstantOffset = DwarfRegNumOffset + sizeof(uint16_t);
+    static const int LocationAccessorSize = sizeof(uint64_t);
+
+    const uint8_t *P;
+  };
+
+  /// Accessor for stackmap live-out fields.
+  class LiveOutAccessor {
+    friend class StackMapV2Parser;
+    friend class RecordAccessor;
+
+  public:
+    /// Get the Dwarf register number for this live-out.
+    uint16_t getDwarfRegNum() const {
+      return read<uint16_t>(P + DwarfRegNumOffset);
+    }
+
+    /// Get the size in bytes of the live [sub]register.
+    unsigned getSizeInBytes() const {
+      return read<uint8_t>(P + SizeOffset);
+    }
+
+  private:
+    LiveOutAccessor(const uint8_t *P) : P(P) {}
+
+    LiveOutAccessor next() const {
+      return LiveOutAccessor(P + LiveOutAccessorSize);
+    }
+
+    static const int DwarfRegNumOffset = 0;
+    static const int SizeOffset =
+      DwarfRegNumOffset + sizeof(uint16_t) + sizeof(uint8_t);
+    static const int LiveOutAccessorSize = sizeof(uint32_t);
+
+    const uint8_t *P;
+  };
+
+  /// Accessor for stackmap records.
+  class RecordAccessor {
+    friend class StackMapV2Parser;
+
+  public:
+    using location_iterator = AccessorIterator<LocationAccessor>;
+    using liveout_iterator = AccessorIterator<LiveOutAccessor>;
+
+    /// Get the patchpoint/stackmap ID for this record.
+    uint64_t getID() const {
+      return read<uint64_t>(P + PatchpointIDOffset);
+    }
+
+    /// Get the instruction offset (from the start of the containing function)
+    /// for this record.
+    uint32_t getInstructionOffset() const {
+      return read<uint32_t>(P + InstructionOffsetOffset);
+    }
+
+    /// Get the number of locations contained in this record.
+    uint16_t getNumLocations() const {
+      return read<uint16_t>(P + NumLocationsOffset);
+    }
+
+    /// Get the location with the given index.
+    LocationAccessor getLocation(unsigned LocationIndex) const {
+      unsigned LocationOffset =
+        LocationListOffset + LocationIndex * LocationSize;
+      return LocationAccessor(P + LocationOffset);
+    }
+
+    /// Begin iterator for locations.
+    location_iterator location_begin() const {
+      return location_iterator(getLocation(0));
+    }
+
+    /// End iterator for locations.
+    location_iterator location_end() const {
+      return location_iterator(getLocation(getNumLocations()));
+    }
+
+    /// Iterator range for locations.
+    iterator_range<location_iterator> locations() const {
+      return make_range(location_begin(), location_end());
+    }
+
+    /// Get the number of liveouts contained in this record.
+    uint16_t getNumLiveOuts() const {
+      return read<uint16_t>(P + getNumLiveOutsOffset());
+    }
+
+    /// Get the live-out with the given index.
+    LiveOutAccessor getLiveOut(unsigned LiveOutIndex) const {
+      unsigned LiveOutOffset =
+        getNumLiveOutsOffset() + sizeof(uint16_t) + LiveOutIndex * LiveOutSize;
+      return LiveOutAccessor(P + LiveOutOffset);
+    }
+
+    /// Begin iterator for live-outs.
+    liveout_iterator liveouts_begin() const {
+      return liveout_iterator(getLiveOut(0));
+    }
+
+    /// End iterator for live-outs.
+    liveout_iterator liveouts_end() const {
+      return liveout_iterator(getLiveOut(getNumLiveOuts()));
+    }
+
+    /// Iterator range for live-outs.
+    iterator_range<liveout_iterator> liveouts() const {
+      return make_range(liveouts_begin(), liveouts_end());
+    }
+
+  private:
+    RecordAccessor(const uint8_t *P) : P(P) {}
+
+    unsigned getNumLiveOutsOffset() const {
+      return LocationListOffset + LocationSize * getNumLocations() +
+             sizeof(uint16_t);
+    }
+
+    unsigned getSizeInBytes() const {
+      unsigned RecordSize =
+        getNumLiveOutsOffset() + sizeof(uint16_t) + getNumLiveOuts() * LiveOutSize;
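+      // Round the raw record size up to the next 8-byte boundary; records are
+      // 8-byte aligned in the stackmap section.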
+      return (RecordSize + 7) & ~0x7;
+    }
+
+    RecordAccessor next() const {
+      return RecordAccessor(P + getSizeInBytes());
+    }
+
+    static const unsigned PatchpointIDOffset = 0;
+    static const unsigned InstructionOffsetOffset =
+      PatchpointIDOffset + sizeof(uint64_t);
+    static const unsigned NumLocationsOffset =
+      InstructionOffsetOffset + sizeof(uint32_t) + sizeof(uint16_t);
+    static const unsigned LocationListOffset =
+      NumLocationsOffset + sizeof(uint16_t);
+    static const unsigned LocationSize = sizeof(uint64_t);
+    static const unsigned LiveOutSize = sizeof(uint32_t);
+
+    const uint8_t *P;
+  };
+
+  /// Construct a parser for a version-2 stackmap. StackMap data will be read
+  /// from the given array.
+  StackMapV2Parser(ArrayRef<uint8_t> StackMapSection)
+      : StackMapSection(StackMapSection) {
+    ConstantsListOffset = FunctionListOffset + getNumFunctions() * FunctionSize;
+
+    assert(StackMapSection[0] == 2 &&
+           "StackMapV2Parser can only parse version 2 stackmaps");
+
+    unsigned CurrentRecordOffset =
+      ConstantsListOffset + getNumConstants() * ConstantSize;
+
+    for (unsigned I = 0, E = getNumRecords(); I != E; ++I) {
+      StackMapRecordOffsets.push_back(CurrentRecordOffset);
+      CurrentRecordOffset +=
+        RecordAccessor(&StackMapSection[CurrentRecordOffset]).getSizeInBytes();
+    }
+  }
+
+  using function_iterator = AccessorIterator<FunctionAccessor>;
+  using constant_iterator = AccessorIterator<ConstantAccessor>;
+  using record_iterator = AccessorIterator<RecordAccessor>;
+
+  /// Get the version number of this stackmap. (Always returns 2).
+  unsigned getVersion() const { return 2; }
+
+  /// Get the number of functions in the stack map.
+  uint32_t getNumFunctions() const {
+    return read<uint32_t>(&StackMapSection[NumFunctionsOffset]);
+  }
+
+  /// Get the number of large constants in the stack map.
+  uint32_t getNumConstants() const {
+    return read<uint32_t>(&StackMapSection[NumConstantsOffset]);
+  }
+
+  /// Get the number of stackmap records in the stackmap.
+  uint32_t getNumRecords() const {
+    return read<uint32_t>(&StackMapSection[NumRecordsOffset]);
+  }
+
+  /// Return a FunctionAccessor for the given function index.
+  FunctionAccessor getFunction(unsigned FunctionIndex) const {
+    return FunctionAccessor(StackMapSection.data() +
+                            getFunctionOffset(FunctionIndex));
+  }
+
+  /// Begin iterator for functions.
+  function_iterator functions_begin() const {
+    return function_iterator(getFunction(0));
+  }
+
+  /// End iterator for functions.
+  function_iterator functions_end() const {
+    return function_iterator(
+             FunctionAccessor(StackMapSection.data() +
+                              getFunctionOffset(getNumFunctions())));
+  }
+
+  /// Iterator range for functions.
+  iterator_range<function_iterator> functions() const {
+    return make_range(functions_begin(), functions_end());
+  }
+
+  /// Return the large constant at the given index.
+  ConstantAccessor getConstant(unsigned ConstantIndex) const {
+    return ConstantAccessor(StackMapSection.data() +
+                            getConstantOffset(ConstantIndex));
+  }
+
+  /// Begin iterator for constants.
+  constant_iterator constants_begin() const {
+    return constant_iterator(getConstant(0));
+  }
+
+  /// End iterator for constants.
+  constant_iterator constants_end() const {
+    return constant_iterator(
+             ConstantAccessor(StackMapSection.data() +
+                              getConstantOffset(getNumConstants())));
+  }
+
+  /// Iterator range for constants.
+  iterator_range<constant_iterator> constants() const {
+    return make_range(constants_begin(), constants_end());
+  }
+
+  /// Return a RecordAccessor for the given record index.
+  RecordAccessor getRecord(unsigned RecordIndex) const {
+    std::size_t RecordOffset = StackMapRecordOffsets[RecordIndex];
+    return RecordAccessor(StackMapSection.data() + RecordOffset);
+  }
+
+  /// Begin iterator for records.
+  record_iterator records_begin() const {
+    if (getNumRecords() == 0)
+      return record_iterator(RecordAccessor(nullptr));
+    return record_iterator(getRecord(0));
+  }
+
+  /// End iterator for records.
+  record_iterator records_end() const {
+    // Records need to be handled specially, since we cache the start addresses
+    // for them: we can't just compute the one-past-the-end address; we have to
+    // look at the last record and use the 'next' method.
+    if (getNumRecords() == 0)
+      return record_iterator(RecordAccessor(nullptr));
+    return record_iterator(getRecord(getNumRecords() - 1).next());
+  }
+
+  /// Iterator range for records.
+  iterator_range<record_iterator> records() const {
+    return make_range(records_begin(), records_end());
+  }
+
+private:
+  template <typename T>
+  static T read(const uint8_t *P) {
+    return support::endian::read<T, Endianness, 1>(P);
+  }
+
+  static const unsigned HeaderOffset = 0;
+  static const unsigned NumFunctionsOffset = HeaderOffset + sizeof(uint32_t);
+  static const unsigned NumConstantsOffset = NumFunctionsOffset + sizeof(uint32_t);
+  static const unsigned NumRecordsOffset = NumConstantsOffset + sizeof(uint32_t);
+  static const unsigned FunctionListOffset = NumRecordsOffset + sizeof(uint32_t);
+
+  static const unsigned FunctionSize = 3 * sizeof(uint64_t);
+  static const unsigned ConstantSize = sizeof(uint64_t);
+
+  std::size_t getFunctionOffset(unsigned FunctionIndex) const {
+    return FunctionListOffset + FunctionIndex * FunctionSize;
+  }
+
+  std::size_t getConstantOffset(unsigned ConstantIndex) const {
+    return ConstantsListOffset + ConstantIndex * ConstantSize;
+  }
+
+  ArrayRef<uint8_t> StackMapSection;
+  unsigned ConstantsListOffset;
+  std::vector<unsigned> StackMapRecordOffsets;
+};
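+
+// Example (illustrative sketch; `Contents` is an assumed ArrayRef<uint8_t>
+// covering a little-endian, version-2 stackmap section):
+//
+//   StackMapV2Parser<support::little> Parser(Contents);
+//   for (const auto &R : Parser.records())
+//     outs() << "record " << R.getID() << ": " << R.getNumLocations()
+//            << " locations\n";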
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKMAPPARSER_H
diff --git a/linux-x64/clang/include/llvm/Object/SymbolSize.h b/linux-x64/clang/include/llvm/Object/SymbolSize.h
new file mode 100644
index 0000000..1a1dc87
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/SymbolSize.h
@@ -0,0 +1,34 @@
+//===- SymbolSize.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_SYMBOLSIZE_H
+#define LLVM_OBJECT_SYMBOLSIZE_H
+
+#include "llvm/Object/ObjectFile.h"
+
+namespace llvm {
+namespace object {
+
+struct SymEntry {
+  symbol_iterator I;
+  uint64_t Address;
+  unsigned Number;
+  unsigned SectionID;
+};
+
+int compareAddress(const SymEntry *A, const SymEntry *B);
+
+std::vector<std::pair<SymbolRef, uint64_t>>
+computeSymbolSizes(const ObjectFile &O);
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_SYMBOLSIZE_H
diff --git a/linux-x64/clang/include/llvm/Object/SymbolicFile.h b/linux-x64/clang/include/llvm/Object/SymbolicFile.h
new file mode 100644
index 0000000..5b9549b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/SymbolicFile.h
@@ -0,0 +1,216 @@
+//===- SymbolicFile.h - Interface that only provides symbols ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolicFile interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_SYMBOLICFILE_H
+#define LLVM_OBJECT_SYMBOLICFILE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cinttypes>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <memory>
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+union DataRefImpl {
+  // This entire union should probably be a
+  // char[max(8, sizeof(uintptr_t))] and require the impl to cast.
+  struct {
+    uint32_t a, b;
+  } d;
+  uintptr_t p;
+
+  DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); }
+};
+
+template <typename OStream>
+OStream& operator<<(OStream &OS, const DataRefImpl &D) {
+  OS << "(" << format("0x%08" PRIxPTR, D.p) << " (" << format("0x%08x", D.d.a)
+     << ", " << format("0x%08x", D.d.b) << "))";
+  return OS;
+}
+
+inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
+  // Check for bitwise identity. This is the only legal way to compare a union
+  // w/o knowing which member is in use.
+  return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
+}
+
+inline bool operator!=(const DataRefImpl &a, const DataRefImpl &b) {
+  return !operator==(a, b);
+}
+
+inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
+  // Check for bitwise identity. This is the only legal way to compare a union
+  // w/o knowing which member is in use.
+  return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
+}
+
+template <class content_type>
+class content_iterator
+    : public std::iterator<std::forward_iterator_tag, content_type> {
+  content_type Current;
+
+public:
+  content_iterator(content_type symb) : Current(std::move(symb)) {}
+
+  const content_type *operator->() const { return &Current; }
+
+  const content_type &operator*() const { return Current; }
+
+  bool operator==(const content_iterator &other) const {
+    return Current == other.Current;
+  }
+
+  bool operator!=(const content_iterator &other) const {
+    return !(*this == other);
+  }
+
+  content_iterator &operator++() { // preincrement
+    Current.moveNext();
+    return *this;
+  }
+};
+
+class SymbolicFile;
+
+/// This is a value type class that represents a single symbol in the list of
+/// symbols in the object file.
+class BasicSymbolRef {
+  DataRefImpl SymbolPimpl;
+  const SymbolicFile *OwningObject = nullptr;
+
+public:
+  enum Flags : unsigned {
+    SF_None = 0,
+    SF_Undefined = 1U << 0,      // Symbol is defined in another object file
+    SF_Global = 1U << 1,         // Global symbol
+    SF_Weak = 1U << 2,           // Weak symbol
+    SF_Absolute = 1U << 3,       // Absolute symbol
+    SF_Common = 1U << 4,         // Symbol has common linkage
+    SF_Indirect = 1U << 5,       // Symbol is an alias to another symbol
+    SF_Exported = 1U << 6,       // Symbol is visible to other DSOs
+    SF_FormatSpecific = 1U << 7, // Specific to the object file format
+                                 // (e.g. section symbols)
+    SF_Thumb = 1U << 8,          // Thumb symbol in a 32-bit ARM binary
+    SF_Hidden = 1U << 9,         // Symbol has hidden visibility
+    SF_Const = 1U << 10,         // Symbol value is constant
+    SF_Executable = 1U << 11,    // Symbol points to an executable section
+                                 // (IR only)
+  };
+
+  BasicSymbolRef() = default;
+  BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);
+
+  bool operator==(const BasicSymbolRef &Other) const;
+  bool operator<(const BasicSymbolRef &Other) const;
+
+  void moveNext();
+
+  std::error_code printName(raw_ostream &OS) const;
+
+  /// Get symbol flags (bitwise OR of SymbolRef::Flags)
+  uint32_t getFlags() const;
+
+  DataRefImpl getRawDataRefImpl() const;
+  const SymbolicFile *getObject() const;
+};
+
+using basic_symbol_iterator = content_iterator<BasicSymbolRef>;
+
+class SymbolicFile : public Binary {
+public:
+  SymbolicFile(unsigned int Type, MemoryBufferRef Source);
+  ~SymbolicFile() override;
+
+  // virtual interface.
+  virtual void moveSymbolNext(DataRefImpl &Symb) const = 0;
+
+  virtual std::error_code printSymbolName(raw_ostream &OS,
+                                          DataRefImpl Symb) const = 0;
+
+  virtual uint32_t getSymbolFlags(DataRefImpl Symb) const = 0;
+
+  virtual basic_symbol_iterator symbol_begin() const = 0;
+
+  virtual basic_symbol_iterator symbol_end() const = 0;
+
+  // convenience wrappers.
+  using basic_symbol_iterator_range = iterator_range<basic_symbol_iterator>;
+  basic_symbol_iterator_range symbols() const {
+    return basic_symbol_iterator_range(symbol_begin(), symbol_end());
+  }
+
+  // construction helpers.
+  static Expected<std::unique_ptr<SymbolicFile>>
+  createSymbolicFile(MemoryBufferRef Object, llvm::file_magic Type,
+                     LLVMContext *Context);
+
+  static Expected<std::unique_ptr<SymbolicFile>>
+  createSymbolicFile(MemoryBufferRef Object) {
+    return createSymbolicFile(Object, llvm::file_magic::unknown, nullptr);
+  }
+  static Expected<OwningBinary<SymbolicFile>>
+  createSymbolicFile(StringRef ObjectPath);
+
+  static bool classof(const Binary *v) {
+    return v->isSymbolic();
+  }
+};
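+
+// Illustrative sketch (assumes a valid object file on disk): every factory
+// returns an Expected<>, which must be checked before use, e.g.:
+//
+//   Expected<OwningBinary<SymbolicFile>> FileOrErr =
+//       SymbolicFile::createSymbolicFile("example.o");
+//   if (!FileOrErr)
+//     return FileOrErr.takeError();
+//   for (const BasicSymbolRef &Sym : FileOrErr->getBinary()->symbols())
+//     Sym.printName(outs());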
+
+inline BasicSymbolRef::BasicSymbolRef(DataRefImpl SymbolP,
+                                      const SymbolicFile *Owner)
+    : SymbolPimpl(SymbolP), OwningObject(Owner) {}
+
+inline bool BasicSymbolRef::operator==(const BasicSymbolRef &Other) const {
+  return SymbolPimpl == Other.SymbolPimpl;
+}
+
+inline bool BasicSymbolRef::operator<(const BasicSymbolRef &Other) const {
+  return SymbolPimpl < Other.SymbolPimpl;
+}
+
+inline void BasicSymbolRef::moveNext() {
+  return OwningObject->moveSymbolNext(SymbolPimpl);
+}
+
+inline std::error_code BasicSymbolRef::printName(raw_ostream &OS) const {
+  return OwningObject->printSymbolName(OS, SymbolPimpl);
+}
+
+inline uint32_t BasicSymbolRef::getFlags() const {
+  return OwningObject->getSymbolFlags(SymbolPimpl);
+}
+
+inline DataRefImpl BasicSymbolRef::getRawDataRefImpl() const {
+  return SymbolPimpl;
+}
+
+inline const SymbolicFile *BasicSymbolRef::getObject() const {
+  return OwningObject;
+}
+
+} // end namespace object
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_SYMBOLICFILE_H
diff --git a/linux-x64/clang/include/llvm/Object/Wasm.h b/linux-x64/clang/include/llvm/Object/Wasm.h
new file mode 100644
index 0000000..d49acf3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/Wasm.h
@@ -0,0 +1,278 @@
+//===- WasmObjectFile.h - Wasm object file implementation -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the WasmObjectFile class, which implements the ObjectFile
+// interface for Wasm files.
+//
+// See: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_WASM_H
+#define LLVM_OBJECT_WASM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace object {
+
+class WasmSymbol {
+public:
+  WasmSymbol(const wasm::WasmSymbolInfo &Info,
+             const wasm::WasmSignature *FunctionType,
+             const wasm::WasmGlobalType *GlobalType)
+      : Info(Info), FunctionType(FunctionType), GlobalType(GlobalType) {}
+
+  const wasm::WasmSymbolInfo &Info;
+  const wasm::WasmSignature *FunctionType;
+  const wasm::WasmGlobalType *GlobalType;
+
+  bool isTypeFunction() const {
+    return Info.Kind == wasm::WASM_SYMBOL_TYPE_FUNCTION;
+  }
+
+  bool isTypeData() const { return Info.Kind == wasm::WASM_SYMBOL_TYPE_DATA; }
+
+  bool isTypeGlobal() const {
+    return Info.Kind == wasm::WASM_SYMBOL_TYPE_GLOBAL;
+  }
+
+  bool isDefined() const { return !isUndefined(); }
+
+  bool isUndefined() const {
+    return (Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) != 0;
+  }
+
+  bool isBindingWeak() const {
+    return getBinding() == wasm::WASM_SYMBOL_BINDING_WEAK;
+  }
+
+  bool isBindingGlobal() const {
+    return getBinding() == wasm::WASM_SYMBOL_BINDING_GLOBAL;
+  }
+
+  bool isBindingLocal() const {
+    return getBinding() == wasm::WASM_SYMBOL_BINDING_LOCAL;
+  }
+
+  unsigned getBinding() const {
+    return Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK;
+  }
+
+  bool isHidden() const {
+    return getVisibility() == wasm::WASM_SYMBOL_VISIBILITY_HIDDEN;
+  }
+
+  unsigned getVisibility() const {
+    return Info.Flags & wasm::WASM_SYMBOL_VISIBILITY_MASK;
+  }
+
+  void print(raw_ostream &Out) const {
+    Out << "Name=" << Info.Name << ", Kind=" << Info.Kind
+        << ", Flags=" << Info.Flags;
+    if (!isTypeData()) {
+      Out << ", ElemIndex=" << Info.ElementIndex;
+    } else if (isDefined()) {
+      Out << ", Segment=" << Info.DataRef.Segment;
+      Out << ", Offset=" << Info.DataRef.Offset;
+      Out << ", Size=" << Info.DataRef.Size;
+    }
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
+#endif
+};
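+
+// Illustrative sketch: the predicates above just decode Info.Flags, so a
+// client might classify symbols as, e.g.:
+//
+//   if (Sym.isUndefined() && !Sym.isBindingWeak())
+//     reportUndefined(Sym);   // reportUndefined: hypothetical handler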
+
+struct WasmSection {
+  WasmSection() = default;
+
+  uint32_t Type = 0; // Section type (one of the wasm::WASM_SEC_* values)
+  uint32_t Offset = 0; // Offset within the file
+  StringRef Name; // Section name (user-defined sections only)
+  ArrayRef<uint8_t> Content; // Section content
+  std::vector<wasm::WasmRelocation> Relocations; // Relocations for this section
+};
+
+struct WasmSegment {
+  uint32_t SectionOffset;
+  wasm::WasmDataSegment Data;
+};
+
+class WasmObjectFile : public ObjectFile {
+public:
+  WasmObjectFile(MemoryBufferRef Object, Error &Err);
+
+  const wasm::WasmObjectHeader &getHeader() const;
+  const WasmSymbol &getWasmSymbol(const DataRefImpl &Symb) const;
+  const WasmSymbol &getWasmSymbol(const SymbolRef &Symbol) const;
+  const WasmSection &getWasmSection(const SectionRef &Section) const;
+  const wasm::WasmRelocation &getWasmRelocation(const RelocationRef &Ref) const;
+
+  static bool classof(const Binary *v) { return v->isWasm(); }
+
+  ArrayRef<wasm::WasmSignature> types() const { return Signatures; }
+  ArrayRef<uint32_t> functionTypes() const { return FunctionTypes; }
+  ArrayRef<wasm::WasmImport> imports() const { return Imports; }
+  ArrayRef<wasm::WasmTable> tables() const { return Tables; }
+  ArrayRef<wasm::WasmLimits> memories() const { return Memories; }
+  ArrayRef<wasm::WasmGlobal> globals() const { return Globals; }
+  ArrayRef<wasm::WasmExport> exports() const { return Exports; }
+  ArrayRef<WasmSymbol> syms() const { return Symbols; }
+  const wasm::WasmLinkingData &linkingData() const { return LinkingData; }
+  uint32_t getNumberOfSymbols() const { return Symbols.size(); }
+  ArrayRef<wasm::WasmElemSegment> elements() const { return ElemSegments; }
+  ArrayRef<WasmSegment> dataSegments() const { return DataSegments; }
+  ArrayRef<wasm::WasmFunction> functions() const { return Functions; }
+  ArrayRef<wasm::WasmFunctionName> debugNames() const { return DebugNames; }
+  uint32_t startFunction() const { return StartFunction; }
+  uint32_t getNumImportedGlobals() const { return NumImportedGlobals; }
+  uint32_t getNumImportedFunctions() const { return NumImportedFunctions; }
+
+  void moveSymbolNext(DataRefImpl &Symb) const override;
+
+  uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+
+  basic_symbol_iterator symbol_begin() const override;
+
+  basic_symbol_iterator symbol_end() const override;
+  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
+
+  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+  uint64_t getWasmSymbolValue(const WasmSymbol &Sym) const;
+  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
+  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+
+  // Overrides from SectionRef.
+  void moveSectionNext(DataRefImpl &Sec) const override;
+  std::error_code getSectionName(DataRefImpl Sec,
+                                 StringRef &Res) const override;
+  uint64_t getSectionAddress(DataRefImpl Sec) const override;
+  uint64_t getSectionIndex(DataRefImpl Sec) const override;
+  uint64_t getSectionSize(DataRefImpl Sec) const override;
+  std::error_code getSectionContents(DataRefImpl Sec,
+                                     StringRef &Res) const override;
+  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+  bool isSectionCompressed(DataRefImpl Sec) const override;
+  bool isSectionText(DataRefImpl Sec) const override;
+  bool isSectionData(DataRefImpl Sec) const override;
+  bool isSectionBSS(DataRefImpl Sec) const override;
+  bool isSectionVirtual(DataRefImpl Sec) const override;
+  bool isSectionBitcode(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+  relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+
+  // Overrides from RelocationRef.
+  void moveRelocationNext(DataRefImpl &Rel) const override;
+  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+  uint64_t getRelocationType(DataRefImpl Rel) const override;
+  void getRelocationTypeName(DataRefImpl Rel,
+                             SmallVectorImpl<char> &Result) const override;
+
+  section_iterator section_begin() const override;
+  section_iterator section_end() const override;
+  uint8_t getBytesInAddress() const override;
+  StringRef getFileFormatName() const override;
+  Triple::ArchType getArch() const override;
+  SubtargetFeatures getFeatures() const override;
+  bool isRelocatableObject() const override;
+
+private:
+  bool isValidFunctionIndex(uint32_t Index) const;
+  bool isDefinedFunctionIndex(uint32_t Index) const;
+  bool isValidGlobalIndex(uint32_t Index) const;
+  bool isDefinedGlobalIndex(uint32_t Index) const;
+  bool isValidFunctionSymbol(uint32_t Index) const;
+  bool isValidGlobalSymbol(uint32_t Index) const;
+  bool isValidDataSymbol(uint32_t Index) const;
+  wasm::WasmFunction &getDefinedFunction(uint32_t Index);
+  wasm::WasmGlobal &getDefinedGlobal(uint32_t Index);
+
+  const WasmSection &getWasmSection(DataRefImpl Ref) const;
+  const wasm::WasmRelocation &getWasmRelocation(DataRefImpl Ref) const;
+
+  WasmSection *findCustomSectionByName(StringRef Name);
+  WasmSection *findSectionByType(uint32_t Type);
+
+  const uint8_t *getPtr(size_t Offset) const;
+  Error parseSection(WasmSection &Sec);
+  Error parseCustomSection(WasmSection &Sec, const uint8_t *Ptr,
+                           const uint8_t *End);
+
+  // Standard section types
+  Error parseTypeSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseImportSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseFunctionSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseTableSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseMemorySection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseGlobalSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseExportSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseStartSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseElemSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseCodeSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseDataSection(const uint8_t *Ptr, const uint8_t *End);
+
+  // Custom section types
+  Error parseNameSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseLinkingSection(const uint8_t *Ptr, const uint8_t *End);
+  Error parseLinkingSectionSymtab(const uint8_t *&Ptr, const uint8_t *End);
+  Error parseLinkingSectionComdat(const uint8_t *&Ptr, const uint8_t *End);
+  Error parseRelocSection(StringRef Name, const uint8_t *Ptr,
+                          const uint8_t *End);
+
+  wasm::WasmObjectHeader Header;
+  std::vector<WasmSection> Sections;
+  std::vector<wasm::WasmSignature> Signatures;
+  std::vector<uint32_t> FunctionTypes;
+  std::vector<wasm::WasmTable> Tables;
+  std::vector<wasm::WasmLimits> Memories;
+  std::vector<wasm::WasmGlobal> Globals;
+  std::vector<wasm::WasmImport> Imports;
+  std::vector<wasm::WasmExport> Exports;
+  std::vector<wasm::WasmElemSegment> ElemSegments;
+  std::vector<WasmSegment> DataSegments;
+  std::vector<wasm::WasmFunction> Functions;
+  std::vector<WasmSymbol> Symbols;
+  std::vector<wasm::WasmFunctionName> DebugNames;
+  uint32_t StartFunction = -1;
+  bool HasLinkingSection = false;
+  wasm::WasmLinkingData LinkingData;
+  uint32_t NumImportedGlobals = 0;
+  uint32_t NumImportedFunctions = 0;
+  uint32_t CodeSection = 0;
+  uint32_t DataSection = 0;
+  uint32_t GlobalSection = 0;
+};
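+
+// Illustrative sketch (assumes Buffer holds a valid wasm module): the
+// error-out-parameter constructor requires checking Err afterwards, e.g.:
+//
+//   Error Err = Error::success();
+//   WasmObjectFile Obj(Buffer, Err);
+//   if (Err)
+//     return std::move(Err);
+//   for (const WasmSymbol &Sym : Obj.syms())
+//     outs() << Sym << "\n";   // uses the operator<< declared below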
+
+} // end namespace object
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const object::WasmSymbol &Sym) {
+  Sym.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_WASM_H
diff --git a/linux-x64/clang/include/llvm/Object/WasmTraits.h b/linux-x64/clang/include/llvm/Object/WasmTraits.h
new file mode 100644
index 0000000..ebcd00b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/WasmTraits.h
@@ -0,0 +1,63 @@
+//===- WasmTraits.h - DenseMap traits for the Wasm structures ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides llvm::DenseMapInfo traits for the Wasm structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_WASMTRAITS_H
+#define LLVM_OBJECT_WASMTRAITS_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/BinaryFormat/Wasm.h"
+
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+
+// Traits for using WasmSignature in a DenseMap.
+template <> struct DenseMapInfo<wasm::WasmSignature> {
+  static wasm::WasmSignature getEmptyKey() {
+    return wasm::WasmSignature{{}, 1};
+  }
+  static wasm::WasmSignature getTombstoneKey() {
+    return wasm::WasmSignature{{}, 2};
+  }
+  static unsigned getHashValue(const wasm::WasmSignature &Sig) {
+    unsigned H = hash_value(Sig.ReturnType);
+    for (int32_t Param : Sig.ParamTypes)
+      H = hash_combine(H, Param);
+    return H;
+  }
+  static bool isEqual(const wasm::WasmSignature &LHS,
+                      const wasm::WasmSignature &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Traits for using WasmGlobalType in a DenseMap
+template <> struct DenseMapInfo<wasm::WasmGlobalType> {
+  static wasm::WasmGlobalType getEmptyKey() {
+    return wasm::WasmGlobalType{1, true};
+  }
+  static wasm::WasmGlobalType getTombstoneKey() {
+    return wasm::WasmGlobalType{2, true};
+  }
+  static unsigned getHashValue(const wasm::WasmGlobalType &GlobalType) {
+    return hash_combine(GlobalType.Type, GlobalType.Mutable);
+  }
+  static bool isEqual(const wasm::WasmGlobalType &LHS,
+                      const wasm::WasmGlobalType &RHS) {
+    return LHS == RHS;
+  }
+};
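+
+// Illustrative sketch: with these traits in scope, the Wasm structures can
+// key a DenseMap directly, e.g. for signature deduplication:
+//
+//   DenseMap<wasm::WasmSignature, uint32_t> SigIndices;
+//   uint32_t &Index = SigIndices[Sig];   // Sig: an existing WasmSignature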
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_WASMTRAITS_H
diff --git a/linux-x64/clang/include/llvm/Object/WindowsResource.h b/linux-x64/clang/include/llvm/Object/WindowsResource.h
new file mode 100644
index 0000000..a077c82
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Object/WindowsResource.h
@@ -0,0 +1,227 @@
+//===-- WindowsResource.h ---------------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file declares the .res file class.  .res files are intermediate
+// products of the typical resource-compilation process on Windows.  This
+// process is as follows:
+//
+// .rc file(s) ---(rc.exe)---> .res file(s) ---(cvtres.exe)---> COFF file
+//
+// .rc files are human-readable scripts that list all resources a program uses.
+//
+// They are compiled into .res files, which are a list of the resources in
+// binary form.
+//
+// Finally the data stored in the .res is compiled into a COFF file, where it
+// is organized in a directory tree structure for optimized access by the
+// program during runtime.
+//
+// Ref: msdn.microsoft.com/en-us/library/windows/desktop/ms648007(v=vs.85).aspx
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_OBJECT_RESFILE_H
+#define LLVM_INCLUDE_LLVM_OBJECT_RESFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ScopedPrinter.h"
+
+#include <map>
+
+namespace llvm {
+namespace object {
+
+class WindowsResource;
+
+const size_t WIN_RES_MAGIC_SIZE = 16;
+const size_t WIN_RES_NULL_ENTRY_SIZE = 16;
+const uint32_t WIN_RES_HEADER_ALIGNMENT = 4;
+const uint32_t WIN_RES_DATA_ALIGNMENT = 4;
+const uint16_t WIN_RES_PURE_MOVEABLE = 0x0030;
+
+struct WinResHeaderPrefix {
+  support::ulittle32_t DataSize;
+  support::ulittle32_t HeaderSize;
+};
+
+// Type and Name may each either be an integer ID or a string.  This struct is
+// only used in the case where they are both IDs.
+struct WinResIDs {
+  uint16_t TypeFlag;
+  support::ulittle16_t TypeID;
+  uint16_t NameFlag;
+  support::ulittle16_t NameID;
+
+  void setType(uint16_t ID) {
+    TypeFlag = 0xffff;
+    TypeID = ID;
+  }
+
+  void setName(uint16_t ID) {
+    NameFlag = 0xffff;
+    NameID = ID;
+  }
+};
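+
+// Illustrative sketch: a header whose type and name are both numeric IDs is
+// filled in as, e.g.:
+//
+//   WinResIDs IDs;
+//   IDs.setType(6);    // e.g. RT_STRING; the 0xffff flag marks an ID field
+//   IDs.setName(42);   // arbitrary example resource ID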
+
+struct WinResHeaderSuffix {
+  support::ulittle32_t DataVersion;
+  support::ulittle16_t MemoryFlags;
+  support::ulittle16_t Language;
+  support::ulittle32_t Version;
+  support::ulittle32_t Characteristics;
+};
+
+class EmptyResError : public GenericBinaryError {
+public:
+  EmptyResError(Twine Msg, object_error ECOverride)
+      : GenericBinaryError(Msg, ECOverride) {}
+};
+
+class ResourceEntryRef {
+public:
+  Error moveNext(bool &End);
+  bool checkTypeString() const { return IsStringType; }
+  ArrayRef<UTF16> getTypeString() const { return Type; }
+  uint16_t getTypeID() const { return TypeID; }
+  bool checkNameString() const { return IsStringName; }
+  ArrayRef<UTF16> getNameString() const { return Name; }
+  uint16_t getNameID() const { return NameID; }
+  uint16_t getDataVersion() const { return Suffix->DataVersion; }
+  uint16_t getLanguage() const { return Suffix->Language; }
+  uint16_t getMemoryFlags() const { return Suffix->MemoryFlags; }
+  uint16_t getMajorVersion() const { return Suffix->Version >> 16; }
+  uint16_t getMinorVersion() const { return Suffix->Version; }
+  uint32_t getCharacteristics() const { return Suffix->Characteristics; }
+  ArrayRef<uint8_t> getData() const { return Data; }
+
+private:
+  friend class WindowsResource;
+
+  ResourceEntryRef(BinaryStreamRef Ref, const WindowsResource *Owner);
+  Error loadNext();
+
+  static Expected<ResourceEntryRef> create(BinaryStreamRef Ref,
+                                           const WindowsResource *Owner);
+
+  BinaryStreamReader Reader;
+  bool IsStringType;
+  ArrayRef<UTF16> Type;
+  uint16_t TypeID;
+  bool IsStringName;
+  ArrayRef<UTF16> Name;
+  uint16_t NameID;
+  const WinResHeaderSuffix *Suffix = nullptr;
+  ArrayRef<uint8_t> Data;
+};
+
+class WindowsResource : public Binary {
+public:
+  Expected<ResourceEntryRef> getHeadEntry();
+
+  static bool classof(const Binary *V) { return V->isWinRes(); }
+
+  static Expected<std::unique_ptr<WindowsResource>>
+  createWindowsResource(MemoryBufferRef Source);
+
+private:
+  friend class ResourceEntryRef;
+
+  WindowsResource(MemoryBufferRef Source);
+
+  BinaryByteStream BBS;
+};
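+
+// Illustrative sketch (assumes WR is a parsed WindowsResource*): traversal is
+// driven by moveNext's End out-parameter, e.g.:
+//
+//   Expected<ResourceEntryRef> EntryOrErr = WR->getHeadEntry();
+//   if (!EntryOrErr)
+//     return EntryOrErr.takeError();
+//   ResourceEntryRef Entry = *EntryOrErr;
+//   for (bool End = false; !End;) {
+//     consume(Entry);                 // consume: hypothetical per-entry hook
+//     if (Error E = Entry.moveNext(End))
+//       return E;
+//   }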
+
+class WindowsResourceParser {
+public:
+  class TreeNode;
+  WindowsResourceParser();
+  Error parse(WindowsResource *WR);
+  void printTree(raw_ostream &OS) const;
+  const TreeNode &getTree() const { return Root; }
+  const ArrayRef<std::vector<uint8_t>> getData() const { return Data; }
+  const ArrayRef<std::vector<UTF16>> getStringTable() const {
+    return StringTable;
+  }
+
+  class TreeNode {
+  public:
+    template <typename T>
+    using Children = std::map<T, std::unique_ptr<TreeNode>>;
+
+    void print(ScopedPrinter &Writer, StringRef Name) const;
+    uint32_t getTreeSize() const;
+    uint32_t getStringIndex() const { return StringIndex; }
+    uint32_t getDataIndex() const { return DataIndex; }
+    uint16_t getMajorVersion() const { return MajorVersion; }
+    uint16_t getMinorVersion() const { return MinorVersion; }
+    uint32_t getCharacteristics() const { return Characteristics; }
+    bool checkIsDataNode() const { return IsDataNode; }
+    const Children<uint32_t> &getIDChildren() const { return IDChildren; }
+    const Children<std::string> &getStringChildren() const {
+      return StringChildren;
+    }
+
+  private:
+    friend class WindowsResourceParser;
+
+    static uint32_t StringCount;
+    static uint32_t DataCount;
+
+    static std::unique_ptr<TreeNode> createStringNode();
+    static std::unique_ptr<TreeNode> createIDNode();
+    static std::unique_ptr<TreeNode> createDataNode(uint16_t MajorVersion,
+                                                    uint16_t MinorVersion,
+                                                    uint32_t Characteristics);
+
+    explicit TreeNode(bool IsStringNode);
+    TreeNode(uint16_t MajorVersion, uint16_t MinorVersion,
+             uint32_t Characteristics);
+
+    void addEntry(const ResourceEntryRef &Entry, bool &IsNewTypeString,
+                  bool &IsNewNameString);
+    TreeNode &addTypeNode(const ResourceEntryRef &Entry, bool &IsNewTypeString);
+    TreeNode &addNameNode(const ResourceEntryRef &Entry, bool &IsNewNameString);
+    TreeNode &addLanguageNode(const ResourceEntryRef &Entry);
+    TreeNode &addChild(uint32_t ID, bool IsDataNode = false,
+                       uint16_t MajorVersion = 0, uint16_t MinorVersion = 0,
+                       uint32_t Characteristics = 0);
+    TreeNode &addChild(ArrayRef<UTF16> NameRef, bool &IsNewString);
+
+    bool IsDataNode = false;
+    uint32_t StringIndex;
+    uint32_t DataIndex;
+    Children<uint32_t> IDChildren;
+    Children<std::string> StringChildren;
+    uint16_t MajorVersion = 0;
+    uint16_t MinorVersion = 0;
+    uint32_t Characteristics = 0;
+  };
+
+private:
+  TreeNode Root;
+  std::vector<std::vector<uint8_t>> Data;
+  std::vector<std::vector<UTF16>> StringTable;
+};
+
+Expected<std::unique_ptr<MemoryBuffer>>
+writeWindowsResourceCOFF(llvm::COFF::MachineTypes MachineType,
+                         const WindowsResourceParser &Parser);
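+
+// Illustrative sketch: a cvtres-style pipeline parses one or more .res
+// inputs and then serializes a single COFF object, e.g.:
+//
+//   WindowsResourceParser Parser;
+//   if (Error E = Parser.parse(WR))    // WR: WindowsResource* as above
+//     return std::move(E);
+//   Expected<std::unique_ptr<MemoryBuffer>> COFFOrErr =
+//       writeWindowsResourceCOFF(COFF::IMAGE_FILE_MACHINE_AMD64, Parser);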
+
+} // namespace object
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_OBJECT_RESFILE_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/COFFYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/COFFYAML.h
new file mode 100644
index 0000000..8794eaa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/COFFYAML.h
@@ -0,0 +1,250 @@
+//===- COFFYAML.h - COFF YAMLIO implementation ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares classes for handling the YAML representation of COFF.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_COFFYAML_H
+#define LLVM_OBJECTYAML_COFFYAML_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
+#include "llvm/ObjectYAML/CodeViewYAMLTypeHashing.h"
+#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
+#include "llvm/ObjectYAML/YAML.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+namespace COFF {
+
+inline Characteristics operator|(Characteristics a, Characteristics b) {
+  uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
+  return static_cast<Characteristics>(Ret);
+}
+
+inline SectionCharacteristics operator|(SectionCharacteristics a,
+                                        SectionCharacteristics b) {
+  uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
+  return static_cast<SectionCharacteristics>(Ret);
+}
+
+inline DLLCharacteristics operator|(DLLCharacteristics a,
+                                    DLLCharacteristics b) {
+  uint16_t Ret = static_cast<uint16_t>(a) | static_cast<uint16_t>(b);
+  return static_cast<DLLCharacteristics>(Ret);
+}
+
+} // end namespace COFF
+
+// The structure of the yaml files is not an exact 1:1 match to COFF. In order
+// to use yaml::IO, we use these structures which are closer to the source.
+namespace COFFYAML {
+
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, COMDATType)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, WeakExternalCharacteristics)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, AuxSymbolType)
+
+struct Relocation {
+  uint32_t VirtualAddress;
+  uint16_t Type;
+  StringRef SymbolName;
+};
+
+struct Section {
+  COFF::section Header;
+  unsigned Alignment = 0;
+  yaml::BinaryRef SectionData;
+  std::vector<CodeViewYAML::YAMLDebugSubsection> DebugS;
+  std::vector<CodeViewYAML::LeafRecord> DebugT;
+  Optional<CodeViewYAML::DebugHSection> DebugH;
+  std::vector<Relocation> Relocations;
+  StringRef Name;
+
+  Section();
+};
+
+struct Symbol {
+  COFF::symbol Header;
+  COFF::SymbolBaseType SimpleType = COFF::IMAGE_SYM_TYPE_NULL;
+  COFF::SymbolComplexType ComplexType = COFF::IMAGE_SYM_DTYPE_NULL;
+  Optional<COFF::AuxiliaryFunctionDefinition> FunctionDefinition;
+  Optional<COFF::AuxiliarybfAndefSymbol> bfAndefSymbol;
+  Optional<COFF::AuxiliaryWeakExternal> WeakExternal;
+  StringRef File;
+  Optional<COFF::AuxiliarySectionDefinition> SectionDefinition;
+  Optional<COFF::AuxiliaryCLRToken> CLRToken;
+  StringRef Name;
+
+  Symbol();
+};
+
+struct PEHeader {
+  COFF::PE32Header Header;
+  Optional<COFF::DataDirectory> DataDirectories[COFF::NUM_DATA_DIRECTORIES];
+};
+
+struct Object {
+  Optional<PEHeader> OptionalHeader;
+  COFF::header Header;
+  std::vector<Section> Sections;
+  std::vector<Symbol> Symbols;
+
+  Object();
+};
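+
+// Illustrative sketch: these structures round-trip through yaml::IO, so a
+// document is materialized as, e.g.:
+//
+//   COFFYAML::Object Obj;
+//   yaml::Input YIn(Buffer);   // Buffer: StringRef of YAML text
+//   YIn >> Obj;
+//   if (YIn.error())
+//     reportParseFailure();    // reportParseFailure: hypothetical handler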
+
+} // end namespace COFFYAML
+
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Section)
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Symbol)
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Relocation)
+
+namespace llvm {
+namespace yaml {
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics> {
+  static void enumeration(IO &IO, COFFYAML::WeakExternalCharacteristics &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::AuxSymbolType> {
+  static void enumeration(IO &IO, COFFYAML::AuxSymbolType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::COMDATType> {
+  static void enumeration(IO &IO, COFFYAML::COMDATType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::MachineTypes> {
+  static void enumeration(IO &IO, COFF::MachineTypes &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolBaseType> {
+  static void enumeration(IO &IO, COFF::SymbolBaseType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolStorageClass> {
+  static void enumeration(IO &IO, COFF::SymbolStorageClass &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolComplexType> {
+  static void enumeration(IO &IO, COFF::SymbolComplexType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypeI386> {
+  static void enumeration(IO &IO, COFF::RelocationTypeI386 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypeAMD64> {
+  static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypesARM> {
+  static void enumeration(IO &IO, COFF::RelocationTypesARM &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypesARM64> {
+  static void enumeration(IO &IO, COFF::RelocationTypesARM64 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::WindowsSubsystem> {
+  static void enumeration(IO &IO, COFF::WindowsSubsystem &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::Characteristics> {
+  static void bitset(IO &IO, COFF::Characteristics &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::SectionCharacteristics> {
+  static void bitset(IO &IO, COFF::SectionCharacteristics &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::DLLCharacteristics> {
+  static void bitset(IO &IO, COFF::DLLCharacteristics &Value);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Relocation> {
+  static void mapping(IO &IO, COFFYAML::Relocation &Rel);
+};
+
+template <>
+struct MappingTraits<COFFYAML::PEHeader> {
+  static void mapping(IO &IO, COFFYAML::PEHeader &PH);
+};
+
+template <>
+struct MappingTraits<COFF::DataDirectory> {
+  static void mapping(IO &IO, COFF::DataDirectory &DD);
+};
+
+template <>
+struct MappingTraits<COFF::header> {
+  static void mapping(IO &IO, COFF::header &H);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryFunctionDefinition> {
+  static void mapping(IO &IO, COFF::AuxiliaryFunctionDefinition &AFD);
+};
+
+template <> struct MappingTraits<COFF::AuxiliarybfAndefSymbol> {
+  static void mapping(IO &IO, COFF::AuxiliarybfAndefSymbol &AAS);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryWeakExternal> {
+  static void mapping(IO &IO, COFF::AuxiliaryWeakExternal &AWE);
+};
+
+template <> struct MappingTraits<COFF::AuxiliarySectionDefinition> {
+  static void mapping(IO &IO, COFF::AuxiliarySectionDefinition &ASD);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryCLRToken> {
+  static void mapping(IO &IO, COFF::AuxiliaryCLRToken &ACT);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Symbol> {
+  static void mapping(IO &IO, COFFYAML::Symbol &S);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Section> {
+  static void mapping(IO &IO, COFFYAML::Section &Sec);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Object> {
+  static void mapping(IO &IO, COFFYAML::Object &Obj);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_COFFYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h
new file mode 100644
index 0000000..d620008
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h
@@ -0,0 +1,140 @@
+//=- CodeViewYAMLDebugSections.h - CodeView YAMLIO debug sections -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H
+#define LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace codeview {
+
+class StringsAndChecksums;
+class StringsAndChecksumsRef;
+
+} // end namespace codeview
+
+namespace CodeViewYAML {
+
+namespace detail {
+
+struct YAMLSubsectionBase;
+
+} // end namespace detail
+
+struct YAMLFrameData {
+  uint32_t RvaStart;
+  uint32_t CodeSize;
+  uint32_t LocalSize;
+  uint32_t ParamsSize;
+  uint32_t MaxStackSize;
+  StringRef FrameFunc;
+  uint32_t PrologSize;
+  uint32_t SavedRegsSize;
+  uint32_t Flags;
+};
+
+struct YAMLCrossModuleImport {
+  StringRef ModuleName;
+  std::vector<uint32_t> ImportIds;
+};
+
+struct SourceLineEntry {
+  uint32_t Offset;
+  uint32_t LineStart;
+  uint32_t EndDelta;
+  bool IsStatement;
+};
+
+struct SourceColumnEntry {
+  uint16_t StartColumn;
+  uint16_t EndColumn;
+};
+
+struct SourceLineBlock {
+  StringRef FileName;
+  std::vector<SourceLineEntry> Lines;
+  std::vector<SourceColumnEntry> Columns;
+};
+
+struct HexFormattedString {
+  std::vector<uint8_t> Bytes;
+};
+
+struct SourceFileChecksumEntry {
+  StringRef FileName;
+  codeview::FileChecksumKind Kind;
+  HexFormattedString ChecksumBytes;
+};
+
+struct SourceLineInfo {
+  uint32_t RelocOffset;
+  uint32_t RelocSegment;
+  codeview::LineFlags Flags;
+  uint32_t CodeSize;
+  std::vector<SourceLineBlock> Blocks;
+};
+
+struct InlineeSite {
+  uint32_t Inlinee;
+  StringRef FileName;
+  uint32_t SourceLineNum;
+  std::vector<StringRef> ExtraFiles;
+};
+
+struct InlineeInfo {
+  bool HasExtraFiles;
+  std::vector<InlineeSite> Sites;
+};
+
+struct YAMLDebugSubsection {
+  static Expected<YAMLDebugSubsection>
+  fromCodeViewSubection(const codeview::StringsAndChecksumsRef &SC,
+                        const codeview::DebugSubsectionRecord &SS);
+
+  std::shared_ptr<detail::YAMLSubsectionBase> Subsection;
+};
+
+struct DebugSubsectionState {};
+
+Expected<std::vector<std::shared_ptr<codeview::DebugSubsection>>>
+toCodeViewSubsectionList(BumpPtrAllocator &Allocator,
+                         ArrayRef<YAMLDebugSubsection> Subsections,
+                         const codeview::StringsAndChecksums &SC);
+
+std::vector<YAMLDebugSubsection>
+fromDebugS(ArrayRef<uint8_t> Data, const codeview::StringsAndChecksumsRef &SC);
+
+void initializeStringsAndChecksums(ArrayRef<YAMLDebugSubsection> Sections,
+                                   codeview::StringsAndChecksums &SC);
+
+} // end namespace CodeViewYAML
+
+} // end namespace llvm
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::YAMLDebugSubsection)
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::YAMLDebugSubsection)
+
+#endif // LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLSymbols.h b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLSymbols.h
new file mode 100644
index 0000000..791193c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLSymbols.h
@@ -0,0 +1,49 @@
+//===- CodeViewYAMLSymbols.h - CodeView YAMLIO Symbol implementation ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H
+#define LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <memory>
+
+namespace llvm {
+namespace CodeViewYAML {
+
+namespace detail {
+
+struct SymbolRecordBase;
+
+} // end namespace detail
+
+struct SymbolRecord {
+  std::shared_ptr<detail::SymbolRecordBase> Symbol;
+
+  codeview::CVSymbol
+  toCodeViewSymbol(BumpPtrAllocator &Allocator,
+                   codeview::CodeViewContainer Container) const;
+
+  static Expected<SymbolRecord> fromCodeViewSymbol(codeview::CVSymbol Symbol);
+};
+
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::SymbolRecord)
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::SymbolRecord)
+
+#endif // LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h
new file mode 100644
index 0000000..4f0d9ef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h
@@ -0,0 +1,62 @@
+//==- CodeViewYAMLTypeHashing.h - CodeView YAMLIO Type hashing ----*- C++-*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H
+#define LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/CodeView/TypeHashing.h"
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace CodeViewYAML {
+
+struct GlobalHash {
+  GlobalHash() = default;
+  explicit GlobalHash(StringRef S) : Hash(S) {
+    assert(S.size() == 20 && "Invalid hash size!");
+  }
+  explicit GlobalHash(ArrayRef<uint8_t> S) : Hash(S) {
+    assert(S.size() == 20 && "Invalid hash size!");
+  }
+  yaml::BinaryRef Hash;
+};
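+
+// Illustrative sketch: hashes are fixed 20-byte blobs, e.g.:
+//
+//   uint8_t Digest[20] = {};                  // placeholder digest bytes
+//   GlobalHash GH{ArrayRef<uint8_t>(Digest)};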
+
+struct DebugHSection {
+  uint32_t Magic;
+  uint16_t Version;
+  uint16_t HashAlgorithm;
+  std::vector<GlobalHash> Hashes;
+};
+
+DebugHSection fromDebugH(ArrayRef<uint8_t> DebugT);
+ArrayRef<uint8_t> toDebugH(const DebugHSection &DebugH,
+                           BumpPtrAllocator &Alloc);
+
+} // end namespace CodeViewYAML
+
+} // end namespace llvm
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::DebugHSection)
+LLVM_YAML_DECLARE_SCALAR_TRAITS(CodeViewYAML::GlobalHash, QuotingType::None)
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::GlobalHash)
+
+#endif // LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypes.h b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypes.h
new file mode 100644
index 0000000..bc3b556
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/CodeViewYAMLTypes.h
@@ -0,0 +1,69 @@
+//==- CodeViewYAMLTypes.h - CodeView YAMLIO Type implementation --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H
+#define LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace codeview {
+class AppendingTypeTableBuilder;
+}
+
+namespace CodeViewYAML {
+
+namespace detail {
+
+struct LeafRecordBase;
+struct MemberRecordBase;
+
+} // end namespace detail
+
+struct MemberRecord {
+  std::shared_ptr<detail::MemberRecordBase> Member;
+};
+
+struct LeafRecord {
+  std::shared_ptr<detail::LeafRecordBase> Leaf;
+
+  codeview::CVType
+  toCodeViewRecord(codeview::AppendingTypeTableBuilder &Serializer) const;
+  static Expected<LeafRecord> fromCodeViewRecord(codeview::CVType Type);
+};
+
+std::vector<LeafRecord> fromDebugT(ArrayRef<uint8_t> DebugT);
+ArrayRef<uint8_t> toDebugT(ArrayRef<LeafRecord>, BumpPtrAllocator &Alloc);
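+
+// Illustrative sketch: a .debug$T payload round-trips through LeafRecord,
+// e.g.:
+//
+//   std::vector<LeafRecord> Leaves = fromDebugT(DebugTBytes);
+//   BumpPtrAllocator Alloc;
+//   ArrayRef<uint8_t> Rebuilt = toDebugT(Leaves, Alloc);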
+
+} // end namespace CodeViewYAML
+
+} // end namespace llvm
+
+LLVM_YAML_DECLARE_SCALAR_TRAITS(codeview::GUID, QuotingType::Single)
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::LeafRecord)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::MemberRecord)
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::LeafRecord)
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::MemberRecord)
+
+#endif // LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/DWARFEmitter.h b/linux-x64/clang/include/llvm/ObjectYAML/DWARFEmitter.h
new file mode 100644
index 0000000..0d7d8b4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/DWARFEmitter.h
@@ -0,0 +1,49 @@
+//===- DWARFEmitter.h - DWARF YAML to binary emission ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief Common declarations for yaml2obj's DWARF section emitters
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_DWARFEMITTER_H
+#define LLVM_OBJECTYAML_DWARFEMITTER_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace DWARFYAML {
+
+struct Data;
+struct PubSection;
+
+void EmitDebugAbbrev(raw_ostream &OS, const Data &DI);
+void EmitDebugStr(raw_ostream &OS, const Data &DI);
+
+void EmitDebugAranges(raw_ostream &OS, const Data &DI);
+void EmitPubSection(raw_ostream &OS, const PubSection &Sect,
+                    bool IsLittleEndian);
+void EmitDebugInfo(raw_ostream &OS, const Data &DI);
+void EmitDebugLine(raw_ostream &OS, const Data &DI);
+
+Expected<StringMap<std::unique_ptr<MemoryBuffer>>>
+EmitDebugSections(StringRef YAMLString,
+                  bool IsLittleEndian = sys::IsLittleEndianHost);
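+
+// Illustrative sketch: yaml2obj-style clients emit every section in one
+// call, e.g.:
+//
+//   auto SectionsOrErr = EmitDebugSections(YamlText);
+//   if (!SectionsOrErr)
+//     return SectionsOrErr.takeError();
+//   for (auto &Entry : *SectionsOrErr)          // StringMap of buffers
+//     use(Entry.getKey(), *Entry.getValue());   // use: hypothetical consumer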
+
+} // end namespace DWARFYAML
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_DWARFEMITTER_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/DWARFYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/DWARFYAML.h
new file mode 100644
index 0000000..2162f0f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/DWARFYAML.h
@@ -0,0 +1,309 @@
+//===- DWARFYAML.h - DWARF YAMLIO implementation ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of DWARF Debug Info.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_DWARFYAML_H
+#define LLVM_OBJECTYAML_DWARFYAML_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace DWARFYAML {
+
+struct InitialLength {
+  uint32_t TotalLength;
+  uint64_t TotalLength64;
+
+  bool isDWARF64() const { return TotalLength == UINT32_MAX; }
+
+  uint64_t getLength() const {
+    return isDWARF64() ? TotalLength64 : TotalLength;
+  }
+
+  void setLength(uint64_t Len) {
+    if (Len >= (uint64_t)UINT32_MAX) {
+      TotalLength64 = Len;
+      TotalLength = UINT32_MAX;
+    } else {
+      TotalLength = Len;
+    }
+  }
+};
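+
+// Illustrative example: setLength(0x10) leaves TotalLength == 0x10 (DWARF32
+// encoding), while setLength(0x100000000) stores the value in TotalLength64
+// and sets TotalLength to UINT32_MAX, the DWARF64 escape marker.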
+
+struct AttributeAbbrev {
+  llvm::dwarf::Attribute Attribute;
+  llvm::dwarf::Form Form;
+  llvm::yaml::Hex64 Value; // Some DWARF5 attributes have values
+};
+
+struct Abbrev {
+  llvm::yaml::Hex32 Code;
+  llvm::dwarf::Tag Tag;
+  llvm::dwarf::Constants Children;
+  std::vector<AttributeAbbrev> Attributes;
+};
+
+struct ARangeDescriptor {
+  llvm::yaml::Hex64 Address;
+  uint64_t Length;
+};
+
+struct ARange {
+  InitialLength Length;
+  uint16_t Version;
+  uint32_t CuOffset;
+  uint8_t AddrSize;
+  uint8_t SegSize;
+  std::vector<ARangeDescriptor> Descriptors;
+};
+
+struct PubEntry {
+  llvm::yaml::Hex32 DieOffset;
+  llvm::yaml::Hex8 Descriptor;
+  StringRef Name;
+};
+
+struct PubSection {
+  InitialLength Length;
+  uint16_t Version;
+  uint32_t UnitOffset;
+  uint32_t UnitSize;
+  bool IsGNUStyle = false;
+  std::vector<PubEntry> Entries;
+};
+
+struct FormValue {
+  llvm::yaml::Hex64 Value;
+  StringRef CStr;
+  std::vector<llvm::yaml::Hex8> BlockData;
+};
+
+struct Entry {
+  llvm::yaml::Hex32 AbbrCode;
+  std::vector<FormValue> Values;
+};
+
+struct Unit {
+  InitialLength Length;
+  uint16_t Version;
+  llvm::dwarf::UnitType Type; // Added in DWARF 5
+  uint32_t AbbrOffset;
+  uint8_t AddrSize;
+  std::vector<Entry> Entries;
+};
+
+struct File {
+  StringRef Name;
+  uint64_t DirIdx;
+  uint64_t ModTime;
+  uint64_t Length;
+};
+
+struct LineTableOpcode {
+  dwarf::LineNumberOps Opcode;
+  uint64_t ExtLen;
+  dwarf::LineNumberExtendedOps SubOpcode;
+  uint64_t Data;
+  int64_t SData;
+  File FileEntry;
+  std::vector<llvm::yaml::Hex8> UnknownOpcodeData;
+  std::vector<llvm::yaml::Hex64> StandardOpcodeData;
+};
+
+struct LineTable {
+  InitialLength Length;
+  uint16_t Version;
+  uint64_t PrologueLength;
+  uint8_t MinInstLength;
+  uint8_t MaxOpsPerInst;
+  uint8_t DefaultIsStmt;
+  uint8_t LineBase;
+  uint8_t LineRange;
+  uint8_t OpcodeBase;
+  std::vector<uint8_t> StandardOpcodeLengths;
+  std::vector<StringRef> IncludeDirs;
+  std::vector<File> Files;
+  std::vector<LineTableOpcode> Opcodes;
+};
+
+struct Data {
+  bool IsLittleEndian;
+  std::vector<Abbrev> AbbrevDecls;
+  std::vector<StringRef> DebugStrings;
+  std::vector<ARange> ARanges;
+  PubSection PubNames;
+  PubSection PubTypes;
+
+  PubSection GNUPubNames;
+  PubSection GNUPubTypes;
+
+  std::vector<Unit> CompileUnits;
+
+  std::vector<LineTable> DebugLines;
+
+  bool isEmpty() const;
+};
+
+} // end namespace DWARFYAML
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex64)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex8)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AttributeAbbrev)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Abbrev)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARangeDescriptor)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARange)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::PubEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Unit)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::FormValue)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Entry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::File)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTable)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTableOpcode)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<DWARFYAML::Data> {
+  static void mapping(IO &IO, DWARFYAML::Data &DWARF);
+};
+
+template <> struct MappingTraits<DWARFYAML::Abbrev> {
+  static void mapping(IO &IO, DWARFYAML::Abbrev &Abbrev);
+};
+
+template <> struct MappingTraits<DWARFYAML::AttributeAbbrev> {
+  static void mapping(IO &IO, DWARFYAML::AttributeAbbrev &AttAbbrev);
+};
+
+template <> struct MappingTraits<DWARFYAML::ARangeDescriptor> {
+  static void mapping(IO &IO, DWARFYAML::ARangeDescriptor &Descriptor);
+};
+
+template <> struct MappingTraits<DWARFYAML::ARange> {
+  static void mapping(IO &IO, DWARFYAML::ARange &Range);
+};
+
+template <> struct MappingTraits<DWARFYAML::PubEntry> {
+  static void mapping(IO &IO, DWARFYAML::PubEntry &Entry);
+};
+
+template <> struct MappingTraits<DWARFYAML::PubSection> {
+  static void mapping(IO &IO, DWARFYAML::PubSection &Section);
+};
+
+template <> struct MappingTraits<DWARFYAML::Unit> {
+  static void mapping(IO &IO, DWARFYAML::Unit &Unit);
+};
+
+template <> struct MappingTraits<DWARFYAML::Entry> {
+  static void mapping(IO &IO, DWARFYAML::Entry &Entry);
+};
+
+template <> struct MappingTraits<DWARFYAML::FormValue> {
+  static void mapping(IO &IO, DWARFYAML::FormValue &FormValue);
+};
+
+template <> struct MappingTraits<DWARFYAML::File> {
+  static void mapping(IO &IO, DWARFYAML::File &File);
+};
+
+template <> struct MappingTraits<DWARFYAML::LineTableOpcode> {
+  static void mapping(IO &IO, DWARFYAML::LineTableOpcode &LineTableOpcode);
+};
+
+template <> struct MappingTraits<DWARFYAML::LineTable> {
+  static void mapping(IO &IO, DWARFYAML::LineTable &LineTable);
+};
+
+template <> struct MappingTraits<DWARFYAML::InitialLength> {
+  static void mapping(IO &IO, DWARFYAML::InitialLength &DWARF);
+};
+
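+// Each ScalarEnumerationTraits below is populated via an x-macro: the
+// HANDLE_DW_* macro is defined to emit one enumCase, then Dwarf.def is
+// included to expand it once per known DWARF constant (Dwarf.def undefines
+// the macro afterwards). Unknown values fall back to a raw hex scalar.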
+#define HANDLE_DW_TAG(unused, name, unused2, unused3)                          \
+  io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Tag> {
+  static void enumeration(IO &io, dwarf::Tag &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex16>(value);
+  }
+};
+
+#define HANDLE_DW_LNS(unused, name)                                            \
+  io.enumCase(value, "DW_LNS_" #name, dwarf::DW_LNS_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::LineNumberOps> {
+  static void enumeration(IO &io, dwarf::LineNumberOps &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex8>(value);
+  }
+};
+
+#define HANDLE_DW_LNE(unused, name)                                            \
+  io.enumCase(value, "DW_LNE_" #name, dwarf::DW_LNE_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::LineNumberExtendedOps> {
+  static void enumeration(IO &io, dwarf::LineNumberExtendedOps &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex16>(value);
+  }
+};
+
+#define HANDLE_DW_AT(unused, name, unused2, unused3)                           \
+  io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Attribute> {
+  static void enumeration(IO &io, dwarf::Attribute &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex16>(value);
+  }
+};
+
+#define HANDLE_DW_FORM(unused, name, unused2, unused3)                         \
+  io.enumCase(value, "DW_FORM_" #name, dwarf::DW_FORM_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Form> {
+  static void enumeration(IO &io, dwarf::Form &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex16>(value);
+  }
+};
+
+#define HANDLE_DW_UT(unused, name)                                             \
+  io.enumCase(value, "DW_UT_" #name, dwarf::DW_UT_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::UnitType> {
+  static void enumeration(IO &io, dwarf::UnitType &value) {
+#include "llvm/BinaryFormat/Dwarf.def"
+    io.enumFallback<Hex8>(value);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<dwarf::Constants> {
+  static void enumeration(IO &io, dwarf::Constants &value) {
+    io.enumCase(value, "DW_CHILDREN_no", dwarf::DW_CHILDREN_no);
+    io.enumCase(value, "DW_CHILDREN_yes", dwarf::DW_CHILDREN_yes);
+    io.enumFallback<Hex16>(value);
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_DWARFYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/ELFYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/ELFYAML.h
new file mode 100644
index 0000000..7ba8396
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/ELFYAML.h
@@ -0,0 +1,379 @@
+//===- ELFYAML.h - ELF YAMLIO implementation --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of ELF.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_ELFYAML_H
+#define LLVM_OBJECTYAML_ELFYAML_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace ELFYAML {
+
+// These types are invariant across 32/64-bit ELF, so for simplicity just
+// directly give them their exact sizes. We don't need to worry about
+// endianness because these are just the types in the YAMLIO structures,
+// and are appropriately converted to the necessary endianness when
+// reading/generating binary object files.
+// The naming of these types is intended to be ELF_PREFIX, where PREFIX is
+// the common prefix of the respective constants. E.g. ELF_EM corresponds
+// to the `e_machine` constants, like `EM_X86_64`.
+// In the future, these would probably be better suited by C++11 enum
+// classes with an appropriate fixed underlying type.
+LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_ET)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_PT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_EM)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFCLASS)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFDATA)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFOSABI)
+// Just use 64, since it can hold 32-bit values too.
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_EF)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_PF)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_SHT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_REL)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_RSS)
+// Just use 64, since it can hold 32-bit values too.
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_SHF)
+LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_SHN)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STT)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STV)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STO)
+
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_ISA)
+
+// For now, hardcode 64 bits everywhere that 32 or 64 would be needed
+// since 64-bit can hold 32-bit values too.
+struct FileHeader {
+  ELF_ELFCLASS Class;
+  ELF_ELFDATA Data;
+  ELF_ELFOSABI OSABI;
+  ELF_ET Type;
+  ELF_EM Machine;
+  ELF_EF Flags;
+  llvm::yaml::Hex64 Entry;
+};
+
+struct SectionName {
+  StringRef Section;
+};
+
+struct ProgramHeader {
+  ELF_PT Type;
+  ELF_PF Flags;
+  llvm::yaml::Hex64 VAddr;
+  llvm::yaml::Hex64 PAddr;
+  Optional<llvm::yaml::Hex64> Align;
+  std::vector<SectionName> Sections;
+};
+
+struct Symbol {
+  StringRef Name;
+  ELF_STT Type;
+  StringRef Section;
+  Optional<ELF_SHN> Index;
+  llvm::yaml::Hex64 Value;
+  llvm::yaml::Hex64 Size;
+  uint8_t Other;
+};
+
+struct LocalGlobalWeakSymbols {
+  std::vector<Symbol> Local;
+  std::vector<Symbol> Global;
+  std::vector<Symbol> Weak;
+};
+
+struct SectionOrType {
+  StringRef sectionNameOrType;
+};
+
+struct Section {
+  enum class SectionKind {
+    Group,
+    RawContent,
+    Relocation,
+    NoBits,
+    MipsABIFlags
+  };
+  SectionKind Kind;
+  StringRef Name;
+  ELF_SHT Type;
+  ELF_SHF Flags;
+  llvm::yaml::Hex64 Address;
+  StringRef Link;
+  StringRef Info;
+  llvm::yaml::Hex64 AddressAlign;
+
+  Section(SectionKind Kind) : Kind(Kind) {}
+  virtual ~Section();
+};
+
+struct RawContentSection : Section {
+  yaml::BinaryRef Content;
+  llvm::yaml::Hex64 Size;
+
+  RawContentSection() : Section(SectionKind::RawContent) {}
+
+  static bool classof(const Section *S) {
+    return S->Kind == SectionKind::RawContent;
+  }
+};
+
+struct NoBitsSection : Section {
+  llvm::yaml::Hex64 Size;
+
+  NoBitsSection() : Section(SectionKind::NoBits) {}
+
+  static bool classof(const Section *S) {
+    return S->Kind == SectionKind::NoBits;
+  }
+};
+
+struct Group : Section {
+  // Members of a group contain a flag and a list of section indices
+  // that are part of the group.
+  std::vector<SectionOrType> Members;
+
+  Group() : Section(SectionKind::Group) {}
+
+  static bool classof(const Section *S) {
+    return S->Kind == SectionKind::Group;
+  }
+};
+
+struct Relocation {
+  llvm::yaml::Hex64 Offset;
+  int64_t Addend;
+  ELF_REL Type;
+  Optional<StringRef> Symbol;
+};
+
+struct RelocationSection : Section {
+  std::vector<Relocation> Relocations;
+
+  RelocationSection() : Section(SectionKind::Relocation) {}
+
+  static bool classof(const Section *S) {
+    return S->Kind == SectionKind::Relocation;
+  }
+};
+
+// Represents the .MIPS.abiflags section.
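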
+struct MipsABIFlags : Section {
+  llvm::yaml::Hex16 Version;
+  MIPS_ISA ISALevel;
+  llvm::yaml::Hex8 ISARevision;
+  MIPS_AFL_REG GPRSize;
+  MIPS_AFL_REG CPR1Size;
+  MIPS_AFL_REG CPR2Size;
+  MIPS_ABI_FP FpABI;
+  MIPS_AFL_EXT ISAExtension;
+  MIPS_AFL_ASE ASEs;
+  MIPS_AFL_FLAGS1 Flags1;
+  llvm::yaml::Hex32 Flags2;
+
+  MipsABIFlags() : Section(SectionKind::MipsABIFlags) {}
+
+  static bool classof(const Section *S) {
+    return S->Kind == SectionKind::MipsABIFlags;
+  }
+};
+
+struct Object {
+  FileHeader Header;
+  std::vector<ProgramHeader> ProgramHeaders;
+  std::vector<std::unique_ptr<Section>> Sections;
+  // Although in reality the symbols reside in a section, it is a lot
+  // cleaner and nicer if we read them from the YAML as a separate
+  // top-level key, which automatically ensures that invariants like there
+  // being a single SHT_SYMTAB section are upheld.
+  LocalGlobalWeakSymbols Symbols;
+  LocalGlobalWeakSymbols DynamicSymbols;
+};
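+
+// A document of roughly this shape (as consumed by tools such as yaml2obj)
+// maps onto Object; the enum spellings are the standard ELF names and this
+// is only an illustrative sketch:
+//   --- !ELF
+//   FileHeader:
+//     Class:   ELFCLASS64
+//     Data:    ELFDATA2LSB
+//     Type:    ET_REL
+//     Machine: EM_X86_64
+//   Sections:
+//     - Name: .text
+//       Type: SHT_PROGBITS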
+
+} // end namespace ELFYAML
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::ProgramHeader)
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::ELFYAML::Section>)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Symbol)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Relocation)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionOrType)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionName)
+
+namespace llvm {
+namespace yaml {
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ET> {
+  static void enumeration(IO &IO, ELFYAML::ELF_ET &Value);
+};
+
+template <> struct ScalarEnumerationTraits<ELFYAML::ELF_PT> {
+  static void enumeration(IO &IO, ELFYAML::ELF_PT &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_EM> {
+  static void enumeration(IO &IO, ELFYAML::ELF_EM &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS> {
+  static void enumeration(IO &IO, ELFYAML::ELF_ELFCLASS &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA> {
+  static void enumeration(IO &IO, ELFYAML::ELF_ELFDATA &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI> {
+  static void enumeration(IO &IO, ELFYAML::ELF_ELFOSABI &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_EF> {
+  static void bitset(IO &IO, ELFYAML::ELF_EF &Value);
+};
+
+template <> struct ScalarBitSetTraits<ELFYAML::ELF_PF> {
+  static void bitset(IO &IO, ELFYAML::ELF_PF &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_SHT> {
+  static void enumeration(IO &IO, ELFYAML::ELF_SHT &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_SHF> {
+  static void bitset(IO &IO, ELFYAML::ELF_SHF &Value);
+};
+
+template <> struct ScalarEnumerationTraits<ELFYAML::ELF_SHN> {
+  static void enumeration(IO &IO, ELFYAML::ELF_SHN &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_STT> {
+  static void enumeration(IO &IO, ELFYAML::ELF_STT &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_STV> {
+  static void enumeration(IO &IO, ELFYAML::ELF_STV &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_STO> {
+  static void bitset(IO &IO, ELFYAML::ELF_STO &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_REL> {
+  static void enumeration(IO &IO, ELFYAML::ELF_REL &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_RSS> {
+  static void enumeration(IO &IO, ELFYAML::ELF_RSS &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG> {
+  static void enumeration(IO &IO, ELFYAML::MIPS_AFL_REG &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP> {
+  static void enumeration(IO &IO, ELFYAML::MIPS_ABI_FP &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT> {
+  static void enumeration(IO &IO, ELFYAML::MIPS_AFL_EXT &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_ISA> {
+  static void enumeration(IO &IO, ELFYAML::MIPS_ISA &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE> {
+  static void bitset(IO &IO, ELFYAML::MIPS_AFL_ASE &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1> {
+  static void bitset(IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value);
+};
+
+template <>
+struct MappingTraits<ELFYAML::FileHeader> {
+  static void mapping(IO &IO, ELFYAML::FileHeader &FileHdr);
+};
+
+template <> struct MappingTraits<ELFYAML::ProgramHeader> {
+  static void mapping(IO &IO, ELFYAML::ProgramHeader &Phdr);
+};
+
+template <>
+struct MappingTraits<ELFYAML::Symbol> {
+  static void mapping(IO &IO, ELFYAML::Symbol &Symbol);
+  static StringRef validate(IO &IO, ELFYAML::Symbol &Symbol);
+};
+
+template <>
+struct MappingTraits<ELFYAML::LocalGlobalWeakSymbols> {
+  static void mapping(IO &IO, ELFYAML::LocalGlobalWeakSymbols &Symbols);
+};
+
+template <> struct MappingTraits<ELFYAML::Relocation> {
+  static void mapping(IO &IO, ELFYAML::Relocation &Rel);
+};
+
+template <>
+struct MappingTraits<std::unique_ptr<ELFYAML::Section>> {
+  static void mapping(IO &IO, std::unique_ptr<ELFYAML::Section> &Section);
+  static StringRef validate(IO &io, std::unique_ptr<ELFYAML::Section> &Section);
+};
+
+template <>
+struct MappingTraits<ELFYAML::Object> {
+  static void mapping(IO &IO, ELFYAML::Object &Object);
+};
+
+template <> struct MappingTraits<ELFYAML::SectionOrType> {
+  static void mapping(IO &IO, ELFYAML::SectionOrType &sectionOrType);
+};
+
+template <> struct MappingTraits<ELFYAML::SectionName> {
+  static void mapping(IO &IO, ELFYAML::SectionName &sectionName);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_ELFYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/MachOYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/MachOYAML.h
new file mode 100644
index 0000000..1fa8f92
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/MachOYAML.h
@@ -0,0 +1,307 @@
+//===- MachOYAML.h - Mach-O YAMLIO implementation ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of Mach-O.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_MACHOYAML_H
+#define LLVM_OBJECTYAML_MACHOYAML_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ObjectYAML/DWARFYAML.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace MachOYAML {
+
+struct Section {
+  char sectname[16];
+  char segname[16];
+  llvm::yaml::Hex64 addr;
+  uint64_t size;
+  llvm::yaml::Hex32 offset;
+  uint32_t align;
+  llvm::yaml::Hex32 reloff;
+  uint32_t nreloc;
+  llvm::yaml::Hex32 flags;
+  llvm::yaml::Hex32 reserved1;
+  llvm::yaml::Hex32 reserved2;
+  llvm::yaml::Hex32 reserved3;
+};
+
+struct FileHeader {
+  llvm::yaml::Hex32 magic;
+  llvm::yaml::Hex32 cputype;
+  llvm::yaml::Hex32 cpusubtype;
+  llvm::yaml::Hex32 filetype;
+  uint32_t ncmds;
+  uint32_t sizeofcmds;
+  llvm::yaml::Hex32 flags;
+  llvm::yaml::Hex32 reserved;
+};
+
+struct LoadCommand {
+  virtual ~LoadCommand();
+
+  llvm::MachO::macho_load_command Data;
+  std::vector<Section> Sections;
+  std::vector<MachO::build_tool_version> Tools;
+  std::vector<llvm::yaml::Hex8> PayloadBytes;
+  std::string PayloadString;
+  uint64_t ZeroPadBytes;
+};
+
+struct NListEntry {
+  uint32_t n_strx;
+  llvm::yaml::Hex8 n_type;
+  uint8_t n_sect;
+  uint16_t n_desc;
+  uint64_t n_value;
+};
+
+struct RebaseOpcode {
+  MachO::RebaseOpcode Opcode;
+  uint8_t Imm;
+  std::vector<yaml::Hex64> ExtraData;
+};
+
+struct BindOpcode {
+  MachO::BindOpcode Opcode;
+  uint8_t Imm;
+  std::vector<yaml::Hex64> ULEBExtraData;
+  std::vector<int64_t> SLEBExtraData;
+  StringRef Symbol;
+};
+
+struct ExportEntry {
+  uint64_t TerminalSize = 0;
+  uint64_t NodeOffset = 0;
+  std::string Name;
+  llvm::yaml::Hex64 Flags = 0;
+  llvm::yaml::Hex64 Address = 0;
+  llvm::yaml::Hex64 Other = 0;
+  std::string ImportName;
+  std::vector<MachOYAML::ExportEntry> Children;
+};
+
+struct LinkEditData {
+  std::vector<MachOYAML::RebaseOpcode> RebaseOpcodes;
+  std::vector<MachOYAML::BindOpcode> BindOpcodes;
+  std::vector<MachOYAML::BindOpcode> WeakBindOpcodes;
+  std::vector<MachOYAML::BindOpcode> LazyBindOpcodes;
+  MachOYAML::ExportEntry ExportTrie;
+  std::vector<NListEntry> NameList;
+  std::vector<StringRef> StringTable;
+
+  bool isEmpty() const;
+};
+
+struct Object {
+  bool IsLittleEndian;
+  FileHeader Header;
+  std::vector<LoadCommand> LoadCommands;
+  std::vector<Section> Sections;
+  LinkEditData LinkEdit;
+  DWARFYAML::Data DWARF;
+};
+
+struct FatHeader {
+  llvm::yaml::Hex32 magic;
+  uint32_t nfat_arch;
+};
+
+struct FatArch {
+  llvm::yaml::Hex32 cputype;
+  llvm::yaml::Hex32 cpusubtype;
+  llvm::yaml::Hex64 offset;
+  uint64_t size;
+  uint32_t align;
+  llvm::yaml::Hex32 reserved;
+};
+
+struct UniversalBinary {
+  FatHeader Header;
+  std::vector<FatArch> FatArchs;
+  std::vector<Object> Slices;
+};
+
+} // end namespace MachOYAML
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::LoadCommand)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Section)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::RebaseOpcode)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::BindOpcode)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::ExportEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::NListEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Object)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::FatArch)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachO::build_tool_version)
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace yaml {
+
+template <> struct MappingTraits<MachOYAML::FileHeader> {
+  static void mapping(IO &IO, MachOYAML::FileHeader &FileHeader);
+};
+
+template <> struct MappingTraits<MachOYAML::Object> {
+  static void mapping(IO &IO, MachOYAML::Object &Object);
+};
+
+template <> struct MappingTraits<MachOYAML::FatHeader> {
+  static void mapping(IO &IO, MachOYAML::FatHeader &FatHeader);
+};
+
+template <> struct MappingTraits<MachOYAML::FatArch> {
+  static void mapping(IO &IO, MachOYAML::FatArch &FatArch);
+};
+
+template <> struct MappingTraits<MachOYAML::UniversalBinary> {
+  static void mapping(IO &IO, MachOYAML::UniversalBinary &UniversalBinary);
+};
+
+template <> struct MappingTraits<MachOYAML::LoadCommand> {
+  static void mapping(IO &IO, MachOYAML::LoadCommand &LoadCommand);
+};
+
+template <> struct MappingTraits<MachOYAML::LinkEditData> {
+  static void mapping(IO &IO, MachOYAML::LinkEditData &LinkEditData);
+};
+
+template <> struct MappingTraits<MachOYAML::RebaseOpcode> {
+  static void mapping(IO &IO, MachOYAML::RebaseOpcode &RebaseOpcode);
+};
+
+template <> struct MappingTraits<MachOYAML::BindOpcode> {
+  static void mapping(IO &IO, MachOYAML::BindOpcode &BindOpcode);
+};
+
+template <> struct MappingTraits<MachOYAML::ExportEntry> {
+  static void mapping(IO &IO, MachOYAML::ExportEntry &ExportEntry);
+};
+
+template <> struct MappingTraits<MachOYAML::Section> {
+  static void mapping(IO &IO, MachOYAML::Section &Section);
+};
+
+template <> struct MappingTraits<MachOYAML::NListEntry> {
+  static void mapping(IO &IO, MachOYAML::NListEntry &NListEntry);
+};
+
+template <> struct MappingTraits<MachO::build_tool_version> {
+  static void mapping(IO &IO, MachO::build_tool_version &tool);
+};
+
+#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct)                         \
+  io.enumCase(value, #LCName, MachO::LCName);
+
+template <> struct ScalarEnumerationTraits<MachO::LoadCommandType> {
+  static void enumeration(IO &io, MachO::LoadCommandType &value) {
+#include "llvm/BinaryFormat/MachO.def"
+    io.enumFallback<Hex32>(value);
+  }
+};
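+
+// Including MachO.def above stamps out one enumCase per load command through
+// HANDLE_LOAD_COMMAND; e.g. one generated line is (illustrative):
+//   io.enumCase(value, "LC_SEGMENT", MachO::LC_SEGMENT);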
+
+#define ENUM_CASE(Enum) io.enumCase(value, #Enum, MachO::Enum);
+
+template <> struct ScalarEnumerationTraits<MachO::RebaseOpcode> {
+  static void enumeration(IO &io, MachO::RebaseOpcode &value) {
+    ENUM_CASE(REBASE_OPCODE_DONE)
+    ENUM_CASE(REBASE_OPCODE_SET_TYPE_IMM)
+    ENUM_CASE(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB)
+    ENUM_CASE(REBASE_OPCODE_ADD_ADDR_ULEB)
+    ENUM_CASE(REBASE_OPCODE_ADD_ADDR_IMM_SCALED)
+    ENUM_CASE(REBASE_OPCODE_DO_REBASE_IMM_TIMES)
+    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ULEB_TIMES)
+    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
+    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB)
+    io.enumFallback<Hex8>(value);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<MachO::BindOpcode> {
+  static void enumeration(IO &io, MachO::BindOpcode &value) {
+    ENUM_CASE(BIND_OPCODE_DONE)
+    ENUM_CASE(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM)
+    ENUM_CASE(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB)
+    ENUM_CASE(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM)
+    ENUM_CASE(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
+    ENUM_CASE(BIND_OPCODE_SET_TYPE_IMM)
+    ENUM_CASE(BIND_OPCODE_SET_ADDEND_SLEB)
+    ENUM_CASE(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB)
+    ENUM_CASE(BIND_OPCODE_ADD_ADDR_ULEB)
+    ENUM_CASE(BIND_OPCODE_DO_BIND)
+    ENUM_CASE(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
+    ENUM_CASE(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED)
+    ENUM_CASE(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB)
+    io.enumFallback<Hex8>(value);
+  }
+};
+
+// This trait is used for the 16-byte, fixed-length character arrays in
+// Mach-O structures that hold strings.
+using char_16 = char[16];
+
+template <> struct ScalarTraits<char_16> {
+  static void output(const char_16 &Val, void *, raw_ostream &Out);
+  static StringRef input(StringRef Scalar, void *, char_16 &Val);
+  static QuotingType mustQuote(StringRef S);
+};
+
+// This trait is used for UUIDs. It reads and writes them matching otool's
+// formatting style.
+using uuid_t = raw_ostream::uuid_t;
+
+template <> struct ScalarTraits<uuid_t> {
+  static void output(const uuid_t &Val, void *, raw_ostream &Out);
+  static StringRef input(StringRef Scalar, void *, uuid_t &Val);
+  static QuotingType mustQuote(StringRef S);
+};
+
+// Load Command struct mapping traits
+
+#define LOAD_COMMAND_STRUCT(LCStruct)                                          \
+  template <> struct MappingTraits<MachO::LCStruct> {                          \
+    static void mapping(IO &IO, MachO::LCStruct &LoadCommand);                 \
+  };
+
+#include "llvm/BinaryFormat/MachO.def"
+
+// Extra structures used by load commands
+template <> struct MappingTraits<MachO::dylib> {
+  static void mapping(IO &IO, MachO::dylib &LoadCommand);
+};
+
+template <> struct MappingTraits<MachO::fvmlib> {
+  static void mapping(IO &IO, MachO::fvmlib &LoadCommand);
+};
+
+template <> struct MappingTraits<MachO::section> {
+  static void mapping(IO &IO, MachO::section &LoadCommand);
+};
+
+template <> struct MappingTraits<MachO::section_64> {
+  static void mapping(IO &IO, MachO::section_64 &LoadCommand);
+};
+
+} // end namespace yaml
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_MACHOYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/ObjectYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/ObjectYAML.h
new file mode 100644
index 0000000..00ce864
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/ObjectYAML.h
@@ -0,0 +1,40 @@
+//===- ObjectYAML.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_OBJECTYAML_H
+#define LLVM_OBJECTYAML_OBJECTYAML_H
+
+#include "llvm/ObjectYAML/COFFYAML.h"
+#include "llvm/ObjectYAML/ELFYAML.h"
+#include "llvm/ObjectYAML/MachOYAML.h"
+#include "llvm/ObjectYAML/WasmYAML.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <memory>
+
+namespace llvm {
+namespace yaml {
+
+class IO;
+
+struct YamlObjectFile {
+  std::unique_ptr<ELFYAML::Object> Elf;
+  std::unique_ptr<COFFYAML::Object> Coff;
+  std::unique_ptr<MachOYAML::Object> MachO;
+  std::unique_ptr<MachOYAML::UniversalBinary> FatMachO;
+  std::unique_ptr<WasmYAML::Object> Wasm;
+};
+
+template <> struct MappingTraits<YamlObjectFile> {
+  static void mapping(IO &IO, YamlObjectFile &ObjectFile);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_OBJECTYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/WasmYAML.h b/linux-x64/clang/include/llvm/ObjectYAML/WasmYAML.h
new file mode 100644
index 0000000..1c5e77e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/WasmYAML.h
@@ -0,0 +1,476 @@
+//===- WasmYAML.h - Wasm YAMLIO implementation ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of wasm binaries.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_WASMYAML_H
+#define LLVM_OBJECTYAML_WASMYAML_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace WasmYAML {
+
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, SectionType)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ValueType)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, TableType)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, SignatureForm)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ExportKind)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, Opcode)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, RelocType)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, SymbolFlags)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, SymbolKind)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, SegmentFlags)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, LimitFlags)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ComdatKind)
+
+struct FileHeader {
+  yaml::Hex32 Version;
+};
+
+struct Limits {
+  LimitFlags Flags;
+  yaml::Hex32 Initial;
+  yaml::Hex32 Maximum;
+};
+
+struct Table {
+  TableType ElemType;
+  Limits TableLimits;
+};
+
+struct Export {
+  StringRef Name;
+  ExportKind Kind;
+  uint32_t Index;
+};
+
+struct ElemSegment {
+  uint32_t TableIndex;
+  wasm::WasmInitExpr Offset;
+  std::vector<uint32_t> Functions;
+};
+
+struct Global {
+  uint32_t Index;
+  ValueType Type;
+  bool Mutable;
+  wasm::WasmInitExpr InitExpr;
+};
+
+struct Import {
+  StringRef Module;
+  StringRef Field;
+  ExportKind Kind;
+  union {
+    uint32_t SigIndex;
+    Global GlobalImport;
+    Table TableImport;
+    Limits Memory;
+  };
+};
+
+struct LocalDecl {
+  ValueType Type;
+  uint32_t Count;
+};
+
+struct Function {
+  uint32_t Index;
+  std::vector<LocalDecl> Locals;
+  yaml::BinaryRef Body;
+};
+
+struct Relocation {
+  RelocType Type;
+  uint32_t Index;
+  yaml::Hex32 Offset;
+  int32_t Addend;
+};
+
+struct DataSegment {
+  uint32_t MemoryIndex;
+  uint32_t SectionOffset;
+  wasm::WasmInitExpr Offset;
+  yaml::BinaryRef Content;
+};
+
+struct NameEntry {
+  uint32_t Index;
+  StringRef Name;
+};
+
+struct SegmentInfo {
+  uint32_t Index;
+  StringRef Name;
+  uint32_t Alignment;
+  SegmentFlags Flags;
+};
+
+struct Signature {
+  uint32_t Index;
+  SignatureForm Form = wasm::WASM_TYPE_FUNC;
+  std::vector<ValueType> ParamTypes;
+  ValueType ReturnType;
+};
+
+struct SymbolInfo {
+  uint32_t Index;
+  StringRef Name;
+  SymbolKind Kind;
+  SymbolFlags Flags;
+  union {
+    uint32_t ElementIndex;
+    wasm::WasmDataReference DataRef;
+  };
+};
+
+struct InitFunction {
+  uint32_t Priority;
+  uint32_t Symbol;
+};
+
+struct ComdatEntry {
+  ComdatKind Kind;
+  uint32_t Index;
+};
+
+struct Comdat {
+  StringRef Name;
+  std::vector<ComdatEntry> Entries;
+};
+
+struct Section {
+  explicit Section(SectionType SecType) : Type(SecType) {}
+  virtual ~Section();
+
+  SectionType Type;
+  std::vector<Relocation> Relocations;
+};
+
+struct CustomSection : Section {
+  explicit CustomSection(StringRef Name)
+      : Section(wasm::WASM_SEC_CUSTOM), Name(Name) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_CUSTOM;
+  }
+
+  StringRef Name;
+  yaml::BinaryRef Payload;
+};
+
+struct NameSection : CustomSection {
+  NameSection() : CustomSection("name") {}
+
+  static bool classof(const Section *S) {
+    auto C = dyn_cast<CustomSection>(S);
+    return C && C->Name == "name";
+  }
+
+  std::vector<NameEntry> FunctionNames;
+};
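+
+// Note the two-level classof above: a NameSection is identified by first
+// checking for a CustomSection and then matching its Name, so a plain
+// Section pointer can be queried directly (illustrative sketch; Sec and
+// consumeNames are hypothetical):
+//   if (auto *NS = dyn_cast<NameSection>(Sec.get()))
+//     consumeNames(NS->FunctionNames);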
+
+struct LinkingSection : CustomSection {
+  LinkingSection() : CustomSection("linking") {}
+
+  static bool classof(const Section *S) {
+    auto C = dyn_cast<CustomSection>(S);
+    return C && C->Name == "linking";
+  }
+
+  std::vector<SymbolInfo> SymbolTable;
+  std::vector<SegmentInfo> SegmentInfos;
+  std::vector<InitFunction> InitFunctions;
+  std::vector<Comdat> Comdats;
+};
+
+struct TypeSection : Section {
+  TypeSection() : Section(wasm::WASM_SEC_TYPE) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_TYPE;
+  }
+
+  std::vector<Signature> Signatures;
+};
+
+struct ImportSection : Section {
+  ImportSection() : Section(wasm::WASM_SEC_IMPORT) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_IMPORT;
+  }
+
+  std::vector<Import> Imports;
+};
+
+struct FunctionSection : Section {
+  FunctionSection() : Section(wasm::WASM_SEC_FUNCTION) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_FUNCTION;
+  }
+
+  std::vector<uint32_t> FunctionTypes;
+};
+
+struct TableSection : Section {
+  TableSection() : Section(wasm::WASM_SEC_TABLE) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_TABLE;
+  }
+
+  std::vector<Table> Tables;
+};
+
+struct MemorySection : Section {
+  MemorySection() : Section(wasm::WASM_SEC_MEMORY) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_MEMORY;
+  }
+
+  std::vector<Limits> Memories;
+};
+
+struct GlobalSection : Section {
+  GlobalSection() : Section(wasm::WASM_SEC_GLOBAL) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_GLOBAL;
+  }
+
+  std::vector<Global> Globals;
+};
+
+struct ExportSection : Section {
+  ExportSection() : Section(wasm::WASM_SEC_EXPORT) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_EXPORT;
+  }
+
+  std::vector<Export> Exports;
+};
+
+struct StartSection : Section {
+  StartSection() : Section(wasm::WASM_SEC_START) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_START;
+  }
+
+  uint32_t StartFunction;
+};
+
+struct ElemSection : Section {
+  ElemSection() : Section(wasm::WASM_SEC_ELEM) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_ELEM;
+  }
+
+  std::vector<ElemSegment> Segments;
+};
+
+struct CodeSection : Section {
+  CodeSection() : Section(wasm::WASM_SEC_CODE) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_CODE;
+  }
+
+  std::vector<Function> Functions;
+};
+
+struct DataSection : Section {
+  DataSection() : Section(wasm::WASM_SEC_DATA) {}
+
+  static bool classof(const Section *S) {
+    return S->Type == wasm::WASM_SEC_DATA;
+  }
+
+  std::vector<DataSegment> Segments;
+};
+
+struct Object {
+  FileHeader Header;
+  std::vector<std::unique_ptr<Section>> Sections;
+};
+
+} // end namespace WasmYAML
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::WasmYAML::Section>)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Signature)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ValueType)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Table)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Import)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Export)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ElemSegment)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Limits)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::DataSegment)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Global)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Function)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::LocalDecl)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Relocation)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::NameEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::SegmentInfo)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::SymbolInfo)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::InitFunction)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ComdatEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Comdat)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<WasmYAML::FileHeader> {
+  static void mapping(IO &IO, WasmYAML::FileHeader &FileHdr);
+};
+
+template <> struct MappingTraits<std::unique_ptr<WasmYAML::Section>> {
+  static void mapping(IO &IO, std::unique_ptr<WasmYAML::Section> &Section);
+};
+
+template <> struct MappingTraits<WasmYAML::Object> {
+  static void mapping(IO &IO, WasmYAML::Object &Object);
+};
+
+template <> struct MappingTraits<WasmYAML::Import> {
+  static void mapping(IO &IO, WasmYAML::Import &Import);
+};
+
+template <> struct MappingTraits<WasmYAML::Export> {
+  static void mapping(IO &IO, WasmYAML::Export &Export);
+};
+
+template <> struct MappingTraits<WasmYAML::Global> {
+  static void mapping(IO &IO, WasmYAML::Global &Global);
+};
+
+template <> struct ScalarBitSetTraits<WasmYAML::LimitFlags> {
+  static void bitset(IO &IO, WasmYAML::LimitFlags &Value);
+};
+
+template <> struct ScalarBitSetTraits<WasmYAML::SymbolFlags> {
+  static void bitset(IO &IO, WasmYAML::SymbolFlags &Value);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::SymbolKind> {
+  static void enumeration(IO &IO, WasmYAML::SymbolKind &Kind);
+};
+
+template <> struct ScalarBitSetTraits<WasmYAML::SegmentFlags> {
+  static void bitset(IO &IO, WasmYAML::SegmentFlags &Value);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::SectionType> {
+  static void enumeration(IO &IO, WasmYAML::SectionType &Type);
+};
+
+template <> struct MappingTraits<WasmYAML::Signature> {
+  static void mapping(IO &IO, WasmYAML::Signature &Signature);
+};
+
+template <> struct MappingTraits<WasmYAML::Table> {
+  static void mapping(IO &IO, WasmYAML::Table &Table);
+};
+
+template <> struct MappingTraits<WasmYAML::Limits> {
+  static void mapping(IO &IO, WasmYAML::Limits &Limits);
+};
+
+template <> struct MappingTraits<WasmYAML::Function> {
+  static void mapping(IO &IO, WasmYAML::Function &Function);
+};
+
+template <> struct MappingTraits<WasmYAML::Relocation> {
+  static void mapping(IO &IO, WasmYAML::Relocation &Relocation);
+};
+
+template <> struct MappingTraits<WasmYAML::NameEntry> {
+  static void mapping(IO &IO, WasmYAML::NameEntry &NameEntry);
+};
+
+template <> struct MappingTraits<WasmYAML::SegmentInfo> {
+  static void mapping(IO &IO, WasmYAML::SegmentInfo &SegmentInfo);
+};
+
+template <> struct MappingTraits<WasmYAML::LocalDecl> {
+  static void mapping(IO &IO, WasmYAML::LocalDecl &LocalDecl);
+};
+
+template <> struct MappingTraits<wasm::WasmInitExpr> {
+  static void mapping(IO &IO, wasm::WasmInitExpr &Expr);
+};
+
+template <> struct MappingTraits<WasmYAML::DataSegment> {
+  static void mapping(IO &IO, WasmYAML::DataSegment &Segment);
+};
+
+template <> struct MappingTraits<WasmYAML::ElemSegment> {
+  static void mapping(IO &IO, WasmYAML::ElemSegment &Segment);
+};
+
+template <> struct MappingTraits<WasmYAML::SymbolInfo> {
+  static void mapping(IO &IO, WasmYAML::SymbolInfo &Info);
+};
+
+template <> struct MappingTraits<WasmYAML::InitFunction> {
+  static void mapping(IO &IO, WasmYAML::InitFunction &Init);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::ComdatKind> {
+  static void enumeration(IO &IO, WasmYAML::ComdatKind &Kind);
+};
+
+template <> struct MappingTraits<WasmYAML::ComdatEntry> {
+  static void mapping(IO &IO, WasmYAML::ComdatEntry &ComdatEntry);
+};
+
+template <> struct MappingTraits<WasmYAML::Comdat> {
+  static void mapping(IO &IO, WasmYAML::Comdat &Comdat);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::ValueType> {
+  static void enumeration(IO &IO, WasmYAML::ValueType &Type);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::ExportKind> {
+  static void enumeration(IO &IO, WasmYAML::ExportKind &Kind);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::TableType> {
+  static void enumeration(IO &IO, WasmYAML::TableType &Type);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::Opcode> {
+  static void enumeration(IO &IO, WasmYAML::Opcode &Opcode);
+};
+
+template <> struct ScalarEnumerationTraits<WasmYAML::RelocType> {
+  static void enumeration(IO &IO, WasmYAML::RelocType &Kind);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_WASMYAML_H
diff --git a/linux-x64/clang/include/llvm/ObjectYAML/YAML.h b/linux-x64/clang/include/llvm/ObjectYAML/YAML.h
new file mode 100644
index 0000000..93266dd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ObjectYAML/YAML.h
@@ -0,0 +1,117 @@
+//===- YAML.h ---------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTYAML_YAML_H
+#define LLVM_OBJECTYAML_YAML_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace yaml {
+
+/// \brief Specialized YAMLIO scalar type for representing a binary blob.
+///
+/// A typical use case would be to represent the content of a section in a
+/// binary file.
+/// This class has custom YAMLIO traits for convenient reading and writing.
+/// It renders as a string of hex digits in a YAML file.
+/// For example, it might render as `DEADBEEFCAFEBABE` (YAML does not
+/// require the quotation marks, so for simplicity when outputting they are
+/// omitted).
+/// When reading, any string whose content is an even number of hex digits
+/// will be accepted.
+/// For example, all of the following are acceptable:
+/// `DEADBEEF`, `"DeADbEeF"`, `"\x44EADBEEF"` (Note: '\x44' == 'D')
+///
+/// A significant advantage of using this class is that it never allocates
+/// temporary strings or buffers for any of its functionality.
+///
+/// Example:
+///
+/// The YAML mapping:
+/// \code
+/// Foo: DEADBEEFCAFEBABE
+/// \endcode
+///
+/// Could be modeled in YAMLIO by the struct:
+/// \code
+/// struct FooHolder {
+///   BinaryRef Foo;
+/// };
+/// namespace llvm {
+/// namespace yaml {
+/// template <>
+/// struct MappingTraits<FooHolder> {
+///   static void mapping(IO &IO, FooHolder &FH) {
+///     IO.mapRequired("Foo", FH.Foo);
+///   }
+/// };
+/// } // end namespace yaml
+/// } // end namespace llvm
+/// \endcode
+class BinaryRef {
+  friend bool operator==(const BinaryRef &LHS, const BinaryRef &RHS);
+
+  /// \brief Either raw binary data, or a string of hex bytes (must always
+  /// be an even number of characters).
+  ArrayRef<uint8_t> Data;
+
+  /// \brief Discriminator between the two states of the `Data` member.
+  bool DataIsHexString = true;
+
+public:
+  BinaryRef() = default;
+  BinaryRef(ArrayRef<uint8_t> Data) : Data(Data), DataIsHexString(false) {}
+  BinaryRef(StringRef Data)
+      : Data(reinterpret_cast<const uint8_t *>(Data.data()), Data.size()) {}
+
+  /// \brief The number of bytes that are represented by this BinaryRef.
+  /// This is the number of bytes that writeAsBinary() will write.
+  ArrayRef<uint8_t>::size_type binary_size() const {
+    if (DataIsHexString)
+      return Data.size() / 2;
+    return Data.size();
+  }
+
+  /// \brief Write the contents (regardless of whether it is binary or a
+  /// hex string) as binary to the given raw_ostream.
+  void writeAsBinary(raw_ostream &OS) const;
+
+  /// \brief Write the contents (regardless of whether it is binary or a
+  /// hex string) as hex to the given raw_ostream.
+  ///
+  /// For example, a possible output could be `DEADBEEFCAFEBABE`.
+  void writeAsHex(raw_ostream &OS) const;
+};
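+
+// Illustrative sketch of the two states: BinaryRef("DEADBEEF") keeps the hex
+// string and reports binary_size() == 4, while a BinaryRef built from an
+// ArrayRef<uint8_t> of four raw bytes also reports binary_size() == 4 but
+// with DataIsHexString set to false.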
+
+inline bool operator==(const BinaryRef &LHS, const BinaryRef &RHS) {
+  // Special case for default constructed BinaryRef.
+  if (LHS.Data.empty() && RHS.Data.empty())
+    return true;
+
+  return LHS.DataIsHexString == RHS.DataIsHexString && LHS.Data == RHS.Data;
+}
+
+template <> struct ScalarTraits<BinaryRef> {
+  static void output(const BinaryRef &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, BinaryRef &);
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+} // end namespace yaml
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECTYAML_YAML_H
diff --git a/linux-x64/clang/include/llvm/Option/Arg.h b/linux-x64/clang/include/llvm/Option/Arg.h
new file mode 100644
index 0000000..c519a4a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/Arg.h
@@ -0,0 +1,132 @@
+//===- Arg.h - Parsed Argument Classes --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines the llvm::Arg class for parsed arguments.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_ARG_H
+#define LLVM_OPTION_ARG_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace opt {
+
+class ArgList;
+
+/// \brief A concrete instance of a particular driver option.
+///
+/// The Arg class encodes just enough information to be able to
+/// derive the argument values efficiently.
+class Arg {
+private:
+  /// \brief The option this argument is an instance of.
+  const Option Opt;
+
+  /// \brief The argument this argument was derived from (during tool chain
+  /// argument translation), if any.
+  const Arg *BaseArg;
+
+  /// \brief How this instance of the option was spelled.
+  StringRef Spelling;
+
+  /// \brief The index at which this argument appears in the containing
+  /// ArgList.
+  unsigned Index;
+
+  /// \brief Was this argument used to effect compilation?
+  ///
+  /// This is used for generating "argument unused" diagnostics.
+  mutable unsigned Claimed : 1;
+
+  /// \brief Does this argument own its values?
+  mutable unsigned OwnsValues : 1;
+
+  /// \brief The argument values, as C strings.
+  SmallVector<const char *, 2> Values;
+
+public:
+  Arg(const Option Opt, StringRef Spelling, unsigned Index,
+      const Arg *BaseArg = nullptr);
+  Arg(const Option Opt, StringRef Spelling, unsigned Index,
+      const char *Value0, const Arg *BaseArg = nullptr);
+  Arg(const Option Opt, StringRef Spelling, unsigned Index,
+      const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
+  Arg(const Arg &) = delete;
+  Arg &operator=(const Arg &) = delete;
+  ~Arg();
+
+  const Option &getOption() const { return Opt; }
+  StringRef getSpelling() const { return Spelling; }
+  unsigned getIndex() const { return Index; }
+
+  /// \brief Return the base argument which generated this arg.
+  ///
+  /// This is either the argument itself or the argument it was
+  /// derived from during tool chain specific argument translation.
+  const Arg &getBaseArg() const {
+    return BaseArg ? *BaseArg : *this;
+  }
+  void setBaseArg(const Arg *BaseArg) { this->BaseArg = BaseArg; }
+
+  bool getOwnsValues() const { return OwnsValues; }
+  void setOwnsValues(bool Value) const { OwnsValues = Value; }
+
+  bool isClaimed() const { return getBaseArg().Claimed; }
+
+  /// \brief Set the Arg claimed bit.
+  void claim() const { getBaseArg().Claimed = true; }
+
+  unsigned getNumValues() const { return Values.size(); }
+
+  const char *getValue(unsigned N = 0) const {
+    return Values[N];
+  }
+
+  SmallVectorImpl<const char *> &getValues() { return Values; }
+  const SmallVectorImpl<const char *> &getValues() const { return Values; }
+
+  bool containsValue(StringRef Value) const {
+    for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+      if (Values[i] == Value)
+        return true;
+    return false;
+  }
+
+  /// \brief Append the argument onto the given array as strings.
+  void render(const ArgList &Args, ArgStringList &Output) const;
+
+  /// \brief Append the argument, rendered as an input, onto the given
+  /// array as strings.
+  ///
+  /// The distinction is that some options only render their values
+  /// when rendered as an input (e.g., Xlinker).
+  void renderAsInput(const ArgList &Args, ArgStringList &Output) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+
+  /// \brief Return a formatted version of the argument and
+  /// its values, for debugging and diagnostics.
+  std::string getAsString(const ArgList &Args) const;
+};
+
+} // end namespace opt
+
+} // end namespace llvm
+
+#endif // LLVM_OPTION_ARG_H
diff --git a/linux-x64/clang/include/llvm/Option/ArgList.h b/linux-x64/clang/include/llvm/Option/ArgList.h
new file mode 100644
index 0000000..a80921f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/ArgList.h
@@ -0,0 +1,523 @@
+//===- ArgList.h - Argument List Management ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_ARGLIST_H
+#define LLVM_OPTION_ARGLIST_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/Option.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace opt {
+
+/// arg_iterator - Iterates through arguments stored inside an ArgList.
+template<typename BaseIter, unsigned NumOptSpecifiers = 0>
+class arg_iterator {
+  /// The current argument and the end of the sequence we're iterating.
+  BaseIter Current, End;
+
+  /// Optional filters on the arguments which will be matched. To avoid a
+  /// zero-sized array, we store one specifier even if we're asked for none.
+  OptSpecifier Ids[NumOptSpecifiers ? NumOptSpecifiers : 1];
+
+  void SkipToNextArg() {
+    for (; Current != End; ++Current) {
+      // Skip erased elements.
+      if (!*Current)
+        continue;
+
+      // Done if there are no filters.
+      if (!NumOptSpecifiers)
+        return;
+
+      // Otherwise require a match.
+      const Option &O = (*Current)->getOption();
+      for (auto Id : Ids) {
+        if (!Id.isValid())
+          break;
+        if (O.matches(Id))
+          return;
+      }
+    }
+  }
+
+  using Traits = std::iterator_traits<BaseIter>;
+
+public:
+  using value_type = typename Traits::value_type;
+  using reference = typename Traits::reference;
+  using pointer = typename Traits::pointer;
+  using iterator_category = std::forward_iterator_tag;
+  using difference_type = std::ptrdiff_t;
+
+  arg_iterator(
+      BaseIter Current, BaseIter End,
+      const OptSpecifier (&Ids)[NumOptSpecifiers ? NumOptSpecifiers : 1] = {})
+      : Current(Current), End(End) {
+    for (unsigned I = 0; I != NumOptSpecifiers; ++I)
+      this->Ids[I] = Ids[I];
+    SkipToNextArg();
+  }
+
+  // FIXME: This conversion function makes no sense.
+  operator const Arg*() { return *Current; }
+
+  reference operator*() const { return *Current; }
+  pointer operator->() const { return Current; }
+
+  arg_iterator &operator++() {
+    ++Current;
+    SkipToNextArg();
+    return *this;
+  }
+
+  arg_iterator operator++(int) {
+    arg_iterator tmp(*this);
+    ++(*this);
+    return tmp;
+  }
+
+  friend bool operator==(arg_iterator LHS, arg_iterator RHS) {
+    return LHS.Current == RHS.Current;
+  }
+  friend bool operator!=(arg_iterator LHS, arg_iterator RHS) {
+    return !(LHS == RHS);
+  }
+};
+
+/// ArgList - Ordered collection of driver arguments.
+///
+/// The ArgList class manages a list of Arg instances as well as
+/// auxiliary data and convenience methods to allow Tools to quickly
+/// check for the presence of Arg instances for a particular Option
+/// and to iterate over groups of arguments.
+class ArgList {
+public:
+  using arglist_type = SmallVector<Arg *, 16>;
+  using iterator = arg_iterator<arglist_type::iterator>;
+  using const_iterator = arg_iterator<arglist_type::const_iterator>;
+  using reverse_iterator = arg_iterator<arglist_type::reverse_iterator>;
+  using const_reverse_iterator =
+      arg_iterator<arglist_type::const_reverse_iterator>;
+
+  template<unsigned N> using filtered_iterator =
+      arg_iterator<arglist_type::const_iterator, N>;
+  template<unsigned N> using filtered_reverse_iterator =
+      arg_iterator<arglist_type::const_reverse_iterator, N>;
+
+private:
+  /// The internal list of arguments.
+  arglist_type Args;
+
+  using OptRange = std::pair<unsigned, unsigned>;
+  static OptRange emptyRange() { return {-1u, 0u}; }
+
+  /// The first and last index of each different OptSpecifier ID.
+  DenseMap<unsigned, OptRange> OptRanges;
+
+  /// Get the range of indexes in which options with the specified IDs might
+  /// reside, or (0, 0) if there are no such options.
+  OptRange getRange(std::initializer_list<OptSpecifier> Ids) const;
+
+protected:
+  // Make the default special members protected so they won't be used to slice
+  // derived objects, but can still be used by derived objects to implement
+  // their own special members.
+  ArgList() = default;
+
+  // Explicit move operations to ensure the container is cleared post-move
+  // otherwise it could lead to a double-delete in the case of moving of an
+  // InputArgList which deletes the contents of the container. If we could fix
+  // up the ownership here (delegate storage/ownership to the derived class so
+  // it can be a container of unique_ptr) this would be simpler.
+  ArgList(ArgList &&RHS)
+      : Args(std::move(RHS.Args)), OptRanges(std::move(RHS.OptRanges)) {
+    RHS.Args.clear();
+    RHS.OptRanges.clear();
+  }
+
+  ArgList &operator=(ArgList &&RHS) {
+    Args = std::move(RHS.Args);
+    RHS.Args.clear();
+    OptRanges = std::move(RHS.OptRanges);
+    RHS.OptRanges.clear();
+    return *this;
+  }
+
+  // Protect the dtor to ensure this type is never destroyed polymorphically.
+  ~ArgList() = default;
+
+  // Implicitly convert a value to an OptSpecifier. Used to work around a bug
+  // in MSVC's implementation of narrowing conversion checking.
+  static OptSpecifier toOptSpecifier(OptSpecifier S) { return S; }
+
+public:
+  /// @name Arg Access
+  /// @{
+
+  /// append - Append \p A to the arg list.
+  void append(Arg *A);
+
+  const arglist_type &getArgs() const { return Args; }
+
+  unsigned size() const { return Args.size(); }
+
+  /// @}
+  /// @name Arg Iteration
+  /// @{
+
+  iterator begin() { return {Args.begin(), Args.end()}; }
+  iterator end() { return {Args.end(), Args.end()}; }
+
+  reverse_iterator rbegin() { return {Args.rbegin(), Args.rend()}; }
+  reverse_iterator rend() { return {Args.rend(), Args.rend()}; }
+
+  const_iterator begin() const { return {Args.begin(), Args.end()}; }
+  const_iterator end() const { return {Args.end(), Args.end()}; }
+
+  const_reverse_iterator rbegin() const { return {Args.rbegin(), Args.rend()}; }
+  const_reverse_iterator rend() const { return {Args.rend(), Args.rend()}; }
+
+  template<typename ...OptSpecifiers>
+  iterator_range<filtered_iterator<sizeof...(OptSpecifiers)>>
+  filtered(OptSpecifiers ...Ids) const {
+    OptRange Range = getRange({toOptSpecifier(Ids)...});
+    auto B = Args.begin() + Range.first;
+    auto E = Args.begin() + Range.second;
+    using Iterator = filtered_iterator<sizeof...(OptSpecifiers)>;
+    return make_range(Iterator(B, E, {toOptSpecifier(Ids)...}),
+                      Iterator(E, E, {toOptSpecifier(Ids)...}));
+  }
+
+  template<typename ...OptSpecifiers>
+  iterator_range<filtered_reverse_iterator<sizeof...(OptSpecifiers)>>
+  filtered_reverse(OptSpecifiers ...Ids) const {
+    OptRange Range = getRange({toOptSpecifier(Ids)...});
+    auto B = Args.rend() - Range.second;
+    auto E = Args.rend() - Range.first;
+    using Iterator = filtered_reverse_iterator<sizeof...(OptSpecifiers)>;
+    return make_range(Iterator(B, E, {toOptSpecifier(Ids)...}),
+                      Iterator(E, E, {toOptSpecifier(Ids)...}));
+  }
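+
+  // Usage sketch (OPT_I and OPT_isystem stand in for ids from a
+  // TableGen-generated option table; both names are illustrative):
+  //   for (Arg *A : Args.filtered(OPT_I, OPT_isystem))
+  //     Paths.push_back(A->getValue());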
+
+  /// @}
+  /// @name Arg Removal
+  /// @{
+
+  /// eraseArg - Remove any option matching \p Id.
+  void eraseArg(OptSpecifier Id);
+
+  /// @}
+  /// @name Arg Access
+  /// @{
+
+  /// hasArg - Does the arg list contain any option matching \p Ids.
+  ///
+  /// hasArg claims any matching arguments; hasArgNoClaim does not.
+  template<typename ...OptSpecifiers>
+  bool hasArgNoClaim(OptSpecifiers ...Ids) const {
+    return getLastArgNoClaim(Ids...) != nullptr;
+  }
+  template<typename ...OptSpecifiers>
+  bool hasArg(OptSpecifiers ...Ids) const {
+    return getLastArg(Ids...) != nullptr;
+  }
+
+  /// Return the last argument matching \p Id, or null.
+  template<typename ...OptSpecifiers>
+  Arg *getLastArg(OptSpecifiers ...Ids) const {
+    Arg *Res = nullptr;
+    for (Arg *A : filtered(Ids...)) {
+      Res = A;
+      Res->claim();
+    }
+    return Res;
+  }
+
+  /// Return the last argument matching \p Id, or null. Do not "claim" the
+  /// option (don't mark it as having been used).
+  template<typename ...OptSpecifiers>
+  Arg *getLastArgNoClaim(OptSpecifiers ...Ids) const {
+    for (Arg *A : filtered_reverse(Ids...))
+      return A;
+    return nullptr;
+  }
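+
+  // Usage sketch (OPT_foo is an illustrative generated id; handleFoo is
+  // hypothetical): getLastArg claims every matching argument, silencing
+  // "argument unused" diagnostics, while getLastArgNoClaim leaves the claim
+  // bits untouched:
+  //   if (Arg *A = Args.getLastArg(OPT_foo))
+  //     handleFoo(A->getValue());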
+
+  /// getArgString - Return the input argument string at \p Index.
+  virtual const char *getArgString(unsigned Index) const = 0;
+
+  /// getNumInputArgStrings - Return the number of original argument strings,
+  /// which are guaranteed to be the first strings in the argument string
+  /// list.
+  virtual unsigned getNumInputArgStrings() const = 0;
+
+  /// @}
+  /// @name Argument Lookup Utilities
+  /// @{
+
+  /// getLastArgValue - Return the value of the last argument, or a default.
+  StringRef getLastArgValue(OptSpecifier Id, StringRef Default = "") const;
+
+  /// getAllArgValues - Get the values of all instances of the given argument
+  /// as strings.
+  std::vector<std::string> getAllArgValues(OptSpecifier Id) const;
+
+  /// @}
+  /// @name Translation Utilities
+  /// @{
+
+  /// hasFlag - Given an option \p Pos and its negative form \p Neg, return
+  /// true if the option is present, false if the negation is present, and
+  /// \p Default if neither option is given. If both the option and its
+  /// negation are present, the last one wins.
+  bool hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default=true) const;
+
+  /// hasFlag - Given an option \p Pos, an alias \p PosAlias and its negative
+  /// form \p Neg, return true if the option or its alias is present, false if
+  /// the negation is present, and \p Default if none of the options are
+  /// given. If multiple options are present, the last one wins.
+  bool hasFlag(OptSpecifier Pos, OptSpecifier PosAlias, OptSpecifier Neg,
+               bool Default = true) const;
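+
+  // For example, given the command line "-ffoo ... -fno-foo",
+  // hasFlag(OPT_ffoo, OPT_fno_foo, true) returns false because the negation
+  // appears last (both ids are illustrative TableGen-generated names).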
+
+  /// AddLastArg - Render only the last argument matching \p Id0, if present.
+  void AddLastArg(ArgStringList &Output, OptSpecifier Id0) const;
+  void AddLastArg(ArgStringList &Output, OptSpecifier Id0,
+                  OptSpecifier Id1) const;
+
+  /// AddAllArgsExcept - Render all arguments matching any of the given ids
+  /// and not matching any of the excluded ids.
+  void AddAllArgsExcept(ArgStringList &Output, ArrayRef<OptSpecifier> Ids,
+                        ArrayRef<OptSpecifier> ExcludeIds) const;
+  /// AddAllArgs - Render all arguments matching any of the given ids.
+  void AddAllArgs(ArgStringList &Output, ArrayRef<OptSpecifier> Ids) const;
+
+  /// AddAllArgs - Render all arguments matching the given ids.
+  void AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
+                  OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+  /// AddAllArgValues - Render the argument values of all arguments
+  /// matching the given ids.
+  void AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
+                       OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+  /// AddAllArgsTranslated - Render all the arguments matching the
+  /// given ids, but forced to separate args and using the provided
+  /// name instead of the first option value.
+  ///
+  /// \param Joined - If true, render the argument as joined with
+  /// the option specifier.
+  void AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
+                            const char *Translation,
+                            bool Joined = false) const;
+
+  /// ClaimAllArgs - Claim all arguments which match the given
+  /// option id.
+  void ClaimAllArgs(OptSpecifier Id0) const;
+
+  /// ClaimAllArgs - Claim all arguments.
+  void ClaimAllArgs() const;
+
+  /// @}
+  /// @name Arg Synthesis
+  /// @{
+
+  /// Construct a constant string pointer whose
+  /// lifetime will match that of the ArgList.
+  virtual const char *MakeArgStringRef(StringRef Str) const = 0;
+  const char *MakeArgString(const Twine &Str) const {
+    SmallString<256> Buf;
+    return MakeArgStringRef(Str.toStringRef(Buf));
+  }
+
+  /// \brief Create an arg string for (\p LHS + \p RHS), reusing the
+  /// string at \p Index if possible.
+  const char *GetOrMakeJoinedArgString(unsigned Index, StringRef LHS,
+                                        StringRef RHS) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+
+  /// @}
+};
+
+class InputArgList final : public ArgList {
+private:
+  /// List of argument strings used by the contained Args.
+  ///
+  /// This is mutable since we treat the ArgList as being the list
+  /// of Args, and allow routines to add new strings (to have a
+  /// convenient place to store the memory) via MakeIndex.
+  mutable ArgStringList ArgStrings;
+
+  /// Strings for synthesized arguments.
+  ///
+  /// This is mutable since we treat the ArgList as being the list
+  /// of Args, and allow routines to add new strings (to have a
+  /// convenient place to store the memory) via MakeIndex.
+  mutable std::list<std::string> SynthesizedStrings;
+
+  /// The number of original input argument strings.
+  unsigned NumInputArgStrings;
+
+  /// Release allocated arguments.
+  void releaseMemory();
+
+public:
+  InputArgList() : NumInputArgStrings(0) {}
+
+  InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);
+
+  InputArgList(InputArgList &&RHS)
+      : ArgList(std::move(RHS)), ArgStrings(std::move(RHS.ArgStrings)),
+        SynthesizedStrings(std::move(RHS.SynthesizedStrings)),
+        NumInputArgStrings(RHS.NumInputArgStrings) {}
+
+  InputArgList &operator=(InputArgList &&RHS) {
+    releaseMemory();
+    ArgList::operator=(std::move(RHS));
+    ArgStrings = std::move(RHS.ArgStrings);
+    SynthesizedStrings = std::move(RHS.SynthesizedStrings);
+    NumInputArgStrings = RHS.NumInputArgStrings;
+    return *this;
+  }
+
+  ~InputArgList() { releaseMemory(); }
+
+  const char *getArgString(unsigned Index) const override {
+    return ArgStrings[Index];
+  }
+
+  unsigned getNumInputArgStrings() const override {
+    return NumInputArgStrings;
+  }
+
+  /// @name Arg Synthesis
+  /// @{
+
+public:
+  /// MakeIndex - Get an index for the given string(s).
+  unsigned MakeIndex(StringRef String0) const;
+  unsigned MakeIndex(StringRef String0, StringRef String1) const;
+
+  using ArgList::MakeArgString;
+  const char *MakeArgStringRef(StringRef Str) const override;
+
+  /// @}
+};
+
+/// DerivedArgList - An ordered collection of driver arguments,
+/// whose storage may be in another argument list.
+class DerivedArgList final : public ArgList {
+  const InputArgList &BaseArgs;
+
+  /// The list of arguments we synthesized.
+  mutable SmallVector<std::unique_ptr<Arg>, 16> SynthesizedArgs;
+
+public:
+  /// Construct a new derived arg list from \p BaseArgs.
+  DerivedArgList(const InputArgList &BaseArgs);
+
+  const char *getArgString(unsigned Index) const override {
+    return BaseArgs.getArgString(Index);
+  }
+
+  unsigned getNumInputArgStrings() const override {
+    return BaseArgs.getNumInputArgStrings();
+  }
+
+  const InputArgList &getBaseArgs() const {
+    return BaseArgs;
+  }
+
+  /// @name Arg Synthesis
+  /// @{
+
+  /// AddSynthesizedArg - Add an argument to the list of synthesized arguments
+  /// (to be freed).
+  void AddSynthesizedArg(Arg *A);
+
+  using ArgList::MakeArgString;
+  const char *MakeArgStringRef(StringRef Str) const override;
+
+  /// AddFlagArg - Construct a new FlagArg for the given option \p Opt and
+  /// append it to the argument list.
+  void AddFlagArg(const Arg *BaseArg, const Option Opt) {
+    append(MakeFlagArg(BaseArg, Opt));
+  }
+
+  /// AddPositionalArg - Construct a new Positional arg for the given option
+  /// \p Opt, with the provided \p Value, and append it to the argument
+  /// list.
+  void AddPositionalArg(const Arg *BaseArg, const Option Opt,
+                        StringRef Value) {
+    append(MakePositionalArg(BaseArg, Opt, Value));
+  }
+
+  /// AddSeparateArg - Construct a new Separate arg for the given option
+  /// \p Opt, with the provided \p Value, and append it to the argument
+  /// list.
+  void AddSeparateArg(const Arg *BaseArg, const Option Opt,
+                      StringRef Value) {
+    append(MakeSeparateArg(BaseArg, Opt, Value));
+  }
+
+  /// AddJoinedArg - Construct a new Joined arg for the given option
+  /// \p Opt, with the provided \p Value, and append it to the argument list.
+  void AddJoinedArg(const Arg *BaseArg, const Option Opt,
+                    StringRef Value) {
+    append(MakeJoinedArg(BaseArg, Opt, Value));
+  }
+
+  /// MakeFlagArg - Construct a new FlagArg for the given option \p Opt.
+  Arg *MakeFlagArg(const Arg *BaseArg, const Option Opt) const;
+
+  /// MakePositionalArg - Construct a new Positional arg for the
+  /// given option \p Opt, with the provided \p Value.
+  Arg *MakePositionalArg(const Arg *BaseArg, const Option Opt,
+                          StringRef Value) const;
+
+  /// MakeSeparateArg - Construct a new Separate arg for the
+  /// given option \p Opt, with the provided \p Value.
+  Arg *MakeSeparateArg(const Arg *BaseArg, const Option Opt,
+                        StringRef Value) const;
+
+  /// MakeJoinedArg - Construct a new Joined arg for the
+  /// given option \p Opt, with the provided \p Value.
+  Arg *MakeJoinedArg(const Arg *BaseArg, const Option Opt,
+                      StringRef Value) const;
+
+  /// @}
+};
+
+} // end namespace opt
+
+} // end namespace llvm
+
+#endif // LLVM_OPTION_ARGLIST_H
diff --git a/linux-x64/clang/include/llvm/Option/OptParser.td b/linux-x64/clang/include/llvm/Option/OptParser.td
new file mode 100644
index 0000000..9c37374
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/OptParser.td
@@ -0,0 +1,139 @@
+//===--- OptParser.td - Common Option Parsing Interfaces ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the common interfaces used by the option parsing TableGen
+//  backend.
+//
+//===----------------------------------------------------------------------===//
+
+// Define the kinds of options.
+
+class OptionKind<string name, int precedence = 0, bit sentinel = 0> {
+  string Name = name;
+  // The kind precedence; kinds with lower precedence are matched first.
+  int Precedence = precedence;
+  // Indicate a sentinel option.
+  bit Sentinel = sentinel;
+}
+
+// An option group.
+def KIND_GROUP : OptionKind<"Group">;
+// The input option kind.
+def KIND_INPUT : OptionKind<"Input", 1, 1>;
+// The unknown option kind.
+def KIND_UNKNOWN : OptionKind<"Unknown", 2, 1>;
+// A flag with no values.
+def KIND_FLAG : OptionKind<"Flag">;
+// An option which prefixes its (single) value.
+def KIND_JOINED : OptionKind<"Joined", 1>;
+// An option which is followed by its value.
+def KIND_SEPARATE : OptionKind<"Separate">;
+// An option followed by its values, which are separated by commas.
+def KIND_COMMAJOINED : OptionKind<"CommaJoined">;
+// An option which takes multiple (separate) arguments.
+def KIND_MULTIARG : OptionKind<"MultiArg">;
+// An option which is either joined to its (non-empty) value, or followed by its
+// value.
+def KIND_JOINED_OR_SEPARATE : OptionKind<"JoinedOrSeparate">;
+// An option which is both joined to its (first) value, and followed by its
+// (second) value.
+def KIND_JOINED_AND_SEPARATE : OptionKind<"JoinedAndSeparate">;
+// An option which consumes all remaining arguments if there are any.
+def KIND_REMAINING_ARGS : OptionKind<"RemainingArgs">;
+// An option which consumes an optional joined argument and any other remaining
+// arguments.
+def KIND_REMAINING_ARGS_JOINED : OptionKind<"RemainingArgsJoined">;
+
+// Define the option flags.
+
+class OptionFlag {}
+
+// HelpHidden - The option should not be displayed in --help, even if it has
+// help text. Clients *can* use this in conjunction with the OptTable::PrintHelp
+// arguments to implement hidden help groups.
+def HelpHidden : OptionFlag;
+
+// RenderAsInput - The option should not render the name when rendered as an
+// input (i.e., the option is rendered as values).
+def RenderAsInput : OptionFlag;
+
+// RenderJoined - The option should be rendered joined, even if separate (only
+// sensible on single value separate options).
+def RenderJoined : OptionFlag;
+
+// RenderSeparate - The option should be rendered separately, even if joined
+// (only sensible on joined options).
+def RenderSeparate : OptionFlag;
+
+// Define the option group class.
+
+class OptionGroup<string name> {
+  string EnumName = ?; // Uses the def name if undefined.
+  string Name = name;
+  string HelpText = ?;
+  OptionGroup Group = ?;
+  list<OptionFlag> Flags = [];
+}
+
+// Define the option class.
+
+class Option<list<string> prefixes, string name, OptionKind kind> {
+  string EnumName = ?; // Uses the def name if undefined.
+  list<string> Prefixes = prefixes;
+  string Name = name;
+  OptionKind Kind = kind;
+  // Used by MultiArg option kind.
+  int NumArgs = 0;
+  string HelpText = ?;
+  string MetaVarName = ?;
+  string Values = ?;
+  code ValuesCode = ?;
+  list<OptionFlag> Flags = [];
+  OptionGroup Group = ?;
+  Option Alias = ?;
+  list<string> AliasArgs = [];
+}
+
+// Helpers for defining options.
+
+class Flag<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_FLAG>;
+class Joined<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_JOINED>;
+class Separate<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_SEPARATE>;
+class CommaJoined<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_COMMAJOINED>;
+class MultiArg<list<string> prefixes, string name, int numargs>
+  : Option<prefixes, name, KIND_MULTIARG> {
+  int NumArgs = numargs;
+}
+class JoinedOrSeparate<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_JOINED_OR_SEPARATE>;
+class JoinedAndSeparate<list<string> prefixes, string name>
+  : Option<prefixes, name, KIND_JOINED_AND_SEPARATE>;
+
+// Mix-ins for adding optional attributes.
+
+class Alias<Option alias> { Option Alias = alias; }
+class AliasArgs<list<string> aliasargs> { list<string> AliasArgs = aliasargs; }
+class EnumName<string name> { string EnumName = name; }
+class Flags<list<OptionFlag> flags> { list<OptionFlag> Flags = flags; }
+class Group<OptionGroup group> { OptionGroup Group = group; }
+class HelpText<string text> { string HelpText = text; }
+class MetaVarName<string name> { string MetaVarName = name; }
+class Values<string value> { string Values = value; }
+class ValuesCode<code valuecode> { code ValuesCode = valuecode; }
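+
+// Illustrative sketch (not part of the original file): how a client .td file
+// might combine the helper classes and mix-ins above. The option names and
+// the group are hypothetical.
+//
+//   def MyGroup : OptionGroup<"<my options>">;
+//   def help    : Flag<["-", "--"], "help">, HelpText<"Display usage">;
+//   def output  : JoinedOrSeparate<["-"], "o">, MetaVarName<"<file>">,
+//                 Group<MyGroup>;
+//   def h       : Flag<["-"], "h">, Alias<help>, Flags<[HelpHidden]>;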
+
+// Predefined options.
+
+// FIXME: Have generator validate that these appear in correct position (and
+// aren't duplicated).
+def INPUT : Option<[], "<input>", KIND_INPUT>;
+def UNKNOWN : Option<[], "<unknown>", KIND_UNKNOWN>;
diff --git a/linux-x64/clang/include/llvm/Option/OptSpecifier.h b/linux-x64/clang/include/llvm/Option/OptSpecifier.h
new file mode 100644
index 0000000..84c3cf8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/OptSpecifier.h
@@ -0,0 +1,39 @@
+//===- OptSpecifier.h - Option Specifiers -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTSPECIFIER_H
+#define LLVM_OPTION_OPTSPECIFIER_H
+
+namespace llvm {
+namespace opt {
+
+class Option;
+
+/// OptSpecifier - Wrapper class for abstracting references to option IDs.
+class OptSpecifier {
+  unsigned ID = 0;
+
+public:
+  OptSpecifier() = default;
+  explicit OptSpecifier(bool) = delete;
+  /*implicit*/ OptSpecifier(unsigned ID) : ID(ID) {}
+  /*implicit*/ OptSpecifier(const Option *Opt);
+
+  bool isValid() const { return ID != 0; }
+
+  unsigned getID() const { return ID; }
+
+  bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
+  bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
+};
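+
+// Illustrative sketch (not part of the original header): OptSpecifier lets an
+// API accept either a raw option id or an Option. OPT_help stands in for a
+// hypothetical TableGen-generated id, and handleHelp() is a hypothetical
+// handler.
+//
+// \code
+//   llvm::opt::OptSpecifier Spec = OPT_help; // implicit from unsigned
+//   if (Spec.isValid() && Spec == OPT_help)
+//     handleHelp();
+// \endcode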
+
+} // end namespace opt
+} // end namespace llvm
+
+#endif // LLVM_OPTION_OPTSPECIFIER_H
diff --git a/linux-x64/clang/include/llvm/Option/OptTable.h b/linux-x64/clang/include/llvm/Option/OptTable.h
new file mode 100644
index 0000000..20b9bba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/OptTable.h
@@ -0,0 +1,241 @@
+//===- OptTable.h - Option Table --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTTABLE_H
+#define LLVM_OPTION_OPTTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Option/OptSpecifier.h"
+#include <cassert>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace opt {
+
+class Arg;
+class ArgList;
+class InputArgList;
+class Option;
+
+/// \brief Provide access to the Option info table.
+///
+/// The OptTable class provides a layer of indirection which allows Option
+/// instances to be created lazily. In the common case, only a few options will
+/// be needed at runtime; the OptTable class maintains enough information to
+/// parse command lines without instantiating Options, while letting other
+/// parts of the driver still use Option instances where convenient.
+class OptTable {
+public:
+  /// \brief Entry for a single option instance in the option data table.
+  struct Info {
+    /// A null-terminated array of prefix strings to apply to the name while
+    /// matching.
+    const char *const *Prefixes;
+    const char *Name;
+    const char *HelpText;
+    const char *MetaVar;
+    unsigned ID;
+    unsigned char Kind;
+    unsigned char Param;
+    unsigned short Flags;
+    unsigned short GroupID;
+    unsigned short AliasID;
+    const char *AliasArgs;
+    const char *Values;
+  };
+
+private:
+  /// \brief The option information table.
+  std::vector<Info> OptionInfos;
+  bool IgnoreCase;
+
+  unsigned TheInputOptionID = 0;
+  unsigned TheUnknownOptionID = 0;
+
+  /// The index of the first option which can be parsed (i.e., is not a
+  /// special option like 'input' or 'unknown', and is not an option group).
+  unsigned FirstSearchableIndex = 0;
+
+  /// The union of all option prefixes. If an argument does not begin with
+  /// one of these, it is an input.
+  StringSet<> PrefixesUnion;
+  std::string PrefixChars;
+
+private:
+  const Info &getInfo(OptSpecifier Opt) const {
+    unsigned id = Opt.getID();
+    assert(id > 0 && id - 1 < getNumOptions() && "Invalid Option ID.");
+    return OptionInfos[id - 1];
+  }
+
+protected:
+  OptTable(ArrayRef<Info> OptionInfos, bool IgnoreCase = false);
+
+public:
+  ~OptTable();
+
+  /// \brief Return the total number of option classes.
+  unsigned getNumOptions() const { return OptionInfos.size(); }
+
+  /// \brief Get the given Opt's Option instance, lazily creating it
+  /// if necessary.
+  ///
+  /// \return The option, or null for the INVALID option id.
+  const Option getOption(OptSpecifier Opt) const;
+
+  /// \brief Lookup the name of the given option.
+  const char *getOptionName(OptSpecifier id) const {
+    return getInfo(id).Name;
+  }
+
+  /// \brief Get the kind of the given option.
+  unsigned getOptionKind(OptSpecifier id) const {
+    return getInfo(id).Kind;
+  }
+
+  /// \brief Get the group id for the given option.
+  unsigned getOptionGroupID(OptSpecifier id) const {
+    return getInfo(id).GroupID;
+  }
+
+  /// \brief Get the help text to use to describe this option.
+  const char *getOptionHelpText(OptSpecifier id) const {
+    return getInfo(id).HelpText;
+  }
+
+  /// \brief Get the meta-variable name to use when describing
+  /// this option's values in the help text.
+  const char *getOptionMetaVar(OptSpecifier id) const {
+    return getInfo(id).MetaVar;
+  }
+
+  /// Find the possible values for the given flag. This is used for shell
+  /// autocompletion.
+  ///
+  /// \param [in] Option - Key flag like "-stdlib=" when "-stdlib=l"
+  /// was passed to clang.
+  ///
+  /// \param [in] Arg - Value which we want to autocomplete like "l"
+  /// when "-stdlib=l" was passed to clang.
+  ///
+  /// \return The vector of possible values.
+  std::vector<std::string> suggestValueCompletions(StringRef Option,
+                                                   StringRef Arg) const;
+
+  /// Find flags from the OptTable which start with Cur.
+  ///
+  /// \param [in] Cur - String prefix that all returned flags need
+  /// to start with.
+  ///
+  /// \return The vector of flags which start with Cur.
+  std::vector<std::string> findByPrefix(StringRef Cur,
+                                        unsigned short DisableFlags) const;
+
+  /// Find the OptTable option that most closely matches the given string.
+  ///
+  /// \param [in] Option - A string, such as "-stdlibs=l", that represents user
+  /// input of an option that may not exist in the OptTable. Note that the
+  /// string includes prefix dashes "-" as well as values "=l".
+  /// \param [out] NearestString - The nearest option string found in the
+  /// OptTable.
+  /// \param [in] FlagsToInclude - Only find options with any of these flags.
+  /// Zero is the default, which includes all flags.
+  /// \param [in] FlagsToExclude - Don't find options with this flag. Zero
+  /// is the default, and means exclude nothing.
+  /// \param [in] MinimumLength - Don't find options shorter than this length.
+  /// For example, a minimum length of 3 prevents "-x" from being considered
+  /// near to "-S".
+  ///
+  /// \return The edit distance of the nearest string found.
+  unsigned findNearest(StringRef Option, std::string &NearestString,
+                       unsigned FlagsToInclude = 0, unsigned FlagsToExclude = 0,
+                       unsigned MinimumLength = 4) const;
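+
+  // Illustrative sketch (not part of the original header): using findNearest
+  // to suggest a spelling correction. `Opts` stands in for a hypothetical
+  // OptTable instance.
+  //
+  // \code
+  //   std::string Nearest;
+  //   if (Opts.findNearest("-stdlibs=libc++", Nearest) <= 1)
+  //     llvm::errs() << "did you mean '" << Nearest << "'?\n";
+  // \endcode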
+
+  /// Add Values to an Option's Values class.
+  ///
+  /// \param [in] Option - Prefix + Name of the flag whose Values will be
+  ///  changed. For example, "-analyzer-checker".
+  /// \param [in] Values - String of Values separated by ",", such as
+  ///  "foo, bar..", where foo and bar are the arguments that the Option flag
+  ///  takes.
+  ///
+  /// \return true on success, false on failure.
+  bool addValues(const char *Option, const char *Values);
+
+  /// \brief Parse a single argument, returning the new argument and
+  /// updating Index.
+  ///
+  /// \param [in,out] Index - The current parsing position in the argument
+  /// string list; on return this will be the index of the next argument
+  /// string to parse.
+  /// \param [in] FlagsToInclude - Only parse options with any of these flags.
+  /// Zero is the default which includes all flags.
+  /// \param [in] FlagsToExclude - Don't parse options with this flag.  Zero
+  /// is the default and means exclude nothing.
+  ///
+  /// \return The parsed argument, or 0 if the argument is missing values
+  /// (in which case Index still points at the conceptual next argument string
+  /// to parse).
+  Arg *ParseOneArg(const ArgList &Args, unsigned &Index,
+                   unsigned FlagsToInclude = 0,
+                   unsigned FlagsToExclude = 0) const;
+
+  /// \brief Parse a list of arguments into an InputArgList.
+  ///
+  /// The resulting InputArgList will reference the strings in \p Args, and
+  /// their lifetime should extend past that of the returned InputArgList.
+  ///
+  /// The only error that can occur in this routine is if an argument is
+  /// missing values; in this case \p MissingArgCount will be non-zero.
+  ///
+  /// \param MissingArgIndex - On error, the index of the option which could
+  /// not be parsed.
+  /// \param MissingArgCount - On error, the number of missing options.
+  /// \param FlagsToInclude - Only parse options with any of these flags.
+  /// Zero is the default which includes all flags.
+  /// \param FlagsToExclude - Don't parse options with this flag.  Zero
+  /// is the default and means exclude nothing.
+  /// \return An InputArgList; on error this will contain all the options
+  /// which could be parsed.
+  InputArgList ParseArgs(ArrayRef<const char *> Args, unsigned &MissingArgIndex,
+                         unsigned &MissingArgCount, unsigned FlagsToInclude = 0,
+                         unsigned FlagsToExclude = 0) const;
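+
+  // Illustrative sketch (not part of the original header): a typical parse
+  // loop. `Opts` is a hypothetical OptTable subclass instance, `Argv` a
+  // hypothetical ArrayRef<const char *>, and OPT_UNKNOWN the
+  // TableGen-generated id of the unknown option.
+  //
+  // \code
+  //   unsigned MissingIndex, MissingCount;
+  //   llvm::opt::InputArgList Args =
+  //       Opts.ParseArgs(Argv, MissingIndex, MissingCount);
+  //   if (MissingCount)
+  //     llvm::errs() << "value missing after '"
+  //                  << Args.getArgString(MissingIndex) << "'\n";
+  //   for (const llvm::opt::Arg *A : Args.filtered(OPT_UNKNOWN))
+  //     llvm::errs() << "unknown option: " << A->getSpelling() << "\n";
+  // \endcode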
+
+  /// \brief Render the help text for an option table.
+  ///
+  /// \param OS - The stream to write the help text to.
+  /// \param Name - The name to use in the usage line.
+  /// \param Title - The title to use in the usage line.
+  /// \param FlagsToInclude - If non-zero, only include options with any
+  ///                         of these flags set.
+  /// \param FlagsToExclude - Exclude options with any of these flags set.
+  /// \param ShowAllAliases - If true, display all options including aliases
+  ///                         that don't have help texts. By default, we display
+  ///                         only options that are not hidden and have help
+  ///                         texts.
+  void PrintHelp(raw_ostream &OS, const char *Name, const char *Title,
+                 unsigned FlagsToInclude, unsigned FlagsToExclude,
+                 bool ShowAllAliases) const;
+
+  void PrintHelp(raw_ostream &OS, const char *Name, const char *Title,
+                 bool ShowHidden = false, bool ShowAllAliases = false) const;
+};
+
+} // end namespace opt
+
+} // end namespace llvm
+
+#endif // LLVM_OPTION_OPTTABLE_H
diff --git a/linux-x64/clang/include/llvm/Option/Option.h b/linux-x64/clang/include/llvm/Option/Option.h
new file mode 100644
index 0000000..d9aebd5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Option/Option.h
@@ -0,0 +1,218 @@
+//===- Option.h - Abstract Driver Options -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTION_H
+#define LLVM_OPTION_OPTION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace opt {
+
+class Arg;
+class ArgList;
+
+/// ArgStringList - Type used for constructing argv lists for subprocesses.
+using ArgStringList = SmallVector<const char *, 16>;
+
+/// Base flags for all options. Custom flags may be added after.
+enum DriverFlag {
+  HelpHidden       = (1 << 0),
+  RenderAsInput    = (1 << 1),
+  RenderJoined     = (1 << 2),
+  RenderSeparate   = (1 << 3)
+};
+
+/// Option - Abstract representation for a single form of driver
+/// argument.
+///
+/// An Option class represents a form of option that the driver
+/// takes, for example how many arguments the option has and how
+/// they can be provided. Individual option instances store
+/// additional information about what group the option is a member
+/// of (if any), if the option is an alias, and a number of
+/// flags. At runtime the driver parses the command line into
+/// concrete Arg instances, each of which corresponds to a
+/// particular Option instance.
+class Option {
+public:
+  enum OptionClass {
+    GroupClass = 0,
+    InputClass,
+    UnknownClass,
+    FlagClass,
+    JoinedClass,
+    ValuesClass,
+    SeparateClass,
+    RemainingArgsClass,
+    RemainingArgsJoinedClass,
+    CommaJoinedClass,
+    MultiArgClass,
+    JoinedOrSeparateClass,
+    JoinedAndSeparateClass
+  };
+
+  enum RenderStyleKind {
+    RenderCommaJoinedStyle,
+    RenderJoinedStyle,
+    RenderSeparateStyle,
+    RenderValuesStyle
+  };
+
+protected:
+  const OptTable::Info *Info;
+  const OptTable *Owner;
+
+public:
+  Option(const OptTable::Info *Info, const OptTable *Owner);
+
+  bool isValid() const {
+    return Info != nullptr;
+  }
+
+  unsigned getID() const {
+    assert(Info && "Must have a valid info!");
+    return Info->ID;
+  }
+
+  OptionClass getKind() const {
+    assert(Info && "Must have a valid info!");
+    return OptionClass(Info->Kind);
+  }
+
+  /// \brief Get the name of this option without any prefix.
+  StringRef getName() const {
+    assert(Info && "Must have a valid info!");
+    return Info->Name;
+  }
+
+  const Option getGroup() const {
+    assert(Info && "Must have a valid info!");
+    assert(Owner && "Must have a valid owner!");
+    return Owner->getOption(Info->GroupID);
+  }
+
+  const Option getAlias() const {
+    assert(Info && "Must have a valid info!");
+    assert(Owner && "Must have a valid owner!");
+    return Owner->getOption(Info->AliasID);
+  }
+
+  /// \brief Get the alias arguments as a \0 separated list.
+  /// E.g. ["foo", "bar"] would be returned as "foo\0bar\0".
+  const char *getAliasArgs() const {
+    assert(Info && "Must have a valid info!");
+    assert((!Info->AliasArgs || Info->AliasArgs[0] != 0) &&
+           "AliasArgs should be either 0 or non-empty.");
+
+    return Info->AliasArgs;
+  }
+
+  /// \brief Get the default prefix for this option.
+  StringRef getPrefix() const {
+    const char *Prefix = *Info->Prefixes;
+    return Prefix ? Prefix : StringRef();
+  }
+
+  /// \brief Get the name of this option with the default prefix.
+  std::string getPrefixedName() const {
+    std::string Ret = getPrefix();
+    Ret += getName();
+    return Ret;
+  }
+
+  unsigned getNumArgs() const { return Info->Param; }
+
+  bool hasNoOptAsInput() const { return Info->Flags & RenderAsInput; }
+
+  RenderStyleKind getRenderStyle() const {
+    if (Info->Flags & RenderJoined)
+      return RenderJoinedStyle;
+    if (Info->Flags & RenderSeparate)
+      return RenderSeparateStyle;
+    switch (getKind()) {
+    case GroupClass:
+    case InputClass:
+    case UnknownClass:
+      return RenderValuesStyle;
+    case JoinedClass:
+    case JoinedAndSeparateClass:
+      return RenderJoinedStyle;
+    case CommaJoinedClass:
+      return RenderCommaJoinedStyle;
+    case FlagClass:
+    case ValuesClass:
+    case SeparateClass:
+    case MultiArgClass:
+    case JoinedOrSeparateClass:
+    case RemainingArgsClass:
+    case RemainingArgsJoinedClass:
+      return RenderSeparateStyle;
+    }
+    llvm_unreachable("Unexpected kind!");
+  }
+
+  /// Test if this option has the flag \a Val.
+  bool hasFlag(unsigned Val) const {
+    return Info->Flags & Val;
+  }
+
+  /// getUnaliasedOption - Return the final option this option
+  /// aliases (itself, if the option has no alias).
+  const Option getUnaliasedOption() const {
+    const Option Alias = getAlias();
+    if (Alias.isValid()) return Alias.getUnaliasedOption();
+    return *this;
+  }
+
+  /// getRenderName - Return the name to use when rendering this
+  /// option.
+  StringRef getRenderName() const {
+    return getUnaliasedOption().getName();
+  }
+
+  /// matches - Predicate for whether this option is part of the
+  /// given option (which may be a group).
+  ///
+  /// Note that matching against options which are aliases should never be
+  /// done -- aliases do not participate in matching and so such a query will
+  /// always be false.
+  bool matches(OptSpecifier ID) const;
+
+  /// accept - Potentially accept the current argument, returning a
+  /// new Arg instance, or 0 if the option does not accept this
+  /// argument (or the argument is missing values).
+  ///
+  /// If the option accepts the current argument, accept() sets
+  /// Index to the position where argument parsing should resume
+  /// (even if the argument is missing values).
+  ///
+  /// \param ArgSize The number of bytes taken up by the matched Option prefix
+  ///                and name. This is used to determine where joined values
+  ///                start.
+  Arg *accept(const ArgList &Args, unsigned &Index, unsigned ArgSize) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+};
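+
+// Illustrative sketch (not part of the original header): alias resolution and
+// group matching. `Opts` and the OPT_* ids are hypothetical; suppose -h is
+// declared as an alias of --help, and --help belongs to a group.
+//
+// \code
+//   const llvm::opt::Option O = Opts.getOption(OPT_h);
+//   O.getUnaliasedOption().getName(); // "help"
+//   O.matches(OPT_help);              // true: matching resolves aliases
+//   O.matches(OPT_my_group);          // true: matching walks up groups
+// \endcode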
+
+} // end namespace opt
+
+} // end namespace llvm
+
+#endif // LLVM_OPTION_OPTION_H
diff --git a/linux-x64/clang/include/llvm/Pass.h b/linux-x64/clang/include/llvm/Pass.h
new file mode 100644
index 0000000..a29b377
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Pass.h
@@ -0,0 +1,378 @@
+//===- llvm/Pass.h - Base class for Passes ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a base class that indicates that a specified class is a
+// transformation pass implementation.
+//
+// Passes are designed this way so that it is possible to run passes in a cache-
+// and organizationally-optimal order without having to specify it at the front
+// end.  This allows arbitrary passes to be strung together and have them
+// executed as efficiently as possible.
+//
+// Passes should extend one of the classes below, depending on the guarantees
+// that it can make about what will be modified as it is run.  For example, most
+// global optimizations should derive from FunctionPass, because they do not add
+// or delete functions; they operate on the internals of the function.
+//
+// Note that this file #includes PassSupport.h and PassAnalysisSupport.h (at the
+// bottom), so the APIs exposed by these files are also automatically available
+// to all users of this file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASS_H
+#define LLVM_PASS_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class AnalysisResolver;
+class AnalysisUsage;
+class BasicBlock;
+class Function;
+class ImmutablePass;
+class Module;
+class PassInfo;
+class PMDataManager;
+class PMStack;
+class raw_ostream;
+
+// AnalysisID - Use the PassInfo to identify a pass...
+using AnalysisID = const void *;
+
+/// Different types of internal pass managers. External pass managers
+/// (PassManager and FunctionPassManager) are not represented here.
+/// Ordering of pass manager types is important here.
+enum PassManagerType {
+  PMT_Unknown = 0,
+  PMT_ModulePassManager = 1, ///< MPPassManager
+  PMT_CallGraphPassManager,  ///< CGPassManager
+  PMT_FunctionPassManager,   ///< FPPassManager
+  PMT_LoopPassManager,       ///< LPPassManager
+  PMT_RegionPassManager,     ///< RGPassManager
+  PMT_BasicBlockPassManager, ///< BBPassManager
+  PMT_Last
+};
+
+// Different types of passes.
+enum PassKind {
+  PT_BasicBlock,
+  PT_Region,
+  PT_Loop,
+  PT_Function,
+  PT_CallGraphSCC,
+  PT_Module,
+  PT_PassManager
+};
+
+//===----------------------------------------------------------------------===//
+/// Pass interface - Implemented by all 'passes'.  Subclass this if you are
+/// implementing an interprocedural optimization or do not fit into any of the
+/// more constrained passes described below.
+///
+class Pass {
+  AnalysisResolver *Resolver = nullptr;  // Used to resolve analysis
+  const void *PassID;
+  PassKind Kind;
+
+public:
+  explicit Pass(PassKind K, char &pid) : PassID(&pid), Kind(K) {}
+  Pass(const Pass &) = delete;
+  Pass &operator=(const Pass &) = delete;
+  virtual ~Pass();
+
+  PassKind getPassKind() const { return Kind; }
+
+  /// getPassName - Return a nice clean name for a pass.  This is usually
+  /// implemented in terms of the name that is registered by one of the
+  /// Registration templates, but can be overridden directly.
+  virtual StringRef getPassName() const;
+
+  /// getPassID - Return the PassID number that corresponds to this pass.
+  AnalysisID getPassID() const {
+    return PassID;
+  }
+
+  /// doInitialization - Virtual method overridden by subclasses to do
+  /// any necessary initialization before any pass is run.
+  virtual bool doInitialization(Module &)  { return false; }
+
+  /// doFinalization - Virtual method overridden by subclasses to do any
+  /// necessary clean up after all passes have run.
+  virtual bool doFinalization(Module &) { return false; }
+
+  /// print - Print out the internal state of the pass.  This is called by
+  /// Analyze to print out the contents of an analysis.  Otherwise it is not
+  /// necessary to implement this method.  Beware that the module pointer MAY be
+  /// null.  This automatically forwards to a virtual function that does not
+  /// provide the Module*, so analyses that don't need it can simply ignore
+  /// it.
+  virtual void print(raw_ostream &OS, const Module *M) const;
+
+  void dump() const; // dump - Print to stderr.
+
+  /// createPrinterPass - Get a Pass appropriate to print the IR this
+  /// pass operates on (Module, Function or MachineFunction).
+  virtual Pass *createPrinterPass(raw_ostream &OS,
+                                  const std::string &Banner) const = 0;
+
+  /// Each pass is responsible for assigning a pass manager to itself.
+  /// PMS is the stack of available pass managers.
+  virtual void assignPassManager(PMStack &,
+                                 PassManagerType) {}
+
+  /// Check if available pass managers are suitable for this pass or not.
+  virtual void preparePassManager(PMStack &);
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  virtual PassManagerType getPotentialPassManagerType() const;
+
+  // Access AnalysisResolver
+  void setResolver(AnalysisResolver *AR);
+  AnalysisResolver *getResolver() const { return Resolver; }
+
+  /// getAnalysisUsage - This function should be overridden by passes that need
+  /// analysis information to do their job.  If a pass specifies that it uses a
+  /// particular analysis result to this function, it can then use the
+  /// getAnalysis<AnalysisType>() function, below.
+  virtual void getAnalysisUsage(AnalysisUsage &) const;
+
+  /// releaseMemory() - This member can be implemented by a pass if it wants to
+  /// be able to release its memory when it is no longer needed.  The default
+  /// behavior of passes is to hold onto memory for the entire duration of their
+  /// lifetime (which is the entire compile time).  For pipelined passes, this
+  /// is not a big deal because that memory gets recycled every time the pass is
+  /// invoked on another program unit.  For IP passes, it is more important to
+  /// free memory when it is unused.
+  ///
+  /// Optionally implement this function to release pass memory when it is no
+  /// longer used.
+  virtual void releaseMemory();
+
+  /// getAdjustedAnalysisPointer - This method is used when a pass implements
+  /// an analysis interface through multiple inheritance.  If needed, it should
+  /// override this to adjust the this pointer as needed for the specified pass
+  /// info.
+  virtual void *getAdjustedAnalysisPointer(AnalysisID ID);
+  virtual ImmutablePass *getAsImmutablePass();
+  virtual PMDataManager *getAsPMDataManager();
+
+  /// verifyAnalysis() - This member can be implemented by an analysis pass to
+  /// check state of analysis information.
+  virtual void verifyAnalysis() const;
+
+  // dumpPassStructure - Implement the -debug-passes=PassStructure option
+  virtual void dumpPassStructure(unsigned Offset = 0);
+
+  // lookupPassInfo - Return the pass info object for the specified pass class,
+  // or null if it is not known.
+  static const PassInfo *lookupPassInfo(const void *TI);
+
+  // lookupPassInfo - Return the pass info object for the pass with the given
+  // argument string, or null if it is not known.
+  static const PassInfo *lookupPassInfo(StringRef Arg);
+
+  // createPass - Create an object for the specified pass class,
+  // or null if it is not known.
+  static Pass *createPass(AnalysisID ID);
+
+  /// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
+  /// get analysis information that might be around, for example to update it.
+  /// This is different than getAnalysis in that it can fail (if the analysis
+  /// results haven't been computed), so should only be used if you can handle
+  /// the case when the analysis is not available.  This method is often used by
+  /// transformation APIs to update analysis results for a pass automatically as
+  /// the transform is performed.
+  template<typename AnalysisType> AnalysisType *
+    getAnalysisIfAvailable() const; // Defined in PassAnalysisSupport.h
+
+  /// mustPreserveAnalysisID - This method serves the same function as
+  /// getAnalysisIfAvailable, but works if you just have an AnalysisID.  This
+  /// obviously cannot give you a properly typed instance of the class if you
+  /// don't have the class name available (use getAnalysisIfAvailable if you
+  /// do), but it can tell you if you need to preserve the pass at least.
+  bool mustPreserveAnalysisID(char &AID) const;
+
+  /// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+  /// to the analysis information that they claim to use by overriding the
+  /// getAnalysisUsage function.
+  template<typename AnalysisType>
+  AnalysisType &getAnalysis() const; // Defined in PassAnalysisSupport.h
+
+  template<typename AnalysisType>
+  AnalysisType &getAnalysis(Function &F); // Defined in PassAnalysisSupport.h
+
+  template<typename AnalysisType>
+  AnalysisType &getAnalysisID(AnalysisID PI) const;
+
+  template<typename AnalysisType>
+  AnalysisType &getAnalysisID(AnalysisID PI, Function &F);
+};
+
+//===----------------------------------------------------------------------===//
+/// ModulePass class - This class is used to implement unstructured
+/// interprocedural optimizations and analyses.  ModulePasses may do anything
+/// they want to the program.
+///
+class ModulePass : public Pass {
+public:
+  explicit ModulePass(char &pid) : Pass(PT_Module, pid) {}
+
+  // Force out-of-line virtual method.
+  ~ModulePass() override;
+
+  /// createPrinterPass - Get a module printer pass.
+  Pass *createPrinterPass(raw_ostream &OS,
+                          const std::string &Banner) const override;
+
+  /// runOnModule - Virtual method overridden by subclasses to process the module
+  /// being operated on.
+  virtual bool runOnModule(Module &M) = 0;
+
+  void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  PassManagerType getPotentialPassManagerType() const override;
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when optimization bisect is over the limit.
+  bool skipModule(Module &M) const;
+};
+
+//===----------------------------------------------------------------------===//
+/// ImmutablePass class - This class is used to provide information that does
+/// not need to be run.  This is useful for things like target information and
+/// "basic" versions of AnalysisGroups.
+///
+class ImmutablePass : public ModulePass {
+public:
+  explicit ImmutablePass(char &pid) : ModulePass(pid) {}
+
+  // Force out-of-line virtual method.
+  ~ImmutablePass() override;
+
+  /// initializePass - This method may be overridden by immutable passes to
+  /// allow them to perform various initialization actions they require.  This
+  /// is primarily because an ImmutablePass can "require" another
+  /// ImmutablePass, and if it does, the overriding version of initializePass
+  /// may get access to these passes with getAnalysis<>.
+  virtual void initializePass();
+
+  ImmutablePass *getAsImmutablePass() override { return this; }
+
+  /// ImmutablePasses are never run.
+  bool runOnModule(Module &) override { return false; }
+};
+
+//===----------------------------------------------------------------------===//
+/// FunctionPass class - This class is used to implement most global
+/// optimizations.  Optimizations should subclass this class if they meet the
+/// following constraints:
+///
+///  1. Optimizations are organized globally, i.e., a function at a time
+///  2. Optimizing a function does not cause the addition or removal of any
+///     functions in the module
+///
+class FunctionPass : public Pass {
+public:
+  explicit FunctionPass(char &pid) : Pass(PT_Function, pid) {}
+
+  /// createPrinterPass - Get a function printer pass.
+  Pass *createPrinterPass(raw_ostream &OS,
+                          const std::string &Banner) const override;
+
+  /// runOnFunction - Virtual method overridden by subclasses to do the
+  /// per-function processing of the pass.
+  virtual bool runOnFunction(Function &F) = 0;
+
+  void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  PassManagerType getPotentialPassManagerType() const override;
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when Attribute::OptimizeNone is set or when
+  /// optimization bisect is over the limit.
+  bool skipFunction(const Function &F) const;
+};
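+
+// Illustrative sketch (not part of the original header): a minimal legacy
+// FunctionPass. The class name is hypothetical; registration is handled by
+// the INITIALIZE_PASS machinery in PassSupport.h.
+//
+// \code
+//   struct CountBlocks : public FunctionPass {
+//     static char ID;
+//     CountBlocks() : FunctionPass(ID) {}
+//     bool runOnFunction(Function &F) override {
+//       if (skipFunction(F))
+//         return false;
+//       errs() << F.getName() << ": " << F.size() << " blocks\n";
+//       return false; // the IR was not modified
+//     }
+//   };
+//   char CountBlocks::ID = 0;
+// \endcode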
+
+//===----------------------------------------------------------------------===//
+/// BasicBlockPass class - This class is used to implement most local
+/// optimizations.  Optimizations should subclass this class if they
+/// meet the following constraints:
+///   1. Optimizations are local, operating on either a basic block or
+///      instruction at a time.
+///   2. Optimizations do not modify the CFG of the contained function, or any
+///      other basic block in the function.
+///   3. Optimizations conform to all of the constraints of FunctionPasses.
+///
+class BasicBlockPass : public Pass {
+public:
+  explicit BasicBlockPass(char &pid) : Pass(PT_BasicBlock, pid) {}
+
+  /// createPrinterPass - Get a basic block printer pass.
+  Pass *createPrinterPass(raw_ostream &OS,
+                          const std::string &Banner) const override;
+
+  using llvm::Pass::doInitialization;
+  using llvm::Pass::doFinalization;
+
+  /// doInitialization - Virtual method overridden by BasicBlockPass subclasses
+  /// to do any necessary per-function initialization.
+  virtual bool doInitialization(Function &);
+
+  /// runOnBasicBlock - Virtual method overridden by subclasses to do the
+  /// per-basicblock processing of the pass.
+  virtual bool runOnBasicBlock(BasicBlock &BB) = 0;
+
+  /// doFinalization - Virtual method overridden by BasicBlockPass subclasses to
+  /// do any post processing needed after all passes have run.
+  virtual bool doFinalization(Function &);
+
+  void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+  ///  Return what kind of Pass Manager can manage this pass.
+  PassManagerType getPotentialPassManagerType() const override;
+
+protected:
+  /// Optional passes call this function to check whether the pass should be
+  /// skipped. This is the case when Attribute::OptimizeNone is set or when
+  /// optimization bisect is over the limit.
+  bool skipBasicBlock(const BasicBlock &BB) const;
+};
+
+/// If the user specifies the -time-passes argument on an LLVM tool command line
+/// then the value of this boolean will be true, otherwise false.
+/// @brief This is the storage for the -time-passes option.
+extern bool TimePassesIsEnabled;
+
+/// isFunctionInPrintList - returns true if a function should be printed via
+/// debugging options like -print-after-all/-print-before-all.
+/// @brief Tells if the function IR should be printed by PrinterPass.
+extern bool isFunctionInPrintList(StringRef FunctionName);
+
+/// forcePrintModuleIR - returns true if IR printing passes should
+/// be printing module IR (even for local-pass printers e.g. function-pass)
+/// to provide more context, as enabled by debugging option -print-module-scope
+/// @brief Tells if IR printer should be printing module IR
+extern bool forcePrintModuleIR();
+
+} // end namespace llvm
+
+// Include support files that contain important APIs commonly used by Passes,
+// but that we want to separate out to make it easier to read the header files.
+#include "llvm/InitializePasses.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/PassSupport.h"
+
+#endif // LLVM_PASS_H
diff --git a/linux-x64/clang/include/llvm/PassAnalysisSupport.h b/linux-x64/clang/include/llvm/PassAnalysisSupport.h
new file mode 100644
index 0000000..1187187
--- /dev/null
+++ b/linux-x64/clang/include/llvm/PassAnalysisSupport.h
@@ -0,0 +1,274 @@
+//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines stuff that is used to define and "use" Analysis Passes.
+// This file is automatically #included by Pass.h, so:
+//
+//           NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
+//
+// Instead, #include Pass.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSANALYSISSUPPORT_H
+#define LLVM_PASSANALYSISSUPPORT_H
+
+#include "Pass.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class Pass;
+class PMDataManager;
+
+//===----------------------------------------------------------------------===//
+/// Represent the analysis usage information of a pass.  This tracks analyses
+/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
+/// TRANSITIVE (must be available throughout the lifetime of the pass), and
+/// analyses that the pass PRESERVES (the pass does not invalidate the results
+/// of these analyses).  This information is provided by a pass to the Pass
+/// infrastructure through the getAnalysisUsage virtual function.
+///
+class AnalysisUsage {
+public:
+  using VectorType = SmallVectorImpl<AnalysisID>;
+
+private:
+  /// Sets of analyses required and preserved by a pass
+  // TODO: It's not clear that SmallVector is an appropriate data structure for
+  // this use case.  The sizes were picked to minimize wasted space, but are
+  // otherwise fairly meaningless.
+  SmallVector<AnalysisID, 8> Required;
+  SmallVector<AnalysisID, 2> RequiredTransitive;
+  SmallVector<AnalysisID, 2> Preserved;
+  SmallVector<AnalysisID, 0> Used;
+  bool PreservesAll = false;
+
+public:
+  AnalysisUsage() = default;
+
+  ///@{
+  /// Add the specified ID to the required set of the usage info for a pass.
+  AnalysisUsage &addRequiredID(const void *ID);
+  AnalysisUsage &addRequiredID(char &ID);
+  template<class PassClass>
+  AnalysisUsage &addRequired() {
+    return addRequiredID(PassClass::ID);
+  }
+
+  AnalysisUsage &addRequiredTransitiveID(char &ID);
+  template<class PassClass>
+  AnalysisUsage &addRequiredTransitive() {
+    return addRequiredTransitiveID(PassClass::ID);
+  }
+  ///@}
+
+  ///@{
+  /// Add the specified ID to the set of analyses preserved by this pass.
+  AnalysisUsage &addPreservedID(const void *ID) {
+    Preserved.push_back(ID);
+    return *this;
+  }
+  AnalysisUsage &addPreservedID(char &ID) {
+    Preserved.push_back(&ID);
+    return *this;
+  }
+  /// Add the specified Pass class to the set of analyses preserved by this pass.
+  template<class PassClass>
+  AnalysisUsage &addPreserved() {
+    Preserved.push_back(&PassClass::ID);
+    return *this;
+  }
+  ///@}
+
+  ///@{
+  /// Add the specified ID to the set of analyses used by this pass if they
+  /// are available.
+  AnalysisUsage &addUsedIfAvailableID(const void *ID) {
+    Used.push_back(ID);
+    return *this;
+  }
+  AnalysisUsage &addUsedIfAvailableID(char &ID) {
+    Used.push_back(&ID);
+    return *this;
+  }
+  /// Add the specified Pass class to the set of analyses used by this pass.
+  template<class PassClass>
+  AnalysisUsage &addUsedIfAvailable() {
+    Used.push_back(&PassClass::ID);
+    return *this;
+  }
+  ///@}
+
+  /// Add the Pass with the specified argument string to the set of analyses
+  /// preserved by this pass. If no such Pass exists, do nothing. This can be
+  /// useful when a pass is trivially preserved, but may not be linked in. Be
+  /// careful about spelling!
+  AnalysisUsage &addPreserved(StringRef Arg);
+
+  /// Set by analyses that do not transform their input at all
+  void setPreservesAll() { PreservesAll = true; }
+
+  /// Determine whether a pass said it does not transform its input at all
+  bool getPreservesAll() const { return PreservesAll; }
+
+  /// This function should be called by a pass if and only if it does not:
+  ///
+  ///  1. Add or remove basic blocks from the function
+  ///  2. Modify terminator instructions in any way.
+  ///
+  /// This function annotates the AnalysisUsage info object to say that analyses
+  /// that only depend on the CFG are preserved by this pass.
+  void setPreservesCFG();
+
+  const VectorType &getRequiredSet() const { return Required; }
+  const VectorType &getRequiredTransitiveSet() const {
+    return RequiredTransitive;
+  }
+  const VectorType &getPreservedSet() const { return Preserved; }
+  const VectorType &getUsedSet() const { return Used; }
+};
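+
+// Illustrative sketch (not part of the original header): a pass declaring and
+// then consuming an analysis dependency. DominatorTreeWrapperPass is used as
+// a representative required analysis; MyPass and rewriteUsing() are
+// hypothetical.
+//
+// \code
+//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
+//     AU.addRequired<DominatorTreeWrapperPass>();
+//     AU.setPreservesCFG(); // no blocks added/removed, no terminators edited
+//   }
+//   bool MyPass::runOnFunction(Function &F) {
+//     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+//     return rewriteUsing(DT, F); // hypothetical transformation helper
+//   }
+// \endcode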
+
+//===----------------------------------------------------------------------===//
+/// AnalysisResolver - Simple interface used by Pass objects to pull all
+/// analysis information out of the pass manager that is responsible for
+/// managing the pass.
+///
+class AnalysisResolver {
+public:
+  AnalysisResolver() = delete;
+  explicit AnalysisResolver(PMDataManager &P) : PM(P) {}
+
+  PMDataManager &getPMDataManager() { return PM; }
+
+  /// Find pass that is implementing PI.
+  Pass *findImplPass(AnalysisID PI) {
+    Pass *ResultPass = nullptr;
+    for (const auto &AnalysisImpl : AnalysisImpls) {
+      if (AnalysisImpl.first == PI) {
+        ResultPass = AnalysisImpl.second;
+        break;
+      }
+    }
+    return ResultPass;
+  }
+
+  /// Find pass that is implementing PI. Initialize pass for Function F.
+  Pass *findImplPass(Pass *P, AnalysisID PI, Function &F);
+
+  void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
+    if (findImplPass(PI) == P)
+      return;
+    AnalysisImpls.push_back(std::make_pair(PI, P));
+  }
+
+  /// Clear cache that is used to connect a pass to the analysis (PassInfo).
+  void clearAnalysisImpls() {
+    AnalysisImpls.clear();
+  }
+
+  /// Return analysis result or null if it doesn't exist.
+  Pass *getAnalysisIfAvailable(AnalysisID ID, bool Direction) const;
+
+private:
+  /// This keeps track of which passes implement the interfaces that are
+  /// required by the current pass (to implement getAnalysis()).
+  std::vector<std::pair<AnalysisID, Pass *>> AnalysisImpls;
+
+  /// PassManager that is used to resolve analysis info
+  PMDataManager &PM;
+};
+
+/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
+/// get analysis information that might be around, for example to update it.
+/// This is different than getAnalysis in that it can fail (if the analysis
+/// results haven't been computed), so should only be used if you can handle
+/// the case when the analysis is not available.  This method is often used by
+/// transformation APIs to update analysis results for a pass automatically as
+/// the transform is performed.
+template<typename AnalysisType>
+AnalysisType *Pass::getAnalysisIfAvailable() const {
+  assert(Resolver && "Pass not resident in a PassManager object!");
+
+  const void *PI = &AnalysisType::ID;
+
+  Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI, true);
+  if (!ResultPass) return nullptr;
+
+  // Because the AnalysisType may not be a subclass of pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from pass, once from AnalysisType).
+  return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
+
+/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+/// to the analysis information that they claim to use by overriding the
+/// getAnalysisUsage function.
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysis() const {
+  assert(Resolver && "Pass has not been inserted into a PassManager object!");
+  return getAnalysisID<AnalysisType>(&AnalysisType::ID);
+}
+
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
+  assert(PI && "getAnalysis for unregistered pass!");
+  assert(Resolver&&"Pass has not been inserted into a PassManager object!");
+  // PI *must* appear in AnalysisImpls.  Because the number of passes used
+  // should be a small number, we just do a linear search over a (dense)
+  // vector.
+  Pass *ResultPass = Resolver->findImplPass(PI);
+  assert(ResultPass &&
+         "getAnalysis*() called on an analysis that was not "
+         "'required' by pass!");
+
+  // Because the AnalysisType may not be a subclass of pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from pass, once from AnalysisType).
+  return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
+
+/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+/// to the analysis information that they claim to use by overriding the
+/// getAnalysisUsage function.
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysis(Function &F) {
+  assert(Resolver &&"Pass has not been inserted into a PassManager object!");
+
+  return getAnalysisID<AnalysisType>(&AnalysisType::ID, F);
+}
+
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F) {
+  assert(PI && "getAnalysis for unregistered pass!");
+  assert(Resolver && "Pass has not been inserted into a PassManager object!");
+  // PI *must* appear in AnalysisImpls.  Because the number of passes used
+  // should be a small number, we just do a linear search over a (dense)
+  // vector.
+  Pass *ResultPass = Resolver->findImplPass(this, PI, F);
+  assert(ResultPass && "Unable to find requested analysis info");
+
+  // Because the AnalysisType may not be a subclass of pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from pass, once from AnalysisType).
+  return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_PASSANALYSISSUPPORT_H
diff --git a/linux-x64/clang/include/llvm/PassInfo.h b/linux-x64/clang/include/llvm/PassInfo.h
new file mode 100644
index 0000000..2f1ab4d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/PassInfo.h
@@ -0,0 +1,121 @@
+//===- llvm/PassInfo.h - Pass Info class ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements the PassInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSINFO_H
+#define LLVM_PASSINFO_H
+
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class Pass;
+
+//===---------------------------------------------------------------------------
+/// PassInfo class - An instance of this class exists for every pass known by
+/// the system, and can be obtained from a live Pass by calling its
+/// getPassInfo() method.  These objects are set up by the RegisterPass<>
+/// template.
+///
+class PassInfo {
+public:
+  using NormalCtor_t = Pass* (*)();
+
+private:
+  StringRef PassName;     // Nice name for Pass
+  StringRef PassArgument; // Command Line argument to run this pass
+  const void *PassID;
+  const bool IsCFGOnlyPass = false;      // Pass only looks at the CFG.
+  const bool IsAnalysis;                 // True if an analysis pass.
+  const bool IsAnalysisGroup;            // True if an analysis group.
+  std::vector<const PassInfo *> ItfImpl; // Interfaces implemented by this pass
+  NormalCtor_t NormalCtor = nullptr;
+
+public:
+  /// PassInfo ctor - Do not call this directly, this should only be invoked
+  /// through RegisterPass.
+  PassInfo(StringRef name, StringRef arg, const void *pi, NormalCtor_t normal,
+           bool isCFGOnly, bool is_analysis)
+      : PassName(name), PassArgument(arg), PassID(pi), IsCFGOnlyPass(isCFGOnly),
+        IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) {}
+
+  /// PassInfo ctor - Do not call this directly, this should only be invoked
+  /// through RegisterPass. This version is for use by analysis groups; it
+  /// does not auto-register the pass.
+  PassInfo(StringRef name, const void *pi)
+      : PassName(name), PassID(pi), IsAnalysis(false), IsAnalysisGroup(true) {}
+
+  PassInfo(const PassInfo &) = delete;
+  PassInfo &operator=(const PassInfo &) = delete;
+
+  /// getPassName - Return the friendly name for the pass, never returns null
+  StringRef getPassName() const { return PassName; }
+
+  /// getPassArgument - Return the command line option that may be passed to
+  /// 'opt' that will cause this pass to be run.  This will return null if there
+  /// is no argument.
+  StringRef getPassArgument() const { return PassArgument; }
+
+  /// getTypeInfo - Return the id object for the pass...
+  /// TODO : Rename
+  const void *getTypeInfo() const { return PassID; }
+
+  /// Return true if this PassID implements the specified ID pointer.
+  bool isPassID(const void *IDPtr) const { return PassID == IDPtr; }
+
+  /// isAnalysisGroup - Return true if this is an analysis group, not a normal
+  /// pass.
+  bool isAnalysisGroup() const { return IsAnalysisGroup; }
+  bool isAnalysis() const { return IsAnalysis; }
+
+  /// isCFGOnlyPass - return true if this pass only looks at the CFG for the
+  /// function.
+  bool isCFGOnlyPass() const { return IsCFGOnlyPass; }
+
+  /// getNormalCtor - Return a pointer to a function, that when called, creates
+  /// an instance of the pass and returns it.  This pointer may be null if there
+  /// is no default constructor for the pass.
+  NormalCtor_t getNormalCtor() const {
+    return NormalCtor;
+  }
+  void setNormalCtor(NormalCtor_t Ctor) {
+    NormalCtor = Ctor;
+  }
+
+  /// createPass() - Use this method to create an instance of this pass.
+  Pass *createPass() const {
+    assert((!isAnalysisGroup() || NormalCtor) &&
+           "No default implementation found for analysis group!");
+    assert(NormalCtor &&
+           "Cannot call createPass on PassInfo without default ctor!");
+    return NormalCtor();
+  }
+
+  /// addInterfaceImplemented - This method is called when this pass is
+  /// registered as a member of an analysis group with the RegisterAnalysisGroup
+  /// template.
+  void addInterfaceImplemented(const PassInfo *ItfPI) {
+    ItfImpl.push_back(ItfPI);
+  }
+
+  /// getInterfacesImplemented - Return a list of all of the analysis group
+  /// interfaces implemented by this pass.
+  const std::vector<const PassInfo*> &getInterfacesImplemented() const {
+    return ItfImpl;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PASSINFO_H
diff --git a/linux-x64/clang/include/llvm/PassRegistry.h b/linux-x64/clang/include/llvm/PassRegistry.h
new file mode 100644
index 0000000..93edc12
--- /dev/null
+++ b/linux-x64/clang/include/llvm/PassRegistry.h
@@ -0,0 +1,98 @@
+//===- llvm/PassRegistry.h - Pass Information Registry ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PassRegistry, a class that is used in the initialization
+// and registration of passes.  At application startup, passes are registered
+// with the PassRegistry, which is later provided to the PassManager for 
+// dependency resolution and similar tasks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSREGISTRY_H
+#define LLVM_PASSREGISTRY_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/RWMutex.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class PassInfo;
+struct PassRegistrationListener;
+
+/// PassRegistry - This class manages the registration and initialization of
+/// the pass subsystem at application startup, and assists the PassManager
+/// in resolving pass dependencies.
+/// NOTE: PassRegistry is NOT thread-safe.  If you want to use LLVM on multiple
+/// threads simultaneously, you will need to use a separate PassRegistry on
+/// each thread.
+class PassRegistry {
+  mutable sys::SmartRWMutex<true> Lock;
+
+  /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
+  using MapType = DenseMap<const void *, const PassInfo *>;
+  MapType PassInfoMap;
+
+  using StringMapType = StringMap<const PassInfo *>;
+  StringMapType PassInfoStringMap;
+
+  std::vector<std::unique_ptr<const PassInfo>> ToFree;
+  std::vector<PassRegistrationListener *> Listeners;
+
+public:
+  PassRegistry() = default;
+  ~PassRegistry();
+
+  /// getPassRegistry - Access the global registry object, which is
+  /// automatically initialized at application launch and destroyed by
+  /// llvm_shutdown.
+  static PassRegistry *getPassRegistry();
+
+  /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+  /// type identifier (&MyPass::ID).
+  const PassInfo *getPassInfo(const void *TI) const;
+
+  /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+  /// argument string.
+  const PassInfo *getPassInfo(StringRef Arg) const;
+
+  /// registerPass - Register a pass (by means of its PassInfo) with the
+  /// registry.  Required in order to use the pass with a PassManager.
+  void registerPass(const PassInfo &PI, bool ShouldFree = false);
+
+  /// registerAnalysisGroup - Register an analysis group (or a pass implementing
+  /// an analysis group) with the registry.  Like registerPass, this is required
+  /// in order for a PassManager to be able to use this group/pass.
+  void registerAnalysisGroup(const void *InterfaceID, const void *PassID,
+                             PassInfo &Registeree, bool isDefault,
+                             bool ShouldFree = false);
+
+  /// enumerateWith - Enumerate the registered passes, calling the provided
+  /// PassRegistrationListener's passEnumerate() callback on each of them.
+  void enumerateWith(PassRegistrationListener *L);
+
+  /// addRegistrationListener - Register the given PassRegistrationListener
+  /// to receive passRegistered() callbacks whenever a new pass is registered.
+  void addRegistrationListener(PassRegistrationListener *L);
+
+  /// removeRegistrationListener - Unregister a PassRegistrationListener so that
+  /// it no longer receives passRegistered() callbacks.
+  void removeRegistrationListener(PassRegistrationListener *L);
+};
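+// Illustrative sketch (not part of the upstream header): looking up the
+// PassInfo for a registered pass through the global registry, where 'MyPass'
+// is a hypothetical pass class with a static 'ID' member:
+//
+//   const PassInfo *PI =
+//       PassRegistry::getPassRegistry()->getPassInfo(&MyPass::ID);
+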
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassRegistry, LLVMPassRegistryRef)
+
+} // end namespace llvm
+
+#endif // LLVM_PASSREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/PassSupport.h b/linux-x64/clang/include/llvm/PassSupport.h
new file mode 100644
index 0000000..1bf23dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/PassSupport.h
@@ -0,0 +1,216 @@
+//===- llvm/PassSupport.h - Pass Support code -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines stuff that is used to define and "use" Passes.  This file
+// is automatically #included by Pass.h, so:
+//
+//           NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
+//
+// Instead, #include Pass.h.
+//
+// This file defines Pass registration code and classes used for it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSSUPPORT_H
+#define LLVM_PASSSUPPORT_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/PassInfo.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Threading.h"
+#include <functional>
+
+namespace llvm {
+
+class Pass;
+
+#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)                    \
+  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
+    PassInfo *PI = new PassInfo(                                               \
+        name, arg, &passName::ID,                                              \
+        PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);     \
+    Registry.registerPass(*PI, true);                                          \
+    return PI;                                                                 \
+  }                                                                            \
+  static llvm::once_flag Initialize##passName##PassFlag;                       \
+  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
+    llvm::call_once(Initialize##passName##PassFlag,                            \
+                    initialize##passName##PassOnce, std::ref(Registry));       \
+  }
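+
+// Illustrative sketch (hypothetical pass name): for a legacy pass 'MyPass'
+// with a static 'ID' member and a declared initializeMyPassPass entry point,
+// the macro is instantiated in the pass's .cpp file like so:
+//
+//   INITIALIZE_PASS(MyPass, "my-pass", "My example pass",
+//                   /*cfg=*/false, /*analysis=*/false)
+//
+// This expands to a call_once-guarded registration of a PassInfo for MyPass
+// with the given PassRegistry.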
+
+#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)              \
+  static void *initialize##passName##PassOnce(PassRegistry &Registry) {
+
+#define INITIALIZE_PASS_DEPENDENCY(depName) initialize##depName##Pass(Registry);
+#define INITIALIZE_AG_DEPENDENCY(depName)                                      \
+  initialize##depName##AnalysisGroup(Registry);
+
+#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)                \
+  PassInfo *PI = new PassInfo(                                                 \
+      name, arg, &passName::ID,                                                \
+      PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);       \
+  Registry.registerPass(*PI, true);                                            \
+  return PI;                                                                   \
+  }                                                                            \
+  static llvm::once_flag Initialize##passName##PassFlag;                       \
+  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
+    llvm::call_once(Initialize##passName##PassFlag,                            \
+                    initialize##passName##PassOnce, std::ref(Registry));       \
+  }
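+
+// Illustrative sketch (hypothetical pass): a pass with dependencies splits its
+// registration across BEGIN/END so the dependencies are initialized first:
+//
+//   INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My example pass", false, false)
+//   INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+//   INITIALIZE_PASS_END(MyPass, "my-pass", "My example pass", false, false)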
+
+#define INITIALIZE_PASS_WITH_OPTIONS(PassName, Arg, Name, Cfg, Analysis)       \
+  INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis)                    \
+  PassName::registerOptions();                                                 \
+  INITIALIZE_PASS_END(PassName, Arg, Name, Cfg, Analysis)
+
+#define INITIALIZE_PASS_WITH_OPTIONS_BEGIN(PassName, Arg, Name, Cfg, Analysis) \
+  INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis)                    \
+  PassName::registerOptions();
+
+template <typename PassName> Pass *callDefaultCtor() { return new PassName(); }
+
+//===---------------------------------------------------------------------------
+/// RegisterPass<T> template - This template class is used to notify the system
+/// that a Pass is available for use, and registers it into the internal
+/// database maintained by the PassManager.  Unless this template is used, opt,
+/// for example, will not be able to see the pass, and attempts to create the
+/// pass will fail. This template is used in the following manner (at global
+/// scope, in your .cpp file):
+///
+/// static RegisterPass<YourPassClassName> tmp("passopt", "My Pass Name");
+///
+/// This statement will cause your pass to be created by calling the default
+/// constructor exposed by the pass.
+template <typename passName> struct RegisterPass : public PassInfo {
+  // Register Pass using default constructor...
+  RegisterPass(StringRef PassArg, StringRef Name, bool CFGOnly = false,
+               bool is_analysis = false)
+      : PassInfo(Name, PassArg, &passName::ID,
+                 PassInfo::NormalCtor_t(callDefaultCtor<passName>), CFGOnly,
+                 is_analysis) {
+    PassRegistry::getPassRegistry()->registerPass(*this);
+  }
+};
+
+/// RegisterAnalysisGroup - Register a Pass as a member of an analysis _group_.
+/// Analysis groups are used to define an interface (which need not derive from
+/// Pass) that is required by passes to do their job.  Analysis Groups differ
+/// from normal analyses because any available implementation of the group will
+/// be used if it is available.
+///
+/// If no analysis implementing the interface is available, a default
+/// implementation is created and added.  A pass registers itself as the default
+/// implementation by specifying 'true' as the second template argument of this
+/// class.
+///
+/// In addition to registering itself as an analysis group member, a pass must
+/// register itself normally as well.  Passes may be members of multiple groups
+/// and may still be "required" specifically by name.
+///
+/// The interface itself may also be registered (by not specifying the second
+/// template argument).  The interface should be registered to associate
+/// a nice name with the interface.
+class RegisterAGBase : public PassInfo {
+public:
+  RegisterAGBase(StringRef Name, const void *InterfaceID,
+                 const void *PassID = nullptr, bool isDefault = false);
+};
+
+template <typename Interface, bool Default = false>
+struct RegisterAnalysisGroup : public RegisterAGBase {
+  explicit RegisterAnalysisGroup(PassInfo &RPB)
+      : RegisterAGBase(RPB.getPassName(), &Interface::ID, RPB.getTypeInfo(),
+                       Default) {}
+
+  explicit RegisterAnalysisGroup(const char *Name)
+      : RegisterAGBase(Name, &Interface::ID) {}
+};
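+
+// Illustrative sketch (hypothetical types): registering a pass as the default
+// implementation of an analysis group, at global scope in a .cpp file. Here
+// 'MyAAImpl' is the pass and 'AAInterface' is the group interface, which must
+// expose a static 'ID' member:
+//
+//   static RegisterPass<MyAAImpl> X("my-aa", "My AA implementation");
+//   static RegisterAnalysisGroup<AAInterface, /*Default=*/true> Y(X);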
+
+#define INITIALIZE_ANALYSIS_GROUP(agName, name, defaultPass)                   \
+  static void *initialize##agName##AnalysisGroupOnce(PassRegistry &Registry) { \
+    initialize##defaultPass##Pass(Registry);                                   \
+    PassInfo *AI = new PassInfo(name, &agName::ID);                            \
+    Registry.registerAnalysisGroup(&agName::ID, 0, *AI, false, true);          \
+    return AI;                                                                 \
+  }                                                                            \
+  static llvm::once_flag Initialize##agName##AnalysisGroupFlag;                \
+  void llvm::initialize##agName##AnalysisGroup(PassRegistry &Registry) {       \
+    llvm::call_once(Initialize##agName##AnalysisGroupFlag,                     \
+                    initialize##agName##AnalysisGroupOnce,                     \
+                    std::ref(Registry));                                       \
+  }
+
+#define INITIALIZE_AG_PASS(passName, agName, arg, name, cfg, analysis, def)    \
+  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
+    if (!def)                                                                  \
+      initialize##agName##AnalysisGroup(Registry);                             \
+    PassInfo *PI = new PassInfo(                                               \
+        name, arg, &passName::ID,                                              \
+        PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);     \
+    Registry.registerPass(*PI, true);                                          \
+                                                                               \
+    PassInfo *AI = new PassInfo(name, &agName::ID);                            \
+    Registry.registerAnalysisGroup(&agName::ID, &passName::ID, *AI, def,       \
+                                   true);                                      \
+    return AI;                                                                 \
+  }                                                                            \
+  static llvm::once_flag Initialize##passName##PassFlag;                       \
+  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
+    llvm::call_once(Initialize##passName##PassFlag,                            \
+                    initialize##passName##PassOnce, std::ref(Registry));       \
+  }
+
+#define INITIALIZE_AG_PASS_BEGIN(passName, agName, arg, n, cfg, analysis, def) \
+  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
+    if (!def)                                                                  \
+      initialize##agName##AnalysisGroup(Registry);
+
+#define INITIALIZE_AG_PASS_END(passName, agName, arg, n, cfg, analysis, def)   \
+  PassInfo *PI = new PassInfo(                                                 \
+      n, arg, &passName::ID,                                                   \
+      PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);       \
+  Registry.registerPass(*PI, true);                                            \
+                                                                               \
+  PassInfo *AI = new PassInfo(n, &agName::ID);                                 \
+  Registry.registerAnalysisGroup(&agName::ID, &passName::ID, *AI, def, true);  \
+  return AI;                                                                   \
+  }                                                                            \
+  static llvm::once_flag Initialize##passName##PassFlag;                       \
+  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
+    llvm::call_once(Initialize##passName##PassFlag,                            \
+                    initialize##passName##PassOnce, std::ref(Registry));       \
+  }
+
+//===---------------------------------------------------------------------------
+/// PassRegistrationListener class - This class is meant to be derived from by
+/// clients that are interested in which passes get registered and unregistered
+/// at runtime (whether because the RegisterPass constructors run as the
+/// program starts up, or because a shared object was just loaded).
+struct PassRegistrationListener {
+  PassRegistrationListener() = default;
+  virtual ~PassRegistrationListener() = default;
+
+  /// Callback functions - These functions are invoked whenever a pass is loaded
+  /// or removed from the current executable.
+  virtual void passRegistered(const PassInfo *) {}
+
+  /// enumeratePasses - Iterate over the registered passes, calling the
+  /// passEnumerate callback on each PassInfo object.
+  void enumeratePasses();
+
+  /// passEnumerate - Callback function invoked when someone calls
+  /// enumeratePasses on this PassRegistrationListener object.
+  virtual void passEnumerate(const PassInfo *) {}
+};
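+
+// Illustrative sketch: a client observing the registry can derive from
+// PassRegistrationListener and override the callbacks, e.g. to print the
+// argument string of every registered pass:
+//
+//   struct PrintingListener : PassRegistrationListener {
+//     void passEnumerate(const PassInfo *PI) override {
+//       errs() << PI->getPassArgument() << "\n";
+//     }
+//   };
+//   PrintingListener L;
+//   L.enumeratePasses(); // invokes passEnumerate for each registered pass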
+
+} // end namespace llvm
+
+#endif // LLVM_PASSSUPPORT_H
diff --git a/linux-x64/clang/include/llvm/Passes/PassBuilder.h b/linux-x64/clang/include/llvm/Passes/PassBuilder.h
new file mode 100644
index 0000000..5efcda0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Passes/PassBuilder.h
@@ -0,0 +1,685 @@
+//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Interfaces for registering analysis passes, producing common pass manager
+/// configurations, and parsing of pass pipelines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSES_PASSBUILDER_H
+#define LLVM_PASSES_PASSBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+#include <vector>
+
+namespace llvm {
+class StringRef;
+class AAManager;
+class TargetMachine;
+
+/// A struct capturing PGO tunables.
+struct PGOOptions {
+  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
+             std::string SampleProfileFile = "", bool RunProfileGen = false,
+             bool SamplePGOSupport = false)
+      : ProfileGenFile(ProfileGenFile), ProfileUseFile(ProfileUseFile),
+        SampleProfileFile(SampleProfileFile), RunProfileGen(RunProfileGen),
+        SamplePGOSupport(SamplePGOSupport || !SampleProfileFile.empty()) {
+    assert((RunProfileGen ||
+            !SampleProfileFile.empty() ||
+            !ProfileUseFile.empty() ||
+            SamplePGOSupport) && "Illegal PGOOptions.");
+  }
+  std::string ProfileGenFile;
+  std::string ProfileUseFile;
+  std::string SampleProfileFile;
+  bool RunProfileGen;
+  bool SamplePGOSupport;
+};
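+
+// Illustrative sketch: a frontend enabling IR-level profile generation might
+// construct PGOOptions like this (the output file name is hypothetical):
+//
+//   PGOOptions Opts("default.profraw", "", "", /*RunProfileGen=*/true);
+//
+// The constructor's assertion rejects combinations that request neither
+// generation nor use of any profile.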
+
+/// \brief This class provides access to building LLVM's passes.
+///
+/// Its members provide the baseline state available to passes during their
+/// construction. The \c PassRegistry.def file specifies how to construct all
+/// of the built-in passes, and those may reference these members during
+/// construction.
+class PassBuilder {
+  TargetMachine *TM;
+  Optional<PGOOptions> PGOOpt;
+
+public:
+  /// \brief A struct to capture parsed pass pipeline names.
+  ///
+  /// A pipeline is defined as a series of names, each of which may in itself
+  /// recursively contain a nested pipeline. A name is either the name of a pass
+  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
+  /// name is the name of a pass, the InnerPipeline is empty, since passes
+  /// cannot contain inner pipelines. See parsePassPipeline() for a more
+  /// detailed description of the textual pipeline format.
+  struct PipelineElement {
+    StringRef Name;
+    std::vector<PipelineElement> InnerPipeline;
+  };
+
+  /// \brief ThinLTO phase.
+  ///
+  /// This enumerates the LLVM ThinLTO optimization phases.
+  enum class ThinLTOPhase {
+    /// No ThinLTO behavior needed.
+    None,
+    /// ThinLTO prelink (summary) phase.
+    PreLink,
+    /// ThinLTO postlink (backend compile) phase.
+    PostLink
+  };
+
+  /// \brief LLVM-provided high-level optimization levels.
+  ///
+  /// This enumerates the LLVM-provided high-level optimization levels. Each
+  /// level has a specific goal and rationale.
+  enum OptimizationLevel {
+    /// Disable as many optimizations as possible. This doesn't completely
+    /// disable the optimizer in all cases, for example always_inline functions
+    /// can be required to be inlined for correctness.
+    O0,
+
+    /// Optimize quickly without destroying debuggability.
+    ///
+    /// FIXME: The current and historical behavior of this level does *not*
+    /// agree with this goal, but we would like to move toward this goal in the
+    /// future.
+    ///
+    /// This level is tuned to produce a result from the optimizer as quickly
+    /// as possible and to avoid destroying debuggability. This tends to result
+    /// in a very good development mode where the compiled code will be
+    /// immediately executed as part of testing. As a consequence, where
+    /// possible, we would like to produce efficient-to-execute code, but not
+    /// if it significantly slows down compilation or would prevent even basic
+    /// debugging of the resulting binary.
+    ///
+    /// As an example, complex loop transformations such as versioning,
+    /// vectorization, or fusion might not make sense here due to the degree to
+    /// which the executed code would differ from the source code, and the
+    /// potential compile time cost.
+    O1,
+
+    /// Optimize for fast execution as much as possible without triggering
+    /// significant incremental compile time or code size growth.
+    ///
+    /// The key idea is that optimizations at this level should "pay for
+    /// themselves". So if an optimization increases compile time by 5% or
+    /// increases code size by 5% for a particular benchmark, that benchmark
+    /// should also be one which sees a 5% runtime improvement. If the compile
+    /// time or code size penalties happen on average across a diverse range of
+    /// LLVM users' benchmarks, then the improvements should as well.
+    ///
+    /// And no matter what, the compile time needs to not grow superlinearly
+    /// with the size of input to LLVM so that users can control the runtime of
+    /// the optimizer in this mode.
+    ///
+    /// This is expected to be a good default optimization level for the vast
+    /// majority of users.
+    O2,
+
+    /// Optimize for fast execution as much as possible.
+    ///
+    /// This mode is significantly more aggressive in trading off compile time
+    /// and code size to get execution time improvements. The core idea is that
+    /// this mode should include any optimization that helps execution time on
+    /// balance across a diverse collection of benchmarks, even if it increases
+    /// code size or compile time for some benchmarks without corresponding
+    /// improvements to execution time.
+    ///
+    /// Despite being willing to trade more compile time off to get improved
+    /// execution time, this mode still tries to avoid superlinear growth in
+    /// order to make even significantly slower compile times at least scale
+    /// reasonably. This does not preclude very substantial constant factor
+    /// costs though.
+    O3,
+
+    /// Similar to \c O2 but tries to optimize for small code size instead of
+    /// fast execution without triggering significant incremental execution
+    /// time slowdowns.
+    ///
+    /// The logic here is exactly the same as \c O2, but with code size and
+    /// execution time metrics swapped.
+    ///
+    /// A consequence of the different core goal is that this should in general
+    /// produce substantially smaller executables that still run in
+    /// a reasonable amount of time.
+    Os,
+
+    /// A very specialized mode that will optimize for code size at any and all
+    /// costs.
+    ///
+    /// This is useful primarily when there are absolute size limitations and
+    /// any effort taken to reduce the size is worth it regardless of the
+    /// execution time impact. You should expect this level to produce rather
+    /// slow, but very small, code.
+    Oz
+  };
+
+  explicit PassBuilder(TargetMachine *TM = nullptr,
+                       Optional<PGOOptions> PGOOpt = None)
+      : TM(TM), PGOOpt(PGOOpt) {}
+
+  /// \brief Cross register the analysis managers through their proxies.
+  ///
+  /// This is an interface that can be used to cross register each
+  /// AnalysisManager with all the other analysis managers.
+  void crossRegisterProxies(LoopAnalysisManager &LAM,
+                            FunctionAnalysisManager &FAM,
+                            CGSCCAnalysisManager &CGAM,
+                            ModuleAnalysisManager &MAM);
+
+  /// \brief Registers all available module analysis passes.
+  ///
+  /// This is an interface that can be used to populate a \c
+  /// ModuleAnalysisManager with all registered module analyses. Callers can
+  /// still manually register any additional analyses. Callers can also
+  /// pre-register analyses and this will not override those.
+  void registerModuleAnalyses(ModuleAnalysisManager &MAM);
+
+  /// \brief Registers all available CGSCC analysis passes.
+  ///
+  /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
+  /// with all registered CGSCC analyses. Callers can still manually register any
+  /// additional analyses. Callers can also pre-register analyses and this will
+  /// not override those.
+  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
+
+  /// \brief Registers all available function analysis passes.
+  ///
+  /// This is an interface that can be used to populate a \c
+  /// FunctionAnalysisManager with all registered function analyses. Callers can
+  /// still manually register any additional analyses. Callers can also
+  /// pre-register analyses and this will not override those.
+  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
+
+  /// \brief Registers all available loop analysis passes.
+  ///
+  /// This is an interface that can be used to populate a \c LoopAnalysisManager
+  /// with all registered loop analyses. Callers can still manually register any
+  /// additional analyses.
+  void registerLoopAnalyses(LoopAnalysisManager &LAM);
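+
+  // A typical setup (illustrative sketch) constructs all four analysis
+  // managers, registers the analyses, and cross-registers the proxies before
+  // building any pipeline:
+  //
+  //   PassBuilder PB;
+  //   LoopAnalysisManager LAM;
+  //   FunctionAnalysisManager FAM;
+  //   CGSCCAnalysisManager CGAM;
+  //   ModuleAnalysisManager MAM;
+  //   PB.registerModuleAnalyses(MAM);
+  //   PB.registerCGSCCAnalyses(CGAM);
+  //   PB.registerFunctionAnalyses(FAM);
+  //   PB.registerLoopAnalyses(LAM);
+  //   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);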
+
+  /// Construct the core LLVM function canonicalization and simplification
+  /// pipeline.
+  ///
+  /// This is a long pipeline and uses most of the per-function optimization
+  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
+  /// repeatedly over the IR and is not expected to destroy important
+  /// information about the semantics of the IR.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ///
+  /// \p Phase indicates the current ThinLTO phase.
+  FunctionPassManager
+  buildFunctionSimplificationPipeline(OptimizationLevel Level,
+                                      ThinLTOPhase Phase,
+                                      bool DebugLogging = false);
+
+  /// Construct the core LLVM module canonicalization and simplification
+  /// pipeline.
+  ///
+  /// This pipeline focuses on canonicalizing and simplifying the entire module
+  /// of IR. Much like the function simplification pipeline above, it is
+  /// suitable to run repeatedly over the IR and is not expected to destroy
+  /// important information. It does, however, perform inlining and other
+  /// heuristic based simplifications that are not strictly reversible.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ///
+  /// \p Phase indicates the current ThinLTO phase.
+  ModulePassManager
+  buildModuleSimplificationPipeline(OptimizationLevel Level,
+                                    ThinLTOPhase Phase,
+                                    bool DebugLogging = false);
+
+  /// Construct the core LLVM module optimization pipeline.
+  ///
+  /// This pipeline focuses on optimizing the execution speed of the IR. It
+  /// uses cost modeling and thresholds to balance code growth against runtime
+  /// improvements. It includes vectorization and other information destroying
+  /// transformations. It also cannot generally be run repeatedly on a module
+  /// without potentially seriously regressing either runtime performance of
+  /// the code or serious code size growth.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
+                                                    bool DebugLogging = false);
+
+  /// Build a per-module default optimization pipeline.
+  ///
+  /// This provides a good default optimization pipeline for per-module
+  /// optimization and code generation without any link-time optimization. It
+  /// typically corresponds to the frontend "-O[123]" options for optimization
+  /// levels \c O1, \c O2 and \c O3 respectively.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
+                                                  bool DebugLogging = false);
+
+  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
+  /// a pass manager.
+  ///
+  /// This adds the pre-link optimizations tuned to prepare a module for
+  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
+  /// without making irreversible decisions which could be made better during
+  /// the LTO run.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager
+  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
+                                     bool DebugLogging = false);
+
+  /// Build a ThinLTO default optimization pipeline to a pass manager.
+  ///
+  /// This provides a good default optimization pipeline for link-time
+  /// optimization and code generation. It is particularly tuned to fit well
+  /// when IR coming into the LTO phase was first run through \c
+  /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager buildThinLTODefaultPipeline(OptimizationLevel Level,
+                                                bool DebugLogging = false);
+
+  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
+  /// manager.
+  ///
+  /// This adds the pre-link optimizations tuned to work well with a later LTO
+  /// run. It works to minimize the IR which needs to be analyzed without
+  /// making irreversible decisions which could be made better during the LTO
+  /// run.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
+                                                   bool DebugLogging = false);
+
+  /// Build an LTO default optimization pipeline to a pass manager.
+  ///
+  /// This provides a good default optimization pipeline for link-time
+  /// optimization and code generation. It is particularly tuned to fit well
+  /// when IR coming into the LTO phase was first run through \c
+  /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
+  ///
+  /// Note that \p Level cannot be `O0` here. The pipelines produced are
+  /// only intended for use when attempting to optimize code. If frontends
+  /// require some transformations for semantic reasons, they should explicitly
+  /// build them.
+  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
+                                            bool DebugLogging = false);
+
+  /// Build the default `AAManager` with the default alias analysis pipeline
+  /// registered.
+  AAManager buildDefaultAAPipeline();
+
+  /// \brief Parse a textual pass pipeline description into a \c
+  /// ModulePassManager.
+  ///
+  /// The format of the textual pass pipeline description looks something like:
+  ///
+  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
+  ///
+  /// Pass managers have ()s describing the nesting structure of passes. All
+  /// passes are comma separated. As a special shortcut, if the very first pass
+  /// is not a module pass (as a module pass manager is), this will
+  /// automatically form the shortest stack of pass managers that allows
+  /// inserting that first pass.
+  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
+  /// passes 'lpassN', all of these are valid:
+  ///
+  ///   fpass1,fpass2,fpass3
+  ///   cgpass1,cgpass2,cgpass3
+  ///   lpass1,lpass2,lpass3
+  ///
+  /// And they are equivalent to the following (resp.):
+  ///
+  ///   module(function(fpass1,fpass2,fpass3))
+  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
+  ///   module(function(loop(lpass1,lpass2,lpass3)))
+  ///
+  /// This shortcut is especially useful for debugging and testing small pass
+  /// combinations. Note that these shortcuts don't introduce any other magic.
+  /// If the passes in the sequence aren't all the exact same kind of pass, it
+  /// is an error. You cannot mix different levels implicitly; you must
+  /// explicitly form a pass manager in which to nest passes.
+  bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
+                         bool VerifyEachPass = true, bool DebugLogging = false);
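+
+  // Illustrative sketch: building and running a pipeline from text, assuming
+  // the PB/MAM setup sketched above, a Module 'M', and standard pass names
+  // (error handling kept minimal):
+  //
+  //   ModulePassManager MPM;
+  //   if (!PB.parsePassPipeline(MPM, "module(function(instcombine,sroa))"))
+  //     report_fatal_error("invalid pipeline");
+  //   MPM.run(M, MAM);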
+
+  /// @{ Parse a textual pass pipeline description into a specific PassManager
+  ///
+  /// Automatic deduction of an appropriate pass manager stack is not supported.
+  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
+  /// the valid pipeline text is:
+  ///
+  ///   function(lpass)
+  bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
+                         bool VerifyEachPass = true, bool DebugLogging = false);
+  bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
+                         bool VerifyEachPass = true, bool DebugLogging = false);
+  bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
+                         bool VerifyEachPass = true, bool DebugLogging = false);
+  /// @}
+
+  /// Parse a textual alias analysis pipeline into the provided AA manager.
+  ///
+  /// The format of the textual AA pipeline is a comma separated list of AA
+  /// pass names:
+  ///
+  ///   basic-aa,globals-aa,...
+  ///
+  /// The AA manager is set up such that the provided alias analyses are tried
+  /// in the order specified. See the \c AAManager documentation for details
+  /// about the logic used. This routine just provides the textual mapping
+  /// between AA names and the analyses to register with the manager.
+  ///
+  /// Returns false if the text cannot be parsed cleanly. The specific state of
+  /// the \p AA manager is unspecified if such an error is encountered and this
+  /// returns false.
+  bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
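+
+  // Illustrative sketch: populating an AAManager from text and registering it
+  // with the FunctionAnalysisManager from the setup sketch above:
+  //
+  //   AAManager AA;
+  //   if (!PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
+  //     report_fatal_error("invalid AA pipeline");
+  //   FAM.registerPass([&] { return std::move(AA); });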
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding passes that perform peephole
+  /// optimizations similar to the instruction combiner. These passes will be
+  /// inserted after each instance of the instruction combiner pass.
+  void registerPeepholeEPCallback(
+      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+    PeepholeEPCallbacks.push_back(C);
+  }
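+
+  // Illustrative sketch: a client adds a hypothetical function pass at this
+  // extension point with a lambda:
+  //
+  //   PB.registerPeepholeEPCallback(
+  //       [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel L) {
+  //         FPM.addPass(MyPeepholePass()); // hypothetical pass
+  //       });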
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding late loop canonicalization and
+  /// simplification passes. This is the last point in the loop optimization
+  /// pipeline before loop deletion. Each pass added
+  /// here must be an instance of LoopPass.
+  /// This is the place to add passes that can remove loops, such as target-
+  /// specific loop idiom recognition.
+  void registerLateLoopOptimizationsEPCallback(
+      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+    LateLoopOptimizationsEPCallbacks.push_back(C);
+  }
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding loop passes to the end of the loop
+  /// optimizer.
+  void registerLoopOptimizerEndEPCallback(
+      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+    LoopOptimizerEndEPCallbacks.push_back(C);
+  }
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding optimization passes after most of the
+  /// main optimizations, but before the last cleanup-ish optimizations.
+  void registerScalarOptimizerLateEPCallback(
+      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+    ScalarOptimizerLateEPCallbacks.push_back(C);
+  }
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding CallGraphSCC passes at the end of the
+  /// main CallGraphSCC passes and before any function simplification passes run
+  /// by CGPassManager.
+  void registerCGSCCOptimizerLateEPCallback(
+      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
+    CGSCCOptimizerLateEPCallbacks.push_back(C);
+  }
+
+  /// \brief Register a callback for a default optimizer pipeline extension
+  /// point
+  ///
+  /// This extension point allows adding optimization passes before the
+  /// vectorizer and other highly target specific optimization passes are
+  /// executed.
+  void registerVectorizerStartEPCallback(
+      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+    VectorizerStartEPCallbacks.push_back(C);
+  }
+
+  /// Register a callback for a default optimizer pipeline extension point.
+  ///
+  /// This extension point allows adding optimization passes once at the start
+  /// of the pipeline. This does not apply to 'backend' compiles (LTO and
+  /// ThinLTO link-time pipelines).
+  void registerPipelineStartEPCallback(
+      const std::function<void(ModulePassManager &)> &C) {
+    PipelineStartEPCallbacks.push_back(C);
+  }
+
+  /// \brief Register a callback for parsing an AliasAnalysis Name to populate
+  /// the given AAManager \p AA
+  void registerParseAACallback(
+      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
+    AAParsingCallbacks.push_back(C);
+  }
+
+  /// @{ Register callbacks for analysis registration with this PassBuilder
+  /// instance.
+  /// Callees register their analyses with the given AnalysisManager objects.
+  void registerAnalysisRegistrationCallback(
+      const std::function<void(CGSCCAnalysisManager &)> &C) {
+    CGSCCAnalysisRegistrationCallbacks.push_back(C);
+  }
+  void registerAnalysisRegistrationCallback(
+      const std::function<void(FunctionAnalysisManager &)> &C) {
+    FunctionAnalysisRegistrationCallbacks.push_back(C);
+  }
+  void registerAnalysisRegistrationCallback(
+      const std::function<void(LoopAnalysisManager &)> &C) {
+    LoopAnalysisRegistrationCallbacks.push_back(C);
+  }
+  void registerAnalysisRegistrationCallback(
+      const std::function<void(ModuleAnalysisManager &)> &C) {
+    ModuleAnalysisRegistrationCallbacks.push_back(C);
+  }
+  /// @}
+
+  /// @{ Register pipeline parsing callbacks with this pass builder instance.
+  /// Using these callbacks, callers can parse both a single pass name, as well
+  /// as entire sub-pipelines, and populate the PassManager instance
+  /// accordingly.
+  void registerPipelineParsingCallback(
+      const std::function<bool(StringRef Name, CGSCCPassManager &,
+                               ArrayRef<PipelineElement>)> &C) {
+    CGSCCPipelineParsingCallbacks.push_back(C);
+  }
+  void registerPipelineParsingCallback(
+      const std::function<bool(StringRef Name, FunctionPassManager &,
+                               ArrayRef<PipelineElement>)> &C) {
+    FunctionPipelineParsingCallbacks.push_back(C);
+  }
+  void registerPipelineParsingCallback(
+      const std::function<bool(StringRef Name, LoopPassManager &,
+                               ArrayRef<PipelineElement>)> &C) {
+    LoopPipelineParsingCallbacks.push_back(C);
+  }
+  void registerPipelineParsingCallback(
+      const std::function<bool(StringRef Name, ModulePassManager &,
+                               ArrayRef<PipelineElement>)> &C) {
+    ModulePipelineParsingCallbacks.push_back(C);
+  }
+  /// @}
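+
+  // Illustrative sketch: a plugin can teach the parser a new pass name. Here a
+  // hypothetical 'my-pass' becomes parseable as a function pass:
+  //
+  //   PB.registerPipelineParsingCallback(
+  //       [](StringRef Name, FunctionPassManager &FPM,
+  //          ArrayRef<PassBuilder::PipelineElement>) {
+  //         if (Name == "my-pass") {
+  //           FPM.addPass(MyPass()); // hypothetical pass
+  //           return true;
+  //         }
+  //         return false;
+  //       });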
+
+  /// \brief Register a callback for a top-level pipeline entry.
+  ///
+  /// If the PassManager type is not given at the top level of the pipeline
+  /// text, this callback should be used to determine the appropriate stack of
+  /// PassManagers and populate the passed ModulePassManager.
+  void registerParseTopLevelPipelineCallback(
+      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+                               bool VerifyEachPass, bool DebugLogging)> &C) {
+    TopLevelPipelineParsingCallbacks.push_back(C);
+  }
+
+private:
+  static Optional<std::vector<PipelineElement>>
+  parsePipelineText(StringRef Text);
+
+  bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
+                       bool VerifyEachPass, bool DebugLogging);
+  bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
+                      bool VerifyEachPass, bool DebugLogging);
+  bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
+                     bool VerifyEachPass, bool DebugLogging);
+  bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
+                     bool VerifyEachPass, bool DebugLogging);
+  bool parseAAPassName(AAManager &AA, StringRef Name);
+
+  bool parseLoopPassPipeline(LoopPassManager &LPM,
+                             ArrayRef<PipelineElement> Pipeline,
+                             bool VerifyEachPass, bool DebugLogging);
+  bool parseFunctionPassPipeline(FunctionPassManager &FPM,
+                                 ArrayRef<PipelineElement> Pipeline,
+                                 bool VerifyEachPass, bool DebugLogging);
+  bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
+                              ArrayRef<PipelineElement> Pipeline,
+                              bool VerifyEachPass, bool DebugLogging);
+  bool parseModulePassPipeline(ModulePassManager &MPM,
+                               ArrayRef<PipelineElement> Pipeline,
+                               bool VerifyEachPass, bool DebugLogging);
+
+  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
+                         OptimizationLevel Level, bool RunProfileGen,
+                         std::string ProfileGenFile,
+                         std::string ProfileUseFile);
+
+  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
+
+  // Extension Point callbacks
+  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+      PeepholeEPCallbacks;
+  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+      LateLoopOptimizationsEPCallbacks;
+  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+      LoopOptimizerEndEPCallbacks;
+  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+      ScalarOptimizerLateEPCallbacks;
+  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
+      CGSCCOptimizerLateEPCallbacks;
+  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+      VectorizerStartEPCallbacks;
+  // Module callbacks
+  SmallVector<std::function<void(ModulePassManager &)>, 2>
+      PipelineStartEPCallbacks;
+  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
+      ModuleAnalysisRegistrationCallbacks;
+  SmallVector<std::function<bool(StringRef, ModulePassManager &,
+                                 ArrayRef<PipelineElement>)>,
+              2>
+      ModulePipelineParsingCallbacks;
+  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+                                 bool VerifyEachPass, bool DebugLogging)>,
+              2>
+      TopLevelPipelineParsingCallbacks;
+  // CGSCC callbacks
+  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
+      CGSCCAnalysisRegistrationCallbacks;
+  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
+                                 ArrayRef<PipelineElement>)>,
+              2>
+      CGSCCPipelineParsingCallbacks;
+  // Function callbacks
+  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
+      FunctionAnalysisRegistrationCallbacks;
+  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
+                                 ArrayRef<PipelineElement>)>,
+              2>
+      FunctionPipelineParsingCallbacks;
+  // Loop callbacks
+  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
+      LoopAnalysisRegistrationCallbacks;
+  SmallVector<std::function<bool(StringRef, LoopPassManager &,
+                                 ArrayRef<PipelineElement>)>,
+              2>
+      LoopPipelineParsingCallbacks;
+  // AA callbacks
+  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
+      AAParsingCallbacks;
+};
+
+/// This utility template takes care of adding require<> and invalidate<>
+/// passes for an analysis to a given \c PassManager. It is intended to be used
+/// while parsing a pass pipeline, when handling a single PipelineName.
+/// When registering a new function analysis FancyAnalysis with the pass
+/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
+/// like this:
+///
+/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
+///                                   ArrayRef<PipelineElement> P) {
+///   if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
+///                                                 FPM))
+///     return true;
+///   return false;
+/// }
+template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
+          typename... ExtraArgTs>
+bool parseAnalysisUtilityPasses(
+    StringRef AnalysisName, StringRef PipelineName,
+    PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
+  if (!PipelineName.endswith(">"))
+    return false;
+  // See if this is an invalidate<> pass name
+  if (PipelineName.startswith("invalidate<")) {
+    PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
+    if (PipelineName != AnalysisName)
+      return false;
+    PM.addPass(InvalidateAnalysisPass<AnalysisT>());
+    return true;
+  }
+
+  // See if this is a require<> pass name
+  if (PipelineName.startswith("require<")) {
+    PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
+    if (PipelineName != AnalysisName)
+      return false;
+    PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
+                                   ExtraArgTs...>());
+    return true;
+  }
+
+  return false;
+}
+} // end namespace llvm
+
+#endif // LLVM_PASSES_PASSBUILDER_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMapping.h b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMapping.h
new file mode 100644
index 0000000..5a4098c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -0,0 +1,831 @@
+//===- CoverageMapping.h - Code coverage mapping support --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code coverage mapping data is generated by clang and read by
+// llvm-cov to show code coverage statistics for a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H
+#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class IndexedInstrProfReader;
+
+namespace coverage {
+
+class CoverageMappingReader;
+struct CoverageMappingRecord;
+
+enum class coveragemap_error {
+  success = 0,
+  eof,
+  no_data_found,
+  unsupported_version,
+  truncated,
+  malformed
+};
+
+const std::error_category &coveragemap_category();
+
+inline std::error_code make_error_code(coveragemap_error E) {
+  return std::error_code(static_cast<int>(E), coveragemap_category());
+}
+
+class CoverageMapError : public ErrorInfo<CoverageMapError> {
+public:
+  CoverageMapError(coveragemap_error Err) : Err(Err) {
+    assert(Err != coveragemap_error::success && "Not an error");
+  }
+
+  std::string message() const override;
+
+  void log(raw_ostream &OS) const override { OS << message(); }
+
+  std::error_code convertToErrorCode() const override {
+    return make_error_code(Err);
+  }
+
+  coveragemap_error get() const { return Err; }
+
+  static char ID;
+
+private:
+  coveragemap_error Err;
+};
+
+/// A Counter is an abstract value that describes how to compute the
+/// execution count for a region of code using the collected profile count data.
+struct Counter {
+  enum CounterKind { Zero, CounterValueReference, Expression };
+  static const unsigned EncodingTagBits = 2;
+  static const unsigned EncodingTagMask = 0x3;
+  static const unsigned EncodingCounterTagAndExpansionRegionTagBits =
+      EncodingTagBits + 1;
+
+private:
+  CounterKind Kind = Zero;
+  unsigned ID = 0;
+
+  Counter(CounterKind Kind, unsigned ID) : Kind(Kind), ID(ID) {}
+
+public:
+  Counter() = default;
+
+  CounterKind getKind() const { return Kind; }
+
+  bool isZero() const { return Kind == Zero; }
+
+  bool isExpression() const { return Kind == Expression; }
+
+  unsigned getCounterID() const { return ID; }
+
+  unsigned getExpressionID() const { return ID; }
+
+  friend bool operator==(const Counter &LHS, const Counter &RHS) {
+    return LHS.Kind == RHS.Kind && LHS.ID == RHS.ID;
+  }
+
+  friend bool operator!=(const Counter &LHS, const Counter &RHS) {
+    return !(LHS == RHS);
+  }
+
+  friend bool operator<(const Counter &LHS, const Counter &RHS) {
+    return std::tie(LHS.Kind, LHS.ID) < std::tie(RHS.Kind, RHS.ID);
+  }
+
+  /// Return the counter that represents the number zero.
+  static Counter getZero() { return Counter(); }
+
+  /// Return the counter that corresponds to a specific profile counter.
+  static Counter getCounter(unsigned CounterId) {
+    return Counter(CounterValueReference, CounterId);
+  }
+
+  /// Return the counter that corresponds to a specific addition counter
+  /// expression.
+  static Counter getExpression(unsigned ExpressionId) {
+    return Counter(Expression, ExpressionId);
+  }
+};
+
+/// A Counter expression is a value that represents an arithmetic operation
+/// with two counters.
+struct CounterExpression {
+  enum ExprKind { Subtract, Add };
+  ExprKind Kind;
+  Counter LHS, RHS;
+
+  CounterExpression(ExprKind Kind, Counter LHS, Counter RHS)
+      : Kind(Kind), LHS(LHS), RHS(RHS) {}
+};
+
+/// A Counter expression builder is used to construct the counter expressions.
+/// It avoids unnecessary duplication and simplifies algebraic expressions.
+class CounterExpressionBuilder {
+  /// A list of all the counter expressions
+  std::vector<CounterExpression> Expressions;
+
+  /// A lookup table for the index of a given expression.
+  DenseMap<CounterExpression, unsigned> ExpressionIndices;
+
+  /// Return the counter which corresponds to the given expression.
+  ///
+  /// If the given expression is already stored in the builder, a counter
+  /// that references that expression is returned. Otherwise, the given
+  /// expression is added to the builder's collection of expressions.
+  Counter get(const CounterExpression &E);
+
+  /// Represents a term in a counter expression tree.
+  struct Term {
+    unsigned CounterID;
+    int Factor;
+
+    Term(unsigned CounterID, int Factor)
+        : CounterID(CounterID), Factor(Factor) {}
+  };
+
+  /// Gather the terms of the expression tree for processing.
+  ///
+  /// This collects each addition and subtraction referenced by the counter into
+  /// a sequence that can be sorted and combined to build a simplified counter
+  /// expression.
+  void extractTerms(Counter C, int Sign, SmallVectorImpl<Term> &Terms);
+
+  /// Simplifies the given expression tree
+  /// by getting rid of algebraically redundant operations.
+  Counter simplify(Counter ExpressionTree);
+
+public:
+  ArrayRef<CounterExpression> getExpressions() const { return Expressions; }
+
+  /// Return a counter that represents the expression that adds LHS and RHS.
+  Counter add(Counter LHS, Counter RHS);
+
+  /// Return a counter that represents the expression that subtracts RHS from
+  /// LHS.
+  Counter subtract(Counter LHS, Counter RHS);
+};
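+
+// Illustrative sketch: deriving an else-branch count from two profile
+// counters (counter indices are hypothetical):
+//
+//   CounterExpressionBuilder Builder;
+//   Counter ThenCount = Counter::getCounter(1);
+//   Counter ElseCount = Builder.subtract(Counter::getCounter(0), ThenCount);
+//
+// 'ElseCount' now references an expression (parent count minus then-branch
+// count) stored in the builder, and can be attached to a mapping region.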
+
+using LineColPair = std::pair<unsigned, unsigned>;
+
+/// A Counter mapping region associates a source range with a specific counter.
+struct CounterMappingRegion {
+  enum RegionKind {
+    /// A CodeRegion associates some code with a counter.
+    CodeRegion,
+
+    /// An ExpansionRegion represents a file expansion region that associates
+    /// a source range with the expansion of a virtual source file, such as
+    /// for a macro instantiation or #include file.
+    ExpansionRegion,
+
+    /// A SkippedRegion represents a source range with code that was skipped
+    /// by a preprocessor or similar means.
+    SkippedRegion,
+
+    /// A GapRegion is like a CodeRegion, but its count is only set as the
+    /// line execution count when it's the only region on the line.
+    GapRegion
+  };
+
+  Counter Count;
+  unsigned FileID, ExpandedFileID;
+  unsigned LineStart, ColumnStart, LineEnd, ColumnEnd;
+  RegionKind Kind;
+
+  CounterMappingRegion(Counter Count, unsigned FileID, unsigned ExpandedFileID,
+                       unsigned LineStart, unsigned ColumnStart,
+                       unsigned LineEnd, unsigned ColumnEnd, RegionKind Kind)
+      : Count(Count), FileID(FileID), ExpandedFileID(ExpandedFileID),
+        LineStart(LineStart), ColumnStart(ColumnStart), LineEnd(LineEnd),
+        ColumnEnd(ColumnEnd), Kind(Kind) {}
+
+  static CounterMappingRegion
+  makeRegion(Counter Count, unsigned FileID, unsigned LineStart,
+             unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
+    return CounterMappingRegion(Count, FileID, 0, LineStart, ColumnStart,
+                                LineEnd, ColumnEnd, CodeRegion);
+  }
+
+  static CounterMappingRegion
+  makeExpansion(unsigned FileID, unsigned ExpandedFileID, unsigned LineStart,
+                unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
+    return CounterMappingRegion(Counter(), FileID, ExpandedFileID, LineStart,
+                                ColumnStart, LineEnd, ColumnEnd,
+                                ExpansionRegion);
+  }
+
+  static CounterMappingRegion
+  makeSkipped(unsigned FileID, unsigned LineStart, unsigned ColumnStart,
+              unsigned LineEnd, unsigned ColumnEnd) {
+    return CounterMappingRegion(Counter(), FileID, 0, LineStart, ColumnStart,
+                                LineEnd, ColumnEnd, SkippedRegion);
+  }
+
+  static CounterMappingRegion
+  makeGapRegion(Counter Count, unsigned FileID, unsigned LineStart,
+                unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
+    return CounterMappingRegion(Count, FileID, 0, LineStart, ColumnStart,
+                                LineEnd, (1U << 31) | ColumnEnd, GapRegion);
+  }
+
+  inline LineColPair startLoc() const {
+    return LineColPair(LineStart, ColumnStart);
+  }
+
+  inline LineColPair endLoc() const { return LineColPair(LineEnd, ColumnEnd); }
+};
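+
+// Illustrative sketch: a code region for file 0, counted by profile counter 0,
+// spanning line 3 column 1 through line 5 column 10 (all values hypothetical):
+//
+//   CounterMappingRegion R = CounterMappingRegion::makeRegion(
+//       Counter::getCounter(0), /*FileID=*/0, /*LineStart=*/3,
+//       /*ColumnStart=*/1, /*LineEnd=*/5, /*ColumnEnd=*/10);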
+
+/// Associates a source range with an execution count.
+struct CountedRegion : public CounterMappingRegion {
+  uint64_t ExecutionCount;
+
+  CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount)
+      : CounterMappingRegion(R), ExecutionCount(ExecutionCount) {}
+};
+
+/// A Counter mapping context is used to connect the counters, expressions
+/// and the obtained counter values.
+class CounterMappingContext {
+  ArrayRef<CounterExpression> Expressions;
+  ArrayRef<uint64_t> CounterValues;
+
+public:
+  CounterMappingContext(ArrayRef<CounterExpression> Expressions,
+                        ArrayRef<uint64_t> CounterValues = None)
+      : Expressions(Expressions), CounterValues(CounterValues) {}
+
+  void setCounts(ArrayRef<uint64_t> Counts) { CounterValues = Counts; }
+
+  void dump(const Counter &C, raw_ostream &OS) const;
+  void dump(const Counter &C) const { dump(C, dbgs()); }
+
+  /// Return the number of times that a region of code associated with this
+  /// counter was executed.
+  Expected<int64_t> evaluate(const Counter &C) const;
+};
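+
+// Illustrative sketch: evaluating the expression from the builder sketch above
+// against concrete per-counter values ('Counts' is a hypothetical
+// ArrayRef<uint64_t> of profile data):
+//
+//   CounterMappingContext Ctx(Builder.getExpressions(), Counts);
+//   if (Expected<int64_t> Val = Ctx.evaluate(ElseCount))
+//     consume(*Val); // hypothetical consumer
+//   else
+//     consumeError(Val.takeError());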
+
+/// Code coverage information for a single function.
+struct FunctionRecord {
+  /// Raw function name.
+  std::string Name;
+  /// Associated files.
+  std::vector<std::string> Filenames;
+  /// Regions in the function along with their counts.
+  std::vector<CountedRegion> CountedRegions;
+  /// The number of times this function was executed.
+  uint64_t ExecutionCount;
+
+  FunctionRecord(StringRef Name, ArrayRef<StringRef> Filenames)
+      : Name(Name), Filenames(Filenames.begin(), Filenames.end()) {}
+
+  FunctionRecord(FunctionRecord &&FR) = default;
+  FunctionRecord &operator=(FunctionRecord &&) = default;
+
+  void pushRegion(CounterMappingRegion Region, uint64_t Count) {
+    if (CountedRegions.empty())
+      ExecutionCount = Count;
+    CountedRegions.emplace_back(Region, Count);
+  }
+};
+
+/// Iterator over Functions, optionally filtered to a single file.
+class FunctionRecordIterator
+    : public iterator_facade_base<FunctionRecordIterator,
+                                  std::forward_iterator_tag, FunctionRecord> {
+  ArrayRef<FunctionRecord> Records;
+  ArrayRef<FunctionRecord>::iterator Current;
+  StringRef Filename;
+
+  /// Skip records whose primary file is not \c Filename.
+  void skipOtherFiles();
+
+public:
+  FunctionRecordIterator(ArrayRef<FunctionRecord> Records_,
+                         StringRef Filename = "")
+      : Records(Records_), Current(Records.begin()), Filename(Filename) {
+    skipOtherFiles();
+  }
+
+  FunctionRecordIterator() : Current(Records.begin()) {}
+
+  bool operator==(const FunctionRecordIterator &RHS) const {
+    return Current == RHS.Current && Filename == RHS.Filename;
+  }
+
+  const FunctionRecord &operator*() const { return *Current; }
+
+  FunctionRecordIterator &operator++() {
+    assert(Current != Records.end() && "incremented past end");
+    ++Current;
+    skipOtherFiles();
+    return *this;
+  }
+};
+
+/// Coverage information for a macro expansion or #included file.
+///
+/// When covered code has pieces that can be expanded for more detail, such as a
+/// preprocessor macro use and its definition, these are represented as
+/// expansions whose coverage can be looked up independently.
+struct ExpansionRecord {
+  /// The abstract file this expansion covers.
+  unsigned FileID;
+  /// The region that expands to this record.
+  const CountedRegion &Region;
+  /// Coverage for the expansion.
+  const FunctionRecord &Function;
+
+  ExpansionRecord(const CountedRegion &Region,
+                  const FunctionRecord &Function)
+      : FileID(Region.ExpandedFileID), Region(Region), Function(Function) {}
+};
+
+/// The execution count information starting at a point in a file.
+///
+/// A sequence of CoverageSegments gives execution counts for a file in a
+/// format that's simple to iterate through for processing.
+struct CoverageSegment {
+  /// The line where this segment begins.
+  unsigned Line;
+  /// The column where this segment begins.
+  unsigned Col;
+  /// The execution count, or zero if no count was recorded.
+  uint64_t Count;
+  /// When false, the segment was uninstrumented or skipped.
+  bool HasCount;
+  /// Whether this enters a new region or returns to a previous count.
+  bool IsRegionEntry;
+  /// Whether this enters a gap region.
+  bool IsGapRegion;
+
+  CoverageSegment(unsigned Line, unsigned Col, bool IsRegionEntry)
+      : Line(Line), Col(Col), Count(0), HasCount(false),
+        IsRegionEntry(IsRegionEntry), IsGapRegion(false) {}
+
+  CoverageSegment(unsigned Line, unsigned Col, uint64_t Count,
+                  bool IsRegionEntry, bool IsGapRegion = false)
+      : Line(Line), Col(Col), Count(Count), HasCount(true),
+        IsRegionEntry(IsRegionEntry), IsGapRegion(IsGapRegion) {}
+
+  friend bool operator==(const CoverageSegment &L, const CoverageSegment &R) {
+    return std::tie(L.Line, L.Col, L.Count, L.HasCount, L.IsRegionEntry,
+                    L.IsGapRegion) == std::tie(R.Line, R.Col, R.Count,
+                                               R.HasCount, R.IsRegionEntry,
+                                               R.IsGapRegion);
+  }
+};
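+
+// A segment list is a flattened, sorted sweep over a file. As an illustrative
+// reading (values are made up), the segments
+//
+//   {Line=3, Col=1, Count=10, IsRegionEntry=true}
+//   {Line=5, Col=2, Count=0,  IsRegionEntry=true}
+//   {Line=7, Col=1, HasCount=false}
+//
+// say that an execution count of 10 applies from 3:1 up to 5:2, a count of 0
+// applies from 5:2 up to 7:1, and mapped coverage ends at 7:1.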
+
+/// An instantiation group contains a \c FunctionRecord list, such that each
+/// record corresponds to a distinct instantiation of the same function.
+///
+/// Note that it's possible for a function to have more than one instantiation
+/// (consider C++ template specializations or static inline functions).
+class InstantiationGroup {
+  friend class CoverageMapping;
+
+  unsigned Line;
+  unsigned Col;
+  std::vector<const FunctionRecord *> Instantiations;
+
+  InstantiationGroup(unsigned Line, unsigned Col,
+                     std::vector<const FunctionRecord *> Instantiations)
+      : Line(Line), Col(Col), Instantiations(std::move(Instantiations)) {}
+
+public:
+  InstantiationGroup(const InstantiationGroup &) = delete;
+  InstantiationGroup(InstantiationGroup &&) = default;
+
+  /// Get the number of instantiations in this group.
+  size_t size() const { return Instantiations.size(); }
+
+  /// Get the line where the common function was defined.
+  unsigned getLine() const { return Line; }
+
+  /// Get the column where the common function was defined.
+  unsigned getColumn() const { return Col; }
+
+  /// Check if the instantiations in this group have a common mangled name.
+  bool hasName() const {
+    for (unsigned I = 1, E = Instantiations.size(); I < E; ++I)
+      if (Instantiations[I]->Name != Instantiations[0]->Name)
+        return false;
+    return true;
+  }
+
+  /// Get the common mangled name for instantiations in this group.
+  StringRef getName() const {
+    assert(hasName() && "Instantiations don't have a shared name");
+    return Instantiations[0]->Name;
+  }
+
+  /// Get the total execution count of all instantiations in this group.
+  uint64_t getTotalExecutionCount() const {
+    uint64_t Count = 0;
+    for (const FunctionRecord *F : Instantiations)
+      Count += F->ExecutionCount;
+    return Count;
+  }
+
+  /// Get the instantiations in this group.
+  ArrayRef<const FunctionRecord *> getInstantiations() const {
+    return Instantiations;
+  }
+};
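+
+// Illustrative query (a sketch; `Group` would come from
+// CoverageMapping::getInstantiationGroups below):
+//
+//   if (Group.size() > 1 && Group.hasName())
+//     outs() << Group.getName() << ": " << Group.size() << " instantiations, "
+//            << Group.getTotalExecutionCount() << " total executions\n";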
+
+/// Coverage information to be processed or displayed.
+///
+/// This represents the coverage of an entire file, expansion, or function. It
+/// provides a sequence of CoverageSegments to iterate through, as well as the
+/// list of expansions that can be further processed.
+class CoverageData {
+  friend class CoverageMapping;
+
+  std::string Filename;
+  std::vector<CoverageSegment> Segments;
+  std::vector<ExpansionRecord> Expansions;
+
+public:
+  CoverageData() = default;
+
+  CoverageData(StringRef Filename) : Filename(Filename) {}
+
+  /// Get the name of the file this data covers.
+  StringRef getFilename() const { return Filename; }
+
+  /// Get an iterator over the coverage segments for this object. The segments
+  /// are guaranteed to be uniqued and sorted by location.
+  std::vector<CoverageSegment>::const_iterator begin() const {
+    return Segments.begin();
+  }
+
+  std::vector<CoverageSegment>::const_iterator end() const {
+    return Segments.end();
+  }
+
+  bool empty() const { return Segments.empty(); }
+
+  /// Expansions that can be further processed.
+  ArrayRef<ExpansionRecord> getExpansions() const { return Expansions; }
+};
+
+/// The mapping of profile information to coverage data.
+///
+/// This is the main interface to get coverage information, using a profile to
+/// fill out execution counts.
+class CoverageMapping {
+  StringSet<> FunctionNames;
+  std::vector<FunctionRecord> Functions;
+  std::vector<std::pair<std::string, uint64_t>> FuncHashMismatches;
+  std::vector<std::pair<std::string, uint64_t>> FuncCounterMismatches;
+
+  CoverageMapping() = default;
+
+  /// Add a function record corresponding to \p Record.
+  Error loadFunctionRecord(const CoverageMappingRecord &Record,
+                           IndexedInstrProfReader &ProfileReader);
+
+public:
+  CoverageMapping(const CoverageMapping &) = delete;
+  CoverageMapping &operator=(const CoverageMapping &) = delete;
+
+  /// Load the coverage mapping using the given readers.
+  static Expected<std::unique_ptr<CoverageMapping>>
+  load(ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
+       IndexedInstrProfReader &ProfileReader);
+
+  /// Load the coverage mapping from the given object files and profile. If
+  /// \p Arches is non-empty, it must specify an architecture for each object.
+  static Expected<std::unique_ptr<CoverageMapping>>
+  load(ArrayRef<StringRef> ObjectFilenames, StringRef ProfileFilename,
+       ArrayRef<StringRef> Arches = None);
+
+  /// The number of functions that couldn't have their profiles mapped.
+  ///
+  /// This is a count of functions whose profile is out of date or otherwise
+  /// can't be associated with any coverage information.
+  unsigned getMismatchedCount() const {
+    return FuncHashMismatches.size() + FuncCounterMismatches.size();
+  }
+
+  /// A hash mismatch occurs when a profile record for a symbol does not have
+  /// the same hash as a coverage mapping record for the same symbol. This
+  /// returns a list of hash mismatches, where each mismatch is a pair of the
+  /// symbol name and its coverage mapping hash.
+  ArrayRef<std::pair<std::string, uint64_t>> getHashMismatches() const {
+    return FuncHashMismatches;
+  }
+
+  /// A counter mismatch occurs when there is an error when evaluating the
+  /// counter expressions in a coverage mapping record. This returns a list of
+  /// counter mismatches, where each mismatch is a pair of the symbol name and
+  /// the number of valid evaluated counter expressions.
+  ArrayRef<std::pair<std::string, uint64_t>> getCounterMismatches() const {
+    return FuncCounterMismatches;
+  }
+
+  /// Returns a lexicographically sorted, unique list of files that are
+  /// covered.
+  std::vector<StringRef> getUniqueSourceFiles() const;
+
+  /// Get the coverage for a particular file.
+  ///
+  /// The given filename must be the name as recorded in the coverage
+  /// information. That is, only names returned from getUniqueSourceFiles will
+  /// yield a result.
+  CoverageData getCoverageForFile(StringRef Filename) const;
+
+  /// Get the coverage for a particular function.
+  CoverageData getCoverageForFunction(const FunctionRecord &Function) const;
+
+  /// Get the coverage for an expansion within a coverage set.
+  CoverageData getCoverageForExpansion(const ExpansionRecord &Expansion) const;
+
+  /// Gets all of the functions covered by this profile.
+  iterator_range<FunctionRecordIterator> getCoveredFunctions() const {
+    return make_range(FunctionRecordIterator(Functions),
+                      FunctionRecordIterator());
+  }
+
+  /// Gets all of the functions in a particular file.
+  iterator_range<FunctionRecordIterator>
+  getCoveredFunctions(StringRef Filename) const {
+    return make_range(FunctionRecordIterator(Functions, Filename),
+                      FunctionRecordIterator());
+  }
+
+  /// Get the list of function instantiation groups in a particular file.
+  ///
+  /// Every instantiation group in a program is attributed to exactly one file:
+  /// the file in which the definition for the common function begins.
+  std::vector<InstantiationGroup>
+  getInstantiationGroups(StringRef Filename) const;
+};
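+
+// End-to-end sketch (illustrative; the file names are placeholders): load
+// coverage from an instrumented binary plus an indexed profile, then walk the
+// segments of every covered file.
+//
+//   auto CovOrErr = CoverageMapping::load({"a.out"}, "default.profdata");
+//   if (!CovOrErr)
+//     ; // handle CovOrErr.takeError() and bail out
+//   const CoverageMapping &CM = **CovOrErr;
+//   for (StringRef File : CM.getUniqueSourceFiles()) {
+//     CoverageData CD = CM.getCoverageForFile(File);
+//     for (const CoverageSegment &S : CD)
+//       ; // consume segments
+//   }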
+
+/// Coverage statistics for a single line.
+class LineCoverageStats {
+  uint64_t ExecutionCount;
+  bool HasMultipleRegions;
+  bool Mapped;
+  unsigned Line;
+  ArrayRef<const CoverageSegment *> LineSegments;
+  const CoverageSegment *WrappedSegment;
+
+  friend class LineCoverageIterator;
+  LineCoverageStats() = default;
+
+public:
+  LineCoverageStats(ArrayRef<const CoverageSegment *> LineSegments,
+                    const CoverageSegment *WrappedSegment, unsigned Line);
+
+  uint64_t getExecutionCount() const { return ExecutionCount; }
+
+  bool hasMultipleRegions() const { return HasMultipleRegions; }
+
+  bool isMapped() const { return Mapped; }
+
+  unsigned getLine() const { return Line; }
+
+  ArrayRef<const CoverageSegment *> getLineSegments() const {
+    return LineSegments;
+  }
+
+  const CoverageSegment *getWrappedSegment() const { return WrappedSegment; }
+};
+
+/// An iterator over the \c LineCoverageStats objects for lines described by
+/// a \c CoverageData instance.
+class LineCoverageIterator
+    : public iterator_facade_base<
+          LineCoverageIterator, std::forward_iterator_tag, LineCoverageStats> {
+public:
+  LineCoverageIterator(const CoverageData &CD)
+      : LineCoverageIterator(CD, CD.begin()->Line) {}
+
+  LineCoverageIterator(const CoverageData &CD, unsigned Line)
+      : CD(CD), WrappedSegment(nullptr), Next(CD.begin()), Ended(false),
+        Line(Line), Segments(), Stats() {
+    this->operator++();
+  }
+
+  LineCoverageIterator &operator=(const LineCoverageIterator &R) = default;
+
+  bool operator==(const LineCoverageIterator &R) const {
+    return &CD == &R.CD && Next == R.Next && Ended == R.Ended;
+  }
+
+  const LineCoverageStats &operator*() const { return Stats; }
+
+  LineCoverageStats &operator*() { return Stats; }
+
+  LineCoverageIterator &operator++();
+
+  LineCoverageIterator getEnd() const {
+    auto EndIt = *this;
+    EndIt.Next = CD.end();
+    EndIt.Ended = true;
+    return EndIt;
+  }
+
+private:
+  const CoverageData &CD;
+  const CoverageSegment *WrappedSegment;
+  std::vector<CoverageSegment>::const_iterator Next;
+  bool Ended;
+  unsigned Line;
+  SmallVector<const CoverageSegment *, 4> Segments;
+  LineCoverageStats Stats;
+};
+
+/// Get a \c LineCoverageIterator range for the lines described by \p CD.
+static inline iterator_range<LineCoverageIterator>
+getLineCoverageStats(const coverage::CoverageData &CD) {
+  auto Begin = LineCoverageIterator(CD);
+  auto End = Begin.getEnd();
+  return make_range(Begin, End);
+}
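+
+// Illustrative line-oriented walk (a sketch; `CD` is a CoverageData obtained
+// from a CoverageMapping as above):
+//
+//   for (const LineCoverageStats &LCS : getLineCoverageStats(CD))
+//     if (LCS.isMapped())
+//       outs() << LCS.getLine() << ": " << LCS.getExecutionCount() << "\n";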
+
+// Profile coverage map has the following layout:
+// [CoverageMapFileHeader]
+// [ArrayStart]
+//  [CovMapFunctionRecord]
+//  [CovMapFunctionRecord]
+//  ...
+// [ArrayEnd]
+// [Encoded Region Mapping Data]
+LLVM_PACKED_START
+template <class IntPtrT> struct CovMapFunctionRecordV1 {
+#define COVMAP_V1
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+#undef COVMAP_V1
+
+  // Return the structural hash associated with the function.
+  template <support::endianness Endian> uint64_t getFuncHash() const {
+    return support::endian::byte_swap<uint64_t, Endian>(FuncHash);
+  }
+
+  // Return the coverage map data size for the function.
+  template <support::endianness Endian> uint32_t getDataSize() const {
+    return support::endian::byte_swap<uint32_t, Endian>(DataSize);
+  }
+
+  // Return the function lookup key. The value is considered opaque.
+  template <support::endianness Endian> IntPtrT getFuncNameRef() const {
+    return support::endian::byte_swap<IntPtrT, Endian>(NamePtr);
+  }
+
+  // Return the PGO name of the function.
+  template <support::endianness Endian>
+  Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const {
+    IntPtrT NameRef = getFuncNameRef<Endian>();
+    uint32_t NameS = support::endian::byte_swap<uint32_t, Endian>(NameSize);
+    FuncName = ProfileNames.getFuncName(NameRef, NameS);
+    if (NameS && FuncName.empty())
+      return make_error<CoverageMapError>(coveragemap_error::malformed);
+    return Error::success();
+  }
+};
+
+struct CovMapFunctionRecord {
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+
+  // Return the structural hash associated with the function.
+  template <support::endianness Endian> uint64_t getFuncHash() const {
+    return support::endian::byte_swap<uint64_t, Endian>(FuncHash);
+  }
+
+  // Return the coverage map data size for the function.
+  template <support::endianness Endian> uint32_t getDataSize() const {
+    return support::endian::byte_swap<uint32_t, Endian>(DataSize);
+  }
+
+  // Return the function lookup key. The value is considered opaque.
+  template <support::endianness Endian> uint64_t getFuncNameRef() const {
+    return support::endian::byte_swap<uint64_t, Endian>(NameRef);
+  }
+
+  // Return the PGO name of the function.
+  template <support::endianness Endian>
+  Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const {
+    uint64_t NameRef = getFuncNameRef<Endian>();
+    FuncName = ProfileNames.getFuncName(NameRef);
+    return Error::success();
+  }
+};
+
+// Per-module coverage mapping data header, i.e. the CoverageMapFileHeader
+// documented above.
+struct CovMapHeader {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+  template <support::endianness Endian> uint32_t getNRecords() const {
+    return support::endian::byte_swap<uint32_t, Endian>(NRecords);
+  }
+
+  template <support::endianness Endian> uint32_t getFilenamesSize() const {
+    return support::endian::byte_swap<uint32_t, Endian>(FilenamesSize);
+  }
+
+  template <support::endianness Endian> uint32_t getCoverageSize() const {
+    return support::endian::byte_swap<uint32_t, Endian>(CoverageSize);
+  }
+
+  template <support::endianness Endian> uint32_t getVersion() const {
+    return support::endian::byte_swap<uint32_t, Endian>(Version);
+  }
+};
+
+LLVM_PACKED_END
+
+enum CovMapVersion {
+  Version1 = 0,
+  // The function's name reference in CovMapFuncRecord is changed from a raw
+  // name string pointer to an MD5 hash to support name section compression;
+  // the name section is also compressed.
+  Version2 = 1,
+  // A new interpretation of the columnEnd field is added in order to mark
+  // regions as gap areas.
+  Version3 = 2,
+  // The current version is Version3
+  CurrentVersion = INSTR_PROF_COVMAP_VERSION
+};
+
+template <int CovMapVersion, class IntPtrT> struct CovMapTraits {
+  using CovMapFuncRecordType = CovMapFunctionRecord;
+  using NameRefType = uint64_t;
+};
+
+template <class IntPtrT> struct CovMapTraits<CovMapVersion::Version1, IntPtrT> {
+  using CovMapFuncRecordType = CovMapFunctionRecordV1<IntPtrT>;
+  using NameRefType = IntPtrT;
+};
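+
+// Illustrative trait selection (a sketch): Version1 records carry a raw name
+// pointer whose width depends on the target, while later versions use a fixed
+// 64-bit MD5 name reference.
+//
+//   using V1Rec = CovMapTraits<CovMapVersion::Version1,
+//                              uint32_t>::CovMapFuncRecordType;
+//   // V1Rec == CovMapFunctionRecordV1<uint32_t>, NameRefType == uint32_t
+//   using V3Rec = CovMapTraits<CovMapVersion::Version3,
+//                              uint32_t>::CovMapFuncRecordType;
+//   // V3Rec == CovMapFunctionRecord, NameRefType == uint64_t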
+
+} // end namespace coverage
+
+/// Provide DenseMapInfo for CounterExpression
+template<> struct DenseMapInfo<coverage::CounterExpression> {
+  static inline coverage::CounterExpression getEmptyKey() {
+    using namespace coverage;
+
+    return CounterExpression(CounterExpression::ExprKind::Subtract,
+                             Counter::getCounter(~0U),
+                             Counter::getCounter(~0U));
+  }
+
+  static inline coverage::CounterExpression getTombstoneKey() {
+    using namespace coverage;
+
+    return CounterExpression(CounterExpression::ExprKind::Add,
+                             Counter::getCounter(~0U),
+                             Counter::getCounter(~0U));
+  }
+
+  static unsigned getHashValue(const coverage::CounterExpression &V) {
+    return static_cast<unsigned>(
+        hash_combine(V.Kind, V.LHS.getKind(), V.LHS.getCounterID(),
+                     V.RHS.getKind(), V.RHS.getCounterID()));
+  }
+
+  static bool isEqual(const coverage::CounterExpression &LHS,
+                      const coverage::CounterExpression &RHS) {
+    return LHS.Kind == RHS.Kind && LHS.LHS == RHS.LHS && LHS.RHS == RHS.RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingReader.h b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
new file mode 100644
index 0000000..633e515
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
@@ -0,0 +1,217 @@
+//===- CoverageMappingReader.h - Code coverage mapping reader ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for reading coverage mapping data for
+// instrumentation based coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H
+#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ProfileData/Coverage/CoverageMapping.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace coverage {
+
+class CoverageMappingReader;
+
+/// \brief Coverage mapping information for a single function.
+struct CoverageMappingRecord {
+  StringRef FunctionName;
+  uint64_t FunctionHash;
+  ArrayRef<StringRef> Filenames;
+  ArrayRef<CounterExpression> Expressions;
+  ArrayRef<CounterMappingRegion> MappingRegions;
+};
+
+/// \brief A file format agnostic iterator over coverage mapping data.
+class CoverageMappingIterator
+    : public std::iterator<std::input_iterator_tag, CoverageMappingRecord> {
+  CoverageMappingReader *Reader;
+  CoverageMappingRecord Record;
+  coveragemap_error ReadErr;
+
+  void increment();
+
+public:
+  CoverageMappingIterator()
+      : Reader(nullptr), Record(), ReadErr(coveragemap_error::success) {}
+
+  CoverageMappingIterator(CoverageMappingReader *Reader)
+      : Reader(Reader), Record(), ReadErr(coveragemap_error::success) {
+    increment();
+  }
+
+  ~CoverageMappingIterator() {
+    if (ReadErr != coveragemap_error::success)
+      llvm_unreachable("Unexpected error in coverage mapping iterator");
+  }
+
+  CoverageMappingIterator &operator++() {
+    increment();
+    return *this;
+  }
+  bool operator==(const CoverageMappingIterator &RHS) {
+    return Reader == RHS.Reader;
+  }
+  bool operator!=(const CoverageMappingIterator &RHS) {
+    return Reader != RHS.Reader;
+  }
+  Expected<CoverageMappingRecord &> operator*() {
+    if (ReadErr != coveragemap_error::success) {
+      auto E = make_error<CoverageMapError>(ReadErr);
+      ReadErr = coveragemap_error::success;
+      return std::move(E);
+    }
+    return Record;
+  }
+  Expected<CoverageMappingRecord *> operator->() {
+    if (ReadErr != coveragemap_error::success) {
+      auto E = make_error<CoverageMapError>(ReadErr);
+      ReadErr = coveragemap_error::success;
+      return std::move(E);
+    }
+    return &Record;
+  }
+};
+
+class CoverageMappingReader {
+public:
+  virtual ~CoverageMappingReader() = default;
+
+  virtual Error readNextRecord(CoverageMappingRecord &Record) = 0;
+  CoverageMappingIterator begin() { return CoverageMappingIterator(this); }
+  CoverageMappingIterator end() { return CoverageMappingIterator(); }
+};
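+
+// Sketch of consuming a reader (illustrative; `Reader` is any concrete
+// CoverageMappingReader, e.g. the BinaryCoverageReader declared below).
+// Dereferencing the iterator yields Expected<CoverageMappingRecord &>, so
+// each record must be checked for errors:
+//
+//   for (auto RecordOrErr : Reader) {
+//     if (!RecordOrErr)
+//       ; // handle RecordOrErr.takeError() and stop
+//     const CoverageMappingRecord &Record = *RecordOrErr;
+//     // use Record.FunctionName, Record.MappingRegions, ...
+//   }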
+
+/// \brief Base class for the raw coverage mapping and filenames data readers.
+class RawCoverageReader {
+protected:
+  StringRef Data;
+
+  RawCoverageReader(StringRef Data) : Data(Data) {}
+
+  Error readULEB128(uint64_t &Result);
+  Error readIntMax(uint64_t &Result, uint64_t MaxPlus1);
+  Error readSize(uint64_t &Result);
+  Error readString(StringRef &Result);
+};
+
+/// \brief Reader for the raw coverage filenames.
+class RawCoverageFilenamesReader : public RawCoverageReader {
+  std::vector<StringRef> &Filenames;
+
+public:
+  RawCoverageFilenamesReader(StringRef Data, std::vector<StringRef> &Filenames)
+      : RawCoverageReader(Data), Filenames(Filenames) {}
+  RawCoverageFilenamesReader(const RawCoverageFilenamesReader &) = delete;
+  RawCoverageFilenamesReader &
+  operator=(const RawCoverageFilenamesReader &) = delete;
+
+  Error read();
+};
+
+/// \brief Checks if the given coverage mapping data is exported for
+/// an unused function.
+class RawCoverageMappingDummyChecker : public RawCoverageReader {
+public:
+  RawCoverageMappingDummyChecker(StringRef MappingData)
+      : RawCoverageReader(MappingData) {}
+
+  Expected<bool> isDummy();
+};
+
+/// \brief Reader for the raw coverage mapping data.
+class RawCoverageMappingReader : public RawCoverageReader {
+  ArrayRef<StringRef> TranslationUnitFilenames;
+  std::vector<StringRef> &Filenames;
+  std::vector<CounterExpression> &Expressions;
+  std::vector<CounterMappingRegion> &MappingRegions;
+
+public:
+  RawCoverageMappingReader(StringRef MappingData,
+                           ArrayRef<StringRef> TranslationUnitFilenames,
+                           std::vector<StringRef> &Filenames,
+                           std::vector<CounterExpression> &Expressions,
+                           std::vector<CounterMappingRegion> &MappingRegions)
+      : RawCoverageReader(MappingData),
+        TranslationUnitFilenames(TranslationUnitFilenames),
+        Filenames(Filenames), Expressions(Expressions),
+        MappingRegions(MappingRegions) {}
+  RawCoverageMappingReader(const RawCoverageMappingReader &) = delete;
+  RawCoverageMappingReader &
+  operator=(const RawCoverageMappingReader &) = delete;
+
+  Error read();
+
+private:
+  Error decodeCounter(unsigned Value, Counter &C);
+  Error readCounter(Counter &C);
+  Error
+  readMappingRegionsSubArray(std::vector<CounterMappingRegion> &MappingRegions,
+                             unsigned InferredFileID, size_t NumFileIDs);
+};
+
+/// \brief Reader for the coverage mapping data that is emitted by the
+/// frontend and stored in an object file.
+class BinaryCoverageReader : public CoverageMappingReader {
+public:
+  struct ProfileMappingRecord {
+    CovMapVersion Version;
+    StringRef FunctionName;
+    uint64_t FunctionHash;
+    StringRef CoverageMapping;
+    size_t FilenamesBegin;
+    size_t FilenamesSize;
+
+    ProfileMappingRecord(CovMapVersion Version, StringRef FunctionName,
+                         uint64_t FunctionHash, StringRef CoverageMapping,
+                         size_t FilenamesBegin, size_t FilenamesSize)
+        : Version(Version), FunctionName(FunctionName),
+          FunctionHash(FunctionHash), CoverageMapping(CoverageMapping),
+          FilenamesBegin(FilenamesBegin), FilenamesSize(FilenamesSize) {}
+  };
+
+private:
+  std::vector<StringRef> Filenames;
+  std::vector<ProfileMappingRecord> MappingRecords;
+  InstrProfSymtab ProfileNames;
+  size_t CurrentRecord = 0;
+  std::vector<StringRef> FunctionsFilenames;
+  std::vector<CounterExpression> Expressions;
+  std::vector<CounterMappingRegion> MappingRegions;
+
+  BinaryCoverageReader() = default;
+
+public:
+  BinaryCoverageReader(const BinaryCoverageReader &) = delete;
+  BinaryCoverageReader &operator=(const BinaryCoverageReader &) = delete;
+
+  static Expected<std::unique_ptr<BinaryCoverageReader>>
+  create(std::unique_ptr<MemoryBuffer> &ObjectBuffer,
+         StringRef Arch);
+
+  Error readNextRecord(CoverageMappingRecord &Record) override;
+};
+
+} // end namespace coverage
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h
new file mode 100644
index 0000000..b6f864a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h
@@ -0,0 +1,62 @@
+//===- CoverageMappingWriter.h - Code coverage mapping writer ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing coverage mapping data for
+// instrumentation based coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H
+#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ProfileData/Coverage/CoverageMapping.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace coverage {
+
+/// \brief Writer of the filenames section for the instrumentation
+/// based code coverage.
+class CoverageFilenamesSectionWriter {
+  ArrayRef<StringRef> Filenames;
+
+public:
+  CoverageFilenamesSectionWriter(ArrayRef<StringRef> Filenames)
+      : Filenames(Filenames) {}
+
+  /// \brief Write encoded filenames to the given output stream.
+  void write(raw_ostream &OS);
+};
+
+/// \brief Writer for instrumentation based coverage mapping data.
+class CoverageMappingWriter {
+  ArrayRef<unsigned> VirtualFileMapping;
+  ArrayRef<CounterExpression> Expressions;
+  MutableArrayRef<CounterMappingRegion> MappingRegions;
+
+public:
+  CoverageMappingWriter(ArrayRef<unsigned> VirtualFileMapping,
+                        ArrayRef<CounterExpression> Expressions,
+                        MutableArrayRef<CounterMappingRegion> MappingRegions)
+      : VirtualFileMapping(VirtualFileMapping), Expressions(Expressions),
+        MappingRegions(MappingRegions) {}
+
+  /// \brief Write encoded coverage mapping data to the given output stream.
+  void write(raw_ostream &OS);
+};
+
+} // end namespace coverage
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/GCOV.h b/linux-x64/clang/include/llvm/ProfileData/GCOV.h
new file mode 100644
index 0000000..497f80b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/GCOV.h
@@ -0,0 +1,460 @@
+//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header provides the interface to read and write coverage files that
+// use 'gcov' format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_GCOV_H
+#define LLVM_PROFILEDATA_GCOV_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class GCOVFunction;
+class GCOVBlock;
+class FileInfo;
+
+namespace GCOV {
+
+enum GCOVVersion { V402, V404, V704 };
+
+/// \brief A struct for passing gcov options between functions.
+struct Options {
+  Options(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N)
+      : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F),
+        PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {}
+
+  bool AllBlocks;
+  bool BranchInfo;
+  bool BranchCount;
+  bool FuncCoverage;
+  bool PreservePaths;
+  bool UncondBranch;
+  bool LongFileNames;
+  bool NoOutput;
+};
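+
+// The positional bool constructor is easy to misread; an illustrative
+// construction with each argument annotated (a sketch, not part of this
+// header):
+//
+//   GCOV::Options Opts(/*AllBlocks=*/false, /*BranchInfo=*/true,
+//                      /*BranchCount=*/true, /*FuncCoverage=*/false,
+//                      /*PreservePaths=*/true, /*UncondBranch=*/false,
+//                      /*LongFileNames=*/false, /*NoOutput=*/false);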
+
+} // end namespace GCOV
+
+/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
+/// read operations.
+class GCOVBuffer {
+public:
+  GCOVBuffer(MemoryBuffer *B) : Buffer(B) {}
+
+  /// readGCNOFormat - Check that the GCNO signature is valid at the beginning
+  /// of the buffer.
+  bool readGCNOFormat() {
+    StringRef File = Buffer->getBuffer().slice(0, 4);
+    if (File != "oncg") {
+      errs() << "Unexpected file type: " << File << ".\n";
+      return false;
+    }
+    Cursor = 4;
+    return true;
+  }
+
+  /// readGCDAFormat - Check that the GCDA signature is valid at the beginning
+  /// of the buffer.
+  bool readGCDAFormat() {
+    StringRef File = Buffer->getBuffer().slice(0, 4);
+    if (File != "adcg") {
+      errs() << "Unexpected file type: " << File << ".\n";
+      return false;
+    }
+    Cursor = 4;
+    return true;
+  }
+
+  /// readGCOVVersion - Read GCOV version.
+  bool readGCOVVersion(GCOV::GCOVVersion &Version) {
+    StringRef VersionStr = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (VersionStr == "*204") {
+      Cursor += 4;
+      Version = GCOV::V402;
+      return true;
+    }
+    if (VersionStr == "*404") {
+      Cursor += 4;
+      Version = GCOV::V404;
+      return true;
+    }
+    if (VersionStr == "*704") {
+      Cursor += 4;
+      Version = GCOV::V704;
+      return true;
+    }
+    errs() << "Unexpected version: " << VersionStr << ".\n";
+    return false;
+  }
+
+  /// readFunctionTag - If the cursor points to a function tag, increment the
+  /// cursor and return true; otherwise return false.
+  bool readFunctionTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+        Tag[3] != '\1') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readBlockTag - If the cursor points to a block tag, increment the
+  /// cursor and return true; otherwise return false.
+  bool readBlockTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x41' ||
+        Tag[3] != '\x01') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readEdgeTag - If the cursor points to an edge tag, increment the
+  /// cursor and return true; otherwise return false.
+  bool readEdgeTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x43' ||
+        Tag[3] != '\x01') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readLineTag - If the cursor points to a line tag, increment the
+  /// cursor and return true; otherwise return false.
+  bool readLineTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x45' ||
+        Tag[3] != '\x01') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readArcTag - If the cursor points to a gcda arc tag, increment the
+  /// cursor and return true; otherwise return false.
+  bool readArcTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\xa1' ||
+        Tag[3] != '\1') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readObjectTag - If the cursor points to an object summary tag, increment
+  /// the cursor and return true; otherwise return false.
+  bool readObjectTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+        Tag[3] != '\xa1') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  /// readProgramTag - If the cursor points to a program summary tag, increment
+  /// the cursor and return true; otherwise return false.
+  bool readProgramTag() {
+    StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+        Tag[3] != '\xa3') {
+      return false;
+    }
+    Cursor += 4;
+    return true;
+  }
+
+  bool readInt(uint32_t &Val) {
+    if (Buffer->getBuffer().size() < Cursor + 4) {
+      errs() << "Unexpected end of memory buffer: " << Cursor + 4 << ".\n";
+      return false;
+    }
+    StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+    Cursor += 4;
+    Val = *(const uint32_t *)(Str.data());
+    return true;
+  }
+
+  bool readInt64(uint64_t &Val) {
+    uint32_t Lo, Hi;
+    if (!readInt(Lo) || !readInt(Hi))
+      return false;
+    Val = ((uint64_t)Hi << 32) | Lo;
+    return true;
+  }
+
+  bool readString(StringRef &Str) {
+    uint32_t Len = 0;
+    // Keep reading until we find a non-zero length; gcov itself appears to
+    // skip leading zero-length words in the same way.
+    while (Len == 0)
+      if (!readInt(Len))
+        return false;
+    Len *= 4;
+    if (Buffer->getBuffer().size() < Cursor + Len) {
+      errs() << "Unexpected end of memory buffer: " << Cursor + Len << ".\n";
+      return false;
+    }
+    Str = Buffer->getBuffer().slice(Cursor, Cursor + Len).split('\0').first;
+    Cursor += Len;
+    return true;
+  }
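+
+  // Illustrative layout consumed by readString above: a length word counting
+  // 4-byte words, then the NUL-padded string bytes. For example, "main" is
+  // stored as the word 2 followed by the 8 bytes "main\0\0\0\0"; readString
+  // returns the slice up to the first NUL.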
+
+  uint64_t getCursor() const { return Cursor; }
+  void advanceCursor(uint32_t n) { Cursor += n * 4; }
+
+private:
+  MemoryBuffer *Buffer;
+  uint64_t Cursor = 0;
+};
+
+/// GCOVFile - Collects coverage information for one pair of coverage files
+/// (.gcno and .gcda).
+class GCOVFile {
+public:
+  GCOVFile() = default;
+
+  bool readGCNO(GCOVBuffer &Buffer);
+  bool readGCDA(GCOVBuffer &Buffer);
+  uint32_t getChecksum() const { return Checksum; }
+  void print(raw_ostream &OS) const;
+  void dump() const;
+  void collectLineCounts(FileInfo &FI);
+
+private:
+  bool GCNOInitialized = false;
+  GCOV::GCOVVersion Version;
+  uint32_t Checksum = 0;
+  SmallVector<std::unique_ptr<GCOVFunction>, 16> Functions;
+  uint32_t RunCount = 0;
+  uint32_t ProgramCount = 0;
+};
+
+/// GCOVEdge - Collects edge information.
+struct GCOVEdge {
+  GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D) {}
+
+  GCOVBlock &Src;
+  GCOVBlock &Dst;
+  uint64_t Count = 0;
+};
+
+/// GCOVFunction - Collects function information.
+class GCOVFunction {
+public:
+  using BlockIterator = pointee_iterator<SmallVectorImpl<
+      std::unique_ptr<GCOVBlock>>::const_iterator>;
+
+  GCOVFunction(GCOVFile &P) : Parent(P) {}
+
+  bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
+  bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
+  StringRef getName() const { return Name; }
+  StringRef getFilename() const { return Filename; }
+  size_t getNumBlocks() const { return Blocks.size(); }
+  uint64_t getEntryCount() const;
+  uint64_t getExitCount() const;
+
+  BlockIterator block_begin() const { return Blocks.begin(); }
+  BlockIterator block_end() const { return Blocks.end(); }
+  iterator_range<BlockIterator> blocks() const {
+    return make_range(block_begin(), block_end());
+  }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+  void collectLineCounts(FileInfo &FI);
+
+private:
+  GCOVFile &Parent;
+  uint32_t Ident = 0;
+  uint32_t Checksum;
+  uint32_t LineNumber = 0;
+  StringRef Name;
+  StringRef Filename;
+  SmallVector<std::unique_ptr<GCOVBlock>, 16> Blocks;
+  SmallVector<std::unique_ptr<GCOVEdge>, 16> Edges;
+};
+
+/// GCOVBlock - Collects block information.
+class GCOVBlock {
+  struct EdgeWeight {
+    EdgeWeight(GCOVBlock *D) : Dst(D) {}
+
+    GCOVBlock *Dst;
+    uint64_t Count = 0;
+  };
+
+  struct SortDstEdgesFunctor {
+    bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) {
+      return E1->Dst.Number < E2->Dst.Number;
+    }
+  };
+
+public:
+  using EdgeIterator = SmallVectorImpl<GCOVEdge *>::const_iterator;
+
+  GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N) {}
+  ~GCOVBlock();
+
+  const GCOVFunction &getParent() const { return Parent; }
+  void addLine(uint32_t N) { Lines.push_back(N); }
+  uint32_t getLastLine() const { return Lines.back(); }
+  void addCount(size_t DstEdgeNo, uint64_t N);
+  uint64_t getCount() const { return Counter; }
+
+  void addSrcEdge(GCOVEdge *Edge) {
+    assert(&Edge->Dst == this); // up to caller to ensure edge is valid
+    SrcEdges.push_back(Edge);
+  }
+
+  void addDstEdge(GCOVEdge *Edge) {
+    assert(&Edge->Src == this); // up to caller to ensure edge is valid
+    // Check if adding this edge causes list to become unsorted.
+    if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number)
+      DstEdgesAreSorted = false;
+    DstEdges.push_back(Edge);
+  }
+
+  size_t getNumSrcEdges() const { return SrcEdges.size(); }
+  size_t getNumDstEdges() const { return DstEdges.size(); }
+  void sortDstEdges();
+
+  EdgeIterator src_begin() const { return SrcEdges.begin(); }
+  EdgeIterator src_end() const { return SrcEdges.end(); }
+  iterator_range<EdgeIterator> srcs() const {
+    return make_range(src_begin(), src_end());
+  }
+
+  EdgeIterator dst_begin() const { return DstEdges.begin(); }
+  EdgeIterator dst_end() const { return DstEdges.end(); }
+  iterator_range<EdgeIterator> dsts() const {
+    return make_range(dst_begin(), dst_end());
+  }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+  void collectLineCounts(FileInfo &FI);
+
+private:
+  GCOVFunction &Parent;
+  uint32_t Number;
+  uint64_t Counter = 0;
+  bool DstEdgesAreSorted = true;
+  SmallVector<GCOVEdge *, 16> SrcEdges;
+  SmallVector<GCOVEdge *, 16> DstEdges;
+  SmallVector<uint32_t, 16> Lines;
+};
+
+class FileInfo {
+  // It is unlikely--but possible--for multiple functions to be on the same
+  // line. Therefore this typedef allows LineData.Functions to store multiple
+  // functions per instance. This is rare, however, so optimize for the
+  // common case.
+  using FunctionVector = SmallVector<const GCOVFunction *, 1>;
+  using FunctionLines = DenseMap<uint32_t, FunctionVector>;
+  using BlockVector = SmallVector<const GCOVBlock *, 4>;
+  using BlockLines = DenseMap<uint32_t, BlockVector>;
+
+  struct LineData {
+    LineData() = default;
+
+    BlockLines Blocks;
+    FunctionLines Functions;
+    uint32_t LastLine = 0;
+  };
+
+  struct GCOVCoverage {
+    GCOVCoverage(StringRef Name) : Name(Name) {}
+
+    StringRef Name;
+
+    uint32_t LogicalLines = 0;
+    uint32_t LinesExec = 0;
+
+    uint32_t Branches = 0;
+    uint32_t BranchesExec = 0;
+    uint32_t BranchesTaken = 0;
+  };
+
+public:
+  FileInfo(const GCOV::Options &Options) : Options(Options) {}
+
+  void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) {
+    if (Line > LineInfo[Filename].LastLine)
+      LineInfo[Filename].LastLine = Line;
+    LineInfo[Filename].Blocks[Line - 1].push_back(Block);
+  }
+
+  void addFunctionLine(StringRef Filename, uint32_t Line,
+                       const GCOVFunction *Function) {
+    if (Line > LineInfo[Filename].LastLine)
+      LineInfo[Filename].LastLine = Line;
+    LineInfo[Filename].Functions[Line - 1].push_back(Function);
+  }
+
+  void setRunCount(uint32_t Runs) { RunCount = Runs; }
+  void setProgramCount(uint32_t Programs) { ProgramCount = Programs; }
+  void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile,
+             StringRef GCDAFile);
+
+private:
+  std::string getCoveragePath(StringRef Filename, StringRef MainFilename);
+  std::unique_ptr<raw_ostream> openCoveragePath(StringRef CoveragePath);
+  void printFunctionSummary(raw_ostream &OS, const FunctionVector &Funcs) const;
+  void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block,
+                      uint32_t LineIndex, uint32_t &BlockNo) const;
+  void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block,
+                       GCOVCoverage &Coverage, uint32_t &EdgeNo);
+  void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo,
+                             uint64_t Count) const;
+
+  void printCoverage(raw_ostream &OS, const GCOVCoverage &Coverage) const;
+  void printFuncCoverage(raw_ostream &OS) const;
+  void printFileCoverage(raw_ostream &OS) const;
+
+  const GCOV::Options &Options;
+  StringMap<LineData> LineInfo;
+  uint32_t RunCount = 0;
+  uint32_t ProgramCount = 0;
+
+  using FileCoverageList = SmallVector<std::pair<std::string, GCOVCoverage>, 4>;
+  using FuncCoverageMap = MapVector<const GCOVFunction *, GCOVCoverage>;
+
+  FileCoverageList FileCoverages;
+  FuncCoverageMap FuncCoverages;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_GCOV_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/InstrProf.h b/linux-x64/clang/include/llvm/ProfileData/InstrProf.h
new file mode 100644
index 0000000..88ae0f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/InstrProf.h
@@ -0,0 +1,1046 @@
+//===- InstrProf.h - Instrumented profiling format support ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profiling data is generated by instrumented
+// binaries through library functions in compiler-rt, and read by the clang
+// frontend to feed PGO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROF_H
+#define LLVM_PROFILEDATA_INSTRPROF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/InstrProfData.inc"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <list>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class GlobalVariable;
+struct InstrProfRecord;
+class InstrProfSymtab;
+class Instruction;
+class MDNode;
+class Module;
+
+enum InstrProfSectKind {
+#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) Kind,
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+
+/// Return the name of the profile section corresponding to \p IPSK.
+///
+/// The name of the section depends on the object format type \p OF. If
+/// \p AddSegmentInfo is true, a segment prefix and additional linker hints may
+/// be added to the section name (this is the default).
+std::string getInstrProfSectionName(InstrProfSectKind IPSK,
+                                    Triple::ObjectFormatType OF,
+                                    bool AddSegmentInfo = true);
+
+/// Return the name of the profile runtime entry point that performs value
+/// profiling for a given site.
+inline StringRef getInstrProfValueProfFuncName() {
+  return INSTR_PROF_VALUE_PROF_FUNC_STR;
+}
+
+/// Return the name of the profile runtime entry point that performs value
+/// range profiling.
+inline StringRef getInstrProfValueRangeProfFuncName() {
+  return INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR;
+}
+
+/// Return the name prefix of variables containing instrumented function names.
+inline StringRef getInstrProfNameVarPrefix() { return "__profn_"; }
+
+/// Return the name prefix of variables containing per-function control data.
+inline StringRef getInstrProfDataVarPrefix() { return "__profd_"; }
+
+/// Return the name prefix of profile counter variables.
+inline StringRef getInstrProfCountersVarPrefix() { return "__profc_"; }
+
+/// Return the name prefix of value profile variables.
+inline StringRef getInstrProfValuesVarPrefix() { return "__profvp_"; }
+
+/// Return the name of value profile node array variables:
+inline StringRef getInstrProfVNodesVarName() { return "__llvm_prf_vnodes"; }
+
+/// Return the name prefix of the COMDAT group for instrumentation variables
+/// associated with a COMDAT function.
+inline StringRef getInstrProfComdatPrefix() { return "__profv_"; }
+
+/// Return the name of the variable holding the strings (possibly compressed)
+/// of all functions' PGO names.
+inline StringRef getInstrProfNamesVarName() {
+  return "__llvm_prf_nm";
+}
+
+/// Return the name of a coverage mapping variable (internal linkage)
+/// for each instrumented source module. Such variables are allocated
+/// in the __llvm_covmap section.
+inline StringRef getCoverageMappingVarName() {
+  return "__llvm_coverage_mapping";
+}
+
+/// Return the name of the internal variable recording the array
+/// of PGO name vars referenced by the coverage mapping. The owning
+/// functions of those names are not emitted by the frontend (e.g., unused
+/// inline functions).
+inline StringRef getCoverageUnusedNamesVarName() {
+  return "__llvm_coverage_names";
+}
+
+/// Return the name of the function that registers all the per-function
+/// control data at program startup time by calling __llvm_register_function.
+/// This function has internal linkage and is called by the
+/// __llvm_profile_init runtime method. This function is not generated for
+/// these platforms: Darwin, Linux, and FreeBSD.
+inline StringRef getInstrProfRegFuncsName() {
+  return "__llvm_profile_register_functions";
+}
+
+/// Return the name of the runtime interface that registers per-function control
+/// data for one instrumented function.
+inline StringRef getInstrProfRegFuncName() {
+  return "__llvm_profile_register_function";
+}
+
+/// Return the name of the runtime interface that registers the PGO name strings.
+inline StringRef getInstrProfNamesRegFuncName() {
+  return "__llvm_profile_register_names_function";
+}
+
+/// Return the name of the runtime initialization method that is generated by
+/// the compiler. The function calls __llvm_profile_register_functions and
+/// __llvm_profile_override_default_filename functions if needed. This function
+/// has internal linkage and is invoked at startup time via init_array.
+inline StringRef getInstrProfInitFuncName() { return "__llvm_profile_init"; }
+
+/// Return the name of the hook variable defined in profile runtime library.
+/// A reference to the variable causes the linker to link in the runtime
+/// initialization module (which defines the hook variable).
+inline StringRef getInstrProfRuntimeHookVarName() {
+  return INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_RUNTIME_VAR);
+}
+
+/// Return the name of the compiler generated function that references the
+/// runtime hook variable. The function is a weak global.
+inline StringRef getInstrProfRuntimeHookVarUseFuncName() {
+  return "__llvm_profile_runtime_user";
+}
+
+/// Return the marker used to separate PGO names during serialization.
+inline StringRef getInstrProfNameSeparator() { return "\01"; }
+
+/// Return the modified name for function \c F suitable to be
+/// used as the key for profile lookup. Variable \c InLTO indicates if this
+/// is called in LTO optimization passes.
+std::string getPGOFuncName(const Function &F, bool InLTO = false,
+                           uint64_t Version = INSTR_PROF_INDEX_VERSION);
+
+/// Return the modified name for a function suitable to be
+/// used as the key for profile lookup. The function's original
+/// name is \c RawFuncName and has linkage of type \c Linkage.
+/// The function is defined in module \c FileName.
+std::string getPGOFuncName(StringRef RawFuncName,
+                           GlobalValue::LinkageTypes Linkage,
+                           StringRef FileName,
+                           uint64_t Version = INSTR_PROF_INDEX_VERSION);
+
+/// Return the name of the global variable used to store a function
+/// name in PGO instrumentation. \c FuncName is the name of the function
+/// returned by the \c getPGOFuncName call.
+std::string getPGOFuncNameVarName(StringRef FuncName,
+                                  GlobalValue::LinkageTypes Linkage);
+
+/// Create and return the global variable for function name used in PGO
+/// instrumentation. \c FuncName is the name of the function returned
+/// by \c getPGOFuncName call.
+GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName);
+
+/// Create and return the global variable for function name used in PGO
+/// instrumentation. \c FuncName is the name of the function
+/// returned by \c getPGOFuncName call, \c M is the owning module,
+/// and \c Linkage is the linkage of the instrumented function.
+GlobalVariable *createPGOFuncNameVar(Module &M,
+                                     GlobalValue::LinkageTypes Linkage,
+                                     StringRef PGOFuncName);
+
+/// Return the initializer, as a string, of the PGO name var \c NameVar.
+StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar);
+
+/// Given a PGO function name, remove the filename prefix and return
+/// the original (static) function name.
+StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName,
+                                   StringRef FileName = "<unknown>");
+
+/// Given a vector of strings (function PGO names) \c NameStrs, the
+/// method generates a combined string \c Result that is ready to be
+/// serialized. The \c Result string is comprised of three fields:
+/// the first field is the length of the uncompressed strings, and the
+/// second field is the length of the zlib-compressed string.
+/// Both fields are encoded in ULEB128. If \c doCompress is false, the
+/// third field is the uncompressed strings; otherwise it is the
+/// compressed string. When string compression is off, the
+/// second field will have value zero.
+Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
+                                bool doCompression, std::string &Result);
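+
+// An illustrative call (a sketch): with compression off, the two ULEB128
+// length fields are followed by the raw names joined by the separator
+// returned from getInstrProfNameSeparator().
+//
+//   std::string Payload;
+//   if (Error E = collectPGOFuncNameStrings(
+//           {"foo", "file.cpp:bar"}, /*doCompression=*/false, Payload))
+//     ; // handle E
+//   // Payload = ULEB128(UncompressedLen) ULEB128(0) "foo\01file.cpp:bar"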
+
+/// Produce \c Result string with the same format described above. The input
+/// is a vector of PGO function name variables that are referenced.
+Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
+                                std::string &Result, bool doCompression = true);
+
+/// \c NameStrings is a string composed of one or more sub-strings encoded in
+/// the format described above. The substrings are separated by 0 or more zero
+/// bytes. This method decodes the string and populates the \c Symtab.
+Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab);
+
+/// Check if INSTR_PROF_RAW_VERSION_VAR is defined. This global is only being
+/// set in IR PGO compilation.
+bool isIRPGOFlagSet(const Module *M);
+
+/// Check if we can safely rename this Comdat function. Instances of the same
+/// comdat function may have different control flows and thus cannot share the
+/// same counter variable.
+bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken = false);
+
+enum InstrProfValueKind : uint32_t {
+#define VALUE_PROF_KIND(Enumerator, Value) Enumerator = Value,
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+
+/// Get the value profile data for value site \p SiteIndx from \p InstrProfR
+/// and annotate the instruction \p Inst with the value profile meta data.
+/// Annotate up to \p MaxMDCount (default 3) number of records per value site.
+void annotateValueSite(Module &M, Instruction &Inst,
+                       const InstrProfRecord &InstrProfR,
+                       InstrProfValueKind ValueKind, uint32_t SiteIndx,
+                       uint32_t MaxMDCount = 3);
+
+/// Same as the above interface but using an ArrayRef, as well as \p Sum.
+void annotateValueSite(Module &M, Instruction &Inst,
+                       ArrayRef<InstrProfValueData> VDs, uint64_t Sum,
+                       InstrProfValueKind ValueKind, uint32_t MaxMDCount);
+
+/// Extract the value profile data from \p Inst which is annotated with
+/// value profile meta data. Return false if there is no value data annotated,
+/// otherwise return true.
+bool getValueProfDataFromInst(const Instruction &Inst,
+                              InstrProfValueKind ValueKind,
+                              uint32_t MaxNumValueData,
+                              InstrProfValueData ValueData[],
+                              uint32_t &ActualNumValueData, uint64_t &TotalC);
+
+inline StringRef getPGOFuncNameMetadataName() { return "PGOFuncName"; }
+
+/// Return the PGOFuncName meta data associated with a function.
+MDNode *getPGOFuncNameMetadata(const Function &F);
+
+/// Create the PGOFuncName meta data if PGOFuncName is different from the
+/// function's raw name. This should apply only to internal linkage functions
+/// declared by users.
+void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName);
+
+/// Check if we can use Comdat for profile variables. This will eliminate
+/// the duplicated profile variables for Comdat functions.
+bool needsComdatForCounter(const Function &F, const Module &M);
+
+const std::error_category &instrprof_category();
+
+enum class instrprof_error {
+  success = 0,
+  eof,
+  unrecognized_format,
+  bad_magic,
+  bad_header,
+  unsupported_version,
+  unsupported_hash_type,
+  too_large,
+  truncated,
+  malformed,
+  unknown_function,
+  hash_mismatch,
+  count_mismatch,
+  counter_overflow,
+  value_site_count_mismatch,
+  compress_failed,
+  uncompress_failed,
+  empty_raw_profile,
+  zlib_unavailable
+};
+
+inline std::error_code make_error_code(instrprof_error E) {
+  return std::error_code(static_cast<int>(E), instrprof_category());
+}
+
+class InstrProfError : public ErrorInfo<InstrProfError> {
+public:
+  InstrProfError(instrprof_error Err) : Err(Err) {
+    assert(Err != instrprof_error::success && "Not an error");
+  }
+
+  std::string message() const override;
+
+  void log(raw_ostream &OS) const override { OS << message(); }
+
+  std::error_code convertToErrorCode() const override {
+    return make_error_code(Err);
+  }
+
+  instrprof_error get() const { return Err; }
+
+  /// Consume an Error and return the raw enum value contained within it. The
+  /// Error must either be a success value, or contain a single InstrProfError.
+  static instrprof_error take(Error E) {
+    auto Err = instrprof_error::success;
+    handleAllErrors(std::move(E), [&Err](const InstrProfError &IPE) {
+      assert(Err == instrprof_error::success && "Multiple errors encountered");
+      Err = IPE.get();
+    });
+    return Err;
+  }
+
+  static char ID;
+
+private:
+  instrprof_error Err;
+};
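+
+// Illustrative error handling with InstrProfError::take (a sketch; `E` is an
+// Error produced by a profile reader API): convert the Error into the raw
+// enum so it can be switched on.
+//
+//   instrprof_error IPE = InstrProfError::take(std::move(E));
+//   if (IPE == instrprof_error::hash_mismatch)
+//     ; // e.g. tolerate a stale profile for this function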
+
+class SoftInstrProfErrors {
+  /// Count the number of soft instrprof_errors encountered and keep track of
+  /// the first such error for reporting purposes.
+
+  /// The first soft error encountered.
+  instrprof_error FirstError = instrprof_error::success;
+
+  /// The number of hash mismatches.
+  unsigned NumHashMismatches = 0;
+
+  /// The number of count mismatches.
+  unsigned NumCountMismatches = 0;
+
+  /// The number of counter overflows.
+  unsigned NumCounterOverflows = 0;
+
+  /// The number of value site count mismatches.
+  unsigned NumValueSiteCountMismatches = 0;
+
+public:
+  SoftInstrProfErrors() = default;
+
+  ~SoftInstrProfErrors() {
+    assert(FirstError == instrprof_error::success &&
+           "Unchecked soft error encountered");
+  }
+
+  /// Track a soft error (\p IE) and increment its associated counter.
+  void addError(instrprof_error IE);
+
+  /// Get the number of hash mismatches.
+  unsigned getNumHashMismatches() const { return NumHashMismatches; }
+
+  /// Get the number of count mismatches.
+  unsigned getNumCountMismatches() const { return NumCountMismatches; }
+
+  /// Get the number of counter overflows.
+  unsigned getNumCounterOverflows() const { return NumCounterOverflows; }
+
+  /// Get the number of value site count mismatches.
+  unsigned getNumValueSiteCountMismatches() const {
+    return NumValueSiteCountMismatches;
+  }
+
+  /// Return the first encountered error and reset FirstError to a success
+  /// value.
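+  ///
+  /// A typical pattern (sketch): record soft errors while processing, then
+  /// surface the first one to the caller:
+  /// \code
+  ///   SoftInstrProfErrors SIPE;
+  ///   SIPE.addError(instrprof_error::count_mismatch);
+  ///   if (Error E = SIPE.takeError())
+  ///     consumeError(std::move(E)); // or propagate to the caller
+  /// \endcode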
+  Error takeError() {
+    if (FirstError == instrprof_error::success)
+      return Error::success();
+    auto E = make_error<InstrProfError>(FirstError);
+    FirstError = instrprof_error::success;
+    return E;
+  }
+};
+
+namespace object {
+
+class SectionRef;
+
+} // end namespace object
+
+namespace IndexedInstrProf {
+
+uint64_t ComputeHash(StringRef K);
+
+} // end namespace IndexedInstrProf
+
+/// A symbol table used for function PGO name look-up with keys
+/// (such as pointers, md5hash values) to the function. A function's
+/// PGO name or name's md5hash are used in retrieving the profile
+/// data of the function. See \c getPGOFuncName() method for details
+/// on how PGO name is formed.
+class InstrProfSymtab {
+public:
+  using AddrHashMap = std::vector<std::pair<uint64_t, uint64_t>>;
+
+private:
+  StringRef Data;
+  uint64_t Address = 0;
+  // Unique name strings.
+  StringSet<> NameTab;
+  // A map from MD5 keys to function name strings.
+  std::vector<std::pair<uint64_t, StringRef>> MD5NameMap;
+  // A map from MD5 keys to function definitions. This map is only populated
+  // when the Symtab is built from a Module.
+  std::vector<std::pair<uint64_t, Function *>> MD5FuncMap;
+  // A map from function runtime address to function name MD5 hash.
+  // This map is only populated and used by raw instr profile reader.
+  AddrHashMap AddrToMD5Map;
+  bool Sorted = false;
+
+  static StringRef getExternalSymbol() {
+    return "** External Symbol **";
+  }
+
+  // If the symtab is created by a series of calls to \c addFuncName, \c
+  // finalizeSymtab needs to be called before looking up function names.
+  // This is required because the underlying map is a vector (for space
+  // efficiency) which needs to be sorted.
+  inline void finalizeSymtab();
+
+public:
+  InstrProfSymtab() = default;
+
+  /// Create InstrProfSymtab from an object file section which
+  /// contains function PGO names. The section may contain raw
+  /// string data or string data in compressed form. This method
+  /// only initializes the symtab with a reference to the data and
+  /// the section base address. Decompression is delayed until the
+  /// data is first used. See also the \c create(StringRef) method.
+  Error create(object::SectionRef &Section);
+
+  /// This interface is used by the reader of the CoverageMapping test
+  /// format.
+  inline Error create(StringRef D, uint64_t BaseAddr);
+
+  /// \c NameStrings is a string composed of one or more sub-strings
+  /// encoded in the format described in \c collectPGOFuncNameStrings.
+  /// This method is a wrapper around the \c readPGOFuncNameStrings method.
+  inline Error create(StringRef NameStrings);
+
+  /// A wrapper interface to populate the PGO symtab with functions
+  /// decls from module \c M. This interface is used by transformation
+  /// passes such as indirect function call promotion. Variable \c InLTO
+  /// indicates if this is called from LTO optimization passes.
+  Error create(Module &M, bool InLTO = false);
+
+  /// Create InstrProfSymtab from a set of names iterable from
+  /// \p IterRange. This interface is used by IndexedProfReader.
+  template <typename NameIterRange>
+  Error create(const NameIterRange &IterRange);
+
+  /// Update the symtab by adding \p FuncName to the table. This interface
+  /// is used by the raw and text profile readers.
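+  ///
+  /// A minimal usage sketch (the name "foo" is illustrative):
+  /// \code
+  ///   InstrProfSymtab Symtab;
+  ///   if (Error E = Symtab.addFuncName("foo"))
+  ///     return E;
+  ///   StringRef N =
+  ///       Symtab.getFuncName(IndexedInstrProf::ComputeHash("foo"));
+  /// \endcode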
+  Error addFuncName(StringRef FuncName) {
+    if (FuncName.empty())
+      return make_error<InstrProfError>(instrprof_error::malformed);
+    auto Ins = NameTab.insert(FuncName);
+    if (Ins.second) {
+      MD5NameMap.push_back(std::make_pair(
+          IndexedInstrProf::ComputeHash(FuncName), Ins.first->getKey()));
+      Sorted = false;
+    }
+    return Error::success();
+  }
+
+  /// Map a function address to its name's MD5 hash. This interface
+  /// is only used by the raw profiler reader.
+  void mapAddress(uint64_t Addr, uint64_t MD5Val) {
+    AddrToMD5Map.push_back(std::make_pair(Addr, MD5Val));
+  }
+
+  /// Return a function's hash, or 0 if the function is not in this SymTab.
+  uint64_t getFunctionHashFromAddress(uint64_t Address);
+
+  /// Return function's PGO name from the function name's symbol
+  /// address in the object file. If an error occurs, return
+  /// an empty string.
+  StringRef getFuncName(uint64_t FuncNameAddress, size_t NameSize);
+
+  /// Return function's PGO name from the name's md5 hash value.
+  /// If not found, return an empty string.
+  inline StringRef getFuncName(uint64_t FuncMD5Hash);
+
+  /// Just like getFuncName, except that it will return a non-empty StringRef
+  /// if the function is external to this symbol table. All such cases
+  /// will be represented using the same StringRef value.
+  inline StringRef getFuncNameOrExternalSymbol(uint64_t FuncMD5Hash);
+
+  /// True if Symbol is the value used to represent external symbols.
+  static bool isExternalSymbol(const StringRef &Symbol) {
+    return Symbol == InstrProfSymtab::getExternalSymbol();
+  }
+
+  /// Return function from the name's md5 hash. Return nullptr if not found.
+  inline Function *getFunction(uint64_t FuncMD5Hash);
+
+  /// Return the function's original assembly name by stripping off
+  /// the prefix attached to symbols with private linkage. For
+  /// global functions, it returns the same string as getFuncName.
+  inline StringRef getOrigFuncName(uint64_t FuncMD5Hash);
+
+  /// Return the name section data.
+  inline StringRef getNameData() const { return Data; }
+};
+
+Error InstrProfSymtab::create(StringRef D, uint64_t BaseAddr) {
+  Data = D;
+  Address = BaseAddr;
+  return Error::success();
+}
+
+Error InstrProfSymtab::create(StringRef NameStrings) {
+  return readPGOFuncNameStrings(NameStrings, *this);
+}
+
+template <typename NameIterRange>
+Error InstrProfSymtab::create(const NameIterRange &IterRange) {
+  for (auto Name : IterRange)
+    if (Error E = addFuncName(Name))
+      return E;
+
+  finalizeSymtab();
+  return Error::success();
+}
+
+void InstrProfSymtab::finalizeSymtab() {
+  if (Sorted)
+    return;
+  std::sort(MD5NameMap.begin(), MD5NameMap.end(), less_first());
+  std::sort(MD5FuncMap.begin(), MD5FuncMap.end(), less_first());
+  std::sort(AddrToMD5Map.begin(), AddrToMD5Map.end(), less_first());
+  AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
+                     AddrToMD5Map.end());
+  Sorted = true;
+}
+
+StringRef InstrProfSymtab::getFuncNameOrExternalSymbol(uint64_t FuncMD5Hash) {
+  StringRef ret = getFuncName(FuncMD5Hash);
+  if (ret.empty())
+    return InstrProfSymtab::getExternalSymbol();
+  return ret;
+}
+
+StringRef InstrProfSymtab::getFuncName(uint64_t FuncMD5Hash) {
+  finalizeSymtab();
+  auto Result =
+      std::lower_bound(MD5NameMap.begin(), MD5NameMap.end(), FuncMD5Hash,
+                       [](const std::pair<uint64_t, StringRef> &LHS,
+                          uint64_t RHS) { return LHS.first < RHS; });
+  if (Result != MD5NameMap.end() && Result->first == FuncMD5Hash)
+    return Result->second;
+  return StringRef();
+}
+
+Function* InstrProfSymtab::getFunction(uint64_t FuncMD5Hash) {
+  finalizeSymtab();
+  auto Result =
+      std::lower_bound(MD5FuncMap.begin(), MD5FuncMap.end(), FuncMD5Hash,
+                       [](const std::pair<uint64_t, Function*> &LHS,
+                          uint64_t RHS) { return LHS.first < RHS; });
+  if (Result != MD5FuncMap.end() && Result->first == FuncMD5Hash)
+    return Result->second;
+  return nullptr;
+}
+
+// See also getPGOFuncName implementation. These two need to be
+// matched.
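+// For example, a file-local function's PGO name "main.c:foo" maps back to
+// "foo", while a global function's name is returned unchanged.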
+StringRef InstrProfSymtab::getOrigFuncName(uint64_t FuncMD5Hash) {
+  StringRef PGOName = getFuncName(FuncMD5Hash);
+  size_t S = PGOName.find_first_of(':');
+  if (S == StringRef::npos)
+    return PGOName;
+  return PGOName.drop_front(S + 1);
+}
+
+struct InstrProfValueSiteRecord {
+  /// Value profiling data pairs at a given value site.
+  std::list<InstrProfValueData> ValueData;
+
+  InstrProfValueSiteRecord() { ValueData.clear(); }
+  template <class InputIterator>
+  InstrProfValueSiteRecord(InputIterator F, InputIterator L)
+      : ValueData(F, L) {}
+
+  /// Sort ValueData ascending by Value
+  void sortByTargetValues() {
+    ValueData.sort(
+        [](const InstrProfValueData &left, const InstrProfValueData &right) {
+          return left.Value < right.Value;
+        });
+  }
+  /// Sort ValueData descending by Count.
+  inline void sortByCount();
+
+  /// Merge data from another InstrProfValueSiteRecord
+  /// Optionally scale merged counts by \p Weight.
+  void merge(InstrProfValueSiteRecord &Input, uint64_t Weight,
+             function_ref<void(instrprof_error)> Warn);
+  /// Scale up value profile data counts.
+  void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
+};
+
+/// Profiling information for a single function.
+struct InstrProfRecord {
+  std::vector<uint64_t> Counts;
+
+  InstrProfRecord() = default;
+  InstrProfRecord(std::vector<uint64_t> Counts) : Counts(std::move(Counts)) {}
+  InstrProfRecord(InstrProfRecord &&) = default;
+  InstrProfRecord(const InstrProfRecord &RHS)
+      : Counts(RHS.Counts),
+        ValueData(RHS.ValueData
+                      ? llvm::make_unique<ValueProfData>(*RHS.ValueData)
+                      : nullptr) {}
+  InstrProfRecord &operator=(InstrProfRecord &&) = default;
+  InstrProfRecord &operator=(const InstrProfRecord &RHS) {
+    Counts = RHS.Counts;
+    if (!RHS.ValueData) {
+      ValueData = nullptr;
+      return *this;
+    }
+    if (!ValueData)
+      ValueData = llvm::make_unique<ValueProfData>(*RHS.ValueData);
+    else
+      *ValueData = *RHS.ValueData;
+    return *this;
+  }
+
+  /// Return the number of value profile kinds with non-zero number
+  /// of profile sites.
+  inline uint32_t getNumValueKinds() const;
+  /// Return the number of instrumented sites for ValueKind.
+  inline uint32_t getNumValueSites(uint32_t ValueKind) const;
+
+  /// Return the total number of ValueData for ValueKind.
+  inline uint32_t getNumValueData(uint32_t ValueKind) const;
+
+  /// Return the number of value data collected for ValueKind at profiling
+  /// site: Site.
+  inline uint32_t getNumValueDataForSite(uint32_t ValueKind,
+                                         uint32_t Site) const;
+
+  /// Return the array of profiled values at \p Site. If \p TotalC
+  /// is not null, the total count of all target values at this site
+  /// will be stored in \c *TotalC.
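+  ///
+  /// Example (sketch; \c Record is assumed to be a populated
+  /// InstrProfRecord):
+  /// \code
+  ///   uint64_t Total;
+  ///   std::unique_ptr<InstrProfValueData[]> VD =
+  ///       Record.getValueForSite(IPVK_IndirectCallTarget, 0, &Total);
+  /// \endcode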
+  inline std::unique_ptr<InstrProfValueData[]>
+  getValueForSite(uint32_t ValueKind, uint32_t Site,
+                  uint64_t *TotalC = nullptr) const;
+
+  /// Get the target value/counts of kind \p ValueKind collected at site
+  /// \p Site and store the result in array \p Dest. Return the total
+  /// counts of all target values at this site.
+  inline uint64_t getValueForSite(InstrProfValueData Dest[], uint32_t ValueKind,
+                                  uint32_t Site) const;
+
+  /// Reserve space for NumValueSites sites.
+  inline void reserveSites(uint32_t ValueKind, uint32_t NumValueSites);
+
+  /// Add ValueData for ValueKind at value Site.
+  void addValueData(uint32_t ValueKind, uint32_t Site,
+                    InstrProfValueData *VData, uint32_t N,
+                    InstrProfSymtab *SymTab);
+
+  /// Merge the counts in \p Other into this one.
+  /// Optionally scale merged counts by \p Weight.
+  void merge(InstrProfRecord &Other, uint64_t Weight,
+             function_ref<void(instrprof_error)> Warn);
+
+  /// Scale up profile counts (including value profile data) by
+  /// \p Weight.
+  void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
+
+  /// Sort value profile data (per site) by count.
+  void sortValueData() {
+    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
+      for (auto &SR : getValueSitesForKind(Kind))
+        SR.sortByCount();
+  }
+
+  /// Clear value data entries and edge counters.
+  void Clear() {
+    Counts.clear();
+    clearValueData();
+  }
+
+  /// Clear value data entries
+  void clearValueData() { ValueData = nullptr; }
+
+private:
+  struct ValueProfData {
+    std::vector<InstrProfValueSiteRecord> IndirectCallSites;
+    std::vector<InstrProfValueSiteRecord> MemOPSizes;
+  };
+  std::unique_ptr<ValueProfData> ValueData;
+
+  MutableArrayRef<InstrProfValueSiteRecord>
+  getValueSitesForKind(uint32_t ValueKind) {
+    // Cast to /add/ const (should be an implicit_cast, ideally, if that's ever
+    // implemented in LLVM) to call the const overload of this function, then
+    // cast away the constness from the result.
+    auto AR = const_cast<const InstrProfRecord *>(this)->getValueSitesForKind(
+        ValueKind);
+    return makeMutableArrayRef(
+        const_cast<InstrProfValueSiteRecord *>(AR.data()), AR.size());
+  }
+  ArrayRef<InstrProfValueSiteRecord>
+  getValueSitesForKind(uint32_t ValueKind) const {
+    if (!ValueData)
+      return None;
+    switch (ValueKind) {
+    case IPVK_IndirectCallTarget:
+      return ValueData->IndirectCallSites;
+    case IPVK_MemOPSize:
+      return ValueData->MemOPSizes;
+    default:
+      llvm_unreachable("Unknown value kind!");
+    }
+  }
+
+  std::vector<InstrProfValueSiteRecord> &
+  getOrCreateValueSitesForKind(uint32_t ValueKind) {
+    if (!ValueData)
+      ValueData = llvm::make_unique<ValueProfData>();
+    switch (ValueKind) {
+    case IPVK_IndirectCallTarget:
+      return ValueData->IndirectCallSites;
+    case IPVK_MemOPSize:
+      return ValueData->MemOPSizes;
+    default:
+      llvm_unreachable("Unknown value kind!");
+    }
+  }
+
+  // Map indirect call target name hash to name string.
+  uint64_t remapValue(uint64_t Value, uint32_t ValueKind,
+                      InstrProfSymtab *SymTab);
+
+  // Merge Value Profile data from Src record to this record for ValueKind.
+  // Scale merged value counts by \p Weight.
+  void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
+                          uint64_t Weight,
+                          function_ref<void(instrprof_error)> Warn);
+
+  // Scale up value profile data count.
+  void scaleValueProfData(uint32_t ValueKind, uint64_t Weight,
+                          function_ref<void(instrprof_error)> Warn);
+};
+
+struct NamedInstrProfRecord : InstrProfRecord {
+  StringRef Name;
+  uint64_t Hash;
+
+  NamedInstrProfRecord() = default;
+  NamedInstrProfRecord(StringRef Name, uint64_t Hash,
+                       std::vector<uint64_t> Counts)
+      : InstrProfRecord(std::move(Counts)), Name(Name), Hash(Hash) {}
+};
+
+uint32_t InstrProfRecord::getNumValueKinds() const {
+  uint32_t NumValueKinds = 0;
+  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
+    NumValueKinds += !(getValueSitesForKind(Kind).empty());
+  return NumValueKinds;
+}
+
+uint32_t InstrProfRecord::getNumValueData(uint32_t ValueKind) const {
+  uint32_t N = 0;
+  for (auto &SR : getValueSitesForKind(ValueKind))
+    N += SR.ValueData.size();
+  return N;
+}
+
+uint32_t InstrProfRecord::getNumValueSites(uint32_t ValueKind) const {
+  return getValueSitesForKind(ValueKind).size();
+}
+
+uint32_t InstrProfRecord::getNumValueDataForSite(uint32_t ValueKind,
+                                                 uint32_t Site) const {
+  return getValueSitesForKind(ValueKind)[Site].ValueData.size();
+}
+
+std::unique_ptr<InstrProfValueData[]>
+InstrProfRecord::getValueForSite(uint32_t ValueKind, uint32_t Site,
+                                 uint64_t *TotalC) const {
+  uint64_t Dummy;
+  uint64_t &TotalCount = (TotalC == nullptr ? Dummy : *TotalC);
+  uint32_t N = getNumValueDataForSite(ValueKind, Site);
+  if (N == 0) {
+    TotalCount = 0;
+    return std::unique_ptr<InstrProfValueData[]>(nullptr);
+  }
+
+  auto VD = llvm::make_unique<InstrProfValueData[]>(N);
+  TotalCount = getValueForSite(VD.get(), ValueKind, Site);
+
+  return VD;
+}
+
+uint64_t InstrProfRecord::getValueForSite(InstrProfValueData Dest[],
+                                          uint32_t ValueKind,
+                                          uint32_t Site) const {
+  uint32_t I = 0;
+  uint64_t TotalCount = 0;
+  for (auto V : getValueSitesForKind(ValueKind)[Site].ValueData) {
+    Dest[I].Value = V.Value;
+    Dest[I].Count = V.Count;
+    TotalCount = SaturatingAdd(TotalCount, V.Count);
+    I++;
+  }
+  return TotalCount;
+}
+
+void InstrProfRecord::reserveSites(uint32_t ValueKind, uint32_t NumValueSites) {
+  if (!NumValueSites)
+    return;
+  getOrCreateValueSitesForKind(ValueKind).reserve(NumValueSites);
+}
+
+inline support::endianness getHostEndianness() {
+  return sys::IsLittleEndianHost ? support::little : support::big;
+}
+
+// Include definitions for value profile data
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "llvm/ProfileData/InstrProfData.inc"
+
+void InstrProfValueSiteRecord::sortByCount() {
+  ValueData.sort(
+      [](const InstrProfValueData &left, const InstrProfValueData &right) {
+        return left.Count > right.Count;
+      });
+  // Now truncate
+  size_t max_s = INSTR_PROF_MAX_NUM_VAL_PER_SITE;
+  if (ValueData.size() > max_s)
+    ValueData.resize(max_s);
+}
+
+namespace IndexedInstrProf {
+
+enum class HashT : uint32_t {
+  MD5,
+  Last = MD5
+};
+
+inline uint64_t ComputeHash(HashT Type, StringRef K) {
+  switch (Type) {
+  case HashT::MD5:
+    return MD5Hash(K);
+  }
+  llvm_unreachable("Unhandled hash type");
+}
+
+const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81"
+
+enum ProfVersion {
+  // Version 1 is the first version. In this version, the value of
+  // a key/value pair can only include profile data of a single function.
+  // Due to this restriction, the number of block counters for a given
+  // function is not recorded but derived from the length of the value.
+  Version1 = 1,
+  // The version 2 format supports recording profile data of multiple
+  // functions which share the same key in one value field. To support this,
+  // the number block counters is recorded as an uint64_t field right after the
+  // function structural hash.
+  Version2 = 2,
+  // Version 3 supports value profile data. The value profile data is expected
+  // to follow the block counter profile data.
+  Version3 = 3,
+  // In this version, profile summary data \c IndexedInstrProf::Summary is
+  // stored after the profile header.
+  Version4 = 4,
+  // In this version, the frontend PGO stable hash algorithm defaults to V2.
+  Version5 = 5,
+  // The current version is 5.
+  CurrentVersion = INSTR_PROF_INDEX_VERSION
+};
+const uint64_t Version = ProfVersion::CurrentVersion;
+
+const HashT HashType = HashT::MD5;
+
+inline uint64_t ComputeHash(StringRef K) { return ComputeHash(HashType, K); }
+
+// This structure defines the file header of the LLVM profile
+// data file in indexed-format.
+struct Header {
+  uint64_t Magic;
+  uint64_t Version;
+  uint64_t Unused; // Unused since version 4.
+  uint64_t HashType;
+  uint64_t HashOffset;
+};
+
+// Profile summary data recorded in the profile data file in indexed
+// format. It is introduced in version 4. The summary data follows
+// right after the profile file header.
+struct Summary {
+  struct Entry {
+    uint64_t Cutoff; ///< The required percentile of total execution count.
+    uint64_t
+        MinBlockCount; ///< The minimum execution count for this percentile.
+    uint64_t NumBlocks; ///< Number of blocks >= the minimum execution count.
+  };
+  // The mapping from field kind enumerators to their assigned values should
+  // remain unchanged when a new kind is added or an old kind is deleted in
+  // the future.
+  enum SummaryFieldKind {
+    /// The total number of functions instrumented.
+    TotalNumFunctions = 0,
+    /// Total number of instrumented blocks/edges.
+    TotalNumBlocks = 1,
+    /// The maximal execution count among all functions.
+    /// This field does not exist for profile data from IR based
+    /// instrumentation.
+    MaxFunctionCount = 2,
+    /// Max block count of the program.
+    MaxBlockCount = 3,
+    /// Max internal block count of the program (excluding entry blocks).
+    MaxInternalBlockCount = 4,
+    /// The sum of all instrumented block counts.
+    TotalBlockCount = 5,
+    NumKinds = TotalBlockCount + 1
+  };
+
+  // The number of summary fields following the summary header.
+  uint64_t NumSummaryFields;
+  // The number of Cutoff Entries (Summary::Entry) following summary fields.
+  uint64_t NumCutoffEntries;
+
+  Summary() = delete;
+  Summary(uint32_t Size) { memset(this, 0, Size); }
+
+  void operator delete(void *ptr) { ::operator delete(ptr); }
+
+  static uint32_t getSize(uint32_t NumSumFields, uint32_t NumCutoffEntries) {
+    return sizeof(Summary) + NumCutoffEntries * sizeof(Entry) +
+           NumSumFields * sizeof(uint64_t);
+  }
+
+  const uint64_t *getSummaryDataBase() const {
+    return reinterpret_cast<const uint64_t *>(this + 1);
+  }
+
+  uint64_t *getSummaryDataBase() {
+    return reinterpret_cast<uint64_t *>(this + 1);
+  }
+
+  const Entry *getCutoffEntryBase() const {
+    return reinterpret_cast<const Entry *>(
+        &getSummaryDataBase()[NumSummaryFields]);
+  }
+
+  Entry *getCutoffEntryBase() {
+    return reinterpret_cast<Entry *>(&getSummaryDataBase()[NumSummaryFields]);
+  }
+
+  uint64_t get(SummaryFieldKind K) const {
+    return getSummaryDataBase()[K];
+  }
+
+  void set(SummaryFieldKind K, uint64_t V) {
+    getSummaryDataBase()[K] = V;
+  }
+
+  const Entry &getEntry(uint32_t I) const { return getCutoffEntryBase()[I]; }
+
+  void setEntry(uint32_t I, const ProfileSummaryEntry &E) {
+    Entry &ER = getCutoffEntryBase()[I];
+    ER.Cutoff = E.Cutoff;
+    ER.MinBlockCount = E.MinCount;
+    ER.NumBlocks = E.NumCounts;
+  }
+};
+
+inline std::unique_ptr<Summary> allocSummary(uint32_t TotalSize) {
+  return std::unique_ptr<Summary>(new (::operator new(TotalSize))
+                                      Summary(TotalSize));
+}
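+
+// A typical construction sequence (sketch; the counts are illustrative):
+//   uint32_t NumCutoffs = 3;
+//   uint32_t Size = Summary::getSize(Summary::NumKinds, NumCutoffs);
+//   std::unique_ptr<Summary> S = allocSummary(Size);
+//   S->NumSummaryFields = Summary::NumKinds;
+//   S->NumCutoffEntries = NumCutoffs;
+//   S->set(Summary::TotalNumFunctions, 100);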
+
+} // end namespace IndexedInstrProf
+
+namespace RawInstrProf {
+
+// Version 1: First version
+// Version 2: Added value profile data section. Per-function control data
+// struct has more fields to describe value profile information.
+// Version 3: Compressed name section support. Function PGO name reference
+// from control data struct is changed from raw pointer to Name's MD5 value.
+// Version 4: ValueDataBegin and ValueDataSizes fields are removed from the
+// raw header.
+const uint64_t Version = INSTR_PROF_RAW_VERSION;
+
+template <class IntPtrT> inline uint64_t getMagic();
+template <> inline uint64_t getMagic<uint64_t>() {
+  return INSTR_PROF_RAW_MAGIC_64;
+}
+
+template <> inline uint64_t getMagic<uint32_t>() {
+  return INSTR_PROF_RAW_MAGIC_32;
+}
+
+// Per-function profile data header/control structure.
+// The definition should match the structure defined in
+// compiler-rt/lib/profile/InstrProfiling.h.
+// It should also match the synthesized type in
+// Transforms/Instrumentation/InstrProfiling.cpp:getOrCreateRegionCounters.
+template <class IntPtrT> struct LLVM_ALIGNAS(8) ProfileData {
+  #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Type Name;
+  #include "llvm/ProfileData/InstrProfData.inc"
+};
+
+// File header structure of the LLVM profile data in raw format.
+// The definition should match the header referenced in
+// compiler-rt/lib/profile/InstrProfilingFile.c  and
+// InstrProfilingBuffer.c.
+struct Header {
+#define INSTR_PROF_RAW_HEADER(Type, Name, Init) const Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+
+} // end namespace RawInstrProf
+
+// Parse MemOP Size range option.
+void getMemOPSizeRangeFromOption(StringRef Str, int64_t &RangeStart,
+                                 int64_t &RangeLast);
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_INSTRPROF_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/InstrProfData.inc b/linux-x64/clang/include/llvm/ProfileData/InstrProfData.inc
new file mode 100644
index 0000000..bac8cce
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/InstrProfData.inc
@@ -0,0 +1,740 @@
+/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
+|*
+|*                     The LLVM Compiler Infrastructure
+|*
+|* This file is distributed under the University of Illinois Open Source
+|* License. See LICENSE.TXT for details.
+|*
+\*===----------------------------------------------------------------------===*/
+/*
+ * This is the master file that defines all the data structures, signatures,
+ * and constant literals that are shared across the profiling runtime library,
+ * compiler (instrumentation), and host tools (reader/writer). The entities
+ * defined in this file affect the profile runtime ABI, the raw profile format,
+ * or both.
+ *
+ * The file has two identical copies. The master copy lives in LLVM and
+ * the other one sits in the compiler-rt/lib/profile directory. To make
+ * changes in this file, first modify the master copy and then copy it over
+ * to compiler-rt. Testing of any change in this file can start only after
+ * the two copies are synced up.
+ *
+ * The first part of the file includes macros that define the types, names,
+ * and initializers for the member fields of the core data structures. The
+ * field declarations for one structure are enabled by defining the field
+ * activation macro associated with that structure. Only one field activation
+ * macro can be defined at a time and the remaining definitions are filtered
+ * out by the preprocessor.
+ *
+ * Examples of how the template is used to instantiate structure definitions:
+ * 1. To declare a structure:
+ *
+ * struct ProfData {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ *    Type Name;
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 2. To construct LLVM type arrays for the struct type:
+ *
+ * Type *DataTypes[] = {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ *   LLVMType,
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 3. To construct a constant array for the initializers:
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ *   Initializer,
+ * Constant *ConstantVals[] = {
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ *
+ * The second part of the file includes definitions of all other entities
+ * that are related to the runtime ABI and format. When no field activation
+ * macro is defined, this file can be included to introduce the definitions.
+ *
+\*===----------------------------------------------------------------------===*/
+
+/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
+ * the compiler runtime. */
+#ifndef INSTR_PROF_VISIBILITY
+#define INSTR_PROF_VISIBILITY
+#endif
+
+/* INSTR_PROF_DATA start. */
+/* Definition of member fields of the per-function control structure. */
+#ifndef INSTR_PROF_DATA
+#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
+                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
+INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+                Inc->getHash()->getZExtValue()))
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
+                ConstantExpr::getBitCast(CounterPtr, \
+                llvm::Type::getInt64PtrTy(Ctx)))
+/* This is used to map function pointers for the indirect call targets to
+ * function name hashes during the conversion from raw to merged profile
+ * data.
+ */
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
+                FunctionAddr)
+INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
+                ValuesPtrExpr)
+INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
+                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
+INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
+                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
+#undef INSTR_PROF_DATA
+/* INSTR_PROF_DATA end. */
+
+
+/* This is an internal data structure used by value profiler. It
+ * is defined here to allow serialization code sharing by LLVM
+ * to be used in unit test.
+ *
+ * typedef struct ValueProfNode {
+ *   // InstrProfValueData VData;
+ *   uint64_t Value;
+ *   uint64_t Count;
+ *   struct ValueProfNode *Next;
+ * } ValueProfNode;
+ */
+/* INSTR_PROF_VALUE_NODE start. */
+#ifndef INSTR_PROF_VALUE_NODE
+#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
+                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
+INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
+                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
+INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
+                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
+#undef INSTR_PROF_VALUE_NODE
+/* INSTR_PROF_VALUE_NODE end. */
+
+/* INSTR_PROF_RAW_HEADER  start */
+/* Definition of member fields of the raw profile header data structure. */
+#ifndef INSTR_PROF_RAW_HEADER
+#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
+INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
+INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
+#undef INSTR_PROF_RAW_HEADER
+/* INSTR_PROF_RAW_HEADER  end */
+
+/* VALUE_PROF_FUNC_PARAM start */
+/* Definition of parameter types of the runtime API used to do value profiling
+ * for a given value site.
+ */
+#ifndef VALUE_PROF_FUNC_PARAM
+#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
+#define INSTR_PROF_COMMA
+#else
+#define INSTR_PROF_DATA_DEFINED
+#define INSTR_PROF_COMMA ,
+#endif
+VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
+                      INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
+#ifndef VALUE_RANGE_PROF
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
+#else /* VALUE_RANGE_PROF */
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
+                      INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
+                      INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
+                      INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
+#endif /*VALUE_RANGE_PROF */
+#undef VALUE_PROF_FUNC_PARAM
+#undef INSTR_PROF_COMMA
+/* VALUE_PROF_FUNC_PARAM end */
+
+/* VALUE_PROF_KIND start */
+#ifndef VALUE_PROF_KIND
+#define VALUE_PROF_KIND(Enumerator, Value)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+/* For indirect function call value profiling, the addresses of the target
+ * functions are profiled by the instrumented code. The target addresses are
+ * written in the raw profile data and converted to target function name's MD5
+ * hash by the profile reader during deserialization.  Typically, this happens
+ * when the raw profile data is read during profile merging.
+ *
+ * For this remapping the ProfData is used. ProfData contains both the
+ * function name hash and the function address.
+ */
+VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0)
+/* For memory intrinsic functions size profiling. */
+VALUE_PROF_KIND(IPVK_MemOPSize, 1)
+/* These two kinds must be the last to be
+ * declared. This is to make sure the string
+ * array created with the template can be
+ * indexed with the kind value.
+ */
+VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget)
+VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize)
+
+#undef VALUE_PROF_KIND
+/* VALUE_PROF_KIND end */
+
+/* COVMAP_FUNC_RECORD start */
+/* Definition of member fields of the function record structure in coverage
+ * map.
+ */
+#ifndef COVMAP_FUNC_RECORD
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+#ifdef COVMAP_V1
+COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
+                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
+                   llvm::Type::getInt8PtrTy(Ctx)))
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
+                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
+                   NameValue.size()))
+#else
+COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
+                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+                   llvm::IndexedInstrProf::ComputeHash(NameValue)))
+#endif
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
+                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
+                   CoverageMapping.size()))
+COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
+#undef COVMAP_FUNC_RECORD
+/* COVMAP_FUNC_RECORD end.  */
+
+/* COVMAP_HEADER start */
+/* Definition of member fields of coverage map header.
+ */
+#ifndef COVMAP_HEADER
+#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
+              llvm::ConstantInt::get(Int32Ty,  FunctionRecords.size()))
+COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
+              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
+COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
+              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
+COVMAP_HEADER(uint32_t, Int32Ty, Version, \
+              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
+#undef COVMAP_HEADER
+/* COVMAP_HEADER end.  */
+
+
+#ifdef INSTR_PROF_SECT_ENTRY
+#define INSTR_PROF_DATA_DEFINED
+INSTR_PROF_SECT_ENTRY(IPSK_data, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_name, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vals, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
+                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COFF), "__LLVM_COV,")
+
+#undef INSTR_PROF_SECT_ENTRY
+#endif
+
+
+#ifdef INSTR_PROF_VALUE_PROF_DATA
+#define INSTR_PROF_DATA_DEFINED
+
+#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
+/*!
+ * This is the header of the data structure that defines the on-disk
+ * layout of the value profile data of a particular kind for one function.
+ */
+typedef struct ValueProfRecord {
+  /* The kind of the value profile record. */
+  uint32_t Kind;
+  /*
+   * The number of value profile sites. It is guaranteed to be non-zero;
+   * otherwise the record for this kind won't be emitted.
+   */
+  uint32_t NumValueSites;
+  /*
+   * The first element of the array that stores the number of profiled
+   * values for each value site. The size of the array is NumValueSites.
+   * Since NumValueSites is greater than zero, there is at least one
+   * element in the array.
+   */
+  uint8_t SiteCountArray[1];
+
+  /*
+   * The fake declaration is for documentation purposes only. It aligns
+   * the start of the next field on an 8-byte boundary.
+  uint8_t Padding[X];
+   */
+
+  /* The array of value profile data. The size of the array is the sum
+   * of all elements in SiteCountArray[].
+  InstrProfValueData ValueData[];
+   */
+
+#ifdef __cplusplus
+  /*!
+   * \brief Return the number of value sites.
+   */
+  uint32_t getNumValueSites() const { return NumValueSites; }
+  /*!
+   * \brief Read data from this record and save it to Record.
+   */
+  void deserializeTo(InstrProfRecord &Record,
+                     InstrProfSymtab *SymTab);
+  /*
+   * In-place byte swap:
+   * Do the byte swap for this instance. \c Old is the original byte order
+   * before the swap, and \c New is the new byte order.
+   */
+  void swapBytes(support::endianness Old, support::endianness New);
+#endif
+} ValueProfRecord;
+
+/*!
+ * Per-function header/control data structure for value profiling
+ * data in indexed format.
+ */
+typedef struct ValueProfData {
+  /*
+   * Total size in bytes including this field. It must be a multiple
+   * of sizeof(uint64_t).
+   */
+  uint32_t TotalSize;
+  /*
+   * The number of value profile kinds that have value profile data.
+   * In this implementation, a value profile kind is considered to
+   * have profile data if the number of value profile sites for the
+   * kind is not zero. More aggressively, the implementation can
+   * choose to check the actual data value: if none of the value sites
+   * has any profiled values, the kind can be skipped.
+   */
+  uint32_t NumValueKinds;
+
+  /*
+   * What follows is a sequence of variable-length records. The prefix/header
+   * of each record is defined by the ValueProfRecord type. The number of
+   * records is NumValueKinds.
+   * ValueProfRecord Record_1;
+   * ...
+   * ValueProfRecord Record_N;
+   */
+
+#ifdef __cplusplus
+  /*!
+   * Return the total size in bytes of the on-disk value profile data
+   * given the data stored in Record.
+   */
+  static uint32_t getSize(const InstrProfRecord &Record);
+  /*!
+   * Return a pointer to \c ValueProfData instance ready to be streamed.
+   */
+  static std::unique_ptr<ValueProfData>
+  serializeFrom(const InstrProfRecord &Record);
+  /*!
+   * Check the integrity of the record.
+   */
+  Error checkIntegrity();
+  /*!
+   * Return a pointer to \c ValueProfileData instance ready to be read.
+   * All data in the instance are properly byte swapped. The input
+   * data is assumed to be in little endian order.
+   */
+  static Expected<std::unique_ptr<ValueProfData>>
+  getValueProfData(const unsigned char *SrcBuffer,
+                   const unsigned char *const SrcBufferEnd,
+                   support::endianness SrcDataEndianness);
+  /*!
+   * Swap byte order from \c Endianness order to host byte order.
+   */
+  void swapBytesToHost(support::endianness Endianness);
+  /*!
+   * Swap byte order from host byte order to \c Endianness order.
+   */
+  void swapBytesFromHost(support::endianness Endianness);
+  /*!
+   * Return the total size of \c ValueProfileData.
+   */
+  uint32_t getSize() const { return TotalSize; }
+  /*!
+   * Read data from this data and save it to \c Record.
+   */
+  void deserializeTo(InstrProfRecord &Record,
+                     InstrProfSymtab *SymTab);
+  void operator delete(void *ptr) { ::operator delete(ptr); }
+#endif
+} ValueProfData;
+
+/*
+ * The closure is designed to abstract away two types of value profile data:
+ * - InstrProfRecord which is the primary data structure used to
+ *   represent profile data in host tools (reader, writer, and profile-use)
+ * - value profile runtime data structure suitable to be used by C
+ *   runtime library.
+ *
+ * Both sources of data need to serialize to a disk/memory buffer in a common
+ * format: ValueProfData. The abstraction allows compiler-rt's raw profiler
+ * writer to share the same format and code with indexed profile writer.
+ *
+ * For documentation of the member methods below, refer to the corresponding
+ * methods in class InstrProfRecord.
+ */
+typedef struct ValueProfRecordClosure {
+  const void *Record;
+  uint32_t (*GetNumValueKinds)(const void *Record);
+  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
+  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
+  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
+
+  /*
+   * After extracting the value profile data from the value profile record,
+   * this method is used to map the in-memory value to on-disk value. If
+   * the method is null, value will be written out untranslated.
+   */
+  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
+  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
+                          uint32_t S);
+  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
+} ValueProfRecordClosure;
+
+INSTR_PROF_VISIBILITY ValueProfRecord *
+getFirstValueProfRecord(ValueProfData *VPD);
+INSTR_PROF_VISIBILITY ValueProfRecord *
+getValueProfRecordNext(ValueProfRecord *VPR);
+INSTR_PROF_VISIBILITY InstrProfValueData *
+getValueProfRecordValueData(ValueProfRecord *VPR);
+INSTR_PROF_VISIBILITY uint32_t
+getValueProfRecordHeaderSize(uint32_t NumValueSites);
+
+#undef INSTR_PROF_VALUE_PROF_DATA
+#endif  /* INSTR_PROF_VALUE_PROF_DATA */
+
+
+#ifdef INSTR_PROF_COMMON_API_IMPL
+#define INSTR_PROF_DATA_DEFINED
+#ifdef __cplusplus
+#define INSTR_PROF_INLINE inline
+#define INSTR_PROF_NULLPTR nullptr
+#else
+#define INSTR_PROF_INLINE
+#define INSTR_PROF_NULLPTR NULL
+#endif
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+/*!
+ * \brief Return the \c ValueProfRecord header size including the
+ * padding bytes.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
+  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
+                  sizeof(uint8_t) * NumValueSites;
+  /* Round the size to multiple of 8 bytes. */
+  Size = (Size + 7) & ~7;
+  return Size;
+}
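+
+/* Worked example: with NumValueSites == 5, the size is 8 bytes of fixed
+ * fields (Kind and NumValueSites) plus 5 site-count bytes = 13, rounded up
+ * to 16. */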
+
+/*!
+ * \brief Return the total size of the value profile record including the
+ * header and the value data.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordSize(uint32_t NumValueSites,
+                                uint32_t NumValueData) {
+  return getValueProfRecordHeaderSize(NumValueSites) +
+         sizeof(InstrProfValueData) * NumValueData;
+}
+
+/*!
+ * \brief Return the pointer to the start of value data array.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
+  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
+                                                   This->NumValueSites));
+}
+
+/*!
+ * \brief Return the total number of value data for \c This record.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
+  uint32_t NumValueData = 0;
+  uint32_t I;
+  for (I = 0; I < This->NumValueSites; I++)
+    NumValueData += This->SiteCountArray[I];
+  return NumValueData;
+}
+
+/*!
+ * \brief Use this method to advance from \c This to the next \c ValueProfRecord.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
+  uint32_t NumValueData = getValueProfRecordNumValueData(This);
+  return (ValueProfRecord *)((char *)This +
+                             getValueProfRecordSize(This->NumValueSites,
+                                                    NumValueData));
+}
+
+/*!
+ * \brief Return the first \c ValueProfRecord instance.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
+  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
+}
+
+/* Closure based interfaces.  */
+
+/*!
+ * Return the total size in bytes of the on-disk value profile data
+ * given the data stored in Record.
+ */
+INSTR_PROF_VISIBILITY uint32_t
+getValueProfDataSize(ValueProfRecordClosure *Closure) {
+  uint32_t Kind;
+  uint32_t TotalSize = sizeof(ValueProfData);
+  const void *Record = Closure->Record;
+
+  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
+    if (!NumValueSites)
+      continue;
+    TotalSize += getValueProfRecordSize(NumValueSites,
+                                        Closure->GetNumValueData(Record, Kind));
+  }
+  return TotalSize;
+}
+
+/*!
+ * Extract value profile data of a function for the profile kind \c ValueKind
+ * from the \c Closure and serialize the data into \c This record instance.
+ */
+INSTR_PROF_VISIBILITY void
+serializeValueProfRecordFrom(ValueProfRecord *This,
+                             ValueProfRecordClosure *Closure,
+                             uint32_t ValueKind, uint32_t NumValueSites) {
+  uint32_t S;
+  const void *Record = Closure->Record;
+  This->Kind = ValueKind;
+  This->NumValueSites = NumValueSites;
+  InstrProfValueData *DstVD = getValueProfRecordValueData(This);
+
+  for (S = 0; S < NumValueSites; S++) {
+    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
+    This->SiteCountArray[S] = ND;
+    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
+    DstVD += ND;
+  }
+}
+
+/*!
+ * Extract value profile data of a function from the \c Closure
+ * and serialize the data into \c DstData if it is not NULL, or into heap
+ * memory allocated by the \c Closure's allocator method otherwise. If \c
+ * DstData is not null, the caller is expected to set the TotalSize
+ * in DstData.
+ */
+INSTR_PROF_VISIBILITY ValueProfData *
+serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
+                           ValueProfData *DstData) {
+  uint32_t Kind;
+  uint32_t TotalSize =
+      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
+
+  ValueProfData *VPD =
+      DstData ? DstData : Closure->AllocValueProfData(TotalSize);
+
+  VPD->TotalSize = TotalSize;
+  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
+  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
+  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
+    if (!NumValueSites)
+      continue;
+    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
+    VR = getValueProfRecordNext(VR);
+  }
+  return VPD;
+}
+
+#undef INSTR_PROF_COMMON_API_IMPL
+#endif /* INSTR_PROF_COMMON_API_IMPL */
+
+/*============================================================================*/
+
+#ifndef INSTR_PROF_DATA_DEFINED
+
+#ifndef INSTR_PROF_DATA_INC
+#define INSTR_PROF_DATA_INC
+
+/* Helper macros.  */
+#define INSTR_PROF_SIMPLE_QUOTE(x) #x
+#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
+#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
+#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
+
+/* Magic number to detect file format and endianness.
+ * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
+ * so that utilities, like strings, don't grab it as a string.  129 is also
+ * invalid UTF-8, and high enough to be interesting.
+ * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
+ * for 32-bit platforms.
+ */
+#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
+        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
+#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
+        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
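+
+/* Worked out, INSTR_PROF_RAW_MAGIC_64 is the byte sequence, from the most
+ * significant byte down: 0xff 'l' 'p' 'r' 'o' 'f' 'r' 0x81; the 32-bit
+ * variant differs only in using 'R'. */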
+
+/* Raw profile format version (start from 1). */
+#define INSTR_PROF_RAW_VERSION 4
+/* Indexed profile format version (start from 1). */
+#define INSTR_PROF_INDEX_VERSION 5
+/* Coverage mapping format version (start from 0). */
+#define INSTR_PROF_COVMAP_VERSION 2
+
+/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
+ * version for other variants of profile. We set the lowest bit of the upper 8
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
+ * generated profile, and 0 if this is a Clang FE generated profile.
+ */
+#define VARIANT_MASKS_ALL 0xff00000000000000ULL
+#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
+#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
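+/* For example, a version word of (VARIANT_MASK_IR_PROF | 4) denotes an
+ * IR-level instrumentation profile in format version 4; GET_VERSION()
+ * strips the variant bits and yields 4. */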
+#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
+
+/* The variable that holds the name of the profile data
+ * specified via command line. */
+#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
+
+/* section name strings common to all targets other
+   than WIN32 */
+#define INSTR_PROF_DATA_COMMON __llvm_prf_data
+#define INSTR_PROF_NAME_COMMON __llvm_prf_names
+#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
+#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
+#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
+#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
+/* Win32 */
+#define INSTR_PROF_DATA_COFF .lprfd
+#define INSTR_PROF_NAME_COFF .lprfn
+#define INSTR_PROF_CNTS_COFF .lprfc
+#define INSTR_PROF_VALS_COFF .lprfv
+#define INSTR_PROF_VNODES_COFF .lprfnd
+#define INSTR_PROF_COVMAP_COFF .lcovmap
+
+#ifdef _WIN32
+/* Runtime section names and name strings.  */
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
+/* Array of pointers. Each pointer points to a list
+ * of value nodes associated with one value site.
+ */
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
+/* Value profile nodes section. */
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
+#else
+/* Runtime section names and name strings.  */
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COMMON
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COMMON
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COMMON
+/* Array of pointers. Each pointer points to a list
+ * of value nodes associated with one value site.
+ */
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COMMON
+/* Value profile nodes section. */
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COMMON
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COMMON
+#endif
+
+#define INSTR_PROF_DATA_SECT_NAME_STR                                          \
+  INSTR_PROF_QUOTE(INSTR_PROF_DATA_SECT_NAME)
+#define INSTR_PROF_NAME_SECT_NAME_STR                                          \
+  INSTR_PROF_QUOTE(INSTR_PROF_NAME_SECT_NAME)
+#define INSTR_PROF_CNTS_SECT_NAME_STR                                          \
+  INSTR_PROF_QUOTE(INSTR_PROF_CNTS_SECT_NAME)
+#define INSTR_PROF_COVMAP_SECT_NAME_STR                                        \
+  INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_SECT_NAME)
+#define INSTR_PROF_VALS_SECT_NAME_STR                                          \
+  INSTR_PROF_QUOTE(INSTR_PROF_VALS_SECT_NAME)
+#define INSTR_PROF_VNODES_SECT_NAME_STR                                        \
+  INSTR_PROF_QUOTE(INSTR_PROF_VNODES_SECT_NAME)
+
+/* Macros to define start/stop section symbol for a given
+ * section on Linux. For instance
+ * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
+ * expand to __start___llvm_prf_data.
+ */
+#define INSTR_PROF_SECT_START(Sect) \
+        INSTR_PROF_CONCAT(__start_,Sect)
+#define INSTR_PROF_SECT_STOP(Sect) \
+        INSTR_PROF_CONCAT(__stop_,Sect)
+
+/* Value Profiling API linkage name.  */
+#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
+#define INSTR_PROF_VALUE_PROF_FUNC_STR \
+        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
+        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
+
+/* InstrProfile per-function control data alignment.  */
+#define INSTR_PROF_DATA_ALIGNMENT 8
+
+/* The data structure that represents a tracked value by the
+ * value profiler.
+ */
+typedef struct InstrProfValueData {
+  /* Profiled value. */
+  uint64_t Value;
+  /* Number of times the value appears in the training run. */
+  uint64_t Count;
+} InstrProfValueData;
+
+#endif /* INSTR_PROF_DATA_INC */
+
+#else
+#undef INSTR_PROF_DATA_DEFINED
+#endif
diff --git a/linux-x64/clang/include/llvm/ProfileData/InstrProfReader.h b/linux-x64/clang/include/llvm/ProfileData/InstrProfReader.h
new file mode 100644
index 0000000..efc22dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/InstrProfReader.h
@@ -0,0 +1,456 @@
+//===- InstrProfReader.h - Instrumented profiling readers -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for reading profiling data for instrumentation
+// based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROFREADER_H
+#define LLVM_PROFILEDATA_INSTRPROFREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class InstrProfReader;
+
+/// A file format agnostic iterator over profiling data.
+class InstrProfIterator : public std::iterator<std::input_iterator_tag,
+                                               NamedInstrProfRecord> {
+  InstrProfReader *Reader = nullptr;
+  value_type Record;
+
+  void Increment();
+
+public:
+  InstrProfIterator() = default;
+  InstrProfIterator(InstrProfReader *Reader) : Reader(Reader) { Increment(); }
+
+  InstrProfIterator &operator++() { Increment(); return *this; }
+  bool operator==(const InstrProfIterator &RHS) { return Reader == RHS.Reader; }
+  bool operator!=(const InstrProfIterator &RHS) { return Reader != RHS.Reader; }
+  value_type &operator*() { return Record; }
+  value_type *operator->() { return &Record; }
+};
+
+/// Base class and interface for reading profiling data of any known instrprof
+/// format. Provides an iterator over NamedInstrProfRecords.
+class InstrProfReader {
+  instrprof_error LastError = instrprof_error::success;
+
+public:
+  InstrProfReader() = default;
+  virtual ~InstrProfReader() = default;
+
+  /// Read the header.  Required before reading first record.
+  virtual Error readHeader() = 0;
+
+  /// Read a single record.
+  virtual Error readNextRecord(NamedInstrProfRecord &Record) = 0;
+
+  /// Iterator over profile data.
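+  ///
+  /// A sketch of iterating over all records (\c Reader is assumed to be a
+  /// successfully created reader):
+  /// \code
+  ///   for (NamedInstrProfRecord &Record : *Reader)
+  ///     llvm::errs() << Record.Name << ": " << Record.Hash << "\n";
+  /// \endcode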
+  InstrProfIterator begin() { return InstrProfIterator(this); }
+  InstrProfIterator end() { return InstrProfIterator(); }
+
+  virtual bool isIRLevelProfile() const = 0;
+
+  /// Return the PGO symtab. There are three different readers:
+  /// Raw, Text, and Indexed profile readers. The first two types
+  /// of readers are used only by the llvm-profdata tool, while the indexed
+  /// profile reader is also used by the llvm-cov tool and the compiler
+  /// (backend or frontend). Since creating the PGO symtab can incur
+  /// significant runtime and memory overhead (it touches data for the
+  /// whole program), the InstrProfSymtab for the indexed profile reader
+  /// should be created on demand, and it is recommended to use it only
+  /// for dumping purposes with llvm-profdata, not with the compiler.
+  virtual InstrProfSymtab &getSymtab() = 0;
+
+protected:
+  std::unique_ptr<InstrProfSymtab> Symtab;
+
+  /// Set the current error and return it.
+  Error error(instrprof_error Err) {
+    LastError = Err;
+    if (Err == instrprof_error::success)
+      return Error::success();
+    return make_error<InstrProfError>(Err);
+  }
+
+  Error error(Error &&E) { return error(InstrProfError::take(std::move(E))); }
+
+  /// Clear the current error and return a successful one.
+  Error success() { return error(instrprof_error::success); }
+
+public:
+  /// Return true if the reader has finished reading the profile data.
+  bool isEOF() { return LastError == instrprof_error::eof; }
+
+  /// Return true if the reader encountered an error reading profiling data.
+  bool hasError() { return LastError != instrprof_error::success && !isEOF(); }
+
+  /// Get the current error.
+  Error getError() {
+    if (hasError())
+      return make_error<InstrProfError>(LastError);
+    return Error::success();
+  }
+
+  /// Factory method to create an appropriately typed reader for the given
+  /// instrprof file.
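+  ///
+  /// A minimal sketch of typical use (the path "foo.profraw" is illustrative;
+  /// error handling is elided):
+  /// \code
+  ///   auto ReaderOrErr = InstrProfReader::create("foo.profraw");
+  ///   if (ReaderOrErr)
+  ///     for (NamedInstrProfRecord &Record : **ReaderOrErr)
+  ///       ; // inspect Record.Name, Record.Hash, Record.Counts
+  /// \endcode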
+  static Expected<std::unique_ptr<InstrProfReader>> create(const Twine &Path);
+
+  static Expected<std::unique_ptr<InstrProfReader>>
+  create(std::unique_ptr<MemoryBuffer> Buffer);
+};
+
+/// Reader for the simple text based instrprof format.
+///
+/// This format is a simple text format that's suitable for test data. Records
+/// are separated by one or more blank lines, and record fields are separated by
+/// new lines.
+///
+/// Each record consists of a function name, a function hash, a number of
+/// counters, and then each counter value, in that order.
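+///
+/// For example, a hypothetical record with two counters (all values are
+/// illustrative):
+/// \code
+/// _Z3foov
+/// 1234
+/// 2
+/// 100
+/// 6
+/// \endcode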
+class TextInstrProfReader : public InstrProfReader {
+private:
+  /// The profile data file contents.
+  std::unique_ptr<MemoryBuffer> DataBuffer;
+  /// Iterator over the profile data.
+  line_iterator Line;
+  bool IsIRLevelProfile = false;
+
+  Error readValueProfileData(InstrProfRecord &Record);
+
+public:
+  TextInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer_)
+      : DataBuffer(std::move(DataBuffer_)), Line(*DataBuffer, true, '#') {}
+  TextInstrProfReader(const TextInstrProfReader &) = delete;
+  TextInstrProfReader &operator=(const TextInstrProfReader &) = delete;
+
+  /// Return true if the given buffer is in text instrprof format.
+  static bool hasFormat(const MemoryBuffer &Buffer);
+
+  bool isIRLevelProfile() const override { return IsIRLevelProfile; }
+
+  /// Read the header.
+  Error readHeader() override;
+
+  /// Read a single record.
+  Error readNextRecord(NamedInstrProfRecord &Record) override;
+
+  InstrProfSymtab &getSymtab() override {
+    assert(Symtab.get());
+    return *Symtab.get();
+  }
+};
+
+/// Reader for the raw instrprof binary format from the runtime.
+///
+/// This format is a raw memory dump of the instrumentation-based profiling
+/// data from the runtime.  It has no index.
+///
+/// Templated on the unsigned type whose size matches pointers on the platform
+/// that wrote the profile.
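+///
+/// A direct-use sketch (readers are normally obtained via
+/// InstrProfReader::create; the file name is illustrative and error handling
+/// is elided):
+/// \code
+///   auto BufOrErr = MemoryBuffer::getFile("default.profraw");
+///   if (BufOrErr && RawInstrProfReader64::hasFormat(**BufOrErr)) {
+///     RawInstrProfReader64 Reader(std::move(*BufOrErr));
+///     // readHeader(), then readNextRecord() in a loop until eof.
+///   }
+/// \endcode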
+template <class IntPtrT>
+class RawInstrProfReader : public InstrProfReader {
+private:
+  /// The profile data file contents.
+  std::unique_ptr<MemoryBuffer> DataBuffer;
+  bool ShouldSwapBytes;
+  // The value of the version field of the raw profile data header. The lower
+  // 56 bits specify the format version and the most significant 8 bits
+  // specify the variant types of the profile.
+  uint64_t Version;
+  uint64_t CountersDelta;
+  uint64_t NamesDelta;
+  const RawInstrProf::ProfileData<IntPtrT> *Data;
+  const RawInstrProf::ProfileData<IntPtrT> *DataEnd;
+  const uint64_t *CountersStart;
+  const char *NamesStart;
+  uint64_t NamesSize;
+  // After all value profile data has been read, this pointer points to
+  // the header of the next profile data (if one exists).
+  const uint8_t *ValueDataStart;
+  uint32_t ValueKindLast;
+  uint32_t CurValueDataSize;
+
+public:
+  RawInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
+      : DataBuffer(std::move(DataBuffer)) {}
+  RawInstrProfReader(const RawInstrProfReader &) = delete;
+  RawInstrProfReader &operator=(const RawInstrProfReader &) = delete;
+
+  static bool hasFormat(const MemoryBuffer &DataBuffer);
+  Error readHeader() override;
+  Error readNextRecord(NamedInstrProfRecord &Record) override;
+
+  bool isIRLevelProfile() const override {
+    return (Version & VARIANT_MASK_IR_PROF) != 0;
+  }
+
+  InstrProfSymtab &getSymtab() override {
+    assert(Symtab.get());
+    return *Symtab.get();
+  }
+
+private:
+  Error createSymtab(InstrProfSymtab &Symtab);
+  Error readNextHeader(const char *CurrentPos);
+  Error readHeader(const RawInstrProf::Header &Header);
+
+  template <class IntT> IntT swap(IntT Int) const {
+    return ShouldSwapBytes ? sys::getSwappedBytes(Int) : Int;
+  }
+
+  support::endianness getDataEndianness() const {
+    support::endianness HostEndian = getHostEndianness();
+    if (!ShouldSwapBytes)
+      return HostEndian;
+    if (HostEndian == support::little)
+      return support::big;
+    else
+      return support::little;
+  }
+
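+  // Padding rounds a size up to the next 8-byte boundary; e.g. a 13-byte
+  // name region gets 3 padding bytes, while a 16-byte one gets 0.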
+  inline uint8_t getNumPaddingBytes(uint64_t SizeInBytes) {
+    return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
+  }
+
+  Error readName(NamedInstrProfRecord &Record);
+  Error readFuncHash(NamedInstrProfRecord &Record);
+  Error readRawCounts(InstrProfRecord &Record);
+  Error readValueProfilingData(InstrProfRecord &Record);
+  bool atEnd() const { return Data == DataEnd; }
+
+  void advanceData() {
+    Data++;
+    ValueDataStart += CurValueDataSize;
+  }
+
+  const char *getNextHeaderPos() const {
+      assert(atEnd());
+      return (const char *)ValueDataStart;
+  }
+
+  const uint64_t *getCounter(IntPtrT CounterPtr) const {
+    ptrdiff_t Offset = (swap(CounterPtr) - CountersDelta) / sizeof(uint64_t);
+    return CountersStart + Offset;
+  }
+
+  StringRef getName(uint64_t NameRef) const {
+    return Symtab->getFuncName(swap(NameRef));
+  }
+};
+
+using RawInstrProfReader32 = RawInstrProfReader<uint32_t>;
+using RawInstrProfReader64 = RawInstrProfReader<uint64_t>;
+
+namespace IndexedInstrProf {
+
+enum class HashT : uint32_t;
+
+} // end namespace IndexedInstrProf
+
+/// Trait for lookups into the on-disk hash table for the binary instrprof
+/// format.
+class InstrProfLookupTrait {
+  std::vector<NamedInstrProfRecord> DataBuffer;
+  IndexedInstrProf::HashT HashType;
+  unsigned FormatVersion;
+  // Endianness of the input value profile data.
+  // It should be LE by default, but can be changed
+  // for testing purposes.
+  support::endianness ValueProfDataEndianness = support::little;
+
+public:
+  InstrProfLookupTrait(IndexedInstrProf::HashT HashType, unsigned FormatVersion)
+      : HashType(HashType), FormatVersion(FormatVersion) {}
+
+  using data_type = ArrayRef<NamedInstrProfRecord>;
+
+  using internal_key_type = StringRef;
+  using external_key_type = StringRef;
+  using hash_value_type = uint64_t;
+  using offset_type = uint64_t;
+
+  static bool EqualKey(StringRef A, StringRef B) { return A == B; }
+  static StringRef GetInternalKey(StringRef K) { return K; }
+  static StringRef GetExternalKey(StringRef K) { return K; }
+
+  hash_value_type ComputeHash(StringRef K);
+
+  static std::pair<offset_type, offset_type>
+  ReadKeyDataLength(const unsigned char *&D) {
+    using namespace support;
+
+    offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
+    offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
+    return std::make_pair(KeyLen, DataLen);
+  }
+
+  StringRef ReadKey(const unsigned char *D, offset_type N) {
+    return StringRef((const char *)D, N);
+  }
+
+  bool readValueProfilingData(const unsigned char *&D,
+                              const unsigned char *const End);
+  data_type ReadData(StringRef K, const unsigned char *D, offset_type N);
+
+  // Used for testing purposes only.
+  void setValueProfDataEndianness(support::endianness Endianness) {
+    ValueProfDataEndianness = Endianness;
+  }
+};
+
+struct InstrProfReaderIndexBase {
+  virtual ~InstrProfReaderIndexBase() = default;
+
+  // Read all the profile records with the same key pointed to by the current
+  // iterator.
+  virtual Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) = 0;
+
+  // Read all the profile records with the key equal to FuncName.
+  virtual Error getRecords(StringRef FuncName,
+                           ArrayRef<NamedInstrProfRecord> &Data) = 0;
+  virtual void advanceToNextKey() = 0;
+  virtual bool atEnd() const = 0;
+  virtual void setValueProfDataEndianness(support::endianness Endianness) = 0;
+  virtual uint64_t getVersion() const = 0;
+  virtual bool isIRLevelProfile() const = 0;
+  virtual Error populateSymtab(InstrProfSymtab &) = 0;
+};
+
+using OnDiskHashTableImplV3 =
+    OnDiskIterableChainedHashTable<InstrProfLookupTrait>;
+
+template <typename HashTableImpl>
+class InstrProfReaderIndex : public InstrProfReaderIndexBase {
+private:
+  std::unique_ptr<HashTableImpl> HashTable;
+  typename HashTableImpl::data_iterator RecordIterator;
+  uint64_t FormatVersion;
+
+public:
+  InstrProfReaderIndex(const unsigned char *Buckets,
+                       const unsigned char *const Payload,
+                       const unsigned char *const Base,
+                       IndexedInstrProf::HashT HashType, uint64_t Version);
+  ~InstrProfReaderIndex() override = default;
+
+  Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) override;
+  Error getRecords(StringRef FuncName,
+                   ArrayRef<NamedInstrProfRecord> &Data) override;
+  void advanceToNextKey() override { RecordIterator++; }
+
+  bool atEnd() const override {
+    return RecordIterator == HashTable->data_end();
+  }
+
+  void setValueProfDataEndianness(support::endianness Endianness) override {
+    HashTable->getInfoObj().setValueProfDataEndianness(Endianness);
+  }
+
+  uint64_t getVersion() const override { return GET_VERSION(FormatVersion); }
+
+  bool isIRLevelProfile() const override {
+    return (FormatVersion & VARIANT_MASK_IR_PROF) != 0;
+  }
+
+  Error populateSymtab(InstrProfSymtab &Symtab) override {
+    return Symtab.create(HashTable->keys());
+  }
+};
+
+/// Reader for the indexed binary instrprof format.
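+///
+/// A minimal usage sketch (the path, function name, and hash are
+/// illustrative; error handling is elided):
+/// \code
+///   auto ReaderOrErr = IndexedInstrProfReader::create("default.profdata");
+///   if (ReaderOrErr) {
+///     std::vector<uint64_t> Counts;
+///     if (Error E = (*ReaderOrErr)->getFunctionCounts(
+///             "_Z3foov", /*FuncHash=*/1234, Counts))
+///       consumeError(std::move(E));
+///   }
+/// \endcode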
+class IndexedInstrProfReader : public InstrProfReader {
+private:
+  /// The profile data file contents.
+  std::unique_ptr<MemoryBuffer> DataBuffer;
+  /// The index into the profile data.
+  std::unique_ptr<InstrProfReaderIndexBase> Index;
+  /// Profile summary data.
+  std::unique_ptr<ProfileSummary> Summary;
+  // Index to the current record in the record array.
+  unsigned RecordIndex;
+
+  // Read the profile summary. Return a pointer to one byte past the end of
+  // the summary data if the summary exists, otherwise return the input \c Cur.
+  const unsigned char *readSummary(IndexedInstrProf::ProfVersion Version,
+                                   const unsigned char *Cur);
+
+public:
+  IndexedInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
+      : DataBuffer(std::move(DataBuffer)), RecordIndex(0) {}
+  IndexedInstrProfReader(const IndexedInstrProfReader &) = delete;
+  IndexedInstrProfReader &operator=(const IndexedInstrProfReader &) = delete;
+
+  /// Return the profile version.
+  uint64_t getVersion() const { return Index->getVersion(); }
+  bool isIRLevelProfile() const override { return Index->isIRLevelProfile(); }
+
+  /// Return true if the given buffer is in an indexed instrprof format.
+  static bool hasFormat(const MemoryBuffer &DataBuffer);
+
+  /// Read the file header.
+  Error readHeader() override;
+  /// Read a single record.
+  Error readNextRecord(NamedInstrProfRecord &Record) override;
+
+  /// Return the NamedInstrProfRecord associated with FuncName and FuncHash
+  Expected<InstrProfRecord> getInstrProfRecord(StringRef FuncName,
+                                               uint64_t FuncHash);
+
+  /// Fill Counts with the profile data for the given function name.
+  Error getFunctionCounts(StringRef FuncName, uint64_t FuncHash,
+                          std::vector<uint64_t> &Counts);
+
+  /// Return the maximum of all known function counts.
+  uint64_t getMaximumFunctionCount() { return Summary->getMaxFunctionCount(); }
+
+  /// Factory method to create an indexed reader.
+  static Expected<std::unique_ptr<IndexedInstrProfReader>>
+  create(const Twine &Path);
+
+  static Expected<std::unique_ptr<IndexedInstrProfReader>>
+  create(std::unique_ptr<MemoryBuffer> Buffer);
+
+  // Used for testing purposes only.
+  void setValueProfDataEndianness(support::endianness Endianness) {
+    Index->setValueProfDataEndianness(Endianness);
+  }
+
+  // See description in the base class. This interface is designed
+  // to be used by llvm-profdata (for dumping). Avoid using this when
+  // the client is the compiler.
+  InstrProfSymtab &getSymtab() override;
+  ProfileSummary &getSummary() { return *(Summary.get()); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_INSTRPROFREADER_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/InstrProfWriter.h b/linux-x64/clang/include/llvm/ProfileData/InstrProfWriter.h
new file mode 100644
index 0000000..8107ab3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/InstrProfWriter.h
@@ -0,0 +1,102 @@
+//===- InstrProfWriter.h - Instrumented profiling writer --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing profiling data for instrumentation
+// based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROFWRITER_H
+#define LLVM_PROFILEDATA_INSTRPROFWRITER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class InstrProfRecordWriterTrait;
+class ProfOStream;
+class raw_fd_ostream;
+
+/// Writer for instrumentation based profile data.
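+///
+/// A usage sketch (the warning callback and output stream are illustrative;
+/// error handling is elided):
+/// \code
+///   InstrProfWriter Writer;
+///   Writer.addRecord(std::move(Record),
+///                    [](Error E) { consumeError(std::move(E)); });
+///   Writer.write(OS); // OS is a raw_fd_ostream
+/// \endcode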
+class InstrProfWriter {
+public:
+  using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord>;
+  enum ProfKind { PF_Unknown = 0, PF_FE, PF_IRLevel };
+
+private:
+  bool Sparse;
+  StringMap<ProfilingData> FunctionData;
+  ProfKind ProfileKind = PF_Unknown;
+  // Use raw pointer here for the incomplete type object.
+  InstrProfRecordWriterTrait *InfoObj;
+
+public:
+  InstrProfWriter(bool Sparse = false);
+  ~InstrProfWriter();
+
+  /// Add function counts for the given function. If there are already counts
+  /// for this function and the hash and number of counts match, each counter is
+  /// summed. Optionally scale counts by \p Weight.
+  void addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
+                 function_ref<void(Error)> Warn);
+  void addRecord(NamedInstrProfRecord &&I, function_ref<void(Error)> Warn) {
+    addRecord(std::move(I), 1, Warn);
+  }
+
+  /// Merge existing function counts from the given writer.
+  void mergeRecordsFromWriter(InstrProfWriter &&IPW,
+                              function_ref<void(Error)> Warn);
+
+  /// Write the profile to \c OS
+  void write(raw_fd_ostream &OS);
+
+  /// Write the profile in text format to \c OS
+  Error writeText(raw_fd_ostream &OS);
+
+  /// Write \c Record in text format to \c OS
+  static void writeRecordInText(StringRef Name, uint64_t Hash,
+                                const InstrProfRecord &Counters,
+                                InstrProfSymtab &Symtab, raw_fd_ostream &OS);
+
+  /// Write the profile, returning the raw data. For testing.
+  std::unique_ptr<MemoryBuffer> writeBuffer();
+
+  /// Set the ProfileKind. Report error if mixing FE and IR level profiles.
+  Error setIsIRLevelProfile(bool IsIRLevel) {
+    if (ProfileKind == PF_Unknown) {
+      ProfileKind = IsIRLevel ? PF_IRLevel: PF_FE;
+      return Error::success();
+    }
+    return (IsIRLevel == (ProfileKind == PF_IRLevel))
+               ? Error::success()
+               : make_error<InstrProfError>(
+                     instrprof_error::unsupported_version);
+  }
+
+  // Internal interface for testing purposes only.
+  void setValueProfDataEndianness(support::endianness Endianness);
+  void setOutputSparse(bool Sparse);
+
+private:
+  void addRecord(StringRef Name, uint64_t Hash, InstrProfRecord &&I,
+                 uint64_t Weight, function_ref<void(Error)> Warn);
+  bool shouldEncodeData(const ProfilingData &PD);
+  void writeImpl(ProfOStream &OS);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_INSTRPROFWRITER_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/ProfileCommon.h b/linux-x64/clang/include/llvm/ProfileData/ProfileCommon.h
new file mode 100644
index 0000000..51b065b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/ProfileCommon.h
@@ -0,0 +1,102 @@
+//===- ProfileCommon.h - Common profiling APIs. -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains data structures and functions common to both instrumented
+// and sample profiling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_PROFILECOMMON_H
+#define LLVM_PROFILEDATA_PROFILECOMMON_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace sampleprof {
+
+class FunctionSamples;
+
+} // end namespace sampleprof
+
+inline const char *getHotSectionPrefix() { return ".hot"; }
+inline const char *getUnlikelySectionPrefix() { return ".unlikely"; }
+
+class ProfileSummaryBuilder {
+private:
+  /// We keep track of the number of times a count (block count or samples)
+  /// appears in the profile. The map is kept sorted in the descending order of
+  /// counts.
+  std::map<uint64_t, uint32_t, std::greater<uint64_t>> CountFrequencies;
+  std::vector<uint32_t> DetailedSummaryCutoffs;
+
+protected:
+  SummaryEntryVector DetailedSummary;
+  uint64_t TotalCount = 0;
+  uint64_t MaxCount = 0;
+  uint64_t MaxFunctionCount = 0;
+  uint32_t NumCounts = 0;
+  uint32_t NumFunctions = 0;
+
+  ProfileSummaryBuilder(std::vector<uint32_t> Cutoffs)
+      : DetailedSummaryCutoffs(std::move(Cutoffs)) {}
+  ~ProfileSummaryBuilder() = default;
+
+  inline void addCount(uint64_t Count);
+  void computeDetailedSummary();
+
+public:
+  /// \brief A vector of useful cutoff values for detailed summary.
+  static const ArrayRef<uint32_t> DefaultCutoffs;
+};
+
+class InstrProfSummaryBuilder final : public ProfileSummaryBuilder {
+  uint64_t MaxInternalBlockCount = 0;
+
+  inline void addEntryCount(uint64_t Count);
+  inline void addInternalCount(uint64_t Count);
+
+public:
+  InstrProfSummaryBuilder(std::vector<uint32_t> Cutoffs)
+      : ProfileSummaryBuilder(std::move(Cutoffs)) {}
+
+  void addRecord(const InstrProfRecord &);
+  std::unique_ptr<ProfileSummary> getSummary();
+};
+
+class SampleProfileSummaryBuilder final : public ProfileSummaryBuilder {
+public:
+  SampleProfileSummaryBuilder(std::vector<uint32_t> Cutoffs)
+      : ProfileSummaryBuilder(std::move(Cutoffs)) {}
+
+  void addRecord(const sampleprof::FunctionSamples &FS);
+  std::unique_ptr<ProfileSummary> getSummary();
+};
+
+/// This is called when a count is seen in the profile.
+void ProfileSummaryBuilder::addCount(uint64_t Count) {
+  TotalCount += Count;
+  if (Count > MaxCount)
+    MaxCount = Count;
+  NumCounts++;
+  CountFrequencies[Count]++;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_PROFILECOMMON_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/SampleProf.h b/linux-x64/clang/include/llvm/ProfileData/SampleProf.h
new file mode 100644
index 0000000..d79ef3b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/SampleProf.h
@@ -0,0 +1,471 @@
+//===- SampleProf.h - Sampling profiling format support ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common definitions used in the reading and writing of
+// sample profile data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_SAMPLEPROF_H
+#define LLVM_PROFILEDATA_SAMPLEPROF_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cstdint>
+#include <map>
+#include <string>
+#include <system_error>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+const std::error_category &sampleprof_category();
+
+enum class sampleprof_error {
+  success = 0,
+  bad_magic,
+  unsupported_version,
+  too_large,
+  truncated,
+  malformed,
+  unrecognized_format,
+  unsupported_writing_format,
+  truncated_name_table,
+  not_implemented,
+  counter_overflow
+};
+
+inline std::error_code make_error_code(sampleprof_error E) {
+  return std::error_code(static_cast<int>(E), sampleprof_category());
+}
+
+inline sampleprof_error MergeResult(sampleprof_error &Accumulator,
+                                    sampleprof_error Result) {
+  // Prefer first error encountered as later errors may be secondary effects of
+  // the initial problem.
+  if (Accumulator == sampleprof_error::success &&
+      Result != sampleprof_error::success)
+    Accumulator = Result;
+  return Accumulator;
+}
+
+} // end namespace llvm
+
+namespace std {
+
+template <>
+struct is_error_code_enum<llvm::sampleprof_error> : std::true_type {};
+
+} // end namespace std
+
+namespace llvm {
+namespace sampleprof {
+
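+/// Magic number for sample profile data files: the ASCII bytes "SPROF42"
+/// followed by 0xff, i.e. 0x5350524f463432ff.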
+static inline uint64_t SPMagic() {
+  return uint64_t('S') << (64 - 8) | uint64_t('P') << (64 - 16) |
+         uint64_t('R') << (64 - 24) | uint64_t('O') << (64 - 32) |
+         uint64_t('F') << (64 - 40) | uint64_t('4') << (64 - 48) |
+         uint64_t('2') << (64 - 56) | uint64_t(0xff);
+}
+
+static inline uint64_t SPVersion() { return 103; }
+
+/// Represents the relative location of an instruction.
+///
+/// Instruction locations are specified by the line offset from the
+/// beginning of the function (marked by the line where the function
+/// header is) and the discriminator value within that line.
+///
+/// The discriminator value is useful to distinguish instructions
+/// that are on the same line but belong to different basic blocks
+/// (e.g., the two post-increment instructions in "if (p) x++; else y++;").
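+///
+/// For example, a hypothetical instruction five lines below the function
+/// header, on the second discriminated path of that line, would be addressed
+/// as:
+/// \code
+///   LineLocation Loc(/*LineOffset=*/5, /*Discriminator=*/1);
+/// \endcode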
+struct LineLocation {
+  LineLocation(uint32_t L, uint32_t D) : LineOffset(L), Discriminator(D) {}
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  bool operator<(const LineLocation &O) const {
+    return LineOffset < O.LineOffset ||
+           (LineOffset == O.LineOffset && Discriminator < O.Discriminator);
+  }
+
+  uint32_t LineOffset;
+  uint32_t Discriminator;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const LineLocation &Loc);
+
+/// Representation of a single sample record.
+///
+/// A sample record is represented by a positive integer value, which
+/// indicates how frequently the associated line location was executed.
+///
+/// Additionally, if the associated location contains a function call,
+/// the record will hold a list of all the possible called targets. For
+/// direct calls, this will be the exact function being invoked. For
+/// indirect calls (function pointers, virtual table dispatch), this
+/// will be a list of one or more functions.
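+///
+/// A sketch of building a record by hand (names and counts are illustrative):
+/// \code
+///   SampleRecord R;
+///   R.addSamples(100);
+///   R.addCalledTarget("_Z3barv", 60);
+///   R.addCalledTarget("_Z3bazv", 40);
+/// \endcode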
+class SampleRecord {
+public:
+  using CallTargetMap = StringMap<uint64_t>;
+
+  SampleRecord() = default;
+
+  /// Increment the number of samples for this record by \p S.
+  /// Optionally scale sample count \p S by \p Weight.
+  ///
+  /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
+  /// around unsigned integers.
+  sampleprof_error addSamples(uint64_t S, uint64_t Weight = 1) {
+    bool Overflowed;
+    NumSamples = SaturatingMultiplyAdd(S, Weight, NumSamples, &Overflowed);
+    return Overflowed ? sampleprof_error::counter_overflow
+                      : sampleprof_error::success;
+  }
+
+  /// Add called function \p F with samples \p S.
+  /// Optionally scale sample count \p S by \p Weight.
+  ///
+  /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
+  /// around unsigned integers.
+  sampleprof_error addCalledTarget(StringRef F, uint64_t S,
+                                   uint64_t Weight = 1) {
+    uint64_t &TargetSamples = CallTargets[F];
+    bool Overflowed;
+    TargetSamples =
+        SaturatingMultiplyAdd(S, Weight, TargetSamples, &Overflowed);
+    return Overflowed ? sampleprof_error::counter_overflow
+                      : sampleprof_error::success;
+  }
+
+  /// Return true if this sample record contains function calls.
+  bool hasCalls() const { return !CallTargets.empty(); }
+
+  uint64_t getSamples() const { return NumSamples; }
+  const CallTargetMap &getCallTargets() const { return CallTargets; }
+
+  /// Merge the samples in \p Other into this record.
+  /// Optionally scale sample counts by \p Weight.
+  sampleprof_error merge(const SampleRecord &Other, uint64_t Weight = 1) {
+    sampleprof_error Result = addSamples(Other.getSamples(), Weight);
+    for (const auto &I : Other.getCallTargets()) {
+      MergeResult(Result, addCalledTarget(I.first(), I.second, Weight));
+    }
+    return Result;
+  }
+
+  void print(raw_ostream &OS, unsigned Indent) const;
+  void dump() const;
+
+private:
+  uint64_t NumSamples = 0;
+  CallTargetMap CallTargets;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const SampleRecord &Sample);
+
+class FunctionSamples;
+
+using BodySampleMap = std::map<LineLocation, SampleRecord>;
+// NOTE: Using a StringMap here makes parsed profiles consume around 17% more
+// memory, which is *very* significant for large profiles.
+using FunctionSamplesMap = std::map<std::string, FunctionSamples>;
+using CallsiteSampleMap = std::map<LineLocation, FunctionSamplesMap>;
+
+/// Representation of the samples collected for a function.
+///
+/// This data structure contains all the collected samples for the body
+/// of a function. Each sample corresponds to a LineLocation instance
+/// within the body of the function.
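+///
+/// A sketch of how samples are attached (all values are illustrative):
+/// \code
+///   FunctionSamples FS;
+///   FS.setName("_Z3foov");
+///   FS.addTotalSamples(1000);
+///   FS.addHeadSamples(10);
+///   FS.addBodySamples(/*LineOffset=*/1, /*Discriminator=*/0, /*Num=*/500);
+/// \endcode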
+class FunctionSamples {
+public:
+  FunctionSamples() = default;
+
+  void print(raw_ostream &OS = dbgs(), unsigned Indent = 0) const;
+  void dump() const;
+
+  sampleprof_error addTotalSamples(uint64_t Num, uint64_t Weight = 1) {
+    bool Overflowed;
+    TotalSamples =
+        SaturatingMultiplyAdd(Num, Weight, TotalSamples, &Overflowed);
+    return Overflowed ? sampleprof_error::counter_overflow
+                      : sampleprof_error::success;
+  }
+
+  sampleprof_error addHeadSamples(uint64_t Num, uint64_t Weight = 1) {
+    bool Overflowed;
+    TotalHeadSamples =
+        SaturatingMultiplyAdd(Num, Weight, TotalHeadSamples, &Overflowed);
+    return Overflowed ? sampleprof_error::counter_overflow
+                      : sampleprof_error::success;
+  }
+
+  sampleprof_error addBodySamples(uint32_t LineOffset, uint32_t Discriminator,
+                                  uint64_t Num, uint64_t Weight = 1) {
+    return BodySamples[LineLocation(LineOffset, Discriminator)].addSamples(
+        Num, Weight);
+  }
+
+  sampleprof_error addCalledTargetSamples(uint32_t LineOffset,
+                                          uint32_t Discriminator,
+                                          StringRef FName, uint64_t Num,
+                                          uint64_t Weight = 1) {
+    return BodySamples[LineLocation(LineOffset, Discriminator)].addCalledTarget(
+        FName, Num, Weight);
+  }
+
+  /// Return the number of samples collected at the given location.
+  /// Each location is specified by \p LineOffset and \p Discriminator.
+  /// If the location is not found in profile, return error.
+  ErrorOr<uint64_t> findSamplesAt(uint32_t LineOffset,
+                                  uint32_t Discriminator) const {
+    const auto &ret = BodySamples.find(LineLocation(LineOffset, Discriminator));
+    if (ret == BodySamples.end())
+      return std::error_code();
+    else
+      return ret->second.getSamples();
+  }
+
+  /// Returns the call target map collected at a given location.
+  /// Each location is specified by \p LineOffset and \p Discriminator.
+  /// If the location is not found in profile, return error.
+  ErrorOr<SampleRecord::CallTargetMap>
+  findCallTargetMapAt(uint32_t LineOffset, uint32_t Discriminator) const {
+    const auto &ret = BodySamples.find(LineLocation(LineOffset, Discriminator));
+    if (ret == BodySamples.end())
+      return std::error_code();
+    return ret->second.getCallTargets();
+  }
+
+  /// Return the function samples at the given callsite location.
+  FunctionSamplesMap &functionSamplesAt(const LineLocation &Loc) {
+    return CallsiteSamples[Loc];
+  }
+
+  /// Returns the FunctionSamplesMap at the given \p Loc.
+  const FunctionSamplesMap *
+  findFunctionSamplesMapAt(const LineLocation &Loc) const {
+    auto iter = CallsiteSamples.find(Loc);
+    if (iter == CallsiteSamples.end())
+      return nullptr;
+    return &iter->second;
+  }
+
+  /// Returns a pointer to FunctionSamples at the given callsite location \p Loc
+  /// with callee \p CalleeName. If no callsite can be found, relax the
+  /// restriction to return the FunctionSamples at callsite location \p Loc
+  /// with the maximum total sample count.
+  const FunctionSamples *findFunctionSamplesAt(const LineLocation &Loc,
+                                               StringRef CalleeName) const {
+    auto iter = CallsiteSamples.find(Loc);
+    if (iter == CallsiteSamples.end())
+      return nullptr;
+    auto FS = iter->second.find(CalleeName);
+    if (FS != iter->second.end())
+      return &FS->second;
+    // If we cannot find exact match of the callee name, return the FS with
+    // the max total count.
+    uint64_t MaxTotalSamples = 0;
+    const FunctionSamples *R = nullptr;
+    for (const auto &NameFS : iter->second)
+      if (NameFS.second.getTotalSamples() >= MaxTotalSamples) {
+        MaxTotalSamples = NameFS.second.getTotalSamples();
+        R = &NameFS.second;
+      }
+    return R;
+  }
+
+  bool empty() const { return TotalSamples == 0; }
+
+  /// Return the total number of samples collected inside the function.
+  uint64_t getTotalSamples() const { return TotalSamples; }
+
+  /// Return the total number of branch samples that have the function as the
+  /// branch target. This should be equivalent to the sample count of the first
+  /// instruction of the symbol. But since we get this info directly from the
+  /// raw profile, without referring to potentially inaccurate debug info, it
+  /// gives more accurate profile data and is preferred for standalone symbols.
+  uint64_t getHeadSamples() const { return TotalHeadSamples; }
+
+  /// Return the sample count of the first instruction of the function.
+  /// The function can be either a standalone symbol or an inlined function.
+  uint64_t getEntrySamples() const {
+    // Use either BodySamples or CallsiteSamples, whichever has the smaller
+    // line number.
+    if (!BodySamples.empty() &&
+        (CallsiteSamples.empty() ||
+         BodySamples.begin()->first < CallsiteSamples.begin()->first))
+      return BodySamples.begin()->second.getSamples();
+    if (!CallsiteSamples.empty()) {
+      uint64_t T = 0;
+      // An indirect callsite may be promoted to several inlined direct calls.
+      // We need to get the sum of them.
+      for (const auto &N_FS : CallsiteSamples.begin()->second)
+        T += N_FS.second.getEntrySamples();
+      return T;
+    }
+    return 0;
+  }
+
+  /// Return all the samples collected in the body of the function.
+  const BodySampleMap &getBodySamples() const { return BodySamples; }
+
+  /// Return all the callsite samples collected in the body of the function.
+  const CallsiteSampleMap &getCallsiteSamples() const {
+    return CallsiteSamples;
+  }
+
+  /// Merge the samples in \p Other into this one.
+  /// Optionally scale samples by \p Weight.
+  sampleprof_error merge(const FunctionSamples &Other, uint64_t Weight = 1) {
+    sampleprof_error Result = sampleprof_error::success;
+    Name = Other.getName();
+    MergeResult(Result, addTotalSamples(Other.getTotalSamples(), Weight));
+    MergeResult(Result, addHeadSamples(Other.getHeadSamples(), Weight));
+    for (const auto &I : Other.getBodySamples()) {
+      const LineLocation &Loc = I.first;
+      const SampleRecord &Rec = I.second;
+      MergeResult(Result, BodySamples[Loc].merge(Rec, Weight));
+    }
+    for (const auto &I : Other.getCallsiteSamples()) {
+      const LineLocation &Loc = I.first;
+      FunctionSamplesMap &FSMap = functionSamplesAt(Loc);
+      for (const auto &Rec : I.second)
+        MergeResult(Result, FSMap[Rec.first].merge(Rec.second, Weight));
+    }
+    return Result;
+  }
+
+  /// Recursively traverse all children; if the total sample count of the
+  /// corresponding function is no less than \p Threshold, add its
+  /// corresponding GUID to \p S. Also traverse the BodySamples to add hot
+  /// CallTargets' GUIDs to \p S.
+  void findInlinedFunctions(DenseSet<GlobalValue::GUID> &S, const Module *M,
+                            uint64_t Threshold) const {
+    if (TotalSamples <= Threshold)
+      return;
+    S.insert(Function::getGUID(Name));
+    // Import hot CallTargets, which may not be available in IR because full
+    // profile annotation cannot be done until backend compilation in ThinLTO.
+    for (const auto &BS : BodySamples)
+      for (const auto &TS : BS.second.getCallTargets())
+        if (TS.getValue() > Threshold) {
+          Function *Callee = M->getFunction(TS.getKey());
+          if (!Callee || !Callee->getSubprogram())
+            S.insert(Function::getGUID(TS.getKey()));
+        }
+    for (const auto &CS : CallsiteSamples)
+      for (const auto &NameFS : CS.second)
+        NameFS.second.findInlinedFunctions(S, M, Threshold);
+  }
+
+  /// Set the name of the function.
+  void setName(StringRef FunctionName) { Name = FunctionName; }
+
+  /// Return the function name.
+  const StringRef &getName() const { return Name; }
+
+  /// Returns the line offset to the start line of the subprogram.
+  /// We assume that a single function will not exceed 65535 LOC.
+  static unsigned getOffset(const DILocation *DIL);
+
+  /// \brief Get the FunctionSamples of the inline instance where DIL originates
+  /// from.
+  ///
+  /// The FunctionSamples of the instruction (Machine or IR) associated to
+  /// \p DIL is the inlined instance in which that instruction is coming from.
+  /// We traverse the inline stack of that instruction, and match it with the
+  /// tree nodes in the profile.
+  ///
+  /// \returns the FunctionSamples pointer to the inlined instance.
+  const FunctionSamples *findFunctionSamples(const DILocation *DIL) const;
+
+private:
+  /// Mangled name of the function.
+  StringRef Name;
+
+  /// Total number of samples collected inside this function.
+  ///
+  /// Samples are cumulative, they include all the samples collected
+  /// inside this function and all its inlined callees.
+  uint64_t TotalSamples = 0;
+
+  /// Total number of samples collected at the head of the function.
+  /// This is an approximation of the number of calls made to this function
+  /// at runtime.
+  uint64_t TotalHeadSamples = 0;
+
+  /// Map instruction locations to collected samples.
+  ///
+  /// Each entry in this map contains the number of samples
+  /// collected at the corresponding line offset. All line locations
+  /// are an offset from the start of the function.
+  BodySampleMap BodySamples;
+
+  /// Map call sites to collected samples for the called function.
+  ///
+  /// Each entry in this map corresponds to all the samples
+  /// collected for the inlined function call at the given
+  /// location. For example, given:
+  ///
+  ///     void foo() {
+  ///  1    bar();
+  ///  ...
+  ///  8    baz();
+  ///     }
+  ///
+  /// If the bar() and baz() calls were inlined inside foo(), this
+  /// map will contain two entries.  One for all the samples collected
+  /// in the call to bar() at line offset 1, the other for all the samples
+  /// collected in the call to baz() at line offset 8.
+  CallsiteSampleMap CallsiteSamples;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const FunctionSamples &FS);
+
+/// Sort a LocationT->SampleT map by LocationT.
+///
+/// It produces a list of <LocationT, SampleT> records sorted in ascending
+/// order of LocationT.
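+///
+/// For example (a sketch over a function's body samples):
+/// \code
+///   SampleSorter<LineLocation, SampleRecord> Sorted(FS.getBodySamples());
+///   for (const auto *Entry : Sorted.get())
+///     ; // Entry->first is the LineLocation, Entry->second the SampleRecord
+/// \endcode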
+template <class LocationT, class SampleT> class SampleSorter {
+public:
+  using SamplesWithLoc = std::pair<const LocationT, SampleT>;
+  using SamplesWithLocList = SmallVector<const SamplesWithLoc *, 20>;
+
+  SampleSorter(const std::map<LocationT, SampleT> &Samples) {
+    for (const auto &I : Samples)
+      V.push_back(&I);
+    std::stable_sort(V.begin(), V.end(),
+                     [](const SamplesWithLoc *A, const SamplesWithLoc *B) {
+                       return A->first < B->first;
+                     });
+  }
+
+  const SamplesWithLocList &get() const { return V; }
+
+private:
+  SamplesWithLocList V;
+};
+
+} // end namespace sampleprof
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROF_H
diff --git a/linux-x64/clang/include/llvm/ProfileData/SampleProfReader.h b/linux-x64/clang/include/llvm/ProfileData/SampleProfReader.h
new file mode 100644
index 0000000..0e9ab2d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/SampleProfReader.h
@@ -0,0 +1,462 @@
+//===- SampleProfReader.h - Read LLVM sample profile data -------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions needed for reading sample profiles.
+//
+// NOTE: If you are making changes to this file format, please remember
+//       to document them in the Clang documentation at
+//       tools/clang/docs/UsersManual.rst.
+//
+// Text format
+// -----------
+//
+// Sample profiles are written as ASCII text. The file is divided into
+// sections, which correspond to each of the functions executed at runtime.
+// Each section has the following format
+//
+//     function1:total_samples:total_head_samples
+//      offset1[.discriminator]: number_of_samples [fn1:num fn2:num ... ]
+//      offset2[.discriminator]: number_of_samples [fn3:num fn4:num ... ]
+//      ...
+//      offsetN[.discriminator]: number_of_samples [fn5:num fn6:num ... ]
+//      offsetA[.discriminator]: fnA:num_of_total_samples
+//       offsetA1[.discriminator]: number_of_samples [fn7:num fn8:num ... ]
+//       ...
+//
+// This is a nested tree in which the indentation represents the nesting level
+// of the inline stack. There are no blank lines in the file, and the spacing
+// within a single line is fixed. Additional spaces will result in an error
+// while reading the file.
+//
+// Any line starting with the '#' character is completely ignored.
+//
+// Inlined calls are represented with indentation. The Inline stack is a
+// stack of source locations in which the top of the stack represents the
+// leaf function, and the bottom of the stack represents the actual
+// symbol to which the instruction belongs.
+//
+// Function names must be mangled in order for the profile loader to
+// match them in the current translation unit. The two numbers in the
+// function header specify how many total samples were accumulated in the
+// function (first number), and the total number of samples accumulated
+// in the prologue of the function (second number). This head sample
+// count provides an indicator of how frequently the function is invoked.
+//
+// There are two types of lines in the function body.
+//
+// * Sampled line represents the profile information of a source location.
+// * Callsite line represents the profile information of a callsite.
+//
+// Each sampled line may contain several items. Some are optional (marked
+// below):
+//
+// a. Source line offset. This number represents the line number
+//    in the function where the sample was collected. The line number is
+//    always relative to the line where symbol of the function is
+//    defined. So, if the function has its header at line 280, the offset
+//    13 is at line 293 in the file.
+//
+//    Note that this offset should never be a negative number. This could
+//    happen in cases like macros. The debug machinery will register the
+//    line number at the point of macro expansion. So, if the macro was
+//    expanded in a line before the start of the function, the profile
+//    converter should emit a 0 as the offset (this means that the optimizers
+//    will not be able to associate a meaningful weight to the instructions
+//    in the macro).
+//
+// b. [OPTIONAL] Discriminator. This is used if the sampled program
+//    was compiled with DWARF discriminator support
+//    (http://wiki.dwarfstd.org/index.php?title=Path_Discriminators).
+//    DWARF discriminators are unsigned integer values that allow the
+//    compiler to distinguish between multiple execution paths on the
+//    same source line location.
+//
+//    For example, consider the line of code ``if (cond) foo(); else bar();``.
+//    If the predicate ``cond`` is true 80% of the time, then the edge
+//    into function ``foo`` should be considered to be taken most of the
+//    time. But both calls to ``foo`` and ``bar`` are at the same source
+//    line, so a sample count at that line is not sufficient. The
+//    compiler needs to know which part of that line is taken more
+//    frequently.
+//
+//    This is what discriminators provide. In this case, the calls to
+//    ``foo`` and ``bar`` will be at the same line, but will have
+//    different discriminator values. This allows the compiler to correctly
+//    set edge weights into ``foo`` and ``bar``.
+//
+// c. Number of samples. This is an integer quantity representing the
+//    number of samples collected by the profiler at this source
+//    location.
+//
+// d. [OPTIONAL] Potential call targets and samples. If present, this
+//    line contains a call instruction. This models both direct and indirect
+//    calls; each possible target is listed with its number of samples. For
+//    example,
+//
+//      130: 7  foo:3  bar:2  baz:7
+//
+//    The above means that at relative line offset 130 there is a call
+//    instruction that calls one of ``foo()``, ``bar()`` and ``baz()``,
+//    with ``baz()`` being the relatively more frequently called target.
+//
+// Each callsite line may contain several items. Some are optional.
+//
+// a. Source line offset. This number represents the line number of the
+//    callsite that is inlined in the profiled binary.
+//
+// b. [OPTIONAL] Discriminator. Same as the discriminator for sampled line.
+//
+// c. Number of samples. This is an integer quantity representing the
+//    total number of samples collected for the inlined instance at this
+//    callsite.
+//
+//
+// Binary format
+// -------------
+//
+// This is a more compact encoding. Numbers are encoded as ULEB128 values
+// and all strings are encoded in a name table. The file is organized in
+// the following sections:
+//
+// MAGIC (uint64_t)
+//    File identifier computed by function SPMagic() (0x5350524f463432ff)
+//
+// VERSION (uint32_t)
+//    File format version number computed by SPVersion()
+//
+// SUMMARY
+//    TOTAL_COUNT (uint64_t)
+//        Total number of samples in the profile.
+//    MAX_COUNT (uint64_t)
+//        Maximum value of samples on a line.
+//    MAX_FUNCTION_COUNT (uint64_t)
+//        Maximum number of samples at function entry (head samples).
+//    NUM_COUNTS (uint64_t)
+//        Number of lines with samples.
+//    NUM_FUNCTIONS (uint64_t)
+//        Number of functions with samples.
+//    NUM_DETAILED_SUMMARY_ENTRIES (size_t)
+//        Number of entries in the detailed summary.
+//    DETAILED_SUMMARY
+//        A list of detailed summary entries. Each entry consists of:
+//        CUTOFF (uint32_t)
+//            Required percentile of total sample count expressed as a fraction
+//            multiplied by 1000000.
+//        MIN_COUNT (uint64_t)
+//            The minimum number of samples required to reach the target
+//            CUTOFF.
+//        NUM_COUNTS (uint64_t)
+//            Number of samples to get to the desired percentile.
+//
+// NAME TABLE
+//    SIZE (uint32_t)
+//        Number of entries in the name table.
+//    NAMES
+//        A NUL-separated list of SIZE strings.
+//
+// FUNCTION BODY (one for each uninlined function body present in the profile)
+//    HEAD_SAMPLES (uint64_t) [only for top-level functions]
+//        Total number of samples collected at the head (prologue) of the
+//        function.
+//        NOTE: This field should only be present for top-level functions
+//              (i.e., not inlined into any caller). Inlined function calls
+//              have no prologue, so they don't need this.
+//    NAME_IDX (uint32_t)
+//        Index into the name table indicating the function name.
+//    SAMPLES (uint64_t)
+//        Total number of samples collected in this function.
+//    NRECS (uint32_t)
+//        Total number of sampling records in this function's profile.
+//    BODY RECORDS
+//        A list of NRECS entries. Each entry contains:
+//          OFFSET (uint32_t)
+//            Line offset from the start of the function.
+//          DISCRIMINATOR (uint32_t)
+//            Discriminator value (see description of discriminators
+//            in the text format documentation above).
+//          SAMPLES (uint64_t)
+//            Number of samples collected at this location.
+//          NUM_CALLS (uint32_t)
+//            Number of non-inlined function calls made at this location. In the
+//            case of direct calls, this number will always be 1. For indirect
+//            calls (virtual functions and function pointers) this will
+//            represent all the actual functions called at runtime.
+//          CALL_TARGETS
+//            A list of NUM_CALLS entries for each called function:
+//               NAME_IDX (uint32_t)
+//                  Index into the name table with the callee name.
+//               SAMPLES (uint64_t)
+//                  Number of samples collected at the call site.
+//    NUM_INLINED_FUNCTIONS (uint32_t)
+//      Number of callees inlined into this function.
+//    INLINED FUNCTION RECORDS
+//      A list of NUM_INLINED_FUNCTIONS entries describing each of the inlined
+//      callees.
+//        OFFSET (uint32_t)
+//          Line offset from the start of the function.
+//        DISCRIMINATOR (uint32_t)
+//          Discriminator value (see description of discriminators
+//          in the text format documentation above).
+//        FUNCTION BODY
+//          A FUNCTION BODY entry describing the inlined function.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_SAMPLEPROFREADER_H
+#define LLVM_PROFILEDATA_SAMPLEPROFREADER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/GCOV.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace sampleprof {
+
+/// \brief Sample-based profile reader.
+///
+/// Each profile contains sample counts for all the functions
+/// executed. Inside each function, statements are annotated with the
+/// collected samples on all the instructions associated with that
+/// statement.
+///
+/// For this to produce meaningful data, the program needs to be
+/// compiled with some debug information (at minimum, line numbers:
+/// -gline-tables-only). Otherwise, it will be impossible to match IR
+/// instructions to the line numbers collected by the profiler.
+///
+/// From the profile file, we are interested in collecting the
+/// following information:
+///
+/// * A list of functions included in the profile (mangled names).
+///
+/// * For each function F:
+///   1. The total number of samples collected in F.
+///
+///   2. The samples collected at each line in F. To provide some
+///      protection against source code shuffling, line numbers should
+///      be relative to the start of the function.
+///
+/// The reader supports two file formats: text and binary. The text format
+/// is useful for debugging and testing, while the binary format is more
+/// compact and I/O efficient. The two formats can be used interchangeably.
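+///
+/// A typical use, sketched (the path is illustrative; error handling is
+/// elided):
+/// \code
+///   LLVMContext Ctx;
+///   auto ReaderOrErr = SampleProfileReader::create("perf.llvmprof", Ctx);
+///   if (ReaderOrErr && !(*ReaderOrErr)->read())
+///     (*ReaderOrErr)->dump();
+/// \endcode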
+class SampleProfileReader {
+public:
+  SampleProfileReader(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+      : Profiles(0), Ctx(C), Buffer(std::move(B)) {}
+
+  virtual ~SampleProfileReader() = default;
+
+  /// \brief Read and validate the file header.
+  virtual std::error_code readHeader() = 0;
+
+  /// \brief Read sample profiles from the associated file.
+  virtual std::error_code read() = 0;
+
+  /// \brief Print the profile for \p FName on stream \p OS.
+  void dumpFunctionProfile(StringRef FName, raw_ostream &OS = dbgs());
+
+  /// \brief Print all the profiles on stream \p OS.
+  void dump(raw_ostream &OS = dbgs());
+
+  /// \brief Return the samples collected for function \p F.
+  FunctionSamples *getSamplesFor(const Function &F) {
+    // The function name may have been updated by adding a suffix. In the
+    // sample profile, function names are all stripped of suffixes, so we
+    // need to strip the function name's suffix before matching it with the
+    // profile.
+    if (Profiles.count(F.getName().split('.').first))
+      return &Profiles[(F.getName().split('.').first)];
+    return nullptr;
+  }
+
+  /// \brief Return all the profiles.
+  StringMap<FunctionSamples> &getProfiles() { return Profiles; }
+
+  /// \brief Report a parse error message.
+  void reportError(int64_t LineNumber, Twine Msg) const {
+    Ctx.diagnose(DiagnosticInfoSampleProfile(Buffer->getBufferIdentifier(),
+                                             LineNumber, Msg));
+  }
+
+  /// \brief Create a sample profile reader appropriate to the file format.
+  static ErrorOr<std::unique_ptr<SampleProfileReader>>
+  create(const Twine &Filename, LLVMContext &C);
+
+  /// \brief Create a sample profile reader from the supplied memory buffer.
+  static ErrorOr<std::unique_ptr<SampleProfileReader>>
+  create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C);
+
+  /// \brief Return the profile summary.
+  ProfileSummary &getSummary() { return *(Summary.get()); }
+
+protected:
+  /// \brief Map every function to its associated profile.
+  ///
+  /// The profile of every function executed at runtime is collected
+  /// in the structure FunctionSamples. This maps function objects
+  /// to their corresponding profiles.
+  StringMap<FunctionSamples> Profiles;
+
+  /// \brief LLVM context used to emit diagnostics.
+  LLVMContext &Ctx;
+
+  /// \brief Memory buffer holding the profile file.
+  std::unique_ptr<MemoryBuffer> Buffer;
+
+  /// \brief Profile summary information.
+  std::unique_ptr<ProfileSummary> Summary;
+
+  /// \brief Compute summary for this profile.
+  void computeSummary();
+};
+
+class SampleProfileReaderText : public SampleProfileReader {
+public:
+  SampleProfileReaderText(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+      : SampleProfileReader(std::move(B), C) {}
+
+  /// \brief Read and validate the file header.
+  std::error_code readHeader() override { return sampleprof_error::success; }
+
+  /// \brief Read sample profiles from the associated file.
+  std::error_code read() override;
+
+  /// \brief Return true if \p Buffer is in the format supported by this class.
+  static bool hasFormat(const MemoryBuffer &Buffer);
+};
+
+class SampleProfileReaderBinary : public SampleProfileReader {
+public:
+  SampleProfileReaderBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+      : SampleProfileReader(std::move(B), C) {}
+
+  /// \brief Read and validate the file header.
+  std::error_code readHeader() override;
+
+  /// \brief Read sample profiles from the associated file.
+  std::error_code read() override;
+
+  /// \brief Return true if \p Buffer is in the format supported by this class.
+  static bool hasFormat(const MemoryBuffer &Buffer);
+
+protected:
+  /// \brief Read a numeric value of type T from the profile.
+  ///
+  /// If an error occurs during decoding, a diagnostic message is emitted and
+  /// EC is set.
+  ///
+  /// \returns the read value.
+  template <typename T> ErrorOr<T> readNumber();
+
+  /// \brief Read a string from the profile.
+  ///
+  /// If an error occurs during decoding, a diagnostic message is emitted and
+  /// EC is set.
+  ///
+  /// \returns the read value.
+  ErrorOr<StringRef> readString();
+
+  /// Read a string indirectly via the name table.
+  ErrorOr<StringRef> readStringFromTable();
+
+  /// \brief Return true if we've reached the end of file.
+  bool at_eof() const { return Data >= End; }
+
+  /// Read the contents of the given profile instance.
+  std::error_code readProfile(FunctionSamples &FProfile);
+
+  /// \brief Points to the current location in the buffer.
+  const uint8_t *Data = nullptr;
+
+  /// \brief Points to the end of the buffer.
+  const uint8_t *End = nullptr;
+
+  /// Function name table.
+  std::vector<StringRef> NameTable;
+
+private:
+  std::error_code readSummaryEntry(std::vector<ProfileSummaryEntry> &Entries);
+
+  /// \brief Read profile summary.
+  std::error_code readSummary();
+};
+
+using InlineCallStack = SmallVector<FunctionSamples *, 10>;
+
+// Supported histogram types in GCC.  Currently, we only need support for
+// call target histograms.
+enum HistType {
+  HIST_TYPE_INTERVAL,
+  HIST_TYPE_POW2,
+  HIST_TYPE_SINGLE_VALUE,
+  HIST_TYPE_CONST_DELTA,
+  HIST_TYPE_INDIR_CALL,
+  HIST_TYPE_AVERAGE,
+  HIST_TYPE_IOR,
+  HIST_TYPE_INDIR_CALL_TOPN
+};
+
+class SampleProfileReaderGCC : public SampleProfileReader {
+public:
+  SampleProfileReaderGCC(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+      : SampleProfileReader(std::move(B), C), GcovBuffer(Buffer.get()) {}
+
+  /// \brief Read and validate the file header.
+  std::error_code readHeader() override;
+
+  /// \brief Read sample profiles from the associated file.
+  std::error_code read() override;
+
+  /// \brief Return true if \p Buffer is in the format supported by this class.
+  static bool hasFormat(const MemoryBuffer &Buffer);
+
+protected:
+  std::error_code readNameTable();
+  std::error_code readOneFunctionProfile(const InlineCallStack &InlineStack,
+                                         bool Update, uint32_t Offset);
+  std::error_code readFunctionProfiles();
+  std::error_code skipNextWord();
+  template <typename T> ErrorOr<T> readNumber();
+  ErrorOr<StringRef> readString();
+
+  /// \brief Read the section tag and check that it's the same as \p Expected.
+  std::error_code readSectionTag(uint32_t Expected);
+
+  /// GCOV buffer containing the profile.
+  GCOVBuffer GcovBuffer;
+
+  /// Function names in this profile.
+  std::vector<std::string> Names;
+
+  /// GCOV tags used to separate sections in the profile file.
+  static const uint32_t GCOVTagAFDOFileNames = 0xaa000000;
+  static const uint32_t GCOVTagAFDOFunction = 0xac000000;
+};
+
+} // end namespace sampleprof
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROFREADER_H
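The readers above share one driver pattern: a base-class factory (declared in the earlier part of this header) sniffs the buffer with each subclass's hasFormat() and instantiates the matching reader. A minimal sketch under that assumption — factory signature create(StringRef, LLVMContext &) and a getProfiles() accessor — with error handling abbreviated:

#include "llvm/IR/LLVMContext.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::sampleprof;

bool dumpSampleProfile(StringRef Filename) {
  LLVMContext Ctx;
  // The factory tries each subclass's hasFormat() (text, binary, GCC)
  // and calls readHeader() on the winner.
  auto ReaderOrErr = SampleProfileReader::create(Filename, Ctx);
  if (!ReaderOrErr)
    return false; // could not open or recognize the file
  std::unique_ptr<SampleProfileReader> Reader = std::move(ReaderOrErr.get());
  if (Reader->read()) // non-zero std::error_code on failure
    return false;
  for (const auto &I : Reader->getProfiles())
    outs() << I.first() << ": " << I.second.getTotalSamples() << " samples\n";
  return true;
}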
diff --git a/linux-x64/clang/include/llvm/ProfileData/SampleProfWriter.h b/linux-x64/clang/include/llvm/ProfileData/SampleProfWriter.h
new file mode 100644
index 0000000..86af103
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ProfileData/SampleProfWriter.h
@@ -0,0 +1,133 @@
+//===- SampleProfWriter.h - Write LLVM sample profile data ------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions needed for writing sample profiles.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
+#define LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <system_error>
+
+namespace llvm {
+namespace sampleprof {
+
+enum SampleProfileFormat { SPF_None = 0, SPF_Text, SPF_Binary, SPF_GCC };
+
+/// \brief Sample-based profile writer. Base class.
+class SampleProfileWriter {
+public:
+  virtual ~SampleProfileWriter() = default;
+
+  /// Write sample profiles in \p S.
+  ///
+  /// \returns status code of the file update operation.
+  virtual std::error_code write(const FunctionSamples &S) = 0;
+
+  /// Write all the sample profiles in the given map of samples.
+  ///
+  /// \returns status code of the file update operation.
+  std::error_code write(const StringMap<FunctionSamples> &ProfileMap);
+
+  raw_ostream &getOutputStream() { return *OutputStream; }
+
+  /// Profile writer factory.
+  ///
+  /// Create a new file writer based on the value of \p Format.
+  static ErrorOr<std::unique_ptr<SampleProfileWriter>>
+  create(StringRef Filename, SampleProfileFormat Format);
+
+  /// Create a new stream writer based on the value of \p Format.
+  /// For testing.
+  static ErrorOr<std::unique_ptr<SampleProfileWriter>>
+  create(std::unique_ptr<raw_ostream> &OS, SampleProfileFormat Format);
+
+protected:
+  SampleProfileWriter(std::unique_ptr<raw_ostream> &OS)
+      : OutputStream(std::move(OS)) {}
+
+  /// \brief Write a file header for the profile file.
+  virtual std::error_code
+  writeHeader(const StringMap<FunctionSamples> &ProfileMap) = 0;
+
+  /// \brief Output stream to which the profile is emitted.
+  std::unique_ptr<raw_ostream> OutputStream;
+
+  /// \brief Profile summary.
+  std::unique_ptr<ProfileSummary> Summary;
+
+  /// \brief Compute summary for this profile.
+  void computeSummary(const StringMap<FunctionSamples> &ProfileMap);
+};
+
+/// \brief Sample-based profile writer (text format).
+class SampleProfileWriterText : public SampleProfileWriter {
+public:
+  std::error_code write(const FunctionSamples &S) override;
+
+protected:
+  SampleProfileWriterText(std::unique_ptr<raw_ostream> &OS)
+      : SampleProfileWriter(OS), Indent(0) {}
+
+  std::error_code
+  writeHeader(const StringMap<FunctionSamples> &ProfileMap) override {
+    return sampleprof_error::success;
+  }
+
+private:
+  /// Indent level to use when writing.
+  ///
+  /// This is used when printing inlined callees.
+  unsigned Indent;
+
+  friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
+  SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
+                              SampleProfileFormat Format);
+};
+
+/// \brief Sample-based profile writer (binary format).
+class SampleProfileWriterBinary : public SampleProfileWriter {
+public:
+  std::error_code write(const FunctionSamples &S) override;
+
+protected:
+  SampleProfileWriterBinary(std::unique_ptr<raw_ostream> &OS)
+      : SampleProfileWriter(OS) {}
+
+  std::error_code
+  writeHeader(const StringMap<FunctionSamples> &ProfileMap) override;
+  std::error_code writeSummary();
+  std::error_code writeNameIdx(StringRef FName);
+  std::error_code writeBody(const FunctionSamples &S);
+
+private:
+  void addName(StringRef FName);
+  void addNames(const FunctionSamples &S);
+
+  MapVector<StringRef, uint32_t> NameTable;
+
+  friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
+  SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
+                              SampleProfileFormat Format);
+};
+
+} // end namespace sampleprof
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
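A sketch of the writer side, pairing the factory above with a single FunctionSamples record. The setters used here (setName, addTotalSamples, addHeadSamples, addBodySamples) are declared in SampleProf.h, and all sample counts are illustrative:

#include "llvm/ProfileData/SampleProfWriter.h"

using namespace llvm;
using namespace llvm::sampleprof;

std::error_code emitTextProfile(StringRef OutFile) {
  StringMap<FunctionSamples> Profiles;
  FunctionSamples &FS = Profiles["main"];
  FS.setName("main");
  FS.addTotalSamples(1200); // samples attributed to the whole body
  FS.addHeadSamples(100);   // samples at the function entry
  FS.addBodySamples(/*LineOffset=*/2, /*Discriminator=*/0, /*Num=*/1200);

  auto WriterOrErr = SampleProfileWriter::create(OutFile, SPF_Text);
  if (std::error_code EC = WriterOrErr.getError())
    return EC;
  // write(ProfileMap) emits the header via writeHeader(), then one
  // record per function through the virtual write() overload.
  return WriterOrErr.get()->write(Profiles);
}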
diff --git a/linux-x64/clang/include/llvm/Support/AArch64TargetParser.def b/linux-x64/clang/include/llvm/Support/AArch64TargetParser.def
new file mode 100644
index 0000000..30c7924
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/AArch64TargetParser.def
@@ -0,0 +1,103 @@
+//===- AARCH64TargetParser.def - AARCH64 target parsing defines -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides defines to build up the AARCH64 target parser's logic.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef AARCH64_ARCH
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
+#endif
+AARCH64_ARCH("invalid", INVALID, "", "",
+             ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
+AARCH64_ARCH("armv8-a", ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
+             FK_CRYPTO_NEON_FP_ARMV8,
+             (AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
+AARCH64_ARCH("armv8.1-a", ARMV8_1A, "8.1-A", "v8.1a",
+             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+              AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_RDM))
+AARCH64_ARCH("armv8.2-a", ARMV8_2A, "8.2-A", "v8.2a",
+             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
+              AArch64::AEK_RDM))
+AARCH64_ARCH("armv8.3-a", ARMV8_3A, "8.3-A", "v8.3a",
+             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
+              AArch64::AEK_RDM | AArch64::AEK_RCPC))
+#undef AARCH64_ARCH
+
+#ifndef AARCH64_ARCH_EXT_NAME
+#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
+#endif
+// FIXME: This would be nicer were it tablegen
+AARCH64_ARCH_EXT_NAME("invalid",  AArch64::AEK_INVALID,  nullptr,  nullptr)
+AARCH64_ARCH_EXT_NAME("none",     AArch64::AEK_NONE,     nullptr,  nullptr)
+AARCH64_ARCH_EXT_NAME("crc",      AArch64::AEK_CRC,      "+crc",   "-crc")
+AARCH64_ARCH_EXT_NAME("lse",      AArch64::AEK_LSE,      "+lse",   "-lse")
+AARCH64_ARCH_EXT_NAME("rdm",      AArch64::AEK_RDM,      "+rdm",   "-rdm")
+AARCH64_ARCH_EXT_NAME("crypto",   AArch64::AEK_CRYPTO,   "+crypto","-crypto")
+AARCH64_ARCH_EXT_NAME("dotprod",  AArch64::AEK_DOTPROD,  "+dotprod","-dotprod")
+AARCH64_ARCH_EXT_NAME("fp",       AArch64::AEK_FP,       "+fp-armv8",  "-fp-armv8")
+AARCH64_ARCH_EXT_NAME("simd",     AArch64::AEK_SIMD,     "+neon",  "-neon")
+AARCH64_ARCH_EXT_NAME("fp16",     AArch64::AEK_FP16,     "+fullfp16",  "-fullfp16")
+AARCH64_ARCH_EXT_NAME("profile",  AArch64::AEK_PROFILE,  "+spe",  "-spe")
+AARCH64_ARCH_EXT_NAME("ras",      AArch64::AEK_RAS,      "+ras",  "-ras")
+AARCH64_ARCH_EXT_NAME("sve",      AArch64::AEK_SVE,      "+sve",  "-sve")
+AARCH64_ARCH_EXT_NAME("rcpc",     AArch64::AEK_RCPC,     "+rcpc", "-rcpc")
+#undef AARCH64_ARCH_EXT_NAME
+
+#ifndef AARCH64_CPU_NAME
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
+#endif
+AARCH64_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("cortex-a55", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC))
+AARCH64_CPU_NAME("cortex-a57", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("cortex-a72", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("cortex-a73", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("cortex-a75", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC))
+AARCH64_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_NONE))
+AARCH64_CPU_NAME("exynos-m1", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("falkor", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC | AArch64::AEK_RDM))
+AARCH64_CPU_NAME("saphira", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_PROFILE))
+AARCH64_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("thunderx2t99", ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_NONE))
+AARCH64_CPU_NAME("thunderx", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
+AARCH64_CPU_NAME("thunderxt88", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
+AARCH64_CPU_NAME("thunderxt81", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
+AARCH64_CPU_NAME("thunderxt83", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
+// Invalid CPU
+AARCH64_CPU_NAME("invalid", INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
+#undef AARCH64_CPU_NAME
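This .def file is an X-macro table: a client defines only the row macro it cares about, includes the file, and receives one expansion per entry. The #ifndef blocks above supply empty defaults for any macro the client leaves undefined, and each table #undefs its macro at the end. A reduced sketch of a consumer in the style of the in-tree TargetParser (the function and container here are illustrative, not the actual in-tree code):

#include <utility>
#include <vector>

// Collect (CPU name, is-default) pairs from the AARCH64_CPU_NAME table.
std::vector<std::pair<const char *, bool>> collectAArch64CPUs() {
  std::vector<std::pair<const char *, bool>> CPUs;
#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)       \
  CPUs.push_back({NAME, IS_DEFAULT});
#include "llvm/Support/AArch64TargetParser.def"
  return CPUs;
}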
diff --git a/linux-x64/clang/include/llvm/Support/AMDGPUKernelDescriptor.h b/linux-x64/clang/include/llvm/Support/AMDGPUKernelDescriptor.h
new file mode 100644
index 0000000..ce2c0c1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/AMDGPUKernelDescriptor.h
@@ -0,0 +1,139 @@
+//===--- AMDGPUKernelDescriptor.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief AMDGPU kernel descriptor definitions. For more information, visit
+/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor-for-gfx6-gfx9
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_AMDGPUKERNELDESCRIPTOR_H
+#define LLVM_SUPPORT_AMDGPUKERNELDESCRIPTOR_H
+
+#include <cstdint>
+
+// Creates enumeration entries used for packing bits into integers. Enumeration
+// entries include bit shift amount, bit width, and bit mask.
+#define AMDGPU_BITS_ENUM_ENTRY(name, shift, width) \
+  name ## _SHIFT = (shift),                        \
+  name ## _WIDTH = (width),                        \
+  name = (((1 << (width)) - 1) << (shift))         \
+
+// Gets bits for specified bit mask from specified source.
+#define AMDGPU_BITS_GET(src, mask) \
+  ((src & mask) >> mask ## _SHIFT) \
+
+// Sets bits for specified bit mask in specified destination.
+#define AMDGPU_BITS_SET(dst, mask, val)     \
+  dst &= (~(1 << mask ## _SHIFT) & ~mask);  \
+  dst |= (((val) << mask ## _SHIFT) & mask) \
+
+namespace llvm {
+namespace AMDGPU {
+namespace HSAKD {
+
+/// \brief Floating point rounding modes.
+enum : uint8_t {
+  AMDGPU_FLOAT_ROUND_MODE_NEAR_EVEN      = 0,
+  AMDGPU_FLOAT_ROUND_MODE_PLUS_INFINITY  = 1,
+  AMDGPU_FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
+  AMDGPU_FLOAT_ROUND_MODE_ZERO           = 3,
+};
+
+/// \brief Floating point denorm modes.
+enum : uint8_t {
+  AMDGPU_FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
+  AMDGPU_FLOAT_DENORM_MODE_FLUSH_DST     = 1,
+  AMDGPU_FLOAT_DENORM_MODE_FLUSH_SRC     = 2,
+  AMDGPU_FLOAT_DENORM_MODE_FLUSH_NONE    = 3,
+};
+
+/// \brief System VGPR workitem IDs.
+enum : uint8_t {
+  AMDGPU_SYSTEM_VGPR_WORKITEM_ID_X         = 0,
+  AMDGPU_SYSTEM_VGPR_WORKITEM_ID_X_Y       = 1,
+  AMDGPU_SYSTEM_VGPR_WORKITEM_ID_X_Y_Z     = 2,
+  AMDGPU_SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
+};
+
+/// \brief Compute program resource register one layout.
+enum ComputePgmRsrc1 {
+  AMDGPU_BITS_ENUM_ENTRY(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
+  AMDGPU_BITS_ENUM_ENTRY(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
+  AMDGPU_BITS_ENUM_ENTRY(PRIORITY, 10, 2),
+  AMDGPU_BITS_ENUM_ENTRY(FLOAT_ROUND_MODE_32, 12, 2),
+  AMDGPU_BITS_ENUM_ENTRY(FLOAT_ROUND_MODE_16_64, 14, 2),
+  AMDGPU_BITS_ENUM_ENTRY(FLOAT_DENORM_MODE_32, 16, 2),
+  AMDGPU_BITS_ENUM_ENTRY(FLOAT_DENORM_MODE_16_64, 18, 2),
+  AMDGPU_BITS_ENUM_ENTRY(PRIV, 20, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_DX10_CLAMP, 21, 1),
+  AMDGPU_BITS_ENUM_ENTRY(DEBUG_MODE, 22, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_IEEE_MODE, 23, 1),
+  AMDGPU_BITS_ENUM_ENTRY(BULKY, 24, 1),
+  AMDGPU_BITS_ENUM_ENTRY(CDBG_USER, 25, 1),
+  AMDGPU_BITS_ENUM_ENTRY(FP16_OVFL, 26, 1),
+  AMDGPU_BITS_ENUM_ENTRY(RESERVED0, 27, 5),
+};
+
+/// \brief Compute program resource register two layout.
+enum ComputePgmRsrc2 {
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_SGPR_PRIVATE_SEGMENT_WAVE_OFFSET, 0, 1),
+  AMDGPU_BITS_ENUM_ENTRY(USER_SGPR_COUNT, 1, 5),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_TRAP_HANDLER, 6, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_VGPR_WORKITEM_ID, 11, 2),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_MEMORY, 14, 1),
+  AMDGPU_BITS_ENUM_ENTRY(GRANULATED_LDS_SIZE, 15, 9),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
+  AMDGPU_BITS_ENUM_ENTRY(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
+  AMDGPU_BITS_ENUM_ENTRY(RESERVED1, 31, 1),
+};
+
+/// \brief Kernel descriptor layout. This layout should be kept backwards
+/// compatible as it is consumed by the command processor.
+struct KernelDescriptor final {
+  uint32_t GroupSegmentFixedSize;
+  uint32_t PrivateSegmentFixedSize;
+  uint32_t MaxFlatWorkGroupSize;
+  uint64_t IsDynamicCallStack : 1;
+  uint64_t IsXNACKEnabled : 1;
+  uint64_t Reserved0 : 30;
+  int64_t KernelCodeEntryByteOffset;
+  uint64_t Reserved1[3];
+  uint32_t ComputePgmRsrc1;
+  uint32_t ComputePgmRsrc2;
+  uint64_t EnableSGPRPrivateSegmentBuffer : 1;
+  uint64_t EnableSGPRDispatchPtr : 1;
+  uint64_t EnableSGPRQueuePtr : 1;
+  uint64_t EnableSGPRKernargSegmentPtr : 1;
+  uint64_t EnableSGPRDispatchID : 1;
+  uint64_t EnableSGPRFlatScratchInit : 1;
+  uint64_t EnableSGPRPrivateSegmentSize : 1;
+  uint64_t EnableSGPRGridWorkgroupCountX : 1;
+  uint64_t EnableSGPRGridWorkgroupCountY : 1;
+  uint64_t EnableSGPRGridWorkgroupCountZ : 1;
+  uint64_t Reserved2 : 54;
+
+  KernelDescriptor() = default;
+};
+
+} // end namespace HSAKD
+} // end namespace AMDGPU
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_AMDGPUKERNELDESCRIPTOR_H
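A sketch of the bit-field macros in action. AMDGPU_BITS_ENUM_ENTRY(FLOAT_ROUND_MODE_32, 12, 2) expands to FLOAT_ROUND_MODE_32_SHIFT = 12, FLOAT_ROUND_MODE_32_WIDTH = 2, and a mask value of 0x3000, so a field can be packed and unpacked as below. Note that AMDGPU_BITS_SET expands to two statements, so call sites inside unbraced control flow need braces:

#include "llvm/Support/AMDGPUKernelDescriptor.h"

using namespace llvm::AMDGPU::HSAKD;

KernelDescriptor makeDescriptor() {
  KernelDescriptor KD{}; // value-initialize: all words start at zero
  // Select round-to-zero for 32-bit floats in COMPUTE_PGM_RSRC1.
  AMDGPU_BITS_SET(KD.ComputePgmRsrc1, FLOAT_ROUND_MODE_32,
                  AMDGPU_FLOAT_ROUND_MODE_ZERO);
  // Reading it back masks and shifts the field out again.
  unsigned Mode = AMDGPU_BITS_GET(KD.ComputePgmRsrc1, FLOAT_ROUND_MODE_32);
  (void)Mode; // == AMDGPU_FLOAT_ROUND_MODE_ZERO
  return KD;
}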
diff --git a/linux-x64/clang/include/llvm/Support/AMDGPUMetadata.h b/linux-x64/clang/include/llvm/Support/AMDGPUMetadata.h
new file mode 100644
index 0000000..00039a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/AMDGPUMetadata.h
@@ -0,0 +1,481 @@
+//===--- AMDGPUMetadata.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief AMDGPU metadata definitions and in-memory representations.
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_AMDGPUMETADATA_H
+#define LLVM_SUPPORT_AMDGPUMETADATA_H
+
+#include <cstdint>
+#include <string>
+#include <system_error>
+#include <vector>
+
+namespace llvm {
+namespace AMDGPU {
+
+//===----------------------------------------------------------------------===//
+// HSA metadata.
+//===----------------------------------------------------------------------===//
+namespace HSAMD {
+
+/// \brief HSA metadata major version.
+constexpr uint32_t VersionMajor = 1;
+/// \brief HSA metadata minor version.
+constexpr uint32_t VersionMinor = 0;
+
+/// \brief HSA metadata beginning assembler directive.
+constexpr char AssemblerDirectiveBegin[] = ".amd_amdgpu_hsa_metadata";
+/// \brief HSA metadata ending assembler directive.
+constexpr char AssemblerDirectiveEnd[] = ".end_amd_amdgpu_hsa_metadata";
+
+/// \brief Access qualifiers.
+enum class AccessQualifier : uint8_t {
+  Default   = 0,
+  ReadOnly  = 1,
+  WriteOnly = 2,
+  ReadWrite = 3,
+  Unknown   = 0xff
+};
+
+/// \brief Address space qualifiers.
+enum class AddressSpaceQualifier : uint8_t {
+  Private  = 0,
+  Global   = 1,
+  Constant = 2,
+  Local    = 3,
+  Generic  = 4,
+  Region   = 5,
+  Unknown  = 0xff
+};
+
+/// \brief Value kinds.
+enum class ValueKind : uint8_t {
+  ByValue                = 0,
+  GlobalBuffer           = 1,
+  DynamicSharedPointer   = 2,
+  Sampler                = 3,
+  Image                  = 4,
+  Pipe                   = 5,
+  Queue                  = 6,
+  HiddenGlobalOffsetX    = 7,
+  HiddenGlobalOffsetY    = 8,
+  HiddenGlobalOffsetZ    = 9,
+  HiddenNone             = 10,
+  HiddenPrintfBuffer     = 11,
+  HiddenDefaultQueue     = 12,
+  HiddenCompletionAction = 13,
+  Unknown                = 0xff
+};
+
+/// \brief Value types.
+enum class ValueType : uint8_t {
+  Struct  = 0,
+  I8      = 1,
+  U8      = 2,
+  I16     = 3,
+  U16     = 4,
+  F16     = 5,
+  I32     = 6,
+  U32     = 7,
+  F32     = 8,
+  I64     = 9,
+  U64     = 10,
+  F64     = 11,
+  Unknown = 0xff
+};
+
+//===----------------------------------------------------------------------===//
+// Kernel Metadata.
+//===----------------------------------------------------------------------===//
+namespace Kernel {
+
+//===----------------------------------------------------------------------===//
+// Kernel Attributes Metadata.
+//===----------------------------------------------------------------------===//
+namespace Attrs {
+
+namespace Key {
+/// \brief Key for Kernel::Attr::Metadata::mReqdWorkGroupSize.
+constexpr char ReqdWorkGroupSize[] = "ReqdWorkGroupSize";
+/// \brief Key for Kernel::Attr::Metadata::mWorkGroupSizeHint.
+constexpr char WorkGroupSizeHint[] = "WorkGroupSizeHint";
+/// \brief Key for Kernel::Attr::Metadata::mVecTypeHint.
+constexpr char VecTypeHint[] = "VecTypeHint";
+/// \brief Key for Kernel::Attr::Metadata::mRuntimeHandle.
+constexpr char RuntimeHandle[] = "RuntimeHandle";
+} // end namespace Key
+
+/// \brief In-memory representation of kernel attributes metadata.
+struct Metadata final {
+  /// \brief 'reqd_work_group_size' attribute. Optional.
+  std::vector<uint32_t> mReqdWorkGroupSize = std::vector<uint32_t>();
+  /// \brief 'work_group_size_hint' attribute. Optional.
+  std::vector<uint32_t> mWorkGroupSizeHint = std::vector<uint32_t>();
+  /// \brief 'vec_type_hint' attribute. Optional.
+  std::string mVecTypeHint = std::string();
+  /// \brief External symbol created by runtime to store the kernel address
+  /// for enqueued blocks.
+  std::string mRuntimeHandle = std::string();
+
+  /// \brief Default constructor.
+  Metadata() = default;
+
+  /// \returns True if kernel attributes metadata is empty, false otherwise.
+  bool empty() const {
+    return !notEmpty();
+  }
+
+  /// \returns True if kernel attributes metadata is not empty, false otherwise.
+  bool notEmpty() const {
+    return !mReqdWorkGroupSize.empty() || !mWorkGroupSizeHint.empty() ||
+           !mVecTypeHint.empty() || !mRuntimeHandle.empty();
+  }
+};
+
+} // end namespace Attrs
+
+//===----------------------------------------------------------------------===//
+// Kernel Argument Metadata.
+//===----------------------------------------------------------------------===//
+namespace Arg {
+
+namespace Key {
+/// \brief Key for Kernel::Arg::Metadata::mName.
+constexpr char Name[] = "Name";
+/// \brief Key for Kernel::Arg::Metadata::mTypeName.
+constexpr char TypeName[] = "TypeName";
+/// \brief Key for Kernel::Arg::Metadata::mSize.
+constexpr char Size[] = "Size";
+/// \brief Key for Kernel::Arg::Metadata::mAlign.
+constexpr char Align[] = "Align";
+/// \brief Key for Kernel::Arg::Metadata::mValueKind.
+constexpr char ValueKind[] = "ValueKind";
+/// \brief Key for Kernel::Arg::Metadata::mValueType.
+constexpr char ValueType[] = "ValueType";
+/// \brief Key for Kernel::Arg::Metadata::mPointeeAlign.
+constexpr char PointeeAlign[] = "PointeeAlign";
+/// \brief Key for Kernel::Arg::Metadata::mAddrSpaceQual.
+constexpr char AddrSpaceQual[] = "AddrSpaceQual";
+/// \brief Key for Kernel::Arg::Metadata::mAccQual.
+constexpr char AccQual[] = "AccQual";
+/// \brief Key for Kernel::Arg::Metadata::mActualAccQual.
+constexpr char ActualAccQual[] = "ActualAccQual";
+/// \brief Key for Kernel::Arg::Metadata::mIsConst.
+constexpr char IsConst[] = "IsConst";
+/// \brief Key for Kernel::Arg::Metadata::mIsRestrict.
+constexpr char IsRestrict[] = "IsRestrict";
+/// \brief Key for Kernel::Arg::Metadata::mIsVolatile.
+constexpr char IsVolatile[] = "IsVolatile";
+/// \brief Key for Kernel::Arg::Metadata::mIsPipe.
+constexpr char IsPipe[] = "IsPipe";
+} // end namespace Key
+
+/// \brief In-memory representation of kernel argument metadata.
+struct Metadata final {
+  /// \brief Name. Optional.
+  std::string mName = std::string();
+  /// \brief Type name. Optional.
+  std::string mTypeName = std::string();
+  /// \brief Size in bytes. Required.
+  uint32_t mSize = 0;
+  /// \brief Alignment in bytes. Required.
+  uint32_t mAlign = 0;
+  /// \brief Value kind. Required.
+  ValueKind mValueKind = ValueKind::Unknown;
+  /// \brief Value type. Required.
+  ValueType mValueType = ValueType::Unknown;
+  /// \brief Pointee alignment in bytes. Optional.
+  uint32_t mPointeeAlign = 0;
+  /// \brief Address space qualifier. Optional.
+  AddressSpaceQualifier mAddrSpaceQual = AddressSpaceQualifier::Unknown;
+  /// \brief Access qualifier. Optional.
+  AccessQualifier mAccQual = AccessQualifier::Unknown;
+  /// \brief Actual access qualifier. Optional.
+  AccessQualifier mActualAccQual = AccessQualifier::Unknown;
+  /// \brief True if 'const' qualifier is specified. Optional.
+  bool mIsConst = false;
+  /// \brief True if 'restrict' qualifier is specified. Optional.
+  bool mIsRestrict = false;
+  /// \brief True if 'volatile' qualifier is specified. Optional.
+  bool mIsVolatile = false;
+  /// \brief True if 'pipe' qualifier is specified. Optional.
+  bool mIsPipe = false;
+
+  /// \brief Default constructor.
+  Metadata() = default;
+};
+
+} // end namespace Arg
+
+//===----------------------------------------------------------------------===//
+// Kernel Code Properties Metadata.
+//===----------------------------------------------------------------------===//
+namespace CodeProps {
+
+namespace Key {
+/// \brief Key for Kernel::CodeProps::Metadata::mKernargSegmentSize.
+constexpr char KernargSegmentSize[] = "KernargSegmentSize";
+/// \brief Key for Kernel::CodeProps::Metadata::mGroupSegmentFixedSize.
+constexpr char GroupSegmentFixedSize[] = "GroupSegmentFixedSize";
+/// \brief Key for Kernel::CodeProps::Metadata::mPrivateSegmentFixedSize.
+constexpr char PrivateSegmentFixedSize[] = "PrivateSegmentFixedSize";
+/// \brief Key for Kernel::CodeProps::Metadata::mKernargSegmentAlign.
+constexpr char KernargSegmentAlign[] = "KernargSegmentAlign";
+/// \brief Key for Kernel::CodeProps::Metadata::mWavefrontSize.
+constexpr char WavefrontSize[] = "WavefrontSize";
+/// \brief Key for Kernel::CodeProps::Metadata::mNumSGPRs.
+constexpr char NumSGPRs[] = "NumSGPRs";
+/// \brief Key for Kernel::CodeProps::Metadata::mNumVGPRs.
+constexpr char NumVGPRs[] = "NumVGPRs";
+/// \brief Key for Kernel::CodeProps::Metadata::mMaxFlatWorkGroupSize.
+constexpr char MaxFlatWorkGroupSize[] = "MaxFlatWorkGroupSize";
+/// \brief Key for Kernel::CodeProps::Metadata::mIsDynamicCallStack.
+constexpr char IsDynamicCallStack[] = "IsDynamicCallStack";
+/// \brief Key for Kernel::CodeProps::Metadata::mIsXNACKEnabled.
+constexpr char IsXNACKEnabled[] = "IsXNACKEnabled";
+/// \brief Key for Kernel::CodeProps::Metadata::mNumSpilledSGPRs.
+constexpr char NumSpilledSGPRs[] = "NumSpilledSGPRs";
+/// \brief Key for Kernel::CodeProps::Metadata::mNumSpilledVGPRs.
+constexpr char NumSpilledVGPRs[] = "NumSpilledVGPRs";
+} // end namespace Key
+
+/// \brief In-memory representation of kernel code properties metadata.
+struct Metadata final {
+  /// \brief Size in bytes of the kernarg segment memory. Kernarg segment memory
+  /// holds the values of the arguments to the kernel. Required.
+  uint64_t mKernargSegmentSize = 0;
+  /// \brief Size in bytes of the group segment memory required by a workgroup.
+  /// This value does not include any dynamically allocated group segment memory
+  /// that may be added when the kernel is dispatched. Required.
+  uint32_t mGroupSegmentFixedSize = 0;
+  /// \brief Size in bytes of the private segment memory required by a workitem.
+  /// Private segment memory includes arg, spill and private segments. Required.
+  uint32_t mPrivateSegmentFixedSize = 0;
+  /// \brief Maximum byte alignment of variables used by the kernel in the
+  /// kernarg memory segment. Required.
+  uint32_t mKernargSegmentAlign = 0;
+  /// \brief Wavefront size. Required.
+  uint32_t mWavefrontSize = 0;
+  /// \brief Total number of SGPRs used by a wavefront. Optional.
+  uint16_t mNumSGPRs = 0;
+  /// \brief Total number of VGPRs used by a workitem. Optional.
+  uint16_t mNumVGPRs = 0;
+  /// \brief Maximum flat work-group size supported by the kernel. Optional.
+  uint32_t mMaxFlatWorkGroupSize = 0;
+  /// \brief True if the generated machine code is using a dynamically sized
+  /// call stack. Optional.
+  bool mIsDynamicCallStack = false;
+  /// \brief True if the generated machine code is capable of supporting XNACK.
+  /// Optional.
+  bool mIsXNACKEnabled = false;
+  /// \brief Number of SGPRs spilled by a wavefront. Optional.
+  uint16_t mNumSpilledSGPRs = 0;
+  /// \brief Number of VGPRs spilled by a workitem. Optional.
+  uint16_t mNumSpilledVGPRs = 0;
+
+  /// \brief Default constructor.
+  Metadata() = default;
+
+  /// \returns True if kernel code properties metadata is empty, false
+  /// otherwise.
+  bool empty() const {
+    return !notEmpty();
+  }
+
+  /// \returns Always true: code properties metadata carries required fields
+  /// (e.g. mKernargSegmentSize), so it is never considered empty.
+  bool notEmpty() const {
+    return true;
+  }
+};
+
+} // end namespace CodeProps
+
+//===----------------------------------------------------------------------===//
+// Kernel Debug Properties Metadata.
+//===----------------------------------------------------------------------===//
+namespace DebugProps {
+
+namespace Key {
+/// \brief Key for Kernel::DebugProps::Metadata::mDebuggerABIVersion.
+constexpr char DebuggerABIVersion[] = "DebuggerABIVersion";
+/// \brief Key for Kernel::DebugProps::Metadata::mReservedNumVGPRs.
+constexpr char ReservedNumVGPRs[] = "ReservedNumVGPRs";
+/// \brief Key for Kernel::DebugProps::Metadata::mReservedFirstVGPR.
+constexpr char ReservedFirstVGPR[] = "ReservedFirstVGPR";
+/// \brief Key for Kernel::DebugProps::Metadata::mPrivateSegmentBufferSGPR.
+constexpr char PrivateSegmentBufferSGPR[] = "PrivateSegmentBufferSGPR";
+/// \brief Key for
+///     Kernel::DebugProps::Metadata::mWavefrontPrivateSegmentOffsetSGPR.
+constexpr char WavefrontPrivateSegmentOffsetSGPR[] =
+    "WavefrontPrivateSegmentOffsetSGPR";
+} // end namespace Key
+
+/// \brief In-memory representation of kernel debug properties metadata.
+struct Metadata final {
+  /// \brief Debugger ABI version. Optional.
+  std::vector<uint32_t> mDebuggerABIVersion = std::vector<uint32_t>();
+  /// \brief Consecutive number of VGPRs reserved for debugger use. Must be 0 if
+  /// mDebuggerABIVersion is not set. Optional.
+  uint16_t mReservedNumVGPRs = 0;
+  /// \brief First fixed VGPR reserved. Must be uint16_t(-1) if
+  /// mDebuggerABIVersion is not set or mReservedNumVGPRs is 0. Optional.
+  uint16_t mReservedFirstVGPR = uint16_t(-1);
+  /// \brief Fixed SGPR of the first of 4 SGPRs used to hold the scratch V# used
+  /// for the entire kernel execution. Must be uint16_t(-1) if
+  /// mDebuggerABIVersion is not set or SGPR not used or not known. Optional.
+  uint16_t mPrivateSegmentBufferSGPR = uint16_t(-1);
+  /// \brief Fixed SGPR used to hold the wave scratch offset for the entire
+  /// kernel execution. Must be uint16_t(-1) if mDebuggerABIVersion is not set
+  /// or SGPR is not used or not known. Optional.
+  uint16_t mWavefrontPrivateSegmentOffsetSGPR = uint16_t(-1);
+
+  /// \brief Default constructor.
+  Metadata() = default;
+
+  /// \returns True if kernel debug properties metadata is empty, false
+  /// otherwise.
+  bool empty() const {
+    return !notEmpty();
+  }
+
+  /// \returns True if kernel debug properties metadata is not empty, false
+  /// otherwise.
+  bool notEmpty() const {
+    return !mDebuggerABIVersion.empty();
+  }
+};
+
+} // end namespace DebugProps
+
+namespace Key {
+/// \brief Key for Kernel::Metadata::mName.
+constexpr char Name[] = "Name";
+/// \brief Key for Kernel::Metadata::mSymbolName.
+constexpr char SymbolName[] = "SymbolName";
+/// \brief Key for Kernel::Metadata::mLanguage.
+constexpr char Language[] = "Language";
+/// \brief Key for Kernel::Metadata::mLanguageVersion.
+constexpr char LanguageVersion[] = "LanguageVersion";
+/// \brief Key for Kernel::Metadata::mAttrs.
+constexpr char Attrs[] = "Attrs";
+/// \brief Key for Kernel::Metadata::mArgs.
+constexpr char Args[] = "Args";
+/// \brief Key for Kernel::Metadata::mCodeProps.
+constexpr char CodeProps[] = "CodeProps";
+/// \brief Key for Kernel::Metadata::mDebugProps.
+constexpr char DebugProps[] = "DebugProps";
+} // end namespace Key
+
+/// \brief In-memory representation of kernel metadata.
+struct Metadata final {
+  /// \brief Kernel source name. Required.
+  std::string mName = std::string();
+  /// \brief Kernel descriptor name. Required.
+  std::string mSymbolName = std::string();
+  /// \brief Language. Optional.
+  std::string mLanguage = std::string();
+  /// \brief Language version. Optional.
+  std::vector<uint32_t> mLanguageVersion = std::vector<uint32_t>();
+  /// \brief Attributes metadata. Optional.
+  Attrs::Metadata mAttrs = Attrs::Metadata();
+  /// \brief Arguments metadata. Optional.
+  std::vector<Arg::Metadata> mArgs = std::vector<Arg::Metadata>();
+  /// \brief Code properties metadata. Optional.
+  CodeProps::Metadata mCodeProps = CodeProps::Metadata();
+  /// \brief Debug properties metadata. Optional.
+  DebugProps::Metadata mDebugProps = DebugProps::Metadata();
+
+  /// \brief Default constructor.
+  Metadata() = default;
+};
+
+} // end namespace Kernel
+
+namespace Key {
+/// \brief Key for HSA::Metadata::mVersion.
+constexpr char Version[] = "Version";
+/// \brief Key for HSA::Metadata::mPrintf.
+constexpr char Printf[] = "Printf";
+/// \brief Key for HSA::Metadata::mKernels.
+constexpr char Kernels[] = "Kernels";
+} // end namespace Key
+
+/// \brief In-memory representation of HSA metadata.
+struct Metadata final {
+  /// \brief HSA metadata version. Required.
+  std::vector<uint32_t> mVersion = std::vector<uint32_t>();
+  /// \brief Printf metadata. Optional.
+  std::vector<std::string> mPrintf = std::vector<std::string>();
+  /// \brief Kernels metadata. Required.
+  std::vector<Kernel::Metadata> mKernels = std::vector<Kernel::Metadata>();
+
+  /// \brief Default constructor.
+  Metadata() = default;
+};
+
+/// \brief Converts \p String to \p HSAMetadata.
+std::error_code fromString(std::string String, Metadata &HSAMetadata);
+
+/// \brief Converts \p HSAMetadata to \p String.
+std::error_code toString(Metadata HSAMetadata, std::string &String);
+
+} // end namespace HSAMD
+
+//===----------------------------------------------------------------------===//
+// PAL metadata.
+//===----------------------------------------------------------------------===//
+namespace PALMD {
+
+/// \brief PAL metadata assembler directive.
+constexpr char AssemblerDirective[] = ".amd_amdgpu_pal_metadata";
+
+/// \brief PAL metadata keys.
+enum Key : uint32_t {
+  LS_NUM_USED_VGPRS = 0x10000021,
+  HS_NUM_USED_VGPRS = 0x10000022,
+  ES_NUM_USED_VGPRS = 0x10000023,
+  GS_NUM_USED_VGPRS = 0x10000024,
+  VS_NUM_USED_VGPRS = 0x10000025,
+  PS_NUM_USED_VGPRS = 0x10000026,
+  CS_NUM_USED_VGPRS = 0x10000027,
+
+  LS_NUM_USED_SGPRS = 0x10000028,
+  HS_NUM_USED_SGPRS = 0x10000029,
+  ES_NUM_USED_SGPRS = 0x1000002a,
+  GS_NUM_USED_SGPRS = 0x1000002b,
+  VS_NUM_USED_SGPRS = 0x1000002c,
+  PS_NUM_USED_SGPRS = 0x1000002d,
+  CS_NUM_USED_SGPRS = 0x1000002e,
+
+  LS_SCRATCH_SIZE = 0x10000044,
+  HS_SCRATCH_SIZE = 0x10000045,
+  ES_SCRATCH_SIZE = 0x10000046,
+  GS_SCRATCH_SIZE = 0x10000047,
+  VS_SCRATCH_SIZE = 0x10000048,
+  PS_SCRATCH_SIZE = 0x10000049,
+  CS_SCRATCH_SIZE = 0x1000004a
+};
+
+/// \brief PAL metadata represented as a vector.
+typedef std::vector<uint32_t> Metadata;
+
+/// \brief Converts \p PALMetadata to \p String.
+std::error_code toString(const Metadata &PALMetadata, std::string &String);
+
+} // end namespace PALMD
+} // end namespace AMDGPU
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_AMDGPUMETADATA_H
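The structs above mirror the YAML document emitted into the ELF; fromString()/toString() convert between the two representations. A sketch that builds a one-kernel metadata object and serializes it (all field values are illustrative):

#include "llvm/Support/AMDGPUMetadata.h"
#include <iostream>

using namespace llvm::AMDGPU;

int main() {
  HSAMD::Metadata MD;
  MD.mVersion = {HSAMD::VersionMajor, HSAMD::VersionMinor};

  HSAMD::Kernel::Metadata Kern;
  Kern.mName = "vec_add";          // kernel source name (required)
  Kern.mSymbolName = "vec_add@kd"; // descriptor symbol name (required)
  Kern.mCodeProps.mKernargSegmentSize = 24;
  Kern.mCodeProps.mWavefrontSize = 64;
  MD.mKernels.push_back(Kern);

  std::string S;
  if (HSAMD::toString(MD, S)) // non-zero std::error_code on failure
    return 1;
  std::cout << S;
  return 0;
}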
diff --git a/linux-x64/clang/include/llvm/Support/ARMAttributeParser.h b/linux-x64/clang/include/llvm/Support/ARMAttributeParser.h
new file mode 100644
index 0000000..919f397
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ARMAttributeParser.h
@@ -0,0 +1,140 @@
+//===--- ARMAttributeParser.h - ARM Attribute Information Printer ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMATTRIBUTEPARSER_H
+#define LLVM_SUPPORT_ARMATTRIBUTEPARSER_H
+
+#include "ARMBuildAttributes.h"
+#include "ScopedPrinter.h"
+
+#include <map>
+
+namespace llvm {
+class StringRef;
+
+class ARMAttributeParser {
+  ScopedPrinter *SW;
+
+  std::map<unsigned, unsigned> Attributes;
+
+  struct DisplayHandler {
+    ARMBuildAttrs::AttrType Attribute;
+    void (ARMAttributeParser::*Routine)(ARMBuildAttrs::AttrType,
+                                        const uint8_t *, uint32_t &);
+  };
+  static const DisplayHandler DisplayRoutines[];
+
+  uint64_t ParseInteger(const uint8_t *Data, uint32_t &Offset);
+  StringRef ParseString(const uint8_t *Data, uint32_t &Offset);
+
+  void IntegerAttribute(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                        uint32_t &Offset);
+  void StringAttribute(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+
+  void PrintAttribute(unsigned Tag, unsigned Value, StringRef ValueDesc);
+
+  void CPU_arch(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                uint32_t &Offset);
+  void CPU_arch_profile(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                        uint32_t &Offset);
+  void ARM_ISA_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                   uint32_t &Offset);
+  void THUMB_ISA_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                     uint32_t &Offset);
+  void FP_arch(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+               uint32_t &Offset);
+  void WMMX_arch(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                 uint32_t &Offset);
+  void Advanced_SIMD_arch(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                          uint32_t &Offset);
+  void PCS_config(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                  uint32_t &Offset);
+  void ABI_PCS_R9_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                      uint32_t &Offset);
+  void ABI_PCS_RW_data(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_PCS_RO_data(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_PCS_GOT_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_PCS_wchar_t(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_FP_rounding(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_FP_denormal(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_FP_exceptions(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                         uint32_t &Offset);
+  void ABI_FP_user_exceptions(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                              uint32_t &Offset);
+  void ABI_FP_number_model(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                           uint32_t &Offset);
+  void ABI_align_needed(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                        uint32_t &Offset);
+  void ABI_align_preserved(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                           uint32_t &Offset);
+  void ABI_enum_size(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                     uint32_t &Offset);
+  void ABI_HardFP_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                      uint32_t &Offset);
+  void ABI_VFP_args(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                    uint32_t &Offset);
+  void ABI_WMMX_args(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                     uint32_t &Offset);
+  void ABI_optimization_goals(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                              uint32_t &Offset);
+  void ABI_FP_optimization_goals(ARMBuildAttrs::AttrType Tag,
+                                 const uint8_t *Data, uint32_t &Offset);
+  void compatibility(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                     uint32_t &Offset);
+  void CPU_unaligned_access(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                            uint32_t &Offset);
+  void FP_HP_extension(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void ABI_FP_16bit_format(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                           uint32_t &Offset);
+  void MPextension_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                       uint32_t &Offset);
+  void DIV_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+               uint32_t &Offset);
+  void DSP_extension(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                     uint32_t &Offset);
+  void T2EE_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                uint32_t &Offset);
+  void Virtualization_use(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                          uint32_t &Offset);
+  void nodefaults(ARMBuildAttrs::AttrType Tag, const uint8_t *Data,
+                  uint32_t &Offset);
+
+  void ParseAttributeList(const uint8_t *Data, uint32_t &Offset,
+                          uint32_t Length);
+  void ParseIndexList(const uint8_t *Data, uint32_t &Offset,
+                      SmallVectorImpl<uint8_t> &IndexList);
+  void ParseSubsection(const uint8_t *Data, uint32_t Length);
+public:
+  ARMAttributeParser(ScopedPrinter *SW) : SW(SW) {}
+
+  ARMAttributeParser() : SW(nullptr) { }
+
+  void Parse(ArrayRef<uint8_t> Section, bool isLittle);
+
+  bool hasAttribute(unsigned Tag) const {
+    return Attributes.count(Tag);
+  }
+
+  unsigned getAttributeValue(unsigned Tag) const {
+    return Attributes.find(Tag)->second;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ARMATTRIBUTEPARSER_H
+
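A sketch of the parser's intended use: hand it the raw bytes of a .ARM.attributes section, then query individual tags. Since getAttributeValue() dereferences the map lookup unconditionally, callers should guard it with hasAttribute():

#include "llvm/Support/ARMAttributeParser.h"
#include "llvm/Support/ARMBuildAttributes.h"

using namespace llvm;

unsigned getCPUArch(ArrayRef<uint8_t> AttrSection) {
  ARMAttributeParser Parser; // no ScopedPrinter: collect values only
  Parser.Parse(AttrSection, /*isLittle=*/true);
  if (Parser.hasAttribute(ARMBuildAttrs::CPU_arch))
    return Parser.getAttributeValue(ARMBuildAttrs::CPU_arch);
  return ARMBuildAttrs::CPUArch::Pre_v4; // conservative default
}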
diff --git a/linux-x64/clang/include/llvm/Support/ARMBuildAttributes.h b/linux-x64/clang/include/llvm/Support/ARMBuildAttributes.h
new file mode 100644
index 0000000..6c83e44
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ARMBuildAttributes.h
@@ -0,0 +1,246 @@
+//===-- ARMBuildAttributes.h - ARM Build Attributes -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains enumerations and support routines for ARM build
+// attributes as defined in the ARM ABI addenda document (ABI release 2.08).
+//
+// ELF for the ARM Architecture r2.09 - November 30, 2012
+//
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
+#define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
+
+namespace llvm {
+class StringRef;
+
+namespace ARMBuildAttrs {
+
+enum SpecialAttr {
+  // This is for the .cpu asm attr. It translates into one or more
+  // AttrType (below) entries in the .ARM.attributes section in the ELF.
+  SEL_CPU
+};
+
+enum AttrType {
+  // Rest correspond to ELF/.ARM.attributes
+  File                      = 1,
+  CPU_raw_name              = 4,
+  CPU_name                  = 5,
+  CPU_arch                  = 6,
+  CPU_arch_profile          = 7,
+  ARM_ISA_use               = 8,
+  THUMB_ISA_use             = 9,
+  FP_arch                   = 10,
+  WMMX_arch                 = 11,
+  Advanced_SIMD_arch        = 12,
+  PCS_config                = 13,
+  ABI_PCS_R9_use            = 14,
+  ABI_PCS_RW_data           = 15,
+  ABI_PCS_RO_data           = 16,
+  ABI_PCS_GOT_use           = 17,
+  ABI_PCS_wchar_t           = 18,
+  ABI_FP_rounding           = 19,
+  ABI_FP_denormal           = 20,
+  ABI_FP_exceptions         = 21,
+  ABI_FP_user_exceptions    = 22,
+  ABI_FP_number_model       = 23,
+  ABI_align_needed          = 24,
+  ABI_align_preserved       = 25,
+  ABI_enum_size             = 26,
+  ABI_HardFP_use            = 27,
+  ABI_VFP_args              = 28,
+  ABI_WMMX_args             = 29,
+  ABI_optimization_goals    = 30,
+  ABI_FP_optimization_goals = 31,
+  compatibility             = 32,
+  CPU_unaligned_access      = 34,
+  FP_HP_extension           = 36,
+  ABI_FP_16bit_format       = 38,
+  MPextension_use           = 42, // recoded from 70 (ABI r2.08)
+  DIV_use                   = 44,
+  DSP_extension             = 46,
+  also_compatible_with      = 65,
+  conformance               = 67,
+  Virtualization_use        = 68,
+
+  /// Legacy Tags
+  Section                   = 2,  // deprecated (ABI r2.09)
+  Symbol                    = 3,  // deprecated (ABI r2.09)
+  ABI_align8_needed         = 24, // renamed to ABI_align_needed (ABI r2.09)
+  ABI_align8_preserved      = 25, // renamed to ABI_align_preserved (ABI r2.09)
+  nodefaults                = 64, // deprecated (ABI r2.09)
+  T2EE_use                  = 66, // deprecated (ABI r2.09)
+  MPextension_use_old       = 70  // recoded to MPextension_use (ABI r2.08)
+};
+
+StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix = true);
+StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix = true);
+int AttrTypeFromString(StringRef Tag);
+
+// Magic numbers for .ARM.attributes
+enum AttrMagic {
+  Format_Version  = 0x41
+};
+
+// Legal Values for CPU_arch, (=6), uleb128
+enum CPUArch {
+  Pre_v4   = 0,
+  v4       = 1,   // e.g. SA110
+  v4T      = 2,   // e.g. ARM7TDMI
+  v5T      = 3,   // e.g. ARM9TDMI
+  v5TE     = 4,   // e.g. ARM946E_S
+  v5TEJ    = 5,   // e.g. ARM926EJ_S
+  v6       = 6,   // e.g. ARM1136J_S
+  v6KZ     = 7,   // e.g. ARM1176JZ_S
+  v6T2     = 8,   // e.g. ARM1156T2_S
+  v6K      = 9,   // e.g. ARM1176JZ_S
+  v7       = 10,  // e.g. Cortex A8, Cortex M3
+  v6_M     = 11,  // e.g. Cortex M1
+  v6S_M    = 12,  // v6_M with the System extensions
+  v7E_M    = 13,  // v7_M with DSP extensions
+  v8_A     = 14,  // v8_A AArch32
+  v8_R     = 15,  // e.g. Cortex R52
+  v8_M_Base= 16,  // v8_M_Base AArch32
+  v8_M_Main= 17,  // v8_M_Main AArch32
+};
+
+enum CPUArchProfile {               // (=7), uleb128
+  Not_Applicable          = 0,      // pre v7, or cross-profile code
+  ApplicationProfile      = (0x41), // 'A' (e.g. for Cortex A8)
+  RealTimeProfile         = (0x52), // 'R' (e.g. for Cortex R4)
+  MicroControllerProfile  = (0x4D), // 'M' (e.g. for Cortex M3)
+  SystemProfile           = (0x53)  // 'S' Application or real-time profile
+};
+
+// The following have a lot of common use cases
+enum {
+  Not_Allowed = 0,
+  Allowed = 1,
+
+  // Tag_ARM_ISA_use (=8), uleb128
+
+  // Tag_THUMB_ISA_use, (=9), uleb128
+  AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
+  AllowThumbDerived = 3, // Thumb allowed, derived from arch/profile
+
+  // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
+  AllowFPv2  = 2,     // v2 FP ISA permitted (implies use of the v1 FP ISA)
+  AllowFPv3A = 3,     // v3 FP ISA permitted (implies use of the v2 FP ISA)
+  AllowFPv3B = 4,     // v3 FP ISA permitted, but only D0-D15, S0-S31
+  AllowFPv4A = 5,     // v4 FP ISA permitted (implies use of v3 FP ISA)
+  AllowFPv4B = 6,     // v4 FP ISA was permitted, but only D0-D15, S0-S31
+  AllowFPARMv8A = 7,  // Use of the ARM v8-A FP ISA was permitted
+  AllowFPARMv8B = 8,  // Use of the ARM v8-A FP ISA was permitted, but only
+                      // D0-D15, S0-S31
+
+  // Tag_WMMX_arch, (=11), uleb128
+  AllowWMMXv1 = 1,  // The user permitted this entity to use WMMX v1
+  AllowWMMXv2 = 2,  // The user permitted this entity to use WMMX v2
+
+  // Tag_Advanced_SIMD_arch, (=12), uleb128
+  AllowNeon = 1,      // SIMDv1 was permitted
+  AllowNeon2 = 2,     // SIMDv2 was permitted (Half-precision FP, MAC operations)
+  AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
+  AllowNeonARMv8_1a = 4,// ARM v8.1-A SIMD was permitted (RDMA)
+
+  // Tag_ABI_PCS_R9_use, (=14), uleb128
+  R9IsGPR = 0,        // R9 used as v6 (just another callee-saved register)
+  R9IsSB = 1,         // R9 used as a global static base register
+  R9IsTLSPointer = 2, // R9 used as a thread local storage pointer
+  R9Reserved = 3,     // R9 not used by code associated with attributed entity
+
+  // Tag_ABI_PCS_RW_data, (=15), uleb128
+  AddressRWPCRel = 1, // Address RW static data PC-relative
+  AddressRWSBRel = 2, // Address RW static data SB-relative
+  AddressRWNone = 3, // No RW static data permitted
+
+  // Tag_ABI_PCS_RO_data, (=16), uleb128
+  AddressROPCRel = 1, // Address RO static data PC-relative
+  AddressRONone = 2, // No RO static data permitted
+
+  // Tag_ABI_PCS_GOT_use, (=17), uleb128
+  AddressDirect = 1, // Address imported data directly
+  AddressGOT = 2, // Address imported data indirectly (via GOT)
+
+  // Tag_ABI_PCS_wchar_t, (=18), uleb128
+  WCharProhibited = 0,  // wchar_t is not used
+  WCharWidth2Bytes = 2, // sizeof(wchar_t) == 2
+  WCharWidth4Bytes = 4, // sizeof(wchar_t) == 4
+
+  // Tag_ABI_align_needed, (=24), uleb128
+  Align8Byte = 1,
+  Align4Byte = 2,
+  AlignReserved = 3,
+
+  // Tag_ABI_align_preserved, (=25), uleb128
+  AlignNotPreserved = 0,
+  AlignPreserve8Byte = 1,
+  AlignPreserveAll = 2,
+
+  // Tag_ABI_FP_denormal, (=20), uleb128
+  PositiveZero = 0,
+  IEEEDenormals = 1,
+  PreserveFPSign = 2, // sign when flushed-to-zero is preserved
+
+  // Tag_ABI_FP_number_model, (=23), uleb128
+  AllowIEEENormal = 1,
+  AllowRTABI = 2,  // numbers, infinities, and one quiet NaN (see [RTABI])
+  AllowIEEE754 = 3, // this code may use all the IEEE 754-defined FP encodings
+
+  // Tag_ABI_enum_size, (=26), uleb128
+  EnumProhibited = 0, // The user prohibited the use of enums when building
+                      // this entity.
+  EnumSmallest = 1,   // Enum is smallest container big enough to hold all
+                      // values.
+  Enum32Bit = 2,      // Enum is at least 32 bits.
+  Enum32BitABI = 3,   // Every enumeration visible across an ABI-complying
+                      // interface contains a value needing 32 bits to encode
+                      // it; other enums can be containerized.
+
+  // Tag_ABI_HardFP_use, (=27), uleb128
+  HardFPImplied = 0,          // FP use should be implied by Tag_FP_arch
+  HardFPSinglePrecision = 1,  // Single-precision only
+
+  // Tag_ABI_VFP_args, (=28), uleb128
+  BaseAAPCS = 0,
+  HardFPAAPCS = 1,
+
+  // Tag_FP_HP_extension, (=36), uleb128
+  AllowHPFP = 1, // Allow use of Half Precision FP
+
+  // Tag_ABI_FP_16bit_format, (=38), uleb128
+  FP16FormatIEEE = 1,
+  FP16VFP3 = 2,
+
+  // Tag_MPextension_use, (=42), uleb128
+  AllowMP = 1, // Allow use of MP extensions
+
+  // Tag_DIV_use, (=44), uleb128
+  // Note: AllowDIVExt must be emitted if and only if the permission to use
+  // hardware divide cannot be conveyed using AllowDIVIfExists or DisallowDIV
+  AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no
+                        // info exists.
+  DisallowDIV = 1,      // Hardware divide explicitly disallowed.
+  AllowDIVExt = 2,      // Allow hardware divide as optional architecture
+                        // extension above the base arch specified by
+                        // Tag_CPU_arch and Tag_CPU_arch_profile.
+
+  // Tag_Virtualization_use, (=68), uleb128
+  AllowTZ = 1,
+  AllowVirtualization = 2,
+  AllowTZVirtualization = 3
+};
+
+} // namespace ARMBuildAttrs
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
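A sketch of the string-mapping helpers declared above: AttrTypeAsString() yields the canonical "Tag_*" spelling, and AttrTypeFromString() inverts it (returning a negative value for unknown names, per the in-tree implementation):

#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void demoTagNames() {
  // Prints "Tag_CPU_arch" (HasTagPrefix defaults to true).
  outs() << ARMBuildAttrs::AttrTypeAsString(ARMBuildAttrs::CPU_arch) << "\n";
  int Tag = ARMBuildAttrs::AttrTypeFromString("Tag_CPU_arch");
  if (Tag >= 0)
    outs() << "tag number: " << Tag << "\n"; // 6
}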
diff --git a/linux-x64/clang/include/llvm/Support/ARMEHABI.h b/linux-x64/clang/include/llvm/Support/ARMEHABI.h
new file mode 100644
index 0000000..9b052df
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ARMEHABI.h
@@ -0,0 +1,134 @@
+//===--- ARMEHABI.h - ARM Exception Handling ABI ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the constants for the ARM unwind opcodes and exception
+// handling table entry kinds.
+//
+// The enumerations and constants in this file reflect the ARM EHABI
+// Specification as published by ARM.
+//
+// Exception Handling ABI for the ARM Architecture r2.09 - November 30, 2012
+//
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMEHABI_H
+#define LLVM_SUPPORT_ARMEHABI_H
+
+namespace llvm {
+namespace ARM {
+namespace EHABI {
+  /// ARM exception handling table entry kinds
+  enum EHTEntryKind {
+    EHT_GENERIC = 0x00,
+    EHT_COMPACT = 0x80
+  };
+
+  enum {
+    /// Special entry for functions that never unwind
+    EXIDX_CANTUNWIND = 0x1
+  };
+
+  /// ARM-defined frame unwinding opcodes
+  enum UnwindOpcodes {
+    // Format: 00xxxxxx
+    // Purpose: vsp = vsp + ((x << 2) + 4)
+    UNWIND_OPCODE_INC_VSP = 0x00,
+
+    // Format: 01xxxxxx
+    // Purpose: vsp = vsp - ((x << 2) + 4)
+    UNWIND_OPCODE_DEC_VSP = 0x40,
+
+    // Format: 10000000 00000000
+    // Purpose: refuse to unwind
+    UNWIND_OPCODE_REFUSE = 0x8000,
+
+    // Format: 1000xxxx xxxxxxxx
+    // Purpose: pop r[15:12], r[11:4]
+    // Constraint: x != 0
+    UNWIND_OPCODE_POP_REG_MASK_R4 = 0x8000,
+
+    // Format: 1001xxxx
+    // Purpose: vsp = r[x]
+    // Constraint: x != 13 && x != 15
+    UNWIND_OPCODE_SET_VSP = 0x90,
+
+    // Format: 10100xxx
+    // Purpose: pop r[(4+x):4]
+    UNWIND_OPCODE_POP_REG_RANGE_R4 = 0xa0,
+
+    // Format: 10101xxx
+    // Purpose: pop r14, r[(4+x):4]
+    UNWIND_OPCODE_POP_REG_RANGE_R4_R14 = 0xa8,
+
+    // Format: 10110000
+    // Purpose: finish
+    UNWIND_OPCODE_FINISH = 0xb0,
+
+    // Format: 10110001 0000xxxx
+    // Purpose: pop r[3:0]
+    // Constraint: x != 0
+    UNWIND_OPCODE_POP_REG_MASK = 0xb100,
+
+    // Format: 10110010 x(uleb128)
+    // Purpose: vsp = vsp + ((x << 2) + 0x204)
+    UNWIND_OPCODE_INC_VSP_ULEB128 = 0xb2,
+
+    // Format: 10110011 xxxxyyyy
+    // Purpose: pop d[(x+y):x]
+    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX = 0xb300,
+
+    // Format: 10111xxx
+    // Purpose: pop d[(8+x):8]
+    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX_D8 = 0xb8,
+
+    // Format: 11000xxx
+    // Purpose: pop wR[(10+x):10]
+    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE_WR10 = 0xc0,
+
+    // Format: 11000110 xxxxyyyy
+    // Purpose: pop wR[(x+y):x]
+    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE = 0xc600,
+
+    // Format: 11000111 0000xxxx
+    // Purpose: pop wCGR[3:0]
+    // Constraint: x != 0
+    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_MASK = 0xc700,
+
+    // Format: 11001000 xxxxyyyy
+    // Purpose: pop d[(16+x+y):(16+x)]
+    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 = 0xc800,
+
+    // Format: 11001001 xxxxyyyy
+    // Purpose: pop d[(x+y):x]
+    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD = 0xc900,
+
+    // Format: 11010xxx
+    // Purpose: pop d[(8+x):8]
+    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0
+  };
+
+  /// ARM-defined Personality Routine Index
+  enum PersonalityRoutineIndex {
+    // To make the exception handling table more compact, ARM defined
+    // several personality routines in EHABI.  There are currently 3
+    // different personality routines in ARM EHABI, and the encoding
+    // allows for at most 16 pre-defined personality routines.
+    AEABI_UNWIND_CPP_PR0 = 0,
+    AEABI_UNWIND_CPP_PR1 = 1,
+    AEABI_UNWIND_CPP_PR2 = 2,
+
+    NUM_PERSONALITY_INDEX
+  };
+} // namespace EHABI
+} // namespace ARM
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_ARMEHABI_H
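A sketch of composing a compact unwind program from the opcodes above. The sequence encodes "vsp += 16; pop {r4-r7, r14}; finish" for a hypothetical function that saved four callee-saved registers plus lr:

#include "llvm/Support/ARMEHABI.h"
#include <cstdint>
#include <vector>

using namespace llvm::ARM::EHABI;

std::vector<uint8_t> buildUnwindProgram() {
  std::vector<uint8_t> Ops;
  // 00xxxxxx: vsp = vsp + ((x << 2) + 4); x = 3 adds 16 bytes.
  Ops.push_back(UNWIND_OPCODE_INC_VSP | 0x03);
  // 10101xxx: pop r14 and r[(4+x):4]; x = 3 pops r4-r7 and r14.
  Ops.push_back(UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | 0x03);
  // 10110000: finish.
  Ops.push_back(UNWIND_OPCODE_FINISH);
  return Ops;
}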
diff --git a/linux-x64/clang/include/llvm/Support/ARMTargetParser.def b/linux-x64/clang/include/llvm/Support/ARMTargetParser.def
new file mode 100644
index 0000000..6c8eff1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ARMTargetParser.def
@@ -0,0 +1,264 @@
+//===- ARMTargetParser.def - ARM target parsing defines ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides defines to build up the ARM target parser's logic.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef ARM_FPU
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)
+#endif
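+// The ARM_FPU(...) entries below are expanded with whatever definition of
+// ARM_FPU is in effect when this file is included.  As a sketch (hypothetical
+// client code; the real consumers live in LLVM's target parser), a name table
+// can be generated like so:
+//
+//   struct FPUEntry { const char *Name; unsigned ID; };
+//   static const FPUEntry FPUTable[] = {
+//   #define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)           \
+//     {NAME, KIND},
+//   #include "llvm/Support/ARMTargetParser.def"
+//   };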
+ARM_FPU("invalid", FK_INVALID, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("none", FK_NONE, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfp", FK_VFP, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfpv2", FK_VFPV2, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfpv3", FK_VFPV3, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfpv3-fp16", FK_VFPV3_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfpv3-d16", FK_VFPV3_D16, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::D16)
+ARM_FPU("vfpv3-d16-fp16", FK_VFPV3_D16_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::D16)
+ARM_FPU("vfpv3xd", FK_VFPV3XD, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::SP_D16)
+ARM_FPU("vfpv3xd-fp16", FK_VFPV3XD_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::SP_D16)
+ARM_FPU("vfpv4", FK_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("vfpv4-d16", FK_VFPV4_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::D16)
+ARM_FPU("fpv4-sp-d16", FK_FPV4_SP_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::SP_D16)
+ARM_FPU("fpv5-d16", FK_FPV5_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::D16)
+ARM_FPU("fpv5-sp-d16", FK_FPV5_SP_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::SP_D16)
+ARM_FPU("fp-armv8", FK_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::None)
+ARM_FPU("neon", FK_NEON, FPUVersion::VFPV3, NeonSupportLevel::Neon, FPURestriction::None)
+ARM_FPU("neon-fp16", FK_NEON_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::Neon, FPURestriction::None)
+ARM_FPU("neon-vfpv4", FK_NEON_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::Neon, FPURestriction::None)
+ARM_FPU("neon-fp-armv8", FK_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Neon, FPURestriction::None)
+ARM_FPU("crypto-neon-fp-armv8", FK_CRYPTO_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Crypto,
+        FPURestriction::None)
+ARM_FPU("softvfp", FK_SOFTVFP, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
+#undef ARM_FPU
+
+#ifndef ARM_ARCH
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
+#endif
+ARM_ARCH("invalid", INVALID, "", "",
+          ARMBuildAttrs::CPUArch::Pre_v4, FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv2", ARMV2, "2", "v2", ARMBuildAttrs::CPUArch::Pre_v4,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv2a", ARMV2A, "2A", "v2a", ARMBuildAttrs::CPUArch::Pre_v4,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv3", ARMV3, "3", "v3", ARMBuildAttrs::CPUArch::Pre_v4,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv3m", ARMV3M, "3M", "v3m", ARMBuildAttrs::CPUArch::Pre_v4,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv4", ARMV4, "4", "v4", ARMBuildAttrs::CPUArch::v4,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv4t", ARMV4T, "4T", "v4t", ARMBuildAttrs::CPUArch::v4T,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv5t", ARMV5T, "5T", "v5", ARMBuildAttrs::CPUArch::v5T,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv5te", ARMV5TE, "5TE", "v5e", ARMBuildAttrs::CPUArch::v5TE,
+          FK_NONE, ARM::AEK_DSP)
+ARM_ARCH("armv5tej", ARMV5TEJ, "5TEJ", "v5e", ARMBuildAttrs::CPUArch::v5TEJ,
+          FK_NONE, ARM::AEK_DSP)
+ARM_ARCH("armv6", ARMV6, "6", "v6", ARMBuildAttrs::CPUArch::v6,
+          FK_VFPV2, ARM::AEK_DSP)
+ARM_ARCH("armv6k", ARMV6K, "6K", "v6k", ARMBuildAttrs::CPUArch::v6K,
+          FK_VFPV2, ARM::AEK_DSP)
+ARM_ARCH("armv6t2", ARMV6T2, "6T2", "v6t2", ARMBuildAttrs::CPUArch::v6T2,
+          FK_NONE, ARM::AEK_DSP)
+ARM_ARCH("armv6kz", ARMV6KZ, "6KZ", "v6kz", ARMBuildAttrs::CPUArch::v6KZ,
+          FK_VFPV2, (ARM::AEK_SEC | ARM::AEK_DSP))
+ARM_ARCH("armv6-m", ARMV6M, "6-M", "v6m", ARMBuildAttrs::CPUArch::v6_M,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv7-a", ARMV7A, "7-A", "v7", ARMBuildAttrs::CPUArch::v7,
+          FK_NEON, ARM::AEK_DSP)
+ARM_ARCH("armv7ve", ARMV7VE, "7VE", "v7ve", ARMBuildAttrs::CPUArch::v7,
+          FK_NEON, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT |
+          ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
+ARM_ARCH("armv7-r", ARMV7R, "7-R", "v7r", ARMBuildAttrs::CPUArch::v7,
+          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
+ARM_ARCH("armv7-m", ARMV7M, "7-M", "v7m", ARMBuildAttrs::CPUArch::v7,
+          FK_NONE, ARM::AEK_HWDIVTHUMB)
+ARM_ARCH("armv7e-m", ARMV7EM, "7E-M", "v7em", ARMBuildAttrs::CPUArch::v7E_M,
+          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
+ARM_ARCH("armv8-a", ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
+         FK_CRYPTO_NEON_FP_ARMV8,
+         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
+ARM_ARCH("armv8.1-a", ARMV8_1A, "8.1-A", "v8.1a",
+         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
+ARM_ARCH("armv8.2-a", ARMV8_2A, "8.2-A", "v8.2a",
+         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
+ARM_ARCH("armv8.3-a", ARMV8_3A, "8.3-A", "v8.3a",
+         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
+ARM_ARCH("armv8-r", ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
+          FK_NEON_FP_ARMV8,
+          (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
+           ARM::AEK_DSP | ARM::AEK_CRC))
+ARM_ARCH("armv8-m.base", ARMV8MBaseline, "8-M.Baseline", "v8m.base",
+          ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIVTHUMB)
+ARM_ARCH("armv8-m.main", ARMV8MMainline, "8-M.Mainline", "v8m.main",
+          ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIVTHUMB)
+// Non-standard Arch names.
+ARM_ARCH("iwmmxt", IWMMXT, "iwmmxt", "", ARMBuildAttrs::CPUArch::v5TE,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("iwmmxt2", IWMMXT2, "iwmmxt2", "", ARMBuildAttrs::CPUArch::v5TE,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("xscale", XSCALE, "xscale", "v5e", ARMBuildAttrs::CPUArch::v5TE,
+          FK_NONE, ARM::AEK_NONE)
+ARM_ARCH("armv7s", ARMV7S, "7-S", "v7s", ARMBuildAttrs::CPUArch::v7,
+          FK_NEON_VFPV4, ARM::AEK_DSP)
+ARM_ARCH("armv7k", ARMV7K, "7-K", "v7k", ARMBuildAttrs::CPUArch::v7,
+          FK_NONE, ARM::AEK_DSP)
+#undef ARM_ARCH
+
+#ifndef ARM_ARCH_EXT_NAME
+#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
+#endif
+// FIXME: This would be nicer were it tablegen
+ARM_ARCH_EXT_NAME("invalid",  ARM::AEK_INVALID,  nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("none",     ARM::AEK_NONE,     nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("crc",      ARM::AEK_CRC,      "+crc",   "-crc")
+ARM_ARCH_EXT_NAME("crypto",   ARM::AEK_CRYPTO,   "+crypto","-crypto")
+ARM_ARCH_EXT_NAME("dotprod",  ARM::AEK_DOTPROD,  "+dotprod","-dotprod")
+ARM_ARCH_EXT_NAME("dsp",      ARM::AEK_DSP,      "+dsp",   "-dsp")
+ARM_ARCH_EXT_NAME("fp",       ARM::AEK_FP,       nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("idiv",     (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB), nullptr, nullptr)
+ARM_ARCH_EXT_NAME("mp",       ARM::AEK_MP,       nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("simd",     ARM::AEK_SIMD,     nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("sec",      ARM::AEK_SEC,      nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("virt",     ARM::AEK_VIRT,     nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("fp16",     ARM::AEK_FP16,     "+fullfp16",  "-fullfp16")
+ARM_ARCH_EXT_NAME("ras",      ARM::AEK_RAS,      "+ras", "-ras")
+ARM_ARCH_EXT_NAME("os",       ARM::AEK_OS,       nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("iwmmxt",   ARM::AEK_IWMMXT,   nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("iwmmxt2",  ARM::AEK_IWMMXT2,  nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("maverick", ARM::AEK_MAVERICK, nullptr,  nullptr)
+ARM_ARCH_EXT_NAME("xscale",   ARM::AEK_XSCALE,   nullptr,  nullptr)
+#undef ARM_ARCH_EXT_NAME
+
+#ifndef ARM_HW_DIV_NAME
+#define ARM_HW_DIV_NAME(NAME, ID)
+#endif
+ARM_HW_DIV_NAME("invalid", ARM::AEK_INVALID)
+ARM_HW_DIV_NAME("none", ARM::AEK_NONE)
+ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIVTHUMB)
+ARM_HW_DIV_NAME("arm", ARM::AEK_HWDIVARM)
+ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
+#undef ARM_HW_DIV_NAME
+
+#ifndef ARM_CPU_NAME
+#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
+#endif
+ARM_CPU_NAME("arm2", ARMV2, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm3", ARMV2A, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm6", ARMV3, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm7m", ARMV3M, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm8", ARMV4, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm810", ARMV4, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("strongarm", ARMV4, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("strongarm110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("strongarm1100", ARMV4, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("strongarm1110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm7tdmi", ARMV4T, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm7tdmi-s", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm710t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm720t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm9", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm9tdmi", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm920", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm920t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm922t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm9312", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm940t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("ep9312", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm10tdmi", ARMV5T, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1020t", ARMV5T, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm9e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm946e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm966e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm968e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm10e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1020e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1022e", ARMV5TE, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm926ej-s", ARMV5TEJ, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1136j-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1136jf-s", ARMV6, FK_VFPV2, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1136jz-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1176j-s", ARMV6K, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1176jz-s", ARMV6KZ, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("mpcore", ARMV6K, FK_VFPV2, false, ARM::AEK_NONE)
+ARM_CPU_NAME("mpcorenovfp", ARMV6K, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1176jzf-s", ARMV6KZ, FK_VFPV2, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1156t2-s", ARMV6T2, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1156t2f-s", ARMV6T2, FK_VFPV2, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m0", ARMV6M, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m0plus", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m1", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("sc000", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-a5", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_SEC | ARM::AEK_MP))
+ARM_CPU_NAME("cortex-a7", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+              ARM::AEK_HWDIVTHUMB))
+ARM_CPU_NAME("cortex-a8", ARMV7A, FK_NEON, false, ARM::AEK_SEC)
+ARM_CPU_NAME("cortex-a9", ARMV7A, FK_NEON_FP16, false, (ARM::AEK_SEC | ARM::AEK_MP))
+ARM_CPU_NAME("cortex-a12", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+              ARM::AEK_HWDIVTHUMB))
+ARM_CPU_NAME("cortex-a15", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+              ARM::AEK_HWDIVTHUMB))
+ARM_CPU_NAME("cortex-a17", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+              ARM::AEK_HWDIVTHUMB))
+ARM_CPU_NAME("krait", ARMV7A, FK_NEON_VFPV4, false,
+             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
+ARM_CPU_NAME("cortex-r4", ARMV7R, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-r4f", ARMV7R, FK_VFPV3_D16, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-r5", ARMV7R, FK_VFPV3_D16, false,
+             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
+ARM_CPU_NAME("cortex-r7", ARMV7R, FK_VFPV3_D16_FP16, false,
+             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
+ARM_CPU_NAME("cortex-r8", ARMV7R, FK_VFPV3_D16_FP16, false,
+             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
+ARM_CPU_NAME("cortex-r52", ARMV8R, FK_NEON_FP_ARMV8, true, ARM::AEK_NONE)
+ARM_CPU_NAME("sc300", ARMV7M, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m3", ARMV7M, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m4", ARMV7EM, FK_FPV4_SP_D16, true, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m7", ARMV7EM, FK_FPV5_D16, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m23", ARMV8MBaseline, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("cortex-m33", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
+ARM_CPU_NAME("cortex-a32", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a55", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
+ARM_CPU_NAME("cortex-a57", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a72", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a73", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("cortex-a75", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
+ARM_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m1", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+// Non-standard Arch names.
+ARM_CPU_NAME("iwmmxt", IWMMXT, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("xscale", XSCALE, FK_NONE, true, ARM::AEK_NONE)
+ARM_CPU_NAME("swift", ARMV7S, FK_NEON_VFPV4, true,
+             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
+// Invalid CPU
+ARM_CPU_NAME("invalid", INVALID, FK_INVALID, true, ARM::AEK_INVALID)
+#undef ARM_CPU_NAME
diff --git a/linux-x64/clang/include/llvm/Support/ARMWinEH.h b/linux-x64/clang/include/llvm/Support/ARMWinEH.h
new file mode 100644
index 0000000..1463629
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ARMWinEH.h
@@ -0,0 +1,382 @@
+//===-- llvm/Support/ARMWinEH.h - Windows on ARM EH Constants ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMWINEH_H
+#define LLVM_SUPPORT_ARMWINEH_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace ARM {
+namespace WinEH {
+enum class RuntimeFunctionFlag {
+  RFF_Unpacked,       /// unpacked entry
+  RFF_Packed,         /// packed entry
+  RFF_PackedFragment, /// packed entry representing a fragment
+  RFF_Reserved,       /// reserved
+};
+
+enum class ReturnType {
+  RT_POP,             /// return via pop {pc} (L flag must be set)
+  RT_B,               /// 16-bit branch
+  RT_BW,              /// 32-bit branch
+  RT_NoEpilogue,      /// no epilogue (fragment)
+};
+
+/// RuntimeFunction - An entry in the table of procedure data (.pdata)
+///
+///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------------------------------------------------------+
+/// |                     Function Start RVA                        |
+/// +-------------------+-+-+-+-----+-+---+---------------------+---+
+/// |    Stack Adjust   |C|L|R| Reg |H|Ret|   Function Length   |Flg|
+/// +-------------------+-+-+-+-----+-+---+---------------------+---+
+///
+/// Flag : 2-bit field with the following meanings:
+///   - 00 = packed unwind data not used; remaining bits point to .xdata record
+///   - 01 = packed unwind data
+///   - 10 = packed unwind data, function assumed to have no prologue; useful
+///          for function fragments that are discontiguous with the start of the
+///          function
+///   - 11 = reserved
+/// Function Length : 11-bit field providing the length of the entire function
+///                   in bytes, divided by 2; if the function is greater than
+///                   4KB, a full .xdata record must be used instead
+/// Ret : 2-bit field indicating how the function returns
+///   - 00 = return via pop {pc} (the L bit must be set)
+///   - 01 = return via 16-bit branch
+///   - 10 = return via 32-bit branch
+///   - 11 = no epilogue; useful for function fragments that may only contain a
+///          prologue but the epilogue is elsewhere
+/// H : 1-bit flag indicating whether the function "homes" the integer parameter
+///     registers (r0-r3), allocating 16 bytes on the stack
+/// Reg : 3-bit field indicating the index of the last saved non-volatile
+///       register.  If the R bit is set to 0, then only integer registers are
+///       saved (r4-rN, where N is 4 + Reg).  If the R bit is set to 1, then
+///       only floating-point registers are being saved (d8-dN, where N is
+///       8 + Reg).  The special case of the R bit being set to 1 and Reg equal
+///       to 7 indicates that no registers are saved.
+/// R : 1-bit flag indicating whether the non-volatile registers are integer or
+///     floating-point.  0 indicates integer, 1 indicates floating-point.  The
+///     special case of the R-flag being set and Reg being set to 7 indicates
+///     that no non-volatile registers are saved.
+/// L : 1-bit flag indicating whether the function saves/restores the link
+///     register (LR)
+/// C : 1-bit flag indicating whether the function includes extra instructions
+///     to setup a frame chain for fast walking.  If this flag is set, r11 is
+///     implicitly added to the list of saved non-volatile integer registers.
+/// Stack Adjust : 10-bit field indicating the number of bytes of stack that are
+///                allocated for this function.  Only values between 0x000 and
+///                0x3f3 can be directly encoded.  If the value is 0x3f4 or
+///                greater, then the low 4 bits have special meaning as follows:
+///                - Bit 0-1
+///                  indicate the number of words of adjustment (1-4), minus 1
+///                - Bit 2
+///                  indicates if the prologue combined adjustment into push
+///                - Bit 3
+///                  indicates if the epilogue combined adjustment into pop
+///
+/// RESTRICTIONS:
+///   - IF C is SET:
+///     + L flag must be set since frame chaining requires r11 and lr
+///     + r11 must NOT be included in the set of registers described by Reg
+///   - IF Ret is 0:
+///     + L flag must be set
+
+// NOTE: RuntimeFunction is meant to be a simple class that provides raw access
+// to all fields in the structure.  The accessor methods reflect the names of
+// the bitfields that they correspond to.  Although some obvious simplifications
+// are possible via merging of methods, that would prevent the use of this
+// class to fully inspect the contents of the data structure, which is
+// particularly useful for tools such as llvm-readobj and for testing.
+
+class RuntimeFunction {
+public:
+  const support::ulittle32_t BeginAddress;
+  const support::ulittle32_t UnwindData;
+
+  RuntimeFunction(const support::ulittle32_t *Data)
+    : BeginAddress(Data[0]), UnwindData(Data[1]) {}
+
+  RuntimeFunction(const support::ulittle32_t BeginAddress,
+                  const support::ulittle32_t UnwindData)
+    : BeginAddress(BeginAddress), UnwindData(UnwindData) {}
+
+  RuntimeFunctionFlag Flag() const {
+    return RuntimeFunctionFlag(UnwindData & 0x3);
+  }
+
+  uint32_t ExceptionInformationRVA() const {
+    assert(Flag() == RuntimeFunctionFlag::RFF_Unpacked &&
+           "unpacked form required for this operation");
+    return (UnwindData & ~0x3);
+  }
+
+  uint32_t PackedUnwindData() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return (UnwindData & ~0x3);
+  }
+  uint32_t FunctionLength() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return (((UnwindData & 0x00001ffc) >> 2) << 1);
+  }
+  ReturnType Ret() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    assert(((UnwindData & 0x00006000) || L()) && "L must be set to 1");
+    return ReturnType((UnwindData & 0x00006000) >> 13);
+  }
+  bool H() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return ((UnwindData & 0x00008000) >> 15);
+  }
+  uint8_t Reg() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return ((UnwindData & 0x00070000) >> 16);
+  }
+  bool R() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return ((UnwindData & 0x00080000) >> 19);
+  }
+  bool L() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return ((UnwindData & 0x00100000) >> 20);
+  }
+  bool C() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    assert(((~UnwindData & 0x00200000) || L()) &&
+           "L flag must be set, chaining requires r11 and LR");
+    assert(((~UnwindData & 0x00200000) || (Reg() < 7) || R()) &&
+           "r11 must not be included in Reg; C implies r11");
+    return ((UnwindData & 0x00200000) >> 21);
+  }
+  uint16_t StackAdjust() const {
+    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+           "packed form required for this operation");
+    return ((UnwindData & 0xffc00000) >> 22);
+  }
+};
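+
+// Example (a sketch, hypothetical value): UnwindData == 0x00110031 decodes via
+// the accessors above as:
+//   Flag()           == RuntimeFunctionFlag::RFF_Packed  (low two bits are 01)
+//   FunctionLength() == 24 bytes  (encoded field is 0xc, i.e. 24 / 2)
+//   Ret()            == ReturnType::RT_POP  (return via pop {pc}; L() is set)
+//   Reg() == 1, R() == 0  (r4-r5 saved), H() == 0, C() == 0, StackAdjust() == 0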
+
+/// PrologueFolding - pseudo-flag derived from Stack Adjust indicating that the
+/// prologue has stack adjustment combined into the push
+inline bool PrologueFolding(const RuntimeFunction &RF) {
+  return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x4);
+}
+/// Epilogue - pseudo-flag derived from Stack Adjust indicating that the
+/// epilogue has stack adjustment combined into the pop
+inline bool EpilogueFolding(const RuntimeFunction &RF) {
+  return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x8);
+}
+/// StackAdjustment - calculated stack adjustment in words.  The stack
+/// adjustment should be determined via this function to account for the
+/// special encoding used when the value is >= 0x3f4.
+inline uint16_t StackAdjustment(const RuntimeFunction &RF) {
+  uint16_t Adjustment = RF.StackAdjust();
+  if (Adjustment >= 0x3f4)
+    // For the special encoding, bits 0-1 hold the number of folded words,
+    // minus one.
+    return (Adjustment & 0x3) + 1;
+  return Adjustment;
+}
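+
+// Example (a sketch, hypothetical value): StackAdjust() == 0x3f6 uses the
+// special encoding (0x3f6 >= 0x3f4).  Bit 2 is set, so PrologueFolding() is
+// true; bit 3 is clear, so EpilogueFolding() is false; and StackAdjustment()
+// == (0x3f6 & 0x3) + 1 == 3 words folded into the push.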
+
+/// SavedRegisterMask - Utility function to calculate the set of saved general
+/// purpose (r0-r15) and VFP (d0-d31) registers.
+std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
+
+/// ExceptionDataRecord - An entry in the table of exception data (.xdata)
+///
+///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +-------+---------+-+-+-+---+-----------------------------------+
+/// | C Wrd | Epi Cnt |F|E|X|Ver|         Function Length           |
+/// +----------------+--------------+-------------------------------+
+/// |    Reserved    |Ex. Code Words|   (Extended Epilogue Count)   |
+/// +----------------+--------------+-------------------------------+
+///
+/// Function Length : 18-bit field indicating the total length of the function
+///                   in bytes divided by 2.  If a function is larger than
+///                   512KB, then multiple pdata and xdata records must be used.
+/// Vers : 2-bit field describing the version of the remaining structure.  Only
+///        version 0 is currently defined (values 1-3 are not permitted).
+/// X : 1-bit field indicating the presence of exception data
+/// E : 1-bit field indicating that the single epilogue is packed into the
+///     header
+/// F : 1-bit field indicating that the record describes a function fragment
+///     (implies that no prologue is present, and prologue processing should be
+///     skipped)
+/// Epilogue Count : 5-bit field that differs in meaning based on the E field.
+///
+///                  If E is set, then this field specifies the index of the
+///                  first unwind code describing the (only) epilogue.
+///
+///                  Otherwise, this field indicates the number of exception
+///                  scopes.  If more than 31 scopes exist, then this field and
+///                  the Code Words field must both be set to 0 to indicate that
+///                  an extension word is required.
+/// Code Words : 4-bit field that specifies the number of 32-bit words needed
+///              to contain all the unwind codes.  If more than 15 words (60
+///              code bytes) are required, then this field and the Epilogue
+///              Count field must both be set to 0 to indicate that an
+///              extension word is required.
+/// Extended Epilogue Count, Extended Code Words :
+///                          Valid only if Epilogue Count and Code Words are
+///                          both set to 0.  Provides an 8-bit extended code
+///                          word count and 16 bits for the epilogue count
+///
+///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +----------------+------+---+-----------------------------------+
+/// |  Ep Start Idx  | Cond |Res|       Epilogue Start Offset       |
+/// +----------------+------+---+-----------------------------------+
+///
+/// If the E bit is unset in the header, the header is followed by a series of
+/// epilogue scopes, which are sorted by their offset.
+///
+/// Epilogue Start Offset: 18-bit field encoding the offset of the epilogue,
+///                        relative to the start of the function, in bytes
+///                        divided by two
+/// Res : 2-bit field reserved for future expansion (must be set to 0)
+/// Condition : 4-bit field providing the condition under which the epilogue is
+///             executed.  Unconditional epilogues should set this field to 0xe.
+///             Epilogues must be entirely conditional or unconditional, and in
+///             Thumb-2 mode.  The epilogue beings with the first instruction
+///             after the IT opcode.
+/// Epilogue Start Index : 8-bit field indicating the byte index of the first
+///                        unwind code describing the epilogue
+///
+///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------+---------------+---------------+---------------+
+/// | Unwind Code 3 | Unwind Code 2 | Unwind Code 1 | Unwind Code 0 |
+/// +---------------+---------------+---------------+---------------+
+///
+/// The epilogue scopes are followed by the byte code that describes the
+/// unwinding.  It is padded up to word alignment, and the bytes are stored in
+/// little-endian order.
+///
+///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------------------------------------------------------+
+/// |           Exception Handler RVA (requires X = 1)              |
+/// +---------------------------------------------------------------+
+/// |  (possibly followed by data required for exception handler)   |
+/// +---------------------------------------------------------------+
+///
+/// If the X bit is set in the header, the unwind byte code is followed by the
+/// exception handler information.  This consists of one Exception Handler RVA,
+/// which is the address of the exception handler, followed immediately by the
+/// variable-length data associated with the exception handler.
+///
+
+struct EpilogueScope {
+  const support::ulittle32_t ES;
+
+  EpilogueScope(const support::ulittle32_t Data) : ES(Data) {}
+  uint32_t EpilogueStartOffset() const {
+    return (ES & 0x0003ffff);
+  }
+  uint8_t Res() const {
+    return ((ES & 0x000c0000) >> 18);
+  }
+  uint8_t Condition() const {
+    return ((ES & 0x00f00000) >> 20);
+  }
+  uint8_t EpilogueStartIndex() const {
+    return ((ES & 0xff000000) >> 24);
+  }
+};
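+
+// Example (a sketch, hypothetical value): ES == 0x05e00040 decodes as
+//   EpilogueStartOffset() == 0x40  (the epilogue begins 0x80 bytes into the
+//   function), Res() == 0, Condition() == 0xe (unconditional), and
+//   EpilogueStartIndex() == 5 (byte index of its first unwind code).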
+
+struct ExceptionDataRecord;
+inline size_t HeaderWords(const ExceptionDataRecord &XR);
+
+struct ExceptionDataRecord {
+  const support::ulittle32_t *Data;
+
+  ExceptionDataRecord(const support::ulittle32_t *Data) : Data(Data) {}
+
+  uint32_t FunctionLength() const {
+    return (Data[0] & 0x0003ffff);
+  }
+
+  uint8_t Vers() const {
+    return (Data[0] & 0x000C0000) >> 18;
+  }
+
+  bool X() const {
+    return ((Data[0] & 0x00100000) >> 20);
+  }
+
+  bool E() const {
+    return ((Data[0] & 0x00200000) >> 21);
+  }
+
+  bool F() const {
+    return ((Data[0] & 0x00400000) >> 22);
+  }
+
+  // Note: the extended epilogue count (in the second header word) is 16 bits
+  // wide, so this must return a uint16_t to avoid truncation.
+  uint16_t EpilogueCount() const {
+    if (HeaderWords(*this) == 1)
+      return (Data[0] & 0x0f800000) >> 23;
+    return Data[1] & 0x0000ffff;
+  }
+
+  uint8_t CodeWords() const {
+    if (HeaderWords(*this) == 1)
+      return (Data[0] & 0xf0000000) >> 28;
+    return (Data[1] & 0x00ff0000) >> 16;
+  }
+
+  ArrayRef<support::ulittle32_t> EpilogueScopes() const {
+    assert(E() == 0 && "epilogue scopes are only present when the E bit is 0");
+    size_t Offset = HeaderWords(*this);
+    return makeArrayRef(&Data[Offset], EpilogueCount());
+  }
+
+  ArrayRef<uint8_t> UnwindByteCode() const {
+    const size_t Offset = HeaderWords(*this)
+                        + (E() ? 0 :  EpilogueCount());
+    const uint8_t *ByteCode =
+      reinterpret_cast<const uint8_t *>(&Data[Offset]);
+    return makeArrayRef(ByteCode, CodeWords() * sizeof(uint32_t));
+  }
+
+  uint32_t ExceptionHandlerRVA() const {
+    assert(X() && "Exception Handler RVA is only valid if the X bit is set");
+    return Data[HeaderWords(*this) + EpilogueCount() + CodeWords()];
+  }
+
+  uint32_t ExceptionHandlerParameter() const {
+    assert(X() && "Exception Handler RVA is only valid if the X bit is set");
+    return Data[HeaderWords(*this) + EpilogueCount() + CodeWords() + 1];
+  }
+};
+
+inline size_t HeaderWords(const ExceptionDataRecord &XR) {
+  return (XR.Data[0] & 0xff800000) ? 1 : 2;
+}
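+
+// Example (a sketch, hypothetical value): a record XR whose first word Data[0]
+// is 0x10200040 has non-zero bits within 0xff800000, so HeaderWords(XR) == 1,
+// and it decodes as
+//   FunctionLength() == 0x40 (the function is 0x80 bytes long), Vers() == 0,
+//   X() == 0, E() == 1 (single epilogue packed into the header), F() == 0,
+//   EpilogueCount() == 0 (index of the epilogue's first unwind code), and
+//   CodeWords() == 1 (the unwind codes occupy Data[1]).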
+}
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/AlignOf.h b/linux-x64/clang/include/llvm/Support/AlignOf.h
new file mode 100644
index 0000000..abd19af
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/AlignOf.h
@@ -0,0 +1,146 @@
+//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ALIGNOF_H
+#define LLVM_SUPPORT_ALIGNOF_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+
+namespace llvm {
+
+/// \struct AlignedCharArray
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character array types. We have to build these up using a macro and explicit
+/// specialization to cope with MSVC (at least till 2015) where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray {
+  LLVM_ALIGNAS(Alignment) char buffer[Size];
+};
+
+#else // _MSC_VER
+
+/// \brief Create a type with an aligned char buffer.
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray;
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonably like 8-byte or 16-byte. Note that we can't
+// even include the declspec with the union that forces the alignment because
+// MSVC warns on the existence of the declspec despite the union member forcing
+// proper alignment.
+
+template<std::size_t Size>
+struct AlignedCharArray<1, Size> {
+  union {
+    char aligned;
+    char buffer[Size];
+  };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<2, Size> {
+  union {
+    short aligned;
+    char buffer[Size];
+  };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<4, Size> {
+  union {
+    int aligned;
+    char buffer[Size];
+  };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<8, Size> {
+  union {
+    double aligned;
+    char buffer[Size];
+  };
+};
+
+
+// The rest of these are provided with a __declspec(align(...)) and we simply
+// can't pass them by-value as function arguments on MSVC.
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+  template<std::size_t Size> \
+  struct AlignedCharArray<x, Size> { \
+    __declspec(align(x)) char buffer[Size]; \
+  };
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#endif // _MSC_VER
+
+namespace detail {
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+class AlignerImpl {
+  T1 t1; T2 t2; T3 t3; T4 t4; T5 t5; T6 t6; T7 t7; T8 t8; T9 t9; T10 t10;
+
+  AlignerImpl() = delete;
+};
+
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+union SizerImpl {
+  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
+       arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
+       arr9[sizeof(T9)], arr10[sizeof(T10)];
+};
+} // end namespace detail
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to ten types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// expose a char array buffer member which can be used as suitable storage for
+/// a placement new of any of these types. Support for more than ten types can
+/// be added at the cost of more boilerplate.
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+struct AlignedCharArrayUnion : llvm::AlignedCharArray<
+    alignof(llvm::detail::AlignerImpl<T1, T2, T3, T4, T5,
+                                      T6, T7, T8, T9, T10>),
+    sizeof(::llvm::detail::SizerImpl<T1, T2, T3, T4, T5,
+                                     T6, T7, T8, T9, T10>)> {
+};
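+
+// Example usage (a sketch): raw storage suitable for a placement new of any of
+// the listed types:
+//
+//   AlignedCharArrayUnion<int, double, void *> Storage;
+//   double *D = new (Storage.buffer) double(1.0);
+//   // *D is valid here; all listed types are trivially destructible, so no
+//   // explicit destructor call is needed before reusing the buffer.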
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ALIGNOF_H
diff --git a/linux-x64/clang/include/llvm/Support/Allocator.h b/linux-x64/clang/include/llvm/Support/Allocator.h
new file mode 100644
index 0000000..8ed4109
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Allocator.h
@@ -0,0 +1,494 @@
+//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
+/// of these conform to an LLVM "Allocator" concept which consists of an
+/// Allocate method accepting a size and alignment, and a Deallocate accepting
+/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
+/// Allocate and Deallocate for setting size and alignment based on the final
+/// type. These overloads are typically provided by a base class template \c
+/// AllocatorBase.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ALLOCATOR_H
+#define LLVM_SUPPORT_ALLOCATOR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// \brief CRTP base class providing obvious overloads for the core \c
+/// Allocate() methods of LLVM-style allocators.
+///
+/// This base class both documents the full public interface exposed by all
+/// LLVM-style allocators, and redirects all of the overloads to a single core
+/// set of methods which the derived class must define.
+template <typename DerivedT> class AllocatorBase {
+public:
+  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
+  /// must be implemented by \c DerivedT.
+  void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
+    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
+                      &AllocatorBase::Allocate) !=
+                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
+                          &DerivedT::Allocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Allocate(size_t, size_t) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
+  }
+
+  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+  /// allocator.
+  void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+                      &AllocatorBase::Deallocate) !=
+                      static_cast<void (DerivedT::*)(const void *, size_t)>(
+                          &DerivedT::Deallocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Deallocate(void *) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
+  }
+
+  // The rest of these methods are helpers that redirect to one of the above
+  // core methods.
+
+  /// \brief Allocate space for a sequence of objects without constructing them.
+  template <typename T> T *Allocate(size_t Num = 1) {
+    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
+  }
+
+  /// \brief Deallocate space for a sequence of objects without constructing them.
+  template <typename T>
+  typename std::enable_if<
+      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+  Deallocate(T *Ptr, size_t Num = 1) {
+    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
+  }
+};
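+
+// Example (a sketch): a minimal allocator built on this CRTP base.  Only the
+// two core methods need to be defined; the typed overloads are inherited
+// (MallocAllocator below is essentially this pattern):
+//
+//   class MyAllocator : public AllocatorBase<MyAllocator> {
+//   public:
+//     void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
+//     void Deallocate(const void *Ptr, size_t /*Size*/) {
+//       free(const_cast<void *>(Ptr));
+//     }
+//     using AllocatorBase<MyAllocator>::Allocate;
+//     using AllocatorBase<MyAllocator>::Deallocate;
+//   };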
+
+class MallocAllocator : public AllocatorBase<MallocAllocator> {
+public:
+  void Reset() {}
+
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+                                                size_t /*Alignment*/) {
+    void *memPtr = malloc(Size);
+    if (memPtr == nullptr)
+      report_bad_alloc_error("Allocation in MallocAllocator failed.");
+
+    return memPtr;
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Allocate;
+
+  void Deallocate(const void *Ptr, size_t /*Size*/) {
+    free(const_cast<void *>(Ptr));
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Deallocate;
+
+  void PrintStats() const {}
+};
+
+namespace detail {
+
+// We call out to an external function to actually print the message as the
+// printing code uses Allocator.h in its implementation.
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+                                size_t TotalMemory);
+
+} // end namespace detail
+
+/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
+///
+/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
+/// where every allocation is found by merely allocating the next N bytes in
+/// the slab, or the next N bytes in the next slab.
+///
+/// Note that this also has a threshold for forcing allocations above a certain
+/// size into their own slab.
+///
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
+/// object, which wraps malloc, to allocate memory, but it can be changed to
+/// use a custom allocator.
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
+          size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl
+    : public AllocatorBase<
+          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
+public:
+  static_assert(SizeThreshold <= SlabSize,
+                "The SizeThreshold must be at most the SlabSize to ensure "
+                "that objects larger than a slab go into their own memory "
+                "allocation.");
+
+  BumpPtrAllocatorImpl() = default;
+
+  template <typename T>
+  BumpPtrAllocatorImpl(T &&Allocator)
+      : Allocator(std::forward<T &&>(Allocator)) {}
+
+  // Manually implement a move constructor as we must clear the old allocator's
+  // slabs as a matter of correctness.
+  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
+        Allocator(std::move(Old.Allocator)) {
+    Old.CurPtr = Old.End = nullptr;
+    Old.BytesAllocated = 0;
+    Old.Slabs.clear();
+    Old.CustomSizedSlabs.clear();
+  }
+
+  ~BumpPtrAllocatorImpl() {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+  }
+
+  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+
+    CurPtr = RHS.CurPtr;
+    End = RHS.End;
+    BytesAllocated = RHS.BytesAllocated;
+    RedZoneSize = RHS.RedZoneSize;
+    Slabs = std::move(RHS.Slabs);
+    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+    Allocator = std::move(RHS.Allocator);
+
+    RHS.CurPtr = RHS.End = nullptr;
+    RHS.BytesAllocated = 0;
+    RHS.Slabs.clear();
+    RHS.CustomSizedSlabs.clear();
+    return *this;
+  }
+
+  /// \brief Deallocate all but the current slab and reset the current pointer
+  /// to the beginning of it, freeing all memory allocated so far.
+  void Reset() {
+    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
+    DeallocateCustomSizedSlabs();
+    CustomSizedSlabs.clear();
+
+    if (Slabs.empty())
+      return;
+
+    // Reset the state.
+    BytesAllocated = 0;
+    CurPtr = (char *)Slabs.front();
+    End = CurPtr + SlabSize;
+
+    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
+    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
+    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
+  }
+
+  /// \brief Allocate space at the specified alignment.
+  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
+  Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignnment is not allowed. Use 1 instead.");
+
+    // Keep track of how many bytes we've allocated.
+    BytesAllocated += Size;
+
+    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
+
+    size_t SizeToAllocate = Size;
+#if LLVM_ADDRESS_SANITIZER_BUILD
+    // Add trailing bytes as a "red zone" under ASan.
+    SizeToAllocate += RedZoneSize;
+#endif
+
+    // Check if we have enough space.
+    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
+      char *AlignedPtr = CurPtr + Adjustment;
+      CurPtr = AlignedPtr + SizeToAllocate;
+      // Update the allocation point of this memory block in MemorySanitizer.
+      // Without this, MemorySanitizer messages for values originated from here
+      // will point to the allocation of the entire slab.
+      __msan_allocated_memory(AlignedPtr, Size);
+      // Similarly, tell ASan about this space.
+      __asan_unpoison_memory_region(AlignedPtr, Size);
+      return AlignedPtr;
+    }
+
+    // If Size is really big, allocate a separate slab for it.
+    size_t PaddedSize = SizeToAllocate + Alignment - 1;
+    if (PaddedSize > SizeThreshold) {
+      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
+      // We own the new slab and don't want anyone reading anything other than
+      // pieces returned from this method.  So poison the whole slab.
+      __asan_poison_memory_region(NewSlab, PaddedSize);
+      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
+
+      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+      char *AlignedPtr = (char*)AlignedAddr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      __asan_unpoison_memory_region(AlignedPtr, Size);
+      return AlignedPtr;
+    }
+
+    // Otherwise, start a new slab and try again.
+    StartNewSlab();
+    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
+           "Unable to allocate memory!");
+    char *AlignedPtr = (char*)AlignedAddr;
+    CurPtr = AlignedPtr + SizeToAllocate;
+    __msan_allocated_memory(AlignedPtr, Size);
+    __asan_unpoison_memory_region(AlignedPtr, Size);
+    return AlignedPtr;
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
+
+  // Bump pointer allocators are expected to never free their storage, and
+  // clients expect pointers to remain valid for non-dereferencing uses even
+  // after deallocation.
+  void Deallocate(const void *Ptr, size_t Size) {
+    __asan_poison_memory_region(Ptr, Size);
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
+
+  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
+
+  size_t getTotalMemory() const {
+    size_t TotalMemory = 0;
+    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
+      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
+    for (auto &PtrAndSize : CustomSizedSlabs)
+      TotalMemory += PtrAndSize.second;
+    return TotalMemory;
+  }
+
+  size_t getBytesAllocated() const { return BytesAllocated; }
+
+  void setRedZoneSize(size_t NewSize) {
+    RedZoneSize = NewSize;
+  }
+
+  void PrintStats() const {
+    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
+                                       getTotalMemory());
+  }
+
+private:
+  /// \brief The current pointer into the current slab.
+  ///
+  /// This points to the next free byte in the slab.
+  char *CurPtr = nullptr;
+
+  /// \brief The end of the current slab.
+  char *End = nullptr;
+
+  /// \brief The slabs allocated so far.
+  SmallVector<void *, 4> Slabs;
+
+  /// \brief Custom-sized slabs allocated for too-large allocation requests.
+  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
+
+  /// \brief How many bytes we've allocated.
+  ///
+  /// Used so that we can compute how much space was wasted.
+  size_t BytesAllocated = 0;
+
+  /// \brief The number of bytes to put between allocations when running under
+  /// a sanitizer.
+  size_t RedZoneSize = 1;
+
+  /// \brief The allocator instance we use to get slabs of memory.
+  AllocatorT Allocator;
+
+  static size_t computeSlabSize(unsigned SlabIdx) {
+    // Scale the actual allocated slab size based on the number of slabs
+    // allocated. Every 128 slabs allocated, we double the allocated size to
+    // reduce allocation frequency, but saturate at multiplying the slab size by
+    // 2^30.
+    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
+  }
+
+  /// \brief Allocate a new slab and move the bump pointers over into the new
+  /// slab, modifying CurPtr and End.
+  void StartNewSlab() {
+    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
+
+    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
+    // We own the new slab and don't want anyone reading anything other than
+    // pieces returned from this method.  So poison the whole slab.
+    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
+
+    Slabs.push_back(NewSlab);
+    CurPtr = (char *)(NewSlab);
+    End = ((char *)NewSlab) + AllocatedSlabSize;
+  }
+
+  /// \brief Deallocate a sequence of slabs.
+  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
+                       SmallVectorImpl<void *>::iterator E) {
+    for (; I != E; ++I) {
+      size_t AllocatedSlabSize =
+          computeSlabSize(std::distance(Slabs.begin(), I));
+      Allocator.Deallocate(*I, AllocatedSlabSize);
+    }
+  }
+
+  /// \brief Deallocate all memory for custom sized slabs.
+  void DeallocateCustomSizedSlabs() {
+    for (auto &PtrAndSize : CustomSizedSlabs) {
+      void *Ptr = PtrAndSize.first;
+      size_t Size = PtrAndSize.second;
+      Allocator.Deallocate(Ptr, Size);
+    }
+  }
+
+  template <typename T> friend class SpecificBumpPtrAllocator;
+};
+
+/// \brief The standard BumpPtrAllocator which just uses the default template
+/// parameters.
+typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
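+
+// Example usage (a sketch):
+//
+//   BumpPtrAllocator Alloc;
+//   int *Ints = Alloc.Allocate<int>(16); // space for 16 ints, uninitialized
+//   void *Raw = Alloc.Allocate(256, 8);  // 256 raw bytes, 8-byte aligned
+//   Alloc.Reset();                       // recycle all slabs at once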
+
+/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
+/// allocated.
+///
+/// This allows calling the destructor in DestroyAll() and when the allocator is
+/// destroyed.
+template <typename T> class SpecificBumpPtrAllocator {
+  BumpPtrAllocator Allocator;
+
+public:
+  SpecificBumpPtrAllocator() {
+    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
+    // it can't have red zones between allocations.
+    Allocator.setRedZoneSize(0);
+  }
+  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+      : Allocator(std::move(Old.Allocator)) {}
+  ~SpecificBumpPtrAllocator() { DestroyAll(); }
+
+  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+    Allocator = std::move(RHS.Allocator);
+    return *this;
+  }
+
+  /// Call the destructor of each allocated object, deallocate all but the
+  /// current slab, and reset the current pointer to the beginning of that
+  /// slab, freeing all memory allocated so far.
+  void DestroyAll() {
+    auto DestroyElements = [](char *Begin, char *End) {
+      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
+      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
+        reinterpret_cast<T *>(Ptr)->~T();
+    };
+
+    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
+         ++I) {
+      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
+          std::distance(Allocator.Slabs.begin(), I));
+      char *Begin = (char *)alignAddr(*I, alignof(T));
+      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
+                                               : (char *)*I + AllocatedSlabSize;
+
+      DestroyElements(Begin, End);
+    }
+
+    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
+      void *Ptr = PtrAndSize.first;
+      size_t Size = PtrAndSize.second;
+      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
+    }
+
+    Allocator.Reset();
+  }
+
+  /// \brief Allocate space for an array of objects without constructing them.
+  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
+};
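+
+// Example usage (a sketch): unlike the plain BumpPtrAllocator, destructors run
+// in DestroyAll() and when the allocator itself is destroyed:
+//
+//   SpecificBumpPtrAllocator<std::string> Alloc;
+//   std::string *S = new (Alloc.Allocate()) std::string("example");
+//   // ~SpecificBumpPtrAllocator() invokes ~std::string() on every element.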
+
+/// \{
+/// Counterparts of the allocation functions defined in namespace 'std', which
+/// crash on allocation failure instead of returning a null pointer.
+
+LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_malloc(size_t Sz) {
+  void *Result = std::malloc(Sz);
+  if (Result == nullptr)
+    report_bad_alloc_error("Allocation failed.");
+  return Result;
+}
+
+LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_calloc(size_t Count,
+                                                        size_t Sz) {
+  void *Result = std::calloc(Count, Sz);
+  if (Result == nullptr)
+    report_bad_alloc_error("Allocation failed.");
+  return Result;
+}
+
+LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_realloc(void *Ptr, size_t Sz) {
+  void *Result = std::realloc(Ptr, Sz);
+  if (Result == nullptr)
+    report_bad_alloc_error("Allocation failed.");
+  return Result;
+}
+
+/// \}
+
+} // end namespace llvm
+
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void *operator new(size_t Size,
+                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
+                                              SizeThreshold> &Allocator) {
+  struct S {
+    char c;
+    union {
+      double D;
+      long double LD;
+      long long L;
+      void *P;
+    } x;
+  };
+  return Allocator.Allocate(
+      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
+}
+
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void operator delete(
+    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
+}
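+
+// Example usage of the placement operator new above (a sketch):
+//
+//   llvm::BumpPtrAllocator Alloc;
+//   struct Node { int Value; };
+//   Node *N = new (Alloc) Node{42};
+//   // There is no matching non-placement delete; the memory is reclaimed
+//   // wholesale when Alloc is reset or destroyed.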
+
+#endif // LLVM_SUPPORT_ALLOCATOR_H
diff --git a/linux-x64/clang/include/llvm/Support/ArrayRecycler.h b/linux-x64/clang/include/llvm/Support/ArrayRecycler.h
new file mode 100644
index 0000000..68696be
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ArrayRecycler.h
@@ -0,0 +1,145 @@
+//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ArrayRecycler class template which can recycle small
+// arrays allocated from one of the allocators in Allocator.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARRAYRECYCLER_H
+#define LLVM_SUPPORT_ARRAYRECYCLER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace llvm {
+
+/// Recycle small arrays allocated from a BumpPtrAllocator.
+///
+/// Arrays are allocated in a small number of fixed sizes. For each supported
+/// array size, the ArrayRecycler keeps a free list of available arrays.
+///
+template <class T, size_t Align = alignof(T)> class ArrayRecycler {
+  // The free list for a given array size is a simple singly linked list.
+  // We can't use iplist or Recycler here since those classes can't be copied.
+  struct FreeList {
+    FreeList *Next;
+  };
+
+  static_assert(Align >= alignof(FreeList), "Object underaligned");
+  static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");
+
+  // Keep a free list for each array size.
+  SmallVector<FreeList*, 8> Bucket;
+
+  // Remove an entry from the free list in Bucket[Idx] and return it.
+  // Return NULL if no entries are available.
+  T *pop(unsigned Idx) {
+    if (Idx >= Bucket.size())
+      return nullptr;
+    FreeList *Entry = Bucket[Idx];
+    if (!Entry)
+      return nullptr;
+    __asan_unpoison_memory_region(Entry, Capacity::get(Idx).getSize());
+    Bucket[Idx] = Entry->Next;
+    __msan_allocated_memory(Entry, Capacity::get(Idx).getSize());
+    return reinterpret_cast<T*>(Entry);
+  }
+
+  // Add an entry to the free list at Bucket[Idx].
+  void push(unsigned Idx, T *Ptr) {
+    assert(Ptr && "Cannot recycle NULL pointer");
+    FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
+    if (Idx >= Bucket.size())
+      Bucket.resize(size_t(Idx) + 1);
+    Entry->Next = Bucket[Idx];
+    Bucket[Idx] = Entry;
+    __asan_poison_memory_region(Ptr, Capacity::get(Idx).getSize());
+  }
+
+public:
+  /// The size of an allocated array is represented by a Capacity instance.
+  ///
+  /// This class is much smaller than a size_t, and it provides methods to work
+  /// with the set of legal array capacities.
+  class Capacity {
+    uint8_t Index;
+    explicit Capacity(uint8_t idx) : Index(idx) {}
+
+  public:
+    Capacity() : Index(0) {}
+
+    /// Get the capacity of an array that can hold at least N elements.
+    static Capacity get(size_t N) {
+      return Capacity(N ? Log2_64_Ceil(N) : 0);
+    }
+
+    /// Get the number of elements in an array with this capacity.
+    size_t getSize() const { return size_t(1u) << Index; }
+
+    /// Get the bucket number for this capacity.
+    unsigned getBucket() const { return Index; }
+
+    /// Get the next larger capacity. Large capacities grow exponentially, so
+    /// this function can be used to reallocate incrementally growing vectors
+    /// in amortized linear time.
+    Capacity getNext() const { return Capacity(Index + 1); }
+  };
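+
+  // Example (illustrative): stepping through capacities 1, 2, 4, 8, ... with
+  // getNext() makes repeated reallocation of an incrementally growing array
+  // amortized linear; 'Needed' is a hypothetical element count.
+  //
+  //   Capacity C = Capacity::get(1);
+  //   while (C.getSize() < Needed)
+  //     C = C.getNext();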
+
+  ~ArrayRecycler() {
+    // The client should always call clear() so recycled arrays can be returned
+    // to the allocator.
+    assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
+  }
+
+  /// Release all the tracked allocations to the allocator. The recycler must
+  /// be free of any tracked allocations before being deleted.
+  template<class AllocatorType>
+  void clear(AllocatorType &Allocator) {
+    for (; !Bucket.empty(); Bucket.pop_back())
+      while (T *Ptr = pop(Bucket.size() - 1))
+        Allocator.Deallocate(Ptr);
+  }
+
+  /// Special case for BumpPtrAllocator which has an empty Deallocate()
+  /// function.
+  ///
+  /// There is no need to traverse the free lists, pulling all the objects into
+  /// cache.
+  void clear(BumpPtrAllocator&) {
+    Bucket.clear();
+  }
+
+  /// Allocate an array of at least the requested capacity.
+  ///
+  /// Return an existing recycled array, or allocate one from Allocator if
+  /// none are available for recycling.
+  ///
+  template<class AllocatorType>
+  T *allocate(Capacity Cap, AllocatorType &Allocator) {
+    // Try to recycle an existing array.
+    if (T *Ptr = pop(Cap.getBucket()))
+      return Ptr;
+    // Nope, get more memory.
+    return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
+  }
+
+  /// Deallocate an array with the specified Capacity.
+  ///
+  /// Cap must be the same capacity that was given to allocate().
+  ///
+  void deallocate(Capacity Cap, T *Ptr) {
+    push(Cap.getBucket(), Ptr);
+  }
+};
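+
+// Example (illustrative sketch) of the allocate/deallocate/clear protocol:
+//
+//   BumpPtrAllocator Alloc;
+//   ArrayRecycler<int> Recycler;
+//   auto Cap = ArrayRecycler<int>::Capacity::get(10); // holds >= 10 ints
+//   int *A = Recycler.allocate(Cap, Alloc);
+//   Recycler.deallocate(Cap, A); // pushed onto a free list, not freed
+//   Recycler.clear(Alloc);       // required before Recycler is destroyed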
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ARRAYRECYCLER_H
diff --git a/linux-x64/clang/include/llvm/Support/Atomic.h b/linux-x64/clang/include/llvm/Support/Atomic.h
new file mode 100644
index 0000000..552313f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Atomic.h
@@ -0,0 +1,43 @@
+//===- llvm/Support/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys atomic operations.
+//
+// DO NOT USE IN NEW CODE!
+//
+// New code should always rely on the std::atomic facilities in C++11.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ATOMIC_H
+#define LLVM_SUPPORT_ATOMIC_H
+
+#include "llvm/Support/DataTypes.h"
+
+// Windows will at times define MemoryFence.
+#ifdef MemoryFence
+#undef MemoryFence
+#endif
+
+namespace llvm {
+  namespace sys {
+    void MemoryFence();
+
+#ifdef _MSC_VER
+    typedef long cas_flag;
+#else
+    typedef uint32_t cas_flag;
+#endif
+    cas_flag CompareAndSwap(volatile cas_flag* ptr,
+                            cas_flag new_value,
+                            cas_flag old_value);
+  }
+}
+
+#endif // LLVM_SUPPORT_ATOMIC_H
diff --git a/linux-x64/clang/include/llvm/Support/AtomicOrdering.h b/linux-x64/clang/include/llvm/Support/AtomicOrdering.h
new file mode 100644
index 0000000..e93b755
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/AtomicOrdering.h
@@ -0,0 +1,152 @@
+//===-- llvm/Support/AtomicOrdering.h ---Atomic Ordering---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Atomic ordering constants.
+///
+/// These values are used by LLVM to represent atomic ordering for C++11's
+/// memory model and more, as detailed in docs/Atomics.rst.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ATOMICORDERING_H
+#define LLVM_SUPPORT_ATOMICORDERING_H
+
+#include <cstddef>
+
+namespace llvm {
+
+/// Atomic ordering for C11 / C++11's memory models.
+///
+/// These values cannot change because they are shared with standard library
+/// implementations as well as with other compilers.
+enum class AtomicOrderingCABI {
+  relaxed = 0,
+  consume = 1,
+  acquire = 2,
+  release = 3,
+  acq_rel = 4,
+  seq_cst = 5,
+};
+
+bool operator<(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
+bool operator>(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
+bool operator<=(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
+bool operator>=(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
+
+// Validate that an integral value, which isn't known to fit within the enum's
+// range, is a valid AtomicOrderingCABI.
+template <typename Int> inline bool isValidAtomicOrderingCABI(Int I) {
+  return (Int)AtomicOrderingCABI::relaxed <= I &&
+         I <= (Int)AtomicOrderingCABI::seq_cst;
+}
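+
+// Example (illustrative): guarding a raw integer received from a frontend.
+//
+//   int RawOrder = ...; // e.g. the ordering argument of __atomic_load_n
+//   if (isValidAtomicOrderingCABI(RawOrder)) {
+//     auto Order = static_cast<AtomicOrderingCABI>(RawOrder);
+//     // ... use Order ...
+//   }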
+
+/// Atomic ordering for LLVM's memory model.
+///
+/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
+/// Unordered, which are both below the C++ orders.
+///
+/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
+///                                   \-->consume-->acquire--/
+enum class AtomicOrdering {
+  NotAtomic = 0,
+  Unordered = 1,
+  Monotonic = 2, // Equivalent to C++'s relaxed.
+  // Consume = 3,  // Not specified yet.
+  Acquire = 4,
+  Release = 5,
+  AcquireRelease = 6,
+  SequentiallyConsistent = 7
+};
+
+bool operator<(AtomicOrdering, AtomicOrdering) = delete;
+bool operator>(AtomicOrdering, AtomicOrdering) = delete;
+bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
+bool operator>=(AtomicOrdering, AtomicOrdering) = delete;
+
+// Validate that an integral value, which isn't known to fit within the enum's
+// range, is a valid AtomicOrdering.
+template <typename Int> inline bool isValidAtomicOrdering(Int I) {
+  return static_cast<Int>(AtomicOrdering::NotAtomic) <= I &&
+         I <= static_cast<Int>(AtomicOrdering::SequentiallyConsistent);
+}
+
+/// String used by LLVM IR to represent atomic ordering.
+inline const char *toIRString(AtomicOrdering ao) {
+  static const char *names[8] = {"not_atomic", "unordered", "monotonic",
+                                 "consume",    "acquire",   "release",
+                                 "acq_rel",    "seq_cst"};
+  return names[static_cast<size_t>(ao)];
+}
+
+/// Returns true if ao is stronger than other as defined by the AtomicOrdering
+/// lattice, which is based on C++'s definition.
+inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
+  static const bool lookup[8][8] = {
+      //               NA     UN     RX     CO     AC     RE     AR     SC
+      /* NotAtomic */ {false, false, false, false, false, false, false, false},
+      /* Unordered */ { true, false, false, false, false, false, false, false},
+      /* relaxed   */ { true,  true, false, false, false, false, false, false},
+      /* consume   */ { true,  true,  true, false, false, false, false, false},
+      /* acquire   */ { true,  true,  true,  true, false, false, false, false},
+      /* release   */ { true,  true,  true, false, false, false, false, false},
+      /* acq_rel   */ { true,  true,  true,  true,  true,  true, false, false},
+      /* seq_cst   */ { true,  true,  true,  true,  true,  true,  true, false},
+  };
+  return lookup[static_cast<size_t>(ao)][static_cast<size_t>(other)];
+}
+
+inline bool isAtLeastOrStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
+  static const bool lookup[8][8] = {
+      //               NA     UN     RX     CO     AC     RE     AR     SC
+      /* NotAtomic */ { true, false, false, false, false, false, false, false},
+      /* Unordered */ { true,  true, false, false, false, false, false, false},
+      /* relaxed   */ { true,  true,  true, false, false, false, false, false},
+      /* consume   */ { true,  true,  true,  true, false, false, false, false},
+      /* acquire   */ { true,  true,  true,  true,  true, false, false, false},
+      /* release   */ { true,  true,  true, false, false,  true, false, false},
+      /* acq_rel   */ { true,  true,  true,  true,  true,  true,  true, false},
+      /* seq_cst   */ { true,  true,  true,  true,  true,  true,  true,  true},
+  };
+  return lookup[static_cast<size_t>(ao)][static_cast<size_t>(other)];
+}
+
+inline bool isStrongerThanUnordered(AtomicOrdering ao) {
+  return isStrongerThan(ao, AtomicOrdering::Unordered);
+}
+
+inline bool isStrongerThanMonotonic(AtomicOrdering ao) {
+  return isStrongerThan(ao, AtomicOrdering::Monotonic);
+}
+
+inline bool isAcquireOrStronger(AtomicOrdering ao) {
+  return isAtLeastOrStrongerThan(ao, AtomicOrdering::Acquire);
+}
+
+inline bool isReleaseOrStronger(AtomicOrdering ao) {
+  return isAtLeastOrStrongerThan(ao, AtomicOrdering::Release);
+}
+
+inline AtomicOrderingCABI toCABI(AtomicOrdering ao) {
+  static const AtomicOrderingCABI lookup[8] = {
+      /* NotAtomic */ AtomicOrderingCABI::relaxed,
+      /* Unordered */ AtomicOrderingCABI::relaxed,
+      /* relaxed   */ AtomicOrderingCABI::relaxed,
+      /* consume   */ AtomicOrderingCABI::consume,
+      /* acquire   */ AtomicOrderingCABI::acquire,
+      /* release   */ AtomicOrderingCABI::release,
+      /* acq_rel   */ AtomicOrderingCABI::acq_rel,
+      /* seq_cst   */ AtomicOrderingCABI::seq_cst,
+  };
+  return lookup[static_cast<size_t>(ao)];
+}
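+
+// Example (illustrative): querying the lattice helpers above.
+//
+//   assert(isStrongerThan(AtomicOrdering::SequentiallyConsistent,
+//                         AtomicOrdering::Acquire));
+//   assert(!isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Acquire));
+//   assert(isReleaseOrStronger(AtomicOrdering::AcquireRelease));
+//   assert(toCABI(AtomicOrdering::Monotonic) == AtomicOrderingCABI::relaxed);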
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ATOMICORDERING_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryByteStream.h b/linux-x64/clang/include/llvm/Support/BinaryByteStream.h
new file mode 100644
index 0000000..db1ccba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryByteStream.h
@@ -0,0 +1,262 @@
+//===- BinaryByteStream.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+// A BinaryStream which stores data in a single contiguous memory buffer.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYBYTESTREAM_H
+#define LLVM_SUPPORT_BINARYBYTESTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/BinaryStream.h"
+#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileOutputBuffer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+namespace llvm {
+
+/// \brief An implementation of BinaryStream which holds its entire data set
+/// in a single contiguous buffer.  BinaryByteStream guarantees that no read
+/// operation will ever incur a copy.  Note that BinaryByteStream does not
+/// own the underlying buffer.
+class BinaryByteStream : public BinaryStream {
+public:
+  BinaryByteStream() = default;
+  BinaryByteStream(ArrayRef<uint8_t> Data, llvm::support::endianness Endian)
+      : Endian(Endian), Data(Data) {}
+  BinaryByteStream(StringRef Data, llvm::support::endianness Endian)
+      : Endian(Endian), Data(Data.bytes_begin(), Data.bytes_end()) {}
+
+  llvm::support::endianness getEndian() const override { return Endian; }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override {
+    if (auto EC = checkOffsetForRead(Offset, Size))
+      return EC;
+    Buffer = Data.slice(Offset, Size);
+    return Error::success();
+  }
+
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override {
+    if (auto EC = checkOffsetForRead(Offset, 1))
+      return EC;
+    Buffer = Data.slice(Offset);
+    return Error::success();
+  }
+
+  uint32_t getLength() override { return Data.size(); }
+
+  ArrayRef<uint8_t> data() const { return Data; }
+
+  StringRef str() const {
+    const char *CharData = reinterpret_cast<const char *>(Data.data());
+    return StringRef(CharData, Data.size());
+  }
+
+protected:
+  llvm::support::endianness Endian;
+  ArrayRef<uint8_t> Data;
+};
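+
+// Example (illustrative): reading a slice without copying.
+//
+//   uint8_t Storage[] = {1, 2, 3, 4};
+//   BinaryByteStream Stream(Storage, llvm::support::little);
+//   ArrayRef<uint8_t> View;
+//   if (auto EC = Stream.readBytes(/*Offset=*/1, /*Size=*/2, View))
+//     consumeError(std::move(EC)); // handle the error properly in real code
+//   // View now refers to bytes {2, 3} inside Storage; nothing was copied.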
+
+/// \brief An implementation of BinaryStream whose data is backed by an llvm
+/// MemoryBuffer object.  MemoryBufferByteStream owns the MemoryBuffer in
+/// question.  As with BinaryByteStream, reading from a MemoryBufferByteStream
+/// will never cause a copy.
+class MemoryBufferByteStream : public BinaryByteStream {
+public:
+  MemoryBufferByteStream(std::unique_ptr<MemoryBuffer> Buffer,
+                         llvm::support::endianness Endian)
+      : BinaryByteStream(Buffer->getBuffer(), Endian),
+        MemBuffer(std::move(Buffer)) {}
+
+  std::unique_ptr<MemoryBuffer> MemBuffer;
+};
+
+/// \brief An implementation of BinaryStream which holds its entire data set
+/// in a single contiguous buffer.  As with BinaryByteStream, the mutable
+/// version also guarantees that no read operation will ever incur a copy,
+/// and similarly it does not own the underlying buffer.
+class MutableBinaryByteStream : public WritableBinaryStream {
+public:
+  MutableBinaryByteStream() = default;
+  MutableBinaryByteStream(MutableArrayRef<uint8_t> Data,
+                          llvm::support::endianness Endian)
+      : Data(Data), ImmutableStream(Data, Endian) {}
+
+  llvm::support::endianness getEndian() const override {
+    return ImmutableStream.getEndian();
+  }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override {
+    return ImmutableStream.readBytes(Offset, Size, Buffer);
+  }
+
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override {
+    return ImmutableStream.readLongestContiguousChunk(Offset, Buffer);
+  }
+
+  uint32_t getLength() override { return ImmutableStream.getLength(); }
+
+  Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) override {
+    if (Buffer.empty())
+      return Error::success();
+
+    if (auto EC = checkOffsetForWrite(Offset, Buffer.size()))
+      return EC;
+
+    uint8_t *DataPtr = const_cast<uint8_t *>(Data.data());
+    ::memcpy(DataPtr + Offset, Buffer.data(), Buffer.size());
+    return Error::success();
+  }
+
+  Error commit() override { return Error::success(); }
+
+  MutableArrayRef<uint8_t> data() const { return Data; }
+
+private:
+  MutableArrayRef<uint8_t> Data;
+  BinaryByteStream ImmutableStream;
+};
+
+/// \brief An implementation of WritableBinaryStream which can write at its end
+/// causing the underlying data to grow.  This class owns the underlying data.
+class AppendingBinaryByteStream : public WritableBinaryStream {
+  std::vector<uint8_t> Data;
+  llvm::support::endianness Endian = llvm::support::little;
+
+public:
+  AppendingBinaryByteStream() = default;
+  AppendingBinaryByteStream(llvm::support::endianness Endian)
+      : Endian(Endian) {}
+
+  void clear() { Data.clear(); }
+
+  llvm::support::endianness getEndian() const override { return Endian; }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override {
+    if (auto EC = checkOffsetForRead(Offset, Size))
+      return EC;
+
+    Buffer = makeArrayRef(Data).slice(Offset, Size);
+    return Error::success();
+  }
+
+  void insert(uint32_t Offset, ArrayRef<uint8_t> Bytes) {
+    Data.insert(Data.begin() + Offset, Bytes.begin(), Bytes.end());
+  }
+
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override {
+    if (auto EC = checkOffsetForRead(Offset, 1))
+      return EC;
+
+    Buffer = makeArrayRef(Data).slice(Offset);
+    return Error::success();
+  }
+
+  uint32_t getLength() override { return Data.size(); }
+
+  Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) override {
+    if (Buffer.empty())
+      return Error::success();
+
+    // This is well-defined for any case except where offset is strictly
+    // greater than the current length.  If offset is equal to the current
+    // length, we can still grow.  If offset is beyond the current length, we
+    // would have to decide how to deal with the intermediate uninitialized
+    // bytes.  So we punt on that case for simplicity and just say it's an
+    // error.
+    if (Offset > getLength())
+      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
+
+    uint32_t RequiredSize = Offset + Buffer.size();
+    if (RequiredSize > Data.size())
+      Data.resize(RequiredSize);
+
+    ::memcpy(Data.data() + Offset, Buffer.data(), Buffer.size());
+    return Error::success();
+  }
+
+  Error commit() override { return Error::success(); }
+
+  /// \brief Return the properties of this stream.
+  BinaryStreamFlags getFlags() const override {
+    return BSF_Write | BSF_Append;
+  }
+
+  MutableArrayRef<uint8_t> data() { return Data; }
+};
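+
+// Example (illustrative): appending by writing at offset == length.
+//
+//   AppendingBinaryByteStream Stream(llvm::support::little);
+//   uint8_t Bytes[] = {0xDE, 0xAD};
+//   cantFail(Stream.writeBytes(0, Bytes));                  // length is now 2
+//   cantFail(Stream.writeBytes(Stream.getLength(), Bytes)); // appends; now 4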
+
+/// \brief An implementation of WritableBinaryStream backed by an llvm
+/// FileOutputBuffer.
+class FileBufferByteStream : public WritableBinaryStream {
+private:
+  class StreamImpl : public MutableBinaryByteStream {
+  public:
+    StreamImpl(std::unique_ptr<FileOutputBuffer> Buffer,
+               llvm::support::endianness Endian)
+        : MutableBinaryByteStream(
+              MutableArrayRef<uint8_t>(Buffer->getBufferStart(),
+                                       Buffer->getBufferEnd()),
+              Endian),
+          FileBuffer(std::move(Buffer)) {}
+
+    Error commit() override {
+      if (FileBuffer->commit())
+        return make_error<BinaryStreamError>(
+            stream_error_code::filesystem_error);
+      return Error::success();
+    }
+
+  private:
+    std::unique_ptr<FileOutputBuffer> FileBuffer;
+  };
+
+public:
+  FileBufferByteStream(std::unique_ptr<FileOutputBuffer> Buffer,
+                       llvm::support::endianness Endian)
+      : Impl(std::move(Buffer), Endian) {}
+
+  llvm::support::endianness getEndian() const override {
+    return Impl.getEndian();
+  }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override {
+    return Impl.readBytes(Offset, Size, Buffer);
+  }
+
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override {
+    return Impl.readLongestContiguousChunk(Offset, Buffer);
+  }
+
+  uint32_t getLength() override { return Impl.getLength(); }
+
+  Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) override {
+    return Impl.writeBytes(Offset, Data);
+  }
+
+  Error commit() override { return Impl.commit(); }
+
+private:
+  StreamImpl Impl;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYBYTESTREAM_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryItemStream.h b/linux-x64/clang/include/llvm/Support/BinaryItemStream.h
new file mode 100644
index 0000000..278723d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryItemStream.h
@@ -0,0 +1,108 @@
+//===- BinaryItemStream.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYITEMSTREAM_H
+#define LLVM_SUPPORT_BINARYITEMSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/BinaryStream.h"
+#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+template <typename T> struct BinaryItemTraits {
+  static size_t length(const T &Item) = delete;
+  static ArrayRef<uint8_t> bytes(const T &Item) = delete;
+};
+
+/// BinaryItemStream represents a sequence of objects stored in some kind of
+/// external container but which it is useful to view as a stream of
+/// contiguous bytes.  For example, you might have a collection of records,
+/// serialize each one into a buffer, and store those serialized records in a
+/// container.  The records themselves are not laid out contiguously in
+/// memory, but we may wish to read from or write to them as if they were.
+template <typename T, typename Traits = BinaryItemTraits<T>>
+class BinaryItemStream : public BinaryStream {
+public:
+  explicit BinaryItemStream(llvm::support::endianness Endian)
+      : Endian(Endian) {}
+
+  llvm::support::endianness getEndian() const override { return Endian; }
+
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) override {
+    auto ExpectedIndex = translateOffsetIndex(Offset);
+    if (!ExpectedIndex)
+      return ExpectedIndex.takeError();
+    const auto &Item = Items[*ExpectedIndex];
+    if (auto EC = checkOffsetForRead(Offset, Size))
+      return EC;
+    if (Size > Traits::length(Item))
+      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
+    Buffer = Traits::bytes(Item).take_front(Size);
+    return Error::success();
+  }
+
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) override {
+    auto ExpectedIndex = translateOffsetIndex(Offset);
+    if (!ExpectedIndex)
+      return ExpectedIndex.takeError();
+    Buffer = Traits::bytes(Items[*ExpectedIndex]);
+    return Error::success();
+  }
+
+  void setItems(ArrayRef<T> ItemArray) {
+    Items = ItemArray;
+    computeItemOffsets();
+  }
+
+  uint32_t getLength() override {
+    return ItemEndOffsets.empty() ? 0 : ItemEndOffsets.back();
+  }
+
+private:
+  void computeItemOffsets() {
+    ItemEndOffsets.clear();
+    ItemEndOffsets.reserve(Items.size());
+    uint32_t CurrentOffset = 0;
+    for (const auto &Item : Items) {
+      uint32_t Len = Traits::length(Item);
+      assert(Len > 0 && "no empty items");
+      CurrentOffset += Len;
+      ItemEndOffsets.push_back(CurrentOffset);
+    }
+  }
+
+  Expected<uint32_t> translateOffsetIndex(uint32_t Offset) {
+    // Make sure the offset is somewhere in our items array.
+    if (Offset >= getLength())
+      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
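+    // Search for the first item whose (exclusive) end offset is greater than
+    // Offset; bumping Offset by one lets lower_bound find exactly that item.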
+    ++Offset;
+    auto Iter =
+        std::lower_bound(ItemEndOffsets.begin(), ItemEndOffsets.end(), Offset);
+    size_t Idx = std::distance(ItemEndOffsets.begin(), Iter);
+    assert(Idx < Items.size() && "binary search for offset failed");
+    return Idx;
+  }
+
+  llvm::support::endianness Endian;
+  ArrayRef<T> Items;
+
+  // Sorted vector of offsets to accelerate lookup.
+  std::vector<uint32_t> ItemEndOffsets;
+};
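+
+// Example (illustrative sketch): adapting a hypothetical 'Record' type by
+// specializing BinaryItemTraits so that a container of records can be viewed
+// as one contiguous byte stream.
+//
+//   struct Record { std::vector<uint8_t> Serialized; };
+//   template <> struct BinaryItemTraits<Record> {
+//     static size_t length(const Record &R) { return R.Serialized.size(); }
+//     static ArrayRef<uint8_t> bytes(const Record &R) { return R.Serialized; }
+//   };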
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYITEMSTREAM_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStream.h b/linux-x64/clang/include/llvm/Support/BinaryStream.h
new file mode 100644
index 0000000..d69a03e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStream.h
@@ -0,0 +1,102 @@
+//===- BinaryStream.h - Base interface for a stream of data -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAM_H
+#define LLVM_SUPPORT_BINARYSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+enum BinaryStreamFlags {
+  BSF_None = 0,
+  BSF_Write = 1,  // Stream supports writing.
+  BSF_Append = 2, // Writing can occur at offset == length.
+  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ BSF_Append)
+};
+
+/// \brief An interface for accessing data in a stream-like format, but which
+/// discourages copying.  Instead of specifying a buffer in which to copy
+/// data on a read, the API returns an ArrayRef to data owned by the stream's
+/// implementation.  Implementations may not necessarily store data in a
+/// single contiguous buffer (or even in memory at all); in such cases it may
+/// be necessary for an implementation to cache such a buffer so that it can
+/// return it.
+class BinaryStream {
+public:
+  virtual ~BinaryStream() = default;
+
+  virtual llvm::support::endianness getEndian() const = 0;
+
+  /// \brief Given an offset into the stream and a number of bytes, attempt to
+  /// read the bytes and set the output ArrayRef to point to data owned by the
+  /// stream.
+  virtual Error readBytes(uint32_t Offset, uint32_t Size,
+                          ArrayRef<uint8_t> &Buffer) = 0;
+
+  /// \brief Given an offset into the stream, read as much as possible without
+  /// copying any data.
+  virtual Error readLongestContiguousChunk(uint32_t Offset,
+                                           ArrayRef<uint8_t> &Buffer) = 0;
+
+  /// \brief Return the number of bytes of data in this stream.
+  virtual uint32_t getLength() = 0;
+
+  /// \brief Return the properties of this stream.
+  virtual BinaryStreamFlags getFlags() const { return BSF_None; }
+
+protected:
+  Error checkOffsetForRead(uint32_t Offset, uint32_t DataSize) {
+    if (Offset > getLength())
+      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
+    if (getLength() < DataSize + Offset)
+      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
+    return Error::success();
+  }
+};
+
+/// \brief A BinaryStream which can be read from as well as written to.  Note
+/// that writing to a BinaryStream always necessitates copying from the input
+/// buffer to the stream's backing store.  Streams are assumed to be buffered,
+/// so to be portable it is necessary to call commit() on the stream when all
+/// data has been written.
+class WritableBinaryStream : public BinaryStream {
+public:
+  ~WritableBinaryStream() override = default;
+
+  /// \brief Attempt to write the given bytes into the stream at the desired
+  /// offset. This will always necessitate a copy.  Cannot shrink or grow the
+  /// stream, only writes into existing allocated space.
+  virtual Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) = 0;
+
+  /// \brief For buffered streams, commits changes to the backing store.
+  virtual Error commit() = 0;
+
+  /// \brief Return the properties of this stream.
+  BinaryStreamFlags getFlags() const override { return BSF_Write; }
+
+protected:
+  Error checkOffsetForWrite(uint32_t Offset, uint32_t DataSize) {
+    if (!(getFlags() & BSF_Append))
+      return checkOffsetForRead(Offset, DataSize);
+
+    if (Offset > getLength())
+      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
+    return Error::success();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAM_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStreamArray.h b/linux-x64/clang/include/llvm/Support/BinaryStreamArray.h
new file mode 100644
index 0000000..3f5562b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStreamArray.h
@@ -0,0 +1,358 @@
+//===- BinaryStreamArray.h - Array backed by an arbitrary stream *- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAMARRAY_H
+#define LLVM_SUPPORT_BINARYSTREAMARRAY_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+
+/// Lightweight arrays that are backed by an arbitrary BinaryStream.  This file
+/// provides two different array implementations.
+///
+///     VarStreamArray - Arrays of variable length records.  The user specifies
+///       an Extractor type that can extract a record from a given offset and
+///       return the number of bytes consumed by the record.
+///
+///     FixedStreamArray - Arrays of fixed length records.  This is similar in
+///       spirit to ArrayRef<T>, but since it is backed by a BinaryStream, the
+///       elements of the array need not be laid out in contiguous memory.
+namespace llvm {
+
+/// VarStreamArrayExtractor is intended to be specialized to provide customized
+/// extraction logic.  On input it receives a BinaryStreamRef pointing to the
+/// beginning of the next record, but where the length of the record is not yet
+/// known.  Upon completion, it should return an appropriate Error instance if
+/// a record could not be extracted, or if one could be extracted it should
+/// return success and set Len to the number of bytes this record occupied in
+/// the underlying stream, and it should fill out the fields of the value type
+/// Item appropriately to represent the current record.
+///
+/// You can specialize this template for your own custom value types to avoid
+/// having to specify a second template argument to VarStreamArray (documented
+/// below).
+template <typename T> struct VarStreamArrayExtractor {
+  // Method intentionally deleted.  You must provide an explicit specialization
+  // with the following method implemented.
+  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+                   T &Item) const = delete;
+};
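+
+// Example (illustrative sketch): a specialization for a hypothetical record
+// format whose first byte encodes the total record length.
+//
+//   template <> struct VarStreamArrayExtractor<MyRecord> {
+//     Error operator()(BinaryStreamRef Stream, uint32_t &Len,
+//                      MyRecord &Item) const {
+//       BinaryStreamReader Reader(Stream);
+//       uint8_t Size = 0;
+//       if (auto EC = Reader.readInteger(Size))
+//         return EC;
+//       Len = Size;
+//       // ... decode the remaining bytes into Item ...
+//       return Error::success();
+//     }
+//   };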
+
+/// VarStreamArray represents an array of variable length records backed by a
+/// stream.  This could be a contiguous sequence of bytes in memory, it could
+/// be a file on disk, or it could be a PDB stream where bytes are stored as
+/// discontiguous blocks in a file.  Usually it is desirable to treat arrays
+/// as contiguous blocks of memory, but doing so with large PDB files, for
+/// example, could mean allocating huge amounts of memory just to allow
+/// re-ordering of stream data to be contiguous before iterating over it.  By
+/// abstracting this out, we need not duplicate this memory, and we can
+/// iterate over arrays in arbitrarily formatted streams.  Elements are parsed
+/// lazily on iteration, so there is no upfront cost associated with building
+/// or copying a VarStreamArray, no matter how large it may be.
+///
+/// You create a VarStreamArray by specifying a ValueType and an Extractor type.
+/// If you do not specify an Extractor type, you are expected to specialize
+/// VarStreamArrayExtractor<T> for your ValueType.
+///
+/// By default an Extractor is default constructed in the class, but in some
+/// cases you might find it useful for an Extractor to maintain state across
+/// extractions.  In this case you can provide your own Extractor through a
+/// secondary constructor.  The following examples show various ways of
+/// creating a VarStreamArray.
+///
+///       // Will use VarStreamArrayExtractor<MyType> as the extractor.
+///       VarStreamArray<MyType> MyTypeArray;
+///
+///       // Will use a default-constructed MyExtractor as the extractor.
+///       VarStreamArray<MyType, MyExtractor> MyTypeArray2;
+///
+///       // Will use the specific instance of MyExtractor provided.
+///       // MyExtractor need not be default-constructible in this case.
+///       MyExtractor E(SomeContext);
+///       VarStreamArray<MyType, MyExtractor> MyTypeArray3(E);
+///
+
+template <typename ValueType, typename Extractor> class VarStreamArrayIterator;
+
+template <typename ValueType,
+          typename Extractor = VarStreamArrayExtractor<ValueType>>
+class VarStreamArray {
+  friend class VarStreamArrayIterator<ValueType, Extractor>;
+
+public:
+  typedef VarStreamArrayIterator<ValueType, Extractor> Iterator;
+
+  VarStreamArray() = default;
+
+  explicit VarStreamArray(const Extractor &E) : E(E) {}
+
+  explicit VarStreamArray(BinaryStreamRef Stream) : Stream(Stream) {}
+
+  VarStreamArray(BinaryStreamRef Stream, const Extractor &E)
+      : Stream(Stream), E(E) {}
+
+  Iterator begin(bool *HadError = nullptr) const {
+    return Iterator(*this, E, HadError);
+  }
+
+  bool valid() const { return Stream.valid(); }
+
+  Iterator end() const { return Iterator(E); }
+
+  bool empty() const { return Stream.getLength() == 0; }
+
+  /// \brief Given an offset into the array's underlying stream, return an
+  /// iterator to the record at that offset.  This is considered unsafe
+  /// since the behavior is undefined if \p Offset does not refer to the
+  /// beginning of a valid record.
+  Iterator at(uint32_t Offset) const {
+    return Iterator(*this, E, Offset, nullptr);
+  }
+
+  const Extractor &getExtractor() const { return E; }
+  Extractor &getExtractor() { return E; }
+
+  BinaryStreamRef getUnderlyingStream() const { return Stream; }
+  void setUnderlyingStream(BinaryStreamRef S) { Stream = S; }
+
+private:
+  BinaryStreamRef Stream;
+  Extractor E;
+};
+
+template <typename ValueType, typename Extractor>
+class VarStreamArrayIterator
+    : public iterator_facade_base<VarStreamArrayIterator<ValueType, Extractor>,
+                                  std::forward_iterator_tag, ValueType> {
+  typedef VarStreamArrayIterator<ValueType, Extractor> IterType;
+  typedef VarStreamArray<ValueType, Extractor> ArrayType;
+
+public:
+  VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
+                         bool *HadError)
+      : VarStreamArrayIterator(Array, E, 0, HadError) {}
+
+  VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
+                         uint32_t Offset, bool *HadError)
+      : IterRef(Array.Stream.drop_front(Offset)), Extract(E),
+        Array(&Array), AbsOffset(Offset), HadError(HadError) {
+    if (IterRef.getLength() == 0)
+      moveToEnd();
+    else {
+      auto EC = Extract(IterRef, ThisLen, ThisValue);
+      if (EC) {
+        consumeError(std::move(EC));
+        markError();
+      }
+    }
+  }
+
+  VarStreamArrayIterator() = default;
+  explicit VarStreamArrayIterator(const Extractor &E) : Extract(E) {}
+  ~VarStreamArrayIterator() = default;
+
+  bool operator==(const IterType &R) const {
+    if (Array && R.Array) {
+      // Both have a valid array; make sure they're the same.
+      assert(Array == R.Array);
+      return IterRef == R.IterRef;
+    }
+
+    // Both iterators are at the end.
+    if (!Array && !R.Array)
+      return true;
+
+    // One is not at the end and one is.
+    return false;
+  }
+
+  const ValueType &operator*() const {
+    assert(Array && !HasError);
+    return ThisValue;
+  }
+
+  ValueType &operator*() {
+    assert(Array && !HasError);
+    return ThisValue;
+  }
+
+  IterType &operator+=(unsigned N) {
+    for (unsigned I = 0; I < N; ++I) {
+      // We are done with the current record, discard it so that we are
+      // positioned at the next record.
+      AbsOffset += ThisLen;
+      IterRef = IterRef.drop_front(ThisLen);
+      if (IterRef.getLength() == 0) {
+        // There is nothing after the current record, we must make this an end
+        // iterator.
+        moveToEnd();
+      } else {
+        // There is some data after the current record.
+        auto EC = Extract(IterRef, ThisLen, ThisValue);
+        if (EC) {
+          consumeError(std::move(EC));
+          markError();
+        } else if (ThisLen == 0) {
+          // An empty record? Make this an end iterator.
+          moveToEnd();
+        }
+      }
+    }
+    return *this;
+  }
+
+  uint32_t offset() const { return AbsOffset; }
+  uint32_t getRecordLength() const { return ThisLen; }
+
+private:
+  void moveToEnd() {
+    Array = nullptr;
+    ThisLen = 0;
+  }
+  void markError() {
+    moveToEnd();
+    HasError = true;
+    if (HadError != nullptr)
+      *HadError = true;
+  }
+
+  ValueType ThisValue;
+  BinaryStreamRef IterRef;
+  Extractor Extract;
+  const ArrayType *Array{nullptr};
+  uint32_t ThisLen{0};
+  uint32_t AbsOffset{0};
+  bool HasError{false};
+  bool *HadError{nullptr};
+};
+
+template <typename T> class FixedStreamArrayIterator;
+
+/// FixedStreamArray is similar to VarStreamArray, except that each record
+/// has a fixed length.  As with VarStreamArray, there is no upfront
+/// cost associated with building or copying a FixedStreamArray, as the
+/// memory for each element is not read from the backing stream until that
+/// element is iterated.
+template <typename T> class FixedStreamArray {
+  friend class FixedStreamArrayIterator<T>;
+
+public:
+  typedef FixedStreamArrayIterator<T> Iterator;
+
+  FixedStreamArray() = default;
+  explicit FixedStreamArray(BinaryStreamRef Stream) : Stream(Stream) {
+    assert(Stream.getLength() % sizeof(T) == 0);
+  }
+
+  bool operator==(const FixedStreamArray<T> &Other) const {
+    return Stream == Other.Stream;
+  }
+
+  bool operator!=(const FixedStreamArray<T> &Other) const {
+    return !(*this == Other);
+  }
+
+  FixedStreamArray &operator=(const FixedStreamArray &) = default;
+
+  const T &operator[](uint32_t Index) const {
+    assert(Index < size());
+    uint32_t Off = Index * sizeof(T);
+    ArrayRef<uint8_t> Data;
+    if (auto EC = Stream.readBytes(Off, sizeof(T), Data)) {
+      assert(false && "Unexpected failure reading from stream");
+      // This should never happen since we asserted that the stream length was
+      // an exact multiple of the element size.
+      consumeError(std::move(EC));
+    }
+    assert(llvm::alignmentAdjustment(Data.data(), alignof(T)) == 0);
+    return *reinterpret_cast<const T *>(Data.data());
+  }
+
+  uint32_t size() const { return Stream.getLength() / sizeof(T); }
+
+  bool empty() const { return size() == 0; }
+
+  FixedStreamArrayIterator<T> begin() const {
+    return FixedStreamArrayIterator<T>(*this, 0);
+  }
+
+  FixedStreamArrayIterator<T> end() const {
+    return FixedStreamArrayIterator<T>(*this, size());
+  }
+
+  const T &front() const { return *begin(); }
+  const T &back() const {
+    FixedStreamArrayIterator<T> I = end();
+    return *(--I);
+  }
+
+  BinaryStreamRef getUnderlyingStream() const { return Stream; }
+
+private:
+  BinaryStreamRef Stream;
+};
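+
+// Example (illustrative): viewing a stream of little-endian 32-bit integers
+// as an array; 'Stream' is an assumed BinaryStreamRef whose length is a
+// multiple of four.
+//
+//   FixedStreamArray<support::ulittle32_t> Ints(Stream);
+//   for (uint32_t Value : Ints) {
+//     // ... use Value ...
+//   }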
+
+template <typename T>
+class FixedStreamArrayIterator
+    : public iterator_facade_base<FixedStreamArrayIterator<T>,
+                                  std::random_access_iterator_tag, const T> {
+
+public:
+  FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
+      : Array(Array), Index(Index) {}
+
+  FixedStreamArrayIterator<T> &
+  operator=(const FixedStreamArrayIterator<T> &Other) {
+    Array = Other.Array;
+    Index = Other.Index;
+    return *this;
+  }
+
+  const T &operator*() const { return Array[Index]; }
+  const T &operator*() { return Array[Index]; }
+
+  bool operator==(const FixedStreamArrayIterator<T> &R) const {
+    assert(Array == R.Array);
+    return (Index == R.Index) && (Array == R.Array);
+  }
+
+  FixedStreamArrayIterator<T> &operator+=(std::ptrdiff_t N) {
+    Index += N;
+    return *this;
+  }
+
+  FixedStreamArrayIterator<T> &operator-=(std::ptrdiff_t N) {
+    assert(std::ptrdiff_t(Index) >= N);
+    Index -= N;
+    return *this;
+  }
+
+  std::ptrdiff_t operator-(const FixedStreamArrayIterator<T> &R) const {
+    assert(Array == R.Array);
+    assert(Index >= R.Index);
+    return Index - R.Index;
+  }
+
+  bool operator<(const FixedStreamArrayIterator<T> &RHS) const {
+    assert(Array == RHS.Array);
+    return Index < RHS.Index;
+  }
+
+private:
+  FixedStreamArray<T> Array;
+  uint32_t Index;
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAMARRAY_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStreamError.h b/linux-x64/clang/include/llvm/Support/BinaryStreamError.h
new file mode 100644
index 0000000..7d9699d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStreamError.h
@@ -0,0 +1,48 @@
+//===- BinaryStreamError.h - Error extensions for Binary Streams *- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAMERROR_H
+#define LLVM_SUPPORT_BINARYSTREAMERROR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+#include <string>
+
+namespace llvm {
+enum class stream_error_code {
+  unspecified,
+  stream_too_short,
+  invalid_array_size,
+  invalid_offset,
+  filesystem_error
+};
+
+/// Base class for errors originating from binary stream operations.
+class BinaryStreamError : public ErrorInfo<BinaryStreamError> {
+public:
+  static char ID;
+  explicit BinaryStreamError(stream_error_code C);
+  explicit BinaryStreamError(StringRef Context);
+  BinaryStreamError(stream_error_code C, StringRef Context);
+
+  void log(raw_ostream &OS) const override;
+  std::error_code convertToErrorCode() const override;
+
+  StringRef getErrorMessage() const;
+
+  stream_error_code getErrorCode() const { return Code; }
+
+private:
+  std::string ErrMsg;
+  stream_error_code Code;
+};
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAMERROR_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStreamReader.h b/linux-x64/clang/include/llvm/Support/BinaryStreamReader.h
new file mode 100644
index 0000000..ae5ebb2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStreamReader.h
@@ -0,0 +1,270 @@
+//===- BinaryStreamReader.h - Reads objects from a binary stream *- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAMREADER_H
+#define LLVM_SUPPORT_BINARYSTREAMREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/type_traits.h"
+
+#include <string>
+#include <type_traits>
+
+namespace llvm {
+
+/// \brief Provides read-only access to a subclass of `BinaryStream`.  Provides
+/// bounds checking and helpers for reading certain common data types such as
+/// null-terminated strings, integers in various flavors of endianness, etc.
+/// Can be subclassed to provide reading of custom datatypes, although no
+/// methods are overridable.
+class BinaryStreamReader {
+public:
+  BinaryStreamReader() = default;
+  explicit BinaryStreamReader(BinaryStreamRef Ref);
+  explicit BinaryStreamReader(BinaryStream &Stream);
+  explicit BinaryStreamReader(ArrayRef<uint8_t> Data,
+                              llvm::support::endianness Endian);
+  explicit BinaryStreamReader(StringRef Data, llvm::support::endianness Endian);
+
+  BinaryStreamReader(const BinaryStreamReader &Other)
+      : Stream(Other.Stream), Offset(Other.Offset) {}
+
+  BinaryStreamReader &operator=(const BinaryStreamReader &Other) {
+    Stream = Other.Stream;
+    Offset = Other.Offset;
+    return *this;
+  }
+
+  virtual ~BinaryStreamReader() {}
+
+  /// Read as much as possible from the underlying stream at the current offset
+  /// without invoking a copy, and set \p Buffer to the resulting data slice.
+  /// Updates the stream's offset to point after the newly read data.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);
+
+  /// Read \p Size bytes from the underlying stream at the current offset and
+  /// set \p Buffer to the resulting data slice.  Whether a copy occurs
+  /// depends on the implementation of the underlying stream.  Updates the
+  /// stream's offset to point after the newly read data.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);
+
+  /// Read an integer of the specified endianness into \p Dest and update the
+  /// stream's offset.  The data is always copied from the stream's underlying
+  /// buffer into \p Dest. Updates the stream's offset to point after the newly
+  /// read data.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  template <typename T> Error readInteger(T &Dest) {
+    static_assert(std::is_integral<T>::value,
+                  "Cannot call readInteger with non-integral value!");
+
+    ArrayRef<uint8_t> Bytes;
+    if (auto EC = readBytes(Bytes, sizeof(T)))
+      return EC;
+
+    Dest = llvm::support::endian::read<T, llvm::support::unaligned>(
+        Bytes.data(), Stream.getEndian());
+    return Error::success();
+  }
+
+  /// Similar to readInteger.
+  template <typename T> Error readEnum(T &Dest) {
+    static_assert(std::is_enum<T>::value,
+                  "Cannot call readEnum with non-enum value!");
+    typename std::underlying_type<T>::type N;
+    if (auto EC = readInteger(N))
+      return EC;
+    Dest = static_cast<T>(N);
+    return Error::success();
+  }
+
+  /// Read a null-terminated string into \p Dest.  Whether a copy occurs depends
+  /// on the implementation of the underlying stream.  Updates the stream's
+  /// offset to point after the newly read data.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readCString(StringRef &Dest);
+
+  /// Similar to readCString, but reads a null-terminated UTF16 string
+  /// instead.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readWideString(ArrayRef<UTF16> &Dest);
+
+  /// Read a \p Length byte string into \p Dest.  Whether a copy occurs depends
+  /// on the implementation of the underlying stream.  Updates the stream's
+  /// offset to point after the newly read data.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readFixedString(StringRef &Dest, uint32_t Length);
+
+  /// Read the entire remainder of the underlying stream into \p Ref.  This is
+  /// equivalent to calling getUnderlyingStream().slice(Offset).  Updates the
+  /// stream's offset to point to the end of the stream.  Never causes a copy.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readStreamRef(BinaryStreamRef &Ref);
+
+  /// Read \p Length bytes from the underlying stream into \p Ref.  This is
+  /// equivalent to calling getUnderlyingStream().slice(Offset, Length).
+  /// Updates the stream's offset to point after the newly read object.  Never
+  /// causes a copy.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readStreamRef(BinaryStreamRef &Ref, uint32_t Length);
+
+  /// Read \p Size bytes from the underlying stream into \p Stream.  This is
+  /// equivalent to calling getUnderlyingStream().slice(Offset, Size).
+  /// Updates the stream's offset to point after the newly read object.  Never
+  /// causes a copy.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  Error readSubstream(BinarySubstreamRef &Stream, uint32_t Size);
+
+  /// Get a pointer to an object of type T from the underlying stream, as if by
+  /// memcpy, and store the result into \p Dest.  It is up to the caller to
+  /// ensure that objects of type T can be safely treated in this manner.
+  /// Updates the stream's offset to point after the newly read object.  Whether
+  /// a copy occurs depends upon the implementation of the underlying
+  /// stream.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  template <typename T> Error readObject(const T *&Dest) {
+    ArrayRef<uint8_t> Buffer;
+    if (auto EC = readBytes(Buffer, sizeof(T)))
+      return EC;
+    Dest = reinterpret_cast<const T *>(Buffer.data());
+    return Error::success();
+  }
+
+  /// Get a reference to a \p NumElements element array of objects of type T
+  /// from the underlying stream as if by memcpy, and store the resulting array
+  /// slice into \p array.  It is up to the caller to ensure that objects of
+  /// type T can be safely treated in this manner.  Updates the stream's offset
+  /// to point after the newly read object.  Whether a copy occurs depends upon
+  /// the implementation of the underlying stream.
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  template <typename T>
+  Error readArray(ArrayRef<T> &Array, uint32_t NumElements) {
+    ArrayRef<uint8_t> Bytes;
+    if (NumElements == 0) {
+      Array = ArrayRef<T>();
+      return Error::success();
+    }
+
+    if (NumElements > UINT32_MAX / sizeof(T))
+      return make_error<BinaryStreamError>(
+          stream_error_code::invalid_array_size);
+
+    if (auto EC = readBytes(Bytes, NumElements * sizeof(T)))
+      return EC;
+
+    assert(alignmentAdjustment(Bytes.data(), alignof(T)) == 0 &&
+           "Reading at invalid alignment!");
+
+    Array = ArrayRef<T>(reinterpret_cast<const T *>(Bytes.data()), NumElements);
+    return Error::success();
+  }
+
+  /// Read a VarStreamArray of size \p Size bytes and store the result into
+  /// \p Array.  Updates the stream's offset to point after the newly read
+  /// array.  Never causes a copy (although iterating the elements of the
+  /// VarStreamArray may, depending upon the implementation of the underlying
+  /// stream).
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  template <typename T, typename U>
+  Error readArray(VarStreamArray<T, U> &Array, uint32_t Size) {
+    BinaryStreamRef S;
+    if (auto EC = readStreamRef(S, Size))
+      return EC;
+    Array.setUnderlyingStream(S);
+    return Error::success();
+  }
+
+  /// Read a FixedStreamArray of \p NumItems elements and store the result into
+  /// \p Array.  Updates the stream's offset to point after the newly read
+  /// array.  Never causes a copy (although iterating the elements of the
+  /// FixedStreamArray may, depending upon the implementation of the underlying
+  /// stream).
+  ///
+  /// \returns a success error code if the data was successfully read, otherwise
+  /// returns an appropriate error code.
+  template <typename T>
+  Error readArray(FixedStreamArray<T> &Array, uint32_t NumItems) {
+    if (NumItems == 0) {
+      Array = FixedStreamArray<T>();
+      return Error::success();
+    }
+
+    if (NumItems > UINT32_MAX / sizeof(T))
+      return make_error<BinaryStreamError>(
+          stream_error_code::invalid_array_size);
+
+    BinaryStreamRef View;
+    if (auto EC = readStreamRef(View, NumItems * sizeof(T)))
+      return EC;
+
+    Array = FixedStreamArray<T>(View);
+    return Error::success();
+  }
+
+  bool empty() const { return bytesRemaining() == 0; }
+  void setOffset(uint32_t Off) { Offset = Off; }
+  uint32_t getOffset() const { return Offset; }
+  uint32_t getLength() const { return Stream.getLength(); }
+  uint32_t bytesRemaining() const { return getLength() - getOffset(); }
+
+  /// Advance the stream's offset by \p Amount bytes.
+  ///
+  /// \returns a success error code if at least \p Amount bytes remain in the
+  /// stream, otherwise returns an appropriate error code.
+  Error skip(uint32_t Amount);
+
+  /// Examine the next byte of the underlying stream without advancing the
+  /// stream's offset.  If the stream is empty the behavior is undefined.
+  ///
+  /// \returns the next byte in the stream.
+  uint8_t peek() const;
+
+  Error padToAlignment(uint32_t Align);
+
+  std::pair<BinaryStreamReader, BinaryStreamReader>
+  split(uint32_t Offset) const;
+
+private:
+  BinaryStreamRef Stream;
+  uint32_t Offset = 0;
+};
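+
+// Example (illustrative): pulling typed data out of a byte buffer; 'Bytes' is
+// an assumed ArrayRef<uint8_t> of serialized input.
+//
+//   BinaryStreamReader Reader(Bytes, llvm::support::little);
+//   uint16_t Version = 0;
+//   StringRef Name;
+//   if (auto EC = Reader.readInteger(Version))
+//     return EC;
+//   if (auto EC = Reader.readCString(Name))
+//     return EC;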
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAMREADER_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStreamRef.h b/linux-x64/clang/include/llvm/Support/BinaryStreamRef.h
new file mode 100644
index 0000000..5cf355b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStreamRef.h
@@ -0,0 +1,275 @@
+//===- BinaryStreamRef.h - A copyable reference to a stream -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAMREF_H
+#define LLVM_SUPPORT_BINARYSTREAMREF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/BinaryStream.h"
+#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+/// Common functionality shared by the mutable and immutable StreamRef
+/// variants.
+template <class RefType, class StreamType> class BinaryStreamRefBase {
+protected:
+  BinaryStreamRefBase() = default;
+  explicit BinaryStreamRefBase(StreamType &BorrowedImpl)
+      : BorrowedImpl(&BorrowedImpl), ViewOffset(0) {
+    if (!(BorrowedImpl.getFlags() & BSF_Append))
+      Length = BorrowedImpl.getLength();
+  }
+
+  BinaryStreamRefBase(std::shared_ptr<StreamType> SharedImpl, uint32_t Offset,
+                      Optional<uint32_t> Length)
+      : SharedImpl(SharedImpl), BorrowedImpl(SharedImpl.get()),
+        ViewOffset(Offset), Length(Length) {}
+  BinaryStreamRefBase(StreamType &BorrowedImpl, uint32_t Offset,
+                      Optional<uint32_t> Length)
+      : BorrowedImpl(&BorrowedImpl), ViewOffset(Offset), Length(Length) {}
+  BinaryStreamRefBase(const BinaryStreamRefBase &Other) = default;
+  BinaryStreamRefBase &operator=(const BinaryStreamRefBase &Other) = default;
+
+  BinaryStreamRefBase &operator=(BinaryStreamRefBase &&Other) = default;
+  BinaryStreamRefBase(BinaryStreamRefBase &&Other) = default;
+
+public:
+  llvm::support::endianness getEndian() const {
+    return BorrowedImpl->getEndian();
+  }
+
+  uint32_t getLength() const {
+    if (Length.hasValue())
+      return *Length;
+
+    return BorrowedImpl ? (BorrowedImpl->getLength() - ViewOffset) : 0;
+  }
+
+  /// Return a new BinaryStreamRef with the first \p N elements removed.  If
+  /// this BinaryStreamRef is length-tracking, then the resulting one will be
+  /// too.
+  RefType drop_front(uint32_t N) const {
+    if (!BorrowedImpl)
+      return RefType();
+
+    N = std::min(N, getLength());
+    RefType Result(static_cast<const RefType &>(*this));
+    if (N == 0)
+      return Result;
+
+    Result.ViewOffset += N;
+    if (Result.Length.hasValue())
+      *Result.Length -= N;
+    return Result;
+  }
+
+  /// Return a new BinaryStreamRef with the last \p N elements removed.  If
+  /// this BinaryStreamRef is length-tracking and \p N is greater than 0, then
+  /// this BinaryStreamRef will no longer length-track.
+  RefType drop_back(uint32_t N) const {
+    if (!BorrowedImpl)
+      return RefType();
+
+    RefType Result(static_cast<const RefType &>(*this));
+    N = std::min(N, getLength());
+
+    if (N == 0)
+      return Result;
+
+    // Since we're dropping non-zero bytes from the end, stop length-tracking
+    // by setting the length of the resulting StreamRef to an explicit value.
+    if (!Result.Length.hasValue())
+      Result.Length = getLength();
+
+    *Result.Length -= N;
+    return Result;
+  }
+
+  /// Return a new BinaryStreamRef with only the first \p N elements remaining.
+  RefType keep_front(uint32_t N) const {
+    assert(N <= getLength());
+    return drop_back(getLength() - N);
+  }
+
+  /// Return a new BinaryStreamRef with only the last \p N elements remaining.
+  RefType keep_back(uint32_t N) const {
+    assert(N <= getLength());
+    return drop_front(getLength() - N);
+  }
+
+  /// Return a new BinaryStreamRef with the first and last \p N elements
+  /// removed.
+  RefType drop_symmetric(uint32_t N) const {
+    return drop_front(N).drop_back(N);
+  }
+
+  /// Return a new BinaryStreamRef with the first \p Offset elements removed,
+  /// and retaining exactly \p Len elements.
+  RefType slice(uint32_t Offset, uint32_t Len) const {
+    return drop_front(Offset).keep_front(Len);
+  }
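+
+  // A minimal sketch of the slicing semantics (sizes are illustrative): for a
+  // 10-byte ref R, R.slice(2, 5) views bytes [2, 7), R.keep_front(3) views
+  // bytes [0, 3), and R.drop_front(4) views bytes [4, 10).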
+
+  bool valid() const { return BorrowedImpl != nullptr; }
+
+  bool operator==(const RefType &Other) const {
+    if (BorrowedImpl != Other.BorrowedImpl)
+      return false;
+    if (ViewOffset != Other.ViewOffset)
+      return false;
+    if (Length != Other.Length)
+      return false;
+    return true;
+  }
+
+protected:
+  Error checkOffsetForRead(uint32_t Offset, uint32_t DataSize) const {
+    if (Offset > getLength())
+      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
+    if (getLength() < DataSize + Offset)
+      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
+    return Error::success();
+  }
+
+  std::shared_ptr<StreamType> SharedImpl;
+  StreamType *BorrowedImpl = nullptr;
+  uint32_t ViewOffset = 0;
+  Optional<uint32_t> Length;
+};
+
+/// \brief BinaryStreamRef is to BinaryStream what ArrayRef is to an Array.  It
+/// provides copy semantics and read-only access to a "window" of the underlying
+/// BinaryStream. Note that BinaryStreamRef is *not* a BinaryStream.  That is to
+/// say, it does not inherit and override the methods of BinaryStream.  In
+/// general, you should not pass around pointers or references to BinaryStreams
+/// and use inheritance to achieve polymorphism.  Instead, you should pass
+/// around BinaryStreamRefs by value and achieve polymorphism that way.
+class BinaryStreamRef
+    : public BinaryStreamRefBase<BinaryStreamRef, BinaryStream> {
+  friend BinaryStreamRefBase<BinaryStreamRef, BinaryStream>;
+  friend class WritableBinaryStreamRef;
+  BinaryStreamRef(std::shared_ptr<BinaryStream> Impl, uint32_t ViewOffset,
+                  Optional<uint32_t> Length)
+      : BinaryStreamRefBase(Impl, ViewOffset, Length) {}
+
+public:
+  BinaryStreamRef() = default;
+  BinaryStreamRef(BinaryStream &Stream);
+  BinaryStreamRef(BinaryStream &Stream, uint32_t Offset,
+                  Optional<uint32_t> Length);
+  explicit BinaryStreamRef(ArrayRef<uint8_t> Data,
+                           llvm::support::endianness Endian);
+  explicit BinaryStreamRef(StringRef Data, llvm::support::endianness Endian);
+
+  BinaryStreamRef(const BinaryStreamRef &Other) = default;
+  BinaryStreamRef &operator=(const BinaryStreamRef &Other) = default;
+  BinaryStreamRef(BinaryStreamRef &&Other) = default;
+  BinaryStreamRef &operator=(BinaryStreamRef &&Other) = default;
+
+  // Use BinaryStreamRef.slice() instead.
+  BinaryStreamRef(BinaryStreamRef &S, uint32_t Offset,
+                  uint32_t Length) = delete;
+
+  /// Given an Offset into this StreamRef and a Size, return a reference to a
+  /// buffer owned by the stream.
+  ///
+  /// \returns a success error code if the entire range of data is within the
+  /// bounds of this BinaryStreamRef's view and the implementation could read
+  /// the data, and an appropriate error code otherwise.
+  Error readBytes(uint32_t Offset, uint32_t Size,
+                  ArrayRef<uint8_t> &Buffer) const;
+
+  /// Given an Offset into this BinaryStreamRef, return a reference to the
+  /// largest buffer the stream could support without necessitating a copy.
+  ///
+  /// \returns a success error code if the implementation could read the data,
+  /// and an appropriate error code otherwise.
+  Error readLongestContiguousChunk(uint32_t Offset,
+                                   ArrayRef<uint8_t> &Buffer) const;
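+
+  // A minimal usage sketch (the backing buffer is hypothetical): read a
+  // 4-byte view at offset 0 without copying.
+  //
+  // \code
+  //   ArrayRef<uint8_t> Bytes = getSomeBytes(); // hypothetical source
+  //   BinaryStreamRef Ref(Bytes, llvm::support::little);
+  //   ArrayRef<uint8_t> View;
+  //   if (auto EC = Ref.readBytes(0, 4, View))
+  //     consumeError(std::move(EC)); // stream shorter than 4 bytes
+  // \endcode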
+};
+
+struct BinarySubstreamRef {
+  uint32_t Offset;            // Offset in the parent stream
+  BinaryStreamRef StreamData; // Stream Data
+
+  BinarySubstreamRef slice(uint32_t Off, uint32_t Size) const {
+    BinaryStreamRef SubSub = StreamData.slice(Off, Size);
+    return {Off + Offset, SubSub};
+  }
+  BinarySubstreamRef drop_front(uint32_t N) const {
+    return slice(N, size() - N);
+  }
+  BinarySubstreamRef keep_front(uint32_t N) const { return slice(0, N); }
+
+  std::pair<BinarySubstreamRef, BinarySubstreamRef>
+  split(uint32_t Offset) const {
+    return std::make_pair(keep_front(Offset), drop_front(Offset));
+  }
+
+  uint32_t size() const { return StreamData.getLength(); }
+  bool empty() const { return size() == 0; }
+};
+
+class WritableBinaryStreamRef
+    : public BinaryStreamRefBase<WritableBinaryStreamRef,
+                                 WritableBinaryStream> {
+  friend BinaryStreamRefBase<WritableBinaryStreamRef, WritableBinaryStream>;
+  WritableBinaryStreamRef(std::shared_ptr<WritableBinaryStream> Impl,
+                          uint32_t ViewOffset, Optional<uint32_t> Length)
+      : BinaryStreamRefBase(Impl, ViewOffset, Length) {}
+
+  Error checkOffsetForWrite(uint32_t Offset, uint32_t DataSize) const {
+    if (!(BorrowedImpl->getFlags() & BSF_Append))
+      return checkOffsetForRead(Offset, DataSize);
+
+    if (Offset > getLength())
+      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
+    return Error::success();
+  }
+
+public:
+  WritableBinaryStreamRef() = default;
+  WritableBinaryStreamRef(WritableBinaryStream &Stream);
+  WritableBinaryStreamRef(WritableBinaryStream &Stream, uint32_t Offset,
+                          Optional<uint32_t> Length);
+  explicit WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
+                                   llvm::support::endianness Endian);
+  WritableBinaryStreamRef(const WritableBinaryStreamRef &Other) = default;
+  WritableBinaryStreamRef &
+  operator=(const WritableBinaryStreamRef &Other) = default;
+
+  WritableBinaryStreamRef(WritableBinaryStreamRef &&Other) = default;
+  WritableBinaryStreamRef &operator=(WritableBinaryStreamRef &&Other) = default;
+
+  // Use WritableBinaryStreamRef.slice() instead.
+  WritableBinaryStreamRef(WritableBinaryStreamRef &S, uint32_t Offset,
+                          uint32_t Length) = delete;
+
+  /// Given an Offset into this WritableBinaryStreamRef and some input data,
+  /// writes the data to the underlying stream.
+  ///
+  /// \returns a success error code if the data could fit within the underlying
+  /// stream at the specified location and the implementation could write the
+  /// data, and an appropriate error code otherwise.
+  Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const;
+
+  /// Convert this WritableBinaryStreamRef to a read-only BinaryStreamRef.
+  operator BinaryStreamRef() const;
+
+  /// \brief For buffered streams, commits changes to the backing store.
+  Error commit();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAMREF_H
diff --git a/linux-x64/clang/include/llvm/Support/BinaryStreamWriter.h b/linux-x64/clang/include/llvm/Support/BinaryStreamWriter.h
new file mode 100644
index 0000000..f31db87
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BinaryStreamWriter.h
@@ -0,0 +1,183 @@
+//===- BinaryStreamWriter.h - Writes objects to a BinaryStream ---*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BINARYSTREAMWRITER_H
+#define LLVM_SUPPORT_BINARYSTREAMWRITER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// \brief Provides write-only access to a subclass of `WritableBinaryStream`.
+/// Provides bounds checking and helpers for writing certain common data types
+/// such as null-terminated strings, integers in various flavors of endianness,
+/// etc.  Can be subclassed to provide reading and writing of custom datatypes,
+/// although no methods are overridable.
+class BinaryStreamWriter {
+public:
+  BinaryStreamWriter() = default;
+  explicit BinaryStreamWriter(WritableBinaryStreamRef Ref);
+  explicit BinaryStreamWriter(WritableBinaryStream &Stream);
+  explicit BinaryStreamWriter(MutableArrayRef<uint8_t> Data,
+                              llvm::support::endianness Endian);
+
+  BinaryStreamWriter(const BinaryStreamWriter &Other)
+      : Stream(Other.Stream), Offset(Other.Offset) {}
+
+  BinaryStreamWriter &operator=(const BinaryStreamWriter &Other) {
+    Stream = Other.Stream;
+    Offset = Other.Offset;
+    return *this;
+  }
+
+  virtual ~BinaryStreamWriter() {}
+
+  /// Write the bytes specified in \p Buffer to the underlying stream.
+  /// On success, updates the offset so that subsequent writes will occur
+  /// at the next unwritten position.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  Error writeBytes(ArrayRef<uint8_t> Buffer);
+
+  /// Write the integer \p Value to the underlying stream in the
+  /// specified endianness.  On success, updates the offset so that
+  /// subsequent writes occur at the next unwritten position.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  template <typename T> Error writeInteger(T Value) {
+    static_assert(std::is_integral<T>::value,
+                  "Cannot call writeInteger with non-integral value!");
+    uint8_t Buffer[sizeof(T)];
+    llvm::support::endian::write<T, llvm::support::unaligned>(
+        Buffer, Value, Stream.getEndian());
+    return writeBytes(Buffer);
+  }
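+
+  // A minimal usage sketch (buffer size chosen to fit both writes): write a
+  // 16-bit and a 32-bit integer back to back in the stream's endianness.
+  //
+  // \code
+  //   uint8_t Data[6] = {};
+  //   BinaryStreamWriter Writer(Data, llvm::support::little);
+  //   if (auto EC = Writer.writeInteger<uint16_t>(0xABCD))
+  //     return EC;
+  //   if (auto EC = Writer.writeInteger<uint32_t>(0x12345678))
+  //     return EC;
+  // \endcode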
+
+  /// Similar to writeInteger, but casts \p Num to its underlying integral
+  /// type before writing.
+  template <typename T> Error writeEnum(T Num) {
+    static_assert(std::is_enum<T>::value,
+                  "Cannot call writeEnum with non-Enum type");
+
+    using U = typename std::underlying_type<T>::type;
+    return writeInteger<U>(static_cast<U>(Num));
+  }
+
+  /// Write the string \p Str to the underlying stream followed by a null
+  /// terminator.  On success, updates the offset so that subsequent writes
+  /// occur at the next unwritten position.  \p Str need not be null terminated
+  /// on input.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  Error writeCString(StringRef Str);
+
+  /// Write the string \p Str to the underlying stream without a null
+  /// terminator.  On success, updates the offset so that subsequent writes
+  /// occur at the next unwritten position.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  Error writeFixedString(StringRef Str);
+
+  /// Efficiently reads all data from \p Ref, and writes it to this stream.
+  /// This operation will not invoke any copies of the source data, regardless
+  /// of the source stream's implementation.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  Error writeStreamRef(BinaryStreamRef Ref);
+
+  /// Efficiently reads \p Size bytes from \p Ref, and writes them to this
+  /// stream.
+  /// This operation will not invoke any copies of the source data, regardless
+  /// of the source stream's implementation.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  Error writeStreamRef(BinaryStreamRef Ref, uint32_t Size);
+
+  /// Writes the object \p Obj to the underlying stream, as if by using memcpy.
+  /// It is up to the caller to ensure that the type of \p Obj can be safely
+  /// in this fashion, as no checks are made to ensure that this is safe.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  template <typename T> Error writeObject(const T &Obj) {
+    static_assert(!std::is_pointer<T>::value,
+                  "writeObject should not be used with pointers, to write "
+                  "the pointed-to value dereference the pointer before calling "
+                  "writeObject");
+    return writeBytes(
+        ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(&Obj), sizeof(T)));
+  }
+
+  /// Writes an array of objects of type T to the underlying stream, as if by
+  /// using memcpy.  It is up to the caller to ensure that elements of type
+  /// \c T can be safely copied in this fashion, as no checks are made to
+  /// ensure that this is safe.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  template <typename T> Error writeArray(ArrayRef<T> Array) {
+    if (Array.empty())
+      return Error::success();
+    if (Array.size() > UINT32_MAX / sizeof(T))
+      return make_error<BinaryStreamError>(
+          stream_error_code::invalid_array_size);
+
+    return writeBytes(
+        ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(Array.data()),
+                          Array.size() * sizeof(T)));
+  }
+
+  /// Writes all data from the array \p Array to the underlying stream.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  template <typename T, typename U>
+  Error writeArray(VarStreamArray<T, U> Array) {
+    return writeStreamRef(Array.getUnderlyingStream());
+  }
+
+  /// Writes all elements from the array \p Array to the underlying stream.
+  ///
+  /// \returns a success error code if the data was successfully written,
+  /// otherwise returns an appropriate error code.
+  template <typename T> Error writeArray(FixedStreamArray<T> Array) {
+    return writeStreamRef(Array.getUnderlyingStream());
+  }
+
+  /// Splits the Writer into two Writers at a given offset.
+  std::pair<BinaryStreamWriter, BinaryStreamWriter> split(uint32_t Off) const;
+
+  void setOffset(uint32_t Off) { Offset = Off; }
+  uint32_t getOffset() const { return Offset; }
+  uint32_t getLength() const { return Stream.getLength(); }
+  uint32_t bytesRemaining() const { return getLength() - getOffset(); }
+  Error padToAlignment(uint32_t Align);
+
+protected:
+  WritableBinaryStreamRef Stream;
+  uint32_t Offset = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BINARYSTREAMWRITER_H
diff --git a/linux-x64/clang/include/llvm/Support/BlockFrequency.h b/linux-x64/clang/include/llvm/Support/BlockFrequency.h
new file mode 100644
index 0000000..2e75cbd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BlockFrequency.h
@@ -0,0 +1,82 @@
+//===-------- BlockFrequency.h - Block Frequency Wrapper --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BlockFrequency class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
+#define LLVM_SUPPORT_BLOCKFREQUENCY_H
+
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+// This class represents Block Frequency as a 64-bit value.
+class BlockFrequency {
+  uint64_t Frequency;
+
+public:
+  BlockFrequency(uint64_t Freq = 0) : Frequency(Freq) { }
+
+  /// \brief Returns the maximum possible frequency, the saturation value.
+  static uint64_t getMaxFrequency() { return -1ULL; }
+
+  /// \brief Returns the frequency as a fixed-point number scaled by the entry
+  /// frequency.
+  uint64_t getFrequency() const { return Frequency; }
+
+  /// \brief Multiplies with a branch probability. The computation will never
+  /// overflow.
+  BlockFrequency &operator*=(BranchProbability Prob);
+  BlockFrequency operator*(BranchProbability Prob) const;
+
+  /// \brief Divide by a non-zero branch probability using saturating
+  /// arithmetic.
+  BlockFrequency &operator/=(BranchProbability Prob);
+  BlockFrequency operator/(BranchProbability Prob) const;
+
+  /// \brief Adds another block frequency using saturating arithmetic.
+  BlockFrequency &operator+=(BlockFrequency Freq);
+  BlockFrequency operator+(BlockFrequency Freq) const;
+
+  /// \brief Subtracts another block frequency using saturating arithmetic.
+  BlockFrequency &operator-=(BlockFrequency Freq);
+  BlockFrequency operator-(BlockFrequency Freq) const;
+
+  /// \brief Shift the block frequency right by \p count bits, saturating to 1.
+  BlockFrequency &operator>>=(const unsigned count);
+
+  bool operator<(BlockFrequency RHS) const {
+    return Frequency < RHS.Frequency;
+  }
+
+  bool operator<=(BlockFrequency RHS) const {
+    return Frequency <= RHS.Frequency;
+  }
+
+  bool operator>(BlockFrequency RHS) const {
+    return Frequency > RHS.Frequency;
+  }
+
+  bool operator>=(BlockFrequency RHS) const {
+    return Frequency >= RHS.Frequency;
+  }
+
+  bool operator==(BlockFrequency RHS) const {
+    return Frequency == RHS.Frequency;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BLOCKFREQUENCY_H
diff --git a/linux-x64/clang/include/llvm/Support/BranchProbability.h b/linux-x64/clang/include/llvm/Support/BranchProbability.h
new file mode 100644
index 0000000..b403d7f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/BranchProbability.h
@@ -0,0 +1,231 @@
+//===- BranchProbability.h - Branch Probability Wrapper ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of BranchProbability shared by IR and Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BRANCHPROBABILITY_H
+#define LLVM_SUPPORT_BRANCHPROBABILITY_H
+
+#include "llvm/Support/DataTypes.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <numeric>
+
+namespace llvm {
+
+class raw_ostream;
+
+// This class represents Branch Probability as a non-negative fraction that is
+// no greater than 1. It uses a fixed-point-like implementation, in which the
+// denominator is always a constant value (here we use 1<<31 for maximum
+// precision).
+class BranchProbability {
+  // Numerator
+  uint32_t N;
+
+  // Denominator, which is a constant value.
+  static const uint32_t D = 1u << 31;
+  static const uint32_t UnknownN = UINT32_MAX;
+
+  // Construct a BranchProbability with only numerator assuming the denominator
+  // is 1<<31. For internal use only.
+  explicit BranchProbability(uint32_t n) : N(n) {}
+
+public:
+  BranchProbability() : N(UnknownN) {}
+  BranchProbability(uint32_t Numerator, uint32_t Denominator);
+
+  bool isZero() const { return N == 0; }
+  bool isUnknown() const { return N == UnknownN; }
+
+  static BranchProbability getZero() { return BranchProbability(0); }
+  static BranchProbability getOne() { return BranchProbability(D); }
+  static BranchProbability getUnknown() { return BranchProbability(UnknownN); }
+  // Create a BranchProbability object with the given numerator and 1<<31
+  // as denominator.
+  static BranchProbability getRaw(uint32_t N) { return BranchProbability(N); }
+  // Create a BranchProbability object from 64-bit integers.
+  static BranchProbability getBranchProbability(uint64_t Numerator,
+                                                uint64_t Denominator);
+
+  // Normalize the given probabilities so that their sum becomes
+  // approximately one.
+  template <class ProbabilityIter>
+  static void normalizeProbabilities(ProbabilityIter Begin,
+                                     ProbabilityIter End);
+
+  uint32_t getNumerator() const { return N; }
+  static uint32_t getDenominator() { return D; }
+
+  // Return (1 - Probability).
+  BranchProbability getCompl() const { return BranchProbability(D - N); }
+
+  raw_ostream &print(raw_ostream &OS) const;
+
+  void dump() const;
+
+  /// \brief Scale a large integer.
+  ///
+  /// Scales \c Num.  Guarantees full precision.  Returns the floor of the
+  /// result.
+  ///
+  /// \return \c Num times \c this.
+  uint64_t scale(uint64_t Num) const;
+
+  /// \brief Scale a large integer by the inverse.
+  ///
+  /// Scales \c Num by the inverse of \c this.  Guarantees full precision.
+  /// Returns the floor of the result.
+  ///
+  /// \return \c Num divided by \c this.
+  uint64_t scaleByInverse(uint64_t Num) const;
+
+  BranchProbability &operator+=(BranchProbability RHS) {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in arithmetics.");
+    // Saturate the result in case of overflow.
+    N = (uint64_t(N) + RHS.N > D) ? D : N + RHS.N;
+    return *this;
+  }
+
+  BranchProbability &operator-=(BranchProbability RHS) {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in arithmetics.");
+    // Saturate the result in case of underflow.
+    N = N < RHS.N ? 0 : N - RHS.N;
+    return *this;
+  }
+
+  BranchProbability &operator*=(BranchProbability RHS) {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in arithmetics.");
+    N = (static_cast<uint64_t>(N) * RHS.N + D / 2) / D;
+    return *this;
+  }
+
+  BranchProbability &operator*=(uint32_t RHS) {
+    assert(N != UnknownN &&
+           "Unknown probability cannot participate in arithmetics.");
+    N = (uint64_t(N) * RHS > D) ? D : N * RHS;
+    return *this;
+  }
+
+  BranchProbability &operator/=(uint32_t RHS) {
+    assert(N != UnknownN &&
+           "Unknown probability cannot participate in arithmetics.");
+    assert(RHS > 0 && "The divider cannot be zero.");
+    N /= RHS;
+    return *this;
+  }
+
+  BranchProbability operator+(BranchProbability RHS) const {
+    BranchProbability Prob(*this);
+    return Prob += RHS;
+  }
+
+  BranchProbability operator-(BranchProbability RHS) const {
+    BranchProbability Prob(*this);
+    return Prob -= RHS;
+  }
+
+  BranchProbability operator*(BranchProbability RHS) const {
+    BranchProbability Prob(*this);
+    return Prob *= RHS;
+  }
+
+  BranchProbability operator*(uint32_t RHS) const {
+    BranchProbability Prob(*this);
+    return Prob *= RHS;
+  }
+
+  BranchProbability operator/(uint32_t RHS) const {
+    BranchProbability Prob(*this);
+    return Prob /= RHS;
+  }
+
+  bool operator==(BranchProbability RHS) const { return N == RHS.N; }
+  bool operator!=(BranchProbability RHS) const { return !(*this == RHS); }
+
+  bool operator<(BranchProbability RHS) const {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in comparisons.");
+    return N < RHS.N;
+  }
+
+  bool operator>(BranchProbability RHS) const {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in comparisons.");
+    return RHS < *this;
+  }
+
+  bool operator<=(BranchProbability RHS) const {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in comparisons.");
+    return !(RHS < *this);
+  }
+
+  bool operator>=(BranchProbability RHS) const {
+    assert(N != UnknownN && RHS.N != UnknownN &&
+           "Unknown probability cannot participate in comparisons.");
+    return !(*this < RHS);
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, BranchProbability Prob) {
+  return Prob.print(OS);
+}
+
+template <class ProbabilityIter>
+void BranchProbability::normalizeProbabilities(ProbabilityIter Begin,
+                                               ProbabilityIter End) {
+  if (Begin == End)
+    return;
+
+  unsigned UnknownProbCount = 0;
+  uint64_t Sum = std::accumulate(Begin, End, uint64_t(0),
+                                 [&](uint64_t S, const BranchProbability &BP) {
+                                   if (!BP.isUnknown())
+                                     return S + BP.N;
+                                   UnknownProbCount++;
+                                   return S;
+                                 });
+
+  if (UnknownProbCount > 0) {
+    BranchProbability ProbForUnknown = BranchProbability::getZero();
+    // If the sum of all known probabilities is less than one, distribute the
+    // complement of the sum evenly among the unknown probabilities. Otherwise,
+    // set the unknown probabilities to zero and continue to normalize the
+    // known probabilities.
+    if (Sum < BranchProbability::getDenominator())
+      ProbForUnknown = BranchProbability::getRaw(
+          (BranchProbability::getDenominator() - Sum) / UnknownProbCount);
+
+    std::replace_if(Begin, End,
+                    [](const BranchProbability &BP) { return BP.isUnknown(); },
+                    ProbForUnknown);
+
+    if (Sum <= BranchProbability::getDenominator())
+      return;
+  }
+
+  if (Sum == 0) {
+    BranchProbability BP(1, std::distance(Begin, End));
+    std::fill(Begin, End, BP);
+    return;
+  }
+
+  for (auto I = Begin; I != End; ++I)
+    I->N = (I->N * uint64_t(D) + Sum / 2) / Sum;
+}
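+
+// A minimal usage sketch: two known quarter probabilities are scaled so that
+// their sum becomes (approximately) one.
+//
+// \code
+//   BranchProbability Probs[] = {BranchProbability(1, 4),
+//                                BranchProbability(1, 4)};
+//   BranchProbability::normalizeProbabilities(std::begin(Probs),
+//                                             std::end(Probs));
+//   // Each element is now 1/2.
+// \endcode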
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_BRANCHPROBABILITY_H
diff --git a/linux-x64/clang/include/llvm/Support/CBindingWrapping.h b/linux-x64/clang/include/llvm/Support/CBindingWrapping.h
new file mode 100644
index 0000000..f60f99d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CBindingWrapping.h
@@ -0,0 +1,47 @@
+//===- llvm/Support/CBindingWrapping.h - C Interface Wrapping ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the wrapping macros for the C interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CBINDINGWRAPPING_H
+#define LLVM_SUPPORT_CBINDINGWRAPPING_H
+
+#include "llvm-c/Types.h"
+#include "llvm/Support/Casting.h"
+
+#define DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)     \
+  inline ty *unwrap(ref P) {                            \
+    return reinterpret_cast<ty*>(P);                    \
+  }                                                     \
+                                                        \
+  inline ref wrap(const ty *P) {                        \
+    return reinterpret_cast<ref>(const_cast<ty*>(P));   \
+  }
+
+#define DEFINE_ISA_CONVERSION_FUNCTIONS(ty, ref)        \
+  DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)           \
+                                                        \
+  template<typename T>                                  \
+  inline T *unwrap(ref P) {                             \
+    return cast<T>(unwrap(P));                          \
+  }
+
+#define DEFINE_STDCXX_CONVERSION_FUNCTIONS(ty, ref)     \
+  DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)           \
+                                                        \
+  template<typename T>                                  \
+  inline T *unwrap(ref P) {                             \
+    T *Q = (T*)unwrap(P);                               \
+    assert(Q && "Invalid cast!");                       \
+    return Q;                                           \
+  }
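+
+// A minimal usage sketch (LLVMContext/LLVMContextRef are the kind of pairing
+// used by LLVM's own C bindings; shown here only as an illustration):
+//
+// \code
+//   DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)
+//
+//   void useContext(LLVMContextRef C) {
+//     LLVMContext *Ctx = unwrap(C);    // C handle -> C++ object
+//     LLVMContextRef Back = wrap(Ctx); // C++ object -> C handle
+//     (void)Back;
+//   }
+// \endcode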
+
+#endif // LLVM_SUPPORT_CBINDINGWRAPPING_H
diff --git a/linux-x64/clang/include/llvm/Support/COM.h b/linux-x64/clang/include/llvm/Support/COM.h
new file mode 100644
index 0000000..a2d5a7a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/COM.h
@@ -0,0 +1,36 @@
+//===- llvm/Support/COM.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Provides a library for accessing COM functionality of the host OS.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COM_H
+#define LLVM_SUPPORT_COM_H
+
+namespace llvm {
+namespace sys {
+
+enum class COMThreadingMode { SingleThreaded, MultiThreaded };
+
+class InitializeCOMRAII {
+public:
+  explicit InitializeCOMRAII(COMThreadingMode Threading,
+                             bool SpeedOverMemory = false);
+  ~InitializeCOMRAII();
+
+private:
+  InitializeCOMRAII(const InitializeCOMRAII &) = delete;
+  void operator=(const InitializeCOMRAII &) = delete;
+};
+} // namespace sys
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_COM_H
diff --git a/linux-x64/clang/include/llvm/Support/CachePruning.h b/linux-x64/clang/include/llvm/Support/CachePruning.h
new file mode 100644
index 0000000..f38ce17
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CachePruning.h
@@ -0,0 +1,78 @@
+//=- CachePruning.h - Helper to manage the pruning of a cache dir -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pruning of a directory intended for cache storage, using
+// various policies.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CACHE_PRUNING_H
+#define LLVM_SUPPORT_CACHE_PRUNING_H
+
+#include "llvm/ADT/StringRef.h"
+#include <chrono>
+
+namespace llvm {
+
+template <typename T> class Expected;
+
+/// Policy for the pruneCache() function. A default constructed
+/// CachePruningPolicy provides a reasonable default policy.
+struct CachePruningPolicy {
+  /// The pruning interval. This is intended to be used to avoid scanning the
+  /// directory too often. It does not impact the decision of which file to
+  /// prune. A value of 0 forces the scan to occur. A value of None disables
+  /// pruning.
+  llvm::Optional<std::chrono::seconds> Interval = std::chrono::seconds(1200);
+
+  /// The expiration for a file. When a file hasn't been accessed for Expiration
+  /// seconds, it is removed from the cache. A value of 0 disables the
+  /// expiration-based pruning.
+  std::chrono::seconds Expiration = std::chrono::hours(7 * 24); // 1w
+
+  /// The maximum size for the cache directory, as a percentage of the
+  /// available space on the disk. Set to 100 to indicate no limit; 50 means
+  /// the cache size will not be allowed to exceed half the available disk
+  /// space. A value over 100 is reduced to 100. A value of 0 disables the
+  /// percentage size-based pruning.
+  unsigned MaxSizePercentageOfAvailableSpace = 75;
+
+  /// The maximum size for the cache directory in bytes. A value over the amount
+  /// of available space on the disk will be reduced to the amount of available
+  /// space. A value of 0 disables the absolute size-based pruning.
+  uint64_t MaxSizeBytes = 0;
+
+  /// The maximum number of files in the cache directory. A value of 0 disables
+  /// the number of files based pruning.
+  ///
+  /// This defaults to 1000000 because with that many files there are
+  /// diminishing returns on the effectiveness of the cache, and some file
+  /// systems have a limit on how many files can be contained in a directory
+  /// (notably ext4, which is limited to around 6000000 files).
+  uint64_t MaxSizeFiles = 1000000;
+};
+
+/// Parse the given string as a cache pruning policy. Defaults are taken from a
+/// default constructed CachePruningPolicy object.
+/// For example: "prune_interval=30s:prune_after=24h:cache_size=50%"
+/// which means a pruning interval of 30 seconds, expiration time of 24 hours
+/// and maximum cache size of 50% of available disk space.
+Expected<CachePruningPolicy> parseCachePruningPolicy(StringRef PolicyStr);
+
+/// Perform pruning using the supplied policy. Returns true if pruning
+/// occurred, i.e. if Policy.Interval had expired.
+///
+/// As a safeguard against data loss if the user specifies the wrong directory
+/// as their cache directory, this function will ignore files not matching the
+/// pattern "llvmcache-*".
+bool pruneCache(StringRef Path, CachePruningPolicy Policy);
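+
+// A minimal usage sketch (the cache path is hypothetical): parse a policy
+// string and prune a cache directory with it.
+//
+// \code
+//   auto Policy = parseCachePruningPolicy("prune_after=24h:cache_size=50%");
+//   if (!Policy)
+//     return Policy.takeError();
+//   pruneCache("/path/to/cache", *Policy);
+// \endcode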
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_CACHE_PRUNING_H
diff --git a/linux-x64/clang/include/llvm/Support/Capacity.h b/linux-x64/clang/include/llvm/Support/Capacity.h
new file mode 100644
index 0000000..7460f98
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Capacity.h
@@ -0,0 +1,32 @@
+//===--- Capacity.h - Generic computation of ADT memory use -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the capacity function that computes the amount of
+// memory used by an ADT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CAPACITY_H
+#define LLVM_SUPPORT_CAPACITY_H
+
+#include <cstddef>
+
+namespace llvm {
+
+template <typename T>
+static inline size_t capacity_in_bytes(const T &x) {
+  // This default definition of capacity should work for things like std::vector
+  // and friends.  More specialized versions will work for others.
+  return x.capacity() * sizeof(typename T::value_type);
+}
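+
+// A minimal usage sketch: the default definition reports the memory reserved
+// by a std::vector's backing allocation.
+//
+// \code
+//   std::vector<int> V;
+//   V.reserve(128);
+//   size_t Bytes = capacity_in_bytes(V); // >= 128 * sizeof(int)
+// \endcode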
+
+} // end namespace llvm
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Support/Casting.h b/linux-x64/clang/include/llvm/Support/Casting.h
new file mode 100644
index 0000000..baa2a81
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Casting.h
@@ -0,0 +1,399 @@
+//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
+// and dyn_cast_or_null<X>() templates.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CASTING_H
+#define LLVM_SUPPORT_CASTING_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <memory>
+#include <type_traits>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//                          isa<x> Support Templates
+//===----------------------------------------------------------------------===//
+
+// Define a template that can be specialized by smart pointers to reflect the
+// fact that they are automatically dereferenced, and are not involved with the
+// template selection process...  the default implementation is a noop.
+//
+template<typename From> struct simplify_type {
+  using SimpleType = From; // The real type this represents...
+
+  // An accessor to get the real value...
+  static SimpleType &getSimplifiedValue(From &Val) { return Val; }
+};
+
+template<typename From> struct simplify_type<const From> {
+  using NonConstSimpleType = typename simplify_type<From>::SimpleType;
+  using SimpleType =
+      typename add_const_past_pointer<NonConstSimpleType>::type;
+  using RetType =
+      typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
+
+  static RetType getSimplifiedValue(const From& Val) {
+    return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
+  }
+};
+
+// The core of the implementation of isa<X> is here; To and From should be
+// the names of classes.  This template can be specialized to customize the
+// implementation of isa<> without rewriting it from scratch.
+template <typename To, typename From, typename Enabler = void>
+struct isa_impl {
+  static inline bool doit(const From &Val) {
+    return To::classof(&Val);
+  }
+};
+
+/// \brief Always allow upcasts, and perform no dynamic check for them.
+template <typename To, typename From>
+struct isa_impl<
+    To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
+  static inline bool doit(const From &) { return true; }
+};
+
+template <typename To, typename From> struct isa_impl_cl {
+  static inline bool doit(const From &Val) {
+    return isa_impl<To, From>::doit(Val);
+  }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From> {
+  static inline bool doit(const From &Val) {
+    return isa_impl<To, From>::doit(Val);
+  }
+};
+
+template <typename To, typename From>
+struct isa_impl_cl<To, const std::unique_ptr<From>> {
+  static inline bool doit(const std::unique_ptr<From> &Val) {
+    assert(Val && "isa<> used on a null pointer");
+    return isa_impl_cl<To, From>::doit(*Val);
+  }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, From*> {
+  static inline bool doit(const From *Val) {
+    assert(Val && "isa<> used on a null pointer");
+    return isa_impl<To, From>::doit(*Val);
+  }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, From*const> {
+  static inline bool doit(const From *Val) {
+    assert(Val && "isa<> used on a null pointer");
+    return isa_impl<To, From>::doit(*Val);
+  }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From*> {
+  static inline bool doit(const From *Val) {
+    assert(Val && "isa<> used on a null pointer");
+    return isa_impl<To, From>::doit(*Val);
+  }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
+  static inline bool doit(const From *Val) {
+    assert(Val && "isa<> used on a null pointer");
+    return isa_impl<To, From>::doit(*Val);
+  }
+};
+
+template<typename To, typename From, typename SimpleFrom>
+struct isa_impl_wrap {
+  // When From != SimplifiedType, we can simplify the type some more by using
+  // the simplify_type template.
+  static bool doit(const From &Val) {
+    return isa_impl_wrap<To, SimpleFrom,
+      typename simplify_type<SimpleFrom>::SimpleType>::doit(
+                          simplify_type<const From>::getSimplifiedValue(Val));
+  }
+};
+
+template<typename To, typename FromTy>
+struct isa_impl_wrap<To, FromTy, FromTy> {
+  // When From == SimpleType, we are as simple as we are going to get.
+  static bool doit(const FromTy &Val) {
+    return isa_impl_cl<To,FromTy>::doit(Val);
+  }
+};
+
+// isa<X> - Return true if the parameter to the template is an instance of the
+// template type argument.  Used like this:
+//
+//  if (isa<Type>(myVal)) { ... }
+//
+template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
+  return isa_impl_wrap<X, const Y,
+                       typename simplify_type<const Y>::SimpleType>::doit(Val);
+}
+
+//===----------------------------------------------------------------------===//
+//                          cast<x> Support Templates
+//===----------------------------------------------------------------------===//
+
+template<class To, class From> struct cast_retty;
+
+// Calculate what type the 'cast' function should return, based on a requested
+// type of To and a source type of From.
+template<class To, class From> struct cast_retty_impl {
+  using ret_type = To &;       // Normal case, return Ty&
+};
+template<class To, class From> struct cast_retty_impl<To, const From> {
+  using ret_type = const To &; // Normal case, return Ty&
+};
+
+template<class To, class From> struct cast_retty_impl<To, From*> {
+  using ret_type = To *;       // Pointer arg case, return Ty*
+};
+
+template<class To, class From> struct cast_retty_impl<To, const From*> {
+  using ret_type = const To *; // Constant pointer arg case, return const Ty*
+};
+
+template<class To, class From> struct cast_retty_impl<To, const From*const> {
+  using ret_type = const To *; // Constant pointer arg case, return const Ty*
+};
+
+template <class To, class From>
+struct cast_retty_impl<To, std::unique_ptr<From>> {
+private:
+  using PointerType = typename cast_retty_impl<To, From *>::ret_type;
+  using ResultType = typename std::remove_pointer<PointerType>::type;
+
+public:
+  using ret_type = std::unique_ptr<ResultType>;
+};
+
+template<class To, class From, class SimpleFrom>
+struct cast_retty_wrap {
+  // When the simplified type and the from type are not the same, use the type
+  // simplifier to reduce the type, then reuse cast_retty_impl to get the
+  // resultant type.
+  using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
+};
+
+template<class To, class FromTy>
+struct cast_retty_wrap<To, FromTy, FromTy> {
+  // When the simplified type is equal to the from type, use it directly.
+  using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
+};
+
+template<class To, class From>
+struct cast_retty {
+  using ret_type = typename cast_retty_wrap<
+      To, From, typename simplify_type<From>::SimpleType>::ret_type;
+};
+
+// Ensure the non-simple values are converted using the simplify_type template
+// that may be specialized by smart pointers...
+//
+template<class To, class From, class SimpleFrom> struct cast_convert_val {
+  // This is not a simple type, use the template to simplify it...
+  static typename cast_retty<To, From>::ret_type doit(From &Val) {
+    return cast_convert_val<To, SimpleFrom,
+      typename simplify_type<SimpleFrom>::SimpleType>::doit(
+                          simplify_type<From>::getSimplifiedValue(Val));
+  }
+};
+
+template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
+  // This _is_ a simple type, just cast it.
+  static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
+    typename cast_retty<To, FromTy>::ret_type Res2
+     = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
+    return Res2;
+  }
+};
+
+template <class X> struct is_simple_type {
+  static const bool value =
+      std::is_same<X, typename simplify_type<X>::SimpleType>::value;
+};
+
+// cast<X> - Return the argument parameter cast to the specified type.  This
+// casting operator asserts that the type is correct, so it does not return null
+// on failure.  It does not allow a null argument (use cast_or_null for that).
+// It is typically used like this:
+//
+//  cast<Instruction>(myVal)->getParent()
+//
+template <class X, class Y>
+inline typename std::enable_if<!is_simple_type<Y>::value,
+                               typename cast_retty<X, const Y>::ret_type>::type
+cast(const Y &Val) {
+  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+  return cast_convert_val<
+      X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
+  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+  return cast_convert_val<X, Y,
+                          typename simplify_type<Y>::SimpleType>::doit(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
+  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+  return cast_convert_val<X, Y*,
+                          typename simplify_type<Y*>::SimpleType>::doit(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
+cast(std::unique_ptr<Y> &&Val) {
+  assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
+  using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
+  return ret_type(
+      cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
+          Val.release()));
+}
+
+// cast_or_null<X> - Functionally identical to cast, except that a null value is
+// accepted.
+//
+template <class X, class Y>
+LLVM_NODISCARD inline
+    typename std::enable_if<!is_simple_type<Y>::value,
+                            typename cast_retty<X, const Y>::ret_type>::type
+    cast_or_null(const Y &Val) {
+  if (!Val)
+    return nullptr;
+  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+  return cast<X>(Val);
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline
+    typename std::enable_if<!is_simple_type<Y>::value,
+                            typename cast_retty<X, Y>::ret_type>::type
+    cast_or_null(Y &Val) {
+  if (!Val)
+    return nullptr;
+  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+  return cast<X>(Val);
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
+cast_or_null(Y *Val) {
+  if (!Val) return nullptr;
+  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+  return cast<X>(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
+cast_or_null(std::unique_ptr<Y> &&Val) {
+  if (!Val)
+    return nullptr;
+  return cast<X>(std::move(Val));
+}
+
+// dyn_cast<X> - Return the argument parameter cast to the specified type.  This
+// casting operator returns null if the argument is of the wrong type, so it can
+// be used to test for a type as well as cast if successful.  This should be
+// used in the context of an if statement like this:
+//
+//  if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
+//
+
+template <class X, class Y>
+LLVM_NODISCARD inline
+    typename std::enable_if<!is_simple_type<Y>::value,
+                            typename cast_retty<X, const Y>::ret_type>::type
+    dyn_cast(const Y &Val) {
+  return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
+  return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
+  return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
+// value is accepted.
+//
+template <class X, class Y>
+LLVM_NODISCARD inline
+    typename std::enable_if<!is_simple_type<Y>::value,
+                            typename cast_retty<X, const Y>::ret_type>::type
+    dyn_cast_or_null(const Y &Val) {
+  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline
+    typename std::enable_if<!is_simple_type<Y>::value,
+                            typename cast_retty<X, Y>::ret_type>::type
+    dyn_cast_or_null(Y &Val) {
+  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
+dyn_cast_or_null(Y *Val) {
+  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
+// taking ownership of the input pointer iff isa<X>(Val) is true.  If the
+// cast is successful, Val refers to nullptr on exit and the casted value
+// is returned.  If the cast is unsuccessful, the function returns nullptr
+// and Val is unchanged.
+template <class X, class Y>
+LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
+    -> decltype(cast<X>(Val)) {
+  if (!isa<X>(Val))
+    return nullptr;
+  return cast<X>(std::move(Val));
+}
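+
+// A minimal usage sketch (Base/Derived are hypothetical classes implementing
+// the classof protocol):
+//
+// \code
+//   std::unique_ptr<Base> B = makeSomeBase(); // hypothetical factory
+//   if (auto D = unique_dyn_cast<Derived>(B)) {
+//     // Success: D owns the object and B is now null.
+//   } else {
+//     // Failure: B still owns the object.
+//   }
+// \endcode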
+
+template <class X, class Y>
+LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val)
+    -> decltype(cast<X>(Val)) {
+  return unique_dyn_cast<X, Y>(Val);
+}
+
+// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast,
+// except that a null value is accepted.
+template <class X, class Y>
+LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
+    -> decltype(cast<X>(Val)) {
+  if (!Val)
+    return nullptr;
+  return unique_dyn_cast<X, Y>(Val);
+}
+
+template <class X, class Y>
+LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val)
+    -> decltype(cast<X>(Val)) {
+  return unique_dyn_cast_or_null<X, Y>(Val);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_CASTING_H
diff --git a/linux-x64/clang/include/llvm/Support/CheckedArithmetic.h b/linux-x64/clang/include/llvm/Support/CheckedArithmetic.h
new file mode 100644
index 0000000..68bce06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CheckedArithmetic.h
@@ -0,0 +1,83 @@
+//==-- llvm/Support/CheckedArithmetic.h - Safe arithmetical operations *- C++ //
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains generic functions for operating on integers which
+// indicate whether the operation overflowed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CHECKEDARITHMETIC_H
+#define LLVM_SUPPORT_CHECKEDARITHMETIC_H
+
+#include "llvm/ADT/APInt.h"
+
+#include <type_traits>
+
+namespace {
+
+/// Utility function to apply a given \c APInt method \p Op to \p LHS and
+/// \p RHS, and write the output into \p Res.
+/// \return Whether the operation overflows.
+template <typename T, typename F>
+typename std::enable_if<std::is_integral<T>::value && sizeof(T) * 8 <= 64,
+                        bool>::type
+checkedOp(T LHS, T RHS, F Op, T *Res = nullptr, bool Signed = true) {
+  llvm::APInt ALHS(/*BitSize=*/sizeof(T) * 8, LHS, Signed);
+  llvm::APInt ARHS(/*BitSize=*/sizeof(T) * 8, RHS, Signed);
+  bool Overflow;
+  llvm::APInt Out = (ALHS.*Op)(ARHS, Overflow);
+  if (Res)
+    *Res = Signed ? Out.getSExtValue() : Out.getZExtValue();
+  return Overflow;
+}
+} // end anonymous namespace
+
+namespace llvm {
+
+/// Add two signed integers \p LHS and \p RHS, write into \p Res if non-null.
+/// Does not guarantee saturating arithmetic.
+/// \return Whether the result overflows.
+template <typename T>
+typename std::enable_if<std::is_signed<T>::value, bool>::type
+checkedAdd(T LHS, T RHS, T *Res = nullptr) {
+  return checkedOp(LHS, RHS, &llvm::APInt::sadd_ov, Res);
+}
+
+/// Multiply two signed integers \p LHS and \p RHS, write into \p Res if
+/// non-null.
+/// Does not guarantee saturating arithmetic.
+/// \return Whether the result overflows.
+template <typename T>
+typename std::enable_if<std::is_signed<T>::value, bool>::type
+checkedMul(T LHS, T RHS, T *Res = nullptr) {
+  return checkedOp(LHS, RHS, &llvm::APInt::smul_ov, Res);
+}
+
+/// Add two unsigned integers \p LHS and \p RHS, write into \p Res if non-null.
+/// Does not guarantee saturating arithmetic.
+/// \return Whether the result overflows.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+checkedAddUnsigned(T LHS, T RHS, T *Res = nullptr) {
+  return checkedOp(LHS, RHS, &llvm::APInt::uadd_ov, Res, /*Signed=*/false);
+}
+
+/// Multiply two unsigned integers \p LHS and \p RHS, write into \p Res if
+/// non-null.
+/// Does not guarantee saturating arithmetic.
+/// \return Whether the result overflows.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+checkedMulUnsigned(T LHS, T RHS, T *Res = nullptr) {
+  return checkedOp(LHS, RHS, &llvm::APInt::umul_ov, Res, /*Signed=*/false);
+}
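+
+// A minimal usage sketch: detect signed overflow instead of relying on
+// undefined behavior (the error handling shown is hypothetical).
+//
+// \code
+//   int32_t Res;
+//   if (checkedAdd<int32_t>(INT32_MAX, 1, &Res))
+//     reportOverflow(); // hypothetical handler
+// \endcode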
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Chrono.h b/linux-x64/clang/include/llvm/Support/Chrono.h
new file mode 100644
index 0000000..994068a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Chrono.h
@@ -0,0 +1,164 @@
+//===- llvm/Support/Chrono.h - Utilities for Timing Manipulation-*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CHRONO_H
+#define LLVM_SUPPORT_CHRONO_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/FormatProviders.h"
+
+#include <chrono>
+#include <ctime>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace sys {
+
+/// A time point on the system clock. This is provided for two reasons:
+/// - to insulate us against subtle differences in behavior due to differences
+///   in system clock precision (which is implementation-defined and differs
+///   between platforms).
+/// - to shorten the type name
+/// The default precision is nanoseconds. If you need a specific precision,
+/// specify it explicitly. If unsure, use the default. If you need a time
+/// point on a clock other than the system_clock, use std::chrono directly.
+template <typename D = std::chrono::nanoseconds>
+using TimePoint = std::chrono::time_point<std::chrono::system_clock, D>;
+
+/// Convert a TimePoint to std::time_t
+LLVM_ATTRIBUTE_ALWAYS_INLINE inline std::time_t toTimeT(TimePoint<> TP) {
+  using namespace std::chrono;
+  return system_clock::to_time_t(
+      time_point_cast<system_clock::time_point::duration>(TP));
+}
+
+/// Convert a std::time_t to a TimePoint
+LLVM_ATTRIBUTE_ALWAYS_INLINE inline TimePoint<std::chrono::seconds>
+toTimePoint(std::time_t T) {
+  using namespace std::chrono;
+  return time_point_cast<seconds>(system_clock::from_time_t(T));
+}
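+
+// Illustrative round trip (a sketch, not part of this header): std::time_t
+// only carries second granularity, so converting through it truncates any
+// sub-second precision.
+//
+//   TimePoint<> Now = std::chrono::system_clock::now();
+//   std::time_t T = toTimeT(Now);
+//   TimePoint<std::chrono::seconds> Secs = toTimePoint(T); // Now, truncated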
+
+} // namespace sys
+
+raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);
+
+/// Format provider for TimePoint<>
+///
+/// The options string is a strftime format string, with extensions:
+///   - %L is millis: 000-999
+///   - %f is micros: 000000-999999
+///   - %N is nanos: 000000000-999999999
+///
+/// If no options are given, the default format is "%Y-%m-%d %H:%M:%S.%N".
+template <>
+struct format_provider<sys::TimePoint<>> {
+  static void format(const sys::TimePoint<> &TP, llvm::raw_ostream &OS,
+                     StringRef Style);
+};
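+
+// Illustrative use via llvm::formatv (a sketch, not part of this header;
+// formatv is declared in llvm/Support/FormatVariadic.h). The style string
+// after ':' is the strftime format described above:
+//
+//   sys::TimePoint<> TP = std::chrono::system_clock::now();
+//   outs() << formatv("{0:%Y-%m-%d %H:%M:%S.%L}", TP);
+//   // e.g. "2018-04-01 12:34:56.789"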
+
+/// Implementation of format_provider<T> for duration types.
+///
+/// The options string of a duration type has the grammar:
+///
+///   duration_options  ::= [unit][show_unit [number_options]]
+///   unit              ::= `h`|`m`|`s`|`ms`|`us`|`ns`
+///   show_unit         ::= `+` | `-`
+///   number_options    ::= options string for an integral or floating point type
+///
+///   Examples
+///   =================================
+///   |  options  | Input | Output    |
+///   =================================
+///   | ""        | 1s    | 1 s       |
+///   | "ms"      | 1s    | 1000 ms   |
+///   | "ms-"     | 1s    | 1000      |
+///   | "ms-n"    | 1s    | 1,000     |
+///   | ""        | 1.0s  | 1.00 s    |
+///   =================================
+///
+///  If the unit of the duration type is not one of the units specified above,
+///  it is still possible to format it, provided you explicitly request a
+///  display unit or you request that the unit is not displayed.
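+///
+///  Illustrative use via llvm::formatv (a sketch; formatv is declared in
+///  llvm/Support/FormatVariadic.h), matching the table above:
+///
+///    formatv("{0:ms}", std::chrono::seconds(1));   // yields "1000 ms"
+///    formatv("{0:ms-n}", std::chrono::seconds(1)); // yields "1,000"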
+
+namespace detail {
+template <typename Period> struct unit { static const char value[]; };
+template <typename Period> const char unit<Period>::value[] = "";
+
+template <> struct unit<std::ratio<3600>> { static const char value[]; };
+template <> struct unit<std::ratio<60>> { static const char value[]; };
+template <> struct unit<std::ratio<1>> { static const char value[]; };
+template <> struct unit<std::milli> { static const char value[]; };
+template <> struct unit<std::micro> { static const char value[]; };
+template <> struct unit<std::nano> { static const char value[]; };
+} // namespace detail
+
+template <typename Rep, typename Period>
+struct format_provider<std::chrono::duration<Rep, Period>> {
+private:
+  typedef std::chrono::duration<Rep, Period> Dur;
+  typedef typename std::conditional<
+      std::chrono::treat_as_floating_point<Rep>::value, double, intmax_t>::type
+      InternalRep;
+
+  template <typename AsPeriod> static InternalRep getAs(const Dur &D) {
+    using namespace std::chrono;
+    return duration_cast<duration<InternalRep, AsPeriod>>(D).count();
+  }
+
+  static std::pair<InternalRep, StringRef> consumeUnit(StringRef &Style,
+                                                        const Dur &D) {
+    using namespace std::chrono;
+    if (Style.consume_front("ns"))
+      return {getAs<std::nano>(D), "ns"};
+    if (Style.consume_front("us"))
+      return {getAs<std::micro>(D), "us"};
+    if (Style.consume_front("ms"))
+      return {getAs<std::milli>(D), "ms"};
+    if (Style.consume_front("s"))
+      return {getAs<std::ratio<1>>(D), "s"};
+    if (Style.consume_front("m"))
+      return {getAs<std::ratio<60>>(D), "m"};
+    if (Style.consume_front("h"))
+      return {getAs<std::ratio<3600>>(D), "h"};
+    return {D.count(), detail::unit<Period>::value};
+  }
+
+  static bool consumeShowUnit(StringRef &Style) {
+    if (Style.empty())
+      return true;
+    if (Style.consume_front("-"))
+      return false;
+    if (Style.consume_front("+"))
+      return true;
+    assert(0 && "Unrecognised duration format");
+    return true;
+  }
+
+public:
+  static void format(const Dur &D, llvm::raw_ostream &Stream, StringRef Style) {
+    InternalRep count;
+    StringRef unit;
+    std::tie(count, unit) = consumeUnit(Style, D);
+    bool show_unit = consumeShowUnit(Style);
+
+    format_provider<InternalRep>::format(count, Stream, Style);
+
+    if (show_unit) {
+      assert(!unit.empty());
+      Stream << " " << unit;
+    }
+  }
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_CHRONO_H
diff --git a/linux-x64/clang/include/llvm/Support/CodeGen.h b/linux-x64/clang/include/llvm/Support/CodeGen.h
new file mode 100644
index 0000000..5f9e331
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CodeGen.h
@@ -0,0 +1,62 @@
+//===-- llvm/Support/CodeGen.h - CodeGen Concepts ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some types which define code generation concepts. For
+// example, relocation model.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CODEGEN_H
+#define LLVM_SUPPORT_CODEGEN_H
+
+namespace llvm {
+
+  // Relocation model types.
+  namespace Reloc {
+  enum Model { Static, PIC_, DynamicNoPIC, ROPI, RWPI, ROPI_RWPI };
+  }
+
+  // Code model types.
+  namespace CodeModel {
+    // Sync changes with CodeGenCWrappers.h.
+  enum Model { Small, Kernel, Medium, Large };
+  }
+
+  namespace PICLevel {
+    // This is used to map -fpic/-fPIC.
+    enum Level { NotPIC=0, SmallPIC=1, BigPIC=2 };
+  }
+
+  namespace PIELevel {
+    enum Level { Default=0, Small=1, Large=2 };
+  }
+
+  // TLS models.
+  namespace TLSModel {
+    enum Model {
+      GeneralDynamic,
+      LocalDynamic,
+      InitialExec,
+      LocalExec
+    };
+  }
+
+  // Code generation optimization level.
+  namespace CodeGenOpt {
+    enum Level {
+      None,        // -O0
+      Less,        // -O1
+      Default,     // -O2, -Os
+      Aggressive   // -O3
+    };
+  }
+
+}  // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/CodeGenCoverage.h b/linux-x64/clang/include/llvm/Support/CodeGenCoverage.h
new file mode 100644
index 0000000..d5bd837
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CodeGenCoverage.h
@@ -0,0 +1,37 @@
+//== llvm/Support/CodeGenCoverage.h ------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file provides rule coverage tracking for tablegen-erated CodeGen.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CODEGENCOVERAGE_H
+#define LLVM_SUPPORT_CODEGENCOVERAGE_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+class LLVMContext;
+class MemoryBuffer;
+
+class CodeGenCoverage {
+protected:
+  BitVector RuleCoverage;
+
+public:
+  CodeGenCoverage();
+
+  void setCovered(uint64_t RuleID);
+  bool isCovered(uint64_t RuleID);
+
+  bool parse(MemoryBuffer &Buffer, StringRef BackendName);
+  bool emit(StringRef FilePrefix, StringRef BackendName) const;
+  void reset();
+};
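+
+// Illustrative flow (a sketch, not part of this header; RuleID values and the
+// CoveragePrefix path are hypothetical):
+//
+//   CodeGenCoverage Cov;
+//   Cov.setCovered(RuleID);              // record a rule as it fires
+//   if (!Cov.isCovered(OtherRuleID))
+//     ...;                               // rule was never exercised
+//   Cov.emit(CoveragePrefix, "AArch64"); // persist for offline analysis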
+} // end namespace llvm
+
+#endif // ifndef LLVM_SUPPORT_CODEGENCOVERAGE_H
diff --git a/linux-x64/clang/include/llvm/Support/CommandLine.h b/linux-x64/clang/include/llvm/Support/CommandLine.h
new file mode 100644
index 0000000..f043c11
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CommandLine.h
@@ -0,0 +1,1947 @@
+//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements a command line argument processor that is useful when
+// creating a tool.  It provides a simple, minimalistic interface that is easily
+// extensible and supports nonlocal (library) command line options.
+//
+// Note that rather than trying to figure out what this code does, you should
+// read the library documentation located in docs/CommandLine.html or look at
+// the many example usages in tools/*/*.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMMANDLINE_H
+#define LLVM_SUPPORT_COMMANDLINE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <functional>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+class StringSaver;
+class raw_ostream;
+
+/// cl Namespace - This namespace contains all of the command line option
+/// processing machinery.  It is intentionally a short name to make qualified
+/// usage concise.
+namespace cl {
+
+//===----------------------------------------------------------------------===//
+// ParseCommandLineOptions - Command line option processing entry point.
+//
+// Returns true on success. Otherwise, this will print the error message to
+// stderr and exit if \p Errs is not set (nullptr by default), or print the
+// error message to \p Errs and return false if \p Errs is provided.
+bool ParseCommandLineOptions(int argc, const char *const *argv,
+                             StringRef Overview = "",
+                             raw_ostream *Errs = nullptr);
+
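+// Illustrative tool entry point (a sketch, not part of this header):
+//
+//   static cl::opt<std::string> InputFilename(cl::Positional,
+//                                             cl::desc("<input file>"));
+//   int main(int argc, char **argv) {
+//     cl::ParseCommandLineOptions(argc, argv, "my tool\n");
+//     ...
+//   }
+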
+//===----------------------------------------------------------------------===//
+// ParseEnvironmentOptions - Environment variable option processing alternate
+//                           entry point.
+//
+void ParseEnvironmentOptions(const char *progName, const char *envvar,
+                             const char *Overview = "");
+
+// Function pointer type for printing version information.
+using VersionPrinterTy = std::function<void(raw_ostream &)>;
+
+///===---------------------------------------------------------------------===//
+/// SetVersionPrinter - Override the default (LLVM specific) version printer
+///                     used to print out the version when --version is given
+///                     on the command line. This allows other systems using the
+///                     CommandLine utilities to print their own version string.
+void SetVersionPrinter(VersionPrinterTy func);
+
+///===---------------------------------------------------------------------===//
+/// AddExtraVersionPrinter - Add an extra printer to use in addition to the
+///                          default one. This can be called multiple times,
+///                          and each time it adds a new function to the list
+///                          which will be called after the basic LLVM version
+///                          printing is complete. Each can then add additional
+///                          information specific to the tool.
+void AddExtraVersionPrinter(VersionPrinterTy func);
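+
+// Illustrative sketch (not part of this header): both hooks accept any
+// callable matching VersionPrinterTy.
+//
+//   cl::SetVersionPrinter([](raw_ostream &OS) { OS << "mytool 1.0\n"; });
+//   cl::AddExtraVersionPrinter(
+//       [](raw_ostream &OS) { OS << "  built for x86_64\n"; });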
+
+// PrintOptionValues - Print option values.
+// With -print-options print the difference between option values and defaults.
+// With -print-all-options print all option values.
+// (Currently not perfect, but best-effort.)
+void PrintOptionValues();
+
+// Forward declaration - AddLiteralOption needs to be up here to make gcc happy.
+class Option;
+
+/// \brief Adds a new option for parsing and provides the option it refers to.
+///
+/// \param O pointer to the option
+/// \param Name the string name for the option to handle during parsing
+///
+/// Literal options are used by some parsers to register special option values.
+/// This is how the PassNameParser registers pass names for opt.
+void AddLiteralOption(Option &O, StringRef Name);
+
+//===----------------------------------------------------------------------===//
+// Flags permitted to be passed to command line arguments
+//
+
+enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
+  Optional = 0x00,        // Zero or One occurrence
+  ZeroOrMore = 0x01,      // Zero or more occurrences allowed
+  Required = 0x02,        // One occurrence required
+  OneOrMore = 0x03,       // One or more occurrences required
+
+  // ConsumeAfter - Indicates that this option is fed anything that follows the
+  // last positional argument required by the application (it is an error if
+  // there are zero positional arguments, and a ConsumeAfter option is used).
+  // Thus, for example, all arguments to LLI are processed until a filename is
+  // found.  Once a filename is found, all of the succeeding arguments are
+  // passed, unprocessed, to the ConsumeAfter option.
+  //
+  ConsumeAfter = 0x04
+};
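+
+// Illustrative ConsumeAfter setup, modeled on lli (a sketch, not part of this
+// header): everything after the first positional argument is forwarded to the
+// ConsumeAfter list unparsed.
+//
+//   static cl::opt<std::string> Input(cl::Positional, cl::Required,
+//                                     cl::desc("<input bitcode>"));
+//   static cl::list<std::string> Args(cl::ConsumeAfter,
+//                                     cl::desc("<program arguments>..."));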
+
+enum ValueExpected { // Is a value required for the option?
+  // zero reserved for the unspecified value
+  ValueOptional = 0x01,  // The value can appear... or not
+  ValueRequired = 0x02,  // The value is required to appear!
+  ValueDisallowed = 0x03 // A value may not be specified (for flags)
+};
+
+enum OptionHidden {   // Control whether -help shows this option
+  NotHidden = 0x00,   // Option included in -help & -help-hidden
+  Hidden = 0x01,      // -help doesn't, but -help-hidden does
+  ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg
+};
+
+// Formatting flags - This controls special features that the option might have
+// that cause it to be parsed differently...
+//
+// Prefix - This option allows arguments that are otherwise unrecognized to be
+// matched by options that are a prefix of the actual value.  This is useful for
+// cases like a linker, where options are typically of the form '-lfoo' or
+// '-L../../include' where -l or -L are the actual flags.  When prefix is
+// enabled, and used, the value for the flag comes from the suffix of the
+// argument.
+//
+// Grouping - With this option enabled, multiple letter options are allowed to
+// bunch together with only a single hyphen for the whole group.  This allows
+// emulation of the behavior that ls uses for example: ls -la === ls -l -a
+//
+
+enum FormattingFlags {
+  NormalFormatting = 0x00, // Nothing special
+  Positional = 0x01,       // Is a positional argument, no '-' required
+  Prefix = 0x02,           // Can this option directly prefix its value?
+  Grouping = 0x03          // Can this option group with other options?
+};
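+
+// Illustrative Prefix option, modeled on linker flags (a sketch, not part of
+// this header): given "-lfoo", the option "l" receives the value "foo".
+//
+//   static cl::list<std::string> Libraries("l", cl::Prefix,
+//                                          cl::desc("Library to link"));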
+
+enum MiscFlags {             // Miscellaneous flags to adjust argument
+  CommaSeparated = 0x01,     // Should this cl::list split between commas?
+  PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args?
+  Sink = 0x04                // Should this cl::list eat all unknown options?
+};
+
+//===----------------------------------------------------------------------===//
+// Option Category class
+//
+class OptionCategory {
+private:
+  StringRef const Name;
+  StringRef const Description;
+
+  void registerCategory();
+
+public:
+  OptionCategory(StringRef const Name,
+                 StringRef const Description = "")
+      : Name(Name), Description(Description) {
+    registerCategory();
+  }
+
+  StringRef getName() const { return Name; }
+  StringRef getDescription() const { return Description; }
+};
+
+// The general Option Category (used as default category).
+extern OptionCategory GeneralCategory;
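+
+// Illustrative sketch (not part of this header): categories group options
+// under their own heading in -help output; see the cl::cat modifier below.
+//
+//   static cl::OptionCategory MyToolCategory("my-tool options");
+//   static cl::opt<bool> Verbose("verbose", cl::cat(MyToolCategory));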
+
+//===----------------------------------------------------------------------===//
+// SubCommand class
+//
+class SubCommand {
+private:
+  StringRef Name;
+  StringRef Description;
+
+protected:
+  void registerSubCommand();
+  void unregisterSubCommand();
+
+public:
+  SubCommand(StringRef Name, StringRef Description = "")
+      : Name(Name), Description(Description) {
+        registerSubCommand();
+  }
+  SubCommand() = default;
+
+  void reset();
+
+  explicit operator bool() const;
+
+  StringRef getName() const { return Name; }
+  StringRef getDescription() const { return Description; }
+
+  SmallVector<Option *, 4> PositionalOpts;
+  SmallVector<Option *, 4> SinkOpts;
+  StringMap<Option *> OptionsMap;
+
+  Option *ConsumeAfterOpt = nullptr; // The ConsumeAfter option if it exists.
+};
+
+// A special subcommand representing no subcommand
+extern ManagedStatic<SubCommand> TopLevelSubCommand;
+
+// A special subcommand that can be used to put an option into all subcommands.
+extern ManagedStatic<SubCommand> AllSubCommands;
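+
+// Illustrative subcommand setup (a sketch, not part of this header; names
+// hypothetical): options tied to a subcommand via cl::sub are only accepted
+// after that subcommand on the command line.
+//
+//   static cl::SubCommand RunCmd("run", "Execute the input");
+//   static cl::opt<bool> Fast("fast", cl::sub(RunCmd)); // "tool run -fast"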
+
+//===----------------------------------------------------------------------===//
+// Option Base class
+//
+class Option {
+  friend class alias;
+
+  // handleOccurrence - Overridden by subclasses to handle the value passed into
+  // an argument.  Should return true if there was an error processing the
+  // argument and the program should exit.
+  //
+  virtual bool handleOccurrence(unsigned pos, StringRef ArgName,
+                                StringRef Arg) = 0;
+
+  virtual enum ValueExpected getValueExpectedFlagDefault() const {
+    return ValueOptional;
+  }
+
+  // Out of line virtual function to provide home for the class.
+  virtual void anchor();
+
+  int NumOccurrences = 0; // The number of times specified
+  // Occurrences, HiddenFlag, and Formatting are all enum types but are stored
+  // as unsigned fields to avoid problems with signed enums in bitfields.
+  unsigned Occurrences : 3; // enum NumOccurrencesFlag
+  // not using the enum type for 'Value' because zero is an implementation
+  // detail representing the non-value
+  unsigned Value : 2;
+  unsigned HiddenFlag : 2; // enum OptionHidden
+  unsigned Formatting : 2; // enum FormattingFlags
+  unsigned Misc : 3;
+  unsigned Position = 0;       // Position of last occurrence of the option
+  unsigned AdditionalVals = 0; // Greater than 0 for multi-valued option.
+
+public:
+  StringRef ArgStr;   // The argument string itself (ex: "help", "o")
+  StringRef HelpStr;  // The descriptive text message for -help
+  StringRef ValueStr; // String describing what the value of this option is
+  OptionCategory *Category; // The Category this option belongs to
+  SmallPtrSet<SubCommand *, 4> Subs; // The subcommands this option belongs to.
+  bool FullyInitialized = false; // Has addArgument been called?
+
+  inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
+    return (enum NumOccurrencesFlag)Occurrences;
+  }
+
+  inline enum ValueExpected getValueExpectedFlag() const {
+    return Value ? ((enum ValueExpected)Value) : getValueExpectedFlagDefault();
+  }
+
+  inline enum OptionHidden getOptionHiddenFlag() const {
+    return (enum OptionHidden)HiddenFlag;
+  }
+
+  inline enum FormattingFlags getFormattingFlag() const {
+    return (enum FormattingFlags)Formatting;
+  }
+
+  inline unsigned getMiscFlags() const { return Misc; }
+  inline unsigned getPosition() const { return Position; }
+  inline unsigned getNumAdditionalVals() const { return AdditionalVals; }
+
+  // hasArgStr - Return true if the argstr != ""
+  bool hasArgStr() const { return !ArgStr.empty(); }
+  bool isPositional() const { return getFormattingFlag() == cl::Positional; }
+  bool isSink() const { return getMiscFlags() & cl::Sink; }
+
+  bool isConsumeAfter() const {
+    return getNumOccurrencesFlag() == cl::ConsumeAfter;
+  }
+
+  bool isInAllSubCommands() const {
+    return any_of(Subs, [](const SubCommand *SC) {
+      return SC == &*AllSubCommands;
+    });
+  }
+
+  //-------------------------------------------------------------------------===
+  // Accessor functions set by OptionModifiers
+  //
+  void setArgStr(StringRef S);
+  void setDescription(StringRef S) { HelpStr = S; }
+  void setValueStr(StringRef S) { ValueStr = S; }
+  void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) { Occurrences = Val; }
+  void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; }
+  void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; }
+  void setFormattingFlag(enum FormattingFlags V) { Formatting = V; }
+  void setMiscFlag(enum MiscFlags M) { Misc |= M; }
+  void setPosition(unsigned pos) { Position = pos; }
+  void setCategory(OptionCategory &C) { Category = &C; }
+  void addSubCommand(SubCommand &S) { Subs.insert(&S); }
+
+protected:
+  explicit Option(enum NumOccurrencesFlag OccurrencesFlag,
+                  enum OptionHidden Hidden)
+      : Occurrences(OccurrencesFlag), Value(0), HiddenFlag(Hidden),
+        Formatting(NormalFormatting), Misc(0), Category(&GeneralCategory) {}
+
+  inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }
+
+public:
+  virtual ~Option() = default;
+
+  // addArgument - Register this argument with the commandline system.
+  //
+  void addArgument();
+
+  /// Unregisters this option from the CommandLine system.
+  ///
+  /// This option must have been the last option registered.
+  /// For testing purposes only.
+  void removeArgument();
+
+  // Return the width of the option tag for printing...
+  virtual size_t getOptionWidth() const = 0;
+
+  // printOptionInfo - Print out information about this option.  The
+  // to-be-maintained width is specified.
+  //
+  virtual void printOptionInfo(size_t GlobalWidth) const = 0;
+
+  virtual void printOptionValue(size_t GlobalWidth, bool Force) const = 0;
+
+  virtual void setDefault() = 0;
+
+  static void printHelpStr(StringRef HelpStr, size_t Indent,
+                           size_t FirstLineIndentedBy);
+
+  virtual void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}
+
+  // addOccurrence - Wrapper around handleOccurrence that enforces Flags.
+  //
+  virtual bool addOccurrence(unsigned pos, StringRef ArgName, StringRef Value,
+                             bool MultiArg = false);
+
+  // Prints option name followed by message.  Always returns true.
+  bool error(const Twine &Message, StringRef ArgName = StringRef());
+
+  inline int getNumOccurrences() const { return NumOccurrences; }
+  inline void reset() { NumOccurrences = 0; }
+};
+
+//===----------------------------------------------------------------------===//
+// Command line option modifiers that can be used to modify the behavior of
+// command line option parsers...
+//
+
+// desc - Modifier to set the description shown in the -help output...
+struct desc {
+  StringRef Desc;
+
+  desc(StringRef Str) : Desc(Str) {}
+
+  void apply(Option &O) const { O.setDescription(Desc); }
+};
+
+// value_desc - Modifier to set the value description shown in the -help
+// output...
+struct value_desc {
+  StringRef Desc;
+
+  value_desc(StringRef Str) : Desc(Str) {}
+
+  void apply(Option &O) const { O.setValueStr(Desc); }
+};
+
+// init - Specify a default (initial) value for the command line argument, if
+// the default constructor for the argument type does not give you what you
+// want.  This is only valid on "opt" arguments, not on "list" arguments.
+//
+template <class Ty> struct initializer {
+  const Ty &Init;
+  initializer(const Ty &Val) : Init(Val) {}
+
+  template <class Opt> void apply(Opt &O) const { O.setInitialValue(Init); }
+};
+
+template <class Ty> initializer<Ty> init(const Ty &Val) {
+  return initializer<Ty>(Val);
+}
+
+// location - Allow the user to specify which external variable they want to
+// store the results of the command line argument processing into, if they don't
+// want to store it in the option itself.
+//
+template <class Ty> struct LocationClass {
+  Ty &Loc;
+
+  LocationClass(Ty &L) : Loc(L) {}
+
+  template <class Opt> void apply(Opt &O) const { O.setLocation(O, Loc); }
+};
+
+template <class Ty> LocationClass<Ty> location(Ty &L) {
+  return LocationClass<Ty>(L);
+}
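+
+// Illustrative external storage, per docs/CommandLine.html (a sketch, not part
+// of this header): the parsed value lands in a plain global rather than in the
+// cl::opt object itself. Note that cl::location must precede cl::init.
+//
+//   static bool DebugFlag;
+//   static cl::opt<bool, true> Debug("debug", cl::location(DebugFlag),
+//                                    cl::init(false));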
+
+// cat - Specify the Option category for the command line argument to belong
+// to.
+struct cat {
+  OptionCategory &Category;
+
+  cat(OptionCategory &c) : Category(c) {}
+
+  template <class Opt> void apply(Opt &O) const { O.setCategory(Category); }
+};
+
+// sub - Specify the subcommand that this option belongs to.
+struct sub {
+  SubCommand &Sub;
+
+  sub(SubCommand &S) : Sub(S) {}
+
+  template <class Opt> void apply(Opt &O) const { O.addSubCommand(Sub); }
+};
+
+//===----------------------------------------------------------------------===//
+// OptionValue class
+
+// Support value comparison outside the template.
+struct GenericOptionValue {
+  virtual bool compare(const GenericOptionValue &V) const = 0;
+
+protected:
+  GenericOptionValue() = default;
+  GenericOptionValue(const GenericOptionValue&) = default;
+  GenericOptionValue &operator=(const GenericOptionValue &) = default;
+  ~GenericOptionValue() = default;
+
+private:
+  virtual void anchor();
+};
+
+template <class DataType> struct OptionValue;
+
+// The default value safely does nothing. Option value printing is only
+// best-effort.
+template <class DataType, bool isClass>
+struct OptionValueBase : public GenericOptionValue {
+  // Temporary storage for argument passing.
+  using WrapperType = OptionValue<DataType>;
+
+  bool hasValue() const { return false; }
+
+  const DataType &getValue() const { llvm_unreachable("no default value"); }
+
+  // Some options may take their value from a different data type.
+  template <class DT> void setValue(const DT & /*V*/) {}
+
+  bool compare(const DataType & /*V*/) const { return false; }
+
+  bool compare(const GenericOptionValue & /*V*/) const override {
+    return false;
+  }
+
+protected:
+  ~OptionValueBase() = default;
+};
+
+// Simple copy of the option value.
+template <class DataType> class OptionValueCopy : public GenericOptionValue {
+  DataType Value;
+  bool Valid = false;
+
+protected:
+  OptionValueCopy(const OptionValueCopy&) = default;
+  OptionValueCopy &operator=(const OptionValueCopy &) = default;
+  ~OptionValueCopy() = default;
+
+public:
+  OptionValueCopy() = default;
+
+  bool hasValue() const { return Valid; }
+
+  const DataType &getValue() const {
+    assert(Valid && "invalid option value");
+    return Value;
+  }
+
+  void setValue(const DataType &V) {
+    Valid = true;
+    Value = V;
+  }
+
+  bool compare(const DataType &V) const { return Valid && (Value != V); }
+
+  bool compare(const GenericOptionValue &V) const override {
+    const OptionValueCopy<DataType> &VC =
+        static_cast<const OptionValueCopy<DataType> &>(V);
+    if (!VC.hasValue())
+      return false;
+    return compare(VC.getValue());
+  }
+};
+
+// Non-class option values.
+template <class DataType>
+struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
+  using WrapperType = DataType;
+
+protected:
+  OptionValueBase() = default;
+  OptionValueBase(const OptionValueBase&) = default;
+  OptionValueBase &operator=(const OptionValueBase &) = default;
+  ~OptionValueBase() = default;
+};
+
+// Top-level option class.
+template <class DataType>
+struct OptionValue final
+    : OptionValueBase<DataType, std::is_class<DataType>::value> {
+  OptionValue() = default;
+
+  OptionValue(const DataType &V) { this->setValue(V); }
+
+  // Some options may take their value from a different data type.
+  template <class DT> OptionValue<DataType> &operator=(const DT &V) {
+    this->setValue(V);
+    return *this;
+  }
+};
+
+// Other safe-to-copy-by-value common option types.
+enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };
+template <>
+struct OptionValue<cl::boolOrDefault> final
+    : OptionValueCopy<cl::boolOrDefault> {
+  using WrapperType = cl::boolOrDefault;
+
+  OptionValue() = default;
+
+  OptionValue(const cl::boolOrDefault &V) { this->setValue(V); }
+
+  OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault &V) {
+    setValue(V);
+    return *this;
+  }
+
+private:
+  void anchor() override;
+};
+
+template <>
+struct OptionValue<std::string> final : OptionValueCopy<std::string> {
+  using WrapperType = StringRef;
+
+  OptionValue() = default;
+
+  OptionValue(const std::string &V) { this->setValue(V); }
+
+  OptionValue<std::string> &operator=(const std::string &V) {
+    setValue(V);
+    return *this;
+  }
+
+private:
+  void anchor() override;
+};
+
+//===----------------------------------------------------------------------===//
+// Enum valued command line option
+//
+
+// This represents a single enum value, using "int" as the underlying type.
+struct OptionEnumValue {
+  StringRef Name;
+  int Value;
+  StringRef Description;
+};
+
+#define clEnumVal(ENUMVAL, DESC)                                               \
+  llvm::cl::OptionEnumValue { #ENUMVAL, int(ENUMVAL), DESC }
+#define clEnumValN(ENUMVAL, FLAGNAME, DESC)                                    \
+  llvm::cl::OptionEnumValue { FLAGNAME, int(ENUMVAL), DESC }
+
+// values - For custom data types, allow specifying a group of values together
+// as the values that go into the mapping that the option handler uses.
+//
+class ValuesClass {
+  // Use a vector instead of a map, because the lists should be short,
+  // the overhead is less, and most importantly, it keeps them in the order
+  // inserted so we can print our option out nicely.
+  SmallVector<OptionEnumValue, 4> Values;
+
+public:
+  ValuesClass(std::initializer_list<OptionEnumValue> Options)
+      : Values(Options) {}
+
+  template <class Opt> void apply(Opt &O) const {
+    for (auto Value : Values)
+      O.getParser().addLiteralOption(Value.Name, Value.Value,
+                                     Value.Description);
+  }
+};
+
+/// Helper to build a ValuesClass by forwarding a variable number of arguments
+/// as an initializer list to the ValuesClass constructor.
+template <typename... OptsTy> ValuesClass values(OptsTy... Options) {
+  return ValuesClass({Options...});
+}
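+
+// Illustrative enum-valued option (a sketch, not part of this header):
+// clEnumVal uses the enumerator's own name as the flag value, while clEnumValN
+// lets you choose a different one.
+//
+//   enum OptLevel { Debug, O1, O2 };
+//   static cl::opt<OptLevel> Level(cl::desc("Choose optimization level:"),
+//       cl::values(clEnumValN(Debug, "g", "No optimizations, with debug info"),
+//                  clEnumVal(O1, "Enable trivial optimizations"),
+//                  clEnumVal(O2, "Enable default optimizations")));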
+
+//===----------------------------------------------------------------------===//
+// parser class - Parameterizable parser for different data types.  By default,
+// known data types (string, int, bool) have specialized parsers, that do what
+// you would expect.  The default parser, used for data types that are not
+// built-in, uses a mapping table to map specific options to values, which is
+// used, among other things, to handle enum types.
+
+//--------------------------------------------------
+// generic_parser_base - This class holds all the non-generic code that we do
+// not need replicated for every instance of the generic parser.  This also
+// allows us to put stuff into CommandLine.cpp
+//
+class generic_parser_base {
+protected:
+  class GenericOptionInfo {
+  public:
+    GenericOptionInfo(StringRef name, StringRef helpStr)
+        : Name(name), HelpStr(helpStr) {}
+    StringRef Name;
+    StringRef HelpStr;
+  };
+
+public:
+  generic_parser_base(Option &O) : Owner(O) {}
+
+  virtual ~generic_parser_base() = default;
+  // Base class should have a virtual destructor.
+
+  // getNumOptions - Virtual function implemented by generic subclass to
+  // indicate how many entries are in Values.
+  //
+  virtual unsigned getNumOptions() const = 0;
+
+  // getOption - Return option name N.
+  virtual StringRef getOption(unsigned N) const = 0;
+
+  // getDescription - Return description N
+  virtual StringRef getDescription(unsigned N) const = 0;
+
+  // Return the width of the option tag for printing...
+  virtual size_t getOptionWidth(const Option &O) const;
+
+  virtual const GenericOptionValue &getOptionValue(unsigned N) const = 0;
+
+  // printOptionInfo - Print out information about this option.  The
+  // to-be-maintained width is specified.
+  //
+  virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const;
+
+  void printGenericOptionDiff(const Option &O, const GenericOptionValue &V,
+                              const GenericOptionValue &Default,
+                              size_t GlobalWidth) const;
+
+  // printOptionDiff - print the value of an option and its default.
+  //
+  // Template definition ensures that the option and default have the same
+  // DataType (via the same AnyOptionValue).
+  template <class AnyOptionValue>
+  void printOptionDiff(const Option &O, const AnyOptionValue &V,
+                       const AnyOptionValue &Default,
+                       size_t GlobalWidth) const {
+    printGenericOptionDiff(O, V, Default, GlobalWidth);
+  }
+
+  void initialize() {}
+
+  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) {
+    // If there has been no argstr specified, that means that we need to add an
+    // argument for every possible option.  This ensures that our options are
+    // vectored to us.
+    if (!Owner.hasArgStr())
+      for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
+        OptionNames.push_back(getOption(i));
+  }
+
+  enum ValueExpected getValueExpectedFlagDefault() const {
+    // If there is an ArgStr specified, then we are of the form:
+    //
+    //    -opt=O2   or   -opt O2  or  -optO2
+    //
+    // In which case, the value is required.  Otherwise if an arg str has not
+    // been specified, we are of the form:
+    //
+    //    -O2 or O2 or -la (where -l and -a are separate options)
+    //
+    // If this is the case, we cannot allow a value.
+    //
+    if (Owner.hasArgStr())
+      return ValueRequired;
+    else
+      return ValueDisallowed;
+  }
+
+  // findOption - Return the option number corresponding to the specified
+  // argument string.  If the option is not found, getNumOptions() is returned.
+  //
+  unsigned findOption(StringRef Name);
+
+protected:
+  Option &Owner;
+};
+
+// Default parser implementation - This implementation depends on having a
+// mapping of recognized options to values of some sort.  In addition to this,
+// each entry in the mapping also tracks a help message that is printed with the
+// command line option for -help.  Because this is a simple mapping parser, the
+// data type can be any type that does not have a specialized parser.
+//
+template <class DataType> class parser : public generic_parser_base {
+protected:
+  class OptionInfo : public GenericOptionInfo {
+  public:
+    OptionInfo(StringRef name, DataType v, StringRef helpStr)
+        : GenericOptionInfo(name, helpStr), V(v) {}
+
+    OptionValue<DataType> V;
+  };
+  SmallVector<OptionInfo, 8> Values;
+
+public:
+  parser(Option &O) : generic_parser_base(O) {}
+
+  using parser_data_type = DataType;
+
+  // Implement virtual functions needed by generic_parser_base
+  unsigned getNumOptions() const override { return unsigned(Values.size()); }
+  StringRef getOption(unsigned N) const override { return Values[N].Name; }
+  StringRef getDescription(unsigned N) const override {
+    return Values[N].HelpStr;
+  }
+
+  // getOptionValue - Return the value of option name N.
+  const GenericOptionValue &getOptionValue(unsigned N) const override {
+    return Values[N].V;
+  }
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, DataType &V) {
+    StringRef ArgVal;
+    if (Owner.hasArgStr())
+      ArgVal = Arg;
+    else
+      ArgVal = ArgName;
+
+    for (size_t i = 0, e = Values.size(); i != e; ++i)
+      if (Values[i].Name == ArgVal) {
+        V = Values[i].V.getValue();
+        return false;
+      }
+
+    return O.error("Cannot find option named '" + ArgVal + "'!");
+  }
+
+  /// addLiteralOption - Add an entry to the mapping table.
+  ///
+  template <class DT>
+  void addLiteralOption(StringRef Name, const DT &V, StringRef HelpStr) {
+    assert(findOption(Name) == Values.size() && "Option already exists!");
+    OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
+    Values.push_back(X);
+    AddLiteralOption(Owner, Name);
+  }
+
+  /// removeLiteralOption - Remove the specified option.
+  ///
+  void removeLiteralOption(StringRef Name) {
+    unsigned N = findOption(Name);
+    assert(N != Values.size() && "Option not found!");
+    Values.erase(Values.begin() + N);
+  }
+};
+
+//--------------------------------------------------
+// basic_parser - Super class of parsers to provide boilerplate code
+//
+class basic_parser_impl { // non-template implementation of basic_parser<t>
+public:
+  basic_parser_impl(Option &) {}
+
+  enum ValueExpected getValueExpectedFlagDefault() const {
+    return ValueRequired;
+  }
+
+  void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}
+
+  void initialize() {}
+
+  // Return the width of the option tag for printing...
+  size_t getOptionWidth(const Option &O) const;
+
+  // printOptionInfo - Print out information about this option.  The
+  // to-be-maintained width is specified.
+  //
+  void printOptionInfo(const Option &O, size_t GlobalWidth) const;
+
+  // printOptionNoValue - Print a placeholder for options that don't yet support
+  // printOptionDiff().
+  void printOptionNoValue(const Option &O, size_t GlobalWidth) const;
+
+  // getValueName - Overload in subclass to provide a better default value.
+  virtual StringRef getValueName() const { return "value"; }
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  virtual void anchor();
+
+protected:
+  ~basic_parser_impl() = default;
+
+  // A helper for basic_parser::printOptionDiff.
+  void printOptionName(const Option &O, size_t GlobalWidth) const;
+};
+
+// basic_parser - The real basic parser is just a template wrapper that provides
+// a typedef for the provided data type.
+//
+template <class DataType> class basic_parser : public basic_parser_impl {
+public:
+  using parser_data_type = DataType;
+  using OptVal = OptionValue<DataType>;
+
+  basic_parser(Option &O) : basic_parser_impl(O) {}
+
+protected:
+  ~basic_parser() = default;
+};
+
+//--------------------------------------------------
+// parser<bool>
+//
+template <> class parser<bool> final : public basic_parser<bool> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, bool &Val);
+
+  void initialize() {}
+
+  enum ValueExpected getValueExpectedFlagDefault() const {
+    return ValueOptional;
+  }
+
+  // getValueName - Do not print =<value> at all.
+  StringRef getValueName() const override { return StringRef(); }
+
+  void printOptionDiff(const Option &O, bool V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<bool>;
+
+//--------------------------------------------------
+// parser<boolOrDefault>
+template <>
+class parser<boolOrDefault> final : public basic_parser<boolOrDefault> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, boolOrDefault &Val);
+
+  enum ValueExpected getValueExpectedFlagDefault() const {
+    return ValueOptional;
+  }
+
+  // getValueName - Do not print =<value> at all.
+  StringRef getValueName() const override { return StringRef(); }
+
+  void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<boolOrDefault>;
+
+//--------------------------------------------------
+// parser<int>
+//
+template <> class parser<int> final : public basic_parser<int> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "int"; }
+
+  void printOptionDiff(const Option &O, int V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<int>;
+
+//--------------------------------------------------
+// parser<unsigned>
+//
+template <> class parser<unsigned> final : public basic_parser<unsigned> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "uint"; }
+
+  void printOptionDiff(const Option &O, unsigned V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<unsigned>;
+
+//--------------------------------------------------
+// parser<unsigned long long>
+//
+template <>
+class parser<unsigned long long> final
+    : public basic_parser<unsigned long long> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg,
+             unsigned long long &Val);
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "uint"; }
+
+  void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<unsigned long long>;
+
+//--------------------------------------------------
+// parser<double>
+//
+template <> class parser<double> final : public basic_parser<double> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "number"; }
+
+  void printOptionDiff(const Option &O, double V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<double>;
+
+//--------------------------------------------------
+// parser<float>
+//
+template <> class parser<float> final : public basic_parser<float> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "number"; }
+
+  void printOptionDiff(const Option &O, float V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<float>;
+
+//--------------------------------------------------
+// parser<std::string>
+//
+template <> class parser<std::string> final : public basic_parser<std::string> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &, StringRef, StringRef Arg, std::string &Value) {
+    Value = Arg.str();
+    return false;
+  }
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "string"; }
+
+  void printOptionDiff(const Option &O, StringRef V, const OptVal &Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<std::string>;
+
+//--------------------------------------------------
+// parser<char>
+//
+template <> class parser<char> final : public basic_parser<char> {
+public:
+  parser(Option &O) : basic_parser(O) {}
+
+  // parse - Return true on error.
+  bool parse(Option &, StringRef, StringRef Arg, char &Value) {
+    Value = Arg[0];
+    return false;
+  }
+
+  // getValueName - Overload in subclass to provide a better default value.
+  StringRef getValueName() const override { return "char"; }
+
+  void printOptionDiff(const Option &O, char V, OptVal Default,
+                       size_t GlobalWidth) const;
+
+  // An out-of-line virtual method to provide a 'home' for this class.
+  void anchor() override;
+};
+
+extern template class basic_parser<char>;
+
+//--------------------------------------------------
+// PrintOptionDiff
+//
+// This collection of wrappers is the intermediary between class opt and class
+// parser to handle all the template nastiness.
+
+// This overloaded function is selected by the generic parser.
+template <class ParserClass, class DT>
+void printOptionDiff(const Option &O, const generic_parser_base &P, const DT &V,
+                     const OptionValue<DT> &Default, size_t GlobalWidth) {
+  OptionValue<DT> OV = V;
+  P.printOptionDiff(O, OV, Default, GlobalWidth);
+}
+
+// This is instantiated for basic parsers when the parsed value has a different
+// type than the option value. e.g. HelpPrinter.
+template <class ParserDT, class ValDT> struct OptionDiffPrinter {
+  void print(const Option &O, const parser<ParserDT> &P, const ValDT & /*V*/,
+             const OptionValue<ValDT> & /*Default*/, size_t GlobalWidth) {
+    P.printOptionNoValue(O, GlobalWidth);
+  }
+};
+
+// This is instantiated for basic parsers when the parsed value has the same
+// type as the option value.
+template <class DT> struct OptionDiffPrinter<DT, DT> {
+  void print(const Option &O, const parser<DT> &P, const DT &V,
+             const OptionValue<DT> &Default, size_t GlobalWidth) {
+    P.printOptionDiff(O, V, Default, GlobalWidth);
+  }
+};
+
+// This overloaded function is selected by the basic parser, which may parse a
+// different type than the option type.
+template <class ParserClass, class ValDT>
+void printOptionDiff(
+    const Option &O,
+    const basic_parser<typename ParserClass::parser_data_type> &P,
+    const ValDT &V, const OptionValue<ValDT> &Default, size_t GlobalWidth) {
+
+  OptionDiffPrinter<typename ParserClass::parser_data_type, ValDT> printer;
+  printer.print(O, static_cast<const ParserClass &>(P), V, Default,
+                GlobalWidth);
+}
+
+//===----------------------------------------------------------------------===//
+// applicator class - This class is used because we must use partial
+// specialization to handle literal string arguments specially (const char* does
+// not correctly respond to the apply method).  Because the syntax to use this
+// is a pain, we have the 'apply' method below to handle the nastiness...
+//
+template <class Mod> struct applicator {
+  template <class Opt> static void opt(const Mod &M, Opt &O) { M.apply(O); }
+};
+
+// Handle const char* as a special case...
+template <unsigned n> struct applicator<char[n]> {
+  template <class Opt> static void opt(StringRef Str, Opt &O) {
+    O.setArgStr(Str);
+  }
+};
+template <unsigned n> struct applicator<const char[n]> {
+  template <class Opt> static void opt(StringRef Str, Opt &O) {
+    O.setArgStr(Str);
+  }
+};
+template <> struct applicator<StringRef > {
+  template <class Opt> static void opt(StringRef Str, Opt &O) {
+    O.setArgStr(Str);
+  }
+};
+
+template <> struct applicator<NumOccurrencesFlag> {
+  static void opt(NumOccurrencesFlag N, Option &O) {
+    O.setNumOccurrencesFlag(N);
+  }
+};
+
+template <> struct applicator<ValueExpected> {
+  static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); }
+};
+
+template <> struct applicator<OptionHidden> {
+  static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); }
+};
+
+template <> struct applicator<FormattingFlags> {
+  static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); }
+};
+
+template <> struct applicator<MiscFlags> {
+  static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); }
+};
+
+// apply method - Apply modifiers to an option in a type safe way.
+template <class Opt, class Mod, class... Mods>
+void apply(Opt *O, const Mod &M, const Mods &... Ms) {
+  applicator<Mod>::opt(M, *O);
+  apply(O, Ms...);
+}
+
+template <class Opt, class Mod> void apply(Opt *O, const Mod &M) {
+  applicator<Mod>::opt(M, *O);
+}
+
+//===----------------------------------------------------------------------===//
+// opt_storage class
+
+// Default storage class definition: external storage.  This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, bool ExternalStorage, bool isClass>
+class opt_storage {
+  DataType *Location = nullptr; // Where to store the object...
+  OptionValue<DataType> Default;
+
+  void check_location() const {
+    assert(Location && "cl::location(...) not specified for a command "
+                       "line option with external storage, "
+                       "or cl::init specified before cl::location()!!");
+  }
+
+public:
+  opt_storage() = default;
+
+  bool setLocation(Option &O, DataType &L) {
+    if (Location)
+      return O.error("cl::location(x) specified more than once!");
+    Location = &L;
+    Default = L;
+    return false;
+  }
+
+  template <class T> void setValue(const T &V, bool initial = false) {
+    check_location();
+    *Location = V;
+    if (initial)
+      Default = V;
+  }
+
+  DataType &getValue() {
+    check_location();
+    return *Location;
+  }
+  const DataType &getValue() const {
+    check_location();
+    return *Location;
+  }
+
+  operator DataType() const { return this->getValue(); }
+
+  const OptionValue<DataType> &getDefault() const { return Default; }
+};
+
+// Define how to hold a class type object, such as a string.  Since we can
+// inherit from a class, we do so.  This makes us exactly compatible with the
+// object in all cases that it is used.
+//
+template <class DataType>
+class opt_storage<DataType, false, true> : public DataType {
+public:
+  OptionValue<DataType> Default;
+
+  template <class T> void setValue(const T &V, bool initial = false) {
+    DataType::operator=(V);
+    if (initial)
+      Default = V;
+  }
+
+  DataType &getValue() { return *this; }
+  const DataType &getValue() const { return *this; }
+
+  const OptionValue<DataType> &getDefault() const { return Default; }
+};
+
+// Define a partial specialization to handle things we cannot inherit from.  In
+// this case, we store an instance through containment, and overload operators
+// to get at the value.
+//
+template <class DataType> class opt_storage<DataType, false, false> {
+public:
+  DataType Value;
+  OptionValue<DataType> Default;
+
+  // Make sure we initialize the value with the default constructor for the
+  // type.
+  opt_storage() : Value(DataType()), Default(DataType()) {}
+
+  template <class T> void setValue(const T &V, bool initial = false) {
+    Value = V;
+    if (initial)
+      Default = V;
+  }
+  DataType &getValue() { return Value; }
+  DataType getValue() const { return Value; }
+
+  const OptionValue<DataType> &getDefault() const { return Default; }
+
+  operator DataType() const { return getValue(); }
+
+  // If the datatype is a pointer, support -> on it.
+  DataType operator->() const { return Value; }
+};
+
+//===----------------------------------------------------------------------===//
+// opt - A scalar command line option.
+//
+template <class DataType, bool ExternalStorage = false,
+          class ParserClass = parser<DataType>>
+class opt : public Option,
+            public opt_storage<DataType, ExternalStorage,
+                               std::is_class<DataType>::value> {
+  ParserClass Parser;
+
+  bool handleOccurrence(unsigned pos, StringRef ArgName,
+                        StringRef Arg) override {
+    typename ParserClass::parser_data_type Val =
+        typename ParserClass::parser_data_type();
+    if (Parser.parse(*this, ArgName, Arg, Val))
+      return true; // Parse error!
+    this->setValue(Val);
+    this->setPosition(pos);
+    return false;
+  }
+
+  enum ValueExpected getValueExpectedFlagDefault() const override {
+    return Parser.getValueExpectedFlagDefault();
+  }
+
+  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+    return Parser.getExtraOptionNames(OptionNames);
+  }
+
+  // Forward printing stuff to the parser...
+  size_t getOptionWidth() const override {
+    return Parser.getOptionWidth(*this);
+  }
+
+  void printOptionInfo(size_t GlobalWidth) const override {
+    Parser.printOptionInfo(*this, GlobalWidth);
+  }
+
+  void printOptionValue(size_t GlobalWidth, bool Force) const override {
+    if (Force || this->getDefault().compare(this->getValue())) {
+      cl::printOptionDiff<ParserClass>(*this, Parser, this->getValue(),
+                                       this->getDefault(), GlobalWidth);
+    }
+  }
+
+  template <class T, class = typename std::enable_if<
+            std::is_assignable<T&, T>::value>::type>
+  void setDefaultImpl() {
+    const OptionValue<DataType> &V = this->getDefault();
+    if (V.hasValue())
+      this->setValue(V.getValue());
+  }
+
+  template <class T, class = typename std::enable_if<
+            !std::is_assignable<T&, T>::value>::type>
+  void setDefaultImpl(...) {}
+
+  void setDefault() override { setDefaultImpl<DataType>(); }
+
+  void done() {
+    addArgument();
+    Parser.initialize();
+  }
+
+public:
+  // Command line options should not be copyable
+  opt(const opt &) = delete;
+  opt &operator=(const opt &) = delete;
+
+  // setInitialValue - Used by the cl::init modifier...
+  void setInitialValue(const DataType &V) { this->setValue(V, true); }
+
+  ParserClass &getParser() { return Parser; }
+
+  template <class T> DataType &operator=(const T &Val) {
+    this->setValue(Val);
+    return this->getValue();
+  }
+
+  template <class... Mods>
+  explicit opt(const Mods &... Ms)
+      : Option(Optional, NotHidden), Parser(*this) {
+    apply(this, Ms...);
+    done();
+  }
+};
+
+extern template class opt<unsigned>;
+extern template class opt<int>;
+extern template class opt<std::string>;
+extern template class opt<char>;
+extern template class opt<bool>;
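+
+// Illustrative sketch (not part of this header; names hypothetical): through
+// opt_storage, a cl::opt converts implicitly to its underlying value, so it
+// reads like a plain variable.
+//
+//   static cl::opt<unsigned> Threshold("threshold", cl::init(8),
+//                                      cl::desc("Inlining threshold"));
+//   ...
+//   if (Threshold > 16) { /* user passed a large -threshold=<n> */ }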
+
+//===----------------------------------------------------------------------===//
+// list_storage class
+
+// Default storage class definition: external storage.  This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, class StorageClass> class list_storage {
+  StorageClass *Location = nullptr; // Where to store the object...
+
+public:
+  list_storage() = default;
+
+  bool setLocation(Option &O, StorageClass &L) {
+    if (Location)
+      return O.error("cl::location(x) specified more than once!");
+    Location = &L;
+    return false;
+  }
+
+  template <class T> void addValue(const T &V) {
+    assert(Location != 0 && "cl::location(...) not specified for a command "
+                            "line option with external storage!");
+    Location->push_back(V);
+  }
+};
+
+// Define how to hold a class type object, such as a string.
+// Originally this code inherited from std::vector. In transitioning to a new
+// API for command line options we should change this. The new implementation
+// of this list_storage specialization implements the minimum subset of the
+// std::vector API required for all the current clients.
+//
+// FIXME: Reduce this API to a more narrow subset of std::vector
+//
+template <class DataType> class list_storage<DataType, bool> {
+  std::vector<DataType> Storage;
+
+public:
+  using iterator = typename std::vector<DataType>::iterator;
+
+  iterator begin() { return Storage.begin(); }
+  iterator end() { return Storage.end(); }
+
+  using const_iterator = typename std::vector<DataType>::const_iterator;
+
+  const_iterator begin() const { return Storage.begin(); }
+  const_iterator end() const { return Storage.end(); }
+
+  using size_type = typename std::vector<DataType>::size_type;
+
+  size_type size() const { return Storage.size(); }
+
+  bool empty() const { return Storage.empty(); }
+
+  void push_back(const DataType &value) { Storage.push_back(value); }
+  void push_back(DataType &&value) { Storage.push_back(std::move(value)); }
+
+  using reference = typename std::vector<DataType>::reference;
+  using const_reference = typename std::vector<DataType>::const_reference;
+
+  reference operator[](size_type pos) { return Storage[pos]; }
+  const_reference operator[](size_type pos) const { return Storage[pos]; }
+
+  iterator erase(const_iterator pos) { return Storage.erase(pos); }
+  iterator erase(const_iterator first, const_iterator last) {
+    return Storage.erase(first, last);
+  }
+
+  iterator erase(iterator pos) { return Storage.erase(pos); }
+  iterator erase(iterator first, iterator last) {
+    return Storage.erase(first, last);
+  }
+
+  iterator insert(const_iterator pos, const DataType &value) {
+    return Storage.insert(pos, value);
+  }
+  iterator insert(const_iterator pos, DataType &&value) {
+    return Storage.insert(pos, std::move(value));
+  }
+
+  iterator insert(iterator pos, const DataType &value) {
+    return Storage.insert(pos, value);
+  }
+  iterator insert(iterator pos, DataType &&value) {
+    return Storage.insert(pos, std::move(value));
+  }
+
+  reference front() { return Storage.front(); }
+  const_reference front() const { return Storage.front(); }
+
+  operator std::vector<DataType>&() { return Storage; }
+  operator ArrayRef<DataType>() { return Storage; }
+  std::vector<DataType> *operator&() { return &Storage; }
+  const std::vector<DataType> *operator&() const { return &Storage; }
+
+  template <class T> void addValue(const T &V) { Storage.push_back(V); }
+};
+
+//===----------------------------------------------------------------------===//
+// list - A list of command line options.
+//
+template <class DataType, class StorageClass = bool,
+          class ParserClass = parser<DataType>>
+class list : public Option, public list_storage<DataType, StorageClass> {
+  std::vector<unsigned> Positions;
+  ParserClass Parser;
+
+  enum ValueExpected getValueExpectedFlagDefault() const override {
+    return Parser.getValueExpectedFlagDefault();
+  }
+
+  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+    return Parser.getExtraOptionNames(OptionNames);
+  }
+
+  bool handleOccurrence(unsigned pos, StringRef ArgName,
+                        StringRef Arg) override {
+    typename ParserClass::parser_data_type Val =
+        typename ParserClass::parser_data_type();
+    if (Parser.parse(*this, ArgName, Arg, Val))
+      return true; // Parse Error!
+    list_storage<DataType, StorageClass>::addValue(Val);
+    setPosition(pos);
+    Positions.push_back(pos);
+    return false;
+  }
+
+  // Forward printing stuff to the parser...
+  size_t getOptionWidth() const override {
+    return Parser.getOptionWidth(*this);
+  }
+
+  void printOptionInfo(size_t GlobalWidth) const override {
+    Parser.printOptionInfo(*this, GlobalWidth);
+  }
+
+  // Unimplemented: list options don't currently store their default value.
+  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+  }
+
+  void setDefault() override {}
+
+  void done() {
+    addArgument();
+    Parser.initialize();
+  }
+
+public:
+  // Command line options should not be copyable
+  list(const list &) = delete;
+  list &operator=(const list &) = delete;
+
+  ParserClass &getParser() { return Parser; }
+
+  unsigned getPosition(unsigned optnum) const {
+    assert(optnum < Positions.size() && "Invalid option index");
+    return Positions[optnum];
+  }
+
+  void setNumAdditionalVals(unsigned n) { Option::setNumAdditionalVals(n); }
+
+  template <class... Mods>
+  explicit list(const Mods &... Ms)
+      : Option(ZeroOrMore, NotHidden), Parser(*this) {
+    apply(this, Ms...);
+    done();
+  }
+};
+
+// multi_val - Modifier to set the number of additional values.
+struct multi_val {
+  unsigned AdditionalVals;
+  explicit multi_val(unsigned N) : AdditionalVals(N) {}
+
+  template <typename D, typename S, typename P>
+  void apply(list<D, S, P> &L) const {
+    L.setNumAdditionalVals(AdditionalVals);
+  }
+};
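+
+// For example (illustrative sketch), an option that consumes three values per
+// occurrence, as in "-sectalign segname sectname value":
+//
+//   static cl::list<std::string> SectAlign("sectalign", cl::multi_val(3),
+//       cl::desc("Set section alignment"));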
+
+//===----------------------------------------------------------------------===//
+// bits_storage class
+
+// Default storage class definition: external storage.  This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, class StorageClass> class bits_storage {
+  unsigned *Location = nullptr; // Where to store the bits...
+
+  template <class T> static unsigned Bit(const T &V) {
+    unsigned BitPos = static_cast<unsigned>(V);
+    assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
+           "enum exceeds width of bit vector!");
+    return 1 << BitPos;
+  }
+
+public:
+  bits_storage() = default;
+
+  bool setLocation(Option &O, unsigned &L) {
+    if (Location)
+      return O.error("cl::location(x) specified more than once!");
+    Location = &L;
+    return false;
+  }
+
+  template <class T> void addValue(const T &V) {
+    assert(Location != 0 && "cl::location(...) not specified for a command "
+                            "line option with external storage!");
+    *Location |= Bit(V);
+  }
+
+  unsigned getBits() { return *Location; }
+
+  template <class T> bool isSet(const T &V) {
+    return (*Location & Bit(V)) != 0;
+  }
+};
+
+// Define how to hold bits when no external storage is specified: the bits are
+// stored directly in an unsigned member of the option object itself.
+//
+template <class DataType> class bits_storage<DataType, bool> {
+  unsigned Bits = 0; // Where to store the bits...
+
+  template <class T> static unsigned Bit(const T &V) {
+    unsigned BitPos = (unsigned)V;
+    assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
+           "enum exceeds width of bit vector!");
+    return 1 << BitPos;
+  }
+
+public:
+  template <class T> void addValue(const T &V) { Bits |= Bit(V); }
+
+  unsigned getBits() { return Bits; }
+
+  template <class T> bool isSet(const T &V) { return (Bits & Bit(V)) != 0; }
+};
+
+//===----------------------------------------------------------------------===//
+// bits - A bit vector of command options.
+//
+template <class DataType, class Storage = bool,
+          class ParserClass = parser<DataType>>
+class bits : public Option, public bits_storage<DataType, Storage> {
+  std::vector<unsigned> Positions;
+  ParserClass Parser;
+
+  enum ValueExpected getValueExpectedFlagDefault() const override {
+    return Parser.getValueExpectedFlagDefault();
+  }
+
+  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+    return Parser.getExtraOptionNames(OptionNames);
+  }
+
+  bool handleOccurrence(unsigned pos, StringRef ArgName,
+                        StringRef Arg) override {
+    typename ParserClass::parser_data_type Val =
+        typename ParserClass::parser_data_type();
+    if (Parser.parse(*this, ArgName, Arg, Val))
+      return true; // Parse Error!
+    this->addValue(Val);
+    setPosition(pos);
+    Positions.push_back(pos);
+    return false;
+  }
+
+  // Forward printing stuff to the parser...
+  size_t getOptionWidth() const override {
+    return Parser.getOptionWidth(*this);
+  }
+
+  void printOptionInfo(size_t GlobalWidth) const override {
+    Parser.printOptionInfo(*this, GlobalWidth);
+  }
+
+  // Unimplemented: bits options don't currently store their default values.
+  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+  }
+
+  void setDefault() override {}
+
+  void done() {
+    addArgument();
+    Parser.initialize();
+  }
+
+public:
+  // Command line options should not be copyable
+  bits(const bits &) = delete;
+  bits &operator=(const bits &) = delete;
+
+  ParserClass &getParser() { return Parser; }
+
+  unsigned getPosition(unsigned optnum) const {
+    assert(optnum < Positions.size() && "Invalid option index");
+    return Positions[optnum];
+  }
+
+  template <class... Mods>
+  explicit bits(const Mods &... Ms)
+      : Option(ZeroOrMore, NotHidden), Parser(*this) {
+    apply(this, Ms...);
+    done();
+  }
+};
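+
+// A usage sketch (illustrative; the enum and option below are hypothetical):
+//
+//   enum Opts { dce, constprop, strip };
+//   static cl::bits<Opts> OptBits(cl::desc("Optimizations:"),
+//       cl::values(clEnumVal(dce, "Dead code elimination"),
+//                  clEnumVal(constprop, "Constant propagation"),
+//                  clEnumVal(strip, "Strip symbols")));
+//
+// After parsing, query individual flags with OptBits.isSet(dce) or read the
+// whole mask with OptBits.getBits().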
+
+//===----------------------------------------------------------------------===//
+// Aliased command line option (alias this name to a preexisting name)
+//
+
+class alias : public Option {
+  Option *AliasFor;
+
+  bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
+                        StringRef Arg) override {
+    return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
+  }
+
+  bool addOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Value,
+                     bool MultiArg = false) override {
+    return AliasFor->addOccurrence(pos, AliasFor->ArgStr, Value, MultiArg);
+  }
+
+  // Handle printing stuff...
+  size_t getOptionWidth() const override;
+  void printOptionInfo(size_t GlobalWidth) const override;
+
+  // Aliases do not need to print their values.
+  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+  }
+
+  void setDefault() override { AliasFor->setDefault(); }
+
+  ValueExpected getValueExpectedFlagDefault() const override {
+    return AliasFor->getValueExpectedFlag();
+  }
+
+  void done() {
+    if (!hasArgStr())
+      error("cl::alias must have argument name specified!");
+    if (!AliasFor)
+      error("cl::alias must have an cl::aliasopt(option) specified!");
+    Subs = AliasFor->Subs;
+    addArgument();
+  }
+
+public:
+  // Command line options should not be copyable
+  alias(const alias &) = delete;
+  alias &operator=(const alias &) = delete;
+
+  void setAliasFor(Option &O) {
+    if (AliasFor)
+      error("cl::alias must only have one cl::aliasopt(...) specified!");
+    AliasFor = &O;
+  }
+
+  template <class... Mods>
+  explicit alias(const Mods &... Ms)
+      : Option(Optional, Hidden), AliasFor(nullptr) {
+    apply(this, Ms...);
+    done();
+  }
+};
+
+// aliasfor - Modifier to set the option an alias aliases.
+struct aliasopt {
+  Option &Opt;
+
+  explicit aliasopt(Option &O) : Opt(O) {}
+
+  void apply(alias &A) const { A.setAliasFor(Opt); }
+};
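+
+// A usage sketch (illustrative names): making -q behave exactly like -quiet:
+//
+//   static cl::opt<bool> Quiet("quiet", cl::desc("Suppress output"));
+//   static cl::alias QuietA("q", cl::desc("Alias for -quiet"),
+//                           cl::aliasopt(Quiet));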
+
+// extrahelp - provide additional help at the end of the normal help
+// output. All occurrences of cl::extrahelp will be accumulated and
+// printed to stderr at the end of the regular help, just before
+// exit is called.
+struct extrahelp {
+  StringRef morehelp;
+
+  explicit extrahelp(StringRef help);
+};
+
+void PrintVersionMessage();
+
+/// This function just prints the help message, exactly the same way as if the
+/// -help or -help-hidden option had been given on the command line.
+///
+/// \param Hidden if true will print hidden options
+/// \param Categorized if true print options in categories
+void PrintHelpMessage(bool Hidden = false, bool Categorized = false);
+
+//===----------------------------------------------------------------------===//
+// Public interface for accessing registered options.
+//
+
+/// \brief Use this to get a StringMap of all registered named options
+/// (e.g. -help).
+///
+/// \return A reference to the StringMap used by the cl APIs to parse options.
+///
+/// Access to unnamed arguments (i.e. positional) is not provided because
+/// it is expected that the client already has access to these.
+///
+/// Typical usage:
+/// \code
+/// int main(int argc, char *argv[]) {
+///   StringMap<llvm::cl::Option*> &opts = llvm::cl::getRegisteredOptions();
+///   assert(opts.count("help") == 1);
+///   opts["help"]->setDescription("Show alphabetical help information");
+///   // More code
+///   llvm::cl::ParseCommandLineOptions(argc, argv);
+///   // More code
+/// }
+/// \endcode
+///
+/// This interface is useful for modifying options in libraries that are out of
+/// the control of the client. The options should be modified before calling
+/// llvm::cl::ParseCommandLineOptions().
+///
+/// Hopefully this API can be deprecated soon. Any situation where options need
+/// to be modified by tools or libraries should be handled by sane APIs rather
+/// than just handing around a global list.
+StringMap<Option *> &getRegisteredOptions(SubCommand &Sub = *TopLevelSubCommand);
+
+/// \brief Use this to get all registered SubCommands from the provided parser.
+///
+/// \return A range of all SubCommand pointers registered with the parser.
+///
+/// Typical usage:
+/// \code
+/// int main(int argc, char *argv[]) {
+///   llvm::cl::ParseCommandLineOptions(argc, argv);
+///   for (auto* S : llvm::cl::getRegisteredSubcommands()) {
+///     if (*S) {
+///       std::cout << "Executing subcommand: " << S->getName() << std::endl;
+///       // Execute some function based on the name...
+///     }
+///   }
+/// }
+/// \endcode
+///
+/// This interface is useful for defining subcommands in libraries and
+/// dispatching them from a single point (like in the main function).
+iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
+getRegisteredSubcommands();
+
+//===----------------------------------------------------------------------===//
+// Standalone command line processing utilities.
+//
+
+/// \brief Tokenizes a command line that can contain escapes and quotes.
+///
+/// The quoting rules match those used by GCC and other tools that use
+/// libiberty's buildargv() or expandargv() utilities, and do not match bash.
+/// They differ from buildargv() on treatment of backslashes that do not escape
+/// a special character to make it possible to accept most Windows file paths.
+///
+/// \param [in] Source The string to be split on whitespace with quotes.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
+/// lines and end of the response file to be marked with a nullptr string.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
+                            SmallVectorImpl<const char *> &NewArgv,
+                            bool MarkEOLs = false);
+
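+// A usage sketch (illustrative; assumes llvm/Support/StringSaver.h and
+// llvm/ADT/SmallVector.h are included):
+//
+//   BumpPtrAllocator Alloc;
+//   StringSaver Saver(Alloc);
+//   SmallVector<const char *, 16> NewArgv;
+//   cl::TokenizeGNUCommandLine("foo -o \"a b.o\"", Saver, NewArgv);
+//   // NewArgv now holds {"foo", "-o", "a b.o"}.
+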
+/// \brief Tokenizes a Windows command line which may contain quotes and escaped
+/// quotes.
+///
+/// See MSDN docs for CommandLineToArgvW for information on the quoting rules.
+/// http://msdn.microsoft.com/en-us/library/windows/desktop/17w5ykft(v=vs.85).aspx
+///
+/// \param [in] Source The string to be split on whitespace with quotes.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
+/// lines and end of the response file to be marked with a nullptr string.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
+                                SmallVectorImpl<const char *> &NewArgv,
+                                bool MarkEOLs = false);
+
+/// \brief String tokenization function type.  Should be compatible with either
+/// Windows or Unix command line tokenizers.
+using TokenizerCallback = void (*)(StringRef Source, StringSaver &Saver,
+                                   SmallVectorImpl<const char *> &NewArgv,
+                                   bool MarkEOLs);
+
+/// Tokenizes content of configuration file.
+///
+/// \param [in] Source The string representing content of config file.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+/// \param [in] MarkEOLs Added for compatibility with TokenizerCallback.
+///
+/// It works like TokenizeGNUCommandLine with the ability to skip comment lines.
+///
+void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
+                        SmallVectorImpl<const char *> &NewArgv,
+                        bool MarkEOLs = false);
+
+/// Reads command line options from the given configuration file.
+///
+/// \param [in] CfgFileName Path to configuration file.
+/// \param [in] Saver Object that saves allocated strings.
+/// \param [out] Argv Array to which the read options are added.
+/// \return true if the file was successfully read.
+///
+/// It reads the content of the specified file, tokenizes it, and expands
+/// "@file" commands, resolving file names in them relative to the directory
+/// where CfgFileName resides.
+///
+bool readConfigFile(StringRef CfgFileName, StringSaver &Saver,
+                    SmallVectorImpl<const char *> &Argv);
+
+/// \brief Expand response files on a command line recursively using the given
+/// StringSaver and tokenization strategy.  Argv should contain the command line
+/// before expansion and will be modified in place. If requested, Argv will
+/// also be populated with nullptrs indicating where each response file line
+/// ends, which is useful for the "/link" argument that needs to consume all
+/// remaining arguments only until the next end of line, when in a response
+/// file.
+///
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] Tokenizer Tokenization strategy. Typically Unix or Windows.
+/// \param [in,out] Argv Command line into which to expand response files.
+/// \param [in] MarkEOLs Mark end of lines and the end of the response file
+/// with nullptrs in the Argv vector.
+/// \param [in] RelativeNames true if names of nested response files must be
+/// resolved relative to the including file.
+/// \return true if all @files were expanded successfully or there were none.
+bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
+                         SmallVectorImpl<const char *> &Argv,
+                         bool MarkEOLs = false, bool RelativeNames = false);
+
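+// A usage sketch (illustrative; assumed to run inside a function with access
+// to argc/argv): expanding @file arguments in place before parsing:
+//
+//   SmallVector<const char *, 32> Argv(argv, argv + argc);
+//   BumpPtrAllocator Alloc;
+//   StringSaver Saver(Alloc);
+//   cl::ExpandResponseFiles(Saver, cl::TokenizeGNUCommandLine, Argv);
+//   cl::ParseCommandLineOptions(static_cast<int>(Argv.size()), Argv.data());
+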
+/// \brief Mark all options not part of this category as cl::ReallyHidden.
+///
+/// \param Category the category of options to keep displaying
+///
+/// Some tools (like clang-format) like to be able to hide all options that are
+/// not specific to the tool. This function allows a tool to specify a single
+/// option category to display in the -help output.
+void HideUnrelatedOptions(cl::OptionCategory &Category,
+                          SubCommand &Sub = *TopLevelSubCommand);
+
+/// \brief Mark all options not part of the categories as cl::ReallyHidden.
+///
+/// \param Categories the categories of options to keep displaying.
+///
+/// Some tools (like clang-format) like to be able to hide all options that are
+/// not specific to the tool. This function allows a tool to specify a set of
+/// option categories to display in the -help output.
+void HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories,
+                          SubCommand &Sub = *TopLevelSubCommand);
+
+/// \brief Reset all command line options to a state that looks as if they have
+/// never appeared on the command line.  This is useful for being able to parse
+/// a command line multiple times (especially useful for writing tests).
+void ResetAllOptionOccurrences();
+
+/// \brief Reset the command line parser back to its initial state.  This
+/// removes all options, categories, and subcommands and returns the parser to
+/// a state where no options are supported.
+void ResetCommandLineParser();
+
+} // end namespace cl
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_COMMANDLINE_H
diff --git a/linux-x64/clang/include/llvm/Support/Compiler.h b/linux-x64/clang/include/llvm/Support/Compiler.h
new file mode 100644
index 0000000..43a96e4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Compiler.h
@@ -0,0 +1,19 @@
+//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Due to layering constraints (Support depends on Demangle) this is a thin
+// wrapper around the implementation that lives in llvm/Demangle, though most
+// clients can/should think of this as being provided by Support for simplicity
+// (not many clients are aware of their dependency on Demangle, and it is a
+// weird place to own this - but that did not seem to justify splitting Support
+// into "lower support" and "upper support").
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Demangle/Compiler.h"
diff --git a/linux-x64/clang/include/llvm/Support/Compression.h b/linux-x64/clang/include/llvm/Support/Compression.h
new file mode 100644
index 0000000..2d191ab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Compression.h
@@ -0,0 +1,52 @@
+//===-- llvm/Support/Compression.h ---Compression----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains basic functions for compression/uncompression.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMPRESSION_H
+#define LLVM_SUPPORT_COMPRESSION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+template <typename T> class SmallVectorImpl;
+class Error;
+class StringRef;
+
+namespace zlib {
+
+enum CompressionLevel {
+  NoCompression,
+  DefaultCompression,
+  BestSpeedCompression,
+  BestSizeCompression
+};
+
+bool isAvailable();
+
+Error compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
+               CompressionLevel Level = DefaultCompression);
+
+Error uncompress(StringRef InputBuffer, char *UncompressedBuffer,
+                 size_t &UncompressedSize);
+
+Error uncompress(StringRef InputBuffer,
+                 SmallVectorImpl<char> &UncompressedBuffer,
+                 size_t UncompressedSize);
+
+uint32_t crc32(StringRef Buffer);
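+
+// A round-trip sketch (illustrative; Input is an assumed StringRef, and the
+// code is assumed to run inside a function returning Error), guarded by
+// isAvailable() since LLVM may be built without zlib:
+//
+//   SmallVector<char, 0> Compressed, Restored;
+//   if (zlib::isAvailable()) {
+//     if (Error E = zlib::compress(Input, Compressed))
+//       return E;
+//     if (Error E = zlib::uncompress(
+//             StringRef(Compressed.data(), Compressed.size()), Restored,
+//             Input.size()))
+//       return E;
+//   }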
+
+}  // End of namespace zlib
+
+} // End of namespace llvm
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Support/ConvertUTF.h b/linux-x64/clang/include/llvm/Support/ConvertUTF.h
new file mode 100644
index 0000000..99ae171
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ConvertUTF.h
@@ -0,0 +1,291 @@
+/*===--- ConvertUTF.h - Universal Character Names conversions ---------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *==------------------------------------------------------------------------==*/
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ *
+ * Disclaimer
+ *
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ *
+ * Limitations on Rights to Redistribute This Code
+ *
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+    Conversions between UTF-32, UTF-16, and UTF-8.  Header file.
+
+    Several functions are included here, forming a complete set of
+    conversions between the three formats.  UTF-7 is not included
+    here, but is handled in a separate source file.
+
+    Each of these routines takes pointers to input buffers and output
+    buffers.  The input buffers are const.
+
+    Each routine converts the text between *sourceStart and sourceEnd,
+    putting the result into the buffer between *targetStart and
+    targetEnd. Note: the end pointers are *after* the last item: e.g.
+    *(sourceEnd - 1) is the last item.
+
+    The return result indicates whether the conversion was successful,
+    and if not, whether the problem was in the source or target buffers.
+    (Only the first encountered problem is indicated.)
+
+    After the conversion, *sourceStart and *targetStart are both
+    updated to point to the end of last text successfully converted in
+    the respective buffers.
+
+    Input parameters:
+        sourceStart - pointer to a pointer to the source buffer.
+                The contents of this are modified on return so that
+                it points at the next thing to be converted.
+        targetStart - similarly, pointer to pointer to the target buffer.
+        sourceEnd, targetEnd - respectively pointers to the ends of the
+                two buffers, for overflow checking only.
+
+    These conversion functions take a ConversionFlags argument. When this
+    flag is set to strict, both irregular sequences and isolated surrogates
+    will cause an error.  When the flag is set to lenient, both irregular
+    sequences and isolated surrogates are converted.
+
+    Whether the flag is strict or lenient, all illegal sequences will cause
+    an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+    or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+    must check for illegal sequences.
+
+    When the flag is set to lenient, characters over 0x10FFFF are converted
+    to the replacement character; otherwise (when the flag is set to strict)
+    they constitute an error.
+
+    Output parameters:
+        The value "sourceIllegal" is returned from some routines if the input
+        sequence is malformed.  When "sourceIllegal" is returned, the source
+        value will point to the illegal value that caused the problem. E.g.,
+        in UTF-8 when a sequence is malformed, it points to the start of the
+        malformed sequence.
+
+    Author: Mark E. Davis, 1994.
+    Rev History: Rick McGowan, fixes & updates May 2001.
+         Fixes & updates, Sept 2001.
+
+------------------------------------------------------------------------ */
+
+#ifndef LLVM_SUPPORT_CONVERTUTF_H
+#define LLVM_SUPPORT_CONVERTUTF_H
+
+#include <cstddef>
+#include <string>
+
+// Wrap everything in namespace llvm so that programs can link with llvm and
+// their own version of the unicode libraries.
+
+namespace llvm {
+
+/* ---------------------------------------------------------------------
+    The following 4 definitions are compiler-specific.
+    The C standard does not guarantee that wchar_t has at least
+    16 bits, so wchar_t is no less portable than unsigned short!
+    All should be unsigned values to avoid sign extension during
+    bit mask & shift operations.
+------------------------------------------------------------------------ */
+
+typedef unsigned int    UTF32;  /* at least 32 bits */
+typedef unsigned short  UTF16;  /* at least 16 bits */
+typedef unsigned char   UTF8;   /* typically 8 bits */
+typedef unsigned char   Boolean; /* 0 or 1 */
+
+/* Some fundamental constants */
+#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
+#define UNI_MAX_BMP (UTF32)0x0000FFFF
+#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
+#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+
+#define UNI_MAX_UTF8_BYTES_PER_CODE_POINT 4
+
+#define UNI_UTF16_BYTE_ORDER_MARK_NATIVE  0xFEFF
+#define UNI_UTF16_BYTE_ORDER_MARK_SWAPPED 0xFFFE
+
+typedef enum {
+  conversionOK,           /* conversion successful */
+  sourceExhausted,        /* partial character in source, but hit end */
+  targetExhausted,        /* insuff. room in target for conversion */
+  sourceIllegal           /* source sequence is illegal/malformed */
+} ConversionResult;
+
+typedef enum {
+  strictConversion = 0,
+  lenientConversion
+} ConversionFlags;
+
+ConversionResult ConvertUTF8toUTF16 (
+  const UTF8** sourceStart, const UTF8* sourceEnd,
+  UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+/**
+ * Convert a partial UTF8 sequence to UTF32.  If the sequence ends in an
+ * incomplete code unit sequence, returns \c sourceExhausted.
+ */
+ConversionResult ConvertUTF8toUTF32Partial(
+  const UTF8** sourceStart, const UTF8* sourceEnd,
+  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+/**
+ * Convert a partial UTF8 sequence to UTF32.  If the sequence ends in an
+ * incomplete code unit sequence, returns \c sourceIllegal.
+ */
+ConversionResult ConvertUTF8toUTF32(
+  const UTF8** sourceStart, const UTF8* sourceEnd,
+  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF8 (
+  const UTF16** sourceStart, const UTF16* sourceEnd,
+  UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF8 (
+  const UTF32** sourceStart, const UTF32* sourceEnd,
+  UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF32 (
+  const UTF16** sourceStart, const UTF16* sourceEnd,
+  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF16 (
+  const UTF32** sourceStart, const UTF32* sourceEnd,
+  UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+
+Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
+
+unsigned getNumBytesForUTF8(UTF8 firstByte);
+
+/*************************************************************************/
+/* Below are LLVM-specific wrappers of the functions above. */
+
+template <typename T> class ArrayRef;
+template <typename T> class SmallVectorImpl;
+class StringRef;
+
+/**
+ * Convert a UTF-8 StringRef to UTF8, UTF16, or UTF32 depending on
+ * WideCharWidth. The converted data is written to ResultPtr, which needs to
+ * point to at least WideCharWidth * (Source.size() + 1) bytes. On success,
+ * ResultPtr will point one after the end of the copied string. On failure,
+ * ResultPtr will not be changed, and ErrorPtr will be set to the location of
+ * the first character which could not be converted.
+ * \return true on success.
+ */
+bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
+                       char *&ResultPtr, const UTF8 *&ErrorPtr);
+
+/**
+* Converts a UTF-8 StringRef to a std::wstring.
+* \return true on success.
+*/
+bool ConvertUTF8toWide(llvm::StringRef Source, std::wstring &Result);
+
+/**
+* Converts a UTF-8 C-string to a std::wstring.
+* \return true on success.
+*/
+bool ConvertUTF8toWide(const char *Source, std::wstring &Result);
+
+/**
+* Converts a std::wstring to a UTF-8 encoded std::string.
+* \return true on success.
+*/
+bool convertWideToUTF8(const std::wstring &Source, std::string &Result);
+
+
+/**
+ * Convert a Unicode code point to a UTF-8 sequence.
+ *
+ * \param Source a Unicode code point.
+ * \param [in,out] ResultPtr pointer to the output buffer, needs to be at least
+ * \c UNI_MAX_UTF8_BYTES_PER_CODE_POINT bytes.  On success \c ResultPtr is
+ * updated one past end of the converted sequence.
+ *
+ * \returns true on success.
+ */
+bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
+
+/**
+ * Convert the first UTF8 sequence in the given source buffer to a UTF32
+ * code point.
+ *
+ * \param [in,out] source A pointer to the source buffer. If the conversion
+ * succeeds, this pointer will be updated to point to the byte just past the
+ * end of the converted sequence.
+ * \param sourceEnd A pointer just past the end of the source buffer.
+ * \param [out] target The converted code point.
+ * \param flags Whether the conversion is strict or lenient.
+ *
+ * \returns conversionOK on success
+ *
+ * \sa ConvertUTF8toUTF32
+ */
+inline ConversionResult convertUTF8Sequence(const UTF8 **source,
+                                            const UTF8 *sourceEnd,
+                                            UTF32 *target,
+                                            ConversionFlags flags) {
+  if (*source == sourceEnd)
+    return sourceExhausted;
+  unsigned size = getNumBytesForUTF8(**source);
+  if ((ptrdiff_t)size > sourceEnd - *source)
+    return sourceExhausted;
+  return ConvertUTF8toUTF32(source, *source + size, &target, target + 1, flags);
+}
+
+/**
+ * Returns true if a blob of text starts with a UTF-16 big or little endian byte
+ * order mark.
+ */
+bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
+
+/**
+ * Converts a stream of raw bytes assumed to be UTF16 into a UTF8 std::string.
+ *
+ * \param [in] SrcBytes A buffer of what is assumed to be UTF-16 encoded text.
+ * \param [out] Out Converted UTF-8 is stored here on success.
+ * \returns true on success
+ */
+bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
+
+/**
+* Converts a UTF16 string into a UTF8 std::string.
+*
+* \param [in] Src A buffer of UTF-16 encoded text.
+* \param [out] Out Converted UTF-8 is stored here on success.
+* \returns true on success
+*/
+bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out);
+
+/**
+ * Converts a UTF-8 string into a UTF-16 string with native endianness.
+ *
+ * \returns true on success
+ */
+bool convertUTF8ToUTF16String(StringRef SrcUTF8,
+                              SmallVectorImpl<UTF16> &DstUTF16);
+
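+/* A round-trip sketch (illustrative; makeArrayRef comes from
+ * llvm/ADT/ArrayRef.h):
+ *
+ *   SmallVector<UTF16, 32> Wide;
+ *   std::string Back;
+ *   if (convertUTF8ToUTF16String("hello", Wide) &&
+ *       convertUTF16ToUTF8String(makeArrayRef(Wide), Back))
+ *     assert(Back == "hello");
+ */
+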
+} /* end namespace llvm */
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/CrashRecoveryContext.h b/linux-x64/clang/include/llvm/Support/CrashRecoveryContext.h
new file mode 100644
index 0000000..7026231
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/CrashRecoveryContext.h
@@ -0,0 +1,258 @@
+//===--- CrashRecoveryContext.h - Crash Recovery ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+class CrashRecoveryContextCleanup;
+
+/// Crash recovery helper object.
+///
+/// This class implements support for running operations in a safe context so
+/// that crashes (memory errors, stack overflow, assertion violations) can be
+/// detected and control restored to the crashing thread. Crash detection is
+/// purely "best effort", the exact set of failures which can be recovered from
+/// is platform dependent.
+///
+/// Clients make use of this code by first calling
+/// CrashRecoveryContext::Enable(), and then executing unsafe operations via a
+/// CrashRecoveryContext object. For example:
+///
+/// \code
+///    void actual_work(void *);
+///
+///    void foo() {
+///      CrashRecoveryContext CRC;
+///
+///      if (!CRC.RunSafely(actual_work, 0)) {
+///         ... a crash was detected, report error to user ...
+///      }
+///
+///      ... no crash was detected ...
+///    }
+/// \endcode
+///
+/// To assist recovery the class allows specifying a set of actions that will
+/// be executed in any case, whether a crash occurs or not. These actions may
+/// be used to reclaim resources in the case of a crash.
+class CrashRecoveryContext {
+  void *Impl;
+  CrashRecoveryContextCleanup *head;
+
+public:
+  CrashRecoveryContext() : Impl(nullptr), head(nullptr) {}
+  ~CrashRecoveryContext();
+
+  /// Register a cleanup handler, which is used when the recovery context is
+  /// finished.
+  /// The recovery context owns the handler.
+  void registerCleanup(CrashRecoveryContextCleanup *cleanup);
+
+  void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);
+
+  /// Enable crash recovery.
+  static void Enable();
+
+  /// Disable crash recovery.
+  static void Disable();
+
+  /// Return the active context, if the code is currently executing in a
+  /// thread which is in a protected context.
+  static CrashRecoveryContext *GetCurrent();
+
+  /// Return true if the current thread is recovering from a crash.
+  static bool isRecoveringFromCrash();
+
+  /// Execute the provided callback function (with the given arguments) in
+  /// a protected context.
+  ///
+  /// \return True if the function completed successfully, and false if the
+  /// function crashed (or HandleCrash was called explicitly). Clients should
+  /// make as little assumptions as possible about the program state when
+  /// RunSafely has returned false.
+  bool RunSafely(function_ref<void()> Fn);
+  bool RunSafely(void (*Fn)(void*), void *UserData) {
+    return RunSafely([&]() { Fn(UserData); });
+  }
+
+  /// Execute the provided callback function (with the given arguments) in
+  /// a protected context which is run in another thread (optionally with a
+  /// requested stack size).
+  ///
+  /// See RunSafely() and llvm_execute_on_thread().
+  ///
+  /// On Darwin, if PRIO_DARWIN_BG is set on the calling thread, it will be
+  /// propagated to the new thread as well.
+  bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0);
+  bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
+                         unsigned RequestedStackSize = 0) {
+    return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize);
+  }
+
+  /// Explicitly trigger a crash recovery in the current process, and
+  /// return failure from RunSafely(). This function does not return.
+  void HandleCrash();
+};
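+
+// The function_ref overload also accepts lambdas directly, e.g. (sketch;
+// DoRiskyWork is a placeholder):
+//
+//   CrashRecoveryContext CRC;
+//   bool Crashed = !CRC.RunSafely([&] { DoRiskyWork(); });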
+
+/// Abstract base class of cleanup handlers.
+///
+/// Derived classes override the method recoverResources, which does the actual
+/// work of resource recovery.
+///
+/// Cleanup handlers are stored in a doubly linked list, which is owned and
+/// managed by a crash recovery context.
+class CrashRecoveryContextCleanup {
+protected:
+  CrashRecoveryContext *context;
+  CrashRecoveryContextCleanup(CrashRecoveryContext *context)
+      : context(context), cleanupFired(false) {}
+
+public:
+  bool cleanupFired;
+
+  virtual ~CrashRecoveryContextCleanup();
+  virtual void recoverResources() = 0;
+
+  CrashRecoveryContext *getContext() const {
+    return context;
+  }
+
+private:
+  friend class CrashRecoveryContext;
+  CrashRecoveryContextCleanup *prev, *next;
+};
+
+/// Base class of cleanup handler that controls recovery of resources of the
+/// given type.
+///
+/// \tparam Derived Class that uses this class as a base.
+/// \tparam T Type of controlled resource.
+///
+/// This class serves as a base for its template parameter as implied by
+/// Curiously Recurring Template Pattern.
+///
+/// This class factors out creation of a cleanup handler. The latter requires
+/// knowledge of the current recovery context, which is provided by this class.
+template<typename Derived, typename T>
+class CrashRecoveryContextCleanupBase : public CrashRecoveryContextCleanup {
+protected:
+  T *resource;
+  CrashRecoveryContextCleanupBase(CrashRecoveryContext *context, T *resource)
+      : CrashRecoveryContextCleanup(context), resource(resource) {}
+
+public:
+  /// Creates a cleanup handler.
+  /// \param x Pointer to the resource recovered by this handler.
+  /// \return New handler or null if the method was called outside a recovery
+  ///         context.
+  static Derived *create(T *x) {
+    if (x) {
+      if (CrashRecoveryContext *context = CrashRecoveryContext::GetCurrent())
+        return new Derived(context, x);
+    }
+    return nullptr;
+  }
+};
+
+/// Cleanup handler that reclaims resource by calling destructor on it.
+template <typename T>
+class CrashRecoveryContextDestructorCleanup : public
+  CrashRecoveryContextCleanupBase<CrashRecoveryContextDestructorCleanup<T>, T> {
+public:
+  CrashRecoveryContextDestructorCleanup(CrashRecoveryContext *context,
+                                        T *resource)
+      : CrashRecoveryContextCleanupBase<
+            CrashRecoveryContextDestructorCleanup<T>, T>(context, resource) {}
+
+  void recoverResources() override {
+    this->resource->~T();
+  }
+};
+
+/// Cleanup handler that reclaims resource by calling 'delete' on it.
+template <typename T>
+class CrashRecoveryContextDeleteCleanup : public
+  CrashRecoveryContextCleanupBase<CrashRecoveryContextDeleteCleanup<T>, T> {
+public:
+  CrashRecoveryContextDeleteCleanup(CrashRecoveryContext *context, T *resource)
+    : CrashRecoveryContextCleanupBase<
+        CrashRecoveryContextDeleteCleanup<T>, T>(context, resource) {}
+
+  void recoverResources() override { delete this->resource; }
+};
+
+/// Cleanup handler that reclaims resource by calling its method 'Release'.
+template <typename T>
+class CrashRecoveryContextReleaseRefCleanup : public
+  CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>, T> {
+public:
+  CrashRecoveryContextReleaseRefCleanup(CrashRecoveryContext *context,
+                                        T *resource)
+    : CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>,
+          T>(context, resource) {}
+
+  void recoverResources() override { this->resource->Release(); }
+};
+
+/// Helper class for managing resource cleanups.
+///
+/// \tparam T Type of resource being reclaimed.
+/// \tparam Cleanup Class that defines how the resource is reclaimed.
+///
+/// Clients create objects of this type in the code executed in a crash recovery
+/// context to ensure that the resource will be reclaimed even in the case of
+/// a crash. For example:
+///
+/// \code
+///    void actual_work(void *) {
+///      ...
+///      std::unique_ptr<Resource> R(new Resource());
+///      CrashRecoveryContextCleanupRegistrar D(R.get());
+///      ...
+///    }
+///
+///    void foo() {
+///      CrashRecoveryContext CRC;
+///
+///      if (!CRC.RunSafely(actual_work, 0)) {
+///         ... a crash was detected, report error to user ...
+///      }
+/// \endcode
+///
+/// If the code of `actual_work` in the example above does not crash, the
+/// destructor of CrashRecoveryContextCleanupRegistrar removes cleanup code from
+/// the current CrashRecoveryContext and the resource is reclaimed by the
+/// destructor of std::unique_ptr. If a crash happens, destructors are not
+/// called and the resource is reclaimed by the cleanup object registered in
+/// the recovery context by the constructor of CrashRecoveryContextCleanupRegistrar.
+template <typename T, typename Cleanup = CrashRecoveryContextDeleteCleanup<T> >
+class CrashRecoveryContextCleanupRegistrar {
+  CrashRecoveryContextCleanup *cleanup;
+
+public:
+  CrashRecoveryContextCleanupRegistrar(T *x)
+    : cleanup(Cleanup::create(x)) {
+    if (cleanup)
+      cleanup->getContext()->registerCleanup(cleanup);
+  }
+
+  ~CrashRecoveryContextCleanupRegistrar() { unregister(); }
+
+  void unregister() {
+    if (cleanup && !cleanup->cleanupFired)
+      cleanup->getContext()->unregisterCleanup(cleanup);
+    cleanup = nullptr;
+  }
+};
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
diff --git a/linux-x64/clang/include/llvm/Support/DJB.h b/linux-x64/clang/include/llvm/Support/DJB.h
new file mode 100644
index 0000000..e031114
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DJB.h
@@ -0,0 +1,33 @@
+//===-- llvm/Support/DJB.h ---DJB Hash --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for the DJ Bernstein hash function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DJB_H
+#define LLVM_SUPPORT_DJB_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// The Bernstein hash function used by the DWARF accelerator tables.
+inline uint32_t djbHash(StringRef Buffer, uint32_t H = 5381) {
+  for (unsigned char C : Buffer.bytes())
+    H = (H << 5) + H + C;
+  return H;
+}
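+
+// For example, the hash composes over concatenation, so
+//   djbHash("foobar") == djbHash("bar", djbHash("foo"))
+// and djbHash("") simply returns the seed (5381 by default).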
+
+/// Computes the Bernstein hash after folding the input according to the Dwarf 5
+/// standard case folding rules.
+uint32_t caseFoldingDjbHash(StringRef Buffer, uint32_t H = 5381);
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_DJB_H
diff --git a/linux-x64/clang/include/llvm/Support/DOTGraphTraits.h b/linux-x64/clang/include/llvm/Support/DOTGraphTraits.h
new file mode 100644
index 0000000..4381b5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DOTGraphTraits.h
@@ -0,0 +1,167 @@
+//===-- llvm/Support/DotGraphTraits.h - Customize .dot output ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a template class that can be used to customize dot output
+// graphs generated by the GraphWriter.h file.  The default implementation of
+// this file will produce a simple, but not very polished graph.  By
+// specializing this template, lots of customization opportunities are possible.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DOTGRAPHTRAITS_H
+#define LLVM_SUPPORT_DOTGRAPHTRAITS_H
+
+#include <string>
+
+namespace llvm {
+
+/// DefaultDOTGraphTraits - This class provides the default implementations of
+/// all of the DOTGraphTraits methods.  If a specialization does not need to
+/// override all methods here it should inherit so that it can get the default
+/// implementations.
+///
+struct DefaultDOTGraphTraits {
+private:
+  bool IsSimple;
+
+protected:
+  bool isSimple() {
+    return IsSimple;
+  }
+
+public:
+  explicit DefaultDOTGraphTraits(bool simple=false) : IsSimple (simple) {}
+
+  /// getGraphName - Return the label for the graph as a whole.  Printed at the
+  /// top of the graph.
+  ///
+  template<typename GraphType>
+  static std::string getGraphName(const GraphType &) { return ""; }
+
+  /// getGraphProperties - Return any custom properties that should be included
+  /// in the top level graph structure for dot.
+  ///
+  template<typename GraphType>
+  static std::string getGraphProperties(const GraphType &) {
+    return "";
+  }
+
+  /// renderGraphFromBottomUp - If this function returns true, the graph is
+  /// emitted bottom-up instead of top-down.  This requires graphviz 2.0 to work
+  /// though.
+  static bool renderGraphFromBottomUp() {
+    return false;
+  }
+
+  /// isNodeHidden - If the function returns true, the given node is not
+  /// displayed in the graph.
+  static bool isNodeHidden(const void *) {
+    return false;
+  }
+
+  /// getNodeLabel - Given a node and a pointer to the top level graph, return
+  /// the label to print in the node.
+  template<typename GraphType>
+  std::string getNodeLabel(const void *, const GraphType &) {
+    return "";
+  }
+
+  /// getNodeIdentifierLabel - Returns a string representing the
+  /// address or other unique identifier of the node. (Only used if
+  /// non-empty.)
+  template <typename GraphType>
+  static std::string getNodeIdentifierLabel(const void *, const GraphType &) {
+    return "";
+  }
+
+  template<typename GraphType>
+  static std::string getNodeDescription(const void *, const GraphType &) {
+    return "";
+  }
+
+  /// If you want to specify custom node attributes, this is the place to do so
+  ///
+  template<typename GraphType>
+  static std::string getNodeAttributes(const void *,
+                                       const GraphType &) {
+    return "";
+  }
+
+  /// If you want to override the dot attributes printed for a particular edge,
+  /// override this method.
+  template<typename EdgeIter, typename GraphType>
+  static std::string getEdgeAttributes(const void *, EdgeIter,
+                                       const GraphType &) {
+    return "";
+  }
+
+  /// getEdgeSourceLabel - If you want to label the edge source itself,
+  /// implement this method.
+  template<typename EdgeIter>
+  static std::string getEdgeSourceLabel(const void *, EdgeIter) {
+    return "";
+  }
+
+  /// edgeTargetsEdgeSource - This method returns true if this outgoing edge
+  /// should actually target another edge source, not a node.  If this method is
+  /// implemented, getEdgeTarget should be implemented.
+  template<typename EdgeIter>
+  static bool edgeTargetsEdgeSource(const void *, EdgeIter) {
+    return false;
+  }
+
+  /// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is
+  /// called to determine which outgoing edge of Node is the target of this
+  /// edge.
+  template<typename EdgeIter>
+  static EdgeIter getEdgeTarget(const void *, EdgeIter I) {
+    return I;
+  }
+
+  /// hasEdgeDestLabels - If this function returns true, the graph is able
+  /// to provide labels for edge destinations.
+  static bool hasEdgeDestLabels() {
+    return false;
+  }
+
+  /// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the
+  /// number of incoming edge labels the given node has.
+  static unsigned numEdgeDestLabels(const void *) {
+    return 0;
+  }
+
+  /// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the
+  /// incoming edge label with the given index in the given node.
+  static std::string getEdgeDestLabel(const void *, unsigned) {
+    return "";
+  }
+
+  /// addCustomGraphFeatures - If a graph is made up of more than just
+  /// straight-forward nodes and edges, this is the place to put all of the
+  /// custom stuff necessary.  The GraphWriter object, instantiated with your
+  /// GraphType is passed in as an argument.  You may call arbitrary methods on
+  /// it to add things to the output graph.
+  ///
+  template<typename GraphType, typename GraphWriter>
+  static void addCustomGraphFeatures(const GraphType &, GraphWriter &) {}
+};
+
+
+/// DOTGraphTraits - Template class that can be specialized to customize how
+/// graphs are converted to 'dot' graphs.  When specializing, you may inherit
+/// from DefaultDOTGraphTraits if you don't need to override everything.
+///
+template <typename Ty>
+struct DOTGraphTraits : public DefaultDOTGraphTraits {
+  DOTGraphTraits (bool simple=false) : DefaultDOTGraphTraits (simple) {}
+};
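+
+// A specialization sketch (illustrative; MyGraph is a hypothetical graph
+// type):
+//
+//   template <>
+//   struct DOTGraphTraits<MyGraph *> : public DefaultDOTGraphTraits {
+//     DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
+//     static std::string getGraphName(MyGraph *) { return "My graph"; }
+//     std::string getNodeLabel(const void *Node, MyGraph *G) {
+//       return "node"; // derive a real label from Node and G
+//     }
+//   };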
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/DataExtractor.h b/linux-x64/clang/include/llvm/Support/DataExtractor.h
new file mode 100644
index 0000000..3144788
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DataExtractor.h
@@ -0,0 +1,428 @@
+//===-- DataExtractor.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DATAEXTRACTOR_H
+#define LLVM_SUPPORT_DATAEXTRACTOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// An auxiliary type to facilitate extraction of 3-byte entities. 
+struct Uint24 {
+  uint8_t Bytes[3];
+  Uint24(uint8_t U) {
+    Bytes[0] = Bytes[1] = Bytes[2] = U;
+  }
+  Uint24(uint8_t U0, uint8_t U1, uint8_t U2) {
+    Bytes[0] = U0; Bytes[1] = U1; Bytes[2] = U2;
+  }
+  uint32_t getAsUint32(bool IsLittleEndian) const {
+    int LoIx = IsLittleEndian ? 0 : 2;
+    return Bytes[LoIx] + (Bytes[1] << 8) + (Bytes[2-LoIx] << 16);
+  }
+};
+
+using uint24_t = Uint24;
+static_assert(sizeof(uint24_t) == 3, "sizeof(uint24_t) != 3");
+
+/// Needed by swapByteOrder().
+inline uint24_t getSwappedBytes(uint24_t C) {
+  return uint24_t(C.Bytes[2], C.Bytes[1], C.Bytes[0]);
+}
+
+class DataExtractor {
+  StringRef Data;
+  uint8_t IsLittleEndian;
+  uint8_t AddressSize;
+public:
+  /// Construct with a buffer that is owned by the caller.
+  ///
+  /// This constructor allows us to use data that is owned by the
+  /// caller. The data must stay around as long as this object is
+  /// valid.
+  DataExtractor(StringRef Data, bool IsLittleEndian, uint8_t AddressSize)
+    : Data(Data), IsLittleEndian(IsLittleEndian), AddressSize(AddressSize) {}
+
+  /// \brief Get the data pointed to by this extractor.
+  StringRef getData() const { return Data; }
+  /// \brief Get the endianness for this extractor.
+  bool isLittleEndian() const { return IsLittleEndian; }
+  /// \brief Get the address size for this extractor.
+  uint8_t getAddressSize() const { return AddressSize; }
+  /// \brief Set the address size for this extractor.
+  void setAddressSize(uint8_t Size) { AddressSize = Size; }
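+
+  // A usage sketch (illustrative): reading from a little-endian buffer:
+  //
+  //   DataExtractor DE(StringRef("\x01\x02\x03\x04", 4),
+  //                    /*IsLittleEndian=*/true, /*AddressSize=*/4);
+  //   uint32_t Offset = 0;
+  //   uint16_t V = DE.getU16(&Offset); // V == 0x0201, Offset == 2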
+
+  /// Extract a C string from \a *offset_ptr.
+  ///
+  /// Returns a pointer to a C String from the data at the offset
+  /// pointed to by \a offset_ptr. A variable length NULL terminated C
+  /// string will be extracted and the \a offset_ptr will be
+  /// updated with the offset of the byte that follows the NULL
+  /// terminator byte.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     A pointer to the C string value in the data. If the offset
+  ///     pointed to by \a offset_ptr is out of bounds, or if the
+  ///     offset plus the length of the C string is out of bounds,
+  ///     NULL will be returned.
+  const char *getCStr(uint32_t *offset_ptr) const;
+
+  /// Extract a C string from \a *OffsetPtr.
+  ///
+  /// Returns a StringRef for the C String from the data at the offset
+  /// pointed to by \a OffsetPtr. A variable length NULL terminated C
+  /// string will be extracted and the \a OffsetPtr will be
+  /// updated with the offset of the byte that follows the NULL
+  /// terminator byte.
+  ///
+  /// \param[in,out] OffsetPtr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// \return
+  ///     A StringRef for the C string value in the data. If the offset
+  ///     pointed to by \a OffsetPtr is out of bounds, or if the
+  ///     offset plus the length of the C string is out of bounds,
+  ///     a default-initialized StringRef will be returned.
+  StringRef getCStrRef(uint32_t *OffsetPtr) const;
+
+  /// Extract an unsigned integer of size \a byte_size from \a
+  /// *offset_ptr.
+  ///
+  /// Extract a single unsigned integer value and update the offset
+  /// pointed to by \a offset_ptr. The size of the extracted integer
+  /// is specified by the \a byte_size argument. \a byte_size should
+  /// have a value greater than or equal to one and less than or equal
+  /// to eight since the return value is 64 bits wide. Any
+  /// \a byte_size values less than 1 or greater than 8 will result in
+  /// nothing being extracted, and zero being returned.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[in] byte_size
+  ///     The size in bytes of the integer to extract.
+  ///
+  /// @return
+  ///     The unsigned integer value that was extracted, or zero on
+  ///     failure.
+  uint64_t getUnsigned(uint32_t *offset_ptr, uint32_t byte_size) const;
+
+  /// Extract a signed integer of size \a size from \a *offset_ptr.
+  ///
+  /// Extract a single signed integer value (sign extending if required)
+  /// and update the offset pointed to by \a offset_ptr. The size of
+  /// the extracted integer is specified by the \a size argument.
+  /// \a size should have a value greater than or equal to one
+  /// and less than or equal to eight since the return value is 64
+  /// bits wide. Any \a size values less than 1 or greater than
+  /// 8 will result in nothing being extracted, and zero being returned.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[in] size
+  ///     The size in bytes of the integer to extract.
+  ///
+  /// @return
+  ///     The sign extended signed integer value that was extracted,
+  ///     or zero on failure.
+  int64_t getSigned(uint32_t *offset_ptr, uint32_t size) const;
+
+  //------------------------------------------------------------------
+  /// Extract a pointer from \a *offset_ptr.
+  ///
+  /// Extract a single pointer from the data and update the offset
+  /// pointed to by \a offset_ptr. The size of the extracted pointer
+  /// is \a getAddressSize(), so the address size has to be
+  /// set correctly prior to extracting any pointer values.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted pointer value as a 64-bit integer.
+  uint64_t getAddress(uint32_t *offset_ptr) const {
+    return getUnsigned(offset_ptr, AddressSize);
+  }
+
+  /// Extract a uint8_t value from \a *offset_ptr.
+  ///
+  /// Extract a single uint8_t from the binary data at the offset
+  /// pointed to by \a offset_ptr, and advance the offset on success.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted uint8_t value.
+  uint8_t getU8(uint32_t *offset_ptr) const;
+
+  /// Extract \a count uint8_t values from \a *offset_ptr.
+  ///
+  /// Extract \a count uint8_t values from the binary data at the
+  /// offset pointed to by \a offset_ptr, and advance the offset on
+  /// success. The extracted values are copied into \a dst.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[out] dst
+  ///     A buffer to copy \a count uint8_t values into. \a dst must
+  ///     be large enough to hold all requested data.
+  ///
+  /// @param[in] count
+  ///     The number of uint8_t values to extract.
+  ///
+  /// @return
+  ///     \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+  uint8_t *getU8(uint32_t *offset_ptr, uint8_t *dst, uint32_t count) const;
+
+  //------------------------------------------------------------------
+  /// Extract a uint16_t value from \a *offset_ptr.
+  ///
+  /// Extract a single uint16_t from the binary data at the offset
+  /// pointed to by \a offset_ptr, and update the offset on success.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted uint16_t value.
+  //------------------------------------------------------------------
+  uint16_t getU16(uint32_t *offset_ptr) const;
+
+  /// Extract \a count uint16_t values from \a *offset_ptr.
+  ///
+  /// Extract \a count uint16_t values from the binary data at the
+  /// offset pointed to by \a offset_ptr, and advance the offset on
+  /// success. The extracted values are copied into \a dst.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[out] dst
+  ///     A buffer to copy \a count uint16_t values into. \a dst must
+  ///     be large enough to hold all requested data.
+  ///
+  /// @param[in] count
+  ///     The number of uint16_t values to extract.
+  ///
+  /// @return
+  ///     \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+  uint16_t *getU16(uint32_t *offset_ptr, uint16_t *dst, uint32_t count) const;
+
+  /// Extract a 24-bit unsigned value from \a *offset_ptr and return it
+  /// in a uint32_t.
+  ///
+  /// Extract 3 bytes from the binary data at the offset pointed to by
+  /// \a offset_ptr, construct a uint32_t from them and update the offset
+  /// on success.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by 3 bytes if the value is extracted correctly. If the offset
+  ///     is out of bounds or there are not enough bytes to extract this value,
+  ///     the offset will be left unmodified.
+  ///
+  /// @return
+  ///     The extracted 24-bit value represented in a uint32_t.
+  uint32_t getU24(uint32_t *offset_ptr) const;
+
+  /// Extract a uint32_t value from \a *offset_ptr.
+  ///
+  /// Extract a single uint32_t from the binary data at the offset
+  /// pointed to by \a offset_ptr, and update the offset on success.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted uint32_t value.
+  uint32_t getU32(uint32_t *offset_ptr) const;
+
+  /// Extract \a count uint32_t values from \a *offset_ptr.
+  ///
+  /// Extract \a count uint32_t values from the binary data at the
+  /// offset pointed to by \a offset_ptr, and advance the offset on
+  /// success. The extracted values are copied into \a dst.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[out] dst
+  ///     A buffer to copy \a count uint32_t values into. \a dst must
+  ///     be large enough to hold all requested data.
+  ///
+  /// @param[in] count
+  ///     The number of uint32_t values to extract.
+  ///
+  /// @return
+  ///     \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+  uint32_t *getU32(uint32_t *offset_ptr, uint32_t *dst, uint32_t count) const;
+
+  /// Extract a uint64_t value from \a *offset_ptr.
+  ///
+  /// Extract a single uint64_t from the binary data at the offset
+  /// pointed to by \a offset_ptr, and update the offset on success.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted uint64_t value.
+  uint64_t getU64(uint32_t *offset_ptr) const;
+
+  /// Extract \a count uint64_t values from \a *offset_ptr.
+  ///
+  /// Extract \a count uint64_t values from the binary data at the
+  /// offset pointed to by \a offset_ptr, and advance the offset on
+  /// success. The extracted values are copied into \a dst.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @param[out] dst
+  ///     A buffer to copy \a count uint64_t values into. \a dst must
+  ///     be large enough to hold all requested data.
+  ///
+  /// @param[in] count
+  ///     The number of uint64_t values to extract.
+  ///
+  /// @return
+  ///     \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+  uint64_t *getU64(uint32_t *offset_ptr, uint64_t *dst, uint32_t count) const;
+
+  /// Extract a signed LEB128 value from \a *offset_ptr.
+  ///
+  /// Extracts a signed LEB128 number from this object's data
+  /// starting at the offset pointed to by \a offset_ptr. The offset
+  /// pointed to by \a offset_ptr will be updated with the offset of
+  /// the byte following the last extracted byte.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted signed integer value.
+  int64_t getSLEB128(uint32_t *offset_ptr) const;
+
+  /// Extract an unsigned LEB128 value from \a *offset_ptr.
+  ///
+  /// Extracts an unsigned LEB128 number from this object's data
+  /// starting at the offset pointed to by \a offset_ptr. The offset
+  /// pointed to by \a offset_ptr will be updated with the offset of
+  /// the byte following the last extracted byte.
+  ///
+  /// @param[in,out] offset_ptr
+  ///     A pointer to an offset within the data that will be advanced
+  ///     by the appropriate number of bytes if the value is extracted
+  ///     correctly. If the offset is out of bounds or there are not
+  ///     enough bytes to extract this value, the offset will be left
+  ///     unmodified.
+  ///
+  /// @return
+  ///     The extracted unsigned integer value.
+  uint64_t getULEB128(uint32_t *offset_ptr) const;
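+
+  // Example (illustrative): the ULEB128 byte sequence 0xE5 0x8E 0x26 decodes
+  // to 624485; a call to getULEB128 would consume those three bytes and
+  // advance *offset_ptr by 3.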
+
+  /// Test the validity of \a offset.
+  ///
+  /// @return
+  ///     \b true if \a offset is a valid offset into the data in this
+  ///     object, \b false otherwise.
+  bool isValidOffset(uint32_t offset) const { return Data.size() > offset; }
+
+  /// Test the availability of \a length bytes of data from \a offset.
+  ///
+  /// @return
+  ///     \b true if \a offset is a valid offset and there are \a
+  ///     length bytes available at that offset, \b false otherwise.
+  bool isValidOffsetForDataOfSize(uint32_t offset, uint32_t length) const {
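+    // The first check rejects unsigned wrap-around: if offset + length
+    // overflows uint32_t, the sum compares less than offset.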
+    return offset + length >= offset && isValidOffset(offset + length - 1);
+  }
+
+  /// Test the availability of enough bytes of data for a pointer from
+  /// \a offset. The size of a pointer is \a getAddressSize().
+  ///
+  /// @return
+  ///     \b true if \a offset is a valid offset and there are enough
+  ///     bytes for a pointer available at that offset, \b false
+  ///     otherwise.
+  bool isValidOffsetForAddress(uint32_t offset) const {
+    return isValidOffsetForDataOfSize(offset, AddressSize);
+  }
+};
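+
+// Example usage (illustrative; `Buf` is a placeholder for a StringRef over
+// the raw bytes to decode, and the constructor shown is assumed to take the
+// data, an is-little-endian flag, and the address size):
+//
+//   DataExtractor DE(Buf, /*IsLittleEndian=*/true, /*AddressSize=*/8);
+//   uint32_t Off = 0;
+//   uint16_t Version = DE.getU16(&Off);   // advances Off by 2 on success
+//   uint64_t Length = DE.getULEB128(&Off);
+//   if (!DE.isValidOffsetForDataOfSize(Off, Length))
+//     ...; // not enough data left; failed extractions leave Off unchanged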
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/DataTypes.h b/linux-x64/clang/include/llvm/Support/DataTypes.h
new file mode 100644
index 0000000..ad60a5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DataTypes.h
@@ -0,0 +1,17 @@
+//===-- llvm/Support/DataTypes.h - Define fixed size types ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Due to layering constraints (Support depends on llvm-c) this is a thin
+// wrapper around the implementation that lives in llvm-c, though most clients
+// can/should think of this as being provided by Support for simplicity (not
+// many clients are aware of their dependency on llvm-c).
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/DataTypes.h"
diff --git a/linux-x64/clang/include/llvm/Support/Debug.h b/linux-x64/clang/include/llvm/Support/Debug.h
new file mode 100644
index 0000000..48e9e1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Debug.h
@@ -0,0 +1,122 @@
+//===- llvm/Support/Debug.h - Easy way to add debug output ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a handy way of adding debugging information to your
+// code, without it being enabled all of the time, and without having to add
+// command line options to enable it.
+//
+// In particular, just wrap your code with the DEBUG() macro, and it will be
+// enabled automatically if you specify '-debug' on the command-line.
+// DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" to
+// specify that your debug code belongs to class "foo". Be careful to only do
+// this after including Debug.h and not around any #include of headers. Headers
+// should define and undef the macro around the code that needs to use the
+// DEBUG() macro. Then, on the command line, you can specify '-debug-only=foo'
+// to enable JUST the debug information for the foo class.
+//
+// When compiling without assertions, the -debug-* options and all code in
+// DEBUG() statements disappears, so it does not affect the runtime of the code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DEBUG_H
+#define LLVM_SUPPORT_DEBUG_H
+
+namespace llvm {
+
+class raw_ostream;
+
+#ifndef NDEBUG
+
+/// isCurrentDebugType - Return true if the specified string is the debug type
+/// specified on the command line, or if none was specified on the command line
+/// with the -debug-only=X option.
+///
+bool isCurrentDebugType(const char *Type);
+
+/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
+/// option were specified.  Note that DebugFlag also needs to be set to true for
+/// debug output to be produced.
+///
+void setCurrentDebugType(const char *Type);
+
+/// setCurrentDebugTypes - Set the current debug type, as if the
+/// -debug-only=X,Y,Z option were specified. Note that DebugFlag
+/// also needs to be set to true for debug output to be produced.
+///
+void setCurrentDebugTypes(const char **Types, unsigned Count);
+
+/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
+/// information.  If the '-debug' option is specified on the command line, and if
+/// this is a debug build, then the code specified as the option to the macro
+/// will be executed.  Otherwise it will not be.  Example:
+///
+/// DEBUG_WITH_TYPE("bitset", dbgs() << "Bitset contains: " << Bitset << "\n");
+///
+/// This will emit the debug information if -debug is present, and -debug-only
+/// is not specified, or is specified as "bitset".
+#define DEBUG_WITH_TYPE(TYPE, X)                                        \
+  do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; } \
+  } while (false)
+
+#else
+#define isCurrentDebugType(X) (false)
+#define setCurrentDebugType(X)
+#define setCurrentDebugTypes(X, N)
+#define DEBUG_WITH_TYPE(TYPE, X) do { } while (false)
+#endif
+
+/// This boolean is set to true if the '-debug' command line option
+/// is specified.  This should probably not be referenced directly; instead, use
+/// the DEBUG macro below.
+///
+extern bool DebugFlag;
+
+/// \name Verification flags.
+///
+/// These flags turn on/off checks that are expensive and are disabled by
+/// default, unless the macro EXPENSIVE_CHECKS is defined. The flags allow
+/// the checks to be turned on selectively without recompiling.
+/// \{
+
+/// Enables verification of dominator trees.
+///
+extern bool VerifyDomInfo;
+
+/// Enables verification of loop info.
+///
+extern bool VerifyLoopInfo;
+
+///\}
+
+/// EnableDebugBuffering - This defaults to false.  If true, the debug
+/// stream will install signal handlers to dump any buffered debug
+/// output.  It allows clients to selectively allow the debug stream
+/// to install signal handlers if they are certain there will be no
+/// conflict.
+///
+extern bool EnableDebugBuffering;
+
+/// dbgs() - This returns a reference to a raw_ostream for debugging
+/// messages.  If debugging is disabled it returns errs().  Use it
+/// like: dbgs() << "foo" << "bar";
+raw_ostream &dbgs();
+
+// DEBUG macro - This macro should be used by passes to emit debug information.
+// If the '-debug' option is specified on the command line, and if this is a
+// debug build, then the code specified as the option to the macro will be
+// executed.  Otherwise it will not be.  Example:
+//
+// DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
+//
+#define DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
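+
+// Example usage (illustrative; "my-pass" and BB are placeholders):
+//
+//   #define DEBUG_TYPE "my-pass"
+//   ...
+//   DEBUG(dbgs() << "Visiting block " << BB->getName() << "\n");
+//
+// With assertions enabled, this prints only when '-debug' (or
+// '-debug-only=my-pass') is given on the command line.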
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_DEBUG_H
diff --git a/linux-x64/clang/include/llvm/Support/DebugCounter.h b/linux-x64/clang/include/llvm/Support/DebugCounter.h
new file mode 100644
index 0000000..52e1bd7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DebugCounter.h
@@ -0,0 +1,165 @@
+//===- llvm/Support/DebugCounter.h - Debug counter support ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file provides an implementation of debug counters.  Debug
+/// counters are a tool that let you narrow down a miscompilation to a specific
+/// thing happening.
+///
+/// To give a use case: Imagine you have a very large file and you are trying
+/// to understand the minimal transformation that breaks it. Bugpoint and
+/// bisection are often helpful here in narrowing it down to a specific pass,
+/// but it's still a very large file, and a very complicated pass to try to
+/// debug.  That is where debug counting steps in.  You can instrument the pass
+/// with a debug counter before it does a certain thing, and depending on the
+/// counts, it will either execute that thing or not.  The debug counter itself
+/// consists of a skip and a count.  Skip is the number of times shouldExecute
+/// needs to be called before it returns true.  Count is the number of times to
+/// return true once Skip is 0.  So skip=47, count=2 would skip the first 47
+/// executions by returning false from shouldExecute, then execute twice, and
+/// then return false again.
+/// Note that a counter set to a negative number will always execute.
+/// For a concrete example, during predicateinfo creation, the renaming pass
+/// replaces each use with a renamed use.
+///
+/// If I use DEBUG_COUNTER to create a counter called "predicateinfo", with
+/// variable name RenameCounter, and then instrument this renaming with a debug
+/// counter, like so:
+///
+/// if (!DebugCounter::shouldExecute(RenameCounter))
+/// <continue or return or whatever not executing looks like>
+///
+/// Now I can, from the command line, make it rename or not rename certain uses
+/// by setting the skip and count.
+/// So for example
+/// bin/opt -debug-counter=predicateinfo-skip=47,predicateinfo-count=1
+/// will skip renaming the first 47 uses, then rename one, then skip the rest.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DEBUGCOUNTER_H
+#define LLVM_SUPPORT_DEBUGCOUNTER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+namespace llvm {
+
+class DebugCounter {
+public:
+  /// \brief Returns a reference to the singleton instance.
+  static DebugCounter &instance();
+
+  // Used by the command line option parser to push a new value it parsed.
+  void push_back(const std::string &);
+
+  // Register a counter with the specified name.
+  //
+  // FIXME: Currently, counter registration is required to happen before command
+  // line option parsing. The main reason to register counters is to produce a
+  // nice list of them on the command line, but I'm not sure this is worth it.
+  static unsigned registerCounter(StringRef Name, StringRef Desc) {
+    return instance().addCounter(Name, Desc);
+  }
+  inline static bool shouldExecute(unsigned CounterName) {
+// Compile to nothing when debugging is off
+#ifdef NDEBUG
+    return true;
+#else
+    auto &Us = instance();
+    auto Result = Us.Counters.find(CounterName);
+    if (Result != Us.Counters.end()) {
+      auto &CounterPair = Result->second;
+      // We only execute while the skip (first) is zero and the count (second)
+      // is non-zero.
+      // Negative counters always execute.
+      if (CounterPair.first < 0)
+        return true;
+      if (CounterPair.first != 0) {
+        --CounterPair.first;
+        return false;
+      }
+      if (CounterPair.second < 0)
+        return true;
+      if (CounterPair.second != 0) {
+        --CounterPair.second;
+        return true;
+      }
+      return false;
+    }
+    // Didn't find the counter, should we warn?
+    return true;
+#endif // NDEBUG
+  }
+
+  // Return true if a given counter had values set (either programmatically or on
+  // the command line).  This will return true even if those values are
+  // currently in a state where the counter will always execute.
+  static bool isCounterSet(unsigned ID) {
+    return instance().Counters.count(ID);
+  }
+
+  // Return the skip and count for a counter. This only works for set counters.
+  static std::pair<int, int> getCounterValue(unsigned ID) {
+    auto &Us = instance();
+    auto Result = Us.Counters.find(ID);
+    assert(Result != Us.Counters.end() && "Asking about a non-set counter");
+    return Result->second;
+  }
+
+  // Set a registered counter to a given value.
+  static void setCounterValue(unsigned ID, const std::pair<int, int> &Val) {
+    auto &Us = instance();
+    Us.Counters[ID] = Val;
+  }
+
+  // Dump or print the current counter set into llvm::dbgs().
+  LLVM_DUMP_METHOD void dump() const;
+
+  void print(raw_ostream &OS) const;
+
+  // Get the counter ID for a given named counter, or return 0 if none is found.
+  unsigned getCounterId(const std::string &Name) const {
+    return RegisteredCounters.idFor(Name);
+  }
+
+  // Return the number of registered counters.
+  unsigned int getNumCounters() const { return RegisteredCounters.size(); }
+
+  // Return the name and description of the counter with the given ID.
+  std::pair<std::string, std::string> getCounterInfo(unsigned ID) const {
+    return std::make_pair(RegisteredCounters[ID], CounterDesc.lookup(ID));
+  }
+
+  // Iterate through the registered counters
+  typedef UniqueVector<std::string> CounterVector;
+  CounterVector::const_iterator begin() const {
+    return RegisteredCounters.begin();
+  }
+  CounterVector::const_iterator end() const { return RegisteredCounters.end(); }
+
+private:
+  unsigned addCounter(const std::string &Name, const std::string &Desc) {
+    unsigned Result = RegisteredCounters.insert(Name);
+    CounterDesc[Result] = Desc;
+    return Result;
+  }
+  DenseMap<unsigned, std::pair<long, long>> Counters;
+  DenseMap<unsigned, std::string> CounterDesc;
+  CounterVector RegisteredCounters;
+};
+
+#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)                              \
+  static const unsigned VARNAME =                                              \
+      DebugCounter::registerCounter(COUNTERNAME, DESC)
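+
+// Example usage (illustrative; the counter and function names are
+// placeholders):
+//
+//   DEBUG_COUNTER(MyCounter, "my-counter", "Controls widget transforms");
+//   ...
+//   if (DebugCounter::shouldExecute(MyCounter))
+//     transformWidget(W);
+//
+// Then 'opt -debug-counter=my-counter-skip=2,my-counter-count=1' skips the
+// first two transforms, performs one, and suppresses the rest.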
+
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/DynamicLibrary.h b/linux-x64/clang/include/llvm/Support/DynamicLibrary.h
new file mode 100644
index 0000000..469d5df
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/DynamicLibrary.h
@@ -0,0 +1,133 @@
+//===-- llvm/Support/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the sys::DynamicLibrary class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DYNAMICLIBRARY_H
+#define LLVM_SUPPORT_DYNAMICLIBRARY_H
+
+#include <string>
+
+namespace llvm {
+
+class StringRef;
+
+namespace sys {
+
+  /// This class provides a portable interface to dynamic libraries which also
+  /// might be known as shared libraries, shared objects, dynamic shared
+  /// objects, or dynamic link libraries. Regardless of the terminology or the
+  /// operating system interface, this class provides a portable interface that
+  /// allows dynamic libraries to be loaded and searched for externally
+  /// defined symbols. This is typically used to provide "plug-in" support.
+  /// It also allows for symbols to be defined which don't live in any library,
+  /// but rather in the main program itself, useful on Windows where the main
+  /// executable cannot be searched.
+  ///
+  /// Note: there is currently no interface for temporarily loading a library,
+  /// or for unloading libraries when the LLVM library is unloaded.
+  class DynamicLibrary {
+    // Placeholder whose address represents an invalid library.
+    // We use this instead of NULL or a pointer-int pair because the OS library
+    // might define 0 or 1 to be "special" handles, such as "search all".
+    static char Invalid;
+
+    // Opaque data used to interface with OS-specific dynamic library handling.
+    void *Data;
+
+  public:
+    explicit DynamicLibrary(void *data = &Invalid) : Data(data) {}
+
+    /// Returns true if the object refers to a valid library.
+    bool isValid() const { return Data != &Invalid; }
+
+    /// Searches through the library for the symbol \p symbolName. If it is
+    /// found, the address of that symbol is returned. If not, NULL is returned.
+    /// Note that NULL will also be returned if the library failed to load.
+    /// Use isValid() to distinguish these cases if it is important.
+    /// Note that this will \e not search symbols explicitly registered by
+    /// AddSymbol().
+    void *getAddressOfSymbol(const char *symbolName);
+
+    /// This function permanently loads the dynamic library at the given path.
+    /// The library will only be unloaded when llvm_shutdown() is called.
+    /// This returns a valid DynamicLibrary instance on success and an invalid
+    /// instance on failure (see isValid()). \p *errMsg will only be modified
+    /// if the library fails to load.
+    ///
+    /// It is safe to call this function multiple times for the same library.
+    /// @brief Open a dynamic library permanently.
+    static DynamicLibrary getPermanentLibrary(const char *filename,
+                                              std::string *errMsg = nullptr);
+
+    /// Registers an externally loaded library. The library will be unloaded
+    /// when the program terminates.
+    ///
+    /// It is safe to call this function multiple times for the same library,
+    /// though ownership is only taken if there was no error.
+    ///
+    /// \returns An empty \p DynamicLibrary if the library was already loaded.
+    static DynamicLibrary addPermanentLibrary(void *handle,
+                                              std::string *errMsg = nullptr);
+
+    /// This function permanently loads the dynamic library at the given path.
+    /// Use this instead of getPermanentLibrary() when you won't need to get
+    /// symbols from the library itself.
+    ///
+    /// It is safe to call this function multiple times for the same library.
+    static bool LoadLibraryPermanently(const char *Filename,
+                                       std::string *ErrMsg = nullptr) {
+      return !getPermanentLibrary(Filename, ErrMsg).isValid();
+    }
+
+    enum SearchOrdering {
+      /// SO_Linker - Search as a call to dlsym(dlopen(NULL)) would when
+      /// DynamicLibrary::getPermanentLibrary(NULL) has been called or
+      /// search the list of explicitly loaded symbols if not.
+      SO_Linker,
+      /// SO_LoadedFirst - Search all loaded libraries, then as SO_Linker would.
+      SO_LoadedFirst,
+      /// SO_LoadedLast - Search as SO_Linker would, then loaded libraries.
+      /// Only useful to search if libraries with RTLD_LOCAL have been added.
+      SO_LoadedLast,
+      /// SO_LoadOrder - Or this in to search libraries in the order loaded.
+      /// The default behaviour is to search loaded libraries in reverse.
+      SO_LoadOrder = 4
+    };
+    static SearchOrdering SearchOrder; // = SO_Linker
+
+    /// This function will search through all previously loaded dynamic
+    /// libraries for the symbol \p symbolName. If it is found, the address of
+    /// that symbol is returned. If not, null is returned. Note that this will
+    /// search permanently loaded libraries (getPermanentLibrary()) as well
+    /// as explicitly registered symbols (AddSymbol()).
+    /// @throws std::string on error.
+    /// @brief Search through libraries for address of a symbol
+    static void *SearchForAddressOfSymbol(const char *symbolName);
+
+    /// @brief Convenience function for C++ophiles.
+    static void *SearchForAddressOfSymbol(const std::string &symbolName) {
+      return SearchForAddressOfSymbol(symbolName.c_str());
+    }
+
+    /// This functions permanently adds the symbol \p symbolName with the
+    /// value \p symbolValue.  These symbols are searched before any
+    /// libraries.
+    /// @brief Add searchable symbol/value pair.
+    static void AddSymbol(StringRef symbolName, void *symbolValue);
+
+    class HandleSet;
+  };
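+
+  // Example usage (illustrative; "plugin.so" and "plugin_init" are
+  // placeholder names):
+  //
+  //   std::string Err;
+  //   auto DL = DynamicLibrary::getPermanentLibrary("plugin.so", &Err);
+  //   if (!DL.isValid())
+  //     ...; // report Err
+  //   if (void *Sym = DL.getAddressOfSymbol("plugin_init"))
+  //     reinterpret_cast<void (*)()>(Sym)();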
+
+} // End sys namespace
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Endian.h b/linux-x64/clang/include/llvm/Support/Endian.h
new file mode 100644
index 0000000..f50d9b5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Endian.h
@@ -0,0 +1,415 @@
+//===- Endian.h - Utilities for IO with endian specific data ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic functions to read and write endian specific data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ENDIAN_H
+#define LLVM_SUPPORT_ENDIAN_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+namespace support {
+
+enum endianness {big, little, native};
+
+// These are named values for common alignments.
+enum {aligned = 0, unaligned = 1};
+
+namespace detail {
+
+/// \brief ::value is either alignment, or alignof(T) if alignment is 0.
+template<class T, int alignment>
+struct PickAlignment {
+ enum { value = alignment == 0 ? alignof(T) : alignment };
+};
+
+} // end namespace detail
+
+namespace endian {
+
+constexpr endianness system_endianness() {
+  return sys::IsBigEndianHost ? big : little;
+}
+
+template <typename value_type>
+inline value_type byte_swap(value_type value, endianness endian) {
+  if ((endian != native) && (endian != system_endianness()))
+    sys::swapByteOrder(value);
+  return value;
+}
+
+/// Swap the bytes of value to match the given endianness.
+template<typename value_type, endianness endian>
+inline value_type byte_swap(value_type value) {
+  return byte_swap(value, endian);
+}
+
+/// Read a value of a particular endianness from memory.
+template <typename value_type, std::size_t alignment>
+inline value_type read(const void *memory, endianness endian) {
+  value_type ret;
+
+  memcpy(&ret,
+         LLVM_ASSUME_ALIGNED(
+             memory, (detail::PickAlignment<value_type, alignment>::value)),
+         sizeof(value_type));
+  return byte_swap<value_type>(ret, endian);
+}
+
+template<typename value_type,
+         endianness endian,
+         std::size_t alignment>
+inline value_type read(const void *memory) {
+  return read<value_type, alignment>(memory, endian);
+}
+
+/// Read a value of a particular endianness from a buffer, and increment the
+/// buffer past that value.
+template <typename value_type, std::size_t alignment, typename CharT>
+inline value_type readNext(const CharT *&memory, endianness endian) {
+  value_type ret = read<value_type, alignment>(memory, endian);
+  memory += sizeof(value_type);
+  return ret;
+}
+
+template<typename value_type, endianness endian, std::size_t alignment,
+         typename CharT>
+inline value_type readNext(const CharT *&memory) {
+  return readNext<value_type, alignment, CharT>(memory, endian);
+}
+
+/// Write a value to memory with a particular endianness.
+template <typename value_type, std::size_t alignment>
+inline void write(void *memory, value_type value, endianness endian) {
+  value = byte_swap<value_type>(value, endian);
+  memcpy(LLVM_ASSUME_ALIGNED(
+             memory, (detail::PickAlignment<value_type, alignment>::value)),
+         &value, sizeof(value_type));
+}
+
+template<typename value_type,
+         endianness endian,
+         std::size_t alignment>
+inline void write(void *memory, value_type value) {
+  write<value_type, alignment>(memory, value, endian);
+}
+
+template <typename value_type>
+using make_unsigned_t = typename std::make_unsigned<value_type>::type;
+
+/// Read a value of a particular endianness from memory, for a location
+/// that starts at the given bit offset within the first byte.
+template <typename value_type, endianness endian, std::size_t alignment>
+inline value_type readAtBitAlignment(const void *memory, uint64_t startBit) {
+  assert(startBit < 8);
+  if (startBit == 0)
+    return read<value_type, endian, alignment>(memory);
+  else {
+    // Read two values and compose the result from them.
+    value_type val[2];
+    memcpy(&val[0],
+           LLVM_ASSUME_ALIGNED(
+               memory, (detail::PickAlignment<value_type, alignment>::value)),
+           sizeof(value_type) * 2);
+    val[0] = byte_swap<value_type, endian>(val[0]);
+    val[1] = byte_swap<value_type, endian>(val[1]);
+
+    // Shift bits from the lower value into place.
+    make_unsigned_t<value_type> lowerVal = val[0] >> startBit;
+    // Mask off upper bits after right shift in case of signed type.
+    make_unsigned_t<value_type> numBitsFirstVal =
+        (sizeof(value_type) * 8) - startBit;
+    lowerVal &= ((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1;
+
+    // Get the bits from the upper value.
+    make_unsigned_t<value_type> upperVal =
+        val[1] & (((make_unsigned_t<value_type>)1 << startBit) - 1);
+    // Shift them in to place.
+    upperVal <<= numBitsFirstVal;
+
+    return lowerVal | upperVal;
+  }
+}
+
+/// Write a value to memory with a particular endianness, for a location
+/// that starts at the given bit offset within the first byte.
+template <typename value_type, endianness endian, std::size_t alignment>
+inline void writeAtBitAlignment(void *memory, value_type value,
+                                uint64_t startBit) {
+  assert(startBit < 8);
+  if (startBit == 0)
+    write<value_type, endian, alignment>(memory, value);
+  else {
+    // Read two values and shift the result into them.
+    value_type val[2];
+    memcpy(&val[0],
+           LLVM_ASSUME_ALIGNED(
+               memory, (detail::PickAlignment<value_type, alignment>::value)),
+           sizeof(value_type) * 2);
+    val[0] = byte_swap<value_type, endian>(val[0]);
+    val[1] = byte_swap<value_type, endian>(val[1]);
+
+    // Mask off any existing bits in the upper part of the lower value that
+    // we want to replace.
+    val[0] &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
+    make_unsigned_t<value_type> numBitsFirstVal =
+        (sizeof(value_type) * 8) - startBit;
+    make_unsigned_t<value_type> lowerVal = value;
+    if (startBit > 0) {
+      // Mask off the upper bits in the new value that are not going to go into
+      // the lower value. This avoids a left shift of a negative value, which
+      // is undefined behavior.
+      lowerVal &= (((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1);
+      // Now shift the new bits into place
+      lowerVal <<= startBit;
+    }
+    val[0] |= lowerVal;
+
+    // Mask off any existing bits in the lower part of the upper value that
+    // we want to replace.
+    val[1] &= ~(((make_unsigned_t<value_type>)1 << startBit) - 1);
+    // Next shift the bits that go into the upper value into position.
+    make_unsigned_t<value_type> upperVal = value >> numBitsFirstVal;
+    // Mask off upper bits after right shift in case of signed type.
+    upperVal &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
+    val[1] |= upperVal;
+
+    // Finally, rewrite values.
+    val[0] = byte_swap<value_type, endian>(val[0]);
+    val[1] = byte_swap<value_type, endian>(val[1]);
+    memcpy(LLVM_ASSUME_ALIGNED(
+               memory, (detail::PickAlignment<value_type, alignment>::value)),
+           &val[0], sizeof(value_type) * 2);
+  }
+}
+
+} // end namespace endian
+
+namespace detail {
+
+template<typename value_type,
+         endianness endian,
+         std::size_t alignment>
+struct packed_endian_specific_integral {
+  packed_endian_specific_integral() = default;
+
+  explicit packed_endian_specific_integral(value_type val) { *this = val; }
+
+  operator value_type() const {
+    return endian::read<value_type, endian, alignment>(
+      (const void*)Value.buffer);
+  }
+
+  void operator=(value_type newValue) {
+    endian::write<value_type, endian, alignment>(
+      (void*)Value.buffer, newValue);
+  }
+
+  packed_endian_specific_integral &operator+=(value_type newValue) {
+    *this = *this + newValue;
+    return *this;
+  }
+
+  packed_endian_specific_integral &operator-=(value_type newValue) {
+    *this = *this - newValue;
+    return *this;
+  }
+
+  packed_endian_specific_integral &operator|=(value_type newValue) {
+    *this = *this | newValue;
+    return *this;
+  }
+
+  packed_endian_specific_integral &operator&=(value_type newValue) {
+    *this = *this & newValue;
+    return *this;
+  }
+
+private:
+  AlignedCharArray<PickAlignment<value_type, alignment>::value,
+                   sizeof(value_type)> Value;
+
+public:
+  struct ref {
+    explicit ref(void *Ptr) : Ptr(Ptr) {}
+
+    operator value_type() const {
+      return endian::read<value_type, endian, alignment>(Ptr);
+    }
+
+    void operator=(value_type NewValue) {
+      endian::write<value_type, endian, alignment>(Ptr, NewValue);
+    }
+
+  private:
+    void *Ptr;
+  };
+};
+
+} // end namespace detail
+
+using ulittle16_t =
+    detail::packed_endian_specific_integral<uint16_t, little, unaligned>;
+using ulittle32_t =
+    detail::packed_endian_specific_integral<uint32_t, little, unaligned>;
+using ulittle64_t =
+    detail::packed_endian_specific_integral<uint64_t, little, unaligned>;
+
+using little16_t =
+    detail::packed_endian_specific_integral<int16_t, little, unaligned>;
+using little32_t =
+    detail::packed_endian_specific_integral<int32_t, little, unaligned>;
+using little64_t =
+    detail::packed_endian_specific_integral<int64_t, little, unaligned>;
+
+using aligned_ulittle16_t =
+    detail::packed_endian_specific_integral<uint16_t, little, aligned>;
+using aligned_ulittle32_t =
+    detail::packed_endian_specific_integral<uint32_t, little, aligned>;
+using aligned_ulittle64_t =
+    detail::packed_endian_specific_integral<uint64_t, little, aligned>;
+
+using aligned_little16_t =
+    detail::packed_endian_specific_integral<int16_t, little, aligned>;
+using aligned_little32_t =
+    detail::packed_endian_specific_integral<int32_t, little, aligned>;
+using aligned_little64_t =
+    detail::packed_endian_specific_integral<int64_t, little, aligned>;
+
+using ubig16_t =
+    detail::packed_endian_specific_integral<uint16_t, big, unaligned>;
+using ubig32_t =
+    detail::packed_endian_specific_integral<uint32_t, big, unaligned>;
+using ubig64_t =
+    detail::packed_endian_specific_integral<uint64_t, big, unaligned>;
+
+using big16_t =
+    detail::packed_endian_specific_integral<int16_t, big, unaligned>;
+using big32_t =
+    detail::packed_endian_specific_integral<int32_t, big, unaligned>;
+using big64_t =
+    detail::packed_endian_specific_integral<int64_t, big, unaligned>;
+
+using aligned_ubig16_t =
+    detail::packed_endian_specific_integral<uint16_t, big, aligned>;
+using aligned_ubig32_t =
+    detail::packed_endian_specific_integral<uint32_t, big, aligned>;
+using aligned_ubig64_t =
+    detail::packed_endian_specific_integral<uint64_t, big, aligned>;
+
+using aligned_big16_t =
+    detail::packed_endian_specific_integral<int16_t, big, aligned>;
+using aligned_big32_t =
+    detail::packed_endian_specific_integral<int32_t, big, aligned>;
+using aligned_big64_t =
+    detail::packed_endian_specific_integral<int64_t, big, aligned>;
+
+using unaligned_uint16_t =
+    detail::packed_endian_specific_integral<uint16_t, native, unaligned>;
+using unaligned_uint32_t =
+    detail::packed_endian_specific_integral<uint32_t, native, unaligned>;
+using unaligned_uint64_t =
+    detail::packed_endian_specific_integral<uint64_t, native, unaligned>;
+
+using unaligned_int16_t =
+    detail::packed_endian_specific_integral<int16_t, native, unaligned>;
+using unaligned_int32_t =
+    detail::packed_endian_specific_integral<int32_t, native, unaligned>;
+using unaligned_int64_t =
+    detail::packed_endian_specific_integral<int64_t, native, unaligned>;
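+
+// Example usage (illustrative): reading a little-endian field from a raw
+// buffer regardless of host byte order or alignment.
+//
+//   const uint8_t *Buf = ...;  // e.g. bytes of an on-disk structure
+//   uint32_t V = *reinterpret_cast<const ulittle32_t *>(Buf);
+//   // Equivalent convenience form using the helpers below:
+//   uint32_t W = endian::read32le(Buf);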
+
+namespace endian {
+
+template <typename T> inline T read(const void *P, endianness E) {
+  return read<T, unaligned>(P, E);
+}
+
+template <typename T, endianness E> inline T read(const void *P) {
+  return *(const detail::packed_endian_specific_integral<T, E, unaligned> *)P;
+}
+
+inline uint16_t read16(const void *P, endianness E) {
+  return read<uint16_t>(P, E);
+}
+inline uint32_t read32(const void *P, endianness E) {
+  return read<uint32_t>(P, E);
+}
+inline uint64_t read64(const void *P, endianness E) {
+  return read<uint64_t>(P, E);
+}
+
+template <endianness E> inline uint16_t read16(const void *P) {
+  return read<uint16_t, E>(P);
+}
+template <endianness E> inline uint32_t read32(const void *P) {
+  return read<uint32_t, E>(P);
+}
+template <endianness E> inline uint64_t read64(const void *P) {
+  return read<uint64_t, E>(P);
+}
+
+inline uint16_t read16le(const void *P) { return read16<little>(P); }
+inline uint32_t read32le(const void *P) { return read32<little>(P); }
+inline uint64_t read64le(const void *P) { return read64<little>(P); }
+inline uint16_t read16be(const void *P) { return read16<big>(P); }
+inline uint32_t read32be(const void *P) { return read32<big>(P); }
+inline uint64_t read64be(const void *P) { return read64<big>(P); }
+
+template <typename T> inline void write(void *P, T V, endianness E) {
+  write<T, unaligned>(P, V, E);
+}
+
+template <typename T, endianness E> inline void write(void *P, T V) {
+  *(detail::packed_endian_specific_integral<T, E, unaligned> *)P = V;
+}
+
+inline void write16(void *P, uint16_t V, endianness E) {
+  write<uint16_t>(P, V, E);
+}
+inline void write32(void *P, uint32_t V, endianness E) {
+  write<uint32_t>(P, V, E);
+}
+inline void write64(void *P, uint64_t V, endianness E) {
+  write<uint64_t>(P, V, E);
+}
+
+template <endianness E> inline void write16(void *P, uint16_t V) {
+  write<uint16_t, E>(P, V);
+}
+template <endianness E> inline void write32(void *P, uint32_t V) {
+  write<uint32_t, E>(P, V);
+}
+template <endianness E> inline void write64(void *P, uint64_t V) {
+  write<uint64_t, E>(P, V);
+}
+
+inline void write16le(void *P, uint16_t V) { write16<little>(P, V); }
+inline void write32le(void *P, uint32_t V) { write32<little>(P, V); }
+inline void write64le(void *P, uint64_t V) { write64<little>(P, V); }
+inline void write16be(void *P, uint16_t V) { write16<big>(P, V); }
+inline void write32be(void *P, uint32_t V) { write32<big>(P, V); }
+inline void write64be(void *P, uint64_t V) { write64<big>(P, V); }
+
+} // end namespace endian
+
+} // end namespace support
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ENDIAN_H
diff --git a/linux-x64/clang/include/llvm/Support/EndianStream.h b/linux-x64/clang/include/llvm/Support/EndianStream.h
new file mode 100644
index 0000000..43ecd4a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/EndianStream.h
@@ -0,0 +1,69 @@
+//===- EndianStream.h - Stream ops with endian specific data ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for operating on streams that have endian
+// specific data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ENDIANSTREAM_H
+#define LLVM_SUPPORT_ENDIANSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace support {
+
+namespace endian {
+/// Adapter to write values to a stream in a particular byte order.
+template <endianness endian> struct Writer {
+  raw_ostream &OS;
+  Writer(raw_ostream &OS) : OS(OS) {}
+  template <typename value_type> void write(ArrayRef<value_type> Vals) {
+    for (value_type V : Vals)
+      write(V);
+  }
+  template <typename value_type> void write(value_type Val) {
+    Val = byte_swap<value_type, endian>(Val);
+    OS.write((const char *)&Val, sizeof(value_type));
+  }
+};
+
+template <>
+template <>
+inline void Writer<little>::write<float>(float Val) {
+  write(FloatToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<little>::write<double>(double Val) {
+  write(DoubleToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<big>::write<float>(float Val) {
+  write(FloatToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<big>::write<double>(double Val) {
+  write(DoubleToBits(Val));
+}
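+
+// Example usage (illustrative):
+//
+//   raw_ostream &OS = ...;
+//   Writer<little> LE(OS);
+//   LE.write<uint32_t>(0xDEADBEEF); // emits bytes EF BE AD DE
+//   LE.write<float>(1.0f);          // routed through the specialization above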
+
+} // end namespace endian
+
+} // end namespace support
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Errc.h b/linux-x64/clang/include/llvm/Support/Errc.h
new file mode 100644
index 0000000..80bfe2a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Errc.h
@@ -0,0 +1,86 @@
+//===- llvm/Support/Errc.h - Defines the llvm::errc enum --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// While std::error_code works OK on all platforms we use, there are some
+// problems with std::errc that can be avoided by using our own
+// enumeration:
+//
+// * std::errc is a namespace in some implementations. That means that ADL
+//   doesn't work and it is sometimes necessary to write std::make_error_code
+//   or in templates:
+//   using std::make_error_code;
+//   make_error_code(...);
+//
+//   with this enum it is safe to always just use make_error_code.
+//
+// * Some implementations define fewer names than others. This header has
+//   the intersection of all the ones we support.
+//
+// * std::errc is just marked with is_error_condition_enum. This means that
+//   common patterns like AnErrorCode == errc::no_such_file_or_directory take
+//   4 virtual calls instead of two comparisons.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRC_H
+#define LLVM_SUPPORT_ERRC_H
+
+#include <system_error>
+
+namespace llvm {
+enum class errc {
+  argument_list_too_long = int(std::errc::argument_list_too_long),
+  argument_out_of_domain = int(std::errc::argument_out_of_domain),
+  bad_address = int(std::errc::bad_address),
+  bad_file_descriptor = int(std::errc::bad_file_descriptor),
+  broken_pipe = int(std::errc::broken_pipe),
+  device_or_resource_busy = int(std::errc::device_or_resource_busy),
+  directory_not_empty = int(std::errc::directory_not_empty),
+  executable_format_error = int(std::errc::executable_format_error),
+  file_exists = int(std::errc::file_exists),
+  file_too_large = int(std::errc::file_too_large),
+  filename_too_long = int(std::errc::filename_too_long),
+  function_not_supported = int(std::errc::function_not_supported),
+  illegal_byte_sequence = int(std::errc::illegal_byte_sequence),
+  inappropriate_io_control_operation =
+      int(std::errc::inappropriate_io_control_operation),
+  interrupted = int(std::errc::interrupted),
+  invalid_argument = int(std::errc::invalid_argument),
+  invalid_seek = int(std::errc::invalid_seek),
+  io_error = int(std::errc::io_error),
+  is_a_directory = int(std::errc::is_a_directory),
+  no_child_process = int(std::errc::no_child_process),
+  no_lock_available = int(std::errc::no_lock_available),
+  no_space_on_device = int(std::errc::no_space_on_device),
+  no_such_device_or_address = int(std::errc::no_such_device_or_address),
+  no_such_device = int(std::errc::no_such_device),
+  no_such_file_or_directory = int(std::errc::no_such_file_or_directory),
+  no_such_process = int(std::errc::no_such_process),
+  not_a_directory = int(std::errc::not_a_directory),
+  not_enough_memory = int(std::errc::not_enough_memory),
+  operation_not_permitted = int(std::errc::operation_not_permitted),
+  permission_denied = int(std::errc::permission_denied),
+  read_only_file_system = int(std::errc::read_only_file_system),
+  resource_deadlock_would_occur = int(std::errc::resource_deadlock_would_occur),
+  resource_unavailable_try_again =
+      int(std::errc::resource_unavailable_try_again),
+  result_out_of_range = int(std::errc::result_out_of_range),
+  too_many_files_open_in_system = int(std::errc::too_many_files_open_in_system),
+  too_many_files_open = int(std::errc::too_many_files_open),
+  too_many_links = int(std::errc::too_many_links)
+};
+
+inline std::error_code make_error_code(errc E) {
+  return std::error_code(static_cast<int>(E), std::generic_category());
+}
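+
+// Example usage (illustrative):
+//
+//   std::error_code EC = make_error_code(errc::no_such_file_or_directory);
+//   if (EC == errc::no_such_file_or_directory)
+//     ...; // a plain enum comparison via is_error_code_enum below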
+}
+
+namespace std {
+template <> struct is_error_code_enum<llvm::errc> : std::true_type {};
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Errno.h b/linux-x64/clang/include/llvm/Support/Errno.h
new file mode 100644
index 0000000..35dc1ea
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Errno.h
@@ -0,0 +1,46 @@
+//===- llvm/Support/Errno.h - Portable+convenient errno handling -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some portable and convenient functions to deal with errno.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRNO_H
+#define LLVM_SUPPORT_ERRNO_H
+
+#include <cerrno>
+#include <string>
+#include <type_traits>
+
+namespace llvm {
+namespace sys {
+
+/// Returns a string representation of the errno value, using whatever
+/// thread-safe variant of strerror() is available.  Be sure to call this
+/// immediately after the function that set errno, or errno may have been
+/// overwritten by an intervening call.
+std::string StrError();
+
+/// Like the no-argument version above, but uses \p errnum instead of errno.
+std::string StrError(int errnum);
+
+template <typename FailT, typename Fun, typename... Args>
+inline auto RetryAfterSignal(const FailT &Fail, const Fun &F,
+                             const Args &... As) -> decltype(F(As...)) {
+  decltype(F(As...)) Res;
+  do
+    Res = F(As...);
+  while (Res == Fail && errno == EINTR);
+  return Res;
+}
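+
+// Example usage (illustrative): restart a read(2) that may be interrupted by
+// a signal, retrying while it fails with EINTR.
+//
+//   ssize_t NumRead = RetryAfterSignal(-1, ::read, FD, Buf, Size);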
+
+}  // namespace sys
+}  // namespace llvm
+
+#endif  // LLVM_SUPPORT_ERRNO_H
diff --git a/linux-x64/clang/include/llvm/Support/Error.h b/linux-x64/clang/include/llvm/Support/Error.h
new file mode 100644
index 0000000..2527f89
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Error.h
@@ -0,0 +1,1167 @@
+//===- llvm/Support/Error.h - Recoverable error handling --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an API used to report recoverable errors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERROR_H
+#define LLVM_SUPPORT_ERROR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <memory>
+#include <new>
+#include <string>
+#include <system_error>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class ErrorSuccess;
+
+/// Base class for error info classes. Do not extend this directly: Extend
+/// the ErrorInfo template subclass instead.
+class ErrorInfoBase {
+public:
+  virtual ~ErrorInfoBase() = default;
+
+  /// Print an error message to an output stream.
+  virtual void log(raw_ostream &OS) const = 0;
+
+  /// Return the error message as a string.
+  virtual std::string message() const {
+    std::string Msg;
+    raw_string_ostream OS(Msg);
+    log(OS);
+    return OS.str();
+  }
+
+  /// Convert this error to a std::error_code.
+  ///
+  /// This is a temporary crutch to enable interaction with code still
+  /// using std::error_code. It will be removed in the future.
+  virtual std::error_code convertToErrorCode() const = 0;
+
+  // Returns the class ID for this type.
+  static const void *classID() { return &ID; }
+
+  // Returns the class ID for the dynamic type of this ErrorInfoBase instance.
+  virtual const void *dynamicClassID() const = 0;
+
+  // Check whether this instance is a subclass of the class identified by
+  // ClassID.
+  virtual bool isA(const void *const ClassID) const {
+    return ClassID == classID();
+  }
+
+  // Check whether this instance is a subclass of ErrorInfoT.
+  template <typename ErrorInfoT> bool isA() const {
+    return isA(ErrorInfoT::classID());
+  }
+
+private:
+  virtual void anchor();
+
+  static char ID;
+};
+
+/// Lightweight error class with error context and mandatory checking.
+///
+/// Instances of this class wrap an ErrorInfoBase pointer. Failure states
+/// are represented by setting the pointer to an ErrorInfoBase subclass
+/// instance containing information describing the failure. Success is
+/// represented by a null pointer value.
+///
+/// Instances of Error also contain a 'Checked' flag, which must be set
+/// before the destructor is called, otherwise the destructor will trigger a
+/// runtime error. This enforces at runtime the requirement that all Error
+/// instances be checked or returned to the caller.
+///
+/// There are two ways to set the checked flag, depending on what state the
+/// Error instance is in. For Error instances indicating success, it
+/// is sufficient to invoke the boolean conversion operator. E.g.:
+///
+///   @code{.cpp}
+///   Error foo(<...>);
+///
+///   if (auto E = foo(<...>))
+///     return E; // <- Return E if it is in the error state.
+///   // We have verified that E was in the success state. It can now be safely
+///   // destroyed.
+///   @endcode
+///
+/// A success value *cannot* be dropped. For example, just calling 'foo(<...>)'
+/// without testing the return value will raise a runtime error, even if foo
+/// returns success.
+///
+/// For Error instances representing failure, you must use either the
+/// handleErrors or handleAllErrors function with a typed handler. E.g.:
+///
+///   @code{.cpp}
+///   class MyErrorInfo : public ErrorInfo<MyErrorInfo> {
+///     // Custom error info.
+///   };
+///
+///   Error foo(<...>) { return make_error<MyErrorInfo>(...); }
+///
+///   auto E = foo(<...>); // <- foo returns failure with MyErrorInfo.
+///   auto NewE =
+///     handleErrors(E,
+///       [](const MyErrorInfo &M) {
+///         // Deal with the error.
+///       },
+///       [](std::unique_ptr<OtherError> M) -> Error {
+///         if (canHandle(*M)) {
+///           // handle error.
+///           return Error::success();
+///         }
+///         // Couldn't handle this error instance. Pass it up the stack.
+///         return Error(std::move(M));
+///       });
+///   // Note - we must check or return NewE in case any of the handlers
+///   // returned a new error.
+///   @endcode
+///
+/// The handleAllErrors function is identical to handleErrors, except
+/// that it has a void return type, and requires all errors to be handled and
+/// no new errors be returned. It prevents errors (assuming they can all be
+/// handled) from having to be bubbled all the way to the top-level.
+///
+/// *All* Error instances must be checked before destruction, even if
+/// they're moved-assigned or constructed from Success values that have already
+/// been checked. This enforces checking through all levels of the call stack.
+class LLVM_NODISCARD Error {
+  // ErrorList needs to be able to yank ErrorInfoBase pointers out of this
+  // class to add to the error list.
+  friend class ErrorList;
+
+  // handleErrors needs to be able to set the Checked flag.
+  template <typename... HandlerTs>
+  friend Error handleErrors(Error E, HandlerTs &&... Handlers);
+
+  // Expected<T> needs to be able to steal the payload when constructed from an
+  // error.
+  template <typename T> friend class Expected;
+
+protected:
+  /// Create a success value. Prefer using 'Error::success()' for readability.
+  Error() {
+    setPtr(nullptr);
+    setChecked(false);
+  }
+
+public:
+  /// Create a success value.
+  static ErrorSuccess success();
+
+  // Errors are not copy-constructible.
+  Error(const Error &Other) = delete;
+
+  /// Move-construct an error value. The newly constructed error is considered
+  /// unchecked, even if the source error had been checked. The original error
+  /// becomes a checked Success value, regardless of its original state.
+  Error(Error &&Other) {
+    setChecked(true);
+    *this = std::move(Other);
+  }
+
+  /// Create an error value. Prefer using the 'make_error' function, but
+  /// this constructor can be useful when "re-throwing" errors from handlers.
+  Error(std::unique_ptr<ErrorInfoBase> Payload) {
+    setPtr(Payload.release());
+    setChecked(false);
+  }
+
+  // Errors are not copy-assignable.
+  Error &operator=(const Error &Other) = delete;
+
+  /// Move-assign an error value. The current error must represent success; you
+  /// cannot overwrite an unhandled error. The current error is then
+  /// considered unchecked. The source error becomes a checked success value,
+  /// regardless of its original state.
+  Error &operator=(Error &&Other) {
+    // Don't allow overwriting of unchecked values.
+    assertIsChecked();
+    setPtr(Other.getPtr());
+
+    // This Error is unchecked, even if the source error was checked.
+    setChecked(false);
+
+    // Null out Other's payload and set its checked bit.
+    Other.setPtr(nullptr);
+    Other.setChecked(true);
+
+    return *this;
+  }
+
+  /// Destroy an Error. Fails with a call to abort() if the error is
+  /// unchecked.
+  ~Error() {
+    assertIsChecked();
+    delete getPtr();
+  }
+
+  /// Bool conversion. Returns true if this Error is in a failure state,
+  /// and false if it is in a success state. If the error is in a success state
+  /// it will be considered checked.
+  explicit operator bool() {
+    setChecked(getPtr() == nullptr);
+    return getPtr() != nullptr;
+  }
+
+  /// Check whether one error is a subclass of another.
+  template <typename ErrT> bool isA() const {
+    return getPtr() && getPtr()->isA(ErrT::classID());
+  }
+
+  /// Returns the dynamic class id of this error, or null if this is a success
+  /// value.
+  const void* dynamicClassID() const {
+    if (!getPtr())
+      return nullptr;
+    return getPtr()->dynamicClassID();
+  }
+
+private:
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+  // assertIsChecked() happens very frequently, but under normal circumstances
+  // is supposed to be a no-op, so we want it to be inlined. The debug prints
+  // in fatalUncheckedError() would make the caller too large to inline, so it
+  // is important that fatalUncheckedError() is defined out of line and never
+  // inlined into assertIsChecked().
+  LLVM_ATTRIBUTE_NORETURN
+  void fatalUncheckedError() const;
+#endif
+
+  void assertIsChecked() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    if (LLVM_UNLIKELY(!getChecked() || getPtr()))
+      fatalUncheckedError();
+#endif
+  }
+
+  ErrorInfoBase *getPtr() const {
+    return reinterpret_cast<ErrorInfoBase*>(
+             reinterpret_cast<uintptr_t>(Payload) &
+             ~static_cast<uintptr_t>(0x1));
+  }
+
+  void setPtr(ErrorInfoBase *EI) {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    Payload = reinterpret_cast<ErrorInfoBase*>(
+                (reinterpret_cast<uintptr_t>(EI) &
+                 ~static_cast<uintptr_t>(0x1)) |
+                (reinterpret_cast<uintptr_t>(Payload) & 0x1));
+#else
+    Payload = EI;
+#endif
+  }
+
+  bool getChecked() const {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    return (reinterpret_cast<uintptr_t>(Payload) & 0x1) == 0;
+#else
+    return true;
+#endif
+  }
+
+  void setChecked(bool V) {
+    Payload = reinterpret_cast<ErrorInfoBase*>(
+                (reinterpret_cast<uintptr_t>(Payload) &
+                  ~static_cast<uintptr_t>(0x1)) |
+                  (V ? 0 : 1));
+  }
+
+  std::unique_ptr<ErrorInfoBase> takePayload() {
+    std::unique_ptr<ErrorInfoBase> Tmp(getPtr());
+    setPtr(nullptr);
+    setChecked(true);
+    return Tmp;
+  }
+
+  ErrorInfoBase *Payload = nullptr;
+};
+
+/// Subclass of Error for the sole purpose of identifying the success path in
+/// the type system. This allows invalid conversions to Expected<T> to be
+/// caught at compile time.
+class ErrorSuccess : public Error {};
+
+inline ErrorSuccess Error::success() { return ErrorSuccess(); }
+
+/// Make an Error instance representing failure using the given error info
+/// type.
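+///
+///   @code{.cpp}
+///   // A sketch: MyErrorInfo is a hypothetical ErrorInfo subclass whose
+///   // constructor accepts a message string.
+///   Error E = make_error<MyErrorInfo>("something went wrong");
+///   @endcode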
+template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&... Args) {
+  return Error(llvm::make_unique<ErrT>(std::forward<ArgTs>(Args)...));
+}
+
+/// Base class for user error types. Users should declare their error types
+/// like:
+///
+/// class MyError : public ErrorInfo<MyError> {
+///   ....
+/// };
+///
+/// This class provides implementations of the ErrorInfoBase::isA and
+/// dynamicClassID methods, which are used by the Error RTTI system.
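+///
+/// A fuller sketch (illustrative; BadFileError and its members are
+/// hypothetical):
+///
+///   @code{.cpp}
+///   class BadFileError : public ErrorInfo<BadFileError> {
+///   public:
+///     static char ID; // Must be defined in exactly one .cpp file.
+///
+///     BadFileError(std::string Path) : Path(std::move(Path)) {}
+///
+///     void log(raw_ostream &OS) const override {
+///       OS << "bad file: " << Path;
+///     }
+///
+///     std::error_code convertToErrorCode() const override {
+///       return inconvertibleErrorCode();
+///     }
+///
+///   private:
+///     std::string Path;
+///   };
+///   @endcode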
+template <typename ThisErrT, typename ParentErrT = ErrorInfoBase>
+class ErrorInfo : public ParentErrT {
+public:
+  static const void *classID() { return &ThisErrT::ID; }
+
+  const void *dynamicClassID() const override { return &ThisErrT::ID; }
+
+  bool isA(const void *const ClassID) const override {
+    return ClassID == classID() || ParentErrT::isA(ClassID);
+  }
+};
+
+/// Special ErrorInfo subclass representing a list of ErrorInfos.
+/// Instances of this class are constructed by joinErrors.
+class ErrorList final : public ErrorInfo<ErrorList> {
+  // handleErrors needs to be able to iterate the payload list of an
+  // ErrorList.
+  template <typename... HandlerTs>
+  friend Error handleErrors(Error E, HandlerTs &&... Handlers);
+
+  // joinErrors is implemented in terms of join.
+  friend Error joinErrors(Error, Error);
+
+public:
+  void log(raw_ostream &OS) const override {
+    OS << "Multiple errors:\n";
+    for (auto &ErrPayload : Payloads) {
+      ErrPayload->log(OS);
+      OS << "\n";
+    }
+  }
+
+  std::error_code convertToErrorCode() const override;
+
+  // Used by ErrorInfo::classID.
+  static char ID;
+
+private:
+  ErrorList(std::unique_ptr<ErrorInfoBase> Payload1,
+            std::unique_ptr<ErrorInfoBase> Payload2) {
+    assert(!Payload1->isA<ErrorList>() && !Payload2->isA<ErrorList>() &&
+           "ErrorList constructor payloads should be singleton errors");
+    Payloads.push_back(std::move(Payload1));
+    Payloads.push_back(std::move(Payload2));
+  }
+
+  static Error join(Error E1, Error E2) {
+    if (!E1)
+      return E2;
+    if (!E2)
+      return E1;
+    if (E1.isA<ErrorList>()) {
+      auto &E1List = static_cast<ErrorList &>(*E1.getPtr());
+      if (E2.isA<ErrorList>()) {
+        auto E2Payload = E2.takePayload();
+        auto &E2List = static_cast<ErrorList &>(*E2Payload);
+        for (auto &Payload : E2List.Payloads)
+          E1List.Payloads.push_back(std::move(Payload));
+      } else
+        E1List.Payloads.push_back(E2.takePayload());
+
+      return E1;
+    }
+    if (E2.isA<ErrorList>()) {
+      auto &E2List = static_cast<ErrorList &>(*E2.getPtr());
+      E2List.Payloads.insert(E2List.Payloads.begin(), E1.takePayload());
+      return E2;
+    }
+    return Error(std::unique_ptr<ErrorList>(
+        new ErrorList(E1.takePayload(), E2.takePayload())));
+  }
+
+  std::vector<std::unique_ptr<ErrorInfoBase>> Payloads;
+};
+
+/// Concatenate errors. The resulting Error is unchecked, and contains the
+/// ErrorInfo(s), if any, contained in E1, followed by the
+/// ErrorInfo(s), if any, contained in E2.
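+///
+///   @code{.cpp}
+///   // A sketch: E1 and E2 are assumed to be Errors returned by earlier
+///   // fallible calls; the combined Error still has to be checked.
+///   Error Combined = joinErrors(std::move(E1), std::move(E2));
+///   @endcode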
+inline Error joinErrors(Error E1, Error E2) {
+  return ErrorList::join(std::move(E1), std::move(E2));
+}
+
+/// Tagged union holding either a T or an Error.
+///
+/// This class parallels ErrorOr, but replaces error_code with Error. Since
+/// Error cannot be copied, this class replaces getError() with
+/// takeError(). It also adds a bool errorIsA<ErrT>() method for testing the
+/// error class type.
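+///
+/// Typical usage (a sketch; openFile and use are hypothetical):
+///
+///   @code{.cpp}
+///   Expected<int> openFile(StringRef Name);
+///
+///   if (auto FDOrErr = openFile("foo.txt"))
+///     use(*FDOrErr);              // Success: dereference to get the value.
+///   else
+///     return FDOrErr.takeError(); // Failure: propagate the Error.
+///   @endcode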
+template <class T> class LLVM_NODISCARD Expected {
+  template <class T1> friend class ExpectedAsOutParameter;
+  template <class OtherT> friend class Expected;
+
+  static const bool isRef = std::is_reference<T>::value;
+
+  using wrap = ReferenceStorage<typename std::remove_reference<T>::type>;
+
+  using error_type = std::unique_ptr<ErrorInfoBase>;
+
+public:
+  using storage_type = typename std::conditional<isRef, wrap, T>::type;
+  using value_type = T;
+
+private:
+  using reference = typename std::remove_reference<T>::type &;
+  using const_reference = const typename std::remove_reference<T>::type &;
+  using pointer = typename std::remove_reference<T>::type *;
+  using const_pointer = const typename std::remove_reference<T>::type *;
+
+public:
+  /// Create an Expected<T> error value from the given Error.
+  Expected(Error Err)
+      : HasError(true)
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+        // Expected is unchecked upon construction in Debug builds.
+        , Unchecked(true)
+#endif
+  {
+    assert(Err && "Cannot create Expected<T> from Error success value.");
+    new (getErrorStorage()) error_type(Err.takePayload());
+  }
+
+  /// Forbid implicit conversion from Error::success(). This avoids having
+  /// Expected<T> foo() { return Error::success(); }, which would otherwise
+  /// compile but trigger the assertion above.
+  Expected(ErrorSuccess) = delete;
+
+  /// Create an Expected<T> success value from the given OtherT value, which
+  /// must be convertible to T.
+  template <typename OtherT>
+  Expected(OtherT &&Val,
+           typename std::enable_if<std::is_convertible<OtherT, T>::value>::type
+               * = nullptr)
+      : HasError(false)
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+        // Expected is unchecked upon construction in Debug builds.
+        , Unchecked(true)
+#endif
+  {
+    new (getStorage()) storage_type(std::forward<OtherT>(Val));
+  }
+
+  /// Move construct an Expected<T> value.
+  Expected(Expected &&Other) { moveConstruct(std::move(Other)); }
+
+  /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
+  /// must be convertible to T.
+  template <class OtherT>
+  Expected(Expected<OtherT> &&Other,
+           typename std::enable_if<std::is_convertible<OtherT, T>::value>::type
+               * = nullptr) {
+    moveConstruct(std::move(Other));
+  }
+
+  /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
+  /// isn't convertible to T.
+  template <class OtherT>
+  explicit Expected(
+      Expected<OtherT> &&Other,
+      typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    moveConstruct(std::move(Other));
+  }
+
+  /// Move-assign from another Expected<T>.
+  Expected &operator=(Expected &&Other) {
+    moveAssign(std::move(Other));
+    return *this;
+  }
+
+  /// Destroy an Expected<T>.
+  ~Expected() {
+    assertIsChecked();
+    if (!HasError)
+      getStorage()->~storage_type();
+    else
+      getErrorStorage()->~error_type();
+  }
+
+  /// \brief Return false if there is an error.
+  explicit operator bool() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    Unchecked = HasError;
+#endif
+    return !HasError;
+  }
+
+  /// \brief Returns a reference to the stored T value.
+  reference get() {
+    assertIsChecked();
+    return *getStorage();
+  }
+
+  /// \brief Returns a const reference to the stored T value.
+  const_reference get() const {
+    assertIsChecked();
+    return const_cast<Expected<T> *>(this)->get();
+  }
+
+  /// \brief Check that this Expected<T> is an error of type ErrT.
+  template <typename ErrT> bool errorIsA() const {
+    return HasError && (*getErrorStorage())->template isA<ErrT>();
+  }
+
+  /// \brief Take ownership of the stored error.
+  /// After calling this the Expected<T> is in an indeterminate state that can
+  /// only be safely destructed. No further calls (besides the destructor)
+  /// should be made on the Expected<T> value.
+  Error takeError() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    Unchecked = false;
+#endif
+    return HasError ? Error(std::move(*getErrorStorage())) : Error::success();
+  }
+
+  /// \brief Returns a pointer to the stored T value.
+  pointer operator->() {
+    assertIsChecked();
+    return toPointer(getStorage());
+  }
+
+  /// \brief Returns a const pointer to the stored T value.
+  const_pointer operator->() const {
+    assertIsChecked();
+    return toPointer(getStorage());
+  }
+
+  /// \brief Returns a reference to the stored T value.
+  reference operator*() {
+    assertIsChecked();
+    return *getStorage();
+  }
+
+  /// \brief Returns a const reference to the stored T value.
+  const_reference operator*() const {
+    assertIsChecked();
+    return *getStorage();
+  }
+
+private:
+  template <class T1>
+  static bool compareThisIfSameType(const T1 &a, const T1 &b) {
+    return &a == &b;
+  }
+
+  template <class T1, class T2>
+  static bool compareThisIfSameType(const T1 &a, const T2 &b) {
+    return false;
+  }
+
+  template <class OtherT> void moveConstruct(Expected<OtherT> &&Other) {
+    HasError = Other.HasError;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    Unchecked = true;
+    Other.Unchecked = false;
+#endif
+
+    if (!HasError)
+      new (getStorage()) storage_type(std::move(*Other.getStorage()));
+    else
+      new (getErrorStorage()) error_type(std::move(*Other.getErrorStorage()));
+  }
+
+  template <class OtherT> void moveAssign(Expected<OtherT> &&Other) {
+    assertIsChecked();
+
+    if (compareThisIfSameType(*this, Other))
+      return;
+
+    this->~Expected();
+    new (this) Expected(std::move(Other));
+  }
+
+  pointer toPointer(pointer Val) { return Val; }
+
+  const_pointer toPointer(const_pointer Val) const { return Val; }
+
+  pointer toPointer(wrap *Val) { return &Val->get(); }
+
+  const_pointer toPointer(const wrap *Val) const { return &Val->get(); }
+
+  storage_type *getStorage() {
+    assert(!HasError && "Cannot get value when an error exists!");
+    return reinterpret_cast<storage_type *>(TStorage.buffer);
+  }
+
+  const storage_type *getStorage() const {
+    assert(!HasError && "Cannot get value when an error exists!");
+    return reinterpret_cast<const storage_type *>(TStorage.buffer);
+  }
+
+  error_type *getErrorStorage() {
+    assert(HasError && "Cannot get error when a value exists!");
+    return reinterpret_cast<error_type *>(ErrorStorage.buffer);
+  }
+
+  const error_type *getErrorStorage() const {
+    assert(HasError && "Cannot get error when a value exists!");
+    return reinterpret_cast<const error_type *>(ErrorStorage.buffer);
+  }
+
+  // Used by ExpectedAsOutParameter to reset the checked flag.
+  void setUnchecked() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    Unchecked = true;
+#endif
+  }
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+  LLVM_ATTRIBUTE_NORETURN
+  LLVM_ATTRIBUTE_NOINLINE
+  void fatalUncheckedExpected() const {
+    dbgs() << "Expected<T> must be checked before access or destruction.\n";
+    if (HasError) {
+      dbgs() << "Unchecked Expected<T> contained error:\n";
+      (*getErrorStorage())->log(dbgs());
+    } else
+      dbgs() << "Expected<T> value was in success state. (Note: Expected<T> "
+                "values in success mode must still be checked prior to being "
+                "destroyed).\n";
+    abort();
+  }
+#endif
+
+  void assertIsChecked() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+    if (LLVM_UNLIKELY(Unchecked))
+      fatalUncheckedExpected();
+#endif
+  }
+
+  union {
+    AlignedCharArrayUnion<storage_type> TStorage;
+    AlignedCharArrayUnion<error_type> ErrorStorage;
+  };
+  bool HasError : 1;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+  bool Unchecked : 1;
+#endif
+};
+
+/// Report a serious error, calling any installed error handler. See
+/// ErrorHandling.h.
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err,
+                                                bool gen_crash_diag = true);
+
+/// Report a fatal error if Err is a failure value.
+///
+/// This function can be used to wrap calls to fallible functions ONLY when it
+/// is known that the Error will always be a success value. E.g.
+///
+///   @code{.cpp}
+///   // foo only attempts the fallible operation if DoFallibleOperation is
+///   // true. If DoFallibleOperation is false then foo always returns
+///   // Error::success().
+///   Error foo(bool DoFallibleOperation);
+///
+///   cantFail(foo(false));
+///   @endcode
+inline void cantFail(Error Err, const char *Msg = nullptr) {
+  if (Err) {
+    if (!Msg)
+      Msg = "Failure value returned from cantFail wrapped call";
+    llvm_unreachable(Msg);
+  }
+}
+
+/// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and
+/// returns the contained value.
+///
+/// This function can be used to wrap calls to fallible functions ONLY when it
+/// is known that the Error will always be a success value. E.g.
+///
+///   @code{.cpp}
+///   // foo only attempts the fallible operation if DoFallibleOperation is
+///   // true. If DoFallibleOperation is false then foo always returns an int.
+///   Expected<int> foo(bool DoFallibleOperation);
+///
+///   int X = cantFail(foo(false));
+///   @endcode
+template <typename T>
+T cantFail(Expected<T> ValOrErr, const char *Msg = nullptr) {
+  if (ValOrErr)
+    return std::move(*ValOrErr);
+  else {
+    if (!Msg)
+      Msg = "Failure value returned from cantFail wrapped call";
+    llvm_unreachable(Msg);
+  }
+}
+
+/// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and
+/// returns the contained reference.
+///
+/// This function can be used to wrap calls to fallible functions ONLY when it
+/// is known that the Error will always be a success value. E.g.
+///
+///   @code{.cpp}
+///   // foo only attempts the fallible operation if DoFallibleOperation is
+///   // true. If DoFallibleOperation is false then foo always returns a Bar&.
+///   Expected<Bar&> foo(bool DoFallibleOperation);
+///
+///   Bar &X = cantFail(foo(false));
+///   @endcode
+template <typename T>
+T& cantFail(Expected<T&> ValOrErr, const char *Msg = nullptr) {
+  if (ValOrErr)
+    return *ValOrErr;
+  else {
+    if (!Msg)
+      Msg = "Failure value returned from cantFail wrapped call";
+    llvm_unreachable(Msg);
+  }
+}
+
+/// Helper for testing applicability of, and applying, handlers for
+/// ErrorInfo types.
+template <typename HandlerT>
+class ErrorHandlerTraits
+    : public ErrorHandlerTraits<decltype(
+          &std::remove_reference<HandlerT>::type::operator())> {};
+
+// Specialization for functions of the form 'Error (ErrT &)'.
+template <typename ErrT> class ErrorHandlerTraits<Error (&)(ErrT &)> {
+public:
+  static bool appliesTo(const ErrorInfoBase &E) {
+    return E.template isA<ErrT>();
+  }
+
+  template <typename HandlerT>
+  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
+    assert(appliesTo(*E) && "Applying incorrect handler");
+    return H(static_cast<ErrT &>(*E));
+  }
+};
+
+// Specialization for functions of the form 'void (ErrT &)'.
+template <typename ErrT> class ErrorHandlerTraits<void (&)(ErrT &)> {
+public:
+  static bool appliesTo(const ErrorInfoBase &E) {
+    return E.template isA<ErrT>();
+  }
+
+  template <typename HandlerT>
+  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
+    assert(appliesTo(*E) && "Applying incorrect handler");
+    H(static_cast<ErrT &>(*E));
+    return Error::success();
+  }
+};
+
+/// Specialization for functions of the form 'Error (std::unique_ptr<ErrT>)'.
+template <typename ErrT>
+class ErrorHandlerTraits<Error (&)(std::unique_ptr<ErrT>)> {
+public:
+  static bool appliesTo(const ErrorInfoBase &E) {
+    return E.template isA<ErrT>();
+  }
+
+  template <typename HandlerT>
+  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
+    assert(appliesTo(*E) && "Applying incorrect handler");
+    std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release()));
+    return H(std::move(SubE));
+  }
+};
+
+/// Specialization for functions of the form 'void (std::unique_ptr<ErrT>)'.
+template <typename ErrT>
+class ErrorHandlerTraits<void (&)(std::unique_ptr<ErrT>)> {
+public:
+  static bool appliesTo(const ErrorInfoBase &E) {
+    return E.template isA<ErrT>();
+  }
+
+  template <typename HandlerT>
+  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
+    assert(appliesTo(*E) && "Applying incorrect handler");
+    std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release()));
+    H(std::move(SubE));
+    return Error::success();
+  }
+};
+
+// Specialization for member functions of the form 'RetT (ErrT &)'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(ErrT &)>
+    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};
+
+// Specialization for member functions of the form 'RetT (ErrT &) const'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(ErrT &) const>
+    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};
+
+// Specialization for member functions of the form 'RetT (const ErrT&)'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(const ErrT &)>
+    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};
+
+// Specialization for member functions of the form 'RetT (const ErrT&) const'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(const ErrT &) const>
+    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};
+
+/// Specialization for member functions of the form
+/// 'RetT (std::unique_ptr<ErrT>)'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>)>
+    : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {};
+
+/// Specialization for member functions of the form
+/// 'RetT (std::unique_ptr<ErrT>) const'.
+template <typename C, typename RetT, typename ErrT>
+class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>) const>
+    : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {};
+
+inline Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload) {
+  return Error(std::move(Payload));
+}
+
+template <typename HandlerT, typename... HandlerTs>
+Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload,
+                      HandlerT &&Handler, HandlerTs &&... Handlers) {
+  if (ErrorHandlerTraits<HandlerT>::appliesTo(*Payload))
+    return ErrorHandlerTraits<HandlerT>::apply(std::forward<HandlerT>(Handler),
+                                               std::move(Payload));
+  return handleErrorImpl(std::move(Payload),
+                         std::forward<HandlerTs>(Handlers)...);
+}
+
+/// Pass the ErrorInfo(s) contained in E to their respective handlers. Any
+/// unhandled errors (or Errors returned by handlers) are re-concatenated and
+/// returned.
+/// Because this function returns an error, its result must also be checked
+/// or returned. If you intend to handle all errors use handleAllErrors
+/// (which returns void, and will abort() on unhandled errors) instead.
+template <typename... HandlerTs>
+Error handleErrors(Error E, HandlerTs &&... Hs) {
+  if (!E)
+    return Error::success();
+
+  std::unique_ptr<ErrorInfoBase> Payload = E.takePayload();
+
+  if (Payload->isA<ErrorList>()) {
+    ErrorList &List = static_cast<ErrorList &>(*Payload);
+    Error R;
+    for (auto &P : List.Payloads)
+      R = ErrorList::join(
+          std::move(R),
+          handleErrorImpl(std::move(P), std::forward<HandlerTs>(Hs)...));
+    return R;
+  }
+
+  return handleErrorImpl(std::move(Payload), std::forward<HandlerTs>(Hs)...);
+}
+
+/// Behaves the same as handleErrors, except that by contract all errors
+/// *must* be handled by the given handlers (i.e. there must be no remaining
+/// errors after running the handlers, or llvm_unreachable is called).
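+///
+///   @code{.cpp}
+///   // A sketch: the generic ErrorInfoBase handler matches every error, so
+///   // nothing can remain unhandled. E is consumed.
+///   handleAllErrors(std::move(E), [](const ErrorInfoBase &EI) {
+///     errs() << EI.message() << "\n";
+///   });
+///   @endcode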
+template <typename... HandlerTs>
+void handleAllErrors(Error E, HandlerTs &&... Handlers) {
+  cantFail(handleErrors(std::move(E), std::forward<HandlerTs>(Handlers)...));
+}
+
+/// Check that E is a non-error, then drop it.
+/// If E is an error, llvm_unreachable will be called.
+inline void handleAllErrors(Error E) {
+  cantFail(std::move(E));
+}
+
+/// Handle any errors (if present) in an Expected<T>, then try a recovery path.
+///
+/// If the incoming value is a success value it is returned unmodified. If it
+/// is a failure value then the contained error is passed to handleErrors.
+/// If handleErrors is able to handle the error then the RecoveryPath functor
+/// is called to supply the final result. If handleErrors is not able to
+/// handle all errors then the unhandled errors are returned.
+///
+/// This utility enables the following pattern:
+///
+///   @code{.cpp}
+///   enum FooStrategy { Aggressive, Conservative };
+///   Expected<Foo> foo(FooStrategy S);
+///
+///   auto ResultOrErr =
+///     handleExpected(
+///       foo(Aggressive),
+///       []() { return foo(Conservative); },
+///       [](AggressiveStrategyError&) {
+///         // Implicitly consume this - we'll recover by using a conservative
+///         // strategy.
+///       });
+///
+///   @endcode
+template <typename T, typename RecoveryFtor, typename... HandlerTs>
+Expected<T> handleExpected(Expected<T> ValOrErr, RecoveryFtor &&RecoveryPath,
+                           HandlerTs &&... Handlers) {
+  if (ValOrErr)
+    return ValOrErr;
+
+  if (auto Err = handleErrors(ValOrErr.takeError(),
+                              std::forward<HandlerTs>(Handlers)...))
+    return std::move(Err);
+
+  return RecoveryPath();
+}
+
+/// Log all errors (if any) in E to OS. If there are any errors, ErrorBanner
+/// will be printed before the first one is logged. A newline will be printed
+/// after each error.
+///
+/// This is useful in the base level of your program to allow clean termination
+/// (allowing clean deallocation of resources, etc.), while reporting error
+/// information to the user.
+void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner);
+
+/// Write all error messages (if any) in E to a string. The newline character
+/// is used to separate error messages.
+inline std::string toString(Error E) {
+  SmallVector<std::string, 2> Errors;
+  handleAllErrors(std::move(E), [&Errors](const ErrorInfoBase &EI) {
+    Errors.push_back(EI.message());
+  });
+  return join(Errors.begin(), Errors.end(), "\n");
+}
+
+/// Consume an Error without doing anything. This function should be used
+/// only where an error can be considered a reasonable and expected return
+/// value.
+///
+/// Uses of this method are potentially indicative of design problems: If it's
+/// legitimate to do nothing while processing an "error", the error-producer
+/// might be more clearly refactored to return an Optional<T>.
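+///
+///   @code{.cpp}
+///   // A sketch: failure to remove a temporary file is deliberately ignored
+///   // (tryRemoveTempFile is hypothetical and returns an Error).
+///   consumeError(tryRemoveTempFile());
+///   @endcode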
+inline void consumeError(Error Err) {
+  handleAllErrors(std::move(Err), [](const ErrorInfoBase &) {});
+}
+
+/// Helper for converting an Error to a bool.
+///
+/// This method returns true if Err is in an error state, or false if it is
+/// in a success state.  Puts Err in a checked state in both cases (unlike
+/// Error::operator bool(), which only does this for success states).
+inline bool errorToBool(Error Err) {
+  bool IsError = static_cast<bool>(Err);
+  if (IsError)
+    consumeError(std::move(Err));
+  return IsError;
+}
+
+/// Helper for Errors used as out-parameters.
+///
+/// This helper is for use with the Error-as-out-parameter idiom, where an error
+/// is passed to a function or method by reference, rather than being returned.
+/// In such cases it is helpful to set the checked bit on entry to the function
+/// so that the error can be written to (unchecked Errors abort on assignment)
+/// and clear the checked bit on exit so that clients cannot accidentally forget
+/// to check the result. This helper performs these actions automatically using
+/// RAII:
+///
+///   @code{.cpp}
+///   Result foo(Error &Err) {
+///     ErrorAsOutParameter ErrAsOutParam(&Err); // 'Checked' flag set
+///     // <body of foo>
+///     // <- 'Checked' flag auto-cleared when ErrAsOutParam is destructed.
+///   }
+///   @endcode
+///
+/// ErrorAsOutParameter takes an Error* rather than Error& so that it can be
+/// used with optional Errors (Error pointers that are allowed to be null). If
+/// ErrorAsOutParameter took an Error reference, an instance would have to be
+/// created inside every condition that verified that Error was non-null. By
+/// taking an Error pointer we can just create one instance at the top of the
+/// function.
+class ErrorAsOutParameter {
+public:
+  ErrorAsOutParameter(Error *Err) : Err(Err) {
+    // Raise the checked bit if Err is success.
+    if (Err)
+      (void)!!*Err;
+  }
+
+  ~ErrorAsOutParameter() {
+    // Clear the checked bit.
+    if (Err && !*Err)
+      *Err = Error::success();
+  }
+
+private:
+  Error *Err;
+};
+
+/// Helper for Expected<T>s used as out-parameters.
+///
+/// See ErrorAsOutParameter.
+template <typename T>
+class ExpectedAsOutParameter {
+public:
+  ExpectedAsOutParameter(Expected<T> *ValOrErr)
+    : ValOrErr(ValOrErr) {
+    if (ValOrErr)
+      (void)!!*ValOrErr;
+  }
+
+  ~ExpectedAsOutParameter() {
+    if (ValOrErr)
+      ValOrErr->setUnchecked();
+  }
+
+private:
+  Expected<T> *ValOrErr;
+};
+
+/// This class wraps a std::error_code in an Error.
+///
+/// This is useful if you're writing an interface that returns an Error
+/// (or Expected) and you want to call code that still returns
+/// std::error_codes.
+class ECError : public ErrorInfo<ECError> {
+  friend Error errorCodeToError(std::error_code);
+
+public:
+  void setErrorCode(std::error_code EC) { this->EC = EC; }
+  std::error_code convertToErrorCode() const override { return EC; }
+  void log(raw_ostream &OS) const override { OS << EC.message(); }
+
+  // Used by ErrorInfo::classID.
+  static char ID;
+
+protected:
+  ECError() = default;
+  ECError(std::error_code EC) : EC(EC) {}
+
+  std::error_code EC;
+};
+
+/// The value returned by this function can be returned from convertToErrorCode
+/// for Error values where no sensible translation to std::error_code exists.
+/// It should only be used in this situation, and should never be used where a
+/// sensible conversion to std::error_code is available, as attempts to convert
+/// to/from this error will result in a fatal error (i.e. it is a programmatic
+/// error to try to convert such a value).
+std::error_code inconvertibleErrorCode();
+
+/// Helper for converting a std::error_code to an Error.
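+///
+///   @code{.cpp}
+///   // A sketch: lift a std::error_code from legacy code into the Error
+///   // world (legacyOperation is hypothetical).
+///   if (std::error_code EC = legacyOperation())
+///     return errorCodeToError(EC);
+///   @endcode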
+Error errorCodeToError(std::error_code EC);
+
+/// Helper for converting an ECError to a std::error_code.
+///
+/// This method requires that Err be Error() or an ECError, otherwise it
+/// will trigger a call to abort().
+std::error_code errorToErrorCode(Error Err);
+
+/// Convert an ErrorOr<T> to an Expected<T>.
+template <typename T> Expected<T> errorOrToExpected(ErrorOr<T> &&EO) {
+  if (auto EC = EO.getError())
+    return errorCodeToError(EC);
+  return std::move(*EO);
+}
+
+/// Convert an Expected<T> to an ErrorOr<T>.
+template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) {
+  if (auto Err = E.takeError())
+    return errorToErrorCode(std::move(Err));
+  return std::move(*E);
+}
+
+/// This class wraps a string in an Error.
+///
+/// StringError is useful in cases where the client is not expected to be able
+/// to consume the specific error message programmatically (for example, if the
+/// error message is to be presented to the user).
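+///
+///   @code{.cpp}
+///   // A sketch: report a human-readable failure that has no meaningful
+///   // std::error_code equivalent.
+///   return make_error<StringError>("file too large", inconvertibleErrorCode());
+///   @endcode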
+class StringError : public ErrorInfo<StringError> {
+public:
+  static char ID;
+
+  StringError(const Twine &S, std::error_code EC);
+
+  void log(raw_ostream &OS) const override;
+  std::error_code convertToErrorCode() const override;
+
+  const std::string &getMessage() const { return Msg; }
+
+private:
+  std::string Msg;
+  std::error_code EC;
+};
+
+/// Helper for check-and-exit error handling.
+///
+/// For tool use only. NOT FOR USE IN LIBRARY CODE.
+///
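+/// A usage sketch (the banner text and openFile are illustrative):
+///
+///   @code{.cpp}
+///   ExitOnError ExitOnErr("mytool: ");
+///
+///   Expected<int> openFile(StringRef Name); // Hypothetical fallible call.
+///   int FD = ExitOnErr(openFile("foo.txt")); // Logs and exits on failure.
+///   @endcode
+///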
+class ExitOnError {
+public:
+  /// Create an error on exit helper.
+  ExitOnError(std::string Banner = "", int DefaultErrorExitCode = 1)
+      : Banner(std::move(Banner)),
+        GetExitCode([=](const Error &) { return DefaultErrorExitCode; }) {}
+
+  /// Set the banner string for any errors caught by operator().
+  void setBanner(std::string Banner) { this->Banner = std::move(Banner); }
+
+  /// Set the exit-code mapper function.
+  void setExitCodeMapper(std::function<int(const Error &)> GetExitCode) {
+    this->GetExitCode = std::move(GetExitCode);
+  }
+
+  /// Check Err. If it's in a failure state log the error(s) and exit.
+  void operator()(Error Err) const { checkError(std::move(Err)); }
+
+  /// Check E. If it's in a success state then return the contained value. If
+  /// it's in a failure state log the error(s) and exit.
+  template <typename T> T operator()(Expected<T> &&E) const {
+    checkError(E.takeError());
+    return std::move(*E);
+  }
+
+  /// Check E. If it's in a success state then return the contained reference.
+  /// If it's in a failure state log the error(s) and exit.
+  template <typename T> T& operator()(Expected<T&> &&E) const {
+    checkError(E.takeError());
+    return *E;
+  }
+
+private:
+  void checkError(Error Err) const {
+    if (Err) {
+      int ExitCode = GetExitCode(Err);
+      logAllUnhandledErrors(std::move(Err), errs(), Banner);
+      exit(ExitCode);
+    }
+  }
+
+  std::string Banner;
+  std::function<int(const Error &)> GetExitCode;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ERROR_H
diff --git a/linux-x64/clang/include/llvm/Support/ErrorHandling.h b/linux-x64/clang/include/llvm/Support/ErrorHandling.h
new file mode 100644
index 0000000..39cbfed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ErrorHandling.h
@@ -0,0 +1,144 @@
+//===- llvm/Support/ErrorHandling.h - Fatal error handling ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an API used to indicate fatal error conditions.  Non-fatal
+// errors (most of them) should be handled through LLVMContext.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRORHANDLING_H
+#define LLVM_SUPPORT_ERRORHANDLING_H
+
+#include "llvm/Support/Compiler.h"
+#include <string>
+
+namespace llvm {
+class StringRef;
+class Twine;
+
+  /// An error handler callback.
+  typedef void (*fatal_error_handler_t)(void *user_data,
+                                        const std::string& reason,
+                                        bool gen_crash_diag);
+
+  /// install_fatal_error_handler - Installs a new error handler to be used
+  /// whenever a serious (non-recoverable) error is encountered by LLVM.
+  ///
+  /// If no error handler is installed the default is to print the error message
+  /// to stderr, and call exit(1).  If an error handler is installed then it is
+  /// the handler's responsibility to log the message; it will no longer be
+  /// printed to stderr.  If the error handler returns, then exit(1) will be
+  /// called.
+  ///
+  /// It is dangerous to naively use an error handler which throws an exception.
+  /// Even though some applications desire to gracefully recover from arbitrary
+  /// faults, blindly throwing exceptions through unfamiliar code isn't a way to
+  /// achieve this.
+  ///
+  /// \param user_data - An argument which will be passed to the installed
+  /// error handler.
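+  ///
+  ///   @code{.cpp}
+  ///   // A sketch: route fatal errors to a custom sink (logToFile is
+  ///   // hypothetical). The handler must match fatal_error_handler_t.
+  ///   static void myFatalHandler(void *UserData, const std::string &Reason,
+  ///                              bool GenCrashDiag) {
+  ///     logToFile("LLVM fatal error: " + Reason);
+  ///   }
+  ///
+  ///   install_fatal_error_handler(myFatalHandler);
+  ///   @endcode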
+  void install_fatal_error_handler(fatal_error_handler_t handler,
+                                   void *user_data = nullptr);
+
+  /// Restores default error handling behaviour.
+  void remove_fatal_error_handler();
+
+  /// ScopedFatalErrorHandler - This is a simple helper class which just
+  /// calls install_fatal_error_handler in its constructor and
+  /// remove_fatal_error_handler in its destructor.
+  struct ScopedFatalErrorHandler {
+    explicit ScopedFatalErrorHandler(fatal_error_handler_t handler,
+                                     void *user_data = nullptr) {
+      install_fatal_error_handler(handler, user_data);
+    }
+
+    ~ScopedFatalErrorHandler() { remove_fatal_error_handler(); }
+  };
+
+/// Reports a serious error, calling any installed error handler. These
+/// functions are intended to be used for error conditions which are outside
+/// the control of the compiler (I/O errors, invalid user input, etc.)
+///
+/// If no error handler is installed the default is to print the message to
+/// standard error, followed by a newline.
+/// After the error handler is called this function will call exit(1); it
+/// does not return.
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason,
+                                                bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const std::string &reason,
+                                                bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(StringRef reason,
+                                                bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const Twine &reason,
+                                                bool gen_crash_diag = true);
+
+/// Installs a new bad alloc error handler that should be used whenever a
+/// bad alloc error, e.g. failing malloc/calloc, is encountered by LLVM.
+///
+/// The user can install a bad alloc handler, in order to define the behavior
+/// in case of failing allocations, e.g. throwing an exception. Note that this
+/// handler must not trigger any additional allocations itself.
+///
+/// If no error handler is installed the default is to print the error message
+/// to stderr, and call exit(1).  If an error handler is installed then it is
+/// the handler's responsibility to log the message; it will no longer be
+/// printed to stderr.  If the error handler returns, then exit(1) will be
+/// called.
+///
+/// \param user_data - An argument which will be passed to the installed error
+/// handler.
+void install_bad_alloc_error_handler(fatal_error_handler_t handler,
+                                     void *user_data = nullptr);
+
+/// Restores default bad alloc error handling behavior.
+void remove_bad_alloc_error_handler();
+
+/// Installs a std::new_handler that reports allocation failures through the
+/// bad alloc error handler installed above.
+void install_out_of_memory_new_handler();
+
+/// Reports a bad alloc error, calling any user defined bad alloc
+/// error handler. In contrast to the generic 'report_fatal_error'
+/// functions, this function is expected to return, e.g. when the
+/// user-defined error handler throws an exception.
+///
+/// Note: When throwing an exception in the bad alloc handler, make sure that
+/// the following unwind succeeds, e.g. do not trigger additional allocations
+/// in the unwind chain.
+///
+/// If no error handler is installed (the default), then a bad_alloc exception
+/// is thrown if LLVM is compiled with exception support; otherwise an
+/// assertion failure is triggered.
+void report_bad_alloc_error(const char *Reason, bool GenCrashDiag = true);
+
+/// This function calls abort(), and prints the optional message to stderr.
+/// Use the llvm_unreachable macro (that adds location info), instead of
+/// calling this function directly.
+LLVM_ATTRIBUTE_NORETURN void
+llvm_unreachable_internal(const char *msg = nullptr, const char *file = nullptr,
+                          unsigned line = 0);
+} // end namespace llvm
+
+/// Marks that the current location is not supposed to be reachable.
+/// In !NDEBUG builds, prints the message and location info to stderr.
+/// In NDEBUG builds, becomes an optimizer hint that the current location
+/// is not supposed to be reachable.  On compilers that don't support
+/// such hints, prints a reduced message instead.
+///
+/// Use this instead of assert(0).  It conveys intent more clearly and
+/// allows compilers to omit some unnecessary code.
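+///
+/// For example (illustrative):
+///
+///   @code{.cpp}
+///   switch (Kind) {
+///   case Kind::Scalar: return handleScalar();
+///   case Kind::Vector: return handleVector();
+///   }
+///   llvm_unreachable("Unhandled Kind!");
+///   @endcode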
+#ifndef NDEBUG
+#define llvm_unreachable(msg) \
+  ::llvm::llvm_unreachable_internal(msg, __FILE__, __LINE__)
+#elif defined(LLVM_BUILTIN_UNREACHABLE)
+#define llvm_unreachable(msg) LLVM_BUILTIN_UNREACHABLE
+#else
+#define llvm_unreachable(msg) ::llvm::llvm_unreachable_internal()
+#endif
+
+#endif // LLVM_SUPPORT_ERRORHANDLING_H
diff --git a/linux-x64/clang/include/llvm/Support/ErrorOr.h b/linux-x64/clang/include/llvm/Support/ErrorOr.h
new file mode 100644
index 0000000..061fb65
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ErrorOr.h
@@ -0,0 +1,291 @@
+//===- llvm/Support/ErrorOr.h - Error Smart Pointer -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+///
+/// Provides ErrorOr<T> smart pointer.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERROROR_H
+#define LLVM_SUPPORT_ERROROR_H
+
+#include "llvm/Support/AlignOf.h"
+#include <cassert>
+#include <system_error>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// \brief Stores a reference that can be changed.
+template <typename T>
+class ReferenceStorage {
+  T *Storage;
+
+public:
+  ReferenceStorage(T &Ref) : Storage(&Ref) {}
+
+  operator T &() const { return *Storage; }
+  T &get() const { return *Storage; }
+};
+
+/// \brief Represents either an error or a value T.
+///
+/// ErrorOr<T> is a pointer-like class that represents the result of an
+/// operation. The result is either an error, or a value of type T. This is
+/// designed to emulate the usage of returning a pointer where nullptr indicates
+/// failure. However, instead of just knowing that the operation failed, we also
+/// have an error_code and optional user data that describes why it failed.
+///
+/// It is used like the following.
+/// \code
+///   ErrorOr<Buffer> getBuffer();
+///
+///   auto buffer = getBuffer();
+///   if (error_code ec = buffer.getError())
+///     return ec;
+///   buffer->write("adena");
+/// \endcode
+///
+/// Implicit conversion to bool returns true if there is a usable value. The
+/// unary * and -> operators provide pointer-like access to the value. Accessing
+/// the value when there is an error has undefined behavior.
+///
+/// When T is a reference type the behavior is slightly different. The reference
+/// is held in a ReferenceStorage<std::remove_reference<T>::type>, and
+/// there is special handling to make operator -> work as if T was not a
+/// reference.
+///
+/// T cannot be an rvalue reference.
+template<class T>
+class ErrorOr {
+  template <class OtherT> friend class ErrorOr;
+
+  static const bool isRef = std::is_reference<T>::value;
+
+  using wrap = ReferenceStorage<typename std::remove_reference<T>::type>;
+
+public:
+  using storage_type = typename std::conditional<isRef, wrap, T>::type;
+
+private:
+  using reference = typename std::remove_reference<T>::type &;
+  using const_reference = const typename std::remove_reference<T>::type &;
+  using pointer = typename std::remove_reference<T>::type *;
+  using const_pointer = const typename std::remove_reference<T>::type *;
+
+public:
+  template <class E>
+  ErrorOr(E ErrorCode,
+          typename std::enable_if<std::is_error_code_enum<E>::value ||
+                                      std::is_error_condition_enum<E>::value,
+                                  void *>::type = nullptr)
+      : HasError(true) {
+    new (getErrorStorage()) std::error_code(make_error_code(ErrorCode));
+  }
+
+  ErrorOr(std::error_code EC) : HasError(true) {
+    new (getErrorStorage()) std::error_code(EC);
+  }
+
+  template <class OtherT>
+  ErrorOr(OtherT &&Val,
+          typename std::enable_if<std::is_convertible<OtherT, T>::value>::type
+              * = nullptr)
+      : HasError(false) {
+    new (getStorage()) storage_type(std::forward<OtherT>(Val));
+  }
+
+  ErrorOr(const ErrorOr &Other) {
+    copyConstruct(Other);
+  }
+
+  template <class OtherT>
+  ErrorOr(
+      const ErrorOr<OtherT> &Other,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    copyConstruct(Other);
+  }
+
+  template <class OtherT>
+  explicit ErrorOr(
+      const ErrorOr<OtherT> &Other,
+      typename std::enable_if<
+          !std::is_convertible<OtherT, const T &>::value>::type * = nullptr) {
+    copyConstruct(Other);
+  }
+
+  ErrorOr(ErrorOr &&Other) {
+    moveConstruct(std::move(Other));
+  }
+
+  template <class OtherT>
+  ErrorOr(
+      ErrorOr<OtherT> &&Other,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    moveConstruct(std::move(Other));
+  }
+
+  // This might eventually need SFINAE but it's more complex than is_convertible
+  // & I'm too lazy to write it right now.
+  template <class OtherT>
+  explicit ErrorOr(
+      ErrorOr<OtherT> &&Other,
+      typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    moveConstruct(std::move(Other));
+  }
+
+  ErrorOr &operator=(const ErrorOr &Other) {
+    copyAssign(Other);
+    return *this;
+  }
+
+  ErrorOr &operator=(ErrorOr &&Other) {
+    moveAssign(std::move(Other));
+    return *this;
+  }
+
+  ~ErrorOr() {
+    if (!HasError)
+      getStorage()->~storage_type();
+  }
+
+  /// \brief Return false if there is an error.
+  explicit operator bool() const {
+    return !HasError;
+  }
+
+  reference get() { return *getStorage(); }
+  const_reference get() const { return const_cast<ErrorOr<T> *>(this)->get(); }
+
+  std::error_code getError() const {
+    return HasError ? *getErrorStorage() : std::error_code();
+  }
+
+  pointer operator ->() {
+    return toPointer(getStorage());
+  }
+
+  const_pointer operator->() const { return toPointer(getStorage()); }
+
+  reference operator *() {
+    return *getStorage();
+  }
+
+  const_reference operator*() const { return *getStorage(); }
+
+private:
+  template <class OtherT>
+  void copyConstruct(const ErrorOr<OtherT> &Other) {
+    if (!Other.HasError) {
+      // Get the other value.
+      HasError = false;
+      new (getStorage()) storage_type(*Other.getStorage());
+    } else {
+      // Get other's error.
+      HasError = true;
+      new (getErrorStorage()) std::error_code(Other.getError());
+    }
+  }
+
+  template <class T1>
+  static bool compareThisIfSameType(const T1 &a, const T1 &b) {
+    return &a == &b;
+  }
+
+  template <class T1, class T2>
+  static bool compareThisIfSameType(const T1 &a, const T2 &b) {
+    return false;
+  }
+
+  template <class OtherT>
+  void copyAssign(const ErrorOr<OtherT> &Other) {
+    if (compareThisIfSameType(*this, Other))
+      return;
+
+    this->~ErrorOr();
+    new (this) ErrorOr(Other);
+  }
+
+  template <class OtherT>
+  void moveConstruct(ErrorOr<OtherT> &&Other) {
+    if (!Other.HasError) {
+      // Get the other value.
+      HasError = false;
+      new (getStorage()) storage_type(std::move(*Other.getStorage()));
+    } else {
+      // Get other's error.
+      HasError = true;
+      new (getErrorStorage()) std::error_code(Other.getError());
+    }
+  }
+
+  template <class OtherT>
+  void moveAssign(ErrorOr<OtherT> &&Other) {
+    if (compareThisIfSameType(*this, Other))
+      return;
+
+    this->~ErrorOr();
+    new (this) ErrorOr(std::move(Other));
+  }
+
+  pointer toPointer(pointer Val) {
+    return Val;
+  }
+
+  const_pointer toPointer(const_pointer Val) const { return Val; }
+
+  pointer toPointer(wrap *Val) {
+    return &Val->get();
+  }
+
+  const_pointer toPointer(const wrap *Val) const { return &Val->get(); }
+
+  storage_type *getStorage() {
+    assert(!HasError && "Cannot get value when an error exists!");
+    return reinterpret_cast<storage_type*>(TStorage.buffer);
+  }
+
+  const storage_type *getStorage() const {
+    assert(!HasError && "Cannot get value when an error exists!");
+    return reinterpret_cast<const storage_type*>(TStorage.buffer);
+  }
+
+  std::error_code *getErrorStorage() {
+    assert(HasError && "Cannot get error when a value exists!");
+    return reinterpret_cast<std::error_code *>(ErrorStorage.buffer);
+  }
+
+  const std::error_code *getErrorStorage() const {
+    return const_cast<ErrorOr<T> *>(this)->getErrorStorage();
+  }
+
+  union {
+    AlignedCharArrayUnion<storage_type> TStorage;
+    AlignedCharArrayUnion<std::error_code> ErrorStorage;
+  };
+  bool HasError : 1;
+};
+
+template <class T, class E>
+typename std::enable_if<std::is_error_code_enum<E>::value ||
+                            std::is_error_condition_enum<E>::value,
+                        bool>::type
+operator==(const ErrorOr<T> &Err, E Code) {
+  return Err.getError() == Code;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ERROROR_H
diff --git a/linux-x64/clang/include/llvm/Support/FileOutputBuffer.h b/linux-x64/clang/include/llvm/Support/FileOutputBuffer.h
new file mode 100644
index 0000000..6aed423
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FileOutputBuffer.h
@@ -0,0 +1,74 @@
+//=== FileOutputBuffer.h - File Output Buffer -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility for creating an in-memory buffer that will be written to a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+/// FileOutputBuffer - This interface provides a simple way to create an
+/// in-memory buffer which will be written to a file. During the lifetime of
+/// these objects, the content or existence of the specified file is
+/// undefined. That is, creating an OutputBuffer for a file may immediately
+/// remove the file.
+/// If the FileOutputBuffer is committed, the target file's content will become
+/// the buffer content at the time of the commit.  If the FileOutputBuffer is
+/// not committed, the file will be deleted in the FileOutputBuffer destructor.
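+///
+/// A usage sketch (the path, Data, and DataSize are illustrative):
+///
+///   @code{.cpp}
+///   auto BufOrErr = FileOutputBuffer::create("out.bin", DataSize);
+///   if (!BufOrErr)
+///     return BufOrErr.takeError();
+///   std::unique_ptr<FileOutputBuffer> Buf = std::move(*BufOrErr);
+///   memcpy(Buf->getBufferStart(), Data, DataSize);
+///   if (Error E = Buf->commit())
+///     return E;
+///   @endcode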
+class FileOutputBuffer {
+public:
+  enum {
+    F_executable = 1 ///< Set the 'x' bit on the resulting file.
+  };
+
+  /// Factory method to create an OutputBuffer object which manages a read/write
+  /// buffer of the specified size. When committed, the buffer will be written
+  /// to the file at the specified path.
+  static Expected<std::unique_ptr<FileOutputBuffer>>
+  create(StringRef FilePath, size_t Size, unsigned Flags = 0);
+
+  /// Returns a pointer to the start of the buffer.
+  virtual uint8_t *getBufferStart() const = 0;
+
+  /// Returns a pointer to the end of the buffer.
+  virtual uint8_t *getBufferEnd() const = 0;
+
+  /// Returns size of the buffer.
+  virtual size_t getBufferSize() const = 0;
+
+  /// Returns path where file will show up if buffer is committed.
+  StringRef getPath() const { return FinalPath; }
+
+  /// Flushes the content of the buffer to its file and deallocates the
+  /// buffer.  If commit() is not called before this object's destructor
+  /// is called, the file is deleted in the destructor.
+  virtual Error commit() = 0;
+
+  /// If this object was previously committed, the destructor just deletes
+  /// this object.  If this object was not committed, the destructor
+  /// deallocates the buffer and the target file is never written.
+  virtual ~FileOutputBuffer() {}
+
+protected:
+  FileOutputBuffer(StringRef Path) : FinalPath(Path) {}
+
+  std::string FinalPath;
+};
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_FILEOUTPUTBUFFER_H
diff --git a/linux-x64/clang/include/llvm/Support/FileSystem.h b/linux-x64/clang/include/llvm/Support/FileSystem.h
new file mode 100644
index 0000000..a9b02d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FileSystem.h
@@ -0,0 +1,1110 @@
+//===- llvm/Support/FileSystem.h - File System OS Concept -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::fs namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+// All functions return an error_code and perform their actual work via the
+// last out argument. The out argument is defined if and only if errc::success
+// is returned. A function may return any error code in the generic or system
+// category. However, they shall be equivalent to any error conditions listed
+// in each function's respective documentation if the condition applies. [ note:
+// this does not guarantee that error_code will be in the set of explicitly
+// listed codes, but it does guarantee that if any of the explicitly listed
+// errors occur, the correct error_code will be used ]. All functions may
+// return errc::not_enough_memory if there is not enough memory to complete the
+// operation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILESYSTEM_H
+#define LLVM_SUPPORT_FILESYSTEM_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MD5.h"
+#include <cassert>
+#include <cstdint>
+#include <ctime>
+#include <memory>
+#include <stack>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <vector>
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+namespace llvm {
+namespace sys {
+namespace fs {
+
+/// An enumeration for the file system's view of the type.
+enum class file_type {
+  status_error,
+  file_not_found,
+  regular_file,
+  directory_file,
+  symlink_file,
+  block_file,
+  character_file,
+  fifo_file,
+  socket_file,
+  type_unknown
+};
+
+/// space_info - Disk space information for a volume, in bytes.
+struct space_info {
+  uint64_t capacity;
+  uint64_t free;
+  uint64_t available;
+};
+
+enum perms {
+  no_perms = 0,
+  owner_read = 0400,
+  owner_write = 0200,
+  owner_exe = 0100,
+  owner_all = owner_read | owner_write | owner_exe,
+  group_read = 040,
+  group_write = 020,
+  group_exe = 010,
+  group_all = group_read | group_write | group_exe,
+  others_read = 04,
+  others_write = 02,
+  others_exe = 01,
+  others_all = others_read | others_write | others_exe,
+  all_read = owner_read | group_read | others_read,
+  all_write = owner_write | group_write | others_write,
+  all_exe = owner_exe | group_exe | others_exe,
+  all_all = owner_all | group_all | others_all,
+  set_uid_on_exe = 04000,
+  set_gid_on_exe = 02000,
+  sticky_bit = 01000,
+  all_perms = all_all | set_uid_on_exe | set_gid_on_exe | sticky_bit,
+  perms_not_known = 0xFFFF
+};
+
+// Helper functions so that you can use & and | to manipulate perms bits:
+inline perms operator|(perms l, perms r) {
+  return static_cast<perms>(static_cast<unsigned short>(l) |
+                            static_cast<unsigned short>(r));
+}
+inline perms operator&(perms l, perms r) {
+  return static_cast<perms>(static_cast<unsigned short>(l) &
+                            static_cast<unsigned short>(r));
+}
+inline perms &operator|=(perms &l, perms r) {
+  l = l | r;
+  return l;
+}
+inline perms &operator&=(perms &l, perms r) {
+  l = l & r;
+  return l;
+}
+inline perms operator~(perms x) {
+  // Avoid UB by explicitly truncating the (unsigned) ~ result.
+  return static_cast<perms>(
+      static_cast<unsigned short>(~static_cast<unsigned short>(x)));
+}
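+
+// For example, a conventional "rw-r--r--" (0644) mode can be composed and
+// queried with the operators above (a sketch):
+//
+// \code
+//   perms P = owner_read | owner_write | group_read | others_read;
+//   bool OwnerCanWrite = (P & owner_write) == owner_write; // true
+// \endcode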
+
+class UniqueID {
+  uint64_t Device;
+  uint64_t File;
+
+public:
+  UniqueID() = default;
+  UniqueID(uint64_t Device, uint64_t File) : Device(Device), File(File) {}
+
+  bool operator==(const UniqueID &Other) const {
+    return Device == Other.Device && File == Other.File;
+  }
+  bool operator!=(const UniqueID &Other) const { return !(*this == Other); }
+  bool operator<(const UniqueID &Other) const {
+    return std::tie(Device, File) < std::tie(Other.Device, Other.File);
+  }
+
+  uint64_t getDevice() const { return Device; }
+  uint64_t getFile() const { return File; }
+};
+
+/// Represents the result of a call to directory_iterator::status(). This is a
+/// subset of the information returned by a regular sys::fs::status() call, and
+/// represents the information provided by Windows FindFirstFile/FindNextFile.
+class basic_file_status {
+protected:
+  #if defined(LLVM_ON_UNIX)
+  time_t fs_st_atime = 0;
+  time_t fs_st_mtime = 0;
+  uid_t fs_st_uid = 0;
+  gid_t fs_st_gid = 0;
+  off_t fs_st_size = 0;
+  #elif defined (LLVM_ON_WIN32)
+  uint32_t LastAccessedTimeHigh = 0;
+  uint32_t LastAccessedTimeLow = 0;
+  uint32_t LastWriteTimeHigh = 0;
+  uint32_t LastWriteTimeLow = 0;
+  uint32_t FileSizeHigh = 0;
+  uint32_t FileSizeLow = 0;
+  #endif
+  file_type Type = file_type::status_error;
+  perms Perms = perms_not_known;
+
+public:
+  basic_file_status() = default;
+
+  explicit basic_file_status(file_type Type) : Type(Type) {}
+
+  #if defined(LLVM_ON_UNIX)
+  basic_file_status(file_type Type, perms Perms, time_t ATime, time_t MTime,
+                    uid_t UID, gid_t GID, off_t Size)
+      : fs_st_atime(ATime), fs_st_mtime(MTime), fs_st_uid(UID), fs_st_gid(GID),
+        fs_st_size(Size), Type(Type), Perms(Perms) {}
+#elif defined(LLVM_ON_WIN32)
+  basic_file_status(file_type Type, perms Perms, uint32_t LastAccessTimeHigh,
+                    uint32_t LastAccessTimeLow, uint32_t LastWriteTimeHigh,
+                    uint32_t LastWriteTimeLow, uint32_t FileSizeHigh,
+                    uint32_t FileSizeLow)
+      : LastAccessedTimeHigh(LastAccessTimeHigh),
+        LastAccessedTimeLow(LastAccessTimeLow),
+        LastWriteTimeHigh(LastWriteTimeHigh),
+        LastWriteTimeLow(LastWriteTimeLow), FileSizeHigh(FileSizeHigh),
+        FileSizeLow(FileSizeLow), Type(Type), Perms(Perms) {}
+  #endif
+
+  // getters
+  file_type type() const { return Type; }
+  perms permissions() const { return Perms; }
+  TimePoint<> getLastAccessedTime() const;
+  TimePoint<> getLastModificationTime() const;
+
+  #if defined(LLVM_ON_UNIX)
+  uint32_t getUser() const { return fs_st_uid; }
+  uint32_t getGroup() const { return fs_st_gid; }
+  uint64_t getSize() const { return fs_st_size; }
+  #elif defined (LLVM_ON_WIN32)
+  uint32_t getUser() const {
+    return 9999; // Not applicable to Windows; return a sentinel value.
+  }
+
+  uint32_t getGroup() const {
+    return 9999; // Not applicable to Windows; return a sentinel value.
+  }
+
+  uint64_t getSize() const {
+    return (uint64_t(FileSizeHigh) << 32) + FileSizeLow;
+  }
+  #endif
+
+  // setters
+  void type(file_type v) { Type = v; }
+  void permissions(perms p) { Perms = p; }
+};
+
+/// Represents the result of a call to sys::fs::status().
+class file_status : public basic_file_status {
+  friend bool equivalent(file_status A, file_status B);
+
+  #if defined(LLVM_ON_UNIX)
+  dev_t fs_st_dev = 0;
+  nlink_t fs_st_nlinks = 0;
+  ino_t fs_st_ino = 0;
+  #elif defined (LLVM_ON_WIN32)
+  uint32_t NumLinks = 0;
+  uint32_t VolumeSerialNumber = 0;
+  uint32_t FileIndexHigh = 0;
+  uint32_t FileIndexLow = 0;
+  #endif
+
+public:
+  file_status() = default;
+
+  explicit file_status(file_type Type) : basic_file_status(Type) {}
+
+  #if defined(LLVM_ON_UNIX)
+  file_status(file_type Type, perms Perms, dev_t Dev, nlink_t Links, ino_t Ino,
+              time_t ATime, time_t MTime, uid_t UID, gid_t GID, off_t Size)
+      : basic_file_status(Type, Perms, ATime, MTime, UID, GID, Size),
+        fs_st_dev(Dev), fs_st_nlinks(Links), fs_st_ino(Ino) {}
+  #elif defined(LLVM_ON_WIN32)
+  file_status(file_type Type, perms Perms, uint32_t LinkCount,
+              uint32_t LastAccessTimeHigh, uint32_t LastAccessTimeLow,
+              uint32_t LastWriteTimeHigh, uint32_t LastWriteTimeLow,
+              uint32_t VolumeSerialNumber, uint32_t FileSizeHigh,
+              uint32_t FileSizeLow, uint32_t FileIndexHigh,
+              uint32_t FileIndexLow)
+      : basic_file_status(Type, Perms, LastAccessTimeHigh, LastAccessTimeLow,
+                          LastWriteTimeHigh, LastWriteTimeLow, FileSizeHigh,
+                          FileSizeLow),
+        NumLinks(LinkCount), VolumeSerialNumber(VolumeSerialNumber),
+        FileIndexHigh(FileIndexHigh), FileIndexLow(FileIndexLow) {}
+  #endif
+
+  UniqueID getUniqueID() const;
+  uint32_t getLinkCount() const;
+};
+
+/// @}
+/// @name Physical Operators
+/// @{
+
+/// @brief Make \a path an absolute path.
+///
+/// Makes \a path absolute using the \a current_directory if it is not already.
+/// An empty \a path will result in the \a current_directory.
+///
+/// /absolute/path   => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+/// @returns errc::success if \a path has been made absolute, otherwise a
+///          platform-specific error_code.
+std::error_code make_absolute(const Twine &current_directory,
+                              SmallVectorImpl<char> &path);
+
+/// @brief Make \a path an absolute path.
+///
+/// Makes \a path absolute using the current directory if it is not already. An
+/// empty \a path will result in the current directory.
+///
+/// /absolute/path   => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+/// @returns errc::success if \a path has been made absolute, otherwise a
+///          platform-specific error_code.
+std::error_code make_absolute(SmallVectorImpl<char> &path);
+
+/// @brief Create all the non-existent directories in path.
+///
+/// @param path Directories to create.
+/// @returns errc::success if is_directory(path), otherwise a platform
+///          specific error_code. If IgnoreExisting is false, also returns
+///          error if the directory already existed.
+std::error_code create_directories(const Twine &path,
+                                   bool IgnoreExisting = true,
+                                   perms Perms = owner_all | group_all);
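+
+// Sketch: create a nested directory tree, tolerating directories that already
+// exist (the "a/b/c" path is hypothetical):
+//
+// \code
+//   if (std::error_code EC = create_directories("a/b/c"))
+//     report_fatal_error("cannot create a/b/c: " + EC.message());
+// \endcode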
+
+/// @brief Create the directory in path.
+///
+/// @param path Directory to create.
+/// @returns errc::success if is_directory(path), otherwise a platform
+///          specific error_code. If IgnoreExisting is false, also returns
+///          error if the directory already existed.
+std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
+                                 perms Perms = owner_all | group_all);
+
+/// @brief Create a link from \a from to \a to.
+///
+/// The link may be a soft or a hard link, depending on the platform. The caller
+/// may not assume which one. Currently, on Windows it creates a hard link since
+/// soft links require extra privileges. On Unix, it creates a soft link since
+/// hard links don't work on SMB file systems.
+///
+/// @param to The path to hard link to.
+/// @param from The path to hard link from. This is created.
+/// @returns errc::success if the link was created, otherwise a platform
+/// specific error_code.
+std::error_code create_link(const Twine &to, const Twine &from);
+
+/// Create a hard link from \a from to \a to, or return an error.
+///
+/// @param to The path to hard link to.
+/// @param from The path to hard link from. This is created.
+/// @returns errc::success if the link was created, otherwise a platform
+/// specific error_code.
+std::error_code create_hard_link(const Twine &to, const Twine &from);
+
+/// @brief Collapse all . and .. patterns, resolve all symlinks, and optionally
+///        expand ~ expressions to the user's home directory.
+///
+/// @param path The path to resolve.
+/// @param output The location to store the resolved path.
+/// @param expand_tilde If true, resolves ~ expressions to the user's home
+///                     directory.
+std::error_code real_path(const Twine &path, SmallVectorImpl<char> &output,
+                          bool expand_tilde = false);
+
+/// @brief Get the current path.
+///
+/// @param result Holds the current path on return.
+/// @returns errc::success if the current path has been stored in result,
+///          otherwise a platform-specific error_code.
+std::error_code current_path(SmallVectorImpl<char> &result);
+
+/// @brief Set the current path.
+///
+/// @param path The path to set.
+/// @returns errc::success if the current path was successfully set,
+///          otherwise a platform-specific error_code.
+std::error_code set_current_path(const Twine &path);
+
+/// @brief Remove path. Equivalent to POSIX remove().
+///
+/// @param path Input path.
+/// @returns errc::success if path has been removed or didn't exist, otherwise a
+///          platform-specific error code. If IgnoreNonExisting is false, also
+///          returns error if the file didn't exist.
+std::error_code remove(const Twine &path, bool IgnoreNonExisting = true);
+
+/// @brief Recursively delete a directory.
+///
+/// @param path Input path.
+/// @returns errc::success if path has been removed or didn't exist, otherwise a
+///          platform-specific error code.
+std::error_code remove_directories(const Twine &path, bool IgnoreErrors = true);
+
+/// @brief Rename \a from to \a to.
+///
+/// Files are renamed as if by POSIX rename(), except that on Windows there may
+/// be a short interval of time during which the destination file does not
+/// exist.
+///
+/// @param from The path to rename from.
+/// @param to The path to rename to. This is created.
+std::error_code rename(const Twine &from, const Twine &to);
+
+/// @brief Copy the contents of \a From to \a To.
+///
+/// @param From The path to copy from.
+/// @param To The path to copy to. This is created.
+std::error_code copy_file(const Twine &From, const Twine &To);
+
+/// @brief Resize path to size. File is resized as if by POSIX truncate().
+///
+/// @param FD Input file descriptor.
+/// @param Size Size to resize to.
+/// @returns errc::success if \a path has been resized to \a size, otherwise a
+///          platform-specific error_code.
+std::error_code resize_file(int FD, uint64_t Size);
+
+/// @brief Compute an MD5 hash of a file's contents.
+///
+/// @param FD Input file descriptor.
+/// @returns An MD5Result with the hash computed, if successful, otherwise a
+///          std::error_code.
+ErrorOr<MD5::MD5Result> md5_contents(int FD);
+
+/// @brief Version of compute_md5 that doesn't require an open file descriptor.
+ErrorOr<MD5::MD5Result> md5_contents(const Twine &Path);
+
+/// @}
+/// @name Physical Observers
+/// @{
+
+/// @brief Does file exist?
+///
+/// @param status A basic_file_status previously returned from stat.
+/// @returns True if the file represented by status exists, false if it does
+///          not.
+bool exists(const basic_file_status &status);
+
+enum class AccessMode { Exist, Write, Execute };
+
+/// @brief Can the file be accessed?
+///
+/// @param Path Input path.
+/// @returns errc::success if the path can be accessed, otherwise a
+///          platform-specific error_code.
+std::error_code access(const Twine &Path, AccessMode Mode);
+
+/// @brief Does file exist?
+///
+/// @param Path Input path.
+/// @returns True if it exists, false otherwise.
+inline bool exists(const Twine &Path) {
+  return !access(Path, AccessMode::Exist);
+}
+
+/// @brief Can we execute this file?
+///
+/// @param Path Input path.
+/// @returns True if we can execute it, false otherwise.
+bool can_execute(const Twine &Path);
+
+/// @brief Can we write this file?
+///
+/// @param Path Input path.
+/// @returns True if we can write to it, false otherwise.
+inline bool can_write(const Twine &Path) {
+  return !access(Path, AccessMode::Write);
+}
+
+/// @brief Do file_status's represent the same thing?
+///
+/// @param A Input file_status.
+/// @param B Input file_status.
+///
+/// assert(status_known(A) || status_known(B));
+///
+/// @returns True if A and B both represent the same file system entity, false
+///          otherwise.
+bool equivalent(file_status A, file_status B);
+
+/// @brief Do paths represent the same thing?
+///
+/// assert(status_known(A) || status_known(B));
+///
+/// @param A Input path A.
+/// @param B Input path B.
+/// @param result Set to true if stat(A) and stat(B) have the same device and
+///               inode (or equivalent).
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code equivalent(const Twine &A, const Twine &B, bool &result);
+
+/// @brief Simpler version of equivalent for clients that don't need to
+///        differentiate between an error and false.
+inline bool equivalent(const Twine &A, const Twine &B) {
+  bool result;
+  return !equivalent(A, B, result) && result;
+}
+
+/// @brief Is the file mounted on a local filesystem?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is on fixed media such as a hard disk,
+///               false if it is not.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform specific error_code.
+std::error_code is_local(const Twine &path, bool &result);
+
+/// @brief Version of is_local accepting an open file descriptor.
+std::error_code is_local(int FD, bool &result);
+
+/// @brief Simpler version of is_local for clients that don't need to
+///        differentiate between an error and false.
+inline bool is_local(const Twine &Path) {
+  bool Result;
+  return !is_local(Path, Result) && Result;
+}
+
+/// @brief Simpler version of is_local accepting an open file descriptor for
+///        clients that don't need to differentiate between an error and false.
+inline bool is_local(int FD) {
+  bool Result;
+  return !is_local(FD, Result) && Result;
+}
+
+/// @brief Get the file type of a path.
+///
+/// @param Path The path to get the type of.
+/// @param Follow For symbolic links, indicates whether to return the file type
+///               of the link itself, or of the target.
+/// @returns A value from the file_type enumeration indicating the type of file.
+file_type get_file_type(const Twine &Path, bool Follow = true);
+
+/// @brief Does status represent a directory?
+///
+/// @param status A basic_file_status previously returned from status.
+/// @returns status.type() == file_type::directory_file.
+bool is_directory(const basic_file_status &status);
+
+/// @brief Is path a directory?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a directory (after following
+///               symlinks), false if it is not. Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code is_directory(const Twine &path, bool &result);
+
+/// @brief Simpler version of is_directory for clients that don't need to
+///        differentiate between an error and false.
+inline bool is_directory(const Twine &Path) {
+  bool Result;
+  return !is_directory(Path, Result) && Result;
+}
+
+/// @brief Does status represent a regular file?
+///
+/// @param status A basic_file_status previously returned from status.
+/// @returns status_known(status) && status.type() == file_type::regular_file.
+bool is_regular_file(const basic_file_status &status);
+
+/// @brief Is path a regular file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a regular file (after following
+///               symlinks), false if it is not. Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code is_regular_file(const Twine &path, bool &result);
+
+/// @brief Simpler version of is_regular_file for clients that don't need to
+///        differentiate between an error and false.
+inline bool is_regular_file(const Twine &Path) {
+  bool Result;
+  if (is_regular_file(Path, Result))
+    return false;
+  return Result;
+}
+
+/// @brief Does status represent a symlink file?
+///
+/// @param status A basic_file_status previously returned from status.
+/// @returns status_known(status) && status.type() == file_type::symlink_file.
+bool is_symlink_file(const basic_file_status &status);
+
+/// @brief Is path a symlink file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a symlink file, false if it is not.
+///               Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code is_symlink_file(const Twine &path, bool &result);
+
+/// @brief Simpler version of is_symlink_file for clients that don't need to
+///        differentiate between an error and false.
+inline bool is_symlink_file(const Twine &Path) {
+  bool Result;
+  if (is_symlink_file(Path, Result))
+    return false;
+  return Result;
+}
+
+/// @brief Does this status represent something that exists but is not a
+///        directory or regular file?
+///
+/// @param status A basic_file_status previously returned from status.
+/// @returns exists(s) && !is_regular_file(s) && !is_directory(s)
+bool is_other(const basic_file_status &status);
+
+/// @brief Is path something that exists but is not a directory,
+///        regular file, or symlink?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path exists, but is not a directory, regular
+///               file, or a symlink, false if it does not. Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code is_other(const Twine &path, bool &result);
+
+/// @brief Get file status as if by POSIX stat().
+///
+/// @param path Input path.
+/// @param result Set to the file status.
+/// @param follow When true, follows symlinks.  Otherwise, the symlink itself is
+///               statted.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code status(const Twine &path, file_status &result,
+                       bool follow = true);
+
+/// @brief A version for when a file descriptor is already available.
+std::error_code status(int FD, file_status &Result);
+
+/// @brief Set file permissions.
+///
+/// @param Path File to set permissions on.
+/// @param Permissions New file permissions.
+/// @returns errc::success if the permissions were successfully set, otherwise
+///          a platform-specific error_code.
+/// @note On Windows, all permissions except *_write are ignored. Using any of
+///       owner_write, group_write, or all_write will make the file writable.
+///       Otherwise, the file will be marked as read-only.
+std::error_code setPermissions(const Twine &Path, perms Permissions);
+
+/// @brief Get file permissions.
+///
+/// @param Path File to get permissions from.
+/// @returns the permissions if they were successfully retrieved, otherwise a
+///          platform-specific error_code.
+/// @note On Windows, if the file does not have the FILE_ATTRIBUTE_READONLY
+///       attribute, all_all will be returned. Otherwise, all_read | all_exe
+///       will be returned.
+ErrorOr<perms> getPermissions(const Twine &Path);
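+
+// Sketch: add the owner-execute bit to an existing file (the path is
+// hypothetical):
+//
+// \code
+//   if (ErrorOr<perms> P = getPermissions("bin/tool"))
+//     setPermissions("bin/tool", *P | owner_exe);
+// \endcode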
+
+/// @brief Get file size.
+///
+/// @param Path Input path.
+/// @param Result Set to the size of the file in \a Path.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
+  file_status Status;
+  std::error_code EC = status(Path, Status);
+  if (EC)
+    return EC;
+  Result = Status.getSize();
+  return std::error_code();
+}
+
+/// @brief Set the file modification and access time.
+///
+/// @returns errc::success if the file times were successfully set, otherwise a
+///          platform-specific error_code or errc::function_not_supported on
+///          platforms where the functionality isn't available.
+std::error_code setLastModificationAndAccessTime(int FD, TimePoint<> Time);
+
+/// @brief Is status available?
+///
+/// @param s Input file status.
+/// @returns True if \a s.type() != file_type::status_error.
+bool status_known(const basic_file_status &s);
+
+/// @brief Is status available?
+///
+/// @param path Input path.
+/// @param result Set to true if status() != status_error.
+/// @returns errc::success if result has been successfully set, otherwise a
+///          platform-specific error_code.
+std::error_code status_known(const Twine &path, bool &result);
+
+enum OpenFlags : unsigned {
+  F_None = 0,
+
+  /// F_Excl - When opening a file, this flag makes raw_fd_ostream
+  /// report an error if the file already exists.
+  F_Excl = 1,
+
+  /// F_Append - When opening a file, if it already exists append to the
+  /// existing file instead of returning an error.  This may not be specified
+  /// with F_Excl.
+  F_Append = 2,
+
+  /// F_NoTrunc - When opening a file, if it already exists don't truncate
+  /// the file contents.  F_Append implies F_NoTrunc, but F_Append seeks to
+  /// the end of the file, which F_NoTrunc doesn't.
+  F_NoTrunc = 4,
+
+  /// The file should be opened in text mode on platforms that make this
+  /// distinction.
+  F_Text = 8,
+
+  /// Open the file for read and write.
+  F_RW = 16,
+
+  /// Delete the file on close. Only makes a difference on Windows.
+  F_Delete = 32
+};
+
+/// @brief Create a uniquely named file.
+///
+/// Generates a unique path suitable for a temporary file and then opens it as a
+/// file. The name is based on \a model with '%' replaced by a random char in
+/// [0-9a-f]. If \a model is not an absolute path, the temporary file will be
+/// created in the current directory.
+///
+/// Example: clang-%%-%%-%%-%%-%%.s => clang-a0-b1-c2-d3-e4.s
+///
+/// This is an atomic operation. Either the file is created and opened, or the
+/// file system is left untouched.
+///
+/// The intended use is for files that are to be kept, possibly after
+/// renaming them. For example, when running 'clang -c foo.o', the file can
+/// be first created as foo-abc123.o and then renamed.
+///
+/// @param Model Name to base unique path off of.
+/// @param ResultFD Set to the opened file's file descriptor.
+/// @param ResultPath Set to the opened file's absolute path.
+/// @returns errc::success if Result{FD,Path} have been successfully set,
+///          otherwise a platform-specific error_code.
+std::error_code createUniqueFile(const Twine &Model, int &ResultFD,
+                                 SmallVectorImpl<char> &ResultPath,
+                                 unsigned Mode = all_read | all_write,
+                                 sys::fs::OpenFlags Flags = sys::fs::F_RW);
+
+/// @brief Simpler version for clients that don't want an open file. An empty
+/// file will still be created.
+std::error_code createUniqueFile(const Twine &Model,
+                                 SmallVectorImpl<char> &ResultPath,
+                                 unsigned Mode = all_read | all_write);
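+
+// Sketch of the intended pattern (the model string and final name are
+// illustrative):
+//
+// \code
+//   int FD;
+//   SmallString<128> Path;
+//   if (std::error_code EC = createUniqueFile("foo-%%%%%%.o", FD, Path))
+//     ...; // no file was created
+//   // ... write through FD, close it, then e.g. rename(Path, "foo.o") ...
+// \endcode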
+
+/// Represents a temporary file.
+///
+/// The temporary file must be eventually discarded or given a final name and
+/// kept.
+///
+/// The destructor doesn't implicitly discard because there is no way to
+/// properly handle errors in a destructor.
+class TempFile {
+  bool Done = false;
+  TempFile(StringRef Name, int FD);
+
+public:
+  /// This creates a temporary file with createUniqueFile and schedules it for
+  /// deletion with sys::RemoveFileOnSignal.
+  static Expected<TempFile> create(const Twine &Model,
+                                   unsigned Mode = all_read | all_write);
+  TempFile(TempFile &&Other);
+  TempFile &operator=(TempFile &&Other);
+
+  // Name of the temporary file.
+  std::string TmpName;
+
+  // The open file descriptor.
+  int FD = -1;
+
+  // Keep this with the given name.
+  Error keep(const Twine &Name);
+
+  // Keep this with the temporary name.
+  Error keep();
+
+  // Delete the file.
+  Error discard();
+
+  // This checks that keep or delete was called.
+  ~TempFile();
+};
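+
+// A sketch of the intended lifecycle (names are illustrative):
+//
+// \code
+//   Expected<TempFile> T = TempFile::create("output-%%%%%%.tmp");
+//   if (!T)
+//     ...; // handle T.takeError()
+//   // ... write through T->FD ...
+//   if (Error E = T->keep("output.txt")) // or: T->discard()
+//     ...; // the temporary could not be kept
+// \endcode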
+
+/// @brief Create a file in the system temporary directory.
+///
+/// The filename is of the form prefix-random_chars.suffix. Since the directory
+/// is not known to the caller, Prefix and Suffix cannot have path separators.
+/// The files are created with mode 0600.
+///
+/// This should be used for things like a temporary .s that is removed after
+/// running the assembler.
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+                                    int &ResultFD,
+                                    SmallVectorImpl<char> &ResultPath,
+                                    sys::fs::OpenFlags Flags = sys::fs::F_RW);
+
+/// @brief Simpler version for clients that don't want an open file. An empty
+/// file will still be created.
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+                                    SmallVectorImpl<char> &ResultPath);
+
+std::error_code createUniqueDirectory(const Twine &Prefix,
+                                      SmallVectorImpl<char> &ResultPath);
+
+/// @brief Get a unique name not currently existing in the filesystem. This is
+/// subject to race conditions; prefer createUniqueFile instead.
+///
+/// Similar to createUniqueFile, but instead of creating a file it only
+/// checks whether one exists. This function is subject to race conditions; if
+/// you want to use the returned name to actually create a file, use
+/// createUniqueFile instead.
+std::error_code getPotentiallyUniqueFileName(const Twine &Model,
+                                             SmallVectorImpl<char> &ResultPath);
+
+/// @brief Get a unique temporary file name not currently existing in the
+/// filesystem. This is subject to race conditions; prefer createTemporaryFile
+/// instead.
+///
+/// Similar to createTemporaryFile, but instead of creating a file it only
+/// checks whether one exists. This function is subject to race conditions; if
+/// you want to use the returned name to actually create a file, use
+/// createTemporaryFile instead.
+std::error_code
+getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix,
+                                 SmallVectorImpl<char> &ResultPath);
+
+inline OpenFlags operator|(OpenFlags A, OpenFlags B) {
+  return OpenFlags(unsigned(A) | unsigned(B));
+}
+
+inline OpenFlags &operator|=(OpenFlags &A, OpenFlags B) {
+  A = A | B;
+  return A;
+}
+
+std::error_code openFileForWrite(const Twine &Name, int &ResultFD,
+                                 OpenFlags Flags, unsigned Mode = 0666);
+
+std::error_code openFileForRead(const Twine &Name, int &ResultFD,
+                                SmallVectorImpl<char> *RealPath = nullptr);
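+
+// Sketch: append to a log file, creating it if needed (the path is
+// hypothetical; the descriptor must be closed by the caller):
+//
+// \code
+//   int FD;
+//   if (!openFileForWrite("log.txt", FD, F_Append))
+//     ...; // write through FD, then close it
+// \endcode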
+
+std::error_code getUniqueID(const Twine Path, UniqueID &Result);
+
+/// @brief Get disk space usage information.
+///
+/// Note: Users must be careful about "Time Of Check, Time Of Use" kinds of bugs.
+/// Note: Windows reports results according to the quota allocated to the user.
+///
+/// @param Path Input path.
+/// @returns a space_info structure filled with the capacity, free, and
+/// available space on the device \a Path is on. A platform specific error_code
+/// is returned on error.
+ErrorOr<space_info> disk_space(const Twine &Path);
+
+/// This class represents a memory mapped file. It is based on
+/// boost::iostreams::mapped_file.
+class mapped_file_region {
+public:
+  enum mapmode {
+    readonly, ///< May only access map via const_data as read only.
+    readwrite, ///< May access map via data and modify it. Written to path.
+    priv ///< May modify via data, but changes are lost on destruction.
+  };
+
+private:
+  /// Platform-specific mapping state.
+  size_t Size;
+  void *Mapping;
+  int FD;
+  mapmode Mode;
+
+  std::error_code init(int FD, uint64_t Offset, mapmode Mode);
+
+public:
+  mapped_file_region() = delete;
+  mapped_file_region(mapped_file_region&) = delete;
+  mapped_file_region &operator =(mapped_file_region&) = delete;
+
+  /// \param fd An open file descriptor to map. It must have been opened in a
+  ///   mode compatible with \p mode.
+  mapped_file_region(int fd, mapmode mode, size_t length, uint64_t offset,
+                     std::error_code &ec);
+
+  ~mapped_file_region();
+
+  size_t size() const;
+  char *data() const;
+
+  /// Get a const view of the data. Modifying this memory has undefined
+  /// behavior.
+  const char *const_data() const;
+
+  /// \returns The minimum alignment that \p offset must have.
+  static int alignment();
+};
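+
+// Sketch: map an already-open descriptor read-only (FD and Size are assumed
+// to come from openFileForRead/file_size; process() is hypothetical):
+//
+// \code
+//   std::error_code EC;
+//   mapped_file_region Map(FD, mapped_file_region::readonly,
+//                          /*length=*/Size, /*offset=*/0, EC);
+//   if (!EC)
+//     process(Map.const_data(), Map.size());
+// \endcode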
+
+/// Return the path to the main executable, given the value of argv[0] from
+/// program startup and the address of main itself. In extremis, this function
+/// may fail and return an empty path.
+std::string getMainExecutable(const char *argv0, void *MainExecAddr);
+
+/// @}
+/// @name Iterators
+/// @{
+
+/// directory_entry - A single entry in a directory. Caches the status either
+/// from the result of the iteration syscall, or the first time status is
+/// called.
+class directory_entry {
+  std::string Path;
+  bool FollowSymlinks;
+  basic_file_status Status;
+
+public:
+  explicit directory_entry(const Twine &path, bool follow_symlinks = true,
+                           basic_file_status st = basic_file_status())
+      : Path(path.str()), FollowSymlinks(follow_symlinks), Status(st) {}
+
+  directory_entry() = default;
+
+  void assign(const Twine &path, basic_file_status st = basic_file_status()) {
+    Path = path.str();
+    Status = st;
+  }
+
+  void replace_filename(const Twine &filename,
+                        basic_file_status st = basic_file_status());
+
+  const std::string &path() const { return Path; }
+  ErrorOr<basic_file_status> status() const;
+
+  bool operator==(const directory_entry& rhs) const { return Path == rhs.Path; }
+  bool operator!=(const directory_entry& rhs) const { return !(*this == rhs); }
+  bool operator< (const directory_entry& rhs) const;
+  bool operator<=(const directory_entry& rhs) const;
+  bool operator> (const directory_entry& rhs) const;
+  bool operator>=(const directory_entry& rhs) const;
+};
+
+namespace detail {
+
+  struct DirIterState;
+
+  std::error_code directory_iterator_construct(DirIterState &, StringRef, bool);
+  std::error_code directory_iterator_increment(DirIterState &);
+  std::error_code directory_iterator_destruct(DirIterState &);
+
+  /// Keeps state for the directory_iterator.
+  struct DirIterState {
+    ~DirIterState() {
+      directory_iterator_destruct(*this);
+    }
+
+    intptr_t IterationHandle = 0;
+    directory_entry CurrentEntry;
+  };
+
+} // end namespace detail
+
+/// directory_iterator - Iterates through the entries in path. There is no
+/// operator++ because we need an error_code. If it's really needed we can make
+/// it call report_fatal_error on error.
+class directory_iterator {
+  std::shared_ptr<detail::DirIterState> State;
+  bool FollowSymlinks = true;
+
+public:
+  explicit directory_iterator(const Twine &path, std::error_code &ec,
+                              bool follow_symlinks = true)
+      : FollowSymlinks(follow_symlinks) {
+    State = std::make_shared<detail::DirIterState>();
+    SmallString<128> path_storage;
+    ec = detail::directory_iterator_construct(
+        *State, path.toStringRef(path_storage), FollowSymlinks);
+  }
+
+  explicit directory_iterator(const directory_entry &de, std::error_code &ec,
+                              bool follow_symlinks = true)
+      : FollowSymlinks(follow_symlinks) {
+    State = std::make_shared<detail::DirIterState>();
+    ec =
+        detail::directory_iterator_construct(*State, de.path(), FollowSymlinks);
+  }
+
+  /// Construct end iterator.
+  directory_iterator() = default;
+
+  // No operator++ because we need error_code.
+  directory_iterator &increment(std::error_code &ec) {
+    ec = directory_iterator_increment(*State);
+    return *this;
+  }
+
+  const directory_entry &operator*() const { return State->CurrentEntry; }
+  const directory_entry *operator->() const { return &State->CurrentEntry; }
+
+  bool operator==(const directory_iterator &RHS) const {
+    if (State == RHS.State)
+      return true;
+    if (!RHS.State)
+      return State->CurrentEntry == directory_entry();
+    if (!State)
+      return RHS.State->CurrentEntry == directory_entry();
+    return State->CurrentEntry == RHS.State->CurrentEntry;
+  }
+
+  bool operator!=(const directory_iterator &RHS) const {
+    return !(*this == RHS);
+  }
+  // Other members as required by
+  // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
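+
+// The canonical loop therefore threads an error_code through increment() (a
+// sketch; consider() is a hypothetical callback):
+//
+// \code
+//   std::error_code EC;
+//   for (directory_iterator I("some/dir", EC), E; I != E && !EC;
+//        I.increment(EC))
+//     consider(I->path());
+// \endcode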
+
+namespace detail {
+
+  /// Keeps state for the recursive_directory_iterator.
+  struct RecDirIterState {
+    std::stack<directory_iterator, std::vector<directory_iterator>> Stack;
+    uint16_t Level = 0;
+    bool HasNoPushRequest = false;
+  };
+
+} // end namespace detail
+
+/// recursive_directory_iterator - Same as directory_iterator except that it
+/// recurses down into child directories.
+class recursive_directory_iterator {
+  std::shared_ptr<detail::RecDirIterState> State;
+  bool Follow;
+
+public:
+  recursive_directory_iterator() = default;
+  explicit recursive_directory_iterator(const Twine &path, std::error_code &ec,
+                                        bool follow_symlinks = true)
+      : State(std::make_shared<detail::RecDirIterState>()),
+        Follow(follow_symlinks) {
+    State->Stack.push(directory_iterator(path, ec, Follow));
+    if (State->Stack.top() == directory_iterator())
+      State.reset();
+  }
+
+  // No operator++ because we need error_code.
+  recursive_directory_iterator &increment(std::error_code &ec) {
+    const directory_iterator end_itr = {};
+
+    if (State->HasNoPushRequest)
+      State->HasNoPushRequest = false;
+    else {
+      ErrorOr<basic_file_status> st = State->Stack.top()->status();
+      if (!st) {
+        ec = st.getError(); // Propagate the status() failure to the caller.
+        return *this;
+      }
+      if (is_directory(*st)) {
+        State->Stack.push(directory_iterator(*State->Stack.top(), ec, Follow));
+        if (ec) return *this;
+        if (State->Stack.top() != end_itr) {
+          ++State->Level;
+          return *this;
+        }
+        State->Stack.pop();
+      }
+    }
+
+    while (!State->Stack.empty()
+           && State->Stack.top().increment(ec) == end_itr) {
+      State->Stack.pop();
+      --State->Level;
+    }
+
+    // Check if we are done. If so, create an end iterator.
+    if (State->Stack.empty())
+      State.reset();
+
+    return *this;
+  }
+
+  const directory_entry &operator*() const { return *State->Stack.top(); }
+  const directory_entry *operator->() const { return &*State->Stack.top(); }
+
+  // observers
+  /// Gets the current level. Starting path is at level 0.
+  int level() const { return State->Level; }
+
+  /// Returns true if no_push has been called for this directory_entry.
+  bool no_push_request() const { return State->HasNoPushRequest; }
+
+  // modifiers
+  /// Goes up one level if Level > 0.
+  void pop() {
+    assert(State && "Cannot pop an end iterator!");
+    assert(State->Level > 0 && "Cannot pop an iterator with level < 1");
+
+    const directory_iterator end_itr = {};
+    std::error_code ec;
+    do {
+      if (ec)
+        report_fatal_error("Error incrementing directory iterator.");
+      State->Stack.pop();
+      --State->Level;
+    } while (!State->Stack.empty()
+             && State->Stack.top().increment(ec) == end_itr);
+
+    // Check if we are done. If so, create an end iterator.
+    if (State->Stack.empty())
+      State.reset();
+  }
+
+  /// Does not go down into the current directory_entry.
+  void no_push() { State->HasNoPushRequest = true; }
+
+  bool operator==(const recursive_directory_iterator &RHS) const {
+    return State == RHS.State;
+  }
+
+  bool operator!=(const recursive_directory_iterator &RHS) const {
+    return !(*this == RHS);
+  }
+  // Other members as required by
+  // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
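+
+// Usage mirrors directory_iterator, with level() and no_push() controlling
+// the walk (a sketch; visit() is a hypothetical callback):
+//
+// \code
+//   std::error_code EC;
+//   for (recursive_directory_iterator I("some/dir", EC), E; I != E && !EC;
+//        I.increment(EC)) {
+//     if (I.level() > 1)
+//       I.no_push(); // do not descend into this entry
+//     visit(I->path());
+//   }
+// \endcode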
+
+/// @}
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_FILESYSTEM_H
diff --git a/linux-x64/clang/include/llvm/Support/FileUtilities.h b/linux-x64/clang/include/llvm/Support/FileUtilities.h
new file mode 100644
index 0000000..2ee2c60
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FileUtilities.h
@@ -0,0 +1,78 @@
+//===- llvm/Support/FileUtilities.h - File System Utilities -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of utility functions which are useful for doing
+// various things with files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILEUTILITIES_H
+#define LLVM_SUPPORT_FILEUTILITIES_H
+
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+namespace llvm {
+
+  /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if
+  /// the files match, 1 if they are different, and 2 if there is a file error.
+  /// This function allows you to specify an absolute and relative FP error that
+  /// is allowed to exist.  If \p Error is non-null, it is set to an error
+  /// message if an error occurs or if the files are different.
+  ///
+  int DiffFilesWithTolerance(StringRef FileA,
+                             StringRef FileB,
+                             double AbsTol, double RelTol,
+                             std::string *Error = nullptr);
+
+
+  /// FileRemover - This class is a simple object meant to be stack allocated.
+  /// When it goes out of scope, by normal control flow or otherwise, it
+  /// removes the specified filename (if deleteIt is true).
+  ///
+  class FileRemover {
+    SmallString<128> Filename;
+    bool DeleteIt;
+  public:
+    FileRemover() : DeleteIt(false) {}
+
+    explicit FileRemover(const Twine& filename, bool deleteIt = true)
+      : DeleteIt(deleteIt) {
+      filename.toVector(Filename);
+    }
+
+    ~FileRemover() {
+      if (DeleteIt) {
+        // Ignore problems deleting the file.
+        sys::fs::remove(Filename);
+      }
+    }
+
+    /// setFile - Give ownership of the file to the FileRemover so it will
+    /// be removed when the object is destroyed.  If the FileRemover already
+    /// had ownership of a file, remove it first.
+    void setFile(const Twine& filename, bool deleteIt = true) {
+      if (DeleteIt) {
+        // Ignore problems deleting the file.
+        sys::fs::remove(Filename);
+      }
+
+      Filename.clear();
+      filename.toVector(Filename);
+      DeleteIt = deleteIt;
+    }
+
+    /// releaseFile - Take ownership of the file away from the FileRemover so it
+    /// will not be removed when the object is destroyed.
+    void releaseFile() { DeleteIt = false; }
+  };
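+
+  // Sketch: guard a scratch file so it is removed on every exit path unless
+  // explicitly released (the path and KeepOutput flag are hypothetical):
+  //
+  // \code
+  //   FileRemover Cleanup("scratch.tmp");
+  //   // ... work that may return early ...
+  //   if (KeepOutput)
+  //     Cleanup.releaseFile(); // the file now survives destruction
+  // \endcode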
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Format.h b/linux-x64/clang/include/llvm/Support/Format.h
new file mode 100644
index 0000000..bcbd2be
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Format.h
@@ -0,0 +1,257 @@
+//===- Format.h - Efficient printf-style formatting for streams -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the format() function, which can be used with other
+// LLVM subsystems to provide printf-style formatting.  This gives all the power
+// and risk of printf.  This can be used like this (with raw_ostreams as an
+// example):
+//
+//    OS << "mynumber: " << format("%4.5f", 1234.412) << '\n';
+//
+// Or if you prefer:
+//
+//  OS << format("mynumber: %4.5f\n", 1234.412);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMAT_H
+#define LLVM_SUPPORT_FORMAT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdio>
+#include <tuple>
+
+namespace llvm {
+
+/// This is a helper class used for handling formatted output.  It is the
+/// abstract base class of a templated derived class.
+class format_object_base {
+protected:
+  const char *Fmt;
+  ~format_object_base() = default; // Disallow polymorphic deletion.
+  format_object_base(const format_object_base &) = default;
+  virtual void home(); // Out of line virtual method.
+
+  /// Call snprintf() for this object, on the given buffer and size.
+  virtual int snprint(char *Buffer, unsigned BufferSize) const = 0;
+
+public:
+  format_object_base(const char *fmt) : Fmt(fmt) {}
+
+  /// Format the object into the specified buffer.  On success, this returns
+  /// the length of the formatted string.  If the buffer is too small, this
+  /// returns a length to retry with, which will be larger than BufferSize.
+  unsigned print(char *Buffer, unsigned BufferSize) const {
+    assert(BufferSize && "Invalid buffer size!");
+
+    // Print the string, leaving room for the terminating null.
+    int N = snprint(Buffer, BufferSize);
+
+    // VC++ and old GlibC return negative on overflow, just double the size.
+    if (N < 0)
+      return BufferSize * 2;
+
+    // Other implementations yield number of bytes needed, not including the
+    // final '\0'.
+    if (unsigned(N) >= BufferSize)
+      return N + 1;
+
+    // Otherwise N is the length of output (not including the final '\0').
+    return N;
+  }
+};
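+
+// The retry contract of print() implies a loop of the following shape (a
+// sketch; FO stands for any format_object created by format() below, and
+// raw_ostream performs this dance internally):
+//
+// \code
+//   std::vector<char> Buf(64);
+//   unsigned N;
+//   while ((N = FO.print(Buf.data(), Buf.size())) >= Buf.size())
+//     Buf.resize(N); // N is guaranteed to be large enough for a retry.
+//   // The first N characters of Buf now hold the formatted output.
+// \endcode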
+
+/// These are templated helper classes used by the format function that
+/// capture the object to be formatted and the format string. When actually
+/// printed, they synthesize the string into a caller-provided buffer and
+/// report, via print(), whether that buffer was big enough.
+
+// Helper to validate that format() parameters are scalars or pointers.
+template <typename... Args> struct validate_format_parameters;
+template <typename Arg, typename... Args>
+struct validate_format_parameters<Arg, Args...> {
+  static_assert(std::is_scalar<Arg>::value,
+                "format can't be used with non fundamental / non pointer type");
+  validate_format_parameters() { validate_format_parameters<Args...>(); }
+};
+template <> struct validate_format_parameters<> {};
+
+template <typename... Ts>
+class format_object final : public format_object_base {
+  std::tuple<Ts...> Vals;
+
+  template <std::size_t... Is>
+  int snprint_tuple(char *Buffer, unsigned BufferSize,
+                    index_sequence<Is...>) const {
+#ifdef _MSC_VER
+    return _snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
+#else
+    return snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
+#endif
+  }
+
+public:
+  format_object(const char *fmt, const Ts &... vals)
+      : format_object_base(fmt), Vals(vals...) {
+    validate_format_parameters<Ts...>();
+  }
+
+  int snprint(char *Buffer, unsigned BufferSize) const override {
+    return snprint_tuple(Buffer, BufferSize, index_sequence_for<Ts...>());
+  }
+};
+
+/// These are helper functions used to produce formatted output.  They use
+/// template type deduction to construct the appropriate instance of the
+/// format_object class to simplify their construction.
+///
+/// This is typically used like:
+/// \code
+///   OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
+
+template <typename... Ts>
+inline format_object<Ts...> format(const char *Fmt, const Ts &... Vals) {
+  return format_object<Ts...>(Fmt, Vals...);
+}
+
+/// This is a helper class for left_justify, right_justify, and center_justify.
+class FormattedString {
+public:
+  enum Justification { JustifyNone, JustifyLeft, JustifyRight, JustifyCenter };
+  FormattedString(StringRef S, unsigned W, Justification J)
+      : Str(S), Width(W), Justify(J) {}
+
+private:
+  StringRef Str;
+  unsigned Width;
+  Justification Justify;
+  friend class raw_ostream;
+};
+
+/// left_justify - append spaces after string so total output is
+/// \p Width characters.  If \p Str is larger than \p Width, the full string
+/// is written with no padding.
+inline FormattedString left_justify(StringRef Str, unsigned Width) {
+  return FormattedString(Str, Width, FormattedString::JustifyLeft);
+}
+
+/// right_justify - add spaces before string so total output is
+/// \p Width characters.  If \p Str is larger than \p Width, the full string
+/// is written with no padding.
+inline FormattedString right_justify(StringRef Str, unsigned Width) {
+  return FormattedString(Str, Width, FormattedString::JustifyRight);
+}
+
+/// center_justify - add spaces before and after string so total output is
+/// \p Width characters.  If \p Str is larger than \p Width, the full string
+/// is written with no padding.
+inline FormattedString center_justify(StringRef Str, unsigned Width) {
+  return FormattedString(Str, Width, FormattedString::JustifyCenter);
+}
+
+/// This is a helper class used for format_hex() and format_decimal().
+class FormattedNumber {
+  uint64_t HexValue;
+  int64_t DecValue;
+  unsigned Width;
+  bool Hex;
+  bool Upper;
+  bool HexPrefix;
+  friend class raw_ostream;
+
+public:
+  FormattedNumber(uint64_t HV, int64_t DV, unsigned W, bool H, bool U,
+                  bool Prefix)
+      : HexValue(HV), DecValue(DV), Width(W), Hex(H), Upper(U),
+        HexPrefix(Prefix) {}
+};
+
+/// format_hex - Output \p N as a fixed width hexadecimal. If the number will
+/// not fit in the width, the full number is still printed.  Examples:
+///   OS << format_hex(255, 4)              => 0xff
+///   OS << format_hex(255, 4, true)        => 0xFF
+///   OS << format_hex(255, 6)              => 0x00ff
+///   OS << format_hex(255, 2)              => 0xff
+inline FormattedNumber format_hex(uint64_t N, unsigned Width,
+                                  bool Upper = false) {
+  assert(Width <= 18 && "hex width must be <= 18");
+  return FormattedNumber(N, 0, Width, true, Upper, true);
+}
+
+/// format_hex_no_prefix - Output \p N as a fixed width hexadecimal. Does not
+/// prepend '0x' to the output.  If the number will not fit in the width, the
+/// full number is still printed.  Examples:
+///   OS << format_hex_no_prefix(255, 2)              => ff
+///   OS << format_hex_no_prefix(255, 2, true)        => FF
+///   OS << format_hex_no_prefix(255, 4)              => 00ff
+///   OS << format_hex_no_prefix(255, 1)              => ff
+inline FormattedNumber format_hex_no_prefix(uint64_t N, unsigned Width,
+                                            bool Upper = false) {
+  assert(Width <= 16 && "hex width must be <= 16");
+  return FormattedNumber(N, 0, Width, true, Upper, false);
+}
+
+/// format_decimal - Output \p N as a right justified, fixed-width decimal. If
+/// the number will not fit in the width, the full number is still printed.
+/// Examples:
+///   OS << format_decimal(0, 5)     => "    0"
+///   OS << format_decimal(255, 5)   => "  255"
+///   OS << format_decimal(-1, 3)    => " -1"
+///   OS << format_decimal(12345, 3) => "12345"
+inline FormattedNumber format_decimal(int64_t N, unsigned Width) {
+  return FormattedNumber(0, N, Width, false, false, false);
+}
+
+class FormattedBytes {
+  ArrayRef<uint8_t> Bytes;
+
+  // If not None, display offsets for each line relative to starting value.
+  Optional<uint64_t> FirstByteOffset;
+  uint32_t IndentLevel;  // Number of characters to indent each line.
+  uint32_t NumPerLine;   // Number of bytes to show per line.
+  uint8_t ByteGroupSize; // How many hex bytes are grouped without spaces
+  bool Upper;            // Show offset and hex bytes as upper case.
+  bool ASCII;            // Show the ASCII bytes for the hex bytes to the right.
+  friend class raw_ostream;
+
+public:
+  FormattedBytes(ArrayRef<uint8_t> B, uint32_t IL, Optional<uint64_t> O,
+                 uint32_t NPL, uint8_t BGS, bool U, bool A)
+      : Bytes(B), FirstByteOffset(O), IndentLevel(IL), NumPerLine(NPL),
+        ByteGroupSize(BGS), Upper(U), ASCII(A) {
+
+    if (ByteGroupSize > NumPerLine)
+      ByteGroupSize = NumPerLine;
+  }
+};
+
+inline FormattedBytes
+format_bytes(ArrayRef<uint8_t> Bytes, Optional<uint64_t> FirstByteOffset = None,
+             uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
+             uint32_t IndentLevel = 0, bool Upper = false) {
+  return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
+                        ByteGroupSize, Upper, false);
+}
+
+inline FormattedBytes
+format_bytes_with_ascii(ArrayRef<uint8_t> Bytes,
+                        Optional<uint64_t> FirstByteOffset = None,
+                        uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
+                        uint32_t IndentLevel = 0, bool Upper = false) {
+  return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
+                        ByteGroupSize, Upper, true);
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/FormatAdapters.h b/linux-x64/clang/include/llvm/Support/FormatAdapters.h
new file mode 100644
index 0000000..197beb7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormatAdapters.h
@@ -0,0 +1,93 @@
+//===- FormatAdapters.h - Formatters for common LLVM types -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATADAPTERS_H
+#define LLVM_SUPPORT_FORMATADAPTERS_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FormatCommon.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+template <typename T> class FormatAdapter : public detail::format_adapter {
+protected:
+  explicit FormatAdapter(T &&Item) : Item(Item) {}
+
+  T Item;
+};
+
+namespace detail {
+template <typename T> class AlignAdapter final : public FormatAdapter<T> {
+  AlignStyle Where;
+  size_t Amount;
+  char Fill;
+
+public:
+  AlignAdapter(T &&Item, AlignStyle Where, size_t Amount, char Fill)
+      : FormatAdapter<T>(std::forward<T>(Item)), Where(Where), Amount(Amount),
+        Fill(Fill) {}
+
+  void format(llvm::raw_ostream &Stream, StringRef Style) {
+    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+    FmtAlign(Adapter, Where, Amount, Fill).format(Stream, Style);
+  }
+};
+
+template <typename T> class PadAdapter final : public FormatAdapter<T> {
+  size_t Left;
+  size_t Right;
+
+public:
+  PadAdapter(T &&Item, size_t Left, size_t Right)
+      : FormatAdapter<T>(std::forward<T>(Item)), Left(Left), Right(Right) {}
+
+  void format(llvm::raw_ostream &Stream, StringRef Style) {
+    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+    Stream.indent(Left);
+    Adapter.format(Stream, Style);
+    Stream.indent(Right);
+  }
+};
+
+template <typename T> class RepeatAdapter final : public FormatAdapter<T> {
+  size_t Count;
+
+public:
+  RepeatAdapter(T &&Item, size_t Count)
+      : FormatAdapter<T>(std::forward<T>(Item)), Count(Count) {}
+
+  void format(llvm::raw_ostream &Stream, StringRef Style) {
+    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+    for (size_t I = 0; I < Count; ++I) {
+      Adapter.format(Stream, Style);
+    }
+  }
+};
+}
+
+template <typename T>
+detail::AlignAdapter<T> fmt_align(T &&Item, AlignStyle Where, size_t Amount,
+                                  char Fill = ' ') {
+  return detail::AlignAdapter<T>(std::forward<T>(Item), Where, Amount, Fill);
+}
+
+template <typename T>
+detail::PadAdapter<T> fmt_pad(T &&Item, size_t Left, size_t Right) {
+  return detail::PadAdapter<T>(std::forward<T>(Item), Left, Right);
+}
+
+template <typename T>
+detail::RepeatAdapter<T> fmt_repeat(T &&Item, size_t Count) {
+  return detail::RepeatAdapter<T>(std::forward<T>(Item), Count);
+}
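+
+// These adapters are consumed by llvm::formatv (declared in FormatVariadic.h,
+// not in this header) -- a sketch:
+//
+// \code
+//   outs() << formatv("{0}", fmt_align(42, AlignStyle::Center, 8, '*'));
+//   // prints "***42***"
+//   outs() << formatv("{0}", fmt_repeat(StringRef("ab"), 3)); // "ababab"
+// \endcode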
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/FormatCommon.h b/linux-x64/clang/include/llvm/Support/FormatCommon.h
new file mode 100644
index 0000000..36fbad2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormatCommon.h
@@ -0,0 +1,77 @@
+//===- FormatCommon.h - Common formatting helpers --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATCOMMON_H
+#define LLVM_SUPPORT_FORMATCOMMON_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+enum class AlignStyle { Left, Center, Right };
+
+struct FmtAlign {
+  detail::format_adapter &Adapter;
+  AlignStyle Where;
+  size_t Amount;
+  char Fill;
+
+  FmtAlign(detail::format_adapter &Adapter, AlignStyle Where, size_t Amount,
+           char Fill = ' ')
+      : Adapter(Adapter), Where(Where), Amount(Amount), Fill(Fill) {}
+
+  void format(raw_ostream &S, StringRef Options) {
+    // If we don't need to align, we can format straight into the underlying
+    // stream.  Otherwise we have to go through an intermediate stream first
+    // in order to calculate how long the output is so we can align it.
+    // TODO: Make the format method return the number of bytes written, that
+    // way we can also skip the intermediate stream for left-aligned output.
+    if (Amount == 0) {
+      Adapter.format(S, Options);
+      return;
+    }
+    SmallString<64> Item;
+    raw_svector_ostream Stream(Item);
+
+    Adapter.format(Stream, Options);
+    if (Amount <= Item.size()) {
+      S << Item;
+      return;
+    }
+
+    size_t PadAmount = Amount - Item.size();
+    switch (Where) {
+    case AlignStyle::Left:
+      S << Item;
+      fill(S, PadAmount);
+      break;
+    case AlignStyle::Center: {
+      size_t X = PadAmount / 2;
+      fill(S, X);
+      S << Item;
+      fill(S, PadAmount - X);
+      break;
+    }
+    default:
+      fill(S, PadAmount);
+      S << Item;
+      break;
+    }
+  }
+
+private:
+  void fill(llvm::raw_ostream &S, uint32_t Count) {
+    for (uint32_t I = 0; I < Count; ++I)
+      S << Fill;
+  }
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/FormatProviders.h b/linux-x64/clang/include/llvm/Support/FormatProviders.h
new file mode 100644
index 0000000..4e57034
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormatProviders.h
@@ -0,0 +1,423 @@
+//===- FormatProviders.h - Formatters for common LLVM types -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements format providers for many common LLVM types, for example
+// allowing precision and width specifiers for scalar and string types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATPROVIDERS_H
+#define LLVM_SUPPORT_FORMATPROVIDERS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/NativeFormatting.h"
+
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+namespace detail {
+template <typename T>
+struct use_integral_formatter
+    : public std::integral_constant<
+          bool, is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+                          int64_t, uint64_t, int, unsigned, long, unsigned long,
+                          long long, unsigned long long>::value> {};
+
+template <typename T>
+struct use_char_formatter
+    : public std::integral_constant<bool, std::is_same<T, char>::value> {};
+
+template <typename T>
+struct is_cstring
+    : public std::integral_constant<bool,
+                                    is_one_of<T, char *, const char *>::value> {
+};
+
+template <typename T>
+struct use_string_formatter
+    : public std::integral_constant<bool,
+                                    std::is_convertible<T, llvm::StringRef>::value> {};
+
+template <typename T>
+struct use_pointer_formatter
+    : public std::integral_constant<bool, std::is_pointer<T>::value &&
+                                              !is_cstring<T>::value> {};
+
+template <typename T>
+struct use_double_formatter
+    : public std::integral_constant<bool, std::is_floating_point<T>::value> {};
+
+class HelperFunctions {
+protected:
+  static Optional<size_t> parseNumericPrecision(StringRef Str) {
+    size_t Prec;
+    Optional<size_t> Result;
+    if (Str.empty())
+      Result = None;
+    else if (Str.getAsInteger(10, Prec)) {
+      assert(false && "Invalid precision specifier");
+      Result = None;
+    } else {
+      assert(Prec < 100 && "Precision out of range");
+      Result = std::min<size_t>(99u, Prec);
+    }
+    return Result;
+  }
+
+  static bool consumeHexStyle(StringRef &Str, HexPrintStyle &Style) {
+    if (!Str.startswith_lower("x"))
+      return false;
+
+    if (Str.consume_front("x-"))
+      Style = HexPrintStyle::Lower;
+    else if (Str.consume_front("X-"))
+      Style = HexPrintStyle::Upper;
+    else if (Str.consume_front("x+") || Str.consume_front("x"))
+      Style = HexPrintStyle::PrefixLower;
+    else if (Str.consume_front("X+") || Str.consume_front("X"))
+      Style = HexPrintStyle::PrefixUpper;
+    return true;
+  }
+
+  static size_t consumeNumHexDigits(StringRef &Str, HexPrintStyle Style,
+                                    size_t Default) {
+    Str.consumeInteger(10, Default);
+    if (isPrefixedHexStyle(Style))
+      Default += 2;
+    return Default;
+  }
+};
+}
+
+/// Implementation of format_provider<T> for integral arithmetic types.
+///
+/// The options string of an integral type has the grammar:
+///
+///   integer_options   :: [style][digits]
+///   style             :: <see table below>
+///   digits            :: <non-negative integer> 0-99
+///
+///   ==========================================================================
+///   |  style  |     Meaning          |      Example     | Digits Meaning     |
+///   --------------------------------------------------------------------------
+///   |         |                      |  Input |  Output |                    |
+///   ==========================================================================
+///   |   x-    | Hex no prefix, lower |   42   |    2a   | Minimum # digits   |
+///   |   X-    | Hex no prefix, upper |   42   |    2A   | Minimum # digits   |
+///   | x+ / x  | Hex + prefix, lower  |   42   |   0x2a  | Minimum # digits   |
+///   | X+ / X  | Hex + prefix, upper  |   42   |   0x2A  | Minimum # digits   |
+///   | N / n   | Digit grouped number | 123456 | 123,456 | Ignored            |
+///   | D / d   | Integer              | 100000 | 100000  | Ignored            |
+///   | (empty) | Same as D / d        |        |         |                    |
+///   ==========================================================================
+///
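+/// For example, a few illustrative calls (via formatv from FormatVariadic.h):
+/// \code
+///   formatv("{0:X+}", 255);   // "0xFF"
+///   formatv("{0:x-4}", 42);   // "002a"
+///   formatv("{0:N}", 123456); // "123,456"
+/// \endcode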
+
+template <typename T>
+struct format_provider<
+    T, typename std::enable_if<detail::use_integral_formatter<T>::value>::type>
+    : public detail::HelperFunctions {
+public:
+  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+    HexPrintStyle HS;
+    size_t Digits = 0;
+    if (consumeHexStyle(Style, HS)) {
+      Digits = consumeNumHexDigits(Style, HS, 0);
+      write_hex(Stream, V, HS, Digits);
+      return;
+    }
+
+    IntegerStyle IS = IntegerStyle::Integer;
+    if (Style.consume_front("N") || Style.consume_front("n"))
+      IS = IntegerStyle::Number;
+    else if (Style.consume_front("D") || Style.consume_front("d"))
+      IS = IntegerStyle::Integer;
+
+    Style.consumeInteger(10, Digits);
+    assert(Style.empty() && "Invalid integral format style!");
+    write_integer(Stream, V, Digits, IS);
+  }
+};
+
+/// Implementation of format_provider<T> for integral pointer types.
+///
+/// The options string of a pointer type has the grammar:
+///
+///   pointer_options   :: [style][digits]
+///   style             :: <see table below>
+///   digits            :: <non-negative integer> 0-sizeof(void*)
+///
+///   ==========================================================================
+///   |  style  |     Meaning          |                Example                |
+///   --------------------------------------------------------------------------
+///   |         |                      |       Input       |      Output       |
+///   ==========================================================================
+///   |   x-    | Hex no prefix, lower |    0xDEADBEEF     |     deadbeef      |
+///   |   X-    | Hex no prefix, upper |    0xDEADBEEF     |     DEADBEEF      |
+///   | x+ / x  | Hex + prefix, lower  |    0xDEADBEEF     |    0xdeadbeef     |
+///   | X+ / X  | Hex + prefix, upper  |    0xDEADBEEF     |    0xDEADBEEF     |
+///   | (empty) | Same as X+ / X       |                   |                   |
+///   ==========================================================================
+///
+/// The default number of digits is the number of nibbles in a machine word,
+/// and in all cases it indicates the minimum number of nibbles to print.
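+///
+/// For example (illustrative; the exact output depends on the pointer value
+/// and the host pointer width):
+/// \code
+///   int X = 0;
+///   formatv("{0:x-}", &X); // e.g. "00007ffee1d2a4c4" on a 64-bit host
+/// \endcode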
+template <typename T>
+struct format_provider<
+    T, typename std::enable_if<detail::use_pointer_formatter<T>::value>::type>
+    : public detail::HelperFunctions {
+public:
+  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+    HexPrintStyle HS = HexPrintStyle::PrefixUpper;
+    consumeHexStyle(Style, HS);
+    size_t Digits = consumeNumHexDigits(Style, HS, sizeof(void *) * 2);
+    write_hex(Stream, reinterpret_cast<std::uintptr_t>(V), HS, Digits);
+  }
+};
+
+/// Implementation of format_provider<T> for c-style strings and string
+/// objects such as std::string and llvm::StringRef.
+///
+/// The options string of a string type has the grammar:
+///
+///   string_options :: [length]
+///
+/// where `length` is an optional integer specifying the maximum number of
+/// characters in the string to print.  If `length` is omitted, the string is
+/// printed up to the null terminator.
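+///
+/// For example (illustrative):
+/// \code
+///   formatv("{0:3}", "hello"); // "hel"
+///   formatv("{0}", "hello");   // "hello"
+/// \endcode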
+
+template <typename T>
+struct format_provider<
+    T, typename std::enable_if<detail::use_string_formatter<T>::value>::type> {
+  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+    size_t N = StringRef::npos;
+    if (!Style.empty() && Style.getAsInteger(10, N)) {
+      assert(false && "Style is not a valid integer");
+    }
+    llvm::StringRef S = V;
+    Stream << S.substr(0, N);
+  }
+};
+
+/// Implementation of format_provider<T> for llvm::Twine.
+///
+/// This follows the same rules as the string formatter.
+
+template <> struct format_provider<Twine> {
+  static void format(const Twine &V, llvm::raw_ostream &Stream,
+                     StringRef Style) {
+    format_provider<std::string>::format(V.str(), Stream, Style);
+  }
+};
+
+/// Implementation of format_provider<T> for characters.
+///
+/// The options string of a character type has the grammar:
+///
+///   char_options :: (empty) | [integer_options]
+///
+/// If `char_options` is empty, the character is displayed as an ASCII
+/// character.  Otherwise, it is treated as an integer options string.
+///
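+/// For example (illustrative):
+/// \code
+///   formatv("{0}", 'x');   // "x"
+///   formatv("{0:D}", 'x'); // "120" (the ASCII value)
+/// \endcode
+///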
+template <typename T>
+struct format_provider<
+    T, typename std::enable_if<detail::use_char_formatter<T>::value>::type> {
+  static void format(const char &V, llvm::raw_ostream &Stream,
+                     StringRef Style) {
+    if (Style.empty())
+      Stream << V;
+    else {
+      int X = static_cast<int>(V);
+      format_provider<int>::format(X, Stream, Style);
+    }
+  }
+};
+
+/// Implementation of format_provider<T> for type `bool`
+///
+/// The options string of a boolean type has the grammar:
+///
+///   bool_options :: "" | "Y" | "y" | "D" | "d" | "T" | "t"
+///
+///   ==================================
+///   |    C    |     Meaning          |
+///   ==================================
+///   |    Y    |       YES / NO       |
+///   |    y    |       yes / no       |
+///   |  D / d  |    Integer 0 or 1    |
+///   |    T    |     TRUE / FALSE     |
+///   |    t    |     true / false     |
+///   | (empty) |   Equivalent to 't'  |
+///   ==================================
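+///
+/// For example (illustrative):
+/// \code
+///   formatv("{0:Y}", true);  // "YES"
+///   formatv("{0:t}", false); // "false"
+/// \endcode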
+template <> struct format_provider<bool> {
+  static void format(const bool &B, llvm::raw_ostream &Stream,
+                     StringRef Style) {
+    Stream << StringSwitch<const char *>(Style)
+                  .Case("Y", B ? "YES" : "NO")
+                  .Case("y", B ? "yes" : "no")
+                  .CaseLower("D", B ? "1" : "0")
+                  .Case("T", B ? "TRUE" : "FALSE")
+                  .Cases("t", "", B ? "true" : "false")
+                  .Default(B ? "1" : "0");
+  }
+};
+
+/// Implementation of format_provider<T> for floating point types.
+///
+/// The options string of a floating point type has the format:
+///
+///   float_options   :: [style][precision]
+///   style           :: <see table below>
+///   precision       :: <non-negative integer> 0-99
+///
+///   =====================================================
+///   |  style  |     Meaning          |      Example     |
+///   -----------------------------------------------------
+///   |         |                      |  Input |  Output |
+///   =====================================================
+///   | P / p   | Percentage           |  0.05  |  5.00%  |
+///   | F / f   | Fixed point          |   1.0  |  1.00   |
+///   |   E     | Exponential with E   | 100000 | 1.0E+05 |
+///   |   e     | Exponential with e   | 100000 | 1.0e+05 |
+///   | (empty) | Same as F / f        |        |         |
+///   =====================================================
+///
+/// The default precision is 6 for exponential (E / e) and 2 for everything
+/// else.
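+///
+/// For example (illustrative):
+/// \code
+///   formatv("{0:P}", 0.05);     // "5.00%"
+///   formatv("{0:e}", 100000.0); // "1.000000e+05"
+/// \endcode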
+
+template <typename T>
+struct format_provider<
+    T, typename std::enable_if<detail::use_double_formatter<T>::value>::type>
+    : public detail::HelperFunctions {
+  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+    FloatStyle S;
+    if (Style.consume_front("P") || Style.consume_front("p"))
+      S = FloatStyle::Percent;
+    else if (Style.consume_front("F") || Style.consume_front("f"))
+      S = FloatStyle::Fixed;
+    else if (Style.consume_front("E"))
+      S = FloatStyle::ExponentUpper;
+    else if (Style.consume_front("e"))
+      S = FloatStyle::Exponent;
+    else
+      S = FloatStyle::Fixed;
+
+    Optional<size_t> Precision = parseNumericPrecision(Style);
+    if (!Precision.hasValue())
+      Precision = getDefaultPrecision(S);
+
+    write_double(Stream, static_cast<double>(V), S, Precision);
+  }
+};
+
+namespace detail {
+template <typename IterT>
+using IterValue = typename std::iterator_traits<IterT>::value_type;
+
+template <typename IterT>
+struct range_item_has_provider
+    : public std::integral_constant<
+          bool, !uses_missing_provider<IterValue<IterT>>::value> {};
+}
+
+/// Implementation of format_provider<T> for ranges.
+///
+/// This will print an arbitrary range as a delimited sequence of items.
+///
+/// The options string of a range type has the grammar:
+///
+///   range_style       ::= [separator] [element_style]
+///   separator         ::= "$" delimited_expr
+///   element_style     ::= "@" delimited_expr
+///   delimited_expr    ::= "[" expr "]" | "(" expr ")" | "<" expr ">"
+///   expr              ::= <any string not containing the closing delimiter>
+///
+/// where the separator expression is the string to insert between consecutive
+/// items in the range and the argument expression is the Style specification to
+/// be used when formatting the underlying type.  The default separator if
+/// unspecified is ' ' (space).  The syntax of the argument expression follows
+/// whatever grammar is dictated by the format provider or format adapter used
+/// to format the value type.
+///
+/// Note that attempting to format an `iterator_range<T>` where no format
+/// provider can be found for T will result in a compile error.
+///
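+/// For example, an illustrative sketch (make_range is from STLExtras.h):
+/// \code
+///   std::vector<int> V = {8, 9, 10};
+///   formatv("{0:$[+]@[x]}", make_range(V.begin(), V.end()));
+///   // yields "0x8+0x9+0xa"
+/// \endcode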
+
+template <typename IterT> class format_provider<llvm::iterator_range<IterT>> {
+  using value = typename std::iterator_traits<IterT>::value_type;
+  using reference = typename std::iterator_traits<IterT>::reference;
+
+  static StringRef consumeOneOption(StringRef &Style, char Indicator,
+                                    StringRef Default) {
+    if (Style.empty())
+      return Default;
+    if (Style.front() != Indicator)
+      return Default;
+    Style = Style.drop_front();
+    if (Style.empty()) {
+      assert(false && "Invalid range style");
+      return Default;
+    }
+
+    for (const char *D : {"[]", "<>", "()"}) {
+      if (Style.front() != D[0])
+        continue;
+      size_t End = Style.find_first_of(D[1]);
+      if (End == StringRef::npos) {
+        assert(false && "Missing range option end delimiter!");
+        return Default;
+      }
+      StringRef Result = Style.slice(1, End);
+      Style = Style.drop_front(End + 1);
+      return Result;
+    }
+    assert(false && "Invalid range style!");
+    return Default;
+  }
+
+  static std::pair<StringRef, StringRef> parseOptions(StringRef Style) {
+    StringRef Sep = consumeOneOption(Style, '$', ", ");
+    StringRef Args = consumeOneOption(Style, '@', "");
+    assert(Style.empty() && "Unexpected text in range option string!");
+    return std::make_pair(Sep, Args);
+  }
+
+public:
+  static_assert(detail::range_item_has_provider<IterT>::value,
+                "Range value_type does not have a format provider!");
+  static void format(const llvm::iterator_range<IterT> &V,
+                     llvm::raw_ostream &Stream, StringRef Style) {
+    StringRef Sep;
+    StringRef ArgStyle;
+    std::tie(Sep, ArgStyle) = parseOptions(Style);
+    auto Begin = V.begin();
+    auto End = V.end();
+    if (Begin != End) {
+      auto Adapter =
+          detail::build_format_adapter(std::forward<reference>(*Begin));
+      Adapter.format(Stream, ArgStyle);
+      ++Begin;
+    }
+    while (Begin != End) {
+      Stream << Sep;
+      auto Adapter =
+          detail::build_format_adapter(std::forward<reference>(*Begin));
+      Adapter.format(Stream, ArgStyle);
+      ++Begin;
+    }
+  }
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/FormatVariadic.h b/linux-x64/clang/include/llvm/Support/FormatVariadic.h
new file mode 100644
index 0000000..8c08a7d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormatVariadic.h
@@ -0,0 +1,270 @@
+//===- FormatVariadic.h - Efficient type-safe string formatting --*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the formatv() function which can be used with other LLVM
+// subsystems to provide printf-like formatting, but with improved safety and
+// flexibility.  The result of `formatv` is an object which can be streamed to
+// a raw_ostream or converted to a std::string or llvm::SmallString.
+//
+//   // Convert to std::string.
+//   std::string S = formatv("{0} {1}", 1234.412, "test").str();
+//
+//   // Convert to llvm::SmallString
+//   SmallString<8> S = formatv("{0} {1}", 1234.412, "test").sstr<8>();
+//
+//   // Stream to an existing raw_ostream.
+//   OS << formatv("{0} {1}", 1234.412, "test");
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATVARIADIC_H
+#define LLVM_SUPPORT_FORMATVARIADIC_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FormatCommon.h"
+#include "llvm/Support/FormatProviders.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+enum class ReplacementType { Empty, Format, Literal };
+
+struct ReplacementItem {
+  ReplacementItem() = default;
+  explicit ReplacementItem(StringRef Literal)
+      : Type(ReplacementType::Literal), Spec(Literal) {}
+  ReplacementItem(StringRef Spec, size_t Index, size_t Align, AlignStyle Where,
+                  char Pad, StringRef Options)
+      : Type(ReplacementType::Format), Spec(Spec), Index(Index), Align(Align),
+        Where(Where), Pad(Pad), Options(Options) {}
+
+  ReplacementType Type = ReplacementType::Empty;
+  StringRef Spec;
+  size_t Index = 0;
+  size_t Align = 0;
+  AlignStyle Where = AlignStyle::Right;
+  char Pad = 0;
+  StringRef Options;
+};
+
+class formatv_object_base {
+protected:
+  // The parameters are stored in a std::tuple, which does not provide runtime
+  // indexing capabilities.  In order to enable runtime indexing, we use this
+  // structure to put the parameters into a std::vector.  Since the parameters
+  // are not all the same type, we use some type-erasure by wrapping the
+  // parameters in a template class that derives from a non-template superclass.
+  // Essentially, we are converting a std::tuple<Derived<Ts...>> to a
+  // std::vector<Base*>.
+  struct create_adapters {
+    template <typename... Ts>
+    std::vector<detail::format_adapter *> operator()(Ts &... Items) {
+      return std::vector<detail::format_adapter *>{&Items...};
+    }
+  };
+
+  StringRef Fmt;
+  std::vector<detail::format_adapter *> Adapters;
+  std::vector<ReplacementItem> Replacements;
+
+  static bool consumeFieldLayout(StringRef &Spec, AlignStyle &Where,
+                                 size_t &Align, char &Pad);
+
+  static std::pair<ReplacementItem, StringRef>
+  splitLiteralAndReplacement(StringRef Fmt);
+
+public:
+  formatv_object_base(StringRef Fmt, std::size_t ParamCount)
+      : Fmt(Fmt), Replacements(parseFormatString(Fmt)) {
+    Adapters.reserve(ParamCount);
+  }
+
+  formatv_object_base(formatv_object_base const &rhs) = delete;
+
+  formatv_object_base(formatv_object_base &&rhs)
+      : Fmt(std::move(rhs.Fmt)),
+        Adapters(), // Adapters are initialized by formatv_object
+        Replacements(std::move(rhs.Replacements)) {
+    Adapters.reserve(rhs.Adapters.size());
+  }
+
+  void format(raw_ostream &S) const {
+    for (auto &R : Replacements) {
+      if (R.Type == ReplacementType::Empty)
+        continue;
+      if (R.Type == ReplacementType::Literal) {
+        S << R.Spec;
+        continue;
+      }
+      if (R.Index >= Adapters.size()) {
+        S << R.Spec;
+        continue;
+      }
+
+      auto W = Adapters[R.Index];
+
+      // Forward the parsed pad character so custom fills (e.g. "{0,*=10}")
+      // take effect instead of always padding with spaces.
+      FmtAlign Align(*W, R.Where, R.Align, R.Pad);
+      Align.format(S, R.Options);
+    }
+  }
+  static std::vector<ReplacementItem> parseFormatString(StringRef Fmt);
+
+  static Optional<ReplacementItem> parseReplacementItem(StringRef Spec);
+
+  std::string str() const {
+    std::string Result;
+    raw_string_ostream Stream(Result);
+    Stream << *this;
+    Stream.flush();
+    return Result;
+  }
+
+  template <unsigned N> SmallString<N> sstr() const {
+    SmallString<N> Result;
+    raw_svector_ostream Stream(Result);
+    Stream << *this;
+    return Result;
+  }
+
+  template <unsigned N> operator SmallString<N>() const { return sstr<N>(); }
+
+  operator std::string() const { return str(); }
+};
+
+template <typename Tuple> class formatv_object : public formatv_object_base {
+  // Storage for the parameter adapters.  Since the base class erases the type
+  // of the parameters, we have to own the storage for the parameters here, and
+  // have the base class store type-erased pointers into this tuple.
+  Tuple Parameters;
+
+public:
+  formatv_object(StringRef Fmt, Tuple &&Params)
+      : formatv_object_base(Fmt, std::tuple_size<Tuple>::value),
+        Parameters(std::move(Params)) {
+    Adapters = apply_tuple(create_adapters(), Parameters);
+  }
+
+  formatv_object(formatv_object const &rhs) = delete;
+
+  formatv_object(formatv_object &&rhs)
+      : formatv_object_base(std::move(rhs)),
+        Parameters(std::move(rhs.Parameters)) {
+    Adapters = apply_tuple(create_adapters(), Parameters);
+  }
+};
+
+// \brief Format text given a format string and replacement parameters.
+//
+// ===General Description===
+//
+// Formats textual output.  `Fmt` is a string consisting of one or more
+// replacement sequences with the following grammar:
+//
+// rep_field ::= "{" [index] ["," layout] [":" format] "}"
+// index     ::= <non-negative integer>
+// layout    ::= [[[char]loc]width]
+// format    ::= <any string not containing "{" or "}">
+// char      ::= <any character except "{" or "}">
+// loc       ::= "-" | "=" | "+"
+// width     ::= <positive integer>
+//
+// index   - A non-negative integer specifying the index of the item in the
+//           parameter pack to print.  Any other value is invalid.
+// layout  - A string controlling how the field is laid out within the available
+//           space.
+// format  - A type-dependent string used to provide additional options to
+//           the formatting operation.  Refer to the documentation of the
+//           various individual format providers for per-type options.
+// char    - The padding character.  Defaults to ' ' (space).  Only valid if
+//           `loc` is also specified.
+// loc     - Where to print the formatted text within the field.  Only valid if
+//           `width` is also specified.
+//           '-' : The field is left aligned within the available space.
+//           '=' : The field is centered within the available space.
+//           '+' : The field is right aligned within the available space (this
+//                 is the default).
+// width   - The width of the field within which to print the formatted text.
+//           If this is less than the required length then the `char` and `loc`
+//           fields are ignored, and the field is printed with no leading or
+//           trailing padding.  If this is greater than the required length,
+//           then the text is output according to the value of `loc`, and padded
+//           as appropriate on the left and/or right by `char`.
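+//
+// For example, following the grammar above (illustrative):
+//
+//   formatv("{0,=10}", "center"); // "  center  "
+//   formatv("{0,-10}", "left");   // "left      "
+//   formatv("{0,*=10}", 7);       // "****7*****"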
+//
+// ===Special Characters===
+//
+// The characters '{' and '}' are reserved and cannot appear anywhere within a
+// replacement sequence.  Outside of a replacement sequence, in order to print
+// a literal '{' or '}' it must be doubled -- "{{" to print a literal '{' and
+// "}}" to print a literal '}'.
+//
+// ===Parameter Indexing===
+// `index` specifies the index of the parameter in the parameter pack to format
+// into the output.  Note that it is possible to refer to the same parameter
+// index multiple times in a given format string.  This makes it possible to
+// output the same value multiple times without passing it multiple times to the
+// function. For example:
+//
+//   formatv("{0} {1} {0}", "a", "bb")
+//
+// would yield the string "a bb a".  This can be convenient when it is
+// expensive to compute the value of the parameter, and you would otherwise
+// have had to save it to a temporary.
+//
+// ===Formatter Search===
+//
+// For a given parameter of type T, the following steps are executed in order
+// until a match is found:
+//
+//   1. If the parameter is of class type, and inherits from format_adapter,
+//      Then format() is invoked on it to produce the formatted output.  The
+//      implementation should write the formatted text into `Stream`.
+//   2. If there is a suitable template specialization of format_provider<>
+//      for type T containing a method whose signature is:
+//      void format(const T &Obj, raw_ostream &Stream, StringRef Options)
+//      Then this method is invoked as described in Step 1.
+//
+// If a match cannot be found through either of the above methods, a compiler
+// error is generated.
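+//
+// For example, a minimal sketch of hooking in via Step 2 (the `Point` type
+// and its provider below are illustrative, not part of LLVM):
+//
+//   struct Point { int X, Y; };
+//   template <> struct format_provider<Point> {
+//     static void format(const Point &P, raw_ostream &S, StringRef Options) {
+//       S << "(" << P.X << ", " << P.Y << ")";
+//     }
+//   };
+//
+//   formatv("{0}", Point{1, 2}); // yields "(1, 2)"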
+//
+// ===Invalid Format String Handling===
+//
+// In the case of a format string which does not match the grammar described
+// above, the output is undefined.  With asserts enabled, LLVM will trigger an
+// assertion.  Otherwise, it will try to do something reasonable, but in general
+// the details of what that is are undefined.
+//
+template <typename... Ts>
+inline auto formatv(const char *Fmt, Ts &&... Vals) -> formatv_object<decltype(
+    std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...))> {
+  using ParamTuple = decltype(
+      std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
+  return formatv_object<ParamTuple>(
+      Fmt,
+      std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
+}
+
+// Allow a formatv_object to be formatted (no options supported).
+template <typename T> struct format_provider<formatv_object<T>> {
+  static void format(const formatv_object<T> &V, raw_ostream &OS, StringRef) {
+    OS << V;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_FORMATVARIADIC_H
diff --git a/linux-x64/clang/include/llvm/Support/FormatVariadicDetails.h b/linux-x64/clang/include/llvm/Support/FormatVariadicDetails.h
new file mode 100644
index 0000000..9b60462
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormatVariadicDetails.h
@@ -0,0 +1,112 @@
+//===- FormatVariadicDetails.h - Helpers for FormatVariadic.h ----*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATVARIADIC_DETAILS_H
+#define LLVM_SUPPORT_FORMATVARIADIC_DETAILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <type_traits>
+
+namespace llvm {
+template <typename T, typename Enable = void> struct format_provider {};
+
+namespace detail {
+class format_adapter {
+protected:
+  virtual ~format_adapter() {}
+
+public:
+  virtual void format(raw_ostream &S, StringRef Options) = 0;
+};
+
+template <typename T> class provider_format_adapter : public format_adapter {
+  T Item;
+
+public:
+  explicit provider_format_adapter(T &&Item) : Item(std::forward<T>(Item)) {}
+
+  void format(llvm::raw_ostream &S, StringRef Options) override {
+    format_provider<typename std::decay<T>::type>::format(Item, S, Options);
+  }
+};
+
+template <typename T> class missing_format_adapter;
+
+// Test if format_provider<T> is defined on T and contains a member function
+// with the signature:
+//   static void format(const T&, raw_ostream &, StringRef);
+//
+template <class T> class has_FormatProvider {
+public:
+  using Decayed = typename std::decay<T>::type;
+  typedef void (*Signature_format)(const Decayed &, llvm::raw_ostream &,
+                                   StringRef);
+
+  template <typename U>
+  static char test(SameType<Signature_format, &U::format> *);
+
+  template <typename U> static double test(...);
+
+  static bool const value =
+      (sizeof(test<llvm::format_provider<Decayed>>(nullptr)) == 1);
+};
+
+// Simple template that decides whether a type T should use the member-function
+// based format() invocation.
+template <typename T>
+struct uses_format_member
+    : public std::integral_constant<
+          bool,
+          std::is_base_of<format_adapter,
+                          typename std::remove_reference<T>::type>::value> {};
+
+// Simple template that decides whether a type T should use the format_provider
+// based format() invocation.  The member function takes priority, so this test
+// will only be true if there is not ALSO a format member.
+template <typename T>
+struct uses_format_provider
+    : public std::integral_constant<
+          bool, !uses_format_member<T>::value && has_FormatProvider<T>::value> {
+};
+
+// Simple template that decides whether a type T has neither a member-function
+// nor format_provider based implementation that it can use.  Mostly used so
+// that the compiler spits out a nice diagnostic when a type with no format
+// implementation can be located.
+template <typename T>
+struct uses_missing_provider
+    : public std::integral_constant<bool,
+                                    !uses_format_member<T>::value &&
+                                        !uses_format_provider<T>::value> {};
+
+template <typename T>
+typename std::enable_if<uses_format_member<T>::value, T>::type
+build_format_adapter(T &&Item) {
+  return std::forward<T>(Item);
+}
+
+template <typename T>
+typename std::enable_if<uses_format_provider<T>::value,
+                        provider_format_adapter<T>>::type
+build_format_adapter(T &&Item) {
+  return provider_format_adapter<T>(std::forward<T>(Item));
+}
+
+template <typename T>
+typename std::enable_if<uses_missing_provider<T>::value,
+                        missing_format_adapter<T>>::type
+build_format_adapter(T &&Item) {
+  return missing_format_adapter<T>();
+}
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/FormattedStream.h b/linux-x64/clang/include/llvm/Support/FormattedStream.h
new file mode 100644
index 0000000..4a135cd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/FormattedStream.h
@@ -0,0 +1,162 @@
+//===-- llvm/Support/FormattedStream.h - Formatted streams ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains raw_ostream implementations for streams to do
+// things like pretty-print comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATTEDSTREAM_H
+#define LLVM_SUPPORT_FORMATTEDSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <utility>
+
+namespace llvm {
+
+/// formatted_raw_ostream - A raw_ostream that wraps another one and keeps track
+/// of line and column position, allowing padding out to specific column
+/// boundaries and querying the number of lines written to the stream.
+///
+class formatted_raw_ostream : public raw_ostream {
+  /// TheStream - The real stream we output to. We set it to be
+  /// unbuffered, since we're already doing our own buffering.
+  ///
+  raw_ostream *TheStream;
+
+  /// Position - The current output column and line of the data that's
+  /// been flushed and the portion of the buffer that's been
+  /// scanned.  The line and column scheme is zero-based.
+  ///
+  std::pair<unsigned, unsigned> Position;
+
+  /// Scanned - This points to one past the last character in the
+  /// buffer we've scanned.
+  ///
+  const char *Scanned;
+
+  void write_impl(const char *Ptr, size_t Size) override;
+
+  /// current_pos - Return the current position within the stream,
+  /// not counting the bytes currently in the buffer.
+  uint64_t current_pos() const override {
+    // Our current position in the stream is all the contents which have been
+    // written to the underlying stream (*not* the current position of the
+    // underlying stream).
+    return TheStream->tell();
+  }
+
+  /// ComputePosition - Examine the given output buffer and figure out the new
+  /// position after output.
+  ///
+  void ComputePosition(const char *Ptr, size_t size);
+
+  void setStream(raw_ostream &Stream) {
+    releaseStream();
+
+    TheStream = &Stream;
+
+    // This formatted_raw_ostream inherits from raw_ostream, so it'll do its
+    // own buffering, and it doesn't need or want TheStream to do another
+    // layer of buffering underneath. Resize the buffer to what TheStream
+    // had been using, and tell TheStream not to do its own buffering.
+    if (size_t BufferSize = TheStream->GetBufferSize())
+      SetBufferSize(BufferSize);
+    else
+      SetUnbuffered();
+    TheStream->SetUnbuffered();
+
+    Scanned = nullptr;
+  }
+
+public:
+  /// formatted_raw_ostream - Wrap the given stream, forwarding all output to
+  /// it while tracking the current line and column position.
+  ///
+  /// As a side effect, the given Stream is set to be Unbuffered.
+  /// This is because formatted_raw_ostream does its own buffering,
+  /// so it doesn't want another layer of buffering to be happening
+  /// underneath it.
+  ///
+  formatted_raw_ostream(raw_ostream &Stream)
+      : TheStream(nullptr), Position(0, 0) {
+    setStream(Stream);
+  }
+  explicit formatted_raw_ostream() : TheStream(nullptr), Position(0, 0) {
+    Scanned = nullptr;
+  }
+
+  ~formatted_raw_ostream() override {
+    flush();
+    releaseStream();
+  }
+
+  /// PadToColumn - Align the output to some column number.  If the current
+  /// column is already equal to or more than NewCol, PadToColumn inserts one
+  /// space.
+  ///
+  /// \param NewCol - The column to move to.
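+  ///
+  /// For example (illustrative; OS is an existing raw_ostream):
+  /// \code
+  ///   formatted_raw_ostream FOS(OS);
+  ///   FOS << "add r0, r0, r1";
+  ///   FOS.PadToColumn(24);
+  ///   FOS << "; a column-aligned comment";
+  /// \endcode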
+  formatted_raw_ostream &PadToColumn(unsigned NewCol);
+
+  /// getColumn - Return the column number
+  unsigned getColumn() { return Position.first; }
+
+  /// getLine - Return the line number
+  unsigned getLine() { return Position.second; }
+
+  raw_ostream &resetColor() override {
+    TheStream->resetColor();
+    return *this;
+  }
+
+  raw_ostream &reverseColor() override {
+    TheStream->reverseColor();
+    return *this;
+  }
+
+  raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override {
+    TheStream->changeColor(Color, Bold, BG);
+    return *this;
+  }
+
+  bool is_displayed() const override {
+    return TheStream->is_displayed();
+  }
+
+private:
+  void releaseStream() {
+    // Transfer the buffer settings from this raw_ostream back to the underlying
+    // stream.
+    if (!TheStream)
+      return;
+    if (size_t BufferSize = GetBufferSize())
+      TheStream->SetBufferSize(BufferSize);
+    else
+      TheStream->SetUnbuffered();
+  }
+};
+
+/// fouts() - This returns a reference to a formatted_raw_ostream for
+/// standard output.  Use it like: fouts() << "foo" << "bar";
+formatted_raw_ostream &fouts();
+
+/// ferrs() - This returns a reference to a formatted_raw_ostream for
+/// standard error.  Use it like: ferrs() << "foo" << "bar";
+formatted_raw_ostream &ferrs();
+
+/// fdbgs() - This returns a reference to a formatted_raw_ostream for
+/// debug output.  Use it like: fdbgs() << "foo" << "bar";
+formatted_raw_ostream &fdbgs();
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/GenericDomTree.h b/linux-x64/clang/include/llvm/Support/GenericDomTree.h
new file mode 100644
index 0000000..bcaac6b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/GenericDomTree.h
@@ -0,0 +1,907 @@
+//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a set of templates that efficiently compute a dominator
+/// tree over a generic graph. This is used typically in LLVM for fast
+/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
+/// graph types.
+///
+/// Unlike the ADT/* graph algorithms, the generic dominator tree places more
+/// requirements on the graph's NodeRef: NodeRef must be a pointer, and
+/// NodeRef->getParent() must return the parent node, which is also a pointer.
+///
+/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
+#define LLVM_SUPPORT_GENERICDOMTREE_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+#include <vector>
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+template <typename NodeT, bool IsPostDom>
+class DominatorTreeBase;
+
+namespace DomTreeBuilder {
+template <typename DomTreeT>
+struct SemiNCAInfo;
+}  // namespace DomTreeBuilder
+
+/// \brief Base class for the actual dominator tree node.
+template <class NodeT> class DomTreeNodeBase {
+  friend class PostDominatorTree;
+  friend class DominatorTreeBase<NodeT, false>;
+  friend class DominatorTreeBase<NodeT, true>;
+  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
+  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
+
+  NodeT *TheBB;
+  DomTreeNodeBase *IDom;
+  unsigned Level;
+  std::vector<DomTreeNodeBase *> Children;
+  mutable unsigned DFSNumIn = ~0;
+  mutable unsigned DFSNumOut = ~0;
+
+ public:
+  DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
+      : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
+
+  using iterator = typename std::vector<DomTreeNodeBase *>::iterator;
+  using const_iterator =
+      typename std::vector<DomTreeNodeBase *>::const_iterator;
+
+  iterator begin() { return Children.begin(); }
+  iterator end() { return Children.end(); }
+  const_iterator begin() const { return Children.begin(); }
+  const_iterator end() const { return Children.end(); }
+
+  NodeT *getBlock() const { return TheBB; }
+  DomTreeNodeBase *getIDom() const { return IDom; }
+  unsigned getLevel() const { return Level; }
+
+  const std::vector<DomTreeNodeBase *> &getChildren() const { return Children; }
+
+  std::unique_ptr<DomTreeNodeBase> addChild(
+      std::unique_ptr<DomTreeNodeBase> C) {
+    Children.push_back(C.get());
+    return C;
+  }
+
+  size_t getNumChildren() const { return Children.size(); }
+
+  void clearAllChildren() { Children.clear(); }
+
+  bool compare(const DomTreeNodeBase *Other) const {
+    if (getNumChildren() != Other->getNumChildren())
+      return true;
+
+    if (Level != Other->Level) return true;
+
+    SmallPtrSet<const NodeT *, 4> OtherChildren;
+    for (const DomTreeNodeBase *I : *Other) {
+      const NodeT *Nd = I->getBlock();
+      OtherChildren.insert(Nd);
+    }
+
+    for (const DomTreeNodeBase *I : *this) {
+      const NodeT *N = I->getBlock();
+      if (OtherChildren.count(N) == 0)
+        return true;
+    }
+    return false;
+  }
+
+  void setIDom(DomTreeNodeBase *NewIDom) {
+    assert(IDom && "No immediate dominator?");
+    if (IDom == NewIDom) return;
+
+    auto I = find(IDom->Children, this);
+    assert(I != IDom->Children.end() &&
+           "Not in immediate dominator children set!");
+    // I am no longer your child...
+    IDom->Children.erase(I);
+
+    // Switch to new dominator
+    IDom = NewIDom;
+    IDom->Children.push_back(this);
+
+    UpdateLevel();
+  }
+
+  /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
+  /// in the dominator tree. They are only guaranteed valid if
+  /// updateDFSNumbers() has been called.
+  unsigned getDFSNumIn() const { return DFSNumIn; }
+  unsigned getDFSNumOut() const { return DFSNumOut; }
+
+private:
+  // Return true if this node is dominated by other. Use this only if DFS info
+  // is valid.
+  bool DominatedBy(const DomTreeNodeBase *other) const {
+    return this->DFSNumIn >= other->DFSNumIn &&
+           this->DFSNumOut <= other->DFSNumOut;
+  }
+
+  void UpdateLevel() {
+    assert(IDom);
+    if (Level == IDom->Level + 1) return;
+
+    SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
+
+    while (!WorkStack.empty()) {
+      DomTreeNodeBase *Current = WorkStack.pop_back_val();
+      Current->Level = Current->IDom->Level + 1;
+
+      for (DomTreeNodeBase *C : *Current) {
+        assert(C->IDom);
+        if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
+      }
+    }
+  }
+};
+
+template <class NodeT>
+raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
+  if (Node->getBlock())
+    Node->getBlock()->printAsOperand(O, false);
+  else
+    O << " <<exit node>>";
+
+  O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
+    << Node->getLevel() << "]\n";
+
+  return O;
+}
+
+template <class NodeT>
+void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
+                  unsigned Lev) {
+  O.indent(2 * Lev) << "[" << Lev << "] " << N;
+  for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
+                                                       E = N->end();
+       I != E; ++I)
+    PrintDomTree<NodeT>(*I, O, Lev + 1);
+}
+
+namespace DomTreeBuilder {
+// The routines below are provided in a separate header but referenced here.
+template <typename DomTreeT>
+void Calculate(DomTreeT &DT);
+
+template <typename DomTreeT>
+void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
+                typename DomTreeT::NodePtr To);
+
+template <typename DomTreeT>
+void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
+                typename DomTreeT::NodePtr To);
+
+// UpdateKind and Update are used by the batch update API and it's easiest to
+// define them here.
+enum class UpdateKind : unsigned char { Insert, Delete };
+
+template <typename NodePtr>
+struct Update {
+  using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
+
+  NodePtr From;
+  NodeKindPair ToAndKind;
+
+  Update(UpdateKind Kind, NodePtr From, NodePtr To)
+      : From(From), ToAndKind(To, Kind) {}
+
+  UpdateKind getKind() const { return ToAndKind.getInt(); }
+  NodePtr getFrom() const { return From; }
+  NodePtr getTo() const { return ToAndKind.getPointer(); }
+  bool operator==(const Update &RHS) const {
+    return From == RHS.From && ToAndKind == RHS.ToAndKind;
+  }
+
+  friend raw_ostream &operator<<(raw_ostream &OS, const Update &U) {
+    OS << (U.getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
+    U.getFrom()->printAsOperand(OS, false);
+    OS << " -> ";
+    U.getTo()->printAsOperand(OS, false);
+    return OS;
+  }
+};
+
+template <typename DomTreeT>
+void ApplyUpdates(DomTreeT &DT,
+                  ArrayRef<typename DomTreeT::UpdateType> Updates);
+
+template <typename DomTreeT>
+bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
+}  // namespace DomTreeBuilder
+
+/// \brief Core dominator tree base class.
+///
+/// This class is a generic template over graph nodes. It is instantiated for
+/// various graphs in the LLVM IR or in the code generator.
+template <typename NodeT, bool IsPostDom>
+class DominatorTreeBase {
+ public:
+  static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
+                "Currently DominatorTreeBase supports only pointer nodes");
+  using NodeType = NodeT;
+  using NodePtr = NodeT *;
+  using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
+  static_assert(std::is_pointer<ParentPtr>::value,
+                "Currently NodeT's parent must be a pointer type");
+  using ParentType = typename std::remove_pointer<ParentPtr>::type;
+  static constexpr bool IsPostDominator = IsPostDom;
+
+  using UpdateType = DomTreeBuilder::Update<NodePtr>;
+  using UpdateKind = DomTreeBuilder::UpdateKind;
+  static constexpr UpdateKind Insert = UpdateKind::Insert;
+  static constexpr UpdateKind Delete = UpdateKind::Delete;
+
+  enum class VerificationLevel { Fast, Basic, Full };
+
+protected:
+  // Dominators always have a single root, postdominators can have more.
+  SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
+
+  using DomTreeNodeMapType =
+     DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
+  DomTreeNodeMapType DomTreeNodes;
+  DomTreeNodeBase<NodeT> *RootNode;
+  ParentPtr Parent = nullptr;
+
+  mutable bool DFSInfoValid = false;
+  mutable unsigned int SlowQueries = 0;
+
+  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
+
+ public:
+  DominatorTreeBase() {}
+
+  DominatorTreeBase(DominatorTreeBase &&Arg)
+      : Roots(std::move(Arg.Roots)),
+        DomTreeNodes(std::move(Arg.DomTreeNodes)),
+        RootNode(Arg.RootNode),
+        Parent(Arg.Parent),
+        DFSInfoValid(Arg.DFSInfoValid),
+        SlowQueries(Arg.SlowQueries) {
+    Arg.wipe();
+  }
+
+  DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
+    Roots = std::move(RHS.Roots);
+    DomTreeNodes = std::move(RHS.DomTreeNodes);
+    RootNode = RHS.RootNode;
+    Parent = RHS.Parent;
+    DFSInfoValid = RHS.DFSInfoValid;
+    SlowQueries = RHS.SlowQueries;
+    RHS.wipe();
+    return *this;
+  }
+
+  DominatorTreeBase(const DominatorTreeBase &) = delete;
+  DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
+
+  /// getRoots - Return the root blocks of the current CFG.  This may include
+  /// multiple blocks if we are computing post dominators.  For forward
+  /// dominators, this will always be a single block (the entry node).
+  ///
+  const SmallVectorImpl<NodeT *> &getRoots() const { return Roots; }
+
+  /// isPostDominator - Returns true if this analysis is based on
+  /// postdominators.
+  ///
+  bool isPostDominator() const { return IsPostDominator; }
+
+  /// compare - Return false if the other dominator tree base matches this
+  /// dominator tree base. Otherwise return true.
+  bool compare(const DominatorTreeBase &Other) const {
+    if (Parent != Other.Parent) return true;
+
+    if (Roots.size() != Other.Roots.size())
+      return true;
+
+    if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
+      return true;
+
+    const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
+    if (DomTreeNodes.size() != OtherDomTreeNodes.size())
+      return true;
+
+    for (const auto &DomTreeNode : DomTreeNodes) {
+      NodeT *BB = DomTreeNode.first;
+      typename DomTreeNodeMapType::const_iterator OI =
+          OtherDomTreeNodes.find(BB);
+      if (OI == OtherDomTreeNodes.end())
+        return true;
+
+      DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
+      DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
+
+      if (MyNd.compare(&OtherNd))
+        return true;
+    }
+
+    return false;
+  }
+
+  void releaseMemory() { reset(); }
+
+  /// getNode - return the (Post)DominatorTree node for the specified basic
+  /// block.  This is the same as using operator[] on this class.  The result
+  /// may (but is not required to) be null for a forward (backwards)
+  /// statically unreachable block.
+  DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const {
+    auto I = DomTreeNodes.find(BB);
+    if (I != DomTreeNodes.end())
+      return I->second.get();
+    return nullptr;
+  }
+
+  /// See getNode.
+  DomTreeNodeBase<NodeT> *operator[](NodeT *BB) const { return getNode(BB); }
+
+  /// getRootNode - This returns the entry node for the CFG of the function.  If
+  /// this tree represents the post-dominance relations for a function, however,
+  /// this root may be a node with the block == NULL.  This is the case when
+  /// there are multiple exit nodes from a particular function.  Consumers of
+  /// post-dominance information must be capable of dealing with this
+  /// possibility.
+  ///
+  DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
+  const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
+
+  /// Get all nodes dominated by R, including R itself.
+  void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
+    Result.clear();
+    const DomTreeNodeBase<NodeT> *RN = getNode(R);
+    if (!RN)
+      return; // If R is unreachable, it will not be present in the DOM tree.
+    SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
+    WL.push_back(RN);
+
+    while (!WL.empty()) {
+      const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
+      Result.push_back(N->getBlock());
+      WL.append(N->begin(), N->end());
+    }
+  }
+
+  /// properlyDominates - Returns true iff A dominates B and A != B.
+  /// Note that this is not a constant time operation!
+  ///
+  bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
+                         const DomTreeNodeBase<NodeT> *B) const {
+    if (!A || !B)
+      return false;
+    if (A == B)
+      return false;
+    return dominates(A, B);
+  }
+
+  bool properlyDominates(const NodeT *A, const NodeT *B) const;
+
+  /// isReachableFromEntry - Return true if A is dominated by the entry
+  /// block of the function containing it.
+  bool isReachableFromEntry(const NodeT *A) const {
+    assert(!this->isPostDominator() &&
+           "This is not implemented for post dominators");
+    return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
+  }
+
+  bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
+
+  /// dominates - Returns true iff A dominates B.  Note that this is not a
+  /// constant time operation!
+  ///
+  bool dominates(const DomTreeNodeBase<NodeT> *A,
+                 const DomTreeNodeBase<NodeT> *B) const {
+    // A node trivially dominates itself.
+    if (B == A)
+      return true;
+
+    // An unreachable node is dominated by anything.
+    if (!isReachableFromEntry(B))
+      return true;
+
+    // And dominates nothing.
+    if (!isReachableFromEntry(A))
+      return false;
+
+    if (B->getIDom() == A) return true;
+
+    if (A->getIDom() == B) return false;
+
+    // A can only dominate B if it is higher in the tree.
+    if (A->getLevel() >= B->getLevel()) return false;
+
+    // Compare the result of the tree walk and the dfs numbers, if expensive
+    // checks are enabled.
+#ifdef EXPENSIVE_CHECKS
+    assert((!DFSInfoValid ||
+            (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
+           "Tree walk disagrees with dfs numbers!");
+#endif
+
+    if (DFSInfoValid)
+      return B->DominatedBy(A);
+
+    // If we end up with too many slow queries, just update the
+    // DFS numbers on the theory that we are going to keep querying.
+    SlowQueries++;
+    if (SlowQueries > 32) {
+      updateDFSNumbers();
+      return B->DominatedBy(A);
+    }
+
+    return dominatedBySlowTreeWalk(A, B);
+  }
+
+  bool dominates(const NodeT *A, const NodeT *B) const;
+
+  NodeT *getRoot() const {
+    assert(this->Roots.size() == 1 && "Should always have entry node!");
+    return this->Roots[0];
+  }
+
+  /// findNearestCommonDominator - Find nearest common dominator basic block
+  /// for basic block A and B. If there is no such block then return nullptr.
+  NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
+    assert(A && B && "Pointers are not valid");
+    assert(A->getParent() == B->getParent() &&
+           "Two blocks are not in same function");
+
+    // If either A or B is the entry block, then it is the nearest common
+    // dominator (for forward-dominators).
+    if (!isPostDominator()) {
+      NodeT &Entry = A->getParent()->front();
+      if (A == &Entry || B == &Entry)
+        return &Entry;
+    }
+
+    DomTreeNodeBase<NodeT> *NodeA = getNode(A);
+    DomTreeNodeBase<NodeT> *NodeB = getNode(B);
+
+    if (!NodeA || !NodeB) return nullptr;
+
+    // Use level information to go up the tree until the levels match. Then
+    // continue going up til we arrive at the same node.
+    while (NodeA && NodeA != NodeB) {
+      if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
+
+      NodeA = NodeA->IDom;
+    }
+
+    return NodeA ? NodeA->getBlock() : nullptr;
+  }
+
+  const NodeT *findNearestCommonDominator(const NodeT *A,
+                                          const NodeT *B) const {
+    // Cast away the const qualifiers here. This is ok since
+    // const is re-introduced on the return type.
+    return findNearestCommonDominator(const_cast<NodeT *>(A),
+                                      const_cast<NodeT *>(B));
+  }
+
+  bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
+    return isPostDominator() && !A->getBlock();
+  }
+
+  //===--------------------------------------------------------------------===//
+  // API to update (Post)DominatorTree information based on modifications to
+  // the CFG...
+
+  /// Inform the dominator tree about a sequence of CFG edge insertions and
+  /// deletions and perform a batch update on the tree.
+  ///
+  /// This function should be used when there were multiple CFG updates after
+  /// the last dominator tree update. It takes care of performing the updates
+  /// in sync with the CFG and optimizes away the redundant operations that
+  /// cancel each other.
+  /// The function expects the sequence of updates to be balanced, e.g.:
+  ///  - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
+  ///    logically it results in a single insertion.
+  ///  - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
+  ///    sense to insert the same edge twice.
+  ///
+  /// What's more, the function assumes that it's safe to ask every node in the
+  /// CFG about its children and inverse children. This implies that deletions
+  /// of CFG edges must not delete the CFG nodes before calling this function.
+  ///
+  /// Batch updates should be generally faster when performing longer sequences
+  /// of updates than calling insertEdge/deleteEdge manually multiple times, as
+  /// it can reorder the updates and remove redundant ones internally.
+  /// The batch updater is also able to detect sequences of zero and exactly one
+  /// update -- it's optimized to do less work in these cases.
+  ///
+  /// Note that for postdominators it automatically takes care of applying
+  /// updates on reverse edges internally (so there's no need to swap the
+  /// From and To pointers when constructing DominatorTree::UpdateType).
+  /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
+  /// with the same template parameter T.
+  ///
+  /// \param Updates An unordered sequence of updates to perform.
+  ///
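+  /// For example (illustrative; A, B, and C are existing CFG nodes and DT is
+  /// an already-computed tree):
+  /// \code
+  ///   SmallVector<UpdateType, 2> Updates = {{Insert, A, B}, {Delete, B, C}};
+  ///   DT.applyUpdates(Updates);
+  /// \endcode
+  ///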
+  void applyUpdates(ArrayRef<UpdateType> Updates) {
+    DomTreeBuilder::ApplyUpdates(*this, Updates);
+  }
+
+  /// Inform the dominator tree about a CFG edge insertion and update the tree.
+  ///
+  /// This function has to be called just before or just after making the update
+  /// on the actual CFG. There cannot be any other updates that the dominator
+  /// tree doesn't know about.
+  ///
+  /// Note that for postdominators it automatically takes care of inserting
+  /// a reverse edge internally (so there's no need to swap the parameters).
+  ///
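+  /// A minimal sketch (illustrative; assumes the CFG edge BB -> NewSucc was
+  /// just created):
+  /// \code
+  ///   DT.insertEdge(BB, NewSucc);
+  /// \endcode
+  ///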
+  void insertEdge(NodeT *From, NodeT *To) {
+    assert(From);
+    assert(To);
+    assert(From->getParent() == Parent);
+    assert(To->getParent() == Parent);
+    DomTreeBuilder::InsertEdge(*this, From, To);
+  }
+
+  /// Inform the dominator tree about a CFG edge deletion and update the tree.
+  ///
+  /// This function has to be called just after making the update on the actual
+  /// CFG. In DEBUG mode, an internal function checks that the edge no longer
+  /// exists in the CFG. There cannot be any other updates that the dominator
+  /// tree doesn't know about.
+  ///
+  /// Note that for postdominators it automatically takes care of deleting
+  /// a reverse edge internally (so there's no need to swap the parameters).
+  ///
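+  /// A minimal sketch (illustrative; assumes the CFG edge BB -> DeadSucc was
+  /// just removed):
+  /// \code
+  ///   DT.deleteEdge(BB, DeadSucc);
+  /// \endcode
+  ///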
+  void deleteEdge(NodeT *From, NodeT *To) {
+    assert(From);
+    assert(To);
+    assert(From->getParent() == Parent);
+    assert(To->getParent() == Parent);
+    DomTreeBuilder::DeleteEdge(*this, From, To);
+  }
+
+  /// Add a new node to the dominator tree information.
+  ///
+  /// This creates a new node as a child of DomBB dominator node, linking it
+  /// into the children list of the immediate dominator.
+  ///
+  /// \param BB New node in CFG.
+  /// \param DomBB CFG node that is dominator for BB.
+  /// \returns New dominator tree node that represents new CFG node.
+  ///
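+  /// A minimal usage sketch (illustrative names; assumes NewBB was just
+  /// created with DomBB as its sole predecessor):
+  /// \code
+  ///   DomTreeNodeBase<NodeT> *N = DT.addNewBlock(NewBB, DomBB);
+  ///   assert(N->getIDom() == DT.getNode(DomBB));
+  /// \endcode
+  ///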
+  DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
+    assert(getNode(BB) == nullptr && "Block already in dominator tree!");
+    DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
+    assert(IDomNode && "No immediate dominator specified for block!");
+    DFSInfoValid = false;
+    return (DomTreeNodes[BB] = IDomNode->addChild(
+                llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
+  }
+
+  /// Add a new node to the forward dominator tree and make it a new root.
+  ///
+  /// \param BB New node in CFG.
+  /// \returns New dominator tree node that represents new CFG node.
+  ///
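+  /// A minimal usage sketch (illustrative; assumes NewEntry is a freshly
+  /// created block that branches to the previous entry block, which then
+  /// becomes its child in the tree):
+  /// \code
+  ///   DomTreeNodeBase<NodeT> *NewRoot = DT.setNewRoot(NewEntry);
+  ///   assert(DT.getRootNode() == NewRoot);
+  /// \endcode
+  ///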
+  DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
+    assert(getNode(BB) == nullptr && "Block already in dominator tree!");
+    assert(!this->isPostDominator() &&
+           "Cannot change root of post-dominator tree");
+    DFSInfoValid = false;
+    DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
+      llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
+    if (Roots.empty()) {
+      addRoot(BB);
+    } else {
+      assert(Roots.size() == 1);
+      NodeT *OldRoot = Roots.front();
+      auto &OldNode = DomTreeNodes[OldRoot];
+      OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
+      OldNode->IDom = NewNode;
+      OldNode->UpdateLevel();
+      Roots[0] = BB;
+    }
+    return RootNode = NewNode;
+  }
+
+  /// changeImmediateDominator - This method is used to update the dominator
+  /// tree information when a node's immediate dominator changes.
+  ///
+  void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
+                                DomTreeNodeBase<NodeT> *NewIDom) {
+    assert(N && NewIDom && "Cannot change null node pointers!");
+    DFSInfoValid = false;
+    N->setIDom(NewIDom);
+  }
+
+  void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
+    changeImmediateDominator(getNode(BB), getNode(NewBB));
+  }
+
+  /// eraseNode - Removes a node from the dominator tree. Block must not
+  /// dominate any other blocks. Removes node from its immediate dominator's
+  /// children list. Deletes dominator node associated with basic block BB.
+  void eraseNode(NodeT *BB) {
+    DomTreeNodeBase<NodeT> *Node = getNode(BB);
+    assert(Node && "Removing node that isn't in dominator tree.");
+    assert(Node->getChildren().empty() && "Node is not a leaf node.");
+
+    DFSInfoValid = false;
+
+    // Remove node from immediate dominator's children list.
+    DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
+    if (IDom) {
+      const auto I = find(IDom->Children, Node);
+      assert(I != IDom->Children.end() &&
+             "Not in immediate dominator children set!");
+      // I am no longer your child...
+      IDom->Children.erase(I);
+    }
+
+    DomTreeNodes.erase(BB);
+
+    if (!IsPostDom) return;
+
+    // Remember to update PostDominatorTree roots.
+    auto RIt = llvm::find(Roots, BB);
+    if (RIt != Roots.end()) {
+      std::swap(*RIt, Roots.back());
+      Roots.pop_back();
+    }
+  }
+
+  /// splitBlock - NewBB is the result of splitting a block: it now has exactly
+  /// one successor. Update the dominator tree to reflect this change.
+  void splitBlock(NodeT *NewBB) {
+    if (IsPostDominator)
+      Split<Inverse<NodeT *>>(NewBB);
+    else
+      Split<NodeT *>(NewBB);
+  }
+
+  /// print - Convert to human readable form
+  ///
+  void print(raw_ostream &O) const {
+    O << "=============================--------------------------------\n";
+    if (IsPostDominator)
+      O << "Inorder PostDominator Tree: ";
+    else
+      O << "Inorder Dominator Tree: ";
+    if (!DFSInfoValid)
+      O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
+    O << "\n";
+
+    // The postdom tree can have a null root if there are no returns.
+    if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
+    if (IsPostDominator) {
+      O << "Roots: ";
+      for (const NodePtr Block : Roots) {
+        Block->printAsOperand(O, false);
+        O << " ";
+      }
+      O << "\n";
+    }
+  }
+
+public:
+  /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
+  /// the dominator tree in DFS order.
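+  ///
+  /// Once valid, these numbers allow dominance to be decided by an O(1)
+  /// interval-containment check (which the cached query fast path relies on):
+  /// A dominates B iff
+  /// \code
+  ///   A->DFSNumIn <= B->DFSNumIn && B->DFSNumOut <= A->DFSNumOut
+  /// \endcode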
+  void updateDFSNumbers() const {
+    if (DFSInfoValid) {
+      SlowQueries = 0;
+      return;
+    }
+
+    SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
+                          typename DomTreeNodeBase<NodeT>::const_iterator>,
+                32> WorkStack;
+
+    const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
+    assert((!Parent || ThisRoot) && "Empty constructed DomTree");
+    if (!ThisRoot)
+      return;
+
+    // Both dominators and postdominators have a single root node. In the case
+    // of PostDominatorTree, this node is a virtual root.
+    WorkStack.push_back({ThisRoot, ThisRoot->begin()});
+
+    unsigned DFSNum = 0;
+    ThisRoot->DFSNumIn = DFSNum++;
+
+    while (!WorkStack.empty()) {
+      const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
+      const auto ChildIt = WorkStack.back().second;
+
+      // If we visited all of the children of this node, "recurse" back up the
+      // stack setting the DFSNumOut.
+      if (ChildIt == Node->end()) {
+        Node->DFSNumOut = DFSNum++;
+        WorkStack.pop_back();
+      } else {
+        // Otherwise, recursively visit this child.
+        const DomTreeNodeBase<NodeT> *Child = *ChildIt;
+        ++WorkStack.back().second;
+
+        WorkStack.push_back({Child, Child->begin()});
+        Child->DFSNumIn = DFSNum++;
+      }
+    }
+
+    SlowQueries = 0;
+    DFSInfoValid = true;
+  }
+
+  /// recalculate - compute a dominator tree for the given function
+  void recalculate(ParentType &Func) {
+    Parent = &Func;
+    DomTreeBuilder::Calculate(*this);
+  }
+
+  /// verify - checks if the tree is correct. There are three levels of
+  /// verification:
+  ///  - Full --  verifies if the tree is correct by making sure all the
+  ///             properties (including the parent and the sibling property)
+  ///             hold.
+  ///             Takes O(N^3) time.
+  ///
+  ///  - Basic -- checks if the tree is correct, but compares it to a freshly
+  ///             constructed tree instead of checking the sibling property.
+  ///             Takes O(N^2) time.
+  ///
+  ///  - Fast  -- checks basic tree structure and compares it with a freshly
+  ///             constructed tree.
+  ///             Takes O(N^2) time worst case, but is faster in practice (same
+  ///             as tree construction).
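+  ///
+  /// Typical use in assertions (sketch):
+  /// \code
+  ///   assert(DT.verify() && "Invalid dominator tree!");
+  /// \endcode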
+  bool verify(VerificationLevel VL = VerificationLevel::Full) const {
+    return DomTreeBuilder::Verify(*this, VL);
+  }
+
+protected:
+  void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
+
+  void reset() {
+    DomTreeNodes.clear();
+    Roots.clear();
+    RootNode = nullptr;
+    Parent = nullptr;
+    DFSInfoValid = false;
+    SlowQueries = 0;
+  }
+
+  // NewBB is the result of splitting a block: it now has exactly one
+  // successor. Update the dominator tree to reflect this change.
+  template <class N>
+  void Split(typename GraphTraits<N>::NodeRef NewBB) {
+    using GraphT = GraphTraits<N>;
+    using NodeRef = typename GraphT::NodeRef;
+    assert(std::distance(GraphT::child_begin(NewBB),
+                         GraphT::child_end(NewBB)) == 1 &&
+           "NewBB should have a single successor!");
+    NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
+
+    std::vector<NodeRef> PredBlocks;
+    for (const auto &Pred : children<Inverse<N>>(NewBB))
+      PredBlocks.push_back(Pred);
+
+    assert(!PredBlocks.empty() && "No predblocks?");
+
+    bool NewBBDominatesNewBBSucc = true;
+    for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
+      if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
+          isReachableFromEntry(Pred)) {
+        NewBBDominatesNewBBSucc = false;
+        break;
+      }
+    }
+
+    // Find NewBB's immediate dominator and create new dominator tree node for
+    // NewBB.
+    NodeT *NewBBIDom = nullptr;
+    unsigned i = 0;
+    for (i = 0; i < PredBlocks.size(); ++i)
+      if (isReachableFromEntry(PredBlocks[i])) {
+        NewBBIDom = PredBlocks[i];
+        break;
+      }
+
+    // It's possible that none of the predecessors of NewBB are reachable;
+    // in that case, NewBB itself is unreachable, so nothing needs to be
+    // changed.
+    if (!NewBBIDom) return;
+
+    for (i = i + 1; i < PredBlocks.size(); ++i) {
+      if (isReachableFromEntry(PredBlocks[i]))
+        NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
+    }
+
+    // Create the new dominator tree node... and set the idom of NewBB.
+    DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
+
+    // If NewBB strictly dominates other blocks, then it is now the immediate
+    // dominator of NewBBSucc.  Update the dominator tree as appropriate.
+    if (NewBBDominatesNewBBSucc) {
+      DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
+      changeImmediateDominator(NewBBSuccNode, NewBBNode);
+    }
+  }
+
+private:
+  bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
+                               const DomTreeNodeBase<NodeT> *B) const {
+    assert(A != B);
+    assert(isReachableFromEntry(B));
+    assert(isReachableFromEntry(A));
+
+    const DomTreeNodeBase<NodeT> *IDom;
+    while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B)
+      B = IDom;  // Walk up the tree
+    return IDom != nullptr;
+  }
+
+  /// \brief Wipe this tree's state without releasing any resources.
+  ///
+  /// This is essentially a post-move helper only. It leaves the object in an
+  /// assignable and destroyable state, but otherwise invalid.
+  void wipe() {
+    DomTreeNodes.clear();
+    RootNode = nullptr;
+    Parent = nullptr;
+  }
+};
+
+template <typename T>
+using DomTreeBase = DominatorTreeBase<T, false>;
+
+template <typename T>
+using PostDomTreeBase = DominatorTreeBase<T, true>;
+
+// These two functions are declared out of line as a workaround for building
+// with old (< r147295) versions of clang because of pr11642.
+template <typename NodeT, bool IsPostDom>
+bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
+                                                    const NodeT *B) const {
+  if (A == B)
+    return true;
+
+  // Cast away the const qualifiers here. This is ok since
+  // this function doesn't actually return the values returned
+  // from getNode.
+  return dominates(getNode(const_cast<NodeT *>(A)),
+                   getNode(const_cast<NodeT *>(B)));
+}
+template <typename NodeT, bool IsPostDom>
+bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
+    const NodeT *A, const NodeT *B) const {
+  if (A == B)
+    return false;
+
+  // Cast away the const qualifiers here. This is ok since
+  // this function doesn't actually return the values returned
+  // from getNode.
+  return dominates(getNode(const_cast<NodeT *>(A)),
+                   getNode(const_cast<NodeT *>(B)));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_GENERICDOMTREE_H
diff --git a/linux-x64/clang/include/llvm/Support/GenericDomTreeConstruction.h b/linux-x64/clang/include/llvm/Support/GenericDomTreeConstruction.h
new file mode 100644
index 0000000..7ec0638
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/GenericDomTreeConstruction.h
@@ -0,0 +1,1674 @@
+//===- GenericDomTreeConstruction.h - Dominator Calculation ------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Generic dominator tree construction - This file provides routines to
+/// construct immediate dominator information for a flow-graph based on the
+/// Semi-NCA algorithm described in this dissertation:
+///
+///   Linear-Time Algorithms for Dominators and Related Problems
+///   Loukas Georgiadis, Princeton University, November 2005, pp. 21-23:
+///   ftp://ftp.cs.princeton.edu/reports/2005/737.pdf
+///
+/// This implements the O(n*log(n)) versions of EVAL and LINK, because it turns
+/// out that the theoretically slower O(n*log(n)) implementation is actually
+/// faster than the almost-linear O(n*alpha(n)) version, even for large CFGs.
+///
+/// The file uses the Depth Based Search algorithm to perform incremental
+/// updates (insertion and deletions). The implemented algorithm is based on
+/// this publication:
+///
+///   An Experimental Study of Dynamic Dominators
+///   Loukas Georgiadis, et al., April 12 2016, pp. 5-7, 9-10:
+///   https://arxiv.org/pdf/1604.02711.pdf
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
+#define LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
+
+#include <queue>
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTree.h"
+
+#define DEBUG_TYPE "dom-tree-builder"
+
+namespace llvm {
+namespace DomTreeBuilder {
+
+template <typename DomTreeT>
+struct SemiNCAInfo {
+  using NodePtr = typename DomTreeT::NodePtr;
+  using NodeT = typename DomTreeT::NodeType;
+  using TreeNodePtr = DomTreeNodeBase<NodeT> *;
+  using RootsT = decltype(DomTreeT::Roots);
+  static constexpr bool IsPostDom = DomTreeT::IsPostDominator;
+
+  // Information record used by Semi-NCA during tree construction.
+  struct InfoRec {
+    unsigned DFSNum = 0;
+    unsigned Parent = 0;
+    unsigned Semi = 0;
+    NodePtr Label = nullptr;
+    NodePtr IDom = nullptr;
+    SmallVector<NodePtr, 2> ReverseChildren;
+  };
+
+  // Number to node mapping is 1-based. Initialize the mapping to start with
+  // a dummy element.
+  std::vector<NodePtr> NumToNode = {nullptr};
+  DenseMap<NodePtr, InfoRec> NodeToInfo;
+
+  using UpdateT = typename DomTreeT::UpdateType;
+  struct BatchUpdateInfo {
+    SmallVector<UpdateT, 4> Updates;
+    using NodePtrAndKind = PointerIntPair<NodePtr, 1, UpdateKind>;
+
+    // In order to be able to walk a CFG that is out of sync with the CFG
+    // DominatorTree last knew about, use the list of updates to reconstruct
+    // previous CFG versions of the current CFG. For each node, we store a set
+    // of its virtually added/deleted future successors and predecessors.
+    // Note that these children are from the future relative to what the
+    // DominatorTree knows about -- using them gets us a snapshot of the
+    // CFG from the past (relative to the current state of the CFG).
+    DenseMap<NodePtr, SmallDenseSet<NodePtrAndKind, 4>> FutureSuccessors;
+    DenseMap<NodePtr, SmallDenseSet<NodePtrAndKind, 4>> FuturePredecessors;
+    // Remembers if the whole tree was recalculated at some point during the
+    // current batch update.
+    bool IsRecalculated = false;
+  };
+
+  BatchUpdateInfo *BatchUpdates;
+  using BatchUpdatePtr = BatchUpdateInfo *;
+
+  // If BUI is a nullptr, then there's no batch update in progress.
+  SemiNCAInfo(BatchUpdatePtr BUI) : BatchUpdates(BUI) {}
+
+  void clear() {
+    NumToNode = {nullptr}; // Restore to initial state with a dummy start node.
+    NodeToInfo.clear();
+    // Don't reset the pointer to BatchUpdateInfo here -- if there's an update
+    // in progress, we need this information to continue it.
+  }
+
+  template <bool Inverse>
+  struct ChildrenGetter {
+    using ResultTy = SmallVector<NodePtr, 8>;
+
+    static ResultTy Get(NodePtr N, std::integral_constant<bool, false>) {
+      auto RChildren = reverse(children<NodePtr>(N));
+      return ResultTy(RChildren.begin(), RChildren.end());
+    }
+
+    static ResultTy Get(NodePtr N, std::integral_constant<bool, true>) {
+      auto IChildren = inverse_children<NodePtr>(N);
+      return ResultTy(IChildren.begin(), IChildren.end());
+    }
+
+    using Tag = std::integral_constant<bool, Inverse>;
+
+    // The function below is the core part of the batch updater. It allows the
+    // Depth Based Search algorithm to perform incremental updates in lockstep
+    // with updates to the CFG. We emulate lockstep CFG updates by computing
+    // its previous snapshots through reverse-applying the future updates.
+    static ResultTy Get(NodePtr N, BatchUpdatePtr BUI) {
+      ResultTy Res = Get(N, Tag());
+      // If there's no batch update in progress, simply return node's children.
+      if (!BUI) return Res;
+
+      // CFG children are actually its *most current* children, and we have to
+      // reverse-apply the future updates to get the node's children at the
+      // point in time the update was performed.
+      auto &FutureChildren = (Inverse != IsPostDom) ? BUI->FuturePredecessors
+                                                    : BUI->FutureSuccessors;
+      auto FCIt = FutureChildren.find(N);
+      if (FCIt == FutureChildren.end()) return Res;
+
+      for (auto ChildAndKind : FCIt->second) {
+        const NodePtr Child = ChildAndKind.getPointer();
+        const UpdateKind UK = ChildAndKind.getInt();
+
+        // Reverse-apply the future update.
+        if (UK == UpdateKind::Insert) {
+          // If there's an insertion in the future, it means that the edge must
+          // exist in the current CFG, but was not present in it before.
+          assert(llvm::find(Res, Child) != Res.end()
+                 && "Expected child not found in the CFG");
+          Res.erase(std::remove(Res.begin(), Res.end(), Child), Res.end());
+          DEBUG(dbgs() << "\tHiding edge " << BlockNamePrinter(N) << " -> "
+                       << BlockNamePrinter(Child) << "\n");
+        } else {
+          // If there's a deletion in the future, it means that the edge cannot
+          // exist in the current CFG, but existed in it before.
+          assert(llvm::find(Res, Child) == Res.end() &&
+                 "Unexpected child found in the CFG");
+          DEBUG(dbgs() << "\tShowing virtual edge " << BlockNamePrinter(N)
+                       << " -> " << BlockNamePrinter(Child) << "\n");
+          Res.push_back(Child);
+        }
+      }
+
+      return Res;
+    }
+  };
+
+  NodePtr getIDom(NodePtr BB) const {
+    auto InfoIt = NodeToInfo.find(BB);
+    if (InfoIt == NodeToInfo.end()) return nullptr;
+
+    return InfoIt->second.IDom;
+  }
+
+  TreeNodePtr getNodeForBlock(NodePtr BB, DomTreeT &DT) {
+    if (TreeNodePtr Node = DT.getNode(BB)) return Node;
+
+    // Haven't calculated this node yet?  Get or calculate the node for the
+    // immediate dominator.
+    NodePtr IDom = getIDom(BB);
+
+    assert(IDom || DT.DomTreeNodes[nullptr]);
+    TreeNodePtr IDomNode = getNodeForBlock(IDom, DT);
+
+    // Add a new tree node for this NodeT, and link it as a child of
+    // IDomNode
+    return (DT.DomTreeNodes[BB] = IDomNode->addChild(
+        llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode)))
+        .get();
+  }
+
+  static bool AlwaysDescend(NodePtr, NodePtr) { return true; }
+
+  struct BlockNamePrinter {
+    NodePtr N;
+
+    BlockNamePrinter(NodePtr Block) : N(Block) {}
+    BlockNamePrinter(TreeNodePtr TN) : N(TN ? TN->getBlock() : nullptr) {}
+
+    friend raw_ostream &operator<<(raw_ostream &O, const BlockNamePrinter &BP) {
+      if (!BP.N)
+        O << "nullptr";
+      else
+        BP.N->printAsOperand(O, false);
+
+      return O;
+    }
+  };
+
+  // Custom DFS implementation which can skip nodes based on a provided
+  // predicate. It also collects ReverseChildren so that we don't have to spend
+  // time getting predecessors in SemiNCA.
+  //
+  // If IsReverse is set to true, the DFS walk will be performed backwards
+  // relative to IsPostDom -- using reverse edges for dominators and forward
+  // edges for postdominators.
+  template <bool IsReverse = false, typename DescendCondition>
+  unsigned runDFS(NodePtr V, unsigned LastNum, DescendCondition Condition,
+                  unsigned AttachToNum) {
+    assert(V);
+    SmallVector<NodePtr, 64> WorkList = {V};
+    if (NodeToInfo.count(V) != 0) NodeToInfo[V].Parent = AttachToNum;
+
+    while (!WorkList.empty()) {
+      const NodePtr BB = WorkList.pop_back_val();
+      auto &BBInfo = NodeToInfo[BB];
+
+      // Visited nodes always have positive DFS numbers.
+      if (BBInfo.DFSNum != 0) continue;
+      BBInfo.DFSNum = BBInfo.Semi = ++LastNum;
+      BBInfo.Label = BB;
+      NumToNode.push_back(BB);
+
+      constexpr bool Direction = IsReverse != IsPostDom;  // XOR.
+      for (const NodePtr Succ :
+           ChildrenGetter<Direction>::Get(BB, BatchUpdates)) {
+        const auto SIT = NodeToInfo.find(Succ);
+        // Don't visit nodes more than once but remember to collect
+        // ReverseChildren.
+        if (SIT != NodeToInfo.end() && SIT->second.DFSNum != 0) {
+          if (Succ != BB) SIT->second.ReverseChildren.push_back(BB);
+          continue;
+        }
+
+        if (!Condition(BB, Succ)) continue;
+
+        // It's fine to add Succ to the map, because we know that it will be
+        // visited later.
+        auto &SuccInfo = NodeToInfo[Succ];
+        WorkList.push_back(Succ);
+        SuccInfo.Parent = LastNum;
+        SuccInfo.ReverseChildren.push_back(BB);
+      }
+    }
+
+    return LastNum;
+  }
+
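+  // EVAL from Semi-NCA/Lengauer-Tarjan, with iterative path compression:
+  // returns the label of the node with the minimal semidominator number on
+  // the spanning-tree path from VIn up to (but not including) nodes linked
+  // before LastLinked, updating Parent and Label fields along the way.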
+  NodePtr eval(NodePtr VIn, unsigned LastLinked) {
+    auto &VInInfo = NodeToInfo[VIn];
+    if (VInInfo.DFSNum < LastLinked)
+      return VIn;
+
+    SmallVector<NodePtr, 32> Work;
+    SmallPtrSet<NodePtr, 32> Visited;
+
+    if (VInInfo.Parent >= LastLinked)
+      Work.push_back(VIn);
+
+    while (!Work.empty()) {
+      NodePtr V = Work.back();
+      auto &VInfo = NodeToInfo[V];
+      NodePtr VAncestor = NumToNode[VInfo.Parent];
+
+      // Process Ancestor first
+      if (Visited.insert(VAncestor).second && VInfo.Parent >= LastLinked) {
+        Work.push_back(VAncestor);
+        continue;
+      }
+      Work.pop_back();
+
+      // Update VInfo based on Ancestor info
+      if (VInfo.Parent < LastLinked)
+        continue;
+
+      auto &VAInfo = NodeToInfo[VAncestor];
+      NodePtr VAncestorLabel = VAInfo.Label;
+      NodePtr VLabel = VInfo.Label;
+      if (NodeToInfo[VAncestorLabel].Semi < NodeToInfo[VLabel].Semi)
+        VInfo.Label = VAncestorLabel;
+      VInfo.Parent = VAInfo.Parent;
+    }
+
+    return VInInfo.Label;
+  }
+
+  // This function requires DFS to be run before calling it.
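+  // (Recall: the semidominator of a node W is the node with the smallest DFS
+  // number from which there is a path to W whose intermediate nodes all have
+  // DFS numbers greater than W's.)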
+  void runSemiNCA(DomTreeT &DT, const unsigned MinLevel = 0) {
+    const unsigned NextDFSNum(NumToNode.size());
+    // Initialize IDoms to spanning tree parents.
+    for (unsigned i = 1; i < NextDFSNum; ++i) {
+      const NodePtr V = NumToNode[i];
+      auto &VInfo = NodeToInfo[V];
+      VInfo.IDom = NumToNode[VInfo.Parent];
+    }
+
+    // Step #1: Calculate the semidominators of all vertices.
+    for (unsigned i = NextDFSNum - 1; i >= 2; --i) {
+      NodePtr W = NumToNode[i];
+      auto &WInfo = NodeToInfo[W];
+
+      // Initialize the semi dominator to point to the parent node.
+      WInfo.Semi = WInfo.Parent;
+      for (const auto &N : WInfo.ReverseChildren) {
+        if (NodeToInfo.count(N) == 0)  // Skip unreachable predecessors.
+          continue;
+
+        const TreeNodePtr TN = DT.getNode(N);
+        // Skip predecessors whose level is above the subtree we are processing.
+        if (TN && TN->getLevel() < MinLevel)
+          continue;
+
+        unsigned SemiU = NodeToInfo[eval(N, i + 1)].Semi;
+        if (SemiU < WInfo.Semi) WInfo.Semi = SemiU;
+      }
+    }
+
+    // Step #2: Explicitly define the immediate dominator of each vertex.
+    //          IDom[i] = NCA(SDom[i], SpanningTreeParent(i)).
+    // Note that the parents were stored in IDoms and later got invalidated
+    // during path compression in Eval.
+    for (unsigned i = 2; i < NextDFSNum; ++i) {
+      const NodePtr W = NumToNode[i];
+      auto &WInfo = NodeToInfo[W];
+      const unsigned SDomNum = NodeToInfo[NumToNode[WInfo.Semi]].DFSNum;
+      NodePtr WIDomCandidate = WInfo.IDom;
+      while (NodeToInfo[WIDomCandidate].DFSNum > SDomNum)
+        WIDomCandidate = NodeToInfo[WIDomCandidate].IDom;
+
+      WInfo.IDom = WIDomCandidate;
+    }
+  }
+
+  // PostDominatorTree always has a virtual root that represents a virtual CFG
+  // node that serves as a single exit from the function. All the other exits
+  // (CFG nodes with no successors) and nodes in infinite loops are logically
+  // connected to this virtual CFG exit node.
+  // This function maps a nullptr CFG node to the virtual root tree node.
+  void addVirtualRoot() {
+    assert(IsPostDom && "Only postdominators have a virtual root");
+    assert(NumToNode.size() == 1 && "SNCAInfo must be freshly constructed");
+
+    auto &BBInfo = NodeToInfo[nullptr];
+    BBInfo.DFSNum = BBInfo.Semi = 1;
+    BBInfo.Label = nullptr;
+
+    NumToNode.push_back(nullptr);  // NumToNode[1] = nullptr;
+  }
+
+  // For postdominators, nodes with no forward successors are trivial roots that
+  // are always selected as tree roots. Roots with forward successors correspond
+  // to CFG nodes within infinite loops.
+  static bool HasForwardSuccessors(const NodePtr N, BatchUpdatePtr BUI) {
+    assert(N && "N must be a valid node");
+    return !ChildrenGetter<false>::Get(N, BUI).empty();
+  }
+
+  static NodePtr GetEntryNode(const DomTreeT &DT) {
+    assert(DT.Parent && "Parent not set");
+    return GraphTraits<typename DomTreeT::ParentPtr>::getEntryNode(DT.Parent);
+  }
+
+  // Finds all roots without relying on the set of roots already stored in the
+  // tree.
+  // We define roots to be some non-redundant set of the CFG nodes.
+  static RootsT FindRoots(const DomTreeT &DT, BatchUpdatePtr BUI) {
+    assert(DT.Parent && "Parent pointer is not set");
+    RootsT Roots;
+
+    // For dominators, function entry CFG node is always a tree root node.
+    if (!IsPostDom) {
+      Roots.push_back(GetEntryNode(DT));
+      return Roots;
+    }
+
+    SemiNCAInfo SNCA(BUI);
+
+    // PostDominatorTree always has a virtual root.
+    SNCA.addVirtualRoot();
+    unsigned Num = 1;
+
+    DEBUG(dbgs() << "\t\tLooking for trivial roots\n");
+
+    // Step #1: Find all the trivial roots that will definitely remain tree
+    // roots.
+    unsigned Total = 0;
+    // It may happen that there are some new nodes in the CFG that are a result
+    // of the ongoing batch update, but we cannot really pretend that they don't
+    // exist -- we won't see any outgoing or incoming edges to them, so it's
+    // fine to discover them here, as they would end up appearing in the CFG at
+    // some point anyway.
+    for (const NodePtr N : nodes(DT.Parent)) {
+      ++Total;
+      // If it has no *successors*, it is definitely a root.
+      if (!HasForwardSuccessors(N, BUI)) {
+        Roots.push_back(N);
+        // Run DFS now so that this part of the CFG is not walked again later.
+        Num = SNCA.runDFS(N, Num, AlwaysDescend, 1);
+        DEBUG(dbgs() << "Found a new trivial root: " << BlockNamePrinter(N)
+                     << "\n");
+        DEBUG(dbgs() << "Last visited node: "
+                     << BlockNamePrinter(SNCA.NumToNode[Num]) << "\n");
+      }
+    }
+
+    DEBUG(dbgs() << "\t\tLooking for non-trivial roots\n");
+
+    // Step #2: Find all non-trivial root candidates. Those are CFG nodes that
+    // are reverse-unreachable and were not visited by previous DFS walks (i.e.
+    // CFG nodes in infinite loops).
+    bool HasNonTrivialRoots = false;
+    // Accounting for the virtual exit, see if we had any reverse-unreachable
+    // nodes.
+    if (Total + 1 != Num) {
+      HasNonTrivialRoots = true;
+      // Make another DFS pass over all other nodes to find the
+      // reverse-unreachable blocks, and find the furthest paths we'll be able
+      // to make.
+      // Note that this looks N^2, but it's really 2N worst case, if every node
+      // is unreachable. This is because we are still going to only visit each
+      // unreachable node once; we may just visit it in two directions,
+      // depending on how lucky we get.
+      SmallPtrSet<NodePtr, 4> ConnectToExitBlock;
+      for (const NodePtr I : nodes(DT.Parent)) {
+        if (SNCA.NodeToInfo.count(I) == 0) {
+          DEBUG(dbgs() << "\t\t\tVisiting node " << BlockNamePrinter(I)
+                       << "\n");
+          // Find the furthest point we can reach by following successors, then
+          // follow those edges in reverse.  This gives us some reasonable
+          // answer about the post-dom tree inside any infinite loop. In
+          // particular, it guarantees we get to the farthest away point along
+          // *some* path. This also matches GCC's behavior.
+          // If we really wanted a totally complete picture of dominance inside
+          // this infinite loop, we could do it with SCC-like algorithms to find
+          // the lowest and highest points in the infinite loop.  In theory, it
+          // would be nice to give the canonical backedge for the loop, but it's
+          // expensive and does not always lead to a minimal set of roots.
+          DEBUG(dbgs() << "\t\t\tRunning forward DFS\n");
+
+          const unsigned NewNum = SNCA.runDFS<true>(I, Num, AlwaysDescend, Num);
+          const NodePtr FurthestAway = SNCA.NumToNode[NewNum];
+          DEBUG(dbgs() << "\t\t\tFound a new furthest away node "
+                       << "(non-trivial root): "
+                       << BlockNamePrinter(FurthestAway) << "\n");
+          ConnectToExitBlock.insert(FurthestAway);
+          Roots.push_back(FurthestAway);
+          DEBUG(dbgs() << "\t\t\tPrev DFSNum: " << Num << ", new DFSNum: "
+                       << NewNum << "\n\t\t\tRemoving DFS info\n");
+          for (unsigned i = NewNum; i > Num; --i) {
+            const NodePtr N = SNCA.NumToNode[i];
+            DEBUG(dbgs() << "\t\t\t\tRemoving DFS info for "
+                         << BlockNamePrinter(N) << "\n");
+            SNCA.NodeToInfo.erase(N);
+            SNCA.NumToNode.pop_back();
+          }
+          const unsigned PrevNum = Num;
+          DEBUG(dbgs() << "\t\t\tRunning reverse DFS\n");
+          Num = SNCA.runDFS(FurthestAway, Num, AlwaysDescend, 1);
+          for (unsigned i = PrevNum + 1; i <= Num; ++i)
+            DEBUG(dbgs() << "\t\t\t\tfound node "
+                         << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
+        }
+      }
+    }
+
+    DEBUG(dbgs() << "Total: " << Total << ", Num: " << Num << "\n");
+    DEBUG(dbgs() << "Discovered CFG nodes:\n");
+    DEBUG(for (size_t i = 0; i <= Num; ++i) dbgs()
+          << i << ": " << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
+
+    assert((Total + 1 == Num) && "Everything should have been visited");
+
+    // Step #3: If we found some non-trivial roots, make them non-redundant.
+    if (HasNonTrivialRoots) RemoveRedundantRoots(DT, BUI, Roots);
+
+    DEBUG(dbgs() << "Found roots: ");
+    DEBUG(for (auto *Root : Roots) dbgs() << BlockNamePrinter(Root) << " ");
+    DEBUG(dbgs() << "\n");
+
+    return Roots;
+  }
+
+  // This function only makes sense for postdominators.
+  // We define roots to be some set of CFG nodes where (reverse) DFS walks have
+  // to start in order to visit all the CFG nodes (including the
+  // reverse-unreachable ones).
+  // When the search for non-trivial roots is done it may happen that some of
+  // the non-trivial roots are reverse-reachable from other non-trivial roots,
+  // which makes them redundant. This function removes them from the set of
+  // input roots.
+  static void RemoveRedundantRoots(const DomTreeT &DT, BatchUpdatePtr BUI,
+                                   RootsT &Roots) {
+    assert(IsPostDom && "This function is for postdominators only");
+    DEBUG(dbgs() << "Removing redundant roots\n");
+
+    SemiNCAInfo SNCA(BUI);
+
+    for (unsigned i = 0; i < Roots.size(); ++i) {
+      auto &Root = Roots[i];
+      // Trivial roots are always non-redundant.
+      if (!HasForwardSuccessors(Root, BUI)) continue;
+      DEBUG(dbgs() << "\tChecking if " << BlockNamePrinter(Root)
+                   << " remains a root\n");
+      SNCA.clear();
+      // Do a forward walk looking for the other roots.
+      const unsigned Num = SNCA.runDFS<true>(Root, 0, AlwaysDescend, 0);
+      // Skip the start node and begin from the second one (note that DFS uses
+      // 1-based indexing).
+      for (unsigned x = 2; x <= Num; ++x) {
+        const NodePtr N = SNCA.NumToNode[x];
+        // If we found another root in a (forward) DFS walk, remove the current
+        // root from the set of roots, as it is reverse-reachable from the other
+        // one.
+        if (llvm::find(Roots, N) != Roots.end()) {
+          DEBUG(dbgs() << "\tForward DFS walk found another root "
+                       << BlockNamePrinter(N) << "\n\tRemoving root "
+                       << BlockNamePrinter(Root) << "\n");
+          std::swap(Root, Roots.back());
+          Roots.pop_back();
+
+          // Root at the back takes the current root's place.
+          // Start the next loop iteration with the same index.
+          --i;
+          break;
+        }
+      }
+    }
+  }
+
+  template <typename DescendCondition>
+  void doFullDFSWalk(const DomTreeT &DT, DescendCondition DC) {
+    if (!IsPostDom) {
+      assert(DT.Roots.size() == 1 && "Dominators should have a single root");
+      runDFS(DT.Roots[0], 0, DC, 0);
+      return;
+    }
+
+    addVirtualRoot();
+    unsigned Num = 1;
+    for (const NodePtr Root : DT.Roots) Num = runDFS(Root, Num, DC, 0);
+  }
+
+  static void CalculateFromScratch(DomTreeT &DT, BatchUpdatePtr BUI) {
+    auto *Parent = DT.Parent;
+    DT.reset();
+    DT.Parent = Parent;
+    SemiNCAInfo SNCA(nullptr);  // Since we are rebuilding the whole tree,
+                                // there's no point doing it incrementally.
+
+    // Step #0: Number blocks in depth-first order and initialize variables used
+    // in later stages of the algorithm.
+    DT.Roots = FindRoots(DT, nullptr);
+    SNCA.doFullDFSWalk(DT, AlwaysDescend);
+
+    SNCA.runSemiNCA(DT);
+    if (BUI) {
+      BUI->IsRecalculated = true;
+      DEBUG(dbgs() << "DomTree recalculated, skipping future batch updates\n");
+    }
+
+    if (DT.Roots.empty()) return;
+
+    // Add a node for the root. If the tree is a PostDominatorTree it will be
+    // the virtual exit (denoted by (BasicBlock *) nullptr) which postdominates
+    // all real exits (including multiple exit blocks, infinite loops).
+    NodePtr Root = IsPostDom ? nullptr : DT.Roots[0];
+
+    DT.RootNode = (DT.DomTreeNodes[Root] =
+                       llvm::make_unique<DomTreeNodeBase<NodeT>>(Root, nullptr))
+        .get();
+    SNCA.attachNewSubtree(DT, DT.RootNode);
+  }
+
+  void attachNewSubtree(DomTreeT &DT, const TreeNodePtr AttachTo) {
+    // Attach the first unreachable block to AttachTo.
+    NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
+    // Loop over all of the discovered blocks in the function...
+    for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
+      NodePtr W = NumToNode[i];
+      DEBUG(dbgs() << "\tdiscovered a new reachable node "
+                   << BlockNamePrinter(W) << "\n");
+
+      // Don't replace this with 'count'; the insertion side effect is
+      // important.
+      if (DT.DomTreeNodes[W]) continue;  // Already calculated this node.
+
+      NodePtr ImmDom = getIDom(W);
+
+      // Get or calculate the node for the immediate dominator.
+      TreeNodePtr IDomNode = getNodeForBlock(ImmDom, DT);
+
+      // Add a new tree node for this BasicBlock, and link it as a child of
+      // IDomNode.
+      DT.DomTreeNodes[W] = IDomNode->addChild(
+          llvm::make_unique<DomTreeNodeBase<NodeT>>(W, IDomNode));
+    }
+  }
+
+  void reattachExistingSubtree(DomTreeT &DT, const TreeNodePtr AttachTo) {
+    NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
+    for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
+      const NodePtr N = NumToNode[i];
+      const TreeNodePtr TN = DT.getNode(N);
+      assert(TN);
+      const TreeNodePtr NewIDom = DT.getNode(NodeToInfo[N].IDom);
+      TN->setIDom(NewIDom);
+    }
+  }
+
+  // Helper struct used during edge insertions.
+  struct InsertionInfo {
+    using BucketElementTy = std::pair<unsigned, TreeNodePtr>;
+    struct DecreasingLevel {
+      bool operator()(const BucketElementTy &First,
+                      const BucketElementTy &Second) const {
+        return First.first > Second.first;
+      }
+    };
+
+    std::priority_queue<BucketElementTy, SmallVector<BucketElementTy, 8>,
+        DecreasingLevel>
+        Bucket;  // Queue of tree nodes sorted by level in descending order.
+    SmallDenseSet<TreeNodePtr, 8> Affected;
+    SmallDenseMap<TreeNodePtr, unsigned, 8> Visited;
+    SmallVector<TreeNodePtr, 8> AffectedQueue;
+    SmallVector<TreeNodePtr, 8> VisitedNotAffectedQueue;
+  };
+
+  static void InsertEdge(DomTreeT &DT, const BatchUpdatePtr BUI,
+                         const NodePtr From, const NodePtr To) {
+    assert((From || IsPostDom) &&
+           "From has to be a valid CFG node or a virtual root");
+    assert(To && "Cannot be a nullptr");
+    DEBUG(dbgs() << "Inserting edge " << BlockNamePrinter(From) << " -> "
+                 << BlockNamePrinter(To) << "\n");
+    TreeNodePtr FromTN = DT.getNode(From);
+
+    if (!FromTN) {
+      // Ignore edges from unreachable nodes for (forward) dominators.
+      if (!IsPostDom) return;
+
+      // The unreachable node becomes a new root -- create a tree node for it
+      // as a child of the virtual root.
+      TreeNodePtr VirtualRoot = DT.getNode(nullptr);
+      FromTN =
+          (DT.DomTreeNodes[From] = VirtualRoot->addChild(
+               llvm::make_unique<DomTreeNodeBase<NodeT>>(From, VirtualRoot)))
+              .get();
+      DT.Roots.push_back(From);
+    }
+
+    DT.DFSInfoValid = false;
+
+    const TreeNodePtr ToTN = DT.getNode(To);
+    if (!ToTN)
+      InsertUnreachable(DT, BUI, FromTN, To);
+    else
+      InsertReachable(DT, BUI, FromTN, ToTN);
+  }
+
+  // Determines if some existing root becomes reverse-reachable after the
+  // insertion. Rebuilds the whole tree if that situation happens.
+  static bool UpdateRootsBeforeInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
+                                         const TreeNodePtr From,
+                                         const TreeNodePtr To) {
+    assert(IsPostDom && "This function is only for postdominators");
+    // Destination node is not attached to the virtual root, so it cannot be a
+    // root.
+    if (!DT.isVirtualRoot(To->getIDom())) return false;
+
+    auto RIt = llvm::find(DT.Roots, To->getBlock());
+    if (RIt == DT.Roots.end())
+      return false;  // To is not a root, nothing to update.
+
+    DEBUG(dbgs() << "\t\tAfter the insertion, " << BlockNamePrinter(To)
+                 << " is no longer a root\n\t\tRebuilding the tree!!!\n");
+
+    CalculateFromScratch(DT, BUI);
+    return true;
+  }
+
+  // Updates the set of roots after insertion or deletion. This ensures that
+  // the roots are the same after a series of updates as they would be if the
+  // tree were built from scratch.
+  static void UpdateRootsAfterUpdate(DomTreeT &DT, const BatchUpdatePtr BUI) {
+    assert(IsPostDom && "This function is only for postdominators");
+
+    // The tree has only trivial roots -- nothing to update.
+    if (std::none_of(DT.Roots.begin(), DT.Roots.end(), [BUI](const NodePtr N) {
+          return HasForwardSuccessors(N, BUI);
+        }))
+      return;
+
+    // Recalculate the set of roots.
+    auto Roots = FindRoots(DT, BUI);
+    if (DT.Roots.size() != Roots.size() ||
+        !std::is_permutation(DT.Roots.begin(), DT.Roots.end(), Roots.begin())) {
+      // The roots chosen in the CFG have changed. This is because the
+      // incremental algorithm does not really know or use the set of roots and
+      // can make a different (implicit) decision about which node within an
+      // infinite loop becomes a root.
+
+      DEBUG(dbgs() << "Roots are different in updated trees\n"
+                   << "The entire tree needs to be rebuilt\n");
+      // It may be possible to update the tree without recalculating it, but
+      // we do not know yet how to do it, and it happens rarely in practice.
+      CalculateFromScratch(DT, BUI);
+      return;
+    }
+  }
+
+  // Handles insertion to a node already in the dominator tree.
+  static void InsertReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
+                              const TreeNodePtr From, const TreeNodePtr To) {
+    DEBUG(dbgs() << "\tReachable " << BlockNamePrinter(From->getBlock())
+                 << " -> " << BlockNamePrinter(To->getBlock()) << "\n");
+    if (IsPostDom && UpdateRootsBeforeInsertion(DT, BUI, From, To)) return;
+    // DT.findNCD expects both pointers to be valid. When From is a virtual
+    // root, then its CFG block pointer is a nullptr, so we have to 'compute'
+    // the NCD manually.
+    const NodePtr NCDBlock =
+        (From->getBlock() && To->getBlock())
+            ? DT.findNearestCommonDominator(From->getBlock(), To->getBlock())
+            : nullptr;
+    assert(NCDBlock || DT.isPostDominator());
+    const TreeNodePtr NCD = DT.getNode(NCDBlock);
+    assert(NCD);
+
+    DEBUG(dbgs() << "\t\tNCA == " << BlockNamePrinter(NCD) << "\n");
+    const TreeNodePtr ToIDom = To->getIDom();
+
+    // Nothing affected -- NCA property holds.
+    // (Based on the lemma 2.5 from the second paper.)
+    if (NCD == To || NCD == ToIDom) return;
+
+    // Identify and collect affected nodes.
+    InsertionInfo II;
+    DEBUG(dbgs() << "Marking " << BlockNamePrinter(To) << " as affected\n");
+    II.Affected.insert(To);
+    const unsigned ToLevel = To->getLevel();
+    DEBUG(dbgs() << "Putting " << BlockNamePrinter(To) << " into a Bucket\n");
+    II.Bucket.push({ToLevel, To});
+
+    while (!II.Bucket.empty()) {
+      const TreeNodePtr CurrentNode = II.Bucket.top().second;
+      const unsigned CurrentLevel = CurrentNode->getLevel();
+      II.Bucket.pop();
+      DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
+                   << BlockNamePrinter(CurrentNode) << "\n");
+
+      II.Visited.insert({CurrentNode, CurrentLevel});
+      II.AffectedQueue.push_back(CurrentNode);
+
+      // Discover and collect affected successors of the current node.
+      VisitInsertion(DT, BUI, CurrentNode, CurrentLevel, NCD, II);
+    }
+
+    // Finish by updating immediate dominators and levels.
+    UpdateInsertion(DT, BUI, NCD, II);
+  }
+
+  // Visits an affected node and collect its affected successors.
+  static void VisitInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
+                             const TreeNodePtr TN, const unsigned RootLevel,
+                             const TreeNodePtr NCD, InsertionInfo &II) {
+    const unsigned NCDLevel = NCD->getLevel();
+    DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << ",  RootLevel "
+                 << RootLevel << "\n");
+
+    SmallVector<TreeNodePtr, 8> Stack = {TN};
+    assert(TN->getBlock() && II.Visited.count(TN) && "Preconditions!");
+
+    SmallPtrSet<TreeNodePtr, 8> Processed;
+
+    do {
+      TreeNodePtr Next = Stack.pop_back_val();
+      DEBUG(dbgs() << " Next: " << BlockNamePrinter(Next) << "\n");
+
+      for (const NodePtr Succ :
+           ChildrenGetter<IsPostDom>::Get(Next->getBlock(), BUI)) {
+        const TreeNodePtr SuccTN = DT.getNode(Succ);
+        assert(SuccTN && "Unreachable successor found at reachable insertion");
+        const unsigned SuccLevel = SuccTN->getLevel();
+
+        DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ) << ", level = "
+                     << SuccLevel << "\n");
+
+        // Do not process the same node multiple times.
+        if (Processed.count(Next) > 0)
+          continue;
+
+        // Succ dominated by subtree From -- not affected.
+        // (Based on the lemma 2.5 from the second paper.)
+        if (SuccLevel > RootLevel) {
+          DEBUG(dbgs() << "\t\tDominated by subtree From\n");
+          if (II.Visited.count(SuccTN) != 0) {
+            DEBUG(dbgs() << "\t\t\talready visited at level "
+                         << II.Visited[SuccTN] << "\n\t\t\tcurrent level "
+                         << RootLevel << "\n");
+
+            // A node may need to be visited again if we see it at a lower
+            // level than before.
+            if (II.Visited[SuccTN] >= RootLevel)
+              continue;
+          }
+
+          DEBUG(dbgs() << "\t\tMarking visited not affected "
+                       << BlockNamePrinter(Succ) << "\n");
+          II.Visited.insert({SuccTN, RootLevel});
+          II.VisitedNotAffectedQueue.push_back(SuccTN);
+          Stack.push_back(SuccTN);
+        } else if ((SuccLevel > NCDLevel + 1) &&
+            II.Affected.count(SuccTN) == 0) {
+          DEBUG(dbgs() << "\t\tMarking affected and adding "
+                       << BlockNamePrinter(Succ) << " to a Bucket\n");
+          II.Affected.insert(SuccTN);
+          II.Bucket.push({SuccLevel, SuccTN});
+        }
+      }
+
+      Processed.insert(Next);
+    } while (!Stack.empty());
+  }
+
+  // Updates immediate dominators and levels after insertion.
+  static void UpdateInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
+                              const TreeNodePtr NCD, InsertionInfo &II) {
+    DEBUG(dbgs() << "Updating NCD = " << BlockNamePrinter(NCD) << "\n");
+
+    for (const TreeNodePtr TN : II.AffectedQueue) {
+      DEBUG(dbgs() << "\tIDom(" << BlockNamePrinter(TN)
+                   << ") = " << BlockNamePrinter(NCD) << "\n");
+      TN->setIDom(NCD);
+    }
+
+    UpdateLevelsAfterInsertion(II);
+    if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
+  }
+
+  static void UpdateLevelsAfterInsertion(InsertionInfo &II) {
+    DEBUG(dbgs() << "Updating levels for visited but not affected nodes\n");
+
+    for (const TreeNodePtr TN : II.VisitedNotAffectedQueue) {
+      DEBUG(dbgs() << "\tlevel(" << BlockNamePrinter(TN) << ") = ("
+                   << BlockNamePrinter(TN->getIDom()) << ") "
+                   << TN->getIDom()->getLevel() << " + 1\n");
+      TN->UpdateLevel();
+    }
+  }
+
+  // Handles insertion to previously unreachable nodes.
+  static void InsertUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
+                                const TreeNodePtr From, const NodePtr To) {
+    DEBUG(dbgs() << "Inserting " << BlockNamePrinter(From)
+                 << " -> (unreachable) " << BlockNamePrinter(To) << "\n");
+
+    // Collect discovered edges to already reachable nodes.
+    SmallVector<std::pair<NodePtr, TreeNodePtr>, 8> DiscoveredEdgesToReachable;
+    // Discover and connect nodes that became reachable with the insertion.
+    ComputeUnreachableDominators(DT, BUI, To, From, DiscoveredEdgesToReachable);
+
+    DEBUG(dbgs() << "Inserted " << BlockNamePrinter(From)
+                 << " -> (prev unreachable) " << BlockNamePrinter(To) << "\n");
+
+    // Use the discovered edges and insert the discovered connecting (incoming)
+    // edges.
+    for (const auto &Edge : DiscoveredEdgesToReachable) {
+      DEBUG(dbgs() << "\tInserting discovered connecting edge "
+                   << BlockNamePrinter(Edge.first) << " -> "
+                   << BlockNamePrinter(Edge.second) << "\n");
+      InsertReachable(DT, BUI, DT.getNode(Edge.first), Edge.second);
+    }
+  }
+
+  // Connects nodes that become reachable with an insertion.
+  static void ComputeUnreachableDominators(
+      DomTreeT &DT, const BatchUpdatePtr BUI, const NodePtr Root,
+      const TreeNodePtr Incoming,
+      SmallVectorImpl<std::pair<NodePtr, TreeNodePtr>>
+          &DiscoveredConnectingEdges) {
+    assert(!DT.getNode(Root) && "Root must not be reachable");
+
+    // Visit only previously unreachable nodes.
+    auto UnreachableDescender = [&DT, &DiscoveredConnectingEdges](NodePtr From,
+                                                                  NodePtr To) {
+      const TreeNodePtr ToTN = DT.getNode(To);
+      if (!ToTN) return true;
+
+      DiscoveredConnectingEdges.push_back({From, ToTN});
+      return false;
+    };
+
+    SemiNCAInfo SNCA(BUI);
+    SNCA.runDFS(Root, 0, UnreachableDescender, 0);
+    SNCA.runSemiNCA(DT);
+    SNCA.attachNewSubtree(DT, Incoming);
+
+    DEBUG(dbgs() << "After adding unreachable nodes\n");
+  }
+
+  static void DeleteEdge(DomTreeT &DT, const BatchUpdatePtr BUI,
+                         const NodePtr From, const NodePtr To) {
+    assert(From && To && "Cannot disconnect nullptrs");
+    DEBUG(dbgs() << "Deleting edge " << BlockNamePrinter(From) << " -> "
+                 << BlockNamePrinter(To) << "\n");
+
+#ifndef NDEBUG
+    // Ensure that the edge was in fact deleted from the CFG before informing
+    // the DomTree about it.
+    // The check is O(N), so run it only in debug configuration.
+    auto IsSuccessor = [BUI](const NodePtr SuccCandidate, const NodePtr Of) {
+      auto Successors = ChildrenGetter<IsPostDom>::Get(Of, BUI);
+      return llvm::find(Successors, SuccCandidate) != Successors.end();
+    };
+    (void)IsSuccessor;
+    assert(!IsSuccessor(To, From) && "Deleted edge still exists in the CFG!");
+#endif
+
+    const TreeNodePtr FromTN = DT.getNode(From);
+    // Deletion in an unreachable subtree -- nothing to do.
+    if (!FromTN) return;
+
+    const TreeNodePtr ToTN = DT.getNode(To);
+    if (!ToTN) {
+      DEBUG(dbgs() << "\tTo (" << BlockNamePrinter(To)
+                   << ") already unreachable -- there is no edge to delete\n");
+      return;
+    }
+
+    const NodePtr NCDBlock = DT.findNearestCommonDominator(From, To);
+    const TreeNodePtr NCD = DT.getNode(NCDBlock);
+
+    // If To dominates From -- nothing to do.
+    if (ToTN != NCD) {
+      DT.DFSInfoValid = false;
+
+      const TreeNodePtr ToIDom = ToTN->getIDom();
+      DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
+                   << BlockNamePrinter(ToIDom) << "\n");
+
+      // To remains reachable after deletion.
+      // (Based on the caption under Figure 4. from the second paper.)
+      if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
+        DeleteReachable(DT, BUI, FromTN, ToTN);
+      else
+        DeleteUnreachable(DT, BUI, ToTN);
+    }
+
+    if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
+  }
+
+  // Handles deletions that leave destination nodes reachable.
+  static void DeleteReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
+                              const TreeNodePtr FromTN,
+                              const TreeNodePtr ToTN) {
+    DEBUG(dbgs() << "Deleting reachable " << BlockNamePrinter(FromTN) << " -> "
+                 << BlockNamePrinter(ToTN) << "\n");
+    DEBUG(dbgs() << "\tRebuilding subtree\n");
+
+    // Find the top of the subtree that needs to be rebuilt.
+    // (Based on the lemma 2.6 from the second paper.)
+    const NodePtr ToIDom =
+        DT.findNearestCommonDominator(FromTN->getBlock(), ToTN->getBlock());
+    assert(ToIDom || DT.isPostDominator());
+    const TreeNodePtr ToIDomTN = DT.getNode(ToIDom);
+    assert(ToIDomTN);
+    const TreeNodePtr PrevIDomSubTree = ToIDomTN->getIDom();
+    // Top of the subtree to rebuild is the root node. Rebuild the tree from
+    // scratch.
+    if (!PrevIDomSubTree) {
+      DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
+      CalculateFromScratch(DT, BUI);
+      return;
+    }
+
+    // Only visit nodes in the subtree starting at To.
+    const unsigned Level = ToIDomTN->getLevel();
+    auto DescendBelow = [Level, &DT](NodePtr, NodePtr To) {
+      return DT.getNode(To)->getLevel() > Level;
+    };
+
+    DEBUG(dbgs() << "\tTop of subtree: " << BlockNamePrinter(ToIDomTN) << "\n");
+
+    SemiNCAInfo SNCA(BUI);
+    SNCA.runDFS(ToIDom, 0, DescendBelow, 0);
+    DEBUG(dbgs() << "\tRunning Semi-NCA\n");
+    SNCA.runSemiNCA(DT, Level);
+    SNCA.reattachExistingSubtree(DT, PrevIDomSubTree);
+  }
+
+  // Checks if a node has proper support, as defined on page 3 and later
+  // explained on page 7 of the second paper.
+  static bool HasProperSupport(DomTreeT &DT, const BatchUpdatePtr BUI,
+                               const TreeNodePtr TN) {
+    DEBUG(dbgs() << "IsReachableFromIDom " << BlockNamePrinter(TN) << "\n");
+    for (const NodePtr Pred :
+         ChildrenGetter<!IsPostDom>::Get(TN->getBlock(), BUI)) {
+      DEBUG(dbgs() << "\tPred " << BlockNamePrinter(Pred) << "\n");
+      if (!DT.getNode(Pred)) continue;
+
+      const NodePtr Support =
+          DT.findNearestCommonDominator(TN->getBlock(), Pred);
+      DEBUG(dbgs() << "\tSupport " << BlockNamePrinter(Support) << "\n");
+      if (Support != TN->getBlock()) {
+        DEBUG(dbgs() << "\t" << BlockNamePrinter(TN)
+                     << " is reachable from support "
+                     << BlockNamePrinter(Support) << "\n");
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  // Handles deletions that make the destination node unreachable.
+  // (Based on the lemma 2.7 from the second paper.)
+  static void DeleteUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
+                                const TreeNodePtr ToTN) {
+    DEBUG(dbgs() << "Deleting unreachable subtree " << BlockNamePrinter(ToTN)
+                 << "\n");
+    assert(ToTN);
+    assert(ToTN->getBlock());
+
+    if (IsPostDom) {
+      // Deletion makes a region reverse-unreachable and creates a new root.
+      // Simulate that by inserting an edge from the virtual root to ToTN and
+      // adding it as a new root.
+      DEBUG(dbgs() << "\tDeletion made a region reverse-unreachable\n");
+      DEBUG(dbgs() << "\tAdding new root " << BlockNamePrinter(ToTN) << "\n");
+      DT.Roots.push_back(ToTN->getBlock());
+      InsertReachable(DT, BUI, DT.getNode(nullptr), ToTN);
+      return;
+    }
+
+    SmallVector<NodePtr, 16> AffectedQueue;
+    const unsigned Level = ToTN->getLevel();
+
+    // Traverse destination node's descendants with greater level in the tree
+    // and collect visited nodes.
+    auto DescendAndCollect = [Level, &AffectedQueue, &DT](NodePtr, NodePtr To) {
+      const TreeNodePtr TN = DT.getNode(To);
+      assert(TN);
+      if (TN->getLevel() > Level) return true;
+      if (llvm::find(AffectedQueue, To) == AffectedQueue.end())
+        AffectedQueue.push_back(To);
+
+      return false;
+    };
+
+    SemiNCAInfo SNCA(BUI);
+    unsigned LastDFSNum =
+        SNCA.runDFS(ToTN->getBlock(), 0, DescendAndCollect, 0);
+
+    TreeNodePtr MinNode = ToTN;
+
+    // Identify the top of the subtree to rebuild by finding the NCD of all
+    // the affected nodes.
+    for (const NodePtr N : AffectedQueue) {
+      const TreeNodePtr TN = DT.getNode(N);
+      const NodePtr NCDBlock =
+          DT.findNearestCommonDominator(TN->getBlock(), ToTN->getBlock());
+      assert(NCDBlock || DT.isPostDominator());
+      const TreeNodePtr NCD = DT.getNode(NCDBlock);
+      assert(NCD);
+
+      DEBUG(dbgs() << "Processing affected node " << BlockNamePrinter(TN)
+                   << " with NCD = " << BlockNamePrinter(NCD)
+                   << ", MinNode =" << BlockNamePrinter(MinNode) << "\n");
+      if (NCD != TN && NCD->getLevel() < MinNode->getLevel()) MinNode = NCD;
+    }
+
+    // Root reached, rebuild the whole tree from scratch.
+    if (!MinNode->getIDom()) {
+      DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
+      CalculateFromScratch(DT, BUI);
+      return;
+    }
+
+    // Erase the unreachable subtree in reverse preorder to process all children
+    // before deleting their parent.
+    for (unsigned i = LastDFSNum; i > 0; --i) {
+      const NodePtr N = SNCA.NumToNode[i];
+      const TreeNodePtr TN = DT.getNode(N);
+      DEBUG(dbgs() << "Erasing node " << BlockNamePrinter(TN) << "\n");
+
+      EraseNode(DT, TN);
+    }
+
+    // The affected subtree starts at the To node -- there's no extra work to do.
+    if (MinNode == ToTN) return;
+
+    DEBUG(dbgs() << "DeleteUnreachable: running DFS with MinNode = "
+                 << BlockNamePrinter(MinNode) << "\n");
+    const unsigned MinLevel = MinNode->getLevel();
+    const TreeNodePtr PrevIDom = MinNode->getIDom();
+    assert(PrevIDom);
+    SNCA.clear();
+
+    // Identify nodes that remain in the affected subtree.
+    auto DescendBelow = [MinLevel, &DT](NodePtr, NodePtr To) {
+      const TreeNodePtr ToTN = DT.getNode(To);
+      return ToTN && ToTN->getLevel() > MinLevel;
+    };
+    SNCA.runDFS(MinNode->getBlock(), 0, DescendBelow, 0);
+
+    DEBUG(dbgs() << "Previous IDom(MinNode) = " << BlockNamePrinter(PrevIDom)
+                 << "\nRunning Semi-NCA\n");
+
+    // Rebuild the remaining part of affected subtree.
+    SNCA.runSemiNCA(DT, MinLevel);
+    SNCA.reattachExistingSubtree(DT, PrevIDom);
+  }
+
+  // Removes leaf tree nodes from the dominator tree.
+  static void EraseNode(DomTreeT &DT, const TreeNodePtr TN) {
+    assert(TN);
+    assert(TN->getNumChildren() == 0 && "Not a tree leaf");
+
+    const TreeNodePtr IDom = TN->getIDom();
+    assert(IDom);
+
+    auto ChIt = llvm::find(IDom->Children, TN);
+    assert(ChIt != IDom->Children.end());
+    std::swap(*ChIt, IDom->Children.back());
+    IDom->Children.pop_back();
+
+    DT.DomTreeNodes.erase(TN->getBlock());
+  }
+
+  //~~
+  //===--------------------- DomTree Batch Updater --------------------------===
+  //~~
+
+  static void ApplyUpdates(DomTreeT &DT, ArrayRef<UpdateT> Updates) {
+    const size_t NumUpdates = Updates.size();
+    if (NumUpdates == 0)
+      return;
+
+    // Take the fast path for a single update and avoid running the batch update
+    // machinery.
+    if (NumUpdates == 1) {
+      const auto &Update = Updates.front();
+      if (Update.getKind() == UpdateKind::Insert)
+        DT.insertEdge(Update.getFrom(), Update.getTo());
+      else
+        DT.deleteEdge(Update.getFrom(), Update.getTo());
+
+      return;
+    }
+
+    BatchUpdateInfo BUI;
+    LegalizeUpdates(Updates, BUI.Updates);
+
+    const size_t NumLegalized = BUI.Updates.size();
+    BUI.FutureSuccessors.reserve(NumLegalized);
+    BUI.FuturePredecessors.reserve(NumLegalized);
+
+    // Use the legalized future updates to initialize future successors and
+    // predecessors. Note that these sets will only decrease in size over time,
+    // as the successive CFG snapshots approach the actual (current) CFG.
+    for (UpdateT &U : BUI.Updates) {
+      BUI.FutureSuccessors[U.getFrom()].insert({U.getTo(), U.getKind()});
+      BUI.FuturePredecessors[U.getTo()].insert({U.getFrom(), U.getKind()});
+    }
+
+    DEBUG(dbgs() << "About to apply " << NumLegalized << " updates\n");
+    DEBUG(if (NumLegalized < 32)
+            for (const auto &U : reverse(BUI.Updates))
+              dbgs() << '\t' << U << "\n");
+    DEBUG(dbgs() << "\n");
+
+    // If the DominatorTree was recalculated at some point, stop the batch
+    // updates. Full recalculations ignore batch updates and look at the actual
+    // CFG.
+    for (size_t i = 0; i < NumLegalized && !BUI.IsRecalculated; ++i)
+      ApplyNextUpdate(DT, BUI);
+  }
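+
+  // A minimal usage sketch (hypothetical; assumes basic blocks A, B, C and a
+  // DominatorTree DT kept in sync with the CFG edits):
+  //
+  //   SmallVector<DominatorTree::UpdateType, 2> Updates;
+  //   Updates.push_back({DominatorTree::Insert, A, B});
+  //   Updates.push_back({DominatorTree::Delete, A, C});
+  //   DT.applyUpdates(Updates);  // Batched; update order does not matter.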
+
+  // This function serves a double purpose:
+  // a) It removes redundant updates, which makes it easier to reverse-apply
+  //    them when traversing the CFG.
+  // b) It optimizes away updates that cancel each other out, as the end result
+  //    is the same.
+  //
+  // It relies on the property of the incremental updates that the order of
+  // updates doesn't matter. This allows us to reorder them and still end up
+  // with the exact same DomTree every time.
+  //
+  // Following the same logic, the function doesn't care about the order of
+  // the input updates, so it's OK to pass it an unordered sequence of updates
+  // that wouldn't make sense if applied sequentially, e.g. performing double
+  // insertions or deletions and then doing an opposite update.
+  //
+  // In the future, it should be possible to schedule updates in a way that
+  // minimizes the amount of work needed during incremental updates.
+  static void LegalizeUpdates(ArrayRef<UpdateT> AllUpdates,
+                              SmallVectorImpl<UpdateT> &Result) {
+    DEBUG(dbgs() << "Legalizing " << AllUpdates.size() << " updates\n");
+    // Count the total number of insertions of each edge.
+    // Each insertion adds 1 and each deletion subtracts 1. The end number
+    // should be one of {-1 (deletion), 0 (NOP), +1 (insertion)}. Otherwise,
+    // the sequence of updates contains multiple updates of the same kind and
+    // we assert for that case.
+    SmallDenseMap<std::pair<NodePtr, NodePtr>, int, 4> Operations;
+    Operations.reserve(AllUpdates.size());
+
+    for (const auto &U : AllUpdates) {
+      NodePtr From = U.getFrom();
+      NodePtr To = U.getTo();
+      if (IsPostDom) std::swap(From, To);  // Reverse edge for postdominators.
+
+      Operations[{From, To}] += (U.getKind() == UpdateKind::Insert ? 1 : -1);
+    }
+
+    Result.clear();
+    Result.reserve(Operations.size());
+    for (auto &Op : Operations) {
+      const int NumInsertions = Op.second;
+      assert(std::abs(NumInsertions) <= 1 && "Unbalanced operations!");
+      if (NumInsertions == 0) continue;
+      const UpdateKind UK =
+          NumInsertions > 0 ? UpdateKind::Insert : UpdateKind::Delete;
+      Result.push_back({UK, Op.first.first, Op.first.second});
+    }
+
+    // Make the order consistent by not relying on pointer values within the
+    // set. Reuse the old Operations map.
+    // In the future, we should sort by something else to minimize the amount
+    // of work needed to perform the series of updates.
+    for (size_t i = 0, e = AllUpdates.size(); i != e; ++i) {
+      const auto &U = AllUpdates[i];
+      if (!IsPostDom)
+        Operations[{U.getFrom(), U.getTo()}] = int(i);
+      else
+        Operations[{U.getTo(), U.getFrom()}] = int(i);
+    }
+
+    std::sort(Result.begin(), Result.end(),
+              [&Operations](const UpdateT &A, const UpdateT &B) {
+                return Operations[{A.getFrom(), A.getTo()}] >
+                       Operations[{B.getFrom(), B.getTo()}];
+              });
+  }
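+
+  // For example, the sequence {Insert(A, B), Delete(C, D), Delete(A, B),
+  // Insert(C, D), Insert(A, B)} legalizes to the single update
+  // {Insert(A, B)}: the net insertion count is +1 for the edge (A, B) and
+  // 0 for the edge (C, D).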
+
+  static void ApplyNextUpdate(DomTreeT &DT, BatchUpdateInfo &BUI) {
+    assert(!BUI.Updates.empty() && "No updates to apply!");
+    UpdateT CurrentUpdate = BUI.Updates.pop_back_val();
+    DEBUG(dbgs() << "Applying update: " << CurrentUpdate << "\n");
+
+    // Move to the next snapshot of the CFG by removing the reverse-applied
+    // current update.
+    auto &FS = BUI.FutureSuccessors[CurrentUpdate.getFrom()];
+    FS.erase({CurrentUpdate.getTo(), CurrentUpdate.getKind()});
+    if (FS.empty()) BUI.FutureSuccessors.erase(CurrentUpdate.getFrom());
+
+    auto &FP = BUI.FuturePredecessors[CurrentUpdate.getTo()];
+    FP.erase({CurrentUpdate.getFrom(), CurrentUpdate.getKind()});
+    if (FP.empty()) BUI.FuturePredecessors.erase(CurrentUpdate.getTo());
+
+    if (CurrentUpdate.getKind() == UpdateKind::Insert)
+      InsertEdge(DT, &BUI, CurrentUpdate.getFrom(), CurrentUpdate.getTo());
+    else
+      DeleteEdge(DT, &BUI, CurrentUpdate.getFrom(), CurrentUpdate.getTo());
+  }
+
+  //~~
+  //===--------------- DomTree correctness verification ---------------------===
+  //~~
+
+  // Check if the tree has correct roots. A DominatorTree always has a single
+  // root which is the function's entry node. A PostDominatorTree can have
+  // multiple roots - one for each node with no successors and one for each
+  // infinite loop.
+  // Running time: O(N).
+  bool verifyRoots(const DomTreeT &DT) {
+    if (!DT.Parent && !DT.Roots.empty()) {
+      errs() << "Tree has no parent but has roots!\n";
+      errs().flush();
+      return false;
+    }
+
+    if (!IsPostDom) {
+      if (DT.Roots.empty()) {
+        errs() << "Tree doesn't have a root!\n";
+        errs().flush();
+        return false;
+      }
+
+      if (DT.getRoot() != GetEntryNode(DT)) {
+        errs() << "Tree's root is not its parent's entry node!\n";
+        errs().flush();
+        return false;
+      }
+    }
+
+    RootsT ComputedRoots = FindRoots(DT, nullptr);
+    if (DT.Roots.size() != ComputedRoots.size() ||
+        !std::is_permutation(DT.Roots.begin(), DT.Roots.end(),
+                             ComputedRoots.begin())) {
+      errs() << "Tree has different roots than freshly computed ones!\n";
+      errs() << "\tPDT roots: ";
+      for (const NodePtr N : DT.Roots) errs() << BlockNamePrinter(N) << ", ";
+      errs() << "\n\tComputed roots: ";
+      for (const NodePtr N : ComputedRoots)
+        errs() << BlockNamePrinter(N) << ", ";
+      errs() << "\n";
+      errs().flush();
+      return false;
+    }
+
+    return true;
+  }
+
+  // Checks if the tree contains all reachable nodes in the input graph.
+  // Running time: O(N).
+  bool verifyReachability(const DomTreeT &DT) {
+    clear();
+    doFullDFSWalk(DT, AlwaysDescend);
+
+    for (auto &NodeToTN : DT.DomTreeNodes) {
+      const TreeNodePtr TN = NodeToTN.second.get();
+      const NodePtr BB = TN->getBlock();
+
+      // Virtual root has a corresponding virtual CFG node.
+      if (DT.isVirtualRoot(TN)) continue;
+
+      if (NodeToInfo.count(BB) == 0) {
+        errs() << "DomTree node " << BlockNamePrinter(BB)
+               << " not found by DFS walk!\n";
+        errs().flush();
+
+        return false;
+      }
+    }
+
+    for (const NodePtr N : NumToNode) {
+      if (N && !DT.getNode(N)) {
+        errs() << "CFG node " << BlockNamePrinter(N)
+               << " not found in the DomTree!\n";
+        errs().flush();
+
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  // Check if for every parent with a level L in the tree all of its children
+  // have level L + 1.
+  // Running time: O(N).
+  static bool VerifyLevels(const DomTreeT &DT) {
+    for (auto &NodeToTN : DT.DomTreeNodes) {
+      const TreeNodePtr TN = NodeToTN.second.get();
+      const NodePtr BB = TN->getBlock();
+      if (!BB) continue;
+
+      const TreeNodePtr IDom = TN->getIDom();
+      if (!IDom && TN->getLevel() != 0) {
+        errs() << "Node without an IDom " << BlockNamePrinter(BB)
+               << " has a nonzero level " << TN->getLevel() << "!\n";
+        errs().flush();
+
+        return false;
+      }
+
+      if (IDom && TN->getLevel() != IDom->getLevel() + 1) {
+        errs() << "Node " << BlockNamePrinter(BB) << " has level "
+               << TN->getLevel() << " while its IDom "
+               << BlockNamePrinter(IDom->getBlock()) << " has level "
+               << IDom->getLevel() << "!\n";
+        errs().flush();
+
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  // Check if the computed DFS numbers are correct. Note that DFS info may not
+  // be valid, and when that is the case, we don't verify the numbers.
+  // Running time: O(N log(N)).
+  static bool VerifyDFSNumbers(const DomTreeT &DT) {
+    if (!DT.DFSInfoValid || !DT.Parent)
+      return true;
+
+    const NodePtr RootBB = IsPostDom ? nullptr : DT.getRoots()[0];
+    const TreeNodePtr Root = DT.getNode(RootBB);
+
+    auto PrintNodeAndDFSNums = [](const TreeNodePtr TN) {
+      errs() << BlockNamePrinter(TN) << " {" << TN->getDFSNumIn() << ", "
+             << TN->getDFSNumOut() << '}';
+    };
+
+    // Verify the root's DFS In number. Although DFS numbering would also work
+    // if we started from some other value, we assume 0-based numbering.
+    if (Root->getDFSNumIn() != 0) {
+      errs() << "DFSIn number for the tree root is not:\n\t";
+      PrintNodeAndDFSNums(Root);
+      errs() << '\n';
+      errs().flush();
+      return false;
+    }
+
+    // For each tree node verify that its children's DFS numbers cover the
+    // parent's DFS interval with no gaps.
+    for (const auto &NodeToTN : DT.DomTreeNodes) {
+      const TreeNodePtr Node = NodeToTN.second.get();
+
+      // Handle tree leaves.
+      if (Node->getChildren().empty()) {
+        if (Node->getDFSNumIn() + 1 != Node->getDFSNumOut()) {
+          errs() << "Tree leaf should have DFSOut = DFSIn + 1:\n\t";
+          PrintNodeAndDFSNums(Node);
+          errs() << '\n';
+          errs().flush();
+          return false;
+        }
+
+        continue;
+      }
+
+      // Make a copy and sort it such that it is possible to check if there are
+      // no gaps between DFS numbers of adjacent children.
+      SmallVector<TreeNodePtr, 8> Children(Node->begin(), Node->end());
+      std::sort(Children.begin(), Children.end(),
+                [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
+                  return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
+                });
+
+      auto PrintChildrenError = [Node, &Children, PrintNodeAndDFSNums](
+          const TreeNodePtr FirstCh, const TreeNodePtr SecondCh) {
+        assert(FirstCh);
+
+        errs() << "Incorrect DFS numbers for:\n\tParent ";
+        PrintNodeAndDFSNums(Node);
+
+        errs() << "\n\tChild ";
+        PrintNodeAndDFSNums(FirstCh);
+
+        if (SecondCh) {
+          errs() << "\n\tSecond child ";
+          PrintNodeAndDFSNums(SecondCh);
+        }
+
+        errs() << "\nAll children: ";
+        for (const TreeNodePtr Ch : Children) {
+          PrintNodeAndDFSNums(Ch);
+          errs() << ", ";
+        }
+
+        errs() << '\n';
+        errs().flush();
+      };
+
+      if (Children.front()->getDFSNumIn() != Node->getDFSNumIn() + 1) {
+        PrintChildrenError(Children.front(), nullptr);
+        return false;
+      }
+
+      if (Children.back()->getDFSNumOut() + 1 != Node->getDFSNumOut()) {
+        PrintChildrenError(Children.back(), nullptr);
+        return false;
+      }
+
+      for (size_t i = 0, e = Children.size() - 1; i != e; ++i) {
+        if (Children[i]->getDFSNumOut() + 1 != Children[i + 1]->getDFSNumIn()) {
+          PrintChildrenError(Children[i], Children[i + 1]);
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
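+
+  // For example, a parent with DFS numbers {0, 7} and children {1, 4} and
+  // {5, 6} passes the check: the first child starts at DFSIn + 1, adjacent
+  // children leave no gap (4 + 1 == 5), and the last child ends right before
+  // the parent's DFSOut (6 + 1 == 7).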
+
+  // The below routines verify the correctness of the dominator tree relative to
+  // the CFG it's coming from.  A tree is a dominator tree iff it has two
+  // properties, called the parent property and the sibling property.  Tarjan
+  // and Lengauer prove (but don't explicitly name) the properties as part of
+  // the proofs in their 1972 paper, but the proofs are mostly part of proving
+  // things about semidominators and idoms, and some of them are simply asserted
+  // based on even earlier papers (see, e.g., lemma 2).  Some papers refer to
+  // these properties as "valid" and "co-valid".  See, e.g., "Dominators,
+  // directed bipolar orders, and independent spanning trees" by Loukas
+  // Georgiadis and Robert E. Tarjan, as well as "Dominator Tree Verification
+  // and Vertex-Disjoint Paths" by the same authors.
+
+  // A very simple and direct explanation of these properties can be found in
+  // "An Experimental Study of Dynamic Dominators", found at
+  // https://arxiv.org/abs/1604.02711
+
+  // The easiest way to think of the parent property is that it's a requirement
+  // of being a dominator.  Let's just take immediate dominators.  For PARENT to
+  // be an immediate dominator of CHILD, all paths in the CFG from the entry to
+  // CHILD must go through PARENT.  This implies that if you were to cut PARENT
+  // out of the CFG, there should be no remaining paths that reach CHILD.  If
+  // there are, then you have a path from the entry to CHILD that goes around
+  // PARENT and still reaches CHILD, which by definition means PARENT can't be
+  // a dominator of CHILD (let alone an immediate one).
+
+  // The sibling property is similar.  It says that for each pair of sibling
+  // nodes in the dominator tree (LEFT and RIGHT), neither must dominate the
+  // other.  If sibling LEFT dominated sibling RIGHT, it would mean there are
+  // no paths in the CFG from sibling LEFT to sibling RIGHT that do not go
+  // through LEFT, and thus, LEFT is really an ancestor (in the dominator
+  // tree) of RIGHT, not a sibling.
+
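+  // As a tiny example, consider the CFG with edges A -> B, B -> C, and
+  // A -> C. Node B does not dominate C, because C remains reachable through
+  // the edge A -> C when B is cut out; hence B and C are siblings under A in
+  // the dominator tree, and the sibling property demands that neither
+  // dominates the other.
+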
+  // It is possible to verify the parent and sibling properties in
+  // linear time, but the algorithms are complex. Instead, we do it in a
+  // straightforward N^2 and N^3 way below, using direct path reachability.
+
+  // Checks if the tree has the parent property: for every edge from V to W in
+  // the input graph such that V is reachable, the parent of W in the tree must
+  // be an ancestor of V in the tree.
+  // Running time: O(N^2).
+  //
+  // This means that if a node gets disconnected from the graph, then all of
+  // the nodes it dominated previously will now become unreachable.
+  bool verifyParentProperty(const DomTreeT &DT) {
+    for (auto &NodeToTN : DT.DomTreeNodes) {
+      const TreeNodePtr TN = NodeToTN.second.get();
+      const NodePtr BB = TN->getBlock();
+      if (!BB || TN->getChildren().empty()) continue;
+
+      DEBUG(dbgs() << "Verifying parent property of node "
+                   << BlockNamePrinter(TN) << "\n");
+      clear();
+      doFullDFSWalk(DT, [BB](NodePtr From, NodePtr To) {
+        return From != BB && To != BB;
+      });
+
+      for (TreeNodePtr Child : TN->getChildren())
+        if (NodeToInfo.count(Child->getBlock()) != 0) {
+          errs() << "Child " << BlockNamePrinter(Child)
+                 << " reachable after its parent " << BlockNamePrinter(BB)
+                 << " is removed!\n";
+          errs().flush();
+
+          return false;
+        }
+    }
+
+    return true;
+  }
+
+  // Check if the tree has the sibling property: for all sibling nodes V and W
+  // in the tree, neither dominates the other.
+  // Running time: O(N^3).
+  //
+  // This means that if a node gets disconnected from the graph, then all of
+  // its siblings will still be reachable.
+  bool verifySiblingProperty(const DomTreeT &DT) {
+    for (auto &NodeToTN : DT.DomTreeNodes) {
+      const TreeNodePtr TN = NodeToTN.second.get();
+      const NodePtr BB = TN->getBlock();
+      if (!BB || TN->getChildren().empty()) continue;
+
+      const auto &Siblings = TN->getChildren();
+      for (const TreeNodePtr N : Siblings) {
+        clear();
+        NodePtr BBN = N->getBlock();
+        doFullDFSWalk(DT, [BBN](NodePtr From, NodePtr To) {
+          return From != BBN && To != BBN;
+        });
+
+        for (const TreeNodePtr S : Siblings) {
+          if (S == N) continue;
+
+          if (NodeToInfo.count(S->getBlock()) == 0) {
+            errs() << "Node " << BlockNamePrinter(S)
+                   << " not reachable when its sibling " << BlockNamePrinter(N)
+                   << " is removed!\n";
+            errs().flush();
+
+            return false;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+  // Check if the given tree is the same as a freshly computed one for the same
+  // Parent.
+  // Running time: O(N^2), but faster in practice (same as tree construction).
+  //
+  // Note that this does not check that the tree construction algorithm itself
+  // is correct and should only be used for fast (but possibly unsound)
+  // verification.
+  static bool IsSameAsFreshTree(const DomTreeT &DT) {
+    DomTreeT FreshTree;
+    FreshTree.recalculate(*DT.Parent);
+    const bool Different = DT.compare(FreshTree);
+
+    if (Different) {
+      errs() << (DT.isPostDominator() ? "Post" : "")
+             << "DominatorTree is different than a freshly computed one!\n"
+             << "\tCurrent:\n";
+      DT.print(errs());
+      errs() << "\n\tFreshly computed tree:\n";
+      FreshTree.print(errs());
+      errs().flush();
+    }
+
+    return !Different;
+  }
+};
+
+template <class DomTreeT>
+void Calculate(DomTreeT &DT) {
+  SemiNCAInfo<DomTreeT>::CalculateFromScratch(DT, nullptr);
+}
+
+template <class DomTreeT>
+void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
+                typename DomTreeT::NodePtr To) {
+  if (DT.isPostDominator()) std::swap(From, To);
+  SemiNCAInfo<DomTreeT>::InsertEdge(DT, nullptr, From, To);
+}
+
+template <class DomTreeT>
+void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
+                typename DomTreeT::NodePtr To) {
+  if (DT.isPostDominator()) std::swap(From, To);
+  SemiNCAInfo<DomTreeT>::DeleteEdge(DT, nullptr, From, To);
+}
+
+template <class DomTreeT>
+void ApplyUpdates(DomTreeT &DT,
+                  ArrayRef<typename DomTreeT::UpdateType> Updates) {
+  SemiNCAInfo<DomTreeT>::ApplyUpdates(DT, Updates);
+}
+
+template <class DomTreeT>
+bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL) {
+  SemiNCAInfo<DomTreeT> SNCA(nullptr);
+
+  // The simplest check is to compare against a freshly computed tree. This
+  // will also usefully print the old and new trees, if they are different.
+  if (!SNCA.IsSameAsFreshTree(DT))
+    return false;
+
+  // Common checks to verify the properties of the tree. O(N log N) at worst.
+  if (!SNCA.verifyRoots(DT) || !SNCA.verifyReachability(DT) ||
+      !SNCA.VerifyLevels(DT) || !SNCA.VerifyDFSNumbers(DT))
+    return false;
+
+  // Extra checks depending on VerificationLevel. Up to O(N^3).
+  if (VL == DomTreeT::VerificationLevel::Basic ||
+      VL == DomTreeT::VerificationLevel::Full)
+    if (!SNCA.verifyParentProperty(DT))
+      return false;
+  if (VL == DomTreeT::VerificationLevel::Full)
+    if (!SNCA.verifySiblingProperty(DT))
+      return false;
+
+  return true;
+}
+
+}  // namespace DomTreeBuilder
+}  // namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/GlobPattern.h b/linux-x64/clang/include/llvm/Support/GlobPattern.h
new file mode 100644
index 0000000..c9436a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/GlobPattern.h
@@ -0,0 +1,48 @@
+//===-- GlobPattern.h - glob pattern matcher implementation -*- C++ -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a glob pattern matcher. Glob patterns are the
+// wildcard matching rules used by the shell.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GLOB_PATTERN_H
+#define LLVM_SUPPORT_GLOB_PATTERN_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+// This class represents a glob pattern. Supported metacharacters
+// are "*", "?", "[<chars>]" and "[^<chars>]".
+namespace llvm {
+class BitVector;
+template <typename T> class ArrayRef;
+
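+/// A minimal usage sketch (hypothetical):
+/// \code
+///   Expected<GlobPattern> Pat = GlobPattern::create("*.so*");
+///   if (!Pat)
+///     return Pat.takeError();
+///   bool Matches = Pat->match("libfoo.so.1");  // true
+/// \endcode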
+class GlobPattern {
+public:
+  static Expected<GlobPattern> create(StringRef Pat);
+  bool match(StringRef S) const;
+
+private:
+  bool matchOne(ArrayRef<BitVector> Pat, StringRef S) const;
+
+  // Parsed glob pattern.
+  std::vector<BitVector> Tokens;
+
+  // The following members are for optimization.
+  Optional<StringRef> Exact;
+  Optional<StringRef> Prefix;
+  Optional<StringRef> Suffix;
+};
+}
+
+#endif // LLVM_SUPPORT_GLOB_PATTERN_H
diff --git a/linux-x64/clang/include/llvm/Support/GraphWriter.h b/linux-x64/clang/include/llvm/Support/GraphWriter.h
new file mode 100644
index 0000000..3df5c86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/GraphWriter.h
@@ -0,0 +1,361 @@
+//===- llvm/Support/GraphWriter.h - Write graph to a .dot file --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a simple interface that can be used to print out generic
+// LLVM graphs to ".dot" files.  "dot" is a tool that is part of the AT&T
+// graphviz package (http://www.research.att.com/sw/tools/graphviz/) which can
+// be used to turn the files output by this interface into a variety of
+// different graphics formats.
+//
+// Graphs do not need to implement any interface past what is already required
+// by the GraphTraits template, but they can choose to implement specializations
+// of the DOTGraphTraits template if they want to customize the graphs output in
+// any way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GRAPHWRITER_H
+#define LLVM_SUPPORT_GRAPHWRITER_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DOTGraphTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+namespace DOT {  // Private functions...
+
+std::string EscapeString(const std::string &Label);
+
+/// \brief Get a color string for this node number. Simply round-robin selects
+/// from a reasonable number of colors.
+StringRef getColorString(unsigned NodeNumber);
+
+} // end namespace DOT
+
+namespace GraphProgram {
+
+enum Name {
+  DOT,
+  FDP,
+  NEATO,
+  TWOPI,
+  CIRCO
+};
+
+} // end namespace GraphProgram
+
+bool DisplayGraph(StringRef Filename, bool wait = true,
+                  GraphProgram::Name program = GraphProgram::DOT);
+
+template<typename GraphType>
+class GraphWriter {
+  raw_ostream &O;
+  const GraphType &G;
+
+  using DOTTraits = DOTGraphTraits<GraphType>;
+  using GTraits = GraphTraits<GraphType>;
+  using NodeRef = typename GTraits::NodeRef;
+  using node_iterator = typename GTraits::nodes_iterator;
+  using child_iterator = typename GTraits::ChildIteratorType;
+  DOTTraits DTraits;
+
+  static_assert(std::is_pointer<NodeRef>::value,
+                "FIXME: Currently GraphWriter requires the NodeRef type to be "
+                "a pointer.\nThe pointer usage should be moved to "
+                "DOTGraphTraits, and removed from GraphWriter itself.");
+
+  // Writes the edge labels of the node to O and returns true if there are any
+  // edge labels not equal to the empty string "".
+  bool getEdgeSourceLabels(raw_ostream &O, NodeRef Node) {
+    child_iterator EI = GTraits::child_begin(Node);
+    child_iterator EE = GTraits::child_end(Node);
+    bool hasEdgeSourceLabels = false;
+
+    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) {
+      std::string label = DTraits.getEdgeSourceLabel(Node, EI);
+
+      if (label.empty())
+        continue;
+
+      hasEdgeSourceLabels = true;
+
+      if (i)
+        O << "|";
+
+      O << "<s" << i << ">" << DOT::EscapeString(label);
+    }
+
+    if (EI != EE && hasEdgeSourceLabels)
+      O << "|<s64>truncated...";
+
+    return hasEdgeSourceLabels;
+  }
+
+public:
+  GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
+    DTraits = DOTTraits(SN);
+  }
+
+  void writeGraph(const std::string &Title = "") {
+    // Output the header for the graph...
+    writeHeader(Title);
+
+    // Emit all of the nodes in the graph...
+    writeNodes();
+
+    // Output any customizations on the graph
+    DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);
+
+    // Output the end of the graph
+    writeFooter();
+  }
+
+  void writeHeader(const std::string &Title) {
+    std::string GraphName = DTraits.getGraphName(G);
+
+    if (!Title.empty())
+      O << "digraph \"" << DOT::EscapeString(Title) << "\" {\n";
+    else if (!GraphName.empty())
+      O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n";
+    else
+      O << "digraph unnamed {\n";
+
+    if (DTraits.renderGraphFromBottomUp())
+      O << "\trankdir=\"BT\";\n";
+
+    if (!Title.empty())
+      O << "\tlabel=\"" << DOT::EscapeString(Title) << "\";\n";
+    else if (!GraphName.empty())
+      O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n";
+    O << DTraits.getGraphProperties(G);
+    O << "\n";
+  }
+
+  void writeFooter() {
+    // Finish off the graph
+    O << "}\n";
+  }
+
+  void writeNodes() {
+    // Loop over the graph, printing it out...
+    for (const auto Node : nodes<GraphType>(G))
+      if (!isNodeHidden(Node))
+        writeNode(Node);
+  }
+
+  bool isNodeHidden(NodeRef Node) {
+    return DTraits.isNodeHidden(Node);
+  }
+
+  void writeNode(NodeRef Node) {
+    std::string NodeAttributes = DTraits.getNodeAttributes(Node, G);
+
+    O << "\tNode" << static_cast<const void*>(Node) << " [shape=record,";
+    if (!NodeAttributes.empty()) O << NodeAttributes << ",";
+    O << "label=\"{";
+
+    if (!DTraits.renderGraphFromBottomUp()) {
+      O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));
+
+      // If we should include the address of the node in the label, do so now.
+      std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
+      if (!Id.empty())
+        O << "|" << DOT::EscapeString(Id);
+
+      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
+      if (!NodeDesc.empty())
+        O << "|" << DOT::EscapeString(NodeDesc);
+    }
+
+    std::string edgeSourceLabels;
+    raw_string_ostream EdgeSourceLabels(edgeSourceLabels);
+    bool hasEdgeSourceLabels = getEdgeSourceLabels(EdgeSourceLabels, Node);
+
+    if (hasEdgeSourceLabels) {
+      if (!DTraits.renderGraphFromBottomUp()) O << "|";
+
+      O << "{" << EdgeSourceLabels.str() << "}";
+
+      if (DTraits.renderGraphFromBottomUp()) O << "|";
+    }
+
+    if (DTraits.renderGraphFromBottomUp()) {
+      O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));
+
+      // If we should include the address of the node in the label, do so now.
+      std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
+      if (!Id.empty())
+        O << "|" << DOT::EscapeString(Id);
+
+      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
+      if (!NodeDesc.empty())
+        O << "|" << DOT::EscapeString(NodeDesc);
+    }
+
+    if (DTraits.hasEdgeDestLabels()) {
+      O << "|{";
+
+      unsigned i = 0, e = DTraits.numEdgeDestLabels(Node);
+      for (; i != e && i != 64; ++i) {
+        if (i) O << "|";
+        O << "<d" << i << ">"
+          << DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
+      }
+
+      if (i != e)
+        O << "|<d64>truncated...";
+      O << "}";
+    }
+
+    O << "}\"];\n";   // Finish printing the "node" line
+
+    // Output all of the edges now
+    child_iterator EI = GTraits::child_begin(Node);
+    child_iterator EE = GTraits::child_end(Node);
+    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i)
+      if (!DTraits.isNodeHidden(*EI))
+        writeEdge(Node, i, EI);
+    for (; EI != EE; ++EI)
+      if (!DTraits.isNodeHidden(*EI))
+        writeEdge(Node, 64, EI);
+  }
+
+  void writeEdge(NodeRef Node, unsigned edgeidx, child_iterator EI) {
+    if (NodeRef TargetNode = *EI) {
+      int DestPort = -1;
+      if (DTraits.edgeTargetsEdgeSource(Node, EI)) {
+        child_iterator TargetIt = DTraits.getEdgeTarget(Node, EI);
+
+        // Figure out which edge this targets...
+        unsigned Offset =
+          (unsigned)std::distance(GTraits::child_begin(TargetNode), TargetIt);
+        DestPort = static_cast<int>(Offset);
+      }
+
+      if (DTraits.getEdgeSourceLabel(Node, EI).empty())
+        edgeidx = -1;
+
+      emitEdge(static_cast<const void*>(Node), edgeidx,
+               static_cast<const void*>(TargetNode), DestPort,
+               DTraits.getEdgeAttributes(Node, EI, G));
+    }
+  }
+
+  /// emitSimpleNode - Outputs a simple (non-record) node
+  void emitSimpleNode(const void *ID, const std::string &Attr,
+                   const std::string &Label, unsigned NumEdgeSources = 0,
+                   const std::vector<std::string> *EdgeSourceLabels = nullptr) {
+    O << "\tNode" << ID << "[ ";
+    if (!Attr.empty())
+      O << Attr << ",";
+    O << " label =\"";
+    if (NumEdgeSources) O << "{";
+    O << DOT::EscapeString(Label);
+    if (NumEdgeSources) {
+      O << "|{";
+
+      for (unsigned i = 0; i != NumEdgeSources; ++i) {
+        if (i) O << "|";
+        O << "<s" << i << ">";
+        if (EdgeSourceLabels) O << DOT::EscapeString((*EdgeSourceLabels)[i]);
+      }
+      O << "}}";
+    }
+    O << "\"];\n";
+  }
+
+  /// emitEdge - Output an edge from a simple node into the graph...
+  void emitEdge(const void *SrcNodeID, int SrcNodePort,
+                const void *DestNodeID, int DestNodePort,
+                const std::string &Attrs) {
+    if (SrcNodePort  > 64) return;             // Emanating from truncated part?
+    if (DestNodePort > 64) DestNodePort = 64;  // Targeting the truncated part?
+
+    O << "\tNode" << SrcNodeID;
+    if (SrcNodePort >= 0)
+      O << ":s" << SrcNodePort;
+    O << " -> Node" << DestNodeID;
+    if (DestNodePort >= 0 && DTraits.hasEdgeDestLabels())
+      O << ":d" << DestNodePort;
+
+    if (!Attrs.empty())
+      O << "[" << Attrs << "]";
+    O << ";\n";
+  }
+
+  /// getOStream - Get the raw output stream into the graph file. Useful to
+  /// write fancy things using addCustomGraphFeatures().
+  raw_ostream &getOStream() {
+    return O;
+  }
+};
+
+template<typename GraphType>
+raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
+                        bool ShortNames = false,
+                        const Twine &Title = "") {
+  // Start the graph emission process...
+  GraphWriter<GraphType> W(O, G, ShortNames);
+
+  // Emit the graph.
+  W.writeGraph(Title.str());
+
+  return O;
+}
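+
+// A minimal usage sketch (hypothetical; assumes GraphTraits and
+// DOTGraphTraits are specialized for the graph type being printed):
+//
+//   llvm::WriteGraph(llvm::errs(), G, /*ShortNames=*/false, "My graph");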
+
+std::string createGraphFilename(const Twine &Name, int &FD);
+
+template <typename GraphType>
+std::string WriteGraph(const GraphType &G, const Twine &Name,
+                       bool ShortNames = false, const Twine &Title = "") {
+  int FD;
+  // Windows can't always handle long paths, so limit the length of the name.
+  std::string N = Name.str();
+  N = N.substr(0, std::min<std::size_t>(N.size(), 140));
+  std::string Filename = createGraphFilename(N, FD);
+  raw_fd_ostream O(FD, /*shouldClose=*/ true);
+
+  if (FD == -1) {
+    errs() << "error opening file '" << Filename << "' for writing!\n";
+    return "";
+  }
+
+  llvm::WriteGraph(O, G, ShortNames, Title);
+  errs() << " done. \n";
+
+  return Filename;
+}
+
+/// ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file,
+/// then cleanup.  For use from the debugger.
+///
+template<typename GraphType>
+void ViewGraph(const GraphType &G, const Twine &Name,
+               bool ShortNames = false, const Twine &Title = "",
+               GraphProgram::Name Program = GraphProgram::DOT) {
+  std::string Filename = llvm::WriteGraph(G, Name, ShortNames, Title);
+
+  if (Filename.empty())
+    return;
+
+  DisplayGraph(Filename, false, Program);
+}
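+
+// A minimal usage sketch (hypothetical; assumes a graph type G with the
+// required traits, and a graphviz installation for the viewer):
+//
+//   llvm::ViewGraph(G, "example-graph");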
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_GRAPHWRITER_H
diff --git a/linux-x64/clang/include/llvm/Support/Host.h b/linux-x64/clang/include/llvm/Support/Host.h
new file mode 100644
index 0000000..ddc5fa5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Host.h
@@ -0,0 +1,99 @@
+//===- llvm/Support/Host.h - Host machine characteristics --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Methods for querying the nature of the host machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_HOST_H
+#define LLVM_SUPPORT_HOST_H
+
+#include "llvm/ADT/StringMap.h"
+
+#if defined(__linux__) || defined(__GNU__) || defined(__HAIKU__)
+#include <endian.h>
+#elif defined(_AIX)
+#include <sys/machine.h>
+#elif defined(__sun)
+/* Solaris provides _BIG_ENDIAN/_LITTLE_ENDIAN selector in sys/types.h */
+#include <sys/types.h>
+#define BIG_ENDIAN 4321
+#define LITTLE_ENDIAN 1234
+#if defined(_BIG_ENDIAN)
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#else
+#if !defined(BYTE_ORDER) && !defined(LLVM_ON_WIN32)
+#include <machine/endian.h>
+#endif
+#endif
+
+#include <string>
+
+namespace llvm {
+namespace sys {
+
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
+constexpr bool IsBigEndianHost = true;
+#else
+constexpr bool IsBigEndianHost = false;
+#endif
+
+  static const bool IsLittleEndianHost = !IsBigEndianHost;
+
+  /// getDefaultTargetTriple() - Return the default target triple the compiler
+  /// has been configured to produce code for.
+  ///
+  /// The target triple is a string in the format of:
+  ///   CPU_TYPE-VENDOR-OPERATING_SYSTEM
+  /// or
+  ///   CPU_TYPE-VENDOR-KERNEL-OPERATING_SYSTEM
+  std::string getDefaultTargetTriple();
+
+  /// getProcessTriple() - Return an appropriate target triple for generating
+  /// code to be loaded into the current process, e.g. when using the JIT.
+  std::string getProcessTriple();
+
+  /// getHostCPUName - Get the LLVM name for the host CPU. The particular format
+  /// of the name is target dependent, and suitable for passing as -mcpu to the
+  /// target which matches the host.
+  ///
+  /// \return - The host CPU name, or empty if the CPU could not be determined.
+  StringRef getHostCPUName();
+
+  /// getHostCPUFeatures - Get the LLVM names for the host CPU features.
+  /// The particular format of the names is target dependent, and suitable for
+  /// passing as -mattr to the target which matches the host.
+  ///
+  /// \param Features - A string mapping feature names to either
+  /// true (if enabled) or false (if disabled). This routine makes no guarantees
+  /// about exactly which features may appear in this map, except that they are
+  /// all valid LLVM feature names.
+  ///
+  /// \return - True on success.
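+  ///
+  /// A minimal usage sketch (hypothetical):
+  /// \code
+  ///   StringMap<bool> Features;
+  ///   if (sys::getHostCPUFeatures(Features))
+  ///     for (const auto &F : Features)
+  ///       errs() << F.first() << " = " << F.second << "\n";
+  /// \endcode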
+  bool getHostCPUFeatures(StringMap<bool> &Features);
+
+  /// Get the number of physical cores (as opposed to logical cores returned
+  /// from thread::hardware_concurrency(), which includes hyperthreads).
+  /// Returns -1 if unknown for the current host system.
+  int getHostNumPhysicalCores();
+
+  namespace detail {
+  /// Helper functions to extract HostCPUName from /proc/cpuinfo on linux.
+  StringRef getHostCPUNameForPowerPC(StringRef ProcCpuinfoContent);
+  StringRef getHostCPUNameForARM(StringRef ProcCpuinfoContent);
+  StringRef getHostCPUNameForS390x(StringRef ProcCpuinfoContent);
+  StringRef getHostCPUNameForBPF();
+  }
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/JamCRC.h b/linux-x64/clang/include/llvm/Support/JamCRC.h
new file mode 100644
index 0000000..5268bbd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/JamCRC.h
@@ -0,0 +1,49 @@
+//===-- llvm/Support/JamCRC.h - Cyclic Redundancy Check ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of JamCRC.
+//
+// We will use the "Rocksoft^tm Model CRC Algorithm" to describe the properties
+// of this CRC:
+//   Width  : 32
+//   Poly   : 04C11DB7
+//   Init   : FFFFFFFF
+//   RefIn  : True
+//   RefOut : True
+//   XorOut : 00000000
+//   Check  : 340BC6D9 (result of CRC for "123456789")
+//
+// N.B.  We permit flexibility of the "Init" value.  Some consumers of this need
+//       it to be zero.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_JAMCRC_H
+#define LLVM_SUPPORT_JAMCRC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+template <typename T> class ArrayRef;
+
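+/// A minimal usage sketch (hypothetical); per the Check value above, the CRC
+/// of "123456789" is 0x340BC6D9:
+/// \code
+///   JamCRC CRC;
+///   CRC.update(ArrayRef<char>("123456789", 9));
+///   uint32_t Result = CRC.getCRC();  // 0x340BC6D9
+/// \endcode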
+class JamCRC {
+public:
+  JamCRC(uint32_t Init = 0xFFFFFFFFU) : CRC(Init) {}
+
+  // \brief Update the CRC calculation with Data.
+  void update(ArrayRef<char> Data);
+
+  uint32_t getCRC() const { return CRC; }
+
+private:
+  uint32_t CRC;
+};
+} // End of namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/KnownBits.h b/linux-x64/clang/include/llvm/Support/KnownBits.h
new file mode 100644
index 0000000..97e73b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/KnownBits.h
@@ -0,0 +1,202 @@
+//===- llvm/Support/KnownBits.h - Stores known zeros/ones -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class for representing known zeros and ones used by
+// computeKnownBits.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_KNOWNBITS_H
+#define LLVM_SUPPORT_KNOWNBITS_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+// Struct for tracking the known zeros and ones of a value.
+struct KnownBits {
+  APInt Zero;
+  APInt One;
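+
+  // For example, for a 4-bit value known to match the bit pattern ?1?0
+  // (bit 2 known one, bit 0 known zero, bits 1 and 3 unknown), Zero is
+  // 0b0001 and One is 0b0100. A bit set in both Zero and One would be a
+  // conflict.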
+
+private:
+  // Internal constructor for creating a KnownBits from two APInts.
+  KnownBits(APInt Zero, APInt One)
+      : Zero(std::move(Zero)), One(std::move(One)) {}
+
+public:
+  // Default construct Zero and One.
+  KnownBits() {}
+
+  /// Create a known bits object of BitWidth bits initialized to unknown.
+  KnownBits(unsigned BitWidth) : Zero(BitWidth, 0), One(BitWidth, 0) {}
+
+  /// Get the bit width of this value.
+  unsigned getBitWidth() const {
+    assert(Zero.getBitWidth() == One.getBitWidth() &&
+           "Zero and One should have the same width!");
+    return Zero.getBitWidth();
+  }
+
+  /// Returns true if there is conflicting information.
+  bool hasConflict() const { return Zero.intersects(One); }
+
+  /// Returns true if we know the value of all bits.
+  bool isConstant() const {
+    assert(!hasConflict() && "KnownBits conflict!");
+    return Zero.countPopulation() + One.countPopulation() == getBitWidth();
+  }
+
+  /// Returns the value when all bits have a known value. This just returns One
+  /// with a protective assertion.
+  const APInt &getConstant() const {
+    assert(isConstant() && "Can only get value when all bits are known");
+    return One;
+  }
+
+  /// Returns true if we don't know any bits.
+  bool isUnknown() const { return Zero.isNullValue() && One.isNullValue(); }
+
+  /// Resets the known state of all bits.
+  void resetAll() {
+    Zero.clearAllBits();
+    One.clearAllBits();
+  }
+
+  /// Returns true if value is all zero.
+  bool isZero() const {
+    assert(!hasConflict() && "KnownBits conflict!");
+    return Zero.isAllOnesValue();
+  }
+
+  /// Returns true if value is all one bits.
+  bool isAllOnes() const {
+    assert(!hasConflict() && "KnownBits conflict!");
+    return One.isAllOnesValue();
+  }
+
+  /// Make all bits known to be zero and discard any previous information.
+  void setAllZero() {
+    Zero.setAllBits();
+    One.clearAllBits();
+  }
+
+  /// Make all bits known to be one and discard any previous information.
+  void setAllOnes() {
+    Zero.clearAllBits();
+    One.setAllBits();
+  }
+
+  /// Returns true if this value is known to be negative.
+  bool isNegative() const { return One.isSignBitSet(); }
+
+  /// Returns true if this value is known to be non-negative.
+  bool isNonNegative() const { return Zero.isSignBitSet(); }
+
+  /// Make this value negative.
+  void makeNegative() {
+    One.setSignBit();
+  }
+
+  /// Make this value non-negative.
+  void makeNonNegative() {
+    Zero.setSignBit();
+  }
+
+  /// Truncate the underlying known Zero and One bits. This is equivalent
+  /// to truncating the value we're tracking.
+  KnownBits trunc(unsigned BitWidth) {
+    return KnownBits(Zero.trunc(BitWidth), One.trunc(BitWidth));
+  }
+
+  /// Zero extends the underlying known Zero and One bits. This is equivalent
+  /// to zero extending the value we're tracking.
+  KnownBits zext(unsigned BitWidth) {
+    return KnownBits(Zero.zext(BitWidth), One.zext(BitWidth));
+  }
+
+  /// Sign extends the underlying known Zero and One bits. This is equivalent
+  /// to sign extending the value we're tracking.
+  KnownBits sext(unsigned BitWidth) {
+    return KnownBits(Zero.sext(BitWidth), One.sext(BitWidth));
+  }
+
+  /// Zero extends or truncates the underlying known Zero and One bits. This is
+  /// equivalent to zero extending or truncating the value we're tracking.
+  KnownBits zextOrTrunc(unsigned BitWidth) {
+    return KnownBits(Zero.zextOrTrunc(BitWidth), One.zextOrTrunc(BitWidth));
+  }
+
+  /// Returns the minimum number of trailing zero bits.
+  unsigned countMinTrailingZeros() const {
+    return Zero.countTrailingOnes();
+  }
+
+  /// Returns the minimum number of trailing one bits.
+  unsigned countMinTrailingOnes() const {
+    return One.countTrailingOnes();
+  }
+
+  /// Returns the minimum number of leading zero bits.
+  unsigned countMinLeadingZeros() const {
+    return Zero.countLeadingOnes();
+  }
+
+  /// Returns the minimum number of leading one bits.
+  unsigned countMinLeadingOnes() const {
+    return One.countLeadingOnes();
+  }
+
+  /// Returns the number of times the sign bit is replicated into the other
+  /// bits.
+  unsigned countMinSignBits() const {
+    if (isNonNegative())
+      return countMinLeadingZeros();
+    if (isNegative())
+      return countMinLeadingOnes();
+    return 0;
+  }
+
+  /// Returns the maximum number of trailing zero bits possible.
+  unsigned countMaxTrailingZeros() const {
+    return One.countTrailingZeros();
+  }
+
+  /// Returns the maximum number of trailing one bits possible.
+  unsigned countMaxTrailingOnes() const {
+    return Zero.countTrailingZeros();
+  }
+
+  /// Returns the maximum number of leading zero bits possible.
+  unsigned countMaxLeadingZeros() const {
+    return One.countLeadingZeros();
+  }
+
+  /// Returns the maximum number of leading one bits possible.
+  unsigned countMaxLeadingOnes() const {
+    return Zero.countLeadingZeros();
+  }
+
+  /// Returns the number of bits known to be one.
+  unsigned countMinPopulation() const {
+    return One.countPopulation();
+  }
+
+  /// Returns the maximum number of bits that could be one.
+  unsigned countMaxPopulation() const {
+    return getBitWidth() - Zero.countPopulation();
+  }
+
+  /// Compute known bits resulting from adding LHS and RHS.
+  static KnownBits computeForAddSub(bool Add, bool NSW, const KnownBits &LHS,
+                                    KnownBits RHS);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/LEB128.h b/linux-x64/clang/include/llvm/Support/LEB128.h
new file mode 100644
index 0000000..9feb072
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/LEB128.h
@@ -0,0 +1,197 @@
+//===- llvm/Support/LEB128.h - [SU]LEB128 utility functions -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some utility functions for encoding SLEB128 and
+// ULEB128 values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_LEB128_H
+#define LLVM_SUPPORT_LEB128_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Utility function to encode a SLEB128 value to an output stream. Returns
+/// the length in bytes of the encoded value.
+inline unsigned encodeSLEB128(int64_t Value, raw_ostream &OS,
+                              unsigned PadTo = 0) {
+  bool More;
+  unsigned Count = 0;
+  do {
+    uint8_t Byte = Value & 0x7f;
+    // NOTE: this assumes that this signed shift is an arithmetic right shift.
+    Value >>= 7;
+    More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
+              ((Value == -1) && ((Byte & 0x40) != 0))));
+    Count++;
+    if (More || Count < PadTo)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+    OS << char(Byte);
+  } while (More);
+
+  // Pad with 0x80 and emit a terminating byte at the end.
+  if (Count < PadTo) {
+    uint8_t PadValue = Value < 0 ? 0x7f : 0x00;
+    for (; Count < PadTo - 1; ++Count)
+      OS << char(PadValue | 0x80);
+    OS << char(PadValue);
+    Count++;
+  }
+  return Count;
+}
+
+/// Utility function to encode a SLEB128 value to a buffer. Returns
+/// the length in bytes of the encoded value.
+inline unsigned encodeSLEB128(int64_t Value, uint8_t *p, unsigned PadTo = 0) {
+  uint8_t *orig_p = p;
+  unsigned Count = 0;
+  bool More;
+  do {
+    uint8_t Byte = Value & 0x7f;
+    // NOTE: this assumes that this signed shift is an arithmetic right shift.
+    Value >>= 7;
+    More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
+              ((Value == -1) && ((Byte & 0x40) != 0))));
+    Count++;
+    if (More || Count < PadTo)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+    *p++ = Byte;
+  } while (More);
+
+  // Pad with 0x80 and emit a terminating byte at the end.
+  if (Count < PadTo) {
+    uint8_t PadValue = Value < 0 ? 0x7f : 0x00;
+    for (; Count < PadTo - 1; ++Count)
+      *p++ = (PadValue | 0x80);
+    *p++ = PadValue;
+  }
+  return (unsigned)(p - orig_p);
+}
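+
+// For example, encodeSLEB128(-2, p) writes the single byte 0x7E: the low
+// seven bits of -2 are 0b1111110, the sign bit of that group (0x40) is set,
+// and the arithmetic right shift yields -1, so no continuation byte follows.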
+
+/// Utility function to encode a ULEB128 value to an output stream. Returns
+/// the length in bytes of the encoded value.
+inline unsigned encodeULEB128(uint64_t Value, raw_ostream &OS,
+                              unsigned PadTo = 0) {
+  unsigned Count = 0;
+  do {
+    uint8_t Byte = Value & 0x7f;
+    Value >>= 7;
+    Count++;
+    if (Value != 0 || Count < PadTo)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+    OS << char(Byte);
+  } while (Value != 0);
+
+  // Pad with 0x80 and emit a null byte at the end.
+  if (Count < PadTo) {
+    for (; Count < PadTo - 1; ++Count)
+      OS << '\x80';
+    OS << '\x00';
+    Count++;
+  }
+  return Count;
+}
+
+/// Utility function to encode a ULEB128 value to a buffer. Returns
+/// the length in bytes of the encoded value.
+inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
+                              unsigned PadTo = 0) {
+  uint8_t *orig_p = p;
+  unsigned Count = 0;
+  do {
+    uint8_t Byte = Value & 0x7f;
+    Value >>= 7;
+    Count++;
+    if (Value != 0 || Count < PadTo)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+    *p++ = Byte;
+  } while (Value != 0);
+
+  // Pad with 0x80 and emit a null byte at the end.
+  if (Count < PadTo) {
+    for (; Count < PadTo - 1; ++Count)
+      *p++ = '\x80';
+    *p++ = '\x00';
+  }
+
+  return (unsigned)(p - orig_p);
+}
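+
+// For example, encodeULEB128(624485, p) writes the three bytes
+// {0xE5, 0x8E, 0x26}: 624485 (0x98765) splits into the 7-bit groups 0x65,
+// 0x0E, and 0x26 from least significant, and every byte except the last
+// carries the 0x80 continuation bit.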
+
+/// Utility function to decode a ULEB128 value.
+inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr,
+                              const uint8_t *end = nullptr,
+                              const char **error = nullptr) {
+  const uint8_t *orig_p = p;
+  uint64_t Value = 0;
+  unsigned Shift = 0;
+  if (error)
+    *error = nullptr;
+  do {
+    if (end && p == end) {
+      if (error)
+        *error = "malformed uleb128, extends past end";
+      if (n)
+        *n = (unsigned)(p - orig_p);
+      return 0;
+    }
+    uint64_t Slice = *p & 0x7f;
+    if (Shift >= 64 || Slice << Shift >> Shift != Slice) {
+      if (error)
+        *error = "uleb128 too big for uint64";
+      if (n)
+        *n = (unsigned)(p - orig_p);
+      return 0;
+    }
+    Value += uint64_t(*p & 0x7f) << Shift;
+    Shift += 7;
+  } while (*p++ >= 128);
+  if (n)
+    *n = (unsigned)(p - orig_p);
+  return Value;
+}
+
+/// Utility function to decode a SLEB128 value.
+inline int64_t decodeSLEB128(const uint8_t *p, unsigned *n = nullptr,
+                             const uint8_t *end = nullptr,
+                             const char **error = nullptr) {
+  const uint8_t *orig_p = p;
+  int64_t Value = 0;
+  unsigned Shift = 0;
+  uint8_t Byte;
+  do {
+    if (end && p == end) {
+      if (error)
+        *error = "malformed sleb128, extends past end";
+      if (n)
+        *n = (unsigned)(p - orig_p);
+      return 0;
+    }
+    Byte = *p++;
+    Value |= (int64_t(Byte & 0x7f) << Shift);
+    Shift += 7;
+  } while (Byte >= 128);
+  // Sign extend negative numbers.
+  if (Byte & 0x40)
+    Value |= (-1ULL) << Shift;
+  if (n)
+    *n = (unsigned)(p - orig_p);
+  return Value;
+}
+
+/// Utility function to get the size of the ULEB128-encoded value.
+extern unsigned getULEB128Size(uint64_t Value);
+
+/// Utility function to get the size of the SLEB128-encoded value.
+extern unsigned getSLEB128Size(int64_t Value);
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_LEB128_H
diff --git a/linux-x64/clang/include/llvm/Support/LICENSE.TXT b/linux-x64/clang/include/llvm/Support/LICENSE.TXT
new file mode 100644
index 0000000..3479b3f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/LICENSE.TXT
@@ -0,0 +1,6 @@
+LLVM System Interface Library
+-------------------------------------------------------------------------------
+The LLVM System Interface Library is licensed under the Illinois Open Source
+License and has the following additional copyright:
+
+Copyright (C) 2004 eXtensible Systems, Inc.
diff --git a/linux-x64/clang/include/llvm/Support/LineIterator.h b/linux-x64/clang/include/llvm/Support/LineIterator.h
new file mode 100644
index 0000000..9d4cd3b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/LineIterator.h
@@ -0,0 +1,88 @@
+//===- LineIterator.h - Iterator to read a text buffer's lines --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_LINEITERATOR_H
+#define LLVM_SUPPORT_LINEITERATOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+
+class MemoryBuffer;
+
+/// \brief A forward iterator which reads text lines from a buffer.
+///
+/// This class provides a forward iterator interface for reading one line at
+/// a time from a buffer. When default constructed the iterator will be the
+/// "end" iterator.
+///
+/// The iterator is aware of what line number it is currently processing. It
+/// strips blank lines by default and, given a comment-starting character,
+/// comment lines as well.
+///
+/// Note that this iterator requires the buffer to be nul terminated.
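+///
+/// A minimal usage sketch (hypothetical; assumes a MemoryBuffer Buf):
+/// \code
+///   for (line_iterator I(Buf, /*SkipBlanks=*/true, '#'), E; I != E; ++I)
+///     errs() << "line " << I.line_number() << ": " << *I << "\n";
+/// \endcode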
+class line_iterator
+    : public std::iterator<std::forward_iterator_tag, StringRef> {
+  const MemoryBuffer *Buffer;
+  char CommentMarker;
+  bool SkipBlanks;
+
+  unsigned LineNumber;
+  StringRef CurrentLine;
+
+public:
+  /// \brief Default construct an "end" iterator.
+  line_iterator() : Buffer(nullptr) {}
+
+  /// \brief Construct a new iterator around some memory buffer.
+  explicit line_iterator(const MemoryBuffer &Buffer, bool SkipBlanks = true,
+                         char CommentMarker = '\0');
+
+  /// \brief Return true if we've reached EOF or are an "end" iterator.
+  bool is_at_eof() const { return !Buffer; }
+
+  /// \brief Return true if we're an "end" iterator or have reached EOF.
+  bool is_at_end() const { return is_at_eof(); }
+
+  /// \brief Return the current line number. May return any number at EOF.
+  int64_t line_number() const { return LineNumber; }
+
+  /// \brief Advance to the next (non-empty, non-comment) line.
+  line_iterator &operator++() {
+    advance();
+    return *this;
+  }
+  line_iterator operator++(int) {
+    line_iterator tmp(*this);
+    advance();
+    return tmp;
+  }
+
+  /// \brief Get the current line as a \c StringRef.
+  StringRef operator*() const { return CurrentLine; }
+  const StringRef *operator->() const { return &CurrentLine; }
+
+  friend bool operator==(const line_iterator &LHS, const line_iterator &RHS) {
+    return LHS.Buffer == RHS.Buffer &&
+           LHS.CurrentLine.begin() == RHS.CurrentLine.begin();
+  }
+
+  friend bool operator!=(const line_iterator &LHS, const line_iterator &RHS) {
+    return !(LHS == RHS);
+  }
+
+private:
+  /// \brief Advance the iterator to the next line.
+  void advance();
+};
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_LINEITERATOR_H
diff --git a/linux-x64/clang/include/llvm/Support/Locale.h b/linux-x64/clang/include/llvm/Support/Locale.h
new file mode 100644
index 0000000..f7a2c03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Locale.h
@@ -0,0 +1,17 @@
+#ifndef LLVM_SUPPORT_LOCALE_H
+#define LLVM_SUPPORT_LOCALE_H
+
+namespace llvm {
+class StringRef;
+
+namespace sys {
+namespace locale {
+
+int columnWidth(StringRef s);
+bool isPrint(int c);
+
+} // namespace locale
+} // namespace sys
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_LOCALE_H
diff --git a/linux-x64/clang/include/llvm/Support/LockFileManager.h b/linux-x64/clang/include/llvm/Support/LockFileManager.h
new file mode 100644
index 0000000..1e417bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/LockFileManager.h
@@ -0,0 +1,100 @@
+//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H
+#define LLVM_SUPPORT_LOCKFILEMANAGER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FileSystem.h"
+#include <system_error>
+#include <utility> // for std::pair
+
+namespace llvm {
+class StringRef;
+
+/// \brief Class that manages the creation of a lock file to aid
+/// implicit coordination between different processes.
+///
+/// The implicit coordination works by creating a ".lock" file alongside
+/// the file that we're coordinating for, using the atomicity of the file
+/// system to ensure that only a single process can create that ".lock" file.
+/// When the lock file is removed, the owning process has finished the
+/// operation.
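+///
+/// A minimal usage sketch (FileName is a placeholder for the output the
+/// caller wants to guard):
+/// \code
+///   LockFileManager Locked(FileName);
+///   switch (Locked) {
+///   case LockFileManager::LFS_Error:  // lock could not be created; bail out
+///     break;
+///   case LockFileManager::LFS_Owned:  // we hold the lock; do the work
+///     break;
+///   case LockFileManager::LFS_Shared: // another process owns it; wait
+///     Locked.waitForUnlock();
+///     break;
+///   }
+/// \endcode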
+class LockFileManager {
+public:
+  /// \brief Describes the state of a lock file.
+  enum LockFileState {
+    /// \brief The lock file has been created and is owned by this instance
+    /// of the object.
+    LFS_Owned,
+    /// \brief The lock file already exists and is owned by some other
+    /// instance.
+    LFS_Shared,
+    /// \brief An error occurred while trying to create or find the lock
+    /// file.
+    LFS_Error
+  };
+
+  /// \brief Describes the result of waiting for the owner to release the lock.
+  enum WaitForUnlockResult {
+    /// \brief The lock was released successfully.
+    Res_Success,
+    /// \brief Owner died while holding the lock.
+    Res_OwnerDied,
+    /// \brief Reached timeout while waiting for the owner to release the lock.
+    Res_Timeout
+  };
+
+private:
+  SmallString<128> FileName;
+  SmallString<128> LockFileName;
+  Optional<sys::fs::TempFile> UniqueLockFile;
+
+  Optional<std::pair<std::string, int> > Owner;
+  std::error_code ErrorCode;
+  std::string ErrorDiagMsg;
+
+  LockFileManager(const LockFileManager &) = delete;
+  LockFileManager &operator=(const LockFileManager &) = delete;
+
+  static Optional<std::pair<std::string, int> >
+  readLockFile(StringRef LockFileName);
+
+  static bool processStillExecuting(StringRef Hostname, int PID);
+
+public:
+
+  LockFileManager(StringRef FileName);
+  ~LockFileManager();
+
+  /// \brief Determine the state of the lock file.
+  LockFileState getState() const;
+
+  operator LockFileState() const { return getState(); }
+
+  /// \brief For a shared lock, wait until the owner releases the lock.
+  WaitForUnlockResult waitForUnlock();
+
+  /// \brief Remove the lock file.  This may delete a different lock file than
+  /// the one previously read if there is a race.
+  std::error_code unsafeRemoveLockFile();
+
+  /// \brief Get error message, or "" if there is no error.
+  std::string getErrorMessage() const;
+
+  /// \brief Set the error code and error message.
+  void setError(const std::error_code &EC, StringRef ErrorMsg = "") {
+    ErrorCode = EC;
+    ErrorDiagMsg = ErrorMsg.str();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H
diff --git a/linux-x64/clang/include/llvm/Support/LowLevelTypeImpl.h b/linux-x64/clang/include/llvm/Support/LowLevelTypeImpl.h
new file mode 100644
index 0000000..a0a5a52
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/LowLevelTypeImpl.h
@@ -0,0 +1,264 @@
+//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Implement a low-level type suitable for MachineInstr level instruction
+/// selection.
+///
+/// For a type attached to a MachineInstr, we only care about 2 details: total
+/// size and the number of vector lanes (if any). Accordingly, there are 4
+/// possible valid type-kinds:
+///
+///    * `sN` for scalars and aggregates
+///    * `<N x sM>` for vectors, which must have at least 2 elements.
+///    * `pN` for pointers
+///    * `<N x pM>` for vectors of pointers, which likewise need 2+ elements
+///
+/// Other information required for correct selection is expected to be carried
+/// by the opcode or by non-type flags. For example, the distinction between
+/// G_ADD and G_FADD for int/float, or fast-math flags.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
+#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cassert>
+
+namespace llvm {
+
+class DataLayout;
+class Type;
+class raw_ostream;
+
+class LLT {
+public:
+  /// Get a low-level scalar or aggregate "bag of bits".
+  static LLT scalar(unsigned SizeInBits) {
+    assert(SizeInBits > 0 && "invalid scalar size");
+    return LLT{/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0,
+               SizeInBits, /*AddressSpace=*/0};
+  }
+
+  /// Get a low-level pointer in the given address space (defaulting to 0).
+  static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
+    assert(SizeInBits > 0 && "invalid pointer size");
+    return LLT{/*isPointer=*/true, /*isVector=*/false, /*NumElements=*/0,
+               SizeInBits, AddressSpace};
+  }
+
+  /// Get a low-level vector of some number of elements and element width.
+  /// \p NumElements must be at least 2.
+  static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
+    assert(NumElements > 1 && "invalid number of vector elements");
+    assert(ScalarSizeInBits > 0 && "invalid vector element size");
+    return LLT{/*isPointer=*/false, /*isVector=*/true, NumElements,
+               ScalarSizeInBits, /*AddressSpace=*/0};
+  }
+
+  /// Get a low-level vector of some number of elements and element type.
+  static LLT vector(uint16_t NumElements, LLT ScalarTy) {
+    assert(NumElements > 1 && "invalid number of vector elements");
+    assert(!ScalarTy.isVector() && "invalid vector element type");
+    return LLT{ScalarTy.isPointer(), /*isVector=*/true, NumElements,
+               ScalarTy.getSizeInBits(),
+               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
+  }
+
+  explicit LLT(bool isPointer, bool isVector, uint16_t NumElements,
+               unsigned SizeInBits, unsigned AddressSpace) {
+    init(isPointer, isVector, NumElements, SizeInBits, AddressSpace);
+  }
+  explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
+
+  explicit LLT(MVT VT);
+
+  bool isValid() const { return RawData != 0; }
+
+  bool isScalar() const { return isValid() && !IsPointer && !IsVector; }
+
+  bool isPointer() const { return isValid() && IsPointer && !IsVector; }
+
+  bool isVector() const { return isValid() && IsVector; }
+
+  /// Returns the number of elements in a vector LLT. Must only be called on
+  /// vector types.
+  uint16_t getNumElements() const {
+    assert(IsVector && "cannot get number of elements on scalar/aggregate");
+    if (!IsPointer)
+      return getFieldValue(VectorElementsFieldInfo);
+    else
+      return getFieldValue(PointerVectorElementsFieldInfo);
+  }
+
+  /// Returns the total size of the type. Must only be called on sized types.
+  unsigned getSizeInBits() const {
+    if (isPointer() || isScalar())
+      return getScalarSizeInBits();
+    return getScalarSizeInBits() * getNumElements();
+  }
+
+  unsigned getScalarSizeInBits() const {
+    assert(RawData != 0 && "Invalid Type");
+    if (!IsVector) {
+      if (!IsPointer)
+        return getFieldValue(ScalarSizeFieldInfo);
+      else
+        return getFieldValue(PointerSizeFieldInfo);
+    } else {
+      if (!IsPointer)
+        return getFieldValue(VectorSizeFieldInfo);
+      else
+        return getFieldValue(PointerVectorSizeFieldInfo);
+    }
+  }
+
+  unsigned getAddressSpace() const {
+    assert(RawData != 0 && "Invalid Type");
+    assert(IsPointer && "cannot get address space of non-pointer type");
+    if (!IsVector)
+      return getFieldValue(PointerAddressSpaceFieldInfo);
+    else
+      return getFieldValue(PointerVectorAddressSpaceFieldInfo);
+  }
+
+  /// Returns the vector's element type. Only valid for vector types.
+  LLT getElementType() const {
+    assert(isVector() && "cannot get element type of scalar/aggregate");
+    if (IsPointer)
+      return pointer(getAddressSpace(), getScalarSizeInBits());
+    else
+      return scalar(getScalarSizeInBits());
+  }
+
+  void print(raw_ostream &OS) const;
+
+  bool operator==(const LLT &RHS) const {
+    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
+           RHS.RawData == RawData;
+  }
+
+  bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
+
+  friend struct DenseMapInfo<LLT>;
+
+private:
+  /// LLT is packed into 64 bits as follows:
+  /// isPointer : 1
+  /// isVector  : 1
+  /// with 62 bits remaining for Kind-specific data, packed in bitfields
+  /// as described below. As there isn't a simple portable way to pack bits
+  /// into bitfields, the different fields of the packed structure are
+  /// described here in static const *Field variables. Each of these variables
+  /// is a 2-element array, with the first element describing the bitfield size
+  /// and the second element describing the bitfield offset.
+  typedef int BitFieldInfo[2];
+  ///
+  /// This is how the bitfields are packed per Kind:
+  /// * Invalid:
+  ///   gets encoded as RawData == 0, as that is an invalid encoding, since for
+  ///   valid encodings, SizeInBits/SizeOfElement must be larger than 0.
+  /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
+  ///   SizeInBits: 32;
+  static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0};
+  /// * Pointer (isPointer == 1 && isVector == 0):
+  ///   SizeInBits: 16;
+  ///   AddressSpace: 23;
+  static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0};
+  static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
+      23, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
+  /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
+  ///   NumElements: 16;
+  ///   SizeOfElement: 32;
+  static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0};
+  static const constexpr BitFieldInfo VectorSizeFieldInfo{
+      32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
+  /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
+  ///   NumElements: 16;
+  ///   SizeOfElement: 16;
+  ///   AddressSpace: 23;
+  static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0};
+  static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{
+      16,
+      PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]};
+  static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{
+      23, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]};
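+  ///
+  /// Worked example (a sketch): LLT::pointer(1, 64) is packed by init() as
+  ///   RawData = maskAndShift(64, PointerSizeFieldInfo) |
+  ///             maskAndShift(1, PointerAddressSpaceFieldInfo);
+  /// i.e. bits [0,16) hold the pointer size and bits [16,39) the address
+  /// space, with IsPointer = 1 and IsVector = 0 stored alongside.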
+
+  uint64_t IsPointer : 1;
+  uint64_t IsVector : 1;
+  uint64_t RawData : 62;
+
+  static uint64_t getMask(const BitFieldInfo FieldInfo) {
+    const int FieldSizeInBits = FieldInfo[0];
+    return (((uint64_t)1) << FieldSizeInBits) - 1;
+  }
+  static uint64_t maskAndShift(uint64_t Val, uint64_t Mask, uint8_t Shift) {
+    assert(Val <= Mask && "Value too large for field");
+    return (Val & Mask) << Shift;
+  }
+  static uint64_t maskAndShift(uint64_t Val, const BitFieldInfo FieldInfo) {
+    return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]);
+  }
+  uint64_t getFieldValue(const BitFieldInfo FieldInfo) const {
+    return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
+  }
+
+  void init(bool IsPointer, bool IsVector, uint16_t NumElements,
+            unsigned SizeInBits, unsigned AddressSpace) {
+    this->IsPointer = IsPointer;
+    this->IsVector = IsVector;
+    if (!IsVector) {
+      if (!IsPointer)
+        RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+      else
+        RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
+                  maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
+    } else {
+      assert(NumElements > 1 && "invalid number of vector elements");
+      if (!IsPointer)
+        RawData = maskAndShift(NumElements, VectorElementsFieldInfo) |
+                  maskAndShift(SizeInBits, VectorSizeFieldInfo);
+      else
+        RawData =
+            maskAndShift(NumElements, PointerVectorElementsFieldInfo) |
+            maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) |
+            maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo);
+    }
+  }
+};
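+
+// A few illustrative constructions (a sketch; only the factory functions
+// declared above are used):
+//   LLT S32 = LLT::scalar(32);       // s32
+//   LLT P0  = LLT::pointer(0, 64);   // p0: 64-bit pointer in address space 0
+//   LLT V4  = LLT::vector(4, S32);   // <4 x s32>
+//   assert(V4.getSizeInBits() == 128 && V4.getElementType() == S32);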
+
+inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
+  Ty.print(OS);
+  return OS;
+}
+
+template<> struct DenseMapInfo<LLT> {
+  static inline LLT getEmptyKey() {
+    LLT Invalid;
+    Invalid.IsPointer = true;
+    return Invalid;
+  }
+  static inline LLT getTombstoneKey() {
+    LLT Invalid;
+    Invalid.IsVector = true;
+    return Invalid;
+  }
+  static inline unsigned getHashValue(const LLT &Ty) {
+    uint64_t Val = ((uint64_t)Ty.RawData) << 2 | ((uint64_t)Ty.IsPointer) << 1 |
+                   ((uint64_t)Ty.IsVector);
+    return DenseMapInfo<uint64_t>::getHashValue(Val);
+  }
+  static bool isEqual(const LLT &LHS, const LLT &RHS) {
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
diff --git a/linux-x64/clang/include/llvm/Support/MD5.h b/linux-x64/clang/include/llvm/Support/MD5.h
new file mode 100644
index 0000000..2c0dc76
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MD5.h
@@ -0,0 +1,122 @@
+/* -*- C++ -*-
+ * This code is derived from (original license follows):
+ *
+ * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
+ * MD5 Message-Digest Algorithm (RFC 1321).
+ *
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001.  No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * See md5.c for more information.
+ */
+
+#ifndef LLVM_SUPPORT_MD5_H
+#define LLVM_SUPPORT_MD5_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Endian.h"
+#include <array>
+#include <cstdint>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+
+class MD5 {
+  // Any 32-bit or wider unsigned integer data type will do.
+  typedef uint32_t MD5_u32plus;
+
+  MD5_u32plus a = 0x67452301;
+  MD5_u32plus b = 0xefcdab89;
+  MD5_u32plus c = 0x98badcfe;
+  MD5_u32plus d = 0x10325476;
+  MD5_u32plus hi = 0;
+  MD5_u32plus lo = 0;
+  uint8_t buffer[64];
+  MD5_u32plus block[16];
+
+public:
+  struct MD5Result {
+    std::array<uint8_t, 16> Bytes;
+
+    operator std::array<uint8_t, 16>() const { return Bytes; }
+
+    const uint8_t &operator[](size_t I) const { return Bytes[I]; }
+    uint8_t &operator[](size_t I) { return Bytes[I]; }
+
+    SmallString<32> digest() const;
+
+    uint64_t low() const {
+      // Our MD5 implementation returns the result in little endian, so the low
+      // word is first.
+      using namespace support;
+      return endian::read<uint64_t, little, unaligned>(Bytes.data());
+    }
+
+    uint64_t high() const {
+      using namespace support;
+      return endian::read<uint64_t, little, unaligned>(Bytes.data() + 8);
+    }
+    std::pair<uint64_t, uint64_t> words() const {
+      using namespace support;
+      return std::make_pair(high(), low());
+    }
+  };
+
+  MD5();
+
+  /// \brief Updates the hash for the byte stream provided.
+  void update(ArrayRef<uint8_t> Data);
+
+  /// \brief Updates the hash for the StringRef provided.
+  void update(StringRef Str);
+
+  /// \brief Finishes off the hash and puts the result in result.
+  void final(MD5Result &Result);
+
+  /// \brief Translates the bytes in \p Res to a hex string that is
+  /// deposited into \p Str. The result will be of length 32.
+  static void stringifyResult(MD5Result &Result, SmallString<32> &Str);
+
+  /// \brief Computes the hash for the given bytes.
+  static std::array<uint8_t, 16> hash(ArrayRef<uint8_t> Data);
+
+private:
+  const uint8_t *body(ArrayRef<uint8_t> Data);
+};
+
+inline bool operator==(const MD5::MD5Result &LHS, const MD5::MD5Result &RHS) {
+  return LHS.Bytes == RHS.Bytes;
+}
+
+/// Helper to compute and return lower 64 bits of the given string's MD5 hash.
+inline uint64_t MD5Hash(StringRef Str) {
+  using namespace support;
+
+  MD5 Hash;
+  Hash.update(Str);
+  MD5::MD5Result Result;
+  Hash.final(Result);
+  // Return the least significant word.
+  return Result.low();
+}
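+
+// A minimal sketch of computing a full hex digest, using only the members
+// declared above:
+//   MD5 Hash;
+//   Hash.update("some data");
+//   MD5::MD5Result Result;
+//   Hash.final(Result);
+//   SmallString<32> Hex = Result.digest();  // 32 hex characters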
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MD5_H
diff --git a/linux-x64/clang/include/llvm/Support/MachineValueType.h b/linux-x64/clang/include/llvm/Support/MachineValueType.h
new file mode 100644
index 0000000..552dea0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MachineValueType.h
@@ -0,0 +1,1058 @@
+//===- Support/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of machine-level, target-independent types that
+// legal values in the code generator use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MACHINEVALUETYPE_H
+#define LLVM_SUPPORT_MACHINEVALUETYPE_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+  class Type;
+
+  /// Machine Value Type. Every type that is supported natively by some
+  /// processor targeted by LLVM occurs here. This means that any legal value
+  /// type can be represented by an MVT.
+  class MVT {
+  public:
+    enum SimpleValueType : uint8_t {
+      // Simple value types that aren't explicitly part of this enumeration
+      // are considered extended value types.
+      INVALID_SIMPLE_VALUE_TYPE = 0,
+
+      // If you change this numbering, you must change the values in
+      // ValueTypes.td as well!
+      Other          =   1,   // This is a non-standard value
+      i1             =   2,   // This is a 1 bit integer value
+      i8             =   3,   // This is an 8 bit integer value
+      i16            =   4,   // This is a 16 bit integer value
+      i32            =   5,   // This is a 32 bit integer value
+      i64            =   6,   // This is a 64 bit integer value
+      i128           =   7,   // This is a 128 bit integer value
+
+      FIRST_INTEGER_VALUETYPE = i1,
+      LAST_INTEGER_VALUETYPE  = i128,
+
+      f16            =   8,   // This is a 16 bit floating point value
+      f32            =   9,   // This is a 32 bit floating point value
+      f64            =  10,   // This is a 64 bit floating point value
+      f80            =  11,   // This is a 80 bit floating point value
+      f128           =  12,   // This is a 128 bit floating point value
+      ppcf128        =  13,   // This is a PPC 128-bit floating point value
+
+      FIRST_FP_VALUETYPE = f16,
+      LAST_FP_VALUETYPE  = ppcf128,
+
+      v1i1           =  14,   //    1 x i1
+      v2i1           =  15,   //    2 x i1
+      v4i1           =  16,   //    4 x i1
+      v8i1           =  17,   //    8 x i1
+      v16i1          =  18,   //   16 x i1
+      v32i1          =  19,   //   32 x i1
+      v64i1          =  20,   //   64 x i1
+      v128i1         =  21,   //  128 x i1
+      v512i1         =  22,   //  512 x i1
+      v1024i1        =  23,   // 1024 x i1
+
+      v1i8           =  24,   //  1 x i8
+      v2i8           =  25,   //  2 x i8
+      v4i8           =  26,   //  4 x i8
+      v8i8           =  27,   //  8 x i8
+      v16i8          =  28,   // 16 x i8
+      v32i8          =  29,   // 32 x i8
+      v64i8          =  30,   // 64 x i8
+      v128i8         =  31,   // 128 x i8
+      v256i8         =  32,   // 256 x i8
+
+      v1i16          =  33,   //  1 x i16
+      v2i16          =  34,   //  2 x i16
+      v4i16          =  35,   //  4 x i16
+      v8i16          =  36,   //  8 x i16
+      v16i16         =  37,   // 16 x i16
+      v32i16         =  38,   // 32 x i16
+      v64i16         =  39,   // 64 x i16
+      v128i16        =  40,   // 128 x i16
+
+      v1i32          =  41,   //  1 x i32
+      v2i32          =  42,   //  2 x i32
+      v4i32          =  43,   //  4 x i32
+      v8i32          =  44,   //  8 x i32
+      v16i32         =  45,   // 16 x i32
+      v32i32         =  46,   // 32 x i32
+      v64i32         =  47,   // 64 x i32
+
+      v1i64          =  48,   //  1 x i64
+      v2i64          =  49,   //  2 x i64
+      v4i64          =  50,   //  4 x i64
+      v8i64          =  51,   //  8 x i64
+      v16i64         =  52,   // 16 x i64
+      v32i64         =  53,   // 32 x i64
+
+      v1i128         =  54,   //  1 x i128
+
+      // Scalable integer types
+      nxv1i1         =  55,   // n x  1 x i1
+      nxv2i1         =  56,   // n x  2 x i1
+      nxv4i1         =  57,   // n x  4 x i1
+      nxv8i1         =  58,   // n x  8 x i1
+      nxv16i1        =  59,   // n x 16 x i1
+      nxv32i1        =  60,   // n x 32 x i1
+
+      nxv1i8         =  61,   // n x  1 x i8
+      nxv2i8         =  62,   // n x  2 x i8
+      nxv4i8         =  63,   // n x  4 x i8
+      nxv8i8         =  64,   // n x  8 x i8
+      nxv16i8        =  65,   // n x 16 x i8
+      nxv32i8        =  66,   // n x 32 x i8
+
+      nxv1i16        =  67,   // n x  1 x i16
+      nxv2i16        =  68,   // n x  2 x i16
+      nxv4i16        =  69,   // n x  4 x i16
+      nxv8i16        =  70,   // n x  8 x i16
+      nxv16i16       =  71,   // n x 16 x i16
+      nxv32i16       =  72,   // n x 32 x i16
+
+      nxv1i32        =  73,   // n x  1 x i32
+      nxv2i32        =  74,   // n x  2 x i32
+      nxv4i32        =  75,   // n x  4 x i32
+      nxv8i32        =  76,   // n x  8 x i32
+      nxv16i32       =  77,   // n x 16 x i32
+      nxv32i32       =  78,   // n x 32 x i32
+
+      nxv1i64        =  79,   // n x  1 x i64
+      nxv2i64        =  80,   // n x  2 x i64
+      nxv4i64        =  81,   // n x  4 x i64
+      nxv8i64        =  82,   // n x  8 x i64
+      nxv16i64       =  83,   // n x 16 x i64
+      nxv32i64       =  84,   // n x 32 x i64
+
+      FIRST_INTEGER_VECTOR_VALUETYPE = v1i1,
+      LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
+
+      FIRST_INTEGER_SCALABLE_VALUETYPE = nxv1i1,
+      LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
+
+      v2f16          =  85,   //  2 x f16
+      v4f16          =  86,   //  4 x f16
+      v8f16          =  87,   //  8 x f16
+      v1f32          =  88,   //  1 x f32
+      v2f32          =  89,   //  2 x f32
+      v4f32          =  90,   //  4 x f32
+      v8f32          =  91,   //  8 x f32
+      v16f32         =  92,   // 16 x f32
+      v1f64          =  93,   //  1 x f64
+      v2f64          =  94,   //  2 x f64
+      v4f64          =  95,   //  4 x f64
+      v8f64          =  96,   //  8 x f64
+
+      nxv2f16        =  97,   // n x  2 x f16
+      nxv4f16        =  98,   // n x  4 x f16
+      nxv8f16        =  99,   // n x  8 x f16
+      nxv1f32        = 100,   // n x  1 x f32
+      nxv2f32        = 101,   // n x  2 x f32
+      nxv4f32        = 102,   // n x  4 x f32
+      nxv8f32        = 103,   // n x  8 x f32
+      nxv16f32       = 104,   // n x 16 x f32
+      nxv1f64        = 105,   // n x  1 x f64
+      nxv2f64        = 106,   // n x  2 x f64
+      nxv4f64        = 107,   // n x  4 x f64
+      nxv8f64        = 108,   // n x  8 x f64
+
+      FIRST_FP_VECTOR_VALUETYPE = v2f16,
+      LAST_FP_VECTOR_VALUETYPE = nxv8f64,
+
+      FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
+      LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
+
+      FIRST_VECTOR_VALUETYPE = v1i1,
+      LAST_VECTOR_VALUETYPE  = nxv8f64,
+
+      x86mmx         =  109,   // This is an X86 MMX value
+
+      Glue           =  110,   // This glues nodes together during pre-RA sched
+
+      isVoid         =  111,   // This has no value
+
+      Untyped        =  112,   // This value takes a register, but has
+                               // unspecified type.  The register class
+                               // will be determined by the opcode.
+
+      ExceptRef      = 113,    // WebAssembly's except_ref type
+
+      FIRST_VALUETYPE = 1,     // This is always the beginning of the list.
+      LAST_VALUETYPE =  114,   // This always remains at the end of the list.
+
+      // This is the current maximum for LAST_VALUETYPE.
+      // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
+      // This value must be a multiple of 32.
+      MAX_ALLOWED_VALUETYPE = 128,
+
+      // A value of type llvm::TokenTy
+      token          = 248,
+
+      // This is MDNode or MDString.
+      Metadata       = 249,
+
+      // An int value the size of the pointer of the current
+      // target to any address space. This must only be used internal to
+      // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
+      iPTRAny        = 250,
+
+      // A vector with any length and element size. This is used
+      // for intrinsics that have overloadings based on vector types.
+      // This is only for tblgen's consumption!
+      vAny           = 251,
+
+      // Any floating-point or vector floating-point value. This is used
+      // for intrinsics that have overloadings based on floating-point types.
+      // This is only for tblgen's consumption!
+      fAny           = 252,
+
+      // An integer or vector integer value of any bit width. This is
+      // used for intrinsics that have overloadings based on integer bit widths.
+      // This is only for tblgen's consumption!
+      iAny           = 253,
+
+      // An int value the size of the pointer of the current
+      // target.  This should only be used internal to tblgen!
+      iPTR           = 254,
+
+      // Any type. This is used for intrinsics that have overloadings.
+      // This is only for tblgen's consumption!
+      Any            = 255
+    };
+
+    SimpleValueType SimpleTy = INVALID_SIMPLE_VALUE_TYPE;
+
+    // A class to represent the number of elements in a vector.
+    //
+    // For fixed-length vectors, the total number of elements is equal to 'Min'.
+    // For scalable vectors, the total number of elements is a multiple of 'Min'.
+    class ElementCount {
+    public:
+      unsigned Min;
+      bool Scalable;
+
+      ElementCount(unsigned Min, bool Scalable)
+      : Min(Min), Scalable(Scalable) {}
+
+      ElementCount operator*(unsigned RHS) {
+        return { Min * RHS, Scalable };
+      }
+
+      ElementCount& operator*=(unsigned RHS) {
+        Min *= RHS;
+        return *this;
+      }
+
+      ElementCount operator/(unsigned RHS) {
+        return { Min / RHS, Scalable };
+      }
+
+      ElementCount& operator/=(unsigned RHS) {
+        Min /= RHS;
+        return *this;
+      }
+
+      bool operator==(const ElementCount& RHS) {
+        return Min == RHS.Min && Scalable == RHS.Scalable;
+      }
+    };
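+
+    // Example: MVT::v4f32 yields ElementCount{4, /*Scalable=*/false} and
+    // MVT::nxv4f32 yields {4, /*Scalable=*/true}; the arithmetic operators
+    // scale only 'Min', so (EC * 2).Min == 8 in both cases.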
+
+    constexpr MVT() = default;
+    constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}
+
+    bool operator>(const MVT& S)  const { return SimpleTy >  S.SimpleTy; }
+    bool operator<(const MVT& S)  const { return SimpleTy <  S.SimpleTy; }
+    bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
+    bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
+    bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
+    bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
+
+    /// Return true if this is a valid simple valuetype.
+    bool isValid() const {
+      return (SimpleTy >= MVT::FIRST_VALUETYPE &&
+              SimpleTy < MVT::LAST_VALUETYPE);
+    }
+
+    /// Return true if this is a FP or a vector FP type.
+    bool isFloatingPoint() const {
+      return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
+               SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
+              (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
+               SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
+    }
+
+    /// Return true if this is an integer or a vector integer type.
+    bool isInteger() const {
+      return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
+               SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
+              (SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
+               SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
+    }
+
+    /// Return true if this is an integer, not including vectors.
+    bool isScalarInteger() const {
+      return (SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
+              SimpleTy <= MVT::LAST_INTEGER_VALUETYPE);
+    }
+
+    /// Return true if this is a vector value type.
+    bool isVector() const {
+      return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
+              SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
+    }
+
+    /// Return true if this is a vector value type where the
+    /// runtime length is machine dependent.
+    bool isScalableVector() const {
+      return ((SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VALUETYPE &&
+               SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VALUETYPE) ||
+              (SimpleTy >= MVT::FIRST_FP_SCALABLE_VALUETYPE &&
+               SimpleTy <= MVT::LAST_FP_SCALABLE_VALUETYPE));
+    }
+
+    /// Return true if this is a 16-bit vector type.
+    bool is16BitVector() const {
+      return (SimpleTy == MVT::v2i8  || SimpleTy == MVT::v1i16 ||
+              SimpleTy == MVT::v16i1);
+    }
+
+    /// Return true if this is a 32-bit vector type.
+    bool is32BitVector() const {
+      return (SimpleTy == MVT::v32i1 || SimpleTy == MVT::v4i8  ||
+              SimpleTy == MVT::v2i16 || SimpleTy == MVT::v1i32 ||
+              SimpleTy == MVT::v2f16 || SimpleTy == MVT::v1f32);
+    }
+
+    /// Return true if this is a 64-bit vector type.
+    bool is64BitVector() const {
+      return (SimpleTy == MVT::v64i1 || SimpleTy == MVT::v8i8  ||
+              SimpleTy == MVT::v4i16 || SimpleTy == MVT::v2i32 ||
+              SimpleTy == MVT::v1i64 || SimpleTy == MVT::v4f16 ||
+              SimpleTy == MVT::v2f32 || SimpleTy == MVT::v1f64);
+    }
+
+    /// Return true if this is a 128-bit vector type.
+    bool is128BitVector() const {
+      return (SimpleTy == MVT::v128i1 || SimpleTy == MVT::v16i8  ||
+              SimpleTy == MVT::v8i16  || SimpleTy == MVT::v4i32  ||
+              SimpleTy == MVT::v2i64  || SimpleTy == MVT::v1i128 ||
+              SimpleTy == MVT::v8f16  || SimpleTy == MVT::v4f32  ||
+              SimpleTy == MVT::v2f64);
+    }
+
+    /// Return true if this is a 256-bit vector type.
+    bool is256BitVector() const {
+      return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64  ||
+              SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
+              SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
+    }
+
+    /// Return true if this is a 512-bit vector type.
+    bool is512BitVector() const {
+      return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64  ||
+              SimpleTy == MVT::v512i1 || SimpleTy == MVT::v64i8  ||
+              SimpleTy == MVT::v32i16 || SimpleTy == MVT::v16i32 ||
+              SimpleTy == MVT::v8i64);
+    }
+
+    /// Return true if this is a 1024-bit vector type.
+    bool is1024BitVector() const {
+      return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 ||
+              SimpleTy == MVT::v64i16  || SimpleTy == MVT::v32i32 ||
+              SimpleTy == MVT::v16i64);
+    }
+
+    /// Return true if this is a 2048-bit vector type.
+    bool is2048BitVector() const {
+      return (SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 ||
+              SimpleTy == MVT::v64i32 || SimpleTy == MVT::v32i64);
+    }
+
+    /// Return true if this is an overloaded type for TableGen.
+    bool isOverloaded() const {
+      return (SimpleTy==MVT::Any  ||
+              SimpleTy==MVT::iAny || SimpleTy==MVT::fAny ||
+              SimpleTy==MVT::vAny || SimpleTy==MVT::iPTRAny);
+    }
+
+    /// Returns true if the vector's number of elements is a power of 2.
+    bool isPow2VectorType() const {
+      unsigned NElts = getVectorNumElements();
+      return !(NElts & (NElts - 1));
+    }
+
+    /// Widens the length of the given vector MVT up to the nearest power of 2
+    /// and returns that type.
+    MVT getPow2VectorType() const {
+      if (isPow2VectorType())
+        return *this;
+
+      unsigned NElts = getVectorNumElements();
+      unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+      return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
+    }
+
+    /// If this is a vector, return the element type, otherwise return this.
+    MVT getScalarType() const {
+      return isVector() ? getVectorElementType() : *this;
+    }
+
+    MVT getVectorElementType() const {
+      switch (SimpleTy) {
+      default:
+        llvm_unreachable("Not a vector MVT!");
+      case v1i1:
+      case v2i1:
+      case v4i1:
+      case v8i1:
+      case v16i1:
+      case v32i1:
+      case v64i1:
+      case v128i1:
+      case v512i1:
+      case v1024i1:
+      case nxv1i1:
+      case nxv2i1:
+      case nxv4i1:
+      case nxv8i1:
+      case nxv16i1:
+      case nxv32i1: return i1;
+      case v1i8:
+      case v2i8:
+      case v4i8:
+      case v8i8:
+      case v16i8:
+      case v32i8:
+      case v64i8:
+      case v128i8:
+      case v256i8:
+      case nxv1i8:
+      case nxv2i8:
+      case nxv4i8:
+      case nxv8i8:
+      case nxv16i8:
+      case nxv32i8: return i8;
+      case v1i16:
+      case v2i16:
+      case v4i16:
+      case v8i16:
+      case v16i16:
+      case v32i16:
+      case v64i16:
+      case v128i16:
+      case nxv1i16:
+      case nxv2i16:
+      case nxv4i16:
+      case nxv8i16:
+      case nxv16i16:
+      case nxv32i16: return i16;
+      case v1i32:
+      case v2i32:
+      case v4i32:
+      case v8i32:
+      case v16i32:
+      case v32i32:
+      case v64i32:
+      case nxv1i32:
+      case nxv2i32:
+      case nxv4i32:
+      case nxv8i32:
+      case nxv16i32:
+      case nxv32i32: return i32;
+      case v1i64:
+      case v2i64:
+      case v4i64:
+      case v8i64:
+      case v16i64:
+      case v32i64:
+      case nxv1i64:
+      case nxv2i64:
+      case nxv4i64:
+      case nxv8i64:
+      case nxv16i64:
+      case nxv32i64: return i64;
+      case v1i128: return i128;
+      case v2f16:
+      case v4f16:
+      case v8f16:
+      case nxv2f16:
+      case nxv4f16:
+      case nxv8f16: return f16;
+      case v1f32:
+      case v2f32:
+      case v4f32:
+      case v8f32:
+      case v16f32:
+      case nxv1f32:
+      case nxv2f32:
+      case nxv4f32:
+      case nxv8f32:
+      case nxv16f32: return f32;
+      case v1f64:
+      case v2f64:
+      case v4f64:
+      case v8f64:
+      case nxv1f64:
+      case nxv2f64:
+      case nxv4f64:
+      case nxv8f64: return f64;
+      }
+    }
+
+    unsigned getVectorNumElements() const {
+      switch (SimpleTy) {
+      default:
+        llvm_unreachable("Not a vector MVT!");
+      case v1024i1: return 1024;
+      case v512i1: return 512;
+      case v256i8: return 256;
+      case v128i1:
+      case v128i8:
+      case v128i16: return 128;
+      case v64i1:
+      case v64i8:
+      case v64i16:
+      case v64i32: return 64;
+      case v32i1:
+      case v32i8:
+      case v32i16:
+      case v32i32:
+      case v32i64:
+      case nxv32i1:
+      case nxv32i8:
+      case nxv32i16:
+      case nxv32i32:
+      case nxv32i64: return 32;
+      case v16i1:
+      case v16i8:
+      case v16i16:
+      case v16i32:
+      case v16i64:
+      case v16f32:
+      case nxv16i1:
+      case nxv16i8:
+      case nxv16i16:
+      case nxv16i32:
+      case nxv16i64:
+      case nxv16f32: return 16;
+      case v8i1:
+      case v8i8:
+      case v8i16:
+      case v8i32:
+      case v8i64:
+      case v8f16:
+      case v8f32:
+      case v8f64:
+      case nxv8i1:
+      case nxv8i8:
+      case nxv8i16:
+      case nxv8i32:
+      case nxv8i64:
+      case nxv8f16:
+      case nxv8f32:
+      case nxv8f64: return 8;
+      case v4i1:
+      case v4i8:
+      case v4i16:
+      case v4i32:
+      case v4i64:
+      case v4f16:
+      case v4f32:
+      case v4f64:
+      case nxv4i1:
+      case nxv4i8:
+      case nxv4i16:
+      case nxv4i32:
+      case nxv4i64:
+      case nxv4f16:
+      case nxv4f32:
+      case nxv4f64: return 4;
+      case v2i1:
+      case v2i8:
+      case v2i16:
+      case v2i32:
+      case v2i64:
+      case v2f16:
+      case v2f32:
+      case v2f64:
+      case nxv2i1:
+      case nxv2i8:
+      case nxv2i16:
+      case nxv2i32:
+      case nxv2i64:
+      case nxv2f16:
+      case nxv2f32:
+      case nxv2f64: return 2;
+      case v1i1:
+      case v1i8:
+      case v1i16:
+      case v1i32:
+      case v1i64:
+      case v1i128:
+      case v1f32:
+      case v1f64:
+      case nxv1i1:
+      case nxv1i8:
+      case nxv1i16:
+      case nxv1i32:
+      case nxv1i64:
+      case nxv1f32:
+      case nxv1f64: return 1;
+      }
+    }
+
+    MVT::ElementCount getVectorElementCount() const {
+      return { getVectorNumElements(), isScalableVector() };
+    }
+
+    unsigned getSizeInBits() const {
+      switch (SimpleTy) {
+      default:
+        llvm_unreachable("getSizeInBits called on extended MVT.");
+      case Other:
+        llvm_unreachable("Value type is non-standard value, Other.");
+      case iPTR:
+        llvm_unreachable("Value type size is target-dependent. Ask TLI.");
+      case iPTRAny:
+      case iAny:
+      case fAny:
+      case vAny:
+      case Any:
+        llvm_unreachable("Value type is overloaded.");
+      case token:
+        llvm_unreachable("Token type is a sentinel that cannot be used "
+                         "in codegen and has no size");
+      case Metadata:
+        llvm_unreachable("Value type is metadata.");
+      case i1:
+      case v1i1:
+      case nxv1i1: return 1;
+      case v2i1:
+      case nxv2i1: return 2;
+      case v4i1:
+      case nxv4i1: return 4;
+      case i8  :
+      case v1i8:
+      case v8i1:
+      case nxv1i8:
+      case nxv8i1: return 8;
+      case i16 :
+      case f16:
+      case v16i1:
+      case v2i8:
+      case v1i16:
+      case nxv16i1:
+      case nxv2i8:
+      case nxv1i16: return 16;
+      case f32 :
+      case i32 :
+      case v32i1:
+      case v4i8:
+      case v2i16:
+      case v2f16:
+      case v1f32:
+      case v1i32:
+      case nxv32i1:
+      case nxv4i8:
+      case nxv2i16:
+      case nxv1i32:
+      case nxv2f16:
+      case nxv1f32: return 32;
+      case x86mmx:
+      case f64 :
+      case i64 :
+      case v64i1:
+      case v8i8:
+      case v4i16:
+      case v2i32:
+      case v1i64:
+      case v4f16:
+      case v2f32:
+      case v1f64:
+      case nxv8i8:
+      case nxv4i16:
+      case nxv2i32:
+      case nxv1i64:
+      case nxv4f16:
+      case nxv2f32:
+      case nxv1f64: return 64;
+      case f80 :  return 80;
+      case f128:
+      case ppcf128:
+      case i128:
+      case v128i1:
+      case v16i8:
+      case v8i16:
+      case v4i32:
+      case v2i64:
+      case v1i128:
+      case v8f16:
+      case v4f32:
+      case v2f64:
+      case nxv16i8:
+      case nxv8i16:
+      case nxv4i32:
+      case nxv2i64:
+      case nxv8f16:
+      case nxv4f32:
+      case nxv2f64: return 128;
+      case v32i8:
+      case v16i16:
+      case v8i32:
+      case v4i64:
+      case v8f32:
+      case v4f64:
+      case nxv32i8:
+      case nxv16i16:
+      case nxv8i32:
+      case nxv4i64:
+      case nxv8f32:
+      case nxv4f64: return 256;
+      case v512i1:
+      case v64i8:
+      case v32i16:
+      case v16i32:
+      case v8i64:
+      case v16f32:
+      case v8f64:
+      case nxv32i16:
+      case nxv16i32:
+      case nxv8i64:
+      case nxv16f32:
+      case nxv8f64: return 512;
+      case v1024i1:
+      case v128i8:
+      case v64i16:
+      case v32i32:
+      case v16i64:
+      case nxv32i32:
+      case nxv16i64: return 1024;
+      case v256i8:
+      case v128i16:
+      case v64i32:
+      case v32i64:
+      case nxv32i64: return 2048;
+      case ExceptRef: return 0; // opaque type
+      }
+    }
+
+    unsigned getScalarSizeInBits() const {
+      return getScalarType().getSizeInBits();
+    }
+
+    /// Return the number of bytes overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSize() const {
+      return (getSizeInBits() + 7) / 8;
+    }
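+
+    // Example: MVT::i1 has a store size of 1 byte and MVT::v2f32 of 8 bytes,
+    // since sizes round up to whole bytes.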
+
+    /// Return the number of bits overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSizeInBits() const {
+      return getStoreSize() * 8;
+    }
+
+    /// Return true if this has more bits than VT.
+    bool bitsGT(MVT VT) const {
+      return getSizeInBits() > VT.getSizeInBits();
+    }
+
+    /// Return true if this has no less bits than VT.
+    bool bitsGE(MVT VT) const {
+      return getSizeInBits() >= VT.getSizeInBits();
+    }
+
+    /// Return true if this has less bits than VT.
+    bool bitsLT(MVT VT) const {
+      return getSizeInBits() < VT.getSizeInBits();
+    }
+
+    /// Return true if this has no more bits than VT.
+    bool bitsLE(MVT VT) const {
+      return getSizeInBits() <= VT.getSizeInBits();
+    }
+
+    static MVT getFloatingPointVT(unsigned BitWidth) {
+      switch (BitWidth) {
+      default:
+        llvm_unreachable("Bad bit width!");
+      case 16:
+        return MVT::f16;
+      case 32:
+        return MVT::f32;
+      case 64:
+        return MVT::f64;
+      case 80:
+        return MVT::f80;
+      case 128:
+        return MVT::f128;
+      }
+    }
+
+    static MVT getIntegerVT(unsigned BitWidth) {
+      switch (BitWidth) {
+      default:
+        return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+      case 1:
+        return MVT::i1;
+      case 8:
+        return MVT::i8;
+      case 16:
+        return MVT::i16;
+      case 32:
+        return MVT::i32;
+      case 64:
+        return MVT::i64;
+      case 128:
+        return MVT::i128;
+      }
+    }
+
+    static MVT getVectorVT(MVT VT, unsigned NumElements) {
+      switch (VT.SimpleTy) {
+      default:
+        break;
+      case MVT::i1:
+        if (NumElements == 1)    return MVT::v1i1;
+        if (NumElements == 2)    return MVT::v2i1;
+        if (NumElements == 4)    return MVT::v4i1;
+        if (NumElements == 8)    return MVT::v8i1;
+        if (NumElements == 16)   return MVT::v16i1;
+        if (NumElements == 32)   return MVT::v32i1;
+        if (NumElements == 64)   return MVT::v64i1;
+        if (NumElements == 128)  return MVT::v128i1;
+        if (NumElements == 512)  return MVT::v512i1;
+        if (NumElements == 1024) return MVT::v1024i1;
+        break;
+      case MVT::i8:
+        if (NumElements == 1)   return MVT::v1i8;
+        if (NumElements == 2)   return MVT::v2i8;
+        if (NumElements == 4)   return MVT::v4i8;
+        if (NumElements == 8)   return MVT::v8i8;
+        if (NumElements == 16)  return MVT::v16i8;
+        if (NumElements == 32)  return MVT::v32i8;
+        if (NumElements == 64)  return MVT::v64i8;
+        if (NumElements == 128) return MVT::v128i8;
+        if (NumElements == 256) return MVT::v256i8;
+        break;
+      case MVT::i16:
+        if (NumElements == 1)   return MVT::v1i16;
+        if (NumElements == 2)   return MVT::v2i16;
+        if (NumElements == 4)   return MVT::v4i16;
+        if (NumElements == 8)   return MVT::v8i16;
+        if (NumElements == 16)  return MVT::v16i16;
+        if (NumElements == 32)  return MVT::v32i16;
+        if (NumElements == 64)  return MVT::v64i16;
+        if (NumElements == 128) return MVT::v128i16;
+        break;
+      case MVT::i32:
+        if (NumElements == 1)  return MVT::v1i32;
+        if (NumElements == 2)  return MVT::v2i32;
+        if (NumElements == 4)  return MVT::v4i32;
+        if (NumElements == 8)  return MVT::v8i32;
+        if (NumElements == 16) return MVT::v16i32;
+        if (NumElements == 32) return MVT::v32i32;
+        if (NumElements == 64) return MVT::v64i32;
+        break;
+      case MVT::i64:
+        if (NumElements == 1)  return MVT::v1i64;
+        if (NumElements == 2)  return MVT::v2i64;
+        if (NumElements == 4)  return MVT::v4i64;
+        if (NumElements == 8)  return MVT::v8i64;
+        if (NumElements == 16) return MVT::v16i64;
+        if (NumElements == 32) return MVT::v32i64;
+        break;
+      case MVT::i128:
+        if (NumElements == 1)  return MVT::v1i128;
+        break;
+      case MVT::f16:
+        if (NumElements == 2)  return MVT::v2f16;
+        if (NumElements == 4)  return MVT::v4f16;
+        if (NumElements == 8)  return MVT::v8f16;
+        break;
+      case MVT::f32:
+        if (NumElements == 1)  return MVT::v1f32;
+        if (NumElements == 2)  return MVT::v2f32;
+        if (NumElements == 4)  return MVT::v4f32;
+        if (NumElements == 8)  return MVT::v8f32;
+        if (NumElements == 16) return MVT::v16f32;
+        break;
+      case MVT::f64:
+        if (NumElements == 1)  return MVT::v1f64;
+        if (NumElements == 2)  return MVT::v2f64;
+        if (NumElements == 4)  return MVT::v4f64;
+        if (NumElements == 8)  return MVT::v8f64;
+        break;
+      }
+      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+    }
+
+    static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
+      switch(VT.SimpleTy) {
+        default:
+          break;
+        case MVT::i1:
+          if (NumElements == 1)  return MVT::nxv1i1;
+          if (NumElements == 2)  return MVT::nxv2i1;
+          if (NumElements == 4)  return MVT::nxv4i1;
+          if (NumElements == 8)  return MVT::nxv8i1;
+          if (NumElements == 16) return MVT::nxv16i1;
+          if (NumElements == 32) return MVT::nxv32i1;
+          break;
+        case MVT::i8:
+          if (NumElements == 1)  return MVT::nxv1i8;
+          if (NumElements == 2)  return MVT::nxv2i8;
+          if (NumElements == 4)  return MVT::nxv4i8;
+          if (NumElements == 8)  return MVT::nxv8i8;
+          if (NumElements == 16) return MVT::nxv16i8;
+          if (NumElements == 32) return MVT::nxv32i8;
+          break;
+        case MVT::i16:
+          if (NumElements == 1)  return MVT::nxv1i16;
+          if (NumElements == 2)  return MVT::nxv2i16;
+          if (NumElements == 4)  return MVT::nxv4i16;
+          if (NumElements == 8)  return MVT::nxv8i16;
+          if (NumElements == 16) return MVT::nxv16i16;
+          if (NumElements == 32) return MVT::nxv32i16;
+          break;
+        case MVT::i32:
+          if (NumElements == 1)  return MVT::nxv1i32;
+          if (NumElements == 2)  return MVT::nxv2i32;
+          if (NumElements == 4)  return MVT::nxv4i32;
+          if (NumElements == 8)  return MVT::nxv8i32;
+          if (NumElements == 16) return MVT::nxv16i32;
+          if (NumElements == 32) return MVT::nxv32i32;
+          break;
+        case MVT::i64:
+          if (NumElements == 1)  return MVT::nxv1i64;
+          if (NumElements == 2)  return MVT::nxv2i64;
+          if (NumElements == 4)  return MVT::nxv4i64;
+          if (NumElements == 8)  return MVT::nxv8i64;
+          if (NumElements == 16) return MVT::nxv16i64;
+          if (NumElements == 32) return MVT::nxv32i64;
+          break;
+        case MVT::f16:
+          if (NumElements == 2)  return MVT::nxv2f16;
+          if (NumElements == 4)  return MVT::nxv4f16;
+          if (NumElements == 8)  return MVT::nxv8f16;
+          break;
+        case MVT::f32:
+          if (NumElements == 1)  return MVT::nxv1f32;
+          if (NumElements == 2)  return MVT::nxv2f32;
+          if (NumElements == 4)  return MVT::nxv4f32;
+          if (NumElements == 8)  return MVT::nxv8f32;
+          if (NumElements == 16) return MVT::nxv16f32;
+          break;
+        case MVT::f64:
+          if (NumElements == 1)  return MVT::nxv1f64;
+          if (NumElements == 2)  return MVT::nxv2f64;
+          if (NumElements == 4)  return MVT::nxv4f64;
+          if (NumElements == 8)  return MVT::nxv8f64;
+          break;
+      }
+      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+    }
+
+    static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
+      if (IsScalable)
+        return getScalableVectorVT(VT, NumElements);
+      return getVectorVT(VT, NumElements);
+    }
+
+    static MVT getVectorVT(MVT VT, MVT::ElementCount EC) {
+      if (EC.Scalable)
+        return getScalableVectorVT(VT, EC.Min);
+      return getVectorVT(VT, EC.Min);
+    }
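+
+    // Illustrative round-trips (a sketch, following the tables above):
+    //   MVT::getVectorVT(MVT::f32, 4)          == MVT::v4f32
+    //   MVT::getScalableVectorVT(MVT::i32, 8)  == MVT::nxv8i32
+    //   MVT(MVT::v4f32).getVectorElementType() == MVT::f32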
+
+    /// Return the value type corresponding to the specified type.  This returns
+    /// all pointers as iPTR.  If HandleUnknown is true, unknown types are
+    /// returned as Other, otherwise they are invalid.
+    static MVT getVT(Type *Ty, bool HandleUnknown = false);
+
+  private:
+    /// A simple iterator over the MVT::SimpleValueType enum.
+    struct mvt_iterator {
+      SimpleValueType VT;
+
+      mvt_iterator(SimpleValueType VT) : VT(VT) {}
+
+      MVT operator*() const { return VT; }
+      bool operator!=(const mvt_iterator &LHS) const { return VT != LHS.VT; }
+
+      mvt_iterator& operator++() {
+        VT = (MVT::SimpleValueType)((int)VT + 1);
+        assert((int)VT <= MVT::MAX_ALLOWED_VALUETYPE &&
+               "MVT iterator overflowed.");
+        return *this;
+      }
+    };
+
+    /// A range of the MVT::SimpleValueType enum.
+    using mvt_range = iterator_range<mvt_iterator>;
+
+  public:
+    /// SimpleValueType Iteration
+    /// @{
+    static mvt_range all_valuetypes() {
+      return mvt_range(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE);
+    }
+
+    static mvt_range integer_valuetypes() {
+      return mvt_range(MVT::FIRST_INTEGER_VALUETYPE,
+                       (MVT::SimpleValueType)(MVT::LAST_INTEGER_VALUETYPE + 1));
+    }
+
+    static mvt_range fp_valuetypes() {
+      return mvt_range(MVT::FIRST_FP_VALUETYPE,
+                       (MVT::SimpleValueType)(MVT::LAST_FP_VALUETYPE + 1));
+    }
+
+    static mvt_range vector_valuetypes() {
+      return mvt_range(MVT::FIRST_VECTOR_VALUETYPE,
+                       (MVT::SimpleValueType)(MVT::LAST_VECTOR_VALUETYPE + 1));
+    }
+
+    static mvt_range integer_vector_valuetypes() {
+      return mvt_range(
+          MVT::FIRST_INTEGER_VECTOR_VALUETYPE,
+          (MVT::SimpleValueType)(MVT::LAST_INTEGER_VECTOR_VALUETYPE + 1));
+    }
+
+    static mvt_range fp_vector_valuetypes() {
+      return mvt_range(
+          MVT::FIRST_FP_VECTOR_VALUETYPE,
+          (MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
+    }
+
+    static mvt_range integer_scalable_vector_valuetypes() {
+      return mvt_range(MVT::FIRST_INTEGER_SCALABLE_VALUETYPE,
+              (MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VALUETYPE + 1));
+    }
+
+    static mvt_range fp_scalable_vector_valuetypes() {
+      return mvt_range(MVT::FIRST_FP_SCALABLE_VALUETYPE,
+                   (MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VALUETYPE + 1));
+    }
+    /// @}
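+
+    // Usage sketch: for (MVT VT : MVT::integer_valuetypes()) visits i1
+    // through i128; each range above is half-open, which is why the LAST_*
+    // bounds are bumped by one.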
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MACHINEVALUETYPE_H
diff --git a/linux-x64/clang/include/llvm/Support/ManagedStatic.h b/linux-x64/clang/include/llvm/Support/ManagedStatic.h
new file mode 100644
index 0000000..b4bf321
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ManagedStatic.h
@@ -0,0 +1,97 @@
+//===-- llvm/Support/ManagedStatic.h - Static Global wrapper ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ManagedStatic class and the llvm_shutdown() function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MANAGEDSTATIC_H
+#define LLVM_SUPPORT_MANAGEDSTATIC_H
+
+#include <atomic>
+#include <cstddef>
+
+namespace llvm {
+
+/// object_creator - Helper class for ManagedStatic.
+template <class C> struct object_creator {
+  static void *call() { return new C(); }
+};
+
+/// object_deleter - Helper class for ManagedStatic.
+///
+template <typename T> struct object_deleter {
+  static void call(void *Ptr) { delete (T *)Ptr; }
+};
+template <typename T, size_t N> struct object_deleter<T[N]> {
+  static void call(void *Ptr) { delete[](T *)Ptr; }
+};
+
+/// ManagedStaticBase - Common base class for ManagedStatic instances.
+class ManagedStaticBase {
+protected:
+  // This should only be used as a static variable, which guarantees that this
+  // will be zero initialized.
+  mutable std::atomic<void *> Ptr;
+  mutable void (*DeleterFn)(void*);
+  mutable const ManagedStaticBase *Next;
+
+  void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;
+
+public:
+  /// isConstructed - Return true if this object has already been constructed.
+  bool isConstructed() const { return Ptr != nullptr; }
+
+  void destroy() const;
+};
+
+/// ManagedStatic - This transparently changes the behavior of global statics to
+/// be lazily constructed on demand (good for reducing startup times of dynamic
+/// libraries that link in LLVM components) and makes destruction explicit
+/// through the llvm_shutdown() function call.
+///
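+/// A minimal usage sketch (the registry below is an assumed example, not an
+/// LLVM API):
+/// \code
+///   static ManagedStatic<std::vector<int>> Registry;
+///   void record(int V) { Registry->push_back(V); } // constructed on 1st use
+/// \endcode
+///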
+template <class C, class Creator = object_creator<C>,
+          class Deleter = object_deleter<C>>
+class ManagedStatic : public ManagedStaticBase {
+public:
+  // Accessors.
+  C &operator*() {
+    void *Tmp = Ptr.load(std::memory_order_acquire);
+    if (!Tmp)
+      RegisterManagedStatic(Creator::call, Deleter::call);
+
+    return *static_cast<C *>(Ptr.load(std::memory_order_relaxed));
+  }
+
+  C *operator->() { return &**this; }
+
+  const C &operator*() const {
+    void *Tmp = Ptr.load(std::memory_order_acquire);
+    if (!Tmp)
+      RegisterManagedStatic(Creator::call, Deleter::call);
+
+    return *static_cast<C *>(Ptr.load(std::memory_order_relaxed));
+  }
+
+  const C *operator->() const { return &**this; }
+};
+
+/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
+void llvm_shutdown();
+
+/// llvm_shutdown_obj - This is a simple helper class that calls
+/// llvm_shutdown() when it is destroyed.
+struct llvm_shutdown_obj {
+  llvm_shutdown_obj() = default;
+  ~llvm_shutdown_obj() { llvm_shutdown(); }
+};
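+
+// Typical pattern in a tool's main() (a sketch):
+//   int main(int argc, char **argv) {
+//     llvm::llvm_shutdown_obj Y;  // calls llvm_shutdown() on scope exit
+//     // ... use LLVM ...
+//   }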
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MANAGEDSTATIC_H
diff --git a/linux-x64/clang/include/llvm/Support/MathExtras.h b/linux-x64/clang/include/llvm/Support/MathExtras.h
new file mode 100644
index 0000000..a37a167
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MathExtras.h
@@ -0,0 +1,846 @@
+//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some useful mathematical and bit-manipulation functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MATHEXTRAS_H
+#define LLVM_SUPPORT_MATHEXTRAS_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <limits>
+#include <type_traits>
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef __ANDROID_NDK__
+#include <android/api-level.h>
+#endif
+
+namespace llvm {
+/// \brief The behavior an operation has on an input of 0.
+enum ZeroBehavior {
+  /// \brief The returned value is undefined.
+  ZB_Undefined,
+  /// \brief The returned value is numeric_limits<T>::max()
+  ZB_Max,
+  /// \brief The returned value is numeric_limits<T>::digits
+  ZB_Width
+};
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
+  static std::size_t count(T Val, ZeroBehavior) {
+    if (!Val)
+      return std::numeric_limits<T>::digits;
+    if (Val & 0x1)
+      return 0;
+
+    // Bisection method.
+    std::size_t ZeroBits = 0;
+    T Shift = std::numeric_limits<T>::digits >> 1;
+    T Mask = std::numeric_limits<T>::max() >> Shift;
+    while (Shift) {
+      if ((Val & Mask) == 0) {
+        Val >>= Shift;
+        ZeroBits |= Shift;
+      }
+      Shift >>= 1;
+      Mask >>= Shift;
+    }
+    return ZeroBits;
+  }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct TrailingZerosCounter<T, 4> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 32;
+
+#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_ctz(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanForward(&Index, Val);
+    return Index;
+#endif
+  }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct TrailingZerosCounter<T, 8> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 64;
+
+#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_ctzll(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanForward64(&Index, Val);
+    return Index;
+#endif
+  }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count the number of zero bits from the least significant bit to the
+///   most significant bit, stopping at the first one bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+///   valid arguments.
+template <typename T>
+std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
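+
+/// Worked examples (illustrative):
+/// \code
+///   countTrailingZeros(0x00000F00u) == 8
+///   countTrailingZeros(0u)          == 32  // ZB_Width is the default
+/// \endcode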
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
+  static std::size_t count(T Val, ZeroBehavior) {
+    if (!Val)
+      return std::numeric_limits<T>::digits;
+
+    // Bisection method.
+    std::size_t ZeroBits = 0;
+    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
+      T Tmp = Val >> Shift;
+      if (Tmp)
+        Val = Tmp;
+      else
+        ZeroBits |= Shift;
+    }
+    return ZeroBits;
+  }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct LeadingZerosCounter<T, 4> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 32;
+
+#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_clz(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanReverse(&Index, Val);
+    return Index ^ 31;
+#endif
+  }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct LeadingZerosCounter<T, 8> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 64;
+
+#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_clzll(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanReverse64(&Index, Val);
+    return Index ^ 63;
+#endif
+  }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count the number of zero bits from the most significant bit to the
+///   least significant bit, stopping at the first one bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+///   valid arguments.
+template <typename T>
+std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
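+
+/// Worked examples (illustrative):
+/// \code
+///   countLeadingZeros(0x00F000FFu) == 8
+///   countLeadingZeros(0u)          == 32  // ZB_Width is the default
+/// \endcode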
+
+/// \brief Get the index of the first set bit starting from the least
+///   significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+///   valid arguments.
+template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
+  if (ZB == ZB_Max && Val == 0)
+    return std::numeric_limits<T>::max();
+
+  return countTrailingZeros(Val, ZB_Undefined);
+}
+
+/// \brief Create a bitmask with the N right-most bits set to 1, and all other
+/// bits set to 0.  Only unsigned types are allowed.
+template <typename T> T maskTrailingOnes(unsigned N) {
+  static_assert(std::is_unsigned<T>::value, "Invalid type!");
+  const unsigned Bits = CHAR_BIT * sizeof(T);
+  assert(N <= Bits && "Invalid bit index");
+  return N == 0 ? 0 : (T(-1) >> (Bits - N));
+}
+
+/// \brief Create a bitmask with the N left-most bits set to 1, and all other
+/// bits set to 0.  Only unsigned types are allowed.
+template <typename T> T maskLeadingOnes(unsigned N) {
+  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
+/// \brief Create a bitmask with the N right-most bits set to 0, and all other
+/// bits set to 1.  Only unsigned types are allowed.
+template <typename T> T maskTrailingZeros(unsigned N) {
+  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
+/// \brief Create a bitmask with the N left-most bits set to 0, and all other
+/// bits set to 1.  Only unsigned types are allowed.
+template <typename T> T maskLeadingZeros(unsigned N) {
+  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
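+
+/// Worked examples for the four mask helpers (illustrative):
+/// \code
+///   maskTrailingOnes<uint32_t>(4)  == 0x0000000F
+///   maskLeadingOnes<uint32_t>(4)   == 0xF0000000
+///   maskTrailingZeros<uint32_t>(4) == 0xFFFFFFF0
+///   maskLeadingZeros<uint32_t>(4)  == 0x0FFFFFFF
+/// \endcode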
+
+/// \brief Get the index of the last set bit starting from the least
+///   significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+///   valid arguments.
+template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
+  if (ZB == ZB_Max && Val == 0)
+    return std::numeric_limits<T>::max();
+
+  // Use ^ instead of - because both gcc and llvm can remove the associated ^
+  // in the __builtin_clz intrinsic on x86.
+  return countLeadingZeros(Val, ZB_Undefined) ^
+         (std::numeric_limits<T>::digits - 1);
+}
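+
+/// Worked examples (illustrative):
+/// \code
+///   findFirstSet(0x18u) == 3         // 0x18 == 0b11000
+///   findLastSet(0x18u)  == 4
+///   findFirstSet(0u)    == UINT_MAX  // ZB_Max is the default
+/// \endcode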
+
+/// \brief Macro compressed bit reversal table for 256 bits.
+///
+/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+static const unsigned char BitReverseTable256[256] = {
+#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
+#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
+#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
+  R6(0), R6(2), R6(1), R6(3)
+#undef R2
+#undef R4
+#undef R6
+};
+
+/// \brief Reverse the bits in \p Val.
+template <typename T>
+T reverseBits(T Val) {
+  unsigned char in[sizeof(Val)];
+  unsigned char out[sizeof(Val)];
+  std::memcpy(in, &Val, sizeof(Val));
+  for (unsigned i = 0; i < sizeof(Val); ++i)
+    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
+  std::memcpy(&Val, out, sizeof(Val));
+  return Val;
+}
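+
+/// Worked examples (illustrative):
+/// \code
+///   reverseBits<uint8_t>(0x01)    == 0x80
+///   reverseBits<uint16_t>(0x00FF) == 0xFF00
+/// \endcode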
+
+// NOTE: The following support functions use the _32/_64 extensions instead of
+// type overloading so that signed and unsigned integers can be used without
+// ambiguity.
+
+/// Return the high 32 bits of a 64 bit value.
+constexpr inline uint32_t Hi_32(uint64_t Value) {
+  return static_cast<uint32_t>(Value >> 32);
+}
+
+/// Return the low 32 bits of a 64 bit value.
+constexpr inline uint32_t Lo_32(uint64_t Value) {
+  return static_cast<uint32_t>(Value);
+}
+
+/// Make a 64-bit integer from a high / low pair of 32-bit integers.
+constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
+  return ((uint64_t)High << 32) | (uint64_t)Low;
+}
+
+/// Checks if an integer fits into the given bit width.
+template <unsigned N> constexpr inline bool isInt(int64_t x) {
+  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+// Template specializations to get better code for common cases.
+template <> constexpr inline bool isInt<8>(int64_t x) {
+  return static_cast<int8_t>(x) == x;
+}
+template <> constexpr inline bool isInt<16>(int64_t x) {
+  return static_cast<int16_t>(x) == x;
+}
+template <> constexpr inline bool isInt<32>(int64_t x) {
+  return static_cast<int32_t>(x) == x;
+}
+
+/// Checks if a signed integer is an N bit number shifted left by S.
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedInt(int64_t x) {
+  static_assert(
+      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
+  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
+  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
+}
+
+/// Checks if an unsigned integer fits into the given bit width.
+///
+/// This is written as two functions rather than as simply
+///
+///   return N >= 64 || X < (UINT64_C(1) << N);
+///
+/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
+/// left too many places.
+template <unsigned N>
+constexpr inline typename std::enable_if<(N < 64), bool>::type
+isUInt(uint64_t X) {
+  static_assert(N > 0, "isUInt<0> doesn't make sense");
+  return X < (UINT64_C(1) << (N));
+}
+template <unsigned N>
+constexpr inline typename std::enable_if<N >= 64, bool>::type
+isUInt(uint64_t X) {
+  return true;
+}
+
+// Template specializations to get better code for common cases.
+template <> constexpr inline bool isUInt<8>(uint64_t x) {
+  return static_cast<uint8_t>(x) == x;
+}
+template <> constexpr inline bool isUInt<16>(uint64_t x) {
+  return static_cast<uint16_t>(x) == x;
+}
+template <> constexpr inline bool isUInt<32>(uint64_t x) {
+  return static_cast<uint32_t>(x) == x;
+}
+
+/// Checks if an unsigned integer is an N bit number shifted left by S.
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedUInt(uint64_t x) {
+  static_assert(
+      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
+  static_assert(N + S <= 64,
+                "isShiftedUInt<N, S> with N + S > 64 is too wide.");
+  // Per the two static_asserts above, S must be strictly less than 64.  So
+  // 1 << S is not undefined behavior.
+  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
+}
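+
+/// Worked examples for the static width checks (illustrative):
+/// \code
+///   isInt<8>(127)             == true
+///   isInt<8>(128)             == false
+///   isUInt<8>(255)            == true
+///   isShiftedInt<8, 2>(508)   == true   // 127 << 2
+///   isShiftedUInt<8, 2>(1020) == true   // 255 << 2
+/// \endcode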
+
+/// Gets the maximum value for an N-bit unsigned integer.
+inline uint64_t maxUIntN(uint64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  // uint64_t(1) << 64 is undefined behavior, so we can't do
+  //   (uint64_t(1) << N) - 1
+  // without checking first that N != 64.  But this works and doesn't have a
+  // branch.
+  return UINT64_MAX >> (64 - N);
+}
+
+/// Gets the minimum value for an N-bit signed integer.
+inline int64_t minIntN(int64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  return -(UINT64_C(1)<<(N-1));
+}
+
+/// Gets the maximum value for an N-bit signed integer.
+inline int64_t maxIntN(int64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  // This relies on two's complement wraparound when N == 64, so we convert to
+  // int64_t only at the very end to avoid UB.
+  return (UINT64_C(1) << (N - 1)) - 1;
+}
+
+/// Checks if an unsigned integer fits into the given (dynamic) bit width.
+inline bool isUIntN(unsigned N, uint64_t x) {
+  return N >= 64 || x <= maxUIntN(N);
+}
+
+/// Checks if a signed integer fits into the given (dynamic) bit width.
+inline bool isIntN(unsigned N, int64_t x) {
+  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
+}
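+
+/// Worked examples for the dynamic width helpers (illustrative):
+/// \code
+///   maxUIntN(8)     == 255
+///   minIntN(8)      == -128
+///   maxIntN(8)      == 127
+///   isIntN(8, -129) == false
+/// \endcode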
+
+/// Return true if the argument is a non-empty sequence of ones starting at the
+/// least significant bit with the remainder zero (32 bit version).
+/// Ex. isMask_32(0x0000FFFFU) == true.
+constexpr inline bool isMask_32(uint32_t Value) {
+  return Value && ((Value + 1) & Value) == 0;
+}
+
+/// Return true if the argument is a non-empty sequence of ones starting at the
+/// least significant bit with the remainder zero (64 bit version).
+constexpr inline bool isMask_64(uint64_t Value) {
+  return Value && ((Value + 1) & Value) == 0;
+}
+
+/// Return true if the argument contains a non-empty sequence of ones with the
+/// remainder zero (32 bit version). Ex. isShiftedMask_32(0x0000FF00U) == true.
+constexpr inline bool isShiftedMask_32(uint32_t Value) {
+  return Value && isMask_32((Value - 1) | Value);
+}
+
+/// Return true if the argument contains a non-empty sequence of ones with the
+/// remainder zero (64 bit version).
+constexpr inline bool isShiftedMask_64(uint64_t Value) {
+  return Value && isMask_64((Value - 1) | Value);
+}
+
+/// Return true if the argument is a power of two > 0.
+/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
+constexpr inline bool isPowerOf2_32(uint32_t Value) {
+  return Value && !(Value & (Value - 1));
+}
+
+/// Return true if the argument is a power of two > 0 (64 bit edition.)
+constexpr inline bool isPowerOf2_64(uint64_t Value) {
+  return Value && !(Value & (Value - 1));
+}
+
+/// Return a byte-swapped representation of the 16-bit argument.
+inline uint16_t ByteSwap_16(uint16_t Value) {
+  return sys::SwapByteOrder_16(Value);
+}
+
+/// Return a byte-swapped representation of the 32-bit argument.
+inline uint32_t ByteSwap_32(uint32_t Value) {
+  return sys::SwapByteOrder_32(Value);
+}
+
+/// Return a byte-swapped representation of the 64-bit argument.
+inline uint64_t ByteSwap_64(uint64_t Value) {
+  return sys::SwapByteOrder_64(Value);
+}
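+
+/// Worked examples (illustrative):
+/// \code
+///   ByteSwap_16(0x1234)     == 0x3412
+///   ByteSwap_32(0x12345678) == 0x78563412
+/// \endcode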
+
+/// \brief Count the number of ones from the most significant bit to the first
+/// zero bit.
+///
+/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return countLeadingZeros(~Value, ZB);
+}
+
+/// \brief Count the number of ones from the least significant bit to the first
+/// zero bit.
+///
+/// Ex. countTrailingOnes(0x00FF00FF) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return countTrailingZeros(~Value, ZB);
+}
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct PopulationCounter {
+  static unsigned count(T Value) {
+    // Generic version, forward to 32 bits.
+    static_assert(SizeOfT <= 4, "Not implemented!");
+#if __GNUC__ >= 4
+    return __builtin_popcount(Value);
+#else
+    uint32_t v = Value;
+    v = v - ((v >> 1) & 0x55555555);
+    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
+#endif
+  }
+};
+
+template <typename T> struct PopulationCounter<T, 8> {
+  static unsigned count(T Value) {
+#if __GNUC__ >= 4
+    return __builtin_popcountll(Value);
+#else
+    uint64_t v = Value;
+    v = v - ((v >> 1) & 0x5555555555555555ULL);
+    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
+#endif
+  }
+};
+} // namespace detail
+
+/// \brief Count the number of set bits in a value.
+/// Ex. countPopulation(0xF000F000) = 8
+/// Returns 0 if the word is zero.
+template <typename T>
+inline unsigned countPopulation(T Value) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
+}
+
+/// Return the log base 2 of the specified value.
+inline double Log2(double Value) {
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
+  return __builtin_log(Value) / __builtin_log(2.0);
+#else
+  return log2(Value);
+#endif
+}
+
+/// Return the floor log base 2 of the specified value, -1 if the value is zero.
+/// (32 bit edition.)
+/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
+inline unsigned Log2_32(uint32_t Value) {
+  return 31 - countLeadingZeros(Value);
+}
+
+/// Return the floor log base 2 of the specified value, -1 if the value is zero.
+/// (64 bit edition.)
+inline unsigned Log2_64(uint64_t Value) {
+  return 63 - countLeadingZeros(Value);
+}
+
+/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
+/// (32 bit edition).
+/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
+inline unsigned Log2_32_Ceil(uint32_t Value) {
+  return 32 - countLeadingZeros(Value - 1);
+}
+
+/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
+/// (64 bit edition.)
+inline unsigned Log2_64_Ceil(uint64_t Value) {
+  return 64 - countLeadingZeros(Value - 1);
+}
+
+/// Return the greatest common divisor of the values using Euclid's algorithm.
+inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
+  while (B) {
+    uint64_t T = B;
+    B = A % B;
+    A = T;
+  }
+  return A;
+}
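+
+/// Worked example (illustrative): the remainder chain for (48, 18) is
+/// 48 % 18 == 12, 18 % 12 == 6, 12 % 6 == 0, so:
+/// \code
+///   GreatestCommonDivisor64(48, 18) == 6
+/// \endcode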
+
+/// This function takes a 64-bit integer and returns the bit equivalent double.
+inline double BitsToDouble(uint64_t Bits) {
+  double D;
+  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
+  memcpy(&D, &Bits, sizeof(Bits));
+  return D;
+}
+
+/// This function takes a 32-bit integer and returns the bit equivalent float.
+inline float BitsToFloat(uint32_t Bits) {
+  float F;
+  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
+  memcpy(&F, &Bits, sizeof(Bits));
+  return F;
+}
+
+/// This function takes a double and returns the bit equivalent 64-bit integer.
+/// Note that copying doubles around changes the bits of NaNs on some hosts,
+/// notably x86, so this routine cannot be used if these bits are needed.
+inline uint64_t DoubleToBits(double Double) {
+  uint64_t Bits;
+  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
+  memcpy(&Bits, &Double, sizeof(Double));
+  return Bits;
+}
+
+/// This function takes a float and returns the bit equivalent 32-bit integer.
+/// Note that copying floats around changes the bits of NaNs on some hosts,
+/// notably x86, so this routine cannot be used if these bits are needed.
+inline uint32_t FloatToBits(float Float) {
+  uint32_t Bits;
+  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
+  memcpy(&Bits, &Float, sizeof(Float));
+  return Bits;
+}
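+
+/// Worked examples (illustrative; standard IEEE-754 encodings):
+/// \code
+///   BitsToFloat(0x3F800000u) == 1.0f
+///   FloatToBits(1.0f)        == 0x3F800000u
+/// \endcode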
+
+/// A and B are either alignments or offsets. Return the minimum alignment that
+/// may be assumed after adding the two together.
+constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+  // The largest power of 2 that divides both A and B.
+  //
+  // Replace "-Value" by "1+~Value" in the following commented code to avoid
+  // MSVC warning C4146
+  //    return (A | B) & -(A | B);
+  return (A | B) & (1 + ~(A | B));
+}
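+
+/// Worked examples (illustrative):
+/// \code
+///   MinAlign(8, 12) == 4  // 0b1000 | 0b1100 == 0b1100; lowest set bit is 4
+///   MinAlign(8, 16) == 8
+/// \endcode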
+
+/// \brief Aligns \c Addr to \c Alignment bytes, rounding up.
+///
+/// Alignment should be a power of two.  This method rounds up, so
+/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
+inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
+  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
+         "Alignment is not a power of two!");
+
+  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
+
+  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
+}
+
+/// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment
+/// bytes, rounding up.
+inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
+  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
+}
+
+/// Returns the next power of two (in 64-bits) that is strictly greater than A.
+/// Returns zero on overflow.
+inline uint64_t NextPowerOf2(uint64_t A) {
+  A |= (A >> 1);
+  A |= (A >> 2);
+  A |= (A >> 4);
+  A |= (A >> 8);
+  A |= (A >> 16);
+  A |= (A >> 32);
+  return A + 1;
+}
+
+/// Returns the power of two which is less than or equal to the given value.
+/// Essentially, it is a floor operation across the domain of powers of two.
+inline uint64_t PowerOf2Floor(uint64_t A) {
+  if (!A) return 0;
+  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
+}
+
+/// Returns the power of two which is greater than or equal to the given value.
+/// Essentially, it is a ceil operation across the domain of powers of two.
+inline uint64_t PowerOf2Ceil(uint64_t A) {
+  if (!A)
+    return 0;
+  return NextPowerOf2(A - 1);
+}
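+
+/// Worked examples (illustrative):
+/// \code
+///   NextPowerOf2(5)  == 8
+///   NextPowerOf2(8)  == 16  // strictly greater than the argument
+///   PowerOf2Floor(5) == 4
+///   PowerOf2Ceil(5)  == 8
+///   PowerOf2Ceil(8)  == 8
+/// \endcode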
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
+///
+/// If non-zero \p Skew is specified, the return value will be a minimal
+/// integer that is greater than or equal to \p Value and equal to
+/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
+/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
+///
+/// Examples:
+/// \code
+///   alignTo(5, 8) = 8
+///   alignTo(17, 8) = 24
+///   alignTo(~0LL, 8) = 0
+///   alignTo(321, 255) = 510
+///
+///   alignTo(5, 8, 7) = 7
+///   alignTo(17, 8, 1) = 17
+///   alignTo(~0LL, 8, 3) = 3
+///   alignTo(321, 255, 42) = 552
+/// \endcode
+inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+  assert(Align != 0u && "Align can't be 0.");
+  Skew %= Align;
+  return (Value + Align - 1 - Skew) / Align * Align + Skew;
+}
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
+template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
+  static_assert(Align != 0u, "Align must be non-zero");
+  return (Value + Align - 1) / Align * Align;
+}
+
+/// Returns the integer ceil(Numerator / Denominator).
+inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
+  return alignTo(Numerator, Denominator) / Denominator;
+}
+
+/// \c alignTo for contexts where a constant expression is required.
+/// \sa alignTo
+///
+/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
+template <uint64_t Align>
+struct AlignTo {
+  static_assert(Align != 0u, "Align must be non-zero");
+  template <uint64_t Value>
+  struct from_value {
+    static const uint64_t value = (Value + Align - 1) / Align * Align;
+  };
+};
+
+/// Returns the largest uint64_t that is less than or equal to \p Value and is
+/// congruent to \p Skew modulo \p Align. \p Align must be non-zero.
+inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+  assert(Align != 0u && "Align can't be 0.");
+  Skew %= Align;
+  return (Value - Skew) / Align * Align + Skew;
+}
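+
+/// Worked examples (illustrative):
+/// \code
+///   alignDown(17, 8)    == 16
+///   alignDown(17, 8, 3) == 11  // 8 * 1 + 3
+/// \endcode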
+
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
+inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
+  return alignTo(Value, Align) - Value;
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
+/// Requires 0 < B <= 32.
+template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
+  static_assert(B > 0, "Bit width can't be 0.");
+  static_assert(B <= 32, "Bit width out of range.");
+  return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+  assert(B > 0 && "Bit width can't be 0.");
+  assert(B <= 32 && "Bit width out of range.");
+  return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
+/// Requires 0 < B <= 64.
+template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
+  static_assert(B > 0, "Bit width can't be 0.");
+  static_assert(B <= 64, "Bit width out of range.");
+  return int64_t(x << (64 - B)) >> (64 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+  assert(B > 0 && "Bit width can't be 0.");
+  assert(B <= 64 && "Bit width out of range.");
+  return int64_t(X << (64 - B)) >> (64 - B);
+}
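+
+/// Worked examples (illustrative):
+/// \code
+///   SignExtend32<4>(0x0Fu) == -1
+///   SignExtend32(0x80u, 8) == -128
+///   SignExtend64(0x80u, 8) == -128
+/// \endcode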
+
+/// Subtract two unsigned integers, X and Y, of type T and return the absolute
+/// value of the result.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+AbsoluteDifference(T X, T Y) {
+  return std::max(X, Y) - std::min(X, Y);
+}
+
+/// Add two unsigned integers, X and Y, of type T.  Clamp the result to the
+/// maximum representable value of T on overflow.  ResultOverflowed indicates if
+/// the result is larger than the maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+  // Hacker's Delight, p. 29
+  T Z = X + Y;
+  Overflowed = (Z < X || Z < Y);
+  if (Overflowed)
+    return std::numeric_limits<T>::max();
+  else
+    return Z;
+}
+
+/// Multiply two unsigned integers, X and Y, of type T.  Clamp the result to the
+/// maximum representable value of T on overflow.  ResultOverflowed indicates if
+/// the result is larger than the maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+
+  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
+  // because it fails for uint16_t (where multiplication can have undefined
+  // behavior due to promotion to int), and requires a division in addition
+  // to the multiplication.
+
+  Overflowed = false;
+
+  // Log2(Z) would be either Log2Z or Log2Z + 1.
+  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
+  // will necessarily be less than Log2Max as desired.
+  int Log2Z = Log2_64(X) + Log2_64(Y);
+  const T Max = std::numeric_limits<T>::max();
+  int Log2Max = Log2_64(Max);
+  if (Log2Z < Log2Max) {
+    return X * Y;
+  }
+  if (Log2Z > Log2Max) {
+    Overflowed = true;
+    return Max;
+  }
+
+  // We're going to use the top bit, and maybe overflow one
+  // bit past it. Multiply all but the bottom bit then add
+  // that on at the end.
+  T Z = (X >> 1) * Y;
+  if (Z & ~(Max >> 1)) {
+    Overflowed = true;
+    return Max;
+  }
+  Z <<= 1;
+  if (X & 1)
+    return SaturatingAdd(Z, Y, ResultOverflowed);
+
+  return Z;
+}
+
+/// Multiply two unsigned integers, X and Y, and add the unsigned integer A to
+/// the product. Clamp the result to the maximum representable value of T on
+/// overflow. ResultOverflowed indicates if the result is larger than the
+/// maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+
+  T Product = SaturatingMultiply(X, Y, &Overflowed);
+  if (Overflowed)
+    return Product;
+
+  return SaturatingAdd(A, Product, &Overflowed);
+}
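+
+/// Worked examples for the saturating helpers (illustrative):
+/// \code
+///   bool Ov;
+///   SaturatingAdd<uint8_t>(200, 100, &Ov);           // == 255, Ov == true
+///   SaturatingMultiply<uint16_t>(300, 300, &Ov);     // == 65535, Ov == true
+///   SaturatingMultiplyAdd<uint8_t>(10, 10, 50, &Ov); // == 150, Ov == false
+/// \endcode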
+
+/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
+extern const float huge_valf;
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Memory.h b/linux-x64/clang/include/llvm/Support/Memory.h
new file mode 100644
index 0000000..3140dc6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Memory.h
@@ -0,0 +1,145 @@
+//===- llvm/Support/Memory.h - Memory Support -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Memory class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MEMORY_H
+#define LLVM_SUPPORT_MEMORY_H
+
+#include "llvm/Support/DataTypes.h"
+#include <string>
+#include <system_error>
+
+namespace llvm {
+namespace sys {
+
+  /// This class encapsulates the notion of a memory block which has an address
+  /// and a size. It is used by the Memory class (a friend) as the result of
+  /// various memory allocation operations.
+  /// @see Memory
+  /// @brief Memory block abstraction.
+  class MemoryBlock {
+  public:
+    MemoryBlock() : Address(nullptr), Size(0) { }
+    MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
+    void *base() const { return Address; }
+    size_t size() const { return Size; }
+
+  private:
+    void *Address;    ///< Address of first byte of memory area
+    size_t Size;      ///< Size, in bytes, of the memory area
+    friend class Memory;
+  };
+
+  /// This class provides various memory handling functions that manipulate
+  /// MemoryBlock instances.
+  /// @since 1.4
+  /// @brief An abstraction for memory operations.
+  class Memory {
+  public:
+    enum ProtectionFlags {
+      MF_READ  = 0x1000000,
+      MF_WRITE = 0x2000000,
+      MF_EXEC  = 0x4000000
+    };
+
+    /// This method allocates a block of memory that is suitable for loading
+    /// dynamically generated code (e.g. JIT). An attempt to allocate
+    /// \p NumBytes bytes of virtual memory is made.
+    /// \p NearBlock may point to an existing allocation in which case
+    /// an attempt is made to allocate more memory near the existing block.
+    /// The actual allocated address is not guaranteed to be near the requested
+    /// address.
+    /// \p Flags is used to set the initial protection flags for the block
+    /// of the memory.
+    /// \p EC [out] returns an object describing any error that occurs.
+    ///
+    /// This method may allocate more than the number of bytes requested.  The
+    /// actual number of bytes allocated is indicated in the returned
+    /// MemoryBlock.
+    ///
+    /// The start of the allocated block must be aligned with the
+    /// system allocation granularity (64K on Windows, page size on Linux).
+    /// If the address following \p NearBlock is not so aligned, it will be
+    /// rounded up to the next allocation granularity boundary.
+    ///
+    /// \r a non-null MemoryBlock if the function was successful,
+    /// otherwise a null MemoryBlock with \p EC describing the error.
+    ///
+    /// @brief Allocate mapped memory.
+    static MemoryBlock allocateMappedMemory(size_t NumBytes,
+                                            const MemoryBlock *const NearBlock,
+                                            unsigned Flags,
+                                            std::error_code &EC);
+
+    /// This method releases a block of memory that was allocated with the
+    /// allocateMappedMemory method. It should not be used to release any
+    /// memory block allocated any other way.
+    /// \p Block describes the memory to be released.
+    ///
+    /// \r error_success if the function was successful, or an error_code
+    /// describing the failure if an error occurred.
+    ///
+    /// @brief Release mapped memory.
+    static std::error_code releaseMappedMemory(MemoryBlock &Block);
+
+    /// This method sets the protection flags for a block of memory to the
+    /// state specified by \p Flags.  The behavior is not specified if the
+    /// memory was not allocated using the allocateMappedMemory method.
+    /// \p Block describes the memory block to be protected.
+    /// \p Flags specifies the new protection state to be assigned to the block.
+    /// \p ErrMsg [out] returns a string describing any error that occurred.
+    ///
+    /// If \p Flags is MF_WRITE, the actual behavior varies
+    /// with the operating system (e.g. MF_READ | MF_WRITE on Windows) and the
+    /// target architecture (e.g. MF_WRITE -> MF_READ | MF_WRITE on i386).
+    ///
+    /// \r error_success if the function was successful, or an error_code
+    /// describing the failure if an error occurred.
+    ///
+    /// @brief Set memory protection state.
+    static std::error_code protectMappedMemory(const MemoryBlock &Block,
+                                               unsigned Flags);
+
+    /// InvalidateInstructionCache - Before the JIT can run a block of code
+    /// that has been emitted, it must invalidate the instruction cache on some
+    /// platforms.
+    static void InvalidateInstructionCache(const void *Addr, size_t Len);
+  };
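+
+  /// A minimal JIT-style usage sketch (error handling abbreviated; the code
+  /// emission step is hypothetical):
+  /// \code
+  ///   std::error_code EC;
+  ///   MemoryBlock MB = Memory::allocateMappedMemory(
+  ///       4096, nullptr, Memory::MF_READ | Memory::MF_WRITE, EC);
+  ///   if (!EC) {
+  ///     // ... copy generated machine code to MB.base() ...
+  ///     Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
+  ///     Memory::InvalidateInstructionCache(MB.base(), MB.size());
+  ///     Memory::releaseMappedMemory(MB);
+  ///   }
+  /// \endcode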
+
+  /// Owning version of MemoryBlock.
+  class OwningMemoryBlock {
+  public:
+    OwningMemoryBlock() = default;
+    explicit OwningMemoryBlock(MemoryBlock M) : M(M) {}
+    OwningMemoryBlock(OwningMemoryBlock &&Other) {
+      M = Other.M;
+      Other.M = MemoryBlock();
+    }
+    OwningMemoryBlock& operator=(OwningMemoryBlock &&Other) {
+      M = Other.M;
+      Other.M = MemoryBlock();
+      return *this;
+    }
+    ~OwningMemoryBlock() {
+      Memory::releaseMappedMemory(M);
+    }
+    void *base() const { return M.base(); }
+    size_t size() const { return M.size(); }
+    MemoryBlock getMemoryBlock() const { return M; }
+  private:
+    MemoryBlock M;
+  };
+
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/MemoryBuffer.h b/linux-x64/clang/include/llvm/Support/MemoryBuffer.h
new file mode 100644
index 0000000..2997ae4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MemoryBuffer.h
@@ -0,0 +1,288 @@
+//===--- MemoryBuffer.h - Memory Buffer Interface ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the MemoryBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MEMORYBUFFER_H
+#define LLVM_SUPPORT_MEMORYBUFFER_H
+
+#include "llvm-c/Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class MemoryBufferRef;
+
+/// This interface provides simple read-only access to a block of memory, and
+/// provides simple methods for reading files and standard input into a memory
+/// buffer.  In addition to basic access to the characters in the file, this
+/// interface guarantees you can read one character past the end of the file,
+/// and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
+class MemoryBuffer {
+  const char *BufferStart; // Start of the buffer.
+  const char *BufferEnd;   // End of the buffer.
+
+
+protected:
+  MemoryBuffer() = default;
+
+  void init(const char *BufStart, const char *BufEnd,
+            bool RequiresNullTerminator);
+
+  static constexpr sys::fs::mapped_file_region::mapmode Mapmode =
+      sys::fs::mapped_file_region::readonly;
+
+public:
+  MemoryBuffer(const MemoryBuffer &) = delete;
+  MemoryBuffer &operator=(const MemoryBuffer &) = delete;
+  virtual ~MemoryBuffer();
+
+  const char *getBufferStart() const { return BufferStart; }
+  const char *getBufferEnd() const   { return BufferEnd; }
+  size_t getBufferSize() const { return BufferEnd-BufferStart; }
+
+  StringRef getBuffer() const {
+    return StringRef(BufferStart, getBufferSize());
+  }
+
+  /// Return an identifier for this buffer, typically the filename it was read
+  /// from.
+  virtual StringRef getBufferIdentifier() const { return "Unknown buffer"; }
+
+  /// Open the specified file as a MemoryBuffer, returning a new MemoryBuffer
+  /// if successful, otherwise returning null. If FileSize is specified, this
+  /// means that the client knows that the file exists and that it has the
+  /// specified size.
+  ///
+  /// \param IsVolatile Set to true to indicate that the contents of the file
+  /// can change outside the user's control, e.g. when libclang tries to parse
+  /// while the user is editing/updating the file or if the file is on an NFS.
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getFile(const Twine &Filename, int64_t FileSize = -1,
+          bool RequiresNullTerminator = true, bool IsVolatile = false);
+
+  /// Read all of the specified file into a MemoryBuffer as a stream
+  /// (i.e. until EOF reached). This is useful for special files that
+  /// look like a regular file but have 0 size (e.g. /proc/cpuinfo on Linux).
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getFileAsStream(const Twine &Filename);
+
+  /// Given an already-open file descriptor, map some slice of it into a
+  /// MemoryBuffer. The slice is specified by an \p Offset and \p MapSize.
+  /// Since this is in the middle of a file, the buffer is not null terminated.
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getOpenFileSlice(int FD, const Twine &Filename, uint64_t MapSize,
+                   int64_t Offset, bool IsVolatile = false);
+
+  /// Given an already-open file descriptor, read the file and return a
+  /// MemoryBuffer.
+  ///
+  /// \param IsVolatile Set to true to indicate that the contents of the file
+  /// can change outside the user's control, e.g. when libclang tries to parse
+  /// while the user is editing/updating the file or if the file is on an NFS.
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getOpenFile(int FD, const Twine &Filename, uint64_t FileSize,
+              bool RequiresNullTerminator = true, bool IsVolatile = false);
+
+  /// Open the specified memory range as a MemoryBuffer. Note that InputData
+  /// must be null terminated if RequiresNullTerminator is true.
+  static std::unique_ptr<MemoryBuffer>
+  getMemBuffer(StringRef InputData, StringRef BufferName = "",
+               bool RequiresNullTerminator = true);
+
+  static std::unique_ptr<MemoryBuffer>
+  getMemBuffer(MemoryBufferRef Ref, bool RequiresNullTerminator = true);
+
+  /// Open the specified memory range as a MemoryBuffer, copying the contents
+  /// and taking ownership of it. InputData does not have to be null terminated.
+  static std::unique_ptr<MemoryBuffer>
+  getMemBufferCopy(StringRef InputData, const Twine &BufferName = "");
+
+  /// Read all of stdin into a file buffer, and return it.
+  static ErrorOr<std::unique_ptr<MemoryBuffer>> getSTDIN();
+
+  /// Open the specified file as a MemoryBuffer, or open stdin if the Filename
+  /// is "-".
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getFileOrSTDIN(const Twine &Filename, int64_t FileSize = -1,
+                 bool RequiresNullTerminator = true);
+
+  /// Map a subrange of the specified file as a MemoryBuffer.
+  static ErrorOr<std::unique_ptr<MemoryBuffer>>
+  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset,
+               bool IsVolatile = false);
+
+  //===--------------------------------------------------------------------===//
+  // Provided for performance analysis.
+  //===--------------------------------------------------------------------===//
+
+  /// The kind of memory backing used to support the MemoryBuffer.
+  enum BufferKind {
+    MemoryBuffer_Malloc,
+    MemoryBuffer_MMap
+  };
+
+  /// Return information on the memory mechanism used to support the
+  /// MemoryBuffer.
+  virtual BufferKind getBufferKind() const = 0;
+
+  MemoryBufferRef getMemBufferRef() const;
+};
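+
+/// A typical read path (a minimal sketch; "input.txt" is a placeholder):
+/// \code
+///   ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+///       MemoryBuffer::getFile("input.txt");
+///   if (std::error_code EC = BufOrErr.getError()) {
+///     // ... report EC and bail out ...
+///   }
+///   StringRef Contents = (*BufOrErr)->getBuffer(); // '\0'-terminated view
+/// \endcode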
+
+/// This class is an extension of MemoryBuffer, which allows copy-on-write
+/// access to the underlying contents.  It only supports creation methods that
+/// are guaranteed to produce a writable buffer.  For example, mapping a file
+/// read-only is not supported.
+class WritableMemoryBuffer : public MemoryBuffer {
+protected:
+  WritableMemoryBuffer() = default;
+
+  static constexpr sys::fs::mapped_file_region::mapmode Mapmode =
+      sys::fs::mapped_file_region::priv;
+
+public:
+  using MemoryBuffer::getBuffer;
+  using MemoryBuffer::getBufferEnd;
+  using MemoryBuffer::getBufferStart;
+
+  // const_cast is well-defined here, because the underlying buffer is
+  // guaranteed to have been initialized with a mutable buffer.
+  char *getBufferStart() {
+    return const_cast<char *>(MemoryBuffer::getBufferStart());
+  }
+  char *getBufferEnd() {
+    return const_cast<char *>(MemoryBuffer::getBufferEnd());
+  }
+  MutableArrayRef<char> getBuffer() {
+    return {getBufferStart(), getBufferEnd()};
+  }
+
+  static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
+  getFile(const Twine &Filename, int64_t FileSize = -1,
+          bool IsVolatile = false);
+
+  /// Map a subrange of the specified file as a WritableMemoryBuffer.
+  static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
+  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset,
+               bool IsVolatile = false);
+
+  /// Allocate a new MemoryBuffer of the specified size that is not initialized.
+  /// Note that the caller should initialize the memory allocated by this
+  /// method. The memory is owned by the MemoryBuffer object.
+  static std::unique_ptr<WritableMemoryBuffer>
+  getNewUninitMemBuffer(size_t Size, const Twine &BufferName = "");
+
+  /// Allocate a new zero-initialized MemoryBuffer of the specified size. Note
+  /// that the caller need not initialize the memory allocated by this method.
+  /// The memory is owned by the MemoryBuffer object.
+  static std::unique_ptr<WritableMemoryBuffer>
+  getNewMemBuffer(size_t Size, const Twine &BufferName = "");
+
+private:
+  // Hide these base class factory functions so one can't write
+  //   WritableMemoryBuffer::getXXX()
+  // and be surprised to get a read-only buffer.
+  using MemoryBuffer::getFileAsStream;
+  using MemoryBuffer::getFileOrSTDIN;
+  using MemoryBuffer::getMemBuffer;
+  using MemoryBuffer::getMemBufferCopy;
+  using MemoryBuffer::getOpenFile;
+  using MemoryBuffer::getOpenFileSlice;
+  using MemoryBuffer::getSTDIN;
+};
+
+/// This class is an extension of MemoryBuffer, which allows write access to
+/// the underlying contents and committing those changes to the original source.
+/// It only supports creation methods that are guaranteed to produce a writable
+/// buffer.  For example, mapping a file read-only is not supported.
+class WriteThroughMemoryBuffer : public MemoryBuffer {
+protected:
+  WriteThroughMemoryBuffer() = default;
+
+  static constexpr sys::fs::mapped_file_region::mapmode Mapmode =
+      sys::fs::mapped_file_region::readwrite;
+
+public:
+  using MemoryBuffer::getBuffer;
+  using MemoryBuffer::getBufferEnd;
+  using MemoryBuffer::getBufferStart;
+
+  // const_cast is well-defined here, because the underlying buffer is
+  // guaranteed to have been initialized with a mutable buffer.
+  char *getBufferStart() {
+    return const_cast<char *>(MemoryBuffer::getBufferStart());
+  }
+  char *getBufferEnd() {
+    return const_cast<char *>(MemoryBuffer::getBufferEnd());
+  }
+  MutableArrayRef<char> getBuffer() {
+    return {getBufferStart(), getBufferEnd()};
+  }
+
+  static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
+  getFile(const Twine &Filename, int64_t FileSize = -1);
+
+  /// Map a subrange of the specified file as a WriteThroughMemoryBuffer.
+  static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
+  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);
+
+private:
+  // Hide these base class factory functions so one can't write
+  //   WriteThroughMemoryBuffer::getXXX()
+  // and be surprised to get a read-only buffer.
+  using MemoryBuffer::getFileAsStream;
+  using MemoryBuffer::getFileOrSTDIN;
+  using MemoryBuffer::getMemBuffer;
+  using MemoryBuffer::getMemBufferCopy;
+  using MemoryBuffer::getOpenFile;
+  using MemoryBuffer::getOpenFileSlice;
+  using MemoryBuffer::getSTDIN;
+};
+
+class MemoryBufferRef {
+  StringRef Buffer;
+  StringRef Identifier;
+
+public:
+  MemoryBufferRef() = default;
+  MemoryBufferRef(MemoryBuffer& Buffer)
+      : Buffer(Buffer.getBuffer()), Identifier(Buffer.getBufferIdentifier()) {}
+  MemoryBufferRef(StringRef Buffer, StringRef Identifier)
+      : Buffer(Buffer), Identifier(Identifier) {}
+
+  StringRef getBuffer() const { return Buffer; }
+
+  StringRef getBufferIdentifier() const { return Identifier; }
+
+  const char *getBufferStart() const { return Buffer.begin(); }
+  const char *getBufferEnd() const { return Buffer.end(); }
+  size_t getBufferSize() const { return Buffer.size(); }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef)
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MEMORYBUFFER_H
diff --git a/linux-x64/clang/include/llvm/Support/MipsABIFlags.h b/linux-x64/clang/include/llvm/Support/MipsABIFlags.h
new file mode 100644
index 0000000..40e62e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MipsABIFlags.h
@@ -0,0 +1,103 @@
+//===--- MipsABIFlags.h - MIPS ABI flags ----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the constants for the ABI flags structure contained
+// in the .MIPS.abiflags section.
+//
+// https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MIPSABIFLAGS_H
+#define LLVM_SUPPORT_MIPSABIFLAGS_H
+
+namespace llvm {
+namespace Mips {
+
+// Values for the xxx_size bytes of an ABI flags structure.
+enum AFL_REG {
+  AFL_REG_NONE = 0x00, // No registers
+  AFL_REG_32 = 0x01,   // 32-bit registers
+  AFL_REG_64 = 0x02,   // 64-bit registers
+  AFL_REG_128 = 0x03   // 128-bit registers
+};
+
+// Masks for the ases word of an ABI flags structure.
+enum AFL_ASE {
+  AFL_ASE_DSP = 0x00000001,       // DSP ASE
+  AFL_ASE_DSPR2 = 0x00000002,     // DSP R2 ASE
+  AFL_ASE_EVA = 0x00000004,       // Enhanced VA Scheme
+  AFL_ASE_MCU = 0x00000008,       // MCU (MicroController) ASE
+  AFL_ASE_MDMX = 0x00000010,      // MDMX ASE
+  AFL_ASE_MIPS3D = 0x00000020,    // MIPS-3D ASE
+  AFL_ASE_MT = 0x00000040,        // MT ASE
+  AFL_ASE_SMARTMIPS = 0x00000080, // SmartMIPS ASE
+  AFL_ASE_VIRT = 0x00000100,      // VZ ASE
+  AFL_ASE_MSA = 0x00000200,       // MSA ASE
+  AFL_ASE_MIPS16 = 0x00000400,    // MIPS16 ASE
+  AFL_ASE_MICROMIPS = 0x00000800, // MICROMIPS ASE
+  AFL_ASE_XPA = 0x00001000,       // XPA ASE
+  AFL_ASE_CRC = 0x00008000        // CRC ASE
+};
+
+// Values for the isa_ext word of an ABI flags structure.
+enum AFL_EXT {
+  AFL_EXT_NONE = 0,         // None
+  AFL_EXT_XLR = 1,          // RMI Xlr instruction
+  AFL_EXT_OCTEON2 = 2,      // Cavium Networks Octeon2
+  AFL_EXT_OCTEONP = 3,      // Cavium Networks OcteonP
+  AFL_EXT_LOONGSON_3A = 4,  // Loongson 3A
+  AFL_EXT_OCTEON = 5,       // Cavium Networks Octeon
+  AFL_EXT_5900 = 6,         // MIPS R5900 instruction
+  AFL_EXT_4650 = 7,         // MIPS R4650 instruction
+  AFL_EXT_4010 = 8,         // LSI R4010 instruction
+  AFL_EXT_4100 = 9,         // NEC VR4100 instruction
+  AFL_EXT_3900 = 10,        // Toshiba R3900 instruction
+  AFL_EXT_10000 = 11,       // MIPS R10000 instruction
+  AFL_EXT_SB1 = 12,         // Broadcom SB-1 instruction
+  AFL_EXT_4111 = 13,        // NEC VR4111/VR4181 instruction
+  AFL_EXT_4120 = 14,        // NEC VR4120 instruction
+  AFL_EXT_5400 = 15,        // NEC VR5400 instruction
+  AFL_EXT_5500 = 16,        // NEC VR5500 instruction
+  AFL_EXT_LOONGSON_2E = 17, // ST Microelectronics Loongson 2E
+  AFL_EXT_LOONGSON_2F = 18, // ST Microelectronics Loongson 2F
+  AFL_EXT_OCTEON3 = 19      // Cavium Networks Octeon3
+};
+
+// Values for the flags1 word of an ABI flags structure.
+enum AFL_FLAGS1 { AFL_FLAGS1_ODDSPREG = 1 };
+
+// MIPS object attribute tags
+enum {
+  Tag_GNU_MIPS_ABI_FP = 4,  // Floating-point ABI used by this object file
+  Tag_GNU_MIPS_ABI_MSA = 8, // MSA ABI used by this object file
+};
+
+// Values for the fp_abi word of an ABI flags structure
+// and for the Tag_GNU_MIPS_ABI_FP attribute tag.
+enum Val_GNU_MIPS_ABI_FP {
+  Val_GNU_MIPS_ABI_FP_ANY = 0,    // not tagged
+  Val_GNU_MIPS_ABI_FP_DOUBLE = 1, // hard float / -mdouble-float
+  Val_GNU_MIPS_ABI_FP_SINGLE = 2, // hard float / -msingle-float
+  Val_GNU_MIPS_ABI_FP_SOFT = 3,   // soft float
+  Val_GNU_MIPS_ABI_FP_OLD_64 = 4, // -mips32r2 -mfp64
+  Val_GNU_MIPS_ABI_FP_XX = 5,     // -mfpxx
+  Val_GNU_MIPS_ABI_FP_64 = 6,     // -mips32r2 -mfp64
+  Val_GNU_MIPS_ABI_FP_64A = 7     // -mips32r2 -mfp64 -mno-odd-spreg
+};
+
+// Values for the Tag_GNU_MIPS_ABI_MSA attribute tag.
+enum Val_GNU_MIPS_ABI_MSA {
+  Val_GNU_MIPS_ABI_MSA_ANY = 0, // not tagged
+  Val_GNU_MIPS_ABI_MSA_128 = 1  // 128-bit MSA
+};
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Mutex.h b/linux-x64/clang/include/llvm/Support/Mutex.h
new file mode 100644
index 0000000..0f4e61a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Mutex.h
@@ -0,0 +1,158 @@
+//===- llvm/Support/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Mutex class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MUTEX_H
+#define LLVM_SUPPORT_MUTEX_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm
+{
+  namespace sys
+  {
+    /// @brief Platform agnostic Mutex class.
+    class MutexImpl
+    {
+    /// @name Constructors
+    /// @{
+    public:
+
+      /// Initializes the lock but doesn't acquire it. If \p recursive is set
+      /// to false, the lock will not be recursive which makes it cheaper but
+      /// also more likely to deadlock (same thread can't acquire more than
+      /// once).
+      /// @brief Default Constructor.
+      explicit MutexImpl(bool recursive = true);
+
+      /// Releases and removes the lock
+      /// @brief Destructor
+      ~MutexImpl();
+
+    /// @}
+    /// @name Methods
+    /// @{
+    public:
+
+      /// Attempts to unconditionally acquire the lock. If the lock is held by
+      /// another thread, this method will wait until it can acquire the lock.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally acquire the lock.
+      bool acquire();
+
+      /// Attempts to release the lock. If the lock is held by the current
+      /// thread, the lock is released allowing other threads to acquire the
+      /// lock.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally release the lock.
+      bool release();
+
+      /// Attempts to acquire the lock without blocking. If the lock is not
+      /// available, this function returns false quickly (without blocking). If
+      /// the lock is available, it is acquired.
+      /// @returns false if any kind of error occurs or the lock is not
+      /// available, true otherwise.
+      /// @brief Try to acquire the lock.
+      bool tryacquire();
+
+    /// @}
+    /// @name Platform Dependent Data
+    /// @{
+    private:
+#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
+      void* data_; ///< We don't know what the data will be
+#endif
+
+    /// @}
+    /// @name Do Not Implement
+    /// @{
+    private:
+      MutexImpl(const MutexImpl &) = delete;
+      void operator=(const MutexImpl &) = delete;
+    /// @}
+    };
+
+
+    /// SmartMutex - A mutex with a compile time constant parameter that
+    /// indicates whether this mutex should become a no-op when we're not
+    /// running in multithreaded mode.
+    template<bool mt_only>
+    class SmartMutex {
+      MutexImpl impl;
+      unsigned acquired;
+      bool recursive;
+    public:
+      explicit SmartMutex(bool rec = true) :
+        impl(rec), acquired(0), recursive(rec) { }
+
+      bool lock() {
+        if (!mt_only || llvm_is_multithreaded()) {
+          return impl.acquire();
+        } else {
+          // Single-threaded debugging code.  This would be racy in
+          // multithreaded mode, but provides sanity checks in single-threaded
+          // mode.
+          assert((recursive || acquired == 0) && "Lock already acquired!!");
+          ++acquired;
+          return true;
+        }
+      }
+
+      bool unlock() {
+        if (!mt_only || llvm_is_multithreaded()) {
+          return impl.release();
+        } else {
+          // Single-threaded debugging code.  This would be racy in
+          // multithreaded mode, but provides sanity checks in single-threaded
+          // mode.
+          assert(((recursive && acquired) || (acquired == 1)) &&
+                 "Lock not acquired before release!");
+          --acquired;
+          return true;
+        }
+      }
+
+      bool try_lock() {
+        if (!mt_only || llvm_is_multithreaded())
+          return impl.tryacquire();
+        else return true;
+      }
+
+      private:
+        SmartMutex(const SmartMutex<mt_only> & original);
+        void operator=(const SmartMutex<mt_only> &);
+    };
+
+    /// Mutex - A standard, always enforced mutex.
+    typedef SmartMutex<false> Mutex;
+
+    template<bool mt_only>
+    class SmartScopedLock  {
+      SmartMutex<mt_only>& mtx;
+
+    public:
+      SmartScopedLock(SmartMutex<mt_only>& m) : mtx(m) {
+        mtx.lock();
+      }
+
+      ~SmartScopedLock() {
+        mtx.unlock();
+      }
+    };
+
+    typedef SmartScopedLock<false> ScopedLock;
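+
+    /// A minimal usage sketch: guard a critical section with a static mutex
+    /// (the Counter global is illustrative):
+    /// \code
+    ///   static sys::Mutex CounterLock;
+    ///   static unsigned Counter = 0;
+    ///
+    ///   void increment() {
+    ///     sys::ScopedLock Guard(CounterLock); // acquired here
+    ///     ++Counter;
+    ///   }                                     // released at scope exit
+    /// \endcode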
+  }
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/MutexGuard.h b/linux-x64/clang/include/llvm/Support/MutexGuard.h
new file mode 100644
index 0000000..07b64b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/MutexGuard.h
@@ -0,0 +1,41 @@
+//===-- Support/MutexGuard.h - Acquire/Release Mutex In Scope ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a guard for a block of code that ensures a Mutex is locked
+// upon construction and released upon destruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MUTEXGUARD_H
+#define LLVM_SUPPORT_MUTEXGUARD_H
+
+#include "llvm/Support/Mutex.h"
+
+namespace llvm {
+  /// Instances of this class acquire a given Mutex Lock when constructed and
+  /// hold that lock until destruction. The intention is to instantiate one of
+  /// these on the stack at the top of some scope to be assured that C++
+  /// destruction of the object will always release the Mutex and thus avoid
+  /// a host of nasty multi-threading problems in the face of exceptions, etc.
+  /// @brief Guard a section of code with a Mutex.
+  class MutexGuard {
+    sys::Mutex &M;
+    MutexGuard(const MutexGuard &) = delete;
+    void operator=(const MutexGuard &) = delete;
+  public:
+    MutexGuard(sys::Mutex &m) : M(m) { M.lock(); }
+    ~MutexGuard() { M.unlock(); }
+    /// holds - Returns true if this locker instance holds the specified lock.
+    /// This is mostly used in assertions to validate that the correct mutex
+    /// is held.
+    bool holds(const sys::Mutex& lock) const { return &M == &lock; }
+  };
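+
+  /// Example: asserting that a caller already holds the expected mutex. An
+  /// illustrative sketch; JITLock and runOnFunction are hypothetical names.
+  /// @code
+  ///   sys::Mutex JITLock;
+  ///
+  ///   void runOnFunction(const MutexGuard &Locked) {
+  ///     assert(Locked.holds(JITLock) && "JIT lock must be held!");
+  ///     // ... state protected by JITLock may be touched safely here ...
+  ///   }
+  /// @endcode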
+}
+
+#endif // LLVM_SUPPORT_MUTEXGUARD_H
diff --git a/linux-x64/clang/include/llvm/Support/NativeFormatting.h b/linux-x64/clang/include/llvm/Support/NativeFormatting.h
new file mode 100644
index 0000000..6d1dd7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/NativeFormatting.h
@@ -0,0 +1,49 @@
+//===- NativeFormatting.h - Low level formatting helpers ---------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_NATIVE_FORMATTING_H
+#define LLVM_SUPPORT_NATIVE_FORMATTING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cstdint>
+
+namespace llvm {
+enum class FloatStyle { Exponent, ExponentUpper, Fixed, Percent };
+enum class IntegerStyle {
+  Integer,
+  Number,
+};
+enum class HexPrintStyle { Upper, Lower, PrefixUpper, PrefixLower };
+
+size_t getDefaultPrecision(FloatStyle Style);
+
+bool isPrefixedHexStyle(HexPrintStyle S);
+
+void write_integer(raw_ostream &S, unsigned int N, size_t MinDigits,
+                   IntegerStyle Style);
+void write_integer(raw_ostream &S, int N, size_t MinDigits, IntegerStyle Style);
+void write_integer(raw_ostream &S, unsigned long N, size_t MinDigits,
+                   IntegerStyle Style);
+void write_integer(raw_ostream &S, long N, size_t MinDigits,
+                   IntegerStyle Style);
+void write_integer(raw_ostream &S, unsigned long long N, size_t MinDigits,
+                   IntegerStyle Style);
+void write_integer(raw_ostream &S, long long N, size_t MinDigits,
+                   IntegerStyle Style);
+
+void write_hex(raw_ostream &S, uint64_t N, HexPrintStyle Style,
+               Optional<size_t> Width = None);
+void write_double(raw_ostream &S, double D, FloatStyle Style,
+                  Optional<size_t> Precision = None);
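+
+// Example usage (an illustrative sketch; llvm::outs() is declared in
+// raw_ostream.h, which this header already includes):
+//
+//   write_integer(outs(), 42u, /*MinDigits=*/4, IntegerStyle::Integer);
+//   write_hex(outs(), 0xDEADBEEFull, HexPrintStyle::PrefixLower);
+//   write_double(outs(), 3.14159, FloatStyle::Fixed, /*Precision=*/2);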
+}
+
+#endif
+
diff --git a/linux-x64/clang/include/llvm/Support/OnDiskHashTable.h b/linux-x64/clang/include/llvm/Support/OnDiskHashTable.h
new file mode 100644
index 0000000..3ef004b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/OnDiskHashTable.h
@@ -0,0 +1,616 @@
+//===--- OnDiskHashTable.h - On-Disk Hash Table Implementation --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines facilities for reading and writing on-disk hash tables.
+///
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_ONDISKHASHTABLE_H
+#define LLVM_SUPPORT_ONDISKHASHTABLE_H
+
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+
+namespace llvm {
+
+/// \brief Generates an on-disk hash table.
+///
+/// This needs an \c Info that handles storing values into the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleInfo {
+/// public:
+///   typedef ExampleKey key_type;   // Must be copy constructible
+///   typedef ExampleKey &key_type_ref;
+///   typedef ExampleData data_type; // Must be copy constructible
+///   typedef ExampleData &data_type_ref;
+///   typedef uint32_t hash_value_type; // The type the hash function returns.
+///   typedef uint32_t offset_type; // The type for offsets into the table.
+///
+///   /// Calculate the hash for Key
+///   static hash_value_type ComputeHash(key_type_ref Key);
+///   /// Return the lengths, in bytes, of the given Key/Data pair.
+///   static std::pair<offset_type, offset_type>
+///   EmitKeyDataLength(raw_ostream &Out, key_type_ref Key, data_type_ref Data);
+///   /// Write Key to Out.  KeyLen is the length from EmitKeyDataLength.
+///   static void EmitKey(raw_ostream &Out, key_type_ref Key,
+///                       offset_type KeyLen);
+///   /// Write Data to Out.  DataLen is the length from EmitKeyDataLength.
+///   static void EmitData(raw_ostream &Out, key_type_ref Key,
+///                        data_type_ref Data, offset_type DataLen);
+///   /// Determine if two keys are equal. Optional, only needed by contains.
+///   static bool EqualKey(key_type_ref Key1, key_type_ref Key2);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTableGenerator {
+  /// \brief A single item in the hash table.
+  class Item {
+  public:
+    typename Info::key_type Key;
+    typename Info::data_type Data;
+    Item *Next;
+    const typename Info::hash_value_type Hash;
+
+    Item(typename Info::key_type_ref Key, typename Info::data_type_ref Data,
+         Info &InfoObj)
+        : Key(Key), Data(Data), Next(nullptr), Hash(InfoObj.ComputeHash(Key)) {}
+  };
+
+  typedef typename Info::offset_type offset_type;
+  offset_type NumBuckets;
+  offset_type NumEntries;
+  llvm::SpecificBumpPtrAllocator<Item> BA;
+
+  /// \brief A linked list of values in a particular hash bucket.
+  struct Bucket {
+    offset_type Off;
+    unsigned Length;
+    Item *Head;
+  };
+
+  Bucket *Buckets;
+
+private:
+  /// \brief Insert an item into the appropriate hash bucket.
+  void insert(Bucket *Buckets, size_t Size, Item *E) {
+    Bucket &B = Buckets[E->Hash & (Size - 1)];
+    E->Next = B.Head;
+    ++B.Length;
+    B.Head = E;
+  }
+
+  /// \brief Resize the hash table, moving the old entries into the new buckets.
+  void resize(size_t NewSize) {
+    Bucket *NewBuckets = static_cast<Bucket *>(
+        safe_calloc(NewSize, sizeof(Bucket)));
+    // Populate NewBuckets with the old entries.
+    for (size_t I = 0; I < NumBuckets; ++I)
+      for (Item *E = Buckets[I].Head; E;) {
+        Item *N = E->Next;
+        E->Next = nullptr;
+        insert(NewBuckets, NewSize, E);
+        E = N;
+      }
+
+    free(Buckets);
+    NumBuckets = NewSize;
+    Buckets = NewBuckets;
+  }
+
+public:
+  /// \brief Insert an entry into the table.
+  void insert(typename Info::key_type_ref Key,
+              typename Info::data_type_ref Data) {
+    Info InfoObj;
+    insert(Key, Data, InfoObj);
+  }
+
+  /// \brief Insert an entry into the table.
+  ///
+  /// Uses the provided Info instead of a stack allocated one.
+  void insert(typename Info::key_type_ref Key,
+              typename Info::data_type_ref Data, Info &InfoObj) {
+    ++NumEntries;
+    if (4 * NumEntries >= 3 * NumBuckets)
+      resize(NumBuckets * 2);
+    insert(Buckets, NumBuckets, new (BA.Allocate()) Item(Key, Data, InfoObj));
+  }
+
+  /// \brief Determine whether an entry has been inserted.
+  bool contains(typename Info::key_type_ref Key, Info &InfoObj) {
+    unsigned Hash = InfoObj.ComputeHash(Key);
+    for (Item *I = Buckets[Hash & (NumBuckets - 1)].Head; I; I = I->Next)
+      if (I->Hash == Hash && InfoObj.EqualKey(I->Key, Key))
+        return true;
+    return false;
+  }
+
+  /// \brief Emit the table to Out, which must not be at offset 0.
+  offset_type Emit(raw_ostream &Out) {
+    Info InfoObj;
+    return Emit(Out, InfoObj);
+  }
+
+  /// \brief Emit the table to Out, which must not be at offset 0.
+  ///
+  /// Uses the provided Info instead of a stack allocated one.
+  offset_type Emit(raw_ostream &Out, Info &InfoObj) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
+
+    // Now that we're done adding entries, resize the bucket list if it's
+    // significantly too large. (This only happens if the number of
+    // entries is small and we're within our initial allocation of
+    // 64 buckets.) We aim for an occupancy ratio in [3/8, 3/4).
+    //
+    // As a special case, if there are two or fewer entries, just
+    // form a single bucket. A linear scan is fine in that case, and
+    // this is very common in C++ class lookup tables. This also
+    // guarantees we produce at least one bucket for an empty table.
+    //
+    // FIXME: Try computing a perfect hash function at this point.
+    unsigned TargetNumBuckets =
+        NumEntries <= 2 ? 1 : NextPowerOf2(NumEntries * 4 / 3);
+    if (TargetNumBuckets != NumBuckets)
+      resize(TargetNumBuckets);
+
+    // Emit the payload of the table.
+    for (offset_type I = 0; I < NumBuckets; ++I) {
+      Bucket &B = Buckets[I];
+      if (!B.Head)
+        continue;
+
+      // Store the offset for the data of this bucket.
+      B.Off = Out.tell();
+      assert(B.Off && "Cannot write a bucket at offset 0. Please add padding.");
+
+      // Write out the number of items in the bucket.
+      LE.write<uint16_t>(B.Length);
+      assert(B.Length != 0 && "Bucket has a head but zero length?");
+
+      // Write out the entries in the bucket.
+      for (Item *I = B.Head; I; I = I->Next) {
+        LE.write<typename Info::hash_value_type>(I->Hash);
+        const std::pair<offset_type, offset_type> &Len =
+            InfoObj.EmitKeyDataLength(Out, I->Key, I->Data);
+#ifdef NDEBUG
+        InfoObj.EmitKey(Out, I->Key, Len.first);
+        InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
+#else
+        // In asserts mode, check that the user's reported lengths match the
+        // bytes they actually wrote.
+        uint64_t KeyStart = Out.tell();
+        InfoObj.EmitKey(Out, I->Key, Len.first);
+        uint64_t DataStart = Out.tell();
+        InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
+        uint64_t End = Out.tell();
+        assert(offset_type(DataStart - KeyStart) == Len.first &&
+               "key length does not match bytes written");
+        assert(offset_type(End - DataStart) == Len.second &&
+               "data length does not match bytes written");
+#endif
+      }
+    }
+
+    // Pad with zeros so that we can start the hashtable at an aligned address.
+    offset_type TableOff = Out.tell();
+    uint64_t N = llvm::OffsetToAlignment(TableOff, alignof(offset_type));
+    TableOff += N;
+    while (N--)
+      LE.write<uint8_t>(0);
+
+    // Emit the hashtable itself.
+    LE.write<offset_type>(NumBuckets);
+    LE.write<offset_type>(NumEntries);
+    for (offset_type I = 0; I < NumBuckets; ++I)
+      LE.write<offset_type>(Buckets[I].Off);
+
+    return TableOff;
+  }
+
+  OnDiskChainedHashTableGenerator() {
+    NumEntries = 0;
+    NumBuckets = 64;
+    // Note that we do not need to run the constructors of the individual
+    // Bucket objects since 'calloc' returns bytes that are all 0.
+    Buckets = static_cast<Bucket *>(safe_calloc(NumBuckets, sizeof(Bucket)));
+  }
+
+  ~OnDiskChainedHashTableGenerator() { std::free(Buckets); }
+};
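+
+/// Example: building and emitting a table with a user-supplied \c Info trait.
+/// This is an illustrative sketch; \c MyTraitInfo (an implementation of the
+/// interface documented above), \c Buffer, and the keys and data are
+/// hypothetical.
+/// \code
+///   OnDiskChainedHashTableGenerator<MyTraitInfo> Generator;
+///   Generator.insert(Key1, Data1);
+///   Generator.insert(Key2, Data2);
+///
+///   SmallString<256> Buffer;
+///   raw_svector_ostream Out(Buffer);
+///   Out << '\0'; // Pad so that no bucket lands at offset 0.
+///   auto TableOffset = Generator.Emit(Out);
+/// \endcode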
+
+/// \brief Provides lookup on an on-disk hash table.
+///
+/// This needs an \c Info that handles reading values from the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleLookupInfo {
+/// public:
+///   typedef ExampleData data_type;
+///   typedef ExampleInternalKey internal_key_type; // The stored key type.
+///   typedef ExampleKey external_key_type; // The type to pass to find().
+///   typedef uint32_t hash_value_type; // The type the hash function returns.
+///   typedef uint32_t offset_type; // The type for offsets into the table.
+///
+///   /// Compare two keys for equality.
+///   static bool EqualKey(internal_key_type &Key1, internal_key_type &Key2);
+///   /// Calculate the hash for the given key.
+///   static hash_value_type ComputeHash(internal_key_type &IKey);
+///   /// Translate from the semantic type of a key in the hash table to the
+///   /// type that is actually stored and used for hashing and comparisons.
+///   /// The internal and external types are often the same, in which case this
+///   /// can simply return the passed in value.
+///   static const internal_key_type &GetInternalKey(external_key_type &EKey);
+///   /// Read the key and data length from Buffer, leaving it pointing at the
+///   /// following byte.
+///   static std::pair<offset_type, offset_type>
+///   ReadKeyDataLength(const unsigned char *&Buffer);
+///   /// Read the key from Buffer, given the KeyLen as reported from
+///   /// ReadKeyDataLength.
+///   const internal_key_type &ReadKey(const unsigned char *Buffer,
+///                                    offset_type KeyLen);
+///   /// Read the data for Key from Buffer, given the DataLen as reported from
+///   /// ReadKeyDataLength.
+///   data_type ReadData(StringRef Key, const unsigned char *Buffer,
+///                      offset_type DataLen);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTable {
+  const typename Info::offset_type NumBuckets;
+  const typename Info::offset_type NumEntries;
+  const unsigned char *const Buckets;
+  const unsigned char *const Base;
+  Info InfoObj;
+
+public:
+  typedef Info InfoType;
+  typedef typename Info::internal_key_type internal_key_type;
+  typedef typename Info::external_key_type external_key_type;
+  typedef typename Info::data_type data_type;
+  typedef typename Info::hash_value_type hash_value_type;
+  typedef typename Info::offset_type offset_type;
+
+  OnDiskChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+                         const unsigned char *Buckets,
+                         const unsigned char *Base,
+                         const Info &InfoObj = Info())
+      : NumBuckets(NumBuckets), NumEntries(NumEntries), Buckets(Buckets),
+        Base(Base), InfoObj(InfoObj) {
+    assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+           "'buckets' must have a 4-byte alignment");
+  }
+
+  /// Read the number of buckets and the number of entries from a hash table
+  /// produced by OnDiskChainedHashTableGenerator::Emit, and advance the Buckets
+  /// pointer past them.
+  static std::pair<offset_type, offset_type>
+  readNumBucketsAndEntries(const unsigned char *&Buckets) {
+    assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+           "buckets should be 4-byte aligned.");
+    using namespace llvm::support;
+    offset_type NumBuckets =
+        endian::readNext<offset_type, little, aligned>(Buckets);
+    offset_type NumEntries =
+        endian::readNext<offset_type, little, aligned>(Buckets);
+    return std::make_pair(NumBuckets, NumEntries);
+  }
+
+  offset_type getNumBuckets() const { return NumBuckets; }
+  offset_type getNumEntries() const { return NumEntries; }
+  const unsigned char *getBase() const { return Base; }
+  const unsigned char *getBuckets() const { return Buckets; }
+
+  bool isEmpty() const { return NumEntries == 0; }
+
+  class iterator {
+    internal_key_type Key;
+    const unsigned char *const Data;
+    const offset_type Len;
+    Info *InfoObj;
+
+  public:
+    iterator() : Key(), Data(nullptr), Len(0), InfoObj(nullptr) {}
+    iterator(const internal_key_type K, const unsigned char *D, offset_type L,
+             Info *InfoObj)
+        : Key(K), Data(D), Len(L), InfoObj(InfoObj) {}
+
+    data_type operator*() const { return InfoObj->ReadData(Key, Data, Len); }
+
+    const unsigned char *getDataPtr() const { return Data; }
+    offset_type getDataLen() const { return Len; }
+
+    bool operator==(const iterator &X) const { return X.Data == Data; }
+    bool operator!=(const iterator &X) const { return X.Data != Data; }
+  };
+
+  /// \brief Look up the stored data for a particular key.
+  iterator find(const external_key_type &EKey, Info *InfoPtr = nullptr) {
+    const internal_key_type &IKey = InfoObj.GetInternalKey(EKey);
+    hash_value_type KeyHash = InfoObj.ComputeHash(IKey);
+    return find_hashed(IKey, KeyHash, InfoPtr);
+  }
+
+  /// \brief Look up the stored data for a particular key with a known hash.
+  iterator find_hashed(const internal_key_type &IKey, hash_value_type KeyHash,
+                       Info *InfoPtr = nullptr) {
+    using namespace llvm::support;
+
+    if (!InfoPtr)
+      InfoPtr = &InfoObj;
+
+    // Each bucket is just an offset into the hash table file.
+    offset_type Idx = KeyHash & (NumBuckets - 1);
+    const unsigned char *Bucket = Buckets + sizeof(offset_type) * Idx;
+
+    offset_type Offset = endian::readNext<offset_type, little, aligned>(Bucket);
+    if (Offset == 0)
+      return iterator(); // Empty bucket.
+    const unsigned char *Items = Base + Offset;
+
+    // 'Items' starts with a 16-bit unsigned integer representing the
+    // number of items in this bucket.
+    unsigned Len = endian::readNext<uint16_t, little, unaligned>(Items);
+
+    for (unsigned i = 0; i < Len; ++i) {
+      // Read the hash.
+      hash_value_type ItemHash =
+          endian::readNext<hash_value_type, little, unaligned>(Items);
+
+      // Determine the length of the key and the data.
+      const std::pair<offset_type, offset_type> &L =
+          Info::ReadKeyDataLength(Items);
+      offset_type ItemLen = L.first + L.second;
+
+      // Compare the hashes.  If they are not the same, skip the entry entirely.
+      if (ItemHash != KeyHash) {
+        Items += ItemLen;
+        continue;
+      }
+
+      // Read the key.
+      const internal_key_type &X =
+          InfoPtr->ReadKey((const unsigned char *const)Items, L.first);
+
+      // If the key doesn't match just skip reading the value.
+      if (!InfoPtr->EqualKey(X, IKey)) {
+        Items += ItemLen;
+        continue;
+      }
+
+      // The key matches!
+      return iterator(X, Items + L.first, L.second, InfoPtr);
+    }
+
+    return iterator();
+  }
+
+  iterator end() const { return iterator(); }
+
+  Info &getInfoObj() { return InfoObj; }
+
+  /// \brief Create the hash table.
+  ///
+  /// \param Buckets is the beginning of the hash table itself, which follows
+/// the payload of the entire structure. This is the value returned by
+/// OnDiskChainedHashTableGenerator::Emit.
+  ///
+  /// \param Base is the point from which all offsets into the structure are
+  /// based. This is offset 0 in the stream that was used when Emitting the
+  /// table.
+  static OnDiskChainedHashTable *Create(const unsigned char *Buckets,
+                                        const unsigned char *const Base,
+                                        const Info &InfoObj = Info()) {
+    assert(Buckets > Base);
+    auto NumBucketsAndEntries = readNumBucketsAndEntries(Buckets);
+    return new OnDiskChainedHashTable<Info>(NumBucketsAndEntries.first,
+                                            NumBucketsAndEntries.second,
+                                            Buckets, Base, InfoObj);
+  }
+};
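+
+/// Example: looking up a key in a table emitted by
+/// OnDiskChainedHashTableGenerator. An illustrative sketch; \c MyLookupInfo
+/// is a hypothetical trait and \c TableOffset is the value returned by
+/// Emit().
+/// \code
+///   using Table = OnDiskChainedHashTable<MyLookupInfo>;
+///   std::unique_ptr<Table> T(Table::Create(Base + TableOffset, Base));
+///   auto It = T->find(Key);
+///   if (It != T->end())
+///     auto Data = *It; // Deserializes the stored value via ReadData.
+/// \endcode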
+
+/// \brief Provides lookup and iteration over an on-disk hash table.
+///
+/// \copydetails llvm::OnDiskChainedHashTable
+template <typename Info>
+class OnDiskIterableChainedHashTable : public OnDiskChainedHashTable<Info> {
+  const unsigned char *Payload;
+
+public:
+  typedef OnDiskChainedHashTable<Info>          base_type;
+  typedef typename base_type::internal_key_type internal_key_type;
+  typedef typename base_type::external_key_type external_key_type;
+  typedef typename base_type::data_type         data_type;
+  typedef typename base_type::hash_value_type   hash_value_type;
+  typedef typename base_type::offset_type       offset_type;
+
+private:
+  /// \brief Iterates over all of the keys in the table.
+  class iterator_base {
+    const unsigned char *Ptr;
+    offset_type NumItemsInBucketLeft;
+    offset_type NumEntriesLeft;
+
+  public:
+    typedef external_key_type value_type;
+
+    iterator_base(const unsigned char *const Ptr, offset_type NumEntries)
+        : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries) {}
+    iterator_base()
+        : Ptr(nullptr), NumItemsInBucketLeft(0), NumEntriesLeft(0) {}
+
+    friend bool operator==(const iterator_base &X, const iterator_base &Y) {
+      return X.NumEntriesLeft == Y.NumEntriesLeft;
+    }
+    friend bool operator!=(const iterator_base &X, const iterator_base &Y) {
+      return X.NumEntriesLeft != Y.NumEntriesLeft;
+    }
+
+    /// Move to the next item.
+    void advance() {
+      using namespace llvm::support;
+      if (!NumItemsInBucketLeft) {
+        // Each bucket begins with a 16-bit unsigned integer holding the
+        // number of items in that bucket.
+        NumItemsInBucketLeft =
+            endian::readNext<uint16_t, little, unaligned>(Ptr);
+      }
+      Ptr += sizeof(hash_value_type); // Skip the hash.
+      // Determine the length of the key and the data.
+      const std::pair<offset_type, offset_type> &L =
+          Info::ReadKeyDataLength(Ptr);
+      Ptr += L.first + L.second;
+      assert(NumItemsInBucketLeft);
+      --NumItemsInBucketLeft;
+      assert(NumEntriesLeft);
+      --NumEntriesLeft;
+    }
+
+    /// Get the start of the item as written by the trait (after the hash and
+    /// immediately before the key and value length).
+    const unsigned char *getItem() const {
+      return Ptr + (NumItemsInBucketLeft ? 0 : 2) + sizeof(hash_value_type);
+    }
+  };
+
+public:
+  OnDiskIterableChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+                                 const unsigned char *Buckets,
+                                 const unsigned char *Payload,
+                                 const unsigned char *Base,
+                                 const Info &InfoObj = Info())
+      : base_type(NumBuckets, NumEntries, Buckets, Base, InfoObj),
+        Payload(Payload) {}
+
+  /// \brief Iterates over all of the keys in the table.
+  class key_iterator : public iterator_base {
+    Info *InfoObj;
+
+  public:
+    typedef external_key_type value_type;
+
+    key_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+                 Info *InfoObj)
+        : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
+    key_iterator() : iterator_base(), InfoObj() {}
+
+    key_iterator &operator++() {
+      this->advance();
+      return *this;
+    }
+    key_iterator operator++(int) { // Postincrement
+      key_iterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    internal_key_type getInternalKey() const {
+      auto *LocalPtr = this->getItem();
+
+      // Determine the length of the key and the data.
+      auto L = Info::ReadKeyDataLength(LocalPtr);
+
+      // Read the key.
+      return InfoObj->ReadKey(LocalPtr, L.first);
+    }
+
+    value_type operator*() const {
+      return InfoObj->GetExternalKey(getInternalKey());
+    }
+  };
+
+  key_iterator key_begin() {
+    return key_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+  }
+  key_iterator key_end() { return key_iterator(); }
+
+  iterator_range<key_iterator> keys() {
+    return make_range(key_begin(), key_end());
+  }
+
+  /// \brief Iterates over all the entries in the table, returning the data.
+  class data_iterator : public iterator_base {
+    Info *InfoObj;
+
+  public:
+    typedef data_type value_type;
+
+    data_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+                  Info *InfoObj)
+        : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
+    data_iterator() : iterator_base(), InfoObj() {}
+
+    data_iterator &operator++() { // Preincrement
+      this->advance();
+      return *this;
+    }
+    data_iterator operator++(int) { // Postincrement
+      data_iterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    value_type operator*() const {
+      auto *LocalPtr = this->getItem();
+
+      // Determine the length of the key and the data.
+      auto L = Info::ReadKeyDataLength(LocalPtr);
+
+      // Read the key, then use it to read and return the data.
+      const internal_key_type &Key = InfoObj->ReadKey(LocalPtr, L.first);
+      return InfoObj->ReadData(Key, LocalPtr + L.first, L.second);
+    }
+  };
+
+  data_iterator data_begin() {
+    return data_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+  }
+  data_iterator data_end() { return data_iterator(); }
+
+  iterator_range<data_iterator> data() {
+    return make_range(data_begin(), data_end());
+  }
+
+  /// \brief Create the hash table.
+  ///
+  /// \param Buckets is the beginning of the hash table itself, which follows
+  /// the payload of the entire structure. This is the value returned by
+  /// OnDiskChainedHashTableGenerator::Emit.
+  ///
+  /// \param Payload is the beginning of the data contained in the table.  This
+  /// is Base plus any padding or header data that was stored, i.e., the offset
+  /// that the stream was at when calling Emit.
+  ///
+  /// \param Base is the point from which all offsets into the structure are
+  /// based. This is offset 0 in the stream that was used when Emitting the
+  /// table.
+  static OnDiskIterableChainedHashTable *
+  Create(const unsigned char *Buckets, const unsigned char *const Payload,
+         const unsigned char *const Base, const Info &InfoObj = Info()) {
+    assert(Buckets > Base);
+    auto NumBucketsAndEntries =
+        OnDiskIterableChainedHashTable<Info>::readNumBucketsAndEntries(Buckets);
+    return new OnDiskIterableChainedHashTable<Info>(
+        NumBucketsAndEntries.first, NumBucketsAndEntries.second,
+        Buckets, Payload, Base, InfoObj);
+  }
+};
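+
+/// Example: visiting every key and every datum in an iterable table, a sketch
+/// under the same assumptions as the lookup example above (visitKey and
+/// visitData are hypothetical callables).
+/// \code
+///   for (const auto &Key : T->keys())
+///     visitKey(Key);
+///   for (const auto &Data : T->data())
+///     visitData(Data);
+/// \endcode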
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Options.h b/linux-x64/clang/include/llvm/Support/Options.h
new file mode 100644
index 0000000..9019804
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Options.h
@@ -0,0 +1,120 @@
+//===- llvm/Support/Options.h - Debug options support -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares helper objects for defining debug options that can be
+/// configured via the command line. The new API currently builds on the cl::opt
+/// API, but does not require the use of static globals.
+///
+/// With this API options are registered during initialization. For passes, this
+/// happens during pass initialization. Passes with options will call a static
+/// registerOptions method during initialization that registers options with the
+/// OptionRegistry. An example implementation of registerOptions is:
+///
+/// \code
+/// static void registerOptions() {
+///   OptionRegistry::registerOption<bool, Scalarizer,
+///                                  &Scalarizer::ScalarizeLoadStore>(
+///       "scalarize-load-store",
+///       "Allow the scalarizer pass to scalarize loads and stores", false);
+/// }
+/// \endcode
+///
+/// When reading data for options the interface is via the LLVMContext. Option
+/// data for passes should be read from the context during doInitialization. An
+/// example of reading the above option would be:
+///
+/// \code
+/// ScalarizeLoadStore =
+///     M.getContext().getOption<bool, Scalarizer,
+///                              &Scalarizer::ScalarizeLoadStore>();
+/// \endcode
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_OPTIONS_H
+#define LLVM_SUPPORT_OPTIONS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+namespace detail {
+
+// Options are keyed off the unique address of a static character synthesized
+// based on the template arguments.
+template <typename ValT, typename Base, ValT(Base::*Mem)> class OptionKey {
+public:
+  static char ID;
+};
+
+template <typename ValT, typename Base, ValT(Base::*Mem)>
+char OptionKey<ValT, Base, Mem>::ID = 0;
+
+} // namespace detail
+
+/// \brief Singleton class used to register debug options.
+///
+/// The OptionRegistry is responsible for managing lifetimes of the options and
+/// provides interfaces for option registration and reading values from options.
+/// This object is a singleton, only one instance should ever exist so that all
+/// options are registered in the same place.
+class OptionRegistry {
+private:
+  DenseMap<void *, cl::Option *> Options;
+
+  /// \brief Adds a cl::Option to the registry.
+  ///
+  /// \param Key unique key for option
+  /// \param O option to map to \p Key
+  ///
+  /// Allocated cl::Options are owned by the OptionRegistry and are deallocated
+  /// on destruction or removal.
+  void addOption(void *Key, cl::Option *O);
+
+public:
+  ~OptionRegistry();
+  OptionRegistry() {}
+
+  /// \brief Returns a reference to the singleton instance.
+  static OptionRegistry &instance();
+
+  /// \brief Registers an option with the OptionRegistry singleton.
+  ///
+  /// \tparam ValT type of the option's data
+  /// \tparam Base class used to key the option
+  /// \tparam Mem member of \p Base used for keying the option
+  ///
+  /// Options are keyed off the template parameters to generate unique static
+  /// characters. The template parameters are (1) the type of the data the
+  /// option stores (\p ValT), (2) the class that will read the option
+  /// (\p Base), and (3) the member that the class will store the data into
+  /// (\p Mem).
+  template <typename ValT, typename Base, ValT(Base::*Mem)>
+  static void registerOption(StringRef ArgStr, StringRef Desc,
+                             const ValT &InitValue) {
+    cl::opt<ValT> *Option = new cl::opt<ValT>(ArgStr, cl::desc(Desc),
+                                              cl::Hidden, cl::init(InitValue));
+    instance().addOption(&detail::OptionKey<ValT, Base, Mem>::ID, Option);
+  }
+
+  /// \brief Returns the value of the option.
+  ///
+  /// \tparam ValT type of the option's data
+  /// \tparam Base class used to key the option
+  /// \tparam Mem member of \p Base used for keying the option
+  ///
+  /// Reads option values based on the key generated by the template parameters.
+  /// Keying for get() is the same as keying for registerOption.
+  template <typename ValT, typename Base, ValT(Base::*Mem)> ValT get() const {
+    auto It = Options.find(&detail::OptionKey<ValT, Base, Mem>::ID);
+    assert(It != Options.end() && "Option not in OptionRegistry");
+    return *(cl::opt<ValT> *)It->second;
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Parallel.h b/linux-x64/clang/include/llvm/Support/Parallel.h
new file mode 100644
index 0000000..01b2bcd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Parallel.h
@@ -0,0 +1,247 @@
+//===- llvm/Support/Parallel.h - Parallel algorithms ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PARALLEL_H
+#define LLVM_SUPPORT_PARALLEL_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <algorithm>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+
+#if defined(_MSC_VER) && LLVM_ENABLE_THREADS
+#pragma warning(push)
+#pragma warning(disable : 4530)
+#include <concrt.h>
+#include <ppl.h>
+#pragma warning(pop)
+#endif
+
+namespace llvm {
+
+namespace parallel {
+struct sequential_execution_policy {};
+struct parallel_execution_policy {};
+
+template <typename T>
+struct is_execution_policy
+    : public std::integral_constant<
+          bool, llvm::is_one_of<T, sequential_execution_policy,
+                                parallel_execution_policy>::value> {};
+
+constexpr sequential_execution_policy seq{};
+constexpr parallel_execution_policy par{};
+
+namespace detail {
+
+#if LLVM_ENABLE_THREADS
+
+class Latch {
+  uint32_t Count;
+  mutable std::mutex Mutex;
+  mutable std::condition_variable Cond;
+
+public:
+  explicit Latch(uint32_t Count = 0) : Count(Count) {}
+  ~Latch() { sync(); }
+
+  void inc() {
+    std::lock_guard<std::mutex> lock(Mutex);
+    ++Count;
+  }
+
+  void dec() {
+    std::lock_guard<std::mutex> lock(Mutex);
+    if (--Count == 0)
+      Cond.notify_all();
+  }
+
+  void sync() const {
+    std::unique_lock<std::mutex> lock(Mutex);
+    Cond.wait(lock, [&] { return Count == 0; });
+  }
+};
+
+class TaskGroup {
+  Latch L;
+
+public:
+  void spawn(std::function<void()> f);
+
+  void sync() const { L.sync(); }
+};
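+
+// Example: running two independent jobs and waiting for both to finish. An
+// illustrative sketch; doWork is a hypothetical callable.
+//
+//   TaskGroup TG;
+//   TG.spawn([] { doWork(0); });
+//   TG.spawn([] { doWork(1); });
+//   TG.sync(); // Blocks until both spawned tasks complete.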
+
+#if defined(_MSC_VER)
+template <class RandomAccessIterator, class Comparator>
+void parallel_sort(RandomAccessIterator Start, RandomAccessIterator End,
+                   const Comparator &Comp) {
+  concurrency::parallel_sort(Start, End, Comp);
+}
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+  concurrency::parallel_for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void parallel_for_each_n(IndexTy Begin, IndexTy End, FuncTy Fn) {
+  concurrency::parallel_for(Begin, End, Fn);
+}
+
+#else
+const ptrdiff_t MinParallelSize = 1024;
+
+/// \brief Inclusive median.
+template <class RandomAccessIterator, class Comparator>
+RandomAccessIterator medianOf3(RandomAccessIterator Start,
+                               RandomAccessIterator End,
+                               const Comparator &Comp) {
+  RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);
+  return Comp(*Start, *(End - 1))
+             ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)
+                                       : End - 1)
+             : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)
+                                   : Start);
+}
+
+template <class RandomAccessIterator, class Comparator>
+void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,
+                         const Comparator &Comp, TaskGroup &TG, size_t Depth) {
+  // Do a sequential sort for small inputs.
+  if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {
+    std::sort(Start, End, Comp);
+    return;
+  }
+
+  // Partition.
+  auto Pivot = medianOf3(Start, End, Comp);
+  // Move Pivot to End.
+  std::swap(*(End - 1), *Pivot);
+  Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {
+    return Comp(V, *(End - 1));
+  });
+  // Move Pivot to middle of partition.
+  std::swap(*Pivot, *(End - 1));
+
+  // Recurse.
+  TG.spawn([=, &Comp, &TG] {
+    parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);
+  });
+  parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);
+}
+
+template <class RandomAccessIterator, class Comparator>
+void parallel_sort(RandomAccessIterator Start, RandomAccessIterator End,
+                   const Comparator &Comp) {
+  TaskGroup TG;
+  parallel_quick_sort(Start, End, Comp, TG,
+                      llvm::Log2_64(std::distance(Start, End)) + 1);
+}
+
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+  // TaskGroup has a relatively high overhead, so we want to reduce
+  // the number of spawn() calls. We'll create up to 1024 tasks here.
+  // (Note that 1024 is an arbitrary number. This code probably needs
+  // improving to take the number of available cores into account.)
+  ptrdiff_t TaskSize = std::distance(Begin, End) / 1024;
+  if (TaskSize == 0)
+    TaskSize = 1;
+
+  TaskGroup TG;
+  while (TaskSize < std::distance(Begin, End)) {
+    TG.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
+    Begin += TaskSize;
+  }
+  std::for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void parallel_for_each_n(IndexTy Begin, IndexTy End, FuncTy Fn) {
+  ptrdiff_t TaskSize = (End - Begin) / 1024;
+  if (TaskSize == 0)
+    TaskSize = 1;
+
+  TaskGroup TG;
+  IndexTy I = Begin;
+  for (; I + TaskSize < End; I += TaskSize) {
+    TG.spawn([=, &Fn] {
+      for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
+        Fn(J);
+    });
+  }
+  for (IndexTy J = I; J < End; ++J)
+    Fn(J);
+}
+
+#endif
+
+#endif
+
+template <typename Iter>
+using DefComparator =
+    std::less<typename std::iterator_traits<Iter>::value_type>;
+
+} // namespace detail
+
+// Sequential algorithm implementations.
+template <class Policy, class RandomAccessIterator,
+          class Comparator = detail::DefComparator<RandomAccessIterator>>
+void sort(Policy policy, RandomAccessIterator Start, RandomAccessIterator End,
+          const Comparator &Comp = Comparator()) {
+  static_assert(is_execution_policy<Policy>::value,
+                "Invalid execution policy!");
+  std::sort(Start, End, Comp);
+}
+
+template <class Policy, class IterTy, class FuncTy>
+void for_each(Policy policy, IterTy Begin, IterTy End, FuncTy Fn) {
+  static_assert(is_execution_policy<Policy>::value,
+                "Invalid execution policy!");
+  std::for_each(Begin, End, Fn);
+}
+
+template <class Policy, class IndexTy, class FuncTy>
+void for_each_n(Policy policy, IndexTy Begin, IndexTy End, FuncTy Fn) {
+  static_assert(is_execution_policy<Policy>::value,
+                "Invalid execution policy!");
+  for (IndexTy I = Begin; I != End; ++I)
+    Fn(I);
+}
+
+// Parallel algorithm implementations, only available when LLVM_ENABLE_THREADS
+// is true.
+#if LLVM_ENABLE_THREADS
+template <class RandomAccessIterator,
+          class Comparator = detail::DefComparator<RandomAccessIterator>>
+void sort(parallel_execution_policy policy, RandomAccessIterator Start,
+          RandomAccessIterator End, const Comparator &Comp = Comparator()) {
+  detail::parallel_sort(Start, End, Comp);
+}
+
+template <class IterTy, class FuncTy>
+void for_each(parallel_execution_policy policy, IterTy Begin, IterTy End,
+              FuncTy Fn) {
+  detail::parallel_for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void for_each_n(parallel_execution_policy policy, IndexTy Begin, IndexTy End,
+                FuncTy Fn) {
+  detail::parallel_for_each_n(Begin, End, Fn);
+}
+#endif
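+
+// Example usage of the policy-based entry points (an illustrative sketch; V
+// is assumed to be a std::vector<int>):
+//
+//   parallel::sort(parallel::par, V.begin(), V.end());
+//   parallel::for_each(parallel::par, V.begin(), V.end(),
+//                      [](int &X) { X *= 2; });
+//   parallel::for_each_n(parallel::seq, 0, 100,
+//                        [](int I) { /* use index I */ });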
+
+} // namespace parallel
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_PARALLEL_H
diff --git a/linux-x64/clang/include/llvm/Support/Path.h b/linux-x64/clang/include/llvm/Support/Path.h
new file mode 100644
index 0000000..e597967
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Path.h
@@ -0,0 +1,474 @@
+//===- llvm/Support/Path.h - Path Operating System Concept ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::path namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PATH_H
+#define LLVM_SUPPORT_PATH_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+namespace sys {
+namespace path {
+
+enum class Style { windows, posix, native };
+
+/// @name Lexical Component Iterator
+/// @{
+
+/// @brief Path iterator.
+///
+/// This is an input iterator that iterates over the individual components in
+/// \a path. The traversal order is as follows:
+/// * The root-name element, if present.
+/// * The root-directory element, if present.
+/// * Each successive filename element, if present.
+/// * Dot, if one or more trailing non-root slash characters are present.
+/// Traversing backwards is possible with \a reverse_iterator.
+///
+/// Iteration examples. Each component is separated by ',':
+/// @code
+///   /          => /
+///   /foo       => /,foo
+///   foo/       => foo,.
+///   /foo/bar   => /,foo,bar
+///   ../        => ..,.
+///   C:\foo\bar => C:,/,foo,bar
+/// @endcode
+class const_iterator
+    : public iterator_facade_base<const_iterator, std::input_iterator_tag,
+                                  const StringRef> {
+  StringRef Path;      ///< The entire path.
+  StringRef Component; ///< The current component. Not necessarily in Path.
+  size_t    Position;  ///< The iterator's current position within Path.
+  Style S;             ///< The path style to use.
+
+  // An end iterator has Position = Path.size() + 1.
+  friend const_iterator begin(StringRef path, Style style);
+  friend const_iterator end(StringRef path);
+
+public:
+  reference operator*() const { return Component; }
+  const_iterator &operator++();    // preincrement
+  bool operator==(const const_iterator &RHS) const;
+
+  /// @brief Difference in bytes between this and RHS.
+  ptrdiff_t operator-(const const_iterator &RHS) const;
+};
+
+/// @brief Reverse path iterator.
+///
+/// This is an input iterator that iterates over the individual components in
+/// \a path in reverse order. The traversal order is exactly reversed from that
+/// of \a const_iterator.
+class reverse_iterator
+    : public iterator_facade_base<reverse_iterator, std::input_iterator_tag,
+                                  const StringRef> {
+  StringRef Path;      ///< The entire path.
+  StringRef Component; ///< The current component. Not necessarily in Path.
+  size_t    Position;  ///< The iterator's current position within Path.
+  Style S;             ///< The path style to use.
+
+  friend reverse_iterator rbegin(StringRef path, Style style);
+  friend reverse_iterator rend(StringRef path);
+
+public:
+  reference operator*() const { return Component; }
+  reverse_iterator &operator++();    // preincrement
+  bool operator==(const reverse_iterator &RHS) const;
+
+  /// @brief Difference in bytes between this and RHS.
+  ptrdiff_t operator-(const reverse_iterator &RHS) const;
+};
+
+/// @brief Get begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first component of \a path.
+const_iterator begin(StringRef path, Style style = Style::native);
+
+/// @brief Get end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the end of \a path.
+const_iterator end(StringRef path);
+
+/// @brief Get reverse begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first reverse component of \a path.
+reverse_iterator rbegin(StringRef path, Style style = Style::native);
+
+/// @brief Get reverse end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the reverse end of \a path.
+reverse_iterator rend(StringRef path);
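+
+/// Example: walking the components of a path (an illustrative sketch; the
+/// path value is arbitrary):
+/// @code
+///   StringRef P = "/tmp/foo/bar.txt";
+///   for (auto It = begin(P), E = end(P); It != E; ++It)
+///     errs() << *It << '\n'; // Prints "/", "tmp", "foo", "bar.txt".
+/// @endcode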
+
+/// @}
+/// @name Lexical Modifiers
+/// @{
+
+/// @brief Remove the last component from \a path unless it is the root dir.
+///
+/// @code
+///   directory/filename.cpp => directory/
+///   directory/             => directory
+///   filename.cpp           => <empty>
+///   /                      => /
+/// @endcode
+///
+/// @param path A path that is modified to not have a file component.
+void remove_filename(SmallVectorImpl<char> &path, Style style = Style::native);
+
+/// @brief Replace the file extension of \a path with \a extension.
+///
+/// @code
+///   ./filename.cpp => ./filename.extension
+///   ./filename     => ./filename.extension
+///   ./             => ./.extension
+/// @endcode
+///
+/// @param path A path that has its extension replaced with \a extension.
+/// @param extension The extension to be added. It may be empty. It may also
+///                  optionally start with a '.', if it does not, one will be
+///                  prepended.
+void replace_extension(SmallVectorImpl<char> &path, const Twine &extension,
+                       Style style = Style::native);
+
+/// @brief Replace matching path prefix with another path.
+///
+/// @code
+///   /foo, /old, /new => /foo
+///   /old/foo, /old, /new => /new/foo
+///   /foo, <empty>, /new => /new/foo
+///   /old/foo, /old, <empty> => /foo
+/// @endcode
+///
+/// @param Path If \a Path starts with \a OldPrefix, modify it to instead
+///        start with \a NewPrefix.
+/// @param OldPrefix The path prefix to strip from \a Path.
+/// @param NewPrefix The path prefix to replace \a OldPrefix with.
+void replace_path_prefix(SmallVectorImpl<char> &Path,
+                         const StringRef &OldPrefix, const StringRef &NewPrefix,
+                         Style style = Style::native);
+
+/// @brief Append to path.
+///
+/// @code
+///   /foo  + bar/f => /foo/bar/f
+///   /foo/ + bar/f => /foo/bar/f
+///   foo   + bar/f => foo/bar/f
+/// @endcode
+///
+/// @param path Set to \a path + \a component.
+/// @param a The component to be appended to \a path.
+void append(SmallVectorImpl<char> &path, const Twine &a,
+                                         const Twine &b = "",
+                                         const Twine &c = "",
+                                         const Twine &d = "");
+
+void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
+            const Twine &b = "", const Twine &c = "", const Twine &d = "");
+
+/// @brief Append to path.
+///
+/// @code
+///   /foo  + [bar,f] => /foo/bar/f
+///   /foo/ + [bar,f] => /foo/bar/f
+///   foo   + [bar,f] => foo/bar/f
+/// @endcode
+///
+/// @param path Set to \a path + [\a begin, \a end).
+/// @param begin Start of components to append.
+/// @param end One past the end of components to append.
+void append(SmallVectorImpl<char> &path, const_iterator begin,
+            const_iterator end, Style style = Style::native);
+
+/// @}
+/// @name Transforms (or some other better name)
+/// @{
+
+/// Convert path to the native form. This is used to give paths to users and
+/// operating system calls in the platform's normal way. For example, on Windows
+/// all '/' are converted to '\'.
+///
+/// @param path A path that is transformed to native format.
+/// @param result Holds the result of the transformation.
+void native(const Twine &path, SmallVectorImpl<char> &result,
+            Style style = Style::native);
+
+/// Convert path to the native form in place. This is used to give paths to
+/// users and operating system calls in the platform's normal way. For example,
+/// on Windows all '/' are converted to '\'.
+///
+/// @param path A path that is transformed to native format.
+void native(SmallVectorImpl<char> &path, Style style = Style::native);
+
+/// @brief Replaces backslashes with slashes if Windows.
+///
+/// @param path processed path
+/// @result The result of replacing backslashes with forward slashes on Windows.
+/// On Unix, this function is a no-op because backslashes are valid path
+/// characters.
+std::string convert_to_slash(StringRef path, Style style = Style::native);
+
+/// @}
+/// @name Lexical Observers
+/// @{
+
+/// @brief Get root name.
+///
+/// @code
+///   //net/hello => //net
+///   c:/hello    => c: (on Windows, on other platforms nothing)
+///   /hello      => <empty>
+/// @endcode
+///
+/// @param path Input path.
+/// @result The root name of \a path if it has one, otherwise "".
+StringRef root_name(StringRef path, Style style = Style::native);
+
+/// @brief Get root directory.
+///
+/// @code
+///   /goo/hello => /
+///   c:/hello   => /
+///   d/file.txt => <empty>
+/// @endcode
+///
+/// @param path Input path.
+/// @result The root directory of \a path if it has one, otherwise
+///               "".
+StringRef root_directory(StringRef path, Style style = Style::native);
+
+/// @brief Get root path.
+///
+/// Equivalent to root_name + root_directory.
+///
+/// @param path Input path.
+/// @result The root path of \a path if it has one, otherwise "".
+StringRef root_path(StringRef path, Style style = Style::native);
+
+/// @brief Get relative path.
+///
+/// @code
+///   C:\hello\world => hello\world
+///   foo/bar        => foo/bar
+///   /foo/bar       => foo/bar
+/// @endcode
+///
+/// @param path Input path.
+/// @result The path starting after root_path if one exists, otherwise "".
+StringRef relative_path(StringRef path, Style style = Style::native);
+
+/// @brief Get parent path.
+///
+/// @code
+///   /          => <empty>
+///   /foo       => /
+///   foo/../bar => foo/..
+/// @endcode
+///
+/// @param path Input path.
+/// @result The parent path of \a path if one exists, otherwise "".
+StringRef parent_path(StringRef path, Style style = Style::native);
+
+/// @brief Get filename.
+///
+/// @code
+///   /foo.txt    => foo.txt
+///   .          => .
+///   ..         => ..
+///   /          => /
+/// @endcode
+///
+/// @param path Input path.
+/// @result The filename part of \a path. This is defined as the last component
+///         of \a path.
+StringRef filename(StringRef path, Style style = Style::native);
+
+/// @brief Get stem.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename ending at (but not including) the last dot. Otherwise
+/// it is filename.
+///
+/// @code
+///   /foo/bar.txt => bar
+///   /foo/bar     => bar
+///   /foo/.txt    => <empty>
+///   /foo/.       => .
+///   /foo/..      => ..
+/// @endcode
+///
+/// @param path Input path.
+/// @result The stem of \a path.
+StringRef stem(StringRef path, Style style = Style::native);
+
+/// @brief Get extension.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename starting at (and including) the last dot, and ending
+/// at the end of \a path. Otherwise "".
+///
+/// @code
+///   /foo/bar.txt => .txt
+///   /foo/bar     => <empty>
+///   /foo/.txt    => .txt
+/// @endcode
+///
+/// @param path Input path.
+/// @result The extension of \a path.
+StringRef extension(StringRef path, Style style = Style::native);
+
+/// @brief Check whether the given char is a path separator on the host OS.
+///
+/// @param value a character
+/// @result true if \a value is a path separator character on the host OS
+bool is_separator(char value, Style style = Style::native);
+
+/// @brief Return the preferred separator for this platform.
+///
+/// @result StringRef of the preferred separator, null-terminated.
+StringRef get_separator(Style style = Style::native);
+
+/// @brief Get the typical temporary directory for the system, e.g.,
+/// "/var/tmp" or "C:/TEMP"
+///
+/// @param erasedOnReboot Whether to favor a path that is erased on reboot
+/// rather than one that potentially persists longer. This parameter will be
+/// ignored if the user or system has set the typical environment variable
+/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
+///
+/// @param result Holds the resulting path name.
+void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
+
+/// @brief Get the user's home directory.
+///
+/// @param result Holds the resulting path name.
+/// @result True if a home directory is set, false otherwise.
+bool home_directory(SmallVectorImpl<char> &result);
+
+/// @brief Get the user's cache directory.
+///
+/// Expect the resulting path to be a directory shared with other
+/// applications/services used by the user. Params \p Path1 to \p Path3 can be
+/// used to append additional directory names to the resulting path. Recommended
+/// pattern is <user_cache_directory>/<vendor>/<application>.
+///
+/// @param Result Holds the resulting path.
+/// @param Path1 Additional path to be appended to the user's cache directory
+/// path. "" can be used to append nothing.
+/// @param Path2 Second additional path to be appended.
+/// @param Path3 Third additional path to be appended.
+/// @result True if a cache directory path is set, false otherwise.
+bool user_cache_directory(SmallVectorImpl<char> &Result, const Twine &Path1,
+                          const Twine &Path2 = "", const Twine &Path3 = "");
+
+/// @brief Has root name?
+///
+/// root_name != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root name, false otherwise.
+bool has_root_name(const Twine &path, Style style = Style::native);
+
+/// @brief Has root directory?
+///
+/// root_directory != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root directory, false otherwise.
+bool has_root_directory(const Twine &path, Style style = Style::native);
+
+/// @brief Has root path?
+///
+/// root_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root path, false otherwise.
+bool has_root_path(const Twine &path, Style style = Style::native);
+
+/// @brief Has relative path?
+///
+/// relative_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a relative path, false otherwise.
+bool has_relative_path(const Twine &path, Style style = Style::native);
+
+/// @brief Has parent path?
+///
+/// parent_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a parent path, false otherwise.
+bool has_parent_path(const Twine &path, Style style = Style::native);
+
+/// @brief Has filename?
+///
+/// filename != ""
+///
+/// @param path Input path.
+/// @result True if the path has a filename, false otherwise.
+bool has_filename(const Twine &path, Style style = Style::native);
+
+/// @brief Has stem?
+///
+/// stem != ""
+///
+/// @param path Input path.
+/// @result True if the path has a stem, false otherwise.
+bool has_stem(const Twine &path, Style style = Style::native);
+
+/// @brief Has extension?
+///
+/// extension != ""
+///
+/// @param path Input path.
+/// @result True if the path has an extension, false otherwise.
+bool has_extension(const Twine &path, Style style = Style::native);
+
+/// @brief Is path absolute?
+///
+/// @param path Input path.
+/// @result True if the path is absolute, false if it is not.
+bool is_absolute(const Twine &path, Style style = Style::native);
+
+/// @brief Is path relative?
+///
+/// @param path Input path.
+/// @result True if the path is relative, false if it is not.
+bool is_relative(const Twine &path, Style style = Style::native);
+
+/// @brief Remove redundant leading "./" pieces and consecutive separators.
+///
+/// @param path Input path.
+/// @result The cleaned-up \a path.
+StringRef remove_leading_dotslash(StringRef path, Style style = Style::native);
+
+/// @brief In-place remove any './' and optionally '../' components from a path.
+///
+/// @param path processed path
+/// @param remove_dot_dot specify if '../' (except for leading "../") should be
+/// removed
+/// @result True if path was changed
+bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false,
+                 Style style = Style::native);
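+
+/// Example: decomposing a POSIX-style path with the observers above (an
+/// illustrative sketch; the path value is arbitrary):
+/// @code
+///   StringRef P = "/home/user/file.txt";
+///   root_path(P, Style::posix);   // "/"
+///   parent_path(P, Style::posix); // "/home/user"
+///   filename(P, Style::posix);    // "file.txt"
+///   stem(P, Style::posix);        // "file"
+///   extension(P, Style::posix);   // ".txt"
+/// @endcode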
+
+} // end namespace path
+} // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/PluginLoader.h b/linux-x64/clang/include/llvm/Support/PluginLoader.h
new file mode 100644
index 0000000..bdbb134
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/PluginLoader.h
@@ -0,0 +1,37 @@
+//===-- llvm/Support/PluginLoader.h - Plugin Loader for Tools ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A tool can #include this file to get a -load option that allows the user to
+// load arbitrary shared objects into the tool's address space.  Note that this
+// header can only be included by a program ONCE, so it should never be used by
+// library authors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PLUGINLOADER_H
+#define LLVM_SUPPORT_PLUGINLOADER_H
+
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+  struct PluginLoader {
+    void operator=(const std::string &Filename);
+    static unsigned getNumPlugins();
+    static std::string& getPlugin(unsigned num);
+  };
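+
+  // Usage sketch (illustrative): a tool includes this header exactly once,
+  // typically in the file that defines main():
+  //
+  //   #include "llvm/Support/PluginLoader.h"
+  //   int main(int argc, char **argv) {
+  //     cl::ParseCommandLineOptions(argc, argv);
+  //     // Every -load=<plugin> seen above has already been loaded via
+  //     // PluginLoader::operator=.
+  //   }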
+
+#ifndef DONT_GET_PLUGIN_LOADER_OPTION
+  // This causes operator= above to be invoked for every -load option.
+  static cl::opt<PluginLoader, false, cl::parser<std::string> >
+    LoadOpt("load", cl::ZeroOrMore, cl::value_desc("pluginfilename"),
+            cl::desc("Load the specified plugin"));
+#endif
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/PointerLikeTypeTraits.h b/linux-x64/clang/include/llvm/Support/PointerLikeTypeTraits.h
new file mode 100644
index 0000000..794230d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/PointerLikeTypeTraits.h
@@ -0,0 +1,116 @@
+//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerLikeTypeTraits class.  This allows data
+// structures to reason about pointers and other things that are pointer sized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+
+#include "llvm/Support/DataTypes.h"
+#include <type_traits>
+
+namespace llvm {
+
+/// A traits type that is used to handle pointer types and things that are just
+/// wrappers for pointers as a uniform entity.
+template <typename T> struct PointerLikeTypeTraits;
+
+namespace detail {
+/// A tiny meta function to compute the log2 of a compile time constant.
+template <size_t N>
+struct ConstantLog2
+    : std::integral_constant<size_t, ConstantLog2<N / 2>::value + 1> {};
+template <> struct ConstantLog2<1> : std::integral_constant<size_t, 0> {};
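+
+// For example, ConstantLog2<8>::value is 3; PointerLikeTypeTraits<T *> below
+// uses this to turn alignof(T) into the number of known-zero low bits.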
+
+// Provide a trait to check if T is pointer-like.
+template <typename T, typename U = void> struct HasPointerLikeTypeTraits {
+  static const bool value = false;
+};
+
+// sizeof(T) is valid only for a complete T.
+template <typename T> struct HasPointerLikeTypeTraits<
+  T, decltype((sizeof(PointerLikeTypeTraits<T>) + sizeof(T)), void())> {
+  static const bool value = true;
+};
+
+template <typename T> struct IsPointerLike {
+  static const bool value = HasPointerLikeTypeTraits<T>::value;
+};
+
+template <typename T> struct IsPointerLike<T *> {
+  static const bool value = true;
+};
+} // namespace detail
+
+// Provide PointerLikeTypeTraits for non-cvr pointers.
+template <typename T> struct PointerLikeTypeTraits<T *> {
+  static inline void *getAsVoidPointer(T *P) { return P; }
+  static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }
+
+  enum { NumLowBitsAvailable = detail::ConstantLog2<alignof(T)>::value };
+};
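+
+// A specialization for a user-defined pointer-like wrapper might look like
+// this sketch (MyHandle and its members are hypothetical, not part of LLVM):
+//
+//   template <> struct PointerLikeTypeTraits<MyHandle> {
+//     static void *getAsVoidPointer(MyHandle H) { return H.getOpaque(); }
+//     static MyHandle getFromVoidPointer(void *P) { return MyHandle(P); }
+//     enum { NumLowBitsAvailable = 2 }; // MyHandle guarantees 4-byte alignment
+//   };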
+
+template <> struct PointerLikeTypeTraits<void *> {
+  static inline void *getAsVoidPointer(void *P) { return P; }
+  static inline void *getFromVoidPointer(void *P) { return P; }
+
+  /// Note, we assume here that void* is related to raw malloc'ed memory and
+  /// that malloc returns objects at least 4-byte aligned. However, this may be
+  /// wrong, or pointers may be from something other than malloc. In this case,
+  /// you should specify a real typed pointer or avoid this template.
+  ///
+  /// All clients should use assertions to do a run-time check to ensure that
+  /// this is actually true.
+  enum { NumLowBitsAvailable = 2 };
+};
+
+// Provide PointerLikeTypeTraits for const things.
+template <typename T> struct PointerLikeTypeTraits<const T> {
+  typedef PointerLikeTypeTraits<T> NonConst;
+
+  static inline const void *getAsVoidPointer(const T P) {
+    return NonConst::getAsVoidPointer(P);
+  }
+  static inline const T getFromVoidPointer(const void *P) {
+    return NonConst::getFromVoidPointer(const_cast<void *>(P));
+  }
+  enum { NumLowBitsAvailable = NonConst::NumLowBitsAvailable };
+};
+
+// Provide PointerLikeTypeTraits for const pointers.
+template <typename T> struct PointerLikeTypeTraits<const T *> {
+  typedef PointerLikeTypeTraits<T *> NonConst;
+
+  static inline const void *getAsVoidPointer(const T *P) {
+    return NonConst::getAsVoidPointer(const_cast<T *>(P));
+  }
+  static inline const T *getFromVoidPointer(const void *P) {
+    return NonConst::getFromVoidPointer(const_cast<void *>(P));
+  }
+  enum { NumLowBitsAvailable = NonConst::NumLowBitsAvailable };
+};
+
+// Provide PointerLikeTypeTraits for uintptr_t.
+template <> struct PointerLikeTypeTraits<uintptr_t> {
+  static inline void *getAsVoidPointer(uintptr_t P) {
+    return reinterpret_cast<void *>(P);
+  }
+  static inline uintptr_t getFromVoidPointer(void *P) {
+    return reinterpret_cast<uintptr_t>(P);
+  }
+  // No bits are available!
+  enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/PrettyStackTrace.h b/linux-x64/clang/include/llvm/Support/PrettyStackTrace.h
new file mode 100644
index 0000000..4d64fe4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/PrettyStackTrace.h
@@ -0,0 +1,96 @@
+//===- llvm/Support/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PrettyStackTraceEntry class, which is used to make
+// crashes give more contextual information about what the program was doing
+// when it crashed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
+#define LLVM_SUPPORT_PRETTYSTACKTRACE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+  class raw_ostream;
+
+  void EnablePrettyStackTrace();
+
+  /// PrettyStackTraceEntry - This class is used to represent a frame of the
+  /// "pretty" stack trace that is dumped when a program crashes. You can define
+  /// subclasses of this and declare them on the program stack: when they are
+  /// constructed and destructed, they will add their symbolic frames to a
+  /// virtual stack trace.  This gets dumped out if the program crashes.
+  class PrettyStackTraceEntry {
+    friend PrettyStackTraceEntry *ReverseStackTrace(PrettyStackTraceEntry *);
+
+    PrettyStackTraceEntry *NextEntry;
+    PrettyStackTraceEntry(const PrettyStackTraceEntry &) = delete;
+    void operator=(const PrettyStackTraceEntry &) = delete;
+  public:
+    PrettyStackTraceEntry();
+    virtual ~PrettyStackTraceEntry();
+
+    /// print - Emit information about this stack frame to OS.
+    virtual void print(raw_ostream &OS) const = 0;
+
+    /// getNextEntry - Return the next entry in the list of frames.
+    const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; }
+  };
+
+  /// PrettyStackTraceString - This object prints a specified string (which
+  /// should not contain newlines) to the stream as the stack trace when a crash
+  /// occurs.
+  class PrettyStackTraceString : public PrettyStackTraceEntry {
+    const char *Str;
+  public:
+    PrettyStackTraceString(const char *str) : Str(str) {}
+    void print(raw_ostream &OS) const override;
+  };
+
+  /// PrettyStackTraceFormat - This object prints a string (which may use
+  /// printf-style formatting but should not contain newlines) to the stream
+  /// as the stack trace when a crash occurs.
+  class PrettyStackTraceFormat : public PrettyStackTraceEntry {
+    llvm::SmallVector<char, 32> Str;
+  public:
+    PrettyStackTraceFormat(const char *Format, ...);
+    void print(raw_ostream &OS) const override;
+  };
+
+  /// PrettyStackTraceProgram - This object prints the specified program's
+  /// arguments to the stream as the stack trace when a crash occurs.
+  class PrettyStackTraceProgram : public PrettyStackTraceEntry {
+    int ArgC;
+    const char *const *ArgV;
+  public:
+    PrettyStackTraceProgram(int argc, const char * const*argv)
+      : ArgC(argc), ArgV(argv) {
+      EnablePrettyStackTrace();
+    }
+    void print(raw_ostream &OS) const override;
+  };
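+
+  // Typical use (illustrative sketch): declare entries on the stack so that a
+  // crash anywhere below includes them in the dump:
+  //
+  //   int main(int argc, char **argv) {
+  //     PrettyStackTraceProgram X(argc, argv);
+  //     PrettyStackTraceString Y("parsing the input file");
+  //     ...  // a crash here dumps both frames
+  //   }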
+
+  /// Returns the topmost element of the "pretty" stack state.
+  const void *SavePrettyStackState();
+
+  /// Restores the topmost element of the "pretty" stack state to State, which
+  /// should come from a previous call to SavePrettyStackState().  This is
+  /// useful when using a CrashRecoveryContext in code that also uses
+  /// PrettyStackTraceEntries, to make sure the stack that's printed if a crash
+  /// happens after a crash that's been recovered by CrashRecoveryContext
+  /// doesn't have frames on it that were added in code unwound by the
+  /// CrashRecoveryContext.
+  void RestorePrettyStackState(const void *State);
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Printable.h b/linux-x64/clang/include/llvm/Support/Printable.h
new file mode 100644
index 0000000..cb55d41
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Printable.h
@@ -0,0 +1,52 @@
+//===--- Printable.h - Print function helpers -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the Printable struct.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PRINTABLE_H
+#define LLVM_SUPPORT_PRINTABLE_H
+
+#include <functional>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// Simple wrapper around std::function<void(raw_ostream&)>.
+/// This class is useful to construct print helpers for raw_ostream.
+///
+/// Example:
+///     Printable PrintRegister(unsigned Register) {
+///       return Printable([Register](raw_ostream &OS) {
+///         OS << getRegisterName(Register);
+///       });
+///     }
+///     ... OS << PrintRegister(Register); ...
+///
+/// Implementation note: Ideally this would just be a typedef, but doing so
+/// leads to operator << being ambiguous as std::function has matching
+/// constructors in some STL versions. I have seen the problem on gcc 4.6's
+/// libstdc++ and the Microsoft STL.
+class Printable {
+public:
+  std::function<void(raw_ostream &OS)> Print;
+  Printable(std::function<void(raw_ostream &OS)> Print)
+      : Print(std::move(Print)) {}
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Printable &P) {
+  P.Print(OS);
+  return OS;
+}
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Process.h b/linux-x64/clang/include/llvm/Support/Process.h
new file mode 100644
index 0000000..82b0d9f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Process.h
@@ -0,0 +1,200 @@
+//===- llvm/Support/Process.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Provides a library for accessing information about this process and other
+/// processes on the operating system. It also provides a means of spawning
+/// subprocesses for commands. The design of this library is modeled after the
+/// proposed design of the Boost.Process library, and is designed specifically
+/// to follow the style of standard libraries and potentially become a proposal
+/// for a standard library.
+///
+/// This file declares the llvm::sys::Process class which contains a collection
+/// of legacy static interfaces for extracting various information about the
+/// current process. The goal is to migrate users of this API over to the new
+/// interfaces.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PROCESS_H
+#define LLVM_SUPPORT_PROCESS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/DataTypes.h"
+#include <system_error>
+
+namespace llvm {
+template <typename T> class ArrayRef;
+class StringRef;
+
+namespace sys {
+
+
+/// \brief A collection of legacy interfaces for querying information about the
+/// current executing process.
+class Process {
+public:
+  static unsigned getPageSize();
+
+  /// \brief Return process memory usage.
+  /// This static function will return the total amount of memory allocated
+  /// by the process. This only counts the memory allocated via the malloc,
+  /// calloc and realloc functions and includes any "free" holes in the
+  /// allocated space.
+  static size_t GetMallocUsage();
+
+  /// This static function will set \p user_time to the amount of CPU time
+  /// spent in user (non-kernel) mode and \p sys_time to the amount of CPU
+  /// time spent in system (kernel) mode.  If the operating system does not
+  /// support collection of these metrics, a zero duration will be returned
+  /// for both values.
+  /// \param elapsed Returns the system_clock::now() giving current time
+  /// \param user_time Returns the current amount of user time for the process
+  /// \param sys_time Returns the current amount of system time for the process
+  static void GetTimeUsage(TimePoint<> &elapsed,
+                           std::chrono::nanoseconds &user_time,
+                           std::chrono::nanoseconds &sys_time);
+
+  /// This function makes the necessary calls to the operating system to
+  /// prevent core files or any other kind of large memory dumps that can
+  /// occur when a program fails.
+  /// @brief Prevent core file generation.
+  static void PreventCoreFiles();
+
+  /// \brief true if PreventCoreFiles has been called, false otherwise.
+  static bool AreCoreFilesPrevented();
+
+  /// This function returns the environment variable \arg name's value as a
+  /// UTF-8 string. \arg name is assumed to be in UTF-8 encoding too.
+  static Optional<std::string> GetEnv(StringRef name);
+
+  /// This function searches for an existing file in the list of directories
+  /// in a PATH like environment variable, and returns the first file found,
+  /// according to the order of the entries in the PATH like environment
+  /// variable.  If an ignore list is specified, then any folder which is in
+  /// the PATH like environment variable but is also in IgnoreList is not
+  /// considered.
+  static Optional<std::string> FindInEnvPath(StringRef EnvName,
+                                             StringRef FileName,
+                                             ArrayRef<std::string> IgnoreList);
+
+  static Optional<std::string> FindInEnvPath(StringRef EnvName,
+                                             StringRef FileName);
+
+  /// This function returns a SmallVector containing the arguments passed from
+  /// the operating system to the program.  This function expects to be handed
+  /// the vector passed in from main.
+  static std::error_code
+  GetArgumentVector(SmallVectorImpl<const char *> &Args,
+                    ArrayRef<const char *> ArgsFromMain,
+                    SpecificBumpPtrAllocator<char> &ArgAllocator);
+
+  /// This function ensures that the standard file descriptors (input, output,
+  /// and error) are properly mapped to a file descriptor before we use any of
+  /// them.  This should only be called by standalone programs; library
+  /// components should not call this.
+  static std::error_code FixupStandardFileDescriptors();
+
+  /// This function safely closes a file descriptor.  It is not safe to retry
+  /// close(2) when it returns with errno equivalent to EINTR; this is because
+  /// *nixen cannot agree if the file descriptor is, in fact, closed when this
+  /// occurs.
+  ///
+  /// N.B. Some operating systems, due to thread cancellation, cannot properly
+  /// guarantee that it will or will not be closed one way or the other!
+  static std::error_code SafelyCloseFileDescriptor(int FD);
+
+  /// This function determines if the standard input is connected directly
+  /// to a user's input (keyboard probably), rather than coming from a file
+  /// or pipe.
+  static bool StandardInIsUserInput();
+
+  /// This function determines if the standard output is connected to a
+  /// "tty" or "console" window. That is, the output would be displayed to
+  /// the user rather than being put on a pipe or stored in a file.
+  static bool StandardOutIsDisplayed();
+
+  /// This function determines if the standard error is connected to a
+  /// "tty" or "console" window. That is, the output would be displayed to
+  /// the user rather than being put on a pipe or stored in a file.
+  static bool StandardErrIsDisplayed();
+
+  /// This function determines if the given file descriptor is connected to
+  /// a "tty" or "console" window. That is, the output would be displayed to
+  /// the user rather than being put on a pipe or stored in a file.
+  static bool FileDescriptorIsDisplayed(int fd);
+
+  /// This function determines if the given file descriptor is displayed and
+  /// supports colors.
+  static bool FileDescriptorHasColors(int fd);
+
+  /// This function determines the number of columns in the window
+  /// if standard output is connected to a "tty" or "console"
+  /// window. If standard output is not connected to a tty or
+  /// console, or if the number of columns cannot be determined,
+  /// this routine returns zero.
+  static unsigned StandardOutColumns();
+
+  /// This function determines the number of columns in the window
+  /// if standard error is connected to a "tty" or "console"
+  /// window. If standard error is not connected to a tty or
+  /// console, or if the number of columns cannot be determined,
+  /// this routine returns zero.
+  static unsigned StandardErrColumns();
+
+  /// This function determines whether the terminal connected to standard
+  /// output supports colors. If standard output is not connected to a
+  /// terminal, this function returns false.
+  static bool StandardOutHasColors();
+
+  /// This function determines whether the terminal connected to standard
+  /// error supports colors. If standard error is not connected to a
+  /// terminal, this function returns false.
+  static bool StandardErrHasColors();
+
+  /// Enables or disables whether ANSI escape sequences are used to output
+  /// colors. This only has an effect on Windows.
+  /// Note: Setting this option is not thread-safe and should only be done
+  /// during initialization.
+  static void UseANSIEscapeCodes(bool enable);
+
+  /// Whether changing colors requires the output to be flushed.
+  /// This is needed on systems that don't support escape sequences for
+  /// changing colors.
+  static bool ColorNeedsFlush();
+
+  /// This function returns the colorcode escape sequences.
+  /// If ColorNeedsFlush() is true then this function will change the colors
+  /// and return an empty escape sequence. In that case it is the
+  /// responsibility of the client to flush the output stream prior to
+  /// calling this function.
+  static const char *OutputColor(char c, bool bold, bool bg);
+
+  /// Same as OutputColor, but only enables the bold attribute.
+  static const char *OutputBold(bool bg);
+
+  /// This function returns the escape sequence to reverse foreground and
+  /// background colors.
+  static const char *OutputReverse();
+
+  /// Resets the terminal's colors, or returns an escape sequence to do so.
+  static const char *ResetColor();
+
+  /// Get the result of a process wide random number generator. The
+  /// generator will be automatically seeded in non-deterministic fashion.
+  static unsigned GetRandomNumber();
+};
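+
+// Illustrative use of the query interfaces above (a sketch, not part of the
+// original header):
+//
+//   if (Optional<std::string> Path = sys::Process::GetEnv("PATH"))
+//     ; // use *Path
+//   bool UseColor = sys::Process::StandardOutHasColors();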
+
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Program.h b/linux-x64/clang/include/llvm/Support/Program.h
new file mode 100644
index 0000000..06fd350
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Program.h
@@ -0,0 +1,197 @@
+//===- llvm/Support/Program.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Program class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PROGRAM_H
+#define LLVM_SUPPORT_PROGRAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorOr.h"
+#include <system_error>
+
+namespace llvm {
+namespace sys {
+
+  /// This is the OS-specific separator for PATH like environment variables:
+  /// a colon on Unix or a semicolon on Windows.
+#if defined(LLVM_ON_UNIX)
+  const char EnvPathSeparator = ':';
+#elif defined (LLVM_ON_WIN32)
+  const char EnvPathSeparator = ';';
+#endif
+
+/// @brief This struct encapsulates information about a process.
+struct ProcessInfo {
+#if defined(LLVM_ON_UNIX)
+  typedef pid_t ProcessId;
+#elif defined(LLVM_ON_WIN32)
+  typedef unsigned long ProcessId; // Must match the type of DWORD on Windows.
+  typedef void * HANDLE; // Must match the type of HANDLE on Windows.
+  /// The handle to the process (available on Windows only).
+  HANDLE ProcessHandle;
+#else
+#error "ProcessInfo is not defined for this platform!"
+#endif
+
+  enum : ProcessId { InvalidPid = 0 };
+
+  /// The process identifier.
+  ProcessId Pid;
+
+  /// The return code, set after execution.
+  int ReturnCode;
+
+  ProcessInfo();
+};
+
+  /// \brief Find the first executable file \p Name in \p Paths.
+  ///
+  /// This does not perform hashing as a shell would but instead stats each PATH
+  /// entry individually so should generally be avoided. Core LLVM library
+  /// functions and options should instead require fully specified paths.
+  ///
+  /// \param Name name of the executable to find. If it contains any path
+  ///   separators, it will be returned as is.
+  /// \param Paths optional list of paths to search for \p Name. If empty it
+  ///   will use the system PATH environment instead.
+  ///
+  /// \returns The fully qualified path to the first \p Name in \p Paths if it
+  ///   exists. \p Name if \p Name has slashes in it. Otherwise an error.
+  ErrorOr<std::string>
+  findProgramByName(StringRef Name, ArrayRef<StringRef> Paths = {});
+
+  // These functions change the specified standard stream (stdin or stdout) to
+  // binary mode. They return errc::success if the specified stream
+  // was changed. Otherwise a platform dependent error is returned.
+  std::error_code ChangeStdinToBinary();
+  std::error_code ChangeStdoutToBinary();
+
+  /// This function executes the program using the arguments provided.  The
+  /// invoked program will inherit the stdin, stdout, and stderr file
+  /// descriptors, the environment and other configuration settings of the
+  /// invoking program.
+  /// This function waits for the program to finish, so should be avoided in
+  /// library functions that aren't expected to block. Consider using
+  /// ExecuteNoWait() instead.
+  /// \returns an integer result code indicating the status of the program.
+  /// A zero or positive value indicates the result code of the program.
+  /// -1 indicates failure to execute
+  /// -2 indicates a crash during execution or timeout
+  int ExecuteAndWait(
+      StringRef Program, ///< Path of the program to be executed. It is
+      ///< presumed this is the result of the findProgramByName method.
+      const char **Args, ///< A vector of strings that are passed to the
+      ///< program.  The first element should be the name of the program.
+      ///< The list *must* be terminated by a null char* entry.
+      const char **Env = nullptr, ///< An optional vector of strings to use for
+      ///< the program's environment. If not provided, the current program's
+      ///< environment will be used.
+      ArrayRef<Optional<StringRef>> Redirects = {}, ///<
+      ///< An array of optional paths. Should have a size of zero or three.
+      ///< If the array is empty, no redirections are performed.
+      ///< Otherwise, the inferior process's stdin(0), stdout(1), and stderr(2)
+      ///< will be redirected to the corresponding paths, if the optional path
+      ///< is present (not \c llvm::None).
+      ///< When an empty path is passed in, the corresponding file descriptor
+      ///< will be disconnected (ie, /dev/null'd) in a portable way.
+      unsigned SecondsToWait = 0, ///< If non-zero, this specifies the amount
+      ///< of time to wait for the child process to exit. If the time
+      ///< expires, the child is killed and this call returns. If zero,
+      ///< this function will wait until the child finishes or forever if
+      ///< it doesn't.
+      unsigned MemoryLimit = 0, ///< If non-zero, this specifies the maximum
+      ///< amount of memory that can be allocated by the process. If memory
+      ///< usage exceeds this limit, the child is killed and this call
+      ///< returns. If zero, no memory limit is imposed.
+      std::string *ErrMsg = nullptr, ///< If non-null, provides a pointer to a
+      ///< string instance in which error messages will be returned. If the
+      ///< string is non-empty upon return an error occurred while invoking the
+      ///< program.
+      bool *ExecutionFailed = nullptr);
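+
+  // Call sketch (illustrative): run a program found on PATH and inspect the
+  // result code; note that Args must be terminated by a null entry:
+  //
+  //   const char *Args[] = {"clang", "--version", nullptr};
+  //   if (ErrorOr<std::string> P = findProgramByName("clang")) {
+  //     std::string Err;
+  //     int RC = ExecuteAndWait(*P, Args, nullptr, {}, 0, 0, &Err);
+  //     // RC of -1/-2 signals failure to execute or a crash/timeout.
+  //   }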
+
+  /// Similar to ExecuteAndWait, but returns immediately.
+  /// @returns The \see ProcessInfo of the newly launched process.
+  /// \note On Microsoft Windows systems, users will need to either call
+  /// \see Wait until the process has finished executing or call the win32
+  /// CloseHandle() API on ProcessInfo.ProcessHandle to avoid memory leaks.
+  ProcessInfo ExecuteNoWait(StringRef Program, const char **Args,
+                            const char **Env = nullptr,
+                            ArrayRef<Optional<StringRef>> Redirects = {},
+                            unsigned MemoryLimit = 0,
+                            std::string *ErrMsg = nullptr,
+                            bool *ExecutionFailed = nullptr);
+
+  /// Return true if the given arguments fit within system-specific
+  /// argument length limits.
+  bool commandLineFitsWithinSystemLimits(StringRef Program,
+                                         ArrayRef<const char *> Args);
+
+  /// File encoding options when writing contents that a non-UTF8 tool will
+  /// read (on Windows systems). For UNIX, we always use UTF-8.
+  enum WindowsEncodingMethod {
+    /// UTF-8 is the LLVM native encoding, being the same as "do not perform
+    /// encoding conversion".
+    WEM_UTF8,
+    WEM_CurrentCodePage,
+    WEM_UTF16
+  };
+
+  /// Saves the UTF8-encoded \p Contents string into the file \p FileName
+  /// using a specific encoding.
+  ///
+  /// This write file function adds the possibility to choose which encoding
+  /// to use when writing a text file. On Windows, this is important when
+  /// writing files with internationalization support with an encoding that is
+  /// different from the one used in LLVM (UTF-8). We use this when writing
+  /// response files, since GCC tools on MinGW only understand legacy code
+  /// pages, and VisualStudio tools only understand UTF-16.
+  /// For UNIX, using different encodings is silently ignored, since all tools
+  /// work well with UTF-8.
+  /// This function assumes that you only use UTF-8 *text* data and will convert
+  /// it to your desired encoding before writing to the file.
+  ///
+  /// FIXME: We use WEM_CurrentCodePage to write response files for GNU tools
+  /// a MinGW/MinGW-w64 environment, which has serious flaws but currently is
+  /// our best shot to make gcc/ld understand international characters. This
+  /// should be changed as soon as binutils fix this to support UTF16 on mingw.
+  ///
+  /// \returns non-zero error_code if failed
+  std::error_code
+  writeFileWithEncoding(StringRef FileName, StringRef Contents,
+                        WindowsEncodingMethod Encoding = WEM_UTF8);
+
+  /// This function waits for the process specified by \p PI to finish.
+  /// \returns A \see ProcessInfo struct with Pid set to:
+  /// \li The process id of the child process if the child process has changed
+  /// state.
+  /// \li 0 if the child process has not changed state.
+  /// \note Users of this function should always check the ReturnCode member of
+  /// the \see ProcessInfo returned from this function.
+  ProcessInfo Wait(
+      const ProcessInfo &PI, ///< The child process that should be waited on.
+      unsigned SecondsToWait, ///< If non-zero, this specifies the amount of
+      ///< time to wait for the child process to exit. If the time expires, the
+      ///< child is killed and this function returns. If zero, this function
+      ///< will perform a non-blocking wait on the child process.
+      bool WaitUntilTerminates, ///< If true, ignores \p SecondsToWait and waits
+      ///< until child has terminated.
+      std::string *ErrMsg = nullptr ///< If non-null, provides a pointer to a
+      ///< string instance in which error messages will be returned. If the
+      ///< string is non-empty upon return an error occurred while invoking the
+      ///< program.
+      );
+  }
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/RWMutex.h b/linux-x64/clang/include/llvm/Support/RWMutex.h
new file mode 100644
index 0000000..85f4fc0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/RWMutex.h
@@ -0,0 +1,179 @@
+//===- RWMutex.h - Reader/Writer Mutual Exclusion Lock ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::RWMutex class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RWMUTEX_H
+#define LLVM_SUPPORT_RWMUTEX_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm {
+namespace sys {
+
+    /// @brief Platform agnostic RWMutex class.
+    class RWMutexImpl
+    {
+    /// @name Constructors
+    /// @{
+    public:
+
+      /// Initializes the lock but doesn't acquire it.
+      /// @brief Default Constructor.
+      explicit RWMutexImpl();
+
+    /// @}
+    /// @name Do Not Implement
+    /// @{
+      RWMutexImpl(const RWMutexImpl & original) = delete;
+      RWMutexImpl &operator=(const RWMutexImpl &) = delete;
+    /// @}
+
+      /// Releases and removes the lock
+      /// @brief Destructor
+      ~RWMutexImpl();
+
+    /// @}
+    /// @name Methods
+    /// @{
+    public:
+
+      /// Attempts to unconditionally acquire the lock in reader mode. If the
+      /// lock is held by a writer, this method will wait until it can acquire
+      /// the lock.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally acquire the lock in reader mode.
+      bool reader_acquire();
+
+      /// Attempts to release the lock in reader mode.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally release the lock in reader mode.
+      bool reader_release();
+
+      /// Attempts to unconditionally acquire the lock in writer mode. If the
+      /// lock is held by any readers or another writer, this method will wait
+      /// until it can acquire the lock.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally acquire the lock in writer mode.
+      bool writer_acquire();
+
+      /// Attempts to release the lock in writer mode.
+      /// @returns false if any kind of error occurs, true otherwise.
+      /// @brief Unconditionally release the lock in writer mode.
+      bool writer_release();
+
+    //@}
+    /// @name Platform Dependent Data
+    /// @{
+    private:
+#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
+      void* data_ = nullptr; ///< We don't know what the data will be
+#endif
+    };
+
+    /// SmartRWMutex - An R/W mutex with a compile time constant parameter that
+    /// indicates whether this mutex should become a no-op when we're not
+    /// running in multithreaded mode.
+    template<bool mt_only>
+    class SmartRWMutex {
+      RWMutexImpl impl;
+      unsigned readers = 0;
+      unsigned writers = 0;
+
+    public:
+      explicit SmartRWMutex() = default;
+      SmartRWMutex(const SmartRWMutex<mt_only> & original) = delete;
+      SmartRWMutex<mt_only> &operator=(const SmartRWMutex<mt_only> &) = delete;
+
+      bool lock_shared() {
+        if (!mt_only || llvm_is_multithreaded())
+          return impl.reader_acquire();
+
+        // Single-threaded debugging code.  This would be racy in multithreaded
+        // mode, but provides some sanity checks in single-threaded mode.
+        ++readers;
+        return true;
+      }
+
+      bool unlock_shared() {
+        if (!mt_only || llvm_is_multithreaded())
+          return impl.reader_release();
+
+        // Single-threaded debugging code.  This would be racy in multithreaded
+        // mode, but provides some sanity checks in single-threaded mode.
+        assert(readers > 0 && "Reader lock not acquired before release!");
+        --readers;
+        return true;
+      }
+
+      bool lock() {
+        if (!mt_only || llvm_is_multithreaded())
+          return impl.writer_acquire();
+
+        // Single-threaded debugging code.  This would be racy in multithreaded
+        // mode, but provides some sanity checks in single-threaded mode.
+        assert(writers == 0 && "Writer lock already acquired!");
+        ++writers;
+        return true;
+      }
+
+      bool unlock() {
+        if (!mt_only || llvm_is_multithreaded())
+          return impl.writer_release();
+
+        // Single-threaded debugging code.  This would be racy in multithreaded
+        // mode, but provides some sanity checks in single-threaded mode.
+        assert(writers == 1 && "Writer lock not acquired before release!");
+        --writers;
+        return true;
+      }
+    };
+
+    typedef SmartRWMutex<false> RWMutex;
+
+    /// ScopedReader - RAII acquisition of a reader lock
+    template<bool mt_only>
+    struct SmartScopedReader {
+      SmartRWMutex<mt_only>& mutex;
+
+      explicit SmartScopedReader(SmartRWMutex<mt_only>& m) : mutex(m) {
+        mutex.lock_shared();
+      }
+
+      ~SmartScopedReader() {
+        mutex.unlock_shared();
+      }
+    };
+
+    typedef SmartScopedReader<false> ScopedReader;
+
+    /// ScopedWriter - RAII acquisition of a writer lock
+    template<bool mt_only>
+    struct SmartScopedWriter {
+      SmartRWMutex<mt_only>& mutex;
+
+      explicit SmartScopedWriter(SmartRWMutex<mt_only>& m) : mutex(m) {
+        mutex.lock();
+      }
+
+      ~SmartScopedWriter() {
+        mutex.unlock();
+      }
+    };
+
+    typedef SmartScopedWriter<false> ScopedWriter;
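+
+    // RAII sketch (illustrative): guard shared state with a single RWMutex:
+    //
+    //   static RWMutex Lock;
+    //   { ScopedReader R(Lock); /* multiple concurrent readers are fine */ }
+    //   { ScopedWriter W(Lock); /* exclusive access */ }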
+
+} // end namespace sys
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_RWMUTEX_H
diff --git a/linux-x64/clang/include/llvm/Support/RandomNumberGenerator.h b/linux-x64/clang/include/llvm/Support/RandomNumberGenerator.h
new file mode 100644
index 0000000..1399dab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/RandomNumberGenerator.h
@@ -0,0 +1,70 @@
+//==- llvm/Support/RandomNumberGenerator.h - RNG for diversity ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an abstraction for deterministic random number
+// generation (RNG).  Note that the current implementation is not
+// cryptographically secure as it uses the C++11 <random> facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_
+#define LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h" // Needed for uint64_t on Windows.
+#include <random>
+#include <system_error>
+
+namespace llvm {
+class StringRef;
+
+/// A random number generator.
+///
+/// Instances of this class should not be shared across threads. The
+/// seed should be set by passing the -rng-seed=<uint64> option. Use
+/// Module::createRNG to create a new RNG instance for use with that
+/// module.
+class RandomNumberGenerator {
+
+  // 64-bit Mersenne Twister by Matsumoto and Nishimura, 2000
+  // http://en.cppreference.com/w/cpp/numeric/random/mersenne_twister_engine
+  // This RNG is deterministically portable across C++11
+  // implementations.
+  using generator_type = std::mt19937_64;
+
+public:
+  using result_type = generator_type::result_type;
+
+  /// Returns a random number in the range [min(), max()].
+  result_type operator()();
+
+  static constexpr result_type min() { return generator_type::min(); }
+  static constexpr result_type max() { return generator_type::max(); }
+
+private:
+  /// Seeds and salts the underlying RNG engine.
+  ///
+  /// This constructor should not be used directly. Instead use
+  /// Module::createRNG to create a new RNG salted with the Module ID.
+  RandomNumberGenerator(StringRef Salt);
+
+  generator_type Generator;
+
+  // Noncopyable.
+  RandomNumberGenerator(const RandomNumberGenerator &other) = delete;
+  RandomNumberGenerator &operator=(const RandomNumberGenerator &other) = delete;
+
+  friend class Module;
+};
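+
+// RandomNumberGenerator satisfies the UniformRandomBitGenerator requirements,
+// so it can drive <random> distributions. Sketch (RNG is assumed to come from
+// Module::createRNG; see Module.h for the exact factory signature):
+//
+//   std::uniform_int_distribution<uint64_t> Dist(0, 99);
+//   uint64_t N = Dist(RNG);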
+
+/// Fill the given \p Buffer with \p Size random bytes.
+std::error_code getRandomBytes(void *Buffer, size_t Size);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Recycler.h b/linux-x64/clang/include/llvm/Support/Recycler.h
new file mode 100644
index 0000000..53db2e8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Recycler.h
@@ -0,0 +1,116 @@
+//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Recycler class template.  See the doxygen comment for
+// Recycler for more details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RECYCLER_H
+#define LLVM_SUPPORT_RECYCLER_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace llvm {
+
+/// PrintRecyclerStats - Helper for RecyclingAllocator for
+/// printing statistics.
+///
+void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);
+
+/// Recycler - This class manages a linked-list of deallocated nodes
+/// and facilitates reusing deallocated memory in place of allocating
+/// new memory.
+///
+template <class T, size_t Size = sizeof(T), size_t Align = alignof(T)>
+class Recycler {
+  struct FreeNode {
+    FreeNode *Next;
+  };
+
+  /// List of nodes that have deleted contents and are not in active use.
+  FreeNode *FreeList = nullptr;
+
+  FreeNode *pop_val() {
+    auto *Val = FreeList;
+    __asan_unpoison_memory_region(Val, Size);
+    FreeList = FreeList->Next;
+    __msan_allocated_memory(Val, Size);
+    return Val;
+  }
+
+  void push(FreeNode *N) {
+    N->Next = FreeList;
+    FreeList = N;
+    __asan_poison_memory_region(N, Size);
+  }
+
+public:
+  ~Recycler() {
+    // If this fails, either the client has lost track of some allocation,
+    // or the client isn't tracking allocations and should just call
+    // clear() before deleting the Recycler.
+    assert(!FreeList && "Non-empty recycler deleted!");
+  }
+
+  /// clear - Release all the tracked allocations to the allocator. The
+  /// recycler must be free of any tracked allocations before being
+  /// deleted; calling clear is one way to ensure this.
+  template<class AllocatorType>
+  void clear(AllocatorType &Allocator) {
+    while (FreeList) {
+      T *t = reinterpret_cast<T *>(pop_val());
+      Allocator.Deallocate(t);
+    }
+  }
+
+  /// Special case for BumpPtrAllocator which has an empty Deallocate()
+  /// function.
+  ///
+  /// There is no need to traverse the free list, pulling all the objects into
+  /// cache.
+  void clear(BumpPtrAllocator &) { FreeList = nullptr; }
+
+  template<class SubClass, class AllocatorType>
+  SubClass *Allocate(AllocatorType &Allocator) {
+    static_assert(alignof(SubClass) <= Align,
+                  "Recycler allocation alignment is less than object align!");
+    static_assert(sizeof(SubClass) <= Size,
+                  "Recycler allocation size is less than object size!");
+    return FreeList ? reinterpret_cast<SubClass *>(pop_val())
+                    : static_cast<SubClass *>(Allocator.Allocate(Size, Align));
+  }
+
+  template<class AllocatorType>
+  T *Allocate(AllocatorType &Allocator) {
+    return Allocate<T>(Allocator);
+  }
+
+  template<class SubClass, class AllocatorType>
+  void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) {
+    push(reinterpret_cast<FreeNode *>(Element));
+  }
+
+  void PrintStats();
+};
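+
+// Usage sketch (illustrative; MyNode is a hypothetical node type):
+//
+//   Recycler<MyNode> R;
+//   BumpPtrAllocator A;
+//   MyNode *N = R.Allocate(A);  // reuses a recycled node when available
+//   R.Deallocate(A, N);         // returns the node to the free list
+//   R.clear(A);                 // recycler must be empty before destruction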
+
+template <class T, size_t Size, size_t Align>
+void Recycler<T, Size, Align>::PrintStats() {
+  size_t S = 0;
+  for (auto *I = FreeList; I; I = I->Next)
+    ++S;
+  PrintRecyclerStats(Size, Align, S);
+}
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/RecyclingAllocator.h b/linux-x64/clang/include/llvm/Support/RecyclingAllocator.h
new file mode 100644
index 0000000..32b033b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/RecyclingAllocator.h
@@ -0,0 +1,77 @@
+//==- llvm/Support/RecyclingAllocator.h - Recycling Allocator ----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecyclingAllocator class.  See the doxygen comment for
+// RecyclingAllocator for more details on the implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RECYCLINGALLOCATOR_H
+#define LLVM_SUPPORT_RECYCLINGALLOCATOR_H
+
+#include "llvm/Support/Recycler.h"
+
+namespace llvm {
+
+/// RecyclingAllocator - This class wraps an Allocator, adding the
+/// functionality of recycling deleted objects.
+///
+template <class AllocatorType, class T, size_t Size = sizeof(T),
+          size_t Align = alignof(T)>
+class RecyclingAllocator {
+private:
+  /// Base - Implementation details.
+  ///
+  Recycler<T, Size, Align> Base;
+
+  /// Allocator - The wrapped allocator.
+  ///
+  AllocatorType Allocator;
+
+public:
+  ~RecyclingAllocator() { Base.clear(Allocator); }
+
+  /// Allocate - Return a pointer to storage for an object of type
+  /// SubClass. The storage may be either newly allocated or recycled.
+  ///
+  template<class SubClass>
+  SubClass *Allocate() { return Base.template Allocate<SubClass>(Allocator); }
+
+  T *Allocate() { return Base.Allocate(Allocator); }
+
+  /// Deallocate - Release storage for the pointed-to object. The
+  /// storage will be kept track of and may be recycled.
+  ///
+  template<class SubClass>
+  void Deallocate(SubClass* E) { return Base.Deallocate(Allocator, E); }
+
+  void PrintStats() {
+    Allocator.PrintStats();
+    Base.PrintStats();
+  }
+};
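+
+// A common instantiation (illustrative sketch; MyNode is hypothetical) pairs
+// the recycler with a BumpPtrAllocator:
+//
+//   RecyclingAllocator<BumpPtrAllocator, MyNode> Alloc;
+//   MyNode *N = Alloc.Allocate();
+//   Alloc.Deallocate(N);  // N is handed back out by a later Allocate()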
+
+}
+
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void *operator new(size_t size,
+                          llvm::RecyclingAllocator<AllocatorType,
+                                                   T, Size, Align> &Allocator) {
+  assert(size <= Size && "allocation size exceeded");
+  return Allocator.Allocate();
+}
+
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void operator delete(void *E,
+                            llvm::RecyclingAllocator<AllocatorType,
+                                                     T, Size, Align> &A) {
+  A.Deallocate(E);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Regex.h b/linux-x64/clang/include/llvm/Support/Regex.h
new file mode 100644
index 0000000..f498835
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Regex.h
@@ -0,0 +1,102 @@
+//===-- Regex.h - Regular Expression matcher implementation -*- C++ -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a POSIX regular expression matcher.  Both Basic and
+// Extended POSIX regular expressions (ERE) are supported.  EREs were extended
+// to support backreferences in matches.
+// This implementation also supports matching strings with embedded NUL chars.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_REGEX_H
+#define LLVM_SUPPORT_REGEX_H
+
+#include <string>
+
+struct llvm_regex;
+
+namespace llvm {
+  class StringRef;
+  template<typename T> class SmallVectorImpl;
+
+  class Regex {
+  public:
+    enum {
+      NoFlags=0,
+      /// Compile for matching that ignores upper/lower case distinctions.
+      IgnoreCase=1,
+      /// Compile for newline-sensitive matching. With this flag '[^' bracket
+      /// expressions and '.' never match newline. A ^ anchor matches the
+      /// null string after any newline in the string in addition to its normal
+      /// function, and the $ anchor matches the null string before any
+      /// newline in the string in addition to its normal function.
+      Newline=2,
+      /// By default, the POSIX extended regular expression (ERE) syntax is
+      /// assumed. Pass this flag to turn on basic regular expressions (BRE)
+      /// instead.
+      BasicRegex=4
+    };
+
+    Regex();
+    /// Compiles the given regular expression \p Regex.
+    Regex(StringRef Regex, unsigned Flags = NoFlags);
+    Regex(const Regex &) = delete;
+    Regex &operator=(Regex regex) {
+      std::swap(preg, regex.preg);
+      std::swap(error, regex.error);
+      return *this;
+    }
+    Regex(Regex &&regex);
+    ~Regex();
+
+    /// isValid - returns true if this regex compiled successfully; otherwise
+    /// returns false and fills in \p Error with the compilation error message.
+    bool isValid(std::string &Error) const;
+
+    /// getNumMatches - In a valid regex, return the number of parenthesized
+    /// matches it contains.  The number filled in by match will include this
+    /// many entries plus one for the whole regex (as element 0).
+    unsigned getNumMatches() const;
+
+    /// matches - Match the regex against a given \p String.
+    ///
+    /// \param Matches - If given, on a successful match this will be filled in
+    /// with references to the matched group expressions (inside \p String),
+    /// the first group is always the entire pattern.
+    ///
+    /// This returns true on a successful match.
+    bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr);
+
+    /// sub - Return the result of replacing the first match of the regex in
+    /// \p String with the \p Repl string. Backreferences like "\0" in the
+    /// replacement string are replaced with the appropriate match substring.
+    ///
+    /// Note that the replacement string has backslash escaping performed on
+    /// it. Invalid backreferences are ignored (replaced by empty strings).
+    ///
+    /// \param Error If non-null, any errors in the substitution (invalid
+    /// backreferences, trailing backslashes) will be recorded as a non-empty
+    /// string.
+    std::string sub(StringRef Repl, StringRef String,
+                    std::string *Error = nullptr);
+
+    /// \brief If this function returns true, ^Str$ is an extended regular
+    /// expression that matches Str and only Str.
+    static bool isLiteralERE(StringRef Str);
+
+    /// \brief Turn String into a regex by escaping its special characters.
+    static std::string escape(StringRef String);
+
+  private:
+    struct llvm_regex *preg;
+    int error;
+  };
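+
+  // Matching sketch (illustrative):
+  //
+  //   Regex R("^([a-z]+)=([0-9]+)$");
+  //   SmallVector<StringRef, 3> Groups;
+  //   if (R.match("width=42", &Groups)) {
+  //     // Groups[0] == "width=42", Groups[1] == "width", Groups[2] == "42"
+  //   }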
+}
+
+#endif // LLVM_SUPPORT_REGEX_H
diff --git a/linux-x64/clang/include/llvm/Support/Registry.h b/linux-x64/clang/include/llvm/Support/Registry.h
new file mode 100644
index 0000000..02fd5b9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Registry.h
@@ -0,0 +1,160 @@
+//=== Registry.h - Linker-supported plugin registries -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a registry template for discovering pluggable modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_REGISTRY_H
+#define LLVM_SUPPORT_REGISTRY_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include <memory>
+
+namespace llvm {
+  /// A simple registry entry which provides only a name, description, and
+  /// no-argument constructor.
+  template <typename T>
+  class SimpleRegistryEntry {
+    StringRef Name, Desc;
+    std::unique_ptr<T> (*Ctor)();
+
+  public:
+    SimpleRegistryEntry(StringRef N, StringRef D, std::unique_ptr<T> (*C)())
+        : Name(N), Desc(D), Ctor(C) {}
+
+    StringRef getName() const { return Name; }
+    StringRef getDesc() const { return Desc; }
+    std::unique_ptr<T> instantiate() const { return Ctor(); }
+  };
+
+  /// A global registry used in conjunction with static constructors to make
+  /// pluggable components (like targets or garbage collectors) "just work" when
+  /// linked with an executable.
+  template <typename T>
+  class Registry {
+  public:
+    typedef T type;
+    typedef SimpleRegistryEntry<T> entry;
+
+    class node;
+    class iterator;
+
+  private:
+    Registry() = delete;
+
+    friend class node;
+    static node *Head, *Tail;
+
+  public:
+    /// Node in linked list of entries.
+    ///
+    class node {
+      friend class iterator;
+      friend Registry<T>;
+
+      node *Next;
+      const entry& Val;
+
+    public:
+      node(const entry &V) : Next(nullptr), Val(V) {}
+    };
+
+    /// Add a node to the Registry: this is the interface between the plugin and
+    /// the executable.
+    ///
+    /// This function is exported by the executable and called by the plugin to
+    /// add a node to the executable's registry. Therefore it's not defined here
+    /// to avoid it being instantiated in the plugin and is instead defined in
+    /// the executable (see LLVM_INSTANTIATE_REGISTRY below).
+    static void add_node(node *N);
+
+    /// Iterators for registry entries.
+    ///
+    class iterator {
+      const node *Cur;
+
+    public:
+      explicit iterator(const node *N) : Cur(N) {}
+
+      bool operator==(const iterator &That) const { return Cur == That.Cur; }
+      bool operator!=(const iterator &That) const { return Cur != That.Cur; }
+      iterator &operator++() { Cur = Cur->Next; return *this; }
+      const entry &operator*() const { return Cur->Val; }
+      const entry *operator->() const { return &Cur->Val; }
+    };
+
+    // begin is not defined here in order to avoid usage of an undefined static
+    // data member, instead it's instantiated by LLVM_INSTANTIATE_REGISTRY.
+    static iterator begin();
+    static iterator end()   { return iterator(nullptr); }
+
+    static iterator_range<iterator> entries() {
+      return make_range(begin(), end());
+    }
+
+    /// A static registration template. Use it like this:
+    ///
+    ///   Registry<Collector>::Add<FancyGC>
+    ///   X("fancy-gc", "Newfangled garbage collector.");
+    ///
+    /// Use of this template requires that:
+    ///
+    ///  1. The registered subclass has a default constructor.
+    template <typename V>
+    class Add {
+      entry Entry;
+      node Node;
+
+      static std::unique_ptr<T> CtorFn() { return make_unique<V>(); }
+
+    public:
+      Add(StringRef Name, StringRef Desc)
+          : Entry(Name, Desc, CtorFn), Node(Entry) {
+        add_node(&Node);
+      }
+    };
+  };
+} // end namespace llvm
+
+/// Instantiate a registry class.
+///
+/// This provides template definitions of add_node, begin, and the Head and Tail
+/// pointers, then explicitly instantiates them. We could explicitly specialize
+/// them, instead of the two-step process of define then instantiate, but
+/// strictly speaking that's not allowed by the C++ standard (we would need to
+/// have explicit specialization declarations in all translation units where the
+/// specialization is used) so we don't.
+#define LLVM_INSTANTIATE_REGISTRY(REGISTRY_CLASS) \
+  namespace llvm { \
+  template<typename T> typename Registry<T>::node *Registry<T>::Head = nullptr;\
+  template<typename T> typename Registry<T>::node *Registry<T>::Tail = nullptr;\
+  template<typename T> \
+  void Registry<T>::add_node(typename Registry<T>::node *N) { \
+    if (Tail) \
+      Tail->Next = N; \
+    else \
+      Head = N; \
+    Tail = N; \
+  } \
+  template<typename T> typename Registry<T>::iterator Registry<T>::begin() { \
+    return iterator(Head); \
+  } \
+  template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Head; \
+  template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Tail; \
+  template \
+  void Registry<REGISTRY_CLASS::type>::add_node(REGISTRY_CLASS::node*); \
+  template REGISTRY_CLASS::iterator Registry<REGISTRY_CLASS::type>::begin(); \
+  }
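+
+// Usage sketch (illustrative): given a registry typedef in a header,
+//
+//   class Collector { /* ... */ };
+//   typedef llvm::Registry<Collector> CollectorRegistry;
+//
+// the hosting executable instantiates it exactly once in a .cpp file:
+//
+//   LLVM_INSTANTIATE_REGISTRY(CollectorRegistry)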
+
+#endif // LLVM_SUPPORT_REGISTRY_H
diff --git a/linux-x64/clang/include/llvm/Support/ReverseIteration.h b/linux-x64/clang/include/llvm/Support/ReverseIteration.h
new file mode 100644
index 0000000..5e0238d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ReverseIteration.h
@@ -0,0 +1,19 @@
+#ifndef LLVM_SUPPORT_REVERSEITERATION_H
+#define LLVM_SUPPORT_REVERSEITERATION_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+
+namespace llvm {
+
+template<class T = void *>
+bool shouldReverseIterate() {
+#if LLVM_ENABLE_REVERSE_ITERATION
+  return detail::IsPointerLike<T>::value;
+#else
+  return false;
+#endif
+}
+
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/SHA1.h b/linux-x64/clang/include/llvm/Support/SHA1.h
new file mode 100644
index 0000000..1fc60a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SHA1.h
@@ -0,0 +1,89 @@
+//==- SHA1.h - SHA1 implementation for LLVM                     --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This code is taken from the public domain
+// (http://oauth.googlecode.com/svn/code/c/liboauth/src/sha1.c)
+// and modified by wrapping it in a C++ interface for LLVM,
+// and removing unnecessary code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SHA1_H
+#define LLVM_SUPPORT_SHA1_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+#include <array>
+#include <cstdint>
+
+namespace llvm {
+template <typename T> class ArrayRef;
+class StringRef;
+
+/// A class that wraps the SHA1 algorithm.
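+///
+/// A usage sketch (illustrative; the returned digest is 20 raw bytes, not a
+/// hex string):
+/// \code
+///   SHA1 Hash;
+///   Hash.update(StringRef("abc"));
+///   StringRef Digest = Hash.final();
+/// \endcode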
+class SHA1 {
+public:
+  SHA1() { init(); }
+
+  /// Reinitialize the internal state
+  void init();
+
+  /// Digest more data.
+  void update(ArrayRef<uint8_t> Data);
+
+  /// Digest more data.
+  void update(StringRef Str) {
+    update(ArrayRef<uint8_t>((uint8_t *)const_cast<char *>(Str.data()),
+                             Str.size()));
+  }
+
+  /// Return a reference to the current raw 160-bit SHA1 for the digested data
+  /// since the last call to init(). This call will add data to the internal
+  /// state and as such is not suited for getting an intermediate result
+  /// (see result()).
+  StringRef final();
+
+  /// Return a reference to the current raw 160-bit SHA1 for the digested data
+  /// since the last call to init(). This is suitable for getting the SHA1 at
+  /// any time without invalidating the internal state, so that more calls can
+  /// be made to update().
+  StringRef result();
+
+  /// Returns a raw 160-bit SHA1 hash for the given data.
+  static std::array<uint8_t, 20> hash(ArrayRef<uint8_t> Data);
+
+private:
+  /// Define some constants.
+  /// "static constexpr" would be cleaner but MSVC does not support it yet.
+  enum { BLOCK_LENGTH = 64 };
+  enum { HASH_LENGTH = 20 };
+
+  // Internal State
+  struct {
+    union {
+      uint8_t C[BLOCK_LENGTH];
+      uint32_t L[BLOCK_LENGTH / 4];
+    } Buffer;
+    uint32_t State[HASH_LENGTH / 4];
+    uint32_t ByteCount;
+    uint8_t BufferOffset;
+  } InternalState;
+
+  // Internal copy of the hash, populated and accessed on calls to result()
+  uint32_t HashResult[HASH_LENGTH / 4];
+
+  // Helper
+  void writebyte(uint8_t data);
+  void hashBlock();
+  void addUncounted(uint8_t data);
+  void pad();
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/SMLoc.h b/linux-x64/clang/include/llvm/Support/SMLoc.h
new file mode 100644
index 0000000..5b8be55
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SMLoc.h
@@ -0,0 +1,65 @@
+//===- SMLoc.h - Source location for use with diagnostics -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SMLoc class.  This class encapsulates a location in
+// source code for use in diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SMLOC_H
+#define LLVM_SUPPORT_SMLOC_H
+
+#include "llvm/ADT/None.h"
+#include <cassert>
+
+namespace llvm {
+
+/// Represents a location in source code.
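+///
+/// An SMLoc is just a pointer into a memory buffer, e.g. (sketch):
+/// \code
+///   const char *Buf = "let x = 1";
+///   SMLoc Loc = SMLoc::getFromPointer(Buf + 4); // location of 'x'
+/// \endcode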
+class SMLoc {
+  const char *Ptr = nullptr;
+
+public:
+  SMLoc() = default;
+
+  bool isValid() const { return Ptr != nullptr; }
+
+  bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; }
+  bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; }
+
+  const char *getPointer() const { return Ptr; }
+
+  static SMLoc getFromPointer(const char *Ptr) {
+    SMLoc L;
+    L.Ptr = Ptr;
+    return L;
+  }
+};
+
+/// Represents a range in source code.
+///
+/// SMRange is implemented using a half-open range, as is the convention in C++.
+/// In the string "abc", the range [1,3) represents the substring "bc", and the
+/// range [2,2) represents an empty range between the characters "b" and "c".
+class SMRange {
+public:
+  SMLoc Start, End;
+
+  SMRange() = default;
+  SMRange(NoneType) {}
+  SMRange(SMLoc St, SMLoc En) : Start(St), End(En) {
+    assert(Start.isValid() == End.isValid() &&
+           "Start and end should either both be valid or both be invalid!");
+  }
+
+  bool isValid() const { return Start.isValid(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_SMLOC_H
diff --git a/linux-x64/clang/include/llvm/Support/SaveAndRestore.h b/linux-x64/clang/include/llvm/Support/SaveAndRestore.h
new file mode 100644
index 0000000..ef154ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SaveAndRestore.h
@@ -0,0 +1,49 @@
+//===-- SaveAndRestore.h - Utility  -------------------------------*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides utility classes that use RAII to save and restore
+/// values.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SAVEANDRESTORE_H
+#define LLVM_SUPPORT_SAVEANDRESTORE_H
+
+namespace llvm {
+
+/// A utility class that uses RAII to save and restore the value of a variable.
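+///
+/// For example (illustrative):
+/// \code
+///   bool Flag = true;
+///   {
+///     SaveAndRestore<bool> Guard(Flag, false); // Flag is false in here.
+///   } // Guard's destructor restores Flag to true.
+/// \endcode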
+template <typename T> struct SaveAndRestore {
+  SaveAndRestore(T &X) : X(X), OldValue(X) {}
+  SaveAndRestore(T &X, const T &NewValue) : X(X), OldValue(X) {
+    X = NewValue;
+  }
+  ~SaveAndRestore() { X = OldValue; }
+  T get() { return OldValue; }
+
+private:
+  T &X;
+  T OldValue;
+};
+
+/// Similar to \c SaveAndRestore.  Operates only on bools; the old value of a
+/// variable is saved, and during the destructor the old value is or'ed with
+/// the new value.
+struct SaveOr {
+  SaveOr(bool &X) : X(X), OldValue(X) { X = false; }
+  ~SaveOr() { X |= OldValue; }
+
+private:
+  bool &X;
+  const bool OldValue;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/ScaledNumber.h b/linux-x64/clang/include/llvm/Support/ScaledNumber.h
new file mode 100644
index 0000000..cfbdbc7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ScaledNumber.h
@@ -0,0 +1,897 @@
+//===- llvm/Support/ScaledNumber.h - Support for scaled numbers -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions (and a class) useful for working with scaled
+// numbers -- in particular, pairs of integers where one represents digits and
+// the other represents a scale.  The functions are helpers and live in the
+// namespace ScaledNumbers.  The class ScaledNumber is useful for modelling
+// certain cost metrics that need simple, integer-like semantics that are easy
+// to reason about.
+//
+// These might remind you of soft-floats.  If you want one of those, you're in
+// the wrong place.  Look at include/llvm/ADT/APFloat.h instead.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SCALEDNUMBER_H
+#define LLVM_SUPPORT_SCALEDNUMBER_H
+
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+namespace ScaledNumbers {
+
+/// \brief Maximum scale; same as APFloat for easy debug printing.
+const int32_t MaxScale = 16383;
+
+/// \brief Minimum scale; same as APFloat for easy debug printing.
+const int32_t MinScale = -16382;
+
+/// \brief Get the width of a number.
+template <class DigitsT> inline int getWidth() { return sizeof(DigitsT) * 8; }
+
+/// \brief Conditionally round up a scaled number.
+///
+/// Given \c Digits and \c Scale, round up iff \c ShouldRound is \c true.
+/// Always returns \c Scale unless there's an overflow, in which case it
+/// returns \c 1+Scale.
+///
+/// \pre Adding 1 to \c Scale will not overflow INT16_MAX.
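+///
+/// E.g. (sketch): getRounded32(UINT32_MAX, 0, true) overflows the digits and
+/// returns (1u << 31, 1), i.e. 2^31 * 2^1 == 2^32.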
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getRounded(DigitsT Digits, int16_t Scale,
+                                              bool ShouldRound) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  if (ShouldRound)
+    if (!++Digits)
+      // Overflow.
+      return std::make_pair(DigitsT(1) << (getWidth<DigitsT>() - 1), Scale + 1);
+  return std::make_pair(Digits, Scale);
+}
+
+/// \brief Convenience helper for 32-bit rounding.
+inline std::pair<uint32_t, int16_t> getRounded32(uint32_t Digits, int16_t Scale,
+                                                 bool ShouldRound) {
+  return getRounded(Digits, Scale, ShouldRound);
+}
+
+/// \brief Convenience helper for 64-bit rounding.
+inline std::pair<uint64_t, int16_t> getRounded64(uint64_t Digits, int16_t Scale,
+                                                 bool ShouldRound) {
+  return getRounded(Digits, Scale, ShouldRound);
+}
+
+/// \brief Adjust a 64-bit scaled number down to the appropriate width.
+///
+/// \pre Adding 64 to \c Scale will not overflow INT16_MAX.
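+///
+/// E.g. (sketch): getAdjusted32(UINT64_C(1) << 40) returns (1u << 31, 9),
+/// since 2^40 == 2^31 * 2^9.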
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getAdjusted(uint64_t Digits,
+                                               int16_t Scale = 0) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  const int Width = getWidth<DigitsT>();
+  if (Width == 64 || Digits <= std::numeric_limits<DigitsT>::max())
+    return std::make_pair(Digits, Scale);
+
+  // Shift right and round.
+  int Shift = 64 - Width - countLeadingZeros(Digits);
+  return getRounded<DigitsT>(Digits >> Shift, Scale + Shift,
+                             Digits & (UINT64_C(1) << (Shift - 1)));
+}
+
+/// \brief Convenience helper for adjusting to 32 bits.
+inline std::pair<uint32_t, int16_t> getAdjusted32(uint64_t Digits,
+                                                  int16_t Scale = 0) {
+  return getAdjusted<uint32_t>(Digits, Scale);
+}
+
+/// \brief Convenience helper for adjusting to 64 bits.
+inline std::pair<uint64_t, int16_t> getAdjusted64(uint64_t Digits,
+                                                  int16_t Scale = 0) {
+  return getAdjusted<uint64_t>(Digits, Scale);
+}
+
+/// \brief Multiply two 64-bit integers to create a 64-bit scaled number.
+///
+/// Implemented with four 64-bit integer multiplies.
+std::pair<uint64_t, int16_t> multiply64(uint64_t LHS, uint64_t RHS);
+
+/// \brief Multiply two unsigned integers to create a scaled number.
+///
+/// Implemented with one 64-bit integer multiply when the operands fit in 32
+/// bits, and with \a multiply64() otherwise.
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getProduct(DigitsT LHS, DigitsT RHS) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  if (getWidth<DigitsT>() <= 32 || (LHS <= UINT32_MAX && RHS <= UINT32_MAX))
+    return getAdjusted<DigitsT>(uint64_t(LHS) * RHS);
+
+  return multiply64(LHS, RHS);
+}
+
+/// \brief Convenience helper for 32-bit product.
+inline std::pair<uint32_t, int16_t> getProduct32(uint32_t LHS, uint32_t RHS) {
+  return getProduct(LHS, RHS);
+}
+
+/// \brief Convenience helper for 64-bit product.
+inline std::pair<uint64_t, int16_t> getProduct64(uint64_t LHS, uint64_t RHS) {
+  return getProduct(LHS, RHS);
+}
+
+/// \brief Divide two 64-bit integers to create a 64-bit scaled number.
+///
+/// Implemented with long division.
+///
+/// \pre \c Dividend and \c Divisor are non-zero.
+std::pair<uint64_t, int16_t> divide64(uint64_t Dividend, uint64_t Divisor);
+
+/// \brief Divide two 32-bit integers to create a 32-bit scaled number.
+///
+/// Implemented with one 64-bit integer divide/remainder pair.
+///
+/// \pre \c Dividend and \c Divisor are non-zero.
+std::pair<uint32_t, int16_t> divide32(uint32_t Dividend, uint32_t Divisor);
+
+/// \brief Divide two unsigned numbers to create a scaled number.
+///
+/// Dispatches to \a divide32() or \a divide64() based on the digit width.
+///
+/// Returns \c (DigitsT_MAX, MaxScale) for divide-by-zero (0 for 0/0).
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getQuotient(DigitsT Dividend, DigitsT Divisor) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+  static_assert(sizeof(DigitsT) == 4 || sizeof(DigitsT) == 8,
+                "expected 32-bit or 64-bit digits");
+
+  // Check for zero.
+  if (!Dividend)
+    return std::make_pair(0, 0);
+  if (!Divisor)
+    return std::make_pair(std::numeric_limits<DigitsT>::max(), MaxScale);
+
+  if (getWidth<DigitsT>() == 64)
+    return divide64(Dividend, Divisor);
+  return divide32(Dividend, Divisor);
+}
+
+/// \brief Convenience helper for 32-bit quotient.
+inline std::pair<uint32_t, int16_t> getQuotient32(uint32_t Dividend,
+                                                  uint32_t Divisor) {
+  return getQuotient(Dividend, Divisor);
+}
+
+/// \brief Convenience helper for 64-bit quotient.
+inline std::pair<uint64_t, int16_t> getQuotient64(uint64_t Dividend,
+                                                  uint64_t Divisor) {
+  return getQuotient(Dividend, Divisor);
+}
+
+/// \brief Implementation of getLg() and friends.
+///
+/// Returns the rounded lg of \c Digits*2^Scale and an int specifying whether
+/// this was rounded up (1), down (-1), or exact (0).
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT>
+inline std::pair<int32_t, int> getLgImpl(DigitsT Digits, int16_t Scale) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  if (!Digits)
+    return std::make_pair(INT32_MIN, 0);
+
+  // Get the floor of the lg of Digits.
+  int32_t LocalFloor = sizeof(Digits) * 8 - countLeadingZeros(Digits) - 1;
+
+  // Get the actual floor.
+  int32_t Floor = Scale + LocalFloor;
+  if (Digits == UINT64_C(1) << LocalFloor)
+    return std::make_pair(Floor, 0);
+
+  // Round based on the next digit.
+  assert(LocalFloor >= 1);
+  bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
+  return std::make_pair(Floor + Round, Round ? 1 : -1);
+}
+
+/// \brief Get the lg (rounded) of a scaled number.
+///
+/// Get the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
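+///
+/// E.g. (sketch): getLg(uint32_t(3), 0) returns 2 (lg 3 ~= 1.58 rounds up),
+/// while getLgFloor returns 1 and getLgCeiling returns 2 for the same input.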
+template <class DigitsT> int32_t getLg(DigitsT Digits, int16_t Scale) {
+  return getLgImpl(Digits, Scale).first;
+}
+
+/// \brief Get the lg floor of a scaled number.
+///
+/// Get the floor of the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT> int32_t getLgFloor(DigitsT Digits, int16_t Scale) {
+  auto Lg = getLgImpl(Digits, Scale);
+  return Lg.first - (Lg.second > 0);
+}
+
+/// \brief Get the lg ceiling of a scaled number.
+///
+/// Get the ceiling of the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT> int32_t getLgCeiling(DigitsT Digits, int16_t Scale) {
+  auto Lg = getLgImpl(Digits, Scale);
+  return Lg.first + (Lg.second < 0);
+}
+
+/// \brief Implementation for comparing scaled numbers.
+///
+/// Compare two 64-bit numbers with different scales.  Given that the scale of
+/// \c L is higher than that of \c R by \c ScaleDiff, compare them.  Return -1,
+/// 1, and 0 for less than, greater than, and equal, respectively.
+///
+/// \pre 0 <= ScaleDiff < 64.
+int compareImpl(uint64_t L, uint64_t R, int ScaleDiff);
+
+/// \brief Compare two scaled numbers.
+///
+/// Compare two scaled numbers.  Returns 0 for equal, -1 for less than, and 1
+/// for greater than.
+template <class DigitsT>
+int compare(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  // Check for zero.
+  if (!LDigits)
+    return RDigits ? -1 : 0;
+  if (!RDigits)
+    return 1;
+
+  // Check for the scale.  Use getLgFloor to be sure that the scale difference
+  // is always lower than 64.
+  int32_t lgL = getLgFloor(LDigits, LScale), lgR = getLgFloor(RDigits, RScale);
+  if (lgL != lgR)
+    return lgL < lgR ? -1 : 1;
+
+  // Compare digits.
+  if (LScale < RScale)
+    return compareImpl(LDigits, RDigits, RScale - LScale);
+
+  return -compareImpl(RDigits, LDigits, LScale - RScale);
+}
+
+/// \brief Match scales of two numbers.
+///
+/// Given two scaled numbers, match up their scales.  Change the digits and
+/// scales in place.  Shift the digits as necessary to form equivalent numbers,
+/// losing precision only when necessary.
+///
+/// If the output value of \c LDigits (\c RDigits) is \c 0, the output value of
+/// \c LScale (\c RScale) is unspecified.
+///
+/// As a convenience, returns the matching scale.  If the output value of one
+/// number is zero, returns the scale of the other.  If both are zero, which
+/// scale is returned is unspecified.
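+///
+/// Worked example (illustrative):
+/// \code
+///   uint32_t LD = 1, RD = 8;
+///   int16_t LS = 3, RS = 0;
+///   int16_t S = ScaledNumbers::matchScales(LD, LS, RD, RS);
+///   // Now LD == 8, RD == 8, S == 0: both numbers are held as 8*2^0.
+/// \endcode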
+template <class DigitsT>
+int16_t matchScales(DigitsT &LDigits, int16_t &LScale, DigitsT &RDigits,
+                    int16_t &RScale) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  if (LScale < RScale)
+    // Swap arguments.
+    return matchScales(RDigits, RScale, LDigits, LScale);
+  if (!LDigits)
+    return RScale;
+  if (!RDigits || LScale == RScale)
+    return LScale;
+
+  // Now LScale > RScale.  Get the difference.
+  int32_t ScaleDiff = int32_t(LScale) - RScale;
+  if (ScaleDiff >= 2 * getWidth<DigitsT>()) {
+    // Don't bother shifting.  RDigits will get zeroed out anyway.
+    RDigits = 0;
+    return LScale;
+  }
+
+  // Shift LDigits left as much as possible, then shift RDigits right.
+  int32_t ShiftL = std::min<int32_t>(countLeadingZeros(LDigits), ScaleDiff);
+  assert(ShiftL < getWidth<DigitsT>() && "can't shift more than width");
+
+  int32_t ShiftR = ScaleDiff - ShiftL;
+  if (ShiftR >= getWidth<DigitsT>()) {
+    // Don't bother shifting.  RDigits will get zeroed out anyway.
+    RDigits = 0;
+    return LScale;
+  }
+
+  LDigits <<= ShiftL;
+  RDigits >>= ShiftR;
+
+  LScale -= ShiftL;
+  RScale += ShiftR;
+  assert(LScale == RScale && "scales should match");
+  return LScale;
+}
+
+/// \brief Get the sum of two scaled numbers.
+///
+/// Get the sum of two scaled numbers with as much precision as possible.
+///
+/// \pre Adding 1 to \c LScale (or \c RScale) will not overflow INT16_MAX.
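+///
+/// E.g. (sketch): getSum32(UINT32_MAX, 0, 1, 0) overflows the digits and
+/// returns (1u << 31, 1), representing the exact sum 2^32.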
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getSum(DigitsT LDigits, int16_t LScale,
+                                   DigitsT RDigits, int16_t RScale) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  // Check inputs up front.  This is only relevant if addition overflows, but
+  // testing here should catch more bugs.
+  assert(LScale < INT16_MAX && "scale too large");
+  assert(RScale < INT16_MAX && "scale too large");
+
+  // Normalize digits to match scales.
+  int16_t Scale = matchScales(LDigits, LScale, RDigits, RScale);
+
+  // Compute sum.
+  DigitsT Sum = LDigits + RDigits;
+  if (Sum >= RDigits)
+    return std::make_pair(Sum, Scale);
+
+  // Adjust sum after arithmetic overflow.
+  DigitsT HighBit = DigitsT(1) << (getWidth<DigitsT>() - 1);
+  return std::make_pair(HighBit | Sum >> 1, Scale + 1);
+}
+
+/// \brief Convenience helper for 32-bit sum.
+inline std::pair<uint32_t, int16_t> getSum32(uint32_t LDigits, int16_t LScale,
+                                             uint32_t RDigits, int16_t RScale) {
+  return getSum(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Convenience helper for 64-bit sum.
+inline std::pair<uint64_t, int16_t> getSum64(uint64_t LDigits, int16_t LScale,
+                                             uint64_t RDigits, int16_t RScale) {
+  return getSum(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Get the difference of two scaled numbers.
+///
+/// Get LHS minus RHS with as much precision as possible.
+///
+/// Returns \c (0, 0) if the RHS is larger than the LHS.
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getDifference(DigitsT LDigits, int16_t LScale,
+                                          DigitsT RDigits, int16_t RScale) {
+  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+  // Normalize digits to match scales.
+  const DigitsT SavedRDigits = RDigits;
+  const int16_t SavedRScale = RScale;
+  matchScales(LDigits, LScale, RDigits, RScale);
+
+  // Compute difference.
+  if (LDigits <= RDigits)
+    return std::make_pair(0, 0);
+  if (RDigits || !SavedRDigits)
+    return std::make_pair(LDigits - RDigits, LScale);
+
+  // Check if RDigits just barely lost its last bit.  E.g., for 32-bit:
+  //
+  //   1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
+  const auto RLgFloor = getLgFloor(SavedRDigits, SavedRScale);
+  if (!compare(LDigits, LScale, DigitsT(1), RLgFloor + getWidth<DigitsT>()))
+    return std::make_pair(std::numeric_limits<DigitsT>::max(), RLgFloor);
+
+  return std::make_pair(LDigits, LScale);
+}
+
+/// \brief Convenience helper for 32-bit difference.
+inline std::pair<uint32_t, int16_t> getDifference32(uint32_t LDigits,
+                                                    int16_t LScale,
+                                                    uint32_t RDigits,
+                                                    int16_t RScale) {
+  return getDifference(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Convenience helper for 64-bit difference.
+inline std::pair<uint64_t, int16_t> getDifference64(uint64_t LDigits,
+                                                    int16_t LScale,
+                                                    uint64_t RDigits,
+                                                    int16_t RScale) {
+  return getDifference(LDigits, LScale, RDigits, RScale);
+}
+
+} // end namespace ScaledNumbers
+} // end namespace llvm
+
+namespace llvm {
+
+class raw_ostream;
+class ScaledNumberBase {
+public:
+  static const int DefaultPrecision = 10;
+
+  static void dump(uint64_t D, int16_t E, int Width);
+  static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
+                            unsigned Precision);
+  static std::string toString(uint64_t D, int16_t E, int Width,
+                              unsigned Precision);
+  static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
+  static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
+  static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
+
+  static std::pair<uint64_t, bool> splitSigned(int64_t N) {
+    if (N >= 0)
+      return std::make_pair(N, false);
+    uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
+    return std::make_pair(Unsigned, true);
+  }
+  static int64_t joinSigned(uint64_t U, bool IsNeg) {
+    if (U > uint64_t(INT64_MAX))
+      return IsNeg ? INT64_MIN : INT64_MAX;
+    return IsNeg ? -int64_t(U) : int64_t(U);
+  }
+};
+
+/// \brief Simple representation of a scaled number.
+///
+/// ScaledNumber is a number represented by digits and a scale.  It uses simple
+/// saturation arithmetic and every operation is well-defined for every value.
+/// It's somewhat similar in behaviour to a soft-float, but is *not* a
+/// replacement for one.  If you're doing numerics, look at \a APFloat instead.
+/// Nevertheless, we've found these semantics useful for modelling certain cost
+/// metrics.
+///
+/// The number is split into a signed scale and unsigned digits.  The number
+/// represented is \c getDigits()*2^getScale().  In this way, the digits are
+/// much like the mantissa in the x87 long double, but there is no canonical
+/// form so the same number can be represented by many bit representations.
+///
+/// ScaledNumber is templated on the underlying integer type for digits, which
+/// is expected to be unsigned.
+///
+/// Unlike APFloat, ScaledNumber does not model architecture floating point
+/// behaviour -- while this might make it a little faster and easier to reason
+/// about, it certainly makes it more dangerous for general numerics.
+///
+/// ScaledNumber is totally ordered.  However, there is no canonical form, so
+/// there are multiple representations of most scalars.  E.g.:
+///
+///     ScaledNumber(8u, 0) == ScaledNumber(4u, 1)
+///     ScaledNumber(4u, 1) == ScaledNumber(2u, 2)
+///     ScaledNumber(2u, 2) == ScaledNumber(1u, 3)
+///
+/// ScaledNumber implements most arithmetic operations.  Precision is kept
+/// where possible.  Uses simple saturation arithmetic, so that operations
+/// saturate to 0.0 or getLargest() rather than under or overflowing.  It has
+/// some extra arithmetic for unit inversion.  0.0/0.0 is defined to be 0.0.
+/// Any other division by 0.0 is defined to be getLargest().
+///
+/// As a convenience for modifying the exponent, left and right shifting are
+/// both implemented, and both interpret negative shifts as positive shifts in
+/// the opposite direction.
+///
+/// Scales are limited to the range accepted by x87 long double.  This makes
+/// it trivial to add functionality to convert to APFloat (this is already
+/// relied on for the implementation of printing).
+///
+/// Possible (and conflicting) future directions:
+///
+///  1. Turn this into a wrapper around \a APFloat.
+///  2. Share the algorithm implementations with \a APFloat.
+///  3. Allow \a ScaledNumber to represent a signed number.
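+///
+/// Usage sketch (illustrative):
+/// \code
+///   ScaledNumber<uint64_t> N(20, -1);                // 20 * 2^-1 == 10.0
+///   ScaledNumber<uint64_t> M = N * ScaledNumber<uint64_t>::get(3); // 30.0
+///   uint64_t I = M.toInt<uint64_t>();                // 30
+/// \endcode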
+template <class DigitsT> class ScaledNumber : ScaledNumberBase {
+public:
+  static_assert(!std::numeric_limits<DigitsT>::is_signed,
+                "only unsigned floats supported");
+
+  typedef DigitsT DigitsType;
+
+private:
+  typedef std::numeric_limits<DigitsType> DigitsLimits;
+
+  static const int Width = sizeof(DigitsType) * 8;
+  static_assert(Width <= 64, "invalid integer width for digits");
+
+private:
+  DigitsType Digits = 0;
+  int16_t Scale = 0;
+
+public:
+  ScaledNumber() = default;
+
+  constexpr ScaledNumber(DigitsType Digits, int16_t Scale)
+      : Digits(Digits), Scale(Scale) {}
+
+private:
+  ScaledNumber(const std::pair<DigitsT, int16_t> &X)
+      : Digits(X.first), Scale(X.second) {}
+
+public:
+  static ScaledNumber getZero() { return ScaledNumber(0, 0); }
+  static ScaledNumber getOne() { return ScaledNumber(1, 0); }
+  static ScaledNumber getLargest() {
+    return ScaledNumber(DigitsLimits::max(), ScaledNumbers::MaxScale);
+  }
+  static ScaledNumber get(uint64_t N) { return adjustToWidth(N, 0); }
+  static ScaledNumber getInverse(uint64_t N) {
+    return get(N).invert();
+  }
+  static ScaledNumber getFraction(DigitsType N, DigitsType D) {
+    return getQuotient(N, D);
+  }
+
+  int16_t getScale() const { return Scale; }
+  DigitsType getDigits() const { return Digits; }
+
+  /// \brief Convert to the given integer type.
+  ///
+  /// Convert to \c IntT using simple saturating arithmetic, truncating if
+  /// necessary.
+  template <class IntT> IntT toInt() const;
+
+  bool isZero() const { return !Digits; }
+  bool isLargest() const { return *this == getLargest(); }
+  bool isOne() const {
+    if (Scale > 0 || Scale <= -Width)
+      return false;
+    return Digits == DigitsType(1) << -Scale;
+  }
+
+  /// \brief The log base 2, rounded.
+  ///
+  /// Get the lg of the scalar.  lg 0 is defined to be INT32_MIN.
+  int32_t lg() const { return ScaledNumbers::getLg(Digits, Scale); }
+
+  /// \brief The log base 2, rounded towards INT32_MIN.
+  ///
+  /// Get the lg floor.  lg 0 is defined to be INT32_MIN.
+  int32_t lgFloor() const { return ScaledNumbers::getLgFloor(Digits, Scale); }
+
+  /// \brief The log base 2, rounded towards INT32_MAX.
+  ///
+  /// Get the lg ceiling.  lg 0 is defined to be INT32_MIN.
+  int32_t lgCeiling() const {
+    return ScaledNumbers::getLgCeiling(Digits, Scale);
+  }
+
+  bool operator==(const ScaledNumber &X) const { return compare(X) == 0; }
+  bool operator<(const ScaledNumber &X) const { return compare(X) < 0; }
+  bool operator!=(const ScaledNumber &X) const { return compare(X) != 0; }
+  bool operator>(const ScaledNumber &X) const { return compare(X) > 0; }
+  bool operator<=(const ScaledNumber &X) const { return compare(X) <= 0; }
+  bool operator>=(const ScaledNumber &X) const { return compare(X) >= 0; }
+
+  bool operator!() const { return isZero(); }
+
+  /// \brief Convert to a decimal representation in a string.
+  ///
+  /// Convert to a string.  Uses scientific notation for very large/small
+  /// numbers.  Scientific notation is used roughly for numbers outside of the
+  /// range 2^-64 through 2^64.
+  ///
+  /// \c Precision indicates the number of decimal digits of precision to use;
+  /// 0 requests the maximum available.
+  ///
+  /// As a special case to make debugging easier, if the number is small enough
+  /// to convert without scientific notation and has more than \c Precision
+  /// digits before the decimal place, it's printed accurately to the first
+  /// digit past zero.  E.g., assuming 10 digits of precision:
+  ///
+  ///     98765432198.7654... => 98765432198.8
+  ///      8765432198.7654... =>  8765432198.8
+  ///       765432198.7654... =>   765432198.8
+  ///        65432198.7654... =>    65432198.77
+  ///         5432198.7654... =>     5432198.765
+  std::string toString(unsigned Precision = DefaultPrecision) {
+    return ScaledNumberBase::toString(Digits, Scale, Width, Precision);
+  }
+
+  /// \brief Print a decimal representation.
+  ///
+  /// Print a string.  See toString for documentation.
+  raw_ostream &print(raw_ostream &OS,
+                     unsigned Precision = DefaultPrecision) const {
+    return ScaledNumberBase::print(OS, Digits, Scale, Width, Precision);
+  }
+  void dump() const { return ScaledNumberBase::dump(Digits, Scale, Width); }
+
+  ScaledNumber &operator+=(const ScaledNumber &X) {
+    std::tie(Digits, Scale) =
+        ScaledNumbers::getSum(Digits, Scale, X.Digits, X.Scale);
+    // Check for exponent past MaxScale.
+    if (Scale > ScaledNumbers::MaxScale)
+      *this = getLargest();
+    return *this;
+  }
+  ScaledNumber &operator-=(const ScaledNumber &X) {
+    std::tie(Digits, Scale) =
+        ScaledNumbers::getDifference(Digits, Scale, X.Digits, X.Scale);
+    return *this;
+  }
+  ScaledNumber &operator*=(const ScaledNumber &X);
+  ScaledNumber &operator/=(const ScaledNumber &X);
+  ScaledNumber &operator<<=(int16_t Shift) {
+    shiftLeft(Shift);
+    return *this;
+  }
+  ScaledNumber &operator>>=(int16_t Shift) {
+    shiftRight(Shift);
+    return *this;
+  }
+
+private:
+  void shiftLeft(int32_t Shift);
+  void shiftRight(int32_t Shift);
+
+  /// \brief Adjust two scaled numbers to have matching scales.
+  ///
+  /// Adjust \c this and \c X to have matching scales.  Returns the new \c X
+  /// by value.  Does nothing if \a isZero() for either.
+  ///
+  /// The value that compares smaller will lose precision, and possibly become
+  /// \a isZero().
+  ScaledNumber matchScales(ScaledNumber X) {
+    ScaledNumbers::matchScales(Digits, Scale, X.Digits, X.Scale);
+    return X;
+  }
+
+public:
+  /// \brief Scale a large number accurately.
+  ///
+  /// Scale N (multiply it by this).  Uses full precision multiplication, even
+  /// if Width is smaller than 64, so information is not lost.
+  uint64_t scale(uint64_t N) const;
+  uint64_t scaleByInverse(uint64_t N) const {
+    // TODO: implement directly, rather than relying on inverse.  Inverse is
+    // expensive.
+    return inverse().scale(N);
+  }
+  int64_t scale(int64_t N) const {
+    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+    return joinSigned(scale(Unsigned.first), Unsigned.second);
+  }
+  int64_t scaleByInverse(int64_t N) const {
+    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+    return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
+  }
+
+  int compare(const ScaledNumber &X) const {
+    return ScaledNumbers::compare(Digits, Scale, X.Digits, X.Scale);
+  }
+  int compareTo(uint64_t N) const {
+    return ScaledNumbers::compare<uint64_t>(Digits, Scale, N, 0);
+  }
+  int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }
+
+  ScaledNumber &invert() { return *this = ScaledNumber::get(1) / *this; }
+  ScaledNumber inverse() const { return ScaledNumber(*this).invert(); }
+
+private:
+  static ScaledNumber getProduct(DigitsType LHS, DigitsType RHS) {
+    return ScaledNumbers::getProduct(LHS, RHS);
+  }
+  static ScaledNumber getQuotient(DigitsType Dividend, DigitsType Divisor) {
+    return ScaledNumbers::getQuotient(Dividend, Divisor);
+  }
+
+  static int countLeadingZerosWidth(DigitsType Digits) {
+    if (Width == 64)
+      return countLeadingZeros64(Digits);
+    if (Width == 32)
+      return countLeadingZeros32(Digits);
+    return countLeadingZeros32(Digits) + Width - 32;
+  }
+
+  /// \brief Adjust a number to width, rounding up if necessary.
+  ///
+  /// Should only be called for \c Shift close to zero.
+  ///
+  /// \pre Shift >= MinScale && Shift + 64 <= MaxScale.
+  static ScaledNumber adjustToWidth(uint64_t N, int32_t Shift) {
+    assert(Shift >= ScaledNumbers::MinScale && "Shift should be close to 0");
+    assert(Shift <= ScaledNumbers::MaxScale - 64 &&
+           "Shift should be close to 0");
+    auto Adjusted = ScaledNumbers::getAdjusted<DigitsT>(N, Shift);
+    return Adjusted;
+  }
+
+  static ScaledNumber getRounded(ScaledNumber P, bool Round) {
+    // Saturate.
+    if (P.isLargest())
+      return P;
+
+    return ScaledNumbers::getRounded(P.Digits, P.Scale, Round);
+  }
+};
+
+#define SCALED_NUMBER_BOP(op, base)                                            \
+  template <class DigitsT>                                                     \
+  ScaledNumber<DigitsT> operator op(const ScaledNumber<DigitsT> &L,            \
+                                    const ScaledNumber<DigitsT> &R) {          \
+    return ScaledNumber<DigitsT>(L) base R;                                    \
+  }
+SCALED_NUMBER_BOP(+, += )
+SCALED_NUMBER_BOP(-, -= )
+SCALED_NUMBER_BOP(*, *= )
+SCALED_NUMBER_BOP(/, /= )
+#undef SCALED_NUMBER_BOP
+
+template <class DigitsT>
+ScaledNumber<DigitsT> operator<<(const ScaledNumber<DigitsT> &L,
+                                 int16_t Shift) {
+  return ScaledNumber<DigitsT>(L) <<= Shift;
+}
+
+template <class DigitsT>
+ScaledNumber<DigitsT> operator>>(const ScaledNumber<DigitsT> &L,
+                                 int16_t Shift) {
+  return ScaledNumber<DigitsT>(L) >>= Shift;
+}
+
+template <class DigitsT>
+raw_ostream &operator<<(raw_ostream &OS, const ScaledNumber<DigitsT> &X) {
+  return X.print(OS, 10);
+}
+
+#define SCALED_NUMBER_COMPARE_TO_TYPE(op, T1, T2)                              \
+  template <class DigitsT>                                                     \
+  bool operator op(const ScaledNumber<DigitsT> &L, T1 R) {                     \
+    return L.compareTo(T2(R)) op 0;                                            \
+  }                                                                            \
+  template <class DigitsT>                                                     \
+  bool operator op(T1 L, const ScaledNumber<DigitsT> &R) {                     \
+    return 0 op R.compareTo(T2(L));                                            \
+  }
+#define SCALED_NUMBER_COMPARE_TO(op)                                           \
+  SCALED_NUMBER_COMPARE_TO_TYPE(op, uint64_t, uint64_t)                        \
+  SCALED_NUMBER_COMPARE_TO_TYPE(op, uint32_t, uint64_t)                        \
+  SCALED_NUMBER_COMPARE_TO_TYPE(op, int64_t, int64_t)                          \
+  SCALED_NUMBER_COMPARE_TO_TYPE(op, int32_t, int64_t)
+SCALED_NUMBER_COMPARE_TO(< )
+SCALED_NUMBER_COMPARE_TO(> )
+SCALED_NUMBER_COMPARE_TO(== )
+SCALED_NUMBER_COMPARE_TO(!= )
+SCALED_NUMBER_COMPARE_TO(<= )
+SCALED_NUMBER_COMPARE_TO(>= )
+#undef SCALED_NUMBER_COMPARE_TO
+#undef SCALED_NUMBER_COMPARE_TO_TYPE
+
+template <class DigitsT>
+uint64_t ScaledNumber<DigitsT>::scale(uint64_t N) const {
+  if (Width == 64 || N <= DigitsLimits::max())
+    return (get(N) * *this).template toInt<uint64_t>();
+
+  // Defer to the 64-bit version.
+  return ScaledNumber<uint64_t>(Digits, Scale).scale(N);
+}
+
+template <class DigitsT>
+template <class IntT>
+IntT ScaledNumber<DigitsT>::toInt() const {
+  typedef std::numeric_limits<IntT> Limits;
+  if (*this < 1)
+    return 0;
+  if (*this >= Limits::max())
+    return Limits::max();
+
+  IntT N = Digits;
+  if (Scale > 0) {
+    assert(size_t(Scale) < sizeof(IntT) * 8);
+    return N << Scale;
+  }
+  if (Scale < 0) {
+    assert(size_t(-Scale) < sizeof(IntT) * 8);
+    return N >> -Scale;
+  }
+  return N;
+}
+
+template <class DigitsT>
+ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
+operator*=(const ScaledNumber &X) {
+  if (isZero())
+    return *this;
+  if (X.isZero())
+    return *this = X;
+
+  // Save the exponents.
+  int32_t Scales = int32_t(Scale) + int32_t(X.Scale);
+
+  // Get the raw product.
+  *this = getProduct(Digits, X.Digits);
+
+  // Combine with exponents.
+  return *this <<= Scales;
+}
+template <class DigitsT>
+ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
+operator/=(const ScaledNumber &X) {
+  if (isZero())
+    return *this;
+  if (X.isZero())
+    return *this = getLargest();
+
+  // Save the exponents.
+  int32_t Scales = int32_t(Scale) - int32_t(X.Scale);
+
+  // Get the raw quotient.
+  *this = getQuotient(Digits, X.Digits);
+
+  // Combine with exponents.
+  return *this <<= Scales;
+}
+template <class DigitsT> void ScaledNumber<DigitsT>::shiftLeft(int32_t Shift) {
+  if (!Shift || isZero())
+    return;
+  assert(Shift != INT32_MIN);
+  if (Shift < 0) {
+    shiftRight(-Shift);
+    return;
+  }
+
+  // Shift as much as we can in the exponent.
+  int32_t ScaleShift = std::min(Shift, ScaledNumbers::MaxScale - Scale);
+  Scale += ScaleShift;
+  if (ScaleShift == Shift)
+    return;
+
+  // Check this late, since it's rare.
+  if (isLargest())
+    return;
+
+  // Shift the digits themselves.
+  Shift -= ScaleShift;
+  if (Shift > countLeadingZerosWidth(Digits)) {
+    // Saturate.
+    *this = getLargest();
+    return;
+  }
+
+  Digits <<= Shift;
+}
+
+template <class DigitsT> void ScaledNumber<DigitsT>::shiftRight(int32_t Shift) {
+  if (!Shift || isZero())
+    return;
+  assert(Shift != INT32_MIN);
+  if (Shift < 0) {
+    shiftLeft(-Shift);
+    return;
+  }
+
+  // Shift as much as we can in the exponent.
+  int32_t ScaleShift = std::min(Shift, Scale - ScaledNumbers::MinScale);
+  Scale -= ScaleShift;
+  if (ScaleShift == Shift)
+    return;
+
+  // Shift the digits themselves.
+  Shift -= ScaleShift;
+  if (Shift >= Width) {
+    // Saturate.
+    *this = getZero();
+    return;
+  }
+
+  Digits >>= Shift;
+}
+
+template <typename T> struct isPodLike;
+template <typename T> struct isPodLike<ScaledNumber<T>> {
+  static const bool value = true;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_SCALEDNUMBER_H
diff --git a/linux-x64/clang/include/llvm/Support/ScopedPrinter.h b/linux-x64/clang/include/llvm/Support/ScopedPrinter.h
new file mode 100644
index 0000000..964d254
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ScopedPrinter.h
@@ -0,0 +1,389 @@
+//===-- ScopedPrinter.h ---------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SCOPEDPRINTER_H
+#define LLVM_SUPPORT_SCOPEDPRINTER_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+
+template <typename T> struct EnumEntry {
+  StringRef Name;
+  // While Name suffices in most cases, the GNU and LLVM output styles of
+  // ELFDumper sometimes display different strings for the same enum. If
+  // initialized appropriately, AltName holds the string that the GNU style
+  // emits.
+  // Example:
+  // For Elf_Ehdr->e_machine, the LLVM style prints "EM_X86_64" where the GNU
+  // style prints "Advanced Micro Devices X86-64".
+  StringRef AltName;
+  T Value;
+  EnumEntry(StringRef N, StringRef A, T V) : Name(N), AltName(A), Value(V) {}
+  EnumEntry(StringRef N, T V) : Name(N), AltName(N), Value(V) {}
+};
+
+struct HexNumber {
+  // To avoid sign-extension we have to explicitly cast to the appropriate
+  // unsigned type. The overloads are here so that every type that is implicitly
+  // convertible to an integer (including enums and endian helpers) can be used
+  // without requiring type traits or call-site changes.
+  HexNumber(char Value) : Value(static_cast<unsigned char>(Value)) {}
+  HexNumber(signed char Value) : Value(static_cast<unsigned char>(Value)) {}
+  HexNumber(signed short Value) : Value(static_cast<unsigned short>(Value)) {}
+  HexNumber(signed int Value) : Value(static_cast<unsigned int>(Value)) {}
+  HexNumber(signed long Value) : Value(static_cast<unsigned long>(Value)) {}
+  HexNumber(signed long long Value)
+      : Value(static_cast<unsigned long long>(Value)) {}
+  HexNumber(unsigned char Value) : Value(Value) {}
+  HexNumber(unsigned short Value) : Value(Value) {}
+  HexNumber(unsigned int Value) : Value(Value) {}
+  HexNumber(unsigned long Value) : Value(Value) {}
+  HexNumber(unsigned long long Value) : Value(Value) {}
+  uint64_t Value;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value);
+const std::string to_hexString(uint64_t Value, bool UpperCase = true);
+
+template <class T> const std::string to_string(const T &Value) {
+  std::string number;
+  llvm::raw_string_ostream stream(number);
+  stream << Value;
+  return stream.str();
+}
+
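+/// Pairs a raw_ostream with an indentation level plus label/value helpers.
+/// A usage sketch (illustrative; DictScope is defined later in this file):
+/// \code
+///   ScopedPrinter W(outs());
+///   DictScope D(W, "Header");     // prints "Header {" and indents
+///   W.printNumber("Version", 2u); //   Version: 2
+///   W.printHex("Flags", 0x11u);   //   Flags: 0x11
+///   // "}" is printed when D goes out of scope.
+/// \endcode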
+class ScopedPrinter {
+public:
+  ScopedPrinter(raw_ostream &OS) : OS(OS), IndentLevel(0) {}
+
+  void flush() { OS.flush(); }
+
+  void indent(int Levels = 1) { IndentLevel += Levels; }
+
+  void unindent(int Levels = 1) {
+    IndentLevel = std::max(0, IndentLevel - Levels);
+  }
+
+  void resetIndent() { IndentLevel = 0; }
+
+  int getIndentLevel() { return IndentLevel; }
+
+  void setPrefix(StringRef P) { Prefix = P; }
+
+  void printIndent() {
+    OS << Prefix;
+    for (int i = 0; i < IndentLevel; ++i)
+      OS << "  ";
+  }
+
+  template <typename T> HexNumber hex(T Value) { return HexNumber(Value); }
+
+  template <typename T, typename TEnum>
+  void printEnum(StringRef Label, T Value,
+                 ArrayRef<EnumEntry<TEnum>> EnumValues) {
+    StringRef Name;
+    bool Found = false;
+    for (const auto &EnumItem : EnumValues) {
+      if (EnumItem.Value == Value) {
+        Name = EnumItem.Name;
+        Found = true;
+        break;
+      }
+    }
+
+    if (Found) {
+      startLine() << Label << ": " << Name << " (" << hex(Value) << ")\n";
+    } else {
+      startLine() << Label << ": " << hex(Value) << "\n";
+    }
+  }
+
+  template <typename T, typename TFlag>
+  void printFlags(StringRef Label, T Value, ArrayRef<EnumEntry<TFlag>> Flags,
+                  TFlag EnumMask1 = {}, TFlag EnumMask2 = {},
+                  TFlag EnumMask3 = {}) {
+    typedef EnumEntry<TFlag> FlagEntry;
+    typedef SmallVector<FlagEntry, 10> FlagVector;
+    FlagVector SetFlags;
+
+    for (const auto &Flag : Flags) {
+      if (Flag.Value == 0)
+        continue;
+
+      TFlag EnumMask{};
+      if (Flag.Value & EnumMask1)
+        EnumMask = EnumMask1;
+      else if (Flag.Value & EnumMask2)
+        EnumMask = EnumMask2;
+      else if (Flag.Value & EnumMask3)
+        EnumMask = EnumMask3;
+      bool IsEnum = (Flag.Value & EnumMask) != 0;
+      if ((!IsEnum && (Value & Flag.Value) == Flag.Value) ||
+          (IsEnum && (Value & EnumMask) == Flag.Value)) {
+        SetFlags.push_back(Flag);
+      }
+    }
+
+    std::sort(SetFlags.begin(), SetFlags.end(), &flagName<TFlag>);
+
+    startLine() << Label << " [ (" << hex(Value) << ")\n";
+    for (const auto &Flag : SetFlags) {
+      startLine() << "  " << Flag.Name << " (" << hex(Flag.Value) << ")\n";
+    }
+    startLine() << "]\n";
+  }
+
+  template <typename T> void printFlags(StringRef Label, T Value) {
+    startLine() << Label << " [ (" << hex(Value) << ")\n";
+    uint64_t Flag = 1;
+    uint64_t Curr = Value;
+    while (Curr > 0) {
+      if (Curr & 1)
+        startLine() << "  " << hex(Flag) << "\n";
+      Curr >>= 1;
+      Flag <<= 1;
+    }
+    startLine() << "]\n";
+  }
+
+  void printNumber(StringRef Label, uint64_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, uint32_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, uint16_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, uint8_t Value) {
+    startLine() << Label << ": " << unsigned(Value) << "\n";
+  }
+
+  void printNumber(StringRef Label, int64_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, int32_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, int16_t Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printNumber(StringRef Label, int8_t Value) {
+    startLine() << Label << ": " << int(Value) << "\n";
+  }
+
+  void printNumber(StringRef Label, const APSInt &Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printBoolean(StringRef Label, bool Value) {
+    startLine() << Label << ": " << (Value ? "Yes" : "No") << '\n';
+  }
+
+  template <typename... T> void printVersion(StringRef Label, T... Version) {
+    startLine() << Label << ": ";
+    printVersionInternal(Version...);
+    getOStream() << "\n";
+  }
+
+  template <typename T> void printList(StringRef Label, const T &List) {
+    startLine() << Label << ": [";
+    bool Comma = false;
+    for (const auto &Item : List) {
+      if (Comma)
+        OS << ", ";
+      OS << Item;
+      Comma = true;
+    }
+    OS << "]\n";
+  }
+
+  template <typename T, typename U>
+  void printList(StringRef Label, const T &List, const U &Printer) {
+    startLine() << Label << ": [";
+    bool Comma = false;
+    for (const auto &Item : List) {
+      if (Comma)
+        OS << ", ";
+      Printer(OS, Item);
+      Comma = true;
+    }
+    OS << "]\n";
+  }
+
+  template <typename T> void printHexList(StringRef Label, const T &List) {
+    startLine() << Label << ": [";
+    bool Comma = false;
+    for (const auto &Item : List) {
+      if (Comma)
+        OS << ", ";
+      OS << hex(Item);
+      Comma = true;
+    }
+    OS << "]\n";
+  }
+
+  template <typename T> void printHex(StringRef Label, T Value) {
+    startLine() << Label << ": " << hex(Value) << "\n";
+  }
+
+  template <typename T> void printHex(StringRef Label, StringRef Str, T Value) {
+    startLine() << Label << ": " << Str << " (" << hex(Value) << ")\n";
+  }
+
+  template <typename T>
+  void printSymbolOffset(StringRef Label, StringRef Symbol, T Value) {
+    startLine() << Label << ": " << Symbol << '+' << hex(Value) << '\n';
+  }
+
+  void printString(StringRef Value) { startLine() << Value << "\n"; }
+
+  void printString(StringRef Label, StringRef Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  void printString(StringRef Label, const std::string &Value) {
+    printString(Label, StringRef(Value));
+  }
+
+  void printString(StringRef Label, const char* Value) {
+    printString(Label, StringRef(Value));
+  }
+
+  template <typename T>
+  void printNumber(StringRef Label, StringRef Str, T Value) {
+    startLine() << Label << ": " << Str << " (" << Value << ")\n";
+  }
+
+  void printBinary(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value) {
+    printBinaryImpl(Label, Str, Value, false);
+  }
+
+  void printBinary(StringRef Label, StringRef Str, ArrayRef<char> Value) {
+    auto V = makeArrayRef(reinterpret_cast<const uint8_t *>(Value.data()),
+                          Value.size());
+    printBinaryImpl(Label, Str, V, false);
+  }
+
+  void printBinary(StringRef Label, ArrayRef<uint8_t> Value) {
+    printBinaryImpl(Label, StringRef(), Value, false);
+  }
+
+  void printBinary(StringRef Label, ArrayRef<char> Value) {
+    auto V = makeArrayRef(reinterpret_cast<const uint8_t *>(Value.data()),
+                          Value.size());
+    printBinaryImpl(Label, StringRef(), V, false);
+  }
+
+  void printBinary(StringRef Label, StringRef Value) {
+    auto V = makeArrayRef(reinterpret_cast<const uint8_t *>(Value.data()),
+                          Value.size());
+    printBinaryImpl(Label, StringRef(), V, false);
+  }
+
+  void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value,
+                        uint32_t StartOffset) {
+    printBinaryImpl(Label, StringRef(), Value, true, StartOffset);
+  }
+
+  void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value) {
+    printBinaryImpl(Label, StringRef(), Value, true);
+  }
+
+  void printBinaryBlock(StringRef Label, StringRef Value) {
+    auto V = makeArrayRef(reinterpret_cast<const uint8_t *>(Value.data()),
+                          Value.size());
+    printBinaryImpl(Label, StringRef(), V, true);
+  }
+
+  template <typename T> void printObject(StringRef Label, const T &Value) {
+    startLine() << Label << ": " << Value << "\n";
+  }
+
+  raw_ostream &startLine() {
+    printIndent();
+    return OS;
+  }
+
+  raw_ostream &getOStream() { return OS; }
+
+private:
+  template <typename T> void printVersionInternal(T Value) {
+    getOStream() << Value;
+  }
+
+  template <typename S, typename T, typename... TArgs>
+  void printVersionInternal(S Value, T Value2, TArgs... Args) {
+    getOStream() << Value << ".";
+    printVersionInternal(Value2, Args...);
+  }
+
+  template <typename T>
+  static bool flagName(const EnumEntry<T> &lhs, const EnumEntry<T> &rhs) {
+    return lhs.Name < rhs.Name;
+  }
+
+  void printBinaryImpl(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value,
+                       bool Block, uint32_t StartOffset = 0);
+
+  raw_ostream &OS;
+  int IndentLevel;
+  StringRef Prefix;
+};
+
+template <>
+inline void
+ScopedPrinter::printHex<support::ulittle16_t>(StringRef Label,
+                                              support::ulittle16_t Value) {
+  startLine() << Label << ": " << hex(Value) << "\n";
+}
+
+template<char Open, char Close>
+struct DelimitedScope {
+  explicit DelimitedScope(ScopedPrinter &W) : W(W) {
+    W.startLine() << Open << '\n';
+    W.indent();
+  }
+
+  DelimitedScope(ScopedPrinter &W, StringRef N) : W(W) {
+    W.startLine() << N;
+    if (!N.empty())
+      W.getOStream() << ' ';
+    W.getOStream() << Open << '\n';
+    W.indent();
+  }
+
+  ~DelimitedScope() {
+    W.unindent();
+    W.startLine() << Close << '\n';
+  }
+
+  ScopedPrinter &W;
+};
+
+using DictScope = DelimitedScope<'{', '}'>;
+using ListScope = DelimitedScope<'[', ']'>;
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Signals.h b/linux-x64/clang/include/llvm/Support/Signals.h
new file mode 100644
index 0000000..dec5f58
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Signals.h
@@ -0,0 +1,77 @@
+//===- llvm/Support/Signals.h - Signal Handling support ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some helpful functions for dealing with the possibility of
+// unix signals occurring while your program is running.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SIGNALS_H
+#define LLVM_SUPPORT_SIGNALS_H
+
+#include <string>
+
+namespace llvm {
+class StringRef;
+class raw_ostream;
+
+namespace sys {
+
+  /// This function runs all the registered interrupt handlers, including the
+  /// removal of files registered by RemoveFileOnSignal.
+  void RunInterruptHandlers();
+
+  /// This function registers signal handlers to ensure that if a signal gets
+  /// delivered that the named file is removed.
+  /// @brief Remove a file if a fatal signal occurs.
+  bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = nullptr);
+
+  /// This function removes a file from the list of files to be removed on
+  /// signal delivery.
+  void DontRemoveFileOnSignal(StringRef Filename);
+
+  /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
+  /// process, print a stack trace and then exit.
+  /// \brief Print a stack trace if a fatal signal occurs.
+  /// \param Argv0 the current binary name, used to find the symbolizer
+  ///        relative to the current binary before searching $PATH; can be
+  ///        StringRef(), in which case we will only search $PATH.
+  /// \param DisableCrashReporting if \c true, disable the normal crash
+  ///        reporting mechanisms on the underlying operating system.
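+  ///
+  /// Typical use near the top of main() (an illustrative sketch):
+  /// \code
+  ///   int main(int argc, char **argv) {
+  ///     sys::PrintStackTraceOnErrorSignal(argv[0]);
+  ///     // ... rest of the program ...
+  ///   }
+  /// \endcode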
+  void PrintStackTraceOnErrorSignal(StringRef Argv0,
+                                    bool DisableCrashReporting = false);
+
+  /// Disable all system dialog boxes that appear when the process crashes.
+  void DisableSystemDialogsOnCrash();
+
+  /// \brief Print the stack trace using the given \c raw_ostream object.
+  void PrintStackTrace(raw_ostream &OS);
+
+  // Run all registered signal handlers.
+  void RunSignalHandlers();
+
+  /// AddSignalHandler - Add a function to be called when an abort/kill signal
+  /// is delivered to the process.  The handler can have a cookie passed to it
+  /// to identify what instance of the handler it is.
+  void AddSignalHandler(void (*FnPtr)(void *), void *Cookie);
+
+  /// This function registers a function to be called when the user "interrupts"
+  /// the program (typically by pressing ctrl-c).  When the user interrupts the
+  /// program, the specified interrupt function is called instead of the program
+  /// being killed, and the interrupt function is automatically disabled.  Note
+  /// that interrupt functions are not allowed to call any non-reentrant
+  /// functions.  A null interrupt function pointer disables the currently
+  /// installed function.  Note also that the handler may be executed on a
+  /// different thread on some platforms.
+  /// @brief Register a function to be called when ctrl-c is pressed.
+  void SetInterruptFunction(void (*IF)());
+} // End sys namespace
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Solaris/sys/regset.h b/linux-x64/clang/include/llvm/Support/Solaris/sys/regset.h
new file mode 100644
index 0000000..6a69ebe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Solaris/sys/regset.h
@@ -0,0 +1,39 @@
+/*===- llvm/Support/Solaris/sys/regset.h ------------------------*- C++ -*-===*
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file works around excessive name space pollution from the system header
+ * on Solaris hosts.
+ *
+ *===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_SUPPORT_SOLARIS_SYS_REGSET_H
+
+#include_next <sys/regset.h>
+
+#undef CS
+#undef DS
+#undef ES
+#undef FS
+#undef GS
+#undef SS
+#undef EAX
+#undef ECX
+#undef EDX
+#undef EBX
+#undef ESP
+#undef EBP
+#undef ESI
+#undef EDI
+#undef EIP
+#undef UESP
+#undef EFL
+#undef ERR
+#undef TRAPNO
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/SourceMgr.h b/linux-x64/clang/include/llvm/Support/SourceMgr.h
new file mode 100644
index 0000000..c08bf85
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SourceMgr.h
@@ -0,0 +1,282 @@
+//===- SourceMgr.h - Manager for Source Buffers & Diagnostics ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SMDiagnostic and SourceMgr classes.  This
+// provides a simple substrate for diagnostics, #include handling, and other low
+// level things for simple parsers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SOURCEMGR_H
+#define LLVM_SUPPORT_SOURCEMGR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SMLoc.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+class SMDiagnostic;
+class SMFixIt;
+
+/// This owns the files read by a parser, handles include stacks,
+/// and handles diagnostic wrangling.
+class SourceMgr {
+public:
+  enum DiagKind {
+    DK_Error,
+    DK_Warning,
+    DK_Remark,
+    DK_Note,
+  };
+
+  /// Clients that want to handle their own diagnostics in a custom way can
+  /// register a function pointer+context as a diagnostic handler.
+  /// It gets called each time PrintMessage is invoked.
+  using DiagHandlerTy = void (*)(const SMDiagnostic &, void *Context);
+
+private:
+  struct SrcBuffer {
+    /// The memory buffer for the file.
+    std::unique_ptr<MemoryBuffer> Buffer;
+
+    /// This is the location of the parent include, or null if at the top level.
+    SMLoc IncludeLoc;
+  };
+
+  /// This is all of the buffers that we are reading from.
+  std::vector<SrcBuffer> Buffers;
+
+  /// This is the list of directories we should search for include files in.
+  std::vector<std::string> IncludeDirectories;
+
+  /// This is a cache for line number queries; its implementation is really
+  /// private to SourceMgr.cpp.
+  mutable void *LineNoCache = nullptr;
+
+  DiagHandlerTy DiagHandler = nullptr;
+  void *DiagContext = nullptr;
+
+  bool isValidBufferID(unsigned i) const { return i && i <= Buffers.size(); }
+
+public:
+  SourceMgr() = default;
+  SourceMgr(const SourceMgr &) = delete;
+  SourceMgr &operator=(const SourceMgr &) = delete;
+  ~SourceMgr();
+
+  void setIncludeDirs(const std::vector<std::string> &Dirs) {
+    IncludeDirectories = Dirs;
+  }
+
+  /// Specify a diagnostic handler to be invoked every time PrintMessage is
+  /// called. \p Ctx is passed into the handler when it is invoked.
+  void setDiagHandler(DiagHandlerTy DH, void *Ctx = nullptr) {
+    DiagHandler = DH;
+    DiagContext = Ctx;
+  }
+
+  DiagHandlerTy getDiagHandler() const { return DiagHandler; }
+  void *getDiagContext() const { return DiagContext; }
+
+  const SrcBuffer &getBufferInfo(unsigned i) const {
+    assert(isValidBufferID(i));
+    return Buffers[i - 1];
+  }
+
+  const MemoryBuffer *getMemoryBuffer(unsigned i) const {
+    assert(isValidBufferID(i));
+    return Buffers[i - 1].Buffer.get();
+  }
+
+  unsigned getNumBuffers() const {
+    return Buffers.size();
+  }
+
+  unsigned getMainFileID() const {
+    assert(getNumBuffers());
+    return 1;
+  }
+
+  SMLoc getParentIncludeLoc(unsigned i) const {
+    assert(isValidBufferID(i));
+    return Buffers[i - 1].IncludeLoc;
+  }
+
+  /// Add a new source buffer to this source manager. This takes ownership of
+  /// the memory buffer.
+  unsigned AddNewSourceBuffer(std::unique_ptr<MemoryBuffer> F,
+                              SMLoc IncludeLoc) {
+    SrcBuffer NB;
+    NB.Buffer = std::move(F);
+    NB.IncludeLoc = IncludeLoc;
+    Buffers.push_back(std::move(NB));
+    return Buffers.size();
+  }
+
+  /// Search for a file with the specified name in the current directory or in
+  /// one of the IncludeDirs.
+  ///
+  /// If no file is found, this returns 0, otherwise it returns the buffer ID
+  /// of the stacked file. The full path to the included file can be found in
+  /// \p IncludedFile.
+  unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
+                          std::string &IncludedFile);
+
+  /// Return the ID of the buffer containing the specified location.
+  ///
+  /// 0 is returned if the buffer is not found.
+  unsigned FindBufferContainingLoc(SMLoc Loc) const;
+
+  /// Find the line number for the specified location in the specified file.
+  /// This is not a fast method.
+  unsigned FindLineNumber(SMLoc Loc, unsigned BufferID = 0) const {
+    return getLineAndColumn(Loc, BufferID).first;
+  }
+
+  /// Find the line and column number for the specified location in the
+  /// specified file. This is not a fast method.
+  std::pair<unsigned, unsigned> getLineAndColumn(SMLoc Loc,
+                                                 unsigned BufferID = 0) const;
+
+  /// Emit a message about the specified location with the specified string.
+  ///
+  /// \param ShowColors Display colored messages if output is a terminal and
+  /// the default error handler is used.
+  void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind,
+                    const Twine &Msg,
+                    ArrayRef<SMRange> Ranges = None,
+                    ArrayRef<SMFixIt> FixIts = None,
+                    bool ShowColors = true) const;
+
+  /// Emits a diagnostic to llvm::errs().
+  void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+                    ArrayRef<SMRange> Ranges = None,
+                    ArrayRef<SMFixIt> FixIts = None,
+                    bool ShowColors = true) const;
+
+  /// Emits a manually-constructed diagnostic to the given output stream.
+  ///
+  /// \param ShowColors Display colored messages if output is a terminal and
+  /// the default error handler is used.
+  void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
+                    bool ShowColors = true) const;
+
+  /// Return an SMDiagnostic at the specified location with the specified
+  /// string.
+  ///
+  /// \param Msg If non-null, the kind of message (e.g., "error") which is
+  /// prefixed to the message.
+  SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+                          ArrayRef<SMRange> Ranges = None,
+                          ArrayRef<SMFixIt> FixIts = None) const;
+
+  /// Prints the names of included files and the line of the file they were
+  /// included from. A diagnostic handler can use this before printing its
+  /// custom formatted message.
+  ///
+  /// \param IncludeLoc The location of the include.
+  /// \param OS the raw_ostream to print on.
+  void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
+};
+
+/// Represents a single fixit, a replacement of one range of text with another.
+class SMFixIt {
+  SMRange Range;
+
+  std::string Text;
+
+public:
+  // FIXME: Twine.str() is not very efficient.
+  SMFixIt(SMLoc Loc, const Twine &Insertion)
+    : Range(Loc, Loc), Text(Insertion.str()) {
+    assert(Loc.isValid());
+  }
+
+  // FIXME: Twine.str() is not very efficient.
+  SMFixIt(SMRange R, const Twine &Replacement)
+    : Range(R), Text(Replacement.str()) {
+    assert(R.isValid());
+  }
+
+  StringRef getText() const { return Text; }
+  SMRange getRange() const { return Range; }
+
+  bool operator<(const SMFixIt &Other) const {
+    if (Range.Start.getPointer() != Other.Range.Start.getPointer())
+      return Range.Start.getPointer() < Other.Range.Start.getPointer();
+    if (Range.End.getPointer() != Other.Range.End.getPointer())
+      return Range.End.getPointer() < Other.Range.End.getPointer();
+    return Text < Other.Text;
+  }
+};
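+
+// A minimal usage sketch (hedged; SM and Loc are assumed to be a SourceMgr and
+// a valid SMLoc inside one of its buffers):
+//
+//   SMFixIt Hint(Loc, ";");  // suggest inserting a semicolon
+//   SMDiagnostic D = SM.GetMessage(Loc, SourceMgr::DK_Error,
+//                                  "missing semicolon", None, Hint);
+//   SM.PrintMessage(errs(), D);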
+
+/// Instances of this class encapsulate one diagnostic report, allowing
+/// printing to a raw_ostream as a caret diagnostic.
+class SMDiagnostic {
+  const SourceMgr *SM = nullptr;
+  SMLoc Loc;
+  std::string Filename;
+  int LineNo = 0;
+  int ColumnNo = 0;
+  SourceMgr::DiagKind Kind = SourceMgr::DK_Error;
+  std::string Message, LineContents;
+  std::vector<std::pair<unsigned, unsigned>> Ranges;
+  SmallVector<SMFixIt, 4> FixIts;
+
+public:
+  // Null diagnostic.
+  SMDiagnostic() = default;
+  // Diagnostic with no location (e.g. file not found, command line arg error).
+  SMDiagnostic(StringRef filename, SourceMgr::DiagKind Knd, StringRef Msg)
+    : Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd), Message(Msg) {}
+
+  // Diagnostic with a location.
+  SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
+               int Line, int Col, SourceMgr::DiagKind Kind,
+               StringRef Msg, StringRef LineStr,
+               ArrayRef<std::pair<unsigned,unsigned>> Ranges,
+               ArrayRef<SMFixIt> FixIts = None);
+
+  const SourceMgr *getSourceMgr() const { return SM; }
+  SMLoc getLoc() const { return Loc; }
+  StringRef getFilename() const { return Filename; }
+  int getLineNo() const { return LineNo; }
+  int getColumnNo() const { return ColumnNo; }
+  SourceMgr::DiagKind getKind() const { return Kind; }
+  StringRef getMessage() const { return Message; }
+  StringRef getLineContents() const { return LineContents; }
+  ArrayRef<std::pair<unsigned, unsigned>> getRanges() const { return Ranges; }
+
+  void addFixIt(const SMFixIt &Hint) {
+    FixIts.push_back(Hint);
+  }
+
+  ArrayRef<SMFixIt> getFixIts() const {
+    return FixIts;
+  }
+
+  void print(const char *ProgName, raw_ostream &S, bool ShowColors = true,
+             bool ShowKindLabel = true) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_SOURCEMGR_H
diff --git a/linux-x64/clang/include/llvm/Support/SpecialCaseList.h b/linux-x64/clang/include/llvm/Support/SpecialCaseList.h
new file mode 100644
index 0000000..fd62fc4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SpecialCaseList.h
@@ -0,0 +1,155 @@
+//===-- SpecialCaseList.h - special case list for sanitizers ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class used to parse user-provided text files with
+// "special case lists" for code sanitizers. Such files are used to
+// define an "ABI list" for DataFlowSanitizer and blacklists for sanitizers
+// like AddressSanitizer or UndefinedBehaviorSanitizer.
+//
+// Empty lines and lines starting with "#" are ignored. Sections are defined
+// using a '[section_name]' header and can be used to specify sanitizers the
+// entries below it apply to. Section names are regular expressions, and
+// entries without a section header match all sections (e.g. an '[*]' header
+// is assumed.)
+// The remaining lines should have the form:
+//   prefix:wildcard_expression[=category]
+// If category is not specified, it is assumed to be the empty string.
+// Definitions of "prefix" and "category" are sanitizer-specific. For example,
+// sanitizer blacklists support prefixes "src", "fun" and "global".
+// Wildcard expressions define, respectively, source files, functions or
+// globals which shouldn't be instrumented.
+// Examples of categories:
+//   "functional": used in DFSan to list functions with pure functional
+//                 semantics.
+//   "init": used in ASan blacklist to disable initialization-order bugs
+//           detection for certain globals or source files.
+// Full special case list file example:
+// ---
+// [address]
+// # Blacklisted items:
+// fun:*_ZN4base6subtle*
+// global:*global_with_bad_access_or_initialization*
+// global:*global_with_initialization_issues*=init
+// type:*Namespace::ClassName*=init
+// src:file_with_tricky_code.cc
+// src:ignore-global-initializers-issues.cc=init
+//
+// [dataflow]
+// # Functions with pure functional semantics:
+// fun:cos=functional
+// fun:sin=functional
+// ---
+// Note that the wildcard is in fact an llvm::Regex, but * is automatically
+// replaced with .*
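+//
+// A minimal usage sketch (hedged; the list path and symbol are illustrative):
+//
+//   std::string Error;
+//   auto SCL = SpecialCaseList::create({"blacklist.txt"}, Error);
+//   if (!SCL)
+//     report_fatal_error(Error);
+//   if (SCL->inSection("address", "fun", "_ZN4base6subtleFoo"))
+//     ; // matched: skip instrumenting this function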
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SPECIALCASELIST_H
+#define LLVM_SUPPORT_SPECIALCASELIST_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/TrigramIndex.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class MemoryBuffer;
+class Regex;
+class StringRef;
+
+class SpecialCaseList {
+public:
+  /// Parses the special case list entries from files. On failure, returns
+  /// nullptr and writes an error message to \p Error.
+  static std::unique_ptr<SpecialCaseList>
+  create(const std::vector<std::string> &Paths, std::string &Error);
+  /// Parses the special case list from a memory buffer. On failure, returns
+  /// nullptr and writes an error message to \p Error.
+  static std::unique_ptr<SpecialCaseList> create(const MemoryBuffer *MB,
+                                                 std::string &Error);
+  /// Parses the special case list entries from files. On failure, reports a
+  /// fatal error.
+  static std::unique_ptr<SpecialCaseList>
+  createOrDie(const std::vector<std::string> &Paths);
+
+  ~SpecialCaseList();
+
+  /// Returns true if the special case list contains a line
+  /// \code
+  ///   @Prefix:<E>=@Category
+  /// \endcode
+  /// where @Query satisfies wildcard expression <E> in a given @Section.
+  bool inSection(StringRef Section, StringRef Prefix, StringRef Query,
+                 StringRef Category = StringRef()) const;
+
+  /// Returns the line number corresponding to the special case list entry if
+  /// the special case list contains a line
+  /// \code
+  ///   @Prefix:<E>=@Category
+  /// \endcode
+  /// where @Query satisfies wildcard expression <E> in a given @Section.
+  /// Returns zero if there is no blacklist entry corresponding to this
+  /// expression.
+  unsigned inSectionBlame(StringRef Section, StringRef Prefix, StringRef Query,
+                          StringRef Category = StringRef()) const;
+
+protected:
+  // Implementations of the create*() functions that can also be used by derived
+  // classes.
+  bool createInternal(const std::vector<std::string> &Paths,
+                      std::string &Error);
+  bool createInternal(const MemoryBuffer *MB, std::string &Error);
+
+  SpecialCaseList() = default;
+  SpecialCaseList(SpecialCaseList const &) = delete;
+  SpecialCaseList &operator=(SpecialCaseList const &) = delete;
+
+  /// Represents a set of regular expressions.  Regular expressions which are
+  /// "literal" (i.e. no regex metacharacters) are stored in Strings.  The
+  /// reason for doing so is efficiency; StringMap is much faster at matching
+  /// literal strings than Regex.
+  class Matcher {
+  public:
+    bool insert(std::string Regexp, unsigned LineNumber, std::string &REError);
+    // Returns the line number in the source file that this query matches to.
+    // Returns zero if no match is found.
+    unsigned match(StringRef Query) const;
+
+  private:
+    StringMap<unsigned> Strings;
+    TrigramIndex Trigrams;
+    std::vector<std::pair<std::unique_ptr<Regex>, unsigned>> RegExes;
+  };
+
+  using SectionEntries = StringMap<StringMap<Matcher>>;
+
+  struct Section {
+    Section(std::unique_ptr<Matcher> M) : SectionMatcher(std::move(M)) {}
+
+    std::unique_ptr<Matcher> SectionMatcher;
+    SectionEntries Entries;
+  };
+
+  std::vector<Section> Sections;
+
+  /// Parses just-constructed SpecialCaseList entries from a memory buffer.
+  bool parse(const MemoryBuffer *MB, StringMap<size_t> &SectionsMap,
+             std::string &Error);
+
+  // Helper method for derived classes to search by Prefix, Query, and Category
+  // once they have already resolved a section entry.
+  unsigned inSectionBlame(const SectionEntries &Entries, StringRef Prefix,
+                          StringRef Query, StringRef Category) const;
+};
+
+}  // namespace llvm
+
+#endif  // LLVM_SUPPORT_SPECIALCASELIST_H
+
diff --git a/linux-x64/clang/include/llvm/Support/StringPool.h b/linux-x64/clang/include/llvm/Support/StringPool.h
new file mode 100644
index 0000000..bb5fd07
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/StringPool.h
@@ -0,0 +1,140 @@
+//===- StringPool.h - Interned string pool ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares an interned string pool, which helps reduce the cost of
+// strings by using the same storage for identical strings.
+//
+// To intern a string:
+//
+//   StringPool Pool;
+//   PooledStringPtr Str = Pool.intern("wakka wakka");
+//
+// To use the value of an interned string, use operator bool and operator*:
+//
+//   if (Str)
+//     cerr << "the string is " << *Str << "\n";
+//
+// Pooled strings are immutable, but you can change a PooledStringPtr to point
+// to another instance. So that interned strings can eventually be freed,
+// strings in the string pool are reference-counted (automatically).
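+//
+// Because identical strings share one entry, pointers interned from equal
+// text compare equal (a sketch, assuming the same pool):
+//
+//   PooledStringPtr A = Pool.intern("wakka wakka");
+//   PooledStringPtr B = Pool.intern("wakka wakka");
+//   assert(A == B);  // one shared entry, refcount of two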
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_STRINGPOOL_H
+#define LLVM_SUPPORT_STRINGPOOL_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+
+namespace llvm {
+
+  class PooledStringPtr;
+
+  /// StringPool - An interned string pool. Use the intern method to add a
+  /// string. Strings are removed automatically as PooledStringPtrs are
+  /// destroyed.
+  class StringPool {
+    /// PooledString - This is the value of an entry in the pool's interning
+    /// table.
+    struct PooledString {
+      StringPool *Pool = nullptr;  ///< So the string can remove itself.
+      unsigned Refcount = 0;       ///< Number of referencing PooledStringPtrs.
+
+    public:
+      PooledString() = default;
+    };
+
+    friend class PooledStringPtr;
+
+    using table_t = StringMap<PooledString>;
+    using entry_t = StringMapEntry<PooledString>;
+    table_t InternTable;
+
+  public:
+    StringPool();
+    ~StringPool();
+
+    /// intern - Adds a string to the pool and returns a reference-counted
+    /// pointer to it. No additional memory is allocated if the string already
+    /// exists in the pool.
+    PooledStringPtr intern(StringRef Str);
+
+    /// empty - Checks whether the pool is empty. Returns true if so.
+    ///
+    inline bool empty() const { return InternTable.empty(); }
+  };
+
+  /// PooledStringPtr - A pointer to an interned string. Use operator bool to
+  /// test whether the pointer is valid, and operator * to get the string if so.
+  /// This is a lightweight value class with storage requirements equivalent to
+  /// a single pointer, but it does have reference-counting overhead when
+  /// copied.
+  class PooledStringPtr {
+    using entry_t = StringPool::entry_t;
+
+    entry_t *S = nullptr;
+
+  public:
+    PooledStringPtr() = default;
+
+    explicit PooledStringPtr(entry_t *E) : S(E) {
+      if (S) ++S->getValue().Refcount;
+    }
+
+    PooledStringPtr(const PooledStringPtr &That) : S(That.S) {
+      if (S) ++S->getValue().Refcount;
+    }
+
+    PooledStringPtr &operator=(const PooledStringPtr &That) {
+      if (S != That.S) {
+        clear();
+        S = That.S;
+        if (S) ++S->getValue().Refcount;
+      }
+      return *this;
+    }
+
+    void clear() {
+      if (!S)
+        return;
+      if (--S->getValue().Refcount == 0) {
+        S->getValue().Pool->InternTable.remove(S);
+        S->Destroy();
+      }
+      S = nullptr;
+    }
+
+    ~PooledStringPtr() { clear(); }
+
+    inline const char *begin() const {
+      assert(*this && "Attempt to dereference empty PooledStringPtr!");
+      return S->getKeyData();
+    }
+
+    inline const char *end() const {
+      assert(*this && "Attempt to dereference empty PooledStringPtr!");
+      return S->getKeyData() + S->getKeyLength();
+    }
+
+    inline unsigned size() const {
+      assert(*this && "Attempt to dereference empty PooledStringPtr!");
+      return S->getKeyLength();
+    }
+
+    inline const char *operator*() const { return begin(); }
+    inline explicit operator bool() const { return S != nullptr; }
+
+    inline bool operator==(const PooledStringPtr &That) const { return S == That.S; }
+    inline bool operator!=(const PooledStringPtr &That) const { return S != That.S; }
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_STRINGPOOL_H
diff --git a/linux-x64/clang/include/llvm/Support/StringSaver.h b/linux-x64/clang/include/llvm/Support/StringSaver.h
new file mode 100644
index 0000000..e85b289
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/StringSaver.h
@@ -0,0 +1,32 @@
+//===- llvm/Support/StringSaver.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_STRINGSAVER_H
+#define LLVM_SUPPORT_STRINGSAVER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// \brief Saves strings in stable storage backed by the given allocator and
+/// returns a StringRef with a stable character pointer.
+class StringSaver final {
+  BumpPtrAllocator &Alloc;
+
+public:
+  StringSaver(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
+  StringRef save(const char *S) { return save(StringRef(S)); }
+  StringRef save(StringRef S);
+  StringRef save(const Twine &S) { return save(StringRef(S.str())); }
+  StringRef save(const std::string &S) { return save(StringRef(S)); }
+};
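+
+// A minimal usage sketch (hedged; the saved data lives as long as the
+// allocator does):
+//
+//   BumpPtrAllocator Alloc;
+//   StringSaver Saver(Alloc);
+//   StringRef S = Saver.save("opt=" + Twine(42));
+//   // S.data() stays valid until Alloc is destroyed.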
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/SwapByteOrder.h b/linux-x64/clang/include/llvm/Support/SwapByteOrder.h
new file mode 100644
index 0000000..71d3724
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SwapByteOrder.h
@@ -0,0 +1,127 @@
+//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic and optimized functions to swap the byte order of
+// an integral type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SWAPBYTEORDER_H
+#define LLVM_SUPPORT_SWAPBYTEORDER_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include <cstddef>
+#if defined(_MSC_VER) && !defined(_DEBUG)
+#include <stdlib.h>
+#endif
+
+namespace llvm {
+namespace sys {
+
+/// SwapByteOrder_16 - This function returns a byte-swapped representation of
+/// the 16-bit argument.
+inline uint16_t SwapByteOrder_16(uint16_t value) {
+#if defined(_MSC_VER) && !defined(_DEBUG)
+  // The DLL version of the runtime lacks these functions (bug!?), but in a
+  // release build they're replaced with BSWAP instructions anyway.
+  return _byteswap_ushort(value);
+#else
+  uint16_t Hi = value << 8;
+  uint16_t Lo = value >> 8;
+  return Hi | Lo;
+#endif
+}
+
+/// SwapByteOrder_32 - This function returns a byte-swapped representation of
+/// the 32-bit argument.
+inline uint32_t SwapByteOrder_32(uint32_t value) {
+#if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC))
+  return __builtin_bswap32(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+  return _byteswap_ulong(value);
+#else
+  uint32_t Byte0 = value & 0x000000FF;
+  uint32_t Byte1 = value & 0x0000FF00;
+  uint32_t Byte2 = value & 0x00FF0000;
+  uint32_t Byte3 = value & 0xFF000000;
+  return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
+#endif
+}
+
+/// SwapByteOrder_64 - This function returns a byte-swapped representation of
+/// the 64-bit argument.
+inline uint64_t SwapByteOrder_64(uint64_t value) {
+#if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC))
+  return __builtin_bswap64(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+  return _byteswap_uint64(value);
+#else
+  uint64_t Hi = SwapByteOrder_32(uint32_t(value));
+  uint32_t Lo = SwapByteOrder_32(uint32_t(value >> 32));
+  return (Hi << 32) | Lo;
+#endif
+}
+
+inline unsigned char  getSwappedBytes(unsigned char C) { return C; }
+inline   signed char  getSwappedBytes(signed char C) { return C; }
+inline          char  getSwappedBytes(char C) { return C; }
+
+inline unsigned short getSwappedBytes(unsigned short C) { return SwapByteOrder_16(C); }
+inline   signed short getSwappedBytes(  signed short C) { return SwapByteOrder_16(C); }
+
+inline unsigned int   getSwappedBytes(unsigned int   C) { return SwapByteOrder_32(C); }
+inline   signed int   getSwappedBytes(  signed int   C) { return SwapByteOrder_32(C); }
+
+#if __LONG_MAX__ == __INT_MAX__
+inline unsigned long  getSwappedBytes(unsigned long  C) { return SwapByteOrder_32(C); }
+inline   signed long  getSwappedBytes(  signed long  C) { return SwapByteOrder_32(C); }
+#elif __LONG_MAX__ == __LONG_LONG_MAX__
+inline unsigned long  getSwappedBytes(unsigned long  C) { return SwapByteOrder_64(C); }
+inline   signed long  getSwappedBytes(  signed long  C) { return SwapByteOrder_64(C); }
+#else
+#error "Unknown long size!"
+#endif
+
+inline unsigned long long getSwappedBytes(unsigned long long C) {
+  return SwapByteOrder_64(C);
+}
+inline signed long long getSwappedBytes(signed long long C) {
+  return SwapByteOrder_64(C);
+}
+
+inline float getSwappedBytes(float C) {
+  union {
+    uint32_t i;
+    float f;
+  } in, out;
+  in.f = C;
+  out.i = SwapByteOrder_32(in.i);
+  return out.f;
+}
+
+inline double getSwappedBytes(double C) {
+  union {
+    uint64_t i;
+    double d;
+  } in, out;
+  in.d = C;
+  out.i = SwapByteOrder_64(in.i);
+  return out.d;
+}
+
+template<typename T>
+inline void swapByteOrder(T &Value) {
+  Value = getSwappedBytes(Value);
+}
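+
+// A minimal usage sketch (illustrative values, not part of this header):
+//
+//   uint32_t V = 0x11223344;
+//   uint32_t W = getSwappedBytes(V); // W == 0x44332211
+//   swapByteOrder(V);                // V is now 0x44332211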
+
+} // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/SystemUtils.h b/linux-x64/clang/include/llvm/Support/SystemUtils.h
new file mode 100644
index 0000000..2997b1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/SystemUtils.h
@@ -0,0 +1,32 @@
+//===- SystemUtils.h - Utilities to do low-level system stuff ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions used to do a variety of low-level, often
+// system-specific, tasks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SYSTEMUTILS_H
+#define LLVM_SUPPORT_SYSTEMUTILS_H
+
+namespace llvm {
+  class raw_ostream;
+
+/// Determine if the raw_ostream provided is connected to a terminal. If so,
+/// generate a warning message to errs() advising against display of bitcode
+/// and return true. Otherwise just return false.
+/// @brief Check for output written to a console
+bool CheckBitcodeOutputToConsole(
+  raw_ostream &stream_to_check, ///< The stream to be checked
+  bool print_warning = true     ///< Control whether warnings are printed
+);
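+
+// A minimal usage sketch (hedged; Out is assumed to be the tool's output
+// stream):
+//
+//   if (CheckBitcodeOutputToConsole(Out))
+//     return; // refuse to write raw bitcode to a terminal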
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/TarWriter.h b/linux-x64/clang/include/llvm/Support/TarWriter.h
new file mode 100644
index 0000000..639f61b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TarWriter.h
@@ -0,0 +1,34 @@
+//===-- llvm/Support/TarWriter.h - Tar archive file creator -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TAR_WRITER_H
+#define LLVM_SUPPORT_TAR_WRITER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+class TarWriter {
+public:
+  static Expected<std::unique_ptr<TarWriter>> create(StringRef OutputPath,
+                                                     StringRef BaseDir);
+
+  void append(StringRef Path, StringRef Data);
+
+private:
+  TarWriter(int FD, StringRef BaseDir);
+  raw_fd_ostream OS;
+  std::string BaseDir;
+  StringSet<> Files;
+};
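+
+// A minimal usage sketch (hedged; paths and contents are illustrative, and
+// the enclosing function is assumed to return llvm::Error):
+//
+//   auto TarOrErr = TarWriter::create("repro.tar", "repro");
+//   if (!TarOrErr)
+//     return TarOrErr.takeError();
+//   (*TarOrErr)->append("cmdline", "clang -c foo.c\n");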
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/TargetOpcodes.def b/linux-x64/clang/include/llvm/Support/TargetOpcodes.def
new file mode 100644
index 0000000..0614a00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TargetOpcodes.def
@@ -0,0 +1,466 @@
+//===-- llvm/Support/TargetOpcodes.def - Target Indep Opcodes ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target independent instruction opcodes.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+/// HANDLE_TARGET_OPCODE defines an opcode and its associated enum value.
+///
+#ifndef HANDLE_TARGET_OPCODE
+#define HANDLE_TARGET_OPCODE(OPC, NUM)
+#endif
+
+/// HANDLE_TARGET_OPCODE_MARKER defines an alternative identifier for an opcode.
+///
+#ifndef HANDLE_TARGET_OPCODE_MARKER
+#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC)
+#endif
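+
+/// A minimal sketch of how clients typically expand this file (hedged; the
+/// enum name is illustrative). Defining the macros before inclusion overrides
+/// the empty defaults above:
+///
+///   enum TargetOpcode {
+///   #define HANDLE_TARGET_OPCODE(OPC, ...) OPC,
+///   #define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
+///   #include "llvm/Support/TargetOpcodes.def"
+///   };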
+
+/// Every instruction defined here must also appear in Target.td.
+///
+HANDLE_TARGET_OPCODE(PHI)
+HANDLE_TARGET_OPCODE(INLINEASM)
+HANDLE_TARGET_OPCODE(CFI_INSTRUCTION)
+HANDLE_TARGET_OPCODE(EH_LABEL)
+HANDLE_TARGET_OPCODE(GC_LABEL)
+HANDLE_TARGET_OPCODE(ANNOTATION_LABEL)
+
+/// KILL - This instruction is a noop that is used only to adjust the
+/// liveness of registers. This can be useful when dealing with
+/// sub-registers.
+HANDLE_TARGET_OPCODE(KILL)
+
+/// EXTRACT_SUBREG - This instruction takes two operands: a register
+/// that has subregisters, and a subregister index. It returns the
+/// extracted subregister value. This is commonly used to implement
+/// truncation operations on target architectures which support it.
+HANDLE_TARGET_OPCODE(EXTRACT_SUBREG)
+
+/// INSERT_SUBREG - This instruction takes three operands: a register that
+/// has subregisters, a register providing an insert value, and a
+/// subregister index. It returns the value of the first register with the
+/// value of the second register inserted. The first register is often
+/// defined by an IMPLICIT_DEF, because it is commonly used to implement
+/// anyext operations on target architectures which support it.
+HANDLE_TARGET_OPCODE(INSERT_SUBREG)
+
+/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
+HANDLE_TARGET_OPCODE(IMPLICIT_DEF)
+
+/// SUBREG_TO_REG - Assert the value of bits in a super register.
+/// The result of this instruction is the value of the second operand inserted
+/// into the subregister specified by the third operand. All other bits are
+/// assumed to be equal to the bits in the immediate integer constant in the
+/// first operand. This instruction just communicates information; no code
+/// should be generated.
+/// This is typically used after an instruction where the write to a subregister
+/// implicitly cleared the bits in the super registers.
+HANDLE_TARGET_OPCODE(SUBREG_TO_REG)
+
+/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
+/// register-to-register copy into a specific register class. This is only
+/// used between instruction selection and MachineInstr creation, before
+/// virtual registers have been created for all the instructions, and it's
+/// only needed in cases where the register classes implied by the
+/// instructions are insufficient. It is emitted as a COPY MachineInstr.
+HANDLE_TARGET_OPCODE(COPY_TO_REGCLASS)
+
+/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
+HANDLE_TARGET_OPCODE(DBG_VALUE)
+
+/// REG_SEQUENCE - This variadic instruction is used to form a register that
+/// represents a consecutive sequence of sub-registers. It's used as a
+/// register coalescing / allocation aid and must be eliminated before code
+/// emission.
+// In SDNode form, the first operand encodes the register class created by
+// the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
+// pair.  Once it has been lowered to a MachineInstr, the regclass operand
+// is no longer present.
+/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
+/// After register coalescing, references of v1024 should be replaced with
+/// v1027:3, v1025 with v1027:4, etc.
+HANDLE_TARGET_OPCODE(REG_SEQUENCE)
+
+/// COPY - Target-independent register copy. This instruction can also be
+/// used to copy between subregisters of virtual registers.
+HANDLE_TARGET_OPCODE(COPY)
+
+/// BUNDLE - This instruction represents an instruction bundle. Instructions
+/// that immediately follow a BUNDLE instruction and are marked with the
+/// 'InsideBundle' flag are inside the bundle.
+HANDLE_TARGET_OPCODE(BUNDLE)
+
+/// Lifetime markers.
+HANDLE_TARGET_OPCODE(LIFETIME_START)
+HANDLE_TARGET_OPCODE(LIFETIME_END)
+
+/// A Stackmap instruction captures the location of live variables at its
+/// position in the instruction stream. It is followed by a shadow of bytes
+/// that must lie within the function and not contain another stackmap.
+HANDLE_TARGET_OPCODE(STACKMAP)
+
+/// FEntry call - This is a marker instruction which gets translated into a
+/// raw fentry call.
+HANDLE_TARGET_OPCODE(FENTRY_CALL)
+
+/// Patchable call instruction - this instruction represents a call to a
+/// constant address, followed by a series of NOPs. It is intended to
+/// support optimizations for dynamic languages (such as JavaScript) that
+/// rewrite calls to runtimes with more efficient code sequences.
+/// This also implies a stack map.
+HANDLE_TARGET_OPCODE(PATCHPOINT)
+
+/// This pseudo-instruction loads the stack guard value. Targets which need
+/// to prevent the stack guard value or address from being spilled to the
+/// stack should override TargetLowering::emitLoadStackGuardNode and
+/// additionally expand this pseudo after register allocation.
+HANDLE_TARGET_OPCODE(LOAD_STACK_GUARD)
+
+/// Call instruction with associated vm state for deoptimization and list
+/// of live pointers for relocation by the garbage collector.  It is
+/// intended to support garbage collection with fully precise relocating
+/// collectors and deoptimizations in either the callee or caller.
+HANDLE_TARGET_OPCODE(STATEPOINT)
+
+/// Instruction that records the offset of a local stack allocation passed to
+/// llvm.localescape. It has two arguments: the symbol for the label and the
+/// frame index of the local stack allocation.
+HANDLE_TARGET_OPCODE(LOCAL_ESCAPE)
+
+/// Wraps a machine instruction which can fault, bundled with associated
+/// information on how to handle such a fault.
+/// For example, a load instruction that may page fault, bundled with
+/// information on how to handle that fault.  It is intended to support
+/// "zero cost" null checks in managed languages by allowing LLVM to fold
+/// comparisons into existing memory operations.
+HANDLE_TARGET_OPCODE(FAULTING_OP)
+
+/// Wraps a machine instruction to add patchability constraints.  An
+/// instruction wrapped in PATCHABLE_OP has to either have a minimum
+/// size or be preceded by a nop of that size.  The first operand is
+/// an immediate denoting the minimum size of the instruction, the
+/// second operand is an immediate denoting the opcode of the original
+/// instruction.  The rest of the operands are the operands of the
+/// original instruction.
+HANDLE_TARGET_OPCODE(PATCHABLE_OP)
+
+/// This is a marker instruction which gets translated into a nop sled, useful
+/// for inserting instrumentation instructions at runtime.
+HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_ENTER)
+
+/// Wraps a return instruction and its operands to enable adding nop sleds
+/// either before or after the return. The nop sleds are useful for inserting
+/// instrumentation instructions at runtime.
+/// The patch here replaces the return instruction.
+HANDLE_TARGET_OPCODE(PATCHABLE_RET)
+
+/// This is a marker instruction which gets translated into a nop sled, useful
+/// for inserting instrumentation instructions at runtime.
+/// The patch here prepends the return instruction.
+/// The same approach as on x86_64 is not possible for ARM because it has
+/// multiple return instructions. Furthermore, the CPU allows parameterized
+/// and even conditional return instructions. The current ARM implementation
+/// relies on the fact that LLVM does not currently seem to generate
+/// conditional return instructions.
+/// On ARM, a single instruction can pop multiple registers from the stack and
+/// return (it simply pops the pc register too), and LLVM sometimes generates
+/// it. So we can't insert the sled between this stack adjustment and the
+/// return without splitting the original instruction into two instructions.
+/// Therefore, on ARM, rather than jumping into the exit trampoline, we call
+/// it; it does the tracing, preserves the stack, and returns.
+HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)
+
+/// Wraps a tail call instruction and its operands to enable adding nop sleds
+/// either before or after the tail exit. We use this as a disambiguation from
+/// PATCHABLE_RET which specifically only works for return instructions.
+HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)
+
+/// Wraps a logging call and its arguments with nop sleds. At runtime, this can be
+/// patched to insert instrumentation instructions.
+HANDLE_TARGET_OPCODE(PATCHABLE_EVENT_CALL)
+
+HANDLE_TARGET_OPCODE(ICALL_BRANCH_FUNNEL)
+
+/// The following generic opcodes are not supposed to appear after ISel.
+/// This is something we might want to relax, but for now, this is convenient
+/// to produce diagnostics.
+
+/// Generic ADD instruction. This is an integer add.
+HANDLE_TARGET_OPCODE(G_ADD)
+HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_START, G_ADD)
+
+/// Generic SUB instruction. This is an integer sub.
+HANDLE_TARGET_OPCODE(G_SUB)
+
+// Generic multiply instruction.
+HANDLE_TARGET_OPCODE(G_MUL)
+
+// Generic signed division instruction.
+HANDLE_TARGET_OPCODE(G_SDIV)
+
+// Generic unsigned division instruction.
+HANDLE_TARGET_OPCODE(G_UDIV)
+
+// Generic signed remainder instruction.
+HANDLE_TARGET_OPCODE(G_SREM)
+
+// Generic unsigned remainder instruction.
+HANDLE_TARGET_OPCODE(G_UREM)
+
+/// Generic bitwise and instruction.
+HANDLE_TARGET_OPCODE(G_AND)
+
+/// Generic bitwise or instruction.
+HANDLE_TARGET_OPCODE(G_OR)
+
+/// Generic bitwise exclusive-or instruction.
+HANDLE_TARGET_OPCODE(G_XOR)
+
+
+HANDLE_TARGET_OPCODE(G_IMPLICIT_DEF)
+
+/// Generic PHI instruction with types.
+HANDLE_TARGET_OPCODE(G_PHI)
+
+/// Generic instruction to materialize the address of an alloca or other
+/// stack-based object.
+HANDLE_TARGET_OPCODE(G_FRAME_INDEX)
+
+/// Generic reference to global value.
+HANDLE_TARGET_OPCODE(G_GLOBAL_VALUE)
+
+/// Generic instruction to extract blocks of bits from the register given
+/// (typically a sub-register COPY after instruction selection).
+HANDLE_TARGET_OPCODE(G_EXTRACT)
+
+HANDLE_TARGET_OPCODE(G_UNMERGE_VALUES)
+
+/// Generic instruction to insert blocks of bits from the registers given into
+/// the source.
+HANDLE_TARGET_OPCODE(G_INSERT)
+
+/// Generic instruction to paste a variable number of components together into a
+/// larger register.
+HANDLE_TARGET_OPCODE(G_MERGE_VALUES)
+
+/// Generic pointer to int conversion.
+HANDLE_TARGET_OPCODE(G_PTRTOINT)
+
+/// Generic int to pointer conversion.
+HANDLE_TARGET_OPCODE(G_INTTOPTR)
+
+/// Generic bitcast. The source and destination types must be different, or a
+/// COPY is the relevant instruction.
+HANDLE_TARGET_OPCODE(G_BITCAST)
+
+/// Generic load.
+HANDLE_TARGET_OPCODE(G_LOAD)
+
+/// Generic store.
+HANDLE_TARGET_OPCODE(G_STORE)
+
+/// Generic atomic cmpxchg with internal success check.
+HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
+
+/// Generic atomic cmpxchg.
+HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG)
+
+/// Generic atomicrmw.
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_XCHG)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_ADD)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_SUB)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_AND)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_NAND)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_OR)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_XOR)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_MAX)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_MIN)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMAX)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMIN)
+
+/// Generic conditional branch instruction.
+HANDLE_TARGET_OPCODE(G_BRCOND)
+
+/// Generic indirect branch instruction.
+HANDLE_TARGET_OPCODE(G_BRINDIRECT)
+
+/// Generic intrinsic use (without side effects).
+HANDLE_TARGET_OPCODE(G_INTRINSIC)
+
+/// Generic intrinsic use (with side effects).
+HANDLE_TARGET_OPCODE(G_INTRINSIC_W_SIDE_EFFECTS)
+
+/// Generic extension allowing rubbish in high bits.
+HANDLE_TARGET_OPCODE(G_ANYEXT)
+
+/// Generic instruction to discard the high bits of a register. This differs
+/// from (G_EXTRACT val, 0) in its action on vectors: G_TRUNC will truncate
+/// each element individually, G_EXTRACT will typically discard the high
+/// elements of the vector.
+HANDLE_TARGET_OPCODE(G_TRUNC)
+
+/// Generic integer constant.
+HANDLE_TARGET_OPCODE(G_CONSTANT)
+
+/// Generic floating constant.
+HANDLE_TARGET_OPCODE(G_FCONSTANT)
+
+/// Generic va_start instruction. Stores to its one pointer operand.
+HANDLE_TARGET_OPCODE(G_VASTART)
+
+/// Generic va_arg instruction. Reads a value from its one pointer operand.
+HANDLE_TARGET_OPCODE(G_VAARG)
+
+// Generic sign extend
+HANDLE_TARGET_OPCODE(G_SEXT)
+
+// Generic zero extend
+HANDLE_TARGET_OPCODE(G_ZEXT)
+
+// Generic left-shift
+HANDLE_TARGET_OPCODE(G_SHL)
+
+// Generic logical right-shift
+HANDLE_TARGET_OPCODE(G_LSHR)
+
+// Generic arithmetic right-shift
+HANDLE_TARGET_OPCODE(G_ASHR)
+
+/// Generic integer-based comparison, also applicable to vectors of integers.
+HANDLE_TARGET_OPCODE(G_ICMP)
+
+/// Generic floating-point comparison, also applicable to vectors.
+HANDLE_TARGET_OPCODE(G_FCMP)
+
+/// Generic select.
+HANDLE_TARGET_OPCODE(G_SELECT)
+
+/// Generic unsigned add instruction, consuming the normal operands plus a carry
+/// flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_UADDE)
+
+/// Generic unsigned subtract instruction, consuming the normal operands plus a
+/// carry flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_USUBE)
+
+/// Generic signed add instruction, producing the result and a signed overflow
+/// flag.
+HANDLE_TARGET_OPCODE(G_SADDO)
+
+/// Generic signed subtract instruction, producing the result and a signed
+/// overflow flag.
+HANDLE_TARGET_OPCODE(G_SSUBO)
+
+/// Generic unsigned multiply instruction, producing the result and an
+/// unsigned overflow flag.
+HANDLE_TARGET_OPCODE(G_UMULO)
+
+/// Generic signed multiply instruction, producing the result and a signed
+/// overflow flag.
+HANDLE_TARGET_OPCODE(G_SMULO)
+
+// Multiply two numbers at twice the incoming bit width (unsigned) and return
+// the high half of the result.
+HANDLE_TARGET_OPCODE(G_UMULH)
+
+// Multiply two numbers at twice the incoming bit width (signed) and return
+// the high half of the result.
+HANDLE_TARGET_OPCODE(G_SMULH)
+
+/// Generic FP addition.
+HANDLE_TARGET_OPCODE(G_FADD)
+
+/// Generic FP subtraction.
+HANDLE_TARGET_OPCODE(G_FSUB)
+
+/// Generic FP multiplication.
+HANDLE_TARGET_OPCODE(G_FMUL)
+
+/// Generic FMA multiplication. Behaves like llvm fma intrinsic
+HANDLE_TARGET_OPCODE(G_FMA)
+
+/// Generic FP division.
+HANDLE_TARGET_OPCODE(G_FDIV)
+
+/// Generic FP remainder.
+HANDLE_TARGET_OPCODE(G_FREM)
+
+/// Generic FP exponentiation.
+HANDLE_TARGET_OPCODE(G_FPOW)
+
+/// Generic base-e exponential of a value.
+HANDLE_TARGET_OPCODE(G_FEXP)
+
+/// Generic base-2 exponential of a value.
+HANDLE_TARGET_OPCODE(G_FEXP2)
+
+/// Floating point base-e logarithm of a value.
+HANDLE_TARGET_OPCODE(G_FLOG)
+
+/// Floating point base-2 logarithm of a value.
+HANDLE_TARGET_OPCODE(G_FLOG2)
+
+/// Generic FP negation.
+HANDLE_TARGET_OPCODE(G_FNEG)
+
+/// Generic FP extension.
+HANDLE_TARGET_OPCODE(G_FPEXT)
+
+/// Generic FP truncation, down-converting to a smaller FP type.
+HANDLE_TARGET_OPCODE(G_FPTRUNC)
+
+/// Generic float to signed-int conversion
+HANDLE_TARGET_OPCODE(G_FPTOSI)
+
+/// Generic float to unsigned-int conversion
+HANDLE_TARGET_OPCODE(G_FPTOUI)
+
+/// Generic signed-int to float conversion
+HANDLE_TARGET_OPCODE(G_SITOFP)
+
+/// Generic unsigned-int to float conversion
+HANDLE_TARGET_OPCODE(G_UITOFP)
+
+/// Generic FP absolute value.
+HANDLE_TARGET_OPCODE(G_FABS)
+
+/// Generic pointer offset
+HANDLE_TARGET_OPCODE(G_GEP)
+
+/// Clear the specified number of low bits in a pointer. This rounds the value
+/// *down* to the given alignment.
+HANDLE_TARGET_OPCODE(G_PTR_MASK)
+
+/// Generic BRANCH instruction. This is an unconditional branch.
+HANDLE_TARGET_OPCODE(G_BR)
+
+/// Generic insertelement.
+HANDLE_TARGET_OPCODE(G_INSERT_VECTOR_ELT)
+
+/// Generic extractelement.
+HANDLE_TARGET_OPCODE(G_EXTRACT_VECTOR_ELT)
+
+/// Generic shufflevector.
+HANDLE_TARGET_OPCODE(G_SHUFFLE_VECTOR)
+
+/// Generic byte swap.
+HANDLE_TARGET_OPCODE(G_BSWAP)
+
+// TODO: Add more generic opcodes as we move along.
+
+/// Marker for the end of the generic opcodes.
+/// This is used to check if an opcode is in the range of the
+/// generic opcodes.
+HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_END, G_BSWAP)
+
+/// BUILTIN_OP_END - This must be the last enum value in this list.
+/// The target-specific post-isel opcode values start here.
+HANDLE_TARGET_OPCODE_MARKER(GENERIC_OP_END, PRE_ISEL_GENERIC_OPCODE_END)
diff --git a/linux-x64/clang/include/llvm/Support/TargetParser.h b/linux-x64/clang/include/llvm/Support/TargetParser.h
new file mode 100644
index 0000000..8fba995
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TargetParser.h
@@ -0,0 +1,262 @@
+//===-- TargetParser - Parser for target features ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a target parser to recognise hardware features such as
+// FPU/CPU/ARCH names as well as specific support such as HDIV, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETPARSER_H
+#define LLVM_SUPPORT_TARGETPARSER_H
+
+// FIXME: vector is used because that's what clang uses for subtarget feature
+// lists, but SmallVector would probably be better
+#include "llvm/ADT/Triple.h"
+#include <vector>
+
+namespace llvm {
+class StringRef;
+
+// Target-specific information is kept in its own namespace. These tables
+// should be generated from TableGen because the information is already there,
+// and that is where new information about targets will be added.
+// FIXME: To TableGen this we need to make some table generated files available
+// even if the back-end is not compiled with LLVM, plus we need to create a new
+// back-end to TableGen to create these clean tables.
+namespace ARM {
+
+// FPU Version
+enum class FPUVersion {
+  NONE,
+  VFPV2,
+  VFPV3,
+  VFPV3_FP16,
+  VFPV4,
+  VFPV5
+};
+
+// An FPU name restricts the FPU in one of three ways:
+enum class FPURestriction {
+  None = 0, ///< No restriction
+  D16,      ///< Only 16 D registers
+  SP_D16    ///< Only single-precision instructions, with 16 D registers
+};
+
+// An FPU name implies one of three levels of Neon support:
+enum class NeonSupportLevel {
+  None = 0, ///< No Neon
+  Neon,     ///< Neon
+  Crypto    ///< Neon with Crypto
+};
+
+// FPU names.
+enum FPUKind {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) KIND,
+#include "ARMTargetParser.def"
+  FK_LAST
+};
+
+// Arch names.
+enum class ArchKind {
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "ARMTargetParser.def"
+};
+
+// Arch extension modifiers for CPUs.
+enum ArchExtKind : unsigned {
+  AEK_INVALID =     0,
+  AEK_NONE =        1,
+  AEK_CRC =         1 << 1,
+  AEK_CRYPTO =      1 << 2,
+  AEK_FP =          1 << 3,
+  AEK_HWDIVTHUMB =  1 << 4,
+  AEK_HWDIVARM =    1 << 5,
+  AEK_MP =          1 << 6,
+  AEK_SIMD =        1 << 7,
+  AEK_SEC =         1 << 8,
+  AEK_VIRT =        1 << 9,
+  AEK_DSP =         1 << 10,
+  AEK_FP16 =        1 << 11,
+  AEK_RAS =         1 << 12,
+  AEK_SVE =         1 << 13,
+  AEK_DOTPROD =     1 << 14,
+  // Unsupported extensions.
+  AEK_OS = 0x8000000,
+  AEK_IWMMXT = 0x10000000,
+  AEK_IWMMXT2 = 0x20000000,
+  AEK_MAVERICK = 0x40000000,
+  AEK_XSCALE = 0x80000000,
+};
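+
+// Extension bits compose as a bitmask (a sketch; the chosen bits and the
+// resulting feature strings are illustrative):
+//
+//   unsigned Ext = ARM::AEK_CRC | ARM::AEK_DSP;
+//   std::vector<StringRef> Features;
+//   ARM::getExtensionFeatures(Ext, Features); // e.g. "+crc", "+dsp"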
+
+// ISA kinds.
+enum class ISAKind { INVALID = 0, ARM, THUMB, AARCH64 };
+
+// Endianness
+// FIXME: BE8 vs. BE32?
+enum class EndianKind { INVALID = 0, LITTLE, BIG };
+
+// v6/v7/v8 Profile
+enum class ProfileKind { INVALID = 0, A, R, M };
+
+StringRef getCanonicalArchName(StringRef Arch);
+
+// Information by ID
+StringRef getFPUName(unsigned FPUKind);
+FPUVersion getFPUVersion(unsigned FPUKind);
+NeonSupportLevel getFPUNeonSupportLevel(unsigned FPUKind);
+FPURestriction getFPURestriction(unsigned FPUKind);
+
+// FIXME: These should be moved to TargetTuple once it exists
+bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
+bool getHWDivFeatures(unsigned HWDivKind, std::vector<StringRef> &Features);
+bool getExtensionFeatures(unsigned Extensions,
+                          std::vector<StringRef> &Features);
+
+StringRef getArchName(ArchKind AK);
+unsigned getArchAttr(ArchKind AK);
+StringRef getCPUAttr(ArchKind AK);
+StringRef getSubArch(ArchKind AK);
+StringRef getArchExtName(unsigned ArchExtKind);
+StringRef getArchExtFeature(StringRef ArchExt);
+StringRef getHWDivName(unsigned HWDivKind);
+
+// Information by Name
+unsigned  getDefaultFPU(StringRef CPU, ArchKind AK);
+unsigned  getDefaultExtensions(StringRef CPU, ArchKind AK);
+StringRef getDefaultCPU(StringRef Arch);
+
+// Parser
+unsigned parseHWDiv(StringRef HWDiv);
+unsigned parseFPU(StringRef FPU);
+ArchKind parseArch(StringRef Arch);
+unsigned parseArchExt(StringRef ArchExt);
+ArchKind parseCPUArch(StringRef CPU);
+void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
+ISAKind parseArchISA(StringRef Arch);
+EndianKind parseArchEndian(StringRef Arch);
+ProfileKind parseArchProfile(StringRef Arch);
+unsigned parseArchVersion(StringRef Arch);
+
+StringRef computeDefaultTargetABI(const Triple &TT, StringRef CPU);
+
+} // namespace ARM
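+
+// A minimal usage sketch of the ARM parser entry points (hedged; the arch
+// string is illustrative):
+//
+//   ARM::ArchKind AK = ARM::parseArch("armv7-a");
+//   if (AK != ARM::ArchKind::INVALID) {
+//     StringRef CPU = ARM::getDefaultCPU("armv7-a"); // default CPU name
+//   }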
+
+// FIXME: This should be made into a class design, to avoid duplication.
+namespace AArch64 {
+
+// Arch names.
+enum class ArchKind {
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "AArch64TargetParser.def"
+};
+
+// Arch extension modifiers for CPUs.
+enum ArchExtKind : unsigned {
+  AEK_INVALID =     0,
+  AEK_NONE =        1,
+  AEK_CRC =         1 << 1,
+  AEK_CRYPTO =      1 << 2,
+  AEK_FP =          1 << 3,
+  AEK_SIMD =        1 << 4,
+  AEK_FP16 =        1 << 5,
+  AEK_PROFILE =     1 << 6,
+  AEK_RAS =         1 << 7,
+  AEK_LSE =         1 << 8,
+  AEK_SVE =         1 << 9,
+  AEK_DOTPROD =     1 << 10,
+  AEK_RCPC =        1 << 11,
+  AEK_RDM =         1 << 12
+};
+
+StringRef getCanonicalArchName(StringRef Arch);
+
+// Information by ID
+StringRef getFPUName(unsigned FPUKind);
+ARM::FPUVersion getFPUVersion(unsigned FPUKind);
+ARM::NeonSupportLevel getFPUNeonSupportLevel(unsigned FPUKind);
+ARM::FPURestriction getFPURestriction(unsigned FPUKind);
+
+// FIXME: These should be moved to TargetTuple once it exists
+bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
+bool getExtensionFeatures(unsigned Extensions,
+                          std::vector<StringRef> &Features);
+bool getArchFeatures(ArchKind AK, std::vector<StringRef> &Features);
+
+StringRef getArchName(ArchKind AK);
+unsigned getArchAttr(ArchKind AK);
+StringRef getCPUAttr(ArchKind AK);
+StringRef getSubArch(ArchKind AK);
+StringRef getArchExtName(unsigned ArchExtKind);
+StringRef getArchExtFeature(StringRef ArchExt);
+unsigned checkArchVersion(StringRef Arch);
+
+// Information by Name
+unsigned  getDefaultFPU(StringRef CPU, ArchKind AK);
+unsigned  getDefaultExtensions(StringRef CPU, ArchKind AK);
+StringRef getDefaultCPU(StringRef Arch);
+
+// Parser
+unsigned parseFPU(StringRef FPU);
+AArch64::ArchKind parseArch(StringRef Arch);
+ArchExtKind parseArchExt(StringRef ArchExt);
+ArchKind parseCPUArch(StringRef CPU);
+void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
+ARM::ISAKind parseArchISA(StringRef Arch);
+ARM::EndianKind parseArchEndian(StringRef Arch);
+ARM::ProfileKind parseArchProfile(StringRef Arch);
+unsigned parseArchVersion(StringRef Arch);
+
+} // namespace AArch64
+
+namespace X86 {
+
+// This should be kept in sync with libcc/compiler-rt as it's included by clang
+// as a proxy for what's in libgcc/compiler-rt.
+enum ProcessorVendors : unsigned {
+  VENDOR_DUMMY,
+#define X86_VENDOR(ENUM, STRING) \
+  ENUM,
+#include "llvm/Support/X86TargetParser.def"
+  VENDOR_OTHER
+};
+
+// This should be kept in sync with libcc/compiler-rt as it's included by clang
+// as a proxy for what's in libgcc/compiler-rt.
+enum ProcessorTypes : unsigned {
+  CPU_TYPE_DUMMY,
+#define X86_CPU_TYPE(ARCHNAME, ENUM) \
+  ENUM,
+#include "llvm/Support/X86TargetParser.def"
+  CPU_TYPE_MAX
+};
+
+// This should be kept in sync with libcc/compiler-rt as it's included by clang
+// as a proxy for what's in libgcc/compiler-rt.
+enum ProcessorSubtypes : unsigned {
+  CPU_SUBTYPE_DUMMY,
+#define X86_CPU_SUBTYPE(ARCHNAME, ENUM) \
+  ENUM,
+#include "llvm/Support/X86TargetParser.def"
+  CPU_SUBTYPE_MAX
+};
+
+// This should be kept in sync with libcc/compiler-rt as it should be used
+// by clang as a proxy for what's in libgcc/compiler-rt.
+enum ProcessorFeatures {
+#define X86_FEATURE(VAL, ENUM) \
+  ENUM = VAL,
+#include "llvm/Support/X86TargetParser.def"
+
+};
+
+} // namespace X86
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/TargetRegistry.h b/linux-x64/clang/include/llvm/Support/TargetRegistry.h
new file mode 100644
index 0000000..0fc8c38
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TargetRegistry.h
@@ -0,0 +1,1177 @@
+//===- Support/TargetRegistry.h - Target Registration -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes the TargetRegistry interface, which tools can use to access
+// the appropriate target specific classes (TargetMachine, AsmPrinter, etc.)
+// which have been registered.
+//
+// Target specific class implementations should register themselves using the
+// appropriate TargetRegistry interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETREGISTRY_H
+#define LLVM_SUPPORT_TARGETREGISTRY_H
+
+#include "llvm-c/DisassemblerTypes.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCAsmBackend;
+class MCAsmInfo;
+class MCAsmParser;
+class MCCodeEmitter;
+class MCContext;
+class MCDisassembler;
+class MCInstPrinter;
+class MCInstrAnalysis;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCRelocationInfo;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbolizer;
+class MCTargetAsmParser;
+class MCTargetOptions;
+class MCTargetStreamer;
+class raw_ostream;
+class raw_pwrite_stream;
+class TargetMachine;
+class TargetOptions;
+
+MCStreamer *createNullStreamer(MCContext &Ctx);
+MCStreamer *createAsmStreamer(MCContext &Ctx,
+                              std::unique_ptr<formatted_raw_ostream> OS,
+                              bool isVerboseAsm, bool useDwarfDirectory,
+                              MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+                              MCAsmBackend *TAB, bool ShowInst);
+
+/// Takes ownership of \p TAB and \p CE.
+MCStreamer *createELFStreamer(MCContext &Ctx,
+                              std::unique_ptr<MCAsmBackend> &&TAB,
+                              raw_pwrite_stream &OS,
+                              std::unique_ptr<MCCodeEmitter> &&CE,
+                              bool RelaxAll);
+MCStreamer *createMachOStreamer(MCContext &Ctx,
+                                std::unique_ptr<MCAsmBackend> &&TAB,
+                                raw_pwrite_stream &OS,
+                                std::unique_ptr<MCCodeEmitter> &&CE,
+                                bool RelaxAll, bool DWARFMustBeAtTheEnd,
+                                bool LabelSections = false);
+MCStreamer *createWasmStreamer(MCContext &Ctx,
+                               std::unique_ptr<MCAsmBackend> &&TAB,
+                               raw_pwrite_stream &OS,
+                               std::unique_ptr<MCCodeEmitter> &&CE,
+                               bool RelaxAll);
+
+MCRelocationInfo *createMCRelocationInfo(const Triple &TT, MCContext &Ctx);
+
+MCSymbolizer *createMCSymbolizer(const Triple &TT, LLVMOpInfoCallback GetOpInfo,
+                                 LLVMSymbolLookupCallback SymbolLookUp,
+                                 void *DisInfo, MCContext *Ctx,
+                                 std::unique_ptr<MCRelocationInfo> &&RelInfo);
+
+/// Target - Wrapper for Target specific information.
+///
+/// For registration purposes, this is a POD type so that targets can be
+/// registered without the use of static constructors.
+///
+/// Targets should implement a single global instance of this class (which
+/// will be zero initialized), and pass that instance to the TargetRegistry as
+/// part of their initialization.
+class Target {
+public:
+  friend struct TargetRegistry;
+
+  using ArchMatchFnTy = bool (*)(Triple::ArchType Arch);
+
+  using MCAsmInfoCtorFnTy = MCAsmInfo *(*)(const MCRegisterInfo &MRI,
+                                           const Triple &TT);
+  using MCInstrInfoCtorFnTy = MCInstrInfo *(*)();
+  using MCInstrAnalysisCtorFnTy = MCInstrAnalysis *(*)(const MCInstrInfo *Info);
+  using MCRegInfoCtorFnTy = MCRegisterInfo *(*)(const Triple &TT);
+  using MCSubtargetInfoCtorFnTy = MCSubtargetInfo *(*)(const Triple &TT,
+                                                       StringRef CPU,
+                                                       StringRef Features);
+  using TargetMachineCtorTy = TargetMachine
+      *(*)(const Target &T, const Triple &TT, StringRef CPU, StringRef Features,
+           const TargetOptions &Options, Optional<Reloc::Model> RM,
+           Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT);
+  // If it weren't for layering issues (this header is in llvm/Support, but
+  // depends on MC?) this should take the Streamer by value rather than rvalue
+  // reference.
+  using AsmPrinterCtorTy = AsmPrinter *(*)(
+      TargetMachine &TM, std::unique_ptr<MCStreamer> &&Streamer);
+  using MCAsmBackendCtorTy = MCAsmBackend *(*)(const Target &T,
+                                               const MCSubtargetInfo &STI,
+                                               const MCRegisterInfo &MRI,
+                                               const MCTargetOptions &Options);
+  using MCAsmParserCtorTy = MCTargetAsmParser *(*)(
+      const MCSubtargetInfo &STI, MCAsmParser &P, const MCInstrInfo &MII,
+      const MCTargetOptions &Options);
+  using MCDisassemblerCtorTy = MCDisassembler *(*)(const Target &T,
+                                                   const MCSubtargetInfo &STI,
+                                                   MCContext &Ctx);
+  using MCInstPrinterCtorTy = MCInstPrinter *(*)(const Triple &T,
+                                                 unsigned SyntaxVariant,
+                                                 const MCAsmInfo &MAI,
+                                                 const MCInstrInfo &MII,
+                                                 const MCRegisterInfo &MRI);
+  using MCCodeEmitterCtorTy = MCCodeEmitter *(*)(const MCInstrInfo &II,
+                                                 const MCRegisterInfo &MRI,
+                                                 MCContext &Ctx);
+  using ELFStreamerCtorTy =
+      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
+                      std::unique_ptr<MCAsmBackend> &&TAB,
+                      raw_pwrite_stream &OS,
+                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
+  using MachOStreamerCtorTy =
+      MCStreamer *(*)(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
+                      raw_pwrite_stream &OS,
+                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
+                      bool DWARFMustBeAtTheEnd);
+  using COFFStreamerCtorTy =
+      MCStreamer *(*)(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
+                      raw_pwrite_stream &OS,
+                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
+                      bool IncrementalLinkerCompatible);
+  using WasmStreamerCtorTy =
+      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
+                      std::unique_ptr<MCAsmBackend> &&TAB,
+                      raw_pwrite_stream &OS,
+                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
+  using NullTargetStreamerCtorTy = MCTargetStreamer *(*)(MCStreamer &S);
+  using AsmTargetStreamerCtorTy = MCTargetStreamer *(*)(
+      MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrint,
+      bool IsVerboseAsm);
+  using ObjectTargetStreamerCtorTy = MCTargetStreamer *(*)(
+      MCStreamer &S, const MCSubtargetInfo &STI);
+  using MCRelocationInfoCtorTy = MCRelocationInfo *(*)(const Triple &TT,
+                                                       MCContext &Ctx);
+  using MCSymbolizerCtorTy = MCSymbolizer *(*)(
+      const Triple &TT, LLVMOpInfoCallback GetOpInfo,
+      LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo, MCContext *Ctx,
+      std::unique_ptr<MCRelocationInfo> &&RelInfo);
+
+private:
+  /// Next - The next registered target in the linked list, maintained by the
+  /// TargetRegistry.
+  Target *Next;
+
+  /// The target function for checking if an architecture is supported.
+  ArchMatchFnTy ArchMatchFn;
+
+  /// Name - The target name.
+  const char *Name;
+
+  /// ShortDesc - A short description of the target.
+  const char *ShortDesc;
+
+  /// BackendName - The name of the backend implementation. This must match the
+  /// name of the 'def X : Target ...' in TableGen.
+  const char *BackendName;
+
+  /// HasJIT - Whether this target supports the JIT.
+  bool HasJIT;
+
+  /// MCAsmInfoCtorFn - Constructor function for this target's MCAsmInfo, if
+  /// registered.
+  MCAsmInfoCtorFnTy MCAsmInfoCtorFn;
+
+  /// MCInstrInfoCtorFn - Constructor function for this target's MCInstrInfo,
+  /// if registered.
+  MCInstrInfoCtorFnTy MCInstrInfoCtorFn;
+
+  /// MCInstrAnalysisCtorFn - Constructor function for this target's
+  /// MCInstrAnalysis, if registered.
+  MCInstrAnalysisCtorFnTy MCInstrAnalysisCtorFn;
+
+  /// MCRegInfoCtorFn - Constructor function for this target's MCRegisterInfo,
+  /// if registered.
+  MCRegInfoCtorFnTy MCRegInfoCtorFn;
+
+  /// MCSubtargetInfoCtorFn - Constructor function for this target's
+  /// MCSubtargetInfo, if registered.
+  MCSubtargetInfoCtorFnTy MCSubtargetInfoCtorFn;
+
+  /// TargetMachineCtorFn - Construction function for this target's
+  /// TargetMachine, if registered.
+  TargetMachineCtorTy TargetMachineCtorFn;
+
+  /// MCAsmBackendCtorFn - Construction function for this target's
+  /// MCAsmBackend, if registered.
+  MCAsmBackendCtorTy MCAsmBackendCtorFn;
+
+  /// MCAsmParserCtorFn - Construction function for this target's
+  /// MCTargetAsmParser, if registered.
+  MCAsmParserCtorTy MCAsmParserCtorFn;
+
+  /// AsmPrinterCtorFn - Construction function for this target's AsmPrinter,
+  /// if registered.
+  AsmPrinterCtorTy AsmPrinterCtorFn;
+
+  /// MCDisassemblerCtorFn - Construction function for this target's
+  /// MCDisassembler, if registered.
+  MCDisassemblerCtorTy MCDisassemblerCtorFn;
+
+  /// MCInstPrinterCtorFn - Construction function for this target's
+  /// MCInstPrinter, if registered.
+  MCInstPrinterCtorTy MCInstPrinterCtorFn;
+
+  /// MCCodeEmitterCtorFn - Construction function for this target's
+  /// CodeEmitter, if registered.
+  MCCodeEmitterCtorTy MCCodeEmitterCtorFn;
+
+  // Construction functions for the various object formats, if registered.
+  COFFStreamerCtorTy COFFStreamerCtorFn = nullptr;
+  MachOStreamerCtorTy MachOStreamerCtorFn = nullptr;
+  ELFStreamerCtorTy ELFStreamerCtorFn = nullptr;
+  WasmStreamerCtorTy WasmStreamerCtorFn = nullptr;
+
+  /// Construction function for this target's null TargetStreamer, if
+  /// registered (default = nullptr).
+  NullTargetStreamerCtorTy NullTargetStreamerCtorFn = nullptr;
+
+  /// Construction function for this target's asm TargetStreamer, if
+  /// registered (default = nullptr).
+  AsmTargetStreamerCtorTy AsmTargetStreamerCtorFn = nullptr;
+
+  /// Construction function for this target's obj TargetStreamer, if
+  /// registered (default = nullptr).
+  ObjectTargetStreamerCtorTy ObjectTargetStreamerCtorFn = nullptr;
+
+  /// MCRelocationInfoCtorFn - Construction function for this target's
+  /// MCRelocationInfo, if registered (default = llvm::createMCRelocationInfo)
+  MCRelocationInfoCtorTy MCRelocationInfoCtorFn = nullptr;
+
+  /// MCSymbolizerCtorFn - Construction function for this target's
+  /// MCSymbolizer, if registered (default = llvm::createMCSymbolizer)
+  MCSymbolizerCtorTy MCSymbolizerCtorFn = nullptr;
+
+public:
+  Target() = default;
+
+  /// @name Target Information
+  /// @{
+
+  /// getNext - Return the next registered target.
+  const Target *getNext() const { return Next; }
+
+  /// getName - Get the target name.
+  const char *getName() const { return Name; }
+
+  /// getShortDescription - Get a short description of the target.
+  const char *getShortDescription() const { return ShortDesc; }
+
+  /// getBackendName - Get the backend name.
+  const char *getBackendName() const { return BackendName; }
+
+  /// @}
+  /// @name Feature Predicates
+  /// @{
+
+  /// hasJIT - Check if this target supports just-in-time compilation.
+  bool hasJIT() const { return HasJIT; }
+
+  /// hasTargetMachine - Check if this target supports code generation.
+  bool hasTargetMachine() const { return TargetMachineCtorFn != nullptr; }
+
+  /// hasMCAsmBackend - Check if this target supports .o generation.
+  bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != nullptr; }
+
+  /// hasMCAsmParser - Check if this target supports assembly parsing.
+  bool hasMCAsmParser() const { return MCAsmParserCtorFn != nullptr; }
+
+  /// @}
+  /// @name Feature Constructors
+  /// @{
+
+  /// createMCAsmInfo - Create a MCAsmInfo implementation for the specified
+  /// target triple.
+  ///
+  /// \param TheTriple This argument is used to determine the target machine
+  /// feature set; it should always be provided. Generally this should be
+  /// either the target triple from the module, or the target triple of the
+  /// host if that does not exist.
+  MCAsmInfo *createMCAsmInfo(const MCRegisterInfo &MRI,
+                             StringRef TheTriple) const {
+    if (!MCAsmInfoCtorFn)
+      return nullptr;
+    return MCAsmInfoCtorFn(MRI, Triple(TheTriple));
+  }
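+
+  // A minimal usage sketch (assumes TheTarget was obtained from the
+  // TargetRegistry and TripleName holds a valid target triple string):
+  //
+  //   std::unique_ptr<MCRegisterInfo> MRI(
+  //       TheTarget->createMCRegInfo(TripleName));
+  //   std::unique_ptr<MCAsmInfo> MAI(
+  //       TheTarget->createMCAsmInfo(*MRI, TripleName));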
+
+  /// createMCInstrInfo - Create a MCInstrInfo implementation.
+  ///
+  MCInstrInfo *createMCInstrInfo() const {
+    if (!MCInstrInfoCtorFn)
+      return nullptr;
+    return MCInstrInfoCtorFn();
+  }
+
+  /// createMCInstrAnalysis - Create a MCInstrAnalysis implementation.
+  ///
+  MCInstrAnalysis *createMCInstrAnalysis(const MCInstrInfo *Info) const {
+    if (!MCInstrAnalysisCtorFn)
+      return nullptr;
+    return MCInstrAnalysisCtorFn(Info);
+  }
+
+  /// createMCRegInfo - Create a MCRegisterInfo implementation.
+  ///
+  MCRegisterInfo *createMCRegInfo(StringRef TT) const {
+    if (!MCRegInfoCtorFn)
+      return nullptr;
+    return MCRegInfoCtorFn(Triple(TT));
+  }
+
+  /// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
+  ///
+  /// \param TheTriple This argument is used to determine the target machine
+  /// feature set; it should always be provided. Generally this should be
+  /// either the target triple from the module, or the target triple of the
+  /// host if that does not exist.
+  /// \param CPU This specifies the name of the target CPU.
+  /// \param Features This specifies the string representation of the
+  /// additional target features.
+  MCSubtargetInfo *createMCSubtargetInfo(StringRef TheTriple, StringRef CPU,
+                                         StringRef Features) const {
+    if (!MCSubtargetInfoCtorFn)
+      return nullptr;
+    return MCSubtargetInfoCtorFn(Triple(TheTriple), CPU, Features);
+  }
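+
+  // For example (a sketch; "skylake" and "+avx2" are assumed CPU and feature
+  // values, not a recommended configuration):
+  //
+  //   std::unique_ptr<MCSubtargetInfo> STI(
+  //       TheTarget->createMCSubtargetInfo(TripleName, "skylake", "+avx2"));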
+
+  /// createTargetMachine - Create a target specific machine implementation
+  /// for the specified target triple \p TT.
+  ///
+  /// \param TT This argument is used to determine the target machine
+  /// feature set; it should always be provided. Generally this should be
+  /// either the target triple from the module, or the target triple of the
+  /// host if that does not exist.
+  TargetMachine *createTargetMachine(StringRef TT, StringRef CPU,
+                                     StringRef Features,
+                                     const TargetOptions &Options,
+                                     Optional<Reloc::Model> RM,
+                                     Optional<CodeModel::Model> CM = None,
+                                     CodeGenOpt::Level OL = CodeGenOpt::Default,
+                                     bool JIT = false) const {
+    if (!TargetMachineCtorFn)
+      return nullptr;
+    return TargetMachineCtorFn(*this, Triple(TT), CPU, Features, Options, RM,
+                               CM, OL, JIT);
+  }
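+
+  // A minimal sketch of typical use (assumes the triple string is valid and
+  // that default relocation/code models are acceptable):
+  //
+  //   TargetOptions Opts;
+  //   std::unique_ptr<TargetMachine> TM(TheTarget->createTargetMachine(
+  //       TripleName, "generic", "", Opts, None));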
+
+  /// createMCAsmBackend - Create a target specific assembly backend.
+  MCAsmBackend *createMCAsmBackend(const MCSubtargetInfo &STI,
+                                   const MCRegisterInfo &MRI,
+                                   const MCTargetOptions &Options) const {
+    if (!MCAsmBackendCtorFn)
+      return nullptr;
+    return MCAsmBackendCtorFn(*this, STI, MRI, Options);
+  }
+
+  /// createMCAsmParser - Create a target specific assembly parser.
+  ///
+  /// \param Parser The target independent parser implementation to use for
+  /// parsing and lexing.
+  MCTargetAsmParser *createMCAsmParser(const MCSubtargetInfo &STI,
+                                       MCAsmParser &Parser,
+                                       const MCInstrInfo &MII,
+                                       const MCTargetOptions &Options) const {
+    if (!MCAsmParserCtorFn)
+      return nullptr;
+    return MCAsmParserCtorFn(STI, Parser, MII, Options);
+  }
+
+  /// createAsmPrinter - Create a target specific assembly printer pass.  This
+  /// takes ownership of the MCStreamer object.
+  AsmPrinter *createAsmPrinter(TargetMachine &TM,
+                               std::unique_ptr<MCStreamer> &&Streamer) const {
+    if (!AsmPrinterCtorFn)
+      return nullptr;
+    return AsmPrinterCtorFn(TM, std::move(Streamer));
+  }
+
+  MCDisassembler *createMCDisassembler(const MCSubtargetInfo &STI,
+                                       MCContext &Ctx) const {
+    if (!MCDisassemblerCtorFn)
+      return nullptr;
+    return MCDisassemblerCtorFn(*this, STI, Ctx);
+  }
+
+  MCInstPrinter *createMCInstPrinter(const Triple &T, unsigned SyntaxVariant,
+                                     const MCAsmInfo &MAI,
+                                     const MCInstrInfo &MII,
+                                     const MCRegisterInfo &MRI) const {
+    if (!MCInstPrinterCtorFn)
+      return nullptr;
+    return MCInstPrinterCtorFn(T, SyntaxVariant, MAI, MII, MRI);
+  }
+
+  /// createMCCodeEmitter - Create a target specific code emitter.
+  MCCodeEmitter *createMCCodeEmitter(const MCInstrInfo &II,
+                                     const MCRegisterInfo &MRI,
+                                     MCContext &Ctx) const {
+    if (!MCCodeEmitterCtorFn)
+      return nullptr;
+    return MCCodeEmitterCtorFn(II, MRI, Ctx);
+  }
+
+  /// Create a target specific MCStreamer.
+  ///
+  /// \param T The target triple.
+  /// \param Ctx The target context.
+  /// \param TAB The target assembler backend object. Takes ownership.
+  /// \param OS The stream object.
+  /// \param Emitter The target independent assembler object. Takes ownership.
+  /// \param RelaxAll Relax all fixups?
+  MCStreamer *createMCObjectStreamer(const Triple &T, MCContext &Ctx,
+                                     std::unique_ptr<MCAsmBackend> &&TAB,
+                                     raw_pwrite_stream &OS,
+                                     std::unique_ptr<MCCodeEmitter> &&Emitter,
+                                     const MCSubtargetInfo &STI, bool RelaxAll,
+                                     bool IncrementalLinkerCompatible,
+                                     bool DWARFMustBeAtTheEnd) const {
+    MCStreamer *S;
+    switch (T.getObjectFormat()) {
+    default:
+      llvm_unreachable("Unknown object format");
+    case Triple::COFF:
+      assert(T.isOSWindows() && "only Windows COFF is supported");
+      S = COFFStreamerCtorFn(Ctx, std::move(TAB), OS, std::move(Emitter),
+                             RelaxAll, IncrementalLinkerCompatible);
+      break;
+    case Triple::MachO:
+      if (MachOStreamerCtorFn)
+        S = MachOStreamerCtorFn(Ctx, std::move(TAB), OS, std::move(Emitter),
+                                RelaxAll, DWARFMustBeAtTheEnd);
+      else
+        S = createMachOStreamer(Ctx, std::move(TAB), OS, std::move(Emitter),
+                                RelaxAll, DWARFMustBeAtTheEnd);
+      break;
+    case Triple::ELF:
+      if (ELFStreamerCtorFn)
+        S = ELFStreamerCtorFn(T, Ctx, std::move(TAB), OS, std::move(Emitter),
+                              RelaxAll);
+      else
+        S = createELFStreamer(Ctx, std::move(TAB), OS, std::move(Emitter),
+                              RelaxAll);
+      break;
+    case Triple::Wasm:
+      if (WasmStreamerCtorFn)
+        S = WasmStreamerCtorFn(T, Ctx, std::move(TAB), OS, std::move(Emitter),
+                               RelaxAll);
+      else
+        S = createWasmStreamer(Ctx, std::move(TAB), OS, std::move(Emitter),
+                               RelaxAll);
+      break;
+    }
+    if (ObjectTargetStreamerCtorFn)
+      ObjectTargetStreamerCtorFn(*S, STI);
+    return S;
+  }
+
+  MCStreamer *createAsmStreamer(MCContext &Ctx,
+                                std::unique_ptr<formatted_raw_ostream> OS,
+                                bool IsVerboseAsm, bool UseDwarfDirectory,
+                                MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+                                MCAsmBackend *TAB, bool ShowInst) const {
+    formatted_raw_ostream &OSRef = *OS;
+    MCStreamer *S = llvm::createAsmStreamer(Ctx, std::move(OS), IsVerboseAsm,
+                                            UseDwarfDirectory, InstPrint, CE,
+                                            TAB, ShowInst);
+    createAsmTargetStreamer(*S, OSRef, InstPrint, IsVerboseAsm);
+    return S;
+  }
+
+  MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
+                                            formatted_raw_ostream &OS,
+                                            MCInstPrinter *InstPrint,
+                                            bool IsVerboseAsm) const {
+    if (AsmTargetStreamerCtorFn)
+      return AsmTargetStreamerCtorFn(S, OS, InstPrint, IsVerboseAsm);
+    return nullptr;
+  }
+
+  MCStreamer *createNullStreamer(MCContext &Ctx) const {
+    MCStreamer *S = llvm::createNullStreamer(Ctx);
+    createNullTargetStreamer(*S);
+    return S;
+  }
+
+  MCTargetStreamer *createNullTargetStreamer(MCStreamer &S) const {
+    if (NullTargetStreamerCtorFn)
+      return NullTargetStreamerCtorFn(S);
+    return nullptr;
+  }
+
+  /// createMCRelocationInfo - Create a target specific MCRelocationInfo.
+  ///
+  /// \param TT The target triple.
+  /// \param Ctx The target context.
+  MCRelocationInfo *createMCRelocationInfo(StringRef TT, MCContext &Ctx) const {
+    MCRelocationInfoCtorTy Fn = MCRelocationInfoCtorFn
+                                    ? MCRelocationInfoCtorFn
+                                    : llvm::createMCRelocationInfo;
+    return Fn(Triple(TT), Ctx);
+  }
+
+  /// createMCSymbolizer - Create a target specific MCSymbolizer.
+  ///
+  /// \param TT The target triple.
+  /// \param GetOpInfo The function to get the symbolic information for
+  /// operands.
+  /// \param SymbolLookUp The function to lookup a symbol name.
+  /// \param DisInfo The pointer to the block of symbolic information for the
+  /// above callback.
+  /// \param Ctx The target context.
+  /// \param RelInfo The relocation information for this target. Takes
+  /// ownership.
+  MCSymbolizer *
+  createMCSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
+                     LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo,
+                     MCContext *Ctx,
+                     std::unique_ptr<MCRelocationInfo> &&RelInfo) const {
+    MCSymbolizerCtorTy Fn =
+        MCSymbolizerCtorFn ? MCSymbolizerCtorFn : llvm::createMCSymbolizer;
+    return Fn(Triple(TT), GetOpInfo, SymbolLookUp, DisInfo, Ctx,
+              std::move(RelInfo));
+  }
+
+  /// @}
+};
+
+/// TargetRegistry - Generic interface to target specific features.
+struct TargetRegistry {
+  // FIXME: Make this a namespace, probably just move all the Register*
+  // functions into Target (currently they all just set members on the Target
+  // anyway, and Target friends this class so those functions can...
+  // function).
+  TargetRegistry() = delete;
+
+  class iterator
+      : public std::iterator<std::forward_iterator_tag, Target, ptrdiff_t> {
+    friend struct TargetRegistry;
+
+    const Target *Current = nullptr;
+
+    explicit iterator(Target *T) : Current(T) {}
+
+  public:
+    iterator() = default;
+
+    bool operator==(const iterator &x) const { return Current == x.Current; }
+    bool operator!=(const iterator &x) const { return !operator==(x); }
+
+    // Iterator traversal: forward iteration only
+    iterator &operator++() { // Preincrement
+      assert(Current && "Cannot increment end iterator!");
+      Current = Current->getNext();
+      return *this;
+    }
+    iterator operator++(int) { // Postincrement
+      iterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    const Target &operator*() const {
+      assert(Current && "Cannot dereference end iterator!");
+      return *Current;
+    }
+
+    const Target *operator->() const { return &operator*(); }
+  };
+
+  /// printRegisteredTargetsForVersion - Print the registered targets
+  /// appropriately for inclusion in a tool's version output.
+  static void printRegisteredTargetsForVersion(raw_ostream &OS);
+
+  /// @name Registry Access
+  /// @{
+
+  static iterator_range<iterator> targets();
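+
+  // For example, to enumerate every registered target (a sketch; assumes the
+  // caller has raw_ostream's errs() available):
+  //
+  //   for (const Target &T : TargetRegistry::targets())
+  //     errs() << T.getName() << " - " << T.getShortDescription() << "\n";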
+
+  /// lookupTarget - Lookup a target based on a target triple.
+  ///
+  /// \param Triple - The triple to use for finding a target.
+  /// \param Error - On failure, an error string describing why no target was
+  /// found.
+  static const Target *lookupTarget(const std::string &Triple,
+                                    std::string &Error);
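+
+  // A minimal lookup sketch (error handling abbreviated; the triple string is
+  // an assumed example value):
+  //
+  //   std::string Error;
+  //   const Target *T =
+  //       TargetRegistry::lookupTarget("x86_64-unknown-linux-gnu", Error);
+  //   if (!T)
+  //     errs() << "lookup failed: " << Error << "\n";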
+
+  /// lookupTarget - Lookup a target based on an architecture name
+  /// and a target triple.  If the architecture name is non-empty,
+  /// then the lookup is done by architecture.  Otherwise, the target
+  /// triple is used.
+  ///
+  /// \param ArchName - The architecture to use for finding a target.
+  /// \param TheTriple - The triple to use for finding a target.  The
+  /// triple is updated with canonical architecture name if a lookup
+  /// by architecture is done.
+  /// \param Error - On failure, an error string describing why no target was
+  /// found.
+  static const Target *lookupTarget(const std::string &ArchName,
+                                    Triple &TheTriple, std::string &Error);
+
+  /// @}
+  /// @name Target Registration
+  /// @{
+
+  /// RegisterTarget - Register the given target. Attempts to register a
+  /// target which has already been registered will be ignored.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Name - The target name. This should be a static string.
+  /// @param ShortDesc - A short target description. This should be a static
+  /// string.
+  /// @param BackendName - The name of the backend. This should be a static
+  /// string that is the same for all targets that share a backend
+  /// implementation and must match the name used in the 'def X : Target ...' in
+  /// TableGen.
+  /// @param ArchMatchFn - The arch match checking function for this target.
+  /// @param HasJIT - Whether the target supports JIT code
+  /// generation.
+  static void RegisterTarget(Target &T, const char *Name, const char *ShortDesc,
+                             const char *BackendName,
+                             Target::ArchMatchFnTy ArchMatchFn,
+                             bool HasJIT = false);
+
+  /// RegisterMCAsmInfo - Register a MCAsmInfo implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct a MCAsmInfo for the target.
+  static void RegisterMCAsmInfo(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
+    T.MCAsmInfoCtorFn = Fn;
+  }
+
+  /// RegisterMCInstrInfo - Register a MCInstrInfo implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct a MCInstrInfo for the target.
+  static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+    T.MCInstrInfoCtorFn = Fn;
+  }
+
+  /// RegisterMCInstrAnalysis - Register a MCInstrAnalysis implementation for
+  /// the given target.
+  static void RegisterMCInstrAnalysis(Target &T,
+                                      Target::MCInstrAnalysisCtorFnTy Fn) {
+    T.MCInstrAnalysisCtorFn = Fn;
+  }
+
+  /// RegisterMCRegInfo - Register a MCRegisterInfo implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct a MCRegisterInfo for the target.
+  static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+    T.MCRegInfoCtorFn = Fn;
+  }
+
+  /// RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for
+  /// the given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct a MCSubtargetInfo for the target.
+  static void RegisterMCSubtargetInfo(Target &T,
+                                      Target::MCSubtargetInfoCtorFnTy Fn) {
+    T.MCSubtargetInfoCtorFn = Fn;
+  }
+
+  /// RegisterTargetMachine - Register a TargetMachine implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct a TargetMachine for the target.
+  static void RegisterTargetMachine(Target &T, Target::TargetMachineCtorTy Fn) {
+    T.TargetMachineCtorFn = Fn;
+  }
+
+  /// RegisterMCAsmBackend - Register a MCAsmBackend implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an AsmBackend for the target.
+  static void RegisterMCAsmBackend(Target &T, Target::MCAsmBackendCtorTy Fn) {
+    T.MCAsmBackendCtorFn = Fn;
+  }
+
+  /// RegisterMCAsmParser - Register a MCTargetAsmParser implementation for
+  /// the given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCTargetAsmParser for the target.
+  static void RegisterMCAsmParser(Target &T, Target::MCAsmParserCtorTy Fn) {
+    T.MCAsmParserCtorFn = Fn;
+  }
+
+  /// RegisterAsmPrinter - Register an AsmPrinter implementation for the given
+  /// target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an AsmPrinter for the target.
+  static void RegisterAsmPrinter(Target &T, Target::AsmPrinterCtorTy Fn) {
+    T.AsmPrinterCtorFn = Fn;
+  }
+
+  /// RegisterMCDisassembler - Register a MCDisassembler implementation for
+  /// the given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCDisassembler for the target.
+  static void RegisterMCDisassembler(Target &T,
+                                     Target::MCDisassemblerCtorTy Fn) {
+    T.MCDisassemblerCtorFn = Fn;
+  }
+
+  /// RegisterMCInstPrinter - Register a MCInstPrinter implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCInstPrinter for the target.
+  static void RegisterMCInstPrinter(Target &T, Target::MCInstPrinterCtorTy Fn) {
+    T.MCInstPrinterCtorFn = Fn;
+  }
+
+  /// RegisterMCCodeEmitter - Register a MCCodeEmitter implementation for the
+  /// given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCCodeEmitter for the target.
+  static void RegisterMCCodeEmitter(Target &T, Target::MCCodeEmitterCtorTy Fn) {
+    T.MCCodeEmitterCtorFn = Fn;
+  }
+
+  static void RegisterCOFFStreamer(Target &T, Target::COFFStreamerCtorTy Fn) {
+    T.COFFStreamerCtorFn = Fn;
+  }
+
+  static void RegisterMachOStreamer(Target &T, Target::MachOStreamerCtorTy Fn) {
+    T.MachOStreamerCtorFn = Fn;
+  }
+
+  static void RegisterELFStreamer(Target &T, Target::ELFStreamerCtorTy Fn) {
+    T.ELFStreamerCtorFn = Fn;
+  }
+
+  static void RegisterWasmStreamer(Target &T, Target::WasmStreamerCtorTy Fn) {
+    T.WasmStreamerCtorFn = Fn;
+  }
+
+  static void RegisterNullTargetStreamer(Target &T,
+                                         Target::NullTargetStreamerCtorTy Fn) {
+    T.NullTargetStreamerCtorFn = Fn;
+  }
+
+  static void RegisterAsmTargetStreamer(Target &T,
+                                        Target::AsmTargetStreamerCtorTy Fn) {
+    T.AsmTargetStreamerCtorFn = Fn;
+  }
+
+  static void
+  RegisterObjectTargetStreamer(Target &T,
+                               Target::ObjectTargetStreamerCtorTy Fn) {
+    T.ObjectTargetStreamerCtorFn = Fn;
+  }
+
+  /// RegisterMCRelocationInfo - Register an MCRelocationInfo
+  /// implementation for the given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCRelocationInfo for the target.
+  static void RegisterMCRelocationInfo(Target &T,
+                                       Target::MCRelocationInfoCtorTy Fn) {
+    T.MCRelocationInfoCtorFn = Fn;
+  }
+
+  /// RegisterMCSymbolizer - Register an MCSymbolizer
+  /// implementation for the given target.
+  ///
+  /// Clients are responsible for ensuring that registration doesn't occur
+  /// while another thread is attempting to access the registry. Typically
+  /// this is done by initializing all targets at program startup.
+  ///
+  /// @param T - The target being registered.
+  /// @param Fn - A function to construct an MCSymbolizer for the target.
+  static void RegisterMCSymbolizer(Target &T, Target::MCSymbolizerCtorTy Fn) {
+    T.MCSymbolizerCtorFn = Fn;
+  }
+
+  /// @}
+};
+
+//===--------------------------------------------------------------------===//
+
+/// RegisterTarget - Helper template for registering a target, for use in the
+/// target's initialization function. Usage:
+///
+/// Target &getTheFooTarget() { // The global target instance.
+///   static Target TheFooTarget;
+///   return TheFooTarget;
+/// }
+/// extern "C" void LLVMInitializeFooTargetInfo() {
+///   RegisterTarget<Triple::foo> X(getTheFooTarget(), "foo",
+///                                 "Foo description", "Foo" /* Backend Name */);
+/// }
+template <Triple::ArchType TargetArchType = Triple::UnknownArch,
+          bool HasJIT = false>
+struct RegisterTarget {
+  RegisterTarget(Target &T, const char *Name, const char *Desc,
+                 const char *BackendName) {
+    TargetRegistry::RegisterTarget(T, Name, Desc, BackendName, &getArchMatch,
+                                   HasJIT);
+  }
+
+  static bool getArchMatch(Triple::ArchType Arch) {
+    return Arch == TargetArchType;
+  }
+};
+
+/// RegisterMCAsmInfo - Helper template for registering a target assembly info
+/// implementation.  This invokes the static "Create" method on the class to
+/// actually do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCAsmInfo<FooMCAsmInfo> X(TheFooTarget);
+/// }
+template <class MCAsmInfoImpl> struct RegisterMCAsmInfo {
+  RegisterMCAsmInfo(Target &T) {
+    TargetRegistry::RegisterMCAsmInfo(T, &Allocator);
+  }
+
+private:
+  static MCAsmInfo *Allocator(const MCRegisterInfo & /*MRI*/,
+                              const Triple &TT) {
+    return new MCAsmInfoImpl(TT);
+  }
+};
+
+/// RegisterMCAsmInfoFn - Helper struct for registering a target assembly info
+/// implementation.  This invokes the specified function to do the
+/// construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCAsmInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCAsmInfoFn {
+  RegisterMCAsmInfoFn(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
+    TargetRegistry::RegisterMCAsmInfo(T, Fn);
+  }
+};
+
+/// RegisterMCInstrInfo - Helper template for registering a target instruction
+/// info implementation.  This invokes the static "Create" method on the class
+/// to actually do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCInstrInfo<FooMCInstrInfo> X(TheFooTarget);
+/// }
+template <class MCInstrInfoImpl> struct RegisterMCInstrInfo {
+  RegisterMCInstrInfo(Target &T) {
+    TargetRegistry::RegisterMCInstrInfo(T, &Allocator);
+  }
+
+private:
+  static MCInstrInfo *Allocator() { return new MCInstrInfoImpl(); }
+};
+
+/// RegisterMCInstrInfoFn - Helper struct for registering a target
+/// instruction info implementation.  This invokes the specified function to
+/// do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCInstrInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCInstrInfoFn {
+  RegisterMCInstrInfoFn(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+    TargetRegistry::RegisterMCInstrInfo(T, Fn);
+  }
+};
+
+/// RegisterMCInstrAnalysis - Helper template for registering a target
+/// instruction analyzer implementation.  This invokes the static "Create"
+/// method on the class to actually do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCInstrAnalysis<FooMCInstrAnalysis> X(TheFooTarget);
+/// }
+template <class MCInstrAnalysisImpl> struct RegisterMCInstrAnalysis {
+  RegisterMCInstrAnalysis(Target &T) {
+    TargetRegistry::RegisterMCInstrAnalysis(T, &Allocator);
+  }
+
+private:
+  static MCInstrAnalysis *Allocator(const MCInstrInfo *Info) {
+    return new MCInstrAnalysisImpl(Info);
+  }
+};
+
+/// RegisterMCInstrAnalysisFn - Helper struct for registering a target
+/// instruction analyzer implementation.  This invokes the specified function
+/// to do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCInstrAnalysisFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCInstrAnalysisFn {
+  RegisterMCInstrAnalysisFn(Target &T, Target::MCInstrAnalysisCtorFnTy Fn) {
+    TargetRegistry::RegisterMCInstrAnalysis(T, Fn);
+  }
+};
+
+/// RegisterMCRegInfo - Helper template for registering a target register info
+/// implementation.  This invokes the static "Create" method on the class to
+/// actually do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCRegInfo<FooMCRegInfo> X(TheFooTarget);
+/// }
+template <class MCRegisterInfoImpl> struct RegisterMCRegInfo {
+  RegisterMCRegInfo(Target &T) {
+    TargetRegistry::RegisterMCRegInfo(T, &Allocator);
+  }
+
+private:
+  static MCRegisterInfo *Allocator(const Triple & /*TT*/) {
+    return new MCRegisterInfoImpl();
+  }
+};
+
+/// RegisterMCRegInfoFn - Helper struct for registering a target register
+/// info implementation.  This invokes the specified function to do the
+/// construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCRegInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCRegInfoFn {
+  RegisterMCRegInfoFn(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+    TargetRegistry::RegisterMCRegInfo(T, Fn);
+  }
+};
+
+/// RegisterMCSubtargetInfo - Helper template for registering a target
+/// subtarget info implementation.  This invokes the static "Create" method
+/// on the class to actually do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCSubtargetInfo<FooMCSubtargetInfo> X(TheFooTarget);
+/// }
+template <class MCSubtargetInfoImpl> struct RegisterMCSubtargetInfo {
+  RegisterMCSubtargetInfo(Target &T) {
+    TargetRegistry::RegisterMCSubtargetInfo(T, &Allocator);
+  }
+
+private:
+  static MCSubtargetInfo *Allocator(const Triple & /*TT*/, StringRef /*CPU*/,
+                                    StringRef /*FS*/) {
+    return new MCSubtargetInfoImpl();
+  }
+};
+
+/// RegisterMCSubtargetInfoFn - Helper struct for registering a target
+/// subtarget info implementation.  This invokes the specified function to
+/// do the construction.  Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterMCSubtargetInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCSubtargetInfoFn {
+  RegisterMCSubtargetInfoFn(Target &T, Target::MCSubtargetInfoCtorFnTy Fn) {
+    TargetRegistry::RegisterMCSubtargetInfo(T, Fn);
+  }
+};
+
+/// RegisterTargetMachine - Helper template for registering a target machine
+/// implementation, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+///   extern Target TheFooTarget;
+///   RegisterTargetMachine<FooTargetMachine> X(TheFooTarget);
+/// }
+template <class TargetMachineImpl> struct RegisterTargetMachine {
+  RegisterTargetMachine(Target &T) {
+    TargetRegistry::RegisterTargetMachine(T, &Allocator);
+  }
+
+private:
+  static TargetMachine *
+  Allocator(const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+            const TargetOptions &Options, Optional<Reloc::Model> RM,
+            Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT) {
+    return new TargetMachineImpl(T, TT, CPU, FS, Options, RM, CM, OL, JIT);
+  }
+};
+
+/// RegisterMCAsmBackend - Helper template for registering a target specific
+/// assembler backend. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCAsmBackend() {
+///   extern Target TheFooTarget;
+///   RegisterMCAsmBackend<FooAsmBackend> X(TheFooTarget);
+/// }
+template <class MCAsmBackendImpl> struct RegisterMCAsmBackend {
+  RegisterMCAsmBackend(Target &T) {
+    TargetRegistry::RegisterMCAsmBackend(T, &Allocator);
+  }
+
+private:
+  static MCAsmBackend *Allocator(const Target &T, const MCSubtargetInfo &STI,
+                                 const MCRegisterInfo &MRI,
+                                 const MCTargetOptions &Options) {
+    return new MCAsmBackendImpl(T, STI, MRI);
+  }
+};
+
+/// RegisterMCAsmParser - Helper template for registering a target specific
+/// assembly parser, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCAsmParser() {
+///   extern Target TheFooTarget;
+///   RegisterMCAsmParser<FooAsmParser> X(TheFooTarget);
+/// }
+template <class MCAsmParserImpl> struct RegisterMCAsmParser {
+  RegisterMCAsmParser(Target &T) {
+    TargetRegistry::RegisterMCAsmParser(T, &Allocator);
+  }
+
+private:
+  static MCTargetAsmParser *Allocator(const MCSubtargetInfo &STI,
+                                      MCAsmParser &P, const MCInstrInfo &MII,
+                                      const MCTargetOptions &Options) {
+    return new MCAsmParserImpl(STI, P, MII, Options);
+  }
+};
+
+/// RegisterAsmPrinter - Helper template for registering a target specific
+/// assembly printer, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooAsmPrinter() {
+///   extern Target TheFooTarget;
+///   RegisterAsmPrinter<FooAsmPrinter> X(TheFooTarget);
+/// }
+template <class AsmPrinterImpl> struct RegisterAsmPrinter {
+  RegisterAsmPrinter(Target &T) {
+    TargetRegistry::RegisterAsmPrinter(T, &Allocator);
+  }
+
+private:
+  static AsmPrinter *Allocator(TargetMachine &TM,
+                               std::unique_ptr<MCStreamer> &&Streamer) {
+    return new AsmPrinterImpl(TM, std::move(Streamer));
+  }
+};
+
+/// RegisterMCCodeEmitter - Helper template for registering a target specific
+/// machine code emitter, for use in the target initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCCodeEmitter() {
+///   extern Target TheFooTarget;
+///   RegisterMCCodeEmitter<FooCodeEmitter> X(TheFooTarget);
+/// }
+template <class MCCodeEmitterImpl> struct RegisterMCCodeEmitter {
+  RegisterMCCodeEmitter(Target &T) {
+    TargetRegistry::RegisterMCCodeEmitter(T, &Allocator);
+  }
+
+private:
+  static MCCodeEmitter *Allocator(const MCInstrInfo & /*II*/,
+                                  const MCRegisterInfo & /*MRI*/,
+                                  MCContext & /*Ctx*/) {
+    return new MCCodeEmitterImpl();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_TARGETREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/Support/TargetSelect.h b/linux-x64/clang/include/llvm/Support/TargetSelect.h
new file mode 100644
index 0000000..582785c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TargetSelect.h
@@ -0,0 +1,165 @@
+//===- TargetSelect.h - Target Selection & Registration ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utilities to make sure that certain classes of targets are
+// linked into the main application executable, and initialize them as
+// appropriate.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETSELECT_H
+#define LLVM_SUPPORT_TARGETSELECT_H
+
+#include "llvm/Config/llvm-config.h"
+
+extern "C" {
+  // Declare all of the target-initialization functions that are available.
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo();
+#include "llvm/Config/Targets.def"
+
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target();
+#include "llvm/Config/Targets.def"
+
+  // Declare all of the target-MC-initialization functions that are available.
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+
+  // Declare all of the available assembly printer initialization functions.
+#define LLVM_ASM_PRINTER(TargetName) void LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+
+  // Declare all of the available assembly parser initialization functions.
+#define LLVM_ASM_PARSER(TargetName) void LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+
+  // Declare all of the available disassembler initialization functions.
+#define LLVM_DISASSEMBLER(TargetName) \
+  void LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+}
+
+namespace llvm {
+  /// InitializeAllTargetInfos - The main program should call this function if
+  /// it wants access to all available targets that LLVM is configured to
+  /// support, to make them available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllTargetInfos() {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
+#include "llvm/Config/Targets.def"
+  }
+
+  /// InitializeAllTargets - The main program should call this function if it
+  /// wants access to all available target machines that LLVM is configured to
+  /// support, to make them available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllTargets() {
+    // FIXME: Remove this, clients should do it.
+    InitializeAllTargetInfos();
+
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
+#include "llvm/Config/Targets.def"
+  }
+
+  /// InitializeAllTargetMCs - The main program should call this function if it
+  /// wants access to all available target MC that LLVM is configured to
+  /// support, to make them available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllTargetMCs() {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+  }
+
+  /// InitializeAllAsmPrinters - The main program should call this function if
+  /// it wants all asm printers that LLVM is configured to support, to make them
+  /// available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllAsmPrinters() {
+#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+  }
+
+  /// InitializeAllAsmParsers - The main program should call this function if it
+  /// wants all asm parsers that LLVM is configured to support, to make them
+  /// available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllAsmParsers() {
+#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+  }
+
+  /// InitializeAllDisassemblers - The main program should call this function if
+  /// it wants all disassemblers that LLVM is configured to support, to make
+  /// them available via the TargetRegistry.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline void InitializeAllDisassemblers() {
+#define LLVM_DISASSEMBLER(TargetName) LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+  }
+
+  /// InitializeNativeTarget - The main program should call this function to
+  /// initialize the native target corresponding to the host.  This is useful
+  /// for JIT applications to ensure that the target gets linked in correctly.
+  ///
+  /// It is legal for a client to make multiple calls to this function.
+  inline bool InitializeNativeTarget() {
+  // If we have a native target, initialize it to ensure it is linked in.
+#ifdef LLVM_NATIVE_TARGET
+    LLVM_NATIVE_TARGETINFO();
+    LLVM_NATIVE_TARGET();
+    LLVM_NATIVE_TARGETMC();
+    return false;
+#else
+    return true;
+#endif
+  }
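+
+  // Typical JIT host startup (a sketch; each of these calls returns true
+  // when the corresponding component is not configured into this build):
+  //
+  //   if (InitializeNativeTarget())
+  //     return; // no native target available
+  //   InitializeNativeTargetAsmPrinter();
+  //   InitializeNativeTargetAsmParser();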
+
+  /// InitializeNativeTargetAsmPrinter - The main program should call
+  /// this function to initialize the native target asm printer.
+  inline bool InitializeNativeTargetAsmPrinter() {
+  // If we have a native target, initialize the corresponding asm printer.
+#ifdef LLVM_NATIVE_ASMPRINTER
+    LLVM_NATIVE_ASMPRINTER();
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  /// InitializeNativeTargetAsmParser - The main program should call
+  /// this function to initialize the native target asm parser.
+  inline bool InitializeNativeTargetAsmParser() {
+  // If we have a native target, initialize the corresponding asm parser.
+#ifdef LLVM_NATIVE_ASMPARSER
+    LLVM_NATIVE_ASMPARSER();
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  /// InitializeNativeTargetDisassembler - The main program should call
+  /// this function to initialize the native target disassembler.
+  inline bool InitializeNativeTargetDisassembler() {
+  // If we have a native target, initialize the corresponding disassembler.
+#ifdef LLVM_NATIVE_DISASSEMBLER
+    LLVM_NATIVE_DISASSEMBLER();
+    return false;
+#else
+    return true;
+#endif
+  }
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/ThreadLocal.h b/linux-x64/clang/include/llvm/Support/ThreadLocal.h
new file mode 100644
index 0000000..427a67e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ThreadLocal.h
@@ -0,0 +1,63 @@
+//===- llvm/Support/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::ThreadLocal class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADLOCAL_H
+#define LLVM_SUPPORT_THREADLOCAL_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm {
+  namespace sys {
+    // ThreadLocalImpl - Common base class of all ThreadLocal instantiations.
+    // YOU SHOULD NEVER USE THIS DIRECTLY.
+    class ThreadLocalImpl {
+      typedef uint64_t ThreadLocalDataTy;
+      /// \brief Platform-specific thread local data.
+      ///
+      /// This is embedded in the class and we avoid malloc'ing/free'ing it,
+      /// to make this class safer to use along with CrashRecoveryContext.
+      union {
+        char data[sizeof(ThreadLocalDataTy)];
+        ThreadLocalDataTy align_data;
+      };
+    public:
+      ThreadLocalImpl();
+      virtual ~ThreadLocalImpl();
+      void setInstance(const void* d);
+      void *getInstance();
+      void removeInstance();
+    };
+
+    /// ThreadLocal - A class used to abstract thread-local storage.  It holds,
+    /// for each thread, a pointer to a single object of type T.
+    template<class T>
+    class ThreadLocal : public ThreadLocalImpl {
+    public:
+      ThreadLocal() : ThreadLocalImpl() { }
+
+      /// get - Fetches a pointer to the object associated with the current
+      /// thread.  If no object has yet been associated, it returns NULL.
+      T* get() { return static_cast<T*>(getInstance()); }
+
+      // set - Associates a pointer to an object with the current thread.
+      void set(T* d) { setInstance(d); }
+
+      // erase - Removes the pointer associated with the current thread.
+      void erase() { removeInstance(); }
+    };
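+
+    // A short usage sketch (CacheTy and getOrCreateCache() are hypothetical;
+    // ThreadLocal stores only a pointer, so ownership stays with the caller):
+    //
+    //   static sys::ThreadLocal<CacheTy> TLCache;
+    //   if (!TLCache.get())
+    //     TLCache.set(&getOrCreateCache());
+    //   CacheTy &C = *TLCache.get();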
+  } // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/ThreadPool.h b/linux-x64/clang/include/llvm/Support/ThreadPool.h
new file mode 100644
index 0000000..fb82559
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ThreadPool.h
@@ -0,0 +1,100 @@
+//===-- llvm/Support/ThreadPool.h - A ThreadPool implementation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a crude C++11 based thread pool.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREAD_POOL_H
+#define LLVM_SUPPORT_THREAD_POOL_H
+
+#include "llvm/Support/thread.h"
+
+#include <future>
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <utility>
+
+namespace llvm {
+
+/// A ThreadPool for asynchronous parallel execution on a defined number of
+/// threads.
+///
+/// The pool keeps a vector of threads alive, waiting on a condition variable
+/// for some work to become available.
+class ThreadPool {
+public:
+  using TaskTy = std::function<void()>;
+  using PackagedTaskTy = std::packaged_task<void()>;
+
+  /// Construct a pool with the number of threads found by
+  /// hardware_concurrency().
+  ThreadPool();
+
+  /// Construct a pool of \p ThreadCount threads.
+  ThreadPool(unsigned ThreadCount);
+
+  /// Blocking destructor: the pool will wait for all the threads to complete.
+  ~ThreadPool();
+
+  /// Asynchronous submission of a task to the pool. The returned future can be
+  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  template <typename Function, typename... Args>
+  inline std::shared_future<void> async(Function &&F, Args &&... ArgList) {
+    auto Task =
+        std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
+    return asyncImpl(std::move(Task));
+  }
+
+  /// Asynchronous submission of a task to the pool. The returned future can be
+  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  template <typename Function>
+  inline std::shared_future<void> async(Function &&F) {
+    return asyncImpl(std::forward<Function>(F));
+  }
+
+  /// Blocking wait for all the threads to complete and the queue to be empty.
+  /// It is an error to try to add new tasks while blocking on this call.
+  void wait();
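+
+  // A usage sketch (assumes this build of LLVM has threads enabled):
+  //
+  //   ThreadPool Pool(4);
+  //   std::shared_future<void> F = Pool.async([] { /* do work */ });
+  //   F.wait();    // wait for this one task
+  //   Pool.wait(); // or drain the entire queue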
+
+private:
+  /// Asynchronous submission of a task to the pool. The returned future can be
+  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  std::shared_future<void> asyncImpl(TaskTy F);
+
+  /// Threads in flight
+  std::vector<llvm::thread> Threads;
+
+  /// Tasks waiting for execution in the pool.
+  std::queue<PackagedTaskTy> Tasks;
+
+  /// Locking and signaling for accessing the Tasks queue.
+  std::mutex QueueLock;
+  std::condition_variable QueueCondition;
+
+  /// Locking and signaling for job completion
+  std::mutex CompletionLock;
+  std::condition_variable CompletionCondition;
+
+  /// Keep track of the number of threads actually busy.
+  std::atomic<unsigned> ActiveThreads;
+
+#if LLVM_ENABLE_THREADS // avoids warning for unused variable
+  /// Signal for the destruction of the pool, asking threads to exit.
+  bool EnableFlag;
+#endif
+};
+}
+
+#endif // LLVM_SUPPORT_THREAD_POOL_H
diff --git a/linux-x64/clang/include/llvm/Support/Threading.h b/linux-x64/clang/include/llvm/Support/Threading.h
new file mode 100644
index 0000000..6d813bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Threading.h
@@ -0,0 +1,169 @@
+//===-- llvm/Support/Threading.h - Control multithreading mode --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares helper functions for running LLVM in a multi-threaded
+// environment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADING_H
+#define LLVM_SUPPORT_THREADING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
+#include "llvm/Support/Compiler.h"
+#include <ciso646> // So we can check the C++ standard lib macros.
+#include <functional>
+
+#if defined(_MSC_VER)
+// MSVC's call_once implementation has worked since VS 2015, which is the
+// minimum supported version as of this writing.
+#define LLVM_THREADING_USE_STD_CALL_ONCE 1
+#elif defined(LLVM_ON_UNIX) &&                                                 \
+    (defined(_LIBCPP_VERSION) ||                                               \
+     !(defined(__NetBSD__) || defined(__OpenBSD__) || defined(__ppc__)))
+// std::call_once from libc++ is used on all Unix platforms. Other
+// implementations like libstdc++ are known to have problems on NetBSD,
+// OpenBSD and PowerPC.
+#define LLVM_THREADING_USE_STD_CALL_ONCE 1
+#else
+#define LLVM_THREADING_USE_STD_CALL_ONCE 0
+#endif
+
+#if LLVM_THREADING_USE_STD_CALL_ONCE
+#include <mutex>
+#else
+#include "llvm/Support/Atomic.h"
+#endif
+
+namespace llvm {
+class Twine;
+
+/// Returns true if LLVM is compiled with support for multi-threading, and
+/// false otherwise.
+bool llvm_is_multithreaded();
+
+/// llvm_execute_on_thread - Execute the given \p UserFn on a separate
+/// thread, passing it the provided \p UserData, and wait for thread
+/// completion.
+///
+/// This function does not guarantee that the code will actually be executed
+/// on a separate thread, or that the requested stack size will be honored,
+/// but it tries to do so where system support is available.
+///
+/// \param UserFn - The callback to execute.
+/// \param UserData - An argument to pass to the callback function.
+/// \param RequestedStackSize - If non-zero, a requested size (in bytes) for
+/// the thread stack.
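+///
+/// A minimal usage sketch (the callback and payload are illustrative):
+/// \code
+///   static void doWork(void *Payload) {
+///     int *Value = static_cast<int *>(Payload);
+///     // ... use *Value ...
+///   }
+///   int Data = 42;
+///   llvm_execute_on_thread(doWork, &Data); // returns after doWork completes
+/// \endcode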
+void llvm_execute_on_thread(void (*UserFn)(void *), void *UserData,
+                            unsigned RequestedStackSize = 0);
+
+#if LLVM_THREADING_USE_STD_CALL_ONCE
+
+  typedef std::once_flag once_flag;
+
+#else
+
+  enum InitStatus { Uninitialized = 0, Wait = 1, Done = 2 };
+
+  /// \brief The llvm::once_flag structure
+  ///
+  /// This type is modeled after std::once_flag to use with llvm::call_once.
+  /// This structure must be used as an opaque object. It is a struct to force
+  /// autoinitialization and behave like std::once_flag.
+  struct once_flag {
+    volatile sys::cas_flag status = Uninitialized;
+  };
+
+#endif
+
+  /// \brief Execute the function specified as a parameter once.
+  ///
+  /// Typical usage:
+  /// \code
+  ///   void foo() {...};
+  ///   ...
+  ///   static once_flag flag;
+  ///   call_once(flag, foo);
+  /// \endcode
+  ///
+  /// \param flag Flag used for tracking whether or not this has run.
+  /// \param F Function to call once.
+  template <typename Function, typename... Args>
+  void call_once(once_flag &flag, Function &&F, Args &&... ArgList) {
+#if LLVM_THREADING_USE_STD_CALL_ONCE
+    std::call_once(flag, std::forward<Function>(F),
+                   std::forward<Args>(ArgList)...);
+#else
+    // For other platforms we use a generic (if brittle) version based on our
+    // atomics.
+    sys::cas_flag old_val = sys::CompareAndSwap(&flag.status, Wait, Uninitialized);
+    if (old_val == Uninitialized) {
+      std::forward<Function>(F)(std::forward<Args>(ArgList)...);
+      sys::MemoryFence();
+      TsanIgnoreWritesBegin();
+      TsanHappensBefore(&flag.status);
+      flag.status = Done;
+      TsanIgnoreWritesEnd();
+    } else {
+      // Wait until any thread doing the call has finished.
+      sys::cas_flag tmp = flag.status;
+      sys::MemoryFence();
+      while (tmp != Done) {
+        tmp = flag.status;
+        sys::MemoryFence();
+      }
+    }
+    TsanHappensAfter(&flag.status);
+#endif
+  }
+
+  /// Get the amount of concurrency to use for tasks requiring significant
+  /// memory or other resources. Currently based on physical cores, if
+  /// available for the host system, otherwise falls back to
+  /// thread::hardware_concurrency().
+  /// Returns 1 when LLVM is configured with LLVM_ENABLE_THREADS=OFF.
+  unsigned heavyweight_hardware_concurrency();
+
+  /// Get the number of threads that the current program can execute
+  /// concurrently. On some systems std::thread::hardware_concurrency() returns
+  /// the total number of cores, without taking affinity into consideration.
+  /// Returns 1 when LLVM is configured with LLVM_ENABLE_THREADS=OFF.
+  /// Falls back to std::thread::hardware_concurrency() if sched_getaffinity
+  /// is not available.
+  unsigned hardware_concurrency();
+
+  /// \brief Return the current thread id, as used in various OS system calls.
+  /// Note that not all platforms guarantee that the value returned will be
+  /// unique across the entire system, so portable code should not assume
+  /// this.
+  uint64_t get_threadid();
+
+  /// \brief Get the maximum length of a thread name on this platform.
+  /// A value of 0 means there is no limit.
+  uint32_t get_max_thread_name_length();
+
+  /// \brief Set the name of the current thread.  Setting a thread's name can
+  /// be helpful for enabling useful diagnostics under a debugger or when
+  /// logging.  The level of support for setting a thread's name varies
+  /// wildly across operating systems, and we only make a best effort to
+  /// perform the operation on supported platforms.  No indication of success
+  /// or failure is returned.
+  void set_thread_name(const Twine &Name);
+
+  /// \brief Get the name of the current thread.  The level of support for
+  /// getting a thread's name varies wildly across operating systems, and it
+  /// is not even guaranteed that if you can successfully set a thread's name
+  /// that you can later get it back.  This function is intended for diagnostic
+  /// purposes, and as with setting a thread's name no indication of whether
+  /// the operation succeeded or failed is returned.
+  void get_thread_name(SmallVectorImpl<char> &Name);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Timer.h b/linux-x64/clang/include/llvm/Support/Timer.h
new file mode 100644
index 0000000..198855a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Timer.h
@@ -0,0 +1,231 @@
+//===-- llvm/Support/Timer.h - Interval Timing Support ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TIMER_H
+#define LLVM_SUPPORT_TIMER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Timer;
+class TimerGroup;
+class raw_ostream;
+
+class TimeRecord {
+  double WallTime;       ///< Wall clock time elapsed in seconds.
+  double UserTime;       ///< User time elapsed.
+  double SystemTime;     ///< System time elapsed.
+  ssize_t MemUsed;       ///< Memory allocated (in bytes).
+public:
+  TimeRecord() : WallTime(0), UserTime(0), SystemTime(0), MemUsed(0) {}
+
+  /// Get the current time and memory usage.  If Start is true we get the
+  /// memory usage before the time, otherwise we get the time before the memory
+  /// usage.  This matters if the time to get the memory usage is significant
+  /// and shouldn't be counted as part of a duration.
+  static TimeRecord getCurrentTime(bool Start = true);
+
+  double getProcessTime() const { return UserTime + SystemTime; }
+  double getUserTime() const { return UserTime; }
+  double getSystemTime() const { return SystemTime; }
+  double getWallTime() const { return WallTime; }
+  ssize_t getMemUsed() const { return MemUsed; }
+
+  bool operator<(const TimeRecord &T) const {
+    // Sort by wall time elapsed, as it is the only really accurate measure.
+    return WallTime < T.WallTime;
+  }
+
+  void operator+=(const TimeRecord &RHS) {
+    WallTime   += RHS.WallTime;
+    UserTime   += RHS.UserTime;
+    SystemTime += RHS.SystemTime;
+    MemUsed    += RHS.MemUsed;
+  }
+  void operator-=(const TimeRecord &RHS) {
+    WallTime   -= RHS.WallTime;
+    UserTime   -= RHS.UserTime;
+    SystemTime -= RHS.SystemTime;
+    MemUsed    -= RHS.MemUsed;
+  }
+
+  /// Print the current time record to \p OS, with a breakdown showing
+  /// contributions to the \p Total time record.
+  void print(const TimeRecord &Total, raw_ostream &OS) const;
+};
+
+/// This class is used to track the amount of time spent between invocations of
+/// its startTimer()/stopTimer() methods.  Given appropriate OS support it can
+/// also keep track of the RSS of the program at various points.  By default,
+/// the Timer will print the amount of time it has captured to standard error
+/// when the last timer is destroyed, otherwise it is printed when its
+/// TimerGroup is destroyed.  Timers do not print their information if they are
+/// never started.
+class Timer {
+  TimeRecord Time;          ///< The total time captured.
+  TimeRecord StartTime;     ///< The time startTimer() was last called.
+  std::string Name;         ///< The name of this time variable.
+  std::string Description;  ///< Description of this time variable.
+  bool Running;             ///< Is the timer currently running?
+  bool Triggered;           ///< Has the timer ever been triggered?
+  TimerGroup *TG = nullptr; ///< The TimerGroup this Timer is in.
+
+  Timer **Prev;             ///< Pointer to \p Next of previous timer in group.
+  Timer *Next;              ///< Next timer in the group.
+public:
+  explicit Timer(StringRef Name, StringRef Description) {
+    init(Name, Description);
+  }
+  Timer(StringRef Name, StringRef Description, TimerGroup &tg) {
+    init(Name, Description, tg);
+  }
+  Timer(const Timer &RHS) {
+    assert(!RHS.TG && "Can only copy uninitialized timers");
+  }
+  const Timer &operator=(const Timer &T) {
+    assert(!TG && !T.TG && "Can only assign uninit timers");
+    return *this;
+  }
+  ~Timer();
+
+  /// Create an uninitialized timer; the client must use 'init'.
+  explicit Timer() {}
+  void init(StringRef Name, StringRef Description);
+  void init(StringRef Name, StringRef Description, TimerGroup &tg);
+
+  const std::string &getName() const { return Name; }
+  const std::string &getDescription() const { return Description; }
+  bool isInitialized() const { return TG != nullptr; }
+
+  /// Check if the timer is currently running.
+  bool isRunning() const { return Running; }
+
+  /// Check if startTimer() has ever been called on this timer.
+  bool hasTriggered() const { return Triggered; }
+
+  /// Start the timer running.  Time between calls to startTimer/stopTimer is
+  /// counted by the Timer class.  Note that these calls must be correctly
+  /// paired.
+  void startTimer();
+
+  /// Stop the timer.
+  void stopTimer();
+
+  /// Clear the timer state.
+  void clear();
+
+  /// Return the duration for which this timer has been running.
+  TimeRecord getTotalTime() const { return Time; }
+
+private:
+  friend class TimerGroup;
+};
+
+/// The TimeRegion class is used as a helper class to call the startTimer() and
+/// stopTimer() methods of the Timer class.  When the object is constructed, it
+/// starts the timer specified as its argument.  When it is destroyed, it stops
+/// the relevant timer.  This makes it easy to time a region of code.
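+///
+/// A minimal usage sketch (the timed call is illustrative):
+/// \code
+///   Timer T("frontend", "Frontend parsing");
+///   {
+///     TimeRegion R(T); // starts T here...
+///     parseInput();
+///   }                  // ...and stops it here
+/// \endcode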
+class TimeRegion {
+  Timer *T;
+  TimeRegion(const TimeRegion &) = delete;
+
+public:
+  explicit TimeRegion(Timer &t) : T(&t) {
+    T->startTimer();
+  }
+  explicit TimeRegion(Timer *t) : T(t) {
+    if (T) T->startTimer();
+  }
+  ~TimeRegion() {
+    if (T) T->stopTimer();
+  }
+};
+
+/// This class is basically a combination of TimeRegion and Timer.  It allows
+/// you to declare a new timer, AND specify the region to time, all in one
+/// statement.  All timers with the same name are merged.  This is primarily
+/// used for debugging and for hunting performance problems.
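+///
+/// A minimal usage sketch (the names are illustrative):
+/// \code
+///   {
+///     NamedRegionTimer T("isel", "Instruction Selection",
+///                        "codegen", "Code Generation");
+///     // ... region to time ...
+///   }
+/// \endcode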
+struct NamedRegionTimer : public TimeRegion {
+  explicit NamedRegionTimer(StringRef Name, StringRef Description,
+                            StringRef GroupName,
+                            StringRef GroupDescription, bool Enabled = true);
+};
+
+/// The TimerGroup class is used to group together related timers into a single
+/// report that is printed when the TimerGroup is destroyed.  It is illegal to
+/// destroy a TimerGroup object before all of the Timers in it are gone.  A
+/// TimerGroup can be specified for a newly created timer in its constructor.
+class TimerGroup {
+  struct PrintRecord {
+    TimeRecord Time;
+    std::string Name;
+    std::string Description;
+
+    PrintRecord(const PrintRecord &Other) = default;
+    PrintRecord(const TimeRecord &Time, const std::string &Name,
+                const std::string &Description)
+      : Time(Time), Name(Name), Description(Description) {}
+
+    bool operator <(const PrintRecord &Other) const {
+      return Time < Other.Time;
+    }
+  };
+  std::string Name;
+  std::string Description;
+  Timer *FirstTimer = nullptr; ///< First timer in the group.
+  std::vector<PrintRecord> TimersToPrint;
+
+  TimerGroup **Prev; ///< Pointer to Next field of previous timergroup in list.
+  TimerGroup *Next;  ///< Pointer to next timergroup in list.
+  TimerGroup(const TimerGroup &TG) = delete;
+  void operator=(const TimerGroup &TG) = delete;
+
+public:
+  explicit TimerGroup(StringRef Name, StringRef Description);
+  ~TimerGroup();
+
+  void setName(StringRef NewName, StringRef NewDescription) {
+    Name.assign(NewName.begin(), NewName.end());
+    Description.assign(NewDescription.begin(), NewDescription.end());
+  }
+
+  /// Print any started timers in this group and zero them.
+  void print(raw_ostream &OS);
+
+  /// This static method prints all timers and clears them all out.
+  static void printAll(raw_ostream &OS);
+
+  /// Prints all timers as JSON key/value pairs, and clears them all out.
+  static const char *printAllJSONValues(raw_ostream &OS, const char *delim);
+
+  /// Ensure global timer group lists are initialized. This function is mostly
+  /// used by the Statistic code to influence the construction and destruction
+  /// order of the global timer lists.
+  static void ConstructTimerLists();
+private:
+  friend class Timer;
+  friend void PrintStatisticsJSON(raw_ostream &OS);
+  void addTimer(Timer &T);
+  void removeTimer(Timer &T);
+  void prepareToPrintList();
+  void PrintQueuedTimers(raw_ostream &OS);
+  void printJSONValue(raw_ostream &OS, const PrintRecord &R,
+                      const char *suffix, double Value);
+  const char *printJSONValues(raw_ostream &OS, const char *delim);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/ToolOutputFile.h b/linux-x64/clang/include/llvm/Support/ToolOutputFile.h
new file mode 100644
index 0000000..7fd5f20
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/ToolOutputFile.h
@@ -0,0 +1,63 @@
+//===- ToolOutputFile.h - Output files for compiler-like tools -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the ToolOutputFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TOOLOUTPUTFILE_H
+#define LLVM_SUPPORT_TOOLOUTPUTFILE_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// This class contains a raw_fd_ostream and adds a few extra features commonly
+/// needed for compiler-like tool output files:
+///   - The file is automatically deleted if the process is killed.
+///   - The file is automatically deleted when the ToolOutputFile
+///     object is destroyed unless the client calls keep().
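+///
+/// A minimal usage sketch (assuming the sys::fs::F_None open flag from
+/// llvm/Support/FileSystem.h; error handling abbreviated):
+/// \code
+///   std::error_code EC;
+///   ToolOutputFile Out("output.s", EC, sys::fs::F_None);
+///   if (EC)
+///     return; // report the error
+///   Out.os() << "...";
+///   Out.keep(); // success: do not delete the file on destruction
+/// \endcode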
+class ToolOutputFile {
+  /// This class is declared before the raw_fd_ostream so that it is constructed
+  /// before the raw_fd_ostream is constructed and destructed after the
+  /// raw_fd_ostream is destructed. It installs cleanups in its constructor and
+  /// uninstalls them in its destructor.
+  class CleanupInstaller {
+    /// The name of the file.
+    std::string Filename;
+  public:
+    /// The flag which indicates whether we should not delete the file.
+    bool Keep;
+
+    explicit CleanupInstaller(StringRef Filename);
+    ~CleanupInstaller();
+  } Installer;
+
+  /// The contained stream. This is intentionally declared after Installer.
+  raw_fd_ostream OS;
+
+public:
+  /// This constructor's arguments are passed to raw_fd_ostream's
+  /// constructor.
+  ToolOutputFile(StringRef Filename, std::error_code &EC,
+                 sys::fs::OpenFlags Flags);
+
+  ToolOutputFile(StringRef Filename, int FD);
+
+  /// Return the contained raw_fd_ostream.
+  raw_fd_ostream &os() { return OS; }
+
+  /// Indicate that the tool's job with respect to this output file has been
+  /// successful and the file should not be deleted.
+  void keep() { Installer.Keep = true; }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/TrailingObjects.h b/linux-x64/clang/include/llvm/Support/TrailingObjects.h
new file mode 100644
index 0000000..cb5a52b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TrailingObjects.h
@@ -0,0 +1,401 @@
+//===--- TrailingObjects.h - Variable-length classes ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This header defines support for implementing classes that have
+/// some trailing object (or arrays of objects) appended to them. The
+/// main purpose is to make it obvious where this idiom is being used,
+/// and to make the usage more idiomatic and more difficult to get
+/// wrong.
+///
+/// The TrailingObjects template abstracts away the reinterpret_cast,
+/// pointer arithmetic, and size calculations used for the allocation
+/// and access of appended arrays of objects, and takes care that they
+/// are all allocated at their required alignment. Additionally, it
+/// ensures that the base type is final -- deriving from a class that
+/// expects data appended immediately after it is typically not safe.
+///
+/// Users are expected to derive from this template, and provide
+/// numTrailingObjects implementations for each trailing type except
+/// the last, e.g. like this sample:
+///
+/// \code
+/// class VarLengthObj : private TrailingObjects<VarLengthObj, int, double> {
+///   friend TrailingObjects;
+///
+///   unsigned NumInts, NumDoubles;
+///   size_t numTrailingObjects(OverloadToken<int>) const { return NumInts; }
+///  };
+/// \endcode
+///
+/// You can access the appended arrays via 'getTrailingObjects', and
+/// determine the size needed for allocation via
+/// 'additionalSizeToAlloc' and 'totalSizeToAlloc'.
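+///
+/// For example, allocation and access for the VarLengthObj above might look
+/// like this (a sketch; the factory function and constructor are
+/// illustrative):
+///
+/// \code
+///   // inside VarLengthObj:
+///   static VarLengthObj *create(unsigned NumInts, unsigned NumDoubles) {
+///     void *Mem =
+///         ::operator new(totalSizeToAlloc<int, double>(NumInts, NumDoubles));
+///     return new (Mem) VarLengthObj(NumInts, NumDoubles);
+///   }
+///   int *ints() { return getTrailingObjects<int>(); }
+/// \endcode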
+///
+/// All the methods implemented by this class are intended for use
+/// by the implementation of the class, not as part of its interface
+/// (thus, private inheritance is suggested).
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TRAILINGOBJECTS_H
+#define LLVM_SUPPORT_TRAILINGOBJECTS_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include <new>
+#include <type_traits>
+
+namespace llvm {
+
+namespace trailing_objects_internal {
+/// Helper template to calculate the max alignment requirement for a set of
+/// objects.
+template <typename First, typename... Rest> class AlignmentCalcHelper {
+private:
+  enum {
+    FirstAlignment = alignof(First),
+    RestAlignment = AlignmentCalcHelper<Rest...>::Alignment,
+  };
+
+public:
+  enum {
+    Alignment = FirstAlignment > RestAlignment ? FirstAlignment : RestAlignment
+  };
+};
+
+template <typename First> class AlignmentCalcHelper<First> {
+public:
+  enum { Alignment = alignof(First) };
+};
+
+/// The base class for TrailingObjects* classes.
+class TrailingObjectsBase {
+protected:
+  /// OverloadToken's purpose is to allow specifying function overloads
+  /// for different types, without actually taking the types as
+  /// parameters. (Necessary because member function templates cannot
+  /// be specialized, so overloads must be used instead of
+  /// specialization.)
+  template <typename T> struct OverloadToken {};
+};
+
+/// This helper template works around MSVC 2013's lack of useful
+/// alignas() support. The argument to LLVM_ALIGNAS(), in MSVC, is
+/// required to be a literal integer. But, you *can* use template
+/// specialization to select between a bunch of different LLVM_ALIGNAS
+/// expressions...
+template <int Align>
+class TrailingObjectsAligner : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(1) TrailingObjectsAligner<1> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(2) TrailingObjectsAligner<2> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(4) TrailingObjectsAligner<4> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(8) TrailingObjectsAligner<8> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(16) TrailingObjectsAligner<16> : public TrailingObjectsBase {
+};
+template <>
+class LLVM_ALIGNAS(32) TrailingObjectsAligner<32> : public TrailingObjectsBase {
+};
+
+// Just a little helper for transforming a type pack into the same
+// number of a different type. e.g.:
+//   ExtractSecondType<Foo..., int>::type
+template <typename Ty1, typename Ty2> struct ExtractSecondType {
+  typedef Ty2 type;
+};
+
+// TrailingObjectsImpl is somewhat complicated, because it is a
+// recursively inheriting template, in order to handle the template
+// varargs. Each level of inheritance picks off a single trailing type
+// then recurses on the rest. The "Align", "BaseTy", and
+// "TopTrailingObj" arguments are passed through unchanged through the
+// recursion. "PrevTy" is, at each level, the type handled by the
+// level right above it.
+
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
+          typename... MoreTys>
+class TrailingObjectsImpl {
+  // The main template definition is never used -- the two
+  // specializations cover all possibilities.
+};
+
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
+          typename NextTy, typename... MoreTys>
+class TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
+                          MoreTys...>
+    : public TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy,
+                                 MoreTys...> {
+
+  typedef TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy, MoreTys...>
+      ParentType;
+
+  struct RequiresRealignment {
+    static const bool value = alignof(PrevTy) < alignof(NextTy);
+  };
+
+  static constexpr bool requiresRealignment() {
+    return RequiresRealignment::value;
+  }
+
+protected:
+  // Ensure the inherited getTrailingObjectsImpl is not hidden.
+  using ParentType::getTrailingObjectsImpl;
+
+  // These two functions are helper functions for
+  // TrailingObjects::getTrailingObjects. They recurse to the left --
+  // the result for each type in the list of trailing types depends on
+  // the result of calling the function on the type to the
+  // left. However, the function for the type to the left is
+  // implemented by a *subclass* of this class, so we invoke it via
+  // the TopTrailingObj, which is, via the
+  // curiously-recurring-template-pattern, the most-derived type in
+  // this recursion, and thus, contains all the overloads.
+  static const NextTy *
+  getTrailingObjectsImpl(const BaseTy *Obj,
+                         TrailingObjectsBase::OverloadToken<NextTy>) {
+    auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
+                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
+                TopTrailingObj::callNumTrailingObjects(
+                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
+
+    if (requiresRealignment())
+      return reinterpret_cast<const NextTy *>(
+          llvm::alignAddr(Ptr, alignof(NextTy)));
+    else
+      return reinterpret_cast<const NextTy *>(Ptr);
+  }
+
+  static NextTy *
+  getTrailingObjectsImpl(BaseTy *Obj,
+                         TrailingObjectsBase::OverloadToken<NextTy>) {
+    auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
+                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
+                TopTrailingObj::callNumTrailingObjects(
+                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
+
+    if (requiresRealignment())
+      return reinterpret_cast<NextTy *>(llvm::alignAddr(Ptr, alignof(NextTy)));
+    else
+      return reinterpret_cast<NextTy *>(Ptr);
+  }
+
+  // Helper function for TrailingObjects::additionalSizeToAlloc: this
+  // function recurses to superclasses, each of which requires one
+  // fewer size_t argument, and adds its own size.
+  static constexpr size_t additionalSizeToAllocImpl(
+      size_t SizeSoFar, size_t Count1,
+      typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
+    return ParentType::additionalSizeToAllocImpl(
+        (requiresRealignment() ? llvm::alignTo<alignof(NextTy)>(SizeSoFar)
+                               : SizeSoFar) +
+            sizeof(NextTy) * Count1,
+        MoreCounts...);
+  }
+};
+
+// The base case of the TrailingObjectsImpl inheritance recursion,
+// when there's no more trailing types.
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy>
+class TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy>
+    : public TrailingObjectsAligner<Align> {
+protected:
+  // This is a dummy method, only here so the "using" doesn't fail --
+  // it will never be called, because this function recurses backwards
+  // up the inheritance chain to subclasses.
+  static void getTrailingObjectsImpl();
+
+  static constexpr size_t additionalSizeToAllocImpl(size_t SizeSoFar) {
+    return SizeSoFar;
+  }
+
+  template <bool CheckAlignment> static void verifyTrailingObjectsAlignment() {}
+};
+
+} // end namespace trailing_objects_internal
+
+// Finally, the main type defined in this file, the one intended for users...
+
+/// See the file comment for details on the usage of the
+/// TrailingObjects type.
+template <typename BaseTy, typename... TrailingTys>
+class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl<
+                            trailing_objects_internal::AlignmentCalcHelper<
+                                TrailingTys...>::Alignment,
+                            BaseTy, TrailingObjects<BaseTy, TrailingTys...>,
+                            BaseTy, TrailingTys...> {
+
+  template <int A, typename B, typename T, typename P, typename... M>
+  friend class trailing_objects_internal::TrailingObjectsImpl;
+
+  template <typename... Tys> class Foo {};
+
+  typedef trailing_objects_internal::TrailingObjectsImpl<
+      trailing_objects_internal::AlignmentCalcHelper<TrailingTys...>::Alignment,
+      BaseTy, TrailingObjects<BaseTy, TrailingTys...>, BaseTy, TrailingTys...>
+      ParentType;
+  using TrailingObjectsBase = trailing_objects_internal::TrailingObjectsBase;
+
+  using ParentType::getTrailingObjectsImpl;
+
+  // This function contains only a static_assert BaseTy is final. The
+  // static_assert must be in a function, and not at class-level
+  // because BaseTy isn't complete at class instantiation time, but
+  // will be by the time this function is instantiated.
+  static void verifyTrailingObjectsAssertions() {
+#ifdef LLVM_IS_FINAL
+    static_assert(LLVM_IS_FINAL(BaseTy), "BaseTy must be final.");
+#endif
+  }
+
+  // These two methods are the base of the recursion for this method.
+  static const BaseTy *
+  getTrailingObjectsImpl(const BaseTy *Obj,
+                         TrailingObjectsBase::OverloadToken<BaseTy>) {
+    return Obj;
+  }
+
+  static BaseTy *
+  getTrailingObjectsImpl(BaseTy *Obj,
+                         TrailingObjectsBase::OverloadToken<BaseTy>) {
+    return Obj;
+  }
+
+  // callNumTrailingObjects simply calls numTrailingObjects on the
+  // provided Obj -- except when the type being queried is BaseTy
+  // itself. There is always only one of the base object, so that case
+  // is handled here. (An additional benefit of indirecting through
+  // this function is that consumers only say "friend
+  // TrailingObjects", and thus, only this class itself can call the
+  // numTrailingObjects function.)
+  static size_t
+  callNumTrailingObjects(const BaseTy *Obj,
+                         TrailingObjectsBase::OverloadToken<BaseTy>) {
+    return 1;
+  }
+
+  template <typename T>
+  static size_t callNumTrailingObjects(const BaseTy *Obj,
+                                       TrailingObjectsBase::OverloadToken<T>) {
+    return Obj->numTrailingObjects(TrailingObjectsBase::OverloadToken<T>());
+  }
+
+public:
+  // Make this (privately inherited) member public.
+#ifndef _MSC_VER
+  using ParentType::OverloadToken;
+#else
+  // An MSVC bug prevents the above from working, at least up through CL
+  // 19.10.24629.
+  template <typename T>
+  using OverloadToken = typename ParentType::template OverloadToken<T>;
+#endif
+
+  /// Returns a pointer to the trailing object array of the given type
+  /// (which must be one of those specified in the class template). The
+  /// array may have zero or more elements in it.
+  template <typename T> const T *getTrailingObjects() const {
+    verifyTrailingObjectsAssertions();
+    // Forwards to an impl function with overloads, since member
+    // function templates can't be specialized.
+    return this->getTrailingObjectsImpl(
+        static_cast<const BaseTy *>(this),
+        TrailingObjectsBase::OverloadToken<T>());
+  }
+
+  /// Returns a pointer to the trailing object array of the given type
+  /// (which must be one of those specified in the class template). The
+  /// array may have zero or more elements in it.
+  template <typename T> T *getTrailingObjects() {
+    verifyTrailingObjectsAssertions();
+    // Forwards to an impl function with overloads, since member
+    // function templates can't be specialized.
+    return this->getTrailingObjectsImpl(
+        static_cast<BaseTy *>(this), TrailingObjectsBase::OverloadToken<T>());
+  }
+
+  /// Returns the size of the trailing data, if an object were
+  /// allocated with the given counts (the counts are in the same order
+  /// as the template arguments). This does not include the size of the
+  /// base object.  The template arguments must be the same as those
+  /// used in the class; they are supplied here redundantly only so
+  /// that it's clear what the counts are counting in callers.
+  template <typename... Tys>
+  static constexpr typename std::enable_if<
+      std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
+  additionalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
+                        TrailingTys, size_t>::type... Counts) {
+    return ParentType::additionalSizeToAllocImpl(0, Counts...);
+  }
+
+  /// Returns the total size of an object if it were allocated with the
+  /// given trailing object counts. This is the same as
+  /// additionalSizeToAlloc, except it *does* include the size of the base
+  /// object.
+  template <typename... Tys>
+  static constexpr typename std::enable_if<
+      std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
+  totalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
+                   TrailingTys, size_t>::type... Counts) {
+    return sizeof(BaseTy) + ParentType::additionalSizeToAllocImpl(0, Counts...);
+  }
+
+  /// A type where its ::with_counts template member has a ::type member
+  /// suitable for use as uninitialized storage for an object with the given
+  /// trailing object counts. The template arguments are similar to those
+  /// of additionalSizeToAlloc.
+  ///
+  /// Use with FixedSizeStorageOwner, e.g.:
+  ///
+  /// \code{.cpp}
+  ///
+  /// MyObj::FixedSizeStorage<void *>::with_counts<1u>::type myStackObjStorage;
+  /// MyObj::FixedSizeStorageOwner
+  ///     myStackObjOwner(new ((void *)&myStackObjStorage) MyObj);
+  /// MyObj *const myStackObjPtr = myStackObjOwner.get();
+  ///
+  /// \endcode
+  template <typename... Tys> struct FixedSizeStorage {
+    template <size_t... Counts> struct with_counts {
+      enum { Size = totalSizeToAlloc<Tys...>(Counts...) };
+      typedef llvm::AlignedCharArray<alignof(BaseTy), Size> type;
+    };
+  };
+
+  /// A type that acts as the owner for an object placed into fixed storage.
+  class FixedSizeStorageOwner {
+  public:
+    FixedSizeStorageOwner(BaseTy *p) : p(p) {}
+    ~FixedSizeStorageOwner() {
+      assert(p && "FixedSizeStorageOwner owns null?");
+      p->~BaseTy();
+    }
+
+    BaseTy *get() { return p; }
+    const BaseTy *get() const { return p; }
+
+  private:
+    FixedSizeStorageOwner(const FixedSizeStorageOwner &) = delete;
+    FixedSizeStorageOwner(FixedSizeStorageOwner &&) = delete;
+    FixedSizeStorageOwner &operator=(const FixedSizeStorageOwner &) = delete;
+    FixedSizeStorageOwner &operator=(FixedSizeStorageOwner &&) = delete;
+
+    BaseTy *const p;
+  };
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/TrigramIndex.h b/linux-x64/clang/include/llvm/Support/TrigramIndex.h
new file mode 100644
index 0000000..da0b6da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TrigramIndex.h
@@ -0,0 +1,70 @@
+//===-- TrigramIndex.h - a heuristic for SpecialCaseList --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// TrigramIndex implements a heuristic for SpecialCaseList that allows
+// filtering out ~99% of incoming queries when all regular expressions in the
+// SpecialCaseList are simple wildcards with '*' and '.'. If the rules are more
+// complicated, the check is defeated and it will always pass the queries to a
+// full regex.
+//
+// The basic idea is that in order for a wildcard to match a query, the query
+// needs to have all trigrams which occur in the wildcard. We create a trigram
+// index (trigram -> list of rules with it) and then count trigrams in the query
+// for each rule. If the count for one of the rules reaches the expected value,
+// the check passes the query to a regex. If none of the rules get enough
+// trigrams, the check reports that the query is definitely not matched by any
+// of the rules, and no regex matching is needed.
+// A similar idea was used in Google Code Search as described in the blog post:
+// https://swtch.com/~rsc/regexp/regexp4.html
+//
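+// A minimal usage sketch (the rule and query are illustrative):
+//
+//   TrigramIndex TI;
+//   TI.insert("*hello*world*");
+//   if (!TI.isDefeated() && TI.isDefinitelyOut(Query))
+//     return false; // no rule can possibly match; skip the full regex check
+//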
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TRIGRAMINDEX_H
+#define LLVM_SUPPORT_TRIGRAMINDEX_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace llvm {
+class StringRef;
+
+class TrigramIndex {
+ public:
+  /// Inserts a new Regex into the index.
+  void insert(std::string Regex);
+
+  /// Returns true if the special case list definitely does not have a line
+  /// that matches the query; returns false if it is not sure.
+  bool isDefinitelyOut(StringRef Query) const;
+
+  /// Returns true iff the heuristic is defeated and not useful.
+  /// In this case isDefinitelyOut always returns false.
+  bool isDefeated() { return Defeated; }
+ private:
+  // If true, the rules are too complicated for the check to work, and full
+  // regex matching is needed for every rule.
+  bool Defeated = false;
+  // The minimum number of trigrams which should match for a rule to have a
+  // chance to match the query. The number of elements equals the number of
+  // regex rules in the SpecialCaseList.
+  std::vector<unsigned> Counts;
+  // Index holds a list of rules indices for each trigram. The same indices
+  // are used in Counts to store per-rule limits.
+  // If a trigram is too common (>4 rules with it), we stop tracking it,
+  // which increases the probability for a need to match using regex, but
+  // decreases the costs in the regular case.
+  std::unordered_map<unsigned, SmallVector<size_t, 4>> Index{256};
+};
+
+}  // namespace llvm
+
+#endif  // LLVM_SUPPORT_TRIGRAMINDEX_H
diff --git a/linux-x64/clang/include/llvm/Support/TypeName.h b/linux-x64/clang/include/llvm/Support/TypeName.h
new file mode 100644
index 0000000..0eb7ead
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/TypeName.h
@@ -0,0 +1,65 @@
+//===- TypeName.h -----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TYPENAME_H
+#define LLVM_SUPPORT_TYPENAME_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// We provide a function which tries to compute the (demangled) name of a type
+/// statically.
+///
+/// This routine may fail on some platforms or for particularly unusual types.
+/// Do not use it for anything other than logging and debugging aids. It isn't
+/// portable or dependable in any real sense.
+///
+/// The returned StringRef will point into a static storage duration string.
+/// However, it may not be null terminated and may be some strangely aligned
+/// inner substring of a larger string.
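+///
+/// A minimal usage sketch:
+/// \code
+///   StringRef Name = getTypeName<llvm::StringRef>();
+///   // On supported compilers Name is expected to contain "llvm::StringRef".
+/// \endcode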
+template <typename DesiredTypeName>
+inline StringRef getTypeName() {
+#if defined(__clang__) || defined(__GNUC__)
+  StringRef Name = __PRETTY_FUNCTION__;
+
+  StringRef Key = "DesiredTypeName = ";
+  Name = Name.substr(Name.find(Key));
+  assert(!Name.empty() && "Unable to find the template parameter!");
+  Name = Name.drop_front(Key.size());
+
+  assert(Name.endswith("]") && "Name doesn't end in the substitution key!");
+  return Name.drop_back(1);
+#elif defined(_MSC_VER)
+  StringRef Name = __FUNCSIG__;
+
+  StringRef Key = "getTypeName<";
+  Name = Name.substr(Name.find(Key));
+  assert(!Name.empty() && "Unable to find the function name!");
+  Name = Name.drop_front(Key.size());
+
+  for (StringRef Prefix : {"class ", "struct ", "union ", "enum "})
+    if (Name.startswith(Prefix)) {
+      Name = Name.drop_front(Prefix.size());
+      break;
+    }
+
+  auto AnglePos = Name.rfind('>');
+  assert(AnglePos != StringRef::npos && "Unable to find the closing '>'!");
+  return Name.substr(0, AnglePos);
+#else
+  // No known technique for statically extracting a type name on this compiler.
+  // We return a string that is unlikely to look like any type in LLVM.
+  return "UNKNOWN_TYPE";
+#endif
+}
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Unicode.h b/linux-x64/clang/include/llvm/Support/Unicode.h
new file mode 100644
index 0000000..815484f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Unicode.h
@@ -0,0 +1,71 @@
+//===- llvm/Support/Unicode.h - Unicode character properties  -*- C++ -*-=====//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that allow querying certain properties of Unicode
+// characters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_UNICODE_H
+#define LLVM_SUPPORT_UNICODE_H
+
+namespace llvm {
+class StringRef;
+
+namespace sys {
+namespace unicode {
+
+enum ColumnWidthErrors {
+  ErrorInvalidUTF8 = -2,
+  ErrorNonPrintableCharacter = -1
+};
+
+/// Determines if a character is likely to be displayed correctly on the
+/// terminal. The exact implementation would have to depend on the specific
+/// terminal, so we define semantics that should be suitable for the generic
+/// case of a terminal capable of outputting Unicode characters.
+///
+/// All characters from the Unicode code point range are considered printable
+/// except for:
+///   * C0 and C1 control character ranges;
+///   * default ignorable code points as per 5.21 of
+///     http://www.unicode.org/versions/Unicode6.2.0/UnicodeStandard-6.2.pdf
+///     except for U+00AD SOFT HYPHEN, as it's actually displayed on most
+///     terminals;
+///   * format characters (category = Cf);
+///   * surrogates (category = Cs);
+///   * unassigned characters (category = Cn).
+/// \return true if the character is considered printable.
+bool isPrintable(int UCS);
+
+/// Gets the number of positions the UTF8-encoded \p Text is likely to occupy
+/// when output on a terminal ("character width"). This depends on the
+/// implementation of the terminal, and there's no standard definition of
+/// character width.
+///
+/// The implementation defines it in a way that is expected to be compatible
+/// with a generic Unicode-capable terminal.
+///
+/// \return Character width:
+///   * ErrorNonPrintableCharacter (-1) if \p Text contains non-printable
+///     characters (as identified by isPrintable);
+///   * 0 for each non-spacing and enclosing combining mark;
+///   * 2 for each CJK character excluding halfwidth forms;
+///   * 1 for each of the remaining characters.
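+///
+/// For example, columnWidthUTF8("abc") returns 3, while a string consisting of
+/// a single (non-halfwidth) CJK ideograph returns 2.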
+int columnWidthUTF8(StringRef Text);
+
+/// Fold the input Unicode character according to the simple Unicode case
+/// folding rules.
+int foldCharSimple(int C);
+
+} // namespace unicode
+} // namespace sys
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/UnicodeCharRanges.h b/linux-x64/clang/include/llvm/Support/UnicodeCharRanges.h
new file mode 100644
index 0000000..4c65583
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/UnicodeCharRanges.h
@@ -0,0 +1,107 @@
+//===--- UnicodeCharRanges.h - Types and functions for character ranges ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_UNICODECHARRANGES_H
+#define LLVM_SUPPORT_UNICODECHARRANGES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+#define DEBUG_TYPE "unicode"
+
+namespace llvm {
+namespace sys {
+
+/// \brief Represents a closed range of Unicode code points [Lower, Upper].
+struct UnicodeCharRange {
+  uint32_t Lower;
+  uint32_t Upper;
+};
+
+inline bool operator<(uint32_t Value, UnicodeCharRange Range) {
+  return Value < Range.Lower;
+}
+inline bool operator<(UnicodeCharRange Range, uint32_t Value) {
+  return Range.Upper < Value;
+}
+
+/// \brief Holds a reference to an ordered array of UnicodeCharRange and allows
+/// quickly checking whether a code point is contained in the set represented
+/// by this array.
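+///
+/// A minimal usage sketch (the range table is illustrative):
+/// \code
+///   static const UnicodeCharRange DigitRanges[] = {{0x30, 0x39}};
+///   UnicodeCharSet Digits(DigitRanges);
+///   bool IsDigit = Digits.contains(0x37); // '7'
+/// \endcode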
+class UnicodeCharSet {
+public:
+  typedef ArrayRef<UnicodeCharRange> CharRanges;
+
+  /// \brief Constructs a UnicodeCharSet instance from an array of
+  /// UnicodeCharRanges.
+  ///
+/// The array pointed to by \p Ranges must outlive the UnicodeCharSet
+/// instance and must not change. The array is validated by the constructor,
+/// so it makes sense to create as few UnicodeCharSet instances per array of
+/// ranges as possible.
+#ifdef NDEBUG
+
+  // FIXME: This could use constexpr + static_assert. This way we
+  // may get rid of NDEBUG in this header. Unfortunately there are some
+  // problems to get this working with MSVC 2013. Change this when
+  // the support for MSVC 2013 is dropped.
+  constexpr UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {}
+#else
+  UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {
+    assert(rangesAreValid());
+  }
+#endif
+
+  /// \brief Returns true if the character set contains the Unicode code point
+  /// \p C.
+  bool contains(uint32_t C) const {
+    return std::binary_search(Ranges.begin(), Ranges.end(), C);
+  }
+
+private:
+  /// \brief Returns true if each of the ranges is a proper closed range
+  /// [min, max], and if the ranges themselves are ordered and non-overlapping.
+  bool rangesAreValid() const {
+    uint32_t Prev = 0;
+    for (CharRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
+         I != E; ++I) {
+      if (I != Ranges.begin() && Prev >= I->Lower) {
+        DEBUG(dbgs() << "Upper bound 0x");
+        DEBUG(dbgs().write_hex(Prev));
+        DEBUG(dbgs() << " should be less than succeeding lower bound 0x");
+        DEBUG(dbgs().write_hex(I->Lower) << "\n");
+        return false;
+      }
+      if (I->Upper < I->Lower) {
+        DEBUG(dbgs() << "Upper bound 0x");
+        DEBUG(dbgs().write_hex(I->Upper));
+        DEBUG(dbgs() << " should not be less than lower bound 0x");
+        DEBUG(dbgs().write_hex(I->Lower) << "\n");
+        return false;
+      }
+      Prev = I->Upper;
+    }
+
+    return true;
+  }
+
+  const CharRanges Ranges;
+};
+
+} // namespace sys
+} // namespace llvm
+
+#undef DEBUG_TYPE // "unicode"
+
+#endif // LLVM_SUPPORT_UNICODECHARRANGES_H
diff --git a/linux-x64/clang/include/llvm/Support/UniqueLock.h b/linux-x64/clang/include/llvm/Support/UniqueLock.h
new file mode 100644
index 0000000..b4675f4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/UniqueLock.h
@@ -0,0 +1,69 @@
+//===- Support/UniqueLock.h - Acquire/Release Mutex In Scope ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a guard for a block of code that ensures a Mutex is locked
+// upon construction and released upon destruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_UNIQUE_LOCK_H
+#define LLVM_SUPPORT_UNIQUE_LOCK_H
+
+#include <cassert>
+
+namespace llvm {
+
+  /// A pared-down imitation of std::unique_lock from C++11. Contrary to the
+  /// name, it's really more of a wrapper for a lock. It may or may not have
+  /// an associated mutex, which is guaranteed to be locked upon creation
+  /// and unlocked after destruction. unique_lock can also unlock the mutex
+  /// and re-lock it freely during its lifetime.
+  /// @brief Guard a section of code with a mutex.
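+  ///
+  /// A minimal usage sketch, with any type providing lock()/unlock()
+  /// (std::mutex here):
+  /// \code
+  ///   std::mutex M;
+  ///   {
+  ///     unique_lock<std::mutex> Guard(M); // locked on construction
+  ///     // ... critical section ...
+  ///   }                                   // unlocked on destruction
+  /// \endcode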
+  template<typename MutexT>
+  class unique_lock {
+    MutexT *M = nullptr;
+    bool locked = false;
+
+  public:
+    unique_lock() = default;
+    explicit unique_lock(MutexT &m) : M(&m), locked(true) { M->lock(); }
+    unique_lock(const unique_lock &) = delete;
+    unique_lock &operator=(const unique_lock &) = delete;
+
+    void operator=(unique_lock &&o) {
+      if (owns_lock())
+        M->unlock();
+      M = o.M;
+      locked = o.locked;
+      o.M = nullptr;
+      o.locked = false;
+    }
+
+    ~unique_lock() { if (owns_lock()) M->unlock(); }
+
+    void lock() {
+      assert(!locked && "mutex already locked!");
+      assert(M && "no associated mutex!");
+      M->lock();
+      locked = true;
+    }
+
+    void unlock() {
+      assert(locked && "unlocking a mutex that isn't locked!");
+      assert(M && "no associated mutex!");
+      M->unlock();
+      locked = false;
+    }
+
+    bool owns_lock() { return locked; }
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_UNIQUE_LOCK_H
diff --git a/linux-x64/clang/include/llvm/Support/VCSRevision.h b/linux-x64/clang/include/llvm/Support/VCSRevision.h
new file mode 100644
index 0000000..8510ea2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/VCSRevision.h
@@ -0,0 +1 @@
+#define LLVM_REVISION "git-1d739ffb036"
diff --git a/linux-x64/clang/include/llvm/Support/Valgrind.h b/linux-x64/clang/include/llvm/Support/Valgrind.h
new file mode 100644
index 0000000..084b901
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Valgrind.h
@@ -0,0 +1,32 @@
+//===- llvm/Support/Valgrind.h - Communication with Valgrind ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Methods for communicating with a valgrind instance under which this program
+// is running.  These are all no-ops unless LLVM was configured on a system
+// with the valgrind headers installed and valgrind is controlling this
+// process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_VALGRIND_H
+#define LLVM_SUPPORT_VALGRIND_H
+
+#include <cstddef>
+
+namespace llvm {
+namespace sys {
+  // True if Valgrind is controlling this process.
+  bool RunningOnValgrind();
+
+  // Discard valgrind's translation of code in the range [Addr .. Addr + Len).
+  // Otherwise valgrind may continue to execute the old version of the code.
+  void ValgrindDiscardTranslations(const void *Addr, size_t Len);
+} // namespace sys
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_VALGRIND_H
diff --git a/linux-x64/clang/include/llvm/Support/Watchdog.h b/linux-x64/clang/include/llvm/Support/Watchdog.h
new file mode 100644
index 0000000..01e1d92
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Watchdog.h
@@ -0,0 +1,38 @@
+//===--- Watchdog.h - Watchdog timer ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file declares the llvm::sys::Watchdog class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WATCHDOG_H
+#define LLVM_SUPPORT_WATCHDOG_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+  namespace sys {
+
+    /// This class provides an abstraction for a timeout around an operation
+    /// that must complete in a given amount of time. Failure to complete before
+    /// the timeout is an unrecoverable situation and no mechanisms to attempt
+    /// to handle it are provided.
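+    ///
+    /// A minimal usage sketch (the guarded call is illustrative):
+    /// \code
+    ///   {
+    ///     Watchdog W(30); // fail hard if this scope runs longer than 30s
+    ///     runPossiblyHangingStep();
+    ///   }
+    /// \endcode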
+    class Watchdog {
+    public:
+      Watchdog(unsigned int seconds);
+      ~Watchdog();
+    private:
+      // Noncopyable.
+      Watchdog(const Watchdog &other) = delete;
+      Watchdog &operator=(const Watchdog &other) = delete;
+    };
+  }
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/Win64EH.h b/linux-x64/clang/include/llvm/Support/Win64EH.h
new file mode 100644
index 0000000..f6c4927
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/Win64EH.h
@@ -0,0 +1,147 @@
+//===-- llvm/Support/Win64EH.h ---Win64 EH Constants-------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains constants and structures used for implementing
+// exception handling on Win64 platforms. For more information, see
+// http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WIN64EH_H
+#define LLVM_SUPPORT_WIN64EH_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace Win64EH {
+
+/// UnwindOpcodes - Enumeration whose values specify a single operation in
+/// the prolog of a function.
+enum UnwindOpcodes {
+  UOP_PushNonVol = 0,
+  UOP_AllocLarge,
+  UOP_AllocSmall,
+  UOP_SetFPReg,
+  UOP_SaveNonVol,
+  UOP_SaveNonVolBig,
+  UOP_SaveXMM128 = 8,
+  UOP_SaveXMM128Big,
+  UOP_PushMachFrame
+};
+
+/// UnwindCode - This union describes a single operation in a function prolog,
+/// or part thereof.
+union UnwindCode {
+  struct {
+    uint8_t CodeOffset;
+    uint8_t UnwindOpAndOpInfo;
+  } u;
+  support::ulittle16_t FrameOffset;
+
+  uint8_t getUnwindOp() const {
+    return u.UnwindOpAndOpInfo & 0x0F;
+  }
+  uint8_t getOpInfo() const {
+    return (u.UnwindOpAndOpInfo >> 4) & 0x0F;
+  }
+};
+
+enum {
+  /// UNW_ExceptionHandler - Specifies that this function has an exception
+  /// handler.
+  UNW_ExceptionHandler = 0x01,
+  /// UNW_TerminateHandler - Specifies that this function has a termination
+  /// handler.
+  UNW_TerminateHandler = 0x02,
+  /// UNW_ChainInfo - Specifies that this UnwindInfo structure is chained to
+  /// another one.
+  UNW_ChainInfo = 0x04
+};
+
+/// RuntimeFunction - An entry in the table of functions with unwind info.
+struct RuntimeFunction {
+  support::ulittle32_t StartAddress;
+  support::ulittle32_t EndAddress;
+  support::ulittle32_t UnwindInfoOffset;
+};
+
+/// UnwindInfo - An entry in the exception table.
+struct UnwindInfo {
+  uint8_t VersionAndFlags;
+  uint8_t PrologSize;
+  uint8_t NumCodes;
+  uint8_t FrameRegisterAndOffset;
+  UnwindCode UnwindCodes[1];
+
+  uint8_t getVersion() const {
+    return VersionAndFlags & 0x07;
+  }
+  uint8_t getFlags() const {
+    return (VersionAndFlags >> 3) & 0x1f;
+  }
+  uint8_t getFrameRegister() const {
+    return FrameRegisterAndOffset & 0x0f;
+  }
+  uint8_t getFrameOffset() const {
+    return (FrameRegisterAndOffset >> 4) & 0x0f;
+  }
+
+  // The data after unwindCodes depends on flags.
+  // If UNW_ExceptionHandler or UNW_TerminateHandler is set, the address of
+  // the language-specific exception handler follows.
+  // If UNW_ChainInfo is set, a RuntimeFunction defining the chained unwind
+  // info follows.
+  // For more information please see MSDN at:
+  // http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx
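+  //
+  // A usage sketch for the chained case:
+  //   if (UI->getFlags() & UNW_ChainInfo) {
+  //     const RuntimeFunction *Chained = UI->getChainedFunctionEntry();
+  //     // ... continue unwinding using *Chained ...
+  //   }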
+
+  /// \brief Return pointer to language specific data part of UnwindInfo.
+  void *getLanguageSpecificData() {
+    return reinterpret_cast<void *>(&UnwindCodes[(NumCodes+1) & ~1]);
+  }
+
+  /// \brief Return pointer to language specific data part of UnwindInfo.
+  const void *getLanguageSpecificData() const {
+    return reinterpret_cast<const void *>(&UnwindCodes[(NumCodes + 1) & ~1]);
+  }
+
+  /// \brief Return image-relative offset of language-specific exception handler.
+  uint32_t getLanguageSpecificHandlerOffset() const {
+    return *reinterpret_cast<const support::ulittle32_t *>(
+               getLanguageSpecificData());
+  }
+
+  /// \brief Set image-relative offset of language-specific exception handler.
+  void setLanguageSpecificHandlerOffset(uint32_t offset) {
+    *reinterpret_cast<support::ulittle32_t *>(getLanguageSpecificData()) =
+        offset;
+  }
+
+  /// \brief Return pointer to exception-specific data.
+  void *getExceptionData() {
+    return reinterpret_cast<void *>(reinterpret_cast<uint32_t *>(
+                                                  getLanguageSpecificData())+1);
+  }
+
+  /// \brief Return pointer to chained unwind info.
+  RuntimeFunction *getChainedFunctionEntry() {
+    return reinterpret_cast<RuntimeFunction *>(getLanguageSpecificData());
+  }
+
+  /// \brief Return pointer to chained unwind info.
+  const RuntimeFunction *getChainedFunctionEntry() const {
+    return reinterpret_cast<const RuntimeFunction *>(getLanguageSpecificData());
+  }
+};
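+
+// Illustrative sketch of consuming an UnwindInfo record (UI is assumed to
+// point into a mapped image; error checking omitted):
+//
+//   const UnwindInfo *UI = ...;
+//   uint8_t Flags = UI->getFlags();
+//   if (Flags & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
+//     uint32_t HandlerRVA = UI->getLanguageSpecificHandlerOffset();
+//     // resolve the handler as image base + HandlerRVA
+//   } else if (Flags & UNW_ChainInfo) {
+//     const RuntimeFunction *Chained = UI->getChainedFunctionEntry();
+//   }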
+
+} // End of namespace Win64EH
+} // End of namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/WindowsError.h b/linux-x64/clang/include/llvm/Support/WindowsError.h
new file mode 100644
index 0000000..63bfe59
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/WindowsError.h
@@ -0,0 +1,19 @@
+//===-- WindowsError.h - Support for mapping Windows errors to POSIX -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WINDOWSERROR_H
+#define LLVM_SUPPORT_WINDOWSERROR_H
+
+#include <system_error>
+
+namespace llvm {
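+/// Map a Win32 error value (e.g. the result of GetLastError()) to the closest
+/// generic std::error_code. Illustrative use (5 is ERROR_ACCESS_DENIED,
+/// written out because this header does not pull in the Windows headers):
+///
+///   std::error_code EC = llvm::mapWindowsError(5);
+///   if (EC == std::errc::permission_denied)
+///     ; // report, retry, etc.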
+std::error_code mapWindowsError(unsigned EV);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/WithColor.h b/linux-x64/clang/include/llvm/Support/WithColor.h
new file mode 100644
index 0000000..39c9953
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/WithColor.h
@@ -0,0 +1,48 @@
+//===- WithColor.h ----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WITHCOLOR_H
+#define LLVM_SUPPORT_WITHCOLOR_H
+
+namespace llvm {
+
+class raw_ostream;
+
+// Symbolic names for various syntax elements.
+enum class HighlightColor {
+  Address,
+  String,
+  Tag,
+  Attribute,
+  Enumerator,
+  Macro,
+  Error,
+  Warning,
+  Note
+};
+
+/// An RAII object that temporarily switches an output stream to a specific
+/// color.
+class WithColor {
+  raw_ostream &OS;
+  /// Determine whether colors should be displayed.
+  bool colorsEnabled(raw_ostream &OS);
+
+public:
+  /// To be used like this: WithColor(OS, HighlightColor::String) << "text";
+  WithColor(raw_ostream &OS, HighlightColor S);
+  ~WithColor();
+
+  raw_ostream &get() { return OS; }
+  operator raw_ostream &() { return OS; }
+};
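+
+// Minimal usage sketch; errs() comes from llvm/Support/raw_ostream.h, which
+// the caller must include. The original color settings are restored when the
+// temporary WithColor is destroyed at the end of the statement:
+//
+//   WithColor(errs(), HighlightColor::Error).get() << "error: ";
+//   errs() << "remaining text in the default color\n";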
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_WITHCOLOR_H
diff --git a/linux-x64/clang/include/llvm/Support/X86DisassemblerDecoderCommon.h b/linux-x64/clang/include/llvm/Support/X86DisassemblerDecoderCommon.h
new file mode 100644
index 0000000..eeffb9c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/X86DisassemblerDecoderCommon.h
@@ -0,0 +1,469 @@
+//===-- X86DisassemblerDecoderCommon.h - Disassembler decoder ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler.
+// It contains common definitions used by both the disassembler and the table
+// generator.
+// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLERDECODERCOMMON_H
+#define LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLERDECODERCOMMON_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+namespace X86Disassembler {
+
+#define INSTRUCTIONS_SYM  x86DisassemblerInstrSpecifiers
+#define CONTEXTS_SYM      x86DisassemblerContexts
+#define ONEBYTE_SYM       x86DisassemblerOneByteOpcodes
+#define TWOBYTE_SYM       x86DisassemblerTwoByteOpcodes
+#define THREEBYTE38_SYM   x86DisassemblerThreeByte38Opcodes
+#define THREEBYTE3A_SYM   x86DisassemblerThreeByte3AOpcodes
+#define XOP8_MAP_SYM      x86DisassemblerXOP8Opcodes
+#define XOP9_MAP_SYM      x86DisassemblerXOP9Opcodes
+#define XOPA_MAP_SYM      x86DisassemblerXOPAOpcodes
+#define THREEDNOW_MAP_SYM x86Disassembler3DNowOpcodes
+
+#define INSTRUCTIONS_STR  "x86DisassemblerInstrSpecifiers"
+#define CONTEXTS_STR      "x86DisassemblerContexts"
+#define ONEBYTE_STR       "x86DisassemblerOneByteOpcodes"
+#define TWOBYTE_STR       "x86DisassemblerTwoByteOpcodes"
+#define THREEBYTE38_STR   "x86DisassemblerThreeByte38Opcodes"
+#define THREEBYTE3A_STR   "x86DisassemblerThreeByte3AOpcodes"
+#define XOP8_MAP_STR      "x86DisassemblerXOP8Opcodes"
+#define XOP9_MAP_STR      "x86DisassemblerXOP9Opcodes"
+#define XOPA_MAP_STR      "x86DisassemblerXOPAOpcodes"
+#define THREEDNOW_MAP_STR "x86Disassembler3DNowOpcodes"
+
+// Attributes of an instruction that must be known before the opcode can be
+// processed correctly.  Most of these indicate the presence of particular
+// prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
+#define ATTRIBUTE_BITS                  \
+  ENUM_ENTRY(ATTR_NONE,   0x00)         \
+  ENUM_ENTRY(ATTR_64BIT,  (0x1 << 0))   \
+  ENUM_ENTRY(ATTR_XS,     (0x1 << 1))   \
+  ENUM_ENTRY(ATTR_XD,     (0x1 << 2))   \
+  ENUM_ENTRY(ATTR_REXW,   (0x1 << 3))   \
+  ENUM_ENTRY(ATTR_OPSIZE, (0x1 << 4))   \
+  ENUM_ENTRY(ATTR_ADSIZE, (0x1 << 5))   \
+  ENUM_ENTRY(ATTR_VEX,    (0x1 << 6))   \
+  ENUM_ENTRY(ATTR_VEXL,   (0x1 << 7))   \
+  ENUM_ENTRY(ATTR_EVEX,   (0x1 << 8))   \
+  ENUM_ENTRY(ATTR_EVEXL,  (0x1 << 9))   \
+  ENUM_ENTRY(ATTR_EVEXL2, (0x1 << 10))  \
+  ENUM_ENTRY(ATTR_EVEXK,  (0x1 << 11))  \
+  ENUM_ENTRY(ATTR_EVEXKZ, (0x1 << 12))  \
+  ENUM_ENTRY(ATTR_EVEXB,  (0x1 << 13))
+
+#define ENUM_ENTRY(n, v) n = v,
+enum attributeBits {
+  ATTRIBUTE_BITS
+  ATTR_max
+};
+#undef ENUM_ENTRY
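+
+// The ENUM_ENTRY X-macro above can be re-expanded by other consumers. A
+// hedged sketch that builds a parallel table of attribute names (AttrNames
+// is invented for illustration, not something this header defines):
+//
+//   #define ENUM_ENTRY(n, v) #n,
+//   static const char *AttrNames[] = { ATTRIBUTE_BITS };
+//   #undef ENUM_ENTRY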
+
+// Combinations of the above attributes that are relevant to instruction
+// decode. Although other combinations are possible, they can be reduced to
+// these without affecting the ultimately decoded instruction.
+
+//           Class name           Rank  Rationale for rank assignment
+#define INSTRUCTION_CONTEXTS                                                   \
+  ENUM_ENTRY(IC,                    0,  "says nothing about the instruction")  \
+  ENUM_ENTRY(IC_64BIT,              1,  "says the instruction applies in "     \
+                                        "64-bit mode but no more")             \
+  ENUM_ENTRY(IC_OPSIZE,             3,  "requires an OPSIZE prefix, so "       \
+                                        "operands change width")               \
+  ENUM_ENTRY(IC_ADSIZE,             3,  "requires an ADSIZE prefix, so "       \
+                                        "operands change width")               \
+  ENUM_ENTRY(IC_OPSIZE_ADSIZE,      4,  "requires ADSIZE and OPSIZE prefixes") \
+  ENUM_ENTRY(IC_XD,                 2,  "may say something about the opcode "  \
+                                        "but not the operands")                \
+  ENUM_ENTRY(IC_XS,                 2,  "may say something about the opcode "  \
+                                        "but not the operands")                \
+  ENUM_ENTRY(IC_XD_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
+                                        "operands change width")               \
+  ENUM_ENTRY(IC_XS_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
+                                        "operands change width")               \
+  ENUM_ENTRY(IC_64BIT_REXW,         5,  "requires a REX.W prefix, so operands "\
+                                        "change width; overrides IC_OPSIZE")   \
+  ENUM_ENTRY(IC_64BIT_REXW_ADSIZE,  6,  "requires a REX.W prefix and 0x67 "    \
+                                        "prefix")                              \
+  ENUM_ENTRY(IC_64BIT_OPSIZE,       3,  "Just as meaningful as IC_OPSIZE")     \
+  ENUM_ENTRY(IC_64BIT_ADSIZE,       3,  "Just as meaningful as IC_ADSIZE")     \
+  ENUM_ENTRY(IC_64BIT_OPSIZE_ADSIZE, 4, "Just as meaningful as IC_OPSIZE/"     \
+                                        "IC_ADSIZE")                           \
+  ENUM_ENTRY(IC_64BIT_XD,           6,  "XD instructions are SSE; REX.W is "   \
+                                        "secondary")                           \
+  ENUM_ENTRY(IC_64BIT_XS,           6,  "Just as meaningful as IC_64BIT_XD")   \
+  ENUM_ENTRY(IC_64BIT_XD_OPSIZE,    3,  "Just as meaningful as IC_XD_OPSIZE")  \
+  ENUM_ENTRY(IC_64BIT_XS_OPSIZE,    3,  "Just as meaningful as IC_XS_OPSIZE")  \
+  ENUM_ENTRY(IC_64BIT_REXW_XS,      7,  "OPSIZE could mean a different "       \
+                                        "opcode")                              \
+  ENUM_ENTRY(IC_64BIT_REXW_XD,      7,  "Just as meaningful as "               \
+                                        "IC_64BIT_REXW_XS")                    \
+  ENUM_ENTRY(IC_64BIT_REXW_OPSIZE,  8,  "The Dynamic Duo!  Prefer over all "   \
+                                        "else because this changes most "      \
+                                        "operands' meaning")                   \
+  ENUM_ENTRY(IC_VEX,                1,  "requires a VEX prefix")               \
+  ENUM_ENTRY(IC_VEX_XS,             2,  "requires VEX and the XS prefix")      \
+  ENUM_ENTRY(IC_VEX_XD,             2,  "requires VEX and the XD prefix")      \
+  ENUM_ENTRY(IC_VEX_OPSIZE,         2,  "requires VEX and the OpSize prefix")  \
+  ENUM_ENTRY(IC_VEX_W,              3,  "requires VEX and the W prefix")       \
+  ENUM_ENTRY(IC_VEX_W_XS,           4,  "requires VEX, W, and XS prefix")      \
+  ENUM_ENTRY(IC_VEX_W_XD,           4,  "requires VEX, W, and XD prefix")      \
+  ENUM_ENTRY(IC_VEX_W_OPSIZE,       4,  "requires VEX, W, and OpSize")         \
+  ENUM_ENTRY(IC_VEX_L,              3,  "requires VEX and the L prefix")       \
+  ENUM_ENTRY(IC_VEX_L_XS,           4,  "requires VEX and the L and XS prefix")\
+  ENUM_ENTRY(IC_VEX_L_XD,           4,  "requires VEX and the L and XD prefix")\
+  ENUM_ENTRY(IC_VEX_L_OPSIZE,       4,  "requires VEX, L, and OpSize")         \
+  ENUM_ENTRY(IC_VEX_L_W,            4,  "requires VEX, L and W")               \
+  ENUM_ENTRY(IC_VEX_L_W_XS,         5,  "requires VEX, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_VEX_L_W_XD,         5,  "requires VEX, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_VEX_L_W_OPSIZE,     5,  "requires VEX, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX,               1,  "requires an EVEX prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS,            2,  "requires EVEX and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD,            2,  "requires EVEX and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE,        2,  "requires EVEX and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W,             3,  "requires EVEX and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS,          4,  "requires EVEX, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD,          4,  "requires EVEX, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE,      4,  "requires EVEX, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L,             3,  "requires EVEX and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS,          4,  "requires EVEX and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD,          4,  "requires EVEX and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE,      4,  "requires EVEX, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W,           3,  "requires EVEX, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS,        4,  "requires EVEX, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD,        4,  "requires EVEX, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE,    4,  "requires EVEX, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2,            3,  "requires EVEX and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS,         4,  "requires EVEX and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD,         4,  "requires EVEX and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE,     4,  "requires EVEX, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W,          3,  "requires EVEX, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS,       4,  "requires EVEX, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD,       4,  "requires EVEX, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE,   4,  "requires EVEX, L2, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_K,             1,  "requires an EVEX_K prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS_K,          2,  "requires EVEX_K and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD_K,          2,  "requires EVEX_K and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_K,      2,  "requires EVEX_K and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W_K,           3,  "requires EVEX_K and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS_K,        4,  "requires EVEX_K, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD_K,        4,  "requires EVEX_K, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K,    4,  "requires EVEX_K, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_K,           3,  "requires EVEX_K and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS_K,        4,  "requires EVEX_K and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD_K,        4,  "requires EVEX_K and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K,    4,  "requires EVEX_K, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W_K,         3,  "requires EVEX_K, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_K,      4,  "requires EVEX_K, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_K,      4,  "requires EVEX_K, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K,  4,  "requires EVEX_K, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2_K,          3,  "requires EVEX_K and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS_K,       4,  "requires EVEX_K and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD_K,       4,  "requires EVEX_K and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K,   4,  "requires EVEX_K, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_K,        3,  "requires EVEX_K, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_K,     4,  "requires EVEX_K, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_K,     4,  "requires EVEX_K, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4,  "requires EVEX_K, L2, W and OpSize")     \
+  ENUM_ENTRY(IC_EVEX_B,             1,  "requires an EVEX_B prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS_B,          2,  "requires EVEX_B and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD_B,          2,  "requires EVEX_B and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_B,      2,  "requires EVEX_B and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W_B,           3,  "requires EVEX_B and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS_B,        4,  "requires EVEX_B, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD_B,        4,  "requires EVEX_B, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_B,    4,  "requires EVEX_B, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_B,           3,  "requires EVEX_B and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS_B,        4,  "requires EVEX_B and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD_B,        4,  "requires EVEX_B and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_B,    4,  "requires EVEX_B, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W_B,         3,  "requires EVEX_B, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_B,      4,  "requires EVEX_B, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_B,      4,  "requires EVEX_B, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B,  4,  "requires EVEX_B, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2_B,          3,  "requires EVEX_B and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS_B,       4,  "requires EVEX_B and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD_B,       4,  "requires EVEX_B and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B,   4,  "requires EVEX_B, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_B,        3,  "requires EVEX_B, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_B,     4,  "requires EVEX_B, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_B,     4,  "requires EVEX_B, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4,  "requires EVEX_B, L2, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_K_B,           1,  "requires EVEX_B and EVEX_K prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS_K_B,        2,  "requires EVEX_B, EVEX_K and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD_K_B,        2,  "requires EVEX_B, EVEX_K and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_K_B,    2,  "requires EVEX_B, EVEX_K and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W_K_B,         3,  "requires EVEX_B, EVEX_K and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_K_B,         3,  "requires EVEX_B, EVEX_K and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W_K_B,       3,  "requires EVEX_B, EVEX_K, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2_K_B,        3,  "requires EVEX_B, EVEX_K and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4,  "requires EVEX_B, EVEX_K, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_K_B,      3,  "requires EVEX_B, EVEX_K, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L2, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_KZ_B,           1,  "requires EVEX_B and EVEX_KZ prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B,    2,  "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_KZ_B,           3,  "requires EVEX_B, EVEX_KZ and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ_B,    4,  "requires EVEX_B, EVEX_KZ, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2_KZ_B,          3,  "requires EVEX_B, EVEX_KZ and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ_B,   4,  "requires EVEX_B, EVEX_KZ, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_KZ_B,        3,  "requires EVEX_B, EVEX_KZ, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ_B, 4,  "requires EVEX_B, EVEX_KZ, L2, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_KZ,             1,  "requires an EVEX_KZ prefix")             \
+  ENUM_ENTRY(IC_EVEX_XS_KZ,          2,  "requires EVEX_KZ and the XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_XD_KZ,          2,  "requires EVEX_KZ and the XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ,      2,  "requires EVEX_KZ and the OpSize prefix") \
+  ENUM_ENTRY(IC_EVEX_W_KZ,           3,  "requires EVEX_KZ and the W prefix")      \
+  ENUM_ENTRY(IC_EVEX_W_XS_KZ,        4,  "requires EVEX_KZ, W, and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XD_KZ,        4,  "requires EVEX_KZ, W, and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ,    4,  "requires EVEX_KZ, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_KZ,           3,  "requires EVEX_KZ and the L prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_XS_KZ,        4,  "requires EVEX_KZ and the L and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L_XD_KZ,        4,  "requires EVEX_KZ and the L and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ,    4,  "requires EVEX_KZ, L, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L_W_KZ,         3,  "requires EVEX_KZ, L and W")               \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ,      4,  "requires EVEX_KZ, L, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ,      4,  "requires EVEX_KZ, L, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ,  4,  "requires EVEX_KZ, L, W and OpSize")       \
+  ENUM_ENTRY(IC_EVEX_L2_KZ,          3,  "requires EVEX_KZ and the L2 prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XS_KZ,       4,  "requires EVEX_KZ and the L2 and XS prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_XD_KZ,       4,  "requires EVEX_KZ and the L2 and XD prefix")\
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ,   4,  "requires EVEX_KZ, L2, and OpSize")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_KZ,        3,  "requires EVEX_KZ, L2 and W")               \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ,     4,  "requires EVEX_KZ, L2, W and XS prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ,     4,  "requires EVEX_KZ, L2, W and XD prefix")    \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4,  "requires EVEX_KZ, L2, W and OpSize")
+
+#define ENUM_ENTRY(n, r, d) n,
+enum InstructionContext {
+  INSTRUCTION_CONTEXTS
+  IC_max
+};
+#undef ENUM_ENTRY
+
+// Opcode types, which determine which decode table to use, both in the Intel
+// manual and also for the decoder.
+enum OpcodeType {
+  ONEBYTE       = 0,
+  TWOBYTE       = 1,
+  THREEBYTE_38  = 2,
+  THREEBYTE_3A  = 3,
+  XOP8_MAP      = 4,
+  XOP9_MAP      = 5,
+  XOPA_MAP      = 6,
+  THREEDNOW_MAP = 7
+};
+
+// The following structs are used for the hierarchical decode table.  After
+// determining the instruction's class (i.e., which IC_* constant applies to
+// it), the decoder reads the opcode.  Some instructions require specific
+// values of the ModR/M byte, so the ModR/M byte indexes into the final table.
+//
+// If a ModR/M byte is not required, "required" is left unset, and the values
+// for each instructionID are identical.
+typedef uint16_t InstrUID;
+
+// ModRMDecisionType - describes the type of ModR/M decision, allowing the
+// consumer to determine the number of entries in it.
+//
+// MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
+//                  instruction is the same.
+// MODRM_SPLITRM  - If the ModR/M byte is between 0x00 and 0xbf, the opcode
+//                  corresponds to one instruction; otherwise, it corresponds to
+//                  a different instruction.
+// MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, the reg
+//                  field of the ModR/M byte (bits 5:3) selects the
+//                  instruction; otherwise, each value of the ModR/M byte
+//                  could correspond to a different instruction.
+// MODRM_SPLITREG - The reg field of the ModR/M byte (bits 5:3) selects the
+//                  instruction. This corresponds to instructions that use
+//                  the reg field as an opcode extension.
+// MODRM_FULL     - Potentially, each value of the ModR/M byte could correspond
+//                  to a different instruction.
+#define MODRMTYPES            \
+  ENUM_ENTRY(MODRM_ONEENTRY)  \
+  ENUM_ENTRY(MODRM_SPLITRM)   \
+  ENUM_ENTRY(MODRM_SPLITMISC)  \
+  ENUM_ENTRY(MODRM_SPLITREG)  \
+  ENUM_ENTRY(MODRM_FULL)
+
+#define ENUM_ENTRY(n) n,
+enum ModRMDecisionType {
+  MODRMTYPES
+  MODRM_max
+};
+#undef ENUM_ENTRY
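+
+// Hedged sketch of how a decoder might index a per-opcode InstrUID table for
+// each decision type (the function name and table layout are illustrative;
+// the real decoder lives in the X86 target, not in this header):
+//
+//   InstrUID pick(ModRMDecisionType DT, const InstrUID *Tbl, uint8_t ModRM) {
+//     switch (DT) {
+//     case MODRM_ONEENTRY:  return Tbl[0];
+//     case MODRM_SPLITRM:   return Tbl[ModRM < 0xc0 ? 0 : 1];
+//     case MODRM_SPLITREG:  return Tbl[(ModRM >> 3) & 0x7];
+//     case MODRM_SPLITMISC: return ModRM < 0xc0 ? Tbl[(ModRM >> 3) & 0x7]
+//                                               : Tbl[(ModRM & 0x3f) + 8];
+//     case MODRM_FULL:      return Tbl[ModRM];
+//     default:              return 0;
+//     }
+//   }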
+
+#define CASE_ENCODING_RM     \
+    case ENCODING_RM:        \
+    case ENCODING_RM_CD2:    \
+    case ENCODING_RM_CD4:    \
+    case ENCODING_RM_CD8:    \
+    case ENCODING_RM_CD16:   \
+    case ENCODING_RM_CD32:   \
+    case ENCODING_RM_CD64
+
+#define CASE_ENCODING_VSIB   \
+    case ENCODING_VSIB:      \
+    case ENCODING_VSIB_CD2:  \
+    case ENCODING_VSIB_CD4:  \
+    case ENCODING_VSIB_CD8:  \
+    case ENCODING_VSIB_CD16: \
+    case ENCODING_VSIB_CD32: \
+    case ENCODING_VSIB_CD64
+
+// Physical encodings of instruction operands.
+#define ENCODINGS                                                              \
+  ENUM_ENTRY(ENCODING_NONE,   "")                                              \
+  ENUM_ENTRY(ENCODING_REG,    "Register operand in ModR/M byte.")              \
+  ENUM_ENTRY(ENCODING_RM,     "R/M operand in ModR/M byte.")                   \
+  ENUM_ENTRY(ENCODING_RM_CD2, "R/M operand with CDisp scaling of 2")           \
+  ENUM_ENTRY(ENCODING_RM_CD4, "R/M operand with CDisp scaling of 4")           \
+  ENUM_ENTRY(ENCODING_RM_CD8, "R/M operand with CDisp scaling of 8")           \
+  ENUM_ENTRY(ENCODING_RM_CD16,"R/M operand with CDisp scaling of 16")          \
+  ENUM_ENTRY(ENCODING_RM_CD32,"R/M operand with CDisp scaling of 32")          \
+  ENUM_ENTRY(ENCODING_RM_CD64,"R/M operand with CDisp scaling of 64")          \
+  ENUM_ENTRY(ENCODING_VSIB,     "VSIB operand in ModR/M byte.")                \
+  ENUM_ENTRY(ENCODING_VSIB_CD2, "VSIB operand with CDisp scaling of 2")        \
+  ENUM_ENTRY(ENCODING_VSIB_CD4, "VSIB operand with CDisp scaling of 4")        \
+  ENUM_ENTRY(ENCODING_VSIB_CD8, "VSIB operand with CDisp scaling of 8")        \
+  ENUM_ENTRY(ENCODING_VSIB_CD16,"VSIB operand with CDisp scaling of 16")       \
+  ENUM_ENTRY(ENCODING_VSIB_CD32,"VSIB operand with CDisp scaling of 32")       \
+  ENUM_ENTRY(ENCODING_VSIB_CD64,"VSIB operand with CDisp scaling of 64")       \
+  ENUM_ENTRY(ENCODING_VVVV,   "Register operand in VEX.vvvv byte.")            \
+  ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.")         \
+  ENUM_ENTRY(ENCODING_IB,     "1-byte immediate")                              \
+  ENUM_ENTRY(ENCODING_IW,     "2-byte")                                        \
+  ENUM_ENTRY(ENCODING_ID,     "4-byte")                                        \
+  ENUM_ENTRY(ENCODING_IO,     "8-byte")                                        \
+  ENUM_ENTRY(ENCODING_RB,     "(AL..DIL, R8L..R15L) Register code added to "   \
+                              "the opcode byte")                               \
+  ENUM_ENTRY(ENCODING_RW,     "(AX..DI, R8W..R15W)")                           \
+  ENUM_ENTRY(ENCODING_RD,     "(EAX..EDI, R8D..R15D)")                         \
+  ENUM_ENTRY(ENCODING_RO,     "(RAX..RDI, R8..R15)")                           \
+  ENUM_ENTRY(ENCODING_FP,     "Position on floating-point stack in ModR/M "    \
+                              "byte.")                                         \
+                                                                               \
+  ENUM_ENTRY(ENCODING_Iv,     "Immediate of operand size")                     \
+  ENUM_ENTRY(ENCODING_Ia,     "Immediate of address size")                     \
+  ENUM_ENTRY(ENCODING_IRC,    "Immediate for static rounding control")         \
+  ENUM_ENTRY(ENCODING_Rv,     "Register code of operand size added to the "    \
+                              "opcode byte")                                   \
+  ENUM_ENTRY(ENCODING_DUP,    "Duplicate of another operand; ID is encoded "   \
+                              "in type")                                       \
+  ENUM_ENTRY(ENCODING_SI,     "Source index; encoded in OpSize/Adsize prefix") \
+  ENUM_ENTRY(ENCODING_DI,     "Destination index; encoded in prefixes")
+
+#define ENUM_ENTRY(n, d) n,
+enum OperandEncoding {
+  ENCODINGS
+  ENCODING_max
+};
+#undef ENUM_ENTRY
+
+// Semantic interpretations of instruction operands.
+#define TYPES                                                                  \
+  ENUM_ENTRY(TYPE_NONE,       "")                                              \
+  ENUM_ENTRY(TYPE_REL,        "immediate address")                             \
+  ENUM_ENTRY(TYPE_R8,         "1-byte register operand")                       \
+  ENUM_ENTRY(TYPE_R16,        "2-byte")                                        \
+  ENUM_ENTRY(TYPE_R32,        "4-byte")                                        \
+  ENUM_ENTRY(TYPE_R64,        "8-byte")                                        \
+  ENUM_ENTRY(TYPE_IMM,        "immediate operand")                            \
+  ENUM_ENTRY(TYPE_IMM3,       "1-byte immediate operand between 0 and 7")      \
+  ENUM_ENTRY(TYPE_IMM5,       "1-byte immediate operand between 0 and 31")     \
+  ENUM_ENTRY(TYPE_AVX512ICC,  "1-byte immediate operand for AVX512 icmp")      \
+  ENUM_ENTRY(TYPE_UIMM8,      "1-byte unsigned immediate operand")             \
+  ENUM_ENTRY(TYPE_M,          "Memory operand")                                \
+  ENUM_ENTRY(TYPE_MVSIBX,     "Memory operand using XMM index")                \
+  ENUM_ENTRY(TYPE_MVSIBY,     "Memory operand using YMM index")                \
+  ENUM_ENTRY(TYPE_MVSIBZ,     "Memory operand using ZMM index")                \
+  ENUM_ENTRY(TYPE_SRCIDX,     "memory at source index")                        \
+  ENUM_ENTRY(TYPE_DSTIDX,     "memory at destination index")                   \
+  ENUM_ENTRY(TYPE_MOFFS,      "memory offset (relative to segment base)")      \
+  ENUM_ENTRY(TYPE_ST,         "Position on the floating-point stack")          \
+  ENUM_ENTRY(TYPE_MM64,       "8-byte MMX register")                           \
+  ENUM_ENTRY(TYPE_XMM,        "16-byte")                                       \
+  ENUM_ENTRY(TYPE_YMM,        "32-byte")                                       \
+  ENUM_ENTRY(TYPE_ZMM,        "64-byte")                                       \
+  ENUM_ENTRY(TYPE_VK,         "mask register")                                 \
+  ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand")                      \
+  ENUM_ENTRY(TYPE_DEBUGREG,   "Debug register operand")                        \
+  ENUM_ENTRY(TYPE_CONTROLREG, "Control register operand")                      \
+  ENUM_ENTRY(TYPE_BNDR,       "MPX bounds register")                           \
+                                                                               \
+  ENUM_ENTRY(TYPE_Rv,         "Register operand of operand size")              \
+  ENUM_ENTRY(TYPE_RELv,       "Immediate address of operand size")             \
+  ENUM_ENTRY(TYPE_DUP0,       "Duplicate of operand 0")                        \
+  ENUM_ENTRY(TYPE_DUP1,       "operand 1")                                     \
+  ENUM_ENTRY(TYPE_DUP2,       "operand 2")                                     \
+  ENUM_ENTRY(TYPE_DUP3,       "operand 3")                                     \
+  ENUM_ENTRY(TYPE_DUP4,       "operand 4")
+
+#define ENUM_ENTRY(n, d) n,
+enum OperandType {
+  TYPES
+  TYPE_max
+};
+#undef ENUM_ENTRY
+
+/// \brief The specification for how to extract and interpret one operand.
+struct OperandSpecifier {
+  uint8_t encoding;
+  uint8_t type;
+};
+
+static const unsigned X86_MAX_OPERANDS = 6;
+
+/// Decoding mode for the Intel disassembler.  16-bit, 32-bit, and 64-bit mode
+/// are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode,
+/// respectively.
+enum DisassemblerMode {
+  MODE_16BIT,
+  MODE_32BIT,
+  MODE_64BIT
+};
+
+} // namespace X86Disassembler
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/X86TargetParser.def b/linux-x64/clang/include/llvm/Support/X86TargetParser.def
new file mode 100644
index 0000000..5c8c576
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/X86TargetParser.def
@@ -0,0 +1,155 @@
+//===- X86TargetParser.def - X86 target parsing defines ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides defines to build up the X86 target parser's logic.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
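+
+// A consumer defines only the hooks it needs, then includes this file; each
+// section below supplies no-op defaults and #undefs its hooks when done.
+// Hedged sketch (the enum name here is illustrative, not something this file
+// defines):
+//
+//   enum ProcessorVendors {
+//   #define X86_VENDOR(ENUM, STR) ENUM,
+//   #include "llvm/Support/X86TargetParser.def"
+//   };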
+
+#ifndef X86_VENDOR
+#define X86_VENDOR(ENUM, STR)
+#endif
+X86_VENDOR(VENDOR_INTEL, "intel")
+X86_VENDOR(VENDOR_AMD,   "amd")
+#undef X86_VENDOR
+
+// This macro is used to implement CPU types that have an alias. Currently,
+// at most one alias per type is supported.
+#ifndef X86_CPU_TYPE_COMPAT_WITH_ALIAS
+#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS)            \
+  X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR)
+#endif
+
+// This macro is used for cpu types present in compiler-rt/libgcc.
+#ifndef X86_CPU_TYPE_COMPAT
+#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) X86_CPU_TYPE(ARCHNAME, ENUM)
+#endif
+
+#ifndef X86_CPU_TYPE
+#define X86_CPU_TYPE(ARCHNAME, ENUM)
+#endif
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("bonnell",    INTEL_BONNELL,    "bonnell", "atom")
+X86_CPU_TYPE_COMPAT           ("core2",      INTEL_CORE2,      "core2")
+X86_CPU_TYPE_COMPAT           ("nehalem",    INTEL_COREI7,     "corei7")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("amdfam10",   AMDFAM10H,        "amdfam10h", "amdfam10")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("bdver1",     AMDFAM15H,        "amdfam15h", "amdfam15")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("silvermont", INTEL_SILVERMONT, "silvermont", "slm")
+X86_CPU_TYPE_COMPAT           ("knl",        INTEL_KNL,        "knl")
+X86_CPU_TYPE_COMPAT           ("btver1",     AMD_BTVER1,       "btver1")
+X86_CPU_TYPE_COMPAT           ("btver2",     AMD_BTVER2,       "btver2")
+X86_CPU_TYPE_COMPAT           ("znver1",     AMDFAM17H,        "amdfam17h")
+X86_CPU_TYPE_COMPAT           ("knm",        INTEL_KNM,        "knm")
+// Entries below this are not in libgcc/compiler-rt.
+X86_CPU_TYPE                  ("i386",        INTEL_i386)
+X86_CPU_TYPE                  ("i486",        INTEL_i486)
+X86_CPU_TYPE                  ("pentium",     INTEL_PENTIUM)
+X86_CPU_TYPE                  ("pentium-mmx", INTEL_PENTIUM_MMX)
+X86_CPU_TYPE                  ("pentiumpro",  INTEL_PENTIUM_PRO)
+X86_CPU_TYPE                  ("pentium2",    INTEL_PENTIUM_II)
+X86_CPU_TYPE                  ("pentium3",    INTEL_PENTIUM_III)
+X86_CPU_TYPE                  ("pentium4",    INTEL_PENTIUM_IV)
+X86_CPU_TYPE                  ("pentium-m",   INTEL_PENTIUM_M)
+X86_CPU_TYPE                  ("yonah",       INTEL_CORE_DUO)
+X86_CPU_TYPE                  ("nocona",      INTEL_NOCONA)
+X86_CPU_TYPE                  ("prescott",    INTEL_PRESCOTT)
+X86_CPU_TYPE                  ("i486",        AMD_i486)
+X86_CPU_TYPE                  ("pentium",     AMDPENTIUM)
+X86_CPU_TYPE                  ("athlon",      AMD_ATHLON)
+X86_CPU_TYPE                  ("athlon-xp",   AMD_ATHLON_XP)
+X86_CPU_TYPE                  ("k8",          AMD_K8)
+X86_CPU_TYPE                  ("k8-sse3",     AMD_K8SSE3)
+X86_CPU_TYPE                  ("goldmont",    INTEL_GOLDMONT)
+#undef X86_CPU_TYPE_COMPAT_WITH_ALIAS
+#undef X86_CPU_TYPE_COMPAT
+#undef X86_CPU_TYPE
+
+// This macro is used for cpu subtypes present in compiler-rt/libgcc.
+#ifndef X86_CPU_SUBTYPE_COMPAT
+#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) X86_CPU_SUBTYPE(ARCHNAME, ENUM)
+#endif
+
+#ifndef X86_CPU_SUBTYPE
+#define X86_CPU_SUBTYPE(ARCHNAME, ENUM)
+#endif
+
+X86_CPU_SUBTYPE_COMPAT("nehalem",        INTEL_COREI7_NEHALEM,        "nehalem")
+X86_CPU_SUBTYPE_COMPAT("westmere",       INTEL_COREI7_WESTMERE,       "westmere")
+X86_CPU_SUBTYPE_COMPAT("sandybridge",    INTEL_COREI7_SANDYBRIDGE,    "sandybridge")
+X86_CPU_SUBTYPE_COMPAT("amdfam10",       AMDFAM10H_BARCELONA,         "barcelona")
+X86_CPU_SUBTYPE_COMPAT("amdfam10",       AMDFAM10H_SHANGHAI,          "shanghai")
+X86_CPU_SUBTYPE_COMPAT("amdfam10",       AMDFAM10H_ISTANBUL,          "istanbul")
+X86_CPU_SUBTYPE_COMPAT("bdver1",         AMDFAM15H_BDVER1,            "bdver1")
+X86_CPU_SUBTYPE_COMPAT("bdver2",         AMDFAM15H_BDVER2,            "bdver2")
+X86_CPU_SUBTYPE_COMPAT("bdver3",         AMDFAM15H_BDVER3,            "bdver3")
+X86_CPU_SUBTYPE_COMPAT("bdver4",         AMDFAM15H_BDVER4,            "bdver4")
+X86_CPU_SUBTYPE_COMPAT("znver1",         AMDFAM17H_ZNVER1,            "znver1")
+X86_CPU_SUBTYPE_COMPAT("ivybridge",      INTEL_COREI7_IVYBRIDGE,      "ivybridge")
+X86_CPU_SUBTYPE_COMPAT("haswell",        INTEL_COREI7_HASWELL,        "haswell")
+X86_CPU_SUBTYPE_COMPAT("broadwell",      INTEL_COREI7_BROADWELL,      "broadwell")
+X86_CPU_SUBTYPE_COMPAT("skylake",        INTEL_COREI7_SKYLAKE,        "skylake")
+X86_CPU_SUBTYPE_COMPAT("skylake-avx512", INTEL_COREI7_SKYLAKE_AVX512, "skylake-avx512")
+X86_CPU_SUBTYPE_COMPAT("cannonlake",     INTEL_COREI7_CANNONLAKE,     "cannonlake")
+// Entries below this are not in libgcc/compiler-rt.
+X86_CPU_SUBTYPE       ("core2",          INTEL_CORE2_65)
+X86_CPU_SUBTYPE       ("penryn",         INTEL_CORE2_45)
+X86_CPU_SUBTYPE       ("k6",             AMDPENTIUM_K6)
+X86_CPU_SUBTYPE       ("k6-2",           AMDPENTIUM_K62)
+X86_CPU_SUBTYPE       ("k6-3",           AMDPENTIUM_K63)
+X86_CPU_SUBTYPE       ("geode",          AMDPENTIUM_GEODE)
+#undef X86_CPU_SUBTYPE_COMPAT
+#undef X86_CPU_SUBTYPE
+
+// This macro is used for cpu types present in compiler-rt/libgcc.
+#ifndef X86_FEATURE_COMPAT
+#define X86_FEATURE_COMPAT(VAL, ENUM, STR) X86_FEATURE(VAL, ENUM)
+#endif
+
+#ifndef X86_FEATURE
+#define X86_FEATURE(VAL, ENUM)
+#endif
+X86_FEATURE_COMPAT( 0, FEATURE_CMOV,            "cmov")
+X86_FEATURE_COMPAT( 1, FEATURE_MMX,             "mmx")
+X86_FEATURE_COMPAT( 2, FEATURE_POPCNT,          "popcnt")
+X86_FEATURE_COMPAT( 3, FEATURE_SSE,             "sse")
+X86_FEATURE_COMPAT( 4, FEATURE_SSE2,            "sse2")
+X86_FEATURE_COMPAT( 5, FEATURE_SSE3,            "sse3")
+X86_FEATURE_COMPAT( 6, FEATURE_SSSE3,           "ssse3")
+X86_FEATURE_COMPAT( 7, FEATURE_SSE4_1,          "sse4.1")
+X86_FEATURE_COMPAT( 8, FEATURE_SSE4_2,          "sse4.2")
+X86_FEATURE_COMPAT( 9, FEATURE_AVX,             "avx")
+X86_FEATURE_COMPAT(10, FEATURE_AVX2,            "avx2")
+X86_FEATURE_COMPAT(11, FEATURE_SSE4_A,          "sse4a")
+X86_FEATURE_COMPAT(12, FEATURE_FMA4,            "fma4")
+X86_FEATURE_COMPAT(13, FEATURE_XOP,             "xop")
+X86_FEATURE_COMPAT(14, FEATURE_FMA,             "fma")
+X86_FEATURE_COMPAT(15, FEATURE_AVX512F,         "avx512f")
+X86_FEATURE_COMPAT(16, FEATURE_BMI,             "bmi")
+X86_FEATURE_COMPAT(17, FEATURE_BMI2,            "bmi2")
+X86_FEATURE_COMPAT(18, FEATURE_AES,             "aes")
+X86_FEATURE_COMPAT(19, FEATURE_PCLMUL,          "pclmul")
+X86_FEATURE_COMPAT(20, FEATURE_AVX512VL,        "avx512vl")
+X86_FEATURE_COMPAT(21, FEATURE_AVX512BW,        "avx512bw")
+X86_FEATURE_COMPAT(22, FEATURE_AVX512DQ,        "avx512dq")
+X86_FEATURE_COMPAT(23, FEATURE_AVX512CD,        "avx512cd")
+X86_FEATURE_COMPAT(24, FEATURE_AVX512ER,        "avx512er")
+X86_FEATURE_COMPAT(25, FEATURE_AVX512PF,        "avx512pf")
+X86_FEATURE_COMPAT(26, FEATURE_AVX512VBMI,      "avx512vbmi")
+X86_FEATURE_COMPAT(27, FEATURE_AVX512IFMA,      "avx512ifma")
+X86_FEATURE_COMPAT(28, FEATURE_AVX5124VNNIW,    "avx5124vnniw")
+X86_FEATURE_COMPAT(29, FEATURE_AVX5124FMAPS,    "avx5124fmaps")
+X86_FEATURE_COMPAT(30, FEATURE_AVX512VPOPCNTDQ, "avx512vpopcntdq")
+// Features below here are not in libgcc/compiler-rt.
+X86_FEATURE       (32, FEATURE_MOVBE)
+X86_FEATURE       (33, FEATURE_ADX)
+X86_FEATURE       (34, FEATURE_EM64T)
+X86_FEATURE       (35, FEATURE_CLFLUSHOPT)
+X86_FEATURE       (36, FEATURE_SHA)
+#undef X86_FEATURE_COMPAT
+#undef X86_FEATURE
diff --git a/linux-x64/clang/include/llvm/Support/YAMLParser.h b/linux-x64/clang/include/llvm/Support/YAMLParser.h
new file mode 100644
index 0000000..7333ad9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/YAMLParser.h
@@ -0,0 +1,620 @@
+//===- YAMLParser.h - Simple YAML parser ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This is a YAML 1.2 parser.
+//
+//  See http://www.yaml.org/spec/1.2/spec.html for the full standard.
+//
+//  This currently does not implement the following:
+//    * Multi-line literal folding.
+//    * Tag resolution.
+//    * UTF-16.
+//    * BOMs anywhere other than the first Unicode scalar value in the file.
+//
+//  The most important class here is Stream. This represents a YAML stream with
+//  0, 1, or many documents.
+//
+//  SourceMgr sm;
+//  StringRef input = getInput();
+//  yaml::Stream stream(input, sm);
+//
+//  for (yaml::document_iterator di = stream.begin(), de = stream.end();
+//       di != de; ++di) {
+//    yaml::Node *n = di->getRoot();
+//    if (n) {
+//      // Do something with n...
+//    } else
+//      break;
+//  }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_YAMLPARSER_H
+#define LLVM_SUPPORT_YAMLPARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/SMLoc.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+class MemoryBufferRef;
+class SourceMgr;
+class raw_ostream;
+class Twine;
+
+namespace yaml {
+
+class Document;
+class document_iterator;
+class Node;
+class Scanner;
+struct Token;
+
+/// \brief Dump all the tokens in this stream to OS.
+/// \returns true if there was an error, false otherwise.
+bool dumpTokens(StringRef Input, raw_ostream &);
+
+/// \brief Scans all tokens in input without outputting anything. This is used
+///        for benchmarking the tokenizer.
+/// \returns true if there was an error, false otherwise.
+bool scanTokens(StringRef Input);
+
+/// \brief Escape \a Input for a double quoted scalar. If \p EscapePrintable
+/// is true, all UTF8 sequences are escaped; if \p EscapePrintable is false,
+/// UTF8 sequences encoding printable unicode scalars are emitted verbatim
+/// rather than escaped.
+std::string escape(StringRef Input, bool EscapePrintable = true);
+
+/// \brief This class represents a YAML stream potentially containing multiple
+///        documents.
+class Stream {
+public:
+  /// \brief This keeps a reference to the string referenced by \p Input.
+  Stream(StringRef Input, SourceMgr &, bool ShowColors = true,
+         std::error_code *EC = nullptr);
+
+  Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true,
+         std::error_code *EC = nullptr);
+  ~Stream();
+
+  document_iterator begin();
+  document_iterator end();
+  void skip();
+  bool failed();
+
+  bool validate() {
+    skip();
+    return !failed();
+  }
+
+  void printError(Node *N, const Twine &Msg);
+
+private:
+  friend class Document;
+
+  std::unique_ptr<Scanner> scanner;
+  std::unique_ptr<Document> CurrentDoc;
+};
+
+/// \brief Abstract base class for all Nodes.
+class Node {
+  virtual void anchor();
+
+public:
+  enum NodeKind {
+    NK_Null,
+    NK_Scalar,
+    NK_BlockScalar,
+    NK_KeyValue,
+    NK_Mapping,
+    NK_Sequence,
+    NK_Alias
+  };
+
+  Node(unsigned int Type, std::unique_ptr<Document> &, StringRef Anchor,
+       StringRef Tag);
+
+  // It's not safe to copy YAML nodes; the document is streamed and the position
+  // is part of the state.
+  Node(const Node &) = delete;
+  void operator=(const Node &) = delete;
+
+  void *operator new(size_t Size, BumpPtrAllocator &Alloc,
+                     size_t Alignment = 16) noexcept {
+    return Alloc.Allocate(Size, Alignment);
+  }
+
+  void operator delete(void *Ptr, BumpPtrAllocator &Alloc,
+                       size_t Size) noexcept {
+    Alloc.Deallocate(Ptr, Size);
+  }
+
+  void operator delete(void *) noexcept = delete;
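+
+  // Nodes live in the Document's BumpPtrAllocator and are constructed with
+  // the placement form above, e.g. (sketch):
+  //
+  //   new (getAllocator()) ScalarNode(Doc, Anchor, Tag, Val);
+  //
+  // Plain operator delete is deleted because nodes are never freed one by
+  // one; they go away all at once with the allocator.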
+
+  /// \brief Get the value of the anchor attached to this node. If it does not
+  ///        have one, getAnchor().size() will be 0.
+  StringRef getAnchor() const { return Anchor; }
+
+  /// \brief Get the tag as it was written in the document. This does not
+  ///   perform tag resolution.
+  StringRef getRawTag() const { return Tag; }
+
+  /// \brief Get the verbatim tag for a given Node. This performs tag
+  ///   resolution and substitution.
+  std::string getVerbatimTag() const;
+
+  SMRange getSourceRange() const { return SourceRange; }
+  void setSourceRange(SMRange SR) { SourceRange = SR; }
+
+  // These functions forward to Document and Scanner.
+  Token &peekNext();
+  Token getNext();
+  Node *parseBlockNode();
+  BumpPtrAllocator &getAllocator();
+  void setError(const Twine &Message, Token &Location) const;
+  bool failed() const;
+
+  virtual void skip() {}
+
+  unsigned int getType() const { return TypeID; }
+
+protected:
+  std::unique_ptr<Document> &Doc;
+  SMRange SourceRange;
+
+  ~Node() = default;
+
+private:
+  unsigned int TypeID;
+  StringRef Anchor;
+  /// \brief The tag as typed in the document.
+  StringRef Tag;
+};
+
+/// \brief A null value.
+///
+/// Example:
+///   !!null null
+class NullNode final : public Node {
+  void anchor() override;
+
+public:
+  NullNode(std::unique_ptr<Document> &D)
+      : Node(NK_Null, D, StringRef(), StringRef()) {}
+
+  static bool classof(const Node *N) { return N->getType() == NK_Null; }
+};
+
+/// \brief A scalar node is an opaque datum that can be presented as a
+///        series of zero or more Unicode scalar values.
+///
+/// Example:
+///   Adena
+class ScalarNode final : public Node {
+  void anchor() override;
+
+public:
+  ScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+             StringRef Val)
+      : Node(NK_Scalar, D, Anchor, Tag), Value(Val) {
+    SMLoc Start = SMLoc::getFromPointer(Val.begin());
+    SMLoc End = SMLoc::getFromPointer(Val.end());
+    SourceRange = SMRange(Start, End);
+  }
+
+  /// Return Value without any escaping or folding or other fun YAML stuff.
+  /// These are the exact bytes contained in the file (after conversion to
+  /// UTF-8).
+  StringRef getRawValue() const { return Value; }
+
+  /// \brief Gets the value of this node as a StringRef.
+  ///
+  /// \param Storage is used to store the content of the returned StringRef iff
+  ///        it requires any modification from how it appeared in the source.
+  ///        This happens with escaped characters and multi-line literals.
+  StringRef getValue(SmallVectorImpl<char> &Storage) const;
+
+  static bool classof(const Node *N) {
+    return N->getType() == NK_Scalar;
+  }
+
+private:
+  StringRef Value;
+
+  StringRef unescapeDoubleQuoted(StringRef UnquotedValue,
+                                 StringRef::size_type Start,
+                                 SmallVectorImpl<char> &Storage) const;
+};
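+
+// Hedged sketch of reading a scalar (SmallString is llvm/ADT/SmallString.h,
+// dyn_cast is llvm/Support/Casting.h; Storage only backs the result when
+// unescaping or folding was required):
+//
+//   SmallString<32> Storage;
+//   if (auto *S = dyn_cast<yaml::ScalarNode>(N)) {
+//     StringRef V = S->getValue(Storage);
+//     // use V before Storage goes out of scope
+//   }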
+
+/// \brief A block scalar node is an opaque datum that can be presented as a
+///        series of zero or more Unicode scalar values.
+///
+/// Example:
+///   |
+///     Hello
+///     World
+class BlockScalarNode final : public Node {
+  void anchor() override;
+
+public:
+  BlockScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+                  StringRef Value, StringRef RawVal)
+      : Node(NK_BlockScalar, D, Anchor, Tag), Value(Value) {
+    SMLoc Start = SMLoc::getFromPointer(RawVal.begin());
+    SMLoc End = SMLoc::getFromPointer(RawVal.end());
+    SourceRange = SMRange(Start, End);
+  }
+
+  /// \brief Gets the value of this node as a StringRef.
+  StringRef getValue() const { return Value; }
+
+  static bool classof(const Node *N) {
+    return N->getType() == NK_BlockScalar;
+  }
+
+private:
+  StringRef Value;
+};
+
+/// \brief A key and value pair. While not technically a Node under the YAML
+///        representation graph, it is easier to treat them this way.
+///
+/// TODO: Consider making this not a child of Node.
+///
+/// Example:
+///   Section: .text
+class KeyValueNode final : public Node {
+  void anchor() override;
+
+public:
+  KeyValueNode(std::unique_ptr<Document> &D)
+      : Node(NK_KeyValue, D, StringRef(), StringRef()) {}
+
+  /// \brief Parse and return the key.
+  ///
+  /// This may be called multiple times.
+  ///
+  /// \returns The key, or nullptr if failed() == true.
+  Node *getKey();
+
+  /// \brief Parse and return the value.
+  ///
+  /// This may be called multiple times.
+  ///
+  /// \returns The value, or nullptr if failed() == true.
+  Node *getValue();
+
+  void skip() override {
+    if (Node *Key = getKey()) {
+      Key->skip();
+      if (Node *Val = getValue())
+        Val->skip();
+    }
+  }
+
+  static bool classof(const Node *N) {
+    return N->getType() == NK_KeyValue;
+  }
+
+private:
+  Node *Key = nullptr;
+  Node *Value = nullptr;
+};
+
+/// \brief This is an iterator abstraction over YAML collections shared by both
+///        sequences and maps.
+///
+/// BaseT must have a ValueT* member named CurrentEntry and a member function
+/// increment(), which must set CurrentEntry to nullptr to create an end
+/// iterator.
+template <class BaseT, class ValueT>
+class basic_collection_iterator
+    : public std::iterator<std::input_iterator_tag, ValueT> {
+public:
+  basic_collection_iterator() = default;
+  basic_collection_iterator(BaseT *B) : Base(B) {}
+
+  ValueT *operator->() const {
+    assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+    return Base->CurrentEntry;
+  }
+
+  ValueT &operator*() const {
+    assert(Base && Base->CurrentEntry &&
+           "Attempted to dereference end iterator!");
+    return *Base->CurrentEntry;
+  }
+
+  operator ValueT *() const {
+    assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+    return Base->CurrentEntry;
+  }
+
+  /// Note on EqualityComparable:
+  ///
+  /// The iterator is not re-entrant; it is meant to be used for parsing
+  /// YAML on demand. Once iteration has started, it can point to only one
+  /// entry at a time, hence Base.CurrentEntry and Other.Base.CurrentEntry
+  /// are equal iff Base and Other.Base are equal.
+  bool operator==(const basic_collection_iterator &Other) const {
+    if (Base && (Base == Other.Base)) {
+      assert((Base->CurrentEntry == Other.Base->CurrentEntry)
+             && "Equal Bases expected to point to equal Entries");
+    }
+
+    return Base == Other.Base;
+  }
+
+  bool operator!=(const basic_collection_iterator &Other) const {
+    return !(Base == Other.Base);
+  }
+
+  basic_collection_iterator &operator++() {
+    assert(Base && "Attempted to advance iterator past end!");
+    Base->increment();
+    // Create an end iterator.
+    if (!Base->CurrentEntry)
+      Base = nullptr;
+    return *this;
+  }
+
+private:
+  BaseT *Base = nullptr;
+};
+
+// The following two templates are used for both MappingNode and SequenceNode.
+template <class CollectionType>
+typename CollectionType::iterator begin(CollectionType &C) {
+  assert(C.IsAtBeginning && "You may only iterate over a collection once!");
+  C.IsAtBeginning = false;
+  typename CollectionType::iterator ret(&C);
+  ++ret;
+  return ret;
+}
+
+template <class CollectionType> void skip(CollectionType &C) {
+  // TODO: support skipping from the middle of a parsed collection ;/
+  assert((C.IsAtBeginning || C.IsAtEnd) && "Cannot skip mid parse!");
+  if (C.IsAtBeginning)
+    for (typename CollectionType::iterator i = begin(C), e = C.end(); i != e;
+         ++i)
+      i->skip();
+}
+
+/// \brief Represents a YAML map created from either a block map or a flow map.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+///   Name: _main
+///   Scope: Global
+class MappingNode final : public Node {
+  void anchor() override;
+
+public:
+  enum MappingType {
+    MT_Block,
+    MT_Flow,
+    MT_Inline ///< An inline mapping node is used for "[key: value]".
+  };
+
+  MappingNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+              MappingType MT)
+      : Node(NK_Mapping, D, Anchor, Tag), Type(MT) {}
+
+  friend class basic_collection_iterator<MappingNode, KeyValueNode>;
+
+  using iterator = basic_collection_iterator<MappingNode, KeyValueNode>;
+
+  template <class T> friend typename T::iterator yaml::begin(T &);
+  template <class T> friend void yaml::skip(T &);
+
+  iterator begin() { return yaml::begin(*this); }
+
+  iterator end() { return iterator(); }
+
+  void skip() override { yaml::skip(*this); }
+
+  static bool classof(const Node *N) {
+    return N->getType() == NK_Mapping;
+  }
+
+private:
+  MappingType Type;
+  bool IsAtBeginning = true;
+  bool IsAtEnd = false;
+  KeyValueNode *CurrentEntry = nullptr;
+
+  void increment();
+};
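+
+// Hedged iteration sketch (a mapping may be traversed only once, and keys
+// and values must be consumed in document order):
+//
+//   if (auto *Map = dyn_cast<yaml::MappingNode>(Root))
+//     for (yaml::KeyValueNode &KV : *Map) {
+//       yaml::Node *K = KV.getKey();
+//       yaml::Node *V = KV.getValue();
+//       // inspect K and V, or call KV.skip()
+//     }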
+
+/// \brief Represents a YAML sequence created from either a block sequence or
+///        a flow sequence.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+///   - Hello
+///   - World
+class SequenceNode final : public Node {
+  void anchor() override;
+
+public:
+  enum SequenceType {
+    ST_Block,
+    ST_Flow,
+    // Used for:
+    //
+    // key:
+    // - val1
+    // - val2
+    //
+    // since a BlockMappingEntry and BlockEnd are not created in this case.
+    ST_Indentless
+  };
+
+  SequenceNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+               SequenceType ST)
+      : Node(NK_Sequence, D, Anchor, Tag), SeqType(ST) {}
+
+  friend class basic_collection_iterator<SequenceNode, Node>;
+
+  using iterator = basic_collection_iterator<SequenceNode, Node>;
+
+  template <class T> friend typename T::iterator yaml::begin(T &);
+  template <class T> friend void yaml::skip(T &);
+
+  void increment();
+
+  iterator begin() { return yaml::begin(*this); }
+
+  iterator end() { return iterator(); }
+
+  void skip() override { yaml::skip(*this); }
+
+  static bool classof(const Node *N) {
+    return N->getType() == NK_Sequence;
+  }
+
+private:
+  SequenceType SeqType;
+  bool IsAtBeginning = true;
+  bool IsAtEnd = false;
+  bool WasPreviousTokenFlowEntry = true; // Start with an imaginary ','.
+  Node *CurrentEntry = nullptr;
+};
+
+/// \brief Represents an alias to a Node with an anchor.
+///
+/// Example:
+///   *AnchorName
+class AliasNode final : public Node {
+  void anchor() override;
+
+public:
+  AliasNode(std::unique_ptr<Document> &D, StringRef Val)
+      : Node(NK_Alias, D, StringRef(), StringRef()), Name(Val) {}
+
+  StringRef getName() const { return Name; }
+  Node *getTarget();
+
+  static bool classof(const Node *N) { return N->getType() == NK_Alias; }
+
+private:
+  StringRef Name;
+};
+
+/// \brief A YAML Stream is a sequence of Documents. A document contains a root
+///        node.
+class Document {
+public:
+  Document(Stream &ParentStream);
+
+  /// \brief Root for parsing a node. Returns a single node.
+  Node *parseBlockNode();
+
+  /// \brief Finish parsing the current document and return true if there are
+  ///        more documents; return false otherwise.
+  bool skip();
+
+  /// \brief Parse and return the root level node.
+  Node *getRoot() {
+    if (Root)
+      return Root;
+    return Root = parseBlockNode();
+  }
+
+  const std::map<StringRef, StringRef> &getTagMap() const { return TagMap; }
+
+private:
+  friend class Node;
+  friend class document_iterator;
+
+  /// \brief Stream to read tokens from.
+  Stream &stream;
+
+  /// \brief Used to allocate nodes. All nodes are destroyed without calling
+  ///        their destructors when the document is destroyed.
+  BumpPtrAllocator NodeAllocator;
+
+  /// \brief The root node. Used to support skipping a partially parsed
+  ///        document.
+  Node *Root;
+
+  /// \brief Maps tag prefixes to their expansion.
+  std::map<StringRef, StringRef> TagMap;
+
+  Token &peekNext();
+  Token getNext();
+  void setError(const Twine &Message, Token &Location) const;
+  bool failed() const;
+
+  /// \brief Parse %BLAH directives and return true if any were encountered.
+  bool parseDirectives();
+
+  /// \brief Parse %YAML
+  void parseYAMLDirective();
+
+  /// \brief Parse %TAG
+  void parseTAGDirective();
+
+  /// \brief Consume the next token and error if it is not \a TK.
+  bool expectToken(int TK);
+};
+
+/// \brief Iterator abstraction for Documents over a Stream.
+class document_iterator {
+public:
+  document_iterator() = default;
+  document_iterator(std::unique_ptr<Document> &D) : Doc(&D) {}
+
+  bool operator==(const document_iterator &Other) const {
+    if (isAtEnd() || Other.isAtEnd())
+      return isAtEnd() && Other.isAtEnd();
+
+    return Doc == Other.Doc;
+  }
+  bool operator!=(const document_iterator &Other) const {
+    return !(*this == Other);
+  }
+
+  document_iterator operator++() {
+    assert(Doc && "incrementing iterator past the end.");
+    if (!(*Doc)->skip()) {
+      Doc->reset(nullptr);
+    } else {
+      Stream &S = (*Doc)->stream;
+      Doc->reset(new Document(S));
+    }
+    return *this;
+  }
+
+  Document &operator*() { return *Doc->get(); }
+
+  std::unique_ptr<Document> &operator->() { return *Doc; }
+
+private:
+  bool isAtEnd() const { return !Doc || !*Doc; }
+
+  std::unique_ptr<Document> *Doc = nullptr;
+};
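+
+// A minimal sketch (names are placeholders, not upstream API) of stepping
+// through the documents of a multi-document stream by hand:
+//
+//   SourceMgr SM;
+//   yaml::Stream Strm(YAMLText, SM);   // YAMLText: StringRef of YAML input
+//   for (yaml::document_iterator DI = Strm.begin(), E = Strm.end(); DI != E;
+//        ++DI)
+//     if (yaml::Node *Root = DI->getRoot())
+//       Root->skip();                  // or walk the node graph instead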
+
+} // end namespace yaml
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_YAMLPARSER_H
diff --git a/linux-x64/clang/include/llvm/Support/YAMLTraits.h b/linux-x64/clang/include/llvm/Support/YAMLTraits.h
new file mode 100644
index 0000000..b874ad5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/YAMLTraits.h
@@ -0,0 +1,1763 @@
+//===- llvm/Support/YAMLTraits.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_YAMLTRAITS_H
+#define LLVM_SUPPORT_YAMLTRAITS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cctype>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <new>
+#include <string>
+#include <system_error>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+namespace yaml {
+
+struct EmptyContext {};
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a YAML mapping.  For example:
+///
+///     struct MappingTraits<MyStruct> {
+///       static void mapping(IO &io, MyStruct &s) {
+///         io.mapRequired("name", s.name);
+///         io.mapRequired("size", s.size);
+///         io.mapOptional("age",  s.age);
+///       }
+///     };
+template<class T>
+struct MappingTraits {
+  // Must provide:
+  // static void mapping(IO &io, T &fields);
+  // Optionally may provide:
+  // static StringRef validate(IO &io, T &fields);
+  //
+  // The optional flow flag will cause generated YAML to use a flow mapping
+  // (e.g. { a: 0, b: 1 }):
+  // static const bool flow = true;
+};
+
+/// This class is similar to MappingTraits<T> but allows you to pass in
+/// additional context for each map operation.  For example:
+///
+///     struct MappingContextTraits<MyStruct, MyContext> {
+///       static void mapping(IO &io, MyStruct &s, MyContext &c) {
+///         io.mapRequired("name", s.name);
+///         io.mapRequired("size", s.size);
+///         io.mapOptional("age",  s.age);
+///         ++c.TimesMapped;
+///       }
+///     };
+template <class T, class Context> struct MappingContextTraits {
+  // Must provide:
+  // static void mapping(IO &io, T &fields, Context &Ctx);
+  // Optionally may provide:
+  // static StringRef validate(IO &io, T &fields, Context &Ctx);
+  //
+  // The optional flow flag will cause generated YAML to use a flow mapping
+  // (e.g. { a: 0, b: 1 }):
+  // static const bool flow = true;
+};
+
+/// This class should be specialized by any integral type that converts
+/// to/from a YAML scalar where there is a one-to-one mapping between
+/// in-memory values and a string in YAML.  For example:
+///
+///     struct ScalarEnumerationTraits<Colors> {
+///         static void enumeration(IO &io, Colors &value) {
+///           io.enumCase(value, "red",   cRed);
+///           io.enumCase(value, "blue",  cBlue);
+///           io.enumCase(value, "green", cGreen);
+///         }
+///       };
+template<typename T>
+struct ScalarEnumerationTraits {
+  // Must provide:
+  // static void enumeration(IO &io, T &value);
+};
+
+/// This class should be specialized by any integer type that is a union
+/// of bit values, where the YAML representation is a flow sequence of
+/// strings.  For example:
+///
+///      struct ScalarBitSetTraits<MyFlags> {
+///        static void bitset(IO &io, MyFlags &value) {
+///          io.bitSetCase(value, "big",   flagBig);
+///          io.bitSetCase(value, "flat",  flagFlat);
+///          io.bitSetCase(value, "round", flagRound);
+///        }
+///      };
+template<typename T>
+struct ScalarBitSetTraits {
+  // Must provide:
+  // static void bitset(IO &io, T &value);
+};
+
+/// Describe which type of quotes should be used when quoting is necessary.
+/// Some non-printable characters need to be double-quoted, while some others
+/// are fine with simple-quoting, and some don't need any quoting.
+enum class QuotingType { None, Single, Double };
+
+/// This class should be specialized by any type that requires custom conversion
+/// to/from a yaml scalar.  For example:
+///
+///    template<>
+///    struct ScalarTraits<MyType> {
+///      static void output(const MyType &val, void*, llvm::raw_ostream &out) {
+///        // stream out custom formatting
+///        out << llvm::format("%x", val);
+///      }
+///      static StringRef input(StringRef scalar, void*, MyType &value) {
+///        // parse scalar and set `value`
+///        // return empty string on success, or error string
+///        return StringRef();
+///      }
+///      static QuotingType mustQuote(StringRef) { return QuotingType::Single; }
+///    };
+template<typename T>
+struct ScalarTraits {
+  // Must provide:
+  //
+  // Function to write the value as a string:
+  //static void output(const T &value, void *ctxt, llvm::raw_ostream &out);
+  //
+  // Function to convert a string to a value.  Returns the empty
+  // StringRef on success or an error string if the string is malformed:
+  //static StringRef input(StringRef scalar, void *ctxt, T &value);
+  //
+  // Function to determine if the value should be quoted.
+  //static QuotingType mustQuote(StringRef);
+};
+
+/// This class should be specialized by any type that requires custom conversion
+/// to/from a YAML literal block scalar. For example:
+///
+///    template <>
+///    struct BlockScalarTraits<MyType> {
+///      static void output(const MyType &Value, void*, llvm::raw_ostream &Out)
+///      {
+///        // stream out custom formatting
+///        Out << Value;
+///      }
+///      static StringRef input(StringRef Scalar, void*, MyType &Value) {
+///        // parse scalar and set `Value`
+///        // return empty string on success, or error string
+///        return StringRef();
+///      }
+///    };
+template <typename T>
+struct BlockScalarTraits {
+  // Must provide:
+  //
+  // Function to write the value as a string:
+  // static void output(const T &Value, void *ctx, llvm::raw_ostream &Out);
+  //
+  // Function to convert a string to a value.  Returns the empty
+  // StringRef on success or an error string if the string is malformed:
+  // static StringRef input(StringRef Scalar, void *ctxt, T &Value);
+};
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a YAML sequence.  For example:
+///
+///    template<>
+///    struct SequenceTraits<MyContainer> {
+///      static size_t size(IO &io, MyContainer &seq) {
+///        return seq.size();
+///      }
+///      static MyType& element(IO &, MyContainer &seq, size_t index) {
+///        if ( index >= seq.size() )
+///          seq.resize(index+1);
+///        return seq[index];
+///      }
+///    };
+template<typename T, typename EnableIf = void>
+struct SequenceTraits {
+  // Must provide:
+  // static size_t size(IO &io, T &seq);
+  // static T::value_type& element(IO &io, T &seq, size_t index);
+  //
+  // The following is optional and will cause generated YAML to use
+  // a flow sequence (e.g. [a,b,c]).
+  // static const bool flow = true;
+};
+
+/// This class should be specialized by any type for which vectors of that
+/// type need to be converted to/from a YAML sequence.
+template<typename T, typename EnableIf = void>
+struct SequenceElementTraits {
+  // Must provide:
+  // static const bool flow;
+};
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a list of YAML documents.
+template<typename T>
+struct DocumentListTraits {
+  // Must provide:
+  // static size_t size(IO &io, T &seq);
+  // static T::value_type& element(IO &io, T &seq, size_t index);
+};
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a YAML mapping in the case where the names of the keys are not known
+/// in advance, e.g. a string map.
+template <typename T>
+struct CustomMappingTraits {
+  // static void inputOne(IO &io, StringRef key, T &elem);
+  // static void output(IO &io, T &elem);
+};
+
+// Only used for better diagnostics of missing traits
+template <typename T>
+struct MissingTrait;
+
+// Test if ScalarEnumerationTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarEnumerationTraits
+{
+  using Signature_enumeration = void (*)(class IO&, T&);
+
+  template <typename U>
+  static char test(SameType<Signature_enumeration, &U::enumeration>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+    (sizeof(test<ScalarEnumerationTraits<T>>(nullptr)) == 1);
+};
+
+// Test if ScalarBitSetTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarBitSetTraits
+{
+  using Signature_bitset = void (*)(class IO&, T&);
+
+  template <typename U>
+  static char test(SameType<Signature_bitset, &U::bitset>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value = (sizeof(test<ScalarBitSetTraits<T>>(nullptr)) == 1);
+};
+
+// Test if ScalarTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarTraits
+{
+  using Signature_input = StringRef (*)(StringRef, void*, T&);
+  using Signature_output = void (*)(const T&, void*, raw_ostream&);
+  using Signature_mustQuote = QuotingType (*)(StringRef);
+
+  template <typename U>
+  static char test(SameType<Signature_input, &U::input> *,
+                   SameType<Signature_output, &U::output> *,
+                   SameType<Signature_mustQuote, &U::mustQuote> *);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+      (sizeof(test<ScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
+};
+
+// Test if BlockScalarTraits<T> is defined on type T.
+template <class T>
+struct has_BlockScalarTraits
+{
+  using Signature_input = StringRef (*)(StringRef, void *, T &);
+  using Signature_output = void (*)(const T &, void *, raw_ostream &);
+
+  template <typename U>
+  static char test(SameType<Signature_input, &U::input> *,
+                   SameType<Signature_output, &U::output> *);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+      (sizeof(test<BlockScalarTraits<T>>(nullptr, nullptr)) == 1);
+};
+
+// Test if MappingContextTraits<T, Context> is defined on type T.
+template <class T, class Context> struct has_MappingTraits {
+  using Signature_mapping = void (*)(class IO &, T &, Context &);
+
+  template <typename U>
+  static char test(SameType<Signature_mapping, &U::mapping>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+      (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
+};
+
+// Test if MappingTraits<T> is defined on type T.
+template <class T> struct has_MappingTraits<T, EmptyContext> {
+  using Signature_mapping = void (*)(class IO &, T &);
+
+  template <typename U>
+  static char test(SameType<Signature_mapping, &U::mapping> *);
+
+  template <typename U> static double test(...);
+
+public:
+  static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
+};
+
+// Test if MappingContextTraits<T, Context>::validate() is defined on type T.
+template <class T, class Context> struct has_MappingValidateTraits {
+  using Signature_validate = StringRef (*)(class IO &, T &, Context &);
+
+  template <typename U>
+  static char test(SameType<Signature_validate, &U::validate>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+      (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
+};
+
+// Test if MappingTraits<T>::validate() is defined on type T.
+template <class T> struct has_MappingValidateTraits<T, EmptyContext> {
+  using Signature_validate = StringRef (*)(class IO &, T &);
+
+  template <typename U>
+  static char test(SameType<Signature_validate, &U::validate> *);
+
+  template <typename U> static double test(...);
+
+public:
+  static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
+};
+
+// Test if SequenceTraits<T> is defined on type T.
+template <class T>
+struct has_SequenceMethodTraits
+{
+  using Signature_size = size_t (*)(class IO&, T&);
+
+  template <typename U>
+  static char test(SameType<Signature_size, &U::size>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =  (sizeof(test<SequenceTraits<T>>(nullptr)) == 1);
+};
+
+// Test if CustomMappingTraits<T> is defined on type T.
+template <class T>
+struct has_CustomMappingTraits
+{
+  using Signature_input = void (*)(IO &io, StringRef key, T &v);
+
+  template <typename U>
+  static char test(SameType<Signature_input, &U::inputOne>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value =
+      (sizeof(test<CustomMappingTraits<T>>(nullptr)) == 1);
+};
+
+// has_FlowTraits<int> will cause an error with some compilers because
+// it subclasses int.  Using this wrapper instantiates the
+// real has_FlowTraits only if the template type is a class.
+template <typename T, bool Enabled = std::is_class<T>::value>
+class has_FlowTraits
+{
+public:
+   static const bool value = false;
+};
+
+// Some older gcc compilers don't support straightforward tests
+// for members, so test for the ambiguity caused by the base and derived
+// classes both defining the member.
+template <class T>
+struct has_FlowTraits<T, true>
+{
+  struct Fallback { bool flow; };
+  struct Derived : T, Fallback { };
+
+  template<typename C>
+  static char (&f(SameType<bool Fallback::*, &C::flow>*))[1];
+
+  template<typename C>
+  static char (&f(...))[2];
+
+public:
+  static bool const value = sizeof(f<Derived>(nullptr)) == 2;
+};
+
+// Test if SequenceTraits<T> is defined on type T
+template<typename T>
+struct has_SequenceTraits : public std::integral_constant<bool,
+                                      has_SequenceMethodTraits<T>::value > { };
+
+// Test if DocumentListTraits<T> is defined on type T
+template <class T>
+struct has_DocumentListTraits
+{
+  using Signature_size = size_t (*)(class IO &, T &);
+
+  template <typename U>
+  static char test(SameType<Signature_size, &U::size>*);
+
+  template <typename U>
+  static double test(...);
+
+public:
+  static bool const value = (sizeof(test<DocumentListTraits<T>>(nullptr))==1);
+};
+
+inline bool isNumber(StringRef S) {
+  static const char OctalChars[] = "01234567";
+  if (S.startswith("0") &&
+      S.drop_front().find_first_not_of(OctalChars) == StringRef::npos)
+    return true;
+
+  if (S.startswith("0o") &&
+      S.drop_front(2).find_first_not_of(OctalChars) == StringRef::npos)
+    return true;
+
+  static const char HexChars[] = "0123456789abcdefABCDEF";
+  if (S.startswith("0x") &&
+      S.drop_front(2).find_first_not_of(HexChars) == StringRef::npos)
+    return true;
+
+  static const char DecChars[] = "0123456789";
+  if (S.find_first_not_of(DecChars) == StringRef::npos)
+    return true;
+
+  if (S.equals(".inf") || S.equals(".Inf") || S.equals(".INF"))
+    return true;
+
+  Regex FloatMatcher("^(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)([eE][-+]?[0-9]+)?$");
+  if (FloatMatcher.match(S))
+    return true;
+
+  return false;
+}
+
+inline bool isNumeric(StringRef S) {
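+  // Note: S is assumed to be non-empty here; S.front() below is not guarded
+  // against an empty string (needsQuotes() checks for empty input first).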
+  if ((S.front() == '-' || S.front() == '+') && isNumber(S.drop_front()))
+    return true;
+
+  if (isNumber(S))
+    return true;
+
+  if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
+    return true;
+
+  return false;
+}
+
+inline bool isNull(StringRef S) {
+  return S.equals("null") || S.equals("Null") || S.equals("NULL") ||
+         S.equals("~");
+}
+
+inline bool isBool(StringRef S) {
+  return S.equals("true") || S.equals("True") || S.equals("TRUE") ||
+         S.equals("false") || S.equals("False") || S.equals("FALSE");
+}
+
+// 5.1. Character Set
+// The allowed character range explicitly excludes the C0 control block #x0-#x1F
+// (except for TAB #x9, LF #xA, and CR #xD which are allowed), DEL #x7F, the C1
+// control block #x80-#x9F (except for NEL #x85 which is allowed), the surrogate
+// block #xD800-#xDFFF, #xFFFE, and #xFFFF.
+inline QuotingType needsQuotes(StringRef S) {
+  if (S.empty())
+    return QuotingType::Single;
+  if (isspace(S.front()) || isspace(S.back()))
+    return QuotingType::Single;
+  if (isNull(S))
+    return QuotingType::Single;
+  if (isBool(S))
+    return QuotingType::Single;
+  if (isNumeric(S))
+    return QuotingType::Single;
+
+  // 7.3.3 Plain Style
+  // Plain scalars must not begin with most indicators, as this would cause
+  // ambiguity with other YAML constructs.
+  static constexpr char Indicators[] = R"(-?:\,[]{}#&*!|>'"%@`)";
+  if (S.find_first_of(Indicators) == 0)
+    return QuotingType::Single;
+
+  QuotingType MaxQuotingNeeded = QuotingType::None;
+  for (unsigned char C : S) {
+    // Alphanum is safe.
+    if (isAlnum(C))
+      continue;
+
+    switch (C) {
+    // Safe scalar characters.
+    case '_':
+    case '-':
+    case '/':
+    case '^':
+    case '.':
+    case ',':
+    case ' ':
+    // TAB (0x9), LF (0xA), CR (0xD) and NEL (0x85) are allowed.
+    case 0x9:
+    case 0xA:
+    case 0xD:
+    case 0x85:
+      continue;
+    // DEL (0x7F) is excluded from the allowed character range.
+    case 0x7F:
+      return QuotingType::Double;
+    default: {
+      // C0 control block (0x0 - 0x1F) is excluded from the allowed character
+      // range.
+      if (C <= 0x1F)
+        return QuotingType::Double;
+
+      // Always double quote UTF-8.
+      if ((C & 0x80) != 0)
+        return QuotingType::Double;
+
+      // The character is not safe, at least simple quoting needed.
+      MaxQuotingNeeded = QuotingType::Single;
+    }
+    }
+  }
+
+  return MaxQuotingNeeded;
+}
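+
+// For illustration, some hand-checked results of the classification above
+// (not an exhaustive list):
+//   needsQuotes("true")        == QuotingType::Single  // reads as a bool
+//   needsQuotes("0x1F")        == QuotingType::Single  // reads as a number
+//   needsQuotes("-foo")        == QuotingType::Single  // leading indicator
+//   needsQuotes("hello world") == QuotingType::None    // safe plain scalar
+//   needsQuotes("\x01")        == QuotingType::Double  // C0 control byte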
+
+template <typename T, typename Context>
+struct missingTraits
+    : public std::integral_constant<bool,
+                                    !has_ScalarEnumerationTraits<T>::value &&
+                                        !has_ScalarBitSetTraits<T>::value &&
+                                        !has_ScalarTraits<T>::value &&
+                                        !has_BlockScalarTraits<T>::value &&
+                                        !has_MappingTraits<T, Context>::value &&
+                                        !has_SequenceTraits<T>::value &&
+                                        !has_CustomMappingTraits<T>::value &&
+                                        !has_DocumentListTraits<T>::value> {};
+
+template <typename T, typename Context>
+struct validatedMappingTraits
+    : public std::integral_constant<
+          bool, has_MappingTraits<T, Context>::value &&
+                    has_MappingValidateTraits<T, Context>::value> {};
+
+template <typename T, typename Context>
+struct unvalidatedMappingTraits
+    : public std::integral_constant<
+          bool, has_MappingTraits<T, Context>::value &&
+                    !has_MappingValidateTraits<T, Context>::value> {};
+
+// Base class for Input and Output.
+class IO {
+public:
+  IO(void *Ctxt = nullptr);
+  virtual ~IO();
+
+  virtual bool outputting() = 0;
+
+  virtual unsigned beginSequence() = 0;
+  virtual bool preflightElement(unsigned, void *&) = 0;
+  virtual void postflightElement(void*) = 0;
+  virtual void endSequence() = 0;
+  virtual bool canElideEmptySequence() = 0;
+
+  virtual unsigned beginFlowSequence() = 0;
+  virtual bool preflightFlowElement(unsigned, void *&) = 0;
+  virtual void postflightFlowElement(void*) = 0;
+  virtual void endFlowSequence() = 0;
+
+  virtual bool mapTag(StringRef Tag, bool Default=false) = 0;
+  virtual void beginMapping() = 0;
+  virtual void endMapping() = 0;
+  virtual bool preflightKey(const char*, bool, bool, bool &, void *&) = 0;
+  virtual void postflightKey(void*) = 0;
+  virtual std::vector<StringRef> keys() = 0;
+
+  virtual void beginFlowMapping() = 0;
+  virtual void endFlowMapping() = 0;
+
+  virtual void beginEnumScalar() = 0;
+  virtual bool matchEnumScalar(const char*, bool) = 0;
+  virtual bool matchEnumFallback() = 0;
+  virtual void endEnumScalar() = 0;
+
+  virtual bool beginBitSetScalar(bool &) = 0;
+  virtual bool bitSetMatch(const char*, bool) = 0;
+  virtual void endBitSetScalar() = 0;
+
+  virtual void scalarString(StringRef &, QuotingType) = 0;
+  virtual void blockScalarString(StringRef &) = 0;
+
+  virtual void setError(const Twine &) = 0;
+
+  template <typename T>
+  void enumCase(T &Val, const char* Str, const T ConstVal) {
+    if ( matchEnumScalar(Str, outputting() && Val == ConstVal) ) {
+      Val = ConstVal;
+    }
+  }
+
+  // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
+  template <typename T>
+  void enumCase(T &Val, const char* Str, const uint32_t ConstVal) {
+    if ( matchEnumScalar(Str, outputting() && Val == static_cast<T>(ConstVal)) ) {
+      Val = ConstVal;
+    }
+  }
+
+  template <typename FBT, typename T>
+  void enumFallback(T &Val) {
+    if (matchEnumFallback()) {
+      EmptyContext Context;
+      // FIXME: Force integral conversion to allow strong typedefs to convert.
+      FBT Res = static_cast<typename FBT::BaseType>(Val);
+      yamlize(*this, Res, true, Context);
+      Val = static_cast<T>(static_cast<typename FBT::BaseType>(Res));
+    }
+  }
+
+  template <typename T>
+  void bitSetCase(T &Val, const char* Str, const T ConstVal) {
+    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
+      Val = static_cast<T>(Val | ConstVal);
+    }
+  }
+
+  // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
+  template <typename T>
+  void bitSetCase(T &Val, const char* Str, const uint32_t ConstVal) {
+    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
+      Val = static_cast<T>(Val | ConstVal);
+    }
+  }
+
+  template <typename T>
+  void maskedBitSetCase(T &Val, const char *Str, T ConstVal, T Mask) {
+    if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+      Val = Val | ConstVal;
+  }
+
+  template <typename T>
+  void maskedBitSetCase(T &Val, const char *Str, uint32_t ConstVal,
+                        uint32_t Mask) {
+    if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+      Val = Val | ConstVal;
+  }
+
+  void *getContext();
+  void setContext(void *);
+
+  template <typename T> void mapRequired(const char *Key, T &Val) {
+    EmptyContext Ctx;
+    this->processKey(Key, Val, true, Ctx);
+  }
+
+  template <typename T, typename Context>
+  void mapRequired(const char *Key, T &Val, Context &Ctx) {
+    this->processKey(Key, Val, true, Ctx);
+  }
+
+  template <typename T> void mapOptional(const char *Key, T &Val) {
+    EmptyContext Ctx;
+    mapOptionalWithContext(Key, Val, Ctx);
+  }
+
+  template <typename T>
+  void mapOptional(const char *Key, T &Val, const T &Default) {
+    EmptyContext Ctx;
+    mapOptionalWithContext(Key, Val, Default, Ctx);
+  }
+
+  template <typename T, typename Context>
+  typename std::enable_if<has_SequenceTraits<T>::value, void>::type
+  mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
+    // omit key/value instead of outputting empty sequence
+    if (this->canElideEmptySequence() && !(Val.begin() != Val.end()))
+      return;
+    this->processKey(Key, Val, false, Ctx);
+  }
+
+  template <typename T, typename Context>
+  void mapOptionalWithContext(const char *Key, Optional<T> &Val, Context &Ctx) {
+    this->processKeyWithDefault(Key, Val, Optional<T>(), /*Required=*/false,
+                                Ctx);
+  }
+
+  template <typename T, typename Context>
+  typename std::enable_if<!has_SequenceTraits<T>::value, void>::type
+  mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
+    this->processKey(Key, Val, false, Ctx);
+  }
+
+  template <typename T, typename Context>
+  void mapOptionalWithContext(const char *Key, T &Val, const T &Default,
+                              Context &Ctx) {
+    this->processKeyWithDefault(Key, Val, Default, false, Ctx);
+  }
+
+private:
+  template <typename T, typename Context>
+  void processKeyWithDefault(const char *Key, Optional<T> &Val,
+                             const Optional<T> &DefaultValue, bool Required,
+                             Context &Ctx) {
+    assert(DefaultValue.hasValue() == false &&
+           "Optional<T> shouldn't have a value!");
+    void *SaveInfo;
+    bool UseDefault = true;
+    const bool sameAsDefault = outputting() && !Val.hasValue();
+    if (!outputting() && !Val.hasValue())
+      Val = T();
+    if (Val.hasValue() &&
+        this->preflightKey(Key, Required, sameAsDefault, UseDefault,
+                           SaveInfo)) {
+      yamlize(*this, Val.getValue(), Required, Ctx);
+      this->postflightKey(SaveInfo);
+    } else {
+      if (UseDefault)
+        Val = DefaultValue;
+    }
+  }
+
+  template <typename T, typename Context>
+  void processKeyWithDefault(const char *Key, T &Val, const T &DefaultValue,
+                             bool Required, Context &Ctx) {
+    void *SaveInfo;
+    bool UseDefault;
+    const bool sameAsDefault = outputting() && Val == DefaultValue;
+    if ( this->preflightKey(Key, Required, sameAsDefault, UseDefault,
+                                                                  SaveInfo) ) {
+      yamlize(*this, Val, Required, Ctx);
+      this->postflightKey(SaveInfo);
+    }
+    else {
+      if ( UseDefault )
+        Val = DefaultValue;
+    }
+  }
+
+  template <typename T, typename Context>
+  void processKey(const char *Key, T &Val, bool Required, Context &Ctx) {
+    void *SaveInfo;
+    bool UseDefault;
+    if ( this->preflightKey(Key, Required, false, UseDefault, SaveInfo) ) {
+      yamlize(*this, Val, Required, Ctx);
+      this->postflightKey(SaveInfo);
+    }
+  }
+
+private:
+  void *Ctxt;
+};
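+
+// A minimal sketch of how the IO interface above is driven in practice; the
+// struct and field names below are invented for illustration:
+//
+//   struct Point { int32_t X = 0; int32_t Y = 0; };
+//
+//   namespace llvm { namespace yaml {
+//   template <> struct MappingTraits<Point> {
+//     static void mapping(IO &io, Point &P) {
+//       io.mapRequired("x", P.X);                // key required on input
+//       io.mapOptional("y", P.Y, int32_t(0));    // defaulted; elided on
+//     }                                          // output when equal to 0
+//   };
+//   }} // end namespace llvm::yaml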
+
+namespace detail {
+
+template <typename T, typename Context>
+void doMapping(IO &io, T &Val, Context &Ctx) {
+  MappingContextTraits<T, Context>::mapping(io, Val, Ctx);
+}
+
+template <typename T> void doMapping(IO &io, T &Val, EmptyContext &Ctx) {
+  MappingTraits<T>::mapping(io, Val);
+}
+
+} // end namespace detail
+
+template <typename T>
+typename std::enable_if<has_ScalarEnumerationTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+  io.beginEnumScalar();
+  ScalarEnumerationTraits<T>::enumeration(io, Val);
+  io.endEnumScalar();
+}
+
+template <typename T>
+typename std::enable_if<has_ScalarBitSetTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+  bool DoClear;
+  if ( io.beginBitSetScalar(DoClear) ) {
+    if ( DoClear )
+      Val = static_cast<T>(0);
+    ScalarBitSetTraits<T>::bitset(io, Val);
+    io.endBitSetScalar();
+  }
+}
+
+template <typename T>
+typename std::enable_if<has_ScalarTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+  if ( io.outputting() ) {
+    std::string Storage;
+    raw_string_ostream Buffer(Storage);
+    ScalarTraits<T>::output(Val, io.getContext(), Buffer);
+    StringRef Str = Buffer.str();
+    io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
+  }
+  else {
+    StringRef Str;
+    io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
+    StringRef Result = ScalarTraits<T>::input(Str, io.getContext(), Val);
+    if ( !Result.empty() ) {
+      io.setError(Twine(Result));
+    }
+  }
+}
+
+template <typename T>
+typename std::enable_if<has_BlockScalarTraits<T>::value, void>::type
+yamlize(IO &YamlIO, T &Val, bool, EmptyContext &Ctx) {
+  if (YamlIO.outputting()) {
+    std::string Storage;
+    raw_string_ostream Buffer(Storage);
+    BlockScalarTraits<T>::output(Val, YamlIO.getContext(), Buffer);
+    StringRef Str = Buffer.str();
+    YamlIO.blockScalarString(Str);
+  } else {
+    StringRef Str;
+    YamlIO.blockScalarString(Str);
+    StringRef Result =
+        BlockScalarTraits<T>::input(Str, YamlIO.getContext(), Val);
+    if (!Result.empty())
+      YamlIO.setError(Twine(Result));
+  }
+}
+
+template <typename T, typename Context>
+typename std::enable_if<validatedMappingTraits<T, Context>::value, void>::type
+yamlize(IO &io, T &Val, bool, Context &Ctx) {
+  if (has_FlowTraits<MappingTraits<T>>::value)
+    io.beginFlowMapping();
+  else
+    io.beginMapping();
+  if (io.outputting()) {
+    StringRef Err = MappingTraits<T>::validate(io, Val);
+    if (!Err.empty()) {
+      errs() << Err << "\n";
+      assert(Err.empty() && "invalid struct trying to be written as yaml");
+    }
+  }
+  detail::doMapping(io, Val, Ctx);
+  if (!io.outputting()) {
+    StringRef Err = MappingTraits<T>::validate(io, Val);
+    if (!Err.empty())
+      io.setError(Err);
+  }
+  if (has_FlowTraits<MappingTraits<T>>::value)
+    io.endFlowMapping();
+  else
+    io.endMapping();
+}
+
+template <typename T, typename Context>
+typename std::enable_if<unvalidatedMappingTraits<T, Context>::value, void>::type
+yamlize(IO &io, T &Val, bool, Context &Ctx) {
+  if (has_FlowTraits<MappingTraits<T>>::value) {
+    io.beginFlowMapping();
+    detail::doMapping(io, Val, Ctx);
+    io.endFlowMapping();
+  } else {
+    io.beginMapping();
+    detail::doMapping(io, Val, Ctx);
+    io.endMapping();
+  }
+}
+
+template <typename T>
+typename std::enable_if<has_CustomMappingTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+  if ( io.outputting() ) {
+    io.beginMapping();
+    CustomMappingTraits<T>::output(io, Val);
+    io.endMapping();
+  } else {
+    io.beginMapping();
+    for (StringRef key : io.keys())
+      CustomMappingTraits<T>::inputOne(io, key, Val);
+    io.endMapping();
+  }
+}
+
+template <typename T>
+typename std::enable_if<missingTraits<T, EmptyContext>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+}
+
+template <typename T, typename Context>
+typename std::enable_if<has_SequenceTraits<T>::value, void>::type
+yamlize(IO &io, T &Seq, bool, Context &Ctx) {
+  if ( has_FlowTraits< SequenceTraits<T>>::value ) {
+    unsigned incnt = io.beginFlowSequence();
+    unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
+    for(unsigned i=0; i < count; ++i) {
+      void *SaveInfo;
+      if ( io.preflightFlowElement(i, SaveInfo) ) {
+        yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
+        io.postflightFlowElement(SaveInfo);
+      }
+    }
+    io.endFlowSequence();
+  }
+  else {
+    unsigned incnt = io.beginSequence();
+    unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
+    for(unsigned i=0; i < count; ++i) {
+      void *SaveInfo;
+      if ( io.preflightElement(i, SaveInfo) ) {
+        yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
+        io.postflightElement(SaveInfo);
+      }
+    }
+    io.endSequence();
+  }
+}
+
+template<>
+struct ScalarTraits<bool> {
+  static void output(const bool &, void* , raw_ostream &);
+  static StringRef input(StringRef, void *, bool &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<StringRef> {
+  static void output(const StringRef &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, StringRef &);
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+template<>
+struct ScalarTraits<std::string> {
+  static void output(const std::string &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, std::string &);
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+template<>
+struct ScalarTraits<uint8_t> {
+  static void output(const uint8_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, uint8_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<uint16_t> {
+  static void output(const uint16_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, uint16_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<uint32_t> {
+  static void output(const uint32_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, uint32_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<uint64_t> {
+  static void output(const uint64_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, uint64_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<int8_t> {
+  static void output(const int8_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, int8_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<int16_t> {
+  static void output(const int16_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, int16_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<int32_t> {
+  static void output(const int32_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, int32_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<int64_t> {
+  static void output(const int64_t &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, int64_t &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<float> {
+  static void output(const float &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, float &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<double> {
+  static void output(const double &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, double &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+// For endian types, we just use the existing ScalarTraits for the underlying
+// type.  This way endian aware types are supported whenever a ScalarTraits
+// is defined for the underlying type.
+template <typename value_type, support::endianness endian, size_t alignment>
+struct ScalarTraits<support::detail::packed_endian_specific_integral<
+    value_type, endian, alignment>> {
+  using endian_type =
+      support::detail::packed_endian_specific_integral<value_type, endian,
+                                                       alignment>;
+
+  static void output(const endian_type &E, void *Ctx, raw_ostream &Stream) {
+    ScalarTraits<value_type>::output(static_cast<value_type>(E), Ctx, Stream);
+  }
+
+  static StringRef input(StringRef Str, void *Ctx, endian_type &E) {
+    value_type V;
+    auto R = ScalarTraits<value_type>::input(Str, Ctx, V);
+    E = static_cast<endian_type>(V);
+    return R;
+  }
+
+  static QuotingType mustQuote(StringRef Str) {
+    return ScalarTraits<value_type>::mustQuote(Str);
+  }
+};
+
+// Utility for use within MappingTraits<>::mapping() method
+// to [de]normalize an object for use with YAML conversion.
+template <typename TNorm, typename TFinal>
+struct MappingNormalization {
+  MappingNormalization(IO &i_o, TFinal &Obj)
+      : io(i_o), BufPtr(nullptr), Result(Obj) {
+    if ( io.outputting() ) {
+      BufPtr = new (&Buffer) TNorm(io, Obj);
+    }
+    else {
+      BufPtr = new (&Buffer) TNorm(io);
+    }
+  }
+
+  ~MappingNormalization() {
+    if ( ! io.outputting() ) {
+      Result = BufPtr->denormalize(io);
+    }
+    BufPtr->~TNorm();
+  }
+
+  TNorm* operator->() { return BufPtr; }
+
+private:
+  using Storage = AlignedCharArrayUnion<TNorm>;
+
+  Storage       Buffer;
+  IO           &io;
+  TNorm        *BufPtr;
+  TFinal       &Result;
+};
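+
+// Typical use, sketched after the pattern in the YAML I/O documentation
+// (Polar and NormalizedPolar are user-written types, shown here only for
+// illustration):
+//
+//   template <> struct MappingTraits<Polar> {
+//     static void mapping(IO &io, Polar &P) {
+//       MappingNormalization<NormalizedPolar, Polar> Keys(io, P);
+//       io.mapRequired("x", Keys->X);
+//       io.mapRequired("y", Keys->Y);
+//     }
+//   };
+//
+// NormalizedPolar must provide NormalizedPolar(IO &), NormalizedPolar(IO &,
+// Polar &), and a denormalize(IO &) method returning a Polar, matching the
+// constructor and denormalize() calls made by MappingNormalization above.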
+
+// Utility for use within MappingTraits<>::mapping() method
+// to [de]normalize an object for use with YAML conversion.
+template <typename TNorm, typename TFinal>
+struct MappingNormalizationHeap {
+  MappingNormalizationHeap(IO &i_o, TFinal &Obj, BumpPtrAllocator *allocator)
+    : io(i_o), Result(Obj) {
+    if ( io.outputting() ) {
+      BufPtr = new (&Buffer) TNorm(io, Obj);
+    }
+    else if (allocator) {
+      BufPtr = allocator->Allocate<TNorm>();
+      new (BufPtr) TNorm(io);
+    } else {
+      BufPtr = new TNorm(io);
+    }
+  }
+
+  ~MappingNormalizationHeap() {
+    if ( io.outputting() ) {
+      BufPtr->~TNorm();
+    }
+    else {
+      Result = BufPtr->denormalize(io);
+    }
+  }
+
+  TNorm* operator->() { return BufPtr; }
+
+private:
+  using Storage = AlignedCharArrayUnion<TNorm>;
+
+  Storage       Buffer;
+  IO           &io;
+  TNorm        *BufPtr = nullptr;
+  TFinal       &Result;
+};
+
+///
+/// The Input class is used to parse a yaml document into in-memory structs
+/// and vectors.
+///
+/// It works by using YAMLParser to do a syntax parse of the entire yaml
+/// document, then the Input class builds a graph of HNodes, each of which
+/// wraps a yaml Node.  The extra layer is buffering: the low-level yaml
+/// parser only lets you look at each node once, while the buffering layer
+/// lets you search and iterate multiple times.  This is necessary because
+/// the mapRequired() method calls may not be in the same order
+/// as the keys in the document.
+///
+class Input : public IO {
+public:
+  // Construct a yaml Input object from a StringRef and optional
+  // user-data. The DiagHandler can be specified to provide
+  // alternative error reporting.
+  Input(StringRef InputContent,
+        void *Ctxt = nullptr,
+        SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+        void *DiagHandlerCtxt = nullptr);
+  Input(MemoryBufferRef Input,
+        void *Ctxt = nullptr,
+        SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+        void *DiagHandlerCtxt = nullptr);
+  ~Input() override;
+
+  // Check if there was a syntax or semantic error during parsing.
+  std::error_code error();
+
+private:
+  bool outputting() override;
+  bool mapTag(StringRef, bool) override;
+  void beginMapping() override;
+  void endMapping() override;
+  bool preflightKey(const char *, bool, bool, bool &, void *&) override;
+  void postflightKey(void *) override;
+  std::vector<StringRef> keys() override;
+  void beginFlowMapping() override;
+  void endFlowMapping() override;
+  unsigned beginSequence() override;
+  void endSequence() override;
+  bool preflightElement(unsigned index, void *&) override;
+  void postflightElement(void *) override;
+  unsigned beginFlowSequence() override;
+  bool preflightFlowElement(unsigned , void *&) override;
+  void postflightFlowElement(void *) override;
+  void endFlowSequence() override;
+  void beginEnumScalar() override;
+  bool matchEnumScalar(const char*, bool) override;
+  bool matchEnumFallback() override;
+  void endEnumScalar() override;
+  bool beginBitSetScalar(bool &) override;
+  bool bitSetMatch(const char *, bool ) override;
+  void endBitSetScalar() override;
+  void scalarString(StringRef &, QuotingType) override;
+  void blockScalarString(StringRef &) override;
+  void setError(const Twine &message) override;
+  bool canElideEmptySequence() override;
+
+  class HNode {
+    virtual void anchor();
+
+  public:
+    HNode(Node *n) : _node(n) { }
+    virtual ~HNode() = default;
+
+    static bool classof(const HNode *) { return true; }
+
+    Node *_node;
+  };
+
+  class EmptyHNode : public HNode {
+    void anchor() override;
+
+  public:
+    EmptyHNode(Node *n) : HNode(n) { }
+
+    static bool classof(const HNode *n) { return NullNode::classof(n->_node); }
+
+    static bool classof(const EmptyHNode *) { return true; }
+  };
+
+  class ScalarHNode : public HNode {
+    void anchor() override;
+
+  public:
+    ScalarHNode(Node *n, StringRef s) : HNode(n), _value(s) { }
+
+    StringRef value() const { return _value; }
+
+    static bool classof(const HNode *n) {
+      return ScalarNode::classof(n->_node) ||
+             BlockScalarNode::classof(n->_node);
+    }
+
+    static bool classof(const ScalarHNode *) { return true; }
+
+  protected:
+    StringRef _value;
+  };
+
+  class MapHNode : public HNode {
+    void anchor() override;
+
+  public:
+    MapHNode(Node *n) : HNode(n) { }
+
+    static bool classof(const HNode *n) {
+      return MappingNode::classof(n->_node);
+    }
+
+    static bool classof(const MapHNode *) { return true; }
+
+    using NameToNode = StringMap<std::unique_ptr<HNode>>;
+
+    NameToNode Mapping;
+    SmallVector<std::string, 6> ValidKeys;
+  };
+
+  class SequenceHNode : public HNode {
+    void anchor() override;
+
+  public:
+    SequenceHNode(Node *n) : HNode(n) { }
+
+    static bool classof(const HNode *n) {
+      return SequenceNode::classof(n->_node);
+    }
+
+    static bool classof(const SequenceHNode *) { return true; }
+
+    std::vector<std::unique_ptr<HNode>> Entries;
+  };
+
+  std::unique_ptr<Input::HNode> createHNodes(Node *node);
+  void setError(HNode *hnode, const Twine &message);
+  void setError(Node *node, const Twine &message);
+
+public:
+  // These are only used by operator>>. They could be private
+  // if those templated things could be made friends.
+  bool setCurrentDocument();
+  bool nextDocument();
+
+  /// Returns the current node that's being parsed by the YAML Parser.
+  const Node *getCurrentNode() const;
+
+private:
+  SourceMgr                           SrcMgr; // must be before Strm
+  std::unique_ptr<llvm::yaml::Stream> Strm;
+  std::unique_ptr<HNode>              TopNode;
+  std::error_code                     EC;
+  BumpPtrAllocator                    StringAllocator;
+  document_iterator                   DocIterator;
+  std::vector<bool>                   BitValuesUsed;
+  HNode *CurrentNode = nullptr;
+  bool                                ScalarMatchFound;
+};
+
+///
+/// The Output class is used to generate a yaml document from in-memory structs
+/// and vectors.
+///
+class Output : public IO {
+public:
+  Output(raw_ostream &, void *Ctxt = nullptr, int WrapColumn = 70);
+  ~Output() override;
+
+  /// \brief Set whether or not to output optional values which are equal
+  /// to the default value.  By default, when outputting, a value that is
+  /// equal to its default is omitted.  Sometimes, it is useful to be able
+  /// to see these values in the resulting YAML anyway.
+  void setWriteDefaultValues(bool Write) { WriteDefaultValues = Write; }
+
+  bool outputting() override;
+  bool mapTag(StringRef, bool) override;
+  void beginMapping() override;
+  void endMapping() override;
+  bool preflightKey(const char *key, bool, bool, bool &, void *&) override;
+  void postflightKey(void *) override;
+  std::vector<StringRef> keys() override;
+  void beginFlowMapping() override;
+  void endFlowMapping() override;
+  unsigned beginSequence() override;
+  void endSequence() override;
+  bool preflightElement(unsigned, void *&) override;
+  void postflightElement(void *) override;
+  unsigned beginFlowSequence() override;
+  bool preflightFlowElement(unsigned, void *&) override;
+  void postflightFlowElement(void *) override;
+  void endFlowSequence() override;
+  void beginEnumScalar() override;
+  bool matchEnumScalar(const char*, bool) override;
+  bool matchEnumFallback() override;
+  void endEnumScalar() override;
+  bool beginBitSetScalar(bool &) override;
+  bool bitSetMatch(const char *, bool ) override;
+  void endBitSetScalar() override;
+  void scalarString(StringRef &, QuotingType) override;
+  void blockScalarString(StringRef &) override;
+  void setError(const Twine &message) override;
+  bool canElideEmptySequence() override;
+
+  // These are only used by operator<<. They could be private
+  // if that templated operator could be made a friend.
+  void beginDocuments();
+  bool preflightDocument(unsigned);
+  void postflightDocument();
+  void endDocuments();
+
+private:
+  void output(StringRef s);
+  void outputUpToEndOfLine(StringRef s);
+  void newLineCheck();
+  void outputNewLine();
+  void paddedKey(StringRef key);
+  void flowKey(StringRef Key);
+
+  enum InState {
+    inSeq,
+    inFlowSeq,
+    inMapFirstKey,
+    inMapOtherKey,
+    inFlowMapFirstKey,
+    inFlowMapOtherKey
+  };
+
+  raw_ostream &Out;
+  int WrapColumn;
+  SmallVector<InState, 8> StateStack;
+  int Column = 0;
+  int ColumnAtFlowStart = 0;
+  int ColumnAtMapFlowStart = 0;
+  bool NeedBitValueComma = false;
+  bool NeedFlowSequenceComma = false;
+  bool EnumerationMatchFound = false;
+  bool NeedsNewLine = false;
+  bool WriteDefaultValues = false;
+};
+
+/// YAML I/O does conversion based on types.  But often native data types
+/// are just typedefs of built-in integral types (e.g. int).  Because the C++
+/// type matching system sees through the typedef, all the typedefed types
+/// look like a built-in type, causing the generic YAML I/O conversion
+/// to be used.  To provide better control over the YAML conversion, you can
+/// use this macro instead of typedef.  It will create a class with one field
+/// and automatic conversion operators to and from the base type.
+/// Based on BOOST_STRONG_TYPEDEF
+#define LLVM_YAML_STRONG_TYPEDEF(_base, _type)                                 \
+    struct _type {                                                             \
+        _type() = default;                                                     \
+        _type(const _base v) : value(v) {}                                     \
+        _type(const _type &v) = default;                                       \
+        _type &operator=(const _type &rhs) = default;                          \
+        _type &operator=(const _base &rhs) { value = rhs; return *this; }      \
+        operator const _base & () const { return value; }                      \
+        bool operator==(const _type &rhs) const { return value == rhs.value; } \
+        bool operator==(const _base &rhs) const { return value == rhs; }       \
+        bool operator<(const _type &rhs) const { return value < rhs.value; }   \
+        _base value;                                                           \
+        using BaseType = _base;                                                \
+    };
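+
+// For example (hypothetical name), a distinct flags type over uint32_t that
+// can be given its own ScalarTraits without affecting plain uint32_t:
+//
+//   LLVM_YAML_STRONG_TYPEDEF(uint32_t, MyFlagsWord)
+//
+// The Hex8/Hex16/Hex32/Hex64 types below are defined exactly this way.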
+
+///
+/// Use these types instead of uintXX_t in any mapping to have
+/// the YAML output formatted as hexadecimal.
+///
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, Hex8)
+LLVM_YAML_STRONG_TYPEDEF(uint16_t, Hex16)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, Hex32)
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, Hex64)
+
+template<>
+struct ScalarTraits<Hex8> {
+  static void output(const Hex8 &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, Hex8 &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<Hex16> {
+  static void output(const Hex16 &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, Hex16 &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<Hex32> {
+  static void output(const Hex32 &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, Hex32 &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+template<>
+struct ScalarTraits<Hex64> {
+  static void output(const Hex64 &, void *, raw_ostream &);
+  static StringRef input(StringRef, void *, Hex64 &);
+  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+// Define non-member operator>> so that Input can stream in a document list.
+template <typename T>
+inline
+typename std::enable_if<has_DocumentListTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docList) {
+  int i = 0;
+  EmptyContext Ctx;
+  while ( yin.setCurrentDocument() ) {
+    yamlize(yin, DocumentListTraits<T>::element(yin, docList, i), true, Ctx);
+    if ( yin.error() )
+      return yin;
+    yin.nextDocument();
+    ++i;
+  }
+  return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a map as a document.
+template <typename T>
+inline typename std::enable_if<has_MappingTraits<T, EmptyContext>::value,
+                               Input &>::type
+operator>>(Input &yin, T &docMap) {
+  EmptyContext Ctx;
+  yin.setCurrentDocument();
+  yamlize(yin, docMap, true, Ctx);
+  return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a sequence as
+// a document.
+template <typename T>
+inline
+typename std::enable_if<has_SequenceTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docSeq) {
+  EmptyContext Ctx;
+  if (yin.setCurrentDocument())
+    yamlize(yin, docSeq, true, Ctx);
+  return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a block scalar.
+template <typename T>
+inline
+typename std::enable_if<has_BlockScalarTraits<T>::value, Input &>::type
+operator>>(Input &In, T &Val) {
+  EmptyContext Ctx;
+  if (In.setCurrentDocument())
+    yamlize(In, Val, true, Ctx);
+  return In;
+}
+
+// Define non-member operator>> so that Input can stream in a string map.
+template <typename T>
+inline
+typename std::enable_if<has_CustomMappingTraits<T>::value, Input &>::type
+operator>>(Input &In, T &Val) {
+  EmptyContext Ctx;
+  if (In.setCurrentDocument())
+    yamlize(In, Val, true, Ctx);
+  return In;
+}
+
+// Provide better error message about types missing a trait specialization
+template <typename T>
+inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
+                               Input &>::type
+operator>>(Input &yin, T &docSeq) {
+  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+  return yin;
+}
+
+// Define non-member operator<< so that Output can stream out document list.
+template <typename T>
+inline
+typename std::enable_if<has_DocumentListTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &docList) {
+  EmptyContext Ctx;
+  yout.beginDocuments();
+  const size_t count = DocumentListTraits<T>::size(yout, docList);
+  for(size_t i=0; i < count; ++i) {
+    if ( yout.preflightDocument(i) ) {
+      yamlize(yout, DocumentListTraits<T>::element(yout, docList, i), true,
+              Ctx);
+      yout.postflightDocument();
+    }
+  }
+  yout.endDocuments();
+  return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a map.
+template <typename T>
+inline typename std::enable_if<has_MappingTraits<T, EmptyContext>::value,
+                               Output &>::type
+operator<<(Output &yout, T &map) {
+  EmptyContext Ctx;
+  yout.beginDocuments();
+  if ( yout.preflightDocument(0) ) {
+    yamlize(yout, map, true, Ctx);
+    yout.postflightDocument();
+  }
+  yout.endDocuments();
+  return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a sequence.
+template <typename T>
+inline
+typename std::enable_if<has_SequenceTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &seq) {
+  EmptyContext Ctx;
+  yout.beginDocuments();
+  if ( yout.preflightDocument(0) ) {
+    yamlize(yout, seq, true, Ctx);
+    yout.postflightDocument();
+  }
+  yout.endDocuments();
+  return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a block scalar.
+template <typename T>
+inline
+typename std::enable_if<has_BlockScalarTraits<T>::value, Output &>::type
+operator<<(Output &Out, T &Val) {
+  EmptyContext Ctx;
+  Out.beginDocuments();
+  if (Out.preflightDocument(0)) {
+    yamlize(Out, Val, true, Ctx);
+    Out.postflightDocument();
+  }
+  Out.endDocuments();
+  return Out;
+}
+
+// Define non-member operator<< so that Output can stream out a string map.
+template <typename T>
+inline
+typename std::enable_if<has_CustomMappingTraits<T>::value, Output &>::type
+operator<<(Output &Out, T &Val) {
+  EmptyContext Ctx;
+  Out.beginDocuments();
+  if (Out.preflightDocument(0)) {
+    yamlize(Out, Val, true, Ctx);
+    Out.postflightDocument();
+  }
+  Out.endDocuments();
+  return Out;
+}
+
+// Provide better error message about types missing a trait specialization
+template <typename T>
+inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
+                               Output &>::type
+operator<<(Output &yout, T &seq) {
+  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+  return yout;
+}
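+
+// End-to-end sketch tying the operators above together (reusing the
+// illustrative Point mapping from earlier; error handling kept minimal):
+//
+//   Point P;
+//   Input yin("{ x: 1, y: 2 }");
+//   yin >> P;
+//   if (!yin.error()) {
+//     Output yout(llvm::outs());
+//     yout << P;
+//   }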
+
+template <bool B> struct IsFlowSequenceBase {};
+template <> struct IsFlowSequenceBase<true> { static const bool flow = true; };
+
+template <typename T, bool Flow>
+struct SequenceTraitsImpl : IsFlowSequenceBase<Flow> {
+private:
+  using type = typename T::value_type;
+
+public:
+  static size_t size(IO &io, T &seq) { return seq.size(); }
+
+  static type &element(IO &io, T &seq, size_t index) {
+    if (index >= seq.size())
+      seq.resize(index + 1);
+    return seq[index];
+  }
+};
+
+// Simple helper to check an expression can be used as a bool-valued template
+// argument.
+template <bool> struct CheckIsBool { static const bool value = true; };
+
+// If T has SequenceElementTraits, then vector<T> and SmallVector<T, N> have
+// SequenceTraits that do the obvious thing.
+template <typename T>
+struct SequenceTraits<std::vector<T>,
+                      typename std::enable_if<CheckIsBool<
+                          SequenceElementTraits<T>::flow>::value>::type>
+    : SequenceTraitsImpl<std::vector<T>, SequenceElementTraits<T>::flow> {};
+template <typename T, unsigned N>
+struct SequenceTraits<SmallVector<T, N>,
+                      typename std::enable_if<CheckIsBool<
+                          SequenceElementTraits<T>::flow>::value>::type>
+    : SequenceTraitsImpl<SmallVector<T, N>, SequenceElementTraits<T>::flow> {};
+
+// Sequences of fundamental types use flow formatting.
+template <typename T>
+struct SequenceElementTraits<
+    T, typename std::enable_if<std::is_fundamental<T>::value>::type> {
+  static const bool flow = true;
+};
+
+// Sequences of strings use block formatting.
+template<> struct SequenceElementTraits<std::string> {
+  static const bool flow = false;
+};
+template<> struct SequenceElementTraits<StringRef> {
+  static const bool flow = false;
+};
+template<> struct SequenceElementTraits<std::pair<std::string, std::string>> {
+  static const bool flow = false;
+};
+
+/// Implementation of CustomMappingTraits for std::map<std::string, T>.
+template <typename T> struct StdMapStringCustomMappingTraitsImpl {
+  using map_type = std::map<std::string, T>;
+
+  static void inputOne(IO &io, StringRef key, map_type &v) {
+    io.mapRequired(key.str().c_str(), v[key]);
+  }
+
+  static void output(IO &io, map_type &v) {
+    for (auto &p : v)
+      io.mapRequired(p.first.c_str(), p.second);
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#define LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(TYPE, FLOW)                          \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  static_assert(                                                               \
+      !std::is_fundamental<TYPE>::value &&                                     \
+      !std::is_same<TYPE, std::string>::value &&                               \
+      !std::is_same<TYPE, llvm::StringRef>::value,                             \
+      "only use LLVM_YAML_IS_SEQUENCE_VECTOR for types you control");          \
+  template <> struct SequenceElementTraits<TYPE> {                             \
+    static const bool flow = FLOW;                                             \
+  };                                                                           \
+  }                                                                            \
+  }
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML sequence.
+#define LLVM_YAML_IS_SEQUENCE_VECTOR(type)                                     \
+  LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(type, false)
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML flow sequence.
+#define LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(type)                                \
+  LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(type, true)
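+
+// For instance, given a user-defined struct `Insn` whose MappingTraits are
+// already specialized (a hypothetical sketch, not part of this header):
+//
+// \code
+//   LLVM_YAML_IS_SEQUENCE_VECTOR(Insn)
+//   // std::vector<Insn> now reads and writes as a YAML block sequence.
+// \endcode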
+
+#define LLVM_YAML_DECLARE_MAPPING_TRAITS(Type)                                 \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <> struct MappingTraits<Type> {                                     \
+    static void mapping(IO &IO, Type &Obj);                                    \
+  };                                                                           \
+  }                                                                            \
+  }
+
+#define LLVM_YAML_DECLARE_ENUM_TRAITS(Type)                                    \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <> struct ScalarEnumerationTraits<Type> {                           \
+    static void enumeration(IO &io, Type &Value);                              \
+  };                                                                           \
+  }                                                                            \
+  }
+
+#define LLVM_YAML_DECLARE_BITSET_TRAITS(Type)                                  \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <> struct ScalarBitSetTraits<Type> {                                \
+    static void bitset(IO &IO, Type &Options);                                 \
+  };                                                                           \
+  }                                                                            \
+  }
+
+#define LLVM_YAML_DECLARE_SCALAR_TRAITS(Type, MustQuote)                       \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <> struct ScalarTraits<Type> {                                      \
+    static void output(const Type &Value, void *ctx, raw_ostream &Out);        \
+    static StringRef input(StringRef Scalar, void *ctxt, Type &Value);         \
+    static QuotingType mustQuote(StringRef) { return MustQuote; }              \
+  };                                                                           \
+  }                                                                            \
+  }
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML document list.
+#define LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(_type)                               \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <unsigned N>                                                        \
+  struct DocumentListTraits<SmallVector<_type, N>>                             \
+      : public SequenceTraitsImpl<SmallVector<_type, N>, false> {};            \
+  template <>                                                                  \
+  struct DocumentListTraits<std::vector<_type>>                                \
+      : public SequenceTraitsImpl<std::vector<_type>, false> {};               \
+  }                                                                            \
+  }
+
+/// Utility for declaring that std::map<std::string, _type> should be considered
+/// a YAML map.
+#define LLVM_YAML_IS_STRING_MAP(_type)                                         \
+  namespace llvm {                                                             \
+  namespace yaml {                                                             \
+  template <>                                                                  \
+  struct CustomMappingTraits<std::map<std::string, _type>>                     \
+      : public StdMapStringCustomMappingTraitsImpl<_type> {};                  \
+  }                                                                            \
+  }
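+
+// For instance, to treat std::map<std::string, unsigned> as a YAML map keyed
+// by arbitrary strings (assuming the built-in scalar traits cover the value
+// type, as they do for the fixed-width integer types):
+//
+// \code
+//   LLVM_YAML_IS_STRING_MAP(unsigned)
+// \endcode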
+
+#endif // LLVM_SUPPORT_YAMLTRAITS_H
diff --git a/linux-x64/clang/include/llvm/Support/circular_raw_ostream.h b/linux-x64/clang/include/llvm/Support/circular_raw_ostream.h
new file mode 100644
index 0000000..b46fd7f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/circular_raw_ostream.h
@@ -0,0 +1,156 @@
+//===-- llvm/Support/circular_raw_ostream.h - Buffered streams --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains raw_ostream implementations for streams to do circular
+// buffering of their output.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H
+#define LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+  /// circular_raw_ostream - A raw_ostream which *can* save its data
+  /// to a circular buffer, or can pass it through directly to an
+  /// underlying stream if specified with a buffer of zero.
+  ///
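+  /// A minimal usage sketch (buffer size and banner are illustrative):
+  ///
+  /// \code
+  ///   std::string Log;
+  ///   llvm::raw_string_ostream Base(Log);
+  ///   llvm::circular_raw_ostream COS(Base, "=== Debug log ===\n",
+  ///                                  /*BuffSize=*/4096);
+  ///   COS << "only roughly the last 4K of output is retained\n";
+  ///   // Destroying COS prints the banner and dumps the retained buffer.
+  /// \endcode
+  ///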
+  class circular_raw_ostream : public raw_ostream {
+  public:
+    /// TAKE_OWNERSHIP - Tell this stream that it owns the underlying
+    /// stream and is responsible for cleanup, memory management
+    /// issues, etc.
+    ///
+    static const bool TAKE_OWNERSHIP = true;
+
+    /// REFERENCE_ONLY - Tell this stream it should not manage the
+    /// held stream.
+    ///
+    static const bool REFERENCE_ONLY = false;
+
+  private:
+    /// TheStream - The real stream we output to. We set it to be
+    /// unbuffered, since we're already doing our own buffering.
+    ///
+    raw_ostream *TheStream;
+
+    /// OwnsStream - Are we responsible for managing the underlying
+    /// stream?
+    ///
+    bool OwnsStream;
+
+    /// BufferSize - The size of the buffer in bytes.
+    ///
+    size_t BufferSize;
+
+    /// BufferArray - The actual buffer storage.
+    ///
+    char *BufferArray;
+
+    /// Cur - Pointer to the current output point in BufferArray.
+    ///
+    char *Cur;
+
+    /// Filled - Indicate whether the buffer has been completely
+    /// filled.  This helps avoid garbage output.
+    ///
+    bool Filled;
+
+    /// Banner - A pointer to a banner to print before dumping the
+    /// log.
+    ///
+    const char *Banner;
+
+    /// flushBuffer - Dump the contents of the buffer to Stream.
+    ///
+    void flushBuffer() {
+      if (Filled)
+        // Write the older portion of the buffer.
+        TheStream->write(Cur, BufferArray + BufferSize - Cur);
+      // Write the newer portion of the buffer.
+      TheStream->write(BufferArray, Cur - BufferArray);
+      Cur = BufferArray;
+      Filled = false;
+    }
+
+    void write_impl(const char *Ptr, size_t Size) override;
+
+    /// current_pos - Return the current position within the stream,
+    /// not counting the bytes currently in the buffer.
+    ///
+    uint64_t current_pos() const override {
+      // This has the same effect as calling TheStream.current_pos(),
+      // but that interface is private.
+      return TheStream->tell() - TheStream->GetNumBytesInBuffer();
+    }
+
+  public:
+    /// circular_raw_ostream - Construct an optionally
+    /// circular-buffered stream, handing it an underlying stream to
+    /// do the "real" output.
+    ///
+    /// As a side effect, if BuffSize is nonzero, the given Stream is
+    /// set to be Unbuffered.  This is because circular_raw_ostream
+    /// does its own buffering, so it doesn't want another layer of
+    /// buffering to be happening underneath it.
+    ///
+    /// "Owns" tells the circular_raw_ostream whether it is
+    /// responsible for managing the held stream, doing memory
+    /// management of it, etc.
+    ///
+    circular_raw_ostream(raw_ostream &Stream, const char *Header,
+                         size_t BuffSize = 0, bool Owns = REFERENCE_ONLY)
+        : raw_ostream(/*unbuffered*/ true), TheStream(nullptr),
+          OwnsStream(Owns), BufferSize(BuffSize), BufferArray(nullptr),
+          Filled(false), Banner(Header) {
+      if (BufferSize != 0)
+        BufferArray = new char[BufferSize];
+      Cur = BufferArray;
+      setStream(Stream, Owns);
+    }
+
+    ~circular_raw_ostream() override {
+      flush();
+      flushBufferWithBanner();
+      releaseStream();
+      delete[] BufferArray;
+    }
+
+    /// setStream - Tell the circular_raw_ostream to output a
+    /// different stream.  "Owns" tells circular_raw_ostream whether
+    /// it should take responsibility for managing the underlying
+    /// stream.
+    ///
+    void setStream(raw_ostream &Stream, bool Owns = REFERENCE_ONLY) {
+      releaseStream();
+      TheStream = &Stream;
+      OwnsStream = Owns;
+    }
+
+    /// flushBufferWithBanner - Force output of the buffer along with
+    /// a small header.
+    ///
+    void flushBufferWithBanner();
+
+  private:
+    /// releaseStream - Delete the held stream if needed. Otherwise,
+    /// transfer the buffer settings from this circular_raw_ostream
+    /// back to the underlying stream.
+    ///
+    void releaseStream() {
+      if (!TheStream)
+        return;
+      if (OwnsStream)
+        delete TheStream;
+    }
+  };
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/raw_os_ostream.h b/linux-x64/clang/include/llvm/Support/raw_os_ostream.h
new file mode 100644
index 0000000..a983aeb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/raw_os_ostream.h
@@ -0,0 +1,42 @@
+//===- raw_os_ostream.h - std::ostream adaptor for raw_ostream --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the raw_os_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_OS_OSTREAM_H
+#define LLVM_SUPPORT_RAW_OS_OSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <iosfwd>
+
+namespace llvm {
+
+/// raw_os_ostream - A raw_ostream that writes to an std::ostream.  This is a
+/// simple adaptor class.  It does not check for output errors; clients should
+/// use the underlying stream to detect errors.
+class raw_os_ostream : public raw_ostream {
+  std::ostream &OS;
+
+  /// write_impl - See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t Size) override;
+
+  /// current_pos - Return the current position within the stream, not
+  /// counting the bytes currently in the buffer.
+  uint64_t current_pos() const override;
+
+public:
+  raw_os_ostream(std::ostream &O) : OS(O) {}
+  ~raw_os_ostream() override;
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/raw_ostream.h b/linux-x64/clang/include/llvm/Support/raw_ostream.h
new file mode 100644
index 0000000..d11f5a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/raw_ostream.h
@@ -0,0 +1,543 @@
+//===--- raw_ostream.h - Raw output stream ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the raw_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_OSTREAM_H
+#define LLVM_SUPPORT_RAW_OSTREAM_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+class formatv_object_base;
+class format_object_base;
+class FormattedString;
+class FormattedNumber;
+class FormattedBytes;
+
+namespace sys {
+namespace fs {
+enum OpenFlags : unsigned;
+} // end namespace fs
+} // end namespace sys
+
+/// This class implements an extremely fast bulk output stream that can *only*
+/// output to a stream.  It does not support seeking, reopening, rewinding, line
+/// buffered disciplines etc. It is a simple buffer that outputs
+/// a chunk at a time.
+class raw_ostream {
+private:
+  /// The buffer is handled in such a way that the buffer is
+  /// uninitialized, unbuffered, or out of space when OutBufCur >=
+  /// OutBufEnd. Thus a single comparison suffices to determine if we
+  /// need to take the slow path to write a single character.
+  ///
+  /// The buffer is in one of three states:
+  ///  1. Unbuffered (BufferMode == Unbuffered)
+  ///  2. Uninitialized (BufferMode != Unbuffered && OutBufStart == 0).
+  ///  3. Buffered (BufferMode != Unbuffered && OutBufStart != 0 &&
+  ///               OutBufEnd - OutBufStart >= 1).
+  ///
+  /// If buffered, then the raw_ostream owns the buffer if (BufferMode ==
+  /// InternalBuffer); otherwise the buffer has been set via SetBuffer and is
+  /// managed by the subclass.
+  ///
+  /// If a subclass installs an external buffer using SetBuffer then it can wait
+  /// for a \see write_impl() call to handle the data which has been put into
+  /// this buffer.
+  char *OutBufStart, *OutBufEnd, *OutBufCur;
+
+  enum BufferKind {
+    Unbuffered = 0,
+    InternalBuffer,
+    ExternalBuffer
+  } BufferMode;
+
+public:
+  // color order matches ANSI escape sequence, don't change
+  enum Colors {
+    BLACK = 0,
+    RED,
+    GREEN,
+    YELLOW,
+    BLUE,
+    MAGENTA,
+    CYAN,
+    WHITE,
+    SAVEDCOLOR
+  };
+
+  explicit raw_ostream(bool unbuffered = false)
+      : BufferMode(unbuffered ? Unbuffered : InternalBuffer) {
+    // Start out ready to flush.
+    OutBufStart = OutBufEnd = OutBufCur = nullptr;
+  }
+
+  raw_ostream(const raw_ostream &) = delete;
+  void operator=(const raw_ostream &) = delete;
+
+  virtual ~raw_ostream();
+
+  /// tell - Return the current offset with the file.
+  uint64_t tell() const { return current_pos() + GetNumBytesInBuffer(); }
+
+  //===--------------------------------------------------------------------===//
+  // Configuration Interface
+  //===--------------------------------------------------------------------===//
+
+  /// Set the stream to be buffered, with an automatically determined buffer
+  /// size.
+  void SetBuffered();
+
+  /// Set the stream to be buffered, using the specified buffer size.
+  void SetBufferSize(size_t Size) {
+    flush();
+    SetBufferAndMode(new char[Size], Size, InternalBuffer);
+  }
+
+  size_t GetBufferSize() const {
+    // If we're supposed to be buffered but haven't actually gotten around
+    // to allocating the buffer yet, return the value that would be used.
+    if (BufferMode != Unbuffered && OutBufStart == nullptr)
+      return preferred_buffer_size();
+
+    // Otherwise just return the size of the allocated buffer.
+    return OutBufEnd - OutBufStart;
+  }
+
+  /// Set the stream to be unbuffered. When unbuffered, the stream will flush
+  /// after every write. This routine will also flush the buffer immediately
+  /// when the stream is being set to unbuffered.
+  void SetUnbuffered() {
+    flush();
+    SetBufferAndMode(nullptr, 0, Unbuffered);
+  }
+
+  size_t GetNumBytesInBuffer() const {
+    return OutBufCur - OutBufStart;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Data Output Interface
+  //===--------------------------------------------------------------------===//
+
+  void flush() {
+    if (OutBufCur != OutBufStart)
+      flush_nonempty();
+  }
+
+  raw_ostream &operator<<(char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(unsigned char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(signed char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(StringRef Str) {
+    // Inline fast path, particularly for strings with a known length.
+    size_t Size = Str.size();
+
+    // Make sure we can use the fast path.
+    if (Size > (size_t)(OutBufEnd - OutBufCur))
+      return write(Str.data(), Size);
+
+    if (Size) {
+      memcpy(OutBufCur, Str.data(), Size);
+      OutBufCur += Size;
+    }
+    return *this;
+  }
+
+  raw_ostream &operator<<(const char *Str) {
+    // Inline fast path, particularly for constant strings where a sufficiently
+    // smart compiler will simplify strlen.
+
+    return this->operator<<(StringRef(Str));
+  }
+
+  raw_ostream &operator<<(const std::string &Str) {
+    // Avoid the fast path; it would only increase code size for a marginal win.
+    return write(Str.data(), Str.length());
+  }
+
+  raw_ostream &operator<<(const SmallVectorImpl<char> &Str) {
+    return write(Str.data(), Str.size());
+  }
+
+  raw_ostream &operator<<(unsigned long N);
+  raw_ostream &operator<<(long N);
+  raw_ostream &operator<<(unsigned long long N);
+  raw_ostream &operator<<(long long N);
+  raw_ostream &operator<<(const void *P);
+
+  raw_ostream &operator<<(unsigned int N) {
+    return this->operator<<(static_cast<unsigned long>(N));
+  }
+
+  raw_ostream &operator<<(int N) {
+    return this->operator<<(static_cast<long>(N));
+  }
+
+  raw_ostream &operator<<(double N);
+
+  /// Output \p N in hexadecimal, without any prefix or padding.
+  raw_ostream &write_hex(unsigned long long N);
+
+  /// Output a formatted UUID with dash separators.
+  using uuid_t = uint8_t[16];
+  raw_ostream &write_uuid(const uuid_t UUID);
+
+  /// Output \p Str, turning '\\', '\t', '\n', '"', and anything that doesn't
+  /// satisfy std::isprint into an escape sequence.
+  raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);
+
+  raw_ostream &write(unsigned char C);
+  raw_ostream &write(const char *Ptr, size_t Size);
+
+  // Formatted output, see the format() function in Support/Format.h.
+  raw_ostream &operator<<(const format_object_base &Fmt);
+
+  // Formatted output, see the leftJustify() function in Support/Format.h.
+  raw_ostream &operator<<(const FormattedString &);
+
+  // Formatted output, see the formatHex() function in Support/Format.h.
+  raw_ostream &operator<<(const FormattedNumber &);
+
+  // Formatted output, see the formatv() function in Support/FormatVariadic.h.
+  raw_ostream &operator<<(const formatv_object_base &);
+
+  // Formatted output, see the format_bytes() function in Support/Format.h.
+  raw_ostream &operator<<(const FormattedBytes &);
+
+  /// indent - Insert 'NumSpaces' spaces.
+  raw_ostream &indent(unsigned NumSpaces);
+
+  /// Changes the foreground color of text that will be output from this point
+  /// forward.
+  /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
+  /// change only the bold attribute, and keep colors untouched
+  /// @param Bold bold/brighter text, default false
+  /// @param BG if true change the background, default: change foreground
+  /// @returns itself so it can be used within << invocations
+  virtual raw_ostream &changeColor(enum Colors Color,
+                                   bool Bold = false,
+                                   bool BG = false) {
+    (void)Color;
+    (void)Bold;
+    (void)BG;
+    return *this;
+  }
+
+  /// Resets the colors to terminal defaults. Call this when you are done
+  /// outputting colored text, or before program exit.
+  virtual raw_ostream &resetColor() { return *this; }
+
+  /// Reverses the foreground and background colors.
+  virtual raw_ostream &reverseColor() { return *this; }
+
+  /// This function determines if this stream is connected to a "tty" or
+  /// "console" window. That is, the output would be displayed to the user
+  /// rather than being put on a pipe or stored in a file.
+  virtual bool is_displayed() const { return false; }
+
+  /// This function determines if this stream is displayed and supports colors.
+  virtual bool has_colors() const { return is_displayed(); }
+
+  //===--------------------------------------------------------------------===//
+  // Subclass Interface
+  //===--------------------------------------------------------------------===//
+
+private:
+  /// This is the piece of the class that is implemented by subclasses.  This
+  /// writes the \p Size bytes starting at \p Ptr to the underlying stream.
+  ///
+  /// This function is guaranteed to only be called at a point at which it is
+  /// safe for the subclass to install a new buffer via SetBuffer.
+  ///
+  /// \param Ptr The start of the data to be written. For buffered streams this
+  /// is guaranteed to be the start of the buffer.
+  ///
+  /// \param Size The number of bytes to be written.
+  ///
+  /// \invariant { Size > 0 }
+  virtual void write_impl(const char *Ptr, size_t Size) = 0;
+
+  // An out of line virtual method to provide a home for the class vtable.
+  virtual void handle();
+
+  /// Return the current position within the stream, not counting the bytes
+  /// currently in the buffer.
+  virtual uint64_t current_pos() const = 0;
+
+protected:
+  /// Use the provided buffer as the raw_ostream buffer. This is intended for
+  /// use only by subclasses which can arrange for the output to go directly
+  /// into the desired output buffer, instead of being copied on each flush.
+  void SetBuffer(char *BufferStart, size_t Size) {
+    SetBufferAndMode(BufferStart, Size, ExternalBuffer);
+  }
+
+  /// Return an efficient buffer size for the underlying output mechanism.
+  virtual size_t preferred_buffer_size() const;
+
+  /// Return the beginning of the current stream buffer, or 0 if the stream is
+  /// unbuffered.
+  const char *getBufferStart() const { return OutBufStart; }
+
+  //===--------------------------------------------------------------------===//
+  // Private Interface
+  //===--------------------------------------------------------------------===//
+private:
+  /// Install the given buffer and mode.
+  void SetBufferAndMode(char *BufferStart, size_t Size, BufferKind Mode);
+
+  /// Flush the current buffer, which is known to be non-empty. This outputs the
+  /// currently buffered data and resets the buffer to empty.
+  void flush_nonempty();
+
+  /// Copy data into the buffer. Size must not be greater than the number of
+  /// unused bytes in the buffer.
+  void copy_to_buffer(const char *Ptr, size_t Size);
+};
+
+/// An abstract base class for stream implementations that also support a
+/// pwrite operation. This is useful for code that can mostly stream out data,
+/// but needs to patch in a header that needs to know the output size.
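+///
+/// A sketch of the back-patching pattern (field layout is illustrative):
+///
+/// \code
+///   void emit(llvm::raw_pwrite_stream &OS) {
+///     uint64_t SizeFieldOffset = OS.tell();
+///     OS.write("\0\0\0\0", 4);      // placeholder for a 4-byte size field
+///     OS << "payload...";           // stream out the real data
+///     uint32_t Size = OS.tell() - SizeFieldOffset - 4;
+///     OS.pwrite(reinterpret_cast<const char *>(&Size), sizeof(Size),
+///               SizeFieldOffset);   // patch the placeholder in place
+///   }
+/// \endcode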
+class raw_pwrite_stream : public raw_ostream {
+  virtual void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) = 0;
+
+public:
+  explicit raw_pwrite_stream(bool Unbuffered = false)
+      : raw_ostream(Unbuffered) {}
+  void pwrite(const char *Ptr, size_t Size, uint64_t Offset) {
+#ifndef NDEBUG
+    uint64_t Pos = tell();
+    // /dev/null always reports a pos of 0, so we cannot perform this check
+    // in that case.
+    if (Pos)
+      assert(Size + Offset <= Pos && "We don't support extending the stream");
+#endif
+    pwrite_impl(Ptr, Size, Offset);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// File Output Streams
+//===----------------------------------------------------------------------===//
+
+/// A raw_ostream that writes to a file descriptor.
+///
+class raw_fd_ostream : public raw_pwrite_stream {
+  int FD;
+  bool ShouldClose;
+
+  std::error_code EC;
+
+  uint64_t pos;
+
+  bool SupportsSeeking;
+
+  /// See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t Size) override;
+
+  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+  /// Return the current position within the stream, not counting the bytes
+  /// currently in the buffer.
+  uint64_t current_pos() const override { return pos; }
+
+  /// Determine an efficient buffer size.
+  size_t preferred_buffer_size() const override;
+
+  /// Set the flag indicating that an output error has been encountered.
+  void error_detected(std::error_code EC) { this->EC = EC; }
+
+public:
+  /// Open the specified file for writing. If an error occurs, information
+  /// about the error is put into EC, and the stream should be immediately
+  /// destroyed;
+  /// \p Flags allows optional flags to control how the file will be opened.
+  ///
+  /// As a special case, if Filename is "-", then the stream will use
+  /// STDOUT_FILENO instead of opening a file. This will not close the stdout
+  /// descriptor.
+  raw_fd_ostream(StringRef Filename, std::error_code &EC,
+                 sys::fs::OpenFlags Flags);
+
+  /// FD is the file descriptor that this writes to.  If ShouldClose is true,
+  /// this closes the file when the stream is destroyed. If FD is for stdout or
+  /// stderr, it will not be closed.
+  raw_fd_ostream(int fd, bool shouldClose, bool unbuffered=false);
+
+  ~raw_fd_ostream() override;
+
+  /// Manually flush the stream and close the file. Note that this does not call
+  /// fsync.
+  void close();
+
+  bool supportsSeeking() { return SupportsSeeking; }
+
+  /// Flushes the stream and repositions the underlying file descriptor position
+  /// to the offset specified from the beginning of the file.
+  uint64_t seek(uint64_t off);
+
+  raw_ostream &changeColor(enum Colors colors, bool bold=false,
+                           bool bg=false) override;
+  raw_ostream &resetColor() override;
+
+  raw_ostream &reverseColor() override;
+
+  bool is_displayed() const override;
+
+  bool has_colors() const override;
+
+  std::error_code error() const { return EC; }
+
+  /// Return the value of the flag in this raw_fd_ostream indicating whether an
+  /// output error has been encountered.
+  /// This doesn't implicitly flush any pending output.  Also, it is not
+  /// guaranteed to detect all errors unless the stream has been closed.
+  bool has_error() const { return bool(EC); }
+
+  /// Set the flag read by has_error() to false. If the error flag is set at the
+  /// time when this raw_ostream's destructor is called, report_fatal_error is
+  /// called to report the error. Use clear_error() after handling the error to
+  /// avoid this behavior.
+  ///
+  ///   "Errors should never pass silently.
+  ///    Unless explicitly silenced."
+  ///      - from The Zen of Python, by Tim Peters
+  ///
+  void clear_error() { EC = std::error_code(); }
+};
+
+/// This returns a reference to a raw_ostream for standard output. Use it like:
+/// outs() << "foo" << "bar";
+raw_ostream &outs();
+
+/// This returns a reference to a raw_ostream for standard error. Use it like:
+/// errs() << "foo" << "bar";
+raw_ostream &errs();
+
+/// This returns a reference to a raw_ostream which simply discards output.
+raw_ostream &nulls();
+
+//===----------------------------------------------------------------------===//
+// Output Stream Adaptors
+//===----------------------------------------------------------------------===//
+
+/// A raw_ostream that writes to an std::string.  This is a simple adaptor
+/// class. This class does not encounter output errors.
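+///
+/// Typical usage:
+///
+/// \code
+///   std::string Buf;
+///   llvm::raw_string_ostream OS(Buf);
+///   OS << "value = " << 42;
+///   llvm::StringRef S = OS.str(); // flushes and returns the backing string
+/// \endcode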
+class raw_string_ostream : public raw_ostream {
+  std::string &OS;
+
+  /// See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t Size) override;
+
+  /// Return the current position within the stream, not counting the bytes
+  /// currently in the buffer.
+  uint64_t current_pos() const override { return OS.size(); }
+
+public:
+  explicit raw_string_ostream(std::string &O) : OS(O) {}
+  ~raw_string_ostream() override;
+
+  /// Flushes the stream contents to the target string and returns a
+  /// reference to the string.
+  std::string& str() {
+    flush();
+    return OS;
+  }
+};
+
+/// A raw_ostream that writes to a SmallVector or SmallString.  This is a
+/// simple adaptor class. This class does not encounter output errors.
+/// raw_svector_ostream operates without a buffer, delegating all memory
+/// management to the SmallString. Thus the SmallString is always up-to-date,
+/// may be used directly and there is no need to call flush().
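+///
+/// For example:
+///
+/// \code
+///   llvm::SmallString<128> Buf;
+///   llvm::raw_svector_ostream OS(Buf);
+///   OS << "id" << 7;
+///   llvm::StringRef S = OS.str(); // always current; no flush() needed
+/// \endcode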
+class raw_svector_ostream : public raw_pwrite_stream {
+  SmallVectorImpl<char> &OS;
+
+  /// See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t Size) override;
+
+  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+  /// Return the current position within the stream.
+  uint64_t current_pos() const override;
+
+public:
+  /// Construct a new raw_svector_ostream.
+  ///
+  /// \param O The vector to write to; this should generally have at least 128
+  /// bytes free to avoid any extraneous memory overhead.
+  explicit raw_svector_ostream(SmallVectorImpl<char> &O) : OS(O) {
+    SetUnbuffered();
+  }
+
+  ~raw_svector_ostream() override = default;
+
+  void flush() = delete;
+
+  /// Return a StringRef for the vector contents.
+  StringRef str() { return StringRef(OS.data(), OS.size()); }
+};
+
+/// A raw_ostream that discards all output.
+class raw_null_ostream : public raw_pwrite_stream {
+  /// See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t size) override;
+  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+  /// Return the current position within the stream, not counting the bytes
+  /// currently in the buffer.
+  uint64_t current_pos() const override;
+
+public:
+  explicit raw_null_ostream() = default;
+  ~raw_null_ostream() override;
+};
+
+class buffer_ostream : public raw_svector_ostream {
+  raw_ostream &OS;
+  SmallVector<char, 0> Buffer;
+
+public:
+  buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
+  ~buffer_ostream() override { OS << str(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_RAW_OSTREAM_H
diff --git a/linux-x64/clang/include/llvm/Support/raw_sha1_ostream.h b/linux-x64/clang/include/llvm/Support/raw_sha1_ostream.h
new file mode 100644
index 0000000..bd55d98
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/raw_sha1_ostream.h
@@ -0,0 +1,47 @@
+//==- raw_sha1_ostream.h - raw_ostream that computes SHA1       --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the raw_sha1_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_SHA1_OSTREAM_H
+#define LLVM_SUPPORT_RAW_SHA1_OSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// A raw_ostream that hashes its content using the SHA-1 algorithm.
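+///
+/// For example:
+///
+/// \code
+///   llvm::raw_sha1_ostream OS;
+///   OS << "contents to hash";
+///   llvm::StringRef Digest = OS.sha1(); // 20-byte binary digest
+/// \endcode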
+class raw_sha1_ostream : public raw_ostream {
+  SHA1 State;
+
+  /// See raw_ostream::write_impl.
+  void write_impl(const char *Ptr, size_t Size) override {
+    State.update(ArrayRef<uint8_t>((const uint8_t *)Ptr, Size));
+  }
+
+public:
+  /// Return the current SHA1 hash for the content of the stream.
+  StringRef sha1() {
+    flush();
+    return State.result();
+  }
+
+  /// Reset the internal state to start over from scratch.
+  void resetHash() { State.init(); }
+
+  uint64_t current_pos() const override { return 0; }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/thread.h b/linux-x64/clang/include/llvm/Support/thread.h
new file mode 100644
index 0000000..787a513
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/thread.h
@@ -0,0 +1,53 @@
+//===-- llvm/Support/thread.h - Wrapper for <thread> ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header is a wrapper for <thread> that works around problems with the
+// MSVC headers when exceptions are disabled. It also provides llvm::thread,
+// which is either a typedef of std::thread or a replacement that calls the
+// function synchronously depending on the value of LLVM_ENABLE_THREADS.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREAD_H
+#define LLVM_SUPPORT_THREAD_H
+
+#include "llvm/Config/llvm-config.h"
+
+#if LLVM_ENABLE_THREADS
+
+#include <thread>
+
+namespace llvm {
+typedef std::thread thread;
+}
+
+#else // !LLVM_ENABLE_THREADS
+
+#include <utility>
+
+namespace llvm {
+
+struct thread {
+  thread() {}
+  thread(thread &&other) {}
+  template <class Function, class... Args>
+  explicit thread(Function &&f, Args &&... args) {
+    f(std::forward<Args>(args)...);
+  }
+  thread(const thread &) = delete;
+
+  void join() {}
+  static unsigned hardware_concurrency() { return 1; }
+};
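+
+// When LLVM_ENABLE_THREADS is off, construction runs the callable inline, so
+// code written against llvm::thread still works, e.g.:
+//
+// \code
+//   llvm::thread T([] { /* executes synchronously here */ });
+//   T.join(); // a no-op in this configuration
+// \endcode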
+
+}
+
+#endif // LLVM_ENABLE_THREADS
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Support/type_traits.h b/linux-x64/clang/include/llvm/Support/type_traits.h
new file mode 100644
index 0000000..cc08783
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/type_traits.h
@@ -0,0 +1,122 @@
+//===- llvm/Support/type_traits.h - Simplified type traits ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides useful additions to the standard type_traits library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
+#define LLVM_SUPPORT_TYPE_TRAITS_H
+
+#include "llvm/Support/Compiler.h"
+#include <type_traits>
+#include <utility>
+
+#ifndef __has_feature
+#define LLVM_DEFINED_HAS_FEATURE
+#define __has_feature(x) 0
+#endif
+
+namespace llvm {
+
+/// isPodLike - This is a type trait that is used to determine whether a given
+/// type can be copied around with memcpy instead of running ctors etc.
+template <typename T>
+struct isPodLike {
+  // std::is_trivially_copyable is available in libc++ with clang, and in the
+  // libstdc++ that comes with GCC 5.
+#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) ||      \
+    (defined(__GNUC__) && __GNUC__ >= 5)
+  // If the compiler supports the is_trivially_copyable trait use it, as it
+  // matches the definition of isPodLike closely.
+  static const bool value = std::is_trivially_copyable<T>::value;
+#elif __has_feature(is_trivially_copyable)
+  // Use the internal name if the compiler supports is_trivially_copyable but we
+  // don't know if the standard library does. This is the case for clang in
+  // conjunction with libstdc++ from GCC 4.x.
+  static const bool value = __is_trivially_copyable(T);
+#else
+  // If we don't know anything else, we can (at least) assume that all non-class
+  // types are PODs.
+  static const bool value = !std::is_class<T>::value;
+#endif
+};
+
+// std::pair's are pod-like if their elements are.
+template<typename T, typename U>
+struct isPodLike<std::pair<T, U>> {
+  static const bool value = isPodLike<T>::value && isPodLike<U>::value;
+};
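+
+// Clients can opt a trivially-copyable type in explicitly. A hypothetical
+// example (the struct is illustrative, not part of LLVM):
+//
+// \code
+//   struct PackedLoc { unsigned Line, Col; };
+//
+//   namespace llvm {
+//   template <> struct isPodLike<PackedLoc> {
+//     static const bool value = true;
+//   };
+//   } // end namespace llvm
+// \endcode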
+
+/// \brief Metafunction that determines whether the given type is either an
+/// integral type or an enumeration type, including enum classes.
+///
+/// Note that this accepts potentially more integral types than is_integral
+/// because it is based on being implicitly convertible to an integral type.
+/// Also note that enum classes aren't implicitly convertible to integral types,
+/// the value may therefore need to be explicitly converted before being used.
+template <typename T> class is_integral_or_enum {
+  using UnderlyingT = typename std::remove_reference<T>::type;
+
+public:
+  static const bool value =
+      !std::is_class<UnderlyingT>::value && // Filter conversion operators.
+      !std::is_pointer<UnderlyingT>::value &&
+      !std::is_floating_point<UnderlyingT>::value &&
+      (std::is_enum<UnderlyingT>::value ||
+       std::is_convertible<UnderlyingT, unsigned long long>::value);
+};
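+
+// For example:
+//
+// \code
+//   enum class Color { Red };
+//   static_assert(llvm::is_integral_or_enum<int>::value, "int qualifies");
+//   static_assert(llvm::is_integral_or_enum<Color>::value, "enum classes too");
+//   static_assert(!llvm::is_integral_or_enum<float>::value, "but not floats");
+// \endcode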
+
+/// \brief If T is a pointer, just return it. If it is not, return T&.
+template<typename T, typename Enable = void>
+struct add_lvalue_reference_if_not_pointer { using type = T &; };
+
+template <typename T>
+struct add_lvalue_reference_if_not_pointer<
+    T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+  using type = T;
+};
+
+/// \brief If T is a pointer to X, return a pointer to const X. If it is not,
+/// return const T.
+template<typename T, typename Enable = void>
+struct add_const_past_pointer { using type = const T; };
+
+template <typename T>
+struct add_const_past_pointer<
+    T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+  using type = const typename std::remove_pointer<T>::type *;
+};
+
+template <typename T, typename Enable = void>
+struct const_pointer_or_const_ref {
+  using type = const T &;
+};
+template <typename T>
+struct const_pointer_or_const_ref<
+    T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+  using type = typename add_const_past_pointer<T>::type;
+};
+
+} // end namespace llvm
+
+// If the compiler supports detecting whether a class is final, define
+// an LLVM_IS_FINAL macro. If it cannot be defined properly, this
+// macro will be left undefined.
+#if __cplusplus >= 201402L
+#define LLVM_IS_FINAL(Ty) std::is_final<Ty>()
+#elif __has_feature(is_final) || LLVM_GNUC_PREREQ(4, 7, 0)
+#define LLVM_IS_FINAL(Ty) __is_final(Ty)
+#endif
+
+#ifdef LLVM_DEFINED_HAS_FEATURE
+#undef __has_feature
+#endif
+
+#endif // LLVM_SUPPORT_TYPE_TRAITS_H
diff --git a/linux-x64/clang/include/llvm/Support/xxhash.h b/linux-x64/clang/include/llvm/Support/xxhash.h
new file mode 100644
index 0000000..f7ca460
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Support/xxhash.h
@@ -0,0 +1,47 @@
+/*
+   xxHash - Extremely Fast Hash algorithm
+   Header File
+   Copyright (C) 2012-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Based on revision d2df04efcbef7d7f6886d345861e5dfda4edacc1. Removed
+ * everything but a simple interface for computing XXH64. */
+
+#ifndef LLVM_SUPPORT_XXHASH_H
+#define LLVM_SUPPORT_XXHASH_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+uint64_t xxHash64(llvm::StringRef Data);
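+
+// For example: uint64_t H = xxHash64("some data");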
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/TableGen/Error.h b/linux-x64/clang/include/llvm/TableGen/Error.h
new file mode 100644
index 0000000..de4d3bf
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/Error.h
@@ -0,0 +1,41 @@
+//===- llvm/TableGen/Error.h - tblgen error handling helpers ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains error handling helper routines to pretty-print diagnostic
+// messages from tblgen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_ERROR_H
+#define LLVM_TABLEGEN_ERROR_H
+
+#include "llvm/Support/SourceMgr.h"
+
+namespace llvm {
+
+void PrintNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg);
+
+void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg);
+void PrintWarning(const char *Loc, const Twine &Msg);
+void PrintWarning(const Twine &Msg);
+
+void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
+void PrintError(const char *Loc, const Twine &Msg);
+void PrintError(const Twine &Msg);
+
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(const Twine &Msg);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef<SMLoc> ErrorLoc,
+                                             const Twine &Msg);
+
+extern SourceMgr SrcMgr;
+extern unsigned ErrorsPrinted;
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/TableGen/Main.h b/linux-x64/clang/include/llvm/TableGen/Main.h
new file mode 100644
index 0000000..670572d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/Main.h
@@ -0,0 +1,30 @@
+//===- llvm/TableGen/Main.h - tblgen entry point ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the common entry point for tblgen tools.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_MAIN_H
+#define LLVM_TABLEGEN_MAIN_H
+
+namespace llvm {
+
+class raw_ostream;
+class RecordKeeper;
+
+/// Perform the action using Records, and write output to OS.
+/// Returns true on error, false otherwise.
+using TableGenMainFn = bool (raw_ostream &OS, RecordKeeper &Records);
+
+int TableGenMain(char *argv0, TableGenMainFn *MainFn);
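+
+// A minimal tblgen-style driver (illustrative sketch; the emitter function
+// and its output are hypothetical):
+//
+// \code
+//   static bool EmitStuff(llvm::raw_ostream &OS, llvm::RecordKeeper &Records) {
+//     OS << "// " << Records.getClasses().size() << " classes parsed\n";
+//     return false; // false means success
+//   }
+//
+//   int main(int argc, char **argv) {
+//     llvm::cl::ParseCommandLineOptions(argc, argv);
+//     return llvm::TableGenMain(argv[0], &EmitStuff);
+//   }
+// \endcode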
+
+} // end namespace llvm
+
+#endif // LLVM_TABLEGEN_MAIN_H
diff --git a/linux-x64/clang/include/llvm/TableGen/Record.h b/linux-x64/clang/include/llvm/TableGen/Record.h
new file mode 100644
index 0000000..3b2ebaa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/Record.h
@@ -0,0 +1,1910 @@
+//===- llvm/TableGen/Record.h - Classes for Table Records -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the main TableGen data structures, including the TableGen
+// types, values, and high-level data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_RECORD_H
+#define LLVM_TABLEGEN_RECORD_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/TrailingObjects.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class ListRecTy;
+struct MultiClass;
+class Record;
+class RecordKeeper;
+class RecordVal;
+class Resolver;
+class StringInit;
+class TypedInit;
+
+//===----------------------------------------------------------------------===//
+//  Type Classes
+//===----------------------------------------------------------------------===//
+
+class RecTy {
+public:
+  /// \brief Subclass discriminator (for dyn_cast<> et al.)
+  enum RecTyKind {
+    BitRecTyKind,
+    BitsRecTyKind,
+    CodeRecTyKind,
+    IntRecTyKind,
+    StringRecTyKind,
+    ListRecTyKind,
+    DagRecTyKind,
+    RecordRecTyKind
+  };
+
+private:
+  RecTyKind Kind;
+  ListRecTy *ListTy = nullptr;
+
+public:
+  RecTy(RecTyKind K) : Kind(K) {}
+  virtual ~RecTy() = default;
+
+  RecTyKind getRecTyKind() const { return Kind; }
+
+  virtual std::string getAsString() const = 0;
+  void print(raw_ostream &OS) const { OS << getAsString(); }
+  void dump() const;
+
+  /// Return true if all values of 'this' type can be converted to the specified
+  /// type.
+  virtual bool typeIsConvertibleTo(const RecTy *RHS) const;
+
+  /// Return true if 'this' type is equal to or a subtype of RHS. For example,
+  /// a bit set is not an int, but they are convertible.
+  virtual bool typeIsA(const RecTy *RHS) const;
+
+  /// Returns the type representing list<this>.
+  ListRecTy *getListTy();
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
+  Ty.print(OS);
+  return OS;
+}
+
+/// 'bit' - Represent a single bit
+class BitRecTy : public RecTy {
+  static BitRecTy Shared;
+
+  BitRecTy() : RecTy(BitRecTyKind) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == BitRecTyKind;
+  }
+
+  static BitRecTy *get() { return &Shared; }
+
+  std::string getAsString() const override { return "bit"; }
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// 'bits<n>' - Represent a fixed number of bits
+class BitsRecTy : public RecTy {
+  unsigned Size;
+
+  explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == BitsRecTyKind;
+  }
+
+  static BitsRecTy *get(unsigned Sz);
+
+  unsigned getNumBits() const { return Size; }
+
+  std::string getAsString() const override;
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+
+  bool typeIsA(const RecTy *RHS) const override;
+};
+
+/// 'code' - Represent a code fragment
+class CodeRecTy : public RecTy {
+  static CodeRecTy Shared;
+
+  CodeRecTy() : RecTy(CodeRecTyKind) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == CodeRecTyKind;
+  }
+
+  static CodeRecTy *get() { return &Shared; }
+
+  std::string getAsString() const override { return "code"; }
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// 'int' - Represent an integer value of no particular size
+class IntRecTy : public RecTy {
+  static IntRecTy Shared;
+
+  IntRecTy() : RecTy(IntRecTyKind) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == IntRecTyKind;
+  }
+
+  static IntRecTy *get() { return &Shared; }
+
+  std::string getAsString() const override { return "int"; }
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// 'string' - Represent a string value
+class StringRecTy : public RecTy {
+  static StringRecTy Shared;
+
+  StringRecTy() : RecTy(StringRecTyKind) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == StringRecTyKind;
+  }
+
+  static StringRecTy *get() { return &Shared; }
+
+  std::string getAsString() const override;
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// 'list<Ty>' - Represent a list of values, all of which must be of
+/// the specified type.
+class ListRecTy : public RecTy {
+  friend ListRecTy *RecTy::getListTy();
+
+  RecTy *Ty;
+
+  explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == ListRecTyKind;
+  }
+
+  static ListRecTy *get(RecTy *T) { return T->getListTy(); }
+  RecTy *getElementType() const { return Ty; }
+
+  std::string getAsString() const override;
+
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+
+  bool typeIsA(const RecTy *RHS) const override;
+};
+
+/// 'dag' - Represent a dag fragment
+class DagRecTy : public RecTy {
+  static DagRecTy Shared;
+
+  DagRecTy() : RecTy(DagRecTyKind) {}
+
+public:
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == DagRecTyKind;
+  }
+
+  static DagRecTy *get() { return &Shared; }
+
+  std::string getAsString() const override;
+};
+
+/// '[classname]' - Type of record values that have zero or more superclasses.
+///
+/// The list of superclasses is non-redundant, i.e. only contains classes that
+/// are not the superclass of some other listed class.
+class RecordRecTy final : public RecTy, public FoldingSetNode,
+                          public TrailingObjects<RecordRecTy, Record *> {
+  friend class Record;
+
+  unsigned NumClasses;
+
+  explicit RecordRecTy(unsigned Num)
+      : RecTy(RecordRecTyKind), NumClasses(Num) {}
+
+public:
+  RecordRecTy(const RecordRecTy &) = delete;
+  RecordRecTy &operator=(const RecordRecTy &) = delete;
+
+  // Do not use sized deallocation due to trailing objects.
+  void operator delete(void *p) { ::operator delete(p); }
+
+  static bool classof(const RecTy *RT) {
+    return RT->getRecTyKind() == RecordRecTyKind;
+  }
+
+  /// Get the record type with the given non-redundant list of superclasses.
+  static RecordRecTy *get(ArrayRef<Record *> Classes);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  ArrayRef<Record *> getClasses() const {
+    return makeArrayRef(getTrailingObjects<Record *>(), NumClasses);
+  }
+
+  using const_record_iterator = Record * const *;
+
+  const_record_iterator classes_begin() const { return getClasses().begin(); }
+  const_record_iterator classes_end() const { return getClasses().end(); }
+
+  std::string getAsString() const override;
+
+  bool isSubClassOf(Record *Class) const;
+  bool typeIsConvertibleTo(const RecTy *RHS) const override;
+
+  bool typeIsA(const RecTy *RHS) const override;
+};
+
+/// Find a common type that T1 and T2 convert to.
+/// Return nullptr if no such type exists.
+RecTy *resolveTypes(RecTy *T1, RecTy *T2);
+
+//===----------------------------------------------------------------------===//
+//  Initializer Classes
+//===----------------------------------------------------------------------===//
+
+class Init {
+protected:
+  /// \brief Discriminator enum (for isa<>, dyn_cast<>, et al.)
+  ///
+  /// This enum is laid out by a preorder traversal of the inheritance
+  /// hierarchy, and does not contain an entry for abstract classes, as per
+  /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst.
+  ///
+  /// We also explicitly include "first" and "last" values for each
+  /// interior node of the inheritance tree, to make it easier to read the
+  /// corresponding classof().
+  ///
+  /// We could pack these a bit tighter by not having the IK_FirstXXXInit
+  /// and IK_LastXXXInit be their own values, but that would degrade
+  /// readability for really no benefit.
+  enum InitKind : uint8_t {
+    IK_First, // unused; silence a spurious warning
+    IK_FirstTypedInit,
+    IK_BitInit,
+    IK_BitsInit,
+    IK_CodeInit,
+    IK_DagInit,
+    IK_DefInit,
+    IK_FieldInit,
+    IK_IntInit,
+    IK_ListInit,
+    IK_FirstOpInit,
+    IK_BinOpInit,
+    IK_TernOpInit,
+    IK_UnOpInit,
+    IK_LastOpInit,
+    IK_FoldOpInit,
+    IK_IsAOpInit,
+    IK_StringInit,
+    IK_VarInit,
+    IK_VarListElementInit,
+    IK_VarBitInit,
+    IK_VarDefInit,
+    IK_LastTypedInit,
+    IK_UnsetInit
+  };
+
+private:
+  const InitKind Kind;
+
+protected:
+  uint8_t Opc; // Used by UnOpInit, BinOpInit, and TernOpInit
+
+private:
+  virtual void anchor();
+
+public:
+  InitKind getKind() const { return Kind; }
+
+protected:
+  explicit Init(InitKind K, uint8_t Opc = 0) : Kind(K), Opc(Opc) {}
+
+public:
+  Init(const Init &) = delete;
+  Init &operator=(const Init &) = delete;
+  virtual ~Init() = default;
+
+  /// This virtual method should be overridden by values that may
+  /// not be completely specified yet.
+  virtual bool isComplete() const { return true; }
+
+  /// Is this a concrete and fully resolved value without any references or
+  /// stuck operations? Unset values are concrete.
+  virtual bool isConcrete() const { return false; }
+
+  /// Print out this value.
+  void print(raw_ostream &OS) const { OS << getAsString(); }
+
+  /// Convert this value to a string form.
+  virtual std::string getAsString() const = 0;
+  /// Convert this value to a string form,
+  /// without adding quote markers.  This primarily affects
+  /// StringInits where we will not surround the string value with
+  /// quotes.
+  virtual std::string getAsUnquotedString() const { return getAsString(); }
+
+  /// Debugging method that may be called through a debugger, just
+  /// invokes print on stderr.
+  void dump() const;
+
+  /// If this initializer is convertible to Ty, return an initializer whose
+  /// type is-a Ty, generating a !cast operation if required. Otherwise, return
+  /// nullptr.
+  virtual Init *getCastTo(RecTy *Ty) const = 0;
+
+  /// Convert to an initializer whose type is-a Ty, or return nullptr if this
+  /// is not possible (this can happen if the initializer's type is convertible
+  /// to Ty, but there are unresolved references).
+  virtual Init *convertInitializerTo(RecTy *Ty) const = 0;
+
+  /// This method is used to implement the bitrange
+  /// selection operator.  Given an initializer, it selects the specified bits
+  /// out, returning them as a new init of bits type.  If it is not legal to use
+  /// the bit subscript operator on this initializer, return null.
+  virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
+    return nullptr;
+  }
+
+  /// This method is used to implement the list slice
+  /// selection operator.  Given an initializer, it selects the specified list
+  /// elements, returning them as a new init of list type.  If it is not legal
+  /// to take a slice of this, return null.
+  virtual Init *convertInitListSlice(ArrayRef<unsigned> Elements) const {
+    return nullptr;
+  }
+
+  /// This method is used to implement the FieldInit class.
+  /// Implementors of this method should return the type of the named field if
+  /// they are of record type.
+  virtual RecTy *getFieldType(StringInit *FieldName) const {
+    return nullptr;
+  }
+
+  /// This method is used by classes that refer to other
+  /// variables which may not be defined at the time the expression is formed.
+  /// If a value is set for the variable later, this method will be called on
+  /// users of the value to allow the value to propagate out.
+  virtual Init *resolveReferences(Resolver &R) const {
+    return const_cast<Init *>(this);
+  }
+
+  /// This method is used to return the initializer for the specified
+  /// bit.
+  virtual Init *getBit(unsigned Bit) const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
+  I.print(OS); return OS;
+}
+
+/// This is the common superclass of initializers that have a specific,
+/// explicit type.
+class TypedInit : public Init {
+  RecTy *Ty;
+
+protected:
+  explicit TypedInit(InitKind K, RecTy *T, uint8_t Opc = 0)
+    : Init(K, Opc), Ty(T) {}
+
+public:
+  TypedInit(const TypedInit &) = delete;
+  TypedInit &operator=(const TypedInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() >= IK_FirstTypedInit &&
+           I->getKind() <= IK_LastTypedInit;
+  }
+
+  RecTy *getType() const { return Ty; }
+
+  Init *getCastTo(RecTy *Ty) const override;
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+  Init *convertInitListSlice(ArrayRef<unsigned> Elements) const override;
+
+  /// This method is used to implement the FieldInit class.
+  /// Implementors of this method should return the type of the named field if
+  /// they are of record type.
+  ///
+  RecTy *getFieldType(StringInit *FieldName) const override;
+};
+
+/// '?' - Represents an uninitialized value
+class UnsetInit : public Init {
+  UnsetInit() : Init(IK_UnsetInit) {}
+
+public:
+  UnsetInit(const UnsetInit &) = delete;
+  UnsetInit &operator=(const UnsetInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_UnsetInit;
+  }
+
+  static UnsetInit *get();
+
+  Init *getCastTo(RecTy *Ty) const override;
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  Init *getBit(unsigned Bit) const override {
+    return const_cast<UnsetInit*>(this);
+  }
+
+  bool isComplete() const override { return false; }
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override { return "?"; }
+};
+
+/// 'true'/'false' - Represent a concrete initializer for a bit.
+class BitInit final : public TypedInit {
+  bool Value;
+
+  explicit BitInit(bool V) : TypedInit(IK_BitInit, BitRecTy::get()), Value(V) {}
+
+public:
+  BitInit(const BitInit &) = delete;
+  BitInit &operator=(const BitInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_BitInit;
+  }
+
+  static BitInit *get(bool V);
+
+  bool getValue() const { return Value; }
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  Init *getBit(unsigned Bit) const override {
+    assert(Bit < 1 && "Bit index out of range!");
+    return const_cast<BitInit*>(this);
+  }
+
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override { return Value ? "1" : "0"; }
+};
+
+/// '{ a, b, c }' - Represents an initializer for a BitsRecTy value.
+/// It contains a vector of bits, whose size is determined by the type.
+class BitsInit final : public TypedInit, public FoldingSetNode,
+                       public TrailingObjects<BitsInit, Init *> {
+  unsigned NumBits;
+
+  BitsInit(unsigned N)
+    : TypedInit(IK_BitsInit, BitsRecTy::get(N)), NumBits(N) {}
+
+public:
+  BitsInit(const BitsInit &) = delete;
+  BitsInit &operator=(const BitsInit &) = delete;
+
+  // Do not use sized deallocation due to trailing objects.
+  void operator delete(void *p) { ::operator delete(p); }
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_BitsInit;
+  }
+
+  static BitsInit *get(ArrayRef<Init *> Range);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  unsigned getNumBits() const { return NumBits; }
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+
+  bool isComplete() const override {
+    for (unsigned i = 0; i != getNumBits(); ++i)
+      if (!getBit(i)->isComplete()) return false;
+    return true;
+  }
+
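+  /// Return true if no bit is complete (i.e. every bit is still unset or
+  /// otherwise unresolved).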
+  bool allInComplete() const {
+    for (unsigned i = 0; i != getNumBits(); ++i)
+      if (getBit(i)->isComplete()) return false;
+    return true;
+  }
+
+  bool isConcrete() const override;
+  std::string getAsString() const override;
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned Bit) const override {
+    assert(Bit < NumBits && "Bit index out of range!");
+    return getTrailingObjects<Init *>()[Bit];
+  }
+};
+
+/// '7' - Represent an initialization by a literal integer value.
+class IntInit : public TypedInit {
+  int64_t Value;
+
+  explicit IntInit(int64_t V)
+    : TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
+
+public:
+  IntInit(const IntInit &) = delete;
+  IntInit &operator=(const IntInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_IntInit;
+  }
+
+  static IntInit *get(int64_t V);
+
+  int64_t getValue() const { return Value; }
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override;
+
+  Init *getBit(unsigned Bit) const override {
+    return BitInit::get((Value & (1ULL << Bit)) != 0);
+  }
+};
+
+/// "foo" - Represent an initialization by a string value.
+class StringInit : public TypedInit {
+  StringRef Value;
+
+  explicit StringInit(StringRef V)
+      : TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
+
+public:
+  StringInit(const StringInit &) = delete;
+  StringInit &operator=(const StringInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_StringInit;
+  }
+
+  static StringInit *get(StringRef);
+
+  StringRef getValue() const { return Value; }
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override { return "\"" + Value.str() + "\""; }
+
+  std::string getAsUnquotedString() const override { return Value; }
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off string");
+  }
+};
+
+class CodeInit : public TypedInit {
+  StringRef Value;
+
+  explicit CodeInit(StringRef V)
+      : TypedInit(IK_CodeInit, static_cast<RecTy *>(CodeRecTy::get())),
+        Value(V) {}
+
+public:
+  CodeInit(const CodeInit &) = delete;
+  CodeInit &operator=(const CodeInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_CodeInit;
+  }
+
+  static CodeInit *get(StringRef);
+
+  StringRef getValue() const { return Value; }
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override {
+    return "[{" + Value.str() + "}]";
+  }
+
+  std::string getAsUnquotedString() const override { return Value; }
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off string");
+  }
+};
+
+/// [AL, AH, CL] - Represent a list of defs
+///
+class ListInit final : public TypedInit, public FoldingSetNode,
+                       public TrailingObjects<ListInit, Init *> {
+  unsigned NumValues;
+
+public:
+  using const_iterator = Init *const *;
+
+private:
+  explicit ListInit(unsigned N, RecTy *EltTy)
+    : TypedInit(IK_ListInit, ListRecTy::get(EltTy)), NumValues(N) {}
+
+public:
+  ListInit(const ListInit &) = delete;
+  ListInit &operator=(const ListInit &) = delete;
+
+  // Do not use sized deallocation due to trailing objects.
+  void operator delete(void *p) { ::operator delete(p); }
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_ListInit;
+  }
+  static ListInit *get(ArrayRef<Init *> Range, RecTy *EltTy);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  Init *getElement(unsigned i) const {
+    assert(i < NumValues && "List element index out of range!");
+    return getTrailingObjects<Init *>()[i];
+  }
+  RecTy *getElementType() const {
+    return cast<ListRecTy>(getType())->getElementType();
+  }
+
+  Record *getElementAsRecord(unsigned i) const;
+
+  Init *convertInitListSlice(ArrayRef<unsigned> Elements) const override;
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  /// This method is used by classes that refer to other
+  /// variables which may not be defined at the time the expression is formed.
+  /// If a value is set for the variable later, this method will be called on
+  /// users of the value to allow the value to propagate out.
+  ///
+  Init *resolveReferences(Resolver &R) const override;
+
+  bool isConcrete() const override;
+  std::string getAsString() const override;
+
+  ArrayRef<Init*> getValues() const {
+    return makeArrayRef(getTrailingObjects<Init *>(), NumValues);
+  }
+
+  const_iterator begin() const { return getTrailingObjects<Init *>(); }
+  const_iterator end  () const { return begin() + NumValues; }
+
+  size_t         size () const { return NumValues;  }
+  bool           empty() const { return NumValues == 0; }
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off list");
+  }
+};
+
+/// Base class for operators
+///
+class OpInit : public TypedInit {
+protected:
+  explicit OpInit(InitKind K, RecTy *Type, uint8_t Opc)
+    : TypedInit(K, Type, Opc) {}
+
+public:
+  OpInit(const OpInit &) = delete;
+  OpInit &operator=(const OpInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() >= IK_FirstOpInit &&
+           I->getKind() <= IK_LastOpInit;
+  }
+
+  // Clone - Clone this operator, replacing arguments with the new list
+  virtual OpInit *clone(ArrayRef<Init *> Operands) const = 0;
+
+  virtual unsigned getNumOperands() const = 0;
+  virtual Init *getOperand(unsigned i) const = 0;
+
+  Init *getBit(unsigned Bit) const override;
+};
+
+/// !op (X) - Transform an init.
+///
+class UnOpInit : public OpInit, public FoldingSetNode {
+public:
+  enum UnaryOp : uint8_t { CAST, HEAD, TAIL, SIZE, EMPTY };
+
+private:
+  Init *LHS;
+
+  UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
+    : OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}
+
+public:
+  UnOpInit(const UnOpInit &) = delete;
+  UnOpInit &operator=(const UnOpInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_UnOpInit;
+  }
+
+  static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  // Clone - Clone this operator, replacing arguments with the new list
+  OpInit *clone(ArrayRef<Init *> Operands) const override {
+    assert(Operands.size() == 1 &&
+           "Wrong number of operands for unary operation");
+    return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
+  }
+
+  unsigned getNumOperands() const override { return 1; }
+
+  Init *getOperand(unsigned i) const override {
+    assert(i == 0 && "Invalid operand id for unary operator");
+    return getOperand();
+  }
+
+  UnaryOp getOpcode() const { return (UnaryOp)Opc; }
+  Init *getOperand() const { return LHS; }
+
+  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // possible to fold.
+  Init *Fold(Record *CurRec, bool IsFinal = false) const;
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  std::string getAsString() const override;
+};
+
+/// !op (X, Y) - Combine two inits.
+class BinOpInit : public OpInit, public FoldingSetNode {
+public:
+  enum BinaryOp : uint8_t { ADD, AND, OR, SHL, SRA, SRL, LISTCONCAT,
+                            STRCONCAT, CONCAT, EQ, NE, LE, LT, GE, GT };
+
+private:
+  Init *LHS, *RHS;
+
+  BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
+      OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}
+
+public:
+  BinOpInit(const BinOpInit &) = delete;
+  BinOpInit &operator=(const BinOpInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_BinOpInit;
+  }
+
+  static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
+                        RecTy *Type);
+  static Init *getStrConcat(Init *lhs, Init *rhs);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  // Clone - Clone this operator, replacing arguments with the new list
+  OpInit *clone(ArrayRef<Init *> Operands) const override {
+    assert(Operands.size() == 2 &&
+           "Wrong number of operands for binary operation");
+    return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
+  }
+
+  unsigned getNumOperands() const override { return 2; }
+  Init *getOperand(unsigned i) const override {
+    switch (i) {
+    default: llvm_unreachable("Invalid operand id for binary operator");
+    case 0: return getLHS();
+    case 1: return getRHS();
+    }
+  }
+
+  BinaryOp getOpcode() const { return (BinaryOp)Opc; }
+  Init *getLHS() const { return LHS; }
+  Init *getRHS() const { return RHS; }
+
+  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // possible to fold.
+  Init *Fold(Record *CurRec) const;
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  std::string getAsString() const override;
+};
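+
+// A folding sketch: build !add(2, 3) with the interfaces above and constant-
+// fold it (a null record context is assumed to be acceptable here):
+//   Init *Sum = BinOpInit::get(BinOpInit::ADD, IntInit::get(2),
+//                              IntInit::get(3), IntRecTy::get())
+//                   ->Fold(/*CurRec=*/nullptr);
+//   // On success, Sum is the IntInit for 5.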
+
+/// !op (X, Y, Z) - Combine three inits.
+class TernOpInit : public OpInit, public FoldingSetNode {
+public:
+  enum TernaryOp : uint8_t { SUBST, FOREACH, IF, DAG };
+
+private:
+  Init *LHS, *MHS, *RHS;
+
+  TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
+             RecTy *Type) :
+      OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+
+public:
+  TernOpInit(const TernOpInit &) = delete;
+  TernOpInit &operator=(const TernOpInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_TernOpInit;
+  }
+
+  static TernOpInit *get(TernaryOp opc, Init *lhs,
+                         Init *mhs, Init *rhs,
+                         RecTy *Type);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  // Clone - Clone this operator, replacing arguments with the new list
+  OpInit *clone(ArrayRef<Init *> Operands) const override {
+    assert(Operands.size() == 3 &&
+           "Wrong number of operands for ternary operation");
+    return TernOpInit::get(getOpcode(), Operands[0], Operands[1], Operands[2],
+                           getType());
+  }
+
+  unsigned getNumOperands() const override { return 3; }
+  Init *getOperand(unsigned i) const override {
+    switch (i) {
+    default: llvm_unreachable("Invalid operand id for ternary operator");
+    case 0: return getLHS();
+    case 1: return getMHS();
+    case 2: return getRHS();
+    }
+  }
+
+  TernaryOp getOpcode() const { return (TernaryOp)Opc; }
+  Init *getLHS() const { return LHS; }
+  Init *getMHS() const { return MHS; }
+  Init *getRHS() const { return RHS; }
+
+  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // possible to fold.
+  Init *Fold(Record *CurRec) const;
+
+  bool isComplete() const override {
+    return LHS->isComplete() && MHS->isComplete() && RHS->isComplete();
+  }
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  std::string getAsString() const override;
+};
+
+/// !foldl (start, lst, a, b, expr) - Fold over a list.
+class FoldOpInit : public TypedInit, public FoldingSetNode {
+private:
+  Init *Start;
+  Init *List;
+  Init *A;
+  Init *B;
+  Init *Expr;
+
+  FoldOpInit(Init *Start, Init *List, Init *A, Init *B, Init *Expr, RecTy *Type)
+      : TypedInit(IK_FoldOpInit, Type), Start(Start), List(List), A(A), B(B),
+        Expr(Expr) {}
+
+public:
+  FoldOpInit(const FoldOpInit &) = delete;
+  FoldOpInit &operator=(const FoldOpInit &) = delete;
+
+  static bool classof(const Init *I) { return I->getKind() == IK_FoldOpInit; }
+
+  static FoldOpInit *get(Init *Start, Init *List, Init *A, Init *B, Init *Expr,
+                         RecTy *Type);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // possible to fold.
+  Init *Fold(Record *CurRec) const;
+
+  bool isComplete() const override { return false; }
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned Bit) const override;
+
+  std::string getAsString() const override;
+};
+
+/// !isa<type>(expr) - Dynamically determine the type of an expression.
+class IsAOpInit : public TypedInit, public FoldingSetNode {
+private:
+  RecTy *CheckType;
+  Init *Expr;
+
+  IsAOpInit(RecTy *CheckType, Init *Expr)
+      : TypedInit(IK_IsAOpInit, IntRecTy::get()), CheckType(CheckType),
+        Expr(Expr) {}
+
+public:
+  IsAOpInit(const IsAOpInit &) = delete;
+  IsAOpInit &operator=(const IsAOpInit &) = delete;
+
+  static bool classof(const Init *I) { return I->getKind() == IK_IsAOpInit; }
+
+  static IsAOpInit *get(RecTy *CheckType, Init *Expr);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // possible to fold.
+  Init *Fold() const;
+
+  bool isComplete() const override { return false; }
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned Bit) const override;
+
+  std::string getAsString() const override;
+};
+
+/// 'Opcode' - Represent a reference to an entire variable object.
+class VarInit : public TypedInit {
+  Init *VarName;
+
+  explicit VarInit(Init *VN, RecTy *T)
+      : TypedInit(IK_VarInit, T), VarName(VN) {}
+
+public:
+  VarInit(const VarInit &) = delete;
+  VarInit &operator=(const VarInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_VarInit;
+  }
+
+  static VarInit *get(StringRef VN, RecTy *T);
+  static VarInit *get(Init *VN, RecTy *T);
+
+  StringRef getName() const;
+  Init *getNameInit() const { return VarName; }
+
+  std::string getNameInitAsString() const {
+    return getNameInit()->getAsUnquotedString();
+  }
+
+  /// This method is used by classes that refer to other
+  /// variables which may not be defined at the time the expression is formed.
+  /// If a value is set for the variable later, this method will be called on
+  /// users of the value to allow the value to propagate out.
+  ///
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned Bit) const override;
+
+  std::string getAsString() const override { return getName(); }
+};
+
+/// Opcode{0} - Represent access to one bit of a variable or field.
+class VarBitInit final : public TypedInit {
+  TypedInit *TI;
+  unsigned Bit;
+
+  VarBitInit(TypedInit *T, unsigned B)
+      : TypedInit(IK_VarBitInit, BitRecTy::get()), TI(T), Bit(B) {
+    assert(T->getType() &&
+           (isa<IntRecTy>(T->getType()) ||
+            (isa<BitsRecTy>(T->getType()) &&
+             cast<BitsRecTy>(T->getType())->getNumBits() > B)) &&
+           "Illegal VarBitInit expression!");
+  }
+
+public:
+  VarBitInit(const VarBitInit &) = delete;
+  VarBitInit &operator=(const VarBitInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_VarBitInit;
+  }
+
+  static VarBitInit *get(TypedInit *T, unsigned B);
+
+  Init *getBitVar() const { return TI; }
+  unsigned getBitNum() const { return Bit; }
+
+  std::string getAsString() const override;
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned B) const override {
+    assert(B < 1 && "Bit index out of range!");
+    return const_cast<VarBitInit*>(this);
+  }
+};
+
+/// List[4] - Represent access to one element of a var or
+/// field.
+class VarListElementInit : public TypedInit {
+  TypedInit *TI;
+  unsigned Element;
+
+  VarListElementInit(TypedInit *T, unsigned E)
+      : TypedInit(IK_VarListElementInit,
+                  cast<ListRecTy>(T->getType())->getElementType()),
+        TI(T), Element(E) {
+    assert(T->getType() && isa<ListRecTy>(T->getType()) &&
+           "Illegal VarBitInit expression!");
+  }
+
+public:
+  VarListElementInit(const VarListElementInit &) = delete;
+  VarListElementInit &operator=(const VarListElementInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_VarListElementInit;
+  }
+
+  static VarListElementInit *get(TypedInit *T, unsigned E);
+
+  TypedInit *getVariable() const { return TI; }
+  unsigned getElementNum() const { return Element; }
+
+  std::string getAsString() const override;
+  Init *resolveReferences(Resolver &R) const override;
+
+  Init *getBit(unsigned Bit) const override;
+};
+
+/// AL - Represent a reference to a 'def' in the description
+class DefInit : public TypedInit {
+  friend class Record;
+
+  Record *Def;
+
+  explicit DefInit(Record *D);
+
+public:
+  DefInit(const DefInit &) = delete;
+  DefInit &operator=(const DefInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_DefInit;
+  }
+
+  static DefInit *get(Record*);
+
+  Init *convertInitializerTo(RecTy *Ty) const override;
+
+  Record *getDef() const { return Def; }
+
+  RecTy *getFieldType(StringInit *FieldName) const override;
+
+  bool isConcrete() const override { return true; }
+  std::string getAsString() const override;
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off def");
+  }
+};
+
+/// classname<targs...> - Represent an uninstantiated anonymous class
+/// instantiation.
+class VarDefInit final : public TypedInit, public FoldingSetNode,
+                         public TrailingObjects<VarDefInit, Init *> {
+  Record *Class;
+  DefInit *Def = nullptr; // after instantiation
+  unsigned NumArgs;
+
+  explicit VarDefInit(Record *Class, unsigned N)
+    : TypedInit(IK_VarDefInit, RecordRecTy::get(Class)), Class(Class), NumArgs(N) {}
+
+  DefInit *instantiate();
+
+public:
+  VarDefInit(const VarDefInit &) = delete;
+  VarDefInit &operator=(const VarDefInit &) = delete;
+
+  // Do not use sized deallocation due to trailing objects.
+  void operator delete(void *p) { ::operator delete(p); }
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_VarDefInit;
+  }
+  static VarDefInit *get(Record *Class, ArrayRef<Init *> Args);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  Init *resolveReferences(Resolver &R) const override;
+  Init *Fold() const;
+
+  std::string getAsString() const override;
+
+  Init *getArg(unsigned i) const {
+    assert(i < NumArgs && "Argument index out of range!");
+    return getTrailingObjects<Init *>()[i];
+  }
+
+  using const_iterator = Init *const *;
+
+  const_iterator args_begin() const { return getTrailingObjects<Init *>(); }
+  const_iterator args_end  () const { return args_begin() + NumArgs; }
+
+  size_t         args_size () const { return NumArgs; }
+  bool           args_empty() const { return NumArgs == 0; }
+
+  ArrayRef<Init *> args() const { return makeArrayRef(args_begin(), NumArgs); }
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off anonymous def");
+  }
+};
+
+/// X.Y - Represent a reference to a subfield of a variable
+class FieldInit : public TypedInit {
+  Init *Rec;                // Record we are referring to
+  StringInit *FieldName;    // Field we are accessing
+
+  FieldInit(Init *R, StringInit *FN)
+      : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
+    assert(getType() && "FieldInit with non-record type!");
+  }
+
+public:
+  FieldInit(const FieldInit &) = delete;
+  FieldInit &operator=(const FieldInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_FieldInit;
+  }
+
+  static FieldInit *get(Init *R, StringInit *FN);
+
+  Init *getRecord() const { return Rec; }
+  StringInit *getFieldName() const { return FieldName; }
+
+  Init *getBit(unsigned Bit) const override;
+
+  Init *resolveReferences(Resolver &R) const override;
+  Init *Fold(Record *CurRec) const;
+
+  std::string getAsString() const override {
+    return Rec->getAsString() + "." + FieldName->getValue().str();
+  }
+};
+
+/// (v a, b) - Represent a DAG tree value.  DAG inits are required
+/// to have at least one value, followed by a (possibly empty) list of
+/// arguments.  Each argument can have a name associated with it.
+class DagInit final : public TypedInit, public FoldingSetNode,
+                      public TrailingObjects<DagInit, Init *, StringInit *> {
+  friend TrailingObjects;
+
+  Init *Val;
+  StringInit *ValName;
+  unsigned NumArgs;
+  unsigned NumArgNames;
+
+  DagInit(Init *V, StringInit *VN, unsigned NumArgs, unsigned NumArgNames)
+      : TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
+        NumArgs(NumArgs), NumArgNames(NumArgNames) {}
+
+  size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }
+
+public:
+  DagInit(const DagInit &) = delete;
+  DagInit &operator=(const DagInit &) = delete;
+
+  static bool classof(const Init *I) {
+    return I->getKind() == IK_DagInit;
+  }
+
+  static DagInit *get(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
+                      ArrayRef<StringInit*> NameRange);
+  static DagInit *get(Init *V, StringInit *VN,
+                      ArrayRef<std::pair<Init*, StringInit*>> Args);
+
+  void Profile(FoldingSetNodeID &ID) const;
+
+  Init *getOperator() const { return Val; }
+
+  StringInit *getName() const { return ValName; }
+
+  StringRef getNameStr() const {
+    return ValName ? ValName->getValue() : StringRef();
+  }
+
+  unsigned getNumArgs() const { return NumArgs; }
+
+  Init *getArg(unsigned Num) const {
+    assert(Num < NumArgs && "Arg number out of range!");
+    return getTrailingObjects<Init *>()[Num];
+  }
+
+  StringInit *getArgName(unsigned Num) const {
+    assert(Num < NumArgNames && "Arg number out of range!");
+    return getTrailingObjects<StringInit *>()[Num];
+  }
+
+  StringRef getArgNameStr(unsigned Num) const {
+    StringInit *Init = getArgName(Num);
+    return Init ? Init->getValue() : StringRef();
+  }
+
+  ArrayRef<Init *> getArgs() const {
+    return makeArrayRef(getTrailingObjects<Init *>(), NumArgs);
+  }
+
+  ArrayRef<StringInit *> getArgNames() const {
+    return makeArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
+  }
+
+  Init *resolveReferences(Resolver &R) const override;
+
+  bool isConcrete() const override;
+  std::string getAsString() const override;
+
+  using const_arg_iterator = SmallVectorImpl<Init*>::const_iterator;
+  using const_name_iterator = SmallVectorImpl<StringInit*>::const_iterator;
+
+  inline const_arg_iterator  arg_begin() const { return getArgs().begin(); }
+  inline const_arg_iterator  arg_end  () const { return getArgs().end(); }
+
+  inline size_t              arg_size () const { return NumArgs; }
+  inline bool                arg_empty() const { return NumArgs == 0; }
+
+  inline const_name_iterator name_begin() const { return getArgNames().begin();}
+  inline const_name_iterator name_end  () const { return getArgNames().end(); }
+
+  inline size_t              name_size () const { return NumArgNames; }
+  inline bool                name_empty() const { return NumArgNames == 0; }
+
+  Init *getBit(unsigned Bit) const override {
+    llvm_unreachable("Illegal bit reference off dag");
+  }
+};
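+
+// A construction sketch: build the dag (op a, b) with unnamed arguments, where
+// Op, A and B are previously created Inits (hypothetical names):
+//   DagInit *D = DagInit::get(Op, /*ValName=*/nullptr,
+//                             {A, B}, {nullptr, nullptr});
+//   // D->getNumArgs() == 2 and both argument names are null.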
+
+//===----------------------------------------------------------------------===//
+//  High-Level Classes
+//===----------------------------------------------------------------------===//
+
+class RecordVal {
+  friend class Record;
+
+  Init *Name;
+  PointerIntPair<RecTy *, 1, bool> TyAndPrefix;
+  Init *Value;
+
+public:
+  RecordVal(Init *N, RecTy *T, bool P);
+
+  StringRef getName() const;
+  Init *getNameInit() const { return Name; }
+
+  std::string getNameInitAsString() const {
+    return getNameInit()->getAsUnquotedString();
+  }
+
+  bool getPrefix() const { return TyAndPrefix.getInt(); }
+  RecTy *getType() const { return TyAndPrefix.getPointer(); }
+  Init *getValue() const { return Value; }
+
+  bool setValue(Init *V);
+
+  void dump() const;
+  void print(raw_ostream &OS, bool PrintSem = true) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RecordVal &RV) {
+  RV.print(OS << "  ");
+  return OS;
+}
+
+class Record {
+  static unsigned LastID;
+
+  Init *Name;
+  // Location where record was instantiated, followed by the location of
+  // multiclass prototypes used.
+  SmallVector<SMLoc, 4> Locs;
+  SmallVector<Init *, 0> TemplateArgs;
+  SmallVector<RecordVal, 0> Values;
+
+  // All superclasses in the inheritance forest in reverse preorder (yes, it
+  // must be a forest; diamond-shaped inheritance is not allowed).
+  SmallVector<std::pair<Record *, SMRange>, 0> SuperClasses;
+
+  // Tracks Record instances. Not owned by Record.
+  RecordKeeper &TrackedRecords;
+
+  DefInit *TheInit = nullptr;
+
+  // Unique record ID.
+  unsigned ID;
+
+  bool IsAnonymous;
+
+  void init();
+  void checkName();
+
+public:
+  // Constructs a record.
+  explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
+                  bool Anonymous = false) :
+    Name(N), Locs(locs.begin(), locs.end()), TrackedRecords(records),
+    ID(LastID++), IsAnonymous(Anonymous) {
+    init();
+  }
+
+  explicit Record(StringRef N, ArrayRef<SMLoc> locs, RecordKeeper &records)
+      : Record(StringInit::get(N), locs, records) {}
+
+  // When copy-constructing a Record, we must still guarantee a globally unique
+  // ID number.  Don't copy TheInit either since it's owned by the original
+  // record. All other fields can be copied normally.
+  Record(const Record &O) :
+    Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs),
+    Values(O.Values), SuperClasses(O.SuperClasses),
+    TrackedRecords(O.TrackedRecords), ID(LastID++),
+    IsAnonymous(O.IsAnonymous) { }
+
+  static unsigned getNewUID() { return LastID++; }
+
+  unsigned getID() const { return ID; }
+
+  StringRef getName() const { return cast<StringInit>(Name)->getValue(); }
+
+  Init *getNameInit() const {
+    return Name;
+  }
+
+  const std::string getNameInitAsString() const {
+    return getNameInit()->getAsUnquotedString();
+  }
+
+  void setName(Init *Name);      // Also updates RecordKeeper.
+
+  ArrayRef<SMLoc> getLoc() const { return Locs; }
+  void appendLoc(SMLoc Loc) { Locs.push_back(Loc); }
+
+  // Make the type that this record should have based on its superclasses.
+  RecordRecTy *getType();
+
+  /// Get the corresponding DefInit.
+  DefInit *getDefInit();
+
+  ArrayRef<Init *> getTemplateArgs() const {
+    return TemplateArgs;
+  }
+
+  ArrayRef<RecordVal> getValues() const { return Values; }
+
+  ArrayRef<std::pair<Record *, SMRange>>  getSuperClasses() const {
+    return SuperClasses;
+  }
+
+  /// Append the direct super classes of this record to Classes.
+  void getDirectSuperClasses(SmallVectorImpl<Record *> &Classes) const;
+
+  bool isTemplateArg(Init *Name) const {
+    for (Init *TA : TemplateArgs)
+      if (TA == Name) return true;
+    return false;
+  }
+
+  const RecordVal *getValue(const Init *Name) const {
+    for (const RecordVal &Val : Values)
+      if (Val.Name == Name) return &Val;
+    return nullptr;
+  }
+
+  const RecordVal *getValue(StringRef Name) const {
+    return getValue(StringInit::get(Name));
+  }
+
+  RecordVal *getValue(const Init *Name) {
+    return const_cast<RecordVal *>(static_cast<const Record *>(this)->getValue(Name));
+  }
+
+  RecordVal *getValue(StringRef Name) {
+    return const_cast<RecordVal *>(static_cast<const Record *>(this)->getValue(Name));
+  }
+
+  void addTemplateArg(Init *Name) {
+    assert(!isTemplateArg(Name) && "Template arg already defined!");
+    TemplateArgs.push_back(Name);
+  }
+
+  void addValue(const RecordVal &RV) {
+    assert(getValue(RV.getNameInit()) == nullptr && "Value already added!");
+    Values.push_back(RV);
+    if (Values.size() > 1)
+      // Keep NAME at the end of the list.  It makes record dumps a
+      // bit prettier and allows TableGen tests to be written more
+      // naturally.  Tests can use CHECK-NEXT to look for Record
+      // fields they expect to see after a def.  They can't do that if
+      // NAME is the first Record field.
+      std::swap(Values[Values.size() - 2], Values[Values.size() - 1]);
+  }
+
+  void removeValue(Init *Name) {
+    for (unsigned i = 0, e = Values.size(); i != e; ++i)
+      if (Values[i].getNameInit() == Name) {
+        Values.erase(Values.begin()+i);
+        return;
+      }
+    llvm_unreachable("Cannot remove an entry that does not exist!");
+  }
+
+  void removeValue(StringRef Name) {
+    removeValue(StringInit::get(Name));
+  }
+
+  bool isSubClassOf(const Record *R) const {
+    for (const auto &SCPair : SuperClasses)
+      if (SCPair.first == R)
+        return true;
+    return false;
+  }
+
+  bool isSubClassOf(StringRef Name) const {
+    for (const auto &SCPair : SuperClasses) {
+      if (const auto *SI = dyn_cast<StringInit>(SCPair.first->getNameInit())) {
+        if (SI->getValue() == Name)
+          return true;
+      } else if (SCPair.first->getNameInitAsString() == Name) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  void addSuperClass(Record *R, SMRange Range) {
+    assert(!TheInit && "changing type of record after it has been referenced");
+    assert(!isSubClassOf(R) && "Already subclassing record!");
+    SuperClasses.push_back(std::make_pair(R, Range));
+  }
+
+  /// If there are any field references that refer to fields
+  /// that have been filled in, we can propagate the values now.
+  ///
+  /// This is a final resolve: any error messages, e.g. due to undefined
+  /// !cast references, are generated now.
+  void resolveReferences();
+
+  /// Apply the resolver to the name of the record as well as to the
+  /// initializers of all fields of the record except SkipVal.
+  ///
+  /// The resolver should not resolve any of the fields itself, to avoid
+  /// recursion / infinite loops.
+  void resolveReferences(Resolver &R, const RecordVal *SkipVal = nullptr);
+
+  /// If anything in this record refers to RV, replace the
+  /// reference to RV with the RHS of RV.  If RV is null, we resolve all
+  /// possible references.
+  void resolveReferencesTo(const RecordVal *RV);
+
+  RecordKeeper &getRecords() const {
+    return TrackedRecords;
+  }
+
+  bool isAnonymous() const {
+    return IsAnonymous;
+  }
+
+  void print(raw_ostream &OS) const;
+  void dump() const;
+
+  //===--------------------------------------------------------------------===//
+  // High-level methods useful to tablegen back-ends
+  //
+
+  /// Return the initializer for a value with the specified name,
+  /// or throw an exception if the field does not exist.
+  Init *getValueInit(StringRef FieldName) const;
+
+  /// Return true if the named field is unset.
+  bool isValueUnset(StringRef FieldName) const {
+    return isa<UnsetInit>(getValueInit(FieldName));
+  }
+
+  /// This method looks up the specified field and returns
+  /// its value as a string, throwing an exception if the field does not exist
+  /// or if the value is not a string.
+  StringRef getValueAsString(StringRef FieldName) const;
+
+  /// This method looks up the specified field and returns
+  /// its value as a BitsInit, throwing an exception if the field does not exist
+  /// or if the value is not the right type.
+  BitsInit *getValueAsBitsInit(StringRef FieldName) const;
+
+  /// This method looks up the specified field and returns
+  /// its value as a ListInit, throwing an exception if the field does not exist
+  /// or if the value is not the right type.
+  ListInit *getValueAsListInit(StringRef FieldName) const;
+
+  /// This method looks up the specified field and
+  /// returns its value as a vector of records, throwing an exception if the
+  /// field does not exist or if the value is not the right type.
+  std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const;
+
+  /// This method looks up the specified field and
+  /// returns its value as a vector of integers, throwing an exception if the
+  /// field does not exist or if the value is not the right type.
+  std::vector<int64_t> getValueAsListOfInts(StringRef FieldName) const;
+
+  /// This method looks up the specified field and
+  /// returns its value as a vector of strings, throwing an exception if the
+  /// field does not exist or if the value is not the right type.
+  std::vector<StringRef> getValueAsListOfStrings(StringRef FieldName) const;
+
+  /// This method looks up the specified field and returns its
+  /// value as a Record, throwing an exception if the field does not exist or if
+  /// the value is not the right type.
+  Record *getValueAsDef(StringRef FieldName) const;
+
+  /// This method looks up the specified field and returns its
+  /// value as a bit, throwing an exception if the field does not exist or if
+  /// the value is not the right type.
+  bool getValueAsBit(StringRef FieldName) const;
+
+  /// This method looks up the specified field and
+  /// returns its value as a bit. If the field is unset, sets Unset to true and
+  /// returns false.
+  bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;
+
+  /// This method looks up the specified field and returns its
+  /// value as an int64_t, throwing an exception if the field does not exist or
+  /// if the value is not the right type.
+  int64_t getValueAsInt(StringRef FieldName) const;
+
+  /// This method looks up the specified field and returns its
+  /// value as a Dag, throwing an exception if the field does not exist or if
+  /// the value is not the right type.
+  DagInit *getValueAsDag(StringRef FieldName) const;
+};
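+
+// A back-end query sketch (record and field names are hypothetical):
+//   if (const Record *R = RK.getDef("ADD32rr"))
+//     if (R->isSubClassOf("Instruction"))
+//       StringRef Asm = R->getValueAsString("AsmString");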
+
+raw_ostream &operator<<(raw_ostream &OS, const Record &R);
+
+struct MultiClass {
+  Record Rec;  // Placeholder for template args and Name.
+  using RecordVector = std::vector<std::unique_ptr<Record>>;
+  RecordVector DefPrototypes;
+
+  void dump() const;
+
+  MultiClass(StringRef Name, SMLoc Loc, RecordKeeper &Records) :
+    Rec(Name, Loc, Records) {}
+};
+
+class RecordKeeper {
+  friend class RecordRecTy;
+  using RecordMap = std::map<std::string, std::unique_ptr<Record>>;
+  RecordMap Classes, Defs;
+  FoldingSet<RecordRecTy> RecordTypePool;
+  std::map<std::string, Init *> ExtraGlobals;
+  unsigned AnonCounter = 0;
+
+public:
+  const RecordMap &getClasses() const { return Classes; }
+  const RecordMap &getDefs() const { return Defs; }
+
+  Record *getClass(StringRef Name) const {
+    auto I = Classes.find(Name);
+    return I == Classes.end() ? nullptr : I->second.get();
+  }
+
+  Record *getDef(StringRef Name) const {
+    auto I = Defs.find(Name);
+    return I == Defs.end() ? nullptr : I->second.get();
+  }
+
+  Init *getGlobal(StringRef Name) const {
+    if (Record *R = getDef(Name))
+      return R->getDefInit();
+    auto It = ExtraGlobals.find(Name);
+    return It == ExtraGlobals.end() ? nullptr : It->second;
+  }
+
+  void addClass(std::unique_ptr<Record> R) {
+    bool Ins = Classes.insert(std::make_pair(R->getName(),
+                                             std::move(R))).second;
+    (void)Ins;
+    assert(Ins && "Class already exists");
+  }
+
+  void addDef(std::unique_ptr<Record> R) {
+    bool Ins = Defs.insert(std::make_pair(R->getName(),
+                                          std::move(R))).second;
+    (void)Ins;
+    assert(Ins && "Record already exists");
+  }
+
+  void addExtraGlobal(StringRef Name, Init *I) {
+    bool Ins = ExtraGlobals.insert(std::make_pair(Name, I)).second;
+    (void)Ins;
+    assert(!getDef(Name));
+    assert(Ins && "Global already exists");
+  }
+
+  Init *getNewAnonymousName();
+
+  //===--------------------------------------------------------------------===//
+  // High-level helper methods, useful for tablegen backends...
+
+  /// This method returns all concrete definitions
+  /// that derive from the specified class name.  A class with the specified
+  /// name must exist.
+  std::vector<Record *> getAllDerivedDefinitions(StringRef ClassName) const;
+
+  void dump() const;
+};
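+
+// Back-end sketch: fetch every def deriving from a class and give the result
+// a deterministic order (the class name is hypothetical):
+//   std::vector<Record *> Defs = RK.getAllDerivedDefinitions("Instruction");
+//   std::sort(Defs.begin(), Defs.end(), LessRecordByID());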
+
+/// Sorting predicate to sort record pointers by name.
+struct LessRecord {
+  bool operator()(const Record *Rec1, const Record *Rec2) const {
+    return StringRef(Rec1->getName()).compare_numeric(Rec2->getName()) < 0;
+  }
+};
+
+/// Sorting predicate to sort record pointers by their
+/// unique ID. If you just need a deterministic order, use this, since it
+/// just compares two `unsigned`; the other sorting predicates require
+/// string manipulation.
+struct LessRecordByID {
+  bool operator()(const Record *LHS, const Record *RHS) const {
+    return LHS->getID() < RHS->getID();
+  }
+};
+
+/// Sorting predicate to sort record pointers by their
+/// name field.
+struct LessRecordFieldName {
+  bool operator()(const Record *Rec1, const Record *Rec2) const {
+    return Rec1->getValueAsString("Name") < Rec2->getValueAsString("Name");
+  }
+};
+
+struct LessRecordRegister {
+  static bool ascii_isdigit(char x) { return x >= '0' && x <= '9'; }
+
+  struct RecordParts {
+    SmallVector<std::pair< bool, StringRef>, 4> Parts;
+
+    RecordParts(StringRef Rec) {
+      if (Rec.empty())
+        return;
+
+      size_t Len = 0;
+      const char *Start = Rec.data();
+      const char *Curr = Start;
+      bool isDigitPart = ascii_isdigit(Curr[0]);
+      for (size_t I = 0, E = Rec.size(); I != E; ++I, ++Len) {
+        bool isDigit = ascii_isdigit(Curr[I]);
+        if (isDigit != isDigitPart) {
+          Parts.push_back(std::make_pair(isDigitPart, StringRef(Start, Len)));
+          Len = 0;
+          Start = &Curr[I];
+          isDigitPart = ascii_isdigit(Curr[I]);
+        }
+      }
+      // Push the last part.
+      Parts.push_back(std::make_pair(isDigitPart, StringRef(Start, Len)));
+    }
+
+    size_t size() { return Parts.size(); }
+
+    std::pair<bool, StringRef> getPart(size_t i) {
+      assert (i < Parts.size() && "Invalid idx!");
+      return Parts[i];
+    }
+  };
+
+  bool operator()(const Record *Rec1, const Record *Rec2) const {
+    RecordParts LHSParts(StringRef(Rec1->getName()));
+    RecordParts RHSParts(StringRef(Rec2->getName()));
+
+    size_t LHSNumParts = LHSParts.size();
+    size_t RHSNumParts = RHSParts.size();
+    assert (LHSNumParts && RHSNumParts && "Expected at least one part!");
+
+    if (LHSNumParts != RHSNumParts)
+      return LHSNumParts < RHSNumParts;
+
+    // We expect the registers to be of the form [_a-zA-Z]+([0-9]*[_a-zA-Z]*)*.
+    for (size_t I = 0, E = LHSNumParts; I < E; I+=2) {
+      std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
+      std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
+      // Expect even part to always be alpha.
+      assert (LHSPart.first == false && RHSPart.first == false &&
+              "Expected both parts to be alpha.");
+      if (int Res = LHSPart.second.compare(RHSPart.second))
+        return Res < 0;
+    }
+    for (size_t I = 1, E = LHSNumParts; I < E; I+=2) {
+      std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
+      std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
+      // Expect odd part to always be numeric.
+      assert (LHSPart.first == true && RHSPart.first == true &&
+              "Expected both parts to be numeric.");
+      if (LHSPart.second.size() != RHSPart.second.size())
+        return LHSPart.second.size() < RHSPart.second.size();
+
+      unsigned LHSVal, RHSVal;
+
+      bool LHSFailed = LHSPart.second.getAsInteger(10, LHSVal); (void)LHSFailed;
+      assert(!LHSFailed && "Unable to convert LHS to integer.");
+      bool RHSFailed = RHSPart.second.getAsInteger(10, RHSVal); (void)RHSFailed;
+      assert(!RHSFailed && "Unable to convert RHS to integer.");
+
+      if (LHSVal != RHSVal)
+        return LHSVal < RHSVal;
+    }
+    return LHSNumParts < RHSNumParts;
+  }
+};
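+
+// With this predicate "R2" orders before "R10", whereas a plain string
+// compare would put "R10" first.  Usage sketch (Regs is hypothetical):
+//   std::sort(Regs.begin(), Regs.end(), LessRecordRegister());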
+
+raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
+
+/// Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+                  Init *Name, StringRef Scoper);
+
+//===----------------------------------------------------------------------===//
+//  Resolvers
+//===----------------------------------------------------------------------===//
+
+/// Interface for looking up the initializer for a variable name, used by
+/// Init::resolveReferences.
+class Resolver {
+  Record *CurRec;
+  bool IsFinal = false;
+
+public:
+  explicit Resolver(Record *CurRec) : CurRec(CurRec) {}
+  virtual ~Resolver() {}
+
+  Record *getCurrentRecord() const { return CurRec; }
+
+  /// Return the initializer for the given variable name (should normally be a
+  /// StringInit), or nullptr if the name could not be resolved.
+  virtual Init *resolve(Init *VarName) = 0;
+
+  // Whether bits in a BitsInit should stay unresolved if resolving them would
+  // result in a ? (UnsetInit). This behavior is used to represent instruction
+  // encodings by keeping references to unset variables within a record.
+  virtual bool keepUnsetBits() const { return false; }
+
+  // Whether this is the final resolve step before adding a record to the
+  // RecordKeeper. Error reporting during resolve and related constant folding
+  // should only happen when this is true.
+  bool isFinal() const { return IsFinal; }
+
+  void setFinal(bool Final) { IsFinal = Final; }
+};
+
+/// Resolve arbitrary mappings.
+class MapResolver final : public Resolver {
+  struct MappedValue {
+    Init *V;
+    bool Resolved;
+
+    MappedValue() : V(nullptr), Resolved(false) {}
+    MappedValue(Init *V, bool Resolved) : V(V), Resolved(Resolved) {}
+  };
+
+  DenseMap<Init *, MappedValue> Map;
+
+public:
+  explicit MapResolver(Record *CurRec = nullptr) : Resolver(CurRec) {}
+
+  void set(Init *Key, Init *Value) { Map[Key] = {Value, false}; }
+
+  Init *resolve(Init *VarName) override;
+};
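+
+// A substitution sketch: map the variable name "x" to 7, then resolve an
+// expression that refers to it (Expr is a hypothetical Init *):
+//   MapResolver R;
+//   R.set(StringInit::get("x"), IntInit::get(7));
+//   Init *Resolved = Expr->resolveReferences(R);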
+
+/// Resolve all variables from a record except for unset variables.
+class RecordResolver final : public Resolver {
+  DenseMap<Init *, Init *> Cache;
+  SmallVector<Init *, 4> Stack;
+
+public:
+  explicit RecordResolver(Record &R) : Resolver(&R) {}
+
+  Init *resolve(Init *VarName) override;
+
+  bool keepUnsetBits() const override { return true; }
+};
+
+/// Resolve all references to a specific RecordVal.
+//
+// TODO: This is used for resolving references to template arguments, in a
+//       rather inefficient way. Change those uses to resolve all template
+//       arguments simultaneously and get rid of this class.
+class RecordValResolver final : public Resolver {
+  const RecordVal *RV;
+
+public:
+  explicit RecordValResolver(Record &R, const RecordVal *RV)
+      : Resolver(&R), RV(RV) {}
+
+  Init *resolve(Init *VarName) override {
+    if (VarName == RV->getNameInit())
+      return RV->getValue();
+    return nullptr;
+  }
+};
+
+/// Delegate resolving to a sub-resolver, but shadow some variable names.
+class ShadowResolver final : public Resolver {
+  Resolver &R;
+  DenseSet<Init *> Shadowed;
+
+public:
+  explicit ShadowResolver(Resolver &R)
+      : Resolver(R.getCurrentRecord()), R(R) {
+    setFinal(R.isFinal());
+  }
+
+  void addShadow(Init *Key) { Shadowed.insert(Key); }
+
+  Init *resolve(Init *VarName) override {
+    if (Shadowed.count(VarName))
+      return nullptr;
+    return R.resolve(VarName);
+  }
+};
+
+/// (Optionally) delegate resolving to a sub-resolver, and keep track whether
+/// there were unresolved references.
+class TrackUnresolvedResolver final : public Resolver {
+  Resolver *R;
+  bool FoundUnresolved = false;
+
+public:
+  explicit TrackUnresolvedResolver(Resolver *R = nullptr)
+      : Resolver(R ? R->getCurrentRecord() : nullptr), R(R) {}
+
+  bool foundUnresolved() const { return FoundUnresolved; }
+
+  Init *resolve(Init *VarName) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TABLEGEN_RECORD_H
diff --git a/linux-x64/clang/include/llvm/TableGen/SearchableTable.td b/linux-x64/clang/include/llvm/TableGen/SearchableTable.td
new file mode 100644
index 0000000..12aaf60
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/SearchableTable.td
@@ -0,0 +1,41 @@
+//===- SearchableTable.td ----------------------------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the key top-level classes needed to produce a reasonably
+// generic table that can be binary-searched via int and string entries.
+//
+// Each table must instantiate "MappingKind", listing the fields that should be
+// included and fields that should be searchable. Only two kinds of fields are
+// searchable at the moment: "strings" (which are compared case-insensitively),
+// and "bits".
+//
+// For each "MappingKind" the generated header will create GET_MAPPINGKIND_DECL
+// and GET_MAPPINGKIND_IMPL guards.
+//
+// Inside the DECL guard will be a set of function declarations:
+// "lookup{InstanceClass}By{SearchableField}", returning "const {InstanceClass}
+// *" and accepting either a StringRef or a uintN_t. Additionally, if
+// EnumNameField is still defined, there will be an "enum {InstanceClass}Values"
+// allowing C++ code to reference either the primary data table's entries (if
+// EnumValueField is not defined) or some other field (e.g. encoding) if it is.
+//
+// Inside the IMPL guard will be a primary data table "{InstanceClass}sList" and
+// as many searchable indexes as requested
+// ("{InstanceClass}sBy{SearchableField}"). Additionally implementations of the
+// lookup function will be provided.
+//
+// See AArch64SystemOperands.td and its generated header for example uses.
+//
+//===----------------------------------------------------------------------===//
+
+class SearchableTable {
+  list<string> SearchableFields;
+  string EnumNameField = "Name";
+  string EnumValueField;
+}
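+
+// A usage sketch (field names are illustrative; see AArch64SystemOperands.td
+// for real uses):
+//
+//   class SysReg<string name, bits<16> enc> : SearchableTable {
+//     let SearchableFields = ["Name", "Encoding"];
+//     let EnumValueField = "Encoding";
+//     string Name = name;
+//     bits<16> Encoding = enc;
+//   }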
diff --git a/linux-x64/clang/include/llvm/TableGen/SetTheory.h b/linux-x64/clang/include/llvm/TableGen/SetTheory.h
new file mode 100644
index 0000000..4b32f9e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/SetTheory.h
@@ -0,0 +1,145 @@
+//===- SetTheory.h - Generate ordered sets from DAG expressions -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SetTheory class that computes ordered sets of
+// Records from DAG expressions.  Operators for standard set operations are
+// predefined, and it is possible to add special purpose set operators as well.
+//
+// The user may define named sets as Records of predefined classes. Set
+// expanders can be added to a SetTheory instance to teach it how to find the
+// elements of such a named set.
+//
+// These are the predefined operators. The argument lists can be individual
+// elements (defs), other sets (defs of expandable classes), lists, or DAG
+// expressions that are evaluated recursively.
+//
+// - (add S1, S2 ...) Union sets. This is also how sets are created from element
+//   lists.
+//
+// - (sub S1, S2, ...) Set difference. Every element in S1 except for the
+//   elements in S2, ...
+//
+// - (and S1, S2) Set intersection. Every element in S1 that is also in S2.
+//
+// - (shl S, N) Shift left. Remove the first N elements from S.
+//
+// - (trunc S, N) Truncate. The first N elements of S.
+//
+// - (rotl S, N) Rotate left. Same as (add (shl S, N), (trunc S, N)).
+//
+// - (rotr S, N) Rotate right.
+//
+// - (decimate S, N) Decimate S by picking every N'th element, starting with
+//   the first one. For instance, (decimate S, 2) returns the even elements of
+//   S.
+//
+// - (sequence "Format", From, To) Generate a sequence of defs with printf.
+//   For instance, (sequence "R%u", 0, 3) -> [ R0, R1, R2, R3 ]
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_SETTHEORY_H
+#define LLVM_TABLEGEN_SETTHEORY_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SMLoc.h"
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class DagInit;
+class Init;
+class Record;
+
+class SetTheory {
+public:
+  using RecVec = std::vector<Record *>;
+  using RecSet = SmallSetVector<Record *, 16>;
+
+  /// Operator - A callback representing a DAG operator.
+  class Operator {
+    virtual void anchor();
+
+  public:
+    virtual ~Operator() = default;
+
+    /// apply - Apply this operator to Expr's arguments and insert the result
+    /// in Elts.
+    virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
+                       ArrayRef<SMLoc> Loc) = 0;
+  };
+
+  /// Expander - A callback function that can transform a Record representing a
+  /// set into a fully expanded list of elements. Expanders provide a way for
+  /// users to define named sets that can be used in DAG expressions.
+  class Expander {
+    virtual void anchor();
+
+  public:
+    virtual ~Expander() = default;
+
+    virtual void expand(SetTheory&, Record*, RecSet &Elts) = 0;
+  };
+
+private:
+  // Map set defs to their fully expanded contents. This serves as a
+  // memoization cache and makes it possible to return const references on
+  // queries.
+  using ExpandMap = std::map<Record *, RecVec>;
+  ExpandMap Expansions;
+
+  // Known DAG operators by name.
+  StringMap<std::unique_ptr<Operator>> Operators;
+
+  // Typed expanders by class name.
+  StringMap<std::unique_ptr<Expander>> Expanders;
+
+public:
+  /// Create a SetTheory instance with only the standard operators.
+  SetTheory();
+
+  /// addExpander - Add an expander for Records with the named super class.
+  void addExpander(StringRef ClassName, std::unique_ptr<Expander>);
+
+  /// addFieldExpander - Add an expander for ClassName that simply evaluates
+  /// FieldName in the Record to get the set elements.  That is all that is
+  /// needed for a class like:
+  ///
+  ///   class Set<dag d> {
+  ///     dag Elts = d;
+  ///   }
+  ///
+  void addFieldExpander(StringRef ClassName, StringRef FieldName);
+
+  /// addOperator - Add a DAG operator.
+  void addOperator(StringRef Name, std::unique_ptr<Operator>);
+
+  /// evaluate - Evaluate Expr and append the resulting set to Elts.
+  void evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc);
+
+  /// evaluate - Evaluate a sequence of Inits and append to Elts.
+  template<typename Iter>
+  void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef<SMLoc> Loc) {
+    while (begin != end)
+      evaluate(*begin++, Elts, Loc);
+  }
+
+  /// expand - Expand a record into a set of elements if possible.  Return a
+  /// pointer to the expanded elements, or NULL if Set cannot be expanded
+  /// further.
+  const RecVec *expand(Record *Set);
+};
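+
+// A minimal usage sketch (not part of this header); the class name "Set" and
+// the field name "Elts" are whatever the processed .td file defines:
+//
+//   SetTheory ST;                       // standard operators only
+//   ST.addFieldExpander("Set", "Elts");
+//   SetTheory::RecSet Elts;
+//   ST.evaluate(SomeInit, Elts, Loc);   // appends the evaluated set to Elts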
+
+} // end namespace llvm
+
+#endif // LLVM_TABLEGEN_SETTHEORY_H
diff --git a/linux-x64/clang/include/llvm/TableGen/StringMatcher.h b/linux-x64/clang/include/llvm/TableGen/StringMatcher.h
new file mode 100644
index 0000000..09d2092
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/StringMatcher.h
@@ -0,0 +1,56 @@
+//===- StringMatcher.h - Generate a matcher for input strings ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StringMatcher class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_STRINGMATCHER_H
+#define LLVM_TABLEGEN_STRINGMATCHER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// StringMatcher - Given a list of strings and code to execute when they match,
+/// output a simple switch tree to classify the input string.
+///
+/// If a match is found, the code in Matches[i].second is executed; control must
+/// not exit this code fragment.  If nothing matches, execution falls through.
+///
+class StringMatcher {
+public:
+  using StringPair = std::pair<std::string, std::string>;
+
+private:
+  StringRef StrVariableName;
+  const std::vector<StringPair> &Matches;
+  raw_ostream &OS;
+
+public:
+  StringMatcher(StringRef strVariableName,
+                const std::vector<StringPair> &matches, raw_ostream &os)
+    : StrVariableName(strVariableName), Matches(matches), OS(os) {}
+
+  void Emit(unsigned Indent = 0, bool IgnoreDuplicates = false) const;
+
+private:
+  bool EmitStringMatcherForChar(const std::vector<const StringPair *> &Matches,
+                                unsigned CharNo, unsigned IndentCount,
+                                bool IgnoreDuplicates) const;
+};
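+
+// A minimal usage sketch (hypothetical match table). Note that each code
+// fragment must not fall off its end, so each one returns:
+//
+//   std::vector<StringMatcher::StringPair> Matches = {
+//     {"add", "return ISD::ADD;"},
+//     {"sub", "return ISD::SUB;"},
+//   };
+//   StringMatcher("Str", Matches, OS).Emit();
+//
+// This emits a switch tree over the length and characters of Str; if neither
+// string matches, control falls through past the emitted code.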
+
+} // end namespace llvm
+
+#endif // LLVM_TABLEGEN_STRINGMATCHER_H
diff --git a/linux-x64/clang/include/llvm/TableGen/StringToOffsetTable.h b/linux-x64/clang/include/llvm/TableGen/StringToOffsetTable.h
new file mode 100644
index 0000000..4b11e88
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/StringToOffsetTable.h
@@ -0,0 +1,105 @@
+//===- StringToOffsetTable.h - Emit a big concatenated string ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H
+#define LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cctype>
+
+namespace llvm {
+
+/// StringToOffsetTable - This class uniques a bunch of nul-terminated strings
+/// and keeps track of their offset in a massive contiguous string allocation.
+/// It can then output this string blob and use indexes into the string to
+/// reference each piece.
+class StringToOffsetTable {
+  StringMap<unsigned> StringOffset;
+  std::string AggregateString;
+
+public:
+  bool Empty() const { return StringOffset.empty(); }
+
+  unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
+    auto IterBool =
+        StringOffset.insert(std::make_pair(Str, AggregateString.size()));
+    if (IterBool.second) {
+      // Add the string to the aggregate if this is the first time found.
+      AggregateString.append(Str.begin(), Str.end());
+      if (appendZero)
+        AggregateString += '\0';
+    }
+
+    return IterBool.first->second;
+  }
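+
+  // Worked example, assuming an empty table and the default appendZero:
+  //   GetOrAddStringOffset("foo");  // returns 0; blob is "foo\0"
+  //   GetOrAddStringOffset("bar");  // returns 4; blob is "foo\0bar\0"
+  //   GetOrAddStringOffset("foo");  // returns 0 again; nothing is appended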
+
+  void EmitString(raw_ostream &O) {
+    // Escape the string.
+    SmallString<256> Str;
+    raw_svector_ostream(Str).write_escaped(AggregateString);
+    AggregateString = Str.str();
+
+    O << "    \"";
+    unsigned CharsPrinted = 0;
+    for (unsigned i = 0, e = AggregateString.size(); i != e; ++i) {
+      if (CharsPrinted > 70) {
+        O << "\"\n    \"";
+        CharsPrinted = 0;
+      }
+      O << AggregateString[i];
+      ++CharsPrinted;
+
+      // Print escape sequences all together.
+      if (AggregateString[i] != '\\')
+        continue;
+
+      assert(i + 1 < AggregateString.size() && "Incomplete escape sequence!");
+      if (isdigit(AggregateString[i + 1])) {
+        assert(isdigit(AggregateString[i + 2]) &&
+               isdigit(AggregateString[i + 3]) &&
+               "Expected 3 digit octal escape!");
+        O << AggregateString[++i];
+        O << AggregateString[++i];
+        O << AggregateString[++i];
+        CharsPrinted += 3;
+      } else {
+        O << AggregateString[++i];
+        ++CharsPrinted;
+      }
+    }
+    O << "\"";
+  }
+
+  /// Emit the string using character literals. MSVC has a limitation that
+  /// string literals cannot be longer than 64K.
+  void EmitCharArray(raw_ostream &O) {
+    assert(AggregateString.find(')') == std::string::npos &&
+           "can't emit raw string with closing parens");
+    int Count = 0;
+    O << ' ';
+    for (char C : AggregateString) {
+      O << " \'";
+      O.write_escaped(StringRef(&C, 1));
+      O << "\',";
+      Count++;
+      if (Count > 14) {
+        O << "\n ";
+        Count = 0;
+      }
+    }
+    O << '\n';
+  }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/TableGen/TableGenBackend.h b/linux-x64/clang/include/llvm/TableGen/TableGenBackend.h
new file mode 100644
index 0000000..d226f1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/TableGen/TableGenBackend.h
@@ -0,0 +1,28 @@
+//===- llvm/TableGen/TableGenBackend.h - Backend utilities ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Useful utilities for TableGen backends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_TABLEGENBACKEND_H
+#define LLVM_TABLEGEN_TABLEGENBACKEND_H
+
+namespace llvm {
+
+class StringRef;
+class raw_ostream;
+
+/// emitSourceFileHeader - Output an LLVM style file header to the specified
+/// raw_ostream.
+void emitSourceFileHeader(StringRef Desc, raw_ostream &OS);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Target/CodeGenCWrappers.h b/linux-x64/clang/include/llvm/Target/CodeGenCWrappers.h
new file mode 100644
index 0000000..e9a9905
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/CodeGenCWrappers.h
@@ -0,0 +1,61 @@
+//===- llvm/Target/CodeGenCWrappers.h - CodeGen C Wrappers ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines C bindings wrappers for enums in llvm/Support/CodeGen.h
+// that need them.  The wrappers are separated to avoid adding an indirect
+// dependency on llvm/Config/Targets.def to CodeGen.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_CODEGENCWRAPPERS_H
+#define LLVM_TARGET_CODEGENCWRAPPERS_H
+
+#include "llvm-c/TargetMachine.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+inline Optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
+  JIT = false;
+  switch (Model) {
+  case LLVMCodeModelJITDefault:
+    JIT = true;
+    LLVM_FALLTHROUGH;
+  case LLVMCodeModelDefault:
+    return None;
+  case LLVMCodeModelSmall:
+    return CodeModel::Small;
+  case LLVMCodeModelKernel:
+    return CodeModel::Kernel;
+  case LLVMCodeModelMedium:
+    return CodeModel::Medium;
+  case LLVMCodeModelLarge:
+    return CodeModel::Large;
+  }
+  return CodeModel::Small;
+}
+
+inline LLVMCodeModel wrap(CodeModel::Model Model) {
+  switch (Model) {
+  case CodeModel::Small:
+    return LLVMCodeModelSmall;
+  case CodeModel::Kernel:
+    return LLVMCodeModelKernel;
+  case CodeModel::Medium:
+    return LLVMCodeModelMedium;
+  case CodeModel::Large:
+    return LLVMCodeModelLarge;
+  }
+  llvm_unreachable("Bad CodeModel!");
+}
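+
+// For example, wrap(CodeModel::Kernel) yields LLVMCodeModelKernel, while
+// unwrap(LLVMCodeModelJITDefault, JIT) sets JIT to true and returns None,
+// leaving the choice of a concrete model to the caller.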
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Target/GenericOpcodes.td b/linux-x64/clang/include/llvm/Target/GenericOpcodes.td
new file mode 100644
index 0000000..1af6a9a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/GenericOpcodes.td
@@ -0,0 +1,644 @@
+//===-- GenericOpcodes.td - Opcodes used with GlobalISel ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the generic opcodes used with GlobalISel.
+// After instruction selection, these opcodes should not appear.
+//
+//===----------------------------------------------------------------------===//
+
+//------------------------------------------------------------------------------
+// Unary ops.
+//------------------------------------------------------------------------------
+
+class GenericInstruction : StandardPseudoInstruction;
+
+// Extend the underlying scalar type of an operation, leaving the high bits
+// unspecified.
+def G_ANYEXT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+// Sign extend the underlying scalar type of an operation, copying the sign bit
+// into the newly-created space.
+def G_SEXT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+// Zero extend the underlying scalar type of an operation, putting zero bits
+// into the newly-created space.
+def G_ZEXT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+
+// Truncate the underlying scalar type of an operation. This is equivalent to
+// G_EXTRACT for scalar types, but acts elementwise on vectors.
+def G_TRUNC : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_IMPLICIT_DEF : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins);
+  let hasSideEffects = 0;
+}
+
+def G_PHI : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins variable_ops);
+  let hasSideEffects = 0;
+}
+
+def G_FRAME_INDEX : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$src2);
+  let hasSideEffects = 0;
+}
+
+def G_GLOBAL_VALUE : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = 0;
+}
+
+def G_INTTOPTR : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_PTRTOINT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_BITCAST : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_CONSTANT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$imm);
+  let hasSideEffects = 0;
+}
+
+def G_FCONSTANT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$imm);
+  let hasSideEffects = 0;
+}
+
+def G_VASTART : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins type0:$list);
+  let hasSideEffects = 0;
+  let mayStore = 1;
+}
+
+def G_VAARG : GenericInstruction {
+  let OutOperandList = (outs type0:$val);
+  let InOperandList = (ins type1:$list, unknown:$align);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+def G_BSWAP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src);
+  let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Binary ops.
+//------------------------------------------------------------------------------
+
+// Generic addition.
+def G_ADD : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic subtraction.
+def G_SUB : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic multiplication.
+def G_MUL : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic signed division.
+def G_SDIV : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic unsigned division.
+def G_UDIV : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic signed remainder.
+def G_SREM : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic unsigned remainder.
+def G_UREM : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic bitwise and.
+def G_AND : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic bitwise or.
+def G_OR : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic bitwise xor.
+def G_XOR : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic left-shift.
+def G_SHL : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic logical right-shift.
+def G_LSHR : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic arithmetic right-shift.
+def G_ASHR : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic integer comparison.
+def G_ICMP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic floating-point comparison.
+def G_FCMP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic select
+def G_SELECT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$tst, type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic pointer offset.
+def G_GEP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type1:$src2);
+  let hasSideEffects = 0;
+}
+
+def G_PTR_MASK : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src, unknown:$bits);
+  let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Overflow ops
+//------------------------------------------------------------------------------
+
+// Generic unsigned addition consuming and producing a carry flag.
+def G_UADDE : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+  let hasSideEffects = 0;
+}
+
+// Generic signed addition producing a carry flag.
+def G_SADDO : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic unsigned subtraction consuming and producing a carry flag.
+def G_USUBE : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+  let hasSideEffects = 0;
+}
+
+// Generic signed subtraction producing a carry flag.
+def G_SSUBO : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic unsigned multiplication producing a carry flag.
+def G_UMULO : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic signed multiplication producing a carry flag.
+def G_SMULO : GenericInstruction {
+  let OutOperandList = (outs type0:$dst, type1:$carry_out);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Multiply two numbers at twice the incoming bit width (unsigned) and return
+// the high half of the result.
+def G_UMULH : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Multiply two numbers at twice the incoming bit width (signed) and return
+// the high half of the result.
+def G_SMULH : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+//------------------------------------------------------------------------------
+// Floating Point Unary Ops.
+//------------------------------------------------------------------------------
+
+def G_FNEG : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src);
+  let hasSideEffects = 0;
+}
+
+def G_FPEXT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_FPTRUNC : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_FPTOSI : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_FPTOUI : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_SITOFP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_UITOFP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+def G_FABS : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src);
+  let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Floating Point Binary Ops.
+//------------------------------------------------------------------------------
+
+// Generic FP addition.
+def G_FADD : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic FP subtraction.
+def G_FSUB : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic FP multiplication.
+def G_FMUL : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+  let isCommutable = 1;
+}
+
+// Generic fused multiply-add instruction.
+// Behaves like the llvm.fma intrinsic, i.e. src1 * src2 + src3.
+def G_FMA : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3);
+  let hasSideEffects = 0;
+  let isCommutable = 0;
+}
+
+// Generic FP division.
+def G_FDIV : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Generic FP remainder.
+def G_FREM : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Floating point exponentiation.
+def G_FPOW : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2);
+  let hasSideEffects = 0;
+}
+
+// Floating point base-e exponential of a value.
+def G_FEXP : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
+// Floating point base-2 exponential of a value.
+def G_FEXP2 : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
+// Floating point natural (base-e) logarithm of a value.
+def G_FLOG : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
+// Floating point base-2 logarithm of a value.
+def G_FLOG2 : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Memory ops
+//------------------------------------------------------------------------------
+
+// Generic load. Expects a MachineMemOperand in addition to explicit operands.
+def G_LOAD : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$addr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
+// Generic store. Expects a MachineMemOperand in addition to explicit operands.
+def G_STORE : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins type0:$src, ptype1:$addr);
+  let hasSideEffects = 0;
+  let mayStore = 1;
+}
+
+// Generic atomic cmpxchg with internal success check. Expects a
+// MachineMemOperand in addition to explicit operands.
+def G_ATOMIC_CMPXCHG_WITH_SUCCESS : GenericInstruction {
+  let OutOperandList = (outs type0:$oldval, type1:$success);
+  let InOperandList = (ins type2:$addr, type0:$cmpval, type0:$newval);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+// Generic atomic cmpxchg. Expects a MachineMemOperand in addition to explicit
+// operands.
+def G_ATOMIC_CMPXCHG : GenericInstruction {
+  let OutOperandList = (outs type0:$oldval);
+  let InOperandList = (ins ptype1:$addr, type0:$cmpval, type0:$newval);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+// Generic atomicrmw. Expects a MachineMemOperand in addition to explicit
+// operands.
+class G_ATOMICRMW_OP : GenericInstruction {
+  let OutOperandList = (outs type0:$oldval);
+  let InOperandList = (ins ptype1:$addr, type0:$val);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+def G_ATOMICRMW_XCHG : G_ATOMICRMW_OP;
+def G_ATOMICRMW_ADD : G_ATOMICRMW_OP;
+def G_ATOMICRMW_SUB : G_ATOMICRMW_OP;
+def G_ATOMICRMW_AND : G_ATOMICRMW_OP;
+def G_ATOMICRMW_NAND : G_ATOMICRMW_OP;
+def G_ATOMICRMW_OR : G_ATOMICRMW_OP;
+def G_ATOMICRMW_XOR : G_ATOMICRMW_OP;
+def G_ATOMICRMW_MAX : G_ATOMICRMW_OP;
+def G_ATOMICRMW_MIN : G_ATOMICRMW_OP;
+def G_ATOMICRMW_UMAX : G_ATOMICRMW_OP;
+def G_ATOMICRMW_UMIN : G_ATOMICRMW_OP;
+
+//------------------------------------------------------------------------------
+// Variadic ops
+//------------------------------------------------------------------------------
+
+// Extract a register of the specified size, starting from the block given by
+// index. This will almost certainly be mapped to sub-register COPYs after
+// register banks have been selected.
+def G_EXTRACT : GenericInstruction {
+  let OutOperandList = (outs type0:$res);
+  let InOperandList = (ins type1:$src, unknown:$offset);
+  let hasSideEffects = 0;
+}
+
+// Extract multiple registers of the specified size, starting from the blocks
+// given by the indices. This will almost certainly be mapped to sub-register
+// COPYs after register banks have been selected.
+def G_UNMERGE_VALUES : GenericInstruction {
+  let OutOperandList = (outs type0:$dst0, variable_ops);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = 0;
+}
+
+// Insert a smaller register into a larger one at the specified bit-index.
+def G_INSERT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src, type1:$op, unknown:$offset);
+  let hasSideEffects = 0;
+}
+
+/// Concatenate multiple registers of the same size into a wider register.
+def G_MERGE_VALUES : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src0, variable_ops);
+  let hasSideEffects = 0;
+}
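+
+// A rough MIR sketch of the unmerge/merge pair (the virtual register names
+// are illustrative): splitting an s64 into two s32 halves and reassembling:
+//
+//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x:_(s64)
+//   %y:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)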
+
+// Intrinsic without side effects.
+def G_INTRINSIC : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins unknown:$intrin, variable_ops);
+  let hasSideEffects = 0;
+}
+
+// Intrinsic with side effects.
+def G_INTRINSIC_W_SIDE_EFFECTS : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins unknown:$intrin, variable_ops);
+  let hasSideEffects = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+//------------------------------------------------------------------------------
+// Branches.
+//------------------------------------------------------------------------------
+
+// Generic unconditional branch.
+def G_BR : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins unknown:$src1);
+  let hasSideEffects = 0;
+  let isBranch = 1;
+  let isTerminator = 1;
+  let isBarrier = 1;
+}
+
+// Generic conditional branch.
+def G_BRCOND : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins type0:$tst, unknown:$truebb);
+  let hasSideEffects = 0;
+  let isBranch = 1;
+  let isTerminator = 1;
+}
+
+// Generic indirect branch.
+def G_BRINDIRECT : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+  let isBranch = 1;
+  let isTerminator = 1;
+}
+
+//------------------------------------------------------------------------------
+// Vector ops
+//------------------------------------------------------------------------------
+
+// Generic insertelement.
+def G_INSERT_VECTOR_ELT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src, type1:$elt, type2:$idx);
+  let hasSideEffects = 0;
+}
+
+// Generic extractelement.
+def G_EXTRACT_VECTOR_ELT : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src, type2:$idx);
+  let hasSideEffects = 0;
+}
+
+// Generic shufflevector.
+def G_SHUFFLE_VECTOR: GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$v1, type1:$v2, type2:$mask);
+  let hasSideEffects = 0;
+}
+
+// TODO: Add the other generic opcodes.
diff --git a/linux-x64/clang/include/llvm/Target/GlobalISel/RegisterBank.td b/linux-x64/clang/include/llvm/Target/GlobalISel/RegisterBank.td
new file mode 100644
index 0000000..4dfd139
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/GlobalISel/RegisterBank.td
@@ -0,0 +1,16 @@
+//===- RegisterBank.td - Register bank definitions ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+class RegisterBank<string name, list<RegisterClass> classes> {
+  string Name = name;
+  list<RegisterClass> RegisterClasses = classes;
+}
diff --git a/linux-x64/clang/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/linux-x64/clang/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
new file mode 100644
index 0000000..0d3b4a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -0,0 +1,120 @@
+//===- TargetGlobalISel.td - Common code for GlobalISel ----*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used to support
+// SelectionDAG instruction selection patterns (specified in
+// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
+//
+// This is intended as a compatibility layer, to enable reuse of target
+// descriptions written for SelectionDAG without requiring explicit GlobalISel
+// support.  It will eventually supersede SelectionDAG patterns.
+//
+//===----------------------------------------------------------------------===//
+
+// Declare that a generic Instruction is 'equivalent' to an SDNode, that is,
+// SelectionDAG patterns involving the SDNode can be transformed to match the
+// Instruction instead.
+class GINodeEquiv<Instruction i, SDNode node> {
+  Instruction I = i;
+  SDNode Node = node;
+
+  // SelectionDAG has separate nodes for atomic and non-atomic memory operations
+  // (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel
+  // stores this information in the MachineMemOperand.
+  bit CheckMMOIsNonAtomic = 0;
+}
+
+// These are defined in the same order as the G_* instructions.
+def : GINodeEquiv<G_ANYEXT, anyext>;
+def : GINodeEquiv<G_SEXT, sext>;
+def : GINodeEquiv<G_ZEXT, zext>;
+def : GINodeEquiv<G_TRUNC, trunc>;
+def : GINodeEquiv<G_BITCAST, bitconvert>;
+// G_INTTOPTR - SelectionDAG has no equivalent.
+// G_PTRTOINT - SelectionDAG has no equivalent.
+def : GINodeEquiv<G_CONSTANT, imm>;
+def : GINodeEquiv<G_FCONSTANT, fpimm>;
+def : GINodeEquiv<G_ADD, add>;
+def : GINodeEquiv<G_SUB, sub>;
+def : GINodeEquiv<G_MUL, mul>;
+def : GINodeEquiv<G_SDIV, sdiv>;
+def : GINodeEquiv<G_UDIV, udiv>;
+def : GINodeEquiv<G_SREM, srem>;
+def : GINodeEquiv<G_UREM, urem>;
+def : GINodeEquiv<G_AND, and>;
+def : GINodeEquiv<G_OR, or>;
+def : GINodeEquiv<G_XOR, xor>;
+def : GINodeEquiv<G_SHL, shl>;
+def : GINodeEquiv<G_LSHR, srl>;
+def : GINodeEquiv<G_ASHR, sra>;
+def : GINodeEquiv<G_SELECT, select>;
+def : GINodeEquiv<G_FNEG, fneg>;
+def : GINodeEquiv<G_FPEXT, fpextend>;
+def : GINodeEquiv<G_FPTRUNC, fpround>;
+def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
+def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
+def : GINodeEquiv<G_SITOFP, sint_to_fp>;
+def : GINodeEquiv<G_UITOFP, uint_to_fp>;
+def : GINodeEquiv<G_FADD, fadd>;
+def : GINodeEquiv<G_FSUB, fsub>;
+def : GINodeEquiv<G_FMA, fma>;
+def : GINodeEquiv<G_FMUL, fmul>;
+def : GINodeEquiv<G_FDIV, fdiv>;
+def : GINodeEquiv<G_FREM, frem>;
+def : GINodeEquiv<G_FPOW, fpow>;
+def : GINodeEquiv<G_FEXP2, fexp2>;
+def : GINodeEquiv<G_FLOG2, flog2>;
+def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
+// ISD::INTRINSIC_VOID can also be handled with G_INTRINSIC_W_SIDE_EFFECTS.
+def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_void>;
+def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_w_chain>;
+def : GINodeEquiv<G_BR, br>;
+def : GINodeEquiv<G_BSWAP, bswap>;
+
+// Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
+// complications that tablegen must take care of. For example, Predicates such
+// as isSignExtLoad require that this is not a perfect 1:1 mapping since a
+// sign-extending load is (G_SEXT (G_LOAD x)) in GlobalISel. Additionally,
+// G_LOAD handles both atomic and non-atomic loads, whereas SelectionDAG had
+// separate nodes for them. This GINodeEquiv maps the non-atomic loads to
+// G_LOAD with a non-atomic MachineMemOperand.
+def : GINodeEquiv<G_LOAD, ld> { let CheckMMOIsNonAtomic = 1; }
+// Broadly speaking G_STORE is equivalent to ISD::STORE but there are some
+// complications that tablegen must take care of. For example, predicates such
+// as isTruncStore require that this is not a perfect 1:1 mapping since a
+// truncating store is (G_STORE (G_TRUNC x)) in GlobalISel. Additionally,
+// G_STORE handles both atomic and non-atomic stores, whereas SelectionDAG had
+// separate nodes for them. This GINodeEquiv maps the non-atomic stores to
+// G_STORE with a non-atomic MachineMemOperand.
+def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
+
+def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
+def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
+def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
+def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
+def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
+def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
+def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
+def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
+def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
+def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
+def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
+def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
+
+// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
+// Should be used on defs that subclass GIComplexOperandMatcher<>.
+class GIComplexPatternEquiv<ComplexPattern seldag> {
+  ComplexPattern SelDAGEquivalent = seldag;
+}
+
+// Specifies the GlobalISel equivalents for SelectionDAG's SDNodeXForm.
+// Should be used on defs that subclass GICustomOperandRenderer<>.
+class GISDNodeXFormEquiv<SDNodeXForm seldag> {
+  SDNodeXForm SelDAGEquivalent = seldag;
+}
diff --git a/linux-x64/clang/include/llvm/Target/GlobalISel/Target.td b/linux-x64/clang/include/llvm/Target/GlobalISel/Target.td
new file mode 100644
index 0000000..6740f40
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/GlobalISel/Target.td
@@ -0,0 +1,61 @@
+//===- Target.td - Define GlobalISel rules -----------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used to support
+// SelectionDAG instruction selection patterns (specified in
+// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
+//
+// This is intended as a compatibility layer, to enable reuse of target
+// descriptions written for SelectionDAG without requiring explicit GlobalISel
+// support.  It will eventually supersede SelectionDAG patterns.
+//
+//===----------------------------------------------------------------------===//
+
+// Definitions that inherit from LLT define types that will be used in the
+// GlobalISel matcher.
+class LLT;
+
+def s32 : LLT;
+def s64 : LLT;
+
+// Defines a matcher for complex operands. This is analogous to ComplexPattern
+// from SelectionDAG.
+//
+// Definitions that inherit from this may also inherit from
+// GIComplexPatternEquiv to enable the import of SelectionDAG patterns involving
+// those ComplexPatterns.
+class GIComplexOperandMatcher<LLT type, string matcherfn> {
+  // The expected type of the root of the match.
+  //
+  // TODO: We should probably support any-type, any-scalar, and multiple types
+  //       in the future.
+  LLT Type = type;
+
+  // The function that determines whether the operand matches. It should be of
+  // the form:
+  //   bool select(const MatchOperand &Root, MatchOperand &Result1)
+  // and should have the same number of ResultX arguments as the number of
+  // result operands. It must return true on successful match and false
+  // otherwise. If it returns true, then all the ResultX arguments must be
+  // overwritten.
+  string MatcherFn = matcherfn;
+}
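+
+// A hypothetical sketch; the matcher function name and the ComplexPattern def
+// "am_indexed" are illustrative, not real definitions:
+//
+//   def gi_am_indexed : GIComplexOperandMatcher<s64, "selectAddrModeIndexed">,
+//                       GIComplexPatternEquiv<am_indexed>;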
+
+// Defines a custom renderer. This is analogous to SDNodeXForm from
+// SelectionDAG. Unlike SDNodeXForm, this matches a MachineInstr and
+// renders directly to the result instruction without an intermediate node.
+//
+// Definitions that inherit from this may also inherit from GISDNodeXFormEquiv
+// to enable the import of SelectionDAG patterns involving those SDNodeXForms.
+class GICustomOperandRenderer<string rendererfn> {
+  // The function renders the operand(s) of the matched instruction to
+  // the specified instruction. It should be of the form:
+  //   void render(MachineInstrBuilder &MIB, const MachineInstr &MI)
+  string RendererFn = rendererfn;
+}
diff --git a/linux-x64/clang/include/llvm/Target/Target.td b/linux-x64/clang/include/llvm/Target/Target.td
new file mode 100644
index 0000000..0a09e9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/Target.td
@@ -0,0 +1,1527 @@
+//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces which should be
+// implemented by each target which is using a TableGen based code generator.
+//
+//===----------------------------------------------------------------------===//
+
+// Include all information about LLVM intrinsics.
+include "llvm/IR/Intrinsics.td"
+
+//===----------------------------------------------------------------------===//
+// Register file description - These classes are used to fill in the target
+// description classes.
+
+class RegisterClass; // Forward def
+
+class HwMode<string FS> {
+  // A string representing subtarget features that turn on this HW mode.
+  // For example, "+feat1,-feat2" will indicate that the mode is active
+  // when "feat1" is enabled and "feat2" is disabled at the same time.
+  // Any other features are not checked.
+  // When multiple modes are used, they should be mutually exclusive,
+  // otherwise the results are unpredictable.
+  string Features = FS;
+}
+
+// A special mode recognized by tablegen. This mode is considered active
+// when no other mode is active. For targets that do not use specific hw
+// modes, this is the only mode.
+def DefaultMode : HwMode<"">;
+
+// A class used to associate objects with HW modes. It is only intended to
+// be used as a base class, where the derived class should contain a member
+// "Objects", which is a list of the same length as the list of modes.
+// The n-th element on the Objects list will be associated with the n-th
+// element on the Modes list.
+class HwModeSelect<list<HwMode> Ms> {
+  list<HwMode> Modes = Ms;
+}
+
+// A common class that implements a counterpart of ValueType, which is
+// dependent on a HW mode. This class inherits from ValueType itself,
+// which makes it possible to use objects of this class where ValueType
+// objects could be used. This is specifically applicable to selection
+// patterns.
+class ValueTypeByHwMode<list<HwMode> Ms, list<ValueType> Ts>
+    : HwModeSelect<Ms>, ValueType<0, 0> {
+  // The length of this list must be the same as the length of Ms.
+  list<ValueType> Objects = Ts;
+}
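+
+// A short sketch (the mode and feature names are hypothetical):
+//
+//   def Mode64 : HwMode<"+64bit">;
+//   def XLenVT : ValueTypeByHwMode<[Mode64, DefaultMode], [i64, i32]>;
+//
+// XLenVT can then be used wherever a ValueType is expected; it resolves to
+// i64 when the "+64bit" feature is enabled and to i32 otherwise.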
+
+// A class representing the register size, spill size and spill alignment
+// in bits of a register.
+class RegInfo<int RS, int SS, int SA> {
+  int RegSize = RS;         // Register size in bits.
+  int SpillSize = SS;       // Spill slot size in bits.
+  int SpillAlignment = SA;  // Spill slot alignment in bits.
+}
+
+// The register size/alignment information, parameterized by a HW mode.
+class RegInfoByHwMode<list<HwMode> Ms = [], list<RegInfo> Ts = []>
+    : HwModeSelect<Ms> {
+  // The length of this list must be the same as the length of Ms.
+  list<RegInfo> Objects = Ts;
+}
+
+// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
+class SubRegIndex<int size, int offset = 0> {
+  string Namespace = "";
+
+  // Size - Size (in bits) of the sub-registers represented by this index.
+  int Size = size;
+
+  // Offset - Offset of the first bit that is part of this sub-register index.
+  // Set it to -1 if the same index is used to represent sub-registers that can
+  // be at different offsets (for example when using an index to access an
+  // element in a register tuple).
+  int Offset = offset;
+
+  // ComposedOf - A list of two SubRegIndex instances, [A, B].
+  // This indicates that this SubRegIndex is the result of composing A and B.
+  // See ComposedSubRegIndex.
+  list<SubRegIndex> ComposedOf = [];
+
+  // CoveringSubRegIndices - A list of two or more sub-register indexes that
+  // cover this sub-register.
+  //
+  // This field should normally be left blank as TableGen can infer it.
+  //
+  // TableGen automatically detects sub-registers that straddle the registers
+  // in the SubRegs field of a Register definition. For example:
+  //
+  //   Q0    = dsub_0 -> D0, dsub_1 -> D1
+  //   Q1    = dsub_0 -> D2, dsub_1 -> D3
+  //   D1_D2 = dsub_0 -> D1, dsub_1 -> D2
+  //   QQ0   = qsub_0 -> Q0, qsub_1 -> Q1
+  //
+  // TableGen will infer that D1_D2 is a sub-register of QQ0. It will be given
+  // the synthetic index dsub_1_dsub_2 unless some SubRegIndex is defined with
+  // CoveringSubRegIndices = [dsub_1, dsub_2].
+  list<SubRegIndex> CoveringSubRegIndices = [];
+}
+
+// ComposedSubRegIndex - A sub-register that is the result of composing A and B.
+// Offset is set to the sum of A and B's Offsets. Size is set to B's Size.
+class ComposedSubRegIndex<SubRegIndex A, SubRegIndex B>
+  : SubRegIndex<B.Size, !if(!eq(A.Offset, -1), -1,
+                        !if(!eq(B.Offset, -1), -1,
+                            !add(A.Offset, B.Offset)))> {
+  // See SubRegIndex.
+  let ComposedOf = [A, B];
+}
+
+// RegAltNameIndex - The alternate name set to use for register operands of
+// this register class when printing.
+class RegAltNameIndex {
+  string Namespace = "";
+}
+def NoRegAltName : RegAltNameIndex;
+
+// Register - You should define one instance of this class for each register
+// in the target machine.  String n will become the "name" of the register.
+class Register<string n, list<string> altNames = []> {
+  string Namespace = "";
+  string AsmName = n;
+  list<string> AltNames = altNames;
+
+  // Aliases - A list of registers that this register overlaps with.  A read or
+  // modification of this register can potentially read or modify the aliased
+  // registers.
+  list<Register> Aliases = [];
+
+  // SubRegs - A list of registers that are parts of this register. Note these
+  // are "immediate" sub-registers and the registers within the list do not
+  // themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX],
+  // not [AX, AH, AL].
+  list<Register> SubRegs = [];
+
+  // SubRegIndices - For each register in SubRegs, specify the SubRegIndex used
+  // to address it. Sub-sub-register indices are automatically inherited from
+  // SubRegs.
+  list<SubRegIndex> SubRegIndices = [];
+
+  // RegAltNameIndices - The alternate name indices which are valid for this
+  // register.
+  list<RegAltNameIndex> RegAltNameIndices = [];
+
+  // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
+  // These values can be determined by locating the <target>.h file in the
+  // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES.  The
+  // order of these names corresponds to the enumeration used by gcc.  A value
+  // of -1 indicates that the gcc number is undefined and -2 that the register
+  // is invalid for this mode/flavour.
+  list<int> DwarfNumbers = [];
+
+  // CostPerUse - Additional cost of instructions using this register compared
+  // to other registers in its class. The register allocator will try to
+  // minimize the number of instructions using a register with a CostPerUse.
+  // This is used by the x86-64 and ARM Thumb targets where some registers
+  // require larger instruction encodings.
+  int CostPerUse = 0;
+
+  // CoveredBySubRegs - When this bit is set, the value of this register is
+  // completely determined by the value of its sub-registers.  For example, the
+  // x86 register AX is covered by its sub-registers AL and AH, but EAX is not
+  // covered by its sub-register AX.
+  bit CoveredBySubRegs = 0;
+
+  // HWEncoding - The target specific hardware encoding for this register.
+  bits<16> HWEncoding = 0;
+
+  bit isArtificial = 0;
+}
+
+// RegisterWithSubRegs - This can be used to define instances of Register which
+// need to specify sub-registers.
+// List "subregs" specifies which registers are sub-registers to this one. This
+// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc.
+// This allows the code generator to be careful not to put two values with
+// overlapping live ranges into registers which alias.
+class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
+  let SubRegs = subregs;
+}
+
+// DAGOperand - An empty base class that unifies RegisterClass's and other forms
+// of Operand's that are legal as type qualifiers in DAG patterns.  This should
+// only ever be used for defining multiclasses that are polymorphic over both
+// RegisterClass's and other Operand's.
+class DAGOperand {
+  string OperandNamespace = "MCOI";
+  string DecoderMethod = "";
+}
+
+// RegisterClass - Now that all of the registers are defined, and aliases
+// between registers are defined, specify which registers belong to which
+// register classes.  This also defines the default allocation order of
+// registers by register allocators.
+//
+class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
+                    dag regList, RegAltNameIndex idx = NoRegAltName>
+  : DAGOperand {
+  string Namespace = namespace;
+
+  // The register size/alignment information, parameterized by a HW mode.
+  RegInfoByHwMode RegInfos;
+
+  // RegTypes - Specify the list of ValueTypes of the registers in this
+  // register class.  Note that all registers in a register class must have
+  // the same ValueTypes.  This is a list because some targets permit storing
+  // different types in the same register, for example vector values with
+  // 128-bit total size, but a different count/size of items, like SSE on x86.
+  //
+  list<ValueType> RegTypes = regTypes;
+
+  // Size - Specify the spill size in bits of the registers.  A default value of
+  // zero lets tablegen pick an appropriate size.
+  int Size = 0;
+
+  // Alignment - Specify the alignment required of the registers when they are
+  // stored or loaded to memory.
+  //
+  int Alignment = alignment;
+
+  // CopyCost - This value is used to specify the cost of copying a value
+  // between two registers in this register class. The default value is one
+  // meaning it takes a single instruction to perform the copying. A negative
+  // value means copying is extremely expensive or impossible.
+  int CopyCost = 1;
+
+  // MemberList - Specify which registers are in this class.  If the
+  // allocation_order_* methods are not specified, this also defines the order of
+  // allocation used by the register allocator.
+  //
+  dag MemberList = regList;
+
+  // AltNameIndex - The alternate register name to use when printing operands
+  // of this register class. Every register in the register class must have
+  // a valid alternate name for the given index.
+  RegAltNameIndex altNameIndex = idx;
+
+  // isAllocatable - Specify that the register class can be used for virtual
+  // registers and register allocation.  Some register classes are only used to
+  // model instruction operand constraints, and should have isAllocatable = 0.
+  bit isAllocatable = 1;
+
+  // AltOrders - List of alternative allocation orders. The default order is
+  // MemberList itself, and that is good enough for most targets since the
+  // register allocators automatically remove reserved registers and move
+  // callee-saved registers to the end.
+  list<dag> AltOrders = [];
+
+  // AltOrderSelect - The body of a function that selects the allocation order
+  // to use in a given machine function. The code will be inserted in a
+  // function like this:
+  //
+  //   static inline unsigned f(const MachineFunction &MF) { ... }
+  //
+  // The function should return 0 to select the default order defined by
+  // MemberList, 1 to select the first AltOrders entry and so on.
+  code AltOrderSelect = [{}];
+
+  // Specify allocation priority for register allocators using a greedy
+  // heuristic. Classes with higher priority values are assigned first. This is
+  // useful as it is sometimes beneficial to assign registers to highly
+  // constrained classes first. The value has to be in the range [0,63].
+  int AllocationPriority = 0;
+
+  // The diagnostic type to present when referencing this operand in a match
+  // failure error message. If this is empty, the default Match_InvalidOperand
+  // diagnostic type will be used. If this is "<name>", a Match_<name> enum
+  // value will be generated and used for this operand type. The target
+  // assembly parser is responsible for converting this into a user-facing
+  // diagnostic message.
+  string DiagnosticType = "";
+
+  // A diagnostic message to emit when an invalid value is provided for this
+  // register class when it is being used as an assembly operand. If this is
+  // non-empty, an anonymous diagnostic type enum value will be generated, and
+  // the assembly matcher will provide a function to map from diagnostic types
+  // to message strings.
+  string DiagnosticString = "";
+}
+
+// The memberList in a RegisterClass is a dag of set operations. TableGen
+// evaluates these set operations and expands them into register lists. These
+// are the most common operations; see test/TableGen/SetTheory.td for more
+// examples of what is possible:
+//
+// (add R0, R1, R2) - Set Union. Each argument can be an individual register, a
+// register class, or a sub-expression. This is also the way to simply list
+// registers.
+//
+// (sub GPR, SP) - Set difference. Subtract the last arguments from the first.
+//
+// (and GPR, CSR) - Set intersection. All registers from the first set that are
+// also in the second set.
+//
+// (sequence "R%u", 0, 15) -> [R0, R1, ..., R15]. Generate a sequence of
+// numbered registers.  Takes an optional 4th operand which is a stride to use
+// when generating the sequence.
+//
+// (shl GPR, 4) - Remove the first N elements.
+//
+// (trunc GPR, 4) - Truncate after the first N elements.
+//
+// (rotl GPR, 1) - Rotate N places to the left.
+//
+// (rotr GPR, 1) - Rotate N places to the right.
+//
+// (decimate GPR, 2) - Pick every N'th element, starting with the first.
+//
+// (interleave A, B, ...) - Interleave the elements from each argument list.
+//
+// All of these operators work on ordered sets, not lists. That means
+// duplicates are removed from sub-expressions.
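+//
+// A short sketch combining these operators (register defs and the other
+// RegisterClass parameters are elided):
+//
+//   def GPR     : RegisterClass<..., (add (sequence "R%u", 0, 15), SP)>;
+//   def GPRnosp : RegisterClass<..., (sub GPR, SP)>;
+//   def GPReven : RegisterClass<..., (decimate GPR, 2)>;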
+
+// Set operators. The rest is defined in TargetSelectionDAG.td.
+def sequence;
+def decimate;
+def interleave;
+
+// RegisterTuples - Automatically generate super-registers by forming tuples of
+// sub-registers. This is useful for modeling register sequence constraints
+// with pseudo-registers that are larger than the architectural registers.
+//
+// The sub-register lists are zipped together:
+//
+//   def EvenOdd : RegisterTuples<[sube, subo], [(add R0, R2), (add R1, R3)]>;
+//
+// Generates the same registers as:
+//
+//   let SubRegIndices = [sube, subo] in {
+//     def R0_R1 : RegisterWithSubRegs<"", [R0, R1]>;
+//     def R2_R3 : RegisterWithSubRegs<"", [R2, R3]>;
+//   }
+//
+// The generated pseudo-registers inherit super-classes and fields from their
+// first sub-register. Most fields from the Register class are inferred, and
+// the AsmName and Dwarf numbers are cleared.
+//
+// RegisterTuples instances can be used in other set operations to form
+// register classes and so on. This is the only way of using the generated
+// registers.
+class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs> {
+  // SubRegs - N lists of registers to be zipped up. Super-registers are
+  // synthesized from the first element of each SubRegs list, the second
+  // element and so on.
+  list<dag> SubRegs = Regs;
+
+  // SubRegIndices - N SubRegIndex instances. This provides the names of the
+  // sub-registers in the synthesized super-registers.
+  list<SubRegIndex> SubRegIndices = Indices;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DwarfRegNum - This class provides a mapping of the llvm register enumeration
+// to the register numbering used by gcc and gdb.  These values are used by a
+// debug information writer to describe where values may be located during
+// execution.
+class DwarfRegNum<list<int> Numbers> {
+  // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
+  // These values can be determined by locating the <target>.h file in the
+  // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES.  The
+  // order of these names corresponds to the enumeration used by gcc.  A value
+  // of -1 indicates that the gcc number is undefined and -2 that the register
+  // is invalid for this mode/flavour.
+  list<int> DwarfNumbers = Numbers;
+}
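+
+// For example (hypothetical register), a register and its dwarf number can be
+// declared together using multiple inheritance:
+//
+//   def R0 : Register<"r0">, DwarfRegNum<[0]>;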
+
+// DwarfRegAlias - This class declares that a given register uses the same dwarf
+// numbers as another one. This is useful for making it clear that the two
+// registers do have the same number. It also lets us build a mapping
+// from dwarf register number to llvm register.
+class DwarfRegAlias<Register reg> {
+  Register DwarfAlias = reg;
+}
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for scheduling
+//
+include "llvm/Target/TargetSchedule.td"
+
+class Predicate; // Forward def
+
+//===----------------------------------------------------------------------===//
+// Instruction set description - These classes correspond to the C++ classes in
+// the Target/TargetInstrInfo.h file.
+//
+class Instruction {
+  string Namespace = "";
+
+  dag OutOperandList;       // A dag containing the MI def operand list.
+  dag InOperandList;        // A dag containing the MI use operand list.
+  string AsmString = "";    // The .s format to print the instruction with.
+
+  // Pattern - Set to the DAG pattern for this instruction, if we know of one,
+  // otherwise, uninitialized.
+  list<dag> Pattern;
+
+  // The following state will eventually be inferred automatically from the
+  // instruction pattern.
+
+  list<Register> Uses = []; // Default to using no non-operand registers
+  list<Register> Defs = []; // Default to modifying no non-operand registers
+
+  // Predicates - List of predicates which will be turned into isel matching
+  // code.
+  list<Predicate> Predicates = [];
+
+  // Size - Size of encoded instruction, or zero if the size cannot be determined
+  // from the opcode.
+  int Size = 0;
+
+  // DecoderNamespace - The "namespace" in which this instruction exists, on
+  // targets like ARM where multiple ISA namespaces exist.
+  string DecoderNamespace = "";
+
+  // Code size, for instruction selection.
+  // FIXME: What does this actually mean?
+  int CodeSize = 0;
+
+  // Added complexity passed onto matching pattern.
+  int AddedComplexity  = 0;
+
+  // These bits capture information about the high-level semantics of the
+  // instruction.
+  bit isReturn     = 0;     // Is this instruction a return instruction?
+  bit isBranch     = 0;     // Is this instruction a branch instruction?
+  bit isIndirectBranch = 0; // Is this instruction an indirect branch?
+  bit isCompare    = 0;     // Is this instruction a comparison instruction?
+  bit isMoveImm    = 0;     // Is this instruction a move immediate instruction?
+  bit isBitcast    = 0;     // Is this instruction a bitcast instruction?
+  bit isSelect     = 0;     // Is this instruction a select instruction?
+  bit isBarrier    = 0;     // Can control flow fall through this instruction?
+  bit isCall       = 0;     // Is this instruction a call instruction?
+  bit isAdd        = 0;     // Is this instruction an add instruction?
+  bit canFoldAsLoad = 0;    // Can this be folded as a simple memory operand?
+  bit mayLoad      = ?;     // Is it possible for this inst to read memory?
+  bit mayStore     = ?;     // Is it possible for this inst to write memory?
+  bit isConvertibleToThreeAddress = 0;  // Can this 2-addr instruction promote?
+  bit isCommutable = 0;     // Is this 3 operand instruction commutable?
+  bit isTerminator = 0;     // Is this part of the terminator for a basic block?
+  bit isReMaterializable = 0; // Is this instruction re-materializable?
+  bit isPredicable = 0;     // Is this instruction predicable?
+  bit hasDelaySlot = 0;     // Does this instruction have a delay slot?
+  bit usesCustomInserter = 0; // Pseudo instr needing special help.
+  bit hasPostISelHook = 0;  // To be *adjusted* after isel by target hook.
+  bit hasCtrlDep   = 0;     // Does this instruction r/w ctrl-flow chains?
+  bit isNotDuplicable = 0;  // Is it unsafe to duplicate this instruction?
+  bit isConvergent = 0;     // Is this instruction convergent?
+  bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction.
+  bit hasExtraSrcRegAllocReq = 0; // Sources have special regalloc requirement?
+  bit hasExtraDefRegAllocReq = 0; // Defs have special regalloc requirement?
+  bit isRegSequence = 0;    // Is this instruction a kind of reg sequence?
+                            // If so, make sure to override
+                            // TargetInstrInfo::getRegSequenceLikeInputs.
+  bit isPseudo     = 0;     // Is this instruction a pseudo-instruction?
+                            // If so, won't have encoding information for
+                            // the [MC]CodeEmitter stuff.
+  bit isExtractSubreg = 0;  // Is this instruction a kind of extract subreg?
+                             // If so, make sure to override
+                             // TargetInstrInfo::getExtractSubregLikeInputs.
+  bit isInsertSubreg = 0;   // Is this instruction a kind of insert subreg?
+                            // If so, make sure to override
+                            // TargetInstrInfo::getInsertSubregLikeInputs.
+
+  // Does the instruction have side effects that are not captured by any
+  // operands of the instruction or other flags?
+  bit hasSideEffects = ?;
+
+  // Is this instruction a "real" instruction (with a distinct machine
+  // encoding), or is it a pseudo instruction used for codegen modeling
+  // purposes.
+  // FIXME: For now this is distinct from isPseudo, above, as code-gen-only
+  // instructions can (and often do) still have encoding information
+  // associated with them. Once we've migrated all of them over to true
+  // pseudo-instructions that are lowered to real instructions prior to
+  // the printer/emitter, we can remove this attribute and just use isPseudo.
+  //
+  // The intended use is:
+  // isPseudo: Does not have encoding information and should be expanded,
+  //   at the latest, during lowering to MCInst.
+  //
+  // isCodeGenOnly: Does have encoding information and can go through to the
+  //   CodeEmitter unchanged, but duplicates a canonical instruction
+  //   definition's encoding and should be ignored when constructing the
+  //   assembler match tables.
+  bit isCodeGenOnly = 0;
+
+  // Is this instruction a pseudo instruction for use by the assembler parser.
+  bit isAsmParserOnly = 0;
+
+  // This instruction is not expected to be queried for scheduling latencies
+  // and therefore needs no scheduling information even for a complete
+  // scheduling model.
+  bit hasNoSchedulingInfo = 0;
+
+  InstrItinClass Itinerary = NoItinerary; // Execution steps used for scheduling.
+
+  // Scheduling information from TargetSchedule.td.
+  list<SchedReadWrite> SchedRW;
+
+  string Constraints = "";  // OperandConstraint, e.g. $src = $dst.
+
+  /// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not
+  /// be encoded into the output machineinstr.
+  string DisableEncoding = "";
+
+  string PostEncoderMethod = "";
+  string DecoderMethod = "";
+
+  // Is the instruction decoder method able to completely determine if the
+  // given instruction is valid or not. If the TableGen definition of the
+  // instruction specifies bitpattern A??B where A and B are static bits, the
+  // hasCompleteDecoder flag says whether the decoder method fully handles the
+  // ?? space, i.e. if it is a final arbiter for the instruction validity.
+  // If not then the decoder attempts to continue decoding when the decoder
+  // method fails.
+  //
+  // This makes it possible to handle situations where the encoding is not
+  // fully orthogonal. Example:
+  // * InstA with bitpattern 0b0000????,
+  // * InstB with bitpattern 0b000000?? but the associated decoder method
+  //   DecodeInstB() returns Fail when ?? is 0b00 or 0b11.
+  //
+  // The decoder tries to decode a bitpattern that matches both InstA and
+  // InstB bitpatterns first as InstB (because it is the most specific
+  // encoding). In the default case (hasCompleteDecoder = 1), when
+  // DecodeInstB() returns Fail the bitpattern gets rejected. By setting
+  // hasCompleteDecoder = 0 in InstB, the decoder is informed that
+  // DecodeInstB() is not able to determine if all possible values of ?? are
+  // valid or not. If DecodeInstB() returns Fail the decoder will attempt to
+  // decode the bitpattern as InstA too.
+  bit hasCompleteDecoder = 1;
+
+  /// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
+  bits<64> TSFlags = 0;
+
+  ///@name Assembler Parser Support
+  ///@{
+
+  string AsmMatchConverter = "";
+
+  /// TwoOperandAliasConstraint - Enable TableGen to auto-generate a
+  /// two-operand matcher inst-alias for a three operand instruction.
+  /// For example, the arm instruction "add r3, r3, r5" can be written
+  /// as "add r3, r5". The constraint is of the same form as a tied-operand
+  /// constraint. For example, "$Rn = $Rd".
+  string TwoOperandAliasConstraint = "";
+
+  /// Assembler variant name to use for this instruction. If specified, the
+  /// instruction will be presented only in the MatchTable for this variant.
+  /// If not specified, assembler variants will be determined based on the
+  /// AsmString.
+  string AsmVariantName = "";
+
+  ///@}
+
+  /// UseNamedOperandTable - If set, the operand indices of this instruction
+  /// can be queried via the getNamedOperandIdx() function which is generated
+  /// by TableGen.
+  bit UseNamedOperandTable = 0;
+}
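+
+// As a hedged sketch (the GPR register class, mnemonic, and pattern below are
+// hypothetical, not part of this file), a target's instruction definition
+// typically fills in the operand lists, assembly string, and selection
+// pattern:
+//
+//   def ADDrr : Instruction {
+//     let Namespace = "MyTarget";
+//     let OutOperandList = (outs GPR:$dst);
+//     let InOperandList = (ins GPR:$src1, GPR:$src2);
+//     let AsmString = "add $dst, $src1, $src2";
+//     let Pattern = [(set GPR:$dst, (add GPR:$src1, GPR:$src2))];
+//   }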
+
+/// PseudoInstExpansion - Expansion information for a pseudo-instruction.
+/// Which instruction it expands to and how the operands map from the
+/// pseudo.
+class PseudoInstExpansion<dag Result> {
+  dag ResultInst = Result;     // The instruction to generate.
+  bit isPseudo = 1;
+}
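+
+// A minimal sketch (MyPseudoInst, BR, and LR are hypothetical names): a
+// return pseudo expanded to a real branch through the link register during
+// MCInst lowering:
+//
+//   def RET : MyPseudoInst<...>, PseudoInstExpansion<(BR LR)>;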
+
+/// Predicates - These are extra conditionals which are turned into instruction
+/// selector matching code. Currently each predicate is just a string.
+class Predicate<string cond> {
+  string CondString = cond;
+
+  /// AssemblerMatcherPredicate - If this feature can be used by the assembler
+  /// matcher, this is true.  Targets should set this by inheriting their
+  /// feature from the AssemblerPredicate class in addition to Predicate.
+  bit AssemblerMatcherPredicate = 0;
+
+  /// AssemblerCondString - Name of the subtarget feature being tested, used
+  /// as an alternative condition string for the assembler matcher.
+  /// e.g. "ModeThumb" is translated to "(Bits & ModeThumb) != 0".
+  ///      "!ModeThumb" is translated to "(Bits & ModeThumb) == 0".
+  /// It can also list multiple features separated by ",".
+  /// e.g. "ModeThumb,FeatureThumb2" is translated to
+  ///      "(Bits & ModeThumb) != 0 && (Bits & FeatureThumb2) != 0".
+  string AssemblerCondString = "";
+
+  /// PredicateName - User-level name to use for the predicate. Mainly for use
+  /// in diagnostics such as missing feature errors in the asm matcher.
+  string PredicateName = "";
+
+  /// Setting this to '1' indicates that the predicate must be recomputed on
+  /// every function change. Most predicates can leave this at '0'.
+  ///
+  /// Ignored by SelectionDAG, it always recomputes the predicate on every use.
+  bit RecomputePerFunction = 0;
+}
+
+/// NoHonorSignDependentRounding - This predicate is true if support for
+/// sign-dependent-rounding is not enabled.
+def NoHonorSignDependentRounding
+ : Predicate<"!TM.Options.HonorSignDependentRoundingFPMath()">;
+
+class Requires<list<Predicate> preds> {
+  list<Predicate> Predicates = preds;
+}
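+
+// Typical usage, sketched with made-up names: a predicate gating an
+// instruction on a subtarget feature:
+//
+//   def HasFPU : Predicate<"Subtarget->hasFPU()">;
+//   def FADDrr : MyFPInstruction<...>, Requires<[HasFPU]>;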
+
+/// ops definition - This is just a simple marker used to identify the operand
+/// list for an instruction. outs and ins are identical both syntactically and
+/// semantically; they are used to define def operands and use operands to
+/// improve readability. This should be used like this:
+///     (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar.
+def ops;
+def outs;
+def ins;
+
+/// variable_ops definition - Mark this instruction as taking a variable number
+/// of operands.
+def variable_ops;
+
+
+/// PointerLikeRegClass - Values that are designed to have pointer width are
+/// derived from this.  TableGen treats the register class as having a symbolic
+/// type that it doesn't know, and resolves the actual regclass to use by using
+/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time.
+class PointerLikeRegClass<int Kind> {
+  int RegClassKind = Kind;
+}
+
+
+/// ptr_rc definition - Mark this operand as being a pointer value whose
+/// register class is resolved dynamically via a callback to TargetInstrInfo.
+/// FIXME: We should probably change this to a class which contains a list of
+/// flags. But currently we have but one flag.
+def ptr_rc : PointerLikeRegClass<0>;
+
+/// unknown definition - Mark this operand as being of unknown type, causing
+/// it to be resolved by inference in the context it is used.
+class unknown_class;
+def unknown : unknown_class;
+
+/// AsmOperandClass - Representation for the kinds of operands which the target
+/// specific parser can create and the assembly matcher may need to distinguish.
+///
+/// Operand classes are used to define the order in which instructions are
+/// matched, to ensure that the instruction which gets matched for any
+/// particular list of operands is deterministic.
+///
+/// The target specific parser must be able to classify a parsed operand into a
+/// unique class which does not partially overlap with any other classes. It can
+/// match a subset of some other class, in which case the super class field
+/// should be defined.
+class AsmOperandClass {
+  /// The name to use for this class, which should be usable as an enum value.
+  string Name = ?;
+
+  /// The super classes of this operand.
+  list<AsmOperandClass> SuperClasses = [];
+
+  /// The name of the method on the target specific operand to call to test
+  /// whether the operand is an instance of this class. If not set, this will
+  /// default to "isFoo", where Foo is the AsmOperandClass name. The method
+  /// signature should be:
+  ///   bool isFoo() const;
+  string PredicateMethod = ?;
+
+  /// The name of the method on the target specific operand to call to add the
+  /// target specific operand to an MCInst. If not set, this will default to
+  /// "addFooOperands", where Foo is the AsmOperandClass name. The method
+  /// signature should be:
+  ///   void addFooOperands(MCInst &Inst, unsigned N) const;
+  string RenderMethod = ?;
+
+  /// The name of the method on the target specific operand to call to custom
+  /// handle the operand parsing. This is useful when the operands do not relate
+  /// to immediates or registers and are very instruction specific (as flags to
+  /// set in a processor register, coprocessor number, ...).
+  string ParserMethod = ?;
+
+  // The diagnostic type to present when referencing this operand in a
+  // match failure error message. By default, use a generic "invalid operand"
+  // diagnostic. The target AsmParser maps these codes to text.
+  string DiagnosticType = "";
+
+  /// A diagnostic message to emit when an invalid value is provided for this
+  /// operand.
+  string DiagnosticString = "";
+
+  /// Set to 1 if this operand is optional and not always required. Typically,
+  /// the AsmParser will emit an error when it finishes parsing an
+  /// instruction if it hasn't matched all the operands yet.  However, this
+  /// error will be suppressed if all of the remaining unmatched operands are
+  /// marked as IsOptional.
+  ///
+  /// Optional arguments must be at the end of the operand list.
+  bit IsOptional = 0;
+
+  /// The name of the method on the target specific asm parser that returns the
+  /// default operand for this optional operand. This method is only used if
+  /// IsOptional == 1. If not set, this will default to "defaultFooOperands",
+  /// where Foo is the AsmOperandClass name. The method signature should be:
+  ///   std::unique_ptr<MCParsedAsmOperand> defaultFooOperands() const;
+  string DefaultMethod = ?;
+}
+
+def ImmAsmOperand : AsmOperandClass {
+  let Name = "Imm";
+}
+
+/// Operand Types - These provide the built-in operand types that may be used
+/// by a target.  Targets can optionally provide their own operand types as
+/// needed, though this should not be needed for RISC targets.
+class Operand<ValueType ty> : DAGOperand {
+  ValueType Type = ty;
+  string PrintMethod = "printOperand";
+  string EncoderMethod = "";
+  bit hasCompleteDecoder = 1;
+  string OperandType = "OPERAND_UNKNOWN";
+  dag MIOperandInfo = (ops);
+
+  // MCOperandPredicate - Optionally, a code fragment operating on
+  // const MCOperand &MCOp, and returning a bool, to indicate if
+  // the value of MCOp is valid for the specific subclass of Operand
+  code MCOperandPredicate;
+
+  // ParserMatchClass - The "match class" that operands of this type fit
+  // in. Match classes are used to define the order in which instructions are
+  // matched, to ensure that which instruction gets matched is deterministic.
+  //
+  // The target specific parser must be able to classify a parsed operand into
+  // a unique class, which does not partially overlap with any other classes. It
+  // can match a subset of some other class, in which case the AsmOperandClass
+  // should declare the other operand as one of its super classes.
+  AsmOperandClass ParserMatchClass = ImmAsmOperand;
+}
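+
+// An illustrative custom operand (all names below are hypothetical): a branch
+// target with its own print method and asm match class:
+//
+//   def BrTargetAsmOperand : AsmOperandClass { let Name = "BrTarget"; }
+//   def brtarget : Operand<OtherVT> {
+//     let PrintMethod = "printBranchTarget";
+//     let ParserMatchClass = BrTargetAsmOperand;
+//   }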
+
+class RegisterOperand<RegisterClass regclass, string pm = "printOperand">
+  : DAGOperand {
+  // RegClass - The register class of the operand.
+  RegisterClass RegClass = regclass;
+  // PrintMethod - The target method to call to print register operands of
+  // this type. The method will normally just use an alt-name index to look
+  // up the name to print. Defaults to the generic printOperand().
+  string PrintMethod = pm;
+
+  // EncoderMethod - The target method name to call to encode this register
+  // operand.
+  string EncoderMethod = "";
+
+  // ParserMatchClass - The "match class" that operands of this type fit
+  // in. Match classes are used to define the order in which instructions are
+  // matched, to ensure that which instruction gets matched is deterministic.
+  //
+  // The target specific parser must be able to classify a parsed operand into
+  // a unique class, which does not partially overlap with any other classes. It
+  // can match a subset of some other class, in which case the AsmOperandClass
+  // should declare the other operand as one of its super classes.
+  AsmOperandClass ParserMatchClass;
+
+  string OperandType = "OPERAND_REGISTER";
+
+  // When referenced in the result of a CodeGen pattern, GlobalISel will
+  // normally copy the matched operand to the result. When this is set, it will
+  // emit a special copy that will replace zero-immediates with the specified
+  // zero-register.
+  Register GIZeroRegister = ?;
+}
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+def i1imm  : Operand<i1>;
+def i8imm  : Operand<i8>;
+def i16imm : Operand<i16>;
+def i32imm : Operand<i32>;
+def i64imm : Operand<i64>;
+
+def f32imm : Operand<f32>;
+def f64imm : Operand<f64>;
+}
+
+// Register operands for generic instructions don't have an MVT, but do have
+// constraints linking the operands (e.g. all operands of a G_ADD must
+// have the same LLT).
+class TypedOperand<string Ty> : Operand<untyped> {
+  let OperandType = Ty;
+  bit IsPointer = 0;
+}
+
+def type0 : TypedOperand<"OPERAND_GENERIC_0">;
+def type1 : TypedOperand<"OPERAND_GENERIC_1">;
+def type2 : TypedOperand<"OPERAND_GENERIC_2">;
+def type3 : TypedOperand<"OPERAND_GENERIC_3">;
+def type4 : TypedOperand<"OPERAND_GENERIC_4">;
+def type5 : TypedOperand<"OPERAND_GENERIC_5">;
+
+let IsPointer = 1 in {
+  def ptype0 : TypedOperand<"OPERAND_GENERIC_0">;
+  def ptype1 : TypedOperand<"OPERAND_GENERIC_1">;
+  def ptype2 : TypedOperand<"OPERAND_GENERIC_2">;
+  def ptype3 : TypedOperand<"OPERAND_GENERIC_3">;
+  def ptype4 : TypedOperand<"OPERAND_GENERIC_4">;
+  def ptype5 : TypedOperand<"OPERAND_GENERIC_5">;
+}
+
+/// zero_reg definition - Special node to stand for the zero register.
+///
+def zero_reg;
+
+/// All operands which the MC layer classifies as predicates should inherit from
+/// this class in some manner. This is already handled for the most commonly
+/// used PredicateOperand, but may be useful in other circumstances.
+class PredicateOp;
+
+/// OperandWithDefaultOps - This Operand class can be used as the parent class
+/// for an Operand that needs to be initialized with a default value if
+/// no value is supplied in a pattern.  This class can be used to simplify the
+/// pattern definitions for instructions that have target specific flags
+/// encoded as immediate operands.
+class OperandWithDefaultOps<ValueType ty, dag defaultops>
+  : Operand<ty> {
+  dag DefaultOps = defaultops;
+}
+
+/// PredicateOperand - This can be used to define a predicate operand for an
+/// instruction.  OpTypes specifies the MIOperandInfo for the operand, and
+/// AlwaysVal specifies the value of this predicate when set to "always
+/// execute".
+class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
+  : OperandWithDefaultOps<ty, AlwaysVal>, PredicateOp {
+  let MIOperandInfo = OpTypes;
+}
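+
+// A sketch loosely modeled on ARM-style condition codes (the operand and
+// register names are illustrative): the AlwaysVal dag supplies the "always
+// execute" encoding used when a pattern omits the predicate:
+//
+//   def pred : PredicateOperand<OtherVT, (ops i32imm, CCR),
+//                               (ops (i32 14), zero_reg)>;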
+
+/// OptionalDefOperand - This is used to define a optional definition operand
+/// for an instruction. DefaultOps is the register the operand represents if
+/// none is supplied, e.g. zero_reg.
+class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
+  : OperandWithDefaultOps<ty, defaultops> {
+  let MIOperandInfo = OpTypes;
+}
+
+
+// InstrInfo - This class should only be instantiated once to provide parameters
+// which are global to the target machine.
+//
+class InstrInfo {
+  // Target can specify its instructions in either big or little-endian formats.
+  // For instance, while both Sparc and PowerPC are big-endian platforms, the
+  // Sparc manual specifies its instructions in the format [31..0] (big), while
+  // PowerPC specifies them using the format [0..31] (little).
+  bit isLittleEndianEncoding = 0;
+
+  // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
+  // by default, and TableGen will infer their value from the instruction
+  // pattern when possible.
+  //
+  // Normally, TableGen will issue an error if it can't infer the value of a
+  // property that hasn't been set explicitly. When guessInstructionProperties
+  // is set, it will guess a safe value instead.
+  //
+  // This option is a temporary migration help. It will go away.
+  bit guessInstructionProperties = 1;
+
+  // TableGen's instruction encoder generator has support for matching operands
+  // to bit-field variables both by name and by position. While matching by
+  // name is preferred, this is currently not possible for complex operands,
+  // and some targets still rely on the positional encoding rules. When
+  // generating a decoder for such targets, the positional encoding rules must
+  // be used by the decoder generator as well.
+  //
+  // This option is temporary; it will go away once the TableGen decoder
+  // generator has better support for complex operands and targets have
+  // migrated away from using positionally encoded operands.
+  bit decodePositionallyEncodedOperands = 0;
+
+  // When set, this indicates that there will be no overlap between those
+  // operands that are matched by ordering (positional operands) and those
+  // matched by name.
+  //
+  // This option is temporary; it will go away once the TableGen decoder
+  // generator has better support for complex operands and targets have
+  // migrated away from using positionally encoded operands.
+  bit noNamedPositionallyEncodedOperands = 0;
+}
+
+// Standard Pseudo Instructions.
+// This list must match TargetOpcodes.h and CodeGenTarget.cpp.
+// Only these instructions are allowed in the TargetOpcode namespace.
+// Ensure mayLoad and mayStore have a default value, so as not to break
+// targets that set guessInstructionProperties=0. Any local definition of
+// mayLoad/mayStore takes precedence over these default values.
+class StandardPseudoInstruction : Instruction {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let isCodeGenOnly = 1;
+  let isPseudo = 1;
+  let hasNoSchedulingInfo = 1;
+  let Namespace = "TargetOpcode";
+}
+def PHI : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "PHINODE";
+  let hasSideEffects = 0;
+}
+def INLINEASM : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "";
+  let hasSideEffects = 0;  // Note side effect is encoded in an operand.
+}
+def CFI_INSTRUCTION : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "";
+  let hasCtrlDep = 1;
+  let hasSideEffects = 0;
+  let isNotDuplicable = 1;
+}
+def EH_LABEL : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "";
+  let hasCtrlDep = 1;
+  let hasSideEffects = 0;
+  let isNotDuplicable = 1;
+}
+def GC_LABEL : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "";
+  let hasCtrlDep = 1;
+  let hasSideEffects = 0;
+  let isNotDuplicable = 1;
+}
+def ANNOTATION_LABEL : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "";
+  let hasCtrlDep = 1;
+  let hasSideEffects = 0;
+  let isNotDuplicable = 1;
+}
+def KILL : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "";
+  let hasSideEffects = 0;
+}
+def EXTRACT_SUBREG : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
+  let AsmString = "";
+  let hasSideEffects = 0;
+}
+def INSERT_SUBREG : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
+  let AsmString = "";
+  let hasSideEffects = 0;
+  let Constraints = "$supersrc = $dst";
+}
+def IMPLICIT_DEF : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins);
+  let AsmString = "";
+  let hasSideEffects = 0;
+  let isReMaterializable = 1;
+  let isAsCheapAsAMove = 1;
+}
+def SUBREG_TO_REG : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
+  let AsmString = "";
+  let hasSideEffects = 0;
+}
+def COPY_TO_REGCLASS : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src, i32imm:$regclass);
+  let AsmString = "";
+  let hasSideEffects = 0;
+  let isAsCheapAsAMove = 1;
+}
+def DBG_VALUE : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "DBG_VALUE";
+  let hasSideEffects = 0;
+}
+def REG_SEQUENCE : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$supersrc, variable_ops);
+  let AsmString = "";
+  let hasSideEffects = 0;
+  let isAsCheapAsAMove = 1;
+}
+def COPY : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let AsmString = "";
+  let hasSideEffects = 0;
+  let isAsCheapAsAMove = 1;
+  let hasNoSchedulingInfo = 0;
+}
+def BUNDLE : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "BUNDLE";
+  let hasSideEffects = 1;
+}
+def LIFETIME_START : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "LIFETIME_START";
+  let hasSideEffects = 0;
+}
+def LIFETIME_END : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i32imm:$id);
+  let AsmString = "LIFETIME_END";
+  let hasSideEffects = 0;
+}
+def STACKMAP : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins i64imm:$id, i32imm:$nbytes, variable_ops);
+  let hasSideEffects = 1;
+  let isCall = 1;
+  let mayLoad = 1;
+  let usesCustomInserter = 1;
+}
+def PATCHPOINT : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins i64imm:$id, i32imm:$nbytes, unknown:$callee,
+                       i32imm:$nargs, i32imm:$cc, variable_ops);
+  let hasSideEffects = 1;
+  let isCall = 1;
+  let mayLoad = 1;
+  let usesCustomInserter = 1;
+}
+def STATEPOINT : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins variable_ops);
+  let usesCustomInserter = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+  let isCall = 1;
+}
+def LOAD_STACK_GUARD : StandardPseudoInstruction {
+  let OutOperandList = (outs ptr_rc:$dst);
+  let InOperandList = (ins);
+  let mayLoad = 1;
+  bit isReMaterializable = 1;
+  let hasSideEffects = 0;
+  bit isPseudo = 1;
+}
+def LOCAL_ESCAPE : StandardPseudoInstruction {
+  // This instruction is really just a label. It has to be part of the chain so
+  // that it doesn't get dropped from the DAG, but it produces nothing and has
+  // no side effects.
+  let OutOperandList = (outs);
+  let InOperandList = (ins ptr_rc:$symbol, i32imm:$id);
+  let hasSideEffects = 0;
+  let hasCtrlDep = 1;
+}
+def FAULTING_OP : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let usesCustomInserter = 1;
+  let hasSideEffects = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+  let isTerminator = 1;
+  let isBranch = 1;
+}
+def PATCHABLE_OP : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let usesCustomInserter = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+}
+def PATCHABLE_FUNCTION_ENTER : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins);
+  let AsmString = "# XRay Function Enter.";
+  let usesCustomInserter = 1;
+  let hasSideEffects = 0;
+}
+def PATCHABLE_RET : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "# XRay Function Patchable RET.";
+  let usesCustomInserter = 1;
+  let hasSideEffects = 1;
+  let isTerminator = 1;
+  let isReturn = 1;
+}
+def PATCHABLE_FUNCTION_EXIT : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins);
+  let AsmString = "# XRay Function Exit.";
+  let usesCustomInserter = 1;
+  let hasSideEffects = 0; // FIXME: is this correct?
+  let isReturn = 0; // Original return instruction will follow
+}
+def PATCHABLE_TAIL_CALL : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "# XRay Tail Call Exit.";
+  let usesCustomInserter = 1;
+  let hasSideEffects = 1;
+  let isReturn = 1;
+}
+def PATCHABLE_EVENT_CALL : StandardPseudoInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins ptr_rc:$event, i8imm:$size);
+  let AsmString = "# XRay Custom Event Log.";
+  let usesCustomInserter = 1;
+  let isCall = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+}
+def FENTRY_CALL : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "# FEntry call";
+  let usesCustomInserter = 1;
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+}
+def ICALL_BRANCH_FUNNEL : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins variable_ops);
+  let AsmString = "";
+  let hasSideEffects = 1;
+}
+
+// Generic opcodes used in GlobalISel.
+include "llvm/Target/GenericOpcodes.td"
+
+//===----------------------------------------------------------------------===//
+// AsmParser - This class can be implemented by targets that wish to implement
+// .s file parsing.
+//
+// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel
+// syntax on X86 for example).
+//
+class AsmParser {
+  // AsmParserClassName - This specifies the suffix to use for the asmparser
+  // class.  Generated AsmParser classes are always prefixed with the target
+  // name.
+  string AsmParserClassName  = "AsmParser";
+
+  // AsmParserInstCleanup - If non-empty, this is the name of a custom member
+  // function of the AsmParser class to call on every matched instruction.
+  // This can be used to perform target specific instruction post-processing.
+  string AsmParserInstCleanup  = "";
+
+  // ShouldEmitMatchRegisterName - Set to false if the target needs a
+  // hand-written register name matcher.
+  bit ShouldEmitMatchRegisterName = 1;
+
+  // Set to true if the target needs a generated 'alternative register name'
+  // matcher.
+  //
+  // This generates a function which can be used to lookup registers from
+  // their aliases. This function will fail when called on targets where
+  // several registers share the same alias (i.e. not a 1:1 mapping).
+  bit ShouldEmitMatchRegisterAltName = 0;
+
+  // Set to true if MatchRegisterName and MatchRegisterAltName functions
+  // should be generated even if there are duplicate register names. The
+  // target is responsible for coercing aliased registers as necessary
+  // (e.g. in validateTargetOperandClass), and there are no guarantees about
+  // which numeric register identifier will be returned in the case of
+  // multiple matches.
+  bit AllowDuplicateRegisterNames = 0;
+
+  // HasMnemonicFirst - Set to false if target instructions don't always
+  // start with a mnemonic as the first token.
+  bit HasMnemonicFirst = 1;
+
+  // ReportMultipleNearMisses -
+  // When 0, the assembly matcher reports an error for one encoding or operand
+  // that did not match the parsed instruction.
+  // When 1, the assembly matcher returns a list of encodings that were close
+  // to matching the parsed instruction, so as to allow more detailed error
+  // messages.
+  bit ReportMultipleNearMisses = 0;
+}
+def DefaultAsmParser : AsmParser;
+
+//===----------------------------------------------------------------------===//
+// AsmParserVariant - Subtargets can have multiple different assembly parsers
+// (e.g. AT&T vs Intel syntax on X86 for example). This class can be
+// implemented by targets to describe such variants.
+//
+class AsmParserVariant {
+  // Variant - AsmParsers can be of multiple different variants.  Variants are
+  // used to support targets that need to parse multiple formats for the
+  // assembly language.
+  int Variant = 0;
+
+  // Name - The AsmParser variant name (e.g., AT&T vs Intel).
+  string Name = "";
+
+  // CommentDelimiter - If given, the delimiter string used to recognize
+  // comments which are hard coded in the .td assembler strings for individual
+  // instructions.
+  string CommentDelimiter = "";
+
+  // RegisterPrefix - If given, the token prefix which indicates a register
+  // token. This is used by the matcher to automatically recognize hard coded
+  // register tokens as constrained registers, instead of tokens, for the
+  // purposes of matching.
+  string RegisterPrefix = "";
+
+  // TokenizingCharacters - Characters that are standalone tokens
+  string TokenizingCharacters = "[]*!";
+
+  // SeparatorCharacters - Characters that are not tokens
+  string SeparatorCharacters = " \t,";
+
+  // BreakCharacters - Characters that start new identifiers
+  string BreakCharacters = "";
+}
+def DefaultAsmParserVariant : AsmParserVariant;
+
+/// AssemblerPredicate - This is a Predicate that can be used when the assembler
+/// matches instructions and aliases.
+class AssemblerPredicate<string cond, string name = ""> {
+  bit AssemblerMatcherPredicate = 1;
+  string AssemblerCondString = cond;
+  string PredicateName = name;
+}
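+
+// Typical usage (the feature and predicate names are illustrative): inherit
+// from both Predicate and AssemblerPredicate so one condition guards both
+// isel and assembly matching:
+//
+//   def HasThumb2 : Predicate<"Subtarget->hasThumb2()">,
+//                   AssemblerPredicate<"FeatureThumb2", "thumb2">;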
+
+/// TokenAlias - This class allows targets to define assembler token
+/// operand aliases. That is, a token literal operand which is equivalent
+/// to another, canonical, token literal. For example, ARM allows:
+///   vmov.u32 s4, #0  -> vmov.i32, #0
+/// 'u32' is a more specific designator for the 32-bit integer type specifier
+/// and is legal for any instruction which accepts 'i32' as a datatype suffix.
+///   def : TokenAlias<".u32", ".i32">;
+///
+/// This works by marking the match class of 'From' as a subclass of the
+/// match class of 'To'.
+class TokenAlias<string From, string To> {
+  string FromToken = From;
+  string ToToken = To;
+}
+
+/// MnemonicAlias - This class allows targets to define assembler mnemonic
+/// aliases.  This should be used when all forms of one mnemonic are accepted
+/// with a different mnemonic.  For example, X86 allows:
+///   sal %al, 1    -> shl %al, 1
+///   sal %ax, %cl  -> shl %ax, %cl
+///   sal %eax, %cl -> shl %eax, %cl
+/// etc.  Though "sal" is accepted with many forms, all of them are directly
+/// translated to a shl, so it can be handled with a single alias (in the case
+/// of X86, there is actually one for each suffix as well):
+///   def : MnemonicAlias<"sal", "shl">;
+///
+/// Mnemonic aliases are mapped before any other translation in the match phase,
+/// and do allow Requires predicates, e.g.:
+///
+///  def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+///  def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+///
+/// Mnemonic aliases can also be constrained to specific variants, e.g.:
+///
+///  def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
+///
+/// If no variant (e.g., "att" or "intel") is specified then the alias is
+/// applied unconditionally.
+class MnemonicAlias<string From, string To, string VariantName = ""> {
+  string FromMnemonic = From;
+  string ToMnemonic = To;
+  string AsmVariantName = VariantName;
+
+  // Predicates - Predicates that must be true for this remapping to happen.
+  list<Predicate> Predicates = [];
+}
+
+/// InstAlias - This defines an alternate assembly syntax that is allowed to
+/// match an instruction that has a different (more canonical) assembly
+/// representation.
+class InstAlias<string Asm, dag Result, int Emit = 1> {
+  string AsmString = Asm;      // The .s format to match the instruction with.
+  dag ResultInst = Result;     // The MCInst to generate.
+
+  // This determines the order in which the InstPrinter detects aliases for
+  // printing. A larger value makes the alias more likely to be
+  // emitted. The Instruction's own definition is notionally 0.5, so 0
+  // disables printing and 1 enables it if there are no conflicting aliases.
+  int EmitPriority = Emit;
+
+  // Predicates - Predicates that must be true for this to match.
+  list<Predicate> Predicates = [];
+
+  // If the instruction specified in Result has defined an AsmMatchConverter
+  // then setting this to 1 will cause the alias to use the AsmMatchConverter
+  // function when converting the OperandVector into an MCInst instead of the
+  // function that is generated by the dag Result.
+  // Setting this to 0 will cause the alias to ignore the Result instruction's
+  // defined AsmMatchConverter and instead use the function generated by the
+  // dag Result.
+  bit UseInstAsmMatchConverter = 1;
+
+  // Assembler variant name to use for this alias. If not specified then
+  // assembler variants will be determined based on AsmString
+  string AsmVariantName = "";
+}
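+
+// An illustrative alias (ADDri and GPR are hypothetical): accept "mov" as an
+// add-of-zero against the canonical instruction:
+//
+//   def : InstAlias<"mov $dst, $src", (ADDri GPR:$dst, GPR:$src, 0)>;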
+
+//===----------------------------------------------------------------------===//
+// AsmWriter - This class can be implemented by targets that need to customize
+// the format of the .s file writer.
+//
+// Subtargets can have multiple different asmwriters (e.g. AT&T vs Intel syntax
+// on X86 for example).
+//
+class AsmWriter {
+  // AsmWriterClassName - This specifies the suffix to use for the asmwriter
+  // class.  Generated AsmWriter classes are always prefixed with the target
+  // name.
+  string AsmWriterClassName  = "InstPrinter";
+
+  // PassSubtarget - Determines whether MCSubtargetInfo should be passed to
+  // the various print methods.
+  // FIXME: Remove after all ports are updated.
+  int PassSubtarget = 0;
+
+  // Variant - AsmWriters can be of multiple different variants.  Variants are
+  // used to support targets that need to emit assembly code in ways that are
+  // mostly the same for different targets, but have minor differences in
+  // syntax.  If the asmstring contains {|} characters in them, this integer
+  // will specify which alternative to use.  For example "{x|y|z}" with Variant
+  // == 1, will expand to "y".
+  int Variant = 0;
+}
+def DefaultAsmWriter : AsmWriter;
+
+
+//===----------------------------------------------------------------------===//
+// Target - This class contains the "global" target information
+//
+class Target {
+  // InstructionSet - Instruction set description for this target.
+  InstrInfo InstructionSet;
+
+  // AssemblyParsers - The AsmParser instances available for this target.
+  list<AsmParser> AssemblyParsers = [DefaultAsmParser];
+
+  /// AssemblyParserVariants - The AsmParserVariant instances available for
+  /// this target.
+  list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant];
+
+  // AssemblyWriters - The AsmWriter instances available for this target.
+  list<AsmWriter> AssemblyWriters = [DefaultAsmWriter];
+
+  // AllowRegisterRenaming - Controls whether this target allows
+  // post-register-allocation renaming of registers.  This is done by
+  // setting hasExtraDefRegAllocReq and hasExtraSrcRegAllocReq to 1
+  // for all opcodes if this flag is set to 0.
+  int AllowRegisterRenaming = 0;
+}
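+
+// A sketch of the top-level wiring a backend typically provides (the names
+// are hypothetical):
+//
+//   def MyTargetInstrInfo : InstrInfo;
+//   def MyTarget : Target {
+//     let InstructionSet = MyTargetInstrInfo;
+//   }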
+
+//===----------------------------------------------------------------------===//
+// SubtargetFeature - A characteristic of the chip set.
+//
+class SubtargetFeature<string n, string a,  string v, string d,
+                       list<SubtargetFeature> i = []> {
+  // Name - Feature name.  Used by command line (-mattr=) to determine the
+  // appropriate target chip.
+  //
+  string Name = n;
+
+  // Attribute - Attribute to be set by feature.
+  //
+  string Attribute = a;
+
+  // Value - Value the attribute is to be set to by the feature.
+  //
+  string Value = v;
+
+  // Desc - Feature description.  Used by command line (-mattr=) to display help
+  // information.
+  //
+  string Desc = d;
+
+  // Implies - Features that this feature implies are present. If one of those
+  // features isn't set, then this one shouldn't be set either.
+  //
+  list<SubtargetFeature> Implies = i;
+}
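+
+// A sketch of a feature definition (the names are made up); the attribute
+// names a Subtarget field that gets set to the given value:
+//
+//   def FeatureFPU : SubtargetFeature<"fpu", "HasFPU", "true",
+//                                     "Enable hardware floating point">;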
+
+/// Specifies a Subtarget feature that this instruction is deprecated on.
+class Deprecated<SubtargetFeature dep> {
+  SubtargetFeature DeprecatedFeatureMask = dep;
+}
+
+/// A custom predicate used to determine if an instruction is
+/// deprecated or not.
+class ComplexDeprecationPredicate<string dep> {
+  string ComplexDeprecationPredicate = dep;
+}
+
+//===----------------------------------------------------------------------===//
+// Processor chip sets - These values represent each of the chip sets supported
+// by the scheduler.  Each Processor definition requires corresponding
+// instruction itineraries.
+//
+class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
+  // Name - Chip set name.  Used by command line (-mcpu=) to determine the
+  // appropriate target chip.
+  //
+  string Name = n;
+
+  // SchedModel - The machine model for scheduling and instruction cost.
+  //
+  SchedMachineModel SchedModel = NoSchedModel;
+
+  // ProcItin - The scheduling information for the target processor.
+  //
+  ProcessorItineraries ProcItin = pi;
+
+  // Features - List of subtarget features for this processor.
+  list<SubtargetFeature> Features = f;
+}
+
+// ProcessorModel allows subtargets to specify the more general
+// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
+// gradually move to this newer form.
+//
+// Although this class always passes NoItineraries to the Processor
+// class, the SchedMachineModel may still define valid Itineraries.
+class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
+  : Processor<n, NoItineraries, f> {
+  let SchedModel = m;
+}
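+
+// Illustrative chip definitions (all names hypothetical): one itinerary-based
+// Processor and one using the newer SchedMachineModel form:
+//
+//   def : Processor<"generic", GenericItineraries, [FeatureFPU]>;
+//   def : ProcessorModel<"fast-core", FastCoreModel, [FeatureFPU]>;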
+
+//===----------------------------------------------------------------------===//
+// InstrMapping - This class is used to create mapping tables to relate
+// instructions with each other based on the values specified in RowFields,
+// ColFields, KeyCol and ValueCols.
+//
+class InstrMapping {
+  // FilterClass - Used to limit search space only to the instructions that
+  // define the relationship modeled by this InstrMapping record.
+  string FilterClass;
+
+  // RowFields - List of fields/attributes that should be the same for all the
+  // instructions in a row of the relation table. Think of this as a set of
+  // properties shared by all the instructions related by this relationship
+  // model; it is used to categorize instructions into subgroups. For instance,
+  // if we want to define a relation that maps 'Add' instruction to its
+  // predicated forms, we can define RowFields like this:
+  //
+  // let RowFields = BaseOp
+  // All add instruction predicated/non-predicated will have to set their BaseOp
+  // to the same value.
+  //
+  // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
+  // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
+  // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false'  }
+  list<string> RowFields = [];
+
+  // List of fields/attributes that are the same for all the instructions
+  // in a column of the relation table.
+  // Ex: let ColFields = 'predSense' -- It means that the columns are arranged
+  // based on the 'predSense' values. All the instructions in a specific
+  // column have the same value and it is fixed for the column according
+  // to the values set in 'ValueCols'.
+  list<string> ColFields = [];
+
+  // Values for the fields/attributes listed in 'ColFields'.
+  // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction
+  // that models this relation) should be non-predicated.
+  // In the example above, 'Add' is the key instruction.
+  list<string> KeyCol = [];
+
+  // List of values for the fields/attributes listed in 'ColFields', one for
+  // each column in the relation table.
+  //
+  // Ex: let ValueCols = [['true'],['false']] -- It adds two columns in the
+  // table. The first column requires all the instructions to have predSense
+  // set to 'true' and the second column requires it to be 'false'.
+  list<list<string> > ValueCols = [];
+}
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for calling conventions.
+//
+include "llvm/Target/TargetCallingConv.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for DAG isel generation.
+//
+include "llvm/Target/TargetSelectionDAG.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for Global ISel register bank info generation.
+//
+include "llvm/Target/GlobalISel/RegisterBank.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for DAG isel generation.
+//
+include "llvm/Target/GlobalISel/Target.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for the Global ISel DAG-based selector generation.
+//
+include "llvm/Target/GlobalISel/SelectionDAGCompat.td"
diff --git a/linux-x64/clang/include/llvm/Target/TargetCallingConv.td b/linux-x64/clang/include/llvm/Target/TargetCallingConv.td
new file mode 100644
index 0000000..3d8639d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetCallingConv.td
@@ -0,0 +1,187 @@
+//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
+// 
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// 
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces with which targets
+// describe their calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+class CCAction;
+class CallingConv;
+
+/// CCCustom - Calls a custom arg handling function.
+class CCCustom<string fn> : CCAction {
+  string FuncName = fn;
+}
+
+/// CCPredicateAction - Instances of this class check some predicate, then
+/// delegate to another action if the predicate is true.
+class CCPredicateAction<CCAction A> : CCAction {
+  CCAction SubAction = A;
+}
+
+/// CCIfType - If the current argument is one of the specified types, apply
+/// Action A.
+class CCIfType<list<ValueType> vts, CCAction A> : CCPredicateAction<A> {
+  list<ValueType> VTs = vts;
+}
+
+/// CCIf - If the predicate matches, apply A.
+class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
+  string Predicate = predicate;
+}
+
+/// CCIfByVal - If the current argument has ByVal parameter attribute, apply
+/// Action A.
+class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
+}
+
+/// CCIfSwiftSelf - If the current argument has swiftself parameter attribute,
+/// apply Action A.
+class CCIfSwiftSelf<CCAction A> : CCIf<"ArgFlags.isSwiftSelf()", A> {
+}
+
+/// CCIfSwiftError - If the current argument has swifterror parameter attribute,
+/// apply Action A.
+class CCIfSwiftError<CCAction A> : CCIf<"ArgFlags.isSwiftError()", A> {
+}
+
+/// CCIfConsecutiveRegs - If the current argument has InConsecutiveRegs
+/// parameter attribute, apply Action A.
+class CCIfConsecutiveRegs<CCAction A> : CCIf<"ArgFlags.isInConsecutiveRegs()", A> {
+}
+
+/// CCIfCC - Match if the current calling convention is 'CC'.
+class CCIfCC<string CC, CCAction A>
+  : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
+
+/// CCIfInReg - If this argument is marked with the 'inreg' attribute, apply
+/// the specified action.
+class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {}
+
+/// CCIfNest - If this argument is marked with the 'nest' attribute, apply
+/// the specified action.
+class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {}
+
+/// CCIfSplit - If this argument is marked with the 'split' attribute, apply
+/// the specified action.
+class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {}
+
+/// CCIfSRet - If this argument is marked with the 'sret' attribute, apply
+/// the specified action.
+class CCIfSRet<CCAction A> : CCIf<"ArgFlags.isSRet()", A> {}
+
+/// CCIfVarArg - If the current function is vararg, apply the action.
+class CCIfVarArg<CCAction A> : CCIf<"State.isVarArg()", A> {}
+
+/// CCIfNotVarArg - If the current function is not vararg, apply the action.
+class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}
+
+/// CCAssignToReg - This action matches if there is a register in the specified
+/// list that is still available.  If so, it assigns the value to the first
+/// available register and succeeds.
+class CCAssignToReg<list<Register> regList> : CCAction {
+  list<Register> RegList = regList;
+}
+
+/// CCAssignToRegWithShadow - Same as CCAssignToReg, but with a list of
+/// registers which become shadowed when some register is used.
+class CCAssignToRegWithShadow<list<Register> regList,
+                              list<Register> shadowList> : CCAction {
+  list<Register> RegList = regList;
+  list<Register> ShadowRegList = shadowList;
+}
+
+/// CCAssignToStack - This action always matches: it assigns the value to a
+/// stack slot of the specified size and alignment on the stack.  If size is
+/// zero then the ABI size is used; if align is zero then the ABI alignment
+/// is used - these may depend on the target or subtarget.
+class CCAssignToStack<int size, int align> : CCAction {
+  int Size = size;
+  int Align = align;
+}
+
+/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a list of
+/// registers to be shadowed. Note that, unlike CCAssignToRegWithShadow, this
+/// shadows ALL of the registers in shadowList.
+class CCAssignToStackWithShadow<int size,
+                                int align,
+                                list<Register> shadowList> : CCAction {
+  int Size = size;
+  int Align = align;
+  list<Register> ShadowRegList = shadowList;
+}
+
+/// CCPassByVal - This action always matches: it assigns the value to a stack
+/// slot to implement ByVal aggregate parameter passing. Size and alignment
+/// specify the minimum size and alignment for the stack slot.
+class CCPassByVal<int size, int align> : CCAction {
+  int Size = size;
+  int Align = align;
+}
+
+/// CCPromoteToType - If applied, this promotes the specified current value to
+/// the specified type.
+class CCPromoteToType<ValueType destTy> : CCAction {
+  ValueType DestTy = destTy;
+}
+
+/// CCPromoteToUpperBitsInType - If applied, this promotes the specified current
+/// value to the specified type and shifts the value into the upper bits.
+class CCPromoteToUpperBitsInType<ValueType destTy> : CCAction {
+  ValueType DestTy = destTy;
+}
+
+/// CCBitConvertToType - If applied, this bitconverts the specified current
+/// value to the specified type.
+class CCBitConvertToType<ValueType destTy> : CCAction {
+  ValueType DestTy = destTy;
+}
+
+/// CCPassIndirect - If applied, this stores the value to the stack and passes
+/// the pointer as a normal argument.
+class CCPassIndirect<ValueType destTy> : CCAction {
+  ValueType DestTy = destTy;
+}
+
+/// CCDelegateTo - This action invokes the specified sub-calling-convention.  It
+/// is successful if the specified CC matches.
+class CCDelegateTo<CallingConv cc> : CCAction {
+  CallingConv CC = cc;
+}
+
+/// CallingConv - An instance of this is used to define each calling convention
+/// that the target supports.
+class CallingConv<list<CCAction> actions> {
+  list<CCAction> Actions = actions;
+  bit Custom = 0;
+}
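+
+// A minimal sketch of a calling convention (the register names are
+// hypothetical): promote small integers, assign the first few arguments to
+// registers, and spill the remainder to 4-byte aligned stack slots:
+//
+//   def CC_MyTarget : CallingConv<[
+//     CCIfType<[i8, i16], CCPromoteToType<i32>>,
+//     CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
+//     CCAssignToStack<4, 4>
+//   ]>;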
+
+/// CustomCallingConv - An instance of this is used to declare calling
+/// conventions that are implemented using a custom function of the same name.
+class CustomCallingConv : CallingConv<[]> {
+  let Custom = 1;
+}
+
+/// CalleeSavedRegs - A list of callee saved registers for a given calling
+/// convention.  The order of registers is used by PrologEpilogInsertion when
+/// allocating stack slots for saved registers.
+///
+/// For each CalleeSavedRegs def, TableGen will emit a FOO_SaveList array for
+/// returning from getCalleeSavedRegs(), and a FOO_RegMask bit mask suitable for
+/// returning from getCallPreservedMask().
+class CalleeSavedRegs<dag saves> {
+  dag SaveList = saves;
+
+  // Registers that are also preserved across function calls, but should not be
+  // included in the generated FOO_SaveList array. These registers will be
+  // included in the FOO_RegMask bit mask. This can be used for registers that
+  // are saved automatically, like the SPARC register windows.
+  dag OtherPreserved;
+}
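+
+// For example (register names hypothetical), a callee-saved set built from
+// individual registers plus a generated sequence:
+//
+//   def CSR_MyTarget : CalleeSavedRegs<(add R4, R5, R6, R7,
+//                                       (sequence "D%u", 8, 15))>;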
diff --git a/linux-x64/clang/include/llvm/Target/TargetIntrinsicInfo.h b/linux-x64/clang/include/llvm/Target/TargetIntrinsicInfo.h
new file mode 100644
index 0000000..6a92bde
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetIntrinsicInfo.h
@@ -0,0 +1,70 @@
+//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target intrinsic instructions to the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
+#define LLVM_TARGET_TARGETINTRINSICINFO_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <string>
+
+namespace llvm {
+
+class Function;
+class Module;
+class Type;
+
+//---------------------------------------------------------------------------
+///
+/// TargetIntrinsicInfo - Interface to the description of target intrinsic
+/// instructions.
+///
+class TargetIntrinsicInfo {
+  TargetIntrinsicInfo(const TargetIntrinsicInfo &) = delete;
+  void operator=(const TargetIntrinsicInfo &) = delete;
+public:
+  TargetIntrinsicInfo();
+  virtual ~TargetIntrinsicInfo();
+
+  /// Return the name of a target intrinsic, e.g. "llvm.bfin.ssync".
+  /// The Tys and numTys parameters are for intrinsics with overloaded types
+  /// (e.g., those using iAny or fAny). For a declaration for an overloaded
+  /// intrinsic, Tys should point to an array of numTys pointers to Type,
+  /// and must provide exactly one type for each overloaded type in the
+  /// intrinsic.
+  virtual std::string getName(unsigned IID, Type **Tys = nullptr,
+                              unsigned numTys = 0) const = 0;
+
+  /// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
+  /// names.
+  virtual unsigned lookupName(const char *Name, unsigned Len) const = 0;
+
+  unsigned lookupName(StringRef Name) const {
+    return lookupName(Name.data(), Name.size());
+  }
+
+  /// Return the target intrinsic ID of a function, or 0.
+  virtual unsigned getIntrinsicID(const Function *F) const;
+
+  /// Returns true if the intrinsic can be overloaded.
+  virtual bool isOverloaded(unsigned IID) const = 0;
+
+  /// Create or insert an LLVM Function declaration for an intrinsic,
+  /// and return it. The Tys and numTys are for intrinsics with overloaded
+  /// types. See above for more information.
+  virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = nullptr,
+                                   unsigned numTys = 0) const = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Target/TargetItinerary.td b/linux-x64/clang/include/llvm/Target/TargetItinerary.td
new file mode 100644
index 0000000..182054d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetItinerary.td
@@ -0,0 +1,152 @@
+//===- TargetItinerary.td - Target Itinerary Description ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces
+// which should be implemented by each target that uses instruction
+// itineraries for scheduling. Itineraries are detailed reservation
+// tables for each instruction class. They are most appropriate for
+// in-order machines with complicated scheduling or bundling constraints.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Processor functional unit - These values represent the functional units
+// available across all chip sets for the target.  E.g., IntUnit, FPUnit, ...
+// These may be independent values for each chip set or may be shared across
+// all chip sets of the target.  Each functional unit is treated as a resource
+// during scheduling and affects instruction order based on its availability
+// during a time interval.
+//
+class FuncUnit;
+
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
+class ReservationKind<bits<1> val> {
+  int Value = val;
+}
+
+def Required : ReservationKind<0>;
+def Reserved : ReservationKind<1>;
+
+//===----------------------------------------------------------------------===//
+// Instruction stage - These values represent a non-pipelined step in
+// the execution of an instruction.  Cycles represents the number of
+// discrete time slots needed to complete the stage.  Units represent
+// the choice of functional units that can be used to complete the
+// stage.  E.g., IntUnit1, IntUnit2.  TimeInc indicates how many cycles
+// should elapse from the start of this stage to the start of the next
+// stage in the itinerary.
+//
+// A stage is specified in one of two ways:
+//
+//   InstrStage<1, [FU_x, FU_y]>     - TimeInc defaults to Cycles
+//   InstrStage<1, [FU_x, FU_y], 0>  - TimeInc explicit
+//
+
+class InstrStage<int cycles, list<FuncUnit> units,
+                 int timeinc = -1,
+                 ReservationKind kind = Required> {
+  int Cycles          = cycles;       // length of stage in machine cycles
+  list<FuncUnit> Units = units;       // choice of functional units
+  int TimeInc         = timeinc;      // cycles till start of next stage
+  int Kind            = kind.Value;   // kind of FU reservation
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary - An itinerary represents a sequential series of steps
+// required to complete an instruction.  Itineraries are represented as lists of
+// instruction stages.
+//
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary classes - These values represent 'named' instruction
+// itineraries.  Using named itineraries simplifies managing groups of
+// instructions across chip sets.  An instruction uses the same itinerary class
+// across all chip sets.  Thus a new chip set can be added without modifying
+// instruction information.
+//
+class InstrItinClass;
+def NoItinerary : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary data - These values provide a runtime map of an
+// instruction itinerary class (name) to its itinerary data.
+//
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded to. If the number is zero, then it means the
+// instruction can decode into a variable number of micro-ops and it must be
+// determined dynamically. This directly relates to the itinerary's
+// global IssueWidth property, which constrains the number of micro-ops
+// that can issue per cycle.
+//
+// OperandCycles are optional "cycle counts". They specify the cycle, counted
+// from instruction issue, at which the values corresponding to specific
+// operand indices are defined or read. Bypasses are optional "pipeline
+// forwarding paths": if a def by an instruction is available on a specific
+// bypass and the use can read from the same bypass, then the operand use
+// latency is reduced by one.
+//
+//  InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+//                               InstrStage<1, [A9_AGU]>],
+//                              [3, 1], [A9_LdBypass]>,
+//  InstrItinData<IIC_iMVNr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+//                              [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, an instruction of the IIC_iLoad_i class reads its input on
+// cycle 1 (after issue) and the result of the load is available on cycle 3.
+// The result is available via the forwarding path A9_LdBypass. If it's used by
+// the first source operand of instructions of the IIC_iMVNr class, then the
+// operand latency is reduced by 1.
+class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
+                    list<int> operandcycles = [],
+                    list<Bypass> bypasses = [], int uops = 1> {
+  InstrItinClass TheClass = Class;
+  int NumMicroOps = uops;
+  list<InstrStage> Stages = stages;
+  list<int> OperandCycles = operandcycles;
+  list<Bypass> Bypasses = bypasses;
+}
+
+//===----------------------------------------------------------------------===//
+// Processor itineraries - These values represent the set of all itinerary
+// classes for a given chip set.
+//
+// Set property values to -1 to use the default.
+// See InstrItineraryProps for comments and defaults.
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+                           list<InstrItinData> iid> {
+  list<FuncUnit> FU = fu;
+  list<Bypass> BP = bp;
+  list<InstrItinData> IID = iid;
+}
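+
+// For illustration only (units and itinerary classes are hypothetical): a
+// subtarget ties the pieces above together roughly as
+//
+//   def FU_ALU : FuncUnit;
+//   def FU_LSU : FuncUnit;
+//   def MyItineraries : ProcessorItineraries<
+//     [FU_ALU, FU_LSU], [],
+//     [InstrItinData<IIC_ALU,  [InstrStage<1, [FU_ALU]>], [1, 1]>,
+//      InstrItinData<IIC_Load, [InstrStage<2, [FU_LSU]>], [3, 1]>]>;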
+
+// NoItineraries - A marker that can be used by processors without schedule
+// info. Subtargets using NoItineraries can bypass the scheduler's
+// expensive HazardRecognizer because no reservation table is needed.
+def NoItineraries : ProcessorItineraries<[], [], []>;
+
+//===----------------------------------------------------------------------===//
+// Combo Function Unit data - This is a map of combo function unit names to
+// the list of functional units that are included in the combination.
+//
+class ComboFuncData<FuncUnit ComboFunc, list<FuncUnit> funclist> {
+  FuncUnit TheComboFunc = ComboFunc;
+  list<FuncUnit> FuncList = funclist;
+}
+
+//===----------------------------------------------------------------------===//
+// Combo Function Units - This is a list of all combo function unit data.
+class ComboFuncUnits<list<ComboFuncData> cfd> {
+  list<ComboFuncData> CFD = cfd;
+}
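+
+// For illustration only (hypothetical units): a combo unit that reserves two
+// underlying slots whenever it is used could be declared as
+//
+//   def FU_Slot01 : FuncUnit;
+//   def : ComboFuncUnits<[ComboFuncData<FU_Slot01, [FU_Slot0, FU_Slot1]>]>;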
+
diff --git a/linux-x64/clang/include/llvm/Target/TargetLoweringObjectFile.h b/linux-x64/clang/include/llvm/Target/TargetLoweringObjectFile.h
new file mode 100644
index 0000000..d5ac3cd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetLoweringObjectFile.h
@@ -0,0 +1,197 @@
+//===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes used to handle lowerings specific to common
+// object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILE_H
+#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/SectionKind.h"
+#include <cstdint>
+
+namespace llvm {
+
+class GlobalValue;
+class MachineModuleInfo;
+class Mangler;
+class MCContext;
+class MCExpr;
+class MCSection;
+class MCSymbol;
+class MCSymbolRefExpr;
+class MCStreamer;
+class MCValue;
+class TargetMachine;
+
+class TargetLoweringObjectFile : public MCObjectFileInfo {
+  MCContext *Ctx = nullptr;
+
+  /// Name-mangler for global names.
+  Mangler *Mang = nullptr;
+
+protected:
+  bool SupportIndirectSymViaGOTPCRel = false;
+  bool SupportGOTPCRelWithOffset = true;
+
+  /// This section contains the static constructor pointer list.
+  MCSection *StaticCtorSection = nullptr;
+
+  /// This section contains the static destructor pointer list.
+  MCSection *StaticDtorSection = nullptr;
+
+public:
+  TargetLoweringObjectFile() = default;
+  TargetLoweringObjectFile(const TargetLoweringObjectFile &) = delete;
+  TargetLoweringObjectFile &
+  operator=(const TargetLoweringObjectFile &) = delete;
+  virtual ~TargetLoweringObjectFile();
+
+  MCContext &getContext() const { return *Ctx; }
+  Mangler &getMangler() const { return *Mang; }
+
+  /// This method must be called before any actual lowering is done.  This
+  /// specifies the current context for codegen, and gives the lowering
+  /// implementations a chance to set up their default sections.
+  virtual void Initialize(MCContext &ctx, const TargetMachine &TM);
+
+  virtual void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
+                                    const MCSymbol *Sym) const;
+
+  /// Emit the module-level metadata that the platform cares about.
+  virtual void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                                  const TargetMachine &TM) const {}
+
+  /// Given a constant and its SectionKind, return a section that it should be
+  /// placed in.
+  virtual MCSection *getSectionForConstant(const DataLayout &DL,
+                                           SectionKind Kind,
+                                           const Constant *C,
+                                           unsigned &Align) const;
+
+  /// Classify the specified global variable into a set of target independent
+  /// categories embodied in SectionKind.
+  static SectionKind getKindForGlobal(const GlobalObject *GO,
+                                      const TargetMachine &TM);
+
+  /// This method computes the appropriate section to emit the specified global
+  /// variable or function definition. This should not be passed external (or
+  /// available externally) globals.
+  MCSection *SectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                              const TargetMachine &TM) const;
+
+  /// This method computes the appropriate section to emit the specified global
+  /// variable or function definition. This should not be passed external (or
+  /// available externally) globals.
+  MCSection *SectionForGlobal(const GlobalObject *GO,
+                              const TargetMachine &TM) const {
+    return SectionForGlobal(GO, getKindForGlobal(GO, TM), TM);
+  }
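+
+  // Illustrative sketch (AsmPrinter-style caller assumed): a typical client
+  // picks the section and switches the streamer to it, e.g.
+  //
+  //   MCSection *S = TLOF.SectionForGlobal(GO, TM);
+  //   Streamer.SwitchSection(S);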
+
+  virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
+                                 const GlobalValue *GV,
+                                 const TargetMachine &TM) const;
+
+  virtual MCSection *getSectionForJumpTable(const Function &F,
+                                            const TargetMachine &TM) const;
+
+  virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+                                                   const Function &F) const;
+
+  /// Targets should implement this method to assign a section to globals with
+  /// an explicit section specified. The implementation of this method can
+  /// assume that GO->hasSection() is true.
+  virtual MCSection *
+  getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                           const TargetMachine &TM) const = 0;
+
+  /// Return an MCExpr to use for a reference to the specified global variable
+  /// from exception handling information.
+  virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+                                                unsigned Encoding,
+                                                const TargetMachine &TM,
+                                                MachineModuleInfo *MMI,
+                                                MCStreamer &Streamer) const;
+
+  /// Return the MCSymbol for a private symbol with the global value's name as
+  /// its base, with the specified suffix.
+  MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+                                         StringRef Suffix,
+                                         const TargetMachine &TM) const;
+
+  // The symbol that gets passed to .cfi_personality.
+  virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+                                            const TargetMachine &TM,
+                                            MachineModuleInfo *MMI) const;
+
+  const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
+                                  MCStreamer &Streamer) const;
+
+  virtual MCSection *getStaticCtorSection(unsigned Priority,
+                                          const MCSymbol *KeySym) const {
+    return StaticCtorSection;
+  }
+
+  virtual MCSection *getStaticDtorSection(unsigned Priority,
+                                          const MCSymbol *KeySym) const {
+    return StaticDtorSection;
+  }
+
+  /// \brief Create a symbol reference to describe the given TLS variable when
+  /// emitting the address in debug info.
+  virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
+
+  virtual const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+                                               const GlobalValue *RHS,
+                                               const TargetMachine &TM) const {
+    return nullptr;
+  }
+
+  /// \brief Does the target support replacing a data "PC"-relative access to
+  /// a symbol through another symbol, by accessing the latter via a GOT entry
+  /// instead?
+  bool supportIndirectSymViaGOTPCRel() const {
+    return SupportIndirectSymViaGOTPCRel;
+  }
+
+  /// \brief Does the target's GOT "PC"-relative relocation support encoding an
+  /// additional binary expression with an offset?
+  bool supportGOTPCRelWithOffset() const {
+    return SupportGOTPCRelWithOffset;
+  }
+
+  /// \brief Get the target-specific PC-relative GOT entry relocation.
+  virtual const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+                                                  const MCValue &MV,
+                                                  int64_t Offset,
+                                                  MachineModuleInfo *MMI,
+                                                  MCStreamer &Streamer) const {
+    return nullptr;
+  }
+
+  virtual void emitLinkerFlagsForGlobal(raw_ostream &OS,
+                                        const GlobalValue *GV) const {}
+
+  virtual void emitLinkerFlagsForUsed(raw_ostream &OS,
+                                      const GlobalValue *GV) const {}
+
+protected:
+  virtual MCSection *SelectSectionForGlobal(const GlobalObject *GO,
+                                            SectionKind Kind,
+                                            const TargetMachine &TM) const = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETLOWERINGOBJECTFILE_H
diff --git a/linux-x64/clang/include/llvm/Target/TargetMachine.h b/linux-x64/clang/include/llvm/Target/TargetMachine.h
new file mode 100644
index 0000000..6f5d86e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetMachine.h
@@ -0,0 +1,349 @@
+//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TargetMachine and LLVMTargetMachine classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETMACHINE_H
+#define LLVM_TARGET_TARGETMACHINE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetOptions.h"
+#include <string>
+
+namespace llvm {
+
+class Function;
+class GlobalValue;
+class MachineModuleInfo;
+class Mangler;
+class MCAsmInfo;
+class MCContext;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class MCSymbol;
+class raw_pwrite_stream;
+class PassManagerBuilder;
+class Target;
+class TargetIntrinsicInfo;
+class TargetIRAnalysis;
+class TargetTransformInfo;
+class TargetLoweringObjectFile;
+class TargetPassConfig;
+class TargetSubtargetInfo;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class PassManagerBase;
+}
+using legacy::PassManagerBase;
+
+//===----------------------------------------------------------------------===//
+///
+/// Primary interface to the complete machine description for the target
+/// machine.  All target-specific information should be accessible through this
+/// interface.
+///
+class TargetMachine {
+protected: // Can only create subclasses.
+  TargetMachine(const Target &T, StringRef DataLayoutString,
+                const Triple &TargetTriple, StringRef CPU, StringRef FS,
+                const TargetOptions &Options);
+
+  /// The Target that this machine was created for.
+  const Target &TheTarget;
+
+  /// DataLayout for the target: keep ABI type size and alignment.
+  ///
+  /// The DataLayout is created based on the string representation provided
+  /// during construction. It is kept here only to avoid reparsing the string
+  /// but should not really be used during compilation, because it has an
+  /// internal cache that is context specific.
+  const DataLayout DL;
+
+  /// Triple string, CPU name, and target feature strings the TargetMachine
+  /// instance is created with.
+  Triple TargetTriple;
+  std::string TargetCPU;
+  std::string TargetFS;
+
+  Reloc::Model RM = Reloc::Static;
+  CodeModel::Model CMModel = CodeModel::Small;
+  CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+  /// Contains target specific asm information.
+  const MCAsmInfo *AsmInfo;
+
+  const MCRegisterInfo *MRI;
+  const MCInstrInfo *MII;
+  const MCSubtargetInfo *STI;
+
+  unsigned RequireStructuredCFG : 1;
+  unsigned O0WantsFastISel : 1;
+
+public:
+  const TargetOptions DefaultOptions;
+  mutable TargetOptions Options;
+
+  TargetMachine(const TargetMachine &) = delete;
+  void operator=(const TargetMachine &) = delete;
+  virtual ~TargetMachine();
+
+  const Target &getTarget() const { return TheTarget; }
+
+  const Triple &getTargetTriple() const { return TargetTriple; }
+  StringRef getTargetCPU() const { return TargetCPU; }
+  StringRef getTargetFeatureString() const { return TargetFS; }
+
+  /// Virtual method implemented by subclasses that returns a pointer to that
+  /// target's TargetSubtargetInfo-derived member variable.
+  virtual const TargetSubtargetInfo *getSubtargetImpl(const Function &) const {
+    return nullptr;
+  }
+  virtual TargetLoweringObjectFile *getObjFileLowering() const {
+    return nullptr;
+  }
+
+  /// This method returns a pointer to the specified type of
+  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
+  /// returned is of the correct type.
+  template <typename STC> const STC &getSubtarget(const Function &F) const {
+    return *static_cast<const STC*>(getSubtargetImpl(F));
+  }
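+
+  // Illustrative sketch (X86Subtarget stands in for any concrete subtarget
+  // type):
+  //
+  //   const X86Subtarget &ST = TM.getSubtarget<X86Subtarget>(F);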
+
+  /// Create a DataLayout.
+  const DataLayout createDataLayout() const { return DL; }
+
+  /// Test if a DataLayout is compatible with the CodeGen for this target.
+  ///
+  /// The LLVM Module owns a DataLayout that is used for the target independent
+  /// optimizations and code generation. This hook provides a target specific
+  /// check on the validity of this DataLayout.
+  bool isCompatibleDataLayout(const DataLayout &Candidate) const {
+    return DL == Candidate;
+  }
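+
+  // Illustrative sketch: front ends typically stamp the Module with this
+  // layout before running codegen, e.g.
+  //
+  //   M.setDataLayout(TM->createDataLayout());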
+
+  /// Get the pointer size for this target.
+  ///
+  /// This is the only time the DataLayout in the TargetMachine is used.
+  unsigned getPointerSize(unsigned AS) const {
+    return DL.getPointerSize(AS);
+  }
+
+  unsigned getPointerSizeInBits(unsigned AS) const {
+    return DL.getPointerSizeInBits(AS);
+  }
+
+  unsigned getProgramPointerSize() const {
+    return DL.getPointerSize(DL.getProgramAddressSpace());
+  }
+
+  unsigned getAllocaPointerSize() const {
+    return DL.getPointerSize(DL.getAllocaAddrSpace());
+  }
+
+  /// \brief Reset the target options based on the function's attributes.
+  // FIXME: Remove TargetOptions that affect per-function code generation
+  // from TargetMachine.
+  void resetTargetOptions(const Function &F) const;
+
+  /// Return target specific asm information.
+  const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
+
+  const MCRegisterInfo *getMCRegisterInfo() const { return MRI; }
+  const MCInstrInfo *getMCInstrInfo() const { return MII; }
+  const MCSubtargetInfo *getMCSubtargetInfo() const { return STI; }
+
+  /// If intrinsic information is available, return it.  If not, return null.
+  virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
+    return nullptr;
+  }
+
+  bool requiresStructuredCFG() const { return RequireStructuredCFG; }
+  void setRequiresStructuredCFG(bool Value) { RequireStructuredCFG = Value; }
+
+  /// Returns the code generation relocation model. The choices are static,
+  /// PIC, dynamic-no-pic, and target default.
+  Reloc::Model getRelocationModel() const;
+
+  /// Returns the code model. The choices are small, kernel, medium, large, and
+  /// target default.
+  CodeModel::Model getCodeModel() const;
+
+  bool isPositionIndependent() const;
+
+  bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const;
+
+  /// Returns true if this target uses emulated TLS.
+  bool useEmulatedTLS() const;
+
+  /// Returns the TLS model which should be used for the given global variable.
+  TLSModel::Model getTLSModel(const GlobalValue *GV) const;
+
+  /// Returns the optimization level: None, Less, Default, or Aggressive.
+  CodeGenOpt::Level getOptLevel() const;
+
+  /// \brief Overrides the optimization level.
+  void setOptLevel(CodeGenOpt::Level Level);
+
+  void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
+  bool getO0WantsFastISel() { return O0WantsFastISel; }
+  void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
+  void setGlobalISel(bool Enable) { Options.EnableGlobalISel = Enable; }
+
+  bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
+
+  bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }
+
+  /// Return true if data objects should be emitted into their own section,
+  /// corresponds to -fdata-sections.
+  bool getDataSections() const {
+    return Options.DataSections;
+  }
+
+  /// Return true if functions should be emitted into their own section,
+  /// corresponding to -ffunction-sections.
+  bool getFunctionSections() const {
+    return Options.FunctionSections;
+  }
+
+  /// \brief Get a \c TargetIRAnalysis appropriate for the target.
+  ///
+  /// This is used to construct the new pass manager's target IR analysis pass,
+  /// set up appropriately for this target machine. Even the old pass manager
+  /// uses this to answer queries about the IR.
+  TargetIRAnalysis getTargetIRAnalysis();
+
+  /// \brief Return a TargetTransformInfo for a given function.
+  ///
+  /// The returned TargetTransformInfo is specialized to the subtarget
+  /// corresponding to \p F.
+  virtual TargetTransformInfo getTargetTransformInfo(const Function &F);
+
+  /// Allow the target to modify the pass manager, e.g. by calling
+  /// PassManagerBuilder::addExtension.
+  virtual void adjustPassManager(PassManagerBuilder &) {}
+
+  /// These enums are meant to be passed into addPassesToEmitFile to indicate
+  /// what type of file to emit, and returned by it to indicate what type of
+  /// file could actually be made.
+  enum CodeGenFileType {
+    CGFT_AssemblyFile,
+    CGFT_ObjectFile,
+    CGFT_Null         // Do not emit any output.
+  };
+
+  /// Add passes to the specified pass manager to get the specified file
+  /// emitted.  Typically this will involve several steps of code generation.
+  /// This method should return true if emission of this file type is not
+  /// supported, or false on success.
+  /// \p MMI is an optional parameter that, if set to non-nullptr,
+  /// will be used to set the MachineModuleInfo for this PM.
+  virtual bool addPassesToEmitFile(PassManagerBase &, raw_pwrite_stream &,
+                                   CodeGenFileType,
+                                   bool /*DisableVerify*/ = true,
+                                   MachineModuleInfo *MMI = nullptr) {
+    return true;
+  }
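+
+  // Illustrative sketch (error handling elided; OS is a raw_pwrite_stream and
+  // M is the Module being compiled):
+  //
+  //   legacy::PassManager PM;
+  //   if (TM->addPassesToEmitFile(PM, OS, TargetMachine::CGFT_ObjectFile))
+  //     report_fatal_error("target does not support object emission");
+  //   PM.run(M);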
+
+  /// Add passes to the specified pass manager to get machine code emitted with
+  /// the MCJIT. This method returns true if machine code is not supported. It
+  /// fills the MCContext Ctx pointer, which can be used to build a custom
+  /// MCStreamer.
+  ///
+  virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&,
+                                 raw_pwrite_stream &,
+                                 bool /*DisableVerify*/ = true) {
+    return true;
+  }
+
+  /// True if subtarget inserts the final scheduling pass on its own.
+  ///
+  /// Branch relaxation, which must happen after block placement, can
+  /// on some targets (e.g. SystemZ) expose additional post-RA
+  /// scheduling opportunities.
+  virtual bool targetSchedulesPostRAScheduling() const { return false; }
+
+  void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
+                         Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
+  MCSymbol *getSymbol(const GlobalValue *GV) const;
+
+  /// True if the target uses physical regs at Prolog/Epilog insertion
+  /// time. If true (most machines), all vregs must be allocated before
+  /// PEI. If false (virtual-register machines), then callee-save register
+  /// spilling and scavenging are not needed or used.
+  virtual bool usesPhysRegsForPEI() const { return true; }
+
+  /// True if the target wants to use interprocedural register allocation by
+  /// default. The -enable-ipra flag can be used to override this.
+  virtual bool useIPRA() const {
+    return false;
+  }
+};
+
+/// This class describes a target machine that is implemented with the LLVM
+/// target-independent code generator.
+///
+class LLVMTargetMachine : public TargetMachine {
+protected: // Can only create subclasses.
+  LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
+                    const Triple &TargetTriple, StringRef CPU, StringRef FS,
+                    const TargetOptions &Options, Reloc::Model RM,
+                    CodeModel::Model CM, CodeGenOpt::Level OL);
+
+  void initAsmInfo();
+
+public:
+  /// \brief Get a TargetTransformInfo implementation for the target.
+  ///
+  /// The TTI returned uses the common code generator to answer queries about
+  /// the IR.
+  TargetTransformInfo getTargetTransformInfo(const Function &F) override;
+
+  /// Create a pass configuration object to be used by addPassesToEmitX methods
+  /// for generating a pipeline of CodeGen passes.
+  virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+  /// Add passes to the specified pass manager to get the specified file
+  /// emitted.  Typically this will involve several steps of code generation.
+  /// \p MMI is an optional parameter that, if set to non-nullptr,
+  /// will be used to set the MachineModuleInfo for this PM.
+  bool addPassesToEmitFile(PassManagerBase &PM, raw_pwrite_stream &Out,
+                           CodeGenFileType FileType, bool DisableVerify = true,
+                           MachineModuleInfo *MMI = nullptr) override;
+
+  /// Add passes to the specified pass manager to get machine code emitted with
+  /// the MCJIT. This method returns true if machine code is not supported. It
+  /// fills the MCContext Ctx pointer, which can be used to build a custom
+  /// MCStreamer.
+  bool addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
+                         raw_pwrite_stream &OS,
+                         bool DisableVerify = true) override;
+
+  /// Returns true if the target is expected to pass all machine verifier
+  /// checks. This is a stopgap measure to fix targets one by one. We will
+  /// remove this at some point and always enable the verifier when
+  /// EXPENSIVE_CHECKS is enabled.
+  virtual bool isMachineVerifierClean() const { return true; }
+
+  /// \brief Adds an AsmPrinter pass to the pipeline that prints assembly or
+  /// machine code from the MI representation.
+  bool addAsmPrinter(PassManagerBase &PM, raw_pwrite_stream &Out,
+                     CodeGenFileType FileType, MCContext &Context);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TARGET_TARGETMACHINE_H
diff --git a/linux-x64/clang/include/llvm/Target/TargetOptions.h b/linux-x64/clang/include/llvm/Target/TargetOptions.h
new file mode 100644
index 0000000..844031f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetOptions.h
@@ -0,0 +1,278 @@
+//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines command line option flags that are shared across various
+// targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETOPTIONS_H
+#define LLVM_TARGET_TARGETOPTIONS_H
+
+#include "llvm/MC/MCTargetOptions.h"
+
+namespace llvm {
+  class MachineFunction;
+  class Module;
+
+  namespace FloatABI {
+    enum ABIType {
+      Default, // Target-specific (either soft or hard depending on triple, etc).
+      Soft,    // Soft float.
+      Hard     // Hard float.
+    };
+  }
+
+  namespace FPOpFusion {
+    enum FPOpFusionMode {
+      Fast,     // Enable fusion of FP ops wherever it's profitable.
+      Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
+      Strict    // Never fuse FP-ops.
+    };
+  }
+
+  namespace JumpTable {
+    enum JumpTableType {
+      Single,          // Use a single table for all indirect jumptable calls.
+      Arity,           // Use one table per number of function parameters.
+      Simplified,      // Use one table per function type, with types projected
+                       // into 4 types: pointer to non-function, struct,
+                       // primitive, and function pointer.
+      Full             // Use one table per unique function type
+    };
+  }
+
+  namespace ThreadModel {
+    enum Model {
+      POSIX,  // POSIX Threads
+      Single  // Single Threaded Environment
+    };
+  }
+
+  namespace FPDenormal {
+    enum DenormalMode {
+      IEEE,           // IEEE 754 denormal numbers
+      PreserveSign,   // the sign of a flushed-to-zero number is preserved in
+                      // the sign of 0
+      PositiveZero    // denormals are flushed to positive zero
+    };
+  }
+
+  enum class EABI {
+    Unknown,
+    Default, // Default means not specified; resolves to a target-specific
+             // value (either 4, 5 or gnu depending on the triple).
+    EABI4,
+    EABI5,
+    GNU
+  };
+
+  /// Identify a debugger for "tuning" the debug info.
+  ///
+  /// The "debugger tuning" concept allows us to present a more intuitive
+  /// interface that unpacks into different sets of defaults for the various
+  /// individual feature-flag settings, that suit the preferences of the
+  /// various debuggers.  However, it's worth remembering that debuggers are
+  /// not the only consumers of debug info, and some variations in DWARF might
+  /// better be treated as target/platform issues. Fundamentally,
+  /// o if the feature is useful (or not) to a particular debugger, regardless
+  ///   of the target, that's a tuning decision;
+  /// o if the feature is useful (or not) on a particular platform, regardless
+  ///   of the debugger, that's a target decision.
+  /// It's not impossible to see both factors in some specific case.
+  ///
+  /// The "tuning" should be used to set defaults for individual feature flags
+  /// in DwarfDebug; if a given feature has a more specific command-line option,
+  /// that option should take precedence over the tuning.
+  enum class DebuggerKind {
+    Default,  // No specific tuning requested.
+    GDB,      // Tune debug info for gdb.
+    LLDB,     // Tune debug info for lldb.
+    SCE       // Tune debug info for SCE targets (e.g. PS4).
+  };
+
+  class TargetOptions {
+  public:
+    TargetOptions()
+        : PrintMachineCode(false), UnsafeFPMath(false), NoInfsFPMath(false),
+          NoNaNsFPMath(false), NoTrappingFPMath(false),
+          NoSignedZerosFPMath(false),
+          HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false),
+          GuaranteedTailCallOpt(false), StackSymbolOrdering(true),
+          EnableFastISel(false), EnableGlobalISel(false), UseInitArray(false),
+          DisableIntegratedAS(false), RelaxELFRelocations(false),
+          FunctionSections(false), DataSections(false),
+          UniqueSectionNames(true), TrapUnreachable(false),
+          EmulatedTLS(false), ExplicitEmulatedTLS(false),
+          EnableIPRA(false), EmitStackSizeSection(false) {}
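+
+    // Illustrative sketch (not prescriptive): clients usually take the
+    // defaults and flip individual flags, e.g.
+    //
+    //   TargetOptions Opts;
+    //   Opts.FunctionSections = 1; // like -ffunction-sections
+    //   Opts.DataSections = 1;     // like -fdata-sections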
+
+    /// PrintMachineCode - This flag is enabled when the -print-machineinstrs
+    /// option is specified on the command line, and should enable debugging
+    /// output from the code generator.
+    unsigned PrintMachineCode : 1;
+
+    /// DisableFramePointerElim - This returns true if frame pointer elimination
+    /// optimization should be disabled for the given machine function.
+    bool DisableFramePointerElim(const MachineFunction &MF) const;
+
+    /// UnsafeFPMath - This flag is enabled when the
+    /// -enable-unsafe-fp-math flag is specified on the command line.  When
+    /// this flag is off (the default), the code generator is not allowed to
+    /// produce results that are "less precise" than IEEE allows.  This includes
+    /// use of X86 instructions like FSIN and FCOS instead of libcalls.
+    unsigned UnsafeFPMath : 1;
+
+    /// NoInfsFPMath - This flag is enabled when the
+    /// -enable-no-infs-fp-math flag is specified on the command line. When
+    /// this flag is off (the default), the code generator is not allowed to
+    /// assume the FP arithmetic arguments and results are never +-Infs.
+    unsigned NoInfsFPMath : 1;
+
+    /// NoNaNsFPMath - This flag is enabled when the
+    /// -enable-no-nans-fp-math flag is specified on the command line. When
+    /// this flag is off (the default), the code generator is not allowed to
+    /// assume the FP arithmetic arguments and results are never NaNs.
+    unsigned NoNaNsFPMath : 1;
+
+    /// NoTrappingFPMath - This flag is enabled when the
+    /// -enable-no-trapping-fp-math is specified on the command line. This
+    /// specifies that there are no trap handlers to handle exceptions.
+    unsigned NoTrappingFPMath : 1;
+
+    /// NoSignedZerosFPMath - This flag is enabled when the
+    /// -enable-no-signed-zeros-fp-math is specified on the command line. This
+    /// specifies that optimizations are allowed to treat the sign of a zero
+    /// argument or result as insignificant.
+    unsigned NoSignedZerosFPMath : 1;
+
+    /// HonorSignDependentRoundingFPMath - This returns true when the
+    /// -enable-sign-dependent-rounding-fp-math is specified.  If this returns
+    /// false (the default), the code generator is allowed to assume that the
+    /// rounding behavior is the default (round-to-zero for all floating point
+    /// to integer conversions, and round-to-nearest for all other arithmetic
+    /// truncations).  If this is enabled (set to true), the code generator must
+    /// assume that the rounding mode may dynamically change.
+    unsigned HonorSignDependentRoundingFPMathOption : 1;
+    bool HonorSignDependentRoundingFPMath() const;
+
+    /// NoZerosInBSS - By default some codegens place zero-initialized data in
+    /// the .bss section. This flag disables that behaviour (necessary, e.g.,
+    /// when compiling crt*.o).
+    unsigned NoZerosInBSS : 1;
+
+    /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
+    /// specified on the commandline. When the flag is on, participating targets
+    /// will perform tail call optimization on all calls which use the fastcc
+    /// calling convention and which satisfy certain target-independent
+    /// criteria (being at the end of a function, having the same return type
+    /// as their parent function, etc.), using an alternate ABI if necessary.
+    unsigned GuaranteedTailCallOpt : 1;
+
+    /// StackAlignmentOverride - Override default stack alignment for target.
+    unsigned StackAlignmentOverride = 0;
+
+    /// StackSymbolOrdering - When true, this will allow CodeGen to order
+    /// the local stack symbols (for code size, code locality, or any other
+    /// heuristics). When false, the local symbols are left in whatever order
+    /// they were generated. Default is true.
+    unsigned StackSymbolOrdering : 1;
+
+    /// EnableFastISel - This flag enables fast-path instruction selection
+    /// which trades away generated code quality in favor of reducing
+    /// compile time.
+    unsigned EnableFastISel : 1;
+
+    /// EnableGlobalISel - This flag enables global instruction selection.
+    unsigned EnableGlobalISel : 1;
+
+    /// UseInitArray - Use .init_array instead of .ctors for static
+    /// constructors.
+    unsigned UseInitArray : 1;
+
+    /// Disable the integrated assembler.
+    unsigned DisableIntegratedAS : 1;
+
+    /// Compress DWARF debug sections.
+    DebugCompressionType CompressDebugSections = DebugCompressionType::None;
+
+    unsigned RelaxELFRelocations : 1;
+
+    /// Emit functions into separate sections.
+    unsigned FunctionSections : 1;
+
+    /// Emit data into separate sections.
+    unsigned DataSections : 1;
+
+    unsigned UniqueSectionNames : 1;
+
+    /// Emit target-specific trap instruction for 'unreachable' IR instructions.
+    unsigned TrapUnreachable : 1;
+
+    /// EmulatedTLS - This flag enables the emulated TLS model, using the
+    /// emutls functions in the runtime library.
+    unsigned EmulatedTLS : 1;
+
+    /// Whether -emulated-tls or -no-emulated-tls is set.
+    unsigned ExplicitEmulatedTLS : 1;
+
+    /// This flag enables InterProcedural Register Allocation (IPRA).
+    unsigned EnableIPRA : 1;
+
+    /// Emit section containing metadata on function stack sizes.
+    unsigned EmitStackSizeSection : 1;
+
+    /// FloatABIType - This setting is set when the -float-abi=xxx option is
+    /// specified on the command line. This setting may be Default, Soft, or
+    /// Hard.
+    /// Default selects the target's default behavior. Soft selects the ABI for
+    /// software floating point, but does not indicate that FP hardware may not
+    /// be used. Such a combination is unfortunately popular (e.g.
+    /// arm-apple-darwin). Hard presumes that the normal FP ABI is used.
+    FloatABI::ABIType FloatABIType = FloatABI::Default;
+
+    /// AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
+    /// This controls the creation of fused FP ops that store intermediate
+    /// results in higher precision than IEEE allows (E.g. FMAs).
+    ///
+    /// Fast mode - allows formation of fused FP ops whenever they're
+    /// profitable.
+    /// Standard mode - allow fusion only for 'blessed' FP ops. At present the
+    /// only blessed op is the fmuladd intrinsic. In the future more blessed ops
+    /// may be added.
+    /// Strict mode - allow fusion only if/when it can be proven that the excess
+    /// precision won't affect the result.
+    ///
+    /// Note: This option only controls formation of fused ops by the
+    /// optimizers.  Fused operations that are explicitly specified (e.g. FMA
+    /// via the llvm.fma.* intrinsic) will always be honored, regardless of
+    /// the value of this option.
+    FPOpFusion::FPOpFusionMode AllowFPOpFusion = FPOpFusion::Standard;
+
+    /// ThreadModel - This flag specifies the type of threading model to assume
+    /// for things like atomics.
+    ThreadModel::Model ThreadModel = ThreadModel::POSIX;
+
+    /// EABIVersion - This flag specifies the EABI version.
+    EABI EABIVersion = EABI::Default;
+
+    /// Which debugger to tune for.
+    DebuggerKind DebuggerTuning = DebuggerKind::Default;
+
+    /// FPDenormalMode - This flag specifies which denormal numbers the code
+    /// is permitted to require.
+    FPDenormal::DenormalMode FPDenormalMode = FPDenormal::IEEE;
+
+    /// What exception model to use.
+    ExceptionHandling ExceptionModel = ExceptionHandling::None;
+
+    /// Machine level options.
+    MCTargetOptions MCOptions;
+  };
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Target/TargetSchedule.td b/linux-x64/clang/include/llvm/Target/TargetSchedule.td
new file mode 100644
index 0000000..8fa9bae
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetSchedule.td
@@ -0,0 +1,444 @@
+//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces which should
+// be implemented by each target that uses TableGen-based scheduling.
+//
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1. Basic properties for coarse grained instruction cost model.
+// 2. Scheduler Read/Write resources for simple per-opcode cost model.
+// 3. Instruction itineraries for detailed reservation tables.
+//
+// (1) Basic properties are defined by the SchedMachineModel
+// class. Target hooks allow subtargets to associate opcodes with
+// those properties.
+//
+// (2) A per-operand machine model can be implemented in any
+// combination of the following ways:
+//
+// A. Associate per-operand SchedReadWrite types with Instructions by
+// modifying the Instruction definition to inherit from Sched. For
+// each subtarget, define WriteRes and ReadAdvance to associate
+// processor resources and latency with each SchedReadWrite type.
+//
+// B. In each instruction definition, name an ItineraryClass. For each
+// subtarget, define ItinRW entries to map ItineraryClass to
+// per-operand SchedReadWrite types. Unlike method A, these types may
+// be subtarget specific and can be directly associated with resources
+// by defining SchedWriteRes and SchedReadAdvance.
+//
+// C. In the subtarget, map SchedReadWrite types to specific
+// opcodes. This overrides any SchedReadWrite types or
+// ItineraryClasses defined by the Instruction. As in method B, the
+// subtarget can directly associate resources with SchedReadWrite
+// types by defining SchedWriteRes and SchedReadAdvance.
+//
+// D. In either the target or subtarget, define SchedWriteVariant or
+// SchedReadVariant to map one SchedReadWrite type onto another
+// sequence of SchedReadWrite types. This allows dynamic selection of
+// an instruction's machine model via custom C++ code. It also allows
+// a machine-independent SchedReadWrite type to map to a sequence of
+// machine-dependent types.
+//
+// (3) A per-pipeline-stage machine model can be implemented by providing
+// Itineraries in addition to mapping instructions to ItineraryClasses.
+//===----------------------------------------------------------------------===//
+
+// Include legacy support for instruction itineraries.
+include "llvm/Target/TargetItinerary.td"
+
+class Instruction; // Forward def
+
+class Predicate; // Forward def
+
+// DAG operator that interprets the DAG args as Instruction defs.
+def instrs;
+
+// DAG operator that interprets each DAG arg as a regex pattern for
+// matching Instruction opcode names.
+// The regex must match the beginning of the opcode (as in Python re.match).
+// To avoid matching prefixes, append '$' to the pattern.
+def instregex;
+
+// Define the SchedMachineModel and provide basic properties for
+// coarse grained instruction cost model. Default values for the
+// properties are defined in MCSchedModel. A value of "-1" in the
+// target description's SchedMachineModel indicates that the property
+// is not overridden by the target.
+//
+// Target hooks allow subtargets to associate LoadLatency and
+// HighLatency with groups of opcodes.
+//
+// See MCSchedule.h for detailed comments.
+class SchedMachineModel {
+  int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
+  int MicroOpBufferSize = -1; // Max micro-ops that can be buffered.
+  int LoopMicroOpBufferSize = -1; // Max micro-ops that can be buffered for
+                                  // optimized loop dispatch/execution.
+  int LoadLatency = -1; // Cycles for loads to access the cache.
+  int HighLatency = -1; // Approximation of cycles for "high latency" ops.
+  int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
+
+  // Per-cycle resources tables.
+  ProcessorItineraries Itineraries = NoItineraries;
+
+  bit PostRAScheduler = 0; // Enable Post RegAlloc Scheduler pass.
+
+  // Subtargets that define a model for only a subset of instructions
+  // that have a scheduling class (itinerary class or SchedRW list)
+  // and may actually be generated for that subtarget must clear this
+  // bit. Otherwise, the scheduler considers an unmodelled opcode to
+  // be an error. This should only be set during initial bringup,
+  // or there will be no way to catch simple errors in the model
+  // resulting from changes to the instruction definitions.
+  bit CompleteModel = 1;
+
+  // Indicates that we should do full overlap checking for multiple InstrRWs
+  // defining the same instructions within the same SchedMachineModel.
+  // FIXME: Remove when all in tree targets are clean with the full check
+  // enabled.
+  bit FullInstRWOverlapCheck = 1;
+
+  // A processor may only implement part of the published ISA, due either to
+  // new ISA extensions (e.g. Pentium 4 doesn't have AVX) or to implementation
+  // choices (e.g. ARM/MIPS/PowerPC/SPARC soft-float cores).
+  //
+  // For a processor which doesn't support some feature(s), the schedule model
+  // can use:
+  //
+  //   let UnsupportedFeatures = [HaveA, ..., HaveY];
+  //
+  // to skip the checks for scheduling information when building LLVM for
+  // instructions which have any of the listed predicates in their Predicates
+  // field.
+  list<Predicate> UnsupportedFeatures = [];
+
+  bit NoModel = 0; // Special tag to indicate missing machine model.
+}
+
+def NoSchedModel : SchedMachineModel {
+  let NoModel = 1;
+  let CompleteModel = 0;
+}
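+
+// For illustration only (values and names are hypothetical): a subtarget
+// typically overrides just the properties it knows, e.g.
+//
+//   def MyCoreModel : SchedMachineModel {
+//     let IssueWidth = 2;
+//     let LoadLatency = 4;
+//     let MispredictPenalty = 10;
+//     let CompleteModel = 0;
+//   }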
+
+// Define a kind of processor resource that may be common across
+// similar subtargets.
+class ProcResourceKind;
+
+// Define a number of interchangeable processor resources. NumUnits
+// determines the throughput of instructions that require the resource.
+//
+// An optional Super resource may be given to model these resources as
+// a subset of the more general super resources. Using one of these
+// resources implies using one of the super resources.
+//
+// ProcResourceUnits normally model a few buffered resources within an
+// out-of-order engine. Buffered resources may be held for multiple
+// clock cycles, but the scheduler does not pin them to a particular
+// clock cycle relative to instruction dispatch. Setting BufferSize=0
+// changes this to an in-order issue/dispatch resource. In this case,
+// the scheduler counts down from the cycle that the instruction
+// issues in-order, forcing a stall whenever a subsequent instruction
+// requires the same resource until the number of ResourceCycles
+// specified in WriteRes expire. Setting BufferSize=1 changes this to
+// an in-order latency resource. In this case, the scheduler models
+// producer/consumer stalls between instructions that use the
+// resource.
+//
+// Examples (all assume an out-of-order engine):
+//
+// Use BufferSize = -1 for "issue ports" fed by a unified reservation
+// station. Here the size of the reservation station is modeled by
+// MicroOpBufferSize, which should be the minimum size of either the
+// register rename pool, unified reservation station, or reorder
+// buffer.
+//
+// Use BufferSize = 0 for resources that force "dispatch/issue
+// groups". (Different processors define dispatch/issue
+// differently. Here we refer to the stage between decoding into micro-ops
+// and moving them into a reservation station.) Normally NumMicroOps
+// is sufficient to limit dispatch/issue groups. However, some
+// processors can form groups with only certain combinations of
+// instruction types, e.g. POWER7.
+//
+// Use BufferSize = 1 for in-order execution units. This is used for
+// an in-order pipeline within an out-of-order core where scheduling
+// dependent operations back-to-back is guaranteed to cause a
+// bubble, e.g. Cortex-A9 floating-point.
+//
+// Use BufferSize > 1 for out-of-order execution units with a
+// separate reservation station. This simply models the size of the
+// reservation station.
+//
+// To model both dispatch/issue groups and in-order execution units,
+// create two types of units, one with BufferSize=0 and one with
+// BufferSize=1.
+//
+// SchedModel ties these units to a processor for any stand-alone defs
+// of this class.
+class ProcResourceUnits<ProcResourceKind kind, int num> {
+  ProcResourceKind Kind = kind;
+  int NumUnits = num;
+  ProcResourceKind Super = ?;
+  int BufferSize = -1;
+  SchedMachineModel SchedModel = ?;
+}
+
+// EponymousProcResourceKind helps implement ProcResourceUnits by
+// allowing a ProcResourceUnits definition to reference itself. It
+// should not be referenced anywhere else.
+def EponymousProcResourceKind : ProcResourceKind;
+
+// Subtargets typically define processor resource kind and number of
+// units in one place.
+class ProcResource<int num> : ProcResourceKind,
+  ProcResourceUnits<EponymousProcResourceKind, num>;
+
+class ProcResGroup<list<ProcResource> resources> : ProcResourceKind {
+  list<ProcResource> Resources = resources;
+  SchedMachineModel SchedModel = ?;
+  int BufferSize = -1;
+}
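+
+// For illustration only (names continue the hypothetical MyCoreModel sketch):
+// stand-alone resource defs are tied to a processor via SchedModel, e.g.
+//
+//   let SchedModel = MyCoreModel in {
+//     def MyALU : ProcResource<2>;                    // two identical pipes
+//     def MyLSU : ProcResource<1> { let BufferSize = 16; }
+//     def MyAnyUnit : ProcResGroup<[MyALU, MyLSU]>;
+//   }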
+
+// A target architecture may define SchedReadWrite types and associate
+// them with instruction operands.
+class SchedReadWrite;
+
+// List the per-operand types that map to the machine model of an
+// instruction. One SchedWrite type must be listed for each explicit
+// def operand in order. Additional SchedWrite types may optionally be
+// listed for implicit def operands.  SchedRead types may optionally
+// be listed for use operands in order. The order of defs relative to
+// uses is insignificant. This way, the same SchedReadWrite list may
+// be used for multiple forms of an operation. For example, a
+// two-address instruction could have two tied operands or a single
+// operand that both reads and writes a reg. In both cases we have a
+// single SchedWrite and single SchedRead in any order.
+class Sched<list<SchedReadWrite> schedrw> {
+  list<SchedReadWrite> SchedRW = schedrw;
+}
+
+// Define a scheduler resource associated with a def operand.
+class SchedWrite : SchedReadWrite;
+def NoWrite : SchedWrite;
+
+// Define a scheduler resource associated with a use operand.
+class SchedRead  : SchedReadWrite;
+
+// Define a SchedWrite that is modeled as a sequence of other
+// SchedWrites with additive latency. This allows a single operand to
+// be mapped to the resources composed from a set of previously defined
+// SchedWrites.
+//
+// If the final write in this sequence is a SchedWriteVariant marked
+// Variadic, then the list of prior writes is distributed across all
+// operands after resolving the predicate for the final write.
+//
+// SchedModel silences warnings but is ignored.
+class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
+  list<SchedWrite> Writes = writes;
+  int Repeat = rep;
+  SchedMachineModel SchedModel = ?;
+}
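+
+// For illustration only (hypothetical writes): a load-with-increment result
+// could be modeled as the latencies of two existing writes added together:
+//
+//   def MyWriteLoadInc : WriteSequence<[MyWriteLoad, MyWriteALU]>;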
+
+// Define values common to WriteRes and SchedWriteRes.
+//
+// SchedModel ties these resources to a processor.
+class ProcWriteResources<list<ProcResourceKind> resources> {
+  list<ProcResourceKind> ProcResources = resources;
+  list<int> ResourceCycles = [];
+  int Latency = 1;
+  int NumMicroOps = 1;
+  bit BeginGroup = 0;
+  bit EndGroup = 0;
+  // Allow a processor to mark some scheduling classes as unsupported
+  // for stronger verification.
+  bit Unsupported = 0;
+  // Allow a processor to mark some scheduling classes as single-issue.
+  // SingleIssue is an alias for Begin/End Group.
+  bit SingleIssue = 0;
+  SchedMachineModel SchedModel = ?;
+}
+
+// Define the resources and latency of a SchedWrite. This will be used
+// directly by targets that have no itinerary classes. In this case,
+// SchedWrite is defined by the target, while WriteResources is
+// defined by the subtarget, and maps the SchedWrite to processor
+// resources.
+//
+// If a target already has itinerary classes, SchedWriteResources can
+// be used instead to define subtarget specific SchedWrites and map
+// them to processor resources in one place. Then ItinRW can map
+// itinerary classes to the subtarget's SchedWrites.
+//
+// ProcResources indicates the set of resources consumed by the write.
+// Optionally, ResourceCycles indicates the number of cycles the
+// resource is consumed. Each ResourceCycles item is paired with the
+// ProcResource item at the same position in its list. Since
+// ResourceCycles are rarely specialized, the list may be
+// incomplete. By default, resources are consumed for a single cycle,
+// regardless of latency, which models a fully pipelined processing
+// unit. A value of 0 for ResourceCycles means that the resource must
+// be available but is not consumed, which is only relevant for
+// unbuffered resources.
+//
+// By default, each SchedWrite takes one micro-op, which is counted
+// against the processor's IssueWidth limit. If an instruction can
+// write multiple registers with a single micro-op, the subtarget
+// should define one of the writes to be zero micro-ops. If a
+// subtarget requires multiple micro-ops to write a single result, it
+// should either override the write's NumMicroOps to be greater than 1
+// or require additional writes. Extra writes can be required either
+// by defining a WriteSequence, or simply listing extra writes in the
+// instruction's list of writes beyond the number of "def"
+// operands. The scheduler assumes that all micro-ops must be
+// dispatched in the same cycle. These micro-ops may be required to
+// begin or end the current dispatch group.
+class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
+  : ProcWriteResources<resources> {
+  SchedWrite WriteType = write;
+}
+
+// Directly name a set of WriteResources defining a new SchedWrite
+// type at the same time. This class is unaware of its SchedModel so
+// must be referenced by InstRW or ItinRW.
+class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
+  ProcWriteResources<resources>;
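+
+// For illustration only (all names hypothetical, continuing the MyCoreModel
+// sketch above):
+//
+//   let SchedModel = MyCoreModel in {
+//     def : WriteRes<WriteALU, [MyALU]>;           // fully pipelined, 1 cycle
+//     def : WriteRes<WriteLoad, [MyLSU]> { let Latency = 4; }
+//     def MyWriteMul : SchedWriteRes<[MyALU]> { let Latency = 3; }
+//   }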
+
+// Define values common to ReadAdvance and SchedReadAdvance.
+//
+// SchedModel ties these resources to a processor.
+class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
+  int Cycles = cycles;
+  list<SchedWrite> ValidWrites = writes;
+  // Allow a processor to mark some scheduling classes as unsupported
+  // for stronger verification.
+  bit Unsupported = 0;
+  SchedMachineModel SchedModel = ?;
+}
+
+// A processor may define a ReadAdvance associated with a SchedRead
+// to reduce latency of a prior write by N cycles. A negative advance
+// effectively increases latency, which may be used for cross-domain
+// stalls.
+//
+// A ReadAdvance may be associated with a list of SchedWrites
+// to implement pipeline bypass. The Writes list may be empty to
+// indicate operands that are always read this number of Cycles later
+// than a normal register read, allowing the read's parent instruction
+// to issue earlier relative to the writer.
+class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
+  : ProcReadAdvance<cycles, writes> {
+  SchedRead ReadType = read;
+}
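+
+// For example, a subtarget could reduce the effective latency of a multiply
+// feeding an accumulate operand by 2 cycles (illustrative sketch; ReadMAC and
+// WriteMul are hypothetical names):
+//
+//   def : ReadAdvance<ReadMAC, 2, [WriteMul]>;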
+
+// Directly associate a new SchedRead type with a delay and optional
+// pipeline bypass. For use with InstRW or ItinRW.
+class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
+  ProcReadAdvance<cycles, writes>;
+
+// Define SchedRead defaults. Reads seldom need special treatment.
+def ReadDefault : SchedRead;
+def NoReadAdvance : SchedReadAdvance<0>;
+
+// Define shared code that will be in the same scope as all
+// SchedPredicates. Available variables are:
+// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
+class PredicateProlog<code c> {
+  code Code = c;
+}
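+
+// For example, a target could expose its TargetInstrInfo subclass to all
+// predicates (illustrative sketch; MyTargetInstrInfo is a hypothetical name):
+//
+//   def : PredicateProlog<[{
+//     const MyTargetInstrInfo *TII =
+//       static_cast<const MyTargetInstrInfo*>(SchedModel->getInstrInfo());
+//     (void)TII;
+//   }]>;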
+
+// Define a predicate to determine which SchedVariant applies to a
+// particular MachineInstr. The code snippet is used as an
+// if-statement's expression. Available variables are MI, SchedModel,
+// and anything defined in a PredicateProlog.
+//
+// SchedModel silences warnings but is ignored.
+class SchedPredicate<code pred> {
+  SchedMachineModel SchedModel = ?;
+  code Predicate = pred;
+}
+def NoSchedPred : SchedPredicate<[{true}]>;
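+
+// For example, a predicate that selects instructions that may load
+// (illustrative sketch; IsLoadPred is a hypothetical name):
+//
+//   def IsLoadPred : SchedPredicate<[{MI->mayLoad()}]>;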
+
+// Associate a predicate with a list of SchedReadWrites. By default,
+// the selected SchedReadWrites are still associated with a single
+// operand and assumed to execute sequentially with additive
+// latency. However, if the parent SchedWriteVariant or
+// SchedReadVariant is marked "Variadic", then each Selected
+// SchedReadWrite is mapped in place to the instruction's variadic
+// operands. In this case, latency is not additive. If the current Variant
+// is already part of a Sequence, then that entire chain leading up to
+// the Variant is distributed over the variadic operands.
+class SchedVar<SchedPredicate pred, list<SchedReadWrite> selected> {
+  SchedPredicate Predicate = pred;
+  list<SchedReadWrite> Selected = selected;
+}
+
+// SchedModel silences warnings but is ignored.
+class SchedVariant<list<SchedVar> variants> {
+  list<SchedVar> Variants = variants;
+  bit Variadic = 0;
+  SchedMachineModel SchedModel = ?;
+}
+
+// A SchedWriteVariant is a single SchedWrite type that maps to a list
+// of SchedWrite types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "def" operands. The
+// SchedVariant's Expansion list is then interpreted as one write
+// per-operand instead of the usual sequential writes feeding a single
+// operand.
+class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
+  SchedVariant<variants> {
+}
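+
+// For example, a write whose resources depend on a predicate (illustrative
+// sketch; IsLoadPred, WriteLd, and WriteALU are hypothetical names):
+//
+//   def WriteVar : SchedWriteVariant<[
+//     SchedVar<IsLoadPred, [WriteLd]>,
+//     SchedVar<NoSchedPred, [WriteALU]>
+//   ]>;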
+
+// A SchedReadVariant is a single SchedRead type that maps to a list
+// of SchedRead types under the conditions defined by its predicates.
+//
+// A Variadic read is expanded to cover multiple "readsReg" operands as
+// explained above.
+class SchedReadVariant<list<SchedVar> variants> : SchedRead,
+  SchedVariant<variants> {
+}
+
+// Map a set of opcodes to a list of SchedReadWrite types. This allows
+// the subtarget to easily override specific operations.
+//
+// SchedModel ties this opcode mapping to a processor.
+class InstRW<list<SchedReadWrite> rw, dag instrlist> {
+  list<SchedReadWrite> OperandReadWrites = rw;
+  dag Instrs = instrlist;
+  SchedMachineModel SchedModel = ?;
+  // Allow a subtarget to mark some instructions as unsupported.
+  bit Unsupported = 0;
+}
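+
+// For example, a subtarget could override the scheduling information of a
+// few opcodes (illustrative sketch; WriteALU, ADDrr, and SUBrr are
+// hypothetical names; `instrs` is the dag operator used to list opcodes):
+//
+//   def : InstRW<[WriteALU], (instrs ADDrr, SUBrr)>;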
+
+// Map a set of itinerary classes to SchedReadWrite resources. This is
+// used to bootstrap a target (e.g. ARM) when itineraries already
+// exist and changing InstrInfo is undesirable.
+//
+// SchedModel ties this ItineraryClass mapping to a processor.
+class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
+  list<InstrItinClass> MatchedItinClasses = iic;
+  list<SchedReadWrite> OperandReadWrites = rw;
+  SchedMachineModel SchedModel = ?;
+}
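+
+// For example, a subtarget with existing itineraries could map an itinerary
+// class onto the new model (illustrative sketch; IIC_iALU and WriteALU are
+// hypothetical names):
+//
+//   def : ItinRW<[WriteALU], [IIC_iALU]>;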
+
+// Alias a target-defined SchedReadWrite to a processor specific
+// SchedReadWrite. This allows a subtarget to easily map a
+// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
+// SchedReadVariant.
+//
+// SchedModel will usually be provided by a surrounding let statement
+// and ties this SchedAlias mapping to a processor.
+class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
+  SchedReadWrite MatchRW = match;
+  SchedReadWrite AliasRW = alias;
+  SchedMachineModel SchedModel = ?;
+}
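+
+// For example, a subtarget could redirect a generic write to a local variant
+// (illustrative sketch; WriteALU and MyWriteALU are hypothetical names):
+//
+//   def : SchedAlias<WriteALU, MyWriteALU>;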
diff --git a/linux-x64/clang/include/llvm/Target/TargetSelectionDAG.td b/linux-x64/clang/include/llvm/Target/TargetSelectionDAG.td
new file mode 100644
index 0000000..7ba8f7e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Target/TargetSelectionDAG.td
@@ -0,0 +1,1326 @@
+//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used by SelectionDAG
+// instruction selection generators.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Type Constraint definitions.
+//
+// Note that the semantics of these constraints are hard coded into tblgen.  To
+// modify or add constraints, you have to hack tblgen.
+//
+
+class SDTypeConstraint<int opnum> {
+  int OperandNum = opnum;
+}
+
+// SDTCisVT - The specified operand has exactly this VT.
+class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
+  ValueType VT = vt;
+}
+
+// SDTCisPtrTy - The specified operand has pointer type.
+class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisInt - The specified operand has integer type.
+class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisFP - The specified operand has floating-point type.
+class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisVec - The specified operand has a vector type.
+class SDTCisVec<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisSameAs - The two specified operands have identical types.
+class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+  int OtherOperandNum = OtherOp;
+}
+
+// SDTCisVTSmallerThanOp - The specified operand is a VT SDNode, and its type is
+// smaller than the 'Other' operand.
+class SDTCisVTSmallerThanOp<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+  int OtherOperandNum = OtherOp;
+}
+
+// SDTCisOpSmallerThanOp - The type of the 'SmallOp' operand is strictly
+// smaller than the type of the 'BigOp' operand.
+class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
+  int BigOperandNum = BigOp;
+}
+
+/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
+/// type as the element type of OtherOp, which is a vector type.
+class SDTCisEltOfVec<int ThisOp, int OtherOp>
+  : SDTypeConstraint<ThisOp> {
+  int OtherOpNum = OtherOp;
+}
+
+/// SDTCisSubVecOfVec - This indicates that ThisOp is a vector type
+/// with length less than that of OtherOp, which is a vector type.
+class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
+  : SDTypeConstraint<ThisOp> {
+  int OtherOpNum = OtherOp;
+}
+
+// SDTCVecEltisVT - The specified operand is a vector type with element type
+// of VT.
+class SDTCVecEltisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
+  ValueType VT = vt;
+}
+
+// SDTCisSameNumEltsAs - The two specified operands have identical number
+// of elements.
+class SDTCisSameNumEltsAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+  int OtherOperandNum = OtherOp;
+}
+
+// SDTCisSameSizeAs - The two specified operands have identical size.
+class SDTCisSameSizeAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+  int OtherOperandNum = OtherOp;
+}
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Type Profile definitions.
+//
+// These use the constraints defined above to describe the type requirements of
+// the various nodes.  These are not hard coded into tblgen, allowing targets to
+// add their own if needed.
+//
+
+// SDTypeProfile - This profile describes the type requirements of a Selection
+// DAG node.
+class SDTypeProfile<int numresults, int numoperands,
+                    list<SDTypeConstraint> constraints> {
+  int NumResults = numresults;
+  int NumOperands = numoperands;
+  list<SDTypeConstraint> Constraints = constraints;
+}
+
+// Builtin profiles.
+def SDTIntLeaf: SDTypeProfile<1, 0, [SDTCisInt<0>]>;         // for 'imm'.
+def SDTFPLeaf : SDTypeProfile<1, 0, [SDTCisFP<0>]>;          // for 'fpimm'.
+def SDTPtrLeaf: SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;       // for '&g'.
+def SDTOther  : SDTypeProfile<1, 0, [SDTCisVT<0, OtherVT>]>; // for 'vt'.
+def SDTUNDEF  : SDTypeProfile<1, 0, []>;                     // for 'undef'.
+def SDTUnaryOp  : SDTypeProfile<1, 1, []>;                   // for bitconvert.
+
+def SDTIntBinOp : SDTypeProfile<1, 2, [     // add, and, or, xor, udiv, etc.
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>
+]>;
+def SDTIntShiftOp : SDTypeProfile<1, 2, [   // shl, sra, srl
+  SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>
+]>;
+def SDTIntSatNoShOp : SDTypeProfile<1, 2, [   // ssat with no shift
+  SDTCisSameAs<0, 1>, SDTCisInt<2>
+]>;
+def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0>
+]>;
+
+def SDTFPBinOp : SDTypeProfile<1, 2, [      // fadd, fmul, etc.
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>
+]>;
+def SDTFPSignOp : SDTypeProfile<1, 2, [     // fcopysign.
+  SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisFP<2>
+]>;
+def SDTFPTernaryOp : SDTypeProfile<1, 3, [  // fmadd, fnmsub, etc.
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisFP<0>
+]>;
+def SDTIntUnaryOp : SDTypeProfile<1, 1, [   // ctlz, cttz
+  SDTCisSameAs<0, 1>, SDTCisInt<0>
+]>;
+def SDTIntExtendOp : SDTypeProfile<1, 1, [  // sext, zext, anyext
+  SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTIntTruncOp  : SDTypeProfile<1, 1, [  // trunc
+  SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTFPUnaryOp  : SDTypeProfile<1, 1, [   // fneg, fsqrt, etc
+  SDTCisSameAs<0, 1>, SDTCisFP<0>
+]>;
+def SDTFPRoundOp  : SDTypeProfile<1, 1, [   // fround
+  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTFPExtendOp  : SDTypeProfile<1, 1, [  // fextend
+  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTIntToFPOp : SDTypeProfile<1, 1, [    // [su]int_to_fp
+  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTFPToIntOp : SDTypeProfile<1, 1, [    // fp_to_[su]int
+  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+def SDTExtInreg : SDTypeProfile<1, 2, [     // sext_inreg
+  SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>,
+  SDTCisVTSmallerThanOp<2, 1>
+]>;
+def SDTExtInvec : SDTypeProfile<1, 1, [     // sext_invec
+  SDTCisInt<0>, SDTCisVec<0>, SDTCisInt<1>, SDTCisVec<1>,
+  SDTCisOpSmallerThanOp<1, 0>, SDTCisSameSizeAs<0,1>
+]>;
+
+def SDTSetCC : SDTypeProfile<1, 3, [        // setcc
+  SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
+]>;
+
+def SDTSelect : SDTypeProfile<1, 3, [       // select
+  SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
+]>;
+
+def SDTVSelect : SDTypeProfile<1, 3, [       // vselect
+  SDTCisVec<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameNumEltsAs<0, 1>
+]>;
+
+def SDTSelectCC : SDTypeProfile<1, 5, [     // select_cc
+  SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisSameAs<0, 3>,
+  SDTCisVT<5, OtherVT>
+]>;
+
+def SDTBr : SDTypeProfile<0, 1, [           // br
+  SDTCisVT<0, OtherVT>
+]>;
+
+def SDTBrCC : SDTypeProfile<0, 4, [       // brcc
+  SDTCisVT<0, OtherVT>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
+]>;
+
+def SDTBrcond : SDTypeProfile<0, 2, [       // brcond
+  SDTCisInt<0>, SDTCisVT<1, OtherVT>
+]>;
+
+def SDTBrind : SDTypeProfile<0, 1, [        // brind
+  SDTCisPtrTy<0>
+]>;
+
+def SDTCatchret : SDTypeProfile<0, 2, [     // catchret
+  SDTCisVT<0, OtherVT>, SDTCisVT<1, OtherVT>
+]>;
+
+def SDTNone : SDTypeProfile<0, 0, []>;      // ret, trap
+
+def SDTLoad : SDTypeProfile<1, 1, [         // load
+  SDTCisPtrTy<1>
+]>;
+
+def SDTStore : SDTypeProfile<0, 2, [        // store
+  SDTCisPtrTy<1>
+]>;
+
+def SDTIStore : SDTypeProfile<1, 3, [       // indexed store
+  SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3>
+]>;
+
+def SDTMaskedStore: SDTypeProfile<0, 3, [       // masked store
+  SDTCisPtrTy<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<1, 2>
+]>;
+
+def SDTMaskedLoad: SDTypeProfile<1, 3, [       // masked load
+  SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
+  SDTCisSameNumEltsAs<0, 2>
+]>;
+
+def SDTMaskedGather: SDTypeProfile<2, 3, [       // masked gather
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<1, 3>,
+  SDTCisPtrTy<4>, SDTCVecEltisVT<1, i1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+
+def SDTMaskedScatter: SDTypeProfile<1, 3, [       // masked scatter
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameNumEltsAs<0, 1>,
+  SDTCVecEltisVT<0, i1>, SDTCisPtrTy<3>
+]>;
+
+def SDTVecShuffle : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
+]>;
+def SDTVecExtract : SDTypeProfile<1, 2, [   // vector extract
+  SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2>
+]>;
+def SDTVecInsert : SDTypeProfile<1, 3, [    // vector insert
+  SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
+]>;
+
+def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract
+  SDTCisSubVecOfVec<0,1>, SDTCisInt<2>
+]>;
+def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
+  SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
+]>;
+
+def SDTPrefetch : SDTypeProfile<0, 4, [     // prefetch
+  SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>, SDTCisInt<1>
+]>;
+
+def SDTMemBarrier : SDTypeProfile<0, 5, [   // memory barrier
+  SDTCisSameAs<0,1>,  SDTCisSameAs<0,2>,  SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
+  SDTCisInt<0>
+]>;
+def SDTAtomicFence : SDTypeProfile<0, 2, [
+  SDTCisSameAs<0,1>, SDTCisPtrTy<0>
+]>;
+def SDTAtomic3 : SDTypeProfile<1, 3, [
+  SDTCisSameAs<0,2>,  SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def SDTAtomic2 : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def SDTAtomicStore : SDTypeProfile<0, 2, [
+  SDTCisPtrTy<0>, SDTCisInt<1>
+]>;
+def SDTAtomicLoad : SDTypeProfile<1, 1, [
+  SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+
+def SDTConvertOp : SDTypeProfile<1, 5, [ // cvtss, su, us, uu, ff, fs, fu, sf, uf
+  SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>, SDTCisPtrTy<4>, SDTCisPtrTy<5>
+]>;
+
+class SDCallSeqStart<list<SDTypeConstraint> constraints> :
+        SDTypeProfile<0, 2, constraints>;
+class SDCallSeqEnd<list<SDTypeConstraint> constraints> :
+        SDTypeProfile<0, 2, constraints>;
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node definitions.
+//
+class SDNode<string opcode, SDTypeProfile typeprof,
+             list<SDNodeProperty> props = [], string sdclass = "SDNode">
+             : SDPatternOperator {
+  string Opcode  = opcode;
+  string SDClass = sdclass;
+  let Properties = props;
+  SDTypeProfile TypeProfile = typeprof;
+}
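+
+// For example, a target could define its own node for a custom wrapper
+// operation (illustrative sketch; "MYTGTISD::Wrapper" is a hypothetical
+// target-specific opcode):
+//
+//   def MyWrapper : SDNode<"MYTGTISD::Wrapper", SDTIntUnaryOp>;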
+
+// Special TableGen-recognized dag nodes
+def set;
+def implicit;
+def node;
+def srcvalue;
+
+def imm        : SDNode<"ISD::Constant"  , SDTIntLeaf , [], "ConstantSDNode">;
+def timm       : SDNode<"ISD::TargetConstant",SDTIntLeaf, [], "ConstantSDNode">;
+def fpimm      : SDNode<"ISD::ConstantFP", SDTFPLeaf  , [], "ConstantFPSDNode">;
+def vt         : SDNode<"ISD::VALUETYPE" , SDTOther   , [], "VTSDNode">;
+def bb         : SDNode<"ISD::BasicBlock", SDTOther   , [], "BasicBlockSDNode">;
+def cond       : SDNode<"ISD::CONDCODE"  , SDTOther   , [], "CondCodeSDNode">;
+def undef      : SDNode<"ISD::UNDEF"     , SDTUNDEF   , []>;
+def globaladdr : SDNode<"ISD::GlobalAddress",         SDTPtrLeaf, [],
+                        "GlobalAddressSDNode">;
+def tglobaladdr : SDNode<"ISD::TargetGlobalAddress",  SDTPtrLeaf, [],
+                         "GlobalAddressSDNode">;
+def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress",         SDTPtrLeaf, [],
+                          "GlobalAddressSDNode">;
+def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress",  SDTPtrLeaf, [],
+                           "GlobalAddressSDNode">;
+def constpool   : SDNode<"ISD::ConstantPool",         SDTPtrLeaf, [],
+                         "ConstantPoolSDNode">;
+def tconstpool  : SDNode<"ISD::TargetConstantPool",   SDTPtrLeaf, [],
+                         "ConstantPoolSDNode">;
+def jumptable   : SDNode<"ISD::JumpTable",            SDTPtrLeaf, [],
+                         "JumpTableSDNode">;
+def tjumptable  : SDNode<"ISD::TargetJumpTable",      SDTPtrLeaf, [],
+                         "JumpTableSDNode">;
+def frameindex  : SDNode<"ISD::FrameIndex",           SDTPtrLeaf, [],
+                         "FrameIndexSDNode">;
+def tframeindex : SDNode<"ISD::TargetFrameIndex",     SDTPtrLeaf, [],
+                         "FrameIndexSDNode">;
+def externalsym : SDNode<"ISD::ExternalSymbol",       SDTPtrLeaf, [],
+                         "ExternalSymbolSDNode">;
+def texternalsym: SDNode<"ISD::TargetExternalSymbol", SDTPtrLeaf, [],
+                         "ExternalSymbolSDNode">;
+def mcsym: SDNode<"ISD::MCSymbol", SDTPtrLeaf, [], "MCSymbolSDNode">;
+def blockaddress : SDNode<"ISD::BlockAddress",        SDTPtrLeaf, [],
+                         "BlockAddressSDNode">;
+def tblockaddress: SDNode<"ISD::TargetBlockAddress",  SDTPtrLeaf, [],
+                         "BlockAddressSDNode">;
+
+def add        : SDNode<"ISD::ADD"       , SDTIntBinOp   ,
+                        [SDNPCommutative, SDNPAssociative]>;
+def sub        : SDNode<"ISD::SUB"       , SDTIntBinOp>;
+def mul        : SDNode<"ISD::MUL"       , SDTIntBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def mulhs      : SDNode<"ISD::MULHS"     , SDTIntBinOp, [SDNPCommutative]>;
+def mulhu      : SDNode<"ISD::MULHU"     , SDTIntBinOp, [SDNPCommutative]>;
+def smullohi   : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
+def umullohi   : SDNode<"ISD::UMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
+def sdiv       : SDNode<"ISD::SDIV"      , SDTIntBinOp>;
+def udiv       : SDNode<"ISD::UDIV"      , SDTIntBinOp>;
+def srem       : SDNode<"ISD::SREM"      , SDTIntBinOp>;
+def urem       : SDNode<"ISD::UREM"      , SDTIntBinOp>;
+def sdivrem    : SDNode<"ISD::SDIVREM"   , SDTIntBinHiLoOp>;
+def udivrem    : SDNode<"ISD::UDIVREM"   , SDTIntBinHiLoOp>;
+def srl        : SDNode<"ISD::SRL"       , SDTIntShiftOp>;
+def sra        : SDNode<"ISD::SRA"       , SDTIntShiftOp>;
+def shl        : SDNode<"ISD::SHL"       , SDTIntShiftOp>;
+def rotl       : SDNode<"ISD::ROTL"      , SDTIntShiftOp>;
+def rotr       : SDNode<"ISD::ROTR"      , SDTIntShiftOp>;
+def and        : SDNode<"ISD::AND"       , SDTIntBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def or         : SDNode<"ISD::OR"        , SDTIntBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def xor        : SDNode<"ISD::XOR"       , SDTIntBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def addc       : SDNode<"ISD::ADDC"      , SDTIntBinOp,
+                        [SDNPCommutative, SDNPOutGlue]>;
+def adde       : SDNode<"ISD::ADDE"      , SDTIntBinOp,
+                        [SDNPCommutative, SDNPOutGlue, SDNPInGlue]>;
+def subc       : SDNode<"ISD::SUBC"      , SDTIntBinOp,
+                        [SDNPOutGlue]>;
+def sube       : SDNode<"ISD::SUBE"      , SDTIntBinOp,
+                        [SDNPOutGlue, SDNPInGlue]>;
+def smin       : SDNode<"ISD::SMIN"      , SDTIntBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+def smax       : SDNode<"ISD::SMAX"      , SDTIntBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+def umin       : SDNode<"ISD::UMIN"      , SDTIntBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+def umax       : SDNode<"ISD::UMAX"      , SDTIntBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+
+def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
+def sext_invec : SDNode<"ISD::SIGN_EXTEND_VECTOR_INREG", SDTExtInvec>;
+def zext_invec : SDNode<"ISD::ZERO_EXTEND_VECTOR_INREG", SDTExtInvec>;
+
+def abs        : SDNode<"ISD::ABS"        , SDTIntUnaryOp>;
+def bitreverse : SDNode<"ISD::BITREVERSE" , SDTIntUnaryOp>;
+def bswap      : SDNode<"ISD::BSWAP"      , SDTIntUnaryOp>;
+def ctlz       : SDNode<"ISD::CTLZ"       , SDTIntUnaryOp>;
+def cttz       : SDNode<"ISD::CTTZ"       , SDTIntUnaryOp>;
+def ctpop      : SDNode<"ISD::CTPOP"      , SDTIntUnaryOp>;
+def ctlz_zero_undef : SDNode<"ISD::CTLZ_ZERO_UNDEF", SDTIntUnaryOp>;
+def cttz_zero_undef : SDNode<"ISD::CTTZ_ZERO_UNDEF", SDTIntUnaryOp>;
+def sext       : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
+def zext       : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
+def anyext     : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
+def trunc      : SDNode<"ISD::TRUNCATE"   , SDTIntTruncOp>;
+def bitconvert : SDNode<"ISD::BITCAST"    , SDTUnaryOp>;
+def addrspacecast : SDNode<"ISD::ADDRSPACECAST", SDTUnaryOp>;
+def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
+def insertelt  : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
+
+def fadd       : SDNode<"ISD::FADD"       , SDTFPBinOp, [SDNPCommutative]>;
+def fsub       : SDNode<"ISD::FSUB"       , SDTFPBinOp>;
+def fmul       : SDNode<"ISD::FMUL"       , SDTFPBinOp, [SDNPCommutative]>;
+def fdiv       : SDNode<"ISD::FDIV"       , SDTFPBinOp>;
+def frem       : SDNode<"ISD::FREM"       , SDTFPBinOp>;
+def fma        : SDNode<"ISD::FMA"        , SDTFPTernaryOp>;
+def fmad       : SDNode<"ISD::FMAD"       , SDTFPTernaryOp>;
+def fabs       : SDNode<"ISD::FABS"       , SDTFPUnaryOp>;
+def fminnum    : SDNode<"ISD::FMINNUM"    , SDTFPBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+def fmaxnum    : SDNode<"ISD::FMAXNUM"    , SDTFPBinOp,
+                                  [SDNPCommutative, SDNPAssociative]>;
+def fminnan    : SDNode<"ISD::FMINNAN"    , SDTFPBinOp>;
+def fmaxnan    : SDNode<"ISD::FMAXNAN"    , SDTFPBinOp>;
+def fgetsign   : SDNode<"ISD::FGETSIGN"   , SDTFPToIntOp>;
+def fcanonicalize : SDNode<"ISD::FCANONICALIZE", SDTFPUnaryOp>;
+def fneg       : SDNode<"ISD::FNEG"       , SDTFPUnaryOp>;
+def fsqrt      : SDNode<"ISD::FSQRT"      , SDTFPUnaryOp>;
+def fsin       : SDNode<"ISD::FSIN"       , SDTFPUnaryOp>;
+def fcos       : SDNode<"ISD::FCOS"       , SDTFPUnaryOp>;
+def fexp2      : SDNode<"ISD::FEXP2"      , SDTFPUnaryOp>;
+def fpow       : SDNode<"ISD::FPOW"       , SDTFPBinOp>;
+def flog2      : SDNode<"ISD::FLOG2"      , SDTFPUnaryOp>;
+def frint      : SDNode<"ISD::FRINT"      , SDTFPUnaryOp>;
+def ftrunc     : SDNode<"ISD::FTRUNC"     , SDTFPUnaryOp>;
+def fceil      : SDNode<"ISD::FCEIL"      , SDTFPUnaryOp>;
+def ffloor     : SDNode<"ISD::FFLOOR"     , SDTFPUnaryOp>;
+def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>;
+def fround     : SDNode<"ISD::FROUND"     , SDTFPUnaryOp>;
+
+def fpround    : SDNode<"ISD::FP_ROUND"   , SDTFPRoundOp>;
+def fpextend   : SDNode<"ISD::FP_EXTEND"  , SDTFPExtendOp>;
+def fcopysign  : SDNode<"ISD::FCOPYSIGN"  , SDTFPSignOp>;
+
+def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>;
+def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>;
+def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>;
+def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
+def f16_to_fp  : SDNode<"ISD::FP16_TO_FP" , SDTIntToFPOp>;
+def fp_to_f16  : SDNode<"ISD::FP_TO_FP16" , SDTFPToIntOp>;
+
+def setcc      : SDNode<"ISD::SETCC"      , SDTSetCC>;
+def select     : SDNode<"ISD::SELECT"     , SDTSelect>;
+def vselect    : SDNode<"ISD::VSELECT"    , SDTVSelect>;
+def selectcc   : SDNode<"ISD::SELECT_CC"  , SDTSelectCC>;
+
+def brcc       : SDNode<"ISD::BR_CC"      , SDTBrCC,   [SDNPHasChain]>;
+def brcond     : SDNode<"ISD::BRCOND"     , SDTBrcond, [SDNPHasChain]>;
+def brind      : SDNode<"ISD::BRIND"      , SDTBrind,  [SDNPHasChain]>;
+def br         : SDNode<"ISD::BR"         , SDTBr,     [SDNPHasChain]>;
+def catchret   : SDNode<"ISD::CATCHRET"   , SDTCatchret,
+                        [SDNPHasChain, SDNPSideEffect]>;
+def cleanupret : SDNode<"ISD::CLEANUPRET" , SDTNone,   [SDNPHasChain]>;
+def catchpad   : SDNode<"ISD::CATCHPAD"   , SDTNone,
+                        [SDNPHasChain, SDNPSideEffect]>;
+
+def trap       : SDNode<"ISD::TRAP"       , SDTNone,
+                        [SDNPHasChain, SDNPSideEffect]>;
+def debugtrap  : SDNode<"ISD::DEBUGTRAP"  , SDTNone,
+                        [SDNPHasChain, SDNPSideEffect]>;
+
+def prefetch   : SDNode<"ISD::PREFETCH"   , SDTPrefetch,
+                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+                         SDNPMemOperand]>;
+
+def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
+                     [SDNPHasChain, SDNPSideEffect]>;
+
+def atomic_fence : SDNode<"ISD::ATOMIC_FENCE" , SDTAtomicFence,
+                          [SDNPHasChain, SDNPSideEffect]>;
+
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , SDTAtomic3,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_clr : SDNode<"ISD::ATOMIC_LOAD_CLR" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load      : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
+                    [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_store     : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
+                    [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def masked_store : SDNode<"ISD::MSTORE",  SDTMaskedStore,
+                       [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def masked_load  : SDNode<"ISD::MLOAD",  SDTMaskedLoad,
+                       [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def masked_scatter : SDNode<"ISD::MSCATTER",  SDTMaskedScatter,
+                       [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def masked_gather  : SDNode<"ISD::MGATHER",  SDTMaskedGather,
+                       [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
+// and truncstore (see below).
+def ld         : SDNode<"ISD::LOAD"       , SDTLoad,
+                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def st         : SDNode<"ISD::STORE"      , SDTStore,
+                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def ist        : SDNode<"ISD::STORE"      , SDTIStore,
+                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
+def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>;
+def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,
+                              []>;
+
+// vector_extract/vector_insert are deprecated. extractelt/insertelt
+// are preferred.
+def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
+    SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
+def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
+    SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
+def concat_vectors : SDNode<"ISD::CONCAT_VECTORS",
+    SDTypeProfile<1, 2, [SDTCisSubVecOfVec<1, 0>, SDTCisSameAs<1, 2>]>,[]>;
+
+// This operator does not do subvector type checking.  The ARM
+// backend, at least, needs it.
+def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
+    SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
+    []>;
+
+// This operator does subvector type checking.
+def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
+def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
+
+// Nodes for intrinsics: you should use the intrinsic itself and let tblgen use
+// these internally.  Don't reference these directly.
+def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
+                            SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
+                            [SDNPHasChain]>;
+def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
+                               SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
+                               [SDNPHasChain]>;
+def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
+                                SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
+
+def SDT_assertext : SDTypeProfile<1, 1,
+  [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
+def assertsext : SDNode<"ISD::AssertSext", SDT_assertext>;
+def assertzext : SDNode<"ISD::AssertZext", SDT_assertext>;
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Condition Codes
+
+class CondCode; // ISD::CondCode enums
+def SETOEQ : CondCode; def SETOGT : CondCode;
+def SETOGE : CondCode; def SETOLT : CondCode; def SETOLE : CondCode;
+def SETONE : CondCode; def SETO   : CondCode; def SETUO  : CondCode;
+def SETUEQ : CondCode; def SETUGT : CondCode; def SETUGE : CondCode;
+def SETULT : CondCode; def SETULE : CondCode; def SETUNE : CondCode;
+
+def SETEQ : CondCode; def SETGT : CondCode; def SETGE : CondCode;
+def SETLT : CondCode; def SETLE : CondCode; def SETNE : CondCode;
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node Transformation Functions.
+//
+// This mechanism allows targets to manipulate nodes in the output DAG once a
+// match has been formed.  This is typically used to manipulate immediate
+// values.
+//
+class SDNodeXForm<SDNode opc, code xformFunction> {
+  SDNode Opcode = opc;
+  code XFormFunction = xformFunction;
+}
+
+def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>;
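+
+// For example, a transform that rewrites a matched immediate to its value
+// plus one (illustrative sketch; the result type i32 is an assumption):
+//
+//   def Plus1XForm : SDNodeXForm<imm, [{
+//     return CurDAG->getTargetConstant(N->getSExtValue() + 1, SDLoc(N),
+//                                      MVT::i32);
+//   }]>;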
+
+//===----------------------------------------------------------------------===//
+// PatPred Subclasses.
+//
+// These allow specifying different sorts of predicates that control whether a
+// node is matched.
+//
+class PatPred;
+
+class CodePatPred<code predicate> : PatPred {
+  code PredicateCode = predicate;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Fragments.
+//
+// Pattern fragments are reusable chunks of dags that match specific things.
+// They can take arguments and have C++ predicates that control whether they
+// match.  They are intended to make the patterns for common instructions more
+// compact and readable.
+//
+
+/// PatFrag - Represents a pattern fragment.  This can match something on the
+/// DAG, from a single node to multiple nested other fragments.
+///
+class PatFrag<dag ops, dag frag, code pred = [{}],
+              SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
+  dag Operands = ops;
+  dag Fragment = frag;
+  code PredicateCode = pred;
+  code ImmediateCode = [{}];
+  SDNodeXForm OperandTransform = xform;
+
+  // Define a few pre-packaged predicates. This helps GlobalISel import
+  // existing rules from SelectionDAG for many common cases.
+  // They will be tested prior to the code in pred and must not be used in
+  // ImmLeaf and its subclasses.
+
+  // Is the desired pre-packaged predicate for a load?
+  bit IsLoad = ?;
+  // Is the desired pre-packaged predicate for a store?
+  bit IsStore = ?;
+  // Is the desired pre-packaged predicate for an atomic?
+  bit IsAtomic = ?;
+
+  // cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  // cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  bit IsUnindexed = ?;
+
+  // cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD
+  bit IsNonExtLoad = ?;
+  // cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+  bit IsAnyExtLoad = ?;
+  // cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+  bit IsSignExtLoad = ?;
+  // cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+  bit IsZeroExtLoad = ?;
+  // !cast<StoreSDNode>(N)->isTruncatingStore();
+  // cast<StoreSDNode>(N)->isTruncatingStore();
+  bit IsTruncStore = ?;
+
+  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Monotonic
+  bit IsAtomicOrderingMonotonic = ?;
+  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Acquire
+  bit IsAtomicOrderingAcquire = ?;
+  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Release
+  bit IsAtomicOrderingRelease = ?;
+  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::AcquireRelease
+  bit IsAtomicOrderingAcquireRelease = ?;
+  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::SequentiallyConsistent
+  bit IsAtomicOrderingSequentiallyConsistent = ?;
+
+  // isAcquireOrStronger(cast<AtomicSDNode>(N)->getOrdering())
+  // !isAcquireOrStronger(cast<AtomicSDNode>(N)->getOrdering())
+  bit IsAtomicOrderingAcquireOrStronger = ?;
+
+  // isReleaseOrStronger(cast<AtomicSDNode>(N)->getOrdering())
+  // !isReleaseOrStronger(cast<AtomicSDNode>(N)->getOrdering())
+  bit IsAtomicOrderingReleaseOrStronger = ?;
+
+  // cast<LoadSDNode>(N)->getMemoryVT() == MVT::<VT>;
+  // cast<StoreSDNode>(N)->getMemoryVT() == MVT::<VT>;
+  ValueType MemoryVT = ?;
+  // cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
+  // cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
+  ValueType ScalarMemoryVT = ?;
+}
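+
+// For example, a fragment that matches an addition of a value to itself
+// (illustrative sketch; add_self is a hypothetical name):
+//
+//   def add_self : PatFrag<(ops node:$x), (add node:$x, node:$x)>;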
+
+// OutPatFrag is a pattern fragment that is used as part of an output pattern
+// (not an input pattern). These do not have predicates or transforms, but are
+// used to avoid repeated subexpressions in output patterns.
+class OutPatFrag<dag ops, dag frag>
+ : PatFrag<ops, frag, [{}], NOOP_SDNodeXForm>;
+
+// PatLeafs are pattern fragments that have no operands.  This is just a helper
+// to define immediates and other common things concisely.
+class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
+ : PatFrag<(ops), frag, pred, xform>;
+
+
+// ImmLeaf is a pattern fragment with a constraint on the immediate.  The
+// constraint is a function that is run on the immediate (always with the value
+// sign extended out to an int64_t) as Imm.  For example:
+//
+//  def immSExt8 : ImmLeaf<i16, [{ return (char)Imm == Imm; }]>;
+//
+// This is a more convenient form for matching 'imm' nodes than PatLeaf, and is
+// preferred over PatLeaf because it allows the code generator to reason more
+// about the constraint.
+//
+// If FastISel should ignore all instructions that have an operand of this type,
+// the FastIselShouldIgnore flag can be set.  This is an optimization to reduce
+// the code size of the generated fast instruction selector.
+class ImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm,
+              SDNode ImmNode = imm>
+  : PatFrag<(ops), (vt ImmNode), [{}], xform> {
+  let ImmediateCode = pred;
+  bit FastIselShouldIgnore = 0;
+
+  // Is the data type of the immediate an APInt?
+  bit IsAPInt = 0;
+
+  // Is the data type of the immediate an APFloat?
+  bit IsAPFloat = 0;
+}
+
+// An ImmLeaf except that Imm is an APInt. This is useful when you need to
+// zero-extend the immediate instead of sign-extending it.
+//
+// Note that FastISel does not currently understand IntImmLeaf and will not
+// generate code for rules that make use of it. As such, it does not make sense
+// to replace ImmLeaf with IntImmLeaf. However, replacing PatLeaf with an
+// IntImmLeaf will allow GlobalISel to import the rule.
+class IntImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm>
+    : ImmLeaf<vt, pred, xform> {
+  let IsAPInt = 1;
+  let FastIselShouldIgnore = 1;
+}
+
+// An ImmLeaf except that Imm is an APFloat.
+//
+// Note that FastISel does not currently understand FPImmLeaf and will not
+// generate code for rules that make use of it.
+class FPImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm>
+  : ImmLeaf<vt, pred, xform, fpimm> {
+  let IsAPFloat = 1;
+  let FastIselShouldIgnore = 1;
+}
+
+// Leaf fragments.
+
+def vtInt      : PatLeaf<(vt),  [{ return N->getVT().isInteger(); }]>;
+def vtFP       : PatLeaf<(vt),  [{ return N->getVT().isFloatingPoint(); }]>;
+
+def immAllOnesV: PatLeaf<(build_vector), [{
+  return ISD::isBuildVectorAllOnes(N);
+}]>;
+def immAllZerosV: PatLeaf<(build_vector), [{
+  return ISD::isBuildVectorAllZeros(N);
+}]>;
+
+
+
+// Other helper fragments.
+def not  : PatFrag<(ops node:$in), (xor node:$in, -1)>;
+def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
+def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
+
+// null_frag - The null pattern operator is used in multiclass instantiations
+// which accept an SDPatternOperator for use in matching patterns for internal
+// definitions. When expanding a pattern, if the null fragment is referenced
+// in the expansion, the pattern is discarded and it is as if '[]' had been
+// specified. This allows multiclasses to have the isel patterns be optional.
+def null_frag : SDPatternOperator;
+
+// load fragments.
+def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr)> {
+  let IsLoad = 1;
+  let IsUnindexed = 1;
+}
+def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
+  let IsLoad = 1;
+  let IsNonExtLoad = 1;
+}
+
+// extending load fragments.
+def extload   : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
+  let IsLoad = 1;
+  let IsAnyExtLoad = 1;
+}
+def sextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
+  let IsLoad = 1;
+  let IsSignExtLoad = 1;
+}
+def zextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
+  let IsLoad = 1;
+  let IsZeroExtLoad = 1;
+}
+
+def extloadi1  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i1;
+}
+def extloadi8  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i8;
+}
+def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i16;
+}
+def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i32;
+}
+def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = f32;
+}
+def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = f64;
+}
+
+def sextloadi1  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i1;
+}
+def sextloadi8  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i8;
+}
+def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i16;
+}
+def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i32;
+}
+
+def zextloadi1  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i1;
+}
+def zextloadi8  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i8;
+}
+def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i16;
+}
+def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let MemoryVT = i32;
+}
+
+def extloadvi1  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i1;
+}
+def extloadvi8  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i8;
+}
+def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i16;
+}
+def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i32;
+}
+def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = f32;
+}
+def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = f64;
+}
+
+def sextloadvi1  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i1;
+}
+def sextloadvi8  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i8;
+}
+def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i16;
+}
+def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i32;
+}
+
+def zextloadvi1  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i1;
+}
+def zextloadvi8  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i8;
+}
+def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i16;
+}
+def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+  let IsLoad = 1;
+  let ScalarMemoryVT = i32;
+}
+
+// store fragments.
+def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
+                             (st node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let IsUnindexed = 1;
+}
+def store : PatFrag<(ops node:$val, node:$ptr),
+                    (unindexedstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let IsTruncStore = 0;
+}
+
+// truncstore fragments.
+def truncstore : PatFrag<(ops node:$val, node:$ptr),
+                         (unindexedstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let IsTruncStore = 1;
+}
+def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
+                           (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let MemoryVT = i8;
+}
+def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let MemoryVT = i16;
+}
+def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let MemoryVT = i32;
+}
+def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let MemoryVT = f32;
+}
+def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let MemoryVT = f64;
+}
+
+def truncstorevi8 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let ScalarMemoryVT = i8;
+}
+
+def truncstorevi16 : PatFrag<(ops node:$val, node:$ptr),
+                             (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let ScalarMemoryVT = i16;
+}
+
+def truncstorevi32 : PatFrag<(ops node:$val, node:$ptr),
+                             (truncstore node:$val, node:$ptr)> {
+  let IsStore = 1;
+  let ScalarMemoryVT = i32;
+}
+
+// indexed store fragments.
+def istore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                     (ist node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let IsTruncStore = 0;
+}
+
+def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
+                        (istore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
+
+def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (ist node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let IsTruncStore = 1;
+}
+def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
+def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                            (pre_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i1;
+}
+def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                            (pre_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i8;
+}
+def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (pre_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i16;
+}
+def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (pre_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i32;
+}
+def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (pre_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = f32;
+}
+
+def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
+                         (istore node:$val, node:$ptr, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
+
+def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                           (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
+def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (post_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i1;
+}
+def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (post_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i8;
+}
+def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (post_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i16;
+}
+def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (post_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = i32;
+}
+def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (post_truncst node:$val, node:$base, node:$offset)> {
+  let IsStore = 1;
+  let MemoryVT = f32;
+}
+
+// nontemporal store fragments.
+def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+                               (store node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->isNonTemporal();
+}]>;
+
+def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+                                      (nontemporalstore node:$val, node:$ptr), [{
+  StoreSDNode *St = cast<StoreSDNode>(N);
+  return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+}]>;
+
+def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+                                        (nontemporalstore node:$val, node:$ptr), [{
+  StoreSDNode *St = cast<StoreSDNode>(N);
+  return St->getAlignment() < St->getMemoryVT().getStoreSize();
+}]>;
+
+// nontemporal load fragments.
+def nontemporalload : PatFrag<(ops node:$ptr),
+                               (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->isNonTemporal();
+}]>;
+
+def alignednontemporalload : PatFrag<(ops node:$ptr),
+                                      (nontemporalload node:$ptr), [{
+  LoadSDNode *Ld = cast<LoadSDNode>(N);
+  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+}]>;
+
+// setcc convenience fragments.
+def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETOEQ)>;
+def setogt : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETOGT)>;
+def setoge : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETOGE)>;
+def setolt : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETOLT)>;
+def setole : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETOLE)>;
+def setone : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETONE)>;
+def seto   : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETO)>;
+def setuo  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETUO)>;
+def setueq : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETUEQ)>;
+def setugt : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETUGT)>;
+def setuge : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETUGE)>;
+def setult : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETULT)>;
+def setule : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETULE)>;
+def setune : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETUNE)>;
+def seteq  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETEQ)>;
+def setgt  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETGT)>;
+def setge  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETGE)>;
+def setlt  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETLT)>;
+def setle  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETLE)>;
+def setne  : PatFrag<(ops node:$lhs, node:$rhs),
+                     (setcc node:$lhs, node:$rhs, SETNE)>;
+
+multiclass binary_atomic_op_ord<SDNode atomic_op> {
+  def #NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingMonotonic = 1;
+  }
+  def #NAME#_acquire : PatFrag<(ops node:$ptr, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingAcquire = 1;
+  }
+  def #NAME#_release : PatFrag<(ops node:$ptr, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingRelease = 1;
+  }
+  def #NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingAcquireRelease = 1;
+  }
+  def #NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingSequentiallyConsistent = 1;
+  }
+}
+
+multiclass ternary_atomic_op_ord<SDNode atomic_op> {
+  def #NAME#_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingMonotonic = 1;
+  }
+  def #NAME#_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingAcquire = 1;
+  }
+  def #NAME#_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingRelease = 1;
+  }
+  def #NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingAcquireRelease = 1;
+  }
+  def #NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+      (!cast<SDPatternOperator>(#NAME) node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let IsAtomicOrderingSequentiallyConsistent = 1;
+  }
+}
+
+multiclass binary_atomic_op<SDNode atomic_op> {
+  def _8 : PatFrag<(ops node:$ptr, node:$val),
+                   (atomic_op  node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i8;
+  }
+  def _16 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i16;
+  }
+  def _32 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i32;
+  }
+  def _64 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i64;
+  }
+
+  defm NAME#_8  : binary_atomic_op_ord<atomic_op>;
+  defm NAME#_16 : binary_atomic_op_ord<atomic_op>;
+  defm NAME#_32 : binary_atomic_op_ord<atomic_op>;
+  defm NAME#_64 : binary_atomic_op_ord<atomic_op>;
+}
+
+multiclass ternary_atomic_op<SDNode atomic_op> {
+  def _8 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+                   (atomic_op  node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i8;
+  }
+  def _16 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i16;
+  }
+  def _32 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i32;
+  }
+  def _64 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
+                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
+    let IsAtomic = 1;
+    let MemoryVT = i64;
+  }
+
+  defm NAME#_8  : ternary_atomic_op_ord<atomic_op>;
+  defm NAME#_16 : ternary_atomic_op_ord<atomic_op>;
+  defm NAME#_32 : ternary_atomic_op_ord<atomic_op>;
+  defm NAME#_64 : ternary_atomic_op_ord<atomic_op>;
+}
+
+defm atomic_load_add  : binary_atomic_op<atomic_load_add>;
+defm atomic_swap      : binary_atomic_op<atomic_swap>;
+defm atomic_load_sub  : binary_atomic_op<atomic_load_sub>;
+defm atomic_load_and  : binary_atomic_op<atomic_load_and>;
+defm atomic_load_clr  : binary_atomic_op<atomic_load_clr>;
+defm atomic_load_or   : binary_atomic_op<atomic_load_or>;
+defm atomic_load_xor  : binary_atomic_op<atomic_load_xor>;
+defm atomic_load_nand : binary_atomic_op<atomic_load_nand>;
+defm atomic_load_min  : binary_atomic_op<atomic_load_min>;
+defm atomic_load_max  : binary_atomic_op<atomic_load_max>;
+defm atomic_load_umin : binary_atomic_op<atomic_load_umin>;
+defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
+defm atomic_store     : binary_atomic_op<atomic_store>;
+defm atomic_cmp_swap  : ternary_atomic_op<atomic_cmp_swap>;
+
+def atomic_load_8 :
+  PatFrag<(ops node:$ptr),
+          (atomic_load node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i8;
+}
+def atomic_load_16 :
+  PatFrag<(ops node:$ptr),
+          (atomic_load node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i16;
+}
+def atomic_load_32 :
+  PatFrag<(ops node:$ptr),
+          (atomic_load node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i32;
+}
+def atomic_load_64 :
+  PatFrag<(ops node:$ptr),
+          (atomic_load node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i64;
+}
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Support.
+//
+// Patterns are what are actually matched against by the target-flavored
+// instruction selection DAG.  Instructions defined by the target implicitly
+// define patterns in most cases, but patterns can also be explicitly added when
+// an operation is defined by a sequence of instructions (e.g. loading a large
+// immediate value on RISC targets that do not support immediates as large as
+// their GPRs).
+//
+
+class Pattern<dag patternToMatch, list<dag> resultInstrs> {
+  dag             PatternToMatch  = patternToMatch;
+  list<dag>       ResultInstrs    = resultInstrs;
+  list<Predicate> Predicates      = [];  // See class Instruction in Target.td.
+  int             AddedComplexity = 0;   // See class Instruction in Target.td.
+}
+
+// Pat - A simple (but common) form of a pattern, which produces a simple result
+// not needing a full list.
+class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
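+
+// For illustration, a pattern mapping a generic add node to a hypothetical
+// ADDrr target instruction (GPR32 and ADDrr are assumed target definitions):
+//
+//   def : Pat<(add GPR32:$Rn, GPR32:$Rm), (ADDrr GPR32:$Rn, GPR32:$Rm)>;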
+
+//===----------------------------------------------------------------------===//
+// Complex pattern definitions.
+//
+
+// Complex patterns, e.g. the X86 addressing mode, require pattern-matching
+// code in C++. NumOperands is the number of operands returned by the select
+// function; SelectFunc is the name of the function used to match the maximal
+// pattern; RootNodes is the list of possible root nodes of the sub-DAGs to
+// match, e.g. for the X86 addressing mode:
+//   def addr : ComplexPattern<iPTR, 4, "SelectAddr", [add]>;
+//
+class ComplexPattern<ValueType ty, int numops, string fn,
+                     list<SDNode> roots = [], list<SDNodeProperty> props = [],
+                     int complexity = -1> {
+  ValueType Ty = ty;
+  int NumOperands = numops;
+  string SelectFunc = fn;
+  list<SDNode> RootNodes = roots;
+  list<SDNodeProperty> Properties = props;
+  int Complexity = complexity;
+}
diff --git a/linux-x64/clang/include/llvm/Testing/Support/Error.h b/linux-x64/clang/include/llvm/Testing/Support/Error.h
new file mode 100644
index 0000000..50889b9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Testing/Support/Error.h
@@ -0,0 +1,107 @@
+//===- llvm/Testing/Support/Error.h ---------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_ERROR_H
+#define LLVM_TESTING_SUPPORT_ERROR_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Testing/Support/SupportHelpers.h"
+
+#include "gmock/gmock.h"
+#include <ostream>
+
+namespace llvm {
+namespace detail {
+ErrorHolder TakeError(Error Err);
+
+template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &Exp) {
+  return {TakeError(Exp.takeError()), Exp};
+}
+
+template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &&Exp) {
+  return TakeExpected(Exp);
+}
+
+template <typename T>
+class ValueMatchesMono
+    : public testing::MatcherInterface<const ExpectedHolder<T> &> {
+public:
+  explicit ValueMatchesMono(const testing::Matcher<T> &Matcher)
+      : Matcher(Matcher) {}
+
+  bool MatchAndExplain(const ExpectedHolder<T> &Holder,
+                       testing::MatchResultListener *listener) const override {
+    if (!Holder.Success)
+      return false;
+
+    bool result = Matcher.MatchAndExplain(*Holder.Exp, listener);
+
+    if (result)
+      return result;
+    *listener << "(";
+    Matcher.DescribeNegationTo(listener->stream());
+    *listener << ")";
+    return result;
+  }
+
+  void DescribeTo(std::ostream *OS) const override {
+    *OS << "succeeded with value (";
+    Matcher.DescribeTo(OS);
+    *OS << ")";
+  }
+
+  void DescribeNegationTo(std::ostream *OS) const override {
+    *OS << "did not succeed or value (";
+    Matcher.DescribeNegationTo(OS);
+    *OS << ")";
+  }
+
+private:
+  testing::Matcher<T> Matcher;
+};
+
+template<typename M>
+class ValueMatchesPoly {
+public:
+  explicit ValueMatchesPoly(const M &Matcher) : Matcher(Matcher) {}
+
+  template <typename T>
+  operator testing::Matcher<const ExpectedHolder<T> &>() const {
+    return MakeMatcher(
+        new ValueMatchesMono<T>(testing::SafeMatcherCast<T>(Matcher)));
+  }
+
+private:
+  M Matcher;
+};
+
+} // namespace detail
+
+#define EXPECT_THAT_ERROR(Err, Matcher)                                        \
+  EXPECT_THAT(llvm::detail::TakeError(Err), Matcher)
+#define ASSERT_THAT_ERROR(Err, Matcher)                                        \
+  ASSERT_THAT(llvm::detail::TakeError(Err), Matcher)
+
+#define EXPECT_THAT_EXPECTED(Err, Matcher)                                     \
+  EXPECT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+#define ASSERT_THAT_EXPECTED(Err, Matcher)                                     \
+  ASSERT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+
+MATCHER(Succeeded, "") { return arg.Success; }
+MATCHER(Failed, "") { return !arg.Success; }
+
+template <typename M>
+detail::ValueMatchesPoly<M> HasValue(M Matcher) {
+  return detail::ValueMatchesPoly<M>(Matcher);
+}
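+
+// Usage sketch: parseNumber and validate below are hypothetical functions
+// returning Expected<int> and Error respectively; they only illustrate how
+// the macros pair with the matchers defined above.
+//
+//   EXPECT_THAT_ERROR(validate(Config), Succeeded());
+//   EXPECT_THAT_EXPECTED(parseNumber("42"), HasValue(42));
+//   EXPECT_THAT_EXPECTED(parseNumber("bogus"), Failed());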
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Testing/Support/SupportHelpers.h b/linux-x64/clang/include/llvm/Testing/Support/SupportHelpers.h
new file mode 100644
index 0000000..d7f0c71
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Testing/Support/SupportHelpers.h
@@ -0,0 +1,49 @@
+//===- Testing/Support/SupportHelpers.h -----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+#define LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "gtest/gtest-printers.h"
+
+namespace llvm {
+namespace detail {
+struct ErrorHolder {
+  bool Success;
+  std::string Message;
+};
+
+template <typename T> struct ExpectedHolder : public ErrorHolder {
+  ExpectedHolder(ErrorHolder Err, Expected<T> &Exp)
+      : ErrorHolder(std::move(Err)), Exp(Exp) {}
+
+  Expected<T> &Exp;
+};
+
+inline void PrintTo(const ErrorHolder &Err, std::ostream *Out) {
+  *Out << (Err.Success ? "succeeded" : "failed");
+  if (!Err.Success) {
+    *Out << "  (" << StringRef(Err.Message).trim().str() << ")";
+  }
+}
+
+template <typename T>
+void PrintTo(const ExpectedHolder<T> &Item, std::ostream *Out) {
+  if (Item.Success) {
+    *Out << "succeeded with value " << ::testing::PrintToString(*Item.Exp);
+  } else {
+    PrintTo(static_cast<const ErrorHolder &>(Item), Out);
+  }
+}
+} // namespace detail
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h b/linux-x64/clang/include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h
new file mode 100644
index 0000000..964b0f7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h
@@ -0,0 +1,24 @@
+//===- DlltoolDriver.h - dlltool.exe-compatible driver ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an interface to a dlltool.exe-compatible driver.
+// Used by llvm-dlltool.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H
+#define LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H
+
+namespace llvm {
+template <typename T> class ArrayRef;
+
+int dlltoolDriverMain(ArrayRef<const char *> ArgsArr);
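+
+// A minimal invocation sketch; the option values are illustrative only:
+//
+//   const char *Args[] = {"llvm-dlltool", "-d", "foo.def", "-l", "foo.lib"};
+//   int RC = dlltoolDriverMain(Args);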
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ToolDrivers/llvm-lib/LibDriver.h b/linux-x64/clang/include/llvm/ToolDrivers/llvm-lib/LibDriver.h
new file mode 100644
index 0000000..a4806ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ToolDrivers/llvm-lib/LibDriver.h
@@ -0,0 +1,24 @@
+//===- llvm-lib/LibDriver.h - lib.exe-compatible driver ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an interface to a lib.exe-compatible driver that also understands
+// bitcode files. Used by llvm-lib and lld-link /lib.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLDRIVERS_LLVM_LIB_LIBDRIVER_H
+#define LLVM_TOOLDRIVERS_LLVM_LIB_LIBDRIVER_H
+
+namespace llvm {
+template <typename T> class ArrayRef;
+
+int libDriverMain(ArrayRef<const char *> Args);
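+
+// A minimal invocation sketch; the option values are illustrative only:
+//
+//   const char *Args[] = {"llvm-lib", "/out:foo.lib", "a.obj", "b.obj"};
+//   int RC = libDriverMain(Args);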
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
new file mode 100644
index 0000000..a318d18
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
@@ -0,0 +1,34 @@
+//===- AggressiveInstCombine.h - AggressiveInstCombine pass -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the aggressive instcombine pass.
+/// This pass is suitable for use in the new pass manager. For a pass that works
+/// with the legacy pass manager, please look for
+/// \c createAggressiveInstCombinerPass() in Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class AggressiveInstCombinePass
+    : public PassInfoMixin<AggressiveInstCombinePass> {
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
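+
+// Registering the pass with the new pass manager is a one-liner; a sketch,
+// assuming the surrounding pipeline setup:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(AggressiveInstCombinePass());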
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines.h b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
new file mode 100644
index 0000000..51beb44
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
@@ -0,0 +1,38 @@
+//===-- Coroutines.h - Coroutine Transformations ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Declare accessor functions for coroutine lowering passes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_H
+#define LLVM_TRANSFORMS_COROUTINES_H
+
+namespace llvm {
+
+class Pass;
+class PassManagerBuilder;
+
+/// Add all coroutine passes to appropriate extension points.
+void addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder);
+
+/// Lower coroutine intrinsics that are not needed by later passes.
+Pass *createCoroEarlyPass();
+
+/// Split up coroutines into multiple functions driving their state machines.
+Pass *createCoroSplitPass();
+
+/// Analyze coroutine use sites, devirtualize resume/destroy calls, and elide
+/// heap allocations for coroutine frames where possible.
+Pass *createCoroElidePass();
+
+/// Lower all remaining coroutine intrinsics.
+Pass *createCoroCleanupPass();
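+
+// Clients normally do not add these passes individually; a minimal sketch of
+// the intended use with the legacy pass manager builder:
+//
+//   PassManagerBuilder Builder;
+//   addCoroutinePassesToExtensionPoints(Builder);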
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO.h b/linux-x64/clang/include/llvm/Transforms/IPO.h
new file mode 100644
index 0000000..1514335
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO.h
@@ -0,0 +1,271 @@
+//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the IPO transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_H
+#define LLVM_TRANSFORMS_IPO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <functional>
+#include <vector>
+
+namespace llvm {
+
+struct InlineParams;
+class StringRef;
+class ModuleSummaryIndex;
+class ModulePass;
+class Pass;
+class Function;
+class BasicBlock;
+class GlobalValue;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+//
+// This function returns a pass that removes symbols from functions and
+// modules.  If OnlyDebugInfo is true, only debugging information is removed
+// from the module.
+//
+ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false);
+
+//===----------------------------------------------------------------------===//
+//
+// This function returns a pass that strips symbols from functions and
+// modules; only debugging information is left unstripped.
+//
+ModulePass *createStripNonDebugSymbolsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes llvm.dbg.declare intrinsics.
+ModulePass *createStripDebugDeclarePass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes unused symbols' debug info.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
+/// createConstantMergePass - This function returns a new pass that merges
+/// duplicate global constants together into a single constant that is shared.
+/// This is useful because some passes (e.g. TraceValues) insert a lot of string
+/// constants into the program, regardless of whether or not they duplicate an
+/// existing string.
+///
+ModulePass *createConstantMergePass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalOptimizerPass - This function returns a new pass that optimizes
+/// non-address taken internal globals.
+///
+ModulePass *createGlobalOptimizerPass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalDCEPass - This transform is designed to eliminate unreachable
+/// internal globals (functions or global variables)
+///
+ModulePass *createGlobalDCEPass();
+
+//===----------------------------------------------------------------------===//
+/// This transform is designed to eliminate available external globals
+/// (functions or global variables)
+///
+ModulePass *createEliminateAvailableExternallyPass();
+
+//===----------------------------------------------------------------------===//
+/// createGVExtractionPass - If deleteFn is true, this pass deletes
+/// the specified global values. Otherwise, it deletes as much of the module as
+/// possible, except for the global values specified.
+///
+ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
+                                   deleteFn = false);
+
+//===----------------------------------------------------------------------===//
+/// This pass performs iterative function importing from other modules.
+Pass *createFunctionImportPass();
+
+//===----------------------------------------------------------------------===//
+/// createFunctionInliningPass - Return a new pass object that uses a heuristic
+/// to inline direct function calls to small functions.
+///
+/// The Threshold can be passed directly, or asked to be computed from the
+/// given optimization and size optimization arguments.
+///
+/// The -inline-threshold command line option takes precedence over the
+/// threshold given here.
+Pass *createFunctionInliningPass();
+Pass *createFunctionInliningPass(int Threshold);
+Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel,
+                                 bool DisableInlineHotCallSite);
+Pass *createFunctionInliningPass(InlineParams &Params);
+
+//===----------------------------------------------------------------------===//
+/// createPruneEHPass - Return a new pass object which transforms invoke
+/// instructions into calls, if the callee can _not_ unwind the stack.
+///
+Pass *createPruneEHPass();
+
+//===----------------------------------------------------------------------===//
+/// createInternalizePass - This pass loops over all of the functions in the
+/// input module, internalizing all globals (functions and variables) it can.
+///
+/// Before internalizing a symbol, the callback \p MustPreserveGV is invoked and
+/// gives to the client the ability to prevent internalizing specific symbols.
+///
+/// The symbols in DSOList are internalized if it is safe to drop them from
+/// the symbol table.
+///
+/// Note that the command-line options previously used to configure this pass
+/// are no longer honored!
+ModulePass *
+createInternalizePass(std::function<bool(const GlobalValue &)> MustPreserveGV);
+
+/// createInternalizePass - Same as above, but with an empty exportList.
+ModulePass *createInternalizePass();
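+
+// For example, to internalize everything except "main" (a sketch; legacy pass
+// manager setup is assumed):
+//
+//   legacy::PassManager PM;
+//   PM.add(createInternalizePass(
+//       [](const GlobalValue &GV) { return GV.getName() == "main"; }));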
+
+//===----------------------------------------------------------------------===//
+/// createDeadArgEliminationPass - This pass removes arguments from functions
+/// which are not used by the body of the function.
+///
+ModulePass *createDeadArgEliminationPass();
+
+/// DeadArgHacking pass - Same as DAE, but delete arguments of external
+/// functions as well.  This is definitely not safe, and should only be used by
+/// bugpoint.
+ModulePass *createDeadArgHackingPass();
+
+//===----------------------------------------------------------------------===//
+/// createArgumentPromotionPass - This pass promotes "by reference" arguments to
+/// be passed by value if the number of elements passed is less than or equal
+/// to maxElements (maxElements == 0 means always promote).
+///
+Pass *createArgumentPromotionPass(unsigned maxElements = 3);
+
+//===----------------------------------------------------------------------===//
+/// createIPConstantPropagationPass - This pass propagates constants from call
+/// sites into the bodies of functions.
+///
+ModulePass *createIPConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+/// createIPSCCPPass - This pass propagates constants from call sites into the
+/// bodies of functions, and keeps track of whether basic blocks are executable
+/// in the process.
+///
+ModulePass *createIPSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+/// createLoopExtractorPass - This pass extracts all natural loops from the
+/// program into a function if it can.
+///
+Pass *createLoopExtractorPass();
+
+/// createSingleLoopExtractorPass - This pass extracts one natural loop from the
+/// program into a function if it can.  This is used by bugpoint.
+///
+Pass *createSingleLoopExtractorPass();
+
+/// createBlockExtractorPass - This pass extracts all the specified blocks
+/// from the functions in the module.
+///
+ModulePass *createBlockExtractorPass();
+ModulePass *
+createBlockExtractorPass(const SmallVectorImpl<BasicBlock *> &BlocksToExtract,
+                         bool EraseFunctions);
+
+/// createStripDeadPrototypesPass - This pass removes any function declarations
+/// (prototypes) that are not used.
+ModulePass *createStripDeadPrototypesPass();
+
+//===----------------------------------------------------------------------===//
+/// createReversePostOrderFunctionAttrsPass - This pass walks SCCs of the call
+/// graph in RPO to deduce and propagate function attributes. Currently it
+/// only handles synthesizing norecurse attributes.
+///
+Pass *createReversePostOrderFunctionAttrsPass();
+
+//===----------------------------------------------------------------------===//
+/// createMergeFunctionsPass - This pass discovers identical functions and
+/// collapses them.
+///
+ModulePass *createMergeFunctionsPass();
+
+//===----------------------------------------------------------------------===//
+/// createPartialInliningPass - This pass inlines parts of functions.
+///
+ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
+
+/// createCalledValuePropagationPass - Attach metadata to indirect call sites
+/// indicating the set of functions they may target at run-time.
+ModulePass *createCalledValuePropagationPass();
+
+/// What to do with the summary when running passes that operate on it.
+enum class PassSummaryAction {
+  None,   ///< Do nothing.
+  Import, ///< Import information from summary.
+  Export, ///< Export information to summary.
+};
+
+/// \brief This pass lowers type metadata and the llvm.type.test intrinsic to
+/// bitsets.
+///
+/// The behavior depends on the summary arguments:
+/// - If ExportSummary is non-null, this pass will export type identifiers to
+///   the given summary.
+/// - Otherwise, if ImportSummary is non-null, this pass will import type
+///   identifiers from the given summary.
+/// - Otherwise it does neither.
+/// It is invalid for both ExportSummary and ImportSummary to be non-null.
+ModulePass *createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
+                                     const ModuleSummaryIndex *ImportSummary);
+
+/// \brief This pass exports CFI checks for use by external modules.
+ModulePass *createCrossDSOCFIPass();
+
+/// \brief This pass implements whole-program devirtualization using type
+/// metadata.
+///
+/// The behavior depends on the summary arguments:
+/// - If ExportSummary is non-null, this pass will export type identifiers to
+///   the given summary.
+/// - Otherwise, if ImportSummary is non-null, this pass will import type
+///   identifiers from the given summary.
+/// - Otherwise it does neither.
+/// It is invalid for both ExportSummary and ImportSummary to be non-null.
+ModulePass *
+createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
+                             const ModuleSummaryIndex *ImportSummary);
+
+/// This pass splits globals into pieces for the benefit of whole-program
+/// devirtualization and control-flow integrity.
+ModulePass *createGlobalSplitPass();
+
+//===----------------------------------------------------------------------===//
+// SampleProfilePass - Loads sample profile data from disk and generates
+// IR metadata to reflect the profile.
+ModulePass *createSampleProfileLoaderPass();
+ModulePass *createSampleProfileLoaderPass(StringRef Name);
+
+/// Write ThinLTO-ready bitcode to Str.
+ModulePass *createWriteThinLTOBitcodePass(raw_ostream &Str,
+                                          raw_ostream *ThinLinkOS = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
new file mode 100644
index 0000000..b52c0fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
@@ -0,0 +1,46 @@
+//===-- AlwaysInliner.h - Pass to inline "always_inline" functions --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Provides passes to inline "always_inline" functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+#define LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Inlines functions marked as "always_inline".
+///
+/// Note that this does not inline call sites marked as always_inline and does
+/// not delete the functions even when all users are inlined. The normal
+/// inliner should be used to handle call site inlining; this pass's goal is to
+/// be the simplest possible pass to remove always_inline function definitions'
+/// uses by inlining them. The \c GlobalDCE pass can be used to remove these
+/// functions once all users are gone.
+class AlwaysInlinerPass : public PassInfoMixin<AlwaysInlinerPass> {
+  bool InsertLifetime;
+
+public:
+  AlwaysInlinerPass(bool InsertLifetime = true)
+      : InsertLifetime(InsertLifetime) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+/// Create a legacy pass manager instance of a pass to inline and remove
+/// functions marked as "always_inline".
+Pass *createAlwaysInlinerLegacyPass(bool InsertLifetime = true);
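+
+// New pass manager usage, as a sketch (module pipeline setup assumed):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(AlwaysInlinerPass(/*InsertLifetime=*/true));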
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
new file mode 100644
index 0000000..49ca6cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
@@ -0,0 +1,36 @@
+//===- ArgumentPromotion.h - Promote by-reference arguments -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
+#define LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Argument promotion pass.
+///
+/// This pass walks the functions in each SCC and for each one tries to
+/// transform it and all of its callers to replace indirect arguments with
+/// direct (by-value) arguments.
+class ArgumentPromotionPass : public PassInfoMixin<ArgumentPromotionPass> {
+  unsigned MaxElements;
+
+public:
+  ArgumentPromotionPass(unsigned MaxElements = 3u) : MaxElements(MaxElements) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
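+
+// Being a CGSCC pass, it is added to a CGSCC pipeline; a sketch:
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(ArgumentPromotionPass(/*MaxElements=*/3));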
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
new file mode 100644
index 0000000..352bdc7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
@@ -0,0 +1,35 @@
+//===- CalledValuePropagation.h - Propagate called values -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a transformation that attaches !callees metadata to
+// indirect call sites. For a given call site, the metadata, if present,
+// indicates the set of functions the call site could possibly target at
+// run-time. This metadata is added to indirect call sites when the set of
+// possible targets can be determined by analysis and is known to be small. The
+// analysis driving the transformation is similar to constant propagation and
+// makes use of the generic sparse propagation solver.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+#define LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class CalledValuePropagationPass
+    : public PassInfoMixin<CalledValuePropagationPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h b/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h
new file mode 100644
index 0000000..e04d3ae
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h
@@ -0,0 +1,37 @@
+//===- ConstantMerge.h - Merge duplicate global constants -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to a pass that merges duplicate global
+// constants together into a single constant that is shared.  This is useful
+// because some passes (e.g. TraceValues) insert a lot of string constants into
+// the program, regardless of whether or not an existing string is available.
+//
+// Algorithm: ConstantMerge is designed to build up a map of available constants
+// and eliminate duplicates when it is initialized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
+#define LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// A pass that merges duplicate global constants into a single constant.
+class ConstantMergePass : public PassInfoMixin<ConstantMergePass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
new file mode 100644
index 0000000..0979f5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
@@ -0,0 +1,28 @@
+//===-- CrossDSOCFI.h - Externalize this module's CFI checks ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass exports all llvm.bitset's found in the module in the form of a
+// __cfi_check function, which can be used to verify cross-DSO call targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+#define LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class CrossDSOCFIPass : public PassInfoMixin<CrossDSOCFIPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+}
+#endif // LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
new file mode 100644
index 0000000..ba5666f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
@@ -0,0 +1,144 @@
+//===- DeadArgumentElimination.h - Eliminate Dead Args ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass deletes dead arguments from internal functions.  Dead argument
+// elimination removes arguments which are directly dead, as well as arguments
+// only passed into function calls as dead arguments of other functions.  This
+// pass also deletes dead return values in a similar way.
+//
+// This pass is often useful as a cleanup pass to run after aggressive
+// interprocedural passes, which add possibly-dead arguments or return values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
+#define LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+
+namespace llvm {
+
+class Module;
+class Use;
+class Value;
+
+/// Eliminate dead arguments (and return values) from functions.
+class DeadArgumentEliminationPass
+    : public PassInfoMixin<DeadArgumentEliminationPass> {
+public:
+  /// Struct that represents (part of) either a return value or a function
+  /// argument.  Used so that arguments and return values can be used
+  /// interchangeably.
+  struct RetOrArg {
+    const Function *F;
+    unsigned Idx;
+    bool IsArg;
+
+    RetOrArg(const Function *F, unsigned Idx, bool IsArg)
+        : F(F), Idx(Idx), IsArg(IsArg) {}
+
+    /// Make RetOrArg comparable, so we can put it into a map.
+    bool operator<(const RetOrArg &O) const {
+      return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
+    }
+
+    /// Make RetOrArg comparable, so we can easily iterate the multimap.
+    bool operator==(const RetOrArg &O) const {
+      return F == O.F && Idx == O.Idx && IsArg == O.IsArg;
+    }
+
+    std::string getDescription() const {
+      return (Twine(IsArg ? "Argument #" : "Return value #") + Twine(Idx) +
+              " of function " + F->getName())
+          .str();
+    }
+  };
+
+  /// Liveness enum - During our initial pass over the program, we determine
+  /// that things are either alive or maybe alive. We don't mark anything
+  /// explicitly dead (even if we know they are), since anything not alive
+  /// with no registered uses (in Uses) will never be marked alive and will
+  /// thus become dead in the end.
+  enum Liveness { Live, MaybeLive };
+
+  DeadArgumentEliminationPass(bool ShouldHackArguments_ = false)
+      : ShouldHackArguments(ShouldHackArguments_) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+
+  /// Convenience wrapper
+  RetOrArg CreateRet(const Function *F, unsigned Idx) {
+    return RetOrArg(F, Idx, false);
+  }
+
+  /// Convenience wrapper
+  RetOrArg CreateArg(const Function *F, unsigned Idx) {
+    return RetOrArg(F, Idx, true);
+  }
+
+  using UseMap = std::multimap<RetOrArg, RetOrArg>;
+
+  /// This maps a return value or argument to any MaybeLive return values or
+  /// arguments it uses. This allows the MaybeLive values to be marked live
+  /// when any of its users is marked live.
+  /// For example (indices are left out for clarity):
+  ///  - Uses[ret F] = ret G
+  ///    This means that F calls G, and F returns the value returned by G.
+  ///  - Uses[arg F] = ret G
+  ///    This means that some function calls G and passes its result as an
+  ///    argument to F.
+  ///  - Uses[ret F] = arg F
+  ///    This means that F returns one of its own arguments.
+  ///  - Uses[arg F] = arg G
+  ///    This means that G calls F and passes one of its own (G's) arguments
+  ///    directly to F.
+  UseMap Uses;
+
+  using LiveSet = std::set<RetOrArg>;
+  using LiveFuncSet = std::set<const Function *>;
+
+  /// This set contains all values that have been determined to be live.
+  LiveSet LiveValues;
+
+  /// This set contains all values that cannot be changed in any way.
+  LiveFuncSet LiveFunctions;
+
+  using UseVector = SmallVector<RetOrArg, 5>;
+
+  /// This allows this pass to do double-duty as the dead arg hacking pass
+  /// (used only by bugpoint).
+  bool ShouldHackArguments = false;
+
+private:
+  Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
+  Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses,
+                     unsigned RetValNum = -1U);
+  Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
+
+  void SurveyFunction(const Function &F);
+  void MarkValue(const RetOrArg &RA, Liveness L,
+                 const UseVector &MaybeLiveUses);
+  void MarkLive(const RetOrArg &RA);
+  void MarkLive(const Function &F);
+  void PropagateLiveness(const RetOrArg &RA);
+  bool RemoveDeadStuffFromFunction(Function *F);
+  bool DeleteDeadVarargs(Function &Fn);
+  bool RemoveDeadArgumentsFromCallers(Function &Fn);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h b/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h
new file mode 100644
index 0000000..94cb954
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h
@@ -0,0 +1,33 @@
+//===- ElimAvailExtern.h - Optimize Global Variables ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transform is designed to eliminate available external global
+// definitions from the program, turning them into declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
+#define LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// A pass that transforms external global definitions into declarations.
+class EliminateAvailableExternallyPass
+    : public PassInfoMixin<EliminateAvailableExternallyPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
new file mode 100644
index 0000000..ff8a654
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
@@ -0,0 +1,33 @@
+//===-- ForceFunctionAttrs.h - Force function attrs for debugging ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Super simple passes to force specific function attrs from the commandline
+/// into the IR for debugging purposes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass which forces specific function attributes into the IR, primarily as
+/// a debugging tool.
+struct ForceFunctionAttrsPass : PassInfoMixin<ForceFunctionAttrsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+/// Create a legacy pass manager instance of a pass to force function attrs.
+Pass *createForceFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h
new file mode 100644
index 0000000..dc9f18c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h
@@ -0,0 +1,77 @@
+//===- FunctionAttrs.h - Compute function attributes ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Provides passes for computing function attributes based on interprocedural
+/// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class AAResults;
+class Function;
+class Module;
+class Pass;
+
+/// The three kinds of memory access relevant to 'readonly' and
+/// 'readnone' attributes.
+enum MemoryAccessKind {
+  MAK_ReadNone = 0,
+  MAK_ReadOnly = 1,
+  MAK_MayWrite = 2
+};
+
+/// Returns the memory access properties of this copy of the function.
+MemoryAccessKind computeFunctionBodyMemoryAccess(Function &F, AAResults &AAR);
+
+/// Computes function attributes in post-order over the call graph.
+///
+/// By operating in post-order, this pass computes precise attributes for
+/// called functions prior to processing their callers. This "bottom-up"
+/// approach allows powerful interprocedural inference of function attributes
+/// like memory access patterns, etc. It can discover functions that do not
+/// access memory, or only read memory, and give them the readnone/readonly
+/// attribute. It also discovers function arguments that are not captured by
+/// the function and marks them with the nocapture attribute.
+struct PostOrderFunctionAttrsPass : PassInfoMixin<PostOrderFunctionAttrsPass> {
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
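+
+// A CGSCC pass is typically embedded in a module pipeline via an adaptor;
+// a sketch:
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(
+//       createModuleToPostOrderCGSCCPassAdaptor(PostOrderFunctionAttrsPass()));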
+
+/// Create a legacy pass manager instance of a pass to compute function attrs
+/// in post-order.
+Pass *createPostOrderFunctionAttrsLegacyPass();
+
+/// A pass to do RPO deduction and propagation of function attributes.
+///
+/// This pass provides a general RPO or "top down" propagation of
+/// function attributes. For a few (rare) cases, we can deduce significantly
+/// more about function attributes by working in RPO, so this pass
+/// provides the complement to the post-order pass above where the majority of
+/// deduction is performed.
+// FIXME: Currently there is no RPO CGSCC pass structure to slide into and so
+// this is a boring module pass, but eventually it should be an RPO CGSCC pass
+// when such infrastructure is available.
+class ReversePostOrderFunctionAttrsPass
+    : public PassInfoMixin<ReversePostOrderFunctionAttrsPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
new file mode 100644
index 0000000..5fedde1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
@@ -0,0 +1,162 @@
+//===- llvm/Transforms/IPO/FunctionImport.h - ThinLTO importing -*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
+#define LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Error.h"
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <unordered_set>
+#include <utility>
+
+namespace llvm {
+
+class Module;
+
+/// The function importer automatically imports functions from other modules
+/// based on the provided summary information.
+class FunctionImporter {
+public:
+  /// Set of functions to import from a source module. Each entry is a map
+  /// containing all the functions to import for a source module.
+  /// The key is the GUID identifying a function to import, and the value
+  /// is the threshold applied when deciding to import it.
+  using FunctionsToImportTy = std::map<GlobalValue::GUID, unsigned>;
+
+  /// The map contains an entry for every module to import from, the key being
+  /// the module identifier to pass to the ModuleLoader. The value is the set of
+  /// functions to import.
+  using ImportMapTy = StringMap<FunctionsToImportTy>;
+
+  /// The set contains an entry for every global value the module exports.
+  using ExportSetTy = std::unordered_set<GlobalValue::GUID>;
+
+  /// A function of this type is used to load modules referenced by the index.
+  using ModuleLoaderTy =
+      std::function<Expected<std::unique_ptr<Module>>(StringRef Identifier)>;
+
+  /// Create a Function Importer.
+  FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader)
+      : Index(Index), ModuleLoader(std::move(ModuleLoader)) {}
+
+  /// Import functions in Module \p M based on the supplied import list.
+  Expected<bool> importFunctions(Module &M, const ImportMapTy &ImportList);
+
+private:
+  /// The summaries index used to trigger importing.
+  const ModuleSummaryIndex &Index;
+
+  /// Factory function to load a Module for a given identifier
+  ModuleLoaderTy ModuleLoader;
+};
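+
+// Construction sketch: the loader callback is whatever mechanism the client
+// uses to materialize a module from an identifier (loadModuleFromFile is a
+// hypothetical helper returning Expected<std::unique_ptr<Module>>):
+//
+//   FunctionImporter Importer(Index, [&](StringRef Identifier) {
+//     return loadModuleFromFile(Identifier, Context);
+//   });
+//   Expected<bool> Changed = Importer.importFunctions(M, ImportList);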
+
+/// The function importing pass
+class FunctionImportPass : public PassInfoMixin<FunctionImportPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Compute all the imports and exports for every module in the Index.
+///
+/// \p ModuleToDefinedGVSummaries contains for each Module a map
+/// (GUID -> Summary) for every global defined in the module.
+///
+/// \p ImportLists will be populated with an entry for every Module we are
+/// importing into. This entry is itself a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+///
+/// \p ExportLists contains for each Module the set of globals (GUID) that will
+/// be imported by another module, or referenced by such a function. I.e. this
+/// is the set of globals that need to be promoted/renamed appropriately.
+void ComputeCrossModuleImport(
+    const ModuleSummaryIndex &Index,
+    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+    StringMap<FunctionImporter::ImportMapTy> &ImportLists,
+    StringMap<FunctionImporter::ExportSetTy> &ExportLists);
+
+/// Compute all the imports for the given module using the Index.
+///
+/// \p ImportList will be populated with a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+void ComputeCrossModuleImportForModule(
+    StringRef ModulePath, const ModuleSummaryIndex &Index,
+    FunctionImporter::ImportMapTy &ImportList);
+
+/// Mark all external summaries in \p Index for import into the given module.
+/// Used for distributed builds using a distributed index.
+///
+/// \p ImportList will be populated with a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+void ComputeCrossModuleImportForModuleFromIndex(
+    StringRef ModulePath, const ModuleSummaryIndex &Index,
+    FunctionImporter::ImportMapTy &ImportList);
+
+/// PrevailingType enum used as a return type of callback passed
+/// to computeDeadSymbols. Yes and No values used when status explicitly
+/// set by symbols resolution, otherwise status is Unknown.
+enum class PrevailingType { Yes, No, Unknown };
+
+/// Compute all the symbols that are "dead": i.e. those that can't be reached
+/// in the graph from any of the given symbols listed in
+/// \p GUIDPreservedSymbols. Non-prevailing symbols are symbols without a
+/// prevailing copy anywhere in IR and are normally dead, \p isPrevailing
+/// predicate returns status of symbol.
+void computeDeadSymbols(
+    ModuleSummaryIndex &Index,
+    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
+    function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing);
+
+/// Converts value \p GV to a declaration, or replaces it with a declaration
+/// if it is an alias. Returns true if converted, false if replaced.
+bool convertToDeclaration(GlobalValue &GV);
+
+/// Compute the set of summaries needed for a ThinLTO backend compilation of
+/// \p ModulePath.
+///
+/// This includes summaries from that module (in case any global summary based
+/// optimizations were recorded) and from any definitions in other modules that
+/// should be imported.
+///
+/// \p ModuleToSummariesForIndex will be populated with the needed summaries
+/// from each required module path. Use a std::map instead of StringMap to get
+/// stable order for bitcode emission.
+void gatherImportedSummariesForModule(
+    StringRef ModulePath,
+    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+    const FunctionImporter::ImportMapTy &ImportList,
+    std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
+
+/// Emit into \p OutputFilename the files module \p ModulePath will import from.
+std::error_code
+EmitImportsFiles(StringRef ModulePath, StringRef OutputFilename,
+                 const FunctionImporter::ImportMapTy &ModuleImports);
+
+/// Resolve WeakForLinker values in \p TheModule based on the information
+/// recorded in the summaries during global summary-based analysis.
+void thinLTOResolveWeakForLinkerModule(Module &TheModule,
+                                       const GVSummaryMapTy &DefinedGlobals);
+
+/// Internalize \p TheModule based on the information recorded in the summaries
+/// during global summary-based analysis.
+void thinLTOInternalizeModule(Module &TheModule,
+                              const GVSummaryMapTy &DefinedGlobals);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
new file mode 100644
index 0000000..7ca241f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
@@ -0,0 +1,57 @@
+//===-- GlobalDCE.h - DCE unreachable internal functions ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transform is designed to eliminate unreachable internal globals from the
+// program.  It uses an aggressive algorithm, searching out globals that are
+// known to be alive.  After it finds all of the globals which are needed, it
+// deletes whatever is left over.  This allows it to delete recursive chunks of
+// the program which are unreachable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALDCE_H
+#define LLVM_TRANSFORMS_IPO_GLOBALDCE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <unordered_map>
+
+namespace llvm {
+
+/// Pass to delete unreachable internal globals (functions and variables).
+class GlobalDCEPass : public PassInfoMixin<GlobalDCEPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+
+private:
+  SmallPtrSet<GlobalValue*, 32> AliveGlobals;
+
+  /// Global -> Global that uses this global.
+  DenseMap<GlobalValue *, SmallPtrSet<GlobalValue *, 4>> GVDependencies;
+
+  /// Constant -> Globals that use this global cache.
+  std::unordered_map<Constant *, SmallPtrSet<GlobalValue *, 8>>
+      ConstantDependenciesCache;
+
+  /// Comdat -> Globals in that Comdat section.
+  std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers;
+
+  void UpdateGVDependencies(GlobalValue &GV);
+  void MarkLive(GlobalValue &GV,
+                SmallVectorImpl<GlobalValue *> *Updates = nullptr);
+  bool RemoveUnusedGlobalValue(GlobalValue &GV);
+
+  void ComputeDependencies(Value *V, SmallPtrSetImpl<GlobalValue *> &U);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALDCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h
new file mode 100644
index 0000000..5b48786
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h
@@ -0,0 +1,33 @@
+//===- GlobalOpt.h - Optimize Global Variables ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass transforms simple global variables that never have their address
+// taken.  If this is obviously true, it marks read/write globals as constant,
+// deletes variables that are only stored to, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALOPT_H
+#define LLVM_TRANSFORMS_IPO_GLOBALOPT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Optimize globals that never have their address taken.
+class GlobalOptPass : public PassInfoMixin<GlobalOptPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALOPT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h
new file mode 100644
index 0000000..56cefb7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h
@@ -0,0 +1,34 @@
+//===- GlobalSplit.h - global variable splitter -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass uses inrange annotations on GEP indices to split globals where
+// beneficial. Clang currently attaches these annotations to references to
+// virtual table globals under the Itanium ABI for the benefit of the
+// whole-program virtual call optimization and control flow integrity passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+#define LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to split global variables.
+class GlobalSplitPass : public PassInfoMixin<GlobalSplitPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h
new file mode 100644
index 0000000..54e1c24
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h
@@ -0,0 +1,36 @@
+//===-- InferFunctionAttrs.h - Infer implicit function attributes ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Interfaces for passes which infer implicit function attributes from the
+/// name and signature of function declarations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass which infers function attributes from the names and signatures of
+/// function declarations in a module.
+struct InferFunctionAttrsPass : PassInfoMixin<InferFunctionAttrsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Create a legacy pass manager instance of a pass to infer function
+/// attributes.
+Pass *createInferFunctionAttrsLegacyPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
new file mode 100644
index 0000000..eda8cf4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
@@ -0,0 +1,118 @@
+//===- Inliner.h - Inliner pass and infrastructure --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INLINER_H
+#define LLVM_TRANSFORMS_IPO_INLINER_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
+#include <utility>
+
+namespace llvm {
+
+class AssumptionCacheTracker;
+class CallGraph;
+class ProfileSummaryInfo;
+
+/// This class contains all of the helper code which is used to perform the
+/// inlining operations that do not depend on the policy. It contains the core
+/// bottom-up inlining infrastructure that specific inliner passes use.
+struct LegacyInlinerBase : public CallGraphSCCPass {
+  explicit LegacyInlinerBase(char &ID);
+  explicit LegacyInlinerBase(char &ID, bool InsertLifetime);
+
+  /// For this class, we declare that we require and preserve the call graph.
+  /// If the derived class implements this method, it should always explicitly
+  /// call the implementation here.
+  void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+  bool doInitialization(CallGraph &CG) override;
+
+  /// Main run interface method, this implements the interface required by the
+  /// Pass class.
+  bool runOnSCC(CallGraphSCC &SCC) override;
+
+  using llvm::Pass::doFinalization;
+
+  /// Remove now-dead linkonce functions at the end of processing to avoid
+  /// breaking the SCC traversal.
+  bool doFinalization(CallGraph &CG) override;
+
+  /// This method must be implemented by the subclass to determine the cost of
+  /// inlining the specified call site.  If the cost returned is greater than
+  /// the current inline threshold, the call site is not inlined.
+  virtual InlineCost getInlineCost(CallSite CS) = 0;
+
+  /// Remove dead functions.
+  ///
+  /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+  /// which restricts it to deleting functions with an 'AlwaysInline'
+  /// attribute. This is useful for the InlineAlways pass that only wants to
+  /// deal with that subset of the functions.
+  bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
+
+  /// This function performs the main work of the pass.  The default
+  /// implementation of Inliner::runOnSCC() calls skipSCC() before calling
+  /// this method, but derived classes which cannot be skipped can override
+  /// that method and call this function unconditionally.
+  bool inlineCalls(CallGraphSCC &SCC);
+
+private:
+  // Insert @llvm.lifetime intrinsics.
+  bool InsertLifetime = true;
+
+protected:
+  AssumptionCacheTracker *ACT;
+  ProfileSummaryInfo *PSI;
+  ImportedFunctionsInliningStatistics ImportedFunctionsStats;
+};
+
+/// The inliner pass for the new pass manager.
+///
+/// This pass wires together the inlining utilities and the inline cost
+/// analysis into a CGSCC pass. It considers every call in every function in
+/// the SCC and tries to inline if profitable. It can be tuned with a number of
+/// parameters to control what cost model is used and what tradeoffs are made
+/// when making the decision.
+///
+/// It should be noted that the legacy inliners do considerably more than this
+/// inliner pass does. They provide logic for manually merging allocas, and
+/// doing considerable DCE including the DCE of dead functions. This pass makes
+/// every attempt to be simpler. DCE of functions requires complex reasoning
+/// about comdat groups, etc. Instead, it is expected that other more focused
+/// passes be composed to achieve the same end result.
+class InlinerPass : public PassInfoMixin<InlinerPass> {
+public:
+  InlinerPass(InlineParams Params = getInlineParams())
+      : Params(std::move(Params)) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+
+private:
+  InlineParams Params;
+};
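+
+// A minimal wiring sketch for the new pass manager (illustrative only;
+// getInlineParams(OptLevel, SizeOptLevel) is declared in InlineCost.h and
+// the CGSCC adaptor in CGSCCPassManager.h, both included above):
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(InlinerPass(getInlineParams(2, 0)));
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));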
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_INLINER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h b/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h
new file mode 100644
index 0000000..45d676d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h
@@ -0,0 +1,85 @@
+//====- Internalize.h - Internalization API ---------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions and variables in the input module.
+// If the function or variable does not need to be preserved according to the
+// client supplied callback, it is marked as internal.
+//
+// This transformation would not be legal in a regular compilation, but it gets
+// extra information from the linker about what is safe.
+//
+// For example, internalizing a function with external linkage is safe only if
+// we are told that it is only used from within this module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INTERNALIZE_H
+#define LLVM_TRANSFORMS_IPO_INTERNALIZE_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/PassManager.h"
+#include <functional>
+#include <set>
+
+namespace llvm {
+class Module;
+class CallGraph;
+
+/// A pass that internalizes all functions and variables other than those that
+/// must be preserved according to \c MustPreserveGV.
+class InternalizePass : public PassInfoMixin<InternalizePass> {
+  /// Client supplied callback to control whether a symbol must be preserved.
+  const std::function<bool(const GlobalValue &)> MustPreserveGV;
+  /// Set of symbols private to the compiler that this pass should not touch.
+  StringSet<> AlwaysPreserved;
+
+  /// Return false if we're allowed to internalize this GV.
+  bool shouldPreserveGV(const GlobalValue &GV);
+  /// Internalize GV if it is possible to do so, i.e. it is not externally
+  /// visible and is not a member of an externally visible comdat.
+  bool maybeInternalize(GlobalValue &GV,
+                        const std::set<const Comdat *> &ExternalComdats);
+  /// If GV is part of a comdat and is externally visible, keep track of its
+  /// comdat so that we don't internalize any of its members.
+  void checkComdatVisibility(GlobalValue &GV,
+                             std::set<const Comdat *> &ExternalComdats);
+
+public:
+  InternalizePass();
+  InternalizePass(std::function<bool(const GlobalValue &)> MustPreserveGV)
+      : MustPreserveGV(std::move(MustPreserveGV)) {}
+
+  /// Run the internalizer on \p TheModule; returns true if any changes were
+  /// made.
+  ///
+  /// If the CallGraph \p CG is supplied, it will be updated when
+  /// internalizing a function (by removing any edge from the "external node")
+  bool internalizeModule(Module &TheModule, CallGraph *CG = nullptr);
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Helper function to internalize functions and variables in a Module.
+inline bool
+internalizeModule(Module &TheModule,
+                  std::function<bool(const GlobalValue &)> MustPreserveGV,
+                  CallGraph *CG = nullptr) {
+  return InternalizePass(std::move(MustPreserveGV))
+      .internalizeModule(TheModule, CG);
+}
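+
+// For example (illustrative only), to internalize every symbol except "main":
+//
+//   internalizeModule(M, [](const GlobalValue &GV) {
+//     return GV.getName() == "main";
+//   });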
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_INTERNALIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
new file mode 100644
index 0000000..3bcfe65
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
@@ -0,0 +1,236 @@
+//===- LowerTypeTests.h - type metadata lowering pass -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the type test lowering pass implementation that
+// may be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
+#define LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class Module;
+class raw_ostream;
+
+namespace lowertypetests {
+
+struct BitSetInfo {
+  // The indices of the set bits in the bitset.
+  std::set<uint64_t> Bits;
+
+  // The byte offset into the combined global represented by the bitset.
+  uint64_t ByteOffset;
+
+  // The size of the bitset in bits.
+  uint64_t BitSize;
+
+  // Log2 alignment of the bit set relative to the combined global.
+  // For example, a log2 alignment of 3 means that bits in the bitset
+  // represent addresses 8 bytes apart.
+  unsigned AlignLog2;
+
+  bool isSingleOffset() const {
+    return Bits.size() == 1;
+  }
+
+  bool isAllOnes() const {
+    return Bits.size() == BitSize;
+  }
+
+  bool containsGlobalOffset(uint64_t Offset) const;
+
+  void print(raw_ostream &OS) const;
+};
+
+struct BitSetBuilder {
+  SmallVector<uint64_t, 16> Offsets;
+  uint64_t Min = std::numeric_limits<uint64_t>::max();
+  uint64_t Max = 0;
+
+  BitSetBuilder() = default;
+
+  void addOffset(uint64_t Offset) {
+    if (Min > Offset)
+      Min = Offset;
+    if (Max < Offset)
+      Max = Offset;
+
+    Offsets.push_back(Offset);
+  }
+
+  BitSetInfo build();
+};
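+
+// Illustrative example (assuming the build() behavior implemented in
+// LowerTypeTests.cpp): offsets {0, 8, 16} produce a bitset with
+// ByteOffset == 0, AlignLog2 == 3 (an 8-byte stride) and BitSize == 3,
+// for which isAllOnes() is true.
+//
+//   BitSetBuilder BSB;
+//   for (uint64_t Off : {0, 8, 16})
+//     BSB.addOffset(Off);
+//   BitSetInfo BSI = BSB.build();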
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual tables
+/// and the bit sets represent their derived classes: in a single inheritance
+/// hierarchy, the optimum layout would involve a depth-first search of the
+/// class hierarchy (and in fact the computed layout ends up looking a lot like
+/// a DFS), but a naive DFS would not work well in the presence of multiple
+/// inheritance. This aspect of the algorithm ends up fitting smaller
+/// hierarchies inside larger ones where that would be beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+/// A       B
+///   \   / | \
+///     C   D   E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
+/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set passing the object
+/// indices of its referenced globals. It then assembles a layout from the
+/// computed layout in the Fragments field.
+struct GlobalLayoutBuilder {
+  /// The computed layout. Each element of this vector contains a fragment of
+  /// layout (which may be empty) consisting of object indices.
+  std::vector<std::vector<uint64_t>> Fragments;
+
+  /// Mapping from object index to fragment index.
+  std::vector<uint64_t> FragmentMap;
+
+  GlobalLayoutBuilder(uint64_t NumObjects)
+      : Fragments(1), FragmentMap(NumObjects) {}
+
+  /// Add F to the layout while trying to keep its indices contiguous.
+  /// If a previously seen fragment uses any of F's indices, that
+  /// fragment will be laid out inside F.
+  void addFragment(const std::set<uint64_t> &F);
+};
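+
+// Driving the builder with the example above (object indices A=0, B=1, C=2,
+// D=3, E=4; illustrative only):
+//
+//   GlobalLayoutBuilder GLB(5);
+//   GLB.addFragment({2});          // bsC
+//   GLB.addFragment({3});          // bsD
+//   GLB.addFragment({4});          // bsE
+//   GLB.addFragment({0, 2});       // bsA
+//   GLB.addFragment({1, 2, 3, 4}); // bsB
+//
+// The surviving fragment is then {1, 0, 2, 3, 4}, i.e. the layout
+// {B, A, C, D, E} from the walkthrough.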
+
+/// This class is used to build a byte array containing overlapping bit sets. By
+/// loading from indexed offsets into the byte array and applying a mask, a
+/// program can test bits from the bit set with a relatively short instruction
+/// sequence. For example, suppose we have 15 bit sets to lay out:
+///
+/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
+/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
+/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
+///
+/// These bits can be laid out in a 16-byte array like this:
+///
+///       Byte Offset
+///     0123456789ABCDEF
+/// Bit
+///   7 HHHHHHHHHIIIIIII
+///   6 GGGGGGGGGGJJJJJJ
+///   5 FFFFFFFFFFFKKKKK
+///   4 EEEEEEEEEEEELLLL
+///   3 DDDDDDDDDDDDDMMM
+///   2 CCCCCCCCCCCCCCNN
+///   1 BBBBBBBBBBBBBBBO
+///   0 AAAAAAAAAAAAAAAA
+///
+/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
+/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
+/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
+///
+/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
+/// because for one thing it gives us better packing (the more bins there are,
+/// the less evenly they will be filled), and for another, the instruction
+/// sequences can be slightly shorter, both on x86 and ARM.
+struct ByteArrayBuilder {
+  /// The byte array built so far.
+  std::vector<uint8_t> Bytes;
+
+  enum { BitsPerByte = 8 };
+
+  /// The number of bytes allocated so far for each of the bits.
+  uint64_t BitAllocs[BitsPerByte];
+
+  ByteArrayBuilder() {
+    memset(BitAllocs, 0, sizeof(BitAllocs));
+  }
+
+  /// Allocate BitSize bits in the byte array where Bits contains the bits to
+  /// set. AllocByteOffset is set to the offset within the byte array and
+  /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
+  /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
+  /// efficiently; the pass allocates bit sets in decreasing size order.
+  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
+                uint64_t &AllocByteOffset, uint8_t &AllocMask);
+};
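+
+// Illustrative use (the resulting offset and mask depend on what has already
+// been allocated in the array):
+//
+//   ByteArrayBuilder BAB;
+//   uint64_t ByteOff;
+//   uint8_t Mask;
+//   BAB.allocate({0, 15}, 16, ByteOff, Mask); // a 16-bit set, bits 0 and 15 set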
+
+} // end namespace lowertypetests
+
+class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h b/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h
new file mode 100644
index 0000000..ec6dd36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h
@@ -0,0 +1,32 @@
+//===- PartialInlining.h - Inline parts of functions ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs partial inlining, typically by inlining an if statement
+// that surrounds the body of the function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
+#define LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to perform partial inlining.
+class PartialInlinerPass : public PassInfoMixin<PartialInlinerPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
new file mode 100644
index 0000000..276306f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -0,0 +1,235 @@
+// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PassManagerBuilder class, which is used to set up a
+// "standard" optimization sequence suitable for languages like C and C++.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+#define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+class ModuleSummaryIndex;
+class Pass;
+class TargetLibraryInfoImpl;
+class TargetMachine;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class FunctionPassManager;
+class PassManagerBase;
+}
+
+/// PassManagerBuilder - This class is used to set up a standard optimization
+/// sequence for languages like C and C++, allowing some APIs to customize the
+/// pass sequence in various ways. A simple example of using it would be:
+///
+///  PassManagerBuilder Builder;
+///  Builder.OptLevel = 2;
+///  Builder.populateFunctionPassManager(FPM);
+///  Builder.populateModulePassManager(MPM);
+///
+/// In addition to setting up the basic passes, PassManagerBuilder allows
+/// frontends to vend a plugin API, where plugins are allowed to add extensions
+/// to the default pass manager.  They do this by specifying where in the pass
+/// pipeline they want to be added, along with a callback function that adds
+/// the pass(es).  For example, a plugin that wanted to add a loop optimization
+/// could do something like this:
+///
+/// static void addMyLoopPass(const PassManagerBuilder &Builder,
+///                           legacy::PassManagerBase &PM) {
+///   if (Builder.OptLevel > 2 && Builder.SizeLevel == 0)
+///     PM.add(createMyAwesomePass());
+/// }
+///   ...
+///   Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
+///                        addMyLoopPass);
+///   ...
+class PassManagerBuilder {
+public:
+  /// Extensions are passed the builder itself (so they can see how it is
+  /// configured) as well as the pass manager to add stuff to.
+  typedef std::function<void(const PassManagerBuilder &Builder,
+                             legacy::PassManagerBase &PM)>
+      ExtensionFn;
+  enum ExtensionPointTy {
+    /// EP_EarlyAsPossible - This extension point allows adding passes before
+    /// any other transformations, allowing them to see the code as it is coming
+    /// out of the frontend.
+    EP_EarlyAsPossible,
+
+    /// EP_ModuleOptimizerEarly - This extension point allows adding passes
+    /// just before the main module-level optimization passes.
+    EP_ModuleOptimizerEarly,
+
+    /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
+    /// the end of the loop optimizer.
+    EP_LoopOptimizerEnd,
+
+    /// EP_ScalarOptimizerLate - This extension point allows adding optimization
+    /// passes after most of the main optimizations, but before the last
+    /// cleanup-ish optimizations.
+    EP_ScalarOptimizerLate,
+
+    /// EP_OptimizerLast -- This extension point allows adding passes that
+    /// run after everything else.
+    EP_OptimizerLast,
+
+    /// EP_VectorizerStart - This extension point allows adding optimization
+    /// passes before the vectorizer and other highly target specific
+    /// optimization passes are executed.
+    EP_VectorizerStart,
+
+    /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
+    /// should not be disabled by O0 optimization level. The passes will be
+    /// inserted after the inlining pass.
+    EP_EnabledOnOptLevel0,
+
+    /// EP_Peephole - This extension point allows adding passes that perform
+    /// peephole optimizations similar to the instruction combiner. These passes
+    /// will be inserted after each instance of the instruction combiner pass.
+    EP_Peephole,
+
+    /// EP_LateLoopOptimizations - This extension point allows adding late loop
+    /// canonicalization and simplification passes. This is the last point in
+    /// the loop optimization pipeline before loop deletion. Each pass added
+    /// here must be an instance of LoopPass.
+    /// This is the place to add passes that can remove loops, such as target-
+    /// specific loop idiom recognition.
+    EP_LateLoopOptimizations,
+
+    /// EP_CGSCCOptimizerLate - This extension point allows adding CallGraphSCC
+    /// passes at the end of the main CallGraphSCC passes and before any
+    /// function simplification passes run by CGPassManager.
+    EP_CGSCCOptimizerLate,
+  };
+
+  /// The Optimization Level - Specify the basic optimization level.
+  ///    0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
+  unsigned OptLevel;
+
+  /// SizeLevel - How much we're optimizing for size.
+  ///    0 = none, 1 = -Os, 2 = -Oz
+  unsigned SizeLevel;
+
+  /// LibraryInfo - Specifies information about the runtime library for the
+  /// optimizer.  If this is non-null, it is added to both the function and
+  /// per-module pass pipeline.
+  TargetLibraryInfoImpl *LibraryInfo;
+
+  /// Inliner - Specifies the inliner to use.  If this is non-null, it is
+  /// added to the per-module passes.
+  Pass *Inliner;
+
+  /// The module summary index to use for exporting information from the
+  /// regular LTO phase, for example for the CFI and devirtualization type
+  /// tests.
+  ModuleSummaryIndex *ExportSummary = nullptr;
+
+  /// The module summary index to use for importing information to the
+  /// thin LTO backends, for example for the CFI and devirtualization type
+  /// tests.
+  const ModuleSummaryIndex *ImportSummary = nullptr;
+
+  bool DisableTailCalls;
+  bool DisableUnitAtATime;
+  bool DisableUnrollLoops;
+  bool SLPVectorize;
+  bool LoopVectorize;
+  bool RerollLoops;
+  bool NewGVN;
+  bool DisableGVNLoadPRE;
+  bool VerifyInput;
+  bool VerifyOutput;
+  bool MergeFunctions;
+  bool PrepareForLTO;
+  bool PrepareForThinLTO;
+  bool PerformThinLTO;
+  bool DivergentTarget;
+
+  /// Enable profile instrumentation pass.
+  bool EnablePGOInstrGen;
+  /// Profile data file name that the instrumentation will be written to.
+  std::string PGOInstrGen;
+  /// Path of the profile data file.
+  std::string PGOInstrUse;
+  /// Path of the sample Profile data file.
+  std::string PGOSampleUse;
+
+private:
+  /// ExtensionList - This is the list of all of the extensions that are
+  std::vector<std::pair<ExtensionPointTy, ExtensionFn>> Extensions;
+
+public:
+  PassManagerBuilder();
+  ~PassManagerBuilder();
+  /// Adds an extension that will be used by all PassManagerBuilder instances.
+  /// This is intended to be used by plugins, to register a set of
+  /// optimizations to run automatically.
+  static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+  void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+
+private:
+  void addExtensionsToPM(ExtensionPointTy ETy,
+                         legacy::PassManagerBase &PM) const;
+  void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
+  void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
+  void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
+  void addPGOInstrPasses(legacy::PassManagerBase &MPM);
+  void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
+  void addInstructionCombiningPass(legacy::PassManagerBase &MPM) const;
+
+public:
+  /// populateFunctionPassManager - This fills in the function pass manager,
+  /// which is expected to be run on each function immediately as it is
+  /// generated.  The idea is to reduce the size of the IR in memory.
+  void populateFunctionPassManager(legacy::FunctionPassManager &FPM);
+
+  /// populateModulePassManager - This sets up the primary pass manager.
+  void populateModulePassManager(legacy::PassManagerBase &MPM);
+  void populateLTOPassManager(legacy::PassManagerBase &PM);
+  void populateThinLTOPassManager(legacy::PassManagerBase &PM);
+};
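+
+// A fuller end-to-end sketch (illustrative only; the legacy pass managers are
+// declared in llvm/IR/LegacyPassManager.h and createFunctionInliningPass()
+// in llvm/Transforms/IPO.h):
+//
+//   PassManagerBuilder Builder;
+//   Builder.OptLevel = 2;
+//   Builder.Inliner = createFunctionInliningPass();
+//   legacy::FunctionPassManager FPM(&M);
+//   legacy::PassManager MPM;
+//   Builder.populateFunctionPassManager(FPM);
+//   Builder.populateModulePassManager(MPM);
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();
+//   MPM.run(M);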
+
+/// Registers a function for adding a standard set of passes.  This should be
+/// used by optimizer plugins to allow all front ends to transparently use
+/// them.  Create a static instance of this class in your plugin, providing a
+/// private function that the PassManagerBuilder can use to add your passes.
+struct RegisterStandardPasses {
+  RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty,
+                         PassManagerBuilder::ExtensionFn Fn) {
+    PassManagerBuilder::addGlobalExtension(Ty, std::move(Fn));
+  }
+};
+
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h b/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h
new file mode 100644
index 0000000..fdb7865
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h
@@ -0,0 +1,38 @@
+//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements interprocedural sparse conditional constant
+// propagation and merging.
+//
+// Specifically, this:
+//   * Assumes values are constant unless proven otherwise
+//   * Assumes BasicBlocks are dead unless proven otherwise
+//   * Proves values to be constant, and replaces them with constants
+//   * Proves conditional branches to be unconditional
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SCCP_H
+#define LLVM_TRANSFORMS_IPO_SCCP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to perform interprocedural constant propagation.
+class IPSCCPPass : public PassInfoMixin<IPSCCPPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_SCCP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
new file mode 100644
index 0000000..cd5a056
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
@@ -0,0 +1,40 @@
+//===- SampleProfile.h - SamplePGO pass -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for the sampled PGO loader pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
+#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
+
+#include "llvm/IR/PassManager.h"
+#include <string>
+
+namespace llvm {
+
+class Module;
+
+/// The sample profiler data loader pass.
+class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
+public:
+  SampleProfileLoaderPass(std::string File = "", bool IsThinLTOPreLink = false)
+      : ProfileFileName(File), IsThinLTOPreLink(IsThinLTOPreLink) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  std::string ProfileFileName;
+  bool IsThinLTOPreLink;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h b/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h
new file mode 100644
index 0000000..5a05cd7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h
@@ -0,0 +1,32 @@
+//===-- StripDeadPrototypes.h - Remove unused function declarations -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions in the input module, looking for
+// dead declarations and removes them. Dead declarations are declarations of
+// functions for which no implementation is available (i.e., declarations for
+// unused library functions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+#define LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass to remove unused function declarations.
+struct StripDeadPrototypesPass : PassInfoMixin<StripDeadPrototypesPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
new file mode 100644
index 0000000..0b3ba86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
@@ -0,0 +1,19 @@
+#ifndef LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
+#define LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/ScaledNumber.h"
+
+namespace llvm {
+class Function;
+class Module;
+
+class SyntheticCountsPropagation
+    : public PassInfoMixin<SyntheticCountsPropagation> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+};
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h b/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h
new file mode 100644
index 0000000..bf04bbf
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h
@@ -0,0 +1,48 @@
+//===- ThinLTOBitcodeWriter.h - Bitcode writing pass for ThinLTO ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass prepares a module containing type metadata for ThinLTO by splitting
+// it into regular and thin LTO parts if possible, and writing both parts to
+// a multi-module bitcode file. Modules that do not contain type metadata are
+// written unmodified as a single module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
+#define LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
+
+#include <llvm/IR/PassManager.h>
+#include <llvm/Support/raw_ostream.h>
+
+namespace llvm {
+
+class ThinLTOBitcodeWriterPass
+    : public PassInfoMixin<ThinLTOBitcodeWriterPass> {
+  raw_ostream &OS;
+  raw_ostream *ThinLinkOS;
+
+public:
+  // Writes bitcode to OS. Also writes the thin link file to ThinLinkOS, if
+  // it is not nullptr.
+  ThinLTOBitcodeWriterPass(raw_ostream &OS, raw_ostream *ThinLinkOS)
+      : OS(OS), ThinLinkOS(ThinLinkOS) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
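+
+// Illustrative use (assumes raw_ostreams OS and ThinLinkOS and a configured
+// ModuleAnalysisManager MAM):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(ThinLTOBitcodeWriterPass(OS, &ThinLinkOS));
+//   MPM.run(M, MAM);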
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
new file mode 100644
index 0000000..1aa4c6f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -0,0 +1,237 @@
+//===- WholeProgramDevirt.h - Whole-program devirt pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the whole-program devirtualization pass
+// implementation that may be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
+#define LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+template <typename T> class MutableArrayRef;
+class Function;
+class GlobalVariable;
+
+namespace wholeprogramdevirt {
+
+// A bit vector that keeps track of which bits are used. We use this to
+// pack constant values compactly before and after each virtual table.
+struct AccumBitVector {
+  std::vector<uint8_t> Bytes;
+
+  // Bits in BytesUsed[I] are 1 if matching bit in Bytes[I] is used, 0 if not.
+  std::vector<uint8_t> BytesUsed;
+
+  std::pair<uint8_t *, uint8_t *> getPtrToData(uint64_t Pos, uint8_t Size) {
+    if (Bytes.size() < Pos + Size) {
+      Bytes.resize(Pos + Size);
+      BytesUsed.resize(Pos + Size);
+    }
+    return std::make_pair(Bytes.data() + Pos, BytesUsed.data() + Pos);
+  }
+
+  // Set little-endian value Val with size Size at bit position Pos,
+  // and mark bytes as used.
+  void setLE(uint64_t Pos, uint64_t Val, uint8_t Size) {
+    assert(Pos % 8 == 0);
+    auto DataUsed = getPtrToData(Pos / 8, Size);
+    for (unsigned I = 0; I != Size; ++I) {
+      DataUsed.first[I] = Val >> (I * 8);
+      assert(!DataUsed.second[I]);
+      DataUsed.second[I] = 0xff;
+    }
+  }
+
+  // Set big-endian value Val with size Size at bit position Pos,
+  // and mark bytes as used.
+  void setBE(uint64_t Pos, uint64_t Val, uint8_t Size) {
+    assert(Pos % 8 == 0);
+    auto DataUsed = getPtrToData(Pos / 8, Size);
+    for (unsigned I = 0; I != Size; ++I) {
+      DataUsed.first[Size - I - 1] = Val >> (I * 8);
+      assert(!DataUsed.second[Size - I - 1]);
+      DataUsed.second[Size - I - 1] = 0xff;
+    }
+  }
+
+  // Set bit at bit position Pos to b and mark bit as used.
+  void setBit(uint64_t Pos, bool b) {
+    auto DataUsed = getPtrToData(Pos / 8, 1);
+    if (b)
+      *DataUsed.first |= 1 << (Pos % 8);
+    assert(!(*DataUsed.second & (1 << Pos % 8)));
+    *DataUsed.second |= 1 << (Pos % 8);
+  }
+};
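+
+// For example (illustrative only):
+//
+//   AccumBitVector ABV;
+//   ABV.setLE(16, 0xBEEF, 2); // Bytes[2] = 0xEF, Bytes[3] = 0xBE, marked used.
+//   ABV.setBit(5, true);      // Bytes[0] |= 0x20; that bit is marked used.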
+
+// The bits that will be stored before and after a particular vtable.
+struct VTableBits {
+  // The vtable global.
+  GlobalVariable *GV;
+
+  // Cache of the vtable's size in bytes.
+  uint64_t ObjectSize = 0;
+
+  // The bit vector that will be laid out before the vtable. Note that these
+  // bytes are stored in reverse order until the globals are rebuilt. This means
+  // that any values in the array must be stored using the opposite endianness
+  // from the target.
+  AccumBitVector Before;
+
+  // The bit vector that will be laid out after the vtable.
+  AccumBitVector After;
+};
+
+// Information about a member of a particular type identifier.
+struct TypeMemberInfo {
+  // The VTableBits for the vtable.
+  VTableBits *Bits;
+
+  // The offset in bytes from the start of the vtable (i.e. the address point).
+  uint64_t Offset;
+
+  bool operator<(const TypeMemberInfo &other) const {
+    return Bits < other.Bits || (Bits == other.Bits && Offset < other.Offset);
+  }
+};
+
+// A virtual call target, i.e. an entry in a particular vtable.
+struct VirtualCallTarget {
+  VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM);
+
+  // For testing only.
+  VirtualCallTarget(const TypeMemberInfo *TM, bool IsBigEndian)
+      : Fn(nullptr), TM(TM), IsBigEndian(IsBigEndian), WasDevirt(false) {}
+
+  // The function stored in the vtable.
+  Function *Fn;
+
+  // A pointer to the type identifier member through which the pointer to Fn is
+  // accessed.
+  const TypeMemberInfo *TM;
+
+  // When doing virtual constant propagation, this stores the return value for
+  // the function when passed the currently considered argument list.
+  uint64_t RetVal;
+
+  // Whether the target is big endian.
+  bool IsBigEndian;
+
+  // Whether at least one call site to the target was devirtualized.
+  bool WasDevirt;
+
+  // The minimum byte offset before the address point. This covers the bytes in
+  // the vtable object before the address point (e.g. RTTI, access-to-top,
+  // vtables for other base classes) and is equal to the offset from the start
+  // of the vtable object to the address point.
+  uint64_t minBeforeBytes() const { return TM->Offset; }
+
+  // The minimum byte offset after the address point. This covers the bytes in
+  // the vtable object after the address point (e.g. the vtable for the current
+  // class and any later base classes) and is equal to the size of the vtable
+  // object minus the offset from the start of the vtable object to the address
+  // point.
+  uint64_t minAfterBytes() const { return TM->Bits->ObjectSize - TM->Offset; }
+
+  // The number of bytes allocated (for the vtable plus the byte array) before
+  // the address point.
+  uint64_t allocatedBeforeBytes() const {
+    return minBeforeBytes() + TM->Bits->Before.Bytes.size();
+  }
+
+  // The number of bytes allocated (for the vtable plus the byte array) after
+  // the address point.
+  uint64_t allocatedAfterBytes() const {
+    return minAfterBytes() + TM->Bits->After.Bytes.size();
+  }
+
+  // Set the bit at position Pos before the address point to RetVal.
+  void setBeforeBit(uint64_t Pos) {
+    assert(Pos >= 8 * minBeforeBytes());
+    TM->Bits->Before.setBit(Pos - 8 * minBeforeBytes(), RetVal);
+  }
+
+  // Set the bit at position Pos after the address point to RetVal.
+  void setAfterBit(uint64_t Pos) {
+    assert(Pos >= 8 * minAfterBytes());
+    TM->Bits->After.setBit(Pos - 8 * minAfterBytes(), RetVal);
+  }
+
+  // Set the bytes at position Pos before the address point to RetVal.
+  // Because the bytes in Before are stored in reverse order, we use the
+  // opposite endianness to the target.
+  void setBeforeBytes(uint64_t Pos, uint8_t Size) {
+    assert(Pos >= 8 * minBeforeBytes());
+    if (IsBigEndian)
+      TM->Bits->Before.setLE(Pos - 8 * minBeforeBytes(), RetVal, Size);
+    else
+      TM->Bits->Before.setBE(Pos - 8 * minBeforeBytes(), RetVal, Size);
+  }
+
+  // Set the bytes at position Pos after the address point to RetVal.
+  void setAfterBytes(uint64_t Pos, uint8_t Size) {
+    assert(Pos >= 8 * minAfterBytes());
+    if (IsBigEndian)
+      TM->Bits->After.setBE(Pos - 8 * minAfterBytes(), RetVal, Size);
+    else
+      TM->Bits->After.setLE(Pos - 8 * minAfterBytes(), RetVal, Size);
+  }
+};
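+
+// Worked example (illustrative): for a vtable object of 40 bytes whose
+// address point is 16 bytes in, minBeforeBytes() == 16 and
+// minAfterBytes() == 40 - 16 == 24; allocatedBeforeBytes() and
+// allocatedAfterBytes() then grow as constants are packed into Before/After.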
+
+// Find the minimum offset that we may store a value of size Size bits at. If
+// IsAfter is set, look for an offset after the object, otherwise look for an
+// offset before the object.
+uint64_t findLowestOffset(ArrayRef<VirtualCallTarget> Targets, bool IsAfter,
+                          uint64_t Size);
+
+// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
+// given allocation offset before the vtable address. Stores the computed
+// byte/bit offset to OffsetByte/OffsetBit.
+void setBeforeReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
+                           uint64_t AllocBefore, unsigned BitWidth,
+                           int64_t &OffsetByte, uint64_t &OffsetBit);
+
+// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
+// given allocation offset after the vtable address. Stores the computed
+// byte/bit offset to OffsetByte/OffsetBit.
+void setAfterReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
+                          uint64_t AllocAfter, unsigned BitWidth,
+                          int64_t &OffsetByte, uint64_t &OffsetBit);
+
+} // end namespace wholeprogramdevirt
+
+struct WholeProgramDevirtPass : public PassInfoMixin<WholeProgramDevirtPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
new file mode 100644
index 0000000..6bd22dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -0,0 +1,67 @@
+//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the instcombine pass. This pass
+/// is suitable for use in the new pass manager. For a pass that works with the
+/// legacy pass manager, please look for \c createInstructionCombiningPass() in
+/// Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+
+namespace llvm {
+
+class InstCombinePass : public PassInfoMixin<InstCombinePass> {
+  InstCombineWorklist Worklist;
+  bool ExpensiveCombines;
+
+public:
+  static StringRef name() { return "InstCombinePass"; }
+
+  explicit InstCombinePass(bool ExpensiveCombines = true)
+      : ExpensiveCombines(ExpensiveCombines) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
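+
+// Illustrative new-pass-manager usage (assumes the pipeline's analysis
+// managers are already registered):
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(InstCombinePass(/*ExpensiveCombines=*/true));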
+
+/// \brief The legacy pass manager's instcombine pass.
+///
+/// This is a basic whole-function wrapper around the instcombine utility. It
+/// will try to combine all instructions in the function.
+class InstructionCombiningPass : public FunctionPass {
+  InstCombineWorklist Worklist;
+  const bool ExpensiveCombines;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  InstructionCombiningPass(bool ExpensiveCombines = true)
+      : FunctionPass(ID), ExpensiveCombines(ExpensiveCombines) {
+    initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+};
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
new file mode 100644
index 0000000..271e891
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -0,0 +1,120 @@
+//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+/// InstCombineWorklist - This is the worklist management logic for
+/// InstCombine.
+class InstCombineWorklist {
+  SmallVector<Instruction*, 256> Worklist;
+  DenseMap<Instruction*, unsigned> WorklistMap;
+
+public:
+  InstCombineWorklist() = default;
+
+  InstCombineWorklist(InstCombineWorklist &&) = default;
+  InstCombineWorklist &operator=(InstCombineWorklist &&) = default;
+
+  bool isEmpty() const { return Worklist.empty(); }
+
+  /// Add - Add the specified instruction to the worklist if it isn't already
+  /// in it.
+  void Add(Instruction *I) {
+    if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
+      DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+      Worklist.push_back(I);
+    }
+  }
+
+  void AddValue(Value *V) {
+    if (Instruction *I = dyn_cast<Instruction>(V))
+      Add(I);
+  }
+
+  /// AddInitialGroup - Add the specified batch of instructions in reverse
+  /// order; this should only be done when the worklist is empty and when the
+  /// group has no duplicates.
+  void AddInitialGroup(ArrayRef<Instruction *> List) {
+    assert(Worklist.empty() && "Worklist must be empty to add initial group");
+    Worklist.reserve(List.size()+16);
+    WorklistMap.reserve(List.size());
+    DEBUG(dbgs() << "IC: ADDING: " << List.size() << " instrs to worklist\n");
+    unsigned Idx = 0;
+    for (Instruction *I : reverse(List)) {
+      WorklistMap.insert(std::make_pair(I, Idx++));
+      Worklist.push_back(I);
+    }
+  }
+
+  // Remove - remove I from the worklist if it exists.
+  void Remove(Instruction *I) {
+    DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.
+    Worklist[It->second] = nullptr;
+
+    WorklistMap.erase(It);
+  }
+
+  Instruction *RemoveOne() {
+    Instruction *I = Worklist.pop_back_val();
+    WorklistMap.erase(I);
+    return I;
+  }
+
+  /// AddUsersToWorkList - When an instruction is simplified, add all users of
+  /// the instruction to the work lists because they might get more simplified
+  /// now.
+  ///
+  void AddUsersToWorkList(Instruction &I) {
+    for (User *U : I.users())
+      Add(cast<Instruction>(U));
+  }
+
+
+  /// Zap - check that the worklist is empty and nuke the backing store for
+  /// the map if it is large.
+  void Zap() {
+    assert(WorklistMap.empty() && "Worklist empty, but map not?");
+
+    // Do an explicit clear, this shrinks the map if needed.
+    WorklistMap.clear();
+  }
+};
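+
+// Illustrative consumption loop (a sketch; assumes WL was seeded with
+// AddInitialGroup). Remove() nulls out vector slots in place, so popped
+// entries must be checked for null:
+//
+//   while (!WL.isEmpty()) {
+//     Instruction *I = WL.RemoveOne();
+//     if (!I)
+//       continue;
+//     // ... simplify I, then WL.AddUsersToWorkList(*I) as needed ...
+//   }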
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
new file mode 100644
index 0000000..b1e13f1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
@@ -0,0 +1,222 @@
+//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines constructor functions for instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class FunctionPass;
+class ModulePass;
+class OptimizationRemarkEmitter;
+
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
+                                              BasicBlock::iterator IP);
+
+// Insert GCOV profiling instrumentation
+struct GCOVOptions {
+  static GCOVOptions getDefault();
+
+  // Specify whether to emit .gcno files.
+  bool EmitNotes;
+
+  // Specify whether to modify the program to emit .gcda files when run.
+  bool EmitData;
+
+  // A four-byte version string. The meaning of a version string is described
+  // in gcc's gcov-io.h.
+  char Version[4];
+
+  // Emit a "cfg checksum" that follows the "line number checksum" of a
+  // function. This affects both .gcno and .gcda files.
+  bool UseCfgChecksum;
+
+  // Add the 'noredzone' attribute to added runtime library calls.
+  bool NoRedZone;
+
+  // Emit the name of the function in the .gcda files. This is redundant, as
+  // the function identifier can be used to find the name from the .gcno file.
+  bool FunctionNamesInData;
+
+  // Emit the exit block immediately after the start block, rather than after
+  // all of the function body's blocks.
+  bool ExitBlockBeforeBody;
+};
+
+ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
+                                   GCOVOptions::getDefault());
+
+// PGO Instrumentation
+ModulePass *createPGOInstrumentationGenLegacyPass();
+ModulePass *
+createPGOInstrumentationUseLegacyPass(StringRef Filename = StringRef(""));
+ModulePass *createPGOIndirectCallPromotionLegacyPass(bool InLTO = false,
+                                                     bool SamplePGO = false);
+FunctionPass *createPGOMemOPSizeOptLegacyPass();
+
+// The pgo-specific indirect call promotion function declared below is used by
+// the pgo-driven indirect call promotion and sample profile passes. It's a
+// wrapper around llvm::promoteCall, et al. that additionally computes !prof
+// metadata. We place it in a pgo namespace so it's not confused with the
+// generic utilities.
+namespace pgo {
+
+// Helper function that transforms Inst (either an indirect-call instruction
+// or an invoke instruction) into a conditional call to F. This is like:
+//     if (Inst.CalledValue == F)
+//        F(...);
+//     else
+//        Inst(...);
+//     end
+// TotalCount is the profile count of executions of the original instruction,
+// and Count is the profile count of those executions in which F is the
+// target. These two values are used to compute the branch weights.
+// If \p AttachProfToDirectCall is true, !prof metadata containing \p Count is
+// attached to the new direct call.
+// Returns the promoted direct call instruction.
+Instruction *promoteIndirectCall(Instruction *Inst, Function *F, uint64_t Count,
+                                 uint64_t TotalCount,
+                                 bool AttachProfToDirectCall,
+                                 OptimizationRemarkEmitter *ORE);
+} // namespace pgo
+
+/// Options for the frontend instrumentation based profiling pass.
+struct InstrProfOptions {
+  // Add the 'noredzone' attribute to added runtime library calls.
+  bool NoRedZone = false;
+
+  // Do counter register promotion
+  bool DoCounterPromotion = false;
+
+  // Name of the profile file to use as output
+  std::string InstrProfileOutput;
+
+  InstrProfOptions() = default;
+};
+
+/// Insert frontend instrumentation based profiling.
+ModulePass *createInstrProfilingLegacyPass(
+    const InstrProfOptions &Options = InstrProfOptions());
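+
+// Illustrative use with the legacy pass manager:
+//
+//   InstrProfOptions Opts;
+//   Opts.DoCounterPromotion = true;
+//   PM.add(createInstrProfilingLegacyPass(Opts));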
+
+// Insert AddressSanitizer (address sanity checking) instrumentation
+FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
+                                                 bool Recover = false,
+                                                 bool UseAfterScope = false);
+ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
+                                             bool Recover = false,
+                                             bool UseGlobalsGC = true);
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
+                                        bool Recover = false);
+
+FunctionPass *createHWAddressSanitizerPass(bool Recover = false);
+
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerPass();
+
+// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
+ModulePass *createDataFlowSanitizerPass(
+    const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+    void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
+
+// Options for EfficiencySanitizer sub-tools.
+struct EfficiencySanitizerOptions {
+  enum Type {
+    ESAN_None = 0,
+    ESAN_CacheFrag,
+    ESAN_WorkingSet,
+  } ToolType = ESAN_None;
+
+  EfficiencySanitizerOptions() = default;
+};
+
+// Insert EfficiencySanitizer instrumentation.
+ModulePass *createEfficiencySanitizerPass(
+    const EfficiencySanitizerOptions &Options = EfficiencySanitizerOptions());
+
+// Options for sanitizer coverage instrumentation.
+struct SanitizerCoverageOptions {
+  enum Type {
+    SCK_None = 0,
+    SCK_Function,
+    SCK_BB,
+    SCK_Edge
+  } CoverageType = SCK_None;
+  bool IndirectCalls = false;
+  bool TraceBB = false;
+  bool TraceCmp = false;
+  bool TraceDiv = false;
+  bool TraceGep = false;
+  bool Use8bitCounters = false;
+  bool TracePC = false;
+  bool TracePCGuard = false;
+  bool Inline8bitCounters = false;
+  bool PCTable = false;
+  bool NoPrune = false;
+  bool StackDepth = false;
+
+  SanitizerCoverageOptions() = default;
+};
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createSanitizerCoverageModulePass(
+    const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
+
+/// \brief Calculate what to divide by to scale counts.
+///
+/// Given the maximum count, calculate a divisor that will scale all the
+/// weights to strictly less than std::numeric_limits<uint32_t>::max().
+static inline uint64_t calculateCountScale(uint64_t MaxCount) {
+  return MaxCount < std::numeric_limits<uint32_t>::max()
+             ? 1
+             : MaxCount / std::numeric_limits<uint32_t>::max() + 1;
+}
+
+/// \brief Scale an individual branch count.
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
+  uint64_t Scaled = Count / Scale;
+  assert(Scaled <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
+  return Scaled;
+}
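+
+// Worked example (illustrative): if the largest observed count is
+// 10,000,000,000, calculateCountScale returns
+// 10,000,000,000 / 4,294,967,295 + 1 == 3, and then
+// scaleBranchCount(6,000,000,000, 3) == 2,000,000,000 fits in 32 bits.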
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRUMENTATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
new file mode 100644
index 0000000..3d4f62c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
@@ -0,0 +1,29 @@
+//===- BoundsChecking.h - Bounds checking instrumentation -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass to instrument code and perform run-time bounds checking on loads,
+/// stores, and other memory intrinsics.
+struct BoundsCheckingPass : PassInfoMixin<BoundsCheckingPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
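+
+// A minimal new-pass-manager sketch (not part of the original header); `F` is
+// a Function and `FAM` a FunctionAnalysisManager registered elsewhere:
+//
+// \code
+//   FunctionPassManager FPM;
+//   FPM.addPass(BoundsCheckingPass());
+//   FPM.run(F, FAM);
+// \endcode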
+
+
+/// Legacy pass creation function for the above pass.
+FunctionPass *createBoundsCheckingLegacyPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
new file mode 100644
index 0000000..dd55fbe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
@@ -0,0 +1,31 @@
+//===- Transforms/Instrumentation/GCOVProfiler.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the GCOV-style profiler pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_GCOVPROFILER_H
+#define LLVM_TRANSFORMS_GCOVPROFILER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Instrumentation.h"
+
+namespace llvm {
+/// The gcov-style instrumentation pass
+class GCOVProfilerPass : public PassInfoMixin<GCOVProfilerPass> {
+public:
+  GCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault())
+      : GCOVOpts(Options) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  GCOVOptions GCOVOpts;
+};
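+
+// A minimal new-pass-manager sketch (not part of the original header); `M` is
+// a Module and `MAM` a ModuleAnalysisManager set up elsewhere:
+//
+// \code
+//   ModulePassManager MPM;
+//   MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
+//   MPM.run(M, MAM);
+// \endcode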
+
+} // End llvm namespace
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
new file mode 100644
index 0000000..13fb3db
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
@@ -0,0 +1,125 @@
+//===- Transforms/Instrumentation/InstrProfiling.h --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's PGO Instrumentation lowering
+/// pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRPROFILING_H
+#define LLVM_TRANSFORMS_INSTRPROFILING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <vector>
+
+namespace llvm {
+
+class TargetLibraryInfo;
+using LoadStorePair = std::pair<Instruction *, Instruction *>;
+
+/// Instrumentation-based profiling lowering pass. This pass lowers the
+/// profile instrumentation intrinsics generated by the frontend or by the
+/// IR-based instrumentation pass.
+class InstrProfiling : public PassInfoMixin<InstrProfiling> {
+public:
+  InstrProfiling() = default;
+  InstrProfiling(const InstrProfOptions &Options) : Options(Options) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  bool run(Module &M, const TargetLibraryInfo &TLI);
+
+private:
+  InstrProfOptions Options;
+  // The module being instrumented.
+  Module *M;
+  // The module's target triple, used for platform-specific lowering decisions.
+  Triple TT;
+  const TargetLibraryInfo *TLI;
+  struct PerFunctionProfileData {
+    uint32_t NumValueSites[IPVK_Last + 1];
+    GlobalVariable *RegionCounters = nullptr;
+    GlobalVariable *DataVar = nullptr;
+
+    PerFunctionProfileData() {
+      memset(NumValueSites, 0, sizeof(uint32_t) * (IPVK_Last + 1));
+    }
+  };
+  DenseMap<GlobalVariable *, PerFunctionProfileData> ProfileDataMap;
+  std::vector<GlobalValue *> UsedVars;
+  std::vector<GlobalVariable *> ReferencedNames;
+  GlobalVariable *NamesVar;
+  size_t NamesSize;
+
+  // Vector of counter load/store pairs to be register-promoted.
+  std::vector<LoadStorePair> PromotionCandidates;
+
+  // The start value of precise value profile range for memory intrinsic sizes.
+  int64_t MemOPSizeRangeStart;
+  // The end value of precise value profile range for memory intrinsic sizes.
+  int64_t MemOPSizeRangeLast;
+
+  int64_t TotalCountersPromoted = 0;
+
+  /// Lower instrumentation intrinsics in the function. Returns true if any
+  /// lowering was performed.
+  bool lowerIntrinsics(Function *F);
+
+  /// Register-promote counter loads and stores in loops.
+  void promoteCounterLoadStores(Function *F);
+
+  /// Returns true if profile counter update register promotion is enabled.
+  bool isCounterPromotionEnabled() const;
+
+  /// Count the number of instrumented value sites for the function.
+  void computeNumValueSiteCounts(InstrProfValueProfileInst *Ins);
+
+  /// Replace instrprof_value_profile with a call to runtime library.
+  void lowerValueProfileInst(InstrProfValueProfileInst *Ins);
+
+  /// Replace instrprof_increment with an increment of the appropriate value.
+  void lowerIncrement(InstrProfIncrementInst *Inc);
+
+  /// Force emitting of name vars for unused functions.
+  void lowerCoverageData(GlobalVariable *CoverageNamesVar);
+
+  /// Get the region counters for an increment, creating them if necessary.
+  ///
+  /// If the counter array doesn't yet exist, the profile data variables
+  /// referring to them will also be created.
+  GlobalVariable *getOrCreateRegionCounters(InstrProfIncrementInst *Inc);
+
+  /// Emit the section with compressed function names.
+  void emitNameData();
+
+  /// Emit value nodes section for value profiling.
+  void emitVNodes();
+
+  /// Emit runtime registration functions for each profile data variable.
+  void emitRegistration();
+
+  /// Emit the necessary plumbing to pull in the runtime initialization.
+  /// Returns true if a change was made.
+  bool emitRuntimeHook();
+
+  /// Add uses of our data variables and runtime hook.
+  void emitUses();
+
+  /// Create a static initializer for our data, on platforms that need it,
+  /// and for any profile output file that was specified.
+  void emitInitialization();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRPROFILING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
new file mode 100644
index 0000000..c0b37c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
@@ -0,0 +1,75 @@
+//===- Transforms/Instrumentation/PGOInstrumentation.h ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for IR-based instrumentation passes
+/// (profile-gen and profile-use).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
+#define LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class Function;
+class Instruction;
+class Module;
+
+/// The instrumentation (profile-instr-gen) pass for IR based PGO.
+class PGOInstrumentationGen : public PassInfoMixin<PGOInstrumentationGen> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// The profile annotation (profile-instr-use) pass for IR based PGO.
+class PGOInstrumentationUse : public PassInfoMixin<PGOInstrumentationUse> {
+public:
+  PGOInstrumentationUse(std::string Filename = "");
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  std::string ProfileFileName;
+};
+
+/// The indirect function call promotion pass.
+class PGOIndirectCallPromotion : public PassInfoMixin<PGOIndirectCallPromotion> {
+public:
+  PGOIndirectCallPromotion(bool IsInLTO = false, bool SamplePGO = false)
+      : InLTO(IsInLTO), SamplePGO(SamplePGO) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  bool InLTO;
+  bool SamplePGO;
+};
+
+/// The profile size based optimization pass for memory intrinsics.
+class PGOMemOPSizeOpt : public PassInfoMixin<PGOMemOPSizeOpt> {
+public:
+  PGOMemOPSizeOpt() = default;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
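+
+// A minimal pipeline sketch (not part of the original header) showing how the
+// passes above compose for IR-based PGO; the profile path is hypothetical:
+//
+// \code
+//   ModulePassManager MPM;
+//   MPM.addPass(PGOInstrumentationGen());           // instrumentation build
+//   // ...or, in the profile-use build:
+//   MPM.addPass(PGOInstrumentationUse("code.profdata"));
+//   MPM.addPass(PGOIndirectCallPromotion());
+// \endcode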
+
+void setProfMetadata(Module *M, Instruction *TI, ArrayRef<uint64_t> EdgeCounts,
+                     uint64_t MaxCount);
+
+void setIrrLoopHeaderMetadata(Module *M, Instruction *TI, uint64_t Count);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/ObjCARC.h b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
new file mode 100644
index 0000000..1897adc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
@@ -0,0 +1,48 @@
+//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the ObjCARC Scalar Transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_OBJCARC_H
+#define LLVM_TRANSFORMS_OBJCARC_H
+
+namespace llvm {
+
+class Pass;
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCAPElim - ObjC ARC autorelease pool elimination.
+//
+Pass *createObjCARCAPElimPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCExpand - ObjC ARC preliminary simplifications.
+//
+Pass *createObjCARCExpandPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCContract - Late ObjC ARC cleanups.
+//
+Pass *createObjCARCContractPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCOpt - ObjC ARC optimization.
+//
+Pass *createObjCARCOptPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar.h b/linux-x64/clang/include/llvm/Transforms/Scalar.h
new file mode 100644
index 0000000..84c7bd4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar.h
@@ -0,0 +1,497 @@
+//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Scalar transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_H
+#define LLVM_TRANSFORMS_SCALAR_H
+
+#include <functional>
+
+namespace llvm {
+
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class ModulePass;
+class Pass;
+class GetElementPtrInst;
+class PassInfo;
+class TerminatorInst;
+class TargetLowering;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantPropagation - A worklist driven constant propagation pass
+//
+FunctionPass *createConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AlignmentFromAssumptions - Use assume intrinsics to set load/store
+// alignments.
+//
+FunctionPass *createAlignmentFromAssumptionsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SCCP - Sparse conditional constant propagation.
+//
+FunctionPass *createSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadInstElimination - This pass quickly removes trivially dead instructions
+// without modifying the CFG of the function.  It is a BasicBlockPass, so it
+// runs efficiently when queued next to other BasicBlockPasses.
+//
+Pass *createDeadInstEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadCodeElimination - This pass is more powerful than DeadInstElimination
+// because it is worklist driven: it can revisit instructions when their users
+// become dead, eliminating entire chains of dead computations.
+//
+FunctionPass *createDeadCodeEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadStoreElimination - This pass deletes stores that are post-dominated by
+// must-aliased stores and are not loaded between the stores.
+//
+FunctionPass *createDeadStoreEliminationPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// CallSiteSplitting - This pass splits call sites based on their known
+// argument values.
+FunctionPass *createCallSiteSplittingPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveDCE - This pass uses the SSA-based Aggressive DCE algorithm. This
+// algorithm assumes instructions are dead until proven otherwise, which makes
+// it more successful at removing non-obviously dead instructions.
+//
+FunctionPass *createAggressiveDCEPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// GuardWidening - An optimization over the @llvm.experimental.guard intrinsic
+// that (optimistically) combines multiple guards into one to have fewer checks
+// at runtime.
+//
+FunctionPass *createGuardWideningPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// BitTrackingDCE - This pass uses a bit-tracking DCE algorithm in order to
+// remove computations of dead bits.
+//
+FunctionPass *createBitTrackingDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductiveRangeCheckElimination - Transform loops to elide range checks on
+// linear functions of the induction variable.
+//
+Pass *createInductiveRangeCheckEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductionVariableSimplify - Transform induction variables in a program to all
+// use a single canonical induction variable per loop.
+//
+Pass *createIndVarSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionCombining - Combine instructions to form fewer, simple
+// instructions. This pass does not modify the CFG, and has a tendency to make
+// instructions dead, so a subsequent DCE pass is useful.
+//
+// This pass combines things like:
+//    %Y = add int 1, %X
+//    %Z = add int 1, %Y
+// into:
+//    %Z = add int 2, %X
+//
+FunctionPass *createInstructionCombiningPass(bool ExpensiveCombines = true);
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveInstCombiner - Combine expression patterns to form expressions with
+// fewer, simple instructions. This pass does not modify the CFG.
+//
+FunctionPass *createAggressiveInstCombinerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LICM - This pass is a loop invariant code motion and memory promotion pass.
+//
+Pass *createLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSink - This pass sinks loop invariants from the preheader into loop
+// blocks whose execution frequency is lower than the preheader's.
+//
+Pass *createLoopSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopPredication - This pass does loop predication on guards.
+//
+Pass *createLoopPredicationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInterchange - This pass interchanges loops to provide more
+// cache-friendly memory access patterns.
+//
+Pass *createLoopInterchangePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopStrengthReduce - This pass strength-reduces GEP instructions that use
+// a loop's canonical induction variable as one of their indices.
+//
+Pass *createLoopStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnswitch - This pass is a simple loop unswitching pass.
+//
+Pass *createLoopUnswitchPass(bool OptimizeForSize = false,
+                             bool hasBranchDivergence = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnroll - This pass is a simple loop unrolling pass.
+//
+Pass *createLoopUnrollPass(int OptLevel = 2, int Threshold = -1, int Count = -1,
+                           int AllowPartial = -1, int Runtime = -1,
+                           int UpperBound = -1, int AllowPeeling = -1);
+// Create an unrolling pass for full unrolling that uses exact trip count only.
+Pass *createSimpleLoopUnrollPass(int OptLevel = 2);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopReroll - This pass is a simple loop rerolling pass.
+//
+Pass *createLoopRerollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopRotate - This pass is a simple loop rotating pass.
+//
+Pass *createLoopRotatePass(int MaxHeaderSize = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopIdiom - This pass recognizes and replaces idioms in loops.
+//
+Pass *createLoopIdiomPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVersioningLICM - This pass is a loop versioning pass for LICM.
+//
+Pass *createLoopVersioningLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory
+// references. It basically undoes the PromoteMemoryToRegister pass to make CFG
+// hacking easier.
+//
+FunctionPass *createDemoteRegisterToMemoryPass();
+extern char &DemoteRegisterToMemoryID;
+
+//===----------------------------------------------------------------------===//
+//
+// Reassociate - This pass reassociates commutative expressions in an order that
+// is designed to promote better constant propagation, GCSE, LICM, PRE...
+//
+// For example:  4 + (x + 5)  ->  x + (4 + 5)
+//
+FunctionPass *createReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// JumpThreading - Thread control through multi-pred/multi-succ blocks where
+// some preds always go to some succ. Thresholds other than minus one override
+// the internal BB duplication default threshold.
+//
+FunctionPass *createJumpThreadingPass(int Threshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
+// simplify terminator instructions, convert switches to lookup tables, etc.
+//
+FunctionPass *createCFGSimplificationPass(
+    unsigned Threshold = 1, bool ForwardSwitchCond = false,
+    bool ConvertSwitch = false, bool KeepLoops = true, bool SinkCommon = false,
+    std::function<bool(const Function &)> Ftor = nullptr);
+
+//===----------------------------------------------------------------------===//
+//
+// FlattenCFG - flatten CFG, reduce number of conditional branches by using
+// parallel-and and parallel-or mode, etc...
+//
+FunctionPass *createFlattenCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// CFG Structurization - Remove irreducible control flow.
+//
+// When \p SkipUniformRegions is true the structurizer will not structurize
+// regions that only contain uniform branches.
+Pass *createStructurizeCFGPass(bool SkipUniformRegions = false);
+
+//===----------------------------------------------------------------------===//
+//
+// TailCallElimination - This pass eliminates call instructions to the current
+// function which occur immediately before return instructions.
+//
+FunctionPass *createTailCallEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
+// tree.
+//
+FunctionPass *createEarlyCSEPass(bool UseMemorySSA = false);
+
+//===----------------------------------------------------------------------===//
+//
+// GVNHoist - This pass performs a simple and fast GVN pass over the dominator
+// tree to hoist common expressions from sibling branches.
+//
+FunctionPass *createGVNHoistPass();
+
+//===----------------------------------------------------------------------===//
+//
+// GVNSink - This pass uses an "inverted" value numbering to decide the
+// similarity of expressions and sinks similar expressions into successors.
+//
+FunctionPass *createGVNSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
+// are hoisted into the header, while stores sink into the footer.
+//
+FunctionPass *createMergedLoadStoreMotionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// GVN - This pass performs global value numbering and redundant load
+// elimination simultaneously.
+//
+FunctionPass *createNewGVNPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DivRemPairs - Hoist/decompose integer division and remainder instructions.
+//
+FunctionPass *createDivRemPairsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MemCpyOpt - This pass performs optimizations related to eliminating memcpy
+// calls and/or combining multiple stores into memset's.
+//
+FunctionPass *createMemCpyOptPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDeletion - This pass performs DCE of non-infinite loops that it
+// can prove are dead.
+//
+Pass *createLoopDeletionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantHoisting - This pass prepares a function for expensive constants.
+//
+FunctionPass *createConstantHoistingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Sink - Code Sinking
+//
+FunctionPass *createSinkingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
+//
+Pass *createLowerAtomicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerGuardIntrinsic - Lower guard intrinsics to normal control flow.
+//
+Pass *createLowerGuardIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergeICmps - Merge integer comparison chains into a memcmp
+//
+Pass *createMergeICmpsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ValuePropagation - Propagate CFG-derived value information
+//
+Pass *createCorrelatedValuePropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InferAddressSpaces - Modify users of addrspacecast instructions with values
+// in the source address space if using the destination address space is slower
+// on the target.
+//
+FunctionPass *createInferAddressSpacesPass();
+extern char &InferAddressSpacesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
+// "block_weights" metadata.
+FunctionPass *createLowerExpectIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PartiallyInlineLibCalls - Tries to inline the fast path of library
+// calls such as sqrt.
+//
+FunctionPass *createPartiallyInlineLibCallsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarizerPass - Converts vector operations into scalar operations
+//
+FunctionPass *createScalarizerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *createSeparateConstOffsetFromGEPPass(bool LowerGEP = false);
+
+//===----------------------------------------------------------------------===//
+//
+// SpeculativeExecution - Aggressively hoist instructions to enable
+// speculative execution on targets where branches are expensive.
+//
+FunctionPass *createSpeculativeExecutionPass();
+
+// Same as createSpeculativeExecutionPass, but does nothing unless
+// TargetTransformInfo::hasBranchDivergence() is true.
+FunctionPass *createSpeculativeExecutionIfHasBranchDivergencePass();
+
+//===----------------------------------------------------------------------===//
+//
+// StraightLineStrengthReduce - This pass strength-reduces certain
+// instruction patterns in straight-line code.
+//
+FunctionPass *createStraightLineStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// PlaceSafepoints - Rewrite any IR calls to gc.statepoints and insert any
+// safepoint polls (method entry, backedge) that might be required.  This pass
+// does not generate explicit relocation sequences - that's handled by
+// RewriteStatepointsForGC which can be run at an arbitrary point in the pass
+// order following this pass.
+//
+FunctionPass *createPlaceSafepointsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// RewriteStatepointsForGC - Rewrite any gc.statepoints which do not yet have
+// explicit relocations to include explicit relocations.
+//
+ModulePass *createRewriteStatepointsForGCLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Float2Int - Demote floats to ints where possible.
+//
+FunctionPass *createFloat2IntPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NaryReassociate - Simplify n-ary operations by reassociation.
+//
+FunctionPass *createNaryReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDistribute - Distribute loops.
+//
+FunctionPass *createLoopDistributePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopLoadElimination - Perform loop-aware load elimination.
+//
+FunctionPass *createLoopLoadEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVersioning - Perform loop multi-versioning.
+//
+FunctionPass *createLoopVersioningPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDataPrefetch - Perform data prefetching in loops.
+//
+FunctionPass *createLoopDataPrefetchPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NameAnonGlobal - Rename anonymous globals so they can be referenced.
+//
+ModulePass *createNameAnonGlobalPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LibCallsShrinkWrap - Shrink-wraps a call to a function if the result is not
+// used.
+//
+FunctionPass *createLibCallsShrinkWrapPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplifyCFG - This pass performs basic CFG simplification on loops,
+// primarily to help other loop passes.
+//
+Pass *createLoopSimplifyCFGPass();
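+
+// A minimal legacy-pass-manager sketch (not part of the original header)
+// showing how a few of the creators above compose into a pipeline, assuming a
+// Module `M` and the LegacyPassManager header:
+//
+// \code
+//   legacy::FunctionPassManager FPM(&M);
+//   FPM.add(createEarlyCSEPass());
+//   FPM.add(createInstructionCombiningPass());
+//   FPM.add(createCFGSimplificationPass());
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();
+// \endcode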
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h
new file mode 100644
index 0000000..f98af62
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h
@@ -0,0 +1,38 @@
+//===- ADCE.h - Aggressive dead code elimination ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Aggressive Dead Code Elimination
+// pass. This pass optimistically assumes that all instructions are dead until
+// proven otherwise, allowing it to eliminate dead computations that other DCE
+// passes do not catch, particularly involving loop computations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
+#define LLVM_TRANSFORMS_SCALAR_ADCE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// A DCE pass that assumes instructions are dead until proven otherwise.
+///
+/// This pass eliminates dead code by optimistically assuming that all
+/// instructions are dead until proven otherwise. This allows it to eliminate
+/// dead computations that other DCE passes do not catch, particularly involving
+/// loop computations.
+struct ADCEPass : PassInfoMixin<ADCEPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
new file mode 100644
index 0000000..6197503
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
@@ -0,0 +1,45 @@
+//===---- AlignmentFromAssumptions.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a ScalarEvolution-based transformation to set
+// the alignments of loads, stores and memory intrinsics based on the truth
+// expressions of assume intrinsics. The primary motivation is to handle
+// complex alignment assumptions that apply to vector loads and stores that
+// appear after vectorization and unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
+#define LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
+
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct AlignmentFromAssumptionsPass
+    : public PassInfoMixin<AlignmentFromAssumptionsPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, AssumptionCache &AC, ScalarEvolution *SE_,
+               DominatorTree *DT_);
+
+  ScalarEvolution *SE = nullptr;
+  DominatorTree *DT = nullptr;
+
+  bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
+                            const SCEV *&OffSCEV);
+  bool processAssumption(CallInst *I);
+};
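+
+// An IR-level sketch (not part of the original header) of the pattern this
+// pass consumes: an llvm.assume whose condition encodes pointer alignment.
+//
+// \code
+//   %ptrint = ptrtoint float* %ptr to i64
+//   %maskedptr = and i64 %ptrint, 31
+//   %maskcond = icmp eq i64 %maskedptr, 0
+//   call void @llvm.assume(i1 %maskcond)  ; asserts %ptr is 32-byte aligned
+// \endcode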
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h
new file mode 100644
index 0000000..d7d2730
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h
@@ -0,0 +1,31 @@
+//===---- BDCE.h - Bit-tracking dead code elimination -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Bit-Tracking Dead Code Elimination pass. Some
+// instructions (shifts, some ands, ors, etc.) kill some of their input bits.
+// We track these dead bits and remove instructions that compute only these
+// dead bits.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_BDCE_H
+#define LLVM_TRANSFORMS_SCALAR_BDCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+// The Bit-Tracking Dead Code Elimination pass.
+struct BDCEPass : PassInfoMixin<BDCEPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_BDCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
new file mode 100644
index 0000000..5ab951a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
@@ -0,0 +1,29 @@
+//===- CallSiteSplitting.h - Callsite Splitting -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
+#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+struct CallSiteSplittingPass : PassInfoMixin<CallSiteSplittingPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
new file mode 100644
index 0000000..d3322dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
@@ -0,0 +1,167 @@
+//==- ConstantHoisting.h - Prepare code for expensive constants --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies expensive constants to hoist and coalesces them to
+// better prepare them for SelectionDAG-based code generation. This works
+// around the limitations of the basic-block-at-a-time approach.
+//
+// First it scans all instructions for integer constants and calculates their
+// cost. If the constant can be folded into the instruction (the cost is
+// TCC_Free) or the cost is just a simple operation (TCC_Basic), then we don't
+// consider it expensive and leave it alone. This is the default behavior and
+// the default implementation of getIntImmCost will always return TCC_Free.
+//
+// If the cost is more than TCC_Basic, then the integer constant can't be folded
+// into the instruction and it might be beneficial to hoist the constant.
+// Similar constants are coalesced to reduce register pressure and
+// materialization code.
+//
+// When a constant is hoisted, it is also hidden behind a bitcast to force it to
+// be live-out of the basic block. Otherwise the constant would be just
+// duplicated and each basic block would have its own copy in the SelectionDAG.
+// The SelectionDAG recognizes such constants as opaque and doesn't perform
+// certain transformations on them, which would create a new expensive constant.
+//
+// This optimization is only applied to integer constants in instructions and
+// simple (that is, not nested) constant cast expressions. For example:
+// %0 = load i64* inttoptr (i64 big_constant to i64*)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
+#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include <algorithm>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockFrequencyInfo;
+class Constant;
+class ConstantInt;
+class DominatorTree;
+class Function;
+class Instruction;
+class TargetTransformInfo;
+
+/// A private "module" namespace for types and utilities used by
+/// ConstantHoisting. These are implementation details and should not be used by
+/// clients.
+namespace consthoist {
+
+/// \brief Keeps track of the user of a constant and the operand index where the
+/// constant is used.
+struct ConstantUser {
+  Instruction *Inst;
+  unsigned OpndIdx;
+
+  ConstantUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) {}
+};
+
+using ConstantUseListType = SmallVector<ConstantUser, 8>;
+
+/// \brief Keeps track of a constant candidate and its uses.
+struct ConstantCandidate {
+  ConstantUseListType Uses;
+  ConstantInt *ConstInt;
+  unsigned CumulativeCost = 0;
+
+  ConstantCandidate(ConstantInt *ConstInt) : ConstInt(ConstInt) {}
+
+  /// \brief Add the user to the use list and update the cost.
+  void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) {
+    CumulativeCost += Cost;
+    Uses.push_back(ConstantUser(Inst, Idx));
+  }
+};
+
+/// \brief This represents a constant that has been rebased with respect to a
+/// base constant. The difference from the base constant is recorded in Offset.
+struct RebasedConstantInfo {
+  ConstantUseListType Uses;
+  Constant *Offset;
+
+  RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset)
+    : Uses(std::move(Uses)), Offset(Offset) {}
+};
+
+using RebasedConstantListType = SmallVector<RebasedConstantInfo, 4>;
+
+/// \brief A base constant and all its rebased constants.
+struct ConstantInfo {
+  ConstantInt *BaseConstant;
+  RebasedConstantListType RebasedConstants;
+};
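+
+// A small worked example (not part of the original header): given uses of
+// 0x12345678 and 0x12345679, the pass can pick 0x12345678 as the BaseConstant
+// and record the second constant as a RebasedConstantInfo with Offset = 1, so
+// both uses share one materialized value:
+//
+// \code
+//   %base = ... materialize i64 305419896 ...  ; 0x12345678, hoisted
+//   %c2   = add i64 %base, 1                   ; rebased, Offset = 1
+// \endcode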
+
+} // end namespace consthoist
+
+class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
+               BlockFrequencyInfo *BFI, BasicBlock &Entry);
+
+  void releaseMemory() {
+    ConstantVec.clear();
+    ClonedCastMap.clear();
+    ConstCandVec.clear();
+  }
+
+private:
+  using ConstCandMapType = DenseMap<ConstantInt *, unsigned>;
+  using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
+
+  const TargetTransformInfo *TTI;
+  DominatorTree *DT;
+  BlockFrequencyInfo *BFI;
+  BasicBlock *Entry;
+
+  /// Keeps track of constant candidates found in the function.
+  ConstCandVecType ConstCandVec;
+
+  /// Keep track of cast instructions we already cloned.
+  SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
+
+  /// These are the final constants we decided to hoist.
+  SmallVector<consthoist::ConstantInfo, 8> ConstantVec;
+
+  Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
+  SmallPtrSet<Instruction *, 8>
+  findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst, unsigned Idx,
+                                 ConstantInt *ConstInt);
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst, unsigned Idx);
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst);
+  void collectConstantCandidates(Function &Fn);
+  void findAndMakeBaseConstant(ConstCandVecType::iterator S,
+                               ConstCandVecType::iterator E);
+  unsigned maximizeConstantsInRange(ConstCandVecType::iterator S,
+                                    ConstCandVecType::iterator E,
+                                    ConstCandVecType::iterator &MaxCostItr);
+  void findBaseConstants();
+  void emitBaseConstants(Instruction *Base, Constant *Offset,
+                         const consthoist::ConstantUser &ConstUser);
+  bool emitBaseConstants();
+  void deleteDeadCastInst() const;
+  bool optimizeConstants(Function &Fn);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h b/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h
new file mode 100644
index 0000000..2093069
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h
@@ -0,0 +1,26 @@
+//===- CorrelatedValuePropagation.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
+#define LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct CorrelatedValuePropagationPass
+    : PassInfoMixin<CorrelatedValuePropagationPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
new file mode 100644
index 0000000..273346c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
@@ -0,0 +1,29 @@
+//===- DCE.h - Dead code elimination ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Dead Code Elimination pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DCE_H
+#define LLVM_TRANSFORMS_SCALAR_DCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Basic Dead Code Elimination pass.
+class DCEPass : public PassInfoMixin<DCEPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_DCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h
new file mode 100644
index 0000000..cfeb218
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h
@@ -0,0 +1,36 @@
+//===- DeadStoreElimination.h - Fast Dead Store Elimination -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a trivial dead store elimination that only considers
+// basic-block local redundant stores.
+//
+// FIXME: This should eventually be extended to be a post-dominator tree
+// traversal.  Doing so would be pretty trivial.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// This class implements a trivial dead store elimination. We consider
+/// only the redundant stores that are local to a single Basic Block.
+class DSEPass : public PassInfoMixin<DSEPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h
new file mode 100644
index 0000000..0a4346f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h
@@ -0,0 +1,31 @@
+//===- DivRemPairs.h - Hoist/decompose integer division and remainder -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists and/or decomposes integer division and remainder
+// instructions to enable CFG improvements and better codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+#define LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Hoist/decompose integer division and remainder instructions to enable CFG
+/// improvements and better codegen.
+struct DivRemPairsPass : public PassInfoMixin<DivRemPairsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
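+
+// An IR-level sketch (not part of the original header) of what the pass
+// targets: a div/rem pair over the same operands, which can be placed next to
+// each other (for targets with a combined divrem) or have the remainder
+// decomposed as a - (a / b) * b:
+//
+// \code
+//   %div = sdiv i32 %a, %b
+//   %rem = srem i32 %a, %b   ; same operands as %div
+// \endcode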
+
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h
new file mode 100644
index 0000000..dca3b2d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -0,0 +1,42 @@
+//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for a simple, fast CSE pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+struct EarlyCSEPass : PassInfoMixin<EarlyCSEPass> {
+  EarlyCSEPass(bool UseMemorySSA = false) : UseMemorySSA(UseMemorySSA) {}
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  bool UseMemorySSA;
+};
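+
+// A minimal sketch (not part of the original header); the MemorySSA-backed
+// variant catches more load/store redundancies at somewhat higher cost:
+//
+// \code
+//   FunctionPassManager FPM;
+//   FPM.addPass(EarlyCSEPass(/*UseMemorySSA=*/true));
+// \endcode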
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
new file mode 100644
index 0000000..206ee98
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
@@ -0,0 +1,51 @@
+//===-- Float2Int.h - Demote floating point ops to work on integers -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the Float2Int pass, which aims to demote floating
+// point operations to work on integers, where that is losslessly possible.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
+#define LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
+
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Float2IntPass : public PassInfoMixin<Float2IntPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F);
+
+private:
+  void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
+  void seen(Instruction *I, ConstantRange R);
+  ConstantRange badRange();
+  ConstantRange unknownRange();
+  ConstantRange validateRange(ConstantRange R);
+  void walkBackwards(const SmallPtrSetImpl<Instruction *> &Roots);
+  void walkForwards();
+  bool validateAndTransform();
+  Value *convert(Instruction *I, Type *ToTy);
+  void cleanup();
+
+  MapVector<Instruction *, ConstantRange> SeenInsts;
+  SmallPtrSet<Instruction *, 8> Roots;
+  EquivalenceClasses<Instruction *> ECs;
+  MapVector<Instruction *, Value *> ConvertedInsts;
+  LLVMContext *Ctx;
+};
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
new file mode 100644
index 0000000..440d3f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
@@ -0,0 +1,310 @@
+//===- GVN.h - Eliminate redundant values and loads -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Global Value Numbering pass
+/// which eliminates fully redundant instructions. It also performs somewhat
+/// ad-hoc PRE and dead load elimination.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GVN_H
+#define LLVM_TRANSFORMS_SCALAR_GVN_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Transforms/Utils/OrderedInstructions.h"
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class BranchInst;
+class CallInst;
+class Constant;
+class ExtractValueInst;
+class Function;
+class FunctionPass;
+class IntrinsicInst;
+class LoadInst;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetLibraryInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by GVN. These
+/// are implementation details and should not be used by clients.
+namespace gvn LLVM_LIBRARY_VISIBILITY {
+
+struct AvailableValue;
+struct AvailableValueInBlock;
+class GVNLegacyPass;
+
+} // end namespace gvn
+
+/// The core GVN pass object.
+///
+/// FIXME: We should have a good summary of the GVN algorithm implemented by
+/// this particular pass here.
+class GVN : public PassInfoMixin<GVN> {
+public:
+  struct Expression;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  /// This removes the specified instruction from
+  /// our various maps and marks it for deletion.
+  void markInstructionForDeletion(Instruction *I) {
+    VN.erase(I);
+    InstrsToErase.push_back(I);
+  }
+
+  DominatorTree &getDominatorTree() const { return *DT; }
+  AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
+  MemoryDependenceResults &getMemDep() const { return *MD; }
+
+  /// This class holds the mapping between values and value numbers.  It is used
+  /// as an efficient mechanism to determine the expression-wise equivalence of
+  /// two values.
+  class ValueTable {
+    DenseMap<Value *, uint32_t> valueNumbering;
+    DenseMap<Expression, uint32_t> expressionNumbering;
+
+    // Expressions is the vector of Expression. ExprIdx maps a value number to
+    // the index of its Expression in Expressions. We use a plain vector
+    // rather than a DenseMap because it is faster to fill, which gives
+    // slightly better compile time.
+    uint32_t nextExprNumber;
+
+    std::vector<Expression> Expressions;
+    std::vector<uint32_t> ExprIdx;
+
+    // Value number to PHINode mapping. Used for phi-translate in scalarpre.
+    DenseMap<uint32_t, PHINode *> NumberingPhi;
+
+    // Cache for phi-translate in scalarpre.
+    using PhiTranslateMap =
+        DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>;
+    PhiTranslateMap PhiTranslateTable;
+
+    AliasAnalysis *AA;
+    MemoryDependenceResults *MD;
+    DominatorTree *DT;
+
+    uint32_t nextValueNumber = 1;
+
+    Expression createExpr(Instruction *I);
+    Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
+                             Value *LHS, Value *RHS);
+    Expression createExtractvalueExpr(ExtractValueInst *EI);
+    uint32_t lookupOrAddCall(CallInst *C);
+    uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
+                              uint32_t Num, GVN &Gvn);
+    std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
+    bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
+
+  public:
+    ValueTable();
+    ValueTable(const ValueTable &Arg);
+    ValueTable(ValueTable &&Arg);
+    ~ValueTable();
+
+    uint32_t lookupOrAdd(Value *V);
+    uint32_t lookup(Value *V, bool Verify = true) const;
+    uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
+                            Value *LHS, Value *RHS);
+    uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
+                          uint32_t Num, GVN &Gvn);
+    void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
+    bool exists(Value *V) const;
+    void add(Value *V, uint32_t num);
+    void clear();
+    void erase(Value *v);
+    void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
+    AliasAnalysis *getAliasAnalysis() const { return AA; }
+    void setMemDep(MemoryDependenceResults *M) { MD = M; }
+    void setDomTree(DominatorTree *D) { DT = D; }
+    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
+    void verifyRemoved(const Value *) const;
+  };
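+
+  // Example use of ValueTable (an editor's sketch, not part of the upstream
+  // header; I1 and I2 stand for hypothetical Instruction pointers): two
+  // values are expression-wise equivalent exactly when lookupOrAdd returns
+  // the same number for both.
+  //
+  //   GVN::ValueTable VT;
+  //   uint32_t N1 = VT.lookupOrAdd(I1);
+  //   uint32_t N2 = VT.lookupOrAdd(I2);
+  //   bool Equivalent = (N1 == N2);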
+
+private:
+  friend class gvn::GVNLegacyPass;
+  friend struct DenseMapInfo<Expression>;
+
+  MemoryDependenceResults *MD;
+  DominatorTree *DT;
+  const TargetLibraryInfo *TLI;
+  AssumptionCache *AC;
+  SetVector<BasicBlock *> DeadBlocks;
+  OptimizationRemarkEmitter *ORE;
+  // Maps a block to the topmost instruction with implicit control flow in it.
+  DenseMap<const BasicBlock *, const Instruction *>
+      FirstImplicitControlFlowInsts;
+
+  OrderedInstructions *OI;
+  ValueTable VN;
+
+  /// A mapping from value numbers to lists of Value*'s that
+  /// have that value number.  Use findLeader to query it.
+  struct LeaderTableEntry {
+    Value *Val;
+    const BasicBlock *BB;
+    LeaderTableEntry *Next;
+  };
+  DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
+  BumpPtrAllocator TableAllocator;
+
+  // Block-local map of equivalent values to their leader; it does not
+  // propagate to any successors. Entries added mid-block are applied to the
+  // remaining instructions in the block.
+  SmallMapVector<Value *, Constant *, 4> ReplaceWithConstMap;
+  SmallVector<Instruction *, 8> InstrsToErase;
+
+  // Maps a block to its reverse postorder traversal number; used to find
+  // back edges easily.
+  DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
+
+  using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
+  using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
+  using UnavailBlkVect = SmallVector<BasicBlock *, 64>;
+
+  bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
+               const TargetLibraryInfo &RunTLI, AAResults &RunAA,
+               MemoryDependenceResults *RunMD, LoopInfo *LI,
+               OptimizationRemarkEmitter *ORE);
+
+  /// Push a new Value onto the LeaderTable list for its value number.
+  void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
+    LeaderTableEntry &Curr = LeaderTable[N];
+    if (!Curr.Val) {
+      Curr.Val = V;
+      Curr.BB = BB;
+      return;
+    }
+
+    LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
+    Node->Val = V;
+    Node->BB = BB;
+    Node->Next = Curr.Next;
+    Curr.Next = Node;
+  }
+
+  /// Scan the list of values corresponding to a given
+  /// value number, and remove the given instruction if encountered.
+  void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
+    LeaderTableEntry *Prev = nullptr;
+    LeaderTableEntry *Curr = &LeaderTable[N];
+
+    while (Curr && (Curr->Val != I || Curr->BB != BB)) {
+      Prev = Curr;
+      Curr = Curr->Next;
+    }
+
+    if (!Curr)
+      return;
+
+    if (Prev) {
+      Prev->Next = Curr->Next;
+    } else {
+      if (!Curr->Next) {
+        Curr->Val = nullptr;
+        Curr->BB = nullptr;
+      } else {
+        LeaderTableEntry *Next = Curr->Next;
+        Curr->Val = Next->Val;
+        Curr->BB = Next->BB;
+        Curr->Next = Next->Next;
+      }
+    }
+  }
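+
+  // Layout note (an editor's illustration, not upstream text): for a value
+  // number N with leaders V1 in BB1 and V2 in BB2, LeaderTable[N] holds the
+  // head entry {V1, BB1, Next} inline in the DenseMap, with Next pointing at
+  // a bump-allocated {V2, BB2, nullptr}. Because the head lives inline,
+  // removing it copies the second node into the head slot rather than
+  // unlinking, as implemented above.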
+
+  // List of critical edges to be split between iterations.
+  SmallVector<std::pair<TerminatorInst *, unsigned>, 4> toSplit;
+
+  // Helper functions of redundant load elimination
+  bool processLoad(LoadInst *L);
+  bool processNonLocalLoad(LoadInst *L);
+  bool processAssumeIntrinsic(IntrinsicInst *II);
+
+  /// Given a local dependency (Def or Clobber), determine if a value is
+  /// available for the load.  Returns true if a value is known to be
+  /// available and populates Res.  Returns false otherwise.
+  bool AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
+                               Value *Address, gvn::AvailableValue &Res);
+
+  /// Given a list of non-local dependencies, determine if a value is
+  /// available for the load in each specified block.  If it is, add it to
+  /// ValuesPerBlock.  If not, add it to UnavailableBlocks.
+  void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
+                               AvailValInBlkVect &ValuesPerBlock,
+                               UnavailBlkVect &UnavailableBlocks);
+
+  bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
+                      UnavailBlkVect &UnavailableBlocks);
+
+  // Other helper routines
+  bool processInstruction(Instruction *I);
+  bool processBlock(BasicBlock *BB);
+  void dump(DenseMap<uint32_t, Value *> &d) const;
+  bool iterateOnFunction(Function &F);
+  bool performPRE(Function &F);
+  bool performScalarPRE(Instruction *I);
+  bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
+                                 BasicBlock *Curr, unsigned int ValNo);
+  Value *findLeader(const BasicBlock *BB, uint32_t num);
+  void cleanupGlobalSets();
+  void fillImplicitControlFlowInfo(BasicBlock *BB);
+  void verifyRemoved(const Instruction *I) const;
+  bool splitCriticalEdges();
+  BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
+  bool replaceOperandsWithConsts(Instruction *I) const;
+  bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
+                         bool DominatesByEdge);
+  bool processFoldableCondBr(BranchInst *BI);
+  void addDeadBlock(BasicBlock *BB);
+  void assignValNumForDeadCode();
+  void assignBlockRPONumber(Function &F);
+};
+
+/// Create a legacy GVN pass. This also allows parameterizing whether or not
+/// loads are eliminated by the pass.
+FunctionPass *createGVNPass(bool NoLoads = false);
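+
+// Example of scheduling GVN under the new pass manager (an editor's sketch,
+// not part of the upstream header; F and FAM are a caller's Function and
+// FunctionAnalysisManager):
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(GVN());
+//   PreservedAnalyses PA = FPM.run(F, FAM);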
+
+/// \brief A simple and fast domtree-based GVN pass to hoist common expressions
+/// from sibling branches.
+struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Uses an "inverted" value numbering to decide the similarity of
+/// expressions and sinks similar expressions into successors.
+struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_GVN_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
new file mode 100644
index 0000000..99dae15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -0,0 +1,661 @@
+//===- GVNExpression.h - GVN Expression classes -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// The header file for the GVN pass; it contains the expression handling
+/// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+#define LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+class BasicBlock;
+class Type;
+
+namespace GVNExpression {
+
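+// The ET_*Start and ET_*End enumerators below are sentinels rather than
+// concrete expression kinds: they bracket contiguous ranges of the enum so
+// that classof() implementations (see BasicExpression and MemoryExpression)
+// can test membership with a cheap range comparison.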
+enum ExpressionType {
+  ET_Base,
+  ET_Constant,
+  ET_Variable,
+  ET_Dead,
+  ET_Unknown,
+  ET_BasicStart,
+  ET_Basic,
+  ET_AggregateValue,
+  ET_Phi,
+  ET_MemoryStart,
+  ET_Call,
+  ET_Load,
+  ET_Store,
+  ET_MemoryEnd,
+  ET_BasicEnd
+};
+
+class Expression {
+private:
+  ExpressionType EType;
+  unsigned Opcode;
+  mutable hash_code HashVal = 0;
+
+public:
+  Expression(ExpressionType ET = ET_Base, unsigned O = ~2U)
+      : EType(ET), Opcode(O) {}
+  Expression(const Expression &) = delete;
+  Expression &operator=(const Expression &) = delete;
+  virtual ~Expression();
+
+  static unsigned getEmptyKey() { return ~0U; }
+  static unsigned getTombstoneKey() { return ~1U; }
+
+  bool operator!=(const Expression &Other) const { return !(*this == Other); }
+  bool operator==(const Expression &Other) const {
+    if (getOpcode() != Other.getOpcode())
+      return false;
+    if (getOpcode() == getEmptyKey() || getOpcode() == getTombstoneKey())
+      return true;
+    // Compare the expression type for anything but load and store.
+    // For load and store we set the opcode to zero to make them equal.
+    if (getExpressionType() != ET_Load && getExpressionType() != ET_Store &&
+        getExpressionType() != Other.getExpressionType())
+      return false;
+
+    return equals(Other);
+  }
+
+  hash_code getComputedHash() const {
+    // It's theoretically possible for a thing to hash to zero.  In that case,
+    // we will just compute the hash a few extra times, which is no worse than
+    // what we did before, which was to compute it always.
+    if (static_cast<unsigned>(HashVal) == 0)
+      HashVal = getHashValue();
+    return HashVal;
+  }
+
+  virtual bool equals(const Expression &Other) const { return true; }
+
+  // Return true if the two expressions are exactly the same, including the
+  // normally ignored fields.
+  virtual bool exactlyEquals(const Expression &Other) const {
+    return getExpressionType() == Other.getExpressionType() && equals(Other);
+  }
+
+  unsigned getOpcode() const { return Opcode; }
+  void setOpcode(unsigned opcode) { Opcode = opcode; }
+  ExpressionType getExpressionType() const { return EType; }
+
+  // We deliberately leave the expression type out of the hash value.
+  virtual hash_code getHashValue() const { return getOpcode(); }
+
+  // Debugging support
+  virtual void printInternal(raw_ostream &OS, bool PrintEType) const {
+    if (PrintEType)
+      OS << "etype = " << getExpressionType() << ",";
+    OS << "opcode = " << getOpcode() << ", ";
+  }
+
+  void print(raw_ostream &OS) const {
+    OS << "{ ";
+    printInternal(OS, true);
+    OS << "}";
+  }
+
+  LLVM_DUMP_METHOD void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Expression &E) {
+  E.print(OS);
+  return OS;
+}
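+
+// The lazy hash caching in getComputedHash() generalizes to any type whose
+// hash inputs are immutable. A minimal standalone sketch of the idiom (an
+// editor's example; computeHash() is a hypothetical helper):
+//
+//   struct CachedHash {
+//     mutable hash_code HashVal = 0; // 0 doubles as "not yet computed"
+//     hash_code getComputedHash() const {
+//       if (static_cast<unsigned>(HashVal) == 0)
+//         HashVal = computeHash(); // rare zero hashes just recompute
+//       return HashVal;
+//     }
+//     hash_code computeHash() const; // hypothetical
+//   };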
+
+class BasicExpression : public Expression {
+private:
+  using RecyclerType = ArrayRecycler<Value *>;
+  using RecyclerCapacity = RecyclerType::Capacity;
+
+  Value **Operands = nullptr;
+  unsigned MaxOperands;
+  unsigned NumOperands = 0;
+  Type *ValueType = nullptr;
+
+public:
+  BasicExpression(unsigned NumOperands)
+      : BasicExpression(NumOperands, ET_Basic) {}
+  BasicExpression(unsigned NumOperands, ExpressionType ET)
+      : Expression(ET), MaxOperands(NumOperands) {}
+  BasicExpression() = delete;
+  BasicExpression(const BasicExpression &) = delete;
+  BasicExpression &operator=(const BasicExpression &) = delete;
+  ~BasicExpression() override;
+
+  static bool classof(const Expression *EB) {
+    ExpressionType ET = EB->getExpressionType();
+    return ET > ET_BasicStart && ET < ET_BasicEnd;
+  }
+
+  /// \brief Swap two operands. Used during GVN to put commutative operands in
+  /// order.
+  void swapOperands(unsigned First, unsigned Second) {
+    std::swap(Operands[First], Operands[Second]);
+  }
+
+  Value *getOperand(unsigned N) const {
+    assert(Operands && "Operands not allocated");
+    assert(N < NumOperands && "Operand out of range");
+    return Operands[N];
+  }
+
+  void setOperand(unsigned N, Value *V) {
+    assert(Operands && "Operands not allocated before setting");
+    assert(N < NumOperands && "Operand out of range");
+    Operands[N] = V;
+  }
+
+  unsigned getNumOperands() const { return NumOperands; }
+
+  using op_iterator = Value **;
+  using const_op_iterator = Value *const *;
+
+  op_iterator op_begin() { return Operands; }
+  op_iterator op_end() { return Operands + NumOperands; }
+  const_op_iterator op_begin() const { return Operands; }
+  const_op_iterator op_end() const { return Operands + NumOperands; }
+  iterator_range<op_iterator> operands() {
+    return iterator_range<op_iterator>(op_begin(), op_end());
+  }
+  iterator_range<const_op_iterator> operands() const {
+    return iterator_range<const_op_iterator>(op_begin(), op_end());
+  }
+
+  void op_push_back(Value *Arg) {
+    assert(NumOperands < MaxOperands && "Tried to add too many operands");
+    assert(Operands && "Operandss not allocated before pushing");
+    Operands[NumOperands++] = Arg;
+  }
+  bool op_empty() const { return getNumOperands() == 0; }
+
+  void allocateOperands(RecyclerType &Recycler, BumpPtrAllocator &Allocator) {
+    assert(!Operands && "Operands already allocated");
+    Operands = Recycler.allocate(RecyclerCapacity::get(MaxOperands), Allocator);
+  }
+  void deallocateOperands(RecyclerType &Recycler) {
+    Recycler.deallocate(RecyclerCapacity::get(MaxOperands), Operands);
+  }
+
+  void setType(Type *T) { ValueType = T; }
+  Type *getType() const { return ValueType; }
+
+  bool equals(const Expression &Other) const override {
+    if (getOpcode() != Other.getOpcode())
+      return false;
+
+    const auto &OE = cast<BasicExpression>(Other);
+    return getType() == OE.getType() && NumOperands == OE.NumOperands &&
+           std::equal(op_begin(), op_end(), OE.op_begin());
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(), ValueType,
+                        hash_combine_range(op_begin(), op_end()));
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeBasic, ";
+
+    this->Expression::printInternal(OS, false);
+    OS << "operands = {";
+    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+      OS << "[" << i << "] = ";
+      Operands[i]->printAsOperand(OS);
+      OS << "  ";
+    }
+    OS << "} ";
+  }
+};
+
+class op_inserter
+    : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+  using Container = BasicExpression;
+
+  Container *BE;
+
+public:
+  explicit op_inserter(BasicExpression &E) : BE(&E) {}
+  explicit op_inserter(BasicExpression *E) : BE(E) {}
+
+  op_inserter &operator=(Value *val) {
+    BE->op_push_back(val);
+    return *this;
+  }
+  op_inserter &operator*() { return *this; }
+  op_inserter &operator++() { return *this; }
+  op_inserter &operator++(int) { return *this; }
+};
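+
+// op_inserter lets STL algorithms populate a BasicExpression's operand array
+// once it has been allocated. A minimal sketch (an editor's example; I,
+// Recycler and Allocator stand for a caller's Instruction and allocators):
+//
+//   auto *BE = new BasicExpression(I->getNumOperands());
+//   BE->allocateOperands(Recycler, Allocator);
+//   std::copy(I->op_begin(), I->op_end(), op_inserter(BE));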
+
+class MemoryExpression : public BasicExpression {
+private:
+  const MemoryAccess *MemoryLeader;
+
+public:
+  MemoryExpression(unsigned NumOperands, enum ExpressionType EType,
+                   const MemoryAccess *MemoryLeader)
+      : BasicExpression(NumOperands, EType), MemoryLeader(MemoryLeader) {}
+  MemoryExpression() = delete;
+  MemoryExpression(const MemoryExpression &) = delete;
+  MemoryExpression &operator=(const MemoryExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() > ET_MemoryStart &&
+           EB->getExpressionType() < ET_MemoryEnd;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(), MemoryLeader);
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const MemoryExpression &OtherMCE = cast<MemoryExpression>(Other);
+
+    return MemoryLeader == OtherMCE.MemoryLeader;
+  }
+
+  const MemoryAccess *getMemoryLeader() const { return MemoryLeader; }
+  void setMemoryLeader(const MemoryAccess *ML) { MemoryLeader = ML; }
+};
+
+class CallExpression final : public MemoryExpression {
+private:
+  CallInst *Call;
+
+public:
+  CallExpression(unsigned NumOperands, CallInst *C,
+                 const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, ET_Call, MemoryLeader), Call(C) {}
+  CallExpression() = delete;
+  CallExpression(const CallExpression &) = delete;
+  CallExpression &operator=(const CallExpression &) = delete;
+  ~CallExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Call;
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeCall, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents call at ";
+    Call->printAsOperand(OS);
+  }
+};
+
+class LoadExpression final : public MemoryExpression {
+private:
+  LoadInst *Load;
+  unsigned Alignment;
+
+public:
+  LoadExpression(unsigned NumOperands, LoadInst *L,
+                 const MemoryAccess *MemoryLeader)
+      : LoadExpression(ET_Load, NumOperands, L, MemoryLeader) {}
+
+  LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
+                 const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {
+    Alignment = L ? L->getAlignment() : 0;
+  }
+
+  LoadExpression() = delete;
+  LoadExpression(const LoadExpression &) = delete;
+  LoadExpression &operator=(const LoadExpression &) = delete;
+  ~LoadExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Load;
+  }
+
+  LoadInst *getLoadInst() const { return Load; }
+  void setLoadInst(LoadInst *L) { Load = L; }
+
+  unsigned getAlignment() const { return Alignment; }
+  void setAlignment(unsigned Align) { Alignment = Align; }
+
+  bool equals(const Expression &Other) const override;
+  bool exactlyEquals(const Expression &Other) const override {
+    return Expression::exactlyEquals(Other) &&
+           cast<LoadExpression>(Other).getLoadInst() == getLoadInst();
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeLoad, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents Load at ";
+    Load->printAsOperand(OS);
+    OS << " with MemoryLeader " << *getMemoryLeader();
+  }
+};
+
+class StoreExpression final : public MemoryExpression {
+private:
+  StoreInst *Store;
+  Value *StoredValue;
+
+public:
+  StoreExpression(unsigned NumOperands, StoreInst *S, Value *StoredValue,
+                  const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, ET_Store, MemoryLeader), Store(S),
+        StoredValue(StoredValue) {}
+  StoreExpression() = delete;
+  StoreExpression(const StoreExpression &) = delete;
+  StoreExpression &operator=(const StoreExpression &) = delete;
+  ~StoreExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Store;
+  }
+
+  StoreInst *getStoreInst() const { return Store; }
+  Value *getStoredValue() const { return StoredValue; }
+
+  bool equals(const Expression &Other) const override;
+
+  bool exactlyEquals(const Expression &Other) const override {
+    return Expression::exactlyEquals(Other) &&
+           cast<StoreExpression>(Other).getStoreInst() == getStoreInst();
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeStore, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents Store  " << *Store;
+    OS << " with StoredValue ";
+    StoredValue->printAsOperand(OS);
+    OS << " and MemoryLeader " << *getMemoryLeader();
+  }
+};
+
+class AggregateValueExpression final : public BasicExpression {
+private:
+  unsigned MaxIntOperands;
+  unsigned NumIntOperands = 0;
+  unsigned *IntOperands = nullptr;
+
+public:
+  AggregateValueExpression(unsigned NumOperands, unsigned NumIntOperands)
+      : BasicExpression(NumOperands, ET_AggregateValue),
+        MaxIntOperands(NumIntOperands) {}
+  AggregateValueExpression() = delete;
+  AggregateValueExpression(const AggregateValueExpression &) = delete;
+  AggregateValueExpression &
+  operator=(const AggregateValueExpression &) = delete;
+  ~AggregateValueExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_AggregateValue;
+  }
+
+  using int_arg_iterator = unsigned *;
+  using const_int_arg_iterator = const unsigned *;
+
+  int_arg_iterator int_op_begin() { return IntOperands; }
+  int_arg_iterator int_op_end() { return IntOperands + NumIntOperands; }
+  const_int_arg_iterator int_op_begin() const { return IntOperands; }
+  const_int_arg_iterator int_op_end() const {
+    return IntOperands + NumIntOperands;
+  }
+  unsigned int_op_size() const { return NumIntOperands; }
+  bool int_op_empty() const { return NumIntOperands == 0; }
+  void int_op_push_back(unsigned IntOperand) {
+    assert(NumIntOperands < MaxIntOperands &&
+           "Tried to add too many int operands");
+    assert(IntOperands && "Operands not allocated before pushing");
+    IntOperands[NumIntOperands++] = IntOperand;
+  }
+
+  virtual void allocateIntOperands(BumpPtrAllocator &Allocator) {
+    assert(!IntOperands && "Operands already allocated");
+    IntOperands = Allocator.Allocate<unsigned>(MaxIntOperands);
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const AggregateValueExpression &OE = cast<AggregateValueExpression>(Other);
+    return NumIntOperands == OE.NumIntOperands &&
+           std::equal(int_op_begin(), int_op_end(), OE.int_op_begin());
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(),
+                        hash_combine_range(int_op_begin(), int_op_end()));
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeAggregateValue, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << ", intoperands = {";
+    for (unsigned i = 0, e = int_op_size(); i != e; ++i) {
+      OS << "[" << i << "] = " << IntOperands[i] << "  ";
+    }
+    OS << "}";
+  }
+};
+
+class int_op_inserter
+    : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+  using Container = AggregateValueExpression;
+
+  Container *AVE;
+
+public:
+  explicit int_op_inserter(AggregateValueExpression &E) : AVE(&E) {}
+  explicit int_op_inserter(AggregateValueExpression *E) : AVE(E) {}
+
+  int_op_inserter &operator=(unsigned int val) {
+    AVE->int_op_push_back(val);
+    return *this;
+  }
+  int_op_inserter &operator*() { return *this; }
+  int_op_inserter &operator++() { return *this; }
+  int_op_inserter &operator++(int) { return *this; }
+};
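+
+// int_op_inserter plays the same role for the integer operands, e.g. copying
+// the indices of an extractvalue (an editor's sketch; EVI and AVE stand for a
+// caller's ExtractValueInst and AggregateValueExpression):
+//
+//   AVE->allocateIntOperands(Allocator);
+//   std::copy(EVI->idx_begin(), EVI->idx_end(), int_op_inserter(AVE));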
+
+class PHIExpression final : public BasicExpression {
+private:
+  BasicBlock *BB;
+
+public:
+  PHIExpression(unsigned NumOperands, BasicBlock *B)
+      : BasicExpression(NumOperands, ET_Phi), BB(B) {}
+  PHIExpression() = delete;
+  PHIExpression(const PHIExpression &) = delete;
+  PHIExpression &operator=(const PHIExpression &) = delete;
+  ~PHIExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Phi;
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const PHIExpression &OE = cast<PHIExpression>(Other);
+    return BB == OE.BB;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(), BB);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypePhi, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << "bb = " << BB;
+  }
+};
+
+class DeadExpression final : public Expression {
+public:
+  DeadExpression() : Expression(ET_Dead) {}
+  DeadExpression(const DeadExpression &) = delete;
+  DeadExpression &operator=(const DeadExpression &) = delete;
+
+  static bool classof(const Expression *E) {
+    return E->getExpressionType() == ET_Dead;
+  }
+};
+
+class VariableExpression final : public Expression {
+private:
+  Value *VariableValue;
+
+public:
+  VariableExpression(Value *V) : Expression(ET_Variable), VariableValue(V) {}
+  VariableExpression() = delete;
+  VariableExpression(const VariableExpression &) = delete;
+  VariableExpression &operator=(const VariableExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Variable;
+  }
+
+  Value *getVariableValue() const { return VariableValue; }
+  void setVariableValue(Value *V) { VariableValue = V; }
+
+  bool equals(const Expression &Other) const override {
+    const VariableExpression &OC = cast<VariableExpression>(Other);
+    return VariableValue == OC.VariableValue;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(),
+                        VariableValue->getType(), VariableValue);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeVariable, ";
+    this->Expression::printInternal(OS, false);
+    OS << " variable = " << *VariableValue;
+  }
+};
+
+class ConstantExpression final : public Expression {
+private:
+  Constant *ConstantValue = nullptr;
+
+public:
+  ConstantExpression() : Expression(ET_Constant) {}
+  ConstantExpression(Constant *constantValue)
+      : Expression(ET_Constant), ConstantValue(constantValue) {}
+  ConstantExpression(const ConstantExpression &) = delete;
+  ConstantExpression &operator=(const ConstantExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Constant;
+  }
+
+  Constant *getConstantValue() const { return ConstantValue; }
+  void setConstantValue(Constant *V) { ConstantValue = V; }
+
+  bool equals(const Expression &Other) const override {
+    const ConstantExpression &OC = cast<ConstantExpression>(Other);
+    return ConstantValue == OC.ConstantValue;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(),
+                        ConstantValue->getType(), ConstantValue);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeConstant, ";
+    this->Expression::printInternal(OS, false);
+    OS << " constant = " << *ConstantValue;
+  }
+};
+
+class UnknownExpression final : public Expression {
+private:
+  Instruction *Inst;
+
+public:
+  UnknownExpression(Instruction *I) : Expression(ET_Unknown), Inst(I) {}
+  UnknownExpression() = delete;
+  UnknownExpression(const UnknownExpression &) = delete;
+  UnknownExpression &operator=(const UnknownExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Unknown;
+  }
+
+  Instruction *getInstruction() const { return Inst; }
+  void setInstruction(Instruction *I) { Inst = I; }
+
+  bool equals(const Expression &Other) const override {
+    const auto &OU = cast<UnknownExpression>(Other);
+    return Inst == OU.Inst;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(), Inst);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeUnknown, ";
+    this->Expression::printInternal(OS, false);
+    OS << " inst = " << *Inst;
+  }
+};
+
+} // end namespace GVNExpression
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h
new file mode 100644
index 0000000..2bc0940
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h
@@ -0,0 +1,32 @@
+//===- GuardWidening.h - Guard Widening Pass --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Guard widening is an optimization over the @llvm.experimental.guard intrinsic
+// that (optimistically) combines multiple guards into one to have fewer checks
+// at runtime.
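+//
+// Sketched in C-like pseudocode (an editor's illustration; guard(c) stands
+// for a call to the intrinsic, which deoptimizes when c is false):
+//
+//   guard(cond0);
+//   guard(cond1);
+//
+// becomes
+//
+//   guard(cond0 & cond1);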
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
+#define LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct GuardWideningPass : public PassInfoMixin<GuardWideningPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h b/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h
new file mode 100644
index 0000000..fad00d8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h
@@ -0,0 +1,30 @@
+//===- IVUsersPrinter.h - Induction Variable Users Printing -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
+#define LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
+
+#include "llvm/Analysis/IVUsers.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Printer pass for the \c IVUsers for a loop.
+class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
new file mode 100644
index 0000000..e321c8f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
@@ -0,0 +1,34 @@
+//===- IndVarSimplify.h - Induction Variable Simplification -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Induction Variable
+// Simplification pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
+#define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
new file mode 100644
index 0000000..311c549
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
@@ -0,0 +1,31 @@
+//===- InductiveRangeCheckElimination.h - IRCE ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Inductive Range Check Elimination
+// loop pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class IRCEPass : public PassInfoMixin<IRCEPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
new file mode 100644
index 0000000..b3493a2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -0,0 +1,161 @@
+//===- JumpThreading.h - thread control through conditional BBs -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// See the comments on JumpThreadingPass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
+#define LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class BasicBlock;
+class BinaryOperator;
+class BranchInst;
+class CmpInst;
+class Constant;
+class DeferredDominance;
+class Function;
+class Instruction;
+class IntrinsicInst;
+class LazyValueInfo;
+class LoadInst;
+class PHINode;
+class TargetLibraryInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by
+/// JumpThreading.
+/// These are implementation details and should not be used by clients.
+namespace jumpthreading {
+
+// These are at namespace scope so static functions can use them too.
+using PredValueInfo = SmallVectorImpl<std::pair<Constant *, BasicBlock *>>;
+using PredValueInfoTy = SmallVector<std::pair<Constant *, BasicBlock *>, 8>;
+
+// This is used to keep track of what kind of constant we're currently hoping
+// to find.
+enum ConstantPreference { WantInteger, WantBlockAddress };
+
+} // end namespace jumpthreading
+
+/// This pass performs 'jump threading', which looks at blocks that have
+/// multiple predecessors and multiple successors.  If one or more of the
+/// predecessors of the block can be proven to always jump to one of the
+/// successors, we forward the edge from the predecessor to the successor by
+/// duplicating the contents of this block.
+///
+/// An example of when this can occur is code like this:
+///
+///   if () { ...
+///     X = 4;
+///   }
+///   if (X < 3) {
+///
+/// In this case, the unconditional branch at the end of the first if can be
+/// revectored to the false side of the second if.
+class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
+  TargetLibraryInfo *TLI;
+  LazyValueInfo *LVI;
+  AliasAnalysis *AA;
+  DeferredDominance *DDT;
+  std::unique_ptr<BlockFrequencyInfo> BFI;
+  std::unique_ptr<BranchProbabilityInfo> BPI;
+  bool HasProfileData = false;
+  bool HasGuards = false;
+#ifdef NDEBUG
+  SmallPtrSet<const BasicBlock *, 16> LoopHeaders;
+#else
+  SmallSet<AssertingVH<const BasicBlock>, 16> LoopHeaders;
+#endif
+  DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
+
+  unsigned BBDupThreshold;
+
+  // RAII helper for updating the recursion stack.
+  struct RecursionSetRemover {
+    DenseSet<std::pair<Value *, BasicBlock *>> &TheSet;
+    std::pair<Value *, BasicBlock *> ThePair;
+
+    RecursionSetRemover(DenseSet<std::pair<Value *, BasicBlock *>> &S,
+                        std::pair<Value *, BasicBlock *> P)
+        : TheSet(S), ThePair(P) {}
+
+    ~RecursionSetRemover() { TheSet.erase(ThePair); }
+  };
+
+public:
+  JumpThreadingPass(int T = -1);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, TargetLibraryInfo *TLI_, LazyValueInfo *LVI_,
+               AliasAnalysis *AA_, DeferredDominance *DDT_,
+               bool HasProfileData_, std::unique_ptr<BlockFrequencyInfo> BFI_,
+               std::unique_ptr<BranchProbabilityInfo> BPI_);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  void releaseMemory() {
+    BFI.reset();
+    BPI.reset();
+  }
+
+  void FindLoopHeaders(Function &F);
+  bool ProcessBlock(BasicBlock *BB);
+  bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
+                  BasicBlock *SuccBB);
+  bool DuplicateCondBranchOnPHIIntoPred(
+      BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs);
+
+  bool
+  ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
+                                  jumpthreading::PredValueInfo &Result,
+                                  jumpthreading::ConstantPreference Preference,
+                                  Instruction *CxtI = nullptr);
+  bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
+                              jumpthreading::ConstantPreference Preference,
+                              Instruction *CxtI = nullptr);
+
+  bool ProcessBranchOnPHI(PHINode *PN);
+  bool ProcessBranchOnXOR(BinaryOperator *BO);
+  bool ProcessImpliedCondition(BasicBlock *BB);
+
+  bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
+  bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
+  bool TryToUnfoldSelectInCurrBB(BasicBlock *BB);
+
+  bool ProcessGuards(BasicBlock *BB);
+  bool ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard, BranchInst *BI);
+
+private:
+  BasicBlock *SplitBlockPreds(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+                              const char *Suffix);
+  void UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
+                                    BasicBlock *NewBB, BasicBlock *SuccBB);
+  /// Check if the block has profile metadata for its outgoing edges.
+  bool doesBlockHaveProfileData(BasicBlock *BB);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
new file mode 100644
index 0000000..68ad190
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
@@ -0,0 +1,50 @@
+//===- LICM.h - Loop Invariant Code Motion Pass -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs loop invariant code motion, attempting to remove as much
+// code from the body of a loop as possible.  It does this by either hoisting
+// code into the preheader block, or by sinking code to the exit blocks if it is
+// safe.  This pass also promotes must-aliased memory locations in the loop to
+// live in registers, thus hoisting and sinking "invariant" loads and stores.
+//
+// This pass uses alias analysis for two purposes:
+//
+//  1. Moving loop invariant loads and calls out of loops.  If we can determine
+//     that a load or call inside of a loop never aliases anything stored to,
+//     we can hoist it or sink it like any other instruction.
+//  2. Scalar Promotion of Memory - If there is a store instruction inside of
+//     the loop, we try to move the store to happen AFTER the loop instead of
+//     inside of the loop.  This can only happen if a few conditions are true:
+//       A. The pointer stored through is loop invariant
+//       B. There are no stores or loads in the loop which _may_ alias the
+//          pointer.  There are no calls in the loop which mod/ref the pointer.
+//     If these conditions are true, we can promote the loads and stores in the
+//     loop of the pointer to use a temporary alloca'd variable.  We then use
+//     the SSAUpdater to construct the appropriate SSA form for the value.
+//
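+// For example (an editor's illustration, not upstream text), when conditions
+// A and B above hold for the pointer P, a loop such as
+//
+//   for (int i = 0; i != n; ++i)
+//     *P += a[i];
+//
+// is promoted to
+//
+//   int tmp = *P;
+//   for (int i = 0; i != n; ++i)
+//     tmp += a[i];
+//   *P = tmp;
+//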
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LICM_H
+#define LLVM_TRANSFORMS_SCALAR_LICM_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs Loop Invariant Code Motion Pass.
+class LICMPass : public PassInfoMixin<LICMPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LICM_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h
new file mode 100644
index 0000000..5eddd5f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h
@@ -0,0 +1,31 @@
+//===- llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// \brief Printer pass for the \c LoopAccessInfo results.
+class LoopAccessInfoPrinterPass
+    : public PassInfoMixin<LoopAccessInfoPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
new file mode 100644
index 0000000..12c7a03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
@@ -0,0 +1,33 @@
+//===-------- LoopDataPrefetch.h - Loop Data Prefetching Pass ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Loop Data Prefetching Pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// An optimization pass inserting data prefetches in loops.
+class LoopDataPrefetchPass : public PassInfoMixin<LoopDataPrefetchPass> {
+public:
+  LoopDataPrefetchPass() = default;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h
new file mode 100644
index 0000000..7b8cb1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h
@@ -0,0 +1,35 @@
+//===- LoopDeletion.h - Loop Deletion ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Deletion Pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
+public:
+  LoopDeletionPass() = default;
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h
new file mode 100644
index 0000000..2bf1c9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h
@@ -0,0 +1,33 @@
+//===- LoopDistribute.h - Loop Distribution Pass ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Distribution Pass.  Its main
+// focus is to distribute loops that cannot be vectorized due to dependence
+// cycles.  It tries to isolate the offending dependences into a new loop,
+// allowing vectorization of the remaining parts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class LoopDistributePass : public PassInfoMixin<LoopDistributePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
new file mode 100644
index 0000000..7added8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
@@ -0,0 +1,36 @@
+//===- LoopIdiomRecognize.h - Loop Idiom Recognize Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements an idiom recognizer that transforms simple loops into a
+// non-loop form.  In cases that this kicks in, it can be a significant
+// performance win.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+/// Performs Loop Idiom Recognize Pass.
+class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h
new file mode 100644
index 0000000..b0514a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h
@@ -0,0 +1,34 @@
+//===- LoopLoadElimination.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This header defines the LoopLoadEliminationPass object. This pass forwards
+/// loaded values around loop backedges to allow their use in subsequent
+/// iterations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// Pass to forward loads in a loop around the backedge to subsequent
+/// iterations.
+struct LoopLoadEliminationPass : public PassInfoMixin<LoopLoadEliminationPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
new file mode 100644
index 0000000..56a45ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -0,0 +1,406 @@
+//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing a pipeline of passes over loops
+/// in LLVM IR.
+///
+/// The primary loop pass pipeline is managed in a very particular way to
+/// provide a set of core guarantees:
+/// 1) Loops are, where possible, in simplified form.
+/// 2) Loops are *always* in LCSSA form.
+/// 3) A collection of Loop-specific analysis results are available:
+///    - LoopInfo
+///    - DominatorTree
+///    - ScalarEvolution
+///    - AAManager
+/// 4) All loop passes preserve #1 (where possible), #2, and #3.
+/// 5) Loop passes run over each loop in the loop nest from the innermost to
+///    the outermost. Specifically, all inner loops are processed before
+///    passes run over outer loops. When running the pipeline across an inner
+///    loop creates new inner loops, those are added and processed in this
+///    order as well.
+///
+/// This process is designed to facilitate transformations which simplify,
+/// reduce, and remove loops. For passes which are more oriented towards
+/// optimizing loops, especially optimizing loop *nests* instead of single
+/// loops in isolation, this framework is less interesting.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/LCSSA.h"
+#include "llvm/Transforms/Utils/LoopSimplify.h"
+
+namespace llvm {
+
+// Forward declarations of an update tracking API used in the pass manager.
+class LPMUpdater;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+            LPMUpdater &>::run(Loop &InitialL, LoopAnalysisManager &AM,
+                               LoopStandardAnalysisResults &AnalysisResults,
+                               LPMUpdater &U);
+extern template class PassManager<Loop, LoopAnalysisManager,
+                                  LoopStandardAnalysisResults &, LPMUpdater &>;
+
+/// \brief The Loop pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of Loop passes over each Loop that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+                    LPMUpdater &>
+    LoopPassManager;
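+
+// Example of building and embedding a loop pipeline (an editor's sketch, not
+// part of the upstream header; it assumes the createFunctionToLoopPassAdaptor
+// helper declared elsewhere in this file):
+//
+//   LoopPassManager LPM;
+//   LPM.addPass(LICMPass());
+//   LPM.addPass(IndVarSimplifyPass());
+//   FunctionPassManager FPM;
+//   FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));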
+
+/// A partial specialization of the require analysis template pass to forward
+/// the extra parameters from a transformation's run method to the
+/// AnalysisManager's getResult.
+template <typename AnalysisT>
+struct RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                           LoopStandardAnalysisResults &, LPMUpdater &>
+    : PassInfoMixin<
+          RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                              LoopStandardAnalysisResults &, LPMUpdater &>> {
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &) {
+    (void)AM.template getResult<AnalysisT>(L, AR);
+    return PreservedAnalyses::all();
+  }
+};
+
+/// An alias template to easily name a require analysis loop pass.
+template <typename AnalysisT>
+using RequireAnalysisLoopPass =
+    RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                        LoopStandardAnalysisResults &, LPMUpdater &>;
+
+namespace internal {
+/// Helper to implement appending of loops onto a worklist.
+///
+/// We want to process loops in postorder, but the worklist is a LIFO data
+/// structure, so we append to it in *reverse* postorder.
+///
+/// For trees, a preorder traversal is a viable reverse postorder, so we
+/// actually append using a preorder walk algorithm.
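+///
+/// Worked example (editor's note): given top-level loops A and B in that
+/// order, where A contains A1, the reversed walk inserts B, then A, then A1;
+/// a LIFO pop therefore visits A1, then A, then B, which is postorder.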
+template <typename RangeT>
+inline void appendLoopsToWorklist(RangeT &&Loops,
+                                  SmallPriorityWorklist<Loop *, 4> &Worklist) {
+  // We use an internal worklist to build up the preorder traversal without
+  // recursion.
+  SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
+
+  // We walk the initial sequence of loops in reverse because we generally want
+  // to visit defs before uses and the worklist is LIFO.
+  for (Loop *RootL : reverse(Loops)) {
+    assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
+    assert(PreOrderWorklist.empty() &&
+           "Must start with an empty preorder walk worklist.");
+    PreOrderWorklist.push_back(RootL);
+    do {
+      Loop *L = PreOrderWorklist.pop_back_val();
+      PreOrderWorklist.append(L->begin(), L->end());
+      PreOrderLoops.push_back(L);
+    } while (!PreOrderWorklist.empty());
+
+    Worklist.insert(std::move(PreOrderLoops));
+    PreOrderLoops.clear();
+  }
+}
+} // end namespace internal
+
+template <typename LoopPassT> class FunctionToLoopPassAdaptor;
+
+/// This class provides an interface for updating the loop pass manager based
+/// on mutations to the loop nest.
+///
+/// A reference to an instance of this class is passed as an argument to each
+/// Loop pass, and Loop passes should use it to update LPM infrastructure if
+/// they modify the loop nest structure.
+class LPMUpdater {
+public:
+  /// This can be queried by loop passes which run other loop passes (like pass
+  /// managers) to know whether the loop needs to be skipped due to updates to
+  /// the loop nest.
+  ///
+  /// If this returns true, the loop object may have been deleted, so passes
+  /// should take care not to touch the object.
+  bool skipCurrentLoop() const { return SkipCurrentLoop; }
+
+  /// Loop passes should use this method to indicate they have deleted a loop
+  /// from the nest.
+  ///
+  /// Note that this loop must either be the current loop or a subloop of the
+  /// current loop. This routine must be called prior to removing the loop from
+  /// the loop nest.
+  ///
+  /// If this is called for the current loop, in addition to clearing any
+  /// state, this routine will mark that the current loop should be skipped by
+  /// the rest of the pass management infrastructure.
+  void markLoopAsDeleted(Loop &L, llvm::StringRef Name) {
+    LAM.clear(L, Name);
+    assert((&L == CurrentL || CurrentL->contains(&L)) &&
+           "Cannot delete a loop outside of the "
+           "subloop tree currently being processed.");
+    if (&L == CurrentL)
+      SkipCurrentLoop = true;
+  }
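+
+  // A hedged usage sketch: capture the loop's name up front, since the name
+  // cannot be recovered from the loop once deletion is underway:
+  //
+  //   std::string Name = L.getName().str();
+  //   Updater.markLoopAsDeleted(L, Name); // then remove L from the nest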
+
+  /// Loop passes should use this method to indicate they have added new child
+  /// loops of the current loop.
+  ///
+  /// \p NewChildLoops must contain only the immediate children. Any nested
+  /// loops within them will be visited in postorder as usual for the loop pass
+  /// manager.
+  void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
+    // Insert ourselves back into the worklist first, as this loop should be
+    // revisited after all the children have been processed.
+    Worklist.insert(CurrentL);
+
+#ifndef NDEBUG
+    for (Loop *NewL : NewChildLoops)
+      assert(NewL->getParentLoop() == CurrentL && "All of the new loops must "
+                                                  "be immediate children of "
+                                                  "the current loop!");
+#endif
+
+    internal::appendLoopsToWorklist(NewChildLoops, Worklist);
+
+    // Also skip further processing of the current loop--it will be revisited
+    // after all of its newly added children are accounted for.
+    SkipCurrentLoop = true;
+  }
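+
+  // A hedged sketch: a pass that forms a new immediate child loop NewL
+  // (NewL is illustrative) under the current loop would report it as:
+  //
+  //   U.addChildLoops({NewL});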
+
+  /// Loop passes should use this method to indicate they have added new
+  /// sibling loops to the current loop.
+  ///
+  /// \p NewSibLoops must only contain the immediate sibling loops. Any nested
+  /// loops within them will be visited in postorder as usual for the loop pass
+  /// manager.
+  void addSiblingLoops(ArrayRef<Loop *> NewSibLoops) {
+#ifndef NDEBUG
+    for (Loop *NewL : NewSibLoops)
+      assert(NewL->getParentLoop() == ParentL &&
+             "All of the new loops must be siblings of the current loop!");
+#endif
+
+    internal::appendLoopsToWorklist(NewSibLoops, Worklist);
+
+    // No need to skip the current loop or revisit it, as sibling loops
+    // shouldn't impact anything.
+  }
+
+  /// Restart the current loop.
+  ///
+  /// Loop passes should call this method to indicate the current loop has been
+  /// sufficiently changed that it should be re-visited from the beginning of
+  /// the loop pass pipeline rather than continuing.
+  void revisitCurrentLoop() {
+    // Tell the currently in-flight pipeline to stop running.
+    SkipCurrentLoop = true;
+
+    // And insert ourselves back into the worklist.
+    Worklist.insert(CurrentL);
+  }
+
+private:
+  template <typename LoopPassT> friend class llvm::FunctionToLoopPassAdaptor;
+
+  /// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
+  SmallPriorityWorklist<Loop *, 4> &Worklist;
+
+  /// The analysis manager for use in the current loop nest.
+  LoopAnalysisManager &LAM;
+
+  Loop *CurrentL;
+  bool SkipCurrentLoop;
+
+#ifndef NDEBUG
+  // In debug builds we also track the parent loop to implement asserts even in
+  // the face of loop deletion.
+  Loop *ParentL;
+#endif
+
+  LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
+             LoopAnalysisManager &LAM)
+      : Worklist(Worklist), LAM(LAM) {}
+};
+
+/// \brief Adaptor that maps from a function to its loops.
+///
+/// Designed to allow composition of a LoopPass(Manager) and a
+/// FunctionPassManager. Note that if this pass is constructed with a \c
+/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
+/// analysis prior to running the loop passes over the function to enable a \c
+/// LoopAnalysisManager to be used within this run safely.
+template <typename LoopPassT>
+class FunctionToLoopPassAdaptor
+    : public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
+public:
+  explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false)
+      : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging) {
+    LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
+    LoopCanonicalizationFPM.addPass(LCSSAPass());
+  }
+
+  /// \brief Runs the loop passes across every loop in the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
+    // Before we even compute any loop analyses, first run a miniature function
+    // pass pipeline to put loops into their canonical form. Note that we can
+    // directly build up function analyses after this as the function pass
+    // manager handles all the invalidation at that layer.
+    PreservedAnalyses PA = LoopCanonicalizationFPM.run(F, AM);
+
+    // Get the loop structure for this function
+    LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
+
+    // If there are no loops, there is nothing to do here.
+    if (LI.empty())
+      return PA;
+
+    // Get the analysis results needed by loop passes.
+    MemorySSA *MSSA = EnableMSSALoopDependency
+                          ? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA())
+                          : nullptr;
+    LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
+                                       AM.getResult<AssumptionAnalysis>(F),
+                                       AM.getResult<DominatorTreeAnalysis>(F),
+                                       AM.getResult<LoopAnalysis>(F),
+                                       AM.getResult<ScalarEvolutionAnalysis>(F),
+                                       AM.getResult<TargetLibraryAnalysis>(F),
+                                       AM.getResult<TargetIRAnalysis>(F),
+                                       MSSA};
+
+    // Setup the loop analysis manager from its proxy. It is important that
+    // this is only done when there are loops to process and we have built the
+    // LoopStandardAnalysisResults object. The loop analyses cached in this
+    // manager have access to those analysis results and so it must invalidate
+    // itself when they go away.
+    LoopAnalysisManager &LAM =
+        AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
+
+    // A postorder worklist of loops to process.
+    SmallPriorityWorklist<Loop *, 4> Worklist;
+
+    // Register the worklist and loop analysis manager so that loop passes can
+    // update them when they mutate the loop nest structure.
+    LPMUpdater Updater(Worklist, LAM);
+
+    // Add the loop nests in the reverse order of LoopInfo. For some reason,
+    // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For
+    // the purpose of unrolling, loop deletion, and LICM, we largely want to
+    // work forward across the CFG so that we visit defs before uses and can
+    // propagate simplifications from one loop nest into the next.
+    // FIXME: Consider changing the order in LoopInfo.
+    internal::appendLoopsToWorklist(reverse(LI), Worklist);
+
+    do {
+      Loop *L = Worklist.pop_back_val();
+
+      // Reset the update structure for this loop.
+      Updater.CurrentL = L;
+      Updater.SkipCurrentLoop = false;
+
+#ifndef NDEBUG
+      // Save a parent loop pointer for asserts.
+      Updater.ParentL = L->getParentLoop();
+
+      // Verify the loop structure and LCSSA form before visiting the loop.
+      L->verifyLoop();
+      assert(L->isRecursivelyLCSSAForm(LAR.DT, LI) &&
+             "Loops must remain in LCSSA form!");
+#endif
+
+      PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
+      // FIXME: We should verify the set of analyses relevant to Loop passes
+      // are preserved.
+
+      // If the loop hasn't been deleted, we need to handle invalidation here.
+      if (!Updater.skipCurrentLoop())
+        // We know that the loop pass couldn't have invalidated any other
+        // loop's analyses (that's the contract of a loop pass), so directly
+        // handle the loop analysis manager's invalidation here.
+        LAM.invalidate(*L, PassPA);
+
+      // Then intersect the preserved set so that invalidation of module
+      // analyses will eventually occur when the module pass completes.
+      PA.intersect(std::move(PassPA));
+    } while (!Worklist.empty());
+
+    // By definition we preserve the proxy. We also preserve all analyses on
+    // Loops. This precludes *any* invalidation of loop analyses by the proxy,
+    // but that's OK because we've taken care to invalidate analyses in the
+    // loop analysis manager incrementally above.
+    PA.preserveSet<AllAnalysesOn<Loop>>();
+    PA.preserve<LoopAnalysisManagerFunctionProxy>();
+    // We also preserve the set of standard analyses.
+    PA.preserve<DominatorTreeAnalysis>();
+    PA.preserve<LoopAnalysis>();
+    PA.preserve<ScalarEvolutionAnalysis>();
+    // FIXME: Uncomment this when all loop passes preserve MemorySSA
+    // PA.preserve<MemorySSAAnalysis>();
+    // FIXME: What we really want to do here is preserve an AA category, but
+    // that concept doesn't exist yet.
+    PA.preserve<AAManager>();
+    PA.preserve<BasicAA>();
+    PA.preserve<GlobalsAA>();
+    PA.preserve<SCEVAA>();
+    return PA;
+  }
+
+private:
+  LoopPassT Pass;
+
+  FunctionPassManager LoopCanonicalizationFPM;
+};
+
+/// \brief A function to deduce a loop pass type and wrap it in the templated
+/// adaptor.
+template <typename LoopPassT>
+FunctionToLoopPassAdaptor<LoopPassT>
+createFunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false) {
+  return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), DebugLogging);
+}
+
+/// \brief Pass for printing a loop's contents as textual IR.
+class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
+  raw_ostream &OS;
+  std::string Banner;
+
+public:
+  PrintLoopPass();
+  PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
+                        LoopStandardAnalysisResults &, LPMUpdater &);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h
new file mode 100644
index 0000000..57398bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h
@@ -0,0 +1,32 @@
+//===- LoopPredication.h - Guard based loop predication pass ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to convert loop-variant range checks into loop-invariant
+// ones by widening the checks across loop iterations.
+//
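+// For example (an illustrative sketch, not the exact algorithm): given
+//
+//   for (i = 0; i < n; i++)
+//     guard(i < len);
+//
+// the check varies with i, but the widened, loop-invariant check
+// guard(n - 1 < len), valid whenever the loop body runs at least once,
+// subsumes every per-iteration check and can be hoisted out of the loop.
+//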
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs Loop Predication Pass.
+class LoopPredicationPass : public PassInfoMixin<LoopPredicationPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h
new file mode 100644
index 0000000..ea8d561
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h
@@ -0,0 +1,35 @@
+//===- LoopRotation.h - Loop Rotation -------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Rotation pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// A simple loop rotation transformation.
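+///
+/// For example (a sketch): rotation turns the top-tested loop on the left
+/// into the guarded, bottom-tested form on the right, duplicating the header
+/// test:
+///
+///   while (cond)        if (cond)
+///     body;        =>     do { body; } while (cond);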
+class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
+public:
+  LoopRotatePass(bool EnableHeaderDuplication = true);
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+
+private:
+  const bool EnableHeaderDuplication;
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
new file mode 100644
index 0000000..7628c74
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
@@ -0,0 +1,34 @@
+//===- LoopSimplifyCFG.h - Loop CFG Simplification Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Loop SimplifyCFG Pass. This pass is responsible for
+// basic loop CFG cleanup, primarily to assist other loop passes. If you
+// encounter a noncanonical CFG construct that causes another loop pass to
+// perform suboptimally, this is the place to fix it up.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs basic CFG simplifications to assist other loop passes.
+class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h
new file mode 100644
index 0000000..371a7c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h
@@ -0,0 +1,40 @@
+//===- LoopSink.h - Loop Sink Pass ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Sink pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// A pass that does profile-guided sinking of instructions into loops.
+///
+/// This is a function pass as it shouldn't be composed into any kind of
+/// unified loop pass pipeline. The goal of it is to sink code into loops that
+/// is loop invariant but only required within the loop body when doing so
+/// reduces the global expected dynamic frequency with which it executes.
+/// A classic example is an extremely cold branch within a loop body.
+///
+/// We do this as a separate pass so that during normal optimization all
+/// invariant operations can be held outside the loop body to simplify
+/// fundamental analyses and transforms of the loop.
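+///
+/// For example (a sketch): with x computed outside the loop but used only on
+/// a cold path inside it, sinking reduces the expected dynamic cost:
+///
+///   x = expensive();           for (...)
+///   for (...)            =>      if (cold)              // rarely taken
+///     if (cold) use(x);            use(expensive());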
+class LoopSinkPass : public PassInfoMixin<LoopSinkPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
new file mode 100644
index 0000000..62c038a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
@@ -0,0 +1,42 @@
+//===- LoopStrengthReduce.h - Loop Strength Reduce Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation analyzes and transforms the induction variables (and
+// computations derived from them) into forms suitable for efficient execution
+// on the target.
+//
+// This pass performs a strength reduction on array references inside loops
+// that have the loop induction variable as one or more of their components.
+// It rewrites expressions to take advantage of scaled-index addressing modes
+// available on the target, and it performs a variety of other optimizations
+// related to loop induction variables.
+//
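+// A classic illustration (a sketch, not taken from this pass's tests):
+//
+//   for (i = 0; i < n; i++)        for (p = &a[0]; p != &a[n]; p++)
+//     use(a[i]);               =>    use(*p);
+//
+// replacing the per-iteration multiply-and-add address computation with a
+// strength-reduced induction pointer.
+//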
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+/// Performs Loop Strength Reduce Pass.
+class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
new file mode 100644
index 0000000..9848e0d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -0,0 +1,49 @@
+//===- LoopUnrollPass.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class Loop;
+class LPMUpdater;
+
+/// Loop unroll pass that only does full loop unrolling.
+class LoopFullUnrollPass : public PassInfoMixin<LoopFullUnrollPass> {
+  const int OptLevel;
+
+public:
+  explicit LoopFullUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+/// Loop unroll pass that will support both full and partial unrolling.
+/// It is a function pass so that it can access function and module analyses.
+/// It will also put loops into canonical form (simplified and LCSSA).
+class LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
+  const int OptLevel;
+
+public:
+  /// This uses the target information (or flags) to control the thresholds for
+  /// different unrolling strategies but supports all of them.
+  explicit LoopUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
new file mode 100644
index 0000000..a4a2e7a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
@@ -0,0 +1,29 @@
+//===- LowerAtomic.h - Lower atomic intrinsics ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass lowers atomic intrinsics to non-atomic form for use in a known
+/// non-preemptible environment.
+///
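+/// For example (a sketch): a cmpxchg becomes a plain load, compare, and
+/// conditional store, and an atomicrmw add becomes load-add-store, with all
+/// atomic ordering constraints dropped.
+///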
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass that lowers atomic intrinsics into non-atomic form.
+class LowerAtomicPass : public PassInfoMixin<LowerAtomicPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
new file mode 100644
index 0000000..ab9dec0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -0,0 +1,37 @@
+//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerExpectIntrinsic pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerExpectIntrinsicPass : PassInfoMixin<LowerExpectIntrinsicPass> {
+  /// \brief Run the pass over the function.
+  ///
+  /// This will lower all of the expect intrinsic calls in this function into
+  /// branch weight metadata. That metadata will subsequently feed the analysis
+  /// of the probabilities and frequencies of the CFG. After running this pass,
+  /// no more expect intrinsics remain, allowing the rest of the optimizer to
+  /// ignore them.
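+  ///
+  /// For example (a sketch, IR abbreviated):
+  ///
+  ///   %e = call i64 @llvm.expect.i64(i64 %v, i64 1)
+  ///   %c = icmp ne i64 %e, 0
+  ///   br i1 %c, label %then, label %else
+  ///
+  /// becomes a compare and branch on %v directly, annotated with !prof
+  /// branch weight metadata biased toward %then.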
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
new file mode 100644
index 0000000..a9f19f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
@@ -0,0 +1,28 @@
+//===--- LowerGuardIntrinsic.h - Lower the guard intrinsic ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the llvm.experimental.guard intrinsic to a conditional call
+// to @llvm.experimental.deoptimize.  Once this happens, the guard can no longer
+// be widened.
+//
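+// For example (a sketch; IR abbreviated and intrinsic name mangling elided):
+//
+//   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"(...) ]
+//
+// becomes
+//
+//   br i1 %c, label %guarded, label %deopt
+// deopt:
+//   call void (...) @llvm.experimental.deoptimize(...) [ "deopt"(...) ]
+//   ret void
+//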
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerGuardIntrinsicPass : PassInfoMixin<LowerGuardIntrinsicPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
new file mode 100644
index 0000000..046c808
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
@@ -0,0 +1,79 @@
+//===- MemCpyOptimizer.h - memcpy optimization ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs various transformations related to eliminating memcpy
+// calls, or transforming sets of stores into memset's.
+//
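+// For example (a sketch): a run of adjacent stores of the same byte value
+//
+//   p[0] = 0; p[1] = 0; /* ... */ p[15] = 0;
+//
+// can become a single memset(p, 0, 16), and a memcpy whose source was just
+// filled by a memset can be rewritten to memset its destination directly.
+//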
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
+#define LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class AssumptionCache;
+class CallInst;
+class DominatorTree;
+class Function;
+class Instruction;
+class MemCpyInst;
+class MemMoveInst;
+class MemoryDependenceResults;
+class MemSetInst;
+class StoreInst;
+class TargetLibraryInfo;
+class Value;
+
+class MemCpyOptPass : public PassInfoMixin<MemCpyOptPass> {
+  MemoryDependenceResults *MD = nullptr;
+  TargetLibraryInfo *TLI = nullptr;
+  std::function<AliasAnalysis &()> LookupAliasAnalysis;
+  std::function<AssumptionCache &()> LookupAssumptionCache;
+  std::function<DominatorTree &()> LookupDomTree;
+
+public:
+  MemCpyOptPass() = default;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for the old PM.
+  bool runImpl(Function &F, MemoryDependenceResults *MD_,
+               TargetLibraryInfo *TLI_,
+               std::function<AliasAnalysis &()> LookupAliasAnalysis_,
+               std::function<AssumptionCache &()> LookupAssumptionCache_,
+               std::function<DominatorTree &()> LookupDomTree_);
+
+private:
+  // Helper functions
+  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
+  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
+  bool processMemCpy(MemCpyInst *M);
+  bool processMemMove(MemMoveInst *M);
+  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
+                            uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
+  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
+  bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
+  bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
+  bool processByValArgument(CallSite CS, unsigned ArgNo);
+  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
+                                    Value *ByteVal);
+
+  bool iterateOnFunction(Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
new file mode 100644
index 0000000..3cad7bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
@@ -0,0 +1,39 @@
+//===- MergedLoadStoreMotion.h - merge and hoist/sink load/stores ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//! \file
+//! \brief This pass performs merges of loads and stores on both sides of a
+//! diamond (hammock). It hoists the loads and sinks the stores.
+//
+// The algorithm iteratively hoists two loads to the same address out of a
+// diamond (hammock) and merges them into a single load in the header.
+// Similarly, it sinks and merges two stores to the tail block (footer). The
+// algorithm iterates over the instructions of one side of the diamond and
+// attempts to find a matching load/store on the other side. It hoists or
+// sinks only when it deems it safe to do so. This optimization helps with,
+// e.g., hiding load latencies, triggering if-conversion, and reducing static
+// code size.
+//
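+// For example (a sketch):
+//
+//   if (c) { x = *p; *q = 1; }        t = *p;              // hoisted load
+//   else   { y = *p; *q = 2; }   =>   if (c) { x = t; v = 1; }
+//                                     else   { y = t; v = 2; }
+//                                     *q = v;              // sunk, merged store
+//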
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
+#define LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class MergedLoadStoreMotionPass
+    : public PassInfoMixin<MergedLoadStoreMotionPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
new file mode 100644
index 0000000..e835bd5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -0,0 +1,189 @@
+//===- NaryReassociate.h - Reassociate n-ary expressions --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates n-ary add expressions and eliminates the redundancy
+// exposed by the reassociation.
+//
+// A motivating example:
+//
+//   void foo(int a, int b) {
+//     bar(a + b);
+//     bar((a + 2) + b);
+//   }
+//
+// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
+// the above code to
+//
+//   int t = a + b;
+//   bar(t);
+//   bar(t + 2);
+//
+// However, the Reassociate pass is unable to do that because it processes each
+// instruction individually and believes (a + 2) + b is the best form according
+// to its rank system.
+//
+// To address this limitation, NaryReassociate reassociates an expression in a
+// form that reuses existing instructions. As a result, NaryReassociate can
+// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
+// (a + b) is computed before.
+//
+// NaryReassociate works as follows. For every instruction in the form of (a +
+// b) + c, it checks whether a + c or b + c is already computed by a dominating
+// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
+// c) + a and removes the redundancy accordingly. To efficiently look up whether
+// an expression is computed before, we store each instruction seen and its SCEV
+// into an SCEV-to-instruction map.
+//
+// Although the algorithm pattern-matches only ternary additions, it
+// automatically handles many >3-ary expressions by walking through the function
+// in the depth-first order. For example, given
+//
+//   (a + c) + d
+//   ((a + b) + c) + d
+//
+// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
+// ((a + c) + b) + d into ((a + c) + d) + b.
+//
+// Finally, the above dominator-based algorithm may need to be run multiple
+// iterations before emitting optimal code. One source of this need is that we
+// only split an operand when it is used only once. The above algorithm can
+// eliminate an instruction and decrease the usage count of its operands. As a
+// result, an instruction that previously had multiple uses may become a
+// single-use instruction and thus eligible for split consideration. For
+// example,
+//
+//   ac = a + c
+//   ab = a + b
+//   abc = ab + c
+//   ab2 = ab + b
+//   ab2c = ab2 + c
+//
+// In the first iteration, we cannot reassociate abc to ac+b because ab is used
+// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
+// result, ab2 becomes dead and ab will be used only once in the second
+// iteration.
+//
+// Limitations and TODO items:
+//
+// 1) We only consider n-ary adds and muls for now. This should be extended
+// and generalized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+#define LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BinaryOperator;
+class DataLayout;
+class DominatorTree;
+class Function;
+class GetElementPtrInst;
+class Instruction;
+class ScalarEvolution;
+class SCEV;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class Type;
+class Value;
+
+class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, AssumptionCache *AC_, DominatorTree *DT_,
+               ScalarEvolution *SE_, TargetLibraryInfo *TLI_,
+               TargetTransformInfo *TTI_);
+
+private:
+  // Runs only one iteration of the dominator-based algorithm. See the header
+  // comments for why we need multiple iterations.
+  bool doOneIteration(Function &F);
+
+  // Reassociates I for better CSE.
+  Instruction *tryReassociate(Instruction *I);
+
+  // Reassociate GEP for better CSE.
+  Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
+
+  // Try splitting GEP at the I-th index and see whether either part can be
+  // CSE'ed. This is a helper function for tryReassociateGEP.
+  //
+  // \p IndexedType The element type indexed by GEP's I-th index. This is
+  //                equivalent to
+  //                  GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
+  //                                      ..., i-th index).
+  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+                                              unsigned I, Type *IndexedType);
+
+  // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
+  // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
+  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+                                              unsigned I, Value *LHS,
+                                              Value *RHS, Type *IndexedType);
+
+  // Reassociate binary operators for better CSE.
+  Instruction *tryReassociateBinaryOp(BinaryOperator *I);
+
+  // A helper function for tryReassociateBinaryOp. LHS and RHS are explicitly
+  // passed.
+  Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
+                                      BinaryOperator *I);
+  // Rewrites I to (LHS op RHS) if LHS is computed already.
+  Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
+                                       BinaryOperator *I);
+
+  // Tries to match Op1 and Op2 by using V.
+  bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);
+
+  // Gets SCEV for (LHS op RHS).
+  const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
+                            const SCEV *RHS);
+
+  // Returns the closest dominator of \c Dominatee that computes
+  // \c CandidateExpr. Returns null if not found.
+  Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+                                            Instruction *Dominatee);
+
+  // GetElementPtrInst implicitly sign-extends an index if the index is shorter
+  // than the pointer size. This function returns whether Index is shorter than
+  // GEP's pointer size, i.e., whether Index needs to be sign-extended in order
+  // to be an index of GEP.
+  bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
+
+  AssumptionCache *AC;
+  const DataLayout *DL;
+  DominatorTree *DT;
+  ScalarEvolution *SE;
+  TargetLibraryInfo *TLI;
+  TargetTransformInfo *TTI;
+
+  // A lookup table quickly telling which instructions compute the given SCEV.
+  // Note that there can be multiple instructions at different locations
+  // computing the same SCEV, so we map a SCEV to an instruction list. For
+  // example,
+  //
+  //   if (p1)
+  //     foo(a + b);
+  //   if (p2)
+  //     bar(a + b);
+  DenseMap<const SCEV *, SmallVector<WeakTrackingVH, 2>> SeenExprs;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h b/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h
new file mode 100644
index 0000000..05db255
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h
@@ -0,0 +1,33 @@
+//===- NewGVN.h - Global Value Numbering Pass -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for LLVM's Global Value Numbering pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+#define LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class NewGVNPass : public PassInfoMixin<NewGVNPass> {
+public:
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
new file mode 100644
index 0000000..7f73831
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
@@ -0,0 +1,30 @@
+//===--- PartiallyInlineLibCalls.h - Partially inline libcalls --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to partially inline the fast path of well-known library
+// functions, such as using square-root instructions for cases where sqrt()
+// does not need to set errno.
+//
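+// For example (a sketch; the fast-path instruction is hypothetical and
+// target-dependent):
+//
+//   r = fsqrt(x);    // inlined hardware square root
+//   if (r != r)      // NaN: the operand was out of sqrt's domain
+//     r = sqrt(x);   // slow-path libcall, sets errno
+//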
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
+#define LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class PartiallyInlineLibCallsPass
+    : public PassInfoMixin<PartiallyInlineLibCallsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
new file mode 100644
index 0000000..9997dfa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
@@ -0,0 +1,120 @@
+//===- Reassociate.h - Reassociate binary expressions -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates commutative expressions in an order that is designed
+// to promote better constant propagation, GCSE, LICM, PRE, etc.
+//
+// For example: 4 + (x + 5) -> x + (4 + 5)
+//
+// In the implementation of this algorithm, constants are assigned rank = 0,
+// function arguments are rank = 1, and other values are assigned ranks
+// corresponding to the reverse post order traversal of current function
+// (starting at 2), which effectively gives values in deep loops higher rank
+// than values not in loops.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
+#define LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class APInt;
+class BasicBlock;
+class BinaryOperator;
+class Function;
+class Instruction;
+class Value;
+
+/// A private "module" namespace for types and utilities used by Reassociate.
+/// These are implementation details and should not be used by clients.
+namespace reassociate {
+
+struct ValueEntry {
+  unsigned Rank;
+  Value *Op;
+
+  ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {}
+};
+
+inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) {
+  return LHS.Rank > RHS.Rank; // Sort so that highest rank goes to start.
+}
+
+/// \brief Utility class representing a base and exponent pair which form one
+/// factor of some product.
+struct Factor {
+  Value *Base;
+  unsigned Power;
+
+  Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {}
+};
+
+class XorOpnd;
+
+} // end namespace reassociate
+
+/// Reassociate commutative expressions.
+class ReassociatePass : public PassInfoMixin<ReassociatePass> {
+  DenseMap<BasicBlock *, unsigned> RankMap;
+  DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
+  SetVector<AssertingVH<Instruction>> RedoInsts;
+
+  // Arbitrary, but prevents quadratic behavior.
+  static const unsigned GlobalReassociateLimit = 10;
+  static const unsigned NumBinaryOps =
+      Instruction::BinaryOpsEnd - Instruction::BinaryOpsBegin;
+  DenseMap<std::pair<Value *, Value *>, unsigned> PairMap[NumBinaryOps];
+
+  bool MadeChange;
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+
+private:
+  void BuildRankMap(Function &F, ReversePostOrderTraversal<Function *> &RPOT);
+  unsigned getRank(Value *V);
+  void canonicalizeOperands(Instruction *I);
+  void ReassociateExpression(BinaryOperator *I);
+  void RewriteExprTree(BinaryOperator *I,
+                       SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeExpression(BinaryOperator *I,
+                            SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeAdd(Instruction *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeXor(Instruction *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
+                      APInt &ConstOpnd, Value *&Res);
+  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
+                      reassociate::XorOpnd *Opnd2, APInt &ConstOpnd,
+                      Value *&Res);
+  Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder,
+                                 SmallVectorImpl<reassociate::Factor> &Factors);
+  Value *OptimizeMul(BinaryOperator *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *RemoveFactorFromExpression(Value *V, Value *Factor);
+  void EraseInst(Instruction *I);
+  void RecursivelyEraseDeadInsts(Instruction *I,
+                                 SetVector<AssertingVH<Instruction>> &Insts);
+  void OptimizeInst(Instruction *I);
+  Instruction *canonicalizeNegConstExpr(Instruction *I);
+  void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h b/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h
new file mode 100644
index 0000000..128f176
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h
@@ -0,0 +1,39 @@
+//===- RewriteStatepointsForGC.h - ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface to the "Rewrite Statepoints for GC" pass.
+//
+// This pass rewrites call/invoke instructions so as to make potential
+// relocations performed by the garbage collector explicit in the IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
+#define LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class DominatorTree;
+class Function;
+class Module;
+class TargetTransformInfo;
+class TargetLibraryInfo;
+
+struct RewriteStatepointsForGC : public PassInfoMixin<RewriteStatepointsForGC> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+  bool runOnFunction(Function &F, DominatorTree &, TargetTransformInfo &,
+                     const TargetLibraryInfo &);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
new file mode 100644
index 0000000..2a294c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
@@ -0,0 +1,43 @@
+//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file implements sparse conditional constant propagation and merging:
+//
+// Specifically, this:
+//   * Assumes values are constant unless proven otherwise
+//   * Assumes BasicBlocks are dead unless proven otherwise
+//   * Proves values to be constant, and replaces them with constants
+//   * Proves conditional branches to be unconditional
+//
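+//   For example (a sketch):
+//
+//     x = 4;
+//     if (x > 3) y = 1; else y = 2;  // branch proven always taken
+//     use(y);                        // y proven constant 1
+//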
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SCCP_H
+#define LLVM_TRANSFORMS_SCALAR_SCCP_H
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// This pass performs function-level constant propagation and merging.
+class SCCPPass : public PassInfoMixin<SCCPPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SCCP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
new file mode 100644
index 0000000..4a321e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
@@ -0,0 +1,139 @@
+//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Scalar Replacement of
+/// Aggregates pass. This pass provides both aggregate splitting and the
+/// primary SSA formation used in the compiler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SROA_H
+#define LLVM_TRANSFORMS_SCALAR_SROA_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class AssumptionCache;
+class DominatorTree;
+class Function;
+class Instruction;
+class LLVMContext;
+class PHINode;
+class SelectInst;
+class Use;
+
+/// A private "module" namespace for types and utilities used by SROA. These
+/// are implementation details and should not be used by clients.
+namespace sroa LLVM_LIBRARY_VISIBILITY {
+
+class AllocaSliceRewriter;
+class AllocaSlices;
+class Partition;
+class SROALegacyPass;
+
+} // end namespace sroa
+
+/// \brief An optimization pass providing Scalar Replacement of Aggregates.
+///
+/// This pass takes allocations which can be completely analyzed (that is, they
+/// don't escape) and tries to turn them into scalar SSA values. There are
+/// a few steps to this process.
+///
+/// 1) It takes allocations of aggregates and analyzes the ways in which they
+///    are used to try to split them into smaller allocations, ideally of
+///    a single scalar data type. It will split up memcpy and memset accesses
+///    as necessary and try to isolate individual scalar accesses.
+/// 2) It will transform accesses into forms which are suitable for SSA value
+///    promotion. This can be replacing a memset with a scalar store of an
+///    integer value, or it can involve speculating operations on a PHI or
+///    select to be a PHI or select of the results.
+/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
+///    onto insert and extract operations on a vector value, and convert them to
+///    this form. By doing so, it will enable promotion of vector aggregates to
+///    SSA vector values.
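+///
+/// For example (a sketch, shown at the C level): an aggregate alloca
+///
+///   struct { int a; float b; } s;
+///   s.a = f(); s.b = g();
+///
+/// is split into independent slots for `a` and `b`, each of which can then be
+/// promoted to SSA values, removing the alloca entirely.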
+class SROA : public PassInfoMixin<SROA> {
+  LLVMContext *C = nullptr;
+  DominatorTree *DT = nullptr;
+  AssumptionCache *AC = nullptr;
+
+  /// \brief Worklist of alloca instructions to simplify.
+  ///
+  /// Each alloca in the function is added to this. Each new alloca formed gets
+  /// added to it as well to recursively simplify unless that alloca can be
+  /// directly promoted. Finally, each time we rewrite a use of an alloca
+  /// other than the one being actively rewritten, we add it back onto the
+  /// list if not already present to ensure it is re-visited.
+  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> Worklist;
+
+  /// \brief A collection of instructions to delete.
+  /// We try to batch deletions to simplify code and make things a bit more
+  /// efficient.
+  SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts;
+
+  /// \brief Post-promotion worklist.
+  ///
+  /// Sometimes we discover an alloca which has a high probability of becoming
+  /// viable for SROA after a round of promotion takes place. In those cases,
+  /// the alloca is enqueued here for re-processing.
+  ///
+  /// Note that we have to be very careful to clear allocas out of this list in
+  /// the event they are deleted.
+  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> PostPromotionWorklist;
+
+  /// \brief A collection of alloca instructions we can directly promote.
+  std::vector<AllocaInst *> PromotableAllocas;
+
+  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
+  ///
+  /// All of these PHIs have been checked for the safety of speculation and by
+  /// being speculated will allow promoting allocas currently in the promotable
+  /// queue.
+  SetVector<PHINode *, SmallVector<PHINode *, 2>> SpeculatablePHIs;
+
+  /// \brief A worklist of select instructions to speculate prior to promoting
+  /// allocas.
+  ///
+  /// All of these select instructions have been checked for the safety of
+  /// speculation and by being speculated will allow promoting allocas
+  /// currently in the promotable queue.
+  SetVector<SelectInst *, SmallVector<SelectInst *, 2>> SpeculatableSelects;
+
+public:
+  SROA() = default;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+  friend class sroa::AllocaSliceRewriter;
+  friend class sroa::SROALegacyPass;
+
+  /// Helper used by both the public run method and by the legacy pass.
+  PreservedAnalyses runImpl(Function &F, DominatorTree &RunDT,
+                            AssumptionCache &RunAC);
+
+  bool presplitLoadsAndStores(AllocaInst &AI, sroa::AllocaSlices &AS);
+  AllocaInst *rewritePartition(AllocaInst &AI, sroa::AllocaSlices &AS,
+                               sroa::Partition &P);
+  bool splitAlloca(AllocaInst &AI, sroa::AllocaSlices &AS);
+  bool runOnAlloca(AllocaInst &AI);
+  void clobberUse(Use &U);
+  bool deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
+  bool promoteAllocas(Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SROA_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
new file mode 100644
index 0000000..63bfe63
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
@@ -0,0 +1,55 @@
+//===- SimpleLoopUnswitch.h - Hoist loop-invariant control flow -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// This pass transforms loops that contain branches on loop-invariant
+/// conditions to have multiple loops. For example, it turns the left into the
+/// right code:
+///
+///  for (...)                  if (lic)
+///    A                          for (...)
+///    if (lic)                     A; B; C
+///      B                      else
+///    C                          for (...)
+///                                 A; C
+///
+/// This can increase the size of the code exponentially (doubling it every time
+/// a loop is unswitched), so we only unswitch if the resultant code will be
+/// smaller than a threshold.
+///
+/// This pass expects LICM to be run before it to hoist invariant conditions out
+/// of the loop, to make the unswitching opportunity obvious.
+///
+class SimpleLoopUnswitchPass : public PassInfoMixin<SimpleLoopUnswitchPass> {
+  bool NonTrivial;
+
+public:
+  SimpleLoopUnswitchPass(bool NonTrivial = false) : NonTrivial(NonTrivial) {}
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
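+
+// A minimal usage sketch (illustrative, not part of the original interface):
+// the pass is loop-scoped, so it is added to a function pipeline through the
+// loop adaptor declared in LoopPassManager.h, which is included above.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(createFunctionToLoopPassAdaptor(
+//       SimpleLoopUnswitchPass(/*NonTrivial=*/true)));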
+
+/// Create the legacy pass object for the simple loop unswitcher.
+///
+/// See the documentation for `SimpleLoopUnswitchPass` for details.
+Pass *createSimpleLoopUnswitchLegacyPass(bool NonTrivial = false);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
new file mode 100644
index 0000000..6198957
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -0,0 +1,55 @@
+//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the pass responsible for both
+/// simplifying and canonicalizing the CFG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+
+#include "llvm/Analysis/Utils/Local.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass to simplify and canonicalize the CFG of a function.
+///
+/// This pass iteratively simplifies the entire CFG of a function. It may change
+/// or remove control flow to put the CFG into a canonical form expected by
+/// other passes of the mid-level optimizer. Depending on the specified options,
+/// it may further optimize control-flow to create non-canonical forms.
+class SimplifyCFGPass : public PassInfoMixin<SimplifyCFGPass> {
+  SimplifyCFGOptions Options;
+
+public:
+  /// The default constructor sets the pass options to create canonical IR,
+  /// rather than optimal IR. That is, by default we bypass transformations that
+  /// are likely to improve performance but make analysis for other passes more
+  /// difficult.
+  SimplifyCFGPass()
+      : SimplifyCFGPass(SimplifyCFGOptions()
+                            .forwardSwitchCondToPhi(false)
+                            .convertSwitchToLookupTable(false)
+                            .needCanonicalLoops(true)
+                            .sinkCommonInsts(false)) {}
+
+  /// Construct a pass with optional optimizations.
+  SimplifyCFGPass(const SimplifyCFGOptions &PassOptions);
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
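+
+// A brief usage sketch (illustrative): SimplifyCFGOptions is a builder, so
+// non-default behavior is requested by chaining the setters named in the
+// default constructor above, e.g.:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(SimplifyCFGPass(
+//       SimplifyCFGOptions().convertSwitchToLookupTable(true)));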
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h
new file mode 100644
index 0000000..f9b3cb0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h
@@ -0,0 +1,30 @@
+//===-- Sink.h - Code Sinking -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass moves instructions into successor blocks, when possible, so that
+// they aren't executed on paths where their results aren't needed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SINK_H
+#define LLVM_TRANSFORMS_SCALAR_SINK_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Move instructions into successor blocks when possible.
+class SinkingPass : public PassInfoMixin<SinkingPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SINK_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h
new file mode 100644
index 0000000..f39e03d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h
@@ -0,0 +1,111 @@
+//===- SpeculateAroundPHIs.h - Speculate around PHIs ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
+#define LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+/// This pass handles simple speculation of instructions around PHIs when
+/// doing so is profitable for a particular target despite duplicated
+/// instructions.
+///
+/// The motivating examples are PHIs of constants, which require
+/// materializing the constants along each edge. If the PHI is used by an
+/// instruction where the target can materialize the constant as part of the
+/// instruction, it is profitable to speculate those instructions around the
+/// PHI node. This can reduce dynamic instruction count as well as decrease
+/// register pressure.
+///
+/// Consider this IR for example:
+///   ```
+///   entry:
+///     br i1 %flag, label %a, label %b
+///
+///   a:
+///     br label %exit
+///
+///   b:
+///     br label %exit
+///
+///   exit:
+///     %p = phi i32 [ 7, %a ], [ 11, %b ]
+///     %sum = add i32 %arg, %p
+///     ret i32 %sum
+///   ```
+/// Materializing the inputs to this PHI node may require explicit
+/// instructions. For example, on x86 this would turn into something like
+///   ```
+///     testq %eax, %eax
+///     movl $7, %rNN
+///     jne .L
+///     movl $11, %rNN
+///   .L:
+///     addl %edi, %rNN
+///     movl %rNN, %eax
+///     retq
+///   ```
+/// When these constants can be folded directly into another instruction, it
+/// would be preferable to avoid the potential for register pressure (above we
+/// can easily avoid it, but that isn't always true) and simply duplicate the
+/// instruction using the PHI:
+///   ```
+///   entry:
+///     br i1 %flag, label %a, label %b
+///
+///   a:
+///     %sum.1 = add i32 %arg, 7
+///     br label %exit
+///
+///   b:
+///     %sum.2 = add i32 %arg, 11
+///     br label %exit
+///
+///   exit:
+///     %p = phi i32 [ %sum.1, %a ], [ %sum.2, %b ]
+///     ret i32 %p
+///   ```
+/// Which will generate something like the following on x86:
+///   ```
+///     testq %eax, %eax
+///     addl $7, %edi
+///     jne .L
+///     addl $11, %edi
+///   .L:
+///     movl %edi, %eax
+///     retq
+///   ```
+///
+/// It is important to note that this pass is never intended to handle more
+/// complex cases where speculating around PHIs allows simplifications of the
+/// IR itself or other subsequent optimizations. Those can and should already
+/// be handled before this pass is ever run by a more powerful analysis that
+/// can reason about equivalences and common subexpressions. Classically, those
+/// cases would be handled by a GVN-powered PRE or similar transform. This
+/// pass, in contrast, is *only* interested in cases where despite no
+/// simplifications to the IR itself, speculation is *faster* to execute. The
+/// result of this is that the cost models which are appropriate to consider
+/// here are relatively simple ones around execution and codesize cost, without
+/// any need to consider simplifications or other transformations.
+struct SpeculateAroundPHIsPass : PassInfoMixin<SpeculateAroundPHIsPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
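+
+// Usage is the standard new-pass-manager registration (illustrative):
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(SpeculateAroundPHIsPass());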
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h
new file mode 100644
index 0000000..068f817
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h
@@ -0,0 +1,92 @@
+//===- SpeculativeExecution.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists instructions to enable speculative execution on
+// targets where branches are expensive. This is aimed at GPUs. It
+// currently works on simple if-then and if-then-else
+// patterns.
+//
+// Removing branches is not the only motivation for this
+// pass. E.g. consider this code and assume that there is no
+// addressing mode for multiplying by sizeof(*a):
+//
+//   if (b > 0)
+//     c = a[i + 1]
+//   if (d > 0)
+//     e = a[i + 2]
+//
+// turns into
+//
+//   p = &a[i + 1];
+//   if (b > 0)
+//     c = *p;
+//   q = &a[i + 2];
+//   if (d > 0)
+//     e = *q;
+//
+// which could later be optimized to
+//
+//   r = &a[i];
+//   if (b > 0)
+//     c = r[1];
+//   if (d > 0)
+//     e = r[2];
+//
+// Later passes sink back much of the speculated code that did not enable
+// further optimization.
+//
+// This pass is more aggressive than the function SpeculativelyExecuteBB in
+// SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and
+// it will speculate at most one instruction. It also will not speculate if
+// there is a value defined in the if-block that is only used in the then-block.
+// These restrictions make sense since the speculation in SimplifyCFG seems
+// aimed at introducing cheap selects, while this pass is intended to do more
+// aggressive speculation while counting on later passes to either capitalize on
+// that or clean it up.
+//
+// If the pass was created by calling
+// createSpeculativeExecutionIfHasBranchDivergencePass or the
+// -spec-exec-only-if-divergent-target option is present, this pass only has an
+// effect on targets where TargetTransformInfo::hasBranchDivergence() is true;
+// on other targets, it is a nop.
+//
+// This lets you include this pass unconditionally in the IR pass pipeline, but
+// only enable it for relevant targets.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+#define LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class SpeculativeExecutionPass
+    : public PassInfoMixin<SpeculativeExecutionPass> {
+public:
+  SpeculativeExecutionPass(bool OnlyIfDivergentTarget = false);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM
+  bool runImpl(Function &F, TargetTransformInfo *TTI);
+
+private:
+  bool runOnBasicBlock(BasicBlock &B);
+  bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
+
+  // If true, this pass is a nop unless the target architecture has branch
+  // divergence.  
+  const bool OnlyIfDivergentTarget = false;
+
+  TargetTransformInfo *TTI = nullptr;
+};
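+
+// A minimal usage sketch (illustrative): constructing the pass with
+// OnlyIfDivergentTarget=true matches the behavior of
+// createSpeculativeExecutionIfHasBranchDivergencePass described above.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(SpeculativeExecutionPass(/*OnlyIfDivergentTarget=*/true));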
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h
new file mode 100644
index 0000000..793f9bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h
@@ -0,0 +1,66 @@
+//===---- TailRecursionElimination.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file transforms calls of the current function (self recursion) followed
+// by a return instruction with a branch to the entry of the function, creating
+// a loop.  This pass also implements the following extensions to the basic
+// algorithm:
+//
+//  1. Trivial instructions between the call and return do not prevent the
+//     transformation from taking place, though currently the analysis cannot
+//     support moving any really useful instructions (only dead ones).
+//  2. This pass transforms functions that are prevented from being tail
+//     recursive by an associative and commutative expression to use an
+//     accumulator variable, thus compiling the typical naive factorial or
+//     'fib' implementation into efficient code.
+//  3. TRE is performed if the function returns void, if the return
+//     returns the result returned by the call, or if the function returns a
+//     run-time constant on all exits from the function.  It is possible, though
+//     unlikely, that the return returns something else (like constant 0), and
+//     can still be TRE'd.  It can be TRE'd if ALL OTHER return instructions in
+//     the function return the exact same value.
+//  4. If it can prove that callees do not access their caller stack frame,
+//     they are marked as eligible for tail call elimination (by the code
+//     generator).
+//
+// There are several improvements that could be made:
+//
+//  1. If the function has any alloca instructions, these instructions will be
+//     moved out of the entry block of the function, causing them to be
+//     evaluated each time through the tail recursion.  Safely keeping allocas
+//     in the entry block requires analysis to prove that the tail-called
+//     function does not read or write the stack object.
+//  2. Tail recursion is only performed if the call immediately precedes the
+//     return instruction.  It's possible that there could be a jump between
+//     the call and the return.
+//  3. There can be intervening operations between the call and the return that
+//     prevent the TRE from occurring.  For example, there could be GEP's and
+//     stores to memory that will not be read or written by the call.  This
+//     requires some substantial analysis (such as with DSA) to prove safe to
+//     move ahead of the call, but doing so could allow many more TREs to be
+//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
+//  4. The algorithm we use to detect if callees access their caller stack
+//     frames is very primitive.
+//
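+// As a concrete illustration of extension #2 above (an illustrative sketch,
+// not code from this pass), a naive factorial such as
+//
+//   int fac(int n) { return n <= 1 ? 1 : n * fac(n - 1); }
+//
+// is not tail recursive because of the pending multiply, but the accumulator
+// transformation turns it into the moral equivalent of
+//
+//   int fac(int n) {
+//     int acc = 1;
+//     for (; n > 1; --n)
+//       acc *= n;
+//     return acc;
+//   }
+//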
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct TailCallElimPass : PassInfoMixin<TailCallElimPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils.h b/linux-x64/clang/include/llvm/Transforms/Utils.h
new file mode 100644
index 0000000..cfb89d1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils.h
@@ -0,0 +1,125 @@
+//===- llvm/Transforms/Utils.h - Utility Transformations --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Utils transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_H
+#define LLVM_TRANSFORMS_UTILS_H
+
+namespace llvm {
+
+class ModulePass;
+class FunctionPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerInvoke - This pass removes invoke instructions, converting them to call
+// instructions.
+//
+FunctionPass *createLowerInvokePass();
+extern char &LowerInvokePassID;
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
+//
+FunctionPass *createInstructionNamerPass();
+extern char &InstructionNamerID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerSwitch - This pass converts SwitchInst instructions into a sequence of
+// chained binary branch instructions.
+//
+FunctionPass *createLowerSwitchPass();
+extern char &LowerSwitchID;
+
+//===----------------------------------------------------------------------===//
+//
+// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
+// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
+// variants, intended to run pre- and post-inlining, respectively.
+//
+FunctionPass *createEntryExitInstrumenterPass();
+FunctionPass *createPostInlineEntryExitInstrumenterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting
+// a dummy basic block. This pass may be "required" by passes that cannot deal
+// with critical edges. For this usage, a pass must call:
+//
+//   AU.addRequiredID(BreakCriticalEdgesID);
+//
+// This pass obviously invalidates the CFG, but can update forward dominator
+// (set, immediate dominators, tree, and frontier) information.
+//
+FunctionPass *createBreakCriticalEdgesPass();
+extern char &BreakCriticalEdgesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop
+// optimizations.
+//
+Pass *createLCSSAPass();
+extern char &LCSSAID;
+
+//===----------------------------------------------------------------------===//
+//
+// AddDiscriminators - Add DWARF path discriminators to the IR.
+FunctionPass *createAddDiscriminatorsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PromoteMemoryToRegister - This pass is used to promote memory references to
+// be register references. A simple example of the transformation performed by
+// this pass is:
+//
+//        FROM CODE                           TO CODE
+//   %X = alloca i32, i32 1                 ret i32 42
+//   store i32 42, i32 *%X
+//   %Y = load i32* %X
+//   ret i32 %Y
+//
+FunctionPass *createPromoteMemoryToRegisterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplify - Insert Pre-header blocks into the CFG for every function in
+// the module.  This pass updates dominator information, loop information, and
+// does not add critical edges to the CFG.
+//
+//   AU.addRequiredID(LoopSimplifyID);
+//
+Pass *createLoopSimplifyPass();
+extern char &LoopSimplifyID;
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionSimplifier - Remove redundant instructions.
+//
+FunctionPass *createInstructionSimplifierPass();
+extern char &InstructionSimplifierID;
+
+/// This function returns a new pass that downgrades the debug info in the
+/// module to line tables only.
+ModulePass *createStripNonLineTableDebugInfoPass();
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
new file mode 100644
index 0000000..eaad06a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -0,0 +1,81 @@
+//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class AllocaInst;
+
+// These magic constants should be the same as in asan_internal.h from the
+// ASan runtime in compiler-rt.
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+static const int kAsanStackUseAfterReturnMagic = 0xf5;
+static const int kAsanStackUseAfterScopeMagic = 0xf8;
+
+// Input/output data struct for ComputeASanStackFrameLayout.
+struct ASanStackVariableDescription {
+  const char *Name;    // Name of the variable that will be displayed by asan
+                       // if a stack-related bug is reported.
+  uint64_t Size;       // Size of the variable in bytes.
+  size_t LifetimeSize; // Size in bytes to use for lifetime analysis check.
+                       // Will be rounded up to Granularity.
+  size_t Alignment;    // Alignment of the variable (power of 2).
+  AllocaInst *AI;      // The actual AllocaInst.
+  size_t Offset;       // Offset from the beginning of the frame;
+                       // set by ComputeASanStackFrameLayout.
+  unsigned Line;       // Line number.
+};
+
+// Output data struct for ComputeASanStackFrameLayout.
+struct ASanStackFrameLayout {
+  size_t Granularity;     // Shadow granularity.
+  size_t FrameAlignment;  // Alignment for the entire frame.
+  size_t FrameSize;       // Size of the frame in bytes.
+};
+
+ASanStackFrameLayout ComputeASanStackFrameLayout(
+    // The array of stack variables. The elements may get reordered and changed.
+    SmallVectorImpl<ASanStackVariableDescription> &Vars,
+    // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
+    size_t Granularity,
+    // The minimal size of the left-most redzone (header).
+    // At least 4 pointer sizes, a power of 2, and >= Granularity.
+    // The resulting FrameSize should be a multiple of MinHeaderSize.
+    size_t MinHeaderSize);
+
+// Compute frame description, see DescribeAddressIfStack in ASan runtime.
+SmallString<64> ComputeASanStackFrameDescription(
+    const SmallVectorImpl<ASanStackVariableDescription> &Vars);
+
+// Returns shadow bytes with marked red zones. This shadow represents the state
+// of the stack frame when all local variables are inside their own scope.
+SmallVector<uint8_t, 64>
+GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+               const ASanStackFrameLayout &Layout);
+
+// Returns shadow bytes with marked red zones and after scope. This shadow
+// represents the state of the stack frame when all local variables are
+// outside of their own scope.
+SmallVector<uint8_t, 64> GetShadowBytesAfterScope(
+    // The array of stack variables. The elements may get reordered and changed.
+    const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+    const ASanStackFrameLayout &Layout);
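+
+// A short usage sketch (illustrative; the descriptions are filled in by the
+// instrumentation pass beforehand):
+//
+//   SmallVector<ASanStackVariableDescription, 8> Vars; // one entry per alloca
+//   ASanStackFrameLayout Layout = ComputeASanStackFrameLayout(
+//       Vars, /*Granularity=*/8, /*MinHeaderSize=*/32);
+//   SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, Layout);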
+
+} // llvm namespace
+
+#endif  // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h b/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h
new file mode 100644
index 0000000..4dad06e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h
@@ -0,0 +1,32 @@
+//===- AddDiscriminators.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass adds DWARF discriminators to the IR. Path discriminators are used
+// to decide what CFG path was taken inside sub-graphs whose instructions share
+// the same line and column number information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+#define LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class AddDiscriminatorsPass : public PassInfoMixin<AddDiscriminatorsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
new file mode 100644
index 0000000..6f0d2de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -0,0 +1,314 @@
+//===- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions performs manipulations on basic blocks and on
+// instructions contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/InstrTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class DeferredDominance;
+class DominatorTree;
+class Function;
+class Instruction;
+class LoopInfo;
+class MDNode;
+class MemoryDependenceResults;
+class ReturnInst;
+class TargetLibraryInfo;
+class Value;
+
+/// Delete the specified block, which must have no predecessors.
+void DeleteDeadBlock(BasicBlock *BB, DeferredDominance *DDT = nullptr);
+
+/// We know that BB has one predecessor. If there are any single-entry PHI nodes
+/// in it, fold them away. This handles the case when all entries to the PHI
+/// nodes in a block are guaranteed equal, such as when the block has exactly
+/// one predecessor.
+void FoldSingleEntryPHINodes(BasicBlock *BB,
+                             MemoryDependenceResults *MemDep = nullptr);
+
+/// Examine each PHI in the given block and delete it if it is dead. Also
+/// recursively delete any operands that become dead as a result. This includes
+/// tracing the def-use list from the PHI to see if it is ultimately unused or
+/// if it reaches an unused cycle. Return true if any PHIs were deleted.
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
+
+/// Attempts to merge a block into its predecessor, if possible. The return
+/// value indicates success or failure.
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+                               LoopInfo *LI = nullptr,
+                               MemoryDependenceResults *MemDep = nullptr);
+
+/// Replace all uses of an instruction (specified by BI) with a value, then
+/// remove and delete the original instruction.
+void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
+                          BasicBlock::iterator &BI, Value *V);
+
+/// Replace the instruction specified by BI with the instruction specified by I.
+/// Copies DebugLoc from BI to I, if I doesn't already have a DebugLoc. The
+/// original instruction is deleted and BI is updated to point to the new
+/// instruction.
+void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
+                         BasicBlock::iterator &BI, Instruction *I);
+
+/// Replace the instruction specified by From with the instruction specified by
+/// To. Copies DebugLoc from From to To, if To doesn't already have a DebugLoc.
+void ReplaceInstWithInst(Instruction *From, Instruction *To);
+
+/// Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+  DominatorTree *DT;
+  LoopInfo *LI;
+  bool MergeIdenticalEdges = false;
+  bool DontDeleteUselessPHIs = false;
+  bool PreserveLCSSA = false;
+
+  CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
+                               LoopInfo *LI = nullptr)
+      : DT(DT), LI(LI) {}
+
+  CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+    MergeIdenticalEdges = true;
+    return *this;
+  }
+
+  CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+    DontDeleteUselessPHIs = true;
+    return *this;
+  }
+
+  CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+    PreserveLCSSA = true;
+    return *this;
+  }
+};
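+
+// For illustration, a typical call site chains the setters inline (DT and LI
+// being whatever analyses the caller has available):
+//
+//   SplitCriticalEdge(BB->getTerminator(), SuccNum,
+//                     CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA());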
+
+/// If this edge is a critical edge, insert a new node to split the critical
+/// edge. This will update the analyses passed in through the option struct.
+/// This returns the new block if the edge was split, null otherwise.
+///
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination.  This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst.  Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
+                              const CriticalEdgeSplittingOptions &Options =
+                                  CriticalEdgeSplittingOptions());
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+                  const CriticalEdgeSplittingOptions &Options =
+                      CriticalEdgeSplittingOptions()) {
+  return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+                           Options);
+}
+
+/// If the edge from *PI to BB is not critical, return false. Otherwise, split
+/// all edges between the two blocks and return true. This updates all of the
+/// same analyses as the other SplitCriticalEdge function.
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+                              const CriticalEdgeSplittingOptions &Options =
+                                  CriticalEdgeSplittingOptions()) {
+  bool MadeChange = false;
+  TerminatorInst *TI = (*PI)->getTerminator();
+  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+    if (TI->getSuccessor(i) == Succ)
+      MadeChange |= !!SplitCriticalEdge(TI, i, Options);
+  return MadeChange;
+}
+
+/// If an edge from Src to Dst is critical, split the edge and return the new
+/// block; otherwise return null. This method requires that there be an edge
+/// between the two blocks. It updates the analyses passed in the options
+/// struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+                  const CriticalEdgeSplittingOptions &Options =
+                      CriticalEdgeSplittingOptions()) {
+  TerminatorInst *TI = Src->getTerminator();
+  unsigned i = 0;
+  while (true) {
+    assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
+    if (TI->getSuccessor(i) == Dst)
+      return SplitCriticalEdge(TI, i, Options);
+    ++i;
+  }
+}
+
+/// Loop over all of the edges in the CFG, breaking critical edges as they are
+/// found. Returns the number of broken edges.
+unsigned SplitAllCriticalEdges(Function &F,
+                               const CriticalEdgeSplittingOptions &Options =
+                                   CriticalEdgeSplittingOptions());
+
+/// Split the edge connecting the specified blocks.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+                      DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// Split the specified block at the specified instruction - everything before
+/// SplitPt stays in Old and everything starting with SplitPt moves to a new
+/// block. The two blocks are joined by an unconditional branch and the loop
+/// info is updated.
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+                       DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// This method introduces at least one new basic block into the function and
+/// moves some of the predecessors of BB to be predecessors of the new block.
+/// The new predecessors are indicated by the Preds array. The new block is
+/// given a suffix of 'Suffix'. Returns new basic block to which predecessors
+/// from Preds are now pointing.
+///
+/// If BB is a landingpad block then an additional basic block might be
+/// introduced. It will have the suffix Suffix+".split_lp". See
+/// SplitLandingPadPredecessors for more details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+                                   const char *Suffix,
+                                   DominatorTree *DT = nullptr,
+                                   LoopInfo *LI = nullptr,
+                                   bool PreserveLCSSA = false);
+
+/// This method transforms the landing pad, OrigBB, by introducing two new basic
+/// blocks into the function. One of those new basic blocks gets the
+/// predecessors listed in Preds. The other basic block gets the remaining
+/// predecessors of OrigBB. The landingpad instruction of OrigBB is cloned into
+/// both of the new basic blocks. The new blocks are given the suffixes
+/// 'Suffix1' and 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+                                 ArrayRef<BasicBlock *> Preds,
+                                 const char *Suffix, const char *Suffix2,
+                                 SmallVectorImpl<BasicBlock *> &NewBBs,
+                                 DominatorTree *DT = nullptr,
+                                 LoopInfo *LI = nullptr,
+                                 bool PreserveLCSSA = false);
+
+/// This method duplicates the specified return instruction into a predecessor
+/// which ends in an unconditional branch. If the return instruction returns a
+/// value defined by a PHI, propagate the right value into the return. It
+/// returns the new return instruction in the predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+                                       BasicBlock *Pred);
+
+/// Split the containing block at the specified instruction - everything before
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
+/// conditional branch (with the value of Cond being the condition).
+/// Before:
+///   Head
+///   SplitBefore
+///   Tail
+/// After:
+///   Head
+///   if (Cond)
+///     ThenBlock
+///   SplitBefore
+///   Tail
+///
+/// If Unreachable is true, the ThenBlock ends with
+/// UnreachableInst; otherwise it branches to Tail.
+/// Returns the terminator of the new ThenBlock.
+///
+/// Updates DT and LI if given.
+TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+                                          bool Unreachable,
+                                          MDNode *BranchWeights = nullptr,
+                                          DominatorTree *DT = nullptr,
+                                          LoopInfo *LI = nullptr);
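+
+// A sketch of the common instrumentation idiom (illustrative; Cond and
+// InsertPt are the caller's values):
+//
+//   TerminatorInst *ThenTerm =
+//       SplitBlockAndInsertIfThen(Cond, InsertPt, /*Unreachable=*/false);
+//   IRBuilder<> Builder(ThenTerm);
+//   // ... emit the conditional code before ThenTerm ...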
+
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+///   Head
+///   SplitBefore
+///   Tail
+/// After:
+///   Head
+///   if (Cond)
+///     ThenBlock
+///   else
+///     ElseBlock
+///   SplitBefore
+///   Tail
+void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+                                   TerminatorInst **ThenTerm,
+                                   TerminatorInst **ElseTerm,
+                                   MDNode *BranchWeights = nullptr);
+
+/// Check whether BB is the merge point of an if-region.
+/// If so, return the boolean condition that determines which entry into
+/// BB will be taken.  Also, return by reference the block that will be
+/// entered if the condition is true, and the block that will be
+/// entered if the condition is false.
+///
+/// This does no checking to see if the true/false blocks have large or unsavory
+/// instructions in them.
+Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+                      BasicBlock *&IfFalse);
+
+// Split critical edges where the source of the edge is an indirectbr
+// instruction. This isn't always possible, but we can handle some easy cases.
+// This is useful because the machine-IR (MI) layer is unable to split such
+// critical edges, which means it will not be able to sink instructions along
+// those edges.
+// This is especially painful for indirect branches with many successors, where
+// we end up having to prepare all outgoing values in the origin block.
+//
+// Our normal algorithm for splitting critical edges requires us to update
+// the outgoing edges of the edge origin block, but for an indirectbr this
+// is hard, since it would require finding and updating the block addresses
+// the indirect branch uses. But if a block only has a single indirectbr
+// predecessor, with the others being regular branches, we can do it in a
+// different way.
+// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
+// We can split D into D0 and D1, where D0 contains only the PHIs from D,
+// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
+// create the following structure:
+// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
+// If BPI and BFI are non-null, they will be updated accordingly.
+bool SplitIndirectBrCriticalEdges(Function &F,
+                                  BranchProbabilityInfo *BPI = nullptr,
+                                  BlockFrequencyInfo *BFI = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h b/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h
new file mode 100644
index 0000000..9cc81a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h
@@ -0,0 +1,29 @@
+//===- BreakCriticalEdges.h - Critical Edge Elimination Pass --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
+// inserting a dummy basic block.  This pass may be "required" by passes that
+// cannot deal with critical edges.  For this usage, the structure type is
+// forward declared.  This pass obviously invalidates the CFG, but can update
+// dominator trees.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+#define LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct BreakCriticalEdgesPass : public PassInfoMixin<BreakCriticalEdgesPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
new file mode 100644
index 0000000..3a71559
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -0,0 +1,125 @@
+//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+  class Value;
+  class DataLayout;
+  class TargetLibraryInfo;
+
+  /// Analyze the name and prototype of the given function and set any
+  /// applicable attributes.
+  /// If the library function is unavailable, the function is left unmodified.
+  ///
+  /// Returns true if any attributes were set and false otherwise.
+  bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
+
+  /// Check whether the overloaded unary floating point function
+  /// corresponding to \a Ty is available.
+  bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+                       LibFunc DoubleFn, LibFunc FloatFn,
+                       LibFunc LongDoubleFn);
+
+  /// Return V if it is an i8*, otherwise cast it to i8*.
+  Value *castToCStr(Value *V, IRBuilder<> &B);
+
+  /// Emit a call to the strlen function to the builder, for the specified
+  /// pointer. Ptr is required to be some pointer type, and the return value has
+  /// 'intptr_t' type.
+  Value *emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
+                    const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strnlen function to the builder, for the specified
+  /// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
+  /// type, and the return value has 'intptr_t' type.
+  Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+                     const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strchr function to the builder, for the specified
+  /// pointer and character. Ptr is required to be some pointer type, and the
+  /// return value has 'i8*' type.
+  Value *emitStrChr(Value *Ptr, char C, IRBuilder<> &B,
+                    const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strncmp function to the builder.
+  Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+                     const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strcpy function to the builder, for the specified
+  /// pointer arguments.
+  Value *emitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+                    const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
+
+  /// Emit a call to the strncpy function to the builder, for the specified
+  /// pointer arguments and length.
+  Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+                     const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
+
+  /// Emit a call to the __memcpy_chk function to the builder. This expects
+  /// that Len and ObjSize have type 'intptr_t' and Dst/Src are pointers.
+  Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+                       IRBuilder<> &B, const DataLayout &DL,
+                       const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the memchr function. This assumes that Ptr is a pointer,
+  /// Val is an i32 value, and Len is an 'intptr_t' value.
+  Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the memcmp function.
+  Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
+  /// function is known to take a single argument of type matching 'Op' and
+  /// return one value of the same type. If 'Op' is a long double, an 'l'
+  /// suffix is added to the name; if 'Op' is a float, an 'f' suffix is added.
+  Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+                              const AttributeList &Attrs);
+
+  /// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
+  /// function is known to take arguments of types matching 'Op1' and 'Op2' and
+  /// return one value of the same type. If 'Op1'/'Op2' are long double, an 'l'
+  /// suffix is added to the name; if they are float, an 'f' suffix is added.
+  Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+                               IRBuilder<> &B, const AttributeList &Attrs);
+
+  /// Emit a call to the putchar function. This assumes that Char is an integer.
+  Value *emitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the puts function. This assumes that Str is some pointer.
+  Value *emitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fputc function. This assumes that Char is an i32, and
+  /// File is a pointer to FILE.
+  Value *emitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+                   const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fputs function. Str is required to be a pointer and
+  /// File is a pointer to FILE.
+  Value *emitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+                   const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
+  /// Size is an 'intptr_t', and File is a pointer to FILE.
+  Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
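+
+  // A short usage sketch (illustrative; B, DL, and TLI come from the caller):
+  //
+  //   IRBuilder<> B(InsertPt);
+  //   Value *Len = emitStrLen(castToCStr(StrArg, B), B, DL, TLI);
+  //   // Len is null if strlen is unavailable in this TargetLibraryInfo.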
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 0000000..6eca5ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,70 @@
+//===- llvm/Transforms/Utils/BypassSlowDivision.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BasicBlock;
+class Value;
+
+struct DivRemMapKey {
+  bool SignedOp;
+  Value *Dividend;
+  Value *Divisor;
+
+  DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
+      : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
+};
+
+template <> struct DenseMapInfo<DivRemMapKey> {
+  static bool isEqual(const DivRemMapKey &Val1, const DivRemMapKey &Val2) {
+    return Val1.SignedOp == Val2.SignedOp && Val1.Dividend == Val2.Dividend &&
+           Val1.Divisor == Val2.Divisor;
+  }
+
+  static DivRemMapKey getEmptyKey() {
+    return DivRemMapKey(false, nullptr, nullptr);
+  }
+
+  static DivRemMapKey getTombstoneKey() {
+    return DivRemMapKey(true, nullptr, nullptr);
+  }
+
+  static unsigned getHashValue(const DivRemMapKey &Val) {
+    return (unsigned)(reinterpret_cast<uintptr_t>(Val.Dividend) ^
+                      reinterpret_cast<uintptr_t>(Val.Divisor)) ^
+           (unsigned)Val.SignedOp;
+  }
+};
+
+/// This optimization identifies DIV instructions in a BB that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+///
+/// This optimization may add basic blocks immediately after BB; for obvious
+/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
+bool bypassSlowDivision(
+    BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
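+
+// A short usage sketch (illustrative; F is the caller's Function): the map is
+// keyed by the bit width of the slow divide and maps it to the narrower width
+// to try at run time.
+//
+//   DenseMap<unsigned, unsigned> BypassWidths;
+//   BypassWidths[32] = 8; // bypass 32-bit div/rem with an 8-bit divide
+//   bool Changed = false;
+//   for (BasicBlock &BB : F)
+//     Changed |= bypassSlowDivision(&BB, BypassWidths);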
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
new file mode 100644
index 0000000..6e8ece7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
@@ -0,0 +1,54 @@
+//===- CallPromotionUtils.h - Utilities for call promotion ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares utilities useful for promoting indirect call sites to
+// direct call sites.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+
+#include "llvm/IR/CallSite.h"
+
+namespace llvm {
+
+/// Return true if the given indirect call site can be made to call \p Callee.
+///
+/// This function ensures that the number and type of the call site's arguments
+/// and return value match those of the given function. If the types do not
+/// match exactly, they must at least be bitcast compatible. If \p FailureReason
+/// is non-null and the indirect call cannot be promoted, the failure reason
+/// will be stored in it.
+bool isLegalToPromote(CallSite CS, Function *Callee,
+                      const char **FailureReason = nullptr);
+
+/// Promote the given indirect call site to unconditionally call \p Callee.
+///
+/// This function promotes the given call site, returning the direct call or
+/// invoke instruction. If the function type of the call site doesn't match that
+/// of the callee, bitcast instructions are inserted where appropriate. If \p
+/// RetBitCast is non-null, it will be used to store the return value bitcast,
+/// if created.
+Instruction *promoteCall(CallSite CS, Function *Callee,
+                         CastInst **RetBitCast = nullptr);
+
+/// Promote the given indirect call site to conditionally call \p Callee.
+///
+/// This function creates an if-then-else structure at the location of the call
+/// site. The original call site is moved into the "else" block. A clone of the
+/// indirect call site is promoted, placed in the "then" block, and returned. If
+/// \p BranchWeights is non-null, it will be used to set !prof metadata on the
+/// new conditional branch.
+Instruction *promoteCallWithIfThenElse(CallSite CS, Function *Callee,
+                                       MDNode *BranchWeights = nullptr);
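+
+// A minimal promotion sketch (illustrative; CS and Callee come from the
+// caller's analysis):
+//
+//   const char *Reason = nullptr;
+//   if (isLegalToPromote(CS, Callee, &Reason))
+//     promoteCall(CS, Callee);
+//   // Otherwise, Reason explains why the call site was rejected.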
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
new file mode 100644
index 0000000..cd02ca6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
@@ -0,0 +1,272 @@
+//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various functions that are used to clone chunks of LLVM
+// code for various purposes.  This varies from copying whole modules into new
+// modules, to cloning functions with different arguments, to inlining
+// functions, to copying basic blocks to support loop unrolling or superblock
+// formation, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
+#define LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <functional>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class BlockFrequencyInfo;
+class CallInst;
+class CallGraph;
+class DebugInfoFinder;
+class DominatorTree;
+class Function;
+class Instruction;
+class InvokeInst;
+class Loop;
+class LoopInfo;
+class Module;
+class ProfileSummaryInfo;
+class ReturnInst;
+
+/// Return an exact copy of the specified module
+///
+std::unique_ptr<Module> CloneModule(const Module &M);
+std::unique_ptr<Module> CloneModule(const Module &M, ValueToValueMapTy &VMap);
+
+/// Return a copy of the specified module. The ShouldCloneDefinition function
+/// controls whether a specific GlobalValue's definition is cloned. If the
+/// function returns false, the module copy will contain an external reference
+/// in place of the global definition.
+std::unique_ptr<Module>
+CloneModule(const Module &M, ValueToValueMapTy &VMap,
+            function_ref<bool(const GlobalValue *)> ShouldCloneDefinition);
+
+/// ClonedCodeInfo - This struct can be used to capture information about code
+/// being cloned, while it is being cloned.
+struct ClonedCodeInfo {
+  /// ContainsCalls - This is set to true if the cloned code contains a normal
+  /// call instruction.
+  bool ContainsCalls = false;
+
+  /// ContainsDynamicAllocas - This is set to true if the cloned code contains
+  /// a 'dynamic' alloca.  Dynamic allocas are allocas that are either not in
+  /// the entry block or they are in the entry block but are not a constant
+  /// size.
+  bool ContainsDynamicAllocas = false;
+
+  /// All cloned call sites that have operand bundles attached are appended to
+  /// this vector.  This vector may contain nulls or undefs if some of the
+  /// originally inserted callsites were DCE'ed after they were cloned.
+  std::vector<WeakTrackingVH> OperandBundleCallSites;
+
+  ClonedCodeInfo() = default;
+};
+
+/// CloneBasicBlock - Return a copy of the specified basic block, but without
+/// embedding the block into a particular function.  The block returned is an
+/// exact copy of the specified basic block, without any remapping having been
+/// performed.  Because of this, this is only suitable for applications where
+/// the basic block will be inserted into the same function that it was cloned
+/// from (loop unrolling would use this, for example).
+///
+/// Also, note that this function makes a direct copy of the basic block, and
+/// can thus produce illegal LLVM code.  In particular, it will copy any PHI
+/// nodes from the original block, even though there are no predecessors for the
+/// newly cloned block (thus, phi nodes will have to be updated).  Also, this
+/// block will branch to the old successors of the original block: these
+/// successors will have to have any PHI nodes updated to account for the new
+/// incoming edges.
+///
+/// The correlation between instructions in the source and result basic blocks
+/// is recorded in the VMap map.
+///
+/// If you have a particular suffix you'd like to use to add to any cloned
+/// names, specify it as the optional third parameter.
+///
+/// If you would like the basic block to be auto-inserted into the end of a
+/// function, you can specify it as the optional fourth parameter.
+///
+/// If you would like to collect additional information about the cloned
+/// function, you can specify a ClonedCodeInfo object with the optional fifth
+/// parameter.
+///
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
+                            const Twine &NameSuffix = "", Function *F = nullptr,
+                            ClonedCodeInfo *CodeInfo = nullptr,
+                            DebugInfoFinder *DIFinder = nullptr);
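+
+// A minimal usage sketch (illustrative only; assumes a block `BB` and its
+// parent function `F` are in scope), cloning one block and then rewriting
+// operands through the VMap with remapInstructionsInBlocks (declared below):
+//
+//   ValueToValueMapTy VMap;
+//   BasicBlock *Copy = CloneBasicBlock(BB, VMap, ".clone", F);
+//   SmallVector<BasicBlock *, 1> NewBlocks = {Copy};
+//   remapInstructionsInBlocks(NewBlocks, VMap);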
+
+/// CloneFunction - Return a copy of the specified function and add it to that
+/// function's module.  Also, any references specified in the VMap are changed
+/// to refer to their mapped value instead of the original one.  If any of the
+/// arguments to the function are in the VMap, the arguments are deleted from
+/// the resultant function.  The VMap is updated to include mappings from all of
+/// the instructions and basic blocks in the function from their old to new
+/// values.  The final argument captures information about the cloned code if
+/// non-null.
+///
+/// VMap contains no non-identity GlobalValue mappings and debug info metadata
+/// will not be cloned.
+///
+Function *CloneFunction(Function *F, ValueToValueMapTy &VMap,
+                        ClonedCodeInfo *CodeInfo = nullptr);
+
+/// Clone OldFunc into NewFunc, transforming the old arguments into references
+/// to VMap values.  Note that if NewFunc already has basic blocks, the ones
+/// cloned into it will be added to the end of the function.  This function
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
+                       ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                       SmallVectorImpl<ReturnInst*> &Returns,
+                       const char *NameSuffix = "",
+                       ClonedCodeInfo *CodeInfo = nullptr,
+                       ValueMapTypeRemapper *TypeMapper = nullptr,
+                       ValueMaterializer *Materializer = nullptr);
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+                               const Instruction *StartingInst,
+                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                               SmallVectorImpl<ReturnInst *> &Returns,
+                               const char *NameSuffix = "",
+                               ClonedCodeInfo *CodeInfo = nullptr);
+
+/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
+/// except that it does some simple constant prop and DCE on the fly.  The
+/// effect of this is to copy significantly less code in cases where (for
+/// example) a function call with constant arguments is inlined, and those
+/// constant arguments cause a significant amount of code in the callee to be
+/// dead.  Since this doesn't produce an exact copy of the input, it can't be
+/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
+                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                               SmallVectorImpl<ReturnInst*> &Returns,
+                               const char *NameSuffix = "",
+                               ClonedCodeInfo *CodeInfo = nullptr,
+                               Instruction *TheCall = nullptr);
+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+  explicit InlineFunctionInfo(CallGraph *cg = nullptr,
+                              std::function<AssumptionCache &(Function &)>
+                                  *GetAssumptionCache = nullptr,
+                              ProfileSummaryInfo *PSI = nullptr,
+                              BlockFrequencyInfo *CallerBFI = nullptr,
+                              BlockFrequencyInfo *CalleeBFI = nullptr)
+      : CG(cg), GetAssumptionCache(GetAssumptionCache), PSI(PSI),
+        CallerBFI(CallerBFI), CalleeBFI(CalleeBFI) {}
+
+  /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+  /// changes it makes.
+  CallGraph *CG;
+  std::function<AssumptionCache &(Function &)> *GetAssumptionCache;
+  ProfileSummaryInfo *PSI;
+  BlockFrequencyInfo *CallerBFI, *CalleeBFI;
+
+  /// StaticAllocas - InlineFunction fills this in with all static allocas that
+  /// get copied into the caller.
+  SmallVector<AllocaInst *, 4> StaticAllocas;
+
+  /// InlinedCalls - InlineFunction fills this in with callsites that were
+  /// inlined from the callee.  This is only filled in if CG is non-null.
+  SmallVector<WeakTrackingVH, 8> InlinedCalls;
+
+  /// All of the new call sites inlined into the caller.
+  ///
+  /// 'InlineFunction' fills this in by scanning the inlined instructions, and
+  /// only if CG is null. If CG is non-null, instead the value handle
+  /// `InlinedCalls` above is used.
+  SmallVector<CallSite, 8> InlinedCallSites;
+
+  void reset() {
+    StaticAllocas.clear();
+    InlinedCalls.clear();
+    InlinedCallSites.clear();
+  }
+};
+
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller.  This returns false if it is not possible to inline
+/// this call.  The program is still in a well-defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining.  For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream.  Similarly this will inline a recursive
+/// function by one level.
+///
+/// Note that while this routine is allowed to cleanup and optimize the
+/// *inlined* code to minimize the actual inserted code, it must not delete
+/// code in the caller as users of this routine may have pointers to
+/// instructions in the caller that need to remain stable.
+///
+/// If ForwardVarArgsTo is passed, inlining a function with varargs is allowed
+/// and all varargs at the callsite will be passed to any calls to
+/// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
+/// are only used by ForwardVarArgsTo.
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true,
+                    Function *ForwardVarArgsTo = nullptr);
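+
+// A minimal usage sketch (illustrative only; assumes `CI` is a CallInst
+// whose callee should be inlined):
+//
+//   InlineFunctionInfo IFI;
+//   if (InlineFunction(CI, IFI)) {
+//     // CI has been replaced by the callee's body; IFI.StaticAllocas and
+//     // IFI.InlinedCallSites describe what was inserted.
+//   }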
+
+/// \brief Clones a loop \p OrigLoop.  Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB.  Insert the new blocks before block specified in \p Before.
+/// Note: Only innermost loops are supported.
+Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+                             Loop *OrigLoop, ValueToValueMapTy &VMap,
+                             const Twine &NameSuffix, LoopInfo *LI,
+                             DominatorTree *DT,
+                             SmallVectorImpl<BasicBlock *> &Blocks);
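+
+// A minimal usage sketch (illustrative only; assumes a loop `L`, `LI`, `DT`,
+// a dominating block `DomBB`, and an insertion point `InsertBefore` are in
+// scope). The cloned blocks still reference old values until remapped:
+//
+//   ValueToValueMapTy VMap;
+//   SmallVector<BasicBlock *, 8> NewBlocks;
+//   Loop *NewLoop = cloneLoopWithPreheader(InsertBefore, DomBB, L, VMap,
+//                                          ".copy", LI, DT, NewBlocks);
+//   remapInstructionsInBlocks(NewBlocks, VMap);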
+
+/// \brief Remaps instructions in \p Blocks using the mapping in \p VMap.
+void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
+                               ValueToValueMapTy &VMap);
+
+/// Split edge between BB and PredBB and duplicate all non-Phi instructions
+/// from BB between its beginning and the StopAt instruction into the split
+/// block. Phi nodes are not duplicated, but their uses are handled correctly:
+/// we replace them with the uses of corresponding Phi inputs. ValueMapping
+/// is used to map the original instructions from BB to their newly-created
+/// copies. Returns the split block.
+BasicBlock *
+DuplicateInstructionsInSplitBetween(BasicBlock *BB, BasicBlock *PredBB,
+                                    Instruction *StopAt,
+                                    ValueToValueMapTy &ValueMapping,
+                                    DominatorTree *DT = nullptr);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CLONING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 0000000..63d3451
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,172 @@
+//===- Transform/Utils/CodeExtractor.h - Code extraction util ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include <limits>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockFrequency;
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class DominatorTree;
+class Function;
+class Instruction;
+class Loop;
+class Module;
+class Type;
+class Value;
+
+  /// \brief Utility class for extracting code into a new function.
+  ///
+  /// This utility provides a simple interface for extracting some sequence of
+  /// code into its own function, replacing it with a call to that function. It
+  /// also provides various methods to query about the nature and result of
+  /// such a transformation.
+  ///
+  /// The rough algorithm used is:
+  /// 1) Find both the inputs and outputs for the extracted region.
+  /// 2) Pass the inputs as arguments, remapping them within the extracted
+  ///    function to arguments.
+  /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+  ///    as arguments, and inserting stores to the arguments for any scalars.
+  class CodeExtractor {
+    using ValueSet = SetVector<Value *>;
+
+    // Various bits of state computed on construction.
+    DominatorTree *const DT;
+    const bool AggregateArgs;
+    BlockFrequencyInfo *BFI;
+    BranchProbabilityInfo *BPI;
+
+    // If true, varargs functions can be extracted.
+    bool AllowVarArgs;
+
+    // Bits of intermediate state computed at various phases of extraction.
+    SetVector<BasicBlock *> Blocks;
+    unsigned NumExitBlocks = std::numeric_limits<unsigned>::max();
+    Type *RetTy;
+
+  public:
+    /// \brief Create a code extractor for a sequence of blocks.
+    ///
+    /// Given a sequence of basic blocks where the first block in the sequence
+    /// dominates the rest, prepare a code extractor object for pulling this
+    /// sequence out into its new function. When a DominatorTree is also given,
+    /// extra checking and transformations are enabled. If AllowVarArgs is true,
+    /// vararg functions can be extracted. This is safe if all vararg handling
+    /// code, including vastart, is extracted.
+    CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
+                  bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
+                  BranchProbabilityInfo *BPI = nullptr,
+                  bool AllowVarArgs = false);
+
+    /// \brief Create a code extractor for a loop body.
+    ///
+    /// Behaves just like the generic code sequence constructor, but uses the
+    /// block sequence of the loop.
+    CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
+                  BlockFrequencyInfo *BFI = nullptr,
+                  BranchProbabilityInfo *BPI = nullptr);
+
+    /// \brief Check to see if a block is valid for extraction.
+    ///
+    /// Blocks containing EHPads, allocas and invokes are not valid. If
+    /// AllowVarArgs is true, blocks with vastart can be extracted. This is
+    /// safe if all vararg handling code, including vastart, is extracted.
+    static bool isBlockValidForExtraction(const BasicBlock &BB,
+                                          bool AllowVarArgs);
+
+    /// \brief Perform the extraction, returning the new function.
+    ///
+    /// Returns null when called on a CodeExtractor instance where isEligible
+    /// returns false.
+    Function *extractCodeRegion();
+
+    /// \brief Test whether this code extractor is eligible.
+    ///
+    /// Based on the blocks used when constructing the code extractor,
+    /// determine whether it is eligible for extraction.
+    bool isEligible() const { return !Blocks.empty(); }
+
+    /// \brief Compute the set of input values and output values for the code.
+    ///
+    /// These can be used either when performing the extraction or to evaluate
+    /// the expected size of a call to the extracted function. Note that this
+    /// work cannot be cached between the two as once we decide to extract
+    /// a code sequence, that sequence is modified, including changing these
+    /// sets, before extraction occurs. These modifications won't have any
+    /// significant impact on the cost however.
+    void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
+                           const ValueSet &Allocas) const;
+
+    /// Check if lifetime marker nodes can be hoisted/sunk into the outline
+    /// region.
+    ///
+    /// Returns true if it is safe to do the code motion.
+    bool isLegalToShrinkwrapLifetimeMarkers(Instruction *AllocaAddr) const;
+
+    /// Find the set of allocas whose life ranges are contained within the
+    /// outlined region.
+    ///
+    /// Allocas which have lifetime markers contained in the outlined region
+    /// should be pushed to the outlined function. The address bitcasts that
+    /// are used by the lifetime markers are also candidates for shrink-
+    /// wrapping. The instructions that need to be sunk are collected in
+    /// 'Allocas'.
+    void findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
+                     BasicBlock *&ExitBlock) const;
+
+    /// Find or create a block within the outline region for placing hoisted
+    /// code.
+    ///
+    /// CommonExitBlock is a block outside the outline region. It is the common
+    /// successor of blocks inside the region. If there exists a single block
+    /// inside the region that is the predecessor of CommonExitBlock, that block
+    /// will be returned. Otherwise CommonExitBlock will be split and the
+    /// original block will be added to the outline region.
+    BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
+
+  private:
+    void severSplitPHINodes(BasicBlock *&Header);
+    void splitReturnBlocks();
+
+    Function *constructFunction(const ValueSet &inputs,
+                                const ValueSet &outputs,
+                                BasicBlock *header,
+                                BasicBlock *newRootNode, BasicBlock *newHeader,
+                                Function *oldFunction, Module *M);
+
+    void moveCodeToFunction(Function *newFunction);
+
+    void calculateNewCallTerminatorWeights(
+        BasicBlock *CodeReplacer,
+        DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
+        BranchProbabilityInfo *BPI);
+
+    void emitCallAndSwitchStatement(Function *newFunction,
+                                    BasicBlock *newHeader,
+                                    ValueSet &inputs,
+                                    ValueSet &outputs);
+  };
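+
+  // A minimal usage sketch (illustrative only; assumes a dominance-ordered
+  // block sequence `BBs` and a DominatorTree `DT` are in scope):
+  //
+  //   CodeExtractor CE(BBs, &DT);
+  //   if (CE.isEligible())
+  //     if (Function *Outlined = CE.extractCodeRegion())
+  //       ; // `BBs` now live in `Outlined`, called from the original spot.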
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 0000000..63e564d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,32 @@
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true.  Return true if anything changed.
+bool optimizeGlobalCtorsList(Module &M,
+                             function_ref<bool(Function *)> ShouldRemove);
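+
+// A minimal usage sketch (illustrative only; assumes a Module `M` in scope):
+// drop static constructors whose body is a single basic block ending in a
+// plain return (a rough emptiness check, for the sake of the example):
+//
+//   optimizeGlobalCtorsList(M, [](Function *F) {
+//     return F && F->size() == 1 &&
+//            isa<ReturnInst>(F->getEntryBlock().getTerminator());
+//   });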
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h b/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
new file mode 100644
index 0000000..f50c5c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
@@ -0,0 +1,36 @@
+//===- EntryExitInstrumenter.h - Function Entry/Exit Instrumentation ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
+// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
+// variants, intended to run pre- and post-inlining, respectively.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+#define LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct EntryExitInstrumenterPass
+    : public PassInfoMixin<EntryExitInstrumenterPass> {
+  EntryExitInstrumenterPass(bool PostInlining) : PostInlining(PostInlining) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  bool PostInlining;
+};
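+
+// A minimal usage sketch (illustrative only): add the pre-inlining variant to
+// a new-pass-manager function pipeline:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/false));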
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h b/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h
new file mode 100644
index 0000000..1256dfd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h
@@ -0,0 +1,49 @@
+//===-- EscapeEnumerator.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a helper class that enumerates all possible exits from a function,
+// including exception handling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+#define LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+/// EscapeEnumerator - This is a little algorithm to find all escape points
+/// from a function so that "finally"-style code can be inserted. In addition
+/// to finding the existing return and unwind instructions, it also (if
+/// necessary) transforms any call instructions into invokes and sends them to
+/// a landing pad.
+class EscapeEnumerator {
+  Function &F;
+  const char *CleanupBBName;
+
+  Function::iterator StateBB, StateE;
+  IRBuilder<> Builder;
+  bool Done;
+  bool HandleExceptions;
+
+public:
+  EscapeEnumerator(Function &F, const char *N = "cleanup",
+                   bool HandleExceptions = true)
+      : F(F), CleanupBBName(N), StateBB(F.begin()), StateE(F.end()),
+        Builder(F.getContext()), Done(false),
+        HandleExceptions(HandleExceptions) {}
+
+  IRBuilder<> *Next();
+};
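+
+// A minimal usage sketch (illustrative only; assumes `F` and a cleanup
+// callee `CleanupFn` are in scope): visit every exit and insert a call.
+//
+//   EscapeEnumerator EE(F);
+//   while (IRBuilder<> *Builder = EE.Next())
+//     Builder->CreateCall(CleanupFn);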
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
new file mode 100644
index 0000000..0e987b9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
@@ -0,0 +1,120 @@
+//===- Evaluator.h - LLVM IR evaluator --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Function evaluator for LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+#define LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <deque>
+#include <memory>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class TargetLibraryInfo;
+
+/// This class evaluates LLVM IR, producing the Constant representing each SSA
+/// instruction.  Changes to global variables are stored in a mapping that can
+/// be iterated over after the evaluation is complete.  Once an evaluation call
+/// fails, the evaluation object should not be reused.
+class Evaluator {
+public:
+  Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
+      : DL(DL), TLI(TLI) {
+    ValueStack.emplace_back();
+  }
+
+  ~Evaluator() {
+    for (auto &Tmp : AllocaTmps)
+      // If there are still users of the alloca, the program is doing something
+      // silly, e.g. storing the address of the alloca somewhere and using it
+      // later.  Since this is undefined, we'll just make it null.
+      if (!Tmp->use_empty())
+        Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
+  }
+
+  /// Evaluate a call to function F, returning true if successful, false if we
+  /// can't evaluate it.  ActualArgs contains the actual argument values for
+  /// the call.
+  bool EvaluateFunction(Function *F, Constant *&RetVal,
+                        const SmallVectorImpl<Constant*> &ActualArgs);
+
+  /// Evaluate all instructions in block BB, returning true if successful, false
+  /// if we can't evaluate it.  NextBB returns the next BB that control flows
+  /// into, or null upon return.
+  bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
+
+  Constant *getVal(Value *V) {
+    if (Constant *CV = dyn_cast<Constant>(V)) return CV;
+    Constant *R = ValueStack.back().lookup(V);
+    assert(R && "Reference to an uncomputed value!");
+    return R;
+  }
+
+  void setVal(Value *V, Constant *C) {
+    ValueStack.back()[V] = C;
+  }
+
+  const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
+    return MutatedMemory;
+  }
+
+  const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
+    return Invariants;
+  }
+
+private:
+  Constant *ComputeLoadResult(Constant *P);
+
+  /// As we compute SSA register values, we store their contents here. The back
+  /// of the deque contains the current function and the stack contains the
+  /// values in the calling frames.
+  std::deque<DenseMap<Value*, Constant*>> ValueStack;
+
+  /// This is used to detect recursion.  In pathological situations we could hit
+  /// exponential behavior, but at least there is nothing unbounded.
+  SmallVector<Function*, 4> CallStack;
+
+  /// For each store we execute, we update this map.  Loads check this to get
+  /// the most up-to-date value.  If evaluation is successful, this state is
+  /// committed to the process.
+  DenseMap<Constant*, Constant*> MutatedMemory;
+
+  /// To 'execute' an alloca, we create a temporary global variable to represent
+  /// its body.  This vector is needed so we can delete the temporary globals
+  /// when we are done.
+  SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
+
+  /// These global variables have been marked invariant by the static
+  /// constructor.
+  SmallPtrSet<GlobalVariable*, 8> Invariants;
+
+  /// These are constants we have checked and know to be simple enough to live
+  /// in a static initializer of a global.
+  SmallPtrSet<Constant*, 8> SimpleConstants;
+
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+};
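+
+// A minimal usage sketch (illustrative only; assumes `F`, `DL`, and `TLI` are
+// in scope and `F` takes no arguments):
+//
+//   Evaluator Eval(DL, TLI);
+//   Constant *RetVal = nullptr;
+//   SmallVector<Constant *, 1> Args; // no arguments
+//   if (Eval.EvaluateFunction(F, RetVal, Args)) {
+//     // Commit Eval.getMutatedMemory() to the module's globals.
+//   }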
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_EVALUATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
new file mode 100644
index 0000000..7698a06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -0,0 +1,393 @@
+//===- FunctionComparator.h - Function Comparator ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionComparator and GlobalNumberState classes which
+// are used by the MergeFunctions pass for comparing functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instructions.h" 
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <tuple>
+
+namespace llvm {
+
+class APFloat;
+class APInt;
+class BasicBlock;
+class Constant;
+class Function;
+class GlobalValue;
+class InlineAsm;
+class Instruction;
+class MDNode;
+class Type;
+class Value;
+
+/// GlobalNumberState assigns an integer to each global value in the program,
+/// which is used by the comparison routine to order references to globals. This
+/// state must be preserved throughout the pass, because Functions and other
+/// globals need to maintain their relative order. Globals are assigned a number
+/// when they are first visited. This order is deterministic, and so the
+/// assigned numbers are as well. When two functions are merged, neither number
+/// is updated. If the symbols are weak, this would be incorrect. If they are
+/// strong, then one will be replaced at all references to the other, and so
+/// direct callsites will now see one or the other symbol, and no update is
+/// necessary. Note that if we were guaranteed unique names, we could just
+/// compare those, but this would not work for stripped bitcodes or for those
+/// few symbols without a name.
+class GlobalNumberState {
+  struct Config : ValueMapConfig<GlobalValue *> {
+    enum { FollowRAUW = false };
+  };
+
+  // Each GlobalValue is mapped to an identifier. The Config ensures when RAUW
+  // occurs, the mapping does not change. Tracking changes is unnecessary, and
+  // also problematic for weak symbols (which may be overwritten).
+  using ValueNumberMap = ValueMap<GlobalValue *, uint64_t, Config>;
+  ValueNumberMap GlobalNumbers;
+
+  // The next unused serial number to assign to a global.
+  uint64_t NextNumber = 0;
+
+public:
+  GlobalNumberState() = default;
+
+  uint64_t getNumber(GlobalValue* Global) {
+    ValueNumberMap::iterator MapIter;
+    bool Inserted;
+    std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
+    if (Inserted)
+      NextNumber++;
+    return MapIter->second;
+  }
+
+  void erase(GlobalValue *Global) {
+    GlobalNumbers.erase(Global);
+  }
+
+  void clear() {
+    GlobalNumbers.clear();
+  }
+};
+
+/// FunctionComparator - Compares two functions to determine whether or not
+/// they will generate machine code with the same behaviour. DataLayout is
+/// used if available. The comparator always fails conservatively (erring on the
+/// side of claiming that two functions are different).
+class FunctionComparator {
+public:
+  FunctionComparator(const Function *F1, const Function *F2,
+                     GlobalNumberState* GN)
+      : FnL(F1), FnR(F2), GlobalNumbers(GN) {}
+
+  /// Compare the two functions, returning 0 if they have equivalent behaviour.
+  int compare();
+
+  /// Hash a function. Equivalent functions will have the same hash, and unequal
+  /// functions will have different hashes with high probability.
+  using FunctionHash = uint64_t;
+  static FunctionHash functionHash(Function &);
+
+protected:
+  /// Start the comparison.
+  void beginCompare() {
+    sn_mapL.clear();
+    sn_mapR.clear();
+  }
+
+  /// Compares the signature and other general attributes of the two functions.
+  int compareSignature() const;
+
+  /// Test whether two basic blocks have equivalent behaviour.
+  int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR) const;
+
+  /// Constants comparison.
+  /// It is analogous to a lexicographical comparison between hypothetical
+  /// numbers of the following format:
+  /// <bitcastability-trait><raw-bit-contents>
+  ///
+  /// 1. Bitcastability.
+  /// Check whether L's type could be losslessly bitcasted to R's type.
+  /// At this stage, if a lossless bitcast is not possible, the method returns
+  /// -1 or 1, thus also defining which type is greater in the context of
+  /// bitcastability.
+  /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
+  ///          to the contents comparison.
+  ///          If types differ, remember the types comparison result and check
+  ///          whether we can still bitcast the types.
+  /// Stage 1: Types that satisfy the isFirstClassType condition are always
+  ///          greater than others.
+  /// Stage 2: A vector is greater than a non-vector.
+  ///          If both types are vectors, the vector with the greater bitwidth
+  ///          is greater.
+  ///          If both types are vectors with the same bitwidth, the types are
+  ///          bitcastable, and we can skip the other stages and go to the
+  ///          contents comparison.
+  /// Stage 3: Pointer types are greater than non-pointers. If both types are
+  ///          pointers of the same address space, go to the contents
+  ///          comparison. With different address spaces, the pointer with the
+  ///          greater address space is greater.
+  /// Stage 4: The types are neither vectors nor pointers, and they differ.
+  ///          We don't know how to bitcast them, so we had better not do it,
+  ///          and instead return the types comparison result (which then
+  ///          determines the relationship among constants we don't know how
+  ///          to bitcast).
+  ///
+  /// Just for clarity, let's see how the set of constants could look
+  /// on a single-dimension axis:
+  ///
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  /// Where: NFCT - Not a FirstClassType
+  ///        FCT - FirstClassType
+  ///
+  /// 2. Compare raw contents.
+  /// This stage ignores types and only compares the bits of L and R.
+  /// Returns 0 if L and R have equivalent contents,
+  /// -1 or 1 if the values are different.
+  /// Pretty trivial:
+  /// 2.1. If the contents are numbers, compare the numbers.
+  ///    Ints with a greater bitwidth are greater; ints with the same bitwidth
+  ///    are compared by their contents.
+  /// 2.2. "And so on". To avoid discrepancies with these comments,
+  /// it is perhaps better to read the implementation itself.
+  /// 3. And again about the overall picture, let's look back at how the
+  /// ordered set of constants will look:
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  ///
+  /// Now look, what could be inside [FCT, "others"], for example:
+  /// [FCT, "others"] =
+  /// [
+  ///   [double 0.1], [double 1.23],
+  ///   [i32 1], [i32 2],
+  ///   { double 1.0 },       ; StructTyID, NumElements = 1
+  ///   { i32 1 },            ; StructTyID, NumElements = 1
+  ///   { double 1, i32 1 },  ; StructTyID, NumElements = 2
+  ///   { i32 1, double 1 }   ; StructTyID, NumElements = 2
+  /// ]
+  ///
+  /// Let's explain the order. Floats will be less than integers, simply
+  /// because of the cmpType terms: FloatTyID < IntegerTyID.
+  /// Floats (with the same fltSemantics) are sorted according to their value.
+  /// Then come the integers, which, like the floats, are easily sorted among
+  /// themselves.
+  /// The structures are grouped at the tail, again because of their TypeID:
+  /// StructTyID > IntegerTyID > FloatTyID.
+  /// Structures with a greater number of elements are greater, and structures
+  /// whose greater elements come first are greater.
+  /// The same logic applies to vectors, arrays and other complex types.
+  ///
+  /// Bitcastable constants.
+  /// Let's assume that some constant belongs to a group of
+  /// "so-called-equal" values with different types, and at the same time
+  /// belongs to another group of constants with equal types
+  /// and "really" equal values.
+  ///
+  /// Now, prove that this is impossible:
+  ///
+  /// If constant A with type TyA is bitcastable to B with type TyB, then:
+  /// 1. All constants with types equal to TyA are bitcastable to B. Since
+  ///    those must be vectors (if TyA is a vector), pointers
+  ///    (if TyA is a pointer), or else (if TyA is equal to TyB), those types
+  ///    must be equal to TyB.
+  /// 2. All constants with non-equal but bitcastable types to TyA are
+  ///    bitcastable to B.
+  ///    Once again, just because we allow it for vectors and pointers only.
+  ///    This statement can be expanded as below:
+  /// 2.1. All vectors with a bitwidth equal to vector A's have a bitwidth
+  ///      equal to vector B's, and are thus bitcastable to B as well.
+  /// 2.2. All pointers of the same address space, no matter what they point
+  ///      to, are bitcastable. So if C is a pointer, it can be bitcasted to A
+  ///      and to B.
+  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
+  /// QED.
+  ///
+  /// In other words, for pointers and vectors, we ignore the top-level type
+  /// and look at their particular properties (bitwidth for vectors, and
+  /// address space for pointers).
+  /// If these properties are equal, compare their contents.
+  int cmpConstants(const Constant *L, const Constant *R) const;
+
+  /// Compares two global values by number. Uses the GlobalNumberState to
+  /// identify the same globals across function calls.
+  int cmpGlobalValues(GlobalValue *L, GlobalValue *R) const;
+
+  /// Assign or look up previously assigned numbers for the two values, and
+  /// return whether the numbers are equal. Numbers are assigned in the order
+  /// visited.
+  /// Comparison order:
+  /// Stage 0: A value that is the function itself is always greater than others.
+  ///          If left and right values are references to their functions, then
+  ///          they are equal.
+  /// Stage 1: Constants are greater than non-constants.
+  ///          If both left and right are constants, then the result of
+  ///          cmpConstants is used as cmpValues result.
+  /// Stage 2: InlineAsm instances are greater than others. If both left and
+  ///          right are InlineAsm instances, the InlineAsm* pointers are
+  ///          casted to integers and compared as numbers.
+  /// Stage 3: For all other cases we compare the order in which we met these
+  ///          values in their functions. If the right value was met first
+  ///          during scanning, then the left value is greater.
+  ///          In other words, we compare serial numbers; for more details
+  ///          see the comments for sn_mapL and sn_mapR.
+  int cmpValues(const Value *L, const Value *R) const;
+
+  /// Compare two Instructions for equivalence, similar to
+  /// Instruction::isSameOperationAs.
+  ///
+  /// Stages are listed in "most significant stage first" order:
+  /// On each stage below, we do comparison between some left and right
+  /// operation parts. If parts are non-equal, we assign parts comparison
+  /// result to the operation comparison result and exit from method.
+  /// Otherwise we proceed to the next stage.
+  /// Stages:
+  /// 1. Operations opcodes. Compared as numbers.
+  /// 2. Number of operands.
+  /// 3. Operation types. Compared with cmpType method.
+  /// 4. Compare operation subclass optional data as stream of bytes:
+  /// just convert it to integers and call cmpNumbers.
+  /// 5. Compare the operation's operand types with cmpType, in
+  /// most-significant-operand-first order.
+  /// 6. Last stage. Check operations for some specific attributes.
+  /// For example, for Load it would be:
+  /// 6.1.Load: volatile (as boolean flag)
+  /// 6.2.Load: alignment (as integer numbers)
+  /// 6.3.Load: ordering (as underlying enum class value)
+  /// 6.4.Load: synch-scope (as integer numbers)
+  /// 6.5.Load: range metadata (as integer ranges)
+  /// At this stage it's better to see the code, since it's no more than 10-15
+  /// lines per particular instruction, and could change over time.
+  ///
+  /// Sets \p needToCmpOperands to true if the operands of the instructions
+  /// still must be compared afterwards. In this case it's already guaranteed
+  /// that both instructions have the same number of operands.
+  int cmpOperations(const Instruction *L, const Instruction *R,
+                    bool &needToCmpOperands) const;
+
+  /// cmpTypes - compares two types and
+  /// defines a total ordering over the set of types.
+  ///
+  /// Return values:
+  /// 0 if types are equal,
+  /// -1 if Left is less than Right,
+  /// +1 if Left is greater than Right.
+  ///
+  /// Description:
+  /// The comparison is broken into stages. As in a lexicographical comparison,
+  /// a stage coming earlier has higher priority.
+  /// At each stage described below, keep the total-ordering properties in mind.
+  ///
+  /// 0. Before the comparison we coerce pointer types of address space 0 to
+  /// integer.
+  /// We also don't bother when the left and right types are the same, and
+  /// just return 0 in this case.
+  ///
+  /// 1. If types are of different kind (different type IDs).
+  ///    Return result of type IDs comparison, treating them as numbers.
+  /// 2. If types are integers, check that they have the same width. If they
+  /// are vectors, check that they have the same count and subtype.
+  /// 3. Types have the same ID, so check whether they are one of:
+  /// * Void
+  /// * Float
+  /// * Double
+  /// * X86_FP80
+  /// * FP128
+  /// * PPC_FP128
+  /// * Label
+  /// * Metadata
+  /// We can treat these types as equal whenever their IDs are the same.
+  /// 4. If Left and Right are pointers, return result of address space
+  /// comparison (numbers comparison). We can treat pointer types of same
+  /// address space as equal.
+  /// 5. If the types are complex, both Left and Right are expanded and their
+  /// element types are checked in the same way. If we get Res != 0 at some
+  /// stage, return it. Otherwise return 0.
+  /// 6. For all other cases put llvm_unreachable.
+  int cmpTypes(Type *TyL, Type *TyR) const;
+
+  int cmpNumbers(uint64_t L, uint64_t R) const;
+  int cmpAPInts(const APInt &L, const APInt &R) const;
+  int cmpAPFloats(const APFloat &L, const APFloat &R) const;
+  int cmpMem(StringRef L, StringRef R) const;
+
+  // The two functions undergoing comparison.
+  const Function *FnL, *FnR;
+
+private:
+  int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
+  int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
+  int cmpAttrs(const AttributeList L, const AttributeList R) const;
+  int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;
+  int cmpOperandBundlesSchema(const Instruction *L, const Instruction *R) const;
+
+  /// Compare two GEPs for equivalent pointer arithmetic.
+  /// Parts to be compared for each comparison stage,
+  /// most significant stage first:
+  /// 1. Address space. As numbers.
+  /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
+  /// 3. Pointer operand type (using cmpType method).
+  /// 4. Number of operands.
+  /// 5. Compare operands, using cmpValues method.
+  int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) const;
+  int cmpGEPs(const GetElementPtrInst *GEPL,
+              const GetElementPtrInst *GEPR) const {
+    return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
+  }
+
+  /// Assign serial numbers to values from left function, and values from
+  /// right function.
+  /// Explanation:
+  /// When comparing functions we need to compare the values we meet on the
+  /// left and right sides.
+  /// It's easy to sort things out for external values: it just has to be
+  /// the same value on the left and on the right.
+  /// But for local values (those introduced inside the function body)
+  /// we have to ensure they were introduced at exactly the same place,
+  /// and play the same role.
+  /// Let's assign a serial number to each value when we meet it for the first
+  /// time. Values that were met at the same place will have the same serial
+  /// numbers. With that in place, it is worth explaining a few points about
+  /// the values assigned to BBs and other implementation choices (see below).
+  ///
+  /// 1. Safety of BB reordering.
+  /// It's safe to change the order of BasicBlocks in function.
+  /// Relationship with other functions and serial numbering will not be
+  /// changed in this case.
+  /// As follows from FunctionComparator::compare(), we do a CFG walk: we start
+  /// from the entry and then take each terminator. So it doesn't matter how
+  /// the BBs are in fact ordered in the function. And since cmpValues is
+  /// called during this walk, the numbering depends only on how the BBs are
+  /// located inside the CFG. So the answer is: yes, we will get the same
+  /// numbering.
+  ///
+  /// 2. Impossibility to use dominance properties of values.
+  /// If we compare two instruction operands, where the first is a use of
+  /// local variable AL from function FL and the second is a use of local
+  /// variable AR from FR, we could compare their origins and check whether
+  /// they are defined at the same place.
+  /// But we are still not able to compare the operands of PHI nodes, since
+  /// those could be operands from later BBs we haven't scanned yet.
+  /// So it's impossible to use dominance properties in general.
+  mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;
+
+  // The global state we will use
+  GlobalNumberState* GlobalNumbers;
+};
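+
+// A minimal usage sketch (illustrative only; assumes two functions `F1` and
+// `F2` in scope): a result of 0 means "equivalent behaviour".
+//
+//   GlobalNumberState GN;
+//   FunctionComparator FC(F1, F2, &GN);
+//   bool Equivalent = FC.compare() == 0;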
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
new file mode 100644
index 0000000..b9fbef0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -0,0 +1,119 @@
+//===- FunctionImportUtils.h - Importing support utilities -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionImportGlobalProcessing class which is used
+// to perform the necessary global value handling for function importing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+
+namespace llvm {
+class Module;
+
+/// Class to handle necessary GlobalValue changes required by ThinLTO
+/// function importing, including linkage changes and any necessary renaming.
+class FunctionImportGlobalProcessing {
+  /// The Module which we are exporting or importing functions from.
+  Module &M;
+
+  /// Module summary index passed in for function importing/exporting handling.
+  const ModuleSummaryIndex &ImportIndex;
+
+  /// Globals to import from this module; all other functions will be
+  /// imported as declarations instead of definitions.
+  SetVector<GlobalValue *> *GlobalsToImport;
+
+  /// Set to true if the given ModuleSummaryIndex contains any functions
+  /// from this source module, in which case we must conservatively assume
+  /// that any of its functions may be imported into another module
+  /// as part of a different backend compilation process.
+  bool HasExportedFunctions = false;
+
+  /// Set of llvm.*used values, in order to validate that we don't try
+  /// to promote any non-renamable values.
+  SmallPtrSet<GlobalValue *, 8> Used;
+
+  /// Check if we should promote the given local value to global scope.
+  bool shouldPromoteLocalToGlobal(const GlobalValue *SGV);
+
+#ifndef NDEBUG
+  /// Check if the given value is a local that can't be renamed (promoted).
+  /// Only used in assertion checking, and disabled under NDEBUG since the Used
+  /// set will not be populated.
+  bool isNonRenamableLocal(const GlobalValue &GV) const;
+#endif
+
+  /// Helper methods to check if we are importing from or potentially
+  /// exporting from the current source module.
+  bool isPerformingImport() const { return GlobalsToImport != nullptr; }
+  bool isModuleExporting() const { return HasExportedFunctions; }
+
+  /// If we are importing from the source module, checks if we should
+  /// import SGV as a definition, otherwise import as a declaration.
+  bool doImportAsDefinition(const GlobalValue *SGV);
+
+  /// Get the name for SGV that should be used in the linked destination
+  /// module. Specifically, this handles the case where we need to rename
+  /// a local that is being promoted to global scope, which it will always
+  /// do when \p DoPromote is true (or when importing a local).
+  std::string getName(const GlobalValue *SGV, bool DoPromote);
+
+  /// Process globals so that they can be used in ThinLTO. This includes
+  /// promoting local variables so that they can be referenced externally by
+  /// ThinLTO imported globals and converting strong external globals to
+  /// available_externally.
+  void processGlobalsForThinLTO();
+  void processGlobalForThinLTO(GlobalValue &GV);
+
+  /// Get the new linkage for SGV that should be used in the linked destination
+  /// module. Specifically, for ThinLTO importing or exporting it may need
+  /// to be adjusted. When \p DoPromote is true then we must adjust the
+  /// linkage for a required promotion of a local to global scope.
+  GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);
+
+public:
+  FunctionImportGlobalProcessing(
+      Module &M, const ModuleSummaryIndex &Index,
+      SetVector<GlobalValue *> *GlobalsToImport = nullptr)
+      : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport) {
+    // If we have a ModuleSummaryIndex but no function to import,
+    // then this is the primary module being compiled in a ThinLTO
+    // backend compilation, and we need to see if it has functions that
+    // may be exported to another backend compilation.
+    if (!GlobalsToImport)
+      HasExportedFunctions = ImportIndex.hasExportedFunctions(M);
+
+#ifndef NDEBUG
+    // First collect those in the llvm.used set.
+    collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
+    // Next collect those in the llvm.compiler.used set.
+    collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
+#endif
+  }
+
+  bool run();
+
+  static bool doImportAsDefinition(const GlobalValue *SGV,
+                                   SetVector<GlobalValue *> *GlobalsToImport);
+};
+
+/// Perform in-place global value handling on the given Module for
+/// exported local functions renamed and promoted for ThinLTO.
+bool renameModuleForThinLTO(
+    Module &M, const ModuleSummaryIndex &Index,
+    SetVector<GlobalValue *> *GlobalsToImport = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h b/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h
new file mode 100644
index 0000000..8cc265b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -0,0 +1,85 @@
+//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#include "llvm/Support/AtomicOrdering.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class Value;
+
+/// It is safe to destroy a constant iff it is itself only used by constants.
+/// Note that constants cannot be cyclic, so this test is pretty easy to
+/// implement recursively.
+///
+bool isSafeToDestroyConstant(const Constant *C);
+
+/// As we analyze each global, keep track of some information about it.  If we
+/// find out that the address of the global is taken, none of this info will be
+/// accurate.
+struct GlobalStatus {
+  /// True if the global's address is used in a comparison.
+  bool IsCompared = false;
+
+  /// True if the global is ever loaded.  If the global isn't ever loaded it
+  /// can be deleted.
+  bool IsLoaded = false;
+
+  /// Keep track of what stores to the global look like.
+  enum StoredType {
+    /// There is no store to this global.  It can thus be marked constant.
+    NotStored,
+
+    /// This global is stored to, but the only thing stored is the constant it
+    /// was initialized with. This is only tracked for scalar globals.
+    InitializerStored,
+
+    /// This global is stored to, but only its initializer and one other value
+    /// is ever stored to it.  If this global isStoredOnce, we track the value
+    /// stored to it in StoredOnceValue below.  This is only tracked for scalar
+    /// globals.
+    StoredOnce,
+
+    /// This global is stored to by multiple values or something else that we
+    /// cannot track.
+    Stored
+  } StoredType = NotStored;
+
+  /// If only one value (besides the initializer constant) is ever stored to
+  /// this global, keep track of what value it is.
+  Value *StoredOnceValue = nullptr;
+
+  /// These start out null/false.  When the first accessing function is noticed,
+  /// it is recorded. When a second different accessing function is noticed,
+  /// HasMultipleAccessingFunctions is set to true.
+  const Function *AccessingFunction = nullptr;
+  bool HasMultipleAccessingFunctions = false;
+
+  /// Set to true if this global has a user that is not an instruction (e.g. a
+  /// constant expr or GV initializer).
+  bool HasNonInstructionUser = false;
+
+  /// Set to the strongest atomic ordering requirement.
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+
+  GlobalStatus();
+
+  /// Look at all uses of the global and fill in the GlobalStatus structure.  If
+  /// the global has its address taken, return true to indicate we can't do
+  /// anything with it.
+  static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
+};
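+
+// A minimal usage sketch (illustrative only; assumes a global `GV` in scope):
+//
+//   GlobalStatus GS;
+//   if (!GlobalStatus::analyzeGlobal(GV, GS) && !GS.IsLoaded)
+//     ; // Address not taken and never loaded: stores to GV are dead.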
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h b/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
new file mode 100644
index 0000000..b7a3d13
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
@@ -0,0 +1,107 @@
+//===-- ImportedFunctionsInliningStats.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Generating inliner statistics for imported functions, mostly useful for
+// ThinLTO.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Module;
+class Function;
+/// \brief Calculate and dump ThinLTO specific inliner stats.
+/// The main statistics are:
+/// (1) Number of inlined imported functions,
+/// (2) Number of imported functions inlined into the importing module
+/// (indirect),
+/// (3) Number of non-imported functions inlined into the importing module
+/// (indirect).
+/// The difference between the first and the second is that the first stat
+/// counts all performed inlines of imported functions, while the second one
+/// counts only the functions that have eventually been inlined into a function
+/// in the importing module (by a chain of inlines). Because LLVM uses a
+/// bottom-up inliner, it is possible to e.g. import functions `A` and `B`,
+/// then inline `B` into `A`, after which `A` might be too big to be inlined
+/// into some other function that calls it. This statistic is calculated by
+/// building a graph, where the nodes are functions and the edges are performed
+/// inlines, and then marking the edges starting from non-imported functions.
+///
+/// If `Verbose` is set to true, then it also dumps statistics
+/// for each inlined function, sorted by the greatest inline count, such as
+/// - number of performed inlines
+/// - number of performed inlines into the importing module
+class ImportedFunctionsInliningStatistics {
+private:
+  /// InlineGraphNode represents node in graph of inlined functions.
+  struct InlineGraphNode {
+    // Default-constructible and movable.
+    InlineGraphNode() = default;
+    InlineGraphNode(InlineGraphNode &&) = default;
+    InlineGraphNode &operator=(InlineGraphNode &&) = default;
+
+    llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
+    /// Incremented every direct inline.
+    int32_t NumberOfInlines = 0;
+    /// Number of inlines into non imported function (possibly indirect via
+    /// intermediate inlines). Computed based on graph search.
+    int32_t NumberOfRealInlines = 0;
+    bool Imported = false;
+    bool Visited = false;
+  };
+
+public:
+  ImportedFunctionsInliningStatistics() = default;
+  ImportedFunctionsInliningStatistics(
+      const ImportedFunctionsInliningStatistics &) = delete;
+
+  /// Set information like AllFunctions, ImportedFunctions, ModuleName.
+  void setModuleInfo(const Module &M);
+  /// Record inline of @param Callee to @param Caller for statistics.
+  void recordInline(const Function &Caller, const Function &Callee);
+  /// Dump stats computed with InlinerStatistics class.
+  /// If @param Verbose is true then separate statistics for every inlined
+  /// function will be printed.
+  void dump(bool Verbose);
+
+private:
+  /// Creates a new node in NodeMap and sets its attributes, or returns the
+  /// existing one.
+  InlineGraphNode &createInlineGraphNode(const Function &);
+  void calculateRealInlines();
+  void dfs(InlineGraphNode &GraphNode);
+
+  using NodesMapTy =
+      llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
+  using SortedNodesTy =
+      std::vector<const NodesMapTy::MapEntryTy*>;
+  /// Returns a vector of elements sorted by
+  /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
+  SortedNodesTy getSortedNodes();
+
+private:
+  /// This map manages the lifetime of all InlineGraphNodes. A unique pointer
+  /// to InlineGraphNode is used because the node pointers are also saved in
+  /// the InlinedCallees vector. If the map stored InlineGraphNode by value,
+  /// the addresses of the nodes would not be stable.
+  NodesMapTy NodesMap;
+  /// Non-external functions that have some other function inlined inside.
+  std::vector<StringRef> NonImportedCallers;
+  int AllFunctions = 0;
+  int ImportedFunctions = 0;
+  StringRef ModuleName;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h b/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 0000000..0ec3321
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,73 @@
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32-bit and 64-bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+  class BinaryOperator;
+}
+
+namespace llvm {
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. This currently generates code using the udiv
+  /// expansion, but future work includes generating more specialized code,
+  /// e.g. when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainder(BinaryOperator *Rem);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. This currently generates code similarly to compiler-rt's
+  /// implementations, but future work includes generating more specialized code
+  /// when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+  ///
+  /// @brief Replace Div with generated code.
+  bool expandDivision(BinaryOperator* Div);
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 32-bit Rem, which
+  /// makes it useful for targets with little or no support for arithmetic
+  /// narrower than 32 bits.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainderUpTo32Bits(BinaryOperator *Rem);
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 64-bit Rem.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 32-bit Div, which makes it useful for
+  /// targets with little or no support for arithmetic narrower than 32 bits.
+  ///
+  /// @brief Replace Div with generated code.
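+  ///
+  /// A hedged usage sketch (\c I is a placeholder instruction reference
+  /// encountered while walking a function):
+  /// \code
+  ///   if (auto *Div = dyn_cast<BinaryOperator>(&I))
+  ///     if (Div->getOpcode() == Instruction::UDiv &&
+  ///         Div->getType()->isIntegerTy(32))
+  ///       expandDivisionUpTo32Bits(Div); // Div is replaced with expansion
+  /// \endcode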
+  bool expandDivisionUpTo32Bits(BinaryOperator *Div);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 64-bit Div.
+  ///
+  /// @brief Replace Div with generated code.
+  bool expandDivisionUpTo64Bits(BinaryOperator *Div);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h b/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h
new file mode 100644
index 0000000..fe717e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h
@@ -0,0 +1,44 @@
+//===- LCSSA.h - Loop-closed SSA transform Pass -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass transforms loops by placing phi nodes at the end of the loops for
+// all values that are live across the loop boundary.  For example, it turns
+// the left into the right code:
+//
+// for (...)                for (...)
+//   if (c)                   if (c)
+//     X1 = ...                 X1 = ...
+//   else                     else
+//     X2 = ...                 X2 = ...
+//   X3 = phi(X1, X2)         X3 = phi(X1, X2)
+// ... = X3 + 4             X4 = phi(X3)
+//                          ... = X4 + 4
+//
+// This is still valid LLVM; the extra phi nodes are purely redundant, and will
+// be trivially eliminated by InstCombine.  The major benefit of this
+// transformation is that it makes many other loop optimizations, such as
+// LoopUnswitching, simpler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LCSSA_H
+#define LLVM_TRANSFORMS_UTILS_LCSSA_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Converts loops into loop-closed SSA form.
+class LCSSAPass : public PassInfoMixin<LCSSAPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LCSSA_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h b/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
new file mode 100644
index 0000000..c9df532
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
@@ -0,0 +1,27 @@
+//===- LibCallsShrinkWrap.h - Shrink Wrap Library Calls -------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+#define LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LibCallsShrinkWrapPass : public PassInfoMixin<LibCallsShrinkWrapPass> {
+public:
+  static StringRef name() { return "LibCallsShrinkWrapPass"; }
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h
new file mode 100644
index 0000000..ea4d2cd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h
@@ -0,0 +1,35 @@
+//===- LoopRotationUtils.h - Utilities to perform loop rotation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utilities to convert a loop into a loop with bottom test.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class ScalarEvolution;
+struct SimplifyQuery;
+class TargetTransformInfo;
+
+/// \brief Convert a loop into a loop with bottom test.
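+///
+/// A hedged usage sketch (the analyses and \c SQ are assumed to come from the
+/// caller; \c MaxHeaderSize bounds how much of the header may be duplicated):
+/// \code
+///   bool Rotated = LoopRotation(L, /*MaxHeaderSize=*/16, &LI, &TTI, &AC,
+///                               &DT, &SE, SQ);
+/// \endcode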
+bool LoopRotation(Loop *L, unsigned MaxHeaderSize, LoopInfo *LI,
+                  const TargetTransformInfo *TTI, AssumptionCache *AC,
+                  DominatorTree *DT, ScalarEvolution *SE,
+                  const SimplifyQuery &SQ);
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
new file mode 100644
index 0000000..f3828bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
@@ -0,0 +1,65 @@
+//===- LoopSimplify.h - Loop Canonicalization Pass --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs several transformations to transform natural loops into a
+// simpler form, which makes subsequent analyses and transformations simpler and
+// more effective.
+//
+// Loop pre-header insertion guarantees that there is a single, non-critical
+// entry edge from outside of the loop to the loop header.  This simplifies a
+// number of analyses and transformations, such as LICM.
+//
+// Loop exit-block insertion guarantees that all exit blocks from the loop
+// (blocks which are outside of the loop that have predecessors inside of the
+// loop) only have predecessors from inside of the loop (and are thus dominated
+// by the loop header).  This simplifies transformations such as store-sinking
+// that are built into LICM.
+//
+// This pass also guarantees that loops will have exactly one backedge.
+//
+// Indirectbr instructions introduce several complications. If the loop
+// contains or is entered by an indirectbr instruction, it may not be possible
+// to transform the loop and make these guarantees. Client code should check
+// that these conditions are true before relying on them.
+//
+// Note that the simplifycfg pass will clean up blocks which are split out but
+// end up being unnecessary, so usage of this pass should not pessimize
+// generated code.
+//
+// This pass obviously modifies the CFG, but updates loop information and
+// dominator information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+#define LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// This pass is responsible for loop canonicalization.
+class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Simplify each loop in a loop nest recursively.
+///
+/// This takes a potentially un-simplified loop L (and its children) and turns
+/// it into a simplified loop nest with preheaders and single backedges. It will
+/// update \c AliasAnalysis and \c ScalarEvolution analyses if they're non-null.
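+///
+/// A hedged usage sketch, simplifying every top-level loop in a function
+/// (the analyses are assumed to be available from the caller):
+/// \code
+///   bool Changed = false;
+///   for (Loop *L : LI)
+///     Changed |= simplifyLoop(L, &DT, &LI, &SE, &AC,
+///                             /*PreserveLCSSA=*/false);
+/// \endcode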
+bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
+                  AssumptionCache *AC, bool PreserveLCSSA);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
new file mode 100644
index 0000000..131a4b0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
@@ -0,0 +1,546 @@
+//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop transformation utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DemandedBits.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+class AliasSet;
+class AliasSetTracker;
+class BasicBlock;
+class DataLayout;
+class Loop;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PredicatedScalarEvolution;
+class PredIteratorCache;
+class ScalarEvolution;
+class SCEV;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The RecurrenceDescriptor is used to identify recurrence variables in a
+/// loop. Reduction is a special case of recurrence that has uses of the
+/// recurrence variable outside the loop. The method isReductionPHI identifies
+/// reductions that are basic recurrences.
+///
+/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
+/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
+/// array[i]; } is a summation of array elements. Basic recurrences are a
+/// special case of chains of recurrences (CR). See ScalarEvolution for CR
+/// references.
+
+/// This struct holds information about recurrence variables.
+class RecurrenceDescriptor {
+public:
+  /// This enum represents the kinds of recurrences that we support.
+  enum RecurrenceKind {
+    RK_NoRecurrence,  ///< Not a recurrence.
+    RK_IntegerAdd,    ///< Sum of integers.
+    RK_IntegerMult,   ///< Product of integers.
+    RK_IntegerOr,     ///< Bitwise or logical OR of numbers.
+    RK_IntegerAnd,    ///< Bitwise or logical AND of numbers.
+    RK_IntegerXor,    ///< Bitwise or logical XOR of numbers.
+    RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
+    RK_FloatAdd,      ///< Sum of floats.
+    RK_FloatMult,     ///< Product of floats.
+    RK_FloatMinMax    ///< Min/max implemented in terms of select(cmp()).
+  };
+
+  // This enum represents the kind of minmax recurrence.
+  enum MinMaxRecurrenceKind {
+    MRK_Invalid,
+    MRK_UIntMin,
+    MRK_UIntMax,
+    MRK_SIntMin,
+    MRK_SIntMax,
+    MRK_FloatMin,
+    MRK_FloatMax
+  };
+
+  RecurrenceDescriptor() = default;
+
+  RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
+                       MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
+                       bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+      : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
+        UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+    CastInsts.insert(CI.begin(), CI.end());
+  }
+
+  /// This POD struct holds information about a potential recurrence operation.
+  class InstDesc {
+  public:
+    InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
+        : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
+          UnsafeAlgebraInst(UAI) {}
+
+    InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
+        : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
+          UnsafeAlgebraInst(UAI) {}
+
+    bool isRecurrence() { return IsRecurrence; }
+
+    bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+    Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+    MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
+
+    Instruction *getPatternInst() { return PatternLastInst; }
+
+  private:
+    // Is this instruction a recurrence candidate?
+    bool IsRecurrence;
+    // The last instruction in a min/max pattern (select of the select(icmp())
+    // pattern), or the current recurrence instruction otherwise.
+    Instruction *PatternLastInst;
+    // If this is a min/max pattern the comparison predicate.
+    MinMaxRecurrenceKind MinMaxKind;
+    // Recurrence has unsafe algebra.
+    Instruction *UnsafeAlgebraInst;
+  };
+
+  /// Returns a struct describing if the instruction 'I' can be a recurrence
+  /// variable of type 'Kind'. If the recurrence is a min/max pattern of
+  /// select(icmp()) this function advances the instruction pointer 'I' from the
+  /// compare instruction to the select instruction and stores this pointer in
+  /// 'PatternLastInst' member of the returned struct.
+  static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+                                    InstDesc &Prev, bool HasFunNoNaNAttr);
+
+  /// Returns true if instruction I has multiple uses in Insts.
+  static bool hasMultipleUsesOf(Instruction *I,
+                                SmallPtrSetImpl<Instruction *> &Insts);
+
+  /// Returns true if all uses of the instruction I are within the Set.
+  static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+
+  /// Returns a struct describing whether the instruction is a
+  /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
+  /// or max(X, Y).
+  static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
+
+  /// Returns the identity corresponding to the RecurrenceKind.
+  static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
+
+  /// Returns the opcode of the binary operation corresponding to the
+  /// RecurrenceKind.
+  static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
+
+  /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+  static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK,
+                               Value *Left, Value *Right);
+
+  /// Returns true if Phi is a reduction of type Kind and adds it to the
+  /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
+  /// non-null, the minimal bit width needed to compute the reduction will be
+  /// computed.
+  static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
+                              bool HasFunNoNaNAttr,
+                              RecurrenceDescriptor &RedDes,
+                              DemandedBits *DB = nullptr,
+                              AssumptionCache *AC = nullptr,
+                              DominatorTree *DT = nullptr);
+
+  /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor
+  /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
+  /// non-null, the minimal bit width needed to compute the reduction will be
+  /// computed.
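+  ///
+  /// A hedged usage sketch (\c Phi and \c TheLoop come from the caller):
+  /// \code
+  ///   RecurrenceDescriptor RedDes;
+  ///   if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
+  ///     // e.g. RK_IntegerAdd for an accumulator like 'total += array[i]'
+  ///     RecurrenceDescriptor::RecurrenceKind K = RedDes.getRecurrenceKind();
+  ///   }
+  /// \endcode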
+  static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
+                             RecurrenceDescriptor &RedDes,
+                             DemandedBits *DB = nullptr,
+                             AssumptionCache *AC = nullptr,
+                             DominatorTree *DT = nullptr);
+
+  /// Returns true if Phi is a first-order recurrence. A first-order recurrence
+  /// is a non-reduction recurrence relation in which the value of the
+  /// recurrence in the current loop iteration equals a value defined in the
+  /// previous iteration. \p SinkAfter includes pairs of instructions where the
+  /// first will be rescheduled to appear after the second if/when the loop is
+  /// vectorized. It may be augmented with additional pairs if needed in order
+  /// to handle Phi as a first-order recurrence.
+  static bool
+  isFirstOrderRecurrence(PHINode *Phi, Loop *TheLoop,
+                         DenseMap<Instruction *, Instruction *> &SinkAfter,
+                         DominatorTree *DT);
+
+  RecurrenceKind getRecurrenceKind() { return Kind; }
+
+  MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
+
+  TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
+
+  Instruction *getLoopExitInstr() { return LoopExitInstr; }
+
+  /// Returns true if the recurrence has unsafe algebra which requires a relaxed
+  /// floating-point model.
+  bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+  /// Returns first unsafe algebra instruction in the PHI node's use-chain.
+  Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+  /// Returns true if the recurrence kind is an integer kind.
+  static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns true if the recurrence kind is a floating point kind.
+  static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns true if the recurrence kind is an arithmetic kind.
+  static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns the type of the recurrence. This type can be narrower than the
+  /// actual type of the Phi if the recurrence has been type-promoted.
+  Type *getRecurrenceType() { return RecurrenceType; }
+
+  /// Returns a reference to the instructions used for type-promoting the
+  /// recurrence.
+  SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
+
+  /// Returns true if all source operands of the recurrence are SExtInsts.
+  bool isSigned() { return IsSigned; }
+
+private:
+  // The starting value of the recurrence.
+  // It does not have to be zero!
+  TrackingVH<Value> StartValue;
+  // The instruction whose value is used outside the loop.
+  Instruction *LoopExitInstr = nullptr;
+  // The kind of the recurrence.
+  RecurrenceKind Kind = RK_NoRecurrence;
+  // If this is a min/max recurrence, the kind of the recurrence.
+  MinMaxRecurrenceKind MinMaxKind = MRK_Invalid;
+  // First occurrence of unsafe algebra in the PHI's use-chain.
+  Instruction *UnsafeAlgebraInst = nullptr;
+  // The type of the recurrence.
+  Type *RecurrenceType = nullptr;
+  // True if all source operands of the recurrence are SExtInsts.
+  bool IsSigned = false;
+  // Instructions used for type-promoting the recurrence.
+  SmallPtrSet<Instruction *, 8> CastInsts;
+};
+
+/// A struct for saving information about induction variables.
+class InductionDescriptor {
+public:
+  /// This enum represents the kinds of inductions that we support.
+  enum InductionKind {
+    IK_NoInduction,  ///< Not an induction variable.
+    IK_IntInduction, ///< Integer induction variable. Step = C.
+    IK_PtrInduction, ///< Pointer induction var. Step = C / sizeof(elem).
+    IK_FpInduction   ///< Floating point induction variable.
+  };
+
+public:
+  /// Default constructor - creates an invalid induction.
+  InductionDescriptor() = default;
+
+  /// Get the consecutive direction. Returns:
+  ///   0 - unknown or non-consecutive.
+  ///   1 - consecutive and increasing.
+  ///  -1 - consecutive and decreasing.
+  int getConsecutiveDirection() const;
+
+  /// Compute the transformed value of Index at offset StartValue using step
+  /// StepValue.
+  /// For integer induction, returns StartValue + Index * StepValue.
+  /// For pointer induction, returns StartValue[Index * StepValue].
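+  /// For example (an illustrative case), an integer induction with start
+  /// value 4 and step 3 maps Index 2 to 4 + 2 * 3 = 10.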
+  /// FIXME: The newly created binary instructions should contain nsw/nuw
+  /// flags, which can be found from the original scalar operations.
+  Value *transform(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
+                   const DataLayout& DL) const;
+
+  Value *getStartValue() const { return StartValue; }
+  InductionKind getKind() const { return IK; }
+  const SCEV *getStep() const { return Step; }
+  ConstantInt *getConstIntStepValue() const;
+
+  /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
+  /// induction, the induction descriptor \p D will contain the data describing
+  /// this induction. If by some other means the caller has a better SCEV
+  /// expression for \p Phi than the one returned by the ScalarEvolution
+  /// analysis, it can be passed through \p Expr. If the def-use chain 
+  /// associated with the phi includes casts (that we know we can ignore
+  /// under proper runtime checks), they are passed through \p CastsToIgnore.
+  static bool 
+  isInductionPHI(PHINode *Phi, const Loop* L, ScalarEvolution *SE,
+                 InductionDescriptor &D, const SCEV *Expr = nullptr,
+                 SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);
+
+  /// Returns true if \p Phi is a floating point induction in the loop \p L.
+  /// If \p Phi is an induction, the induction descriptor \p D will contain 
+  /// the data describing this induction.
+  static bool isFPInductionPHI(PHINode *Phi, const Loop* L,
+                               ScalarEvolution *SE, InductionDescriptor &D);
+
+  /// Returns true if \p Phi is a loop \p L induction, in the context associated
+  /// with the run-time predicate of PSE. If \p Assume is true, this can add
+  /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
+  /// induction.
+  /// If \p Phi is an induction, \p D will contain the data describing this
+  /// induction.
+  static bool isInductionPHI(PHINode *Phi, const Loop* L,
+                             PredicatedScalarEvolution &PSE,
+                             InductionDescriptor &D, bool Assume = false);
+
+  /// Returns true if the induction type is FP and the binary operator does
+  /// not have the "fast-math" property. Such operation requires a relaxed FP
+  /// mode.
+  bool hasUnsafeAlgebra() {
+    return InductionBinOp && !cast<FPMathOperator>(InductionBinOp)->isFast();
+  }
+
+  /// Returns induction operator that does not have "fast-math" property
+  /// and requires FP unsafe mode.
+  Instruction *getUnsafeAlgebraInst() {
+    if (!InductionBinOp || cast<FPMathOperator>(InductionBinOp)->isFast())
+      return nullptr;
+    return InductionBinOp;
+  }
+
+  /// Returns binary opcode of the induction operator.
+  Instruction::BinaryOps getInductionOpcode() const {
+    return InductionBinOp ? InductionBinOp->getOpcode() :
+      Instruction::BinaryOpsEnd;
+  }
+
+  /// Returns a reference to the type cast instructions in the induction
+  /// update chain that are redundant when guarded with a runtime
+  /// SCEV overflow check.
+  const SmallVectorImpl<Instruction *> &getCastInsts() const { 
+    return RedundantCasts; 
+  }
+
+private:
+  /// Private constructor - used by \c isInductionPHI.
+  InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
+                      BinaryOperator *InductionBinOp = nullptr,
+                      SmallVectorImpl<Instruction *> *Casts = nullptr);
+
+  /// Start value.
+  TrackingVH<Value> StartValue;
+  /// Induction kind.
+  InductionKind IK = IK_NoInduction;
+  /// Step value.
+  const SCEV *Step = nullptr;
+  // Instruction that advances induction variable.
+  BinaryOperator *InductionBinOp = nullptr;
+  // Instructions used for type-casts of the induction variable,
+  // that are redundant when guarded with a runtime SCEV overflow check.
+  SmallVector<Instruction *, 2> RedundantCasts;
+};
+
+BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
+                                   bool PreserveLCSSA);
+
+/// Ensure that all exit blocks of the loop are dedicated exits.
+///
+/// For any loop exit block with non-loop predecessors, we split the loop
+/// predecessors to use a dedicated loop exit block. We update the dominator
+/// tree and loop info if provided, and will preserve LCSSA if requested.
+bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
+                             bool PreserveLCSSA);
+
+/// Ensures LCSSA form for every instruction from the Worklist in the scope of
+/// the innermost containing loop.
+///
+/// For each given instruction that has uses outside of the loop, an LCSSA PHI
+/// node is inserted and the uses outside the loop are rewritten to use this
+/// node.
+///
+/// LoopInfo and DominatorTree are required and, since the routine makes no
+/// changes to CFG, preserved.
+///
+/// Returns true if any modifications are made.
+bool formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
+                              DominatorTree &DT, LoopInfo &LI);
+
+/// \brief Put loop into LCSSA form.
+///
+/// Looks at all instructions in the loop which have uses outside of the
+/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
+/// the loop are rewritten to use this node.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
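+///
+/// A hedged usage sketch (\c L, \c DT, \c LI and \c SE from the caller):
+/// \code
+///   bool Changed = formLCSSA(*L, DT, &LI, &SE);
+/// \endcode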
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution *SE);
+
+/// \brief Put a loop nest into LCSSA form.
+///
+/// This recursively forms LCSSA for a loop nest.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
+                          ScalarEvolution *SE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as
+/// arguments. Diagnostics are emitted via \p ORE. It returns the changed
+/// status.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+                TargetLibraryInfo *, TargetTransformInfo *, Loop *,
+                AliasSetTracker *, LoopSafetyInfo *,
+                OptimizationRemarkEmitter *ORE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree.  This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
+/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
+/// loop and loop safety information as arguments. Diagnostics are emitted via
+/// \p ORE. It returns the changed status.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+                 TargetLibraryInfo *, Loop *, AliasSetTracker *,
+                 LoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
+
+/// This function deletes dead loops. The caller of this function needs to
+/// guarantee that the loop is in fact dead.
+/// The function requires a number of prerequisites to be present:
+///   - The loop needs to be in LCSSA form
+///   - The loop needs to have a Preheader
+///   - A unique dedicated exit block must exist
+///
+/// This also updates the relevant analysis information in \p DT, \p SE, and \p
+/// LI if pointers to those are provided.
+/// It also updates the loop PM if an updater struct is provided.
+void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
+                    LoopInfo *LI);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop.  We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes a set of must-alias values, Loop exit blocks
+/// vector, loop exit blocks insertion point vector, PredIteratorCache,
+/// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
+/// of the loop and loop safety information as arguments.
+/// Diagnostics are emitted via \p ORE. It returns the changed status.
+bool promoteLoopAccessesToScalars(const SmallSetVector<Value *, 8> &,
+                                  SmallVectorImpl<BasicBlock *> &,
+                                  SmallVectorImpl<Instruction *> &,
+                                  PredIteratorCache &, LoopInfo *,
+                                  DominatorTree *, const TargetLibraryInfo *,
+                                  Loop *, AliasSetTracker *, LoopSafetyInfo *,
+                                  OptimizationRemarkEmitter *);
+
+/// Does a BFS from a given node to all of its children inside a given loop.
+/// The returned vector of nodes includes the starting point.
+SmallVector<DomTreeNode *, 16> collectChildrenInLoop(DomTreeNode *N,
+                                                     const Loop *CurLoop);
+
+/// \brief Returns the instructions that use values defined in the loop.
+SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
+
+/// \brief Find string metadata for loop
+///
+/// If it has a value (e.g. {"llvm.distribute", 1}) return the value as an
+/// operand or null otherwise.  If the string metadata is not found return
+/// Optional's not-a-value.
+Optional<const MDOperand *> findStringMetadataForLoop(Loop *TheLoop,
+                                                      StringRef Name);
+
+/// \brief Set the input string into loop metadata while keeping other values
+/// intact.
+void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
+                             unsigned V = 0);
+
+/// \brief Get a loop's estimated trip count based on branch weight metadata.
+/// Returns 0 when the count is estimated to be 0, or None when a meaningful
+/// estimate cannot be made.
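+///
+/// A hedged usage sketch:
+/// \code
+///   if (Optional<unsigned> TC = getLoopEstimatedTripCount(L))
+///     if (*TC < 8) {
+///       // e.g. skip a transform that only pays off for longer loops
+///     }
+/// \endcode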
+Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
+
+/// Helper to consistently add the set of standard passes to a loop pass's \c
+/// AnalysisUsage.
+///
+/// All loop passes should call this as part of implementing their \c
+/// getAnalysisUsage.
+void getLoopAnalysisUsage(AnalysisUsage &AU);
+
+/// Returns true if the hoister and sinker can handle this instruction.
+/// If SafetyInfo is null, we are checking for sinking instructions from
+/// preheader to loop body (no speculation).
+/// If SafetyInfo is not null, we are checking for hoisting/sinking
+/// instructions from loop body to preheader/exit. Check if the instruction
+/// can execute speculatively.
+/// If \p ORE is set use it to emit optimization remarks.
+bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+                        Loop *CurLoop, AliasSetTracker *CurAST,
+                        LoopSafetyInfo *SafetyInfo,
+                        OptimizationRemarkEmitter *ORE = nullptr);
+
+/// Generates a vector reduction using shufflevectors to reduce the value.
+Value *getShuffleReduction(IRBuilder<> &Builder, Value *Src, unsigned Op,
+                           RecurrenceDescriptor::MinMaxRecurrenceKind
+                               MinMaxKind = RecurrenceDescriptor::MRK_Invalid,
+                           ArrayRef<Value *> RedOps = ArrayRef<Value *>());
+
+/// Create a target reduction of the given vector. The reduction operation
+/// is described by the \p Opcode parameter. min/max reductions require
+/// additional information supplied in \p Flags.
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+Value *
+createSimpleTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
+                            unsigned Opcode, Value *Src,
+                            TargetTransformInfo::ReductionFlags Flags =
+                                TargetTransformInfo::ReductionFlags(),
+                            ArrayRef<Value *> RedOps = ArrayRef<Value *>());
+
+/// Create a generic target reduction using a recurrence descriptor \p Desc.
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+Value *createTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
+                             RecurrenceDescriptor &Desc, Value *Src,
+                             bool NoNaN = false);
+
+/// Get the intersection (logical and) of all of the potential IR flags
+/// of each scalar operation (VL) that will be converted into a vector (I).
+/// If OpValue is non-null, we only consider operations similar to OpValue
+/// when intersecting.
+/// Flag set: NSW, NUW, exact, and all of fast-math.
+void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
new file mode 100644
index 0000000..fa5d784
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -0,0 +1,152 @@
+//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a utility class to perform loop versioning.  The versioned
+// loop speculates that otherwise may-aliasing memory accesses don't overlap and
+// emits checks to prove this.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+namespace llvm {
+
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class ScalarEvolution;
+
+/// \brief This class emits a version of the loop where run-time checks ensure
+/// that may-alias pointers can't overlap.
+///
+/// It currently only supports single-exit loops and assumes that the loop
+/// already has a preheader.
+class LoopVersioning {
+public:
+  /// \brief Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
+  /// It uses the runtime checks provided by the user. If \p UseLAIChecks is
+  /// true, the default checks made by LAI are retained. Otherwise, the object
+  /// is constructed with no checks and the user is expected to add them.
+  LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+                 DominatorTree *DT, ScalarEvolution *SE,
+                 bool UseLAIChecks = true);
+
+  /// \brief Performs the CFG manipulation part of versioning the loop including
+  /// the DominatorTree and LoopInfo updates.
+  ///
+  /// The loop that was used to construct the class will be the "versioned" loop
+  /// i.e. the loop that will receive control if all the memchecks pass.
+  ///
+  /// This allows the loop transform pass to operate on the same loop regardless
+  /// of whether versioning was necessary or not:
+  ///
+  ///    for each loop L:
+  ///        analyze L
+  ///        if versioning is necessary version L
+  ///        transform L
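+  ///
+  /// A hedged usage sketch (the analyses are assumed from the caller):
+  /// \code
+  ///   LoopVersioning LVer(LAI, L, &LI, &DT, &SE);
+  ///   LVer.versionLoop();             // emit memchecks and fall-back loop
+  ///   LVer.annotateLoopWithNoAlias(); // exploit the checks via metadata
+  /// \endcode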
+  void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }
+
+  /// \brief Same as above, but if the client has already precomputed the set
+  /// of values used outside the loop, this API allows passing it in.
+  void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+  /// \brief Returns the versioned loop.  Control flows here if pointers in the
+  /// loop don't alias (i.e. all memchecks passed).  (This loop is actually the
+  /// same as the original loop that we got constructed with.)
+  Loop *getVersionedLoop() { return VersionedLoop; }
+
+  /// \brief Returns the fall-back loop.  Control flows here if pointers in the
+  /// loop may alias (i.e. one of the memchecks failed).
+  Loop *getNonVersionedLoop() { return NonVersionedLoop; }
+
+  /// \brief Sets the runtime alias checks for versioning the loop.
+  void setAliasChecks(
+      SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks);
+
+  /// \brief Sets the runtime SCEV checks for versioning the loop.
+  void setSCEVChecks(SCEVUnionPredicate Check);
+
+  /// \brief Annotate memory instructions in the versioned loop with no-alias
+  /// metadata based on the memchecks issued.
+  ///
+  /// This is just a wrapper that calls prepareNoAliasMetadata and
+  /// annotateInstWithNoAlias on the instructions of the versioned loop.
+  void annotateLoopWithNoAlias();
+
+  /// \brief Set up the aliasing scopes based on the memchecks.  This needs to
+  /// be called before the first call to annotateInstWithNoAlias.
+  void prepareNoAliasMetadata();
+
+  /// \brief Add the noalias annotations to \p VersionedInst.
+  ///
+  /// \p OrigInst is the instruction corresponding to \p VersionedInst in the
+  /// original loop.  Initialize the aliasing scopes with
+  /// prepareNoAliasMetadata once before this can be called.
+  void annotateInstWithNoAlias(Instruction *VersionedInst,
+                               const Instruction *OrigInst);
+
+private:
+  /// \brief Adds the necessary PHI nodes for the versioned loops based on the
+  /// loop-defined values used outside of the loop.
+  ///
+  /// This needs to be called after versionLoop if there are defs in the loop
+  /// that are used outside the loop.
+  void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+  /// \brief Add the noalias annotations to \p I.  Initialize the aliasing
+  /// scopes with prepareNoAliasMetadata once before this can be called.
+  void annotateInstWithNoAlias(Instruction *I) {
+    annotateInstWithNoAlias(I, I);
+  }
+
+  /// \brief The original loop.  This becomes the "versioned" one.  I.e.,
+  /// control flows here if pointers in the loop don't alias.
+  Loop *VersionedLoop;
+  /// \brief The fall-back loop.  I.e. control flows here if pointers in the
+  /// loop may alias (memchecks failed).
+  Loop *NonVersionedLoop;
+
+  /// \brief This maps the instructions from VersionedLoop to their counterpart
+  /// in NonVersionedLoop.
+  ValueToValueMapTy VMap;
+
+  /// \brief The set of alias checks that we are versioning for.
+  SmallVector<RuntimePointerChecking::PointerCheck, 4> AliasChecks;
+
+  /// \brief The set of SCEV checks that we are versioning for.
+  SCEVUnionPredicate Preds;
+
+  /// \brief Maps a pointer to the pointer checking group that the pointer
+  /// belongs to.
+  DenseMap<const Value *, const RuntimePointerChecking::CheckingPtrGroup *>
+      PtrToGroup;
+
+  /// \brief The alias scope corresponding to a pointer checking group.
+  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
+      GroupToScope;
+
+  /// \brief The list of alias scopes that a pointer checking group can't alias.
+  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
+      GroupToNonAliasingScopeList;
+
+  /// \brief Analyses used.
+  const LoopAccessInfo &LAI;
+  LoopInfo *LI;
+  DominatorTree *DT;
+  ScalarEvolution *SE;
+};
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h
new file mode 100644
index 0000000..12774c7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h
@@ -0,0 +1,30 @@
+//===- LowerInvoke.h - Eliminate Invoke instructions ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation is designed for use by code generators which do not yet
+// support stack unwinding.  This pass converts 'invoke' instructions to 'call'
+// instructions, so that any exception-handling 'landingpad' blocks become dead
+// code (which can be removed by running the '-simplifycfg' pass afterwards).
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+#define LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerInvokePass : public PassInfoMixin<LowerInvokePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
new file mode 100644
index 0000000..2b7d0f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -0,0 +1,56 @@
+//===- llvm/Transforms/Utils/LowerMemintrinsics.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lower memset, memcpy, memmove intrinsics to loops (e.g. for targets without
+// library support).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+#define LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+
+namespace llvm {
+
+class ConstantInt;
+class Instruction;
+class MemCpyInst;
+class MemMoveInst;
+class MemSetInst;
+class TargetTransformInfo;
+class Value;
+
+/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
+/// a compile-time constant. The loop will be inserted at \p InsertBefore.
+void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
+                                 Value *DstAddr, Value *CopyLen,
+                                 unsigned SrcAlign, unsigned DestAlign,
+                                 bool SrcIsVolatile, bool DstIsVolatile,
+                                 const TargetTransformInfo &TTI);
+
+/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
+/// compile time constant. Loop is inserted at \p InsertBefore.
+void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+                               Value *DstAddr, ConstantInt *CopyLen,
+                               unsigned SrcAlign, unsigned DestAlign,
+                               bool SrcIsVolatile, bool DstIsVolatile,
+                               const TargetTransformInfo &TTI);
+
+/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
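+///
+/// A hedged usage sketch (\c I is a placeholder instruction being visited;
+/// \c TTI comes from the caller). Note the intrinsic is not deleted for us:
+/// \code
+///   if (auto *MC = dyn_cast<MemCpyInst>(&I)) {
+///     expandMemCpyAsLoop(MC, TTI);
+///     MC->eraseFromParent();
+///   }
+/// \endcode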
+void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
+
+/// Expand \p MemMove as a loop. \p MemMove is not deleted.
+void expandMemMoveAsLoop(MemMoveInst *MemMove);
+
+/// Expand \p MemSet as a loop. \p MemSet is not deleted.
+void expandMemSetAsLoop(MemSetInst *MemSet);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h b/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h
new file mode 100644
index 0000000..4076843
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -0,0 +1,31 @@
+//===- Mem2Reg.h - The -mem2reg pass, a wrapper around the Utils lib ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is a simple pass wrapper around the PromoteMemToReg function call
+// exposed by the Utils library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MEM2REG_H
+#define LLVM_TRANSFORMS_UTILS_MEM2REG_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class PromotePass : public PassInfoMixin<PromotePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 0000000..4b9bc82
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,101 @@
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class Module;
+class Function;
+class GlobalValue;
+class GlobalVariable;
+class Constant;
+class StringRef;
+class Value;
+class Type;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it
+/// alongside other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
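+///
+/// A hedged usage sketch, registering a module constructor at the default
+/// lowest priority (\c InitFn is a placeholder):
+/// \code
+///   appendToGlobalCtors(M, InitFn, /*Priority=*/65535);
+/// \endcode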
+void appendToGlobalCtors(Module &M, Function *F, int Priority,
+                         Constant *Data = nullptr);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority,
+                         Constant *Data = nullptr);
+
+// Validate the result of Module::getOrInsertFunction called for an interface
+// function of a given sanitizer. If the instrumented module defines a function
+// with the same name, their prototypes must match, otherwise
+// getOrInsertFunction returns a bitcast.
+Function *checkSanitizerInterfaceFunction(Constant *FuncOrBitcast);
+
+Function *declareSanitizerInitFunction(Module &M, StringRef InitName,
+                                       ArrayRef<Type *> InitArgTypes);
+
+/// \brief Creates a sanitizer constructor function and calls the sanitizer's
+/// init function from it.
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
+std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
+    Module &M, StringRef CtorName, StringRef InitName,
+    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+    StringRef VersionCheckName = StringRef());
+
+/// Rename all the anon globals in the module using a hash computed from
+/// the list of public globals in the module.
+bool nameUnamedGlobals(Module &M);
+
+/// \brief Adds global values to the llvm.used list.
+void appendToUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// \brief Adds global values to the llvm.compiler.used list.
+void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// Filter out potentially dead comdat functions where other entries keep the
+/// entire comdat group alive.
+///
+/// This is designed for cases where functions appear to become dead but remain
+/// alive due to other live entries in their comdat group.
+///
+/// The \p DeadComdatFunctions container should only have pointers to
+/// `Function`s which are members of a comdat group and are believed to be
+/// dead.
+///
+/// After this routine finishes, the only remaining `Function`s in \p
+/// DeadComdatFunctions are those where every member of the comdat is listed
+/// and thus removing them is safe (provided *all* are removed).
+void filterDeadComdatFunctions(
+    Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
+
+/// \brief Produce a unique identifier for this module by taking the MD5 sum of
+/// the names of the module's strong external symbols that are not comdat
+/// members.
+///
+/// This identifier is normally guaranteed to be unique, or the program would
+/// fail to link due to multiply defined symbols.
+///
+/// If the module has no strong external symbols (such a module may still have a
+/// semantic effect if it performs global initialization), we cannot produce a
+/// unique identifier for this module, so we return the empty string.
+std::string getUniqueModuleId(Module *M);
+
+} // End llvm namespace
+
+#endif //  LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h b/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h
new file mode 100644
index 0000000..17fc902
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h
@@ -0,0 +1,33 @@
+//===-- NameAnonGlobals.h - Anonymous Global Naming Pass --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements naming anonymous globals to make sure they can be
+// referred to by ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+#define LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that provides a name to every anonymous global.
+class NameAnonGlobalPass : public PassInfoMixin<NameAnonGlobalPass> {
+public:
+  NameAnonGlobalPass() = default;
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h b/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h
new file mode 100644
index 0000000..165d4bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h
@@ -0,0 +1,54 @@
+//===- llvm/Transforms/Utils/OrderedInstructions.h -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an efficient way to check for dominance relation between 2
+// instructions.
+//
+// This interface dispatches to the appropriate dominance check given two
+// instructions: if the instructions are in the same basic block, an
+// OrderedBasicBlock (with instruction numbering and caching) is used;
+// otherwise, the dominator tree is used.
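+//
+// A hedged usage sketch (DT, I1, I2 and BB are placeholders from the caller):
+//
+//   OrderedInstructions OI(&DT);
+//   bool Doms = OI.dominates(I1, I2); // cheap even within a single block
+//   OI.invalidateBlock(BB);           // required after BB is mutated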
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+#define LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/OrderedBasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+
+class OrderedInstructions {
+  /// Used to check dominance for instructions in the same basic block.
+  mutable DenseMap<const BasicBlock *, std::unique_ptr<OrderedBasicBlock>>
+      OBBMap;
+
+  /// The dominator tree of the parent function.
+  DominatorTree *DT;
+
+public:
+  /// Constructor.
+  OrderedInstructions(DominatorTree *DT) : DT(DT) {}
+
+  /// Return true if the first instruction dominates the second.
+  bool dominates(const Instruction *, const Instruction *) const;
+
+  /// Invalidate the OrderedBasicBlock cache when its basic block changes.
+  /// That is, if an instruction is deleted from or added to the basic block,
+  /// the user should call this function to invalidate the OrderedBasicBlock
+  /// cache for this basic block.
+  void invalidateBlock(const BasicBlock *BB) { OBBMap.erase(BB); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
new file mode 100644
index 0000000..8150f15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -0,0 +1,295 @@
+//===- PredicateInfo.h - Build PredicateInfo ----------------------*-C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the PredicateInfo analysis, which creates an
+/// Extended SSA form for operations used in branch comparisons and llvm.assume
+/// comparisons.
+///
+/// Copies of these operations are inserted into the true/false edge (and after
+/// assumes), and information is attached to the copies.  All uses of the
+/// original operation in blocks dominated by the true/false edge (and assume)
+/// are replaced with uses of the copies.  This enables passes to easily and
+/// sparsely propagate condition-based info into the operations that may be
+/// affected.
+///
+/// Example:
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// ret i32 %x
+/// false:
+/// ret i32 1
+///
+/// will become
+///
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// %x.0 = call @llvm.ssa_copy.i32(i32 %x)
+/// ret i32 %x.0
+/// false:
+/// ret i32 1
+///
+/// Using getPredicateInfoFor on x.0 will give you the comparison it is
+/// dominated by (the icmp), and that you are located in the true edge of that
+/// comparison, which tells you x.0 is 50.
+///
+/// In order to reduce the number of copies inserted, predicate info is only
+/// inserted where it would actually be live.  This means that if there are no
+/// uses of an operation dominated by the branch edges, or by an assume, the
+/// associated predicate info is never inserted.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+#define LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/Utils/OrderedInstructions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class DominatorTree;
+class Function;
+class Instruction;
+class MemoryAccess;
+class LLVMContext;
+class raw_ostream;
+
+enum PredicateType { PT_Branch, PT_Assume, PT_Switch };
+
+// Base class for all predicate information we provide.
+// All of our predicate information has at least a comparison.
+class PredicateBase : public ilist_node<PredicateBase> {
+public:
+  PredicateType Type;
+  // The original operand before we renamed it.
+  // This can be used by passes, when destroying predicateinfo, to know
+  // whether they can just drop the intrinsic, or have to merge metadata.
+  Value *OriginalOp;
+  PredicateBase(const PredicateBase &) = delete;
+  PredicateBase &operator=(const PredicateBase &) = delete;
+  PredicateBase() = delete;
+  virtual ~PredicateBase() = default;
+
+protected:
+  PredicateBase(PredicateType PT, Value *Op) : Type(PT), OriginalOp(Op) {}
+};
+
+class PredicateWithCondition : public PredicateBase {
+public:
+  Value *Condition;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Assume || PB->Type == PT_Branch ||
+           PB->Type == PT_Switch;
+  }
+
+protected:
+  PredicateWithCondition(PredicateType PT, Value *Op, Value *Condition)
+      : PredicateBase(PT, Op), Condition(Condition) {}
+};
+
+// Provides predicate information for assumes.  Since assumes are always true,
+// we simply provide the assume instruction, so you can tell your position
+// relative to it.
+class PredicateAssume : public PredicateWithCondition {
+public:
+  IntrinsicInst *AssumeInst;
+  PredicateAssume(Value *Op, IntrinsicInst *AssumeInst, Value *Condition)
+      : PredicateWithCondition(PT_Assume, Op, Condition),
+        AssumeInst(AssumeInst) {}
+  PredicateAssume() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Assume;
+  }
+};
+
+// Mixin class for edge predicates.  The FROM block is the block where the
+// predicate originates, and the TO block is the block where the predicate is
+// valid.
+class PredicateWithEdge : public PredicateWithCondition {
+public:
+  BasicBlock *From;
+  BasicBlock *To;
+  PredicateWithEdge() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Branch || PB->Type == PT_Switch;
+  }
+
+protected:
+  PredicateWithEdge(PredicateType PType, Value *Op, BasicBlock *From,
+                    BasicBlock *To, Value *Cond)
+      : PredicateWithCondition(PType, Op, Cond), From(From), To(To) {}
+};
+
+// Provides predicate information for branches.
+class PredicateBranch : public PredicateWithEdge {
+public:
+  // If true, SplitBB is the true successor, otherwise it's the false successor.
+  bool TrueEdge;
+  PredicateBranch(Value *Op, BasicBlock *BranchBB, BasicBlock *SplitBB,
+                  Value *Condition, bool TakenEdge)
+      : PredicateWithEdge(PT_Branch, Op, BranchBB, SplitBB, Condition),
+        TrueEdge(TakenEdge) {}
+  PredicateBranch() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Branch;
+  }
+};
+
+class PredicateSwitch : public PredicateWithEdge {
+public:
+  Value *CaseValue;
+  // This is the switch instruction.
+  SwitchInst *Switch;
+  PredicateSwitch(Value *Op, BasicBlock *SwitchBB, BasicBlock *TargetBB,
+                  Value *CaseValue, SwitchInst *SI)
+      : PredicateWithEdge(PT_Switch, Op, SwitchBB, TargetBB,
+                          SI->getCondition()),
+        CaseValue(CaseValue), Switch(SI) {}
+  PredicateSwitch() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Switch;
+  }
+};
+
+// This name is used in a few places, so kick it into its own namespace
+namespace PredicateInfoClasses {
+struct ValueDFS;
+}
+
+/// \brief Encapsulates PredicateInfo, including all data associated with memory
+/// accesses.
+class PredicateInfo {
+private:
+  // Used to store information about each value we might rename.
+  struct ValueInfo {
+    // Information about each possible copy.  During processing, this holds
+    // every info we create.  After processing, we move the uninserted ones to
+    // the UninsertedInfos vector.
+    SmallVector<PredicateBase *, 4> Infos;
+    SmallVector<PredicateBase *, 4> UninsertedInfos;
+  };
+  // This owns all the predicate infos in the function, placed or not.
+  iplist<PredicateBase> AllInfos;
+
+public:
+  PredicateInfo(Function &, DominatorTree &, AssumptionCache &);
+  ~PredicateInfo();
+
+  void verifyPredicateInfo() const;
+
+  void dump() const;
+  void print(raw_ostream &) const;
+
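+  /// Illustrative client-side lookup (sketch; PI is a PredicateInfo and V a
+  /// renamed value):
+  /// \code
+  ///   if (const auto *PB = PI.getPredicateInfoFor(V))
+  ///     if (const auto *PBr = dyn_cast<PredicateBranch>(PB))
+  ///       ; // V is constrained by PBr->Condition on that branch edge
+  /// \endcode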
+  const PredicateBase *getPredicateInfoFor(const Value *V) const {
+    return PredicateMap.lookup(V);
+  }
+
+protected:
+  // Used by PredicateInfo annotater, dumpers, and wrapper pass.
+  friend class PredicateInfoAnnotatedWriter;
+  friend class PredicateInfoPrinterLegacyPass;
+
+private:
+  void buildPredicateInfo();
+  void processAssume(IntrinsicInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void processBranch(BranchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void processSwitch(SwitchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void renameUses(SmallPtrSetImpl<Value *> &);
+  using ValueDFS = PredicateInfoClasses::ValueDFS;
+  typedef SmallVectorImpl<ValueDFS> ValueDFSStack;
+  void convertUsesToDFSOrdered(Value *, SmallVectorImpl<ValueDFS> &);
+  Value *materializeStack(unsigned int &, ValueDFSStack &, Value *);
+  bool stackIsInScope(const ValueDFSStack &, const ValueDFS &) const;
+  void popStackUntilDFSScope(ValueDFSStack &, const ValueDFS &);
+  ValueInfo &getOrCreateValueInfo(Value *);
+  void addInfoFor(SmallPtrSetImpl<Value *> &OpsToRename, Value *Op,
+                  PredicateBase *PB);
+  const ValueInfo &getValueInfo(Value *) const;
+  Function &F;
+  DominatorTree &DT;
+  AssumptionCache &AC;
+  OrderedInstructions OI;
+  // This maps from copy operands to Predicate Info. Note that it does not own
+  // the Predicate Info, they belong to the ValueInfo structs in the ValueInfos
+  // vector.
+  DenseMap<const Value *, const PredicateBase *> PredicateMap;
+  // This stores info about each operand or comparison result we make copies
+  // of.  The real ValueInfos start at index 1, index 0 is unused so that we can
+  // more easily detect invalid indexing.
+  SmallVector<ValueInfo, 32> ValueInfos;
+  // This gives the index into the ValueInfos array for a given Value.  Because
+  // 0 is not a valid Value Info index, you can use DenseMap::lookup and tell
+  // whether it returned a valid result.
+  DenseMap<Value *, unsigned int> ValueInfoNums;
+  // The set of edges along which we can only handle phi uses, due to critical
+  // edges.
+  DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeUsesOnly;
+};
+
+// This pass does eager building and then printing of PredicateInfo. It is
+// used by the tests to build, dump, and verify PredicateInfo.
+class PredicateInfoPrinterLegacyPass : public FunctionPass {
+public:
+  PredicateInfoPrinterLegacyPass();
+
+  static char ID;
+  bool runOnFunction(Function &) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+/// \brief Printer pass for \c PredicateInfo.
+class PredicateInfoPrinterPass
+    : public PassInfoMixin<PredicateInfoPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit PredicateInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for \c PredicateInfo.
+struct PredicateInfoVerifierPass : PassInfoMixin<PredicateInfoVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
new file mode 100644
index 0000000..bb8a61a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -0,0 +1,46 @@
+//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to promote alloca instructions to SSA
+// registers, by using the SSA construction algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class AllocaInst;
+class DominatorTree;
+class AliasSetTracker;
+class AssumptionCache;
+
+/// \brief Return true if this alloca is legal for promotion.
+///
+/// This is true if there are only loads, stores, and lifetime markers
+/// (transitively) using this alloca. This also enforces that there is only
+/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
+/// markers.
+bool isAllocaPromotable(const AllocaInst *AI);
+
+/// \brief Promote the specified list of alloca instructions into scalar
+/// registers, inserting PHI nodes as appropriate.
+///
+/// This function makes use of DominanceFrontier information.  This function
+/// does not modify the CFG of the function at all.  All allocas must be from
+/// the same function.
+///
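+/// A typical collection loop looks like this (sketch; F, DT and AC are
+/// placeholders for the function, its DominatorTree and an AssumptionCache):
+/// \code
+///   SmallVector<AllocaInst *, 8> Allocas;
+///   for (Instruction &I : F.getEntryBlock())
+///     if (auto *AI = dyn_cast<AllocaInst>(&I))
+///       if (isAllocaPromotable(AI))
+///         Allocas.push_back(AI);
+///   if (!Allocas.empty())
+///     PromoteMemToReg(Allocas, DT, &AC);
+/// \endcode
+///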
+void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
+                     AssumptionCache *AC = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h
new file mode 100644
index 0000000..6cd9f15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -0,0 +1,173 @@
+//===- SSAUpdater.h - Unstructured SSA Update Tool --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class BasicBlock;
+class Instruction;
+class LoadInst;
+class PHINode;
+template <typename T> class SmallVectorImpl;
+template <typename T> class SSAUpdaterTraits;
+class Type;
+class Use;
+class Value;
+
+/// \brief Helper class for SSA formation on a set of values defined in
+/// multiple blocks.
+///
+/// This is used when code duplication or another unstructured
+/// transformation wants to rewrite a set of uses of one value with uses of a
+/// set of values.
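+///
+/// Typical usage (sketch; V, BB1/V1, BB2/V2 and UsesToRewrite are
+/// placeholders for the rewritten value, its available definitions, and the
+/// uses to rewrite):
+/// \code
+///   SSAUpdater SSA;
+///   SSA.Initialize(V->getType(), V->getName());
+///   SSA.AddAvailableValue(BB1, V1);
+///   SSA.AddAvailableValue(BB2, V2);
+///   for (Use *U : UsesToRewrite)
+///     SSA.RewriteUse(*U);
+/// \endcode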
+class SSAUpdater {
+  friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
+  /// This keeps track of which value to use on a per-block basis. When we
+  /// insert PHI nodes, we keep track of them here.
+  void *AV = nullptr;
+
+  /// ProtoType holds the type of the values being rewritten.
+  Type *ProtoType = nullptr;
+
+  /// PHI nodes are given a name based on ProtoName.
+  std::string ProtoName;
+
+  /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
+  /// the vector.
+  SmallVectorImpl<PHINode *> *InsertedPHIs;
+
+public:
+  /// If InsertedPHIs is specified, it will be filled
+  /// in with all PHI Nodes created by rewriting.
+  explicit SSAUpdater(SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
+  SSAUpdater(const SSAUpdater &) = delete;
+  SSAUpdater &operator=(const SSAUpdater &) = delete;
+  ~SSAUpdater();
+
+  /// \brief Reset this object to get ready for a new set of SSA updates with
+  /// type 'Ty'.
+  ///
+  /// PHI nodes get a name based on 'Name'.
+  void Initialize(Type *Ty, StringRef Name);
+
+  /// \brief Indicate that a rewritten value is available in the specified block
+  /// with the specified value.
+  void AddAvailableValue(BasicBlock *BB, Value *V);
+
+  /// \brief Return true if the SSAUpdater already has a value for the specified
+  /// block.
+  bool HasValueForBlock(BasicBlock *BB) const;
+
+  /// \brief Construct SSA form, materializing a value that is live at the end
+  /// of the specified block.
+  Value *GetValueAtEndOfBlock(BasicBlock *BB);
+
+  /// \brief Construct SSA form, materializing a value that is live in the
+  /// middle of the specified block.
+  ///
+  /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
+  /// in one important case: if there is a definition of the rewritten value
+  /// after the 'use' in BB.  Consider code like this:
+  ///
+  /// \code
+  ///      X1 = ...
+  ///   SomeBB:
+  ///      use(X)
+  ///      X2 = ...
+  ///      br Cond, SomeBB, OutBB
+  /// \endcode
+  ///
+  /// In this case, there are two values (X1 and X2) added to the AvailableVals
+  /// set by the client of the rewriter, and those values are both live out of
+  /// their respective blocks.  However, the use of X happens in the *middle* of
+  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
+  /// merge the appropriate values, and this value isn't live out of the block.
+  Value *GetValueInMiddleOfBlock(BasicBlock *BB);
+
+  /// \brief Rewrite a use of the symbolic value.
+  ///
+  /// This handles PHI nodes, which use their value in the corresponding
+  /// predecessor. Note that this will not work if the use is supposed to be
+  /// rewritten to a value defined in the same block as the use, but above it.
+  /// Any 'AddAvailableValue's added for the use's block will be considered to
+  /// be below it.
+  void RewriteUse(Use &U);
+
+  /// \brief Rewrite a use like \c RewriteUse but handling in-block definitions.
+  ///
+  /// This version of the method can rewrite uses in the same block as
+  /// a definition, because it assumes that all uses of a value are below any
+  /// inserted values.
+  void RewriteUseAfterInsertions(Use &U);
+
+private:
+  Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+};
+
+/// \brief Helper class for promoting a collection of loads and stores into SSA
+/// Form using the SSAUpdater.
+///
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
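+///
+/// Minimal sketch (real clients subclass to override the hooks; Insts is a
+/// placeholder holding the loads and stores of one value):
+/// \code
+///   SSAUpdater SSA;
+///   LoadAndStorePromoter LSP(Insts, SSA, "promoted");
+///   LSP.run(Insts);
+/// \endcode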
+class LoadAndStorePromoter {
+protected:
+  SSAUpdater &SSA;
+
+public:
+  LoadAndStorePromoter(ArrayRef<const Instruction *> Insts,
+                       SSAUpdater &S, StringRef Name = StringRef());
+  virtual ~LoadAndStorePromoter() = default;
+
+  /// \brief This does the promotion.
+  ///
+  /// Insts is a list of loads and stores to promote, and Name is the basename
+  /// for the PHIs to insert. After this is complete, the loads and stores are
+  /// removed from the code.
+  void run(const SmallVectorImpl<Instruction *> &Insts) const;
+
+  /// \brief Return true if the specified instruction is in the Inst list.
+  ///
+  /// The Insts list is the one passed into the constructor. Clients should
+  /// implement this with a more efficient version if possible.
+  virtual bool isInstInList(Instruction *I,
+                            const SmallVectorImpl<Instruction *> &Insts) const;
+
+  /// \brief This hook is invoked after all the stores are found and inserted as
+  /// available values.
+  virtual void doExtraRewritesBeforeFinalDeletion() const {}
+
+  /// \brief Clients can choose to implement this to get notified right before
+  /// a load is RAUW'd another value.
+  virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {}
+
+  /// \brief Called before each instruction is deleted.
+  virtual void instructionDeleted(Instruction *I) const {}
+
+  /// \brief Called to update debug info associated with the instruction.
+  virtual void updateDebugInfo(Instruction *I) const {}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 0000000..3c8bd17
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,469 @@
+//===- SSAUpdaterImpl.h - SSA Updater Implementation ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
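+// The template is parameterized by an SSAUpdaterTraits<UpdaterT>
+// specialization.  A rough sketch of the members this file relies on (see the
+// uses of Traits:: below; MyUpdater is a hypothetical client type):
+//
+//   template <> class SSAUpdaterTraits<MyUpdater> {
+//   public:
+//     using BlkT = ...; using ValT = ...; using PhiT = ...;
+//     static void FindPredecessorBlocks(BlkT *BB, SmallVectorImpl<BlkT *> *);
+//     static ValT GetUndefVal(BlkT *BB, MyUpdater *U);
+//     static ValT CreateEmptyPHI(BlkT *BB, unsigned NumPreds, MyUpdater *U);
+//     static void AddPHIOperand(PhiT *PHI, ValT Val, BlkT *Pred);
+//     static PhiT *ValueIsPHI(ValT Val, MyUpdater *U);
+//     static PhiT *ValueIsNewPHI(ValT Val, MyUpdater *U);
+//     static ValT GetPHIValue(PhiT *PHI);
+//     // ...plus block-successor and PHI-operand iterators.
+//   };
+//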
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "ssaupdater"
+
+namespace llvm {
+
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+  UpdaterT *Updater;
+
+  using Traits = SSAUpdaterTraits<UpdaterT>;
+  using BlkT = typename Traits::BlkT;
+  using ValT = typename Traits::ValT;
+  using PhiT = typename Traits::PhiT;
+
+  /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+  /// The predecessors of each block are cached here since pred_iterator is
+  /// slow and we need to iterate over the blocks at least a few times.
+  class BBInfo {
+  public:
+    // Back-pointer to the corresponding block.
+    BlkT *BB;
+
+    // Value to use in this block.
+    ValT AvailableVal;
+
+    // Block that defines the available value.
+    BBInfo *DefBB;
+
+    // Postorder number.
+    int BlkNum = 0;
+
+    // Immediate dominator.
+    BBInfo *IDom = nullptr;
+
+    // Number of predecessor blocks.
+    unsigned NumPreds = 0;
+
+    // Array[NumPreds] of predecessor blocks.
+    BBInfo **Preds = nullptr;
+
+    // Marker for existing PHIs that match.
+    PhiT *PHITag = nullptr;
+
+    BBInfo(BlkT *ThisBB, ValT V)
+      : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr) {}
+  };
+
+  using AvailableValsTy = DenseMap<BlkT *, ValT>;
+
+  AvailableValsTy *AvailableVals;
+
+  SmallVectorImpl<PhiT *> *InsertedPHIs;
+
+  using BlockListTy = SmallVectorImpl<BBInfo *>;
+  using BBMapTy = DenseMap<BlkT *, BBInfo *>;
+
+  BBMapTy BBMap;
+  BumpPtrAllocator Allocator;
+
+public:
+  explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+                          SmallVectorImpl<PhiT *> *Ins) :
+    Updater(U), AvailableVals(A), InsertedPHIs(Ins) {}
+
+  /// GetValue - Check to see if AvailableVals has an entry for the specified
+  /// BB and if so, return it.  If not, construct SSA form by first
+  /// calculating the required placement of PHIs and then inserting new PHIs
+  /// where needed.
+  ValT GetValue(BlkT *BB) {
+    SmallVector<BBInfo *, 100> BlockList;
+    BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+    // Special case: bail out if BB is unreachable.
+    if (BlockList.size() == 0) {
+      ValT V = Traits::GetUndefVal(BB, Updater);
+      (*AvailableVals)[BB] = V;
+      return V;
+    }
+
+    FindDominators(&BlockList, PseudoEntry);
+    FindPHIPlacement(&BlockList);
+    FindAvailableVals(&BlockList);
+
+    return BBMap[BB]->DefBB->AvailableVal;
+  }
+
+  /// BuildBlockList - Starting from the specified basic block, traverse back
+  /// through its predecessors until reaching blocks with known values.
+  /// Create BBInfo structures for the blocks and append them to the block
+  /// list.
+  BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+    SmallVector<BBInfo *, 10> RootList;
+    SmallVector<BBInfo *, 64> WorkList;
+
+    BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+    BBMap[BB] = Info;
+    WorkList.push_back(Info);
+
+    // Search backward from BB, creating BBInfos along the way and stopping
+    // when reaching blocks that define the value.  Record those defining
+    // blocks on the RootList.
+    SmallVector<BlkT *, 10> Preds;
+    while (!WorkList.empty()) {
+      Info = WorkList.pop_back_val();
+      Preds.clear();
+      Traits::FindPredecessorBlocks(Info->BB, &Preds);
+      Info->NumPreds = Preds.size();
+      if (Info->NumPreds == 0)
+        Info->Preds = nullptr;
+      else
+        Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
+            Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));
+
+      for (unsigned p = 0; p != Info->NumPreds; ++p) {
+        BlkT *Pred = Preds[p];
+        // Check if BBMap already has a BBInfo for the predecessor block.
+        typename BBMapTy::value_type &BBMapBucket =
+          BBMap.FindAndConstruct(Pred);
+        if (BBMapBucket.second) {
+          Info->Preds[p] = BBMapBucket.second;
+          continue;
+        }
+
+        // Create a new BBInfo for the predecessor.
+        ValT PredVal = AvailableVals->lookup(Pred);
+        BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+        BBMapBucket.second = PredInfo;
+        Info->Preds[p] = PredInfo;
+
+        if (PredInfo->AvailableVal) {
+          RootList.push_back(PredInfo);
+          continue;
+        }
+        WorkList.push_back(PredInfo);
+      }
+    }
+
+    // Now that we know what blocks are backwards-reachable from the starting
+    // block, do a forward depth-first traversal to assign postorder numbers
+    // to those blocks.
+    BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
+    unsigned BlkNum = 1;
+
+    // Initialize the worklist with the roots from the backward traversal.
+    while (!RootList.empty()) {
+      Info = RootList.pop_back_val();
+      Info->IDom = PseudoEntry;
+      Info->BlkNum = -1;
+      WorkList.push_back(Info);
+    }
+
+    while (!WorkList.empty()) {
+      Info = WorkList.back();
+
+      if (Info->BlkNum == -2) {
+        // All the successors have been handled; assign the postorder number.
+        Info->BlkNum = BlkNum++;
+        // If not a root, put it on the BlockList.
+        if (!Info->AvailableVal)
+          BlockList->push_back(Info);
+        WorkList.pop_back();
+        continue;
+      }
+
+      // Leave this entry on the worklist, but set its BlkNum to mark that its
+      // successors have been put on the worklist.  When it returns to the top
+      // the list, after handling its successors, it will be assigned a
+      // number.
+      Info->BlkNum = -2;
+
+      // Add unvisited successors to the work list.
+      for (typename Traits::BlkSucc_iterator SI =
+             Traits::BlkSucc_begin(Info->BB),
+             E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+        BBInfo *SuccInfo = BBMap[*SI];
+        if (!SuccInfo || SuccInfo->BlkNum)
+          continue;
+        SuccInfo->BlkNum = -1;
+        WorkList.push_back(SuccInfo);
+      }
+    }
+    PseudoEntry->BlkNum = BlkNum;
+    return PseudoEntry;
+  }
+
+  /// IntersectDominators - This is the dataflow lattice "meet" operation for
+  /// finding dominators.  Given two basic blocks, it walks up the dominator
+  /// tree until it finds a common dominator of both.  It uses the postorder
+  /// number of the blocks to determine how to do that.
+  BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+    while (Blk1 != Blk2) {
+      while (Blk1->BlkNum < Blk2->BlkNum) {
+        Blk1 = Blk1->IDom;
+        if (!Blk1)
+          return Blk2;
+      }
+      while (Blk2->BlkNum < Blk1->BlkNum) {
+        Blk2 = Blk2->IDom;
+        if (!Blk2)
+          return Blk1;
+      }
+    }
+    return Blk1;
+  }
+
+  /// FindDominators - Calculate the dominator tree for the subset of the CFG
+  /// corresponding to the basic blocks on the BlockList.  This uses the
+  /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+  /// and Kennedy, published in Software--Practice and Experience, 2001,
+  /// 4:1-10.  Because the CFG subset does not include any edges leading into
+  /// blocks that define the value, the results are not the usual dominator
+  /// tree.  The CFG subset has a single pseudo-entry node with edges to a set
+  /// of root nodes for blocks that define the value.  The dominators for this
+  /// subset CFG are not the standard dominators but they are adequate for
+  /// placing PHIs within the subset CFG.
+  void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+    bool Changed;
+    do {
+      Changed = false;
+      // Iterate over the list in reverse order, i.e., forward on CFG edges.
+      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+             E = BlockList->rend(); I != E; ++I) {
+        BBInfo *Info = *I;
+        BBInfo *NewIDom = nullptr;
+
+        // Iterate through the block's predecessors.
+        for (unsigned p = 0; p != Info->NumPreds; ++p) {
+          BBInfo *Pred = Info->Preds[p];
+
+          // Treat an unreachable predecessor as a definition with 'undef'.
+          if (Pred->BlkNum == 0) {
+            Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+            (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+            Pred->DefBB = Pred;
+            Pred->BlkNum = PseudoEntry->BlkNum;
+            PseudoEntry->BlkNum++;
+          }
+
+          if (!NewIDom)
+            NewIDom = Pred;
+          else
+            NewIDom = IntersectDominators(NewIDom, Pred);
+        }
+
+        // Check if the IDom value has changed.
+        if (NewIDom && NewIDom != Info->IDom) {
+          Info->IDom = NewIDom;
+          Changed = true;
+        }
+      }
+    } while (Changed);
+  }
+
+  /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+  /// any blocks containing definitions of the value.  If one is found, then
+  /// the successor of Pred is in the dominance frontier for the definition,
+  /// and this function returns true.
+  bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+    for (; Pred != IDom; Pred = Pred->IDom) {
+      if (Pred->DefBB == Pred)
+        return true;
+    }
+    return false;
+  }
+
+  /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+  /// of the known definitions.  Iteratively add PHIs in the dom frontiers
+  /// until nothing changes.  Along the way, keep track of the nearest
+  /// dominating definitions for non-PHI blocks.
+  void FindPHIPlacement(BlockListTy *BlockList) {
+    bool Changed;
+    do {
+      Changed = false;
+      // Iterate over the list in reverse order, i.e., forward on CFG edges.
+      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+             E = BlockList->rend(); I != E; ++I) {
+        BBInfo *Info = *I;
+
+        // If this block already needs a PHI, there is nothing to do here.
+        if (Info->DefBB == Info)
+          continue;
+
+        // Default to use the same def as the immediate dominator.
+        BBInfo *NewDefBB = Info->IDom->DefBB;
+        for (unsigned p = 0; p != Info->NumPreds; ++p) {
+          if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+            // Need a PHI here.
+            NewDefBB = Info;
+            break;
+          }
+        }
+
+        // Check if anything changed.
+        if (NewDefBB != Info->DefBB) {
+          Info->DefBB = NewDefBB;
+          Changed = true;
+        }
+      }
+    } while (Changed);
+  }
+
+  /// FindAvailableVal - If this block requires a PHI, first check if an
+  /// existing PHI matches the PHI placement and reaching definitions computed
+  /// earlier, and if not, create a new PHI.  Visit all the block's
+  /// predecessors to calculate the available value for each one and fill in
+  /// the incoming values for a new PHI.
+  void FindAvailableVals(BlockListTy *BlockList) {
+    // Go through the worklist in forward order (i.e., backward through the CFG)
+    // and check if existing PHIs can be used.  If not, create empty PHIs where
+    // they are needed.
+    for (typename BlockListTy::iterator I = BlockList->begin(),
+           E = BlockList->end(); I != E; ++I) {
+      BBInfo *Info = *I;
+      // Check if there needs to be a PHI in BB.
+      if (Info->DefBB != Info)
+        continue;
+
+      // Look for an existing PHI.
+      FindExistingPHI(Info->BB, BlockList);
+      if (Info->AvailableVal)
+        continue;
+
+      ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+      Info->AvailableVal = PHI;
+      (*AvailableVals)[Info->BB] = PHI;
+    }
+
+    // Now go back through the worklist in reverse order to fill in the
+    // arguments for any new PHIs added in the forward traversal.
+    for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+           E = BlockList->rend(); I != E; ++I) {
+      BBInfo *Info = *I;
+
+      if (Info->DefBB != Info) {
+        // Record the available value at join nodes to speed up subsequent
+        // uses of this SSAUpdater for the same value.
+        if (Info->NumPreds > 1)
+          (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+        continue;
+      }
+
+      // Check if this block contains a newly added PHI.
+      PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+      if (!PHI)
+        continue;
+
+      // Iterate through the block's predecessors.
+      for (unsigned p = 0; p != Info->NumPreds; ++p) {
+        BBInfo *PredInfo = Info->Preds[p];
+        BlkT *Pred = PredInfo->BB;
+        // Skip to the nearest preceding definition.
+        if (PredInfo->DefBB != PredInfo)
+          PredInfo = PredInfo->DefBB;
+        Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+      }
+
+      DEBUG(dbgs() << "  Inserted PHI: " << *PHI << "\n");
+
+      // If the client wants to know about all new instructions, tell it.
+      if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+    }
+  }
+
+  /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+  /// them match what is needed.
+  void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+    for (auto &SomePHI : BB->phis()) {
+      if (CheckIfPHIMatches(&SomePHI)) {
+        RecordMatchingPHIs(BlockList);
+        break;
+      }
+      // Match failed: clear all the PHITag values.
+      for (typename BlockListTy::iterator I = BlockList->begin(),
+             E = BlockList->end(); I != E; ++I)
+        (*I)->PHITag = nullptr;
+    }
+  }
+
+  /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+  /// in the BBMap.
+  bool CheckIfPHIMatches(PhiT *PHI) {
+    SmallVector<PhiT *, 20> WorkList;
+    WorkList.push_back(PHI);
+
+    // Mark that the block containing this PHI has been visited.
+    BBMap[PHI->getParent()]->PHITag = PHI;
+
+    while (!WorkList.empty()) {
+      PHI = WorkList.pop_back_val();
+
+      // Iterate through the PHI's incoming values.
+      for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+             E = Traits::PHI_end(PHI); I != E; ++I) {
+        ValT IncomingVal = I.getIncomingValue();
+        BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+        // Skip to the nearest preceding definition.
+        if (PredInfo->DefBB != PredInfo)
+          PredInfo = PredInfo->DefBB;
+
+        // Check if it matches the expected value.
+        if (PredInfo->AvailableVal) {
+          if (IncomingVal == PredInfo->AvailableVal)
+            continue;
+          return false;
+        }
+
+        // Check if the value is a PHI in the correct block.
+        PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+        if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+          return false;
+
+        // If this block has already been visited, check if this PHI matches.
+        if (PredInfo->PHITag) {
+          if (IncomingPHIVal == PredInfo->PHITag)
+            continue;
+          return false;
+        }
+        PredInfo->PHITag = IncomingPHIVal;
+
+        WorkList.push_back(IncomingPHIVal);
+      }
+    }
+    return true;
+  }
+
+  /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+  /// the BBMap and the AvailableVals mapping.
+  void RecordMatchingPHIs(BlockListTy *BlockList) {
+    for (typename BlockListTy::iterator I = BlockList->begin(),
+           E = BlockList->end(); I != E; ++I)
+      if (PhiT *PHI = (*I)->PHITag) {
+        BlkT *BB = PHI->getParent();
+        ValT PHIVal = Traits::GetPHIValue(PHI);
+        (*AvailableVals)[BB] = PHIVal;
+        BBMap[BB]->AvailableVal = PHIVal;
+      }
+  }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "ssaupdater"
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h b/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h
new file mode 100644
index 0000000..d36e342
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h
@@ -0,0 +1,56 @@
+//===- SanitizerStats.h - Sanitizer statistics gathering  -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares functions and data structures for sanitizer statistics gathering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+#define LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+// Number of bits in data that are used for the sanitizer kind. Needs to match
+// __sanitizer::kKindBits in compiler-rt/lib/stats/stats.h
+enum { kSanitizerStatKindBits = 3 };
+
+enum SanitizerStatKind {
+  SanStat_CFI_VCall,
+  SanStat_CFI_NVCall,
+  SanStat_CFI_DerivedCast,
+  SanStat_CFI_UnrelatedCast,
+  SanStat_CFI_ICall,
+};
+
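+/// Illustrative instrumentation sequence (sketch; M and InsertPt are
+/// placeholders for the module and an insertion point):
+/// \code
+///   SanitizerStatReport StatReport(&M);
+///   IRBuilder<> B(InsertPt);
+///   StatReport.create(B, SanStat_CFI_VCall); // at each instrumented site
+///   StatReport.finish();                     // once, when the module is done
+/// \endcode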
+struct SanitizerStatReport {
+  SanitizerStatReport(Module *M);
+
+  /// Generates code into B that increments a location-specific counter tagged
+  /// with the given sanitizer kind SK.
+  void create(IRBuilder<> &B, SanitizerStatKind SK);
+
+  /// Finalize module stats array and add global constructor to register it.
+  void finish();
+
+private:
+  Module *M;
+  GlobalVariable *ModuleStatsGV;
+  ArrayType *StatTy;
+  StructType *EmptyModuleStatsTy;
+
+  std::vector<Constant *> Inits;
+  ArrayType *makeModuleStatsArrayTy();
+  StructType *makeModuleStatsTy();
+};
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
new file mode 100644
index 0000000..a1dfed2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -0,0 +1,60 @@
+//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for induction variable simplification. It does
+// not define any actual pass or policy, but provides a single function to
+// simplify a loop's induction variables based on ScalarEvolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class CastInst;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class PHINode;
+class ScalarEvolution;
+class SCEVExpander;
+
+/// Interface for visiting interesting IV users that are recognized but not
+/// simplified by this utility.
+class IVVisitor {
+protected:
+  const DominatorTree *DT = nullptr;
+
+  virtual void anchor();
+
+public:
+  IVVisitor() = default;
+  virtual ~IVVisitor() = default;
+
+  const DominatorTree *getDomTree() const { return DT; }
+  virtual void visitCast(CastInst *Cast) = 0;
+};
+
+/// simplifyUsersOfIV - Simplify instructions that use this induction variable
+/// by using ScalarEvolution to analyze the IV's recurrence.
+bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
+                       LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
+                       SCEVExpander &Rewriter, IVVisitor *V = nullptr);
+
+/// simplifyLoopIVs - Simplify users of induction variables within this
+/// loop. This does not actually change or add IVs.
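+///
+/// Illustrative call (sketch; L, SE, DT and LI are placeholders for the loop
+/// and its analyses):
+/// \code
+///   SmallVector<WeakTrackingVH, 16> Dead;
+///   bool Changed = simplifyLoopIVs(L, SE, DT, LI, Dead);
+/// \endcode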
+bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+                     LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h
new file mode 100644
index 0000000..3f83861
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h
@@ -0,0 +1,31 @@
+//===- SimplifyInstructions.h - Remove redundant instructions ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility pass used for testing the InstructionSimplify analysis.
+// The analysis is applied to every instruction, and if an instruction
+// simplifies, it is replaced by the simplified value.  If you are looking for
+// a pass that performs serious instruction folding, use the instcombine pass
+// instead.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// This pass removes redundant instructions.
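+///
+/// Typically added to a function pipeline (illustrative):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(InstSimplifierPass());
+/// \endcode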
+class InstSimplifierPass : public PassInfoMixin<InstSimplifierPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 0000000..73a62f5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,183 @@
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class StringRef;
+class Value;
+class CallInst;
+class DataLayout;
+class Instruction;
+class TargetLibraryInfo;
+class BasicBlock;
+class Function;
+class OptimizationRemarkEmitter;
+
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
+/// replacing them, when possible, with their non-checking counterparts.
+/// Other optimizations can also be done, but it's possible to disable them and
+/// only simplify needless use of the checking versions (when the object size
+/// is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+  const TargetLibraryInfo *TLI;
+  bool OnlyLowerUnknownSize;
+
+public:
+  FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
+                             bool OnlyLowerUnknownSize = false);
+
+  /// \brief Take the given call instruction and return a more
+  /// optimal value to replace the instruction with, or null if a more
+  /// optimal form can't be found.
+  /// The call must not be an indirect call.
+  Value *optimizeCall(CallInst *CI);
+
+private:
+  Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+  // Str/Stp cpy are similar enough to be handled in the same functions.
+  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
+  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
+
+  /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+  /// to the non-fortified version.
+  bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+                               unsigned SizeOp, bool isString);
+};
+
+/// LibCallSimplifier - This class implements a collection of optimizations
+/// that replace well formed calls to library functions with a more optimal
+/// form.  For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
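+///
+/// Sketch of client-side use (illustrative; DL, TLI and ORE are placeholders
+/// for the module's DataLayout, TargetLibraryInfo and an
+/// OptimizationRemarkEmitter):
+/// \code
+///   LibCallSimplifier Simplifier(DL, &TLI, ORE);
+///   if (Value *With = Simplifier.optimizeCall(CI))
+///     ; // replace CI with With (note: With may equal CI; see optimizeCall)
+/// \endcode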
+class LibCallSimplifier {
+private:
+  FortifiedLibCallSimplifier FortifiedSimplifier;
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+  OptimizationRemarkEmitter &ORE;
+  bool UnsafeFPShrink;
+  function_ref<void(Instruction *, Value *)> Replacer;
+
+  /// \brief Internal wrapper for RAUW that is the default implementation.
+  ///
+  /// Other users may provide an alternate function with this signature instead
+  /// of this one.
+  static void replaceAllUsesWithDefault(Instruction *I, Value *With);
+
+  /// \brief Replace an instruction's uses with a value using our replacer.
+  void replaceAllUsesWith(Instruction *I, Value *With);
+
+public:
+  LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
+                    OptimizationRemarkEmitter &ORE,
+                    function_ref<void(Instruction *, Value *)> Replacer =
+                        &replaceAllUsesWithDefault);
+
+  /// optimizeCall - Take the given call instruction and return a more
+  /// optimal value to replace the instruction with, or null if a more
+  /// optimal form can't be found.  Note that the returned value may
+  /// be equal to the instruction being optimized.  In this case all
+  /// other instructions that use the given instruction were modified
+  /// and the given instruction is dead.
+  /// The call must not be an indirect call.
+  Value *optimizeCall(CallInst *CI);
+
+private:
+  // String and Memory Library Call Optimizations
+  Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeWcslen(CallInst *CI, IRBuilder<> &B);
+  // Wrapper for all String/Memory Library Call Optimizations
+  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
+
+  // Math Library Optimizations
+  Value *optimizeCAbs(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePow(CallInst *CI, IRBuilder<> &B);
+  Value *replacePowWithSqrt(CallInst *Pow, IRBuilder<> &B);
+  Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeLog(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+  // Wrapper for all floating point library call optimizations
+  Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
+                                      IRBuilder<> &B);
+
+  // Integer Library Call Optimizations
+  Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFls(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);
+
+  // Formatting and IO Library Call Optimizations
+  Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
+                                int StreamArg = -1);
+  Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePuts(CallInst *CI, IRBuilder<> &B);
+
+  // Helper methods
+  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B);
+  void classifyArgUse(Value *Val, Function *F, bool IsFloat,
+                      SmallVectorImpl<CallInst *> &SinCalls,
+                      SmallVectorImpl<CallInst *> &CosCalls,
+                      SmallVectorImpl<CallInst *> &SinCosCalls);
+  Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
+
+  /// hasFloatVersion - Checks if there is a float version of the specified
+  /// function by checking for an existing function with the name FuncName + "f".
+  bool hasFloatVersion(StringRef FuncName);
+
+  /// Shared code to optimize strlen+wcslen.
+  Value *optimizeStringLength(CallInst *CI, IRBuilder<> &B, unsigned CharSize);
+};
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h b/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h
new file mode 100644
index 0000000..d2c31f2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h
@@ -0,0 +1,43 @@
+//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function llvm::SplitModule, which splits a module
+// into multiple linkable partitions. It can be used to implement parallel code
+// generation for link-time optimization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+///
+/// FIXME: This function does not deal with the somewhat subtle symbol
+/// visibility issues around module splitting, including (but not limited to):
+///
+/// - Internal symbols should not collide with symbols defined outside the
+///   module.
+/// - Internal symbols defined in module-level inline asm should be visible to
+///   each partition.
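+///
+/// Illustrative call (sketch; Consume is a placeholder callback):
+/// \code
+///   SplitModule(std::move(M), 4, [&](std::unique_ptr<Module> MPart) {
+///     Consume(std::move(MPart));
+///   });
+/// \endcode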
+void SplitModule(
+    std::unique_ptr<Module> M, unsigned N,
+    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
+    bool PreserveLocals = false);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h b/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h
new file mode 100644
index 0000000..e0caf77
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -0,0 +1,142 @@
+//===- SymbolRewriter.h - Symbol Rewriting Pass -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the prototypes and definitions related to the Symbol
+// Rewriter pass.
+//
+// The Symbol Rewriter pass takes a set of rewrite descriptors which define
+// transformations for symbol names.  These can be either a single name-to-name
+// transformation or broader regular-expression-based transformations.
+//
+// All the functions are re-written at the IR level.  The Symbol Rewriter itself
+// is exposed as a module level pass.  All symbols at the module level are
+// iterated.  For any matching symbol, the requested transformation is applied,
+// updating references to it as well (a la RAUW).  The resulting binary will
+// only contain the rewritten symbols.
+//
+// By performing this operation in the compiler, we are able to catch symbols
+// that would otherwise not be possible to catch (e.g. inlined symbols).
+//
+// This makes it possible to cleanly transform symbols without resorting to
+// overly-complex macro tricks and the pre-processor.  An example of where this
+// is useful is the sanitizers where we would like to intercept a well-defined
+// set of functions across the module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+
+#include "llvm/IR/PassManager.h"
+#include <list>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class MemoryBuffer;
+class Module;
+class ModulePass;
+
+namespace yaml {
+
+class KeyValueNode;
+class MappingNode;
+class ScalarNode;
+class Stream;
+
+} // end namespace yaml
+
+namespace SymbolRewriter {
+
+/// The basic entity representing a rewrite operation.  It serves as the base
+/// class for any rewrite descriptor.  It has a certain set of specializations
+/// which describe a particular rewrite.
+///
+/// The RewriteMapParser can be used to parse a mapping file that provides the
+/// mapping for rewriting the symbols.  The descriptors individually describe
+/// whether to rewrite a function, global variable, or global alias.  Each of
+/// these can be selected either by explicitly providing a name for the ones to
+/// be rewritten or providing a (posix compatible) regular expression that will
+/// select the symbols to rewrite.  This descriptor list is passed to the
+/// SymbolRewriter pass.
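+///
+/// Illustrative driver (sketch; "rewrite.map" is a placeholder path):
+/// \code
+///   SymbolRewriter::RewriteMapParser Parser;
+///   SymbolRewriter::RewriteDescriptorList Descriptors;
+///   Parser.parse("rewrite.map", &Descriptors);
+///   ModulePass *RewritePass = createRewriteSymbolsPass(Descriptors);
+/// \endcode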
+class RewriteDescriptor {
+public:
+  enum class Type {
+    Invalid,        /// invalid
+    Function,       /// function - descriptor rewrites a function
+    GlobalVariable, /// global variable - descriptor rewrites a global variable
+    NamedAlias,     /// named alias - descriptor rewrites a global alias
+  };
+
+  RewriteDescriptor(const RewriteDescriptor &) = delete;
+  RewriteDescriptor &operator=(const RewriteDescriptor &) = delete;
+  virtual ~RewriteDescriptor() = default;
+
+  Type getType() const { return Kind; }
+
+  virtual bool performOnModule(Module &M) = 0;
+
+protected:
+  explicit RewriteDescriptor(Type T) : Kind(T) {}
+
+private:
+  const Type Kind;
+};
+
+using RewriteDescriptorList = std::list<std::unique_ptr<RewriteDescriptor>>;
+
+class RewriteMapParser {
+public:
+  bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);
+
+private:
+  bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
+  bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
+                  RewriteDescriptorList *DL);
+  bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
+                                      yaml::ScalarNode *Key,
+                                      yaml::MappingNode *Value,
+                                      RewriteDescriptorList *DL);
+  bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
+                                            yaml::ScalarNode *Key,
+                                            yaml::MappingNode *Value,
+                                            RewriteDescriptorList *DL);
+  bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
+                                         yaml::MappingNode *V,
+                                         RewriteDescriptorList *DL);
+};
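+
+// For illustration, a rewrite map file is a YAML document keyed by the kind
+// of symbol to rewrite; a minimal sketch (symbol names hypothetical, keys
+// corresponding to the parseRewrite*Descriptor methods above):
+//
+//   function:
+//     source: foo
+//     target: bar
+//   global variable:
+//     source: baz
+//     target: qux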
+
+} // end namespace SymbolRewriter
+
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+
+class RewriteSymbolPass : public PassInfoMixin<RewriteSymbolPass> {
+public:
+  RewriteSymbolPass() { loadAndParseMapFiles(); }
+
+  RewriteSymbolPass(SymbolRewriter::RewriteDescriptorList &DL) {
+    Descriptors.splice(Descriptors.begin(), DL);
+  }
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+  // Glue for old PM
+  bool runImpl(Module &M);
+
+private:
+  void loadAndParseMapFiles();
+
+  SymbolRewriter::RewriteDescriptorList Descriptors;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
new file mode 100644
index 0000000..222c601
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -0,0 +1,54 @@
+//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to ensure that functions have at most one return and one
+// unwind instruction in them.  Additionally, it keeps track of which node is
+// the new exit node of the CFG.  If there are no return or unwind instructions
+// in the function, the getReturnBlock/getUnwindBlock methods will return a null
+// pointer.
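+//
+// For example (illustrative IR), a function ending in two 'ret' instructions:
+//
+//   a: ret i32 0
+//   b: ret i32 1
+//
+// is rewritten so that both predecessors branch to a single merged block:
+//
+//   a: br label %UnifiedReturnBlock
+//   b: br label %UnifiedReturnBlock
+//   UnifiedReturnBlock:
+//     %retval = phi i32 [ 0, %a ], [ 1, %b ]
+//     ret i32 %retval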
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
+
+namespace llvm {
+
+struct UnifyFunctionExitNodes : public FunctionPass {
+  BasicBlock *ReturnBlock = nullptr;
+  BasicBlock *UnwindBlock = nullptr;
+  BasicBlock *UnreachableBlock = nullptr;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+  UnifyFunctionExitNodes() : FunctionPass(ID) {
+    initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+  }
+
+  // We can preserve non-critical-edgeness when we unify function exit nodes.
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent)
+  // return, unwind, or unreachable basic block in the CFG.
+  //
+  BasicBlock *getReturnBlock() const { return ReturnBlock; }
+  BasicBlock *getUnwindBlock() const { return UnwindBlock; }
+  BasicBlock *getUnreachableBlock() const { return UnreachableBlock; }
+
+  bool runOnFunction(Function &F) override;
+};
+
+Pass *createUnifyFunctionExitNodesPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
new file mode 100644
index 0000000..3983637
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -0,0 +1,83 @@
+//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop unrolling utilities. It does not define any
+// actual pass or policy, but provides a single function to perform loop
+// unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class MDNode;
+class OptimizationRemarkEmitter;
+class ScalarEvolution;
+
+using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
+
+const Loop *addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
+                                     BasicBlock *ClonedBB, LoopInfo *LI,
+                                     NewLoopsMap &NewLoops);
+
+/// Represents the result of a \c UnrollLoop invocation.
+enum class LoopUnrollResult {
+  /// The loop was not modified.
+  Unmodified,
+
+  /// The loop was partially unrolled -- we still have a loop, but with a
+  /// smaller trip count.  We may also have emitted an epilogue loop if the
+  /// loop had a non-constant trip count.
+  /// had a non-constant trip count.
+  PartiallyUnrolled,
+
+  /// The loop was fully unrolled into straight-line code.  We no longer have
+  /// any back-edges.
+  FullyUnrolled
+};
+
+LoopUnrollResult UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
+                            bool Force, bool AllowRuntime,
+                            bool AllowExpensiveTripCount, bool PreserveCondBr,
+                            bool PreserveOnlyFirst, unsigned TripMultiple,
+                            unsigned PeelCount, bool UnrollRemainder,
+                            LoopInfo *LI, ScalarEvolution *SE,
+                            DominatorTree *DT, AssumptionCache *AC,
+                            OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
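+
+// A minimal illustrative call (sketch; LI/SE/DT/AC/ORE are analysis results
+// assumed to be available): unroll L by 4 with a runtime remainder allowed.
+//
+//   LoopUnrollResult R = UnrollLoop(
+//       L, /*Count=*/4, /*TripCount=*/0, /*Force=*/false,
+//       /*AllowRuntime=*/true, /*AllowExpensiveTripCount=*/false,
+//       /*PreserveCondBr=*/false, /*PreserveOnlyFirst=*/false,
+//       /*TripMultiple=*/1, /*PeelCount=*/0, /*UnrollRemainder=*/false,
+//       LI, SE, DT, AC, ORE, /*PreserveLCSSA=*/true);
+//   if (R != LoopUnrollResult::Unmodified)
+//     ...; // the IR was changed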
+
+bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
+                                bool AllowExpensiveTripCount,
+                                bool UseEpilogRemainder, bool UnrollRemainder,
+                                LoopInfo *LI,
+                                ScalarEvolution *SE, DominatorTree *DT,
+                                AssumptionCache *AC,
+                                bool PreserveLCSSA);
+
+void computePeelCount(Loop *L, unsigned LoopSize,
+                      TargetTransformInfo::UnrollingPreferences &UP,
+                      unsigned &TripCount, ScalarEvolution &SE);
+
+bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
+              DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
new file mode 100644
index 0000000..1baa9b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
@@ -0,0 +1,108 @@
+//===- VNCoercion.h - Value Numbering Coercion Utilities --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides routines used by LLVM's value numbering passes to
+/// perform various forms of value extraction from memory when the types are not
+/// identical.  For example, given
+///
+/// store i32 8, i32* %foo
+/// %a = bitcast i32* %foo to i16*
+/// %val = load i16, i16* %a
+///
+/// It is possible to extract the value of the load of %a from the store to %foo.
+/// These routines know how to tell whether they can do that (the analyze*
+/// routines), and can also insert the necessary IR to do it (the get*
+/// routines).
+
+#ifndef LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+#define LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class Constant;
+class Function;
+class StoreInst;
+class LoadInst;
+class MemIntrinsic;
+class Instruction;
+class Value;
+class Type;
+class DataLayout;
+namespace VNCoercion {
+/// Return true if CoerceAvailableValueToLoadType would succeed if it was
+/// called.
+bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
+                                     const DataLayout &DL);
+
+/// If we saw a store of a value to memory, and then a load from a must-aliased
+/// pointer of a different type, try to coerce the stored value to the loaded
+/// type.  LoadedTy is the type of the load we want to replace.  IRB is
+/// IRBuilder used to insert new instructions.
+///
+/// If we can't do it, return null.
+Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+                                      IRBuilder<> &IRB, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the store at DepSI.
+///
+/// On success, it returns the offset into DepSI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
+                                   StoreInst *DepSI, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the load at DepLI.
+///
+/// On success, it returns the offset into DepLI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
+                                  const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the memory intrinsic at DepMI.
+///
+/// On success, it returns the offset into DepMI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
+                                     MemIntrinsic *DepMI, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingStore returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the store. It
+/// inserts instructions to do so at InsertPt, and returns the extracted value.
+Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
+                            Instruction *InsertPt, const DataLayout &DL);
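+//
+// A typical client pattern (sketch; DepSI is the clobbering store, LI the
+// load we want a value for, DL the module's DataLayout):
+//
+//   int Off = analyzeLoadFromClobberingStore(LI->getType(),
+//                                            LI->getPointerOperand(),
+//                                            DepSI, DL);
+//   if (Off != -1)
+//     Value *V = getStoreValueForLoad(DepSI->getValueOperand(), Off,
+//                                     LI->getType(), LI, DL);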
+// This is the same as getStoreValueForLoad, except it performs no insertion.
+// It only allows constant inputs.
+Constant *getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset,
+                                       Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingLoad returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the load, including
+/// any necessary load widening.  It inserts instructions to do so at InsertPt,
+/// and returns the extracted value.
+Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
+                           Instruction *InsertPt, const DataLayout &DL);
+// This is the same as getLoadValueForLoad, except it is given the load value as
+// a constant. It returns nullptr if it would require widening the load.
+Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
+                                      Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingMemInst returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the memory
+/// intrinsic.  It inserts instructions to do so at InsertPt, and returns the
+/// extracted value.
+Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+                              Type *LoadTy, Instruction *InsertPt,
+                              const DataLayout &DL);
+// This is the same as getStoreValueForLoad, except it performs no insertion.
+// It returns nullptr if it cannot produce a constant.
+Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+                                         Type *LoadTy, const DataLayout &DL);
+} // end namespace VNCoercion
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_VNCOERCION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
new file mode 100644
index 0000000..4ecb23e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
@@ -0,0 +1,281 @@
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MapValue interface which is used by various parts of
+// the Transforms/Utils library to implement cloning and linking facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class GlobalAlias;
+class GlobalVariable;
+class Instruction;
+class MDNode;
+class Metadata;
+class Type;
+class Value;
+
+using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
+
+/// This is a class that can be implemented by clients to remap types when
+/// cloning constants and instructions.
+class ValueMapTypeRemapper {
+  virtual void anchor(); // Out of line method.
+
+public:
+  virtual ~ValueMapTypeRemapper() = default;
+
+  /// The client should implement this method if they want to remap types while
+  /// mapping values.
+  virtual Type *remapType(Type *SrcTy) = 0;
+};
+
+/// This is a class that can be implemented by clients to materialize Values on
+/// demand.
+class ValueMaterializer {
+  virtual void anchor(); // Out of line method.
+
+protected:
+  ValueMaterializer() = default;
+  ValueMaterializer(const ValueMaterializer &) = default;
+  ValueMaterializer &operator=(const ValueMaterializer &) = default;
+  ~ValueMaterializer() = default;
+
+public:
+  /// This method can be implemented to generate a mapped Value on demand, for
+  /// example when linking lazily.  Returns null if the value is not
+  /// materialized.
+  virtual Value *materialize(Value *V) = 0;
+};
+
+/// These are flags that the value mapping APIs allow.
+enum RemapFlags {
+  RF_None = 0,
+
+  /// If this flag is set, the remapper knows that only local values within a
+  /// function (such as an instruction or argument) are mapped, not global
+  /// values like functions and global metadata.
+  RF_NoModuleLevelChanges = 1,
+
+  /// If this flag is set, the remapper ignores missing function-local entries
+  /// (Argument, Instruction, BasicBlock) that are not in the value map.  If it
+  /// is unset, it aborts if an operand is asked to be remapped which doesn't
+  /// exist in the mapping.
+  ///
+  /// There are no such assertions in MapValue(), whose results are almost
+  /// unchanged by this flag.  This flag mainly changes the assertion behaviour
+  /// in RemapInstruction().
+  ///
+  /// Since an Instruction's metadata operands (even those that point to SSA
+  /// aren't guaranteed to be dominated by their definitions, MapMetadata will
+  /// return "!{}" instead of "null" for \a LocalAsMetadata instances whose SSA
+  /// values are unmapped when this flag is set.  Otherwise, \a MapValue()
+  /// completely ignores this flag.
+  ///
+  /// \a MapMetadata() always ignores this flag.
+  RF_IgnoreMissingLocals = 2,
+
+  /// Instruct the remapper to move distinct metadata instead of duplicating it
+  /// when there are module-level changes.
+  RF_MoveDistinctMDs = 4,
+
+  /// Any global values not in value map are mapped to null instead of mapping
+  /// to self.  Illegal if RF_IgnoreMissingLocals is also set.
+  RF_NullMapMissingGlobalValues = 8,
+};
+
+inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+  return RemapFlags(unsigned(LHS) | unsigned(RHS));
+}
+
+/// Context for (re-)mapping values (and metadata).
+///
+/// A shared context used for mapping and remapping of Value and Metadata
+/// instances using \a ValueToValueMapTy, \a RemapFlags, \a
+/// ValueMapTypeRemapper, and \a ValueMaterializer.
+///
+/// There are a number of top-level entry points:
+/// - \a mapValue() (and \a mapConstant());
+/// - \a mapMetadata() (and \a mapMDNode());
+/// - \a remapInstruction(); and
+/// - \a remapFunction().
+///
+/// The \a ValueMaterializer can be used as a callback, but cannot invoke any
+/// of these top-level functions recursively.  Instead, callbacks should use
+/// one of the following to schedule work lazily in the \a ValueMapper
+/// instance:
+/// - \a scheduleMapGlobalInitializer()
+/// - \a scheduleMapAppendingVariable()
+/// - \a scheduleMapGlobalAliasee()
+/// - \a scheduleRemapFunction()
+///
+/// Sometimes a callback needs a different mapping context.  Such a context can
+/// be registered using \a registerAlternateMappingContext(), which takes an
+/// alternate \a ValueToValueMapTy and \a ValueMaterializer and returns an ID to
+/// pass into the schedule*() functions.
+///
+/// TODO: lib/Linker really doesn't need the \a ValueHandle in the \a
+/// ValueToValueMapTy.  We should template \a ValueMapper (and its
+/// implementation classes), and explicitly instantiate on two concrete
+/// instances of \a ValueMap (one as \a ValueToValueMap, and one with raw \a
+/// Value pointers).  It may be viable to do away with \a TrackingMDRef in the
+/// \a Metadata side map for the lib/Linker case as well, in which case we'll
+/// need a new template parameter on \a ValueMap.
+///
+/// TODO: Update callers of \a RemapInstruction() and \a MapValue() (etc.) to
+/// use \a ValueMapper directly.
+class ValueMapper {
+  void *pImpl;
+
+public:
+  ValueMapper(ValueToValueMapTy &VM, RemapFlags Flags = RF_None,
+              ValueMapTypeRemapper *TypeMapper = nullptr,
+              ValueMaterializer *Materializer = nullptr);
+  ValueMapper(ValueMapper &&) = delete;
+  ValueMapper(const ValueMapper &) = delete;
+  ValueMapper &operator=(ValueMapper &&) = delete;
+  ValueMapper &operator=(const ValueMapper &) = delete;
+  ~ValueMapper();
+
+  /// Register an alternate mapping context.
+  ///
+  /// Returns a MappingContextID that can be used with the various schedule*()
+  /// API to switch in a different value map on-the-fly.
+  unsigned
+  registerAlternateMappingContext(ValueToValueMapTy &VM,
+                                  ValueMaterializer *Materializer = nullptr);
+
+  /// Add to the current \a RemapFlags.
+  ///
+  /// \note Like the top-level mapping functions, \a addFlags() must be called
+  /// at the top level, not during a callback in a \a ValueMaterializer.
+  void addFlags(RemapFlags Flags);
+
+  Metadata *mapMetadata(const Metadata &MD);
+  MDNode *mapMDNode(const MDNode &N);
+
+  Value *mapValue(const Value &V);
+  Constant *mapConstant(const Constant &C);
+
+  void remapInstruction(Instruction &I);
+  void remapFunction(Function &F);
+
+  void scheduleMapGlobalInitializer(GlobalVariable &GV, Constant &Init,
+                                    unsigned MappingContextID = 0);
+  void scheduleMapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
+                                    bool IsOldCtorDtor,
+                                    ArrayRef<Constant *> NewMembers,
+                                    unsigned MappingContextID = 0);
+  void scheduleMapGlobalAliasee(GlobalAlias &GA, Constant &Aliasee,
+                                unsigned MappingContextID = 0);
+  void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
+};
+
+/// Look up or compute a value in the value map.
+///
+/// Return a mapped value for a function-local value (Argument, Instruction,
+/// BasicBlock), or compute and memoize a value for a Constant.
+///
+///  1. If \c V is in VM, return the result.
+///  2. Else if \c V can be materialized with \c Materializer, do so, memoize
+///     it in \c VM, and return it.
+///  3. Else if \c V is a function-local value, return nullptr.
+///  4. Else if \c V is a \a GlobalValue, return \c nullptr or \c V depending
+///     on \a RF_NullMapMissingGlobalValues.
+///  5. Else if \c V is a \a MetadataAsValue wrapping a LocalAsMetadata,
+///     recurse on the local SSA value, and return nullptr or "metadata !{}" on
+///     missing depending on RF_IgnoreMissingLocals.
+///  6. Else if \c V is a \a MetadataAsValue, rewrap the return of \a
+///     MapMetadata().
+///  7. Else, compute the equivalent constant, and return it.
+inline Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+                       RemapFlags Flags = RF_None,
+                       ValueMapTypeRemapper *TypeMapper = nullptr,
+                       ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapValue(*V);
+}
+
+/// Lookup or compute a mapping for a piece of metadata.
+///
+/// Compute and memoize a mapping for \c MD.
+///
+///  1. If \c MD is mapped, return it.
+///  2. Else if \a RF_NoModuleLevelChanges or \c MD is an \a MDString, return
+///     \c MD.
+///  3. Else if \c MD is a \a ConstantAsMetadata, call \a MapValue() and
+///     re-wrap its return (returning nullptr on nullptr).
+///  4. Else, \c MD is an \a MDNode.  These are remapped, along with their
+///     transitive operands.  Distinct nodes are duplicated or moved depending
+///     on \a RF_MoveDistinctMDs.  Uniqued nodes are remapped like constants.
+///
+/// \note \a LocalAsMetadata is completely unsupported by \a MapMetadata.
+/// Instead, use \a MapValue() with its wrapping \a MetadataAsValue instance.
+inline Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+                             RemapFlags Flags = RF_None,
+                             ValueMapTypeRemapper *TypeMapper = nullptr,
+                             ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMetadata(*MD);
+}
+
+/// Version of MapMetadata with type safety for MDNode.
+inline MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+                           RemapFlags Flags = RF_None,
+                           ValueMapTypeRemapper *TypeMapper = nullptr,
+                           ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMDNode(*MD);
+}
+
+/// Convert the instruction operands from referencing the current values into
+/// those specified by VM.
+///
+/// If \a RF_IgnoreMissingLocals is set and an operand can't be found via \a
+/// MapValue(), use the old value.  Otherwise assert that this doesn't happen.
+///
+/// Note that \a MapValue() only returns \c nullptr for SSA values missing from
+/// \c VM.
+inline void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+                             RemapFlags Flags = RF_None,
+                             ValueMapTypeRemapper *TypeMapper = nullptr,
+                             ValueMaterializer *Materializer = nullptr) {
+  ValueMapper(VM, Flags, TypeMapper, Materializer).remapInstruction(*I);
+}
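+
+/// For illustration, a minimal cloning-style use (sketch; OldArg, NewArg, and
+/// OldI are hypothetical values from a function being cloned):
+///
+///   ValueToValueMapTy VMap;
+///   VMap[OldArg] = NewArg;              // seed the map
+///   Instruction *NewI = OldI->clone();  // operands still reference old IR
+///   RemapInstruction(NewI, VMap, RF_IgnoreMissingLocals);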
+
+/// Remap the operands, metadata, arguments, and instructions of a function.
+///
+/// Calls \a MapValue() on prefix data, prologue data, and personality
+/// function; calls \a MapMetadata() on each attached MDNode; remaps the
+/// argument types using the provided \c TypeMapper; and calls \a
+/// RemapInstruction() on every instruction.
+inline void RemapFunction(Function &F, ValueToValueMapTy &VM,
+                          RemapFlags Flags = RF_None,
+                          ValueMapTypeRemapper *TypeMapper = nullptr,
+                          ValueMaterializer *Materializer = nullptr) {
+  ValueMapper(VM, Flags, TypeMapper, Materializer).remapFunction(F);
+}
+
+/// Version of MapValue with type safety for Constant.
+inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
+                          RemapFlags Flags = RF_None,
+                          ValueMapTypeRemapper *TypeMapper = nullptr,
+                          ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapConstant(*V);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
new file mode 100644
index 0000000..19845e4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
@@ -0,0 +1,144 @@
+//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Vectorize transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_H
+
+namespace llvm {
+class BasicBlock;
+class BasicBlockPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize configuration.
+struct VectorizeConfig {
+  //===--------------------------------------------------------------------===//
+  // Target architecture related parameters
+
+  /// @brief The size of the native vector registers.
+  unsigned VectorBits;
+
+  /// @brief Vectorize boolean values.
+  bool VectorizeBools;
+
+  /// @brief Vectorize integer values.
+  bool VectorizeInts;
+
+  /// @brief Vectorize floating-point values.
+  bool VectorizeFloats;
+
+  /// @brief Vectorize pointer values.
+  bool VectorizePointers;
+
+  /// @brief Vectorize casting (conversion) operations.
+  bool VectorizeCasts;
+
+  /// @brief Vectorize floating-point math intrinsics.
+  bool VectorizeMath;
+
+  /// @brief Vectorize bit intrinsics.
+  bool VectorizeBitManipulations;
+
+  /// @brief Vectorize the fused-multiply-add intrinsic.
+  bool VectorizeFMA;
+
+  /// @brief Vectorize select instructions.
+  bool VectorizeSelect;
+
+  /// @brief Vectorize comparison instructions.
+  bool VectorizeCmp;
+
+  /// @brief Vectorize getelementptr instructions.
+  bool VectorizeGEP;
+
+  /// @brief Vectorize loads and stores.
+  bool VectorizeMemOps;
+
+  /// @brief Only generate aligned loads and stores.
+  bool AlignedOnly;
+
+  //===--------------------------------------------------------------------===//
+  // Misc parameters
+
+  /// @brief The required chain depth for vectorization.
+  unsigned ReqChainDepth;
+
+  /// @brief The maximum search distance for instruction pairs.
+  unsigned SearchLimit;
+
+  /// @brief The maximum number of candidate pairs with which to use a full
+  ///        cycle check.
+  unsigned MaxCandPairsForCycleCheck;
+
+  /// @brief Replicating one element to a pair breaks the chain.
+  bool SplatBreaksChain;
+
+  /// @brief The maximum number of pairable instructions per group.
+  unsigned MaxInsts;
+
+  /// @brief The maximum number of candidate instruction pairs per group.
+  unsigned MaxPairs;
+
+  /// @brief The maximum number of pairing iterations.
+  unsigned MaxIter;
+
+  /// @brief Don't try to form odd-length vectors.
+  bool Pow2LenOnly;
+
+  /// @brief Don't boost the chain-depth contribution of loads and stores.
+  bool NoMemOpBoost;
+
+  /// @brief Use a fast instruction dependency analysis.
+  bool FastDep;
+
+  /// @brief Initialize the VectorizeConfig from command line options.
+  VectorizeConfig();
+};
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass *createLoopVectorizePass(bool NoUnrolling = false,
+                              bool AlwaysVectorize = true);
+
+//===----------------------------------------------------------------------===//
+//
+// SLPVectorizer - Create a bottom-up SLP vectorizer pass.
+//
+Pass *createSLPVectorizerPass();
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize the BasicBlock.
+///
+/// @param BB The BasicBlock to be vectorized
+/// @param P  The current running pass, should require AliasAnalysis and
+///           ScalarEvolution. After the vectorization, AliasAnalysis,
+///           ScalarEvolution and CFG are preserved.
+///
+/// @return True if the BB is changed, false otherwise.
+///
+bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
+                         const VectorizeConfig &C = VectorizeConfig());
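+
+// Illustrative call (sketch; P is the currently running pass providing the
+// required analyses, per the contract above):
+//
+//   VectorizeConfig C;
+//   C.VectorBits = 256;    // assume 256-bit native vector registers
+//   C.AlignedOnly = true;  // only emit aligned loads and stores
+//   bool Changed = vectorizeBasicBlock(P, BB, C);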
+
+//===----------------------------------------------------------------------===//
+//
+// LoadStoreVectorizer - Create vector loads and stores, but leave scalar
+// operations.
+//
+Pass *createLoadStoreVectorizerPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
new file mode 100644
index 0000000..32b56d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -0,0 +1,107 @@
+//===- LoopVectorize.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
+// and generates target-independent LLVM-IR.
+// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
+// of instructions in order to estimate the profitability of vectorization.
+//
+// The loop vectorizer combines consecutive loop iterations into a single
+// 'wide' iteration. After this transformation the index is incremented
+// by the SIMD vector width, and not by one.
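+//
+// Conceptually (illustrative pseudocode, vector width 4):
+//
+//   for (i = 0; i < n; i++)            for (i = 0; i < n; i += 4)
+//     A[i] = B[i] + C[i];       =>       A[i:i+3] = B[i:i+3] + C[i:i+3];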
+//
+// This pass has four parts:
+// 1. The main loop pass that drives the different parts.
+// 2. LoopVectorizationLegality - A unit that checks for the legality
+//    of the vectorization.
+// 3. InnerLoopVectorizer - A unit that performs the actual
+//    widening of instructions.
+// 4. LoopVectorizationCostModel - A unit that checks for the profitability
+//    of vectorization. It decides on the optimal vector width, which
+//    can be one, if vectorization is not profitable.
+//
+//===----------------------------------------------------------------------===//
+//
+// The reduction-variable vectorization is based on the paper:
+//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
+//
+// Variable uniformity checks are inspired by:
+//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
+//
+// The interleaved access vectorization is based on the paper:
+//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
+//  Data for SIMD
+//
+// Other ideas/concepts are from:
+//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
+//
+//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
+//  Vectorizing Compilers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/PassManager.h"
+#include <functional>
+
+namespace llvm {
+
+class AssumptionCache;
+class BlockFrequencyInfo;
+class DemandedBits;
+class DominatorTree;
+class Function;
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class ScalarEvolution;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The LoopVectorize Pass.
+struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
+  bool DisableUnrolling = false;
+
+  /// If true, consider all loops for vectorization.
+  /// If false, only loops that explicitly request vectorization are
+  /// considered.
+  bool AlwaysVectorize = true;
+
+  ScalarEvolution *SE;
+  LoopInfo *LI;
+  TargetTransformInfo *TTI;
+  DominatorTree *DT;
+  BlockFrequencyInfo *BFI;
+  TargetLibraryInfo *TLI;
+  DemandedBits *DB;
+  AliasAnalysis *AA;
+  AssumptionCache *AC;
+  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
+  OptimizationRemarkEmitter *ORE;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Shim for old PM.
+  bool runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
+               TargetTransformInfo &TTI_, DominatorTree &DT_,
+               BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
+               DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
+               std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
+               OptimizationRemarkEmitter &ORE);
+
+  bool processLoop(Loop *L);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
new file mode 100644
index 0000000..979d5ef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -0,0 +1,154 @@
+//===- SLPVectorizer.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
+// stores that can be put together into vector-stores. Next, it attempts to
+// construct a vectorizable tree using the use-def chains. If a profitable tree
+// was found, the SLP vectorizer performs vectorization on the tree.
+//
+// The pass is inspired by the work described in the paper:
+//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
+//
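+// For example (conceptual), four adjacent scalar stores
+//
+//   A[0] = x0; A[1] = x1; A[2] = x2; A[3] = x3;
+//
+// may become a single <4 x T> vector store, provided the use-def trees
+// computing x0..x3 are themselves profitably vectorizable.
+//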
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
+#define LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class CmpInst;
+class DataLayout;
+class DemandedBits;
+class DominatorTree;
+class Function;
+class InsertElementInst;
+class InsertValueInst;
+class Instruction;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class ScalarEvolution;
+class StoreInst;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by this pass.
+/// These are implementation details and should not be used by clients.
+namespace slpvectorizer {
+
+class BoUpSLP;
+
+} // end namespace slpvectorizer
+
+struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
+  using StoreList = SmallVector<StoreInst *, 8>;
+  using StoreListMap = MapVector<Value *, StoreList>;
+  using WeakTrackingVHList = SmallVector<WeakTrackingVH, 8>;
+  using WeakTrackingVHListMap = MapVector<Value *, WeakTrackingVHList>;
+
+  ScalarEvolution *SE = nullptr;
+  TargetTransformInfo *TTI = nullptr;
+  TargetLibraryInfo *TLI = nullptr;
+  AliasAnalysis *AA = nullptr;
+  LoopInfo *LI = nullptr;
+  DominatorTree *DT = nullptr;
+  AssumptionCache *AC = nullptr;
+  DemandedBits *DB = nullptr;
+  const DataLayout *DL = nullptr;
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, ScalarEvolution *SE_, TargetTransformInfo *TTI_,
+               TargetLibraryInfo *TLI_, AliasAnalysis *AA_, LoopInfo *LI_,
+               DominatorTree *DT_, AssumptionCache *AC_, DemandedBits *DB_,
+               OptimizationRemarkEmitter *ORE_);
+
+private:
+  /// \brief Collect store and getelementptr instructions and organize them
+  /// according to the underlying object of their pointer operands. We sort the
+  /// instructions by their underlying objects to reduce the cost of
+  /// consecutive access queries.
+  ///
+  /// TODO: We can further reduce this cost if we flush the chain creation
+  ///       every time we run into a memory barrier.
+  void collectSeedInstructions(BasicBlock *BB);
+
+  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
+  bool tryToVectorizePair(Value *A, Value *B, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Try to vectorize a list of operands.
+  /// \param UserCost Cost of the user operations of \p VL if they may affect
+  /// the cost of the vectorization.
+  /// \returns true if a value was vectorized.
+  bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
+                          int UserCost = 0, bool AllowReorder = false);
+
+  /// \brief Try to vectorize a chain that may start at the operands of \p I.
+  bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Vectorize the store instructions collected in Stores.
+  bool vectorizeStoreChains(slpvectorizer::BoUpSLP &R);
+
+  /// \brief Vectorize the index computations of the getelementptr instructions
+  /// collected in GEPs.
+  bool vectorizeGEPIndices(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// Try to find horizontal reduction or otherwise vectorize a chain of binary
+  /// operators.
+  bool vectorizeRootInstruction(PHINode *P, Value *V, BasicBlock *BB,
+                                slpvectorizer::BoUpSLP &R,
+                                TargetTransformInfo *TTI);
+
+  /// Try to vectorize trees that start at insertvalue instructions.
+  bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
+                                slpvectorizer::BoUpSLP &R);
+
+  /// Try to vectorize trees that start at insertelement instructions.
+  bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
+                                  slpvectorizer::BoUpSLP &R);
+
+  /// Try to vectorize trees that start at compare instructions.
+  bool vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// Tries to vectorize constructs started from CmpInst, InsertValueInst or
+  /// InsertElementInst instructions.
+  bool vectorizeSimpleInstructions(SmallVectorImpl<WeakVH> &Instructions,
+                                   BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Scan the basic block and look for patterns that are likely to start
+  /// a vectorization chain.
+  bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain, slpvectorizer::BoUpSLP &R,
+                           unsigned VecRegSize);
+
+  bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);
+
+  /// The store instructions in a basic block organized by base pointer.
+  StoreListMap Stores;
+
+  /// The getelementptr instructions in a basic block organized by base pointer.
+  WeakTrackingVHListMap GEPs;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
diff --git a/linux-x64/clang/include/llvm/WindowsManifest/WindowsManifestMerger.h b/linux-x64/clang/include/llvm/WindowsManifest/WindowsManifestMerger.h
new file mode 100644
index 0000000..302d370
--- /dev/null
+++ b/linux-x64/clang/include/llvm/WindowsManifest/WindowsManifestMerger.h
@@ -0,0 +1,66 @@
+//===-- WindowsManifestMerger.h ---------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file provides a utility for merging Microsoft .manifest files.  These
+// files are xml documents which contain meta-information about applications,
+// such as whether or not admin access is required, system compatibility,
+// versions, etc.  Part of the linking process of an executable may require
+// merging several of these .manifest files using a tree-merge following
+// specific rules.  Unfortunately, these rules are not documented well
+// anywhere.  However, a careful investigation of the behavior of the original
+// Microsoft Manifest Tool (mt.exe) revealed the rules of this merge.  As the
+// saying goes, code is the best documentation, so please look below if you are
+// interested in the exact merging requirements.
+//
+// Ref:
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374191(v=vs.85).aspx
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_MANIFEST_MERGER_H
+#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_MANIFEST_MERGER_H
+
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+class MemoryBuffer;
+
+namespace windows_manifest {
+
+bool isAvailable();
+
+class WindowsManifestError : public ErrorInfo<WindowsManifestError, ECError> {
+public:
+  static char ID;
+  WindowsManifestError(const Twine &Msg);
+  void log(raw_ostream &OS) const override;
+
+private:
+  std::string Msg;
+};
+
+class WindowsManifestMerger {
+public:
+  WindowsManifestMerger();
+  ~WindowsManifestMerger();
+  Error merge(const MemoryBuffer &Manifest);
+
+  // Returns a MemoryBuffer containing the merged XML manifest, or nullptr for
+  // an empty manifest.
+  std::unique_ptr<MemoryBuffer> getMergedManifest();
+
+private:
+  class WindowsManifestMergerImpl;
+  std::unique_ptr<WindowsManifestMergerImpl> Impl;
+};
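+
+// Illustrative use (sketch; buffer creation and error handling elided):
+//
+//   WindowsManifestMerger Merger;
+//   if (Error E = Merger.merge(*Buf1))
+//     return E;
+//   if (Error E = Merger.merge(*Buf2))
+//     return E;
+//   std::unique_ptr<MemoryBuffer> Merged = Merger.getMergedManifest();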
+
+} // namespace windows_manifest
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/WindowsResource/ResourceProcessor.h b/linux-x64/clang/include/llvm/WindowsResource/ResourceProcessor.h
new file mode 100644
index 0000000..4ca0a4b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/WindowsResource/ResourceProcessor.h
@@ -0,0 +1,51 @@
+//===-- ResourceProcessor.h -------------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H
+#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <memory>
+#include <vector>
+
+
+namespace llvm {
+
+class WindowsResourceProcessor {
+public:
+  using PathType = SmallVector<char, 64>;
+
+  WindowsResourceProcessor() {}
+
+  void addDefine(StringRef Key, StringRef Value = StringRef()) {
+    PreprocessorDefines.emplace_back(Key, Value);
+  }
+  void addInclude(const PathType &IncludePath) {
+    IncludeList.push_back(IncludePath);
+  }
+  void setVerbose(bool Verbose) { IsVerbose = Verbose; }
+  void setNullAtEnd(bool NullAtEnd) { AppendNull = NullAtEnd; }
+
+  Error process(StringRef InputData,
+                std::unique_ptr<raw_fd_ostream> OutputStream);
+
+private:
+  StringRef InputData;
+  std::vector<PathType> IncludeList;
+  std::vector<std::pair<StringRef, StringRef>> PreprocessorDefines;
+  bool IsVerbose, AppendNull;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptToken.h b/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptToken.h
new file mode 100644
index 0000000..494ae32
--- /dev/null
+++ b/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptToken.h
@@ -0,0 +1,59 @@
+//===-- ResourceScriptToken.h -----------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This declares the .rc script tokens.
+// The list of available tokens is located at ResourceScriptTokenList.h.
+//
+// Ref: msdn.microsoft.com/en-us/library/windows/desktop/aa380599(v=vs.85).aspx
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H
+#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+// A definition of a single resource script token. Each token has its kind
+// (declared in ResourceScriptTokenList) and holds a value - a reference
+// representation of the token.
+// RCToken does not claim ownership on its value. A memory buffer containing
+// the token value should be stored in a safe place and cannot be freed
+// nor reallocated.
+class RCToken {
+public:
+  enum class Kind {
+#define TOKEN(Name) Name,
+#define SHORT_TOKEN(Name, Ch) Name,
+#include "ResourceScriptTokenList.h"
+#undef TOKEN
+#undef SHORT_TOKEN
+  };
+
+  RCToken(RCToken::Kind RCTokenKind, StringRef Value);
+
+  // Get an integer value of the integer token.
+  uint32_t intValue() const;
+  bool isLongInt() const;
+
+  StringRef value() const;
+  Kind kind() const;
+
+  // Check if a token describes a binary operator.
+  bool isBinaryOp() const;
+
+private:
+  Kind TokenKind;
+  StringRef TokenValue;
+};
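+
+// Illustrative construction (sketch):
+//
+//   RCToken Tok(RCToken::Kind::Int, "0x2A");
+//   uint32_t V = Tok.intValue(); // 42 for this integer token
+//   bool Op = Tok.isBinaryOp();  // false; Int is not an operator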
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptTokenList.h b/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptTokenList.h
new file mode 100644
index 0000000..0beed11
--- /dev/null
+++ b/linux-x64/clang/include/llvm/WindowsResource/ResourceScriptTokenList.h
@@ -0,0 +1,35 @@
+//===-- ResourceScriptTokenList.h -------------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This is a part of llvm-rc tokens header. It lists all the possible tokens
+// that might occur in a correct .rc script.
+//
+//===---------------------------------------------------------------------===//
+
+
+// Long tokens. They might consist of more than one character.
+TOKEN(Invalid)      // Invalid token. Should not occur in a valid script.
+TOKEN(Int)          // Integer (decimal, octal or hexadecimal).
+TOKEN(String)       // String value.
+TOKEN(Identifier)   // Script identifier (resource name or type).
+
+// Short tokens. They usually consist of exactly one character.
+// The definitions are of the form SHORT_TOKEN(TokenName, TokenChar).
+// TokenChar is the one-character token representation occurring in the correct
+// .rc scripts.
+SHORT_TOKEN(BlockBegin, '{')   // Start of the script block; can also be BEGIN.
+SHORT_TOKEN(BlockEnd, '}')     // End of the block; can also be END.
+SHORT_TOKEN(Comma, ',')        // Comma - resource arguments separator.
+SHORT_TOKEN(Plus, '+')         // Addition operator.
+SHORT_TOKEN(Minus, '-')        // Subtraction operator.
+SHORT_TOKEN(Pipe, '|')         // Bitwise-OR operator.
+SHORT_TOKEN(Amp, '&')          // Bitwise-AND operator.
+SHORT_TOKEN(Tilde, '~')        // Bitwise-NOT operator.
+SHORT_TOKEN(LeftParen, '(')    // Left parenthesis in the script expressions.
+SHORT_TOKEN(RightParen, ')')   // Right parenthesis.
diff --git a/linux-x64/clang/include/llvm/XRay/Graph.h b/linux-x64/clang/include/llvm/XRay/Graph.h
new file mode 100644
index 0000000..a4d34a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/XRay/Graph.h
@@ -0,0 +1,494 @@
+//===-- Graph.h - XRay Graph Class ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A Graph Datatype for XRay.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_XRAY_GRAPH_T_H
+#define LLVM_XRAY_GRAPH_T_H
+
+#include <initializer_list>
+#include <stdint.h>
+#include <type_traits>
+#include <utility>
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace xray {
+
+/// A Graph object represents a Directed Graph and is used in XRay to compute
+/// and store function call graphs and associated statistical information.
+///
+/// The graph takes three template parameters, these are:
+///  - VertexAttribute, a structure which is stored for each vertex.
+///    Must be DefaultConstructible, CopyConstructible, CopyAssignable and
+///    Destructible.
+///  - EdgeAttribute, a structure which is stored for each edge.
+///    Must be DefaultConstructible, CopyConstructible, CopyAssignable and
+///    Destructible.
+///  - VI, the type used to identify vertices, available as VertexIdentifier.
+///    DenseMapInfo must be defined over it; if the built-in DenseMapInfo does
+///    not cover it, provide a specialization.
+///
+/// Graph is CopyConstructible, CopyAssignable, MoveConstructible and
+/// MoveAssignable but is not EqualityComparable or LessThanComparable.
+///
+/// Usage Example Graph with weighted edges and vertices:
+///   Graph<int, int, int> G;
+///
+///   G[1] = 0;
+///   G[2] = 2;
+///   G[{1,2}] = 1;
+///   G[{2,1}] = -1;
+///   for(const auto &v : G.vertices()){
+///     // Do something with the vertices in the graph;
+///   }
+///   for(const auto &e : G.edges()){
+///     // Do something with the edges in the graph;
+///   }
+///
+/// Usage Example with StringRef keys.
+///   Graph<int, double, StringRef> StrG;
+///   char va[] = "Vertex A";
+///   char vaa[] = "Vertex A";
+///   char vb[] = "Vertex B"; // Vertices are referenced by StringRefs.
+///   StrG[va] = 0;
+///   StrG[vb] = 1;
+///   StrG[{va, vb}] = 1.0;
+///   outs() << StrG[vaa] << " " << StrG[{vaa, vb}]; // prints "0 1.0".
+///
+template <typename VertexAttribute, typename EdgeAttribute,
+          typename VI = int32_t>
+class Graph {
+public:
+  /// These objects are used to name edges and vertices in the graph.
+  typedef VI VertexIdentifier;
+  typedef std::pair<VI, VI> EdgeIdentifier;
+
+  /// This type is the value_type of all iterators which range over vertices,
+  /// determined by the Vertices DenseMap.
+  using VertexValueType =
+      detail::DenseMapPair<VertexIdentifier, VertexAttribute>;
+
+  /// This type is the value_type of all iterators which range over edges,
+  /// determined by the Edges DenseMap.
+  using EdgeValueType = detail::DenseMapPair<EdgeIdentifier, EdgeAttribute>;
+
+  using size_type = std::size_t;
+
+private:
+  /// The type used for storing the EdgeAttribute for each edge in the graph
+  using EdgeMapT = DenseMap<EdgeIdentifier, EdgeAttribute>;
+
+  /// The type used for storing the VertexAttribute for each vertex in
+  /// the graph.
+  using VertexMapT = DenseMap<VertexIdentifier, VertexAttribute>;
+
+  /// The type used for storing the neighbors of a vertex, indexed by the
+  /// VertexIdentifier at the other end of the edge. Only used to determine
+  /// where the incident edges are; the edge attributes themselves are stored
+  /// in the EdgeMapT.
+  using NeighborSetT = DenseSet<VertexIdentifier>;
+
+  /// The type storing the NeighborSetT corresponding to each vertex in
+  /// the graph (when a vertex has an edge incident to it).
+  using NeighborLookupT = DenseMap<VertexIdentifier, NeighborSetT>;
+
+private:
+  /// Stores the map from the start and end vertex of an edge to its
+  /// EdgeAttribute.
+  EdgeMapT Edges;
+
+  /// Stores the map from VertexIdentifier to VertexAttribute
+  VertexMapT Vertices;
+
+  /// Allows fast lookup for the incoming edge set of any given vertex.
+  NeighborLookupT InNeighbors;
+
+  /// Allows fast lookup for the outgoing edge set of any given vertex.
+  NeighborLookupT OutNeighbors;
+
+  /// An iterator adapter using a NeighborSetT::const_iterator as the base
+  /// iterator, and storing the VertexIdentifier the iterator range comes from.
+  /// The dereference operator is then performed using a pointer to the graph's
+  /// edge set.
+  template <bool IsConst, bool IsOut,
+            typename BaseIt = typename NeighborSetT::const_iterator,
+            typename T = typename std::conditional<IsConst, const EdgeValueType,
+                                                   EdgeValueType>::type>
+  class NeighborEdgeIteratorT
+      : public iterator_adaptor_base<
+            NeighborEdgeIteratorT<IsConst, IsOut>, BaseIt,
+            typename std::iterator_traits<BaseIt>::iterator_category, T> {
+    using InternalEdgeMapT =
+        typename std::conditional<IsConst, const EdgeMapT, EdgeMapT>::type;
+
+    friend class NeighborEdgeIteratorT<false, IsOut, BaseIt, EdgeValueType>;
+    friend class NeighborEdgeIteratorT<true, IsOut, BaseIt,
+                                       const EdgeValueType>;
+
+    InternalEdgeMapT *MP;
+    VertexIdentifier SI;
+
+  public:
+    template <bool IsConstDest,
+              typename = typename std::enable_if<IsConstDest && !IsConst>::type>
+    operator NeighborEdgeIteratorT<IsConstDest, IsOut, BaseIt,
+                                   const EdgeValueType>() const {
+      return NeighborEdgeIteratorT<IsConstDest, IsOut, BaseIt,
+                                   const EdgeValueType>(this->I, MP, SI);
+    }
+
+    NeighborEdgeIteratorT() = default;
+    NeighborEdgeIteratorT(BaseIt _I, InternalEdgeMapT *_MP,
+                          VertexIdentifier _SI)
+        : iterator_adaptor_base<
+              NeighborEdgeIteratorT<IsConst, IsOut>, BaseIt,
+              typename std::iterator_traits<BaseIt>::iterator_category, T>(_I),
+          MP(_MP), SI(_SI) {}
+
+    T &operator*() const {
+      if (!IsOut)
+        return *(MP->find({*(this->I), SI}));
+      else
+        return *(MP->find({SI, *(this->I)}));
+    }
+  };
+
+public:
+  /// A const iterator type for iterating through the set of edges entering a
+  /// vertex.
+  ///
+  /// Has a const EdgeValueType as its value_type
+  using ConstInEdgeIterator = NeighborEdgeIteratorT<true, false>;
+
+  /// An iterator type for iterating through the set of edges entering a vertex.
+  ///
+  /// Has an EdgeValueType as its value_type
+  using InEdgeIterator = NeighborEdgeIteratorT<false, false>;
+
+  /// A const iterator type for iterating through the set of edges leaving a
+  /// vertex.
+  ///
+  /// Has a const EdgeValueType as its value_type
+  using ConstOutEdgeIterator = NeighborEdgeIteratorT<true, true>;
+
+  /// An iterator type for iterating through the set of edges leaving a vertex.
+  ///
+  /// Has an EdgeValueType as its value_type
+  using OutEdgeIterator = NeighborEdgeIteratorT<false, true>;
+
+  /// A class for ranging over the incoming edges incident to a vertex.
+  ///
+  /// Like all views in this class it provides methods to get the beginning and
+  /// past the range iterators for the range, as well as methods to determine
+  /// the number of elements in the range and whether the range is empty.
+  template <bool isConst, bool isOut> class InOutEdgeView {
+  public:
+    using iterator = NeighborEdgeIteratorT<isConst, isOut>;
+    using const_iterator = NeighborEdgeIteratorT<true, isOut>;
+    using GraphT = typename std::conditional<isConst, const Graph, Graph>::type;
+    using InternalEdgeMapT =
+        typename std::conditional<isConst, const EdgeMapT, EdgeMapT>::type;
+
+  private:
+    InternalEdgeMapT &M;
+    const VertexIdentifier A;
+    const NeighborLookupT &NL;
+
+  public:
+    iterator begin() {
+      auto It = NL.find(A);
+      if (It == NL.end())
+        return iterator();
+      return iterator(It->second.begin(), &M, A);
+    }
+
+    const_iterator cbegin() const {
+      auto It = NL.find(A);
+      if (It == NL.end())
+        return const_iterator();
+      return const_iterator(It->second.begin(), &M, A);
+    }
+
+    const_iterator begin() const { return cbegin(); }
+
+    iterator end() {
+      auto It = NL.find(A);
+      if (It == NL.end())
+        return iterator();
+      return iterator(It->second.end(), &M, A);
+    }
+    const_iterator cend() const {
+      auto It = NL.find(A);
+      if (It == NL.end())
+        return const_iterator();
+      return const_iterator(It->second.end(), &M, A);
+    }
+
+    const_iterator end() const { return cend(); }
+
+    size_type size() const {
+      auto I = NL.find(A);
+      if (I == NL.end())
+        return 0;
+      else
+        return I->second.size();
+    }
+
+    bool empty() const { return NL.count(A) == 0; }
+
+    InOutEdgeView(GraphT &G, VertexIdentifier A)
+        : M(G.Edges), A(A), NL(isOut ? G.OutNeighbors : G.InNeighbors) {}
+  };
+
+  /// A const iterator type for iterating through the whole vertex set of the
+  /// graph.
+  ///
+  /// Has a const VertexValueType as its value_type
+  using ConstVertexIterator = typename VertexMapT::const_iterator;
+
+  /// An iterator type for iterating through the whole vertex set of the graph.
+  ///
+  /// Has a VertexValueType as its value_type
+  using VertexIterator = typename VertexMapT::iterator;
+
+  /// A class for ranging over the vertices in the graph.
+  ///
+  /// Like all views in this class it provides methods to get the beginning and
+  /// past the range iterators for the range, as well as methods to determine
+  /// the number of elements in the range and whether the range is empty.
+  template <bool isConst> class VertexView {
+  public:
+    using iterator = typename std::conditional<isConst, ConstVertexIterator,
+                                               VertexIterator>::type;
+    using const_iterator = ConstVertexIterator;
+    using GraphT = typename std::conditional<isConst, const Graph, Graph>::type;
+
+  private:
+    GraphT &G;
+
+  public:
+    iterator begin() { return G.Vertices.begin(); }
+    iterator end() { return G.Vertices.end(); }
+    const_iterator cbegin() const { return G.Vertices.cbegin(); }
+    const_iterator cend() const { return G.Vertices.cend(); }
+    const_iterator begin() const { return G.Vertices.begin(); }
+    const_iterator end() const { return G.Vertices.end(); }
+    size_type size() const { return G.Vertices.size(); }
+    bool empty() const { return G.Vertices.empty(); }
+    VertexView(GraphT &_G) : G(_G) {}
+  };
+
+  /// A const iterator for iterating through the entire edge set of the graph.
+  ///
+  /// Has a const EdgeValueType as its value_type
+  using ConstEdgeIterator = typename EdgeMapT::const_iterator;
+
+  /// An iterator for iterating through the entire edge set of the graph.
+  ///
+  /// Has an EdgeValueType as its value_type
+  using EdgeIterator = typename EdgeMapT::iterator;
+
+  /// A class for ranging over all the edges in the graph.
+  ///
+  /// Like all views in this class it provides methods to get the beginning and
+  /// past the range iterators for the range, as well as methods to determine
+  /// the number of elements in the range and whether the range is empty.
+  template <bool isConst> class EdgeView {
+  public:
+    using iterator = typename std::conditional<isConst, ConstEdgeIterator,
+                                               EdgeIterator>::type;
+    using const_iterator = ConstEdgeIterator;
+    using GraphT = typename std::conditional<isConst, const Graph, Graph>::type;
+
+  private:
+    GraphT &G;
+
+  public:
+    iterator begin() { return G.Edges.begin(); }
+    iterator end() { return G.Edges.end(); }
+    const_iterator cbegin() const { return G.Edges.cbegin(); }
+    const_iterator cend() const { return G.Edges.cend(); }
+    const_iterator begin() const { return G.Edges.begin(); }
+    const_iterator end() const { return G.Edges.end(); }
+    size_type size() const { return G.Edges.size(); }
+    bool empty() const { return G.Edges.empty(); }
+    EdgeView(GraphT &_G) : G(_G) {}
+  };
+
+public:
+  // TODO: Implement a constructor to enable graph initialisation.
+  // Something like:
+  //   Graph<int, int, int> G(
+  //   {1, 2, 3, 4, 5},
+  //   {{1, 2}, {2, 3}, {3, 4}});
+
+  /// Empty the Graph
+  void clear() {
+    Edges.clear();
+    Vertices.clear();
+    InNeighbors.clear();
+    OutNeighbors.clear();
+  }
+
+  /// Returns a view object allowing iteration over the vertices of the graph.
+  /// It also allows access to the size of the vertex set.
+  VertexView<false> vertices() { return VertexView<false>(*this); }
+
+  VertexView<true> vertices() const { return VertexView<true>(*this); }
+
+  /// Returns a view object allowing iteration over the edges of the graph.
+  /// It also allows access to the size of the edge set.
+  EdgeView<false> edges() { return EdgeView<false>(*this); }
+
+  EdgeView<true> edges() const { return EdgeView<true>(*this); }
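+
+  // A minimal usage sketch of the graph views (assuming the Graph<int, int,
+  // int> instantiation from the TODO above, i.e. int identifiers and int
+  // attributes):
+  //
+  //   Graph<int, int, int> G;
+  //   G[1] = 10;      // vertex 1 with attribute 10
+  //   G[{1, 2}] = 7;  // edge 1 -> 2 with attribute 7
+  //   for (const auto &V : G.vertices()) {
+  //     // V.first is the VertexIdentifier, V.second the VertexAttribute.
+  //   }
+  //   for (const auto &E : G.edges()) {
+  //     // E.first is the (start, end) EdgeIdentifier, E.second the
+  //     // EdgeAttribute.
+  //   }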
+
+  /// Returns a view object allowing iteration over the edges which start at
+  /// a vertex I.
+  InOutEdgeView<false, true> outEdges(const VertexIdentifier I) {
+    return InOutEdgeView<false, true>(*this, I);
+  }
+
+  InOutEdgeView<true, true> outEdges(const VertexIdentifier I) const {
+    return InOutEdgeView<true, true>(*this, I);
+  }
+
+  /// Returns a view object allowing iteration over the edges which point to
+  /// a vertex I.
+  InOutEdgeView<false, false> inEdges(const VertexIdentifier I) {
+    return InOutEdgeView<false, false>(*this, I);
+  }
+
+  InOutEdgeView<true, false> inEdges(const VertexIdentifier I) const {
+    return InOutEdgeView<true, false>(*this, I);
+  }
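+
+  // A minimal sketch of the per-vertex edge views (same hypothetical
+  // Graph<int, int, int> instantiation as above):
+  //
+  //   for (auto &E : G.outEdges(1)) {
+  //     // E.first.second is the target vertex, E.second the EdgeAttribute.
+  //   }
+  //   for (auto &E : G.inEdges(2)) {
+  //     // E.first.first is the source vertex.
+  //   }
+  //   if (G.outEdges(3).empty()) {
+  //     // Vertex 3 has no outgoing edges.
+  //   }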
+
+  /// Looks up the vertex with identifier I; if it does not exist, it is
+  /// default constructed.
+  VertexAttribute &operator[](const VertexIdentifier &I) {
+    return Vertices.FindAndConstruct(I).second;
+  }
+
+  /// Looks up the edge with identifier I; if it does not exist, it is default
+  /// constructed. If its endpoints do not exist, they are also default
+  /// constructed.
+  EdgeAttribute &operator[](const EdgeIdentifier &I) {
+    auto &P = Edges.FindAndConstruct(I);
+    Vertices.FindAndConstruct(I.first);
+    Vertices.FindAndConstruct(I.second);
+    InNeighbors[I.second].insert(I.first);
+    OutNeighbors[I.first].insert(I.second);
+    return P.second;
+  }
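+
+  // Sketch: subscripting with an EdgeIdentifier creates the edge and both of
+  // its endpoints on demand (values illustrative):
+  //
+  //   Graph<int, int, int> G;
+  //   G[{4, 5}] = 9;  // implicitly creates vertices 4 and 5
+  //   assert(G.count(4) && G.count(5) && G.count({4, 5}));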
+
+  /// Looks up a vertex with identifier I, or returns an error if it does not
+  /// exist.
+  Expected<VertexAttribute &> at(const VertexIdentifier &I) {
+    auto It = Vertices.find(I);
+    if (It == Vertices.end())
+      return make_error<StringError>(
+          "Vertex Identifier Does Not Exist",
+          std::make_error_code(std::errc::invalid_argument));
+    return It->second;
+  }
+
+  Expected<const VertexAttribute &> at(const VertexIdentifier &I) const {
+    auto It = Vertices.find(I);
+    if (It == Vertices.end())
+      return make_error<StringError>(
+          "Vertex Identifier Does Not Exist",
+          std::make_error_code(std::errc::invalid_argument));
+    return It->second;
+  }
+
+  /// Looks up an edge with identifier I, or returns an error if it does not
+  /// exist.
+  Expected<EdgeAttribute &> at(const EdgeIdentifier &I) {
+    auto It = Edges.find(I);
+    if (It == Edges.end())
+      return make_error<StringError>(
+          "Edge Identifier Does Not Exist",
+          std::make_error_code(std::errc::invalid_argument));
+    return It->second;
+  }
+
+  Expected<const EdgeAttribute &> at(const EdgeIdentifier &I) const {
+    auto It = Edges.find(I);
+    if (It == Edges.end())
+      return make_error<StringError>(
+          "Edge Identifier Does Not Exist",
+          std::make_error_code(std::errc::invalid_argument));
+    return It->second;
+  }
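+
+  // A minimal sketch of checked lookup with Expected; the error must be
+  // consumed when the lookup fails:
+  //
+  //   if (auto VA = G.at(1))
+  //     *VA += 1;                      // mutate the VertexAttribute
+  //   else
+  //     consumeError(VA.takeError());  // vertex 1 does not exist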
+
+  /// Looks for a vertex with identifier I; returns 1 if one exists, and
+  /// 0 otherwise.
+  size_type count(const VertexIdentifier &I) const {
+    return Vertices.count(I);
+  }
+
+  /// Looks for an edge with identifier I; returns 1 if one exists, and 0
+  /// otherwise.
+  size_type count(const EdgeIdentifier &I) const { return Edges.count(I); }
+
+  /// Inserts a vertex into the graph with Identifier Val.first, and
+  /// Attribute Val.second.
+  std::pair<VertexIterator, bool>
+  insert(const std::pair<VertexIdentifier, VertexAttribute> &Val) {
+    return Vertices.insert(Val);
+  }
+
+  std::pair<VertexIterator, bool>
+  insert(std::pair<VertexIdentifier, VertexAttribute> &&Val) {
+    return Vertices.insert(std::move(Val));
+  }
+
+  /// Inserts an edge into the graph with Identifier Val.first, and
+  /// Attribute Val.second. If the key is already in the map, it returns false
+  /// and doesn't update the value.
+  std::pair<EdgeIterator, bool>
+  insert(const std::pair<EdgeIdentifier, EdgeAttribute> &Val) {
+    const auto &p = Edges.insert(Val);
+    if (p.second) {
+      const auto &EI = Val.first;
+      Vertices.FindAndConstruct(EI.first);
+      Vertices.FindAndConstruct(EI.second);
+      InNeighbors[EI.second].insert(EI.first);
+      OutNeighbors[EI.first].insert(EI.second);
+    }
+
+    return p;
+  }
+
+  /// Inserts an edge into the graph with Identifier Val.first, and
+  /// Attribute Val.second. If the key is already in the map, it returns false
+  /// and doesn't update the value.
+  std::pair<EdgeIterator, bool>
+  insert(std::pair<EdgeIdentifier, EdgeAttribute> &&Val) {
+    auto EI = Val.first;
+    const auto &p = Edges.insert(std::move(Val));
+    if (p.second) {
+      Vertices.FindAndConstruct(EI.first);
+      Vertices.FindAndConstruct(EI.second);
+      InNeighbors[EI.second].insert(EI.first);
+      OutNeighbors[EI.first].insert(EI.second);
+    }
+
+    return p;
+  }
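+
+  // Sketch: building a small graph with insert; .second of the result is
+  // false when the key already exists, mirroring DenseMap::insert (values
+  // illustrative):
+  //
+  //   Graph<int, int, int> G;
+  //   G.insert({1, 100});                      // vertex 1, attribute 100
+  //   auto P = G.insert({{1, 2}, 42});         // edge 1 -> 2, attribute 42
+  //   assert(P.second);                        // newly inserted
+  //   assert(!G.insert({{1, 2}, 99}).second);  // duplicate: value unchanged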
+};
+} // end namespace xray
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/XRay/InstrumentationMap.h b/linux-x64/clang/include/llvm/XRay/InstrumentationMap.h
new file mode 100644
index 0000000..42bfca3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/XRay/InstrumentationMap.h
@@ -0,0 +1,135 @@
+//===- InstrumentationMap.h - XRay Instrumentation Map ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the interface for extracting the instrumentation map from an
+// XRay-instrumented binary.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_XRAY_INSTRUMENTATION_MAP_H
+#define LLVM_XRAY_INSTRUMENTATION_MAP_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <unordered_map>
+#include <vector>
+
+namespace llvm {
+
+namespace xray {
+
+// Forward declare to make a friend.
+class InstrumentationMap;
+
+/// Loads the instrumentation map from |Filename|. This auto-deduces the type of
+/// the instrumentation map.
+Expected<InstrumentationMap> loadInstrumentationMap(StringRef Filename);
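+
+// A minimal loading sketch ("a.out" is a placeholder path):
+//
+//   if (auto MapOrErr = loadInstrumentationMap("a.out")) {
+//     const InstrumentationMap &Map = *MapOrErr;
+//     // ... query Map here.
+//   } else {
+//     consumeError(MapOrErr.takeError());
+//   }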
+
+/// Represents an XRay instrumentation sled entry from an object file.
+struct SledEntry {
+  /// Each enumerator represents one of the supported kinds of instrumentation
+  /// map entries.
+  enum class FunctionKinds { ENTRY, EXIT, TAIL, LOG_ARGS_ENTER, CUSTOM_EVENT };
+
+  /// The address of the sled.
+  uint64_t Address;
+
+  /// The address of the function.
+  uint64_t Function;
+
+  /// The kind of sled.
+  FunctionKinds Kind;
+
+  /// Whether the sled was annotated to always be instrumented.
+  bool AlwaysInstrument;
+};
+
+struct YAMLXRaySledEntry {
+  int32_t FuncId;
+  yaml::Hex64 Address;
+  yaml::Hex64 Function;
+  SledEntry::FunctionKinds Kind;
+  bool AlwaysInstrument;
+  std::string FunctionName;
+};
+
+/// The InstrumentationMap represents the computed function ids and the
+/// indicated function addresses from an object file (or a YAML file). This
+/// provides an interface to just the mapping between the function id and the
+/// function address.
+///
+/// We also provide raw access to the actual instrumentation map entries we find
+/// associated with a particular object file.
+///
+class InstrumentationMap {
+public:
+  using FunctionAddressMap = std::unordered_map<int32_t, uint64_t>;
+  using FunctionAddressReverseMap = std::unordered_map<uint64_t, int32_t>;
+  using SledContainer = std::vector<SledEntry>;
+
+private:
+  SledContainer Sleds;
+  FunctionAddressMap FunctionAddresses;
+  FunctionAddressReverseMap FunctionIds;
+
+  friend Expected<InstrumentationMap> loadInstrumentationMap(StringRef);
+
+public:
+  /// Provides a raw accessor to the unordered map of function addresses.
+  const FunctionAddressMap &getFunctionAddresses() { return FunctionAddresses; }
+
+  /// Returns an XRay computed function id, provided a function address.
+  Optional<int32_t> getFunctionId(uint64_t Addr) const;
+
+  /// Returns the function address for a function id.
+  Optional<uint64_t> getFunctionAddr(int32_t FuncId) const;
+
+  /// Provide read-only access to the entries of the instrumentation map.
+  const SledContainer &sleds() const { return Sleds; }
+};
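+
+// A minimal sketch of the id <-> address queries (Map as loaded above; Addr
+// is a hypothetical instrumented function address):
+//
+//   if (Optional<int32_t> Id = Map.getFunctionId(Addr))
+//     if (Optional<uint64_t> Back = Map.getFunctionAddr(*Id))
+//       assert(*Back == Addr);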
+
+} // end namespace xray
+
+namespace yaml {
+
+template <> struct ScalarEnumerationTraits<xray::SledEntry::FunctionKinds> {
+  static void enumeration(IO &IO, xray::SledEntry::FunctionKinds &Kind) {
+    IO.enumCase(Kind, "function-enter", xray::SledEntry::FunctionKinds::ENTRY);
+    IO.enumCase(Kind, "function-exit", xray::SledEntry::FunctionKinds::EXIT);
+    IO.enumCase(Kind, "tail-exit", xray::SledEntry::FunctionKinds::TAIL);
+    IO.enumCase(Kind, "log-args-enter",
+                xray::SledEntry::FunctionKinds::LOG_ARGS_ENTER);
+    IO.enumCase(Kind, "custom-event",
+                xray::SledEntry::FunctionKinds::CUSTOM_EVENT);
+  }
+};
+
+template <> struct MappingTraits<xray::YAMLXRaySledEntry> {
+  static void mapping(IO &IO, xray::YAMLXRaySledEntry &Entry) {
+    IO.mapRequired("id", Entry.FuncId);
+    IO.mapRequired("address", Entry.Address);
+    IO.mapRequired("function", Entry.Function);
+    IO.mapRequired("kind", Entry.Kind);
+    IO.mapRequired("always-instrument", Entry.AlwaysInstrument);
+    IO.mapOptional("function-name", Entry.FunctionName);
+  }
+
+  static constexpr bool flow = true;
+};
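+
+// With flow mapping enabled above, a serialized sled entry looks roughly like
+// this (values illustrative):
+//
+//   - { id: 1, address: 0x401000, function: 0x401000,
+//       kind: function-enter, always-instrument: true }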
+
+} // end namespace yaml
+
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(xray::YAMLXRaySledEntry)
+
+#endif // LLVM_XRAY_INSTRUMENTATION_MAP_H
diff --git a/linux-x64/clang/include/llvm/XRay/Trace.h b/linux-x64/clang/include/llvm/XRay/Trace.h
new file mode 100644
index 0000000..6b033d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/XRay/Trace.h
@@ -0,0 +1,71 @@
+//===- Trace.h - XRay Trace Abstraction -----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the XRay Trace class representing records in an XRay trace file.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_XRAY_TRACE_H
+#define LLVM_XRAY_TRACE_H
+
+#include <cstdint>
+#include <vector>
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/XRay/XRayRecord.h"
+
+namespace llvm {
+namespace xray {
+
+/// A Trace object represents the records that have been loaded from XRay
+/// log files generated by instrumented binaries. We encapsulate the logic of
+/// reading the traces in factory functions that populate the Trace object
+/// appropriately.
+///
+/// Trace objects provide an accessor to an XRayFileHeader which gives more
+/// details about the file from which the XRay trace was loaded.
+///
+/// Usage:
+///
+///   if (auto TraceOrErr = loadTraceFile("xray-log.something.xray")) {
+///     auto& T = *TraceOrErr;
+///     // T.getFileHeader() will provide information from the trace header.
+///     for (const XRayRecord &R : T) {
+///       // ... do something with R here.
+///     }
+///   } else {
+///     // Handle the error here.
+///   }
+///
+class Trace {
+  XRayFileHeader FileHeader;
+  std::vector<XRayRecord> Records;
+
+  typedef std::vector<XRayRecord>::const_iterator citerator;
+
+  friend Expected<Trace> loadTraceFile(StringRef, bool);
+
+public:
+  /// Provides access to the loaded XRay trace file header.
+  const XRayFileHeader &getFileHeader() const { return FileHeader; }
+
+  citerator begin() const { return Records.begin(); }
+  citerator end() const { return Records.end(); }
+  size_t size() const { return Records.size(); }
+};
+
+/// This function will attempt to load XRay trace records from the provided
+/// |Filename|. If |Sort| is true, the records are sorted by timestamp (TSC)
+/// before being returned.
+Expected<Trace> loadTraceFile(StringRef Filename, bool Sort = false);
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_XRAY_TRACE_H
diff --git a/linux-x64/clang/include/llvm/XRay/XRayRecord.h b/linux-x64/clang/include/llvm/XRay/XRayRecord.h
new file mode 100644
index 0000000..5c5e9f4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/XRay/XRayRecord.h
@@ -0,0 +1,85 @@
+//===- XRayRecord.h - XRay Trace Record -----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file replicates the record definition for XRay log entries. This should
+// follow the evolution of the log record versions supported in the compiler-rt
+// xray project.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_XRAY_XRAY_RECORD_H
+#define LLVM_XRAY_XRAY_RECORD_H
+
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace xray {
+
+/// All XRay traces have a header providing top-matter information that helps
+/// tools determine how to interpret the information available in the trace.
+struct XRayFileHeader {
+  /// Version of the XRay implementation that produced this file.
+  uint16_t Version = 0;
+
+  /// A numeric identifier for the type of file this is. Best used in
+  /// combination with Version.
+  uint16_t Type = 0;
+
+  /// Whether the timestamp counters (TSC) produced by the CPU move at a
+  /// constant rate.
+  bool ConstantTSC;
+
+  /// Whether the timestamp counters (TSC) produced by the CPU do not stop.
+  bool NonstopTSC;
+
+  /// The number of cycles per second for the CPU that produced the timestamp
+  /// counter (TSC) values. Useful for estimating the amount of time that
+  /// elapsed between two TSCs on some platforms.
+  uint64_t CycleFrequency = 0;
+
+  // The contents depend on the type of XRay trace: the naive format stores a
+  // wallclock timespec, while FDR logging stores the size of a thread buffer.
+  char FreeFormData[16];
+};
+
+/// Determines the supported types of records that could be seen in XRay traces.
+/// This may or may not correspond to actual record types in the raw trace (as
+/// the loader implementation may synthesize this information in the process
+/// of loading).
+enum class RecordTypes { ENTER, EXIT, TAIL_EXIT, ENTER_ARG };
+
+struct XRayRecord {
+  /// The type of record, as the raw numeric value found in the trace.
+  uint16_t RecordType;
+
+  /// The CPU where the thread is running. We assume number of CPUs <= 65536.
+  uint16_t CPU;
+
+  /// Identifies the type of record.
+  RecordTypes Type;
+
+  /// The function ID for the record.
+  int32_t FuncId;
+
+  /// The full 8 bytes of the TSC at the time the log record was taken.
+  uint64_t TSC;
+
+  /// The thread ID for the currently running thread.
+  uint32_t TId;
+
+  /// The function call arguments.
+  std::vector<uint64_t> CallArgs;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_XRAY_XRAY_RECORD_H
diff --git a/linux-x64/clang/include/llvm/XRay/YAMLXRayRecord.h b/linux-x64/clang/include/llvm/XRay/YAMLXRayRecord.h
new file mode 100644
index 0000000..b436aef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/XRay/YAMLXRayRecord.h
@@ -0,0 +1,103 @@
+//===- YAMLXRayRecord.h - XRay Record YAML Support Definitions ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Types and traits specialisations for YAML I/O of XRay log entries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_XRAY_YAML_XRAY_RECORD_H
+#define LLVM_XRAY_YAML_XRAY_RECORD_H
+
+#include <type_traits>
+
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/XRay/XRayRecord.h"
+
+namespace llvm {
+namespace xray {
+
+struct YAMLXRayFileHeader {
+  uint16_t Version;
+  uint16_t Type;
+  bool ConstantTSC;
+  bool NonstopTSC;
+  uint64_t CycleFrequency;
+};
+
+struct YAMLXRayRecord {
+  uint16_t RecordType;
+  uint16_t CPU;
+  RecordTypes Type;
+  int32_t FuncId;
+  std::string Function;
+  uint64_t TSC;
+  uint32_t TId;
+  std::vector<uint64_t> CallArgs;
+};
+
+struct YAMLXRayTrace {
+  YAMLXRayFileHeader Header;
+  std::vector<YAMLXRayRecord> Records;
+};
+
+} // namespace xray
+
+namespace yaml {
+
+// YAML Traits
+// -----------
+template <> struct ScalarEnumerationTraits<xray::RecordTypes> {
+  static void enumeration(IO &IO, xray::RecordTypes &Type) {
+    IO.enumCase(Type, "function-enter", xray::RecordTypes::ENTER);
+    IO.enumCase(Type, "function-exit", xray::RecordTypes::EXIT);
+    IO.enumCase(Type, "function-tail-exit", xray::RecordTypes::TAIL_EXIT);
+    IO.enumCase(Type, "function-enter-arg", xray::RecordTypes::ENTER_ARG);
+  }
+};
+
+template <> struct MappingTraits<xray::YAMLXRayFileHeader> {
+  static void mapping(IO &IO, xray::YAMLXRayFileHeader &Header) {
+    IO.mapRequired("version", Header.Version);
+    IO.mapRequired("type", Header.Type);
+    IO.mapRequired("constant-tsc", Header.ConstantTSC);
+    IO.mapRequired("nonstop-tsc", Header.NonstopTSC);
+    IO.mapRequired("cycle-frequency", Header.CycleFrequency);
+  }
+};
+
+template <> struct MappingTraits<xray::YAMLXRayRecord> {
+  static void mapping(IO &IO, xray::YAMLXRayRecord &Record) {
+    // FIXME: Make this type actually be descriptive
+    IO.mapRequired("type", Record.RecordType);
+    IO.mapRequired("func-id", Record.FuncId);
+    IO.mapOptional("function", Record.Function);
+    IO.mapOptional("args", Record.CallArgs);
+    IO.mapRequired("cpu", Record.CPU);
+    IO.mapRequired("thread", Record.TId);
+    IO.mapRequired("kind", Record.Type);
+    IO.mapRequired("tsc", Record.TSC);
+  }
+
+  static constexpr bool flow = true;
+};
+
+template <> struct MappingTraits<xray::YAMLXRayTrace> {
+  static void mapping(IO &IO, xray::YAMLXRayTrace &Trace) {
+    // A trace file contains two parts, the header and the list of all the
+    // trace records.
+    IO.mapRequired("header", Trace.Header);
+    IO.mapRequired("records", Trace.Records);
+  }
+};
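+
+// A minimal deserialization sketch using the traits above (Buffer is a
+// hypothetical string holding the YAML document):
+//
+//   xray::YAMLXRayTrace Trace;
+//   llvm::yaml::Input In(Buffer);
+//   In >> Trace;
+//   if (In.error()) {
+//     // Handle malformed input.
+//   }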
+
+} // namespace yaml
+} // namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(xray::YAMLXRayRecord)
+
+#endif // LLVM_XRAY_YAML_XRAY_RECORD_H